Merge branch 'develop' into custom_csp_router_task

Robin Müller 2023-03-24 14:17:20 +01:00
commit 5ec001652f
25 changed files with 241 additions and 117 deletions


@ -18,10 +18,23 @@ will constitute a breaking change warranting a new major release:
## Fixed
- PAPB busy polling is now implemented properly, with an upper bound on how often the PAPB is
allowed to report busy before the BUSY return value is returned. That case is also propagated
and checked for properly. Ideally, this will never be an issue and the PAPB VC interface should
never block for a longer period (see the polling sketch below).
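
A minimal sketch of that bounded polling pattern (illustration only, not part of this diff; `readBusyFlag` is a hypothetical stand-in for the GPIO read, and the real implementation is `PapbVcInterface::pollPapbBusySignal` further below in this changeset):

```cpp
#include <unistd.h>

#include <cstdint>
#include <functional>

enum class PollResult { OK, BUSY, FAILED };

// Poll a busy flag at most maxPollRetries times, sleeping retryDelayUs between polls,
// and report BUSY once the bound is exhausted so the caller can abort the transfer.
PollResult pollBusySignal(const std::function<bool(bool&)>& readBusyFlag,
                          uint32_t maxPollRetries, uint32_t retryDelayUs) {
  uint32_t busyIdx = 0;
  while (true) {
    bool busy = false;
    if (not readBusyFlag(busy)) {
      return PollResult::FAILED;  // the busy-signal read itself failed
    }
    if (not busy) {
      return PollResult::OK;  // interface ready to accept data
    }
    busyIdx++;
    if (busyIdx >= maxPollRetries) {
      return PollResult::BUSY;  // upper bound reached
    }
    usleep(retryDelayUs);
  }
}
```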
## Added
- The persistent TM stores now have low priorities and behave like background threads. This
should prevent them from blocking or slowing down the system even during dumps
(at least in theory); see the dump loop sketch after this list.
- STR: Fix intermittent issues on datalink layer data reception.
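
A generic sketch of that background behaviour (illustration only; `handleOneStorePass` is a hypothetical stand-in, the real loops live in `PersistentLogTmStoreTask` and `PersistentSingleTmStoreTask` further below in this changeset):

```cpp
#include <fsfw/tasks/TaskFactory.h>

#include <functional>

// Permanent low-priority dump loop: do a bounded amount of work per pass and yield
// via delayTask() whenever the store has nothing to dump, so higher-priority tasks
// are not starved even while a dump is running.
void backgroundDumpLoop(const std::function<bool()>& handleOneStorePass) {
  while (true) {
    bool busy = handleOneStorePass();  // dumps at most a few packets per call
    if (not busy) {
      TaskFactory::delayTask(100);  // idle: sleep 100 ms
    }
  }
}
```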
## Changed
- Rework FSFW OSALs to properly support regular scheduling (NICE priorities) and real-time
scheduling; see the task creation sketch after this list.
- Tweak scheduling priorities.
- STR: Move the datalink layer completely into `StrComHandler`. The DLL is now hidden from the
device handler.
- STR: Is now scheduled twice in ACS PST.
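
A minimal sketch of the resulting task creation pattern, as used throughout the scheduling code in this changeset (illustration only; the wrapper function and header paths are illustrative, and the sketch assumes the `createPeriodicTask` overload taking a `PosixThreadArgs*` that comes with the fsfw submodule bump in this PR):

```cpp
#include <fsfw/osal/linux/PosixThread.h>
#include <fsfw/tasks/PeriodicTaskIF.h>
#include <fsfw/tasks/TaskFactory.h>

// SCHED_RR arguments for real-time tasks; tasks created without them stay under the
// regular scheduler, where the priority argument maps to a NICE value.
PosixThreadArgs RR_SCHEDULING = {.policy = SchedulingPolicy::RR};

void createExampleTasks(TaskFactory* factory, TaskDeadlineMissedFunction missedDeadlineFunc) {
  // Real-time task: priority 60 under SCHED_RR.
  PeriodicTaskIF* coreCtrlTask = factory->createPeriodicTask(
      "CORE_CTRL", 60, PeriodicTaskIF::MINIMUM_STACK_SIZE, 0.4, missedDeadlineFunc,
      &RR_SCHEDULING);
  // Background task: regular scheduling, NICE priority 0, no deadline-missed callback.
  PeriodicTaskIF* logTmTask = factory->createPeriodicTask(
      "LOG_PSTORE", 0, PeriodicTaskIF::MINIMUM_STACK_SIZE, 1.0, nullptr);
  static_cast<void>(coreCtrlTask);
  static_cast<void>(logTmTask);
}
```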


@ -72,8 +72,9 @@ void scheduling::initTasks() {
#if OBSW_ADD_SA_DEPL == 1
// Could add this to the core controller but the core controller does so many things that I would
// prefer to have the solar array deployment in a separate task.
PeriodicTaskIF* solarArrayDeplTask = factory->createPeriodicTask(
"SOLAR_ARRAY_DEPL", 65, PeriodicTaskIF::MINIMUM_STACK_SIZE, 0.4, missedDeadlineFunc);
PeriodicTaskIF* solarArrayDeplTask =
factory->createPeriodicTask("SOLAR_ARRAY_DEPL", 65, PeriodicTaskIF::MINIMUM_STACK_SIZE, 0.4,
missedDeadlineFunc, &RR_SCHEDULING);
result = solarArrayDeplTask->addComponent(objects::SOLAR_ARRAY_DEPL_HANDLER);
if (result != returnvalue::OK) {
scheduling::printAddObjectError("SOLAR_ARRAY_DEPL", objects::SOLAR_ARRAY_DEPL_HANDLER);
@ -81,7 +82,7 @@ void scheduling::initTasks() {
#endif
PeriodicTaskIF* coreCtrlTask = factory->createPeriodicTask(
"CORE_CTRL", 60, PeriodicTaskIF::MINIMUM_STACK_SIZE, 0.4, missedDeadlineFunc);
"CORE_CTRL", 60, PeriodicTaskIF::MINIMUM_STACK_SIZE, 0.4, missedDeadlineFunc, &RR_SCHEDULING);
result = coreCtrlTask->addComponent(objects::CORE_CONTROLLER);
if (result != returnvalue::OK) {
scheduling::printAddObjectError("CORE_CTRL", objects::CORE_CONTROLLER);
@ -89,7 +90,7 @@ void scheduling::initTasks() {
/* TMTC Distribution */
PeriodicTaskIF* tmTcDistributor = factory->createPeriodicTask(
"DIST", 45, PeriodicTaskIF::MINIMUM_STACK_SIZE, 0.2, missedDeadlineFunc);
"TC_DIST", 45, PeriodicTaskIF::MINIMUM_STACK_SIZE, 0.2, missedDeadlineFunc, &RR_SCHEDULING);
#if OBSW_ADD_TCPIP_SERVERS == 1
#if OBSW_ADD_TMTC_UDP_SERVER == 1
result = tmTcDistributor->addComponent(objects::UDP_TMTC_SERVER);
@ -119,16 +120,18 @@ void scheduling::initTasks() {
#if OBSW_ADD_TCPIP_SERVERS == 1
#if OBSW_ADD_TMTC_UDP_SERVER == 1
PeriodicTaskIF* udpPollingTask = factory->createPeriodicTask(
"UDP_TMTC_POLLING", 65, PeriodicTaskIF::MINIMUM_STACK_SIZE, 2.0, missedDeadlineFunc);
PeriodicTaskIF* udpPollingTask =
factory->createPeriodicTask("UDP_TMTC_POLLING", 60, PeriodicTaskIF::MINIMUM_STACK_SIZE, 2.0,
missedDeadlineFunc, &RR_SCHEDULING);
result = udpPollingTask->addComponent(objects::UDP_TMTC_POLLING_TASK);
if (result != returnvalue::OK) {
scheduling::printAddObjectError("UDP_POLLING", objects::UDP_TMTC_POLLING_TASK);
}
#endif
#if OBSW_ADD_TMTC_TCP_SERVER == 1
PeriodicTaskIF* tcpPollingTask = factory->createPeriodicTask(
"TCP_TMTC_POLLING", 65, PeriodicTaskIF::MINIMUM_STACK_SIZE, 2.0, missedDeadlineFunc);
PeriodicTaskIF* tcpPollingTask =
factory->createPeriodicTask("TCP_TMTC_POLLING", 60, PeriodicTaskIF::MINIMUM_STACK_SIZE, 2.0,
missedDeadlineFunc, &RR_SCHEDULING);
result = tcpPollingTask->addComponent(objects::TCP_TMTC_POLLING_TASK);
if (result != returnvalue::OK) {
scheduling::printAddObjectError("UDP_POLLING", objects::TCP_TMTC_POLLING_TASK);
@ -136,8 +139,9 @@ void scheduling::initTasks() {
#endif
#endif
PeriodicTaskIF* genericSysTask = factory->createPeriodicTask(
"SYSTEM_TASK", 50, PeriodicTaskIF::MINIMUM_STACK_SIZE, 0.5, missedDeadlineFunc);
PeriodicTaskIF* genericSysTask =
factory->createPeriodicTask("SYSTEM_TASK", 50, PeriodicTaskIF::MINIMUM_STACK_SIZE, 0.5,
missedDeadlineFunc, &RR_SCHEDULING);
result = genericSysTask->addComponent(objects::EIVE_SYSTEM);
if (result != returnvalue::OK) {
scheduling::printAddObjectError("EIVE_SYSTEM", objects::EIVE_SYSTEM);
@ -171,7 +175,7 @@ void scheduling::initTasks() {
// Runs in IRQ mode, frequency does not really matter
PeriodicTaskIF* pdecHandlerTask = factory->createPeriodicTask(
"PDEC_HANDLER", 75, PeriodicTaskIF::MINIMUM_STACK_SIZE, 1.0, missedDeadlineFunc);
"PDEC_HANDLER", 75, PeriodicTaskIF::MINIMUM_STACK_SIZE, 1.0, nullptr, &RR_SCHEDULING);
result = pdecHandlerTask->addComponent(objects::PDEC_HANDLER);
if (result != returnvalue::OK) {
scheduling::printAddObjectError("PDEC Handler", objects::PDEC_HANDLER);
@ -179,50 +183,53 @@ void scheduling::initTasks() {
#endif /* OBSW_ADD_CCSDS_IP_CORE == 1 */
// All the TM store tasks run in permanent loops, frequency does not matter
PeriodicTaskIF* liveTmTask =
factory->createPeriodicTask("LIVE_TM", 55, PeriodicTaskIF::MINIMUM_STACK_SIZE, 1.0, nullptr);
PeriodicTaskIF* liveTmTask = factory->createPeriodicTask(
"LIVE_TM", 55, PeriodicTaskIF::MINIMUM_STACK_SIZE, 1.0, nullptr, &RR_SCHEDULING);
result = liveTmTask->addComponent(objects::LIVE_TM_TASK);
if (result != returnvalue::OK) {
scheduling::printAddObjectError("LIVE_TM", objects::LIVE_TM_TASK);
}
PeriodicTaskIF* logTmTask = factory->createPeriodicTask(
"LOG_STORE_AND_TM", 45, PeriodicTaskIF::MINIMUM_STACK_SIZE, 1.0, nullptr);
"LOG_PSTORE", 0, PeriodicTaskIF::MINIMUM_STACK_SIZE, 1.0, nullptr);
result = logTmTask->addComponent(objects::LOG_STORE_AND_TM_TASK);
if (result != returnvalue::OK) {
scheduling::printAddObjectError("LOG_STORE_AND_TM", objects::LOG_STORE_AND_TM_TASK);
}
PeriodicTaskIF* hkTmTask = factory->createPeriodicTask(
"HK_STORE_AND_TM", 45, PeriodicTaskIF::MINIMUM_STACK_SIZE, 1.0, nullptr);
"HK_PSTORE", 0, PeriodicTaskIF::MINIMUM_STACK_SIZE, 1.0, nullptr);
result = hkTmTask->addComponent(objects::HK_STORE_AND_TM_TASK);
if (result != returnvalue::OK) {
scheduling::printAddObjectError("HK_STORE_AND_TM", objects::HK_STORE_AND_TM_TASK);
}
PeriodicTaskIF* cfdpTmTask = factory->createPeriodicTask(
"CFDP_STORE_AND_TM", 45, PeriodicTaskIF::MINIMUM_STACK_SIZE, 1.0, nullptr);
"CFDP_PSTORE", 0, PeriodicTaskIF::MINIMUM_STACK_SIZE, 1.0, nullptr);
result = cfdpTmTask->addComponent(objects::CFDP_STORE_AND_TM_TASK);
if (result != returnvalue::OK) {
scheduling::printAddObjectError("CFDP_STORE_AND_TM", objects::CFDP_STORE_AND_TM_TASK);
}
#if OBSW_ADD_CFDP_COMPONENTS == 1
PeriodicTaskIF* cfdpTask = factory->createPeriodicTask(
"CFDP Handler", 45, PeriodicTaskIF::MINIMUM_STACK_SIZE, 0.4, missedDeadlineFunc);
PeriodicTaskIF* cfdpTask =
factory->createPeriodicTask("CFDP_HANDLER", 40, PeriodicTaskIF::MINIMUM_STACK_SIZE, 0.4,
missedDeadlineFunc, &RR_SCHEDULING);
result = cfdpTask->addComponent(objects::CFDP_HANDLER);
if (result != returnvalue::OK) {
scheduling::printAddObjectError("CFDP Handler", objects::CFDP_HANDLER);
}
#endif
PeriodicTaskIF* gpsTask = factory->createPeriodicTask(
"GPS_TASK", 50, PeriodicTaskIF::MINIMUM_STACK_SIZE * 2, 0.4, missedDeadlineFunc);
PeriodicTaskIF* gpsTask =
factory->createPeriodicTask("GPS_TASK", 50, PeriodicTaskIF::MINIMUM_STACK_SIZE * 2, 0.4,
missedDeadlineFunc, &RR_SCHEDULING);
result = gpsTask->addComponent(objects::GPS_CONTROLLER);
if (result != returnvalue::OK) {
scheduling::printAddObjectError("GPS_CTRL", objects::GPS_CONTROLLER);
}
#if OBSW_ADD_ACS_BOARD == 1
PeriodicTaskIF* acsBrdPolling = factory->createPeriodicTask(
"ACS_BOARD_POLLING", 85, PeriodicTaskIF::MINIMUM_STACK_SIZE * 2, 0.4, missedDeadlineFunc);
PeriodicTaskIF* acsBrdPolling =
factory->createPeriodicTask("ACS_BOARD_POLLING", 85, PeriodicTaskIF::MINIMUM_STACK_SIZE * 2,
0.4, missedDeadlineFunc, &RR_SCHEDULING);
result = acsBrdPolling->addComponent(objects::ACS_BOARD_POLLING_TASK);
if (result != returnvalue::OK) {
scheduling::printAddObjectError("ACS_BOARD_POLLING", objects::ACS_BOARD_POLLING_TASK);
@ -230,16 +237,18 @@ void scheduling::initTasks() {
#endif
#if OBSW_ADD_RW == 1
PeriodicTaskIF* rwPolling = factory->createPeriodicTask(
"RW_POLLING_TASK", 85, PeriodicTaskIF::MINIMUM_STACK_SIZE * 2, 0.4, missedDeadlineFunc);
PeriodicTaskIF* rwPolling =
factory->createPeriodicTask("RW_POLLING_TASK", 85, PeriodicTaskIF::MINIMUM_STACK_SIZE * 2,
0.4, missedDeadlineFunc, &RR_SCHEDULING);
result = rwPolling->addComponent(objects::RW_POLLING_TASK);
if (result != returnvalue::OK) {
scheduling::printAddObjectError("RW_POLLING_TASK", objects::RW_POLLING_TASK);
}
#endif
#if OBSW_ADD_MGT == 1
PeriodicTaskIF* imtqPolling = factory->createPeriodicTask(
"IMTQ_POLLING_TASK", 85, PeriodicTaskIF::MINIMUM_STACK_SIZE * 2, 0.4, missedDeadlineFunc);
PeriodicTaskIF* imtqPolling =
factory->createPeriodicTask("IMTQ_POLLING_TASK", 85, PeriodicTaskIF::MINIMUM_STACK_SIZE * 2,
0.4, missedDeadlineFunc, &RR_SCHEDULING);
result = imtqPolling->addComponent(objects::IMTQ_POLLING);
if (result != returnvalue::OK) {
scheduling::printAddObjectError("IMTQ_POLLING_TASK", objects::IMTQ_POLLING);
@ -247,16 +256,18 @@ void scheduling::initTasks() {
#endif
#if OBSW_ADD_SUN_SENSORS == 1
PeriodicTaskIF* susPolling = factory->createPeriodicTask(
"SUS_POLLING_TASK", 85, PeriodicTaskIF::MINIMUM_STACK_SIZE * 2, 0.4, missedDeadlineFunc);
PeriodicTaskIF* susPolling =
factory->createPeriodicTask("SUS_POLLING_TASK", 85, PeriodicTaskIF::MINIMUM_STACK_SIZE * 2,
0.4, missedDeadlineFunc, &RR_SCHEDULING);
result = susPolling->addComponent(objects::SUS_POLLING_TASK);
if (result != returnvalue::OK) {
scheduling::printAddObjectError("SUS_POLLING_TASK", objects::SUS_POLLING_TASK);
}
#endif
PeriodicTaskIF* acsSysTask = factory->createPeriodicTask(
"ACS_SYS_TASK", 55, PeriodicTaskIF::MINIMUM_STACK_SIZE * 2, 0.4, missedDeadlineFunc);
PeriodicTaskIF* acsSysTask =
factory->createPeriodicTask("ACS_SYS_TASK", 55, PeriodicTaskIF::MINIMUM_STACK_SIZE * 2, 0.4,
missedDeadlineFunc, &RR_SCHEDULING);
result = acsSysTask->addComponent(objects::ACS_SUBSYSTEM);
if (result != returnvalue::OK) {
scheduling::printAddObjectError("ACS_SUBSYSTEM", objects::ACS_SUBSYSTEM);
@ -283,7 +294,7 @@ void scheduling::initTasks() {
}
PeriodicTaskIF* tcsSystemTask = factory->createPeriodicTask(
"TCS_TASK", 55, PeriodicTaskIF::MINIMUM_STACK_SIZE, 0.5, missedDeadlineFunc);
"TCS_TASK", 50, PeriodicTaskIF::MINIMUM_STACK_SIZE, 0.5, missedDeadlineFunc, &RR_SCHEDULING);
scheduling::scheduleRtdSensors(tcsSystemTask);
result = tcsSystemTask->addComponent(objects::TCS_SUBSYSTEM);
if (result != returnvalue::OK) {
@ -308,8 +319,9 @@ void scheduling::initTasks() {
#if OBSW_ADD_STAR_TRACKER == 1
// Relatively high priority to make sure STR COM works well.
PeriodicTaskIF* strHelperTask = factory->createPeriodicTask(
"STR_HELPER", 30, PeriodicTaskIF::MINIMUM_STACK_SIZE, 0.2, missedDeadlineFunc);
PeriodicTaskIF* strHelperTask =
factory->createPeriodicTask("STR_HELPER", 30, PeriodicTaskIF::MINIMUM_STACK_SIZE, 0.2,
missedDeadlineFunc, &RR_SCHEDULING);
result = strHelperTask->addComponent(objects::STR_COM_IF);
if (result != returnvalue::OK) {
scheduling::printAddObjectError("STR_HELPER", objects::STR_COM_IF);
@ -317,8 +329,9 @@ void scheduling::initTasks() {
#endif /* OBSW_ADD_STAR_TRACKER == 1 */
#if OBSW_ADD_PLOC_MPSOC == 1
PeriodicTaskIF* mpsocHelperTask = factory->createPeriodicTask(
"PLOC_MPSOC_HELPER", 20, PeriodicTaskIF::MINIMUM_STACK_SIZE, 0.2, missedDeadlineFunc);
PeriodicTaskIF* mpsocHelperTask =
factory->createPeriodicTask("PLOC_MPSOC_HELPER", 0, PeriodicTaskIF::MINIMUM_STACK_SIZE, 0.2,
missedDeadlineFunc);
result = mpsocHelperTask->addComponent(objects::PLOC_MPSOC_HELPER);
if (result != returnvalue::OK) {
scheduling::printAddObjectError("PLOC_MPSOC_HELPER", objects::PLOC_MPSOC_HELPER);
@ -326,8 +339,9 @@ void scheduling::initTasks() {
#endif /* OBSW_ADD_PLOC_MPSOC */
#if OBSW_ADD_PLOC_SUPERVISOR == 1
PeriodicTaskIF* supvHelperTask = factory->createPeriodicTask(
"PLOC_SUPV_HELPER", 10, PeriodicTaskIF::MINIMUM_STACK_SIZE, 1.0, missedDeadlineFunc);
PeriodicTaskIF* supvHelperTask =
factory->createPeriodicTask("PLOC_SUPV_HELPER", 0, PeriodicTaskIF::MINIMUM_STACK_SIZE, 1.0,
missedDeadlineFunc);
result = supvHelperTask->addComponent(objects::PLOC_SUPERVISOR_HELPER);
if (result != returnvalue::OK) {
scheduling::printAddObjectError("PLOC_SUPV_HELPER", objects::PLOC_SUPERVISOR_HELPER);
@ -335,7 +349,7 @@ void scheduling::initTasks() {
#endif /* OBSW_ADD_PLOC_SUPERVISOR */
PeriodicTaskIF* plTask = factory->createPeriodicTask(
"PL_TASK", 30, PeriodicTaskIF::MINIMUM_STACK_SIZE, 0.8, missedDeadlineFunc);
"PL_TASK", 30, PeriodicTaskIF::MINIMUM_STACK_SIZE, 0.8, missedDeadlineFunc, &RR_SCHEDULING);
plTask->addComponent(objects::CAM_SWITCHER);
scheduling::addMpsocSupvHandlers(plTask);
scheduling::scheduleScexDev(plTask);
@ -456,8 +470,9 @@ void scheduling::createPstTasks(TaskFactory& factory, TaskDeadlineMissedFunction
#else
static constexpr float acsPstPeriod = 0.4;
#endif
FixedTimeslotTaskIF* acsTcsPst = factory.createFixedTimeslotTask(
"ACS_TCS_PST", 80, PeriodicTaskIF::MINIMUM_STACK_SIZE * 2, acsPstPeriod, missedDeadlineFunc);
FixedTimeslotTaskIF* acsTcsPst =
factory.createFixedTimeslotTask("ACS_TCS_PST", 85, PeriodicTaskIF::MINIMUM_STACK_SIZE * 2,
acsPstPeriod, missedDeadlineFunc, &RR_SCHEDULING);
result = pst::pstTcsAndAcs(acsTcsPst, cfg);
if (result != returnvalue::OK) {
if (result == FixedTimeslotTaskIF::SLOT_LIST_EMPTY) {
@ -471,8 +486,9 @@ void scheduling::createPstTasks(TaskFactory& factory, TaskDeadlineMissedFunction
/* Polling Sequence Table Default */
#if OBSW_ADD_SPI_TEST_CODE == 0
FixedTimeslotTaskIF* syrlinksPst = factory.createFixedTimeslotTask(
"SYRLINKS", 45, PeriodicTaskIF::MINIMUM_STACK_SIZE * 4, 0.5, missedDeadlineFunc);
FixedTimeslotTaskIF* syrlinksPst =
factory.createFixedTimeslotTask("SYRLINKS", 50, PeriodicTaskIF::MINIMUM_STACK_SIZE * 4, 0.5,
missedDeadlineFunc, &RR_SCHEDULING);
result = pst::pstSyrlinks(syrlinksPst);
if (result != returnvalue::OK) {
if (result == FixedTimeslotTaskIF::SLOT_LIST_EMPTY) {
@ -486,8 +502,9 @@ void scheduling::createPstTasks(TaskFactory& factory, TaskDeadlineMissedFunction
#endif
#if OBSW_ADD_I2C_TEST_CODE == 0
FixedTimeslotTaskIF* i2cPst = factory.createFixedTimeslotTask(
"I2C_PST", 50, PeriodicTaskIF::MINIMUM_STACK_SIZE * 4, 0.4, missedDeadlineFunc);
FixedTimeslotTaskIF* i2cPst =
factory.createFixedTimeslotTask("I2C_PS_PST", 60, PeriodicTaskIF::MINIMUM_STACK_SIZE * 4, 0.6,
missedDeadlineFunc, &RR_SCHEDULING);
result = pst::pstI2cProcessingSystem(i2cPst);
if (result != returnvalue::OK) {
if (result == FixedTimeslotTaskIF::SLOT_LIST_EMPTY) {
@ -500,8 +517,9 @@ void scheduling::createPstTasks(TaskFactory& factory, TaskDeadlineMissedFunction
}
#endif
FixedTimeslotTaskIF* gomSpacePstTask = factory.createFixedTimeslotTask(
"GS_PST_TASK", 65, PeriodicTaskIF::MINIMUM_STACK_SIZE * 4, 0.5, missedDeadlineFunc);
FixedTimeslotTaskIF* gomSpacePstTask =
factory.createFixedTimeslotTask("GS_PST_TASK", 65, PeriodicTaskIF::MINIMUM_STACK_SIZE * 4,
0.5, missedDeadlineFunc, &RR_SCHEDULING);
result = pst::pstGompaceCan(gomSpacePstTask);
if (result != returnvalue::OK) {
if (result != FixedTimeslotTaskIF::SLOT_LIST_EMPTY) {
@ -515,8 +533,9 @@ void scheduling::createPusTasks(TaskFactory& factory, TaskDeadlineMissedFunction
std::vector<PeriodicTaskIF*>& taskVec) {
ReturnValue_t result = returnvalue::OK;
/* PUS Services */
PeriodicTaskIF* pusHighPrio = factory.createPeriodicTask(
"PUS_HIGH_PRIO", 60, PeriodicTaskIF::MINIMUM_STACK_SIZE, 0.2, missedDeadlineFunc);
PeriodicTaskIF* pusHighPrio =
factory.createPeriodicTask("PUS_HIGH_PRIO", 60, PeriodicTaskIF::MINIMUM_STACK_SIZE, 0.2,
missedDeadlineFunc, &RR_SCHEDULING);
result = pusHighPrio->addComponent(objects::PUS_SERVICE_1_VERIFICATION);
if (result != returnvalue::OK) {
scheduling::printAddObjectError("PUS_VERIF", objects::PUS_SERVICE_1_VERIFICATION);
@ -535,8 +554,9 @@ void scheduling::createPusTasks(TaskFactory& factory, TaskDeadlineMissedFunction
}
taskVec.push_back(pusHighPrio);
PeriodicTaskIF* pusMedPrio = factory.createPeriodicTask(
"PUS_MED_PRIO", 40, PeriodicTaskIF::MINIMUM_STACK_SIZE, 0.8, missedDeadlineFunc);
PeriodicTaskIF* pusMedPrio =
factory.createPeriodicTask("PUS_MED_PRIO", 45, PeriodicTaskIF::MINIMUM_STACK_SIZE, 0.8,
missedDeadlineFunc, &RR_SCHEDULING);
result = pusMedPrio->addComponent(objects::PUS_SERVICE_3_HOUSEKEEPING);
if (result != returnvalue::OK) {
scheduling::printAddObjectError("PUS_3", objects::PUS_SERVICE_3_HOUSEKEEPING);

fsfw (submodule, 2 changed lines)

@ -1 +1 @@
Subproject commit f8a7c1d4ed621a3375db0da9b9e9f8d5484abbc1
Subproject commit a937b457f900a6a6b26bc0b42a5357f014840a67


@ -1,5 +1,6 @@
#include <fsfw_hal/linux/uio/UioMapper.h>
#include <linux/ipcore/PapbVcInterface.h>
#include <unistd.h>
#include "fsfw/serviceinterface/ServiceInterface.h"
@ -15,47 +16,67 @@ PapbVcInterface::~PapbVcInterface() {}
ReturnValue_t PapbVcInterface::initialize() {
UioMapper uioMapper(uioFile, mapNum);
return uioMapper.getMappedAdress(&vcBaseReg, UioMapper::Permissions::WRITE_ONLY);
uint32_t* baseReg;
ReturnValue_t result = uioMapper.getMappedAdress(&baseReg, UioMapper::Permissions::WRITE_ONLY);
if (result != returnvalue::OK) {
return result;
}
vcBaseReg = baseReg;
return returnvalue::OK;
}
ReturnValue_t PapbVcInterface::write(const uint8_t* data, size_t size) {
if (pollPapbBusySignal() == returnvalue::OK) {
if (pollPapbBusySignal(0, 0) == returnvalue::OK) {
startPacketTransfer();
} else {
return DirectTmSinkIF::IS_BUSY;
}
for (size_t idx = 0; idx < size; idx++) {
if (pollPapbBusySignal() == returnvalue::OK) {
*(vcBaseReg + DATA_REG_OFFSET) = static_cast<uint32_t>(*(data + idx));
if (pollPapbBusySignal(10, 10) == returnvalue::OK) {
*(vcBaseReg + DATA_REG_OFFSET) = static_cast<uint32_t>(data[idx]);
} else {
sif::warning << "PapbVcInterface::write: Only written " << idx << " of " << size << " data"
<< std::endl;
abortPacketTransfer();
return returnvalue::FAILED;
}
}
if (pollPapbBusySignal() == returnvalue::OK) {
endPacketTransfer();
if (pollPapbBusySignal(10, 10) == returnvalue::OK) {
completePacketTransfer();
} else {
abortPacketTransfer();
return returnvalue::FAILED;
}
return returnvalue::OK;
}
void PapbVcInterface::startPacketTransfer() { *vcBaseReg = CONFIG_START; }
void PapbVcInterface::endPacketTransfer() { *vcBaseReg = CONFIG_END; }
void PapbVcInterface::completePacketTransfer() { *vcBaseReg = CONFIG_END; }
ReturnValue_t PapbVcInterface::pollPapbBusySignal() const {
ReturnValue_t PapbVcInterface::pollPapbBusySignal(uint32_t maxPollRetries,
uint32_t retryDelayUs) const {
gpio::Levels papbBusyState = gpio::Levels::LOW;
ReturnValue_t result = returnvalue::OK;
ReturnValue_t result;
uint32_t busyIdx = 0;
/** Check if PAPB interface is ready to receive data */
result = gpioComIF->readGpio(papbBusyId, papbBusyState);
if (result != returnvalue::OK) {
sif::warning << "PapbVcInterface::pollPapbBusySignal: Failed to read papb busy signal"
<< std::endl;
return returnvalue::FAILED;
}
if (papbBusyState == gpio::Levels::LOW) {
return PAPB_BUSY;
}
while (true) {
/** Check if PAPB interface is ready to receive data */
result = gpioComIF->readGpio(papbBusyId, papbBusyState);
if (result != returnvalue::OK) {
sif::warning << "PapbVcInterface::pollPapbBusySignal: Failed to read papb busy signal"
<< std::endl;
return returnvalue::FAILED;
}
if (papbBusyState == gpio::Levels::HIGH) {
return returnvalue::OK;
}
busyIdx++;
if (busyIdx >= maxPollRetries) {
return PAPB_BUSY;
}
usleep(retryDelayUs);
}
return returnvalue::OK;
}
@ -79,7 +100,9 @@ void PapbVcInterface::isVcInterfaceBufferEmpty() {
return;
}
bool PapbVcInterface::isBusy() const { return pollPapbBusySignal() == PAPB_BUSY; }
bool PapbVcInterface::isBusy() const { return pollPapbBusySignal(0, 0) == PAPB_BUSY; }
void PapbVcInterface::cancelTransfer() { abortPacketTransfer(); }
ReturnValue_t PapbVcInterface::sendTestFrame() {
/** Size of one complete transfer frame data field amounts to 1105 bytes */
@ -97,3 +120,5 @@ ReturnValue_t PapbVcInterface::sendTestFrame() {
return returnvalue::OK;
}
void PapbVcInterface::abortPacketTransfer() { *vcBaseReg = CONFIG_ABORT; }


@ -41,6 +41,8 @@ class PapbVcInterface : public VirtualChannelIF {
*/
ReturnValue_t write(const uint8_t* data, size_t size) override;
void cancelTransfer() override;
ReturnValue_t initialize() override;
private:
@ -51,16 +53,21 @@ class PapbVcInterface : public VirtualChannelIF {
/**
* Configuration bits:
* bit[1:0]: Size of data (1,2,3 or 4 bytes). 1 Byte <=> b00
* bit[2]: Set this bit to 1 to abort a transfered packet
* bit[2]: Set this bit to 1 to abort a transferred packet
* bit[3]: Signals to VcInterface the start of a new telemetry packet
*/
static const uint32_t CONFIG_START = 0x8;
static constexpr uint32_t CONFIG_START = 0b00001000;
/**
* Abort a transferred packet.
*/
static constexpr uint32_t CONFIG_ABORT = 0b00000100;
/**
* Writing this word to the VcInterface base address signals to the virtual channel interface
* that a complete tm packet has been transferred.
*/
static const uint32_t CONFIG_END = 0x0;
static constexpr uint32_t CONFIG_END = 0x0;
/**
* Writing to this offset within the memory space of a virtual channel will insert data for
@ -79,7 +86,7 @@ class PapbVcInterface : public VirtualChannelIF {
std::string uioFile;
int mapNum = 0;
uint32_t* vcBaseReg = nullptr;
volatile uint32_t* vcBaseReg = nullptr;
uint32_t vcOffset = 0;
@ -89,11 +96,13 @@ class PapbVcInterface : public VirtualChannelIF {
*/
void startPacketTransfer();
void abortPacketTransfer();
/**
* @brief This function sends the config byte to the virtual channel interface of the PTME
* IP Core to signal the end of a packet transfer.
*/
void endPacketTransfer();
void completePacketTransfer();
/**
* @brief This function reads the papb busy signal indicating whether the virtual channel
@ -102,7 +111,7 @@ class PapbVcInterface : public VirtualChannelIF {
*
* @return returnvalue::OK when ready to receive data else PAPB_BUSY.
*/
ReturnValue_t pollPapbBusySignal() const;
ReturnValue_t pollPapbBusySignal(uint32_t maxPollRetries, uint32_t retryDelayUs) const;
/**
* @brief This function can be used for debugging to check whether there are packets in


@ -20,7 +20,6 @@ ReturnValue_t Ptme::initialize() {
}
ReturnValue_t Ptme::writeToVc(uint8_t vcId, const uint8_t* data, size_t size) {
ReturnValue_t result = returnvalue::OK;
VcInterfaceMapIter vcInterfaceMapIter = vcInterfaceMap.find(vcId);
if (vcInterfaceMapIter == vcInterfaceMap.end()) {
sif::warning << "Ptme::writeToVc: No virtual channel interface found for the virtual "
@ -28,8 +27,7 @@ ReturnValue_t Ptme::writeToVc(uint8_t vcId, const uint8_t* data, size_t size) {
<< static_cast<unsigned int>(vcId) << std::endl;
return UNKNOWN_VC_ID;
}
result = vcInterfaceMapIter->second->write(data, size);
return result;
return vcInterfaceMapIter->second->write(data, size);
}
void Ptme::addVcInterface(VcId_t vcId, VirtualChannelIF* vc) {
@ -62,3 +60,11 @@ bool Ptme::isBusy(uint8_t vcId) const {
}
return vcInterfaceMapIter->second->isBusy();
}
void Ptme::cancelTransfer(uint8_t vcId) {
VcInterfaceMapIter vcInterfaceMapIter = vcInterfaceMap.find(vcId);
if (vcInterfaceMapIter == vcInterfaceMap.end()) {
return;
}
return vcInterfaceMapIter->second->cancelTransfer();
}


@ -36,6 +36,7 @@ class Ptme : public PtmeIF, public SystemObject {
ReturnValue_t initialize() override;
ReturnValue_t writeToVc(uint8_t vcId, const uint8_t* data, size_t size) override;
bool isBusy(uint8_t vcId) const override;
void cancelTransfer(uint8_t vcId) override;
/**
* @brief This function adds the reference to a virtual channel interface to the vcInterface


@ -23,6 +23,7 @@ class PtmeIF {
*/
virtual ReturnValue_t writeToVc(uint8_t vcId, const uint8_t* data, size_t size) = 0;
virtual bool isBusy(uint8_t vcId) const = 0;
virtual void cancelTransfer(uint8_t vcId) = 0;
};
#endif /* LINUX_OBC_PTMEIF_H_ */


@ -18,6 +18,7 @@ class VirtualChannelIF : public DirectTmSinkIF {
virtual ~VirtualChannelIF(){};
virtual ReturnValue_t initialize() = 0;
virtual void cancelTransfer() = 0;
};
#endif /* LINUX_OBC_VCINTERFACEIF_H_ */


@ -8,6 +8,9 @@
#include "ObjectFactory.h"
#include "eive/objects.h"
PosixThreadArgs scheduling::RR_SCHEDULING = {.policy = SchedulingPolicy::RR};
PosixThreadArgs scheduling::NORMAL_SCHEDULING;
void scheduling::scheduleScexReader(TaskFactory& factory, PeriodicTaskIF*& scexReaderTask) {
using namespace scheduling;
ReturnValue_t result = returnvalue::OK;
@ -18,8 +21,9 @@ void scheduling::scheduleScexReader(TaskFactory& factory, PeriodicTaskIF*& scexR
#endif
result = returnvalue::OK;
scexReaderTask = factory.createPeriodicTask(
"SCEX_UART_READER", 20, PeriodicTaskIF::MINIMUM_STACK_SIZE, 2.0, missedDeadlineFunc);
scexReaderTask =
factory.createPeriodicTask("SCEX_UART_READER", 20, PeriodicTaskIF::MINIMUM_STACK_SIZE, 2.0,
missedDeadlineFunc, &NORMAL_SCHEDULING);
result = scexReaderTask->addComponent(objects::SCEX_UART_READER);
if (result != returnvalue::OK) {
printAddObjectError("SCEX_UART_READER", objects::SCEX_UART_READER);


@ -1,8 +1,13 @@
#pragma once
#include <fsfw/osal/linux/PosixThread.h>
#include <fsfw/tasks/TaskFactory.h>
namespace scheduling {
extern PosixThreadArgs RR_SCHEDULING;
extern PosixThreadArgs NORMAL_SCHEDULING;
void scheduleScexDev(PeriodicTaskIF*& scexDevHandler);
void scheduleScexReader(TaskFactory& factory, PeriodicTaskIF*& scexReaderTask);
void addMpsocSupvHandlers(PeriodicTaskIF* task);


@ -206,6 +206,8 @@ ReturnValue_t CcsdsIpCoreHandler::executeAction(ActionId_t actionId, MessageQueu
void CcsdsIpCoreHandler::updateLinkState() { linkState = LINK_UP; }
void CcsdsIpCoreHandler::enableTransmit() {
// Reset PTME on each transmit enable.
updateBatPriorityFromParam();
#ifndef TE0720_1CFA
gpioIF->pullHigh(ptmeGpios.enableTxClock);
gpioIF->pullHigh(ptmeGpios.enableTxData);


@ -153,7 +153,7 @@ class CcsdsIpCoreHandler : public SystemObject,
PtmeConfig& ptmeConfig;
PtmeGpios ptmeGpios;
// BAT priority bit on by default to enable priority selection mode for the PTME.
uint8_t batPriorityParam = 1;
uint8_t batPriorityParam = 0;
bool updateBatPriorityOnTxOff = false;
GpioIF* gpioIF = nullptr;


@ -11,6 +11,9 @@ ReturnValue_t LiveTmTask::performOperation(uint8_t opCode) {
while (true) {
// The funnel tasks are scheduled here directly as well.
ReturnValue_t result = channel.sendNextTm();
if (result == DirectTmSinkIF::IS_BUSY) {
sif::error << "Lost live TM, PAPB busy" << std::endl;
}
if (result == MessageQueueIF::EMPTY) {
if (tmFunnelCd.hasTimedOut()) {
pusFunnel.performOperation(0);


@ -13,26 +13,32 @@ PersistentLogTmStoreTask::PersistentLogTmStoreTask(object_id_t objectId, Storage
miscStoreContext(persTmStore::DUMP_MISC_STORE_DONE) {}
ReturnValue_t PersistentLogTmStoreTask::performOperation(uint8_t opCode) {
bool someonesBusy = false;
auto stateHandlingForStore = [&](bool storeIsBusy) {
if (storeIsBusy) {
someonesBusy = true;
}
if (fileHasSwapped) {
someFileWasSwapped = fileHasSwapped;
}
};
while (true) {
if (not cyclicStoreCheck()) {
continue;
}
bool someonesBusy = false;
bool busy = false;
busy = handleOneStore(stores.okStore, okStoreContext);
if (busy) {
someonesBusy = true;
}
busy = handleOneStore(stores.notOkStore, notOkStoreContext);
if (busy) {
someonesBusy = true;
}
busy = handleOneStore(stores.miscStore, miscStoreContext);
if (busy) {
someonesBusy = true;
}
someonesBusy = false;
someFileWasSwapped = false;
stateHandlingForStore(handleOneStore(stores.okStore, okStoreContext));
stateHandlingForStore(handleOneStore(stores.notOkStore, notOkStoreContext));
stateHandlingForStore(handleOneStore(stores.miscStore, miscStoreContext));
if (not someonesBusy) {
TaskFactory::delayTask(40);
TaskFactory::delayTask(100);
} else /* and graceDelayDuringDumping.hasTimedOut()*/ {
if (someFileWasSwapped) {
TaskFactory::delayTask(20);
}
// TaskFactory::delayTask(2);
// graceDelayDuringDumping.resetTimer();
}
}
}


@ -32,6 +32,8 @@ class PersistentLogTmStoreTask : public TmStoreTaskBase, public ExecutableObject
DumpContext notOkStoreContext;
DumpContext miscStoreContext;
Countdown tcHandlingCd = Countdown(400);
Countdown graceDelayDuringDumping = Countdown(200);
bool someFileWasSwapped = false;
bool initStoresIfPossible();
};


@ -17,7 +17,15 @@ ReturnValue_t PersistentSingleTmStoreTask::performOperation(uint8_t opCode) {
}
bool busy = handleOneStore(storeWithQueue, dumpContext);
if (not busy) {
TaskFactory::delayTask(40);
TaskFactory::delayTask(100);
} else {
if (fileHasSwapped) {
TaskFactory::delayTask(20);
}
// if (fileHasSwapped and graceDelayDuringDumping.hasTimedOut()) {
// TaskFactory::delayTask(2);
// graceDelayDuringDumping.resetTimer();
// }
}
}
}


@ -19,6 +19,7 @@ class PersistentSingleTmStoreTask : public TmStoreTaskBase, public ExecutableObj
PersistentTmStoreWithTmQueue& storeWithQueue;
DumpContext dumpContext;
Countdown tcHandlingCd = Countdown(400);
Countdown graceDelayDuringDumping = Countdown(100);
bool initStoresIfPossible();
};


@ -195,11 +195,7 @@ ReturnValue_t PersistentTmStore::startDumpFromUpTo(uint32_t fromUnixSeconds,
dumpParams.fromUnixTime = fromUnixSeconds;
dumpParams.untilUnixTime = upToUnixSeconds;
state = State::DUMPING;
if (loadNextDumpFile() == DUMP_DONE) {
// State will be set inside the function loading the next file.
return DUMP_DONE;
}
return returnvalue::OK;
return loadNextDumpFile();
}
ReturnValue_t PersistentTmStore::loadNextDumpFile() {
@ -218,7 +214,7 @@ ReturnValue_t PersistentTmStore::loadNextDumpFile() {
}
sif::debug << "Path: " << dumpParams.dirEntry.path() << std::endl;
// Can't even read CCSDS header.
// File empty or can't even read CCSDS header.
if (dumpParams.fileSize <= 6) {
continue;
}
@ -247,14 +243,12 @@ ReturnValue_t PersistentTmStore::loadNextDumpFile() {
static_cast<std::streamsize>(dumpParams.fileSize));
// Increment iterator for next cycle.
dumpParams.dirIter++;
break;
return returnvalue::OK;
}
}
if (dumpParams.dirIter == directory_iterator()) {
state = State::IDLE;
return DUMP_DONE;
}
return returnvalue::OK;
// Directory iterator was consumed and we are done.
state = State::IDLE;
return DUMP_DONE;
}
ReturnValue_t PersistentTmStore::dumpNextPacket(DirectTmSinkIF& tmSink, size_t& dumpedLen,


@ -25,10 +25,12 @@ bool TmStoreTaskBase::handleOneStore(PersistentTmStoreWithTmQueue& store,
// Dump TMs when applicable
if (store.getState() == PersistentTmStore::State::DUMPING) {
size_t dumpedLen = 0;
bool fileHasSwapped;
if (not channel.isBusy()) {
tmSinkBusyCd.resetTimer();
result = store.dumpNextPacket(channel, dumpedLen, fileHasSwapped);
if (result == DirectTmSinkIF::IS_BUSY) {
sif::warning << "Unexpected PAPB busy" << std::endl;
}
if ((result == PersistentTmStore::DUMP_DONE or result == returnvalue::OK) and dumpedLen > 0) {
dumpContext.dumpedBytes += dumpedLen;
dumpContext.numberOfDumpedPackets += 1;
@ -43,6 +45,13 @@ bool TmStoreTaskBase::handleOneStore(PersistentTmStoreWithTmQueue& store,
} else if (result == returnvalue::OK) {
dumpsPerformed = true;
}
} else {
dumpContext.ptmeBusyCounter++;
if (dumpContext.ptmeBusyCounter == 50) {
sif::warning << "PTME busy for longer period. Dumped length so far: "
<< dumpContext.dumpedBytes << std::endl;
dumpContext.ptmeBusyCounter = 0;
}
}
if (cancelDumpCd.hasTimedOut() or tmSinkBusyCd.hasTimedOut()) {
triggerEvent(persTmStore::DUMP_WAS_CANCELLED, store.getObjectId());


@ -16,6 +16,7 @@ class TmStoreTaskBase : public SystemObject {
const Event eventIfDone;
uint32_t numberOfDumpedPackets = 0;
uint32_t dumpedBytes = 0;
uint32_t ptmeBusyCounter = 0;
};
TmStoreTaskBase(object_id_t objectId, StorageManagerIF& ipcStore, VirtualChannel& channel,
@ -45,6 +46,7 @@ class TmStoreTaskBase : public SystemObject {
Countdown tmSinkBusyCd = Countdown(60 * 1000);
VirtualChannel& channel;
bool storesInitialized = false;
bool fileHasSwapped = false;
SdCardMountedIF& sdcMan;
};


@ -25,4 +25,12 @@ uint8_t VirtualChannel::getVcid() const { return vcId; }
const char* VirtualChannel::getName() const { return vcName.c_str(); }
bool VirtualChannel::isBusy() const { return ptme.isBusy(vcId); }
bool VirtualChannel::isBusy() const {
// Data is discarded, so channel is not busy.
if (linkStateProvider.load()) {
return false;
}
return ptme.isBusy(vcId);
}
void VirtualChannel::cancelTransfer() { ptme.cancelTransfer(vcId); }


@ -28,6 +28,7 @@ class VirtualChannel : public SystemObject, public VirtualChannelIF {
ReturnValue_t sendNextTm(const uint8_t* data, size_t size);
bool isBusy() const override;
ReturnValue_t write(const uint8_t* data, size_t size) override;
void cancelTransfer() override;
uint8_t getVcid() const;
const char* getName() const;


@ -36,7 +36,10 @@ ReturnValue_t VirtualChannelWithQueue::sendNextTm() {
return result;
}
write(data, size);
result = write(data, size);
if (result != returnvalue::OK) {
return result;
}
tmStore.deleteData(storeId);
if (result != returnvalue::OK) {
return result;

tmtc (submodule, 2 changed lines)

@ -1 +1 @@
Subproject commit 2263938b8b1324b309a44d70c291800050ff4178
Subproject commit 9f17661f529e045e92714817bdc52572f976da2d