#include "LiveTmTask.h"

#include <fsfw/ipc/QueueFactory.h>
#include <fsfw/subsystem/helper.h>
#include <fsfw/tasks/TaskFactory.h>
#include <fsfw/timemanager/Stopwatch.h>

#include "mission/sysDefs.h"

static constexpr bool DEBUG_TM_QUEUE_SPEED = false;
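
// Back-pressure signals towards the CFDP handler: the flag throttles CFDP packet creation while
// the live channel is busy, the counter tracks the number of CFDP TM messages currently queued
// (assumed to be incremented by the CFDP TM producer).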
std::atomic_bool signals::CFDP_CHANNEL_THROTTLE_SIGNAL = false;
std::atomic_uint32_t signals::CFDP_MSG_COUNTER = 0;
LiveTmTask::LiveTmTask(object_id_t objectId, PusTmFunnel& pusFunnel, CfdpTmFunnel& cfdpFunnel,
                       VirtualChannel& channel, const std::atomic_bool& ptmeLocked,
                       uint32_t regularTmQueueDepth, uint32_t cfdpQueueDepth)
    : SystemObject(objectId),
      modeHelper(this),
      pusFunnel(pusFunnel),
      cfdpFunnel(cfdpFunnel),
      channel(channel),
      ptmeLocked(ptmeLocked) {
  requestQueue = QueueFactory::instance()->createMessageQueue();
  cfdpTmQueue = QueueFactory::instance()->createMessageQueue(cfdpQueueDepth);
  regularTmQueue = QueueFactory::instance()->createMessageQueue(regularTmQueueDepth);
}
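// Main loop of the live TM task: drains the regular TM queue first and the CFDP TM queue second
// into the live virtual channel, applies back pressure on the CFDP data creator while the
// channel is busy, and services the TM funnels and the command queue during idle periods.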
ReturnValue_t LiveTmTask::performOperation(uint8_t opCode) {
  readCommandQueue();
  bool handledTm;
  ReturnValue_t result;
  uint32_t consecutiveRegularCounter = 0;
  uint32_t consecutiveCfdpCounter = 0;
  bool isCfdp = false;
  while (true) {
    isCfdp = false;
    // Read the regular TM queue first and the CFDP TM queue second, forwarding the packets to
    // the live channel.
    handledTm = false;
    if (!channel.isBusy()) {
      result = handleRegularTmQueue();
      if (result == MessageQueueIF::EMPTY) {
        result = handleCfdpTmQueue();
        isCfdp = true;
      }
      if (result == returnvalue::OK) {
        handledTm = true;
        if (DEBUG_TM_QUEUE_SPEED) {
          if (isCfdp) {
            consecutiveCfdpCounter++;
          } else {
            consecutiveRegularCounter++;
          }
        }
      }
    }
    if (channel.isBusy() and !throttlePeriodOngoing) {
      // Throttle the CFDP packet creator. It is by far the most relevant data creator, so
      // throttling it is the easiest way to handle back pressure sensibly for now.
      throttleCfdp();
    } else if (!channel.isBusy() and throttlePeriodOngoing) {
      // Half-full/half-empty flow control: release CFDP again once the queue has drained enough.
      if (signals::CFDP_MSG_COUNTER <= config::LIVE_CHANNEL_CFDP_QUEUE_SIZE / 2) {
        releaseCfdp();
      }
    }
    if (!handledTm) {
      if (tmFunnelCd.hasTimedOut()) {
        pusFunnel.performOperation(0);
        cfdpFunnel.performOperation(0);
        tmFunnelCd.resetTimer();
      }
      // Read command queue during idle times.
      readCommandQueue();
      if (DEBUG_TM_QUEUE_SPEED) {
        if (consecutiveCfdpCounter > 0) {
          sif::debug << "Consecutive CFDP TM handled: " << consecutiveCfdpCounter << std::endl;
        }
        if (consecutiveRegularCounter > 0) {
          sif::debug << "Consecutive regular TM handled: " << consecutiveRegularCounter
                     << std::endl;
        }
        consecutiveRegularCounter = 0;
        consecutiveCfdpCounter = 0;
      }
      // 40 ms IDLE delay. Might tweak this in the future.
      TaskFactory::delayTask(40);
    }
  }
}
MessageQueueId_t LiveTmTask::getCommandQueue() const { return requestQueue->getId(); }
void LiveTmTask::getMode(Mode_t* mode, Submode_t* submode) {
  if (mode != nullptr) {
    *mode = this->mode;
  }
  if (submode != nullptr) {
    *submode = SUBMODE_NONE;
  }
}
ReturnValue_t LiveTmTask::checkModeCommand(Mode_t mode, Submode_t submode,
                                           uint32_t* msToReachTheMode) {
  if (mode == MODE_ON or mode == MODE_OFF) {
    return returnvalue::OK;
  }
  return returnvalue::FAILED;
}
void LiveTmTask::startTransition(Mode_t mode, Submode_t submode) {
  this->mode = mode;
  modeHelper.modeChanged(mode, submode);
  announceMode(false);
}
void LiveTmTask::announceMode(bool recursive) { triggerEvent(MODE_INFO, mode, SUBMODE_NONE); }
object_id_t LiveTmTask::getObjectId() const { return SystemObject::getObjectId(); }
const HasHealthIF* LiveTmTask::getOptHealthIF() const { return nullptr; }
const HasModesIF& LiveTmTask::getModeIF() const { return *this; }
ReturnValue_t LiveTmTask::connectModeTreeParent(HasModeTreeChildrenIF& parent) {
  return modetree::connectModeTreeParent(parent, *this, nullptr, modeHelper);
}
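// Polls the request queue once. Mode commands are forwarded to the mode helper, all other
// messages are rejected with an UNKNOWN_COMMAND reply.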
void LiveTmTask::readCommandQueue(void) {
  CommandMessage commandMessage;
  ReturnValue_t result = returnvalue::FAILED;

  result = requestQueue->receiveMessage(&commandMessage);
  if (result == returnvalue::OK) {
    result = modeHelper.handleModeCommand(&commandMessage);
    if (result == returnvalue::OK) {
      return;
    }
    CommandMessage reply;
    reply.setReplyRejected(CommandMessage::UNKNOWN_COMMAND, commandMessage.getCommand());
    requestQueue->reply(&reply);
    return;
  }
}
ReturnValue_t LiveTmTask::handleRegularTmQueue() {
  return handleGenericTmQueue(*regularTmQueue, false);
}
ReturnValue_t LiveTmTask::handleCfdpTmQueue() { return handleGenericTmQueue(*cfdpTmQueue, true); }
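// Pulls one TM message from the given queue, reads the packet from the TM store and writes it
// to the live channel. A partial write throttles CFDP and completes the write synchronously.
// The store data is deleted in any case.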
ReturnValue_t LiveTmTask::handleGenericTmQueue(MessageQueueIF& queue, bool isCfdp) {
  TmTcMessage message;
  ReturnValue_t result = queue.receiveMessage(&message);
  if (result == MessageQueueIF::EMPTY) {
    return result;
  }
  // Only messages pulled from the CFDP queue count towards the CFDP flow control counter.
  if (isCfdp and signals::CFDP_MSG_COUNTER > 0) {
    signals::CFDP_MSG_COUNTER--;
  }
  store_address_t storeId = message.getStorageId();
  const uint8_t* data = nullptr;
  size_t size = 0;
  result = tmStore->getData(storeId, &data, &size);
  if (result != returnvalue::OK) {
    sif::warning << "LiveTmTask::handleGenericTmQueue: Failed to read data from TM store"
                 << std::endl;
    tmStore->deleteData(storeId);
    return result;
  }

  if (!ptmeLocked) {
    size_t partiallyWrittenSize = 0;
    result = channel.write(data, size, partiallyWrittenSize);
    if (result == DirectTmSinkIF::PARTIALLY_WRITTEN) {
      // Throttle CFDP right away.
      throttleCfdp();
      result = channel.handleLastWriteSynchronously(data, size, partiallyWrittenSize, 200);
      if (result != returnvalue::OK) {
        // TODO: Event? Might lead to dangerous spam though..
        sif::warning << "LiveTmTask: Synchronous write of last segment failed with code 0x"
                     << std::setw(4) << std::hex << result << std::dec << std::endl;
      }
      // minimumPeriodThrottleCd.resetTimer();
    }
  }
  // Attempt deletion in any case and ignore failures (which should not happen); it is more
  // important to propagate write errors.
  tmStore->deleteData(storeId);
  return result;
}
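// Raises the throttle signal so that the CFDP handler stops producing live TM until
// releaseCfdp() is called.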
void LiveTmTask::throttleCfdp() {
  throttlePeriodOngoing = true;
  // minimumPeriodThrottleCd.resetTimer();
  signals::CFDP_CHANNEL_THROTTLE_SIGNAL = true;
}
void LiveTmTask::releaseCfdp() {
  throttlePeriodOngoing = false;
  signals::CFDP_CHANNEL_THROTTLE_SIGNAL = false;
}
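
// How a CFDP TM producer might cooperate with these signals (sketch with hypothetical names;
// the real producer is the CFDP funnel/handler):
//
//   while (signals::CFDP_CHANNEL_THROTTLE_SIGNAL) {
//     TaskFactory::delayTask(20);  // back off while the live channel is throttled
//   }
//   signals::CFDP_MSG_COUNTER++;  // one more message in flight towards the live TM task
//   tmQueue->sendMessage(liveTmTask.getCfdpLiveQueueId(), &message);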
ModeTreeChildIF& LiveTmTask::getModeTreeChildIF() { return *this; }
ReturnValue_t LiveTmTask::initialize() {
  modeHelper.initialize();
  tmStore = ObjectManager::instance()->get<StorageManagerIF>(objects::TM_STORE);
  if (tmStore == nullptr) {
    return ObjectManagerIF::CHILD_INIT_FAILED;
  }
  return returnvalue::OK;
}
MessageQueueId_t LiveTmTask::getNormalLiveQueueId() const { return regularTmQueue->getId(); }
MessageQueueId_t LiveTmTask::getCfdpLiveQueueId() const { return cfdpTmQueue->getId(); }