// eive-obsw/mission/com/LiveTmTask.cpp

#include "LiveTmTask.h"
#include <fsfw/ipc/QueueFactory.h>
#include <fsfw/subsystem/helper.h>
#include <fsfw/tasks/TaskFactory.h>
#include <fsfw/timemanager/Stopwatch.h>
#include "mission/sysDefs.h"
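
// Debug switch: when enabled, print how many TM messages were handled back-to-back from each
// queue whenever the task goes idle.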
static constexpr bool DEBUG_TM_QUEUE_SPEED = false;
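
// Flow-control signals shared with the CFDP machinery. The throttle flag asks the CFDP packet
// creator to pause TM generation; the counter tracks CFDP TM messages currently queued for the
// live channel (presumably incremented on the producer side and decremented here on
// consumption).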
std::atomic_bool signals::CFDP_CHANNEL_THROTTLE_SIGNAL = false;
std::atomic_uint32_t signals::CFDP_MSG_COUNTER = 0;

LiveTmTask::LiveTmTask(object_id_t objectId, PusTmFunnel& pusFunnel, CfdpTmFunnel& cfdpFunnel,
                       VirtualChannel& channel, const std::atomic_bool& ptmeLocked,
                       uint32_t regularTmQueueDepth, uint32_t cfdpQueueDepth)
    : SystemObject(objectId),
      modeHelper(this),
      pusFunnel(pusFunnel),
      cfdpFunnel(cfdpFunnel),
      channel(channel),
      ptmeLocked(ptmeLocked) {
  requestQueue = QueueFactory::instance()->createMessageQueue();
  cfdpTmQueue = QueueFactory::instance()->createMessageQueue(cfdpQueueDepth);
  regularTmQueue = QueueFactory::instance()->createMessageQueue(regularTmQueueDepth);
}
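
// Main loop of the live TM task. While the downlink channel is free, the regular TM queue is
// drained first and the CFDP TM queue second, with each packet written to the virtual channel.
// When the channel reports busy, the CFDP packet creator is throttled; it is released again once
// the channel is free and the CFDP queue has drained to half of its capacity.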
ReturnValue_t LiveTmTask::performOperation(uint8_t opCode) {
  readCommandQueue();
  bool handledTm;
  ReturnValue_t result;
  uint32_t consecutiveRegularCounter = 0;
  uint32_t consecutiveCfdpCounter = 0;
  bool isCfdp = false;
  while (true) {
    isCfdp = false;
    // Read the regular TM queue first, then the CFDP TM queue, and forward whatever is found.
    handledTm = false;
    updateBusyFlag();
    if (!channelIsBusy) {
      result = handleRegularTmQueue();
      if (result == MessageQueueIF::EMPTY) {
        result = handleCfdpTmQueue();
        isCfdp = true;
      }
      if (result == returnvalue::OK) {
        handledTm = true;
        if (DEBUG_TM_QUEUE_SPEED) {
          if (isCfdp) {
            consecutiveCfdpCounter++;
          } else {
            consecutiveRegularCounter++;
          }
        }
      } else if (result != MessageQueueIF::EMPTY) {
        sif::warning << "LiveTmTask: TM queue failure, returncode 0x" << std::hex << std::setw(4)
                     << result << std::dec << std::endl;
      }
    }
    if (channelIsBusy and !throttlePeriodOngoing) {
      // Throttle the CFDP packet creator. It is by far the most relevant data creator, so
      // throttling it is the easiest way to handle back pressure for now in a sensible way.
      throttleCfdp();
    } else if (!channelIsBusy and throttlePeriodOngoing) {
      // Half-full/half-empty flow control: Release CFDP if the queue has drained enough.
      if (signals::CFDP_MSG_COUNTER <= config::LIVE_CHANNEL_CFDP_QUEUE_SIZE / 2) {
        releaseCfdp();
      }
    }
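    // Nothing was forwarded this cycle: flush the TM funnels periodically, poll the command
    // queue and then yield with a short idle delay.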
    if (!handledTm) {
      if (tmFunnelCd.hasTimedOut()) {
        pusFunnel.performOperation(0);
        cfdpFunnel.performOperation(0);
        tmFunnelCd.resetTimer();
      }
      // Read the command queue during idle times.
      readCommandQueue();
      if (DEBUG_TM_QUEUE_SPEED) {
        if (consecutiveCfdpCounter > 0) {
          sif::debug << "Consecutive CFDP TM handled: " << consecutiveCfdpCounter << std::endl;
        }
        if (consecutiveRegularCounter > 0) {
          sif::debug << "Consecutive regular TM handled: " << consecutiveRegularCounter
                     << std::endl;
        }
        consecutiveRegularCounter = 0;
        consecutiveCfdpCounter = 0;
      }
      // 40 ms IDLE delay. Might tweak this in the future.
      TaskFactory::delayTask(40);
    }
  }
}
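
// Mode interface boilerplate: the task only supports the ON and OFF modes and hangs in the
// system mode tree via the mode helper.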
MessageQueueId_t LiveTmTask::getCommandQueue() const { return requestQueue->getId(); }

void LiveTmTask::getMode(Mode_t* mode, Submode_t* submode) {
  if (mode != nullptr) {
    *mode = this->mode;
  }
  if (submode != nullptr) {
    *submode = SUBMODE_NONE;
  }
}

ReturnValue_t LiveTmTask::checkModeCommand(Mode_t mode, Submode_t submode,
                                           uint32_t* msToReachTheMode) {
  if (mode == MODE_ON or mode == MODE_OFF) {
    return returnvalue::OK;
  }
  return returnvalue::FAILED;
}

void LiveTmTask::startTransition(Mode_t mode, Submode_t submode) {
  this->mode = mode;
  modeHelper.modeChanged(mode, submode);
  announceMode(false);
}

void LiveTmTask::announceMode(bool recursive) { triggerEvent(MODE_INFO, mode, SUBMODE_NONE); }

object_id_t LiveTmTask::getObjectId() const { return SystemObject::getObjectId(); }

const HasHealthIF* LiveTmTask::getOptHealthIF() const { return nullptr; }

const HasModesIF& LiveTmTask::getModeIF() const { return *this; }

ReturnValue_t LiveTmTask::connectModeTreeParent(HasModeTreeChildrenIF& parent) {
  return modetree::connectModeTreeParent(parent, *this, nullptr, modeHelper);
}
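
// Polls the request queue once. Mode commands are forwarded to the mode helper; anything else
// is rejected as an unknown command.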
void LiveTmTask::readCommandQueue() {
  CommandMessage commandMessage;
  ReturnValue_t result = requestQueue->receiveMessage(&commandMessage);
  if (result == returnvalue::OK) {
    result = modeHelper.handleModeCommand(&commandMessage);
    if (result == returnvalue::OK) {
      return;
    }
    CommandMessage reply;
    reply.setReplyRejected(CommandMessage::UNKNOWN_COMMAND, commandMessage.getCommand());
    requestQueue->reply(&reply);
    return;
  }
}
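
// Thin wrappers so the main loop can poll both TM queues through the same generic handler.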
ReturnValue_t LiveTmTask::handleRegularTmQueue() {
  return handleGenericTmQueue(*regularTmQueue, false);
}

ReturnValue_t LiveTmTask::handleCfdpTmQueue() { return handleGenericTmQueue(*cfdpTmQueue, true); }
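
// Generic TM queue handler: fetch the next message, look up its payload in the TM store and
// write it to the virtual channel. A partial write throttles CFDP immediately and is completed
// synchronously; the channel is then treated as busy for one loop cycle.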
ReturnValue_t LiveTmTask::handleGenericTmQueue(MessageQueueIF& queue, bool isCfdp) {
  TmTcMessage message;
  ReturnValue_t result = queue.receiveMessage(&message);
  if (result == MessageQueueIF::EMPTY) {
    return result;
  }
  // Only CFDP messages are tracked by the flow-control counter.
  if (isCfdp and signals::CFDP_MSG_COUNTER > 0) {
    signals::CFDP_MSG_COUNTER--;
  }
  store_address_t storeId = message.getStorageId();
  const uint8_t* data = nullptr;
  size_t size = 0;
  result = tmStore->getData(storeId, &data, &size);
  if (result != returnvalue::OK) {
    sif::warning << "LiveTmTask::handleGenericTmQueue: Failed to read data from TM store"
                 << std::endl;
    tmStore->deleteData(storeId);
    return result;
  }
  if (!ptmeLocked) {
    size_t writtenSize = 0;
    result = channel.write(data, size, writtenSize);
    if (result == DirectTmSinkIF::PARTIALLY_WRITTEN) {
      // Throttle CFDP immediately.
      throttleCfdp();
      result = channel.handleWriteCompletionSynchronously(writtenSize, 200);
      if (result != returnvalue::OK) {
        // TODO: Trigger an event? Might lead to dangerous spam though.
        sif::warning << "LiveTmTask: Synchronous write of last segment failed with code 0x"
                     << std::setw(4) << std::hex << result << std::dec << std::endl;
      }
      // This is a bit of a hack: If a partial write was performed and completed synchronously,
      // we treat this like a busy channel.
      channelIsBusy = true;
    }
  }
  // Attempt deletion in any case and ignore failures (which should not happen): propagating
  // write errors is more important.
  tmStore->deleteData(storeId);
  return result;
}
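
// Throttle handshake with the CFDP packet creator. Setting the atomic signal is meant to pause
// CFDP TM generation on the producer side, which presumably polls it and also increments
// signals::CFDP_MSG_COUNTER for every queued message; clearing the signal resumes generation.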
void LiveTmTask::throttleCfdp() {
  throttlePeriodOngoing = true;
  signals::CFDP_CHANNEL_THROTTLE_SIGNAL = true;
  sif::debug << "throttling CFDP" << std::endl;
}

void LiveTmTask::releaseCfdp() {
  throttlePeriodOngoing = false;
  signals::CFDP_CHANNEL_THROTTLE_SIGNAL = false;
  sif::debug << "releasing CFDP" << std::endl;
}

void LiveTmTask::updateBusyFlag() {
  // We cache this as a member because the busy bit can toggle very quickly.
  channelIsBusy = channel.isBusy();
}

ModeTreeChildIF& LiveTmTask::getModeTreeChildIF() { return *this; }

ReturnValue_t LiveTmTask::initialize() {
  modeHelper.initialize();
  tmStore = ObjectManager::instance()->get<StorageManagerIF>(objects::TM_STORE);
  if (tmStore == nullptr) {
    return ObjectManagerIF::CHILD_INIT_FAILED;
  }
  return returnvalue::OK;
}
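
// Queue IDs which TM producers (presumably the PUS and CFDP funnels) use to route live TM into
// this task's queues.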
MessageQueueId_t LiveTmTask::getNormalLiveQueueId() const { return regularTmQueue->getId(); }

MessageQueueId_t LiveTmTask::getCfdpLiveQueueId() const { return cfdpTmQueue->getId(); }