Merge branch 'development' of https://egit.irs.uni-stuttgart.de/eive/fsfw into development

Jakob Meier 2021-03-26 12:25:20 +01:00
commit 42720e6f7d
72 changed files with 1938 additions and 1055 deletions


@@ -8,7 +8,9 @@
  */
 template<typename T, size_t MAX_SIZE, typename count_t = uint8_t>
 class FixedArrayList: public ArrayList<T, count_t> {
-    static_assert(MAX_SIZE <= (pow(2,sizeof(count_t)*8)-1), "count_t is not large enough to hold MAX_SIZE");
+#if !defined(_MSC_VER)
+    static_assert(MAX_SIZE <= (std::pow(2,sizeof(count_t)*8)-1), "count_t is not large enough to hold MAX_SIZE");
+#endif
 private:
     T data[MAX_SIZE];
 public:
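Side note on the change above: the guard is needed because std::pow is not usable in a constant expression on MSVC. A compiler-independent way to express the same bound (a sketch, not part of this commit; the class name is made up) compares against std::numeric_limits instead:

    #include <cstddef>
    #include <cstdint>
    #include <limits>

    /* Hypothetical variant: the largest value representable by count_t is
       std::numeric_limits<count_t>::max(), which is a constant expression everywhere. */
    template<typename T, size_t MAX_SIZE, typename count_t = uint8_t>
    class FixedArrayListSketch {
        static_assert(MAX_SIZE <= std::numeric_limits<count_t>::max(),
                "count_t is not large enough to hold MAX_SIZE");
        T data[MAX_SIZE];
    };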


@@ -1,6 +1,6 @@
 #include "SharedRingBuffer.h"
 #include "../ipc/MutexFactory.h"
-#include "../ipc/MutexHelper.h"
+#include "../ipc/MutexGuard.h"

 SharedRingBuffer::SharedRingBuffer(object_id_t objectId, const size_t size,
         bool overwriteOld, size_t maxExcessBytes):


@@ -66,7 +66,7 @@ public:
     /**
      * The mutex handle can be accessed directly, for example to perform
-     * the lock with the #MutexHelper for a RAII compliant lock operation.
+     * the lock with the #MutexGuard for a RAII compliant lock operation.
      * @return
      */
     MutexIF* getMutexHandle() const;


@@ -17,14 +17,6 @@ ReturnValue_t ExtendedControllerBase::executeAction(ActionId_t actionId,
     return HasReturnvaluesIF::RETURN_OK;
 }

-ReturnValue_t ExtendedControllerBase::initializeLocalDataPool(
-        localpool::DataPool &localDataPoolMap, LocalDataPoolManager &poolManager) {
-    /* Needs to be overriden and implemented by child class. */
-    return HasReturnvaluesIF::RETURN_OK;
-}
-
 object_id_t ExtendedControllerBase::getObjectId() const {
     return SystemObject::getObjectId();
 }
@@ -107,14 +99,6 @@ MessageQueueId_t ExtendedControllerBase::getCommandQueue() const {
     return commandQueue->getId();
 }

-LocalPoolDataSetBase* ExtendedControllerBase::getDataSetHandle(sid_t sid) {
-#if FSFW_CPP_OSTREAM_ENABLED == 1
-    sif::warning << "ExtendedControllerBase::getDataSetHandle: No child "
-            << " implementation provided, returning nullptr!" << std::endl;
-#endif
-    return nullptr;
-}
-
 LocalDataPoolManager* ExtendedControllerBase::getHkManagerHandle() {
     return &poolManager;
 }


@@ -61,11 +61,11 @@ protected:
     /* HasLocalDatapoolIF overrides */
     virtual LocalDataPoolManager* getHkManagerHandle() override;
     virtual object_id_t getObjectId() const override;
-    virtual ReturnValue_t initializeLocalDataPool(
-            localpool::DataPool& localDataPoolMap,
-            LocalDataPoolManager& poolManager) override;
     virtual uint32_t getPeriodicOperationFrequency() const override;
-    virtual LocalPoolDataSetBase* getDataSetHandle(sid_t sid) override;
+    virtual ReturnValue_t initializeLocalDataPool(localpool::DataPool& localDataPoolMap,
+            LocalDataPoolManager& poolManager) override = 0;
+    virtual LocalPoolDataSetBase* getDataSetHandle(sid_t sid) override = 0;
 };


@@ -8,13 +8,16 @@
 PoolDataSetBase::PoolDataSetBase(PoolVariableIF** registeredVariablesArray,
         const size_t maxFillCount):
         registeredVariables(registeredVariablesArray),
-        maxFillCount(maxFillCount) {
-}
+        maxFillCount(maxFillCount) {}

 PoolDataSetBase::~PoolDataSetBase() {}

 ReturnValue_t PoolDataSetBase::registerVariable(PoolVariableIF *variable) {
+    if(registeredVariables == nullptr) {
+        /* Underlying container invalid */
+        return HasReturnvaluesIF::RETURN_FAILED;
+    }
     if (state != States::STATE_SET_UNINITIALISED) {
 #if FSFW_CPP_OSTREAM_ENABLED == 1
         sif::error << "DataSet::registerVariable: Call made in wrong position." << std::endl;


@@ -9,8 +9,8 @@
  * and unlock a data pool and read/commit semantics.
  */
 class PoolDataSetIF:
-        public DataSetIF,
-        public ReadCommitIF {
+        virtual public DataSetIF,
+        virtual public ReadCommitIF {
 public:
     virtual~ PoolDataSetIF() {};


@@ -7,7 +7,7 @@
 template <typename T>
 PoolEntry<T>::PoolEntry(std::initializer_list<T> initValue, bool setValid ):
-        length(initValue.size()), valid(setValid) {
+        length(static_cast<uint8_t>(initValue.size())), valid(setValid) {
     this->address = new T[this->length];
     if(initValue.size() == 0) {
         std::memset(this->address, 0, this->getByteSize());


@@ -1,13 +1,15 @@
 #ifndef FRAMEWORK_DATAPOOL_SHAREDDATASETIF_H_
 #define FRAMEWORK_DATAPOOL_SHAREDDATASETIF_H_

 #include "PoolDataSetIF.h"

-class SharedDataSetIF: public PoolDataSetIF {
+class SharedDataSetIF {
 public:
     virtual ~SharedDataSetIF() {};

 private:
-    virtual ReturnValue_t lockDataset(dur_millis_t mutexTimeout) = 0;
+    virtual ReturnValue_t lockDataset(MutexIF::TimeoutType timeoutType,
+            dur_millis_t mutexTimeout) = 0;
     virtual ReturnValue_t unlockDataset() = 0;
 };


@@ -65,34 +65,45 @@ public:
      * usually be the period the pool owner performs its periodic operation.
      * @return
      */
-    virtual uint32_t getPeriodicOperationFrequency() const = 0;
+    virtual dur_millis_t getPeriodicOperationFrequency() const = 0;

     /**
      * @brief This function will be called by the manager if an update
      * notification is received.
      * @details HasLocalDataPoolIF
      * Can be overriden by the child class to handle changed datasets.
-     * @param sid
+     * @param sid SID of the updated set
      * @param storeId If a snapshot was requested, data will be located inside
      * the IPC store with this store ID.
+     * @param clearMessage If this is set to true, the pool manager will take care of
+     * clearing the store automatically
      */
     virtual void handleChangedDataset(sid_t sid,
-            store_address_t storeId = storeId::INVALID_STORE_ADDRESS) {
-        return;
+            store_address_t storeId = storeId::INVALID_STORE_ADDRESS,
+            bool* clearMessage = nullptr) {
+        if(clearMessage != nullptr) {
+            *clearMessage = true;
+        }
     }

     /**
      * @brief This function will be called by the manager if an update
      * notification is received.
      * @details
-     * Can be overriden by the child class to handle changed pool IDs.
-     * @param sid
+     * Can be overriden by the child class to handle changed pool variables.
+     * @param gpid GPID of the updated variable.
      * @param storeId If a snapshot was requested, data will be located inside
      * the IPC store with this store ID.
+     * @param clearMessage Relevant for snapshots. If the boolean this points to is set to true,
+     * the pool manager will take care of clearing the store automatically
+     * after the callback.
      */
-    virtual void handleChangedPoolVariable(gp_id_t globPoolId,
-            store_address_t storeId = storeId::INVALID_STORE_ADDRESS) {
-        return;
+    virtual void handleChangedPoolVariable(gp_id_t gpid,
+            store_address_t storeId = storeId::INVALID_STORE_ADDRESS,
+            bool* clearMessage = nullptr) {
+        if(clearMessage != nullptr) {
+            *clearMessage = true;
+        }
     }

     /**
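For illustration, an owner class could override the extended callback like this (a minimal sketch against the interface above; the class name is hypothetical and error handling is omitted):

    /* MyController is assumed to derive from a class implementing HasLocalDataPoolIF. */
    void MyController::handleChangedDataset(sid_t sid, store_address_t storeId,
            bool* clearMessage) {
        if(storeId != storeId::INVALID_STORE_ADDRESS) {
            /* A snapshot was sent: the serialized set data can be read from the IPC store
               under storeId before it is cleared. */
        }
        if(clearMessage != nullptr) {
            /* Leaving this at true lets the pool manager clear the store entry and the
               message after the callback returns. */
            *clearMessage = true;
        }
    }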


@@ -10,7 +10,7 @@
 #include "../housekeeping/AcceptsHkPacketsIF.h"
 #include "../timemanager/CCSDSTime.h"
 #include "../ipc/MutexFactory.h"
-#include "../ipc/MutexHelper.h"
+#include "../ipc/MutexGuard.h"
 #include "../ipc/QueueFactory.h"

 #include <array>
@@ -38,7 +38,11 @@ LocalDataPoolManager::LocalDataPoolManager(HasLocalDataPoolIF* owner, MessageQue
     hkQueue = queueToUse;
 }

-LocalDataPoolManager::~LocalDataPoolManager() {}
+LocalDataPoolManager::~LocalDataPoolManager() {
+    if(mutex != nullptr) {
+        MutexFactory::instance()->deleteMutex(mutex);
+    }
+}

 ReturnValue_t LocalDataPoolManager::initialize(MessageQueueIF* queueToUse) {
     if(queueToUse == nullptr) {
@@ -132,13 +136,16 @@ ReturnValue_t LocalDataPoolManager::performHkOperation() {
 ReturnValue_t LocalDataPoolManager::handleHkUpdate(HkReceiver& receiver,
         ReturnValue_t& status) {
     if(receiver.dataType == DataType::LOCAL_POOL_VARIABLE) {
-        // Update packets shall only be generated from datasets.
+        /* Update packets shall only be generated from datasets. */
         return HasReturnvaluesIF::RETURN_FAILED;
     }
     LocalPoolDataSetBase* dataSet = HasLocalDpIFManagerAttorney::getDataSetHandle(owner,
             receiver.dataId.sid);
+    if(dataSet == nullptr) {
+        return DATASET_NOT_FOUND;
+    }
     if(dataSet->hasChanged()) {
-        // prepare and send update notification
+        /* Prepare and send update notification */
         ReturnValue_t result = generateHousekeepingPacket(
                 receiver.dataId.sid, dataSet, true);
         if(result != HasReturnvaluesIF::RETURN_OK) {
@@ -328,7 +335,7 @@ void LocalDataPoolManager::handleChangeResetLogic(
             toReset->setChanged(false);
         }
         /* All recipients have been notified, reset the changed flag */
-        if(changeInfo.currentUpdateCounter <= 1) {
+        else if(changeInfo.currentUpdateCounter <= 1) {
             toReset->setChanged(false);
             changeInfo.currentUpdateCounter = 0;
         }
@@ -372,7 +379,7 @@ ReturnValue_t LocalDataPoolManager::subscribeForPeriodicPacket(sid_t sid,
         LocalPoolDataSetAttorney::setReportingEnabled(*dataSet, enableReporting);
         LocalPoolDataSetAttorney::setDiagnostic(*dataSet, isDiagnostics);
         LocalPoolDataSetAttorney::initializePeriodicHelper(*dataSet, collectionInterval,
-                owner->getPeriodicOperationFrequency(), isDiagnostics);
+                owner->getPeriodicOperationFrequency());
     }

     hkReceivers.push_back(hkReceiver);
@@ -398,7 +405,6 @@ ReturnValue_t LocalDataPoolManager::subscribeForUpdatePacket(sid_t sid,
     hkReceiver.destinationQueue = hkReceiverObject->getHkQueue();

     LocalPoolDataSetBase* dataSet = HasLocalDpIFManagerAttorney::getDataSetHandle(owner, sid);
-    //LocalPoolDataSetBase* dataSet = owner->getDataSetHandle(sid);
     if(dataSet != nullptr) {
         LocalPoolDataSetAttorney::setReportingEnabled(*dataSet, true);
         LocalPoolDataSetAttorney::setDiagnostic(*dataSet, isDiagnostics);
@@ -516,11 +522,19 @@ ReturnValue_t LocalDataPoolManager::handleHousekeepingMessage(
     }
     case(HousekeepingMessage::REPORT_DIAGNOSTICS_REPORT_STRUCTURES): {
-        return generateSetStructurePacket(sid, true);
+        result = generateSetStructurePacket(sid, true);
+        if(result == HasReturnvaluesIF::RETURN_OK) {
+            return result;
+        }
+        break;
     }
     case(HousekeepingMessage::REPORT_HK_REPORT_STRUCTURES): {
-        return generateSetStructurePacket(sid, false);
+        result = generateSetStructurePacket(sid, false);
+        if(result == HasReturnvaluesIF::RETURN_OK) {
+            return result;
+        }
+        break;
     }
     case(HousekeepingMessage::MODIFY_DIAGNOSTICS_REPORT_COLLECTION_INTERVAL):
     case(HousekeepingMessage::MODIFY_PARAMETER_REPORT_COLLECTION_INTERVAL): {
@@ -540,14 +554,15 @@ ReturnValue_t LocalDataPoolManager::handleHousekeepingMessage(
     case(HousekeepingMessage::GENERATE_ONE_PARAMETER_REPORT):
     case(HousekeepingMessage::GENERATE_ONE_DIAGNOSTICS_REPORT): {
         LocalPoolDataSetBase* dataSet =HasLocalDpIFManagerAttorney::getDataSetHandle(owner, sid);
-        //LocalPoolDataSetBase* dataSet = owner->getDataSetHandle(sid);
         if(command == HousekeepingMessage::GENERATE_ONE_PARAMETER_REPORT
                 and LocalPoolDataSetAttorney::isDiagnostics(*dataSet)) {
-            return WRONG_HK_PACKET_TYPE;
+            result = WRONG_HK_PACKET_TYPE;
+            break;
         }
         else if(command == HousekeepingMessage::GENERATE_ONE_DIAGNOSTICS_REPORT
                 and not LocalPoolDataSetAttorney::isDiagnostics(*dataSet)) {
-            return WRONG_HK_PACKET_TYPE;
+            result = WRONG_HK_PACKET_TYPE;
+            break;
         }
         return generateHousekeepingPacket(HousekeepingMessage::getSid(message),
                 dataSet, true);
@@ -566,14 +581,22 @@ ReturnValue_t LocalDataPoolManager::handleHousekeepingMessage(
     case(HousekeepingMessage::UPDATE_SNAPSHOT_SET): {
         store_address_t storeId;
         HousekeepingMessage::getUpdateSnapshotSetCommand(message, &storeId);
-        owner->handleChangedDataset(sid, storeId);
+        bool clearMessage = true;
+        owner->handleChangedDataset(sid, storeId, &clearMessage);
+        if(clearMessage) {
+            message->clear();
+        }
         return HasReturnvaluesIF::RETURN_OK;
     }
     case(HousekeepingMessage::UPDATE_SNAPSHOT_VARIABLE): {
         store_address_t storeId;
         gp_id_t globPoolId = HousekeepingMessage::getUpdateSnapshotVariableCommand(message,
                 &storeId);
-        owner->handleChangedPoolVariable(globPoolId, storeId);
+        bool clearMessage = true;
+        owner->handleChangedPoolVariable(globPoolId, storeId, &clearMessage);
+        if(clearMessage) {
+            message->clear();
+        }
         return HasReturnvaluesIF::RETURN_OK;
     }
@@ -616,7 +639,7 @@ ReturnValue_t LocalDataPoolManager::generateHousekeepingPacket(sid_t sid,
         LocalPoolDataSetBase* dataSet, bool forDownlink,
         MessageQueueId_t destination) {
     if(dataSet == nullptr) {
-        // Configuration error.
+        /* Configuration error. */
         printWarningOrError(sif::OutputTypes::OUT_WARNING,
                 "generateHousekeepingPacket",
                 DATASET_NOT_FOUND);
@@ -632,7 +655,7 @@ ReturnValue_t LocalDataPoolManager::generateHousekeepingPacket(sid_t sid,
         return result;
     }

-    // and now we set a HK message and send it the HK packet destination.
+    /* Now we set a HK message and send it the HK packet destination. */
     CommandMessage hkMessage;
     if(LocalPoolDataSetAttorney::isDiagnostics(*dataSet)) {
         HousekeepingMessage::setHkDiagnosticsReply(&hkMessage, sid, storeId);
@@ -642,7 +665,7 @@ ReturnValue_t LocalDataPoolManager::generateHousekeepingPacket(sid_t sid,
     }

     if(hkQueue == nullptr) {
-        // error, no queue available to send packet with.
+        /* Error, no queue available to send packet with. */
         printWarningOrError(sif::OutputTypes::OUT_WARNING,
                 "generateHousekeepingPacket",
                 QUEUE_OR_DESTINATION_INVALID);
@@ -650,7 +673,7 @@ ReturnValue_t LocalDataPoolManager::generateHousekeepingPacket(sid_t sid,
     }
     if(destination == MessageQueueIF::NO_QUEUE) {
         if(hkDestinationId == MessageQueueIF::NO_QUEUE) {
-            // error, all destinations invalid
+            /* Error, all destinations invalid */
             printWarningOrError(sif::OutputTypes::OUT_WARNING,
                     "generateHousekeepingPacket",
                     QUEUE_OR_DESTINATION_INVALID);
@@ -729,6 +752,12 @@ void LocalDataPoolManager::performPeriodicHkGeneration(HkReceiver& receiver) {
 ReturnValue_t LocalDataPoolManager::togglePeriodicGeneration(sid_t sid,
         bool enable, bool isDiagnostics) {
     LocalPoolDataSetBase* dataSet = HasLocalDpIFManagerAttorney::getDataSetHandle(owner, sid);
+    if(dataSet == nullptr) {
+        printWarningOrError(sif::OutputTypes::OUT_WARNING, "togglePeriodicGeneration",
+                DATASET_NOT_FOUND);
+        return DATASET_NOT_FOUND;
+    }
     if((LocalPoolDataSetAttorney::isDiagnostics(*dataSet) and not isDiagnostics) or
             (not LocalPoolDataSetAttorney::isDiagnostics(*dataSet) and isDiagnostics)) {
         return WRONG_HK_PACKET_TYPE;
@@ -746,6 +775,12 @@ ReturnValue_t LocalDataPoolManager::togglePeriodicGeneration(sid_t sid,
 ReturnValue_t LocalDataPoolManager::changeCollectionInterval(sid_t sid,
         float newCollectionInterval, bool isDiagnostics) {
     LocalPoolDataSetBase* dataSet = HasLocalDpIFManagerAttorney::getDataSetHandle(owner, sid);
+    if(dataSet == nullptr) {
+        printWarningOrError(sif::OutputTypes::OUT_WARNING, "changeCollectionInterval",
+                DATASET_NOT_FOUND);
+        return DATASET_NOT_FOUND;
+    }
     bool targetIsDiagnostics = LocalPoolDataSetAttorney::isDiagnostics(*dataSet);
     if((targetIsDiagnostics and not isDiagnostics) or
             (not targetIsDiagnostics and isDiagnostics)) {
@@ -756,7 +791,7 @@ ReturnValue_t LocalDataPoolManager::changeCollectionInterval(sid_t sid,
             LocalPoolDataSetAttorney::getPeriodicHelper(*dataSet);

     if(periodicHelper == nullptr) {
-        // config error
+        /* Configuration error, set might not have a corresponding pool manager */
         return PERIODIC_HELPER_INVALID;
     }
@@ -766,13 +801,11 @@ ReturnValue_t LocalDataPoolManager::changeCollectionInterval(sid_t sid,
 ReturnValue_t LocalDataPoolManager::generateSetStructurePacket(sid_t sid,
         bool isDiagnostics) {
-    // Get and check dataset first.
-    //LocalPoolDataSetBase* dataSet = owner->getDataSetHandle(sid);
+    /* Get and check dataset first. */
     LocalPoolDataSetBase* dataSet = HasLocalDpIFManagerAttorney::getDataSetHandle(owner, sid);
     if(dataSet == nullptr) {
         printWarningOrError(sif::OutputTypes::OUT_WARNING,
-                "performPeriodicHkGeneration",
-                DATASET_NOT_FOUND);
+                "performPeriodicHkGeneration", DATASET_NOT_FOUND);
         return DATASET_NOT_FOUND;
     }
@@ -831,6 +864,10 @@ ReturnValue_t LocalDataPoolManager::generateSetStructurePacket(sid_t sid,
 void LocalDataPoolManager::clearReceiversList() {
     /* Clear the vector completely and releases allocated memory. */
     HkReceivers().swap(hkReceivers);
+    /* Also clear the reset helper if it exists */
+    if(hkUpdateResetList != nullptr) {
+        HkUpdateResetList().swap(*hkUpdateResetList);
+    }
 }

 MutexIF* LocalDataPoolManager::getLocalPoolMutex() {
@@ -843,6 +880,7 @@ object_id_t LocalDataPoolManager::getCreatorObjectId() const {
 void LocalDataPoolManager::printWarningOrError(sif::OutputTypes outputType,
         const char* functionName, ReturnValue_t error, const char* errorPrint) {
+#if FSFW_VERBOSE_LEVEL >= 1
     if(errorPrint == nullptr) {
         if(error == DATASET_NOT_FOUND) {
             errorPrint = "Dataset not found";
@@ -873,7 +911,6 @@ void LocalDataPoolManager::printWarningOrError(sif::OutputTypes outputType,
     }

     if(outputType == sif::OutputTypes::OUT_WARNING) {
-#if FSFW_VERBOSE_LEVEL >= 1
 #if FSFW_CPP_OSTREAM_ENABLED == 1
         sif::warning << "LocalDataPoolManager::" << functionName
                 << ": Object ID 0x" << std::setw(8) << std::setfill('0')
@@ -883,10 +920,8 @@ void LocalDataPoolManager::printWarningOrError(sif::OutputTypes outputType,
         sif::printWarning("LocalDataPoolManager::%s: Object ID 0x%08x | %s\n",
                 functionName, owner->getObjectId(), errorPrint);
 #endif /* FSFW_CPP_OSTREAM_ENABLED == 1 */
-#endif /* FSFW_VERBOSE_LEVEL >= 1 */
     }
     else if(outputType == sif::OutputTypes::OUT_ERROR) {
-#if FSFW_VERBOSE_LEVEL >= 1
 #if FSFW_CPP_OSTREAM_ENABLED == 1
         sif::error << "LocalDataPoolManager::" << functionName
                 << ": Object ID 0x" << std::setw(8) << std::setfill('0')
@@ -896,8 +931,8 @@ void LocalDataPoolManager::printWarningOrError(sif::OutputTypes outputType,
         sif::printError("LocalDataPoolManager::%s: Object ID 0x%08x | %s\n",
                 functionName, owner->getObjectId(), errorPrint);
 #endif /* FSFW_CPP_OSTREAM_ENABLED == 1 */
-#endif /* FSFW_VERBOSE_LEVEL >= 1 */
     }
+#endif /* #if FSFW_VERBOSE_LEVEL >= 1 */
 }

 LocalDataPoolManager* LocalDataPoolManager::getPoolManagerHandle() {


@@ -14,7 +14,7 @@
 #include "../ipc/MutexIF.h"
 #include "../ipc/CommandMessage.h"
 #include "../ipc/MessageQueueIF.h"
-#include "../ipc/MutexHelper.h"
+#include "../ipc/MutexGuard.h"

 #include <map>
 #include <vector>
@@ -391,6 +391,10 @@ protected:
 template<class T> inline
 ReturnValue_t LocalDataPoolManager::fetchPoolEntry(lp_id_t localPoolId, PoolEntry<T> **poolEntry) {
+    if(poolEntry == nullptr) {
+        return HasReturnvaluesIF::RETURN_FAILED;
+    }
     auto poolIter = localPoolMap.find(localPoolId);
     if (poolIter == localPoolMap.end()) {
         printWarningOrError(sif::OutputTypes::OUT_WARNING, "fetchPoolEntry",


@@ -44,7 +44,7 @@ LocalPoolDataSetBase::LocalPoolDataSetBase(HasLocalDataPoolIF *hkOwner,
 LocalPoolDataSetBase::LocalPoolDataSetBase(sid_t sid, PoolVariableIF** registeredVariablesArray,
         const size_t maxNumberOfVariables):
         PoolDataSetBase(registeredVariablesArray, maxNumberOfVariables) {
     HasLocalDataPoolIF* hkOwner = objectManager->get<HasLocalDataPoolIF>(
             sid.objectId);
     if(hkOwner != nullptr) {
@@ -58,8 +58,7 @@ LocalPoolDataSetBase::LocalPoolDataSetBase(sid_t sid, PoolVariableIF** registere
     this->sid = sid;
 }

-LocalPoolDataSetBase::LocalPoolDataSetBase(
-        PoolVariableIF **registeredVariablesArray,
+LocalPoolDataSetBase::LocalPoolDataSetBase(PoolVariableIF **registeredVariablesArray,
         const size_t maxNumberOfVariables, bool protectEveryReadCommitCall):
         PoolDataSetBase(registeredVariablesArray, maxNumberOfVariables) {
     this->setReadCommitProtectionBehaviour(protectEveryReadCommitCall);
@@ -95,14 +94,23 @@ ReturnValue_t LocalPoolDataSetBase::serializeWithValidityBuffer(uint8_t **buffer
         size_t *size, size_t maxSize,
         SerializeIF::Endianness streamEndianness) const {
     ReturnValue_t result = HasReturnvaluesIF::RETURN_OK;
-    uint8_t validityMaskSize = std::ceil(static_cast<float>(fillCount)/8.0);
-    uint8_t validityMask[validityMaskSize] = {};
+    const uint8_t validityMaskSize = std::ceil(static_cast<float>(fillCount)/8.0);
+    uint8_t* validityPtr = nullptr;
+#ifdef _MSC_VER
+    /* Use a std::vector here because MSVC will (rightly) not create a fixed size array
+    with a non constant size specifier */
+    std::vector<uint8_t> validityMask(validityMaskSize);
+    validityPtr = validityMask.data();
+#else
+    uint8_t validityMask[validityMaskSize] = {0};
+    validityPtr = validityMask;
+#endif
     uint8_t validBufferIndex = 0;
     uint8_t validBufferIndexBit = 0;
     for (uint16_t count = 0; count < fillCount; count++) {
         if(registeredVariables[count]->isValid()) {
             /* Set bit at correct position */
-            bitutil::bitSet(validityMask + validBufferIndex, validBufferIndexBit);
+            bitutil::bitSet(validityPtr + validBufferIndex, validBufferIndexBit);
         }
         if(validBufferIndexBit == 7) {
             validBufferIndex ++;
@@ -123,7 +131,7 @@ ReturnValue_t LocalPoolDataSetBase::serializeWithValidityBuffer(uint8_t **buffer
         return SerializeIF::BUFFER_TOO_SHORT;
     }
     // copy validity buffer to end
-    std::memcpy(*buffer, validityMask, validityMaskSize);
+    std::memcpy(*buffer, validityPtr, validityMaskSize);
     *size += validityMaskSize;
     return result;
 }
@@ -262,11 +270,9 @@ bool LocalPoolDataSetBase::getReportingEnabled() const {
     return reportingEnabled;
 }

-void LocalPoolDataSetBase::initializePeriodicHelper(
-        float collectionInterval, dur_millis_t minimumPeriodicInterval,
-        bool isDiagnostics, uint8_t nonDiagIntervalFactor) {
-    periodicHelper->initialize(collectionInterval, minimumPeriodicInterval,
-            isDiagnostics, nonDiagIntervalFactor);
+void LocalPoolDataSetBase::initializePeriodicHelper(float collectionInterval,
+        dur_millis_t minimumPeriodicInterval, uint8_t nonDiagIntervalFactor) {
+    periodicHelper->initialize(collectionInterval, minimumPeriodicInterval, nonDiagIntervalFactor);
 }

 void LocalPoolDataSetBase::setChanged(bool changed) {
@@ -306,3 +312,12 @@ void LocalPoolDataSetBase::setAllVariablesReadOnly() {
         registeredVariables[idx]->setReadWriteMode(pool_rwm_t::VAR_READ);
     }
 }
+
+float LocalPoolDataSetBase::getCollectionInterval() const {
+    if(periodicHelper != nullptr) {
+        return periodicHelper->getCollectionIntervalInSeconds();
+    }
+    else {
+        return 0.0;
+    }
+}


@@ -166,6 +166,16 @@ public:
     object_id_t getCreatorObjectId();

+    bool getReportingEnabled() const;
+
+    /**
+     * Returns the current periodic HK generation interval if this set belongs to a HK manager
+     * and the interval is not 0. Otherwise, returns 0.0
+     * @return
+     */
+    float getCollectionInterval() const;
+
 protected:
     sid_t sid;
     //! This mutex is used if the data is created by one object only.
@@ -180,11 +190,9 @@ protected:
      */
     bool reportingEnabled = false;
     void setReportingEnabled(bool enabled);
-    bool getReportingEnabled() const;

-    void initializePeriodicHelper(float collectionInterval,
-            dur_millis_t minimumPeriodicInterval,
-            bool isDiagnostics, uint8_t nonDiagIntervalFactor = 5);
+    void initializePeriodicHelper(float collectionInterval, dur_millis_t minimumPeriodicInterval,
+            uint8_t nonDiagIntervalFactor = 5);

     /**
      * If the valid state of a dataset is always relevant to the whole


@@ -25,7 +25,7 @@ inline LocalPoolVector<T, vectorSize>::LocalPoolVector(gp_id_t globalPoolId,
 template<typename T, uint16_t vectorSize>
 inline ReturnValue_t LocalPoolVector<T, vectorSize>::read(
         MutexIF::TimeoutType timeoutType, uint32_t timeoutMs) {
-    MutexHelper(LocalDpManagerAttorney::getMutexHandle(*hkManager), timeoutType, timeoutMs);
+    MutexGuard(LocalDpManagerAttorney::getMutexHandle(*hkManager), timeoutType, timeoutMs);
     return readWithoutLock();
 }
 template<typename T, uint16_t vectorSize>
@@ -64,7 +64,7 @@ inline ReturnValue_t LocalPoolVector<T, vectorSize>::commit(bool valid,
 template<typename T, uint16_t vectorSize>
 inline ReturnValue_t LocalPoolVector<T, vectorSize>::commit(
         MutexIF::TimeoutType timeoutType, uint32_t timeoutMs) {
-    MutexHelper(LocalDpManagerAttorney::getMutexHandle(*hkManager), timeoutType, timeoutMs);
+    MutexGuard(LocalDpManagerAttorney::getMutexHandle(*hkManager), timeoutType, timeoutMs);
     return commitWithoutLock();
 }


@@ -1,16 +1,37 @@
 #include "SharedLocalDataSet.h"

 SharedLocalDataSet::SharedLocalDataSet(object_id_t objectId, sid_t sid,
         const size_t maxSize): SystemObject(objectId),
-        LocalPoolDataSetBase(sid, nullptr, maxSize) {
+        LocalPoolDataSetBase(sid, nullptr, maxSize), poolVarVector(maxSize) {
     this->setContainer(poolVarVector.data());
     datasetLock = MutexFactory::instance()->createMutex();
 }

-ReturnValue_t SharedLocalDataSet::lockDataset(dur_millis_t mutexTimeout) {
-    return datasetLock->lockMutex(MutexIF::TimeoutType::WAITING, mutexTimeout);
+SharedLocalDataSet::SharedLocalDataSet(object_id_t objectId,
+        HasLocalDataPoolIF *owner, uint32_t setId,
+        const size_t maxSize): SystemObject(objectId),
+        LocalPoolDataSetBase(owner, setId, nullptr, maxSize), poolVarVector(maxSize) {
+    this->setContainer(poolVarVector.data());
+    datasetLock = MutexFactory::instance()->createMutex();
+}
+
+ReturnValue_t SharedLocalDataSet::lockDataset(MutexIF::TimeoutType timeoutType,
+        dur_millis_t mutexTimeout) {
+    if(datasetLock != nullptr) {
+        return datasetLock->lockMutex(timeoutType, mutexTimeout);
+    }
+    return HasReturnvaluesIF::RETURN_FAILED;
+}
+
+SharedLocalDataSet::~SharedLocalDataSet() {
+    MutexFactory::instance()->deleteMutex(datasetLock);
 }

 ReturnValue_t SharedLocalDataSet::unlockDataset() {
-    return datasetLock->unlockMutex();
+    if(datasetLock != nullptr) {
+        return datasetLock->unlockMutex();
+    }
+    return HasReturnvaluesIF::RETURN_FAILED;
 }


@@ -11,16 +11,22 @@
  * multiple threads. It provides a lock in addition to all other functionalities provided
  * by the LocalPoolDataSetBase class.
  *
- * TODO: override and protect read, commit and some other calls used by pool manager.
+ * The user is completely responsible for locking and unlocking the dataset when using the
+ * shared dataset.
  */
 class SharedLocalDataSet:
         public SystemObject,
         public LocalPoolDataSetBase,
         public SharedDataSetIF {
 public:
-    SharedLocalDataSet(object_id_t objectId, sid_t sid,
-            const size_t maxSize);
-    ReturnValue_t lockDataset(dur_millis_t mutexTimeout) override;
+    SharedLocalDataSet(object_id_t objectId, HasLocalDataPoolIF* owner, uint32_t setId,
+            const size_t maxSize);
+    SharedLocalDataSet(object_id_t objectId, sid_t sid, const size_t maxSize);
+
+    virtual~ SharedLocalDataSet();
+
+    ReturnValue_t lockDataset(MutexIF::TimeoutType timeoutType = MutexIF::TimeoutType::WAITING,
+            dur_millis_t mutexTimeout = 20) override;
     ReturnValue_t unlockDataset() override;

 private:
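A minimal usage sketch for the reworked shared-set API (the object ID, owner pointer, set ID and set size are invented for illustration and not part of the commit):

    /* Producer side: fill the shared set under the lock, then release it. */
    SharedLocalDataSet sharedSet(0x44000001 /* assumed object ID */,
            hkOwner /* HasLocalDataPoolIF* of the owning handler */,
            0x01 /* set ID */, 10 /* max variables */);

    if(sharedSet.lockDataset(MutexIF::TimeoutType::WAITING, 20) == HasReturnvaluesIF::RETURN_OK) {
        /* ... update or commit the pool variables registered in the set ... */
        sharedSet.unlockDataset();
    }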


@@ -0,0 +1,12 @@
#ifndef FSFW_DATAPOOLLOCAL_DATAPOOLLOCAL_H_
#define FSFW_DATAPOOLLOCAL_DATAPOOLLOCAL_H_
/* Collected related headers */
#include "LocalPoolVariable.h"
#include "LocalPoolVector.h"
#include "StaticLocalDataSet.h"
#include "LocalDataSet.h"
#include "SharedLocalDataSet.h"
#endif /* FSFW_DATAPOOLLOCAL_DATAPOOLLOCAL_H_ */


@@ -14,9 +14,8 @@ private:
     }

     static void initializePeriodicHelper(LocalPoolDataSetBase& set, float collectionInterval,
-            uint32_t minimumPeriodicIntervalMs,
-            bool isDiagnostics, uint8_t nonDiagIntervalFactor = 5) {
-        set.initializePeriodicHelper(collectionInterval, minimumPeriodicIntervalMs, isDiagnostics,
+            uint32_t minimumPeriodicIntervalMs, uint8_t nonDiagIntervalFactor = 5) {
+        set.initializePeriodicHelper(collectionInterval, minimumPeriodicIntervalMs,
                 nonDiagIntervalFactor);
     }


@@ -96,11 +96,11 @@ union gp_id_t {
         return raw == INVALID_GPID;
     }

-    bool operator==(const sid_t& other) const {
+    bool operator==(const gp_id_t& other) const {
         return raw == other.raw;
     }

-    bool operator!=(const sid_t& other) const {
+    bool operator!=(const gp_id_t& other) const {
         return not (raw == other.raw);
     }
 };


@@ -308,6 +308,14 @@ void DeviceHandlerBase::doStateMachine() {
         uint32_t currentUptime;
         Clock::getUptime(&currentUptime);
         if (currentUptime - timeoutStart >= childTransitionDelay) {
+#if FSFW_VERBOSE_LEVEL >= 1
+            char printout[60];
+            sprintf(printout, "Transition timeout (%lu) occurred !",
+                    static_cast<unsigned long>(childTransitionDelay));
+            /* Very common configuration error, so print it */
+            printWarningOrError(sif::OutputTypes::OUT_WARNING, "doStateMachine",
+                    RETURN_FAILED, printout);
+#endif
             triggerEvent(MODE_TRANSITION_FAILED, childTransitionFailure, 0);
             setMode(transitionSourceMode, transitionSourceSubMode);
             break;


@@ -119,7 +119,7 @@ public:
             DeviceHandlerIF::DEFAULT_THERMAL_STATE_POOL_ID,
             lp_id_t thermalRequestPoolId =
                     DeviceHandlerIF::DEFAULT_THERMAL_HEATING_REQUEST_POOL_ID,
             uint32_t thermalSetId = DeviceHandlerIF::DEFAULT_THERMAL_SET_ID);
     /**
      * @brief Helper function to ease device handler development.
      * This will instruct the transition to MODE_ON immediately


@@ -76,7 +76,9 @@ void arrayprinter::printHex(const uint8_t *data, size_t size,
             }
         }
     }
+#if FSFW_DISABLE_PRINTOUT == 0
     printf("[%s]\n", printBuffer);
+#endif /* FSFW_DISABLE_PRINTOUT == 0 */
 #endif
 }
@@ -117,7 +119,9 @@ void arrayprinter::printDec(const uint8_t *data, size_t size,
             }
         }
     }
+#if FSFW_DISABLE_PRINTOUT == 0
     printf("[%s]\n", printBuffer);
+#endif /* FSFW_DISABLE_PRINTOUT == 0 */
 #endif
 }


@@ -1,5 +1,5 @@
 #include "HealthTable.h"
-#include "../ipc/MutexHelper.h"
+#include "../ipc/MutexGuard.h"
 #include "../ipc/MutexFactory.h"
 #include "../serialize/SerializeAdapter.h"
@@ -31,7 +31,7 @@ ReturnValue_t HealthTable::registerObject(object_id_t object,
 void HealthTable::setHealth(object_id_t object,
         HasHealthIF::HealthState newState) {
-    MutexHelper(mutex, timeoutType, mutexTimeoutMs);
+    MutexGuard(mutex, timeoutType, mutexTimeoutMs);
     HealthMap::iterator iter = healthMap.find(object);
     if (iter != healthMap.end()) {
         iter->second = newState;
@@ -40,7 +40,7 @@ void HealthTable::setHealth(object_id_t object,
 HasHealthIF::HealthState HealthTable::getHealth(object_id_t object) {
     HasHealthIF::HealthState state = HasHealthIF::HEALTHY;
-    MutexHelper(mutex, timeoutType, mutexTimeoutMs);
+    MutexGuard(mutex, timeoutType, mutexTimeoutMs);
     HealthMap::iterator iter = healthMap.find(object);
     if (iter != healthMap.end()) {
         state = iter->second;
@@ -49,7 +49,7 @@ HasHealthIF::HealthState HealthTable::getHealth(object_id_t object) {
 }

 bool HealthTable::hasHealth(object_id_t object) {
-    MutexHelper(mutex, timeoutType, mutexTimeoutMs);
+    MutexGuard(mutex, timeoutType, mutexTimeoutMs);
     HealthMap::iterator iter = healthMap.find(object);
     if (iter != healthMap.end()) {
         return true;
@@ -58,14 +58,14 @@ bool HealthTable::hasHealth(object_id_t object) {
 }

 size_t HealthTable::getPrintSize() {
-    MutexHelper(mutex, timeoutType, mutexTimeoutMs);
+    MutexGuard(mutex, timeoutType, mutexTimeoutMs);
     uint32_t size = healthMap.size() * sizeof(object_id_t) +
             sizeof(HasHealthIF::HealthState) + sizeof(uint16_t);
     return size;
 }

 void HealthTable::printAll(uint8_t* pointer, size_t maxSize) {
-    MutexHelper(mutex, timeoutType, mutexTimeoutMs);
+    MutexGuard(mutex, timeoutType, mutexTimeoutMs);
     size_t size = 0;
     uint16_t count = healthMap.size();
     SerializeAdapter::serialize(&count,
@@ -81,7 +81,7 @@ void HealthTable::printAll(uint8_t* pointer, size_t maxSize) {
 ReturnValue_t HealthTable::iterate(HealthEntry *value, bool reset) {
     ReturnValue_t result = HasReturnvaluesIF::RETURN_OK;
-    MutexHelper(mutex, timeoutType, mutexTimeoutMs);
+    MutexGuard(mutex, timeoutType, mutexTimeoutMs);
     if (reset) {
         mapIterator = healthMap.begin();
     }


@@ -84,15 +84,21 @@ void HousekeepingMessage::setCollectionIntervalModificationCommand(
     else {
         command->setCommand(MODIFY_PARAMETER_REPORT_COLLECTION_INTERVAL);
     }
-    command->setParameter3(collectionInterval);
+    /* Raw storage of the float in the message. Do not use setParameter3, does
+    implicit conversion to integer type! */
+    std::memcpy(command->getData() + 2 * sizeof(uint32_t), &collectionInterval,
+            sizeof(collectionInterval));

     setSid(command, sid);
 }

 sid_t HousekeepingMessage::getCollectionIntervalModificationCommand(
         const CommandMessage* command, float* newCollectionInterval) {
     if(newCollectionInterval != nullptr) {
-        *newCollectionInterval = command->getParameter3();
+        std::memcpy(newCollectionInterval, command->getData() + 2 * sizeof(uint32_t),
+                sizeof(*newCollectionInterval));
     }

     return getSid(command);
@@ -151,7 +157,8 @@ void HousekeepingMessage::clear(CommandMessage* message) {
     case(DIAGNOSTICS_REPORT):
     case(HK_DEFINITIONS_REPORT):
     case(DIAGNOSTICS_DEFINITION_REPORT):
-    case(UPDATE_SNAPSHOT_SET): {
+    case(UPDATE_SNAPSHOT_SET):
+    case(UPDATE_SNAPSHOT_VARIABLE): {
         store_address_t storeId;
         getHkDataReply(message, &storeId);
         StorageManagerIF *ipcStore = objectManager->get<StorageManagerIF>(
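The collection-interval change above is plain byte-wise storage of a float; a self-contained sketch of the same round trip outside the message class (the buffer layout is assumed to mirror the three 32-bit command parameters):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    int main() {
        uint8_t messageData[3 * sizeof(uint32_t)] = {};
        float collectionInterval = 2.5f;

        /* Store the raw float bytes at the offset of the third parameter. */
        std::memcpy(messageData + 2 * sizeof(uint32_t), &collectionInterval,
                sizeof(collectionInterval));

        /* Read it back without any implicit integer conversion. */
        float readBack = 0.0f;
        std::memcpy(&readBack, messageData + 2 * sizeof(uint32_t), sizeof(readBack));
        assert(readBack == 2.5f);
        return 0;
    }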


@@ -11,7 +11,8 @@
  * @brief This helper class will be used to serialize and deserialize update housekeeping packets
  * into the store.
  */
-class HousekeepingSnapshot: public SerializeIF {
+class HousekeepingSnapshot:
+        public SerializeIF {
 public:

     /**
@@ -36,6 +37,17 @@ public:
             timeStamp(timeStamp), timeStampSize(timeStampSize),
             updateData(dataSetPtr) {};

+    /**
+     * Update packet constructor for pool variables.
+     * @param timeStamp
+     * @param timeStampSize
+     * @param dataSetPtr
+     */
+    HousekeepingSnapshot(CCSDSTime::CDS_short* cdsShort, LocalPoolObjectBase* dataSetPtr):
+            timeStamp(reinterpret_cast<uint8_t*>(cdsShort)),
+            timeStampSize(sizeof(CCSDSTime::CDS_short)), updateData(dataSetPtr) {};
+
     /**
      * Update packet constructor for pool variables.
      * @param timeStamp
@@ -47,8 +59,8 @@ public:
             timeStamp(timeStamp), timeStampSize(timeStampSize),
             updateData(dataSetPtr) {};

-    virtual ReturnValue_t serialize(uint8_t **buffer, size_t *size,
-            size_t maxSize, Endianness streamEndianness) const {
+    virtual ReturnValue_t serialize(uint8_t **buffer, size_t *size, size_t maxSize,
+            Endianness streamEndianness) const {
         if(timeStamp != nullptr) {
             /* Endianness will always be MACHINE, so we can simply use memcpy
             here. */


@@ -4,46 +4,87 @@
 #include <cmath>

 PeriodicHousekeepingHelper::PeriodicHousekeepingHelper(
         LocalPoolDataSetBase* owner): owner(owner) {}

 void PeriodicHousekeepingHelper::initialize(float collectionInterval,
-        dur_millis_t minimumPeriodicInterval, bool isDiagnostics,
-        uint8_t nonDiagIntervalFactor) {
+        dur_millis_t minimumPeriodicInterval, uint8_t nonDiagIntervalFactor) {
     this->minimumPeriodicInterval = minimumPeriodicInterval;
-    if(not isDiagnostics) {
-        this->minimumPeriodicInterval = this->minimumPeriodicInterval *
-                nonDiagIntervalFactor;
-    }
-    collectionIntervalTicks = intervalSecondsToInterval(collectionInterval);
+    this->nonDiagIntervalFactor = nonDiagIntervalFactor;
+    collectionIntervalTicks = intervalSecondsToIntervalTicks(collectionInterval);
+    /* This will cause a checkOpNecessary call to be true immediately. I think it's okay
+    if a HK packet is generated immediately instead of waiting one generation cycle. */
+    internalTickCounter = collectionIntervalTicks;
 }

-float PeriodicHousekeepingHelper::getCollectionIntervalInSeconds() {
-    return intervalToIntervalSeconds(collectionIntervalTicks);
+float PeriodicHousekeepingHelper::getCollectionIntervalInSeconds() const {
+    return intervalTicksToSeconds(collectionIntervalTicks);
 }

 bool PeriodicHousekeepingHelper::checkOpNecessary() {
     if(internalTickCounter >= collectionIntervalTicks) {
         internalTickCounter = 1;
         return true;
     }
     internalTickCounter++;
     return false;
 }

-uint32_t PeriodicHousekeepingHelper::intervalSecondsToInterval(
+uint32_t PeriodicHousekeepingHelper::intervalSecondsToIntervalTicks(
         float collectionIntervalSeconds) {
-    return std::ceil(collectionIntervalSeconds * 1000
-            / minimumPeriodicInterval);
+    if(owner == nullptr) {
+        return 0;
+    }
+    bool isDiagnostics = owner->isDiagnostics();
+
+    /* Avoid division by zero */
+    if(minimumPeriodicInterval == 0) {
+        if(isDiagnostics) {
+            /* Perform operation each cycle */
+            return 1;
+        }
+        else {
+            return nonDiagIntervalFactor;
+        }
+    }
+    else {
+        dur_millis_t intervalInMs = collectionIntervalSeconds * 1000;
+        uint32_t divisor = minimumPeriodicInterval;
+        if(not isDiagnostics) {
+            /* We need to multiply the divisor because non-diagnostics only
+            allow a multiple of the minimum periodic interval */
+            divisor *= nonDiagIntervalFactor;
+        }
+        uint32_t ticks = std::ceil(static_cast<float>(intervalInMs) / divisor);
+        if(not isDiagnostics) {
+            /* Now we need to multiply the calculated ticks with the factor as well
+            because the minimum tick count to generate a non-diagnostic is the factor itself.
+            Example calculation for non-diagnostic with
+            0.4 second interval and 0.2 second task interval.
+            Resultant tick count of 5 is equal to operation each second.
+            Example calculation for non-diagnostic with 2.0 second interval and 0.2 second
+            task interval.
+            Resultant tick count of 10 is equal to operation every 2 seconds.
+            Example calculation for diagnostic with 0.4 second interval and 0.3
+            second task interval. Resulting tick count of 2 is equal to operation
+            every 0.6 seconds. */
+            ticks *= nonDiagIntervalFactor;
+        }
+        return ticks;
+    }
 }

-float PeriodicHousekeepingHelper::intervalToIntervalSeconds(
-        uint32_t collectionInterval) {
-    return static_cast<float>(collectionInterval *
-            minimumPeriodicInterval);
+float PeriodicHousekeepingHelper::intervalTicksToSeconds(
+        uint32_t collectionInterval) const {
+    /* Number of ticks times the minimum interval is in milliseconds, so we divide by 1000 to get
+    the value in seconds */
+    return static_cast<float>(collectionInterval * minimumPeriodicInterval / 1000.0);
 }

 void PeriodicHousekeepingHelper::changeCollectionInterval(
         float newIntervalSeconds) {
-    collectionIntervalTicks = intervalSecondsToInterval(newIntervalSeconds);
+    collectionIntervalTicks = intervalSecondsToIntervalTicks(newIntervalSeconds);
 }
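To make the new tick conversion concrete, here is a standalone sketch of the same arithmetic (function name invented, values taken from the comment above):

    #include <cassert>
    #include <cmath>
    #include <cstdint>

    /* Mirrors intervalSecondsToIntervalTicks: for non-diagnostic sets both the divisor
       and the resulting tick count are scaled by the non-diagnostic interval factor. */
    uint32_t intervalToTicks(float intervalSeconds, uint32_t taskIntervalMs,
            bool isDiagnostics, uint8_t nonDiagFactor = 5) {
        uint32_t intervalMs = static_cast<uint32_t>(intervalSeconds * 1000);
        uint32_t divisor = taskIntervalMs;
        if(not isDiagnostics) {
            divisor *= nonDiagFactor;
        }
        uint32_t ticks = std::ceil(static_cast<float>(intervalMs) / divisor);
        if(not isDiagnostics) {
            ticks *= nonDiagFactor;
        }
        return ticks;
    }

    int main() {
        /* Non-diagnostic, 0.4 s interval, 0.2 s task interval: 5 ticks, one packet per second. */
        assert(intervalToTicks(0.4f, 200, false) == 5);
        /* Non-diagnostic, 2.0 s interval, 0.2 s task interval: 10 ticks, one packet per 2 s. */
        assert(intervalToTicks(2.0f, 200, false) == 10);
        /* Diagnostic, 0.4 s interval, 0.3 s task interval: 2 ticks, one packet per 0.6 s. */
        assert(intervalToTicks(0.4f, 300, true) == 2);
        return 0;
    }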


@@ -10,18 +10,19 @@ class PeriodicHousekeepingHelper {
 public:
     PeriodicHousekeepingHelper(LocalPoolDataSetBase* owner);

-    void initialize(float collectionInterval,
-            dur_millis_t minimumPeriodicInterval, bool isDiagnostics,
-            uint8_t nonDiagIntervalFactor);
+    void initialize(float collectionInterval, dur_millis_t minimumPeriodicInterval,
+            uint8_t nonDiagIntervalFactor);

     void changeCollectionInterval(float newInterval);
-    float getCollectionIntervalInSeconds();
+    float getCollectionIntervalInSeconds() const;
     bool checkOpNecessary();
 private:
     LocalPoolDataSetBase* owner = nullptr;
+    uint8_t nonDiagIntervalFactor = 0;

-    uint32_t intervalSecondsToInterval(float collectionIntervalSeconds);
-    float intervalToIntervalSeconds(uint32_t collectionInterval);
+    uint32_t intervalSecondsToIntervalTicks(float collectionIntervalSeconds);
+    float intervalTicksToSeconds(uint32_t collectionInterval) const;

     dur_millis_t minimumPeriodicInterval = 0;
     uint32_t internalTickCounter = 1;

ipc/MutexGuard.h (new file, 60 lines)

@@ -0,0 +1,60 @@
#ifndef FRAMEWORK_IPC_MUTEXGUARD_H_
#define FRAMEWORK_IPC_MUTEXGUARD_H_
#include "MutexFactory.h"
#include "../serviceinterface/ServiceInterface.h"
class MutexGuard {
public:
MutexGuard(MutexIF* mutex, MutexIF::TimeoutType timeoutType =
MutexIF::TimeoutType::BLOCKING, uint32_t timeoutMs = 0):
internalMutex(mutex) {
if(mutex == nullptr) {
#if FSFW_VERBOSE_LEVEL >= 1
#if FSFW_CPP_OSTREAM_ENABLED == 1
sif::error << "MutexGuard: Passed mutex is invalid!" << std::endl;
#else
sif::printError("MutexGuard: Passed mutex is invalid!\n");
#endif /* FSFW_CPP_OSTREAM_ENABLED == 1 */
#endif /* FSFW_VERBOSE_LEVEL >= 1 */
return;
}
result = mutex->lockMutex(timeoutType,
timeoutMs);
#if FSFW_VERBOSE_LEVEL >= 1
if(result == MutexIF::MUTEX_TIMEOUT) {
#if FSFW_CPP_OSTREAM_ENABLED == 1
sif::error << "MutexGuard: Lock of mutex failed with timeout of "
<< timeoutMs << " milliseconds!" << std::endl;
#else
sif::printError("MutexGuard: Lock of mutex failed with timeout of %lu milliseconds\n",
timeoutMs);
#endif /* FSFW_CPP_OSTREAM_ENABLED == 1 */
}
else if(result != HasReturnvaluesIF::RETURN_OK) {
#if FSFW_CPP_OSTREAM_ENABLED == 1
sif::error << "MutexGuard: Lock of Mutex failed with code " << result << std::endl;
#else
sif::printError("MutexGuard: Lock of Mutex failed with code %d\n", result);
#endif /* FSFW_CPP_OSTREAM_ENABLED == 1 */
}
#else
#endif /* FSFW_VERBOSE_LEVEL >= 1 */
}
ReturnValue_t getLockResult() const {
return result;
}
~MutexGuard() {
if(internalMutex != nullptr) {
internalMutex->unlockMutex();
}
}
private:
MutexIF* internalMutex;
ReturnValue_t result = HasReturnvaluesIF::RETURN_FAILED;
};
#endif /* FRAMEWORK_IPC_MUTEXGUARD_H_ */
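Typical use of the new guard is scope-based locking; a short sketch (include paths and the calling context are assumed, not part of the commit):

    #include "../ipc/MutexFactory.h"
    #include "../ipc/MutexGuard.h"

    void incrementSharedCounter(uint32_t& counter, MutexIF* mutex) {
        /* The lock is taken here and released automatically when the guard goes out of scope. */
        MutexGuard guard(mutex, MutexIF::TimeoutType::WAITING, 20);
        if(guard.getLockResult() == HasReturnvaluesIF::RETURN_OK) {
            counter++;
        }
    }

Note that the guard has to be a named object: an unnamed temporary such as MutexGuard(mutex) would release the lock again at the end of that statement.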


@@ -1,57 +0,0 @@
#ifndef FRAMEWORK_IPC_MUTEXHELPER_H_
#define FRAMEWORK_IPC_MUTEXHELPER_H_
#include "MutexFactory.h"
#include "../serviceinterface/ServiceInterface.h"
class MutexHelper {
public:
MutexHelper(MutexIF* mutex, MutexIF::TimeoutType timeoutType =
MutexIF::TimeoutType::BLOCKING, uint32_t timeoutMs = 0):
internalMutex(mutex) {
if(mutex == nullptr) {
#if FSFW_VERBOSE_LEVEL >= 1
#if FSFW_CPP_OSTREAM_ENABLED == 1
sif::error << "MutexHelper: Passed mutex is invalid!" << std::endl;
#else
sif::printError("MutexHelper: Passed mutex is invalid!\n");
#endif /* FSFW_CPP_OSTREAM_ENABLED == 1 */
#endif /* FSFW_VERBOSE_LEVEL >= 1 */
return;
}
ReturnValue_t status = mutex->lockMutex(timeoutType,
timeoutMs);
#if FSFW_VERBOSE_LEVEL >= 1
if(status == MutexIF::MUTEX_TIMEOUT) {
#if FSFW_CPP_OSTREAM_ENABLED == 1
sif::error << "MutexHelper: Lock of mutex failed with timeout of "
<< timeoutMs << " milliseconds!" << std::endl;
#else
sif::printError("MutexHelper: Lock of mutex failed with timeout of %lu milliseconds\n",
timeoutMs);
#endif /* FSFW_CPP_OSTREAM_ENABLED == 1 */
}
else if(status != HasReturnvaluesIF::RETURN_OK) {
#if FSFW_CPP_OSTREAM_ENABLED == 1
sif::error << "MutexHelper: Lock of Mutex failed with code " << status << std::endl;
#else
sif::printError("MutexHelper: Lock of Mutex failed with code %d\n", status);
#endif /* FSFW_CPP_OSTREAM_ENABLED == 1 */
}
#else
/* To avoid unused variable warning */
static_cast<void>(status);
#endif /* FSFW_VERBOSE_LEVEL >= 1 */
}
~MutexHelper() {
if(internalMutex != nullptr) {
internalMutex->unlockMutex();
}
}
private:
MutexIF* internalMutex;
};
#endif /* FRAMEWORK_IPC_MUTEXHELPER_H_ */


@@ -31,4 +31,6 @@ else()
         message(FATAL_ERROR "The host OS could not be determined! Aborting.")
     endif()
 endif()
+
+add_subdirectory(common)


@@ -0,0 +1,3 @@
target_sources(${LIB_FSFW_NAME} PRIVATE
tcpipCommon.cpp
)


@@ -0,0 +1,36 @@
#include "tcpipCommon.h"
void tcpip::determineErrorStrings(Protocol protocol, ErrorSources errorSrc, std::string &protStr,
std::string &srcString) {
if(protocol == Protocol::TCP) {
protStr = "TCP";
}
else if(protocol == Protocol::UDP) {
protStr = "UDP";
}
else {
protStr = "Unknown protocol";
}
if(errorSrc == ErrorSources::SETSOCKOPT_CALL) {
srcString = "setsockopt call";
}
else if(errorSrc == ErrorSources::SOCKET_CALL) {
srcString = "socket call";
}
else if(errorSrc == ErrorSources::LISTEN_CALL) {
srcString = "listen call";
}
else if(errorSrc == ErrorSources::ACCEPT_CALL) {
srcString = "accept call";
}
else if(errorSrc == ErrorSources::RECVFROM_CALL) {
srcString = "recvfrom call";
}
else if(errorSrc == ErrorSources::GETADDRINFO_CALL) {
srcString = "getaddrinfo call";
}
else {
srcString = "unknown call";
}
}

osal/common/tcpipCommon.h Normal file
View File

@ -0,0 +1,36 @@
#ifndef FSFW_OSAL_COMMON_TCPIPCOMMON_H_
#define FSFW_OSAL_COMMON_TCPIPCOMMON_H_
#include "../../timemanager/clockDefinitions.h"
#include <string>
namespace tcpip {
const char* const DEFAULT_UDP_SERVER_PORT = "7301";
const char* const DEFAULT_TCP_SERVER_PORT = "7303";
enum class Protocol {
UDP,
TCP
};
enum class ErrorSources {
GETADDRINFO_CALL,
SOCKET_CALL,
SETSOCKOPT_CALL,
BIND_CALL,
RECV_CALL,
RECVFROM_CALL,
LISTEN_CALL,
ACCEPT_CALL,
SENDTO_CALL
};
void determineErrorStrings(Protocol protocol, ErrorSources errorSrc, std::string& protStr,
std::string& srcString);
}
#endif /* FSFW_OSAL_COMMON_TCPIPCOMMON_H_ */
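A short usage sketch of the helpers declared above; the standalone main() and the chosen enum values are illustrative only.

#include "tcpipCommon.h"
#include <iostream>
#include <string>

int main() {
	std::string protocolString;
	std::string errorSrcString;
	/* Translate the enum values into readable strings for a log line. */
	tcpip::determineErrorStrings(tcpip::Protocol::UDP, tcpip::ErrorSources::SOCKET_CALL,
			protocolString, errorSrcString);
	std::cout << "tcpip error: " << protocolString << " | " << errorSrcString << std::endl;
	return 0;
}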

View File

@ -3,7 +3,7 @@
#include "../../serviceinterface/ServiceInterfaceStream.h" #include "../../serviceinterface/ServiceInterfaceStream.h"
#include "../../ipc/MutexFactory.h" #include "../../ipc/MutexFactory.h"
#include "../../ipc/MutexHelper.h" #include "../../ipc/MutexGuard.h"
MessageQueue::MessageQueue(size_t messageDepth, size_t maxMessageSize): MessageQueue::MessageQueue(size_t messageDepth, size_t maxMessageSize):
messageSize(maxMessageSize), messageDepth(messageDepth) { messageSize(maxMessageSize), messageDepth(messageDepth) {
@ -65,7 +65,7 @@ ReturnValue_t MessageQueue::receiveMessage(MessageQueueMessageIF* message) {
} }
// not sure this will work.. // not sure this will work..
//*message = std::move(messageQueue.front()); //*message = std::move(messageQueue.front());
MutexHelper mutexLock(queueLock, MutexIF::TimeoutType::WAITING, 20); MutexGuard mutexLock(queueLock, MutexIF::TimeoutType::WAITING, 20);
MessageQueueMessage* currentMessage = &messageQueue.front(); MessageQueueMessage* currentMessage = &messageQueue.front();
std::copy(currentMessage->getBuffer(), std::copy(currentMessage->getBuffer(),
currentMessage->getBuffer() + messageSize, message->getBuffer()); currentMessage->getBuffer() + messageSize, message->getBuffer());
@ -130,7 +130,7 @@ ReturnValue_t MessageQueue::sendMessageFromMessageQueue(MessageQueueId_t sendTo,
return HasReturnvaluesIF::RETURN_FAILED; return HasReturnvaluesIF::RETURN_FAILED;
} }
if(targetQueue->messageQueue.size() < targetQueue->messageDepth) { if(targetQueue->messageQueue.size() < targetQueue->messageDepth) {
MutexHelper mutexLock(targetQueue->queueLock, MutexGuard mutexLock(targetQueue->queueLock,
MutexIF::TimeoutType::WAITING, 20); MutexIF::TimeoutType::WAITING, 20);
// not ideal, works for now though. // not ideal, works for now though.
MessageQueueMessage* mqmMessage = MessageQueueMessage* mqmMessage =

View File

@ -24,5 +24,7 @@ MutexIF* MutexFactory::createMutex() {
} }
void MutexFactory::deleteMutex(MutexIF* mutex) { void MutexFactory::deleteMutex(MutexIF* mutex) {
delete mutex; if(mutex != nullptr) {
delete mutex;
}
} }

View File

@ -2,7 +2,7 @@
#include "../../serviceinterface/ServiceInterface.h" #include "../../serviceinterface/ServiceInterface.h"
#include "../../ipc/MutexFactory.h" #include "../../ipc/MutexFactory.h"
#include "../../ipc/MutexHelper.h" #include "../../ipc/MutexGuard.h"
QueueMapManager* QueueMapManager::mqManagerInstance = nullptr; QueueMapManager* QueueMapManager::mqManagerInstance = nullptr;
@ -43,7 +43,7 @@ ReturnValue_t QueueMapManager::addMessageQueue(
MessageQueueIF* QueueMapManager::getMessageQueue( MessageQueueIF* QueueMapManager::getMessageQueue(
MessageQueueId_t messageQueueId) const { MessageQueueId_t messageQueueId) const {
MutexHelper(mapLock, MutexIF::TimeoutType::WAITING, 50); MutexGuard(mapLock, MutexIF::TimeoutType::WAITING, 50);
auto queueIter = queueMap.find(messageQueueId); auto queueIter = queueMap.find(messageQueueId);
if(queueIter != queueMap.end()) { if(queueIter != queueMap.end()) {
return queueIter->second; return queueIter->second;

View File

@ -1,7 +1,5 @@
#include "../../tasks/SemaphoreFactory.h" #include "../../tasks/SemaphoreFactory.h"
#include "../../osal/linux/BinarySemaphore.h" #include "../../serviceinterface/ServiceInterface.h"
#include "../../osal/linux/CountingSemaphore.h"
#include "../../serviceinterface/ServiceInterfaceStream.h"
SemaphoreFactory* SemaphoreFactory::factoryInstance = nullptr; SemaphoreFactory* SemaphoreFactory::factoryInstance = nullptr;

View File

@ -16,6 +16,7 @@ target_sources(${LIB_FSFW_NAME}
TcUnixUdpPollingTask.cpp TcUnixUdpPollingTask.cpp
TmTcUnixUdpBridge.cpp TmTcUnixUdpBridge.cpp
Timer.cpp Timer.cpp
tcpipHelpers.cpp
) )
find_package(Threads REQUIRED) find_package(Threads REQUIRED)

View File

@ -6,252 +6,250 @@
#include <errno.h> #include <errno.h>
PosixThread::PosixThread(const char* name_, int priority_, size_t stackSize_): PosixThread::PosixThread(const char* name_, int priority_, size_t stackSize_):
thread(0),priority(priority_),stackSize(stackSize_) { thread(0),priority(priority_),stackSize(stackSize_) {
name[0] = '\0'; name[0] = '\0';
std::strncat(name, name_, PTHREAD_MAX_NAMELEN - 1); std::strncat(name, name_, PTHREAD_MAX_NAMELEN - 1);
} }
PosixThread::~PosixThread() { PosixThread::~PosixThread() {
//No deletion and no free of Stack Pointer //No deletion and no free of Stack Pointer
} }
ReturnValue_t PosixThread::sleep(uint64_t ns) { ReturnValue_t PosixThread::sleep(uint64_t ns) {
//TODO sleep might be better with timer instead of sleep() //TODO sleep might be better with timer instead of sleep()
timespec time; timespec time;
time.tv_sec = ns/1000000000; time.tv_sec = ns/1000000000;
time.tv_nsec = ns - time.tv_sec*1e9; time.tv_nsec = ns - time.tv_sec*1e9;
//Remaining Time is not set here //Remaining Time is not set here
int status = nanosleep(&time,NULL); int status = nanosleep(&time,NULL);
if(status != 0){ if(status != 0){
switch(errno){ switch(errno){
case EINTR: case EINTR:
//The nanosleep() function was interrupted by a signal. //The nanosleep() function was interrupted by a signal.
return HasReturnvaluesIF::RETURN_FAILED; return HasReturnvaluesIF::RETURN_FAILED;
case EINVAL: case EINVAL:
//The rqtp argument specified a nanosecond value less than zero or //The rqtp argument specified a nanosecond value less than zero or
// greater than or equal to 1000 million. // greater than or equal to 1000 million.
return HasReturnvaluesIF::RETURN_FAILED; return HasReturnvaluesIF::RETURN_FAILED;
default: default:
return HasReturnvaluesIF::RETURN_FAILED; return HasReturnvaluesIF::RETURN_FAILED;
} }
} }
return HasReturnvaluesIF::RETURN_OK; return HasReturnvaluesIF::RETURN_OK;
} }
void PosixThread::suspend() { void PosixThread::suspend() {
//Wait for SIGUSR1 //Wait for SIGUSR1
int caughtSig = 0; int caughtSig = 0;
sigset_t waitSignal; sigset_t waitSignal;
sigemptyset(&waitSignal); sigemptyset(&waitSignal);
sigaddset(&waitSignal, SIGUSR1); sigaddset(&waitSignal, SIGUSR1);
sigwait(&waitSignal, &caughtSig); sigwait(&waitSignal, &caughtSig);
if (caughtSig != SIGUSR1) { if (caughtSig != SIGUSR1) {
#if FSFW_CPP_OSTREAM_ENABLED == 1 #if FSFW_CPP_OSTREAM_ENABLED == 1
sif::error << "FixedTimeslotTask: Unknown Signal received: " << sif::error << "FixedTimeslotTask: Unknown Signal received: " <<
caughtSig << std::endl; caughtSig << std::endl;
#endif #endif
} }
} }
void PosixThread::resume(){ void PosixThread::resume(){
/* Signal the thread to start. Makes sense to call kill to start or? ;) /* Signal the thread to start. Makes sense to call kill to start or? ;)
* According to POSIX raise(signal) will call pthread_kill(pthread_self(), sig),
* According to Posix raise(signal) will call pthread_kill(pthread_self(), sig), but as the call must be done from the thread itself this is not possible here */
* but as the call must be done from the thread itsself this is not possible here pthread_kill(thread,SIGUSR1);
*/
pthread_kill(thread,SIGUSR1);
} }
bool PosixThread::delayUntil(uint64_t* const prevoiusWakeTime_ms, bool PosixThread::delayUntil(uint64_t* const prevoiusWakeTime_ms,
const uint64_t delayTime_ms) { const uint64_t delayTime_ms) {
uint64_t nextTimeToWake_ms; uint64_t nextTimeToWake_ms;
bool shouldDelay = false; bool shouldDelay = false;
//Get current Time /* Get current Time */
const uint64_t currentTime_ms = getCurrentMonotonicTimeMs(); const uint64_t currentTime_ms = getCurrentMonotonicTimeMs();
/* Generate the tick time at which the task wants to wake. */ /* Generate the tick time at which the task wants to wake. */
nextTimeToWake_ms = (*prevoiusWakeTime_ms) + delayTime_ms; nextTimeToWake_ms = (*prevoiusWakeTime_ms) + delayTime_ms;
if (currentTime_ms < *prevoiusWakeTime_ms) { if (currentTime_ms < *prevoiusWakeTime_ms) {
/* The tick count has overflowed since this function was /* The tick count has overflowed since this function was
lasted called. In this case the only time we should ever lasted called. In this case the only time we should ever
actually delay is if the wake time has also overflowed, actually delay is if the wake time has also overflowed,
and the wake time is greater than the tick time. When this and the wake time is greater than the tick time. When this
is the case it is as if neither time had overflowed. */ is the case it is as if neither time had overflowed. */
if ((nextTimeToWake_ms < *prevoiusWakeTime_ms) if ((nextTimeToWake_ms < *prevoiusWakeTime_ms)
&& (nextTimeToWake_ms > currentTime_ms)) { && (nextTimeToWake_ms > currentTime_ms)) {
shouldDelay = true; shouldDelay = true;
} }
} else { } else {
/* The tick time has not overflowed. In this case we will /* The tick time has not overflowed. In this case we will
delay if either the wake time has overflowed, and/or the delay if either the wake time has overflowed, and/or the
tick time is less than the wake time. */ tick time is less than the wake time. */
if ((nextTimeToWake_ms < *prevoiusWakeTime_ms) if ((nextTimeToWake_ms < *prevoiusWakeTime_ms)
|| (nextTimeToWake_ms > currentTime_ms)) { || (nextTimeToWake_ms > currentTime_ms)) {
shouldDelay = true; shouldDelay = true;
} }
} }
/* Update the wake time ready for the next call. */ /* Update the wake time ready for the next call. */
(*prevoiusWakeTime_ms) = nextTimeToWake_ms; (*prevoiusWakeTime_ms) = nextTimeToWake_ms;
if (shouldDelay) { if (shouldDelay) {
uint64_t sleepTime = nextTimeToWake_ms - currentTime_ms; uint64_t sleepTime = nextTimeToWake_ms - currentTime_ms;
PosixThread::sleep(sleepTime * 1000000ull); PosixThread::sleep(sleepTime * 1000000ull);
return true; return true;
} }
//We are shifting the time in case the deadline was missed like rtems /* We are shifting the time in case the deadline was missed like RTEMS */
(*prevoiusWakeTime_ms) = currentTime_ms; (*prevoiusWakeTime_ms) = currentTime_ms;
return false; return false;
} }
uint64_t PosixThread::getCurrentMonotonicTimeMs(){ uint64_t PosixThread::getCurrentMonotonicTimeMs(){
timespec timeNow; timespec timeNow;
clock_gettime(CLOCK_MONOTONIC_RAW, &timeNow); clock_gettime(CLOCK_MONOTONIC_RAW, &timeNow);
uint64_t currentTime_ms = (uint64_t) timeNow.tv_sec * 1000 uint64_t currentTime_ms = (uint64_t) timeNow.tv_sec * 1000
+ timeNow.tv_nsec / 1000000; + timeNow.tv_nsec / 1000000;
return currentTime_ms; return currentTime_ms;
} }
void PosixThread::createTask(void* (*fnc_)(void*), void* arg_) { void PosixThread::createTask(void* (*fnc_)(void*), void* arg_) {
#if FSFW_CPP_OSTREAM_ENABLED == 1 #if FSFW_CPP_OSTREAM_ENABLED == 1
//sif::debug << "PosixThread::createTask" << std::endl; //sif::debug << "PosixThread::createTask" << std::endl;
#endif #endif
/* /*
* The attr argument points to a pthread_attr_t structure whose contents * The attr argument points to a pthread_attr_t structure whose contents
are used at thread creation time to determine attributes for the new * are used at thread creation time to determine attributes for the new
thread; this structure is initialized using pthread_attr_init(3) and * thread; this structure is initialized using pthread_attr_init(3) and
related functions. If attr is NULL, then the thread is created with * related functions. If attr is NULL, then the thread is created with
default attributes. * default attributes.
*/ */
pthread_attr_t attributes; pthread_attr_t attributes;
int status = pthread_attr_init(&attributes); int status = pthread_attr_init(&attributes);
if(status != 0){ if(status != 0){
#if FSFW_CPP_OSTREAM_ENABLED == 1 #if FSFW_CPP_OSTREAM_ENABLED == 1
sif::error << "Posix Thread attribute init failed with: " << sif::error << "Posix Thread attribute init failed with: " <<
strerror(status) << std::endl; strerror(status) << std::endl;
#endif #endif
} }
void* stackPointer; void* stackPointer;
status = posix_memalign(&stackPointer, sysconf(_SC_PAGESIZE), stackSize); status = posix_memalign(&stackPointer, sysconf(_SC_PAGESIZE), stackSize);
if(status != 0){ if(status != 0){
#if FSFW_CPP_OSTREAM_ENABLED == 1 #if FSFW_CPP_OSTREAM_ENABLED == 1
sif::error << "PosixThread::createTask: Stack init failed with: " << sif::error << "PosixThread::createTask: Stack init failed with: " <<
strerror(status) << std::endl; strerror(status) << std::endl;
#endif #endif
if(errno == ENOMEM) { if(errno == ENOMEM) {
size_t stackMb = stackSize/10e6; size_t stackMb = stackSize/10e6;
#if FSFW_CPP_OSTREAM_ENABLED == 1 #if FSFW_CPP_OSTREAM_ENABLED == 1
sif::error << "PosixThread::createTask: Insufficient memory for" sif::error << "PosixThread::createTask: Insufficient memory for"
" the requested " << stackMb << " MB" << std::endl; " the requested " << stackMb << " MB" << std::endl;
#else #else
sif::printError("PosixThread::createTask: Insufficient memory for " sif::printError("PosixThread::createTask: Insufficient memory for "
"the requested %lu MB\n", static_cast<unsigned long>(stackMb)); "the requested %lu MB\n", static_cast<unsigned long>(stackMb));
#endif #endif
} }
else if(errno == EINVAL) { else if(errno == EINVAL) {
#if FSFW_CPP_OSTREAM_ENABLED == 1 #if FSFW_CPP_OSTREAM_ENABLED == 1
sif::error << "PosixThread::createTask: Wrong alignment argument!" sif::error << "PosixThread::createTask: Wrong alignment argument!"
<< std::endl; << std::endl;
#else #else
sif::printError("PosixThread::createTask: " sif::printError("PosixThread::createTask: "
"Wrong alignment argument!\n"); "Wrong alignment argument!\n");
#endif #endif
} }
return; return;
} }
status = pthread_attr_setstack(&attributes, stackPointer, stackSize); status = pthread_attr_setstack(&attributes, stackPointer, stackSize);
if(status != 0){ if(status != 0){
#if FSFW_CPP_OSTREAM_ENABLED == 1 #if FSFW_CPP_OSTREAM_ENABLED == 1
sif::error << "PosixThread::createTask: pthread_attr_setstack " sif::error << "PosixThread::createTask: pthread_attr_setstack "
" failed with: " << strerror(status) << std::endl; " failed with: " << strerror(status) << std::endl;
sif::error << "Make sure the specified stack size is valid and is " sif::error << "Make sure the specified stack size is valid and is "
"larger than the minimum allowed stack size." << std::endl; "larger than the minimum allowed stack size." << std::endl;
#endif #endif
} }
status = pthread_attr_setinheritsched(&attributes, PTHREAD_EXPLICIT_SCHED); status = pthread_attr_setinheritsched(&attributes, PTHREAD_EXPLICIT_SCHED);
if(status != 0){ if(status != 0){
#if FSFW_CPP_OSTREAM_ENABLED == 1 #if FSFW_CPP_OSTREAM_ENABLED == 1
sif::error << "Posix Thread attribute setinheritsched failed with: " << sif::error << "Posix Thread attribute setinheritsched failed with: " <<
strerror(status) << std::endl; strerror(status) << std::endl;
#endif #endif
} }
// TODO FIFO -> This needs root privileges for the process // TODO FIFO -> This needs root privileges for the process
status = pthread_attr_setschedpolicy(&attributes,SCHED_FIFO); status = pthread_attr_setschedpolicy(&attributes,SCHED_FIFO);
if(status != 0){ if(status != 0){
#if FSFW_CPP_OSTREAM_ENABLED == 1 #if FSFW_CPP_OSTREAM_ENABLED == 1
sif::error << "Posix Thread attribute schedule policy failed with: " << sif::error << "Posix Thread attribute schedule policy failed with: " <<
strerror(status) << std::endl; strerror(status) << std::endl;
#endif #endif
} }
sched_param scheduleParams; sched_param scheduleParams;
scheduleParams.__sched_priority = priority; scheduleParams.__sched_priority = priority;
status = pthread_attr_setschedparam(&attributes, &scheduleParams); status = pthread_attr_setschedparam(&attributes, &scheduleParams);
if(status != 0){ if(status != 0){
#if FSFW_CPP_OSTREAM_ENABLED == 1 #if FSFW_CPP_OSTREAM_ENABLED == 1
sif::error << "Posix Thread attribute schedule params failed with: " << sif::error << "Posix Thread attribute schedule params failed with: " <<
strerror(status) << std::endl; strerror(status) << std::endl;
#endif #endif
} }
//Set Signal Mask for suspend until startTask is called //Set Signal Mask for suspend until startTask is called
sigset_t waitSignal; sigset_t waitSignal;
sigemptyset(&waitSignal); sigemptyset(&waitSignal);
sigaddset(&waitSignal, SIGUSR1); sigaddset(&waitSignal, SIGUSR1);
status = pthread_sigmask(SIG_BLOCK, &waitSignal, NULL); status = pthread_sigmask(SIG_BLOCK, &waitSignal, NULL);
if(status != 0){ if(status != 0){
#if FSFW_CPP_OSTREAM_ENABLED == 1 #if FSFW_CPP_OSTREAM_ENABLED == 1
sif::error << "Posix Thread sigmask failed failed with: " << sif::error << "Posix Thread sigmask failed failed with: " <<
strerror(status) << " errno: " << strerror(errno) << std::endl; strerror(status) << " errno: " << strerror(errno) << std::endl;
#endif #endif
} }
status = pthread_create(&thread,&attributes,fnc_,arg_); status = pthread_create(&thread,&attributes,fnc_,arg_);
if(status != 0){ if(status != 0){
#if FSFW_CPP_OSTREAM_ENABLED == 1 #if FSFW_CPP_OSTREAM_ENABLED == 1
sif::error << "Posix Thread create failed with: " << sif::error << "Posix Thread create failed with: " <<
strerror(status) << std::endl; strerror(status) << std::endl;
#endif #endif
} }
status = pthread_setname_np(thread,name); status = pthread_setname_np(thread,name);
if(status != 0){ if(status != 0){
#if FSFW_CPP_OSTREAM_ENABLED == 1 #if FSFW_CPP_OSTREAM_ENABLED == 1
sif::error << "PosixThread::createTask: setname failed with: " << sif::error << "PosixThread::createTask: setname failed with: " <<
strerror(status) << std::endl; strerror(status) << std::endl;
#endif #endif
if(status == ERANGE) { if(status == ERANGE) {
#if FSFW_CPP_OSTREAM_ENABLED == 1 #if FSFW_CPP_OSTREAM_ENABLED == 1
sif::error << "PosixThread::createTask: Task name length longer" sif::error << "PosixThread::createTask: Task name length longer"
" than 16 chars. Truncating.." << std::endl; " than 16 chars. Truncating.." << std::endl;
#endif #endif
name[15] = '\0'; name[15] = '\0';
status = pthread_setname_np(thread,name); status = pthread_setname_np(thread,name);
if(status != 0){ if(status != 0){
#if FSFW_CPP_OSTREAM_ENABLED == 1 #if FSFW_CPP_OSTREAM_ENABLED == 1
sif::error << "PosixThread::createTask: Setting name" sif::error << "PosixThread::createTask: Setting name"
" did not work.." << std::endl; " did not work.." << std::endl;
#endif #endif
} }
} }
} }
status = pthread_attr_destroy(&attributes); status = pthread_attr_destroy(&attributes);
if(status!=0){ if(status!=0){
#if FSFW_CPP_OSTREAM_ENABLED == 1 #if FSFW_CPP_OSTREAM_ENABLED == 1
sif::error << "Posix Thread attribute destroy failed with: " << sif::error << "Posix Thread attribute destroy failed with: " <<
strerror(status) << std::endl; strerror(status) << std::endl;
#endif #endif
} }
} }
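The delayUntil() implementation above decides whether to keep sleeping based on a possible wrap-around of the millisecond counter. The following standalone restatement of that decision (the function name shouldStillDelay is not part of the framework) illustrates both branches with concrete numbers.

#include <cassert>
#include <cstdint>

/* Returns true if the caller should still sleep until the next wake time, false if the
deadline was already missed or a wrap-around makes the wait pointless. */
bool shouldStillDelay(uint64_t previousWakeTime, uint64_t delayTime, uint64_t currentTime) {
	const uint64_t nextTimeToWake = previousWakeTime + delayTime;
	if(currentTime < previousWakeTime) {
		/* The time counter wrapped around: only delay if the wake time wrapped as well
		and still lies ahead of the current time. */
		return (nextTimeToWake < previousWakeTime) && (nextTimeToWake > currentTime);
	}
	/* No wrap-around: delay if the wake time wrapped or simply lies in the future. */
	return (nextTimeToWake < previousWakeTime) || (nextTimeToWake > currentTime);
}

int main() {
	/* Period of 100 ms, only 40 ms have elapsed since the last wake-up: keep sleeping. */
	assert(shouldStillDelay(1000, 100, 1040) == true);
	/* 150 ms have already elapsed: the deadline was missed, do not sleep. */
	assert(shouldStillDelay(1000, 100, 1150) == false);
	return 0;
}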

View File

@ -2,6 +2,7 @@
#include "PeriodicPosixTask.h" #include "PeriodicPosixTask.h"
#include "../../tasks/TaskFactory.h" #include "../../tasks/TaskFactory.h"
#include "../../serviceinterface/ServiceInterface.h"
#include "../../returnvalues/HasReturnvaluesIF.h" #include "../../returnvalues/HasReturnvaluesIF.h"
//TODO: Different variant than the lazy loading in QueueFactory. What's better and why? //TODO: Different variant than the lazy loading in QueueFactory. What's better and why?

View File

@ -1,7 +1,9 @@
#include "TcUnixUdpPollingTask.h" #include "TcUnixUdpPollingTask.h"
#include "tcpipHelpers.h"
#include "../../globalfunctions/arrayprinter.h" #include "../../globalfunctions/arrayprinter.h"
#include <errno.h> #define FSFW_UDP_RCV_WIRETAPPING_ENABLED 0
TcUnixUdpPollingTask::TcUnixUdpPollingTask(object_id_t objectId, TcUnixUdpPollingTask::TcUnixUdpPollingTask(object_id_t objectId,
object_id_t tmtcUnixUdpBridge, size_t frameSize, object_id_t tmtcUnixUdpBridge, size_t frameSize,
@ -15,8 +17,8 @@ TcUnixUdpPollingTask::TcUnixUdpPollingTask(object_id_t objectId,
this->frameSize = DEFAULT_MAX_FRAME_SIZE; this->frameSize = DEFAULT_MAX_FRAME_SIZE;
} }
// Set up reception buffer with specified frame size. /* Set up reception buffer with specified frame size.
// For now, it is assumed that only one frame is held in the buffer! For now, it is assumed that only one frame is held in the buffer! */
receptionBuffer.reserve(this->frameSize); receptionBuffer.reserve(this->frameSize);
receptionBuffer.resize(this->frameSize); receptionBuffer.resize(this->frameSize);
@ -31,34 +33,37 @@ TcUnixUdpPollingTask::TcUnixUdpPollingTask(object_id_t objectId,
TcUnixUdpPollingTask::~TcUnixUdpPollingTask() {} TcUnixUdpPollingTask::~TcUnixUdpPollingTask() {}
ReturnValue_t TcUnixUdpPollingTask::performOperation(uint8_t opCode) { ReturnValue_t TcUnixUdpPollingTask::performOperation(uint8_t opCode) {
// Poll for new UDP datagrams in permanent loop. /* Sender Address is cached here. */
while(1) { struct sockaddr_in senderAddress;
//! Sender Address is cached here. socklen_t senderAddressSize = sizeof(senderAddress);
struct sockaddr_in senderAddress;
socklen_t senderSockLen = sizeof(senderAddress);
ssize_t bytesReceived = recvfrom(serverUdpSocket,
receptionBuffer.data(), frameSize, receptionFlags,
reinterpret_cast<sockaddr*>(&senderAddress), &senderSockLen);
if(bytesReceived < 0) {
// handle error
#if FSFW_CPP_OSTREAM_ENABLED == 1
sif::error << "TcSocketPollingTask::performOperation: Reception"
"error." << std::endl;
#endif
handleReadError();
/* Poll for new UDP datagrams in permanent loop. */
while(true) {
ssize_t bytesReceived = recvfrom(
serverUdpSocket,
receptionBuffer.data(),
frameSize,
receptionFlags,
reinterpret_cast<sockaddr*>(&senderAddress),
&senderAddressSize
);
if(bytesReceived < 0) {
/* Handle error */
#if FSFW_CPP_OSTREAM_ENABLED == 1
sif::error << "TcSocketPollingTask::performOperation: Reception error." << std::endl;
#endif
tcpip::handleError(tcpip::Protocol::UDP, tcpip::ErrorSources::RECVFROM_CALL, 500);
continue; continue;
} }
#if FSFW_CPP_OSTREAM_ENABLED == 1 #if FSFW_CPP_OSTREAM_ENABLED == 1 && FSFW_UDP_RCV_WIRETAPPING_ENABLED == 1
// sif::debug << "TcSocketPollingTask::performOperation: " << bytesReceived sif::debug << "TcSocketPollingTask::performOperation: " << bytesReceived
// << " bytes received" << std::endl; << " bytes received" << std::endl;
#endif #endif
ReturnValue_t result = handleSuccessfullTcRead(bytesReceived); ReturnValue_t result = handleSuccessfullTcRead(bytesReceived);
if(result != HasReturnvaluesIF::RETURN_FAILED) { if(result != HasReturnvaluesIF::RETURN_FAILED) {
} }
tmtcBridge->registerCommConnect();
tmtcBridge->checkAndSetClientAddress(senderAddress); tmtcBridge->checkAndSetClientAddress(senderAddress);
} }
return HasReturnvaluesIF::RETURN_OK; return HasReturnvaluesIF::RETURN_OK;
@ -67,15 +72,21 @@ ReturnValue_t TcUnixUdpPollingTask::performOperation(uint8_t opCode) {
ReturnValue_t TcUnixUdpPollingTask::handleSuccessfullTcRead(size_t bytesRead) { ReturnValue_t TcUnixUdpPollingTask::handleSuccessfullTcRead(size_t bytesRead) {
store_address_t storeId; store_address_t storeId;
ReturnValue_t result = tcStore->addData(&storeId,
receptionBuffer.data(), bytesRead); #if FSFW_UDP_RCV_WIRETAPPING_ENABLED == 1
// arrayprinter::print(receptionBuffer.data(), bytesRead); arrayprinter::print(receptionBuffer.data(), bytesRead);
#endif
ReturnValue_t result = tcStore->addData(&storeId, receptionBuffer.data(), bytesRead);
if (result != HasReturnvaluesIF::RETURN_OK) { if (result != HasReturnvaluesIF::RETURN_OK) {
#if FSFW_VERBOSE_LEVEL >= 1
#if FSFW_CPP_OSTREAM_ENABLED == 1 #if FSFW_CPP_OSTREAM_ENABLED == 1
sif::error << "TcSerialPollingTask::transferPusToSoftwareBus: Data " sif::error << "TcUnixUdpPollingTask::handleSuccessfullTcRead: Data "
"storage failed" << std::endl; "storage failed" << std::endl;
sif::error << "Packet size: " << bytesRead << std::endl; sif::error << "Packet size: " << bytesRead << std::endl;
#endif #else
#endif /* FSFW_CPP_OSTREAM_ENABLED == 1 */
#endif /* FSFW_VERBOSE_LEVEL >= 1 */
return HasReturnvaluesIF::RETURN_FAILED; return HasReturnvaluesIF::RETURN_FAILED;
} }
@ -83,10 +94,13 @@ ReturnValue_t TcUnixUdpPollingTask::handleSuccessfullTcRead(size_t bytesRead) {
result = MessageQueueSenderIF::sendMessage(targetTcDestination, &message); result = MessageQueueSenderIF::sendMessage(targetTcDestination, &message);
if (result != HasReturnvaluesIF::RETURN_OK) { if (result != HasReturnvaluesIF::RETURN_OK) {
#if FSFW_VERBOSE_LEVEL >= 1
#if FSFW_CPP_OSTREAM_ENABLED == 1 #if FSFW_CPP_OSTREAM_ENABLED == 1
sif::error << "Serial Polling: Sending message to queue failed" sif::error << "TcUnixUdpPollingTask::handleSuccessfullTcRead: Sending message to queue "
<< std::endl; "failed" << std::endl;
#endif #else
#endif /* FSFW_CPP_OSTREAM_ENABLED == 1 */
#endif /* FSFW_VERBOSE_LEVEL >= 1 */
tcStore->deleteData(storeId); tcStore->deleteData(storeId);
} }
return result; return result;
@ -111,15 +125,16 @@ ReturnValue_t TcUnixUdpPollingTask::initialize() {
return ObjectManagerIF::CHILD_INIT_FAILED; return ObjectManagerIF::CHILD_INIT_FAILED;
} }
serverUdpSocket = tmtcBridge->serverSocket;
return HasReturnvaluesIF::RETURN_OK; return HasReturnvaluesIF::RETURN_OK;
} }
ReturnValue_t TcUnixUdpPollingTask::initializeAfterTaskCreation() { ReturnValue_t TcUnixUdpPollingTask::initializeAfterTaskCreation() {
// Initialize the destination after task creation. This ensures /* Initialize the destination after task creation. This ensures
// that the destination will be set in the TMTC bridge. that the destination has already been set in the TMTC bridge. */
targetTcDestination = tmtcBridge->getRequestQueue(); targetTcDestination = tmtcBridge->getRequestQueue();
	/* The server socket is set up in the bridge initialization. Calling this function here
	ensures that it is set up properly in any case. */
serverUdpSocket = tmtcBridge->serverSocket;
return HasReturnvaluesIF::RETURN_OK; return HasReturnvaluesIF::RETURN_OK;
} }
@ -135,24 +150,3 @@ void TcUnixUdpPollingTask::setTimeout(double timeoutSeconds) {
#endif #endif
} }
} }
// TODO: sleep after error detection to prevent spam
void TcUnixUdpPollingTask::handleReadError() {
switch(errno) {
case(EAGAIN): {
// todo: When working in timeout mode, this will occur more often
// and is not an error.
#if FSFW_CPP_OSTREAM_ENABLED == 1
sif::error << "TcUnixUdpPollingTask::handleReadError: Timeout."
<< std::endl;
#endif
break;
}
default: {
#if FSFW_CPP_OSTREAM_ENABLED == 1
sif::error << "TcUnixUdpPollingTask::handleReadError: "
<< strerror(errno) << std::endl;
#endif
}
}
}

View File

@ -48,6 +48,7 @@ private:
object_id_t tmtcBridgeId = objects::NO_OBJECT; object_id_t tmtcBridgeId = objects::NO_OBJECT;
TmTcUnixUdpBridge* tmtcBridge = nullptr; TmTcUnixUdpBridge* tmtcBridge = nullptr;
MessageQueueId_t targetTcDestination = MessageQueueIF::NO_QUEUE; MessageQueueId_t targetTcDestination = MessageQueueIF::NO_QUEUE;
//! Reception flags: https://linux.die.net/man/2/recvfrom. //! Reception flags: https://linux.die.net/man/2/recvfrom.
int receptionFlags = 0; int receptionFlags = 0;
@ -61,7 +62,6 @@ private:
timeval receptionTimeout; timeval receptionTimeout;
ReturnValue_t handleSuccessfullTcRead(size_t bytesRead); ReturnValue_t handleSuccessfullTcRead(size_t bytesRead);
void handleReadError();
}; };
#endif /* FRAMEWORK_OSAL_LINUX_TCSOCKETPOLLINGTASK_H_ */ #endif /* FRAMEWORK_OSAL_LINUX_TCSOCKETPOLLINGTASK_H_ */

View File

@ -1,206 +1,157 @@
#include "TmTcUnixUdpBridge.h" #include "TmTcUnixUdpBridge.h"
#include "tcpipHelpers.h"
#include "../../serviceinterface/ServiceInterface.h" #include "../../serviceinterface/ServiceInterface.h"
#include "../../ipc/MutexHelper.h" #include "../../ipc/MutexGuard.h"
#include <errno.h>
#include <arpa/inet.h> #include <arpa/inet.h>
#include <unistd.h>
#include <netdb.h>
#include <cstring>
TmTcUnixUdpBridge::TmTcUnixUdpBridge(object_id_t objectId, //! Debugging preprocessor define.
object_id_t tcDestination, object_id_t tmStoreId, object_id_t tcStoreId, #define FSFW_UDP_SEND_WIRETAPPING_ENABLED 0
uint16_t serverPort, uint16_t clientPort):
const std::string TmTcUnixUdpBridge::DEFAULT_UDP_SERVER_PORT = tcpip::DEFAULT_UDP_SERVER_PORT;
TmTcUnixUdpBridge::TmTcUnixUdpBridge(object_id_t objectId, object_id_t tcDestination,
object_id_t tmStoreId, object_id_t tcStoreId, std::string udpServerPort):
TmTcBridge(objectId, tcDestination, tmStoreId, tcStoreId) { TmTcBridge(objectId, tcDestination, tmStoreId, tcStoreId) {
mutex = MutexFactory::instance()->createMutex(); if(udpServerPort == "") {
this->udpServerPort = DEFAULT_UDP_SERVER_PORT;
}
else {
this->udpServerPort = udpServerPort;
}
uint16_t setServerPort = DEFAULT_UDP_SERVER_PORT; mutex = MutexFactory::instance()->createMutex();
if(serverPort != 0xFFFF) { communicationLinkUp = false;
setServerPort = serverPort; }
}
uint16_t setClientPort = DEFAULT_UDP_CLIENT_PORT; ReturnValue_t TmTcUnixUdpBridge::initialize() {
if(clientPort != 0xFFFF) { using namespace tcpip;
setClientPort = clientPort;
}
// Set up UDP socket: https://man7.org/linux/man-pages/man7/ip.7.html ReturnValue_t result = TmTcBridge::initialize();
//clientSocket = socket(AF_INET, SOCK_DGRAM, 0); if(result != HasReturnvaluesIF::RETURN_OK) {
serverSocket = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP);
if(serverSocket < 0) {
#if FSFW_CPP_OSTREAM_ENABLED == 1 #if FSFW_CPP_OSTREAM_ENABLED == 1
sif::error << "TmTcUnixUdpBridge::TmTcUnixUdpBridge: Could not open" sif::error << "TmTcUnixUdpBridge::initialize: TmTcBridge initialization failed!"
" UDP socket!" << std::endl; << std::endl;
#endif #endif
handleSocketError(); return result;
return; }
}
serverAddress.sin_family = AF_INET; struct addrinfo *addrResult = nullptr;
struct addrinfo hints;
// Accept packets from any interface. std::memset(&hints, 0, sizeof(hints));
//serverAddress.sin_addr.s_addr = inet_addr("127.73.73.0"); hints.ai_family = AF_INET;
serverAddress.sin_addr.s_addr = htonl(INADDR_ANY); hints.ai_socktype = SOCK_DGRAM;
serverAddress.sin_port = htons(setServerPort); hints.ai_protocol = IPPROTO_UDP;
serverAddressLen = sizeof(serverAddress); hints.ai_flags = AI_PASSIVE;
setsockopt(serverSocket, SOL_SOCKET, SO_REUSEADDR, &serverSocketOptions,
sizeof(serverSocketOptions));
clientAddress.sin_family = AF_INET; /* Set up UDP socket:
clientAddress.sin_addr.s_addr = htonl(INADDR_ANY); https://man7.org/linux/man-pages/man3/getaddrinfo.3.html
clientAddress.sin_port = htons(setClientPort); Passing nullptr as the first parameter and specifying AI_PASSIVE in hints will cause
clientAddressLen = sizeof(clientAddress); getaddrinfo to assign the address 0.0.0.0 (any address) */
int retval = getaddrinfo(nullptr, udpServerPort.c_str(), &hints, &addrResult);
int result = bind(serverSocket, if (retval != 0) {
reinterpret_cast<struct sockaddr*>(&serverAddress),
serverAddressLen);
if(result == -1) {
#if FSFW_CPP_OSTREAM_ENABLED == 1 #if FSFW_CPP_OSTREAM_ENABLED == 1
sif::error << "TmTcUnixUdpBridge::TmTcUnixUdpBridge: Could not bind " sif::warning << "TmTcWinUdpBridge::TmTcWinUdpBridge: Retrieving address info failed!" <<
"local port " << setServerPort << " to server socket!" std::endl;
<< std::endl;
#endif #endif
handleBindError(); return HasReturnvaluesIF::RETURN_FAILED;
return; }
}
/* Set up UDP socket: https://man7.org/linux/man-pages/man7/ip.7.html */
serverSocket = socket(addrResult->ai_family, addrResult->ai_socktype, addrResult->ai_protocol);
if(serverSocket < 0) {
#if FSFW_CPP_OSTREAM_ENABLED == 1
sif::error << "TmTcUnixUdpBridge::TmTcUnixUdpBridge: Could not open UDP socket!" <<
std::endl;
#else
sif::printError("TmTcUnixUdpBridge::TmTcUnixUdpBridge: Could not open UDP socket!\n");
#endif /* FSFW_CPP_OSTREAM_ENABLED == 1 */
freeaddrinfo(addrResult);
handleError(Protocol::UDP, ErrorSources::SOCKET_CALL);
return HasReturnvaluesIF::RETURN_FAILED;
}
retval = bind(serverSocket, addrResult->ai_addr, static_cast<int>(addrResult->ai_addrlen));
if(retval != 0) {
#if FSFW_CPP_OSTREAM_ENABLED == 1
sif::warning << "TmTcWinUdpBridge::TmTcWinUdpBridge: Could not bind "
"local port (" << udpServerPort << ") to server socket!" << std::endl;
#endif
freeaddrinfo(addrResult);
handleError(Protocol::UDP, ErrorSources::BIND_CALL);
return HasReturnvaluesIF::RETURN_FAILED;
}
return HasReturnvaluesIF::RETURN_OK;
} }
TmTcUnixUdpBridge::~TmTcUnixUdpBridge() { TmTcUnixUdpBridge::~TmTcUnixUdpBridge() {
if(mutex != nullptr) {
MutexFactory::instance()->deleteMutex(mutex);
}
close(serverSocket);
} }
ReturnValue_t TmTcUnixUdpBridge::sendTm(const uint8_t *data, size_t dataLen) { ReturnValue_t TmTcUnixUdpBridge::sendTm(const uint8_t *data, size_t dataLen) {
int flags = 0; int flags = 0;
MutexHelper lock(mutex, MutexIF::TimeoutType::WAITING, 10); /* The target address can be set by different threads so this lock ensures thread-safety */
MutexGuard lock(mutex, timeoutType, mutexTimeoutMs);
if(ipAddrAnySet){ #if FSFW_CPP_OSTREAM_ENABLED == 1 && FSFW_UDP_SEND_WIRETAPPING_ENABLED == 1
clientAddress.sin_addr.s_addr = htons(INADDR_ANY); char ipAddress [15];
//clientAddress.sin_addr.s_addr = inet_addr("127.73.73.1"); sif::debug << "IP Address Sender: "<<
clientAddressLen = sizeof(serverAddress); inet_ntop(AF_INET,&clientAddress.sin_addr.s_addr, ipAddress, 15) << std::endl;
}
// char ipAddress [15];
#if FSFW_CPP_OSTREAM_ENABLED == 1
// sif::debug << "IP Address Sender: "<< inet_ntop(AF_INET,
// &clientAddress.sin_addr.s_addr, ipAddress, 15) << std::endl;
#endif #endif
ssize_t bytesSent = sendto(serverSocket, data, dataLen, flags, ssize_t bytesSent = sendto(
reinterpret_cast<sockaddr*>(&clientAddress), clientAddressLen); serverSocket,
if(bytesSent < 0) { data,
dataLen,
flags,
reinterpret_cast<sockaddr*>(&clientAddress),
clientAddressLen
);
if(bytesSent < 0) {
#if FSFW_CPP_OSTREAM_ENABLED == 1 #if FSFW_CPP_OSTREAM_ENABLED == 1
sif::error << "TmTcUnixUdpBridge::sendTm: Send operation failed." sif::warning << "TmTcUnixUdpBridge::sendTm: Send operation failed." << std::endl;
<< std::endl;
#endif #endif
handleSendError(); tcpip::handleError(tcpip::Protocol::UDP, tcpip::ErrorSources::SENDTO_CALL);
} }
#if FSFW_CPP_OSTREAM_ENABLED == 1
// sif::debug << "TmTcUnixUdpBridge::sendTm: " << bytesSent << " bytes were" #if FSFW_CPP_OSTREAM_ENABLED == 1 && FSFW_UDP_SEND_WIRETAPPING_ENABLED == 1
// " sent." << std::endl; sif::debug << "TmTcUnixUdpBridge::sendTm: " << bytesSent << " bytes were"
" sent." << std::endl;
#endif #endif
return HasReturnvaluesIF::RETURN_OK;
return HasReturnvaluesIF::RETURN_OK;
} }
void TmTcUnixUdpBridge::checkAndSetClientAddress(sockaddr_in& newAddress) { void TmTcUnixUdpBridge::checkAndSetClientAddress(sockaddr_in& newAddress) {
MutexHelper lock(mutex, MutexIF::TimeoutType::WAITING, 10); /* The target address can be set by different threads so this lock ensures thread-safety */
MutexGuard lock(mutex, timeoutType, mutexTimeoutMs);
// char ipAddress [15]; #if FSFW_CPP_OSTREAM_ENABLED == 1 && FSFW_UDP_RCV_WIRETAPPING_ENABLED == 1
#if FSFW_CPP_OSTREAM_ENABLED == 1 char ipAddress [15];
// sif::debug << "IP Address Sender: "<< inet_ntop(AF_INET, sif::debug << "IP Address Sender: "<< inet_ntop(AF_INET,
// &newAddress.sin_addr.s_addr, ipAddress, 15) << std::endl; &newAddress.sin_addr.s_addr, ipAddress, 15) << std::endl;
// sif::debug << "IP Address Old: " << inet_ntop(AF_INET, sif::debug << "IP Address Old: " << inet_ntop(AF_INET,
// &clientAddress.sin_addr.s_addr, ipAddress, 15) << std::endl; &clientAddress.sin_addr.s_addr, ipAddress, 15) << std::endl;
#endif #endif
registerCommConnect();
// Set new IP address if it has changed. /* Set new IP address to reply to. */
if(clientAddress.sin_addr.s_addr != newAddress.sin_addr.s_addr) { clientAddress = newAddress;
clientAddress.sin_addr.s_addr = newAddress.sin_addr.s_addr; clientAddressLen = sizeof(clientAddress);
clientAddressLen = sizeof(clientAddress);
}
} }
void TmTcUnixUdpBridge::setMutexProperties(MutexIF::TimeoutType timeoutType,
void TmTcUnixUdpBridge::handleSocketError() { dur_millis_t timeoutMs) {
// See: https://man7.org/linux/man-pages/man2/socket.2.html this->timeoutType = timeoutType;
switch(errno) { this->mutexTimeoutMs = timeoutMs;
case(EACCES):
case(EINVAL):
case(EMFILE):
case(ENFILE):
case(EAFNOSUPPORT):
case(ENOBUFS):
case(ENOMEM):
case(EPROTONOSUPPORT):
#if FSFW_CPP_OSTREAM_ENABLED == 1
sif::error << "TmTcUnixBridge::handleSocketError: Socket creation failed"
<< " with " << strerror(errno) << std::endl;
#endif
break;
default:
#if FSFW_CPP_OSTREAM_ENABLED == 1
sif::error << "TmTcUnixBridge::handleSocketError: Unknown error"
<< std::endl;
#endif
break;
}
} }
void TmTcUnixUdpBridge::handleBindError() {
// See: https://man7.org/linux/man-pages/man2/bind.2.html
switch(errno) {
case(EACCES): {
/*
Ephermeral ports can be shown with following command:
sysctl -A | grep ip_local_port_range
*/
#if FSFW_CPP_OSTREAM_ENABLED == 1
sif::error << "TmTcUnixBridge::handleBindError: Port access issue."
"Ports 1-1024 are reserved on UNIX systems and require root "
"rights while ephermeral ports should not be used as well."
<< std::endl;
#endif
}
break;
case(EADDRINUSE):
case(EBADF):
case(EINVAL):
case(ENOTSOCK):
case(EADDRNOTAVAIL):
case(EFAULT):
case(ELOOP):
case(ENAMETOOLONG):
case(ENOENT):
case(ENOMEM):
case(ENOTDIR):
case(EROFS): {
#if FSFW_CPP_OSTREAM_ENABLED == 1
sif::error << "TmTcUnixBridge::handleBindError: Socket creation failed"
<< " with " << strerror(errno) << std::endl;
#endif
break;
}
default:
#if FSFW_CPP_OSTREAM_ENABLED == 1
sif::error << "TmTcUnixBridge::handleBindError: Unknown error"
<< std::endl;
#endif
break;
}
}
void TmTcUnixUdpBridge::handleSendError() {
switch(errno) {
default: {
#if FSFW_CPP_OSTREAM_ENABLED == 1
sif::error << "TmTcUnixBridge::handleSendError: "
<< strerror(errno) << std::endl;
#else
sif::printError("TmTcUnixBridge::handleSendError: %s\n",
strerror(errno));
#endif
}
}
}
void TmTcUnixUdpBridge::setClientAddressToAny(bool ipAddrAnySet){
this->ipAddrAnySet = ipAddrAnySet;
}
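With the server port now passed as a string, a mission factory might wire the bridge and the polling task roughly as follows. The object IDs are placeholders, the empty port string falls back to tcpip::DEFAULT_UDP_SERVER_PORT, and the polling task's default constructor arguments are assumed from its header.

#include "TmTcUnixUdpBridge.h"
#include "TcUnixUdpPollingTask.h"
#include "../../objectmanager/frameworkObjects.h"

/* Hypothetical object IDs, for illustration only. */
static constexpr object_id_t UDP_BRIDGE_ID = 0x50000300;
static constexpr object_id_t UDP_POLLING_TASK_ID = 0x50000400;
static constexpr object_id_t TC_DISTRIBUTOR_ID = 0x50000100;

void createTmTcIpComponents() {
	/* The bridge sets up the server socket in its initialize() call. */
	new TmTcUnixUdpBridge(UDP_BRIDGE_ID, TC_DISTRIBUTOR_ID,
			objects::TM_STORE, objects::TC_STORE, "");
	/* The polling task caches the bridge's server socket after task creation. */
	new TcUnixUdpPollingTask(UDP_POLLING_TASK_ID, UDP_BRIDGE_ID);
}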

View File

@ -7,45 +7,43 @@
#include <netinet/in.h> #include <netinet/in.h>
#include <netinet/udp.h> #include <netinet/udp.h>
class TmTcUnixUdpBridge: public TmTcBridge { class TmTcUnixUdpBridge:
friend class TcUnixUdpPollingTask; public TmTcBridge {
friend class TcUnixUdpPollingTask;
public: public:
// The ports chosen here should not be used by any other process.
// List of used ports on Linux: /etc/services
static constexpr uint16_t DEFAULT_UDP_SERVER_PORT = 7301;
static constexpr uint16_t DEFAULT_UDP_CLIENT_PORT = 7302;
TmTcUnixUdpBridge(object_id_t objectId, object_id_t tcDestination, /* The ports chosen here should not be used by any other process.
object_id_t tmStoreId, object_id_t tcStoreId, List of used ports on Linux: /etc/services */
uint16_t serverPort = 0xFFFF,uint16_t clientPort = 0xFFFF); static const std::string DEFAULT_UDP_SERVER_PORT;
virtual~ TmTcUnixUdpBridge();
void checkAndSetClientAddress(sockaddr_in& clientAddress); TmTcUnixUdpBridge(object_id_t objectId, object_id_t tcDestination,
object_id_t tmStoreId, object_id_t tcStoreId,
std::string serverPort = "");
virtual~ TmTcUnixUdpBridge();
/**
* Set properties of internal mutex.
*/
void setMutexProperties(MutexIF::TimeoutType timeoutType, dur_millis_t timeoutMs);
ReturnValue_t initialize() override;
void checkAndSetClientAddress(sockaddr_in& clientAddress);
void setClientAddressToAny(bool ipAddrAnySet);
protected: protected:
virtual ReturnValue_t sendTm(const uint8_t * data, size_t dataLen) override; virtual ReturnValue_t sendTm(const uint8_t * data, size_t dataLen) override;
private: private:
int serverSocket = 0; int serverSocket = 0;
std::string udpServerPort;
const int serverSocketOptions = 0; struct sockaddr_in clientAddress;
socklen_t clientAddressLen = 0;
struct sockaddr_in clientAddress; //! Access to the client address is mutex protected as it is set by another task.
socklen_t clientAddressLen = 0; MutexIF::TimeoutType timeoutType = MutexIF::TimeoutType::WAITING;
dur_millis_t mutexTimeoutMs = 20;
struct sockaddr_in serverAddress; MutexIF* mutex;
socklen_t serverAddressLen = 0;
bool ipAddrAnySet = false;
//! Access to the client address is mutex protected as it is set
//! by another task.
MutexIF* mutex;
void handleSocketError();
void handleBindError();
void handleSendError();
}; };
#endif /* FRAMEWORK_OSAL_LINUX_TMTCUNIXUDPBRIDGE_H_ */ #endif /* FRAMEWORK_OSAL_LINUX_TMTCUNIXUDPBRIDGE_H_ */

osal/linux/tcpipHelpers.cpp Normal file
View File

@ -0,0 +1,108 @@
#include "tcpipHelpers.h"
#include "../../tasks/TaskFactory.h"
#include <errno.h>
#include <string>
void tcpip::handleError(Protocol protocol, ErrorSources errorSrc, dur_millis_t sleepDuration) {
int errCode = errno;
std::string protocolString;
std::string errorSrcString;
determineErrorStrings(protocol, errorSrc, protocolString, errorSrcString);
std::string infoString;
switch(errCode) {
case(EACCES): {
infoString = "EACCES";
break;
}
case(EINVAL): {
infoString = "EINVAL";
break;
}
case(EAGAIN): {
infoString = "EAGAIN";
break;
}
case(EMFILE): {
infoString = "EMFILE";
break;
}
case(ENFILE): {
infoString = "ENFILE";
break;
}
case(EAFNOSUPPORT): {
infoString = "EAFNOSUPPORT";
break;
}
case(ENOBUFS): {
infoString = "ENOBUFS";
break;
}
case(ENOMEM): {
infoString = "ENOMEM";
break;
}
case(EPROTONOSUPPORT): {
infoString = "EPROTONOSUPPORT";
break;
}
case(EADDRINUSE): {
infoString = "EADDRINUSE";
break;
}
case(EBADF): {
infoString = "EBADF";
break;
}
case(ENOTSOCK): {
infoString = "ENOTSOCK";
break;
}
case(EADDRNOTAVAIL): {
infoString = "EADDRNOTAVAIL";
break;
}
case(EFAULT): {
infoString = "EFAULT";
break;
}
case(ELOOP): {
infoString = "ELOOP";
break;
}
case(ENAMETOOLONG): {
infoString = "ENAMETOOLONG";
break;
}
case(ENOENT): {
infoString = "ENOENT";
break;
}
case(ENOTDIR): {
infoString = "ENOTDIR";
break;
}
case(EROFS): {
infoString = "EROFS";
break;
}
default: {
infoString = "Error code: " + std::to_string(errCode);
}
}
#if FSFW_CPP_OSTREAM_ENABLED == 1
sif::warning << "tcpip::handleError: " << protocolString << " | " << errorSrcString <<
" | " << infoString << std::endl;
#else
sif::printWarning("tcpip::handleError: %s | %s | %s\n", protocolString,
errorSrcString, infoString);
#endif /* FSFW_CPP_OSTREAM_ENABLED == 1 */
if(sleepDuration > 0) {
TaskFactory::instance()->delayTask(sleepDuration);
}
}

osal/linux/tcpipHelpers.h Normal file
View File

@ -0,0 +1,14 @@
#ifndef FSFW_OSAL_LINUX_TCPIPHELPERS_H_
#define FSFW_OSAL_LINUX_TCPIPHELPERS_H_
#include "../../timemanager/clockDefinitions.h"
#include "../common/tcpipCommon.h"
namespace tcpip {
void handleError(Protocol protocol, ErrorSources errorSrc, dur_millis_t sleepDuration = 0);
}
#endif /* FSFW_OSAL_LINUX_TCPIPHELPERS_H_ */

View File

@ -1,7 +1,7 @@
#include "RtemsBasic.h" #include "RtemsBasic.h"
#include "../../timemanager/Clock.h" #include "../../timemanager/Clock.h"
#include "../../ipc/MutexHelper.h" #include "../../ipc/MutexGuard.h"
#include <rtems/score/todimpl.h> #include <rtems/score/todimpl.h>
#include <rtems/rtems/clockimpl.h> #include <rtems/rtems/clockimpl.h>
@ -183,7 +183,7 @@ ReturnValue_t Clock::setLeapSeconds(const uint16_t leapSeconds_) {
if(checkOrCreateClockMutex()!=HasReturnvaluesIF::RETURN_OK){ if(checkOrCreateClockMutex()!=HasReturnvaluesIF::RETURN_OK){
return HasReturnvaluesIF::RETURN_FAILED; return HasReturnvaluesIF::RETURN_FAILED;
} }
MutexHelper helper(timeMutex); MutexGuard helper(timeMutex);
leapSeconds = leapSeconds_; leapSeconds = leapSeconds_;
@ -196,7 +196,7 @@ ReturnValue_t Clock::getLeapSeconds(uint16_t* leapSeconds_) {
if(timeMutex==nullptr){ if(timeMutex==nullptr){
return HasReturnvaluesIF::RETURN_FAILED; return HasReturnvaluesIF::RETURN_FAILED;
} }
MutexHelper helper(timeMutex); MutexGuard helper(timeMutex);
*leapSeconds_ = leapSeconds; *leapSeconds_ = leapSeconds;

View File

@ -1,11 +1,11 @@
target_sources(${LIB_FSFW_NAME} target_sources(${LIB_FSFW_NAME} PRIVATE
PRIVATE TcWinUdpPollingTask.cpp
TcWinUdpPollingTask.cpp TmTcWinUdpBridge.cpp
TmTcWinUdpBridge.cpp TcWinTcpServer.cpp
tcpipHelpers.cpp
) )
target_link_libraries(${LIB_FSFW_NAME} target_link_libraries(${LIB_FSFW_NAME} PRIVATE
PRIVATE wsock32
wsock32 ws2_32
ws2_32
) )

View File

@ -0,0 +1,131 @@
#include "TcWinTcpServer.h"
#include "tcpipHelpers.h"
#include "../../serviceinterface/ServiceInterface.h"
#include <winsock2.h>
#include <ws2tcpip.h>
const std::string TcWinTcpServer::DEFAULT_TCP_SERVER_PORT = "7301";
const std::string TcWinTcpServer::DEFAULT_TCP_CLIENT_PORT = "7302";
TcWinTcpServer::TcWinTcpServer(object_id_t objectId, object_id_t tmtcUnixUdpBridge,
std::string customTcpServerPort):
SystemObject(objectId), tcpPort(customTcpServerPort) {
if(tcpPort == "") {
tcpPort = DEFAULT_TCP_SERVER_PORT;
}
}
ReturnValue_t TcWinTcpServer::initialize() {
using namespace tcpip;
int retval = 0;
struct addrinfo *addrResult = nullptr;
struct addrinfo hints;
/* Initiates Winsock DLL. */
WSAData wsaData;
WORD wVersionRequested = MAKEWORD(2, 2);
int err = WSAStartup(wVersionRequested, &wsaData);
if (err != 0) {
/* Tell the user that we could not find a usable Winsock DLL. */
#if FSFW_CPP_OSTREAM_ENABLED == 1
sif::error << "TmTcWinUdpBridge::TmTcWinUdpBridge: WSAStartup failed with error: " <<
err << std::endl;
#endif
return HasReturnvaluesIF::RETURN_FAILED;
}
ZeroMemory(&hints, sizeof (hints));
hints.ai_family = AF_INET;
hints.ai_socktype = SOCK_STREAM;
hints.ai_protocol = IPPROTO_TCP;
hints.ai_flags = AI_PASSIVE;
retval = getaddrinfo(nullptr, tcpPort.c_str(), &hints, &addrResult);
if (retval != 0) {
#if FSFW_CPP_OSTREAM_ENABLED == 1
sif::warning << "TcWinTcpServer::TcWinTcpServer: Retrieving address info failed!" <<
std::endl;
#endif
handleError(Protocol::TCP, ErrorSources::GETADDRINFO_CALL);
return HasReturnvaluesIF::RETURN_FAILED;
}
/* Open TCP (stream) socket */
listenerTcpSocket = socket(addrResult->ai_family, addrResult->ai_socktype,
addrResult->ai_protocol);
if(listenerTcpSocket == INVALID_SOCKET) {
#if FSFW_CPP_OSTREAM_ENABLED == 1
sif::warning << "TcWinTcpServer::TcWinTcpServer: Socket creation failed!" << std::endl;
#endif
freeaddrinfo(addrResult);
handleError(Protocol::TCP, ErrorSources::SOCKET_CALL);
return HasReturnvaluesIF::RETURN_FAILED;
}
retval = bind(listenerTcpSocket, addrResult->ai_addr, static_cast<int>(addrResult->ai_addrlen));
if(retval == SOCKET_ERROR) {
#if FSFW_CPP_OSTREAM_ENABLED == 1
sif::warning << "TcWinTcpServer::TcWinTcpServer: Binding socket failed!" <<
std::endl;
#endif
freeaddrinfo(addrResult);
handleError(Protocol::TCP, ErrorSources::BIND_CALL);
}
freeaddrinfo(addrResult);
return HasReturnvaluesIF::RETURN_OK;
}
TcWinTcpServer::~TcWinTcpServer() {
closesocket(listenerTcpSocket);
WSACleanup();
}
ReturnValue_t TcWinTcpServer::performOperation(uint8_t opCode) {
using namespace tcpip;
/* If a connection is accepted, the corresponding socket will be assigned to the new socket */
SOCKET clientSocket;
sockaddr_in clientSockAddr;
int connectorSockAddrLen = 0;
int retval = 0;
/* Listen for connection requests permanently for lifetime of program */
while(true) {
retval = listen(listenerTcpSocket, currentBacklog);
if(retval == SOCKET_ERROR) {
handleError(Protocol::TCP, ErrorSources::LISTEN_CALL, 500);
continue;
}
clientSocket = accept(listenerTcpSocket, reinterpret_cast<sockaddr*>(&clientSockAddr),
&connectorSockAddrLen);
if(clientSocket == INVALID_SOCKET) {
handleError(Protocol::TCP, ErrorSources::ACCEPT_CALL, 500);
continue;
};
retval = recv(clientSocket, reinterpret_cast<char*>(receptionBuffer.data()),
receptionBuffer.size(), 0);
		if(retval > 0) {
#if FSFW_TCP_RCV_WIRETAPPING_ENABLED == 1
			sif::info << "TcWinTcpServer::performOperation: Received " << retval << " bytes." <<
					std::endl;
#endif
		}
		else if(retval == 0) {
			/* Client closed the connection gracefully. */
		}
		else {
			handleError(Protocol::TCP, ErrorSources::RECV_CALL, 500);
		}
/* Done, shut down connection */
retval = shutdown(clientSocket, SD_SEND);
}
return HasReturnvaluesIF::RETURN_OK;
}

View File

@ -0,0 +1,47 @@
#ifndef FSFW_OSAL_WINDOWS_TCWINTCPSERVER_H_
#define FSFW_OSAL_WINDOWS_TCWINTCPSERVER_H_
#include "../../objectmanager/SystemObject.h"
#include "../../tasks/ExecutableObjectIF.h"
#include <winsock2.h>
#include <string>
#include <vector>
//! Debugging preprocessor define.
#define FSFW_TCP_RCV_WIRETAPPING_ENABLED 0
/**
* @brief Windows TCP server used to receive telecommands on a Windows Host
* @details
* Based on: https://docs.microsoft.com/en-us/windows/win32/winsock/complete-server-code
*/
class TcWinTcpServer:
public SystemObject,
public ExecutableObjectIF {
public:
/* The ports chosen here should not be used by any other process. */
static const std::string DEFAULT_TCP_SERVER_PORT;
static const std::string DEFAULT_TCP_CLIENT_PORT;
TcWinTcpServer(object_id_t objectId, object_id_t tmtcUnixUdpBridge,
std::string customTcpServerPort = "");
virtual~ TcWinTcpServer();
ReturnValue_t initialize() override;
ReturnValue_t performOperation(uint8_t opCode) override;
private:
std::string tcpPort;
SOCKET listenerTcpSocket = 0;
struct sockaddr_in tcpAddress;
int tcpAddrLen = sizeof(tcpAddress);
int currentBacklog = 3;
std::vector<uint8_t> receptionBuffer;
int tcpSockOpt = 0;
};
#endif /* FSFW_OSAL_WINDOWS_TCWINTCPSERVER_H_ */
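A construction sketch for the new TCP server, analogous to the UDP bridge setup. The object IDs are placeholders; the second constructor argument appears unused in the shown implementation and is passed only for symmetry with the UDP components. Since performOperation() blocks in its listen/accept/receive loop, the server should run in a dedicated task.

#include "TcWinTcpServer.h"

/* Hypothetical object IDs, for illustration only. */
static constexpr object_id_t TCP_SERVER_ID = 0x50000500;
static constexpr object_id_t UDP_BRIDGE_ID = 0x50000300;

void createTcpServer() {
	/* An empty port string falls back to DEFAULT_TCP_SERVER_PORT. */
	new TcWinTcpServer(TCP_SERVER_ID, UDP_BRIDGE_ID, "");
}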

View File

@ -1,9 +1,12 @@
#include "TcWinUdpPollingTask.h" #include "TcWinUdpPollingTask.h"
#include "tcpipHelpers.h"
#include "../../globalfunctions/arrayprinter.h" #include "../../globalfunctions/arrayprinter.h"
#include "../../serviceinterface/ServiceInterfaceStream.h" #include "../../serviceinterface/ServiceInterfaceStream.h"
#include <winsock2.h> #include <winsock2.h>
#include <windows.h>
//! Debugging preprocessor define.
#define FSFW_UDP_RCV_WIRETAPPING_ENABLED 0
TcWinUdpPollingTask::TcWinUdpPollingTask(object_id_t objectId, TcWinUdpPollingTask::TcWinUdpPollingTask(object_id_t objectId,
object_id_t tmtcUnixUdpBridge, size_t frameSize, object_id_t tmtcUnixUdpBridge, size_t frameSize,
@ -16,8 +19,8 @@ TcWinUdpPollingTask::TcWinUdpPollingTask(object_id_t objectId,
this->frameSize = DEFAULT_MAX_FRAME_SIZE; this->frameSize = DEFAULT_MAX_FRAME_SIZE;
} }
// Set up reception buffer with specified frame size. /* Set up reception buffer with specified frame size.
// For now, it is assumed that only one frame is held in the buffer! For now, it is assumed that only one frame is held in the buffer! */
receptionBuffer.reserve(this->frameSize); receptionBuffer.reserve(this->frameSize);
receptionBuffer.resize(this->frameSize); receptionBuffer.resize(this->frameSize);
@ -32,34 +35,37 @@ TcWinUdpPollingTask::TcWinUdpPollingTask(object_id_t objectId,
TcWinUdpPollingTask::~TcWinUdpPollingTask() {} TcWinUdpPollingTask::~TcWinUdpPollingTask() {}
ReturnValue_t TcWinUdpPollingTask::performOperation(uint8_t opCode) { ReturnValue_t TcWinUdpPollingTask::performOperation(uint8_t opCode) {
// Poll for new UDP datagrams in permanent loop. /* Sender Address is cached here. */
struct sockaddr_in senderAddress;
int senderAddressSize = sizeof(senderAddress);
/* Poll for new UDP datagrams in permanent loop. */
while(true) { while(true) {
//! Sender Address is cached here. int bytesReceived = recvfrom(
struct sockaddr_in senderAddress; serverUdpSocket,
int senderAddressSize = sizeof(senderAddress); reinterpret_cast<char*>(receptionBuffer.data()),
ssize_t bytesReceived = recvfrom(serverUdpSocket, frameSize,
reinterpret_cast<char*>(receptionBuffer.data()), frameSize, receptionFlags,
receptionFlags, reinterpret_cast<sockaddr*>(&senderAddress), reinterpret_cast<sockaddr*>(&senderAddress),
&senderAddressSize); &senderAddressSize
);
if(bytesReceived == SOCKET_ERROR) { if(bytesReceived == SOCKET_ERROR) {
// handle error /* Handle error */
#if FSFW_CPP_OSTREAM_ENABLED == 1 #if FSFW_CPP_OSTREAM_ENABLED == 1
sif::error << "TcWinUdpPollingTask::performOperation: Reception" sif::error << "TcWinUdpPollingTask::performOperation: Reception error." << std::endl;
" error." << std::endl;
#endif #endif
handleReadError(); tcpip::handleError(tcpip::Protocol::UDP, tcpip::ErrorSources::RECVFROM_CALL, 1000);
continue; continue;
} }
#if FSFW_CPP_OSTREAM_ENABLED == 1 #if FSFW_CPP_OSTREAM_ENABLED == 1 && FSFW_UDP_RCV_WIRETAPPING_ENABLED == 1
//sif::debug << "TcWinUdpPollingTask::performOperation: " << bytesReceived sif::debug << "TcWinUdpPollingTask::performOperation: " << bytesReceived <<
// << " bytes received" << std::endl; " bytes received" << std::endl;
#endif #endif
ReturnValue_t result = handleSuccessfullTcRead(bytesReceived); ReturnValue_t result = handleSuccessfullTcRead(bytesReceived);
if(result != HasReturnvaluesIF::RETURN_FAILED) { if(result != HasReturnvaluesIF::RETURN_FAILED) {
} }
tmtcBridge->registerCommConnect();
tmtcBridge->checkAndSetClientAddress(senderAddress); tmtcBridge->checkAndSetClientAddress(senderAddress);
} }
return HasReturnvaluesIF::RETURN_OK; return HasReturnvaluesIF::RETURN_OK;
@ -68,15 +74,20 @@ ReturnValue_t TcWinUdpPollingTask::performOperation(uint8_t opCode) {
ReturnValue_t TcWinUdpPollingTask::handleSuccessfullTcRead(size_t bytesRead) { ReturnValue_t TcWinUdpPollingTask::handleSuccessfullTcRead(size_t bytesRead) {
store_address_t storeId; store_address_t storeId;
ReturnValue_t result = tcStore->addData(&storeId,
receptionBuffer.data(), bytesRead); #if FSFW_UDP_RCV_WIRETAPPING_ENABLED == 1
// arrayprinter::print(receptionBuffer.data(), bytesRead); arrayprinter::print(receptionBuffer.data(), bytesRead);
if (result != HasReturnvaluesIF::RETURN_OK) {
#if FSFW_CPP_OSTREAM_ENABLED == 1
sif::error << "TcSerialPollingTask::transferPusToSoftwareBus: Data "
"storage failed" << std::endl;
sif::error << "Packet size: " << bytesRead << std::endl;
#endif #endif
ReturnValue_t result = tcStore->addData(&storeId, receptionBuffer.data(), bytesRead);
if (result != HasReturnvaluesIF::RETURN_OK) {
#if FSFW_VERBOSE_LEVEL >= 1
#if FSFW_CPP_OSTREAM_ENABLED == 1
sif::warning<< "TcWinUdpPollingTask::transferPusToSoftwareBus: Data storage failed." <<
std::endl;
sif::warning << "Packet size: " << bytesRead << std::endl;
#endif /* FSFW_CPP_OSTREAM_ENABLED == 1 */
#endif /* FSFW_VERBOSE_LEVEL >= 1 */
return HasReturnvaluesIF::RETURN_FAILED; return HasReturnvaluesIF::RETURN_FAILED;
} }
@ -84,10 +95,12 @@ ReturnValue_t TcWinUdpPollingTask::handleSuccessfullTcRead(size_t bytesRead) {
result = MessageQueueSenderIF::sendMessage(targetTcDestination, &message); result = MessageQueueSenderIF::sendMessage(targetTcDestination, &message);
if (result != HasReturnvaluesIF::RETURN_OK) { if (result != HasReturnvaluesIF::RETURN_OK) {
#if FSFW_VERBOSE_LEVEL >= 1
#if FSFW_CPP_OSTREAM_ENABLED == 1 #if FSFW_CPP_OSTREAM_ENABLED == 1
sif::error << "Serial Polling: Sending message to queue failed" sif::warning << "TcWinUdpPollingTask::handleSuccessfullTcRead: "
<< std::endl; " Sending message to queue failed" << std::endl;
#endif #endif /* FSFW_CPP_OSTREAM_ENABLED == 1 */
#endif /* FSFW_VERBOSE_LEVEL >= 1 */
tcStore->deleteData(storeId); tcStore->deleteData(storeId);
} }
return result; return result;
@ -97,8 +110,7 @@ ReturnValue_t TcWinUdpPollingTask::initialize() {
tcStore = objectManager->get<StorageManagerIF>(objects::TC_STORE); tcStore = objectManager->get<StorageManagerIF>(objects::TC_STORE);
if (tcStore == nullptr) { if (tcStore == nullptr) {
#if FSFW_CPP_OSTREAM_ENABLED == 1 #if FSFW_CPP_OSTREAM_ENABLED == 1
sif::error << "TcSerialPollingTask::initialize: TC Store uninitialized!" sif::error << "TcWinUdpPollingTask::initialize: TC store uninitialized!" << std::endl;
<< std::endl;
#endif #endif
return ObjectManagerIF::CHILD_INIT_FAILED; return ObjectManagerIF::CHILD_INIT_FAILED;
} }
@ -106,25 +118,21 @@ ReturnValue_t TcWinUdpPollingTask::initialize() {
tmtcBridge = objectManager->get<TmTcWinUdpBridge>(tmtcBridgeId); tmtcBridge = objectManager->get<TmTcWinUdpBridge>(tmtcBridgeId);
if(tmtcBridge == nullptr) { if(tmtcBridge == nullptr) {
#if FSFW_CPP_OSTREAM_ENABLED == 1 #if FSFW_CPP_OSTREAM_ENABLED == 1
sif::error << "TcSocketPollingTask::TcSocketPollingTask: Invalid" sif::error << "TcWinUdpPollingTask::initialize: Invalid TMTC bridge object!" <<
" TMTC bridge object!" << std::endl; std::endl;
#endif #endif
return ObjectManagerIF::CHILD_INIT_FAILED; return ObjectManagerIF::CHILD_INIT_FAILED;
} }
serverUdpSocket = tmtcBridge->serverSocket;
#if FSFW_CPP_OSTREAM_ENABLED == 1
//sif::info << "TcWinUdpPollingTask::initialize: Server UDP socket "
// << serverUdpSocket << std::endl;
#endif
return HasReturnvaluesIF::RETURN_OK; return HasReturnvaluesIF::RETURN_OK;
} }
ReturnValue_t TcWinUdpPollingTask::initializeAfterTaskCreation() { ReturnValue_t TcWinUdpPollingTask::initializeAfterTaskCreation() {
// Initialize the destination after task creation. This ensures /* Initialize the destination after task creation. This ensures
// that the destination has already been set in the TMTC bridge. that the destination has already been set in the TMTC bridge. */
targetTcDestination = tmtcBridge->getRequestQueue(); targetTcDestination = tmtcBridge->getRequestQueue();
    /* The server socket is set up in the bridge initialization. Calling this function here
    ensures that it is set up properly in any case. */
serverUdpSocket = tmtcBridge->serverSocket;
return HasReturnvaluesIF::RETURN_OK; return HasReturnvaluesIF::RETURN_OK;
} }
@ -139,39 +147,3 @@ void TcWinUdpPollingTask::setTimeout(double timeoutSeconds) {
#endif #endif
} }
} }
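Note on the timeout handling: the task takes a timeout in seconds while the Winsock receive path works with a timeval. A hedged sketch of the kind of conversion setTimeout() presumably performs (the actual function body is not part of this excerpt, and the helper name is illustrative):

#include <winsock2.h>

/* Illustrative only: split a timeout given in seconds into the timeval
   representation used for socket receive timeouts. */
timeval secondsToTimeval(double timeoutSeconds) {
    timeval tval = {};
    tval.tv_sec = static_cast<long>(timeoutSeconds);
    tval.tv_usec = static_cast<long>((timeoutSeconds - tval.tv_sec) * 1000.0 * 1000.0);
    return tval;
}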
void TcWinUdpPollingTask::handleReadError() {
int error = WSAGetLastError();
switch(error) {
case(WSANOTINITIALISED): {
#if FSFW_CPP_OSTREAM_ENABLED == 1
sif::info << "TcWinUdpPollingTask::handleReadError: WSANOTINITIALISED: "
<< "WSAStartup(...) call " << "necessary" << std::endl;
#endif
break;
}
case(WSAEFAULT): {
#if FSFW_CPP_OSTREAM_ENABLED == 1
sif::info << "TcWinUdpPollingTask::handleReadError: WSADEFAULT: "
<< "Bad address " << std::endl;
#endif
break;
}
case(WSAEINVAL): {
#if FSFW_CPP_OSTREAM_ENABLED == 1
sif::info << "TcWinUdpPollingTask::handleReadError: WSAEINVAL: "
<< "Invalid input parameters. " << std::endl;
#endif
break;
}
default: {
#if FSFW_CPP_OSTREAM_ENABLED == 1
sif::info << "TcWinUdpPollingTask::handleReadError: Error code: "
<< error << std::endl;
#endif
break;
}
}
// to prevent spam.
Sleep(1000);
}

View File

@ -23,7 +23,7 @@ class TcWinUdpPollingTask: public SystemObject,
public: public:
static constexpr size_t DEFAULT_MAX_FRAME_SIZE = 2048; static constexpr size_t DEFAULT_MAX_FRAME_SIZE = 2048;
//! 0.5 default milliseconds timeout for now. //! 0.5 default milliseconds timeout for now.
static constexpr timeval DEFAULT_TIMEOUT = {.tv_sec = 0, .tv_usec = 500}; static constexpr timeval DEFAULT_TIMEOUT = {0, 500};
TcWinUdpPollingTask(object_id_t objectId, object_id_t tmtcUnixUdpBridge, TcWinUdpPollingTask(object_id_t objectId, object_id_t tmtcUnixUdpBridge,
size_t frameSize = 0, double timeoutSeconds = -1); size_t frameSize = 0, double timeoutSeconds = -1);
@ -48,11 +48,12 @@ private:
object_id_t tmtcBridgeId = objects::NO_OBJECT; object_id_t tmtcBridgeId = objects::NO_OBJECT;
TmTcWinUdpBridge* tmtcBridge = nullptr; TmTcWinUdpBridge* tmtcBridge = nullptr;
MessageQueueId_t targetTcDestination = MessageQueueIF::NO_QUEUE; MessageQueueId_t targetTcDestination = MessageQueueIF::NO_QUEUE;
//! Reception flags: https://linux.die.net/man/2/recvfrom.
//! See: https://docs.microsoft.com/en-us/windows/win32/api/winsock/nf-winsock-recvfrom
int receptionFlags = 0; int receptionFlags = 0;
    //! Server socket, which is member of TMTC bridge and is assigned in //! Server socket, which is a member of the TMTC bridge.
    //! constructor //! Will be cached shortly after SW initialization.
SOCKET serverUdpSocket = 0; SOCKET serverUdpSocket = 0;
std::vector<uint8_t> receptionBuffer; std::vector<uint8_t> receptionBuffer;
@ -61,7 +62,6 @@ private:
timeval receptionTimeout; timeval receptionTimeout;
ReturnValue_t handleSuccessfullTcRead(size_t bytesRead); ReturnValue_t handleSuccessfullTcRead(size_t bytesRead);
void handleReadError();
}; };
#endif /* FRAMEWORK_OSAL_LINUX_TCSOCKETPOLLINGTASK_H_ */ #endif /* FRAMEWORK_OSAL_LINUX_TCSOCKETPOLLINGTASK_H_ */

View File

@ -1,14 +1,40 @@
#include <fsfw/ipc/MutexHelper.h>
#include "TmTcWinUdpBridge.h" #include "TmTcWinUdpBridge.h"
#include "tcpipHelpers.h"
TmTcWinUdpBridge::TmTcWinUdpBridge(object_id_t objectId, #include <fsfw/serviceinterface/ServiceInterface.h>
object_id_t tcDestination, object_id_t tmStoreId, object_id_t tcStoreId, #include <fsfw/ipc/MutexGuard.h>
uint16_t serverPort, uint16_t clientPort): #include <ws2tcpip.h>
//! Debugging preprocessor define.
#define FSFW_UDP_SEND_WIRETAPPING_ENABLED 0
const std::string TmTcWinUdpBridge::DEFAULT_UDP_SERVER_PORT = tcpip::DEFAULT_UDP_SERVER_PORT;
TmTcWinUdpBridge::TmTcWinUdpBridge(object_id_t objectId, object_id_t tcDestination,
object_id_t tmStoreId, object_id_t tcStoreId, std::string udpServerPort):
TmTcBridge(objectId, tcDestination, tmStoreId, tcStoreId) { TmTcBridge(objectId, tcDestination, tmStoreId, tcStoreId) {
if(udpServerPort == "") {
this->udpServerPort = DEFAULT_UDP_SERVER_PORT;
}
else {
this->udpServerPort = udpServerPort;
}
mutex = MutexFactory::instance()->createMutex(); mutex = MutexFactory::instance()->createMutex();
communicationLinkUp = false; communicationLinkUp = false;
}
// Initiates Winsock DLL. ReturnValue_t TmTcWinUdpBridge::initialize() {
ReturnValue_t result = TmTcBridge::initialize();
if(result != HasReturnvaluesIF::RETURN_OK) {
#if FSFW_CPP_OSTREAM_ENABLED == 1
sif::error << "TmTcWinUdpBridge::initialize: TmTcBridge initialization failed!"
<< std::endl;
#endif
return result;
}
/* Initiates Winsock DLL. */
WSAData wsaData; WSAData wsaData;
WORD wVersionRequested = MAKEWORD(2, 2); WORD wVersionRequested = MAKEWORD(2, 2);
int err = WSAStartup(wVersionRequested, &wsaData); int err = WSAStartup(wVersionRequested, &wsaData);
@ -16,197 +42,125 @@ TmTcWinUdpBridge::TmTcWinUdpBridge(object_id_t objectId,
/* Tell the user that we could not find a usable */ /* Tell the user that we could not find a usable */
/* Winsock DLL. */ /* Winsock DLL. */
#if FSFW_CPP_OSTREAM_ENABLED == 1 #if FSFW_CPP_OSTREAM_ENABLED == 1
sif::error << "TmTcWinUdpBridge::TmTcWinUdpBridge:" sif::error << "TmTcWinUdpBridge::TmTcWinUdpBridge: WSAStartup failed with error: " <<
"WSAStartup failed with error: " << err << std::endl; err << std::endl;
#else
sif::printError("TmTcWinUdpBridge::TmTcWinUdpBridge: WSAStartup failed with error: %d\n",
err);
#endif #endif
return; return HasReturnvaluesIF::RETURN_FAILED;
} }
uint16_t setServerPort = DEFAULT_UDP_SERVER_PORT; struct addrinfo *addrResult = nullptr;
if(serverPort != 0xFFFF) { struct addrinfo hints;
setServerPort = serverPort;
ZeroMemory(&hints, sizeof (hints));
hints.ai_family = AF_INET;
hints.ai_socktype = SOCK_DGRAM;
hints.ai_protocol = IPPROTO_UDP;
/* See:
https://docs.microsoft.com/en-us/windows/win32/api/ws2tcpip/nf-ws2tcpip-getaddrinfo
for information about AI_PASSIVE. */
hints.ai_flags = AI_PASSIVE;
/* Set up UDP socket:
https://en.wikipedia.org/wiki/Getaddrinfo
Passing nullptr as the first parameter and specifying AI_PASSIVE in hints will cause
getaddrinfo to assign the address 0.0.0.0 (any address) */
int retval = getaddrinfo(nullptr, udpServerPort.c_str(), &hints, &addrResult);
if (retval != 0) {
#if FSFW_CPP_OSTREAM_ENABLED == 1
sif::warning << "TmTcWinUdpBridge::TmTcWinUdpBridge: Retrieving address info failed!" <<
std::endl;
#endif
return HasReturnvaluesIF::RETURN_FAILED;
} }
uint16_t setClientPort = DEFAULT_UDP_CLIENT_PORT; serverSocket = socket(addrResult->ai_family, addrResult->ai_socktype, addrResult->ai_protocol);
if(clientPort != 0xFFFF) {
setClientPort = clientPort;
}
// Set up UDP socket: https://man7.org/linux/man-pages/man7/ip.7.html
//clientSocket = socket(AF_INET, SOCK_DGRAM, 0);
serverSocket = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP);
if(serverSocket == INVALID_SOCKET) { if(serverSocket == INVALID_SOCKET) {
#if FSFW_CPP_OSTREAM_ENABLED == 1 #if FSFW_CPP_OSTREAM_ENABLED == 1
sif::error << "TmTcWinUdpBridge::TmTcWinUdpBridge: Could not open" sif::warning << "TmTcWinUdpBridge::TmTcWinUdpBridge: Could not open UDP socket!" <<
" UDP socket!" << std::endl; std::endl;
#endif #endif
handleSocketError(); freeaddrinfo(addrResult);
return; tcpip::handleError(tcpip::Protocol::UDP, tcpip::ErrorSources::SOCKET_CALL);
return HasReturnvaluesIF::RETURN_FAILED;
} }
serverAddress.sin_family = AF_INET; retval = bind(serverSocket, addrResult->ai_addr, static_cast<int>(addrResult->ai_addrlen));
if(retval != 0) {
// Accept packets from any interface. (potentially insecure).
serverAddress.sin_addr.s_addr = htonl(INADDR_ANY);
serverAddress.sin_port = htons(setServerPort);
serverAddressLen = sizeof(serverAddress);
setsockopt(serverSocket, SOL_SOCKET, SO_REUSEADDR,
reinterpret_cast<const char*>(&serverSocketOptions),
sizeof(serverSocketOptions));
clientAddress.sin_family = AF_INET;
clientAddress.sin_addr.s_addr = htonl(INADDR_ANY);
clientAddress.sin_port = htons(setClientPort);
clientAddressLen = sizeof(clientAddress);
int result = bind(serverSocket,
reinterpret_cast<struct sockaddr*>(&serverAddress),
serverAddressLen);
if(result != 0) {
#if FSFW_CPP_OSTREAM_ENABLED == 1 #if FSFW_CPP_OSTREAM_ENABLED == 1
sif::error << "TmTcWinUdpBridge::TmTcWinUdpBridge: Could not bind " sif::error << "TmTcWinUdpBridge::TmTcWinUdpBridge: Could not bind "
"local port " << setServerPort << " to server socket!" "local port (" << udpServerPort << ") to server socket!" << std::endl;
<< std::endl;
#endif #endif
handleBindError(); freeaddrinfo(addrResult);
tcpip::handleError(tcpip::Protocol::UDP, tcpip::ErrorSources::BIND_CALL);
} }
freeaddrinfo(addrResult);
return HasReturnvaluesIF::RETURN_OK;
} }
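For reference, the getaddrinfo based setup used above can be reduced to the following standalone sketch. The helper name openUdpServerSocket is illustrative and not part of the framework; WSAStartup is assumed to have been called already, as done in initialize():

#include <winsock2.h>
#include <ws2tcpip.h>
#include <string>

/* Open and bind a UDP server socket on the given port string (wildcard address). */
SOCKET openUdpServerSocket(const std::string& portString) {
    struct addrinfo hints = {};
    struct addrinfo* addrResult = nullptr;
    hints.ai_family = AF_INET;
    hints.ai_socktype = SOCK_DGRAM;
    hints.ai_protocol = IPPROTO_UDP;
    /* nullptr node + AI_PASSIVE: getaddrinfo returns the wildcard address 0.0.0.0 */
    hints.ai_flags = AI_PASSIVE;
    if(getaddrinfo(nullptr, portString.c_str(), &hints, &addrResult) != 0) {
        return INVALID_SOCKET;
    }
    SOCKET sock = socket(addrResult->ai_family, addrResult->ai_socktype, addrResult->ai_protocol);
    if(sock == INVALID_SOCKET) {
        freeaddrinfo(addrResult);
        return INVALID_SOCKET;
    }
    if(bind(sock, addrResult->ai_addr, static_cast<int>(addrResult->ai_addrlen)) != 0) {
        closesocket(sock);
        freeaddrinfo(addrResult);
        return INVALID_SOCKET;
    }
    freeaddrinfo(addrResult);
    return sock;
}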
TmTcWinUdpBridge::~TmTcWinUdpBridge() { TmTcWinUdpBridge::~TmTcWinUdpBridge() {
if(mutex != nullptr) {
MutexFactory::instance()->deleteMutex(mutex);
}
closesocket(serverSocket);
WSACleanup(); WSACleanup();
} }
ReturnValue_t TmTcWinUdpBridge::sendTm(const uint8_t *data, size_t dataLen) { ReturnValue_t TmTcWinUdpBridge::sendTm(const uint8_t *data, size_t dataLen) {
int flags = 0; int flags = 0;
//clientAddress.sin_addr.s_addr = htons(INADDR_ANY); /* The target address can be set by different threads so this lock ensures thread-safety */
//clientAddressLen = sizeof(serverAddress); MutexGuard lock(mutex, timeoutType, mutexTimeoutMs);
// char ipAddress [15]; #if FSFW_CPP_OSTREAM_ENABLED == 1 && FSFW_UDP_SEND_WIRETAPPING_ENABLED == 1
#if FSFW_CPP_OSTREAM_ENABLED == 1 char ipAddress [15];
// sif::debug << "IP Address Sender: "<< inet_ntop(AF_INET, sif::debug << "IP Address Sender: "<< inet_ntop(AF_INET,
// &clientAddress.sin_addr.s_addr, ipAddress, 15) << std::endl; &clientAddress.sin_addr.s_addr, ipAddress, 15) << std::endl;
#endif #endif
ssize_t bytesSent = sendto(serverSocket, int bytesSent = sendto(
reinterpret_cast<const char*>(data), dataLen, flags, serverSocket,
reinterpret_cast<sockaddr*>(&clientAddress), clientAddressLen); reinterpret_cast<const char*>(data),
dataLen,
flags,
reinterpret_cast<sockaddr*>(&clientAddress),
clientAddressLen
);
if(bytesSent == SOCKET_ERROR) { if(bytesSent == SOCKET_ERROR) {
#if FSFW_CPP_OSTREAM_ENABLED == 1 #if FSFW_CPP_OSTREAM_ENABLED == 1
sif::error << "TmTcWinUdpBridge::sendTm: Send operation failed." sif::warning << "TmTcWinUdpBridge::sendTm: Send operation failed." << std::endl;
<< std::endl;
#endif #endif
handleSendError(); tcpip::handleError(tcpip::Protocol::UDP, tcpip::ErrorSources::SENDTO_CALL);
} }
#if FSFW_CPP_OSTREAM_ENABLED == 1 #if FSFW_CPP_OSTREAM_ENABLED == 1 && FSFW_UDP_SEND_WIRETAPPING_ENABLED == 1
// sif::debug << "TmTcUnixUdpBridge::sendTm: " << bytesSent << " bytes were" sif::debug << "TmTcUnixUdpBridge::sendTm: " << bytesSent << " bytes were"
// " sent." << std::endl; " sent." << std::endl;
#endif #endif
return HasReturnvaluesIF::RETURN_OK; return HasReturnvaluesIF::RETURN_OK;
} }
void TmTcWinUdpBridge::checkAndSetClientAddress(sockaddr_in newAddress) { void TmTcWinUdpBridge::checkAndSetClientAddress(sockaddr_in& newAddress) {
MutexHelper lock(mutex, MutexIF::TimeoutType::WAITING, 10); /* The target address can be set by different threads so this lock ensures thread-safety */
MutexGuard lock(mutex, timeoutType, mutexTimeoutMs);
// char ipAddress [15]; #if FSFW_CPP_OSTREAM_ENABLED == 1 && FSFW_UDP_SEND_WIRETAPPING_ENABLED == 1
#if FSFW_CPP_OSTREAM_ENABLED == 1 char ipAddress [15];
// sif::debug << "IP Address Sender: "<< inet_ntop(AF_INET, sif::debug << "IP Address Sender: "<< inet_ntop(AF_INET,
// &newAddress.sin_addr.s_addr, ipAddress, 15) << std::endl; &newAddress.sin_addr.s_addr, ipAddress, 15) << std::endl;
// sif::debug << "IP Address Old: " << inet_ntop(AF_INET, sif::debug << "IP Address Old: " << inet_ntop(AF_INET,
// &clientAddress.sin_addr.s_addr, ipAddress, 15) << std::endl; &clientAddress.sin_addr.s_addr, ipAddress, 15) << std::endl;
#endif #endif
registerCommConnect(); registerCommConnect();
// Set new IP address if it has changed. /* Set new IP address to reply to */
if(clientAddress.sin_addr.s_addr != newAddress.sin_addr.s_addr) { clientAddress = newAddress;
clientAddress.sin_addr.s_addr = newAddress.sin_addr.s_addr; clientAddressLen = sizeof(clientAddress);
clientAddressLen = sizeof(clientAddress);
}
} }
void TmTcWinUdpBridge::handleSocketError() { void TmTcWinUdpBridge::setMutexProperties(MutexIF::TimeoutType timeoutType,
int errCode = WSAGetLastError(); dur_millis_t timeoutMs) {
switch(errCode) { this->timeoutType = timeoutType;
case(WSANOTINITIALISED): { this->mutexTimeoutMs = timeoutMs;
#if FSFW_CPP_OSTREAM_ENABLED == 1
sif::info << "TmTcWinUdpBridge::handleSocketError: WSANOTINITIALISED: "
<< "WSAStartup(...) call necessary" << std::endl;
#endif
break;
}
default: {
/*
https://docs.microsoft.com/en-us/windows/win32/winsock/
windows-sockets-error-codes-2
*/
#if FSFW_CPP_OSTREAM_ENABLED == 1
sif::info << "TmTcWinUdpBridge::handleSocketError: Error code: "
<< errCode << std::endl;
#endif
break;
}
}
} }
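The MutexGuard used in sendTm() and checkAndSetClientAddress() is a RAII wrapper: the lock is taken in the constructor and released in the destructor. A minimal sketch using only the constructor signature visible in this changeset; the guarded function and counter are illustrative:

#include <fsfw/ipc/MutexGuard.h>
#include <cstdint>

/* Illustrative only: protect a shared counter for the duration of the scope. */
void incrementSharedCounter(MutexIF* mutex, uint32_t& sharedCounter) {
    /* Locks here, unlocks automatically when the guard goes out of scope,
       even on early return. */
    MutexGuard guard(mutex, MutexIF::TimeoutType::WAITING, 20);
    sharedCounter++;
}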
void TmTcWinUdpBridge::handleBindError() {
int errCode = WSAGetLastError();
switch(errCode) {
case(WSANOTINITIALISED): {
#if FSFW_CPP_OSTREAM_ENABLED == 1
sif::info << "TmTcWinUdpBridge::handleBindError: WSANOTINITIALISED: "
<< "WSAStartup(...) call " << "necessary" << std::endl;
#endif
break;
}
case(WSAEADDRINUSE): {
#if FSFW_CPP_OSTREAM_ENABLED == 1
sif::warning << "TmTcWinUdpBridge::handleBindError: WSAEADDRINUSE: "
<< "Port is already in use!" << std::endl;
#endif
break;
}
default: {
/*
https://docs.microsoft.com/en-us/windows/win32/winsock/
windows-sockets-error-codes-2
*/
#if FSFW_CPP_OSTREAM_ENABLED == 1
sif::info << "TmTcWinUdpBridge::handleBindError: Error code: "
<< errCode << std::endl;
#endif
break;
}
}
}
void TmTcWinUdpBridge::handleSendError() {
int errCode = WSAGetLastError();
switch(errCode) {
case(WSANOTINITIALISED): {
#if FSFW_CPP_OSTREAM_ENABLED == 1
sif::info << "TmTcWinUdpBridge::handleSendError: WSANOTINITIALISED: "
<< "WSAStartup(...) call necessary" << std::endl;
#endif
break;
}
case(WSAEADDRNOTAVAIL): {
#if FSFW_CPP_OSTREAM_ENABLED == 1
sif::info << "TmTcWinUdpBridge::handleSendError: WSAEADDRNOTAVAIL: "
<< "Check target address. " << std::endl;
#endif
break;
}
default: {
/*
https://docs.microsoft.com/en-us/windows/win32/winsock/
windows-sockets-error-codes-2
*/
#if FSFW_CPP_OSTREAM_ENABLED == 1
sif::info << "TmTcWinUdpBridge::handleSendError: Error code: "
<< errCode << std::endl;
#endif
break;
}
}
}

View File

@ -3,47 +3,43 @@
#include "../../tmtcservices/TmTcBridge.h" #include "../../tmtcservices/TmTcBridge.h"
#include <string>
#include <winsock2.h> #include <winsock2.h>
#include <windows.h>
class TmTcWinUdpBridge: public TmTcBridge { class TmTcWinUdpBridge: public TmTcBridge {
friend class TcWinUdpPollingTask; friend class TcWinUdpPollingTask;
public: public:
// The ports chosen here should not be used by any other process. /* The ports chosen here should not be used by any other process. */
static constexpr uint16_t DEFAULT_UDP_SERVER_PORT = 7301; static const std::string DEFAULT_UDP_SERVER_PORT;
static constexpr uint16_t DEFAULT_UDP_CLIENT_PORT = 7302;
TmTcWinUdpBridge(object_id_t objectId, object_id_t tcDestination, TmTcWinUdpBridge(object_id_t objectId, object_id_t tcDestination,
object_id_t tmStoreId, object_id_t tcStoreId, object_id_t tmStoreId, object_id_t tcStoreId, std::string udpServerPort = "");
uint16_t serverPort = 0xFFFF,uint16_t clientPort = 0xFFFF);
virtual~ TmTcWinUdpBridge(); virtual~ TmTcWinUdpBridge();
void checkAndSetClientAddress(sockaddr_in clientAddress); /**
* Set properties of internal mutex.
*/
void setMutexProperties(MutexIF::TimeoutType timeoutType, dur_millis_t timeoutMs);
ReturnValue_t initialize() override;
void checkAndSetClientAddress(sockaddr_in& clientAddress);
protected: protected:
virtual ReturnValue_t sendTm(const uint8_t * data, size_t dataLen) override; virtual ReturnValue_t sendTm(const uint8_t * data, size_t dataLen) override;
private: private:
SOCKET serverSocket = 0; SOCKET serverSocket = 0;
std::string udpServerPort;
const int serverSocketOptions = 0;
struct sockaddr_in clientAddress; struct sockaddr_in clientAddress;
int clientAddressLen = 0; int clientAddressLen = 0;
struct sockaddr_in serverAddress; //! Access to the client address is mutex protected as it is set by another task.
int serverAddressLen = 0; MutexIF::TimeoutType timeoutType = MutexIF::TimeoutType::WAITING;
dur_millis_t mutexTimeoutMs = 20;
//! Access to the client address is mutex protected as it is set
//! by another task.
MutexIF* mutex; MutexIF* mutex;
void handleSocketError();
void handleBindError();
void handleSendError();
}; };
#endif /* FSFW_OSAL_HOST_TMTCWINUDPBRIDGE_H_ */ #endif /* FSFW_OSAL_HOST_TMTCWINUDPBRIDGE_H_ */

View File

@ -0,0 +1,63 @@
#include "tcpipHelpers.h"
#include <FSFWConfig.h>
#include "../../tasks/TaskFactory.h"
#include "../../serviceinterface/ServiceInterface.h"
#include <winsock2.h>
#include <string>
void tcpip::handleError(Protocol protocol, ErrorSources errorSrc, dur_millis_t sleepDuration) {
#if FSFW_VERBOSE_LEVEL >= 1
int errCode = WSAGetLastError();
std::string protocolString;
std::string errorSrcString;
determineErrorStrings(protocol, errorSrc, protocolString, errorSrcString);
std::string infoString;
switch(errCode) {
case(WSANOTINITIALISED): {
infoString = "WSANOTINITIALISED";
break;
}
case(WSAEADDRINUSE): {
infoString = "WSAEADDRINUSE";
break;
}
case(WSAEFAULT): {
infoString = "WSAEFAULT";
break;
}
case(WSAEADDRNOTAVAIL): {
infoString = "WSAEADDRNOTAVAIL";
break;
}
case(WSAEINVAL): {
infoString = "WSAEINVAL";
break;
}
default: {
/*
https://docs.microsoft.com/en-us/windows/win32/winsock/windows-sockets-error-codes-2
*/
infoString = "Error code: " + std::to_string(errCode);
break;
}
}
#if FSFW_CPP_OSTREAM_ENABLED == 1
sif::warning << "tcpip::handleError: " << protocolString << " | " << errorSrcString <<
" | " << infoString << std::endl;
#else
        sif::printWarning("tcpip::handleError: %s | %s | %s\n", protocolString.c_str(),
                errorSrcString.c_str(), infoString.c_str());
#endif /* FSFW_CPP_OSTREAM_ENABLED == 1 */
#endif /* FSFW_VERBOSE_LEVEL >= 1 */
if(sleepDuration > 0) {
TaskFactory::instance()->delayTask(sleepDuration);
}
}
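Usage sketch for the helper above, as it appears at the sendto() call site in TmTcWinUdpBridge; the wrapper function is illustrative:

#include "tcpipHelpers.h"

/* Illustrative only: report a failed sendto() through the common helper and
   sleep for one second to prevent log spam. */
void reportUdpSendFailure() {
    tcpip::handleError(tcpip::Protocol::UDP, tcpip::ErrorSources::SENDTO_CALL, 1000);
}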

View File

@ -0,0 +1,15 @@
#ifndef FSFW_OSAL_WINDOWS_TCPIPHELPERS_H_
#define FSFW_OSAL_WINDOWS_TCPIPHELPERS_H_
#include "../../timemanager/clockDefinitions.h"
#include "../common/tcpipCommon.h"
namespace tcpip {
void handleError(Protocol protocol, ErrorSources errorSrc, dur_millis_t sleepDuration = 0);
}
#endif /* FSFW_OSAL_WINDOWS_TCPIPHELPERS_H_ */

View File

@ -1,6 +1,7 @@
#include "PowerComponent.h" #include "PowerComponent.h"
#include "../serialize/SerializeAdapter.h" #include "../serialize/SerializeAdapter.h"
PowerComponent::PowerComponent(): switchId1(0xFF), switchId2(0xFF), PowerComponent::PowerComponent(): switchId1(0xFF), switchId2(0xFF),
doIHaveTwoSwitches(false) { doIHaveTwoSwitches(false) {
} }
@ -8,23 +9,23 @@ PowerComponent::PowerComponent(): switchId1(0xFF), switchId2(0xFF),
PowerComponent::PowerComponent(object_id_t setId, uint8_t moduleId, float min, PowerComponent::PowerComponent(object_id_t setId, uint8_t moduleId, float min,
float max, uint8_t switchId1, bool twoSwitches, uint8_t switchId2) : float max, uint8_t switchId1, bool twoSwitches, uint8_t switchId2) :
deviceObjectId(setId), switchId1(switchId1), switchId2(switchId2), deviceObjectId(setId), switchId1(switchId1), switchId2(switchId2),
doIHaveTwoSwitches(twoSwitches), min(min), max(max), doIHaveTwoSwitches(twoSwitches), minPower(min), maxPower(max),
moduleId(moduleId) { moduleId(moduleId) {
} }
ReturnValue_t PowerComponent::serialize(uint8_t** buffer, size_t* size, ReturnValue_t PowerComponent::serialize(uint8_t** buffer, size_t* size,
size_t maxSize, Endianness streamEndianness) const { size_t maxSize, Endianness streamEndianness) const {
ReturnValue_t result = SerializeAdapter::serialize(&min, buffer, ReturnValue_t result = SerializeAdapter::serialize(&minPower, buffer,
size, maxSize, streamEndianness); size, maxSize, streamEndianness);
if (result != HasReturnvaluesIF::RETURN_OK) { if (result != HasReturnvaluesIF::RETURN_OK) {
return result; return result;
} }
return SerializeAdapter::serialize(&max, buffer, size, maxSize, return SerializeAdapter::serialize(&maxPower, buffer, size, maxSize,
streamEndianness); streamEndianness);
} }
size_t PowerComponent::getSerializedSize() const { size_t PowerComponent::getSerializedSize() const {
return sizeof(min) + sizeof(max); return sizeof(minPower) + sizeof(maxPower);
} }
object_id_t PowerComponent::getDeviceObjectId() { object_id_t PowerComponent::getDeviceObjectId() {
@ -44,21 +45,21 @@ bool PowerComponent::hasTwoSwitches() {
} }
float PowerComponent::getMin() { float PowerComponent::getMin() {
return min; return minPower;
} }
float PowerComponent::getMax() { float PowerComponent::getMax() {
return max; return maxPower;
} }
ReturnValue_t PowerComponent::deSerialize(const uint8_t** buffer, size_t* size, ReturnValue_t PowerComponent::deSerialize(const uint8_t** buffer, size_t* size,
Endianness streamEndianness) { Endianness streamEndianness) {
ReturnValue_t result = SerializeAdapter::deSerialize(&min, buffer, ReturnValue_t result = SerializeAdapter::deSerialize(&minPower, buffer,
size, streamEndianness); size, streamEndianness);
if (result != HasReturnvaluesIF::RETURN_OK) { if (result != HasReturnvaluesIF::RETURN_OK) {
return result; return result;
} }
return SerializeAdapter::deSerialize(&max, buffer, size, streamEndianness); return SerializeAdapter::deSerialize(&maxPower, buffer, size, streamEndianness);
} }
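A compact round-trip sketch of the SerializeAdapter calls used above, with a local float standing in for minPower/maxPower; the helper name and the HasReturnvaluesIF include path are assumptions based on the framework layout:

#include "../serialize/SerializeAdapter.h"
#include "../returnvalues/HasReturnvaluesIF.h"
#include <cstddef>
#include <cstdint>

/* Illustrative only: serialize a float into a raw buffer and read it back. */
ReturnValue_t floatRoundTrip(float input, float& output) {
    uint8_t rawBuffer[sizeof(float)] = {};
    uint8_t* writePtr = rawBuffer;
    size_t serializedSize = 0;
    ReturnValue_t result = SerializeAdapter::serialize(&input, &writePtr, &serializedSize,
            sizeof(rawBuffer), SerializeIF::Endianness::MACHINE);
    if(result != HasReturnvaluesIF::RETURN_OK) {
        return result;
    }
    const uint8_t* readPtr = rawBuffer;
    size_t remainingSize = serializedSize;
    return SerializeAdapter::deSerialize(&output, &readPtr, &remainingSize,
            SerializeIF::Endianness::MACHINE);
}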
ReturnValue_t PowerComponent::getParameter(uint8_t domainId, uint8_t uniqueId, ReturnValue_t PowerComponent::getParameter(uint8_t domainId, uint8_t uniqueId,
@ -69,10 +70,10 @@ ReturnValue_t PowerComponent::getParameter(uint8_t domainId, uint8_t uniqueId,
} }
switch (uniqueId) { switch (uniqueId) {
case 0: case 0:
parameterWrapper->set<>(min); parameterWrapper->set<>(minPower);
break; break;
case 1: case 1:
parameterWrapper->set<>(max); parameterWrapper->set<>(maxPower);
break; break;
default: default:
return INVALID_IDENTIFIER_ID; return INVALID_IDENTIFIER_ID;

View File

@ -9,7 +9,7 @@
class PowerComponent: public PowerComponentIF { class PowerComponent: public PowerComponentIF {
public: public:
PowerComponent(object_id_t setId, uint8_t moduleId, float min, float max, PowerComponent(object_id_t setId, uint8_t moduleId, float minPower, float maxPower,
uint8_t switchId1, bool twoSwitches = false, uint8_t switchId1, bool twoSwitches = false,
uint8_t switchId2 = 0xFF); uint8_t switchId2 = 0xFF);
@ -41,8 +41,8 @@ private:
const bool doIHaveTwoSwitches; const bool doIHaveTwoSwitches;
float min = 0.0; float minPower = 0.0;
float max = 0.0; float maxPower = 0.0;
uint8_t moduleId = 0; uint8_t moduleId = 0;

View File

@ -159,7 +159,7 @@ ReturnValue_t Service3Housekeeping::prepareCollectionIntervalModificationCommand
CommandMessage *command, object_id_t objectId, bool isDiagnostics, CommandMessage *command, object_id_t objectId, bool isDiagnostics,
const uint8_t *tcData, size_t tcDataLen) { const uint8_t *tcData, size_t tcDataLen) {
if(tcDataLen < sizeof(sid_t) + sizeof(float)) { if(tcDataLen < sizeof(sid_t) + sizeof(float)) {
// SID plus the size of the new collection intervL. /* SID plus the size of the new collection interval. */
return CommandingServiceBase::INVALID_TC; return CommandingServiceBase::INVALID_TC;
} }

View File

@ -15,7 +15,7 @@ PoolManager::~PoolManager(void) {
ReturnValue_t PoolManager::reserveSpace(const size_t size, ReturnValue_t PoolManager::reserveSpace(const size_t size,
store_address_t* address, bool ignoreFault) { store_address_t* address, bool ignoreFault) {
MutexHelper mutexHelper(mutex, MutexIF::TimeoutType::WAITING, MutexGuard mutexHelper(mutex, MutexIF::TimeoutType::WAITING,
mutexTimeoutMs); mutexTimeoutMs);
ReturnValue_t status = LocalPool::reserveSpace(size, ReturnValue_t status = LocalPool::reserveSpace(size,
address,ignoreFault); address,ignoreFault);
@ -32,7 +32,7 @@ ReturnValue_t PoolManager::deleteData(
". id is "<< storeId.packetIndex << std::endl; ". id is "<< storeId.packetIndex << std::endl;
#endif #endif
#endif #endif
MutexHelper mutexHelper(mutex, MutexIF::TimeoutType::WAITING, MutexGuard mutexHelper(mutex, MutexIF::TimeoutType::WAITING,
mutexTimeoutMs); mutexTimeoutMs);
return LocalPool::deleteData(storeId); return LocalPool::deleteData(storeId);
} }
@ -40,7 +40,7 @@ ReturnValue_t PoolManager::deleteData(
ReturnValue_t PoolManager::deleteData(uint8_t* buffer, ReturnValue_t PoolManager::deleteData(uint8_t* buffer,
size_t size, store_address_t* storeId) { size_t size, store_address_t* storeId) {
MutexHelper mutexHelper(mutex, MutexIF::TimeoutType::WAITING, 20); MutexGuard mutexHelper(mutex, MutexIF::TimeoutType::WAITING, 20);
ReturnValue_t status = LocalPool::deleteData(buffer, ReturnValue_t status = LocalPool::deleteData(buffer,
size, storeId); size, storeId);
return status; return status;

View File

@ -3,7 +3,7 @@
#include "LocalPool.h" #include "LocalPool.h"
#include "StorageAccessor.h" #include "StorageAccessor.h"
#include "../ipc/MutexHelper.h" #include "../ipc/MutexGuard.h"
/** /**

View File

@ -173,6 +173,9 @@ ReturnValue_t TmTcBridge::handleTmQueue() {
ReturnValue_t TmTcBridge::storeDownlinkData(TmTcMessage *message) { ReturnValue_t TmTcBridge::storeDownlinkData(TmTcMessage *message) {
store_address_t storeId = 0; store_address_t storeId = 0;
if(tmFifo == nullptr) {
return HasReturnvaluesIF::RETURN_FAILED;
}
if(tmFifo->full()) { if(tmFifo->full()) {
#if FSFW_CPP_OSTREAM_ENABLED == 1 #if FSFW_CPP_OSTREAM_ENABLED == 1

View File

@ -70,7 +70,7 @@ TEST_CASE( "Action Helper" , "[ActionHelper]") {
SECTION("Handle finish"){ SECTION("Handle finish"){
CHECK(not testMqMock.wasMessageSent()); CHECK(not testMqMock.wasMessageSent());
ReturnValue_t status = 0x9876; ReturnValue_t status = 0x9876;
actionHelper.finish(true, testMqMock.getId(), testActionId, status); actionHelper.finish(false, testMqMock.getId(), testActionId, status);
CHECK(testMqMock.wasMessageSent()); CHECK(testMqMock.wasMessageSent());
CommandMessage testMessage; CommandMessage testMessage;
REQUIRE(testMqMock.receiveMessage(&testMessage) == static_cast<uint32_t>(HasReturnvaluesIF::RETURN_OK)); REQUIRE(testMqMock.receiveMessage(&testMessage) == static_cast<uint32_t>(HasReturnvaluesIF::RETURN_OK));

View File

@ -3,4 +3,5 @@ target_sources(${TARGET_NAME} PRIVATE
LocalPoolVectorTest.cpp LocalPoolVectorTest.cpp
DataSetTest.cpp DataSetTest.cpp
LocalPoolManagerTest.cpp LocalPoolManagerTest.cpp
LocalPoolOwnerBase.cpp
) )

View File

@ -4,13 +4,14 @@
#include <catch2/catch_approx.hpp> #include <catch2/catch_approx.hpp>
#include <fsfw/datapoollocal/HasLocalDataPoolIF.h> #include <fsfw/datapoollocal/HasLocalDataPoolIF.h>
#include <fsfw/datapoollocal/SharedLocalDataSet.h>
#include <fsfw/datapoollocal/StaticLocalDataSet.h> #include <fsfw/datapoollocal/StaticLocalDataSet.h>
#include <fsfw/datapool/PoolReadHelper.h> #include <fsfw/datapool/PoolReadGuard.h>
#include <fsfw/globalfunctions/bitutility.h> #include <fsfw/globalfunctions/bitutility.h>
#include <unittest/core/CatchDefinitions.h> #include <unittest/core/CatchDefinitions.h>
TEST_CASE("LocalDataSet" , "[LocDataSetTest]") { TEST_CASE("DataSetTest" , "[DataSetTest]") {
LocalPoolOwnerBase* poolOwner = objectManager-> LocalPoolOwnerBase* poolOwner = objectManager->
get<LocalPoolOwnerBase>(objects::TEST_LOCAL_POOL_OWNER_BASE); get<LocalPoolOwnerBase>(objects::TEST_LOCAL_POOL_OWNER_BASE);
REQUIRE(poolOwner != nullptr); REQUIRE(poolOwner != nullptr);
@ -21,6 +22,7 @@ TEST_CASE("LocalDataSet" , "[LocDataSetTest]") {
SECTION("BasicTest") { SECTION("BasicTest") {
/* Test some basic functions */ /* Test some basic functions */
CHECK(localSet.getReportingEnabled() == false);
CHECK(localSet.getLocalPoolIdsSerializedSize(false) == 3 * sizeof(lp_id_t)); CHECK(localSet.getLocalPoolIdsSerializedSize(false) == 3 * sizeof(lp_id_t));
CHECK(localSet.getLocalPoolIdsSerializedSize(true) == CHECK(localSet.getLocalPoolIdsSerializedSize(true) ==
3 * sizeof(lp_id_t) + sizeof(uint8_t)); 3 * sizeof(lp_id_t) + sizeof(uint8_t));
@ -54,7 +56,7 @@ TEST_CASE("LocalDataSet" , "[LocDataSetTest]") {
{ {
/* Test read operation. Values should be all zeros */ /* Test read operation. Values should be all zeros */
PoolReadHelper readHelper(&localSet); PoolReadGuard readHelper(&localSet);
REQUIRE(readHelper.getReadResult() == retval::CATCH_OK); REQUIRE(readHelper.getReadResult() == retval::CATCH_OK);
CHECK(not localSet.isValid()); CHECK(not localSet.isValid());
CHECK(localSet.localPoolVarUint8.value == 0); CHECK(localSet.localPoolVarUint8.value == 0);
@ -79,10 +81,15 @@ TEST_CASE("LocalDataSet" , "[LocDataSetTest]") {
localSet.localPoolVarUint8 = 0; localSet.localPoolVarUint8 = 0;
localSet.localPoolVarFloat = 0; localSet.localPoolVarFloat = 0;
localSet.setAllVariablesReadOnly();
CHECK(localSet.localPoolUint16Vec.getReadWriteMode() == pool_rwm_t::VAR_READ);
CHECK(localSet.localPoolVarUint8.getReadWriteMode() == pool_rwm_t::VAR_READ);
CHECK(localSet.localPoolVarFloat.getReadWriteMode() == pool_rwm_t::VAR_READ);
{ {
/* Now we read again and check whether our zeroed values were overwritten with /* Now we read again and check whether our zeroed values were overwritten with
the values in the pool */ the values in the pool */
PoolReadHelper readHelper(&localSet); PoolReadGuard readHelper(&localSet);
REQUIRE(readHelper.getReadResult() == retval::CATCH_OK); REQUIRE(readHelper.getReadResult() == retval::CATCH_OK);
CHECK(localSet.isValid()); CHECK(localSet.isValid());
CHECK(localSet.localPoolVarUint8.value == 232); CHECK(localSet.localPoolVarUint8.value == 232);
@ -201,6 +208,75 @@ TEST_CASE("LocalDataSet" , "[LocDataSetTest]") {
} }
SECTION("MorePoolVariables") {
LocalDataSet set(poolOwner, 2, 10);
/* Register same variables again to get more than 8 registered variables */
for(uint8_t idx = 0; idx < 8; idx ++) {
REQUIRE(set.registerVariable(&localSet.localPoolVarUint8) == retval::CATCH_OK);
}
REQUIRE(set.registerVariable(&localSet.localPoolVarUint8) == retval::CATCH_OK);
REQUIRE(set.registerVariable(&localSet.localPoolUint16Vec) == retval::CATCH_OK);
set.setValidityBufferGeneration(true);
{
PoolReadGuard readHelper(&localSet);
localSet.localPoolVarUint8.value = 42;
localSet.localPoolVarUint8.setValid(true);
localSet.localPoolUint16Vec.setValid(false);
}
size_t maxSize = set.getSerializedSize();
CHECK(maxSize == 9 + sizeof(uint16_t) * 3 + 2);
size_t serSize = 0;
/* Already reserve additional space for validity buffer, will be needed later */
uint8_t buffer[maxSize + 1];
uint8_t* buffPtr = buffer;
CHECK(set.serialize(&buffPtr, &serSize, maxSize,
SerializeIF::Endianness::MACHINE) == retval::CATCH_OK);
std::array<uint8_t, 2> validityBuffer;
std::memcpy(validityBuffer.data(), buffer + 9 + sizeof(uint16_t) * 3, 2);
/* The first 9 variables should be valid */
CHECK(validityBuffer[0] == 0xff);
CHECK(bitutil::bitGet(validityBuffer.data() + 1, 0) == true);
CHECK(bitutil::bitGet(validityBuffer.data() + 1, 1) == false);
/* Now we invert the validity */
validityBuffer[0] = 0;
validityBuffer[1] = 0b0100'0000;
std::memcpy(buffer + 9 + sizeof(uint16_t) * 3, validityBuffer.data(), 2);
const uint8_t* constBuffPtr = buffer;
size_t sizeToDeSerialize = serSize;
CHECK(set.deSerialize(&constBuffPtr, &sizeToDeSerialize, SerializeIF::Endianness::MACHINE)
== retval::CATCH_OK);
CHECK(localSet.localPoolVarUint8.isValid() == false);
CHECK(localSet.localPoolUint16Vec.isValid() == true);
}
SECTION("SharedDataSet") {
object_id_t sharedSetId = objects::SHARED_SET_ID;
SharedLocalDataSet sharedSet(sharedSetId, poolOwner, lpool::testSetId, 5);
localSet.localPoolVarUint8.setReadWriteMode(pool_rwm_t::VAR_WRITE);
localSet.localPoolUint16Vec.setReadWriteMode(pool_rwm_t::VAR_WRITE);
CHECK(sharedSet.registerVariable(&localSet.localPoolVarUint8) == retval::CATCH_OK);
CHECK(sharedSet.registerVariable(&localSet.localPoolUint16Vec) == retval::CATCH_OK);
CHECK(sharedSet.initialize() == retval::CATCH_OK);
CHECK(sharedSet.lockDataset() == retval::CATCH_OK);
CHECK(sharedSet.unlockDataset() == retval::CATCH_OK);
{
//PoolReadGuard rg(&sharedSet);
//CHECK(rg.getReadResult() == retval::CATCH_OK);
localSet.localPoolVarUint8.value = 5;
localSet.localPoolUint16Vec.value[0] = 1;
localSet.localPoolUint16Vec.value[1] = 2;
localSet.localPoolUint16Vec.value[2] = 3;
CHECK(sharedSet.commit() == retval::CATCH_OK);
}
sharedSet.setReadCommitProtectionBehaviour(true);
}
/* we need to reset the subscription list because the pool owner /* we need to reset the subscription list because the pool owner
is a global object. */ is a global object. */
CHECK(poolOwner->reset() == retval::CATCH_OK); CHECK(poolOwner->reset() == retval::CATCH_OK);
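The PoolReadGuard used throughout this test is the RAII replacement for explicit read()/commit() calls: the read happens in the constructor and the commit in the destructor. A minimal sketch against the test dataset type from this fixture; the helper function is illustrative:

#include <fsfw/datapool/PoolReadGuard.h>

/* Illustrative only: read the set, update one variable, commit on scope exit. */
ReturnValue_t setUint8Var(LocalPoolTestDataSet& dataset, uint8_t newValue) {
    PoolReadGuard readGuard(&dataset);
    ReturnValue_t readResult = readGuard.getReadResult();
    if(readResult != HasReturnvaluesIF::RETURN_OK) {
        return readResult;
    }
    dataset.localPoolVarUint8.value = newValue;
    /* The commit back to the pool is performed by the guard destructor. */
    return HasReturnvaluesIF::RETURN_OK;
}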

View File

@ -3,7 +3,7 @@
#include <catch2/catch_test_macros.hpp> #include <catch2/catch_test_macros.hpp>
#include <catch2/catch_approx.hpp> #include <catch2/catch_approx.hpp>
#include <fsfw/datapool/PoolReadHelper.h> #include <fsfw/datapool/PoolReadGuard.h>
#include <fsfw/datapoollocal/HasLocalDataPoolIF.h> #include <fsfw/datapoollocal/HasLocalDataPoolIF.h>
#include <fsfw/datapoollocal/StaticLocalDataSet.h> #include <fsfw/datapoollocal/StaticLocalDataSet.h>
#include <fsfw/housekeeping/HousekeepingSnapshot.h> #include <fsfw/housekeeping/HousekeepingSnapshot.h>
@ -20,14 +20,21 @@ TEST_CASE("LocalPoolManagerTest" , "[LocManTest]") {
REQUIRE(poolOwner->initializeHkManager() == retval::CATCH_OK); REQUIRE(poolOwner->initializeHkManager() == retval::CATCH_OK);
REQUIRE(poolOwner->initializeHkManagerAfterTaskCreation() REQUIRE(poolOwner->initializeHkManagerAfterTaskCreation()
== retval::CATCH_OK); == retval::CATCH_OK);
//REQUIRE(poolOwner->dataset.assignPointers() == retval::CATCH_OK);
MessageQueueMockBase* mqMock = poolOwner->getMockQueueHandle(); MessageQueueMockBase* mqMock = poolOwner->getMockQueueHandle();
REQUIRE(mqMock != nullptr); REQUIRE(mqMock != nullptr);
CommandMessage messageSent; CommandMessage messageSent;
uint8_t messagesSent = 0; uint8_t messagesSent = 0;
SECTION("BasicTest") { SECTION("BasicTest") {
{
/* For code coverage, should not crash */
LocalDataPoolManager manager(nullptr, nullptr);
}
auto owner = poolOwner->poolManager.getOwner();
REQUIRE(owner != nullptr);
CHECK(owner->getObjectId() == objects::TEST_LOCAL_POOL_OWNER_BASE);
/* Subscribe for message generation on update. */ /* Subscribe for message generation on update. */
REQUIRE(poolOwner->subscribeWrapperSetUpdate() == retval::CATCH_OK); REQUIRE(poolOwner->subscribeWrapperSetUpdate() == retval::CATCH_OK);
/* Subscribe for an update message. */ /* Subscribe for an update message. */
@ -72,10 +79,10 @@ TEST_CASE("LocalPoolManagerTest" , "[LocManTest]") {
} }
SECTION("SnapshotUpdateTests") { SECTION("SetSnapshotUpdateTest") {
/* Set the variables in the set to certain values. These are checked later. */ /* Set the variables in the set to certain values. These are checked later. */
{ {
PoolReadHelper readHelper(&poolOwner->dataset); PoolReadGuard readHelper(&poolOwner->dataset);
REQUIRE(readHelper.getReadResult() == retval::CATCH_OK); REQUIRE(readHelper.getReadResult() == retval::CATCH_OK);
poolOwner->dataset.localPoolVarUint8.value = 5; poolOwner->dataset.localPoolVarUint8.value = 5;
poolOwner->dataset.localPoolVarFloat.value = -12.242; poolOwner->dataset.localPoolVarFloat.value = -12.242;
@ -137,7 +144,69 @@ TEST_CASE("LocalPoolManagerTest" , "[LocManTest]") {
CHECK(cdsShort.msDay_ll == Catch::Approx(timeCdsNow.msDay_ll).margin(1)); CHECK(cdsShort.msDay_ll == Catch::Approx(timeCdsNow.msDay_ll).margin(1));
} }
SECTION("AdvancedTests") { SECTION("VariableSnapshotTest") {
/* Acquire subscription interface */
ProvidesDataPoolSubscriptionIF* subscriptionIF = poolOwner->getSubscriptionInterface();
REQUIRE(subscriptionIF != nullptr);
/* Subscribe for variable snapshot */
REQUIRE(poolOwner->subscribeWrapperVariableSnapshot(lpool::uint8VarId) == retval::CATCH_OK);
auto poolVar = dynamic_cast<lp_var_t<uint8_t>*>(
poolOwner->getPoolObjectHandle(lpool::uint8VarId));
REQUIRE(poolVar != nullptr);
{
PoolReadGuard rg(poolVar);
CHECK(rg.getReadResult() == retval::CATCH_OK);
poolVar->value = 25;
}
poolVar->setChanged(true);
/* Store current time, we are going to check the (approximate) time equality later */
CCSDSTime::CDS_short timeCdsNow;
timeval now;
Clock::getClock_timeval(&now);
CCSDSTime::convertToCcsds(&timeCdsNow, &now);
REQUIRE(poolOwner->poolManager.performHkOperation() == retval::CATCH_OK);
/* Check update snapshot was sent. */
REQUIRE(mqMock->wasMessageSent(&messagesSent) == true);
CHECK(messagesSent == 1);
/* Should have been reset. */
CHECK(poolVar->hasChanged() == false);
REQUIRE(mqMock->receiveMessage(&messageSent) == retval::CATCH_OK);
CHECK(messageSent.getCommand() == static_cast<int>(
HousekeepingMessage::UPDATE_SNAPSHOT_VARIABLE));
        /* Now we deserialize the snapshot into a fresh pool variable copy */ /* Now we deserialize the snapshot into a new dataset instance */
CCSDSTime::CDS_short cdsShort;
lp_var_t<uint8_t> varCopy = lp_var_t<uint8_t>(lpool::uint8VarGpid);
HousekeepingSnapshot snapshot(&cdsShort, &varCopy);
store_address_t storeId;
HousekeepingMessage::getUpdateSnapshotVariableCommand(&messageSent, &storeId);
ConstAccessorPair accessorPair = tglob::getIpcStoreHandle()->getData(storeId);
REQUIRE(accessorPair.first == retval::CATCH_OK);
const uint8_t* readOnlyPtr = accessorPair.second.data();
size_t sizeToDeserialize = accessorPair.second.size();
CHECK(varCopy.value == 0);
/* Fill the dataset and timestamp */
REQUIRE(snapshot.deSerialize(&readOnlyPtr, &sizeToDeserialize,
SerializeIF::Endianness::MACHINE) == retval::CATCH_OK);
CHECK(varCopy.value == 25);
/* Now we check that both times are equal */
CHECK(cdsShort.pField == timeCdsNow.pField);
CHECK(cdsShort.dayLSB == Catch::Approx(timeCdsNow.dayLSB).margin(1));
CHECK(cdsShort.dayMSB == Catch::Approx(timeCdsNow.dayMSB).margin(1));
CHECK(cdsShort.msDay_h == Catch::Approx(timeCdsNow.msDay_h).margin(1));
CHECK(cdsShort.msDay_hh == Catch::Approx(timeCdsNow.msDay_hh).margin(1));
CHECK(cdsShort.msDay_l == Catch::Approx(timeCdsNow.msDay_l).margin(1));
CHECK(cdsShort.msDay_ll == Catch::Approx(timeCdsNow.msDay_ll).margin(1));
}
SECTION("VariableNotificationTest") {
/* Acquire subscription interface */ /* Acquire subscription interface */
ProvidesDataPoolSubscriptionIF* subscriptionIF = poolOwner->getSubscriptionInterface(); ProvidesDataPoolSubscriptionIF* subscriptionIF = poolOwner->getSubscriptionInterface();
REQUIRE(subscriptionIF != nullptr); REQUIRE(subscriptionIF != nullptr);
@ -149,6 +218,7 @@ TEST_CASE("LocalPoolManagerTest" , "[LocManTest]") {
poolOwner->getPoolObjectHandle(lpool::uint8VarId)); poolOwner->getPoolObjectHandle(lpool::uint8VarId));
REQUIRE(poolVar != nullptr); REQUIRE(poolVar != nullptr);
poolVar->setChanged(true); poolVar->setChanged(true);
REQUIRE(poolVar->hasChanged() == true);
REQUIRE(poolOwner->poolManager.performHkOperation() == retval::CATCH_OK); REQUIRE(poolOwner->poolManager.performHkOperation() == retval::CATCH_OK);
/* Check update notification was sent. */ /* Check update notification was sent. */
@ -190,6 +260,164 @@ TEST_CASE("LocalPoolManagerTest" , "[LocManTest]") {
REQUIRE(mqMock->receiveMessage(&messageSent) == static_cast<int>(MessageQueueIF::EMPTY)); REQUIRE(mqMock->receiveMessage(&messageSent) == static_cast<int>(MessageQueueIF::EMPTY));
} }
SECTION("PeriodicHKAndMessaging") {
        /* Now we subscribe for periodic HK generation. Even though it is difficult to simulate
        the temporal behaviour correctly, the HK manager should generate a HK packet
        immediately. The periodic helper depends on HK operation function calls anyway instead of
        using the clock, so we can also just call performHkOperation multiple times. */
REQUIRE(poolOwner->subscribePeriodicHk(true) == retval::CATCH_OK);
REQUIRE(poolOwner->poolManager.performHkOperation() == retval::CATCH_OK);
/* Now HK packet should be sent as message immediately. */
REQUIRE(mqMock->wasMessageSent(&messagesSent) == true);
CHECK(messagesSent == 1);
CHECK(mqMock->popMessage() == retval::CATCH_OK);
LocalPoolDataSetBase* setHandle = poolOwner->getDataSetHandle(lpool::testSid);
REQUIRE(setHandle != nullptr);
CHECK(poolOwner->poolManager.generateHousekeepingPacket(lpool::testSid,
setHandle, false) == retval::CATCH_OK);
REQUIRE(mqMock->wasMessageSent(&messagesSent) == true);
CHECK(messagesSent == 1);
CHECK(mqMock->popMessage() == retval::CATCH_OK);
CHECK(setHandle->getReportingEnabled() == true);
CommandMessage hkCmd;
HousekeepingMessage::setToggleReportingCommand(&hkCmd, lpool::testSid, false, false);
CHECK(poolOwner->poolManager.handleHousekeepingMessage(&hkCmd) == retval::CATCH_OK);
CHECK(setHandle->getReportingEnabled() == false);
REQUIRE(mqMock->wasMessageSent(&messagesSent) == true);
CHECK(messagesSent == 1);
CHECK(mqMock->popMessage() == retval::CATCH_OK);
HousekeepingMessage::setToggleReportingCommand(&hkCmd, lpool::testSid, true, false);
CHECK(poolOwner->poolManager.handleHousekeepingMessage(&hkCmd) == retval::CATCH_OK);
CHECK(setHandle->getReportingEnabled() == true);
REQUIRE(mqMock->wasMessageSent(&messagesSent) == true);
CHECK(mqMock->popMessage() == retval::CATCH_OK);
HousekeepingMessage::setToggleReportingCommand(&hkCmd, lpool::testSid, false, false);
CHECK(poolOwner->poolManager.handleHousekeepingMessage(&hkCmd) == retval::CATCH_OK);
CHECK(setHandle->getReportingEnabled() == false);
REQUIRE(mqMock->wasMessageSent(&messagesSent) == true);
CHECK(mqMock->popMessage() == retval::CATCH_OK);
HousekeepingMessage::setCollectionIntervalModificationCommand(&hkCmd,
lpool::testSid, 0.4, false);
CHECK(poolOwner->poolManager.handleHousekeepingMessage(&hkCmd) == retval::CATCH_OK);
/* For non-diagnostics and a specified minimum frequency of 0.2 seconds, the
resulting collection interval should be 1.0 second */
CHECK(poolOwner->dataset.getCollectionInterval() == 1.0);
REQUIRE(mqMock->wasMessageSent(&messagesSent) == true);
CHECK(messagesSent == 1);
CHECK(mqMock->popMessage() == retval::CATCH_OK);
HousekeepingMessage::setStructureReportingCommand(&hkCmd, lpool::testSid, false);
REQUIRE(poolOwner->poolManager.performHkOperation() == retval::CATCH_OK);
CHECK(poolOwner->poolManager.handleHousekeepingMessage(&hkCmd) == retval::CATCH_OK);
/* Now HK packet should be sent as message. */
REQUIRE(mqMock->wasMessageSent(&messagesSent) == true);
CHECK(messagesSent == 1);
CHECK(mqMock->popMessage() == retval::CATCH_OK);
HousekeepingMessage::setOneShotReportCommand(&hkCmd, lpool::testSid, false);
CHECK(poolOwner->poolManager.handleHousekeepingMessage(&hkCmd) == retval::CATCH_OK);
REQUIRE(mqMock->wasMessageSent(&messagesSent) == true);
CHECK(messagesSent == 1);
CHECK(mqMock->popMessage() == retval::CATCH_OK);
HousekeepingMessage::setUpdateNotificationSetCommand(&hkCmd, lpool::testSid);
sid_t sidToCheck;
store_address_t storeId;
CHECK(poolOwner->poolManager.handleHousekeepingMessage(&hkCmd) == retval::CATCH_OK);
CHECK(poolOwner->changedDataSetCallbackWasCalled(sidToCheck, storeId) == true);
CHECK(sidToCheck == lpool::testSid);
        /* Now we test the handling if the dataset is set to diagnostic */
poolOwner->dataset.setDiagnostic(true);
HousekeepingMessage::setStructureReportingCommand(&hkCmd, lpool::testSid, false);
CHECK(poolOwner->poolManager.handleHousekeepingMessage(&hkCmd) ==
static_cast<int>(LocalDataPoolManager::WRONG_HK_PACKET_TYPE));
        /* We still expect a failure message to be sent */
REQUIRE(mqMock->wasMessageSent(&messagesSent) == true);
CHECK(messagesSent == 1);
CHECK(mqMock->popMessage() == retval::CATCH_OK);
HousekeepingMessage::setCollectionIntervalModificationCommand(&hkCmd,
lpool::testSid, 0.4, false);
CHECK(poolOwner->poolManager.handleHousekeepingMessage(&hkCmd) ==
static_cast<int>(LocalDataPoolManager::WRONG_HK_PACKET_TYPE));
REQUIRE(mqMock->wasMessageSent(&messagesSent) == true);
CHECK(messagesSent == 1);
CHECK(mqMock->popMessage() == retval::CATCH_OK);
HousekeepingMessage::setStructureReportingCommand(&hkCmd, lpool::testSid, false);
CHECK(poolOwner->poolManager.handleHousekeepingMessage(&hkCmd) ==
static_cast<int>(LocalDataPoolManager::WRONG_HK_PACKET_TYPE));
REQUIRE(mqMock->wasMessageSent(&messagesSent) == true);
CHECK(messagesSent == 1);
CHECK(mqMock->popMessage() == retval::CATCH_OK);
HousekeepingMessage::setStructureReportingCommand(&hkCmd, lpool::testSid, true);
CHECK(poolOwner->poolManager.handleHousekeepingMessage(&hkCmd) == retval::CATCH_OK);
REQUIRE(mqMock->wasMessageSent(&messagesSent) == true);
CHECK(messagesSent == 1);
CHECK(mqMock->popMessage() == retval::CATCH_OK);
HousekeepingMessage::setCollectionIntervalModificationCommand(&hkCmd, lpool::testSid, 0.4,
true);
CHECK(poolOwner->poolManager.handleHousekeepingMessage(&hkCmd) == retval::CATCH_OK);
REQUIRE(mqMock->wasMessageSent(&messagesSent) == true);
CHECK(messagesSent == 1);
CHECK(mqMock->popMessage() == retval::CATCH_OK);
HousekeepingMessage::setToggleReportingCommand(&hkCmd, lpool::testSid, true, true);
CHECK(poolOwner->poolManager.handleHousekeepingMessage(&hkCmd) == retval::CATCH_OK);
REQUIRE(mqMock->wasMessageSent(&messagesSent) == true);
CHECK(messagesSent == 1);
CHECK(mqMock->popMessage() == retval::CATCH_OK);
HousekeepingMessage::setToggleReportingCommand(&hkCmd, lpool::testSid, false, true);
CHECK(poolOwner->poolManager.handleHousekeepingMessage(&hkCmd) == retval::CATCH_OK);
REQUIRE(mqMock->wasMessageSent(&messagesSent) == true);
CHECK(messagesSent == 1);
CHECK(mqMock->popMessage() == retval::CATCH_OK);
HousekeepingMessage::setOneShotReportCommand(&hkCmd, lpool::testSid, false);
CHECK(poolOwner->poolManager.handleHousekeepingMessage(&hkCmd) ==
static_cast<int>(LocalDataPoolManager::WRONG_HK_PACKET_TYPE));
REQUIRE(mqMock->wasMessageSent(&messagesSent) == true);
CHECK(messagesSent == 1);
CHECK(mqMock->popMessage() == retval::CATCH_OK);
HousekeepingMessage::setOneShotReportCommand(&hkCmd, lpool::testSid, true);
CHECK(poolOwner->poolManager.handleHousekeepingMessage(&hkCmd) == retval::CATCH_OK);
REQUIRE(mqMock->wasMessageSent(&messagesSent) == true);
CHECK(messagesSent == 1);
CHECK(mqMock->popMessage() == retval::CATCH_OK);
HousekeepingMessage::setUpdateNotificationVariableCommand(&hkCmd, lpool::uint8VarGpid);
gp_id_t gpidToCheck;
CHECK(poolOwner->poolManager.handleHousekeepingMessage(&hkCmd) == retval::CATCH_OK);
CHECK(poolOwner->changedVariableCallbackWasCalled(gpidToCheck, storeId) == true);
CHECK(gpidToCheck == lpool::uint8VarGpid);
HousekeepingMessage::setUpdateSnapshotSetCommand(&hkCmd, lpool::testSid,
storeId::INVALID_STORE_ADDRESS);
CHECK(poolOwner->poolManager.handleHousekeepingMessage(&hkCmd) == retval::CATCH_OK);
CHECK(poolOwner->changedDataSetCallbackWasCalled(sidToCheck, storeId) == true);
CHECK(sidToCheck == lpool::testSid);
HousekeepingMessage::setUpdateSnapshotVariableCommand(&hkCmd, lpool::uint8VarGpid,
storeId::INVALID_STORE_ADDRESS);
CHECK(poolOwner->poolManager.handleHousekeepingMessage(&hkCmd) == retval::CATCH_OK);
CHECK(poolOwner->changedVariableCallbackWasCalled(gpidToCheck, storeId) == true);
CHECK(gpidToCheck == lpool::uint8VarGpid);
poolOwner->poolManager.printPoolEntry(lpool::uint8VarId);
}
/* we need to reset the subscription list because the pool owner /* we need to reset the subscription list because the pool owner
is a global object. */ is a global object. */
CHECK(poolOwner->reset() == retval::CATCH_OK); CHECK(poolOwner->reset() == retval::CATCH_OK);
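All of the commands exercised above follow the same pattern: a setter on HousekeepingMessage builds the CommandMessage, which is then fed to handleHousekeepingMessage(). A condensed sketch reusing only calls visible in this test; the wrapper function and the LocalDataPoolManager include path are illustrative assumptions:

#include <fsfw/datapoollocal/LocalDataPoolManager.h>
#include <fsfw/housekeeping/HousekeepingMessage.h>

/* Illustrative only: enable periodic non-diagnostic HK reporting for a set and
   request a 1.0 second collection interval. */
ReturnValue_t enablePeriodicHk(LocalDataPoolManager& poolManager, sid_t sid) {
    CommandMessage hkCmd;
    HousekeepingMessage::setToggleReportingCommand(&hkCmd, sid, true, false);
    ReturnValue_t result = poolManager.handleHousekeepingMessage(&hkCmd);
    if(result != HasReturnvaluesIF::RETURN_OK) {
        return result;
    }
    HousekeepingMessage::setCollectionIntervalModificationCommand(&hkCmd, sid, 1.0, false);
    return poolManager.handleHousekeepingMessage(&hkCmd);
}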

View File

@ -0,0 +1,141 @@
#include "LocalPoolOwnerBase.h"
LocalPoolOwnerBase::LocalPoolOwnerBase(object_id_t objectId):
SystemObject(objectId), poolManager(this, messageQueue),
dataset(this, lpool::testSetId) {
messageQueue = new MessageQueueMockBase();
}
LocalPoolOwnerBase::~LocalPoolOwnerBase() {
QueueFactory::instance()->deleteMessageQueue(messageQueue);
}
ReturnValue_t LocalPoolOwnerBase::initializeHkManager() {
if(not initialized) {
initialized = true;
return poolManager.initialize(messageQueue);
}
return HasReturnvaluesIF::RETURN_OK;
}
ReturnValue_t LocalPoolOwnerBase::initializeLocalDataPool(localpool::DataPool &localDataPoolMap,
LocalDataPoolManager &poolManager) {
    // Fill the local pool map with default (zero-initialized) entries.
localDataPoolMap.emplace(lpool::uint8VarId,
new PoolEntry<uint8_t>({0}));
localDataPoolMap.emplace(lpool::floatVarId,
new PoolEntry<float>({0}));
localDataPoolMap.emplace(lpool::uint32VarId,
new PoolEntry<uint32_t>({0}));
localDataPoolMap.emplace(lpool::uint16Vec3Id,
new PoolEntry<uint16_t>({0, 0, 0}));
localDataPoolMap.emplace(lpool::int64Vec2Id,
new PoolEntry<int64_t>({0, 0}));
return HasReturnvaluesIF::RETURN_OK;
}
LocalPoolObjectBase* LocalPoolOwnerBase::getPoolObjectHandle(lp_id_t localPoolId) {
if(localPoolId == lpool::uint8VarId) {
return &testUint8;
}
else if(localPoolId == lpool::uint16Vec3Id) {
return &testUint16Vec;
}
else if(localPoolId == lpool::floatVarId) {
return &testFloat;
}
else if(localPoolId == lpool::int64Vec2Id) {
return &testInt64Vec;
}
else if(localPoolId == lpool::uint32VarId) {
return &testUint32;
}
else {
return &testUint8;
}
}
ReturnValue_t LocalPoolOwnerBase::reset() {
resetSubscriptionList();
ReturnValue_t status = HasReturnvaluesIF::RETURN_OK;
{
PoolReadGuard readHelper(&dataset);
if(readHelper.getReadResult() != HasReturnvaluesIF::RETURN_OK) {
status = readHelper.getReadResult();
}
dataset.localPoolVarUint8.value = 0;
dataset.localPoolVarFloat.value = 0.0;
dataset.localPoolUint16Vec.value[0] = 0;
dataset.localPoolUint16Vec.value[1] = 0;
dataset.localPoolUint16Vec.value[2] = 0;
dataset.setValidity(false, true);
}
{
PoolReadGuard readHelper(&testUint32);
if(readHelper.getReadResult() != HasReturnvaluesIF::RETURN_OK) {
status = readHelper.getReadResult();
}
testUint32.value = 0;
testUint32.setValid(false);
}
{
PoolReadGuard readHelper(&testInt64Vec);
if(readHelper.getReadResult() != HasReturnvaluesIF::RETURN_OK) {
status = readHelper.getReadResult();
}
testInt64Vec.value[0] = 0;
testInt64Vec.value[1] = 0;
testInt64Vec.setValid(false);
}
return status;
}
bool LocalPoolOwnerBase::changedDataSetCallbackWasCalled(sid_t &sid, store_address_t &storeId) {
bool condition = false;
if(not this->changedDatasetSid.notSet()) {
condition = true;
}
sid = changedDatasetSid;
storeId = storeIdForChangedSet;
this->changedDatasetSid.raw = sid_t::INVALID_SID;
this->storeIdForChangedSet = storeId::INVALID_STORE_ADDRESS;
return condition;
}
void LocalPoolOwnerBase::handleChangedDataset(sid_t sid, store_address_t storeId,
bool* clearMessage) {
this->changedDatasetSid = sid;
this->storeIdForChangedSet = storeId;
}
bool LocalPoolOwnerBase::changedVariableCallbackWasCalled(gp_id_t &gpid, store_address_t &storeId) {
bool condition = false;
if(not this->changedPoolVariableGpid.notSet()) {
condition = true;
}
gpid = changedPoolVariableGpid;
storeId = storeIdForChangedVariable;
this->changedPoolVariableGpid.raw = gp_id_t::INVALID_GPID;
this->storeIdForChangedVariable = storeId::INVALID_STORE_ADDRESS;
return condition;
}
ReturnValue_t LocalPoolOwnerBase::initializeHkManagerAfterTaskCreation() {
if(not initializedAfterTaskCreation) {
initializedAfterTaskCreation = true;
return poolManager.initializeAfterTaskCreation();
}
return HasReturnvaluesIF::RETURN_OK;
}
void LocalPoolOwnerBase::handleChangedPoolVariable(gp_id_t globPoolId, store_address_t storeId,
bool* clearMessage) {
this->changedPoolVariableGpid = globPoolId;
this->storeIdForChangedVariable = storeId;
}
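initializeLocalDataPool() above shows the general recipe for any pool owner: one emplace with a PoolEntry per local pool ID. A trimmed sketch of the same idea as a free function filling a map for two hypothetical IDs; the PoolEntry include path is assumed from the framework layout:

#include <fsfw/datapoollocal/HasLocalDataPoolIF.h>
#include <fsfw/datapool/PoolEntry.h>

/* Illustrative only: one scalar entry and one three-element vector entry. */
ReturnValue_t fillExamplePool(localpool::DataPool& localDataPoolMap) {
    localDataPoolMap.emplace(0, new PoolEntry<uint8_t>({0}));
    localDataPoolMap.emplace(1, new PoolEntry<uint16_t>({0, 0, 0}));
    return HasReturnvaluesIF::RETURN_OK;
}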

View File

@ -1,16 +1,17 @@
#ifndef FSFW_UNITTEST_TESTS_DATAPOOLLOCAL_LOCALPOOLOWNERBASE_H_ #ifndef FSFW_UNITTEST_TESTS_DATAPOOLLOCAL_LOCALPOOLOWNERBASE_H_
#define FSFW_UNITTEST_TESTS_DATAPOOLLOCAL_LOCALPOOLOWNERBASE_H_ #define FSFW_UNITTEST_TESTS_DATAPOOLLOCAL_LOCALPOOLOWNERBASE_H_
#include <testcfg/objects/systemObjectList.h>
#include <fsfw/datapoollocal/HasLocalDataPoolIF.h> #include <fsfw/datapoollocal/HasLocalDataPoolIF.h>
#include <fsfw/datapoollocal/LocalDataSet.h> #include <fsfw/datapoollocal/LocalDataSet.h>
#include <fsfw/objectmanager/SystemObject.h> #include <fsfw/objectmanager/SystemObject.h>
#include <fsfw/datapoollocal/LocalPoolVariable.h> #include <fsfw/datapoollocal/LocalPoolVariable.h>
#include <fsfw/datapoollocal/LocalPoolVector.h> #include <fsfw/datapoollocal/LocalPoolVector.h>
#include <fsfw/ipc/QueueFactory.h> #include <fsfw/ipc/QueueFactory.h>
#include <testcfg/objects/systemObjectList.h>
#include <fsfw/datapoollocal/StaticLocalDataSet.h> #include <fsfw/datapoollocal/StaticLocalDataSet.h>
#include <fsfw/unittest/tests/mocks/MessageQueueMockBase.h> #include <fsfw/unittest/tests/mocks/MessageQueueMockBase.h>
#include "../../../datapool/PoolReadHelper.h" #include <fsfw/datapool/PoolReadGuard.h>
namespace lpool { namespace lpool {
static constexpr lp_id_t uint8VarId = 0; static constexpr lp_id_t uint8VarId = 0;
@ -36,10 +37,11 @@ class LocalPoolStaticTestDataSet: public StaticLocalDataSet<3> {
public: public:
LocalPoolStaticTestDataSet(): LocalPoolStaticTestDataSet():
StaticLocalDataSet(lpool::testSid) { StaticLocalDataSet(lpool::testSid) {
} }
LocalPoolStaticTestDataSet(HasLocalDataPoolIF* owner, uint32_t setId): LocalPoolStaticTestDataSet(HasLocalDataPoolIF* owner, uint32_t setId):
StaticLocalDataSet(owner, setId) { StaticLocalDataSet(owner, setId) {
} }
lp_var_t<uint8_t> localPoolVarUint8 = lp_var_t<uint8_t>(lpool::uint8VarGpid, this); lp_var_t<uint8_t> localPoolVarUint8 = lp_var_t<uint8_t>(lpool::uint8VarGpid, this);
@ -52,8 +54,7 @@ private:
class LocalPoolTestDataSet: public LocalDataSet { class LocalPoolTestDataSet: public LocalDataSet {
public: public:
LocalPoolTestDataSet(): LocalPoolTestDataSet():
LocalDataSet(lpool::testSid, lpool::dataSetMaxVariables) { LocalDataSet(lpool::testSid, lpool::dataSetMaxVariables) {}
}
LocalPoolTestDataSet(HasLocalDataPoolIF* owner, uint32_t setId): LocalPoolTestDataSet(HasLocalDataPoolIF* owner, uint32_t setId):
LocalDataSet(owner, setId, lpool::dataSetMaxVariables) { LocalDataSet(owner, setId, lpool::dataSetMaxVariables) {
@ -63,42 +64,26 @@ public:
lp_var_t<float> localPoolVarFloat = lp_var_t<float>(lpool::floatVarGpid, this); lp_var_t<float> localPoolVarFloat = lp_var_t<float>(lpool::floatVarGpid, this);
lp_vec_t<uint16_t, 3> localPoolUint16Vec = lp_vec_t<uint16_t, 3>(lpool::uint16Vec3Gpid, this); lp_vec_t<uint16_t, 3> localPoolUint16Vec = lp_vec_t<uint16_t, 3>(lpool::uint16Vec3Gpid, this);
void setDiagnostic(bool isDiagnostic) {
LocalPoolDataSetBase::setDiagnostic(isDiagnostic);
}
private: private:
}; };
class LocalPoolOwnerBase: public SystemObject, public HasLocalDataPoolIF { class LocalPoolOwnerBase: public SystemObject, public HasLocalDataPoolIF {
public: public:
LocalPoolOwnerBase( LocalPoolOwnerBase(object_id_t objectId = objects::TEST_LOCAL_POOL_OWNER_BASE);
object_id_t objectId = objects::TEST_LOCAL_POOL_OWNER_BASE):
SystemObject(objectId), poolManager(this, messageQueue),
dataset(this, lpool::testSetId) {
messageQueue = new MessageQueueMockBase();
}
~LocalPoolOwnerBase() { ~LocalPoolOwnerBase();
QueueFactory::instance()->deleteMessageQueue(messageQueue);
}
object_id_t getObjectId() const override { object_id_t getObjectId() const override {
return SystemObject::getObjectId(); return SystemObject::getObjectId();
} }
ReturnValue_t initializeHkManager() { ReturnValue_t initializeHkManager();
if(not initialized) {
initialized = true;
return poolManager.initialize(messageQueue);
}
return HasReturnvaluesIF::RETURN_OK;
}
ReturnValue_t initializeHkManagerAfterTaskCreation() { ReturnValue_t initializeHkManagerAfterTaskCreation();
if(not initializedAfterTaskCreation) {
initializedAfterTaskCreation = true;
return poolManager.initializeAfterTaskCreation();
}
return HasReturnvaluesIF::RETURN_OK;
}
/** Command queue for housekeeping messages. */ /** Command queue for housekeeping messages. */
MessageQueueId_t getCommandQueue() const override { MessageQueueId_t getCommandQueue() const override {
@ -106,30 +91,15 @@ public:
} }
// This is called by initializeAfterTaskCreation of the HK manager. // This is called by initializeAfterTaskCreation of the HK manager.
virtual ReturnValue_t initializeLocalDataPool( virtual ReturnValue_t initializeLocalDataPool(localpool::DataPool& localDataPoolMap,
localpool::DataPool& localDataPoolMap, LocalDataPoolManager& poolManager) override;
LocalDataPoolManager& poolManager) {
// Default initialization empty for now.
localDataPoolMap.emplace(lpool::uint8VarId,
new PoolEntry<uint8_t>({0}));
localDataPoolMap.emplace(lpool::floatVarId,
new PoolEntry<float>({0}));
localDataPoolMap.emplace(lpool::uint32VarId,
new PoolEntry<uint32_t>({0}));
localDataPoolMap.emplace(lpool::uint16Vec3Id,
new PoolEntry<uint16_t>({0, 0, 0}));
localDataPoolMap.emplace(lpool::int64Vec2Id,
new PoolEntry<int64_t>({0, 0}));
return HasReturnvaluesIF::RETURN_OK;
}
LocalDataPoolManager* getHkManagerHandle() override { LocalDataPoolManager* getHkManagerHandle() override {
return &poolManager; return &poolManager;
} }
uint32_t getPeriodicOperationFrequency() const override { dur_millis_t getPeriodicOperationFrequency() const override {
return 0; return 200;
} }
/** /**
@@ -142,32 +112,16 @@ public:
         return &dataset;
     }

-    virtual LocalPoolObjectBase* getPoolObjectHandle(
-            lp_id_t localPoolId) override {
-        if(localPoolId == lpool::uint8VarId) {
-            return &testUint8;
-        }
-        else if(localPoolId == lpool::uint16Vec3Id) {
-            return &testUint16Vec;
-        }
-        else if(localPoolId == lpool::floatVarId) {
-            return &testFloat;
-        }
-        else if(localPoolId == lpool::int64Vec2Id) {
-            return &testInt64Vec;
-        }
-        else if(localPoolId == lpool::uint32VarId) {
-            return &testUint32;
-        }
-        else {
-            return &testUint8;
-        }
-    }
+    virtual LocalPoolObjectBase* getPoolObjectHandle(lp_id_t localPoolId) override;

     MessageQueueMockBase* getMockQueueHandle() const {
         return dynamic_cast<MessageQueueMockBase*>(messageQueue);
     }

+    ReturnValue_t subscribePeriodicHk(bool enableReporting) {
+        return poolManager.subscribeForPeriodicPacket(lpool::testSid, enableReporting, 0.2, false);
+    }
+
     ReturnValue_t subscribeWrapperSetUpdate() {
         return poolManager.subscribeForSetUpdateMessage(lpool::testSetId,
                 objects::NO_OBJECT, objects::HK_RECEIVER_MOCK, false);
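The new subscribePeriodicHk() wrapper bundles the pool manager's periodic packet subscription with the test SID and a 0.2 s collection interval. A hypothetical usage snippet (poolOwner obtained as in the existing test preambles; the cycle call is shown without asserting its result because packet delivery depends on the HK receiver mock configured elsewhere):

    // Enable periodic reporting for the test set, then let the manager run one cycle,
    // as the owner's periodic task normally would.
    REQUIRE(poolOwner->subscribePeriodicHk(true) == retval::CATCH_OK);
    poolOwner->poolManager.performHkOperation();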
@@ -188,51 +142,33 @@ public:
                 MessageQueueIF::NO_QUEUE, objects::HK_RECEIVER_MOCK, false);
     }

-    ReturnValue_t reset() {
-        resetSubscriptionList();
-        ReturnValue_t status = HasReturnvaluesIF::RETURN_OK;
-        {
-            PoolReadGuard readHelper(&dataset);
-            if(readHelper.getReadResult() != HasReturnvaluesIF::RETURN_OK) {
-                status = readHelper.getReadResult();
-            }
-            dataset.localPoolVarUint8.value = 0;
-            dataset.localPoolVarFloat.value = 0.0;
-            dataset.localPoolUint16Vec.value[0] = 0;
-            dataset.localPoolUint16Vec.value[1] = 0;
-            dataset.localPoolUint16Vec.value[2] = 0;
-            dataset.setValidity(false, true);
-        }
-        {
-            PoolReadGuard readHelper(&testUint32);
-            if(readHelper.getReadResult() != HasReturnvaluesIF::RETURN_OK) {
-                status = readHelper.getReadResult();
-            }
-            testUint32.value = 0;
-            testUint32.setValid(false);
-        }
-        {
-            PoolReadGuard readHelper(&testInt64Vec);
-            if(readHelper.getReadResult() != HasReturnvaluesIF::RETURN_OK) {
-                status = readHelper.getReadResult();
-            }
-            testInt64Vec.value[0] = 0;
-            testInt64Vec.value[1] = 0;
-            testInt64Vec.setValid(false);
-        }
-        return status;
+    ReturnValue_t subscribeWrapperVariableSnapshot(lp_id_t localPoolId) {
+        return poolManager.subscribeForVariableUpdateMessage(localPoolId,
+                MessageQueueIF::NO_QUEUE, objects::HK_RECEIVER_MOCK, true);
     }

+    ReturnValue_t reset();
+
     void resetSubscriptionList() {
         poolManager.clearReceiversList();
     }

+    bool changedDataSetCallbackWasCalled(sid_t& sid, store_address_t& storeId);
+    bool changedVariableCallbackWasCalled(gp_id_t& gpid, store_address_t& storeId);
+
     LocalDataPoolManager poolManager;
     LocalPoolTestDataSet dataset;
 private:
+    void handleChangedDataset(sid_t sid, store_address_t storeId, bool* clearMessage) override;
+    sid_t changedDatasetSid;
+    store_address_t storeIdForChangedSet;
+
+    void handleChangedPoolVariable(gp_id_t globPoolId, store_address_t storeId,
+            bool* clearMessage) override;
+    gp_id_t changedPoolVariableGpid;
+    store_address_t storeIdForChangedVariable;
+
     lp_var_t<uint8_t> testUint8 = lp_var_t<uint8_t>(this, lpool::uint8VarId);
     lp_var_t<float> testFloat = lp_var_t<float>(this, lpool::floatVarId);
     lp_var_t<uint32_t> testUint32 = lp_var_t<uint32_t>(this, lpool::uint32VarId);
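reset() and the two change-notification queries are now only declared; their definitions live in the assumed companion .cpp. reset() can keep the removed body above essentially unchanged apart from the LocalPoolOwnerBase:: scope. The dataset callback bookkeeping is not shown anywhere in this diff, so the following is purely a sketch of one plausible implementation; the datasetChangeWasReported flag is an assumption, not a member of the header shown above:

    // Hypothetical bookkeeping for the new dataset change callback: latch what the pool
    // manager reported, hand it out once, then clear the latch again.
    void LocalPoolOwnerBase::handleChangedDataset(sid_t sid, store_address_t storeId,
            bool* clearMessage) {
        changedDatasetSid = sid;
        storeIdForChangedSet = storeId;
        datasetChangeWasReported = true;  // assumed private flag, not part of the shown header
        // clearMessage is left untouched so the manager keeps its default behaviour.
    }

    bool LocalPoolOwnerBase::changedDataSetCallbackWasCalled(sid_t& sid, store_address_t& storeId) {
        sid = changedDatasetSid;
        storeId = storeIdForChangedSet;
        bool wasCalled = datasetChangeWasReported;
        datasetChangeWasReported = false;
        return wasCalled;
    }

changedVariableCallbackWasCalled() and handleChangedPoolVariable() would follow the same pattern with the gp_id_t members.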
View File
@@ -10,8 +10,7 @@ TEST_CASE("LocalPoolVariable" , "[LocPoolVarTest]") {
             get<LocalPoolOwnerBase>(objects::TEST_LOCAL_POOL_OWNER_BASE);
     REQUIRE(poolOwner != nullptr);
     REQUIRE(poolOwner->initializeHkManager() == retval::CATCH_OK);
-    REQUIRE(poolOwner->initializeHkManagerAfterTaskCreation()
-            == retval::CATCH_OK);
+    REQUIRE(poolOwner->initializeHkManagerAfterTaskCreation() == retval::CATCH_OK);

     SECTION("Basic Tests") {
         /* very basic test. */
View File
@@ -115,6 +115,7 @@ TEST_CASE("LocalPoolVector" , "[LocPoolVecTest]") {
         REQUIRE(readOnlyVec.commit() ==
                 static_cast<int>(PoolVariableIF::INVALID_READ_WRITE_MODE));
     }
+    poolOwner->reset();
 }
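The added poolOwner->reset() keeps the shared pool owner in a known state between test cases: subscriptions are cleared and the test variables are zeroed and invalidated. A sketch of the kind of check a later test case could then rely on (section name and placement are hypothetical; poolOwner set up as in the existing preambles):

    SECTION("Clean State After Reset") {
        // reset() zeroed the uint32 test variable and marked it invalid in the pool.
        lp_var_t<uint32_t> testUint32 = lp_var_t<uint32_t>(
                objects::TEST_LOCAL_POOL_OWNER_BASE, lpool::uint32VarId);
        REQUIRE(testUint32.read() == retval::CATCH_OK);
        CHECK(testUint32.value == 0);
        CHECK(testUint32.isValid() == false);
        REQUIRE(testUint32.commit() == retval::CATCH_OK);
    }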
View File
@@ -29,16 +29,24 @@ public:
         return tempMessageSent;
     }

+    /**
+     * Pop a message, clearing it in the process.
+     * @return
+     */
+    ReturnValue_t popMessage() {
+        CommandMessage message;
+        message.clear();
+        return receiveMessage(&message);
+    }
+
     virtual ReturnValue_t reply( MessageQueueMessageIF* message ) {
-        //messageSent = true;
-        //lastMessage = *(dynamic_cast<MessageQueueMessage*>(message));
         return sendMessage(myQueueId, message);
-        return HasReturnvaluesIF::RETURN_OK;
     };

     virtual ReturnValue_t receiveMessage(MessageQueueMessageIF* message,
             MessageQueueId_t *receivedFrom) {
         return receiveMessage(message);
     }

     virtual ReturnValue_t receiveMessage(MessageQueueMessageIF* message) {
         if(messagesSentQueue.empty()) {
             return MessageQueueIF::EMPTY;
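The new popMessage() helper lets a test consume one captured message at a time without caring about its content. A hypothetical drain loop built on the mock queue handle from LocalPoolOwnerBase above (poolOwner assumed to be in scope):

    // Drop everything the mock captured so far so the next assertion only sees new traffic.
    // Each successful call pops one entry from messagesSentQueue; the loop stops once the
    // mock reports MessageQueueIF::EMPTY.
    MessageQueueMockBase* mockQueue = poolOwner->getMockQueueHandle();
    while(mockQueue->popMessage() == HasReturnvaluesIF::RETURN_OK) {
    }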
@@ -61,21 +69,13 @@ public:
     virtual ReturnValue_t sendMessageFrom( MessageQueueId_t sendTo,
             MessageQueueMessageIF* message, MessageQueueId_t sentFrom,
             bool ignoreFault = false ) {
-        //messageSent = true;
-        //lastMessage = *(dynamic_cast<MessageQueueMessage*>(message));
-        //return HasReturnvaluesIF::RETURN_OK;
         return sendMessage(sendTo, message);
     }

     virtual ReturnValue_t sendToDefaultFrom( MessageQueueMessageIF* message,
             MessageQueueId_t sentFrom, bool ignoreFault = false ) {
-        //messageSent = true;
-        //lastMessage = *(dynamic_cast<MessageQueueMessage*>(message));
-        //return HasReturnvaluesIF::RETURN_OK;
         return sendMessage(myQueueId, message);
     }

     virtual ReturnValue_t sendToDefault( MessageQueueMessageIF* message ) {
-        //messageSent = true;
-        //lastMessage = *(dynamic_cast<MessageQueueMessage*>(message));
         return sendMessage(myQueueId, message);
     }

     virtual ReturnValue_t sendMessage( MessageQueueId_t sendTo,
@@ -114,7 +114,6 @@ public:
 private:
     std::queue<MessageQueueMessage> messagesSentQueue;
-    //MessageQueueMessage lastMessage;
 };