Merge remote-tracking branch 'upstream/development' into mueller/action-diag-output

Commit 712ba5124c by Robin Müller, 2021-02-08 14:29:07 +01:00

20 changed files with 3374 additions and 637 deletions

View File

@@ -18,7 +18,7 @@ public:
 * This function is protected because it should only be used by the
 * class imlementing the interface.
 */
-virtual LocalDataPoolManager* getHkManagerHandle() = 0;
+virtual LocalDataPoolManager* getPoolManagerHandle() = 0;
 protected:
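
The accessor interface now hands out the manager under the name getPoolManagerHandle(). A minimal sketch of an implementing class, assuming a hypothetical owner called MyPoolOwner that keeps a pointer to its manager; the interface's other pure virtuals, such as getLocalPoolMutex() used further below, are omitted:

    /* Sketch only, not part of the commit. */
    class MyPoolOwner: public AccessPoolManagerIF {
    protected:
        /* Protected on purpose: only the implementing class and its friends should use it. */
        virtual LocalDataPoolManager* getPoolManagerHandle() override {
            return poolManager;
        }
    private:
        LocalDataPoolManager* poolManager = nullptr; /* set up elsewhere by the owner */
    };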

View File

@@ -5,8 +5,8 @@
 #include "internal/LocalPoolDataSetAttorney.h"
 #include "internal/HasLocalDpIFManagerAttorney.h"
-#include "../housekeeping/HousekeepingPacketUpdate.h"
 #include "../housekeeping/HousekeepingSetPacket.h"
+#include "../housekeeping/HousekeepingSnapshot.h"
 #include "../housekeeping/AcceptsHkPacketsIF.h"
 #include "../timemanager/CCSDSTime.h"
 #include "../ipc/MutexFactory.h"
@@ -226,7 +226,7 @@ ReturnValue_t LocalDataPoolManager::handleNotificationSnapshot(
 Clock::getClock_timeval(&now);
 CCSDSTime::CDS_short cds;
 CCSDSTime::convertToCcsds(&cds, &now);
-HousekeepingPacketUpdate updatePacket(reinterpret_cast<uint8_t*>(&cds),
+HousekeepingSnapshot updatePacket(reinterpret_cast<uint8_t*>(&cds),
 sizeof(cds), HasLocalDpIFManagerAttorney::getPoolObjectHandle(owner,
 receiver.dataId.localPoolId));
@@ -264,7 +264,7 @@ ReturnValue_t LocalDataPoolManager::handleNotificationSnapshot(
 Clock::getClock_timeval(&now);
 CCSDSTime::CDS_short cds;
 CCSDSTime::convertToCcsds(&cds, &now);
-HousekeepingPacketUpdate updatePacket(reinterpret_cast<uint8_t*>(&cds),
+HousekeepingSnapshot updatePacket(reinterpret_cast<uint8_t*>(&cds),
 sizeof(cds), HasLocalDpIFManagerAttorney::getDataSetHandle(owner,
 receiver.dataId.sid));
@@ -292,7 +292,7 @@ ReturnValue_t LocalDataPoolManager::handleNotificationSnapshot(
 }
 ReturnValue_t LocalDataPoolManager::addUpdateToStore(
-HousekeepingPacketUpdate& updatePacket, store_address_t& storeId) {
+HousekeepingSnapshot& updatePacket, store_address_t& storeId) {
 size_t updatePacketSize = updatePacket.getSerializedSize();
 uint8_t *storePtr = nullptr;
 ReturnValue_t result = ipcStore->getFreeElement(&storeId,
@@ -906,6 +906,6 @@ void LocalDataPoolManager::printWarningOrError(sif::OutputTypes outputType,
 }
 }
-LocalDataPoolManager* LocalDataPoolManager::getHkManagerHandle() {
+LocalDataPoolManager* LocalDataPoolManager::getPoolManagerHandle() {
 return this;
 }

View File

@@ -24,7 +24,7 @@ void setStaticFrameworkObjectIds();
 }
 class LocalPoolDataSetBase;
-class HousekeepingPacketUpdate;
+class HousekeepingSnapshot;
 class HasLocalDataPoolIF;
 class LocalDataPool;
@@ -52,7 +52,8 @@ class LocalDataPool;
 * Each pool entry has a valid state too.
 * @author R. Mueller
 */
-class LocalDataPoolManager: public ProvidesDataPoolSubscriptionIF,
+class LocalDataPoolManager:
+public ProvidesDataPoolSubscriptionIF,
 public AccessPoolManagerIF {
 friend void (Factory::setStaticFrameworkObjectIds)();
 //! Some classes using the pool manager directly need to access class internals of the
@@ -62,7 +63,6 @@ public:
 static constexpr uint8_t INTERFACE_ID = CLASS_ID::HOUSEKEEPING_MANAGER;
 static constexpr ReturnValue_t QUEUE_OR_DESTINATION_INVALID = MAKE_RETURN_CODE(0);
 static constexpr ReturnValue_t WRONG_HK_PACKET_TYPE = MAKE_RETURN_CODE(1);
-
 static constexpr ReturnValue_t REPORTING_STATUS_UNCHANGED = MAKE_RETURN_CODE(2);
 static constexpr ReturnValue_t PERIODIC_HELPER_INVALID = MAKE_RETURN_CODE(3);
@@ -179,8 +179,6 @@ public:
 MessageQueueId_t targetQueueId,
 bool generateSnapshot) override;
-MutexIF* getLocalPoolMutex() override;
-
 /**
 * Non-Diagnostics packets usually have a lower minimum sampling frequency
 * than diagnostic packets.
@@ -243,12 +241,9 @@ public:
 UPDATE_SNAPSHOT,
 };
-/**
- * Different data types are possible in the HK receiver map.
- * For example, updates can be requested for full datasets or
- * for single pool variables. Periodic reporting is only possible for
- * data sets.
- */
+/** Different data types are possible in the HK receiver map. For example, updates can be
+requested for full datasets or for single pool variables. Periodic reporting is only possible
+for data sets. */
 enum class DataType: uint8_t {
 LOCAL_POOL_VARIABLE,
 DATA_SET
@@ -267,11 +262,19 @@ public:
 object_id_t getCreatorObjectId() const;
-virtual LocalDataPoolManager* getHkManagerHandle() override;
+/**
+ * Get the pointer to the mutex. Can be used to lock the data pool
+ * externally. Use with care and don't forget to unlock locked mutexes!
+ * For now, only friend classes can accss this function.
+ * @return
+ */
+MutexIF* getMutexHandle();
+
+virtual LocalDataPoolManager* getPoolManagerHandle() override;
 private:
 localpool::DataPool localPoolMap;
-//! Every housekeeping data manager has a mutex to protect access
-//! to it's data pool.
+/** Every housekeeping data manager has a mutex to protect access
+to it's data pool. */
 MutexIF* mutex = nullptr;
 /** The class which actually owns the manager (and its datapool). */
@@ -333,13 +336,6 @@ private:
 /** Global IPC store is used to store all packets. */
 StorageManagerIF* ipcStore = nullptr;
-/**
- * Get the pointer to the mutex. Can be used to lock the data pool
- * externally. Use with care and don't forget to unlock locked mutexes!
- * For now, only friend classes can accss this function.
- * @return
- */
-MutexIF* getMutexHandle();
 /**
 * Read a variable by supplying its local pool ID and assign the pool
@@ -364,6 +360,8 @@ private:
 */
 ReturnValue_t initializeHousekeepingPoolEntriesOnce();
+MutexIF* getLocalPoolMutex() override;
+
 ReturnValue_t serializeHkPacketIntoStore(
 HousekeepingPacketDownlink& hkPacket,
 store_address_t& storeId, bool forDownlink, size_t* serializedSize);
@@ -386,7 +384,7 @@ private:
 ReturnValue_t& status);
 ReturnValue_t handleNotificationSnapshot(HkReceiver& hkReceiver,
 ReturnValue_t& status);
-ReturnValue_t addUpdateToStore(HousekeepingPacketUpdate& updatePacket,
+ReturnValue_t addUpdateToStore(HousekeepingSnapshot& updatePacket,
 store_address_t& storeId);
 void printWarningOrError(sif::OutputTypes outputType,
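
The UPDATE_SNAPSHOT receiver type and the DataType distinction above are driven through the subscription interface. A condensed sketch of a set-update subscription, mirroring the calls used in the updated unit tests further below; the set ID and the destination/queue IDs are placeholder values:

    /* Sketch: subscribe a receiver for set updates. The last argument selects a full
       snapshot (true) instead of a plain update notification (false). */
    poolManager.subscribeForSetUpdateMessages(/* setId */ 0, objects::NO_OBJECT,
            MessageQueueIF::NO_QUEUE, /* generateSnapshot */ true);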

View File

@@ -4,11 +4,26 @@
 #include "LocalPoolDataSetBase.h"
 #include <vector>
+/**
+ * @brief This dataset type can be used to group related pool variables if the number of
+ * variables should not be fixed.
+ * @details
+ * This will is the primary data structure to organize pool variables into
+ * sets which can be accessed via the housekeeping service interface or
+ * which can be sent to other software objects.
+ *
+ * It is recommended to read the documentation of the LocalPoolDataSetBase
+ * class for more information on how this class works and how to use it.
+ * @tparam capacity Capacity of the static dataset, which is usually known
+ * beforehand.
+ */
 class LocalDataSet: public LocalPoolDataSetBase {
 public:
 LocalDataSet(HasLocalDataPoolIF* hkOwner, uint32_t setId,
 const size_t maxSize);
 LocalDataSet(sid_t sid, const size_t maxSize);
 virtual~ LocalDataSet();
 //! Copying forbidden for now.
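
As the new class documentation states, LocalDataSet is sized at runtime via maxSize. A short sketch of a concrete set, following the pattern of the reworked LocalPoolTestDataSet further below; the owner object ID 0x1234 and the pool/set IDs are assumed values:

    /* Sketch only: a runtime-sized dataset owning three pool variables. */
    class MyDataSet: public LocalDataSet {
    public:
        MyDataSet(HasLocalDataPoolIF* owner, uint32_t setId):
                LocalDataSet(owner, setId, /* maxSize */ 10) {}
        /* Passing "this" registers each variable in the set on construction. */
        lp_var_t<float> temperature = lp_var_t<float>(gp_id_t(0x1234, 0), this);
        lp_var_t<uint8_t> status = lp_var_t<uint8_t>(gp_id_t(0x1234, 1), this);
        lp_vec_t<uint16_t, 3> rawReadings = lp_vec_t<uint16_t, 3>(gp_id_t(0x1234, 2), this);
    };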

View File

@@ -28,7 +28,7 @@ LocalPoolDataSetBase::LocalPoolDataSetBase(HasLocalDataPoolIF *hkOwner,
 AccessPoolManagerIF* accessor = HasLocalDpIFUserAttorney::getAccessorHandle(hkOwner);
 if(accessor != nullptr) {
-poolManager = accessor->getHkManagerHandle();
+poolManager = accessor->getPoolManagerHandle();
 mutexIfSingleDataCreator = accessor->getLocalPoolMutex();
 }

View File

@@ -22,7 +22,7 @@ LocalPoolObjectBase::LocalPoolObjectBase(lp_id_t poolId, HasLocalDataPoolIF* hkO
 return;
 }
 AccessPoolManagerIF* poolManAccessor = HasLocalDpIFUserAttorney::getAccessorHandle(hkOwner);
-hkManager = poolManAccessor->getHkManagerHandle();
+hkManager = poolManAccessor->getPoolManagerHandle();
 if (dataSet != nullptr) {
 dataSet->registerVariable(this);
@@ -50,7 +50,7 @@ LocalPoolObjectBase::LocalPoolObjectBase(object_id_t poolOwner, lp_id_t poolId,
 AccessPoolManagerIF* accessor = HasLocalDpIFUserAttorney::getAccessorHandle(hkOwner);
 if(accessor != nullptr) {
-hkManager = accessor->getHkManagerHandle();
+hkManager = accessor->getPoolManagerHandle();
 }
 if(dataSet != nullptr) {

View File

@@ -77,8 +77,7 @@ public:
 * @param dataSet
 * @param setReadWriteMode
 */
-LocalPoolVector(gp_id_t globalPoolId,
-DataSetIF* dataSet = nullptr,
+LocalPoolVector(gp_id_t globalPoolId, DataSetIF* dataSet = nullptr,
 pool_rwm_t setReadWriteMode = pool_rwm_t::VAR_READ_WRITE);
 /**
@@ -87,7 +86,7 @@ public:
 * The user can work on this attribute just like he would on a local
 * array of this type.
 */
-T value[vectorSize];
+T value[vectorSize] = {};
 /**
 * @brief The classes destructor is empty.
 * @details If commit() was not called, the local value is
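
Besides the consolidated constructor, the local copy is now zero-initialized (value[vectorSize] = {}), so a freshly constructed vector reads as all zeroes until read() fetches the pool content. A brief usage sketch with assumed IDs:

    /* Sketch: the local copy starts at zero before the first read(). */
    lp_vec_t<uint16_t, 3> testVec(gp_id_t(0x1234, 2));
    testVec.read();    /* fetch current values from the owner's local pool */
    testVec[0] = 42;   /* work on the local copy like on a plain array */
    testVec.commit();  /* write the local copy back to the pool */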

View File

@@ -16,7 +16,6 @@ inline LocalPoolVector<T, vectorSize>::LocalPoolVector(object_id_t poolOwner,
 lp_id_t poolId, DataSetIF *dataSet, pool_rwm_t setReadWriteMode):
 LocalPoolObjectBase(poolOwner, poolId, dataSet, setReadWriteMode) {}
-
 template<typename T, uint16_t vectorSize>
 inline LocalPoolVector<T, vectorSize>::LocalPoolVector(gp_id_t globalPoolId,
 DataSetIF *dataSet, pool_rwm_t setReadWriteMode):

View File

@@ -6,7 +6,8 @@
 #include <array>
 /**
- * @brief This local dataset type is created on the stack.
+ * @brief This dataset type can be used to group related pool variables if the number of
+ * variables is fixed.
 * @details
 * This will is the primary data structure to organize pool variables into
 * sets which can be accessed via the housekeeping service interface or
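
StaticLocalDataSet is the fixed-size counterpart: the number of variables is a template parameter instead of a runtime maxSize. A sketch analogous to the LocalDataSet example above; the constructor signature (owner plus set ID, without a size argument) is an assumption here, as it is not part of this hunk:

    /* Sketch only: a stack-allocated set with a compile-time capacity of 3. */
    class MyStaticSet: public StaticLocalDataSet<3> {
    public:
        MyStaticSet(HasLocalDataPoolIF* owner, uint32_t setId):
                StaticLocalDataSet(owner, setId) {}
        lp_var_t<float> temperature = lp_var_t<float>(gp_id_t(0x1234, 0), this);
        lp_vec_t<uint16_t, 3> rawReadings = lp_vec_t<uint16_t, 3>(gp_id_t(0x1234, 2), this);
    };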

doc/doxy/.gitignore (new file, 3 additions)

@@ -0,0 +1,3 @@
+html
+latex
+rtf

doc/doxy/OPUS.doxyfile (new file, 2609 additions)

File diff suppressed because it is too large.

View File

@@ -1,24 +1,37 @@
-#ifndef FSFW_HOUSEKEEPING_HOUSEKEEPINGPACKETUPDATE_H_
-#define FSFW_HOUSEKEEPING_HOUSEKEEPINGPACKETUPDATE_H_
+#ifndef FSFW_HOUSEKEEPING_HOUSEKEEPINGSNAPSHOT_H_
+#define FSFW_HOUSEKEEPING_HOUSEKEEPINGSNAPSHOT_H_
 #include "../serialize/SerialBufferAdapter.h"
 #include "../serialize/SerialLinkedListAdapter.h"
 #include "../datapoollocal/LocalPoolDataSetBase.h"
+#include "../datapoollocal/LocalPoolObjectBase.h"
+#include "../timemanager/CCSDSTime.h"
 /**
- * @brief This helper class will be used to serialize and deserialize
- * update housekeeping packets into the store.
+ * @brief This helper class will be used to serialize and deserialize update housekeeping packets
+ * into the store.
 */
-class HousekeepingPacketUpdate: public SerializeIF {
+class HousekeepingSnapshot: public SerializeIF {
 public:
 /**
- * Update packet constructor for datasets
- * @param timeStamp
- * @param timeStampSize
- * @param hkData
- * @param hkDataSize
+ * Update packet constructor for datasets.
+ * @param cdsShort If a CSD short timestamp is used, a reference should be
+ * supplied here
+ * @param dataSetPtr Pointer to the dataset instance to serialize or deserialize the
+ * data into
 */
-HousekeepingPacketUpdate(uint8_t* timeStamp, size_t timeStampSize,
+HousekeepingSnapshot(CCSDSTime::CDS_short* cdsShort, LocalPoolDataSetBase* dataSetPtr):
+timeStamp(reinterpret_cast<uint8_t*>(cdsShort)),
+timeStampSize(sizeof(CCSDSTime::CDS_short)), updateData(dataSetPtr) {};
+
+/**
+ * Update packet constructor for datasets.
+ * @param timeStamp Pointer to the buffer where the timestamp will be stored.
+ * @param timeStampSize Size of the timestamp
+ * @param dataSetPtr Pointer to the dataset instance to deserialize the data into
+ */
+HousekeepingSnapshot(uint8_t* timeStamp, size_t timeStampSize,
 LocalPoolDataSetBase* dataSetPtr):
 timeStamp(timeStamp), timeStampSize(timeStampSize),
 updateData(dataSetPtr) {};
@@ -29,7 +42,7 @@ public:
 * @param timeStampSize
 * @param dataSetPtr
 */
-HousekeepingPacketUpdate(uint8_t* timeStamp, size_t timeStampSize,
+HousekeepingSnapshot(uint8_t* timeStamp, size_t timeStampSize,
 LocalPoolObjectBase* dataSetPtr):
 timeStamp(timeStamp), timeStampSize(timeStampSize),
 updateData(dataSetPtr) {};
@@ -89,4 +102,4 @@
 };
-#endif /* FSFW_HOUSEKEEPING_HOUSEKEEPINGPACKETUPDATE_H_ */
+#endif /* FSFW_HOUSEKEEPING_HOUSEKEEPINGSNAPSHOT_H_ */
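
The two dataset constructors above enable a simple round trip, which the new unit test below exercises end to end: the provider serializes a changed set together with a CDS short timestamp into the IPC store, and a receiver deserializes it into its own set instance. A condensed sketch; someSet, receiverSet, buffer (a const uint8_t*) and bufSize (a size_t) are assumed to exist:

    /* Provider side: stamp the snapshot with the current time. */
    timeval now;
    Clock::getClock_timeval(&now);
    CCSDSTime::CDS_short cds;
    CCSDSTime::convertToCcsds(&cds, &now);
    HousekeepingSnapshot outgoing(&cds, &someSet);
    size_t packetSize = outgoing.getSerializedSize();
    /* ... serialize into an IPC store buffer and send the store ID in an update message ... */

    /* Receiver side: deserialize into an empty set plus a timestamp struct to fill. */
    CCSDSTime::CDS_short receivedStamp;
    HousekeepingSnapshot incoming(&receivedStamp, &receiverSet);
    incoming.deSerialize(&buffer, &bufSize, SerializeIF::Endianness::MACHINE);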

View File

@@ -53,8 +53,7 @@ ReturnValue_t Clock::getClock_timeval(timeval* time) {
 auto epoch = now.time_since_epoch();
 time->tv_sec = std::chrono::duration_cast<std::chrono::seconds>(epoch).count();
 auto fraction = now - secondsChrono;
-time->tv_usec = std::chrono::duration_cast<std::chrono::microseconds>(
-fraction).count();
+time->tv_usec = std::chrono::duration_cast<std::chrono::microseconds>(fraction).count();
 return HasReturnvaluesIF::RETURN_OK;
 #elif defined(LINUX)
 timespec timeUnix;
@@ -67,7 +66,9 @@ ReturnValue_t Clock::getClock_timeval(timeval* time) {
 return HasReturnvaluesIF::RETURN_OK;
 #else
 #if FSFW_CPP_OSTREAM_ENABLED == 1
-sif::warning << "Clock::getUptime: Not implemented for found OS" << std::endl;
+sif::warning << "Clock::getUptime: Not implemented for found OS!" << std::endl;
+#else
+sif::printWarning("Clock::getUptime: Not implemented for found OS!\n");
 #endif
 return HasReturnvaluesIF::RETURN_FAILED;
 #endif
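
The host clock path above splits a std::chrono time point into POSIX timeval seconds and microseconds. A standalone sketch of the same split, without any FSFW types (assumes a POSIX timeval from <sys/time.h>):

    #include <chrono>
    #include <sys/time.h>

    timeval currentTimeval() {
        auto now = std::chrono::system_clock::now();
        auto epoch = now.time_since_epoch();
        auto secs = std::chrono::duration_cast<std::chrono::seconds>(epoch);
        timeval tv;
        tv.tv_sec = secs.count();
        /* microseconds are the remainder below one full second */
        tv.tv_usec = std::chrono::duration_cast<std::chrono::microseconds>(epoch - secs).count();
        return tv;
    }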

View File

@@ -1,5 +1,5 @@
 #include "Stopwatch.h"
-#include "../serviceinterface/ServiceInterfaceStream.h"
+#include "../serviceinterface/ServiceInterface.h"
 #include <iomanip>
 Stopwatch::Stopwatch(bool displayOnDestruction,
@@ -28,9 +28,13 @@ double Stopwatch::stopSeconds() {
 void Stopwatch::display() {
 if(displayMode == StopwatchDisplayMode::MILLIS) {
+dur_millis_t timeMillis = static_cast<dur_millis_t>(
+elapsedTime.tv_sec * 1000 + elapsedTime.tv_usec / 1000);
 #if FSFW_CPP_OSTREAM_ENABLED == 1
-sif::info << "Stopwatch: Operation took " << (elapsedTime.tv_sec * 1000 +
-elapsedTime.tv_usec / 1000) << " milliseconds" << std::endl;
+sif::info << "Stopwatch: Operation took " << timeMillis << " milliseconds" << std::endl;
+#else
+sif::printInfo("Stopwatch: Operation took %lu milliseconds\n\r",
+static_cast<unsigned int>(timeMillis));
 #endif
 }
 else if(displayMode == StopwatchDisplayMode::SECONDS) {
@@ -38,6 +42,9 @@ void Stopwatch::display() {
 sif::info <<"Stopwatch: Operation took " << std::setprecision(3)
 << std::fixed << timevalOperations::toDouble(elapsedTime)
 << " seconds" << std::endl;
+#else
+sif::printInfo("Stopwatch: Operation took %.3f seconds\n\r",
+static_cast<float>(timevalOperations::toDouble(elapsedTime)));
 #endif
 }
 }
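
The display() path above now converts the elapsed timeval into milliseconds once and reuses it for both output backends. A usage sketch; the constructor's first argument (displayOnDestruction) is shown in the hunk, the second one is assumed to be the display mode:

    {
        Stopwatch watch(true, StopwatchDisplayMode::MILLIS);
        /* ... timed operation ... */
    } /* prints "Stopwatch: Operation took ... milliseconds" when watch leaves scope */

    Stopwatch manualWatch(false, StopwatchDisplayMode::SECONDS);
    /* ... timed operation ... */
    double elapsedSeconds = manualWatch.stopSeconds();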

View File

@@ -1,10 +1,16 @@
 #include "LocalPoolOwnerBase.h"
 #include <catch2/catch_test_macros.hpp>
+#include <catch2/catch_approx.hpp>
+#include <fsfw/datapool/PoolReadHelper.h>
 #include <fsfw/datapoollocal/HasLocalDataPoolIF.h>
 #include <fsfw/datapoollocal/StaticLocalDataSet.h>
+#include <fsfw/housekeeping/HousekeepingSnapshot.h>
 #include <fsfw/ipc/CommandMessageCleaner.h>
+#include <fsfw/timemanager/CCSDSTime.h>
 #include <unittest/core/CatchDefinitions.h>
+#include <iostream>
 TEST_CASE("LocalPoolManagerTest" , "[LocManTest]") {
@@ -14,7 +20,7 @@ TEST_CASE("LocalPoolManagerTest" , "[LocManTest]") {
 REQUIRE(poolOwner->initializeHkManager() == retval::CATCH_OK);
 REQUIRE(poolOwner->initializeHkManagerAfterTaskCreation()
 == retval::CATCH_OK);
-REQUIRE(poolOwner->dataset.assignPointers() == retval::CATCH_OK);
+//REQUIRE(poolOwner->dataset.assignPointers() == retval::CATCH_OK);
 MessageQueueMockBase* mqMock = poolOwner->getMockQueueHandle();
 REQUIRE(mqMock != nullptr);
 CommandMessage messageSent;
@@ -22,11 +28,11 @@ TEST_CASE("LocalPoolManagerTest" , "[LocManTest]") {
 SECTION("BasicTest") {
-// Subscribe for message generation on update.
+/* Subscribe for message generation on update. */
 REQUIRE(poolOwner->subscribeWrapperSetUpdate() == retval::CATCH_OK);
-// Subscribe for an update message.
+/* Subscribe for an update message. */
 poolOwner->dataset.setChanged(true);
-// Now the update message should be generated.
+/* Now the update message should be generated. */
 REQUIRE(poolOwner->poolManager.performHkOperation() == retval::CATCH_OK);
 REQUIRE(mqMock->wasMessageSent() == true);
@@ -34,9 +40,9 @@ TEST_CASE("LocalPoolManagerTest" , "[LocManTest]") {
 CHECK(messageSent.getCommand() == static_cast<int>(
 HousekeepingMessage::UPDATE_NOTIFICATION_SET));
-// Should have been reset.
+/* Should have been reset. */
 CHECK(poolOwner->dataset.hasChanged() == false);
-// Set changed again, result should be the same.
+/* Set changed again, result should be the same. */
 poolOwner->dataset.setChanged(true);
 REQUIRE(poolOwner->poolManager.performHkOperation() == retval::CATCH_OK);
@@ -46,14 +52,14 @@ TEST_CASE("LocalPoolManagerTest" , "[LocManTest]") {
 CHECK(messageSent.getCommand() == static_cast<int>(
 HousekeepingMessage::UPDATE_NOTIFICATION_SET));
-// now subscribe for set update HK as well.
+/* Now subscribe for set update HK as well. */
 REQUIRE(poolOwner->subscribeWrapperSetUpdateHk() == retval::CATCH_OK);
 poolOwner->dataset.setChanged(true);
 REQUIRE(poolOwner->poolManager.performHkOperation() == retval::CATCH_OK);
 REQUIRE(mqMock->wasMessageSent(&messagesSent) == true);
 CHECK(messagesSent == 2);
-// first message sent should be the update notification, considering
-// the internal list is a vector checked in insertion order.
+/* first message sent should be the update notification, considering
+the internal list is a vector checked in insertion order. */
 REQUIRE(mqMock->receiveMessage(&messageSent) == retval::CATCH_OK);
 CHECK(messageSent.getCommand() == static_cast<int>(
 HousekeepingMessage::UPDATE_NOTIFICATION_SET));
@@ -61,16 +67,82 @@ TEST_CASE("LocalPoolManagerTest" , "[LocManTest]") {
 REQUIRE(mqMock->receiveMessage(&messageSent) == retval::CATCH_OK);
 CHECK(messageSent.getCommand() == static_cast<int>(
 HousekeepingMessage::HK_REPORT));
-// clear message to avoid memory leak, our mock won't do it for us (yet)
+/* Clear message to avoid memory leak, our mock won't do it for us (yet) */
 CommandMessageCleaner::clearCommandMessage(&messageSent);
 }
+
+SECTION("SnapshotUpdateTests") {
+/* Set the variables in the set to certain values. These are checked later. */
+{
+PoolReadHelper readHelper(&poolOwner->dataset);
+REQUIRE(readHelper.getReadResult() == retval::CATCH_OK);
+poolOwner->dataset.localPoolVarUint8.value = 5;
+poolOwner->dataset.localPoolVarFloat.value = -12.242;
+poolOwner->dataset.localPoolUint16Vec.value[0] = 2;
+poolOwner->dataset.localPoolUint16Vec.value[1] = 32;
+poolOwner->dataset.localPoolUint16Vec.value[2] = 42932;
+}
+
+/* Subscribe for snapshot generation on update. */
+REQUIRE(poolOwner->subscribeWrapperSetUpdateSnapshot() == retval::CATCH_OK);
+poolOwner->dataset.setChanged(true);
+
+/* Store current time, we are going to check the (approximate) time equality later */
+CCSDSTime::CDS_short timeCdsNow;
+timeval now;
+Clock::getClock_timeval(&now);
+CCSDSTime::convertToCcsds(&timeCdsNow, &now);
+
+/* Trigger generation of snapshot */
+REQUIRE(poolOwner->poolManager.performHkOperation() == retval::CATCH_OK);
+REQUIRE(mqMock->wasMessageSent(&messagesSent) == true);
+CHECK(messagesSent == 1);
+REQUIRE(mqMock->receiveMessage(&messageSent) == retval::CATCH_OK);
+/* Check that snapshot was generated */
+CHECK(messageSent.getCommand() == static_cast<int>(
+HousekeepingMessage::UPDATE_SNAPSHOT_SET));
+
+/* Now we deserialize the snapshot into a new dataset instance */
+CCSDSTime::CDS_short cdsShort;
+LocalPoolTestDataSet newSet;
+HousekeepingSnapshot snapshot(&cdsShort, &newSet);
+store_address_t storeId;
+HousekeepingMessage::getUpdateSnapshotSetCommand(&messageSent, &storeId);
+ConstAccessorPair accessorPair = tglob::getIpcStoreHandle()->getData(storeId);
+REQUIRE(accessorPair.first == retval::CATCH_OK);
+const uint8_t* readOnlyPtr = accessorPair.second.data();
+size_t sizeToDeserialize = accessorPair.second.size();
+CHECK(newSet.localPoolVarFloat.value == 0);
+CHECK(newSet.localPoolVarUint8 == 0);
+CHECK(newSet.localPoolUint16Vec.value[0] == 0);
+CHECK(newSet.localPoolUint16Vec.value[1] == 0);
+CHECK(newSet.localPoolUint16Vec.value[2] == 0);
+/* Fill the dataset and timestamp */
+REQUIRE(snapshot.deSerialize(&readOnlyPtr, &sizeToDeserialize,
+SerializeIF::Endianness::MACHINE) == retval::CATCH_OK);
+/* Now we check that the snapshot is actually correct */
+CHECK(newSet.localPoolVarFloat.value == Catch::Approx(-12.242));
+CHECK(newSet.localPoolVarUint8 == 5);
+CHECK(newSet.localPoolUint16Vec.value[0] == 2);
+CHECK(newSet.localPoolUint16Vec.value[1] == 32);
+CHECK(newSet.localPoolUint16Vec.value[2] == 42932);
+
+/* Now we check that both times are equal */
+CHECK(cdsShort.pField == timeCdsNow.pField);
+CHECK(cdsShort.dayLSB == Catch::Approx(timeCdsNow.dayLSB).margin(1));
+CHECK(cdsShort.dayMSB == Catch::Approx(timeCdsNow.dayMSB).margin(1));
+CHECK(cdsShort.msDay_h == Catch::Approx(timeCdsNow.msDay_h).margin(1));
+CHECK(cdsShort.msDay_hh == Catch::Approx(timeCdsNow.msDay_hh).margin(1));
+CHECK(cdsShort.msDay_l == Catch::Approx(timeCdsNow.msDay_l).margin(1));
+CHECK(cdsShort.msDay_ll == Catch::Approx(timeCdsNow.msDay_ll).margin(1));
+}
+
 SECTION("AdvancedTests") {
-// we need to reset the subscription list because the pool owner
-// is a global object.
-poolOwner->resetSubscriptionList();
-// Subscribe for variable update as well
-REQUIRE(not poolOwner->dataset.hasChanged());
+/* Acquire subscription interface */
+ProvidesDataPoolSubscriptionIF* subscriptionIF = poolOwner->getSubscriptionInterface();
+REQUIRE(subscriptionIF != nullptr);
+
+/* Subscribe for variable update */
 REQUIRE(poolOwner->subscribeWrapperVariableUpdate(lpool::uint8VarId) ==
 retval::CATCH_OK);
 lp_var_t<uint8_t>* poolVar = dynamic_cast<lp_var_t<uint8_t>*>(
@@ -79,22 +151,22 @@ TEST_CASE("LocalPoolManagerTest" , "[LocManTest]") {
 poolVar->setChanged(true);
 REQUIRE(poolOwner->poolManager.performHkOperation() == retval::CATCH_OK);
-// Check update notification was sent.
+/* Check update notification was sent. */
 REQUIRE(mqMock->wasMessageSent(&messagesSent) == true);
 CHECK(messagesSent == 1);
-// Should have been reset.
+/* Should have been reset. */
 CHECK(poolVar->hasChanged() == false);
 REQUIRE(mqMock->receiveMessage(&messageSent) == retval::CATCH_OK);
 CHECK(messageSent.getCommand() == static_cast<int>(
 HousekeepingMessage::UPDATE_NOTIFICATION_VARIABLE));
-// now subscribe for the dataset update (HK and update) again
-REQUIRE(poolOwner->subscribeWrapperSetUpdate() == retval::CATCH_OK);
+/* Now subscribe for the dataset update (HK and update) again with subscription interface */
+REQUIRE(subscriptionIF->subscribeForSetUpdateMessages(lpool::testSetId,
+objects::NO_OBJECT, objects::HK_RECEIVER_MOCK, false) == retval::CATCH_OK);
 REQUIRE(poolOwner->subscribeWrapperSetUpdateHk() == retval::CATCH_OK);
 poolOwner->dataset.setChanged(true);
 REQUIRE(poolOwner->poolManager.performHkOperation() == retval::CATCH_OK);
-// now two messages should be sent.
+/* Now two messages should be sent. */
 REQUIRE(mqMock->wasMessageSent(&messagesSent) == true);
 CHECK(messagesSent == 2);
 mqMock->clearMessages(true);
@@ -102,7 +174,7 @@ TEST_CASE("LocalPoolManagerTest" , "[LocManTest]") {
 poolOwner->dataset.setChanged(true);
 poolVar->setChanged(true);
 REQUIRE(poolOwner->poolManager.performHkOperation() == retval::CATCH_OK);
-// now three messages should be sent.
+/* Now three messages should be sent. */
 REQUIRE(mqMock->wasMessageSent(&messagesSent) == true);
 CHECK(messagesSent == 3);
 REQUIRE(mqMock->receiveMessage(&messageSent) == retval::CATCH_OK);
@@ -118,5 +190,10 @@ TEST_CASE("LocalPoolManagerTest" , "[LocManTest]") {
 REQUIRE(mqMock->receiveMessage(&messageSent) ==
 static_cast<int>(MessageQueueIF::EMPTY));
 }
+
+/* we need to reset the subscription list because the pool owner
+is a global object. */
+poolOwner->resetSubscriptionList();
+mqMock->clearMessages(true);
 }

View File

@@ -20,33 +20,43 @@ static constexpr lp_id_t int64Vec2Id = 4;
 static constexpr uint32_t testSetId = 0;
 static constexpr uint8_t dataSetMaxVariables = 10;
-static const sid_t testSid = sid_t(objects::TEST_LOCAL_POOL_OWNER_BASE,
-testSetId);
+static const sid_t testSid = sid_t(objects::TEST_LOCAL_POOL_OWNER_BASE, testSetId);
+
+static const gp_id_t uint8VarGpid = gp_id_t(objects::TEST_LOCAL_POOL_OWNER_BASE, uint8VarId);
+static const gp_id_t floatVarGpid = gp_id_t(objects::TEST_LOCAL_POOL_OWNER_BASE, floatVarId);
+static const gp_id_t uint32Gpid = gp_id_t(objects::TEST_LOCAL_POOL_OWNER_BASE, uint32VarId);
+static const gp_id_t uint16Vec3Gpid = gp_id_t(objects::TEST_LOCAL_POOL_OWNER_BASE, uint16Vec3Id);
+static const gp_id_t uint64Vec2Id = gp_id_t(objects::TEST_LOCAL_POOL_OWNER_BASE, int64Vec2Id);
 }
 class LocalPoolTestDataSet: public LocalDataSet {
 public:
+LocalPoolTestDataSet():
+LocalDataSet(lpool::testSid, lpool::dataSetMaxVariables) {
+}
+
 LocalPoolTestDataSet(HasLocalDataPoolIF* owner, uint32_t setId):
 LocalDataSet(owner, setId, lpool::dataSetMaxVariables) {
 }
-ReturnValue_t assignPointers() {
-PoolVariableIF** rawVarArray = getContainer();
-localPoolVarUint8 = dynamic_cast<lp_var_t<uint8_t>*>(rawVarArray[0]);
-localPoolVarFloat = dynamic_cast<lp_var_t<float>*>(rawVarArray[1]);
-localPoolUint16Vec = dynamic_cast<lp_vec_t<uint16_t, 3>*>(
-rawVarArray[2]);
-if(localPoolVarUint8 == nullptr or localPoolVarFloat == nullptr or
-localPoolUint16Vec == nullptr) {
-return HasReturnvaluesIF::RETURN_FAILED;
-}
-return HasReturnvaluesIF::RETURN_OK;
-}
+// ReturnValue_t assignPointers() {
+// PoolVariableIF** rawVarArray = getContainer();
+// localPoolVarUint8 = dynamic_cast<lp_var_t<uint8_t>*>(rawVarArray[0]);
+// localPoolVarFloat = dynamic_cast<lp_var_t<float>*>(rawVarArray[1]);
+// localPoolUint16Vec = dynamic_cast<lp_vec_t<uint16_t, 3>*>(
+// rawVarArray[2]);
+// if(localPoolVarUint8 == nullptr or localPoolVarFloat == nullptr or
+// localPoolUint16Vec == nullptr) {
+// return HasReturnvaluesIF::RETURN_FAILED;
+// }
+// return HasReturnvaluesIF::RETURN_OK;
+// }
-lp_var_t<uint8_t>* localPoolVarUint8 = nullptr;
-lp_var_t<float>* localPoolVarFloat = nullptr;
-lp_vec_t<uint16_t, 3>* localPoolUint16Vec = nullptr;
+lp_var_t<uint8_t> localPoolVarUint8 = lp_var_t<uint8_t>(lpool::uint8VarGpid, this);
+lp_var_t<float> localPoolVarFloat = lp_var_t<float>(lpool::floatVarGpid, this);
+lp_vec_t<uint16_t, 3> localPoolUint16Vec = lp_vec_t<uint16_t, 3>(lpool::uint16Vec3Gpid, this);
 private:
 };
@@ -155,7 +165,12 @@ public:
 ReturnValue_t subscribeWrapperSetUpdate() {
 return poolManager.subscribeForSetUpdateMessages(lpool::testSetId,
-objects::NO_OBJECT, MessageQueueIF::NO_QUEUE, false);
+objects::NO_OBJECT, objects::HK_RECEIVER_MOCK, false);
+}
+
+ReturnValue_t subscribeWrapperSetUpdateSnapshot() {
+return poolManager.subscribeForSetUpdateMessages(lpool::testSetId,
+objects::NO_OBJECT, objects::HK_RECEIVER_MOCK, true);
 }
 ReturnValue_t subscribeWrapperSetUpdateHk(bool diagnostics = false) {
@@ -165,7 +180,7 @@ public:
 ReturnValue_t subscribeWrapperVariableUpdate(lp_id_t localPoolId) {
 return poolManager.subscribeForVariableUpdateMessages(localPoolId,
-MessageQueueIF::NO_QUEUE, objects::NO_OBJECT, false);
+MessageQueueIF::NO_QUEUE, objects::HK_RECEIVER_MOCK, false);
 }
 void resetSubscriptionList() {

View File

@@ -14,7 +14,7 @@ TEST_CASE("LocalPoolVariable" , "[LocPoolVarTest]") {
 == retval::CATCH_OK);
 SECTION("Basic Tests") {
-// very basic test.
+/* very basic test. */
 lp_var_t<uint8_t> testVariable = lp_var_t<uint8_t>(
 objects::TEST_LOCAL_POOL_OWNER_BASE, lpool::uint8VarId);
 REQUIRE(testVariable.read() == retval::CATCH_OK);
@@ -77,7 +77,7 @@ TEST_CASE("LocalPoolVariable" , "[LocPoolVarTest]") {
 SECTION("ErrorHandling") {
-// not try to use a local pool variable which does not exist
+/* now try to use a local pool variable which does not exist */
 lp_var_t<uint8_t> invalidVariable = lp_var_t<uint8_t>(
 objects::TEST_LOCAL_POOL_OWNER_BASE, 0xffffffff);
 REQUIRE(invalidVariable.read() ==
@@ -85,7 +85,7 @@ TEST_CASE("LocalPoolVariable" , "[LocPoolVarTest]") {
 REQUIRE(invalidVariable.commit() ==
 static_cast<int>(localpool::POOL_ENTRY_NOT_FOUND));
-// now try to access with wrong type
+/* now try to access with wrong type */
 lp_var_t<int8_t> invalidVariable2 = lp_var_t<int8_t>(
 objects::TEST_LOCAL_POOL_OWNER_BASE, lpool::uint8VarId);
 REQUIRE(invalidVariable2.read() ==
@@ -108,7 +108,7 @@ TEST_CASE("LocalPoolVariable" , "[LocPoolVarTest]") {
 sif::info << "LocalPoolVariable printout: " <<uint32tVar << std::endl;
 #endif
-// for code coverage. If program does not crash -> OK
+/* for code coverage. If program does not crash -> OK */
 lp_var_t<uint8_t> invalidObjectVar = lp_var_t<uint8_t>(
 0xffffffff, lpool::uint8VarId);
 gp_id_t globPoolId(0xffffffff,

View File

@@ -35,13 +35,13 @@ TEST_CASE("LocalPoolVector" , "[LocPoolVecTest]") {
 CHECK(testVector[0] == 5);
-// This is invalid access, so the last value will be set instead.
-// (we can't throw exceptions)
+/* This is invalid access, so the last value will be set instead.
+(we can't throw exceptions) */
 testVector[4] = 12;
 CHECK(testVector[2] == 12);
 CHECK(testVector.commit() == retval::CATCH_OK);
-// Use read-only reference.
+/* Use read-only reference. */
 const lp_vec_t<uint16_t, 3>& roTestVec = testVector;
 uint16_t valueOne = roTestVec[0];
 CHECK(valueOne == 5);
@@ -87,7 +87,7 @@ TEST_CASE("LocalPoolVector" , "[LocPoolVecTest]") {
 }
 SECTION("ErrorHandling") {
-// not try to use a local pool variable which does not exist
+/* Now try to use a local pool variable which does not exist */
 lp_vec_t<uint16_t, 3> invalidVector = lp_vec_t<uint16_t, 3>(
 objects::TEST_LOCAL_POOL_OWNER_BASE, 0xffffffff);
 REQUIRE(invalidVector.read() ==
@@ -95,7 +95,7 @@ TEST_CASE("LocalPoolVector" , "[LocPoolVecTest]") {
 REQUIRE(invalidVector.commit() ==
 static_cast<int>(localpool::POOL_ENTRY_NOT_FOUND));
-// now try to access with wrong type
+/* Now try to access with wrong type */
 lp_vec_t<uint32_t, 3> invalidVector2 = lp_vec_t<uint32_t, 3>(
 objects::TEST_LOCAL_POOL_OWNER_BASE, lpool::uint16Vec3Id);
 REQUIRE(invalidVector2.read() ==

View File

@@ -276,7 +276,7 @@ TEST_CASE( "Local Pool Extended Tests [3 Pools]" , "[TestPool2]") {
 CHECK(receptionArray[3] == 66);
 // now clear first page
-simplePool.clearPage(0);
+simplePool.clearSubPool(0);
 bytesWritten = 0;
 simplePool.getFillCount(receptionArray.data(), &bytesWritten);
 // Second page full, median fill count is 33 %