Merge pull request 'FSFW Update' (#361) from KSat/fsfw:mueller/fsfw-update into development

Reviewed-on: fsfw/fsfw#361
Commit: 35d75bae56
@@ -18,7 +18,7 @@ public:
     * This function is protected because it should only be used by the
     * class imlementing the interface.
     */
-   virtual LocalDataPoolManager* getHkManagerHandle() = 0;
+   virtual LocalDataPoolManager* getPoolManagerHandle() = 0;

protected:

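The pure virtual accessor was renamed from getHkManagerHandle() to getPoolManagerHandle(). A minimal usage sketch, assuming hkOwner is a valid HasLocalDataPoolIF* and that the caller is one of the attorney/friend classes allowed to use the accessor interface (variable names are illustrative, not part of this commit):

    /* Illustrative sketch only. */
    AccessPoolManagerIF* accessor = HasLocalDpIFUserAttorney::getAccessorHandle(hkOwner);
    if(accessor != nullptr) {
        /* Renamed call: returns the LocalDataPoolManager owned by hkOwner. */
        LocalDataPoolManager* poolManager = accessor->getPoolManagerHandle();
        MutexIF* poolMutex = accessor->getLocalPoolMutex();
    }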
@@ -5,8 +5,8 @@
#include "internal/LocalPoolDataSetAttorney.h"
#include "internal/HasLocalDpIFManagerAttorney.h"

-#include "../housekeeping/HousekeepingPacketUpdate.h"
#include "../housekeeping/HousekeepingSetPacket.h"
+#include "../housekeeping/HousekeepingSnapshot.h"
#include "../housekeeping/AcceptsHkPacketsIF.h"
#include "../timemanager/CCSDSTime.h"
#include "../ipc/MutexFactory.h"
@@ -226,7 +226,7 @@ ReturnValue_t LocalDataPoolManager::handleNotificationSnapshot(
    Clock::getClock_timeval(&now);
    CCSDSTime::CDS_short cds;
    CCSDSTime::convertToCcsds(&cds, &now);
-   HousekeepingPacketUpdate updatePacket(reinterpret_cast<uint8_t*>(&cds),
+   HousekeepingSnapshot updatePacket(reinterpret_cast<uint8_t*>(&cds),
            sizeof(cds), HasLocalDpIFManagerAttorney::getPoolObjectHandle(owner,
                    receiver.dataId.localPoolId));

@@ -264,7 +264,7 @@ ReturnValue_t LocalDataPoolManager::handleNotificationSnapshot(
    Clock::getClock_timeval(&now);
    CCSDSTime::CDS_short cds;
    CCSDSTime::convertToCcsds(&cds, &now);
-   HousekeepingPacketUpdate updatePacket(reinterpret_cast<uint8_t*>(&cds),
+   HousekeepingSnapshot updatePacket(reinterpret_cast<uint8_t*>(&cds),
            sizeof(cds), HasLocalDpIFManagerAttorney::getDataSetHandle(owner,
                    receiver.dataId.sid));

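Both call sites above follow the same pattern: a CDS short timestamp is generated and wrapped together with the data to be snapshotted. A condensed sketch of that pattern, where dataSetHandle stands in for the LocalPoolDataSetBase* returned by the attorney call (not a literal excerpt of this commit):

    timeval now;
    Clock::getClock_timeval(&now);
    CCSDSTime::CDS_short cds;
    CCSDSTime::convertToCcsds(&cds, &now);
    /* The snapshot only references the timestamp buffer and the data set to serialize. */
    HousekeepingSnapshot updatePacket(reinterpret_cast<uint8_t*>(&cds), sizeof(cds), dataSetHandle);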
@@ -292,7 +292,7 @@ ReturnValue_t LocalDataPoolManager::handleNotificationSnapshot(
}

ReturnValue_t LocalDataPoolManager::addUpdateToStore(
-       HousekeepingPacketUpdate& updatePacket, store_address_t& storeId) {
+       HousekeepingSnapshot& updatePacket, store_address_t& storeId) {
    size_t updatePacketSize = updatePacket.getSerializedSize();
    uint8_t *storePtr = nullptr;
    ReturnValue_t result = ipcStore->getFreeElement(&storeId,
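addUpdateToStore() requests a fitting slot in the IPC store before copying the snapshot into it. A hedged sketch of how this function plausibly continues; the serialize() call follows the usual SerializeIF convention and is an assumption, since the rest of the function is not part of the shown hunk:

    size_t updatePacketSize = updatePacket.getSerializedSize();
    uint8_t* storePtr = nullptr;
    ReturnValue_t result = ipcStore->getFreeElement(&storeId, updatePacketSize, &storePtr);
    if(result == HasReturnvaluesIF::RETURN_OK) {
        size_t serializedSize = 0;
        /* Assumed follow-up: serialize the snapshot directly into the store slot. */
        result = updatePacket.serialize(&storePtr, &serializedSize, updatePacketSize,
                SerializeIF::Endianness::MACHINE);
    }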
@@ -906,6 +906,6 @@ void LocalDataPoolManager::printWarningOrError(sif::OutputTypes outputType,
    }
}

-LocalDataPoolManager* LocalDataPoolManager::getHkManagerHandle() {
+LocalDataPoolManager* LocalDataPoolManager::getPoolManagerHandle() {
    return this;
}
@@ -24,7 +24,7 @@ void setStaticFrameworkObjectIds();
}

class LocalPoolDataSetBase;
-class HousekeepingPacketUpdate;
+class HousekeepingSnapshot;
class HasLocalDataPoolIF;
class LocalDataPool;

@@ -52,17 +52,17 @@ class LocalDataPool;
 * Each pool entry has a valid state too.
 * @author R. Mueller
 */
-class LocalDataPoolManager: public ProvidesDataPoolSubscriptionIF,
+class LocalDataPoolManager:
+       public ProvidesDataPoolSubscriptionIF,
        public AccessPoolManagerIF {
    friend void (Factory::setStaticFrameworkObjectIds)();
    //! Some classes using the pool manager directly need to access class internals of the
    //! manager. The attorney provides granular control of access to these internals.
    friend class LocalDpManagerAttorney;
public:
    static constexpr uint8_t INTERFACE_ID = CLASS_ID::HOUSEKEEPING_MANAGER;

    static constexpr ReturnValue_t QUEUE_OR_DESTINATION_INVALID = MAKE_RETURN_CODE(0);

    static constexpr ReturnValue_t WRONG_HK_PACKET_TYPE = MAKE_RETURN_CODE(1);
    static constexpr ReturnValue_t REPORTING_STATUS_UNCHANGED = MAKE_RETURN_CODE(2);
    static constexpr ReturnValue_t PERIODIC_HELPER_INVALID = MAKE_RETURN_CODE(3);
@@ -81,29 +81,29 @@ public:
     * @param appendValidityBuffer Specify whether a buffer containing the
     * validity state is generated when serializing or deserializing packets.
     */
    LocalDataPoolManager(HasLocalDataPoolIF* owner, MessageQueueIF* queueToUse,
            bool appendValidityBuffer = true);
    virtual~ LocalDataPoolManager();

    /**
     * Assigns the queue to use. Make sure to call this in the #initialize
     * function of the owner.
     * @param queueToUse
     * @param nonDiagInvlFactor See #setNonDiagnosticIntervalFactor doc
     * @return
     */
    ReturnValue_t initialize(MessageQueueIF* queueToUse);

    /**
     * Initializes the map by calling the map initialization function and
     * setting the periodic factor for non-diagnostic packets.
     * Don't forget to call this in the #initializeAfterTaskCreation call of
     * the owner, otherwise the map will be invalid!
     * @param nonDiagInvlFactor
     * @return
     */
    ReturnValue_t initializeAfterTaskCreation(
            uint8_t nonDiagInvlFactor = 5);

    /**
     * @brief This should be called in the periodic handler of the owner.
@@ -116,49 +116,49 @@ public:
     */
    virtual ReturnValue_t performHkOperation();

    /**
     * @brief Subscribe for the generation of periodic packets.
     * @details
     * This subscription mechanism will generally be used by the data creator
     * to generate housekeeping packets which are downlinked directly.
     * @return
     */
    ReturnValue_t subscribeForPeriodicPacket(sid_t sid, bool enableReporting,
            float collectionInterval, bool isDiagnostics,
            object_id_t packetDestination = defaultHkDestination) override;

    /**
     * @brief Subscribe for the generation of packets if the dataset
     * is marked as changed.
     * @details
     * This subscription mechanism will generally be used by the data creator.
     * @param sid
     * @param isDiagnostics
     * @param packetDestination
     * @return
     */
    ReturnValue_t subscribeForUpdatePackets(sid_t sid, bool reportingEnabled,
            bool isDiagnostics,
            object_id_t packetDestination = defaultHkDestination) override;

    /**
     * @brief Subscribe for a notification message which will be sent
     * if a dataset has changed.
     * @details
     * This subscription mechanism will generally be used internally by
     * other software components.
     * @param setId Set ID of the set to receive update messages from.
     * @param destinationObject
     * @param targetQueueId
     * @param generateSnapshot If this is set to true, a copy of the current
     * data with a timestamp will be generated and sent via message.
     * Otherwise, only an notification message is sent.
     * @return
     */
    ReturnValue_t subscribeForSetUpdateMessages(const uint32_t setId,
            object_id_t destinationObject,
            MessageQueueId_t targetQueueId,
            bool generateSnapshot) override;

    /**
     * @brief Subscribe for an notification message which will be sent if a
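The three subscription calls above belong to ProvidesDataPoolSubscriptionIF. A hedged sketch of how a data creator might subscribe one of its sets during initialization; the sid value, the poolManager member name and the chosen intervals are placeholders, not taken from this commit:

    /* Illustrative only: subscribe a set owned by this object. */
    sid_t mySid = sid_t(this->getObjectId(), 0);
    /* Periodic non-diagnostic HK packet every 10 seconds. */
    poolManager.subscribeForPeriodicPacket(mySid, true, 10.0, false);
    /* Additionally generate an HK packet whenever the set is marked as changed. */
    poolManager.subscribeForUpdatePackets(mySid, true, false);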
@@ -179,18 +179,16 @@ public:
            MessageQueueId_t targetQueueId,
            bool generateSnapshot) override;

-   MutexIF* getLocalPoolMutex() override;
-
    /**
     * Non-Diagnostics packets usually have a lower minimum sampling frequency
     * than diagnostic packets.
     * A factor can be specified to determine the minimum sampling frequency
     * for non-diagnostic packets. The minimum sampling frequency of the
     * diagnostics packets,which is usually jusst the period of the
     * performOperation calls, is multiplied with that factor.
     * @param factor
     */
    void setNonDiagnosticIntervalFactor(uint8_t nonDiagInvlFactor);

    /**
     * @brief The manager is also able to handle housekeeping messages.
@@ -206,18 +204,18 @@ public:
     */
    virtual ReturnValue_t handleHousekeepingMessage(CommandMessage* message);

    /**
     * Generate a housekeeping packet with a given SID.
     * @param sid
     * @return
     */
    ReturnValue_t generateHousekeepingPacket(sid_t sid,
            LocalPoolDataSetBase* dataSet, bool forDownlink,
            MessageQueueId_t destination = MessageQueueIF::NO_QUEUE);

    HasLocalDataPoolIF* getOwner();

    ReturnValue_t printPoolEntry(lp_id_t localPoolId);

    /**
     * Different types of housekeeping reporting are possible.
|
|||||||
PERIODIC,
|
PERIODIC,
|
||||||
//! Housekeeping packet will be generated if values have changed.
|
//! Housekeeping packet will be generated if values have changed.
|
||||||
UPDATE_HK,
|
UPDATE_HK,
|
||||||
//! Update notification will be sent out as message.
|
//! Update notification will be sent out as message.
|
||||||
UPDATE_NOTIFICATION,
|
UPDATE_NOTIFICATION,
|
||||||
//! Notification will be sent out as message and a snapshot of the
|
//! Notification will be sent out as message and a snapshot of the
|
||||||
//! current data will be generated.
|
//! current data will be generated.
|
||||||
UPDATE_SNAPSHOT,
|
UPDATE_SNAPSHOT,
|
||||||
};
|
};
|
||||||
|
|
||||||
/**
|
/** Different data types are possible in the HK receiver map. For example, updates can be
|
||||||
* Different data types are possible in the HK receiver map.
|
requested for full datasets or for single pool variables. Periodic reporting is only possible
|
||||||
* For example, updates can be requested for full datasets or
|
for data sets. */
|
||||||
* for single pool variables. Periodic reporting is only possible for
|
|
||||||
* data sets.
|
|
||||||
*/
|
|
||||||
enum class DataType: uint8_t {
|
enum class DataType: uint8_t {
|
||||||
LOCAL_POOL_VARIABLE,
|
LOCAL_POOL_VARIABLE,
|
||||||
DATA_SET
|
DATA_SET
|
||||||
};
|
};
|
||||||
|
|
||||||
/* Copying forbidden */
|
/* Copying forbidden */
|
||||||
@@ -267,11 +262,19 @@ public:

    object_id_t getCreatorObjectId() const;

-   virtual LocalDataPoolManager* getHkManagerHandle() override;
+   /**
+    * Get the pointer to the mutex. Can be used to lock the data pool
+    * externally. Use with care and don't forget to unlock locked mutexes!
+    * For now, only friend classes can accss this function.
+    * @return
+    */
+   MutexIF* getMutexHandle();
+
+   virtual LocalDataPoolManager* getPoolManagerHandle() override;
private:
    localpool::DataPool localPoolMap;
-   //! Every housekeeping data manager has a mutex to protect access
-   //! to it's data pool.
+   /** Every housekeeping data manager has a mutex to protect access
+   to it's data pool. */
    MutexIF* mutex = nullptr;

    /** The class which actually owns the manager (and its datapool). */
|
|||||||
|
|
||||||
uint8_t nonDiagnosticIntervalFactor = 0;
|
uint8_t nonDiagnosticIntervalFactor = 0;
|
||||||
|
|
||||||
/** Default receiver for periodic HK packets */
|
/** Default receiver for periodic HK packets */
|
||||||
static object_id_t defaultHkDestination;
|
static object_id_t defaultHkDestination;
|
||||||
MessageQueueId_t hkDestinationId = MessageQueueIF::NO_QUEUE;
|
MessageQueueId_t hkDestinationId = MessageQueueIF::NO_QUEUE;
|
||||||
|
|
||||||
union DataId {
|
union DataId {
|
||||||
DataId(): sid() {};
|
DataId(): sid() {};
|
||||||
@ -291,10 +294,10 @@ private:
|
|||||||
|
|
||||||
/** The data pool manager will keep an internal map of HK receivers. */
|
/** The data pool manager will keep an internal map of HK receivers. */
|
||||||
struct HkReceiver {
|
struct HkReceiver {
|
||||||
/** Object ID of receiver */
|
/** Object ID of receiver */
|
||||||
object_id_t objectId = objects::NO_OBJECT;
|
object_id_t objectId = objects::NO_OBJECT;
|
||||||
|
|
||||||
DataType dataType = DataType::DATA_SET;
|
DataType dataType = DataType::DATA_SET;
|
||||||
DataId dataId;
|
DataId dataId;
|
||||||
|
|
||||||
ReportingType reportingType = ReportingType::PERIODIC;
|
ReportingType reportingType = ReportingType::PERIODIC;
|
||||||
@ -324,37 +327,30 @@ private:
|
|||||||
* of generated housekeeping packets. */
|
* of generated housekeeping packets. */
|
||||||
bool appendValidityBuffer = true;
|
bool appendValidityBuffer = true;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @brief Queue used for communication, for example commands.
|
* @brief Queue used for communication, for example commands.
|
||||||
* Is also used to send messages. Can be set either in the constructor
|
* Is also used to send messages. Can be set either in the constructor
|
||||||
* or in the initialize() function.
|
* or in the initialize() function.
|
||||||
*/
|
*/
|
||||||
MessageQueueIF* hkQueue = nullptr;
|
MessageQueueIF* hkQueue = nullptr;
|
||||||
|
|
||||||
/** Global IPC store is used to store all packets. */
|
/** Global IPC store is used to store all packets. */
|
||||||
StorageManagerIF* ipcStore = nullptr;
|
StorageManagerIF* ipcStore = nullptr;
|
||||||
/**
|
|
||||||
* Get the pointer to the mutex. Can be used to lock the data pool
|
|
||||||
* externally. Use with care and don't forget to unlock locked mutexes!
|
|
||||||
* For now, only friend classes can accss this function.
|
|
||||||
* @return
|
|
||||||
*/
|
|
||||||
MutexIF* getMutexHandle();
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Read a variable by supplying its local pool ID and assign the pool
|
* Read a variable by supplying its local pool ID and assign the pool
|
||||||
* entry to the supplied PoolEntry pointer. The type of the pool entry
|
* entry to the supplied PoolEntry pointer. The type of the pool entry
|
||||||
* is deduced automatically. This call is not thread-safe!
|
* is deduced automatically. This call is not thread-safe!
|
||||||
* For now, only friend classes like LocalPoolVar may access this
|
* For now, only friend classes like LocalPoolVar may access this
|
||||||
* function.
|
* function.
|
||||||
* @tparam T Type of the pool entry
|
* @tparam T Type of the pool entry
|
||||||
* @param localPoolId Pool ID of the variable to read
|
* @param localPoolId Pool ID of the variable to read
|
||||||
* @param poolVar [out] Corresponding pool entry will be assigned to the
|
* @param poolVar [out] Corresponding pool entry will be assigned to the
|
||||||
* supplied pointer.
|
* supplied pointer.
|
||||||
* @return
|
* @return
|
||||||
*/
|
*/
|
||||||
template <class T> ReturnValue_t fetchPoolEntry(lp_id_t localPoolId,
|
template <class T> ReturnValue_t fetchPoolEntry(lp_id_t localPoolId,
|
||||||
PoolEntry<T> **poolEntry);
|
PoolEntry<T> **poolEntry);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* This function is used to fill the local data pool map with pool
|
* This function is used to fill the local data pool map with pool
|
||||||
@ -364,55 +360,57 @@ private:
|
|||||||
*/
|
*/
|
||||||
ReturnValue_t initializeHousekeepingPoolEntriesOnce();
|
ReturnValue_t initializeHousekeepingPoolEntriesOnce();
|
||||||
|
|
||||||
ReturnValue_t serializeHkPacketIntoStore(
|
MutexIF* getLocalPoolMutex() override;
|
||||||
HousekeepingPacketDownlink& hkPacket,
|
|
||||||
store_address_t& storeId, bool forDownlink, size_t* serializedSize);
|
|
||||||
|
|
||||||
void performPeriodicHkGeneration(HkReceiver& hkReceiver);
|
ReturnValue_t serializeHkPacketIntoStore(
|
||||||
ReturnValue_t togglePeriodicGeneration(sid_t sid, bool enable,
|
HousekeepingPacketDownlink& hkPacket,
|
||||||
bool isDiagnostics);
|
store_address_t& storeId, bool forDownlink, size_t* serializedSize);
|
||||||
ReturnValue_t changeCollectionInterval(sid_t sid,
|
|
||||||
float newCollectionInterval, bool isDiagnostics);
|
|
||||||
ReturnValue_t generateSetStructurePacket(sid_t sid, bool isDiagnostics);
|
|
||||||
|
|
||||||
void handleHkUpdateResetListInsertion(DataType dataType, DataId dataId);
|
void performPeriodicHkGeneration(HkReceiver& hkReceiver);
|
||||||
void handleChangeResetLogic(DataType type,
|
ReturnValue_t togglePeriodicGeneration(sid_t sid, bool enable,
|
||||||
DataId dataId, MarkChangedIF* toReset);
|
bool isDiagnostics);
|
||||||
void resetHkUpdateResetHelper();
|
ReturnValue_t changeCollectionInterval(sid_t sid,
|
||||||
|
float newCollectionInterval, bool isDiagnostics);
|
||||||
|
ReturnValue_t generateSetStructurePacket(sid_t sid, bool isDiagnostics);
|
||||||
|
|
||||||
ReturnValue_t handleHkUpdate(HkReceiver& hkReceiver,
|
void handleHkUpdateResetListInsertion(DataType dataType, DataId dataId);
|
||||||
|
void handleChangeResetLogic(DataType type,
|
||||||
|
DataId dataId, MarkChangedIF* toReset);
|
||||||
|
void resetHkUpdateResetHelper();
|
||||||
|
|
||||||
|
ReturnValue_t handleHkUpdate(HkReceiver& hkReceiver,
|
||||||
ReturnValue_t& status);
|
ReturnValue_t& status);
|
||||||
ReturnValue_t handleNotificationUpdate(HkReceiver& hkReceiver,
|
ReturnValue_t handleNotificationUpdate(HkReceiver& hkReceiver,
|
||||||
ReturnValue_t& status);
|
|
||||||
ReturnValue_t handleNotificationSnapshot(HkReceiver& hkReceiver,
|
|
||||||
ReturnValue_t& status);
|
ReturnValue_t& status);
|
||||||
ReturnValue_t addUpdateToStore(HousekeepingPacketUpdate& updatePacket,
|
ReturnValue_t handleNotificationSnapshot(HkReceiver& hkReceiver,
|
||||||
store_address_t& storeId);
|
ReturnValue_t& status);
|
||||||
|
ReturnValue_t addUpdateToStore(HousekeepingSnapshot& updatePacket,
|
||||||
|
store_address_t& storeId);
|
||||||
|
|
||||||
void printWarningOrError(sif::OutputTypes outputType,
|
void printWarningOrError(sif::OutputTypes outputType,
|
||||||
const char* functionName,
|
const char* functionName,
|
||||||
ReturnValue_t errorCode = HasReturnvaluesIF::RETURN_FAILED,
|
ReturnValue_t errorCode = HasReturnvaluesIF::RETURN_FAILED,
|
||||||
const char* errorPrint = nullptr);
|
const char* errorPrint = nullptr);
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
||||||
template<class T> inline
|
template<class T> inline
|
||||||
ReturnValue_t LocalDataPoolManager::fetchPoolEntry(lp_id_t localPoolId,
|
ReturnValue_t LocalDataPoolManager::fetchPoolEntry(lp_id_t localPoolId,
|
||||||
PoolEntry<T> **poolEntry) {
|
PoolEntry<T> **poolEntry) {
|
||||||
auto poolIter = localPoolMap.find(localPoolId);
|
auto poolIter = localPoolMap.find(localPoolId);
|
||||||
if (poolIter == localPoolMap.end()) {
|
if (poolIter == localPoolMap.end()) {
|
||||||
printWarningOrError(sif::OutputTypes::OUT_ERROR, "fetchPoolEntry",
|
printWarningOrError(sif::OutputTypes::OUT_ERROR, "fetchPoolEntry",
|
||||||
localpool::POOL_ENTRY_NOT_FOUND);
|
localpool::POOL_ENTRY_NOT_FOUND);
|
||||||
return localpool::POOL_ENTRY_NOT_FOUND;
|
return localpool::POOL_ENTRY_NOT_FOUND;
|
||||||
}
|
}
|
||||||
|
|
||||||
*poolEntry = dynamic_cast< PoolEntry<T>* >(poolIter->second);
|
*poolEntry = dynamic_cast< PoolEntry<T>* >(poolIter->second);
|
||||||
if(*poolEntry == nullptr) {
|
if(*poolEntry == nullptr) {
|
||||||
printWarningOrError(sif::OutputTypes::OUT_ERROR, "fetchPoolEntry",
|
printWarningOrError(sif::OutputTypes::OUT_ERROR, "fetchPoolEntry",
|
||||||
localpool::POOL_ENTRY_TYPE_CONFLICT);
|
localpool::POOL_ENTRY_TYPE_CONFLICT);
|
||||||
return localpool::POOL_ENTRY_TYPE_CONFLICT;
|
return localpool::POOL_ENTRY_TYPE_CONFLICT;
|
||||||
}
|
}
|
||||||
return HasReturnvaluesIF::RETURN_OK;
|
return HasReturnvaluesIF::RETURN_OK;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
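A short sketch of how the fetchPoolEntry() template above is used from a friend class or from the manager itself; poolId is a placeholder local pool ID and the example sticks to what the template shown here guarantees:

    PoolEntry<uint8_t>* poolEntry = nullptr;
    ReturnValue_t result = fetchPoolEntry<uint8_t>(poolId, &poolEntry);
    if(result != HasReturnvaluesIF::RETURN_OK) {
        /* POOL_ENTRY_NOT_FOUND or POOL_ENTRY_TYPE_CONFLICT, as handled above. */
    }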
@@ -4,11 +4,26 @@
#include "LocalPoolDataSetBase.h"
#include <vector>

+/**
+ * @brief   This dataset type can be used to group related pool variables if the number of
+ *          variables should not be fixed.
+ * @details
+ * This will is the primary data structure to organize pool variables into
+ * sets which can be accessed via the housekeeping service interface or
+ * which can be sent to other software objects.
+ *
+ * It is recommended to read the documentation of the LocalPoolDataSetBase
+ * class for more information on how this class works and how to use it.
+ * @tparam capacity Capacity of the static dataset, which is usually known
+ *         beforehand.
+ */
class LocalDataSet: public LocalPoolDataSetBase {
public:
    LocalDataSet(HasLocalDataPoolIF* hkOwner, uint32_t setId,
            const size_t maxSize);

    LocalDataSet(sid_t sid, const size_t maxSize);

    virtual~ LocalDataSet();

    //! Copying forbidden for now.
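A hedged construction sketch using the two constructors declared above; the owner pointer, object ID and set ID are placeholders. The first constructor is presumably meant for the owning class, the second, sid-based one for access from other objects:

    /* Inside the owning class (hkOwner), with room for up to 10 variables. */
    LocalDataSet ownerSet(hkOwner, setId, 10);
    /* From an external object, referencing the set by its structure ID. */
    LocalDataSet externalSet(sid_t(ownerObjectId, setId), 10);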
@@ -28,7 +28,7 @@ LocalPoolDataSetBase::LocalPoolDataSetBase(HasLocalDataPoolIF *hkOwner,
    AccessPoolManagerIF* accessor = HasLocalDpIFUserAttorney::getAccessorHandle(hkOwner);

    if(accessor != nullptr) {
-       poolManager = accessor->getHkManagerHandle();
+       poolManager = accessor->getPoolManagerHandle();
        mutexIfSingleDataCreator = accessor->getLocalPoolMutex();
    }

@@ -22,7 +22,7 @@ LocalPoolObjectBase::LocalPoolObjectBase(lp_id_t poolId, HasLocalDataPoolIF* hkO
        return;
    }
    AccessPoolManagerIF* poolManAccessor = HasLocalDpIFUserAttorney::getAccessorHandle(hkOwner);
-   hkManager = poolManAccessor->getHkManagerHandle();
+   hkManager = poolManAccessor->getPoolManagerHandle();

    if (dataSet != nullptr) {
        dataSet->registerVariable(this);
@@ -50,7 +50,7 @@ LocalPoolObjectBase::LocalPoolObjectBase(object_id_t poolOwner, lp_id_t poolId,

    AccessPoolManagerIF* accessor = HasLocalDpIFUserAttorney::getAccessorHandle(hkOwner);
    if(accessor != nullptr) {
-       hkManager = accessor->getHkManagerHandle();
+       hkManager = accessor->getPoolManagerHandle();
    }

    if(dataSet != nullptr) {
@@ -77,8 +77,7 @@ public:
     * @param dataSet
     * @param setReadWriteMode
     */
-   LocalPoolVector(gp_id_t globalPoolId,
-           DataSetIF* dataSet = nullptr,
+   LocalPoolVector(gp_id_t globalPoolId, DataSetIF* dataSet = nullptr,
            pool_rwm_t setReadWriteMode = pool_rwm_t::VAR_READ_WRITE);

    /**
@@ -87,7 +86,7 @@ public:
     * The user can work on this attribute just like he would on a local
     * array of this type.
     */
-   T value[vectorSize];
+   T value[vectorSize]= {};
    /**
     * @brief The classes destructor is empty.
     * @details If commit() was not called, the local value is
@@ -16,7 +16,6 @@ inline LocalPoolVector<T, vectorSize>::LocalPoolVector(object_id_t poolOwner,
        lp_id_t poolId, DataSetIF *dataSet, pool_rwm_t setReadWriteMode):
        LocalPoolObjectBase(poolOwner, poolId, dataSet, setReadWriteMode) {}

-
template<typename T, uint16_t vectorSize>
inline LocalPoolVector<T, vectorSize>::LocalPoolVector(gp_id_t globalPoolId,
        DataSetIF *dataSet, pool_rwm_t setReadWriteMode):
@@ -6,7 +6,8 @@
#include <array>

/**
- * @brief   This local dataset type is created on the stack.
+ * @brief   This dataset type can be used to group related pool variables if the number of
+ *          variables is fixed.
 * @details
 * This will is the primary data structure to organize pool variables into
 * sets which can be accessed via the housekeeping service interface or
doc/doxy/.gitignore (new file, vendored, 3 lines)
@@ -0,0 +1,3 @@
+html
+latex
+rtf

doc/doxy/OPUS.doxyfile (new file, 2609 lines)
Diff suppressed because it is too large.
@@ -1,24 +1,37 @@
-#ifndef FSFW_HOUSEKEEPING_HOUSEKEEPINGPACKETUPDATE_H_
-#define FSFW_HOUSEKEEPING_HOUSEKEEPINGPACKETUPDATE_H_
+#ifndef FSFW_HOUSEKEEPING_HOUSEKEEPINGSNAPSHOT_H_
+#define FSFW_HOUSEKEEPING_HOUSEKEEPINGSNAPSHOT_H_

#include "../serialize/SerialBufferAdapter.h"
#include "../serialize/SerialLinkedListAdapter.h"
#include "../datapoollocal/LocalPoolDataSetBase.h"
+#include "../datapoollocal/LocalPoolObjectBase.h"
+#include "../timemanager/CCSDSTime.h"

/**
- * @brief   This helper class will be used to serialize and deserialize
- *          update housekeeping packets into the store.
+ * @brief   This helper class will be used to serialize and deserialize update housekeeping packets
+ *          into the store.
 */
-class HousekeepingPacketUpdate: public SerializeIF {
+class HousekeepingSnapshot: public SerializeIF {
public:

    /**
-    * Update packet constructor for datasets
-    * @param timeStamp
-    * @param timeStampSize
-    * @param hkData
-    * @param hkDataSize
+    * Update packet constructor for datasets.
+    * @param cdsShort      If a CSD short timestamp is used, a reference should be
+    *                      supplied here
+    * @param dataSetPtr    Pointer to the dataset instance to serialize or deserialize the
+    *                      data into
     */
-   HousekeepingPacketUpdate(uint8_t* timeStamp, size_t timeStampSize,
+   HousekeepingSnapshot(CCSDSTime::CDS_short* cdsShort, LocalPoolDataSetBase* dataSetPtr):
+           timeStamp(reinterpret_cast<uint8_t*>(cdsShort)),
+           timeStampSize(sizeof(CCSDSTime::CDS_short)), updateData(dataSetPtr) {};
+
+   /**
+    * Update packet constructor for datasets.
+    * @param timeStamp     Pointer to the buffer where the timestamp will be stored.
+    * @param timeStampSize Size of the timestamp
+    * @param dataSetPtr    Pointer to the dataset instance to deserialize the data into
+    */
+   HousekeepingSnapshot(uint8_t* timeStamp, size_t timeStampSize,
            LocalPoolDataSetBase* dataSetPtr):
            timeStamp(timeStamp), timeStampSize(timeStampSize),
            updateData(dataSetPtr) {};
@@ -29,7 +42,7 @@ public:
     * @param timeStampSize
     * @param dataSetPtr
     */
-   HousekeepingPacketUpdate(uint8_t* timeStamp, size_t timeStampSize,
+   HousekeepingSnapshot(uint8_t* timeStamp, size_t timeStampSize,
            LocalPoolObjectBase* dataSetPtr):
            timeStamp(timeStamp), timeStampSize(timeStampSize),
            updateData(dataSetPtr) {};
@@ -89,4 +102,4 @@ private:
};


-#endif /* FSFW_HOUSEKEEPING_HOUSEKEEPINGPACKETUPDATE_H_ */
+#endif /* FSFW_HOUSEKEEPING_HOUSEKEEPINGSNAPSHOT_H_ */
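A short sketch of the two dataset constructors declared above; dataSet stands in for an existing LocalPoolDataSetBase instance and is not taken from this commit:

    LocalPoolTestDataSet dataSet;
    /* Convenience constructor: the CDS short struct doubles as the timestamp buffer. */
    CCSDSTime::CDS_short cdsShort;
    HousekeepingSnapshot snapshotWithCds(&cdsShort, &dataSet);
    /* Generic constructor: any raw timestamp buffer plus its size. */
    uint8_t rawTimestamp[sizeof(CCSDSTime::CDS_short)] = {};
    HousekeepingSnapshot snapshotWithRawBuffer(rawTimestamp, sizeof(rawTimestamp), &dataSet);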
@@ -53,8 +53,7 @@ ReturnValue_t Clock::getClock_timeval(timeval* time) {
    auto epoch = now.time_since_epoch();
    time->tv_sec = std::chrono::duration_cast<std::chrono::seconds>(epoch).count();
    auto fraction = now - secondsChrono;
-   time->tv_usec = std::chrono::duration_cast<std::chrono::microseconds>(
-           fraction).count();
+   time->tv_usec = std::chrono::duration_cast<std::chrono::microseconds>(fraction).count();
    return HasReturnvaluesIF::RETURN_OK;
#elif defined(LINUX)
    timespec timeUnix;
@@ -67,7 +66,9 @@ ReturnValue_t Clock::getClock_timeval(timeval* time) {
    return HasReturnvaluesIF::RETURN_OK;
#else
#if FSFW_CPP_OSTREAM_ENABLED == 1
-   sif::warning << "Clock::getUptime: Not implemented for found OS" << std::endl;
+   sif::warning << "Clock::getUptime: Not implemented for found OS!" << std::endl;
+#else
+   sif::printWarning("Clock::getUptime: Not implemented for found OS!\n");
#endif
    return HasReturnvaluesIF::RETURN_FAILED;
#endif
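A minimal usage sketch of the function patched above, matching how it is called elsewhere in this pull request:

    timeval now;
    if(Clock::getClock_timeval(&now) == HasReturnvaluesIF::RETURN_OK) {
        /* now.tv_sec and now.tv_usec hold the current epoch time. */
    }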
@@ -1,5 +1,5 @@
#include "Stopwatch.h"
-#include "../serviceinterface/ServiceInterfaceStream.h"
+#include "../serviceinterface/ServiceInterface.h"
#include <iomanip>

Stopwatch::Stopwatch(bool displayOnDestruction,
@@ -28,9 +28,13 @@ double Stopwatch::stopSeconds() {

void Stopwatch::display() {
    if(displayMode == StopwatchDisplayMode::MILLIS) {
+       dur_millis_t timeMillis = static_cast<dur_millis_t>(
+               elapsedTime.tv_sec * 1000 + elapsedTime.tv_usec / 1000);
#if FSFW_CPP_OSTREAM_ENABLED == 1
-       sif::info << "Stopwatch: Operation took " << (elapsedTime.tv_sec * 1000 +
-               elapsedTime.tv_usec / 1000) << " milliseconds" << std::endl;
+       sif::info << "Stopwatch: Operation took " << timeMillis << " milliseconds" << std::endl;
+#else
+       sif::printInfo("Stopwatch: Operation took %lu milliseconds\n\r",
+               static_cast<unsigned int>(timeMillis));
#endif
    }
    else if(displayMode == StopwatchDisplayMode::SECONDS) {
@@ -38,6 +42,9 @@ void Stopwatch::display() {
        sif::info <<"Stopwatch: Operation took " << std::setprecision(3)
                << std::fixed << timevalOperations::toDouble(elapsedTime)
                << " seconds" << std::endl;
+#else
+       sif::printInfo("Stopwatch: Operation took %.3f seconds\n\r",
+               static_cast<float>(timevalOperations::toDouble(elapsedTime)));
#endif
    }
}
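A hedged usage sketch of the Stopwatch class touched above. The second constructor argument is assumed to select the display mode, and the destructor is assumed to call display() when displayOnDestruction is true, which now also prints through the printf fallback when C++ ostreams are disabled:

    {
        Stopwatch stopwatch(true, StopwatchDisplayMode::MILLIS);
        /* ... code section under measurement ... */
    }   /* Elapsed time is printed when the stopwatch goes out of scope. */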
@@ -6,17 +6,17 @@
#include <unittest/core/CatchDefinitions.h>

TEST_CASE("LocalDataSet" , "[LocDataSetTest]") {
    LocalPoolOwnerBase* poolOwner = objectManager->
            get<LocalPoolOwnerBase>(objects::TEST_LOCAL_POOL_OWNER_BASE);
    REQUIRE(poolOwner != nullptr);
    REQUIRE(poolOwner->initializeHkManager() == retval::CATCH_OK);
    REQUIRE(poolOwner->initializeHkManagerAfterTaskCreation()
            == retval::CATCH_OK);
    const uint32_t setId = 0;
    SECTION("BasicTest") {
        StaticLocalDataSet<3> localSet = StaticLocalDataSet<3>(
                sid_t(objects::TEST_LOCAL_POOL_OWNER_BASE, setId));
    }
}

@@ -1,122 +1,199 @@
#include "LocalPoolOwnerBase.h"

#include <catch2/catch_test_macros.hpp>
+#include <catch2/catch_approx.hpp>
+
+#include <fsfw/datapool/PoolReadHelper.h>
#include <fsfw/datapoollocal/HasLocalDataPoolIF.h>
#include <fsfw/datapoollocal/StaticLocalDataSet.h>
+#include <fsfw/housekeeping/HousekeepingSnapshot.h>
#include <fsfw/ipc/CommandMessageCleaner.h>
+#include <fsfw/timemanager/CCSDSTime.h>
#include <unittest/core/CatchDefinitions.h>
+#include <iostream>


TEST_CASE("LocalPoolManagerTest" , "[LocManTest]") {
    LocalPoolOwnerBase* poolOwner = objectManager->
            get<LocalPoolOwnerBase>(objects::TEST_LOCAL_POOL_OWNER_BASE);
    REQUIRE(poolOwner != nullptr);
    REQUIRE(poolOwner->initializeHkManager() == retval::CATCH_OK);
    REQUIRE(poolOwner->initializeHkManagerAfterTaskCreation()
            == retval::CATCH_OK);
-   REQUIRE(poolOwner->dataset.assignPointers() == retval::CATCH_OK);
+   //REQUIRE(poolOwner->dataset.assignPointers() == retval::CATCH_OK);
    MessageQueueMockBase* mqMock = poolOwner->getMockQueueHandle();
    REQUIRE(mqMock != nullptr);
    CommandMessage messageSent;
    uint8_t messagesSent = 0;

    SECTION("BasicTest") {
-       // Subscribe for message generation on update.
+       /* Subscribe for message generation on update. */
        REQUIRE(poolOwner->subscribeWrapperSetUpdate() == retval::CATCH_OK);
-       // Subscribe for an update message.
+       /* Subscribe for an update message. */
        poolOwner->dataset.setChanged(true);
-       // Now the update message should be generated.
+       /* Now the update message should be generated. */
        REQUIRE(poolOwner->poolManager.performHkOperation() == retval::CATCH_OK);
        REQUIRE(mqMock->wasMessageSent() == true);

        REQUIRE(mqMock->receiveMessage(&messageSent) == retval::CATCH_OK);
        CHECK(messageSent.getCommand() == static_cast<int>(
                HousekeepingMessage::UPDATE_NOTIFICATION_SET));

-       // Should have been reset.
+       /* Should have been reset. */
        CHECK(poolOwner->dataset.hasChanged() == false);
-       // Set changed again, result should be the same.
+       /* Set changed again, result should be the same. */
        poolOwner->dataset.setChanged(true);
        REQUIRE(poolOwner->poolManager.performHkOperation() == retval::CATCH_OK);

        REQUIRE(mqMock->wasMessageSent(&messagesSent) == true);
        CHECK(messagesSent == 1);
        REQUIRE(mqMock->receiveMessage(&messageSent) == retval::CATCH_OK);
        CHECK(messageSent.getCommand() == static_cast<int>(
                HousekeepingMessage::UPDATE_NOTIFICATION_SET));

-       // now subscribe for set update HK as well.
+       /* Now subscribe for set update HK as well. */
        REQUIRE(poolOwner->subscribeWrapperSetUpdateHk() == retval::CATCH_OK);
        poolOwner->dataset.setChanged(true);
        REQUIRE(poolOwner->poolManager.performHkOperation() == retval::CATCH_OK);
        REQUIRE(mqMock->wasMessageSent(&messagesSent) == true);
        CHECK(messagesSent == 2);
-       // first message sent should be the update notification, considering
-       // the internal list is a vector checked in insertion order.
+       /* first message sent should be the update notification, considering
+       the internal list is a vector checked in insertion order. */
        REQUIRE(mqMock->receiveMessage(&messageSent) == retval::CATCH_OK);
        CHECK(messageSent.getCommand() == static_cast<int>(
                HousekeepingMessage::UPDATE_NOTIFICATION_SET));

        REQUIRE(mqMock->receiveMessage(&messageSent) == retval::CATCH_OK);
        CHECK(messageSent.getCommand() == static_cast<int>(
                HousekeepingMessage::HK_REPORT));
-       // clear message to avoid memory leak, our mock won't do it for us (yet)
+       /* Clear message to avoid memory leak, our mock won't do it for us (yet) */
        CommandMessageCleaner::clearCommandMessage(&messageSent);
    }

-   SECTION("AdvancedTests") {
-       // we need to reset the subscription list because the pool owner
-       // is a global object.
-       poolOwner->resetSubscriptionList();
-       // Subscribe for variable update as well
-       REQUIRE(not poolOwner->dataset.hasChanged());
-       REQUIRE(poolOwner->subscribeWrapperVariableUpdate(lpool::uint8VarId) ==
-               retval::CATCH_OK);
-       lp_var_t<uint8_t>* poolVar = dynamic_cast<lp_var_t<uint8_t>*>(
-               poolOwner->getPoolObjectHandle(lpool::uint8VarId));
-       REQUIRE(poolVar != nullptr);
-       poolVar->setChanged(true);
-       REQUIRE(poolOwner->poolManager.performHkOperation() == retval::CATCH_OK);
-
-       // Check update notification was sent.
-       REQUIRE(mqMock->wasMessageSent(&messagesSent) == true);
-       CHECK(messagesSent == 1);
-       // Should have been reset.
-       CHECK(poolVar->hasChanged() == false);
-       REQUIRE(mqMock->receiveMessage(&messageSent) == retval::CATCH_OK);
-       CHECK(messageSent.getCommand() == static_cast<int>(
-               HousekeepingMessage::UPDATE_NOTIFICATION_VARIABLE));
-
-       // now subscribe for the dataset update (HK and update) again
-       REQUIRE(poolOwner->subscribeWrapperSetUpdate() == retval::CATCH_OK);
-       REQUIRE(poolOwner->subscribeWrapperSetUpdateHk() == retval::CATCH_OK);
-
-       poolOwner->dataset.setChanged(true);
-       REQUIRE(poolOwner->poolManager.performHkOperation() == retval::CATCH_OK);
-       // now two messages should be sent.
-       REQUIRE(mqMock->wasMessageSent(&messagesSent) == true);
-       CHECK(messagesSent == 2);
-       mqMock->clearMessages(true);
-
-       poolOwner->dataset.setChanged(true);
-       poolVar->setChanged(true);
-       REQUIRE(poolOwner->poolManager.performHkOperation() == retval::CATCH_OK);
-       // now three messages should be sent.
-       REQUIRE(mqMock->wasMessageSent(&messagesSent) == true);
-       CHECK(messagesSent == 3);
-       REQUIRE(mqMock->receiveMessage(&messageSent) == retval::CATCH_OK);
-       CHECK(messageSent.getCommand() == static_cast<int>(
-               HousekeepingMessage::UPDATE_NOTIFICATION_VARIABLE));
-       REQUIRE(mqMock->receiveMessage(&messageSent) == retval::CATCH_OK);
-       CHECK(messageSent.getCommand() == static_cast<int>(
-               HousekeepingMessage::UPDATE_NOTIFICATION_SET));
-       REQUIRE(mqMock->receiveMessage(&messageSent) == retval::CATCH_OK);
-       CHECK(messageSent.getCommand() == static_cast<int>(
-               HousekeepingMessage::HK_REPORT));
-       CommandMessageCleaner::clearCommandMessage(&messageSent);
-       REQUIRE(mqMock->receiveMessage(&messageSent) ==
-               static_cast<int>(MessageQueueIF::EMPTY));
-   }
+   SECTION("SnapshotUpdateTests") {
+       /* Set the variables in the set to certain values. These are checked later. */
+       {
+           PoolReadHelper readHelper(&poolOwner->dataset);
+           REQUIRE(readHelper.getReadResult() == retval::CATCH_OK);
+           poolOwner->dataset.localPoolVarUint8.value = 5;
+           poolOwner->dataset.localPoolVarFloat.value = -12.242;
+           poolOwner->dataset.localPoolUint16Vec.value[0] = 2;
+           poolOwner->dataset.localPoolUint16Vec.value[1] = 32;
+           poolOwner->dataset.localPoolUint16Vec.value[2] = 42932;
+       }
+
+       /* Subscribe for snapshot generation on update. */
+       REQUIRE(poolOwner->subscribeWrapperSetUpdateSnapshot() == retval::CATCH_OK);
+       poolOwner->dataset.setChanged(true);
+
+       /* Store current time, we are going to check the (approximate) time equality later */
+       CCSDSTime::CDS_short timeCdsNow;
+       timeval now;
+       Clock::getClock_timeval(&now);
+       CCSDSTime::convertToCcsds(&timeCdsNow, &now);
+
+       /* Trigger generation of snapshot */
+       REQUIRE(poolOwner->poolManager.performHkOperation() == retval::CATCH_OK);
+       REQUIRE(mqMock->wasMessageSent(&messagesSent) == true);
+       CHECK(messagesSent == 1);
+       REQUIRE(mqMock->receiveMessage(&messageSent) == retval::CATCH_OK);
+       /* Check that snapshot was generated */
+       CHECK(messageSent.getCommand() == static_cast<int>(
+               HousekeepingMessage::UPDATE_SNAPSHOT_SET));
+       /* Now we deserialize the snapshot into a new dataset instance */
+       CCSDSTime::CDS_short cdsShort;
+       LocalPoolTestDataSet newSet;
+       HousekeepingSnapshot snapshot(&cdsShort, &newSet);
+       store_address_t storeId;
+       HousekeepingMessage::getUpdateSnapshotSetCommand(&messageSent, &storeId);
+       ConstAccessorPair accessorPair = tglob::getIpcStoreHandle()->getData(storeId);
+       REQUIRE(accessorPair.first == retval::CATCH_OK);
+       const uint8_t* readOnlyPtr = accessorPair.second.data();
+       size_t sizeToDeserialize = accessorPair.second.size();
+       CHECK(newSet.localPoolVarFloat.value == 0);
+       CHECK(newSet.localPoolVarUint8 == 0);
+       CHECK(newSet.localPoolUint16Vec.value[0] == 0);
+       CHECK(newSet.localPoolUint16Vec.value[1] == 0);
+       CHECK(newSet.localPoolUint16Vec.value[2] == 0);
+       /* Fill the dataset and timestamp */
+       REQUIRE(snapshot.deSerialize(&readOnlyPtr, &sizeToDeserialize,
+               SerializeIF::Endianness::MACHINE) == retval::CATCH_OK);
+       /* Now we check that the snapshot is actually correct */
+       CHECK(newSet.localPoolVarFloat.value == Catch::Approx(-12.242));
+       CHECK(newSet.localPoolVarUint8 == 5);
+       CHECK(newSet.localPoolUint16Vec.value[0] == 2);
+       CHECK(newSet.localPoolUint16Vec.value[1] == 32);
+       CHECK(newSet.localPoolUint16Vec.value[2] == 42932);
+
+       /* Now we check that both times are equal */
+       CHECK(cdsShort.pField == timeCdsNow.pField);
+       CHECK(cdsShort.dayLSB == Catch::Approx(timeCdsNow.dayLSB).margin(1));
+       CHECK(cdsShort.dayMSB == Catch::Approx(timeCdsNow.dayMSB).margin(1));
+       CHECK(cdsShort.msDay_h == Catch::Approx(timeCdsNow.msDay_h).margin(1));
+       CHECK(cdsShort.msDay_hh == Catch::Approx(timeCdsNow.msDay_hh).margin(1));
+       CHECK(cdsShort.msDay_l == Catch::Approx(timeCdsNow.msDay_l).margin(1));
+       CHECK(cdsShort.msDay_ll == Catch::Approx(timeCdsNow.msDay_ll).margin(1));
+   }
+
+   SECTION("AdvancedTests") {
+       /* Acquire subscription interface */
+       ProvidesDataPoolSubscriptionIF* subscriptionIF = poolOwner->getSubscriptionInterface();
+       REQUIRE(subscriptionIF != nullptr);
+
+       /* Subscribe for variable update */
+       REQUIRE(poolOwner->subscribeWrapperVariableUpdate(lpool::uint8VarId) ==
+               retval::CATCH_OK);
+       lp_var_t<uint8_t>* poolVar = dynamic_cast<lp_var_t<uint8_t>*>(
+               poolOwner->getPoolObjectHandle(lpool::uint8VarId));
+       REQUIRE(poolVar != nullptr);
+       poolVar->setChanged(true);
+       REQUIRE(poolOwner->poolManager.performHkOperation() == retval::CATCH_OK);
+
+       /* Check update notification was sent. */
+       REQUIRE(mqMock->wasMessageSent(&messagesSent) == true);
+       CHECK(messagesSent == 1);
+       /* Should have been reset. */
+       CHECK(poolVar->hasChanged() == false);
+       REQUIRE(mqMock->receiveMessage(&messageSent) == retval::CATCH_OK);
+       CHECK(messageSent.getCommand() == static_cast<int>(
+               HousekeepingMessage::UPDATE_NOTIFICATION_VARIABLE));
+       /* Now subscribe for the dataset update (HK and update) again with subscription interface */
+       REQUIRE(subscriptionIF->subscribeForSetUpdateMessages(lpool::testSetId,
+               objects::NO_OBJECT, objects::HK_RECEIVER_MOCK, false) == retval::CATCH_OK);
+       REQUIRE(poolOwner->subscribeWrapperSetUpdateHk() == retval::CATCH_OK);

+       poolOwner->dataset.setChanged(true);
+       REQUIRE(poolOwner->poolManager.performHkOperation() == retval::CATCH_OK);
+       /* Now two messages should be sent. */
+       REQUIRE(mqMock->wasMessageSent(&messagesSent) == true);
+       CHECK(messagesSent == 2);
+       mqMock->clearMessages(true);
+
+       poolOwner->dataset.setChanged(true);
+       poolVar->setChanged(true);
+       REQUIRE(poolOwner->poolManager.performHkOperation() == retval::CATCH_OK);
+       /* Now three messages should be sent. */
+       REQUIRE(mqMock->wasMessageSent(&messagesSent) == true);
+       CHECK(messagesSent == 3);
+       REQUIRE(mqMock->receiveMessage(&messageSent) == retval::CATCH_OK);
+       CHECK(messageSent.getCommand() == static_cast<int>(
+               HousekeepingMessage::UPDATE_NOTIFICATION_VARIABLE));
+       REQUIRE(mqMock->receiveMessage(&messageSent) == retval::CATCH_OK);
+       CHECK(messageSent.getCommand() == static_cast<int>(
+               HousekeepingMessage::UPDATE_NOTIFICATION_SET));
+       REQUIRE(mqMock->receiveMessage(&messageSent) == retval::CATCH_OK);
+       CHECK(messageSent.getCommand() == static_cast<int>(
+               HousekeepingMessage::HK_REPORT));
+       CommandMessageCleaner::clearCommandMessage(&messageSent);
+       REQUIRE(mqMock->receiveMessage(&messageSent) ==
+               static_cast<int>(MessageQueueIF::EMPTY));
+   }
+
+   /* we need to reset the subscription list because the pool owner
+   is a global object. */
+   poolOwner->resetSubscriptionList();
+   mqMock->clearMessages(true);
}

@ -20,33 +20,43 @@ static constexpr lp_id_t int64Vec2Id = 4;
static constexpr uint32_t testSetId = 0;
static constexpr uint8_t dataSetMaxVariables = 10;
static const sid_t testSid = sid_t(objects::TEST_LOCAL_POOL_OWNER_BASE,
        testSetId);
static const sid_t testSid = sid_t(objects::TEST_LOCAL_POOL_OWNER_BASE, testSetId);

static const gp_id_t uint8VarGpid = gp_id_t(objects::TEST_LOCAL_POOL_OWNER_BASE, uint8VarId);
static const gp_id_t floatVarGpid = gp_id_t(objects::TEST_LOCAL_POOL_OWNER_BASE, floatVarId);
static const gp_id_t uint32Gpid = gp_id_t(objects::TEST_LOCAL_POOL_OWNER_BASE, uint32VarId);
static const gp_id_t uint16Vec3Gpid = gp_id_t(objects::TEST_LOCAL_POOL_OWNER_BASE, uint16Vec3Id);
static const gp_id_t uint64Vec2Id = gp_id_t(objects::TEST_LOCAL_POOL_OWNER_BASE, int64Vec2Id);
}

class LocalPoolTestDataSet: public LocalDataSet {
public:
    LocalPoolTestDataSet(HasLocalDataPoolIF* owner, uint32_t setId):
            LocalDataSet(owner, setId, lpool::dataSetMaxVariables) {
    LocalPoolTestDataSet():
            LocalDataSet(lpool::testSid, lpool::dataSetMaxVariables) {
    }

    ReturnValue_t assignPointers() {
        PoolVariableIF** rawVarArray = getContainer();
        localPoolVarUint8 = dynamic_cast<lp_var_t<uint8_t>*>(rawVarArray[0]);
        localPoolVarFloat = dynamic_cast<lp_var_t<float>*>(rawVarArray[1]);
        localPoolUint16Vec = dynamic_cast<lp_vec_t<uint16_t, 3>*>(
                rawVarArray[2]);
        if(localPoolVarUint8 == nullptr or localPoolVarFloat == nullptr or
                localPoolUint16Vec == nullptr) {
            return HasReturnvaluesIF::RETURN_FAILED;
        }
        return HasReturnvaluesIF::RETURN_OK;
    }

    lp_var_t<uint8_t>* localPoolVarUint8 = nullptr;
    lp_var_t<float>* localPoolVarFloat = nullptr;
    lp_vec_t<uint16_t, 3>* localPoolUint16Vec = nullptr;

    LocalPoolTestDataSet(HasLocalDataPoolIF* owner, uint32_t setId):
            LocalDataSet(owner, setId, lpool::dataSetMaxVariables) {
    }

    // ReturnValue_t assignPointers() {
    //     PoolVariableIF** rawVarArray = getContainer();
    //     localPoolVarUint8 = dynamic_cast<lp_var_t<uint8_t>*>(rawVarArray[0]);
    //     localPoolVarFloat = dynamic_cast<lp_var_t<float>*>(rawVarArray[1]);
    //     localPoolUint16Vec = dynamic_cast<lp_vec_t<uint16_t, 3>*>(
    //             rawVarArray[2]);
    //     if(localPoolVarUint8 == nullptr or localPoolVarFloat == nullptr or
    //             localPoolUint16Vec == nullptr) {
    //         return HasReturnvaluesIF::RETURN_FAILED;
    //     }
    //     return HasReturnvaluesIF::RETURN_OK;
    // }

    lp_var_t<uint8_t> localPoolVarUint8 = lp_var_t<uint8_t>(lpool::uint8VarGpid, this);
    lp_var_t<float> localPoolVarFloat = lp_var_t<float>(lpool::floatVarGpid, this);
    lp_vec_t<uint16_t, 3> localPoolUint16Vec = lp_vec_t<uint16_t, 3>(lpool::uint16Vec3Gpid, this);

private:
};
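The reworked LocalPoolTestDataSet above replaces the raw pointers that assignPointers() used to fill in with lp_var_t / lp_vec_t members constructed directly from the new gp_id_t constants. A hedged usage sketch follows; it relies only on constructors and the read()/commit() calls that appear elsewhere in this diff, and the function name is illustrative.

/* Sketch: constructing a pool accessor from a global pool ID and doing a
 * read-modify-write cycle, the same pattern the new members above enable. */
void incrementUint8PoolVariable() {
    lp_var_t<uint8_t> uint8Var = lp_var_t<uint8_t>(lpool::uint8VarGpid);
    if(uint8Var.read() == HasReturnvaluesIF::RETURN_OK) {
        uint8Var.value += 1;
        uint8Var.commit();
    }
}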
@ -54,143 +64,148 @@ private:

class LocalPoolOwnerBase: public SystemObject, public HasLocalDataPoolIF {
public:
    LocalPoolOwnerBase(
            object_id_t objectId = objects::TEST_LOCAL_POOL_OWNER_BASE):
            SystemObject(objectId), poolManager(this, messageQueue),
            dataset(this, lpool::testSetId) {
        messageQueue = new MessageQueueMockBase();
    }

    ~LocalPoolOwnerBase() {
        QueueFactory::instance()->deleteMessageQueue(messageQueue);
    }

    object_id_t getObjectId() const override {
        return SystemObject::getObjectId();
    }

    ReturnValue_t initializeHkManager() {
        if(not initialized) {
            initialized = true;
            return poolManager.initialize(messageQueue);
        }
        return HasReturnvaluesIF::RETURN_OK;
    }

    ReturnValue_t initializeHkManagerAfterTaskCreation() {
        if(not initializedAfterTaskCreation) {
            initializedAfterTaskCreation = true;
            return poolManager.initializeAfterTaskCreation();
        }
        return HasReturnvaluesIF::RETURN_OK;
    }

    /** Command queue for housekeeping messages. */
    MessageQueueId_t getCommandQueue() const override {
        return messageQueue->getId();
    }

    // This is called by initializeAfterTaskCreation of the HK manager.
    virtual ReturnValue_t initializeLocalDataPool(
            localpool::DataPool& localDataPoolMap,
            LocalDataPoolManager& poolManager) {
        // Default initialization empty for now.
        localDataPoolMap.emplace(lpool::uint8VarId,
                new PoolEntry<uint8_t>({0}));
        localDataPoolMap.emplace(lpool::floatVarId,
                new PoolEntry<float>({0}));
        localDataPoolMap.emplace(lpool::uint32VarId,
                new PoolEntry<uint32_t>({0}));

        localDataPoolMap.emplace(lpool::uint16Vec3Id,
                new PoolEntry<uint16_t>({0, 0, 0}));
        localDataPoolMap.emplace(lpool::int64Vec2Id,
                new PoolEntry<int64_t>({0, 0}));
        return HasReturnvaluesIF::RETURN_OK;
    }

    LocalDataPoolManager* getHkManagerHandle() override {
        return &poolManager;
    }

    uint32_t getPeriodicOperationFrequency() const override {
        return 0;
    }

    /**
     * This function is used by the pool manager to get a valid dataset
     * from a SID
     * @param sid Corresponding structure ID
     * @return
     */
    virtual LocalPoolDataSetBase* getDataSetHandle(sid_t sid) override {
        return &dataset;
    }

    virtual LocalPoolObjectBase* getPoolObjectHandle(
            lp_id_t localPoolId) override {
        if(localPoolId == lpool::uint8VarId) {
            return &testUint8;
        }
        else if(localPoolId == lpool::uint16Vec3Id) {
            return &testUint16Vec;
        }
        else if(localPoolId == lpool::floatVarId) {
            return &testFloat;
        }
        else if(localPoolId == lpool::int64Vec2Id) {
            return &testInt64Vec;
        }
        else if(localPoolId == lpool::uint32VarId) {
            return &testUint32;
        }
        else {
            return &testUint8;
        }
    }

    MessageQueueMockBase* getMockQueueHandle() const {
        return dynamic_cast<MessageQueueMockBase*>(messageQueue);
    }

    ReturnValue_t subscribeWrapperSetUpdate() {
        return poolManager.subscribeForSetUpdateMessages(lpool::testSetId,
                objects::NO_OBJECT, MessageQueueIF::NO_QUEUE, false);
    }

    ReturnValue_t subscribeWrapperSetUpdateHk(bool diagnostics = false) {
        return poolManager.subscribeForUpdatePackets(lpool::testSid, diagnostics,
                false, objects::HK_RECEIVER_MOCK);
    }

    ReturnValue_t subscribeWrapperVariableUpdate(lp_id_t localPoolId) {
        return poolManager.subscribeForVariableUpdateMessages(localPoolId,
                MessageQueueIF::NO_QUEUE, objects::NO_OBJECT, false);
    }

    void resetSubscriptionList() {
        poolManager.clearReceiversList();
    }

    LocalDataPoolManager poolManager;
    LocalPoolTestDataSet dataset;

    ReturnValue_t subscribeWrapperSetUpdate() {
        return poolManager.subscribeForSetUpdateMessages(lpool::testSetId,
                objects::NO_OBJECT, objects::HK_RECEIVER_MOCK, false);
    }

    ReturnValue_t subscribeWrapperSetUpdateSnapshot() {
        return poolManager.subscribeForSetUpdateMessages(lpool::testSetId,
                objects::NO_OBJECT, objects::HK_RECEIVER_MOCK, true);
    }

    ReturnValue_t subscribeWrapperSetUpdateHk(bool diagnostics = false) {
        return poolManager.subscribeForUpdatePackets(lpool::testSid, diagnostics,
                false, objects::HK_RECEIVER_MOCK);
    }

    ReturnValue_t subscribeWrapperVariableUpdate(lp_id_t localPoolId) {
        return poolManager.subscribeForVariableUpdateMessages(localPoolId,
                MessageQueueIF::NO_QUEUE, objects::HK_RECEIVER_MOCK, false);
    }

    void resetSubscriptionList() {
        poolManager.clearReceiversList();
    }

    LocalDataPoolManager poolManager;
    LocalPoolTestDataSet dataset;

private:
    lp_var_t<uint8_t> testUint8 = lp_var_t<uint8_t>(this, lpool::uint8VarId,
            &dataset);
    lp_var_t<float> testFloat = lp_var_t<float>(this, lpool::floatVarId,
            &dataset);
    lp_var_t<uint32_t> testUint32 = lp_var_t<uint32_t>(this, lpool::uint32VarId);

    lp_vec_t<uint16_t, 3> testUint16Vec = lp_vec_t<uint16_t, 3>(this,
            lpool::uint16Vec3Id, &dataset);
    lp_vec_t<int64_t, 2> testInt64Vec = lp_vec_t<int64_t, 2>(this,
            lpool::int64Vec2Id);

    MessageQueueIF* messageQueue = nullptr;

    bool initialized = false;
    bool initializedAfterTaskCreation = false;

};
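The mock above shows the minimum a HasLocalDataPoolIF owner provides: pool entries registered in initializeLocalDataPool(), a command queue, and handles for its data set and pool objects. For orientation, a trimmed-down, assumption-based fragment of a production owner is sketched below; MyHkOwner, TEMP_ID and VOLTAGE_ID are placeholders, and only the emplace()/PoolEntry pattern visible in the mock is reused.

/* Hypothetical fragment (not part of this changeset): registering two pool
 * entries the same way the mock's initializeLocalDataPool() does. */
ReturnValue_t MyHkOwner::initializeLocalDataPool(
        localpool::DataPool& localDataPoolMap,
        LocalDataPoolManager& poolManager) {
    localDataPoolMap.emplace(TEMP_ID, new PoolEntry<float>({0.0}));
    localDataPoolMap.emplace(VOLTAGE_ID, new PoolEntry<uint16_t>({0}));
    return HasReturnvaluesIF::RETURN_OK;
}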
@ -6,117 +6,117 @@

TEST_CASE("LocalPoolVariable" , "[LocPoolVarTest]") {
    LocalPoolOwnerBase* poolOwner = objectManager->
            get<LocalPoolOwnerBase>(objects::TEST_LOCAL_POOL_OWNER_BASE);
    REQUIRE(poolOwner != nullptr);
    REQUIRE(poolOwner->initializeHkManager() == retval::CATCH_OK);
    REQUIRE(poolOwner->initializeHkManagerAfterTaskCreation()
            == retval::CATCH_OK);

    SECTION("Basic Tests") {
        // very basic test.
        /* very basic test. */
        lp_var_t<uint8_t> testVariable = lp_var_t<uint8_t>(
                objects::TEST_LOCAL_POOL_OWNER_BASE, lpool::uint8VarId);
        REQUIRE(testVariable.read() == retval::CATCH_OK);
        CHECK(testVariable.value == 0);
        testVariable.value = 5;
        REQUIRE(testVariable.commit() == retval::CATCH_OK);
        REQUIRE(testVariable.read() == retval::CATCH_OK);
        REQUIRE(testVariable.value == 5);
        CHECK(not testVariable.isValid());
        testVariable.setValid(true);
        CHECK(testVariable.isValid());
        CHECK(testVariable.commit(true) == retval::CATCH_OK);

        testVariable.setReadWriteMode(pool_rwm_t::VAR_READ);
        CHECK(testVariable.getReadWriteMode() == pool_rwm_t::VAR_READ);
        testVariable.setReadWriteMode(pool_rwm_t::VAR_READ_WRITE);

        testVariable.setDataPoolId(22);
        CHECK(testVariable.getDataPoolId() == 22);
        testVariable.setDataPoolId(lpool::uint8VarId);

        testVariable.setChanged(true);
        CHECK(testVariable.hasChanged());
        testVariable.setChanged(false);

        gp_id_t globPoolId(objects::TEST_LOCAL_POOL_OWNER_BASE,
                lpool::uint8VarId);
        lp_var_t<uint8_t> testVariable2 = lp_var_t<uint8_t>(globPoolId);
        REQUIRE(testVariable2.read() == retval::CATCH_OK);
        CHECK(testVariable2 == 5);
        CHECK(testVariable == testVariable2);
        testVariable = 10;
        CHECK(testVariable != 5);
        //CHECK(not testVariable != testVariable2);
        uint8_t variableRaw = 0;
        uint8_t* varPtr = &variableRaw;
        size_t maxSize = testVariable.getSerializedSize();
        CHECK(maxSize == 1);
        size_t serSize = 0;
        CHECK(testVariable.serialize(&varPtr, &serSize, maxSize,
                SerializeIF::Endianness::MACHINE) == retval::CATCH_OK);
        CHECK(variableRaw == 10);
        const uint8_t* varConstPtr = &variableRaw;
        testVariable = 5;
        CHECK(testVariable.deSerialize(&varConstPtr, &serSize,
                SerializeIF::Endianness::MACHINE) == retval::CATCH_OK);
        CHECK(testVariable == 10);
        CHECK(testVariable != testVariable2);
        CHECK(testVariable2 < testVariable);
        CHECK(testVariable2 < 10);
        CHECK(testVariable > 5);
        CHECK(testVariable > testVariable2);
        variableRaw = static_cast<uint8_t>(testVariable2);
        CHECK(variableRaw == 5);

        CHECK(testVariable == 10);
        testVariable = testVariable2;
        CHECK(testVariable == 5);
    }

    SECTION("ErrorHandling") {

        // not try to use a local pool variable which does not exist
        /* now try to use a local pool variable which does not exist */
        lp_var_t<uint8_t> invalidVariable = lp_var_t<uint8_t>(
                objects::TEST_LOCAL_POOL_OWNER_BASE, 0xffffffff);
        REQUIRE(invalidVariable.read() ==
                static_cast<int>(localpool::POOL_ENTRY_NOT_FOUND));

        REQUIRE(invalidVariable.commit() ==
                static_cast<int>(localpool::POOL_ENTRY_NOT_FOUND));
        // now try to access with wrong type
        /* now try to access with wrong type */
        lp_var_t<int8_t> invalidVariable2 = lp_var_t<int8_t>(
                objects::TEST_LOCAL_POOL_OWNER_BASE, lpool::uint8VarId);
        REQUIRE(invalidVariable2.read() ==
                static_cast<int>(localpool::POOL_ENTRY_TYPE_CONFLICT));

        lp_var_t<uint8_t> readOnlyVar = lp_var_t<uint8_t>(
                objects::TEST_LOCAL_POOL_OWNER_BASE, lpool::uint8VarId,
                nullptr, pool_rwm_t::VAR_READ);
        REQUIRE(readOnlyVar.commit() ==
                static_cast<int>(PoolVariableIF::INVALID_READ_WRITE_MODE));
        lp_var_t<uint8_t> writeOnlyVar = lp_var_t<uint8_t>(
                objects::TEST_LOCAL_POOL_OWNER_BASE, lpool::uint8VarId,
                nullptr, pool_rwm_t::VAR_WRITE);
        REQUIRE(writeOnlyVar.read() == static_cast<int>(
                PoolVariableIF::INVALID_READ_WRITE_MODE));

        lp_var_t<uint32_t> uint32tVar = lp_var_t<uint32_t>(
                objects::TEST_LOCAL_POOL_OWNER_BASE, lpool::uint32VarId);
#if FSFW_CPP_OSTREAM_ENABLED == 1
        sif::info << "LocalPoolVariable printout: " << uint32tVar << std::endl;
#endif

        // for code coverage. If program does not crash -> OK
        /* for code coverage. If program does not crash -> OK */
        lp_var_t<uint8_t> invalidObjectVar = lp_var_t<uint8_t>(
                0xffffffff, lpool::uint8VarId);
        gp_id_t globPoolId(0xffffffff,
                lpool::uint8VarId);
        lp_var_t<uint8_t> invalidObjectVar2 = lp_var_t<uint8_t>(globPoolId);
        lp_var_t<uint8_t> invalidObjectVar3 = lp_var_t<uint8_t>(nullptr,
                lpool::uint8VarId);
    }

}
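Beyond the assertions, the variable test above documents the intended write pattern: read(), modify the value member, then commit(), optionally committing the validity flag in the same call. A hedged sketch follows, reusing the test's object and pool IDs purely for illustration; the function name is not part of the changeset.

/* Sketch: update a pool variable and commit it together with the validity
 * flag, using only calls exercised in the test above. */
void writeUint8PoolValue(uint8_t newValue) {
    lp_var_t<uint8_t> var = lp_var_t<uint8_t>(
            objects::TEST_LOCAL_POOL_OWNER_BASE, lpool::uint8VarId);
    if(var.read() != HasReturnvaluesIF::RETURN_OK) {
        return;
    }
    var.value = newValue;
    /* commit(true) commits the new value together with the validity flag,
     * matching the commit(true) call in the test */
    var.commit(true);
}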
@ -5,116 +5,116 @@
#include <unittest/core/CatchDefinitions.h>

TEST_CASE("LocalPoolVector" , "[LocPoolVecTest]") {
    LocalPoolOwnerBase* poolOwner = objectManager->
            get<LocalPoolOwnerBase>(objects::TEST_LOCAL_POOL_OWNER_BASE);
    REQUIRE(poolOwner != nullptr);
    REQUIRE(poolOwner->initializeHkManager() == retval::CATCH_OK);
    REQUIRE(poolOwner->initializeHkManagerAfterTaskCreation()
            == retval::CATCH_OK);

    SECTION("BasicTest") {
        // very basic test.
        lp_vec_t<uint16_t, 3> testVector = lp_vec_t<uint16_t, 3>(
                objects::TEST_LOCAL_POOL_OWNER_BASE, lpool::uint16Vec3Id);
        REQUIRE(testVector.read() == retval::CATCH_OK);
        testVector.value[0] = 5;
        testVector.value[1] = 232;
        testVector.value[2] = 32023;

        REQUIRE(testVector.commit(true) == retval::CATCH_OK);
        CHECK(testVector.isValid());

        testVector.value[0] = 0;
        testVector.value[1] = 0;
        testVector.value[2] = 0;

        CHECK(testVector.read() == retval::CATCH_OK);
        CHECK(testVector.value[0] == 5);
        CHECK(testVector.value[1] == 232);
        CHECK(testVector.value[2] == 32023);

        CHECK(testVector[0] == 5);

        // This is invalid access, so the last value will be set instead.
        // (we can't throw exceptions)
        /* This is invalid access, so the last value will be set instead.
        (we can't throw exceptions) */
        testVector[4] = 12;
        CHECK(testVector[2] == 12);
        CHECK(testVector.commit() == retval::CATCH_OK);

        // Use read-only reference.
        /* Use read-only reference. */
        const lp_vec_t<uint16_t, 3>& roTestVec = testVector;
        uint16_t valueOne = roTestVec[0];
        CHECK(valueOne == 5);

        uint16_t lastVal = roTestVec[25];
        CHECK(lastVal == 12);

        size_t maxSize = testVector.getSerializedSize();
        CHECK(maxSize == 6);

        uint16_t serializedVector[3];
        uint8_t* vecPtr = reinterpret_cast<uint8_t*>(serializedVector);
        size_t serSize = 0;
        REQUIRE(testVector.serialize(&vecPtr, &serSize,
                maxSize, SerializeIF::Endianness::MACHINE) == retval::CATCH_OK);

        CHECK(serSize == 6);
        CHECK(serializedVector[0] == 5);
        CHECK(serializedVector[1] == 232);
        CHECK(serializedVector[2] == 12);

        maxSize = 1;
        REQUIRE(testVector.serialize(&vecPtr, &serSize,
                maxSize, SerializeIF::Endianness::MACHINE) ==
                static_cast<int>(SerializeIF::BUFFER_TOO_SHORT));

        serializedVector[0] = 16;
        serializedVector[1] = 7832;
        serializedVector[2] = 39232;

        const uint8_t* constVecPtr = reinterpret_cast<const uint8_t*>(
                serializedVector);
        REQUIRE(testVector.deSerialize(&constVecPtr, &serSize,
                SerializeIF::Endianness::MACHINE) == retval::CATCH_OK);
        CHECK(testVector[0] == 16);
        CHECK(testVector[1] == 7832);
        CHECK(testVector[2] == 39232);

        serSize = 1;
        REQUIRE(testVector.deSerialize(&constVecPtr, &serSize,
                SerializeIF::Endianness::MACHINE) ==
                static_cast<int>(SerializeIF::STREAM_TOO_SHORT));
    }

    SECTION("ErrorHandling") {
        // not try to use a local pool variable which does not exist
        /* Now try to use a local pool variable which does not exist */
        lp_vec_t<uint16_t, 3> invalidVector = lp_vec_t<uint16_t, 3>(
                objects::TEST_LOCAL_POOL_OWNER_BASE, 0xffffffff);
        REQUIRE(invalidVector.read() ==
                static_cast<int>(localpool::POOL_ENTRY_NOT_FOUND));
        REQUIRE(invalidVector.commit() ==
                static_cast<int>(localpool::POOL_ENTRY_NOT_FOUND));

        // now try to access with wrong type
        /* Now try to access with wrong type */
        lp_vec_t<uint32_t, 3> invalidVector2 = lp_vec_t<uint32_t, 3>(
                objects::TEST_LOCAL_POOL_OWNER_BASE, lpool::uint16Vec3Id);
        REQUIRE(invalidVector2.read() ==
                static_cast<int>(localpool::POOL_ENTRY_TYPE_CONFLICT));
        REQUIRE(invalidVector2.commit() ==
                static_cast<int>(localpool::POOL_ENTRY_TYPE_CONFLICT));

        lp_vec_t<uint16_t, 3> writeOnlyVec = lp_vec_t<uint16_t, 3>(
                objects::TEST_LOCAL_POOL_OWNER_BASE, lpool::uint16Vec3Id,
                nullptr, pool_rwm_t::VAR_WRITE);
        REQUIRE(writeOnlyVec.read() ==
                static_cast<int>(PoolVariableIF::INVALID_READ_WRITE_MODE));

        lp_vec_t<uint16_t, 3> readOnlyVec = lp_vec_t<uint16_t, 3>(
                objects::TEST_LOCAL_POOL_OWNER_BASE, lpool::uint16Vec3Id,
                nullptr, pool_rwm_t::VAR_READ);
        REQUIRE(readOnlyVec.commit() ==
                static_cast<int>(PoolVariableIF::INVALID_READ_WRITE_MODE));
    }
}
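As the comment in the vector test notes, operator[] cannot throw, so an out-of-range index is redirected to the last element. Calling code that cannot rule out bad indices should therefore check them itself; a small hedged sketch follows, where the function name and the clamping policy are illustrative only.

/* Sketch: bounds-check before using lp_vec_t::operator[], since invalid
 * indices silently map to the last element (see the test above). */
uint16_t readVectorEntryChecked(lp_vec_t<uint16_t, 3>& vec, size_t index) {
    constexpr size_t vecSize = 3;
    if(index >= vecSize) {
        /* Mission code would likely report an error here; this just clamps. */
        index = vecSize - 1;
    }
    return vec[index];
}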
@ -276,7 +276,7 @@ TEST_CASE( "Local Pool Extended Tests [3 Pools]" , "[TestPool2]") {
    CHECK(receptionArray[3] == 66);

    // now clear first page
    simplePool.clearPage(0);
    simplePool.clearSubPool(0);
    bytesWritten = 0;
    simplePool.getFillCount(receptionArray.data(), &bytesWritten);
    // Second page full, median fill count is 33 %
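The final hunk only tracks the rename of the storage pool API: clearPage() becomes clearSubPool(). For downstream call sites the migration is a one-line change; a hedged fragment using the names from the test above (simplePool stands in for whatever pool instance the application holds):

/* Before this update (hypothetical downstream call site, old API): */
// simplePool.clearPage(0);
/* After this update: */
simplePool.clearSubPool(0);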