FSFW Update #361
@@ -18,7 +18,7 @@ public:
     * This function is protected because it should only be used by the
     * class imlementing the interface.
     */
-   virtual LocalDataPoolManager* getHkManagerHandle() = 0;
+   virtual LocalDataPoolManager* getPoolManagerHandle() = 0;

protected:
@@ -5,8 +5,8 @@
#include "internal/LocalPoolDataSetAttorney.h"
#include "internal/HasLocalDpIFManagerAttorney.h"

-#include "../housekeeping/HousekeepingPacketUpdate.h"
#include "../housekeeping/HousekeepingSetPacket.h"
+#include "../housekeeping/HousekeepingSnapshot.h"
#include "../housekeeping/AcceptsHkPacketsIF.h"
#include "../timemanager/CCSDSTime.h"
#include "../ipc/MutexFactory.h"
@@ -226,7 +226,7 @@ ReturnValue_t LocalDataPoolManager::handleNotificationSnapshot(
    Clock::getClock_timeval(&now);
    CCSDSTime::CDS_short cds;
    CCSDSTime::convertToCcsds(&cds, &now);
-   HousekeepingPacketUpdate updatePacket(reinterpret_cast<uint8_t*>(&cds),
+   HousekeepingSnapshot updatePacket(reinterpret_cast<uint8_t*>(&cds),
            sizeof(cds), HasLocalDpIFManagerAttorney::getPoolObjectHandle(owner,
            receiver.dataId.localPoolId));
@@ -264,7 +264,7 @@ ReturnValue_t LocalDataPoolManager::handleNotificationSnapshot(
    Clock::getClock_timeval(&now);
    CCSDSTime::CDS_short cds;
    CCSDSTime::convertToCcsds(&cds, &now);
-   HousekeepingPacketUpdate updatePacket(reinterpret_cast<uint8_t*>(&cds),
+   HousekeepingSnapshot updatePacket(reinterpret_cast<uint8_t*>(&cds),
            sizeof(cds), HasLocalDpIFManagerAttorney::getDataSetHandle(owner,
            receiver.dataId.sid));
@@ -292,7 +292,7 @@ ReturnValue_t LocalDataPoolManager::handleNotificationSnapshot(
}

ReturnValue_t LocalDataPoolManager::addUpdateToStore(
-       HousekeepingPacketUpdate& updatePacket, store_address_t& storeId) {
+       HousekeepingSnapshot& updatePacket, store_address_t& storeId) {
    size_t updatePacketSize = updatePacket.getSerializedSize();
    uint8_t *storePtr = nullptr;
    ReturnValue_t result = ipcStore->getFreeElement(&storeId,
@@ -906,6 +906,6 @@ void LocalDataPoolManager::printWarningOrError(sif::OutputTypes outputType,
    }
}

-LocalDataPoolManager* LocalDataPoolManager::getHkManagerHandle() {
+LocalDataPoolManager* LocalDataPoolManager::getPoolManagerHandle() {
    return this;
}
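The snapshot path changed above follows a small fixed pattern: take the current clock time, convert it to a CDS short timestamp, and wrap it together with the affected pool object or data set. A minimal sketch of that pattern, using only the calls visible in the hunks (the dataSet pointer is a stand-in for whatever the attorney call returns), looks like this:

/* Illustrative sketch based on the hunks above, not part of the changeset.
 * 'dataSet' stands in for the LocalPoolDataSetBase* obtained via the attorney. */
timeval now;
Clock::getClock_timeval(&now);
CCSDSTime::CDS_short cds;
CCSDSTime::convertToCcsds(&cds, &now);
/* The snapshot wraps timestamp and data set; addUpdateToStore() then serializes
 * it into the IPC store. */
HousekeepingSnapshot updatePacket(reinterpret_cast<uint8_t*>(&cds), sizeof(cds), dataSet);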
@@ -24,7 +24,7 @@ void setStaticFrameworkObjectIds();
}

class LocalPoolDataSetBase;
-class HousekeepingPacketUpdate;
+class HousekeepingSnapshot;
class HasLocalDataPoolIF;
class LocalDataPool;
@@ -52,17 +52,17 @@ class LocalDataPool;
 * Each pool entry has a valid state too.
 * @author R. Mueller
 */
-class LocalDataPoolManager: public ProvidesDataPoolSubscriptionIF,
+class LocalDataPoolManager:
+       public ProvidesDataPoolSubscriptionIF,
        public AccessPoolManagerIF {
    friend void (Factory::setStaticFrameworkObjectIds)();
    //! Some classes using the pool manager directly need to access class internals of the
    //! manager. The attorney provides granular control of access to these internals.
    friend class LocalDpManagerAttorney;
public:
    static constexpr uint8_t INTERFACE_ID = CLASS_ID::HOUSEKEEPING_MANAGER;

    static constexpr ReturnValue_t QUEUE_OR_DESTINATION_INVALID = MAKE_RETURN_CODE(0);

    static constexpr ReturnValue_t WRONG_HK_PACKET_TYPE = MAKE_RETURN_CODE(1);
    static constexpr ReturnValue_t REPORTING_STATUS_UNCHANGED = MAKE_RETURN_CODE(2);
    static constexpr ReturnValue_t PERIODIC_HELPER_INVALID = MAKE_RETURN_CODE(3);
@@ -81,29 +81,29 @@ public:
     * @param appendValidityBuffer Specify whether a buffer containing the
     * validity state is generated when serializing or deserializing packets.
     */
    LocalDataPoolManager(HasLocalDataPoolIF* owner, MessageQueueIF* queueToUse,
            bool appendValidityBuffer = true);
    virtual~ LocalDataPoolManager();

    /**
     * Assigns the queue to use. Make sure to call this in the #initialize
     * function of the owner.
     * @param queueToUse
     * @param nonDiagInvlFactor See #setNonDiagnosticIntervalFactor doc
     * @return
     */
    ReturnValue_t initialize(MessageQueueIF* queueToUse);

    /**
     * Initializes the map by calling the map initialization function and
     * setting the periodic factor for non-diagnostic packets.
     * Don't forget to call this in the #initializeAfterTaskCreation call of
     * the owner, otherwise the map will be invalid!
     * @param nonDiagInvlFactor
     * @return
     */
    ReturnValue_t initializeAfterTaskCreation(
            uint8_t nonDiagInvlFactor = 5);

    /**
     * @brief This should be called in the periodic handler of the owner.
@@ -116,49 +116,49 @@ public:
     */
    virtual ReturnValue_t performHkOperation();

    /**
     * @brief Subscribe for the generation of periodic packets.
     * @details
     * This subscription mechanism will generally be used by the data creator
     * to generate housekeeping packets which are downlinked directly.
     * @return
     */
    ReturnValue_t subscribeForPeriodicPacket(sid_t sid, bool enableReporting,
            float collectionInterval, bool isDiagnostics,
            object_id_t packetDestination = defaultHkDestination) override;

    /**
     * @brief Subscribe for the generation of packets if the dataset
     * is marked as changed.
     * @details
     * This subscription mechanism will generally be used by the data creator.
     * @param sid
     * @param isDiagnostics
     * @param packetDestination
     * @return
     */
    ReturnValue_t subscribeForUpdatePackets(sid_t sid, bool reportingEnabled,
            bool isDiagnostics,
            object_id_t packetDestination = defaultHkDestination) override;

    /**
     * @brief Subscribe for a notification message which will be sent
     * if a dataset has changed.
     * @details
     * This subscription mechanism will generally be used internally by
     * other software components.
     * @param setId Set ID of the set to receive update messages from.
     * @param destinationObject
     * @param targetQueueId
     * @param generateSnapshot If this is set to true, a copy of the current
     * data with a timestamp will be generated and sent via message.
     * Otherwise, only an notification message is sent.
     * @return
     */
    ReturnValue_t subscribeForSetUpdateMessages(const uint32_t setId,
            object_id_t destinationObject,
            MessageQueueId_t targetQueueId,
            bool generateSnapshot) override;

    /**
     * @brief Subscribe for an notification message which will be sent if a
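Based on the subscription signatures above, a data creator would typically register its sets once during initialization. The following sketch is illustrative only and not part of the changeset; the set ID and the 10.0 second collection interval are made-up values, and poolManager stands for the owner's LocalDataPoolManager instance.

/* Hedged sketch; values and names are illustrative. */
sid_t mySid = sid_t(objects::TEST_LOCAL_POOL_OWNER_BASE, 0);
/* Downlink the set periodically every 10 seconds as a non-diagnostic packet. */
ReturnValue_t result = poolManager.subscribeForPeriodicPacket(mySid, true, 10.0, false);
if(result != HasReturnvaluesIF::RETURN_OK) {
    /* Handle the subscription error here. */
}
/* Additionally generate a packet whenever the set is marked as changed. */
result = poolManager.subscribeForUpdatePackets(mySid, true, false);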
@@ -179,18 +179,16 @@ public:
            MessageQueueId_t targetQueueId,
            bool generateSnapshot) override;

-   MutexIF* getLocalPoolMutex() override;
-
    /**
     * Non-Diagnostics packets usually have a lower minimum sampling frequency
     * than diagnostic packets.
     * A factor can be specified to determine the minimum sampling frequency
     * for non-diagnostic packets. The minimum sampling frequency of the
     * diagnostics packets,which is usually jusst the period of the
     * performOperation calls, is multiplied with that factor.
     * @param factor
     */
    void setNonDiagnosticIntervalFactor(uint8_t nonDiagInvlFactor);

    /**
     * @brief The manager is also able to handle housekeeping messages.
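A short worked example of the factor described above: if the owner's performOperation() period is 0.2 seconds, the default factor of 5 used by initializeAfterTaskCreation() yields a minimum sampling interval of 0.2 s * 5 = 1.0 s for non-diagnostic packets. The call below is a sketch with an assumed task period and is not part of the diff.

/* Assuming the owner task runs performOperation() every 0.2 s: non-diagnostic
 * packets are then sampled at most every 0.2 s * 5 = 1.0 s. */
poolManager.initializeAfterTaskCreation(5);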
@@ -206,18 +204,18 @@ public:
     */
    virtual ReturnValue_t handleHousekeepingMessage(CommandMessage* message);

    /**
     * Generate a housekeeping packet with a given SID.
     * @param sid
     * @return
     */
    ReturnValue_t generateHousekeepingPacket(sid_t sid,
            LocalPoolDataSetBase* dataSet, bool forDownlink,
            MessageQueueId_t destination = MessageQueueIF::NO_QUEUE);

    HasLocalDataPoolIF* getOwner();

    ReturnValue_t printPoolEntry(lp_id_t localPoolId);

    /**
     * Different types of housekeeping reporting are possible.
@@ -236,22 +234,19 @@ public:
        PERIODIC,
        //! Housekeeping packet will be generated if values have changed.
        UPDATE_HK,
        //! Update notification will be sent out as message.
        UPDATE_NOTIFICATION,
        //! Notification will be sent out as message and a snapshot of the
        //! current data will be generated.
        UPDATE_SNAPSHOT,
    };

-   /**
-    * Different data types are possible in the HK receiver map.
-    * For example, updates can be requested for full datasets or
-    * for single pool variables. Periodic reporting is only possible for
-    * data sets.
-    */
+   /** Different data types are possible in the HK receiver map. For example, updates can be
+   requested for full datasets or for single pool variables. Periodic reporting is only possible
+   for data sets. */
    enum class DataType: uint8_t {
        LOCAL_POOL_VARIABLE,
        DATA_SET
    };

    /* Copying forbidden */
@@ -267,11 +262,19 @@ public:

    object_id_t getCreatorObjectId() const;

-   virtual LocalDataPoolManager* getHkManagerHandle() override;
+   /**
+    * Get the pointer to the mutex. Can be used to lock the data pool
+    * externally. Use with care and don't forget to unlock locked mutexes!
+    * For now, only friend classes can accss this function.
+    * @return
+    */
+   MutexIF* getMutexHandle();
+
+   virtual LocalDataPoolManager* getPoolManagerHandle() override;
private:
    localpool::DataPool localPoolMap;
-   //! Every housekeeping data manager has a mutex to protect access
-   //! to it's data pool.
+   /** Every housekeeping data manager has a mutex to protect access
+   to it's data pool. */
    MutexIF* mutex = nullptr;

    /** The class which actually owns the manager (and its datapool). */
@@ -279,9 +282,9 @@ private:

    uint8_t nonDiagnosticIntervalFactor = 0;

    /** Default receiver for periodic HK packets */
    static object_id_t defaultHkDestination;
    MessageQueueId_t hkDestinationId = MessageQueueIF::NO_QUEUE;

    union DataId {
        DataId(): sid() {};
@@ -291,10 +294,10 @@ private:

    /** The data pool manager will keep an internal map of HK receivers. */
    struct HkReceiver {
        /** Object ID of receiver */
        object_id_t objectId = objects::NO_OBJECT;

        DataType dataType = DataType::DATA_SET;
        DataId dataId;

        ReportingType reportingType = ReportingType::PERIODIC;
@@ -324,37 +327,30 @@ private:
     * of generated housekeeping packets. */
    bool appendValidityBuffer = true;

    /**
     * @brief Queue used for communication, for example commands.
     * Is also used to send messages. Can be set either in the constructor
     * or in the initialize() function.
     */
    MessageQueueIF* hkQueue = nullptr;

    /** Global IPC store is used to store all packets. */
    StorageManagerIF* ipcStore = nullptr;
-   /**
-    * Get the pointer to the mutex. Can be used to lock the data pool
-    * externally. Use with care and don't forget to unlock locked mutexes!
-    * For now, only friend classes can accss this function.
-    * @return
-    */
-   MutexIF* getMutexHandle();

    /**
     * Read a variable by supplying its local pool ID and assign the pool
     * entry to the supplied PoolEntry pointer. The type of the pool entry
     * is deduced automatically. This call is not thread-safe!
     * For now, only friend classes like LocalPoolVar may access this
     * function.
     * @tparam T Type of the pool entry
     * @param localPoolId Pool ID of the variable to read
     * @param poolVar [out] Corresponding pool entry will be assigned to the
     * supplied pointer.
     * @return
     */
    template <class T> ReturnValue_t fetchPoolEntry(lp_id_t localPoolId,
            PoolEntry<T> **poolEntry);

    /**
     * This function is used to fill the local data pool map with pool
@@ -364,55 +360,57 @@ private:
     */
    ReturnValue_t initializeHousekeepingPoolEntriesOnce();

+   MutexIF* getLocalPoolMutex() override;
+
    ReturnValue_t serializeHkPacketIntoStore(
            HousekeepingPacketDownlink& hkPacket,
            store_address_t& storeId, bool forDownlink, size_t* serializedSize);

    void performPeriodicHkGeneration(HkReceiver& hkReceiver);
    ReturnValue_t togglePeriodicGeneration(sid_t sid, bool enable,
            bool isDiagnostics);
    ReturnValue_t changeCollectionInterval(sid_t sid,
            float newCollectionInterval, bool isDiagnostics);
    ReturnValue_t generateSetStructurePacket(sid_t sid, bool isDiagnostics);

    void handleHkUpdateResetListInsertion(DataType dataType, DataId dataId);
    void handleChangeResetLogic(DataType type,
            DataId dataId, MarkChangedIF* toReset);
    void resetHkUpdateResetHelper();

    ReturnValue_t handleHkUpdate(HkReceiver& hkReceiver,
            ReturnValue_t& status);
    ReturnValue_t handleNotificationUpdate(HkReceiver& hkReceiver,
            ReturnValue_t& status);
    ReturnValue_t handleNotificationSnapshot(HkReceiver& hkReceiver,
            ReturnValue_t& status);
-   ReturnValue_t addUpdateToStore(HousekeepingPacketUpdate& updatePacket,
+   ReturnValue_t addUpdateToStore(HousekeepingSnapshot& updatePacket,
            store_address_t& storeId);

    void printWarningOrError(sif::OutputTypes outputType,
            const char* functionName,
            ReturnValue_t errorCode = HasReturnvaluesIF::RETURN_FAILED,
            const char* errorPrint = nullptr);
};


template<class T> inline
ReturnValue_t LocalDataPoolManager::fetchPoolEntry(lp_id_t localPoolId,
        PoolEntry<T> **poolEntry) {
    auto poolIter = localPoolMap.find(localPoolId);
    if (poolIter == localPoolMap.end()) {
        printWarningOrError(sif::OutputTypes::OUT_ERROR, "fetchPoolEntry",
                localpool::POOL_ENTRY_NOT_FOUND);
        return localpool::POOL_ENTRY_NOT_FOUND;
    }

    *poolEntry = dynamic_cast< PoolEntry<T>* >(poolIter->second);
    if(*poolEntry == nullptr) {
        printWarningOrError(sif::OutputTypes::OUT_ERROR, "fetchPoolEntry",
                localpool::POOL_ENTRY_TYPE_CONFLICT);
        return localpool::POOL_ENTRY_TYPE_CONFLICT;
    }
    return HasReturnvaluesIF::RETURN_OK;
}
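The fetchPoolEntry() template above is what friend classes such as the local pool variables use to resolve a pool ID to a typed entry. A caller-side sketch, with a made-up pool ID, could look as follows; it is not taken from the changeset.

/* Hedged sketch of a friend-class call; the pool ID 0x01 is illustrative. */
PoolEntry<uint8_t>* poolEntry = nullptr;
ReturnValue_t result = hkManager->fetchPoolEntry<uint8_t>(0x01, &poolEntry);
if(result == HasReturnvaluesIF::RETURN_OK and poolEntry != nullptr) {
    /* The typed entry can now be read or committed by the calling pool variable. */
}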
@@ -4,11 +4,26 @@
#include "LocalPoolDataSetBase.h"
#include <vector>

+/**
+ * @brief This dataset type can be used to group related pool variables if the number of
+ * variables should not be fixed.
+ * @details
+ * This will is the primary data structure to organize pool variables into
+ * sets which can be accessed via the housekeeping service interface or
+ * which can be sent to other software objects.
+ *
+ * It is recommended to read the documentation of the LocalPoolDataSetBase
+ * class for more information on how this class works and how to use it.
+ * @tparam capacity Capacity of the static dataset, which is usually known
+ * beforehand.
+ */
class LocalDataSet: public LocalPoolDataSetBase {
public:
    LocalDataSet(HasLocalDataPoolIF* hkOwner, uint32_t setId,
            const size_t maxSize);

    LocalDataSet(sid_t sid, const size_t maxSize);

    virtual~ LocalDataSet();

    //! Copying forbidden for now.
@@ -28,7 +28,7 @@ LocalPoolDataSetBase::LocalPoolDataSetBase(HasLocalDataPoolIF *hkOwner,
    AccessPoolManagerIF* accessor = HasLocalDpIFUserAttorney::getAccessorHandle(hkOwner);

    if(accessor != nullptr) {
-       poolManager = accessor->getHkManagerHandle();
+       poolManager = accessor->getPoolManagerHandle();
        mutexIfSingleDataCreator = accessor->getLocalPoolMutex();
    }
@@ -22,7 +22,7 @@ LocalPoolObjectBase::LocalPoolObjectBase(lp_id_t poolId, HasLocalDataPoolIF* hkO
        return;
    }
    AccessPoolManagerIF* poolManAccessor = HasLocalDpIFUserAttorney::getAccessorHandle(hkOwner);
-   hkManager = poolManAccessor->getHkManagerHandle();
+   hkManager = poolManAccessor->getPoolManagerHandle();

    if (dataSet != nullptr) {
        dataSet->registerVariable(this);

@@ -50,7 +50,7 @@ LocalPoolObjectBase::LocalPoolObjectBase(object_id_t poolOwner, lp_id_t poolId,

    AccessPoolManagerIF* accessor = HasLocalDpIFUserAttorney::getAccessorHandle(hkOwner);
    if(accessor != nullptr) {
-       hkManager = accessor->getHkManagerHandle();
+       hkManager = accessor->getPoolManagerHandle();
    }

    if(dataSet != nullptr) {
@@ -77,8 +77,7 @@ public:
     * @param dataSet
     * @param setReadWriteMode
     */
-   LocalPoolVector(gp_id_t globalPoolId,
-           DataSetIF* dataSet = nullptr,
+   LocalPoolVector(gp_id_t globalPoolId, DataSetIF* dataSet = nullptr,
            pool_rwm_t setReadWriteMode = pool_rwm_t::VAR_READ_WRITE);

    /**

@@ -87,7 +86,7 @@ public:
     * The user can work on this attribute just like he would on a local
     * array of this type.
     */
-   T value[vectorSize];
+   T value[vectorSize]= {};
    /**
     * @brief The classes destructor is empty.
     * @details If commit() was not called, the local value is
@@ -16,7 +16,6 @@ inline LocalPoolVector<T, vectorSize>::LocalPoolVector(object_id_t poolOwner,
        lp_id_t poolId, DataSetIF *dataSet, pool_rwm_t setReadWriteMode):
        LocalPoolObjectBase(poolOwner, poolId, dataSet, setReadWriteMode) {}

-
template<typename T, uint16_t vectorSize>
inline LocalPoolVector<T, vectorSize>::LocalPoolVector(gp_id_t globalPoolId,
        DataSetIF *dataSet, pool_rwm_t setReadWriteMode):
@@ -6,7 +6,8 @@
#include <array>

/**
- * @brief This local dataset type is created on the stack.
+ * @brief This dataset type can be used to group related pool variables if the number of
+ * variables is fixed.
 * @details
 * This will is the primary data structure to organize pool variables into
 * sets which can be accessed via the housekeeping service interface or
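To make the distinction between the two @brief lines above concrete: a fixed number of variables goes into the template-sized StaticLocalDataSet, a runtime-sized number into LocalDataSet. The sketch below reuses the object and set IDs from the unit tests further down and is purely illustrative, not part of the diff.

/* Hedged sketch. */
const uint32_t setId = 0;
/* Capacity known at compile time: storage lives inside the object. */
StaticLocalDataSet<3> fixedSet(sid_t(objects::TEST_LOCAL_POOL_OWNER_BASE, setId));
/* Capacity only known at runtime: maximum number of variables passed to the constructor. */
LocalDataSet flexibleSet(sid_t(objects::TEST_LOCAL_POOL_OWNER_BASE, setId), 10);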
doc/doxy/.gitignore (new file, 3 lines, vendored)
@@ -0,0 +1,3 @@
+html
+latex
+rtf

doc/doxy/OPUS.doxyfile (new file, 2609 lines)
File diff suppressed because it is too large
@@ -1,24 +1,37 @@
-#ifndef FSFW_HOUSEKEEPING_HOUSEKEEPINGPACKETUPDATE_H_
-#define FSFW_HOUSEKEEPING_HOUSEKEEPINGPACKETUPDATE_H_
+#ifndef FSFW_HOUSEKEEPING_HOUSEKEEPINGSNAPSHOT_H_
+#define FSFW_HOUSEKEEPING_HOUSEKEEPINGSNAPSHOT_H_

#include "../serialize/SerialBufferAdapter.h"
#include "../serialize/SerialLinkedListAdapter.h"
#include "../datapoollocal/LocalPoolDataSetBase.h"
#include "../datapoollocal/LocalPoolObjectBase.h"
+#include "../timemanager/CCSDSTime.h"

/**
- * @brief This helper class will be used to serialize and deserialize
- * update housekeeping packets into the store.
+ * @brief This helper class will be used to serialize and deserialize update housekeeping packets
+ * into the store.
 */
-class HousekeepingPacketUpdate: public SerializeIF {
+class HousekeepingSnapshot: public SerializeIF {
public:

    /**
-    * Update packet constructor for datasets
-    * @param timeStamp
-    * @param timeStampSize
-    * @param hkData
-    * @param hkDataSize
+    * Update packet constructor for datasets.
+    * @param cdsShort If a CSD short timestamp is used, a reference should be
+    * supplied here
+    * @param dataSetPtr Pointer to the dataset instance to serialize or deserialize the
+    * data into
     */
-   HousekeepingPacketUpdate(uint8_t* timeStamp, size_t timeStampSize,
+   HousekeepingSnapshot(CCSDSTime::CDS_short* cdsShort, LocalPoolDataSetBase* dataSetPtr):
+           timeStamp(reinterpret_cast<uint8_t*>(cdsShort)),
+           timeStampSize(sizeof(CCSDSTime::CDS_short)), updateData(dataSetPtr) {};
+
+   /**
+    * Update packet constructor for datasets.
+    * @param timeStamp Pointer to the buffer where the timestamp will be stored.
+    * @param timeStampSize Size of the timestamp
+    * @param dataSetPtr Pointer to the dataset instance to deserialize the data into
+    */
+   HousekeepingSnapshot(uint8_t* timeStamp, size_t timeStampSize,
            LocalPoolDataSetBase* dataSetPtr):
            timeStamp(timeStamp), timeStampSize(timeStampSize),
            updateData(dataSetPtr) {};

@@ -29,7 +42,7 @@ public:
     * @param timeStampSize
     * @param dataSetPtr
     */
-   HousekeepingPacketUpdate(uint8_t* timeStamp, size_t timeStampSize,
+   HousekeepingSnapshot(uint8_t* timeStamp, size_t timeStampSize,
            LocalPoolObjectBase* dataSetPtr):
            timeStamp(timeStamp), timeStampSize(timeStampSize),
            updateData(dataSetPtr) {};

@@ -89,4 +102,4 @@ private:
};


-#endif /* FSFW_HOUSEKEEPING_HOUSEKEEPINGPACKETUPDATE_H_ */
+#endif /* FSFW_HOUSEKEEPING_HOUSEKEEPINGSNAPSHOT_H_ */
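A usage sketch tying the new CDS_short constructor to the deserialization path exercised in the unit test further below; it is not part of the changeset, and newSet, readOnlyPtr and sizeToDeserialize are stand-ins for a receiver-side data set and the buffer retrieved from the IPC store.

/* Hedged sketch. */
CCSDSTime::CDS_short cdsShort;
HousekeepingSnapshot snapshot(&cdsShort, &newSet);
ReturnValue_t result = snapshot.deSerialize(&readOnlyPtr, &sizeToDeserialize,
        SerializeIF::Endianness::MACHINE);
/* On success, the timestamp is now in cdsShort and the unpacked values in newSet. */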
@@ -53,8 +53,7 @@ ReturnValue_t Clock::getClock_timeval(timeval* time) {
    auto epoch = now.time_since_epoch();
    time->tv_sec = std::chrono::duration_cast<std::chrono::seconds>(epoch).count();
    auto fraction = now - secondsChrono;
-   time->tv_usec = std::chrono::duration_cast<std::chrono::microseconds>(
-           fraction).count();
+   time->tv_usec = std::chrono::duration_cast<std::chrono::microseconds>(fraction).count();
    return HasReturnvaluesIF::RETURN_OK;
#elif defined(LINUX)
    timespec timeUnix;

@@ -67,7 +66,9 @@ ReturnValue_t Clock::getClock_timeval(timeval* time) {
    return HasReturnvaluesIF::RETURN_OK;
#else
#if FSFW_CPP_OSTREAM_ENABLED == 1
-   sif::warning << "Clock::getUptime: Not implemented for found OS" << std::endl;
+   sif::warning << "Clock::getUptime: Not implemented for found OS!" << std::endl;
+#else
+   sif::printWarning("Clock::getUptime: Not implemented for found OS!\n");
#endif
    return HasReturnvaluesIF::RETURN_FAILED;
#endif
@@ -1,5 +1,5 @@
#include "Stopwatch.h"
-#include "../serviceinterface/ServiceInterfaceStream.h"
+#include "../serviceinterface/ServiceInterface.h"
#include <iomanip>

Stopwatch::Stopwatch(bool displayOnDestruction,

@@ -28,9 +28,13 @@ double Stopwatch::stopSeconds() {

void Stopwatch::display() {
    if(displayMode == StopwatchDisplayMode::MILLIS) {
+       dur_millis_t timeMillis = static_cast<dur_millis_t>(
+               elapsedTime.tv_sec * 1000 + elapsedTime.tv_usec / 1000);
#if FSFW_CPP_OSTREAM_ENABLED == 1
-       sif::info << "Stopwatch: Operation took " << (elapsedTime.tv_sec * 1000 +
-               elapsedTime.tv_usec / 1000) << " milliseconds" << std::endl;
+       sif::info << "Stopwatch: Operation took " << timeMillis << " milliseconds" << std::endl;
+#else
+       sif::printInfo("Stopwatch: Operation took %lu milliseconds\n\r",
+               static_cast<unsigned int>(timeMillis));
#endif
    }
    else if(displayMode == StopwatchDisplayMode::SECONDS) {

@@ -38,6 +42,9 @@ void Stopwatch::display() {
        sif::info <<"Stopwatch: Operation took " << std::setprecision(3)
                << std::fixed << timevalOperations::toDouble(elapsedTime)
                << " seconds" << std::endl;
+#else
+       sif::printInfo("Stopwatch: Operation took %.3f seconds\n\r",
+               static_cast<float>(timevalOperations::toDouble(elapsedTime)));
+#endif
    }
}
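For context, the display() changes above affect the usual RAII-style use of the Stopwatch, sketched below under the assumption that the remaining constructor parameters keep their defaults; performLengthyOperation() is a hypothetical placeholder and the sketch is not part of the diff.

/* Hedged sketch. */
{
    Stopwatch stopwatch(true); /* displayOnDestruction = true */
    performLengthyOperation(); /* hypothetical workload */
} /* Destructor calls display(): "Stopwatch: Operation took ... milliseconds" */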
@@ -6,17 +6,17 @@
#include <unittest/core/CatchDefinitions.h>

TEST_CASE("LocalDataSet" , "[LocDataSetTest]") {
    LocalPoolOwnerBase* poolOwner = objectManager->
            get<LocalPoolOwnerBase>(objects::TEST_LOCAL_POOL_OWNER_BASE);
    REQUIRE(poolOwner != nullptr);
    REQUIRE(poolOwner->initializeHkManager() == retval::CATCH_OK);
    REQUIRE(poolOwner->initializeHkManagerAfterTaskCreation()
            == retval::CATCH_OK);
    const uint32_t setId = 0;
    SECTION("BasicTest") {
        StaticLocalDataSet<3> localSet = StaticLocalDataSet<3>(
                sid_t(objects::TEST_LOCAL_POOL_OWNER_BASE, setId));
    }
}
@@ -1,122 +1,199 @@
#include "LocalPoolOwnerBase.h"

#include <catch2/catch_test_macros.hpp>
#include <catch2/catch_approx.hpp>

#include <fsfw/datapool/PoolReadHelper.h>
#include <fsfw/datapoollocal/HasLocalDataPoolIF.h>
#include <fsfw/datapoollocal/StaticLocalDataSet.h>
#include <fsfw/housekeeping/HousekeepingSnapshot.h>
#include <fsfw/ipc/CommandMessageCleaner.h>
#include <fsfw/timemanager/CCSDSTime.h>
#include <unittest/core/CatchDefinitions.h>
#include <iostream>


TEST_CASE("LocalPoolManagerTest" , "[LocManTest]") {
    LocalPoolOwnerBase* poolOwner = objectManager->
            get<LocalPoolOwnerBase>(objects::TEST_LOCAL_POOL_OWNER_BASE);
    REQUIRE(poolOwner != nullptr);
    REQUIRE(poolOwner->initializeHkManager() == retval::CATCH_OK);
    REQUIRE(poolOwner->initializeHkManagerAfterTaskCreation()
            == retval::CATCH_OK);
-   REQUIRE(poolOwner->dataset.assignPointers() == retval::CATCH_OK);
+   //REQUIRE(poolOwner->dataset.assignPointers() == retval::CATCH_OK);
    MessageQueueMockBase* mqMock = poolOwner->getMockQueueHandle();
    REQUIRE(mqMock != nullptr);
    CommandMessage messageSent;
    uint8_t messagesSent = 0;


    SECTION("BasicTest") {
-       // Subscribe for message generation on update.
+       /* Subscribe for message generation on update. */
        REQUIRE(poolOwner->subscribeWrapperSetUpdate() == retval::CATCH_OK);
-       // Subscribe for an update message.
+       /* Subscribe for an update message. */
        poolOwner->dataset.setChanged(true);
-       // Now the update message should be generated.
+       /* Now the update message should be generated. */
        REQUIRE(poolOwner->poolManager.performHkOperation() == retval::CATCH_OK);
        REQUIRE(mqMock->wasMessageSent() == true);

        REQUIRE(mqMock->receiveMessage(&messageSent) == retval::CATCH_OK);
        CHECK(messageSent.getCommand() == static_cast<int>(
                HousekeepingMessage::UPDATE_NOTIFICATION_SET));

-       // Should have been reset.
+       /* Should have been reset. */
        CHECK(poolOwner->dataset.hasChanged() == false);
-       // Set changed again, result should be the same.
+       /* Set changed again, result should be the same. */
        poolOwner->dataset.setChanged(true);
        REQUIRE(poolOwner->poolManager.performHkOperation() == retval::CATCH_OK);

        REQUIRE(mqMock->wasMessageSent(&messagesSent) == true);
        CHECK(messagesSent == 1);
        REQUIRE(mqMock->receiveMessage(&messageSent) == retval::CATCH_OK);
        CHECK(messageSent.getCommand() == static_cast<int>(
                HousekeepingMessage::UPDATE_NOTIFICATION_SET));

-       // now subscribe for set update HK as well.
+       /* Now subscribe for set update HK as well. */
        REQUIRE(poolOwner->subscribeWrapperSetUpdateHk() == retval::CATCH_OK);
        poolOwner->dataset.setChanged(true);
        REQUIRE(poolOwner->poolManager.performHkOperation() == retval::CATCH_OK);
        REQUIRE(mqMock->wasMessageSent(&messagesSent) == true);
        CHECK(messagesSent == 2);
-       // first message sent should be the update notification, considering
-       // the internal list is a vector checked in insertion order.
+       /* first message sent should be the update notification, considering
+       the internal list is a vector checked in insertion order. */
        REQUIRE(mqMock->receiveMessage(&messageSent) == retval::CATCH_OK);
        CHECK(messageSent.getCommand() == static_cast<int>(
                HousekeepingMessage::UPDATE_NOTIFICATION_SET));

        REQUIRE(mqMock->receiveMessage(&messageSent) == retval::CATCH_OK);
        CHECK(messageSent.getCommand() == static_cast<int>(
                HousekeepingMessage::HK_REPORT));
-       // clear message to avoid memory leak, our mock won't do it for us (yet)
+       /* Clear message to avoid memory leak, our mock won't do it for us (yet) */
        CommandMessageCleaner::clearCommandMessage(&messageSent);
    }

+   SECTION("SnapshotUpdateTests") {
+       /* Set the variables in the set to certain values. These are checked later. */
+       {
+           PoolReadHelper readHelper(&poolOwner->dataset);
+           REQUIRE(readHelper.getReadResult() == retval::CATCH_OK);
+           poolOwner->dataset.localPoolVarUint8.value = 5;
+           poolOwner->dataset.localPoolVarFloat.value = -12.242;
+           poolOwner->dataset.localPoolUint16Vec.value[0] = 2;
+           poolOwner->dataset.localPoolUint16Vec.value[1] = 32;
+           poolOwner->dataset.localPoolUint16Vec.value[2] = 42932;
+       }
+
+       /* Subscribe for snapshot generation on update. */
+       REQUIRE(poolOwner->subscribeWrapperSetUpdateSnapshot() == retval::CATCH_OK);
+       poolOwner->dataset.setChanged(true);
+
+       /* Store current time, we are going to check the (approximate) time equality later */
+       CCSDSTime::CDS_short timeCdsNow;
+       timeval now;
+       Clock::getClock_timeval(&now);
+       CCSDSTime::convertToCcsds(&timeCdsNow, &now);
+
+       /* Trigger generation of snapshot */
+       REQUIRE(poolOwner->poolManager.performHkOperation() == retval::CATCH_OK);
+       REQUIRE(mqMock->wasMessageSent(&messagesSent) == true);
+       CHECK(messagesSent == 1);
+       REQUIRE(mqMock->receiveMessage(&messageSent) == retval::CATCH_OK);
+       /* Check that snapshot was generated */
+       CHECK(messageSent.getCommand() == static_cast<int>(
+               HousekeepingMessage::UPDATE_SNAPSHOT_SET));
+       /* Now we deserialize the snapshot into a new dataset instance */
+       CCSDSTime::CDS_short cdsShort;
+       LocalPoolTestDataSet newSet;
+       HousekeepingSnapshot snapshot(&cdsShort, &newSet);
+       store_address_t storeId;
+       HousekeepingMessage::getUpdateSnapshotSetCommand(&messageSent, &storeId);
+       ConstAccessorPair accessorPair = tglob::getIpcStoreHandle()->getData(storeId);
+       REQUIRE(accessorPair.first == retval::CATCH_OK);
+       const uint8_t* readOnlyPtr = accessorPair.second.data();
+       size_t sizeToDeserialize = accessorPair.second.size();
+       CHECK(newSet.localPoolVarFloat.value == 0);
+       CHECK(newSet.localPoolVarUint8 == 0);
+       CHECK(newSet.localPoolUint16Vec.value[0] == 0);
+       CHECK(newSet.localPoolUint16Vec.value[1] == 0);
+       CHECK(newSet.localPoolUint16Vec.value[2] == 0);
+       /* Fill the dataset and timestamp */
+       REQUIRE(snapshot.deSerialize(&readOnlyPtr, &sizeToDeserialize,
+               SerializeIF::Endianness::MACHINE) == retval::CATCH_OK);
+       /* Now we check that the snapshot is actually correct */
+       CHECK(newSet.localPoolVarFloat.value == Catch::Approx(-12.242));
+       CHECK(newSet.localPoolVarUint8 == 5);
+       CHECK(newSet.localPoolUint16Vec.value[0] == 2);
+       CHECK(newSet.localPoolUint16Vec.value[1] == 32);
+       CHECK(newSet.localPoolUint16Vec.value[2] == 42932);
+
+       /* Now we check that both times are equal */
+       CHECK(cdsShort.pField == timeCdsNow.pField);
+       CHECK(cdsShort.dayLSB == Catch::Approx(timeCdsNow.dayLSB).margin(1));
+       CHECK(cdsShort.dayMSB == Catch::Approx(timeCdsNow.dayMSB).margin(1));
+       CHECK(cdsShort.msDay_h == Catch::Approx(timeCdsNow.msDay_h).margin(1));
+       CHECK(cdsShort.msDay_hh == Catch::Approx(timeCdsNow.msDay_hh).margin(1));
+       CHECK(cdsShort.msDay_l == Catch::Approx(timeCdsNow.msDay_l).margin(1));
+       CHECK(cdsShort.msDay_ll == Catch::Approx(timeCdsNow.msDay_ll).margin(1));
+   }
+
    SECTION("AdvancedTests") {
-       // we need to reset the subscription list because the pool owner
-       // is a global object.
-       poolOwner->resetSubscriptionList();
-       // Subscribe for variable update as well
-       REQUIRE(not poolOwner->dataset.hasChanged());
+       /* Acquire subscription interface */
+       ProvidesDataPoolSubscriptionIF* subscriptionIF = poolOwner->getSubscriptionInterface();
+       REQUIRE(subscriptionIF != nullptr);
+
+       /* Subscribe for variable update */
        REQUIRE(poolOwner->subscribeWrapperVariableUpdate(lpool::uint8VarId) ==
                retval::CATCH_OK);
        lp_var_t<uint8_t>* poolVar = dynamic_cast<lp_var_t<uint8_t>*>(
                poolOwner->getPoolObjectHandle(lpool::uint8VarId));
        REQUIRE(poolVar != nullptr);
        poolVar->setChanged(true);
        REQUIRE(poolOwner->poolManager.performHkOperation() == retval::CATCH_OK);

-       // Check update notification was sent.
+       /* Check update notification was sent. */
        REQUIRE(mqMock->wasMessageSent(&messagesSent) == true);
        CHECK(messagesSent == 1);
-       // Should have been reset.
+       /* Should have been reset. */
        CHECK(poolVar->hasChanged() == false);
        REQUIRE(mqMock->receiveMessage(&messageSent) == retval::CATCH_OK);
        CHECK(messageSent.getCommand() == static_cast<int>(
                HousekeepingMessage::UPDATE_NOTIFICATION_VARIABLE));

-       // now subscribe for the dataset update (HK and update) again
-       REQUIRE(poolOwner->subscribeWrapperSetUpdate() == retval::CATCH_OK);
+       /* Now subscribe for the dataset update (HK and update) again with subscription interface */
+       REQUIRE(subscriptionIF->subscribeForSetUpdateMessages(lpool::testSetId,
+               objects::NO_OBJECT, objects::HK_RECEIVER_MOCK, false) == retval::CATCH_OK);
        REQUIRE(poolOwner->subscribeWrapperSetUpdateHk() == retval::CATCH_OK);

        poolOwner->dataset.setChanged(true);
        REQUIRE(poolOwner->poolManager.performHkOperation() == retval::CATCH_OK);
-       // now two messages should be sent.
+       /* Now two messages should be sent. */
        REQUIRE(mqMock->wasMessageSent(&messagesSent) == true);
        CHECK(messagesSent == 2);
        mqMock->clearMessages(true);

        poolOwner->dataset.setChanged(true);
        poolVar->setChanged(true);
        REQUIRE(poolOwner->poolManager.performHkOperation() == retval::CATCH_OK);
-       // now three messages should be sent.
+       /* Now three messages should be sent. */
        REQUIRE(mqMock->wasMessageSent(&messagesSent) == true);
        CHECK(messagesSent == 3);
        REQUIRE(mqMock->receiveMessage(&messageSent) == retval::CATCH_OK);
        CHECK(messageSent.getCommand() == static_cast<int>(
                HousekeepingMessage::UPDATE_NOTIFICATION_VARIABLE));
        REQUIRE(mqMock->receiveMessage(&messageSent) == retval::CATCH_OK);
        CHECK(messageSent.getCommand() == static_cast<int>(
                HousekeepingMessage::UPDATE_NOTIFICATION_SET));
        REQUIRE(mqMock->receiveMessage(&messageSent) == retval::CATCH_OK);
        CHECK(messageSent.getCommand() == static_cast<int>(
                HousekeepingMessage::HK_REPORT));
        CommandMessageCleaner::clearCommandMessage(&messageSent);
        REQUIRE(mqMock->receiveMessage(&messageSent) ==
                static_cast<int>(MessageQueueIF::EMPTY));
    }

+   /* we need to reset the subscription list because the pool owner
+   is a global object. */
+   poolOwner->resetSubscriptionList();
+   mqMock->clearMessages(true);
}
@@ -20,33 +20,43 @@ static constexpr lp_id_t int64Vec2Id = 4;

static constexpr uint32_t testSetId = 0;
static constexpr uint8_t dataSetMaxVariables = 10;
-static const sid_t testSid = sid_t(objects::TEST_LOCAL_POOL_OWNER_BASE,
-       testSetId);
+static const sid_t testSid = sid_t(objects::TEST_LOCAL_POOL_OWNER_BASE, testSetId);
+
+static const gp_id_t uint8VarGpid = gp_id_t(objects::TEST_LOCAL_POOL_OWNER_BASE, uint8VarId);
+static const gp_id_t floatVarGpid = gp_id_t(objects::TEST_LOCAL_POOL_OWNER_BASE, floatVarId);
+static const gp_id_t uint32Gpid = gp_id_t(objects::TEST_LOCAL_POOL_OWNER_BASE, uint32VarId);
+static const gp_id_t uint16Vec3Gpid = gp_id_t(objects::TEST_LOCAL_POOL_OWNER_BASE, uint16Vec3Id);
+static const gp_id_t uint64Vec2Id = gp_id_t(objects::TEST_LOCAL_POOL_OWNER_BASE, int64Vec2Id);
}


class LocalPoolTestDataSet: public LocalDataSet {
public:
+   LocalPoolTestDataSet():
+           LocalDataSet(lpool::testSid, lpool::dataSetMaxVariables) {
+   }
+
    LocalPoolTestDataSet(HasLocalDataPoolIF* owner, uint32_t setId):
            LocalDataSet(owner, setId, lpool::dataSetMaxVariables) {
    }

-   ReturnValue_t assignPointers() {
-       PoolVariableIF** rawVarArray = getContainer();
-       localPoolVarUint8 = dynamic_cast<lp_var_t<uint8_t>*>(rawVarArray[0]);
-       localPoolVarFloat = dynamic_cast<lp_var_t<float>*>(rawVarArray[1]);
-       localPoolUint16Vec = dynamic_cast<lp_vec_t<uint16_t, 3>*>(
-               rawVarArray[2]);
-       if(localPoolVarUint8 == nullptr or localPoolVarFloat == nullptr or
-               localPoolUint16Vec == nullptr) {
-           return HasReturnvaluesIF::RETURN_FAILED;
-       }
-       return HasReturnvaluesIF::RETURN_OK;
-   }
+// ReturnValue_t assignPointers() {
+//     PoolVariableIF** rawVarArray = getContainer();
+//     localPoolVarUint8 = dynamic_cast<lp_var_t<uint8_t>*>(rawVarArray[0]);
+//     localPoolVarFloat = dynamic_cast<lp_var_t<float>*>(rawVarArray[1]);
+//     localPoolUint16Vec = dynamic_cast<lp_vec_t<uint16_t, 3>*>(
+//             rawVarArray[2]);
+//     if(localPoolVarUint8 == nullptr or localPoolVarFloat == nullptr or
+//             localPoolUint16Vec == nullptr) {
+//         return HasReturnvaluesIF::RETURN_FAILED;
+//     }
+//     return HasReturnvaluesIF::RETURN_OK;
+// }

-   lp_var_t<uint8_t>* localPoolVarUint8 = nullptr;
-   lp_var_t<float>* localPoolVarFloat = nullptr;
-   lp_vec_t<uint16_t, 3>* localPoolUint16Vec = nullptr;
+   lp_var_t<uint8_t> localPoolVarUint8 = lp_var_t<uint8_t>(lpool::uint8VarGpid, this);
+   lp_var_t<float> localPoolVarFloat = lp_var_t<float>(lpool::floatVarGpid, this);
+   lp_vec_t<uint16_t, 3> localPoolUint16Vec = lp_vec_t<uint16_t, 3>(lpool::uint16Vec3Gpid, this);

private:
};
@@ -54,143 +64,148 @@ private:

class LocalPoolOwnerBase: public SystemObject, public HasLocalDataPoolIF {
public:
    LocalPoolOwnerBase(
            object_id_t objectId = objects::TEST_LOCAL_POOL_OWNER_BASE):
            SystemObject(objectId), poolManager(this, messageQueue),
            dataset(this, lpool::testSetId) {
        messageQueue = new MessageQueueMockBase();
    }

    ~LocalPoolOwnerBase() {
        QueueFactory::instance()->deleteMessageQueue(messageQueue);
    }

    object_id_t getObjectId() const override {
        return SystemObject::getObjectId();
    }

    ReturnValue_t initializeHkManager() {
        if(not initialized) {
            initialized = true;
            return poolManager.initialize(messageQueue);
        }
        return HasReturnvaluesIF::RETURN_OK;
    }

    ReturnValue_t initializeHkManagerAfterTaskCreation() {
        if(not initializedAfterTaskCreation) {
            initializedAfterTaskCreation = true;
            return poolManager.initializeAfterTaskCreation();
        }
        return HasReturnvaluesIF::RETURN_OK;
    }

    /** Command queue for housekeeping messages. */
    MessageQueueId_t getCommandQueue() const override {
        return messageQueue->getId();
    }

    // This is called by initializeAfterTaskCreation of the HK manager.
    virtual ReturnValue_t initializeLocalDataPool(
            localpool::DataPool& localDataPoolMap,
            LocalDataPoolManager& poolManager) {
        // Default initialization empty for now.
        localDataPoolMap.emplace(lpool::uint8VarId,
                new PoolEntry<uint8_t>({0}));
        localDataPoolMap.emplace(lpool::floatVarId,
                new PoolEntry<float>({0}));
        localDataPoolMap.emplace(lpool::uint32VarId,
                new PoolEntry<uint32_t>({0}));

        localDataPoolMap.emplace(lpool::uint16Vec3Id,
                new PoolEntry<uint16_t>({0, 0, 0}));
        localDataPoolMap.emplace(lpool::int64Vec2Id,
                new PoolEntry<int64_t>({0, 0}));
        return HasReturnvaluesIF::RETURN_OK;
    }

    LocalDataPoolManager* getHkManagerHandle() override {
        return &poolManager;
    }

    uint32_t getPeriodicOperationFrequency() const override {
        return 0;
    }

    /**
     * This function is used by the pool manager to get a valid dataset
     * from a SID
     * @param sid Corresponding structure ID
     * @return
     */
    virtual LocalPoolDataSetBase* getDataSetHandle(sid_t sid) override {
        return &dataset;
    }

    virtual LocalPoolObjectBase* getPoolObjectHandle(
            lp_id_t localPoolId) override {
        if(localPoolId == lpool::uint8VarId) {
            return &testUint8;
        }
        else if(localPoolId == lpool::uint16Vec3Id) {
            return &testUint16Vec;
        }
        else if(localPoolId == lpool::floatVarId) {
            return &testFloat;
        }
        else if(localPoolId == lpool::int64Vec2Id) {
            return &testInt64Vec;
        }
        else if(localPoolId == lpool::uint32VarId) {
            return &testUint32;
        }
        else {
            return &testUint8;
        }
    }

    MessageQueueMockBase* getMockQueueHandle() const {
        return dynamic_cast<MessageQueueMockBase*>(messageQueue);
    }

    ReturnValue_t subscribeWrapperSetUpdate() {
        return poolManager.subscribeForSetUpdateMessages(lpool::testSetId,
-               objects::NO_OBJECT, MessageQueueIF::NO_QUEUE, false);
+               objects::NO_OBJECT, objects::HK_RECEIVER_MOCK, false);
    }

+   ReturnValue_t subscribeWrapperSetUpdateSnapshot() {
+       return poolManager.subscribeForSetUpdateMessages(lpool::testSetId,
+               objects::NO_OBJECT, objects::HK_RECEIVER_MOCK, true);
+   }
+
    ReturnValue_t subscribeWrapperSetUpdateHk(bool diagnostics = false) {
        return poolManager.subscribeForUpdatePackets(lpool::testSid, diagnostics,
                false, objects::HK_RECEIVER_MOCK);
    }

    ReturnValue_t subscribeWrapperVariableUpdate(lp_id_t localPoolId) {
        return poolManager.subscribeForVariableUpdateMessages(localPoolId,
-               MessageQueueIF::NO_QUEUE, objects::NO_OBJECT, false);
+               MessageQueueIF::NO_QUEUE, objects::HK_RECEIVER_MOCK, false);
    }

    void resetSubscriptionList() {
        poolManager.clearReceiversList();
    }

    LocalDataPoolManager poolManager;
    LocalPoolTestDataSet dataset;
private:

    lp_var_t<uint8_t> testUint8 = lp_var_t<uint8_t>(this, lpool::uint8VarId,
            &dataset);
    lp_var_t<float> testFloat = lp_var_t<float>(this, lpool::floatVarId,
            &dataset);
    lp_var_t<uint32_t> testUint32 = lp_var_t<uint32_t>(this, lpool::uint32VarId);

    lp_vec_t<uint16_t, 3> testUint16Vec = lp_vec_t<uint16_t, 3>(this,
            lpool::uint16Vec3Id, &dataset);
    lp_vec_t<int64_t, 2> testInt64Vec = lp_vec_t<int64_t, 2>(this,
            lpool::int64Vec2Id);

    MessageQueueIF* messageQueue = nullptr;

    bool initialized = false;
    bool initializedAfterTaskCreation = false;
|
||||
|
||||
};
|
||||
|
||||
|
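Note on the wrappers above: the set update and variable update subscriptions of the test owner now pass objects::HK_RECEIVER_MOCK as the receiver instead of a bare queue ID, and the new subscribeWrapperSetUpdateSnapshot() subscribes with the snapshot flag set to true. A minimal usage sketch for a Catch2 section, assuming the registered LocalPoolOwnerBase test object used by the test cases below; only the wrapper names shown above are relied on, everything else mirrors the existing test setup and is illustrative:

    LocalPoolOwnerBase* poolOwner = objectManager->
            get<LocalPoolOwnerBase>(objects::TEST_LOCAL_POOL_OWNER_BASE);
    REQUIRE(poolOwner != nullptr);
    /* Route set update messages and update snapshots to the HK receiver mock. */
    REQUIRE(poolOwner->subscribeWrapperSetUpdate() == retval::CATCH_OK);
    REQUIRE(poolOwner->subscribeWrapperSetUpdateSnapshot() == retval::CATCH_OK);
    /* Start the next section from an empty receivers list again. */
    poolOwner->resetSubscriptionList();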
@ -6,117 +6,117 @@
TEST_CASE("LocalPoolVariable" , "[LocPoolVarTest]") {
    LocalPoolOwnerBase* poolOwner = objectManager->
            get<LocalPoolOwnerBase>(objects::TEST_LOCAL_POOL_OWNER_BASE);
    REQUIRE(poolOwner != nullptr);
    REQUIRE(poolOwner->initializeHkManager() == retval::CATCH_OK);
    REQUIRE(poolOwner->initializeHkManagerAfterTaskCreation()
            == retval::CATCH_OK);

    SECTION("Basic Tests") {
        // very basic test.
        /* very basic test. */
        lp_var_t<uint8_t> testVariable = lp_var_t<uint8_t>(
                objects::TEST_LOCAL_POOL_OWNER_BASE, lpool::uint8VarId);
        REQUIRE(testVariable.read() == retval::CATCH_OK);
        CHECK(testVariable.value == 0);
        testVariable.value = 5;
        REQUIRE(testVariable.commit() == retval::CATCH_OK);
        REQUIRE(testVariable.read() == retval::CATCH_OK);
        REQUIRE(testVariable.value == 5);
        CHECK(not testVariable.isValid());
        testVariable.setValid(true);
        CHECK(testVariable.isValid());
        CHECK(testVariable.commit(true) == retval::CATCH_OK);

        testVariable.setReadWriteMode(pool_rwm_t::VAR_READ);
        CHECK(testVariable.getReadWriteMode() == pool_rwm_t::VAR_READ);
        testVariable.setReadWriteMode(pool_rwm_t::VAR_READ_WRITE);

        testVariable.setDataPoolId(22);
        CHECK(testVariable.getDataPoolId() == 22);
        testVariable.setDataPoolId(lpool::uint8VarId);

        testVariable.setChanged(true);
        CHECK(testVariable.hasChanged());
        testVariable.setChanged(false);

        gp_id_t globPoolId(objects::TEST_LOCAL_POOL_OWNER_BASE,
                lpool::uint8VarId);
        lp_var_t<uint8_t> testVariable2 = lp_var_t<uint8_t>(globPoolId);
        REQUIRE(testVariable2.read() == retval::CATCH_OK);
        CHECK(testVariable2 == 5);
        CHECK(testVariable == testVariable2);
        testVariable = 10;
        CHECK(testVariable != 5);
        //CHECK(not testVariable != testVariable2);
        uint8_t variableRaw = 0;
        uint8_t* varPtr = &variableRaw;
        size_t maxSize = testVariable.getSerializedSize();
        CHECK(maxSize == 1);
        size_t serSize = 0;
        CHECK(testVariable.serialize(&varPtr, &serSize, maxSize,
                SerializeIF::Endianness::MACHINE) == retval::CATCH_OK);
        CHECK(variableRaw == 10);
        const uint8_t* varConstPtr = &variableRaw;
        testVariable = 5;
        CHECK(testVariable.deSerialize(&varConstPtr, &serSize,
                SerializeIF::Endianness::MACHINE) == retval::CATCH_OK);
        CHECK(testVariable == 10);
        CHECK(testVariable != testVariable2);
        CHECK(testVariable2 < testVariable);
        CHECK(testVariable2 < 10);
        CHECK(testVariable > 5);
        CHECK(testVariable > testVariable2);
        variableRaw = static_cast<uint8_t>(testVariable2);
        CHECK(variableRaw == 5);

        CHECK(testVariable == 10);
        testVariable = testVariable2;
        CHECK(testVariable == 5);
    }

    SECTION("ErrorHandling") {
        // not try to use a local pool variable which does not exist
        /* now try to use a local pool variable which does not exist */
        lp_var_t<uint8_t> invalidVariable = lp_var_t<uint8_t>(
                objects::TEST_LOCAL_POOL_OWNER_BASE, 0xffffffff);
        REQUIRE(invalidVariable.read() ==
                static_cast<int>(localpool::POOL_ENTRY_NOT_FOUND));

        REQUIRE(invalidVariable.commit() ==
                static_cast<int>(localpool::POOL_ENTRY_NOT_FOUND));
        // now try to access with wrong type
        /* now try to access with wrong type */
        lp_var_t<int8_t> invalidVariable2 = lp_var_t<int8_t>(
                objects::TEST_LOCAL_POOL_OWNER_BASE, lpool::uint8VarId);
        REQUIRE(invalidVariable2.read() ==
                static_cast<int>(localpool::POOL_ENTRY_TYPE_CONFLICT));

        lp_var_t<uint8_t> readOnlyVar = lp_var_t<uint8_t>(
                objects::TEST_LOCAL_POOL_OWNER_BASE, lpool::uint8VarId,
                nullptr, pool_rwm_t::VAR_READ);
        REQUIRE(readOnlyVar.commit() ==
                static_cast<int>(PoolVariableIF::INVALID_READ_WRITE_MODE));
        lp_var_t<uint8_t> writeOnlyVar = lp_var_t<uint8_t>(
                objects::TEST_LOCAL_POOL_OWNER_BASE, lpool::uint8VarId,
                nullptr, pool_rwm_t::VAR_WRITE);
        REQUIRE(writeOnlyVar.read() == static_cast<int>(
                PoolVariableIF::INVALID_READ_WRITE_MODE));

        lp_var_t<uint32_t> uint32tVar = lp_var_t<uint32_t>(
                objects::TEST_LOCAL_POOL_OWNER_BASE, lpool::uint32VarId);
#if FSFW_CPP_OSTREAM_ENABLED == 1
        sif::info << "LocalPoolVariable printout: " << uint32tVar << std::endl;
#endif

        // for code coverage. If program does not crash -> OK
        /* for code coverage. If program does not crash -> OK */
        lp_var_t<uint8_t> invalidObjectVar = lp_var_t<uint8_t>(
                0xffffffff, lpool::uint8VarId);
        gp_id_t globPoolId(0xffffffff,
                lpool::uint8VarId);
        lp_var_t<uint8_t> invalidObjectVar2 = lp_var_t<uint8_t>(globPoolId);
        lp_var_t<uint8_t> invalidObjectVar3 = lp_var_t<uint8_t>(nullptr,
                lpool::uint8VarId);
    }
}
@ -5,116 +5,116 @@
#include <unittest/core/CatchDefinitions.h>

TEST_CASE("LocalPoolVector" , "[LocPoolVecTest]") {
    LocalPoolOwnerBase* poolOwner = objectManager->
            get<LocalPoolOwnerBase>(objects::TEST_LOCAL_POOL_OWNER_BASE);
    REQUIRE(poolOwner != nullptr);
    REQUIRE(poolOwner->initializeHkManager() == retval::CATCH_OK);
    REQUIRE(poolOwner->initializeHkManagerAfterTaskCreation()
            == retval::CATCH_OK);

    SECTION("BasicTest") {
        // very basic test.
        lp_vec_t<uint16_t, 3> testVector = lp_vec_t<uint16_t, 3>(
                objects::TEST_LOCAL_POOL_OWNER_BASE, lpool::uint16Vec3Id);
        REQUIRE(testVector.read() == retval::CATCH_OK);
        testVector.value[0] = 5;
        testVector.value[1] = 232;
        testVector.value[2] = 32023;

        REQUIRE(testVector.commit(true) == retval::CATCH_OK);
        CHECK(testVector.isValid());

        testVector.value[0] = 0;
        testVector.value[1] = 0;
        testVector.value[2] = 0;

        CHECK(testVector.read() == retval::CATCH_OK);
        CHECK(testVector.value[0] == 5);
        CHECK(testVector.value[1] == 232);
        CHECK(testVector.value[2] == 32023);

        CHECK(testVector[0] == 5);

        // This is invalid access, so the last value will be set instead.
        // (we can't throw exceptions)
        /* This is invalid access, so the last value will be set instead.
        (we can't throw exceptions) */
        testVector[4] = 12;
        CHECK(testVector[2] == 12);
        CHECK(testVector.commit() == retval::CATCH_OK);

        // Use read-only reference.
        /* Use read-only reference. */
        const lp_vec_t<uint16_t, 3>& roTestVec = testVector;
        uint16_t valueOne = roTestVec[0];
        CHECK(valueOne == 5);

        uint16_t lastVal = roTestVec[25];
        CHECK(lastVal == 12);

        size_t maxSize = testVector.getSerializedSize();
        CHECK(maxSize == 6);

        uint16_t serializedVector[3];
        uint8_t* vecPtr = reinterpret_cast<uint8_t*>(serializedVector);
        size_t serSize = 0;
        REQUIRE(testVector.serialize(&vecPtr, &serSize,
                maxSize, SerializeIF::Endianness::MACHINE) == retval::CATCH_OK);

        CHECK(serSize == 6);
        CHECK(serializedVector[0] == 5);
        CHECK(serializedVector[1] == 232);
        CHECK(serializedVector[2] == 12);

        maxSize = 1;
        REQUIRE(testVector.serialize(&vecPtr, &serSize,
                maxSize, SerializeIF::Endianness::MACHINE) ==
                static_cast<int>(SerializeIF::BUFFER_TOO_SHORT));

        serializedVector[0] = 16;
        serializedVector[1] = 7832;
        serializedVector[2] = 39232;

        const uint8_t* constVecPtr = reinterpret_cast<const uint8_t*>(
                serializedVector);
        REQUIRE(testVector.deSerialize(&constVecPtr, &serSize,
                SerializeIF::Endianness::MACHINE) == retval::CATCH_OK);
        CHECK(testVector[0] == 16);
        CHECK(testVector[1] == 7832);
        CHECK(testVector[2] == 39232);

        serSize = 1;
        REQUIRE(testVector.deSerialize(&constVecPtr, &serSize,
                SerializeIF::Endianness::MACHINE) ==
                static_cast<int>(SerializeIF::STREAM_TOO_SHORT));
    }

    SECTION("ErrorHandling") {
        // not try to use a local pool variable which does not exist
        /* Now try to use a local pool variable which does not exist */
        lp_vec_t<uint16_t, 3> invalidVector = lp_vec_t<uint16_t, 3>(
                objects::TEST_LOCAL_POOL_OWNER_BASE, 0xffffffff);
        REQUIRE(invalidVector.read() ==
                static_cast<int>(localpool::POOL_ENTRY_NOT_FOUND));
        REQUIRE(invalidVector.commit() ==
                static_cast<int>(localpool::POOL_ENTRY_NOT_FOUND));

        // now try to access with wrong type
        /* Now try to access with wrong type */
        lp_vec_t<uint32_t, 3> invalidVector2 = lp_vec_t<uint32_t, 3>(
                objects::TEST_LOCAL_POOL_OWNER_BASE, lpool::uint16Vec3Id);
        REQUIRE(invalidVector2.read() ==
                static_cast<int>(localpool::POOL_ENTRY_TYPE_CONFLICT));
        REQUIRE(invalidVector2.commit() ==
                static_cast<int>(localpool::POOL_ENTRY_TYPE_CONFLICT));

        lp_vec_t<uint16_t, 3> writeOnlyVec = lp_vec_t<uint16_t, 3>(
                objects::TEST_LOCAL_POOL_OWNER_BASE, lpool::uint16Vec3Id,
                nullptr, pool_rwm_t::VAR_WRITE);
        REQUIRE(writeOnlyVec.read() ==
                static_cast<int>(PoolVariableIF::INVALID_READ_WRITE_MODE));

        lp_vec_t<uint16_t, 3> readOnlyVec = lp_vec_t<uint16_t, 3>(
                objects::TEST_LOCAL_POOL_OWNER_BASE, lpool::uint16Vec3Id,
                nullptr, pool_rwm_t::VAR_READ);
        REQUIRE(readOnlyVec.commit() ==
                static_cast<int>(PoolVariableIF::INVALID_READ_WRITE_MODE));
    }
}
@ -276,7 +276,7 @@ TEST_CASE( "Local Pool Extended Tests [3 Pools]" , "[TestPool2]") {
    CHECK(receptionArray[3] == 66);

    // now clear first page
    simplePool.clearPage(0);
    simplePool.clearSubPool(0);
    bytesWritten = 0;
    simplePool.getFillCount(receptionArray.data(), &bytesWritten);
    // Second page full, median fill count is 33 %
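The last hunk only tracks the pool API rename: clearPage(0) becomes clearSubPool(0), and the call site keeps its single sub-pool index argument. A short before/after sketch, assuming a pool and reception buffer set up like simplePool and receptionArray in the extended pool test above:

    /* Old call, removed by this update: */
    // simplePool.clearPage(0);
    /* New call with the same index argument: */
    simplePool.clearSubPool(0);
    bytesWritten = 0;
    simplePool.getFillCount(receptionArray.data(), &bytesWritten);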