Merge branch 'development' into mueller/DeviceHandler-LocalPools

This commit is contained in:
Steffen Gaisser 2020-12-10 17:22:44 +01:00
commit 45b02083c9
14 changed files with 949 additions and 427 deletions

View File

@ -15,6 +15,10 @@ a C file without issues
- The same is possible for the event reporting service (PUS5) - The same is possible for the event reporting service (PUS5)
- PUS Health Service added, which allows to command and retrieve health via PUS packets - PUS Health Service added, which allows to command and retrieve health via PUS packets
### Local Pool
- Interface of LocalPools has changed. LocalPool is no longer a template. Instead of two ctor arguments and a template parameter, the element size and bucket number of each pool page and the number of pages are now passed to the ctor
### Parameter Service ### Parameter Service
- The API of the parameter service has been changed to prevent inconsistencies - The API of the parameter service has been changed to prevent inconsistencies

View File

@ -7,21 +7,22 @@
#include "../ipc/MutexFactory.h" #include "../ipc/MutexFactory.h"
const uint16_t EventManager::POOL_SIZES[N_POOLS] = {
sizeof(EventMatchTree::Node), sizeof(EventIdRangeMatcher),
sizeof(ReporterRangeMatcher) };
// If one checks registerListener calls, there are around 40 (to max 50) // If one checks registerListener calls, there are around 40 (to max 50)
// objects registering for certain events. // objects registering for certain events.
// Each listener requires 1 or 2 EventIdMatcher and 1 or 2 ReportRangeMatcher. // Each listener requires 1 or 2 EventIdMatcher and 1 or 2 ReportRangeMatcher.
// So a good guess is 75 to a max of 100 pools required for each, which fits well. // So a good guess is 75 to a max of 100 pools required for each, which fits well.
const uint16_t EventManager::N_ELEMENTS[N_POOLS] = { const LocalPool::LocalPoolConfig EventManager::poolConfig = {
fsfwconfig::FSFW_EVENTMGMR_MATCHTREE_NODES , {fsfwconfig::FSFW_EVENTMGMR_MATCHTREE_NODES,
fsfwconfig::FSFW_EVENTMGMT_EVENTIDMATCHERS, sizeof(EventMatchTree::Node)},
fsfwconfig::FSFW_EVENTMGMR_RANGEMATCHERS }; {fsfwconfig::FSFW_EVENTMGMT_EVENTIDMATCHERS,
sizeof(EventIdRangeMatcher)},
{fsfwconfig::FSFW_EVENTMGMR_RANGEMATCHERS,
sizeof(ReporterRangeMatcher)}
};
EventManager::EventManager(object_id_t setObjectId) : EventManager::EventManager(object_id_t setObjectId) :
SystemObject(setObjectId), SystemObject(setObjectId),
factoryBackend(0, POOL_SIZES, N_ELEMENTS, false, true) { factoryBackend(0, poolConfig, false, true) {
mutex = MutexFactory::instance()->createMutex(); mutex = MutexFactory::instance()->createMutex();
eventReportQueue = QueueFactory::instance()->createMessageQueue( eventReportQueue = QueueFactory::instance()->createMessageQueue(
MAX_EVENTS_PER_CYCLE, EventMessage::EVENT_MESSAGE_SIZE); MAX_EVENTS_PER_CYCLE, EventMessage::EVENT_MESSAGE_SIZE);
@ -113,7 +114,7 @@ ReturnValue_t EventManager::unsubscribeFromEventRange(MessageQueueId_t listener,
return result; return result;
} }
#ifdef DEBUG #if FSFW_DEBUG_OUTPUT == 1
void EventManager::printEvent(EventMessage* message) { void EventManager::printEvent(EventMessage* message) {
const char *string = 0; const char *string = 0;

View File

@ -8,9 +8,11 @@
#include "../tasks/ExecutableObjectIF.h" #include "../tasks/ExecutableObjectIF.h"
#include "../ipc/MessageQueueIF.h" #include "../ipc/MessageQueueIF.h"
#include "../ipc/MutexIF.h" #include "../ipc/MutexIF.h"
#include <FSFWConfig.h>
#include <map> #include <map>
#ifdef DEBUG #if FSFW_DEBUG_OUTPUT == 1
// forward declaration, should be implemented by mission // forward declaration, should be implemented by mission
extern const char* translateObject(object_id_t object); extern const char* translateObject(object_id_t object);
extern const char* translateEvents(Event event); extern const char* translateEvents(Event event);
@ -49,13 +51,15 @@ protected:
MutexIF* mutex = nullptr; MutexIF* mutex = nullptr;
static const uint8_t N_POOLS = 3; static const uint8_t N_POOLS = 3;
LocalPool<N_POOLS> factoryBackend; LocalPool factoryBackend;
static const LocalPool::LocalPoolConfig poolConfig;
static const uint16_t POOL_SIZES[N_POOLS]; static const uint16_t POOL_SIZES[N_POOLS];
static const uint16_t N_ELEMENTS[N_POOLS]; static const uint16_t N_ELEMENTS[N_POOLS];
void notifyListeners(EventMessage *message); void notifyListeners(EventMessage *message);
#ifdef DEBUG #if FSFW_DEBUG_OUTPUT == 1
void printEvent(EventMessage *message); void printEvent(EventMessage *message);
#endif #endif

View File

@ -20,9 +20,7 @@ class StorageManagerIF;
*/ */
class ConstStorageAccessor { class ConstStorageAccessor {
//! StorageManager classes have exclusive access to private variables. //! StorageManager classes have exclusive access to private variables.
template<uint8_t NUMBER_OF_POOLS>
friend class PoolManager; friend class PoolManager;
template<uint8_t NUMBER_OF_POOLS>
friend class LocalPool; friend class LocalPool;
public: public:
/** /**

View File

@ -0,0 +1,347 @@
#include "LocalPool.h"
#include <FSFWConfig.h>
#include <cstring>
/**
 * @brief Construct the pool from a configuration multiset.
 * @details
 * Each pair in the configuration is (number of elements, element size).
 * The multiset's comparator orders pairs by ascending element size, so the
 * pages end up sorted as required by getPoolIndex().
 * @param setObjectId Object ID passed to the SystemObject base.
 * @param poolConfig Set of (element count, element size) pairs, one per page.
 * @param registered Whether to register in the object manager.
 * @param spillsToHigherPools Allow allocation fallback to larger pages.
 */
LocalPool::LocalPool(object_id_t setObjectId, const LocalPoolConfig& poolConfig,
        bool registered, bool spillsToHigherPools):
        SystemObject(setObjectId, registered),
        NUMBER_OF_POOLS(poolConfig.size()),
        spillsToHigherPools(spillsToHigherPools) {
    if(NUMBER_OF_POOLS == 0) {
        // NOTE(review): an empty configuration is only reported here and
        // construction continues with zero pages -- confirm this is intended.
        sif::error << "LocalPool::LocalPool: Passed pool configuration is "
                << " invalid!" << std::endl;
    }
    max_pools_t index = 0;
    // Iteration follows the comparator's ascending element-size order.
    for (const auto& currentPoolConfig: poolConfig) {
        // pair.first = number of elements, pair.second = element size.
        this->numberOfElements[index] = currentPoolConfig.first;
        this->elementSizes[index] = currentPoolConfig.second;
        store[index] = std::vector<uint8_t>(
                numberOfElements[index] * elementSizes[index]);
        // One size entry per slot; STORAGE_FREE marks an unused slot.
        sizeLists[index] = std::vector<size_type>(numberOfElements[index]);
        for(auto& size: sizeLists[index]) {
            size = STORAGE_FREE;
        }
        index++;
    }
}
LocalPool::~LocalPool(void) {}
/**
 * @brief Store a copy of the given data in the pool.
 * @details Reserves a fitting slot first, then copies the payload into it.
 */
ReturnValue_t LocalPool::addData(store_address_t* storageId,
        const uint8_t* data, size_t size, bool ignoreFault) {
    ReturnValue_t result = reserveSpace(size, storageId, ignoreFault);
    if (result != RETURN_OK) {
        return result;
    }
    write(*storageId, data, size);
    return result;
}
/**
 * @brief Read-only access to a stored packet.
 * @details Delegates to the mutable accessor and exposes the result
 * through a const pointer.
 */
ReturnValue_t LocalPool::getData(store_address_t packetId,
        const uint8_t **packetPtr, size_t *size) {
    uint8_t* mutablePtr = nullptr;
    ReturnValue_t result = modifyData(packetId, &mutablePtr, size);
    *packetPtr = mutablePtr;
    return result;
}
/**
 * @brief Fill a caller-provided const accessor with a view on the packet.
 */
ReturnValue_t LocalPool::getData(store_address_t storeId,
        ConstStorageAccessor& storeAccessor) {
    uint8_t* rawPtr = nullptr;
    ReturnValue_t result = modifyData(storeId, &rawPtr,
            &storeAccessor.size_);
    // Bind the accessor to this pool so it can release the slot later.
    storeAccessor.assignStore(this);
    storeAccessor.constDataPointer = rawPtr;
    return result;
}
/**
 * @brief Convenience overload returning status and accessor as one pair.
 */
ConstAccessorPair LocalPool::getData(store_address_t storeId) {
    ConstStorageAccessor accessor(storeId, this);
    uint8_t* rawPtr = nullptr;
    ReturnValue_t result = modifyData(storeId, &rawPtr, &accessor.size_);
    accessor.constDataPointer = rawPtr;
    // The accessor is move-only, so it is moved into the returned pair.
    return ConstAccessorPair(result, std::move(accessor));
}
/**
 * @brief Reserve a slot and hand out a writable pointer into it.
 * @details On failure the data pointer is set to nullptr.
 */
ReturnValue_t LocalPool::getFreeElement(store_address_t *storageId,
        const size_t size, uint8_t **pData, bool ignoreFault) {
    ReturnValue_t result = reserveSpace(size, storageId, ignoreFault);
    if (result != RETURN_OK) {
        *pData = nullptr;
        return result;
    }
    // Point directly at the raw slot inside the page.
    *pData = &store[storageId->poolIndex][getRawPosition(*storageId)];
    return result;
}
/**
 * @brief Mutable access returning status and accessor as one pair.
 */
AccessorPair LocalPool::modifyData(store_address_t storeId) {
    StorageAccessor accessor(storeId, this);
    ReturnValue_t result = modifyData(storeId, &accessor.dataPointer,
            &accessor.size_);
    // Keep the accessor's read-only view in sync with the raw pointer.
    accessor.assignConstPointer();
    return AccessorPair(result, std::move(accessor));
}
/**
 * @brief Fill a caller-provided accessor with mutable access to the packet.
 */
ReturnValue_t LocalPool::modifyData(store_address_t storeId,
        StorageAccessor& storeAccessor) {
    // Bind the accessor to this pool so it can release the slot later.
    storeAccessor.assignStore(this);
    ReturnValue_t result = modifyData(storeId, &storeAccessor.dataPointer,
            &storeAccessor.size_);
    storeAccessor.assignConstPointer();
    return result;
}
/**
 * @brief Core lookup: resolve a store address to a raw pointer and size.
 * @return ILLEGAL_STORAGE_ID for out-of-range addresses,
 *         DATA_DOES_NOT_EXIST for a free slot, RETURN_OK otherwise.
 */
ReturnValue_t LocalPool::modifyData(store_address_t storeId,
        uint8_t **packetPtr, size_t *size) {
    // Validate both parts of the address before any container access.
    if (storeId.poolIndex >= NUMBER_OF_POOLS) {
        return ILLEGAL_STORAGE_ID;
    }
    if (storeId.packetIndex >= numberOfElements[storeId.poolIndex]) {
        return ILLEGAL_STORAGE_ID;
    }
    if (sizeLists[storeId.poolIndex][storeId.packetIndex] == STORAGE_FREE) {
        // Slot was never written to or has been deleted.
        return DATA_DOES_NOT_EXIST;
    }
    *packetPtr = &store[storeId.poolIndex][getRawPosition(storeId)];
    *size = sizeLists[storeId.poolIndex][storeId.packetIndex];
    return RETURN_OK;
}
/**
 * @brief Free a single slot identified by a store address.
 * @details The slot payload is zeroed and its size-list entry is reset
 * to STORAGE_FREE.
 * @return RETURN_OK on success, ILLEGAL_STORAGE_ID for invalid addresses.
 */
ReturnValue_t LocalPool::deleteData(store_address_t storeId) {
#if FSFW_VERBOSE_PRINTOUT == 2
    sif::debug << "Delete: Pool: " << std::dec << storeId.poolIndex
            << " Index: " << storeId.packetIndex << std::endl;
#endif
    ReturnValue_t status = RETURN_OK;
    // getPageSize() returns 0 for an out-of-range poolIndex; the short-circuit
    // 'and' below therefore also guards the numberOfElements[] access.
    size_type pageSize = getPageSize(storeId.poolIndex);
    if ((pageSize != 0) and
            (storeId.packetIndex < numberOfElements[storeId.poolIndex])) {
        uint16_t packetPosition = getRawPosition(storeId);
        uint8_t* ptr = &store[storeId.poolIndex][packetPosition];
        // Zero one element worth of bytes (the whole slot).
        std::memset(ptr, 0, pageSize);
        //Set free list
        sizeLists[storeId.poolIndex][storeId.packetIndex] = STORAGE_FREE;
    }
    else {
        //pool_index or packet_index is too large
        sif::error << "LocalPool::deleteData: Illegal store ID, no deletion!"
                << std::endl;
        status = ILLEGAL_STORAGE_ID;
    }
    return status;
}
/**
 * @brief Free the slot that contains the given raw pointer.
 * @details Scans all pages for the one whose address range contains ptr,
 * then frees the matching slot. The pointer does not need to point at the
 * start of the slot.
 * @param storeId Optional out-parameter receiving the resolved address.
 * @return ILLEGAL_ADDRESS if no page contains ptr, otherwise the result
 *         of the by-address deleteData() call.
 */
ReturnValue_t LocalPool::deleteData(uint8_t *ptr, size_t size,
        store_address_t *storeId) {
    // NOTE(review): localId is only assigned when a containing page is found;
    // on failure *storeId receives an unassigned value -- confirm that
    // store_address_t default-initializes to an invalid address.
    store_address_t localId;
    ReturnValue_t result = ILLEGAL_ADDRESS;
    for (uint16_t n = 0; n < NUMBER_OF_POOLS; n++) {
        //Not sure if new allocates all stores in order. so better be careful.
        // NOTE(review): the end-pointer below indexes one past the vector's
        // last element via operator[]; store[n].data() + store[n].size()
        // would express the same bound without the out-of-range index.
        if ((store[n].data() <= ptr) and
                (&store[n][numberOfElements[n]*elementSizes[n]] > ptr)) {
            localId.poolIndex = n;
            uint32_t deltaAddress = ptr - store[n].data();
            // Getting any data from the right "block" is ok.
            // This is necessary, as IF's sometimes don't point to the first
            // element of an object.
            localId.packetIndex = deltaAddress / elementSizes[n];
            result = deleteData(localId);
#if FSFW_VERBOSE_PRINTOUT == 2
            if (deltaAddress % elementSizes[n] != 0) {
                sif::error << "LocalPool::deleteData: Address not aligned!"
                        << std::endl;
            }
#endif
            break;
        }
    }
    if (storeId != nullptr) {
        *storeId = localId;
    }
    return result;
}
/**
 * @brief Late initialization: base class setup, error reporter lookup and
 * sanity check of the configured element sizes.
 * @return RETURN_OK, a base-class error, INTERNAL_ERR_REPORTER_UNINIT or
 *         POOL_TOO_LARGE.
 */
ReturnValue_t LocalPool::initialize() {
    ReturnValue_t result = SystemObject::initialize();
    if (result != RETURN_OK) {
        return result;
    }
    // The reporter is used to count store-full events at runtime.
    internalErrorReporter = objectManager->get<InternalErrorReporterIF>(
            objects::INTERNAL_ERROR_REPORTER);
    if (internalErrorReporter == nullptr) {
        return ObjectManagerIF::INTERNAL_ERR_REPORTER_UNINIT;
    }
    // STORAGE_FREE is reserved as the "slot is empty" marker, so no element
    // may ever reach that size.
    for (max_pools_t idx = 0; idx < NUMBER_OF_POOLS; idx++) {
        if (elementSizes[idx] >= STORAGE_FREE) {
            sif::error << "LocalPool::initialize: Pool is too large! "
                    "Max. allowed size is: " << (STORAGE_FREE - 1) << std::endl;
            return StorageManagerIF::POOL_TOO_LARGE;
        }
    }
    return HasReturnvaluesIF::RETURN_OK;
}
/**
 * @brief Mark every slot of every page as free.
 * @details Marking the size lists is sufficient; stale payload bytes are
 * overwritten on the next write to a slot.
 */
void LocalPool::clearStore() {
    for (auto& sizeList: sizeLists) {
        for (auto& slotSize: sizeList) {
            slotSize = STORAGE_FREE;
        }
    }
}
/**
 * @brief Reserve one free slot that can hold @c size bytes.
 * @details Picks the smallest fitting page; if that page is full and
 * spilling is enabled, larger pages are tried in ascending order.
 * On success the slot is marked used by writing its payload size.
 * @param[out] storeId Receives the address of the reserved slot.
 * @param ignoreFault Suppress the store-full report on failure.
 */
ReturnValue_t LocalPool::reserveSpace(const size_t size,
        store_address_t *storeId, bool ignoreFault) {
    // Find the smallest page whose element size fits the request.
    ReturnValue_t status = getPoolIndex(size, &storeId->poolIndex);
    if (status != RETURN_OK) {
        sif::error << "LocalPool( " << std::hex << getObjectId() << std::dec
                << " )::reserveSpace: Packet too large." << std::endl;
        return status;
    }
    status = findEmpty(storeId->poolIndex, &storeId->packetIndex);
    // Spill-over: retry with the next larger page until a free slot is
    // found or no page fits anymore.
    while (status != RETURN_OK && spillsToHigherPools) {
        status = getPoolIndex(size, &storeId->poolIndex, storeId->poolIndex + 1);
        if (status != RETURN_OK) {
            //We don't find any fitting pool anymore.
            break;
        }
        status = findEmpty(storeId->poolIndex, &storeId->packetIndex);
    }
    if (status == RETURN_OK) {
#if FSFW_VERBOSE_PRINTOUT == 2
        sif::debug << "Reserve: Pool: " << std::dec
                << storeId->poolIndex << " Index: " << storeId->packetIndex
                << std::endl;
#endif
        // Mark the slot as used by storing the payload size.
        sizeLists[storeId->poolIndex][storeId->packetIndex] = size;
    }
    else {
        // Report exhaustion unless the caller explicitly ignores the fault.
        if ((not ignoreFault) and (internalErrorReporter != nullptr)) {
            internalErrorReporter->storeFull();
        }
    }
    return status;
}
/**
 * @brief Copy payload bytes into an already-reserved slot.
 * @details The caller (reserveSpace) has verified that the slot fits.
 */
void LocalPool::write(store_address_t storeId, const uint8_t *data,
        size_t size) {
    size_type offset = getRawPosition(storeId);
    std::memcpy(&store[storeId.poolIndex][offset], data, size);
    // Record the actual payload length for later reads.
    sizeLists[storeId.poolIndex][storeId.packetIndex] = size;
}
/**
 * @brief Element size of the given page, or 0 for an invalid page index.
 */
LocalPool::size_type LocalPool::getPageSize(max_pools_t poolIndex) {
    return (poolIndex < NUMBER_OF_POOLS) ? elementSizes[poolIndex] : 0;
}
void LocalPool::setToSpillToHigherPools(bool enable) {
this->spillsToHigherPools = enable;
}
/**
 * @brief Find the first page (starting at startAtIndex) whose element size
 * can hold packetSize bytes.
 * @details Pages are sorted by ascending element size, so the first hit is
 * also the best fit.
 * @return RETURN_OK with *poolIndex set, or DATA_TOO_LARGE.
 */
ReturnValue_t LocalPool::getPoolIndex(size_t packetSize, uint16_t *poolIndex,
        uint16_t startAtIndex) {
    for (uint16_t idx = startAtIndex; idx < NUMBER_OF_POOLS; idx++) {
#if FSFW_VERBOSE_PRINTOUT == 2
        sif::debug << "LocalPool " << getObjectId() << "::getPoolIndex: Pool: "
                << idx << ", Element Size: " << elementSizes[idx] << std::endl;
#endif
        if (elementSizes[idx] >= packetSize) {
            *poolIndex = idx;
            return RETURN_OK;
        }
    }
    return DATA_TOO_LARGE;
}
/**
 * @brief Byte offset of the addressed slot inside its page.
 */
LocalPool::size_type LocalPool::getRawPosition(store_address_t storeId) {
    return storeId.packetIndex * elementSizes[storeId.poolIndex];
}
/**
 * @brief Linear scan for the first free slot on the given page.
 * @return RETURN_OK with *element set, or DATA_STORAGE_FULL.
 */
ReturnValue_t LocalPool::findEmpty(n_pool_elem_t poolIndex, uint16_t *element) {
    for (uint16_t idx = 0; idx < numberOfElements[poolIndex]; idx++) {
        if (sizeLists[poolIndex][idx] == STORAGE_FREE) {
            *element = idx;
            return RETURN_OK;
        }
    }
    return DATA_STORAGE_FULL;
}
size_t LocalPool::getTotalSize(size_t* additionalSize) {
size_t totalSize = 0;
size_t sizesSize = 0;
for(uint8_t idx = 0; idx < NUMBER_OF_POOLS; idx ++) {
totalSize += elementSizes[idx] * numberOfElements[idx];
sizesSize += numberOfElements[idx] * sizeof(size_type);
}
if(additionalSize != nullptr) {
*additionalSize = sizesSize;
}
return totalSize;
}
/**
 * @brief Write the rounded fill percentage of each page into buffer,
 * followed by the mean fill percentage of the whole pool.
 * @details Writes NUMBER_OF_POOLS + 1 bytes and increments *bytesWritten
 * accordingly. NOTE(review): assumes the buffer is large enough and that
 * the caller pre-initialized *bytesWritten -- confirm against callers.
 * @param buffer Output buffer for the percentages.
 * @param bytesWritten Incremented by the number of bytes written.
 */
void LocalPool::getFillCount(uint8_t *buffer, uint8_t *bytesWritten) {
    if(bytesWritten == nullptr or buffer == nullptr) {
        return;
    }
    // Guard against division by zero for an empty configuration.
    if(NUMBER_OF_POOLS == 0) {
        return;
    }
    uint16_t sum = 0;
    uint8_t idx = 0;
    for(; idx < NUMBER_OF_POOLS; idx ++) {
        uint16_t reservedHits = 0;
        for(const auto& size: sizeLists[idx]) {
            if(size != STORAGE_FREE) {
                reservedHits++;
            }
        }
        if(numberOfElements[idx] > 0) {
            buffer[idx] = static_cast<float>(reservedHits) /
                    numberOfElements[idx] * 100;
        }
        else {
            // A page configured with zero elements is always "empty";
            // the unguarded division would be undefined behaviour.
            buffer[idx] = 0;
        }
        *bytesWritten += 1;
        sum += buffer[idx];
    }
    // Mean of the per-page percentages.
    buffer[idx] = sum / NUMBER_OF_POOLS;
    *bytesWritten += 1;
}
/**
 * @brief Clear a single page: mark all its slots free and zero its memory.
 * @param pageIndex Index of the page; out-of-range indices are ignored.
 */
void LocalPool::clearPage(max_pools_t pageIndex) {
    if(pageIndex >= NUMBER_OF_POOLS) {
        return;
    }
    // Mark the storage as free
    for(auto& size: sizeLists[pageIndex]) {
        size = STORAGE_FREE;
    }
    // Set all the page content to 0. The page holds
    // numberOfElements[pageIndex] * elementSizes[pageIndex] bytes, which is
    // exactly the backing vector's size. The previous code passed only
    // elementSizes[pageIndex] and therefore zeroed just the first slot.
    std::memset(store[pageIndex].data(), 0, store[pageIndex].size());
}

View File

@ -7,57 +7,93 @@
#include "../serviceinterface/ServiceInterfaceStream.h" #include "../serviceinterface/ServiceInterfaceStream.h"
#include "../internalError/InternalErrorReporterIF.h" #include "../internalError/InternalErrorReporterIF.h"
#include "../storagemanager/StorageAccessor.h" #include "../storagemanager/StorageAccessor.h"
#include <cstring>
#include <vector>
#include <set>
#include <utility>
#include <limits>
/** /**
* @brief The LocalPool class provides an intermediate data storage with * @brief The LocalPool class provides an intermediate data storage with
* a fixed pool size policy. * a fixed pool size policy.
* @details The class implements the StorageManagerIF interface. While the * @details
* total number of pools is fixed, the element sizes in one pool and * The class implements the StorageManagerIF interface. While the total number
* the number of pool elements per pool are set on construction. * of pools is fixed, the element sizes in one pool and the number of pool
* The full amount of memory is allocated on construction. * elements per pool are set on construction. The full amount of memory is
* The overhead is 4 byte per pool element to store the size * allocated on construction.
* information of each stored element. * The overhead is 4 byte per pool element to store the size information of
* To maintain an "empty" information, the pool size is limited to * each stored element. To maintain an "empty" information, the pool size is
* 0xFFFF-1 bytes. * limited to 0xFFFF-1 bytes.
* It is possible to store empty packets in the pool. * It is possible to store empty packets in the pool.
* The local pool is NOT thread-safe. * The local pool is NOT thread-safe.
* @author Bastian Baetz
*/ */
template<uint8_t NUMBER_OF_POOLS = 5>
class LocalPool: public SystemObject, public StorageManagerIF { class LocalPool: public SystemObject, public StorageManagerIF {
public: public:
/** using pool_elem_size_t = size_type;
* @brief This definition generally sets the number of different sized pools. using n_pool_elem_t = uint16_t;
* @details This must be less than the maximum number of pools (currently 0xff). using LocalPoolCfgPair = std::pair<n_pool_elem_t, pool_elem_size_t>;
*/
// static const uint32_t NUMBER_OF_POOLS; // The configuration needs to be provided with the pool sizes ascending
/** // but the number of pool elements as the first value is more intuitive.
* @brief This is the default constructor for a pool manager instance. // Therefore, a custom comparator was provided.
* @details By passing two arrays of size NUMBER_OF_POOLS, the constructor struct LocalPoolConfigCmp
* allocates memory (with @c new) for store and size_list. These {
* regions are all set to zero on start up. bool operator ()(const LocalPoolCfgPair &a,
* @param setObjectId The object identifier to be set. This allows for const LocalPoolCfgPair &b) const
* multiple instances of LocalPool in the system. {
* @param element_sizes An array of size NUMBER_OF_POOLS in which the size if(a.second < b.second) {
* of a single element in each pool is determined. return true;
* <b>The sizes must be provided in ascending order. }
* </b> else if(a.second > b.second) {
* @param n_elements An array of size NUMBER_OF_POOLS in which the return false;
* number of elements for each pool is determined. }
* The position of these values correspond to those in else {
* element_sizes. if(a.first < b.first) {
* @param registered Register the pool in object manager or not. return true;
* Default is false (local pool). }
* @param spillsToHigherPools A variable to determine whether else {
* higher n pools are used if the store is full. return false;
*/ }
LocalPool(object_id_t setObjectId, }
const uint16_t element_sizes[NUMBER_OF_POOLS], }
const uint16_t n_elements[NUMBER_OF_POOLS], };
bool registered = false, using LocalPoolConfig = std::multiset<LocalPoolCfgPair, LocalPoolConfigCmp>;
bool spillsToHigherPools = false);
/**
* @brief This definition generally sets the number of
* different sized pools. It is derived from the number of pairs
* inside the LocalPoolConfig set on object creation.
* @details
* This must be less than the maximum number of pools (currently 0xff).
*/
const max_pools_t NUMBER_OF_POOLS;
/**
* @brief This is the default constructor for a pool manager instance.
* @details
* The pool is configured by passing a set of pairs into the constructor.
* The first value of that pair determines the number of one elements on
* the respective page of the pool while the second value determines how
* many elements with that size are created on that page.
* All regions are set to zero on start up. * @param setObjectId The object identifier to be set. This allows for
* @param setObjectId The object identifier to be set. This allows for
* multiple instances of LocalPool in the system.
* @param poolConfig
* This is a set of pairs to configure the number of pages in the pool,
* the size of an element on a page, the number of elements on a page
* and the total size of the pool at once while also implicitly
* sorting the pairs in the right order.
* @param registered
* Determines whether the pool is registered in the object manager or not.
* @param spillsToHigherPools A variable to determine whether
* higher n pools are used if the store is full.
*/
LocalPool(object_id_t setObjectId, const LocalPoolConfig& poolConfig,
bool registered = false, bool spillsToHigherPools = false);
void setToSpillToHigherPools(bool enable);
/** /**
* @brief In the LocalPool's destructor all allocated memory is freed. * @brief In the LocalPool's destructor all allocated memory is freed.
*/ */
@ -66,25 +102,49 @@ public:
/** /**
* Documentation: See StorageManagerIF.h * Documentation: See StorageManagerIF.h
*/ */
ReturnValue_t addData(store_address_t* storageId, const uint8_t * data, ReturnValue_t addData(store_address_t* storeId, const uint8_t * data,
size_t size, bool ignoreFault = false) override; size_t size, bool ignoreFault = false) override;
ReturnValue_t getFreeElement(store_address_t* storageId,const size_t size, ReturnValue_t getFreeElement(store_address_t* storeId,const size_t size,
uint8_t** p_data, bool ignoreFault = false) override; uint8_t** pData, bool ignoreFault = false) override;
ConstAccessorPair getData(store_address_t packet_id) override; ConstAccessorPair getData(store_address_t storeId) override;
ReturnValue_t getData(store_address_t packet_id, ConstStorageAccessor&) override; ReturnValue_t getData(store_address_t storeId,
ReturnValue_t getData(store_address_t packet_id, const uint8_t** packet_ptr, ConstStorageAccessor& constAccessor) override;
ReturnValue_t getData(store_address_t storeId,
const uint8_t** packet_ptr, size_t * size) override;
AccessorPair modifyData(store_address_t storeId) override;
ReturnValue_t modifyData(store_address_t storeId,
StorageAccessor& storeAccessor) override;
ReturnValue_t modifyData(store_address_t storeId, uint8_t** packet_ptr,
size_t * size) override; size_t * size) override;
AccessorPair modifyData(store_address_t packet_id) override; virtual ReturnValue_t deleteData(store_address_t storeId) override;
ReturnValue_t modifyData(store_address_t packet_id, StorageAccessor&) override;
ReturnValue_t modifyData(store_address_t packet_id, uint8_t** packet_ptr,
size_t * size) override;
virtual ReturnValue_t deleteData(store_address_t) override;
virtual ReturnValue_t deleteData(uint8_t* ptr, size_t size, virtual ReturnValue_t deleteData(uint8_t* ptr, size_t size,
store_address_t* storeId = NULL) override; store_address_t* storeId = nullptr) override;
/**
* Get the total size of allocated memory for pool data.
* There is an additional overhead of the sizes of elements which will
* be assigned to additionalSize
* @return
*/
size_t getTotalSize(size_t* additionalSize) override;
/**
* Get the fill count of the pool. Each character inside the provided
* buffer will be assigned to a rounded percentage fill count for each
* page. The last written byte (at the index bytesWritten - 1)
* will contain the total fill count of the pool as a mean of the
* percentages of single pages.
* @param buffer
* @param maxSize
*/
void getFillCount(uint8_t* buffer, uint8_t* bytesWritten) override;
void clearStore() override; void clearStore() override;
void clearPage(max_pools_t pageIndex) override;
ReturnValue_t initialize() override; ReturnValue_t initialize() override;
protected: protected:
/** /**
@ -94,43 +154,48 @@ protected:
* @return - #RETURN_OK on success, * @return - #RETURN_OK on success,
* - the return codes of #getPoolIndex or #findEmpty otherwise. * - the return codes of #getPoolIndex or #findEmpty otherwise.
*/ */
virtual ReturnValue_t reserveSpace(const uint32_t size, virtual ReturnValue_t reserveSpace(const size_t size,
store_address_t* address, bool ignoreFault); store_address_t* address, bool ignoreFault);
InternalErrorReporterIF *internalErrorReporter;
private: private:
/** /**
* Indicates that this element is free. * Indicates that this element is free.
* This value limits the maximum size of a pool. Change to larger data type * This value limits the maximum size of a pool.
* if increase is required. * Change to larger data type if increase is required.
*/ */
static const uint32_t STORAGE_FREE = 0xFFFFFFFF; static const size_type STORAGE_FREE = std::numeric_limits<size_type>::max();
/** /**
* @brief In this array, the element sizes of each pool is stored. * @brief In this array, the element sizes of each pool is stored.
* @details The sizes are maintained for internal pool management. The sizes * @details The sizes are maintained for internal pool management. The sizes
* must be set in ascending order on construction. * must be set in ascending order on construction.
*/ */
uint32_t element_sizes[NUMBER_OF_POOLS]; std::vector<size_type> elementSizes =
std::vector<size_type>(NUMBER_OF_POOLS);
/** /**
* @brief n_elements stores the number of elements per pool. * @brief n_elements stores the number of elements per pool.
* @details These numbers are maintained for internal pool management. * @details These numbers are maintained for internal pool management.
*/ */
uint16_t n_elements[NUMBER_OF_POOLS]; std::vector<uint16_t> numberOfElements =
std::vector<uint16_t>(NUMBER_OF_POOLS);
/** /**
* @brief store represents the actual memory pool. * @brief store represents the actual memory pool.
* @details It is an array of pointers to memory, which was allocated with * @details It is an array of pointers to memory, which was allocated with
* a @c new call on construction. * a @c new call on construction.
*/ */
uint8_t* store[NUMBER_OF_POOLS]; std::vector<std::vector<uint8_t>> store =
std::vector<std::vector<uint8_t>>(NUMBER_OF_POOLS);
/** /**
* @brief The size_list attribute stores the size values of every pool element. * @brief The size_list attribute stores the size values of every pool element.
* @details As the number of elements is determined on construction, the size list * @details As the number of elements is determined on construction, the size list
* is also dynamically allocated there. * is also dynamically allocated there.
*/ */
uint32_t* size_list[NUMBER_OF_POOLS]; std::vector<std::vector<size_type>> sizeLists =
std::vector<std::vector<size_type>>(NUMBER_OF_POOLS);
//! A variable to determine whether higher n pools are used if //! A variable to determine whether higher n pools are used if
//! the store is full. //! the store is full.
bool spillsToHigherPools; bool spillsToHigherPools = false;
/** /**
* @brief This method safely stores the given data in the given packet_id. * @brief This method safely stores the given data in the given packet_id.
* @details It also sets the size in size_list. The method does not perform * @details It also sets the size in size_list. The method does not perform
@ -139,30 +204,24 @@ private:
* @param data The data to be stored. * @param data The data to be stored.
* @param size The size of the data to be stored. * @param size The size of the data to be stored.
*/ */
void write(store_address_t packet_id, const uint8_t* data, size_t size); void write(store_address_t packetId, const uint8_t* data, size_t size);
/** /**
* @brief A helper method to read the element size of a certain pool. * @brief A helper method to read the element size of a certain pool.
* @param pool_index The pool in which to look. * @param pool_index The pool in which to look.
* @return Returns the size of an element or 0. * @return Returns the size of an element or 0.
*/ */
uint32_t getPageSize(uint16_t pool_index); size_type getPageSize(max_pools_t poolIndex);
/**
* @brief This helper method looks up a fitting pool for a given size.
* @details The pools are looked up in ascending order, so the first that
* fits is used.
* @param packet_size The size of the data to be stored.
* @return Returns the pool that fits or StorageManagerIF::INVALID_ADDRESS.
*/
/** /**
* @brief This helper method looks up a fitting pool for a given size. * @brief This helper method looks up a fitting pool for a given size.
* @details The pools are looked up in ascending order, so the first that * @details The pools are looked up in ascending order, so the first that
* fits is used. * fits is used.
* @param packet_size The size of the data to be stored. * @param packet_size The size of the data to be stored.
* @param[out] poolIndex The fitting pool index found. * @param[out] poolIndex The fitting pool index found.
* @return - #RETURN_OK on success, * @return - @c RETURN_OK on success,
* - #DATA_TOO_LARGE otherwise. * - @c DATA_TOO_LARGE otherwise.
*/ */
ReturnValue_t getPoolIndex(size_t packet_size, uint16_t* poolIndex, ReturnValue_t getPoolIndex(size_t packetSize, uint16_t* poolIndex,
uint16_t startAtIndex = 0); uint16_t startAtIndex = 0);
/** /**
* @brief This helper method calculates the true array position in store * @brief This helper method calculates the true array position in store
@ -172,7 +231,7 @@ private:
* @param packet_id The packet id to look up. * @param packet_id The packet id to look up.
* @return Returns the position of the data in store. * @return Returns the position of the data in store.
*/ */
uint32_t getRawPosition(store_address_t packet_id); size_type getRawPosition(store_address_t storeId);
/** /**
* @brief This is a helper method to find an empty element in a given pool. * @brief This is a helper method to find an empty element in a given pool.
* @details The method searches size_list for the first empty element, so * @details The method searches size_list for the first empty element, so
@ -182,9 +241,9 @@ private:
* @return - #RETURN_OK on success, * @return - #RETURN_OK on success,
* - #DATA_STORAGE_FULL if the store is full * - #DATA_STORAGE_FULL if the store is full
*/ */
ReturnValue_t findEmpty(uint16_t pool_index, uint16_t* element); ReturnValue_t findEmpty(n_pool_elem_t poolIndex, uint16_t* element);
InternalErrorReporterIF *internalErrorReporter = nullptr;
}; };
#include "LocalPool.tpp"
#endif /* FSFW_STORAGEMANAGER_LOCALPOOL_H_ */ #endif /* FSFW_STORAGEMANAGER_LOCALPOOL_H_ */

View File

@ -0,0 +1,60 @@
#include "PoolManager.h"
#include <FSFWConfig.h>
/**
 * @brief Construct the thread-safe pool manager.
 * @details The LocalPool base is always registered in the object manager
 * (third ctor argument is true).
 */
PoolManager::PoolManager(object_id_t setObjectId,
        const LocalPoolConfig& localPoolConfig):
        LocalPool(setObjectId, localPoolConfig, true) {
    // This mutex protects all thread-safe wrapper calls of this class.
    mutex = MutexFactory::instance()->createMutex();
}
/**
 * @brief Return the mutex to the factory which created it.
 */
PoolManager::~PoolManager() {
    MutexFactory::instance()->deleteMutex(mutex);
}
/**
 * @brief Thread-safe wrapper around LocalPool::reserveSpace().
 * @details The base class implementation is not thread-safe on its own.
 */
ReturnValue_t PoolManager::reserveSpace(const size_t size,
        store_address_t* address, bool ignoreFault) {
    MutexHelper lock(mutex, MutexIF::TimeoutType::WAITING,
            mutexTimeoutMs);
    return LocalPool::reserveSpace(size, address, ignoreFault);
}
/**
 * @brief Thread-safe wrapper around LocalPool::deleteData(store_address_t).
 */
ReturnValue_t PoolManager::deleteData(
        store_address_t storeId) {
#if FSFW_VERBOSE_PRINTOUT == 2
    // NOTE(review): translateObject() is a mission-provided free function --
    // confirm it is declared in this translation unit when
    // FSFW_VERBOSE_PRINTOUT == 2.
    sif::debug << "PoolManager( " << translateObject(getObjectId()) <<
            " )::deleteData from store " << storeId.poolIndex <<
            ". id is "<< storeId.packetIndex << std::endl;
#endif
    MutexHelper mutexHelper(mutex, MutexIF::TimeoutType::WAITING,
            mutexTimeoutMs);
    return LocalPool::deleteData(storeId);
}
/**
 * @brief Thread-safe wrapper around LocalPool::deleteData(ptr, size, id).
 */
ReturnValue_t PoolManager::deleteData(uint8_t* buffer,
        size_t size, store_address_t* storeId) {
    // Use the configurable timeout instead of the previous hard-coded 20 ms
    // so that setMutexTimeout() affects this call path as well, consistent
    // with reserveSpace() and deleteData(store_address_t).
    MutexHelper mutexHelper(mutex, MutexIF::TimeoutType::WAITING,
            mutexTimeoutMs);
    return LocalPool::deleteData(buffer, size, storeId);
}
/**
 * @brief Set the default mutex timeout used by the internal wrapper calls.
 */
void PoolManager::setMutexTimeout(uint32_t mutexTimeoutMs) {
    this->mutexTimeoutMs = mutexTimeoutMs;
}
/**
 * @brief Manually acquire the pool mutex.
 * @details Allows callers to hold the lock across several pool accesses.
 */
ReturnValue_t PoolManager::lockMutex(MutexIF::TimeoutType timeoutType,
        uint32_t timeoutMs) {
    return mutex->lockMutex(timeoutType, timeoutMs);
}
/**
 * @brief Release the manually acquired pool mutex.
 */
ReturnValue_t PoolManager::unlockMutex() {
    return mutex->unlockMutex();
}

View File

@ -9,16 +9,20 @@
/** /**
* @brief The PoolManager class provides an intermediate data storage with * @brief The PoolManager class provides an intermediate data storage with
* a fixed pool size policy for inter-process communication. * a fixed pool size policy for inter-process communication.
* @details Uses local pool calls but is thread safe by protecting the call * @details
* with a lock. * Uses local pool calls but is thread safe by protecting most calls
* with a lock. The developer can lock the pool with the provided API
* if the lock needs to persists beyond the function call.
*
* Other than that, the class provides the same interface as the LocalPool
* class. The class is always registered as a system object as it is assumed
* it will always be used concurrently (if this is not the case, it is
* recommended to use the LocalPool class instead).
* @author Bastian Baetz * @author Bastian Baetz
*/ */
template <uint8_t NUMBER_OF_POOLS = 5> class PoolManager: public LocalPool {
class PoolManager : public LocalPool<NUMBER_OF_POOLS> {
public: public:
PoolManager(object_id_t setObjectId, PoolManager(object_id_t setObjectId, const LocalPoolConfig& poolConfig);
const uint16_t element_sizes[NUMBER_OF_POOLS],
const uint16_t n_elements[NUMBER_OF_POOLS]);
/** /**
* @brief In the PoolManager's destructor all allocated memory * @brief In the PoolManager's destructor all allocated memory
@ -26,6 +30,12 @@ public:
*/ */
virtual ~PoolManager(); virtual ~PoolManager();
/**
* Set the default mutex timeout for internal calls.
* @param mutexTimeoutMs
*/
void setMutexTimeout(uint32_t mutexTimeoutMs);
/** /**
* @brief LocalPool overrides for thread-safety. Decorator function * @brief LocalPool overrides for thread-safety. Decorator function
* which wraps LocalPool calls with a mutex protection. * which wraps LocalPool calls with a mutex protection.
@ -34,12 +44,23 @@ public:
ReturnValue_t deleteData(uint8_t* buffer, size_t size, ReturnValue_t deleteData(uint8_t* buffer, size_t size,
store_address_t* storeId = nullptr) override; store_address_t* storeId = nullptr) override;
void setMutexTimeout(uint32_t mutexTimeoutMs); /**
* The developer is allowed to lock the mutex in case the lock needs
* to persist beyond the function calls which are not protected by the
* class.
* @param timeoutType
* @param timeoutMs
* @return
*/
ReturnValue_t lockMutex(MutexIF::TimeoutType timeoutType,
uint32_t timeoutMs);
ReturnValue_t unlockMutex();
protected: protected:
//! Default mutex timeout value to prevent permanent blocking. //! Default mutex timeout value to prevent permanent blocking.
uint32_t mutexTimeoutMs = 20; uint32_t mutexTimeoutMs = 20;
ReturnValue_t reserveSpace(const uint32_t size, store_address_t* address, ReturnValue_t reserveSpace(const size_t size, store_address_t* address,
bool ignoreFault) override; bool ignoreFault) override;
/** /**
@ -51,6 +72,4 @@ protected:
MutexIF* mutex; MutexIF* mutex;
}; };
#include "PoolManager.tpp"
#endif /* FSFW_STORAGEMANAGER_POOLMANAGER_H_ */ #endif /* FSFW_STORAGEMANAGER_POOLMANAGER_H_ */

View File

@ -10,9 +10,7 @@ class StorageManagerIF;
*/ */
class StorageAccessor: public ConstStorageAccessor { class StorageAccessor: public ConstStorageAccessor {
//! StorageManager classes have exclusive access to private variables. //! StorageManager classes have exclusive access to private variables.
template<uint8_t NUMBER_OF_POOLS>
friend class PoolManager; friend class PoolManager;
template<uint8_t NUMBER_OF_POOLS>
friend class LocalPool; friend class LocalPool;
public: public:
StorageAccessor(store_address_t storeId); StorageAccessor(store_address_t storeId);

View File

@ -28,6 +28,9 @@ using ConstAccessorPair = std::pair<ReturnValue_t, ConstStorageAccessor>;
*/ */
class StorageManagerIF : public HasReturnvaluesIF { class StorageManagerIF : public HasReturnvaluesIF {
public: public:
using size_type = size_t;
using max_pools_t = uint8_t;
static const uint8_t INTERFACE_ID = CLASS_ID::STORAGE_MANAGER_IF; //!< The unique ID for return codes for this interface. static const uint8_t INTERFACE_ID = CLASS_ID::STORAGE_MANAGER_IF; //!< The unique ID for return codes for this interface.
static const ReturnValue_t DATA_TOO_LARGE = MAKE_RETURN_CODE(1); //!< This return code indicates that the data to be stored is too large for the store. static const ReturnValue_t DATA_TOO_LARGE = MAKE_RETURN_CODE(1); //!< This return code indicates that the data to be stored is too large for the store.
static const ReturnValue_t DATA_STORAGE_FULL = MAKE_RETURN_CODE(2); //!< This return code indicates that a data storage is full. static const ReturnValue_t DATA_STORAGE_FULL = MAKE_RETURN_CODE(2); //!< This return code indicates that a data storage is full.
@ -40,7 +43,9 @@ public:
static const Event GET_DATA_FAILED = MAKE_EVENT(0, severity::LOW); static const Event GET_DATA_FAILED = MAKE_EVENT(0, severity::LOW);
static const Event STORE_DATA_FAILED = MAKE_EVENT(1, severity::LOW); static const Event STORE_DATA_FAILED = MAKE_EVENT(1, severity::LOW);
static const uint32_t INVALID_ADDRESS = 0xFFFFFFFF; //!< Indicates an invalid (i.e unused) storage address. //!< Indicates an invalid (i.e unused) storage address.
static const uint32_t INVALID_ADDRESS = 0xFFFFFFFF;
/** /**
* @brief This is the empty virtual destructor as required for C++ interfaces. * @brief This is the empty virtual destructor as required for C++ interfaces.
*/ */
@ -164,6 +169,22 @@ public:
* Use with care! * Use with care!
*/ */
virtual void clearStore() = 0; virtual void clearStore() = 0;
/**
* Clears a page in the store. Use with care!
* @param pageIndex
*/
virtual void clearPage(uint8_t pageIndex) = 0;
/**
* Get the fill count of the pool. The exact form will be implementation
* dependant.
* @param buffer
* @param bytesWritten
*/
virtual void getFillCount(uint8_t* buffer, uint8_t* bytesWritten) = 0;
virtual size_t getTotalSize(size_t* additionalSize) = 0;
}; };
#endif /* FSFW_STORAGEMANAGER_STORAGEMANAGERIF_H_ */ #endif /* FSFW_STORAGEMANAGER_STORAGEMANAGERIF_H_ */

View File

@ -3,16 +3,21 @@
#include <cstdint> #include <cstdint>
namespace storeId {
static constexpr uint32_t INVALID_STORE_ADDRESS = 0xffffffff;
}
/** /**
* This union defines the type that identifies where a data packet is * This union defines the type that identifies where a data packet is
* stored in the store. It comprises of a raw part to read it as raw value and * stored in the store. It comprises of a raw part to read it as raw value and
* a structured part to use it in pool-like stores. * a structured part to use it in pool-like stores.
*/ */
union store_address_t { union store_address_t {
/** /**
* Default Constructor, initializing to INVALID_ADDRESS * Default Constructor, initializing to INVALID_ADDRESS
*/ */
store_address_t():raw(0xFFFFFFFF){} store_address_t(): raw(storeId::INVALID_STORE_ADDRESS){}
/** /**
* Constructor to create an address object using the raw address * Constructor to create an address object using the raw address
* *
@ -28,7 +33,7 @@ union store_address_t {
* @param packetIndex * @param packetIndex
*/ */
store_address_t(uint16_t poolIndex, uint16_t packetIndex): store_address_t(uint16_t poolIndex, uint16_t packetIndex):
pool_index(poolIndex),packet_index(packetIndex){} poolIndex(poolIndex), packetIndex(packetIndex){}
/** /**
* A structure with two elements to access the store address pool-like. * A structure with two elements to access the store address pool-like.
*/ */
@ -36,11 +41,11 @@ union store_address_t {
/** /**
* The index in which pool the packet lies. * The index in which pool the packet lies.
*/ */
uint16_t pool_index; uint16_t poolIndex;
/** /**
* The position in the chosen pool. * The position in the chosen pool.
*/ */
uint16_t packet_index; uint16_t packetIndex;
}; };
/** /**
* Alternative access to the raw value. * Alternative access to the raw value.

View File

@ -3,19 +3,30 @@
#include "ServiceMatcher.h" #include "ServiceMatcher.h"
#include "SubserviceMatcher.h" #include "SubserviceMatcher.h"
// This should be configurable..
const LocalPool::LocalPoolConfig PacketMatchTree::poolConfig = {
{10, sizeof(ServiceMatcher)},
{20, sizeof(SubServiceMatcher)},
{2, sizeof(ApidMatcher)},
{40, sizeof(PacketMatchTree::Node)}
};
PacketMatchTree::PacketMatchTree(Node* root) : PacketMatchTree::PacketMatchTree(Node* root) :
MatchTree<TmPacketMinimal*>(root, 2), factoryBackend(0, POOL_SIZES, MatchTree<TmPacketMinimal*>(root, 2),
N_ELEMENTS, false, true), factory(&factoryBackend) { factoryBackend(0, poolConfig, false, true),
factory(&factoryBackend) {
} }
PacketMatchTree::PacketMatchTree(iterator root) : PacketMatchTree::PacketMatchTree(iterator root) :
MatchTree<TmPacketMinimal*>(root.element, 2), factoryBackend(0, MatchTree<TmPacketMinimal*>(root.element, 2),
POOL_SIZES, N_ELEMENTS, false, true), factory(&factoryBackend) { factoryBackend(0, poolConfig, false, true),
factory(&factoryBackend) {
} }
PacketMatchTree::PacketMatchTree() : PacketMatchTree::PacketMatchTree() :
MatchTree<TmPacketMinimal*>((Node*) NULL, 2), factoryBackend(0, MatchTree<TmPacketMinimal*>((Node*) NULL, 2),
POOL_SIZES, N_ELEMENTS, false, true), factory(&factoryBackend) { factoryBackend(0, poolConfig, false, true),
factory(&factoryBackend) {
} }
PacketMatchTree::~PacketMatchTree() { PacketMatchTree::~PacketMatchTree() {
@ -172,11 +183,6 @@ ReturnValue_t PacketMatchTree::initialize() {
return factoryBackend.initialize(); return factoryBackend.initialize();
} }
const uint16_t PacketMatchTree::POOL_SIZES[N_POOLS] = { sizeof(ServiceMatcher),
sizeof(SubServiceMatcher), sizeof(ApidMatcher),
sizeof(PacketMatchTree::Node) };
//Maximum number of types and subtypes to filter should be more than sufficient.
const uint16_t PacketMatchTree::N_ELEMENTS[N_POOLS] = { 10, 20, 2, 40 };
ReturnValue_t PacketMatchTree::changeMatch(bool addToMatch, uint16_t apid, ReturnValue_t PacketMatchTree::changeMatch(bool addToMatch, uint16_t apid,
uint8_t type, uint8_t subtype) { uint8_t type, uint8_t subtype) {

View File

@ -23,8 +23,9 @@ protected:
ReturnValue_t cleanUpElement(iterator position); ReturnValue_t cleanUpElement(iterator position);
private: private:
static const uint8_t N_POOLS = 4; static const uint8_t N_POOLS = 4;
LocalPool<N_POOLS> factoryBackend; LocalPool factoryBackend;
PlacementFactory factory; PlacementFactory factory;
static const LocalPool::LocalPoolConfig poolConfig;
static const uint16_t POOL_SIZES[N_POOLS]; static const uint16_t POOL_SIZES[N_POOLS];
static const uint16_t N_ELEMENTS[N_POOLS]; static const uint16_t N_ELEMENTS[N_POOLS];
template<typename VALUE_T, typename INSERTION_T> template<typename VALUE_T, typename INSERTION_T>

View File

@ -1,296 +1,295 @@
//#include "CatchDefinitions.h" #include "CatchDefinitions.h"
//
//#include <config/objects/Factory.h> #include <fsfw/objectmanager/ObjectManager.h>
//#include <fsfw/objectmanager/ObjectManager.h> #include <fsfw/storagemanager/LocalPool.h>
//#include <fsfw/storagemanager/LocalPool.h>
// #include <catch.hpp>
//#include <catch.hpp> #include <CatchDefinitions.h>
//#include <CatchDefinitions.h>
// #include <cstring>
//#include <cstring>
//
// TEST_CASE( "Local Pool Simple Tests [1 Pool]" , "[TestPool]") {
//TEST_CASE( "Local Pool Simple Tests [1 Pool]" , "[TestPool]") { // uint16_t numberOfElements[1] = {1};
//// uint16_t numberOfElements[1] = {1}; // uint16_t sizeofElements[1] = {10};
//// uint16_t sizeofElements[1] = {10}; LocalPool::LocalPoolConfig config = {{1, 10}};
// LocalPool::LocalPoolConfig config = {{1, 10}}; LocalPool simplePool(0, config);
// LocalPool simplePool(0, config); std::array<uint8_t, 20> testDataArray;
// std::array<uint8_t, 20> testDataArray; std::array<uint8_t, 20> receptionArray;
// std::array<uint8_t, 20> receptionArray; store_address_t testStoreId;
// store_address_t testStoreId; ReturnValue_t result = retval::CATCH_FAILED;
// ReturnValue_t result = retval::CATCH_FAILED; uint8_t *pointer = nullptr;
// uint8_t *pointer = nullptr; const uint8_t * constPointer = nullptr;
// const uint8_t * constPointer = nullptr;
// for(size_t i = 0; i < testDataArray.size(); i++) {
// for(size_t i = 0; i < testDataArray.size(); i++) { testDataArray[i] = i;
// testDataArray[i] = i; }
// } size_t size = 10;
// size_t size = 10;
// SECTION ( "Basic tests") {
// SECTION ( "Basic tests") { result = simplePool.addData(&testStoreId, testDataArray.data(), size);
// result = simplePool.addData(&testStoreId, testDataArray.data(), size); REQUIRE(result == retval::CATCH_OK);
// REQUIRE(result == retval::CATCH_OK); result = simplePool.getData(testStoreId, &constPointer, &size);
// result = simplePool.getData(testStoreId, &constPointer, &size); REQUIRE(result == retval::CATCH_OK);
// REQUIRE(result == retval::CATCH_OK); memcpy(receptionArray.data(), constPointer, size);
// memcpy(receptionArray.data(), constPointer, size); for(size_t i = 0; i < size; i++) {
// for(size_t i = 0; i < size; i++) { CHECK(receptionArray[i] == i );
// CHECK(receptionArray[i] == i ); }
// } memset(receptionArray.data(), 0, size);
// memset(receptionArray.data(), 0, size); result = simplePool.modifyData(testStoreId, &pointer, &size);
// result = simplePool.modifyData(testStoreId, &pointer, &size); memcpy(receptionArray.data(), pointer, size);
// memcpy(receptionArray.data(), pointer, size); REQUIRE(result == retval::CATCH_OK);
// REQUIRE(result == retval::CATCH_OK); for(size_t i = 0; i < size; i++) {
// for(size_t i = 0; i < size; i++) { CHECK(receptionArray[i] == i );
// CHECK(receptionArray[i] == i ); }
// } result = simplePool.deleteData(testStoreId);
// result = simplePool.deleteData(testStoreId); REQUIRE(result == retval::CATCH_OK);
// REQUIRE(result == retval::CATCH_OK); result = simplePool.addData(&testStoreId, testDataArray.data(), 15);
// result = simplePool.addData(&testStoreId, testDataArray.data(), 15); CHECK (result == (int) StorageManagerIF::DATA_TOO_LARGE);
// CHECK (result == (int) StorageManagerIF::DATA_TOO_LARGE); }
// }
// SECTION ( "Reservation Tests ") {
// SECTION ( "Reservation Tests ") { pointer = nullptr;
// pointer = nullptr; result = simplePool.getFreeElement(&testStoreId, size, &pointer);
// result = simplePool.getFreeElement(&testStoreId, size, &pointer); REQUIRE (result == retval::CATCH_OK);
// REQUIRE (result == retval::CATCH_OK); memcpy(pointer, testDataArray.data(), size);
// memcpy(pointer, testDataArray.data(), size); constPointer = nullptr;
// constPointer = nullptr; result = simplePool.getData(testStoreId, &constPointer, &size);
// result = simplePool.getData(testStoreId, &constPointer, &size);
// REQUIRE (result == retval::CATCH_OK);
// REQUIRE (result == retval::CATCH_OK); memcpy(receptionArray.data(), constPointer, size);
// memcpy(receptionArray.data(), constPointer, size); for(size_t i = 0; i < size; i++) {
// for(size_t i = 0; i < size; i++) { CHECK(receptionArray[i] == i );
// CHECK(receptionArray[i] == i ); }
// } }
// }
// SECTION ( "Add, delete, add, add when full") {
// SECTION ( "Add, delete, add, add when full") { result = simplePool.addData(&testStoreId, testDataArray.data(), size);
// result = simplePool.addData(&testStoreId, testDataArray.data(), size); REQUIRE(result == retval::CATCH_OK);
// REQUIRE(result == retval::CATCH_OK); result = simplePool.getData(testStoreId, &constPointer, &size);
// result = simplePool.getData(testStoreId, &constPointer, &size); REQUIRE( result == retval::CATCH_OK);
// REQUIRE( result == retval::CATCH_OK); memcpy(receptionArray.data(), constPointer, size);
// memcpy(receptionArray.data(), constPointer, size); for(size_t i = 0; i < size; i++) {
// for(size_t i = 0; i < size; i++) { CHECK(receptionArray[i] == i );
// CHECK(receptionArray[i] == i ); }
// }
// result = simplePool.deleteData(testStoreId);
// result = simplePool.deleteData(testStoreId); REQUIRE(result == retval::CATCH_OK);
// REQUIRE(result == retval::CATCH_OK);
// result = simplePool.addData(&testStoreId, testDataArray.data(), size);
// result = simplePool.addData(&testStoreId, testDataArray.data(), size); REQUIRE(result == retval::CATCH_OK);
// REQUIRE(result == retval::CATCH_OK); result = simplePool.getData(testStoreId, &constPointer, &size);
// result = simplePool.getData(testStoreId, &constPointer, &size); REQUIRE( result == retval::CATCH_OK);
// REQUIRE( result == retval::CATCH_OK); memcpy(receptionArray.data(), constPointer, size);
// memcpy(receptionArray.data(), constPointer, size); for(size_t i = 0; i < size; i++) {
// for(size_t i = 0; i < size; i++) { CHECK(receptionArray[i] == i );
// CHECK(receptionArray[i] == i ); }
// }
// store_address_t newAddress;
// store_address_t newAddress; result = simplePool.addData(&newAddress, testDataArray.data(), size);
// result = simplePool.addData(&newAddress, testDataArray.data(), size); REQUIRE(result == (int) StorageManagerIF::DATA_STORAGE_FULL);
// REQUIRE(result == (int) StorageManagerIF::DATA_STORAGE_FULL);
// // Packet Index to high intentionally
// // Packet Index to high intentionally newAddress.packetIndex = 2;
// newAddress.packetIndex = 2; pointer = testDataArray.data();
// pointer = testDataArray.data(); result = simplePool.modifyData(newAddress, &pointer, &size);
// result = simplePool.modifyData(newAddress, &pointer, &size); REQUIRE(result == (int) StorageManagerIF::ILLEGAL_STORAGE_ID);
// REQUIRE(result == (int) StorageManagerIF::ILLEGAL_STORAGE_ID);
// result = simplePool.deleteData(newAddress);
// result = simplePool.deleteData(newAddress); REQUIRE(result == (int) StorageManagerIF::ILLEGAL_STORAGE_ID);
// REQUIRE(result == (int) StorageManagerIF::ILLEGAL_STORAGE_ID);
// newAddress.packetIndex = 0;
// newAddress.packetIndex = 0; newAddress.poolIndex = 2;
// newAddress.poolIndex = 2; result = simplePool.deleteData(newAddress);
// result = simplePool.deleteData(newAddress); REQUIRE(result == (int) StorageManagerIF::ILLEGAL_STORAGE_ID);
// REQUIRE(result == (int) StorageManagerIF::ILLEGAL_STORAGE_ID); }
// }
// SECTION ( "Initialize and clear store, delete with pointer") {
// SECTION ( "Initialize and clear store, delete with pointer") { result = simplePool.initialize();
// result = simplePool.initialize(); REQUIRE(result == retval::CATCH_OK);
// REQUIRE(result == retval::CATCH_OK); result = simplePool.addData(&testStoreId, testDataArray.data(), size);
// result = simplePool.addData(&testStoreId, testDataArray.data(), size); REQUIRE(result == retval::CATCH_OK);
// REQUIRE(result == retval::CATCH_OK); simplePool.clearStore();
// simplePool.clearStore(); result = simplePool.addData(&testStoreId, testDataArray.data(), size);
// result = simplePool.addData(&testStoreId, testDataArray.data(), size); REQUIRE(result == retval::CATCH_OK);
// REQUIRE(result == retval::CATCH_OK); result = simplePool.modifyData(testStoreId, &pointer, &size);
// result = simplePool.modifyData(testStoreId, &pointer, &size); REQUIRE(result == retval::CATCH_OK);
// REQUIRE(result == retval::CATCH_OK); store_address_t newId;
// store_address_t newId; result = simplePool.deleteData(pointer, size, &testStoreId);
// result = simplePool.deleteData(pointer, size, &testStoreId); REQUIRE(result == retval::CATCH_OK);
// REQUIRE(result == retval::CATCH_OK); REQUIRE(testStoreId.raw != (uint32_t) StorageManagerIF::INVALID_ADDRESS);
// REQUIRE(testStoreId.raw != (uint32_t) StorageManagerIF::INVALID_ADDRESS); result = simplePool.addData(&testStoreId, testDataArray.data(), size);
// result = simplePool.addData(&testStoreId, testDataArray.data(), size); REQUIRE(result == retval::CATCH_OK);
// REQUIRE(result == retval::CATCH_OK); }
// } }
//}
// int runIdx = 0;
//int runIdx = 0;
// TEST_CASE( "Local Pool Extended Tests [3 Pools]" , "[TestPool2]") {
//TEST_CASE( "Local Pool Extended Tests [3 Pools]" , "[TestPool2]") { LocalPool::LocalPoolConfig* config;
// LocalPool::LocalPoolConfig* config; if(runIdx == 0) {
// if(runIdx == 0) { config = new LocalPool::LocalPoolConfig{{10, 5}, {5, 10}, {2, 20}};
// config = new LocalPool::LocalPoolConfig{{10, 5}, {5, 10}, {2, 20}}; }
// } else {
// else { // shufle the order, they should be sort implictely so that the
// // shufle the order, they should be sort implictely so that the // order is ascending for the page sizes.
// // order is ascending for the page sizes. config = new LocalPool::LocalPoolConfig{{5, 10}, {2, 20}, {10, 5}};
// config = new LocalPool::LocalPoolConfig{{5, 10}, {2, 20}, {10, 5}}; size_t lastSize = 0;
// size_t lastSize = 0; for(const auto& pair: *config) {
// for(const auto& pair: *config) { CHECK(pair.second > lastSize);
// CHECK(pair.second > lastSize); lastSize = pair.second;
// lastSize = pair.second; }
// } }
// } runIdx++;
// runIdx++;
// LocalPool simplePool(0, *config);
// LocalPool simplePool(0, *config); std::array<uint8_t, 20> testDataArray;
// std::array<uint8_t, 20> testDataArray; std::array<uint8_t, 20> receptionArray;
// std::array<uint8_t, 20> receptionArray; store_address_t testStoreId;
// store_address_t testStoreId; ReturnValue_t result = retval::CATCH_FAILED;
// ReturnValue_t result = retval::CATCH_FAILED; for(size_t i = 0; i < testDataArray.size(); i++) {
// for(size_t i = 0; i < testDataArray.size(); i++) { testDataArray[i] = i;
// testDataArray[i] = i; }
// } size_t size = 0;
// size_t size = 0;
// SECTION ("Basic tests") {
// SECTION ("Basic tests") { size = 8;
// size = 8; result = simplePool.addData(&testStoreId, testDataArray.data(), size);
// result = simplePool.addData(&testStoreId, testDataArray.data(), size); REQUIRE(result == retval::CATCH_OK);
// REQUIRE(result == retval::CATCH_OK); // Should be on second page of the pool now for 8 bytes
// // Should be on second page of the pool now for 8 bytes CHECK(testStoreId.poolIndex == 1);
// CHECK(testStoreId.poolIndex == 1); CHECK(testStoreId.packetIndex == 0);
// CHECK(testStoreId.packetIndex == 0);
// size = 15;
// size = 15; result = simplePool.addData(&testStoreId, testDataArray.data(), size);
// result = simplePool.addData(&testStoreId, testDataArray.data(), size); REQUIRE(result == retval::CATCH_OK);
// REQUIRE(result == retval::CATCH_OK); // Should be on third page of the pool now for 15 bytes
// // Should be on third page of the pool now for 15 bytes CHECK(testStoreId.poolIndex == 2);
// CHECK(testStoreId.poolIndex == 2); CHECK(testStoreId.packetIndex == 0);
// CHECK(testStoreId.packetIndex == 0);
// result = simplePool.addData(&testStoreId, testDataArray.data(), size);
// result = simplePool.addData(&testStoreId, testDataArray.data(), size); REQUIRE(result == retval::CATCH_OK);
// REQUIRE(result == retval::CATCH_OK); // Should be on third page of the pool now for 15 bytes
// // Should be on third page of the pool now for 15 bytes CHECK(testStoreId.poolIndex == 2);
// CHECK(testStoreId.poolIndex == 2); CHECK(testStoreId.packetIndex == 1);
// CHECK(testStoreId.packetIndex == 1);
// result = simplePool.addData(&testStoreId, testDataArray.data(), size);
// result = simplePool.addData(&testStoreId, testDataArray.data(), size); // Should be on third page of the pool now for 15 bytes
// // Should be on third page of the pool now for 15 bytes REQUIRE(result == (int) LocalPool::DATA_STORAGE_FULL);
// REQUIRE(result == (int) LocalPool::DATA_STORAGE_FULL);
// size = 8;
// size = 8; result = simplePool.addData(&testStoreId, testDataArray.data(), size);
// result = simplePool.addData(&testStoreId, testDataArray.data(), size); REQUIRE(result == retval::CATCH_OK);
// REQUIRE(result == retval::CATCH_OK); // Should still work
// // Should still work CHECK(testStoreId.poolIndex == 1);
// CHECK(testStoreId.poolIndex == 1); CHECK(testStoreId.packetIndex == 1);
// CHECK(testStoreId.packetIndex == 1);
// // fill the rest of the pool
// // fill the rest of the pool for(uint8_t idx = 2; idx < 5; idx++) {
// for(uint8_t idx = 2; idx < 5; idx++) { result = simplePool.addData(&testStoreId, testDataArray.data(), size);
// result = simplePool.addData(&testStoreId, testDataArray.data(), size); REQUIRE(result == retval::CATCH_OK);
// REQUIRE(result == retval::CATCH_OK); CHECK(testStoreId.poolIndex == 1);
// CHECK(testStoreId.poolIndex == 1); CHECK(testStoreId.packetIndex == idx);
// CHECK(testStoreId.packetIndex == idx); }
// } }
// }
// SECTION ("Fill Count and Clearing") {
// SECTION ("Fill Count and Clearing") { //SECTION("Basic tests");
// //SECTION("Basic tests"); uint8_t bytesWritten = 0;
// uint8_t bytesWritten = 0; simplePool.getFillCount(receptionArray.data(), &bytesWritten);
// simplePool.getFillCount(receptionArray.data(), &bytesWritten); // fill count should be all zeros now.
// // fill count should be all zeros now. CHECK(bytesWritten == 4);
// CHECK(bytesWritten == 4); CHECK(receptionArray[0] == 0);
// CHECK(receptionArray[0] == 0); CHECK(receptionArray[1] == 0);
// CHECK(receptionArray[1] == 0); CHECK(receptionArray[2] == 0);
// CHECK(receptionArray[2] == 0); CHECK(receptionArray[3] == 0);
// CHECK(receptionArray[3] == 0);
// // now fill the store completely.
// // now fill the store completely. size = 5;
// size = 5; for(uint8_t idx = 0; idx < 10; idx++) {
// for(uint8_t idx = 0; idx < 10; idx++) { result = simplePool.addData(&testStoreId, testDataArray.data(), size);
// result = simplePool.addData(&testStoreId, testDataArray.data(), size); REQUIRE(result == retval::CATCH_OK);
// REQUIRE(result == retval::CATCH_OK); CHECK(testStoreId.poolIndex == 0);
// CHECK(testStoreId.poolIndex == 0); CHECK(testStoreId.packetIndex == idx);
// CHECK(testStoreId.packetIndex == idx); }
// } size = 10;
// size = 10; for(uint8_t idx = 0; idx < 5; idx++) {
// for(uint8_t idx = 0; idx < 5; idx++) { result = simplePool.addData(&testStoreId, testDataArray.data(), size);
// result = simplePool.addData(&testStoreId, testDataArray.data(), size); REQUIRE(result == retval::CATCH_OK);
// REQUIRE(result == retval::CATCH_OK); CHECK(testStoreId.poolIndex == 1);
// CHECK(testStoreId.poolIndex == 1); CHECK(testStoreId.packetIndex == idx);
// CHECK(testStoreId.packetIndex == idx); }
// } size = 20;
// size = 20; for(uint8_t idx = 0; idx < 2; idx++) {
// for(uint8_t idx = 0; idx < 2; idx++) { result = simplePool.addData(&testStoreId, testDataArray.data(), size);
// result = simplePool.addData(&testStoreId, testDataArray.data(), size); REQUIRE(result == retval::CATCH_OK);
// REQUIRE(result == retval::CATCH_OK); CHECK(testStoreId.poolIndex == 2);
// CHECK(testStoreId.poolIndex == 2); CHECK(testStoreId.packetIndex == idx);
// CHECK(testStoreId.packetIndex == idx); }
// } bytesWritten = 0;
// bytesWritten = 0; simplePool.getFillCount(receptionArray.data(), &bytesWritten);
// simplePool.getFillCount(receptionArray.data(), &bytesWritten); // fill count should be all 100 now.
// // fill count should be all 100 now. CHECK(bytesWritten == 4);
// CHECK(bytesWritten == 4); CHECK(receptionArray[0] == 100);
// CHECK(receptionArray[0] == 100); CHECK(receptionArray[1] == 100);
// CHECK(receptionArray[1] == 100); CHECK(receptionArray[2] == 100);
// CHECK(receptionArray[2] == 100); CHECK(receptionArray[3] == 100);
// CHECK(receptionArray[3] == 100);
// // now clear the store
// // now clear the store simplePool.clearStore();
// simplePool.clearStore(); bytesWritten = 0;
// bytesWritten = 0; simplePool.getFillCount(receptionArray.data(), &bytesWritten);
// simplePool.getFillCount(receptionArray.data(), &bytesWritten); CHECK(bytesWritten == 4);
// CHECK(bytesWritten == 4); CHECK(receptionArray[0] == 0);
// CHECK(receptionArray[0] == 0); CHECK(receptionArray[1] == 0);
// CHECK(receptionArray[1] == 0); CHECK(receptionArray[2] == 0);
// CHECK(receptionArray[2] == 0); CHECK(receptionArray[3] == 0);
// CHECK(receptionArray[3] == 0);
// // now fill one page
// // now fill one page size = 5;
// size = 5; for(uint8_t idx = 0; idx < 10; idx++) {
// for(uint8_t idx = 0; idx < 10; idx++) { result = simplePool.addData(&testStoreId, testDataArray.data(), size);
// result = simplePool.addData(&testStoreId, testDataArray.data(), size); REQUIRE(result == retval::CATCH_OK);
// REQUIRE(result == retval::CATCH_OK); CHECK(testStoreId.poolIndex == 0);
// CHECK(testStoreId.poolIndex == 0); CHECK(testStoreId.packetIndex == idx);
// CHECK(testStoreId.packetIndex == idx); }
// } bytesWritten = 0;
// bytesWritten = 0; simplePool.getFillCount(receptionArray.data(), &bytesWritten);
// simplePool.getFillCount(receptionArray.data(), &bytesWritten); // First page full, median fill count is 33 %
// // First page full, median fill count is 33 % CHECK(bytesWritten == 4);
// CHECK(bytesWritten == 4); CHECK(receptionArray[0] == 100);
// CHECK(receptionArray[0] == 100); CHECK(receptionArray[1] == 0);
// CHECK(receptionArray[1] == 0); CHECK(receptionArray[2] == 0);
// CHECK(receptionArray[2] == 0); CHECK(receptionArray[3] == 33);
// CHECK(receptionArray[3] == 33);
// // now fill second page
// // now fill second page size = 10;
// size = 10; for(uint8_t idx = 0; idx < 5; idx++) {
// for(uint8_t idx = 0; idx < 5; idx++) { result = simplePool.addData(&testStoreId, testDataArray.data(), size);
// result = simplePool.addData(&testStoreId, testDataArray.data(), size); REQUIRE(result == retval::CATCH_OK);
// REQUIRE(result == retval::CATCH_OK); CHECK(testStoreId.poolIndex == 1);
// CHECK(testStoreId.poolIndex == 1); CHECK(testStoreId.packetIndex == idx);
// CHECK(testStoreId.packetIndex == idx); }
// } bytesWritten = 0;
// bytesWritten = 0; simplePool.getFillCount(receptionArray.data(), &bytesWritten);
// simplePool.getFillCount(receptionArray.data(), &bytesWritten); // First and second page full, median fill count is 66 %
// // First and second page full, median fill count is 66 % CHECK(bytesWritten == 4);
// CHECK(bytesWritten == 4); CHECK(receptionArray[0] == 100);
// CHECK(receptionArray[0] == 100); CHECK(receptionArray[1] == 100);
// CHECK(receptionArray[1] == 100); CHECK(receptionArray[2] == 0);
// CHECK(receptionArray[2] == 0); CHECK(receptionArray[3] == 66);
// CHECK(receptionArray[3] == 66);
// // now clear first page
// // now clear first page simplePool.clearPage(0);
// simplePool.clearPage(0); bytesWritten = 0;
// bytesWritten = 0; simplePool.getFillCount(receptionArray.data(), &bytesWritten);
// simplePool.getFillCount(receptionArray.data(), &bytesWritten); // Second page full, median fill count is 33 %
// // Second page full, median fill count is 33 % CHECK(bytesWritten == 4);
// CHECK(bytesWritten == 4); CHECK(receptionArray[0] == 0);
// CHECK(receptionArray[0] == 0); CHECK(receptionArray[1] == 100);
// CHECK(receptionArray[1] == 100); CHECK(receptionArray[2] == 0);
// CHECK(receptionArray[2] == 0); CHECK(receptionArray[3] == 33);
// CHECK(receptionArray[3] == 33); }
// }
// delete(config);
// delete(config); }
//}