diff --git a/events/EventManager.cpp b/events/EventManager.cpp index 0971dadc..57bda13f 100644 --- a/events/EventManager.cpp +++ b/events/EventManager.cpp @@ -1,23 +1,23 @@ #include "EventManager.h" -#include "EventMessage.h" #include "../serviceinterface/ServiceInterfaceStream.h" #include "../ipc/QueueFactory.h" #include "../ipc/MutexFactory.h" -const uint16_t EventManager::POOL_SIZES[N_POOLS] = { - sizeof(EventMatchTree::Node), sizeof(EventIdRangeMatcher), - sizeof(ReporterRangeMatcher) }; // If one checks registerListener calls, there are around 40 (to max 50) // objects registering for certain events. // Each listener requires 1 or 2 EventIdMatcher and 1 or 2 ReportRangeMatcher. // So a good guess is 75 to a max of 100 pools required for each, which fits well. -// SHOULDDO: Shouldn't this be in the config folder and passed via ctor? -const uint16_t EventManager::N_ELEMENTS[N_POOLS] = { 240, 120, 120 }; +// This should be configurable.. +const LocalPool::LocalPoolConfig EventManager::poolConfig = { + {240, sizeof(EventMatchTree::Node)}, + {120, sizeof(EventIdRangeMatcher)}, + {120, sizeof(ReporterRangeMatcher)} +}; EventManager::EventManager(object_id_t setObjectId) : SystemObject(setObjectId), - factoryBackend(0, POOL_SIZES, N_ELEMENTS, false, true) { + factoryBackend(0, poolConfig, false, true) { mutex = MutexFactory::instance()->createMutex(); eventReportQueue = QueueFactory::instance()->createMessageQueue( MAX_EVENTS_PER_CYCLE, EventMessage::EVENT_MESSAGE_SIZE); diff --git a/events/EventManager.h b/events/EventManager.h index f03c0241..fe35d9d3 100644 --- a/events/EventManager.h +++ b/events/EventManager.h @@ -51,7 +51,9 @@ protected: MutexIF* mutex = nullptr; static const uint8_t N_POOLS = 3; - LocalPool factoryBackend; + LocalPool factoryBackend; + static const LocalPool::LocalPoolConfig poolConfig; + static const uint16_t POOL_SIZES[N_POOLS]; static const uint16_t N_ELEMENTS[N_POOLS]; diff --git a/storagemanager/ConstStorageAccessor.h b/storagemanager/ConstStorageAccessor.h index 96d2dca2..570c20ce 100644 --- a/storagemanager/ConstStorageAccessor.h +++ b/storagemanager/ConstStorageAccessor.h @@ -20,9 +20,7 @@ class StorageManagerIF; */ class ConstStorageAccessor { //! StorageManager classes have exclusive access to private variables. - template friend class PoolManager; - template friend class LocalPool; public: /** diff --git a/storagemanager/LocalPool.cpp b/storagemanager/LocalPool.cpp new file mode 100644 index 00000000..8e5f4d48 --- /dev/null +++ b/storagemanager/LocalPool.cpp @@ -0,0 +1,346 @@ +#include "LocalPool.h" +#include + +LocalPool::LocalPool(object_id_t setObjectId, const LocalPoolConfig& poolConfig, + bool registered, bool spillsToHigherPools): + SystemObject(setObjectId, registered), + NUMBER_OF_POOLS(poolConfig.size()), + spillsToHigherPools(spillsToHigherPools) { + if(NUMBER_OF_POOLS == 0) { + sif::error << "LocalPool::LocalPool: Passed pool configuration is " + << " invalid!" 
<< std::endl; + } + max_pools_t index = 0; + for (const auto& currentPoolConfig: poolConfig) { + this->numberOfElements[index] = currentPoolConfig.first; + this->elementSizes[index] = currentPoolConfig.second; + store[index] = std::vector( + numberOfElements[index] * elementSizes[index]); + sizeLists[index] = std::vector(numberOfElements[index]); + for(auto& size: sizeLists[index]) { + size = STORAGE_FREE; + } + index++; + } +} + +LocalPool::~LocalPool(void) {} + + +ReturnValue_t LocalPool::addData(store_address_t* storageId, + const uint8_t* data, size_t size, bool ignoreFault) { + ReturnValue_t status = reserveSpace(size, storageId, ignoreFault); + if (status == RETURN_OK) { + write(*storageId, data, size); + } + return status; +} + +ReturnValue_t LocalPool::getData(store_address_t packetId, + const uint8_t **packetPtr, size_t *size) { + uint8_t* tempData = nullptr; + ReturnValue_t status = modifyData(packetId, &tempData, size); + *packetPtr = tempData; + return status; +} + +ReturnValue_t LocalPool::getData(store_address_t storeId, + ConstStorageAccessor& storeAccessor) { + uint8_t* tempData = nullptr; + ReturnValue_t status = modifyData(storeId, &tempData, + &storeAccessor.size_); + storeAccessor.assignStore(this); + storeAccessor.constDataPointer = tempData; + return status; +} + +ConstAccessorPair LocalPool::getData(store_address_t storeId) { + uint8_t* tempData = nullptr; + ConstStorageAccessor constAccessor(storeId, this); + ReturnValue_t status = modifyData(storeId, &tempData, &constAccessor.size_); + constAccessor.constDataPointer = tempData; + return ConstAccessorPair(status, std::move(constAccessor)); +} + +ReturnValue_t LocalPool::getFreeElement(store_address_t *storageId, + const size_t size, uint8_t **pData, bool ignoreFault) { + ReturnValue_t status = reserveSpace(size, storageId, ignoreFault); + if (status == RETURN_OK) { + *pData = &store[storageId->poolIndex][getRawPosition(*storageId)]; + } + else { + *pData = nullptr; + } + return status; +} + + +AccessorPair LocalPool::modifyData(store_address_t storeId) { + StorageAccessor accessor(storeId, this); + ReturnValue_t status = modifyData(storeId, &accessor.dataPointer, + &accessor.size_); + accessor.assignConstPointer(); + return AccessorPair(status, std::move(accessor)); +} + +ReturnValue_t LocalPool::modifyData(store_address_t storeId, + StorageAccessor& storeAccessor) { + storeAccessor.assignStore(this); + ReturnValue_t status = modifyData(storeId, &storeAccessor.dataPointer, + &storeAccessor.size_); + storeAccessor.assignConstPointer(); + return status; +} + +ReturnValue_t LocalPool::modifyData(store_address_t storeId, + uint8_t **packetPtr, size_t *size) { + ReturnValue_t status = RETURN_FAILED; + if (storeId.poolIndex >= NUMBER_OF_POOLS) { + return ILLEGAL_STORAGE_ID; + } + if ((storeId.packetIndex >= numberOfElements[storeId.poolIndex])) { + return ILLEGAL_STORAGE_ID; + } + + if (sizeLists[storeId.poolIndex][storeId.packetIndex] + != STORAGE_FREE) { + size_type packetPosition = getRawPosition(storeId); + *packetPtr = &store[storeId.poolIndex][packetPosition]; + *size = sizeLists[storeId.poolIndex][storeId.packetIndex]; + status = RETURN_OK; + } + else { + status = DATA_DOES_NOT_EXIST; + } + return status; +} + +ReturnValue_t LocalPool::deleteData(store_address_t storeId) { +#if FSFW_DEBUGGING == 1 + sif::debug << "Delete: Pool: " << std::dec << storeId.poolIndex + << " Index: " << storeId.packetIndex << std::endl; + +#endif + ReturnValue_t status = RETURN_OK; + size_type pageSize = getPageSize(storeId.poolIndex); + 
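+    // Deletion is only attempted if the pool index is valid (page size
+    // not zero) and the packet index lies within that page.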
if ((pageSize != 0) and + (storeId.packetIndex < numberOfElements[storeId.poolIndex])) { + uint16_t packetPosition = getRawPosition(storeId); + uint8_t* ptr = &store[storeId.poolIndex][packetPosition]; + std::memset(ptr, 0, pageSize); + //Set free list + sizeLists[storeId.poolIndex][storeId.packetIndex] = STORAGE_FREE; + } + else { + //pool_index or packet_index is too large + sif::error << "LocalPool::deleteData: Illegal store ID, no deletion!" + << std::endl; + status = ILLEGAL_STORAGE_ID; + } + return status; +} + +ReturnValue_t LocalPool::deleteData(uint8_t *ptr, size_t size, + store_address_t *storeId) { + store_address_t localId; + ReturnValue_t result = ILLEGAL_ADDRESS; + for (uint16_t n = 0; n < NUMBER_OF_POOLS; n++) { + //Not sure if new allocates all stores in order. so better be careful. + if ((store[n].data() <= ptr) and + (&store[n][numberOfElements[n]*elementSizes[n]] > ptr)) { + localId.poolIndex = n; + uint32_t deltaAddress = ptr - store[n].data(); + // Getting any data from the right "block" is ok. + // This is necessary, as IF's sometimes don't point to the first + // element of an object. + localId.packetIndex = deltaAddress / elementSizes[n]; + result = deleteData(localId); +#if FSFW_DEBUGGING == 1 + if (deltaAddress % elementSizes[n] != 0) { + sif::error << "LocalPool::deleteData: Address not aligned!" + << std::endl; + } +#endif + break; + } + } + if (storeId != nullptr) { + *storeId = localId; + } + return result; +} + + +ReturnValue_t LocalPool::initialize() { + ReturnValue_t result = SystemObject::initialize(); + if (result != RETURN_OK) { + return result; + } + internalErrorReporter = objectManager->get( + objects::INTERNAL_ERROR_REPORTER); + if (internalErrorReporter == nullptr){ + return ObjectManagerIF::INTERNAL_ERR_REPORTER_UNINIT; + } + + //Check if any pool size is large than the maximum allowed. + for (uint8_t count = 0; count < NUMBER_OF_POOLS; count++) { + if (elementSizes[count] >= STORAGE_FREE) { + sif::error << "LocalPool::initialize: Pool is too large! " + "Max. allowed size is: " << (STORAGE_FREE - 1) << std::endl; + return StorageManagerIF::POOL_TOO_LARGE; + } + } + return HasReturnvaluesIF::RETURN_OK; +} + +void LocalPool::clearStore() { + for(auto& sizeList: sizeLists) { + for(auto& size: sizeList) { + size = STORAGE_FREE; + } +// std::memset(sizeList[index], 0xff, +// numberOfElements[index] * sizeof(size_type)); + } + +} + +ReturnValue_t LocalPool::reserveSpace(const size_t size, + store_address_t *storeId, bool ignoreFault) { + ReturnValue_t status = getPoolIndex(size, &storeId->poolIndex); + if (status != RETURN_OK) { + sif::error << "LocalPool( " << std::hex << getObjectId() << std::dec + << " )::reserveSpace: Packet too large." << std::endl; + return status; + } + status = findEmpty(storeId->poolIndex, &storeId->packetIndex); + while (status != RETURN_OK && spillsToHigherPools) { + status = getPoolIndex(size, &storeId->poolIndex, storeId->poolIndex + 1); + if (status != RETURN_OK) { + //We don't find any fitting pool anymore. 
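+            // Abort the spill search; the failed status is reported below.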
+ break; + } + status = findEmpty(storeId->poolIndex, &storeId->packetIndex); + } + if (status == RETURN_OK) { +#if FSFW_DEBUGGING == 1 + sif::debug << "Reserve: Pool: " << std::dec + << storeId->poolIndex << " Index: " << storeId->packetIndex + << std::endl; +#endif + sizeLists[storeId->poolIndex][storeId->packetIndex] = size; + } + else { + if ((not ignoreFault) and (internalErrorReporter != nullptr)) { + internalErrorReporter->storeFull(); + } + } + return status; +} + +void LocalPool::write(store_address_t storeId, const uint8_t *data, + size_t size) { + uint8_t* ptr = nullptr; + size_type packetPosition = getRawPosition(storeId); + + // Size was checked before calling this function. + ptr = &store[storeId.poolIndex][packetPosition]; + std::memcpy(ptr, data, size); + sizeLists[storeId.poolIndex][storeId.packetIndex] = size; +} + +LocalPool::size_type LocalPool::getPageSize(max_pools_t poolIndex) { + if (poolIndex < NUMBER_OF_POOLS) { + return elementSizes[poolIndex]; + } + else { + return 0; + } +} + +void LocalPool::setToSpillToHigherPools(bool enable) { + this->spillsToHigherPools = enable; +} + +ReturnValue_t LocalPool::getPoolIndex(size_t packetSize, uint16_t *poolIndex, + uint16_t startAtIndex) { + for (uint16_t n = startAtIndex; n < NUMBER_OF_POOLS; n++) { +#if FSFW_DEBUGGING == 1 + sif::debug << "LocalPool " << getObjectId() << "::getPoolIndex: Pool: " + << n << ", Element Size: " << elementSizes[n] << std::endl; +#endif + if (elementSizes[n] >= packetSize) { + *poolIndex = n; + return RETURN_OK; + } + } + return DATA_TOO_LARGE; +} + +LocalPool::size_type LocalPool::getRawPosition(store_address_t storeId) { + return storeId.packetIndex * elementSizes[storeId.poolIndex]; +} + +ReturnValue_t LocalPool::findEmpty(n_pool_elem_t poolIndex, uint16_t *element) { + ReturnValue_t status = DATA_STORAGE_FULL; + for (uint16_t foundElement = 0; foundElement < numberOfElements[poolIndex]; + foundElement++) { + if (sizeLists[poolIndex][foundElement] == STORAGE_FREE) { + *element = foundElement; + status = RETURN_OK; + break; + } + } + return status; +} + +size_t LocalPool::getTotalSize(size_t* additionalSize) { + size_t totalSize = 0; + size_t sizesSize = 0; + for(uint8_t idx = 0; idx < NUMBER_OF_POOLS; idx ++) { + totalSize += elementSizes[idx] * numberOfElements[idx]; + sizesSize += numberOfElements[idx] * sizeof(size_type); + } + if(additionalSize != nullptr) { + *additionalSize = sizesSize; + } + return totalSize; +} + +void LocalPool::getFillCount(uint8_t *buffer, uint8_t *bytesWritten) { + if(bytesWritten == nullptr or buffer == nullptr) { + return; + } + + uint16_t reservedHits = 0; + uint8_t idx = 0; + uint16_t sum = 0; + for(; idx < NUMBER_OF_POOLS; idx ++) { + for(const auto& size: sizeLists[idx]) { + if(size != STORAGE_FREE) { + reservedHits++; + } + } + buffer[idx] = static_cast(reservedHits) / + numberOfElements[idx] * 100; + *bytesWritten += 1; + sum += buffer[idx]; + reservedHits = 0; + } + buffer[idx] = sum / NUMBER_OF_POOLS; + *bytesWritten += 1; +} + + +void LocalPool::clearPage(max_pools_t pageIndex) { + if(pageIndex >= NUMBER_OF_POOLS) { + return; + } + + // Mark the storage as free + for(auto& size: sizeLists[pageIndex]) { + size = STORAGE_FREE; + } + + // Set all the page content to 0. 
+ std::memset(store[pageIndex].data(), 0, elementSizes[pageIndex]); +} diff --git a/storagemanager/LocalPool.h b/storagemanager/LocalPool.h index 3a94c03d..db771152 100644 --- a/storagemanager/LocalPool.h +++ b/storagemanager/LocalPool.h @@ -7,57 +7,93 @@ #include "../serviceinterface/ServiceInterfaceStream.h" #include "../internalError/InternalErrorReporterIF.h" #include "../storagemanager/StorageAccessor.h" -#include + +#include +#include +#include +#include /** - * @brief The LocalPool class provides an intermediate data storage with - * a fixed pool size policy. - * @details The class implements the StorageManagerIF interface. While the - * total number of pools is fixed, the element sizes in one pool and - * the number of pool elements per pool are set on construction. - * The full amount of memory is allocated on construction. - * The overhead is 4 byte per pool element to store the size - * information of each stored element. - * To maintain an "empty" information, the pool size is limited to - * 0xFFFF-1 bytes. - * It is possible to store empty packets in the pool. - * The local pool is NOT thread-safe. - * @author Bastian Baetz + * @brief The LocalPool class provides an intermediate data storage with + * a fixed pool size policy. + * @details + * The class implements the StorageManagerIF interface. While the total number + * of pools is fixed, the element sizes in one pool and the number of pool + * elements per pool are set on construction. The full amount of memory is + * allocated on construction. + * The overhead is 4 byte per pool element to store the size information of + * each stored element. To maintain an "empty" information, the pool size is + * limited to 0xFFFF-1 bytes. + * It is possible to store empty packets in the pool. + * The local pool is NOT thread-safe. */ -template class LocalPool: public SystemObject, public StorageManagerIF { public: - /** - * @brief This definition generally sets the number of different sized pools. - * @details This must be less than the maximum number of pools (currently 0xff). - */ - // static const uint32_t NUMBER_OF_POOLS; - /** - * @brief This is the default constructor for a pool manager instance. - * @details By passing two arrays of size NUMBER_OF_POOLS, the constructor - * allocates memory (with @c new) for store and size_list. These - * regions are all set to zero on start up. - * @param setObjectId The object identifier to be set. This allows for - * multiple instances of LocalPool in the system. - * @param element_sizes An array of size NUMBER_OF_POOLS in which the size - * of a single element in each pool is determined. - * The sizes must be provided in ascending order. - * - * @param n_elements An array of size NUMBER_OF_POOLS in which the - * number of elements for each pool is determined. - * The position of these values correspond to those in - * element_sizes. - * @param registered Register the pool in object manager or not. - * Default is false (local pool). - * @param spillsToHigherPools A variable to determine whether - * higher n pools are used if the store is full. 
-     */
-    LocalPool(object_id_t setObjectId,
-            const uint16_t element_sizes[NUMBER_OF_POOLS],
-            const uint16_t n_elements[NUMBER_OF_POOLS],
-            bool registered = false,
-            bool spillsToHigherPools = false);
+    using pool_elem_size_t = size_type;
+    using n_pool_elem_t = uint16_t;
+    using LocalPoolCfgPair = std::pair<n_pool_elem_t, pool_elem_size_t>;
+
+    // The pool configuration has to be provided with the element sizes in
+    // ascending order, but specifying the number of pool elements as the
+    // first pair member is more intuitive. Therefore, a custom comparator
+    // is provided which orders the pairs by their element size.
+    struct LocalPoolConfigCmp
+    {
+        bool operator ()(const LocalPoolCfgPair &a,
+                const LocalPoolCfgPair &b) const
+        {
+            if(a.second < b.second) {
+                return true;
+            }
+            else if(a.second > b.second) {
+                return false;
+            }
+            else {
+                if(a.first < b.first) {
+                    return true;
+                }
+                else {
+                    return false;
+                }
+            }
+        }
+    };
+    using LocalPoolConfig = std::multiset<LocalPoolCfgPair, LocalPoolConfigCmp>;
+
+    /**
+     * @brief   This definition generally sets the number of different sized
+     *          pools. It is derived from the number of pairs inside the
+     *          LocalPoolConfig set on object creation.
+     * @details
+     * This must be less than the maximum number of pools (currently 0xff).
+     */
+    const max_pools_t NUMBER_OF_POOLS;
+
+    /**
+     * @brief   This is the default constructor for a pool manager instance.
+     * @details
+     * The pool is configured by passing a set of pairs into the constructor.
+     * The first value of each pair determines the number of elements on the
+     * respective page of the pool, while the second value determines the
+     * size of an element on that page.
+     * All regions are set to zero on start up.
+     * @param setObjectId   The object identifier to be set. This allows for
+     *                      multiple instances of LocalPool in the system.
+     * @param poolConfig
+     * A set of pairs which configures the number of pages in the pool, the
+     * size of an element on a page and the number of elements on a page,
+     * and thereby the total size of the pool, while also implicitly sorting
+     * the pairs in the correct order.
+     * @param registered
+     * Determines whether the pool is registered in the object manager or not.
+     * @param spillsToHigherPools   A variable to determine whether
+     *                              higher n pools are used if the store is full.
+     */
+    LocalPool(object_id_t setObjectId, const LocalPoolConfig& poolConfig,
+            bool registered = false, bool spillsToHigherPools = false);
+
+    void setToSpillToHigherPools(bool enable);
+
+    /**
+     * @brief   In the LocalPool's destructor all allocated memory is freed.
*/ @@ -66,25 +102,49 @@ public: /** * Documentation: See StorageManagerIF.h */ - ReturnValue_t addData(store_address_t* storageId, const uint8_t * data, + ReturnValue_t addData(store_address_t* storeId, const uint8_t * data, size_t size, bool ignoreFault = false) override; - ReturnValue_t getFreeElement(store_address_t* storageId,const size_t size, - uint8_t** p_data, bool ignoreFault = false) override; + ReturnValue_t getFreeElement(store_address_t* storeId,const size_t size, + uint8_t** pData, bool ignoreFault = false) override; - ConstAccessorPair getData(store_address_t packet_id) override; - ReturnValue_t getData(store_address_t packet_id, ConstStorageAccessor&) override; - ReturnValue_t getData(store_address_t packet_id, const uint8_t** packet_ptr, + ConstAccessorPair getData(store_address_t storeId) override; + ReturnValue_t getData(store_address_t storeId, + ConstStorageAccessor& constAccessor) override; + ReturnValue_t getData(store_address_t storeId, + const uint8_t** packet_ptr, size_t * size) override; + + AccessorPair modifyData(store_address_t storeId) override; + ReturnValue_t modifyData(store_address_t storeId, + StorageAccessor& storeAccessor) override; + ReturnValue_t modifyData(store_address_t storeId, uint8_t** packet_ptr, size_t * size) override; - AccessorPair modifyData(store_address_t packet_id) override; - ReturnValue_t modifyData(store_address_t packet_id, StorageAccessor&) override; - ReturnValue_t modifyData(store_address_t packet_id, uint8_t** packet_ptr, - size_t * size) override; - - virtual ReturnValue_t deleteData(store_address_t) override; + virtual ReturnValue_t deleteData(store_address_t storeId) override; virtual ReturnValue_t deleteData(uint8_t* ptr, size_t size, - store_address_t* storeId = NULL) override; + store_address_t* storeId = nullptr) override; + + /** + * Get the total size of allocated memory for pool data. + * There is an additional overhead of the sizes of elements which will + * be assigned to additionalSize + * @return + */ + size_t getTotalSize(size_t* additionalSize) override; + + /** + * Get the fill count of the pool. Each character inside the provided + * buffer will be assigned to a rounded percentage fill count for each + * page. The last written byte (at the index bytesWritten - 1) + * will contain the total fill count of the pool as a mean of the + * percentages of single pages. + * @param buffer + * @param maxSize + */ + void getFillCount(uint8_t* buffer, uint8_t* bytesWritten) override; + void clearStore() override; + void clearPage(max_pools_t pageIndex) override; + ReturnValue_t initialize() override; protected: /** @@ -94,43 +154,48 @@ protected: * @return - #RETURN_OK on success, * - the return codes of #getPoolIndex or #findEmpty otherwise. */ - virtual ReturnValue_t reserveSpace(const uint32_t size, + virtual ReturnValue_t reserveSpace(const size_t size, store_address_t* address, bool ignoreFault); - InternalErrorReporterIF *internalErrorReporter; private: - /** - * Indicates that this element is free. - * This value limits the maximum size of a pool. Change to larger data type - * if increase is required. - */ - static const uint32_t STORAGE_FREE = 0xFFFFFFFF; + /** + * Indicates that this element is free. + * This value limits the maximum size of a pool. + * Change to larger data type if increase is required. + */ + static const size_type STORAGE_FREE = std::numeric_limits::max(); /** * @brief In this array, the element sizes of each pool is stored. * @details The sizes are maintained for internal pool management. 
The sizes * must be set in ascending order on construction. */ - uint32_t element_sizes[NUMBER_OF_POOLS]; + std::vector elementSizes = + std::vector(NUMBER_OF_POOLS); /** * @brief n_elements stores the number of elements per pool. * @details These numbers are maintained for internal pool management. */ - uint16_t n_elements[NUMBER_OF_POOLS]; + std::vector numberOfElements = + std::vector(NUMBER_OF_POOLS); /** * @brief store represents the actual memory pool. * @details It is an array of pointers to memory, which was allocated with * a @c new call on construction. */ - uint8_t* store[NUMBER_OF_POOLS]; + std::vector> store = + std::vector>(NUMBER_OF_POOLS); + /** * @brief The size_list attribute stores the size values of every pool element. * @details As the number of elements is determined on construction, the size list * is also dynamically allocated there. */ - uint32_t* size_list[NUMBER_OF_POOLS]; + std::vector> sizeLists = + std::vector>(NUMBER_OF_POOLS); + //! A variable to determine whether higher n pools are used if //! the store is full. - bool spillsToHigherPools; + bool spillsToHigherPools = false; /** * @brief This method safely stores the given data in the given packet_id. * @details It also sets the size in size_list. The method does not perform @@ -139,30 +204,24 @@ private: * @param data The data to be stored. * @param size The size of the data to be stored. */ - void write(store_address_t packet_id, const uint8_t* data, size_t size); + void write(store_address_t packetId, const uint8_t* data, size_t size); /** * @brief A helper method to read the element size of a certain pool. * @param pool_index The pool in which to look. * @return Returns the size of an element or 0. */ - uint32_t getPageSize(uint16_t pool_index); - /** - * @brief This helper method looks up a fitting pool for a given size. - * @details The pools are looked up in ascending order, so the first that - * fits is used. - * @param packet_size The size of the data to be stored. - * @return Returns the pool that fits or StorageManagerIF::INVALID_ADDRESS. - */ + size_type getPageSize(max_pools_t poolIndex); + /** * @brief This helper method looks up a fitting pool for a given size. * @details The pools are looked up in ascending order, so the first that * fits is used. * @param packet_size The size of the data to be stored. * @param[out] poolIndex The fitting pool index found. - * @return - #RETURN_OK on success, - * - #DATA_TOO_LARGE otherwise. + * @return - @c RETURN_OK on success, + * - @c DATA_TOO_LARGE otherwise. */ - ReturnValue_t getPoolIndex(size_t packet_size, uint16_t* poolIndex, + ReturnValue_t getPoolIndex(size_t packetSize, uint16_t* poolIndex, uint16_t startAtIndex = 0); /** * @brief This helper method calculates the true array position in store @@ -172,7 +231,7 @@ private: * @param packet_id The packet id to look up. * @return Returns the position of the data in store. */ - uint32_t getRawPosition(store_address_t packet_id); + size_type getRawPosition(store_address_t storeId); /** * @brief This is a helper method to find an empty element in a given pool. 
* @details The method searches size_list for the first empty element, so @@ -182,9 +241,9 @@ private: * @return - #RETURN_OK on success, * - #DATA_STORAGE_FULL if the store is full */ - ReturnValue_t findEmpty(uint16_t pool_index, uint16_t* element); + ReturnValue_t findEmpty(n_pool_elem_t poolIndex, uint16_t* element); + + InternalErrorReporterIF *internalErrorReporter = nullptr; }; -#include "LocalPool.tpp" - #endif /* FSFW_STORAGEMANAGER_LOCALPOOL_H_ */ diff --git a/storagemanager/LocalPool.tpp b/storagemanager/LocalPool.tpp deleted file mode 100644 index 5e61efe4..00000000 --- a/storagemanager/LocalPool.tpp +++ /dev/null @@ -1,305 +0,0 @@ -#ifndef FSFW_STORAGEMANAGER_LOCALPOOL_TPP_ -#define FSFW_STORAGEMANAGER_LOCALPOOL_TPP_ - -#ifndef FSFW_STORAGEMANAGER_LOCALPOOL_H_ -#error Include LocalPool.h before LocalPool.tpp! -#endif - -template -inline LocalPool::LocalPool(object_id_t setObjectId, - const uint16_t element_sizes[NUMBER_OF_POOLS], - const uint16_t n_elements[NUMBER_OF_POOLS], bool registered, - bool spillsToHigherPools) : - SystemObject(setObjectId, registered), internalErrorReporter(nullptr), - spillsToHigherPools(spillsToHigherPools) -{ - for (uint16_t n = 0; n < NUMBER_OF_POOLS; n++) { - this->element_sizes[n] = element_sizes[n]; - this->n_elements[n] = n_elements[n]; - store[n] = new uint8_t[n_elements[n] * element_sizes[n]]; - size_list[n] = new uint32_t[n_elements[n]]; - memset(store[n], 0x00, (n_elements[n] * element_sizes[n])); - //TODO checkme - memset(size_list[n], STORAGE_FREE, (n_elements[n] * sizeof(**size_list))); - } -} - - -template -inline ReturnValue_t LocalPool::findEmpty(uint16_t pool_index, - uint16_t* element) { - ReturnValue_t status = DATA_STORAGE_FULL; - for (uint16_t foundElement = 0; foundElement < n_elements[pool_index]; - foundElement++) { - if (size_list[pool_index][foundElement] == STORAGE_FREE) { - *element = foundElement; - status = RETURN_OK; - break; - } - } - return status; -} - -template -inline void LocalPool::write(store_address_t packet_id, - const uint8_t* data, size_t size) { - uint8_t* ptr; - uint32_t packet_position = getRawPosition(packet_id); - - //check size? -> Not necessary, because size is checked before calling this function. - ptr = &store[packet_id.pool_index][packet_position]; - memcpy(ptr, data, size); - size_list[packet_id.pool_index][packet_id.packet_index] = size; -} - -//Returns page size of 0 in case store_index is illegal -template -inline uint32_t LocalPool::getPageSize(uint16_t pool_index) { - if (pool_index < NUMBER_OF_POOLS) { - return element_sizes[pool_index]; - } else { - return 0; - } -} - -template -inline ReturnValue_t LocalPool::getPoolIndex( - size_t packet_size, uint16_t* poolIndex, uint16_t startAtIndex) { - for (uint16_t n = startAtIndex; n < NUMBER_OF_POOLS; n++) { - //debug << "LocalPool " << getObjectId() << "::getPoolIndex: Pool: " << - // n << ", Element Size: " << element_sizes[n] << std::endl; - if (element_sizes[n] >= packet_size) { - *poolIndex = n; - return RETURN_OK; - } - } - return DATA_TOO_LARGE; -} - -template -inline uint32_t LocalPool::getRawPosition( - store_address_t packet_id) { - return packet_id.packet_index * element_sizes[packet_id.pool_index]; -} - -template -inline ReturnValue_t LocalPool::reserveSpace( - const uint32_t size, store_address_t* address, bool ignoreFault) { - ReturnValue_t status = getPoolIndex(size, &address->pool_index); - if (status != RETURN_OK) { - sif::error << "LocalPool( " << std::hex << getObjectId() << std::dec - << " )::reserveSpace: Packet too large." 
<< std::endl; - return status; - } - status = findEmpty(address->pool_index, &address->packet_index); - while (status != RETURN_OK && spillsToHigherPools) { - status = getPoolIndex(size, &address->pool_index, address->pool_index + 1); - if (status != RETURN_OK) { - //We don't find any fitting pool anymore. - break; - } - status = findEmpty(address->pool_index, &address->packet_index); - } - if (status == RETURN_OK) { - // if (getObjectId() == objects::IPC_STORE && address->pool_index >= 3) { - // debug << "Reserve: Pool: " << std::dec << address->pool_index << - // " Index: " << address->packet_index << std::endl; - // } - - size_list[address->pool_index][address->packet_index] = size; - } else { - if (!ignoreFault and internalErrorReporter != nullptr) { - internalErrorReporter->storeFull(); - } - // error << "LocalPool( " << std::hex << getObjectId() << std::dec - // << " )::reserveSpace: Packet store is full." << std::endl; - } - return status; -} - -template -inline LocalPool::~LocalPool(void) { - for (uint16_t n = 0; n < NUMBER_OF_POOLS; n++) { - delete[] store[n]; - delete[] size_list[n]; - } -} - -template -inline ReturnValue_t LocalPool::addData( - store_address_t* storageId, const uint8_t* data, size_t size, - bool ignoreFault) { - ReturnValue_t status = reserveSpace(size, storageId, ignoreFault); - if (status == RETURN_OK) { - write(*storageId, data, size); - } - return status; -} - -template -inline ReturnValue_t LocalPool::getFreeElement( - store_address_t* storageId, const size_t size, - uint8_t** p_data, bool ignoreFault) { - ReturnValue_t status = reserveSpace(size, storageId, ignoreFault); - if (status == RETURN_OK) { - *p_data = &store[storageId->pool_index][getRawPosition(*storageId)]; - } else { - *p_data = NULL; - } - return status; -} - -template -inline ConstAccessorPair LocalPool::getData( - store_address_t storeId) { - uint8_t* tempData = nullptr; - ConstStorageAccessor constAccessor(storeId, this); - ReturnValue_t status = modifyData(storeId, &tempData, &constAccessor.size_); - constAccessor.constDataPointer = tempData; - return ConstAccessorPair(status, std::move(constAccessor)); -} - -template -inline ReturnValue_t LocalPool::getData(store_address_t storeId, - ConstStorageAccessor& storeAccessor) { - uint8_t* tempData = nullptr; - ReturnValue_t status = modifyData(storeId, &tempData, &storeAccessor.size_); - storeAccessor.assignStore(this); - storeAccessor.constDataPointer = tempData; - return status; -} - -template -inline ReturnValue_t LocalPool::getData( - store_address_t packet_id, const uint8_t** packet_ptr, size_t* size) { - uint8_t* tempData = nullptr; - ReturnValue_t status = modifyData(packet_id, &tempData, size); - *packet_ptr = tempData; - return status; -} - -template -inline AccessorPair LocalPool::modifyData( - store_address_t storeId) { - StorageAccessor accessor(storeId, this); - ReturnValue_t status = modifyData(storeId, &accessor.dataPointer, - &accessor.size_); - accessor.assignConstPointer(); - return AccessorPair(status, std::move(accessor)); -} - -template -inline ReturnValue_t LocalPool::modifyData( - store_address_t storeId, StorageAccessor& storeAccessor) { - storeAccessor.assignStore(this); - ReturnValue_t status = modifyData(storeId, &storeAccessor.dataPointer, - &storeAccessor.size_); - storeAccessor.assignConstPointer(); - return status; -} - -template -inline ReturnValue_t LocalPool::modifyData( - store_address_t packet_id, uint8_t** packet_ptr, size_t* size) { - ReturnValue_t status = RETURN_FAILED; - if (packet_id.pool_index >= 
NUMBER_OF_POOLS) { - return ILLEGAL_STORAGE_ID; - } - if ((packet_id.packet_index >= n_elements[packet_id.pool_index])) { - return ILLEGAL_STORAGE_ID; - } - if (size_list[packet_id.pool_index][packet_id.packet_index] - != STORAGE_FREE) { - uint32_t packet_position = getRawPosition(packet_id); - *packet_ptr = &store[packet_id.pool_index][packet_position]; - *size = size_list[packet_id.pool_index][packet_id.packet_index]; - status = RETURN_OK; - } else { - status = DATA_DOES_NOT_EXIST; - } - return status; -} - -template -inline ReturnValue_t LocalPool::deleteData( - store_address_t packet_id) { - //if (getObjectId() == objects::IPC_STORE && packet_id.pool_index >= 3) { - // debug << "Delete: Pool: " << std::dec << packet_id.pool_index << " Index: " - // << packet_id.packet_index << std::endl; - //} - ReturnValue_t status = RETURN_OK; - uint32_t page_size = getPageSize(packet_id.pool_index); - if ((page_size != 0) - && (packet_id.packet_index < n_elements[packet_id.pool_index])) { - uint16_t packet_position = getRawPosition(packet_id); - uint8_t* ptr = &store[packet_id.pool_index][packet_position]; - memset(ptr, 0, page_size); - //Set free list - size_list[packet_id.pool_index][packet_id.packet_index] = STORAGE_FREE; - } else { - //pool_index or packet_index is too large - sif::error << "LocalPool:deleteData failed." << std::endl; - status = ILLEGAL_STORAGE_ID; - } - return status; -} - -template -inline void LocalPool::clearStore() { - for (uint16_t n = 0; n < NUMBER_OF_POOLS; n++) { - //TODO checkme - memset(size_list[n], STORAGE_FREE, (n_elements[n] * sizeof(**size_list))); - } -} - -template -inline ReturnValue_t LocalPool::deleteData(uint8_t* ptr, - size_t size, store_address_t* storeId) { - store_address_t localId; - ReturnValue_t result = ILLEGAL_ADDRESS; - for (uint16_t n = 0; n < NUMBER_OF_POOLS; n++) { - //Not sure if new allocates all stores in order. so better be careful. - if ((store[n] <= ptr) && (&store[n][n_elements[n]*element_sizes[n]]) > ptr) { - localId.pool_index = n; - uint32_t deltaAddress = ptr - store[n]; - // Getting any data from the right "block" is ok. - // This is necessary, as IF's sometimes don't point to the first - // element of an object. - localId.packet_index = deltaAddress / element_sizes[n]; - result = deleteData(localId); - //if (deltaAddress % element_sizes[n] != 0) { - // error << "Pool::deleteData: address not aligned!" << std::endl; - //} - break; - } - } - if (storeId != NULL) { - *storeId = localId; - } - return result; -} - -template -inline ReturnValue_t LocalPool::initialize() { - ReturnValue_t result = SystemObject::initialize(); - if (result != RETURN_OK) { - return result; - } - internalErrorReporter = objectManager->get( - objects::INTERNAL_ERROR_REPORTER); - if (internalErrorReporter == nullptr){ - return ObjectManagerIF::INTERNAL_ERR_REPORTER_UNINIT; - } - - //Check if any pool size is large than the maximum allowed. - for (uint8_t count = 0; count < NUMBER_OF_POOLS; count++) { - if (element_sizes[count] >= STORAGE_FREE) { - sif::error << "LocalPool::initialize: Pool is too large! " - "Max. 
allowed size is: " << (STORAGE_FREE - 1) << std::endl; - return StorageManagerIF::POOL_TOO_LARGE; - } - } - return RETURN_OK; -} - -#endif /* FSFW_STORAGEMANAGER_LOCALPOOL_TPP_ */ diff --git a/storagemanager/PoolManager.cpp b/storagemanager/PoolManager.cpp new file mode 100644 index 00000000..9c801a3d --- /dev/null +++ b/storagemanager/PoolManager.cpp @@ -0,0 +1,59 @@ +#include "PoolManager.h" + +PoolManager::PoolManager(object_id_t setObjectId, + const LocalPoolConfig& localPoolConfig): + LocalPool(setObjectId, localPoolConfig, true) { + mutex = MutexFactory::instance()->createMutex(); +} + + +PoolManager::~PoolManager(void) { + MutexFactory::instance()->deleteMutex(mutex); +} + + +ReturnValue_t PoolManager::reserveSpace(const size_t size, + store_address_t* address, bool ignoreFault) { + MutexHelper mutexHelper(mutex, MutexIF::TimeoutType::WAITING, + mutexTimeoutMs); + ReturnValue_t status = LocalPool::reserveSpace(size, + address,ignoreFault); + return status; +} + + +ReturnValue_t PoolManager::deleteData( + store_address_t storeId) { +#if FSFW_DEBUGGING == 1 + sif::debug << "PoolManager( " << translateObject(getObjectId()) << + " )::deleteData from store " << storeId.poolIndex << + ". id is "<< storeId.packetIndex << std::endl; +#endif + MutexHelper mutexHelper(mutex, MutexIF::TimeoutType::WAITING, + mutexTimeoutMs); + return LocalPool::deleteData(storeId); +} + + +ReturnValue_t PoolManager::deleteData(uint8_t* buffer, + size_t size, store_address_t* storeId) { + MutexHelper mutexHelper(mutex, MutexIF::TimeoutType::WAITING, 20); + ReturnValue_t status = LocalPool::deleteData(buffer, + size, storeId); + return status; +} + + +void PoolManager::setMutexTimeout( + uint32_t mutexTimeoutMs) { + this->mutexTimeoutMs = mutexTimeoutMs; +} + +ReturnValue_t PoolManager::lockMutex(MutexIF::TimeoutType timeoutType, + uint32_t timeoutMs) { + return mutex->lockMutex(timeoutType, timeoutMs); +} + +ReturnValue_t PoolManager::unlockMutex() { + return mutex->unlockMutex(); +} diff --git a/storagemanager/PoolManager.h b/storagemanager/PoolManager.h index 8cc6c065..5786a225 100644 --- a/storagemanager/PoolManager.h +++ b/storagemanager/PoolManager.h @@ -9,16 +9,20 @@ /** * @brief The PoolManager class provides an intermediate data storage with * a fixed pool size policy for inter-process communication. - * @details Uses local pool calls but is thread safe by protecting the call - * with a lock. + * @details + * Uses local pool calls but is thread safe by protecting most calls + * with a lock. The developer can lock the pool with the provided API + * if the lock needs to persists beyond the function call. + * + * Other than that, the class provides the same interface as the LocalPool + * class. The class is always registered as a system object as it is assumed + * it will always be used concurrently (if this is not the case, it is + * recommended to use the LocalPool class instead). * @author Bastian Baetz */ -template -class PoolManager : public LocalPool { +class PoolManager: public LocalPool { public: - PoolManager(object_id_t setObjectId, - const uint16_t element_sizes[NUMBER_OF_POOLS], - const uint16_t n_elements[NUMBER_OF_POOLS]); + PoolManager(object_id_t setObjectId, const LocalPoolConfig& poolConfig); /** * @brief In the PoolManager's destructor all allocated memory @@ -26,6 +30,12 @@ public: */ virtual ~PoolManager(); + /** + * Set the default mutex timeout for internal calls. 
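+     * The default timeout used for these internal locks is 20 milliseconds.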
+ * @param mutexTimeoutMs + */ + void setMutexTimeout(uint32_t mutexTimeoutMs); + /** * @brief LocalPool overrides for thread-safety. Decorator function * which wraps LocalPool calls with a mutex protection. @@ -34,12 +44,23 @@ public: ReturnValue_t deleteData(uint8_t* buffer, size_t size, store_address_t* storeId = nullptr) override; - void setMutexTimeout(uint32_t mutexTimeoutMs); + /** + * The developer is allowed to lock the mutex in case the lock needs + * to persist beyond the function calls which are not protected by the + * class. + * @param timeoutType + * @param timeoutMs + * @return + */ + ReturnValue_t lockMutex(MutexIF::TimeoutType timeoutType, + uint32_t timeoutMs); + ReturnValue_t unlockMutex(); + protected: //! Default mutex timeout value to prevent permanent blocking. uint32_t mutexTimeoutMs = 20; - ReturnValue_t reserveSpace(const uint32_t size, store_address_t* address, + ReturnValue_t reserveSpace(const size_t size, store_address_t* address, bool ignoreFault) override; /** @@ -51,6 +72,4 @@ protected: MutexIF* mutex; }; -#include "PoolManager.tpp" - #endif /* FSFW_STORAGEMANAGER_POOLMANAGER_H_ */ diff --git a/storagemanager/PoolManager.tpp b/storagemanager/PoolManager.tpp deleted file mode 100644 index 2be44ece..00000000 --- a/storagemanager/PoolManager.tpp +++ /dev/null @@ -1,56 +0,0 @@ -#ifndef FRAMEWORK_STORAGEMANAGER_POOLMANAGER_TPP_ -#define FRAMEWORK_STORAGEMANAGER_POOLMANAGER_TPP_ - -#ifndef FSFW_STORAGEMANAGER_POOLMANAGER_H_ -#error Include PoolManager.h before PoolManager.tpp! -#endif - -template -inline PoolManager::PoolManager(object_id_t setObjectId, - const uint16_t element_sizes[NUMBER_OF_POOLS], - const uint16_t n_elements[NUMBER_OF_POOLS]) : - LocalPool(setObjectId, element_sizes, n_elements, true) { - mutex = MutexFactory::instance()->createMutex(); -} - -template -inline PoolManager::~PoolManager(void) { - MutexFactory::instance()->deleteMutex(mutex); -} - -template -inline ReturnValue_t PoolManager::reserveSpace( - const uint32_t size, store_address_t* address, bool ignoreFault) { - MutexHelper mutexHelper(mutex,MutexIF::WAITING, mutexTimeoutMs); - ReturnValue_t status = LocalPool::reserveSpace(size, - address,ignoreFault); - return status; -} - -template -inline ReturnValue_t PoolManager::deleteData( - store_address_t packet_id) { - // debug << "PoolManager( " << translateObject(getObjectId()) << - // " )::deleteData from store " << packet_id.pool_index << - // ". id is "<< packet_id.packet_index << std::endl; - MutexHelper mutexHelper(mutex,MutexIF::WAITING, mutexTimeoutMs); - ReturnValue_t status = LocalPool::deleteData(packet_id); - return status; -} - -template -inline ReturnValue_t PoolManager::deleteData(uint8_t* buffer, - size_t size, store_address_t* storeId) { - MutexHelper mutexHelper(mutex,MutexIF::WAITING, mutexTimeoutMs); - ReturnValue_t status = LocalPool::deleteData(buffer, - size, storeId); - return status; -} - -template -inline void PoolManager::setMutexTimeout( - uint32_t mutexTimeoutMs) { - this->mutexTimeout = mutexTimeoutMs; -} - -#endif /* FRAMEWORK_STORAGEMANAGER_POOLMANAGER_TPP_ */ diff --git a/storagemanager/StorageAccessor.h b/storagemanager/StorageAccessor.h index 5cf15d50..d5b383eb 100644 --- a/storagemanager/StorageAccessor.h +++ b/storagemanager/StorageAccessor.h @@ -10,9 +10,7 @@ class StorageManagerIF; */ class StorageAccessor: public ConstStorageAccessor { //! StorageManager classes have exclusive access to private variables. 
- template friend class PoolManager; - template friend class LocalPool; public: StorageAccessor(store_address_t storeId); diff --git a/storagemanager/StorageManagerIF.h b/storagemanager/StorageManagerIF.h index 834e7563..769616d7 100644 --- a/storagemanager/StorageManagerIF.h +++ b/storagemanager/StorageManagerIF.h @@ -28,6 +28,9 @@ using ConstAccessorPair = std::pair; */ class StorageManagerIF : public HasReturnvaluesIF { public: + using size_type = size_t; + using max_pools_t = uint8_t; + static const uint8_t INTERFACE_ID = CLASS_ID::STORAGE_MANAGER_IF; //!< The unique ID for return codes for this interface. static const ReturnValue_t DATA_TOO_LARGE = MAKE_RETURN_CODE(1); //!< This return code indicates that the data to be stored is too large for the store. static const ReturnValue_t DATA_STORAGE_FULL = MAKE_RETURN_CODE(2); //!< This return code indicates that a data storage is full. @@ -40,7 +43,9 @@ public: static const Event GET_DATA_FAILED = MAKE_EVENT(0, SEVERITY::LOW); static const Event STORE_DATA_FAILED = MAKE_EVENT(1, SEVERITY::LOW); - static const uint32_t INVALID_ADDRESS = 0xFFFFFFFF; //!< Indicates an invalid (i.e unused) storage address. + //!< Indicates an invalid (i.e unused) storage address. + static const uint32_t INVALID_ADDRESS = 0xFFFFFFFF; + /** * @brief This is the empty virtual destructor as required for C++ interfaces. */ @@ -164,6 +169,22 @@ public: * Use with care! */ virtual void clearStore() = 0; + + /** + * Clears a page in the store. Use with care! + * @param pageIndex + */ + virtual void clearPage(uint8_t pageIndex) = 0; + + /** + * Get the fill count of the pool. The exact form will be implementation + * dependant. + * @param buffer + * @param bytesWritten + */ + virtual void getFillCount(uint8_t* buffer, uint8_t* bytesWritten) = 0; + + virtual size_t getTotalSize(size_t* additionalSize) = 0; }; #endif /* FSFW_STORAGEMANAGER_STORAGEMANAGERIF_H_ */ diff --git a/storagemanager/storeAddress.h b/storagemanager/storeAddress.h index 994998a6..ea72f6f8 100644 --- a/storagemanager/storeAddress.h +++ b/storagemanager/storeAddress.h @@ -33,7 +33,7 @@ union store_address_t { * @param packetIndex */ store_address_t(uint16_t poolIndex, uint16_t packetIndex): - pool_index(poolIndex),packet_index(packetIndex){} + poolIndex(poolIndex), packetIndex(packetIndex){} /** * A structure with two elements to access the store address pool-like. */ @@ -41,11 +41,11 @@ union store_address_t { /** * The index in which pool the packet lies. */ - uint16_t pool_index; + uint16_t poolIndex; /** * The position in the chosen pool. */ - uint16_t packet_index; + uint16_t packetIndex; }; /** * Alternative access to the raw value. diff --git a/tmtcpacket/packetmatcher/PacketMatchTree.cpp b/tmtcpacket/packetmatcher/PacketMatchTree.cpp index 610829e5..ac72b3e7 100644 --- a/tmtcpacket/packetmatcher/PacketMatchTree.cpp +++ b/tmtcpacket/packetmatcher/PacketMatchTree.cpp @@ -1,21 +1,32 @@ -#include "../../tmtcpacket/packetmatcher/ApidMatcher.h" -#include "../../tmtcpacket/packetmatcher/PacketMatchTree.h" -#include "../../tmtcpacket/packetmatcher/ServiceMatcher.h" -#include "../../tmtcpacket/packetmatcher/SubserviceMatcher.h" +#include "ApidMatcher.h" +#include "PacketMatchTree.h" +#include "ServiceMatcher.h" +#include "SubserviceMatcher.h" + +// This should be configurable.. 
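+// Each pair is {number of elements, element size}; the LocalPoolConfig
+// multiset orders the entries by ascending element size.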
+const LocalPool::LocalPoolConfig PacketMatchTree::poolConfig = { + {10, sizeof(ServiceMatcher)}, + {20, sizeof(SubServiceMatcher)}, + {2, sizeof(ApidMatcher)}, + {40, sizeof(PacketMatchTree::Node)} +}; PacketMatchTree::PacketMatchTree(Node* root) : - MatchTree(root, 2), factoryBackend(0, POOL_SIZES, - N_ELEMENTS, false, true), factory(&factoryBackend) { + MatchTree(root, 2), + factoryBackend(0, poolConfig, false, true), + factory(&factoryBackend) { } PacketMatchTree::PacketMatchTree(iterator root) : - MatchTree(root.element, 2), factoryBackend(0, - POOL_SIZES, N_ELEMENTS, false, true), factory(&factoryBackend) { + MatchTree(root.element, 2), + factoryBackend(0, poolConfig, false, true), + factory(&factoryBackend) { } PacketMatchTree::PacketMatchTree() : - MatchTree((Node*) NULL, 2), factoryBackend(0, - POOL_SIZES, N_ELEMENTS, false, true), factory(&factoryBackend) { + MatchTree((Node*) NULL, 2), + factoryBackend(0, poolConfig, false, true), + factory(&factoryBackend) { } PacketMatchTree::~PacketMatchTree() { @@ -172,11 +183,6 @@ ReturnValue_t PacketMatchTree::initialize() { return factoryBackend.initialize(); } -const uint16_t PacketMatchTree::POOL_SIZES[N_POOLS] = { sizeof(ServiceMatcher), - sizeof(SubServiceMatcher), sizeof(ApidMatcher), - sizeof(PacketMatchTree::Node) }; -//Maximum number of types and subtypes to filter should be more than sufficient. -const uint16_t PacketMatchTree::N_ELEMENTS[N_POOLS] = { 10, 20, 2, 40 }; ReturnValue_t PacketMatchTree::changeMatch(bool addToMatch, uint16_t apid, uint8_t type, uint8_t subtype) { diff --git a/tmtcpacket/packetmatcher/PacketMatchTree.h b/tmtcpacket/packetmatcher/PacketMatchTree.h index 86fb087e..54fc856c 100644 --- a/tmtcpacket/packetmatcher/PacketMatchTree.h +++ b/tmtcpacket/packetmatcher/PacketMatchTree.h @@ -23,8 +23,9 @@ protected: ReturnValue_t cleanUpElement(iterator position); private: static const uint8_t N_POOLS = 4; - LocalPool factoryBackend; + LocalPool factoryBackend; PlacementFactory factory; + static const LocalPool::LocalPoolConfig poolConfig; static const uint16_t POOL_SIZES[N_POOLS]; static const uint16_t N_ELEMENTS[N_POOLS]; template
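For reference, a minimal usage sketch of the new configuration API. The pool layout, the createIpcStore helper and the objects::IPC_STORE identifier are illustrative assumptions and not part of this change; stores are typically created like this in the mission's object factory:

#include "storagemanager/PoolManager.h"

void createIpcStore() {
    // Each pair is {number of elements, element size}. The LocalPoolConfig
    // comparator sorts the pages by ascending element size automatically.
    LocalPool::LocalPoolConfig poolCfg = {
        {100, 32}, {50, 64}, {25, 128}
    };
    // The PoolManager constructor registers the store in the object manager
    // (SystemObject registers itself) and protects most store calls with an
    // internal mutex.
    new PoolManager(objects::IPC_STORE, poolCfg);
}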