Compare commits
2 commits: develop...mueller/sp

Commit SHA1s: 06111ec3ed, 51087518b0
hal/src/fsfw_hal/linux/spi/ManualCsLockGuard.h (new file, 43 lines)
@@ -0,0 +1,43 @@
#pragma once

#include "fsfw/ipc/MutexIF.h"
#include "fsfw/returnvalues/HasReturnvaluesIF.h"
#include "fsfw_hal/common/gpio/GpioIF.h"

class ManualCsLockWrapper : public HasReturnvaluesIF {
 public:
  ManualCsLockWrapper(MutexIF* lock, GpioIF* gpioIF, SpiCookie* cookie,
                      MutexIF::TimeoutType type = MutexIF::TimeoutType::BLOCKING,
                      uint32_t timeoutMs = 0)
      : lock(lock), gpioIF(gpioIF), cookie(cookie), type(type), timeoutMs(timeoutMs) {
    if (cookie == nullptr) {
      // TODO: Error? Or maybe throw exception..
      return;
    }
    cookie->setCsLockManual(true);
    lockResult = lock->lockMutex(type, timeoutMs);
    if (lockResult != RETURN_OK) {
      return;
    }
    gpioResult = gpioIF->pullLow(cookie->getChipSelectPin());
  }

  ~ManualCsLockWrapper() {
    if (gpioResult == RETURN_OK) {
      gpioIF->pullHigh(cookie->getChipSelectPin());
    }
    cookie->setCsLockManual(false);
    if (lockResult == RETURN_OK) {
      lock->unlockMutex();
    }
  }
  ReturnValue_t lockResult;
  ReturnValue_t gpioResult;

 private:
  MutexIF* lock;
  GpioIF* gpioIF;
  SpiCookie* cookie;
  MutexIF::TimeoutType type;
  uint32_t timeoutMs = 0;
};
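The class above is an RAII-style guard: its constructor switches the cookie to manual chip-select locking, takes the CS mutex and pulls the chip-select line low, and its destructor undoes all of that again. A minimal usage sketch follows; the surrounding function, the object names and the include paths are illustrative assumptions, and it presumes the full SpiCookie definition is visible at the point of use.

#include "fsfw_hal/linux/spi/ManualCsLockGuard.h"  // include paths assumed
#include "fsfw_hal/linux/spi/SpiComIF.h"
#include "fsfw_hal/linux/spi/SpiCookie.h"

// Hypothetical helper inside user code, not part of this changeset.
ReturnValue_t performLockedTransfers(SpiComIF* spiComIF, SpiCookie* spiCookie, GpioIF* gpioIF) {
  // Guard keeps the CS mutex locked and the chip-select line low for its whole lifetime.
  ManualCsLockWrapper csLock(spiComIF->getCsMutex(), gpioIF, spiCookie);
  if (csLock.lockResult != HasReturnvaluesIF::RETURN_OK or
      csLock.gpioResult != HasReturnvaluesIF::RETURN_OK) {
    return HasReturnvaluesIF::RETURN_FAILED;
  }
  // Several SPI transfers can now be performed back to back while CS stays asserted.
  return HasReturnvaluesIF::RETURN_OK;
}  // Destructor: CS pulled high again, manual lock flag cleared, mutex released.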
@@ -196,16 +196,22 @@ ReturnValue_t SpiComIF::performRegularSendOperation(SpiCookie* spiCookie, const
  bool fullDuplex = spiCookie->isFullDuplex();
  gpioId_t gpioId = spiCookie->getChipSelectPin();
  bool csLockManual = spiCookie->getCsLockManual();

  /* Pull SPI CS low. For now, no support for active high given */
  if (gpioId != gpio::NO_GPIO) {
    result = spiMutex->lockMutex(timeoutType, timeoutMs);
  MutexIF::TimeoutType csType;
  dur_millis_t csTimeout = 0;
  // Pull SPI CS low. For now, no support for active high given
  if (gpioId != gpio::NO_GPIO and not csLockManual) {
    spiCookie->getMutexParams(csType, csTimeout);
    result = csMutex->lockMutex(csType, csTimeout);
    if (result != RETURN_OK) {
#if FSFW_VERBOSE_LEVEL >= 1
#if FSFW_CPP_OSTREAM_ENABLED == 1
      sif::error << "SpiComIF::sendMessage: Failed to lock mutex" << std::endl;
      sif::error << "SpiComIF::sendMessage: Failed to lock mutex with code "
                 << "0x" << std::hex << std::setfill('0') << std::setw(4) << result << std::dec
                 << std::endl;
#else
      sif::printError("SpiComIF::sendMessage: Failed to lock mutex\n");
      sif::printError("SpiComIF::sendMessage: Failed to lock mutex with code %d\n", result);
#endif
#endif
      return result;

@@ -248,7 +254,7 @@ ReturnValue_t SpiComIF::performRegularSendOperation(SpiCookie* spiCookie, const
    }
  }

  if (gpioId != gpio::NO_GPIO) {
  if (gpioId != gpio::NO_GPIO and not csLockManual) {
    gpioComIF->pullHigh(gpioId);
    result = spiMutex->unlockMutex();
    if (result != RETURN_OK) {

@@ -292,12 +298,27 @@ ReturnValue_t SpiComIF::performHalfDuplexReception(SpiCookie* spiCookie) {
    return result;
  }

  bool csLockManual = spiCookie->getCsLockManual();
  gpioId_t gpioId = spiCookie->getChipSelectPin();
<<<<<<< Updated upstream
  if (gpioId != gpio::NO_GPIO) {
    result = spiMutex->lockMutex(timeoutType, timeoutMs);
=======
  MutexIF::TimeoutType csType;
  dur_millis_t csTimeout = 0;
  if (gpioId != gpio::NO_GPIO and not csLockManual) {
    spiCookie->getMutexParams(csType, csTimeout);
    result = csMutex->lockMutex(csType, csTimeout);
>>>>>>> Stashed changes
    if (result != RETURN_OK) {
#if FSFW_VERBOSE_LEVEL >= 1
#if FSFW_CPP_OSTREAM_ENABLED == 1
      sif::error << "SpiComIF::getSendSuccess: Failed to lock mutex" << std::endl;
      sif::error << "SpiComIF::sendMessage: Failed to lock mutex with code "
                 << "0x" << std::hex << std::setfill('0') << std::setw(4) << result << std::dec
                 << std::endl;
#else
      sif::printError("SpiComIF::sendMessage: Failed to lock mutex with code %d\n", result);
#endif
#endif
      return result;
    }

@@ -315,7 +336,7 @@ ReturnValue_t SpiComIF::performHalfDuplexReception(SpiCookie* spiCookie) {
    result = HALF_DUPLEX_TRANSFER_FAILED;
  }

  if (gpioId != gpio::NO_GPIO) {
  if (gpioId != gpio::NO_GPIO and not csLockManual) {
    gpioComIF->pullHigh(gpioId);
    result = spiMutex->unlockMutex();
    if (result != RETURN_OK) {

@@ -346,6 +367,7 @@ ReturnValue_t SpiComIF::readReceivedMessage(CookieIF* cookie, uint8_t** buffer,
  return HasReturnvaluesIF::RETURN_OK;
}

<<<<<<< Updated upstream
MutexIF* SpiComIF::getMutex(MutexIF::TimeoutType* timeoutType, uint32_t* timeoutMs) {
  if (timeoutType != nullptr) {
    *timeoutType = this->timeoutType;

@@ -355,6 +377,9 @@ MutexIF* SpiComIF::getMutex(MutexIF::TimeoutType* timeoutType, uint32_t* timeout
  }
  return spiMutex;
}
=======
MutexIF* SpiComIF::getCsMutex() { return csMutex; }
>>>>>>> Stashed changes

void SpiComIF::performSpiWiretapping(SpiCookie* spiCookie) {
  if (spiCookie == nullptr) {
@@ -22,15 +22,15 @@ class SpiCookie;
 */
class SpiComIF : public DeviceCommunicationIF, public SystemObject {
 public:
  static constexpr uint8_t spiRetvalId = CLASS_ID::HAL_SPI;
  static constexpr uint8_t CLASS_ID = CLASS_ID::HAL_SPI;
  static constexpr ReturnValue_t OPENING_FILE_FAILED =
      HasReturnvaluesIF::makeReturnCode(spiRetvalId, 0);
      HasReturnvaluesIF::makeReturnCode(CLASS_ID, 0);
  /* Full duplex (ioctl) transfer failure */
  static constexpr ReturnValue_t FULL_DUPLEX_TRANSFER_FAILED =
      HasReturnvaluesIF::makeReturnCode(spiRetvalId, 1);
      HasReturnvaluesIF::makeReturnCode(CLASS_ID, 1);
  /* Half duplex (read/write) transfer failure */
  static constexpr ReturnValue_t HALF_DUPLEX_TRANSFER_FAILED =
      HasReturnvaluesIF::makeReturnCode(spiRetvalId, 2);
      HasReturnvaluesIF::makeReturnCode(CLASS_ID, 2);

  SpiComIF(object_id_t objectId, GpioIF* gpioComIF);

@@ -44,7 +44,7 @@ class SpiComIF : public DeviceCommunicationIF, public SystemObject {
   * @brief This function returns the mutex which can be used to protect the spi bus when
   *        the chip select must be driven from outside of the com if.
   */
  MutexIF* getMutex(MutexIF::TimeoutType* timeoutType = nullptr, uint32_t* timeoutMs = nullptr);
  MutexIF* getCsMutex();

  /**
   * Perform a regular send operation using Linux ioctl. This is public so it can be used

@@ -70,10 +70,13 @@ class SpiComIF : public DeviceCommunicationIF, public SystemObject {
  };

  GpioIF* gpioComIF = nullptr;
  std::string dev = "";
  /**
   * Protects the chip select operations. Lock when GPIO is pulled low, unlock after it was
   * pulled high
   */
  MutexIF* csMutex = nullptr;

  MutexIF* spiMutex = nullptr;
  MutexIF::TimeoutType timeoutType = MutexIF::TimeoutType::WAITING;
  uint32_t timeoutMs = 20;
  spi_ioc_transfer clockUpdateTransfer = {};

  using SpiDeviceMap = std::unordered_map<address_t, SpiInstance>;
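With getCsMutex() replacing the old getMutex() accessor, a caller that drives the chip select itself is expected to hold this mutex for the whole burst of transfers and to enable manual CS locking on the cookie so the ComIF skips its internal locking. The sketch below shows that pattern without the RAII wrapper; the exact signature of performRegularSendOperation() is truncated in the hunk header above, so that call is an assumption, as are the function and variable names.

// Sketch only; error handling trimmed for brevity.
ReturnValue_t sendBurst(SpiComIF* spiComIF, GpioIF* gpioIF, SpiCookie* spiCookie,
                        const uint8_t* sendData, size_t sendLen) {
  MutexIF* csMutex = spiComIF->getCsMutex();
  MutexIF::TimeoutType csType = MutexIF::TimeoutType::WAITING;
  dur_millis_t csTimeout = 0;
  spiCookie->getMutexParams(csType, csTimeout);

  spiCookie->setCsLockManual(true);
  ReturnValue_t result = csMutex->lockMutex(csType, csTimeout);
  if (result != HasReturnvaluesIF::RETURN_OK) {
    spiCookie->setCsLockManual(false);
    return result;
  }
  gpioIF->pullLow(spiCookie->getChipSelectPin());

  // With manual locking active, the ComIF leaves CS and the mutex alone,
  // so consecutive transfers share one chip-select assertion.
  result = spiComIF->performRegularSendOperation(spiCookie, sendData, sendLen);

  gpioIF->pullHigh(spiCookie->getChipSelectPin());
  csMutex->unlockMutex();
  spiCookie->setCsLockManual(false);
  return result;
}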
@@ -107,3 +107,17 @@ void SpiCookie::getCallback(spi::send_callback_function_t* callback, void** args
  *callback = this->sendCallback;
  *args = this->callbackArgs;
}

void SpiCookie::setCsLockManual(bool enable) { manualCsLock = enable; }

bool SpiCookie::getCsLockManual() const { return manualCsLock; }

void SpiCookie::getMutexParams(MutexIF::TimeoutType& csTimeoutType, dur_millis_t& csTimeout) const {
  csTimeoutType = this->csTimeoutType;
  csTimeout = this->csTimeout;
}

void SpiCookie::setMutexParams(MutexIF::TimeoutType csTimeoutType, dur_millis_t csTimeout) {
  this->csTimeoutType = csTimeoutType;
  this->csTimeout = csTimeout;
}
@@ -2,6 +2,8 @@
#define LINUX_SPI_SPICOOKIE_H_

#include <fsfw/devicehandlers/CookieIF.h>
#include <fsfw/ipc/MutexIF.h>
#include <fsfw/timemanager/clockDefinitions.h>
#include <linux/spi/spidev.h>

#include "../../common/gpio/gpioDefinitions.h"

@@ -20,6 +22,8 @@
 */
class SpiCookie : public CookieIF {
 public:
  static constexpr dur_millis_t DEFAULT_MUTEX_TIMEOUT = 20;

  /**
   * Each SPI device will have a corresponding cookie. The cookie is used by the communication
   * interface and contains device specific information like the largest expected size to be

@@ -139,9 +143,43 @@ class SpiCookie : public CookieIF {
   */
  void activateCsDeselect(bool deselectCs, uint16_t delayUsecs);

  void getMutexParams(MutexIF::TimeoutType& csTimeoutType, dur_millis_t& csTimeout) const;
  void setMutexParams(MutexIF::TimeoutType csTimeoutType, dur_millis_t csTimeout);

  void setCsLockManual(bool enable);
  bool getCsLockManual() const;

  spi_ioc_transfer* getTransferStructHandle();

 private:
  address_t spiAddress;
  gpioId_t chipSelectPin;
  std::string spiDevice;

  spi::SpiComIfModes comIfMode;

  // Required for regular mode
  const size_t maxSize;
  spi::SpiModes spiMode;
  /**
   * If this is set to true, the SPI ComIF will not perform any mutex locking for the
   * CS mechanism. The user is responsible for locking and unlocking the mutex for the
   * whole duration of the transfers.
   */
  bool manualCsLock = false;
  uint32_t spiSpeed;
  bool halfDuplex = false;

  MutexIF::TimeoutType csTimeoutType = MutexIF::TimeoutType::WAITING;
  dur_millis_t csTimeout = DEFAULT_MUTEX_TIMEOUT;

  // Required for callback mode
  spi::send_callback_function_t sendCallback = nullptr;
  void* callbackArgs = nullptr;

  struct spi_ioc_transfer spiTransferStruct = {};
  UncommonParameters uncommonParameters;

  /**
   * Internal constructor which initializes every field
   * @param spiAddress

@@ -154,27 +192,8 @@ class SpiCookie : public CookieIF {
   * @param args
   */
  SpiCookie(spi::SpiComIfModes comIfMode, address_t spiAddress, gpioId_t chipSelect,
            std::string spiDev, const size_t maxSize, spi::SpiModes spiMode, uint32_t spiSpeed,
            const size_t maxSize, spi::SpiModes spiMode, uint32_t spiSpeed,
            spi::send_callback_function_t callback, void* args);

  address_t spiAddress;
  gpioId_t chipSelectPin;
  std::string spiDevice;

  spi::SpiComIfModes comIfMode;

  // Required for regular mode
  const size_t maxSize;
  spi::SpiModes spiMode;
  uint32_t spiSpeed;
  bool halfDuplex = false;

  // Required for callback mode
  spi::send_callback_function_t sendCallback = nullptr;
  void* callbackArgs = nullptr;

  struct spi_ioc_transfer spiTransferStruct = {};
  UncommonParameters uncommonParameters;
};

#endif /* LINUX_SPI_SPICOOKIE_H_ */
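The new csTimeoutType/csTimeout pair lets each cookie carry its own chip-select mutex parameters, defaulting to a waiting lock with DEFAULT_MUTEX_TIMEOUT (20 ms). A configuration sketch is shown below; the regular-mode constructor arguments (SPI address, chip-select GPIO ID, device path, maximum transfer size, SPI mode, SPI speed) and all concrete values are assumptions for illustration.

// Values below are placeholders, not framework defaults.
gpioId_t exampleCsPin = 5;
auto* spiCookie = new SpiCookie(0, exampleCsPin, "/dev/spidev0.0", 128,
                                spi::SpiModes::MODE_0, 976000);
// Wait up to 40 ms for the CS mutex instead of the 20 ms default.
spiCookie->setMutexParams(MutexIF::TimeoutType::WAITING, 40);
// Hand chip-select handling over to the caller for composite transfers.
spiCookie->setCsLockManual(true);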
@@ -314,7 +314,7 @@ void UartComIF::configureBaudrate(struct termios* options, UartCookie* uartCooki
      cfsetispeed(options, B4000000);
      cfsetospeed(options, B4000000);
      break;
#endif // ! __APPLE__
#endif  // ! __APPLE__
    default:
#if FSFW_CPP_OSTREAM_ENABLED == 1
      sif::warning << "UartComIF::configureBaudrate: Baudrate not supported" << std::endl;
@@ -5,89 +5,88 @@
#error Include FIFOBase.h before FIFOBase.tpp!
#endif

template <typename T>
inline FIFOBase<T>::FIFOBase(T* values, const size_t maxCapacity)
    : maxCapacity(maxCapacity), values(values){};

template <typename T>
inline ReturnValue_t FIFOBase<T>::insert(T value) {
  if (full()) {
    return FULL;
  } else {
    values[writeIndex] = value;
    writeIndex = next(writeIndex);
    ++currentSize;
    return HasReturnvaluesIF::RETURN_OK;
  }
};

template <typename T>
inline ReturnValue_t FIFOBase<T>::retrieve(T* value) {
  if (empty()) {
    return EMPTY;
  } else {
    if (value == nullptr) {
      return HasReturnvaluesIF::RETURN_FAILED;
    }
    *value = values[readIndex];
    readIndex = next(readIndex);
    --currentSize;
    return HasReturnvaluesIF::RETURN_OK;
  }
};

template <typename T>
inline ReturnValue_t FIFOBase<T>::peek(T* value) {
  if (empty()) {
    return EMPTY;
  } else {
    if (value == nullptr) {
      return HasReturnvaluesIF::RETURN_FAILED;
    }
    *value = values[readIndex];
    return HasReturnvaluesIF::RETURN_OK;
  }
};

template <typename T>
inline ReturnValue_t FIFOBase<T>::pop() {
  T value;
  return this->retrieve(&value);
};

template <typename T>
inline bool FIFOBase<T>::empty() {
  return (currentSize == 0);
};

template <typename T>
inline bool FIFOBase<T>::full() {
  return (currentSize == maxCapacity);
}

template <typename T>
inline size_t FIFOBase<T>::size() {
  return currentSize;
}

template <typename T>
inline size_t FIFOBase<T>::next(size_t current) {
  ++current;
  if (current == maxCapacity) {
    current = 0;
  }
  return current;
}

template <typename T>
inline size_t FIFOBase<T>::getMaxCapacity() const {
  return maxCapacity;
}

template <typename T>
inline void FIFOBase<T>::setContainer(T* data) {
  this->values = data;
}

#endif
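FIFOBase does not own any memory: it wraps a caller-provided array of maxCapacity elements and reports FULL or EMPTY through return values instead of throwing. A small sketch, assuming the base class is directly constructible as shown above (in the framework a derived FIFO container would normally supply the backing storage) and that the include path below is correct:

#include <cstdint>

#include "fsfw/container/FIFOBase.h"  // include path assumed

void fifoSketch() {
  uint32_t backingBuffer[4] = {};
  FIFOBase<uint32_t> fifo(backingBuffer, 4);

  for (uint32_t i = 0; i < 5; i++) {
    if (fifo.insert(i) != HasReturnvaluesIF::RETURN_OK) {
      break;  // fifth insert is rejected with FULL, nothing is overwritten
    }
  }

  uint32_t front = 0;
  fifo.peek(&front);  // front == 0, read index is not advanced
  while (fifo.retrieve(&front) == HasReturnvaluesIF::RETURN_OK) {
    // drains in insertion order: 0, 1, 2, 3
  }
}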
@@ -12,6 +12,7 @@ template <typename T, size_t MAX_SIZE, typename count_t = uint8_t>
class FixedArrayList : public ArrayList<T, count_t> {
  static_assert(MAX_SIZE <= std::numeric_limits<count_t>::max(),
                "count_t is not large enough to hold MAX_SIZE");

 private:
  T data[MAX_SIZE];
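The added static_assert turns an undersized counter type into a compile-time error instead of a silent wrap-around at runtime. For example (include path assumed):

#include <cstdint>

#include "fsfw/container/FixedArrayList.h"  // include path assumed

// Fine: 300 elements fit into a uint16_t counter.
FixedArrayList<uint32_t, 300, uint16_t> okList;

// Would now fail to compile with "count_t is not large enough to hold MAX_SIZE",
// because the default count_t (uint8_t) only counts up to 255:
// FixedArrayList<uint32_t, 300> rejectedList;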
@@ -1,109 +1,109 @@
#ifndef FRAMEWORK_CONTAINER_FIXEDORDEREDMULTIMAP_TPP_
#define FRAMEWORK_CONTAINER_FIXEDORDEREDMULTIMAP_TPP_

template <typename key_t, typename T, typename KEY_COMPARE>
inline ReturnValue_t FixedOrderedMultimap<key_t, T, KEY_COMPARE>::insert(key_t key, T value,
                                                                         Iterator *storedValue) {
  if (_size == theMap.maxSize()) {
    return MAP_FULL;
  }
  size_t position = findNicePlace(key);
  memmove(static_cast<void *>(&theMap[position + 1]), static_cast<void *>(&theMap[position]),
          (_size - position) * sizeof(std::pair<key_t, T>));
  theMap[position].first = key;
  theMap[position].second = value;
  ++_size;
  if (storedValue != nullptr) {
    *storedValue = Iterator(&theMap[position]);
  }
  return HasReturnvaluesIF::RETURN_OK;
}

template <typename key_t, typename T, typename KEY_COMPARE>
inline ReturnValue_t FixedOrderedMultimap<key_t, T, KEY_COMPARE>::insert(std::pair<key_t, T> pair) {
  return insert(pair.first, pair.second);
}

template <typename key_t, typename T, typename KEY_COMPARE>
inline ReturnValue_t FixedOrderedMultimap<key_t, T, KEY_COMPARE>::exists(key_t key) const {
  ReturnValue_t result = KEY_DOES_NOT_EXIST;
  if (findFirstIndex(key) < _size) {
    result = HasReturnvaluesIF::RETURN_OK;
  }
  return result;
}

template <typename key_t, typename T, typename KEY_COMPARE>
inline ReturnValue_t FixedOrderedMultimap<key_t, T, KEY_COMPARE>::erase(Iterator *iter) {
  size_t i;
  if ((i = findFirstIndex((*iter).value->first)) >= _size) {
    return KEY_DOES_NOT_EXIST;
  }
  removeFromPosition(i);
  if (*iter != begin()) {
    (*iter)--;
  } else {
    *iter = begin();
  }
  return HasReturnvaluesIF::RETURN_OK;
}

template <typename key_t, typename T, typename KEY_COMPARE>
inline ReturnValue_t FixedOrderedMultimap<key_t, T, KEY_COMPARE>::erase(key_t key) {
  size_t i;
  if ((i = findFirstIndex(key)) >= _size) {
    return KEY_DOES_NOT_EXIST;
  }
  do {
    removeFromPosition(i);
    i = findFirstIndex(key, i);
  } while (i < _size);
  return HasReturnvaluesIF::RETURN_OK;
}

template <typename key_t, typename T, typename KEY_COMPARE>
inline ReturnValue_t FixedOrderedMultimap<key_t, T, KEY_COMPARE>::find(key_t key, T **value) const {
  ReturnValue_t result = exists(key);
  if (result != HasReturnvaluesIF::RETURN_OK) {
    return result;
  }
  *value = &theMap[findFirstIndex(key)].second;
  return HasReturnvaluesIF::RETURN_OK;
}

template <typename key_t, typename T, typename KEY_COMPARE>
inline size_t FixedOrderedMultimap<key_t, T, KEY_COMPARE>::findFirstIndex(key_t key,
                                                                          size_t startAt) const {
  if (startAt >= _size) {
    return startAt + 1;
  }
  size_t i = startAt;
  for (i = startAt; i < _size; ++i) {
    if (theMap[i].first == key) {
      return i;
    }
  }
  return i;
}

template <typename key_t, typename T, typename KEY_COMPARE>
inline size_t FixedOrderedMultimap<key_t, T, KEY_COMPARE>::findNicePlace(key_t key) const {
  size_t i = 0;
  for (i = 0; i < _size; ++i) {
    if (myComp(key, theMap[i].first)) {
      return i;
    }
  }
  return i;
}

template <typename key_t, typename T, typename KEY_COMPARE>
inline void FixedOrderedMultimap<key_t, T, KEY_COMPARE>::removeFromPosition(size_t position) {
  if (_size <= position) {
    return;
  }
  memmove(static_cast<void *>(&theMap[position]), static_cast<void *>(&theMap[position + 1]),
          (_size - position - 1) * sizeof(std::pair<key_t, T>));
  --_size;
}

#endif /* FRAMEWORK_CONTAINER_FIXEDORDEREDMULTIMAP_TPP_ */
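The container keeps its entries sorted by key and tolerates duplicate keys; insert(), exists(), find() and erase() all report their outcome through ReturnValue_t codes such as MAP_FULL and KEY_DOES_NOT_EXIST. A usage sketch, assuming the map is constructed with its maximum number of entries and that the include path is correct:

#include "fsfw/container/FixedOrderedMultimap.h"  // include path assumed

void multimapSketch() {
  FixedOrderedMultimap<uint32_t, float> map(10);  // capacity as ctor argument is an assumption

  map.insert(4, 1.5f);
  map.insert(2, 0.5f);
  map.insert(4, 2.5f);  // duplicate keys are allowed

  float* value = nullptr;
  if (map.find(4, &value) == HasReturnvaluesIF::RETURN_OK) {
    // value points at the first entry stored for key 4
  }
  if (map.exists(7) != HasReturnvaluesIF::RETURN_OK) {
    // key 7 was never inserted (KEY_DOES_NOT_EXIST)
  }
  map.erase(4);  // removes every entry stored under key 4
}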
@@ -5,205 +5,189 @@
#error Include LocalPoolVariable.h before LocalPoolVariable.tpp!
#endif

template <typename T>
inline LocalPoolVariable<T>::LocalPoolVariable(HasLocalDataPoolIF* hkOwner, lp_id_t poolId,
                                               DataSetIF* dataSet, pool_rwm_t setReadWriteMode)
    : LocalPoolObjectBase(poolId, hkOwner, dataSet, setReadWriteMode) {}

template <typename T>
inline LocalPoolVariable<T>::LocalPoolVariable(object_id_t poolOwner, lp_id_t poolId,
                                               DataSetIF* dataSet, pool_rwm_t setReadWriteMode)
    : LocalPoolObjectBase(poolOwner, poolId, dataSet, setReadWriteMode) {}

template <typename T>
inline LocalPoolVariable<T>::LocalPoolVariable(gp_id_t globalPoolId, DataSetIF* dataSet,
                                               pool_rwm_t setReadWriteMode)
    : LocalPoolObjectBase(globalPoolId.objectId, globalPoolId.localPoolId, dataSet,
                          setReadWriteMode) {}

template <typename T>
inline ReturnValue_t LocalPoolVariable<T>::read(MutexIF::TimeoutType timeoutType,
                                                uint32_t timeoutMs) {
  if (hkManager == nullptr) {
    return readWithoutLock();
  }
  MutexIF* mutex = LocalDpManagerAttorney::getMutexHandle(*hkManager);
  ReturnValue_t result = mutex->lockMutex(timeoutType, timeoutMs);
  if (result != HasReturnvaluesIF::RETURN_OK) {
    return result;
  }
  result = readWithoutLock();
  mutex->unlockMutex();
  return result;
}

template <typename T>
inline ReturnValue_t LocalPoolVariable<T>::readWithoutLock() {
  if (readWriteMode == pool_rwm_t::VAR_WRITE) {
    object_id_t targetObjectId = hkManager->getCreatorObjectId();
    reportReadCommitError("LocalPoolVector", PoolVariableIF::INVALID_READ_WRITE_MODE, true,
                          targetObjectId, localPoolId);
    return PoolVariableIF::INVALID_READ_WRITE_MODE;
  }

  PoolEntry<T>* poolEntry = nullptr;
  ReturnValue_t result =
      LocalDpManagerAttorney::fetchPoolEntry(*hkManager, localPoolId, &poolEntry);
  if (result != RETURN_OK) {
    object_id_t ownerObjectId = hkManager->getCreatorObjectId();
    reportReadCommitError("LocalPoolVariable", result, false, ownerObjectId, localPoolId);
    return result;
  }

  this->value = *(poolEntry->getDataPtr());
  this->valid = poolEntry->getValid();
  return RETURN_OK;
}

template <typename T>
inline ReturnValue_t LocalPoolVariable<T>::commit(bool setValid, MutexIF::TimeoutType timeoutType,
                                                  uint32_t timeoutMs) {
  this->setValid(setValid);
  return commit(timeoutType, timeoutMs);
}

template <typename T>
inline ReturnValue_t LocalPoolVariable<T>::commit(MutexIF::TimeoutType timeoutType,
                                                  uint32_t timeoutMs) {
  if (hkManager == nullptr) {
    return commitWithoutLock();
  }
  MutexIF* mutex = LocalDpManagerAttorney::getMutexHandle(*hkManager);
  ReturnValue_t result = mutex->lockMutex(timeoutType, timeoutMs);
  if (result != HasReturnvaluesIF::RETURN_OK) {
    return result;
  }
  result = commitWithoutLock();
  mutex->unlockMutex();
  return result;
}

template <typename T>
inline ReturnValue_t LocalPoolVariable<T>::commitWithoutLock() {
  if (readWriteMode == pool_rwm_t::VAR_READ) {
    object_id_t targetObjectId = hkManager->getCreatorObjectId();
    reportReadCommitError("LocalPoolVector", PoolVariableIF::INVALID_READ_WRITE_MODE, false,
                          targetObjectId, localPoolId);
    return PoolVariableIF::INVALID_READ_WRITE_MODE;
  }

  PoolEntry<T>* poolEntry = nullptr;
  ReturnValue_t result =
      LocalDpManagerAttorney::fetchPoolEntry(*hkManager, localPoolId, &poolEntry);
  if (result != RETURN_OK) {
    object_id_t ownerObjectId = hkManager->getCreatorObjectId();
    reportReadCommitError("LocalPoolVariable", result, false, ownerObjectId, localPoolId);
    return result;
  }

  *(poolEntry->getDataPtr()) = this->value;
  poolEntry->setValid(this->valid);
  return RETURN_OK;
}

template <typename T>
inline ReturnValue_t LocalPoolVariable<T>::serialize(
    uint8_t** buffer, size_t* size, const size_t max_size,
    SerializeIF::Endianness streamEndianness) const {
  return SerializeAdapter::serialize(&value, buffer, size, max_size, streamEndianness);
}

template <typename T>
inline size_t LocalPoolVariable<T>::getSerializedSize() const {
  return SerializeAdapter::getSerializedSize(&value);
}

template <typename T>
inline ReturnValue_t LocalPoolVariable<T>::deSerialize(const uint8_t** buffer, size_t* size,
                                                       SerializeIF::Endianness streamEndianness) {
  return SerializeAdapter::deSerialize(&value, buffer, size, streamEndianness);
}

#if FSFW_CPP_OSTREAM_ENABLED == 1
template <typename T>
inline std::ostream& operator<<(std::ostream& out, const LocalPoolVariable<T>& var) {
  out << var.value;
  return out;
}
#endif

template <typename T>
inline LocalPoolVariable<T>::operator T() const {
  return value;
}

template <typename T>
inline LocalPoolVariable<T>& LocalPoolVariable<T>::operator=(const T& newValue) {
  value = newValue;
  return *this;
}

template <typename T>
inline LocalPoolVariable<T>& LocalPoolVariable<T>::operator=(
    const LocalPoolVariable<T>& newPoolVariable) {
  value = newPoolVariable.value;
  return *this;
}

template <typename T>
inline bool LocalPoolVariable<T>::operator==(const LocalPoolVariable<T>& other) const {
  return this->value == other.value;
}

template <typename T>
inline bool LocalPoolVariable<T>::operator==(const T& other) const {
  return this->value == other;
}

template <typename T>
inline bool LocalPoolVariable<T>::operator!=(const LocalPoolVariable<T>& other) const {
  return not(*this == other);
}

template <typename T>
inline bool LocalPoolVariable<T>::operator!=(const T& other) const {
  return not(*this == other);
}

template <typename T>
inline bool LocalPoolVariable<T>::operator<(const LocalPoolVariable<T>& other) const {
  return this->value < other.value;
}

template <typename T>
inline bool LocalPoolVariable<T>::operator<(const T& other) const {
  return this->value < other;
}

template <typename T>
inline bool LocalPoolVariable<T>::operator>(const LocalPoolVariable<T>& other) const {
  return not(*this < other);
}

template <typename T>
inline bool LocalPoolVariable<T>::operator>(const T& other) const {
  return not(*this < other);
}

#endif /* FSFW_DATAPOOLLOCAL_LOCALPOOLVARIABLE_TPP_ */
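read() locks the owning pool's mutex (unless hkManager is unset), copies the pool entry into the locally cached value, and commit() writes the cache back; the conversion and comparison operators then let the variable be used like its underlying type. A sketch with made-up object and pool IDs, assuming the default timeout arguments declared in the header:

// objects::EXAMPLE_HANDLER and pool ID 0x01 are placeholders.
LocalPoolVariable<float> temperature(gp_id_t(objects::EXAMPLE_HANDLER, 0x01), nullptr);

if (temperature.read() == HasReturnvaluesIF::RETURN_OK) {
  float doubled = temperature * 2.0f;  // implicit conversion via operator T()
  temperature = doubled;               // operator=(const T&) only updates the cached copy
  temperature.commit(true);            // write back to the pool and mark the entry valid
}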
@@ -5,174 +5,172 @@
#error Include LocalPoolVector.h before LocalPoolVector.tpp!
#endif

template <typename T, uint16_t vectorSize>
inline LocalPoolVector<T, vectorSize>::LocalPoolVector(HasLocalDataPoolIF* hkOwner, lp_id_t poolId,
                                                       DataSetIF* dataSet,
                                                       pool_rwm_t setReadWriteMode)
    : LocalPoolObjectBase(poolId, hkOwner, dataSet, setReadWriteMode) {}

template <typename T, uint16_t vectorSize>
inline LocalPoolVector<T, vectorSize>::LocalPoolVector(object_id_t poolOwner, lp_id_t poolId,
                                                       DataSetIF* dataSet,
                                                       pool_rwm_t setReadWriteMode)
    : LocalPoolObjectBase(poolOwner, poolId, dataSet, setReadWriteMode) {}

template <typename T, uint16_t vectorSize>
inline LocalPoolVector<T, vectorSize>::LocalPoolVector(gp_id_t globalPoolId, DataSetIF* dataSet,
                                                       pool_rwm_t setReadWriteMode)
    : LocalPoolObjectBase(globalPoolId.objectId, globalPoolId.localPoolId, dataSet,
                          setReadWriteMode) {}

template <typename T, uint16_t vectorSize>
inline ReturnValue_t LocalPoolVector<T, vectorSize>::read(MutexIF::TimeoutType timeoutType,
                                                          uint32_t timeoutMs) {
  MutexGuard(LocalDpManagerAttorney::getMutexHandle(*hkManager), timeoutType, timeoutMs);
  return readWithoutLock();
}

template <typename T, uint16_t vectorSize>
inline ReturnValue_t LocalPoolVector<T, vectorSize>::readWithoutLock() {
  if (readWriteMode == pool_rwm_t::VAR_WRITE) {
    object_id_t targetObjectId = hkManager->getCreatorObjectId();
    reportReadCommitError("LocalPoolVector", PoolVariableIF::INVALID_READ_WRITE_MODE, true,
                          targetObjectId, localPoolId);
    return PoolVariableIF::INVALID_READ_WRITE_MODE;
  }

  PoolEntry<T>* poolEntry = nullptr;
  ReturnValue_t result =
      LocalDpManagerAttorney::fetchPoolEntry(*hkManager, localPoolId, &poolEntry);
  memset(this->value, 0, vectorSize * sizeof(T));

  if (result != RETURN_OK) {
    object_id_t targetObjectId = hkManager->getCreatorObjectId();
    reportReadCommitError("LocalPoolVector", result, true, targetObjectId, localPoolId);
    return result;
  }
  std::memcpy(this->value, poolEntry->getDataPtr(), poolEntry->getByteSize());
  this->valid = poolEntry->getValid();
  return RETURN_OK;
}

template <typename T, uint16_t vectorSize>
inline ReturnValue_t LocalPoolVector<T, vectorSize>::commit(bool valid,
                                                            MutexIF::TimeoutType timeoutType,
                                                            uint32_t timeoutMs) {
  this->setValid(valid);
  return commit(timeoutType, timeoutMs);
}

template <typename T, uint16_t vectorSize>
inline ReturnValue_t LocalPoolVector<T, vectorSize>::commit(MutexIF::TimeoutType timeoutType,
                                                            uint32_t timeoutMs) {
  MutexGuard(LocalDpManagerAttorney::getMutexHandle(*hkManager), timeoutType, timeoutMs);
  return commitWithoutLock();
}

template <typename T, uint16_t vectorSize>
inline ReturnValue_t LocalPoolVector<T, vectorSize>::commitWithoutLock() {
  if (readWriteMode == pool_rwm_t::VAR_READ) {
    object_id_t targetObjectId = hkManager->getCreatorObjectId();
    reportReadCommitError("LocalPoolVector", PoolVariableIF::INVALID_READ_WRITE_MODE, false,
                          targetObjectId, localPoolId);
    return PoolVariableIF::INVALID_READ_WRITE_MODE;
  }
  PoolEntry<T>* poolEntry = nullptr;
  ReturnValue_t result =
      LocalDpManagerAttorney::fetchPoolEntry(*hkManager, localPoolId, &poolEntry);
  if (result != RETURN_OK) {
    object_id_t targetObjectId = hkManager->getCreatorObjectId();
    reportReadCommitError("LocalPoolVector", result, false, targetObjectId, localPoolId);
    return result;
  }
  std::memcpy(poolEntry->getDataPtr(), this->value, poolEntry->getByteSize());
  poolEntry->setValid(this->valid);
  return RETURN_OK;
}

template <typename T, uint16_t vectorSize>
inline T& LocalPoolVector<T, vectorSize>::operator[](size_t i) {
  if (i < vectorSize) {
    return value[i];
  }
  // If this happens, I have to set some value. I consider this
  // a configuration error, but I wont exit here.
#if FSFW_CPP_OSTREAM_ENABLED == 1
  sif::warning << "LocalPoolVector: Invalid index. Setting or returning"
                  " last value!"
               << std::endl;
#else
  sif::printWarning(
      "LocalPoolVector: Invalid index. Setting or returning"
      " last value!\n");
#endif
  return value[vectorSize - 1];
}

template <typename T, uint16_t vectorSize>
inline const T& LocalPoolVector<T, vectorSize>::operator[](size_t i) const {
  if (i < vectorSize) {
    return value[i];
  }
  // If this happens, I have to set some value. I consider this
  // a configuration error, but I wont exit here.
#if FSFW_CPP_OSTREAM_ENABLED == 1
  sif::warning << "LocalPoolVector: Invalid index. Setting or returning"
                  " last value!"
               << std::endl;
#else
  sif::printWarning(
      "LocalPoolVector: Invalid index. Setting or returning"
      " last value!\n");
#endif
  return value[vectorSize - 1];
}

template <typename T, uint16_t vectorSize>
inline ReturnValue_t LocalPoolVector<T, vectorSize>::serialize(
    uint8_t** buffer, size_t* size, size_t maxSize,
    SerializeIF::Endianness streamEndianness) const {
  ReturnValue_t result = HasReturnvaluesIF::RETURN_FAILED;
  for (uint16_t i = 0; i < vectorSize; i++) {
    result = SerializeAdapter::serialize(&(value[i]), buffer, size, maxSize, streamEndianness);
    if (result != HasReturnvaluesIF::RETURN_OK) {
      break;
    }
  }
  return result;
}

template <typename T, uint16_t vectorSize>
inline size_t LocalPoolVector<T, vectorSize>::getSerializedSize() const {
  return vectorSize * SerializeAdapter::getSerializedSize(value);
}

template <typename T, uint16_t vectorSize>
inline ReturnValue_t LocalPoolVector<T, vectorSize>::deSerialize(
    const uint8_t** buffer, size_t* size, SerializeIF::Endianness streamEndianness) {
  ReturnValue_t result = HasReturnvaluesIF::RETURN_FAILED;
  for (uint16_t i = 0; i < vectorSize; i++) {
    result = SerializeAdapter::deSerialize(&(value[i]), buffer, size, streamEndianness);
    if (result != HasReturnvaluesIF::RETURN_OK) {
      break;
    }
  }
  return result;
}

#if FSFW_CPP_OSTREAM_ENABLED == 1
template <typename T, uint16_t vectorSize>
inline std::ostream& operator<<(std::ostream& out, const LocalPoolVector<T, vectorSize>& var) {
  out << "Vector: [";
  for (int i = 0; i < vectorSize; i++) {
    out << var.value[i];
    if (i < vectorSize - 1) {
      out << ", ";
    }
  }
  out << "]";
  return out;
}
#endif
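The vector variant mirrors LocalPoolVariable but caches vectorSize elements; notably, operator[] clamps an out-of-range index to the last element and prints a warning rather than corrupting memory. A short sketch with placeholder IDs, assuming the default timeout arguments:

// Placeholder IDs; three raw ADC channels cached locally.
LocalPoolVector<uint16_t, 3> rawChannels(gp_id_t(objects::EXAMPLE_HANDLER, 0x02), nullptr);

rawChannels[0] = 100;
rawChannels[1] = 200;
rawChannels[2] = 300;
// Out of range: warns and aliases the last element, so this overwrites rawChannels[2].
rawChannels[3] = 400;
rawChannels.commit(true);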
@@ -1062,7 +1062,8 @@ class DeviceHandlerBase : public DeviceHandlerIF,
  /**
   * Same as triggerEvent, but for forwarding if object is used as proxy.
   */
  virtual void forwardEvent(Event event, uint32_t parameter1 = 0, uint32_t parameter2 = 0) const override;
  virtual void forwardEvent(Event event, uint32_t parameter1 = 0,
                            uint32_t parameter2 = 0) const override;

  /**
   * Checks if current mode is transitional mode.
@@ -50,7 +50,8 @@ class SystemObject : public SystemObjectIF {
  virtual ReturnValue_t initialize() override;
  virtual ReturnValue_t checkObjectConnections() override;

  virtual void forwardEvent(Event event, uint32_t parameter1 = 0, uint32_t parameter2 = 0) const override;
  virtual void forwardEvent(Event event, uint32_t parameter1 = 0,
                            uint32_t parameter2 = 0) const override;
};

#endif /* FSFW_OBJECTMANAGER_SYSTEMOBJECT_H_ */
@@ -161,7 +161,7 @@ void TcpTmTcServer::handleServerOperation(socket_t& connSocket) {

  while (true) {
    ssize_t retval = recv(connSocket, reinterpret_cast<char*>(receptionBuffer.data()),
                          receptionBuffer.capacity(), tcpConfig.tcpFlags);
                          receptionBuffer.capacity(), tcpConfig.tcpFlags);
    if (retval == 0) {
      size_t availableReadData = ringBuffer.getAvailableReadData();
      if (availableReadData > lastRingBufferSize) {

@@ -285,7 +285,7 @@ ReturnValue_t TcpTmTcServer::handleTmSending(socket_t connSocket, bool& tmSent)
      arrayprinter::print(storeAccessor.data(), storeAccessor.size());
    }
    ssize_t retval = send(connSocket, reinterpret_cast<const char*>(storeAccessor.data()),
                          storeAccessor.size(), tcpConfig.tcpTmFlags);
                          storeAccessor.size(), tcpConfig.tcpTmFlags);
    if (retval == static_cast<int>(storeAccessor.size())) {
      // Packet sent, clear FIFO entry
      tmtcBridge->tmFifo->pop();

@@ -340,7 +340,7 @@ ReturnValue_t TcpTmTcServer::handleTcRingBufferData(size_t availableReadData) {
  size_t foundSize = 0;
  size_t readLen = 0;
  while (readLen < readAmount) {
    if(spacePacketParser == nullptr) {
    if (spacePacketParser == nullptr) {
      return HasReturnvaluesIF::RETURN_FAILED;
    }
    result =
@@ -154,7 +154,7 @@ void UdpTcPollingTask::setTimeout(double timeoutSeconds) {
#endif
  }
#elif defined(PLATFORM_UNIX)
  timeval tval {};
  timeval tval{};
  tval = timevalOperations::toTimeval(timeoutSeconds);
  int result = setsockopt(serverSocket, SOL_SOCKET, SO_RCVTIMEO, &tval, sizeof(receptionTimeout));
  if (result == -1) {
@@ -20,7 +20,7 @@
const std::string UdpTmTcBridge::DEFAULT_SERVER_PORT = tcpip::DEFAULT_SERVER_PORT;

UdpTmTcBridge::UdpTmTcBridge(object_id_t objectId, object_id_t tcDestination,
                             const std::string& udpServerPort_, object_id_t tmStoreId,
                             const std::string &udpServerPort_, object_id_t tmStoreId,
                             object_id_t tcStoreId)
    : TmTcBridge(objectId, tcDestination, tmStoreId, tcStoreId) {
  if (udpServerPort_.empty()) {

@@ -118,7 +118,7 @@ ReturnValue_t UdpTmTcBridge::sendTm(const uint8_t *data, size_t dataLen) {
#endif

  ssize_t bytesSent = sendto(serverSocket, reinterpret_cast<const char *>(data), dataLen, flags,
                             &clientAddress, clientAddressLen);
                             &clientAddress, clientAddressLen);
  if (bytesSent == SOCKET_ERROR) {
#if FSFW_CPP_OSTREAM_ENABLED == 1
    sif::warning << "TmTcUdpBridge::sendTm: Send operation failed." << std::endl;
@@ -29,8 +29,8 @@ class UdpTmTcBridge : public TmTcBridge, public TcpIpBase {
  /* The ports chosen here should not be used by any other process. */
  static const std::string DEFAULT_SERVER_PORT;

  UdpTmTcBridge(object_id_t objectId, object_id_t tcDestination, const std::string& udpServerPort = "",
                object_id_t tmStoreId = objects::TM_STORE,
  UdpTmTcBridge(object_id_t objectId, object_id_t tcDestination,
                const std::string& udpServerPort = "", object_id_t tmStoreId = objects::TM_STORE,
                object_id_t tcStoreId = objects::TC_STORE);
  ~UdpTmTcBridge() override;
@@ -97,7 +97,8 @@ ReturnValue_t CService201HealthCommanding::handleReply(const CommandMessage *rep
}

// Not used for now, health state already reported by event
[[maybe_unused]] ReturnValue_t CService201HealthCommanding::prepareHealthSetReply(const CommandMessage *reply) {
[[maybe_unused]] ReturnValue_t CService201HealthCommanding::prepareHealthSetReply(
    const CommandMessage *reply) {
  auto health = static_cast<uint8_t>(HealthMessage::getHealth(reply));
  auto oldHealth = static_cast<uint8_t>(HealthMessage::getOldHealth(reply));
  HealthSetReply healthSetReply(health, oldHealth);
@@ -39,7 +39,7 @@ class CService201HealthCommanding : public CommandingServiceBase {

 private:
  static ReturnValue_t checkInterfaceAndAcquireMessageQueue(MessageQueueId_t *MessageQueueToSet,
                                                            const object_id_t *objectId);
                                                            const object_id_t *objectId);

  [[maybe_unused]] ReturnValue_t prepareHealthSetReply(const CommandMessage *reply);
@@ -67,7 +67,5 @@ TEST_CASE("Power Switcher", "[power-switcher]") {
    REQUIRE(not switcherUsingDummy.active());
  }

  SECTION("More Dummy Tests") {

  }
  SECTION("More Dummy Tests") {}
}