Compare commits


2 Commits

SHA1        Message                         Date
06111ec3ed  missing call added              2022-05-14 16:57:59 +02:00
51087518b0  spi mutex handling refactoring  2022-05-14 11:32:51 +02:00
20 changed files with 563 additions and 476 deletions

View File

@@ -0,0 +1,43 @@
#pragma once
#include "fsfw/ipc/MutexIF.h"
#include "fsfw/returnvalues/HasReturnvaluesIF.h"
#include "fsfw_hal/common/gpio/GpioIF.h"
class ManualCsLockWrapper : public HasReturnvaluesIF {
public:
ManualCsLockWrapper(MutexIF* lock, GpioIF* gpioIF, SpiCookie* cookie,
MutexIF::TimeoutType type = MutexIF::TimeoutType::BLOCKING,
uint32_t timeoutMs = 0)
: lock(lock), gpioIF(gpioIF), cookie(cookie), type(type), timeoutMs(timeoutMs) {
if (cookie == nullptr) {
// TODO: Error? Or maybe throw exception..
return;
}
cookie->setCsLockManual(true);
lockResult = lock->lockMutex(type, timeoutMs);
if (lockResult != RETURN_OK) {
return;
}
gpioResult = gpioIF->pullLow(cookie->getChipSelectPin());
}
~ManualCsLockWrapper() {
if (gpioResult == RETURN_OK) {
gpioIF->pullHigh(cookie->getChipSelectPin());
}
cookie->setCsLockManual(false);
if (lockResult == RETURN_OK) {
lock->unlockMutex();
}
}
ReturnValue_t lockResult = RETURN_FAILED;
ReturnValue_t gpioResult = RETURN_FAILED;
private:
MutexIF* lock;
GpioIF* gpioIF;
SpiCookie* cookie;
MutexIF::TimeoutType type;
uint32_t timeoutMs = 0;
};
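Usage note: the wrapper above is an RAII guard, so a device handler that wants to keep the chip select asserted over several back-to-back transfers can simply place it on the stack. The following is a minimal sketch under the assumption that the handler has access to the SpiComIF, SpiCookie and GpioIF instances; the function name, the buffer parameters and the exact performRegularSendOperation signature are illustrative, not taken from this diff.
// Sketch only: all names except the wrapper, SpiComIF, SpiCookie and GpioIF are assumptions.
ReturnValue_t sendTwoPartCommand(SpiComIF& comIF, SpiCookie& cookie, GpioIF& gpioIF,
                                 const uint8_t* part1, size_t len1,
                                 const uint8_t* part2, size_t len2) {
  ManualCsLockWrapper csLock(comIF.getCsMutex(), &gpioIF, &cookie);
  if (csLock.lockResult != HasReturnvaluesIF::RETURN_OK or
      csLock.gpioResult != HasReturnvaluesIF::RETURN_OK) {
    return HasReturnvaluesIF::RETURN_FAILED;
  }
  // The wrapper has flagged the cookie with setCsLockManual(true), so the ComIF
  // skips its own CS mutex locking and GPIO toggling for both calls.
  ReturnValue_t result = comIF.performRegularSendOperation(&cookie, part1, len1);
  if (result == HasReturnvaluesIF::RETURN_OK) {
    result = comIF.performRegularSendOperation(&cookie, part2, len2);
  }
  // On scope exit the wrapper pulls CS high again and unlocks the CS mutex.
  return result;
}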

View File

@@ -196,16 +196,22 @@ ReturnValue_t SpiComIF::performRegularSendOperation(SpiCookie* spiCookie, const
bool fullDuplex = spiCookie->isFullDuplex();
gpioId_t gpioId = spiCookie->getChipSelectPin();
bool csLockManual = spiCookie->getCsLockManual();
/* Pull SPI CS low. For now, no support for active high given */
if (gpioId != gpio::NO_GPIO) {
result = spiMutex->lockMutex(timeoutType, timeoutMs);
MutexIF::TimeoutType csType;
dur_millis_t csTimeout = 0;
// Pull SPI CS low. For now, no support for active high given
if (gpioId != gpio::NO_GPIO and not csLockManual) {
spiCookie->getMutexParams(csType, csTimeout);
result = csMutex->lockMutex(csType, csTimeout);
if (result != RETURN_OK) {
#if FSFW_VERBOSE_LEVEL >= 1
#if FSFW_CPP_OSTREAM_ENABLED == 1
sif::error << "SpiComIF::sendMessage: Failed to lock mutex" << std::endl;
sif::error << "SpiComIF::sendMessage: Failed to lock mutex with code "
<< "0x" << std::hex << std::setfill('0') << std::setw(4) << result << std::dec
<< std::endl;
#else
sif::printError("SpiComIF::sendMessage: Failed to lock mutex\n");
sif::printError("SpiComIF::sendMessage: Failed to lock mutex with code %d\n", result);
#endif
#endif
return result;
@@ -248,7 +254,7 @@ ReturnValue_t SpiComIF::performRegularSendOperation(SpiCookie* spiCookie, const
}
}
if (gpioId != gpio::NO_GPIO) {
if (gpioId != gpio::NO_GPIO and not csLockManual) {
gpioComIF->pullHigh(gpioId);
result = spiMutex->unlockMutex();
if (result != RETURN_OK) {
@@ -292,12 +298,27 @@ ReturnValue_t SpiComIF::performHalfDuplexReception(SpiCookie* spiCookie) {
return result;
}
bool csLockManual = spiCookie->getCsLockManual();
gpioId_t gpioId = spiCookie->getChipSelectPin();
if (gpioId != gpio::NO_GPIO) {
result = spiMutex->lockMutex(timeoutType, timeoutMs);
MutexIF::TimeoutType csType;
dur_millis_t csTimeout = 0;
if (gpioId != gpio::NO_GPIO and not csLockManual) {
spiCookie->getMutexParams(csType, csTimeout);
result = csMutex->lockMutex(csType, csTimeout);
if (result != RETURN_OK) {
#if FSFW_VERBOSE_LEVEL >= 1
#if FSFW_CPP_OSTREAM_ENABLED == 1
sif::error << "SpiComIF::getSendSuccess: Failed to lock mutex" << std::endl;
sif::error << "SpiComIF::sendMessage: Failed to lock mutex with code "
<< "0x" << std::hex << std::setfill('0') << std::setw(4) << result << std::dec
<< std::endl;
#else
sif::printError("SpiComIF::sendMessage: Failed to lock mutex with code %d\n", result);
#endif
#endif
return result;
}
@@ -315,7 +336,7 @@ ReturnValue_t SpiComIF::performHalfDuplexReception(SpiCookie* spiCookie) {
result = HALF_DUPLEX_TRANSFER_FAILED;
}
if (gpioId != gpio::NO_GPIO) {
if (gpioId != gpio::NO_GPIO and not csLockManual) {
gpioComIF->pullHigh(gpioId);
result = spiMutex->unlockMutex();
if (result != RETURN_OK) {
@@ -346,6 +367,7 @@ ReturnValue_t SpiComIF::readReceivedMessage(CookieIF* cookie, uint8_t** buffer,
return HasReturnvaluesIF::RETURN_OK;
}
MutexIF* SpiComIF::getMutex(MutexIF::TimeoutType* timeoutType, uint32_t* timeoutMs) {
if (timeoutType != nullptr) {
*timeoutType = this->timeoutType;
@@ -355,6 +377,9 @@ MutexIF* SpiComIF::getMutex(MutexIF::TimeoutType* timeoutType, uint32_t* timeout
}
return spiMutex;
}
MutexIF* SpiComIF::getCsMutex() { return csMutex; }
void SpiComIF::performSpiWiretapping(SpiCookie* spiCookie) {
if (spiCookie == nullptr) {

View File

@@ -22,15 +22,15 @@ class SpiCookie;
*/
class SpiComIF : public DeviceCommunicationIF, public SystemObject {
public:
static constexpr uint8_t spiRetvalId = CLASS_ID::HAL_SPI;
static constexpr uint8_t CLASS_ID = CLASS_ID::HAL_SPI;
static constexpr ReturnValue_t OPENING_FILE_FAILED =
HasReturnvaluesIF::makeReturnCode(spiRetvalId, 0);
HasReturnvaluesIF::makeReturnCode(CLASS_ID, 0);
/* Full duplex (ioctl) transfer failure */
static constexpr ReturnValue_t FULL_DUPLEX_TRANSFER_FAILED =
HasReturnvaluesIF::makeReturnCode(spiRetvalId, 1);
HasReturnvaluesIF::makeReturnCode(CLASS_ID, 1);
/* Half duplex (read/write) transfer failure */
static constexpr ReturnValue_t HALF_DUPLEX_TRANSFER_FAILED =
HasReturnvaluesIF::makeReturnCode(spiRetvalId, 2);
HasReturnvaluesIF::makeReturnCode(CLASS_ID, 2);
SpiComIF(object_id_t objectId, GpioIF* gpioComIF);
@@ -44,7 +44,7 @@ class SpiComIF : public DeviceCommunicationIF, public SystemObject {
* @brief This function returns the mutex which can be used to protect the spi bus when
* the chip select must be driven from outside of the com if.
*/
MutexIF* getMutex(MutexIF::TimeoutType* timeoutType = nullptr, uint32_t* timeoutMs = nullptr);
MutexIF* getCsMutex();
/**
* Perform a regular send operation using Linux iotcl. This is public so it can be used
@@ -70,10 +70,13 @@ class SpiComIF : public DeviceCommunicationIF, public SystemObject {
};
GpioIF* gpioComIF = nullptr;
std::string dev = "";
/**
* Protects the chip select operations. Lock when GPIO is pulled low, unlock after it was
* pulled high
*/
MutexIF* csMutex = nullptr;
MutexIF* spiMutex = nullptr;
MutexIF::TimeoutType timeoutType = MutexIF::TimeoutType::WAITING;
uint32_t timeoutMs = 20;
spi_ioc_transfer clockUpdateTransfer = {};
using SpiDeviceMap = std::unordered_map<address_t, SpiInstance>;
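For the csMutex member documented above: when the chip select is driven from outside the ComIF, the caller is expected to perform the same lock, pull low, transfer, pull high, unlock sequence itself (or use the ManualCsLockWrapper from the first file). A rough sketch, with comIF, gpioIF and cookie assumed to be available and the 20 ms timeout purely illustrative:
// Manual CS protocol sketch; surrounding object names are assumptions.
MutexIF* csMutex = comIF->getCsMutex();
if (csMutex->lockMutex(MutexIF::TimeoutType::WAITING, 20) == HasReturnvaluesIF::RETURN_OK) {
  gpioIF->pullLow(cookie->getChipSelectPin());
  // ... one or more SPI transfers with the cookie flagged via setCsLockManual(true) ...
  gpioIF->pullHigh(cookie->getChipSelectPin());
  csMutex->unlockMutex();
}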

View File

@@ -107,3 +107,17 @@ void SpiCookie::getCallback(spi::send_callback_function_t* callback, void** args
*callback = this->sendCallback;
*args = this->callbackArgs;
}
void SpiCookie::setCsLockManual(bool enable) { manualCsLock = enable; }
bool SpiCookie::getCsLockManual() const { return manualCsLock; }
void SpiCookie::getMutexParams(MutexIF::TimeoutType& csTimeoutType, dur_millis_t& csTimeout) const {
csTimeoutType = this->csTimeoutType;
csTimeout = this->csTimeout;
}
void SpiCookie::setMutexParams(MutexIF::TimeoutType csTimeoutType, dur_millis_t csTimeout) {
this->csTimeoutType = csTimeoutType;
this->csTimeout = csTimeout;
}
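The setters added above feed the manual CS mechanism: a cookie can either be given non-default parameters for the ComIF's own CS mutex locking, or CS locking can be handed over to the caller entirely. A small configuration sketch; the enclosing function and the 40 ms timeout are illustrative assumptions:
void configureSpiCookie(SpiCookie& spiCookie) {
  // Use a 40 ms timeout for the ComIF's automatic CS mutex locking
  spiCookie.setMutexParams(MutexIF::TimeoutType::WAITING, 40);
  // Alternatively, take over CS handling completely; the ComIF then skips its
  // own mutex lock and pullLow()/pullHigh() calls for this cookie
  spiCookie.setCsLockManual(true);
}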

View File

@@ -2,6 +2,8 @@
#define LINUX_SPI_SPICOOKIE_H_
#include <fsfw/devicehandlers/CookieIF.h>
#include <fsfw/ipc/MutexIF.h>
#include <fsfw/timemanager/clockDefinitions.h>
#include <linux/spi/spidev.h>
#include "../../common/gpio/gpioDefinitions.h"
@@ -20,6 +22,8 @@
*/
class SpiCookie : public CookieIF {
public:
static constexpr dur_millis_t DEFAULT_MUTEX_TIMEOUT = 20;
/**
* Each SPI device will have a corresponding cookie. The cookie is used by the communication
* interface and contains device specific information like the largest expected size to be
@@ -139,9 +143,43 @@
*/
void activateCsDeselect(bool deselectCs, uint16_t delayUsecs);
void getMutexParams(MutexIF::TimeoutType& csTimeoutType, dur_millis_t& csTimeout) const;
void setMutexParams(MutexIF::TimeoutType csTimeoutType, dur_millis_t csTimeout);
void setCsLockManual(bool enable);
bool getCsLockManual() const;
spi_ioc_transfer* getTransferStructHandle();
private:
address_t spiAddress;
gpioId_t chipSelectPin;
std::string spiDevice;
spi::SpiComIfModes comIfMode;
// Required for regular mode
const size_t maxSize;
spi::SpiModes spiMode;
/**
* If this is set to true, the SPI ComIF will not perform any mutex locking for the
* CS mechanism. The user is responsible to locking and unlocking the mutex for the
* whole duration of the transfers.
*/
bool manualCsLock = false;
uint32_t spiSpeed;
bool halfDuplex = false;
MutexIF::TimeoutType csTimeoutType = MutexIF::TimeoutType::WAITING;
dur_millis_t csTimeout = DEFAULT_MUTEX_TIMEOUT;
// Required for callback mode
spi::send_callback_function_t sendCallback = nullptr;
void* callbackArgs = nullptr;
struct spi_ioc_transfer spiTransferStruct = {};
UncommonParameters uncommonParameters;
/**
* Internal constructor which initializes every field
* @param spiAddress
@@ -154,27 +192,8 @@
* @param args
*/
SpiCookie(spi::SpiComIfModes comIfMode, address_t spiAddress, gpioId_t chipSelect,
std::string spiDev, const size_t maxSize, spi::SpiModes spiMode, uint32_t spiSpeed,
const size_t maxSize, spi::SpiModes spiMode, uint32_t spiSpeed,
spi::send_callback_function_t callback, void* args);
address_t spiAddress;
gpioId_t chipSelectPin;
std::string spiDevice;
spi::SpiComIfModes comIfMode;
// Required for regular mode
const size_t maxSize;
spi::SpiModes spiMode;
uint32_t spiSpeed;
bool halfDuplex = false;
// Required for callback mode
spi::send_callback_function_t sendCallback = nullptr;
void* callbackArgs = nullptr;
struct spi_ioc_transfer spiTransferStruct = {};
UncommonParameters uncommonParameters;
};
#endif /* LINUX_SPI_SPICOOKIE_H_ */

View File

@@ -5,11 +5,11 @@
#error Include FIFOBase.h before FIFOBase.tpp!
#endif
template<typename T>
inline FIFOBase<T>::FIFOBase(T* values, const size_t maxCapacity):
maxCapacity(maxCapacity), values(values){};
template <typename T>
inline FIFOBase<T>::FIFOBase(T* values, const size_t maxCapacity)
: maxCapacity(maxCapacity), values(values){};
template<typename T>
template <typename T>
inline ReturnValue_t FIFOBase<T>::insert(T value) {
if (full()) {
return FULL;
@@ -21,12 +21,12 @@ inline ReturnValue_t FIFOBase<T>::insert(T value) {
}
};
template<typename T>
template <typename T>
inline ReturnValue_t FIFOBase<T>::retrieve(T* value) {
if (empty()) {
return EMPTY;
} else {
if (value == nullptr){
if (value == nullptr) {
return HasReturnvaluesIF::RETURN_FAILED;
}
*value = values[readIndex];
@@ -36,12 +36,12 @@ inline ReturnValue_t FIFOBase<T>::retrieve(T* value) {
}
};
template<typename T>
template <typename T>
inline ReturnValue_t FIFOBase<T>::peek(T* value) {
if(empty()) {
if (empty()) {
return EMPTY;
} else {
if (value == nullptr){
if (value == nullptr) {
return HasReturnvaluesIF::RETURN_FAILED;
}
*value = values[readIndex];
@@ -49,28 +49,28 @@ inline ReturnValue_t FIFOBase<T>::peek(T* value) {
}
};
template<typename T>
template <typename T>
inline ReturnValue_t FIFOBase<T>::pop() {
T value;
return this->retrieve(&value);
};
template<typename T>
template <typename T>
inline bool FIFOBase<T>::empty() {
return (currentSize == 0);
};
template<typename T>
template <typename T>
inline bool FIFOBase<T>::full() {
return (currentSize == maxCapacity);
}
template<typename T>
template <typename T>
inline size_t FIFOBase<T>::size() {
return currentSize;
}
template<typename T>
template <typename T>
inline size_t FIFOBase<T>::next(size_t current) {
++current;
if (current == maxCapacity) {
@@ -79,14 +79,13 @@ inline size_t FIFOBase<T>::next(size_t current) {
return current;
}
template<typename T>
template <typename T>
inline size_t FIFOBase<T>::getMaxCapacity() const {
return maxCapacity;
}
template<typename T>
inline void FIFOBase<T>::setContainer(T *data) {
template <typename T>
inline void FIFOBase<T>::setContainer(T* data) {
this->values = data;
}

View File

@@ -12,6 +12,7 @@ template <typename T, size_t MAX_SIZE, typename count_t = uint8_t>
class FixedArrayList : public ArrayList<T, count_t> {
static_assert(MAX_SIZE <= std::numeric_limits<count_t>::max(),
"count_t is not large enough to hold MAX_SIZE");
private:
T data[MAX_SIZE];

View File

@@ -1,15 +1,15 @@
#ifndef FRAMEWORK_CONTAINER_FIXEDORDEREDMULTIMAP_TPP_
#define FRAMEWORK_CONTAINER_FIXEDORDEREDMULTIMAP_TPP_
template<typename key_t, typename T, typename KEY_COMPARE>
inline ReturnValue_t FixedOrderedMultimap<key_t, T, KEY_COMPARE>::insert(key_t key, T value, Iterator *storedValue) {
template <typename key_t, typename T, typename KEY_COMPARE>
inline ReturnValue_t FixedOrderedMultimap<key_t, T, KEY_COMPARE>::insert(key_t key, T value,
Iterator *storedValue) {
if (_size == theMap.maxSize()) {
return MAP_FULL;
}
size_t position = findNicePlace(key);
memmove(static_cast<void*>(&theMap[position + 1]),static_cast<void*>(&theMap[position]),
(_size - position) * sizeof(std::pair<key_t,T>));
memmove(static_cast<void *>(&theMap[position + 1]), static_cast<void *>(&theMap[position]),
(_size - position) * sizeof(std::pair<key_t, T>));
theMap[position].first = key;
theMap[position].second = value;
++_size;
@@ -18,12 +18,12 @@ inline ReturnValue_t FixedOrderedMultimap<key_t, T, KEY_COMPARE>::insert(key_t k
}
return HasReturnvaluesIF::RETURN_OK;
}
template<typename key_t, typename T, typename KEY_COMPARE>
template <typename key_t, typename T, typename KEY_COMPARE>
inline ReturnValue_t FixedOrderedMultimap<key_t, T, KEY_COMPARE>::insert(std::pair<key_t, T> pair) {
return insert(pair.first, pair.second);
}
template<typename key_t, typename T, typename KEY_COMPARE>
template <typename key_t, typename T, typename KEY_COMPARE>
inline ReturnValue_t FixedOrderedMultimap<key_t, T, KEY_COMPARE>::exists(key_t key) const {
ReturnValue_t result = KEY_DOES_NOT_EXIST;
if (findFirstIndex(key) < _size) {
@@ -32,7 +32,7 @@ inline ReturnValue_t FixedOrderedMultimap<key_t, T, KEY_COMPARE>::exists(key_t k
return result;
}
template<typename key_t, typename T, typename KEY_COMPARE>
template <typename key_t, typename T, typename KEY_COMPARE>
inline ReturnValue_t FixedOrderedMultimap<key_t, T, KEY_COMPARE>::erase(Iterator *iter) {
size_t i;
if ((i = findFirstIndex((*iter).value->first)) >= _size) {
@@ -47,7 +47,7 @@ inline ReturnValue_t FixedOrderedMultimap<key_t, T, KEY_COMPARE>::erase(Iterator
return HasReturnvaluesIF::RETURN_OK;
}
template<typename key_t, typename T, typename KEY_COMPARE>
template <typename key_t, typename T, typename KEY_COMPARE>
inline ReturnValue_t FixedOrderedMultimap<key_t, T, KEY_COMPARE>::erase(key_t key) {
size_t i;
if ((i = findFirstIndex(key)) >= _size) {
@@ -60,7 +60,7 @@ inline ReturnValue_t FixedOrderedMultimap<key_t, T, KEY_COMPARE>::erase(key_t ke
return HasReturnvaluesIF::RETURN_OK;
}
template<typename key_t, typename T, typename KEY_COMPARE>
template <typename key_t, typename T, typename KEY_COMPARE>
inline ReturnValue_t FixedOrderedMultimap<key_t, T, KEY_COMPARE>::find(key_t key, T **value) const {
ReturnValue_t result = exists(key);
if (result != HasReturnvaluesIF::RETURN_OK) {
@@ -70,8 +70,9 @@ inline ReturnValue_t FixedOrderedMultimap<key_t, T, KEY_COMPARE>::find(key_t key
return HasReturnvaluesIF::RETURN_OK;
}
template<typename key_t, typename T, typename KEY_COMPARE>
inline size_t FixedOrderedMultimap<key_t, T, KEY_COMPARE>::findFirstIndex(key_t key, size_t startAt) const {
template <typename key_t, typename T, typename KEY_COMPARE>
inline size_t FixedOrderedMultimap<key_t, T, KEY_COMPARE>::findFirstIndex(key_t key,
size_t startAt) const {
if (startAt >= _size) {
return startAt + 1;
}
@@ -84,7 +85,7 @@ inline size_t FixedOrderedMultimap<key_t, T, KEY_COMPARE>::findFirstIndex(key_t
return i;
}
template<typename key_t, typename T, typename KEY_COMPARE>
template <typename key_t, typename T, typename KEY_COMPARE>
inline size_t FixedOrderedMultimap<key_t, T, KEY_COMPARE>::findNicePlace(key_t key) const {
size_t i = 0;
for (i = 0; i < _size; ++i) {
@@ -95,15 +96,14 @@ inline size_t FixedOrderedMultimap<key_t, T, KEY_COMPARE>::findNicePlace(key_t k
return i;
}
template<typename key_t, typename T, typename KEY_COMPARE>
template <typename key_t, typename T, typename KEY_COMPARE>
inline void FixedOrderedMultimap<key_t, T, KEY_COMPARE>::removeFromPosition(size_t position) {
if (_size <= position) {
return;
}
memmove(static_cast<void*>(&theMap[position]), static_cast<void*>(&theMap[position + 1]),
(_size - position - 1) * sizeof(std::pair<key_t,T>));
memmove(static_cast<void *>(&theMap[position]), static_cast<void *>(&theMap[position + 1]),
(_size - position - 1) * sizeof(std::pair<key_t, T>));
--_size;
}
#endif /* FRAMEWORK_CONTAINER_FIXEDORDEREDMULTIMAP_TPP_ */

View File

@@ -5,33 +5,31 @@
#error Include LocalPoolVariable.h before LocalPoolVariable.tpp!
#endif
template<typename T>
inline LocalPoolVariable<T>::LocalPoolVariable(HasLocalDataPoolIF* hkOwner,
lp_id_t poolId, DataSetIF* dataSet, pool_rwm_t setReadWriteMode):
LocalPoolObjectBase(poolId, hkOwner, dataSet, setReadWriteMode) {}
template <typename T>
inline LocalPoolVariable<T>::LocalPoolVariable(HasLocalDataPoolIF* hkOwner, lp_id_t poolId,
DataSetIF* dataSet, pool_rwm_t setReadWriteMode)
: LocalPoolObjectBase(poolId, hkOwner, dataSet, setReadWriteMode) {}
template<typename T>
inline LocalPoolVariable<T>::LocalPoolVariable(object_id_t poolOwner,
lp_id_t poolId, DataSetIF *dataSet, pool_rwm_t setReadWriteMode):
LocalPoolObjectBase(poolOwner, poolId, dataSet, setReadWriteMode) {}
template <typename T>
inline LocalPoolVariable<T>::LocalPoolVariable(object_id_t poolOwner, lp_id_t poolId,
DataSetIF* dataSet, pool_rwm_t setReadWriteMode)
: LocalPoolObjectBase(poolOwner, poolId, dataSet, setReadWriteMode) {}
template <typename T>
inline LocalPoolVariable<T>::LocalPoolVariable(gp_id_t globalPoolId, DataSetIF* dataSet,
pool_rwm_t setReadWriteMode)
: LocalPoolObjectBase(globalPoolId.objectId, globalPoolId.localPoolId, dataSet,
setReadWriteMode) {}
template<typename T>
inline LocalPoolVariable<T>::LocalPoolVariable(gp_id_t globalPoolId,
DataSetIF *dataSet, pool_rwm_t setReadWriteMode):
LocalPoolObjectBase(globalPoolId.objectId, globalPoolId.localPoolId,
dataSet, setReadWriteMode){}
template<typename T>
inline ReturnValue_t LocalPoolVariable<T>::read(
MutexIF::TimeoutType timeoutType, uint32_t timeoutMs) {
if(hkManager == nullptr) {
template <typename T>
inline ReturnValue_t LocalPoolVariable<T>::read(MutexIF::TimeoutType timeoutType,
uint32_t timeoutMs) {
if (hkManager == nullptr) {
return readWithoutLock();
}
MutexIF* mutex = LocalDpManagerAttorney::getMutexHandle(*hkManager);
ReturnValue_t result = mutex->lockMutex(timeoutType, timeoutMs);
if(result != HasReturnvaluesIF::RETURN_OK) {
if (result != HasReturnvaluesIF::RETURN_OK) {
return result;
}
result = readWithoutLock();
@@ -39,23 +37,21 @@ inline ReturnValue_t LocalPoolVariable<T>::read(
return result;
}
template<typename T>
template <typename T>
inline ReturnValue_t LocalPoolVariable<T>::readWithoutLock() {
if(readWriteMode == pool_rwm_t::VAR_WRITE) {
if (readWriteMode == pool_rwm_t::VAR_WRITE) {
object_id_t targetObjectId = hkManager->getCreatorObjectId();
reportReadCommitError("LocalPoolVector",
PoolVariableIF::INVALID_READ_WRITE_MODE, true, targetObjectId,
localPoolId);
reportReadCommitError("LocalPoolVector", PoolVariableIF::INVALID_READ_WRITE_MODE, true,
targetObjectId, localPoolId);
return PoolVariableIF::INVALID_READ_WRITE_MODE;
}
PoolEntry<T>* poolEntry = nullptr;
ReturnValue_t result = LocalDpManagerAttorney::fetchPoolEntry(*hkManager, localPoolId,
&poolEntry);
if(result != RETURN_OK) {
ReturnValue_t result =
LocalDpManagerAttorney::fetchPoolEntry(*hkManager, localPoolId, &poolEntry);
if (result != RETURN_OK) {
object_id_t ownerObjectId = hkManager->getCreatorObjectId();
reportReadCommitError("LocalPoolVariable", result,
false, ownerObjectId, localPoolId);
reportReadCommitError("LocalPoolVariable", result, false, ownerObjectId, localPoolId);
return result;
}
@@ -64,22 +60,22 @@ inline ReturnValue_t LocalPoolVariable<T>::readWithoutLock() {
return RETURN_OK;
}
template<typename T>
inline ReturnValue_t LocalPoolVariable<T>::commit(bool setValid,
MutexIF::TimeoutType timeoutType, uint32_t timeoutMs) {
template <typename T>
inline ReturnValue_t LocalPoolVariable<T>::commit(bool setValid, MutexIF::TimeoutType timeoutType,
uint32_t timeoutMs) {
this->setValid(setValid);
return commit(timeoutType, timeoutMs);
}
template<typename T>
inline ReturnValue_t LocalPoolVariable<T>::commit(
MutexIF::TimeoutType timeoutType, uint32_t timeoutMs) {
if(hkManager == nullptr) {
template <typename T>
inline ReturnValue_t LocalPoolVariable<T>::commit(MutexIF::TimeoutType timeoutType,
uint32_t timeoutMs) {
if (hkManager == nullptr) {
return commitWithoutLock();
}
MutexIF* mutex = LocalDpManagerAttorney::getMutexHandle(*hkManager);
ReturnValue_t result = mutex->lockMutex(timeoutType, timeoutMs);
if(result != HasReturnvaluesIF::RETURN_OK) {
if (result != HasReturnvaluesIF::RETURN_OK) {
return result;
}
result = commitWithoutLock();
@@ -87,23 +83,21 @@ inline ReturnValue_t LocalPoolVariable<T>::commit(
return result;
}
template<typename T>
template <typename T>
inline ReturnValue_t LocalPoolVariable<T>::commitWithoutLock() {
if(readWriteMode == pool_rwm_t::VAR_READ) {
if (readWriteMode == pool_rwm_t::VAR_READ) {
object_id_t targetObjectId = hkManager->getCreatorObjectId();
reportReadCommitError("LocalPoolVector",
PoolVariableIF::INVALID_READ_WRITE_MODE, false, targetObjectId,
localPoolId);
reportReadCommitError("LocalPoolVector", PoolVariableIF::INVALID_READ_WRITE_MODE, false,
targetObjectId, localPoolId);
return PoolVariableIF::INVALID_READ_WRITE_MODE;
}
PoolEntry<T>* poolEntry = nullptr;
ReturnValue_t result = LocalDpManagerAttorney::fetchPoolEntry(*hkManager, localPoolId,
&poolEntry);
if(result != RETURN_OK) {
ReturnValue_t result =
LocalDpManagerAttorney::fetchPoolEntry(*hkManager, localPoolId, &poolEntry);
if (result != RETURN_OK) {
object_id_t ownerObjectId = hkManager->getCreatorObjectId();
reportReadCommitError("LocalPoolVariable", result,
false, ownerObjectId, localPoolId);
reportReadCommitError("LocalPoolVariable", result, false, ownerObjectId, localPoolId);
return result;
}
@@ -112,98 +106,88 @@ inline ReturnValue_t LocalPoolVariable<T>::commitWithoutLock() {
return RETURN_OK;
}
template<typename T>
inline ReturnValue_t LocalPoolVariable<T>::serialize(uint8_t** buffer,
size_t* size, const size_t max_size,
template <typename T>
inline ReturnValue_t LocalPoolVariable<T>::serialize(
uint8_t** buffer, size_t* size, const size_t max_size,
SerializeIF::Endianness streamEndianness) const {
return SerializeAdapter::serialize(&value,
buffer, size ,max_size, streamEndianness);
return SerializeAdapter::serialize(&value, buffer, size, max_size, streamEndianness);
}
template<typename T>
template <typename T>
inline size_t LocalPoolVariable<T>::getSerializedSize() const {
return SerializeAdapter::getSerializedSize(&value);
}
template<typename T>
inline ReturnValue_t LocalPoolVariable<T>::deSerialize(const uint8_t** buffer,
size_t* size, SerializeIF::Endianness streamEndianness) {
template <typename T>
inline ReturnValue_t LocalPoolVariable<T>::deSerialize(const uint8_t** buffer, size_t* size,
SerializeIF::Endianness streamEndianness) {
return SerializeAdapter::deSerialize(&value, buffer, size, streamEndianness);
}
#if FSFW_CPP_OSTREAM_ENABLED == 1
template<typename T>
inline std::ostream& operator<< (std::ostream &out,
const LocalPoolVariable<T> &var) {
template <typename T>
inline std::ostream& operator<<(std::ostream& out, const LocalPoolVariable<T>& var) {
out << var.value;
return out;
}
#endif
template<typename T>
template <typename T>
inline LocalPoolVariable<T>::operator T() const {
return value;
}
template<typename T>
inline LocalPoolVariable<T> & LocalPoolVariable<T>::operator=(
const T& newValue) {
template <typename T>
inline LocalPoolVariable<T>& LocalPoolVariable<T>::operator=(const T& newValue) {
value = newValue;
return *this;
}
template<typename T>
inline LocalPoolVariable<T>& LocalPoolVariable<T>::operator =(
template <typename T>
inline LocalPoolVariable<T>& LocalPoolVariable<T>::operator=(
const LocalPoolVariable<T>& newPoolVariable) {
value = newPoolVariable.value;
return *this;
}
template<typename T>
inline bool LocalPoolVariable<T>::operator ==(
const LocalPoolVariable<T> &other) const {
template <typename T>
inline bool LocalPoolVariable<T>::operator==(const LocalPoolVariable<T>& other) const {
return this->value == other.value;
}
template<typename T>
inline bool LocalPoolVariable<T>::operator ==(const T &other) const {
template <typename T>
inline bool LocalPoolVariable<T>::operator==(const T& other) const {
return this->value == other;
}
template<typename T>
inline bool LocalPoolVariable<T>::operator !=(
const LocalPoolVariable<T> &other) const {
return not (*this == other);
template <typename T>
inline bool LocalPoolVariable<T>::operator!=(const LocalPoolVariable<T>& other) const {
return not(*this == other);
}
template<typename T>
inline bool LocalPoolVariable<T>::operator !=(const T &other) const {
return not (*this == other);
template <typename T>
inline bool LocalPoolVariable<T>::operator!=(const T& other) const {
return not(*this == other);
}
template<typename T>
inline bool LocalPoolVariable<T>::operator <(
const LocalPoolVariable<T> &other) const {
template <typename T>
inline bool LocalPoolVariable<T>::operator<(const LocalPoolVariable<T>& other) const {
return this->value < other.value;
}
template<typename T>
inline bool LocalPoolVariable<T>::operator <(const T &other) const {
template <typename T>
inline bool LocalPoolVariable<T>::operator<(const T& other) const {
return this->value < other;
}
template<typename T>
inline bool LocalPoolVariable<T>::operator >(
const LocalPoolVariable<T> &other) const {
return not (*this < other);
template <typename T>
inline bool LocalPoolVariable<T>::operator>(const LocalPoolVariable<T>& other) const {
return not(*this < other);
}
template<typename T>
inline bool LocalPoolVariable<T>::operator >(const T &other) const {
return not (*this < other);
template <typename T>
inline bool LocalPoolVariable<T>::operator>(const T& other) const {
return not(*this < other);
}
#endif /* FSFW_DATAPOOLLOCAL_LOCALPOOLVARIABLE_TPP_ */

View File

@@ -5,48 +5,47 @@
#error Include LocalPoolVector.h before LocalPoolVector.tpp!
#endif
template<typename T, uint16_t vectorSize>
inline LocalPoolVector<T, vectorSize>::LocalPoolVector(
HasLocalDataPoolIF* hkOwner, lp_id_t poolId, DataSetIF* dataSet,
pool_rwm_t setReadWriteMode):
LocalPoolObjectBase(poolId, hkOwner, dataSet, setReadWriteMode) {}
template <typename T, uint16_t vectorSize>
inline LocalPoolVector<T, vectorSize>::LocalPoolVector(HasLocalDataPoolIF* hkOwner, lp_id_t poolId,
DataSetIF* dataSet,
pool_rwm_t setReadWriteMode)
: LocalPoolObjectBase(poolId, hkOwner, dataSet, setReadWriteMode) {}
template<typename T, uint16_t vectorSize>
inline LocalPoolVector<T, vectorSize>::LocalPoolVector(object_id_t poolOwner,
lp_id_t poolId, DataSetIF *dataSet, pool_rwm_t setReadWriteMode):
LocalPoolObjectBase(poolOwner, poolId, dataSet, setReadWriteMode) {}
template <typename T, uint16_t vectorSize>
inline LocalPoolVector<T, vectorSize>::LocalPoolVector(object_id_t poolOwner, lp_id_t poolId,
DataSetIF* dataSet,
pool_rwm_t setReadWriteMode)
: LocalPoolObjectBase(poolOwner, poolId, dataSet, setReadWriteMode) {}
template<typename T, uint16_t vectorSize>
inline LocalPoolVector<T, vectorSize>::LocalPoolVector(gp_id_t globalPoolId,
DataSetIF *dataSet, pool_rwm_t setReadWriteMode):
LocalPoolObjectBase(globalPoolId.objectId, globalPoolId.localPoolId,
dataSet, setReadWriteMode) {}
template <typename T, uint16_t vectorSize>
inline LocalPoolVector<T, vectorSize>::LocalPoolVector(gp_id_t globalPoolId, DataSetIF* dataSet,
pool_rwm_t setReadWriteMode)
: LocalPoolObjectBase(globalPoolId.objectId, globalPoolId.localPoolId, dataSet,
setReadWriteMode) {}
template<typename T, uint16_t vectorSize>
inline ReturnValue_t LocalPoolVector<T, vectorSize>::read(
MutexIF::TimeoutType timeoutType, uint32_t timeoutMs) {
template <typename T, uint16_t vectorSize>
inline ReturnValue_t LocalPoolVector<T, vectorSize>::read(MutexIF::TimeoutType timeoutType,
uint32_t timeoutMs) {
MutexGuard(LocalDpManagerAttorney::getMutexHandle(*hkManager), timeoutType, timeoutMs);
return readWithoutLock();
}
template<typename T, uint16_t vectorSize>
template <typename T, uint16_t vectorSize>
inline ReturnValue_t LocalPoolVector<T, vectorSize>::readWithoutLock() {
if(readWriteMode == pool_rwm_t::VAR_WRITE) {
if (readWriteMode == pool_rwm_t::VAR_WRITE) {
object_id_t targetObjectId = hkManager->getCreatorObjectId();
reportReadCommitError("LocalPoolVector",
PoolVariableIF::INVALID_READ_WRITE_MODE, true, targetObjectId,
localPoolId);
reportReadCommitError("LocalPoolVector", PoolVariableIF::INVALID_READ_WRITE_MODE, true,
targetObjectId, localPoolId);
return PoolVariableIF::INVALID_READ_WRITE_MODE;
}
PoolEntry<T>* poolEntry = nullptr;
ReturnValue_t result = LocalDpManagerAttorney::fetchPoolEntry(*hkManager, localPoolId,
&poolEntry);
ReturnValue_t result =
LocalDpManagerAttorney::fetchPoolEntry(*hkManager, localPoolId, &poolEntry);
memset(this->value, 0, vectorSize * sizeof(T));
if(result != RETURN_OK) {
if (result != RETURN_OK) {
object_id_t targetObjectId = hkManager->getCreatorObjectId();
reportReadCommitError("LocalPoolVector", result, true, targetObjectId,
localPoolId);
reportReadCommitError("LocalPoolVector", result, true, targetObjectId, localPoolId);
return result;
}
std::memcpy(this->value, poolEntry->getDataPtr(), poolEntry->getByteSize());
@@ -54,36 +53,35 @@ inline ReturnValue_t LocalPoolVector<T, vectorSize>::readWithoutLock() {
return RETURN_OK;
}
template<typename T, uint16_t vectorSize>
template <typename T, uint16_t vectorSize>
inline ReturnValue_t LocalPoolVector<T, vectorSize>::commit(bool valid,
MutexIF::TimeoutType timeoutType, uint32_t timeoutMs) {
MutexIF::TimeoutType timeoutType,
uint32_t timeoutMs) {
this->setValid(valid);
return commit(timeoutType, timeoutMs);
}
template<typename T, uint16_t vectorSize>
inline ReturnValue_t LocalPoolVector<T, vectorSize>::commit(
MutexIF::TimeoutType timeoutType, uint32_t timeoutMs) {
template <typename T, uint16_t vectorSize>
inline ReturnValue_t LocalPoolVector<T, vectorSize>::commit(MutexIF::TimeoutType timeoutType,
uint32_t timeoutMs) {
MutexGuard(LocalDpManagerAttorney::getMutexHandle(*hkManager), timeoutType, timeoutMs);
return commitWithoutLock();
}
template<typename T, uint16_t vectorSize>
template <typename T, uint16_t vectorSize>
inline ReturnValue_t LocalPoolVector<T, vectorSize>::commitWithoutLock() {
if(readWriteMode == pool_rwm_t::VAR_READ) {
if (readWriteMode == pool_rwm_t::VAR_READ) {
object_id_t targetObjectId = hkManager->getCreatorObjectId();
reportReadCommitError("LocalPoolVector",
PoolVariableIF::INVALID_READ_WRITE_MODE, false, targetObjectId,
localPoolId);
reportReadCommitError("LocalPoolVector", PoolVariableIF::INVALID_READ_WRITE_MODE, false,
targetObjectId, localPoolId);
return PoolVariableIF::INVALID_READ_WRITE_MODE;
}
PoolEntry<T>* poolEntry = nullptr;
ReturnValue_t result = LocalDpManagerAttorney::fetchPoolEntry(*hkManager, localPoolId,
&poolEntry);
if(result != RETURN_OK) {
ReturnValue_t result =
LocalDpManagerAttorney::fetchPoolEntry(*hkManager, localPoolId, &poolEntry);
if (result != RETURN_OK) {
object_id_t targetObjectId = hkManager->getCreatorObjectId();
reportReadCommitError("LocalPoolVector", result, false, targetObjectId,
localPoolId);
reportReadCommitError("LocalPoolVector", result, false, targetObjectId, localPoolId);
return result;
}
std::memcpy(poolEntry->getDataPtr(), this->value, poolEntry->getByteSize());
@@ -91,48 +89,51 @@ inline ReturnValue_t LocalPoolVector<T, vectorSize>::commitWithoutLock() {
return RETURN_OK;
}
template<typename T, uint16_t vectorSize>
inline T& LocalPoolVector<T, vectorSize>::operator [](size_t i) {
if(i < vectorSize) {
template <typename T, uint16_t vectorSize>
inline T& LocalPoolVector<T, vectorSize>::operator[](size_t i) {
if (i < vectorSize) {
return value[i];
}
// If this happens, I have to set some value. I consider this
// a configuration error, but I wont exit here.
#if FSFW_CPP_OSTREAM_ENABLED == 1
sif::warning << "LocalPoolVector: Invalid index. Setting or returning"
" last value!" << std::endl;
" last value!"
<< std::endl;
#else
sif::printWarning("LocalPoolVector: Invalid index. Setting or returning"
sif::printWarning(
"LocalPoolVector: Invalid index. Setting or returning"
" last value!\n");
#endif
return value[vectorSize - 1];
}
template<typename T, uint16_t vectorSize>
inline const T& LocalPoolVector<T, vectorSize>::operator [](size_t i) const {
if(i < vectorSize) {
template <typename T, uint16_t vectorSize>
inline const T& LocalPoolVector<T, vectorSize>::operator[](size_t i) const {
if (i < vectorSize) {
return value[i];
}
// If this happens, I have to set some value. I consider this
// a configuration error, but I wont exit here.
#if FSFW_CPP_OSTREAM_ENABLED == 1
sif::warning << "LocalPoolVector: Invalid index. Setting or returning"
" last value!" << std::endl;
" last value!"
<< std::endl;
#else
sif::printWarning("LocalPoolVector: Invalid index. Setting or returning"
sif::printWarning(
"LocalPoolVector: Invalid index. Setting or returning"
" last value!\n");
#endif
return value[vectorSize - 1];
}
template<typename T, uint16_t vectorSize>
inline ReturnValue_t LocalPoolVector<T, vectorSize>::serialize(uint8_t** buffer,
size_t* size, size_t maxSize,
template <typename T, uint16_t vectorSize>
inline ReturnValue_t LocalPoolVector<T, vectorSize>::serialize(
uint8_t** buffer, size_t* size, size_t maxSize,
SerializeIF::Endianness streamEndianness) const {
ReturnValue_t result = HasReturnvaluesIF::RETURN_FAILED;
for (uint16_t i = 0; i < vectorSize; i++) {
result = SerializeAdapter::serialize(&(value[i]), buffer, size,
maxSize, streamEndianness);
result = SerializeAdapter::serialize(&(value[i]), buffer, size, maxSize, streamEndianness);
if (result != HasReturnvaluesIF::RETURN_OK) {
break;
}
@@ -140,19 +141,17 @@ inline ReturnValue_t LocalPoolVector<T, vectorSize>::serialize(uint8_t** buffer,
return result;
}
template<typename T, uint16_t vectorSize>
template <typename T, uint16_t vectorSize>
inline size_t LocalPoolVector<T, vectorSize>::getSerializedSize() const {
return vectorSize * SerializeAdapter::getSerializedSize(value);
}
template<typename T, uint16_t vectorSize>
template <typename T, uint16_t vectorSize>
inline ReturnValue_t LocalPoolVector<T, vectorSize>::deSerialize(
const uint8_t** buffer, size_t* size,
SerializeIF::Endianness streamEndianness) {
const uint8_t** buffer, size_t* size, SerializeIF::Endianness streamEndianness) {
ReturnValue_t result = HasReturnvaluesIF::RETURN_FAILED;
for (uint16_t i = 0; i < vectorSize; i++) {
result = SerializeAdapter::deSerialize(&(value[i]), buffer, size,
streamEndianness);
result = SerializeAdapter::deSerialize(&(value[i]), buffer, size, streamEndianness);
if (result != HasReturnvaluesIF::RETURN_OK) {
break;
}
@@ -161,13 +160,12 @@ inline ReturnValue_t LocalPoolVector<T, vectorSize>::deSerialize(
}
#if FSFW_CPP_OSTREAM_ENABLED == 1
template<typename T, uint16_t vectorSize>
inline std::ostream& operator<< (std::ostream &out,
const LocalPoolVector<T, vectorSize> &var) {
template <typename T, uint16_t vectorSize>
inline std::ostream& operator<<(std::ostream& out, const LocalPoolVector<T, vectorSize>& var) {
out << "Vector: [";
for(int i = 0;i < vectorSize; i++) {
for (int i = 0; i < vectorSize; i++) {
out << var.value[i];
if(i < vectorSize - 1) {
if (i < vectorSize - 1) {
out << ", ";
}
}

View File

@@ -1062,7 +1062,8 @@ class DeviceHandlerBase : public DeviceHandlerIF,
/**
* Same as triggerEvent, but for forwarding if object is used as proxy.
*/
virtual void forwardEvent(Event event, uint32_t parameter1 = 0, uint32_t parameter2 = 0) const override;
virtual void forwardEvent(Event event, uint32_t parameter1 = 0,
uint32_t parameter2 = 0) const override;
/**
* Checks if current mode is transitional mode.

View File

@@ -50,7 +50,8 @@ class SystemObject : public SystemObjectIF {
virtual ReturnValue_t initialize() override;
virtual ReturnValue_t checkObjectConnections() override;
virtual void forwardEvent(Event event, uint32_t parameter1 = 0, uint32_t parameter2 = 0) const override;
virtual void forwardEvent(Event event, uint32_t parameter1 = 0,
uint32_t parameter2 = 0) const override;
};
#endif /* FSFW_OBJECTMANAGER_SYSTEMOBJECT_H_ */

View File

@@ -340,7 +340,7 @@ ReturnValue_t TcpTmTcServer::handleTcRingBufferData(size_t availableReadData) {
size_t foundSize = 0;
size_t readLen = 0;
while (readLen < readAmount) {
if(spacePacketParser == nullptr) {
if (spacePacketParser == nullptr) {
return HasReturnvaluesIF::RETURN_FAILED;
}
result =

View File

@@ -154,7 +154,7 @@ void UdpTcPollingTask::setTimeout(double timeoutSeconds) {
#endif
}
#elif defined(PLATFORM_UNIX)
timeval tval {};
timeval tval{};
tval = timevalOperations::toTimeval(timeoutSeconds);
int result = setsockopt(serverSocket, SOL_SOCKET, SO_RCVTIMEO, &tval, sizeof(receptionTimeout));
if (result == -1) {

View File

@@ -20,7 +20,7 @@
const std::string UdpTmTcBridge::DEFAULT_SERVER_PORT = tcpip::DEFAULT_SERVER_PORT;
UdpTmTcBridge::UdpTmTcBridge(object_id_t objectId, object_id_t tcDestination,
const std::string& udpServerPort_, object_id_t tmStoreId,
const std::string &udpServerPort_, object_id_t tmStoreId,
object_id_t tcStoreId)
: TmTcBridge(objectId, tcDestination, tmStoreId, tcStoreId) {
if (udpServerPort_.empty()) {

View File

@@ -29,8 +29,8 @@ class UdpTmTcBridge : public TmTcBridge, public TcpIpBase {
/* The ports chosen here should not be used by any other process. */
static const std::string DEFAULT_SERVER_PORT;
UdpTmTcBridge(object_id_t objectId, object_id_t tcDestination, const std::string& udpServerPort = "",
object_id_t tmStoreId = objects::TM_STORE,
UdpTmTcBridge(object_id_t objectId, object_id_t tcDestination,
const std::string& udpServerPort = "", object_id_t tmStoreId = objects::TM_STORE,
object_id_t tcStoreId = objects::TC_STORE);
~UdpTmTcBridge() override;

View File

@@ -97,7 +97,8 @@ ReturnValue_t CService201HealthCommanding::handleReply(const CommandMessage *rep
}
// Not used for now, health state already reported by event
[[maybe_unused]] ReturnValue_t CService201HealthCommanding::prepareHealthSetReply(const CommandMessage *reply) {
[[maybe_unused]] ReturnValue_t CService201HealthCommanding::prepareHealthSetReply(
const CommandMessage *reply) {
auto health = static_cast<uint8_t>(HealthMessage::getHealth(reply));
auto oldHealth = static_cast<uint8_t>(HealthMessage::getOldHealth(reply));
HealthSetReply healthSetReply(health, oldHealth);

View File

@@ -67,7 +67,5 @@ TEST_CASE("Power Switcher", "[power-switcher]") {
REQUIRE(not switcherUsingDummy.active());
}
SECTION("More Dummy Tests") {
}
SECTION("More Dummy Tests") {}
}