Compare commits

...

2 Commits

Author         SHA1         Message                          Date
Robin Müller   06111ec3ed   missing call added               2022-05-14 16:57:59 +02:00
Robin Müller   51087518b0   spi mutex handling refactoring   2022-05-14 11:32:51 +02:00
20 changed files with 563 additions and 476 deletions

View File

@ -0,0 +1,43 @@
#pragma once

#include "fsfw/ipc/MutexIF.h"
#include "fsfw/returnvalues/HasReturnvaluesIF.h"
#include "fsfw_hal/common/gpio/GpioIF.h"
// SpiCookie definition is required for the inline calls below (path assumed from the fsfw_hal layout)
#include "fsfw_hal/linux/spi/SpiCookie.h"

/**
 * RAII helper: takes the chip select mutex and pulls the chip select GPIO low on construction,
 * releases both again on destruction. Marks the cookie for manual chip select locking so the
 * SPI ComIF skips its own locking for the duration.
 */
class ManualCsLockWrapper : public HasReturnvaluesIF {
 public:
  ManualCsLockWrapper(MutexIF* lock, GpioIF* gpioIF, SpiCookie* cookie,
                      MutexIF::TimeoutType type = MutexIF::TimeoutType::BLOCKING,
                      uint32_t timeoutMs = 0)
      : lock(lock), gpioIF(gpioIF), cookie(cookie), type(type), timeoutMs(timeoutMs) {
    if (cookie == nullptr) {
      // TODO: Error? Or maybe throw exception..
      return;
    }
    cookie->setCsLockManual(true);
    lockResult = lock->lockMutex(type, timeoutMs);
    if (lockResult != RETURN_OK) {
      return;
    }
    gpioResult = gpioIF->pullLow(cookie->getChipSelectPin());
  }

  ~ManualCsLockWrapper() {
    if (cookie == nullptr) {
      return;
    }
    if (gpioResult == RETURN_OK) {
      gpioIF->pullHigh(cookie->getChipSelectPin());
    }
    cookie->setCsLockManual(false);
    if (lockResult == RETURN_OK) {
      lock->unlockMutex();
    }
  }

  // Initialized to failed so the destructor only undoes steps which were actually performed
  ReturnValue_t lockResult = RETURN_FAILED;
  ReturnValue_t gpioResult = RETURN_FAILED;

 private:
  MutexIF* lock;
  GpioIF* gpioIF;
  SpiCookie* cookie;
  MutexIF::TimeoutType type;
  uint32_t timeoutMs = 0;
};
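A usage sketch may help to see how the wrapper is meant to be used: it is created on the stack around a group of transfers which have to happen under a single chip select assertion. Only getCsMutex(), the cookie accessors and the wrapper itself come from this changeset; the include paths, the function and the surrounding pointer names are illustrative assumptions.

// Hypothetical call site, e.g. inside a device handler. Include paths are assumed
// from the fsfw_hal layout used elsewhere in this changeset.
#include "fsfw_hal/linux/spi/ManualCsLockWrapper.h"
#include "fsfw_hal/linux/spi/SpiComIF.h"
#include "fsfw_hal/linux/spi/SpiCookie.h"

void transferWithManualCs(SpiComIF* spiComIF, GpioIF* gpioIF, SpiCookie* spiCookie) {
  // Takes the CS mutex and pulls the chip select low for the lifetime of csLock
  ManualCsLockWrapper csLock(spiComIF->getCsMutex(), gpioIF, spiCookie);
  if (csLock.lockResult != HasReturnvaluesIF::RETURN_OK or
      csLock.gpioResult != HasReturnvaluesIF::RETURN_OK) {
    return;
  }
  // Several sendMessage / readReceivedMessage calls can now run back to back while the
  // chip select stays asserted; the ComIF skips its own CS locking because the cookie
  // was marked for manual CS locking by the wrapper.
  // ...
}  // Destructor pulls the chip select high again and releases the mutex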

View File

@ -196,16 +196,22 @@ ReturnValue_t SpiComIF::performRegularSendOperation(SpiCookie* spiCookie, const
bool fullDuplex = spiCookie->isFullDuplex();
gpioId_t gpioId = spiCookie->getChipSelectPin();
bool csLockManual = spiCookie->getCsLockManual();
/* Pull SPI CS low. For now, no support for active high given */
if (gpioId != gpio::NO_GPIO) {
result = spiMutex->lockMutex(timeoutType, timeoutMs);
MutexIF::TimeoutType csType;
dur_millis_t csTimeout = 0;
// Pull SPI CS low. For now, no support for active high given
if (gpioId != gpio::NO_GPIO and not csLockManual) {
spiCookie->getMutexParams(csType, csTimeout);
result = csMutex->lockMutex(csType, csTimeout);
if (result != RETURN_OK) {
#if FSFW_VERBOSE_LEVEL >= 1
#if FSFW_CPP_OSTREAM_ENABLED == 1
sif::error << "SpiComIF::sendMessage: Failed to lock mutex" << std::endl;
sif::error << "SpiComIF::sendMessage: Failed to lock mutex with code "
<< "0x" << std::hex << std::setfill('0') << std::setw(4) << result << std::dec
<< std::endl;
#else
sif::printError("SpiComIF::sendMessage: Failed to lock mutex\n");
sif::printError("SpiComIF::sendMessage: Failed to lock mutex with code %d\n", result);
#endif
#endif
return result;
@ -248,7 +254,7 @@ ReturnValue_t SpiComIF::performRegularSendOperation(SpiCookie* spiCookie, const
}
}
if (gpioId != gpio::NO_GPIO) {
if (gpioId != gpio::NO_GPIO and not csLockManual) {
gpioComIF->pullHigh(gpioId);
result = spiMutex->unlockMutex();
if (result != RETURN_OK) {
@ -292,12 +298,27 @@ ReturnValue_t SpiComIF::performHalfDuplexReception(SpiCookie* spiCookie) {
return result;
}
bool csLockManual = spiCookie->getCsLockManual();
gpioId_t gpioId = spiCookie->getChipSelectPin();
if (gpioId != gpio::NO_GPIO) {
result = spiMutex->lockMutex(timeoutType, timeoutMs);
MutexIF::TimeoutType csType;
dur_millis_t csTimeout = 0;
if (gpioId != gpio::NO_GPIO and not csLockManual) {
spiCookie->getMutexParams(csType, csTimeout);
result = csMutex->lockMutex(csType, csTimeout);
if (result != RETURN_OK) {
#if FSFW_VERBOSE_LEVEL >= 1
#if FSFW_CPP_OSTREAM_ENABLED == 1
sif::error << "SpiComIF::getSendSuccess: Failed to lock mutex" << std::endl;
sif::error << "SpiComIF::sendMessage: Failed to lock mutex with code "
<< "0x" << std::hex << std::setfill('0') << std::setw(4) << result << std::dec
<< std::endl;
#else
sif::printError("SpiComIF::sendMessage: Failed to lock mutex with code %d\n", result);
#endif
#endif
return result;
}
@ -315,7 +336,7 @@ ReturnValue_t SpiComIF::performHalfDuplexReception(SpiCookie* spiCookie) {
result = HALF_DUPLEX_TRANSFER_FAILED;
}
if (gpioId != gpio::NO_GPIO) {
if (gpioId != gpio::NO_GPIO and not csLockManual) {
gpioComIF->pullHigh(gpioId);
result = spiMutex->unlockMutex();
if (result != RETURN_OK) {
@ -346,6 +367,7 @@ ReturnValue_t SpiComIF::readReceivedMessage(CookieIF* cookie, uint8_t** buffer,
return HasReturnvaluesIF::RETURN_OK;
}
MutexIF* SpiComIF::getMutex(MutexIF::TimeoutType* timeoutType, uint32_t* timeoutMs) {
if (timeoutType != nullptr) {
*timeoutType = this->timeoutType;
@ -355,6 +377,9 @@ MutexIF* SpiComIF::getMutex(MutexIF::TimeoutType* timeoutType, uint32_t* timeout
}
return spiMutex;
}
MutexIF* SpiComIF::getCsMutex() { return csMutex; }
void SpiComIF::performSpiWiretapping(SpiCookie* spiCookie) {
if (spiCookie == nullptr) {

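Taken together, the hunks above change the chip select handling so that the lock parameters come from the cookie and the whole lock/pull sequence is skipped when manual CS locking is active. A condensed sketch of the resulting flow (not a verbatim copy; error reporting and the actual transfer are omitted, and the unlock is assumed to target the new csMutex):

MutexIF::TimeoutType csType;
dur_millis_t csTimeout = 0;
if (gpioId != gpio::NO_GPIO and not csLockManual) {
  // The cookie now owns the timeout configuration for the chip select mutex
  spiCookie->getMutexParams(csType, csTimeout);
  result = csMutex->lockMutex(csType, csTimeout);
  if (result != RETURN_OK) {
    return result;
  }
  gpioComIF->pullLow(gpioId);
}
// ... perform the SPI transfer ...
if (gpioId != gpio::NO_GPIO and not csLockManual) {
  gpioComIF->pullHigh(gpioId);
  csMutex->unlockMutex();
}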
View File

@ -22,15 +22,15 @@ class SpiCookie;
*/
class SpiComIF : public DeviceCommunicationIF, public SystemObject {
public:
static constexpr uint8_t spiRetvalId = CLASS_ID::HAL_SPI;
static constexpr uint8_t CLASS_ID = CLASS_ID::HAL_SPI;
static constexpr ReturnValue_t OPENING_FILE_FAILED =
HasReturnvaluesIF::makeReturnCode(spiRetvalId, 0);
HasReturnvaluesIF::makeReturnCode(CLASS_ID, 0);
/* Full duplex (ioctl) transfer failure */
static constexpr ReturnValue_t FULL_DUPLEX_TRANSFER_FAILED =
HasReturnvaluesIF::makeReturnCode(spiRetvalId, 1);
HasReturnvaluesIF::makeReturnCode(CLASS_ID, 1);
/* Half duplex (read/write) transfer failure */
static constexpr ReturnValue_t HALF_DUPLEX_TRANSFER_FAILED =
HasReturnvaluesIF::makeReturnCode(spiRetvalId, 2);
HasReturnvaluesIF::makeReturnCode(CLASS_ID, 2);
SpiComIF(object_id_t objectId, GpioIF* gpioComIF);
@ -44,7 +44,7 @@ class SpiComIF : public DeviceCommunicationIF, public SystemObject {
* @brief This function returns the mutex which can be used to protect the spi bus when
* the chip select must be driven from outside of the com if.
*/
MutexIF* getMutex(MutexIF::TimeoutType* timeoutType = nullptr, uint32_t* timeoutMs = nullptr);
MutexIF* getCsMutex();
/**
* Perform a regular send operation using Linux ioctl. This is public so it can be used
@ -70,10 +70,13 @@ class SpiComIF : public DeviceCommunicationIF, public SystemObject {
};
GpioIF* gpioComIF = nullptr;
std::string dev = "";
/**
* Protects the chip select operations. Lock when GPIO is pulled low, unlock after it was
* pulled high
*/
MutexIF* csMutex = nullptr;
MutexIF* spiMutex = nullptr;
MutexIF::TimeoutType timeoutType = MutexIF::TimeoutType::WAITING;
uint32_t timeoutMs = 20;
spi_ioc_transfer clockUpdateTransfer = {};
using SpiDeviceMap = std::unordered_map<address_t, SpiInstance>;
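The doc comment for getCsMutex() describes the case where the chip select has to be driven from outside the ComIF. A bare-bones sketch of that pattern without the ManualCsLockWrapper shown earlier (every name apart from getCsMutex(), the GPIO calls and the cookie accessors is an assumption):

MutexIF* csMutex = spiComIF->getCsMutex();
// Tell the ComIF not to touch the mutex or the CS GPIO for this cookie
spiCookie->setCsLockManual(true);
if (csMutex->lockMutex(MutexIF::TimeoutType::WAITING, 20) == HasReturnvaluesIF::RETURN_OK) {
  gpioComIF->pullLow(spiCookie->getChipSelectPin());
  // ... several transfers under one chip select assertion ...
  gpioComIF->pullHigh(spiCookie->getChipSelectPin());
  csMutex->unlockMutex();
}
spiCookie->setCsLockManual(false);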

View File

@ -107,3 +107,17 @@ void SpiCookie::getCallback(spi::send_callback_function_t* callback, void** args
*callback = this->sendCallback;
*args = this->callbackArgs;
}
void SpiCookie::setCsLockManual(bool enable) { manualCsLock = enable; }
bool SpiCookie::getCsLockManual() const { return manualCsLock; }
void SpiCookie::getMutexParams(MutexIF::TimeoutType& csTimeoutType, dur_millis_t& csTimeout) const {
csTimeoutType = this->csTimeoutType;
csTimeout = this->csTimeout;
}
void SpiCookie::setMutexParams(MutexIF::TimeoutType csTimeoutType, dur_millis_t csTimeout) {
this->csTimeoutType = csTimeoutType;
this->csTimeout = csTimeout;
}

View File

@ -2,6 +2,8 @@
#define LINUX_SPI_SPICOOKIE_H_
#include <fsfw/devicehandlers/CookieIF.h>
#include <fsfw/ipc/MutexIF.h>
#include <fsfw/timemanager/clockDefinitions.h>
#include <linux/spi/spidev.h>
#include "../../common/gpio/gpioDefinitions.h"
@ -20,6 +22,8 @@
*/
class SpiCookie : public CookieIF {
public:
static constexpr dur_millis_t DEFAULT_MUTEX_TIMEOUT = 20;
/**
* Each SPI device will have a corresponding cookie. The cookie is used by the communication
* interface and contains device specific information like the largest expected size to be
@ -139,9 +143,43 @@ class SpiCookie : public CookieIF {
*/
void activateCsDeselect(bool deselectCs, uint16_t delayUsecs);
void getMutexParams(MutexIF::TimeoutType& csTimeoutType, dur_millis_t& csTimeout) const;
void setMutexParams(MutexIF::TimeoutType csTimeoutType, dur_millis_t csTimeout);
void setCsLockManual(bool enable);
bool getCsLockManual() const;
spi_ioc_transfer* getTransferStructHandle();
private:
address_t spiAddress;
gpioId_t chipSelectPin;
std::string spiDevice;
spi::SpiComIfModes comIfMode;
// Required for regular mode
const size_t maxSize;
spi::SpiModes spiMode;
/**
* If this is set to true, the SPI ComIF will not perform any mutex locking for the
* CS mechanism. The user is responsible for locking and unlocking the mutex for the
* whole duration of the transfers.
*/
bool manualCsLock = false;
uint32_t spiSpeed;
bool halfDuplex = false;
MutexIF::TimeoutType csTimeoutType = MutexIF::TimeoutType::WAITING;
dur_millis_t csTimeout = DEFAULT_MUTEX_TIMEOUT;
// Required for callback mode
spi::send_callback_function_t sendCallback = nullptr;
void* callbackArgs = nullptr;
struct spi_ioc_transfer spiTransferStruct = {};
UncommonParameters uncommonParameters;
/**
* Internal constructor which initializes every field
* @param spiAddress
@ -154,27 +192,8 @@ class SpiCookie : public CookieIF {
* @param args
*/
SpiCookie(spi::SpiComIfModes comIfMode, address_t spiAddress, gpioId_t chipSelect,
std::string spiDev, const size_t maxSize, spi::SpiModes spiMode, uint32_t spiSpeed,
const size_t maxSize, spi::SpiModes spiMode, uint32_t spiSpeed,
spi::send_callback_function_t callback, void* args);
address_t spiAddress;
gpioId_t chipSelectPin;
std::string spiDevice;
spi::SpiComIfModes comIfMode;
// Required for regular mode
const size_t maxSize;
spi::SpiModes spiMode;
uint32_t spiSpeed;
bool halfDuplex = false;
// Required for callback mode
spi::send_callback_function_t sendCallback = nullptr;
void* callbackArgs = nullptr;
struct spi_ioc_transfer spiTransferStruct = {};
UncommonParameters uncommonParameters;
};
#endif /* LINUX_SPI_SPICOOKIE_H_ */
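The new cookie API is the user-facing knob for the chip select locking. A minimal configuration sketch, assuming an already constructed cookie pointer (the public cookie constructors are not part of this excerpt):

// spiCookie is assumed to be a valid SpiCookie* created elsewhere
// Wait up to 40 ms for the chip select mutex instead of the 20 ms DEFAULT_MUTEX_TIMEOUT
spiCookie->setMutexParams(MutexIF::TimeoutType::WAITING, 40);
// Or take over chip select handling completely: the ComIF then neither locks the CS
// mutex nor toggles the CS GPIO for transfers with this cookie
spiCookie->setCsLockManual(true);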

View File

@ -6,8 +6,8 @@
#endif
template <typename T>
inline FIFOBase<T>::FIFOBase(T* values, const size_t maxCapacity):
maxCapacity(maxCapacity), values(values){};
inline FIFOBase<T>::FIFOBase(T* values, const size_t maxCapacity)
: maxCapacity(maxCapacity), values(values){};
template <typename T>
inline ReturnValue_t FIFOBase<T>::insert(T value) {
@ -84,7 +84,6 @@ inline size_t FIFOBase<T>::getMaxCapacity() const {
return maxCapacity;
}
template <typename T>
inline void FIFOBase<T>::setContainer(T* data) {
this->values = data;

View File

@ -12,6 +12,7 @@ template <typename T, size_t MAX_SIZE, typename count_t = uint8_t>
class FixedArrayList : public ArrayList<T, count_t> {
static_assert(MAX_SIZE <= std::numeric_limits<count_t>::max(),
"count_t is not large enough to hold MAX_SIZE");
private:
T data[MAX_SIZE];

View File

@ -1,9 +1,9 @@
#ifndef FRAMEWORK_CONTAINER_FIXEDORDEREDMULTIMAP_TPP_
#define FRAMEWORK_CONTAINER_FIXEDORDEREDMULTIMAP_TPP_
template <typename key_t, typename T, typename KEY_COMPARE>
inline ReturnValue_t FixedOrderedMultimap<key_t, T, KEY_COMPARE>::insert(key_t key, T value, Iterator *storedValue) {
inline ReturnValue_t FixedOrderedMultimap<key_t, T, KEY_COMPARE>::insert(key_t key, T value,
Iterator *storedValue) {
if (_size == theMap.maxSize()) {
return MAP_FULL;
}
@ -71,7 +71,8 @@ inline ReturnValue_t FixedOrderedMultimap<key_t, T, KEY_COMPARE>::find(key_t key
}
template <typename key_t, typename T, typename KEY_COMPARE>
inline size_t FixedOrderedMultimap<key_t, T, KEY_COMPARE>::findFirstIndex(key_t key, size_t startAt) const {
inline size_t FixedOrderedMultimap<key_t, T, KEY_COMPARE>::findFirstIndex(key_t key,
size_t startAt) const {
if (startAt >= _size) {
return startAt + 1;
}
@ -105,5 +106,4 @@ inline void FixedOrderedMultimap<key_t, T, KEY_COMPARE>::removeFromPosition(size
--_size;
}
#endif /* FRAMEWORK_CONTAINER_FIXEDORDEREDMULTIMAP_TPP_ */

View File

@ -6,26 +6,24 @@
#endif
template <typename T>
inline LocalPoolVariable<T>::LocalPoolVariable(HasLocalDataPoolIF* hkOwner,
lp_id_t poolId, DataSetIF* dataSet, pool_rwm_t setReadWriteMode):
LocalPoolObjectBase(poolId, hkOwner, dataSet, setReadWriteMode) {}
inline LocalPoolVariable<T>::LocalPoolVariable(HasLocalDataPoolIF* hkOwner, lp_id_t poolId,
DataSetIF* dataSet, pool_rwm_t setReadWriteMode)
: LocalPoolObjectBase(poolId, hkOwner, dataSet, setReadWriteMode) {}
template <typename T>
inline LocalPoolVariable<T>::LocalPoolVariable(object_id_t poolOwner,
lp_id_t poolId, DataSetIF *dataSet, pool_rwm_t setReadWriteMode):
LocalPoolObjectBase(poolOwner, poolId, dataSet, setReadWriteMode) {}
inline LocalPoolVariable<T>::LocalPoolVariable(object_id_t poolOwner, lp_id_t poolId,
DataSetIF* dataSet, pool_rwm_t setReadWriteMode)
: LocalPoolObjectBase(poolOwner, poolId, dataSet, setReadWriteMode) {}
template <typename T>
inline LocalPoolVariable<T>::LocalPoolVariable(gp_id_t globalPoolId,
DataSetIF *dataSet, pool_rwm_t setReadWriteMode):
LocalPoolObjectBase(globalPoolId.objectId, globalPoolId.localPoolId,
dataSet, setReadWriteMode){}
inline LocalPoolVariable<T>::LocalPoolVariable(gp_id_t globalPoolId, DataSetIF* dataSet,
pool_rwm_t setReadWriteMode)
: LocalPoolObjectBase(globalPoolId.objectId, globalPoolId.localPoolId, dataSet,
setReadWriteMode) {}
template <typename T>
inline ReturnValue_t LocalPoolVariable<T>::read(
MutexIF::TimeoutType timeoutType, uint32_t timeoutMs) {
inline ReturnValue_t LocalPoolVariable<T>::read(MutexIF::TimeoutType timeoutType,
uint32_t timeoutMs) {
if (hkManager == nullptr) {
return readWithoutLock();
}
@ -43,19 +41,17 @@ template<typename T>
inline ReturnValue_t LocalPoolVariable<T>::readWithoutLock() {
if (readWriteMode == pool_rwm_t::VAR_WRITE) {
object_id_t targetObjectId = hkManager->getCreatorObjectId();
reportReadCommitError("LocalPoolVector",
PoolVariableIF::INVALID_READ_WRITE_MODE, true, targetObjectId,
localPoolId);
reportReadCommitError("LocalPoolVector", PoolVariableIF::INVALID_READ_WRITE_MODE, true,
targetObjectId, localPoolId);
return PoolVariableIF::INVALID_READ_WRITE_MODE;
}
PoolEntry<T>* poolEntry = nullptr;
ReturnValue_t result = LocalDpManagerAttorney::fetchPoolEntry(*hkManager, localPoolId,
&poolEntry);
ReturnValue_t result =
LocalDpManagerAttorney::fetchPoolEntry(*hkManager, localPoolId, &poolEntry);
if (result != RETURN_OK) {
object_id_t ownerObjectId = hkManager->getCreatorObjectId();
reportReadCommitError("LocalPoolVariable", result,
false, ownerObjectId, localPoolId);
reportReadCommitError("LocalPoolVariable", result, false, ownerObjectId, localPoolId);
return result;
}
@ -65,15 +61,15 @@ inline ReturnValue_t LocalPoolVariable<T>::readWithoutLock() {
}
template <typename T>
inline ReturnValue_t LocalPoolVariable<T>::commit(bool setValid,
MutexIF::TimeoutType timeoutType, uint32_t timeoutMs) {
inline ReturnValue_t LocalPoolVariable<T>::commit(bool setValid, MutexIF::TimeoutType timeoutType,
uint32_t timeoutMs) {
this->setValid(setValid);
return commit(timeoutType, timeoutMs);
}
template <typename T>
inline ReturnValue_t LocalPoolVariable<T>::commit(
MutexIF::TimeoutType timeoutType, uint32_t timeoutMs) {
inline ReturnValue_t LocalPoolVariable<T>::commit(MutexIF::TimeoutType timeoutType,
uint32_t timeoutMs) {
if (hkManager == nullptr) {
return commitWithoutLock();
}
@ -91,19 +87,17 @@ template<typename T>
inline ReturnValue_t LocalPoolVariable<T>::commitWithoutLock() {
if (readWriteMode == pool_rwm_t::VAR_READ) {
object_id_t targetObjectId = hkManager->getCreatorObjectId();
reportReadCommitError("LocalPoolVector",
PoolVariableIF::INVALID_READ_WRITE_MODE, false, targetObjectId,
localPoolId);
reportReadCommitError("LocalPoolVector", PoolVariableIF::INVALID_READ_WRITE_MODE, false,
targetObjectId, localPoolId);
return PoolVariableIF::INVALID_READ_WRITE_MODE;
}
PoolEntry<T>* poolEntry = nullptr;
ReturnValue_t result = LocalDpManagerAttorney::fetchPoolEntry(*hkManager, localPoolId,
&poolEntry);
ReturnValue_t result =
LocalDpManagerAttorney::fetchPoolEntry(*hkManager, localPoolId, &poolEntry);
if (result != RETURN_OK) {
object_id_t ownerObjectId = hkManager->getCreatorObjectId();
reportReadCommitError("LocalPoolVariable", result,
false, ownerObjectId, localPoolId);
reportReadCommitError("LocalPoolVariable", result, false, ownerObjectId, localPoolId);
return result;
}
@ -113,11 +107,10 @@ inline ReturnValue_t LocalPoolVariable<T>::commitWithoutLock() {
}
template <typename T>
inline ReturnValue_t LocalPoolVariable<T>::serialize(uint8_t** buffer,
size_t* size, const size_t max_size,
inline ReturnValue_t LocalPoolVariable<T>::serialize(
uint8_t** buffer, size_t* size, const size_t max_size,
SerializeIF::Endianness streamEndianness) const {
return SerializeAdapter::serialize(&value,
buffer, size ,max_size, streamEndianness);
return SerializeAdapter::serialize(&value, buffer, size, max_size, streamEndianness);
}
template <typename T>
@ -126,15 +119,14 @@ inline size_t LocalPoolVariable<T>::getSerializedSize() const {
}
template <typename T>
inline ReturnValue_t LocalPoolVariable<T>::deSerialize(const uint8_t** buffer,
size_t* size, SerializeIF::Endianness streamEndianness) {
inline ReturnValue_t LocalPoolVariable<T>::deSerialize(const uint8_t** buffer, size_t* size,
SerializeIF::Endianness streamEndianness) {
return SerializeAdapter::deSerialize(&value, buffer, size, streamEndianness);
}
#if FSFW_CPP_OSTREAM_ENABLED == 1
template <typename T>
inline std::ostream& operator<< (std::ostream &out,
const LocalPoolVariable<T> &var) {
inline std::ostream& operator<<(std::ostream& out, const LocalPoolVariable<T>& var) {
out << var.value;
return out;
}
@ -146,8 +138,7 @@ inline LocalPoolVariable<T>::operator T() const {
}
template <typename T>
inline LocalPoolVariable<T> & LocalPoolVariable<T>::operator=(
const T& newValue) {
inline LocalPoolVariable<T>& LocalPoolVariable<T>::operator=(const T& newValue) {
value = newValue;
return *this;
}
@ -160,8 +151,7 @@ inline LocalPoolVariable<T>& LocalPoolVariable<T>::operator =(
}
template <typename T>
inline bool LocalPoolVariable<T>::operator ==(
const LocalPoolVariable<T> &other) const {
inline bool LocalPoolVariable<T>::operator==(const LocalPoolVariable<T>& other) const {
return this->value == other.value;
}
@ -170,10 +160,8 @@ inline bool LocalPoolVariable<T>::operator ==(const T &other) const {
return this->value == other;
}
template <typename T>
inline bool LocalPoolVariable<T>::operator !=(
const LocalPoolVariable<T> &other) const {
inline bool LocalPoolVariable<T>::operator!=(const LocalPoolVariable<T>& other) const {
return not(*this == other);
}
@ -182,10 +170,8 @@ inline bool LocalPoolVariable<T>::operator !=(const T &other) const {
return not(*this == other);
}
template <typename T>
inline bool LocalPoolVariable<T>::operator <(
const LocalPoolVariable<T> &other) const {
inline bool LocalPoolVariable<T>::operator<(const LocalPoolVariable<T>& other) const {
return this->value < other.value;
}
@ -194,10 +180,8 @@ inline bool LocalPoolVariable<T>::operator <(const T &other) const {
return this->value < other;
}
template <typename T>
inline bool LocalPoolVariable<T>::operator >(
const LocalPoolVariable<T> &other) const {
inline bool LocalPoolVariable<T>::operator>(const LocalPoolVariable<T>& other) const {
return not(*this < other);
}

View File

@ -6,25 +6,26 @@
#endif
template <typename T, uint16_t vectorSize>
inline LocalPoolVector<T, vectorSize>::LocalPoolVector(
HasLocalDataPoolIF* hkOwner, lp_id_t poolId, DataSetIF* dataSet,
pool_rwm_t setReadWriteMode):
LocalPoolObjectBase(poolId, hkOwner, dataSet, setReadWriteMode) {}
inline LocalPoolVector<T, vectorSize>::LocalPoolVector(HasLocalDataPoolIF* hkOwner, lp_id_t poolId,
DataSetIF* dataSet,
pool_rwm_t setReadWriteMode)
: LocalPoolObjectBase(poolId, hkOwner, dataSet, setReadWriteMode) {}
template <typename T, uint16_t vectorSize>
inline LocalPoolVector<T, vectorSize>::LocalPoolVector(object_id_t poolOwner,
lp_id_t poolId, DataSetIF *dataSet, pool_rwm_t setReadWriteMode):
LocalPoolObjectBase(poolOwner, poolId, dataSet, setReadWriteMode) {}
inline LocalPoolVector<T, vectorSize>::LocalPoolVector(object_id_t poolOwner, lp_id_t poolId,
DataSetIF* dataSet,
pool_rwm_t setReadWriteMode)
: LocalPoolObjectBase(poolOwner, poolId, dataSet, setReadWriteMode) {}
template <typename T, uint16_t vectorSize>
inline LocalPoolVector<T, vectorSize>::LocalPoolVector(gp_id_t globalPoolId,
DataSetIF *dataSet, pool_rwm_t setReadWriteMode):
LocalPoolObjectBase(globalPoolId.objectId, globalPoolId.localPoolId,
dataSet, setReadWriteMode) {}
inline LocalPoolVector<T, vectorSize>::LocalPoolVector(gp_id_t globalPoolId, DataSetIF* dataSet,
pool_rwm_t setReadWriteMode)
: LocalPoolObjectBase(globalPoolId.objectId, globalPoolId.localPoolId, dataSet,
setReadWriteMode) {}
template <typename T, uint16_t vectorSize>
inline ReturnValue_t LocalPoolVector<T, vectorSize>::read(
MutexIF::TimeoutType timeoutType, uint32_t timeoutMs) {
inline ReturnValue_t LocalPoolVector<T, vectorSize>::read(MutexIF::TimeoutType timeoutType,
uint32_t timeoutMs) {
MutexGuard(LocalDpManagerAttorney::getMutexHandle(*hkManager), timeoutType, timeoutMs);
return readWithoutLock();
}
@ -32,21 +33,19 @@ template<typename T, uint16_t vectorSize>
inline ReturnValue_t LocalPoolVector<T, vectorSize>::readWithoutLock() {
if (readWriteMode == pool_rwm_t::VAR_WRITE) {
object_id_t targetObjectId = hkManager->getCreatorObjectId();
reportReadCommitError("LocalPoolVector",
PoolVariableIF::INVALID_READ_WRITE_MODE, true, targetObjectId,
localPoolId);
reportReadCommitError("LocalPoolVector", PoolVariableIF::INVALID_READ_WRITE_MODE, true,
targetObjectId, localPoolId);
return PoolVariableIF::INVALID_READ_WRITE_MODE;
}
PoolEntry<T>* poolEntry = nullptr;
ReturnValue_t result = LocalDpManagerAttorney::fetchPoolEntry(*hkManager, localPoolId,
&poolEntry);
ReturnValue_t result =
LocalDpManagerAttorney::fetchPoolEntry(*hkManager, localPoolId, &poolEntry);
memset(this->value, 0, vectorSize * sizeof(T));
if (result != RETURN_OK) {
object_id_t targetObjectId = hkManager->getCreatorObjectId();
reportReadCommitError("LocalPoolVector", result, true, targetObjectId,
localPoolId);
reportReadCommitError("LocalPoolVector", result, true, targetObjectId, localPoolId);
return result;
}
std::memcpy(this->value, poolEntry->getDataPtr(), poolEntry->getByteSize());
@ -56,14 +55,15 @@ inline ReturnValue_t LocalPoolVector<T, vectorSize>::readWithoutLock() {
template <typename T, uint16_t vectorSize>
inline ReturnValue_t LocalPoolVector<T, vectorSize>::commit(bool valid,
MutexIF::TimeoutType timeoutType, uint32_t timeoutMs) {
MutexIF::TimeoutType timeoutType,
uint32_t timeoutMs) {
this->setValid(valid);
return commit(timeoutType, timeoutMs);
}
template <typename T, uint16_t vectorSize>
inline ReturnValue_t LocalPoolVector<T, vectorSize>::commit(
MutexIF::TimeoutType timeoutType, uint32_t timeoutMs) {
inline ReturnValue_t LocalPoolVector<T, vectorSize>::commit(MutexIF::TimeoutType timeoutType,
uint32_t timeoutMs) {
MutexGuard(LocalDpManagerAttorney::getMutexHandle(*hkManager), timeoutType, timeoutMs);
return commitWithoutLock();
}
@ -72,18 +72,16 @@ template<typename T, uint16_t vectorSize>
inline ReturnValue_t LocalPoolVector<T, vectorSize>::commitWithoutLock() {
if (readWriteMode == pool_rwm_t::VAR_READ) {
object_id_t targetObjectId = hkManager->getCreatorObjectId();
reportReadCommitError("LocalPoolVector",
PoolVariableIF::INVALID_READ_WRITE_MODE, false, targetObjectId,
localPoolId);
reportReadCommitError("LocalPoolVector", PoolVariableIF::INVALID_READ_WRITE_MODE, false,
targetObjectId, localPoolId);
return PoolVariableIF::INVALID_READ_WRITE_MODE;
}
PoolEntry<T>* poolEntry = nullptr;
ReturnValue_t result = LocalDpManagerAttorney::fetchPoolEntry(*hkManager, localPoolId,
&poolEntry);
ReturnValue_t result =
LocalDpManagerAttorney::fetchPoolEntry(*hkManager, localPoolId, &poolEntry);
if (result != RETURN_OK) {
object_id_t targetObjectId = hkManager->getCreatorObjectId();
reportReadCommitError("LocalPoolVector", result, false, targetObjectId,
localPoolId);
reportReadCommitError("LocalPoolVector", result, false, targetObjectId, localPoolId);
return result;
}
std::memcpy(poolEntry->getDataPtr(), this->value, poolEntry->getByteSize());
@ -100,9 +98,11 @@ inline T& LocalPoolVector<T, vectorSize>::operator [](size_t i) {
// a configuration error, but I won't exit here.
#if FSFW_CPP_OSTREAM_ENABLED == 1
sif::warning << "LocalPoolVector: Invalid index. Setting or returning"
" last value!" << std::endl;
" last value!"
<< std::endl;
#else
sif::printWarning("LocalPoolVector: Invalid index. Setting or returning"
sif::printWarning(
"LocalPoolVector: Invalid index. Setting or returning"
" last value!\n");
#endif
return value[vectorSize - 1];
@ -117,22 +117,23 @@ inline const T& LocalPoolVector<T, vectorSize>::operator [](size_t i) const {
// a configuration error, but I won't exit here.
#if FSFW_CPP_OSTREAM_ENABLED == 1
sif::warning << "LocalPoolVector: Invalid index. Setting or returning"
" last value!" << std::endl;
" last value!"
<< std::endl;
#else
sif::printWarning("LocalPoolVector: Invalid index. Setting or returning"
sif::printWarning(
"LocalPoolVector: Invalid index. Setting or returning"
" last value!\n");
#endif
return value[vectorSize - 1];
}
template <typename T, uint16_t vectorSize>
inline ReturnValue_t LocalPoolVector<T, vectorSize>::serialize(uint8_t** buffer,
size_t* size, size_t maxSize,
inline ReturnValue_t LocalPoolVector<T, vectorSize>::serialize(
uint8_t** buffer, size_t* size, size_t maxSize,
SerializeIF::Endianness streamEndianness) const {
ReturnValue_t result = HasReturnvaluesIF::RETURN_FAILED;
for (uint16_t i = 0; i < vectorSize; i++) {
result = SerializeAdapter::serialize(&(value[i]), buffer, size,
maxSize, streamEndianness);
result = SerializeAdapter::serialize(&(value[i]), buffer, size, maxSize, streamEndianness);
if (result != HasReturnvaluesIF::RETURN_OK) {
break;
}
@ -147,12 +148,10 @@ inline size_t LocalPoolVector<T, vectorSize>::getSerializedSize() const {
template <typename T, uint16_t vectorSize>
inline ReturnValue_t LocalPoolVector<T, vectorSize>::deSerialize(
const uint8_t** buffer, size_t* size,
SerializeIF::Endianness streamEndianness) {
const uint8_t** buffer, size_t* size, SerializeIF::Endianness streamEndianness) {
ReturnValue_t result = HasReturnvaluesIF::RETURN_FAILED;
for (uint16_t i = 0; i < vectorSize; i++) {
result = SerializeAdapter::deSerialize(&(value[i]), buffer, size,
streamEndianness);
result = SerializeAdapter::deSerialize(&(value[i]), buffer, size, streamEndianness);
if (result != HasReturnvaluesIF::RETURN_OK) {
break;
}
@ -162,8 +161,7 @@ inline ReturnValue_t LocalPoolVector<T, vectorSize>::deSerialize(
#if FSFW_CPP_OSTREAM_ENABLED == 1
template <typename T, uint16_t vectorSize>
inline std::ostream& operator<< (std::ostream &out,
const LocalPoolVector<T, vectorSize> &var) {
inline std::ostream& operator<<(std::ostream& out, const LocalPoolVector<T, vectorSize>& var) {
out << "Vector: [";
for (int i = 0; i < vectorSize; i++) {
out << var.value[i];

View File

@ -1062,7 +1062,8 @@ class DeviceHandlerBase : public DeviceHandlerIF,
/**
* Same as triggerEvent, but for forwarding if object is used as proxy.
*/
virtual void forwardEvent(Event event, uint32_t parameter1 = 0, uint32_t parameter2 = 0) const override;
virtual void forwardEvent(Event event, uint32_t parameter1 = 0,
uint32_t parameter2 = 0) const override;
/**
* Checks if current mode is transitional mode.

View File

@ -50,7 +50,8 @@ class SystemObject : public SystemObjectIF {
virtual ReturnValue_t initialize() override;
virtual ReturnValue_t checkObjectConnections() override;
virtual void forwardEvent(Event event, uint32_t parameter1 = 0, uint32_t parameter2 = 0) const override;
virtual void forwardEvent(Event event, uint32_t parameter1 = 0,
uint32_t parameter2 = 0) const override;
};
#endif /* FSFW_OBJECTMANAGER_SYSTEMOBJECT_H_ */

View File

@ -29,8 +29,8 @@ class UdpTmTcBridge : public TmTcBridge, public TcpIpBase {
/* The ports chosen here should not be used by any other process. */
static const std::string DEFAULT_SERVER_PORT;
UdpTmTcBridge(object_id_t objectId, object_id_t tcDestination, const std::string& udpServerPort = "",
object_id_t tmStoreId = objects::TM_STORE,
UdpTmTcBridge(object_id_t objectId, object_id_t tcDestination,
const std::string& udpServerPort = "", object_id_t tmStoreId = objects::TM_STORE,
object_id_t tcStoreId = objects::TC_STORE);
~UdpTmTcBridge() override;

View File

@ -97,7 +97,8 @@ ReturnValue_t CService201HealthCommanding::handleReply(const CommandMessage *rep
}
// Not used for now, health state already reported by event
[[maybe_unused]] ReturnValue_t CService201HealthCommanding::prepareHealthSetReply(const CommandMessage *reply) {
[[maybe_unused]] ReturnValue_t CService201HealthCommanding::prepareHealthSetReply(
const CommandMessage *reply) {
auto health = static_cast<uint8_t>(HealthMessage::getHealth(reply));
auto oldHealth = static_cast<uint8_t>(HealthMessage::getOldHealth(reply));
HealthSetReply healthSetReply(health, oldHealth);

View File

@ -67,7 +67,5 @@ TEST_CASE("Power Switcher", "[power-switcher]") {
REQUIRE(not switcherUsingDummy.active());
}
SECTION("More Dummy Tests") {
}
SECTION("More Dummy Tests") {}
}