Merge branch 'mueller/master' of https://egit.irs.uni-stuttgart.de/fsfw/fsfw into mueller/master
Commit 5095fd206f
@@ -1,6 +1,6 @@
 #include "SharedRingBuffer.h"
 #include "../ipc/MutexFactory.h"
-#include "../ipc/MutexHelper.h"
+#include "../ipc/MutexGuard.h"
 
 SharedRingBuffer::SharedRingBuffer(object_id_t objectId, const size_t size,
         bool overwriteOld, size_t maxExcessBytes):
@@ -66,7 +66,7 @@ public:
 
     /**
      * The mutex handle can be accessed directly, for example to perform
-     * the lock with the #MutexHelper for a RAII compliant lock operation.
+     * the lock with the #MutexGuard for a RAII compliant lock operation.
      * @return
      */
    MutexIF* getMutexHandle() const;
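The updated doc comment describes locking the ring buffer through its mutex handle with a MutexGuard. A minimal sketch of that pattern (hypothetical caller code, not part of this commit; the protected access itself is only indicated by a comment):

void readFromSharedBuffer(SharedRingBuffer& ringBuffer) {
    // Named guard: the ring buffer mutex stays locked until the end of this scope.
    MutexGuard guard(ringBuffer.getMutexHandle(), MutexIF::TimeoutType::WAITING, 20);
    // ... access the ring buffer here while it is protected ...
}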
@@ -10,7 +10,7 @@
 #include "../housekeeping/AcceptsHkPacketsIF.h"
 #include "../timemanager/CCSDSTime.h"
 #include "../ipc/MutexFactory.h"
-#include "../ipc/MutexHelper.h"
+#include "../ipc/MutexGuard.h"
 #include "../ipc/QueueFactory.h"
 
 #include <array>
@@ -14,7 +14,7 @@
 #include "../ipc/MutexIF.h"
 #include "../ipc/CommandMessage.h"
 #include "../ipc/MessageQueueIF.h"
-#include "../ipc/MutexHelper.h"
+#include "../ipc/MutexGuard.h"
 
 #include <map>
 #include <vector>
@@ -166,6 +166,8 @@ public:
 
     object_id_t getCreatorObjectId();
 
+    bool getReportingEnabled() const;
+
 protected:
     sid_t sid;
     //! This mutex is used if the data is created by one object only.
@@ -180,7 +182,6 @@ protected:
      */
     bool reportingEnabled = false;
     void setReportingEnabled(bool enabled);
-    bool getReportingEnabled() const;
 
     void initializePeriodicHelper(float collectionInterval,
             dur_millis_t minimumPeriodicInterval,
@@ -25,7 +25,7 @@ inline LocalPoolVector<T, vectorSize>::LocalPoolVector(gp_id_t globalPoolId,
 template<typename T, uint16_t vectorSize>
 inline ReturnValue_t LocalPoolVector<T, vectorSize>::read(
         MutexIF::TimeoutType timeoutType, uint32_t timeoutMs) {
-    MutexHelper(LocalDpManagerAttorney::getMutexHandle(*hkManager), timeoutType, timeoutMs);
+    MutexGuard(LocalDpManagerAttorney::getMutexHandle(*hkManager), timeoutType, timeoutMs);
     return readWithoutLock();
 }
 template<typename T, uint16_t vectorSize>
@@ -64,7 +64,7 @@ inline ReturnValue_t LocalPoolVector<T, vectorSize>::commit(bool valid,
 template<typename T, uint16_t vectorSize>
 inline ReturnValue_t LocalPoolVector<T, vectorSize>::commit(
         MutexIF::TimeoutType timeoutType, uint32_t timeoutMs) {
-    MutexHelper(LocalDpManagerAttorney::getMutexHandle(*hkManager), timeoutType, timeoutMs);
+    MutexGuard(LocalDpManagerAttorney::getMutexHandle(*hkManager), timeoutType, timeoutMs);
     return commitWithoutLock();
 }
 
@@ -1,5 +1,5 @@
 #include "HealthTable.h"
-#include "../ipc/MutexHelper.h"
+#include "../ipc/MutexGuard.h"
 #include "../ipc/MutexFactory.h"
 #include "../serialize/SerializeAdapter.h"
 
@@ -31,7 +31,7 @@ ReturnValue_t HealthTable::registerObject(object_id_t object,
 
 void HealthTable::setHealth(object_id_t object,
         HasHealthIF::HealthState newState) {
-    MutexHelper(mutex, timeoutType, mutexTimeoutMs);
+    MutexGuard(mutex, timeoutType, mutexTimeoutMs);
     HealthMap::iterator iter = healthMap.find(object);
     if (iter != healthMap.end()) {
         iter->second = newState;
@@ -40,7 +40,7 @@ void HealthTable::setHealth(object_id_t object,
 
 HasHealthIF::HealthState HealthTable::getHealth(object_id_t object) {
     HasHealthIF::HealthState state = HasHealthIF::HEALTHY;
-    MutexHelper(mutex, timeoutType, mutexTimeoutMs);
+    MutexGuard(mutex, timeoutType, mutexTimeoutMs);
     HealthMap::iterator iter = healthMap.find(object);
     if (iter != healthMap.end()) {
         state = iter->second;
@@ -49,7 +49,7 @@ HasHealthIF::HealthState HealthTable::getHealth(object_id_t object) {
 }
 
 bool HealthTable::hasHealth(object_id_t object) {
-    MutexHelper(mutex, timeoutType, mutexTimeoutMs);
+    MutexGuard(mutex, timeoutType, mutexTimeoutMs);
     HealthMap::iterator iter = healthMap.find(object);
     if (iter != healthMap.end()) {
         return true;
@@ -58,14 +58,14 @@ bool HealthTable::hasHealth(object_id_t object) {
 }
 
 size_t HealthTable::getPrintSize() {
-    MutexHelper(mutex, timeoutType, mutexTimeoutMs);
+    MutexGuard(mutex, timeoutType, mutexTimeoutMs);
     uint32_t size = healthMap.size() * sizeof(object_id_t) +
             sizeof(HasHealthIF::HealthState) + sizeof(uint16_t);
     return size;
 }
 
 void HealthTable::printAll(uint8_t* pointer, size_t maxSize) {
-    MutexHelper(mutex, timeoutType, mutexTimeoutMs);
+    MutexGuard(mutex, timeoutType, mutexTimeoutMs);
     size_t size = 0;
     uint16_t count = healthMap.size();
     SerializeAdapter::serialize(&count,
@@ -81,7 +81,7 @@ void HealthTable::printAll(uint8_t* pointer, size_t maxSize) {
 
 ReturnValue_t HealthTable::iterate(HealthEntry *value, bool reset) {
     ReturnValue_t result = HasReturnvaluesIF::RETURN_OK;
-    MutexHelper(mutex, timeoutType, mutexTimeoutMs);
+    MutexGuard(mutex, timeoutType, mutexTimeoutMs);
     if (reset) {
         mapIterator = healthMap.begin();
     }
ipc/MutexGuard.h (new file, 60 lines)
@@ -0,0 +1,60 @@
+#ifndef FRAMEWORK_IPC_MUTEXGUARD_H_
+#define FRAMEWORK_IPC_MUTEXGUARD_H_
+
+#include "MutexFactory.h"
+#include "../serviceinterface/ServiceInterface.h"
+
+class MutexGuard {
+public:
+    MutexGuard(MutexIF* mutex, MutexIF::TimeoutType timeoutType =
+            MutexIF::TimeoutType::BLOCKING, uint32_t timeoutMs = 0):
+            internalMutex(mutex) {
+        if(mutex == nullptr) {
+#if FSFW_VERBOSE_LEVEL >= 1
+#if FSFW_CPP_OSTREAM_ENABLED == 1
+            sif::error << "MutexGuard: Passed mutex is invalid!" << std::endl;
+#else
+            sif::printError("MutexGuard: Passed mutex is invalid!\n");
+#endif /* FSFW_CPP_OSTREAM_ENABLED == 1 */
+#endif /* FSFW_VERBOSE_LEVEL >= 1 */
+            return;
+        }
+        result = mutex->lockMutex(timeoutType,
+                timeoutMs);
+#if FSFW_VERBOSE_LEVEL >= 1
+        if(result == MutexIF::MUTEX_TIMEOUT) {
+#if FSFW_CPP_OSTREAM_ENABLED == 1
+            sif::error << "MutexGuard: Lock of mutex failed with timeout of "
+                    << timeoutMs << " milliseconds!" << std::endl;
+#else
+            sif::printError("MutexGuard: Lock of mutex failed with timeout of %lu milliseconds\n",
+                    timeoutMs);
+#endif /* FSFW_CPP_OSTREAM_ENABLED == 1 */
+
+        }
+        else if(result != HasReturnvaluesIF::RETURN_OK) {
+#if FSFW_CPP_OSTREAM_ENABLED == 1
+            sif::error << "MutexGuard: Lock of Mutex failed with code " << result << std::endl;
+#else
+            sif::printError("MutexGuard: Lock of Mutex failed with code %d\n", result);
+#endif /* FSFW_CPP_OSTREAM_ENABLED == 1 */
+        }
+#else
+#endif /* FSFW_VERBOSE_LEVEL >= 1 */
+    }
+
+    ReturnValue_t getLockResult() const {
+        return result;
+    }
+
+    ~MutexGuard() {
+        if(internalMutex != nullptr) {
+            internalMutex->unlockMutex();
+        }
+    }
+private:
+    MutexIF* internalMutex;
+    ReturnValue_t result = HasReturnvaluesIF::RETURN_FAILED;
+};
+
+#endif /* FRAMEWORK_IPC_MUTEXGUARD_H_ */
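The new MutexGuard locks the passed mutex in its constructor, stores the lock result, and unlocks the mutex again in its destructor. A short usage sketch (hypothetical caller code, not part of this commit; function and variable names are made up):

#include <fsfw/ipc/MutexGuard.h>
#include <fsfw/returnvalues/HasReturnvaluesIF.h>

ReturnValue_t incrementSharedCounter(MutexIF* mutex, uint32_t& counter) {
    // A named guard keeps the mutex locked for the whole scope. An unnamed
    // temporary like MutexGuard(mutex) would already unlock it again at the
    // end of that statement.
    MutexGuard guard(mutex, MutexIF::TimeoutType::WAITING, 20);
    if(guard.getLockResult() != HasReturnvaluesIF::RETURN_OK) {
        return guard.getLockResult();
    }
    counter++;
    return HasReturnvaluesIF::RETURN_OK;
    // guard is destroyed here and unlocks the mutex
}

The mutex itself would typically be created once beforehand, for example via the FSFW MutexFactory.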
ipc/MutexHelper.h (deleted file, 57 lines)
@@ -1,57 +0,0 @@
-#ifndef FRAMEWORK_IPC_MUTEXHELPER_H_
-#define FRAMEWORK_IPC_MUTEXHELPER_H_
-
-#include "MutexFactory.h"
-#include "../serviceinterface/ServiceInterface.h"
-
-class MutexHelper {
-public:
-    MutexHelper(MutexIF* mutex, MutexIF::TimeoutType timeoutType =
-            MutexIF::TimeoutType::BLOCKING, uint32_t timeoutMs = 0):
-            internalMutex(mutex) {
-        if(mutex == nullptr) {
-#if FSFW_VERBOSE_LEVEL >= 1
-#if FSFW_CPP_OSTREAM_ENABLED == 1
-            sif::error << "MutexHelper: Passed mutex is invalid!" << std::endl;
-#else
-            sif::printError("MutexHelper: Passed mutex is invalid!\n");
-#endif /* FSFW_CPP_OSTREAM_ENABLED == 1 */
-#endif /* FSFW_VERBOSE_LEVEL >= 1 */
-            return;
-        }
-        ReturnValue_t status = mutex->lockMutex(timeoutType,
-                timeoutMs);
-#if FSFW_VERBOSE_LEVEL >= 1
-        if(status == MutexIF::MUTEX_TIMEOUT) {
-#if FSFW_CPP_OSTREAM_ENABLED == 1
-            sif::error << "MutexHelper: Lock of mutex failed with timeout of "
-                    << timeoutMs << " milliseconds!" << std::endl;
-#else
-            sif::printError("MutexHelper: Lock of mutex failed with timeout of %lu milliseconds\n",
-                    timeoutMs);
-#endif /* FSFW_CPP_OSTREAM_ENABLED == 1 */
-
-        }
-        else if(status != HasReturnvaluesIF::RETURN_OK) {
-#if FSFW_CPP_OSTREAM_ENABLED == 1
-            sif::error << "MutexHelper: Lock of Mutex failed with code " << status << std::endl;
-#else
-            sif::printError("MutexHelper: Lock of Mutex failed with code %d\n", status);
-#endif /* FSFW_CPP_OSTREAM_ENABLED == 1 */
-        }
-#else
-        /* To avoid unused variable warning */
-        static_cast<void>(status);
-#endif /* FSFW_VERBOSE_LEVEL >= 1 */
-    }
-
-    ~MutexHelper() {
-        if(internalMutex != nullptr) {
-            internalMutex->unlockMutex();
-        }
-    }
-private:
-    MutexIF* internalMutex;
-};
-
-#endif /* FRAMEWORK_IPC_MUTEXHELPER_H_ */
@@ -3,7 +3,7 @@
 
 #include "../../serviceinterface/ServiceInterfaceStream.h"
 #include "../../ipc/MutexFactory.h"
-#include "../../ipc/MutexHelper.h"
+#include "../../ipc/MutexGuard.h"
 
 MessageQueue::MessageQueue(size_t messageDepth, size_t maxMessageSize):
         messageSize(maxMessageSize), messageDepth(messageDepth) {
@@ -65,7 +65,7 @@ ReturnValue_t MessageQueue::receiveMessage(MessageQueueMessageIF* message) {
     }
     // not sure this will work..
     //*message = std::move(messageQueue.front());
-    MutexHelper mutexLock(queueLock, MutexIF::TimeoutType::WAITING, 20);
+    MutexGuard mutexLock(queueLock, MutexIF::TimeoutType::WAITING, 20);
     MessageQueueMessage* currentMessage = &messageQueue.front();
     std::copy(currentMessage->getBuffer(),
             currentMessage->getBuffer() + messageSize, message->getBuffer());
@@ -130,7 +130,7 @@ ReturnValue_t MessageQueue::sendMessageFromMessageQueue(MessageQueueId_t sendTo,
         return HasReturnvaluesIF::RETURN_FAILED;
     }
     if(targetQueue->messageQueue.size() < targetQueue->messageDepth) {
-        MutexHelper mutexLock(targetQueue->queueLock,
+        MutexGuard mutexLock(targetQueue->queueLock,
                 MutexIF::TimeoutType::WAITING, 20);
         // not ideal, works for now though.
         MessageQueueMessage* mqmMessage =
@@ -2,7 +2,7 @@
 
 #include "../../serviceinterface/ServiceInterface.h"
 #include "../../ipc/MutexFactory.h"
-#include "../../ipc/MutexHelper.h"
+#include "../../ipc/MutexGuard.h"
 
 QueueMapManager* QueueMapManager::mqManagerInstance = nullptr;
 
@@ -43,7 +43,7 @@ ReturnValue_t QueueMapManager::addMessageQueue(
 
 MessageQueueIF* QueueMapManager::getMessageQueue(
         MessageQueueId_t messageQueueId) const {
-    MutexHelper(mapLock, MutexIF::TimeoutType::WAITING, 50);
+    MutexGuard(mapLock, MutexIF::TimeoutType::WAITING, 50);
     auto queueIter = queueMap.find(messageQueueId);
     if(queueIter != queueMap.end()) {
         return queueIter->second;
@@ -2,6 +2,7 @@
 #include "PeriodicPosixTask.h"
 
 #include "../../tasks/TaskFactory.h"
+#include "../../serviceinterface/ServiceInterface.h"
 #include "../../returnvalues/HasReturnvaluesIF.h"
 
 //TODO: Different variant than the lazy loading in QueueFactory. What's better and why?
@@ -1,6 +1,6 @@
 #include "TmTcUnixUdpBridge.h"
 #include "../../serviceinterface/ServiceInterface.h"
-#include "../../ipc/MutexHelper.h"
+#include "../../ipc/MutexGuard.h"
 
 #include <errno.h>
 #include <arpa/inet.h>
@@ -69,7 +69,7 @@ TmTcUnixUdpBridge::~TmTcUnixUdpBridge() {
 ReturnValue_t TmTcUnixUdpBridge::sendTm(const uint8_t *data, size_t dataLen) {
     int flags = 0;
 
-    MutexHelper lock(mutex, MutexIF::TimeoutType::WAITING, 10);
+    MutexGuard lock(mutex, MutexIF::TimeoutType::WAITING, 10);
 
     if(ipAddrAnySet){
         clientAddress.sin_addr.s_addr = htons(INADDR_ANY);
@@ -100,7 +100,7 @@ ReturnValue_t TmTcUnixUdpBridge::sendTm(const uint8_t *data, size_t dataLen) {
 }
 
 void TmTcUnixUdpBridge::checkAndSetClientAddress(sockaddr_in& newAddress) {
-    MutexHelper lock(mutex, MutexIF::TimeoutType::WAITING, 10);
+    MutexGuard lock(mutex, MutexIF::TimeoutType::WAITING, 10);
 
     // char ipAddress [15];
 #if FSFW_CPP_OSTREAM_ENABLED == 1
@@ -1,7 +1,7 @@
 #include "RtemsBasic.h"
 
 #include "../../timemanager/Clock.h"
-#include "../../ipc/MutexHelper.h"
+#include "../../ipc/MutexGuard.h"
 
 #include <rtems/score/todimpl.h>
 #include <rtems/rtems/clockimpl.h>
@@ -183,7 +183,7 @@ ReturnValue_t Clock::setLeapSeconds(const uint16_t leapSeconds_) {
     if(checkOrCreateClockMutex()!=HasReturnvaluesIF::RETURN_OK){
         return HasReturnvaluesIF::RETURN_FAILED;
     }
-    MutexHelper helper(timeMutex);
+    MutexGuard helper(timeMutex);
 
 
     leapSeconds = leapSeconds_;
@@ -196,7 +196,7 @@ ReturnValue_t Clock::getLeapSeconds(uint16_t* leapSeconds_) {
     if(timeMutex==nullptr){
         return HasReturnvaluesIF::RETURN_FAILED;
     }
-    MutexHelper helper(timeMutex);
+    MutexGuard helper(timeMutex);
 
     *leapSeconds_ = leapSeconds;
 
@@ -1,6 +1,6 @@
 #include "TmTcWinUdpBridge.h"
 
-#include <fsfw/ipc/MutexHelper.h>
+#include <fsfw/ipc/MutexGuard.h>
 
 #if defined(_MSC_VER)
 #include <BaseTsd.h>
@@ -120,7 +120,7 @@ ReturnValue_t TmTcWinUdpBridge::sendTm(const uint8_t *data, size_t dataLen) {
 }
 
 void TmTcWinUdpBridge::checkAndSetClientAddress(sockaddr_in newAddress) {
-    MutexHelper lock(mutex, MutexIF::TimeoutType::WAITING, 10);
+    MutexGuard lock(mutex, MutexIF::TimeoutType::WAITING, 10);
 
 #if FSFW_CPP_OSTREAM_ENABLED == 1 && FSFW_UDP_SEND_WIRETAPPING_ENABLED == 1
     char ipAddress [15];
@@ -15,7 +15,7 @@ PoolManager::~PoolManager(void) {
 
 ReturnValue_t PoolManager::reserveSpace(const size_t size,
         store_address_t* address, bool ignoreFault) {
-    MutexHelper mutexHelper(mutex, MutexIF::TimeoutType::WAITING,
+    MutexGuard mutexHelper(mutex, MutexIF::TimeoutType::WAITING,
             mutexTimeoutMs);
     ReturnValue_t status = LocalPool::reserveSpace(size,
             address,ignoreFault);
@@ -32,7 +32,7 @@ ReturnValue_t PoolManager::deleteData(
             ". id is "<< storeId.packetIndex << std::endl;
 #endif
 #endif
-    MutexHelper mutexHelper(mutex, MutexIF::TimeoutType::WAITING,
+    MutexGuard mutexHelper(mutex, MutexIF::TimeoutType::WAITING,
             mutexTimeoutMs);
     return LocalPool::deleteData(storeId);
 }
@@ -40,7 +40,7 @@ ReturnValue_t PoolManager::deleteData(
 
 ReturnValue_t PoolManager::deleteData(uint8_t* buffer,
         size_t size, store_address_t* storeId) {
-    MutexHelper mutexHelper(mutex, MutexIF::TimeoutType::WAITING, 20);
+    MutexGuard mutexHelper(mutex, MutexIF::TimeoutType::WAITING, 20);
     ReturnValue_t status = LocalPool::deleteData(buffer,
             size, storeId);
     return status;
@@ -3,7 +3,7 @@
 
 #include "LocalPool.h"
 #include "StorageAccessor.h"
-#include "../ipc/MutexHelper.h"
+#include "../ipc/MutexGuard.h"
 
 
 /**
@@ -70,7 +70,7 @@ TEST_CASE( "Action Helper" , "[ActionHelper]") {
     SECTION("Handle finish"){
         CHECK(not testMqMock.wasMessageSent());
         ReturnValue_t status = 0x9876;
-        actionHelper.finish(true, testMqMock.getId(), testActionId, status);
+        actionHelper.finish(false, testMqMock.getId(), testActionId, status);
         CHECK(testMqMock.wasMessageSent());
         CommandMessage testMessage;
         REQUIRE(testMqMock.receiveMessage(&testMessage) == static_cast<uint32_t>(HasReturnvaluesIF::RETURN_OK));
@@ -5,7 +5,7 @@
 
 #include <fsfw/datapoollocal/HasLocalDataPoolIF.h>
 #include <fsfw/datapoollocal/StaticLocalDataSet.h>
-#include <fsfw/datapool/PoolReadHelper.h>
+#include <fsfw/datapool/PoolReadGuard.h>
 #include <fsfw/globalfunctions/bitutility.h>
 
 #include <unittest/core/CatchDefinitions.h>
@@ -21,6 +21,7 @@ TEST_CASE("LocalDataSet" , "[LocDataSetTest]") {
 
     SECTION("BasicTest") {
         /* Test some basic functions */
+        CHECK(localSet.getReportingEnabled() == false);
         CHECK(localSet.getLocalPoolIdsSerializedSize(false) == 3 * sizeof(lp_id_t));
         CHECK(localSet.getLocalPoolIdsSerializedSize(true) ==
                 3 * sizeof(lp_id_t) + sizeof(uint8_t));
@@ -54,7 +55,7 @@ TEST_CASE("LocalDataSet" , "[LocDataSetTest]") {
 
         {
             /* Test read operation. Values should be all zeros */
-            PoolReadHelper readHelper(&localSet);
+            PoolReadGuard readHelper(&localSet);
             REQUIRE(readHelper.getReadResult() == retval::CATCH_OK);
             CHECK(not localSet.isValid());
             CHECK(localSet.localPoolVarUint8.value == 0);
@@ -82,7 +83,7 @@ TEST_CASE("LocalDataSet" , "[LocDataSetTest]") {
         {
             /* Now we read again and check whether our zeroed values were overwritten with
                the values in the pool */
-            PoolReadHelper readHelper(&localSet);
+            PoolReadGuard readHelper(&localSet);
             REQUIRE(readHelper.getReadResult() == retval::CATCH_OK);
             CHECK(localSet.isValid());
             CHECK(localSet.localPoolVarUint8.value == 232);
@@ -3,7 +3,7 @@
 #include <catch2/catch_test_macros.hpp>
 #include <catch2/catch_approx.hpp>
 
-#include <fsfw/datapool/PoolReadHelper.h>
+#include <fsfw/datapool/PoolReadGuard.h>
 #include <fsfw/datapoollocal/HasLocalDataPoolIF.h>
 #include <fsfw/datapoollocal/StaticLocalDataSet.h>
 #include <fsfw/housekeeping/HousekeepingSnapshot.h>
@@ -75,7 +75,7 @@ TEST_CASE("LocalPoolManagerTest" , "[LocManTest]") {
     SECTION("SnapshotUpdateTests") {
         /* Set the variables in the set to certain values. These are checked later. */
         {
-            PoolReadHelper readHelper(&poolOwner->dataset);
+            PoolReadGuard readHelper(&poolOwner->dataset);
             REQUIRE(readHelper.getReadResult() == retval::CATCH_OK);
             poolOwner->dataset.localPoolVarUint8.value = 5;
             poolOwner->dataset.localPoolVarFloat.value = -12.242;
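PoolReadGuard (the renamed PoolReadHelper) applies the same RAII idea to data sets: it reads the data set on construction and commits it back when it goes out of scope, so pool variables can be modified safely in between. A rough sketch of that pattern outside the test code, with a hypothetical data set type and variable name:

ReturnValue_t updateTemperature(ThermalDataSet& dataset, float newValue) {
    // ThermalDataSet and its 'temperature' pool variable are made up for this example.
    PoolReadGuard readGuard(&dataset);
    if(readGuard.getReadResult() != HasReturnvaluesIF::RETURN_OK) {
        return readGuard.getReadResult();
    }
    dataset.temperature.value = newValue;
    return HasReturnvaluesIF::RETURN_OK;
    // the data set is committed back to the local pool when readGuard is destroyed
}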
@@ -1,16 +1,17 @@
 #ifndef FSFW_UNITTEST_TESTS_DATAPOOLLOCAL_LOCALPOOLOWNERBASE_H_
 #define FSFW_UNITTEST_TESTS_DATAPOOLLOCAL_LOCALPOOLOWNERBASE_H_
 
+#include <testcfg/objects/systemObjectList.h>
+
 #include <fsfw/datapoollocal/HasLocalDataPoolIF.h>
 #include <fsfw/datapoollocal/LocalDataSet.h>
 #include <fsfw/objectmanager/SystemObject.h>
 #include <fsfw/datapoollocal/LocalPoolVariable.h>
 #include <fsfw/datapoollocal/LocalPoolVector.h>
 #include <fsfw/ipc/QueueFactory.h>
-#include <testcfg/objects/systemObjectList.h>
 #include <fsfw/datapoollocal/StaticLocalDataSet.h>
 #include <fsfw/unittest/tests/mocks/MessageQueueMockBase.h>
-#include "../../../datapool/PoolReadHelper.h"
+#include <fsfw/datapool/PoolReadGuard.h>
 
 namespace lpool {
 static constexpr lp_id_t uint8VarId = 0;
@@ -115,6 +115,7 @@ TEST_CASE("LocalPoolVector" , "[LocPoolVecTest]") {
         REQUIRE(readOnlyVec.commit() ==
                 static_cast<int>(PoolVariableIF::INVALID_READ_WRITE_MODE));
     }
+    poolOwner->reset();
 }
 
 