some more fixes
parent fb4ba487b5
commit 30ba9ab916
@@ -15,24 +15,17 @@ static constexpr uint8_t VERSION_BITS = 0b00100000;
 static constexpr uint8_t CFDP_CLASS_ID = CLASS_ID::CFDP;
-static constexpr ReturnValue_t INVALID_TLV_TYPE =
-    result::makeCode(CFDP_CLASS_ID, 1);
-static constexpr ReturnValue_t INVALID_DIRECTIVE_FIELDS =
-    result::makeCode(CFDP_CLASS_ID, 2);
-static constexpr ReturnValue_t INVALID_PDU_DATAFIELD_LEN =
-    result::makeCode(CFDP_CLASS_ID, 3);
-static constexpr ReturnValue_t INVALID_ACK_DIRECTIVE_FIELDS =
-    result::makeCode(CFDP_CLASS_ID, 4);
+static constexpr ReturnValue_t INVALID_TLV_TYPE = result::makeCode(CFDP_CLASS_ID, 1);
+static constexpr ReturnValue_t INVALID_DIRECTIVE_FIELDS = result::makeCode(CFDP_CLASS_ID, 2);
+static constexpr ReturnValue_t INVALID_PDU_DATAFIELD_LEN = result::makeCode(CFDP_CLASS_ID, 3);
+static constexpr ReturnValue_t INVALID_ACK_DIRECTIVE_FIELDS = result::makeCode(CFDP_CLASS_ID, 4);
 //! Can not parse options. This can also occur because there are options
 //! available but the user did not pass a valid options array
-static constexpr ReturnValue_t METADATA_CANT_PARSE_OPTIONS =
-    result::makeCode(CFDP_CLASS_ID, 5);
-static constexpr ReturnValue_t NAK_CANT_PARSE_OPTIONS =
-    result::makeCode(CFDP_CLASS_ID, 6);
+static constexpr ReturnValue_t METADATA_CANT_PARSE_OPTIONS = result::makeCode(CFDP_CLASS_ID, 5);
+static constexpr ReturnValue_t NAK_CANT_PARSE_OPTIONS = result::makeCode(CFDP_CLASS_ID, 6);
 static constexpr ReturnValue_t FINISHED_CANT_PARSE_FS_RESPONSES =
     result::makeCode(CFDP_CLASS_ID, 6);
-static constexpr ReturnValue_t FILESTORE_REQUIRES_SECOND_FILE =
-    result::makeCode(CFDP_CLASS_ID, 8);
+static constexpr ReturnValue_t FILESTORE_REQUIRES_SECOND_FILE = result::makeCode(CFDP_CLASS_ID, 8);
 //! Can not parse filestore response because user did not pass a valid instance
 //! or remaining size is invalid
 static constexpr ReturnValue_t FILESTORE_RESPONSE_CANT_PARSE_FS_MESSAGE =
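For context: result::makeCode presumably packs the class ID into the upper byte of the 16-bit ReturnValue_t and the per-class number into the lower byte, which is what keeps codes from different classes distinct. A minimal sketch of that scheme, assuming this layout (the real definition lives in the framework's returnvalue header):

#include <cstdint>

using ReturnValue_t = uint16_t;

namespace result {
// Assumed layout: class ID in the upper byte, unique number in the lower byte.
constexpr ReturnValue_t makeCode(uint8_t classId, uint8_t number) {
  return static_cast<ReturnValue_t>((classId << 8) | number);
}
}  // namespace result

// With a hypothetical CFDP_CLASS_ID of 4, INVALID_TLV_TYPE would come out as 0x0401.
static_assert(result::makeCode(4, 1) == 0x0401);

Under such a scheme the per-class numbers must stay unique; note that NAK_CANT_PARSE_OPTIONS and FINISHED_CANT_PARSE_FS_RESPONSES above both use number 6 and would therefore compare equal.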
@@ -136,7 +136,7 @@ void Service2DeviceAccess::sendWiretappingTm(CommandMessage* reply, uint8_t subs
 const uint8_t* data = nullptr;
 size_t size = 0;
 ReturnValue_t result = ipcStore->getData(storeAddress, &data, &size);
-if (result != HasReturnvaluesIF::RETURN_OK) {
+if (result != RETURN_OK) {
 #if FSFW_CPP_OSTREAM_ENABLED == 1
 sif::error << "Service2DeviceAccess::sendWiretappingTm: Data Lost in "
               "handleUnrequestedReply with failure ID "
@@ -149,7 +149,7 @@ void Service2DeviceAccess::sendWiretappingTm(CommandMessage* reply, uint8_t subs
 // sending it back.
 WiretappingPacket tmPacket(DeviceHandlerMessage::getDeviceObjectId(reply), data);
 result = sendTmPacket(subservice, tmPacket.objectId, tmPacket.data, size);
-if (result != retval::OK) {
+if (result != RETURN_OK) {
 // TODO: Warning
 return;
 }
@@ -33,8 +33,7 @@ class HasReturnvaluesIF {
  * @param number
  * @return
  */
-static constexpr ReturnValue_t makeReturnCode(
-    uint8_t classId, uint8_t number) {
+static constexpr ReturnValue_t makeReturnCode(uint8_t classId, uint8_t number) {
 return result::makeCode(classId, number);
 }
};
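The one-line signature above keeps makeReturnCode as a thin compatibility wrapper: existing call sites on the interface keep compiling while the actual code construction moves to the free function. A sketch of the equivalence, using only names visible in this diff:

// Both spellings yield the same code after this change, so callers can
// migrate from the interface helper to result::makeCode incrementally.
constexpr ReturnValue_t viaInterface = HasReturnvaluesIF::makeReturnCode(CLASS_ID::CFDP, 1);
constexpr ReturnValue_t viaNamespace = result::makeCode(CLASS_ID::CFDP, 1);
static_assert(viaInterface == viaNamespace);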
@@ -9,8 +9,8 @@
 class PusIF : public SpacePacketIF {
  public:
 static constexpr uint8_t INTERFACE_ID = CLASS_ID::PUS_IF;
-static constexpr ReturnValue_t INVALID_PUS_VERSION = retval::makeCode(INTERFACE_ID, 0);
-static constexpr ReturnValue_t INVALID_CRC_16 = retval::makeCode(INTERFACE_ID, 1);
+static constexpr ReturnValue_t INVALID_PUS_VERSION = result::makeCode(INTERFACE_ID, 0);
+static constexpr ReturnValue_t INVALID_CRC_16 = result::makeCode(INTERFACE_ID, 1);

 ~PusIF() override = default;
 /**
@@ -2,8 +2,8 @@

 #include "fsfw/globalfunctions/CRC.h"

-PusTmZeroCopyWriter::PusTmZeroCopyWriter(TimeReaderIF* timeReader, uint8_t* data, size_t size)
-    : PusTmReader(timeReader, data, size) {}
+PusTmZeroCopyWriter::PusTmZeroCopyWriter(TimeReaderIF& timeReader, uint8_t* data, size_t size)
+    : PusTmReader(&timeReader, data, size) {}

 void PusTmZeroCopyWriter::setSequenceCount(uint16_t seqCount) {
 auto* spHeader =
@@ -4,7 +4,7 @@
 #include "PusTmReader.h"
 class PusTmZeroCopyWriter : public PusTmReader {
  public:
-PusTmZeroCopyWriter(TimeReaderIF* timeReader, uint8_t* data, size_t size);
+PusTmZeroCopyWriter(TimeReaderIF& timeReader, uint8_t* data, size_t size);

 void setSequenceCount(uint16_t seqCount);
 void updateErrorControl();
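Taking TimeReaderIF by reference instead of by pointer rules out null time readers at the call site; the writer simply forwards the address to the PusTmReader base. A hedged usage sketch (CdsShortTimeStamper as the concrete reader is an assumption borrowed from typical FSFW test setups, not something this diff shows):

// Hypothetical setup: any concrete TimeReaderIF implementation works here.
CdsShortTimeStamper timeStamper(objects::TIME_STAMPER);
uint8_t buf[128]{};

// Old API: PusTmZeroCopyWriter writer(&timeStamper, buf, sizeof(buf));
// New API takes a reference, so passing nullptr is no longer possible:
PusTmZeroCopyWriter writer(timeStamper, buf, sizeof(buf));
writer.setSequenceCount(42);
writer.updateErrorControl();  // recompute the CRC16 after the in-place edit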
@@ -309,7 +309,7 @@ void CommandingServiceBase::handleRequestQueue() {
 ReturnValue_t CommandingServiceBase::sendTmPacket(uint8_t subservice, const uint8_t* sourceData,
                                                   size_t sourceDataLen) {
 ReturnValue_t result = tmHelper.prepareTmPacket(subservice, sourceData, sourceDataLen);
-if (result != retval::OK) {
+if (result != result::OK) {
 return result;
 }
 return tmHelper.storeAndSendTmPacket();
@@ -319,7 +319,7 @@ ReturnValue_t CommandingServiceBase::sendTmPacket(uint8_t subservice, object_id_
                                                   const uint8_t* data, size_t dataLen) {
 telemetry::DataWithObjectIdPrefix dataWithObjId(objectId, data, dataLen);
 ReturnValue_t result = tmHelper.prepareTmPacket(subservice, dataWithObjId);
-if (result != retval::OK) {
+if (result != result::OK) {
 return result;
 }
 return tmHelper.storeAndSendTmPacket();
@@ -327,7 +327,7 @@ ReturnValue_t CommandingServiceBase::sendTmPacket(uint8_t subservice, object_id_

 ReturnValue_t CommandingServiceBase::sendTmPacket(uint8_t subservice, SerializeIF& sourceData) {
 ReturnValue_t result = tmHelper.prepareTmPacket(subservice, sourceData);
-if (result != retval::OK) {
+if (result != result::OK) {
 return result;
 }
 return tmHelper.storeAndSendTmPacket();
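All three sendTmPacket overloads follow the same prepare-then-store pattern: bail out unless prepareTmPacket returns result::OK, then hand off to storeAndSendTmPacket. From a CommandingServiceBase subclass the overloads would be invoked roughly like this (the subservice numbers, payload bytes, and object ID are illustrative, not from the diff):

uint8_t rawPayload[] = {0xCA, 0xFE};
// Raw payload overload:
ReturnValue_t result = sendTmPacket(1, rawPayload, sizeof(rawPayload));
// Overload that prefixes the payload with an object ID (hypothetical ID):
result = sendTmPacket(2, objects::SOME_HANDLER, rawPayload, sizeof(rawPayload));
// Overload that serializes a SerializeIF provider directly:
// result = sendTmPacket(3, mySerializableReport);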
@@ -36,7 +36,7 @@ ReturnValue_t VerificationReporter::sendFailureReport(VerifFailureParams params)

 ReturnValue_t VerificationReporter::sendSuccessReport(VerifSuccessParams params) {
 PusVerificationMessage message(params.reportId, params.ackFlags, params.tcPacketId, params.tcPsc,
-                               retval::OK, params.step);
+                               result::OK, params.step);
 ReturnValue_t status = MessageQueueSenderIF::sendMessage(acknowledgeQueue, &message);
 if (status != HasReturnvaluesIF::RETURN_OK) {
 #if FSFW_CPP_OSTREAM_ENABLED == 1
@@ -41,7 +41,7 @@ struct VerifFailureParams : public VerifParamsBase {
 VerifFailureParams(uint8_t reportId, PusTcIF& tc)
     : VerifParamsBase(reportId, tc.getPacketIdRaw(), tc.getPacketSeqCtrlRaw()) {}

-ReturnValue_t errorCode = retval::OK;
+ReturnValue_t errorCode = result::OK;
 uint8_t step = 0;
 uint32_t errorParam1 = 0;
 uint32_t errorParam2 = 0;
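Since errorCode now defaults to result::OK, a failure report only becomes meaningful once the caller overwrites it. A sketch of filling the struct and handing it to the reporter (the reporter instance, the parsed PusTcIF named tcReader, and the reportId value are all assumptions for illustration):

VerifFailureParams params(reportId, tcReader);  // reportId: e.g. a completion-failure ID
params.errorCode = result::makeCode(CLASS_ID::CFDP, 2);  // any framework error code
params.step = 0;
params.errorParam1 = 0;
params.errorParam2 = 0;
ReturnValue_t status = reporter.sendFailureReport(params);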
@@ -28,16 +28,16 @@ class DataWithObjectIdPrefix : public SerializeIF {
 }
 if (dataWrapper.type != ecss::DataTypes::RAW) {
 if ((dataWrapper.dataUnion.raw.data == nullptr) and (dataWrapper.dataUnion.raw.len > 0)) {
-return retval::FAILED;
+return result::FAILED;
 }
 } else if (dataWrapper.type == ecss::DataTypes::SERIALIZABLE) {
 if (dataWrapper.dataUnion.serializable == nullptr) {
-return retval::FAILED;
+return result::FAILED;
 }
 }
 ReturnValue_t result =
     SerializeAdapter::serialize(&objectId, buffer, size, maxSize, streamEndianness);
-if (result != retval::OK) {
+if (result != result::OK) {
 return result;
 }
 if (dataWrapper.type != ecss::DataTypes::RAW) {
@@ -47,7 +47,7 @@ class DataWithObjectIdPrefix : public SerializeIF {
 } else {
 return dataWrapper.dataUnion.serializable->serialize(buffer, size, maxSize, streamEndianness);
 }
-return retval::OK;
+return result::OK;
 }

 [[nodiscard]] size_t getSerializedSize() const override {
@@ -58,7 +58,7 @@ class DataWithObjectIdPrefix : public SerializeIF {
 Endianness streamEndianness) override {
 // As long as there is no way to know how long the expected data will be, this function
 // does not make sense
-return retval::FAILED;
+return result::FAILED;
 }

 private:
@@ -31,18 +31,18 @@ class CommandExecutor {
 static constexpr uint8_t CLASS_ID = CLASS_ID::LINUX_OSAL;

 //! [EXPORT] : [COMMENT] Execution of the current command has finished
-static constexpr ReturnValue_t EXECUTION_FINISHED = retval::makeCode(CLASS_ID, 0);
+static constexpr ReturnValue_t EXECUTION_FINISHED = result::makeCode(CLASS_ID, 0);

 //! [EXPORT] : [COMMENT] Command is pending. This will also be returned if the user tries
 //! to load another command but a command is still pending
-static constexpr ReturnValue_t COMMAND_PENDING = retval::makeCode(CLASS_ID, 1);
+static constexpr ReturnValue_t COMMAND_PENDING = result::makeCode(CLASS_ID, 1);
 //! [EXPORT] : [COMMENT] Some bytes have been read from the executing process
-static constexpr ReturnValue_t BYTES_READ = retval::makeCode(CLASS_ID, 2);
+static constexpr ReturnValue_t BYTES_READ = result::makeCode(CLASS_ID, 2);
 //! [EXPORT] : [COMMENT] Command execution failed
-static constexpr ReturnValue_t COMMAND_ERROR = retval::makeCode(CLASS_ID, 3);
+static constexpr ReturnValue_t COMMAND_ERROR = result::makeCode(CLASS_ID, 3);
 //! [EXPORT] : [COMMENT]
-static constexpr ReturnValue_t NO_COMMAND_LOADED_OR_PENDING = retval::makeCode(CLASS_ID, 4);
-static constexpr ReturnValue_t PCLOSE_CALL_ERROR = retval::makeCode(CLASS_ID, 6);
+static constexpr ReturnValue_t NO_COMMAND_LOADED_OR_PENDING = result::makeCode(CLASS_ID, 4);
+static constexpr ReturnValue_t PCLOSE_CALL_ERROR = result::makeCode(CLASS_ID, 6);

 /**
  * Constructor. Is initialized with maximum size of internal buffer to read data from the
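EXECUTION_FINISHED and BYTES_READ are expected outcomes a caller polls for rather than errors. A minimal polling sketch, assuming load/execute/check methods with roughly the signatures below (verify against the actual CommandExecutor header before relying on this):

CommandExecutor executor(1024);  // assumed: ctor takes the read-buffer size
if (executor.load("uptime", false, false) == result::OK and executor.execute() == result::OK) {
  bool replyReceived = false;
  ReturnValue_t code = CommandExecutor::COMMAND_PENDING;
  while (code == CommandExecutor::COMMAND_PENDING or code == CommandExecutor::BYTES_READ) {
    code = executor.check(replyReceived);  // assumed polling API
  }
  if (code != CommandExecutor::EXECUTION_FINISHED) {
    // COMMAND_ERROR or PCLOSE_CALL_ERROR: handle the failure here
  }
}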
@@ -16,8 +16,8 @@
 TEST_CASE("DataSetTest", "[DataSetTest]") {
 auto queue = MessageQueueMock(1);
 LocalPoolOwnerBase poolOwner(queue, objects::TEST_LOCAL_POOL_OWNER_BASE);
-REQUIRE(poolOwner.initializeHkManager() == retval::CATCH_OK);
-REQUIRE(poolOwner.initializeHkManagerAfterTaskCreation() == retval::CATCH_OK);
+REQUIRE(poolOwner.initializeHkManager() == result::OK);
+REQUIRE(poolOwner.initializeHkManagerAfterTaskCreation() == result::OK);
 LocalPoolStaticTestDataSet localSet;

 SECTION("BasicTest") {
@@ -36,7 +36,7 @@ TEST_CASE("DataSetTest", "[DataSetTest]") {

 /* Test local pool ID serialization */
 CHECK(localSet.serializeLocalPoolIds(&localPoolIdBuffPtr, &serSize, maxSize,
-                                     SerializeIF::Endianness::MACHINE) == retval::CATCH_OK);
+                                     SerializeIF::Endianness::MACHINE) == result::OK);
 CHECK(serSize == maxSize);
 CHECK(localPoolIdBuff[0] == 3);
 CHECK(lpIds[0] == localSet.localPoolVarUint8.getDataPoolId());
@@ -47,8 +47,7 @@ TEST_CASE("DataSetTest", "[DataSetTest]") {
 localPoolIdBuffPtr = localPoolIdBuff;
 serSize = 0;
 CHECK(localSet.serializeLocalPoolIds(&localPoolIdBuffPtr, &serSize, maxSize,
-                                     SerializeIF::Endianness::MACHINE,
-                                     false) == retval::CATCH_OK);
+                                     SerializeIF::Endianness::MACHINE, false) == result::OK);
 CHECK(serSize == maxSize - sizeof(uint8_t));
 CHECK(lpIds[0] == localSet.localPoolVarUint8.getDataPoolId());
 CHECK(lpIds[1] == localSet.localPoolVarFloat.getDataPoolId());
@@ -57,7 +56,7 @@ TEST_CASE("DataSetTest", "[DataSetTest]") {
 {
 /* Test read operation. Values should be all zeros */
 PoolReadGuard readHelper(&localSet);
-REQUIRE(readHelper.getReadResult() == retval::CATCH_OK);
+REQUIRE(readHelper.getReadResult() == result::OK);
 CHECK(not localSet.isValid());
 CHECK(localSet.localPoolVarUint8.value == 0);
 CHECK(not localSet.localPoolVarUint8.isValid());
@@ -90,7 +89,7 @@ TEST_CASE("DataSetTest", "[DataSetTest]") {
 /* Now we read again and check whether our zeroed values were overwritten with
    the values in the pool */
 PoolReadGuard readHelper(&localSet);
-REQUIRE(readHelper.getReadResult() == retval::CATCH_OK);
+REQUIRE(readHelper.getReadResult() == result::OK);
 CHECK(localSet.isValid());
 CHECK(localSet.localPoolVarUint8.value == 232);
 CHECK(localSet.localPoolVarUint8.isValid());
@@ -110,7 +109,7 @@ TEST_CASE("DataSetTest", "[DataSetTest]") {
 uint8_t buffer[maxSize + 1];
 uint8_t* buffPtr = buffer;
 CHECK(localSet.serialize(&buffPtr, &serSize, maxSize, SerializeIF::Endianness::MACHINE) ==
-      retval::CATCH_OK);
+      result::OK);
 uint8_t rawUint8 = buffer[0];
 CHECK(rawUint8 == 232);
 float rawFloat = 0.0;
@@ -128,7 +127,7 @@ TEST_CASE("DataSetTest", "[DataSetTest]") {
 std::memset(buffer, 0, sizeof(buffer));
 const uint8_t* constBuffPtr = buffer;
 CHECK(localSet.deSerialize(&constBuffPtr, &sizeToDeserialize,
-                           SerializeIF::Endianness::MACHINE) == retval::CATCH_OK);
+                           SerializeIF::Endianness::MACHINE) == result::OK);
 /* Check whether deserialization was successfull */
 CHECK(localSet.localPoolVarUint8.value == 0);
 CHECK(localSet.localPoolVarFloat.value == Catch::Approx(0.0));
@@ -156,7 +155,7 @@ TEST_CASE("DataSetTest", "[DataSetTest]") {
 serSize = 0;
 buffPtr = buffer;
 CHECK(localSet.serialize(&buffPtr, &serSize, maxSize, SerializeIF::Endianness::MACHINE) ==
-      retval::CATCH_OK);
+      result::OK);
 CHECK(rawUint8 == 232);
 std::memcpy(&rawFloat, buffer + sizeof(uint8_t), sizeof(float));
 CHECK(rawFloat == Catch::Approx(-2324.322));
@@ -186,7 +185,7 @@ TEST_CASE("DataSetTest", "[DataSetTest]") {
 sizeToDeserialize = maxSize;
 constBuffPtr = buffer;
 CHECK(localSet.deSerialize(&constBuffPtr, &sizeToDeserialize,
-                           SerializeIF::Endianness::MACHINE) == retval::CATCH_OK);
+                           SerializeIF::Endianness::MACHINE) == result::OK);
 /* Check whether deserialization was successfull */
 CHECK(localSet.localPoolVarUint8.value == 0);
 CHECK(localSet.localPoolVarFloat.value == Catch::Approx(0.0));
@@ -213,10 +212,10 @@ TEST_CASE("DataSetTest", "[DataSetTest]") {

 /* Register same variables again to get more than 8 registered variables */
 for (uint8_t idx = 0; idx < 8; idx++) {
-REQUIRE(set.registerVariable(&localSet.localPoolVarUint8) == retval::CATCH_OK);
+REQUIRE(set.registerVariable(&localSet.localPoolVarUint8) == result::OK);
 }
-REQUIRE(set.registerVariable(&localSet.localPoolVarUint8) == retval::CATCH_OK);
-REQUIRE(set.registerVariable(&localSet.localPoolUint16Vec) == retval::CATCH_OK);
+REQUIRE(set.registerVariable(&localSet.localPoolVarUint8) == result::OK);
+REQUIRE(set.registerVariable(&localSet.localPoolUint16Vec) == result::OK);

 set.setValidityBufferGeneration(true);
 {
@@ -233,7 +232,7 @@ TEST_CASE("DataSetTest", "[DataSetTest]") {
 uint8_t buffer[maxSize + 1];
 uint8_t* buffPtr = buffer;
 CHECK(set.serialize(&buffPtr, &serSize, maxSize, SerializeIF::Endianness::MACHINE) ==
-      retval::CATCH_OK);
+      result::OK);
 std::array<uint8_t, 2> validityBuffer{};
 std::memcpy(validityBuffer.data(), buffer + 9 + sizeof(uint16_t) * 3, 2);
 /* The first 9 variables should be valid */
@@ -251,7 +250,7 @@ TEST_CASE("DataSetTest", "[DataSetTest]") {
 const uint8_t* constBuffPtr = buffer;
 size_t sizeToDeSerialize = serSize;
 CHECK(set.deSerialize(&constBuffPtr, &sizeToDeSerialize, SerializeIF::Endianness::MACHINE) ==
-      retval::CATCH_OK);
+      result::OK);
 CHECK(localSet.localPoolVarUint8.isValid() == false);
 CHECK(localSet.localPoolUint16Vec.isValid() == true);
 }
@@ -261,20 +260,20 @@ TEST_CASE("DataSetTest", "[DataSetTest]") {
 SharedLocalDataSet sharedSet(sharedSetId, &poolOwner, lpool::testSetId, 5);
 localSet.localPoolVarUint8.setReadWriteMode(pool_rwm_t::VAR_WRITE);
 localSet.localPoolUint16Vec.setReadWriteMode(pool_rwm_t::VAR_WRITE);
-CHECK(sharedSet.registerVariable(&localSet.localPoolVarUint8) == retval::CATCH_OK);
-CHECK(sharedSet.registerVariable(&localSet.localPoolUint16Vec) == retval::CATCH_OK);
-CHECK(sharedSet.initialize() == retval::CATCH_OK);
-CHECK(sharedSet.lockDataset() == retval::CATCH_OK);
-CHECK(sharedSet.unlockDataset() == retval::CATCH_OK);
+CHECK(sharedSet.registerVariable(&localSet.localPoolVarUint8) == result::OK);
+CHECK(sharedSet.registerVariable(&localSet.localPoolUint16Vec) == result::OK);
+CHECK(sharedSet.initialize() == result::OK);
+CHECK(sharedSet.lockDataset() == result::OK);
+CHECK(sharedSet.unlockDataset() == result::OK);

 {
 // PoolReadGuard rg(&sharedSet);
-// CHECK(rg.getReadResult() == retval::CATCH_OK);
+// CHECK(rg.getReadResult() == result::OK);
 localSet.localPoolVarUint8.value = 5;
 localSet.localPoolUint16Vec.value[0] = 1;
 localSet.localPoolUint16Vec.value[1] = 2;
 localSet.localPoolUint16Vec.value[2] = 3;
-CHECK(sharedSet.commit() == retval::CATCH_OK);
+CHECK(sharedSet.commit() == result::OK);
 }

 sharedSet.setReadCommitProtectionBehaviour(true);
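The PoolReadGuard used throughout these tests is an RAII helper: construction calls read() on the set or variable, the outcome is cached for inspection via getReadResult(), and the destructor commits, so edits made inside the scope land back in the pool even on early returns. The idiom, condensed:

{
  PoolReadGuard readHelper(&localSet);
  if (readHelper.getReadResult() == result::OK) {
    localSet.localPoolVarUint8.value = 232;  // edit while the guard holds the data
  }
}  // guard destructor commits the modified values back to the local pool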
@@ -22,8 +22,8 @@ TEST_CASE("Local Pool Manager Tests", "[LocManTest]") {
 auto hkReceiver = HkReceiverMock(hkDest);
 auto queue = MessageQueueMock(3);
 LocalPoolOwnerBase poolOwner(queue, objects::TEST_LOCAL_POOL_OWNER_BASE);
-REQUIRE(poolOwner.initializeHkManager() == retval::CATCH_OK);
-REQUIRE(poolOwner.initializeHkManagerAfterTaskCreation() == retval::CATCH_OK);
+REQUIRE(poolOwner.initializeHkManager() == result::OK);
+REQUIRE(poolOwner.initializeHkManagerAfterTaskCreation() == result::OK);

 MessageQueueMock& poolOwnerMock = poolOwner.getMockQueueHandle();

@@ -46,14 +46,14 @@ TEST_CASE("Local Pool Manager Tests", "[LocManTest]") {
 CHECK(owner->getObjectId() == objects::TEST_LOCAL_POOL_OWNER_BASE);

 /* Subscribe for message generation on update. */
-REQUIRE(poolOwner.subscribeWrapperSetUpdate(subscriberId) == retval::CATCH_OK);
+REQUIRE(poolOwner.subscribeWrapperSetUpdate(subscriberId) == result::OK);
 /* Subscribe for an update message. */
 poolOwner.dataset.setChanged(true);
 /* Now the update message should be generated. */
-REQUIRE(poolOwner.poolManager.performHkOperation() == retval::CATCH_OK);
+REQUIRE(poolOwner.poolManager.performHkOperation() == result::OK);
 REQUIRE(poolOwnerMock.wasMessageSent());

-REQUIRE(poolOwnerMock.getNextSentMessage(subscriberId, messageSent) == retval::CATCH_OK);
+REQUIRE(poolOwnerMock.getNextSentMessage(subscriberId, messageSent) == result::OK);
 CHECK(messageSent.getCommand() ==
       static_cast<int>(HousekeepingMessage::UPDATE_NOTIFICATION_SET));
@@ -62,26 +62,26 @@ TEST_CASE("Local Pool Manager Tests", "[LocManTest]") {
 poolOwnerMock.clearMessages(true);
 /* Set changed again, result should be the same. */
 poolOwner.dataset.setChanged(true);
-REQUIRE(poolOwner.poolManager.performHkOperation() == retval::CATCH_OK);
+REQUIRE(poolOwner.poolManager.performHkOperation() == result::OK);

 REQUIRE(poolOwnerMock.wasMessageSent() == true);
 CHECK(poolOwnerMock.numberOfSentMessages() == 1);
-REQUIRE(poolOwnerMock.getNextSentMessage(subscriberId, messageSent) == retval::CATCH_OK);
+REQUIRE(poolOwnerMock.getNextSentMessage(subscriberId, messageSent) == result::OK);
 CHECK(messageSent.getCommand() ==
       static_cast<int>(HousekeepingMessage::UPDATE_NOTIFICATION_SET));

 poolOwnerMock.clearMessages(true);
 /* Now subscribe for set update HK as well. */
-REQUIRE(poolOwner.subscribeWrapperSetUpdateHk(false, &hkReceiver) == retval::CATCH_OK);
+REQUIRE(poolOwner.subscribeWrapperSetUpdateHk(false, &hkReceiver) == result::OK);
 poolOwner.dataset.setChanged(true);
-REQUIRE(poolOwner.poolManager.performHkOperation() == retval::CATCH_OK);
+REQUIRE(poolOwner.poolManager.performHkOperation() == result::OK);
 REQUIRE(poolOwnerMock.wasMessageSent() == true);
 CHECK(poolOwnerMock.numberOfSentMessages() == 2);
 // first message sent should be the update notification
-REQUIRE(poolOwnerMock.getNextSentMessage(subscriberId, messageSent) == retval::CATCH_OK);
+REQUIRE(poolOwnerMock.getNextSentMessage(subscriberId, messageSent) == result::OK);
 CHECK(messageSent.getCommand() ==
       static_cast<int>(HousekeepingMessage::UPDATE_NOTIFICATION_SET));
-REQUIRE(poolOwnerMock.getNextSentMessage(messageSent) == retval::CATCH_OK);
+REQUIRE(poolOwnerMock.getNextSentMessage(messageSent) == result::OK);
 CHECK(messageSent.getCommand() == static_cast<int>(HousekeepingMessage::HK_REPORT));
 /* Clear message to avoid memory leak, our mock won't do it for us (yet) */
 CommandMessageCleaner::clearCommandMessage(&messageSent);
@@ -91,7 +91,7 @@ TEST_CASE("Local Pool Manager Tests", "[LocManTest]") {
 /* Set the variables in the set to certain values. These are checked later. */
 {
 PoolReadGuard readHelper(&poolOwner.dataset);
-REQUIRE(readHelper.getReadResult() == retval::CATCH_OK);
+REQUIRE(readHelper.getReadResult() == result::OK);
 poolOwner.dataset.localPoolVarUint8.value = 5;
 poolOwner.dataset.localPoolVarFloat.value = -12.242;
 poolOwner.dataset.localPoolUint16Vec.value[0] = 2;
@@ -100,7 +100,7 @@ TEST_CASE("Local Pool Manager Tests", "[LocManTest]") {
 }

 /* Subscribe for snapshot generation on update. */
-REQUIRE(poolOwner.subscribeWrapperSetUpdateSnapshot(subscriberId) == retval::CATCH_OK);
+REQUIRE(poolOwner.subscribeWrapperSetUpdateSnapshot(subscriberId) == result::OK);
 poolOwner.dataset.setChanged(true);

 /* Store current time, we are going to check the (approximate) time equality later */
@@ -108,10 +108,10 @@ TEST_CASE("Local Pool Manager Tests", "[LocManTest]") {
 Clock::getClock_timeval(&now);

 /* Trigger generation of snapshot */
-REQUIRE(poolOwner.poolManager.performHkOperation() == retval::CATCH_OK);
+REQUIRE(poolOwner.poolManager.performHkOperation() == result::OK);
 REQUIRE(poolOwnerMock.wasMessageSent());
 CHECK(poolOwnerMock.numberOfSentMessages() == 1);
-REQUIRE(poolOwnerMock.getNextSentMessage(subscriberId, messageSent) == retval::CATCH_OK);
+REQUIRE(poolOwnerMock.getNextSentMessage(subscriberId, messageSent) == result::OK);
 /* Check that snapshot was generated */
 CHECK(messageSent.getCommand() == static_cast<int>(HousekeepingMessage::UPDATE_SNAPSHOT_SET));
 /* Now we deserialize the snapshot into a new dataset instance */
@@ -121,7 +121,7 @@ TEST_CASE("Local Pool Manager Tests", "[LocManTest]") {
 store_address_t storeId;
 HousekeepingMessage::getUpdateSnapshotSetCommand(&messageSent, &storeId);
 ConstAccessorPair accessorPair = tglob::getIpcStoreHandle()->getData(storeId);
-REQUIRE(accessorPair.first == retval::CATCH_OK);
+REQUIRE(accessorPair.first == result::OK);
 const uint8_t* readOnlyPtr = accessorPair.second.data();
 size_t sizeToDeserialize = accessorPair.second.size();
 CHECK(newSet.localPoolVarFloat.value == 0);
@@ -131,7 +131,7 @@ TEST_CASE("Local Pool Manager Tests", "[LocManTest]") {
 CHECK(newSet.localPoolUint16Vec.value[2] == 0);
 /* Fill the dataset and timestamp */
 REQUIRE(snapshot.deSerialize(&readOnlyPtr, &sizeToDeserialize,
-                             SerializeIF::Endianness::MACHINE) == retval::CATCH_OK);
+                             SerializeIF::Endianness::MACHINE) == result::OK);
 /* Now we check that the snapshot is actually correct */
 CHECK(newSet.localPoolVarFloat.value == Catch::Approx(-12.242));
 CHECK(newSet.localPoolVarUint8 == 5);
@@ -154,14 +154,14 @@ TEST_CASE("Local Pool Manager Tests", "[LocManTest]") {

 /* Subscribe for variable snapshot */
 REQUIRE(poolOwner.subscribeWrapperVariableSnapshot(subscriberId, lpool::uint8VarId) ==
-        retval::CATCH_OK);
+        result::OK);
 auto poolVar =
     dynamic_cast<lp_var_t<uint8_t>*>(poolOwner.getPoolObjectHandle(lpool::uint8VarId));
 REQUIRE(poolVar != nullptr);

 {
 PoolReadGuard rg(poolVar);
-CHECK(rg.getReadResult() == retval::CATCH_OK);
+CHECK(rg.getReadResult() == result::OK);
 poolVar->value = 25;
 }

@@ -170,7 +170,7 @@ TEST_CASE("Local Pool Manager Tests", "[LocManTest]") {
 CCSDSTime::CDS_short timeCdsNow{};
 timeval now{};
 Clock::getClock_timeval(&now);
-REQUIRE(poolOwner.poolManager.performHkOperation() == retval::CATCH_OK);
+REQUIRE(poolOwner.poolManager.performHkOperation() == result::OK);

 /* Check update snapshot was sent. */
 REQUIRE(poolOwnerMock.wasMessageSent());
@@ -178,7 +178,7 @@ TEST_CASE("Local Pool Manager Tests", "[LocManTest]") {

 /* Should have been reset. */
 CHECK(poolVar->hasChanged() == false);
-REQUIRE(poolOwnerMock.getNextSentMessage(subscriberId, messageSent) == retval::CATCH_OK);
+REQUIRE(poolOwnerMock.getNextSentMessage(subscriberId, messageSent) == result::OK);
 CHECK(messageSent.getCommand() ==
       static_cast<int>(HousekeepingMessage::UPDATE_SNAPSHOT_VARIABLE));
 /* Now we deserialize the snapshot into a new dataset instance */
@@ -188,13 +188,13 @@ TEST_CASE("Local Pool Manager Tests", "[LocManTest]") {
 store_address_t storeId;
 HousekeepingMessage::getUpdateSnapshotVariableCommand(&messageSent, &storeId);
 ConstAccessorPair accessorPair = tglob::getIpcStoreHandle()->getData(storeId);
-REQUIRE(accessorPair.first == retval::CATCH_OK);
+REQUIRE(accessorPair.first == result::OK);
 const uint8_t* readOnlyPtr = accessorPair.second.data();
 size_t sizeToDeserialize = accessorPair.second.size();
 CHECK(varCopy.value == 0);
 /* Fill the dataset and timestamp */
 REQUIRE(snapshot.deSerialize(&readOnlyPtr, &sizeToDeserialize,
-                             SerializeIF::Endianness::MACHINE) == retval::CATCH_OK);
+                             SerializeIF::Endianness::MACHINE) == result::OK);
 CHECK(varCopy.value == 25);

 /* Now we check that both times are equal */
@@ -212,30 +212,30 @@ TEST_CASE("Local Pool Manager Tests", "[LocManTest]") {

 /* Subscribe for variable update */
 REQUIRE(poolOwner.subscribeWrapperVariableUpdate(subscriberId, lpool::uint8VarId) ==
-        retval::CATCH_OK);
+        result::OK);
 auto* poolVar =
     dynamic_cast<lp_var_t<uint8_t>*>(poolOwner.getPoolObjectHandle(lpool::uint8VarId));
 REQUIRE(poolVar != nullptr);
 poolVar->setChanged(true);
 REQUIRE(poolVar->hasChanged() == true);
-REQUIRE(poolOwner.poolManager.performHkOperation() == retval::CATCH_OK);
+REQUIRE(poolOwner.poolManager.performHkOperation() == result::OK);

 /* Check update notification was sent. */
 REQUIRE(poolOwnerMock.wasMessageSent());
 CHECK(poolOwnerMock.numberOfSentMessages() == 1);
 /* Should have been reset. */
 CHECK(poolVar->hasChanged() == false);
-REQUIRE(poolOwnerMock.getNextSentMessage(subscriberId, messageSent) == retval::CATCH_OK);
+REQUIRE(poolOwnerMock.getNextSentMessage(subscriberId, messageSent) == result::OK);
 CHECK(messageSent.getCommand() ==
       static_cast<int>(HousekeepingMessage::UPDATE_NOTIFICATION_VARIABLE));
 /* Now subscribe for the dataset update (HK and update) again with subscription interface */
 REQUIRE(subscriptionIF->subscribeForSetUpdateMessage(lpool::testSetId, objects::NO_OBJECT,
-                                                     subscriberId, false) == retval::CATCH_OK);
-REQUIRE(poolOwner.subscribeWrapperSetUpdateHk(false, &hkReceiver) == retval::CATCH_OK);
+                                                     subscriberId, false) == result::OK);
+REQUIRE(poolOwner.subscribeWrapperSetUpdateHk(false, &hkReceiver) == result::OK);

 poolOwner.dataset.setChanged(true);
 poolOwnerMock.clearMessages();
-REQUIRE(poolOwner.poolManager.performHkOperation() == retval::CATCH_OK);
+REQUIRE(poolOwner.poolManager.performHkOperation() == result::OK);
 /* Now two messages should be sent. */
 REQUIRE(poolOwnerMock.wasMessageSent());
 CHECK(poolOwnerMock.numberOfSentMessages() == 2);
@@ -244,13 +244,13 @@ TEST_CASE("Local Pool Manager Tests", "[LocManTest]") {
 poolOwner.dataset.setChanged(true);
 poolOwnerMock.clearMessages(true);
 poolVar->setChanged(true);
-REQUIRE(poolOwner.poolManager.performHkOperation() == retval::CATCH_OK);
+REQUIRE(poolOwner.poolManager.performHkOperation() == result::OK);
 /* Now three messages should be sent. */
 REQUIRE(poolOwnerMock.wasMessageSent());
 CHECK(poolOwnerMock.numberOfSentMessages() == 3);
 CHECK(poolOwnerMock.numberOfSentMessagesToDest(subscriberId) == 2);
 CHECK(poolOwnerMock.numberOfSentMessagesToDest(hkDest) == 1);
-REQUIRE(poolOwnerMock.getNextSentMessage(subscriberId, messageSent) == retval::CATCH_OK);
+REQUIRE(poolOwnerMock.getNextSentMessage(subscriberId, messageSent) == result::OK);
 CHECK(messageSent.getCommand() ==
       static_cast<int>(HousekeepingMessage::UPDATE_NOTIFICATION_VARIABLE));
 REQUIRE(poolOwnerMock.clearLastSentMessage(subscriberId) == HasReturnvaluesIF::RETURN_OK);
@@ -259,7 +259,7 @@ TEST_CASE("Local Pool Manager Tests", "[LocManTest]") {
 CHECK(messageSent.getCommand() ==
       static_cast<int>(HousekeepingMessage::UPDATE_NOTIFICATION_SET));
 REQUIRE(poolOwnerMock.clearLastSentMessage(subscriberId) == HasReturnvaluesIF::RETURN_OK);
-REQUIRE(poolOwnerMock.getNextSentMessage(messageSent) == retval::CATCH_OK);
+REQUIRE(poolOwnerMock.getNextSentMessage(messageSent) == result::OK);
 CHECK(messageSent.getCommand() == static_cast<int>(HousekeepingMessage::HK_REPORT));
 REQUIRE(poolOwnerMock.clearLastSentMessage() == HasReturnvaluesIF::RETURN_OK);
 REQUIRE(poolOwnerMock.getNextSentMessage(subscriberId, messageSent) == MessageQueueIF::EMPTY);
@@ -271,62 +271,62 @@ TEST_CASE("Local Pool Manager Tests", "[LocManTest]") {
    the temporal behaviour correctly the HK manager should generate a HK packet
    immediately and the periodic helper depends on HK op function calls anyway instead of
    using the clock, so we could also just call performHkOperation multiple times */
-REQUIRE(poolOwner.subscribePeriodicHk(true) == retval::CATCH_OK);
-REQUIRE(poolOwner.poolManager.performHkOperation() == retval::CATCH_OK);
+REQUIRE(poolOwner.subscribePeriodicHk(true) == result::OK);
+REQUIRE(poolOwner.poolManager.performHkOperation() == result::OK);
 /* Now HK packet should be sent as message immediately. */
 REQUIRE(poolOwnerMock.wasMessageSent());
 CHECK(poolOwnerMock.numberOfSentMessages() == 1);
-CHECK(poolOwnerMock.clearLastSentMessage() == retval::CATCH_OK);
+CHECK(poolOwnerMock.clearLastSentMessage() == result::OK);

 LocalPoolDataSetBase* setHandle = poolOwner.getDataSetHandle(lpool::testSid);
 REQUIRE(setHandle != nullptr);
 CHECK(poolOwner.poolManager.generateHousekeepingPacket(lpool::testSid, setHandle, false) ==
-      retval::CATCH_OK);
+      result::OK);
 REQUIRE(poolOwnerMock.wasMessageSent());
 CHECK(poolOwnerMock.numberOfSentMessages() == 1);
-CHECK(poolOwnerMock.clearLastSentMessage() == retval::CATCH_OK);
+CHECK(poolOwnerMock.clearLastSentMessage() == result::OK);

 CHECK(setHandle->getReportingEnabled() == true);
 CommandMessage hkCmd;
 HousekeepingMessage::setToggleReportingCommand(&hkCmd, lpool::testSid, false, false);
-CHECK(poolOwner.poolManager.handleHousekeepingMessage(&hkCmd) == retval::CATCH_OK);
+CHECK(poolOwner.poolManager.handleHousekeepingMessage(&hkCmd) == result::OK);
 CHECK(setHandle->getReportingEnabled() == false);
 REQUIRE(poolOwnerMock.wasMessageSent());
 CHECK(poolOwnerMock.numberOfSentMessages() == 1);
-CHECK(poolOwnerMock.clearLastSentMessage() == retval::CATCH_OK);
+CHECK(poolOwnerMock.clearLastSentMessage() == result::OK);

 HousekeepingMessage::setToggleReportingCommand(&hkCmd, lpool::testSid, true, false);
-CHECK(poolOwner.poolManager.handleHousekeepingMessage(&hkCmd) == retval::CATCH_OK);
+CHECK(poolOwner.poolManager.handleHousekeepingMessage(&hkCmd) == result::OK);
 CHECK(setHandle->getReportingEnabled() == true);
 REQUIRE(poolOwnerMock.wasMessageSent());
-CHECK(poolOwnerMock.clearLastSentMessage() == retval::CATCH_OK);
+CHECK(poolOwnerMock.clearLastSentMessage() == result::OK);

 HousekeepingMessage::setToggleReportingCommand(&hkCmd, lpool::testSid, false, false);
-CHECK(poolOwner.poolManager.handleHousekeepingMessage(&hkCmd) == retval::CATCH_OK);
+CHECK(poolOwner.poolManager.handleHousekeepingMessage(&hkCmd) == result::OK);
 CHECK(setHandle->getReportingEnabled() == false);
 REQUIRE(poolOwnerMock.wasMessageSent());
-CHECK(poolOwnerMock.clearLastSentMessage() == retval::CATCH_OK);
+CHECK(poolOwnerMock.clearLastSentMessage() == result::OK);

 HousekeepingMessage::setCollectionIntervalModificationCommand(&hkCmd, lpool::testSid, 0.4,
                                                               false);
-CHECK(poolOwner.poolManager.handleHousekeepingMessage(&hkCmd) == retval::CATCH_OK);
+CHECK(poolOwner.poolManager.handleHousekeepingMessage(&hkCmd) == result::OK);
 /* For non-diagnostics and a specified minimum frequency of 0.2 seconds, the
    resulting collection interval should be 1.0 second */
 CHECK(poolOwner.dataset.getCollectionInterval() == 1.0);
 REQUIRE(poolOwnerMock.wasMessageSent());
 REQUIRE(poolOwnerMock.numberOfSentMessages() == 1);
-CHECK(poolOwnerMock.clearLastSentMessage() == retval::CATCH_OK);
+CHECK(poolOwnerMock.clearLastSentMessage() == result::OK);

 HousekeepingMessage::setStructureReportingCommand(&hkCmd, lpool::testSid, false);
-REQUIRE(poolOwner.poolManager.performHkOperation() == retval::CATCH_OK);
-CHECK(poolOwner.poolManager.handleHousekeepingMessage(&hkCmd) == retval::CATCH_OK);
+REQUIRE(poolOwner.poolManager.performHkOperation() == result::OK);
+CHECK(poolOwner.poolManager.handleHousekeepingMessage(&hkCmd) == result::OK);
 /* Now HK packet should be sent as message. */
 REQUIRE(poolOwnerMock.wasMessageSent());
 REQUIRE(poolOwnerMock.numberOfSentMessages() == 1);
 poolOwnerMock.clearMessages();

 HousekeepingMessage::setOneShotReportCommand(&hkCmd, lpool::testSid, false);
-CHECK(poolOwner.poolManager.handleHousekeepingMessage(&hkCmd) == retval::CATCH_OK);
+CHECK(poolOwner.poolManager.handleHousekeepingMessage(&hkCmd) == result::OK);
 REQUIRE(poolOwnerMock.wasMessageSent());
 REQUIRE(poolOwnerMock.numberOfSentMessages() == 1);
 poolOwnerMock.clearMessages();
@@ -334,7 +334,7 @@ TEST_CASE("Local Pool Manager Tests", "[LocManTest]") {
 HousekeepingMessage::setUpdateNotificationSetCommand(&hkCmd, lpool::testSid);
 sid_t sidToCheck;
 store_address_t storeId;
-CHECK(poolOwner.poolManager.handleHousekeepingMessage(&hkCmd) == retval::CATCH_OK);
+CHECK(poolOwner.poolManager.handleHousekeepingMessage(&hkCmd) == result::OK);
 CHECK(poolOwner.changedDataSetCallbackWasCalled(sidToCheck, storeId) == true);
 CHECK(sidToCheck == lpool::testSid);

@@ -347,7 +347,7 @@ TEST_CASE("Local Pool Manager Tests", "[LocManTest]") {
 /* We still expect a failure message being sent */
 REQUIRE(poolOwnerMock.wasMessageSent());
 REQUIRE(poolOwnerMock.numberOfSentMessages() == 1);
-CHECK(poolOwnerMock.clearLastSentMessage() == retval::CATCH_OK);
+CHECK(poolOwnerMock.clearLastSentMessage() == result::OK);

 HousekeepingMessage::setCollectionIntervalModificationCommand(&hkCmd, lpool::testSid, 0.4,
                                                               false);
@@ -355,36 +355,36 @@ TEST_CASE("Local Pool Manager Tests", "[LocManTest]") {
       static_cast<int>(LocalDataPoolManager::WRONG_HK_PACKET_TYPE));
 REQUIRE(poolOwnerMock.wasMessageSent());
 REQUIRE(poolOwnerMock.numberOfSentMessages() == 1);
-CHECK(poolOwnerMock.clearLastSentMessage() == retval::CATCH_OK);
+CHECK(poolOwnerMock.clearLastSentMessage() == result::OK);

 HousekeepingMessage::setStructureReportingCommand(&hkCmd, lpool::testSid, false);
 CHECK(poolOwner.poolManager.handleHousekeepingMessage(&hkCmd) ==
       static_cast<int>(LocalDataPoolManager::WRONG_HK_PACKET_TYPE));
 REQUIRE(poolOwnerMock.wasMessageSent());
 REQUIRE(poolOwnerMock.numberOfSentMessages() == 1);
-CHECK(poolOwnerMock.clearLastSentMessage() == retval::CATCH_OK);
+CHECK(poolOwnerMock.clearLastSentMessage() == result::OK);

 HousekeepingMessage::setStructureReportingCommand(&hkCmd, lpool::testSid, true);
-CHECK(poolOwner.poolManager.handleHousekeepingMessage(&hkCmd) == retval::CATCH_OK);
+CHECK(poolOwner.poolManager.handleHousekeepingMessage(&hkCmd) == result::OK);
 REQUIRE(poolOwnerMock.wasMessageSent());
 REQUIRE(poolOwnerMock.numberOfSentMessages() == 1);
 poolOwnerMock.clearMessages();

 HousekeepingMessage::setCollectionIntervalModificationCommand(&hkCmd, lpool::testSid, 0.4,
                                                               true);
-CHECK(poolOwner.poolManager.handleHousekeepingMessage(&hkCmd) == retval::CATCH_OK);
+CHECK(poolOwner.poolManager.handleHousekeepingMessage(&hkCmd) == result::OK);
 REQUIRE(poolOwnerMock.wasMessageSent());
 REQUIRE(poolOwnerMock.numberOfSentMessages() == 1);
 poolOwnerMock.clearMessages();

 HousekeepingMessage::setToggleReportingCommand(&hkCmd, lpool::testSid, true, true);
-CHECK(poolOwner.poolManager.handleHousekeepingMessage(&hkCmd) == retval::CATCH_OK);
+CHECK(poolOwner.poolManager.handleHousekeepingMessage(&hkCmd) == result::OK);
 REQUIRE(poolOwnerMock.wasMessageSent());
 REQUIRE(poolOwnerMock.numberOfSentMessages() == 1);
 poolOwnerMock.clearMessages();

 HousekeepingMessage::setToggleReportingCommand(&hkCmd, lpool::testSid, false, true);
-CHECK(poolOwner.poolManager.handleHousekeepingMessage(&hkCmd) == retval::CATCH_OK);
+CHECK(poolOwner.poolManager.handleHousekeepingMessage(&hkCmd) == result::OK);
 REQUIRE(poolOwnerMock.wasMessageSent());
 REQUIRE(poolOwnerMock.numberOfSentMessages() == 1);
 poolOwnerMock.clearMessages();
@@ -397,26 +397,26 @@ TEST_CASE("Local Pool Manager Tests", "[LocManTest]") {
 poolOwnerMock.clearMessages();

 HousekeepingMessage::setOneShotReportCommand(&hkCmd, lpool::testSid, true);
-CHECK(poolOwner.poolManager.handleHousekeepingMessage(&hkCmd) == retval::CATCH_OK);
+CHECK(poolOwner.poolManager.handleHousekeepingMessage(&hkCmd) == result::OK);
 REQUIRE(poolOwnerMock.wasMessageSent());
 REQUIRE(poolOwnerMock.numberOfSentMessages() == 1);
 poolOwnerMock.clearMessages();

 HousekeepingMessage::setUpdateNotificationVariableCommand(&hkCmd, lpool::uint8VarGpid);
 gp_id_t gpidToCheck;
-CHECK(poolOwner.poolManager.handleHousekeepingMessage(&hkCmd) == retval::CATCH_OK);
+CHECK(poolOwner.poolManager.handleHousekeepingMessage(&hkCmd) == result::OK);
 CHECK(poolOwner.changedVariableCallbackWasCalled(gpidToCheck, storeId) == true);
 CHECK(gpidToCheck == lpool::uint8VarGpid);

 HousekeepingMessage::setUpdateSnapshotSetCommand(&hkCmd, lpool::testSid,
                                                  store_address_t::invalid());
-CHECK(poolOwner.poolManager.handleHousekeepingMessage(&hkCmd) == retval::CATCH_OK);
+CHECK(poolOwner.poolManager.handleHousekeepingMessage(&hkCmd) == result::OK);
 CHECK(poolOwner.changedDataSetCallbackWasCalled(sidToCheck, storeId) == true);
 CHECK(sidToCheck == lpool::testSid);

 HousekeepingMessage::setUpdateSnapshotVariableCommand(&hkCmd, lpool::uint8VarGpid,
                                                       store_address_t::invalid());
-CHECK(poolOwner.poolManager.handleHousekeepingMessage(&hkCmd) == retval::CATCH_OK);
+CHECK(poolOwner.poolManager.handleHousekeepingMessage(&hkCmd) == result::OK);
 CHECK(poolOwner.changedVariableCallbackWasCalled(gpidToCheck, storeId) == true);
 CHECK(gpidToCheck == lpool::uint8VarGpid);

@@ -425,6 +425,6 @@ TEST_CASE("Local Pool Manager Tests", "[LocManTest]") {

 /* we need to reset the subscription list because the pool owner
    is a global object. */
-CHECK(poolOwner.reset() == retval::CATCH_OK);
+CHECK(poolOwner.reset() == result::OK);
 poolOwnerMock.clearMessages(true);
 }
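The housekeeping commands exercised above are plain CommandMessages filled in by static HousekeepingMessage helpers and fed to the owner's pool manager. A short sketch using the same helpers that appear in the test (the sid and interval values are the test's, the sequencing is illustrative):

CommandMessage hkCmd;
// Disable periodic reports for the set (non-diagnostic variant):
HousekeepingMessage::setToggleReportingCommand(&hkCmd, lpool::testSid, false, false);
ReturnValue_t status = poolOwner.poolManager.handleHousekeepingMessage(&hkCmd);

// Request a new collection interval; the manager clamps it to its minimum frequency:
HousekeepingMessage::setCollectionIntervalModificationCommand(&hkCmd, lpool::testSid, 0.4, false);
status = poolOwner.poolManager.handleHousekeepingMessage(&hkCmd);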
@@ -10,23 +10,23 @@
 TEST_CASE("LocalPoolVariable", "[LocPoolVarTest]") {
 auto queue = MessageQueueMock(1);
 LocalPoolOwnerBase poolOwner(queue, objects::TEST_LOCAL_POOL_OWNER_BASE);
-REQUIRE(poolOwner.initializeHkManager() == retval::CATCH_OK);
-REQUIRE(poolOwner.initializeHkManagerAfterTaskCreation() == retval::CATCH_OK);
+REQUIRE(poolOwner.initializeHkManager() == result::OK);
+REQUIRE(poolOwner.initializeHkManagerAfterTaskCreation() == result::OK);

 SECTION("Basic Tests") {
 /* very basic test. */
 lp_var_t<uint8_t> testVariable =
     lp_var_t<uint8_t>(objects::TEST_LOCAL_POOL_OWNER_BASE, lpool::uint8VarId);
-REQUIRE(testVariable.read() == retval::CATCH_OK);
+REQUIRE(testVariable.read() == result::OK);
 CHECK(testVariable.value == 0);
 testVariable.value = 5;
-REQUIRE(testVariable.commit() == retval::CATCH_OK);
-REQUIRE(testVariable.read() == retval::CATCH_OK);
+REQUIRE(testVariable.commit() == result::OK);
+REQUIRE(testVariable.read() == result::OK);
 REQUIRE(testVariable.value == 5);
 CHECK(not testVariable.isValid());
 testVariable.setValid(true);
 CHECK(testVariable.isValid());
-CHECK(testVariable.commit(true) == retval::CATCH_OK);
+CHECK(testVariable.commit(true) == result::OK);

 testVariable.setReadWriteMode(pool_rwm_t::VAR_READ);
 CHECK(testVariable.getReadWriteMode() == pool_rwm_t::VAR_READ);
@@ -42,7 +42,7 @@ TEST_CASE("LocalPoolVariable", "[LocPoolVarTest]") {

 gp_id_t globPoolId(objects::TEST_LOCAL_POOL_OWNER_BASE, lpool::uint8VarId);
 lp_var_t<uint8_t> testVariable2 = lp_var_t<uint8_t>(globPoolId);
-REQUIRE(testVariable2.read() == retval::CATCH_OK);
+REQUIRE(testVariable2.read() == result::OK);
 CHECK(testVariable2 == 5);
 CHECK(testVariable == testVariable2);
 testVariable = 10;
@@ -54,12 +54,12 @@ TEST_CASE("LocalPoolVariable", "[LocPoolVarTest]") {
 CHECK(maxSize == 1);
 size_t serSize = 0;
 CHECK(testVariable.serialize(&varPtr, &serSize, maxSize, SerializeIF::Endianness::MACHINE) ==
-      retval::CATCH_OK);
+      result::OK);
 CHECK(variableRaw == 10);
 const uint8_t* varConstPtr = &variableRaw;
 testVariable = 5;
 CHECK(testVariable.deSerialize(&varConstPtr, &serSize, SerializeIF::Endianness::MACHINE) ==
-      retval::CATCH_OK);
+      result::OK);
 CHECK(testVariable == 10);
 CHECK(testVariable != testVariable2);
 CHECK(testVariable2 < testVariable);
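lp_var_t is a typed proxy into the owner's local pool: read() pulls the current pool value into .value, commit() pushes it back, and commit(true) additionally marks the entry valid. The round-trip the test exercises, condensed (identifiers taken from the test itself):

lp_var_t<uint8_t> testVariable(objects::TEST_LOCAL_POOL_OWNER_BASE, lpool::uint8VarId);
if (testVariable.read() == result::OK) {
  testVariable.value = 5;
  testVariable.commit(true);  // write back and flag the variable as valid
}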
@@ -10,26 +10,26 @@
 TEST_CASE("LocalPoolVector", "[LocPoolVecTest]") {
 auto queue = MessageQueueMock(1);
 LocalPoolOwnerBase poolOwner(queue, objects::TEST_LOCAL_POOL_OWNER_BASE);
-REQUIRE(poolOwner.initializeHkManager() == retval::CATCH_OK);
-REQUIRE(poolOwner.initializeHkManagerAfterTaskCreation() == retval::CATCH_OK);
+REQUIRE(poolOwner.initializeHkManager() == result::OK);
+REQUIRE(poolOwner.initializeHkManagerAfterTaskCreation() == result::OK);

 SECTION("BasicTest") {
 // very basic test.
 lp_vec_t<uint16_t, 3> testVector =
     lp_vec_t<uint16_t, 3>(objects::TEST_LOCAL_POOL_OWNER_BASE, lpool::uint16Vec3Id);
-REQUIRE(testVector.read() == retval::CATCH_OK);
+REQUIRE(testVector.read() == result::OK);
 testVector.value[0] = 5;
 testVector.value[1] = 232;
 testVector.value[2] = 32023;

-REQUIRE(testVector.commit(true) == retval::CATCH_OK);
+REQUIRE(testVector.commit(true) == result::OK);
 CHECK(testVector.isValid());

 testVector.value[0] = 0;
 testVector.value[1] = 0;
 testVector.value[2] = 0;

-CHECK(testVector.read() == retval::CATCH_OK);
+CHECK(testVector.read() == result::OK);
 CHECK(testVector.value[0] == 5);
 CHECK(testVector.value[1] == 232);
 CHECK(testVector.value[2] == 32023);
@@ -40,7 +40,7 @@ TEST_CASE("LocalPoolVector", "[LocPoolVecTest]") {
    (we can't throw exceptions) */
 testVector[4] = 12;
 CHECK(testVector[2] == 12);
-CHECK(testVector.commit() == retval::CATCH_OK);
+CHECK(testVector.commit() == result::OK);

 /* Use read-only reference. */
 const lp_vec_t<uint16_t, 3>& roTestVec = testVector;
@@ -57,7 +57,7 @@ TEST_CASE("LocalPoolVector", "[LocPoolVecTest]") {
 uint8_t* vecPtr = reinterpret_cast<uint8_t*>(serializedVector);
 size_t serSize = 0;
 REQUIRE(testVector.serialize(&vecPtr, &serSize, maxSize, SerializeIF::Endianness::MACHINE) ==
-        retval::CATCH_OK);
+        result::OK);

 CHECK(serSize == 6);
 CHECK(serializedVector[0] == 5);
@@ -74,7 +74,7 @@ TEST_CASE("LocalPoolVector", "[LocPoolVecTest]") {

 const uint8_t* constVecPtr = reinterpret_cast<const uint8_t*>(serializedVector);
 REQUIRE(testVector.deSerialize(&constVecPtr, &serSize, SerializeIF::Endianness::MACHINE) ==
-        retval::CATCH_OK);
+        result::OK);
 CHECK(testVector[0] == 16);
 CHECK(testVector[1] == 7832);
 CHECK(testVector[2] == 39232);
@@ -25,10 +25,10 @@ TEST_CASE("Local Pool Simple Tests [1 Pool]", "[TestPool]") {
 SECTION("Basic tests") {
 REQUIRE(not simplePool.hasDataAtId(testStoreId));
 result = simplePool.addData(&testStoreId, testDataArray.data(), size);
-REQUIRE(result == retval::CATCH_OK);
+REQUIRE(result == result::OK);
 REQUIRE(simplePool.hasDataAtId(testStoreId));
 result = simplePool.getData(testStoreId, &constPointer, &size);
-REQUIRE(result == retval::CATCH_OK);
+REQUIRE(result == result::OK);
 memcpy(receptionArray.data(), constPointer, size);
 for (size_t i = 0; i < size; i++) {
 CHECK(receptionArray[i] == i);
@@ -36,12 +36,12 @@ TEST_CASE("Local Pool Simple Tests [1 Pool]", "[TestPool]") {
 memset(receptionArray.data(), 0, size);
 result = simplePool.modifyData(testStoreId, &pointer, &size);
 memcpy(receptionArray.data(), pointer, size);
-REQUIRE(result == retval::CATCH_OK);
+REQUIRE(result == result::OK);
 for (size_t i = 0; i < size; i++) {
 CHECK(receptionArray[i] == i);
 }
 result = simplePool.deleteData(testStoreId);
-REQUIRE(result == retval::CATCH_OK);
+REQUIRE(result == result::OK);
 REQUIRE(not simplePool.hasDataAtId(testStoreId));
 result = simplePool.addData(&testStoreId, testDataArray.data(), 15);
 CHECK(result == (int)StorageManagerIF::DATA_TOO_LARGE);
@@ -50,12 +50,12 @@ TEST_CASE("Local Pool Simple Tests [1 Pool]", "[TestPool]") {
 SECTION("Reservation Tests ") {
 pointer = nullptr;
 result = simplePool.getFreeElement(&testStoreId, size, &pointer);
-REQUIRE(result == retval::CATCH_OK);
+REQUIRE(result == result::OK);
 memcpy(pointer, testDataArray.data(), size);
 constPointer = nullptr;
 result = simplePool.getData(testStoreId, &constPointer, &size);

-REQUIRE(result == retval::CATCH_OK);
+REQUIRE(result == result::OK);
 memcpy(receptionArray.data(), constPointer, size);
 for (size_t i = 0; i < size; i++) {
 CHECK(receptionArray[i] == i);
@@ -64,21 +64,21 @@ TEST_CASE("Local Pool Simple Tests [1 Pool]", "[TestPool]") {

 SECTION("Add, delete, add, add when full") {
 result = simplePool.addData(&testStoreId, testDataArray.data(), size);
-REQUIRE(result == retval::CATCH_OK);
+REQUIRE(result == result::OK);
 result = simplePool.getData(testStoreId, &constPointer, &size);
-REQUIRE(result == retval::CATCH_OK);
+REQUIRE(result == result::OK);
 memcpy(receptionArray.data(), constPointer, size);
 for (size_t i = 0; i < size; i++) {
 CHECK(receptionArray[i] == i);
 }

 result = simplePool.deleteData(testStoreId);
-REQUIRE(result == retval::CATCH_OK);
+REQUIRE(result == result::OK);

 result = simplePool.addData(&testStoreId, testDataArray.data(), size);
-REQUIRE(result == retval::CATCH_OK);
+REQUIRE(result == result::OK);
 result = simplePool.getData(testStoreId, &constPointer, &size);
-REQUIRE(result == retval::CATCH_OK);
+REQUIRE(result == result::OK);
 memcpy(receptionArray.data(), constPointer, size);
 for (size_t i = 0; i < size; i++) {
 CHECK(receptionArray[i] == i);
@@ -105,20 +105,20 @@ TEST_CASE("Local Pool Simple Tests [1 Pool]", "[TestPool]") {

 SECTION("Initialize and clear store, delete with pointer") {
 result = simplePool.initialize();
-REQUIRE(result == retval::CATCH_OK);
+REQUIRE(result == result::OK);
 result = simplePool.addData(&testStoreId, testDataArray.data(), size);
-REQUIRE(result == retval::CATCH_OK);
+REQUIRE(result == result::OK);
 simplePool.clearStore();
 result = simplePool.addData(&testStoreId, testDataArray.data(), size);
-REQUIRE(result == retval::CATCH_OK);
+REQUIRE(result == result::OK);
 result = simplePool.modifyData(testStoreId, &pointer, &size);
-REQUIRE(result == retval::CATCH_OK);
+REQUIRE(result == result::OK);
 store_address_t newId;
 result = simplePool.deleteData(pointer, size, &testStoreId);
-REQUIRE(result == retval::CATCH_OK);
+REQUIRE(result == result::OK);
 REQUIRE(testStoreId.raw != (uint32_t)StorageManagerIF::INVALID_ADDRESS);
 result = simplePool.addData(&testStoreId, testDataArray.data(), size);
-REQUIRE(result == retval::CATCH_OK);
+REQUIRE(result == result::OK);
 }
 }
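LocalPool hands out store_address_t handles: addData copies a buffer into a free slot, getData returns a read-only view, modifyData a writable one, and deleteData frees the slot. The lifecycle the sections above walk through, condensed (buffer contents are illustrative):

store_address_t storeId;
uint8_t payload[8] = {0, 1, 2, 3, 4, 5, 6, 7};
if (simplePool.addData(&storeId, payload, sizeof(payload)) == result::OK) {
  const uint8_t* readPtr = nullptr;
  size_t size = 0;
  if (simplePool.getData(storeId, &readPtr, &size) == result::OK) {
    // readPtr now points at the pooled copy of payload; size == 8
  }
  simplePool.deleteData(storeId);  // the slot becomes reusable afterwards
}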
@@ -141,10 +141,10 @@ TEST_CASE("Local Pool Extended Tests [3 Pools]", "[TestPool2]") {
 runIdx++;

 LocalPool simplePool(0, *config);
-std::array<uint8_t, 20> testDataArray;
-std::array<uint8_t, 20> receptionArray;
+std::array<uint8_t, 20> testDataArray{};
+std::array<uint8_t, 20> receptionArray{};
 store_address_t testStoreId;
-ReturnValue_t result = retval::CATCH_FAILED;
+ReturnValue_t result = result::FAILED;
 for (size_t i = 0; i < testDataArray.size(); i++) {
 testDataArray[i] = i;
 }
@@ -153,20 +153,20 @@ TEST_CASE("Local Pool Extended Tests [3 Pools]", "[TestPool2]") {
 SECTION("Basic tests") {
 size = 8;
 result = simplePool.addData(&testStoreId, testDataArray.data(), size);
-REQUIRE(result == retval::CATCH_OK);
+REQUIRE(result == result::OK);
 // Should be on second page of the pool now for 8 bytes
 CHECK(testStoreId.poolIndex == 1);
 CHECK(testStoreId.packetIndex == 0);

 size = 15;
 result = simplePool.addData(&testStoreId, testDataArray.data(), size);
-REQUIRE(result == retval::CATCH_OK);
+REQUIRE(result == result::OK);
 // Should be on third page of the pool now for 15 bytes
 CHECK(testStoreId.poolIndex == 2);
 CHECK(testStoreId.packetIndex == 0);

 result = simplePool.addData(&testStoreId, testDataArray.data(), size);
-REQUIRE(result == retval::CATCH_OK);
+REQUIRE(result == result::OK);
 // Should be on third page of the pool now for 15 bytes
 CHECK(testStoreId.poolIndex == 2);
 CHECK(testStoreId.packetIndex == 1);
@@ -177,7 +177,7 @@ TEST_CASE("Local Pool Extended Tests [3 Pools]", "[TestPool2]") {

 size = 8;
 result = simplePool.addData(&testStoreId, testDataArray.data(), size);
-REQUIRE(result == retval::CATCH_OK);
+REQUIRE(result == result::OK);
 // Should still work
 CHECK(testStoreId.poolIndex == 1);
 CHECK(testStoreId.packetIndex == 1);
@@ -185,7 +185,7 @@ TEST_CASE("Local Pool Extended Tests [3 Pools]", "[TestPool2]") {
 // fill the rest of the pool
 for (uint8_t idx = 2; idx < 5; idx++) {
 result = simplePool.addData(&testStoreId, testDataArray.data(), size);
-REQUIRE(result == retval::CATCH_OK);
+REQUIRE(result == result::OK);
 CHECK(testStoreId.poolIndex == 1);
 CHECK(testStoreId.packetIndex == idx);
 }
@@ -206,21 +206,21 @@ TEST_CASE("Local Pool Extended Tests [3 Pools]", "[TestPool2]") {
 size = 5;
 for (uint8_t idx = 0; idx < 10; idx++) {
 result = simplePool.addData(&testStoreId, testDataArray.data(), size);
-REQUIRE(result == retval::CATCH_OK);
+REQUIRE(result == result::OK);
 CHECK(testStoreId.poolIndex == 0);
 CHECK(testStoreId.packetIndex == idx);
 }
 size = 10;
 for (uint8_t idx = 0; idx < 5; idx++) {
 result = simplePool.addData(&testStoreId, testDataArray.data(), size);
-REQUIRE(result == retval::CATCH_OK);
+REQUIRE(result == result::OK);
 CHECK(testStoreId.poolIndex == 1);
 CHECK(testStoreId.packetIndex == idx);
 }
 size = 20;
 for (uint8_t idx = 0; idx < 2; idx++) {
 result = simplePool.addData(&testStoreId, testDataArray.data(), size);
-REQUIRE(result == retval::CATCH_OK);
+REQUIRE(result == result::OK);
 CHECK(testStoreId.poolIndex == 2);
 CHECK(testStoreId.packetIndex == idx);
 }
@@ -247,7 +247,7 @@ TEST_CASE("Local Pool Extended Tests [3 Pools]", "[TestPool2]") {
 size = 5;
 for (uint8_t idx = 0; idx < 10; idx++) {
 result = simplePool.addData(&testStoreId, testDataArray.data(), size);
-REQUIRE(result == retval::CATCH_OK);
+REQUIRE(result == result::OK);
 CHECK(testStoreId.poolIndex == 0);
 CHECK(testStoreId.packetIndex == idx);
 }
@@ -264,7 +264,7 @@ TEST_CASE("Local Pool Extended Tests [3 Pools]", "[TestPool2]") {
 size = 10;
 for (uint8_t idx = 0; idx < 5; idx++) {
 result = simplePool.addData(&testStoreId, testDataArray.data(), size);
-REQUIRE(result == retval::CATCH_OK);
+REQUIRE(result == result::OK);
 CHECK(testStoreId.poolIndex == 1);
 CHECK(testStoreId.packetIndex == idx);
 }
@@ -18,6 +18,6 @@ TEST_CASE("TM ZC Helper", "[tm-zc-helper]") {

 SECTION("Basic") {
 REQUIRE(creator.serialize(dataPtr, serLen, buf.size()) == result::OK);
-PusTmZeroCopyWriter()
+PusTmZeroCopyWriter(timeStamper, dataPtr, serLen);
 }
 }