Merge remote-tracking branch 'upstream/master' into mueller_TmTcBridge_cherryPicked
commit 887f8331a2
@@ -49,7 +49,7 @@ void ActionHelper::setQueueToUse(MessageQueueIF* queue) {
void ActionHelper::prepareExecution(MessageQueueId_t commandedBy, ActionId_t actionId,
store_address_t dataAddress) {
const uint8_t* dataPtr = NULL;
uint32_t size = 0;
size_t size = 0;
ReturnValue_t result = ipcStore->getData(dataAddress, &dataPtr, &size);
if (result != HasReturnvaluesIF::RETURN_OK) {
CommandMessage reply;

@@ -113,7 +113,7 @@ uint8_t CommandActionHelper::getCommandCount() const {
void CommandActionHelper::extractDataForOwner(ActionId_t actionId, store_address_t storeId) {
const uint8_t * data = NULL;
uint32_t size = 0;
size_t size = 0;
ReturnValue_t result = ipcStore->getData(storeId, &data, &size);
if (result != HasReturnvaluesIF::RETURN_OK) {
return;

@@ -44,7 +44,7 @@ void SimpleActionHelper::prepareExecution(MessageQueueId_t commandedBy,
queueToUse->sendMessage(commandedBy, &reply);
}
const uint8_t* dataPtr = NULL;
uint32_t size = 0;
size_t size = 0;
ReturnValue_t result = ipcStore->getData(dataAddress, &dataPtr, &size);
if (result != HasReturnvaluesIF::RETURN_OK) {
ActionMessage::setStepReply(&reply, actionId, 0, result);

@@ -3,6 +3,11 @@
#include <framework/returnvalues/HasReturnvaluesIF.h>
/**
* @brief Simple First-In-First-Out data structure
* @tparam T Entry Type
* @tparam capacity Maximum capacity
*/
template<typename T, uint8_t capacity>
class FIFO {
private:

@@ -54,6 +59,21 @@ public:
return HasReturnvaluesIF::RETURN_OK;
}
}
ReturnValue_t peek(T * value) {
if(empty()) {
return EMPTY;
} else {
*value = data[readIndex];
return HasReturnvaluesIF::RETURN_OK;
}
}
ReturnValue_t pop() {
T value;
return this->retrieve(&value);
}
static const uint8_t INTERFACE_ID = CLASS_ID::FIFO_CLASS;
static const ReturnValue_t FULL = MAKE_RETURN_CODE(1);
static const ReturnValue_t EMPTY = MAKE_RETURN_CODE(2);
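A minimal usage sketch for the peek() and pop() helpers added to the FIFO above. It assumes the container's existing insert() and retrieve() members and an illustrative include path; the payload type and capacity are arbitrary:

#include <framework/container/FIFO.h>

void fifoPeekPopExample() {
	FIFO<uint32_t, 8> fifo;
	fifo.insert(42);
	uint32_t value = 0;
	// peek() copies the oldest entry without removing it
	if (fifo.peek(&value) == HasReturnvaluesIF::RETURN_OK) {
		// pop() discards the oldest entry by calling retrieve() internally
		fifo.pop();
	}
}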
@@ -55,7 +55,7 @@ void Clcw::setBitLock(bool bitLock) {
}
void Clcw::print() {
debug << "Clcw::print: Clcw is: " << std::hex << getAsWhole() << std::dec << std::endl;
sif::debug << "Clcw::print: Clcw is: " << std::hex << getAsWhole() << std::dec << std::endl;
}
void Clcw::setWhole(uint32_t rawClcw) {

@@ -98,8 +98,8 @@ ReturnValue_t DataLinkLayer::processFrame(uint16_t length) {
receivedDataLength = length;
ReturnValue_t status = allFramesReception();
if (status != RETURN_OK) {
error << "DataLinkLayer::processFrame: frame reception failed. Error code: " << std::hex
<< status << std::dec << std::endl;
sif::error << "DataLinkLayer::processFrame: frame reception failed. "
"Error code: " << std::hex << status << std::dec << std::endl;
// currentFrame.print();
return status;
} else {

@@ -124,7 +124,7 @@ ReturnValue_t DataLinkLayer::initialize() {
if ( virtualChannels.begin() != virtualChannels.end() ) {
clcw->setVirtualChannel( virtualChannels.begin()->second->getChannelId() );
} else {
error << "DataLinkLayer::initialize: No VC assigned to this DLL instance! " << std::endl;
sif::error << "DataLinkLayer::initialize: No VC assigned to this DLL instance! " << std::endl;
return RETURN_FAILED;
}

@@ -36,7 +36,7 @@ ReturnValue_t MapPacketExtraction::extractPackets(TcTransferFrame* frame) {
bufferPosition = &packetBuffer[packetLength];
status = RETURN_OK;
} else {
error
sif::error
<< "MapPacketExtraction::extractPackets. Packet too large! Size: "
<< packetLength << std::endl;
clearBuffers();

@@ -58,14 +58,14 @@ ReturnValue_t MapPacketExtraction::extractPackets(TcTransferFrame* frame) {
}
status = RETURN_OK;
} else {
error
sif::error
<< "MapPacketExtraction::extractPackets. Packet too large! Size: "
<< packetLength << std::endl;
clearBuffers();
status = CONTENT_TOO_LARGE;
}
} else {
error
sif::error
<< "MapPacketExtraction::extractPackets. Illegal segment! Last flag: "
<< (int) lastSegmentationFlag << std::endl;
clearBuffers();

@@ -73,7 +73,7 @@ ReturnValue_t MapPacketExtraction::extractPackets(TcTransferFrame* frame) {
}
break;
default:
error
sif::error
<< "MapPacketExtraction::extractPackets. Illegal segmentationFlag: "
<< (int) segmentationFlag << std::endl;
clearBuffers();

@@ -142,9 +142,9 @@ ReturnValue_t MapPacketExtraction::initialize() {
}
void MapPacketExtraction::printPacketBuffer(void) {
debug << "DLL: packet_buffer contains: " << std::endl;
sif::debug << "DLL: packet_buffer contains: " << std::endl;
for (uint32_t i = 0; i < this->packetLength; ++i) {
debug << "packet_buffer[" << std::dec << i << "]: 0x" << std::hex
sif::debug << "packet_buffer[" << std::dec << i << "]: 0x" << std::hex
<< (uint16_t) this->packetBuffer[i] << std::endl;
}
}

@@ -87,11 +87,11 @@ uint8_t* TcTransferFrame::getFullDataField() {
}
void TcTransferFrame::print() {
debug << "Raw Frame: " << std::hex << std::endl;
sif::debug << "Raw Frame: " << std::hex << std::endl;
for (uint16_t count = 0; count < this->getFullSize(); count++ ) {
debug << (uint16_t)this->getFullFrame()[count] << " ";
sif::debug << (uint16_t)this->getFullFrame()[count] << " ";
}
debug << std::dec << std::endl;
sif::debug << std::dec << std::endl;
// debug << "Frame Header:" << std::endl;
// debug << "Version Number: " << std::hex << (uint16_t)this->current_frame.getVersionNumber() << std::endl;
// debug << "Bypass Flag set?| Ctrl Cmd Flag set?: " << (uint16_t)this->current_frame.bypassFlagSet() << " | " << (uint16_t)this->current_frame.controlCommandFlagSet() << std::endl;
@@ -37,7 +37,7 @@ TcTransferFrameLocal::TcTransferFrameLocal(bool bypass, bool controlCommand, uin
this->getFullFrame()[getFullSize()-2] = (crc & 0xFF00) >> 8;
this->getFullFrame()[getFullSize()-1] = (crc & 0x00FF);
} else {
debug << "TcTransferFrameLocal: dataSize too large: " << dataSize << std::endl;
sif::debug << "TcTransferFrameLocal: dataSize too large: " << dataSize << std::endl;
}
} else {
//No data in frame

@@ -102,7 +102,7 @@ uint8_t VirtualChannelReception::getChannelId() const {
ReturnValue_t VirtualChannelReception::initialize() {
ReturnValue_t returnValue = RETURN_FAILED;
if ((slidingWindowWidth > 254) || (slidingWindowWidth % 2 != 0)) {
error << "VirtualChannelReception::initialize: Illegal sliding window width: "
sif::error << "VirtualChannelReception::initialize: Illegal sliding window width: "
<< (int) slidingWindowWidth << std::endl;
return RETURN_FAILED;
}

@@ -55,7 +55,7 @@ PoolEntryIF* DataPool::getRawData( uint32_t data_pool_id ) {
ReturnValue_t DataPool::freeDataPoolLock() {
ReturnValue_t status = mutex->unlockMutex();
if ( status != RETURN_OK ) {
error << "DataPool::DataPool: unlock of mutex failed with error code: " << status << std::endl;
sif::error << "DataPool::DataPool: unlock of mutex failed with error code: " << status << std::endl;
}
return status;
}

@@ -63,17 +63,17 @@ ReturnValue_t DataPool::freeDataPoolLock() {
ReturnValue_t DataPool::lockDataPool() {
ReturnValue_t status = mutex->lockMutex(MutexIF::NO_TIMEOUT);
if ( status != RETURN_OK ) {
error << "DataPool::DataPool: lock of mutex failed with error code: " << status << std::endl;
sif::error << "DataPool::DataPool: lock of mutex failed with error code: " << status << std::endl;
}
return status;
}
void DataPool::print() {
debug << "DataPool contains: " << std::endl;
sif::debug << "DataPool contains: " << std::endl;
std::map<uint32_t, PoolEntryIF*>::iterator dataPoolIt;
dataPoolIt = this->data_pool.begin();
while( dataPoolIt != this->data_pool.end() ) {
debug << std::hex << dataPoolIt->first << std::dec << " |";
sif::debug << std::hex << dataPoolIt->first << std::dec << " |";
dataPoolIt->second->print();
dataPoolIt++;
}
@@ -215,7 +215,7 @@ ReturnValue_t DataPoolAdmin::handleParameterCommand(CommandMessage* command) {
ParameterMessage::getParameterId(command));
const uint8_t *storedStream;
uint32_t storedStreamSize;
size_t storedStreamSize;
result = storage->getData(ParameterMessage::getStoreId(command),
&storedStream, &storedStreamSize);
if (result != HasReturnvaluesIF::RETURN_OK) {

@@ -31,7 +31,7 @@ ReturnValue_t DataSet::read() {
state = DATA_SET_WAS_READ;
freeDataPoolLock();
} else {
error << "DataSet::read(): Call made in wrong position." << std::endl;
sif::error << "DataSet::read(): Call made in wrong position." << std::endl;
result = SET_WAS_ALREADY_READ;
}
return result;

@@ -68,9 +68,9 @@ ReturnValue_t DataSet::commit() {
} else if (registeredVariables[count]->getDataPoolId()
!= PoolVariableIF::NO_PARAMETER) {
if (result != COMMITING_WITHOUT_READING) {
error
<< "DataSet::commit(): commit-without-read call made with non write-only variable."
<< std::endl;
sif::error <<
"DataSet::commit(): commit-without-read "
"call made with non write-only variable." << std::endl;
result = COMMITING_WITHOUT_READING;
}
}

@@ -92,7 +92,7 @@ void DataSet::registerVariable(PoolVariableIF* variable) {
}
}
}
error
sif::error
<< "DataSet::registerVariable: failed. Either NULL, or set is full, or call made in wrong position."
<< std::endl;
return;
@@ -26,7 +26,7 @@ protected:
} else {
value = 0;
valid = false;
error << "PIDReader: read of PID 0x" << std::hex << parameterId
sif::error << "PIDReader: read of PID 0x" << std::hex << parameterId
<< std::dec << " failed." << std::endl;
return HasReturnvaluesIF::RETURN_FAILED;
}

@@ -46,9 +46,10 @@ uint8_t PoolEntry<T>::getValid() {
template <typename T>
void PoolEntry<T>::print() {
for (uint8_t size = 0; size < this->length; size++ ) {
debug << "| " << std::hex << (double)this->address[size] << (this->valid? " (valid) " : " (invalid) ");
sif::debug << "| " << std::hex << (double)this->address[size]
<< (this->valid? " (valid) " : " (invalid) ");
}
debug << std::dec << std::endl;
sif::debug << std::dec << std::endl;
}
template<typename T>

@@ -42,7 +42,7 @@ ReturnValue_t PoolRawAccess::read() {
} else {
//Error entry does not exist.
}
error << "PoolRawAccess: read of DP Variable 0x" << std::hex << dataPoolId
sif::error << "PoolRawAccess: read of DP Variable 0x" << std::hex << dataPoolId
<< std::dec << " failed." << std::endl;
valid = INVALID;
typeSize = 0;

@@ -123,7 +123,7 @@ ReturnValue_t PoolRawAccess::setEntryFromBigEndian(const uint8_t* buffer,
#endif
return HasReturnvaluesIF::RETURN_OK;
} else {
error << "PoolRawAccess::setEntryFromBigEndian: Illegal sizes: Internal"
sif::error << "PoolRawAccess::setEntryFromBigEndian: Illegal sizes: Internal"
<< (uint32_t) typeSize << ", Requested: " << setSize
<< std::endl;
return INCORRECT_SIZE;

@@ -66,7 +66,7 @@ protected:
} else {
value = 0;
valid = false;
error << "PoolVariable: read of DP Variable 0x" << std::hex
sif::error << "PoolVariable: read of DP Variable 0x" << std::hex
<< dataPoolId << std::dec << " failed." << std::endl;
return HasReturnvaluesIF::RETURN_FAILED;
}

@@ -70,7 +70,7 @@ protected:
} else {
memset(this->value, 0, vector_size * sizeof(T));
error << "PoolVector: read of DP Variable 0x" << std::hex
sif::error << "PoolVector: read of DP Variable 0x" << std::hex
<< dataPoolId << std::dec << " failed." << std::endl;
this->valid = INVALID;
return HasReturnvaluesIF::RETURN_FAILED;
@@ -558,7 +558,7 @@ void DeviceHandlerBase::doGetRead() {
ReturnValue_t DeviceHandlerBase::getStorageData(store_address_t storageAddress,
uint8_t * *data, uint32_t * len) {
uint32_t lenTmp;
size_t lenTmp;
if (IPCStore == NULL) {
*data = NULL;

@@ -1165,7 +1165,7 @@ void DeviceHandlerBase::buildInternalCommand(void) {
if (mode == MODE_NORMAL) {
result = buildNormalDeviceCommand(&deviceCommandId);
if (result == BUSY) {
debug << std::hex << getObjectId()
sif::debug << std::hex << getObjectId()
<< ": DHB::buildInternalCommand busy" << std::endl; //so we can track misconfigurations
result = NOTHING_TO_SEND; //no need to report this
}

@@ -1186,7 +1186,7 @@ void DeviceHandlerBase::buildInternalCommand(void) {
if (iter == deviceCommandMap.end()) {
result = COMMAND_NOT_SUPPORTED;
} else if (iter->second.isExecuting) {
debug << std::hex << getObjectId()
sif::debug << std::hex << getObjectId()
<< ": DHB::buildInternalCommand: Command "
<< deviceCommandId << " isExecuting" << std::endl; //so we can track misconfigurations
return; //this is an internal command, no need to report a failure here, missed reply will track if a reply is too late, otherwise, it's ok

@@ -89,17 +89,20 @@ uint32_t FixedSlotSequence::getLengthMs() const {
}
ReturnValue_t FixedSlotSequence::checkSequence() const {
//Iterate through slotList and check successful creation. Checks if timing is ok (must be ascending) and if all handlers were found.
if(slotList.empty()) {
sif::error << "Fixed Slot Sequence: Slot list is empty!" << std::endl;
return HasReturnvaluesIF::RETURN_FAILED;
}
auto slotIt = slotList.begin();
uint32_t count = 0;
uint32_t time = 0;
while (slotIt != slotList.end()) {
if ((*slotIt)->handler == NULL) {
error << "FixedSlotSequene::initialize: ObjectId does not exist!"
sif::error << "FixedSlotSequene::initialize: ObjectId does not exist!"
<< std::endl;
count++;
} else if ((*slotIt)->pollingTimeMs < time) {
error << "FixedSlotSequence::initialize: Time: "
sif::error << "FixedSlotSequence::initialize: Time: "
<< (*slotIt)->pollingTimeMs
<< " is smaller than previous with " << time << std::endl;
count++;
@@ -6,12 +6,18 @@
#include <list>
/**
* \brief This class is the representation of a Polling Sequence Table in software.
* @brief This class is the representation of a Polling Sequence Table in software.
*
* \details The FixedSlotSequence object maintains the dynamic execution of device handler objects.
* The main idea is to create a list of device handlers, to announce all handlers to the
* polling sequence and to maintain a list of polling slot objects. This slot list represents the
* Polling Sequence Table in software. Each polling slot contains information to indicate when and
* @details
* The FixedSlotSequence object maintains the dynamic execution of
* device handler objects.
*
* The main idea is to create a list of device handlers, to announce all
* handlers to thepolling sequence and to maintain a list of
* polling slot objects. This slot list represents the Polling Sequence Table
* in software.
*
* Each polling slot contains information to indicate when and
* which device handler shall be executed within a given polling period.
* The sequence is then executed by iterating through this slot list.
* Handlers are invoking by calling a certain function stored in the handler list.

@@ -97,6 +103,11 @@ public:
*/
std::list<FixedSequenceSlot*>::iterator current;
/**
* Iterate through slotList and check successful creation.
* Checks if timing is ok (must be ascending) and if all handlers were found.
* @return
*/
ReturnValue_t checkSequence() const;
protected:
@@ -117,26 +117,26 @@ void EventManager::printEvent(EventMessage* message) {
switch (message->getSeverity()) {
case SEVERITY::INFO:
// string = translateObject(message->getReporter());
// info << "EVENT: ";
// sif::info << "EVENT: ";
// if (string != 0) {
// info << string;
// sif::info << string;
// } else {
// info << "0x" << std::hex << message->getReporter() << std::dec;
// sif::info << "0x" << std::hex << message->getReporter() << std::dec;
// }
// info << " reported " << translateEvents(message->getEvent()) << " ("
// sif::info << " reported " << translateEvents(message->getEvent()) << " ("
// << std::dec << message->getEventId() << std::hex << ") P1: 0x"
// << message->getParameter1() << " P2: 0x"
// << message->getParameter2() << std::dec << std::endl;
break;
default:
string = translateObject(message->getReporter());
error << "EVENT: ";
sif::error << "EVENT: ";
if (string != 0) {
error << string;
sif::error << string;
} else {
error << "0x" << std::hex << message->getReporter() << std::dec;
sif::error << "0x" << std::hex << message->getReporter() << std::dec;
}
error << " reported " << translateEvents(message->getEvent()) << " ("
sif::error << " reported " << translateEvents(message->getEvent()) << " ("
<< std::dec << message->getEventId() << std::hex << ") P1: 0x"
<< message->getParameter1() << " P2: 0x"
<< message->getParameter2() << std::dec << std::endl;
globalfunctions/arrayprinter.cpp (new file, 61 lines)
@@ -0,0 +1,61 @@
#include <framework/globalfunctions/arrayprinter.h>
#include <framework/serviceinterface/ServiceInterfaceStream.h>
#include <bitset>
void arrayprinter::print(const uint8_t *data, size_t size, OutputType type,
bool printInfo, size_t maxCharPerLine) {
if(printInfo) {
sif::info << "Printing data with size " << size << ": ";
}
sif::info << "[";
if(type == OutputType::HEX) {
arrayprinter::printHex(data, size, maxCharPerLine);
}
else if (type == OutputType::DEC) {
arrayprinter::printDec(data, size, maxCharPerLine);
}
else if(type == OutputType::BIN) {
arrayprinter::printBin(data, size);
}
}
void arrayprinter::printHex(const uint8_t *data, size_t size,
size_t maxCharPerLine) {
sif::info << std::hex;
for(size_t i = 0; i < size; i++) {
sif::info << "0x" << static_cast<int>(data[i]);
if(i < size - 1){
sif::info << " , ";
if(i > 0 and i % maxCharPerLine == 0) {
sif::info << std::endl;
}
}
}
sif::info << std::dec;
sif::info << "]" << std::endl;
}
void arrayprinter::printDec(const uint8_t *data, size_t size,
size_t maxCharPerLine) {
sif::info << std::dec;
for(size_t i = 0; i < size; i++) {
sif::info << static_cast<int>(data[i]);
if(i < size - 1){
sif::info << " , ";
if(i > 0 and i % maxCharPerLine == 0) {
sif::info << std::endl;
}
}
}
sif::info << "]" << std::endl;
}
void arrayprinter::printBin(const uint8_t *data, size_t size) {
sif::info << "\n" << std::flush;
for(size_t i = 0; i < size; i++) {
sif::info << "Byte " << i + 1 << ": 0b"<<
std::bitset<8>(data[i]) << ",\n" << std::flush;
}
sif::info << "]" << std::endl;
}

globalfunctions/arrayprinter.h (new file, 20 lines)
@@ -0,0 +1,20 @@
#ifndef FRAMEWORK_GLOBALFUNCTIONS_ARRAYPRINTER_H_
#define FRAMEWORK_GLOBALFUNCTIONS_ARRAYPRINTER_H_
#include <cstdint>
#include <cstddef>
enum class OutputType {
DEC,
HEX,
BIN
};
namespace arrayprinter {
void print(const uint8_t* data, size_t size, OutputType type = OutputType::HEX,
bool printInfo = true, size_t maxCharPerLine = 12);
void printHex(const uint8_t* data, size_t size, size_t maxCharPerLine = 12);
void printDec(const uint8_t* data, size_t size, size_t maxCharPerLine = 12);
void printBin(const uint8_t* data, size_t size);
}
#endif /* FRAMEWORK_GLOBALFUNCTIONS_ARRAYPRINTER_H_ */
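A brief usage sketch for the arrayprinter helpers declared in the new header above; the buffer contents and the calling function are made up for illustration:

#include <framework/globalfunctions/arrayprinter.h>

void arrayPrinterExample() {
	uint8_t testData[4] = { 0xDE, 0xAD, 0xBE, 0xEF };
	// default arguments: hexadecimal output with an info prefix
	arrayprinter::print(testData, sizeof(testData));
	// explicit decimal output without the info prefix
	arrayprinter::print(testData, sizeof(testData), OutputType::DEC, false);
}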
@@ -70,7 +70,7 @@ void HealthHelper::informParent(HasHealthIF::HealthState health,
health, oldHealth);
if (MessageQueueSenderIF::sendMessage(parentQueue, &message,
owner->getCommandQueue()) != HasReturnvaluesIF::RETURN_OK) {
debug << "HealthHelper::informParent: sending health reply failed."
sif::debug << "HealthHelper::informParent: sending health reply failed."
<< std::endl;
}
}

@@ -89,7 +89,7 @@ void HealthHelper::handleSetHealthCommand(CommandMessage* message) {
}
if (MessageQueueSenderIF::sendMessage(message->getSender(), &reply,
owner->getCommandQueue()) != HasReturnvaluesIF::RETURN_OK) {
debug
sif::debug
<< "HealthHelper::handleHealthCommand: sending health reply failed."
<< std::endl;
}

@@ -52,12 +52,12 @@ size_t MessageQueueMessage::getMinimumMessageSize() {
}
void MessageQueueMessage::print() {
debug << "MessageQueueMessage has size: " << this->messageSize << std::hex
sif::debug << "MessageQueueMessage has size: " << this->messageSize << std::hex
<< std::endl;
for (uint8_t count = 0; count < this->messageSize; count++) {
debug << (uint32_t) this->internalBuffer[count] << ":";
sif::debug << (uint32_t) this->internalBuffer[count] << ":";
}
debug << std::dec << std::endl;
sif::debug << std::dec << std::endl;
}
void MessageQueueMessage::clear() {

@@ -10,7 +10,7 @@ public:
internalMutex(mutex) {
ReturnValue_t status = mutex->lockMutex(timeoutMs);
if(status != HasReturnvaluesIF::RETURN_OK){
error << "MutexHelper: Lock of Mutex failed " << status << std::endl;
sif::error << "MutexHelper: Lock of Mutex failed " << status << std::endl;
}
}

@@ -15,7 +15,7 @@ ReturnValue_t MemoryHelper::handleMemoryCommand(CommandMessage* message) {
lastSender = message->getSender();
lastCommand = message->getCommand();
if (busy) {
debug << "MemHelper: Busy!" << std::endl;
sif::debug << "MemHelper: Busy!" << std::endl;
}
switch (lastCommand) {
case MemoryMessage::CMD_MEMORY_DUMP:

@@ -152,7 +152,7 @@ void MemoryHelper::handleMemoryLoad(CommandMessage* message) {
ipcAddress = MemoryMessage::getStoreID(message);
const uint8_t* p_data = NULL;
uint8_t* dataPointer = NULL;
uint32_t size = 0;
size_t size = 0;
ReturnValue_t returnCode = ipcStore->getData(ipcAddress, &p_data, &size);
if (returnCode == RETURN_OK) {
returnCode = workOnThis->handleMemoryLoad(address, p_data, size,
@@ -16,6 +16,10 @@ ReturnValue_t ModeMessage::setModeMessage(CommandMessage* message, Command_t com
return HasReturnvaluesIF::RETURN_OK;
}
ReturnValue_t ModeMessage::getCantReachModeReason(const CommandMessage* message) {
return message->getParameter();
}
void ModeMessage::clear(CommandMessage* message) {
message->setCommand(CommandMessage::CMD_NONE);
}

@@ -23,7 +23,6 @@ public:
static const Command_t REPLY_MODE_REPLY = MAKE_COMMAND_ID(0x02);//!> Reply to a CMD_MODE_COMMAND or CMD_MODE_READ
static const Command_t REPLY_MODE_INFO = MAKE_COMMAND_ID(0x03); //!> Unrequested info about the current mode (used for composites to inform their container of a changed mode)
static const Command_t REPLY_CANT_REACH_MODE = MAKE_COMMAND_ID(0x04); //!> Reply in case a mode command can't be executed. Par1: returnCode, Par2: 0
//SHOULDDO is there a way we can transmit a returnvalue when responding that the mode is wrong, so we can give a nice failure code when commanded by PUS?
static const Command_t REPLY_WRONG_MODE_REPLY = MAKE_COMMAND_ID(0x05);//!> Reply to a CMD_MODE_COMMAND, indicating that a mode was commanded and a transition started but was aborted; the parameters contain the mode that was reached
static const Command_t CMD_MODE_READ = MAKE_COMMAND_ID(0x06);//!> Command to read the current mode and reply with a REPLY_MODE_REPLY
static const Command_t CMD_MODE_ANNOUNCE = MAKE_COMMAND_ID(0x07);//!> Command to trigger an ModeInfo Event. This command does NOT have a reply.

@@ -34,6 +33,7 @@ public:
static ReturnValue_t setModeMessage(CommandMessage* message,
Command_t command, Mode_t mode, Submode_t submode);
static void cantReachMode(CommandMessage* message, ReturnValue_t reason);
static ReturnValue_t getCantReachModeReason(const CommandMessage* message);
static void clear(CommandMessage* message);
};

@@ -63,7 +63,8 @@ private:
if (timeStamper == NULL) {
timeStamper = objectManager->get<TimeStamperIF>( timeStamperId );
if ( timeStamper == NULL ) {
error << "MonitoringReportContent::checkAndSetStamper: Stamper not found!" << std::endl;
sif::error << "MonitoringReportContent::checkAndSetStamper: "
"Stamper not found!" << std::endl;
return false;
}
}
@@ -2,37 +2,42 @@
#include <framework/serviceinterface/ServiceInterfaceStream.h>
#include <cstdlib>
ObjectManager::ObjectManager( void (*setProducer)() ) : produceObjects(setProducer) {
ObjectManager::ObjectManager( void (*setProducer)() ):
produceObjects(setProducer) {
//There's nothing special to do in the constructor.
}
ObjectManager::~ObjectManager() {
std::map<object_id_t, SystemObjectIF*>::iterator it;
for (it = this->objectList.begin(); it != this->objectList.end(); it++) {
delete it->second;
for (auto const& iter : objectList) {
delete iter.second;
}
}
ReturnValue_t ObjectManager::insert( object_id_t id, SystemObjectIF* object) {
bool insert_return = this->objectList.insert( std::pair< object_id_t, SystemObjectIF* >( id, object ) ).second;
if (insert_return == true) {
// debug << "ObjectManager::insert: Object " << std::hex << (int)id << std::dec << " inserted." << std::endl;
auto returnPair = objectList.emplace(id, object);
if (returnPair.second) {
// sif::debug << "ObjectManager::insert: Object " << std::hex
// << (int)id << std::dec << " inserted." << std::endl;
return this->RETURN_OK;
} else {
error << "ObjectManager::insert: Object id " << std::hex << (int)id << std::dec << " is already in use!" << std::endl;
exit(0); //This is very severe and difficult to handle in other places.
return this->INSERTION_FAILED;
sif::error << "ObjectManager::insert: Object id " << std::hex
<< (int)id << std::dec << " is already in use!" << std::endl;
sif::error << "Terminating program." << std::endl;
//This is very severe and difficult to handle in other places.
std::exit(INSERTION_FAILED);
}
}
ReturnValue_t ObjectManager::remove( object_id_t id ) {
if ( this->getSystemObject(id) != NULL ) {
this->objectList.erase( id );
debug << "ObjectManager::removeObject: Object " << std::hex << (int)id << std::dec << " removed." << std::endl;
//sif::debug << "ObjectManager::removeObject: Object " << std::hex
// << (int)id << std::dec << " removed." << std::endl;
return RETURN_OK;
} else {
error << "ObjectManager::removeObject: Requested object "<< std::hex << (int)id << std::dec << " not found." << std::endl;
sif::error << "ObjectManager::removeObject: Requested object "
<< std::hex << (int)id << std::dec << " not found." << std::endl;
return NOT_FOUND;
}
}

@@ -40,55 +45,63 @@ ReturnValue_t ObjectManager::remove( object_id_t id ) {
SystemObjectIF* ObjectManager::getSystemObject( object_id_t id ) {
std::map<object_id_t, SystemObjectIF*>::iterator it = this->objectList.find( id );
if (it == this->objectList.end() ) {
//Changed for testing different method.
// SystemObjectIF* object = this->produceObjects( id );
// return object;
return NULL;
auto listIter = this->objectList.find( id );
if (listIter == this->objectList.end() ) {
return nullptr;
} else {
return it->second;
return listIter->second;
}
}
ObjectManager::ObjectManager( ) : produceObjects(NULL) {
ObjectManager::ObjectManager() : produceObjects(nullptr) {
}
void ObjectManager::initialize() {
if(produceObjects == nullptr) {
sif::error << "ObjectManager::initialize: Passed produceObjects "
"functions is nullptr!" << std::endl;
return;
}
this->produceObjects();
ReturnValue_t return_value = RETURN_FAILED;
uint32_t error_count = 0;
for (std::map<object_id_t, SystemObjectIF*>::iterator it = this->objectList.begin(); it != objectList.end(); it++ ) {
return_value = it->second->initialize();
if ( return_value != RETURN_OK ) {
object_id_t var = it->first;
error << "Object " << std::hex << (int) var << " failed to initialize with code 0x" << return_value << std::dec << std::endl;
error_count++;
ReturnValue_t result = RETURN_FAILED;
uint32_t errorCount = 0;
for (auto const& it : objectList) {
result = it.second->initialize();
if ( result != RETURN_OK ) {
object_id_t var = it.first;
sif::error << "ObjectManager::initialize: Object 0x" << std::hex <<
std::setw(8) << std::setfill('0')<< var << " failed to "
"initialize with code 0x" << result << std::dec <<
std::setfill(' ') << std::endl;
errorCount++;
}
}
if (error_count > 0) {
error << "ObjectManager::ObjectManager: Counted " << error_count << " failed initializations." << std::endl;
if (errorCount > 0) {
sif::error << "ObjectManager::ObjectManager: Counted " << errorCount
<< " failed initializations." << std::endl;
}
//Init was successful. Now check successful interconnections.
error_count = 0;
for (std::map<object_id_t, SystemObjectIF*>::iterator it = this->objectList.begin(); it != objectList.end(); it++ ) {
return_value = it->second->checkObjectConnections();
if ( return_value != RETURN_OK ) {
error << "Object " << std::hex << (int) it->first << " connection check failed with code 0x" << return_value << std::dec << std::endl;
error_count++;
errorCount = 0;
for (auto const& it : objectList) {
result = it.second->checkObjectConnections();
if ( result != RETURN_OK ) {
sif::error << "ObjectManager::ObjectManager: Object " << std::hex <<
(int) it.first << " connection check failed with code 0x"
<< result << std::dec << std::endl;
errorCount++;
}
}
if (error_count > 0) {
error << "ObjectManager::ObjectManager: Counted " << error_count << " failed connection checks." << std::endl;
if (errorCount > 0) {
sif::error << "ObjectManager::ObjectManager: Counted " << errorCount
<< " failed connection checks." << std::endl;
}
}
void ObjectManager::printList() {
std::map<object_id_t, SystemObjectIF*>::iterator it;
debug << "ObjectManager: Object List contains:" << std::endl;
for (it = this->objectList.begin(); it != this->objectList.end(); it++) {
debug << std::hex << it->first << " | " << it->second << std::endl;
sif::debug << "ObjectManager: Object List contains:" << std::endl;
for (auto const& it : objectList) {
sif::debug << std::hex << it.first << " | " << it.second << std::endl;
}
}
@@ -1,17 +1,10 @@
/**
* @file ObjectManagerIF.h
* @brief This file contains the implementation of the ObjectManagerIF interface
* @date 19.09.2012
* @author Bastian Baetz
*/
#ifndef OBJECTMANAGERIF_H_
#define OBJECTMANAGERIF_H_
#ifndef FRAMEWORK_OBJECTMANAGER_OBJECTMANAGERIF_H_
#define FRAMEWORK_OBJECTMANAGER_OBJECTMANAGERIF_H_
#include <framework/objectmanager/frameworkObjects.h>
#include <config/objects/systemObjectList.h>
#include <framework/objectmanager/SystemObjectIF.h>
#include <framework/returnvalues/HasReturnvaluesIF.h>
#include <framework/serviceinterface/ServiceInterfaceStream.h>
/**
* @brief This class provides an interface to the global object manager.

@@ -20,13 +13,17 @@
* inserted, removed and retrieved from the list. On getting the
* object, the call checks if the object implements the requested
* interface.
* \ingroup system_objects
* @author Bastian Baetz
* @ingroup system_objects
*/
class ObjectManagerIF : public HasReturnvaluesIF {
public:
static const uint8_t INTERFACE_ID = CLASS_ID::OBJECT_MANAGER_IF;
static const ReturnValue_t INSERTION_FAILED = MAKE_RETURN_CODE( 1 );
static const ReturnValue_t NOT_FOUND = MAKE_RETURN_CODE( 2 );
static constexpr uint8_t INTERFACE_ID = CLASS_ID::OBJECT_MANAGER_IF;
static constexpr ReturnValue_t INSERTION_FAILED = MAKE_RETURN_CODE( 1 );
static constexpr ReturnValue_t NOT_FOUND = MAKE_RETURN_CODE( 2 );
static constexpr ReturnValue_t CHILD_INIT_FAILED = MAKE_RETURN_CODE( 3 );
static constexpr ReturnValue_t INTERNAL_ERR_REPORTER_UNINIT = MAKE_RETURN_CODE( 4 );
protected:
/**
* @brief This method is used to hide the template-based get call from

@@ -78,15 +75,21 @@ public:
virtual void printList() = 0;
};
template <typename T>
T* ObjectManagerIF::get( object_id_t id ) {
SystemObjectIF* temp = this->getSystemObject(id);
return dynamic_cast<T*>(temp);
}
/**
* @brief This is the forward declaration of the global objectManager instance.
*/
extern ObjectManagerIF *objectManager;
/*Documentation can be found in the class method declaration above.*/
template <typename T>
T* ObjectManagerIF::get( object_id_t id ) {
if(objectManager == nullptr) {
sif::error << "ObjectManagerIF: Global object manager has not "
"been initialized yet!" << std::endl;
}
SystemObjectIF* temp = this->getSystemObject(id);
return dynamic_cast<T*>(temp);
}
#endif /* OBJECTMANAGERIF_H_ */
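An illustrative sketch of the lookup pattern supported by the reworked get() template above; the object id parameter is a placeholder, and only types already included by this header are used:

void objectLookupExample(object_id_t someObjectId) {
	// get() dynamic_casts the stored pointer to the requested interface,
	// so an unknown id or a mismatched type yields nullptr
	SystemObjectIF* object = objectManager->get<SystemObjectIF>(someObjectId);
	if (object == nullptr) {
		sif::error << "Object 0x" << std::hex << someObjectId
				<< " not found!" << std::dec << std::endl;
	}
}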
@@ -19,8 +19,7 @@ FixedTimeslotTask::~FixedTimeslotTask() {
void FixedTimeslotTask::taskEntryPoint(void* argument) {
//The argument is re-interpreted as FixedTimeslotTask. The Task object is global, so it is found from any place.
FixedTimeslotTask *originalTask(
reinterpret_cast<FixedTimeslotTask*>(argument));
FixedTimeslotTask *originalTask(reinterpret_cast<FixedTimeslotTask*>(argument));
// Task should not start until explicitly requested
// in FreeRTOS, tasks start as soon as they are created if the scheduler is running
// but not if the scheduler is not running.

@@ -33,14 +32,14 @@ void FixedTimeslotTask::taskEntryPoint(void* argument) {
}
originalTask->taskFunctionality();
debug << "Polling task " << originalTask->handle
sif::debug << "Polling task " << originalTask->handle
<< " returned from taskFunctionality." << std::endl;
}
void FixedTimeslotTask::missedDeadlineCounter() {
FixedTimeslotTask::deadlineMissedCount++;
if (FixedTimeslotTask::deadlineMissedCount % 10 == 0) {
error << "PST missed " << FixedTimeslotTask::deadlineMissedCount
sif::error << "PST missed " << FixedTimeslotTask::deadlineMissedCount
<< " deadlines." << std::endl;
}
}

@@ -58,8 +57,19 @@ ReturnValue_t FixedTimeslotTask::startTask() {
ReturnValue_t FixedTimeslotTask::addSlot(object_id_t componentId,
uint32_t slotTimeMs, int8_t executionStep) {
if (objectManager->get<ExecutableObjectIF>(componentId) != nullptr) {
if(slotTimeMs == 0) {
// FreeRTOS throws a sanity error for zero values, so we set
// the time to one millisecond.
slotTimeMs = 1;
}
pst.addSlot(componentId, slotTimeMs, executionStep, this);
return HasReturnvaluesIF::RETURN_OK;
}
sif::error << "Component " << std::hex << componentId <<
" not found, not adding it to pst" << std::endl;
return HasReturnvaluesIF::RETURN_FAILED;
}
uint32_t FixedTimeslotTask::getPeriodMs() const {

@@ -8,7 +8,7 @@ MessageQueue::MessageQueue(size_t message_depth, size_t max_message_size) :
defaultDestination(0),lastPartner(0) {
handle = xQueueCreate(message_depth, max_message_size);
if (handle == NULL) {
error << "MessageQueue creation failed" << std::endl;
sif::error << "MessageQueue creation failed" << std::endl;
}
}

@@ -97,7 +97,8 @@ ReturnValue_t MessageQueue::sendMessageFromMessageQueue(MessageQueueId_t sendTo,
bool ignoreFault) {
message->setSender(sentFrom);
BaseType_t result = xQueueSendToBack(reinterpret_cast<void*>(sendTo),reinterpret_cast<const void*>(message->getBuffer()), 0);
BaseType_t result = xQueueSendToBack(reinterpret_cast<QueueHandle_t>(sendTo),
reinterpret_cast<const void*>(message->getBuffer()), 0);
if (result != pdPASS) {
if (!ignoreFault) {
InternalErrorReporterIF* internalErrorReporter = objectManager->get<InternalErrorReporterIF>(

@@ -10,7 +10,8 @@ PeriodicTask::PeriodicTask(const char *name, TaskPriority setPriority,
BaseType_t status = xTaskCreate(taskEntryPoint, name, setStack, this, setPriority, &handle);
if(status != pdPASS){
debug << "PeriodicTask Insufficient heap memory remaining. Status: " << status << std::endl;
sif::debug << "PeriodicTask Insufficient heap memory remaining. Status: "
<< status << std::endl;
}
}

@@ -34,7 +35,7 @@ void PeriodicTask::taskEntryPoint(void* argument) {
}
originalTask->taskFunctionality();
debug << "Polling task " << originalTask->handle
sif::debug << "Polling task " << originalTask->handle
<< " returned from taskFunctionality." << std::endl;
}
@@ -9,7 +9,9 @@
uint32_t FixedTimeslotTask::deadlineMissedCount = 0;
const size_t PeriodicTaskIF::MINIMUM_STACK_SIZE = PTHREAD_STACK_MIN;
FixedTimeslotTask::FixedTimeslotTask(const char* name_, int priority_, size_t stackSize_, uint32_t periodMs_):PosixThread(name_,priority_,stackSize_),pst(periodMs_),started(false) {
FixedTimeslotTask::FixedTimeslotTask(const char* name_, int priority_,
size_t stackSize_, uint32_t periodMs_):
PosixThread(name_,priority_,stackSize_),pst(periodMs_),started(false) {
}
FixedTimeslotTask::~FixedTimeslotTask() {

@@ -40,8 +42,14 @@ uint32_t FixedTimeslotTask::getPeriodMs() const {
ReturnValue_t FixedTimeslotTask::addSlot(object_id_t componentId,
uint32_t slotTimeMs, int8_t executionStep) {
if (objectManager->get<ExecutableObjectIF>(componentId) != nullptr) {
pst.addSlot(componentId, slotTimeMs, executionStep, this);
return HasReturnvaluesIF::RETURN_OK;
}
sif::error << "Component " << std::hex << componentId <<
" not found, not adding it to pst" << std::endl;
return HasReturnvaluesIF::RETURN_FAILED;
}
ReturnValue_t FixedTimeslotTask::checkSequence() const {

@@ -80,7 +88,7 @@ void FixedTimeslotTask::taskFunctionality() {
void FixedTimeslotTask::missedDeadlineCounter() {
FixedTimeslotTask::deadlineMissedCount++;
if (FixedTimeslotTask::deadlineMissedCount % 10 == 0) {
error << "PST missed " << FixedTimeslotTask::deadlineMissedCount
sif::error << "PST missed " << FixedTimeslotTask::deadlineMissedCount
<< " deadlines." << std::endl;
}
}
@@ -17,11 +17,11 @@ MessageQueue::MessageQueue(size_t message_depth, size_t max_message_size) :
attributes.mq_maxmsg = message_depth;
attributes.mq_msgsize = max_message_size;
attributes.mq_flags = 0; //Flags are ignored on Linux during mq_open
//Set the name of the queue
sprintf(name, "/Q%u\n", queueCounter++);
//Create a nonblocking queue if the name is available (the queue is Read and writable for the owner as well as the group)
//Create a nonblocking queue if the name is available (the queue is Read and
// writable for the owner as well as the group)
mqd_t tempId = mq_open(name, O_NONBLOCK | O_RDWR | O_CREAT | O_EXCL,
S_IWUSR | S_IREAD | S_IWGRP | S_IRGRP | S_IROTH | S_IWOTH, &attributes);
if (tempId == -1) {

@@ -32,7 +32,7 @@ MessageQueue::MessageQueue(size_t message_depth, size_t max_message_size) :
//We unlink the other queue
int status = mq_unlink(name);
if (status != 0) {
error << "mq_unlink Failed with status: " << strerror(errno)
sif::error << "mq_unlink Failed with status: " << strerror(errno)
<< std::endl;
} else {
//Successful unlinking, try to open again

@@ -47,7 +47,7 @@ MessageQueue::MessageQueue(size_t message_depth, size_t max_message_size) :
}
}
//Failed either the first time or the second time
error << "MessageQueue::MessageQueue: Creating Queue " << std::hex
sif::error << "MessageQueue::MessageQueue: Creating Queue " << std::hex
<< name << std::dec << " failed with status: "
<< strerror(errno) << std::endl;
} else {

@@ -59,11 +59,13 @@ MessageQueue::MessageQueue(size_t message_depth, size_t max_message_size) :
MessageQueue::~MessageQueue() {
int status = mq_close(this->id);
if(status != 0){
error << "MessageQueue::Destructor: mq_close Failed with status: " << strerror(errno) <<std::endl;
sif::error << "MessageQueue::Destructor: mq_close Failed with status: "
<< strerror(errno) <<std::endl;
}
status = mq_unlink(name);
if(status != 0){
error << "MessageQueue::Destructor: mq_unlink Failed with status: " << strerror(errno) <<std::endl;
sif::error << "MessageQueue::Destructor: mq_unlink Failed with status: "
<< strerror(errno) <<std::endl;
}
}

@@ -93,7 +95,8 @@ ReturnValue_t MessageQueue::receiveMessage(MessageQueueMessage* message,
ReturnValue_t MessageQueue::receiveMessage(MessageQueueMessage* message) {
unsigned int messagePriority = 0;
int status = mq_receive(id,reinterpret_cast<char*>(message->getBuffer()),message->MAX_MESSAGE_SIZE,&messagePriority);
int status = mq_receive(id,reinterpret_cast<char*>(message->getBuffer()),
message->MAX_MESSAGE_SIZE,&messagePriority);
if (status > 0) {
this->lastPartner = message->getSender();
//Check size of incoming message.

@@ -114,7 +117,8 @@ ReturnValue_t MessageQueue::receiveMessage(MessageQueueMessage* message) {
return MessageQueueIF::EMPTY;
case EBADF:
//mqdes doesn't represent a valid queue open for reading.
error << "MessageQueue::receive: configuration error " << strerror(errno) << std::endl;
sif::error << "MessageQueue::receive: configuration error "
<< strerror(errno) << std::endl;
/*NO BREAK*/
case EINVAL:
/*

@@ -123,7 +127,8 @@ ReturnValue_t MessageQueue::receiveMessage(MessageQueueMessage* message) {
* * The number of bytes requested, msg_len is less than zero.
* * msg_len is anything other than the mq_msgsize of the specified queue, and the QNX extended option MQ_READBUF_DYNAMIC hasn't been set in the queue's mq_flags.
*/
error << "MessageQueue::receive: configuration error " << strerror(errno) << std::endl;
sif::error << "MessageQueue::receive: configuration error "
<< strerror(errno) << std::endl;
/*NO BREAK*/
case EMSGSIZE:
/*

@@ -131,7 +136,8 @@ ReturnValue_t MessageQueue::receiveMessage(MessageQueueMessage* message) {
* * the QNX extended option MQ_READBUF_DYNAMIC hasn't been set, and the given msg_len is shorter than the mq_msgsize for the given queue.
* * the extended option MQ_READBUF_DYNAMIC has been set, but the given msg_len is too short for the message that would have been received.
*/
error << "MessageQueue::receive: configuration error " << strerror(errno) << std::endl;
sif::error << "MessageQueue::receive: configuration error "
<< strerror(errno) << std::endl;
/*NO BREAK*/
case EINTR:
//The operation was interrupted by a signal.

@@ -154,7 +160,8 @@ ReturnValue_t MessageQueue::flush(uint32_t* count) {
switch(errno){
case EBADF:
//mqdes doesn't represent a valid message queue.
error << "MessageQueue::flush configuration error, called flush with an invalid queue ID" << std::endl;
sif::error << "MessageQueue::flush configuration error, "
"called flush with an invalid queue ID" << std::endl;
/*NO BREAK*/
case EINVAL:
//mq_attr is NULL

@@ -169,7 +176,8 @@ ReturnValue_t MessageQueue::flush(uint32_t* count) {
switch(errno){
case EBADF:
//mqdes doesn't represent a valid message queue.
error << "MessageQueue::flush configuration error, called flush with an invalid queue ID" << std::endl;
sif::error << "MessageQueue::flush configuration error, "
"called flush with an invalid queue ID" << std::endl;
/*NO BREAK*/
case EINVAL:
/*

@@ -237,7 +245,9 @@ ReturnValue_t MessageQueue::sendMessageFromMessageQueue(MessageQueueId_t sendTo,
return MessageQueueIF::FULL;
case EBADF:
//mq_des doesn't represent a valid message queue descriptor, or mq_des wasn't opened for writing.
error << "MessageQueue::sendMessage: Configuration error " << strerror(errno) << " in mq_send mqSendTo: " << sendTo << " sent from " << sentFrom << std::endl;
sif::error << "MessageQueue::sendMessage: Configuration error "
<< strerror(errno) << " in mq_send mqSendTo: " << sendTo
<< " sent from " << sentFrom << std::endl;
/*NO BREAK*/
case EINTR:
//The call was interrupted by a signal.

@@ -248,9 +258,11 @@ ReturnValue_t MessageQueue::sendMessageFromMessageQueue(MessageQueueId_t sendTo,
* * msg_len is negative.
* * msg_prio is greater than MQ_PRIO_MAX.
* * msg_prio is less than 0.
* * MQ_PRIO_RESTRICT is set in the mq_attr of mq_des, and msg_prio is greater than the priority of the calling process.
* * MQ_PRIO_RESTRICT is set in the mq_attr of mq_des,
* and msg_prio is greater than the priority of the calling process.
* */
error << "MessageQueue::sendMessage: Configuration error " << strerror(errno) << " in mq_send" << std::endl;
sif::error << "MessageQueue::sendMessage: Configuration error "
<< strerror(errno) << " in mq_send" << std::endl;
/*NO BREAK*/
case EMSGSIZE:
//The msg_len is greater than the msgsize associated with the specified queue.
@@ -13,22 +13,22 @@ Mutex::Mutex() {
pthread_mutexattr_t mutexAttr;
int status = pthread_mutexattr_init(&mutexAttr);
if (status != 0) {
error << "Mutex: Attribute init failed with: " << strerror(status) << std::endl;
sif::error << "Mutex: Attribute init failed with: " << strerror(status) << std::endl;
}
status = pthread_mutexattr_setprotocol(&mutexAttr, PTHREAD_PRIO_INHERIT);
if (status != 0) {
error << "Mutex: Attribute set PRIO_INHERIT failed with: " << strerror(status)
sif::error << "Mutex: Attribute set PRIO_INHERIT failed with: " << strerror(status)
<< std::endl;
}
status = pthread_mutex_init(&mutex, &mutexAttr);
if (status != 0) {
error << "Mutex: creation with name, id " << mutex.__data.__count
sif::error << "Mutex: creation with name, id " << mutex.__data.__count
<< ", " << " failed with " << strerror(status) << std::endl;
}
//After a mutex attributes object has been used to initialize one or more mutexes, any function affecting the attributes object (including destruction) shall not affect any previously initialized mutexes.
status = pthread_mutexattr_destroy(&mutexAttr);
if (status != 0) {
error << "Mutex: Attribute destroy failed with " << strerror(status) << std::endl;
sif::error << "Mutex: Attribute destroy failed with " << strerror(status) << std::endl;
}
}

@@ -56,9 +56,9 @@ void PeriodicPosixTask::taskFunctionality(void){
char name[20] = {0};
int status = pthread_getname_np(pthread_self(),name,sizeof(name));
if(status==0){
error << "ObjectTask: " << name << " Deadline missed." << std::endl;
sif::error << "ObjectTask: " << name << " Deadline missed." << std::endl;
}else{
error << "ObjectTask: X Deadline missed. " << status << std::endl;
sif::error << "ObjectTask: X Deadline missed. " << status << std::endl;
}
if (this->deadlineMissedFunc != NULL) {
this->deadlineMissedFunc();

@@ -9,7 +9,8 @@
class PeriodicPosixTask: public PosixThread, public PeriodicTaskIF {
public:
PeriodicPosixTask(const char* name_, int priority_, size_t stackSize_, uint32_t period_, void(*deadlineMissedFunc_)());
PeriodicPosixTask(const char* name_, int priority_, size_t stackSize_,
uint32_t period_, void(*deadlineMissedFunc_)());
virtual ~PeriodicPosixTask();
/**
* @brief The method to start the task.
@ -22,7 +22,8 @@ ReturnValue_t PosixThread::sleep(uint64_t ns) {
|
||||
//The nanosleep() function was interrupted by a signal.
|
||||
return HasReturnvaluesIF::RETURN_FAILED;
|
||||
case EINVAL:
|
||||
//The rqtp argument specified a nanosecond value less than zero or greater than or equal to 1000 million.
|
||||
//The rqtp argument specified a nanosecond value less than zero or
|
||||
// greater than or equal to 1000 million.
|
||||
return HasReturnvaluesIF::RETURN_FAILED;
|
||||
default:
|
||||
return HasReturnvaluesIF::RETURN_FAILED;
|
||||
@ -40,8 +41,8 @@ void PosixThread::suspend() {
|
||||
sigaddset(&waitSignal, SIGUSR1);
|
||||
sigwait(&waitSignal, &caughtSig);
|
||||
if (caughtSig != SIGUSR1) {
|
||||
error << "FixedTimeslotTask: Unknown Signal received: " << caughtSig
|
||||
<< std::endl;
|
||||
sif::error << "FixedTimeslotTask: Unknown Signal received: " <<
|
||||
caughtSig << std::endl;
|
||||
}
|
||||
}
|
||||
|
||||
@ -112,14 +113,15 @@ uint64_t PosixThread::getCurrentMonotonicTimeMs(){
|
||||
return currentTime_ms;
|
||||
}
|
||||
|
||||
PosixThread::PosixThread(const char* name_, int priority_, size_t stackSize_):thread(0),priority(priority_),stackSize(stackSize_) {
|
||||
PosixThread::PosixThread(const char* name_, int priority_, size_t stackSize_):
|
||||
thread(0),priority(priority_),stackSize(stackSize_) {
|
||||
strcpy(name,name_);
|
||||
}
|
||||
|
||||
|
||||
|
||||
void PosixThread::createTask(void* (*fnc_)(void*), void* arg_) {
|
||||
debug << "PosixThread::createTask" << std::endl;
|
||||
//sif::debug << "PosixThread::createTask" << std::endl;
|
||||
/*
|
||||
* The attr argument points to a pthread_attr_t structure whose contents
|
||||
are used at thread creation time to determine attributes for the new
|
||||
@ -130,35 +132,41 @@ void PosixThread::createTask(void* (*fnc_)(void*), void* arg_) {
|
||||
pthread_attr_t attributes;
|
||||
int status = pthread_attr_init(&attributes);
|
||||
if(status != 0){
|
||||
error << "Posix Thread attribute init failed with: " << strerror(status) << std::endl;
|
||||
sif::error << "Posix Thread attribute init failed with: " <<
|
||||
strerror(status) << std::endl;
|
||||
}
|
||||
void* sp;
|
||||
status = posix_memalign(&sp, sysconf(_SC_PAGESIZE), stackSize);
|
||||
if(status != 0){
|
||||
error << "Posix Thread stack init failed with: " << strerror(status) << std::endl;
|
||||
sif::error << "Posix Thread stack init failed with: " <<
|
||||
strerror(status) << std::endl;
|
||||
}
|
||||
|
||||
status = pthread_attr_setstack(&attributes, sp, stackSize);
|
||||
if(status != 0){
|
||||
error << "Posix Thread attribute setStack failed with: " << strerror(status) << std::endl;
|
||||
sif::error << "Posix Thread attribute setStack failed with: " <<
|
||||
strerror(status) << std::endl;
|
||||
}
|
||||
|
||||
status = pthread_attr_setinheritsched(&attributes, PTHREAD_EXPLICIT_SCHED);
|
||||
if(status != 0){
|
||||
error << "Posix Thread attribute setinheritsched failed with: " << strerror(status) << std::endl;
|
||||
sif::error << "Posix Thread attribute setinheritsched failed with: " <<
|
||||
strerror(status) << std::endl;
|
||||
}
|
||||
|
||||
//TODO FIFO -> This needs root privileges for the process
|
||||
status = pthread_attr_setschedpolicy(&attributes,SCHED_FIFO);
|
||||
if(status != 0){
|
||||
error << "Posix Thread attribute schedule policy failed with: " << strerror(status) << std::endl;
|
||||
sif::error << "Posix Thread attribute schedule policy failed with: " <<
|
||||
strerror(status) << std::endl;
|
||||
}
|
||||
|
||||
sched_param scheduleParams;
|
||||
scheduleParams.__sched_priority = priority;
|
||||
status = pthread_attr_setschedparam(&attributes, &scheduleParams);
|
||||
if(status != 0){
|
||||
error << "Posix Thread attribute schedule params failed with: " << strerror(status) << std::endl;
|
||||
sif::error << "Posix Thread attribute schedule params failed with: " <<
|
||||
strerror(status) << std::endl;
|
||||
}
|
||||
|
||||
//Set Signal Mask for suspend until startTask is called
|
||||
@ -167,22 +175,26 @@ void PosixThread::createTask(void* (*fnc_)(void*), void* arg_) {
|
||||
sigaddset(&waitSignal, SIGUSR1);
|
||||
status = pthread_sigmask(SIG_BLOCK, &waitSignal, NULL);
|
||||
if(status != 0){
|
||||
error << "Posix Thread sigmask failed failed with: " << strerror(status) << " errno: " << strerror(errno) << std::endl;
|
||||
sif::error << "Posix Thread sigmask failed failed with: " <<
|
||||
strerror(status) << " errno: " << strerror(errno) << std::endl;
|
||||
}
|
||||
|
||||
|
||||
status = pthread_create(&thread,&attributes,fnc_,arg_);
|
||||
if(status != 0){
|
||||
error << "Posix Thread create failed with: " << strerror(status) << std::endl;
|
||||
sif::error << "Posix Thread create failed with: " <<
|
||||
strerror(status) << std::endl;
|
||||
}
|
||||
|
||||
status = pthread_setname_np(thread,name);
|
||||
if(status != 0){
|
||||
error << "Posix Thread setname failed with: " << strerror(status) << std::endl;
|
||||
sif::error << "Posix Thread setname failed with: " <<
|
||||
strerror(status) << std::endl;
|
||||
}
|
||||
|
||||
status = pthread_attr_destroy(&attributes);
|
||||
if(status!=0){
|
||||
error << "Posix Thread attribute destroy failed with: " << strerror(status) << std::endl;
|
||||
sif::error << "Posix Thread attribute destroy failed with: " <<
|
||||
strerror(status) << std::endl;
|
||||
}
|
||||
}
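The hunk above configures the new thread with an explicitly chosen scheduler (SCHED_FIFO), a page-aligned stack and a blocked SIGUSR1 so the thread can be held until it is started. A minimal usage sketch, assuming only the PosixThread constructor and createTask signatures shown above; the task name, priority and stack size are made-up example values:

// Hypothetical entry function matching the void* (*)(void*) signature
// expected by createTask.
void* exampleTaskEntry(void* args) {
    // A real task would typically wait here until it is released
    // (SIGUSR1 is blocked above for exactly that purpose) and then
    // perform its periodic work.
    return nullptr;
}

void spawnExampleThread() {
    // Name, priority and stack size are illustrative values only.
    PosixThread exampleThread("EXMPL", 40, 0x2000);
    exampleThread.createTask(&exampleTaskEntry, nullptr);
}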
|
||||
|
@ -9,7 +9,8 @@ Timer::Timer() {
|
||||
sigEvent.sigev_value.sival_ptr = &timerId;
|
||||
int status = timer_create(CLOCK_MONOTONIC, &sigEvent, &timerId);
|
||||
if(status!=0){
|
||||
error << "Timer creation failed with: " << status << " errno: " << errno << std::endl;
|
||||
sif::error << "Timer creation failed with: " << status <<
|
||||
" errno: " << errno << std::endl;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -66,10 +66,16 @@ ReturnValue_t PollingTask::startTask() {
|
||||
}
|
||||
}
|
||||
|
||||
ReturnValue_t PollingTask::addSlot(object_id_t componentId, uint32_t slotTimeMs,
|
||||
int8_t executionStep) {
|
||||
ReturnValue_t PollingTask::addSlot(object_id_t componentId,
|
||||
uint32_t slotTimeMs, int8_t executionStep) {
|
||||
if (objectManager->get<ExecutableObjectIF>(componentId) != nullptr) {
|
||||
pst.addSlot(componentId, slotTimeMs, executionStep, this);
|
||||
return HasReturnvaluesIF::RETURN_OK;
|
||||
}
|
||||
|
||||
error << "Component " << std::hex << componentId <<
|
||||
" not found, not adding it to pst" << std::endl;
|
||||
return HasReturnvaluesIF::RETURN_FAILED;
|
||||
}
|
||||
|
||||
uint32_t PollingTask::getPeriodMs() const {
|
||||
|
@ -37,7 +37,7 @@ ReturnValue_t ParameterHelper::handleParameterMessage(CommandMessage *message) {
|
||||
ParameterMessage::getParameterId(message));
|
||||
|
||||
const uint8_t *storedStream;
|
||||
uint32_t storedStreamSize;
|
||||
size_t storedStreamSize;
|
||||
result = storage->getData(
|
||||
ParameterMessage::getStoreId(message), &storedStream,
|
||||
&storedStreamSize);
|
||||
|
@ -91,7 +91,7 @@ template<typename T>
|
||||
ReturnValue_t ParameterWrapper::serializeData(uint8_t** buffer, uint32_t* size,
|
||||
const uint32_t max_size, bool bigEndian) const {
|
||||
const T *element = (const T*) readonlyData;
|
||||
ReturnValue_t result;
|
||||
ReturnValue_t result = HasReturnvaluesIF::RETURN_OK;
|
||||
uint16_t dataSize = columns * rows;
|
||||
while (dataSize != 0) {
|
||||
result = SerializeAdapter<T>::serialize(element, buffer, size, max_size,
|
||||
|
@ -7,11 +7,15 @@
|
||||
#include <sstream>
|
||||
#include <cstdio>
|
||||
|
||||
//Unfortunately, there must be a forward declaration of log_fe (MUST be defined in main), to let the system know where to write to.
|
||||
// Unfortunately, there must be a forward declaration of log_fe
|
||||
// (MUST be defined in main), to let the system know where to write to.
|
||||
namespace sif {
|
||||
extern std::ostream debug;
|
||||
extern std::ostream info;
|
||||
extern std::ostream warning;
|
||||
extern std::ostream error;
|
||||
}
|
||||
|
||||
|
||||
class ServiceInterfaceStream : public std::basic_ostream< char, std::char_traits< char > > {
|
||||
protected:
|
||||
|
@ -1,14 +1,11 @@
|
||||
#ifndef FRAMEWORK_STORAGEMANAGER_LOCALPOOL_H_
|
||||
#define FRAMEWORK_STORAGEMANAGER_LOCALPOOL_H_
|
||||
|
||||
/**
|
||||
* @file LocalPool
|
||||
*
|
||||
* @date 02.02.2012
|
||||
* @author Bastian Baetz
|
||||
*
|
||||
* @brief This file contains the definition of the LocalPool class.
|
||||
*/
|
||||
#ifndef FRAMEWORK_STORAGEMANAGER_LOCALPOOL_H_
|
||||
#define FRAMEWORK_STORAGEMANAGER_LOCALPOOL_H_
|
||||
|
||||
#include <framework/objectmanager/SystemObject.h>
|
||||
#include <framework/serviceinterface/ServiceInterfaceStream.h>
|
||||
@ -20,7 +17,7 @@
|
||||
/**
|
||||
* @brief The LocalPool class provides an intermediate data storage with
|
||||
* a fixed pool size policy.
|
||||
* \details The class implements the StorageManagerIF interface. While the
|
||||
* @details The class implements the StorageManagerIF interface. While the
|
||||
* total number of pools is fixed, the element sizes in one pool and
|
||||
* the number of pool elements per pool are set on construction.
|
||||
* The full amount of memory is allocated on construction.
|
||||
@ -31,7 +28,6 @@
|
||||
* It is possible to store empty packets in the pool.
|
||||
* The local pool is NOT thread-safe.
|
||||
*/
|
||||
|
||||
template<uint8_t NUMBER_OF_POOLS = 5>
|
||||
class LocalPool: public SystemObject, public StorageManagerIF {
|
||||
public:
|
||||
@ -39,7 +35,65 @@ public:
|
||||
* @brief This definition generally sets the number of different sized pools.
|
||||
* @details This must be less than the maximum number of pools (currently 0xff).
|
||||
*/
|
||||
// static const uint32_t NUMBER_OF_POOLS;
|
||||
// static const uint32_t NUMBER_OF_POOLS;
|
||||
/**
|
||||
* @brief This is the default constructor for a pool manager instance.
|
||||
* @details By passing two arrays of size NUMBER_OF_POOLS, the constructor
|
||||
* allocates memory (with \c new) for store and size_list. These
|
||||
* regions are all set to zero on start up.
|
||||
* @param setObjectId The object identifier to be set. This allows for
|
||||
* multiple instances of LocalPool in the system.
|
||||
* @param element_sizes An array of size NUMBER_OF_POOLS in which the size
|
||||
* of a single element in each pool is determined.
|
||||
* <b>The sizes must be provided in ascending order.
|
||||
* </b>
|
||||
* @param n_elements An array of size NUMBER_OF_POOLS in which the
|
||||
* number of elements for each pool is determined.
|
||||
* The position of these values correspond to those in
|
||||
* element_sizes.
|
||||
* @param registered Register the pool in object manager or not.
|
||||
* Default is false (local pool).
|
||||
* @param spillsToHigherPools A variable to determine whether
|
||||
* higher n pools are used if the store is full.
|
||||
*/
|
||||
LocalPool(object_id_t setObjectId,
|
||||
const uint16_t element_sizes[NUMBER_OF_POOLS],
|
||||
const uint16_t n_elements[NUMBER_OF_POOLS],
|
||||
bool registered = false,
|
||||
bool spillsToHigherPools = false);
|
||||
/**
|
||||
* @brief In the LocalPool's destructor all allocated memory is freed.
|
||||
*/
|
||||
virtual ~LocalPool(void);
|
||||
|
||||
/**
|
||||
* Documentation: See StorageManagerIF.h
|
||||
*/
|
||||
ReturnValue_t addData(store_address_t* storageId, const uint8_t * data,
|
||||
size_t size, bool ignoreFault = false) override;
|
||||
ReturnValue_t getFreeElement(store_address_t* storageId,const size_t size,
|
||||
uint8_t** p_data, bool ignoreFault = false) override;
|
||||
ReturnValue_t getData(store_address_t packet_id, const uint8_t** packet_ptr,
|
||||
size_t * size) override;
|
||||
ReturnValue_t modifyData(store_address_t packet_id, uint8_t** packet_ptr,
|
||||
size_t * size) override;
|
||||
virtual ReturnValue_t deleteData(store_address_t) override;
|
||||
virtual ReturnValue_t deleteData(uint8_t* ptr, size_t size,
|
||||
store_address_t* storeId = NULL) override;
|
||||
void clearStore() override;
|
||||
ReturnValue_t initialize() override;
|
||||
protected:
|
||||
/**
|
||||
* With this helper method, a free element of \c size is reserved.
|
||||
* @param size The minimum packet size that shall be reserved.
|
||||
* @param[out] address Storage ID of the reserved data.
|
||||
* @return - #RETURN_OK on success,
|
||||
* - the return codes of #getPoolIndex or #findEmpty otherwise.
|
||||
*/
|
||||
virtual ReturnValue_t reserveSpace(const uint32_t size,
|
||||
store_address_t* address, bool ignoreFault);
|
||||
|
||||
InternalErrorReporterIF *internalErrorReporter;
|
||||
private:
|
||||
/**
|
||||
* Indicates that this element is free.
|
||||
@ -60,7 +114,7 @@ private:
|
||||
/**
|
||||
* @brief store represents the actual memory pool.
|
||||
* @details It is an array of pointers to memory, which was allocated with
|
||||
* a \c new call on construction.
|
||||
* a @c new call on construction.
|
||||
*/
|
||||
uint8_t* store[NUMBER_OF_POOLS];
|
||||
/**
|
||||
@ -78,7 +132,7 @@ private:
|
||||
* @param data The data to be stored.
|
||||
* @param size The size of the data to be stored.
|
||||
*/
|
||||
void write(store_address_t packet_id, const uint8_t* data, uint32_t size);
|
||||
void write(store_address_t packet_id, const uint8_t* data, size_t size);
|
||||
/**
|
||||
* @brief A helper method to read the element size of a certain pool.
|
||||
* @param pool_index The pool in which to look.
|
||||
@ -101,7 +155,8 @@ private:
|
||||
* @return - #RETURN_OK on success,
|
||||
* - #DATA_TOO_LARGE otherwise.
|
||||
*/
|
||||
ReturnValue_t getPoolIndex(uint32_t packet_size, uint16_t* poolIndex, uint16_t startAtIndex = 0);
|
||||
ReturnValue_t getPoolIndex(size_t packet_size, uint16_t* poolIndex,
|
||||
uint16_t startAtIndex = 0);
|
||||
/**
|
||||
* @brief This helper method calculates the true array position in store
|
||||
* of a given packet id.
|
||||
@ -121,310 +176,8 @@ private:
|
||||
* - #DATA_STORAGE_FULL if the store is full
|
||||
*/
|
||||
ReturnValue_t findEmpty(uint16_t pool_index, uint16_t* element);
|
||||
protected:
|
||||
/**
|
||||
* With this helper method, a free element of \c size is reserved.
|
||||
* @param size The minimum packet size that shall be reserved.
|
||||
* @param[out] address Storage ID of the reserved data.
|
||||
* @return - #RETURN_OK on success,
|
||||
* - the return codes of #getPoolIndex or #findEmpty otherwise.
|
||||
*/
|
||||
virtual ReturnValue_t reserveSpace(const uint32_t size, store_address_t* address, bool ignoreFault);
|
||||
|
||||
InternalErrorReporterIF *internalErrorReporter;
|
||||
public:
|
||||
/**
|
||||
* @brief This is the default constructor for a pool manager instance.
|
||||
* @details By passing two arrays of size NUMBER_OF_POOLS, the constructor
|
||||
* allocates memory (with \c new) for store and size_list. These
|
||||
* regions are all set to zero on start up.
|
||||
* @param setObjectId The object identifier to be set. This allows for
|
||||
* multiple instances of LocalPool in the system.
|
||||
* @param element_sizes An array of size NUMBER_OF_POOLS in which the size
|
||||
* of a single element in each pool is determined.
|
||||
* <b>The sizes must be provided in ascending order.
|
||||
* </b>
|
||||
* @param n_elements An array of size NUMBER_OF_POOLS in which the
|
||||
* number of elements for each pool is determined.
|
||||
* The position of these values correspond to those in
|
||||
* element_sizes.
|
||||
* @param registered Register the pool in object manager or not. Default is false (local pool).
|
||||
*/
|
||||
LocalPool(object_id_t setObjectId,
|
||||
const uint16_t element_sizes[NUMBER_OF_POOLS],
|
||||
const uint16_t n_elements[NUMBER_OF_POOLS],
|
||||
bool registered = false,
|
||||
bool spillsToHigherPools = false);
|
||||
/**
|
||||
* @brief In the LocalPool's destructor all allocated memory is freed.
|
||||
*/
|
||||
virtual ~LocalPool(void);
|
||||
ReturnValue_t addData(store_address_t* storageId, const uint8_t * data,
|
||||
uint32_t size, bool ignoreFault = false);
|
||||
|
||||
/**
|
||||
* With this helper method, a free element of \c size is reserved.
|
||||
*
|
||||
* @param size The minimum packet size that shall be reserved.
|
||||
* @return Returns the storage identifier within the storage or
|
||||
* StorageManagerIF::INVALID_ADDRESS (in raw).
|
||||
*/
|
||||
ReturnValue_t getFreeElement(store_address_t* storageId,
|
||||
const uint32_t size, uint8_t** p_data, bool ignoreFault = false);
|
||||
ReturnValue_t getData(store_address_t packet_id, const uint8_t** packet_ptr,
|
||||
uint32_t* size);
|
||||
ReturnValue_t modifyData(store_address_t packet_id, uint8_t** packet_ptr,
|
||||
uint32_t* size);
|
||||
virtual ReturnValue_t deleteData(store_address_t);
|
||||
virtual ReturnValue_t deleteData(uint8_t* ptr, uint32_t size,
|
||||
store_address_t* storeId = NULL);
|
||||
void clearStore();
|
||||
ReturnValue_t initialize();
|
||||
};
|
||||
|
||||
template<uint8_t NUMBER_OF_POOLS>
|
||||
inline ReturnValue_t LocalPool<NUMBER_OF_POOLS>::findEmpty(uint16_t pool_index,
|
||||
uint16_t* element) {
|
||||
ReturnValue_t status = DATA_STORAGE_FULL;
|
||||
for (uint16_t foundElement = 0; foundElement < n_elements[pool_index];
|
||||
foundElement++) {
|
||||
if (size_list[pool_index][foundElement] == STORAGE_FREE) {
|
||||
*element = foundElement;
|
||||
status = RETURN_OK;
|
||||
break;
|
||||
}
|
||||
}
|
||||
return status;
|
||||
}
|
||||
|
||||
template<uint8_t NUMBER_OF_POOLS>
|
||||
inline void LocalPool<NUMBER_OF_POOLS>::write(store_address_t packet_id,
|
||||
const uint8_t* data, uint32_t size) {
|
||||
uint8_t* ptr;
|
||||
uint32_t packet_position = getRawPosition(packet_id);
|
||||
|
||||
//check size? -> Not necessary, because size is checked before calling this function.
|
||||
ptr = &store[packet_id.pool_index][packet_position];
|
||||
memcpy(ptr, data, size);
|
||||
size_list[packet_id.pool_index][packet_id.packet_index] = size;
|
||||
}
|
||||
|
||||
//Returns page size of 0 in case store_index is illegal
|
||||
template<uint8_t NUMBER_OF_POOLS>
|
||||
inline uint32_t LocalPool<NUMBER_OF_POOLS>::getPageSize(uint16_t pool_index) {
|
||||
if (pool_index < NUMBER_OF_POOLS) {
|
||||
return element_sizes[pool_index];
|
||||
} else {
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
template<uint8_t NUMBER_OF_POOLS>
|
||||
inline ReturnValue_t LocalPool<NUMBER_OF_POOLS>::getPoolIndex(
|
||||
uint32_t packet_size, uint16_t* poolIndex, uint16_t startAtIndex) {
|
||||
for (uint16_t n = startAtIndex; n < NUMBER_OF_POOLS; n++) {
|
||||
// debug << "LocalPool " << getObjectId() << "::getPoolIndex: Pool: " << n << ", Element Size: " << element_sizes[n] << std::endl;
|
||||
if (element_sizes[n] >= packet_size) {
|
||||
*poolIndex = n;
|
||||
return RETURN_OK;
|
||||
}
|
||||
}
|
||||
return DATA_TOO_LARGE;
|
||||
}
|
||||
|
||||
template<uint8_t NUMBER_OF_POOLS>
|
||||
inline uint32_t LocalPool<NUMBER_OF_POOLS>::getRawPosition(
|
||||
store_address_t packet_id) {
|
||||
return packet_id.packet_index * element_sizes[packet_id.pool_index];
|
||||
}
|
||||
|
||||
template<uint8_t NUMBER_OF_POOLS>
|
||||
inline ReturnValue_t LocalPool<NUMBER_OF_POOLS>::reserveSpace(
|
||||
const uint32_t size, store_address_t* address, bool ignoreFault) {
|
||||
ReturnValue_t status = getPoolIndex(size, &address->pool_index);
|
||||
if (status != RETURN_OK) {
|
||||
error << "LocalPool( " << std::hex << getObjectId() << std::dec
|
||||
<< " )::reserveSpace: Packet too large." << std::endl;
|
||||
return status;
|
||||
}
|
||||
status = findEmpty(address->pool_index, &address->packet_index);
|
||||
while (status != RETURN_OK && spillsToHigherPools) {
|
||||
status = getPoolIndex(size, &address->pool_index, address->pool_index + 1);
|
||||
if (status != RETURN_OK) {
|
||||
//We don't find any fitting pool anymore.
|
||||
break;
|
||||
}
|
||||
status = findEmpty(address->pool_index, &address->packet_index);
|
||||
}
|
||||
if (status == RETURN_OK) {
|
||||
// if (getObjectId() == objects::IPC_STORE && address->pool_index >= 3) {
|
||||
// debug << "Reserve: Pool: " << std::dec << address->pool_index << " Index: " << address->packet_index << std::endl;
|
||||
// }
|
||||
|
||||
size_list[address->pool_index][address->packet_index] = size;
|
||||
} else {
|
||||
if (!ignoreFault) {
|
||||
internalErrorReporter->storeFull();
|
||||
}
|
||||
// error << "LocalPool( " << std::hex << getObjectId() << std::dec
|
||||
// << " )::reserveSpace: Packet store is full." << std::endl;
|
||||
}
|
||||
return status;
|
||||
}
|
||||
|
||||
template<uint8_t NUMBER_OF_POOLS>
|
||||
inline LocalPool<NUMBER_OF_POOLS>::LocalPool(object_id_t setObjectId,
|
||||
const uint16_t element_sizes[NUMBER_OF_POOLS],
|
||||
const uint16_t n_elements[NUMBER_OF_POOLS], bool registered, bool spillsToHigherPools) :
|
||||
SystemObject(setObjectId, registered), spillsToHigherPools(spillsToHigherPools), internalErrorReporter(NULL) {
|
||||
for (uint16_t n = 0; n < NUMBER_OF_POOLS; n++) {
|
||||
this->element_sizes[n] = element_sizes[n];
|
||||
this->n_elements[n] = n_elements[n];
|
||||
store[n] = new uint8_t[n_elements[n] * element_sizes[n]];
|
||||
size_list[n] = new uint32_t[n_elements[n]];
|
||||
memset(store[n], 0x00, (n_elements[n] * element_sizes[n]));
|
||||
memset(size_list[n], STORAGE_FREE, (n_elements[n] * sizeof(**size_list))); //TODO checkme
|
||||
}
|
||||
}
|
||||
|
||||
template<uint8_t NUMBER_OF_POOLS>
|
||||
inline LocalPool<NUMBER_OF_POOLS>::~LocalPool(void) {
|
||||
for (uint16_t n = 0; n < NUMBER_OF_POOLS; n++) {
|
||||
delete[] store[n];
|
||||
delete[] size_list[n];
|
||||
}
|
||||
}
|
||||
|
||||
template<uint8_t NUMBER_OF_POOLS>
|
||||
inline ReturnValue_t LocalPool<NUMBER_OF_POOLS>::addData(
|
||||
store_address_t* storageId, const uint8_t* data, uint32_t size, bool ignoreFault) {
|
||||
ReturnValue_t status = reserveSpace(size, storageId, ignoreFault);
|
||||
if (status == RETURN_OK) {
|
||||
write(*storageId, data, size);
|
||||
}
|
||||
return status;
|
||||
}
|
||||
|
||||
template<uint8_t NUMBER_OF_POOLS>
|
||||
inline ReturnValue_t LocalPool<NUMBER_OF_POOLS>::getFreeElement(
|
||||
store_address_t* storageId, const uint32_t size, uint8_t** p_data, bool ignoreFault) {
|
||||
ReturnValue_t status = reserveSpace(size, storageId, ignoreFault);
|
||||
if (status == RETURN_OK) {
|
||||
*p_data = &store[storageId->pool_index][getRawPosition(*storageId)];
|
||||
} else {
|
||||
*p_data = NULL;
|
||||
}
|
||||
return status;
|
||||
}
|
||||
|
||||
template<uint8_t NUMBER_OF_POOLS>
|
||||
inline ReturnValue_t LocalPool<NUMBER_OF_POOLS>::getData(
|
||||
store_address_t packet_id, const uint8_t** packet_ptr, uint32_t* size) {
|
||||
uint8_t* tempData = NULL;
|
||||
ReturnValue_t status = modifyData(packet_id, &tempData, size);
|
||||
*packet_ptr = tempData;
|
||||
return status;
|
||||
}
|
||||
|
||||
template<uint8_t NUMBER_OF_POOLS>
|
||||
inline ReturnValue_t LocalPool<NUMBER_OF_POOLS>::modifyData(store_address_t packet_id,
|
||||
uint8_t** packet_ptr, uint32_t* size) {
|
||||
ReturnValue_t status = RETURN_FAILED;
|
||||
if (packet_id.pool_index >= NUMBER_OF_POOLS) {
|
||||
return ILLEGAL_STORAGE_ID;
|
||||
}
|
||||
if ((packet_id.packet_index >= n_elements[packet_id.pool_index])) {
|
||||
return ILLEGAL_STORAGE_ID;
|
||||
}
|
||||
if (size_list[packet_id.pool_index][packet_id.packet_index]
|
||||
!= STORAGE_FREE) {
|
||||
uint32_t packet_position = getRawPosition(packet_id);
|
||||
*packet_ptr = &store[packet_id.pool_index][packet_position];
|
||||
*size = size_list[packet_id.pool_index][packet_id.packet_index];
|
||||
status = RETURN_OK;
|
||||
} else {
|
||||
status = DATA_DOES_NOT_EXIST;
|
||||
}
|
||||
return status;
|
||||
}
|
||||
|
||||
template<uint8_t NUMBER_OF_POOLS>
|
||||
inline ReturnValue_t LocalPool<NUMBER_OF_POOLS>::deleteData(
|
||||
store_address_t packet_id) {
|
||||
|
||||
// if (getObjectId() == objects::IPC_STORE && packet_id.pool_index >= 3) {
|
||||
// debug << "Delete: Pool: " << std::dec << packet_id.pool_index << " Index: " << packet_id.packet_index << std::endl;
|
||||
// }
|
||||
ReturnValue_t status = RETURN_OK;
|
||||
uint32_t page_size = getPageSize(packet_id.pool_index);
|
||||
if ((page_size != 0)
|
||||
&& (packet_id.packet_index < n_elements[packet_id.pool_index])) {
|
||||
uint16_t packet_position = getRawPosition(packet_id);
|
||||
uint8_t* ptr = &store[packet_id.pool_index][packet_position];
|
||||
memset(ptr, 0, page_size);
|
||||
//Set free list
|
||||
size_list[packet_id.pool_index][packet_id.packet_index] = STORAGE_FREE;
|
||||
} else {
|
||||
//pool_index or packet_index is too large
|
||||
error << "LocalPool:deleteData failed." << std::endl;
|
||||
status = ILLEGAL_STORAGE_ID;
|
||||
}
|
||||
return status;
|
||||
}
|
||||
|
||||
template<uint8_t NUMBER_OF_POOLS>
|
||||
inline void LocalPool<NUMBER_OF_POOLS>::clearStore() {
|
||||
for (uint16_t n = 0; n < NUMBER_OF_POOLS; n++) {
|
||||
memset(size_list[n], STORAGE_FREE, (n_elements[n] * sizeof(**size_list)));//TODO checkme
|
||||
}
|
||||
}
|
||||
|
||||
template<uint8_t NUMBER_OF_POOLS>
|
||||
inline ReturnValue_t LocalPool<NUMBER_OF_POOLS>::deleteData(uint8_t* ptr,
|
||||
uint32_t size, store_address_t* storeId) {
|
||||
store_address_t localId;
|
||||
ReturnValue_t result = ILLEGAL_ADDRESS;
|
||||
for (uint16_t n = 0; n < NUMBER_OF_POOLS; n++) {
|
||||
//Not sure if new allocates all stores in order. so better be careful.
|
||||
if ((store[n] <= ptr) && (&store[n][n_elements[n]*element_sizes[n]]) > ptr) {
|
||||
localId.pool_index = n;
|
||||
uint32_t deltaAddress = ptr - store[n];
|
||||
//Getting any data from the right "block" is ok. This is necessary, as IF's sometimes don't point to the first element of an object.
|
||||
localId.packet_index = deltaAddress / element_sizes[n];
|
||||
result = deleteData(localId);
|
||||
// if (deltaAddress % element_sizes[n] != 0) {
|
||||
// error << "Pool::deleteData: address not aligned!" << std::endl;
|
||||
// }
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (storeId != NULL) {
|
||||
*storeId = localId;
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
template<uint8_t NUMBER_OF_POOLS>
|
||||
inline ReturnValue_t LocalPool<NUMBER_OF_POOLS>::initialize() {
|
||||
ReturnValue_t result = SystemObject::initialize();
|
||||
if (result != RETURN_OK) {
|
||||
return result;
|
||||
}
|
||||
internalErrorReporter = objectManager->get<InternalErrorReporterIF>(objects::INTERNAL_ERROR_REPORTER);
|
||||
if (internalErrorReporter == NULL){
|
||||
return RETURN_FAILED;
|
||||
}
|
||||
|
||||
//Check if any pool size is larger than the maximum allowed.
|
||||
for (uint8_t count = 0; count < NUMBER_OF_POOLS; count++) {
|
||||
if (element_sizes[count] >= STORAGE_FREE) {
|
||||
error
|
||||
<< "LocalPool::initialize: Pool is too large! Max. allowed size is: "
|
||||
<< (STORAGE_FREE - 1) << std::endl;
|
||||
return RETURN_FAILED;
|
||||
}
|
||||
}
|
||||
return RETURN_OK;
|
||||
}
|
||||
#include <framework/storagemanager/LocalPool.tpp>
|
||||
|
||||
#endif /* FRAMEWORK_STORAGEMANAGER_LOCALPOOL_H_ */
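A minimal construction sketch, assuming the constructor documented above; the object ID and the concrete pool layout are hypothetical example values (element sizes must be passed in ascending order):

// Five sub-pools with ascending element sizes; not registered in the
// object manager and without spilling to higher pools (the defaults).
const uint16_t exampleElementSizes[5] = {32, 64, 128, 256, 1024};
const uint16_t exampleNumberOfElements[5] = {10, 10, 8, 4, 2};
LocalPool<5> examplePool(0x12345678 /* hypothetical object ID */,
        exampleElementSizes, exampleNumberOfElements);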
|
||||
|
260
storagemanager/LocalPool.tpp
Normal file
@ -0,0 +1,260 @@
|
||||
#ifndef FRAMEWORK_STORAGEMANAGER_LOCALPOOL_TPP_
|
||||
#define FRAMEWORK_STORAGEMANAGER_LOCALPOOL_TPP_
|
||||
|
||||
template<uint8_t NUMBER_OF_POOLS>
|
||||
inline LocalPool<NUMBER_OF_POOLS>::LocalPool(object_id_t setObjectId,
|
||||
const uint16_t element_sizes[NUMBER_OF_POOLS],
|
||||
const uint16_t n_elements[NUMBER_OF_POOLS], bool registered,
|
||||
bool spillsToHigherPools) :
|
||||
SystemObject(setObjectId, registered), internalErrorReporter(nullptr),
|
||||
spillsToHigherPools(spillsToHigherPools)
|
||||
{
|
||||
for (uint16_t n = 0; n < NUMBER_OF_POOLS; n++) {
|
||||
this->element_sizes[n] = element_sizes[n];
|
||||
this->n_elements[n] = n_elements[n];
|
||||
store[n] = new uint8_t[n_elements[n] * element_sizes[n]];
|
||||
size_list[n] = new uint32_t[n_elements[n]];
|
||||
memset(store[n], 0x00, (n_elements[n] * element_sizes[n]));
|
||||
//TODO checkme
|
||||
memset(size_list[n], STORAGE_FREE, (n_elements[n] * sizeof(**size_list)));
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
template<uint8_t NUMBER_OF_POOLS>
|
||||
inline ReturnValue_t LocalPool<NUMBER_OF_POOLS>::findEmpty(uint16_t pool_index,
|
||||
uint16_t* element) {
|
||||
ReturnValue_t status = DATA_STORAGE_FULL;
|
||||
for (uint16_t foundElement = 0; foundElement < n_elements[pool_index];
|
||||
foundElement++) {
|
||||
if (size_list[pool_index][foundElement] == STORAGE_FREE) {
|
||||
*element = foundElement;
|
||||
status = RETURN_OK;
|
||||
break;
|
||||
}
|
||||
}
|
||||
return status;
|
||||
}
|
||||
|
||||
template<uint8_t NUMBER_OF_POOLS>
|
||||
inline void LocalPool<NUMBER_OF_POOLS>::write(store_address_t packet_id,
|
||||
const uint8_t* data, size_t size) {
|
||||
uint8_t* ptr;
|
||||
uint32_t packet_position = getRawPosition(packet_id);
|
||||
|
||||
//check size? -> Not necessary, because size is checked before calling this function.
|
||||
ptr = &store[packet_id.pool_index][packet_position];
|
||||
memcpy(ptr, data, size);
|
||||
size_list[packet_id.pool_index][packet_id.packet_index] = size;
|
||||
}
|
||||
|
||||
//Returns page size of 0 in case store_index is illegal
|
||||
template<uint8_t NUMBER_OF_POOLS>
|
||||
inline uint32_t LocalPool<NUMBER_OF_POOLS>::getPageSize(uint16_t pool_index) {
|
||||
if (pool_index < NUMBER_OF_POOLS) {
|
||||
return element_sizes[pool_index];
|
||||
} else {
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
template<uint8_t NUMBER_OF_POOLS>
|
||||
inline ReturnValue_t LocalPool<NUMBER_OF_POOLS>::getPoolIndex(
|
||||
size_t packet_size, uint16_t* poolIndex, uint16_t startAtIndex) {
|
||||
for (uint16_t n = startAtIndex; n < NUMBER_OF_POOLS; n++) {
|
||||
//debug << "LocalPool " << getObjectId() << "::getPoolIndex: Pool: " <<
|
||||
// n << ", Element Size: " << element_sizes[n] << std::endl;
|
||||
if (element_sizes[n] >= packet_size) {
|
||||
*poolIndex = n;
|
||||
return RETURN_OK;
|
||||
}
|
||||
}
|
||||
return DATA_TOO_LARGE;
|
||||
}
|
||||
|
||||
template<uint8_t NUMBER_OF_POOLS>
|
||||
inline uint32_t LocalPool<NUMBER_OF_POOLS>::getRawPosition(
|
||||
store_address_t packet_id) {
|
||||
return packet_id.packet_index * element_sizes[packet_id.pool_index];
|
||||
}
|
||||
|
||||
template<uint8_t NUMBER_OF_POOLS>
|
||||
inline ReturnValue_t LocalPool<NUMBER_OF_POOLS>::reserveSpace(
|
||||
const uint32_t size, store_address_t* address, bool ignoreFault) {
|
||||
ReturnValue_t status = getPoolIndex(size, &address->pool_index);
|
||||
if (status != RETURN_OK) {
|
||||
sif::error << "LocalPool( " << std::hex << getObjectId() << std::dec
|
||||
<< " )::reserveSpace: Packet too large." << std::endl;
|
||||
return status;
|
||||
}
|
||||
status = findEmpty(address->pool_index, &address->packet_index);
|
||||
while (status != RETURN_OK && spillsToHigherPools) {
|
||||
status = getPoolIndex(size, &address->pool_index, address->pool_index + 1);
|
||||
if (status != RETURN_OK) {
|
||||
//We don't find any fitting pool anymore.
|
||||
break;
|
||||
}
|
||||
status = findEmpty(address->pool_index, &address->packet_index);
|
||||
}
|
||||
if (status == RETURN_OK) {
|
||||
// if (getObjectId() == objects::IPC_STORE && address->pool_index >= 3) {
|
||||
// debug << "Reserve: Pool: " << std::dec << address->pool_index <<
|
||||
// " Index: " << address->packet_index << std::endl;
|
||||
// }
|
||||
|
||||
size_list[address->pool_index][address->packet_index] = size;
|
||||
} else {
|
||||
if (!ignoreFault and internalErrorReporter != nullptr) {
|
||||
internalErrorReporter->storeFull();
|
||||
}
|
||||
// error << "LocalPool( " << std::hex << getObjectId() << std::dec
|
||||
// << " )::reserveSpace: Packet store is full." << std::endl;
|
||||
}
|
||||
return status;
|
||||
}
|
||||
|
||||
template<uint8_t NUMBER_OF_POOLS>
|
||||
inline LocalPool<NUMBER_OF_POOLS>::~LocalPool(void) {
|
||||
for (uint16_t n = 0; n < NUMBER_OF_POOLS; n++) {
|
||||
delete[] store[n];
|
||||
delete[] size_list[n];
|
||||
}
|
||||
}
|
||||
|
||||
template<uint8_t NUMBER_OF_POOLS>
|
||||
inline ReturnValue_t LocalPool<NUMBER_OF_POOLS>::addData(store_address_t* storageId,
|
||||
const uint8_t* data, size_t size, bool ignoreFault) {
|
||||
ReturnValue_t status = reserveSpace(size, storageId, ignoreFault);
|
||||
if (status == RETURN_OK) {
|
||||
write(*storageId, data, size);
|
||||
}
|
||||
return status;
|
||||
}
|
||||
|
||||
template<uint8_t NUMBER_OF_POOLS>
|
||||
inline ReturnValue_t LocalPool<NUMBER_OF_POOLS>::getFreeElement(
|
||||
store_address_t* storageId, const size_t size,
|
||||
uint8_t** p_data, bool ignoreFault) {
|
||||
ReturnValue_t status = reserveSpace(size, storageId, ignoreFault);
|
||||
if (status == RETURN_OK) {
|
||||
*p_data = &store[storageId->pool_index][getRawPosition(*storageId)];
|
||||
} else {
|
||||
*p_data = NULL;
|
||||
}
|
||||
return status;
|
||||
}
|
||||
|
||||
template<uint8_t NUMBER_OF_POOLS>
|
||||
inline ReturnValue_t LocalPool<NUMBER_OF_POOLS>::getData(
|
||||
store_address_t packet_id, const uint8_t** packet_ptr, size_t* size) {
|
||||
uint8_t* tempData = NULL;
|
||||
ReturnValue_t status = modifyData(packet_id, &tempData, size);
|
||||
*packet_ptr = tempData;
|
||||
return status;
|
||||
}
|
||||
|
||||
template<uint8_t NUMBER_OF_POOLS>
|
||||
inline ReturnValue_t LocalPool<NUMBER_OF_POOLS>::modifyData(
|
||||
store_address_t packet_id, uint8_t** packet_ptr, size_t* size) {
|
||||
ReturnValue_t status = RETURN_FAILED;
|
||||
if (packet_id.pool_index >= NUMBER_OF_POOLS) {
|
||||
return ILLEGAL_STORAGE_ID;
|
||||
}
|
||||
if ((packet_id.packet_index >= n_elements[packet_id.pool_index])) {
|
||||
return ILLEGAL_STORAGE_ID;
|
||||
}
|
||||
if (size_list[packet_id.pool_index][packet_id.packet_index]
|
||||
!= STORAGE_FREE) {
|
||||
uint32_t packet_position = getRawPosition(packet_id);
|
||||
*packet_ptr = &store[packet_id.pool_index][packet_position];
|
||||
*size = size_list[packet_id.pool_index][packet_id.packet_index];
|
||||
status = RETURN_OK;
|
||||
} else {
|
||||
status = DATA_DOES_NOT_EXIST;
|
||||
}
|
||||
return status;
|
||||
}
|
||||
|
||||
template<uint8_t NUMBER_OF_POOLS>
|
||||
inline ReturnValue_t LocalPool<NUMBER_OF_POOLS>::deleteData(
|
||||
store_address_t packet_id) {
|
||||
//if (getObjectId() == objects::IPC_STORE && packet_id.pool_index >= 3) {
|
||||
// debug << "Delete: Pool: " << std::dec << packet_id.pool_index << " Index: "
|
||||
// << packet_id.packet_index << std::endl;
|
||||
//}
|
||||
ReturnValue_t status = RETURN_OK;
|
||||
uint32_t page_size = getPageSize(packet_id.pool_index);
|
||||
if ((page_size != 0)
|
||||
&& (packet_id.packet_index < n_elements[packet_id.pool_index])) {
|
||||
uint16_t packet_position = getRawPosition(packet_id);
|
||||
uint8_t* ptr = &store[packet_id.pool_index][packet_position];
|
||||
memset(ptr, 0, page_size);
|
||||
//Set free list
|
||||
size_list[packet_id.pool_index][packet_id.packet_index] = STORAGE_FREE;
|
||||
} else {
|
||||
//pool_index or packet_index is too large
|
||||
sif::error << "LocalPool:deleteData failed." << std::endl;
|
||||
status = ILLEGAL_STORAGE_ID;
|
||||
}
|
||||
return status;
|
||||
}
|
||||
|
||||
template<uint8_t NUMBER_OF_POOLS>
|
||||
inline void LocalPool<NUMBER_OF_POOLS>::clearStore() {
|
||||
for (uint16_t n = 0; n < NUMBER_OF_POOLS; n++) {
|
||||
//TODO checkme
|
||||
memset(size_list[n], STORAGE_FREE, (n_elements[n] * sizeof(**size_list)));
|
||||
}
|
||||
}
|
||||
|
||||
template<uint8_t NUMBER_OF_POOLS>
|
||||
inline ReturnValue_t LocalPool<NUMBER_OF_POOLS>::deleteData(uint8_t* ptr,
|
||||
size_t size, store_address_t* storeId) {
|
||||
store_address_t localId;
|
||||
ReturnValue_t result = ILLEGAL_ADDRESS;
|
||||
for (uint16_t n = 0; n < NUMBER_OF_POOLS; n++) {
|
||||
//Not sure if new allocates all stores in order. so better be careful.
|
||||
if ((store[n] <= ptr) && (&store[n][n_elements[n]*element_sizes[n]]) > ptr) {
|
||||
localId.pool_index = n;
|
||||
uint32_t deltaAddress = ptr - store[n];
|
||||
// Getting any data from the right "block" is ok.
|
||||
// This is necessary, as IF's sometimes don't point to the first
|
||||
// element of an object.
|
||||
localId.packet_index = deltaAddress / element_sizes[n];
|
||||
result = deleteData(localId);
|
||||
//if (deltaAddress % element_sizes[n] != 0) {
|
||||
// error << "Pool::deleteData: address not aligned!" << std::endl;
|
||||
//}
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (storeId != NULL) {
|
||||
*storeId = localId;
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
template<uint8_t NUMBER_OF_POOLS>
|
||||
inline ReturnValue_t LocalPool<NUMBER_OF_POOLS>::initialize() {
|
||||
ReturnValue_t result = SystemObject::initialize();
|
||||
if (result != RETURN_OK) {
|
||||
return result;
|
||||
}
|
||||
internalErrorReporter = objectManager->get<InternalErrorReporterIF>(
|
||||
objects::INTERNAL_ERROR_REPORTER);
|
||||
if (internalErrorReporter == NULL){
|
||||
return RETURN_FAILED;
|
||||
}
|
||||
|
||||
//Check if any pool size is larger than the maximum allowed.
|
||||
for (uint8_t count = 0; count < NUMBER_OF_POOLS; count++) {
|
||||
if (element_sizes[count] >= STORAGE_FREE) {
|
||||
sif::error << "LocalPool::initialize: Pool is too large! "
|
||||
"Max. allowed size is: " << (STORAGE_FREE - 1) << std::endl;
|
||||
return RETURN_FAILED;
|
||||
}
|
||||
}
|
||||
return RETURN_OK;
|
||||
}
|
||||
|
||||
#endif
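One note on reserveSpace above: it first selects the smallest pool whose element size fits the packet and only retries in larger pools when that pool is full and spillsToHigherPools was set on construction. A small sketch of such a spilling pool, with hypothetical sizes and object ID:

// With two 32 byte slots and two 64 byte slots, a third small packet
// spills into the 64 byte pool instead of failing with DATA_STORAGE_FULL.
const uint16_t spillSizes[2] = {32, 64};
const uint16_t spillCounts[2] = {2, 2};
LocalPool<2> spillingPool(0x0BADC0DE /* hypothetical object ID */,
        spillSizes, spillCounts, false, true /* spillsToHigherPools */);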
|
@ -1,12 +1,3 @@
|
||||
/**
|
||||
* @file PoolManager
|
||||
*
|
||||
* @date 02.02.2012
|
||||
* @author Bastian Baetz
|
||||
*
|
||||
* @brief This file contains the definition of the PoolManager class.
|
||||
*/
|
||||
|
||||
#ifndef POOLMANAGER_H_
|
||||
#define POOLMANAGER_H_
|
||||
|
||||
@ -17,70 +8,39 @@
|
||||
/**
|
||||
* @brief The PoolManager class provides an intermediate data storage with
|
||||
* a fixed pool size policy for inter-process communication.
|
||||
* \details Uses local pool, but is thread-safe.
|
||||
* @details Uses local pool calls but is thread safe by protecting the call
|
||||
* with a lock.
|
||||
*/
|
||||
|
||||
template <uint8_t NUMBER_OF_POOLS = 5>
|
||||
class PoolManager : public LocalPool<NUMBER_OF_POOLS> {
|
||||
public:
|
||||
PoolManager(object_id_t setObjectId,
|
||||
const uint16_t element_sizes[NUMBER_OF_POOLS],
|
||||
const uint16_t n_elements[NUMBER_OF_POOLS]);
|
||||
|
||||
//! @brief In the PoolManager's destructor all allocated memory is freed.
|
||||
virtual ~PoolManager();
|
||||
|
||||
//! @brief LocalPool overrides for thread-safety.
|
||||
ReturnValue_t deleteData(store_address_t) override;
|
||||
ReturnValue_t deleteData(uint8_t* buffer, size_t size,
|
||||
store_address_t* storeId = NULL) override;
|
||||
ReturnValue_t modifyData(store_address_t packet_id, uint8_t** packet_ptr,
|
||||
size_t* size) override;
|
||||
protected:
|
||||
/**
|
||||
* Overwritten for thread safety.
|
||||
* Locks during execution.
|
||||
*/
|
||||
virtual ReturnValue_t reserveSpace(const uint32_t size, store_address_t* address, bool ignoreFault);
|
||||
ReturnValue_t reserveSpace(const uint32_t size, store_address_t* address,
|
||||
bool ignoreFault) override;
|
||||
|
||||
/**
|
||||
* \brief The mutex is created in the constructor and makes access mutual exclusive.
|
||||
* \details Locking and unlocking is done during searching for free slots and deleting existing slots.
|
||||
* @brief The mutex is created in the constructor and makes
|
||||
* access mutual exclusive.
|
||||
* @details Locking and unlocking is done during searching for free slots
|
||||
* and deleting existing slots.
|
||||
*/
|
||||
MutexIF* mutex;
|
||||
public:
|
||||
PoolManager( object_id_t setObjectId, const uint16_t element_sizes[NUMBER_OF_POOLS], const uint16_t n_elements[NUMBER_OF_POOLS] );
|
||||
/**
|
||||
* @brief In the PoolManager's destructor all allocated memory is freed.
|
||||
*/
|
||||
virtual ~PoolManager( void );
|
||||
/**
|
||||
* Overwritten for thread safety.
|
||||
*/
|
||||
virtual ReturnValue_t deleteData(store_address_t);
|
||||
virtual ReturnValue_t deleteData(uint8_t* buffer, uint32_t size, store_address_t* storeId = NULL);
|
||||
};
|
||||
|
||||
template<uint8_t NUMBER_OF_POOLS>
|
||||
inline ReturnValue_t PoolManager<NUMBER_OF_POOLS>::reserveSpace(const uint32_t size, store_address_t* address, bool ignoreFault) {
|
||||
MutexHelper mutexHelper(mutex,MutexIF::NO_TIMEOUT);
|
||||
ReturnValue_t status = LocalPool<NUMBER_OF_POOLS>::reserveSpace(size,address,ignoreFault);
|
||||
return status;
|
||||
}
|
||||
|
||||
template<uint8_t NUMBER_OF_POOLS>
|
||||
inline PoolManager<NUMBER_OF_POOLS>::PoolManager(object_id_t setObjectId,
|
||||
const uint16_t element_sizes[NUMBER_OF_POOLS],
|
||||
const uint16_t n_elements[NUMBER_OF_POOLS]) : LocalPool<NUMBER_OF_POOLS>(setObjectId, element_sizes, n_elements, true) {
|
||||
mutex = MutexFactory::instance()->createMutex();
|
||||
}
|
||||
|
||||
template<uint8_t NUMBER_OF_POOLS>
|
||||
inline PoolManager<NUMBER_OF_POOLS>::~PoolManager(void) {
|
||||
MutexFactory::instance()->deleteMutex(mutex);
|
||||
}
|
||||
|
||||
template<uint8_t NUMBER_OF_POOLS>
|
||||
inline ReturnValue_t PoolManager<NUMBER_OF_POOLS>::deleteData(
|
||||
store_address_t packet_id) {
|
||||
// debug << "PoolManager( " << translateObject(getObjectId()) << " )::deleteData from store " << packet_id.pool_index << ". id is " << packet_id.packet_index << std::endl;
|
||||
MutexHelper mutexHelper(mutex,MutexIF::NO_TIMEOUT);
|
||||
ReturnValue_t status = LocalPool<NUMBER_OF_POOLS>::deleteData(packet_id);
|
||||
return status;
|
||||
}
|
||||
|
||||
template<uint8_t NUMBER_OF_POOLS>
|
||||
inline ReturnValue_t PoolManager<NUMBER_OF_POOLS>::deleteData(uint8_t* buffer, uint32_t size,
|
||||
store_address_t* storeId) {
|
||||
MutexHelper mutexHelper(mutex,MutexIF::NO_TIMEOUT);
|
||||
ReturnValue_t status = LocalPool<NUMBER_OF_POOLS>::deleteData(buffer, size, storeId);
|
||||
return status;
|
||||
}
|
||||
#include "PoolManager.tpp"
|
||||
|
||||
#endif /* POOLMANAGER_H_ */
|
||||
|
55
storagemanager/PoolManager.tpp
Normal file
@ -0,0 +1,55 @@
|
||||
#ifndef FRAMEWORK_STORAGEMANAGER_POOLMANAGER_TPP_
|
||||
#define FRAMEWORK_STORAGEMANAGER_POOLMANAGER_TPP_
|
||||
|
||||
template<uint8_t NUMBER_OF_POOLS>
|
||||
inline PoolManager<NUMBER_OF_POOLS>::PoolManager(object_id_t setObjectId,
|
||||
const uint16_t element_sizes[NUMBER_OF_POOLS],
|
||||
const uint16_t n_elements[NUMBER_OF_POOLS]) :
|
||||
LocalPool<NUMBER_OF_POOLS>(setObjectId, element_sizes, n_elements, true) {
|
||||
mutex = MutexFactory::instance()->createMutex();
|
||||
}
|
||||
|
||||
template<uint8_t NUMBER_OF_POOLS>
|
||||
inline PoolManager<NUMBER_OF_POOLS>::~PoolManager(void) {
|
||||
MutexFactory::instance()->deleteMutex(mutex);
|
||||
}
|
||||
|
||||
template<uint8_t NUMBER_OF_POOLS>
|
||||
inline ReturnValue_t PoolManager<NUMBER_OF_POOLS>::reserveSpace(
|
||||
const uint32_t size, store_address_t* address, bool ignoreFault) {
|
||||
MutexHelper mutexHelper(mutex,MutexIF::NO_TIMEOUT);
|
||||
ReturnValue_t status = LocalPool<NUMBER_OF_POOLS>::reserveSpace(size,
|
||||
address,ignoreFault);
|
||||
return status;
|
||||
}
|
||||
|
||||
template<uint8_t NUMBER_OF_POOLS>
|
||||
inline ReturnValue_t PoolManager<NUMBER_OF_POOLS>::deleteData(
|
||||
store_address_t packet_id) {
|
||||
// debug << "PoolManager( " << translateObject(getObjectId()) <<
|
||||
// " )::deleteData from store " << packet_id.pool_index <<
|
||||
// ". id is "<< packet_id.packet_index << std::endl;
|
||||
MutexHelper mutexHelper(mutex,MutexIF::NO_TIMEOUT);
|
||||
ReturnValue_t status = LocalPool<NUMBER_OF_POOLS>::deleteData(packet_id);
|
||||
return status;
|
||||
}
|
||||
|
||||
template<uint8_t NUMBER_OF_POOLS>
|
||||
inline ReturnValue_t PoolManager<NUMBER_OF_POOLS>::deleteData(uint8_t* buffer,
|
||||
size_t size, store_address_t* storeId) {
|
||||
MutexHelper mutexHelper(mutex,MutexIF::NO_TIMEOUT);
|
||||
ReturnValue_t status = LocalPool<NUMBER_OF_POOLS>::deleteData(buffer,
|
||||
size, storeId);
|
||||
return status;
|
||||
}
|
||||
|
||||
template<uint8_t NUMBER_OF_POOLS>
|
||||
inline ReturnValue_t PoolManager<NUMBER_OF_POOLS>::modifyData(
|
||||
store_address_t packet_id, uint8_t** packet_ptr, size_t* size) {
|
||||
MutexHelper mutexHelper(mutex,MutexIF::NO_TIMEOUT);
|
||||
ReturnValue_t status = LocalPool<NUMBER_OF_POOLS>::modifyData(packet_id,
|
||||
packet_ptr, size);
|
||||
return status;
|
||||
}
|
||||
|
||||
#endif
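The PoolManager variant shown here only wraps the LocalPool calls in a MutexHelper, so it is the right choice wherever several tasks share one store (for instance an IPC or TM store). A construction sketch with hypothetical values; note that the constructor passes registered = true to the LocalPool base:

const uint16_t ipcElementSizes[5] = {32, 64, 128, 256, 1024};
const uint16_t ipcNumberOfElements[5] = {20, 20, 10, 10, 5};
// reserveSpace, deleteData and modifyData are serialized by the mutex.
PoolManager<5> exampleIpcStore(0x000CAFFE /* hypothetical object ID */,
        ipcElementSizes, ipcNumberOfElements);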
|
@ -6,18 +6,19 @@
|
||||
#include <stddef.h>
|
||||
|
||||
/**
|
||||
* This union defines the type that identifies where a data packet is stored in the store.
|
||||
* It comprises of a raw part to read it as raw value and a structured part to use it in
|
||||
* pool-like stores.
|
||||
* @brief This union defines the type that identifies where a data packet is
|
||||
* stored in the store.
|
||||
* It consists of a raw part to read it as raw value and
|
||||
* a structured part to use it in pool-like stores.
|
||||
*/
|
||||
union store_address_t {
|
||||
/**
|
||||
* Default Constructor, initializing to INVALID_ADDRESS
|
||||
*/
|
||||
store_address_t():raw(0xFFFFFFFF){}
|
||||
|
||||
/**
|
||||
* Constructor to create an address object using the raw address
|
||||
*
|
||||
* @param rawAddress
|
||||
*/
|
||||
store_address_t(uint32_t rawAddress):raw(rawAddress){}
|
||||
@ -30,7 +31,8 @@ union store_address_t {
|
||||
* @param packetIndex
|
||||
*/
|
||||
store_address_t(uint16_t poolIndex, uint16_t packetIndex):
|
||||
pool_index(poolIndex),packet_index(packetIndex){}
|
||||
pool_index(poolIndex),packet_index(packetIndex) {}
|
||||
|
||||
/**
|
||||
* A structure with two elements to access the store address pool-like.
|
||||
*/
|
||||
@ -48,6 +50,10 @@ union store_address_t {
|
||||
* Alternative access to the raw value.
|
||||
*/
|
||||
uint32_t raw;
|
||||
|
||||
bool operator==(const store_address_t& other) const {
|
||||
return raw == other.raw;
|
||||
}
|
||||
};
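A short sketch of how the union is meant to be used: both views alias the same 32 bit value, and the new operator== compares the raw representation.

store_address_t exampleAddress(2, 7);              // pool_index 2, packet_index 7
store_address_t copiedAddress(exampleAddress.raw); // same address via raw value
bool sameSlot = (exampleAddress == copiedAddress); // true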
|
||||
|
||||
/**
|
||||
@ -94,7 +100,8 @@ public:
|
||||
* @li RETURN_FAILED if data could not be added.
|
||||
* storageId is unchanged then.
|
||||
*/
|
||||
virtual ReturnValue_t addData(store_address_t* storageId, const uint8_t * data, uint32_t size, bool ignoreFault = false) = 0;
|
||||
virtual ReturnValue_t addData(store_address_t* storageId,
|
||||
const uint8_t * data, size_t size, bool ignoreFault = false) = 0;
|
||||
/**
|
||||
* @brief With deleteData, the storageManager frees the memory region
|
||||
* identified by packet_id.
|
||||
@ -105,14 +112,16 @@ public:
|
||||
*/
|
||||
virtual ReturnValue_t deleteData(store_address_t packet_id) = 0;
|
||||
/**
|
||||
* @brief Another deleteData which uses the pointer and size of the stored data to delete the content.
|
||||
* @brief Another deleteData which uses the pointer and size of the
|
||||
* stored data to delete the content.
|
||||
* @param buffer Pointer to the data.
|
||||
* @param size Size of data to be stored.
|
||||
* @param storeId Store id of the deleted element (optional)
|
||||
* @return @li RETURN_OK on success.
|
||||
* @li failure code if deletion did not work
|
||||
*/
|
||||
virtual ReturnValue_t deleteData(uint8_t* buffer, uint32_t size, store_address_t* storeId = NULL) = 0;
|
||||
virtual ReturnValue_t deleteData(uint8_t* buffer, size_t size,
|
||||
store_address_t* storeId = nullptr) = 0;
|
||||
/**
|
||||
* @brief getData returns an address to data and the size of the data
|
||||
* for a given packet_id.
|
||||
@ -125,12 +134,12 @@ public:
|
||||
* (e.g. an illegal packet_id was passed).
|
||||
*/
|
||||
virtual ReturnValue_t getData(store_address_t packet_id,
|
||||
const uint8_t** packet_ptr, uint32_t* size) = 0;
|
||||
const uint8_t** packet_ptr, size_t* size) = 0;
|
||||
/**
|
||||
* Same as above, but not const and therefore modifiable.
|
||||
*/
|
||||
virtual ReturnValue_t modifyData(store_address_t packet_id,
|
||||
uint8_t** packet_ptr, uint32_t* size) = 0;
|
||||
uint8_t** packet_ptr, size_t* size) = 0;
|
||||
/**
|
||||
* This method reserves an element of \c size.
|
||||
*
|
||||
@ -144,13 +153,13 @@ public:
|
||||
* @li RETURN_FAILED if data could not be added.
|
||||
* storageId is unchanged then.
|
||||
*/
|
||||
virtual ReturnValue_t getFreeElement(store_address_t* storageId, const uint32_t size, uint8_t** p_data, bool ignoreFault = false ) = 0;
|
||||
virtual ReturnValue_t getFreeElement(store_address_t* storageId,
|
||||
const size_t size, uint8_t** p_data, bool ignoreFault = false ) = 0;
|
||||
/**
|
||||
* Clears the whole store.
|
||||
* Use with care!
|
||||
*/
|
||||
virtual void clearStore() = 0;
|
||||
|
||||
};
|
||||
|
||||
#endif /* STORAGEMANAGERIF_H_ */
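A minimal round trip through the interface, assuming a valid StorageManagerIF pointer (for instance the IPC store fetched from the object manager); it illustrates the size_t based signatures introduced by this change set:

ReturnValue_t exampleRoundTrip(StorageManagerIF* store) {
    const uint8_t payload[4] = {0xDE, 0xAD, 0xBE, 0xEF};
    store_address_t storeId;
    ReturnValue_t result = store->addData(&storeId, payload, sizeof(payload));
    if (result != HasReturnvaluesIF::RETURN_OK) {
        return result;
    }
    const uint8_t* readPtr = nullptr;
    size_t readSize = 0; // size_t instead of uint32_t after this change
    result = store->getData(storeId, &readPtr, &readSize);
    if (result != HasReturnvaluesIF::RETURN_OK) {
        return result;
    }
    // ... evaluate readPtr / readSize ...
    return store->deleteData(storeId);
}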
|
||||
|
@ -162,7 +162,7 @@ ReturnValue_t Subsystem::handleCommandMessage(CommandMessage* message) {
|
||||
case ModeSequenceMessage::ADD_SEQUENCE: {
|
||||
FixedArrayList<ModeListEntry, MAX_LENGTH_OF_TABLE_OR_SEQUENCE> sequence;
|
||||
const uint8_t *pointer;
|
||||
uint32_t sizeRead;
|
||||
size_t sizeRead;
|
||||
result = IPCStore->getData(
|
||||
ModeSequenceMessage::getStoreAddress(message), &pointer,
|
||||
&sizeRead);
|
||||
@ -188,7 +188,7 @@ ReturnValue_t Subsystem::handleCommandMessage(CommandMessage* message) {
|
||||
case ModeSequenceMessage::ADD_TABLE: {
|
||||
FixedArrayList<ModeListEntry, MAX_LENGTH_OF_TABLE_OR_SEQUENCE> table;
|
||||
const uint8_t *pointer;
|
||||
uint32_t sizeRead;
|
||||
size_t sizeRead;
|
||||
result = IPCStore->getData(
|
||||
ModeSequenceMessage::getStoreAddress(message), &pointer,
|
||||
&sizeRead);
|
||||
|
@ -88,7 +88,7 @@ void SubsystemBase::executeTable(HybridIterator<ModeListEntry> tableIter, Submod
|
||||
object_id_t object = tableIter.value->getObject();
|
||||
if ((iter = childrenMap.find(object)) == childrenMap.end()) {
|
||||
//illegal table entry, should only happen due to misconfigured mode table
|
||||
debug << std::hex << getObjectId() << ": invalid mode table entry"
|
||||
sif::debug << std::hex << getObjectId() << ": invalid mode table entry"
|
||||
<< std::endl;
|
||||
continue;
|
||||
}
|
||||
|
@ -13,7 +13,7 @@ CCSDSDistributor::~CCSDSDistributor() {
|
||||
iterator_t CCSDSDistributor::selectDestination() {
|
||||
// debug << "CCSDSDistributor::selectDestination received: " << this->currentMessage.getStorageId().pool_index << ", " << this->currentMessage.getStorageId().packet_index << std::endl;
|
||||
const uint8_t* p_packet = NULL;
|
||||
uint32_t size = 0;
|
||||
size_t size = 0;
|
||||
//TODO check returncode?
|
||||
this->tcStore->getData( this->currentMessage.getStorageId(), &p_packet, &size );
|
||||
SpacePacketBase current_packet( p_packet );
|
||||
|
@ -31,7 +31,7 @@ iterator_t PUSDistributor::selectDestination() {
|
||||
}
|
||||
|
||||
if (tcStatus != RETURN_OK) {
|
||||
debug << "PUSDistributor::handlePacket: error with " << (int) tcStatus
|
||||
sif::debug << "PUSDistributor::handlePacket: error with " << (int) tcStatus
|
||||
<< std::endl;
|
||||
return this->queueMap.end();
|
||||
} else {
|
||||
|
@ -39,14 +39,14 @@ ReturnValue_t TcDistributor::handlePacket() {
|
||||
}
|
||||
|
||||
void TcDistributor::print() {
|
||||
debug << "Distributor content is: " << std::endl << "ID\t| message queue id"
|
||||
sif::debug << "Distributor content is: " << std::endl << "ID\t| message queue id"
|
||||
<< std::endl;
|
||||
for (iterator_t it = this->queueMap.begin(); it != this->queueMap.end();
|
||||
it++) {
|
||||
debug << it->first << "\t| 0x" << std::hex << it->second << std::dec
|
||||
sif::debug << it->first << "\t| 0x" << std::hex << it->second << std::dec
|
||||
<< std::endl;
|
||||
}
|
||||
debug << std::dec;
|
||||
sif::debug << std::dec;
|
||||
|
||||
}
|
||||
|
||||
|
@ -61,11 +61,11 @@ uint8_t TcPacketBase::getPusVersionNumber() {
|
||||
|
||||
void TcPacketBase::print() {
|
||||
uint8_t * wholeData = getWholeData();
|
||||
debug << "TcPacket contains: " << std::endl;
|
||||
sif::debug << "TcPacket contains: " << std::endl;
|
||||
for (uint8_t count = 0; count < getFullSize(); ++count) {
|
||||
debug << std::hex << (uint16_t) wholeData[count] << " ";
|
||||
sif::debug << std::hex << (uint16_t) wholeData[count] << " ";
|
||||
}
|
||||
debug << std::dec << std::endl;
|
||||
sif::debug << std::dec << std::endl;
|
||||
}
|
||||
|
||||
void TcPacketBase::initializeTcPacket(uint16_t apid, uint16_t sequenceCount,
|
||||
|
@ -48,7 +48,7 @@ bool TcPacketStored::checkAndSetStore() {
|
||||
if (this->store == NULL) {
|
||||
this->store = objectManager->get<StorageManagerIF>(objects::TC_STORE);
|
||||
if (this->store == NULL) {
|
||||
error << "TcPacketStored::TcPacketStored: TC Store not found!"
|
||||
sif::error << "TcPacketStored::TcPacketStored: TC Store not found!"
|
||||
<< std::endl;
|
||||
return false;
|
||||
}
|
||||
@ -59,7 +59,7 @@ bool TcPacketStored::checkAndSetStore() {
|
||||
void TcPacketStored::setStoreAddress(store_address_t setAddress) {
|
||||
this->storeAddress = setAddress;
|
||||
const uint8_t* temp_data = NULL;
|
||||
uint32_t temp_size;
|
||||
size_t temp_size;
|
||||
ReturnValue_t status = StorageManagerIF::RETURN_FAILED;
|
||||
if (this->checkAndSetStore()) {
|
||||
status = this->store->getData(this->storeAddress, &temp_data,
|
||||
@ -79,7 +79,7 @@ store_address_t TcPacketStored::getStoreAddress() {
|
||||
|
||||
bool TcPacketStored::isSizeCorrect() {
|
||||
const uint8_t* temp_data = NULL;
|
||||
uint32_t temp_size;
|
||||
size_t temp_size;
|
||||
ReturnValue_t status = this->store->getData(this->storeAddress, &temp_data,
|
||||
&temp_size);
|
||||
if (status == StorageManagerIF::RETURN_OK) {
|
||||
|
@ -63,7 +63,7 @@ bool TmPacketBase::checkAndSetStamper() {
|
||||
if (timeStamper == NULL) {
|
||||
timeStamper = objectManager->get<TimeStamperIF>(timeStamperId);
|
||||
if (timeStamper == NULL) {
|
||||
error << "TmPacketBase::checkAndSetStamper: Stamper not found!"
|
||||
sif::error << "TmPacketBase::checkAndSetStamper: Stamper not found!"
|
||||
<< std::endl;
|
||||
return false;
|
||||
}
|
||||
|
@ -81,7 +81,7 @@ void TmPacketStored::deletePacket() {
|
||||
void TmPacketStored::setStoreAddress(store_address_t setAddress) {
|
||||
storeAddress = setAddress;
|
||||
const uint8_t* temp_data = NULL;
|
||||
uint32_t temp_size;
|
||||
size_t temp_size;
|
||||
if (!checkAndSetStore()) {
|
||||
return;
|
||||
}
|
||||
@ -98,7 +98,7 @@ bool TmPacketStored::checkAndSetStore() {
|
||||
if (store == NULL) {
|
||||
store = objectManager->get<StorageManagerIF>(objects::TM_STORE);
|
||||
if (store == NULL) {
|
||||
error << "TmPacketStored::TmPacketStored: TM Store not found!"
|
||||
sif::error << "TmPacketStored::TmPacketStored: TM Store not found!"
|
||||
<< std::endl;
|
||||
return false;
|
||||
}
|
||||
|
@ -48,7 +48,7 @@ ReturnValue_t PusServiceBase::performOperation(uint8_t opCode) {
|
||||
break;
|
||||
} else {
|
||||
|
||||
error << "PusServiceBase::performOperation: Service "
|
||||
sif::error << "PusServiceBase::performOperation: Service "
|
||||
<< (uint16_t) this->serviceId
|
||||
<< ": Error receiving packet. Code: " << std::hex << status
|
||||
<< std::dec << std::endl;
|
||||
@ -59,7 +59,7 @@ ReturnValue_t PusServiceBase::performOperation(uint8_t opCode) {
|
||||
return RETURN_OK;
|
||||
} else {
|
||||
|
||||
error << "PusService " << (uint16_t) this->serviceId
|
||||
sif::error << "PusService " << (uint16_t) this->serviceId
|
||||
<< ": performService returned with " << (int16_t) return_code
|
||||
<< std::endl;
|
||||
return RETURN_FAILED;
|
||||
@ -89,7 +89,7 @@ ReturnValue_t PusServiceBase::initialize() {
|
||||
distributor->registerService(this);
|
||||
return RETURN_OK;
|
||||
} else {
|
||||
error << "PusServiceBase::PusServiceBase: Service "
|
||||
sif::error << "PusServiceBase::PusServiceBase: Service "
|
||||
<< (uint32_t) this->serviceId << ": Configuration error."
|
||||
<< " Make sure packetSource and packetDestination are defined correctly" << std::endl;
|
||||
return RETURN_FAILED;
|
||||
|
@ -24,7 +24,7 @@ void VerificationReporter::sendSuccessReport(uint8_t set_report_id,
|
||||
current_packet->getPacketSequenceControl(), 0, set_step);
|
||||
ReturnValue_t status = MessageQueueSenderIF::sendMessage(acknowledgeQueue, &message);
|
||||
if (status != HasReturnvaluesIF::RETURN_OK) {
|
||||
error
|
||||
sif::error
|
||||
<< "VerificationReporter::sendSuccessReport: Error writing to queue. Code: "
|
||||
<< (uint16_t) status << std::endl;
|
||||
}
|
||||
@ -40,7 +40,7 @@ void VerificationReporter::sendSuccessReport(uint8_t set_report_id,
|
||||
tcSequenceControl, 0, set_step);
|
||||
ReturnValue_t status = MessageQueueSenderIF::sendMessage(acknowledgeQueue, &message);
|
||||
if (status != HasReturnvaluesIF::RETURN_OK) {
|
||||
error
|
||||
sif::error
|
||||
<< "VerificationReporter::sendSuccessReport: Error writing to queue. Code: "
|
||||
<< (uint16_t) status << std::endl;
|
||||
}
|
||||
@ -59,7 +59,7 @@ void VerificationReporter::sendFailureReport(uint8_t report_id,
|
||||
parameter1, parameter2);
|
||||
ReturnValue_t status = MessageQueueSenderIF::sendMessage(acknowledgeQueue, &message);
|
||||
if (status != HasReturnvaluesIF::RETURN_OK) {
|
||||
error
|
||||
sif::error
|
||||
<< "VerificationReporter::sendFailureReport Error writing to queue. Code: "
|
||||
<< (uint16_t) status << std::endl;
|
||||
}
|
||||
@ -76,7 +76,7 @@ void VerificationReporter::sendFailureReport(uint8_t report_id,
|
||||
tcSequenceControl, error_code, step, parameter1, parameter2);
|
||||
ReturnValue_t status = MessageQueueSenderIF::sendMessage(acknowledgeQueue, &message);
|
||||
if (status != HasReturnvaluesIF::RETURN_OK) {
|
||||
error
|
||||
sif::error
|
||||
<< "VerificationReporter::sendFailureReport Error writing to queue. Code: "
|
||||
<< (uint16_t) status << std::endl;
|
||||
}
|
||||
@ -88,7 +88,7 @@ void VerificationReporter::initialize() {
|
||||
if (temp != NULL) {
|
||||
this->acknowledgeQueue = temp->getVerificationQueue();
|
||||
} else {
|
||||
error
|
||||
sif::error
|
||||
<< "VerificationReporter::VerificationReporter: Configuration error."
|
||||
<< std::endl;
|
||||
}
|
||||
|