Merge branch 'mueller_framework' into front_branch

commit f0be1b1fff
@@ -54,7 +54,7 @@ ReturnValue_t GlobalDataPool::freeDataPoolLock() {
 }
 
 ReturnValue_t GlobalDataPool::lockDataPool() {
-    ReturnValue_t status = mutex->lockMutex(MutexIF::NO_TIMEOUT);
+    ReturnValue_t status = mutex->lockMutex(MutexIF::BLOCKING);
     if ( status != RETURN_OK ) {
         sif::error << "DataPool::DataPool: lock of mutex failed "
                 "with error code: " << status << std::endl;
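Note on the hunk above: under the FreeRTOS OSAL shown further down in this diff, the old MutexIF::NO_TIMEOUT was 0 and translated into a zero-tick (polling) take, while the new MutexIF::BLOCKING maps to portMAX_DELAY. Call sites such as GlobalDataPool::lockDataPool() therefore move from a try-once lock attempt to an indefinite wait. A minimal sketch of the two call styles, where only MutexIF comes from the framework and the helper functions are hypothetical:

    // Hypothetical helpers, not part of the commit: the renamed constants
    // make the two lock styles explicit at the call site.
    ReturnValue_t tryLock(MutexIF* mutex) {
        // Returns MUTEX_TIMEOUT immediately if the mutex is already held.
        return mutex->lockMutex(MutexIF::POLLING);
    }

    ReturnValue_t waitForLock(MutexIF* mutex) {
        // Suspends the calling task until the mutex becomes available.
        return mutex->lockMutex(MutexIF::BLOCKING);
    }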
@@ -151,7 +151,7 @@ void EventManager::printEvent(EventMessage* message) {
 #endif
 
 void EventManager::lockMutex() {
-    mutex->lockMutex(MutexIF::NO_TIMEOUT);
+    mutex->lockMutex(MutexIF::BLOCKING);
 }
 
 void EventManager::unlockMutex() {

@@ -26,7 +26,7 @@ ReturnValue_t HealthTable::registerObject(object_id_t object,
 
 void HealthTable::setHealth(object_id_t object,
         HasHealthIF::HealthState newState) {
-    mutex->lockMutex(MutexIF::NO_TIMEOUT);
+    mutex->lockMutex(MutexIF::BLOCKING);
     HealthMap::iterator iter = healthMap.find(object);
     if (iter != healthMap.end()) {
         iter->second = newState;

@@ -36,7 +36,7 @@ void HealthTable::setHealth(object_id_t object,
 
 HasHealthIF::HealthState HealthTable::getHealth(object_id_t object) {
     HasHealthIF::HealthState state = HasHealthIF::HEALTHY;
-    mutex->lockMutex(MutexIF::NO_TIMEOUT);
+    mutex->lockMutex(MutexIF::BLOCKING);
     HealthMap::iterator iter = healthMap.find(object);
     if (iter != healthMap.end()) {
         state = iter->second;

@@ -46,7 +46,7 @@ HasHealthIF::HealthState HealthTable::getHealth(object_id_t object) {
 }
 
 uint32_t HealthTable::getPrintSize() {
-    mutex->lockMutex(MutexIF::NO_TIMEOUT);
+    mutex->lockMutex(MutexIF::BLOCKING);
     uint32_t size = healthMap.size() * 5 + 2;
     mutex->unlockMutex();
     return size;

@@ -54,7 +54,7 @@ uint32_t HealthTable::getPrintSize() {
 
 bool HealthTable::hasHealth(object_id_t object) {
     bool exits = false;
-    mutex->lockMutex(MutexIF::NO_TIMEOUT);
+    mutex->lockMutex(MutexIF::BLOCKING);
     HealthMap::iterator iter = healthMap.find(object);
     if (iter != healthMap.end()) {
         exits = true;

@@ -64,7 +64,7 @@ bool HealthTable::hasHealth(object_id_t object) {
 }
 
 void HealthTable::printAll(uint8_t* pointer, uint32_t maxSize) {
-    mutex->lockMutex(MutexIF::NO_TIMEOUT);
+    mutex->lockMutex(MutexIF::BLOCKING);
     size_t size = 0;
     uint16_t count = healthMap.size();
     ReturnValue_t result = SerializeAdapter<uint16_t>::serialize(&count,

@@ -85,7 +85,7 @@ void HealthTable::printAll(uint8_t* pointer, uint32_t maxSize) {
 ReturnValue_t HealthTable::iterate(
         std::pair<object_id_t, HasHealthIF::HealthState> *value, bool reset) {
     ReturnValue_t result = HasReturnvaluesIF::RETURN_OK;
-    mutex->lockMutex(MutexIF::NO_TIMEOUT);
+    mutex->lockMutex(MutexIF::BLOCKING);
     if (reset) {
         mapIterator = healthMap.begin();
     }

@@ -53,7 +53,7 @@ void InternalErrorReporter::lostTm() {
 
 uint32_t InternalErrorReporter::getAndResetQueueHits() {
     uint32_t value;
-    mutex->lockMutex(MutexIF::NO_TIMEOUT);
+    mutex->lockMutex(MutexIF::BLOCKING);
     value = queueHits;
     queueHits = 0;
     mutex->unlockMutex();

@@ -62,21 +62,21 @@ uint32_t InternalErrorReporter::getAndResetQueueHits() {
 
 uint32_t InternalErrorReporter::getQueueHits() {
     uint32_t value;
-    mutex->lockMutex(MutexIF::NO_TIMEOUT);
+    mutex->lockMutex(MutexIF::BLOCKING);
     value = queueHits;
     mutex->unlockMutex();
     return value;
 }
 
 void InternalErrorReporter::incrementQueueHits() {
-    mutex->lockMutex(MutexIF::NO_TIMEOUT);
+    mutex->lockMutex(MutexIF::BLOCKING);
     queueHits++;
     mutex->unlockMutex();
 }
 
 uint32_t InternalErrorReporter::getAndResetTmHits() {
     uint32_t value;
-    mutex->lockMutex(MutexIF::NO_TIMEOUT);
+    mutex->lockMutex(MutexIF::BLOCKING);
     value = tmHits;
     tmHits = 0;
     mutex->unlockMutex();

@@ -85,14 +85,14 @@ uint32_t InternalErrorReporter::getAndResetTmHits() {
 
 uint32_t InternalErrorReporter::getTmHits() {
     uint32_t value;
-    mutex->lockMutex(MutexIF::NO_TIMEOUT);
+    mutex->lockMutex(MutexIF::BLOCKING);
     value = tmHits;
     mutex->unlockMutex();
     return value;
 }
 
 void InternalErrorReporter::incrementTmHits() {
-    mutex->lockMutex(MutexIF::NO_TIMEOUT);
+    mutex->lockMutex(MutexIF::BLOCKING);
     tmHits++;
     mutex->unlockMutex();
 }

@@ -103,7 +103,7 @@ void InternalErrorReporter::storeFull() {
 
 uint32_t InternalErrorReporter::getAndResetStoreHits() {
     uint32_t value;
-    mutex->lockMutex(MutexIF::NO_TIMEOUT);
+    mutex->lockMutex(MutexIF::BLOCKING);
     value = storeHits;
     storeHits = 0;
     mutex->unlockMutex();

@@ -112,14 +112,14 @@ uint32_t InternalErrorReporter::getAndResetStoreHits() {
 
 uint32_t InternalErrorReporter::getStoreHits() {
     uint32_t value;
-    mutex->lockMutex(MutexIF::NO_TIMEOUT);
+    mutex->lockMutex(MutexIF::BLOCKING);
     value = storeHits;
     mutex->unlockMutex();
     return value;
 }
 
 void InternalErrorReporter::incrementStoreHits() {
-    mutex->lockMutex(MutexIF::NO_TIMEOUT);
+    mutex->lockMutex(MutexIF::BLOCKING);
     storeHits++;
     mutex->unlockMutex();
 }
@@ -12,8 +12,21 @@
  */
 class MutexIF {
 public:
-    static const uint32_t NO_TIMEOUT; //!< Needs to be defined in implementation.
-    static const uint32_t MAX_TIMEOUT;
+    /**
+     * @brief Timeout value used for polling lock attempt.
+     * @details
+     * If the lock is not successfull, MUTEX_TIMEOUT will be returned
+     * immediately. Value needs to be defined in implementation.
+     */
+    static const uint32_t POLLING;
+    /**
+     * @brief Timeout value used for permanent blocking lock attempt.
+     * @details
+     * The task will be blocked (indefinitely) until the mutex is unlocked.
+     * Value needs to be defined in implementation.
+     */
+    static const uint32_t BLOCKING;
 
     static const uint8_t INTERFACE_ID = CLASS_ID::MUTEX_IF;
     /**
      * The system lacked the necessary resources (other than memory) to initialize another mutex.
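The reworked MutexIF now documents two named timeout values; any other millisecond value passed to lockMutex() is treated as a finite wait by the OSAL implementations shown below. A hedged usage sketch against this interface (the 40 ms figure and the surrounding snippet are illustrative, not taken from the commit):

    // Illustrative caller: bounded wait on a MutexIF instance.
    ReturnValue_t result = mutex->lockMutex(40);  // wait up to ~40 ms
    if (result == HasReturnvaluesIF::RETURN_OK) {
        // critical section
        mutex->unlockMutex();
    }
    else {
        // e.g. MutexIF::MUTEX_TIMEOUT if the mutex stayed locked
    }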
@@ -17,11 +17,11 @@ BinarySemaphoreUsingTask::~BinarySemaphoreUsingTask() {
 }
 
 ReturnValue_t BinarySemaphoreUsingTask::acquire(uint32_t timeoutMs) {
-    TickType_t timeout = SemaphoreIF::NO_TIMEOUT;
-    if(timeoutMs == SemaphoreIF::MAX_TIMEOUT) {
-        timeout = SemaphoreIF::MAX_TIMEOUT;
+    TickType_t timeout = SemaphoreIF::POLLING;
+    if(timeoutMs == SemaphoreIF::BLOCKING) {
+        timeout = SemaphoreIF::BLOCKING;
     }
-    else if(timeoutMs > SemaphoreIF::NO_TIMEOUT){
+    else if(timeoutMs > SemaphoreIF::POLLING){
         timeout = pdMS_TO_TICKS(timeoutMs);
     }
     return acquireWithTickTimeout(timeout);
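With this translation the millisecond argument of acquire() covers three cases: POLLING tries once, BLOCKING hands portMAX_DELAY to FreeRTOS, and any other value is converted with pdMS_TO_TICKS. A short, hypothetical caller (the 20 ms value is only an example):

    // Hypothetical usage of the reworked acquire() timeouts.
    void takeSemaphoreExamples(BinarySemaphoreUsingTask& semaphore) {
        semaphore.acquire(SemaphoreIF::POLLING);   // single attempt, returns at once
        semaphore.acquire(SemaphoreIF::BLOCKING);  // waits until the semaphore is given
        semaphore.acquire(20);                     // waits up to roughly 20 ms
    }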
@@ -26,7 +26,7 @@ public:
     virtual~ BinarySemaphoreUsingTask();
 
     ReturnValue_t acquire(uint32_t timeoutMs =
-            SemaphoreIF::NO_TIMEOUT) override;
+            SemaphoreIF::BLOCKING) override;
     ReturnValue_t release() override;
     uint8_t getSemaphoreCounter() const override;
     static uint8_t getSemaphoreCounter(TaskHandle_t taskHandle);

@@ -40,7 +40,7 @@ public:
      * - @c RETURN_FAILED on failure
      */
     ReturnValue_t acquireWithTickTimeout(TickType_t timeoutTicks =
-            SemaphoreIF::NO_TIMEOUT);
+            SemaphoreIF::BLOCKING);
 
     /**
      * Get handle to the task related to the semaphore.

@@ -36,11 +36,11 @@ BinarySemaphore& BinarySemaphore::operator =(
 }
 
 ReturnValue_t BinarySemaphore::acquire(uint32_t timeoutMs) {
-    TickType_t timeout = SemaphoreIF::NO_TIMEOUT;
-    if(timeoutMs == SemaphoreIF::MAX_TIMEOUT) {
-        timeout = SemaphoreIF::MAX_TIMEOUT;
+    TickType_t timeout = SemaphoreIF::POLLING;
+    if(timeoutMs == SemaphoreIF::BLOCKING) {
+        timeout = SemaphoreIF::BLOCKING;
     }
-    else if(timeoutMs > SemaphoreIF::NO_TIMEOUT){
+    else if(timeoutMs > SemaphoreIF::POLLING){
         timeout = pdMS_TO_TICKS(timeoutMs);
     }
     return acquireWithTickTimeout(timeout);

@@ -53,7 +53,7 @@ public:
      * -@c SemaphoreIF::SEMAPHORE_TIMEOUT on timeout
      */
     ReturnValue_t acquire(uint32_t timeoutMs =
-            SemaphoreIF::NO_TIMEOUT) override;
+            SemaphoreIF::BLOCKING) override;
 
     /**
      * Same as lockBinarySemaphore() with timeout in FreeRTOS ticks.

@@ -62,7 +62,7 @@ public:
      * -@c SemaphoreIF::SEMAPHORE_TIMEOUT on timeout
      */
     ReturnValue_t acquireWithTickTimeout(TickType_t timeoutTicks =
-            BinarySemaphore::NO_TIMEOUT);
+            SemaphoreIF::BLOCKING);
 
     /**
      * Release the binary semaphore.

@@ -155,7 +155,7 @@ ReturnValue_t Clock::setLeapSeconds(const uint16_t leapSeconds_) {
     if (checkOrCreateClockMutex() != HasReturnvaluesIF::RETURN_OK) {
         return HasReturnvaluesIF::RETURN_FAILED;
     }
-    ReturnValue_t result = timeMutex->lockMutex(MutexIF::NO_TIMEOUT);
+    ReturnValue_t result = timeMutex->lockMutex(MutexIF::BLOCKING);
     if (result != HasReturnvaluesIF::RETURN_OK) {
         return result;
     }

@@ -170,7 +170,7 @@ ReturnValue_t Clock::getLeapSeconds(uint16_t* leapSeconds_) {
     if (timeMutex == NULL) {
         return HasReturnvaluesIF::RETURN_FAILED;
     }
-    ReturnValue_t result = timeMutex->lockMutex(MutexIF::NO_TIMEOUT);
+    ReturnValue_t result = timeMutex->lockMutex(MutexIF::BLOCKING);
     if (result != HasReturnvaluesIF::RETURN_OK) {
         return result;
     }

@@ -38,11 +38,11 @@ CountingSemaphoreUsingTask::~CountingSemaphoreUsingTask() {
 }
 
 ReturnValue_t CountingSemaphoreUsingTask::acquire(uint32_t timeoutMs) {
-    TickType_t timeout = SemaphoreIF::NO_TIMEOUT;
-    if(timeoutMs == SemaphoreIF::MAX_TIMEOUT) {
-        timeout = SemaphoreIF::MAX_TIMEOUT;
+    TickType_t timeout = SemaphoreIF::POLLING;
+    if(timeoutMs == SemaphoreIF::BLOCKING) {
+        timeout = SemaphoreIF::BLOCKING;
     }
-    else if(timeoutMs > SemaphoreIF::NO_TIMEOUT){
+    else if(timeoutMs > SemaphoreIF::POLLING){
         timeout = pdMS_TO_TICKS(timeoutMs);
     }
     return acquireWithTickTimeout(timeout);

@@ -31,7 +31,7 @@ public:
      * @return -@c RETURN_OK on success
      *         -@c SemaphoreIF::SEMAPHORE_TIMEOUT on timeout
      */
-    ReturnValue_t acquire(uint32_t timeoutMs = SemaphoreIF::NO_TIMEOUT) override;
+    ReturnValue_t acquire(uint32_t timeoutMs = SemaphoreIF::BLOCKING) override;
 
     /**
      * Release a semaphore, increasing the number of available counting

@@ -61,7 +61,7 @@ public:
      * -@c SemaphoreIF::SEMAPHORE_TIMEOUT on timeout
      */
     ReturnValue_t acquireWithTickTimeout(
-            TickType_t timeoutTicks = SemaphoreIF::NO_TIMEOUT);
+            TickType_t timeoutTicks = SemaphoreIF::BLOCKING);
 
     /**
      * Get handle to the task related to the semaphore.

@@ -10,15 +10,18 @@
 
 class FixedTimeslotTask: public FixedTimeslotTaskIF {
 public:
 
     /**
-     * @brief The standard constructor of the class.
-     *
-     * @details This is the general constructor of the class. In addition to the TaskBase parameters,
-     * the following variables are passed:
-     *
-     * @param (*setDeadlineMissedFunc)() The function pointer to the deadline missed function that shall be assigned.
-     *
-     * @param getPst The object id of the completely initialized polling sequence.
+     * Keep in Mind that you need to call before this vTaskStartScheduler()!
+     * A lot of task parameters are set in "FreeRTOSConfig.h".
+     * @param name Name of the task, lenght limited by configMAX_TASK_NAME_LEN
+     * @param setPriority Number of priorities specified by
+     * configMAX_PRIORITIES. High taskPriority_ number means high priority.
+     * @param setStack Stack size in words (not bytes!).
+     * Lower limit specified by configMINIMAL_STACK_SIZE
+     * @param overallPeriod Period in seconds.
+     * @param setDeadlineMissedFunc Callback if a deadline was missed.
+     * @return Pointer to the newly created task.
      */
     FixedTimeslotTask(const char *name, TaskPriority setPriority,
             TaskStackSize setStack, TaskPeriod overallPeriod,

@@ -2,8 +2,8 @@
 
 #include <framework/serviceinterface/ServiceInterfaceStream.h>
 
-const uint32_t MutexIF::NO_TIMEOUT = 0;
-const uint32_t MutexIF::MAX_TIMEOUT = portMAX_DELAY;
+const uint32_t MutexIF::POLLING = 0;
+const uint32_t MutexIF::BLOCKING = portMAX_DELAY;
 
 Mutex::Mutex() {
     handle = xSemaphoreCreateMutex();

@@ -23,11 +23,11 @@ ReturnValue_t Mutex::lockMutex(uint32_t timeoutMs) {
     if (handle == nullptr) {
         return MutexIF::MUTEX_NOT_FOUND;
     }
-    TickType_t timeout = MutexIF::NO_TIMEOUT;
-    if(timeoutMs == MutexIF::MAX_TIMEOUT) {
-        timeout = MutexIF::MAX_TIMEOUT;
+    TickType_t timeout = MutexIF::POLLING;
+    if(timeoutMs == MutexIF::BLOCKING) {
+        timeout = MutexIF::BLOCKING;
     }
-    else if(timeoutMs > MutexIF::NO_TIMEOUT){
+    else if(timeoutMs > MutexIF::POLLING){
         timeout = pdMS_TO_TICKS(timeoutMs);
     }
 

@@ -18,7 +18,7 @@ class Mutex : public MutexIF {
 public:
     Mutex();
     ~Mutex();
-    ReturnValue_t lockMutex(uint32_t timeoutMs = MutexIF::MAX_TIMEOUT) override;
+    ReturnValue_t lockMutex(uint32_t timeoutMs = MutexIF::BLOCKING) override;
     ReturnValue_t unlockMutex() override;
 private:
     SemaphoreHandle_t handle;

@@ -1,5 +1,5 @@
-#ifndef PERIODICTASK_H_
-#define PERIODICTASK_H_
+#ifndef FRAMEWORK_OSAL_FREERTOS_PERIODICTASK_H_
+#define FRAMEWORK_OSAL_FREERTOS_PERIODICTASK_H_
 
 #include <framework/objectmanager/ObjectManagerIF.h>
 #include <framework/tasks/PeriodicTaskIF.h>

@@ -17,8 +17,6 @@ class ExecutableObjectIF;
 /**
  * @brief This class represents a specialized task for
  *        periodic activities of multiple objects.
- * @details
- *
  * @ingroup task_handling
  */
 class PeriodicTask: public PeriodicTaskIF {

@@ -26,24 +24,25 @@ public:
     /**
      * @brief Standard constructor of the class.
      * @details
-     * The class is initialized without allocated objects. These need to be added
-     * with #addComponent. In the underlying TaskBase class, a new operating
-     * system task is created. In addition to the TaskBase parameters,
-     * the period, the pointer to the aforementioned initialization function and
-     * an optional "deadline-missed" function pointer is passed.
-     * @param priority Sets the priority of a task. Values depend on
-     * freeRTOS configuration, high number means high priority.
-     * @param stack_size The stack size reserved by the operating system for the task.
-     * @param setPeriod The length of the period with which the task's
+     * The class is initialized without allocated objects.
+     * These need to be added with #addComponent.
+     * @param priority
+     * Sets the priority of a task. Values depend on freeRTOS configuration,
+     * high number means high priority.
+     * @param stack_size
+     * The stack size reserved by the operating system for the task.
+     * @param setPeriod
+     * The length of the period with which the task's
      * functionality will be executed. It is expressed in clock ticks.
      * @param setDeadlineMissedFunc
-     * The function pointer to the deadline missed function that shall be assigned.
+     * The function pointer to the deadline missed function that shall
+     * be assigned.
      */
     PeriodicTask(const char *name, TaskPriority setPriority, TaskStackSize setStack,
             TaskPeriod setPeriod,void (*setDeadlineMissedFunc)());
     /**
-     * @brief Currently, the executed object's lifetime is not coupled with the task object's
-     * lifetime, so the destructor is empty.
+     * @brief Currently, the executed object's lifetime is not coupled with
+     * the task object's lifetime, so the destructor is empty.
      */
     virtual ~PeriodicTask(void);
 

@@ -6,8 +6,8 @@
 #include <framework/serviceinterface/ServiceInterfaceStream.h>
 
 SemaphoreFactory* SemaphoreFactory::factoryInstance = nullptr;
-const uint32_t SemaphoreIF::NO_TIMEOUT = 0;
-const uint32_t SemaphoreIF::MAX_TIMEOUT = portMAX_DELAY;
+const uint32_t SemaphoreIF::POLLING = 0;
+const uint32_t SemaphoreIF::BLOCKING = portMAX_DELAY;
 
 static const uint32_t USE_REGULAR_SEMAPHORES = 0;
 static const uint32_t USE_TASK_NOTIFICATIONS = 1;
@@ -34,9 +34,9 @@ FixedTimeslotTaskIF* TaskFactory::createFixedTimeslotTask(TaskName name_,
 }
 
 ReturnValue_t TaskFactory::deleteTask(PeriodicTaskIF* task) {
-    if (task == NULL) {
+    if (task == nullptr) {
         //delete self
-        vTaskDelete(NULL);
+        vTaskDelete(nullptr);
         return HasReturnvaluesIF::RETURN_OK;
     } else {
         //TODO not implemented
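The NULL to nullptr change above is cosmetic; the behaviour relied on comes from FreeRTOS, where vTaskDelete(nullptr) deletes the calling task. A hedged sketch of how the self-delete branch is used (the calling context is hypothetical, only TaskFactory::instance() and deleteTask() appear in this diff):

    // Hypothetical call made from inside a task's own context:
    // passing nullptr asks the factory to delete the calling task itself.
    TaskFactory::instance()->deleteTask(nullptr);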
@@ -27,13 +27,13 @@ BinarySemaphore& BinarySemaphore::operator =(
 
 ReturnValue_t BinarySemaphore::acquire(uint32_t timeoutMs) {
     int result = 0;
-    if(timeoutMs == SemaphoreIF::NO_TIMEOUT) {
+    if(timeoutMs == SemaphoreIF::POLLING) {
         result = sem_trywait(&handle);
     }
-    else if(timeoutMs == SemaphoreIF::MAX_TIMEOUT) {
+    else if(timeoutMs == SemaphoreIF::BLOCKING) {
         result = sem_wait(&handle);
     }
-    else if(timeoutMs > SemaphoreIF::NO_TIMEOUT){
+    else if(timeoutMs > SemaphoreIF::POLLING){
         timespec timeOut;
         clock_gettime(CLOCK_REALTIME, &timeOut);
         uint64_t nseconds = timeOut.tv_sec * 1000000000 + timeOut.tv_nsec;
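On the Linux OSAL the three cases map onto POSIX calls: sem_trywait for POLLING, sem_wait for BLOCKING, and sem_timedwait with an absolute CLOCK_REALTIME deadline for a finite timeout. The hunk above is cut off before that deadline is applied; a standalone sketch of the finite-timeout case (not the framework's exact code) looks like this:

    #include <semaphore.h>
    #include <time.h>
    #include <cstdint>

    // Sketch: wait on 'handle' for at most timeoutMs milliseconds.
    int timedAcquire(sem_t& handle, uint32_t timeoutMs) {
        timespec timeOut;
        clock_gettime(CLOCK_REALTIME, &timeOut);
        uint64_t nseconds = timeOut.tv_sec * 1000000000 + timeOut.tv_nsec;
        nseconds += static_cast<uint64_t>(timeoutMs) * 1000000;   // add the timeout
        timeOut.tv_sec = nseconds / 1000000000;
        timeOut.tv_nsec = nseconds - timeOut.tv_sec * 1000000000;
        return sem_timedwait(&handle, &timeOut);                  // 0 on success
    }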
@@ -50,7 +50,7 @@ public:
      * -@c SemaphoreIF::SEMAPHORE_TIMEOUT on timeout
      */
     ReturnValue_t acquire(uint32_t timeoutMs =
-            SemaphoreIF::NO_TIMEOUT) override;
+            SemaphoreIF::BLOCKING) override;
 
     /**
      * Release the binary semaphore.

@@ -179,7 +179,7 @@ ReturnValue_t Clock::setLeapSeconds(const uint16_t leapSeconds_) {
     if(checkOrCreateClockMutex()!=HasReturnvaluesIF::RETURN_OK){
         return HasReturnvaluesIF::RETURN_FAILED;
     }
-    ReturnValue_t result = timeMutex->lockMutex(MutexIF::NO_TIMEOUT);
+    ReturnValue_t result = timeMutex->lockMutex(MutexIF::BLOCKING);
     if (result != HasReturnvaluesIF::RETURN_OK) {
         return result;
     }

@@ -194,7 +194,7 @@ ReturnValue_t Clock::getLeapSeconds(uint16_t* leapSeconds_) {
     if(timeMutex==NULL){
         return HasReturnvaluesIF::RETURN_FAILED;
     }
-    ReturnValue_t result = timeMutex->lockMutex(MutexIF::NO_TIMEOUT);
+    ReturnValue_t result = timeMutex->lockMutex(MutexIF::BLOCKING);
     if (result != HasReturnvaluesIF::RETURN_OK) {
         return result;
     }

@@ -2,7 +2,8 @@
 #include <framework/serviceinterface/ServiceInterfaceStream.h>
 #include <framework/timemanager/Clock.h>
 
-const uint32_t MutexIF::NO_TIMEOUT = 0;
+const uint32_t MutexIF::BLOCKING = 0xffffffff;
+const uint32_t MutexIF::POLLING = 0;
 uint8_t Mutex::count = 0;
 
 

@@ -41,7 +42,7 @@ Mutex::~Mutex() {
 
 ReturnValue_t Mutex::lockMutex(uint32_t timeoutMs) {
     int status = 0;
-    if (timeoutMs != MutexIF::NO_TIMEOUT) {
+    if (timeoutMs != MutexIF::BLOCKING) {
         timespec timeOut;
         clock_gettime(CLOCK_REALTIME, &timeOut);
         uint64_t nseconds = timeOut.tv_sec * 1000000000 + timeOut.tv_nsec;

@@ -3,8 +3,8 @@
 #include <framework/osal/linux/CountingSemaphore.h>
 #include <framework/serviceinterface/ServiceInterfaceStream.h>
 
-const uint32_t SemaphoreIF::NO_TIMEOUT = 0;
-const uint32_t SemaphoreIF::MAX_TIMEOUT = 0xFFFFFFFF;
+const uint32_t SemaphoreIF::POLLING = 0;
+const uint32_t SemaphoreIF::BLOCKING = 0xffffffff;
 
 SemaphoreFactory* SemaphoreFactory::factoryInstance = nullptr;
 

@@ -21,7 +21,7 @@ inline PoolManager<NUMBER_OF_POOLS>::~PoolManager(void) {
 template<uint8_t NUMBER_OF_POOLS>
 inline ReturnValue_t PoolManager<NUMBER_OF_POOLS>::reserveSpace(
         const uint32_t size, store_address_t* address, bool ignoreFault) {
-    MutexHelper mutexHelper(mutex,MutexIF::NO_TIMEOUT);
+    MutexHelper mutexHelper(mutex,MutexIF::BLOCKING);
     ReturnValue_t status = LocalPool<NUMBER_OF_POOLS>::reserveSpace(size,
             address,ignoreFault);
     return status;

@@ -33,7 +33,7 @@ inline ReturnValue_t PoolManager<NUMBER_OF_POOLS>::deleteData(
 //  debug << "PoolManager( " << translateObject(getObjectId()) <<
 //          " )::deleteData from store " << packet_id.pool_index <<
 //          ". id is "<< packet_id.packet_index << std::endl;
-    MutexHelper mutexHelper(mutex,MutexIF::NO_TIMEOUT);
+    MutexHelper mutexHelper(mutex,MutexIF::BLOCKING);
     ReturnValue_t status = LocalPool<NUMBER_OF_POOLS>::deleteData(packet_id);
     return status;
 }
@@ -41,7 +41,7 @@ inline ReturnValue_t PoolManager<NUMBER_OF_POOLS>::deleteData(
 template<uint8_t NUMBER_OF_POOLS>
 inline ReturnValue_t PoolManager<NUMBER_OF_POOLS>::deleteData(uint8_t* buffer,
         size_t size, store_address_t* storeId) {
-    MutexHelper mutexHelper(mutex,MutexIF::NO_TIMEOUT);
+    MutexHelper mutexHelper(mutex,MutexIF::BLOCKING);
     ReturnValue_t status = LocalPool<NUMBER_OF_POOLS>::deleteData(buffer,
             size, storeId);
     return status;
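The PoolManager hunks all go through MutexHelper, a scope guard that takes the mutex in its constructor and releases it in its destructor, so these methods never call unlockMutex() on every return path. A hedged sketch of the pattern (the function and counter below are illustrative, not framework code):

    // Illustrative RAII usage: the mutex is held only for the scope of 'helper'.
    ReturnValue_t updateProtectedCounter(MutexIF* mutex, uint32_t& counter) {
        MutexHelper helper(mutex, MutexIF::BLOCKING);  // locks here
        counter++;                                     // protected section
        return HasReturnvaluesIF::RETURN_OK;
    }   // helper's destructor unlocks, even on early return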
@@ -21,10 +21,21 @@
 class SemaphoreIF {
 public:
     virtual~ SemaphoreIF() {};
-    //! Needs to be defined in implementation. No blocking time
-    static const uint32_t NO_TIMEOUT;
-    //! Needs to be defined in implementation. Blocks indefinitely.
-    static const uint32_t MAX_TIMEOUT;
+    /**
+     * @brief Timeout value used for polling lock attempt.
+     * @details
+     * If the lock is not successfull, MUTEX_TIMEOUT will be returned
+     * immediately. Value needs to be defined in implementation.
+     */
+    static const uint32_t POLLING;
+    /**
+     * @brief Timeout value used for permanent blocking lock attempt.
+     * @details
+     * The task will be blocked (indefinitely) until the mutex is unlocked.
+     * Value needs to be defined in implementation.
+     */
+    static const uint32_t BLOCKING;
 
     static const uint8_t INTERFACE_ID = CLASS_ID::SEMAPHORE_IF;
     //! Semaphore timeout
     static constexpr ReturnValue_t SEMAPHORE_TIMEOUT = MAKE_RETURN_CODE(1);

@@ -19,16 +19,14 @@ public:
     static TaskFactory* instance();
 
     /**
-     * Keep in Mind that you need to call before this vTaskStartScheduler()!
-     * A lot of task parameters are set in "FreeRTOSConfig.h".
-     * @param name_ Name of the task, lenght limited by configMAX_TASK_NAME_LEN
-     * @param taskPriority_ Number of priorities specified by
-     * configMAX_PRIORITIES. High taskPriority_ number means high priority.
-     * @param stackSize_ Stack size in words (not bytes!).
-     * Lower limit specified by configMINIMAL_STACK_SIZE
-     * @param period_ Period in seconds.
-     * @param deadLineMissedFunction_ Callback if a deadline was missed.
-     * @return Pointer to the newly created task.
+     * Generic interface to create a periodic task
+     * @param name_ Name of the task
+     * @param taskPriority_ Priority of the task
+     * @param stackSize_ Stack size if the task
+     * @param periodInSeconds_ Period in seconds
+     * @param deadLineMissedFunction_ This function is called if a deadline was
+     * missed
+     * @return Pointer to the created periodic task class
      */
     PeriodicTaskIF* createPeriodicTask(TaskName name_,
             TaskPriority taskPriority_, TaskStackSize stackSize_,

@@ -36,16 +34,14 @@ public:
             TaskDeadlineMissedFunction deadLineMissedFunction_);
 
     /**
-     * Keep in Mind that you need to call before this vTaskStartScheduler()!
-     * A lot of task parameters are set in "FreeRTOSConfig.h".
-     * @param name_ Name of the task, lenght limited by configMAX_TASK_NAME_LEN
-     * @param taskPriority_ Number of priorities specified by
-     * configMAX_PRIORITIES. High taskPriority_ number means high priority.
-     * @param stackSize_ Stack size in words (not bytes!).
-     * Lower limit specified by configMINIMAL_STACK_SIZE
-     * @param period_ Period in seconds.
-     * @param deadLineMissedFunction_ Callback if a deadline was missed.
-     * @return Pointer to the newly created task.
+     * Generic interface to create a fixed timeslot task
+     * @param name_ Name of the task
+     * @param taskPriority_ Priority of the task
+     * @param stackSize_ Stack size if the task
+     * @param periodInSeconds_ Period in seconds
+     * @param deadLineMissedFunction_ This function is called if a deadline was
+     * missed
+     * @return Pointer to the created periodic task class
      */
     FixedTimeslotTaskIF* createFixedTimeslotTask(TaskName name_,
             TaskPriority taskPriority_, TaskStackSize stackSize_,