diff --git a/datapoollocal/LocalDataPoolManager.h b/datapoollocal/LocalDataPoolManager.h
index 95d48303..c4024cf9 100644
--- a/datapoollocal/LocalDataPoolManager.h
+++ b/datapoollocal/LocalDataPoolManager.h
@@ -48,7 +48,7 @@ class HousekeepingPacketUpdate;
  * @author 		R. Mueller
  */
 class LocalDataPoolManager {
-	template<typename T> friend class LocalPoolVar;
+	template<typename T> friend class LocalPoolVariable;
 	template<typename T, uint16_t vecSize> friend class LocalPoolVector;
 	friend class LocalPoolDataSetBase;
 	friend void (Factory::setStaticFrameworkObjectIds)();
diff --git a/datapoollocal/LocalPoolVariable.h b/datapoollocal/LocalPoolVariable.h
index c18d5443..7b7e443e 100644
--- a/datapoollocal/LocalPoolVariable.h
+++ b/datapoollocal/LocalPoolVariable.h
@@ -22,10 +22,10 @@
  * @ingroup data_pool
  */
 template<typename T>
-class LocalPoolVar: public LocalPoolObjectBase {
+class LocalPoolVariable: public LocalPoolObjectBase {
 public:
 	//! Default ctor is forbidden.
-	LocalPoolVar() = delete;
+	LocalPoolVariable() = delete;
 
 	/**
 	 * This constructor is used by the data creators to have pool variable
@@ -43,7 +43,7 @@ public:
 	 * If nullptr, the variable is not registered.
 	 * @param setReadWriteMode Specify the read-write mode of the pool variable.
 	 */
-	LocalPoolVar(HasLocalDataPoolIF* hkOwner, lp_id_t poolId,
+	LocalPoolVariable(HasLocalDataPoolIF* hkOwner, lp_id_t poolId,
 			DataSetIF* dataSet = nullptr,
 			pool_rwm_t setReadWriteMode = pool_rwm_t::VAR_READ_WRITE);
 
@@ -64,7 +64,7 @@ public:
 	 * @param setReadWriteMode Specify the read-write mode of the pool variable.
 	 *
 	 */
-	LocalPoolVar(object_id_t poolOwner, lp_id_t poolId,
+	LocalPoolVariable(object_id_t poolOwner, lp_id_t poolId,
 			DataSetIF* dataSet = nullptr,
 			pool_rwm_t setReadWriteMode = pool_rwm_t::VAR_READ_WRITE);
 	/**
@@ -73,10 +73,10 @@ public:
 	 * @param dataSet
 	 * @param setReadWriteMode
 	 */
-	LocalPoolVar(gp_id_t globalPoolId, DataSetIF* dataSet = nullptr,
+	LocalPoolVariable(gp_id_t globalPoolId, DataSetIF* dataSet = nullptr,
 			pool_rwm_t setReadWriteMode = pool_rwm_t::VAR_READ_WRITE);
 
-	virtual~ LocalPoolVar() {};
+	virtual ~LocalPoolVariable() {};
 
 	/**
 	 * @brief	This is the local copy of the data pool entry.
@@ -118,23 +118,23 @@ public:
 	ReturnValue_t commit(dur_millis_t lockTimeout = MutexIF::BLOCKING) override;
 
 
-	LocalPoolVar<T> &operator=(const T& newValue);
-	LocalPoolVar<T> &operator=(const LocalPoolVar<T>& newPoolVariable);
+	LocalPoolVariable<T> &operator=(const T& newValue);
+	LocalPoolVariable<T> &operator=(const LocalPoolVariable<T>& newPoolVariable);
 
 	//! Explicit type conversion operator. Allows casting the class to
 	//! its template type to perform operations on value.
 	explicit operator T() const;
 
-	bool operator==(const LocalPoolVar<T>& other) const;
+	bool operator==(const LocalPoolVariable<T>& other) const;
 	bool operator==(const T& other) const;
 
-	bool operator!=(const LocalPoolVar<T>& other) const;
+	bool operator!=(const LocalPoolVariable<T>& other) const;
 	bool operator!=(const T& other) const;
 
-	bool operator<(const LocalPoolVar<T>& other) const;
+	bool operator<(const LocalPoolVariable<T>& other) const;
 	bool operator<(const T& other) const;
 
-	bool operator>(const LocalPoolVar<T>& other) const;
+	bool operator>(const LocalPoolVariable<T>& other) const;
 	bool operator>(const T& other) const;
 
 protected:
@@ -160,7 +160,7 @@ protected:
 	// std::ostream is the type for object std::cout
 	template <typename U>
 	friend std::ostream& operator<< (std::ostream &out,
-			const LocalPoolVar<U> &var);
+			const LocalPoolVariable<U> &var);
 
 private:
 };
@@ -168,18 +168,18 @@ private:
 #include "LocalPoolVariable.tpp"
 
 template<class T>
-using lp_var_t = LocalPoolVar<T>;
+using lp_var_t = LocalPoolVariable<T>;
 
-using lp_bool_t = LocalPoolVar<uint8_t>;
-using lp_uint8_t = LocalPoolVar<uint8_t>;
-using lp_uint16_t = LocalPoolVar<uint16_t>;
-using lp_uint32_t = LocalPoolVar<uint32_t>;
-using lp_uint64_t = LocalPoolVar<uint64_t>;
-using lp_int8_t = LocalPoolVar<int8_t>;
-using lp_int16_t = LocalPoolVar<int16_t>;
-using lp_int32_t = LocalPoolVar<int32_t>;
-using lp_int64_t = LocalPoolVar<int64_t>;
-using lp_float_t = LocalPoolVar<float>;
-using lp_double_t = LocalPoolVar<double>;
+using lp_bool_t = LocalPoolVariable<uint8_t>;
+using lp_uint8_t = LocalPoolVariable<uint8_t>;
+using lp_uint16_t = LocalPoolVariable<uint16_t>;
+using lp_uint32_t = LocalPoolVariable<uint32_t>;
+using lp_uint64_t = LocalPoolVariable<uint64_t>;
+using lp_int8_t = LocalPoolVariable<int8_t>;
+using lp_int16_t = LocalPoolVariable<int16_t>;
+using lp_int32_t = LocalPoolVariable<int32_t>;
+using lp_int64_t = LocalPoolVariable<int64_t>;
+using lp_float_t = LocalPoolVariable<float>;
+using lp_double_t = LocalPoolVariable<double>;
 
 #endif /* FSFW_DATAPOOLLOCAL_LOCALPOOLVARIABLE_H_ */
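
For reference, a minimal usage sketch of the renamed pool variable class and the lp_* aliases defined above. The owner object ID and local pool ID below are placeholders, and the read/commit calls assume the owner has registered the corresponding pool entry beforehand:

    // Placeholder identifiers, not defined by the framework.
    lp_float_t temperature(objects::EXAMPLE_DEVICE, exampleIds::TEMPERATURE);

    // read() locks the owner's pool mutex and copies the pool entry into the
    // public local copy 'value'; commit() writes the local copy back.
    if(temperature.read(MutexIF::BLOCKING) == HasReturnvaluesIF::RETURN_OK) {
        temperature.value += 1.0f;
        temperature.commit(MutexIF::BLOCKING);
    }
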
diff --git a/datapoollocal/LocalPoolVariable.tpp b/datapoollocal/LocalPoolVariable.tpp
index b9f7b906..48649ad5 100644
--- a/datapoollocal/LocalPoolVariable.tpp
+++ b/datapoollocal/LocalPoolVariable.tpp
@@ -6,32 +6,32 @@
 #endif
 
 template<typename T>
-inline LocalPoolVar<T>::LocalPoolVar(HasLocalDataPoolIF* hkOwner,
+inline LocalPoolVariable<T>::LocalPoolVariable(HasLocalDataPoolIF* hkOwner,
 		lp_id_t poolId, DataSetIF* dataSet, pool_rwm_t setReadWriteMode):
 		LocalPoolObjectBase(poolId, hkOwner, dataSet, setReadWriteMode) {}
 
 template<typename T>
-inline LocalPoolVar<T>::LocalPoolVar(object_id_t poolOwner, lp_id_t poolId,
+inline LocalPoolVariable<T>::LocalPoolVariable(object_id_t poolOwner, lp_id_t poolId,
         DataSetIF *dataSet, pool_rwm_t setReadWriteMode):
         LocalPoolObjectBase(poolOwner, poolId, dataSet, setReadWriteMode) {}
 
 
 template<typename T>
-inline LocalPoolVar<T>::LocalPoolVar(gp_id_t globalPoolId, DataSetIF *dataSet,
+inline LocalPoolVariable<T>::LocalPoolVariable(gp_id_t globalPoolId, DataSetIF *dataSet,
 		pool_rwm_t setReadWriteMode):
 		LocalPoolObjectBase(globalPoolId.objectId, globalPoolId.localPoolId,
 				dataSet, setReadWriteMode){}
 
 
 template<typename T>
-inline ReturnValue_t LocalPoolVar<T>::read(dur_millis_t lockTimeout) {
+inline ReturnValue_t LocalPoolVariable<T>::read(dur_millis_t lockTimeout) {
 	MutexHelper(hkManager->getMutexHandle(), MutexIF::TimeoutType::WAITING,
 			lockTimeout);
 	return readWithoutLock();
 }
 
 template<typename T>
-inline ReturnValue_t LocalPoolVar<T>::readWithoutLock() {
+inline ReturnValue_t LocalPoolVariable<T>::readWithoutLock() {
 	if(readWriteMode == pool_rwm_t::VAR_WRITE) {
 		sif::debug << "LocalPoolVar: Invalid read write "
 				"mode for read() call." << std::endl;
@@ -53,14 +53,14 @@ inline ReturnValue_t LocalPoolVar<T>::readWithoutLock() {
 }
 
 template<typename T>
-inline ReturnValue_t LocalPoolVar<T>::commit(dur_millis_t lockTimeout) {
+inline ReturnValue_t LocalPoolVariable<T>::commit(dur_millis_t lockTimeout) {
 	MutexHelper(hkManager->getMutexHandle(), MutexIF::TimeoutType::WAITING,
 			lockTimeout);
 	return commitWithoutLock();
 }
 
 template<typename T>
-inline ReturnValue_t LocalPoolVar<T>::commitWithoutLock() {
+inline ReturnValue_t LocalPoolVariable<T>::commitWithoutLock() {
 	if(readWriteMode == pool_rwm_t::VAR_READ) {
 		sif::debug << "LocalPoolVar: Invalid read write "
 				 "mode for commit() call." << std::endl;
@@ -81,88 +81,88 @@ inline ReturnValue_t LocalPoolVar<T>::commitWithoutLock() {
 }
 
 template<typename T>
-inline ReturnValue_t LocalPoolVar<T>::serialize(uint8_t** buffer, size_t* size,
+inline ReturnValue_t LocalPoolVariable<T>::serialize(uint8_t** buffer, size_t* size,
 		const size_t max_size, SerializeIF::Endianness streamEndianness) const {
 	return SerializeAdapter::serialize(&value,
 			buffer, size ,max_size, streamEndianness);
 }
 
 template<typename T>
-inline size_t LocalPoolVar<T>::getSerializedSize() const {
+inline size_t LocalPoolVariable<T>::getSerializedSize() const {
 	return SerializeAdapter::getSerializedSize(&value);
 }
 
 template<typename T>
-inline ReturnValue_t LocalPoolVar<T>::deSerialize(const uint8_t** buffer,
+inline ReturnValue_t LocalPoolVariable<T>::deSerialize(const uint8_t** buffer,
 		size_t* size, SerializeIF::Endianness streamEndianness) {
 	return SerializeAdapter::deSerialize(&value, buffer, size, streamEndianness);
 }
 
 template<typename T>
 inline std::ostream& operator<< (std::ostream &out,
-		const LocalPoolVar<T> &var) {
+		const LocalPoolVariable<T> &var) {
     out << var.value;
     return out;
 }
 
 template<typename T>
-inline LocalPoolVar<T>::operator T() const {
+inline LocalPoolVariable<T>::operator T() const {
 	return value;
 }
 
 template<typename T>
-inline LocalPoolVar<T> & LocalPoolVar<T>::operator=(const T& newValue) {
+inline LocalPoolVariable<T> & LocalPoolVariable<T>::operator=(const T& newValue) {
     value = newValue;
     return *this;
 }
 
 template<typename T>
-inline LocalPoolVar<T>& LocalPoolVar<T>::operator =(
-		const LocalPoolVar<T>& newPoolVariable) {
+inline LocalPoolVariable<T>& LocalPoolVariable<T>::operator =(
+		const LocalPoolVariable<T>& newPoolVariable) {
 	value = newPoolVariable.value;
 	return *this;
 }
 
 template<typename T>
-inline bool LocalPoolVar<T>::operator ==(const LocalPoolVar<T> &other) const {
+inline bool LocalPoolVariable<T>::operator ==(const LocalPoolVariable<T> &other) const {
 	return this->value == other.value;
 }
 
 template<typename T>
-inline bool LocalPoolVar<T>::operator ==(const T &other) const {
+inline bool LocalPoolVariable<T>::operator ==(const T &other) const {
 	return this->value == other;
 }
 
 
 template<typename T>
-inline bool LocalPoolVar<T>::operator !=(const LocalPoolVar<T> &other) const {
+inline bool LocalPoolVariable<T>::operator !=(const LocalPoolVariable<T> &other) const {
 	return not (*this == other);
 }
 
 template<typename T>
-inline bool LocalPoolVar<T>::operator !=(const T &other) const {
+inline bool LocalPoolVariable<T>::operator !=(const T &other) const {
 	return not (*this == other);
 }
 
 
 template<typename T>
-inline bool LocalPoolVar<T>::operator <(const LocalPoolVar<T> &other) const {
+inline bool LocalPoolVariable<T>::operator <(const LocalPoolVariable<T> &other) const {
 	return this->value < other.value;
 }
 
 template<typename T>
-inline bool LocalPoolVar<T>::operator <(const T &other) const {
+inline bool LocalPoolVariable<T>::operator <(const T &other) const {
 	return this->value < other;
 }
 
 
 template<typename T>
-inline bool LocalPoolVar<T>::operator >(const LocalPoolVar<T> &other) const {
+inline bool LocalPoolVariable<T>::operator >(const LocalPoolVariable<T> &other) const {
 	return not (*this < other);
 }
 
 template<typename T>
-inline bool LocalPoolVar<T>::operator >(const T &other) const {
+inline bool LocalPoolVariable<T>::operator >(const T &other) const {
 	return not (*this < other);
 }
 
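
The operator overloads above let pool variables be used much like their underlying type; assignment, comparison and stream output all act on the local copies. A short illustration with placeholder identifiers:

    lp_uint16_t current(objects::EXAMPLE_DEVICE, exampleIds::CURRENT);
    lp_uint16_t limit(objects::EXAMPLE_DEVICE, exampleIds::CURRENT_LIMIT);

    current.read(MutexIF::BLOCKING);
    limit.read(MutexIF::BLOCKING);
    if(limit < current) {
        // operator<< prints the local copy of the pool variable.
        sif::info << "Current " << current << " exceeds limit " << limit
                << std::endl;
    }
    current = 0;                       // assigns to the local copy only
    current.commit(MutexIF::BLOCKING); // writes the local copy to the pool
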
diff --git a/defaultcfg/fsfwconfig/FSFWConfig.h b/defaultcfg/fsfwconfig/FSFWConfig.h
index 7e19235c..e1815d05 100644
--- a/defaultcfg/fsfwconfig/FSFWConfig.h
+++ b/defaultcfg/fsfwconfig/FSFWConfig.h
@@ -15,12 +15,6 @@
 //! Can be used to enable additional debugging printouts for developing the FSFW
 #define FSFW_PRINT_VERBOSITY_LEVEL   0
 
-//! Defines the FIFO depth of each commanding service base which
-//! also determines how many commands a CSB service can handle in one cycle
-//! simulataneously. This will increase the required RAM for
-//! each CSB service !
-#define FSFW_CSB_FIFO_DEPTH			6
-
 //! If FSFW_OBJ_EVENT_TRANSLATION is set to one,
 //! additional output which requires the translation files translateObjects
 //! and translateEvents (and their compiled source files)
@@ -29,8 +23,8 @@
 #if FSFW_OBJ_EVENT_TRANSLATION == 1
 //! Specify whether info events are printed too.
 #define FSFW_DEBUG_INFO				1
-#include <translateObjects.h>
-#include <translateEvents.h>
+#include "objects/translateObjects.h"
+#include "events/translateEvents.h"
 #else
 #endif
 
@@ -50,6 +44,12 @@ static constexpr uint8_t FSFW_MISSION_TIMESTAMP_SIZE = 8;
 static constexpr size_t FSFW_EVENTMGMR_MATCHTREE_NODES = 240;
 static constexpr size_t FSFW_EVENTMGMT_EVENTIDMATCHERS = 120;
 static constexpr size_t FSFW_EVENTMGMR_RANGEMATCHERS   = 120;
+
+//! Defines the FIFO depth of each commanding service base which
+//! also determines how many commands a CSB service can handle in one cycle
+//! simultaneously. This will increase the required RAM for
+//! each CSB service!
+static constexpr uint8_t FSFW_CSB_FIFO_DEPTH = 6;
 }
 
 #endif /* CONFIG_FSFWCONFIG_H_ */
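
Since FSFW_CSB_FIFO_DEPTH is no longer a preprocessor define, code that sizes commanding service FIFOs has to reference the namespaced constant instead. A minimal sketch of the user-side change, assuming the enclosing namespace is fsfwconfig as in the default configuration header:

    // Before: plain preprocessor define.
    //     uint8_t fifoDepth = FSFW_CSB_FIFO_DEPTH;
    // After: namespaced compile-time constant.
    uint8_t fifoDepth = fsfwconfig::FSFW_CSB_FIFO_DEPTH;
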
diff --git a/defaultcfg/fsfwconfig/OBSWConfig.h b/defaultcfg/fsfwconfig/OBSWConfig.h
index a9f57638..8ad2cb67 100644
--- a/defaultcfg/fsfwconfig/OBSWConfig.h
+++ b/defaultcfg/fsfwconfig/OBSWConfig.h
@@ -3,6 +3,10 @@
 
 #include "OBSWVersion.h"
 
+#include "objects/systemObjectList.h"
+#include "events/subsystemIdRanges.h"
+#include "returnvalues/classIds.h"
+
 #ifdef __cplusplus
 namespace config {
 #endif
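
With these additional includes, the aggregated configuration header exposes the mission object list, subsystem ID ranges and return value class IDs through a single include. Illustrative snippet with a placeholder object name:

    #include "OBSWConfig.h"

    // objects:: is provided by objects/systemObjectList.h (placeholder entry).
    object_id_t handlerId = objects::EXAMPLE_DEVICE_HANDLER;
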
diff --git a/ipc/MutexFactory.h b/ipc/MutexFactory.h
index f8133d81..db505ff9 100644
--- a/ipc/MutexFactory.h
+++ b/ipc/MutexFactory.h
@@ -1,5 +1,5 @@
-#ifndef FRAMEWORK_IPC_MUTEXFACTORY_H_
-#define FRAMEWORK_IPC_MUTEXFACTORY_H_
+#ifndef FSFW_IPC_MUTEXFACTORY_H_
+#define FSFW_IPC_MUTEXFACTORY_H_
 
 #include "MutexIF.h"
 /**
@@ -31,4 +31,4 @@ private:
 
 
 
-#endif /* FRAMEWORK_IPC_MUTEXFACTORY_H_ */
+#endif /* FSFW_IPC_MUTEXFACTORY_H_ */
diff --git a/monitoring/MonitorBase.h b/monitoring/MonitorBase.h
index 530a3840..967f0f62 100644
--- a/monitoring/MonitorBase.h
+++ b/monitoring/MonitorBase.h
@@ -72,7 +72,7 @@ protected:
 	    return HasReturnvaluesIF::RETURN_OK;
 	}
 
-	LocalPoolVar<T> poolVariable;
+	LocalPoolVariable<T> poolVariable;
 };
 
 #endif /* FSFW_MONITORING_MONITORBASE_H_ */
diff --git a/osal/linux/BinarySemaphore.cpp b/osal/linux/BinarySemaphore.cpp
index 8c0eeae7..b81fa109 100644
--- a/osal/linux/BinarySemaphore.cpp
+++ b/osal/linux/BinarySemaphore.cpp
@@ -1,4 +1,4 @@
-#include "../../osal/linux/BinarySemaphore.h"
+#include "BinarySemaphore.h"
 #include "../../serviceinterface/ServiceInterfaceStream.h"
 
 extern "C" {
diff --git a/osal/linux/Clock.cpp b/osal/linux/Clock.cpp
index 4de18f83..6cddda35 100644
--- a/osal/linux/Clock.cpp
+++ b/osal/linux/Clock.cpp
@@ -76,25 +76,25 @@ timeval Clock::getUptime() {
 
 ReturnValue_t Clock::getUptime(timeval* uptime) {
     //TODO This is not posix compatible and delivers only seconds precision
-    // is the OS not called Linux?
-    //Linux specific file read but more precise
+    // Linux-specific file read, but more precise.
     double uptimeSeconds;
     if(std::ifstream("/proc/uptime",std::ios::in) >> uptimeSeconds){
         uptime->tv_sec = uptimeSeconds;
         uptime->tv_usec = uptimeSeconds *(double) 1e6 - (uptime->tv_sec *1e6);
     }
+    return HasReturnvaluesIF::RETURN_OK;
+}
 
-	//TODO This is not posix compatible and delivers only seconds precision
-    // I suggest this is moved into another clock function which will
-    // deliver second precision later.
+// Wait for a new FSFW Clock function which delivers the uptime in seconds.
+//uint32_t Clock::getUptimeSeconds() {
+//	//TODO This is not posix compatible and delivers only seconds precision
 //	struct sysinfo sysInfo;
 //	int result = sysinfo(&sysInfo);
 //	if(result != 0){
 //		return HasReturnvaluesIF::RETURN_FAILED;
 //	}
 //	return sysInfo.uptime;
-    return HasReturnvaluesIF::RETURN_OK;
-}
+//}
 
 ReturnValue_t Clock::getUptime(uint32_t* uptimeMs) {
 	timeval uptime;
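
The sysinfo-based variant that was commented out above could later back a dedicated seconds-precision uptime function. A hedged sketch of such a function, with a hypothetical free-standing signature that is not part of the framework yet:

    #include <sys/sysinfo.h>

    // Hypothetical helper; the commented-out Clock::getUptimeSeconds() above
    // would look similar once the corresponding Clock interface exists.
    ReturnValue_t getUptimeSeconds(uint32_t* uptimeSeconds) {
        struct sysinfo sysInfo;
        if(sysinfo(&sysInfo) != 0) {
            return HasReturnvaluesIF::RETURN_FAILED;
        }
        *uptimeSeconds = sysInfo.uptime;
        return HasReturnvaluesIF::RETURN_OK;
    }
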
diff --git a/osal/linux/MessageQueue.cpp b/osal/linux/MessageQueue.cpp
index bc14374a..cfadb793 100644
--- a/osal/linux/MessageQueue.cpp
+++ b/osal/linux/MessageQueue.cpp
@@ -3,6 +3,7 @@
 #include "../../objectmanager/ObjectManagerIF.h"
 
 #include <fstream>
+
 #include <fcntl.h>           /* For O_* constants */
 #include <sys/stat.h>        /* For mode constants */
 #include <cstring>
diff --git a/osal/linux/Mutex.h b/osal/linux/Mutex.h
index ecb47a33..cfce407f 100644
--- a/osal/linux/Mutex.h
+++ b/osal/linux/Mutex.h
@@ -1,10 +1,9 @@
-#ifndef OS_LINUX_MUTEX_H_
-#define OS_LINUX_MUTEX_H_
+#ifndef FSFW_OSAL_LINUX_MUTEX_H_
+#define FSFW_OSAL_LINUX_MUTEX_H_
 
 #include "../../ipc/MutexIF.h"
 #include <pthread.h>
 
-
 class Mutex : public MutexIF {
 public:
 	Mutex();
diff --git a/osal/linux/MutexFactory.cpp b/osal/linux/MutexFactory.cpp
index 8c2faf88..80211f8b 100644
--- a/osal/linux/MutexFactory.cpp
+++ b/osal/linux/MutexFactory.cpp
@@ -1,6 +1,7 @@
-#include "../../ipc/MutexFactory.h"
 #include "Mutex.h"
 
+#include "../../ipc/MutexFactory.h"
+
 //TODO: Different variant than the lazy loading in QueueFactory. What's better and why?
 MutexFactory* MutexFactory::factoryInstance = new MutexFactory();
 
diff --git a/osal/linux/PosixThread.cpp b/osal/linux/PosixThread.cpp
index ddb1f74f..55d74de3 100644
--- a/osal/linux/PosixThread.cpp
+++ b/osal/linux/PosixThread.cpp
@@ -1,5 +1,7 @@
-#include "../../serviceinterface/ServiceInterfaceStream.h"
 #include "PosixThread.h"
+
+#include "../../serviceinterface/ServiceInterfaceStream.h"
+
 #include <cstring>
 #include <errno.h>
 
@@ -149,8 +151,10 @@ void PosixThread::createTask(void* (*fnc_)(void*), void* arg_) {
 
 	status = pthread_attr_setstack(&attributes, stackPointer, stackSize);
 	if(status != 0){
-		sif::error << "Posix Thread attribute setStack failed with: " <<
-				strerror(status) << std::endl;
+		sif::error << "PosixThread::createTask: pthread_attr_setstack "
+				" failed with: " << strerror(status) <<  std::endl;
+		sif::error << "Make sure the specified stack size is valid and is "
+				"larger than the minimum allowed stack size." << std::endl;
 	}
 
 	status = pthread_attr_setinheritsched(&attributes, PTHREAD_EXPLICIT_SCHED);
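
The new hint about the minimum stack size can also be enforced programmatically before pthread_attr_setstack is called. A small sketch under the assumption that rounding up to the POSIX minimum is acceptable for the caller; the helper name is illustrative:

    #include <limits.h>    // PTHREAD_STACK_MIN

    // Illustrative helper: clamp a requested stack size to the POSIX minimum.
    size_t clampStackSize(size_t requestedSize) {
        if(requestedSize < PTHREAD_STACK_MIN) {
            return PTHREAD_STACK_MIN;
        }
        return requestedSize;
    }
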
diff --git a/osal/linux/SemaphoreFactory.cpp b/osal/linux/SemaphoreFactory.cpp
index e4710933..cfb5f12d 100644
--- a/osal/linux/SemaphoreFactory.cpp
+++ b/osal/linux/SemaphoreFactory.cpp
@@ -1,6 +1,7 @@
-#include "../../tasks/SemaphoreFactory.h"
 #include "BinarySemaphore.h"
 #include "CountingSemaphore.h"
+
+#include "../../tasks/SemaphoreFactory.h"
 #include "../../serviceinterface/ServiceInterfaceStream.h"
 
 SemaphoreFactory* SemaphoreFactory::factoryInstance = nullptr;
diff --git a/osal/linux/TaskFactory.cpp b/osal/linux/TaskFactory.cpp
index f507c767..d18a0316 100644
--- a/osal/linux/TaskFactory.cpp
+++ b/osal/linux/TaskFactory.cpp
@@ -1,5 +1,6 @@
 #include "FixedTimeslotTask.h"
 #include "PeriodicPosixTask.h"
+
 #include "../../tasks/TaskFactory.h"
 #include "../../returnvalues/HasReturnvaluesIF.h"
 
diff --git a/osal/linux/Timer.cpp b/osal/linux/Timer.cpp
index ee964baa..bae631d7 100644
--- a/osal/linux/Timer.cpp
+++ b/osal/linux/Timer.cpp
@@ -1,6 +1,7 @@
+#include "Timer.h"
 #include "../../serviceinterface/ServiceInterfaceStream.h"
 #include <errno.h>
-#include "Timer.h"
+
 
 Timer::Timer() {
 	sigevent sigEvent;
diff --git a/pus/Service3Housekeeping.cpp b/pus/Service3Housekeeping.cpp
new file mode 100644
index 00000000..175af026
--- /dev/null
+++ b/pus/Service3Housekeeping.cpp
@@ -0,0 +1,295 @@
+#include "Service3Housekeeping.h"
+#include "servicepackets/Service3Packets.h"
+#include "../datapoollocal/HasLocalDataPoolIF.h"
+
+
+Service3Housekeeping::Service3Housekeeping(object_id_t objectId, uint16_t apid,
+			uint8_t serviceId):
+		CommandingServiceBase(objectId, apid, serviceId,
+		NUM_OF_PARALLEL_COMMANDS, COMMAND_TIMEOUT_SECONDS) {}
+
+Service3Housekeeping::~Service3Housekeeping() {}
+
+ReturnValue_t Service3Housekeeping::isValidSubservice(uint8_t subservice) {
+	switch(static_cast<Subservice>(subservice)) {
+	case Subservice::ENABLE_PERIODIC_HK_REPORT_GENERATION:
+	case Subservice::DISABLE_PERIODIC_HK_REPORT_GENERATION:
+	case Subservice::ENABLE_PERIODIC_DIAGNOSTICS_REPORT_GENERATION:
+	case Subservice::DISABLE_PERIODIC_DIAGNOSTICS_REPORT_GENERATION:
+	case Subservice::REPORT_HK_REPORT_STRUCTURES:
+	case Subservice::REPORT_DIAGNOSTICS_REPORT_STRUCTURES:
+	case Subservice::GENERATE_ONE_PARAMETER_REPORT:
+	case Subservice::GENERATE_ONE_DIAGNOSTICS_REPORT:
+	case Subservice::MODIFY_PARAMETER_REPORT_COLLECTION_INTERVAL:
+	case Subservice::MODIFY_DIAGNOSTICS_REPORT_COLLECTION_INTERVAL:
+		return HasReturnvaluesIF::RETURN_OK;
+	// Telemetry or invalid subservice.
+	case Subservice::HK_DEFINITIONS_REPORT:
+	case Subservice::DIAGNOSTICS_DEFINITION_REPORT:
+	case Subservice::HK_REPORT:
+	case Subservice::DIAGNOSTICS_REPORT:
+	default:
+		return AcceptsTelecommandsIF::INVALID_SUBSERVICE;
+	}
+}
+
+ReturnValue_t Service3Housekeeping::getMessageQueueAndObject(uint8_t subservice,
+		const uint8_t *tcData, size_t tcDataLen,
+		MessageQueueId_t *id, object_id_t *objectId) {
+	ReturnValue_t result = checkAndAcquireTargetID(objectId,tcData,tcDataLen);
+	if(result != RETURN_OK) {
+		return result;
+	}
+	return checkInterfaceAndAcquireMessageQueue(id,objectId);
+}
+
+ReturnValue_t Service3Housekeeping::checkAndAcquireTargetID(
+		object_id_t* objectIdToSet, const uint8_t* tcData, size_t tcDataLen) {
+	if(SerializeAdapter::deSerialize(objectIdToSet, &tcData, &tcDataLen,
+	        SerializeIF::Endianness::BIG) != HasReturnvaluesIF::RETURN_OK) {
+		return CommandingServiceBase::INVALID_TC;
+	}
+	return HasReturnvaluesIF::RETURN_OK;
+}
+
+ReturnValue_t Service3Housekeeping::checkInterfaceAndAcquireMessageQueue(
+		MessageQueueId_t* messageQueueToSet, object_id_t* objectId) {
+	// check HasLocalDataPoolIF property of target
+	HasLocalDataPoolIF* possibleTarget =
+			objectManager->get<HasLocalDataPoolIF>(*objectId);
+	if(possibleTarget == nullptr){
+		return CommandingServiceBase::INVALID_OBJECT;
+	}
+	*messageQueueToSet = possibleTarget->getCommandQueue();
+	return HasReturnvaluesIF::RETURN_OK;
+}
+
+
+ReturnValue_t Service3Housekeeping::prepareCommand(CommandMessage* message,
+		uint8_t subservice, const uint8_t *tcData, size_t tcDataLen,
+		uint32_t *state, object_id_t objectId) {
+	switch(static_cast<Subservice>(subservice)) {
+	case Subservice::ENABLE_PERIODIC_HK_REPORT_GENERATION:
+		return prepareReportingTogglingCommand(message, objectId, true, false,
+				tcData, tcDataLen);
+	case Subservice::DISABLE_PERIODIC_HK_REPORT_GENERATION:
+		return prepareReportingTogglingCommand(message, objectId, false, false,
+				tcData, tcDataLen);
+	case Subservice::ENABLE_PERIODIC_DIAGNOSTICS_REPORT_GENERATION:
+		return prepareReportingTogglingCommand(message, objectId, true, true,
+				tcData, tcDataLen);
+	case Subservice::DISABLE_PERIODIC_DIAGNOSTICS_REPORT_GENERATION:
+		return prepareReportingTogglingCommand(message, objectId, false, true,
+				tcData, tcDataLen);
+	case Subservice::REPORT_HK_REPORT_STRUCTURES:
+		return prepareStructureReportingCommand(message, objectId, false, tcData,
+				tcDataLen);
+	case Subservice::REPORT_DIAGNOSTICS_REPORT_STRUCTURES:
+		return prepareStructureReportingCommand(message, objectId, true, tcData,
+				tcDataLen);
+	case Subservice::GENERATE_ONE_PARAMETER_REPORT:
+		return prepareOneShotReportCommand(message, objectId, false,
+				tcData, tcDataLen);
+	case Subservice::GENERATE_ONE_DIAGNOSTICS_REPORT:
+		return prepareOneShotReportCommand(message, objectId, true,
+				tcData, tcDataLen);
+	case Subservice::MODIFY_PARAMETER_REPORT_COLLECTION_INTERVAL:
+		return prepareCollectionIntervalModificationCommand(message, objectId,
+				false, tcData, tcDataLen);
+	case Subservice::MODIFY_DIAGNOSTICS_REPORT_COLLECTION_INTERVAL:
+		return prepareCollectionIntervalModificationCommand(message, objectId,
+				true, tcData, tcDataLen);
+	case Subservice::HK_DEFINITIONS_REPORT:
+	case Subservice::DIAGNOSTICS_DEFINITION_REPORT:
+	case Subservice::HK_REPORT:
+	case Subservice::DIAGNOSTICS_REPORT:
+		// Those are telemetry packets.
+		return CommandingServiceBase::INVALID_TC;
+	default:
+		// should never happen, subservice was already checked.
+		return HasReturnvaluesIF::RETURN_FAILED;
+	}
+	return HasReturnvaluesIF::RETURN_OK;
+}
+
+ReturnValue_t Service3Housekeeping::prepareReportingTogglingCommand(
+		CommandMessage *command, object_id_t objectId,
+		bool enableReporting, bool isDiagnostics,
+		const uint8_t* tcData, size_t tcDataLen) {
+	if(tcDataLen < sizeof(sid_t)) {
+		// TC data should consist of object ID and set ID.
+		return CommandingServiceBase::INVALID_TC;
+	}
+
+	sid_t targetSid = buildSid(objectId, &tcData, &tcDataLen);
+	HousekeepingMessage::setToggleReportingCommand(command, targetSid,
+			enableReporting, isDiagnostics);
+	return HasReturnvaluesIF::RETURN_OK;
+}
+
+ReturnValue_t Service3Housekeeping::prepareStructureReportingCommand(
+		CommandMessage *command, object_id_t objectId, bool isDiagnostics,
+		const uint8_t* tcData, size_t tcDataLen) {
+	if(tcDataLen < sizeof(sid_t)) {
+		// TC data should consist of object ID and set ID.
+		return CommandingServiceBase::INVALID_TC;
+	}
+
+	sid_t targetSid = buildSid(objectId, &tcData, &tcDataLen);
+	HousekeepingMessage::setStructureReportingCommand(command, targetSid,
+			isDiagnostics);
+	return HasReturnvaluesIF::RETURN_OK;
+}
+
+ReturnValue_t Service3Housekeeping::prepareOneShotReportCommand(
+		CommandMessage *command, object_id_t objectId, bool isDiagnostics,
+		const uint8_t *tcData, size_t tcDataLen) {
+	if(tcDataLen < sizeof(sid_t)) {
+		// TC data should consist of object ID and set ID.
+		return CommandingServiceBase::INVALID_TC;
+	}
+
+	sid_t targetSid = buildSid(objectId, &tcData, &tcDataLen);
+	HousekeepingMessage::setOneShotReportCommand(command, targetSid,
+			isDiagnostics);
+	return HasReturnvaluesIF::RETURN_OK;
+}
+
+ReturnValue_t Service3Housekeeping::prepareCollectionIntervalModificationCommand(
+		CommandMessage *command, object_id_t objectId, bool isDiagnostics,
+		const uint8_t *tcData, size_t tcDataLen) {
+	if(tcDataLen < sizeof(sid_t) + sizeof(float)) {
+		// SID plus the size of the new collection interval.
+		return CommandingServiceBase::INVALID_TC;
+	}
+
+	sid_t targetSid = buildSid(objectId, &tcData, &tcDataLen);
+	float newCollectionInterval = 0;
+	SerializeAdapter::deSerialize(&newCollectionInterval, &tcData, &tcDataLen,
+			SerializeIF::Endianness::BIG);
+	HousekeepingMessage::setCollectionIntervalModificationCommand(command,
+			targetSid, newCollectionInterval, isDiagnostics);
+	return HasReturnvaluesIF::RETURN_OK;
+}
+
+
+ReturnValue_t Service3Housekeeping::handleReply(const CommandMessage* reply,
+		Command_t previousCommand, uint32_t *state,
+		CommandMessage* optionalNextCommand, object_id_t objectId,
+		bool *isStep) {
+	Command_t command = reply->getCommand();
+	switch(command) {
+
+	case(HousekeepingMessage::HK_REPORT): {
+		ReturnValue_t result = generateHkReply(reply,
+				static_cast<uint8_t>(Subservice::HK_REPORT));
+		if(result != HasReturnvaluesIF::RETURN_OK) {
+			return result;
+		}
+		return CommandingServiceBase::EXECUTION_COMPLETE;
+	}
+
+	case(HousekeepingMessage::DIAGNOSTICS_REPORT): {
+		ReturnValue_t result = generateHkReply(reply,
+				static_cast<uint8_t>(Subservice::DIAGNOSTICS_REPORT));
+		if(result != HasReturnvaluesIF::RETURN_OK) {
+			return result;
+		}
+		return CommandingServiceBase::EXECUTION_COMPLETE;
+	}
+
+	case(HousekeepingMessage::HK_DEFINITIONS_REPORT): {
+		return generateHkReply(reply, static_cast<uint8_t>(
+				Subservice::HK_DEFINITIONS_REPORT));
+		break;
+	}
+	case(HousekeepingMessage::DIAGNOSTICS_DEFINITION_REPORT): {
+		return generateHkReply(reply, static_cast<uint8_t>(
+				Subservice::DIAGNOSTICS_DEFINITION_REPORT));
+		break;
+	}
+
+	case(HousekeepingMessage::HK_REQUEST_SUCCESS): {
+		return CommandingServiceBase::EXECUTION_COMPLETE;
+	}
+
+	case(HousekeepingMessage::HK_REQUEST_FAILURE): {
+		failureParameter1 = objectId;
+		ReturnValue_t error = HasReturnvaluesIF::RETURN_FAILED;
+		HousekeepingMessage::getHkRequestFailureReply(reply,&error);
+		failureParameter2 = error;
+		return CommandingServiceBase::EXECUTION_COMPLETE;
+	}
+
+	default:
+		sif::error << "Service3Housekeeping::handleReply: Invalid reply with "
+				<< "reply command " << command << "!" << std::endl;
+		return CommandingServiceBase::INVALID_REPLY;
+	}
+	return HasReturnvaluesIF::RETURN_OK;
+}
+
+void Service3Housekeeping::handleUnrequestedReply(
+		CommandMessage* reply) {
+	ReturnValue_t result = HasReturnvaluesIF::RETURN_OK;
+	Command_t command = reply->getCommand();
+
+	switch(command) {
+
+	case(HousekeepingMessage::DIAGNOSTICS_REPORT): {
+		result = generateHkReply(reply,
+				static_cast<uint8_t>(Subservice::DIAGNOSTICS_REPORT));
+		break;
+	}
+
+	case(HousekeepingMessage::HK_REPORT): {
+		result = generateHkReply(reply,
+				static_cast<uint8_t>(Subservice::HK_REPORT));
+		break;
+	}
+
+	default:
+		sif::error << "Service3Housekeeping::handleUnrequestedReply: Invalid "
+		        << "reply with " << "reply command " << command << "!"
+		        << std::endl;
+		return;
+	}
+
+	if(result != HasReturnvaluesIF::RETURN_OK) {
+		// Configuration error
+		sif::debug << "Service3Housekeeping::handleUnrequestedReply:"
+				<< "Could not generate reply!" << std::endl;
+	}
+}
+
+MessageQueueId_t Service3Housekeeping::getHkQueue() const {
+	return commandQueue->getId();
+}
+
+ReturnValue_t Service3Housekeeping::generateHkReply(
+		const CommandMessage* hkMessage, uint8_t subserviceId) {
+	store_address_t storeId;
+
+	sid_t sid = HousekeepingMessage::getHkDataReply(hkMessage, &storeId);
+	auto resultPair = IPCStore->getData(storeId);
+	if(resultPair.first != HasReturnvaluesIF::RETURN_OK) {
+		return resultPair.first;
+	}
+
+	HkPacket hkPacket(sid, resultPair.second.data(), resultPair.second.size());
+	return sendTmPacket(static_cast<uint8_t>(subserviceId),
+			hkPacket.hkData, hkPacket.hkSize, nullptr, 0);
+}
+
+sid_t Service3Housekeeping::buildSid(object_id_t objectId,
+		const uint8_t** tcData, size_t* tcDataLen) {
+	sid_t targetSid;
+	targetSid.objectId = objectId;
+	// skip deserialization of object ID, was already done.
+	*tcData += sizeof(object_id_t);
+	*tcDataLen -= sizeof(object_id_t);
+	// size check is expected to be performed beforehand!
+	SerializeAdapter::deSerialize(&targetSid.ownerSetId, tcData, tcDataLen,
+			SerializeIF::Endianness::BIG);
+	return targetSid;
+}
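
As a reference for command generation, a hedged sketch of the raw application data consumed by buildSid() and the commands above: a big-endian object ID followed by the owner set ID (together one sid_t), with subservices 31 and 32 expecting an additional float collection interval. All concrete values are placeholders:

    uint8_t appData[sizeof(sid_t)] = {};
    uint8_t* writePtr = appData;
    size_t serializedSize = 0;

    object_id_t targetObject = 0x44001337;  // placeholder object ID
    uint32_t ownerSetId = 1;  // width assumed to match sid_t::ownerSetId

    SerializeAdapter::serialize(&targetObject, &writePtr, &serializedSize,
            sizeof(appData), SerializeIF::Endianness::BIG);
    SerializeAdapter::serialize(&ownerSetId, &writePtr, &serializedSize,
            sizeof(appData), SerializeIF::Endianness::BIG);
    // For subservices 31 and 32, a 4 byte float (collection interval in
    // seconds) follows the set ID.
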
diff --git a/pus/Service3Housekeeping.h b/pus/Service3Housekeeping.h
new file mode 100644
index 00000000..269710ef
--- /dev/null
+++ b/pus/Service3Housekeeping.h
@@ -0,0 +1,105 @@
+#ifndef FSFW_PUS_SERVICE3HOUSEKEEPINGSERVICE_H_
+#define FSFW_PUS_SERVICE3HOUSEKEEPINGSERVICE_H_
+
+#include "../housekeeping/AcceptsHkPacketsIF.h"
+#include "../housekeeping/HousekeepingMessage.h"
+#include "../tmtcservices/CommandingServiceBase.h"
+
+/**
+ * @brief   Manages spacecraft housekeeping reports and
+ *          sends pool variables (temperature, GPS data, ...) to the ground.
+ *
+ * @details Full documentation: ECSS-E-70-41A or ECSS-E-ST-70-41C.
+ * The implementation is based on PUS-C.
+ *
+ * The housekeeping service type provides means to control and adapt the
+ * spacecraft reporting plan according to the mission phases.
+ * The housekeeping service type provides the visibility of any
+ * on-board parameters assembled in housekeeping parameter report structures
+ * or diagnostic parameter report structures as required for the mission.
+ * The parameter report structures used by the housekeeping service can
+ * be predefined on-board or created when needed.
+ *
+ * @author 	R. Mueller
+ * @ingroup pus_services
+ */
+class Service3Housekeeping: public CommandingServiceBase,
+		public AcceptsHkPacketsIF {
+public:
+	static constexpr uint8_t NUM_OF_PARALLEL_COMMANDS = 4;
+	static constexpr uint16_t COMMAND_TIMEOUT_SECONDS = 60;
+
+	Service3Housekeeping(object_id_t objectId, uint16_t apid, uint8_t serviceId);
+	virtual ~Service3Housekeeping();
+protected:
+	/* CSB abstract function implementations. See CSB documentation. */
+	ReturnValue_t isValidSubservice(uint8_t subservice) override;
+	ReturnValue_t getMessageQueueAndObject(uint8_t subservice,
+			const uint8_t *tcData, size_t tcDataLen, MessageQueueId_t *id,
+			object_id_t *objectId) override;
+	ReturnValue_t prepareCommand(CommandMessage* message,
+			uint8_t subservice, const uint8_t *tcData, size_t tcDataLen,
+			uint32_t *state, object_id_t objectId) override;
+	ReturnValue_t handleReply(const CommandMessage* reply,
+			Command_t previousCommand, uint32_t *state,
+			CommandMessage* optionalNextCommand, object_id_t objectId,
+			bool *isStep) override;
+
+	virtual MessageQueueId_t getHkQueue() const;
+private:
+	enum class Subservice {
+		ENABLE_PERIODIC_HK_REPORT_GENERATION = 5, //!< [EXPORT] : [TC]
+		DISABLE_PERIODIC_HK_REPORT_GENERATION = 6, //!< [EXPORT] : [TC]
+
+		ENABLE_PERIODIC_DIAGNOSTICS_REPORT_GENERATION = 7, //!< [EXPORT] : [TC]
+		DISABLE_PERIODIC_DIAGNOSTICS_REPORT_GENERATION = 8, //!< [EXPORT] : [TC]
+
+		//! [EXPORT] : [TC] Report HK structure by supplying SID
+		REPORT_HK_REPORT_STRUCTURES = 9,
+		//! [EXPORT] : [TC] Report Diagnostics structure by supplying SID
+		REPORT_DIAGNOSTICS_REPORT_STRUCTURES = 11,
+
+		//! [EXPORT] : [TM] Report corresponding to Subservice 9 TC
+		HK_DEFINITIONS_REPORT = 10,
+		//! [EXPORT] : [TM] Report corresponding to Subservice 11 TC
+		DIAGNOSTICS_DEFINITION_REPORT = 12,
+
+		//! [EXPORT] : [TM] Core packet. Contains Housekeeping data
+		HK_REPORT = 25,
+		//! [EXPORT] : [TM] Core packet. Contains diagnostics data
+		DIAGNOSTICS_REPORT = 26,
+
+		/* PUS-C */
+		GENERATE_ONE_PARAMETER_REPORT = 27, //!< [EXPORT] : [TC]
+		GENERATE_ONE_DIAGNOSTICS_REPORT = 28, //!< [EXPORT] : [TC]
+
+		MODIFY_PARAMETER_REPORT_COLLECTION_INTERVAL = 31, //!< [EXPORT] : [TC]
+		MODIFY_DIAGNOSTICS_REPORT_COLLECTION_INTERVAL = 32, //!< [EXPORT] : [TC]
+	};
+
+	ReturnValue_t checkAndAcquireTargetID(object_id_t* objectIdToSet,
+			const uint8_t* tcData, size_t tcDataLen);
+	ReturnValue_t checkInterfaceAndAcquireMessageQueue(
+			MessageQueueId_t* messageQueueToSet, object_id_t* objectId);
+
+	ReturnValue_t generateHkReply(const CommandMessage* hkMessage,
+			uint8_t subserviceId);
+	ReturnValue_t prepareReportingTogglingCommand(CommandMessage* command,
+			object_id_t objectId, bool enableReporting, bool isDiagnostics,
+			const uint8_t* tcData, size_t tcDataLen);
+	ReturnValue_t prepareStructureReportingCommand(CommandMessage* command,
+			object_id_t objectId, bool isDiagnostics, const uint8_t* tcData,
+			size_t tcDataLen);
+	ReturnValue_t prepareOneShotReportCommand(CommandMessage* command,
+			object_id_t objectId, bool isDiagnostics, const uint8_t* tcData,
+			size_t tcDataLen);
+	ReturnValue_t prepareCollectionIntervalModificationCommand(
+			CommandMessage* command, object_id_t objectId, bool isDiagnostics,
+			const uint8_t* tcData, size_t tcDataLen);
+
+	void handleUnrequestedReply(CommandMessage* reply) override;
+	sid_t buildSid(object_id_t objectId, const uint8_t** tcData,
+			size_t* tcDataLen);
+};
+
+#endif /* FSFW_PUS_SERVICE3HOUSEKEEPINGSERVICE_H_ */
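
For completeness, a hedged example of how the new service could be instantiated in a mission's object factory; the object ID, APID and service number below are placeholders:

    new Service3Housekeeping(0x51000300 /* placeholder object ID */,
            0x73 /* placeholder APID */, 3 /* PUS service number */);
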
diff --git a/pus/servicepackets/Service3Packets.h b/pus/servicepackets/Service3Packets.h
new file mode 100644
index 00000000..05732e11
--- /dev/null
+++ b/pus/servicepackets/Service3Packets.h
@@ -0,0 +1,21 @@
+#ifndef FSFW_PUS_SERVICEPACKETS_SERVICE3PACKETS_H_
+#define FSFW_PUS_SERVICEPACKETS_SERVICE3PACKETS_H_
+
+#include "../../housekeeping/HousekeepingMessage.h"
+#include <cstdint>
+
+/**
+ * @brief Subservices 25 and 26: TM packets
+ * @ingroup spacepackets
+ */
+class HkPacket { //!< [EXPORT] : [SUBSERVICE] 25, 26
+public:
+    sid_t sid; //!< [EXPORT] : [COMMENT] Structure ID (SID) of housekeeping data.
+    const uint8_t* hkData; //!< [EXPORT] : [MAXSIZE] Deduced size
+    size_t hkSize; //!< [EXPORT] : [IGNORE]
+
+    HkPacket(sid_t sid, const uint8_t* data, size_t size):
+            sid(sid), hkData(data), hkSize(size) {}
+};
+
+#endif /* FSFW_PUS_SERVICEPACKETS_SERVICE3PACKETS_H_ */
diff --git a/storagemanager/LocalPool.tpp b/storagemanager/LocalPool.tpp
deleted file mode 100644
index 5e61efe4..00000000
--- a/storagemanager/LocalPool.tpp
+++ /dev/null
@@ -1,305 +0,0 @@
-#ifndef FSFW_STORAGEMANAGER_LOCALPOOL_TPP_
-#define FSFW_STORAGEMANAGER_LOCALPOOL_TPP_
-
-#ifndef FSFW_STORAGEMANAGER_LOCALPOOL_H_
-#error Include LocalPool.h before LocalPool.tpp!
-#endif
-
-template<uint8_t NUMBER_OF_POOLS>
-inline LocalPool<NUMBER_OF_POOLS>::LocalPool(object_id_t setObjectId,
-		const uint16_t element_sizes[NUMBER_OF_POOLS],
-		const uint16_t n_elements[NUMBER_OF_POOLS], bool registered,
-		bool spillsToHigherPools) :
-		SystemObject(setObjectId, registered),  internalErrorReporter(nullptr),
-		spillsToHigherPools(spillsToHigherPools)
-{
-	for (uint16_t n = 0; n < NUMBER_OF_POOLS; n++) {
-		this->element_sizes[n] = element_sizes[n];
-		this->n_elements[n] = n_elements[n];
-		store[n] = new uint8_t[n_elements[n] * element_sizes[n]];
-		size_list[n] = new uint32_t[n_elements[n]];
-		memset(store[n], 0x00, (n_elements[n] * element_sizes[n]));
-		//TODO checkme
-		memset(size_list[n], STORAGE_FREE, (n_elements[n] * sizeof(**size_list)));
-	}
-}
-
-
-template<uint8_t NUMBER_OF_POOLS>
-inline ReturnValue_t LocalPool<NUMBER_OF_POOLS>::findEmpty(uint16_t pool_index,
-		uint16_t* element) {
-	ReturnValue_t status = DATA_STORAGE_FULL;
-	for (uint16_t foundElement = 0; foundElement < n_elements[pool_index];
-			foundElement++) {
-		if (size_list[pool_index][foundElement] == STORAGE_FREE) {
-			*element = foundElement;
-			status = RETURN_OK;
-			break;
-		}
-	}
-	return status;
-}
-
-template<uint8_t NUMBER_OF_POOLS>
-inline void LocalPool<NUMBER_OF_POOLS>::write(store_address_t packet_id,
-		const uint8_t* data, size_t size) {
-	uint8_t* ptr;
-	uint32_t packet_position = getRawPosition(packet_id);
-
-	//check size? -> Not necessary, because size is checked before calling this function.
-	ptr = &store[packet_id.pool_index][packet_position];
-	memcpy(ptr, data, size);
-	size_list[packet_id.pool_index][packet_id.packet_index] = size;
-}
-
-//Returns page size of 0 in case store_index is illegal
-template<uint8_t NUMBER_OF_POOLS>
-inline uint32_t LocalPool<NUMBER_OF_POOLS>::getPageSize(uint16_t pool_index) {
-	if (pool_index < NUMBER_OF_POOLS) {
-		return element_sizes[pool_index];
-	} else {
-		return 0;
-	}
-}
-
-template<uint8_t NUMBER_OF_POOLS>
-inline ReturnValue_t LocalPool<NUMBER_OF_POOLS>::getPoolIndex(
-		size_t packet_size, uint16_t* poolIndex, uint16_t startAtIndex) {
-	for (uint16_t n = startAtIndex; n < NUMBER_OF_POOLS; n++) {
-		//debug << "LocalPool " << getObjectId() << "::getPoolIndex: Pool: " <<
-		//		n << ", Element Size: " << element_sizes[n] << std::endl;
-		if (element_sizes[n] >= packet_size) {
-			*poolIndex = n;
-			return RETURN_OK;
-		}
-	}
-	return DATA_TOO_LARGE;
-}
-
-template<uint8_t NUMBER_OF_POOLS>
-inline uint32_t LocalPool<NUMBER_OF_POOLS>::getRawPosition(
-		store_address_t packet_id) {
-	return packet_id.packet_index * element_sizes[packet_id.pool_index];
-}
-
-template<uint8_t NUMBER_OF_POOLS>
-inline ReturnValue_t LocalPool<NUMBER_OF_POOLS>::reserveSpace(
-		const uint32_t size, store_address_t* address, bool ignoreFault) {
-	ReturnValue_t status = getPoolIndex(size, &address->pool_index);
-	if (status != RETURN_OK) {
-		sif::error << "LocalPool( " << std::hex << getObjectId() << std::dec
-				<< " )::reserveSpace: Packet too large." << std::endl;
-		return status;
-	}
-	status = findEmpty(address->pool_index, &address->packet_index);
-	while (status != RETURN_OK && spillsToHigherPools) {
-		status = getPoolIndex(size, &address->pool_index, address->pool_index + 1);
-		if (status != RETURN_OK) {
-			//We don't find any fitting pool anymore.
-			break;
-		}
-		status = findEmpty(address->pool_index, &address->packet_index);
-	}
-	if (status == RETURN_OK) {
-		// if (getObjectId() == objects::IPC_STORE && address->pool_index >= 3) {
-		//	   debug << "Reserve: Pool: " << std::dec << address->pool_index <<
-		//				" Index: " << address->packet_index << std::endl;
-		// }
-
-		size_list[address->pool_index][address->packet_index] = size;
-	} else {
-		if (!ignoreFault and internalErrorReporter != nullptr) {
-			internalErrorReporter->storeFull();
-		}
-		// error << "LocalPool( " << std::hex << getObjectId() << std::dec
-		// 			<< " )::reserveSpace: Packet store is full." << std::endl;
-	}
-	return status;
-}
-
-template<uint8_t NUMBER_OF_POOLS>
-inline LocalPool<NUMBER_OF_POOLS>::~LocalPool(void) {
-	for (uint16_t n = 0; n < NUMBER_OF_POOLS; n++) {
-		delete[] store[n];
-		delete[] size_list[n];
-	}
-}
-
-template<uint8_t NUMBER_OF_POOLS>
-inline ReturnValue_t LocalPool<NUMBER_OF_POOLS>::addData(
-		store_address_t* storageId, const uint8_t* data, size_t size,
-		bool ignoreFault) {
-	ReturnValue_t status = reserveSpace(size, storageId, ignoreFault);
-	if (status == RETURN_OK) {
-		write(*storageId, data, size);
-	}
-	return status;
-}
-
-template<uint8_t NUMBER_OF_POOLS>
-inline ReturnValue_t LocalPool<NUMBER_OF_POOLS>::getFreeElement(
-		store_address_t* storageId, const size_t size,
-		uint8_t** p_data, bool ignoreFault) {
-	ReturnValue_t status = reserveSpace(size, storageId, ignoreFault);
-	if (status == RETURN_OK) {
-		*p_data = &store[storageId->pool_index][getRawPosition(*storageId)];
-	} else {
-		*p_data = NULL;
-	}
-	return status;
-}
-
-template<uint8_t NUMBER_OF_POOLS>
-inline ConstAccessorPair LocalPool<NUMBER_OF_POOLS>::getData(
-		store_address_t storeId) {
-	uint8_t* tempData = nullptr;
-	ConstStorageAccessor constAccessor(storeId, this);
-	ReturnValue_t status = modifyData(storeId, &tempData, &constAccessor.size_);
-	constAccessor.constDataPointer = tempData;
-	return ConstAccessorPair(status, std::move(constAccessor));
-}
-
-template<uint8_t NUMBER_OF_POOLS>
-inline ReturnValue_t LocalPool<NUMBER_OF_POOLS>::getData(store_address_t storeId,
-		ConstStorageAccessor& storeAccessor) {
-	uint8_t* tempData = nullptr;
-	ReturnValue_t status = modifyData(storeId, &tempData, &storeAccessor.size_);
-	storeAccessor.assignStore(this);
-	storeAccessor.constDataPointer = tempData;
-	return status;
-}
-
-template<uint8_t NUMBER_OF_POOLS>
-inline ReturnValue_t LocalPool<NUMBER_OF_POOLS>::getData(
-		store_address_t packet_id, const uint8_t** packet_ptr, size_t* size) {
-	uint8_t* tempData = nullptr;
-	ReturnValue_t status = modifyData(packet_id, &tempData, size);
-	*packet_ptr = tempData;
-	return status;
-}
-
-template<uint8_t NUMBER_OF_POOLS>
-inline AccessorPair LocalPool<NUMBER_OF_POOLS>::modifyData(
-		store_address_t storeId) {
-	StorageAccessor accessor(storeId, this);
-	ReturnValue_t status = modifyData(storeId, &accessor.dataPointer,
-			&accessor.size_);
-	accessor.assignConstPointer();
-	return AccessorPair(status, std::move(accessor));
-}
-
-template<uint8_t NUMBER_OF_POOLS>
-inline ReturnValue_t LocalPool<NUMBER_OF_POOLS>::modifyData(
-		store_address_t storeId, StorageAccessor& storeAccessor) {
-	storeAccessor.assignStore(this);
-	ReturnValue_t status = modifyData(storeId, &storeAccessor.dataPointer,
-			&storeAccessor.size_);
-	storeAccessor.assignConstPointer();
-	return status;
-}
-
-template<uint8_t NUMBER_OF_POOLS>
-inline ReturnValue_t LocalPool<NUMBER_OF_POOLS>::modifyData(
-		store_address_t packet_id, uint8_t** packet_ptr, size_t* size) {
-	ReturnValue_t status = RETURN_FAILED;
-	if (packet_id.pool_index >= NUMBER_OF_POOLS) {
-		return ILLEGAL_STORAGE_ID;
-	}
-	if ((packet_id.packet_index >= n_elements[packet_id.pool_index])) {
-		return ILLEGAL_STORAGE_ID;
-	}
-	if (size_list[packet_id.pool_index][packet_id.packet_index]
-			!= STORAGE_FREE) {
-		uint32_t packet_position = getRawPosition(packet_id);
-		*packet_ptr = &store[packet_id.pool_index][packet_position];
-		*size = size_list[packet_id.pool_index][packet_id.packet_index];
-		status = RETURN_OK;
-	} else {
-		status = DATA_DOES_NOT_EXIST;
-	}
-	return status;
-}
-
-template<uint8_t NUMBER_OF_POOLS>
-inline ReturnValue_t LocalPool<NUMBER_OF_POOLS>::deleteData(
-		store_address_t packet_id) {
-	//if (getObjectId() == objects::IPC_STORE && packet_id.pool_index >= 3) {
-	//	debug << "Delete: Pool: " << std::dec << packet_id.pool_index << " Index: "
-	//	         << packet_id.packet_index << std::endl;
-	//}
-	ReturnValue_t status = RETURN_OK;
-	uint32_t page_size = getPageSize(packet_id.pool_index);
-	if ((page_size != 0)
-			&& (packet_id.packet_index < n_elements[packet_id.pool_index])) {
-		uint16_t packet_position = getRawPosition(packet_id);
-		uint8_t* ptr = &store[packet_id.pool_index][packet_position];
-		memset(ptr, 0, page_size);
-		//Set free list
-		size_list[packet_id.pool_index][packet_id.packet_index] = STORAGE_FREE;
-	} else {
-		//pool_index or packet_index is too large
-		sif::error << "LocalPool:deleteData failed." << std::endl;
-		status = ILLEGAL_STORAGE_ID;
-	}
-	return status;
-}
-
-template<uint8_t NUMBER_OF_POOLS>
-inline void LocalPool<NUMBER_OF_POOLS>::clearStore() {
-	for (uint16_t n = 0; n < NUMBER_OF_POOLS; n++) {
-		//TODO checkme
-		memset(size_list[n], STORAGE_FREE, (n_elements[n] * sizeof(**size_list)));
-	}
-}
-
-template<uint8_t NUMBER_OF_POOLS>
-inline ReturnValue_t LocalPool<NUMBER_OF_POOLS>::deleteData(uint8_t* ptr,
-		size_t size, store_address_t* storeId) {
-	store_address_t localId;
-	ReturnValue_t result = ILLEGAL_ADDRESS;
-	for (uint16_t n = 0; n < NUMBER_OF_POOLS; n++) {
-		//Not sure if new allocates all stores in order. so better be careful.
-		if ((store[n] <= ptr) && (&store[n][n_elements[n]*element_sizes[n]]) > ptr) {
-			localId.pool_index = n;
-			uint32_t deltaAddress = ptr - store[n];
-			// Getting any data from the right "block" is ok.
-			// This is necessary, as IF's sometimes don't point to the first
-			// element of an object.
-			localId.packet_index = deltaAddress / element_sizes[n];
-			result = deleteData(localId);
-			//if (deltaAddress % element_sizes[n] != 0) {
-			//	error << "Pool::deleteData: address not aligned!" << std::endl;
-			//}
-			break;
-		}
-	}
-	if (storeId != NULL) {
-		*storeId = localId;
-	}
-	return result;
-}
-
-template<uint8_t NUMBER_OF_POOLS>
-inline ReturnValue_t LocalPool<NUMBER_OF_POOLS>::initialize() {
-	ReturnValue_t result = SystemObject::initialize();
-	if (result != RETURN_OK) {
-		return result;
-	}
-	internalErrorReporter = objectManager->get<InternalErrorReporterIF>(
-			objects::INTERNAL_ERROR_REPORTER);
-	if (internalErrorReporter == nullptr){
-		return ObjectManagerIF::INTERNAL_ERR_REPORTER_UNINIT;
-	}
-
-	//Check if any pool size is large than the maximum allowed.
-	for (uint8_t count = 0; count < NUMBER_OF_POOLS; count++) {
-		if (element_sizes[count] >= STORAGE_FREE) {
-			sif::error << "LocalPool::initialize: Pool is too large! "
-					"Max. allowed size is: " << (STORAGE_FREE - 1) << std::endl;
-			return StorageManagerIF::POOL_TOO_LARGE;
-		}
-	}
-	return RETURN_OK;
-}
-
-#endif /* FSFW_STORAGEMANAGER_LOCALPOOL_TPP_ */
diff --git a/storagemanager/PoolManager.tpp b/storagemanager/PoolManager.tpp
deleted file mode 100644
index 2be44ece..00000000
--- a/storagemanager/PoolManager.tpp
+++ /dev/null
@@ -1,56 +0,0 @@
-#ifndef FRAMEWORK_STORAGEMANAGER_POOLMANAGER_TPP_
-#define FRAMEWORK_STORAGEMANAGER_POOLMANAGER_TPP_
-
-#ifndef FSFW_STORAGEMANAGER_POOLMANAGER_H_
-#error Include PoolManager.h before PoolManager.tpp!
-#endif
-
-template<uint8_t NUMBER_OF_POOLS>
-inline PoolManager<NUMBER_OF_POOLS>::PoolManager(object_id_t setObjectId,
-		const uint16_t element_sizes[NUMBER_OF_POOLS],
-		const uint16_t n_elements[NUMBER_OF_POOLS]) :
-		LocalPool<NUMBER_OF_POOLS>(setObjectId, element_sizes, n_elements, true) {
-	mutex = MutexFactory::instance()->createMutex();
-}
-
-template<uint8_t NUMBER_OF_POOLS>
-inline PoolManager<NUMBER_OF_POOLS>::~PoolManager(void) {
-	MutexFactory::instance()->deleteMutex(mutex);
-}
-
-template<uint8_t NUMBER_OF_POOLS>
-inline ReturnValue_t PoolManager<NUMBER_OF_POOLS>::reserveSpace(
-		const uint32_t size, store_address_t* address, bool ignoreFault) {
-	MutexHelper mutexHelper(mutex,MutexIF::WAITING, mutexTimeoutMs);
-	ReturnValue_t status = LocalPool<NUMBER_OF_POOLS>::reserveSpace(size,
-			address,ignoreFault);
-	return status;
-}
-
-template<uint8_t NUMBER_OF_POOLS>
-inline ReturnValue_t PoolManager<NUMBER_OF_POOLS>::deleteData(
-		store_address_t packet_id) {
-	// debug << "PoolManager( " << translateObject(getObjectId()) <<
-	//       " )::deleteData from store " << packet_id.pool_index <<
-	//       ". id is "<< packet_id.packet_index << std::endl;
-	MutexHelper mutexHelper(mutex,MutexIF::WAITING, mutexTimeoutMs);
-	ReturnValue_t status = LocalPool<NUMBER_OF_POOLS>::deleteData(packet_id);
-	return status;
-}
-
-template<uint8_t NUMBER_OF_POOLS>
-inline ReturnValue_t PoolManager<NUMBER_OF_POOLS>::deleteData(uint8_t* buffer,
-		size_t size, store_address_t* storeId) {
-	MutexHelper mutexHelper(mutex,MutexIF::WAITING, mutexTimeoutMs);
-	ReturnValue_t status = LocalPool<NUMBER_OF_POOLS>::deleteData(buffer,
-			size, storeId);
-	return status;
-}
-
-template<uint8_t NUMBER_OF_POOLS>
-inline void PoolManager<NUMBER_OF_POOLS>::setMutexTimeout(
-		uint32_t mutexTimeoutMs) {
-	this->mutexTimeout = mutexTimeoutMs;
-}
-
-#endif /* FRAMEWORK_STORAGEMANAGER_POOLMANAGER_TPP_ */