Merge pull request 'hotfixees' (#428) from hotfixees into develop
All checks were successful
EIVE/eive-obsw/pipeline/head This commit looks good
Reviewed-on: #428
commit 41f1eaec44
@@ -22,6 +22,7 @@ will consitute of a breaking change warranting a new major release:
 - Allow quicker transition for the EIVE system component by allowing consecutive TCS and ACS
   component commanding again.
 - Changed a lot of lock guards to use timeouts
+- Queue sizes of TCP/UDP servers increased from 20 to 50
 
 ## Fixed
 
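The changelog entry about lock guards using timeouts refers to bounded mutex acquisition. As a rough, self-contained C++ sketch of that pattern (standard library only, deliberately not the FSFW MutexGuard API), a scoped guard can try to take the lock for a bounded time and report failure instead of blocking a polling task indefinitely:

#include <chrono>
#include <cstdio>
#include <mutex>

// Illustrative only: a scoped guard that attempts to take a mutex for a
// bounded time instead of waiting forever. The real code uses FSFW mutex
// guards; this stand-in only demonstrates the timeout idea.
class TimedLockGuard {
 public:
  TimedLockGuard(std::timed_mutex& mtx, std::chrono::milliseconds timeout)
      : mtx_(mtx), locked_(mtx_.try_lock_for(timeout)) {}
  ~TimedLockGuard() {
    if (locked_) {
      mtx_.unlock();
    }
  }
  bool ownsLock() const { return locked_; }

 private:
  std::timed_mutex& mtx_;
  bool locked_;
};

int main() {
  std::timed_mutex deviceLock;
  TimedLockGuard guard(deviceLock, std::chrono::milliseconds(20));
  if (!guard.ownsLock()) {
    // A timeout surfaces as an error instead of hanging the calling task.
    std::puts("mutex acquisition timed out");
    return 1;
  }
  std::puts("mutex acquired within 20 ms");
  return 0;
}

The concrete timeout values used in the OBSW are not shown in this diff; the 20 ms here is only a placeholder.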
fsfw (Subproject)
@@ -1 +1 @@
-Subproject commit 6e17e45506b0d9834d3ae9ded6f044e13e3c4abd
+Subproject commit 6006c97e48b7e6dc3b45a832bdd027a510b67f16
@@ -28,17 +28,10 @@ ReturnValue_t Max31865RtdPolling::performOperation(uint8_t operationCode) {
   static_cast<void>(result);
   // Measured to take 0-1 ms in debug build
   // Stopwatch watch;
-  if (periodicInitHandling()) {
-#if OBSW_RTD_AUTO_MODE == 0
-    // 10 ms delay for VBIAS startup
-    TaskFactory::delayTask(10);
-#endif
-  } else {
-    // No devices usable (e.g. TCS board off)
-    return returnvalue::OK;
-  }
-
+  periodicInitHandling();
 #if OBSW_RTD_AUTO_MODE == 0
+  // 10 ms delay for VBIAS startup
+  TaskFactory::delayTask(10);
   result = periodicReadReqHandling();
   if (result != returnvalue::OK) {
     return result;
@@ -113,12 +106,12 @@ ReturnValue_t Max31865RtdPolling::periodicInitHandling() {
 
 ReturnValue_t Max31865RtdPolling::periodicReadReqHandling() {
   using namespace MAX31865;
+  updateActiveRtdsArray();
   // Now request one shot config for all active RTDs
   for (auto& rtd : rtds) {
     if (rtd == nullptr) {
       continue;
     }
-    updateActiveRtdsArray();
     if (activeRtdsArray[rtd->idx]) {
       ReturnValue_t result = writeCfgReg(rtd->spiCookie, BASE_CFG | (1 << CfgBitPos::ONE_SHOT));
       if (result != returnvalue::OK) {
@@ -134,12 +127,12 @@ ReturnValue_t Max31865RtdPolling::periodicReadReqHandling() {
 ReturnValue_t Max31865RtdPolling::periodicReadHandling() {
   using namespace MAX31865;
   auto result = returnvalue::OK;
+  updateActiveRtdsArray();
   // Now read the RTD values
   for (auto& rtd : rtds) {
     if (rtd == nullptr) {
       continue;
     }
-    updateActiveRtdsArray();
     if (activeRtdsArray[rtd->idx]) {
       // Please note that using the manual CS lock wrapper here is problematic. Might be a SPI
       // or hardware specific issue where the CS needs to be pulled high and then low again
@@ -95,7 +95,7 @@ ReturnValue_t SafeCtrl::safeNoMekf(timeval now, double *susDirB, bool susDirBVal
   }
 
   // change unit from uT to T
-  double magFieldBT[3] = {0,0,0};
+  double magFieldBT[3] = {0, 0, 0};
   VectorOperations<double>::mulScalar(magFieldB, 1e-6, magFieldBT, 3);
 
   // normalize sunDir and magDir
@@ -117,14 +117,16 @@ void ObjectFactory::produceGenericObjects(HealthTableIF** healthTable_, PusTmFun
 
 #if OBSW_ADD_TCPIP_SERVERS == 1
 #if OBSW_ADD_TMTC_UDP_SERVER == 1
-  auto udpBridge = new UdpTmTcBridge(objects::UDP_TMTC_SERVER, objects::CCSDS_PACKET_DISTRIBUTOR);
+  auto udpBridge =
+      new UdpTmTcBridge(objects::UDP_TMTC_SERVER, objects::CCSDS_PACKET_DISTRIBUTOR, 50);
   new UdpTcPollingTask(objects::UDP_TMTC_POLLING_TASK, objects::UDP_TMTC_SERVER);
   sif::info << "Created UDP server for TMTC commanding with listener port "
             << udpBridge->getUdpPort() << std::endl;
   udpBridge->setMaxNumberOfPacketsStored(config::MAX_STORED_CMDS_UDP);
 #endif
 #if OBSW_ADD_TMTC_TCP_SERVER == 1
-  auto tcpBridge = new TcpTmTcBridge(objects::TCP_TMTC_SERVER, objects::CCSDS_PACKET_DISTRIBUTOR);
+  auto tcpBridge =
+      new TcpTmTcBridge(objects::TCP_TMTC_SERVER, objects::CCSDS_PACKET_DISTRIBUTOR, 50);
   TcpTmTcServer::TcpConfig cfg(true, true);
   auto tcpServer = new TcpTmTcServer(objects::TCP_TMTC_POLLING_TASK, objects::TCP_TMTC_SERVER, cfg);
   // TCP is stream based. Use packet ID as start marker when parsing for space packets
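The extra constructor argument (50) passed to both bridges lines up with the changelog note that the TCP/UDP server queue sizes went from 20 to 50; reading it as the TMTC queue depth is an inference from that note. As a rough, self-contained sketch (not the FSFW message queue API), a bounded queue shows why the deeper setting tolerates longer command bursts before packets are dropped:

#include <cstddef>
#include <cstdio>
#include <queue>
#include <string>

// Illustrative only: a bounded queue that rejects new packets once full.
// A burst of 40 packets overflows a depth of 20 but fits into a depth of 50.
struct BoundedQueue {
  explicit BoundedQueue(size_t depth) : depth(depth) {}
  bool push(const std::string& packet) {
    if (q.size() >= depth) {
      return false;  // queue full, packet would be dropped
    }
    q.push(packet);
    return true;
  }
  size_t depth;
  std::queue<std::string> q;
};

int main() {
  for (size_t depth : {20u, 50u}) {
    BoundedQueue queue(depth);
    size_t dropped = 0;
    for (int i = 0; i < 40; ++i) {
      if (!queue.push("tc-packet")) {
        ++dropped;
      }
    }
    std::printf("depth %zu: %zu of 40 packets dropped\n", depth, dropped);
  }
  return 0;
}

Whether a full queue in the FSFW TMTC bridges actually drops or blocks depends on the framework; the sketch only illustrates the headroom gained by the larger depth.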