update UART example, update UART HAL
parent ce7a8665a3
commit 60305ef393
@@ -9,6 +9,7 @@ members = [
    "examples/embassy",
    "board-tests",
    "bootloader",
    # "flashloader",
]

exclude = [
@@ -9,17 +9,12 @@ cortex-m-rt = "0.7"
embedded-hal = "1"
embedded-io = "0.6"
rtt-target = { version = "0.5" }

# Even though we do not use this directly, we need to activate this feature explicitly
# so that RTIC compiles, because thumbv6 does not have CAS operations natively.
portable-atomic = { version = "1", features = ["unsafe-assume-single-core"] }
panic-rtt-target = { version = "0.1" }

[dependencies.va108xx-hal]
path = "../../va108xx-hal"

[dependencies.vorago-reb1]
path = "../../vorago-reb1"

[dependencies.rtic]
version = "2"
features = ["thumbv6-backend"]
@@ -31,3 +26,20 @@ features = ["cortex-m-systick"]
[dependencies.rtic-sync]
version = "1.3"
features = ["defmt-03"]

[dependencies.once_cell]
version = "1"
default-features = false
features = ["critical-section"]

[dependencies.ringbuf]
version = "0.4"
git = "https://github.com/us-irs/ringbuf.git"
branch = "use-portable-atomic-crate"
default-features = false

[dependencies.va108xx-hal]
path = "../../va108xx-hal"

[dependencies.vorago-reb1]
path = "../../vorago-reb1"
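The comment on the `portable-atomic` dependency above explains why the feature is required on
thumbv6. As a small illustration (not part of this commit; the counter and function names are
made up), enabling `unsafe-assume-single-core` is what makes atomic read-modify-write operations
like the following compile on that target:

```rust
use portable_atomic::{AtomicU32, Ordering};

// Hypothetical counter shared between an interrupt handler and thread-mode code.
static RX_BYTE_COUNT: AtomicU32 = AtomicU32::new(0);

fn on_byte_received() {
    // thumbv6 has no native CAS instructions, so `portable-atomic` emulates this
    // read-modify-write by briefly masking interrupts on single-core devices.
    RX_BYTE_COUNT.fetch_add(1, Ordering::Relaxed);
}
```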
@@ -10,10 +10,25 @@
#![no_main]
#![no_std]

use once_cell::sync::Lazy;
use ringbuf::StaticRb;

// Larger buffer for TC to be able to hold the possibly large memory write packets.
const RX_RING_BUF_SIZE: usize = 1024;

// Ring buffer for handling the variable sized telecommands.
static mut RINGBUF: Lazy<StaticRb<u8, RX_RING_BUF_SIZE>> =
    Lazy::new(StaticRb::<u8, RX_RING_BUF_SIZE>::default);

#[rtic::app(device = pac, dispatchers = [OC4])]
mod app {
    use super::*;
    use embedded_io::Write;
    use panic_rtt_target as _;
    use ringbuf::{
        traits::{Observer, Producer},
        CachingCons, StaticProd,
    };
    use rtic_example::SYSCLK_FREQ;
    use rtic_sync::make_channel;
    use rtt_target::{rprintln, rtt_init_print};
@@ -21,18 +36,18 @@ mod app {
        gpio::PinsB,
        pac,
        prelude::*,
        uart::{self, IrqCfg, IrqResult, UartWithIrqBase},
        uart::{self, IrqCfg, IrqContextTimeoutOrMaxSize, IrqResult, RxWithIrq},
    };

    #[local]
    struct Local {
        rx_info_tx: rtic_sync::channel::Sender<'static, RxInfo, 3>,
        rx_info_rx: rtic_sync::channel::Receiver<'static, RxInfo, 3>,
        data_producer: StaticProd<'static, u8, RX_RING_BUF_SIZE>,
        data_consumer: CachingCons<&'static StaticRb<u8, RX_RING_BUF_SIZE>>,
        uart_rx: RxWithIrq<pac::Uartb>,
    }

    #[shared]
    struct Shared {
        irq_uart: UartWithIrqBase<pac::Uartb>,
        rx_buf: [u8; 64],
    }

@@ -58,18 +73,19 @@
        let rx = gpiob.pb20.into_funsel_1();

        let irq_cfg = IrqCfg::new(pac::interrupt::OC3, true, true);
        let (mut irq_uart, _) =
            uart::Uart::new(&mut dp.sysconfig, 50.MHz(), dp.uartb, (tx, rx), 115200.Hz())
                .into_uart_with_irq(irq_cfg, Some(&mut dp.sysconfig), Some(&mut dp.irqsel))
                .downgrade();
        irq_uart
            .read_fixed_len_using_irq(64, true)
            .expect("Read initialization failed");
        let irq_uart =
            uart::Uart::new(&mut dp.sysconfig, 50.MHz(), dp.uartb, (tx, rx), 115200.Hz());
        let (tx, rx) = irq_uart.split();
        let mut rx = rx.into_rx_with_irq(&dp.irqsel, pac::interrupt::OC3);

        let mut context = IrqContextTimeoutOrMaxSize::new(64);
        rx.read_fixed_len_or_timeout_based_using_irq(&mut context)
            .expect("UART RX init failed");

        let (rx_info_tx, rx_info_rx) = make_channel!(RxInfo, 3);
        let rx_buf: [u8; 64] = [0; 64];
        (
            Shared { irq_uart, rx_buf },
            Shared { uart_rx, rx_buf },
            Local {
                rx_info_tx,
                rx_info_rx,
@@ -87,57 +103,34 @@

    #[task(
        binds = OC3,
        shared = [irq_uart, rx_buf],
        local = [cnt: u32 = 0, result: IrqResult = IrqResult::new(), rx_info_tx],
        shared = [rx_buf],
        local = [
            uart_rx,
            data_producer
        ],
    )]
    fn reception_task(cx: reception_task::Context) {
        let result = cx.local.result;
        let cnt: &mut u32 = cx.local.cnt;
        let irq_uart = cx.shared.irq_uart;
        let rx_buf = cx.shared.rx_buf;
        let (completed, end_idx) = (irq_uart, rx_buf).lock(|irq_uart, rx_buf| {
            match irq_uart.irq_handler(result, rx_buf) {
                Ok(_) => {
                    if result.complete() {
                        // Initiate next transfer immediately
                        irq_uart
                            .read_fixed_len_using_irq(64, true)
                            .expect("Read operation init failed");

                        let mut end_idx = 0;
                        for (idx, val) in rx_buf.iter().enumerate() {
                            if (*val as char) == '\n' {
                                end_idx = idx;
                                break;
                            }
                        }
                        (true, end_idx)
                    } else {
                        (false, 0)
                    }
                }
                Err(e) => {
                    rprintln!("reception error {:?}", e);
                    (false, 0)
                }
        let mut buf: [u8; 16] = [0; 16];
        let mut ringbuf_full = false;
        let result = cx.local.uart_rx.irq_handler(&mut buf);
        if result.bytes_read > 0 && result.errors.is_none() {
            if cx.local.data_producer.vacant_len() < result.bytes_read {
                ringbuf_full = true;
            } else {
                cx.local
                    .data_producer
                    .push_slice(&buf[0..result.bytes_read]);
            }
        });
        if completed {
            rprintln!("counter: {}", cnt);
            cx.local
                .rx_info_tx
                .try_send(RxInfo {
                    bytes_read: result.bytes_read,
                    end_idx,
                    timeout: result.timeout(),
                })
                .expect("RX queue full");
        }
        *cnt += 1;
        if ringbuf_full {
            // Could also drop oldest data, but that would require the consumer to be shared.
            rprintln!("buffer full, data was dropped");
        }
    }

    #[task(shared = [irq_uart, rx_buf], local = [rx_info_rx], priority=1)]
    async fn reply_handler(cx: reply_handler::Context) {
    #[task(shared = [rx_buf], local = [data_consumer], priority=1)]
    async fn echo_handler(cx: echo_handler::Context) {
        /*
        let mut irq_uart = cx.shared.irq_uart;
        let mut rx_buf = cx.shared.rx_buf;
        loop {
@@ -161,5 +154,6 @@ mod app {
                }
            }
        }
        */
    }
}
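The reworked example above moves the received bytes from the OC3 interrupt handler into a
statically allocated `ringbuf` ring buffer and drains them in the `echo_handler` task. A minimal
host-side sketch of that producer/consumer split, using the same `ringbuf` 0.4 calls
(`split_ref`, `vacant_len`, `push_slice`, `pop_slice`) with an arbitrary buffer size and payload:

```rust
use ringbuf::{
    traits::{Consumer, Observer, Producer, SplitRef},
    StaticRb,
};

fn main() {
    // One statically sized ring buffer, split into a producer (interrupt handler side)
    // and a consumer (task side).
    let mut rb = StaticRb::<u8, 64>::default();
    let (mut producer, mut consumer) = rb.split_ref();

    // "Interrupt handler" side: only push the data if it fits, otherwise report the drop.
    let received = b"hello\n";
    if producer.vacant_len() < received.len() {
        println!("buffer full, data was dropped");
    } else {
        producer.push_slice(received);
    }

    // "Task" side: drain whatever is available and echo it back.
    let mut read_buf = [0u8; 64];
    let read = consumer.pop_slice(&mut read_buf);
    assert_eq!(&read_buf[..read], &received[..]);
    println!("echoing {} bytes back", read);
}
```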
1 flashloader/.gitignore vendored Normal file
@@ -0,0 +1 @@
/venv
50 flashloader/Cargo.toml Normal file
@@ -0,0 +1,50 @@
|
||||
[package]
|
||||
name = "flashloader"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
|
||||
[dependencies]
|
||||
cortex-m = "0.7"
|
||||
cortex-m-rt = "0.7"
|
||||
embedded-hal = "1"
|
||||
embedded-hal-nb = "1"
|
||||
embedded-io = "0.6"
|
||||
panic-rtt-target = { version = "0.1.3" }
|
||||
rtt-target = { version = "0.5" }
|
||||
rtt-log = "0.3"
|
||||
log = "0.4"
|
||||
crc = "3"
|
||||
rtic-sync = "1"
|
||||
|
||||
[dependencies.satrs]
|
||||
version = "0.2"
|
||||
default-features = false
|
||||
|
||||
[dependencies.ringbuf]
|
||||
version = "0.4"
|
||||
default-features = false
|
||||
|
||||
[dependencies.once_cell]
|
||||
version = "1"
|
||||
default-features = false
|
||||
features = ["critical-section"]
|
||||
|
||||
[dependencies.spacepackets]
|
||||
version = "0.11"
|
||||
default-features = false
|
||||
|
||||
[dependencies.cobs]
|
||||
git = "https://github.com/robamu/cobs.rs.git"
|
||||
branch = "all_features"
|
||||
default-features = false
|
||||
|
||||
[dependencies.va108xx-hal]
|
||||
path = "../va108xx-hal"
|
||||
|
||||
[dependencies.rtic]
|
||||
version = "2"
|
||||
features = ["thumbv7-backend"]
|
||||
|
||||
[dependencies.rtic-monotonics]
|
||||
version = "2"
|
||||
features = ["cortex-m-systick"]
|
66 flashloader/README.md Normal file
@@ -0,0 +1,66 @@
VA416xx Flashloader Application
========

This flashloader is a minimal example of a self-updatable Rust software which exposes a simple
PUS (CCSDS) interface to update the software. It also provides a Python application called
`image-loader.py` which can be used to upload compiled images to the flashloader application,
which then writes them to the NVM.

Please note that both the application and the image loader are tailored towards usage
with the [bootloader provided by this repository](https://egit.irs.uni-stuttgart.de/rust/va416xx-rs/src/branch/main/bootloader).

Because it uses a low-level CCSDS based packet interface, the software can quickly be adapted
to interface with a real primary on-board software instead of the Python script provided here
to upload images.

## Using the Python image loader

The Python image loader communicates with the Rust flashloader application using a dedicated
serial port with a baud rate of 115200.

It is recommended to run the script in a dedicated virtual environment. For example, on UNIX
systems you can use `python3 -m venv venv` and then `source venv/bin/activate` to create
and activate a virtual environment.

After that, you can use

```sh
pip install -r requirements.txt
```

to install all required dependencies.

After that, it is recommended to use `./image-loader.py -h` to get an overview of the available
options. The flashloader uses the UART0 interface of the VA416xx board to perform CCSDS based
communication. The Python image loader application will search for a file named `loader.toml` and
use the `serial_port` key to determine the serial port to use for serial communication.

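For reference, a minimal `loader.toml` matching the sample file added further below in this
commit looks like this:

```toml
# Serial port used to talk to the flashloader application.
serial_port = "/dev/ttyUSB0"
```
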
### Examples

You can use

```sh
./image-loader.py -p
```

to send a ping and verify the connection.

You can use

```sh
cd flashloader/slot-a-blinky
cargo build --release
cd ../..
./image-loader.py -t a ./slot-a-blinky/target/thumbv7em-none-eabihf/release/slot-a-blinky
```

to build the slot A sample application and upload it to a running flashloader application,
which writes it to slot A.

You can use

```sh
./image-loader.py -c -t a
```

to corrupt image A and test that the bootloader switches to image B instead after a failed CRC
check.
430 flashloader/image-loader.py Executable file
@@ -0,0 +1,430 @@
|
||||
#!/usr/bin/env python3
|
||||
from typing import List, Tuple
|
||||
from spacepackets.ecss.defs import PusService
|
||||
from spacepackets.ecss.tm import PusTm
|
||||
from tmtccmd.com import ComInterface
|
||||
import toml
|
||||
import struct
|
||||
import logging
|
||||
import argparse
|
||||
import time
|
||||
import enum
|
||||
from tmtccmd.com.serial_base import SerialCfg
|
||||
from tmtccmd.com.serial_cobs import SerialCobsComIF
|
||||
from tmtccmd.com.ser_utils import prompt_com_port
|
||||
from crcmod.predefined import PredefinedCrc
|
||||
from spacepackets.ecss.tc import PusTc
|
||||
from spacepackets.ecss.pus_verificator import PusVerificator, StatusField
|
||||
from spacepackets.ecss.pus_1_verification import Service1Tm, UnpackParams
|
||||
from spacepackets.seqcount import SeqCountProvider
|
||||
from pathlib import Path
|
||||
import dataclasses
|
||||
from elftools.elf.elffile import ELFFile
|
||||
|
||||
|
||||
BAUD_RATE = 115200
|
||||
|
||||
BOOTLOADER_START_ADDR = 0x0
|
||||
BOOTLOADER_END_ADDR = 0x4000
|
||||
BOOTLOADER_CRC_ADDR = BOOTLOADER_END_ADDR - 4
|
||||
BOOTLOADER_MAX_SIZE = BOOTLOADER_END_ADDR - BOOTLOADER_START_ADDR - 4
|
||||
|
||||
APP_A_START_ADDR = 0x4000
|
||||
APP_A_END_ADDR = 0x22000
|
||||
# The actual size of the image which is relevant for CRC calculation.
|
||||
APP_A_SIZE_ADDR = APP_A_END_ADDR - 8
|
||||
APP_A_CRC_ADDR = APP_A_END_ADDR - 4
|
||||
APP_A_MAX_SIZE = APP_A_END_ADDR - APP_A_START_ADDR - 8
|
||||
|
||||
APP_B_START_ADDR = 0x22000
|
||||
APP_B_END_ADDR = 0x40000
|
||||
# The actual size of the image which is relevant for CRC calculation.
|
||||
APP_B_SIZE_ADDR = APP_B_END_ADDR - 8
|
||||
APP_B_CRC_ADDR = APP_B_END_ADDR - 4
|
||||
APP_B_MAX_SIZE = APP_B_END_ADDR - APP_B_START_ADDR - 8
|
||||
|
||||
APP_IMG_SZ = (APP_B_END_ADDR - APP_A_START_ADDR) // 2
|
||||
|
||||
CHUNK_SIZE = 896
|
||||
|
||||
MEMORY_SERVICE = 6
|
||||
ACTION_SERVICE = 8
|
||||
|
||||
RAW_MEMORY_WRITE_SUBSERVICE = 2
|
||||
BOOT_NVM_MEMORY_ID = 1
|
||||
PING_PAYLOAD_SIZE = 0
|
||||
|
||||
|
||||
class ActionId(enum.IntEnum):
|
||||
CORRUPT_APP_A = 128
|
||||
CORRUPT_APP_B = 129
|
||||
|
||||
|
||||
_LOGGER = logging.getLogger(__name__)
|
||||
SEQ_PROVIDER = SeqCountProvider(bit_width=14)
|
||||
|
||||
|
||||
@dataclasses.dataclass
|
||||
class LoadableSegment:
|
||||
name: str
|
||||
offset: int
|
||||
size: int
|
||||
data: bytes
|
||||
|
||||
|
||||
class Target(enum.Enum):
|
||||
BOOTLOADER = 0
|
||||
APP_A = 1
|
||||
APP_B = 2
|
||||
|
||||
|
||||
class ImageLoader:
|
||||
def __init__(self, com_if: ComInterface, verificator: PusVerificator) -> None:
|
||||
self.com_if = com_if
|
||||
self.verificator = verificator
|
||||
|
||||
def handle_ping_cmd(self):
|
||||
_LOGGER.info("Sending ping command")
|
||||
ping_tc = PusTc(
|
||||
apid=0x00,
|
||||
service=PusService.S17_TEST,
|
||||
subservice=1,
|
||||
seq_count=SEQ_PROVIDER.get_and_increment(),
|
||||
app_data=bytes(PING_PAYLOAD_SIZE),
|
||||
)
|
||||
self.verificator.add_tc(ping_tc)
|
||||
self.com_if.send(bytes(ping_tc.pack()))
|
||||
|
||||
data_available = self.com_if.data_available(0.4)
|
||||
if not data_available:
|
||||
_LOGGER.warning("no ping reply received")
|
||||
for reply in self.com_if.receive():
|
||||
result = self.verificator.add_tm(
|
||||
Service1Tm.from_tm(PusTm.unpack(reply, 0), UnpackParams(0))
|
||||
)
|
||||
if result is not None and result.completed:
|
||||
_LOGGER.info("received ping completion reply")
|
||||
|
||||
def handle_corruption_cmd(self, target: Target):
|
||||
|
||||
if target == Target.BOOTLOADER:
|
||||
_LOGGER.error("can not corrupt bootloader")
|
||||
if target == Target.APP_A:
|
||||
self.send_tc(
|
||||
PusTc(
|
||||
apid=0,
|
||||
service=ACTION_SERVICE,
|
||||
subservice=ActionId.CORRUPT_APP_A,
|
||||
),
|
||||
)
|
||||
if target == Target.APP_B:
|
||||
self.send_tc(
|
||||
PusTc(
|
||||
apid=0,
|
||||
service=ACTION_SERVICE,
|
||||
subservice=ActionId.CORRUPT_APP_B,
|
||||
),
|
||||
)
|
||||
|
||||
def handle_flash_cmd(self, target: Target, file_path: Path) -> int:
|
||||
loadable_segments = []
|
||||
_LOGGER.info("Parsing ELF file for loadable sections")
|
||||
total_size = 0
|
||||
loadable_segments, total_size = create_loadable_segments(target, file_path)
|
||||
segments_info_str(target, loadable_segments, total_size, file_path)
|
||||
result = self._perform_flashing_algorithm(loadable_segments)
|
||||
if result != 0:
|
||||
return result
|
||||
self._crc_and_app_size_postprocessing(target, total_size, loadable_segments)
|
||||
return 0
|
||||
|
||||
def _perform_flashing_algorithm(
|
||||
self,
|
||||
loadable_segments: List[LoadableSegment],
|
||||
) -> int:
|
||||
# Perform the flashing algorithm.
|
||||
for segment in loadable_segments:
|
||||
segment_end = segment.offset + segment.size
|
||||
current_addr = segment.offset
|
||||
pos_in_segment = 0
|
||||
while pos_in_segment < segment.size:
|
||||
next_chunk_size = min(segment_end - current_addr, CHUNK_SIZE)
|
||||
data = segment.data[pos_in_segment : pos_in_segment + next_chunk_size]
|
||||
next_packet = pack_memory_write_command(current_addr, data)
|
||||
_LOGGER.info(
|
||||
f"Sending memory write command for address {current_addr:#08x} and data with "
|
||||
f"length {len(data)}"
|
||||
)
|
||||
self.verificator.add_tc(next_packet)
|
||||
self.com_if.send(bytes(next_packet.pack()))
|
||||
current_addr += next_chunk_size
|
||||
pos_in_segment += next_chunk_size
|
||||
start_time = time.time()
|
||||
while True:
|
||||
if time.time() - start_time > 1.0:
|
||||
_LOGGER.error("Timeout while waiting for reply")
|
||||
return -1
|
||||
data_available = self.com_if.data_available(0.1)
|
||||
done = False
|
||||
if not data_available:
|
||||
continue
|
||||
replies = self.com_if.receive()
|
||||
for reply in replies:
|
||||
tm = PusTm.unpack(reply, 0)
|
||||
if tm.service != 1:
|
||||
continue
|
||||
service_1_tm = Service1Tm.from_tm(tm, UnpackParams(0))
|
||||
check_result = self.verificator.add_tm(service_1_tm)
|
||||
# We could send after we have received the step reply, but that can
|
||||
# somehow lead to overrun errors. I think it's okay to do it like
|
||||
# this as long as the flash loader only uses polling..
|
||||
if (
|
||||
check_result is not None
|
||||
and check_result.status.completed == StatusField.SUCCESS
|
||||
):
|
||||
done = True
|
||||
|
||||
# This is an optimized variant, but I think the small delay is not an issue.
|
||||
"""
|
||||
if (
|
||||
check_result is not None
|
||||
and check_result.status.step == StatusField.SUCCESS
|
||||
and len(check_result.status.step_list) == 1
|
||||
):
|
||||
done = True
|
||||
"""
|
||||
self.verificator.remove_completed_entries()
|
||||
if done:
|
||||
break
|
||||
return 0
|
||||
|
||||
def _crc_and_app_size_postprocessing(
|
||||
self,
|
||||
target: Target,
|
||||
total_size: int,
|
||||
loadable_segments: List[LoadableSegment],
|
||||
):
|
||||
if target == Target.BOOTLOADER:
|
||||
_LOGGER.info("Blanking the bootloader checksum")
|
||||
# Blank the checksum. For the bootloader, the bootloader will calculate the
|
||||
# checksum itself on the initial run.
|
||||
checksum_write_packet = pack_memory_write_command(
|
||||
BOOTLOADER_CRC_ADDR, bytes([0x00, 0x00, 0x00, 0x00])
|
||||
)
|
||||
self.send_tc(checksum_write_packet)
|
||||
else:
|
||||
crc_addr = None
|
||||
size_addr = None
|
||||
if target == Target.APP_A:
|
||||
crc_addr = APP_A_CRC_ADDR
|
||||
size_addr = APP_A_SIZE_ADDR
|
||||
elif target == Target.APP_B:
|
||||
crc_addr = APP_B_CRC_ADDR
|
||||
size_addr = APP_B_SIZE_ADDR
|
||||
assert crc_addr is not None
|
||||
assert size_addr is not None
|
||||
_LOGGER.info(f"Writing app size {total_size} at address {size_addr:#08x}")
|
||||
size_write_packet = pack_memory_write_command(
|
||||
size_addr, struct.pack("!I", total_size)
|
||||
)
|
||||
self.com_if.send(bytes(size_write_packet.pack()))
|
||||
time.sleep(0.2)
|
||||
crc_calc = PredefinedCrc("crc-32")
|
||||
for segment in loadable_segments:
|
||||
crc_calc.update(segment.data)
|
||||
checksum = crc_calc.digest()
|
||||
_LOGGER.info(
|
||||
f"Writing checksum 0x[{checksum.hex(sep=',')}] at address {crc_addr:#08x}"
|
||||
)
|
||||
self.send_tc(pack_memory_write_command(crc_addr, checksum))
|
||||
|
||||
def send_tc(self, tc: PusTc):
|
||||
self.com_if.send(bytes(tc.pack()))
|
||||
|
||||
|
||||
def main() -> int:
|
||||
print("Python VA416XX Image Loader Application")
|
||||
logging.basicConfig(
|
||||
format="[%(asctime)s] [%(levelname)s] %(message)s", level=logging.DEBUG
|
||||
)
|
||||
parser = argparse.ArgumentParser(
|
||||
prog="image-loader", description="Python VA416XX Image Loader Application"
|
||||
)
|
||||
parser.add_argument("-p", "--ping", action="store_true", help="Send ping command")
|
||||
parser.add_argument("-c", "--corrupt", action="store_true", help="Corrupt a target")
|
||||
parser.add_argument(
|
||||
"-t",
|
||||
"--target",
|
||||
choices=["bl", "a", "b"],
|
||||
help="Target (Bootloader or slot A or B)",
|
||||
)
|
||||
parser.add_argument(
|
||||
"path", nargs="?", default=None, help="Path to the App to flash"
|
||||
)
|
||||
args = parser.parse_args()
|
||||
serial_port = None
|
||||
if Path("loader.toml").exists():
|
||||
with open("loader.toml", "r") as toml_file:
|
||||
parsed_toml = toml.loads(toml_file.read())
|
||||
if "serial_port" in parsed_toml:
|
||||
serial_port = parsed_toml["serial_port"]
|
||||
if serial_port is None:
|
||||
serial_port = prompt_com_port()
|
||||
serial_cfg = SerialCfg(
|
||||
com_if_id="ser_cobs",
|
||||
serial_port=serial_port,
|
||||
baud_rate=BAUD_RATE,
|
||||
serial_timeout=0.1,
|
||||
)
|
||||
verificator = PusVerificator()
|
||||
com_if = SerialCobsComIF(serial_cfg)
|
||||
com_if.open()
|
||||
target = None
|
||||
if args.target == "bl":
|
||||
target = Target.BOOTLOADER
|
||||
elif args.target == "a":
|
||||
target = Target.APP_A
|
||||
elif args.target == "b":
|
||||
target = Target.APP_B
|
||||
image_loader = ImageLoader(com_if, verificator)
|
||||
file_path = None
|
||||
result = -1
|
||||
if args.ping:
|
||||
image_loader.handle_ping_cmd()
|
||||
com_if.close()
|
||||
return 0
|
||||
if target:
|
||||
if not args.corrupt:
|
||||
if not args.path:
|
||||
_LOGGER.error("App Path needs to be specified for the flash process")
|
||||
file_path = Path(args.path)
|
||||
if not file_path.exists():
|
||||
_LOGGER.error("File does not exist")
|
||||
if args.corrupt:
|
||||
if not target:
|
||||
_LOGGER.error("target for corruption command required")
|
||||
com_if.close()
|
||||
return -1
|
||||
image_loader.handle_corruption_cmd(target)
|
||||
else:
|
||||
assert file_path is not None
|
||||
assert target is not None
|
||||
result = image_loader.handle_flash_cmd(target, file_path)
|
||||
|
||||
com_if.close()
|
||||
return result
|
||||
|
||||
|
||||
def create_loadable_segments(
|
||||
target: Target, file_path: Path
|
||||
) -> Tuple[List[LoadableSegment], int]:
|
||||
loadable_segments = []
|
||||
total_size = 0
|
||||
with open(file_path, "rb") as app_file:
|
||||
elf_file = ELFFile(app_file)
|
||||
|
||||
for idx, segment in enumerate(elf_file.iter_segments("PT_LOAD")):
|
||||
if segment.header.p_filesz == 0:
|
||||
continue
|
||||
# Basic validity checks of the base addresses.
|
||||
if idx == 0:
|
||||
if (
|
||||
target == Target.BOOTLOADER
|
||||
and segment.header.p_paddr != BOOTLOADER_START_ADDR
|
||||
):
|
||||
raise ValueError(
|
||||
f"detected possibly invalid start address {segment.header.p_paddr:#08x} for "
|
||||
f"bootloader, expected {BOOTLOADER_START_ADDR}"
|
||||
)
|
||||
if (
|
||||
target == Target.APP_A
|
||||
and segment.header.p_paddr != APP_A_START_ADDR
|
||||
):
|
||||
raise ValueError(
|
||||
f"detected possibly invalid start address {segment.header.p_paddr:#08x} for "
|
||||
f"App A, expected {APP_A_START_ADDR}"
|
||||
)
|
||||
if (
|
||||
target == Target.APP_B
|
||||
and segment.header.p_paddr != APP_B_START_ADDR
|
||||
):
|
||||
raise ValueError(
|
||||
f"detected possibly invalid start address {segment.header.p_paddr:#08x} for "
|
||||
f"App B, expected {APP_B_START_ADDR}"
|
||||
)
|
||||
name = None
|
||||
for section in elf_file.iter_sections():
|
||||
if (
|
||||
section.header.sh_offset == segment.header.p_offset
|
||||
and section.header.sh_size > 0
|
||||
):
|
||||
name = section.name
|
||||
if name is None:
|
||||
_LOGGER.warning("no fitting section found for segment")
|
||||
continue
|
||||
# print(f"Segment Addr: {segment.header.p_paddr}")
|
||||
# print(f"Segment Offset: {segment.header.p_offset}")
|
||||
# print(f"Segment Filesize: {segment.header.p_filesz}")
|
||||
loadable_segments.append(
|
||||
LoadableSegment(
|
||||
name=name,
|
||||
offset=segment.header.p_paddr,
|
||||
size=segment.header.p_filesz,
|
||||
data=segment.data(),
|
||||
)
|
||||
)
|
||||
total_size += segment.header.p_filesz
|
||||
return loadable_segments, total_size
|
||||
|
||||
|
||||
def segments_info_str(
|
||||
target: Target,
|
||||
loadable_segments: List[LoadableSegment],
|
||||
total_size: int,
|
||||
file_path: Path,
|
||||
):
|
||||
# Set context string and perform basic sanity checks.
|
||||
if target == Target.BOOTLOADER:
|
||||
if total_size > BOOTLOADER_MAX_SIZE:
|
||||
_LOGGER.error(
|
||||
f"provided bootloader app larger than allowed {total_size} bytes"
|
||||
)
|
||||
return -1
|
||||
context_str = "Bootloader"
|
||||
elif target == Target.APP_A:
|
||||
if total_size > APP_A_MAX_SIZE:
|
||||
_LOGGER.error(f"provided App A larger than allowed {total_size} bytes")
|
||||
return -1
|
||||
context_str = "App Slot A"
|
||||
elif target == Target.APP_B:
|
||||
if total_size > APP_B_MAX_SIZE:
|
||||
_LOGGER.error(f"provided App B larger than allowed {total_size} bytes")
|
||||
return -1
|
||||
context_str = "App Slot B"
|
||||
_LOGGER.info(f"Flashing {context_str} with image {file_path} (size {total_size})")
|
||||
for idx, segment in enumerate(loadable_segments):
|
||||
_LOGGER.info(
|
||||
f"Loadable section {idx} {segment.name} with offset {segment.offset:#08x} and "
|
||||
f"size {segment.size}"
|
||||
)
|
||||
|
||||
|
||||
def pack_memory_write_command(addr: int, data: bytes) -> PusTc:
|
||||
app_data = bytearray()
|
||||
app_data.append(BOOT_NVM_MEMORY_ID)
|
||||
# N parameter is always 1 here.
|
||||
app_data.append(1)
|
||||
app_data.extend(struct.pack("!I", addr))
|
||||
app_data.extend(struct.pack("!I", len(data)))
|
||||
app_data.extend(data)
|
||||
return PusTc(
|
||||
apid=0,
|
||||
service=MEMORY_SERVICE,
|
||||
subservice=RAW_MEMORY_WRITE_SUBSERVICE,
|
||||
seq_count=SEQ_PROVIDER.get_and_increment(),
|
||||
app_data=bytes(app_data),
|
||||
)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
1 flashloader/loader.toml Normal file
@@ -0,0 +1 @@
serial_port = "/dev/ttyUSB0"
5 flashloader/requirements.txt Normal file
@@ -0,0 +1,5 @@
spacepackets == 0.24
tmtccmd == 8.0.2
toml == 0.10
pyelftools == 0.31
crcmod == 1.7
2 flashloader/slot-a-blinky/.gitignore vendored Normal file
@@ -0,0 +1,2 @@
/target
/app.map
42 flashloader/slot-a-blinky/Cargo.toml Normal file
@@ -0,0 +1,42 @@
|
||||
[package]
|
||||
name = "slot-a-blinky"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
|
||||
[workspace]
|
||||
|
||||
[dependencies]
|
||||
cortex-m-rt = "0.7"
|
||||
panic-rtt-target = { version = "0.1.3" }
|
||||
rtt-target = { version = "0.5" }
|
||||
cortex-m = { version = "0.7", features = ["critical-section-single-core"] }
|
||||
embedded-hal = "1"
|
||||
va416xx-hal = { path = "../../va416xx-hal", features = ["va41630"] }
|
||||
|
||||
[profile.dev]
|
||||
codegen-units = 1
|
||||
debug = 2
|
||||
debug-assertions = true # <-
|
||||
incremental = false
|
||||
# This is problematic for stepping..
|
||||
# opt-level = 'z' # <-
|
||||
overflow-checks = true # <-
|
||||
|
||||
# cargo build/run --release
|
||||
[profile.release]
|
||||
codegen-units = 1
|
||||
debug = 2
|
||||
debug-assertions = false # <-
|
||||
incremental = false
|
||||
lto = 'fat'
|
||||
opt-level = 3 # <-
|
||||
overflow-checks = false # <-
|
||||
|
||||
[profile.small]
|
||||
inherits = "release"
|
||||
codegen-units = 1
|
||||
debug-assertions = false # <-
|
||||
lto = true
|
||||
opt-level = 'z' # <-
|
||||
overflow-checks = false # <-
|
||||
# strip = true # Automatically strip symbols from the binary.
|
24 flashloader/slot-a-blinky/memory.x Normal file
@@ -0,0 +1,24 @@
|
||||
/* Special linker script for application slot A with an offset at address 0x4000 */
|
||||
MEMORY
|
||||
{
|
||||
FLASH : ORIGIN = 0x00004000, LENGTH = 256K
|
||||
/* RAM is a mandatory region. This RAM refers to the SRAM_0 */
|
||||
RAM : ORIGIN = 0x1FFF8000, LENGTH = 32K
|
||||
SRAM_1 : ORIGIN = 0x20000000, LENGTH = 32K
|
||||
}
|
||||
|
||||
/* This is where the call stack will be allocated. */
|
||||
/* The stack is of the full descending type. */
|
||||
/* NOTE Do NOT modify `_stack_start` unless you know what you are doing */
|
||||
/* SRAM_0 can be used for all busses: Instruction, Data and System */
|
||||
/* SRAM_1 only supports the system bus */
|
||||
_stack_start = ORIGIN(RAM) + LENGTH(RAM);
|
||||
|
||||
/* Define sections for placing symbols into the extra memory regions above. */
|
||||
/* This makes them accessible from code. */
|
||||
SECTIONS {
|
||||
.sram1 (NOLOAD) : ALIGN(8) {
|
||||
*(.sram1 .sram1.*);
|
||||
. = ALIGN(4);
|
||||
} > SRAM_1
|
||||
};
|
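The `.sram1` output section declared in the linker script above is what makes the extra SRAM_1
region reachable from application code. A small sketch of how a buffer could be placed there
(the static name is made up; `#[link_section]` is the standard Rust attribute for this):

```rust
use core::mem::MaybeUninit;

// Sketch only: place a buffer into the SRAM_1 region via the `.sram1` output section
// from memory.x. The section is marked NOLOAD, so the runtime does not initialize it
// and its contents must be treated as uninitialized at startup.
#[link_section = ".sram1"]
static mut SRAM1_SCRATCH: MaybeUninit<[u8; 1024]> = MaybeUninit::uninit();
```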
23 flashloader/slot-a-blinky/src/main.rs Normal file
@@ -0,0 +1,23 @@
|
||||
//! Simple blinky example using the HAL
|
||||
#![no_main]
|
||||
#![no_std]
|
||||
|
||||
use cortex_m_rt::entry;
|
||||
use embedded_hal::digital::StatefulOutputPin;
|
||||
use panic_rtt_target as _;
|
||||
use rtt_target::{rprintln, rtt_init_print};
|
||||
use va416xx_hal::{gpio::PinsG, pac};
|
||||
|
||||
#[entry]
|
||||
fn main() -> ! {
|
||||
rtt_init_print!();
|
||||
rprintln!("VA416xx HAL blinky example for App Slot A");
|
||||
|
||||
let mut dp = pac::Peripherals::take().unwrap();
|
||||
let portg = PinsG::new(&mut dp.sysconfig, dp.portg);
|
||||
let mut led = portg.pg5.into_readable_push_pull_output();
|
||||
loop {
|
||||
cortex_m::asm::delay(1_000_000);
|
||||
led.toggle().ok();
|
||||
}
|
||||
}
|
2 flashloader/slot-b-blinky/.gitignore vendored Normal file
@@ -0,0 +1,2 @@
/target
/app.map
42 flashloader/slot-b-blinky/Cargo.toml Normal file
@@ -0,0 +1,42 @@
|
||||
[package]
|
||||
name = "slot-b-blinky"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
|
||||
[workspace]
|
||||
|
||||
[dependencies]
|
||||
cortex-m-rt = "0.7"
|
||||
panic-rtt-target = { version = "0.1.3" }
|
||||
rtt-target = { version = "0.5" }
|
||||
cortex-m = { version = "0.7", features = ["critical-section-single-core"] }
|
||||
embedded-hal = "1"
|
||||
va416xx-hal = { path = "../../va416xx-hal", features = ["va41630"] }
|
||||
|
||||
[profile.dev]
|
||||
codegen-units = 1
|
||||
debug = 2
|
||||
debug-assertions = true # <-
|
||||
incremental = false
|
||||
# This is problematic for stepping..
|
||||
# opt-level = 'z' # <-
|
||||
overflow-checks = true # <-
|
||||
|
||||
# cargo build/run --release
|
||||
[profile.release]
|
||||
codegen-units = 1
|
||||
debug = 2
|
||||
debug-assertions = false # <-
|
||||
incremental = false
|
||||
lto = 'fat'
|
||||
opt-level = 3 # <-
|
||||
overflow-checks = false # <-
|
||||
|
||||
[profile.small]
|
||||
inherits = "release"
|
||||
codegen-units = 1
|
||||
debug-assertions = false # <-
|
||||
lto = true
|
||||
opt-level = 'z' # <-
|
||||
overflow-checks = false # <-
|
||||
# strip = true # Automatically strip symbols from the binary.
|
24 flashloader/slot-b-blinky/memory.x Normal file
@@ -0,0 +1,24 @@
|
||||
/* Special linker script for application slot B with an offset at address 0x22000 */
|
||||
MEMORY
|
||||
{
|
||||
FLASH : ORIGIN = 0x00022000, LENGTH = 256K
|
||||
/* RAM is a mandatory region. This RAM refers to the SRAM_0 */
|
||||
RAM : ORIGIN = 0x1FFF8000, LENGTH = 32K
|
||||
SRAM_1 : ORIGIN = 0x20000000, LENGTH = 32K
|
||||
}
|
||||
|
||||
/* This is where the call stack will be allocated. */
|
||||
/* The stack is of the full descending type. */
|
||||
/* NOTE Do NOT modify `_stack_start` unless you know what you are doing */
|
||||
/* SRAM_0 can be used for all busses: Instruction, Data and System */
|
||||
/* SRAM_1 only supports the system bus */
|
||||
_stack_start = ORIGIN(RAM) + LENGTH(RAM);
|
||||
|
||||
/* Define sections for placing symbols into the extra memory regions above. */
|
||||
/* This makes them accessible from code. */
|
||||
SECTIONS {
|
||||
.sram1 (NOLOAD) : ALIGN(8) {
|
||||
*(.sram1 .sram1.*);
|
||||
. = ALIGN(4);
|
||||
} > SRAM_1
|
||||
};
|
23 flashloader/slot-b-blinky/src/main.rs Normal file
@@ -0,0 +1,23 @@
|
||||
//! Simple blinky example using the HAL
|
||||
#![no_main]
|
||||
#![no_std]
|
||||
|
||||
use cortex_m_rt::entry;
|
||||
use embedded_hal::digital::StatefulOutputPin;
|
||||
use panic_rtt_target as _;
|
||||
use rtt_target::{rprintln, rtt_init_print};
|
||||
use va416xx_hal::{gpio::PinsG, pac};
|
||||
|
||||
#[entry]
|
||||
fn main() -> ! {
|
||||
rtt_init_print!();
|
||||
rprintln!("VA416xx HAL blinky example for App Slot B");
|
||||
|
||||
let mut dp = pac::Peripherals::take().unwrap();
|
||||
let portg = PinsG::new(&mut dp.sysconfig, dp.portg);
|
||||
let mut led = portg.pg5.into_readable_push_pull_output();
|
||||
loop {
|
||||
cortex_m::asm::delay(8_000_000);
|
||||
led.toggle().ok();
|
||||
}
|
||||
}
|
9 flashloader/src/lib.rs Normal file
@@ -0,0 +1,9 @@
#![no_std]

#[cfg(test)]
mod tests {
    #[test]
    fn simple() {
        assert_eq!(1 + 1, 2);
    }
}
529 flashloader/src/main.rs Normal file
@@ -0,0 +1,529 @@
|
||||
//! Vorago flashloader which can be used to flash image A and image B via a simple
|
||||
//! low-level CCSDS memory interface via a UART interface.
|
||||
#![no_main]
|
||||
#![no_std]
|
||||
|
||||
use once_cell::sync::OnceCell;
|
||||
use panic_rtt_target as _;
|
||||
use va108xx::{clock::Clocks, edac, pac, time::Hertz, wdt::Wdt};
|
||||
|
||||
const CLOCK_FREQ: Hertz = Hertz::from_raw(50_000_000);
|
||||
|
||||
const MAX_TC_SIZE: usize = 1024;
|
||||
const MAX_TC_FRAME_SIZE: usize = cobs::max_encoding_length(MAX_TC_SIZE);
|
||||
|
||||
const MAX_TM_SIZE: usize = 128;
|
||||
const MAX_TM_FRAME_SIZE: usize = cobs::max_encoding_length(MAX_TM_SIZE);
|
||||
|
||||
const UART_BAUDRATE: u32 = 115200;
|
||||
const BOOT_NVM_MEMORY_ID: u8 = 1;
|
||||
const RX_DEBUGGING: bool = false;
|
||||
|
||||
pub enum ActionId {
|
||||
CorruptImageA = 128,
|
||||
CorruptImageB = 129,
|
||||
}
|
||||
pub trait WdtInterface {
|
||||
fn feed(&self);
|
||||
}
|
||||
|
||||
pub struct OptWdt(Option<Wdt>);
|
||||
|
||||
impl WdtInterface for OptWdt {
|
||||
fn feed(&self) {
|
||||
if self.0.is_some() {
|
||||
self.0.as_ref().unwrap().feed();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
use once_cell::sync::Lazy;
|
||||
use ringbuf::{
|
||||
traits::{Consumer, Observer, Producer, SplitRef},
|
||||
CachingCons, StaticProd, StaticRb,
|
||||
};
|
||||
|
||||
// Larger buffer for TC to be able to hold the possibly large memory write packets.
|
||||
const BUF_RB_SIZE_TC: usize = 2048;
|
||||
const SIZES_RB_SIZE_TC: usize = 16;
|
||||
|
||||
const BUF_RB_SIZE_TM: usize = 512;
|
||||
const SIZES_RB_SIZE_TM: usize = 16;
|
||||
|
||||
// Ring buffers for handling variable sized telemetry
|
||||
static mut BUF_RB_TM: Lazy<StaticRb<u8, BUF_RB_SIZE_TM>> =
|
||||
Lazy::new(StaticRb::<u8, BUF_RB_SIZE_TM>::default);
|
||||
static mut SIZES_RB_TM: Lazy<StaticRb<usize, SIZES_RB_SIZE_TM>> =
|
||||
Lazy::new(StaticRb::<usize, SIZES_RB_SIZE_TM>::default);
|
||||
|
||||
// Ring buffers for handling variable sized telecommands
|
||||
static mut BUF_RB_TC: Lazy<StaticRb<u8, BUF_RB_SIZE_TC>> =
|
||||
Lazy::new(StaticRb::<u8, BUF_RB_SIZE_TC>::default);
|
||||
static mut SIZES_RB_TC: Lazy<StaticRb<usize, SIZES_RB_SIZE_TC>> =
|
||||
Lazy::new(StaticRb::<usize, SIZES_RB_SIZE_TC>::default);
|
||||
|
||||
pub struct DataProducer<const BUF_SIZE: usize, const SIZES_LEN: usize> {
|
||||
pub buf_prod: StaticProd<'static, u8, BUF_SIZE>,
|
||||
pub sizes_prod: StaticProd<'static, usize, SIZES_LEN>,
|
||||
}
|
||||
|
||||
pub struct DataConsumer<const BUF_SIZE: usize, const SIZES_LEN: usize> {
|
||||
pub buf_cons: CachingCons<&'static StaticRb<u8, BUF_SIZE>>,
|
||||
pub sizes_cons: CachingCons<&'static StaticRb<usize, SIZES_LEN>>,
|
||||
}
|
||||
|
||||
static CLOCKS: OnceCell<Clocks> = OnceCell::new();
|
||||
|
||||
pub const APP_A_START_ADDR: u32 = 0x3000;
|
||||
pub const APP_A_END_ADDR: u32 = 0x11800;
|
||||
pub const APP_B_START_ADDR: u32 = APP_A_END_ADDR;
|
||||
pub const APP_B_END_ADDR: u32 = 0x20000;
|
||||
|
||||
#[rtic::app(device = pac, dispatchers = [U1, U2, U3])]
|
||||
mod app {
|
||||
use super::*;
|
||||
use cortex_m::asm;
|
||||
use embedded_io::Write;
|
||||
use panic_rtt_target as _;
|
||||
use rtic::Mutex;
|
||||
use rtic_monotonics::systick::prelude::*;
|
||||
use rtt_target::rprintln;
|
||||
use satrs::pus::verification::VerificationReportCreator;
|
||||
use spacepackets::ecss::PusServiceId;
|
||||
use spacepackets::ecss::{
|
||||
tc::PusTcReader, tm::PusTmCreator, EcssEnumU8, PusPacket, WritablePusPacket,
|
||||
};
|
||||
use va416xx_hal::irq_router::enable_and_init_irq_router;
|
||||
use va416xx_hal::uart::IrqContextTimeoutOrMaxSize;
|
||||
use va416xx_hal::{
|
||||
clock::ClkgenExt,
|
||||
edac,
|
||||
gpio::PinsG,
|
||||
nvm::Nvm,
|
||||
pac,
|
||||
uart::{self, Uart},
|
||||
};
|
||||
|
||||
use crate::{setup_edac, EXTCLK_FREQ};
|
||||
|
||||
#[derive(Default, Debug, Copy, Clone, PartialEq, Eq)]
|
||||
pub enum CobsReaderStates {
|
||||
#[default]
|
||||
WaitingForStart,
|
||||
WatingForEnd,
|
||||
FrameOverflow,
|
||||
}
|
||||
|
||||
#[local]
|
||||
struct Local {
|
||||
uart_rx: uart::RxWithIrq<pac::Uart0>,
|
||||
uart_tx: uart::Tx<pac::Uart0>,
|
||||
rx_context: IrqContextTimeoutOrMaxSize,
|
||||
rom_spi: Option<pac::Spi3>,
|
||||
// We handle all TM in one task.
|
||||
tm_cons: DataConsumer<BUF_RB_SIZE_TM, SIZES_RB_SIZE_TM>,
|
||||
// We consume all TC in one task.
|
||||
tc_cons: DataConsumer<BUF_RB_SIZE_TC, SIZES_RB_SIZE_TC>,
|
||||
// We produce all TC in one task.
|
||||
tc_prod: DataProducer<BUF_RB_SIZE_TC, SIZES_RB_SIZE_TC>,
|
||||
verif_reporter: VerificationReportCreator,
|
||||
}
|
||||
|
||||
#[shared]
|
||||
struct Shared {
|
||||
// Having this shared allows multiple tasks to generate telemetry.
|
||||
tm_prod: DataProducer<BUF_RB_SIZE_TM, SIZES_RB_SIZE_TM>,
|
||||
}
|
||||
|
||||
rtic_monotonics::systick_monotonic!(Mono, 10_000);
|
||||
|
||||
#[init]
|
||||
fn init(mut cx: init::Context) -> (Shared, Local) {
|
||||
//rtt_init_default!();
|
||||
rtt_log::init();
|
||||
rprintln!("-- Vorago flashloader --");
|
||||
// Initialize the systick interrupt & obtain the token to prove that we did
|
||||
// Use the external clock connected to XTAL_N.
|
||||
let clocks = cx
|
||||
.device
|
||||
.clkgen
|
||||
.constrain()
|
||||
.xtal_n_clk_with_src_freq(Hertz::from_raw(EXTCLK_FREQ))
|
||||
.freeze(&mut cx.device.sysconfig)
|
||||
.unwrap();
|
||||
enable_and_init_irq_router(&mut cx.device.sysconfig, &cx.device.irq_router);
|
||||
setup_edac(&mut cx.device.sysconfig);
|
||||
|
||||
let gpiog = PinsG::new(&mut cx.device.sysconfig, cx.device.portg);
|
||||
let tx = gpiog.pg0.into_funsel_1();
|
||||
let rx = gpiog.pg1.into_funsel_1();
|
||||
|
||||
let uart0 = Uart::new(
|
||||
cx.device.uart0,
|
||||
(tx, rx),
|
||||
Hertz::from_raw(UART_BAUDRATE),
|
||||
&mut cx.device.sysconfig,
|
||||
&clocks,
|
||||
);
|
||||
let (tx, rx) = uart0.split();
|
||||
|
||||
let verif_reporter = VerificationReportCreator::new(0).unwrap();
|
||||
|
||||
let (buf_prod_tm, buf_cons_tm) = unsafe { BUF_RB_TM.split_ref() };
|
||||
let (sizes_prod_tm, sizes_cons_tm) = unsafe { SIZES_RB_TM.split_ref() };
|
||||
|
||||
let (buf_prod_tc, buf_cons_tc) = unsafe { BUF_RB_TC.split_ref() };
|
||||
let (sizes_prod_tc, sizes_cons_tc) = unsafe { SIZES_RB_TC.split_ref() };
|
||||
|
||||
Mono::start(cx.core.SYST, clocks.sysclk().raw());
|
||||
CLOCKS.set(clocks).unwrap();
|
||||
|
||||
let mut rx = rx.to_rx_with_irq();
|
||||
let mut rx_context = IrqContextTimeoutOrMaxSize::new(MAX_TC_FRAME_SIZE);
|
||||
rx.read_fixed_len_or_timeout_based_using_irq(&mut rx_context)
|
||||
.expect("initiating UART RX failed");
|
||||
pus_tc_handler::spawn().unwrap();
|
||||
pus_tm_tx_handler::spawn().unwrap();
|
||||
(
|
||||
Shared {
|
||||
tm_prod: DataProducer {
|
||||
buf_prod: buf_prod_tm,
|
||||
sizes_prod: sizes_prod_tm,
|
||||
},
|
||||
},
|
||||
Local {
|
||||
uart_rx: rx,
|
||||
uart_tx: tx,
|
||||
rx_context,
|
||||
rom_spi: Some(cx.device.spi3),
|
||||
tm_cons: DataConsumer {
|
||||
buf_cons: buf_cons_tm,
|
||||
sizes_cons: sizes_cons_tm,
|
||||
},
|
||||
tc_cons: DataConsumer {
|
||||
buf_cons: buf_cons_tc,
|
||||
sizes_cons: sizes_cons_tc,
|
||||
},
|
||||
tc_prod: DataProducer {
|
||||
buf_prod: buf_prod_tc,
|
||||
sizes_prod: sizes_prod_tc,
|
||||
},
|
||||
verif_reporter,
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
// `shared` cannot be accessed from this context
|
||||
#[idle]
|
||||
fn idle(_cx: idle::Context) -> ! {
|
||||
loop {
|
||||
asm::nop();
|
||||
}
|
||||
}
|
||||
|
||||
// This is the interrupt handler to read all bytes received on the UART0.
|
||||
#[task(
|
||||
binds = UART0_RX,
|
||||
local = [
|
||||
cnt: u32 = 0,
|
||||
rx_buf: [u8; MAX_TC_FRAME_SIZE] = [0; MAX_TC_FRAME_SIZE],
|
||||
rx_context,
|
||||
uart_rx,
|
||||
tc_prod
|
||||
],
|
||||
)]
|
||||
fn uart_rx_irq(cx: uart_rx_irq::Context) {
|
||||
match cx
|
||||
.local
|
||||
.uart_rx
|
||||
.irq_handler_max_size_or_timeout_based(cx.local.rx_context, cx.local.rx_buf)
|
||||
{
|
||||
Ok(result) => {
|
||||
if RX_DEBUGGING {
|
||||
log::debug!("RX Info: {:?}", cx.local.rx_context);
|
||||
log::debug!("RX Result: {:?}", result);
|
||||
}
|
||||
if result.complete() {
|
||||
// Check frame validity (must have COBS format) and decode the frame.
|
||||
// Currently, we expect a full frame or a frame received through a timeout
|
||||
// to be one COBS frame. We could parse for multiple COBS packets in one
|
||||
// frame, but the additional complexity is not necessary here..
|
||||
if cx.local.rx_buf[0] == 0 && cx.local.rx_buf[result.bytes_read - 1] == 0 {
|
||||
let decoded_size =
|
||||
cobs::decode_in_place(&mut cx.local.rx_buf[1..result.bytes_read]);
|
||||
if decoded_size.is_err() {
|
||||
log::warn!("COBS decoding failed");
|
||||
} else {
|
||||
let decoded_size = decoded_size.unwrap();
|
||||
if cx.local.tc_prod.sizes_prod.vacant_len() >= 1
|
||||
&& cx.local.tc_prod.buf_prod.vacant_len() >= decoded_size
|
||||
{
|
||||
// Should never fail, we checked there is enough space.
|
||||
cx.local.tc_prod.sizes_prod.try_push(decoded_size).unwrap();
|
||||
cx.local
|
||||
.tc_prod
|
||||
.buf_prod
|
||||
.push_slice(&cx.local.rx_buf[1..1 + decoded_size]);
|
||||
} else {
|
||||
log::warn!("COBS TC queue full");
|
||||
}
|
||||
}
|
||||
} else {
|
||||
log::warn!("COBS frame with invalid format, start and end bytes are not 0");
|
||||
}
|
||||
|
||||
// Initiate next transfer.
|
||||
cx.local
|
||||
.uart_rx
|
||||
.read_fixed_len_or_timeout_based_using_irq(cx.local.rx_context)
|
||||
.expect("read operation failed");
|
||||
}
|
||||
if result.error() {
|
||||
log::warn!("UART error: {:?}", result.error());
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
log::warn!("UART error: {:?}", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[task(
|
||||
priority = 2,
|
||||
local=[
|
||||
tc_buf: [u8; MAX_TC_SIZE] = [0; MAX_TC_SIZE],
|
||||
src_data_buf: [u8; 16] = [0; 16],
|
||||
verif_buf: [u8; 32] = [0; 32],
|
||||
tc_cons,
|
||||
rom_spi,
|
||||
verif_reporter
|
||||
],
|
||||
shared=[tm_prod]
|
||||
)]
|
||||
async fn pus_tc_handler(mut cx: pus_tc_handler::Context) {
|
||||
loop {
|
||||
// Try to read a TC from the ring buffer.
|
||||
let packet_len = cx.local.tc_cons.sizes_cons.try_pop();
|
||||
if packet_len.is_none() {
|
||||
// Small delay, TCs might arrive very quickly.
|
||||
Mono::delay(20.millis()).await;
|
||||
continue;
|
||||
}
|
||||
let packet_len = packet_len.unwrap();
|
||||
log::info!(target: "TC Handler", "received packet with length {}", packet_len);
|
||||
assert_eq!(
|
||||
cx.local
|
||||
.tc_cons
|
||||
.buf_cons
|
||||
.pop_slice(&mut cx.local.tc_buf[0..packet_len]),
|
||||
packet_len
|
||||
);
|
||||
// Read a telecommand, now handle it.
|
||||
handle_valid_pus_tc(&mut cx);
|
||||
}
|
||||
}
|
||||
|
||||
fn handle_valid_pus_tc(cx: &mut pus_tc_handler::Context) {
|
||||
let pus_tc = PusTcReader::new(cx.local.tc_buf);
|
||||
if pus_tc.is_err() {
|
||||
log::warn!("PUS TC error: {}", pus_tc.unwrap_err());
|
||||
return;
|
||||
}
|
||||
let (pus_tc, _) = pus_tc.unwrap();
|
||||
let mut write_and_send = |tm: &PusTmCreator| {
|
||||
let written_size = tm.write_to_bytes(cx.local.verif_buf).unwrap();
|
||||
cx.shared.tm_prod.lock(|prod| {
|
||||
prod.sizes_prod.try_push(tm.len_written()).unwrap();
|
||||
prod.buf_prod
|
||||
.push_slice(&cx.local.verif_buf[0..written_size]);
|
||||
});
|
||||
};
|
||||
let token = cx.local.verif_reporter.add_tc(&pus_tc);
|
||||
let (tm, accepted_token) = cx
|
||||
.local
|
||||
.verif_reporter
|
||||
.acceptance_success(cx.local.src_data_buf, token, 0, 0, &[])
|
||||
.expect("acceptance success failed");
|
||||
write_and_send(&tm);
|
||||
|
||||
let (tm, started_token) = cx
|
||||
.local
|
||||
.verif_reporter
|
||||
.start_success(cx.local.src_data_buf, accepted_token, 0, 0, &[])
|
||||
.expect("acceptance success failed");
|
||||
write_and_send(&tm);
|
||||
|
||||
if pus_tc.service() == PusServiceId::Action as u8 {
|
||||
let mut corrupt_image = |base_addr: u32| {
|
||||
// Safety: We only use this for NVM handling and we only do NVM
|
||||
// handling here.
|
||||
let mut sys_cfg = unsafe { pac::Sysconfig::steal() };
|
||||
let nvm = Nvm::new(
|
||||
&mut sys_cfg,
|
||||
cx.local.rom_spi.take().unwrap(),
|
||||
CLOCKS.get().as_ref().unwrap(),
|
||||
);
|
||||
let mut buf = [0u8; 4];
|
||||
nvm.read_data(base_addr + 32, &mut buf);
|
||||
buf[0] += 1;
|
||||
nvm.write_data(base_addr + 32, &buf);
|
||||
*cx.local.rom_spi = Some(nvm.release(&mut sys_cfg));
|
||||
let tm = cx
|
||||
.local
|
||||
.verif_reporter
|
||||
.completion_success(cx.local.src_data_buf, started_token, 0, 0, &[])
|
||||
.expect("completion success failed");
|
||||
write_and_send(&tm);
|
||||
};
|
||||
if pus_tc.subservice() == ActionId::CorruptImageA as u8 {
|
||||
rprintln!("corrupting App Image A");
|
||||
corrupt_image(APP_A_START_ADDR);
|
||||
}
|
||||
if pus_tc.subservice() == ActionId::CorruptImageB as u8 {
|
||||
rprintln!("corrupting App Image B");
|
||||
corrupt_image(APP_B_START_ADDR);
|
||||
}
|
||||
}
|
||||
if pus_tc.service() == PusServiceId::Test as u8 && pus_tc.subservice() == 1 {
|
||||
log::info!(target: "TC Handler", "received ping TC");
|
||||
let tm = cx
|
||||
.local
|
||||
.verif_reporter
|
||||
.completion_success(cx.local.src_data_buf, started_token, 0, 0, &[])
|
||||
.expect("completion success failed");
|
||||
write_and_send(&tm);
|
||||
} else if pus_tc.service() == PusServiceId::MemoryManagement as u8 {
|
||||
let tm = cx
|
||||
.local
|
||||
.verif_reporter
|
||||
.step_success(
|
||||
cx.local.src_data_buf,
|
||||
&started_token,
|
||||
0,
|
||||
0,
|
||||
&[],
|
||||
EcssEnumU8::new(0),
|
||||
)
|
||||
.expect("step success failed");
|
||||
write_and_send(&tm);
|
||||
// Raw memory write TC
|
||||
if pus_tc.subservice() == 2 {
|
||||
let app_data = pus_tc.app_data();
|
||||
if app_data.len() < 10 {
|
||||
log::warn!(
|
||||
target: "TC Handler",
|
||||
"app data for raw memory write is too short: {}",
|
||||
app_data.len()
|
||||
);
|
||||
}
|
||||
let memory_id = app_data[0];
|
||||
if memory_id != BOOT_NVM_MEMORY_ID {
|
||||
log::warn!(target: "TC Handler", "memory ID {} not supported", memory_id);
|
||||
// TODO: Error reporting
|
||||
return;
|
||||
}
|
||||
let offset = u32::from_be_bytes(app_data[2..6].try_into().unwrap());
|
||||
let data_len = u32::from_be_bytes(app_data[6..10].try_into().unwrap());
|
||||
if 10 + data_len as usize > app_data.len() {
|
||||
log::warn!(
|
||||
target: "TC Handler",
|
||||
"invalid data length {} for raw mem write detected",
|
||||
data_len
|
||||
);
|
||||
// TODO: Error reporting
|
||||
return;
|
||||
}
|
||||
let data = &app_data[10..10 + data_len as usize];
|
||||
log::info!(
|
||||
target: "TC Handler",
|
||||
"writing {} bytes at offset {} to NVM",
|
||||
data_len,
|
||||
offset
|
||||
);
|
||||
// Safety: We only use this for NVM handling and we only do NVM
|
||||
// handling here.
|
||||
let mut sys_cfg = unsafe { pac::Sysconfig::steal() };
|
||||
let nvm = Nvm::new(
|
||||
&mut sys_cfg,
|
||||
cx.local.rom_spi.take().unwrap(),
|
||||
CLOCKS.get().as_ref().unwrap(),
|
||||
);
|
||||
nvm.write_data(offset, data);
|
||||
*cx.local.rom_spi = Some(nvm.release(&mut sys_cfg));
|
||||
let tm = cx
|
||||
.local
|
||||
.verif_reporter
|
||||
.completion_success(cx.local.src_data_buf, started_token, 0, 0, &[])
|
||||
.expect("completion success failed");
|
||||
write_and_send(&tm);
|
||||
log::info!(
|
||||
target: "TC Handler",
|
||||
"NVM operation done");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[task(
|
||||
priority = 1,
|
||||
local=[
|
||||
read_buf: [u8;MAX_TM_SIZE] = [0; MAX_TM_SIZE],
|
||||
encoded_buf: [u8;MAX_TM_FRAME_SIZE] = [0; MAX_TM_FRAME_SIZE],
|
||||
uart_tx,
|
||||
tm_cons
|
||||
],
|
||||
shared=[]
|
||||
)]
|
||||
async fn pus_tm_tx_handler(cx: pus_tm_tx_handler::Context) {
|
||||
loop {
|
||||
while cx.local.tm_cons.sizes_cons.occupied_len() > 0 {
|
||||
let next_size = cx.local.tm_cons.sizes_cons.try_pop().unwrap();
|
||||
cx.local
|
||||
.tm_cons
|
||||
.buf_cons
|
||||
.pop_slice(&mut cx.local.read_buf[0..next_size]);
|
||||
cx.local.encoded_buf[0] = 0;
|
||||
let send_size = cobs::encode(
|
||||
&cx.local.read_buf[0..next_size],
|
||||
&mut cx.local.encoded_buf[1..],
|
||||
);
|
||||
cx.local.encoded_buf[send_size + 1] = 0;
|
||||
cx.local
|
||||
.uart_tx
|
||||
.write(&cx.local.encoded_buf[0..send_size + 2])
|
||||
.unwrap();
|
||||
Mono::delay(2.millis()).await;
|
||||
}
|
||||
Mono::delay(50.millis()).await;
|
||||
}
|
||||
}
|
||||
|
||||
#[task(binds = EDAC_SBE, priority = 1)]
|
||||
fn edac_sbe_isr(_cx: edac_sbe_isr::Context) {
|
||||
// TODO: Send some command via UART for notification purposes. Also identify the problematic
|
||||
// memory.
|
||||
edac::clear_sbe_irq();
|
||||
}
|
||||
|
||||
#[task(binds = EDAC_MBE, priority = 1)]
|
||||
fn edac_mbe_isr(_cx: edac_mbe_isr::Context) {
|
||||
// TODO: Send some command via UART for notification purposes.
|
||||
edac::clear_mbe_irq();
|
||||
// TODO: Reset like the vorago example?
|
||||
}
|
||||
|
||||
#[task(binds = WATCHDOG, priority = 1)]
|
||||
fn watchdog_isr(_cx: watchdog_isr::Context) {
|
||||
let wdt = unsafe { pac::WatchDog::steal() };
|
||||
// Clear interrupt.
|
||||
wdt.wdogintclr().write(|w| unsafe { w.bits(1) });
|
||||
}
|
||||
}
|
||||
|
||||
fn setup_edac(syscfg: &mut pac::Sysconfig) {
|
||||
// The scrub values are based on the Vorago provided bootloader.
|
||||
edac::enable_rom_scrub(syscfg, 125);
|
||||
edac::enable_ram0_scrub(syscfg, 1000);
|
||||
edac::enable_ram1_scrub(syscfg, 1000);
|
||||
edac::enable_sbe_irq();
|
||||
edac::enable_mbe_irq();
|
||||
}
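The TM/TC link of this flashloader is framed with COBS and zero delimiter bytes, as implemented
in `uart_rx_irq` and `pus_tm_tx_handler` above. A small host-side sketch of that round trip
(the packet bytes are arbitrary placeholders), using the same `cobs` crate calls:

```rust
fn main() {
    // Arbitrary payload standing in for an encoded PUS packet.
    let packet = [0x08u8, 0x01, 0xc0, 0x00, 0x00, 0x2a];

    // Sender side (like pus_tm_tx_handler): COBS-encode and delimit the frame with zero bytes.
    let mut frame = vec![0u8; cobs::max_encoding_length(packet.len()) + 2];
    frame[0] = 0;
    let encoded_len = cobs::encode(&packet, &mut frame[1..]);
    frame[encoded_len + 1] = 0;
    frame.truncate(encoded_len + 2);

    // Receiver side (like uart_rx_irq): check the delimiters, then decode in place.
    assert_eq!(frame[0], 0);
    assert_eq!(*frame.last().unwrap(), 0);
    let mut payload = frame[1..frame.len() - 1].to_vec();
    let decoded_len = cobs::decode_in_place(&mut payload).expect("COBS decoding failed");
    assert_eq!(&payload[..decoded_len], &packet[..]);
}
```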
File diff suppressed because it is too large