Add tutorial 17


@@ -13,7 +13,7 @@ max_line_length = 100
[Dockerfile]
indent_size = 4
[Makefile]
[{Makefile,*.mk}]
indent_style = tab
indent_size = 8

@@ -0,0 +1,2 @@
[target.'cfg(target_os = "none")']
runner = "target/kernel_test_runner.sh"

@@ -0,0 +1,10 @@
{
"editor.formatOnSave": true,
"editor.rulers": [100],
"rust-analyzer.cargo.target": "aarch64-unknown-none-softfloat",
"rust-analyzer.cargo.features": ["bsp_rpi3"],
"rust-analyzer.checkOnSave.allTargets": false,
"rust-analyzer.checkOnSave.extraArgs": ["--lib", "--bins"],
"rust-analyzer.lens.debug": false,
"rust-analyzer.lens.run": false
}

@@ -0,0 +1,96 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 3
[[package]]
name = "cortex-a"
version = "7.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "27bd91f65ccd348bb2d043d98c5b34af141ecef7f102147f59bf5898f6e734ad"
dependencies = [
"tock-registers",
]
[[package]]
name = "debug-symbol-types"
version = "0.1.0"
[[package]]
name = "kernel_symbols"
version = "0.1.0"
dependencies = [
"debug-symbol-types",
]
[[package]]
name = "mingo"
version = "0.17.0"
dependencies = [
"cortex-a",
"debug-symbol-types",
"qemu-exit",
"test-macros",
"test-types",
"tock-registers",
]
[[package]]
name = "proc-macro2"
version = "1.0.37"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ec757218438d5fda206afc041538b2f6d889286160d649a86a24d37e1235afd1"
dependencies = [
"unicode-xid",
]
[[package]]
name = "qemu-exit"
version = "3.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9ff023245bfcc73fb890e1f8d5383825b3131cc920020a5c487d6f113dfc428a"
[[package]]
name = "quote"
version = "1.0.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a1feb54ed693b93a84e14094943b84b7c4eae204c512b7ccb95ab0c66d278ad1"
dependencies = [
"proc-macro2",
]
[[package]]
name = "syn"
version = "1.0.91"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b683b2b825c8eef438b77c36a06dc262294da3d5a5813fac20da149241dcd44d"
dependencies = [
"proc-macro2",
"quote",
"unicode-xid",
]
[[package]]
name = "test-macros"
version = "0.1.0"
dependencies = [
"proc-macro2",
"quote",
"syn",
"test-types",
]
[[package]]
name = "test-types"
version = "0.1.0"
[[package]]
name = "tock-registers"
version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4ee8fba06c1f4d0b396ef61a54530bb6b28f0dc61c38bc8bc5a5a48161e6282e"
[[package]]
name = "unicode-xid"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3"

@@ -0,0 +1,10 @@
[workspace]
members = [
"libraries/*",
"kernel",
"kernel_symbols"
]
[profile.release]
lto = true

@@ -0,0 +1,389 @@
## SPDX-License-Identifier: MIT OR Apache-2.0
##
## Copyright (c) 2018-2022 Andre Richter <andre.o.richter@gmail.com>
include ../common/format.mk
include ../common/docker.mk
##--------------------------------------------------------------------------------------------------
## Optional, user-provided configuration values
##--------------------------------------------------------------------------------------------------
# Default to the RPi3.
BSP ?= rpi3
# Default to a serial device name that is common in Linux.
DEV_SERIAL ?= /dev/ttyUSB0
# Optional integration test name.
ifdef TEST
TEST_ARG = --test $(TEST)
else
TEST_ARG = --test '*'
endif
##--------------------------------------------------------------------------------------------------
## BSP-specific configuration values
##--------------------------------------------------------------------------------------------------
QEMU_MISSING_STRING = "This board is not yet supported for QEMU."
ifeq ($(BSP),rpi3)
TARGET = aarch64-unknown-none-softfloat
KERNEL_BIN = kernel8.img
QEMU_BINARY = qemu-system-aarch64
QEMU_MACHINE_TYPE = raspi3
QEMU_RELEASE_ARGS = -serial stdio -display none
QEMU_TEST_ARGS = $(QEMU_RELEASE_ARGS) -semihosting
OBJDUMP_BINARY = aarch64-none-elf-objdump
NM_BINARY = aarch64-none-elf-nm
READELF_BINARY = aarch64-none-elf-readelf
OPENOCD_ARG = -f /openocd/tcl/interface/ftdi/olimex-arm-usb-tiny-h.cfg -f /openocd/rpi3.cfg
JTAG_BOOT_IMAGE = ../X1_JTAG_boot/jtag_boot_rpi3.img
LD_SCRIPT_PATH = $(shell pwd)/kernel/src/bsp/raspberrypi
RUSTC_MISC_ARGS = -C target-cpu=cortex-a53
else ifeq ($(BSP),rpi4)
TARGET = aarch64-unknown-none-softfloat
KERNEL_BIN = kernel8.img
QEMU_BINARY = qemu-system-aarch64
QEMU_MACHINE_TYPE =
QEMU_RELEASE_ARGS = -serial stdio -display none
QEMU_TEST_ARGS = $(QEMU_RELEASE_ARGS) -semihosting
OBJDUMP_BINARY = aarch64-none-elf-objdump
NM_BINARY = aarch64-none-elf-nm
READELF_BINARY = aarch64-none-elf-readelf
OPENOCD_ARG = -f /openocd/tcl/interface/ftdi/olimex-arm-usb-tiny-h.cfg -f /openocd/rpi4.cfg
JTAG_BOOT_IMAGE = ../X1_JTAG_boot/jtag_boot_rpi4.img
LD_SCRIPT_PATH = $(shell pwd)/kernel/src/bsp/raspberrypi
RUSTC_MISC_ARGS = -C target-cpu=cortex-a72
endif
# Export for build.rs.
export LD_SCRIPT_PATH
##--------------------------------------------------------------------------------------------------
## Targets and Prerequisites
##--------------------------------------------------------------------------------------------------
KERNEL_MANIFEST = kernel/Cargo.toml
KERNEL_LINKER_SCRIPT = kernel.ld
LAST_BUILD_CONFIG = target/$(BSP).build_config
KERNEL_ELF_RAW = target/$(TARGET)/release/kernel
# This parses cargo's dep-info file.
# https://doc.rust-lang.org/cargo/guide/build-cache.html#dep-info-files
KERNEL_ELF_RAW_DEPS = $(filter-out %: ,$(file < $(KERNEL_ELF_RAW).d)) $(LAST_BUILD_CONFIG)
##------------------------------------------------------------------------------
## Translation tables
##------------------------------------------------------------------------------
TT_TOOL_PATH = tools/translation_table_tool
KERNEL_ELF_TTABLES = target/$(TARGET)/release/kernel+ttables
KERNEL_ELF_TTABLES_DEPS = $(KERNEL_ELF_RAW) $(wildcard $(TT_TOOL_PATH)/*)
##------------------------------------------------------------------------------
## Kernel symbols
##------------------------------------------------------------------------------
export KERNEL_SYMBOLS_TOOL_PATH = tools/kernel_symbols_tool
KERNEL_ELF_TTABLES_SYMS = target/$(TARGET)/release/kernel+ttables+symbols
# Unlike with KERNEL_ELF_RAW, we are not relying on dep-info here. One of the reasons being that the
# name of the generated symbols file varies between runs, which can cause confusion.
KERNEL_ELF_TTABLES_SYMS_DEPS = $(KERNEL_ELF_TTABLES) \
$(wildcard kernel_symbols/*) \
$(wildcard $(KERNEL_SYMBOLS_TOOL_PATH)/*)
export TARGET
export KERNEL_SYMBOLS_INPUT_ELF = $(KERNEL_ELF_TTABLES)
export KERNEL_SYMBOLS_OUTPUT_ELF = $(KERNEL_ELF_TTABLES_SYMS)
KERNEL_ELF = $(KERNEL_ELF_TTABLES_SYMS)
##--------------------------------------------------------------------------------------------------
## Command building blocks
##--------------------------------------------------------------------------------------------------
RUSTFLAGS = $(RUSTC_MISC_ARGS) \
-C link-arg=--library-path=$(LD_SCRIPT_PATH) \
-C link-arg=--script=$(KERNEL_LINKER_SCRIPT)
RUSTFLAGS_PEDANTIC = $(RUSTFLAGS) \
-D warnings \
-D missing_docs
FEATURES = --features bsp_$(BSP)
COMPILER_ARGS = --target=$(TARGET) \
$(FEATURES) \
--release
RUSTC_CMD = cargo rustc $(COMPILER_ARGS) --manifest-path $(KERNEL_MANIFEST)
DOC_CMD = cargo doc $(COMPILER_ARGS)
CLIPPY_CMD = cargo clippy $(COMPILER_ARGS)
CHECK_CMD = cargo check $(COMPILER_ARGS)
TEST_CMD = cargo test $(COMPILER_ARGS) --manifest-path $(KERNEL_MANIFEST)
OBJCOPY_CMD = rust-objcopy \
--strip-all \
-O binary
EXEC_QEMU = $(QEMU_BINARY) -M $(QEMU_MACHINE_TYPE)
EXEC_TT_TOOL = ruby $(TT_TOOL_PATH)/main.rb
EXEC_TEST_DISPATCH = ruby ../common/tests/dispatch.rb
EXEC_MINIPUSH = ruby ../common/serial/minipush.rb
##------------------------------------------------------------------------------
## Dockerization
##------------------------------------------------------------------------------
DOCKER_CMD = docker run -t --rm -v $(shell pwd):/work/tutorial -w /work/tutorial
DOCKER_CMD_INTERACT = $(DOCKER_CMD) -i
DOCKER_ARG_DIR_COMMON = -v $(shell pwd)/../common:/work/common
DOCKER_ARG_DIR_JTAG = -v $(shell pwd)/../X1_JTAG_boot:/work/X1_JTAG_boot
DOCKER_ARG_DEV = --privileged -v /dev:/dev
DOCKER_ARG_NET = --network host
# DOCKER_IMAGE defined in include file (see top of this file).
DOCKER_QEMU = $(DOCKER_CMD_INTERACT) $(DOCKER_IMAGE)
DOCKER_TOOLS = $(DOCKER_CMD) $(DOCKER_IMAGE)
DOCKER_TEST = $(DOCKER_CMD) $(DOCKER_ARG_DIR_COMMON) $(DOCKER_IMAGE)
DOCKER_GDB = $(DOCKER_CMD_INTERACT) $(DOCKER_ARG_NET) $(DOCKER_IMAGE)
# Dockerize commands, which require USB device passthrough, only on Linux.
ifeq ($(shell uname -s),Linux)
DOCKER_CMD_DEV = $(DOCKER_CMD_INTERACT) $(DOCKER_ARG_DEV)
DOCKER_CHAINBOOT = $(DOCKER_CMD_DEV) $(DOCKER_ARG_DIR_COMMON) $(DOCKER_IMAGE)
DOCKER_JTAGBOOT = $(DOCKER_CMD_DEV) $(DOCKER_ARG_DIR_COMMON) $(DOCKER_ARG_DIR_JTAG) $(DOCKER_IMAGE)
DOCKER_OPENOCD = $(DOCKER_CMD_DEV) $(DOCKER_ARG_NET) $(DOCKER_IMAGE)
else
DOCKER_OPENOCD = echo "Not yet supported on non-Linux systems."; \#
endif
##--------------------------------------------------------------------------------------------------
## Targets
##--------------------------------------------------------------------------------------------------
.PHONY: all doc qemu chainboot clippy clean readelf objdump nm check
all: $(KERNEL_BIN)
##------------------------------------------------------------------------------
## Save the configuration as a file, so make understands if it changed.
##------------------------------------------------------------------------------
$(LAST_BUILD_CONFIG):
@rm -f target/*.build_config
@mkdir -p target
@touch $(LAST_BUILD_CONFIG)
##------------------------------------------------------------------------------
## Compile the kernel ELF
##------------------------------------------------------------------------------
$(KERNEL_ELF_RAW): $(KERNEL_ELF_RAW_DEPS)
$(call color_header, "Compiling kernel ELF - $(BSP)")
@RUSTFLAGS="$(RUSTFLAGS_PEDANTIC)" $(RUSTC_CMD)
##------------------------------------------------------------------------------
## Precompute the kernel translation tables and patch them into the kernel ELF
##------------------------------------------------------------------------------
$(KERNEL_ELF_TTABLES): $(KERNEL_ELF_TTABLES_DEPS)
$(call color_header, "Precomputing kernel translation tables and patching kernel ELF")
@cp $(KERNEL_ELF_RAW) $(KERNEL_ELF_TTABLES)
@$(DOCKER_TOOLS) $(EXEC_TT_TOOL) $(BSP) $(KERNEL_ELF_TTABLES)
##------------------------------------------------------------------------------
## Generate kernel symbols and patch them into the kernel ELF
##------------------------------------------------------------------------------
$(KERNEL_ELF_TTABLES_SYMS): $(KERNEL_ELF_TTABLES_SYMS_DEPS)
$(call color_header, "Generating kernel symbols and patching kernel ELF")
@time -f "in %es" \
$(MAKE) --no-print-directory -f kernel_symbols.mk
##------------------------------------------------------------------------------
## Generate the stripped kernel binary
##------------------------------------------------------------------------------
$(KERNEL_BIN): $(KERNEL_ELF_TTABLES_SYMS)
$(call color_header, "Generating stripped binary")
@$(OBJCOPY_CMD) $(KERNEL_ELF_TTABLES_SYMS) $(KERNEL_BIN)
$(call color_progress_prefix, "Name")
@echo $(KERNEL_BIN)
$(call color_progress_prefix, "Size")
@printf '%s KiB\n' `du -k $(KERNEL_BIN) | cut -f1`
##------------------------------------------------------------------------------
## Generate the documentation
##------------------------------------------------------------------------------
doc:
$(call color_header, "Generating docs")
@$(DOC_CMD) --document-private-items --open
##------------------------------------------------------------------------------
## Run the kernel in QEMU
##------------------------------------------------------------------------------
ifeq ($(QEMU_MACHINE_TYPE),) # QEMU is not supported for the board.
qemu:
$(call color_header, "$(QEMU_MISSING_STRING)")
else # QEMU is supported.
qemu: $(KERNEL_BIN)
$(call color_header, "Launching QEMU")
@$(DOCKER_QEMU) $(EXEC_QEMU) $(QEMU_RELEASE_ARGS) -kernel $(KERNEL_BIN)
endif
##------------------------------------------------------------------------------
## Push the kernel to the real HW target
##------------------------------------------------------------------------------
chainboot: $(KERNEL_BIN)
@$(DOCKER_CHAINBOOT) $(EXEC_MINIPUSH) $(DEV_SERIAL) $(KERNEL_BIN)
##------------------------------------------------------------------------------
## Run clippy
##------------------------------------------------------------------------------
clippy:
@RUSTFLAGS="$(RUSTFLAGS_PEDANTIC)" $(CLIPPY_CMD)
@RUSTFLAGS="$(RUSTFLAGS_PEDANTIC)" $(CLIPPY_CMD) --features test_build --tests \
--manifest-path $(KERNEL_MANIFEST)
##------------------------------------------------------------------------------
## Clean
##------------------------------------------------------------------------------
clean:
rm -rf target $(KERNEL_BIN)
##------------------------------------------------------------------------------
## Run readelf
##------------------------------------------------------------------------------
readelf: $(KERNEL_ELF)
$(call color_header, "Launching readelf")
@$(DOCKER_TOOLS) $(READELF_BINARY) --headers $(KERNEL_ELF)
##------------------------------------------------------------------------------
## Run objdump
##------------------------------------------------------------------------------
objdump: $(KERNEL_ELF)
$(call color_header, "Launching objdump")
@$(DOCKER_TOOLS) $(OBJDUMP_BINARY) --disassemble --demangle \
--section .text \
--section .rodata \
--section .got \
$(KERNEL_ELF) | rustfilt
##------------------------------------------------------------------------------
## Run nm
##------------------------------------------------------------------------------
nm: $(KERNEL_ELF)
$(call color_header, "Launching nm")
@$(DOCKER_TOOLS) $(NM_BINARY) --demangle --print-size $(KERNEL_ELF) | sort | rustfilt
##--------------------------------------------------------------------------------------------------
## Debugging targets
##--------------------------------------------------------------------------------------------------
.PHONY: jtagboot openocd gdb gdb-opt0
##------------------------------------------------------------------------------
## Push the JTAG boot image to the real HW target
##------------------------------------------------------------------------------
jtagboot:
@$(DOCKER_JTAGBOOT) $(EXEC_MINIPUSH) $(DEV_SERIAL) $(JTAG_BOOT_IMAGE)
##------------------------------------------------------------------------------
## Start OpenOCD session
##------------------------------------------------------------------------------
openocd:
$(call color_header, "Launching OpenOCD")
@$(DOCKER_OPENOCD) openocd $(OPENOCD_ARG)
##------------------------------------------------------------------------------
## Start GDB session
##------------------------------------------------------------------------------
gdb: RUSTC_MISC_ARGS += -C debuginfo=2
gdb-opt0: RUSTC_MISC_ARGS += -C debuginfo=2 -C opt-level=0
gdb gdb-opt0: $(KERNEL_ELF)
$(call color_header, "Launching GDB")
@$(DOCKER_GDB) gdb-multiarch -q $(KERNEL_ELF)
##--------------------------------------------------------------------------------------------------
## Testing targets
##--------------------------------------------------------------------------------------------------
.PHONY: test test_boot test_unit test_integration
test_unit test_integration: FEATURES += --features test_build
ifeq ($(QEMU_MACHINE_TYPE),) # QEMU is not supported for the board.
test_boot test_unit test_integration test:
$(call color_header, "$(QEMU_MISSING_STRING)")
else # QEMU is supported.
##------------------------------------------------------------------------------
## Run boot test
##------------------------------------------------------------------------------
test_boot: $(KERNEL_BIN)
$(call color_header, "Boot test - $(BSP)")
@$(DOCKER_TEST) $(EXEC_TEST_DISPATCH) $(EXEC_QEMU) $(QEMU_RELEASE_ARGS) -kernel $(KERNEL_BIN)
##------------------------------------------------------------------------------
## Helpers for unit and integration test targets
##------------------------------------------------------------------------------
define KERNEL_TEST_RUNNER
#!/usr/bin/env bash
# The cargo test runner seems to change into the crate under test's directory. Therefore, ensure
# this script executes from the root.
cd $(shell pwd)
TEST_ELF=$$(echo $$1 | sed -e 's/.*target/target/g')
TEST_ELF_SYMS="$${TEST_ELF}_syms"
TEST_BINARY=$$(echo $$1.img | sed -e 's/.*target/target/g')
$(DOCKER_TOOLS) $(EXEC_TT_TOOL) $(BSP) $$TEST_ELF > /dev/null
# This overrides the two ENV variables. The other ENV variables that are required as input for
# the .mk file are set already because they are exported by this Makefile and this script is
# started by the same.
KERNEL_SYMBOLS_INPUT_ELF=$$TEST_ELF \
KERNEL_SYMBOLS_OUTPUT_ELF=$$TEST_ELF_SYMS \
$(MAKE) --no-print-directory -f kernel_symbols.mk > /dev/null 2>&1
$(OBJCOPY_CMD) $$TEST_ELF_SYMS $$TEST_BINARY
$(DOCKER_TEST) $(EXEC_TEST_DISPATCH) $(EXEC_QEMU) $(QEMU_TEST_ARGS) -kernel $$TEST_BINARY
endef
export KERNEL_TEST_RUNNER
define test_prepare
@mkdir -p target
@echo "$$KERNEL_TEST_RUNNER" > target/kernel_test_runner.sh
@chmod +x target/kernel_test_runner.sh
endef
##------------------------------------------------------------------------------
## Run unit test(s)
##------------------------------------------------------------------------------
test_unit:
$(call color_header, "Compiling unit test(s) - $(BSP)")
$(call test_prepare)
@RUSTFLAGS="$(RUSTFLAGS_PEDANTIC)" $(TEST_CMD) --lib
##------------------------------------------------------------------------------
## Run integration test(s)
##------------------------------------------------------------------------------
test_integration:
$(call color_header, "Compiling integration test(s) - $(BSP)")
$(call test_prepare)
@RUSTFLAGS="$(RUSTFLAGS_PEDANTIC)" $(TEST_CMD) $(TEST_ARG)
test: test_boot test_unit test_integration
endif

@@ -0,0 +1,927 @@
# Tutorial 17 - Kernel Symbols
## tl;dr
- To enrich and augment existing and future debugging code, we add support for `kernel symbol`
lookup.
## Table of Contents
- [Introduction](#introduction)
- [Implementation](#implementation)
- [Linking Changes](#linking-changes)
- [Kernel Symbols Tool](#kernel-symbols-tool)
- [Lookup Code](#lookup-code)
- [Test it](#test-it)
- [Diff to previous](#diff-to-previous)
## Introduction
Ever since the first tutorial, it was possible to execute the `make nm` target in order to view all
`kernel symbols`. The kernel itself, however, does not yet have any means to correlate a virtual
address to a symbol at runtime. Gaining this capability would be useful for augmenting
debug-related prints. For example, when the kernel is handling an `exception`, it prints the content
of the `exception link register`, which is the program address from which the CPU was executing when
the exception happened.
Until now, in order to understand which function or code such an address belongs to, a manual
lookup by the person debugging the issue was necessary. In this tutorial, we are adding a `data
structure` to the kernel which contains _all the symbol names and corresponding address ranges_.
This enables the kernel to print symbol names in existing and future debug-related code, which
improves triaging of issues by humans, because it does away with the manual lookup.
This tutorial is mostly an enabler for the upcoming tutorial that will add [`backtracing`]
support.
[`backtracing`]: https://en.wikipedia.org/wiki/Stack_trace
## Implementation
First of all, a new support crate is added under `$ROOT/libraries/debug-symbol-types`. It contains
the definition for `struct Symbol`:
```rust
/// A symbol containing a size.
#[repr(C)]
pub struct Symbol {
addr_range: Range<usize>,
name: &'static str,
}
```
The implementation of the struct furthermore defines the two public functions `name()` and
`contains()`, which will be used to query information.
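For reference, here is the corresponding `impl` block, taken from the new
`libraries/debug-symbol-types/src/lib.rs` (shown in full in the diff at the end of this tutorial):
```rust
impl Symbol {
    /// Create an instance.
    pub const fn new(start: usize, size: usize, name: &'static str) -> Symbol {
        Symbol {
            addr_range: Range {
                start,
                end: start + size,
            },
            name,
        }
    }

    /// Returns true if addr is contained in the range.
    pub fn contains(&self, addr: usize) -> bool {
        self.addr_range.contains(&addr)
    }

    /// Returns the symbol's name.
    pub fn name(&self) -> &'static str {
        self.name
    }
}
```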
To enable the kernel to look up symbol names, we will add an `array` to the kernel binary that
contains all the kernel symbols. Because we can query the final symbol names and addresses only
_after_ the kernel has been `linked`, the same approach as for the `translation tables` will be
used: The symbols array will be patched into a `placeholder section` of the final kernel `ELF`.
### Linking Changes
In the `kernel.ld` linker script, we define a new section named `kernel_symbols` and give it a size
of `32 KiB`:
```ld.s
.rodata : ALIGN(8) { *(.rodata*) } :segment_code
.got : ALIGN(8) { *(.got) } :segment_code
.kernel_symbols : ALIGN(8) {
__kernel_symbols_start = .;
. += 32 * 1024;
} :segment_code
```
Also, we are providing the start address of the section through the symbol `__kernel_symbols_start`,
which will be used by our `Rust` code later on.
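For reference, the kernel will later import this linker-provided symbol in `src/symbols.rs` as an
opaque static (excerpt from the file added in this tutorial; see the diff below):
```rust
use core::cell::UnsafeCell;

// Symbol from the linker script.
extern "Rust" {
    static __kernel_symbols_start: UnsafeCell<()>;
}
```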
### Kernel Symbols Tool
Under `$ROOT/tools/kernel_symbols_tool`, we are adding a helper tool that is able to dynamically
generate an `array` of all the kernel symbols and patch it into the final kernel `ELF`. In our main
`Makefile`, we are invoking the tool after the translation table generation. In the first step, the
tool generates a temporary `Rust` file that instantiates the symbols array. Here is an example of
what this can look like:
```console
$ head ./target/aarch64-unknown-none-softfloat/release/kernel+ttables_symbols_demangled.rs
```
```rust
use debug_symbol_types::Symbol;
# [no_mangle]
# [link_section = ".rodata.symbol_desc"]
static KERNEL_SYMBOLS: [Symbol; 139] = [
Symbol::new(18446744072635809792, 124, "_start"),
Symbol::new(18446744072635809920, 8, "BOOT_CORE_ID"),
Symbol::new(18446744072635809928, 8, "PHYS_KERNEL_TABLES_BASE_ADDR"),
Symbol::new(18446744072635809936, 80, "_start_rust"),
Symbol::new(18446744072635813888, 84, "__exception_restore_context"),
// Many more
```
Next, the _helper crate_ `$ROOT/kernel_symbols` is compiled. This crate contains a single `main.rs`
that just includes the temporary symbols file shown above.
```rust
//! Generation of kernel symbols.
#![no_std]
#![no_main]
#[cfg(feature = "generated_symbols_available")]
include!(env!("KERNEL_SYMBOLS_DEMANGLED_RS"));
```
`KERNEL_SYMBOLS_DEMANGLED_RS` is set by the corresponding `build.rs` file. The helper crate has its
own `linker file`, which ensures that just the array and the corresponding strings that it
references are kept:
```ld.s
SECTIONS
{
.rodata : {
ASSERT(. > 0xffffffff00000000, "Expected higher half address")
KEEP(*(.rodata.symbol_desc*))
. = ALIGN(8);
*(.rodata*)
}
}
```
Afterwards, `objcopy` is used to strip the produced helper crate ELF. What remains is a small
`binary blob` that just contains the symbols array and the `names` that are referenced. To ensure
that these references are valid kernel addresses (remember that those are defined as `name: &'static
str`, so basically a pointer to a kernel address), the sub-makefile compiling this helper crate
(`$ROOT/kernel_symbols.mk`) does the following:
It uses the `kernel_symbols_tool` to query the virtual address of the `kernel_symbols` **section**
(of the final kernel ELF). This address is then supplied to the linker when the helper crate is
linked (emphasis on the `--section-start=.rodata=` part):
```Makefile
GET_SYMBOLS_SECTION_VIRT_ADDR = $(DOCKER_TOOLS) $(EXEC_SYMBOLS_TOOL) \
--get_symbols_section_virt_addr $(KERNEL_SYMBOLS_OUTPUT_ELF)
RUSTFLAGS = -C link-arg=--script=$(KERNEL_SYMBOLS_LINKER_SCRIPT) \
-C link-arg=--section-start=.rodata=$$($(GET_SYMBOLS_SECTION_VIRT_ADDR))
```
This might be a bit convoluted, but the main takeaway is: this ensures that the start address of
the `.rodata` section of the `kernel_symbols` helper crate is exactly the same address as the
`placeholder section` of the final kernel ELF into which the symbols `binary blob` will be patched.
The latter is the last step done by the tool.
### Lookup Code
In the kernel, we add the file `src/symbols.rs`. It makes the linker-provided symbol
`__kernel_symbols_start` that we saw earlier accessible, and also defines `NUM_KERNEL_SYMBOLS`:
```rust
#[no_mangle]
static NUM_KERNEL_SYMBOLS: u64 = 0;
```
When the `kernel_symbols_tool` patches the symbols blob into the kernel ELF, it also updates this
value to reflect the number of symbols that are available. This is needed for the code that
internally crafts the slice of symbols that the kernel uses for lookup:
```rust
fn kernel_symbol_section_virt_start_addr() -> Address<Virtual> {
Address::new(unsafe { __kernel_symbols_start.get() as usize })
}
fn kernel_symbols_slice() -> &'static [Symbol] {
let ptr = kernel_symbol_section_virt_start_addr().as_usize() as *const Symbol;
unsafe {
let num = core::ptr::read_volatile(&NUM_KERNEL_SYMBOLS as *const u64) as usize;
slice::from_raw_parts(ptr, num)
}
}
```
Lookup is done by just iterating over the slice:
```rust
/// Retrieve the symbol name corresponding to a virtual address, if any.
pub fn lookup_symbol(addr: Address<Virtual>) -> Option<&'static str> {
for i in kernel_symbols_slice() {
if i.contains(addr.as_usize()) {
return Some(i.name());
}
}
None
}
```
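As a usage example, the exception handler's `Display` implementation (see the diff below) now
resolves the value of `ELR_EL1` to a symbol name like this:
```rust
// Excerpt from the CPU exception context formatting code in _arch/aarch64/exception.rs.
writeln!(
    f,
    " Symbol: {}",
    symbols::lookup_symbol(memory::Address::new(self.elr_el1 as usize))
        .unwrap_or("Symbol not found")
)?;
```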
And that's it for this tutorial. The upcoming tutorial on `backtracing` will put this code to more
prominent use.
## Test it
For now, symbol lookup can be observed in the integration test for synchronous exception handling.
Here, the kernel now also prints the symbol name that corresponds to the value of `ELR_EL1`. In the
following case, this is `kernel_init()`, which is where the exception is generated in the test:
```console
$ TEST=02_exception_sync_page_fault make test_integration
[...]
-------------------------------------------------------------------
🦀 Testing synchronous exception handling by causing a page fault
-------------------------------------------------------------------
[ 0.002640] Writing to bottom of address space to address 1 GiB...
[ 0.004549] Kernel panic!
Panic location:
File 'kernel/src/_arch/aarch64/exception.rs', line 59, column 5
CPU Exception!
ESR_EL1: 0x96000004
...
ELR_EL1: 0xffffffffc0001118
Symbol: kernel_init
```
## Diff to previous
```diff
diff -uNr 16_virtual_mem_part4_higher_half_kernel/Cargo.toml 17_kernel_symbols/Cargo.toml
--- 16_virtual_mem_part4_higher_half_kernel/Cargo.toml
+++ 17_kernel_symbols/Cargo.toml
@@ -2,7 +2,8 @@
members = [
"libraries/*",
- "kernel"
+ "kernel",
+ "kernel_symbols"
]
[profile.release]
diff -uNr 16_virtual_mem_part4_higher_half_kernel/kernel/Cargo.toml 17_kernel_symbols/kernel/Cargo.toml
--- 16_virtual_mem_part4_higher_half_kernel/kernel/Cargo.toml
+++ 17_kernel_symbols/kernel/Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "mingo"
-version = "0.16.0"
+version = "0.17.0"
authors = ["Andre Richter <andre.o.richter@gmail.com>"]
edition = "2021"
@@ -16,6 +16,7 @@
[dependencies]
test-types = { path = "../libraries/test-types" }
+debug-symbol-types = { path = "../libraries/debug-symbol-types" }
# Optional dependencies
tock-registers = { version = "0.7.x", default-features = false, features = ["register_types"], optional = true }
diff -uNr 16_virtual_mem_part4_higher_half_kernel/kernel/src/_arch/aarch64/exception.rs 17_kernel_symbols/kernel/src/_arch/aarch64/exception.rs
--- 16_virtual_mem_part4_higher_half_kernel/kernel/src/_arch/aarch64/exception.rs
+++ 17_kernel_symbols/kernel/src/_arch/aarch64/exception.rs
@@ -11,7 +11,7 @@
//!
//! crate::exception::arch_exception
-use crate::{bsp, exception};
+use crate::{bsp, exception, memory, symbols};
use core::{arch::global_asm, cell::UnsafeCell, fmt};
use cortex_a::{asm::barrier, registers::*};
use tock_registers::{
@@ -262,6 +262,12 @@
writeln!(f, "{}", self.spsr_el1)?;
writeln!(f, "ELR_EL1: {:#018x}", self.elr_el1)?;
+ writeln!(
+ f,
+ " Symbol: {}",
+ symbols::lookup_symbol(memory::Address::new(self.elr_el1 as usize))
+ .unwrap_or("Symbol not found")
+ )?;
writeln!(f)?;
writeln!(f, "General purpose register:")?;
diff -uNr 16_virtual_mem_part4_higher_half_kernel/kernel/src/bsp/raspberrypi/kernel.ld 17_kernel_symbols/kernel/src/bsp/raspberrypi/kernel.ld
--- 16_virtual_mem_part4_higher_half_kernel/kernel/src/bsp/raspberrypi/kernel.ld
+++ 17_kernel_symbols/kernel/src/bsp/raspberrypi/kernel.ld
@@ -56,8 +56,12 @@
*(.text*) /* Everything else */
} :segment_code
- .rodata : ALIGN(8) { *(.rodata*) } :segment_code
- .got : ALIGN(8) { *(.got) } :segment_code
+ .rodata : ALIGN(8) { *(.rodata*) } :segment_code
+ .got : ALIGN(8) { *(.got) } :segment_code
+ .kernel_symbols : ALIGN(8) {
+ __kernel_symbols_start = .;
+ . += 32 * 1024;
+ } :segment_code
. = ALIGN(PAGE_SIZE);
__code_end_exclusive = .;
diff -uNr 16_virtual_mem_part4_higher_half_kernel/kernel/src/bsp/raspberrypi/memory.rs 17_kernel_symbols/kernel/src/bsp/raspberrypi/memory.rs
--- 16_virtual_mem_part4_higher_half_kernel/kernel/src/bsp/raspberrypi/memory.rs
+++ 17_kernel_symbols/kernel/src/bsp/raspberrypi/memory.rs
@@ -20,6 +20,7 @@
//! | .text |
//! | .rodata |
//! | .got |
+//! | .kernel_symbols |
//! | |
//! +---------------------------------------+
//! | | data_start == code_end_exclusive
@@ -41,6 +42,7 @@
//! | .text |
//! | .rodata |
//! | .got |
+//! | .kernel_symbols |
//! | |
//! +---------------------------------------+
//! | | data_start == code_end_exclusive
diff -uNr 16_virtual_mem_part4_higher_half_kernel/kernel/src/lib.rs 17_kernel_symbols/kernel/src/lib.rs
--- 16_virtual_mem_part4_higher_half_kernel/kernel/src/lib.rs
+++ 17_kernel_symbols/kernel/src/lib.rs
@@ -135,6 +135,7 @@
pub mod memory;
pub mod print;
pub mod state;
+pub mod symbols;
pub mod time;
//--------------------------------------------------------------------------------------------------
diff -uNr 16_virtual_mem_part4_higher_half_kernel/kernel/src/symbols.rs 17_kernel_symbols/kernel/src/symbols.rs
--- 16_virtual_mem_part4_higher_half_kernel/kernel/src/symbols.rs
+++ 17_kernel_symbols/kernel/src/symbols.rs
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: MIT OR Apache-2.0
+//
+// Copyright (c) 2022 Andre Richter <andre.o.richter@gmail.com>
+
+//! Debug symbol support.
+
+use crate::memory::{Address, Virtual};
+use core::{cell::UnsafeCell, slice};
+use debug_symbol_types::Symbol;
+
+//--------------------------------------------------------------------------------------------------
+// Private Definitions
+//--------------------------------------------------------------------------------------------------
+
+// Symbol from the linker script.
+extern "Rust" {
+ static __kernel_symbols_start: UnsafeCell<()>;
+}
+
+//--------------------------------------------------------------------------------------------------
+// Global instances
+//--------------------------------------------------------------------------------------------------
+
+/// This will be patched to the correct value by the "kernel symbols tool" after linking. This given
+/// value here is just a (safe) dummy.
+#[no_mangle]
+static NUM_KERNEL_SYMBOLS: u64 = 0;
+
+//--------------------------------------------------------------------------------------------------
+// Private Code
+//--------------------------------------------------------------------------------------------------
+
+fn kernel_symbol_section_virt_start_addr() -> Address<Virtual> {
+ Address::new(unsafe { __kernel_symbols_start.get() as usize })
+}
+
+fn kernel_symbols_slice() -> &'static [Symbol] {
+ let ptr = kernel_symbol_section_virt_start_addr().as_usize() as *const Symbol;
+
+ unsafe {
+ let num = core::ptr::read_volatile(&NUM_KERNEL_SYMBOLS as *const u64) as usize;
+ slice::from_raw_parts(ptr, num)
+ }
+}
+
+//--------------------------------------------------------------------------------------------------
+// Public Code
+//--------------------------------------------------------------------------------------------------
+
+/// Retrieve the symbol name corresponding to a virtual address, if any.
+pub fn lookup_symbol(addr: Address<Virtual>) -> Option<&'static str> {
+ for i in kernel_symbols_slice() {
+ if i.contains(addr.as_usize()) {
+ return Some(i.name());
+ }
+ }
+
+ None
+}
+
+//--------------------------------------------------------------------------------------------------
+// Testing
+//--------------------------------------------------------------------------------------------------
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use test_macros::kernel_test;
+
+ /// Sanity of symbols module.
+ #[kernel_test]
+ fn symbols_sanity() {
+ let first_sym = lookup_symbol(Address::new(
+ crate::common::is_aligned as *const usize as usize,
+ ))
+ .unwrap();
+
+ assert_eq!(first_sym, "libkernel::common::is_aligned");
+
+ let second_sym =
+ lookup_symbol(Address::new(crate::version as *const usize as usize)).unwrap();
+
+ assert_eq!(second_sym, "libkernel::version");
+ }
+}
diff -uNr 16_virtual_mem_part4_higher_half_kernel/kernel_symbols/build.rs 17_kernel_symbols/kernel_symbols/build.rs
--- 16_virtual_mem_part4_higher_half_kernel/kernel_symbols/build.rs
+++ 17_kernel_symbols/kernel_symbols/build.rs
@@ -0,0 +1,14 @@
+use std::{env, path::Path};
+
+fn main() {
+ if let Ok(path) = env::var("KERNEL_SYMBOLS_DEMANGLED_RS") {
+ if Path::new(&path).exists() {
+ println!("cargo:rustc-cfg=feature=\"generated_symbols_available\"")
+ }
+ }
+
+ println!(
+ "cargo:rerun-if-changed={}",
+ Path::new("kernel_symbols.ld").display()
+ );
+}
diff -uNr 16_virtual_mem_part4_higher_half_kernel/kernel_symbols/Cargo.toml 17_kernel_symbols/kernel_symbols/Cargo.toml
--- 16_virtual_mem_part4_higher_half_kernel/kernel_symbols/Cargo.toml
+++ 17_kernel_symbols/kernel_symbols/Cargo.toml
@@ -0,0 +1,15 @@
+[package]
+name = "kernel_symbols"
+version = "0.1.0"
+edition = "2021"
+
+[features]
+default = []
+generated_symbols_available = []
+
+##--------------------------------------------------------------------------------------------------
+## Dependencies
+##--------------------------------------------------------------------------------------------------
+
+[dependencies]
+debug-symbol-types = { path = "../libraries/debug-symbol-types" }
diff -uNr 16_virtual_mem_part4_higher_half_kernel/kernel_symbols/kernel_symbols.ld 17_kernel_symbols/kernel_symbols/kernel_symbols.ld
--- 16_virtual_mem_part4_higher_half_kernel/kernel_symbols/kernel_symbols.ld
+++ 17_kernel_symbols/kernel_symbols/kernel_symbols.ld
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT OR Apache-2.0
+ *
+ * Copyright (c) 2022 Andre Richter <andre.o.richter@gmail.com>
+ */
+
+SECTIONS
+{
+ .rodata : {
+ ASSERT(. > 0xffffffff00000000, "Expected higher half address")
+
+ KEEP(*(.rodata.symbol_desc*))
+ . = ALIGN(8);
+ *(.rodata*)
+ }
+}
diff -uNr 16_virtual_mem_part4_higher_half_kernel/kernel_symbols/src/main.rs 17_kernel_symbols/kernel_symbols/src/main.rs
--- 16_virtual_mem_part4_higher_half_kernel/kernel_symbols/src/main.rs
+++ 17_kernel_symbols/kernel_symbols/src/main.rs
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: MIT OR Apache-2.0
+//
+// Copyright (c) 2022 Andre Richter <andre.o.richter@gmail.com>
+
+//! Generation of kernel symbols.
+
+#![no_std]
+#![no_main]
+
+#[cfg(feature = "generated_symbols_available")]
+include!(env!("KERNEL_SYMBOLS_DEMANGLED_RS"));
+
+#[panic_handler]
+fn panic(_info: &core::panic::PanicInfo) -> ! {
+ unimplemented!()
+}
diff -uNr 16_virtual_mem_part4_higher_half_kernel/kernel_symbols.mk 17_kernel_symbols/kernel_symbols.mk
--- 16_virtual_mem_part4_higher_half_kernel/kernel_symbols.mk
+++ 17_kernel_symbols/kernel_symbols.mk
@@ -0,0 +1,101 @@
+## SPDX-License-Identifier: MIT OR Apache-2.0
+##
+## Copyright (c) 2018-2022 Andre Richter <andre.o.richter@gmail.com>
+
+include ../common/format.mk
+include ../common/docker.mk
+
+##--------------------------------------------------------------------------------------------------
+## Check for input variables that need be exported by the calling Makefile
+##--------------------------------------------------------------------------------------------------
+ifndef KERNEL_SYMBOLS_TOOL_PATH
+$(error KERNEL_SYMBOLS_TOOL_PATH is not set)
+endif
+
+ifndef TARGET
+$(error TARGET is not set)
+endif
+
+ifndef KERNEL_SYMBOLS_INPUT_ELF
+$(error KERNEL_SYMBOLS_INPUT_ELF is not set)
+endif
+
+ifndef KERNEL_SYMBOLS_OUTPUT_ELF
+$(error KERNEL_SYMBOLS_OUTPUT_ELF is not set)
+endif
+
+
+
+##--------------------------------------------------------------------------------------------------
+## Targets and Prerequisites
+##--------------------------------------------------------------------------------------------------
+KERNEL_SYMBOLS_MANIFEST = kernel_symbols/Cargo.toml
+KERNEL_SYMBOLS_LINKER_SCRIPT = kernel_symbols/kernel_symbols.ld
+
+KERNEL_SYMBOLS_RS = $(KERNEL_SYMBOLS_INPUT_ELF)_symbols.rs
+KERNEL_SYMBOLS_DEMANGLED_RS = $(shell pwd)/$(KERNEL_SYMBOLS_INPUT_ELF)_symbols_demangled.rs
+
+KERNEL_SYMBOLS_ELF = target/$(TARGET)/release/kernel_symbols
+KERNEL_SYMBOLS_STRIPPED = target/$(TARGET)/release/kernel_symbols_stripped
+
+# Export for build.rs of kernel_symbols crate.
+export KERNEL_SYMBOLS_DEMANGLED_RS
+
+
+
+##--------------------------------------------------------------------------------------------------
+## Command building blocks
+##--------------------------------------------------------------------------------------------------
+GET_SYMBOLS_SECTION_VIRT_ADDR = $(DOCKER_TOOLS) $(EXEC_SYMBOLS_TOOL) \
+ --get_symbols_section_virt_addr $(KERNEL_SYMBOLS_OUTPUT_ELF)
+
+RUSTFLAGS = -C link-arg=--script=$(KERNEL_SYMBOLS_LINKER_SCRIPT) \
+ -C link-arg=--section-start=.rodata=$$($(GET_SYMBOLS_SECTION_VIRT_ADDR))
+
+RUSTFLAGS_PEDANTIC = $(RUSTFLAGS) \
+ -D warnings \
+ -D missing_docs
+
+COMPILER_ARGS = --target=$(TARGET) \
+ --release
+
+RUSTC_CMD = cargo rustc $(COMPILER_ARGS) --manifest-path $(KERNEL_SYMBOLS_MANIFEST)
+OBJCOPY_CMD = rust-objcopy \
+ --strip-all \
+ -O binary
+
+EXEC_SYMBOLS_TOOL = ruby $(KERNEL_SYMBOLS_TOOL_PATH)/main.rb
+
+##------------------------------------------------------------------------------
+## Dockerization
+##------------------------------------------------------------------------------
+DOCKER_CMD = docker run -t --rm -v $(shell pwd):/work/tutorial -w /work/tutorial
+
+# DOCKER_IMAGE defined in include file (see top of this file).
+DOCKER_TOOLS = $(DOCKER_CMD) $(DOCKER_IMAGE)
+
+
+
+##--------------------------------------------------------------------------------------------------
+## Targets
+##--------------------------------------------------------------------------------------------------
+.PHONY: all
+
+all:
+ @cp $(KERNEL_SYMBOLS_INPUT_ELF) $(KERNEL_SYMBOLS_OUTPUT_ELF)
+
+ @$(DOCKER_TOOLS) $(EXEC_SYMBOLS_TOOL) --gen_symbols $(KERNEL_SYMBOLS_OUTPUT_ELF) \
+ $(KERNEL_SYMBOLS_RS)
+
+ $(call color_progress_prefix, "Demangling")
+ @echo Symbol names
+ @cat $(KERNEL_SYMBOLS_RS) | rustfilt > $(KERNEL_SYMBOLS_DEMANGLED_RS)
+
+ @RUSTFLAGS="$(RUSTFLAGS_PEDANTIC)" $(RUSTC_CMD)
+
+ $(call color_progress_prefix, "Stripping")
+ @echo Symbols ELF file
+ @$(OBJCOPY_CMD) $(KERNEL_SYMBOLS_ELF) $(KERNEL_SYMBOLS_STRIPPED)
+
+ @$(DOCKER_TOOLS) $(EXEC_SYMBOLS_TOOL) --patch_data $(KERNEL_SYMBOLS_OUTPUT_ELF) \
+ $(KERNEL_SYMBOLS_STRIPPED)
diff -uNr 16_virtual_mem_part4_higher_half_kernel/libraries/debug-symbol-types/Cargo.toml 17_kernel_symbols/libraries/debug-symbol-types/Cargo.toml
--- 16_virtual_mem_part4_higher_half_kernel/libraries/debug-symbol-types/Cargo.toml
+++ 17_kernel_symbols/libraries/debug-symbol-types/Cargo.toml
@@ -0,0 +1,4 @@
+[package]
+name = "debug-symbol-types"
+version = "0.1.0"
+edition = "2021"
diff -uNr 16_virtual_mem_part4_higher_half_kernel/libraries/debug-symbol-types/src/lib.rs 17_kernel_symbols/libraries/debug-symbol-types/src/lib.rs
--- 16_virtual_mem_part4_higher_half_kernel/libraries/debug-symbol-types/src/lib.rs
+++ 17_kernel_symbols/libraries/debug-symbol-types/src/lib.rs
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: MIT OR Apache-2.0
+//
+// Copyright (c) 2022 Andre Richter <andre.o.richter@gmail.com>
+
+//! Types for implementing debug symbol support.
+
+#![no_std]
+
+use core::ops::Range;
+
+/// A symbol containing a size.
+#[repr(C)]
+pub struct Symbol {
+ addr_range: Range<usize>,
+ name: &'static str,
+}
+
+impl Symbol {
+ /// Create an instance.
+ pub const fn new(start: usize, size: usize, name: &'static str) -> Symbol {
+ Symbol {
+ addr_range: Range {
+ start,
+ end: start + size,
+ },
+ name,
+ }
+ }
+
+ /// Returns true if addr is contained in the range.
+ pub fn contains(&self, addr: usize) -> bool {
+ self.addr_range.contains(&addr)
+ }
+
+ /// Returns the symbol's name.
+ pub fn name(&self) -> &'static str {
+ self.name
+ }
+}
diff -uNr 16_virtual_mem_part4_higher_half_kernel/Makefile 17_kernel_symbols/Makefile
--- 16_virtual_mem_part4_higher_half_kernel/Makefile
+++ 17_kernel_symbols/Makefile
@@ -84,7 +84,24 @@
KERNEL_ELF_TTABLES = target/$(TARGET)/release/kernel+ttables
KERNEL_ELF_TTABLES_DEPS = $(KERNEL_ELF_RAW) $(wildcard $(TT_TOOL_PATH)/*)
-KERNEL_ELF = $(KERNEL_ELF_TTABLES)
+##------------------------------------------------------------------------------
+## Kernel symbols
+##------------------------------------------------------------------------------
+export KERNEL_SYMBOLS_TOOL_PATH = tools/kernel_symbols_tool
+
+KERNEL_ELF_TTABLES_SYMS = target/$(TARGET)/release/kernel+ttables+symbols
+
+# Unlike with KERNEL_ELF_RAW, we are not relying on dep-info here. One of the reasons being that the
+# name of the generated symbols file varies between runs, which can cause confusion.
+KERNEL_ELF_TTABLES_SYMS_DEPS = $(KERNEL_ELF_TTABLES) \
+ $(wildcard kernel_symbols/*) \
+ $(wildcard $(KERNEL_SYMBOLS_TOOL_PATH)/*)
+
+export TARGET
+export KERNEL_SYMBOLS_INPUT_ELF = $(KERNEL_ELF_TTABLES)
+export KERNEL_SYMBOLS_OUTPUT_ELF = $(KERNEL_ELF_TTABLES_SYMS)
+
+KERNEL_ELF = $(KERNEL_ELF_TTABLES_SYMS)
@@ -178,11 +195,18 @@
@$(DOCKER_TOOLS) $(EXEC_TT_TOOL) $(BSP) $(KERNEL_ELF_TTABLES)
##------------------------------------------------------------------------------
+## Generate kernel symbols and patch them into the kernel ELF
+##------------------------------------------------------------------------------
+$(KERNEL_ELF_TTABLES_SYMS): $(KERNEL_ELF_TTABLES_SYMS_DEPS)
+ $(call color_header, "Generating kernel symbols and patching kernel ELF")
+ @$(MAKE) --no-print-directory -f kernel_symbols.mk
+
+##------------------------------------------------------------------------------
## Generate the stripped kernel binary
##------------------------------------------------------------------------------
-$(KERNEL_BIN): $(KERNEL_ELF_TTABLES)
+$(KERNEL_BIN): $(KERNEL_ELF_TTABLES_SYMS)
$(call color_header, "Generating stripped binary")
- @$(OBJCOPY_CMD) $(KERNEL_ELF_TTABLES) $(KERNEL_BIN)
+ @$(OBJCOPY_CMD) $(KERNEL_ELF_TTABLES_SYMS) $(KERNEL_BIN)
$(call color_progress_prefix, "Name")
@echo $(KERNEL_BIN)
$(call color_progress_prefix, "Size")
@@ -319,10 +343,19 @@
cd $(shell pwd)
TEST_ELF=$$(echo $$1 | sed -e 's/.*target/target/g')
+ TEST_ELF_SYMS="$${TEST_ELF}_syms"
TEST_BINARY=$$(echo $$1.img | sed -e 's/.*target/target/g')
$(DOCKER_TOOLS) $(EXEC_TT_TOOL) $(BSP) $$TEST_ELF > /dev/null
- $(OBJCOPY_CMD) $$TEST_ELF $$TEST_BINARY
+
+ # This overrides the two ENV variables. The other ENV variables that are required as input for
+ # the .mk file are set already because they are exported by this Makefile and this script is
+ # started by the same.
+ KERNEL_SYMBOLS_INPUT_ELF=$$TEST_ELF \
+ KERNEL_SYMBOLS_OUTPUT_ELF=$$TEST_ELF_SYMS \
+ $(MAKE) --no-print-directory -f kernel_symbols.mk
+
+ $(OBJCOPY_CMD) $$TEST_ELF_SYMS $$TEST_BINARY
$(DOCKER_TEST) $(EXEC_TEST_DISPATCH) $(EXEC_QEMU) $(QEMU_TEST_ARGS) -kernel $$TEST_BINARY
endef
diff -uNr 16_virtual_mem_part4_higher_half_kernel/tools/kernel_symbols_tool/cmds.rb 17_kernel_symbols/tools/kernel_symbols_tool/cmds.rb
--- 16_virtual_mem_part4_higher_half_kernel/tools/kernel_symbols_tool/cmds.rb
+++ 17_kernel_symbols/tools/kernel_symbols_tool/cmds.rb
@@ -0,0 +1,45 @@
+# frozen_string_literal: true
+
+# SPDX-License-Identifier: MIT OR Apache-2.0
+#
+# Copyright (c) 2022 Andre Richter <andre.o.richter@gmail.com>
+
+def generate_symbols(kernel_elf, output_file)
+ File.open(output_file, 'w') do |file|
+ header = <<~HEREDOC
+ use debug_symbol_types::Symbol;
+
+ # [no_mangle]
+ # [link_section = ".rodata.symbol_desc"]
+ static KERNEL_SYMBOLS: [Symbol; #{kernel_elf.num_symbols}] = [
+ HEREDOC
+
+ file.write(header)
+ kernel_elf.symbols.each do |sym|
+ value = sym.header.st_value
+ size = sym.header.st_size
+ name = sym.name
+
+ file.write(" Symbol::new(#{value}, #{size}, \"#{name}\"),\n")
+ end
+ file.write("];\n")
+ end
+end
+
+def get_symbols_section_virt_addr(kernel_elf)
+ kernel_elf.kernel_symbols_section_virt_addr
+end
+
+def patch_symbol_data(kernel_elf, symbols_blob_path)
+ symbols_blob = File.binread(symbols_blob_path)
+
+ raise if symbols_blob.size > kernel_elf.kernel_symbols_section_size
+
+ File.binwrite(kernel_elf.path, File.binread(symbols_blob_path),
+ kernel_elf.kernel_symbols_section_offset_in_file)
+end
+
+def patch_num_symbols(kernel_elf)
+ num_packed = [kernel_elf.num_symbols].pack('Q<*') # "Q" == uint64_t, "<" == little endian
+ File.binwrite(kernel_elf.path, num_packed, kernel_elf.num_kernel_symbols_offset_in_file)
+end
diff -uNr 16_virtual_mem_part4_higher_half_kernel/tools/kernel_symbols_tool/kernel_elf.rb 17_kernel_symbols/tools/kernel_symbols_tool/kernel_elf.rb
--- 16_virtual_mem_part4_higher_half_kernel/tools/kernel_symbols_tool/kernel_elf.rb
+++ 17_kernel_symbols/tools/kernel_symbols_tool/kernel_elf.rb
@@ -0,0 +1,74 @@
+# frozen_string_literal: true
+
+# SPDX-License-Identifier: MIT OR Apache-2.0
+#
+# Copyright (c) 2021-2022 Andre Richter <andre.o.richter@gmail.com>
+
+# KernelELF
+class KernelELF
+ attr_reader :path
+
+ def initialize(kernel_elf_path, kernel_symbols_section, num_kernel_symbols)
+ @elf = ELFTools::ELFFile.new(File.open(kernel_elf_path))
+ @symtab_section = @elf.section_by_name('.symtab')
+
+ @path = kernel_elf_path
+ fetch_values(kernel_symbols_section, num_kernel_symbols)
+ end
+
+ private
+
+ def fetch_values(kernel_symbols_section, num_kernel_symbols)
+ sym = @symtab_section.symbol_by_name(num_kernel_symbols)
+ raise "Symbol \"#{num_kernel_symbols}\" not found" if sym.nil?
+
+ @num_kernel_symbols = sym
+
+ section = @elf.section_by_name(kernel_symbols_section)
+ raise "Section \"#{kernel_symbols_section}\" not found" if section.nil?
+
+ @kernel_symbols_section = section
+ end
+
+ def num_kernel_symbols_virt_addr
+ @num_kernel_symbols.header.st_value
+ end
+
+ def segment_containing_virt_addr(virt_addr)
+ @elf.each_segments do |segment|
+ return segment if segment.vma_in?(virt_addr)
+ end
+ end
+
+ def virt_addr_to_file_offset(virt_addr)
+ segment = segment_containing_virt_addr(virt_addr)
+ segment.vma_to_offset(virt_addr)
+ end
+
+ public
+
+ def symbols
+ non_zero_symbols = @symtab_section.symbols.reject { |sym| sym.header.st_size.zero? }
+ non_zero_symbols.sort_by { |sym| sym.header.st_value }
+ end
+
+ def num_symbols
+ symbols.size
+ end
+
+ def kernel_symbols_section_virt_addr
+ @kernel_symbols_section.header.sh_addr.to_i
+ end
+
+ def kernel_symbols_section_size
+ @kernel_symbols_section.header.sh_size.to_i
+ end
+
+ def kernel_symbols_section_offset_in_file
+ virt_addr_to_file_offset(kernel_symbols_section_virt_addr)
+ end
+
+ def num_kernel_symbols_offset_in_file
+ virt_addr_to_file_offset(num_kernel_symbols_virt_addr)
+ end
+end
diff -uNr 16_virtual_mem_part4_higher_half_kernel/tools/kernel_symbols_tool/main.rb 17_kernel_symbols/tools/kernel_symbols_tool/main.rb
--- 16_virtual_mem_part4_higher_half_kernel/tools/kernel_symbols_tool/main.rb
+++ 17_kernel_symbols/tools/kernel_symbols_tool/main.rb
@@ -0,0 +1,47 @@
+#!/usr/bin/env ruby
+# frozen_string_literal: true
+
+# SPDX-License-Identifier: MIT OR Apache-2.0
+#
+# Copyright (c) 2022 Andre Richter <andre.o.richter@gmail.com>
+
+require 'rubygems'
+require 'bundler/setup'
+require 'colorize'
+require 'elftools'
+
+require_relative 'kernel_elf'
+require_relative 'cmds'
+
+KERNEL_SYMBOLS_SECTION = '.kernel_symbols'
+NUM_KERNEL_SYMBOLS = 'NUM_KERNEL_SYMBOLS'
+
+cmd = ARGV[0]
+
+kernel_elf_path = ARGV[1]
+kernel_elf = KernelELF.new(kernel_elf_path, KERNEL_SYMBOLS_SECTION, NUM_KERNEL_SYMBOLS)
+
+case cmd
+when '--gen_symbols'
+ output_file = ARGV[2]
+
+ print 'Generating'.rjust(12).green.bold
+ puts ' Symbols source file'
+
+ generate_symbols(kernel_elf, output_file)
+when '--get_symbols_section_virt_addr'
+ addr = get_symbols_section_virt_addr(kernel_elf)
+
+ puts "0x#{addr.to_s(16)}"
+when '--patch_data'
+ symbols_blob_path = ARGV[2]
+ num_symbols = kernel_elf.num_symbols
+
+ print 'Patching'.rjust(12).green.bold
+ puts " Symbols blob and number of symbols (#{num_symbols}) into ELF"
+
+ patch_symbol_data(kernel_elf, symbols_blob_path)
+ patch_num_symbols(kernel_elf)
+else
+ raise
+end
```

@@ -0,0 +1,58 @@
[package]
name = "mingo"
version = "0.17.0"
authors = ["Andre Richter <andre.o.richter@gmail.com>"]
edition = "2021"
[features]
default = []
bsp_rpi3 = ["tock-registers"]
bsp_rpi4 = ["tock-registers"]
test_build = ["qemu-exit"]
##--------------------------------------------------------------------------------------------------
## Dependencies
##--------------------------------------------------------------------------------------------------
[dependencies]
test-types = { path = "../libraries/test-types" }
debug-symbol-types = { path = "../libraries/debug-symbol-types" }
# Optional dependencies
tock-registers = { version = "0.7.x", default-features = false, features = ["register_types"], optional = true }
qemu-exit = { version = "3.x.x", optional = true }
# Platform specific dependencies
[target.'cfg(target_arch = "aarch64")'.dependencies]
cortex-a = { version = "7.x.x" }
##--------------------------------------------------------------------------------------------------
## Testing
##--------------------------------------------------------------------------------------------------
[dev-dependencies]
test-macros = { path = "../libraries/test-macros" }
# Unit tests are done in the library part of the kernel.
[lib]
name = "libkernel"
test = true
# Disable unit tests for the kernel binary.
[[bin]]
name = "kernel"
path = "src/main.rs"
test = false
# List of tests without harness.
[[test]]
name = "00_console_sanity"
harness = false
[[test]]
name = "02_exception_sync_page_fault"
harness = false
[[test]]
name = "03_exception_restore_sanity"
harness = false

@@ -0,0 +1,20 @@
use std::{env, fs, process};
fn main() {
let ld_script_path = match env::var("LD_SCRIPT_PATH") {
Ok(var) => var,
_ => process::exit(0),
};
let files = fs::read_dir(ld_script_path).unwrap();
files
.filter_map(Result::ok)
.filter(|d| {
if let Some(e) = d.path().extension() {
e == "ld"
} else {
false
}
})
.for_each(|f| println!("cargo:rerun-if-changed={}", f.path().display()));
}

@@ -0,0 +1,49 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2018-2022 Andre Richter <andre.o.richter@gmail.com>
//! Architectural processor code.
//!
//! # Orientation
//!
//! Since arch modules are imported into generic modules using the path attribute, the path of this
//! file is:
//!
//! crate::cpu::arch_cpu
use cortex_a::asm;
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
pub use asm::nop;
/// Pause execution on the core.
#[inline(always)]
pub fn wait_forever() -> ! {
loop {
asm::wfe()
}
}
//--------------------------------------------------------------------------------------------------
// Testing
//--------------------------------------------------------------------------------------------------
#[cfg(feature = "test_build")]
use qemu_exit::QEMUExit;
#[cfg(feature = "test_build")]
const QEMU_EXIT_HANDLE: qemu_exit::AArch64 = qemu_exit::AArch64::new();
/// Make the host QEMU binary execute `exit(1)`.
#[cfg(feature = "test_build")]
pub fn qemu_exit_failure() -> ! {
QEMU_EXIT_HANDLE.exit_failure()
}
/// Make the host QEMU binary execute `exit(0)`.
#[cfg(feature = "test_build")]
pub fn qemu_exit_success() -> ! {
QEMU_EXIT_HANDLE.exit_success()
}

@@ -0,0 +1,95 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2021-2022 Andre Richter <andre.o.richter@gmail.com>
//! Architectural boot code.
//!
//! # Orientation
//!
//! Since arch modules are imported into generic modules using the path attribute, the path of this
//! file is:
//!
//! crate::cpu::boot::arch_boot
use crate::{memory, memory::Address};
use core::arch::global_asm;
use cortex_a::{asm, registers::*};
use tock_registers::interfaces::Writeable;
// Assembly counterpart to this file.
global_asm!(include_str!("boot.s"));
//--------------------------------------------------------------------------------------------------
// Private Code
//--------------------------------------------------------------------------------------------------
/// Prepares the transition from EL2 to EL1.
///
/// # Safety
///
/// - The `bss` section is not initialized yet. The code must not use or reference it in any way.
/// - The HW state of EL1 must be prepared in a sound way.
#[inline(always)]
unsafe fn prepare_el2_to_el1_transition(
virt_boot_core_stack_end_exclusive_addr: u64,
virt_kernel_init_addr: u64,
) {
// Enable timer counter registers for EL1.
CNTHCTL_EL2.write(CNTHCTL_EL2::EL1PCEN::SET + CNTHCTL_EL2::EL1PCTEN::SET);
// No offset for reading the counters.
CNTVOFF_EL2.set(0);
// Set EL1 execution state to AArch64.
HCR_EL2.write(HCR_EL2::RW::EL1IsAarch64);
// Set up a simulated exception return.
//
// First, fake a saved program status where all interrupts were masked and SP_EL1 was used as a
// stack pointer.
SPSR_EL2.write(
SPSR_EL2::D::Masked
+ SPSR_EL2::A::Masked
+ SPSR_EL2::I::Masked
+ SPSR_EL2::F::Masked
+ SPSR_EL2::M::EL1h,
);
// Second, let the link register point to kernel_init().
ELR_EL2.set(virt_kernel_init_addr);
// Set up SP_EL1 (stack pointer), which will be used by EL1 once we "return" to it. Since there
// are no plans to ever return to EL2, just re-use the same stack.
SP_EL1.set(virt_boot_core_stack_end_exclusive_addr);
}
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
/// The Rust entry of the `kernel` binary.
///
/// The function is called from the assembly `_start` function.
///
/// # Safety
///
/// - Exception return from EL2 must continue execution in EL1 with `kernel_init()`.
#[no_mangle]
pub unsafe extern "C" fn _start_rust(
phys_kernel_tables_base_addr: u64,
virt_boot_core_stack_end_exclusive_addr: u64,
virt_kernel_init_addr: u64,
) -> ! {
prepare_el2_to_el1_transition(
virt_boot_core_stack_end_exclusive_addr,
virt_kernel_init_addr,
);
// Turn on the MMU for EL1.
let addr = Address::new(phys_kernel_tables_base_addr as usize);
memory::mmu::enable_mmu_and_caching(addr).unwrap();
// Use `eret` to "return" to EL1. Since virtual memory will already be enabled, this results in
// execution of kernel_init() in EL1 from its _virtual address_.
asm::eret()
}

@@ -0,0 +1,100 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2021-2022 Andre Richter <andre.o.richter@gmail.com>
//--------------------------------------------------------------------------------------------------
// Definitions
//--------------------------------------------------------------------------------------------------
// Load the address of a symbol into a register, PC-relative.
//
// The symbol must lie within +/- 4 GiB of the Program Counter.
//
// # Resources
//
// - https://sourceware.org/binutils/docs-2.36/as/AArch64_002dRelocations.html
.macro ADR_REL register, symbol
adrp \register, \symbol
add \register, \register, #:lo12:\symbol
.endm
// Load the address of a symbol into a register, absolute.
//
// # Resources
//
// - https://sourceware.org/binutils/docs-2.36/as/AArch64_002dRelocations.html
.macro ADR_ABS register, symbol
movz \register, #:abs_g3:\symbol
movk \register, #:abs_g2_nc:\symbol
movk \register, #:abs_g1_nc:\symbol
movk \register, #:abs_g0_nc:\symbol
.endm
.equ _EL2, 0x8
.equ _core_id_mask, 0b11
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
.section .text._start
//------------------------------------------------------------------------------
// fn _start()
//------------------------------------------------------------------------------
_start:
// Only proceed if the core executes in EL2. Park it otherwise.
mrs x0, CurrentEL
cmp x0, _EL2
b.ne .L_parking_loop
// Only proceed on the boot core. Park it otherwise.
mrs x1, MPIDR_EL1
and x1, x1, _core_id_mask
ldr x2, BOOT_CORE_ID // provided by bsp/__board_name__/cpu.rs
cmp x1, x2
b.ne .L_parking_loop
// If execution reaches here, it is the boot core.
// Initialize DRAM.
ADR_REL x0, __bss_start
ADR_REL x1, __bss_end_exclusive
.L_bss_init_loop:
cmp x0, x1
b.eq .L_prepare_rust
stp xzr, xzr, [x0], #16
b .L_bss_init_loop
// Prepare the jump to Rust code.
.L_prepare_rust:
// Load the base address of the kernel's translation tables.
ldr x0, PHYS_KERNEL_TABLES_BASE_ADDR // provided by bsp/__board_name__/memory/mmu.rs
// Load the _absolute_ addresses of the following symbols. Since the kernel is linked at
// the top of the 64 bit address space, these are effectively virtual addresses.
ADR_ABS x1, __boot_core_stack_end_exclusive
ADR_ABS x2, kernel_init
// Load the PC-relative address of the stack and set the stack pointer.
//
// Since _start() is the first function that runs after the firmware has loaded the kernel
// into memory, retrieving this symbol PC-relative returns the "physical" address.
//
// Setting the stack pointer to this value ensures that anything that still runs in EL2,
// until the kernel returns to EL1 with the MMU enabled, works as well. After the return to
// EL1, the virtual address of the stack retrieved above will be used.
ADR_REL x4, __boot_core_stack_end_exclusive
mov sp, x4
// Jump to Rust code. x0, x1 and x2 hold the function arguments provided to _start_rust().
b _start_rust
// Infinitely wait for events (aka "park the core").
.L_parking_loop:
wfe
b .L_parking_loop
.size _start, . - _start
.type _start, function
.global _start

@ -0,0 +1,30 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2018-2022 Andre Richter <andre.o.richter@gmail.com>
//! Architectural symmetric multiprocessing.
//!
//! # Orientation
//!
//! Since arch modules are imported into generic modules using the path attribute, the path of this
//! file is:
//!
//! crate::cpu::smp::arch_smp
use cortex_a::registers::*;
use tock_registers::interfaces::Readable;
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
/// Return the executing core's id.
#[inline(always)]
pub fn core_id<T>() -> T
where
T: From<u8>,
{
const CORE_MASK: u64 = 0b11;
T::from((MPIDR_EL1.get() & CORE_MASK) as u8)
}

@ -0,0 +1,321 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2018-2022 Andre Richter <andre.o.richter@gmail.com>
//! Architectural synchronous and asynchronous exception handling.
//!
//! # Orientation
//!
//! Since arch modules are imported into generic modules using the path attribute, the path of this
//! file is:
//!
//! crate::exception::arch_exception
use crate::{bsp, exception, memory, symbols};
use core::{arch::global_asm, cell::UnsafeCell, fmt};
use cortex_a::{asm::barrier, registers::*};
use tock_registers::{
interfaces::{Readable, Writeable},
registers::InMemoryRegister,
};
// Assembly counterpart to this file.
global_asm!(include_str!("exception.s"));
//--------------------------------------------------------------------------------------------------
// Private Definitions
//--------------------------------------------------------------------------------------------------
/// Wrapper structs for memory copies of registers.
#[repr(transparent)]
struct SpsrEL1(InMemoryRegister<u64, SPSR_EL1::Register>);
struct EsrEL1(InMemoryRegister<u64, ESR_EL1::Register>);
/// The exception context as it is stored on the stack on exception entry.
#[repr(C)]
struct ExceptionContext {
/// General Purpose Registers.
gpr: [u64; 30],
/// The link register, aka x30.
lr: u64,
/// Exception link register. The program counter at the time the exception happened.
elr_el1: u64,
/// Saved program status.
spsr_el1: SpsrEL1,
/// Exception syndrome register.
esr_el1: EsrEL1,
}
//--------------------------------------------------------------------------------------------------
// Private Code
//--------------------------------------------------------------------------------------------------
/// Prints verbose information about the exception and then panics.
fn default_exception_handler(exc: &ExceptionContext) {
panic!(
"CPU Exception!\n\n\
{}",
exc
);
}
//------------------------------------------------------------------------------
// Current, EL0
//------------------------------------------------------------------------------
#[no_mangle]
unsafe extern "C" fn current_el0_synchronous(_e: &mut ExceptionContext) {
panic!("Should not be here. Use of SP_EL0 in EL1 is not supported.")
}
#[no_mangle]
unsafe extern "C" fn current_el0_irq(_e: &mut ExceptionContext) {
panic!("Should not be here. Use of SP_EL0 in EL1 is not supported.")
}
#[no_mangle]
unsafe extern "C" fn current_el0_serror(_e: &mut ExceptionContext) {
panic!("Should not be here. Use of SP_EL0 in EL1 is not supported.")
}
//------------------------------------------------------------------------------
// Current, ELx
//------------------------------------------------------------------------------
#[no_mangle]
unsafe extern "C" fn current_elx_synchronous(e: &mut ExceptionContext) {
#[cfg(feature = "test_build")]
{
const TEST_SVC_ID: u64 = 0x1337;
if let Some(ESR_EL1::EC::Value::SVC64) = e.esr_el1.exception_class() {
if e.esr_el1.iss() == TEST_SVC_ID {
return;
}
}
}
default_exception_handler(e);
}
#[no_mangle]
unsafe extern "C" fn current_elx_irq(_e: &mut ExceptionContext) {
use exception::asynchronous::interface::IRQManager;
let token = &exception::asynchronous::IRQContext::new();
bsp::exception::asynchronous::irq_manager().handle_pending_irqs(token);
}
#[no_mangle]
unsafe extern "C" fn current_elx_serror(e: &mut ExceptionContext) {
default_exception_handler(e);
}
//------------------------------------------------------------------------------
// Lower, AArch64
//------------------------------------------------------------------------------
#[no_mangle]
unsafe extern "C" fn lower_aarch64_synchronous(e: &mut ExceptionContext) {
default_exception_handler(e);
}
#[no_mangle]
unsafe extern "C" fn lower_aarch64_irq(e: &mut ExceptionContext) {
default_exception_handler(e);
}
#[no_mangle]
unsafe extern "C" fn lower_aarch64_serror(e: &mut ExceptionContext) {
default_exception_handler(e);
}
//------------------------------------------------------------------------------
// Lower, AArch32
//------------------------------------------------------------------------------
#[no_mangle]
unsafe extern "C" fn lower_aarch32_synchronous(e: &mut ExceptionContext) {
default_exception_handler(e);
}
#[no_mangle]
unsafe extern "C" fn lower_aarch32_irq(e: &mut ExceptionContext) {
default_exception_handler(e);
}
#[no_mangle]
unsafe extern "C" fn lower_aarch32_serror(e: &mut ExceptionContext) {
default_exception_handler(e);
}
//------------------------------------------------------------------------------
// Misc
//------------------------------------------------------------------------------
/// Human readable SPSR_EL1.
#[rustfmt::skip]
impl fmt::Display for SpsrEL1 {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
// Raw value.
writeln!(f, "SPSR_EL1: {:#010x}", self.0.get())?;
let to_flag_str = |x| -> _ {
if x { "Set" } else { "Not set" }
};
writeln!(f, " Flags:")?;
writeln!(f, " Negative (N): {}", to_flag_str(self.0.is_set(SPSR_EL1::N)))?;
writeln!(f, " Zero (Z): {}", to_flag_str(self.0.is_set(SPSR_EL1::Z)))?;
writeln!(f, " Carry (C): {}", to_flag_str(self.0.is_set(SPSR_EL1::C)))?;
writeln!(f, " Overflow (V): {}", to_flag_str(self.0.is_set(SPSR_EL1::V)))?;
let to_mask_str = |x| -> _ {
if x { "Masked" } else { "Unmasked" }
};
writeln!(f, " Exception handling state:")?;
writeln!(f, " Debug (D): {}", to_mask_str(self.0.is_set(SPSR_EL1::D)))?;
writeln!(f, " SError (A): {}", to_mask_str(self.0.is_set(SPSR_EL1::A)))?;
writeln!(f, " IRQ (I): {}", to_mask_str(self.0.is_set(SPSR_EL1::I)))?;
writeln!(f, " FIQ (F): {}", to_mask_str(self.0.is_set(SPSR_EL1::F)))?;
write!(f, " Illegal Execution State (IL): {}",
to_flag_str(self.0.is_set(SPSR_EL1::IL))
)
}
}
impl EsrEL1 {
#[inline(always)]
fn exception_class(&self) -> Option<ESR_EL1::EC::Value> {
self.0.read_as_enum(ESR_EL1::EC)
}
#[cfg(feature = "test_build")]
#[inline(always)]
fn iss(&self) -> u64 {
self.0.read(ESR_EL1::ISS)
}
}
/// Human readable ESR_EL1.
#[rustfmt::skip]
impl fmt::Display for EsrEL1 {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
// Raw print of whole register.
writeln!(f, "ESR_EL1: {:#010x}", self.0.get())?;
// Raw print of exception class.
write!(f, " Exception Class (EC) : {:#x}", self.0.read(ESR_EL1::EC))?;
// Exception class.
let ec_translation = match self.exception_class() {
Some(ESR_EL1::EC::Value::DataAbortCurrentEL) => "Data Abort, current EL",
_ => "N/A",
};
writeln!(f, " - {}", ec_translation)?;
// Raw print of instruction specific syndrome.
write!(f, " Instr Specific Syndrome (ISS): {:#x}", self.0.read(ESR_EL1::ISS))
}
}
impl ExceptionContext {
#[inline(always)]
fn exception_class(&self) -> Option<ESR_EL1::EC::Value> {
self.esr_el1.exception_class()
}
#[inline(always)]
fn fault_address_valid(&self) -> bool {
use ESR_EL1::EC::Value::*;
match self.exception_class() {
None => false,
Some(ec) => matches!(
ec,
InstrAbortLowerEL
| InstrAbortCurrentEL
| PCAlignmentFault
| DataAbortLowerEL
| DataAbortCurrentEL
| WatchpointLowerEL
| WatchpointCurrentEL
),
}
}
}
/// Human readable print of the exception context.
impl fmt::Display for ExceptionContext {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(f, "{}", self.esr_el1)?;
if self.fault_address_valid() {
writeln!(f, "FAR_EL1: {:#018x}", FAR_EL1.get() as usize)?;
}
writeln!(f, "{}", self.spsr_el1)?;
writeln!(f, "ELR_EL1: {:#018x}", self.elr_el1)?;
writeln!(
f,
" Symbol: {}",
symbols::lookup_symbol(memory::Address::new(self.elr_el1 as usize))
.unwrap_or("Symbol not found")
)?;
writeln!(f)?;
writeln!(f, "General purpose register:")?;
#[rustfmt::skip]
let alternating = |x| -> _ {
if x % 2 == 0 { " " } else { "\n" }
};
// Print two registers per line.
for (i, reg) in self.gpr.iter().enumerate() {
write!(f, " x{: <2}: {: >#018x}{}", i, reg, alternating(i))?;
}
write!(f, " lr : {:#018x}", self.lr)
}
}
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
use crate::exception::PrivilegeLevel;
/// The processing element's current privilege level.
pub fn current_privilege_level() -> (PrivilegeLevel, &'static str) {
let el = CurrentEL.read_as_enum(CurrentEL::EL);
match el {
Some(CurrentEL::EL::Value::EL2) => (PrivilegeLevel::Hypervisor, "EL2"),
Some(CurrentEL::EL::Value::EL1) => (PrivilegeLevel::Kernel, "EL1"),
Some(CurrentEL::EL::Value::EL0) => (PrivilegeLevel::User, "EL0"),
_ => (PrivilegeLevel::Unknown, "Unknown"),
}
}
/// Init exception handling by setting the exception vector base address register.
///
/// # Safety
///
/// - Changes the HW state of the executing core.
/// - The vector table and the symbol `__exception_vector_start` from the linker script must
/// adhere to the alignment and size constraints demanded by the ARMv8-A Architecture Reference
/// Manual.
pub unsafe fn handling_init() {
// Provided by exception.S.
extern "Rust" {
static __exception_vector_start: UnsafeCell<()>;
}
VBAR_EL1.set(__exception_vector_start.get() as u64);
// Force VBAR update to complete before next instruction.
barrier::isb(barrier::SY);
}

@ -0,0 +1,150 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2018-2022 Andre Richter <andre.o.richter@gmail.com>
//--------------------------------------------------------------------------------------------------
// Definitions
//--------------------------------------------------------------------------------------------------
/// Call the function provided by parameter `\handler` after saving the exception context. Provide
/// the context as the first parameter to '\handler'.
.macro CALL_WITH_CONTEXT handler
// Make room on the stack for the exception context.
sub sp, sp, #16 * 17
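// The resulting 17 * 16 = 272 byte frame mirrors `struct ExceptionContext` in exception.rs:
// offsets #16*0..#16*14 hold x0-x29, #16*15 holds lr/ELR_EL1, #16*16 holds SPSR_EL1/ESR_EL1.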
// Store all general purpose registers on the stack.
stp x0, x1, [sp, #16 * 0]
stp x2, x3, [sp, #16 * 1]
stp x4, x5, [sp, #16 * 2]
stp x6, x7, [sp, #16 * 3]
stp x8, x9, [sp, #16 * 4]
stp x10, x11, [sp, #16 * 5]
stp x12, x13, [sp, #16 * 6]
stp x14, x15, [sp, #16 * 7]
stp x16, x17, [sp, #16 * 8]
stp x18, x19, [sp, #16 * 9]
stp x20, x21, [sp, #16 * 10]
stp x22, x23, [sp, #16 * 11]
stp x24, x25, [sp, #16 * 12]
stp x26, x27, [sp, #16 * 13]
stp x28, x29, [sp, #16 * 14]
// Add the exception link register (ELR_EL1), saved program status (SPSR_EL1) and exception
// syndrome register (ESR_EL1).
mrs x1, ELR_EL1
mrs x2, SPSR_EL1
mrs x3, ESR_EL1
stp lr, x1, [sp, #16 * 15]
stp x2, x3, [sp, #16 * 16]
// x0 is the first argument for the function called through `\handler`.
mov x0, sp
// Call `\handler`.
bl \handler
// After returning from exception handling code, replay the saved context and return via
// `eret`.
b __exception_restore_context
.endm
.macro FIQ_SUSPEND
1: wfe
b 1b
.endm
//--------------------------------------------------------------------------------------------------
// Private Code
//--------------------------------------------------------------------------------------------------
.section .text
//------------------------------------------------------------------------------
// The exception vector table.
//------------------------------------------------------------------------------
// Align by 2^11 bytes, as demanded by ARMv8-A. Same as ALIGN(2048) in an ld script.
.align 11
// Export a symbol for the Rust code to use.
__exception_vector_start:
// Current exception level with SP_EL0.
//
// .org sets the offset relative to section start.
//
// # Safety
//
// - It must be ensured that the code emitted by `CALL_WITH_CONTEXT` fits into 0x80 bytes.
.org 0x000
CALL_WITH_CONTEXT current_el0_synchronous
.org 0x080
CALL_WITH_CONTEXT current_el0_irq
.org 0x100
FIQ_SUSPEND
.org 0x180
CALL_WITH_CONTEXT current_el0_serror
// Current exception level with SP_ELx, x > 0.
.org 0x200
CALL_WITH_CONTEXT current_elx_synchronous
.org 0x280
CALL_WITH_CONTEXT current_elx_irq
.org 0x300
FIQ_SUSPEND
.org 0x380
CALL_WITH_CONTEXT current_elx_serror
// Lower exception level, AArch64
.org 0x400
CALL_WITH_CONTEXT lower_aarch64_synchronous
.org 0x480
CALL_WITH_CONTEXT lower_aarch64_irq
.org 0x500
FIQ_SUSPEND
.org 0x580
CALL_WITH_CONTEXT lower_aarch64_serror
// Lower exception level, AArch32
.org 0x600
CALL_WITH_CONTEXT lower_aarch32_synchronous
.org 0x680
CALL_WITH_CONTEXT lower_aarch32_irq
.org 0x700
FIQ_SUSPEND
.org 0x780
CALL_WITH_CONTEXT lower_aarch32_serror
.org 0x800
//------------------------------------------------------------------------------
// fn __exception_restore_context()
//------------------------------------------------------------------------------
__exception_restore_context:
ldr w19, [sp, #16 * 16]
ldp lr, x20, [sp, #16 * 15]
msr SPSR_EL1, x19
msr ELR_EL1, x20
ldp x0, x1, [sp, #16 * 0]
ldp x2, x3, [sp, #16 * 1]
ldp x4, x5, [sp, #16 * 2]
ldp x6, x7, [sp, #16 * 3]
ldp x8, x9, [sp, #16 * 4]
ldp x10, x11, [sp, #16 * 5]
ldp x12, x13, [sp, #16 * 6]
ldp x14, x15, [sp, #16 * 7]
ldp x16, x17, [sp, #16 * 8]
ldp x18, x19, [sp, #16 * 9]
ldp x20, x21, [sp, #16 * 10]
ldp x22, x23, [sp, #16 * 11]
ldp x24, x25, [sp, #16 * 12]
ldp x26, x27, [sp, #16 * 13]
ldp x28, x29, [sp, #16 * 14]
add sp, sp, #16 * 17
eret
.size __exception_restore_context, . - __exception_restore_context
.type __exception_restore_context, function

@ -0,0 +1,152 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2018-2022 Andre Richter <andre.o.richter@gmail.com>
//! Architectural asynchronous exception handling.
//!
//! # Orientation
//!
//! Since arch modules are imported into generic modules using the path attribute, the path of this
//! file is:
//!
//! crate::exception::asynchronous::arch_asynchronous
use core::arch::asm;
use cortex_a::registers::*;
use tock_registers::interfaces::{Readable, Writeable};
//--------------------------------------------------------------------------------------------------
// Private Definitions
//--------------------------------------------------------------------------------------------------
mod daif_bits {
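// The DAIFSet/DAIFClr instructions take a 4-bit immediate laid out as [D, A, I, F] in bits
// [3:0], so 0b0010 selects only the IRQ (I) mask bit.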
pub const IRQ: u8 = 0b0010;
}
trait DaifField {
fn daif_field() -> tock_registers::fields::Field<u64, DAIF::Register>;
}
struct Debug;
struct SError;
struct IRQ;
struct FIQ;
//--------------------------------------------------------------------------------------------------
// Private Code
//--------------------------------------------------------------------------------------------------
impl DaifField for Debug {
fn daif_field() -> tock_registers::fields::Field<u64, DAIF::Register> {
DAIF::D
}
}
impl DaifField for SError {
fn daif_field() -> tock_registers::fields::Field<u64, DAIF::Register> {
DAIF::A
}
}
impl DaifField for IRQ {
fn daif_field() -> tock_registers::fields::Field<u64, DAIF::Register> {
DAIF::I
}
}
impl DaifField for FIQ {
fn daif_field() -> tock_registers::fields::Field<u64, DAIF::Register> {
DAIF::F
}
}
fn is_masked<T>() -> bool
where
T: DaifField,
{
DAIF.is_set(T::daif_field())
}
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
/// Returns whether IRQs are masked on the executing core.
pub fn is_local_irq_masked() -> bool {
!is_masked::<IRQ>()
}
/// Unmask IRQs on the executing core.
///
/// There is no need to place an explicit instruction synchronization barrier after the `msr`.
/// Quoting the Architecture Reference Manual for ARMv8-A, section C5.1.3:
///
/// "Writes to PSTATE.{PAN, D, A, I, F} occur in program order without the need for additional
/// synchronization."
///
/// # Safety
///
/// - Changes the HW state of the executing core.
#[inline(always)]
pub unsafe fn local_irq_unmask() {
#[rustfmt::skip]
asm!(
"msr DAIFClr, {arg}",
arg = const daif_bits::IRQ,
options(nomem, nostack, preserves_flags)
);
}
/// Mask IRQs on the executing core.
///
/// # Safety
///
/// - Changes the HW state of the executing core.
#[inline(always)]
pub unsafe fn local_irq_mask() {
#[rustfmt::skip]
asm!(
"msr DAIFSet, {arg}",
arg = const daif_bits::IRQ,
options(nomem, nostack, preserves_flags)
);
}
/// Mask IRQs on the executing core and return the previously saved interrupt mask bits (DAIF).
///
/// # Safety
///
/// - Changes the HW state of the executing core.
#[inline(always)]
pub unsafe fn local_irq_mask_save() -> u64 {
let saved = DAIF.get();
local_irq_mask();
saved
}
/// Restore the interrupt mask bits (DAIF) using the callee's argument.
///
/// # Safety
///
/// - Changes the HW state of the executing core.
/// - No sanity checks on the input.
#[inline(always)]
pub unsafe fn local_irq_restore(saved: u64) {
DAIF.set(saved);
}
/// Print the AArch64 exceptions status.
#[rustfmt::skip]
pub fn print_state() {
use crate::info;
let to_mask_str = |x| -> _ {
if x { "Masked" } else { "Unmasked" }
};
info!(" Debug: {}", to_mask_str(is_masked::<Debug>()));
info!(" SError: {}", to_mask_str(is_masked::<SError>()));
info!(" IRQ: {}", to_mask_str(is_masked::<IRQ>()));
info!(" FIQ: {}", to_mask_str(is_masked::<FIQ>()));
}

@ -0,0 +1,158 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2018-2022 Andre Richter <andre.o.richter@gmail.com>
//! Memory Management Unit Driver.
//!
//! Only 64 KiB granule is supported.
//!
//! # Orientation
//!
//! Since arch modules are imported into generic modules using the path attribute, the path of this
//! file is:
//!
//! crate::memory::mmu::arch_mmu
use crate::{
bsp, memory,
memory::{mmu::TranslationGranule, Address, Physical},
};
use core::intrinsics::unlikely;
use cortex_a::{asm::barrier, registers::*};
use tock_registers::interfaces::{ReadWriteable, Readable, Writeable};
//--------------------------------------------------------------------------------------------------
// Private Definitions
//--------------------------------------------------------------------------------------------------
/// Memory Management Unit type.
struct MemoryManagementUnit;
//--------------------------------------------------------------------------------------------------
// Public Definitions
//--------------------------------------------------------------------------------------------------
pub type Granule512MiB = TranslationGranule<{ 512 * 1024 * 1024 }>;
pub type Granule64KiB = TranslationGranule<{ 64 * 1024 }>;
/// Constants for indexing the MAIR_EL1.
#[allow(dead_code)]
pub mod mair {
pub const DEVICE: u64 = 0;
pub const NORMAL: u64 = 1;
}
//--------------------------------------------------------------------------------------------------
// Global instances
//--------------------------------------------------------------------------------------------------
static MMU: MemoryManagementUnit = MemoryManagementUnit;
//--------------------------------------------------------------------------------------------------
// Private Code
//--------------------------------------------------------------------------------------------------
impl<const AS_SIZE: usize> memory::mmu::AddressSpace<AS_SIZE> {
/// Checks for architectural restrictions.
pub const fn arch_address_space_size_sanity_checks() {
// Size must be at least one full 512 MiB table.
assert!((AS_SIZE % Granule512MiB::SIZE) == 0);
// Check for 48 bit virtual address size as maximum, which is supported by any ARMv8
// version.
assert!(AS_SIZE <= (1 << 48));
}
}
impl MemoryManagementUnit {
/// Setup function for the MAIR_EL1 register.
#[inline(always)]
fn set_up_mair(&self) {
// Define the memory types being mapped.
MAIR_EL1.write(
// Attribute 1 - Cacheable normal DRAM.
MAIR_EL1::Attr1_Normal_Outer::WriteBack_NonTransient_ReadWriteAlloc +
MAIR_EL1::Attr1_Normal_Inner::WriteBack_NonTransient_ReadWriteAlloc +
// Attribute 0 - Device.
MAIR_EL1::Attr0_Device::nonGathering_nonReordering_EarlyWriteAck,
);
}
/// Configure various settings of stage 1 of the EL1 translation regime.
#[inline(always)]
fn configure_translation_control(&self) {
let t1sz = (64 - bsp::memory::mmu::KernelVirtAddrSpace::SIZE_SHIFT) as u64;
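// Illustrative (hypothetical) value: a 1 GiB kernel virtual address space has SIZE_SHIFT = 30,
// giving T1SZ = 64 - 30 = 34, i.e. TTBR1_EL1 covers the upper 2^30 bytes of the address space.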
TCR_EL1.write(
TCR_EL1::TBI1::Used
+ TCR_EL1::IPS::Bits_40
+ TCR_EL1::TG1::KiB_64
+ TCR_EL1::SH1::Inner
+ TCR_EL1::ORGN1::WriteBack_ReadAlloc_WriteAlloc_Cacheable
+ TCR_EL1::IRGN1::WriteBack_ReadAlloc_WriteAlloc_Cacheable
+ TCR_EL1::EPD1::EnableTTBR1Walks
+ TCR_EL1::A1::TTBR1
+ TCR_EL1::T1SZ.val(t1sz)
+ TCR_EL1::EPD0::DisableTTBR0Walks,
);
}
}
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
/// Return a reference to the MMU instance.
pub fn mmu() -> &'static impl memory::mmu::interface::MMU {
&MMU
}
//------------------------------------------------------------------------------
// OS Interface Code
//------------------------------------------------------------------------------
use memory::mmu::MMUEnableError;
impl memory::mmu::interface::MMU for MemoryManagementUnit {
unsafe fn enable_mmu_and_caching(
&self,
phys_tables_base_addr: Address<Physical>,
) -> Result<(), MMUEnableError> {
if unlikely(self.is_enabled()) {
return Err(MMUEnableError::AlreadyEnabled);
}
// Fail early if translation granule is not supported.
if unlikely(!ID_AA64MMFR0_EL1.matches_all(ID_AA64MMFR0_EL1::TGran64::Supported)) {
return Err(MMUEnableError::Other(
"Translation granule not supported in HW",
));
}
// Prepare the memory attribute indirection register.
self.set_up_mair();
// Set the "Translation Table Base Register".
TTBR1_EL1.set_baddr(phys_tables_base_addr.as_usize() as u64);
self.configure_translation_control();
// Switch the MMU on.
//
// First, force all previous changes to be seen before the MMU is enabled.
barrier::isb(barrier::SY);
// Enable the MMU and turn on data and instruction caching.
SCTLR_EL1.modify(SCTLR_EL1::M::Enable + SCTLR_EL1::C::Cacheable + SCTLR_EL1::I::Cacheable);
// Force MMU init to complete before next instruction.
barrier::isb(barrier::SY);
Ok(())
}
#[inline(always)]
fn is_enabled(&self) -> bool {
SCTLR_EL1.matches_all(SCTLR_EL1::M::Enable)
}
}

@ -0,0 +1,521 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2021-2022 Andre Richter <andre.o.richter@gmail.com>
//! Architectural translation table.
//!
//! Only 64 KiB granule is supported.
//!
//! # Orientation
//!
//! Since arch modules are imported into generic modules using the path attribute, the path of this
//! file is:
//!
//! crate::memory::mmu::translation_table::arch_translation_table
use crate::{
bsp,
memory::{
self,
mmu::{
arch_mmu::{Granule512MiB, Granule64KiB},
AccessPermissions, AttributeFields, MemAttributes, MemoryRegion, PageAddress,
},
Address, Physical, Virtual,
},
};
use core::convert;
use tock_registers::{
interfaces::{Readable, Writeable},
register_bitfields,
registers::InMemoryRegister,
};
//--------------------------------------------------------------------------------------------------
// Private Definitions
//--------------------------------------------------------------------------------------------------
// A table descriptor, as per ARMv8-A Architecture Reference Manual Figure D5-15.
register_bitfields! {u64,
STAGE1_TABLE_DESCRIPTOR [
/// Physical address of the next descriptor.
NEXT_LEVEL_TABLE_ADDR_64KiB OFFSET(16) NUMBITS(32) [], // [47:16]
TYPE OFFSET(1) NUMBITS(1) [
Block = 0,
Table = 1
],
VALID OFFSET(0) NUMBITS(1) [
False = 0,
True = 1
]
]
}
// A level 3 page descriptor, as per ARMv8-A Architecture Reference Manual Figure D5-17.
register_bitfields! {u64,
STAGE1_PAGE_DESCRIPTOR [
/// Unprivileged execute-never.
UXN OFFSET(54) NUMBITS(1) [
False = 0,
True = 1
],
/// Privileged execute-never.
PXN OFFSET(53) NUMBITS(1) [
False = 0,
True = 1
],
/// Physical address of the output page that this descriptor maps.
OUTPUT_ADDR_64KiB OFFSET(16) NUMBITS(32) [], // [47:16]
/// Access flag.
AF OFFSET(10) NUMBITS(1) [
False = 0,
True = 1
],
/// Shareability field.
SH OFFSET(8) NUMBITS(2) [
OuterShareable = 0b10,
InnerShareable = 0b11
],
/// Access Permissions.
AP OFFSET(6) NUMBITS(2) [
RW_EL1 = 0b00,
RW_EL1_EL0 = 0b01,
RO_EL1 = 0b10,
RO_EL1_EL0 = 0b11
],
/// Memory attributes index into the MAIR_EL1 register.
AttrIndx OFFSET(2) NUMBITS(3) [],
TYPE OFFSET(1) NUMBITS(1) [
Reserved_Invalid = 0,
Page = 1
],
VALID OFFSET(0) NUMBITS(1) [
False = 0,
True = 1
]
]
}
/// A table descriptor for 64 KiB aperture.
///
/// The output points to the next table.
#[derive(Copy, Clone)]
#[repr(C)]
struct TableDescriptor {
value: u64,
}
/// A page descriptor with 64 KiB aperture.
///
/// The output points to physical memory.
#[derive(Copy, Clone)]
#[repr(C)]
struct PageDescriptor {
value: u64,
}
trait StartAddr {
fn virt_start_addr(&self) -> Address<Virtual>;
}
//--------------------------------------------------------------------------------------------------
// Public Definitions
//--------------------------------------------------------------------------------------------------
/// Big monolithic struct for storing the translation tables. Individual levels must be 64 KiB
/// aligned, so the lvl3 is put first.
#[repr(C)]
#[repr(align(65536))]
pub struct FixedSizeTranslationTable<const NUM_TABLES: usize, const START_FROM_TOP: bool> {
/// Page descriptors, covering 64 KiB windows per entry.
lvl3: [[PageDescriptor; 8192]; NUM_TABLES],
/// Table descriptors, covering 512 MiB windows.
lvl2: [TableDescriptor; NUM_TABLES],
/// Have the tables been initialized?
initialized: bool,
}
//--------------------------------------------------------------------------------------------------
// Private Code
//--------------------------------------------------------------------------------------------------
impl<T, const N: usize> StartAddr for [T; N] {
fn virt_start_addr(&self) -> Address<Virtual> {
Address::new(self as *const _ as usize)
}
}
impl TableDescriptor {
/// Create an instance.
///
/// Descriptor is invalid by default.
pub const fn new_zeroed() -> Self {
Self { value: 0 }
}
/// Create an instance pointing to the supplied address.
pub fn from_next_lvl_table_addr(phys_next_lvl_table_addr: Address<Physical>) -> Self {
let val = InMemoryRegister::<u64, STAGE1_TABLE_DESCRIPTOR::Register>::new(0);
let shifted = phys_next_lvl_table_addr.as_usize() >> Granule64KiB::SHIFT;
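// Example (hypothetical address): a lvl3 table located at physical 0x8_0000 is stored as
// 0x8_0000 >> 16 = 0x8 in NEXT_LEVEL_TABLE_ADDR_64KiB, i.e. descriptor bits [47:16].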
val.write(
STAGE1_TABLE_DESCRIPTOR::NEXT_LEVEL_TABLE_ADDR_64KiB.val(shifted as u64)
+ STAGE1_TABLE_DESCRIPTOR::TYPE::Table
+ STAGE1_TABLE_DESCRIPTOR::VALID::True,
);
TableDescriptor { value: val.get() }
}
}
/// Convert the kernel's generic memory attributes to HW-specific attributes of the MMU.
impl convert::From<AttributeFields>
for tock_registers::fields::FieldValue<u64, STAGE1_PAGE_DESCRIPTOR::Register>
{
fn from(attribute_fields: AttributeFields) -> Self {
// Memory attributes.
let mut desc = match attribute_fields.mem_attributes {
MemAttributes::CacheableDRAM => {
STAGE1_PAGE_DESCRIPTOR::SH::InnerShareable
+ STAGE1_PAGE_DESCRIPTOR::AttrIndx.val(memory::mmu::arch_mmu::mair::NORMAL)
}
MemAttributes::Device => {
STAGE1_PAGE_DESCRIPTOR::SH::OuterShareable
+ STAGE1_PAGE_DESCRIPTOR::AttrIndx.val(memory::mmu::arch_mmu::mair::DEVICE)
}
};
// Access Permissions.
desc += match attribute_fields.acc_perms {
AccessPermissions::ReadOnly => STAGE1_PAGE_DESCRIPTOR::AP::RO_EL1,
AccessPermissions::ReadWrite => STAGE1_PAGE_DESCRIPTOR::AP::RW_EL1,
};
// The execute-never attribute is mapped to PXN in AArch64.
desc += if attribute_fields.execute_never {
STAGE1_PAGE_DESCRIPTOR::PXN::True
} else {
STAGE1_PAGE_DESCRIPTOR::PXN::False
};
// Always set unprivileged execute-never as long as userspace is not implemented yet.
desc += STAGE1_PAGE_DESCRIPTOR::UXN::True;
desc
}
}
/// Convert the HW-specific attributes of the MMU to kernel's generic memory attributes.
impl convert::TryFrom<InMemoryRegister<u64, STAGE1_PAGE_DESCRIPTOR::Register>> for AttributeFields {
type Error = &'static str;
fn try_from(
desc: InMemoryRegister<u64, STAGE1_PAGE_DESCRIPTOR::Register>,
) -> Result<AttributeFields, Self::Error> {
let mem_attributes = match desc.read(STAGE1_PAGE_DESCRIPTOR::AttrIndx) {
memory::mmu::arch_mmu::mair::NORMAL => MemAttributes::CacheableDRAM,
memory::mmu::arch_mmu::mair::DEVICE => MemAttributes::Device,
_ => return Err("Unexpected memory attribute"),
};
let acc_perms = match desc.read_as_enum(STAGE1_PAGE_DESCRIPTOR::AP) {
Some(STAGE1_PAGE_DESCRIPTOR::AP::Value::RO_EL1) => AccessPermissions::ReadOnly,
Some(STAGE1_PAGE_DESCRIPTOR::AP::Value::RW_EL1) => AccessPermissions::ReadWrite,
_ => return Err("Unexpected access permission"),
};
let execute_never = desc.read(STAGE1_PAGE_DESCRIPTOR::PXN) > 0;
Ok(AttributeFields {
mem_attributes,
acc_perms,
execute_never,
})
}
}
impl PageDescriptor {
/// Create an instance.
///
/// Descriptor is invalid by default.
pub const fn new_zeroed() -> Self {
Self { value: 0 }
}
/// Create an instance.
pub fn from_output_page_addr(
phys_output_page_addr: PageAddress<Physical>,
attribute_fields: &AttributeFields,
) -> Self {
let val = InMemoryRegister::<u64, STAGE1_PAGE_DESCRIPTOR::Register>::new(0);
let shifted = phys_output_page_addr.into_inner().as_usize() >> Granule64KiB::SHIFT;
val.write(
STAGE1_PAGE_DESCRIPTOR::OUTPUT_ADDR_64KiB.val(shifted as u64)
+ STAGE1_PAGE_DESCRIPTOR::AF::True
+ STAGE1_PAGE_DESCRIPTOR::TYPE::Page
+ STAGE1_PAGE_DESCRIPTOR::VALID::True
+ (*attribute_fields).into(),
);
Self { value: val.get() }
}
/// Returns the valid bit.
fn is_valid(&self) -> bool {
InMemoryRegister::<u64, STAGE1_PAGE_DESCRIPTOR::Register>::new(self.value)
.is_set(STAGE1_PAGE_DESCRIPTOR::VALID)
}
/// Returns the output page.
fn output_page_addr(&self) -> PageAddress<Physical> {
let shifted = InMemoryRegister::<u64, STAGE1_PAGE_DESCRIPTOR::Register>::new(self.value)
.read(STAGE1_PAGE_DESCRIPTOR::OUTPUT_ADDR_64KiB) as usize;
PageAddress::from(shifted << Granule64KiB::SHIFT)
}
/// Returns the attributes.
fn try_attributes(&self) -> Result<AttributeFields, &'static str> {
InMemoryRegister::<u64, STAGE1_PAGE_DESCRIPTOR::Register>::new(self.value).try_into()
}
}
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
impl<const AS_SIZE: usize> memory::mmu::AssociatedTranslationTable
for memory::mmu::AddressSpace<AS_SIZE>
where
[u8; Self::SIZE >> Granule512MiB::SHIFT]: Sized,
{
type TableStartFromTop =
FixedSizeTranslationTable<{ Self::SIZE >> Granule512MiB::SHIFT }, true>;
type TableStartFromBottom =
FixedSizeTranslationTable<{ Self::SIZE >> Granule512MiB::SHIFT }, false>;
}
impl<const NUM_TABLES: usize, const START_FROM_TOP: bool>
FixedSizeTranslationTable<NUM_TABLES, START_FROM_TOP>
{
const START_FROM_TOP_OFFSET: Address<Virtual> =
Address::new((usize::MAX - (Granule512MiB::SIZE * NUM_TABLES)) + 1);
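// Example: with NUM_TABLES = 1, the tables span the top-most 512 MiB of the virtual address
// space and START_FROM_TOP_OFFSET equals 0xFFFF_FFFF_E000_0000.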
/// Create an instance.
#[allow(clippy::assertions_on_constants)]
const fn _new(for_precompute: bool) -> Self {
assert!(bsp::memory::mmu::KernelGranule::SIZE == Granule64KiB::SIZE);
// Can't have a zero-sized address space.
assert!(NUM_TABLES > 0);
Self {
lvl3: [[PageDescriptor::new_zeroed(); 8192]; NUM_TABLES],
lvl2: [TableDescriptor::new_zeroed(); NUM_TABLES],
initialized: for_precompute,
}
}
pub const fn new_for_precompute() -> Self {
Self::_new(true)
}
#[cfg(test)]
pub fn new_for_runtime() -> Self {
Self::_new(false)
}
/// Helper to calculate the lvl2 and lvl3 indices from an address.
#[inline(always)]
fn lvl2_lvl3_index_from_page_addr(
&self,
virt_page_addr: PageAddress<Virtual>,
) -> Result<(usize, usize), &'static str> {
let mut addr = virt_page_addr.into_inner();
if START_FROM_TOP {
addr = addr - Self::START_FROM_TOP_OFFSET;
}
let lvl2_index = addr.as_usize() >> Granule512MiB::SHIFT;
let lvl3_index = (addr.as_usize() & Granule512MiB::MASK) >> Granule64KiB::SHIFT;
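// Worked example with a hypothetical, already offset-corrected address of 0x2001_0000:
// lvl2_index = 0x2001_0000 >> 29 = 1, lvl3_index = (0x2001_0000 mod 512 MiB) >> 16 = 1.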
if lvl2_index > (NUM_TABLES - 1) {
return Err("Virtual page is out of bounds of translation table");
}
Ok((lvl2_index, lvl3_index))
}
/// Returns the PageDescriptor corresponding to the supplied page address.
#[inline(always)]
fn page_descriptor_from_page_addr(
&self,
virt_page_addr: PageAddress<Virtual>,
) -> Result<&PageDescriptor, &'static str> {
let (lvl2_index, lvl3_index) = self.lvl2_lvl3_index_from_page_addr(virt_page_addr)?;
let desc = &self.lvl3[lvl2_index][lvl3_index];
Ok(desc)
}
/// Sets the PageDescriptor corresponding to the supplied page address.
///
/// Doesn't allow overriding an already valid page.
#[inline(always)]
fn set_page_descriptor_from_page_addr(
&mut self,
virt_page_addr: PageAddress<Virtual>,
new_desc: &PageDescriptor,
) -> Result<(), &'static str> {
let (lvl2_index, lvl3_index) = self.lvl2_lvl3_index_from_page_addr(virt_page_addr)?;
let desc = &mut self.lvl3[lvl2_index][lvl3_index];
if desc.is_valid() {
return Err("Virtual page is already mapped");
}
*desc = *new_desc;
Ok(())
}
}
//------------------------------------------------------------------------------
// OS Interface Code
//------------------------------------------------------------------------------
impl<const NUM_TABLES: usize, const START_FROM_TOP: bool>
memory::mmu::translation_table::interface::TranslationTable
for FixedSizeTranslationTable<NUM_TABLES, START_FROM_TOP>
{
fn init(&mut self) -> Result<(), &'static str> {
if self.initialized {
return Ok(());
}
// Populate the l2 entries.
for (lvl2_nr, lvl2_entry) in self.lvl2.iter_mut().enumerate() {
let virt_table_addr = self.lvl3[lvl2_nr].virt_start_addr();
let phys_table_addr = memory::mmu::try_kernel_virt_addr_to_phys_addr(virt_table_addr)?;
let new_desc = TableDescriptor::from_next_lvl_table_addr(phys_table_addr);
*lvl2_entry = new_desc;
}
self.initialized = true;
Ok(())
}
unsafe fn map_at(
&mut self,
virt_region: &MemoryRegion<Virtual>,
phys_region: &MemoryRegion<Physical>,
attr: &AttributeFields,
) -> Result<(), &'static str> {
assert!(self.initialized, "Translation tables not initialized");
if virt_region.size() != phys_region.size() {
return Err("Tried to map memory regions with unequal sizes");
}
if phys_region.end_exclusive_page_addr() > bsp::memory::phys_addr_space_end_exclusive_addr()
{
return Err("Tried to map outside of physical address space");
}
let iter = phys_region.into_iter().zip(virt_region.into_iter());
for (phys_page_addr, virt_page_addr) in iter {
let new_desc = PageDescriptor::from_output_page_addr(phys_page_addr, attr);
let virt_page = virt_page_addr;
self.set_page_descriptor_from_page_addr(virt_page, &new_desc)?;
}
Ok(())
}
fn try_virt_page_addr_to_phys_page_addr(
&self,
virt_page_addr: PageAddress<Virtual>,
) -> Result<PageAddress<Physical>, &'static str> {
let page_desc = self.page_descriptor_from_page_addr(virt_page_addr)?;
if !page_desc.is_valid() {
return Err("Page marked invalid");
}
Ok(page_desc.output_page_addr())
}
fn try_page_attributes(
&self,
virt_page_addr: PageAddress<Virtual>,
) -> Result<AttributeFields, &'static str> {
let page_desc = self.page_descriptor_from_page_addr(virt_page_addr)?;
if !page_desc.is_valid() {
return Err("Page marked invalid");
}
page_desc.try_attributes()
}
/// Try to translate a virtual address to a physical address.
///
/// Will only succeed if there exists a valid mapping for the input address.
fn try_virt_addr_to_phys_addr(
&self,
virt_addr: Address<Virtual>,
) -> Result<Address<Physical>, &'static str> {
let virt_page = PageAddress::from(virt_addr.align_down_page());
let phys_page = self.try_virt_page_addr_to_phys_page_addr(virt_page)?;
Ok(phys_page.into_inner() + virt_addr.offset_into_page())
}
}
//--------------------------------------------------------------------------------------------------
// Testing
//--------------------------------------------------------------------------------------------------
#[cfg(test)]
pub type MinSizeTranslationTable = FixedSizeTranslationTable<1, true>;
#[cfg(test)]
mod tests {
use super::*;
use test_macros::kernel_test;
/// Check if the size of `struct TableDescriptor` is as expected.
#[kernel_test]
fn size_of_tabledescriptor_equals_64_bit() {
assert_eq!(
core::mem::size_of::<TableDescriptor>(),
core::mem::size_of::<u64>()
);
}
/// Check if the size of `struct PageDescriptor` is as expected.
#[kernel_test]
fn size_of_pagedescriptor_equals_64_bit() {
assert_eq!(
core::mem::size_of::<PageDescriptor>(),
core::mem::size_of::<u64>()
);
}
}

@ -0,0 +1,121 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2018-2022 Andre Richter <andre.o.richter@gmail.com>
//! Architectural timer primitives.
//!
//! # Orientation
//!
//! Since arch modules are imported into generic modules using the path attribute, the path of this
//! file is:
//!
//! crate::time::arch_time
use crate::{time, warn};
use core::time::Duration;
use cortex_a::{asm::barrier, registers::*};
use tock_registers::interfaces::{ReadWriteable, Readable, Writeable};
//--------------------------------------------------------------------------------------------------
// Private Definitions
//--------------------------------------------------------------------------------------------------
const NS_PER_S: u64 = 1_000_000_000;
/// ARMv8 Generic Timer.
struct GenericTimer;
//--------------------------------------------------------------------------------------------------
// Global instances
//--------------------------------------------------------------------------------------------------
static TIME_MANAGER: GenericTimer = GenericTimer;
//--------------------------------------------------------------------------------------------------
// Private Code
//--------------------------------------------------------------------------------------------------
impl GenericTimer {
#[inline(always)]
fn read_cntpct(&self) -> u64 {
// Prevent the counter from being read ahead of time due to out-of-order execution.
unsafe { barrier::isb(barrier::SY) };
CNTPCT_EL0.get()
}
}
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
/// Return a reference to the time manager.
pub fn time_manager() -> &'static impl time::interface::TimeManager {
&TIME_MANAGER
}
//------------------------------------------------------------------------------
// OS Interface Code
//------------------------------------------------------------------------------
impl time::interface::TimeManager for GenericTimer {
fn resolution(&self) -> Duration {
Duration::from_nanos(NS_PER_S / (CNTFRQ_EL0.get() as u64))
}
fn uptime(&self) -> Duration {
let current_count: u64 = self.read_cntpct() * NS_PER_S;
let frq: u64 = CNTFRQ_EL0.get() as u64;
Duration::from_nanos(current_count / frq)
}
fn spin_for(&self, duration: Duration) {
// Instantly return on zero.
if duration.as_nanos() == 0 {
return;
}
// Calculate the register compare value.
let frq = CNTFRQ_EL0.get();
let x = match frq.checked_mul(duration.as_nanos() as u64) {
#[allow(unused_imports)]
None => {
warn!("Spin duration too long, skipping");
return;
}
Some(val) => val,
};
let tval = x / NS_PER_S;
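// Example (hypothetical values): with CNTFRQ_EL0 = 62_500_000 Hz and a 1 ms duration,
// x = 62_500_000 * 1_000_000 and tval = x / NS_PER_S = 62_500 timer ticks.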
// Check if it is within supported bounds.
let warn: Option<&str> = if tval == 0 {
Some("smaller")
// The upper 32 bits of CNTP_TVAL_EL0 are reserved.
} else if tval > u32::max_value().into() {
Some("bigger")
} else {
None
};
#[allow(unused_imports)]
if let Some(w) = warn {
warn!(
"Spin duration {} than architecturally supported, skipping",
w
);
return;
}
// Set the compare value register.
CNTP_TVAL_EL0.set(tval);
// Kick off the counting. The timer interrupt is kept masked (IMASK), since ISTATUS is
// busy-polled below instead of taking an interrupt.
CNTP_CTL_EL0.modify(CNTP_CTL_EL0::ENABLE::SET + CNTP_CTL_EL0::IMASK::SET);
// ISTATUS will be '1' when cval ticks have passed. Busy-check it.
while !CNTP_CTL_EL0.matches_all(CNTP_CTL_EL0::ISTATUS::SET) {}
// Disable counting again.
CNTP_CTL_EL0.modify(CNTP_CTL_EL0::ENABLE::CLEAR);
}
}

@ -0,0 +1,13 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2018-2022 Andre Richter <andre.o.richter@gmail.com>
//! Conditional reexporting of Board Support Packages.
mod device_driver;
#[cfg(any(feature = "bsp_rpi3", feature = "bsp_rpi4"))]
mod raspberrypi;
#[cfg(any(feature = "bsp_rpi3", feature = "bsp_rpi4"))]
pub use raspberrypi::*;

@ -0,0 +1,16 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2018-2022 Andre Richter <andre.o.richter@gmail.com>
//! Device driver.
#[cfg(feature = "bsp_rpi4")]
mod arm;
#[cfg(any(feature = "bsp_rpi3", feature = "bsp_rpi4"))]
mod bcm;
mod common;
#[cfg(feature = "bsp_rpi4")]
pub use arm::*;
#[cfg(any(feature = "bsp_rpi3", feature = "bsp_rpi4"))]
pub use bcm::*;

@ -0,0 +1,9 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2020-2022 Andre Richter <andre.o.richter@gmail.com>
//! ARM driver top level.
pub mod gicv2;
pub use gicv2::*;

@ -0,0 +1,246 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2020-2022 Andre Richter <andre.o.richter@gmail.com>
//! GICv2 Driver - ARM Generic Interrupt Controller v2.
//!
//! The following is a collection of excerpts with useful information from
//! - `Programmer's Guide for ARMv8-A`
//! - `ARM Generic Interrupt Controller Architecture Specification`
//!
//! # Programmer's Guide - 10.6.1 Configuration
//!
//! The GIC is accessed as a memory-mapped peripheral.
//!
//! All cores can access the common Distributor, but the CPU interface is banked, that is, each core
//! uses the same address to access its own private CPU interface.
//!
//! It is not possible for a core to access the CPU interface of another core.
//!
//! # Architecture Specification - 10.6.2 Initialization
//!
//! Both the Distributor and the CPU interfaces are disabled at reset. The GIC must be initialized
//! after reset before it can deliver interrupts to the core.
//!
//! In the Distributor, software must configure the priority, target, security and enable individual
//! interrupts. The Distributor must subsequently be enabled through its control register
//! (GICD_CTLR). For each CPU interface, software must program the priority mask and preemption
//! settings.
//!
//! Each CPU interface block itself must be enabled through its control register (GICC_CTLR). This
//! prepares the GIC to deliver interrupts to the core.
//!
//! Before interrupts are expected in the core, software prepares the core to take interrupts by
//! setting a valid interrupt vector in the vector table, and clearing interrupt mask bits in
//! PSTATE, and setting the routing controls.
//!
//! The entire interrupt mechanism in the system can be disabled by disabling the Distributor.
//! Interrupt delivery to an individual core can be disabled by disabling its CPU interface.
//! Individual interrupts can also be disabled (or enabled) in the distributor.
//!
//! For an interrupt to reach the core, the individual interrupt, Distributor and CPU interface must
//! all be enabled. The interrupt also needs to be of sufficient priority, that is, higher than the
//! core's priority mask.
//!
//! # Architecture Specification - 1.4.2 Interrupt types
//!
//! - Peripheral interrupt
//! - Private Peripheral Interrupt (PPI)
//! - This is a peripheral interrupt that is specific to a single processor.
//! - Shared Peripheral Interrupt (SPI)
//! - This is a peripheral interrupt that the Distributor can route to any of a specified
//! combination of processors.
//!
//! - Software-generated interrupt (SGI)
//! - This is an interrupt generated by software writing to a GICD_SGIR register in the GIC. The
//! system uses SGIs for interprocessor communication.
//! - An SGI has edge-triggered properties. The software triggering of the interrupt is
//! equivalent to the edge transition of the interrupt request signal.
//! - When an SGI occurs in a multiprocessor implementation, the CPUID field in the Interrupt
//! Acknowledge Register, GICC_IAR, or the Aliased Interrupt Acknowledge Register, GICC_AIAR,
//! identifies the processor that requested the interrupt.
//!
//! # Architecture Specification - 2.2.1 Interrupt IDs
//!
//! Interrupts from sources are identified using ID numbers. Each CPU interface can see up to 1020
//! interrupts. The banking of SPIs and PPIs increases the total number of interrupts supported by
//! the Distributor.
//!
//! The GIC assigns interrupt ID numbers ID0-ID1019 as follows:
//! - Interrupt numbers 32..1019 are used for SPIs.
//! - Interrupt numbers 0..31 are used for interrupts that are private to a CPU interface. These
//! interrupts are banked in the Distributor.
//! - A banked interrupt is one where the Distributor can have multiple interrupts with the
//! same ID. A banked interrupt is identified uniquely by its ID number and its associated
//! CPU interface number. Of the banked interrupt IDs:
//! - 00..15 SGIs
//! - 16..31 PPIs
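//!
//! In this driver, that sequence roughly maps to: `GICD::boot_core_init()` on the boot core,
//! then `GICC::priority_accept_all()` and `GICC::enable()` for the CPU interface (see `init()`
//! below). Setting the vector table and clearing the PSTATE mask bits is handled by the generic
//! `exception` code.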
mod gicc;
mod gicd;
use crate::{bsp, cpu, driver, exception, memory, synchronization, synchronization::InitStateLock};
use core::sync::atomic::{AtomicBool, Ordering};
//--------------------------------------------------------------------------------------------------
// Private Definitions
//--------------------------------------------------------------------------------------------------
type HandlerTable = [Option<exception::asynchronous::IRQDescriptor>; GICv2::NUM_IRQS];
//--------------------------------------------------------------------------------------------------
// Public Definitions
//--------------------------------------------------------------------------------------------------
/// Used for the associated type of trait [`exception::asynchronous::interface::IRQManager`].
pub type IRQNumber = exception::asynchronous::IRQNumber<{ GICv2::MAX_IRQ_NUMBER }>;
/// Representation of the GIC.
pub struct GICv2 {
gicd_mmio_descriptor: memory::mmu::MMIODescriptor,
gicc_mmio_descriptor: memory::mmu::MMIODescriptor,
/// The Distributor.
gicd: gicd::GICD,
/// The CPU Interface.
gicc: gicc::GICC,
/// Have the MMIO regions been remapped yet?
is_mmio_remapped: AtomicBool,
/// Stores registered IRQ handlers. Writable only during kernel init. RO afterwards.
handler_table: InitStateLock<HandlerTable>,
}
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
impl GICv2 {
const MAX_IRQ_NUMBER: usize = 300; // Normally 1019, but keep it lower to save some space.
const NUM_IRQS: usize = Self::MAX_IRQ_NUMBER + 1;
/// Create an instance.
///
/// # Safety
///
/// - The user must ensure to provide correct MMIO descriptors.
pub const unsafe fn new(
gicd_mmio_descriptor: memory::mmu::MMIODescriptor,
gicc_mmio_descriptor: memory::mmu::MMIODescriptor,
) -> Self {
Self {
gicd_mmio_descriptor,
gicc_mmio_descriptor,
gicd: gicd::GICD::new(gicd_mmio_descriptor.start_addr().as_usize()),
gicc: gicc::GICC::new(gicc_mmio_descriptor.start_addr().as_usize()),
is_mmio_remapped: AtomicBool::new(false),
handler_table: InitStateLock::new([None; Self::NUM_IRQS]),
}
}
}
//------------------------------------------------------------------------------
// OS Interface Code
//------------------------------------------------------------------------------
use synchronization::interface::ReadWriteEx;
impl driver::interface::DeviceDriver for GICv2 {
fn compatible(&self) -> &'static str {
"GICv2 (ARM Generic Interrupt Controller v2)"
}
unsafe fn init(&self) -> Result<(), &'static str> {
let remapped = self.is_mmio_remapped.load(Ordering::Relaxed);
if !remapped {
// GICD
let mut virt_addr = memory::mmu::kernel_map_mmio("GICD", &self.gicd_mmio_descriptor)?;
self.gicd.set_mmio(virt_addr.as_usize());
// GICC
virt_addr = memory::mmu::kernel_map_mmio("GICC", &self.gicc_mmio_descriptor)?;
self.gicc.set_mmio(virt_addr.as_usize());
// Conclude remapping.
self.is_mmio_remapped.store(true, Ordering::Relaxed);
}
if bsp::cpu::BOOT_CORE_ID == cpu::smp::core_id() {
self.gicd.boot_core_init();
}
self.gicc.priority_accept_all();
self.gicc.enable();
Ok(())
}
}
impl exception::asynchronous::interface::IRQManager for GICv2 {
type IRQNumberType = IRQNumber;
fn register_handler(
&self,
irq_number: Self::IRQNumberType,
descriptor: exception::asynchronous::IRQDescriptor,
) -> Result<(), &'static str> {
self.handler_table.write(|table| {
let irq_number = irq_number.get();
if table[irq_number].is_some() {
return Err("IRQ handler already registered");
}
table[irq_number] = Some(descriptor);
Ok(())
})
}
fn enable(&self, irq_number: Self::IRQNumberType) {
self.gicd.enable(irq_number);
}
fn handle_pending_irqs<'irq_context>(
&'irq_context self,
ic: &exception::asynchronous::IRQContext<'irq_context>,
) {
// Extract the highest priority pending IRQ number from the Interrupt Acknowledge Register
// (IAR).
let irq_number = self.gicc.pending_irq_number(ic);
// Guard against spurious interrupts.
if irq_number > GICv2::MAX_IRQ_NUMBER {
return;
}
// Call the IRQ handler. Panic if there is none.
self.handler_table.read(|table| {
match table[irq_number] {
None => panic!("No handler registered for IRQ {}", irq_number),
Some(descriptor) => {
// Call the IRQ handler. Panics on failure.
descriptor.handler.handle().expect("Error handling IRQ");
}
}
});
// Signal completion of handling.
self.gicc.mark_completed(irq_number as u32, ic);
}
fn print_handler(&self) {
use crate::info;
info!(" Peripheral handler:");
self.handler_table.read(|table| {
for (i, opt) in table.iter().skip(32).enumerate() {
if let Some(handler) = opt {
info!(" {: >3}. {}", i + 32, handler.name);
}
}
});
}
}

@ -0,0 +1,156 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2020-2022 Andre Richter <andre.o.richter@gmail.com>
//! GICC Driver - GIC CPU interface.
use crate::{
bsp::device_driver::common::MMIODerefWrapper, exception, synchronization::InitStateLock,
};
use tock_registers::{
interfaces::{Readable, Writeable},
register_bitfields, register_structs,
registers::ReadWrite,
};
//--------------------------------------------------------------------------------------------------
// Private Definitions
//--------------------------------------------------------------------------------------------------
register_bitfields! {
u32,
/// CPU Interface Control Register
CTLR [
Enable OFFSET(0) NUMBITS(1) []
],
/// Interrupt Priority Mask Register
PMR [
Priority OFFSET(0) NUMBITS(8) []
],
/// Interrupt Acknowledge Register
IAR [
InterruptID OFFSET(0) NUMBITS(10) []
],
/// End of Interrupt Register
EOIR [
EOIINTID OFFSET(0) NUMBITS(10) []
]
}
register_structs! {
#[allow(non_snake_case)]
pub RegisterBlock {
(0x000 => CTLR: ReadWrite<u32, CTLR::Register>),
(0x004 => PMR: ReadWrite<u32, PMR::Register>),
(0x008 => _reserved1),
(0x00C => IAR: ReadWrite<u32, IAR::Register>),
(0x010 => EOIR: ReadWrite<u32, EOIR::Register>),
(0x014 => @END),
}
}
/// Abstraction for the associated MMIO registers.
type Registers = MMIODerefWrapper<RegisterBlock>;
//--------------------------------------------------------------------------------------------------
// Public Definitions
//--------------------------------------------------------------------------------------------------
/// Representation of the GIC CPU interface.
pub struct GICC {
registers: InitStateLock<Registers>,
}
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
use crate::synchronization::interface::ReadWriteEx;
impl GICC {
/// Create an instance.
///
/// # Safety
///
/// - The user must ensure to provide a correct MMIO start address.
pub const unsafe fn new(mmio_start_addr: usize) -> Self {
Self {
registers: InitStateLock::new(Registers::new(mmio_start_addr)),
}
}
pub unsafe fn set_mmio(&self, new_mmio_start_addr: usize) {
self.registers
.write(|regs| *regs = Registers::new(new_mmio_start_addr));
}
/// Accept interrupts of any priority.
///
/// Quoting the GICv2 Architecture Specification:
///
/// "Writing 255 to the GICC_PMR always sets it to the largest supported priority field
/// value."
///
/// # Safety
///
/// - GICC MMIO registers are banked per CPU core. It is therefore safe to have `&self` instead
/// of `&mut self`.
pub fn priority_accept_all(&self) {
self.registers.read(|regs| {
regs.PMR.write(PMR::Priority.val(255)); // Comment in arch spec.
});
}
/// Enable the interface - start accepting IRQs.
///
/// # Safety
///
/// - GICC MMIO registers are banked per CPU core. It is therefore safe to have `&self` instead
/// of `&mut self`.
pub fn enable(&self) {
self.registers.read(|regs| {
regs.CTLR.write(CTLR::Enable::SET);
});
}
/// Extract the number of the highest-priority pending IRQ.
///
/// Can only be called from IRQ context, which is ensured by taking an `IRQContext` token.
///
/// # Safety
///
/// - GICC MMIO registers are banked per CPU core. It is therefore safe to have `&self` instead
/// of `&mut self`.
#[allow(clippy::trivially_copy_pass_by_ref)]
pub fn pending_irq_number<'irq_context>(
&self,
_ic: &exception::asynchronous::IRQContext<'irq_context>,
) -> usize {
self.registers
.read(|regs| regs.IAR.read(IAR::InterruptID) as usize)
}
/// Complete handling of the currently active IRQ.
///
/// Can only be called from IRQ context, which is ensured by taking an `IRQContext` token.
///
/// To be called after `pending_irq_number()`.
///
/// # Safety
///
/// - GICC MMIO registers are banked per CPU core. It is therefore safe to have `&self` instead
/// of `&mut self`.
#[allow(clippy::trivially_copy_pass_by_ref)]
pub fn mark_completed<'irq_context>(
&self,
irq_number: u32,
_ic: &exception::asynchronous::IRQContext<'irq_context>,
) {
self.registers.read(|regs| {
regs.EOIR.write(EOIR::EOIINTID.val(irq_number));
});
}
}

@ -0,0 +1,209 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2020-2022 Andre Richter <andre.o.richter@gmail.com>
//! GICD Driver - GIC Distributor.
//!
//! # Glossary
//! - SPI - Shared Peripheral Interrupt.
use crate::{
bsp::device_driver::common::MMIODerefWrapper,
state, synchronization,
synchronization::{IRQSafeNullLock, InitStateLock},
};
use tock_registers::{
interfaces::{Readable, Writeable},
register_bitfields, register_structs,
registers::{ReadOnly, ReadWrite},
};
//--------------------------------------------------------------------------------------------------
// Private Definitions
//--------------------------------------------------------------------------------------------------
register_bitfields! {
u32,
/// Distributor Control Register
CTLR [
Enable OFFSET(0) NUMBITS(1) []
],
/// Interrupt Controller Type Register
TYPER [
ITLinesNumber OFFSET(0) NUMBITS(5) []
],
/// Interrupt Processor Targets Registers
ITARGETSR [
Offset3 OFFSET(24) NUMBITS(8) [],
Offset2 OFFSET(16) NUMBITS(8) [],
Offset1 OFFSET(8) NUMBITS(8) [],
Offset0 OFFSET(0) NUMBITS(8) []
]
}
register_structs! {
#[allow(non_snake_case)]
SharedRegisterBlock {
(0x000 => CTLR: ReadWrite<u32, CTLR::Register>),
(0x004 => TYPER: ReadOnly<u32, TYPER::Register>),
(0x008 => _reserved1),
(0x104 => ISENABLER: [ReadWrite<u32>; 31]),
(0x180 => _reserved2),
(0x820 => ITARGETSR: [ReadWrite<u32, ITARGETSR::Register>; 248]),
(0xC00 => @END),
}
}
register_structs! {
#[allow(non_snake_case)]
BankedRegisterBlock {
(0x000 => _reserved1),
(0x100 => ISENABLER: ReadWrite<u32>),
(0x104 => _reserved2),
(0x800 => ITARGETSR: [ReadOnly<u32, ITARGETSR::Register>; 8]),
(0x820 => @END),
}
}
/// Abstraction for the non-banked parts of the associated MMIO registers.
type SharedRegisters = MMIODerefWrapper<SharedRegisterBlock>;
/// Abstraction for the banked parts of the associated MMIO registers.
type BankedRegisters = MMIODerefWrapper<BankedRegisterBlock>;
//--------------------------------------------------------------------------------------------------
// Public Definitions
//--------------------------------------------------------------------------------------------------
/// Representation of the GIC Distributor.
pub struct GICD {
/// Access to shared registers is guarded with a lock.
shared_registers: IRQSafeNullLock<SharedRegisters>,
/// Access to banked registers is unguarded.
banked_registers: InitStateLock<BankedRegisters>,
}
//--------------------------------------------------------------------------------------------------
// Private Code
//--------------------------------------------------------------------------------------------------
impl SharedRegisters {
/// Return the number of IRQs that this HW implements.
#[inline(always)]
fn num_irqs(&mut self) -> usize {
// Query number of implemented IRQs.
//
// Refer to GICv2 Architecture Specification, Section 4.3.2.
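// Example: an ITLinesNumber value of 7 translates to (7 + 1) * 32 = 256 implemented IRQ lines.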
((self.TYPER.read(TYPER::ITLinesNumber) as usize) + 1) * 32
}
/// Return a slice of the implemented ITARGETSR.
#[inline(always)]
fn implemented_itargets_slice(&mut self) -> &[ReadWrite<u32, ITARGETSR::Register>] {
assert!(self.num_irqs() >= 36);
// Calculate the max index of the shared ITARGETSR array.
//
// The first 32 IRQs are private, so not included in `shared_registers`. Each ITARGETSR
// register has four entries, so shift right by two. Subtract one because we start
// counting at zero.
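// Example: with 256 implemented IRQs, the max index is ((256 - 32) >> 2) - 1 = 55.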
let spi_itargetsr_max_index = ((self.num_irqs() - 32) >> 2) - 1;
// Rust automatically inserts a slice range sanity check, i.e. max >= min.
&self.ITARGETSR[0..spi_itargetsr_max_index]
}
}
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
use crate::synchronization::interface::ReadWriteEx;
use synchronization::interface::Mutex;
impl GICD {
/// Create an instance.
///
/// # Safety
///
/// - The user must ensure to provide a correct MMIO start address.
pub const unsafe fn new(mmio_start_addr: usize) -> Self {
Self {
shared_registers: IRQSafeNullLock::new(SharedRegisters::new(mmio_start_addr)),
banked_registers: InitStateLock::new(BankedRegisters::new(mmio_start_addr)),
}
}
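/// Set new MMIO start addresses for the shared and banked register blocks.
///
/// # Safety
///
/// - The user must ensure to provide a correct MMIO start address.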
pub unsafe fn set_mmio(&self, new_mmio_start_addr: usize) {
self.shared_registers
.lock(|regs| *regs = SharedRegisters::new(new_mmio_start_addr));
self.banked_registers
.write(|regs| *regs = BankedRegisters::new(new_mmio_start_addr));
}
/// Use a banked ITARGETSR to retrieve the executing core's GIC target mask.
///
/// Quoting the GICv2 Architecture Specification:
///
/// "GICD_ITARGETSR0 to GICD_ITARGETSR7 are read-only, and each field returns a value that
/// corresponds only to the processor reading the register."
fn local_gic_target_mask(&self) -> u32 {
self.banked_registers
.read(|regs| regs.ITARGETSR[0].read(ITARGETSR::Offset0))
}
/// Route all SPIs to the boot core and enable the distributor.
pub fn boot_core_init(&self) {
assert!(
state::state_manager().is_init(),
"Only allowed during kernel init phase"
);
// Target all SPIs to the boot core only.
let mask = self.local_gic_target_mask();
self.shared_registers.lock(|regs| {
for i in regs.implemented_itargets_slice().iter() {
i.write(
ITARGETSR::Offset3.val(mask)
+ ITARGETSR::Offset2.val(mask)
+ ITARGETSR::Offset1.val(mask)
+ ITARGETSR::Offset0.val(mask),
);
}
regs.CTLR.write(CTLR::Enable::SET);
});
}
/// Enable an interrupt.
pub fn enable(&self, irq_num: super::IRQNumber) {
let irq_num = irq_num.get();
// Each bit in the u32 enable register corresponds to one IRQ number. Shift right by 5
// (division by 32) and arrive at the index for the respective ISENABLER[i].
let enable_reg_index = irq_num >> 5;
let enable_bit: u32 = 1u32 << (irq_num % 32);
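// Example: IRQ number 33 yields enable_reg_index = 1 and enable_bit = 1 << 1.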
// Check if we are handling a private or shared IRQ.
match irq_num {
// Private.
0..=31 => self.banked_registers.read(|regs| {
let enable_reg = &regs.ISENABLER;
enable_reg.set(enable_reg.get() | enable_bit);
}),
// Shared.
_ => {
let enable_reg_index_shared = enable_reg_index - 1;
self.shared_registers.lock(|regs| {
let enable_reg = &regs.ISENABLER[enable_reg_index_shared];
enable_reg.set(enable_reg.get() | enable_bit);
});
}
}
}
}

@ -0,0 +1,15 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2018-2022 Andre Richter <andre.o.richter@gmail.com>
//! BCM driver top level.
mod bcm2xxx_gpio;
#[cfg(feature = "bsp_rpi3")]
mod bcm2xxx_interrupt_controller;
mod bcm2xxx_pl011_uart;
pub use bcm2xxx_gpio::*;
#[cfg(feature = "bsp_rpi3")]
pub use bcm2xxx_interrupt_controller::*;
pub use bcm2xxx_pl011_uart::*;

@ -0,0 +1,259 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2018-2022 Andre Richter <andre.o.richter@gmail.com>
//! GPIO Driver.
use crate::{
bsp::device_driver::common::MMIODerefWrapper, driver, memory, synchronization,
synchronization::IRQSafeNullLock,
};
use core::sync::atomic::{AtomicUsize, Ordering};
use tock_registers::{
interfaces::{ReadWriteable, Writeable},
register_bitfields, register_structs,
registers::ReadWrite,
};
//--------------------------------------------------------------------------------------------------
// Private Definitions
//--------------------------------------------------------------------------------------------------
// GPIO registers.
//
// Descriptions taken from
// - https://github.com/raspberrypi/documentation/files/1888662/BCM2837-ARM-Peripherals.-.Revised.-.V2-1.pdf
// - https://datasheets.raspberrypi.org/bcm2711/bcm2711-peripherals.pdf
register_bitfields! {
u32,
/// GPIO Function Select 1
GPFSEL1 [
/// Pin 15
FSEL15 OFFSET(15) NUMBITS(3) [
Input = 0b000,
Output = 0b001,
AltFunc0 = 0b100 // PL011 UART RX
],
/// Pin 14
FSEL14 OFFSET(12) NUMBITS(3) [
Input = 0b000,
Output = 0b001,
AltFunc0 = 0b100 // PL011 UART TX
]
],
/// GPIO Pull-up/down Register
///
/// BCM2837 only.
GPPUD [
/// Controls the actuation of the internal pull-up/down control line to ALL the GPIO pins.
PUD OFFSET(0) NUMBITS(2) [
Off = 0b00,
PullDown = 0b01,
PullUp = 0b10
]
],
/// GPIO Pull-up/down Clock Register 0
///
/// BCM2837 only.
GPPUDCLK0 [
/// Pin 15
PUDCLK15 OFFSET(15) NUMBITS(1) [
NoEffect = 0,
AssertClock = 1
],
/// Pin 14
PUDCLK14 OFFSET(14) NUMBITS(1) [
NoEffect = 0,
AssertClock = 1
]
],
/// GPIO Pull-up / Pull-down Register 0
///
/// BCM2711 only.
GPIO_PUP_PDN_CNTRL_REG0 [
/// Pin 15
GPIO_PUP_PDN_CNTRL15 OFFSET(30) NUMBITS(2) [
NoResistor = 0b00,
PullUp = 0b01
],
/// Pin 14
GPIO_PUP_PDN_CNTRL14 OFFSET(28) NUMBITS(2) [
NoResistor = 0b00,
PullUp = 0b01
]
]
}
register_structs! {
#[allow(non_snake_case)]
RegisterBlock {
(0x00 => _reserved1),
(0x04 => GPFSEL1: ReadWrite<u32, GPFSEL1::Register>),
(0x08 => _reserved2),
(0x94 => GPPUD: ReadWrite<u32, GPPUD::Register>),
(0x98 => GPPUDCLK0: ReadWrite<u32, GPPUDCLK0::Register>),
(0x9C => _reserved3),
(0xE4 => GPIO_PUP_PDN_CNTRL_REG0: ReadWrite<u32, GPIO_PUP_PDN_CNTRL_REG0::Register>),
(0xE8 => @END),
}
}
/// Abstraction for the associated MMIO registers.
type Registers = MMIODerefWrapper<RegisterBlock>;
//--------------------------------------------------------------------------------------------------
// Public Definitions
//--------------------------------------------------------------------------------------------------
pub struct GPIOInner {
registers: Registers,
}
// Export the inner struct so that BSPs can use it for the panic handler.
pub use GPIOInner as PanicGPIO;
/// Representation of the GPIO HW.
pub struct GPIO {
mmio_descriptor: memory::mmu::MMIODescriptor,
virt_mmio_start_addr: AtomicUsize,
inner: IRQSafeNullLock<GPIOInner>,
}
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
impl GPIOInner {
/// Create an instance.
///
/// # Safety
///
/// - The user must ensure to provide a correct MMIO start address.
pub const unsafe fn new(mmio_start_addr: usize) -> Self {
Self {
registers: Registers::new(mmio_start_addr),
}
}
/// Init code.
///
/// # Safety
///
/// - The user must ensure to provide a correct MMIO start address.
pub unsafe fn init(&mut self, new_mmio_start_addr: Option<usize>) -> Result<(), &'static str> {
if let Some(addr) = new_mmio_start_addr {
self.registers = Registers::new(addr);
}
Ok(())
}
/// Disable pull-up/down on pins 14 and 15.
#[cfg(feature = "bsp_rpi3")]
fn disable_pud_14_15_bcm2837(&mut self) {
use crate::{time, time::interface::TimeManager};
use core::time::Duration;
// The Linux 2837 GPIO driver waits 1 µs between the steps.
const DELAY: Duration = Duration::from_micros(1);
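// Sequence from the BCM2837 datasheet: write the desired control value, wait, latch it into
// pins 14 and 15 via the clock register, wait, then remove the control signal and the clock.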
self.registers.GPPUD.write(GPPUD::PUD::Off);
time::time_manager().spin_for(DELAY);
self.registers
.GPPUDCLK0
.write(GPPUDCLK0::PUDCLK15::AssertClock + GPPUDCLK0::PUDCLK14::AssertClock);
time::time_manager().spin_for(DELAY);
self.registers.GPPUD.write(GPPUD::PUD::Off);
self.registers.GPPUDCLK0.set(0);
}
/// Disable pull-up/down on pins 14 and 15.
#[cfg(feature = "bsp_rpi4")]
fn disable_pud_14_15_bcm2711(&mut self) {
self.registers.GPIO_PUP_PDN_CNTRL_REG0.write(
GPIO_PUP_PDN_CNTRL_REG0::GPIO_PUP_PDN_CNTRL15::PullUp
+ GPIO_PUP_PDN_CNTRL_REG0::GPIO_PUP_PDN_CNTRL14::PullUp,
);
}
/// Map PL011 UART as standard output.
///
/// TX to pin 14
/// RX to pin 15
pub fn map_pl011_uart(&mut self) {
// Select the UART on pins 14 and 15.
self.registers
.GPFSEL1
.modify(GPFSEL1::FSEL15::AltFunc0 + GPFSEL1::FSEL14::AltFunc0);
// Disable pull-up/down on pins 14 and 15.
#[cfg(feature = "bsp_rpi3")]
self.disable_pud_14_15_bcm2837();
#[cfg(feature = "bsp_rpi4")]
self.disable_pud_14_15_bcm2711();
}
}
impl GPIO {
/// Create an instance.
///
/// # Safety
///
/// - The user must ensure to provide correct MMIO descriptors.
pub const unsafe fn new(mmio_descriptor: memory::mmu::MMIODescriptor) -> Self {
Self {
mmio_descriptor,
virt_mmio_start_addr: AtomicUsize::new(0),
inner: IRQSafeNullLock::new(GPIOInner::new(mmio_descriptor.start_addr().as_usize())),
}
}
/// Concurrency safe version of `GPIOInner.map_pl011_uart()`
pub fn map_pl011_uart(&self) {
self.inner.lock(|inner| inner.map_pl011_uart())
}
}
//------------------------------------------------------------------------------
// OS Interface Code
//------------------------------------------------------------------------------
use synchronization::interface::Mutex;
impl driver::interface::DeviceDriver for GPIO {
fn compatible(&self) -> &'static str {
"BCM GPIO"
}
unsafe fn init(&self) -> Result<(), &'static str> {
let virt_addr = memory::mmu::kernel_map_mmio(self.compatible(), &self.mmio_descriptor)?;
self.inner
.lock(|inner| inner.init(Some(virt_addr.as_usize())))?;
self.virt_mmio_start_addr
.store(virt_addr.as_usize(), Ordering::Relaxed);
Ok(())
}
fn virt_mmio_start_addr(&self) -> Option<usize> {
let addr = self.virt_mmio_start_addr.load(Ordering::Relaxed);
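// An address of zero means that init() has not yet remapped the MMIO region and stored the
// virtual start address.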
if addr == 0 {
return None;
}
Some(addr)
}
}

@ -0,0 +1,138 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2020-2022 Andre Richter <andre.o.richter@gmail.com>
//! Interrupt Controller Driver.
mod peripheral_ic;
use crate::{driver, exception, memory};
//--------------------------------------------------------------------------------------------------
// Private Definitions
//--------------------------------------------------------------------------------------------------
/// Wrapper struct for a bitmask indicating pending IRQ numbers.
struct PendingIRQs {
bitmask: u64,
}
//--------------------------------------------------------------------------------------------------
// Public Definitions
//--------------------------------------------------------------------------------------------------
pub type LocalIRQ =
exception::asynchronous::IRQNumber<{ InterruptController::MAX_LOCAL_IRQ_NUMBER }>;
pub type PeripheralIRQ =
exception::asynchronous::IRQNumber<{ InterruptController::MAX_PERIPHERAL_IRQ_NUMBER }>;
/// Used for the associated type of trait [`exception::asynchronous::interface::IRQManager`].
#[derive(Copy, Clone)]
pub enum IRQNumber {
Local(LocalIRQ),
Peripheral(PeripheralIRQ),
}
/// Representation of the Interrupt Controller.
pub struct InterruptController {
periph: peripheral_ic::PeripheralIC,
}
//--------------------------------------------------------------------------------------------------
// Private Code
//--------------------------------------------------------------------------------------------------
impl PendingIRQs {
pub fn new(bitmask: u64) -> Self {
Self { bitmask }
}
}
impl Iterator for PendingIRQs {
type Item = usize;
fn next(&mut self) -> Option<Self::Item> {
use core::intrinsics::cttz;
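// cttz returns the number of trailing zero bits, which equals the lowest pending IRQ number.
// For an all-zero bitmask it returns 64, the bit width of u64.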
let next = cttz(self.bitmask);
if next == 64 {
return None;
}
self.bitmask &= !(1 << next);
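// Clear the bit so that the next call to next() yields the next pending IRQ.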
Some(next as usize)
}
}
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
impl InterruptController {
const MAX_LOCAL_IRQ_NUMBER: usize = 11;
const MAX_PERIPHERAL_IRQ_NUMBER: usize = 63;
const NUM_PERIPHERAL_IRQS: usize = Self::MAX_PERIPHERAL_IRQ_NUMBER + 1;
/// Create an instance.
///
/// # Safety
///
/// - The user must ensure to provide correct MMIO descriptors.
pub const unsafe fn new(
_local_mmio_descriptor: memory::mmu::MMIODescriptor,
periph_mmio_descriptor: memory::mmu::MMIODescriptor,
) -> Self {
Self {
periph: peripheral_ic::PeripheralIC::new(periph_mmio_descriptor),
}
}
}
//------------------------------------------------------------------------------
// OS Interface Code
//------------------------------------------------------------------------------
impl driver::interface::DeviceDriver for InterruptController {
fn compatible(&self) -> &'static str {
"BCM Interrupt Controller"
}
unsafe fn init(&self) -> Result<(), &'static str> {
self.periph.init()
}
}
impl exception::asynchronous::interface::IRQManager for InterruptController {
type IRQNumberType = IRQNumber;
fn register_handler(
&self,
irq: Self::IRQNumberType,
descriptor: exception::asynchronous::IRQDescriptor,
) -> Result<(), &'static str> {
match irq {
IRQNumber::Local(_) => unimplemented!("Local IRQ controller not implemented."),
IRQNumber::Peripheral(pirq) => self.periph.register_handler(pirq, descriptor),
}
}
fn enable(&self, irq: Self::IRQNumberType) {
match irq {
IRQNumber::Local(_) => unimplemented!("Local IRQ controller not implemented."),
IRQNumber::Peripheral(pirq) => self.periph.enable(pirq),
}
}
fn handle_pending_irqs<'irq_context>(
&'irq_context self,
ic: &exception::asynchronous::IRQContext<'irq_context>,
) {
// It can only be a peripheral IRQ pending because enable() does not support local IRQs yet.
self.periph.handle_pending_irqs(ic)
}
fn print_handler(&self) {
self.periph.print_handler();
}
}

@ -0,0 +1,192 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2020-2022 Andre Richter <andre.o.richter@gmail.com>
//! Peripheral Interrupt Controller Driver.
use super::{InterruptController, PendingIRQs, PeripheralIRQ};
use crate::{
bsp::device_driver::common::MMIODerefWrapper,
driver, exception, memory, synchronization,
synchronization::{IRQSafeNullLock, InitStateLock},
};
use tock_registers::{
interfaces::{Readable, Writeable},
register_structs,
registers::{ReadOnly, WriteOnly},
};
//--------------------------------------------------------------------------------------------------
// Private Definitions
//--------------------------------------------------------------------------------------------------
register_structs! {
#[allow(non_snake_case)]
WORegisterBlock {
(0x00 => _reserved1),
(0x10 => ENABLE_1: WriteOnly<u32>),
(0x14 => ENABLE_2: WriteOnly<u32>),
(0x18 => @END),
}
}
register_structs! {
#[allow(non_snake_case)]
RORegisterBlock {
(0x00 => _reserved1),
(0x04 => PENDING_1: ReadOnly<u32>),
(0x08 => PENDING_2: ReadOnly<u32>),
(0x0c => @END),
}
}
/// Abstraction for the WriteOnly parts of the associated MMIO registers.
type WriteOnlyRegisters = MMIODerefWrapper<WORegisterBlock>;
/// Abstraction for the ReadOnly parts of the associated MMIO registers.
type ReadOnlyRegisters = MMIODerefWrapper<RORegisterBlock>;
type HandlerTable =
[Option<exception::asynchronous::IRQDescriptor>; InterruptController::NUM_PERIPHERAL_IRQS];
//--------------------------------------------------------------------------------------------------
// Public Definitions
//--------------------------------------------------------------------------------------------------
/// Representation of the peripheral interrupt controller.
pub struct PeripheralIC {
mmio_descriptor: memory::mmu::MMIODescriptor,
/// Access to write registers is guarded with a lock.
wo_registers: IRQSafeNullLock<WriteOnlyRegisters>,
/// Register read access is unguarded.
ro_registers: InitStateLock<ReadOnlyRegisters>,
/// Stores registered IRQ handlers. Writable only during kernel init. RO afterwards.
handler_table: InitStateLock<HandlerTable>,
}
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
impl PeripheralIC {
/// Create an instance.
///
/// # Safety
///
/// - The user must ensure to provide correct MMIO descriptors.
pub const unsafe fn new(mmio_descriptor: memory::mmu::MMIODescriptor) -> Self {
let addr = mmio_descriptor.start_addr().as_usize();
Self {
mmio_descriptor,
wo_registers: IRQSafeNullLock::new(WriteOnlyRegisters::new(addr)),
ro_registers: InitStateLock::new(ReadOnlyRegisters::new(addr)),
handler_table: InitStateLock::new([None; InterruptController::NUM_PERIPHERAL_IRQS]),
}
}
/// Query the list of pending IRQs.
fn pending_irqs(&self) -> PendingIRQs {
self.ro_registers.read(|regs| {
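// PENDING_1 covers peripheral IRQs 0..=31, PENDING_2 covers IRQs 32..=63. Combine both
// into a single 64-bit mask.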
let pending_mask: u64 =
(u64::from(regs.PENDING_2.get()) << 32) | u64::from(regs.PENDING_1.get());
PendingIRQs::new(pending_mask)
})
}
}
//------------------------------------------------------------------------------
// OS Interface Code
//------------------------------------------------------------------------------
use synchronization::interface::{Mutex, ReadWriteEx};
impl driver::interface::DeviceDriver for PeripheralIC {
fn compatible(&self) -> &'static str {
"BCM Peripheral Interrupt Controller"
}
unsafe fn init(&self) -> Result<(), &'static str> {
let virt_addr =
memory::mmu::kernel_map_mmio(self.compatible(), &self.mmio_descriptor)?.as_usize();
self.wo_registers
.lock(|regs| *regs = WriteOnlyRegisters::new(virt_addr));
self.ro_registers
.write(|regs| *regs = ReadOnlyRegisters::new(virt_addr));
Ok(())
}
}
impl exception::asynchronous::interface::IRQManager for PeripheralIC {
type IRQNumberType = PeripheralIRQ;
fn register_handler(
&self,
irq: Self::IRQNumberType,
descriptor: exception::asynchronous::IRQDescriptor,
) -> Result<(), &'static str> {
self.handler_table.write(|table| {
let irq_number = irq.get();
if table[irq_number].is_some() {
return Err("IRQ handler already registered");
}
table[irq_number] = Some(descriptor);
Ok(())
})
}
fn enable(&self, irq: Self::IRQNumberType) {
self.wo_registers.lock(|regs| {
let enable_reg = if irq.get() <= 31 {
&regs.ENABLE_1
} else {
&regs.ENABLE_2
};
let enable_bit: u32 = 1 << (irq.get() % 32);
// Writing a 1 to a bit will set the corresponding IRQ enable bit. All other IRQ enable
// bits are unaffected. So we don't need read and OR'ing here.
enable_reg.set(enable_bit);
});
}
fn handle_pending_irqs<'irq_context>(
&'irq_context self,
_ic: &exception::asynchronous::IRQContext<'irq_context>,
) {
self.handler_table.read(|table| {
for irq_number in self.pending_irqs() {
match table[irq_number] {
None => panic!("No handler registered for IRQ {}", irq_number),
Some(descriptor) => {
// Call the IRQ handler. Panics on failure.
descriptor.handler.handle().expect("Error handling IRQ");
}
}
}
})
}
fn print_handler(&self) {
use crate::info;
info!(" Peripheral handler:");
self.handler_table.read(|table| {
for (i, opt) in table.iter().enumerate() {
if let Some(handler) = opt {
info!(" {: >3}. {}", i, handler.name);
}
}
});
}
}

@ -0,0 +1,536 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2018-2022 Andre Richter <andre.o.richter@gmail.com>
//! PL011 UART driver.
//!
//! # Resources
//!
//! - <https://github.com/raspberrypi/documentation/files/1888662/BCM2837-ARM-Peripherals.-.Revised.-.V2-1.pdf>
//! - <https://developer.arm.com/documentation/ddi0183/latest>
use crate::{
bsp, bsp::device_driver::common::MMIODerefWrapper, console, cpu, driver, exception, memory,
synchronization, synchronization::IRQSafeNullLock,
};
use core::{
fmt,
sync::atomic::{AtomicUsize, Ordering},
};
use tock_registers::{
interfaces::{Readable, Writeable},
register_bitfields, register_structs,
registers::{ReadOnly, ReadWrite, WriteOnly},
};
//--------------------------------------------------------------------------------------------------
// Private Definitions
//--------------------------------------------------------------------------------------------------
// PL011 UART registers.
//
// Descriptions taken from "PrimeCell UART (PL011) Technical Reference Manual" r1p5.
register_bitfields! {
u32,
/// Flag Register.
FR [
/// Transmit FIFO empty. The meaning of this bit depends on the state of the FEN bit in the
/// Line Control Register, LCR_H.
///
/// - If the FIFO is disabled, this bit is set when the transmit holding register is empty.
/// - If the FIFO is enabled, the TXFE bit is set when the transmit FIFO is empty.
/// - This bit does not indicate if there is data in the transmit shift register.
TXFE OFFSET(7) NUMBITS(1) [],
/// Transmit FIFO full. The meaning of this bit depends on the state of the FEN bit in the
/// LCR_H Register.
///
/// - If the FIFO is disabled, this bit is set when the transmit holding register is full.
/// - If the FIFO is enabled, the TXFF bit is set when the transmit FIFO is full.
TXFF OFFSET(5) NUMBITS(1) [],
/// Receive FIFO empty. The meaning of this bit depends on the state of the FEN bit in the
/// LCR_H Register.
///
/// - If the FIFO is disabled, this bit is set when the receive holding register is empty.
/// - If the FIFO is enabled, the RXFE bit is set when the receive FIFO is empty.
RXFE OFFSET(4) NUMBITS(1) [],
/// UART busy. If this bit is set to 1, the UART is busy transmitting data. This bit remains
/// set until the complete byte, including all the stop bits, has been sent from the shift
/// register.
///
/// This bit is set as soon as the transmit FIFO becomes non-empty, regardless of whether
/// the UART is enabled or not.
BUSY OFFSET(3) NUMBITS(1) []
],
/// Integer Baud Rate Divisor.
IBRD [
/// The integer baud rate divisor.
BAUD_DIVINT OFFSET(0) NUMBITS(16) []
],
/// Fractional Baud Rate Divisor.
FBRD [
/// The fractional baud rate divisor.
BAUD_DIVFRAC OFFSET(0) NUMBITS(6) []
],
/// Line Control Register.
LCR_H [
/// Word length. These bits indicate the number of data bits transmitted or received in a
/// frame.
#[allow(clippy::enum_variant_names)]
WLEN OFFSET(5) NUMBITS(2) [
FiveBit = 0b00,
SixBit = 0b01,
SevenBit = 0b10,
EightBit = 0b11
],
/// Enable FIFOs:
///
/// 0 = FIFOs are disabled (character mode), that is, the FIFOs become 1-byte-deep holding
/// registers.
///
/// 1 = Transmit and receive FIFO buffers are enabled (FIFO mode).
FEN OFFSET(4) NUMBITS(1) [
FifosDisabled = 0,
FifosEnabled = 1
]
],
/// Control Register.
CR [
/// Receive enable. If this bit is set to 1, the receive section of the UART is enabled.
/// Data reception occurs for either UART signals or SIR signals depending on the setting of
/// the SIREN bit. When the UART is disabled in the middle of reception, it completes the
/// current character before stopping.
RXE OFFSET(9) NUMBITS(1) [
Disabled = 0,
Enabled = 1
],
/// Transmit enable. If this bit is set to 1, the transmit section of the UART is enabled.
/// Data transmission occurs for either UART signals, or SIR signals depending on the
/// setting of the SIREN bit. When the UART is disabled in the middle of transmission, it
/// completes the current character before stopping.
TXE OFFSET(8) NUMBITS(1) [
Disabled = 0,
Enabled = 1
],
/// UART enable:
///
/// 0 = UART is disabled. If the UART is disabled in the middle of transmission or
/// reception, it completes the current character before stopping.
///
/// 1 = The UART is enabled. Data transmission and reception occurs for either UART signals
/// or SIR signals depending on the setting of the SIREN bit
UARTEN OFFSET(0) NUMBITS(1) [
/// If the UART is disabled in the middle of transmission or reception, it completes the
/// current character before stopping.
Disabled = 0,
Enabled = 1
]
],
/// Interrupt FIFO Level Select Register.
IFLS [
/// Receive interrupt FIFO level select. The trigger points for the receive interrupt are as
/// follows.
RXIFLSEL OFFSET(3) NUMBITS(5) [
OneEigth = 0b000,
OneQuarter = 0b001,
OneHalf = 0b010,
ThreeQuarters = 0b011,
SevenEights = 0b100
]
],
/// Interrupt Mask Set/Clear Register.
IMSC [
/// Receive timeout interrupt mask. A read returns the current mask for the UARTRTINTR
/// interrupt.
///
/// - On a write of 1, the mask of the UARTRTINTR interrupt is set.
/// - A write of 0 clears the mask.
RTIM OFFSET(6) NUMBITS(1) [
Disabled = 0,
Enabled = 1
],
/// Receive interrupt mask. A read returns the current mask for the UARTRXINTR interrupt.
///
/// - On a write of 1, the mask of the UARTRXINTR interrupt is set.
/// - A write of 0 clears the mask.
RXIM OFFSET(4) NUMBITS(1) [
Disabled = 0,
Enabled = 1
]
],
/// Masked Interrupt Status Register.
MIS [
/// Receive timeout masked interrupt status. Returns the masked interrupt state of the
/// UARTRTINTR interrupt.
RTMIS OFFSET(6) NUMBITS(1) [],
/// Receive masked interrupt status. Returns the masked interrupt state of the UARTRXINTR
/// interrupt.
RXMIS OFFSET(4) NUMBITS(1) []
],
/// Interrupt Clear Register.
ICR [
/// Meta field for all pending interrupts.
ALL OFFSET(0) NUMBITS(11) []
]
}
register_structs! {
#[allow(non_snake_case)]
pub RegisterBlock {
(0x00 => DR: ReadWrite<u32>),
(0x04 => _reserved1),
(0x18 => FR: ReadOnly<u32, FR::Register>),
(0x1c => _reserved2),
(0x24 => IBRD: WriteOnly<u32, IBRD::Register>),
(0x28 => FBRD: WriteOnly<u32, FBRD::Register>),
(0x2c => LCR_H: WriteOnly<u32, LCR_H::Register>),
(0x30 => CR: WriteOnly<u32, CR::Register>),
(0x34 => IFLS: ReadWrite<u32, IFLS::Register>),
(0x38 => IMSC: ReadWrite<u32, IMSC::Register>),
(0x3C => _reserved3),
(0x40 => MIS: ReadOnly<u32, MIS::Register>),
(0x44 => ICR: WriteOnly<u32, ICR::Register>),
(0x48 => @END),
}
}
/// Abstraction for the associated MMIO registers.
type Registers = MMIODerefWrapper<RegisterBlock>;
#[derive(PartialEq)]
enum BlockingMode {
Blocking,
NonBlocking,
}
//--------------------------------------------------------------------------------------------------
// Public Definitions
//--------------------------------------------------------------------------------------------------
pub struct PL011UartInner {
registers: Registers,
chars_written: usize,
chars_read: usize,
}
// Export the inner struct so that BSPs can use it for the panic handler.
pub use PL011UartInner as PanicUart;
/// Representation of the UART.
pub struct PL011Uart {
mmio_descriptor: memory::mmu::MMIODescriptor,
virt_mmio_start_addr: AtomicUsize,
inner: IRQSafeNullLock<PL011UartInner>,
irq_number: bsp::device_driver::IRQNumber,
}
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
impl PL011UartInner {
/// Create an instance.
///
/// # Safety
///
/// - The user must ensure to provide a correct MMIO start address.
pub const unsafe fn new(mmio_start_addr: usize) -> Self {
Self {
registers: Registers::new(mmio_start_addr),
chars_written: 0,
chars_read: 0,
}
}
/// Set up baud rate and characteristics.
///
/// This results in 8N1 and 921_600 baud.
///
/// The calculation for the BRD is (we set the clock to 48 MHz in config.txt):
/// `(48_000_000 / 16) / 921_600 = 3.2552083`.
///
/// This means the integer part is `3` and goes into the `IBRD`.
/// The fractional part is `0.2552083`.
///
/// `FBRD` calculation according to the PL011 Technical Reference Manual:
/// `INTEGER((0.2552083 * 64) + 0.5) = 16`.
///
/// Therefore, the generated baud rate divider is: `3 + 16/64 = 3.25`. Which results in a
/// generated baud rate of `48_000_000 / (16 * 3.25) = 923_077`.
///
/// Error = `((923_077 - 921_600) / 921_600) * 100 = 0.16%`.
///
/// # Safety
///
/// - The user must ensure to provide a correct MMIO start address.
pub unsafe fn init(&mut self, new_mmio_start_addr: Option<usize>) -> Result<(), &'static str> {
if let Some(addr) = new_mmio_start_addr {
self.registers = Registers::new(addr);
}
// Execution can arrive here while there are still characters queued in the TX FIFO and
// actively being sent out by the UART hardware. If the UART is turned off in this case,
// those queued characters would be lost.
//
// For example, this can happen during runtime on a call to panic!(), because panic!()
// initializes its own UART instance and calls init().
//
// Hence, flush first to ensure all pending characters are transmitted.
self.flush();
// Turn the UART off temporarily.
self.registers.CR.set(0);
// Clear all pending interrupts.
self.registers.ICR.write(ICR::ALL::CLEAR);
// From the PL011 Technical Reference Manual:
//
// The LCR_H, IBRD, and FBRD registers form the single 30-bit wide LCR Register that is
// updated on a single write strobe generated by a LCR_H write. So, to internally update the
// contents of IBRD or FBRD, a LCR_H write must always be performed at the end.
//
// Set the baud rate, 8N1 and FIFO enabled.
self.registers.IBRD.write(IBRD::BAUD_DIVINT.val(3));
self.registers.FBRD.write(FBRD::BAUD_DIVFRAC.val(16));
self.registers
.LCR_H
.write(LCR_H::WLEN::EightBit + LCR_H::FEN::FifosEnabled);
// Set RX FIFO fill level at 1/8.
self.registers.IFLS.write(IFLS::RXIFLSEL::OneEigth);
// Enable RX IRQ + RX timeout IRQ.
self.registers
.IMSC
.write(IMSC::RXIM::Enabled + IMSC::RTIM::Enabled);
// Turn the UART on.
self.registers
.CR
.write(CR::UARTEN::Enabled + CR::TXE::Enabled + CR::RXE::Enabled);
Ok(())
}
/// Send a character.
fn write_char(&mut self, c: char) {
// Spin while TX FIFO full is set, waiting for an empty slot.
while self.registers.FR.matches_all(FR::TXFF::SET) {
cpu::nop();
}
// Write the character to the buffer.
self.registers.DR.set(c as u32);
self.chars_written += 1;
}
/// Block execution until the last buffered character has been physically put on the TX wire.
fn flush(&self) {
// Spin until the busy bit is cleared.
while self.registers.FR.matches_all(FR::BUSY::SET) {
cpu::nop();
}
}
/// Retrieve a character.
fn read_char_converting(&mut self, blocking_mode: BlockingMode) -> Option<char> {
// If RX FIFO is empty,
if self.registers.FR.matches_all(FR::RXFE::SET) {
// immediately return in non-blocking mode.
if blocking_mode == BlockingMode::NonBlocking {
return None;
}
// Otherwise, wait until a char was received.
while self.registers.FR.matches_all(FR::RXFE::SET) {
cpu::nop();
}
}
// Read one character.
let mut ret = self.registers.DR.get() as u8 as char;
// Convert carriage return to newline.
if ret == '\r' {
ret = '\n'
}
// Update statistics.
self.chars_read += 1;
Some(ret)
}
}
/// Implementing `core::fmt::Write` enables usage of the `format_args!` macros, which in turn are
/// used to implement the `kernel`'s `print!` and `println!` macros. By implementing `write_str()`,
/// we get `write_fmt()` automatically.
///
/// The function takes an `&mut self`, so it must be implemented for the inner struct.
///
/// See [`src/print.rs`].
///
/// [`src/print.rs`]: ../../print/index.html
impl fmt::Write for PL011UartInner {
fn write_str(&mut self, s: &str) -> fmt::Result {
for c in s.chars() {
self.write_char(c);
}
Ok(())
}
}
impl PL011Uart {
/// Create an instance.
///
/// # Safety
///
/// - The user must ensure to provide correct MMIO descriptors.
/// - The user must ensure to provide correct IRQ numbers.
pub const unsafe fn new(
mmio_descriptor: memory::mmu::MMIODescriptor,
irq_number: bsp::device_driver::IRQNumber,
) -> Self {
Self {
mmio_descriptor,
virt_mmio_start_addr: AtomicUsize::new(0),
inner: IRQSafeNullLock::new(PL011UartInner::new(
mmio_descriptor.start_addr().as_usize(),
)),
irq_number,
}
}
}
//------------------------------------------------------------------------------
// OS Interface Code
//------------------------------------------------------------------------------
use synchronization::interface::Mutex;
impl driver::interface::DeviceDriver for PL011Uart {
fn compatible(&self) -> &'static str {
"BCM PL011 UART"
}
unsafe fn init(&self) -> Result<(), &'static str> {
let virt_addr = memory::mmu::kernel_map_mmio(self.compatible(), &self.mmio_descriptor)?;
self.inner
.lock(|inner| inner.init(Some(virt_addr.as_usize())))?;
self.virt_mmio_start_addr
.store(virt_addr.as_usize(), Ordering::Relaxed);
Ok(())
}
fn register_and_enable_irq_handler(&'static self) -> Result<(), &'static str> {
use bsp::exception::asynchronous::irq_manager;
use exception::asynchronous::{interface::IRQManager, IRQDescriptor};
let descriptor = IRQDescriptor {
name: "BCM PL011 UART",
handler: self,
};
irq_manager().register_handler(self.irq_number, descriptor)?;
irq_manager().enable(self.irq_number);
Ok(())
}
fn virt_mmio_start_addr(&self) -> Option<usize> {
let addr = self.virt_mmio_start_addr.load(Ordering::Relaxed);
if addr == 0 {
return None;
}
Some(addr)
}
}
impl console::interface::Write for PL011Uart {
/// Passthrough of `args` to the `core::fmt::Write` implementation, but guarded by a Mutex to
/// serialize access.
fn write_char(&self, c: char) {
self.inner.lock(|inner| inner.write_char(c));
}
fn write_fmt(&self, args: core::fmt::Arguments) -> fmt::Result {
// Fully qualified syntax for the call to `core::fmt::Write::write_fmt()` to increase
// readability.
self.inner.lock(|inner| fmt::Write::write_fmt(inner, args))
}
fn flush(&self) {
// Spin until TX FIFO empty is set.
self.inner.lock(|inner| inner.flush());
}
}
impl console::interface::Read for PL011Uart {
fn read_char(&self) -> char {
self.inner
.lock(|inner| inner.read_char_converting(BlockingMode::Blocking).unwrap())
}
fn clear_rx(&self) {
// Read from the RX FIFO until it is indicating empty.
while self
.inner
.lock(|inner| inner.read_char_converting(BlockingMode::NonBlocking))
.is_some()
{}
}
}
impl console::interface::Statistics for PL011Uart {
fn chars_written(&self) -> usize {
self.inner.lock(|inner| inner.chars_written)
}
fn chars_read(&self) -> usize {
self.inner.lock(|inner| inner.chars_read)
}
}
impl exception::asynchronous::interface::IRQHandler for PL011Uart {
fn handle(&self) -> Result<(), &'static str> {
self.inner.lock(|inner| {
let pending = inner.registers.MIS.extract();
// Clear all pending IRQs.
inner.registers.ICR.write(ICR::ALL::CLEAR);
// Check for any kind of RX interrupt.
if pending.matches_any(MIS::RXMIS::SET + MIS::RTMIS::SET) {
// Echo any received characters.
while let Some(c) = inner.read_char_converting(BlockingMode::NonBlocking) {
inner.write_char(c)
}
}
});
Ok(())
}
}

@ -0,0 +1,38 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2020-2022 Andre Richter <andre.o.richter@gmail.com>
//! Common device driver code.
use core::{marker::PhantomData, ops};
//--------------------------------------------------------------------------------------------------
// Public Definitions
//--------------------------------------------------------------------------------------------------
pub struct MMIODerefWrapper<T> {
start_addr: usize,
phantom: PhantomData<fn() -> T>,
}
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
impl<T> MMIODerefWrapper<T> {
/// Create an instance.
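///
/// # Safety
///
/// - The user must ensure to provide a correct MMIO start address.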
pub const unsafe fn new(start_addr: usize) -> Self {
Self {
start_addr,
phantom: PhantomData,
}
}
}
impl<T> ops::Deref for MMIODerefWrapper<T> {
type Target = T;
fn deref(&self) -> &Self::Target {
unsafe { &*(self.start_addr as *const _) }
}
}

@ -0,0 +1,62 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2018-2022 Andre Richter <andre.o.richter@gmail.com>
//! Top-level BSP file for the Raspberry Pi 3 and 4.
pub mod console;
pub mod cpu;
pub mod driver;
pub mod exception;
pub mod memory;
use super::device_driver;
use crate::memory::mmu::MMIODescriptor;
use memory::map::mmio;
//--------------------------------------------------------------------------------------------------
// Global instances
//--------------------------------------------------------------------------------------------------
static GPIO: device_driver::GPIO =
unsafe { device_driver::GPIO::new(MMIODescriptor::new(mmio::GPIO_START, mmio::GPIO_SIZE)) };
static PL011_UART: device_driver::PL011Uart = unsafe {
device_driver::PL011Uart::new(
MMIODescriptor::new(mmio::PL011_UART_START, mmio::PL011_UART_SIZE),
exception::asynchronous::irq_map::PL011_UART,
)
};
#[cfg(feature = "bsp_rpi3")]
static INTERRUPT_CONTROLLER: device_driver::InterruptController = unsafe {
device_driver::InterruptController::new(
MMIODescriptor::new(mmio::LOCAL_IC_START, mmio::LOCAL_IC_SIZE),
MMIODescriptor::new(mmio::PERIPHERAL_IC_START, mmio::PERIPHERAL_IC_SIZE),
)
};
#[cfg(feature = "bsp_rpi4")]
static INTERRUPT_CONTROLLER: device_driver::GICv2 = unsafe {
device_driver::GICv2::new(
MMIODescriptor::new(mmio::GICD_START, mmio::GICD_SIZE),
MMIODescriptor::new(mmio::GICC_START, mmio::GICC_SIZE),
)
};
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
/// Board identification.
pub fn board_name() -> &'static str {
#[cfg(feature = "bsp_rpi3")]
{
"Raspberry Pi 3"
}
#[cfg(feature = "bsp_rpi4")]
{
"Raspberry Pi 4"
}
}

@ -0,0 +1,98 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2018-2022 Andre Richter <andre.o.richter@gmail.com>
//! BSP console facilities.
use crate::{bsp::device_driver, console, cpu, driver};
use core::fmt;
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
/// In case of a panic, the panic handler uses this function to take a last shot at printing
/// something before the system is halted.
///
/// We try to init panic-versions of the GPIO and the UART. The panic versions are not protected
/// with synchronization primitives, which increases chances that we get to print something, even
/// when the kernel's default GPIO or UART instances happen to be locked at the time of the panic.
///
/// # Safety
///
/// - Use only for printing during a panic.
#[cfg(not(feature = "test_build"))]
pub unsafe fn panic_console_out() -> impl fmt::Write {
use driver::interface::DeviceDriver;
// If remapping of the driver's MMIO hasn't already happened, we won't be able to print. Just
// park the CPU core in this case.
let gpio_mmio_start_addr = match super::GPIO.virt_mmio_start_addr() {
None => cpu::wait_forever(),
Some(x) => x,
};
let uart_mmio_start_addr = match super::PL011_UART.virt_mmio_start_addr() {
None => cpu::wait_forever(),
Some(x) => x,
};
let mut panic_gpio = device_driver::PanicGPIO::new(gpio_mmio_start_addr);
let mut panic_uart = device_driver::PanicUart::new(uart_mmio_start_addr);
panic_gpio
.init(None)
.unwrap_or_else(|_| cpu::wait_forever());
panic_gpio.map_pl011_uart();
panic_uart
.init(None)
.unwrap_or_else(|_| cpu::wait_forever());
panic_uart
}
/// Reduced version for test builds.
///
/// # Safety
///
/// - Use only for printing during a panic.
#[cfg(feature = "test_build")]
pub unsafe fn panic_console_out() -> impl fmt::Write {
use driver::interface::DeviceDriver;
let uart_mmio_start_addr = match super::PL011_UART.virt_mmio_start_addr() {
None => cpu::wait_forever(),
Some(x) => x,
};
let mut panic_uart = device_driver::PanicUart::new(uart_mmio_start_addr);
panic_uart
.init(None)
.unwrap_or_else(|_| cpu::qemu_exit_failure());
panic_uart
}
/// Return a reference to the console.
pub fn console() -> &'static impl console::interface::All {
&super::PL011_UART
}
//--------------------------------------------------------------------------------------------------
// Testing
//--------------------------------------------------------------------------------------------------
/// Minimal code needed to bring up the console in QEMU (for testing only). This often requires fewer steps
/// than on real hardware due to QEMU's abstractions.
#[cfg(feature = "test_build")]
pub fn qemu_bring_up_console() {
use driver::interface::DeviceDriver;
// Calling the UART's init ensures that the BSP's instance of the UART does remap the MMIO
// addresses.
unsafe {
super::PL011_UART
.init()
.unwrap_or_else(|_| cpu::qemu_exit_failure());
}
}

@ -0,0 +1,14 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2018-2022 Andre Richter <andre.o.richter@gmail.com>
//! BSP Processor code.
//--------------------------------------------------------------------------------------------------
// Public Definitions
//--------------------------------------------------------------------------------------------------
/// Used by `arch` code to find the early boot core.
#[no_mangle]
#[link_section = ".text._start_arguments"]
pub static BOOT_CORE_ID: u64 = 0;

@ -0,0 +1,61 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2018-2022 Andre Richter <andre.o.richter@gmail.com>
//! BSP driver support.
use crate::driver;
//--------------------------------------------------------------------------------------------------
// Private Definitions
//--------------------------------------------------------------------------------------------------
/// Device Driver Manager type.
struct BSPDriverManager {
device_drivers: [&'static (dyn DeviceDriver + Sync); 3],
}
//--------------------------------------------------------------------------------------------------
// Global instances
//--------------------------------------------------------------------------------------------------
static BSP_DRIVER_MANAGER: BSPDriverManager = BSPDriverManager {
device_drivers: [
&super::GPIO,
&super::PL011_UART,
&super::INTERRUPT_CONTROLLER,
],
};
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
/// Return a reference to the driver manager.
pub fn driver_manager() -> &'static impl driver::interface::DriverManager {
&BSP_DRIVER_MANAGER
}
//------------------------------------------------------------------------------
// OS Interface Code
//------------------------------------------------------------------------------
use driver::interface::DeviceDriver;
impl driver::interface::DriverManager for BSPDriverManager {
fn all_device_drivers(&self) -> &[&'static (dyn DeviceDriver + Sync)] {
&self.device_drivers[..]
}
fn early_print_device_drivers(&self) -> &[&'static (dyn DeviceDriver + Sync)] {
&self.device_drivers[0..=1]
}
fn non_early_print_device_drivers(&self) -> &[&'static (dyn DeviceDriver + Sync)] {
&self.device_drivers[2..]
}
fn post_early_print_device_driver_init(&self) {
// Configure PL011Uart's output pins.
super::GPIO.map_pl011_uart();
}
}

@ -0,0 +1,7 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2020-2022 Andre Richter <andre.o.richter@gmail.com>
//! BSP synchronous and asynchronous exception handling.
pub mod asynchronous;

@ -0,0 +1,36 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2020-2022 Andre Richter <andre.o.richter@gmail.com>
//! BSP asynchronous exception handling.
use crate::{bsp, exception};
//--------------------------------------------------------------------------------------------------
// Public Definitions
//--------------------------------------------------------------------------------------------------
#[cfg(feature = "bsp_rpi3")]
pub(in crate::bsp) mod irq_map {
use super::bsp::device_driver::{IRQNumber, PeripheralIRQ};
pub const PL011_UART: IRQNumber = IRQNumber::Peripheral(PeripheralIRQ::new(57));
}
#[cfg(feature = "bsp_rpi4")]
pub(in crate::bsp) mod irq_map {
use super::bsp::device_driver::IRQNumber;
pub const PL011_UART: IRQNumber = IRQNumber::new(153);
}
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
/// Return a reference to the IRQ manager.
pub fn irq_manager() -> &'static impl exception::asynchronous::interface::IRQManager<
IRQNumberType = bsp::device_driver::IRQNumber,
> {
&super::super::INTERRUPT_CONTROLLER
}

@ -0,0 +1,114 @@
/* SPDX-License-Identifier: MIT OR Apache-2.0
*
* Copyright (c) 2018-2022 Andre Richter <andre.o.richter@gmail.com>
*/
INCLUDE kernel_virt_addr_space_size.ld;
PAGE_SIZE = 64K;
PAGE_MASK = PAGE_SIZE - 1;
/* The kernel's virtual address range will be:
*
* [END_ADDRESS_INCLUSIVE, START_ADDRESS]
* [u64::MAX , (u64::MAX - __kernel_virt_addr_space_size) + 1]
*/
__kernel_virt_start_addr = ((0xffffffffffffffff - __kernel_virt_addr_space_size) + 1);
__rpi_phys_dram_start_addr = 0;
/* The physical address at which the kernel binary will be loaded by the Raspberry's firmware */
__rpi_phys_binary_load_addr = 0x80000;
ENTRY(__rpi_phys_binary_load_addr)
/* Flags:
* 4 == R
* 5 == RX
* 6 == RW
*
* Segments are marked PT_LOAD below so that the ELF file provides virtual and physical addresses.
* It doesn't mean all of them need to actually be loaded.
*/
PHDRS
{
segment_code PT_LOAD FLAGS(5);
segment_data PT_LOAD FLAGS(6);
segment_boot_core_stack PT_LOAD FLAGS(6);
}
SECTIONS
{
. = __kernel_virt_start_addr;
ASSERT((. & PAGE_MASK) == 0, "Start of address space is not page aligned")
/***********************************************************************************************
* Code + RO Data + Global Offset Table
***********************************************************************************************/
__code_start = .;
.text : AT(__rpi_phys_binary_load_addr)
{
KEEP(*(.text._start))
*(.text._start_arguments) /* Constants (or statics in Rust speak) read by _start(). */
*(.text._start_rust) /* The Rust entry point */
*(.text*) /* Everything else */
} :segment_code
.rodata : ALIGN(8) { *(.rodata*) } :segment_code
.got : ALIGN(8) { *(.got) } :segment_code
.kernel_symbols : ALIGN(8) {
__kernel_symbols_start = .;
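/* Reserve 32 KiB for kernel symbol data. */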
. += 32 * 1024;
} :segment_code
. = ALIGN(PAGE_SIZE);
__code_end_exclusive = .;
/***********************************************************************************************
* Data + BSS
***********************************************************************************************/
__data_start = .;
.data : { *(.data*) } :segment_data
/* Section is zeroed in pairs of u64. Align start and end to 16 bytes */
.bss (NOLOAD) : ALIGN(16)
{
__bss_start = .;
*(.bss*);
. = ALIGN(16);
__bss_end_exclusive = .;
} :segment_data
. = ALIGN(PAGE_SIZE);
__data_end_exclusive = .;
/***********************************************************************************************
* MMIO Remap Reserved
***********************************************************************************************/
__mmio_remap_start = .;
. += 8 * 1024 * 1024;
__mmio_remap_end_exclusive = .;
ASSERT((. & PAGE_MASK) == 0, "MMIO remap reservation is not page aligned")
/***********************************************************************************************
* Guard Page
***********************************************************************************************/
. += PAGE_SIZE;
/***********************************************************************************************
* Boot Core Stack
***********************************************************************************************/
.boot_core_stack (NOLOAD) : AT(__rpi_phys_dram_start_addr)
{
__boot_core_stack_start = .; /* ^ */
/* | stack */
. += __rpi_phys_binary_load_addr; /* | growth */
/* | direction */
__boot_core_stack_end_exclusive = .; /* | */
} :segment_boot_core_stack
ASSERT((. & PAGE_MASK) == 0, "End of boot core stack is not page aligned")
}

@ -0,0 +1 @@
__kernel_virt_addr_space_size = 1024 * 1024 * 1024;

@ -0,0 +1,227 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2018-2022 Andre Richter <andre.o.richter@gmail.com>
//! BSP Memory Management.
//!
//! The physical memory layout.
//!
//! The Raspberry's firmware copies the kernel binary to 0x8_0000. The preceding region will be used
//! as the boot core's stack.
//!
//! +---------------------------------------+
//! | | boot_core_stack_start @ 0x0
//! | | ^
//! | Boot-core Stack | | stack
//! | | | growth
//! | | | direction
//! +---------------------------------------+
//! | | code_start @ 0x8_0000 == boot_core_stack_end_exclusive
//! | .text |
//! | .rodata |
//! | .got |
//! | .kernel_symbols |
//! | |
//! +---------------------------------------+
//! | | data_start == code_end_exclusive
//! | .data |
//! | .bss |
//! | |
//! +---------------------------------------+
//! | | data_end_exclusive
//! | |
//!
//!
//!
//!
//!
//! The virtual memory layout is as follows:
//!
//! +---------------------------------------+
//! | | code_start @ __kernel_virt_start_addr
//! | .text |
//! | .rodata |
//! | .got |
//! | .kernel_symbols |
//! | |
//! +---------------------------------------+
//! | | data_start == code_end_exclusive
//! | .data |
//! | .bss |
//! | |
//! +---------------------------------------+
//! | | mmio_remap_start == data_end_exclusive
//! | VA region for MMIO remapping |
//! | |
//! +---------------------------------------+
//! | | mmio_remap_end_exclusive
//! | Unmapped guard page |
//! | |
//! +---------------------------------------+
//! | | boot_core_stack_start
//! | | ^
//! | Boot-core Stack | | stack
//! | | | growth
//! | | | direction
//! +---------------------------------------+
//! | | boot_core_stack_end_exclusive
//! | |
pub mod mmu;
use crate::memory::{mmu::PageAddress, Address, Physical, Virtual};
use core::cell::UnsafeCell;
//--------------------------------------------------------------------------------------------------
// Private Definitions
//--------------------------------------------------------------------------------------------------
// Symbols from the linker script.
extern "Rust" {
static __code_start: UnsafeCell<()>;
static __code_end_exclusive: UnsafeCell<()>;
static __data_start: UnsafeCell<()>;
static __data_end_exclusive: UnsafeCell<()>;
static __mmio_remap_start: UnsafeCell<()>;
static __mmio_remap_end_exclusive: UnsafeCell<()>;
static __boot_core_stack_start: UnsafeCell<()>;
static __boot_core_stack_end_exclusive: UnsafeCell<()>;
}
//--------------------------------------------------------------------------------------------------
// Public Definitions
//--------------------------------------------------------------------------------------------------
/// The board's physical memory map.
#[rustfmt::skip]
pub(super) mod map {
use super::*;
/// Physical devices.
#[cfg(feature = "bsp_rpi3")]
pub mod mmio {
use super::*;
pub const PERIPHERAL_IC_START: Address<Physical> = Address::new(0x3F00_B200);
pub const PERIPHERAL_IC_SIZE: usize = 0x24;
pub const GPIO_START: Address<Physical> = Address::new(0x3F20_0000);
pub const GPIO_SIZE: usize = 0xA0;
pub const PL011_UART_START: Address<Physical> = Address::new(0x3F20_1000);
pub const PL011_UART_SIZE: usize = 0x48;
pub const LOCAL_IC_START: Address<Physical> = Address::new(0x4000_0000);
pub const LOCAL_IC_SIZE: usize = 0x100;
pub const END: Address<Physical> = Address::new(0x4001_0000);
}
/// Physical devices.
#[cfg(feature = "bsp_rpi4")]
pub mod mmio {
use super::*;
pub const GPIO_START: Address<Physical> = Address::new(0xFE20_0000);
pub const GPIO_SIZE: usize = 0xA0;
pub const PL011_UART_START: Address<Physical> = Address::new(0xFE20_1000);
pub const PL011_UART_SIZE: usize = 0x48;
pub const GICD_START: Address<Physical> = Address::new(0xFF84_1000);
pub const GICD_SIZE: usize = 0x824;
pub const GICC_START: Address<Physical> = Address::new(0xFF84_2000);
pub const GICC_SIZE: usize = 0x14;
pub const END: Address<Physical> = Address::new(0xFF85_0000);
}
pub const END: Address<Physical> = mmio::END;
}
//--------------------------------------------------------------------------------------------------
// Private Code
//--------------------------------------------------------------------------------------------------
/// Start page address of the code segment.
///
/// # Safety
///
/// - Value is provided by the linker script and must be trusted as-is.
#[inline(always)]
fn virt_code_start() -> PageAddress<Virtual> {
PageAddress::from(unsafe { __code_start.get() as usize })
}
/// Size of the code segment.
///
/// # Safety
///
/// - Value is provided by the linker script and must be trusted as-is.
#[inline(always)]
fn code_size() -> usize {
unsafe { (__code_end_exclusive.get() as usize) - (__code_start.get() as usize) }
}
/// Start page address of the data segment.
#[inline(always)]
fn virt_data_start() -> PageAddress<Virtual> {
PageAddress::from(unsafe { __data_start.get() as usize })
}
/// Size of the data segment.
///
/// # Safety
///
/// - Value is provided by the linker script and must be trusted as-is.
#[inline(always)]
fn data_size() -> usize {
unsafe { (__data_end_exclusive.get() as usize) - (__data_start.get() as usize) }
}
/// Start page address of the MMIO remap reservation.
///
/// # Safety
///
/// - Value is provided by the linker script and must be trusted as-is.
#[inline(always)]
fn virt_mmio_remap_start() -> PageAddress<Virtual> {
PageAddress::from(unsafe { __mmio_remap_start.get() as usize })
}
/// Size of the MMIO remap reservation.
///
/// # Safety
///
/// - Value is provided by the linker script and must be trusted as-is.
#[inline(always)]
fn mmio_remap_size() -> usize {
unsafe { (__mmio_remap_end_exclusive.get() as usize) - (__mmio_remap_start.get() as usize) }
}
/// Start page address of the boot core's stack.
#[inline(always)]
fn virt_boot_core_stack_start() -> PageAddress<Virtual> {
PageAddress::from(unsafe { __boot_core_stack_start.get() as usize })
}
/// Size of the boot core's stack.
#[inline(always)]
fn boot_core_stack_size() -> usize {
unsafe {
(__boot_core_stack_end_exclusive.get() as usize) - (__boot_core_stack_start.get() as usize)
}
}
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
/// Exclusive end address of the physical address space.
#[inline(always)]
pub fn phys_addr_space_end_exclusive_addr() -> PageAddress<Physical> {
PageAddress::from(map::END)
}

@ -0,0 +1,179 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2018-2022 Andre Richter <andre.o.richter@gmail.com>
//! BSP Memory Management Unit.
use crate::{
memory::{
mmu::{
self as generic_mmu, AddressSpace, AssociatedTranslationTable, AttributeFields,
MemoryRegion, PageAddress, TranslationGranule,
},
Physical, Virtual,
},
synchronization::InitStateLock,
};
//--------------------------------------------------------------------------------------------------
// Private Definitions
//--------------------------------------------------------------------------------------------------
type KernelTranslationTable =
<KernelVirtAddrSpace as AssociatedTranslationTable>::TableStartFromTop;
//--------------------------------------------------------------------------------------------------
// Public Definitions
//--------------------------------------------------------------------------------------------------
/// The translation granule chosen by this BSP. This will be used everywhere else in the kernel to
/// derive respective data structures and their sizes. For example, the `crate::memory::mmu::Page`.
pub type KernelGranule = TranslationGranule<{ 64 * 1024 }>;
/// The kernel's virtual address space defined by this BSP.
pub type KernelVirtAddrSpace = AddressSpace<{ kernel_virt_addr_space_size() }>;
//--------------------------------------------------------------------------------------------------
// Global instances
//--------------------------------------------------------------------------------------------------
/// The kernel translation tables.
///
/// It is mandatory that InitStateLock is transparent.
///
/// That is, `size_of(InitStateLock<KernelTranslationTable>) == size_of(KernelTranslationTable)`.
/// There is a unit test that checks this property.
#[link_section = ".data"]
#[no_mangle]
static KERNEL_TABLES: InitStateLock<KernelTranslationTable> =
InitStateLock::new(KernelTranslationTable::new_for_precompute());
/// This value is needed during early boot for MMU setup.
///
/// This will be patched to the correct value by the "translation table tool" after linking. This
/// given value here is just a dummy.
#[link_section = ".text._start_arguments"]
#[no_mangle]
static PHYS_KERNEL_TABLES_BASE_ADDR: u64 = 0xCCCCAAAAFFFFEEEE;
//--------------------------------------------------------------------------------------------------
// Private Code
//--------------------------------------------------------------------------------------------------
/// This is a hack for retrieving the value for the kernel's virtual address space size as a
/// constant from a common place, since it is needed as a compile-time/link-time constant in both
/// the linker script and the Rust sources.
#[allow(clippy::needless_late_init)]
const fn kernel_virt_addr_space_size() -> usize {
let __kernel_virt_addr_space_size;
include!("../kernel_virt_addr_space_size.ld");
__kernel_virt_addr_space_size
}
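// The included `kernel_virt_addr_space_size.ld` is assumed to contain a single linker-style
// assignment, for example `__kernel_virt_addr_space_size = (1 << 30);`, so that the same value
// feeds both the linker script and this `const fn`.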
/// Helper function for calculating the number of pages the given parameter spans.
const fn size_to_num_pages(size: usize) -> usize {
assert!(size > 0);
assert!(size % KernelGranule::SIZE == 0);
size >> KernelGranule::SHIFT
}
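// Illustrative example, assuming the 64 KiB `KernelGranule`: `size_to_num_pages(128 * 1024)`
// evaluates to 2, while a size that is not a multiple of the granule trips the assert.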
/// The code pages of the kernel binary.
fn virt_code_region() -> MemoryRegion<Virtual> {
let num_pages = size_to_num_pages(super::code_size());
let start_page_addr = super::virt_code_start();
let end_exclusive_page_addr = start_page_addr.checked_offset(num_pages as isize).unwrap();
MemoryRegion::new(start_page_addr, end_exclusive_page_addr)
}
/// The data pages of the kernel binary.
fn virt_data_region() -> MemoryRegion<Virtual> {
let num_pages = size_to_num_pages(super::data_size());
let start_page_addr = super::virt_data_start();
let end_exclusive_page_addr = start_page_addr.checked_offset(num_pages as isize).unwrap();
MemoryRegion::new(start_page_addr, end_exclusive_page_addr)
}
/// The boot core stack pages.
fn virt_boot_core_stack_region() -> MemoryRegion<Virtual> {
let num_pages = size_to_num_pages(super::boot_core_stack_size());
let start_page_addr = super::virt_boot_core_stack_start();
let end_exclusive_page_addr = start_page_addr.checked_offset(num_pages as isize).unwrap();
MemoryRegion::new(start_page_addr, end_exclusive_page_addr)
}
// There is no reason to expect the following conversions to fail, since they were generated offline
// by the `translation table tool`. If one does fail, a panic due to the unwraps is justified.
fn kernel_virt_to_phys_region(virt_region: MemoryRegion<Virtual>) -> MemoryRegion<Physical> {
let phys_start_page_addr =
generic_mmu::try_kernel_virt_page_addr_to_phys_page_addr(virt_region.start_page_addr())
.unwrap();
let phys_end_exclusive_page_addr = phys_start_page_addr
.checked_offset(virt_region.num_pages() as isize)
.unwrap();
MemoryRegion::new(phys_start_page_addr, phys_end_exclusive_page_addr)
}
fn kernel_page_attributes(virt_page_addr: PageAddress<Virtual>) -> AttributeFields {
generic_mmu::try_kernel_page_attributes(virt_page_addr).unwrap()
}
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
/// Return a reference to the kernel's translation tables.
pub fn kernel_translation_tables() -> &'static InitStateLock<KernelTranslationTable> {
&KERNEL_TABLES
}
/// The MMIO remap pages.
pub fn virt_mmio_remap_region() -> MemoryRegion<Virtual> {
let num_pages = size_to_num_pages(super::mmio_remap_size());
let start_page_addr = super::virt_mmio_remap_start();
let end_exclusive_page_addr = start_page_addr.checked_offset(num_pages as isize).unwrap();
MemoryRegion::new(start_page_addr, end_exclusive_page_addr)
}
/// Add mapping records for the kernel binary.
///
/// The actual translation table entries for the kernel binary are generated using the offline
/// `translation table tool` and patched into the kernel binary. This function just adds the mapping
/// record entries.
pub fn kernel_add_mapping_records_for_precomputed() {
let virt_code_region = virt_code_region();
generic_mmu::kernel_add_mapping_record(
"Kernel code and RO data",
&virt_code_region,
&kernel_virt_to_phys_region(virt_code_region),
&kernel_page_attributes(virt_code_region.start_page_addr()),
);
let virt_data_region = virt_data_region();
generic_mmu::kernel_add_mapping_record(
"Kernel data and bss",
&virt_data_region,
&kernel_virt_to_phys_region(virt_data_region),
&kernel_page_attributes(virt_data_region.start_page_addr()),
);
let virt_boot_core_stack_region = virt_boot_core_stack_region();
generic_mmu::kernel_add_mapping_record(
"Kernel boot-core stack",
&virt_boot_core_stack_region,
&kernel_virt_to_phys_region(virt_boot_core_stack_region),
&kernel_page_attributes(virt_boot_core_stack_region.start_page_addr()),
);
}

@ -0,0 +1,29 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2020-2022 Andre Richter <andre.o.richter@gmail.com>
//! General purpose code.
/// Check if a value is aligned to a given size.
#[inline(always)]
pub const fn is_aligned(value: usize, alignment: usize) -> bool {
assert!(alignment.is_power_of_two());
(value & (alignment - 1)) == 0
}
/// Align down.
#[inline(always)]
pub const fn align_down(value: usize, alignment: usize) -> usize {
assert!(alignment.is_power_of_two());
value & !(alignment - 1)
}
/// Align up.
#[inline(always)]
pub const fn align_up(value: usize, alignment: usize) -> usize {
assert!(alignment.is_power_of_two());
(value + alignment - 1) & !(alignment - 1)
}
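// Illustrative examples with a 64 KiB (0x1_0000) alignment: `align_up(0x1_0001, 0x1_0000)` yields
// 0x2_0000, `align_down(0x1_ffff, 0x1_0000)` yields 0x1_0000, and `is_aligned(0x2_0000, 0x1_0000)`
// is true.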

@ -0,0 +1,53 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2018-2022 Andre Richter <andre.o.richter@gmail.com>
//! System console.
//--------------------------------------------------------------------------------------------------
// Public Definitions
//--------------------------------------------------------------------------------------------------
/// Console interfaces.
pub mod interface {
use core::fmt;
/// Console write functions.
pub trait Write {
/// Write a single character.
fn write_char(&self, c: char);
/// Write a Rust format string.
fn write_fmt(&self, args: fmt::Arguments) -> fmt::Result;
/// Block until the last buffered character has been physically put on the TX wire.
fn flush(&self);
}
/// Console read functions.
pub trait Read {
/// Read a single character.
fn read_char(&self) -> char {
' '
}
/// Clear RX buffers, if any.
fn clear_rx(&self);
}
/// Console statistics.
pub trait Statistics {
/// Return the number of characters written.
fn chars_written(&self) -> usize {
0
}
/// Return the number of characters read.
fn chars_read(&self) -> usize {
0
}
}
/// Trait alias for a full-fledged console.
pub trait All = Write + Read + Statistics;
}

@ -0,0 +1,21 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2020-2022 Andre Richter <andre.o.richter@gmail.com>
//! Processor code.
#[cfg(target_arch = "aarch64")]
#[path = "_arch/aarch64/cpu.rs"]
mod arch_cpu;
mod boot;
pub mod smp;
//--------------------------------------------------------------------------------------------------
// Architectural Public Reexports
//--------------------------------------------------------------------------------------------------
pub use arch_cpu::{nop, wait_forever};
#[cfg(feature = "test_build")]
pub use arch_cpu::{qemu_exit_failure, qemu_exit_success};

@ -0,0 +1,9 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2021-2022 Andre Richter <andre.o.richter@gmail.com>
//! Boot code.
#[cfg(target_arch = "aarch64")]
#[path = "../_arch/aarch64/cpu/boot.rs"]
mod arch_boot;

@ -0,0 +1,14 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2018-2022 Andre Richter <andre.o.richter@gmail.com>
//! Symmetric multiprocessing.
#[cfg(target_arch = "aarch64")]
#[path = "../_arch/aarch64/cpu/smp.rs"]
mod arch_smp;
//--------------------------------------------------------------------------------------------------
// Architectural Public Reexports
//--------------------------------------------------------------------------------------------------
pub use arch_smp::core_id;

@ -0,0 +1,62 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2018-2022 Andre Richter <andre.o.richter@gmail.com>
//! Driver support.
//--------------------------------------------------------------------------------------------------
// Public Definitions
//--------------------------------------------------------------------------------------------------
/// Driver interfaces.
pub mod interface {
/// Device Driver functions.
pub trait DeviceDriver {
/// Return a compatibility string for identifying the driver.
fn compatible(&self) -> &'static str;
/// Called by the kernel to bring up the device.
///
/// # Safety
///
/// - During init, drivers might do stuff with system-wide impact.
unsafe fn init(&self) -> Result<(), &'static str> {
Ok(())
}
/// Called by the kernel to register and enable the device's IRQ handlers, if any.
///
/// Rust's type system will prevent a call to this function unless the calling instance
/// itself has static lifetime.
fn register_and_enable_irq_handler(&'static self) -> Result<(), &'static str> {
Ok(())
}
/// After MMIO remapping, returns the new virtual start address.
///
/// This API assumes a driver has only a single, contiguous MMIO aperture, which will not be
/// the case for more complex devices. This API will likely change in future tutorials.
fn virt_mmio_start_addr(&self) -> Option<usize> {
None
}
}
/// Device driver management functions.
///
/// The `BSP` is supposed to supply one global instance.
pub trait DriverManager {
/// Return a slice of references to all `BSP`-instantiated drivers.
fn all_device_drivers(&self) -> &[&'static (dyn DeviceDriver + Sync)];
/// Return only those drivers needed for the BSP's early printing functionality.
///
/// For example, the default UART.
fn early_print_device_drivers(&self) -> &[&'static (dyn DeviceDriver + Sync)];
/// Return all drivers minus early-print drivers.
fn non_early_print_device_drivers(&self) -> &[&'static (dyn DeviceDriver + Sync)];
/// Initialization code that runs after the early print driver init.
fn post_early_print_device_driver_init(&self);
}
}
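// Minimal implementor sketch (illustrative only; `DummyDriver` is not part of this kernel). Only
// `compatible()` is mandatory, since the remaining trait methods have default implementations:
//
// struct DummyDriver;
//
// impl interface::DeviceDriver for DummyDriver {
//     fn compatible(&self) -> &'static str {
//         "Dummy"
//     }
// }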

@ -0,0 +1,48 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2020-2022 Andre Richter <andre.o.richter@gmail.com>
//! Synchronous and asynchronous exception handling.
#[cfg(target_arch = "aarch64")]
#[path = "_arch/aarch64/exception.rs"]
mod arch_exception;
pub mod asynchronous;
//--------------------------------------------------------------------------------------------------
// Architectural Public Reexports
//--------------------------------------------------------------------------------------------------
pub use arch_exception::{current_privilege_level, handling_init};
//--------------------------------------------------------------------------------------------------
// Public Definitions
//--------------------------------------------------------------------------------------------------
/// Kernel privilege levels.
#[allow(missing_docs)]
#[derive(PartialEq)]
pub enum PrivilegeLevel {
User,
Kernel,
Hypervisor,
Unknown,
}
//--------------------------------------------------------------------------------------------------
// Testing
//--------------------------------------------------------------------------------------------------
#[cfg(test)]
mod tests {
use super::*;
use test_macros::kernel_test;
/// Libkernel unit tests must execute in kernel mode.
#[kernel_test]
fn test_runner_executes_in_kernel_mode() {
let (level, _) = current_privilege_level();
assert!(level == PrivilegeLevel::Kernel)
}
}

@ -0,0 +1,152 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2020-2022 Andre Richter <andre.o.richter@gmail.com>
//! Asynchronous exception handling.
#[cfg(target_arch = "aarch64")]
#[path = "../_arch/aarch64/exception/asynchronous.rs"]
mod arch_asynchronous;
use core::{fmt, marker::PhantomData};
//--------------------------------------------------------------------------------------------------
// Architectural Public Reexports
//--------------------------------------------------------------------------------------------------
pub use arch_asynchronous::{
is_local_irq_masked, local_irq_mask, local_irq_mask_save, local_irq_restore, local_irq_unmask,
print_state,
};
//--------------------------------------------------------------------------------------------------
// Public Definitions
//--------------------------------------------------------------------------------------------------
/// Interrupt descriptor.
#[derive(Copy, Clone)]
pub struct IRQDescriptor {
/// Descriptive name.
pub name: &'static str,
/// Reference to handler trait object.
pub handler: &'static (dyn interface::IRQHandler + Sync),
}
/// IRQContext token.
///
/// An instance of this type indicates that the local core is currently executing in IRQ
/// context, aka executing an interrupt vector or subcalls of it.
///
/// Concept and implementation derived from the `CriticalSection` introduced in
/// <https://github.com/rust-embedded/bare-metal>
#[derive(Clone, Copy)]
pub struct IRQContext<'irq_context> {
_0: PhantomData<&'irq_context ()>,
}
/// Asynchronous exception handling interfaces.
pub mod interface {
/// Implemented by types that handle IRQs.
pub trait IRQHandler {
/// Called when the corresponding interrupt is asserted.
fn handle(&self) -> Result<(), &'static str>;
}
/// IRQ management functions.
///
/// The `BSP` is supposed to supply one global instance. Typically implemented by the
/// platform's interrupt controller.
pub trait IRQManager {
/// The IRQ number type depends on the implementation.
type IRQNumberType;
/// Register a handler.
fn register_handler(
&self,
irq_number: Self::IRQNumberType,
descriptor: super::IRQDescriptor,
) -> Result<(), &'static str>;
/// Enable an interrupt in the controller.
fn enable(&self, irq_number: Self::IRQNumberType);
/// Handle pending interrupts.
///
/// This function is called directly from the CPU's IRQ exception vector. On AArch64,
/// this means that the respective CPU core has disabled exception handling.
/// This function can therefore not be preempted and runs start to finish.
///
/// Takes an IRQContext token to ensure it can only be called from IRQ context.
#[allow(clippy::trivially_copy_pass_by_ref)]
fn handle_pending_irqs<'irq_context>(
&'irq_context self,
ic: &super::IRQContext<'irq_context>,
);
/// Print list of registered handlers.
fn print_handler(&self);
}
}
/// A wrapper type for IRQ numbers with integrated range sanity check.
#[derive(Copy, Clone)]
pub struct IRQNumber<const MAX_INCLUSIVE: usize>(usize);
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
impl<'irq_context> IRQContext<'irq_context> {
/// Creates an IRQContext token.
///
/// # Safety
///
/// - This must only be called when the current core is in an interrupt context and will not
/// live beyond the end of it. That is, creation is allowed in interrupt vector functions. For
/// example, in the ARMv8-A case, in `extern "C" fn current_elx_irq()`.
/// - Note that the lifetime `'irq_context` of the returned instance is unconstrained. User code
/// must not be able to influence the lifetime picked for this type, since that might cause it
/// to be inferred to `'static`.
#[inline(always)]
pub unsafe fn new() -> Self {
IRQContext { _0: PhantomData }
}
}
impl<const MAX_INCLUSIVE: usize> IRQNumber<{ MAX_INCLUSIVE }> {
/// Creates a new instance if number <= MAX_INCLUSIVE.
pub const fn new(number: usize) -> Self {
assert!(number <= MAX_INCLUSIVE);
Self(number)
}
/// Return the wrapped number.
pub const fn get(self) -> usize {
self.0
}
}
impl<const MAX_INCLUSIVE: usize> fmt::Display for IRQNumber<{ MAX_INCLUSIVE }> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.0)
}
}
/// Executes the provided closure while IRQs are masked on the executing core.
///
/// While the function temporarily changes the HW state of the executing core, it restores it to the
/// previous state before returning, so this is deemed safe.
#[inline(always)]
pub fn exec_with_irq_masked<T>(f: impl FnOnce() -> T) -> T {
let ret: T;
unsafe {
let saved = local_irq_mask_save();
ret = f();
local_irq_restore(saved);
}
ret
}
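// Usage sketch (illustrative only; the closure body is a placeholder): run a short critical
// section with IRQs masked on the local core; the previous mask state is restored afterwards:
//
// let value = exec_with_irq_masked(|| read_shared_counter());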

@ -0,0 +1,185 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2018-2022 Andre Richter <andre.o.richter@gmail.com>
// Rust embedded logo for `make doc`.
#![doc(html_logo_url = "https://git.io/JeGIp")]
//! The `kernel` library.
//!
//! Used to compose the final kernel binary.
//!
//! # Code organization and architecture
//!
//! The code is divided into different *modules*, each representing a typical **subsystem** of the
//! `kernel`. Top-level module files of subsystems reside directly in the `src` folder. For example,
//! `src/memory.rs` contains code that is concerned with all things memory management.
//!
//! ## Visibility of processor architecture code
//!
//! Some of the `kernel`'s subsystems depend on low-level code that is specific to the target
//! processor architecture. For each supported processor architecture, there exists a subfolder in
//! `src/_arch`, for example, `src/_arch/aarch64`.
//!
//! The architecture folders mirror the subsystem modules laid out in `src`. For example,
//! architectural code that belongs to the `kernel`'s MMU subsystem (`src/memory/mmu.rs`) would go
//! into `src/_arch/aarch64/memory/mmu.rs`. The latter file is loaded as a module in
//! `src/memory/mmu.rs` using the `path attribute`. Usually, the chosen module name is the generic
//! module's name prefixed with `arch_`.
//!
//! For example, this is the top of `src/memory/mmu.rs`:
//!
//! ```
//! #[cfg(target_arch = "aarch64")]
//! #[path = "../_arch/aarch64/memory/mmu.rs"]
//! mod arch_mmu;
//! ```
//!
//! Oftentimes, items from the `arch_` module will be publicly reexported by the parent module. This
//! way, each architecture-specific module can provide its implementation of an item, while the
//! caller need not be concerned with which architecture has been conditionally compiled.
//!
//! ## BSP code
//!
//! `BSP` stands for Board Support Package. `BSP` code is organized under `src/bsp.rs` and contains
//! target board specific definitions and functions. These are things such as the board's memory map
//! or instances of drivers for devices that are featured on the respective board.
//!
//! Just like processor architecture code, the `BSP` code's module structure tries to mirror the
//! `kernel`'s subsystem modules, but there is no reexporting this time. That means whatever is
//! provided must be called starting from the `bsp` namespace, e.g. `bsp::driver::driver_manager()`.
//!
//! ## Kernel interfaces
//!
//! Both `arch` and `bsp` contain code that is conditionally compiled depending on the actual target
//! and board for which the kernel is compiled. For example, the `interrupt controller` hardware of
//! the `Raspberry Pi 3` and the `Raspberry Pi 4` is different, but we want the rest of the `kernel`
//! code to play nicely with either of the two without much hassle.
//!
//! In order to provide a clean abstraction between `arch`, `bsp` and `generic kernel code`,
//! `interface` traits are provided *whenever possible* and *where it makes sense*. They are defined
//! in the respective subsystem module and help to enforce the idiom of *program to an interface,
//! not an implementation*. For example, there will be a common IRQ handling interface which the two
//! different interrupt controller `drivers` of both Raspberry Pis implement, exporting only the
//! interface to the rest of the `kernel`.
//!
//! ```
//! +-------------------+
//! | Interface (Trait) |
//! | |
//! +--+-------------+--+
//! ^ ^
//! | |
//! | |
//! +----------+--+ +--+----------+
//! | kernel code | | bsp code |
//! | | | arch code |
//! +-------------+ +-------------+
//! ```
//!
//! # Summary
//!
//! For a logical `kernel` subsystem, corresponding code can be distributed over several physical
//! locations. Here is an example for the **memory** subsystem:
//!
//! - `src/memory.rs` and `src/memory/**/*`
//! - Common code that is agnostic of target processor architecture and `BSP` characteristics.
//! - Example: A function to zero a chunk of memory.
//! - Interfaces for the memory subsystem that are implemented by `arch` or `BSP` code.
//! - Example: An `MMU` interface that defines `MMU` function prototypes.
//! - `src/bsp/__board_name__/memory.rs` and `src/bsp/__board_name__/memory/**/*`
//! - `BSP` specific code.
//! - Example: The board's memory map (physical addresses of DRAM and MMIO devices).
//! - `src/_arch/__arch_name__/memory.rs` and `src/_arch/__arch_name__/memory/**/*`
//! - Processor architecture specific code.
//! - Example: Implementation of the `MMU` interface for the `__arch_name__` processor
//! architecture.
//!
//! From a namespace perspective, **memory** subsystem code lives in:
//!
//! - `crate::memory::*`
//! - `crate::bsp::memory::*`
//!
//! # Boot flow
//!
//! 1. The kernel's entry point is the function `cpu::boot::arch_boot::_start()`.
//! - It is implemented in `src/_arch/__arch_name__/cpu/boot.s`.
//! 2. Once finished with architectural setup, the arch code calls `kernel_init()`.
#![allow(clippy::upper_case_acronyms)]
#![allow(incomplete_features)]
#![feature(asm_const)]
#![feature(core_intrinsics)]
#![feature(format_args_nl)]
#![feature(generic_const_exprs)]
#![feature(linkage)]
#![feature(panic_info_message)]
#![feature(step_trait)]
#![feature(trait_alias)]
#![no_std]
// Testing
#![cfg_attr(test, no_main)]
#![feature(custom_test_frameworks)]
#![reexport_test_harness_main = "test_main"]
#![test_runner(crate::test_runner)]
mod panic_wait;
mod synchronization;
pub mod bsp;
pub mod common;
pub mod console;
pub mod cpu;
pub mod driver;
pub mod exception;
pub mod memory;
pub mod print;
pub mod state;
pub mod symbols;
pub mod time;
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
/// Version string.
pub fn version() -> &'static str {
concat!(
env!("CARGO_PKG_NAME"),
" version ",
env!("CARGO_PKG_VERSION")
)
}
//--------------------------------------------------------------------------------------------------
// Testing
//--------------------------------------------------------------------------------------------------
/// The default runner for unit tests.
pub fn test_runner(tests: &[&test_types::UnitTest]) {
// This line will be printed as the test header.
println!("Running {} tests", tests.len());
for (i, test) in tests.iter().enumerate() {
print!("{:>3}. {:.<58}", i + 1, test.name);
// Run the actual test.
(test.test_func)();
// Failed tests call panic!(). Execution reaches here only if the test has passed.
println!("[ok]")
}
}
/// The `kernel_init()` for unit tests.
#[cfg(test)]
#[no_mangle]
unsafe fn kernel_init() -> ! {
exception::handling_init();
memory::mmu::post_enable_init();
bsp::console::qemu_bring_up_console();
test_main();
cpu::qemu_exit_success()
}

@ -0,0 +1,109 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2018-2022 Andre Richter <andre.o.richter@gmail.com>
// Rust embedded logo for `make doc`.
#![doc(html_logo_url = "https://git.io/JeGIp")]
//! The `kernel` binary.
#![feature(format_args_nl)]
#![no_main]
#![no_std]
use libkernel::{bsp, cpu, driver, exception, info, memory, state, time, warn};
/// Early init code.
///
/// When this code runs, virtual memory is already enabled.
///
/// # Safety
///
/// - Only a single core must be active and running this function.
/// - Printing will not work until the respective driver's MMIO is remapped.
#[no_mangle]
unsafe fn kernel_init() -> ! {
use driver::interface::DriverManager;
exception::handling_init();
memory::mmu::post_enable_init();
// Add the mapping records for the precomputed entries first, so that they appear on the top of
// the list.
bsp::memory::mmu::kernel_add_mapping_records_for_precomputed();
// Bring up the drivers needed for printing first.
for i in bsp::driver::driver_manager()
.early_print_device_drivers()
.iter()
{
// Any encountered errors cannot be printed yet, obviously, so just safely park the CPU.
i.init().unwrap_or_else(|_| cpu::wait_forever());
}
bsp::driver::driver_manager().post_early_print_device_driver_init();
// Printing available from here on.
// Now bring up the remaining drivers.
for i in bsp::driver::driver_manager()
.non_early_print_device_drivers()
.iter()
{
if let Err(x) = i.init() {
panic!("Error loading driver: {}: {}", i.compatible(), x);
}
}
// Let device drivers register and enable their handlers with the interrupt controller.
for i in bsp::driver::driver_manager().all_device_drivers() {
if let Err(msg) = i.register_and_enable_irq_handler() {
warn!("Error registering IRQ handler: {}", msg);
}
}
// Unmask interrupts on the boot CPU core.
exception::asynchronous::local_irq_unmask();
// Announce conclusion of the kernel_init() phase.
state::state_manager().transition_to_single_core_main();
// Transition from unsafe to safe.
kernel_main()
}
/// The main function running after the early init.
fn kernel_main() -> ! {
use driver::interface::DriverManager;
use exception::asynchronous::interface::IRQManager;
info!("{}", libkernel::version());
info!("Booting on: {}", bsp::board_name());
info!("MMU online:");
memory::mmu::kernel_print_mappings();
let (_, privilege_level) = exception::current_privilege_level();
info!("Current privilege level: {}", privilege_level);
info!("Exception handling state:");
exception::asynchronous::print_state();
info!(
"Architectural timer resolution: {} ns",
time::time_manager().resolution().as_nanos()
);
info!("Drivers loaded:");
for (i, driver) in bsp::driver::driver_manager()
.all_device_drivers()
.iter()
.enumerate()
{
info!(" {}. {}", i + 1, driver.compatible());
}
info!("Registered IRQ handlers:");
bsp::exception::asynchronous::irq_manager().print_handler();
info!("Echoing input now");
cpu::wait_forever();
}

@ -0,0 +1,167 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2018-2022 Andre Richter <andre.o.richter@gmail.com>
//! Memory Management.
pub mod mmu;
use crate::{bsp, common};
use core::{
fmt,
marker::PhantomData,
ops::{Add, Sub},
};
//--------------------------------------------------------------------------------------------------
// Public Definitions
//--------------------------------------------------------------------------------------------------
/// Metadata trait for marking the type of an address.
pub trait AddressType: Copy + Clone + PartialOrd + PartialEq {}
/// Zero-sized type to mark a physical address.
#[derive(Copy, Clone, Debug, PartialOrd, PartialEq)]
pub enum Physical {}
/// Zero-sized type to mark a virtual address.
#[derive(Copy, Clone, Debug, PartialOrd, PartialEq)]
pub enum Virtual {}
/// Generic address type.
#[derive(Copy, Clone, Debug, PartialOrd, PartialEq)]
pub struct Address<ATYPE: AddressType> {
value: usize,
_address_type: PhantomData<fn() -> ATYPE>,
}
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
impl AddressType for Physical {}
impl AddressType for Virtual {}
impl<ATYPE: AddressType> Address<ATYPE> {
/// Create an instance.
pub const fn new(value: usize) -> Self {
Self {
value,
_address_type: PhantomData,
}
}
/// Convert to usize.
pub const fn as_usize(self) -> usize {
self.value
}
/// Align down to page size.
#[must_use]
pub const fn align_down_page(self) -> Self {
let aligned = common::align_down(self.value, bsp::memory::mmu::KernelGranule::SIZE);
Self::new(aligned)
}
/// Align up to page size.
#[must_use]
pub const fn align_up_page(self) -> Self {
let aligned = common::align_up(self.value, bsp::memory::mmu::KernelGranule::SIZE);
Self::new(aligned)
}
/// Checks if the address is page aligned.
pub const fn is_page_aligned(&self) -> bool {
common::is_aligned(self.value, bsp::memory::mmu::KernelGranule::SIZE)
}
/// Return the address' offset into the corresponding page.
pub const fn offset_into_page(&self) -> usize {
self.value & bsp::memory::mmu::KernelGranule::MASK
}
}
impl<ATYPE: AddressType> Add<usize> for Address<ATYPE> {
type Output = Self;
#[inline(always)]
fn add(self, rhs: usize) -> Self::Output {
match self.value.checked_add(rhs) {
None => panic!("Overflow on Address::add"),
Some(x) => Self::new(x),
}
}
}
impl<ATYPE: AddressType> Sub<Address<ATYPE>> for Address<ATYPE> {
type Output = Self;
#[inline(always)]
fn sub(self, rhs: Address<ATYPE>) -> Self::Output {
match self.value.checked_sub(rhs.value) {
None => panic!("Overflow on Address::sub"),
Some(x) => Self::new(x),
}
}
}
impl fmt::Display for Address<Physical> {
    // Don't expect to see physical addresses greater than 40 bits.
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let q3: u8 = ((self.value >> 32) & 0xff) as u8;
let q2: u16 = ((self.value >> 16) & 0xffff) as u16;
let q1: u16 = (self.value & 0xffff) as u16;
write!(f, "0x")?;
write!(f, "{:02x}_", q3)?;
write!(f, "{:04x}_", q2)?;
write!(f, "{:04x}", q1)
}
}
impl fmt::Display for Address<Virtual> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let q4: u16 = ((self.value >> 48) & 0xffff) as u16;
let q3: u16 = ((self.value >> 32) & 0xffff) as u16;
let q2: u16 = ((self.value >> 16) & 0xffff) as u16;
let q1: u16 = (self.value & 0xffff) as u16;
write!(f, "0x")?;
write!(f, "{:04x}_", q4)?;
write!(f, "{:04x}_", q3)?;
write!(f, "{:04x}_", q2)?;
write!(f, "{:04x}", q1)
}
}
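// Formatting examples (illustrative values): the physical address 0x4000_0000 prints as
// "0x00_4000_0000", and the virtual address 0xFFFF_FFFF_FFF0_0000 prints as
// "0xffff_ffff_fff0_0000".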
//--------------------------------------------------------------------------------------------------
// Testing
//--------------------------------------------------------------------------------------------------
#[cfg(test)]
mod tests {
use super::*;
use test_macros::kernel_test;
/// Sanity of [Address] methods.
#[kernel_test]
fn address_type_method_sanity() {
let addr = Address::<Virtual>::new(bsp::memory::mmu::KernelGranule::SIZE + 100);
assert_eq!(
addr.align_down_page().as_usize(),
bsp::memory::mmu::KernelGranule::SIZE
);
assert_eq!(
addr.align_up_page().as_usize(),
bsp::memory::mmu::KernelGranule::SIZE * 2
);
assert!(!addr.is_page_aligned());
assert_eq!(addr.offset_into_page(), 100);
}
}

@ -0,0 +1,270 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2020-2022 Andre Richter <andre.o.richter@gmail.com>
//! Memory Management Unit.
#[cfg(target_arch = "aarch64")]
#[path = "../_arch/aarch64/memory/mmu.rs"]
mod arch_mmu;
mod alloc;
mod mapping_record;
mod translation_table;
mod types;
use crate::{
bsp,
memory::{Address, Physical, Virtual},
synchronization::{self, interface::Mutex},
warn,
};
use core::{fmt, num::NonZeroUsize};
pub use types::*;
//--------------------------------------------------------------------------------------------------
// Public Definitions
//--------------------------------------------------------------------------------------------------
/// MMU enable errors variants.
#[allow(missing_docs)]
#[derive(Debug)]
pub enum MMUEnableError {
AlreadyEnabled,
Other(&'static str),
}
/// Memory Management interfaces.
pub mod interface {
use super::*;
/// MMU functions.
pub trait MMU {
/// Turns on the MMU for the first time and enables data and instruction caching.
///
/// # Safety
///
/// - Changes the HW's global state.
unsafe fn enable_mmu_and_caching(
&self,
phys_tables_base_addr: Address<Physical>,
) -> Result<(), MMUEnableError>;
/// Returns true if the MMU is enabled, false otherwise.
fn is_enabled(&self) -> bool;
}
}
/// Describes the characteristics of a translation granule.
pub struct TranslationGranule<const GRANULE_SIZE: usize>;
/// Describes properties of an address space.
pub struct AddressSpace<const AS_SIZE: usize>;
/// Intended to be implemented for [`AddressSpace`].
pub trait AssociatedTranslationTable {
/// A translation table whose address range is:
///
/// [u64::MAX, (u64::MAX - AS_SIZE) + 1]
type TableStartFromTop;
/// A translation table whose address range is:
///
/// [AS_SIZE - 1, 0]
type TableStartFromBottom;
}
//--------------------------------------------------------------------------------------------------
// Private Code
//--------------------------------------------------------------------------------------------------
use interface::MMU;
use synchronization::interface::ReadWriteEx;
use translation_table::interface::TranslationTable;
/// Query the BSP for the reserved virtual addresses for MMIO remapping and initialize the kernel's
/// MMIO VA allocator with it.
fn kernel_init_mmio_va_allocator() {
let region = bsp::memory::mmu::virt_mmio_remap_region();
alloc::kernel_mmio_va_allocator().lock(|allocator| allocator.initialize(region));
}
/// Map a region in the kernel's translation tables.
///
/// No input checks are done; the input is passed through to the architectural implementation.
///
/// # Safety
///
/// - See `map_at()`.
/// - Does not prevent aliasing.
unsafe fn kernel_map_at_unchecked(
name: &'static str,
virt_region: &MemoryRegion<Virtual>,
phys_region: &MemoryRegion<Physical>,
attr: &AttributeFields,
) -> Result<(), &'static str> {
bsp::memory::mmu::kernel_translation_tables()
.write(|tables| tables.map_at(virt_region, phys_region, attr))?;
kernel_add_mapping_record(name, virt_region, phys_region, attr);
Ok(())
}
/// Try to translate a kernel virtual address to a physical address.
///
/// Will only succeed if there exists a valid mapping for the input address.
fn try_kernel_virt_addr_to_phys_addr(
virt_addr: Address<Virtual>,
) -> Result<Address<Physical>, &'static str> {
bsp::memory::mmu::kernel_translation_tables()
.read(|tables| tables.try_virt_addr_to_phys_addr(virt_addr))
}
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
impl fmt::Display for MMUEnableError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
MMUEnableError::AlreadyEnabled => write!(f, "MMU is already enabled"),
MMUEnableError::Other(x) => write!(f, "{}", x),
}
}
}
impl<const GRANULE_SIZE: usize> TranslationGranule<GRANULE_SIZE> {
/// The granule's size.
pub const SIZE: usize = Self::size_checked();
/// The granule's mask.
pub const MASK: usize = Self::SIZE - 1;
/// The granule's shift, aka log2(size).
pub const SHIFT: usize = Self::SIZE.trailing_zeros() as usize;
const fn size_checked() -> usize {
assert!(GRANULE_SIZE.is_power_of_two());
GRANULE_SIZE
}
}
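// Concrete values for this BSP's 64 KiB granule: `SIZE` = 0x1_0000, `SHIFT` = 16, `MASK` = 0xFFFF.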
impl<const AS_SIZE: usize> AddressSpace<AS_SIZE> {
/// The address space size.
pub const SIZE: usize = Self::size_checked();
/// The address space shift, aka log2(size).
pub const SIZE_SHIFT: usize = Self::SIZE.trailing_zeros() as usize;
const fn size_checked() -> usize {
assert!(AS_SIZE.is_power_of_two());
// Check for architectural restrictions as well.
Self::arch_address_space_size_sanity_checks();
AS_SIZE
}
}
/// Add an entry to the mapping info record.
pub fn kernel_add_mapping_record(
name: &'static str,
virt_region: &MemoryRegion<Virtual>,
phys_region: &MemoryRegion<Physical>,
attr: &AttributeFields,
) {
if let Err(x) = mapping_record::kernel_add(name, virt_region, phys_region, attr) {
warn!("{}", x);
}
}
/// MMIO remapping in the kernel translation tables.
///
/// Typically used by device drivers.
///
/// # Safety
///
/// - Same as `kernel_map_at_unchecked()`, minus the aliasing part.
pub unsafe fn kernel_map_mmio(
name: &'static str,
mmio_descriptor: &MMIODescriptor,
) -> Result<Address<Virtual>, &'static str> {
let phys_region = MemoryRegion::from(*mmio_descriptor);
let offset_into_start_page = mmio_descriptor.start_addr().offset_into_page();
// Check if an identical region has been mapped for another driver. If so, reuse it.
let virt_addr = if let Some(addr) =
mapping_record::kernel_find_and_insert_mmio_duplicate(mmio_descriptor, name)
{
addr
// Otherwise, allocate a new region and map it.
} else {
let num_pages = match NonZeroUsize::new(phys_region.num_pages()) {
None => return Err("Requested 0 pages"),
Some(x) => x,
};
let virt_region =
alloc::kernel_mmio_va_allocator().lock(|allocator| allocator.alloc(num_pages))?;
kernel_map_at_unchecked(
name,
&virt_region,
&phys_region,
&AttributeFields {
mem_attributes: MemAttributes::Device,
acc_perms: AccessPermissions::ReadWrite,
execute_never: true,
},
)?;
virt_region.start_addr()
};
Ok(virt_addr + offset_into_start_page)
}
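// Usage sketch (illustrative only; the name and MMIO address below are made up): a device driver
// maps its aperture once during bring-up and keeps the returned virtual start address:
//
// let descriptor = MMIODescriptor::new(Address::new(0x3F20_1000), 0x48);
// let virt_start = unsafe { kernel_map_mmio("Example UART", &descriptor)? };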
/// Try to translate a kernel virtual page address to a physical page address.
///
/// Will only succeed if there exists a valid mapping for the input page.
pub fn try_kernel_virt_page_addr_to_phys_page_addr(
virt_page_addr: PageAddress<Virtual>,
) -> Result<PageAddress<Physical>, &'static str> {
bsp::memory::mmu::kernel_translation_tables()
.read(|tables| tables.try_virt_page_addr_to_phys_page_addr(virt_page_addr))
}
/// Try to get the attributes of a kernel page.
///
/// Will only succeed if there exists a valid mapping for the input page.
pub fn try_kernel_page_attributes(
virt_page_addr: PageAddress<Virtual>,
) -> Result<AttributeFields, &'static str> {
bsp::memory::mmu::kernel_translation_tables()
.read(|tables| tables.try_page_attributes(virt_page_addr))
}
/// Enable the MMU and data + instruction caching.
///
/// # Safety
///
/// - Crucial function during kernel init. Changes the complete memory view of the processor.
#[inline(always)]
pub unsafe fn enable_mmu_and_caching(
phys_tables_base_addr: Address<Physical>,
) -> Result<(), MMUEnableError> {
arch_mmu::mmu().enable_mmu_and_caching(phys_tables_base_addr)
}
/// Finish initialization of the MMU subsystem.
pub fn post_enable_init() {
kernel_init_mmio_va_allocator();
}
/// Human-readable print of all recorded kernel mappings.
pub fn kernel_print_mappings() {
mapping_record::kernel_print()
}

@ -0,0 +1,70 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2021-2022 Andre Richter <andre.o.richter@gmail.com>
//! Allocation.
use super::MemoryRegion;
use crate::{
memory::{AddressType, Virtual},
synchronization::IRQSafeNullLock,
warn,
};
use core::num::NonZeroUsize;
//--------------------------------------------------------------------------------------------------
// Public Definitions
//--------------------------------------------------------------------------------------------------
/// A page allocator that can be lazily initialized.
pub struct PageAllocator<ATYPE: AddressType> {
pool: Option<MemoryRegion<ATYPE>>,
}
//--------------------------------------------------------------------------------------------------
// Global instances
//--------------------------------------------------------------------------------------------------
static KERNEL_MMIO_VA_ALLOCATOR: IRQSafeNullLock<PageAllocator<Virtual>> =
IRQSafeNullLock::new(PageAllocator::new());
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
/// Return a reference to the kernel's MMIO virtual address allocator.
pub fn kernel_mmio_va_allocator() -> &'static IRQSafeNullLock<PageAllocator<Virtual>> {
&KERNEL_MMIO_VA_ALLOCATOR
}
impl<ATYPE: AddressType> PageAllocator<ATYPE> {
/// Create an instance.
pub const fn new() -> Self {
Self { pool: None }
}
/// Initialize the allocator.
pub fn initialize(&mut self, pool: MemoryRegion<ATYPE>) {
if self.pool.is_some() {
warn!("Already initialized");
return;
}
self.pool = Some(pool);
}
/// Allocate a number of pages.
pub fn alloc(
&mut self,
num_requested_pages: NonZeroUsize,
) -> Result<MemoryRegion<ATYPE>, &'static str> {
if self.pool.is_none() {
return Err("Allocator not initialized");
}
self.pool
.as_mut()
.unwrap()
.take_first_n_pages(num_requested_pages)
}
}
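// Usage sketch (illustrative only): the kernel initializes the allocator once with the BSP's MMIO
// remap region and afterwards hands out page-granule regions:
//
// kernel_mmio_va_allocator().lock(|allocator| {
//     allocator.initialize(bsp::memory::mmu::virt_mmio_remap_region());
//     allocator.alloc(NonZeroUsize::new(1).unwrap())
// });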

@ -0,0 +1,233 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2020-2022 Andre Richter <andre.o.richter@gmail.com>
//! A record of mapped pages.
use super::{
AccessPermissions, Address, AttributeFields, MMIODescriptor, MemAttributes, MemoryRegion,
Physical, Virtual,
};
use crate::{bsp, info, synchronization, synchronization::InitStateLock, warn};
//--------------------------------------------------------------------------------------------------
// Private Definitions
//--------------------------------------------------------------------------------------------------
/// Type describing a virtual memory mapping.
#[allow(missing_docs)]
#[derive(Copy, Clone)]
struct MappingRecordEntry {
pub users: [Option<&'static str>; 5],
pub phys_start_addr: Address<Physical>,
pub virt_start_addr: Address<Virtual>,
pub num_pages: usize,
pub attribute_fields: AttributeFields,
}
struct MappingRecord {
inner: [Option<MappingRecordEntry>; 12],
}
//--------------------------------------------------------------------------------------------------
// Global instances
//--------------------------------------------------------------------------------------------------
static KERNEL_MAPPING_RECORD: InitStateLock<MappingRecord> =
InitStateLock::new(MappingRecord::new());
//--------------------------------------------------------------------------------------------------
// Private Code
//--------------------------------------------------------------------------------------------------
impl MappingRecordEntry {
pub fn new(
name: &'static str,
virt_region: &MemoryRegion<Virtual>,
phys_region: &MemoryRegion<Physical>,
attr: &AttributeFields,
) -> Self {
Self {
users: [Some(name), None, None, None, None],
phys_start_addr: phys_region.start_addr(),
virt_start_addr: virt_region.start_addr(),
num_pages: phys_region.num_pages(),
attribute_fields: *attr,
}
}
fn find_next_free_user(&mut self) -> Result<&mut Option<&'static str>, &'static str> {
if let Some(x) = self.users.iter_mut().find(|x| x.is_none()) {
return Ok(x);
};
Err("Storage for user info exhausted")
}
pub fn add_user(&mut self, user: &'static str) -> Result<(), &'static str> {
let x = self.find_next_free_user()?;
*x = Some(user);
Ok(())
}
}
impl MappingRecord {
pub const fn new() -> Self {
Self { inner: [None; 12] }
}
fn find_next_free(&mut self) -> Result<&mut Option<MappingRecordEntry>, &'static str> {
if let Some(x) = self.inner.iter_mut().find(|x| x.is_none()) {
return Ok(x);
}
Err("Storage for mapping info exhausted")
}
fn find_duplicate(
&mut self,
phys_region: &MemoryRegion<Physical>,
) -> Option<&mut MappingRecordEntry> {
self.inner
.iter_mut()
.filter(|x| x.is_some())
.map(|x| x.as_mut().unwrap())
.filter(|x| x.attribute_fields.mem_attributes == MemAttributes::Device)
.find(|x| {
if x.phys_start_addr != phys_region.start_addr() {
return false;
}
if x.num_pages != phys_region.num_pages() {
return false;
}
true
})
}
pub fn add(
&mut self,
name: &'static str,
virt_region: &MemoryRegion<Virtual>,
phys_region: &MemoryRegion<Physical>,
attr: &AttributeFields,
) -> Result<(), &'static str> {
let x = self.find_next_free()?;
*x = Some(MappingRecordEntry::new(
name,
virt_region,
phys_region,
attr,
));
Ok(())
}
pub fn print(&self) {
const KIB_RSHIFT: u32 = 10; // log2(1024).
const MIB_RSHIFT: u32 = 20; // log2(1024 * 1024).
info!(" -------------------------------------------------------------------------------------------------------------------------------------------");
info!(
" {:^44} {:^30} {:^7} {:^9} {:^35}",
"Virtual", "Physical", "Size", "Attr", "Entity"
);
info!(" -------------------------------------------------------------------------------------------------------------------------------------------");
for i in self.inner.iter().flatten() {
let size = i.num_pages * bsp::memory::mmu::KernelGranule::SIZE;
let virt_start = i.virt_start_addr;
let virt_end_inclusive = virt_start + (size - 1);
let phys_start = i.phys_start_addr;
let phys_end_inclusive = phys_start + (size - 1);
let (size, unit) = if (size >> MIB_RSHIFT) > 0 {
(size >> MIB_RSHIFT, "MiB")
} else if (size >> KIB_RSHIFT) > 0 {
(size >> KIB_RSHIFT, "KiB")
} else {
(size, "Byte")
};
let attr = match i.attribute_fields.mem_attributes {
MemAttributes::CacheableDRAM => "C",
MemAttributes::Device => "Dev",
};
let acc_p = match i.attribute_fields.acc_perms {
AccessPermissions::ReadOnly => "RO",
AccessPermissions::ReadWrite => "RW",
};
let xn = if i.attribute_fields.execute_never {
"XN"
} else {
"X"
};
info!(
" {}..{} --> {}..{} | \
{: >3} {} | {: <3} {} {: <2} | {}",
virt_start,
virt_end_inclusive,
phys_start,
phys_end_inclusive,
size,
unit,
attr,
acc_p,
xn,
i.users[0].unwrap()
);
for k in i.users[1..].iter() {
if let Some(additional_user) = *k {
info!(
" | {}",
additional_user
);
}
}
}
info!(" -------------------------------------------------------------------------------------------------------------------------------------------");
}
}
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
use synchronization::interface::ReadWriteEx;
/// Add an entry to the mapping info record.
pub fn kernel_add(
name: &'static str,
virt_region: &MemoryRegion<Virtual>,
phys_region: &MemoryRegion<Physical>,
attr: &AttributeFields,
) -> Result<(), &'static str> {
KERNEL_MAPPING_RECORD.write(|mr| mr.add(name, virt_region, phys_region, attr))
}
pub fn kernel_find_and_insert_mmio_duplicate(
mmio_descriptor: &MMIODescriptor,
new_user: &'static str,
) -> Option<Address<Virtual>> {
let phys_region: MemoryRegion<Physical> = (*mmio_descriptor).into();
KERNEL_MAPPING_RECORD.write(|mr| {
let dup = mr.find_duplicate(&phys_region)?;
if let Err(x) = dup.add_user(new_user) {
warn!("{}", x);
}
Some(dup.virt_start_addr)
})
}
/// Human-readable print of all recorded kernel mappings.
pub fn kernel_print() {
KERNEL_MAPPING_RECORD.read(|mr| mr.print());
}

@ -0,0 +1,137 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2021-2022 Andre Richter <andre.o.richter@gmail.com>
//! Translation table.
#[cfg(target_arch = "aarch64")]
#[path = "../../_arch/aarch64/memory/mmu/translation_table.rs"]
mod arch_translation_table;
use super::{AttributeFields, MemoryRegion};
use crate::memory::{Address, Physical, Virtual};
//--------------------------------------------------------------------------------------------------
// Architectural Public Reexports
//--------------------------------------------------------------------------------------------------
#[cfg(target_arch = "aarch64")]
pub use arch_translation_table::FixedSizeTranslationTable;
//--------------------------------------------------------------------------------------------------
// Public Definitions
//--------------------------------------------------------------------------------------------------
/// Translation table interfaces.
pub mod interface {
use crate::memory::mmu::PageAddress;
use super::*;
/// Translation table operations.
pub trait TranslationTable {
/// Anything that needs to run before any of the other provided functions can be used.
///
/// # Safety
///
/// - Implementor must ensure that this function can run only once or is harmless if invoked
/// multiple times.
fn init(&mut self) -> Result<(), &'static str>;
/// Map the given virtual memory region to the given physical memory region.
///
/// # Safety
///
/// - Using wrong attributes can cause multiple issues of different nature in the system.
/// - It is not required that the architectural implementation prevents aliasing. That is,
/// mapping to the same physical memory using multiple virtual addresses, which would
/// break Rust's ownership assumptions. This should be protected against in the kernel's
/// generic MMU code.
unsafe fn map_at(
&mut self,
virt_region: &MemoryRegion<Virtual>,
phys_region: &MemoryRegion<Physical>,
attr: &AttributeFields,
) -> Result<(), &'static str>;
/// Try to translate a virtual page address to a physical page address.
///
/// Will only succeed if there exists a valid mapping for the input page.
fn try_virt_page_addr_to_phys_page_addr(
&self,
virt_page_addr: PageAddress<Virtual>,
) -> Result<PageAddress<Physical>, &'static str>;
/// Try to get the attributes of a page.
///
/// Will only succeed if there exists a valid mapping for the input page.
fn try_page_attributes(
&self,
virt_page_addr: PageAddress<Virtual>,
) -> Result<AttributeFields, &'static str>;
/// Try to translate a virtual address to a physical address.
///
/// Will only succeed if there exists a valid mapping for the input address.
fn try_virt_addr_to_phys_addr(
&self,
virt_addr: Address<Virtual>,
) -> Result<Address<Physical>, &'static str>;
}
}
//--------------------------------------------------------------------------------------------------
// Testing
//--------------------------------------------------------------------------------------------------
#[cfg(test)]
mod tests {
use super::*;
use crate::memory::mmu::{AccessPermissions, MemAttributes, PageAddress};
use arch_translation_table::MinSizeTranslationTable;
use interface::TranslationTable;
use test_macros::kernel_test;
/// Sanity checks for the TranslationTable implementation.
#[kernel_test]
fn translationtable_implementation_sanity() {
// This will occupy a lot of space on the stack.
let mut tables = MinSizeTranslationTable::new_for_runtime();
assert!(tables.init().is_ok());
let virt_end_exclusive_page_addr: PageAddress<Virtual> = PageAddress::MAX;
let virt_start_page_addr: PageAddress<Virtual> =
virt_end_exclusive_page_addr.checked_offset(-5).unwrap();
let phys_start_page_addr: PageAddress<Physical> = PageAddress::from(0);
let phys_end_exclusive_page_addr: PageAddress<Physical> =
phys_start_page_addr.checked_offset(5).unwrap();
let virt_region = MemoryRegion::new(virt_start_page_addr, virt_end_exclusive_page_addr);
let phys_region = MemoryRegion::new(phys_start_page_addr, phys_end_exclusive_page_addr);
let attr = AttributeFields {
mem_attributes: MemAttributes::CacheableDRAM,
acc_perms: AccessPermissions::ReadWrite,
execute_never: true,
};
unsafe { assert_eq!(tables.map_at(&virt_region, &phys_region, &attr), Ok(())) };
assert_eq!(
tables.try_virt_page_addr_to_phys_page_addr(virt_start_page_addr),
Ok(phys_start_page_addr)
);
assert_eq!(
tables.try_page_attributes(virt_start_page_addr.checked_offset(-1).unwrap()),
Err("Page marked invalid")
);
assert_eq!(tables.try_page_attributes(virt_start_page_addr), Ok(attr));
let virt_addr = virt_start_page_addr.into_inner() + 0x100;
let phys_addr = phys_start_page_addr.into_inner() + 0x100;
assert_eq!(tables.try_virt_addr_to_phys_addr(virt_addr), Ok(phys_addr));
}
}

@ -0,0 +1,378 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2020-2022 Andre Richter <andre.o.richter@gmail.com>
//! Memory Management Unit types.
use crate::{
bsp, common,
memory::{Address, AddressType, Physical},
};
use core::{convert::From, iter::Step, num::NonZeroUsize, ops::Range};
//--------------------------------------------------------------------------------------------------
// Public Definitions
//--------------------------------------------------------------------------------------------------
/// A wrapper type around [Address] that ensures page alignment.
#[derive(Copy, Clone, Debug, PartialOrd, PartialEq)]
pub struct PageAddress<ATYPE: AddressType> {
inner: Address<ATYPE>,
}
/// A type that describes a region of memory in quantities of pages.
#[derive(Copy, Clone, Debug, PartialOrd, PartialEq)]
pub struct MemoryRegion<ATYPE: AddressType> {
start: PageAddress<ATYPE>,
end_exclusive: PageAddress<ATYPE>,
}
/// Architecture agnostic memory attributes.
#[allow(missing_docs)]
#[derive(Copy, Clone, Debug, PartialOrd, PartialEq)]
pub enum MemAttributes {
CacheableDRAM,
Device,
}
/// Architecture agnostic access permissions.
#[allow(missing_docs)]
#[derive(Copy, Clone, Debug, PartialOrd, PartialEq)]
pub enum AccessPermissions {
ReadOnly,
ReadWrite,
}
/// Collection of memory attributes.
#[allow(missing_docs)]
#[derive(Copy, Clone, Debug, PartialOrd, PartialEq)]
pub struct AttributeFields {
pub mem_attributes: MemAttributes,
pub acc_perms: AccessPermissions,
pub execute_never: bool,
}
/// An MMIO descriptor for use in device drivers.
#[derive(Copy, Clone)]
pub struct MMIODescriptor {
start_addr: Address<Physical>,
end_addr_exclusive: Address<Physical>,
}
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// PageAddress
//------------------------------------------------------------------------------
impl<ATYPE: AddressType> PageAddress<ATYPE> {
/// The largest value that can be represented by this type.
pub const MAX: Self = PageAddress {
inner: Address::new(usize::MAX).align_down_page(),
};
/// Unwraps the value.
pub fn into_inner(self) -> Address<ATYPE> {
self.inner
}
/// Calculates the offset from the page address.
///
/// `count` is in units of [PageAddress]. For example, a count of 2 means `result = self + 2 *
/// page_size`.
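    ///
    /// Illustrative example, assuming the 64 KiB kernel granule: offsetting
    /// `PageAddress::from(0x2_0000)` by a count of 2 yields the page address `0x4_0000`.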
pub fn checked_offset(self, count: isize) -> Option<Self> {
if count == 0 {
return Some(self);
}
let delta = count
.unsigned_abs()
.checked_mul(bsp::memory::mmu::KernelGranule::SIZE)?;
let result = if count.is_positive() {
self.inner.as_usize().checked_add(delta)?
} else {
self.inner.as_usize().checked_sub(delta)?
};
Some(Self {
inner: Address::new(result),
})
}
}
impl<ATYPE: AddressType> From<usize> for PageAddress<ATYPE> {
fn from(addr: usize) -> Self {
assert!(
common::is_aligned(addr, bsp::memory::mmu::KernelGranule::SIZE),
"Input usize not page aligned"
);
Self {
inner: Address::new(addr),
}
}
}
impl<ATYPE: AddressType> From<Address<ATYPE>> for PageAddress<ATYPE> {
fn from(addr: Address<ATYPE>) -> Self {
assert!(addr.is_page_aligned(), "Input Address not page aligned");
Self { inner: addr }
}
}
impl<ATYPE: AddressType> Step for PageAddress<ATYPE> {
fn steps_between(start: &Self, end: &Self) -> Option<usize> {
if start > end {
return None;
}
// Since start <= end, do unchecked arithmetic.
Some(
(end.inner.as_usize() - start.inner.as_usize())
>> bsp::memory::mmu::KernelGranule::SHIFT,
)
}
fn forward_checked(start: Self, count: usize) -> Option<Self> {
start.checked_offset(count as isize)
}
fn backward_checked(start: Self, count: usize) -> Option<Self> {
start.checked_offset(-(count as isize))
}
}
//------------------------------------------------------------------------------
// MemoryRegion
//------------------------------------------------------------------------------
impl<ATYPE: AddressType> MemoryRegion<ATYPE> {
/// Create an instance.
pub fn new(start: PageAddress<ATYPE>, end_exclusive: PageAddress<ATYPE>) -> Self {
assert!(start <= end_exclusive);
Self {
start,
end_exclusive,
}
}
fn as_range(&self) -> Range<PageAddress<ATYPE>> {
self.into_iter()
}
/// Returns the start page address.
pub fn start_page_addr(&self) -> PageAddress<ATYPE> {
self.start
}
/// Returns the start address.
pub fn start_addr(&self) -> Address<ATYPE> {
self.start.into_inner()
}
/// Returns the exclusive end page address.
pub fn end_exclusive_page_addr(&self) -> PageAddress<ATYPE> {
self.end_exclusive
}
    /// Returns the inclusive end page address.
pub fn end_inclusive_page_addr(&self) -> PageAddress<ATYPE> {
self.end_exclusive.checked_offset(-1).unwrap()
}
/// Checks if self contains an address.
pub fn contains(&self, addr: Address<ATYPE>) -> bool {
let page_addr = PageAddress::from(addr.align_down_page());
self.as_range().contains(&page_addr)
}
/// Checks if there is an overlap with another memory region.
pub fn overlaps(&self, other_region: &Self) -> bool {
let self_range = self.as_range();
self_range.contains(&other_region.start_page_addr())
|| self_range.contains(&other_region.end_inclusive_page_addr())
}
/// Returns the number of pages contained in this region.
pub fn num_pages(&self) -> usize {
PageAddress::steps_between(&self.start, &self.end_exclusive).unwrap()
}
/// Returns the size in bytes of this region.
pub fn size(&self) -> usize {
// Invariant: start <= end_exclusive, so do unchecked arithmetic.
let end_exclusive = self.end_exclusive.into_inner().as_usize();
let start = self.start.into_inner().as_usize();
end_exclusive - start
}
/// Splits the MemoryRegion like:
///
/// --------------------------------------------------------------------------------
/// |         |         |         |         |         |         |         |        |
/// --------------------------------------------------------------------------------
/// ^                   ^                                                          ^
/// |                   |                                                          |
/// left_start          left_end_exclusive                                         |
///                                                                                |
///                     ^                                                          |
///                     |                                                          |
///                     right_start                          right_end_exclusive
///
/// Left region is returned to the caller. Right region is the new region for this struct.
pub fn take_first_n_pages(&mut self, num_pages: NonZeroUsize) -> Result<Self, &'static str> {
let count: usize = num_pages.into();
let left_end_exclusive = self.start.checked_offset(count as isize);
let left_end_exclusive = match left_end_exclusive {
None => return Err("Overflow while calculating left_end_exclusive"),
Some(x) => x,
};
if left_end_exclusive > self.end_exclusive {
return Err("Not enough free pages");
}
let allocation = Self {
start: self.start,
end_exclusive: left_end_exclusive,
};
self.start = left_end_exclusive;
Ok(allocation)
}
}
impl<ATYPE: AddressType> IntoIterator for MemoryRegion<ATYPE> {
type Item = PageAddress<ATYPE>;
type IntoIter = Range<Self::Item>;
fn into_iter(self) -> Self::IntoIter {
Range {
start: self.start,
end: self.end_exclusive,
}
}
}
impl From<MMIODescriptor> for MemoryRegion<Physical> {
fn from(desc: MMIODescriptor) -> Self {
let start = PageAddress::from(desc.start_addr.align_down_page());
let end_exclusive = PageAddress::from(desc.end_addr_exclusive().align_up_page());
Self {
start,
end_exclusive,
}
}
}
//------------------------------------------------------------------------------
// MMIODescriptor
//------------------------------------------------------------------------------
impl MMIODescriptor {
/// Create an instance.
pub const fn new(start_addr: Address<Physical>, size: usize) -> Self {
assert!(size > 0);
let end_addr_exclusive = Address::new(start_addr.as_usize() + size);
Self {
start_addr,
end_addr_exclusive,
}
}
/// Return the start address.
pub const fn start_addr(&self) -> Address<Physical> {
self.start_addr
}
/// Return the exclusive end address.
pub fn end_addr_exclusive(&self) -> Address<Physical> {
self.end_addr_exclusive
}
}
//--------------------------------------------------------------------------------------------------
// Testing
//--------------------------------------------------------------------------------------------------
#[cfg(test)]
mod tests {
use super::*;
use crate::memory::Virtual;
use test_macros::kernel_test;
/// Sanity of [PageAddress] methods.
#[kernel_test]
fn pageaddress_type_method_sanity() {
let page_addr: PageAddress<Virtual> =
PageAddress::from(bsp::memory::mmu::KernelGranule::SIZE * 2);
assert_eq!(
page_addr.checked_offset(-2),
Some(PageAddress::<Virtual>::from(0))
);
assert_eq!(
page_addr.checked_offset(2),
Some(PageAddress::<Virtual>::from(
bsp::memory::mmu::KernelGranule::SIZE * 4
))
);
assert_eq!(
PageAddress::<Virtual>::from(0).checked_offset(0),
Some(PageAddress::<Virtual>::from(0))
);
assert_eq!(PageAddress::<Virtual>::from(0).checked_offset(-1), None);
let max_page_addr = Address::<Virtual>::new(usize::MAX).align_down_page();
assert_eq!(
PageAddress::<Virtual>::from(max_page_addr).checked_offset(1),
None
);
let zero = PageAddress::<Virtual>::from(0);
let three = PageAddress::<Virtual>::from(bsp::memory::mmu::KernelGranule::SIZE * 3);
assert_eq!(PageAddress::steps_between(&zero, &three), Some(3));
}
/// Sanity of [MemoryRegion] methods.
#[kernel_test]
fn memoryregion_type_method_sanity() {
let zero = PageAddress::<Virtual>::from(0);
let zero_region = MemoryRegion::new(zero, zero);
assert_eq!(zero_region.num_pages(), 0);
assert_eq!(zero_region.size(), 0);
let one = PageAddress::<Virtual>::from(bsp::memory::mmu::KernelGranule::SIZE);
let one_region = MemoryRegion::new(zero, one);
assert_eq!(one_region.num_pages(), 1);
assert_eq!(one_region.size(), bsp::memory::mmu::KernelGranule::SIZE);
let three = PageAddress::<Virtual>::from(bsp::memory::mmu::KernelGranule::SIZE * 3);
let mut three_region = MemoryRegion::new(zero, three);
assert!(three_region.contains(zero.into_inner()));
assert!(!three_region.contains(three.into_inner()));
assert!(three_region.overlaps(&one_region));
let allocation = three_region
.take_first_n_pages(NonZeroUsize::new(2).unwrap())
.unwrap();
assert_eq!(allocation.num_pages(), 2);
assert_eq!(three_region.num_pages(), 1);
for (i, alloc) in allocation.into_iter().enumerate() {
assert_eq!(
alloc.into_inner().as_usize(),
i * bsp::memory::mmu::KernelGranule::SIZE
);
}
}
}
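
For illustration, the page and region primitives above compose as in the following sketch. It assumes it lives in this same module (so that crate::memory::Virtual and bsp are reachable, as in the unit tests above); the helper function name is made up and not part of the tutorial code.

// Illustrative only: how PageAddress and MemoryRegion compose.
fn region_usage_sketch() {
    use crate::memory::Virtual;
    use core::num::NonZeroUsize;

    // A two-page region starting at virtual address 0.
    let start = PageAddress::<Virtual>::from(0);
    let end_exclusive = start.checked_offset(2).unwrap();
    let mut region = MemoryRegion::new(start, end_exclusive);

    // Carve the first page out of the region; the remainder stays behind in `region`.
    let allocation = region
        .take_first_n_pages(NonZeroUsize::new(1).unwrap())
        .unwrap();

    assert_eq!(allocation.num_pages(), 1);
    assert_eq!(region.num_pages(), 1);
    assert_eq!(region.size(), bsp::memory::mmu::KernelGranule::SIZE);
}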

@ -0,0 +1,104 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2018-2022 Andre Richter <andre.o.richter@gmail.com>
//! A panic handler that infinitely waits.
use crate::{bsp, cpu, exception};
use core::{fmt, panic::PanicInfo};
//--------------------------------------------------------------------------------------------------
// Private Code
//--------------------------------------------------------------------------------------------------
fn _panic_print(args: fmt::Arguments) {
use fmt::Write;
unsafe { bsp::console::panic_console_out().write_fmt(args).unwrap() };
}
/// The point of exit for `libkernel`.
///
/// It is linked weakly, so that the integration tests can override its standard behavior.
#[linkage = "weak"]
#[no_mangle]
fn _panic_exit() -> ! {
#[cfg(not(feature = "test_build"))]
{
cpu::wait_forever()
}
#[cfg(feature = "test_build")]
{
cpu::qemu_exit_failure()
}
}
/// Prints with a newline - only use from the panic handler.
///
/// Carbon copy from <https://doc.rust-lang.org/src/std/macros.rs.html>
#[macro_export]
macro_rules! panic_println {
($($arg:tt)*) => ({
_panic_print(format_args_nl!($($arg)*));
})
}
/// Stop immediately if called a second time.
///
/// # Note
///
/// Using atomics here relieves us from needing to use `unsafe` for the static variable.
///
/// On `AArch64`, which is the only implemented architecture at the time of writing this,
/// [`AtomicBool::load`] and [`AtomicBool::store`] are lowered to ordinary load and store
/// instructions. They are therefore safe to use even with MMU + caching deactivated.
///
/// [`AtomicBool::load`]: core::sync::atomic::AtomicBool::load
/// [`AtomicBool::store`]: core::sync::atomic::AtomicBool::store
fn panic_prevent_reenter() {
use core::sync::atomic::{AtomicBool, Ordering};
#[cfg(not(target_arch = "aarch64"))]
compile_error!("Add the target_arch to above's check if the following code is safe to use");
static PANIC_IN_PROGRESS: AtomicBool = AtomicBool::new(false);
if !PANIC_IN_PROGRESS.load(Ordering::Relaxed) {
PANIC_IN_PROGRESS.store(true, Ordering::Relaxed);
return;
}
_panic_exit()
}
#[panic_handler]
fn panic(info: &PanicInfo) -> ! {
use crate::time::interface::TimeManager;
unsafe { exception::asynchronous::local_irq_mask() };
// Protect against panic infinite loops if any of the following code panics itself.
panic_prevent_reenter();
let timestamp = crate::time::time_manager().uptime();
let (location, line, column) = match info.location() {
Some(loc) => (loc.file(), loc.line(), loc.column()),
_ => ("???", 0, 0),
};
panic_println!(
"[ {:>3}.{:06}] Kernel panic!\n\n\
Panic location:\n File '{}', line {}, column {}\n\n\
{}",
timestamp.as_secs(),
timestamp.subsec_micros(),
location,
line,
column,
info.message().unwrap_or(&format_args!("")),
);
_panic_exit()
}

@ -0,0 +1,94 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2018-2022 Andre Richter <andre.o.richter@gmail.com>
//! Printing.
use crate::{bsp, console};
use core::fmt;
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
#[doc(hidden)]
pub fn _print(args: fmt::Arguments) {
use console::interface::Write;
bsp::console::console().write_fmt(args).unwrap();
}
/// Prints without a newline.
///
/// Carbon copy from <https://doc.rust-lang.org/src/std/macros.rs.html>
#[macro_export]
macro_rules! print {
($($arg:tt)*) => ($crate::print::_print(format_args!($($arg)*)));
}
/// Prints with a newline.
///
/// Carbon copy from <https://doc.rust-lang.org/src/std/macros.rs.html>
#[macro_export]
macro_rules! println {
() => ($crate::print!("\n"));
($($arg:tt)*) => ({
$crate::print::_print(format_args_nl!($($arg)*));
})
}
/// Prints an info, with a newline.
#[macro_export]
macro_rules! info {
($string:expr) => ({
use $crate::time::interface::TimeManager;
let timestamp = $crate::time::time_manager().uptime();
$crate::print::_print(format_args_nl!(
concat!("[ {:>3}.{:06}] ", $string),
timestamp.as_secs(),
timestamp.subsec_micros(),
));
});
($format_string:expr, $($arg:tt)*) => ({
use $crate::time::interface::TimeManager;
let timestamp = $crate::time::time_manager().uptime();
$crate::print::_print(format_args_nl!(
concat!("[ {:>3}.{:06}] ", $format_string),
timestamp.as_secs(),
timestamp.subsec_micros(),
$($arg)*
));
})
}
/// Prints a warning, with a newline.
#[macro_export]
macro_rules! warn {
($string:expr) => ({
use $crate::time::interface::TimeManager;
let timestamp = $crate::time::time_manager().uptime();
$crate::print::_print(format_args_nl!(
concat!("[W {:>3}.{:06}] ", $string),
timestamp.as_secs(),
timestamp.subsec_micros(),
));
});
($format_string:expr, $($arg:tt)*) => ({
use $crate::time::interface::TimeManager;
let timestamp = $crate::time::time_manager().uptime();
$crate::print::_print(format_args_nl!(
concat!("[W {:>3}.{:06}] ", $format_string),
timestamp.as_secs(),
timestamp.subsec_micros(),
$($arg)*
));
})
}
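
For illustration, the macros above are consumed like this from anywhere in the kernel crate; the message texts are made up, and the timestamps shown in the comments are example values only.

// Illustrative only: typical call sites for the printing macros.
fn print_macro_usage_sketch() {
    crate::println!("plain output, no timestamp prefix");
    crate::info!("Booting on: {}", "Raspberry Pi 3"); // "[   0.123456] Booting on: Raspberry Pi 3"
    crate::warn!("Something looks odd");              // "[W   0.123457] Something looks odd"
}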

@ -0,0 +1,92 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2020-2022 Andre Richter <andre.o.richter@gmail.com>
//! State information about the kernel itself.
use core::sync::atomic::{AtomicU8, Ordering};
//--------------------------------------------------------------------------------------------------
// Private Definitions
//--------------------------------------------------------------------------------------------------
/// Different stages in the kernel execution.
#[derive(Copy, Clone, Eq, PartialEq)]
enum State {
/// The kernel starts booting in this state.
Init,
/// The kernel transitions to this state when jumping to `kernel_main()` (at the end of
/// `kernel_init()`, after all init calls are done).
SingleCoreMain,
/// The kernel transitions to this state when it boots the secondary cores, aka switches
/// execution mode to symmetric multiprocessing (SMP).
MultiCoreMain,
}
//--------------------------------------------------------------------------------------------------
// Public Definitions
//--------------------------------------------------------------------------------------------------
/// Maintains the kernel state and state transitions.
pub struct StateManager(AtomicU8);
//--------------------------------------------------------------------------------------------------
// Global instances
//--------------------------------------------------------------------------------------------------
static STATE_MANAGER: StateManager = StateManager::new();
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
/// Return a reference to the global StateManager.
pub fn state_manager() -> &'static StateManager {
&STATE_MANAGER
}
impl StateManager {
const INIT: u8 = 0;
const SINGLE_CORE_MAIN: u8 = 1;
const MULTI_CORE_MAIN: u8 = 2;
/// Create a new instance.
pub const fn new() -> Self {
Self(AtomicU8::new(Self::INIT))
}
/// Return the current state.
fn state(&self) -> State {
let state = self.0.load(Ordering::Acquire);
match state {
Self::INIT => State::Init,
Self::SINGLE_CORE_MAIN => State::SingleCoreMain,
Self::MULTI_CORE_MAIN => State::MultiCoreMain,
_ => panic!("Invalid KERNEL_STATE"),
}
}
/// Return whether the kernel is in the init state.
pub fn is_init(&self) -> bool {
self.state() == State::Init
}
/// Transition from Init to SingleCoreMain.
pub fn transition_to_single_core_main(&self) {
if self
.0
.compare_exchange(
Self::INIT,
Self::SINGLE_CORE_MAIN,
Ordering::Acquire,
Ordering::Relaxed,
)
.is_err()
{
panic!("transition_to_single_core_main() called while state != Init");
}
}
}
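
A sketch of how the state machine above is driven; in the tutorial this happens in the kernel's init path, and the helper function name here is made up.

// Illustrative only: querying and advancing the kernel state.
fn state_usage_sketch() {
    assert!(state_manager().is_init());

    // Called once, right before jumping into kernel_main(). Panics if the kernel
    // has already left the Init state.
    state_manager().transition_to_single_core_main();
}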

@ -0,0 +1,85 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2022 Andre Richter <andre.o.richter@gmail.com>
//! Debug symbol support.
use crate::memory::{Address, Virtual};
use core::{cell::UnsafeCell, slice};
use debug_symbol_types::Symbol;
//--------------------------------------------------------------------------------------------------
// Private Definitions
//--------------------------------------------------------------------------------------------------
// Symbol from the linker script.
extern "Rust" {
static __kernel_symbols_start: UnsafeCell<()>;
}
//--------------------------------------------------------------------------------------------------
// Global instances
//--------------------------------------------------------------------------------------------------
/// This will be patched to the correct value by the "kernel symbols tool" after linking. The value
/// given here is just a (safe) dummy.
#[no_mangle]
static NUM_KERNEL_SYMBOLS: u64 = 0;
//--------------------------------------------------------------------------------------------------
// Private Code
//--------------------------------------------------------------------------------------------------
fn kernel_symbol_section_virt_start_addr() -> Address<Virtual> {
Address::new(unsafe { __kernel_symbols_start.get() as usize })
}
fn kernel_symbols_slice() -> &'static [Symbol] {
let ptr = kernel_symbol_section_virt_start_addr().as_usize() as *const Symbol;
unsafe {
let num = core::ptr::read_volatile(&NUM_KERNEL_SYMBOLS as *const u64) as usize;
slice::from_raw_parts(ptr, num)
}
}
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
/// Retrieve the symbol name corresponding to a virtual address, if any.
pub fn lookup_symbol(addr: Address<Virtual>) -> Option<&'static str> {
for i in kernel_symbols_slice() {
if i.contains(addr.as_usize()) {
return Some(i.name());
}
}
None
}
//--------------------------------------------------------------------------------------------------
// Testing
//--------------------------------------------------------------------------------------------------
#[cfg(test)]
mod tests {
use super::*;
use test_macros::kernel_test;
/// Sanity of symbols module.
#[kernel_test]
fn symbols_sanity() {
let first_sym = lookup_symbol(Address::new(
crate::common::is_aligned as *const usize as usize,
))
.unwrap();
assert_eq!(first_sym, "libkernel::common::is_aligned");
let second_sym =
lookup_symbol(Address::new(crate::version as *const usize as usize)).unwrap();
assert_eq!(second_sym, "libkernel::version");
}
}
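
A sketch of how lookup_symbol() can be consumed, for example for diagnostic output; the helper function name is made up.

// Illustrative only: resolving a virtual address to a symbol name.
fn symbol_lookup_sketch(addr: Address<Virtual>) {
    match lookup_symbol(addr) {
        Some(name) => crate::info!("Address belongs to symbol: {}", name),
        None => crate::info!("No symbol found for this address"),
    }
}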

@ -0,0 +1,159 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2020-2022 Andre Richter <andre.o.richter@gmail.com>
//! Synchronization primitives.
//!
//! # Resources
//!
//! - <https://doc.rust-lang.org/book/ch16-04-extensible-concurrency-sync-and-send.html>
//! - <https://stackoverflow.com/questions/59428096/understanding-the-send-trait>
//! - <https://doc.rust-lang.org/std/cell/index.html>
use core::cell::UnsafeCell;
//--------------------------------------------------------------------------------------------------
// Public Definitions
//--------------------------------------------------------------------------------------------------
/// Synchronization interfaces.
pub mod interface {
/// Any object implementing this trait guarantees exclusive access to the data wrapped within
/// the Mutex for the duration of the provided closure.
pub trait Mutex {
/// The type of the data that is wrapped by this mutex.
type Data;
/// Locks the mutex and grants the closure temporary mutable access to the wrapped data.
fn lock<R>(&self, f: impl FnOnce(&mut Self::Data) -> R) -> R;
}
/// A reader-writer exclusion type.
///
/// The implementing object allows either a number of readers or at most one writer at any point
/// in time.
pub trait ReadWriteEx {
/// The type of encapsulated data.
type Data;
/// Grants temporary mutable access to the encapsulated data.
fn write<R>(&self, f: impl FnOnce(&mut Self::Data) -> R) -> R;
/// Grants temporary immutable access to the encapsulated data.
fn read<R>(&self, f: impl FnOnce(&Self::Data) -> R) -> R;
}
}
/// A pseudo-lock for teaching purposes.
///
/// In contrast to a real Mutex implementation, it does not protect against concurrent access from
/// other cores to the contained data. This part is preserved for later lessons.
///
/// The lock will only be used as long as it is safe to do so, i.e. as long as the kernel is
/// executing on a single core.
pub struct IRQSafeNullLock<T>
where
T: ?Sized,
{
data: UnsafeCell<T>,
}
/// A pseudo-lock that is RW during the single-core kernel init phase and RO afterwards.
///
/// Intended to encapsulate data that is populated during kernel init when no concurrency exists.
pub struct InitStateLock<T>
where
T: ?Sized,
{
data: UnsafeCell<T>,
}
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
unsafe impl<T> Send for IRQSafeNullLock<T> where T: ?Sized + Send {}
unsafe impl<T> Sync for IRQSafeNullLock<T> where T: ?Sized + Send {}
impl<T> IRQSafeNullLock<T> {
/// Create an instance.
pub const fn new(data: T) -> Self {
Self {
data: UnsafeCell::new(data),
}
}
}
unsafe impl<T> Send for InitStateLock<T> where T: ?Sized + Send {}
unsafe impl<T> Sync for InitStateLock<T> where T: ?Sized + Send {}
impl<T> InitStateLock<T> {
/// Create an instance.
pub const fn new(data: T) -> Self {
Self {
data: UnsafeCell::new(data),
}
}
}
//------------------------------------------------------------------------------
// OS Interface Code
//------------------------------------------------------------------------------
use crate::{exception, state};
impl<T> interface::Mutex for IRQSafeNullLock<T> {
type Data = T;
fn lock<R>(&self, f: impl FnOnce(&mut Self::Data) -> R) -> R {
// In a real lock, there would be code encapsulating this line that ensures that this
// mutable reference is only ever given out once at a time.
let data = unsafe { &mut *self.data.get() };
// Execute the closure while IRQs are masked.
exception::asynchronous::exec_with_irq_masked(|| f(data))
}
}
impl<T> interface::ReadWriteEx for InitStateLock<T> {
type Data = T;
fn write<R>(&self, f: impl FnOnce(&mut Self::Data) -> R) -> R {
assert!(
state::state_manager().is_init(),
"InitStateLock::write called after kernel init phase"
);
assert!(
!exception::asynchronous::is_local_irq_masked(),
"InitStateLock::write called with IRQs unmasked"
);
let data = unsafe { &mut *self.data.get() };
f(data)
}
fn read<R>(&self, f: impl FnOnce(&Self::Data) -> R) -> R {
let data = unsafe { &*self.data.get() };
f(data)
}
}
//--------------------------------------------------------------------------------------------------
// Testing
//--------------------------------------------------------------------------------------------------
#[cfg(test)]
mod tests {
use super::*;
use test_macros::kernel_test;
/// InitStateLock must be transparent.
#[kernel_test]
fn init_state_lock_is_transparent() {
use core::mem::size_of;
assert_eq!(size_of::<InitStateLock<u64>>(), size_of::<u64>());
}
}
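
For illustration, the two lock types above are used like this; a sketch with made-up data and a made-up function name.

// Illustrative only: closure-based access through the Mutex and ReadWriteEx interfaces.
fn lock_usage_sketch() {
    use self::interface::{Mutex, ReadWriteEx};

    static COUNTER: IRQSafeNullLock<u64> = IRQSafeNullLock::new(0);
    static CONFIG: InitStateLock<u64> = InitStateLock::new(0);

    // The closure runs with IRQs masked and receives the wrapped data mutably.
    COUNTER.lock(|c| *c += 1);

    // Writes are only permitted during the single-core init phase (asserted at runtime).
    CONFIG.write(|c| *c = 42);
    assert_eq!(CONFIG.read(|c| *c), 42);
}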

@ -0,0 +1,37 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2020-2022 Andre Richter <andre.o.richter@gmail.com>
//! Timer primitives.
#[cfg(target_arch = "aarch64")]
#[path = "_arch/aarch64/time.rs"]
mod arch_time;
//--------------------------------------------------------------------------------------------------
// Architectural Public Reexports
//--------------------------------------------------------------------------------------------------
pub use arch_time::time_manager;
//--------------------------------------------------------------------------------------------------
// Public Definitions
//--------------------------------------------------------------------------------------------------
/// Timekeeping interfaces.
pub mod interface {
use core::time::Duration;
/// Time management functions.
pub trait TimeManager {
/// The timer's resolution.
fn resolution(&self) -> Duration;
/// The uptime since power-on of the device.
///
/// This includes time consumed by firmware and bootloaders.
fn uptime(&self) -> Duration;
/// Spin for a given duration.
fn spin_for(&self, duration: Duration);
}
}
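
A minimal sketch of how the trait above is consumed; the helper function name is made up, and the timer integration tests further below exercise the same calls.

// Illustrative only: driving the global timer through the TimeManager trait.
fn time_usage_sketch() {
    use self::interface::TimeManager;
    use core::time::Duration;

    let uptime = time_manager().uptime();
    let _secs = uptime.as_secs();

    // Busy-wait for one second.
    time_manager().spin_for(Duration::from_secs(1));
}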

@ -0,0 +1,48 @@
# frozen_string_literal: true
# SPDX-License-Identifier: MIT OR Apache-2.0
#
# Copyright (c) 2019-2022 Andre Richter <andre.o.richter@gmail.com>
require 'console_io_test'
# Verify sending and receiving works as expected.
class TxRxHandshakeTest < SubtestBase
def name
'Transmit and Receive handshake'
end
def run(qemu_out, qemu_in)
qemu_in.write_nonblock('ABC')
expect_or_raise(qemu_out, 'OK1234')
end
end
# Check for correct TX statistics implementation. Depends on test 1 being run first.
class TxStatisticsTest < SubtestBase
def name
'Transmit statistics'
end
def run(qemu_out, _qemu_in)
expect_or_raise(qemu_out, '6')
end
end
# Check for correct RX statistics implementation. Depends on test 1 being run first.
class RxStatisticsTest < SubtestBase
def name
'Receive statistics'
end
def run(qemu_out, _qemu_in)
expect_or_raise(qemu_out, '3')
end
end
##--------------------------------------------------------------------------------------------------
## Test registration
##--------------------------------------------------------------------------------------------------
def subtest_collection
[TxRxHandshakeTest.new, TxStatisticsTest.new, RxStatisticsTest.new]
end

@ -0,0 +1,39 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2019-2022 Andre Richter <andre.o.richter@gmail.com>
//! Console sanity tests - RX, TX and statistics.
#![feature(format_args_nl)]
#![no_main]
#![no_std]
/// Console tests should time out on the I/O harness in case of panic.
mod panic_wait_forever;
use libkernel::{bsp, console, cpu, exception, memory, print};
#[no_mangle]
unsafe fn kernel_init() -> ! {
use bsp::console::console;
use console::interface::*;
exception::handling_init();
memory::mmu::post_enable_init();
bsp::console::qemu_bring_up_console();
// Handshake
assert_eq!(console().read_char(), 'A');
assert_eq!(console().read_char(), 'B');
assert_eq!(console().read_char(), 'C');
print!("OK1234");
// 6
print!("{}", console().chars_written());
// 3
print!("{}", console().chars_read());
// The QEMU process running this test will be closed by the I/O test harness.
cpu::wait_forever();
}

@ -0,0 +1,50 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2019-2022 Andre Richter <andre.o.richter@gmail.com>
//! Timer sanity tests.
#![feature(custom_test_frameworks)]
#![no_main]
#![no_std]
#![reexport_test_harness_main = "test_main"]
#![test_runner(libkernel::test_runner)]
use core::time::Duration;
use libkernel::{bsp, cpu, exception, memory, time, time::interface::TimeManager};
use test_macros::kernel_test;
#[no_mangle]
unsafe fn kernel_init() -> ! {
exception::handling_init();
memory::mmu::post_enable_init();
bsp::console::qemu_bring_up_console();
// Depending on CPU arch, some timer bring-up code could go here. Not needed for the RPi.
test_main();
cpu::qemu_exit_success()
}
/// Simple check that the timer is running.
#[kernel_test]
fn timer_is_counting() {
assert!(time::time_manager().uptime().as_nanos() > 0)
}
/// Timer resolution must be sufficient.
#[kernel_test]
fn timer_resolution_is_sufficient() {
assert!(time::time_manager().resolution().as_nanos() < 100)
}
/// Sanity check spin_for() implementation.
#[kernel_test]
fn spin_accuracy_check_1_second() {
let t1 = time::time_manager().uptime();
time::time_manager().spin_for(Duration::from_secs(1));
let t2 = time::time_manager().uptime();
assert_eq!((t2 - t1).as_secs(), 1)
}

@ -0,0 +1,37 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2019-2022 Andre Richter <andre.o.richter@gmail.com>
//! Page faults must result in synchronous exceptions.
#![feature(format_args_nl)]
#![no_main]
#![no_std]
/// Overwrites libkernel's `panic_wait::_panic_exit()` so that it returns a "success" code.
///
/// In this test, reaching the panic is a success, because it is called from the synchronous
/// exception handler, which is what this test wants to achieve.
///
/// It also means that this integration test cannot use any other code that calls panic!() directly
/// or indirectly.
mod panic_exit_success;
use libkernel::{bsp, cpu, exception, info, memory, println};
#[no_mangle]
unsafe fn kernel_init() -> ! {
exception::handling_init();
memory::mmu::post_enable_init();
bsp::console::qemu_bring_up_console();
// This line will be printed as the test header.
println!("Testing synchronous exception handling by causing a page fault");
info!("Reading from the bottom of the address space, at address 1 GiB...");
let big_addr: u64 = 1024 * 1024 * 1024;
core::ptr::read_volatile(big_addr as *mut u64);
// If execution reaches here, the memory access above did not cause a page fault exception.
cpu::qemu_exit_failure()
}

@ -0,0 +1,25 @@
# frozen_string_literal: true
# SPDX-License-Identifier: MIT OR Apache-2.0
#
# Copyright (c) 2022 Andre Richter <andre.o.richter@gmail.com>
require 'console_io_test'
# Verify that exception restore works.
class ExceptionRestoreTest < SubtestBase
def name
'Exception restore'
end
def run(qemu_out, _qemu_in)
expect_or_raise(qemu_out, 'Back from system call!')
end
end
##--------------------------------------------------------------------------------------------------
## Test registration
##--------------------------------------------------------------------------------------------------
def subtest_collection
[ExceptionRestoreTest.new]
end

@ -0,0 +1,49 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2022 Andre Richter <andre.o.richter@gmail.com>
//! A simple sanity test to see if exception restore code works.
#![feature(format_args_nl)]
#![no_main]
#![no_std]
/// Console tests should time out on the I/O harness in case of panic.
mod panic_wait_forever;
use core::arch::asm;
use libkernel::{bsp, cpu, exception, info, memory, println};
#[inline(never)]
fn nested_system_call() {
#[cfg(target_arch = "aarch64")]
unsafe {
asm!("svc #0x1337", options(nomem, nostack, preserves_flags));
}
#[cfg(not(target_arch = "aarch64"))]
{
info!("Not supported yet");
cpu::wait_forever();
}
}
#[no_mangle]
unsafe fn kernel_init() -> ! {
exception::handling_init();
memory::mmu::post_enable_init();
bsp::console::qemu_bring_up_console();
// This line will be printed as the test header.
println!("Testing exception restore");
info!("Making a dummy system call");
// Calling this inside a function indirectly tests if the link register is restored properly.
nested_system_call();
info!("Back from system call!");
// The QEMU process running this test will be closed by the I/O test harness.
cpu::wait_forever();
}

@ -0,0 +1,67 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2020-2022 Andre Richter <andre.o.richter@gmail.com>
//! IRQ handling sanity tests.
#![feature(custom_test_frameworks)]
#![no_main]
#![no_std]
#![reexport_test_harness_main = "test_main"]
#![test_runner(libkernel::test_runner)]
use libkernel::{bsp, cpu, exception, memory};
use test_macros::kernel_test;
#[no_mangle]
unsafe fn kernel_init() -> ! {
memory::mmu::post_enable_init();
bsp::console::qemu_bring_up_console();
exception::handling_init();
exception::asynchronous::local_irq_unmask();
test_main();
cpu::qemu_exit_success()
}
/// Check that IRQ masking works.
#[kernel_test]
fn local_irq_mask_works() {
// Precondition: IRQs are unmasked.
assert!(exception::asynchronous::is_local_irq_masked());
unsafe { exception::asynchronous::local_irq_mask() };
assert!(!exception::asynchronous::is_local_irq_masked());
// Restore earlier state.
unsafe { exception::asynchronous::local_irq_unmask() };
}
/// Check that IRQ unmasking works.
#[kernel_test]
fn local_irq_unmask_works() {
// Precondition: IRQs are masked.
unsafe { exception::asynchronous::local_irq_mask() };
assert!(!exception::asynchronous::is_local_irq_masked());
unsafe { exception::asynchronous::local_irq_unmask() };
assert!(exception::asynchronous::is_local_irq_masked());
}
/// Check that IRQ mask save is saving "something".
#[kernel_test]
fn local_irq_mask_save_works() {
// Precondition: IRQs are unmasked.
assert!(exception::asynchronous::is_local_irq_masked());
let first = unsafe { exception::asynchronous::local_irq_mask_save() };
assert!(!exception::asynchronous::is_local_irq_masked());
let second = unsafe { exception::asynchronous::local_irq_mask_save() };
assert_ne!(first, second);
unsafe { exception::asynchronous::local_irq_restore(first) };
assert!(exception::asynchronous::is_local_irq_masked());
}

@ -0,0 +1,3 @@
# frozen_string_literal: true
EXPECTED_PRINT = 'Echoing input now'

@ -0,0 +1,9 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2019-2022 Andre Richter <andre.o.richter@gmail.com>
/// Overwrites libkernel's `panic_wait::_panic_exit()` with the QEMU-exit version.
#[no_mangle]
fn _panic_exit() -> ! {
libkernel::cpu::qemu_exit_success()
}

@ -0,0 +1,9 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2022 Andre Richter <andre.o.richter@gmail.com>
/// Overwrites libkernel's `panic_wait::_panic_exit()` with wait_forever.
#[no_mangle]
fn _panic_exit() -> ! {
libkernel::cpu::wait_forever()
}

@ -0,0 +1,103 @@
## SPDX-License-Identifier: MIT OR Apache-2.0
##
## Copyright (c) 2018-2022 Andre Richter <andre.o.richter@gmail.com>
include ../common/format.mk
include ../common/docker.mk
##--------------------------------------------------------------------------------------------------
## Check for input variables that need to be exported by the calling Makefile
##--------------------------------------------------------------------------------------------------
ifndef KERNEL_SYMBOLS_TOOL_PATH
$(error KERNEL_SYMBOLS_TOOL_PATH is not set)
endif
ifndef TARGET
$(error TARGET is not set)
endif
ifndef KERNEL_SYMBOLS_INPUT_ELF
$(error KERNEL_SYMBOLS_INPUT_ELF is not set)
endif
ifndef KERNEL_SYMBOLS_OUTPUT_ELF
$(error KERNEL_SYMBOLS_OUTPUT_ELF is not set)
endif
##--------------------------------------------------------------------------------------------------
## Targets and Prerequisites
##--------------------------------------------------------------------------------------------------
KERNEL_SYMBOLS_MANIFEST = kernel_symbols/Cargo.toml
KERNEL_SYMBOLS_LINKER_SCRIPT = kernel_symbols/kernel_symbols.ld
KERNEL_SYMBOLS_RS = $(KERNEL_SYMBOLS_INPUT_ELF)_symbols.rs
KERNEL_SYMBOLS_DEMANGLED_RS = $(shell pwd)/$(KERNEL_SYMBOLS_INPUT_ELF)_symbols_demangled.rs
KERNEL_SYMBOLS_ELF = target/$(TARGET)/release/kernel_symbols
KERNEL_SYMBOLS_STRIPPED = target/$(TARGET)/release/kernel_symbols_stripped
# Export for build.rs of kernel_symbols crate.
export KERNEL_SYMBOLS_DEMANGLED_RS
##--------------------------------------------------------------------------------------------------
## Command building blocks
##--------------------------------------------------------------------------------------------------
GET_SYMBOLS_SECTION_VIRT_ADDR = $(DOCKER_TOOLS) $(EXEC_SYMBOLS_TOOL) \
--get_symbols_section_virt_addr $(KERNEL_SYMBOLS_OUTPUT_ELF)
RUSTFLAGS = -C link-arg=--script=$(KERNEL_SYMBOLS_LINKER_SCRIPT) \
-C link-arg=--section-start=.rodata=$$($(GET_SYMBOLS_SECTION_VIRT_ADDR))
RUSTFLAGS_PEDANTIC = $(RUSTFLAGS) \
-D warnings \
-D missing_docs
COMPILER_ARGS = --target=$(TARGET) \
--release
RUSTC_CMD = cargo rustc $(COMPILER_ARGS) --manifest-path $(KERNEL_SYMBOLS_MANIFEST)
OBJCOPY_CMD = rust-objcopy \
--strip-all \
-O binary
EXEC_SYMBOLS_TOOL = ruby $(KERNEL_SYMBOLS_TOOL_PATH)/main.rb
##------------------------------------------------------------------------------
## Dockerization
##------------------------------------------------------------------------------
DOCKER_CMD = docker run -t --rm -v $(shell pwd):/work/tutorial -w /work/tutorial
# DOCKER_IMAGE defined in include file (see top of this file).
DOCKER_TOOLS = $(DOCKER_CMD) $(DOCKER_IMAGE)
##--------------------------------------------------------------------------------------------------
## Targets
##--------------------------------------------------------------------------------------------------
.PHONY: all
all:
@cp $(KERNEL_SYMBOLS_INPUT_ELF) $(KERNEL_SYMBOLS_OUTPUT_ELF)
@$(DOCKER_TOOLS) $(EXEC_SYMBOLS_TOOL) --gen_symbols $(KERNEL_SYMBOLS_OUTPUT_ELF) \
$(KERNEL_SYMBOLS_RS)
$(call color_progress_prefix, "Demangling")
@echo Symbol names
@cat $(KERNEL_SYMBOLS_RS) | rustfilt > $(KERNEL_SYMBOLS_DEMANGLED_RS)
@RUSTFLAGS="$(RUSTFLAGS_PEDANTIC)" $(RUSTC_CMD)
$(call color_progress_prefix, "Stripping")
@echo Symbols ELF file
@$(OBJCOPY_CMD) $(KERNEL_SYMBOLS_ELF) $(KERNEL_SYMBOLS_STRIPPED)
@$(DOCKER_TOOLS) $(EXEC_SYMBOLS_TOOL) --patch_data $(KERNEL_SYMBOLS_OUTPUT_ELF) \
$(KERNEL_SYMBOLS_STRIPPED)
$(call color_progress_prefix, "Finished")

@ -0,0 +1,15 @@
[package]
name = "kernel_symbols"
version = "0.1.0"
edition = "2021"
[features]
default = []
generated_symbols_available = []
##--------------------------------------------------------------------------------------------------
## Dependencies
##--------------------------------------------------------------------------------------------------
[dependencies]
debug-symbol-types = { path = "../libraries/debug-symbol-types" }

@ -0,0 +1,14 @@
use std::{env, path::Path};
fn main() {
if let Ok(path) = env::var("KERNEL_SYMBOLS_DEMANGLED_RS") {
if Path::new(&path).exists() {
println!("cargo:rustc-cfg=feature=\"generated_symbols_available\"")
}
}
println!(
"cargo:rerun-if-changed={}",
Path::new("kernel_symbols.ld").display()
);
}

@ -0,0 +1,15 @@
/* SPDX-License-Identifier: MIT OR Apache-2.0
*
* Copyright (c) 2022 Andre Richter <andre.o.richter@gmail.com>
*/
SECTIONS
{
.rodata : {
ASSERT(. > 0xffffffff00000000, "Expected higher half address")
KEEP(*(.rodata.symbol_desc*))
. = ALIGN(8);
*(.rodata*)
}
}

@ -0,0 +1,16 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2022 Andre Richter <andre.o.richter@gmail.com>
//! Generation of kernel symbols.
#![no_std]
#![no_main]
#[cfg(feature = "generated_symbols_available")]
include!(env!("KERNEL_SYMBOLS_DEMANGLED_RS"));
#[panic_handler]
fn panic(_info: &core::panic::PanicInfo) -> ! {
unimplemented!()
}

@ -0,0 +1,4 @@
[package]
name = "debug-symbol-types"
version = "0.1.0"
edition = "2021"

@ -0,0 +1,39 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2022 Andre Richter <andre.o.richter@gmail.com>
//! Types for implementing debug symbol support.
#![no_std]
use core::ops::Range;
/// A symbol containing a size.
#[repr(C)]
pub struct Symbol {
addr_range: Range<usize>,
name: &'static str,
}
impl Symbol {
/// Create an instance.
pub const fn new(start: usize, size: usize, name: &'static str) -> Symbol {
Symbol {
addr_range: Range {
start,
end: start + size,
},
name,
}
}
/// Returns true if addr is contained in the range.
pub fn contains(&self, addr: usize) -> bool {
self.addr_range.contains(&addr)
}
/// Returns the symbol's name.
pub fn name(&self) -> &'static str {
self.name
}
}

@ -0,0 +1,14 @@
[package]
name = "test-macros"
version = "0.1.0"
authors = ["Andre Richter <andre.o.richter@gmail.com>"]
edition = "2021"
[lib]
proc-macro = true
[dependencies]
proc-macro2 = "1.x"
quote = "1.x"
syn = { version = "1.x", features = ["full"] }
test-types = { path = "../test-types" }

@ -0,0 +1,29 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2019-2022 Andre Richter <andre.o.richter@gmail.com>
use proc_macro::TokenStream;
use proc_macro2::Span;
use quote::quote;
use syn::{parse_macro_input, Ident, ItemFn};
#[proc_macro_attribute]
pub fn kernel_test(_attr: TokenStream, input: TokenStream) -> TokenStream {
let f = parse_macro_input!(input as ItemFn);
let test_name = &format!("{}", f.sig.ident);
let test_ident = Ident::new(
&format!("{}_TEST_CONTAINER", f.sig.ident.to_string().to_uppercase()),
Span::call_site(),
);
let test_code_block = f.block;
quote!(
#[test_case]
const #test_ident: test_types::UnitTest = test_types::UnitTest {
name: #test_name,
test_func: || #test_code_block,
};
)
.into()
}
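
For illustration, a test written as `#[kernel_test] fn foo() { assert!(true) }` expands to roughly the following, based on the quote! block above (the exact token layout may differ):

// Approximate expansion (illustrative only).
#[test_case]
const FOO_TEST_CONTAINER: test_types::UnitTest = test_types::UnitTest {
    name: "foo",
    test_func: || {
        assert!(true)
    },
};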

@ -0,0 +1,5 @@
[package]
name = "test-types"
version = "0.1.0"
authors = ["Andre Richter <andre.o.richter@gmail.com>"]
edition = "2021"

@ -0,0 +1,16 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2019-2022 Andre Richter <andre.o.richter@gmail.com>
//! Types for the `custom_test_frameworks` implementation.
#![no_std]
/// Unit test container.
pub struct UnitTest {
/// Name of the test.
pub name: &'static str,
/// Function pointer to the test.
pub test_func: fn(),
}

@ -0,0 +1,45 @@
# frozen_string_literal: true
# SPDX-License-Identifier: MIT OR Apache-2.0
#
# Copyright (c) 2022 Andre Richter <andre.o.richter@gmail.com>
def generate_symbols(kernel_elf, output_file)
File.open(output_file, 'w') do |file|
header = <<~HEREDOC
use debug_symbol_types::Symbol;
# [no_mangle]
# [link_section = ".rodata.symbol_desc"]
static KERNEL_SYMBOLS: [Symbol; #{kernel_elf.num_symbols}] = [
HEREDOC
file.write(header)
kernel_elf.symbols.each do |sym|
value = sym.header.st_value
size = sym.header.st_size
name = sym.name
file.write(" Symbol::new(#{value}, #{size}, \"#{name}\"),\n")
end
file.write("];\n")
end
end
def get_symbols_section_virt_addr(kernel_elf)
kernel_elf.kernel_symbols_section_virt_addr
end
def patch_symbol_data(kernel_elf, symbols_blob_path)
symbols_blob = File.binread(symbols_blob_path)
raise if symbols_blob.size > kernel_elf.kernel_symbols_section_size
File.binwrite(kernel_elf.path, File.binread(symbols_blob_path),
kernel_elf.kernel_symbols_section_offset_in_file)
end
def patch_num_symbols(kernel_elf)
num_packed = [kernel_elf.num_symbols].pack('Q<*') # "Q" == uint64_t, "<" == little endian
File.binwrite(kernel_elf.path, num_packed, kernel_elf.num_kernel_symbols_offset_in_file)
end
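
After the Makefile pipes this output through rustfilt, the demangled file that the kernel_symbols crate includes looks roughly like the following sketch; the two entries, their addresses and sizes are made-up placeholders, while the real file holds one entry per non-zero-sized symbol.

use debug_symbol_types::Symbol;

# [no_mangle]
# [link_section = ".rodata.symbol_desc"]
static KERNEL_SYMBOLS: [Symbol; 2] = [
    Symbol::new(18446744071562067968, 248, "libkernel::common::is_aligned"),
    Symbol::new(18446744071562068216, 96, "libkernel::version"),
];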

@ -0,0 +1,74 @@
# frozen_string_literal: true
# SPDX-License-Identifier: MIT OR Apache-2.0
#
# Copyright (c) 2021-2022 Andre Richter <andre.o.richter@gmail.com>
# KernelELF
class KernelELF
attr_reader :path
def initialize(kernel_elf_path, kernel_symbols_section, num_kernel_symbols)
@elf = ELFTools::ELFFile.new(File.open(kernel_elf_path))
@symtab_section = @elf.section_by_name('.symtab')
@path = kernel_elf_path
fetch_values(kernel_symbols_section, num_kernel_symbols)
end
private
def fetch_values(kernel_symbols_section, num_kernel_symbols)
sym = @symtab_section.symbol_by_name(num_kernel_symbols)
raise "Symbol \"#{num_kernel_symbols}\" not found" if sym.nil?
@num_kernel_symbols = sym
section = @elf.section_by_name(kernel_symbols_section)
raise "Section \"#{kernel_symbols_section}\" not found" if section.nil?
@kernel_symbols_section = section
end
def num_kernel_symbols_virt_addr
@num_kernel_symbols.header.st_value
end
def segment_containing_virt_addr(virt_addr)
@elf.each_segments do |segment|
return segment if segment.vma_in?(virt_addr)
end
end
def virt_addr_to_file_offset(virt_addr)
segment = segment_containing_virt_addr(virt_addr)
segment.vma_to_offset(virt_addr)
end
public
def symbols
non_zero_symbols = @symtab_section.symbols.reject { |sym| sym.header.st_size.zero? }
non_zero_symbols.sort_by { |sym| sym.header.st_value }
end
def num_symbols
symbols.size
end
def kernel_symbols_section_virt_addr
@kernel_symbols_section.header.sh_addr.to_i
end
def kernel_symbols_section_size
@kernel_symbols_section.header.sh_size.to_i
end
def kernel_symbols_section_offset_in_file
virt_addr_to_file_offset(kernel_symbols_section_virt_addr)
end
def num_kernel_symbols_offset_in_file
virt_addr_to_file_offset(num_kernel_symbols_virt_addr)
end
end

@ -0,0 +1,47 @@
#!/usr/bin/env ruby
# frozen_string_literal: true
# SPDX-License-Identifier: MIT OR Apache-2.0
#
# Copyright (c) 2022 Andre Richter <andre.o.richter@gmail.com>
require 'rubygems'
require 'bundler/setup'
require 'colorize'
require 'elftools'
require_relative 'kernel_elf'
require_relative 'cmds'
KERNEL_SYMBOLS_SECTION = '.kernel_symbols'
NUM_KERNEL_SYMBOLS = 'NUM_KERNEL_SYMBOLS'
cmd = ARGV[0]
kernel_elf_path = ARGV[1]
kernel_elf = KernelELF.new(kernel_elf_path, KERNEL_SYMBOLS_SECTION, NUM_KERNEL_SYMBOLS)
case cmd
when '--gen_symbols'
output_file = ARGV[2]
print 'Generating'.rjust(12).green.bold
puts ' Symbols source file'
generate_symbols(kernel_elf, output_file)
when '--get_symbols_section_virt_addr'
addr = get_symbols_section_virt_addr(kernel_elf)
puts "0x#{addr.to_s(16)}"
when '--patch_data'
symbols_blob_path = ARGV[2]
num_symbols = kernel_elf.num_symbols
print 'Patching'.rjust(12).green.bold
puts " Symbols blob and number of symbols (#{num_symbols}) into ELF"
patch_symbol_data(kernel_elf, symbols_blob_path)
patch_num_symbols(kernel_elf)
else
raise
end

@ -0,0 +1,314 @@
# frozen_string_literal: true
# SPDX-License-Identifier: MIT OR Apache-2.0
#
# Copyright (c) 2021-2022 Andre Richter <andre.o.richter@gmail.com>
# Bitfield manipulation.
class BitField
def initialize
@value = 0
end
def self.attr_bitfield(name, offset, num_bits)
define_method("#{name}=") do |bits|
mask = (2**num_bits) - 1
raise "Input out of range: #{name} = 0x#{bits.to_s(16)}" if (bits & ~mask).positive?
# Clear bitfield
@value &= ~(mask << offset)
# Set it
@value |= (bits << offset)
end
end
def to_i
@value
end
def size_in_byte
8
end
end
# An array class that knows its memory location.
class CArray < Array
attr_reader :phys_start_addr
def initialize(phys_start_addr, size, &block)
@phys_start_addr = phys_start_addr
super(size, &block)
end
def size_in_byte
inject(0) { |sum, n| sum + n.size_in_byte }
end
end
#---------------------------------------------------------------------------------------------------
# Arch::
#---------------------------------------------------------------------------------------------------
module Arch
#---------------------------------------------------------------------------------------------------
# Arch::ARMv8
#---------------------------------------------------------------------------------------------------
module ARMv8
# ARMv8 Table Descriptor.
class Stage1TableDescriptor < BitField
module NextLevelTableAddr
OFFSET = 16
NUMBITS = 32
end
module Type
OFFSET = 1
NUMBITS = 1
BLOCK = 0
TABLE = 1
end
module Valid
OFFSET = 0
NUMBITS = 1
FALSE = 0
TRUE = 1
end
attr_bitfield(:__next_level_table_addr, NextLevelTableAddr::OFFSET, NextLevelTableAddr::NUMBITS)
attr_bitfield(:type, Type::OFFSET, Type::NUMBITS)
attr_bitfield(:valid, Valid::OFFSET, Valid::NUMBITS)
def next_level_table_addr=(addr)
addr = addr >> Granule64KiB::SHIFT
self.__next_level_table_addr = addr
end
private :__next_level_table_addr=
end
# ARMv8 level 3 page descriptor.
class Stage1PageDescriptor < BitField
module UXN
OFFSET = 54
NUMBITS = 1
FALSE = 0
TRUE = 1
end
module PXN
OFFSET = 53
NUMBITS = 1
FALSE = 0
TRUE = 1
end
module OutputAddr
OFFSET = 16
NUMBITS = 32
end
module AF
OFFSET = 10
NUMBITS = 1
FALSE = 0
TRUE = 1
end
module SH
OFFSET = 8
NUMBITS = 2
INNER_SHAREABLE = 0b11
end
module AP
OFFSET = 6
NUMBITS = 2
RW_EL1 = 0b00
RO_EL1 = 0b10
end
module AttrIndx
OFFSET = 2
NUMBITS = 3
end
module Type
OFFSET = 1
NUMBITS = 1
RESERVED_INVALID = 0
PAGE = 1
end
module Valid
OFFSET = 0
NUMBITS = 1
FALSE = 0
TRUE = 1
end
attr_bitfield(:uxn, UXN::OFFSET, UXN::NUMBITS)
attr_bitfield(:pxn, PXN::OFFSET, PXN::NUMBITS)
attr_bitfield(:__output_addr, OutputAddr::OFFSET, OutputAddr::NUMBITS)
attr_bitfield(:af, AF::OFFSET, AF::NUMBITS)
attr_bitfield(:sh, SH::OFFSET, SH::NUMBITS)
attr_bitfield(:ap, AP::OFFSET, AP::NUMBITS)
attr_bitfield(:attr_indx, AttrIndx::OFFSET, AttrIndx::NUMBITS)
attr_bitfield(:type, Type::OFFSET, Type::NUMBITS)
attr_bitfield(:valid, Valid::OFFSET, Valid::NUMBITS)
def output_addr=(addr)
addr = addr >> Granule64KiB::SHIFT
self.__output_addr = addr
end
private :__output_addr=
end
# Translation table representing the structure defined in translation_table.rs.
class TranslationTable
module MAIR
NORMAL = 1
end
def initialize
do_sanity_checks
num_lvl2_tables = BSP.kernel_virt_addr_space_size >> Granule512MiB::SHIFT
@lvl3 = new_lvl3(num_lvl2_tables, BSP.phys_addr_of_kernel_tables)
@lvl2_phys_start_addr = @lvl3.phys_start_addr + @lvl3.size_in_byte
@lvl2 = new_lvl2(num_lvl2_tables, @lvl2_phys_start_addr)
populate_lvl2_entries
end
def map_at(virt_region, phys_region, attributes)
return if virt_region.empty?
raise if virt_region.size != phys_region.size
raise if phys_region.last > BSP.phys_addr_space_end_page
virt_region.zip(phys_region).each do |virt_page, phys_page|
desc = page_descriptor_from(virt_page)
set_lvl3_entry(desc, phys_page, attributes)
end
end
def to_binary
data = @lvl3.flatten.map(&:to_i) + @lvl2.map(&:to_i)
data.pack('Q<*') # "Q" == uint64_t, "<" == little endian
end
def phys_tables_base_addr_binary
[@lvl2_phys_start_addr].pack('Q<*') # "Q" == uint64_t, "<" == little endian
end
def phys_tables_base_addr
@lvl2_phys_start_addr
end
private
def do_sanity_checks
raise unless BSP.kernel_granule::SIZE == Granule64KiB::SIZE
raise unless (BSP.kernel_virt_addr_space_size % Granule512MiB::SIZE).zero?
end
def new_lvl3(num_lvl2_tables, start_addr)
CArray.new(start_addr, num_lvl2_tables) do
temp = CArray.new(start_addr, 8192) do
Stage1PageDescriptor.new
end
start_addr += temp.size_in_byte
temp
end
end
def new_lvl2(num_lvl2_tables, start_addr)
CArray.new(start_addr, num_lvl2_tables) do
Stage1TableDescriptor.new
end
end
def populate_lvl2_entries
@lvl2.each_with_index do |descriptor, i|
descriptor.next_level_table_addr = @lvl3[i].phys_start_addr
descriptor.type = Stage1TableDescriptor::Type::TABLE
descriptor.valid = Stage1TableDescriptor::Valid::TRUE
end
end
def lvl2_lvl3_index_from(addr)
addr -= BSP.kernel_virt_start_addr
lvl2_index = addr >> Granule512MiB::SHIFT
lvl3_index = (addr & Granule512MiB::MASK) >> Granule64KiB::SHIFT
raise unless lvl2_index < @lvl2.size
[lvl2_index, lvl3_index]
end
def page_descriptor_from(virt_addr)
lvl2_index, lvl3_index = lvl2_lvl3_index_from(virt_addr)
@lvl3[lvl2_index][lvl3_index]
end
# rubocop:disable Metrics/MethodLength
def set_attributes(desc, attributes)
case attributes.mem_attributes
when :CacheableDRAM
desc.sh = Stage1PageDescriptor::SH::INNER_SHAREABLE
desc.attr_indx = MAIR::NORMAL
else
raise 'Invalid input'
end
desc.ap = case attributes.acc_perms
when :ReadOnly
Stage1PageDescriptor::AP::RO_EL1
when :ReadWrite
Stage1PageDescriptor::AP::RW_EL1
else
raise 'Invalid input'
end
desc.pxn = if attributes.execute_never
Stage1PageDescriptor::PXN::TRUE
else
Stage1PageDescriptor::PXN::FALSE
end
desc.uxn = Stage1PageDescriptor::UXN::TRUE
end
# rubocop:enable Metrics/MethodLength
def set_lvl3_entry(desc, output_addr, attributes)
desc.output_addr = output_addr
desc.af = Stage1PageDescriptor::AF::TRUE
desc.type = Stage1PageDescriptor::Type::PAGE
desc.valid = Stage1PageDescriptor::Valid::TRUE
set_attributes(desc, attributes)
end
end
end
end
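
As a worked example of lvl2_lvl3_index_from above: with Granule64KiB::SHIFT = 16 and Granule512MiB::SHIFT = 29 (from generic.rb), a virtual address lying 0x2010_0000 bytes above kernel_virt_start_addr gives lvl2_index = 0x2010_0000 >> 29 = 1 and lvl3_index = (0x2010_0000 & 0x1FFF_FFFF) >> 16 = 16, i.e. the 17th descriptor in the second lvl3 table. The offset value is chosen purely for illustration.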

@ -0,0 +1,50 @@
# frozen_string_literal: true
# SPDX-License-Identifier: MIT OR Apache-2.0
#
# Copyright (c) 2021-2022 Andre Richter <andre.o.richter@gmail.com>
# Raspberry Pi 3 + 4
class RaspberryPi
attr_reader :kernel_granule, :kernel_virt_addr_space_size, :kernel_virt_start_addr
MEMORY_SRC = File.read('kernel/src/bsp/raspberrypi/memory.rs').split("\n")
def initialize
@kernel_granule = Granule64KiB
@kernel_virt_addr_space_size = KERNEL_ELF.symbol_value('__kernel_virt_addr_space_size')
@kernel_virt_start_addr = KERNEL_ELF.symbol_value('__kernel_virt_start_addr')
@virt_addr_of_kernel_tables = KERNEL_ELF.symbol_value('KERNEL_TABLES')
@virt_addr_of_phys_kernel_tables_base_addr = KERNEL_ELF.symbol_value(
'PHYS_KERNEL_TABLES_BASE_ADDR'
)
end
def phys_addr_of_kernel_tables
KERNEL_ELF.virt_to_phys(@virt_addr_of_kernel_tables)
end
def kernel_tables_offset_in_file
KERNEL_ELF.virt_addr_to_file_offset(@virt_addr_of_kernel_tables)
end
def phys_kernel_tables_base_addr_offset_in_file
KERNEL_ELF.virt_addr_to_file_offset(@virt_addr_of_phys_kernel_tables_base_addr)
end
def phys_addr_space_end_page
x = MEMORY_SRC.grep(/pub const END/)
x = case BSP_TYPE
when :rpi3
x[0]
when :rpi4
x[1]
else
raise
end
x.scan(/\d+/).join.to_i(16)
end
end

@ -0,0 +1,179 @@
# frozen_string_literal: true
# SPDX-License-Identifier: MIT OR Apache-2.0
#
# Copyright (c) 2021-2022 Andre Richter <andre.o.richter@gmail.com>
module Granule64KiB
SIZE = 64 * 1024
SHIFT = Math.log2(SIZE).to_i
end
module Granule512MiB
SIZE = 512 * 1024 * 1024
SHIFT = Math.log2(SIZE).to_i
MASK = SIZE - 1
end
# Monkey-patch Integer with some helper functions.
class Integer
def power_of_two?
self[0].zero?
end
def aligned?(alignment)
raise unless alignment.power_of_two?
(self & (alignment - 1)).zero?
end
def align_up(alignment)
raise unless alignment.power_of_two?
(self + alignment - 1) & ~(alignment - 1)
end
def to_hex_underscore(with_leading_zeros: false)
fmt = with_leading_zeros ? '%016x' : '%x'
value = format(fmt, self).to_s.reverse.scan(/.{4}|.+/).join('_').reverse
format('0x%s', value)
end
end
# An array where each value is the start address of a Page.
class MemoryRegion < Array
def initialize(start_addr, size, granule_size)
raise unless start_addr.aligned?(granule_size)
raise unless size.positive?
raise unless (size % granule_size).zero?
num_pages = size / granule_size
super(num_pages) do |i|
(i * granule_size) + start_addr
end
end
end
# Collection of memory attributes.
class AttributeFields
attr_reader :mem_attributes, :acc_perms, :execute_never
def initialize(mem_attributes, acc_perms, execute_never)
@mem_attributes = mem_attributes
@acc_perms = acc_perms
@execute_never = execute_never
end
def to_s
x = case @mem_attributes
when :CacheableDRAM
'C'
else
'?'
end
y = case @acc_perms
when :ReadWrite
'RW'
when :ReadOnly
'RO'
else
'??'
end
z = @execute_never ? 'XN' : 'X '
"#{x} #{y} #{z}"
end
end
# A container that describes a virt-to-phys region mapping.
class MappingDescriptor
@max_section_name_length = 'Sections'.length
class << self
attr_accessor :max_section_name_length
def update_max_section_name_length(length)
@max_section_name_length = [@max_section_name_length, length].max
end
end
attr_reader :name, :virt_region, :phys_region, :attributes
def initialize(name, virt_region, phys_region, attributes)
@name = name
@virt_region = virt_region
@phys_region = phys_region
@attributes = attributes
end
def to_s
name = @name.ljust(self.class.max_section_name_length)
virt_start = @virt_region.first.to_hex_underscore(with_leading_zeros: true)
phys_start = @phys_region.first.to_hex_underscore(with_leading_zeros: true)
size = ((@virt_region.size * 65_536) / 1024).to_s.rjust(3)
"#{name} | #{virt_start} | #{phys_start} | #{size} KiB | #{@attributes}"
end
def self.print_divider
print ' '
print '-' * max_section_name_length
puts '--------------------------------------------------------------------'
end
def self.print_header
print_divider
print ' '
print 'Sections'.center(max_section_name_length)
print ' '
print 'Virt Start Addr'.center(21)
print ' '
print 'Phys Start Addr'.center(21)
print ' '
print 'Size'.center(7)
print ' '
print 'Attr'.center(7)
puts
print_divider
end
end
def kernel_map_binary
mapping_descriptors = KERNEL_ELF.generate_mapping_descriptors
# generate_mapping_descriptors updates the maximum section name length used by the header, so
# print_header must come after this call.
MappingDescriptor.print_header
mapping_descriptors.each do |i|
print 'Generating'.rjust(12).green.bold
print ' '
puts i.to_s
TRANSLATION_TABLES.map_at(i.virt_region, i.phys_region, i.attributes)
end
MappingDescriptor.print_divider
end
def kernel_patch_tables(kernel_elf_path)
print 'Patching'.rjust(12).green.bold
print ' Kernel table struct at ELF file offset '
puts BSP.kernel_tables_offset_in_file.to_hex_underscore
File.binwrite(kernel_elf_path, TRANSLATION_TABLES.to_binary, BSP.kernel_tables_offset_in_file)
end
def kernel_patch_base_addr(kernel_elf_path)
print 'Patching'.rjust(12).green.bold
print ' Kernel tables physical base address start argument to value '
print TRANSLATION_TABLES.phys_tables_base_addr.to_hex_underscore
print ' at ELF file offset '
puts BSP.phys_kernel_tables_base_addr_offset_in_file.to_hex_underscore
File.binwrite(kernel_elf_path, TRANSLATION_TABLES.phys_tables_base_addr_binary,
BSP.phys_kernel_tables_base_addr_offset_in_file)
end

@ -0,0 +1,96 @@
# frozen_string_literal: true
# SPDX-License-Identifier: MIT OR Apache-2.0
#
# Copyright (c) 2021-2022 Andre Richter <andre.o.richter@gmail.com>
# KernelELF
class KernelELF
SECTION_FLAG_ALLOC = 2
def initialize(kernel_elf_path)
@elf = ELFTools::ELFFile.new(File.open(kernel_elf_path))
@symtab_section = @elf.section_by_name('.symtab')
end
def machine
@elf.machine.to_sym
end
def symbol_value(symbol_name)
@symtab_section.symbol_by_name(symbol_name).header.st_value
end
def segment_containing_virt_addr(virt_addr)
@elf.each_segments do |segment|
return segment if segment.vma_in?(virt_addr)
end
end
def virt_to_phys(virt_addr)
segment = segment_containing_virt_addr(virt_addr)
translation_offset = segment.header.p_vaddr - segment.header.p_paddr
virt_addr - translation_offset
end
def virt_addr_to_file_offset(virt_addr)
segment = segment_containing_virt_addr(virt_addr)
segment.vma_to_offset(virt_addr)
end
def sections_in_segment(segment)
head = segment.mem_head
tail = segment.mem_tail
sections = @elf.each_sections.select do |section|
file_offset = section.header.sh_addr
flags = section.header.sh_flags
file_offset >= head && file_offset < tail && (flags & SECTION_FLAG_ALLOC != 0)
end
sections.map(&:name).join(' ')
end
def select_load_segments
@elf.each_segments.select do |segment|
segment.instance_of?(ELFTools::Segments::LoadSegment)
end
end
def segment_get_acc_perms(segment)
if segment.readable? && segment.writable?
:ReadWrite
elsif segment.readable?
:ReadOnly
else
:Invalid
end
end
def update_max_section_name_length(descriptors)
MappingDescriptor.update_max_section_name_length(descriptors.map { |i| i.name.size }.max)
end
def generate_mapping_descriptors
descriptors = select_load_segments.map do |segment|
# Assume each segment is page aligned.
size = segment.mem_size.align_up(BSP.kernel_granule::SIZE)
virt_start_addr = segment.header.p_vaddr
phys_start_addr = segment.header.p_paddr
acc_perms = segment_get_acc_perms(segment)
execute_never = !segment.executable?
section_names = sections_in_segment(segment)
virt_region = MemoryRegion.new(virt_start_addr, size, BSP.kernel_granule::SIZE)
phys_region = MemoryRegion.new(phys_start_addr, size, BSP.kernel_granule::SIZE)
attributes = AttributeFields.new(:CacheableDRAM, acc_perms, execute_never)
MappingDescriptor.new(section_names, virt_region, phys_region, attributes)
end
update_max_section_name_length(descriptors)
descriptors
end
end

@ -0,0 +1,46 @@
#!/usr/bin/env ruby
# frozen_string_literal: true
# SPDX-License-Identifier: MIT OR Apache-2.0
#
# Copyright (c) 2021-2022 Andre Richter <andre.o.richter@gmail.com>
require 'rubygems'
require 'bundler/setup'
require 'colorize'
require 'elftools'
require_relative 'generic'
require_relative 'kernel_elf'
require_relative 'bsp'
require_relative 'arch'
BSP_TYPE = ARGV[0].to_sym
kernel_elf_path = ARGV[1]
start = Time.now
KERNEL_ELF = KernelELF.new(kernel_elf_path)
BSP = case BSP_TYPE
when :rpi3, :rpi4
RaspberryPi.new
else
raise
end
TRANSLATION_TABLES = case KERNEL_ELF.machine
when :AArch64
Arch::ARMv8::TranslationTable.new
else
raise
end
kernel_map_binary
kernel_patch_tables(kernel_elf_path)
kernel_patch_base_addr(kernel_elf_path)
elapsed = Time.now - start
print 'Finished'.rjust(12).green.bold
puts " in #{elapsed.round(2)}s"