diff --git a/0E_global_println/src/memory/mmu.rs b/0E_global_println/src/memory/mmu.rs
index 4c92a0aa..e44283cc 100644
--- a/0E_global_println/src/memory/mmu.rs
+++ b/0E_global_println/src/memory/mmu.rs
@@ -177,10 +177,10 @@ pub unsafe fn init() {
 
     // Finally, fill the single LVL3 table (4 KiB granule). Differentiate
     // between code+RO and RW pages.
-    let (ro_start_addr, ro_end_addr) = super::get_ro_start_end();
+    let (ro_start_addr, ro_end_addr) = crate::memory::get_ro_start_end();
 
-    let ro_first_page_index = ro_start_addr / super::PAGESIZE;
-    let ro_last_page_index = ro_end_addr / super::PAGESIZE;
+    let ro_first_page_index = ro_start_addr / crate::memory::PAGESIZE;
+    let ro_last_page_index = ro_end_addr / crate::memory::PAGESIZE;
 
     let common = STAGE1_DESCRIPTOR::VALID::True
         + STAGE1_DESCRIPTOR::TYPE::Table
diff --git a/0F_DMA_memory/src/memory/bump_allocator.rs b/0F_DMA_memory/src/memory/bump_allocator.rs
index 23346056..0bab4129 100644
--- a/0F_DMA_memory/src/memory/bump_allocator.rs
+++ b/0F_DMA_memory/src/memory/bump_allocator.rs
@@ -36,7 +36,7 @@ pub struct BumpAllocator {
 
 unsafe impl Alloc for BumpAllocator {
     unsafe fn alloc(&mut self, layout: Layout) -> Result<NonNull<u8>, AllocErr> {
-        let start = super::aligned_addr_unchecked(self.next, layout.align());
+        let start = crate::memory::aligned_addr_unchecked(self.next, layout.align());
         let end = start + layout.size();
 
         if end <= self.pool_end {
diff --git a/0F_DMA_memory/src/memory/mmu.rs b/0F_DMA_memory/src/memory/mmu.rs
index a26a3f08..1ead372f 100644
--- a/0F_DMA_memory/src/memory/mmu.rs
+++ b/0F_DMA_memory/src/memory/mmu.rs
@@ -186,10 +186,10 @@ pub unsafe fn init() {
 
     // Finally, fill the single LVL3 table (4 KiB granule). Differentiate
     // between code+RO and RW pages.
-    let (ro_start_addr, ro_end_addr) = super::get_ro_start_end();
+    let (ro_start_addr, ro_end_addr) = crate::memory::get_ro_start_end();
 
-    let ro_first_page_index = ro_start_addr / super::PAGESIZE;
-    let ro_last_page_index = ro_end_addr / super::PAGESIZE;
+    let ro_first_page_index = ro_start_addr / crate::memory::PAGESIZE;
+    let ro_last_page_index = ro_end_addr / crate::memory::PAGESIZE;
 
     let common = STAGE1_DESCRIPTOR::VALID::True
         + STAGE1_DESCRIPTOR::TYPE::Table
diff --git a/10_exceptions_groundwork/src/memory/bump_allocator.rs b/10_exceptions_groundwork/src/memory/bump_allocator.rs
index 23346056..0bab4129 100644
--- a/10_exceptions_groundwork/src/memory/bump_allocator.rs
+++ b/10_exceptions_groundwork/src/memory/bump_allocator.rs
@@ -36,7 +36,7 @@ pub struct BumpAllocator {
 
 unsafe impl Alloc for BumpAllocator {
     unsafe fn alloc(&mut self, layout: Layout) -> Result<NonNull<u8>, AllocErr> {
-        let start = super::aligned_addr_unchecked(self.next, layout.align());
+        let start = crate::memory::aligned_addr_unchecked(self.next, layout.align());
         let end = start + layout.size();
 
         if end <= self.pool_end {
diff --git a/10_exceptions_groundwork/src/memory/mmu.rs b/10_exceptions_groundwork/src/memory/mmu.rs
index a26a3f08..1ead372f 100644
--- a/10_exceptions_groundwork/src/memory/mmu.rs
+++ b/10_exceptions_groundwork/src/memory/mmu.rs
@@ -186,10 +186,10 @@ pub unsafe fn init() {
 
     // Finally, fill the single LVL3 table (4 KiB granule). Differentiate
     // between code+RO and RW pages.
-    let (ro_start_addr, ro_end_addr) = super::get_ro_start_end();
+    let (ro_start_addr, ro_end_addr) = crate::memory::get_ro_start_end();
 
-    let ro_first_page_index = ro_start_addr / super::PAGESIZE;
-    let ro_last_page_index = ro_end_addr / super::PAGESIZE;
+    let ro_first_page_index = ro_start_addr / crate::memory::PAGESIZE;
+    let ro_last_page_index = ro_end_addr / crate::memory::PAGESIZE;
 
     let common = STAGE1_DESCRIPTOR::VALID::True
         + STAGE1_DESCRIPTOR::TYPE::Table
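
A note on what the patch does: every hunk swaps a relative super:: path for the absolute crate::memory:: path inside src/memory/*. Since these files sit directly under the memory module, both forms spell the same item, so behavior is unchanged; only the path style differs. A self-contained toy tree showing the equivalence (page_index_of is a hypothetical name used purely for illustration, not a function from the repo):

// A toy module tree mirroring src/memory/mmu.rs.
mod memory {
    pub const PAGESIZE: usize = 4096;

    pub mod mmu {
        pub fn page_index_of(addr: usize) -> usize {
            // Relative form, as before the patch:  super::PAGESIZE
            // Absolute form, as after the patch:
            addr / crate::memory::PAGESIZE
        }
    }
}

fn main() {
    assert_eq!(memory::mmu::page_index_of(0x2000), 2);
}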
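
The bump_allocator.rs hunks touch the allocation path of the DMA pool allocator. Below is a minimal sketch of that logic, runnable on stable Rust without the unstable Alloc trait plumbing. The body of aligned_addr_unchecked() is an assumption inferred from its name and call site (round the cursor up to the next multiple of a power-of-two alignment), not copied from the repo:

/// Assumed helper: round `addr` up to the next multiple of `align`,
/// where `align` must be a nonzero power of two (hence "unchecked").
fn aligned_addr_unchecked(addr: usize, align: usize) -> usize {
    (addr + (align - 1)) & !(align - 1)
}

/// Pared-down stand-in for the tutorials' DMA pool allocator.
struct BumpAllocator {
    next: usize,     // first free address in the pool
    pool_end: usize, // one past the last usable address
}

impl BumpAllocator {
    /// Hand out `size` bytes at `align` alignment, or `None` once the
    /// pool is exhausted. Freeing is not supported; the cursor only grows.
    fn alloc(&mut self, size: usize, align: usize) -> Option<usize> {
        let start = aligned_addr_unchecked(self.next, align);
        let end = start + size;

        if end <= self.pool_end {
            self.next = end;
            Some(start)
        } else {
            None
        }
    }
}

fn main() {
    let mut dma = BumpAllocator { next: 0x0020_0000, pool_end: 0x0040_0000 };
    assert_eq!(dma.alloc(1, 16), Some(0x0020_0000));
    // The 1-byte allocation moved the cursor to ...0001; the next request
    // gets re-aligned up to the following 16-byte boundary.
    assert_eq!(dma.alloc(1, 16), Some(0x0020_0010));
}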