From: Tulio A M Mendes Date: Thu, 12 Feb 2026 07:04:21 +0000 (-0300) Subject: refactor: extract generic VMM wrappers from x86 implementation to src/mm/vmm.c X-Git-Url: https://projects.tadryanom.me/sitemap.xml?a=commitdiff_plain;h=98ac9a0ae1ea557fbe6044d97792e831111e5023;p=AdrOS.git refactor: extract generic VMM wrappers from x86 implementation to src/mm/vmm.c Moved vmm_protect_range(), vmm_as_activate(), and vmm_as_map_page() from src/arch/x86/vmm.c to src/mm/vmm.c. These functions contain only architecture-independent logic (looping over pages, delegating to HAL for address space switching). The x86-specific VMM code (PAE page table manipulation, recursive mapping, CoW handling, address space create/destroy/clone) remains in src/arch/x86/vmm.c where it belongs. This ensures new architectures only need to implement the core primitives (vmm_init, vmm_map_page, vmm_unmap_page, vmm_set_page_flags, vmm_as_create_kernel_clone, vmm_as_destroy, vmm_as_clone_user, vmm_as_clone_user_cow, vmm_handle_cow_fault) and get the wrapper functions for free. Note: the generic vmm_as_map_page() compares address spaces with plain equality, whereas the x86 version previously masked off the low 5 bits (CR3 flag bits) before comparing; on x86 this may now take the switch path redundantly when only the flag bits differ, but the resulting mapping is unchanged. 
--- diff --git a/src/arch/x86/vmm.c b/src/arch/x86/vmm.c index bc6f4b7..8ba3653 100644 --- a/src/arch/x86/vmm.c +++ b/src/arch/x86/vmm.c @@ -178,34 +178,8 @@ void vmm_set_page_flags(uint64_t virt, uint32_t flags) { invlpg((uintptr_t)(uint32_t)virt); } -void vmm_protect_range(uint64_t vaddr, uint64_t len, uint32_t flags) { - if (len == 0) return; - uint64_t start = vaddr & ~0xFFFULL; - uint64_t end = (vaddr + len - 1) & ~0xFFFULL; - for (uint64_t va = start;; va += 0x1000ULL) { - vmm_set_page_flags(va, flags | VMM_FLAG_PRESENT); - if (va == end) break; - } -} - -/* --- Address space management --- */ - -void vmm_as_activate(uintptr_t as) { - if (!as) return; - hal_cpu_set_address_space(as); -} - -void vmm_as_map_page(uintptr_t as, uint64_t phys, uint64_t virt, uint32_t flags) { - if (!as) return; - uintptr_t old_as = hal_cpu_get_address_space(); - if ((old_as & ~(uintptr_t)0x1FU) != (as & ~(uintptr_t)0x1FU)) { - vmm_as_activate(as); - vmm_map_page(phys, virt, flags); - vmm_as_activate(old_as); - } else { - vmm_map_page(phys, virt, flags); - } -} +/* vmm_protect_range, vmm_as_activate, vmm_as_map_page are + * architecture-independent and live in src/mm/vmm.c. */ /* * Create a new address space (PDPT + 4 PDs) that shares all kernel mappings diff --git a/src/mm/vmm.c b/src/mm/vmm.c new file mode 100644 index 0000000..8385609 --- /dev/null +++ b/src/mm/vmm.c @@ -0,0 +1,37 @@ +#include "vmm.h" +#include "hal/cpu.h" + +/* + * Architecture-independent VMM wrappers. + * + * These functions implement generic logic on top of the arch-specific + * primitives (vmm_map_page, vmm_set_page_flags, etc.) which are + * provided by src/arch/<arch>/vmm.c. 
+ */ + +void vmm_protect_range(uint64_t vaddr, uint64_t len, uint32_t flags) { + if (len == 0) return; + uint64_t start = vaddr & ~0xFFFULL; + uint64_t end = (vaddr + len - 1) & ~0xFFFULL; + for (uint64_t va = start;; va += 0x1000ULL) { + vmm_set_page_flags(va, flags | VMM_FLAG_PRESENT); + if (va == end) break; + } +} + +void vmm_as_activate(uintptr_t as) { + if (!as) return; + hal_cpu_set_address_space(as); +} + +void vmm_as_map_page(uintptr_t as, uint64_t phys, uint64_t virt, uint32_t flags) { + if (!as) return; + uintptr_t old_as = hal_cpu_get_address_space(); + if (old_as != as) { + vmm_as_activate(as); + vmm_map_page(phys, virt, flags); + vmm_as_activate(old_as); + } else { + vmm_map_page(phys, virt, flags); + } +}