]> Projects (at) Tadryanom (dot) Me - AdrOS.git/commitdiff
refactor: extract generic VMM wrappers from x86 implementation to src/mm/vmm.c
author: Tulio A M Mendes <[email protected]>
Thu, 12 Feb 2026 07:04:21 +0000 (04:04 -0300)
committer: Tulio A M Mendes <[email protected]>
Fri, 13 Feb 2026 02:44:55 +0000 (23:44 -0300)
Moved vmm_protect_range(), vmm_as_activate(), and vmm_as_map_page()
from src/arch/x86/vmm.c to src/mm/vmm.c. These functions contain only
architecture-independent logic (looping over pages, delegating to HAL
for address space switching).

The x86-specific VMM code (PAE page table manipulation, recursive
mapping, CoW handling, address space create/destroy/clone) remains
in src/arch/x86/vmm.c where it belongs.

This ensures new architectures only need to implement the core
primitives (vmm_init, vmm_map_page, vmm_unmap_page, vmm_set_page_flags,
vmm_as_create_kernel_clone, vmm_as_destroy, vmm_as_clone_user,
vmm_as_clone_user_cow, vmm_handle_cow_fault) and get the wrapper
functions for free.

src/arch/x86/vmm.c
src/mm/vmm.c [new file with mode: 0644]

index bc6f4b7cca063699eeae04a0e4f5c92c6d639dd2..8ba36536255172cb034bf1d6f7c7f355781c7f83 100644 (file)
@@ -178,34 +178,8 @@ void vmm_set_page_flags(uint64_t virt, uint32_t flags) {
     invlpg((uintptr_t)(uint32_t)virt);
 }
 
-void vmm_protect_range(uint64_t vaddr, uint64_t len, uint32_t flags) {
-    if (len == 0) return;
-    uint64_t start = vaddr & ~0xFFFULL;
-    uint64_t end = (vaddr + len - 1) & ~0xFFFULL;
-    for (uint64_t va = start;; va += 0x1000ULL) {
-        vmm_set_page_flags(va, flags | VMM_FLAG_PRESENT);
-        if (va == end) break;
-    }
-}
-
-/* --- Address space management --- */
-
-void vmm_as_activate(uintptr_t as) {
-    if (!as) return;
-    hal_cpu_set_address_space(as);
-}
-
-void vmm_as_map_page(uintptr_t as, uint64_t phys, uint64_t virt, uint32_t flags) {
-    if (!as) return;
-    uintptr_t old_as = hal_cpu_get_address_space();
-    if ((old_as & ~(uintptr_t)0x1FU) != (as & ~(uintptr_t)0x1FU)) {
-        vmm_as_activate(as);
-        vmm_map_page(phys, virt, flags);
-        vmm_as_activate(old_as);
-    } else {
-        vmm_map_page(phys, virt, flags);
-    }
-}
+/* vmm_protect_range, vmm_as_activate, vmm_as_map_page are
+ * architecture-independent and live in src/mm/vmm.c. */
 
 /*
  * Create a new address space (PDPT + 4 PDs) that shares all kernel mappings
diff --git a/src/mm/vmm.c b/src/mm/vmm.c
new file mode 100644 (file)
index 0000000..8385609
--- /dev/null
@@ -0,0 +1,37 @@
+#include "vmm.h"
+#include "hal/cpu.h"
+
+/*
+ * Architecture-independent VMM wrappers.
+ *
+ * These functions implement generic logic on top of the arch-specific
+ * primitives (vmm_map_page, vmm_set_page_flags, etc.) which are
+ * provided by src/arch/<ARCH>/vmm.c.
+ */
+
+void vmm_protect_range(uint64_t vaddr, uint64_t len, uint32_t flags) {
+    if (len == 0) return;
+    uint64_t start = vaddr & ~0xFFFULL;
+    uint64_t end = (vaddr + len - 1) & ~0xFFFULL;
+    for (uint64_t va = start;; va += 0x1000ULL) {
+        vmm_set_page_flags(va, flags | VMM_FLAG_PRESENT);
+        if (va == end) break;
+    }
+}
+
/*
 * Switch the CPU to the given address space via the HAL.
 * A zero (null) handle is ignored — callers may pass an unset handle safely.
 */
void vmm_as_activate(uintptr_t as) {
    if (as) {
        hal_cpu_set_address_space(as);
    }
}
+
/*
 * Map phys -> virt with the given flags inside address space `as`.
 *
 * If `as` is not the currently active address space, we temporarily switch
 * to it, perform the mapping, and switch back; otherwise we map directly.
 * A zero handle is a no-op.
 *
 * NOTE(review): the former x86 implementation compared handles with the low
 * 5 bits masked off (& ~0x1FU, the CR3 flag bits). This generic version
 * compares raw handles; a stale flag bit would only cause a redundant
 * switch round-trip, not a wrong mapping — confirm HAL handles never carry
 * flag bits.
 */
void vmm_as_map_page(uintptr_t as, uint64_t phys, uint64_t virt, uint32_t flags) {
    if (!as) {
        return;
    }
    uintptr_t current = hal_cpu_get_address_space();
    if (current == as) {
        /* Already in the target address space — map in place. */
        vmm_map_page(phys, virt, flags);
        return;
    }
    vmm_as_activate(as);
    vmm_map_page(phys, virt, flags);
    vmm_as_activate(current);
}