From: Tulio A M Mendes
Date: Fri, 6 Feb 2026 18:10:32 +0000 (-0300)
Subject: x86: enforce W^X-like user mappings (text RO after load)
X-Git-Url: https://projects.tadryanom.me/?a=commitdiff_plain;h=e86fd49b61588cf75bbd0b649e4cdf39ba5f2611;p=AdrOS.git

x86: enforce W^X-like user mappings (text RO after load)
---

diff --git a/include/vmm.h b/include/vmm.h
index cd97b9e..2a3ad84 100644
--- a/include/vmm.h
+++ b/include/vmm.h
@@ -22,6 +22,18 @@ void vmm_init(void);
  */
 void vmm_map_page(uint64_t phys, uint64_t virt, uint32_t flags);
 
+/*
+ * Update flags for an already-mapped virtual page.
+ * Keeps the physical frame, only changes PRESENT/RW/USER bits.
+ */
+void vmm_set_page_flags(uint64_t virt, uint32_t flags);
+
+/*
+ * Update flags for an already-mapped virtual range.
+ * vaddr/len may be unaligned.
+ */
+void vmm_protect_range(uint64_t vaddr, uint64_t len, uint32_t flags);
+
 /*
  * Unmap a virtual page.
  */
diff --git a/src/arch/x86/vmm.c b/src/arch/x86/vmm.c
index 365ea6f..b0b85b1 100644
--- a/src/arch/x86/vmm.c
+++ b/src/arch/x86/vmm.c
@@ -39,6 +39,14 @@ static inline void invlpg(uintptr_t vaddr) {
     __asm__ volatile("invlpg (%0)" : : "r" (vaddr) : "memory");
 }
 
+static uint32_t vmm_flags_to_x86(uint32_t flags) {
+    uint32_t x86_flags = 0;
+    if (flags & VMM_FLAG_PRESENT) x86_flags |= X86_PTE_PRESENT;
+    if (flags & VMM_FLAG_RW) x86_flags |= X86_PTE_RW;
+    if (flags & VMM_FLAG_USER) x86_flags |= X86_PTE_USER;
+    return x86_flags;
+}
+
 void vmm_map_page(uint64_t phys, uint64_t virt, uint32_t flags) {
     uint32_t pd_index = virt >> 22;
     uint32_t pt_index = (virt >> 12) & 0x03FF;
@@ -73,16 +81,43 @@ void vmm_map_page(uint64_t phys, uint64_t virt, uint32_t flags) {
 
     // ACCESS SAFETY: Convert to Virtual
     uint32_t* pt = (uint32_t*)P2V(pt_phys);
-
-    uint32_t x86_flags = 0;
-    if (flags & VMM_FLAG_PRESENT) x86_flags |= X86_PTE_PRESENT;
-    if (flags & VMM_FLAG_RW) x86_flags |= X86_PTE_RW;
-    if (flags & VMM_FLAG_USER) x86_flags |= X86_PTE_USER;
-
-    pt[pt_index] = ((uint32_t)phys) | x86_flags;
+
+    pt[pt_index] = ((uint32_t)phys) | vmm_flags_to_x86(flags);
     invlpg(virt);
 }
 
+void vmm_set_page_flags(uint64_t virt, uint32_t flags) {
+    uint32_t pd_index = virt >> 22;
+    uint32_t pt_index = (virt >> 12) & 0x03FF;
+
+    if (!(boot_pd[pd_index] & X86_PTE_PRESENT)) {
+        return;
+    }
+
+    uint32_t pt_phys = boot_pd[pd_index] & 0xFFFFF000;
+    uint32_t* pt = (uint32_t*)P2V(pt_phys);
+
+    uint32_t pte = pt[pt_index];
+    if (!(pte & X86_PTE_PRESENT)) {
+        return;
+    }
+
+    uint32_t phys = pte & 0xFFFFF000;
+    pt[pt_index] = phys | vmm_flags_to_x86(flags);
+    invlpg((uintptr_t)virt);
+}
+
+void vmm_protect_range(uint64_t vaddr, uint64_t len, uint32_t flags) {
+    if (len == 0) return;
+
+    uint64_t start = vaddr & ~0xFFFULL;
+    uint64_t end = (vaddr + len - 1) & ~0xFFFULL;
+    for (uint64_t va = start;; va += 0x1000ULL) {
+        vmm_set_page_flags(va, flags | VMM_FLAG_PRESENT);
+        if (va == end) break;
+    }
+}
+
 void vmm_unmap_page(uint64_t virt) {
     uint32_t pd_index = virt >> 22;
     uint32_t pt_index = (virt >> 12) & 0x03FF;
diff --git a/src/kernel/elf.c b/src/kernel/elf.c
index 2f6adc5..d964248 100644
--- a/src/kernel/elf.c
+++ b/src/kernel/elf.c
@@ -156,6 +156,11 @@ int elf32_load_user_from_initrd(const char* filename, uintptr_t* entry_out, uint
         if (ph[i].p_memsz > ph[i].p_filesz) {
             memset((void*)(uintptr_t)(ph[i].p_vaddr + ph[i].p_filesz), 0, ph[i].p_memsz - ph[i].p_filesz);
         }
+
+        if ((ph[i].p_flags & PF_W) == 0) {
+            vmm_protect_range((uint64_t)(uintptr_t)ph[i].p_vaddr, (uint64_t)ph[i].p_memsz,
+                              VMM_FLAG_USER);
+        }
     }
 
     const uintptr_t user_stack_base = 0x00800000U;
diff --git a/user/linker.ld b/user/linker.ld
index 85b35b6..19b198b 100644
--- a/user/linker.ld
+++ b/user/linker.ld
@@ -2,7 +2,8 @@ ENTRY(_start)
 
 PHDRS
 {
-    text PT_LOAD FLAGS(7);
+    text PT_LOAD FLAGS(5);
+    data PT_LOAD FLAGS(6);
 }
 
 SECTIONS
@@ -19,10 +20,10 @@ SECTIONS
     .data :
     {
        *(.data*)
-    } :text
+    } :data
     .bss :
     {
        *(.bss*)
        *(COMMON)
-    } :text
+    } :data
 }
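
Note on the PHDRS change: FLAGS() takes ELF p_flags bits (PF_X=1, PF_W=2,
PF_R=4), so the old single FLAGS(7) segment was mapped RWX. FLAGS(5) yields a
read-execute text segment and FLAGS(6) a read-write data segment, which is
exactly the PF_W distinction the loader now keys off.

A minimal smoke test for the new calls, as a sketch only: it assumes a spare
physical frame from the PMM and a free user virtual page; TEST_VADDR and the
function name are illustrative and not part of this commit.

#include <stdint.h>
#include "vmm.h"

#define TEST_VADDR 0x00700000ULL /* assumed-free user virtual page */

void vmm_protect_smoke_test(uint64_t spare_frame) {
    /* Map one page user-RW, as the ELF loader does while copying. */
    vmm_map_page(spare_frame, TEST_VADDR,
                 VMM_FLAG_PRESENT | VMM_FLAG_RW | VMM_FLAG_USER);

    volatile uint32_t* p = (volatile uint32_t*)(uintptr_t)TEST_VADDR;
    *p = 0xC0DEu; /* succeeds while the RW bit is set */

    /* Drop RW over an unaligned sub-range: every page touched by
     * [vaddr, vaddr + len) is affected, since vmm_protect_range()
     * rounds start down and includes the page holding the last byte. */
    vmm_protect_range(TEST_VADDR + 0x123, 0x10, VMM_FLAG_USER);

    /* Ring-3 writes to this page now page-fault; ring-0 writes still
     * go through unless CR0.WP is set. */
}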