From: Tulio A M Mendes Date: Sun, 15 Feb 2026 00:41:33 +0000 (-0300) Subject: feat: enable SMAP (Supervisor Mode Access Prevention) X-Git-Url: https://projects.tadryanom.me/?a=commitdiff_plain;h=c728212cbf93a53a1c680badf210fd81e7997791;p=AdrOS.git feat: enable SMAP (Supervisor Mode Access Prevention) - STAC/CLAC bracket user memory accesses in copy_from_user/copy_to_user - CR4.SMAP enabled when CPU supports it (CPUID leaf 7, EBX bit 20) - g_smap_enabled runtime flag guards STAC/CLAC to avoid #UD on older CPUs - Encoded as raw bytes (.byte 0x0F,0x01,0xCB/CA) for assembler compat - 35/35 smoke tests pass, cppcheck clean --- diff --git a/src/arch/x86/uaccess.c b/src/arch/x86/uaccess.c index 30e953d..71bd537 100644 --- a/src/arch/x86/uaccess.c +++ b/src/arch/x86/uaccess.c @@ -6,6 +6,21 @@ #include +/* Global flag set by hal_cpu_detect_features() when SMAP is enabled in CR4. */ +extern int g_smap_enabled; + +/* STAC sets EFLAGS.AC and CLAC clears it, temporarily permitting + * supervisor access to user pages while SMAP is active.
 * Encoded as raw bytes for compatibility with older assemblers.
 * Only executed when SMAP is actually enabled to avoid #UD. 
*/ +static inline void stac(void) { + if (g_smap_enabled) + __asm__ volatile(".byte 0x0F, 0x01, 0xCB" ::: "memory"); +} +static inline void clac(void) { + if (g_smap_enabled) + __asm__ volatile(".byte 0x0F, 0x01, 0xCA" ::: "memory"); +} + static int x86_user_range_basic_ok(uintptr_t uaddr, size_t len) { if (len == 0) return 1; if (uaddr == 0) return 0; @@ -108,10 +123,12 @@ int copy_from_user(void* dst, const void* src_user, size_t len) { g_uaccess_recover_eip = (uintptr_t)&&uaccess_fault; g_uaccess_active = 1; + stac(); uintptr_t up = (uintptr_t)src_user; for (size_t i = 0; i < len; i++) { ((uint8_t*)dst)[i] = ((const volatile uint8_t*)up)[i]; } + clac(); g_uaccess_active = 0; g_uaccess_recover_eip = 0; @@ -119,6 +136,7 @@ int copy_from_user(void* dst, const void* src_user, size_t len) { return 0; uaccess_fault: + clac(); g_uaccess_active = 0; g_uaccess_faulted = 0; g_uaccess_recover_eip = 0; @@ -134,10 +152,12 @@ int copy_to_user(void* dst_user, const void* src, size_t len) { g_uaccess_recover_eip = (uintptr_t)&&uaccess_fault2; g_uaccess_active = 1; + stac(); uintptr_t up = (uintptr_t)dst_user; for (size_t i = 0; i < len; i++) { ((volatile uint8_t*)up)[i] = ((const uint8_t*)src)[i]; } + clac(); g_uaccess_active = 0; g_uaccess_recover_eip = 0; @@ -145,6 +165,7 @@ int copy_to_user(void* dst_user, const void* src, size_t len) { return 0; uaccess_fault2: + clac(); g_uaccess_active = 0; g_uaccess_faulted = 0; g_uaccess_recover_eip = 0; diff --git a/src/hal/x86/cpu_features.c b/src/hal/x86/cpu_features.c index 2d5762c..01617b9 100644 --- a/src/hal/x86/cpu_features.c +++ b/src/hal/x86/cpu_features.c @@ -17,6 +17,8 @@ static inline void write_cr4(uint32_t val) { __asm__ volatile("mov %0, %%cr4" :: "r"(val) : "memory"); } +int g_smap_enabled = 0; + static struct cpu_features g_features; static struct x86_cpu_features g_x86_features; @@ -59,10 +61,16 @@ void hal_cpu_detect_features(void) { kprintf("[CPU] SMEP enabled.\n"); } - /* SMAP (Supervisor Mode Access Prevention) is 
NOT enabled yet because - * copy_from_user/copy_to_user do not bracket accesses with STAC/CLAC. - * Enabling SMAP without STAC/CLAC would fault on every user memory access. - * TODO: Add STAC/CLAC to x86 uaccess.c, then enable SMAP here. */ + /* Enable SMAP if supported: prevents kernel from accidentally reading/writing + * user-mapped pages. copy_from_user/copy_to_user bracket accesses with + * STAC/CLAC so legitimate user copies still work. */ + if (g_x86_features.smap) { + uint32_t cr4 = read_cr4(); + cr4 |= CR4_SMAP; + write_cr4(cr4); + g_smap_enabled = 1; + kprintf("[CPU] SMAP enabled.\n"); + } } const struct cpu_features* hal_cpu_get_features(void) {