From e442ea159b5b00603e62b57bdee67758fb6ada26 Mon Sep 17 00:00:00 2001
From: Tulio A M Mendes
Date: Mon, 9 Feb 2026 21:02:00 -0300
Subject: [PATCH] uaccess: recover from kernel page faults during copy_*_user

---
 include/uaccess.h    |  5 +++++
 src/arch/x86/idt.c   | 10 +++++++--
 src/kernel/uaccess.c | 52 ++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 65 insertions(+), 2 deletions(-)

diff --git a/include/uaccess.h b/include/uaccess.h
index 99016dd..013e6c5 100644
--- a/include/uaccess.h
+++ b/include/uaccess.h
@@ -2,9 +2,14 @@
 #define UACCESS_H
 
 #include <stddef.h>
+#include <stdint.h>
+
+struct registers;
 
 int user_range_ok(const void* user_ptr, size_t len);
 int copy_from_user(void* dst, const void* src_user, size_t len);
 int copy_to_user(void* dst_user, const void* src, size_t len);
 
+int uaccess_try_recover(uintptr_t fault_addr, struct registers* regs);
+
 #endif
diff --git a/src/arch/x86/idt.c b/src/arch/x86/idt.c
index c0e32ac..a926003 100644
--- a/src/arch/x86/idt.c
+++ b/src/arch/x86/idt.c
@@ -324,17 +324,23 @@ void isr_handler(struct registers* regs) {
         // If page fault came from ring3, convert it into a SIGSEGV delivery.
         // Default action for SIGSEGV will terminate the process, but a user
         // handler installed via sigaction() must be respected.
+        uint32_t cr2;
+        __asm__ volatile("mov %%cr2, %0" : "=r"(cr2));
+
         if ((regs->cs & 3U) == 3U) {
             const int SIG_SEGV = 11;
             if (current_process) {
-                uint32_t cr2;
-                __asm__ volatile("mov %%cr2, %0" : "=r"(cr2));
                 current_process->last_fault_addr = (uintptr_t)cr2;
                 current_process->sig_pending_mask |= (1U << (uint32_t)SIG_SEGV);
             }
             deliver_signals_to_usermode(regs);
             return;
         }
+
+        // Kernel-mode page faults during copy_{to,from}_user should not panic.
+        if (uaccess_try_recover((uintptr_t)cr2, regs)) {
+            return;
+        }
     }
 
     __asm__ volatile("cli"); // Stop everything
diff --git a/src/kernel/uaccess.c b/src/kernel/uaccess.c
index d7b578f..9ec48db 100644
--- a/src/kernel/uaccess.c
+++ b/src/kernel/uaccess.c
@@ -1,6 +1,7 @@
 #include "uaccess.h"
 
 #include "errno.h"
+#include "idt.h"
 
 #include <stdint.h>
 
@@ -17,6 +18,29 @@ static int x86_user_range_basic_ok(uintptr_t uaddr, size_t len) {
     return 1;
 }
 
+static volatile int g_uaccess_active = 0;
+static volatile int g_uaccess_faulted = 0;
+static volatile uintptr_t g_uaccess_recover_eip = 0;
+
+int uaccess_try_recover(uintptr_t fault_addr, struct registers* regs) {
+    (void)fault_addr;
+    if (!regs) return 0;
+
+#if defined(__i386__)
+    if (g_uaccess_active == 0) return 0;
+    if (g_uaccess_recover_eip == 0) return 0;
+
+    // Only recover faults on user addresses; kernel faults should still panic.
+    if (fault_addr >= X86_KERNEL_VIRT_BASE) return 0;
+
+    g_uaccess_faulted = 1;
+    regs->eip = (uint32_t)g_uaccess_recover_eip;
+    return 1;
+#else
+    return 0;
+#endif
+}
+
 static int x86_user_page_writable_user(uintptr_t vaddr) {
     volatile uint32_t* pd = (volatile uint32_t*)0xFFFFF000U;
     volatile uint32_t* pt_base = (volatile uint32_t*)0xFFC00000U;
@@ -94,11 +118,25 @@ int copy_from_user(void* dst, const void* src_user, size_t len) {
     if (len == 0) return 0;
     if (!user_range_ok(src_user, len)) return -EFAULT;
 
+    g_uaccess_faulted = 0;
+    g_uaccess_recover_eip = (uintptr_t)&&uaccess_fault;
+    g_uaccess_active = 1;
+
     uintptr_t up = (uintptr_t)src_user;
     for (size_t i = 0; i < len; i++) {
         ((uint8_t*)dst)[i] = ((const volatile uint8_t*)up)[i];
     }
+
+    g_uaccess_active = 0;
+    g_uaccess_recover_eip = 0;
+    if (g_uaccess_faulted) return -EFAULT;
     return 0;
+
+uaccess_fault:
+    g_uaccess_active = 0;
+    g_uaccess_faulted = 0;
+    g_uaccess_recover_eip = 0;
+    return -EFAULT;
 }
 
 int copy_to_user(void* dst_user, const void* src, size_t len) {
@@ -110,9 +148,23 @@ int copy_to_user(void* dst_user, const void* src, size_t len) {
     if (!user_range_ok(dst_user, len)) return -EFAULT;
 #endif
 
+    g_uaccess_faulted = 0;
+    g_uaccess_recover_eip = (uintptr_t)&&uaccess_fault2;
+    g_uaccess_active = 1;
+
     uintptr_t up = (uintptr_t)dst_user;
     for (size_t i = 0; i < len; i++) {
         ((volatile uint8_t*)up)[i] = ((const uint8_t*)src)[i];
     }
+
+    g_uaccess_active = 0;
+    g_uaccess_recover_eip = 0;
+    if (g_uaccess_faulted) return -EFAULT;
     return 0;
+
+uaccess_fault2:
+    g_uaccess_active = 0;
+    g_uaccess_faulted = 0;
+    g_uaccess_recover_eip = 0;
+    return -EFAULT;
 }
-- 
2.43.0