// NOTE(review): no matching "#ifndef UACCESS_H" is visible in this chunk —
// confirm the include-guard open exists above this point.
#define UACCESS_H
#include <stddef.h>
+#include <stdint.h>
+
+// Trap-frame layout; defined by the interrupt/IDT code.
+struct registers;
// Non-zero if [user_ptr, user_ptr+len) is an acceptable user-space range.
int user_range_ok(const void* user_ptr, size_t len);
// Copy len bytes from user memory into kernel dst; returns 0 or -EFAULT.
int copy_from_user(void* dst, const void* src_user, size_t len);
// Copy len bytes from kernel src into user memory; returns 0 or -EFAULT.
int copy_to_user(void* dst_user, const void* src, size_t len);
+// Called from the page-fault handler for kernel-mode faults: returns 1 if
+// the fault occurred inside a copy_{to,from}_user window and the saved
+// state in regs was redirected to the recovery path, 0 otherwise.
+int uaccess_try_recover(uintptr_t fault_addr, struct registers* regs);
+
#endif
// If page fault came from ring3, convert it into a SIGSEGV delivery.
// Default action for SIGSEGV will terminate the process, but a user
// handler installed via sigaction() must be respected.
+ uint32_t cr2;
+ __asm__ volatile("mov %%cr2, %0" : "=r"(cr2));
+
if ((regs->cs & 3U) == 3U) {
const int SIG_SEGV = 11;
if (current_process) {
- uint32_t cr2;
- __asm__ volatile("mov %%cr2, %0" : "=r"(cr2));
current_process->last_fault_addr = (uintptr_t)cr2;
current_process->sig_pending_mask |= (1U << (uint32_t)SIG_SEGV);
}
deliver_signals_to_usermode(regs);
return;
}
+
+ // Kernel-mode page faults during copy_{to,from}_user should not panic.
+ if (uaccess_try_recover((uintptr_t)cr2, regs)) {
+ return;
+ }
}
__asm__ volatile("cli"); // Stop everything
#include "uaccess.h"
#include "errno.h"
+#include "idt.h"
#include <stdint.h>
return 1;
}
/* Fault-recovery window state, armed by copy_{to,from}_user.
 * NOTE(review): plain volatile ints — presumably single-CPU / per-CPU only;
 * confirm this is never armed concurrently from two contexts. */
static volatile int g_uaccess_active = 0;
static volatile int g_uaccess_faulted = 0;
static volatile uintptr_t g_uaccess_recover_eip = 0;

/*
 * Attempt to absorb a kernel-mode page fault that happened inside an armed
 * user-copy window.
 *
 * fault_addr: faulting linear address (CR2) as read by the caller.
 * regs:       saved trap frame; its EIP is rewritten on recovery.
 *
 * Returns 1 when the fault was recovered (execution will resume at the
 * registered recovery label), 0 when the caller should treat the fault as
 * fatal.
 */
int uaccess_try_recover(uintptr_t fault_addr, struct registers* regs) {
    (void)fault_addr; /* silences unused-parameter warning on non-x86 builds */
    if (regs == NULL) {
        return 0;
    }

#if defined(__i386__)
    /* Only absorb faults while a copy window is armed with a landing pad. */
    int armed = (g_uaccess_active != 0) && (g_uaccess_recover_eip != 0);
    if (!armed) {
        return 0;
    }

    /* A fault on a kernel address is a real kernel bug — do not recover. */
    if (fault_addr >= X86_KERNEL_VIRT_BASE) {
        return 0;
    }

    g_uaccess_faulted = 1;
    regs->eip = (uint32_t)g_uaccess_recover_eip;
    return 1;
#else
    return 0;
#endif
}
+
static int x86_user_page_writable_user(uintptr_t vaddr) {
volatile uint32_t* pd = (volatile uint32_t*)0xFFFFF000U;
volatile uint32_t* pt_base = (volatile uint32_t*)0xFFC00000U;
// NOTE(review): the opening line (signature) of copy_from_user() is not
// visible in this chunk — these lines are its body.
// Zero-length copies succeed without touching either pointer.
if (len == 0) return 0;
if (!user_range_ok(src_user, len)) return -EFAULT;
+ // Arm the fault-recovery window: a kernel page fault on the user address
+ // is redirected by uaccess_try_recover() to the uaccess_fault label below.
+ g_uaccess_faulted = 0;
+ g_uaccess_recover_eip = (uintptr_t)&&uaccess_fault;
+ g_uaccess_active = 1;
+
uintptr_t up = (uintptr_t)src_user;
for (size_t i = 0; i < len; i++) {
((uint8_t*)dst)[i] = ((const volatile uint8_t*)up)[i];
}
+
+ // Disarm the window on the no-fault path.
+ g_uaccess_active = 0;
+ g_uaccess_recover_eip = 0;
+ // NOTE(review): on a fault, control jumps straight to uaccess_fault, so
+ // g_uaccess_faulted appears unreachable as set here — confirm and simplify.
+ if (g_uaccess_faulted) return -EFAULT;
return 0;
+
+uaccess_fault:
+ // Landing pad installed in g_uaccess_recover_eip; reached only when the
+ // page-fault handler rewrote the saved EIP. Disarm and fail the copy.
+ g_uaccess_active = 0;
+ g_uaccess_faulted = 0;
+ g_uaccess_recover_eip = 0;
+ return -EFAULT;
}
// Copy len bytes from kernel buffer src into user memory dst_user.
// Returns 0 on success, -EFAULT on a bad range or a faulting access.
int copy_to_user(void* dst_user, const void* src, size_t len) {
if (!user_range_ok(dst_user, len)) return -EFAULT;
// NOTE(review): this #endif closes a conditional whose #if is not visible
// in this chunk — likely omitted diff context; verify against the full file.
#endif
+ // Arm the fault-recovery window (see uaccess_try_recover).
+ g_uaccess_faulted = 0;
+ g_uaccess_recover_eip = (uintptr_t)&&uaccess_fault2;
+ g_uaccess_active = 1;
+
uintptr_t up = (uintptr_t)dst_user;
for (size_t i = 0; i < len; i++) {
((volatile uint8_t*)up)[i] = ((const uint8_t*)src)[i];
}
+
+ // Disarm the window on the no-fault path.
+ g_uaccess_active = 0;
+ g_uaccess_recover_eip = 0;
+ if (g_uaccess_faulted) return -EFAULT;
return 0;
+
+uaccess_fault2:
+ // Fault landing pad: disarm the window and fail the copy with -EFAULT.
+ g_uaccess_active = 0;
+ g_uaccess_faulted = 0;
+ g_uaccess_recover_eip = 0;
+ return -EFAULT;
}