--- /dev/null
+/*
+ * AdrOS — SYSENTER fast syscall entry (x86-32)
+ *
+ * Userspace calling convention:
+ * EAX = syscall number
+ * EBX = arg1
+ * ECX = arg2
+ * EDX = arg3
+ * ESI = arg4
+ * EDI = arg5
+ *
+ * Before invoking SYSENTER, userspace must:
+ * push %ecx ; save arg2 (SYSENTER clobbers ECX)
+ * push %edx ; save arg3 (SYSENTER clobbers EDX)
+ *     push $return_eip     ; push return address (slot is skipped on return)
+ *     mov %esp, %ecx       ; ECX = user ESP (for kernel to read)
+ *     lea return_eip, %edx ; EDX = return EIP (the kernel returns via EDX;
+ *                          ;  the stack copy is never read by the kernel)
+ * sysenter
+ * return_eip:
+ * pop %edx ; restore arg3
+ * pop %ecx ; restore arg2
+ * ; EAX = return value
+ *
+ * Kernel entry:
+ * ESP = kernel stack (from MSR 0x175)
+ * ECX = user ESP (points to: [return_eip, saved_edx, saved_ecx])
+ * EDX = user return EIP
+ *
+ * We build a struct registers frame on the kernel stack so that
+ * the existing syscall_handler() works unchanged.
+ *
+ * struct registers (low addr → high addr):
+ * ds edi esi ebp esp_dummy ebx edx ecx eax int_no err_code
+ * eip cs eflags useresp ss
+ */
+
+.section .text
+
+.extern syscall_handler
+
+.global sysenter_entry
+sysenter_entry:
+    /*
+     * State at entry:
+     *   Ring 0, interrupts disabled.
+     *   ESP = per-task kernel stack top (MSR 0x175)
+     *   ECX = user ESP
+     *   EDX = user return EIP
+     *   EAX = syscall number
+     *   EBX = arg1, ESI = arg4, EDI = arg5
+     *   User stack: [return_eip, saved_edx(arg3), saved_ecx(arg2)]
+     */
+
+    /* Build iret-style frame: ss, useresp, eflags, cs, eip */
+    push $0x23          /* ss = user data segment (GDT entry 4 | RPL 3) */
+    push %ecx           /* useresp = user ESP ... */
+    addl $4, (%esp)     /* FIX: ... plus 4, to skip the return_eip slot.
+                         * SYSEXIT loads ESP from this value; the user's
+                         * first instruction at return_eip is `pop %edx`,
+                         * which must pop saved_edx, not the return_eip
+                         * copy (that also leaked 4 bytes per syscall). */
+    pushf               /* eflags (kernel flags — close enough, user had IF set) */
+    orl $0x200, (%esp)  /* ensure IF is set in saved eflags */
+    push $0x1B          /* cs = user code segment (GDT entry 3 | RPL 3) */
+    push %edx           /* eip = user return EIP */
+
+    /* int_no and err_code (fake, for struct registers compatibility) */
+    push $0             /* err_code */
+    push $128           /* int_no = 128 (same as INT 0x80) */
+
+    /* Recover arg2 (ECX) and arg3 (EDX) from user stack.
+     * User stack layout at ECX: [return_eip][saved_edx][saved_ecx]
+     * We already have return_eip in EDX, so:
+     *   real arg3 = *(ECX + 4)
+     *   real arg2 = *(ECX + 8)
+     * NOTE(review): this dereferences a userspace pointer without
+     * validation — a hostile ECX will fault in ring 0. Needs an
+     * access_ok()-style check or a fault fixup eventually. */
+    mov 4(%ecx), %edx   /* EDX = arg3 (was saved by user) */
+    mov 8(%ecx), %ecx   /* ECX = arg2 (was saved by user) */
+
+    /* pusha-equivalent: eax ecx edx ebx esp ebp esi edi */
+    push %eax
+    push %ecx
+    push %edx
+    push %ebx
+    push %esp           /* esp_dummy (not used) */
+    push %ebp
+    push %esi
+    push %edi
+
+    /* ds */
+    push $0x10          /* kernel data segment */
+
+    /* Load kernel segments */
+    mov $0x10, %ax
+    mov %ax, %ds
+    mov %ax, %es
+    mov %ax, %fs
+    mov %ax, %gs
+
+    /* Enable interrupts (SYSENTER disables them) */
+    sti
+
+    /* Call syscall_handler(regs) */
+    push %esp
+    call syscall_handler
+    add $4, %esp
+
+    /* Disable interrupts for SYSEXIT */
+    cli
+
+    /* Restore registers from the frame.
+     * syscall_handler may have modified regs->eax (return value).
+     */
+    add $4, %esp        /* discard saved ds */
+    pop %edi
+    pop %esi
+    pop %ebp
+    add $4, %esp        /* skip esp_dummy */
+    pop %ebx
+    pop %edx            /* arg3 (don't care, user restores from its stack) */
+    pop %ecx            /* arg2 (don't care) */
+    pop %eax            /* return value! */
+
+    add $8, %esp        /* skip int_no, err_code */
+
+    /* Now stack has: eip, cs, eflags, useresp, ss */
+    /* SYSEXIT: ECX = user ESP, EDX = user EIP */
+    mov 12(%esp), %ecx  /* useresp (already adjusted past return_eip slot) */
+    mov 0(%esp), %edx   /* eip (user return address) */
+
+    /* Restore user segments before returning.
+     * FIX: also reload fs/gs — we loaded them with 0x10 (DPL0) on entry,
+     * which is unusable at CPL3 and would #GP on first user access. */
+    push $0x23
+    pop %ds
+    push $0x23
+    pop %es
+    push $0x23
+    pop %fs
+    push $0x23
+    pop %gs
+
+    sti                 /* STI shadow covers sysexit itself */
+    sysexit
+
+.section .note.GNU-stack,"",@progbits
--- /dev/null
+#include "hal/cpu_features.h"
+#include "uart_console.h"
+
+#include <stdint.h>
+
+extern void sysenter_entry(void);
+
+/* Write a 64-bit value to a Model-Specific Register.
+ * WRMSR takes the MSR index in ECX and the value split across EDX:EAX. */
+static inline void wrmsr(uint32_t msr, uint64_t value) {
+    __asm__ volatile("wrmsr"
+                     : /* no outputs */
+                     : "c"(msr),
+                       "a"((uint32_t)value),
+                       "d"((uint32_t)(value >> 32)));
+}
+
+/* Read a Model-Specific Register; RDMSR returns the value in EDX:EAX. */
+static inline uint64_t rdmsr(uint32_t msr) {
+    uint32_t eax_part, edx_part;
+    __asm__ volatile("rdmsr" : "=a"(eax_part), "=d"(edx_part) : "c"(msr));
+    return ((uint64_t)edx_part << 32) | (uint64_t)eax_part;
+}
+
+/* SYSENTER MSRs (Intel SDM Vol. 3, "Fast System Calls"). */
+#define IA32_SYSENTER_CS 0x174  /* kernel CS; SS/user CS/SS are derived from it */
+#define IA32_SYSENTER_ESP 0x175 /* kernel ESP loaded by SYSENTER */
+#define IA32_SYSENTER_EIP 0x176 /* kernel EIP loaded by SYSENTER */
+
+/* Fixed bootstrap kernel stack for SYSENTER entry — used only until
+ * x86_sysenter_set_kernel_stack() starts pointing MSR 0x175 at the
+ * current task's stack.
+ * NOTE(review): the entry stub re-enables interrupts while still on
+ * whatever stack MSR 0x175 held, so this shared stack is only safe
+ * while a single context can issue syscalls — confirm the scheduler
+ * updates the MSR on every switch before enabling multitasking. */
+static uint8_t sysenter_stack[4096] __attribute__((aligned(16)));
+static int sysenter_enabled = 0;  /* nonzero once x86_sysenter_init() succeeds */
+
+/* Program the three SYSENTER MSRs so the CPU can take fast syscalls.
+ * Silently degrades (leaves sysenter_enabled at 0) on CPUs without SEP. */
+void x86_sysenter_init(void) {
+    const struct cpu_features* f = hal_cpu_get_features();
+
+    /* Guard: SYSENTER/SYSEXIT is a CPUID feature (SEP bit). */
+    if (!f->has_sysenter) {
+        uart_print("[SYSENTER] CPU does not support SYSENTER/SYSEXIT.\n");
+        return;
+    }
+
+    /* MSR 0x174 holds only the kernel CS; hardware derives the rest:
+     * kernel SS = CS+8, user CS = (CS+16)|3, user SS = (CS+24)|3.
+     * Our GDT: 0x08=KernelCS, 0x10=KernelSS, 0x18=UserCS, 0x20=UserSS ✓ */
+    wrmsr(IA32_SYSENTER_CS, 0x08);
+
+    /* MSR 0x175: kernel ESP — top of the bootstrap sysenter stack. */
+    wrmsr(IA32_SYSENTER_ESP, (uintptr_t)(sysenter_stack + sizeof(sysenter_stack)));
+
+    /* MSR 0x176: kernel EIP — the assembly entry stub. */
+    wrmsr(IA32_SYSENTER_EIP, (uintptr_t)sysenter_entry);
+
+    sysenter_enabled = 1;
+    uart_print("[SYSENTER] Fast syscall enabled.\n");
+}
+
+/* Point MSR IA32_SYSENTER_ESP at the current task's kernel stack top.
+ * No-op until x86_sysenter_init() has completed successfully. */
+void x86_sysenter_set_kernel_stack(uintptr_t esp0) {
+    if (!sysenter_enabled) {
+        return;
+    }
+    wrmsr(IA32_SYSENTER_ESP, esp0);
+}