uint8_t lm : 1; /* Long Mode (64-bit) */
uint8_t syscall : 1; /* SYSCALL/SYSRET */
+ /* CPUID leaf 7 — EBX (structured extended features) */
+ uint8_t smep : 1; /* Supervisor Mode Execution Prevention */
+ uint8_t smap : 1; /* Supervisor Mode Access Prevention */
+
/* Extended info */
uint32_t max_ext_leaf;
char brand[49]; /* CPU brand string (leaves 0x80000002-4) */
out->initial_apic_id = (uint8_t)(ebx >> 24);
out->logical_cpus = (uint8_t)(ebx >> 16);
+ /* Leaf 7: structured extended features (SMEP, SMAP, etc.) */
+ if (out->max_leaf >= 7) {
+ cpuid(7, &eax, &ebx, &ecx, &edx);
+ out->smep = (ebx >> 7) & 1;
+ out->smap = (ebx >> 20) & 1;
+ }
+
/* Extended leaves */
cpuid(0x80000000, &eax, &ebx, &ecx, &edx);
out->max_ext_leaf = eax;
if (f->x2apic) uart_print(" x2APIC");
if (f->hypervisor) uart_print(" HYPERVISOR");
if (f->syscall) uart_print(" SYSCALL");
+ if (f->smep) uart_print(" SMEP");
+ if (f->smap) uart_print(" SMAP");
uart_print("\n");
uart_print("[CPUID] APIC ID: ");
#include "hal/cpu_features.h"
#include "arch/x86/cpuid.h"
+#include "uart_console.h"
#include <stddef.h>
+#define CR4_SMEP (1U << 20)
+#define CR4_SMAP (1U << 21)
+
+/* Read the CR4 control register (privileged; ring 0 only).
+ * NOTE(review): the 32-bit operand means this only assembles for an
+ * i386 target — on x86-64, CR4 moves require a 64-bit register.
+ * The 0xD0000000 heap layout elsewhere suggests a 32-bit kernel;
+ * confirm before reusing this in 64-bit code. */
+static inline uint32_t read_cr4(void) {
+    uint32_t val;
+    __asm__ volatile("mov %%cr4, %0" : "=r"(val));
+    return val;
+}
+
+/* Write the CR4 control register (privileged; ring 0 only).
+ * The "memory" clobber prevents the compiler from reordering memory
+ * accesses across the control-register update — relevant when the
+ * write toggles protection bits such as SMEP/SMAP. */
+static inline void write_cr4(uint32_t val) {
+    __asm__ volatile("mov %0, %%cr4" :: "r"(val) : "memory");
+}
+
static struct cpu_features g_features;
static struct x86_cpu_features g_x86_features;
g_features.logical_cpus = g_x86_features.logical_cpus;
g_features.initial_cpu_id = g_x86_features.initial_apic_id;
+
+ /* Enable SMEP if supported: prevents kernel from executing user-mapped pages.
+ * This blocks a common exploit technique where an attacker maps shellcode in
+ * userspace and tricks the kernel into jumping to it. */
+ if (g_x86_features.smep) {
+ uint32_t cr4 = read_cr4();
+ cr4 |= CR4_SMEP;
+ write_cr4(cr4);
+ uart_print("[CPU] SMEP enabled.\n");
+ }
+
+ /* SMAP (Supervisor Mode Access Prevention) is NOT enabled yet because
+ * copy_from_user/copy_to_user do not bracket accesses with STAC/CLAC.
+ * Enabling SMAP without STAC/CLAC would fault on every user memory access.
+ * TODO: Add STAC/CLAC to x86 uaccess.c, then enable SMAP here. */
}
const struct cpu_features* hal_cpu_get_features(void) {
}
}
it = it->next;
- } while (it != start);
+ } while (it && it != start);
}
if (!found_child) {
// The loader returns a fresh stack top (user_sp). We'll pack strings below it.
uintptr_t sp = user_sp;
sp &= ~(uintptr_t)0xF;
+ const uintptr_t sp_base = user_sp - user_stack_size;
uintptr_t argv_ptrs_va[EXECVE_MAX_ARGC + 1];
uintptr_t envp_ptrs_va[EXECVE_MAX_ENVC + 1];
for (int i = envc - 1; i >= 0; i--) {
size_t len = strlen(kenvp[i]) + 1;
+ if (sp - len < sp_base) { vmm_as_activate(old_as); current_process->addr_space = old_as; vmm_as_destroy(new_as); ret = -E2BIG; goto out; }
sp -= len;
memcpy((void*)sp, kenvp[i], len);
envp_ptrs_va[i] = sp;
for (int i = argc - 1; i >= 0; i--) {
size_t len = strlen(kargv[i]) + 1;
+ if (sp - len < sp_base) { vmm_as_activate(old_as); current_process->addr_space = old_as; vmm_as_destroy(new_as); ret = -E2BIG; goto out; }
sp -= len;
memcpy((void*)sp, kargv[i], len);
argv_ptrs_va[i] = sp;
// Heap starts at 3GB + 256MB
#define KHEAP_START 0xD0000000
#define KHEAP_INITIAL_SIZE (10 * 1024 * 1024) // 10MB
+#define KHEAP_MAX_SIZE (64 * 1024 * 1024) // 64MB max growth
#define PAGE_SIZE 4096
#define HEAP_MAGIC 0xCAFEBABE
static heap_header_t* head = NULL;
static heap_header_t* tail = NULL;
+static uintptr_t heap_end = 0; // Current end of mapped heap region
static spinlock_t heap_lock = {0};
head->prev = NULL;
tail = head;
+ heap_end = KHEAP_START + KHEAP_INITIAL_SIZE;
spin_unlock_irqrestore(&heap_lock, flags);
uart_print("[HEAP] 10MB Heap Ready.\n");
current = current->next;
}
+ // No free block found — try to grow the heap.
+ size_t grow_size = aligned_size + sizeof(heap_header_t);
+ // Round up to page boundary, minimum 64KB growth.
+ if (grow_size < 64 * 1024) grow_size = 64 * 1024;
+ grow_size = (grow_size + PAGE_SIZE - 1) & ~(size_t)(PAGE_SIZE - 1);
+
+ if (heap_end + grow_size > KHEAP_START + KHEAP_MAX_SIZE) {
+ spin_unlock_irqrestore(&heap_lock, flags);
+ uart_print("[HEAP] OOM: max heap size reached.\n");
+ return NULL;
+ }
+
+ // Map new pages.
+ uintptr_t map_addr = heap_end;
+ for (size_t off = 0; off < grow_size; off += PAGE_SIZE) {
+ void* phys_frame = pmm_alloc_page();
+ if (!phys_frame) {
+ // Partial growth: use whatever we mapped so far.
+ grow_size = off;
+ break;
+ }
+ vmm_map_page((uint64_t)(uintptr_t)phys_frame, (uint64_t)(map_addr + off),
+ VMM_FLAG_PRESENT | VMM_FLAG_RW);
+ }
+
+ if (grow_size == 0) {
+ spin_unlock_irqrestore(&heap_lock, flags);
+ uart_print("[HEAP] OOM: kmalloc failed (no phys pages).\n");
+ return NULL;
+ }
+
+ // Create a new free block in the grown region.
+ heap_header_t* new_block = (heap_header_t*)heap_end;
+ new_block->magic = HEAP_MAGIC;
+ new_block->size = grow_size - sizeof(heap_header_t);
+ new_block->is_free = 1;
+ new_block->next = NULL;
+ new_block->prev = tail;
+ if (tail) tail->next = new_block;
+ tail = new_block;
+ heap_end += grow_size;
+
+ // Coalesce with previous block if it's free and adjacent.
+ if (new_block->prev && new_block->prev->is_free) {
+ heap_header_t* prev = new_block->prev;
+ heap_header_t* expected = (heap_header_t*)((uint8_t*)prev + sizeof(heap_header_t) + prev->size);
+ if (expected == new_block) {
+ prev->size += sizeof(heap_header_t) + new_block->size;
+ prev->next = new_block->next;
+ if (new_block->next) {
+ new_block->next->prev = prev;
+ } else {
+ tail = prev;
+ }
+ }
+ }
+
+ // Retry allocation from the start (the new block should satisfy it).
+ current = head;
+ while (current) {
+ if (current->magic != HEAP_MAGIC) break;
+ if (current->is_free && current->size >= aligned_size) {
+ if (current->size > aligned_size + sizeof(heap_header_t) + 16) {
+ heap_header_t* split = (heap_header_t*)((uint8_t*)current + sizeof(heap_header_t) + aligned_size);
+ split->magic = HEAP_MAGIC;
+ split->size = current->size - aligned_size - sizeof(heap_header_t);
+ split->is_free = 1;
+ split->next = current->next;
+ split->prev = current;
+ if (current->next) current->next->prev = split;
+ current->next = split;
+ current->size = aligned_size;
+ if (current == tail) tail = split;
+ }
+ current->is_free = 0;
+ void* ret = (void*)((uint8_t*)current + sizeof(heap_header_t));
+ spin_unlock_irqrestore(&heap_lock, flags);
+ return ret;
+ }
+ current = current->next;
+ }
+
spin_unlock_irqrestore(&heap_lock, flags);
- uart_print("[HEAP] OOM: kmalloc failed.\n");
+ uart_print("[HEAP] OOM: kmalloc failed after grow.\n");
return NULL;
}
#include "slab.h"
#include "pmm.h"
+#include "heap.h"
#include "hal/mm.h"
#include "uart_console.h"
}
static int slab_grow(slab_cache_t* cache) {
- void* page = pmm_alloc_page();
- if (!page) return -1;
-
- uint8_t* base = (uint8_t*)(uintptr_t)page;
-
- /* In higher-half kernel the physical page needs to be accessible.
- * For simplicity we assume the kernel heap region or identity-mapped
- * low memory is used. We map via the kernel virtual address. */
- /* TODO: For pages above 4MB, a proper kernel mapping is needed.
- * For now, slab pages come from pmm_alloc_page which returns
- * physical addresses. We need to convert to virtual. */
-
- uint8_t* vbase = (uint8_t*)hal_mm_phys_to_virt((uintptr_t)base);
+ /* Allocate from the kernel heap instead of raw pmm_alloc_page.
+ * The heap is already VMM-mapped at valid kernel VAs, so we avoid
+ * the hal_mm_phys_to_virt bug where phys addresses above 16MB
+ * translate to VAs that collide with the heap range (0xD0000000+). */
+ uint8_t* vbase = (uint8_t*)kmalloc(PAGE_SIZE);
+ if (!vbase) return -1;
for (uint32_t i = 0; i < cache->objs_per_slab; i++) {
struct slab_free_node* node = (struct slab_free_node*)(vbase + i * cache->obj_size);