From: Tulio A M Mendes Date: Fri, 6 Feb 2026 11:40:33 +0000 (-0300) Subject: x86/boot: expand early mappings and stabilize higher-half bring-up X-Git-Url: https://projects.tadryanom.me/?a=commitdiff_plain;h=8eca13b58432c232890db07300b1484b7143e0f0;p=AdrOS.git x86/boot: expand early mappings and stabilize higher-half bring-up --- diff --git a/src/arch/x86/boot.S b/src/arch/x86/boot.S index 3714db5..191a2eb 100644 --- a/src/arch/x86/boot.S +++ b/src/arch/x86/boot.S @@ -6,13 +6,13 @@ .set MB_MAGIC, 0xE85250D6 .set MB_ARCH, 0 /* i386 */ .set KERNEL_VIRT_BASE, 0xC0000000 -.set PAGE_SIZE, 4096 /* * Convert virtual symbols to physical addresses before paging is enabled * by subtracting KERNEL_VIRT_BASE. */ +/* .multiboot_header section (Goes to the .boot file in the linker) */ .section .multiboot_header .align 8 multiboot_header_start: @@ -24,17 +24,22 @@ multiboot_header_start: .long 8 multiboot_header_end: -.section .text +/** .boot_text section (Goes to the .boot file in the linker) + * This runs in 1MB (Identity Mapped by the Linker) + */ +.section .boot_text .global _start .type _start, @function _start: + /* Disable interrupts until the kernel installs a valid IDT */ + cli /* * We are loaded at 1MB physical. Paging is OFF. * CPU is executing instructions here. * ESP is unknown. */ - + /* Setup a temporary stack (using physical address) */ mov $stack_top, %esp sub $KERNEL_VIRT_BASE, %esp @@ -43,6 +48,11 @@ _start: push %eax push %ebx + /* Mask PIC (disable all IRQs) until kernel remaps/unmasks */ + movb $0xFF, %al + outb %al, $0x21 + outb %al, $0xA1 + /* * SETUP PAGING (Manually) * We need to map: @@ -50,36 +60,55 @@ _start: * 2. Virt 0xC0000000 -> Phys 0x00000000 (Kernel Space) */ - /* 1. Get Physical Address of Page Table 0 (PT0) */ + /* + * Map 0-512MB using 128 page tables. + * Multiboot2 info is a physical pointer (EBX) and GRUB may place it high. + * Keeping a wide identity + higher-half window avoids early page faults + * while PMM/VMM/heap are coming up. 
+	 */
+
+	/* Fill PTs (boot_pt0..boot_pt127) */
 	mov $boot_pt0, %edi
-	sub $KERNEL_VIRT_BASE, %edi
-
-	/* 2. Map 0-4MB to this PT (fill 1024 entries) */
-	/* Entry 0: Addr 0 | Present | RW */
-	mov $0, %esi
-	mov $1023, %ecx
+	sub $KERNEL_VIRT_BASE, %edi	/* Physical address of PT0 */
+	xor %ebx, %ebx			/* pt_index = 0 */
+
 1:
-	cmpl $_kernel_physical_end, %esi  /* Only map what we need? Nah, map 4MB */
+	/* Fill current PT with 0x003 | (pt_index*4MB + i*4KB) */
+	mov %ebx, %eax
+	shl $22, %eax			/* base = pt_index * 4MB */
+	mov %eax, %esi
+
+	mov $1024, %ecx
+2:
 	mov %esi, %edx
-	or $3, %edx		/* Present + RW */
+	or $3, %edx
 	mov %edx, (%edi)
 	add $4096, %esi
 	add $4, %edi
-	loop 1b
+	loop 2b
+
+	inc %ebx
+	cmp $128, %ebx
+	jne 1b
 
 	/* 3. Get Physical Address of Page Directory */
 	mov $boot_pd, %edi
 	sub $KERNEL_VIRT_BASE, %edi
 
-	/* 4. Link PT0 to PD at index 0 (Identity Map 0-4MB) */
+	/* Link PT0..PT127 into PD for both identity and higher-half mapping */
 	mov $boot_pt0, %edx
-	sub $KERNEL_VIRT_BASE, %edx
-	or $3, %edx
-	mov %edx, (%edi)
-
-	/* 5. Link PT0 to PD at index 768 (3GB mark -> 0xC0000000) */
-	/* 768 * 4MB = 3072MB = 3GB */
-	mov %edx, 3072(%edi)	/* Offset 768 * 4 bytes = 3072 */
+	sub $KERNEL_VIRT_BASE, %edx	/* pt_phys = physical address of PT0 */
+	mov $0, %ebx			/* i = 0 */
+
+3:
+	mov %edx, %eax
+	or $3, %eax
+	mov %eax, (%edi,%ebx,4)		/* PD[i] */
+	mov %eax, 3072(%edi,%ebx,4)	/* PD[768+i] */
+	add $4096, %edx
+	inc %ebx
+	cmp $128, %ebx
+	jne 3b
 
 	/* 6. Recursive Mapping (Optional, good for VMM later) at index 1023 */
 	mov $boot_pd, %edx
@@ -94,7 +123,7 @@ _start:
 
 	/* 8. Enable Paging (Set PG bit in CR0) */
 	mov %cr0, %ecx
-	or $0x80000000, %ecx
+	or $0x80000000, %ecx	/* Set PG (bit 31); PE (bit 0) was already set before this point */
 	mov %ecx, %cr0
 
 	/*
@@ -105,6 +134,10 @@ _start:
 	lea higher_half_start, %ecx
 	jmp *%ecx
 
+/** From here, we switch to the default .text section.
+ * The linker will place this in the 3GB area.
+ */ +.section .text higher_half_start: /* * We are now running at 0xC0xxxxxx. @@ -112,40 +145,25 @@ higher_half_start: * but let's leave it for C code to clean up. */ - /* Unmap the low 4MB (Security: Null pointer deref should fault now!) */ - movl $0, boot_pd - invlpg 0 - /* Update Stack Pointer to be Virtual */ /* (Currently ESP points to physical, which is valid mapped, - but let's fix it to use the virtual range) */ + * but let's fix it to use the virtual range) */ add $KERNEL_VIRT_BASE, %esp /* Restore Multiboot args (popped from stack) */ - pop %ebx - pop %eax + pop %ebx /* EBX now has the Multiboot Info address */ + pop %eax /* EAX now has the Magic Number */ - /* - * Pass args to kernel_main. - * NOTE: 'ebx' (multiboot info) is a PHYSICAL address. - * kernel_main might need to map it or we identity map enough RAM. - * Since we kept identity mapping for now (or just unmapped 0-4MB?), - * Wait, I unmapped 0. If multiboot struct is in low mem, we lost access. - * Let's Re-map 0 temporarily inside C or pass physical address and let C handle it. - * For now, let's NOT unmap 0 in asm to be safe, let C do it. 
- */ - - /* Re-map low memory for safety until PMM parses tags */ - mov $boot_pt0, %edx - sub $KERNEL_VIRT_BASE, %edx - or $3, %edx - mov %edx, boot_pd + /* Build arch_boot_args (in .bss) and call arch_start(args) */ + mov $arch_boot_args, %ecx + mov %eax, 0(%ecx) /* args->a0 = multiboot magic */ + mov %ebx, 4(%ecx) /* args->a1 = multiboot info phys */ + movl $0, 8(%ecx) /* args->a2 = 0 */ + movl $0, 12(%ecx) /* args->a3 = 0 */ - /* Call C Kernel */ - /* void kernel_main(uint32_t magic, uint32_t phys_addr) */ - push %ebx - push %eax - call kernel_main + push %ecx + call arch_start + add $4, %esp /* Hang */ cli @@ -160,7 +178,12 @@ boot_pd: .skip 4096 .global boot_pt0 boot_pt0: - .skip 4096 + .skip 4096*128 + +.align 16 +.global arch_boot_args +arch_boot_args: + .skip 16 .align 16 stack_bottom: @@ -168,4 +191,4 @@ stack_bottom: stack_top: /* Helper symbol for map loop limit */ -_kernel_physical_end: +_kernel_physical_end: \ No newline at end of file diff --git a/src/arch/x86/idt.c b/src/arch/x86/idt.c index 22bb85d..55c9a80 100644 --- a/src/arch/x86/idt.c +++ b/src/arch/x86/idt.c @@ -139,11 +139,8 @@ void idt_init(void) { // Load IDT __asm__ volatile("lidt %0" : : "m"(idtp)); - - // Enable interrupts! - __asm__ volatile("sti"); - - uart_print("[IDT] Interrupts Enabled!\n"); + + uart_print("[IDT] Loaded.\n"); } void register_interrupt_handler(uint8_t n, isr_handler_t handler) { diff --git a/src/arch/x86/linker.ld b/src/arch/x86/linker.ld index 1a04ae9..1c82843 100644 --- a/src/arch/x86/linker.ld +++ b/src/arch/x86/linker.ld @@ -2,6 +2,7 @@ * AdrOS - x86 Linker Script (Higher Half) */ +/* The entry point reverts to the default _start */ ENTRY(_start) /* The bootloader loads us at 1MB physical */ @@ -14,23 +15,20 @@ SECTIONS { /* * We start at 1MB physical. - * The symbol '.' tracks the VIRTUAL address. - * We offset it to 3GB + 1MB. */ - . = KERNEL_VIRT_BASE + PHYSICAL_BASE; + . = PHYSICAL_BASE; - _start = .; - - /* - * The AT(...) 
keyword tells the linker: - * "Put this section at PHYSICAL address X in the file, - * but calculate symbols as if it were at VIRTUAL address Y" + /** TEXT SECTION: + * VMA = 0xC0100000 (3GB + 1MB) + * LMA = 0x00100000 (1MB) - Defined by AT() */ - - .text : AT(ADDR(.text) - KERNEL_VIRT_BASE) + .text KERNEL_VIRT_BASE + PHYSICAL_BASE : AT(PHYSICAL_BASE) { - /* Multiboot header must be very early physically */ + /* REQUIRED: The Header MUST be the first thing */ KEEP(*(.multiboot_header)) + /* Boot code (boot.o) */ + *(.boot_text) + /* Rest of the kernel code */ *(.text) } diff --git a/src/arch/x86/multiboot_header.S b/src/arch/x86/multiboot_header.S deleted file mode 100644 index b83c50f..0000000 --- a/src/arch/x86/multiboot_header.S +++ /dev/null @@ -1 +0,0 @@ -/* Merged into boot.S */ \ No newline at end of file diff --git a/src/arch/x86/vmm.c b/src/arch/x86/vmm.c index 2c8db83..9649395 100644 --- a/src/arch/x86/vmm.c +++ b/src/arch/x86/vmm.c @@ -27,7 +27,7 @@ static void* pmm_alloc_page_low(void) { for (int tries = 0; tries < 1024; tries++) { void* p = pmm_alloc_page(); if (!p) return 0; - if ((uintptr_t)p < 0x00400000) { + if ((uintptr_t)p < 0x01000000) { return p; } pmm_free_page(p); @@ -47,7 +47,10 @@ void vmm_map_page(uint64_t phys, uint64_t virt, uint32_t flags) { if (!(boot_pd[pd_index] & X86_PTE_PRESENT)) { // Allocate a new PT uint32_t pt_phys = (uint32_t)pmm_alloc_page_low(); - if (!pt_phys) return; // OOM + if (!pt_phys) { + uart_print("[VMM] OOM allocating page table.\n"); + return; + } // ACCESS SAFETY: Convert Physical to Virtual to write to it uint32_t* pt_virt = (uint32_t*)P2V(pt_phys);