--- /dev/null
+#ifndef SPINLOCK_H
+#define SPINLOCK_H
+
+#include <stdint.h>
+
+#include <stddef.h>
+
+/* Minimal busy-wait lock. 0 = unlocked, 1 = held. 'volatile' forces the
+   TTAS wait loop in spin_lock() to re-read the word from memory on every
+   iteration instead of caching it in a register. */
+typedef struct {
+ volatile uint32_t locked;
+} spinlock_t;
+
+/* Reset a lock to the released state. Only safe when no other context can
+   currently hold or contend the lock (e.g. before it is published). */
+static inline void spinlock_init(spinlock_t* l) {
+ l->locked = 0;
+}
+
+/* Acquire the lock, busy-waiting until it is free.
+ *
+ * Test-and-test-and-set: the atomic swap (__sync_lock_test_and_set, an
+ * acquire barrier) is only retried after a plain-read wait loop observes
+ * the lock free, which keeps the cache line shared while contended. On
+ * x86 the PAUSE hint is issued inside the wait loop. */
+static inline void spin_lock(spinlock_t* l) {
+    for (;;) {
+        /* Returns the previous value: 0 means we just took the lock. */
+        if (__sync_lock_test_and_set(&l->locked, 1) == 0) {
+            return;
+        }
+        /* Contended: wait on plain reads, then retry the atomic swap. */
+        while (l->locked) {
+#if defined(__i386__) || defined(__x86_64__)
+            __asm__ volatile ("pause");
+#endif
+        }
+    }
+}
+
+/* Release the lock. __sync_lock_release stores 0 with release semantics,
+   so writes made inside the critical section become visible before the
+   lock appears free to other observers. */
+static inline void spin_unlock(spinlock_t* l) {
+ __sync_lock_release(&l->locked);
+}
+
+#if defined(__i386__) || defined(__x86_64__)
+/* Save the flags register and disable maskable interrupts on this CPU.
+   Pair every call with irq_restore() on the returned value; pairs nest
+   correctly because each caller keeps its own saved copy. The "memory"
+   clobber prevents the compiler from moving memory accesses across the
+   interrupt-disable point. */
+static inline uintptr_t irq_save(void) {
+ uintptr_t flags;
+#if defined(__x86_64__)
+ __asm__ volatile ("pushfq; pop %0; cli" : "=r"(flags) :: "memory");
+#else
+ __asm__ volatile ("pushf; pop %0; cli" : "=r"(flags) :: "memory");
+#endif
+ return flags;
+}
+
+/* Restore the flags word saved by irq_save(). Interrupts are re-enabled
+   only if IF was set at the time of the matching irq_save(). */
+static inline void irq_restore(uintptr_t flags) {
+#if defined(__x86_64__)
+ __asm__ volatile ("push %0; popfq" :: "r"(flags) : "memory", "cc");
+#else
+ __asm__ volatile ("push %0; popf" :: "r"(flags) : "memory", "cc");
+#endif
+}
+#else
+/* Non-x86 build: interrupt masking is not implemented; these stubs make
+   the *_irqsave APIs compile, with the spinlock alone providing exclusion. */
+static inline uintptr_t irq_save(void) {
+ return 0;
+}
+
+static inline void irq_restore(uintptr_t flags) {
+ (void)flags;
+}
+#endif
+
+/* Disable IRQs first, then take the lock. This order prevents the classic
+   self-deadlock where an interrupt handler on this CPU tries to take a
+   lock its own interrupted context already holds. Returns the saved flags
+   for the matching spin_unlock_irqrestore(). */
+static inline uintptr_t spin_lock_irqsave(spinlock_t* l) {
+ uintptr_t flags = irq_save();
+ spin_lock(l);
+ return flags;
+}
+
+/* Release the lock, then restore the interrupt state saved by
+   spin_lock_irqsave() — the exact mirror of the acquire order. */
+static inline void spin_unlock_irqrestore(spinlock_t* l, uintptr_t flags) {
+ spin_unlock(l);
+ irq_restore(flags);
+}
+
+#endif
#include "idt.h"
#include "io.h"
#include "uart_console.h"
+#include "spinlock.h"
#include <stddef.h>
#define IDT_ENTRIES 256
// Array of function pointers for handlers
isr_handler_t interrupt_handlers[IDT_ENTRIES];
+static spinlock_t idt_handlers_lock = {0};
+
// Extern prototypes for Assembly stubs
extern void isr0(); extern void isr1(); extern void isr2(); extern void isr3();
extern void isr4(); extern void isr5(); extern void isr6(); extern void isr7();
}
+// Install 'handler' as the callback for interrupt vector n. The IRQ-saving
+// lock makes the table update atomic with respect to interrupt context
+// reading interrupt_handlers[], and is safe to call with IRQs enabled.
void register_interrupt_handler(uint8_t n, isr_handler_t handler) {
+ uintptr_t flags = spin_lock_irqsave(&idt_handlers_lock);
 interrupt_handlers[n] = handler;
+ spin_unlock_irqrestore(&idt_handlers_lock, flags);
}
#include "utils.h"
#include "hal/uart.h"
+#include "spinlock.h"
+
+static spinlock_t uart_lock = {0};
+
void uart_init(void) {
hal_uart_init();
}
+// Emit a single character with uart_lock held, serializing access to the
+// UART HAL so concurrent writers cannot interleave mid-byte.
void uart_put_char(char c) {
+ uintptr_t flags = spin_lock_irqsave(&uart_lock);
 hal_uart_putc(c);
+ spin_unlock_irqrestore(&uart_lock, flags);
}
+// Print a NUL-terminated string as one atomic unit with respect to other
+// UART writers. The loop calls hal_uart_putc() directly rather than
+// uart_put_char(): the spinlock is non-reentrant, so re-acquiring
+// uart_lock per character from inside the held section would deadlock.
void uart_print(const char* str) {
+ uintptr_t flags = spin_lock_irqsave(&uart_lock);
 for (int i = 0; str[i] != '\0'; i++) {
- uart_put_char(str[i]);
+ hal_uart_putc(str[i]);
 }
+ spin_unlock_irqrestore(&uart_lock, flags);
}
#include "hal/video.h"
+#include "spinlock.h"
+
static volatile uint16_t* VGA_BUFFER = 0;
static const int VGA_WIDTH = 80;
static const int VGA_HEIGHT = 25;
static int term_row = 0;
static uint8_t term_color = 0x0F; // White on Black
+static spinlock_t vga_lock = {0};
+
void vga_init(void) {
VGA_BUFFER = (volatile uint16_t*)hal_video_text_buffer();
term_col = 0;
VGA_BUFFER[index] = (uint16_t) ' ' | (uint16_t) term_color << 8;
}
}
-}!VGA_BUFFER) {
- return;
- }
-
- if (
+}
+// Set the active text attribute byte (low nibble = foreground, high
+// nibble = background), serialized against other VGA state via vga_lock.
void vga_set_color(uint8_t fg, uint8_t bg) {
+ uintptr_t flags = spin_lock_irqsave(&vga_lock);
 term_color = fg | (bg << 4);
+ spin_unlock_irqrestore(&vga_lock, flags);
}
+// Write one character to the VGA text console under vga_lock. Bails out
+// (after releasing the lock) if vga_init() has not mapped the buffer yet.
+// NOTE(review): this hunk elides the non-newline branch that actually
+// stores the glyph — verify locking covers it in the full source.
void vga_put_char(char c) {
+ uintptr_t flags = spin_lock_irqsave(&vga_lock);
+ if (!VGA_BUFFER) {
+ spin_unlock_irqrestore(&vga_lock, flags);
+ return;
+ }
+
 if (c == '\n') {
 term_col = 0;
 term_row++;
 // TODO: Implement scrolling
+ // Until scrolling exists the cursor wraps back to the top row.
 term_row = 0;
 }
+
+ spin_unlock_irqrestore(&vga_lock, flags);
}
+// Print a NUL-terminated string to the VGA console as one atomic unit.
+// The per-character logic is inlined here instead of calling
+// vga_put_char(): vga_lock is non-reentrant, so nesting the per-char
+// lock inside this held section would deadlock.
void vga_print(const char* str) {
+ uintptr_t flags = spin_lock_irqsave(&vga_lock);
+
+ if (!VGA_BUFFER) {
+ spin_unlock_irqrestore(&vga_lock, flags);
+ return;
+ }
+
 for (int i = 0; str[i] != '\0'; i++) {
- vga_put_char(str[i]);
+ char c = str[i];
+ if (c == '\n') {
+ term_col = 0;
+ term_row++;
+ } else {
+ // Cell = character byte | attribute byte << 8.
+ const int index = term_row * VGA_WIDTH + term_col;
+ VGA_BUFFER[index] = (uint16_t) c | (uint16_t) term_color << 8;
+ term_col++;
+ }
+
+ // Wrap to the next line at the right edge.
+ if (term_col >= VGA_WIDTH) {
+ term_col = 0;
+ term_row++;
+ }
+
+ // No scrolling yet: wrap to the top of the screen (matches vga_put_char).
+ if (term_row >= VGA_HEIGHT) {
+ term_row = 0;
+ }
 }
+
+ spin_unlock_irqrestore(&vga_lock, flags);
}
#include "vmm.h"
#include "uart_console.h"
#include "timer.h" // Need access to current tick usually, but we pass it in wake_check
+#include "spinlock.h"
#include <stddef.h>
struct process* current_process = NULL;
struct process* ready_queue_tail = NULL;
static uint32_t next_pid = 1;
+static spinlock_t sched_lock = {0};
+
static void* pmm_alloc_page_low(void) {
// Bring-up safety: ensure we allocate from the identity-mapped area (0-4MB)
// until we have a full kernel virtual mapping for arbitrary phys pages.
+// Create the initial kernel/idle task (PID 0) and make the ready queue a
+// one-element circular list pointing at it. The uart_print happens before
+// taking sched_lock, so the UART lock is never nested inside it here.
void process_init(void) {
 uart_print("[SCHED] Initializing Multitasking...\n");
+ uintptr_t flags = spin_lock_irqsave(&sched_lock);
+
 // Initial Kernel Thread (PID 0) - IDLE TASK
 struct process* kernel_proc = (struct process*)pmm_alloc_page_low();
+ // NOTE(review): kernel_proc is not NULL-checked in the visible lines; a
+ // boot-time OOM would fault below — confirm against the elided portion.
 ready_queue_head = kernel_proc;
 ready_queue_tail = kernel_proc;
 kernel_proc->next = kernel_proc;
+
+ spin_unlock_irqrestore(&sched_lock, flags);
}
void thread_wrapper(void (*fn)(void)) {
}
+// Allocate and enqueue a new kernel thread that starts at entry_point.
+// Returns the new process, or NULL on allocation failure. The whole
+// operation runs under sched_lock so PID assignment and queue linkage
+// are atomic with respect to the scheduler and timer ISR.
struct process* process_create_kernel(void (*entry_point)(void)) {
+ uintptr_t flags = spin_lock_irqsave(&sched_lock);
 struct process* proc = (struct process*)pmm_alloc_page_low();
- if (!proc) return NULL;
+ if (!proc) {
+ spin_unlock_irqrestore(&sched_lock, flags);
+ return NULL;
+ }
 proc->pid = next_pid++;
 proc->state = PROCESS_READY;
 proc->wake_at_tick = 0;
 void* stack_phys = pmm_alloc_page_low();
- if (!stack_phys) return NULL;
+ if (!stack_phys) {
+ spin_unlock_irqrestore(&sched_lock, flags);
+ // NOTE(review): the page backing 'proc' is not returned to the PMM on
+ // this path (and its PID stays consumed) — leak; fix when a
+ // pmm_free_page() counterpart exists.
+ return NULL;
+ }
 // Until we guarantee a linear phys->virt mapping, use the identity-mapped address
 // for kernel thread stacks during bring-up.
 ready_queue_tail->next = proc;
 ready_queue_tail = proc;
+ spin_unlock_irqrestore(&sched_lock, flags);
 return proc;
}
}
+// Pick the next READY process and context-switch to it. Replaces the old
+// blanket cli/sti pair with irq_save/irq_restore, which fixes the bug of
+// unconditionally re-enabling interrupts even when the caller had them
+// disabled (e.g. when invoked from an ISR path).
void schedule(void) {
- __asm__ volatile("cli");
+ uintptr_t irq_flags = irq_save();
+ spin_lock(&sched_lock);
 if (!current_process) {
- __asm__ volatile("sti");
+ spin_unlock(&sched_lock);
+ irq_restore(irq_flags);
 return;
 }
 struct process* next = get_next_ready_process();
 if (prev == next) {
- __asm__ volatile("sti");
+ spin_unlock(&sched_lock);
+ irq_restore(irq_flags);
 return;
 }
 current_process = next;
 current_process->state = PROCESS_RUNNING;
+
+ // Drop the lock before switching stacks so the next thread does not
+ // resume with sched_lock held. IRQs stay disabled across the switch.
+ // NOTE(review): on SMP another CPU could select 'prev' before its
+ // context is saved; assumes single-CPU bring-up — confirm.
+ spin_unlock(&sched_lock);
 context_switch(&prev->esp, current_process->esp);
-
- __asm__ volatile("sti");
+
+ // Resumed later, back in this thread: restore the caller's IRQ state.
+ irq_restore(irq_flags);
}
+// Put the current process to sleep for 'ticks' timer ticks, then yield.
+// Returns only after process_wake_check() has marked it runnable again.
void process_sleep(uint32_t ticks) {
 extern uint32_t get_tick_count(void);
 uint32_t current_tick = get_tick_count();
-
- __asm__ volatile("cli");
+
+ uintptr_t flags = spin_lock_irqsave(&sched_lock);
 current_process->wake_at_tick = current_tick + ticks;
 current_process->state = PROCESS_SLEEPING;
- // Force switch immediately
- // Since current state is SLEEPING, schedule() will pick someone else.
- // We call schedule() directly (but we need to re-enable interrupts inside schedule logic or before context switch return)
- // Our schedule() handles interrupt flag management, but we called CLI above.
- // schedule() calls CLI again (no-op) and then STI at end.
-
- // BUT we need to manually invoke the scheduler logic here because schedule() usually triggered by ISR.
- // Just calling schedule() works.
-
+ spin_unlock_irqrestore(&sched_lock, flags);
+
+ // NOTE(review): a timer IRQ may fire between the unlock and schedule();
+ // looks benign (wake_check may flip us back to READY and schedule just
+ // keeps running us) — confirm against wake_check semantics.
+ // Force switch immediately. Since current state is SLEEPING, schedule() will pick someone else.
 schedule();
-
+
 // When we return here, we woke up!
- __asm__ volatile("sti");
}
+// Timer-ISR hook: walk the circular process list and wake sleepers whose
+// deadline has passed (waking logic is in the elided loop body). Using the
+// irqsave variant is correct even though the ISR already runs with IRQs
+// off — irq_save/irq_restore pairs nest via the saved flags word.
void process_wake_check(uint32_t current_tick) {
 // Called by Timer ISR
+ uintptr_t flags = spin_lock_irqsave(&sched_lock);
 struct process* iter = ready_queue_head;
 // Iterate all processes (Circular list)
 // Warning: O(N) inside ISR. Not ideal for 1000 processes.
- if (!iter) return;
+ if (!iter) {
+ spin_unlock_irqrestore(&sched_lock, flags);
+ return;
+ }
 struct process* start = iter;
 do {
 }
 iter = iter->next;
 } while (iter != start);
+
+ spin_unlock_irqrestore(&sched_lock, flags);
}
#include "vmm.h"
#include "uart_console.h"
+#include "spinlock.h"
+
// Heap starts at 3GB + 256MB
#define KHEAP_START 0xD0000000
#define KHEAP_INITIAL_SIZE (10 * 1024 * 1024) // 10MB
static heap_header_t* head = NULL;
static heap_header_t* tail = NULL;
+static spinlock_t heap_lock = {0};
+
// Helper to check corruption
void check_integrity(heap_header_t* header) {
if (header->magic != HEAP_MAGIC) {
}
+// Map KHEAP_INITIAL_SIZE of physical frames into the heap region and set
+// up the initial single free block (head == tail). Runs under heap_lock.
void kheap_init(void) {
 uart_print("[HEAP] Initializing Advanced Heap (Doubly Linked)...\n");
+
+ uintptr_t flags = spin_lock_irqsave(&heap_lock);
 // 1. Map pages
 uint32_t pages_needed = KHEAP_INITIAL_SIZE / PAGE_SIZE;
 for (uint32_t i = 0; i < pages_needed; i++) {
 void* phys_frame = pmm_alloc_page();
 if (!phys_frame) {
+ // Drop the lock before printing so uart_lock is not taken under heap_lock.
+ spin_unlock_irqrestore(&heap_lock, flags);
 uart_print("[HEAP] OOM during init!\n");
+ // NOTE(review): returns with the heap partially mapped and head/tail
+ // unset in the visible lines — confirm callers treat this as fatal.
 return;
 }
 head->prev = NULL;
 tail = head;
-
+ spin_unlock_irqrestore(&heap_lock, flags);
+
 uart_print("[HEAP] 10MB Heap Ready.\n");
}
+// Allocate 'size' bytes from the kernel heap (first-fit scan over the
+// doubly linked block list; scan setup is in elided lines). Returns a
+// pointer just past the block header, or NULL on OOM. The whole scan runs
+// under heap_lock; the lock is always dropped before uart_print so the
+// UART lock is never acquired while heap_lock is held.
void* kmalloc(size_t size) {
 if (size == 0) return NULL;
+
+ uintptr_t flags = spin_lock_irqsave(&heap_lock);
 // Align to 8 bytes
 size_t aligned_size = (size + 7) & ~7;
 while (current) {
 // Sanity Check
 if (current->magic != HEAP_MAGIC) {
+ spin_unlock_irqrestore(&heap_lock, flags);
 uart_print("[HEAP] Corruption Detected in kmalloc scan!\n");
 for(;;) __asm__("hlt");
 }
 }
 current->is_free = 0;
- return (void*)((uint8_t*)current + sizeof(heap_header_t));
+ // Compute the user pointer before unlocking, then return it.
+ void* ret = (void*)((uint8_t*)current + sizeof(heap_header_t));
+ spin_unlock_irqrestore(&heap_lock, flags);
+ return ret;
 }
 current = current->next;
 }
+ spin_unlock_irqrestore(&heap_lock, flags);
 uart_print("[HEAP] OOM: kmalloc failed.\n");
 return NULL;
}
+// Return a kmalloc'd pointer to the heap (coalescing with neighbors in
+// the elided lines). free(NULL) is a no-op. Header magic is validated
+// before any list surgery; on corruption the lock is released so the
+// diagnostic print does not nest uart_lock under heap_lock, then the CPU
+// halts.
void kfree(void* ptr) {
 if (!ptr) return;
+
+ uintptr_t flags = spin_lock_irqsave(&heap_lock);
 heap_header_t* header = (heap_header_t*)((uint8_t*)ptr - sizeof(heap_header_t));
 if (header->magic != HEAP_MAGIC) {
+ spin_unlock_irqrestore(&heap_lock, flags);
 uart_print("[HEAP] Corruption Detected in kfree!\n");
 for(;;) __asm__("hlt");
 }
 }
 // No need to update 'header' anymore, prev_block is the merged one
 }
+
+ spin_unlock_irqrestore(&heap_lock, flags);
}