PROCESS_READY,
PROCESS_RUNNING,
PROCESS_BLOCKED,
+ PROCESS_SLEEPING, // New state
PROCESS_ZOMBIE
} process_state_t;
struct process {
- uint32_t pid; // Process ID
- uint32_t esp; // Kernel Stack Pointer (Saved when switched out)
- uint32_t cr3; // Page Directory (Physical)
- uint32_t* kernel_stack; // Pointer to the bottom of the allocated kernel stack
- process_state_t state; // Current state
- struct process* next; // Linked list for round-robin
+ uint32_t pid;
+ uint32_t esp;
+ uint32_t cr3;
+ uint32_t* kernel_stack;
+ process_state_t state;
+ uint32_t wake_at_tick; // New: When to wake up (global tick count)
+ struct process* next;
+ struct process* prev; // Doubly linked list helps here too! (Optional but good)
};
// Global pointer to the currently running process
// Create a new kernel thread
struct process* process_create_kernel(void (*entry_point)(void));
+// Sleep for N ticks
+void process_sleep(uint32_t ticks);
+
+// Wake up sleeping processes (called by timer)
+void process_wake_check(uint32_t current_tick);
+
// The magic function that switches stacks (Implemented in Assembly)
// old_esp_ptr: Address where we save the OLD process's ESP
// new_esp: The NEW process's ESP to load
#include <stdint.h>
void timer_init(uint32_t frequency);
+uint32_t get_tick_count(void);
#endif
void reverse(char* str, int length);
// Integer to ASCII
void itoa(int num, char* str, int base);
+// ASCII to Integer
+int atoi(const char* str);
#endif
#include "idt.h"
#include "io.h"
#include "uart_console.h"
-#include "process.h" // For schedule()
+#include "process.h"
// Global tick counter, incremented once per PIT interrupt.
static uint32_t tick = 0;

// Return the number of timer ticks elapsed since boot.
uint32_t get_tick_count(void) {
    return tick;
}
+
/*
 * PIT interrupt handler (IRQ0 -> int 32).
 * Advances the global tick, wakes any sleepers whose deadline has passed,
 * then invokes the scheduler to preempt the current process.
 */
void timer_callback(struct registers* regs) {
    (void)regs;  // register frame is not needed here
    tick++;

    // Promote any SLEEPING process whose wake deadline has been reached.
    process_wake_check(tick);

    // Every 100 ticks (approx 2 sec at 50Hz), print dot
    if (tick % 100 == 0) {
        // uart_print(".");
    }

    // Preemption: force a task switch every tick.
    schedule();
}
/*
 * Program the PIT (channel 0) to fire IRQ0 at `frequency` Hz and hook
 * timer_callback to interrupt vector 32.
 * Precondition: frequency must be nonzero (divisor computation divides by it).
 */
void timer_init(uint32_t frequency) {
    uart_print("[TIMER] Initializing PIT...\n");

    // IRQ0 is remapped to interrupt vector 32.
    register_interrupt_handler(32, timer_callback);

    // The PIT input clock is 1193180 Hz; the divisor selects our output rate.
    uint32_t divisor = 1193180 / frequency;

    // Command byte 0x36 = 0011 0110:
    // channel 0 | access lo/hi byte | mode 3 (square wave) | 16-bit binary.
    outb(0x43, 0x36);

    // The divisor is sent low byte first, then high byte.
    uint8_t l = (uint8_t)(divisor & 0xFF);
    uint8_t h = (uint8_t)((divisor >> 8) & 0xFF);
    outb(0x40, l);
    outb(0x40, h);
}
#include "pmm.h"
#include "vmm.h"
#include "uart_console.h"
+#include "timer.h" // Need access to current tick usually, but we pass it in wake_check
#include <stddef.h>
struct process* current_process = NULL;
void process_init(void) {
uart_print("[SCHED] Initializing Multitasking...\n");
- // Initial Kernel Thread (PID 0)
+ // Initial Kernel Thread (PID 0) - IDLE TASK
struct process* kernel_proc = (struct process*)pmm_alloc_page();
kernel_proc->pid = 0;
kernel_proc->state = PROCESS_RUNNING;
+ kernel_proc->wake_at_tick = 0;
__asm__ volatile("mov %%cr3, %0" : "=r"(kernel_proc->cr3));
current_process = kernel_proc;
kernel_proc->next = kernel_proc;
}
/*
 * Entry shim for new kernel threads.
 * New threads enter here instead of returning through context_switch, so
 * they miss its trailing 'sti' — interrupts must be re-enabled explicitly
 * before running the task body.
 */
void thread_wrapper(void (*fn)(void)) {
    __asm__ volatile("sti");   // interrupts were off on the switch-in path
    fn();                      // run the thread body

    // The task returned; there is no exit() yet, so park the CPU forever.
    uart_print("[SCHED] Thread exited.\n");
    for(;;) __asm__("hlt");
}
proc->pid = next_pid++;
proc->state = PROCESS_READY;
proc->cr3 = current_process->cr3;
+ proc->wake_at_tick = 0;
- // Allocate Kernel Stack
void* stack_phys = pmm_alloc_page();
-
- // Assumption: We have identity map OR P2V works for this range.
- // For robustness in Higher Half, convert phys to virt if needed.
- // Since we map 0xC0000000 -> 0x0, adding 0xC0000000 gives us the virt address.
uint32_t stack_virt = (uint32_t)stack_phys + 0xC0000000;
-
proc->kernel_stack = (uint32_t*)stack_virt;
- // Top of stack
uint32_t* sp = (uint32_t*)((uint8_t*)stack_virt + 4096);
- /*
- * Forge the stack for context_switch
- * We want it to "return" to thread_wrapper, with entry_point as arg.
- * Stack Layout: [EIP] [Arg for Wrapper]
- */
-
- // Push Argument for thread_wrapper
*--sp = (uint32_t)entry_point;
-
- // Push Return Address (EIP) - Where context_switch jumps to
*--sp = (uint32_t)thread_wrapper;
-
- // Push Registers expected by context_switch (EBP, EBX, ESI, EDI)
- *--sp = 0; // EBP
- *--sp = 0; // EBX
- *--sp = 0; // ESI
- *--sp = 0; // EDI
+ *--sp = 0; *--sp = 0; *--sp = 0; *--sp = 0;
proc->esp = (uint32_t)sp;
- // Add to queue
proc->next = ready_queue_head;
ready_queue_tail->next = proc;
ready_queue_tail = proc;
return proc;
}
+// Find next READY process
+struct process* get_next_ready_process(void) {
+ struct process* iterator = current_process->next;
+
+ // Safety Break to prevent infinite loop if list broken
+ int count = 0;
+ while (iterator != current_process && count < 100) {
+ if (iterator->state == PROCESS_READY) {
+ return iterator;
+ }
+ iterator = iterator->next;
+ count++;
+ }
+
+ // If current is ready/running, return it.
+ if (current_process->state == PROCESS_RUNNING || current_process->state == PROCESS_READY)
+ return current_process;
+
+ // If EVERYONE is sleeping, we must return the IDLE task (PID 0)
+ // Assuming PID 0 is always in the list.
+ // Search specifically for PID 0
+ iterator = current_process->next;
+ while (iterator->pid != 0) {
+ iterator = iterator->next;
+ if (iterator == current_process) break; // Should not happen
+ }
+ return iterator; // Return idle task
+}
+
void schedule(void) {
- // Critical Section: Disable Interrupts
__asm__ volatile("cli");
if (!current_process) {
}
struct process* prev = current_process;
- struct process* next = current_process->next;
+ struct process* next = get_next_ready_process();
if (prev == next) {
__asm__ volatile("sti");
return;
}
+ // Only change state to READY if it was RUNNING.
+ // If it was SLEEPING/BLOCKED, leave it as is.
+ if (prev->state == PROCESS_RUNNING) {
+ prev->state = PROCESS_READY;
+ }
+
current_process = next;
current_process->state = PROCESS_RUNNING;
- // Switch!
context_switch(&prev->esp, current_process->esp);
- // We are back! (Task resumed)
- // Re-enable interrupts
__asm__ volatile("sti");
}
+
+void process_sleep(uint32_t ticks) {
+ // We need current tick count.
+ // For simplicity, let's just use a extern or pass it.
+ // But usually sleep() is called by process logic.
+ // Let's assume we read the global tick from timer.h accessor (TODO)
+ // Or we just add 'ticks' to current.
+
+ // Quick fix: declare extern tick from timer.c
+ extern uint32_t get_tick_count(void);
+
+ uint32_t current_tick = get_tick_count();
+
+ __asm__ volatile("cli");
+ current_process->wake_at_tick = current_tick + ticks;
+ current_process->state = PROCESS_SLEEPING;
+
+ // Force switch immediately
+ // Since current state is SLEEPING, schedule() will pick someone else.
+ // We call schedule() directly (but we need to re-enable interrupts inside schedule logic or before context switch return)
+ // Our schedule() handles interrupt flag management, but we called CLI above.
+ // schedule() calls CLI again (no-op) and then STI at end.
+
+ // BUT we need to manually invoke the scheduler logic here because schedule() usually triggered by ISR.
+ // Just calling schedule() works.
+
+ schedule();
+
+ // When we return here, we woke up!
+ __asm__ volatile("sti");
+}
+
+void process_wake_check(uint32_t current_tick) {
+ // Called by Timer ISR
+ struct process* iter = ready_queue_head;
+
+ // Iterate all processes (Circular list)
+ // Warning: O(N) inside ISR. Not ideal for 1000 processes.
+
+ if (!iter) return;
+
+ struct process* start = iter;
+ do {
+ if (iter->state == PROCESS_SLEEPING) {
+ if (current_tick >= iter->wake_at_tick) {
+ iter->state = PROCESS_READY;
+ // uart_print("Woke up PID ");
+ }
+ }
+ iter = iter->next;
+ } while (iter != start);
+}
#include "utils.h"
#include "pmm.h"
#include "vga_console.h"
+#include "process.h" // For sleep
#define MAX_CMD_LEN 256
static char cmd_buffer[MAX_CMD_LEN];
if (strcmp(cmd, "help") == 0) {
uart_print("Available commands:\n");
- uart_print(" help - Show this list\n");
- uart_print(" clear - Clear screen (if VGA)\n");
- uart_print(" mem - Show memory stats\n");
- uart_print(" panic - Trigger kernel panic\n");
- uart_print(" reboot - Restart system\n");
+ uart_print(" help - Show this list\n");
+ uart_print(" clear - Clear screen (if VGA)\n");
+ uart_print(" mem - Show memory stats\n");
+ uart_print(" panic - Trigger kernel panic\n");
+ uart_print(" reboot - Restart system\n");
+ uart_print(" sleep <num> - Sleep for N ticks (50Hz)\n");
}
else if (strcmp(cmd, "clear") == 0) {
// ANSI clear screen for UART
uart_print("\033[2J\033[1;1H");
-
- // TODO: Clear VGA if active
- // vga_clear();
+ }
+ else if (strncmp(cmd, "sleep ", 6) == 0) {
+ int ticks = atoi(cmd + 6);
+ uart_print("Sleeping for ");
+ uart_print(cmd + 6);
+ uart_print(" ticks...\n");
+ process_sleep(ticks);
+ uart_print("Woke up!\n");
}
else if (strcmp(cmd, "mem") == 0) {
// pmm_print_stats() is not impl yet, so let's fake it or add it
str[i] = '\0';
reverse(str, i);
}
+
/*
 * Minimal ASCII-to-integer conversion.
 * Accepts an optional leading '-', then consecutive decimal digits;
 * conversion stops at the first non-digit (matching standard atoi — the
 * previous version kept scanning past non-digits, so "12a3" became 123).
 * No overflow detection: callers pass short, trusted strings.
 */
int atoi(const char* str) {
    int res = 0;
    int sign = 1;
    int i = 0;

    if (str[0] == '-') {
        sign = -1;
        i++;
    }

    // Consume digits only; break on the first non-digit character.
    for (; str[i] >= '0' && str[i] <= '9'; ++i) {
        res = res * 10 + (str[i] - '0');
    }

    return sign * res;
}
#include "vmm.h"
#include "uart_console.h"
-// Heap starts at 3GB + 256MB (Arbitrary safe high location)
+// Heap starts at 3GB + 256MB
#define KHEAP_START 0xD0000000
#define KHEAP_INITIAL_SIZE (10 * 1024 * 1024) // 10MB
#define PAGE_SIZE 4096
// Heap block header: one per block, placed immediately before its data.
// Doubly linked so kfree() can coalesce with the previous block in O(1).
typedef struct heap_header {
    size_t size;               // Size of the data area (header excluded)
    uint8_t is_free;           // 1 = free, 0 = in use
    struct heap_header* next;  // Next block by address
    struct heap_header* prev;  // Previous block by address
} heap_header_t;

static heap_header_t* head = NULL;  // First block in the heap
static heap_header_t* tail = NULL;  // Last block (eases future heap expansion)
void kheap_init(void) {
- uart_print("[HEAP] Initializing Kernel Heap...\n");
-
- // 1. Map pages for the heap
- // We need to map Virtual Addresses [KHEAP_START] to [KHEAP_START + SIZE]
- // to physical frames.
+ uart_print("[HEAP] Initializing Advanced Heap (Doubly Linked)...\n");
+ // 1. Map pages
uint32_t pages_needed = KHEAP_INITIAL_SIZE / PAGE_SIZE;
if (KHEAP_INITIAL_SIZE % PAGE_SIZE != 0) pages_needed++;
return;
}
- // Map it!
- // Note: vmm_map_page expects 64-bit phys but we give it 32-bit cast
+ // Map 4KB frame
vmm_map_page((uint64_t)(uintptr_t)phys_frame, (uint64_t)virt_addr,
VMM_FLAG_PRESENT | VMM_FLAG_RW);
virt_addr += PAGE_SIZE;
}
- // 2. Create the initial huge free block
+ // 2. Initial Block
head = (heap_header_t*)KHEAP_START;
head->size = KHEAP_INITIAL_SIZE - sizeof(heap_header_t);
head->is_free = 1;
head->next = NULL;
+ head->prev = NULL;
+
+ tail = head;
- uart_print("[HEAP] Initialized 10MB at 0xD0000000.\n");
+ uart_print("[HEAP] 10MB Heap Ready.\n");
}
void* kmalloc(size_t size) {
if (size == 0) return NULL;
- // Align size to 8 bytes for performance/safety
+ // Align to 8 bytes
size_t aligned_size = (size + 7) & ~7;
heap_header_t* current = head;
while (current) {
if (current->is_free && current->size >= aligned_size) {
- // Found a block!
-
- // Can we split it?
- // Only split if remaining space is big enough for a header + minimal data
- if (current->size > aligned_size + sizeof(heap_header_t) + 8) {
+ // Found candidate. Split?
+ if (current->size > aligned_size + sizeof(heap_header_t) + 16) {
+ // Create new header in the remaining space
heap_header_t* new_block = (heap_header_t*)((uint8_t*)current + sizeof(heap_header_t) + aligned_size);
new_block->size = current->size - aligned_size - sizeof(heap_header_t);
new_block->is_free = 1;
new_block->next = current->next;
+ new_block->prev = current;
- current->size = aligned_size;
+ // Update pointers
+ if (current->next) {
+ current->next->prev = new_block;
+ }
current->next = new_block;
+ current->size = aligned_size;
+
+ if (current == tail) tail = new_block;
}
current->is_free = 0;
current = current->next;
}
- uart_print("[HEAP] OOM: No block large enough!\n");
+ uart_print("[HEAP] OOM: kmalloc failed.\n");
return NULL;
}
void kfree(void* ptr) {
if (!ptr) return;
- // Get header
heap_header_t* header = (heap_header_t*)((uint8_t*)ptr - sizeof(heap_header_t));
header->is_free = 1;
- // Merge with next block if free (Coalescing)
+ // 1. Coalesce Right (Forward)
if (header->next && header->next->is_free) {
- header->size += sizeof(heap_header_t) + header->next->size;
- header->next = header->next->next;
+ heap_header_t* next_block = header->next;
+
+ header->size += sizeof(heap_header_t) + next_block->size;
+ header->next = next_block->next;
+
+ if (header->next) {
+ header->next->prev = header;
+ } else {
+ tail = header; // If next was tail, now current is tail
+ }
}
- // TODO: We should ideally merge with PREVIOUS block too,
- // but a singly linked list makes that O(N).
- // A doubly linked list is better for production heaps.
+ // 2. Coalesce Left (Backward) - The Power of Double Links!
+ if (header->prev && header->prev->is_free) {
+ heap_header_t* prev_block = header->prev;
+
+ prev_block->size += sizeof(heap_header_t) + header->size;
+ prev_block->next = header->next;
+
+ if (header->next) {
+ header->next->prev = prev_block;
+ } else {
+ tail = prev_block;
+ }
+ // No need to update 'header' anymore, prev_block is the merged one
+ }
}