$(KERNEL_NAME): $(OBJ)
@echo " LD $@"
- @$(LD) $(LDFLAGS) -n -o $@ $(BOOT_OBJ) $(KERNEL_OBJ)
+# Append libgcc (path resolved by the compiler via -print-libgcc-file-name)
+# so the linker can satisfy compiler-runtime helper calls emitted by $(CC).
+ @$(LD) $(LDFLAGS) -n -o $@ $(BOOT_OBJ) $(KERNEL_OBJ) $(shell $(CC) $(ARCH_CFLAGS) -print-libgcc-file-name)
iso: $(KERNEL_NAME) $(INITRD_IMG)
@mkdir -p iso/boot
// Register a custom handler for a specific interrupt
typedef void (*isr_handler_t)(struct registers*);
void register_interrupt_handler(uint8_t n, isr_handler_t handler);
+// Remove a previously registered handler for vector n.
+// Matching is by function-pointer identity; unknown handlers are ignored.
+void unregister_interrupt_handler(uint8_t n, isr_handler_t handler);
#endif
(void)n;
(void)handler;
}
+/* No-op stub of unregister_interrupt_handler; parameters intentionally
+ * unused. NOTE(review): presumably this #if branch serves builds without
+ * real IDT support — confirm against the surrounding #if condition. */
+static inline void unregister_interrupt_handler(uint8_t n, isr_handler_t handler) {
+ (void)n;
+ (void)handler;
+}
#endif
int kmbox_fetch(kmbox_t* mb, void** msg, uint32_t timeout_ms);
int kmbox_tryfetch(kmbox_t* mb, void** msg);
+/* ------------------------------------------------------------------ */
+/* Kernel condition variable (paired with kmutex_t) */
+/* ------------------------------------------------------------------ */
+
+/* Per-condvar cap on simultaneous waiters; kcond_wait fails once full. */
+#define KCOND_MAX_WAITERS 16
+
+typedef struct kcond {
+ spinlock_t lock;                              /* protects waiters[] and nwaiters */
+ struct process* waiters[KCOND_MAX_WAITERS];   /* densely packed waiter list */
+ uint32_t nwaiters;                            /* number of valid entries in waiters[] */
+} kcond_t;
+
+/* Initialize cv to the empty state; must be called before any other kcond_* use. */
+void kcond_init(kcond_t* cv);
+
+/* Release mutex, sleep until signaled, re-acquire mutex.
+ * Returns 0 on success, 1 on timeout (timeout_ms == 0 means wait forever).
+ * NOTE(review): the implementation also returns 1 on error (NULL args,
+ * waiter table full) — callers cannot distinguish these from a timeout. */
+int kcond_wait(kcond_t* cv, kmutex_t* mtx, uint32_t timeout_ms);
+
+/* Wake one waiter. */
+void kcond_signal(kcond_t* cv);
+
+/* Wake all waiters. */
+void kcond_broadcast(kcond_t* cv);
+
#endif /* SYNC_H */
void timer_init(uint32_t frequency);
uint32_t get_tick_count(void);
+/* High-resolution monotonic clock (nanoseconds since boot).
+ * Uses TSC if calibrated, falls back to tick-based 10ms granularity. */
+uint64_t clock_gettime_ns(void);
+
+/* TSC calibration (called during LAPIC timer setup) */
+void tsc_calibrate(uint32_t tsc_khz);
+uint32_t tsc_get_khz(void);
+
#endif
#include "arch/x86/signal.h"
#define IDT_ENTRIES 256
+/* Total chain nodes in the static pool, shared across ALL vectors. */
+#define IRQ_CHAIN_POOL_SIZE 32
struct idt_entry idt[IDT_ENTRIES];
struct idt_ptr idtp;
-// Array of function pointers for handlers
+/* Singly linked node used when a vector has more than one handler. */
+struct irq_chain_node {
+ isr_handler_t handler;
+ struct irq_chain_node* next;
+};
+
+/* Static pool (zero-initialized BSS): handler == 0 marks a free node. */
+static struct irq_chain_node irq_chain_pool[IRQ_CHAIN_POOL_SIZE];
+/* Per-vector chain head; NULL means "use the legacy slot below (if set)". */
+static struct irq_chain_node* irq_chain_heads[IDT_ENTRIES];
+
+/* Legacy single-handler array kept for backward compatibility.
+ * New registrations via register_interrupt_handler go through the chain. */
isr_handler_t interrupt_handlers[IDT_ENTRIES];
static spinlock_t idt_handlers_lock = {0};
+/* Allocate a chain node from the static pool by linear scan.
+ * A node with handler == 0 is free (pool starts zeroed in BSS; nodes are
+ * returned to the pool by zeroing handler). Returns NULL when exhausted.
+ * Caller must hold idt_handlers_lock. */
+static struct irq_chain_node* irq_chain_alloc(void) {
+ for (int i = 0; i < IRQ_CHAIN_POOL_SIZE; i++) {
+ if (irq_chain_pool[i].handler == 0) {
+ return &irq_chain_pool[i];
+ }
+ }
+ return 0;
+}
+
// Extern prototypes for Assembly stubs
extern void isr0(); extern void isr1(); extern void isr2(); extern void isr3();
extern void isr4(); extern void isr5(); extern void isr6(); extern void isr7();
void register_interrupt_handler(uint8_t n, isr_handler_t handler) {
uintptr_t flags = spin_lock_irqsave(&idt_handlers_lock);
- interrupt_handlers[n] = handler;
+
+ /* Fast path: the first handler for a vector lives in the legacy slot,
+  * keeping single-handler dispatch a plain array lookup. */
+ if (!interrupt_handlers[n] && !irq_chain_heads[n]) {
+ interrupt_handlers[n] = handler;
+ spin_unlock_irqrestore(&idt_handlers_lock, flags);
+ return;
+ }
+
+ /* Second handler for this vector: migrate the legacy handler into the
+  * chain first. If the pool is exhausted, bail out and KEEP the existing
+  * registration (previously the legacy slot was cleared even when the
+  * allocation failed, silently losing the installed handler). */
+ if (interrupt_handlers[n] && !irq_chain_heads[n]) {
+ struct irq_chain_node* first = irq_chain_alloc();
+ if (!first) {
+ spin_unlock_irqrestore(&idt_handlers_lock, flags);
+ return;
+ }
+ first->handler = interrupt_handlers[n];
+ first->next = 0;
+ irq_chain_heads[n] = first;
+ interrupt_handlers[n] = 0;
+ }
+
+ /* Push the new handler onto the head of the chain. On pool exhaustion
+  * the new handler is simply not registered; existing ones stay intact. */
+ struct irq_chain_node* node = irq_chain_alloc();
+ if (node) {
+ node->handler = handler;
+ node->next = irq_chain_heads[n];
+ irq_chain_heads[n] = node;
+ }
+
+ spin_unlock_irqrestore(&idt_handlers_lock, flags);
+}
+
+/* Remove handler from vector n; matching is by function-pointer identity. */
+void unregister_interrupt_handler(uint8_t n, isr_handler_t handler) {
+ uintptr_t flags = spin_lock_irqsave(&idt_handlers_lock);
+
+ /* Check legacy slot */
+ if (interrupt_handlers[n] == handler) {
+ interrupt_handlers[n] = 0;
+ spin_unlock_irqrestore(&idt_handlers_lock, flags);
+ return;
+ }
+
+ /* Search chain (pointer-to-pointer unlink, no prev bookkeeping).
+  * Zeroing victim->handler returns the node to the static pool. */
+ struct irq_chain_node** pp = &irq_chain_heads[n];
+ while (*pp) {
+ if ((*pp)->handler == handler) {
+ struct irq_chain_node* victim = *pp;
+ *pp = victim->next;
+ victim->handler = 0;
+ victim->next = 0;
+ break;
+ }
+ pp = &(*pp)->next;
+ }
+
+ /* If only one handler left in chain, migrate back to legacy slot.
+  * This also runs when nothing was removed but the chain holds exactly
+  * one node — benign, it only changes representation, not behavior.
+  * NOTE(review): the ISR dispatch path walks irq_chain_heads without
+  * taking idt_handlers_lock; on SMP this unlink could race a concurrent
+  * chain walk — confirm the single-CPU assumption. */
+ if (irq_chain_heads[n] && !irq_chain_heads[n]->next) {
+ interrupt_handlers[n] = irq_chain_heads[n]->handler;
+ irq_chain_heads[n]->handler = 0;
+ irq_chain_heads[n] = 0;
+ }
+
spin_unlock_irqrestore(&idt_handlers_lock, flags);
}
}
}
- // Check if we have a custom handler
- if (interrupt_handlers[regs->int_no] != 0) {
+ // Check if we have custom handler(s)
+ if (irq_chain_heads[regs->int_no]) {
+ /* Shared IRQ: call all chained handlers */
+ struct irq_chain_node* node = irq_chain_heads[regs->int_no];
+ while (node) {
+ if (node->handler) node->handler(regs);
+ node = node->next;
+ }
+ } else if (interrupt_handlers[regs->int_no] != 0) {
isr_handler_t handler = interrupt_handlers[regs->int_no];
handler(regs);
} else {
#include "arch/x86/lapic.h"
#include "arch/x86/kernel_va_map.h"
#include "hal/cpu_features.h"
+#include "timer.h"
#include "vmm.h"
#include "io.h"
#include "console.h"
/* Reset LAPIC timer to max count */
lapic_write(LAPIC_TIMER_ICR, 0xFFFFFFFF);
+ /* Read TSC before and after to calibrate TSC frequency */
+ uint32_t tsc_lo0, tsc_hi0, tsc_lo1, tsc_hi1;
+ __asm__ volatile("rdtsc" : "=a"(tsc_lo0), "=d"(tsc_hi0));
+
/* Wait for PIT to count down */
while (!(inb(0x61) & 0x20)) {
__asm__ volatile("pause");
}
+ __asm__ volatile("rdtsc" : "=a"(tsc_lo1), "=d"(tsc_hi1));
+
/* Stop LAPIC timer */
lapic_write(LAPIC_TIMER_LVT, LAPIC_LVT_MASKED);
/* Read how many ticks elapsed in ~10ms */
uint32_t elapsed = 0xFFFFFFFF - lapic_read(LAPIC_TIMER_CCR);
+ /* Calibrate TSC: tsc_delta ticks in ~10ms → tsc_khz = tsc_delta / 10 */
+ uint64_t tsc0 = ((uint64_t)tsc_hi0 << 32) | tsc_lo0;
+ uint64_t tsc1 = ((uint64_t)tsc_hi1 << 32) | tsc_lo1;
+ uint64_t tsc_delta = tsc1 - tsc0;
+ uint32_t tsc_khz = (uint32_t)(tsc_delta / 10);
+ if (tsc_khz > 0) {
+ tsc_calibrate(tsc_khz);
+ kprintf("[TSC] Calibrated: %u kHz (%u MHz)\n",
+ (unsigned)tsc_khz, (unsigned)(tsc_khz / 1000));
+ }
+
/* Calculate ticks per desired frequency:
* ticks_per_second = elapsed * 100 (since we measured 10ms)
* ticks_per_interrupt = ticks_per_second / frequency_hz */
static uint32_t tick = 0;
+/* TSC-based nanosecond timekeeping */
+static uint32_t g_tsc_khz = 0;
+
uint32_t get_tick_count(void) {
return tick;
}
+/* Record the calibrated TSC frequency (kHz); called from LAPIC timer setup. */
+void tsc_calibrate(uint32_t tsc_khz) {
+ g_tsc_khz = tsc_khz;
+}
+
+/* Returns the calibrated TSC frequency in kHz, or 0 if not calibrated. */
+uint32_t tsc_get_khz(void) {
+ return g_tsc_khz;
+}
+
+/* Read the 64-bit timestamp counter (edx:eax).
+ * NOTE(review): rdtsc is not a serializing instruction; no lfence fencing
+ * here — presumably acceptable for coarse timekeeping, confirm if used
+ * for fine-grained measurement. */
+static inline uint64_t rdtsc(void) {
+ uint32_t lo, hi;
+ __asm__ volatile("rdtsc" : "=a"(lo), "=d"(hi));
+ return ((uint64_t)hi << 32) | lo;
+}
+
+/* TSC value captured at timer_init(); baseline for clock_gettime_ns(). */
+static uint64_t g_tsc_boot = 0;
+
+/* Monotonic nanoseconds since boot.
+ * With a calibrated TSC this converts elapsed cycles to ns; otherwise it
+ * degrades to scheduler-tick resolution (TIMER_MS_PER_TICK per tick). */
+uint64_t clock_gettime_ns(void) {
+ if (g_tsc_khz == 0) {
+ /* Fallback: tick counter only, coarse 10ms granularity. */
+ uint64_t elapsed_ms = (uint64_t)tick * TIMER_MS_PER_TICK;
+ return elapsed_ms * 1000000ULL;
+ }
+
+ uint64_t cycles = rdtsc() - g_tsc_boot;
+ uint64_t khz = (uint64_t)g_tsc_khz;
+ /* Convert cycles -> ns in two pieces so the multiply by 1e6 cannot
+  * overflow 64 bits even after long uptimes:
+  *   ns = (cycles / khz) * 1e6 + ((cycles % khz) * 1e6) / khz */
+ uint64_t whole = (cycles / khz) * 1000000ULL;
+ uint64_t frac = ((cycles % khz) * 1000000ULL) / khz;
+ return whole + frac;
+}
+
static void hal_tick_bridge(void) {
tick++;
vdso_update_tick(tick);
void timer_init(uint32_t frequency) {
kprintf("[TIMER] Initializing...\n");
+ g_tsc_boot = rdtsc();
hal_timer_init(frequency, hal_tick_bridge);
}
ksem_signal(&mb->not_full);
return 0;
}
+
+/* ------------------------------------------------------------------ */
+/* Kernel Condition Variable */
+/* ------------------------------------------------------------------ */
+
+/* Put cv into the empty state: lock initialized, no registered waiters. */
+void kcond_init(kcond_t* cv) {
+ if (!cv) return;
+ spinlock_init(&cv->lock);
+ cv->nwaiters = 0;
+ uint32_t slot = 0;
+ while (slot < KCOND_MAX_WAITERS) {
+ cv->waiters[slot] = 0;
+ slot++;
+ }
+}
+
+/* Block on cv: caller must hold mtx. Atomically-ish: enqueue as waiter,
+ * release mtx, sleep, then re-acquire mtx. Returns 0 if signaled, 1 on
+ * timeout or error (NULL args / waiter table full). */
+int kcond_wait(kcond_t* cv, kmutex_t* mtx, uint32_t timeout_ms) {
+ if (!cv || !mtx) return 1;
+
+ uintptr_t flags = spin_lock_irqsave(&cv->lock);
+
+ if (!current_process || cv->nwaiters >= KCOND_MAX_WAITERS) {
+ spin_unlock_irqrestore(&cv->lock, flags);
+ return 1;
+ }
+
+ /* Register as a waiter BEFORE dropping any lock, so a signal issued
+  * after kmutex_unlock below can find us in the list. */
+ cv->waiters[cv->nwaiters++] = current_process;
+
+ if (timeout_ms > 0) {
+ /* Round the timeout up to whole scheduler ticks. */
+ uint32_t ticks = (timeout_ms + TIMER_MS_PER_TICK - 1) / TIMER_MS_PER_TICK;
+ current_process->wake_at_tick = get_tick_count() + ticks;
+ current_process->state = PROCESS_SLEEPING;
+ } else {
+ current_process->state = PROCESS_BLOCKED;
+ }
+
+ spin_unlock_irqrestore(&cv->lock, flags);
+
+ /* Release the mutex before sleeping.
+  * NOTE(review): between this unlock and schedule() a signal may already
+  * set our state back to PROCESS_READY; this relies on schedule() treating
+  * a READY current process as runnable (no lost wakeup) — confirm against
+  * the scheduler's handling of that window. */
+ kmutex_unlock(mtx);
+ schedule();
+
+ /* Re-acquire the mutex after waking */
+ kmutex_lock(mtx);
+
+ /* Check if we timed out (still in waiters list). A signal removes us
+  * from the list, so "still present" means no signal arrived; remove
+  * ourselves (compacting the array) and report timeout.
+  * NOTE(review): a spurious wakeup would also be reported as timeout. */
+ flags = spin_lock_irqsave(&cv->lock);
+ int found = 0;
+ for (uint32_t i = 0; i < cv->nwaiters; i++) {
+ if (cv->waiters[i] == current_process) {
+ for (uint32_t j = i; j + 1 < cv->nwaiters; j++)
+ cv->waiters[j] = cv->waiters[j + 1];
+ cv->waiters[--cv->nwaiters] = 0;
+ found = 1;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&cv->lock, flags);
+
+ return found ? 1 : 0;
+}
+
+/* Wake the first eligible waiter (FIFO order by array position). */
+void kcond_signal(kcond_t* cv) {
+ if (!cv) return;
+
+ uintptr_t flags = spin_lock_irqsave(&cv->lock);
+
+ struct process* to_wake = NULL;
+ for (uint32_t i = 0; i < cv->nwaiters; i++) {
+ struct process* p = cv->waiters[i];
+ if (p && (p->state == PROCESS_BLOCKED || p->state == PROCESS_SLEEPING)) {
+ /* Remove p from the list, compacting the remaining entries. */
+ for (uint32_t j = i; j + 1 < cv->nwaiters; j++)
+ cv->waiters[j] = cv->waiters[j + 1];
+ cv->waiters[--cv->nwaiters] = 0;
+
+ p->state = PROCESS_READY;
+ p->wake_at_tick = 0;   /* cancel any pending timed wakeup */
+ to_wake = p;
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&cv->lock, flags);
+
+ /* Enqueue outside cv->lock — presumably to avoid holding it while the
+  * scheduler takes its own ready-queue lock; confirm lock ordering. */
+ if (to_wake) {
+ sched_enqueue_ready(to_wake);
+ }
+}
+
+/* Wake every eligible waiter and empty the waiter list. */
+void kcond_broadcast(kcond_t* cv) {
+ if (!cv) return;
+
+ /* Collect wakeups under the lock, enqueue after releasing it (same
+  * lock-ordering discipline as kcond_signal). */
+ struct process* wake_list[KCOND_MAX_WAITERS];
+ uint32_t wake_count = 0;
+
+ uintptr_t flags = spin_lock_irqsave(&cv->lock);
+
+ for (uint32_t i = 0; i < cv->nwaiters; i++) {
+ struct process* p = cv->waiters[i];
+ if (p && (p->state == PROCESS_BLOCKED || p->state == PROCESS_SLEEPING)) {
+ p->state = PROCESS_READY;
+ p->wake_at_tick = 0;   /* cancel any pending timed wakeup */
+ wake_list[wake_count++] = p;
+ cv->waiters[i] = 0;
+ }
+ }
+ /* The whole list is cleared, including any entries that were skipped
+  * above (non-BLOCKED/SLEEPING waiters are dropped, not retained). */
+ cv->nwaiters = 0;
+
+ spin_unlock_irqrestore(&cv->lock, flags);
+
+ for (uint32_t i = 0; i < wake_count; i++) {
+ sched_enqueue_ready(wake_list[i]);
+ }
+}
tp.tv_sec = rtc_unix_timestamp();
tp.tv_nsec = 0;
} else {
- uint32_t ticks = get_tick_count();
- uint32_t total_ms = ticks * TIMER_MS_PER_TICK;
- tp.tv_sec = total_ms / 1000U;
- tp.tv_nsec = (total_ms % 1000U) * 1000000U;
+ uint64_t ns = clock_gettime_ns();
+ tp.tv_sec = (uint32_t)(ns / 1000000000ULL);
+ tp.tv_nsec = (uint32_t)(ns % 1000000000ULL);
}
if (copy_to_user(user_tp, &tp, sizeof(tp)) < 0) return -EFAULT;