uint32_t* kernel_stack;
#define SCHED_NUM_PRIOS 32
#define SCHED_DEFAULT_PRIO 16
+#define SCHED_TIME_SLICE 2 /* quantum budget; schedule() checks-then-decrements, so a task is actually preempted on the (SLICE+1)th tick — ~30ms at 100Hz, not 20ms. TODO(review): confirm intended quantum */
uint8_t priority; // 0 = highest, 31 = lowest
int8_t nice; // -20 to +19 (maps to priority)
+ uint8_t time_slice; // ticks remaining in current quantum
process_state_t state;
uint32_t wake_at_tick;
uint32_t alarm_tick;
#include "timer.h"
#include "console.h"
-#include "process.h"
+#include "process.h"
#include "vdso.h"
#include "vga_console.h"
#include "hal/timer.h"
-#if defined(__i386__)
-#include "arch/x86/lapic.h"
-#endif
-
static uint32_t tick = 0;
uint32_t get_tick_count(void) {
}
static void hal_tick_bridge(void) {
-#if defined(__i386__)
- if (lapic_is_enabled() && lapic_get_id() != 0) return;
-#endif
tick++;
vdso_update_tick(tick);
vga_flush();
process_wake_check(tick);
- /* Preempt every SCHED_DIVISOR ticks to reduce context-switch
- * overhead in emulated environments (QEMU TLB flush on CR3
- * reload is expensive). Sleeping processes still wake at full
- * TIMER_HZ resolution via process_wake_check above. */
- if (tick % 2 == 0)
- schedule();
+ schedule();
}
void timer_init(uint32_t frequency) {
#if defined(__i386__)
#include "arch/x86/idt.h"
#include "arch/x86/lapic.h"
+#include "arch/x86/ioapic.h"
#include "io.h"
#include "console.h"
static void timer_irq(struct registers* regs) {
(void)regs;
+ /* Only the BSP drives the global tick, scheduling, and VGA
+ * refresh; APs would only add spinlock contention on sched_lock /
+ * vga_lock. NOTE(review): this test assumes the BSP's LAPIC ID is
+ * 0 — common in practice (and under QEMU) but not guaranteed by
+ * the spec; consider latching the boot CPU's LAPIC ID at startup
+ * and comparing against that instead. */
if (g_tick_cb) g_tick_cb();
}
register_interrupt_handler(32, timer_irq);
if (lapic_is_enabled()) {
- /* Use LAPIC timer — more precise and per-CPU capable */
+ /* Use LAPIC timer — more precise and per-CPU capable.
+ * Mask PIT IRQ 0 via the IOAPIC so only the LAPIC timer drives
+ * vector 32. Without this the PIT free-runs at its power-on
+ * default of ~18.2065 Hz, adding ~18 extra ticks/sec — roughly an
+ * 18% error at the default 100 Hz tick rate. */
+ ioapic_mask_irq(0);
lapic_timer_start(frequency_hz);
} else {
/* Fallback to legacy PIT */
struct process* prev = current_process;
- // Put prev back into expired runqueue if it's still runnable.
- // Priority decay: penalize CPU-bound processes that exhaust their slice.
+ // Time-slice preemption: if the process is still running (timer
+ // preemption, not a voluntary yield) and has quantum left, do NOT
+ // preempt. Woken processes accumulate in rq_active and get their
+ // turn when the slice expires. Note the check-then-decrement
+ // order below: a task survives SCHED_TIME_SLICE decrements and is
+ // preempted on the following tick, so the context-switch rate is
+ // TIMER_HZ/(SCHED_TIME_SLICE+1), while sleep/wake timing keeps
+ // full tick resolution via process_wake_check.
if (prev->state == PROCESS_RUNNING) {
+ if (prev->time_slice > 0) {
+ prev->time_slice--;
+ spin_unlock_irqrestore(&sched_lock, irq_flags);
+ return;
+ }
+ // Slice exhausted — enqueue to expired with priority decay.
prev->state = PROCESS_READY;
if (prev->priority < SCHED_NUM_PRIOS - 1) prev->priority++;
rq_enqueue(rq_expired, prev);
if (prev == next) {
prev->state = PROCESS_RUNNING;
+ prev->time_slice = SCHED_TIME_SLICE;
spin_unlock_irqrestore(&sched_lock, irq_flags);
return;
}
current_process = next;
current_process->state = PROCESS_RUNNING;
+ current_process->time_slice = SCHED_TIME_SLICE;
if (current_process->addr_space && current_process->addr_space != prev->addr_space) {
hal_cpu_set_address_space(current_process->addr_space);