// Syscall gate (int 0x80) must be callable from user mode (DPL=3)
idt_set_gate(128, (uint32_t)isr128, 0x08, 0xEE);
+ // IPI reschedule vector (0xFD = 253) — wakes idle APs to run schedule()
+ extern void isr253(void);
+ idt_set_gate(253, (uint32_t)isr253, 0x08, 0x8E);
+
// LAPIC spurious interrupt vector (must have an IDT entry or CPU triple-faults)
idt_set_gate(255, (uint32_t)isr255, 0x08, 0x8E);
return;
}
+ // IPI reschedule (vector 253): send EOI and call schedule()
+ if (regs->int_no == 253) {
+ lapic_eoi();
+ extern void schedule(void);
+ schedule();
+ return;
+ }
+
// Send EOI for IRQs (32-47) BEFORE calling the handler.
// This is critical: the timer handler calls schedule() which may
// context-switch away. If EOI is deferred until after the handler,
/* Per-CPU run queues, indexed by CPU number; all access is serialized by sched_lock. */
static struct cpu_rq pcpu_rq[SCHED_MAX_CPUS];
#ifdef __i386__
#include "arch/x86/lapic.h"
#include "arch/x86/percpu.h"

/*
 * Kick a remote CPU with the reschedule IPI so it drops into schedule().
 * Silently does nothing when the target is the calling CPU, when the
 * local APIC is not enabled, or when the SMP layer does not know the
 * target index.  NOTE(review): assumes the caller holds no lock the
 * IPI handler might take — confirm call sites run after unlock.
 */
static void sched_ipi_resched(uint32_t target_cpu) {
    extern const struct cpu_info* smp_get_cpu(uint32_t index);

    if (target_cpu == percpu_cpu_index())
        return;                 /* local CPU reschedules itself */
    if (!lapic_is_enabled())
        return;                 /* no APIC => single-CPU operation */

    const struct cpu_info* info = smp_get_cpu(target_cpu);
    if (info != NULL)
        lapic_send_ipi(info->lapic_id, IPI_RESCHED_VEC);
}
#else
/* Non-x86 (or non-SMP) build: there is no remote CPU to wake. */
static void sched_ipi_resched(uint32_t target_cpu) { (void)target_cpu; }
#endif
+
/*
 * Bit-scan-forward: index of the lowest set bit of v (0..31).
 * Returns 32 when v == 0 (TZCNT semantics).  The explicit zero check
 * matters: __builtin_ctz() has undefined behavior for a zero argument,
 * so the previous unguarded call silently inherited that UB.
 */
static inline uint32_t bsf32(uint32_t v) {
    return v ? (uint32_t)__builtin_ctz(v) : 32u;
}
/*
 * Make a process runnable: remove it from any sleep queue and, if it is
 * in PROCESS_READY state, place it on the active run queue of its home
 * CPU (clamped to CPU 0 if cpu_id is out of range) and bump that CPU's
 * load counter.  The reschedule IPI is deliberately sent only AFTER
 * sched_lock is released: the target CPU's schedule() path takes the
 * same lock, so sending while holding it could deadlock.
 */
void sched_enqueue_ready(struct process* p) {
    if (!p)
        return;

    uint32_t cpu = 0;
    int kick = 0;

    uintptr_t flags = spin_lock_irqsave(&sched_lock);
    sleep_queue_remove(p);
    if (p->state == PROCESS_READY) {
        cpu = (p->cpu_id < SCHED_MAX_CPUS) ? p->cpu_id : 0;
        rq_enqueue(pcpu_rq[cpu].active, p);
        sched_pcpu_inc_load(cpu);
        kick = 1;
    }
    spin_unlock_irqrestore(&sched_lock, flags);

    if (kick)
        sched_ipi_resched(cpu);
}
void thread_wrapper(void (*fn)(void));
ready_queue_head->prev = proc;
ready_queue_tail = proc;
- rq_enqueue(pcpu_rq[proc->cpu_id].active, proc);
+ rq_enqueue(pcpu_rq[0].active, proc);
+ sched_pcpu_inc_load(0);
spin_unlock_irqrestore(&sched_lock, flags);
return proc;