From: Tulio A M Mendes Date: Sun, 15 Feb 2026 02:47:29 +0000 (-0300) Subject: feat: per-CPU scheduler runqueue infrastructure with load tracking X-Git-Url: https://projects.tadryanom.me/docs/static/gitweb.css?a=commitdiff_plain;h=517e5dca7b246886d1421baf5c70983879a4a494;p=AdrOS.git feat: per-CPU scheduler runqueue infrastructure with load tracking - Add rq_load field to percpu_data struct (offset 20, struct stays 32 bytes) - New sched_pcpu module: per-CPU load counters with atomic operations - sched_pcpu_init(): initialize for N CPUs after SMP enumeration - sched_pcpu_inc_load/dec_load(): lock-free load tracking - sched_pcpu_least_loaded(): find CPU with fewest ready processes - sched_pcpu_get_load(): query per-CPU load - Integrate load tracking into scheduler enqueue/dequeue paths - Wire up sched_pcpu_init() in arch_platform_setup after percpu_setup_gs - All 35/35 smoke tests pass, 16/16 battery, cppcheck clean --- diff --git a/include/arch/x86/percpu.h b/include/arch/x86/percpu.h index 2a1a591..993098f 100644 --- a/include/arch/x86/percpu.h +++ b/include/arch/x86/percpu.h @@ -15,7 +15,8 @@ struct percpu_data { struct process* current_process; /* Currently running process on this CPU */ uintptr_t kernel_stack; /* Top of this CPU's kernel stack */ uint32_t nested_irq; /* IRQ nesting depth */ - uint32_t reserved[3]; /* Padding to 32 bytes */ + uint32_t rq_load; /* Number of READY processes on this CPU */ + uint32_t reserved[2]; /* Padding to 32 bytes */ }; /* Initialize per-CPU data for all CPUs. Called once from BSP after SMP init. */ diff --git a/include/sched_pcpu.h b/include/sched_pcpu.h new file mode 100644 index 0000000..466c403 --- /dev/null +++ b/include/sched_pcpu.h @@ -0,0 +1,30 @@ +#ifndef SCHED_PCPU_H +#define SCHED_PCPU_H + +#include <stdint.h> + +/* + * Per-CPU scheduler runqueue infrastructure. + * + * Provides per-CPU runqueue data structures and load-balancing helpers. 
+ * The BSP currently runs the global scheduler; these structures prepare + * the foundation for future AP scheduling. + * + * Usage: + * sched_pcpu_init() — called once after SMP init + * sched_pcpu_get_load(cpu) — query load on a CPU + * sched_pcpu_least_loaded() — find CPU with fewest ready processes + * sched_pcpu_inc_load(cpu) — increment load counter + * sched_pcpu_dec_load(cpu) — decrement load counter + */ + +#define SCHED_PCPU_MAX 16 + +void sched_pcpu_init(uint32_t ncpus); +uint32_t sched_pcpu_get_load(uint32_t cpu); +uint32_t sched_pcpu_least_loaded(void); +void sched_pcpu_inc_load(uint32_t cpu); +void sched_pcpu_dec_load(uint32_t cpu); +uint32_t sched_pcpu_count(void); + +#endif diff --git a/src/arch/x86/arch_platform.c b/src/arch/x86/arch_platform.c index 450f5a4..f3a8c53 100644 --- a/src/arch/x86/arch_platform.c +++ b/src/arch/x86/arch_platform.c @@ -155,6 +155,9 @@ int arch_platform_setup(const struct boot_info* bi) { percpu_init(); percpu_setup_gs(0); + extern void sched_pcpu_init(uint32_t); + sched_pcpu_init(smp_get_cpu_count()); + /* Phase 2: Send INIT-SIPI-SIPI to wake APs */ smp_start_aps(); } diff --git a/src/kernel/sched_pcpu.c b/src/kernel/sched_pcpu.c new file mode 100644 index 0000000..2c7d8c5 --- /dev/null +++ b/src/kernel/sched_pcpu.c @@ -0,0 +1,48 @@ +#include "sched_pcpu.h" +#include "console.h" + +static uint32_t pcpu_load[SCHED_PCPU_MAX]; +static uint32_t pcpu_count; + +void sched_pcpu_init(uint32_t ncpus) { + if (ncpus > SCHED_PCPU_MAX) ncpus = SCHED_PCPU_MAX; + pcpu_count = ncpus; + for (uint32_t i = 0; i < SCHED_PCPU_MAX; i++) + pcpu_load[i] = 0; + kprintf("[SCHED] Per-CPU runqueues initialized for %u CPU(s).\n", + (unsigned)ncpus); +} + +uint32_t sched_pcpu_count(void) { + return pcpu_count; +} + +uint32_t sched_pcpu_get_load(uint32_t cpu) { + if (cpu >= pcpu_count) return 0; + return __atomic_load_n(&pcpu_load[cpu], __ATOMIC_RELAXED); +} + +uint32_t sched_pcpu_least_loaded(void) { + uint32_t best = 0; + uint32_t best_load = 
__atomic_load_n(&pcpu_load[0], __ATOMIC_RELAXED); + for (uint32_t i = 1; i < pcpu_count; i++) { + uint32_t l = __atomic_load_n(&pcpu_load[i], __ATOMIC_RELAXED); + if (l < best_load) { + best_load = l; + best = i; + } + } + return best; +} + +void sched_pcpu_inc_load(uint32_t cpu) { + if (cpu >= pcpu_count) return; + __atomic_add_fetch(&pcpu_load[cpu], 1, __ATOMIC_RELAXED); +} + +void sched_pcpu_dec_load(uint32_t cpu) { + if (cpu >= pcpu_count) return; + uint32_t old = __atomic_load_n(&pcpu_load[cpu], __ATOMIC_RELAXED); + if (old > 0) + __atomic_sub_fetch(&pcpu_load[cpu], 1, __ATOMIC_RELAXED); +} diff --git a/src/kernel/scheduler.c b/src/kernel/scheduler.c index 07f7c05..4a4fa97 100644 --- a/src/kernel/scheduler.c +++ b/src/kernel/scheduler.c @@ -10,6 +10,7 @@ #include "hal/cpu.h" #include "hal/usermode.h" #include "arch_process.h" +#include "sched_pcpu.h" #include struct process* current_process = NULL; @@ -211,6 +212,7 @@ void sched_enqueue_ready(struct process* p) { sleep_queue_remove(p); if (p->state == PROCESS_READY) { rq_enqueue(rq_active, p); + sched_pcpu_inc_load(0); } spin_unlock_irqrestore(&sched_lock, flags); } @@ -894,6 +896,7 @@ void schedule(void) { if (next) { // next came from rq_active — safe to dequeue. rq_dequeue(rq_active, next); + sched_pcpu_dec_load(0); } else { // Nothing in runqueues. if (prev->state == PROCESS_READY) {