AS ?= $(TOOLPREFIX)as
LD ?= $(TOOLPREFIX)ld
- # lwIP sources (NO_SYS=1, IPv4 only, no apps)
+ # lwIP sources (NO_SYS=0, IPv4, threaded netconn API; sockets not yet enabled)
LWIPDIR := third_party/lwip/src
LWIP_CORE := $(LWIPDIR)/core/init.c $(LWIPDIR)/core/def.c $(LWIPDIR)/core/inet_chksum.c \
$(LWIPDIR)/core/ip.c $(LWIPDIR)/core/mem.c $(LWIPDIR)/core/memp.c \
$(LWIPDIR)/core/ipv4/ip4.c $(LWIPDIR)/core/ipv4/ip4_addr.c \
$(LWIPDIR)/core/ipv4/ip4_frag.c
LWIP_NETIF := $(LWIPDIR)/netif/ethernet.c
- LWIP_SOURCES := $(LWIP_CORE) $(LWIP_IPV4) $(LWIP_NETIF)
+ LWIP_API := $(LWIPDIR)/api/api_lib.c $(LWIPDIR)/api/api_msg.c \
+ $(LWIPDIR)/api/err.c $(LWIPDIR)/api/if_api.c $(LWIPDIR)/api/netbuf.c \
+ $(LWIPDIR)/api/netifapi.c $(LWIPDIR)/api/tcpip.c
+ LWIP_SOURCES := $(LWIP_CORE) $(LWIP_IPV4) $(LWIP_NETIF) $(LWIP_API)
NET_SOURCES := $(wildcard $(SRC_DIR)/net/*.c) $(wildcard $(SRC_DIR)/net/lwip_port/*.c)
C_SOURCES += $(NET_SOURCES)
ARCH_ASFLAGS := --32
# Default User Flags (Allow override via make CFLAGS=...)
- CFLAGS ?= -O2 -Wall -Wextra
+ CFLAGS ?= -O2 -Wall -Wextra -Werror -Wno-error=cpp
# Merge Flags
CFLAGS := $(ARCH_CFLAGS) $(CFLAGS)
CC := aarch64-linux-gnu-gcc
AS := aarch64-linux-gnu-as
LD := aarch64-linux-gnu-ld
- CFLAGS := -ffreestanding -O2 -Wall -Wextra -Iinclude
+ CFLAGS := -ffreestanding -O2 -Wall -Wextra -Werror -Wno-error=cpp -Iinclude
LDFLAGS := -T $(SRC_DIR)/arch/arm/linker.ld
ASFLAGS :=
ASM_SOURCES := $(wildcard $(SRC_DIR)/arch/arm/*.S)
CC := riscv64-linux-gnu-gcc
AS := riscv64-linux-gnu-as
LD := riscv64-linux-gnu-ld
- CFLAGS := -ffreestanding -O2 -Wall -Wextra -Iinclude -mcmodel=medany
+ CFLAGS := -ffreestanding -O2 -Wall -Wextra -Werror -Wno-error=cpp -Iinclude -mcmodel=medany
LDFLAGS := -T $(SRC_DIR)/arch/riscv/linker.ld
ASFLAGS :=
ASM_SOURCES := $(wildcard $(SRC_DIR)/arch/riscv/*.S)
CC := mipsel-linux-gnu-gcc
AS := mipsel-linux-gnu-as
LD := mipsel-linux-gnu-ld
- CFLAGS := -ffreestanding -O2 -Wall -Wextra -Iinclude -mabi=32 -march=mips32
+ CFLAGS := -ffreestanding -O2 -Wall -Wextra -Werror -Wno-error=cpp -Iinclude -mabi=32 -march=mips32
LDFLAGS := -T $(SRC_DIR)/arch/mips/linker.ld
ASFLAGS :=
ASM_SOURCES := $(wildcard $(SRC_DIR)/arch/mips/*.S)
--- /dev/null
+#ifndef LWIP_ARCH_SYS_ARCH_H
+#define LWIP_ARCH_SYS_ARCH_H
+
+#include "sync.h"
+#include <stdint.h>
+
+/* lwIP sys_arch types backed by AdrOS kernel primitives.
+ * Each handle is a pointer to a kernel object allocated on the kernel
+ * heap in sys_arch.c; NULL marks an invalid handle.  The *_valid
+ * macros receive a pointer to the handle, while the *_valid_val
+ * variants receive the handle itself by value (lwIP convention). */
+
+typedef ksem_t* sys_sem_t;
+typedef kmutex_t* sys_mutex_t;
+typedef kmbox_t* sys_mbox_t;
+typedef void* sys_thread_t; /* opaque; sys_thread_new() returns a struct process* */
+typedef uintptr_t sys_prot_t; /* saved IRQ state for SYS_ARCH_PROTECT */
+
+/* Semaphore handle validity helpers */
+#define sys_sem_valid(s) ((s) != NULL && *(s) != NULL)
+#define sys_sem_valid_val(s) ((s) != NULL)
+#define sys_sem_set_invalid(s) do { if (s) *(s) = NULL; } while(0)
+
+/* Mutex handle validity helpers */
+#define sys_mutex_valid(m) ((m) != NULL && *(m) != NULL)
+#define sys_mutex_valid_val(m) ((m) != NULL)
+#define sys_mutex_set_invalid(m) do { if (m) *(m) = NULL; } while(0)
+
+/* Mailbox handle validity helpers */
+#define sys_mbox_valid(mb) ((mb) != NULL && *(mb) != NULL)
+#define sys_mbox_valid_val(mb) ((mb) != NULL)
+#define sys_mbox_set_invalid(mb) do { if (mb) *(mb) = NULL; } while(0)
+
+#endif /* LWIP_ARCH_SYS_ARCH_H */
#ifndef LWIPOPTS_H
#define LWIPOPTS_H
-/* ---- NO_SYS mode (raw API, no threads) ---- */
-#define NO_SYS 1
+/* ---- Threaded mode (full API with threads) ---- */
+#define NO_SYS 0
+/* Sockets stay disabled: only the netconn API is turned on below, and
+ * sockets.c is not part of the lwIP source list. */
#define LWIP_SOCKET 0
-#define LWIP_NETCONN 0
+#define LWIP_NETCONN 1
#define LWIP_NETIF_API 0
+/* Real sys_arch mutexes are provided, so no compat fallback needed. */
+#define LWIP_COMPAT_MUTEX 0
+#define LWIP_TCPIP_CORE_LOCKING 0
+/* tcpip thread sizing.  NOTE(review): mailbox sizes must stay within
+ * the kernel's KMBOX_MAX_MSGS (32) — 16 and 8 are within bounds. */
+#define TCPIP_THREAD_STACKSIZE 4096
+#define TCPIP_THREAD_PRIO 1
+#define TCPIP_MBOX_SIZE 16
+#define DEFAULT_THREAD_STACKSIZE 4096
+#define DEFAULT_ACCEPTMBOX_SIZE 8
+#define DEFAULT_RAW_RECVMBOX_SIZE 8
+#define DEFAULT_UDP_RECVMBOX_SIZE 8
+#define DEFAULT_TCP_RECVMBOX_SIZE 8
/* ---- Memory settings ---- */
#define MEM_ALIGNMENT 4
#define LWIP_PROVIDE_ERRNO 0
#define LWIP_RAND() ((u32_t)0x12345678) /* TODO: proper RNG */
#define LWIP_TIMERS 1
-#define SYS_LIGHTWEIGHT_PROT 0
+#define SYS_LIGHTWEIGHT_PROT 1
+/* SYS_LIGHTWEIGHT_PROT=1: SYS_ARCH_PROTECT maps to the IRQ-disable
+ * primitives implemented in sys_arch.c. */
#define LWIP_DONT_PROVIDE_BYTEORDER_FUNCTIONS 1
/* ---- Raw API callbacks ---- */
--- /dev/null
+#ifndef SYNC_H
+#define SYNC_H
+
+#include <stdint.h>
+#include "spinlock.h"
+
+/* ------------------------------------------------------------------ */
+/* Kernel counting semaphore (blocking, sleep/wake — NOT spin-wait) */
+/* ------------------------------------------------------------------ */
+
+/* Upper bound on simultaneously blocked threads per semaphore.
+ * NOTE(review): a wait attempted while the table is full fails, and the
+ * failure is indistinguishable from a timeout (see ksem_wait_timeout). */
+#define KSEM_MAX_WAITERS 16
+
+struct process; /* forward */
+
+typedef struct ksem {
+ spinlock_t lock; /* guards count, waiters[] and nwaiters */
+ int32_t count; /* unclaimed tokens */
+ struct process* waiters[KSEM_MAX_WAITERS]; /* blocked threads, FIFO wake order */
+ uint32_t nwaiters; /* live entries in waiters[] */
+} ksem_t;
+
+void ksem_init(ksem_t* s, int32_t initial_count);
+void ksem_wait(ksem_t* s);
+
+/* Wait with timeout (milliseconds). 0 = wait forever.
+ * Returns 0 on success, 1 on timeout. */
+int ksem_wait_timeout(ksem_t* s, uint32_t timeout_ms);
+
+void ksem_signal(ksem_t* s);
+
+/* ------------------------------------------------------------------ */
+/* Kernel mutex (binary semaphore) */
+/* ------------------------------------------------------------------ */
+
+/* NOTE(review): no owner tracking — any thread may unlock, and a
+ * holder re-locking will block on itself (not recursive). */
+typedef struct kmutex {
+ ksem_t sem;
+} kmutex_t;
+
+void kmutex_init(kmutex_t* m);
+void kmutex_lock(kmutex_t* m);
+void kmutex_unlock(kmutex_t* m);
+
+/* ------------------------------------------------------------------ */
+/* Kernel mailbox (fixed-size circular queue + semaphores) */
+/* ------------------------------------------------------------------ */
+
+/* Hard cap on mailbox capacity; kmbox_init() clamps requests to this. */
+#define KMBOX_MAX_MSGS 32
+
+typedef struct kmbox {
+ void* msgs[KMBOX_MAX_MSGS]; /* ring buffer of message pointers */
+ uint32_t head; /* next slot to fetch from */
+ uint32_t tail; /* next slot to post into */
+ uint32_t count; /* messages currently queued */
+ uint32_t capacity; /* usable slots (<= KMBOX_MAX_MSGS) */
+ ksem_t not_empty; /* tokens = queued messages */
+ ksem_t not_full; /* tokens = free slots */
+ spinlock_t lock; /* guards the ring fields above */
+} kmbox_t;
+
+int kmbox_init(kmbox_t* mb, uint32_t size);
+void kmbox_free(kmbox_t* mb);
+void kmbox_post(kmbox_t* mb, void* msg);
+int kmbox_trypost(kmbox_t* mb, void* msg);
+
+/* Fetch with timeout (ms). 0 = wait forever.
+ * Returns 0 on success, 1 on timeout. */
+int kmbox_fetch(kmbox_t* mb, void** msg, uint32_t timeout_ms);
+int kmbox_tryfetch(kmbox_t* mb, void** msg);
+
+#endif /* SYNC_H */
--- /dev/null
+--- a/third_party/lwip/src/api/tcpip.c
++++ b/third_party/lwip/src/api/tcpip.c
+@@ -55,8 +55,8 @@
+ #define TCPIP_MSG_VAR_FREE(name) API_VAR_FREE(MEMP_TCPIP_MSG_API, name)
+
+-/* global variables */
+-static tcpip_init_done_fn tcpip_init_done;
+-static void *tcpip_init_done_arg;
++/* global variables — volatile: set in tcpip_init(), read in tcpip_thread() */
++static volatile tcpip_init_done_fn tcpip_init_done;
++static void * volatile tcpip_init_done_arg;
+ static sys_mbox_t tcpip_mbox;
/* Map LAPIC MMIO region into kernel virtual address space.
* Use a fixed kernel VA for the LAPIC page. */
- uintptr_t lapic_va = 0xC0200000U; /* Fixed kernel VA, above _end */
+ uintptr_t lapic_va = 0xC0400000U; /* Fixed kernel VA, well above _end */
vmm_map_page((uint64_t)phys_base, (uint64_t)lapic_va,
VMM_FLAG_PRESENT | VMM_FLAG_RW | VMM_FLAG_NOCACHE);
lapic_base = (volatile uint32_t*)lapic_va;
/*
 * Kernel stack allocator with guard pages.
- * Layout per slot: [guard page (unmapped)] [stack page (mapped)]
- * Virtual region: 0xC8000000 .. 0xCFFFFFFF (128MB, up to 16384 stacks)
+ * Layout per slot: [guard page (unmapped)] [2 stack pages (mapped)]
+ * Virtual region: 0xC8000000 .. 0xCFFFFFFF (128MB, up to 10922 stacks)
 */
#define KSTACK_REGION 0xC8000000U
-#define KSTACK_SLOT (2 * 0x1000U) /* guard + stack = 8KB per slot */
-#define KSTACK_MAX 16384
+#define KSTACK_PAGES 2 /* 8KB usable stack per thread */
+#define KSTACK_SIZE (KSTACK_PAGES * 0x1000U)
+#define KSTACK_SLOT (0x1000U + KSTACK_SIZE) /* guard + stack */
+#define KSTACK_MAX 10922
+/* 10922 slots x 12KB = 0x07FFA000 bytes — ends just below 0xD0000000 */
static uint32_t kstack_next_slot = 0;
static spinlock_t kstack_lock = {0};
uintptr_t base = KSTACK_REGION + slot * KSTACK_SLOT;
/* base+0x0000 = guard page (leave unmapped) */
- /* base+0x1000 = actual stack page */
- void* phys = pmm_alloc_page();
- if (!phys) return NULL;
- vmm_map_page((uint64_t)(uintptr_t)phys, (uint64_t)(base + 0x1000U),
- VMM_FLAG_PRESENT | VMM_FLAG_RW);
- memset((void*)(base + 0x1000U), 0, 0x1000U);
+ /* base+0x1000 .. base+0x1000+KSTACK_SIZE = actual stack pages */
+ for (uint32_t i = 0; i < KSTACK_PAGES; i++) {
+ void* phys = pmm_alloc_page();
+ if (!phys) return NULL;
+ vmm_map_page((uint64_t)(uintptr_t)phys,
+ (uint64_t)(base + 0x1000U + i * 0x1000U),
+ VMM_FLAG_PRESENT | VMM_FLAG_RW);
+ }
+ memset((void*)(base + 0x1000U), 0, KSTACK_SIZE);
return (void*)(base + 0x1000U);
}
uintptr_t addr = (uintptr_t)stack;
if (addr < KSTACK_REGION || addr >= KSTACK_REGION + KSTACK_MAX * KSTACK_SLOT)
return;
- vmm_unmap_page((uint64_t)addr);
+ for (uint32_t i = 0; i < KSTACK_PAGES; i++)
+ vmm_unmap_page((uint64_t)(addr + i * 0x1000U));
/* Note: slot is not recycled — acceptable for now */
}
}
proc->kernel_stack = (uint32_t*)stack;
- proc->sp = arch_kstack_init((uint8_t*)stack + 4096,
+ proc->sp = arch_kstack_init((uint8_t*)stack + KSTACK_SIZE,
thread_wrapper, fork_child_trampoline);
proc->next = ready_queue_head;
}
proc->kernel_stack = (uint32_t*)kstack;
- proc->sp = arch_kstack_init((uint8_t*)kstack + 4096,
+ proc->sp = arch_kstack_init((uint8_t*)kstack + KSTACK_SIZE,
thread_wrapper, clone_child_trampoline);
/* Insert into process list */
kernel_proc->next = kernel_proc;
kernel_proc->prev = kernel_proc;
- hal_cpu_set_kernel_stack((uintptr_t)kstack0 + 4096);
+ hal_cpu_set_kernel_stack((uintptr_t)kstack0 + KSTACK_SIZE);
spin_unlock_irqrestore(&sched_lock, flags);
}
proc->kernel_stack = (uint32_t*)stack;
- proc->sp = arch_kstack_init((uint8_t*)stack + 4096,
+ proc->sp = arch_kstack_init((uint8_t*)stack + KSTACK_SIZE,
thread_wrapper, entry_point);
proc->next = ready_queue_head;
}
if (current_process->kernel_stack) {
- hal_cpu_set_kernel_stack((uintptr_t)current_process->kernel_stack + 4096);
+ hal_cpu_set_kernel_stack((uintptr_t)current_process->kernel_stack + KSTACK_SIZE);
}
spin_unlock_irqrestore(&sched_lock, irq_flags);
--- /dev/null
+#include "sync.h"
+#include "process.h"
+#include "utils.h"
+
+extern uint32_t get_tick_count(void);
+extern void schedule(void);
+extern void sched_enqueue_ready(struct process* p);
+extern struct process* current_process;
+
+/* ------------------------------------------------------------------ */
+/* Kernel Semaphore */
+/* ------------------------------------------------------------------ */
+
+/* Initialize a counting semaphore: `initial_count` tokens, no waiters. */
+void ksem_init(ksem_t* s, int32_t initial_count) {
+    if (s == NULL)
+        return;
+    spinlock_init(&s->lock);
+    s->count    = initial_count;
+    s->nwaiters = 0;
+    /* Empty the fixed-size waiter table. */
+    uint32_t slot = KSEM_MAX_WAITERS;
+    while (slot > 0)
+        s->waiters[--slot] = 0;
+}
+
+/* Block until a token is available (infinite timeout).
+ * NOTE(review): the status of ksem_wait_timeout() is discarded, so a
+ * failed enqueue (full waiter table, or no current process) is silently
+ * treated as success by callers such as kmutex_lock()/kmbox_post(). */
+void ksem_wait(ksem_t* s) {
+ (void)ksem_wait_timeout(s, 0);
+}
+
+/* Decrement the semaphore, blocking up to timeout_ms milliseconds
+ * (0 = wait forever).  Returns 0 when a token was obtained, 1 on
+ * timeout.  NOTE(review): 1 is also returned when current_process is
+ * NULL or the fixed waiter table is full, so callers cannot tell
+ * "timed out" apart from "could not wait at all". */
+int ksem_wait_timeout(ksem_t* s, uint32_t timeout_ms) {
+ if (!s) return 1;
+
+ uintptr_t flags = spin_lock_irqsave(&s->lock);
+ if (s->count > 0) {
+ /* Fast path: a banked token is available — take it, no blocking. */
+ s->count--;
+ spin_unlock_irqrestore(&s->lock, flags);
+ return 0;
+ }
+
+ /* Need to block — add ourselves to the wait list */
+ if (!current_process || s->nwaiters >= KSEM_MAX_WAITERS) {
+ spin_unlock_irqrestore(&s->lock, flags);
+ return 1;
+ }
+
+ s->waiters[s->nwaiters++] = current_process;
+ current_process->state = PROCESS_BLOCKED;
+
+ /* Set a wake timeout if requested (convert ms to ticks at 50 Hz) */
+ uint32_t deadline = 0;
+ if (timeout_ms > 0) {
+ uint32_t ticks = (timeout_ms + 19) / 20; /* round up */
+ deadline = get_tick_count() + ticks;
+ current_process->wake_at_tick = deadline;
+ current_process->state = PROCESS_SLEEPING; /* timer will wake us */
+ }
+
+ /* NOTE(review): between this unlock and schedule(), a ksem_signal()
+ * from another context may already have marked us READY; this assumes
+ * the scheduler tolerates schedule() while current is READY — confirm. */
+ spin_unlock_irqrestore(&s->lock, flags);
+ schedule();
+
+ /* We were woken — check if it was a timeout or a signal */
+ flags = spin_lock_irqsave(&s->lock);
+
+ /* Remove ourselves from waiters if still present (timeout case) */
+ int found = 0;
+ for (uint32_t i = 0; i < s->nwaiters; i++) {
+ if (s->waiters[i] == current_process) {
+ /* We timed out — remove from list (ksem_signal removes waiters
+ * it wakes, so being present means no signal reached us). */
+ for (uint32_t j = i; j + 1 < s->nwaiters; j++)
+ s->waiters[j] = s->waiters[j + 1];
+ s->waiters[--s->nwaiters] = 0;
+ found = 1;
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&s->lock, flags);
+
+ /* If we were still in the waiters list, it was a timeout */
+ return found ? 1 : 0;
+}
+
+/* Release one token.  Hand-off semantics: if a thread is blocked on
+ * the semaphore, the token is given to it directly (count is NOT
+ * incremented); count only grows when nobody is waiting.  Waiters are
+ * woken in FIFO order (scan from index 0). */
+void ksem_signal(ksem_t* s) {
+ if (!s) return;
+
+ uintptr_t flags = spin_lock_irqsave(&s->lock);
+
+ /* Find a waiter still blocked/sleeping (skip those already woken by timeout) */
+ int woke = 0;
+ for (uint32_t i = 0; i < s->nwaiters; i++) {
+ struct process* p = s->waiters[i];
+ if (p && (p->state == PROCESS_BLOCKED || p->state == PROCESS_SLEEPING)) {
+ /* Remove from waiters list */
+ for (uint32_t j = i; j + 1 < s->nwaiters; j++)
+ s->waiters[j] = s->waiters[j + 1];
+ s->waiters[--s->nwaiters] = 0;
+
+ p->state = PROCESS_READY;
+ p->wake_at_tick = 0; /* cancel any pending timeout wake */
+ sched_enqueue_ready(p);
+ woke = 1;
+ break;
+ }
+ }
+
+ if (!woke) {
+ /* Nobody to wake — bank the token for a future ksem_wait(). */
+ s->count++;
+ }
+
+ spin_unlock_irqrestore(&s->lock, flags);
+}
+
+/* ------------------------------------------------------------------ */
+/* Kernel Mutex */
+/* ------------------------------------------------------------------ */
+
+/* A mutex is simply a binary semaphore initialized with one token. */
+void kmutex_init(kmutex_t* m) {
+    if (m != NULL)
+        ksem_init(&m->sem, 1);
+}
+
+/* Acquire: take the single token (blocks while another holder owns it). */
+void kmutex_lock(kmutex_t* m) {
+    if (m != NULL)
+        ksem_wait(&m->sem);
+}
+
+/* Release: return the token, waking one blocked locker if any. */
+void kmutex_unlock(kmutex_t* m) {
+    if (m != NULL)
+        ksem_signal(&m->sem);
+}
+
+/* ------------------------------------------------------------------ */
+/* Kernel Mailbox */
+/* ------------------------------------------------------------------ */
+
+/* Set up a mailbox with `size` slots (0 or oversized requests are
+ * clamped to KMBOX_MAX_MSGS).  Returns 0 on success, -1 if mb is NULL. */
+int kmbox_init(kmbox_t* mb, uint32_t size) {
+    if (mb == NULL)
+        return -1;
+
+    uint32_t cap = size;
+    if (cap == 0 || cap > KMBOX_MAX_MSGS)
+        cap = KMBOX_MAX_MSGS;
+
+    spinlock_init(&mb->lock);
+    mb->head     = 0;
+    mb->tail     = 0;
+    mb->count    = 0;
+    mb->capacity = cap;
+
+    /* Clear every slot (the full array, not just `cap`) so no stale
+     * message pointers survive a re-init. */
+    for (uint32_t slot = KMBOX_MAX_MSGS; slot-- > 0; )
+        mb->msgs[slot] = 0;
+
+    /* not_empty counts queued messages; not_full counts free slots. */
+    ksem_init(&mb->not_empty, 0);
+    ksem_init(&mb->not_full, (int32_t)cap);
+    return 0;
+}
+
+/* Reset the mailbox indices.  NOTE(review): this does not wake threads
+ * blocked inside kmbox_post()/kmbox_fetch() and leaves both semaphores
+ * untouched — callers must guarantee the mailbox is quiescent before
+ * freeing it.  TODO confirm all call sites satisfy this. */
+void kmbox_free(kmbox_t* mb) {
+ if (!mb) return;
+ mb->count = 0;
+ mb->head = 0;
+ mb->tail = 0;
+}
+
+/* Blocking post: wait for a free slot, then append msg to the ring.
+ * NOTE(review): ksem_wait() discards its status — if the semaphore's
+ * waiter table is full, the wait fails silently and this post can
+ * overwrite a full ring.  Consider propagating that failure. */
+void kmbox_post(kmbox_t* mb, void* msg) {
+ if (!mb) return;
+ /* Consume one "free slot" token (blocks while the ring is full). */
+ ksem_wait(&mb->not_full);
+
+ uintptr_t flags = spin_lock_irqsave(&mb->lock);
+ mb->msgs[mb->tail] = msg;
+ mb->tail = (mb->tail + 1) % mb->capacity;
+ mb->count++;
+ spin_unlock_irqrestore(&mb->lock, flags);
+
+ /* Publish one "message available" token to readers. */
+ ksem_signal(&mb->not_empty);
+}
+
+/* Non-blocking post.  Returns 0 on success, -1 when mb is NULL or the
+ * mailbox is full.
+ *
+ * Fix: the previous version inserted a message WITHOUT consuming a
+ * not_full token.  When that message was later fetched, kmbox_fetch()
+ * signalled not_full anyway, so the token count drifted above the real
+ * number of free slots — eventually letting blocking kmbox_post()
+ * calls pass ksem_wait(not_full) on a full ring and overrun it.
+ * We now claim a free-slot token atomically before inserting, which
+ * also subsumes the old `count >= capacity` check. */
+int kmbox_trypost(kmbox_t* mb, void* msg) {
+    if (!mb) return -1;
+
+    /* Try to claim one free-slot token without blocking.  A zero count
+     * means the ring is full or all free slots are already claimed by
+     * blocked posters — either way we must fail. */
+    uintptr_t sflags = spin_lock_irqsave(&mb->not_full.lock);
+    if (mb->not_full.count <= 0) {
+        spin_unlock_irqrestore(&mb->not_full.lock, sflags);
+        return -1;
+    }
+    mb->not_full.count--;
+    spin_unlock_irqrestore(&mb->not_full.lock, sflags);
+
+    /* Token in hand — a slot is guaranteed; insert under the ring lock. */
+    uintptr_t flags = spin_lock_irqsave(&mb->lock);
+    mb->msgs[mb->tail] = msg;
+    mb->tail = (mb->tail + 1) % mb->capacity;
+    mb->count++;
+    spin_unlock_irqrestore(&mb->lock, flags);
+
+    /* Publish one "message available" token to readers. */
+    ksem_signal(&mb->not_empty);
+    return 0;
+}
+
+/* Blocking fetch with timeout (ms, 0 = forever).  Returns 0 and stores
+ * the message in *msg on success, 1 on timeout.  NOTE(review): a failed
+ * enqueue inside ksem_wait_timeout (full waiter table) is also reported
+ * as a timeout here. */
+int kmbox_fetch(kmbox_t* mb, void** msg, uint32_t timeout_ms) {
+ if (!mb) return 1;
+
+ /* Consume one "message available" token, or give up on timeout. */
+ int rc = ksem_wait_timeout(&mb->not_empty, timeout_ms);
+ if (rc != 0) return 1; /* timeout */
+
+ uintptr_t flags = spin_lock_irqsave(&mb->lock);
+ void* m = mb->msgs[mb->head];
+ mb->head = (mb->head + 1) % mb->capacity;
+ mb->count--;
+ spin_unlock_irqrestore(&mb->lock, flags);
+
+ if (msg) *msg = m;
+
+ /* A slot was freed — release one "free slot" token to writers. */
+ ksem_signal(&mb->not_full);
+ return 0;
+}
+
+/* Non-blocking fetch.  Returns 0 and stores the message in *msg on
+ * success, -1 when mb is NULL or the mailbox is empty.
+ *
+ * Fix: the previous version removed a message WITHOUT consuming a
+ * not_empty token, leaving a stale token behind.  A later blocking
+ * kmbox_fetch() would then pass ksem_wait(not_empty) on an empty ring,
+ * read a stale slot and underflow `count`.  We now claim a message
+ * token atomically before dequeuing, which subsumes the old
+ * `count == 0` check. */
+int kmbox_tryfetch(kmbox_t* mb, void** msg) {
+    if (!mb) return -1;
+
+    /* Try to claim one message token without blocking. */
+    uintptr_t sflags = spin_lock_irqsave(&mb->not_empty.lock);
+    if (mb->not_empty.count <= 0) {
+        spin_unlock_irqrestore(&mb->not_empty.lock, sflags);
+        return -1;
+    }
+    mb->not_empty.count--;
+    spin_unlock_irqrestore(&mb->not_empty.lock, sflags);
+
+    /* Token in hand — a queued message is guaranteed; dequeue it. */
+    uintptr_t flags = spin_lock_irqsave(&mb->lock);
+    void* m = mb->msgs[mb->head];
+    mb->head = (mb->head + 1) % mb->capacity;
+    mb->count--;
+    spin_unlock_irqrestore(&mb->lock, flags);
+
+    if (msg) *msg = m;
+
+    /* A slot was freed — release one "free slot" token to writers. */
+    ksem_signal(&mb->not_full);
+    return 0;
+}
#include "lwip/ip4_addr.h"
#include "lwip/init.h"
#include "lwip/timeouts.h"
+#include "lwip/tcpip.h"
+#include "lwip/sys.h"
#include "netif/ethernet.h"
#include "e1000.h"
static struct netif e1000_nif;
static int net_initialized = 0;
+/* Set to 1 from the lwIP tcpip thread via net_init_done(); net_init()
+ * busy-polls it, hence volatile. */
+static volatile int tcpip_ready = 0;
+
+/* tcpip_init() completion callback — runs in the tcpip thread. */
+static void net_init_done(void* arg) {
+ (void)arg;
+ tcpip_ready = 1;
+}
+
/* Bring up lwIP in threaded mode and attach the E1000 netif.
 * Fix: `netmask` was declared but never initialized before being
 * passed to netif_add() — it now gets the QEMU user-net /24 mask. */
void net_init(void) {
if (!e1000_link_up()) {
uart_print("[NET] E1000 link down, skipping lwIP init.\n");
return;
}
- lwip_init();
+ /* Start lwIP tcpip thread and poll until it signals ready */
+ tcpip_init(net_init_done, NULL);
+ while (!tcpip_ready) {
+ /* NOTE(review): "pause" is x86-only — gate per-arch if this file is
+ * ever built for ARM/RISC-V/MIPS. */
+ __asm__ volatile("pause" ::: "memory");
+ }
ip4_addr_t ipaddr, netmask, gw;
IP4_ADDR(&ipaddr, 10, 0, 2, 15); /* QEMU user-mode default */
+ IP4_ADDR(&netmask, 255, 255, 255, 0); /* fix: was passed to netif_add uninitialized */
IP4_ADDR(&gw, 10, 0, 2, 2); /* QEMU user-mode gateway */
+ /* NOTE(review): with the tcpip thread already running, netif_add and
+ * netif_set_up from this thread should go through netifapi or the
+ * core lock — confirm against lwIP threading rules. */
netif_add(&e1000_nif, &ipaddr, &netmask, &gw, NULL,
- e1000_netif_init, ethernet_input);
+ e1000_netif_init, tcpip_input);
netif_set_default(&e1000_nif);
netif_set_up(&e1000_nif);
net_initialized = 1;
- uart_print("[NET] lwIP initialized, IP=10.0.2.15\n");
+ uart_print("[NET] lwIP initialized (threaded), IP=10.0.2.15\n");
}
/* Poll path: service the NIC only — lwIP timeouts are now driven by
 * the tcpip thread, not from this loop. */
void net_poll(void) {
if (!net_initialized) return;
e1000_netif_poll(&e1000_nif);
- sys_check_timeouts();
}
struct netif* net_get_netif(void) {
/*
- * lwIP sys_arch for AdrOS — NO_SYS=1 mode.
- * Only sys_now() is required (for timeouts).
+ * lwIP sys_arch for AdrOS — NO_SYS=0 mode.
+ * Provides semaphore, mutex, mailbox, thread, and protection primitives
+ * backed by AdrOS kernel sync objects (include/sync.h).
*/
#include "lwip/opt.h"
#include "lwip/sys.h"
+#include "lwip/err.h"
+#include "lwip/stats.h"
+
+#include "sync.h"
+#include "process.h"
+#include "spinlock.h"
+
+#include <stddef.h>
extern uint32_t get_tick_count(void);
+extern void* kmalloc(uint32_t size);
+extern void kfree(void* ptr);
+extern struct process* process_create_kernel(void (*entry)(void));
/* Return milliseconds since boot. Timer runs at 50 Hz → 20 ms per tick.
 * Resolution is therefore 20 ms; the u32 value wraps after ~49.7 days,
 * which lwIP's timeout arithmetic is designed to tolerate. */
u32_t sys_now(void) {
return (u32_t)(get_tick_count() * 20);
}
+
+/* ------------------------------------------------------------------ */
+/* Semaphore */
+/* ------------------------------------------------------------------ */
+
+/* Allocate a kernel-heap semaphore for lwIP with `count` initial
+ * tokens.  ERR_ARG for a NULL handle pointer, ERR_MEM when the heap
+ * is exhausted, ERR_OK otherwise. */
+err_t sys_sem_new(sys_sem_t* sem, u8_t count) {
+    if (sem == NULL)
+        return ERR_ARG;
+    ksem_t* ks = (ksem_t*)kmalloc(sizeof(*ks));
+    if (ks == NULL)
+        return ERR_MEM;
+    ksem_init(ks, (int32_t)count);
+    *sem = ks;
+    return ERR_OK;
+}
+
+/* Destroy a semaphore created by sys_sem_new().  NOTE(review): frees
+ * the ksem storage even if threads are still queued inside it — lwIP
+ * callers must guarantee no waiters remain. */
+void sys_sem_free(sys_sem_t* sem) {
+ if (!sem || !*sem) return;
+ kfree(*sem);
+ *sem = NULL;
+}
+
+/* Post one token; no-op on an invalid handle. */
+void sys_sem_signal(sys_sem_t* sem) {
+ if (!sem || !*sem) return;
+ ksem_signal(*sem);
+}
+
+/* lwIP wait-with-timeout: timeout in ms, 0 = wait forever.  Returns
+ * the elapsed wait time in ms (20 ms clock resolution) on success, or
+ * SYS_ARCH_TIMEOUT on timeout.  NOTE(review): a failed enqueue inside
+ * ksem_wait_timeout() is also reported as a timeout. */
+u32_t sys_arch_sem_wait(sys_sem_t* sem, u32_t timeout) {
+ if (!sem || !*sem) return SYS_ARCH_TIMEOUT;
+ u32_t start = sys_now();
+ int rc = ksem_wait_timeout(*sem, timeout);
+ if (rc != 0) return SYS_ARCH_TIMEOUT;
+ u32_t elapsed = sys_now() - start;
+ return elapsed;
+}
+
+/* ------------------------------------------------------------------ */
+/* Mutex */
+/* ------------------------------------------------------------------ */
+
+/* Allocate an lwIP mutex backed by a kernel binary semaphore. */
+err_t sys_mutex_new(sys_mutex_t* mutex) {
+    if (mutex == NULL)
+        return ERR_ARG;
+    kmutex_t* km = (kmutex_t*)kmalloc(sizeof(*km));
+    if (km == NULL)
+        return ERR_MEM;
+    kmutex_init(km);
+    *mutex = km;
+    return ERR_OK;
+}
+
+/* Release the storage; caller must guarantee nobody still holds it. */
+void sys_mutex_free(sys_mutex_t* mutex) {
+    if (mutex == NULL || *mutex == NULL)
+        return;
+    kfree(*mutex);
+    *mutex = NULL;
+}
+
+/* Acquire; silently ignores an invalid handle. */
+void sys_mutex_lock(sys_mutex_t* mutex) {
+    if (mutex == NULL || *mutex == NULL)
+        return;
+    kmutex_lock(*mutex);
+}
+
+/* Release; silently ignores an invalid handle. */
+void sys_mutex_unlock(sys_mutex_t* mutex) {
+    if (mutex == NULL || *mutex == NULL)
+        return;
+    kmutex_unlock(*mutex);
+}
+
+/* ------------------------------------------------------------------ */
+/* Mailbox */
+/* ------------------------------------------------------------------ */
+
+/* Allocate a mailbox for lwIP.  A non-positive `size` falls back to
+ * KMBOX_MAX_MSGS; kmbox_init() additionally clamps oversized requests
+ * down to KMBOX_MAX_MSGS (32). */
+err_t sys_mbox_new(sys_mbox_t* mbox, int size) {
+ if (!mbox) return ERR_ARG;
+ kmbox_t* mb = (kmbox_t*)kmalloc(sizeof(kmbox_t));
+ if (!mb) return ERR_MEM;
+ if (kmbox_init(mb, (uint32_t)(size > 0 ? size : KMBOX_MAX_MSGS)) < 0) {
+ kfree(mb);
+ return ERR_MEM;
+ }
+ *mbox = mb;
+ return ERR_OK;
+}
+
+/* Destroy a mailbox.  NOTE(review): kmbox_free() only resets indices
+ * and does not wake blocked posters/fetchers — the mailbox must be
+ * idle before this is called. */
+void sys_mbox_free(sys_mbox_t* mbox) {
+ if (!mbox || !*mbox) return;
+ kmbox_free(*mbox);
+ kfree(*mbox);
+ *mbox = NULL;
+}
+
+/* Blocking post (lwIP allows this to block until space is free). */
+void sys_mbox_post(sys_mbox_t* mbox, void* msg) {
+ if (!mbox || !*mbox) return;
+ kmbox_post(*mbox, msg);
+}
+
+/* Non-blocking post; ERR_MEM when the mailbox is full. */
+err_t sys_mbox_trypost(sys_mbox_t* mbox, void* msg) {
+ if (!mbox || !*mbox) return ERR_ARG;
+ if (kmbox_trypost(*mbox, msg) < 0) return ERR_MEM;
+ return ERR_OK;
+}
+
+/* ISR variant — forwards to the normal trypost.  NOTE(review):
+ * kmbox_trypost() takes spinlocks and may wake a thread via
+ * ksem_signal(); confirm those paths are interrupt-safe before calling
+ * this from a real ISR. */
+err_t sys_mbox_trypost_fromisr(sys_mbox_t* mbox, void* msg) {
+ return sys_mbox_trypost(mbox, msg);
+}
+
+/* Blocking fetch with timeout (ms, 0 = forever).  Returns elapsed ms
+ * on success, SYS_ARCH_TIMEOUT on timeout or invalid handle. */
+u32_t sys_arch_mbox_fetch(sys_mbox_t* mbox, void** msg, u32_t timeout) {
+ if (!mbox || !*mbox) return SYS_ARCH_TIMEOUT;
+ u32_t start = sys_now();
+ int rc = kmbox_fetch(*mbox, msg, timeout);
+ if (rc != 0) return SYS_ARCH_TIMEOUT;
+ u32_t elapsed = sys_now() - start;
+ return elapsed;
+}
+
+/* Non-blocking fetch; SYS_MBOX_EMPTY when nothing is queued. */
+u32_t sys_arch_mbox_tryfetch(sys_mbox_t* mbox, void** msg) {
+ if (!mbox || !*mbox) return SYS_MBOX_EMPTY;
+ if (kmbox_tryfetch(*mbox, msg) < 0) return SYS_MBOX_EMPTY;
+ return 0;
+}
+
+/* ------------------------------------------------------------------ */
+/* Thread */
+/* ------------------------------------------------------------------ */
+
+/* Wrapper: lwIP thread functions take a void* arg, but
+ * process_create_kernel takes void (*)(void).
+ * We store the real function + arg in a small struct and use a trampoline. */
+
+struct lwip_thread_arg {
+ lwip_thread_fn func;
+ void* arg;
+};
+
+/* Fixed trampoline table: at most 4 lwIP threads can ever exist — the
+ * slots are claimed in sys_thread_new() and never recycled. */
+#define LWIP_MAX_THREADS 4
+static struct lwip_thread_arg lwip_thread_args[LWIP_MAX_THREADS];
+static int lwip_thread_count = 0;
+
+/* One zero-argument entry per slot; each reads its own args entry. */
+static void lwip_thread_trampoline_0(void) { lwip_thread_args[0].func(lwip_thread_args[0].arg); }
+static void lwip_thread_trampoline_1(void) { lwip_thread_args[1].func(lwip_thread_args[1].arg); }
+static void lwip_thread_trampoline_2(void) { lwip_thread_args[2].func(lwip_thread_args[2].arg); }
+static void lwip_thread_trampoline_3(void) { lwip_thread_args[3].func(lwip_thread_args[3].arg); }
+
+static void (*lwip_trampolines[LWIP_MAX_THREADS])(void) = {
+ lwip_thread_trampoline_0,
+ lwip_thread_trampoline_1,
+ lwip_thread_trampoline_2,
+ lwip_thread_trampoline_3,
+};
+
+/* Create a kernel thread running `thread(arg)`.  Returns an opaque
+ * handle (struct process*) or NULL on failure.  stacksize/prio are
+ * accepted for API compatibility but ignored by the kernel.
+ *
+ * Fix: the trampoline-slot counter was bumped with a plain,
+ * unsynchronized increment — two concurrent callers could claim the
+ * same slot and clobber each other's func/arg.  Slot reservation is
+ * now done inside a SYS_ARCH_PROTECT critical section. */
+sys_thread_t sys_thread_new(const char* name, lwip_thread_fn thread,
+                            void* arg, int stacksize, int prio) {
+    (void)name;
+    (void)stacksize;
+    (void)prio;
+
+    if (!thread) return NULL;
+
+    /* Atomically reserve a trampoline slot. */
+    sys_prot_t prot = sys_arch_protect();
+    if (lwip_thread_count >= LWIP_MAX_THREADS) {
+        sys_arch_unprotect(prot);
+        return NULL;
+    }
+    int idx = lwip_thread_count++;
+    lwip_thread_args[idx].func = thread;
+    lwip_thread_args[idx].arg = arg;
+    sys_arch_unprotect(prot);
+
+    /* NOTE: if process creation fails the slot is leaked — acceptable
+     * for the small, boot-time-only set of lwIP threads. */
+    struct process* p = process_create_kernel(lwip_trampolines[idx]);
+    return (sys_thread_t)p;
+}
+
+/* ------------------------------------------------------------------ */
+/* Critical section protection */
+/* ------------------------------------------------------------------ */
+
+/* SYS_ARCH_PROTECT: disable interrupts and return the previous flags.
+ * NOTE(review): this protects only the local CPU's interrupt state —
+ * confirm it remains sufficient if SMP is ever enabled. */
+sys_prot_t sys_arch_protect(void) {
+ return irq_save();
+}
+
+/* Restore the interrupt state saved by sys_arch_protect(). */
+void sys_arch_unprotect(sys_prot_t pval) {
+ irq_restore(pval);
+}
+
+/* ------------------------------------------------------------------ */
+/* Init (called by lwIP) */
+/* ------------------------------------------------------------------ */
+
+void sys_init(void) {
+ /* Nothing to do — kernel primitives are already initialized */
+}