for (int fd = 0; fd < PROCESS_MAX_FILES; fd++) {
struct file* f = current_process->files[fd];
if (!f) continue;
- f->refcount++;
+ __sync_fetch_and_add(&f->refcount, 1);
child->files[fd] = f;
child->fd_flags[fd] = current_process->fd_flags[fd];
}
if (!f) return -EBADF;
current_process->files[fd] = NULL;
- if (f->refcount > 0) {
- f->refcount--;
- }
- if (f->refcount == 0) {
+ if (__sync_sub_and_fetch(&f->refcount, 1) == 0) {
if (f->node) {
vfs_close(f->node);
}
static int syscall_dup_impl(int oldfd) {
struct file* f = fd_get(oldfd);
if (!f) return -EBADF;
- f->refcount++;
+ __sync_fetch_and_add(&f->refcount, 1);
int newfd = fd_alloc_from(0, f);
if (newfd < 0) {
- f->refcount--;
+ __sync_sub_and_fetch(&f->refcount, 1);
return -EMFILE;
}
return newfd;
(void)fd_close(newfd);
}
- f->refcount++;
+ __sync_fetch_and_add(&f->refcount, 1);
current_process->files[newfd] = f;
return newfd;
}
(void)fd_close(newfd);
}
- f->refcount++;
+ __sync_fetch_and_add(&f->refcount, 1);
current_process->files[newfd] = f;
return newfd;
}
if ((f.saved.cs & 3U) != 3U) return -EPERM;
if ((f.saved.ss & 3U) != 3U) return -EPERM;
+ // Sanitize eflags: clear IOPL (bits 12-13) to prevent privilege escalation,
+ // ensure IF (bit 9) is set so interrupts remain enabled in usermode.
+ f.saved.eflags = (f.saved.eflags & ~0x3000U) | 0x200U;
+
// Restore the full saved trapframe. The interrupt stub will pop these regs and iret.
*regs = f.saved;
return 0;
return 0;
}
/* Conservative kernel/user boundary for the weak default.
 * Architecture-specific overrides (e.g. x86 uaccess.c) refine this
 * with page-table walks. The weak default MUST reject kernel addresses
 * to prevent privilege escalation via syscall arguments. */
#ifndef USER_ADDR_LIMIT
#define USER_ADDR_LIMIT 0xC0000000U
#endif

/*
 * user_range_ok - weak default validity check for a user-supplied range.
 * @user_ptr: start of the candidate user buffer
 * @len:      length of the buffer in bytes
 *
 * Returns 1 when [user_ptr, user_ptr + len) lies entirely below
 * USER_ADDR_LIMIT, 0 otherwise. An empty range is always accepted;
 * a NULL start for a non-empty range is always rejected.
 */
__attribute__((weak))
int user_range_ok(const void* user_ptr, size_t len) {
    uintptr_t start = (uintptr_t)user_ptr;

    if (len == 0) return 1;   /* empty range: trivially ok */
    if (start == 0) return 0; /* NULL page */

    uintptr_t last = start + len - 1;
    if (last < start) return 0;             /* address wrap-around */
    if (start >= USER_ADDR_LIMIT) return 0; /* starts in kernel space */
    if (last >= USER_ADDR_LIMIT) return 0;  /* spans into kernel space */

    return 1;
}
#include "uart_console.h"
#include "hal/cpu.h"
#include "hal/mm.h"
+#include "spinlock.h"
#include <stddef.h>
#include <stdint.h>
static uint64_t used_memory = 0;
static uint64_t max_frames = 0;
static uint64_t last_alloc_frame = 1;
+static spinlock_t pmm_lock = {0};
static uint64_t align_down(uint64_t value, uint64_t align) {
return value & ~(align - 1);
uint64_t start_frame = base / PAGE_SIZE;
uint64_t frames_count = size / PAGE_SIZE;
+ uintptr_t flags = spin_lock_irqsave(&pmm_lock);
for (uint64_t i = 0; i < frames_count; i++) {
if (start_frame + i >= max_frames) break;
}
}
}
+ spin_unlock_irqrestore(&pmm_lock, flags);
}
void pmm_set_limits(uint64_t total_mem, uint64_t max_fr) {
}
void* pmm_alloc_page(void) {
+ uintptr_t flags = spin_lock_irqsave(&pmm_lock);
+
// Start from frame 1 so we never return physical address 0.
if (last_alloc_frame < 1) last_alloc_frame = 1;
if (last_alloc_frame >= max_frames) last_alloc_frame = 1;
used_memory += PAGE_SIZE;
last_alloc_frame = i + 1;
if (last_alloc_frame >= max_frames) last_alloc_frame = 1;
+ spin_unlock_irqrestore(&pmm_lock, flags);
return (void*)(uintptr_t)(i * PAGE_SIZE);
}
}
+
+ spin_unlock_irqrestore(&pmm_lock, flags);
return NULL; // OOM
}
uint64_t frame = addr / PAGE_SIZE;
if (frame == 0 || frame >= max_frames) return;
+ uintptr_t flags = spin_lock_irqsave(&pmm_lock);
+
uint16_t rc = frame_refcount[frame];
if (rc > 1) {
- __sync_sub_and_fetch(&frame_refcount[frame], 1);
+ frame_refcount[frame]--;
+ spin_unlock_irqrestore(&pmm_lock, flags);
return;
}
frame_refcount[frame] = 0;
bitmap_unset(frame);
used_memory -= PAGE_SIZE;
+
+ spin_unlock_irqrestore(&pmm_lock, flags);
}
/*
 * pmm_incref - take an extra reference on the frame containing @paddr.
 *
 * Silently ignores frame 0 (never handed out by the allocator) and
 * addresses beyond the managed range. The count is updated under the
 * PMM spinlock with interrupts disabled so it cannot race alloc/free.
 */
void pmm_incref(uintptr_t paddr) {
    uint64_t frame_idx = paddr / PAGE_SIZE;
    if (frame_idx == 0) return;
    if (frame_idx >= max_frames) return;

    uintptr_t irq_state = spin_lock_irqsave(&pmm_lock);
    frame_refcount[frame_idx] += 1;
    spin_unlock_irqrestore(&pmm_lock, irq_state);
}
uint16_t pmm_decref(uintptr_t paddr) {
uint64_t frame = paddr / PAGE_SIZE;
if (frame == 0 || frame >= max_frames) return 0;
- uint16_t new_val = __sync_sub_and_fetch(&frame_refcount[frame], 1);
+ uintptr_t flags = spin_lock_irqsave(&pmm_lock);
+ uint16_t new_val = --frame_refcount[frame];
if (new_val == 0) {
bitmap_unset(frame);
used_memory -= PAGE_SIZE;
}
+ spin_unlock_irqrestore(&pmm_lock, flags);
return new_val;
}
uint16_t pmm_get_refcount(uintptr_t paddr) {
uint64_t frame = paddr / PAGE_SIZE;
if (frame >= max_frames) return 0;
- return frame_refcount[frame];
+ uintptr_t flags = spin_lock_irqsave(&pmm_lock);
+ uint16_t rc = frame_refcount[frame];
+ spin_unlock_irqrestore(&pmm_lock, flags);
+ return rc;
}