Files
osc2025/kernel/lib/mman.c
2025-04-15 15:12:07 +08:00

270 lines
6.5 KiB
C

#include <mman.h>
#include <logger.h>
#include <errcode.h>
#include <string.h>
#include <utils.h>
#include <kmalloc.h>
#include <dtb.h>
#include <initrd.h>
/* Linker-script symbols bounding regions that must never be handed out. */
extern uint64_t __kernel_start;
extern uint64_t __kernel_end;
extern uint64_t __heap_start;
extern uint64_t __stack_end;
/* FDT parser callback registration: fills mman_memory_start/end from the
 * "memory@0" node's "reg" property (see mman_fdt_memory_cb_func). */
fdt_callback_t mman_dtb_memory_cb = {
	.name = "memory@0",
	.func = mman_fdt_memory_cb_func,
};
/* Physical RAM range discovered from the device tree; 4 KiB-aligned by
 * mman_init(). */
void *mman_memory_start = 0x0;
void *mman_memory_end = 0x0;
size_t mman_page_cnt = 0; /* number of managed 4 KiB pages */
/* Array-encoded binary tree of page-block headers; node 0 is the root
 * covering the entire range (initialized in mman_init()). */
page_header_t *mman_frame_array = 0x0;
/*
 * FDT callback for the "memory@0" node: extract the physical RAM range
 * from its "reg" property.
 *
 * Per the devicetree spec, "reg" holds <base size> cells (big-endian,
 * 32-bit address/size cells here), so the end of RAM is base + size —
 * the second cell must not be taken verbatim as an end address.  (On
 * boards where RAM starts at 0 the two happen to coincide, which is why
 * the old code appeared to work.)
 */
void mman_fdt_memory_cb_func(const vector_t *props)
{
	for (int i = 0; i < (int)props->size; ++i) {
		if (strcmp(VEC_AT(fdt_prop_t, props, i)->name, "reg"))
			continue;
		uint64_t base = ntoh32(
			*((uint32_t *)VEC_AT(fdt_prop_t, props, i)->value));
		uint64_t size = ntoh32(
			*((uint32_t *)VEC_AT(fdt_prop_t, props, i)->value + 1));
		mman_memory_start = (void *)base;
		mman_memory_end = (void *)(base + size);
	}
}
// This is an array-like binary tree structure, where LCH (left-child) will
// always be the largest 2^k size block while RCH not empty.
#define CUR (&mman_frame_array[idx])   /* node at local variable `idx` */
#define LIDX (((idx + 1) << 1) - 1)    /* left-child index (1-based heap math) */
#define LCH (&mman_frame_array[LIDX])
#define RIDX (((idx + 1) << 1 | 1) - 1) /* right-child index */
#define RCH (&mman_frame_array[RIDX])
/*
 * Recompute state/size of node `idx` (covering `sz` pages) from its two
 * children, after a recursive operation may have changed them.
 *
 * Size encoding per state (kept consistent with _allocate_page's checks):
 *   PAGE_FREE      -> size = pages covered (whole block free)
 *   PAGE_DIVIDED   -> size = OR of children's sizes, used as a summary
 *                     mask of the power-of-two block sizes still
 *                     available somewhere below
 *   PAGE_ALLOCATED / PAGE_RESERVED -> size = 0 (nothing available)
 */
static inline
void _pull(int idx, size_t sz)
{
/* Both children fully free -> merge them back into one free block. */
if (LCH->state == PAGE_FREE && RCH->state == PAGE_FREE) {
if (CUR->state != PAGE_FREE) {
LOG("free page merged");
DEBUG_MEM((uint64_t)idx);
}
CUR->state = PAGE_FREE;
CUR->size = sz;
}
/* Any non-free child means this node is (still) divided. */
if (LCH->state != PAGE_FREE || RCH->state != PAGE_FREE)
CUR->state = PAGE_DIVIDED;
switch (CUR->state) {
case PAGE_FREE:
CUR->size = sz;
break;
case PAGE_DIVIDED:
/* Availability summary: union of what both subtrees can supply. */
CUR->size = LCH->size | RCH->size;
break;
case PAGE_ALLOCATED:
case PAGE_RESERVED:
CUR->size = 0;
break;
default:
exit(ERR_UNREACHABLE);
}
}
/*
 * One-time allocator bring-up.  Order matters:
 *  1. align the DTB-reported RAM range and size the frame tree,
 *  2. mark the whole range as one free root block,
 *  3. reserve every region that must never be handed out (DTB memreserve
 *     entries, kernel image, heap/stack, the DTB blob, the initrd),
 *  4. only then start the kmalloc layer on top of the page allocator.
 */
void mman_init()
{
/* NOTE(review): assumes ALIGN4K treats both ends consistently so the
 * range does not shrink past reserved data — confirm its rounding. */
mman_memory_start = ALIGN4K(mman_memory_start);
mman_memory_end = ALIGN4K(mman_memory_end);
// 4KB per page
mman_page_cnt = (mman_memory_end - mman_memory_start) >> 12;
/* A complete binary tree over mman_page_cnt leaves needs fewer than
 * 2 * mman_page_cnt nodes. */
mman_frame_array = simple_alloc((mman_page_cnt << 1) * sizeof(page_header_t));
LOG(mman_memory_start);
LOG(mman_memory_end);
DEBUG_MEM((uint64_t)mman_page_cnt);
/* Root node: the entire range starts out as a single free block. */
mman_frame_array[0] = (page_header_t){
.state = PAGE_FREE,
.size = mman_page_cnt,
};
/* Honor the DTB memory reservation block (big-endian entries). */
fdt_reserve_entry_t *entry = 0x0;
for (int i = 0; i < (int)dtb_reserved_entries->size; ++i) {
entry = VEC_AT(fdt_reserve_entry_t, dtb_reserved_entries, i);
LOG(ntoh64(entry->address));
DEBUG_MEM(ntoh64(entry->address) + ntoh64(entry->size));
reserve_page((void *)ntoh64(entry->address),
(void *)ntoh64(entry->address) + ntoh64(entry->size));
}
reserve_page(&__kernel_start, &__kernel_end);
reserve_page(&__heap_start, &__stack_end);
reserve_page(dtb_start, dtb_end);
reserve_page(initrd_start, initrd_end);
init_mman_kmalloc();
}
/*
 * Recursive worker for allocate_page(): carve a block of `req` pages
 * (req is a power of two) out of node `idx`, which covers page indices
 * [l, r).  Returns the starting page index of the block, or
 * MMAN_NO_PAGE when this subtree cannot satisfy the request.
 */
static inline
uint64_t _allocate_page(size_t req, int idx, uint64_t l, uint64_t r)
{
uint64_t sz = r - l;
/* CUR->size summarizes availability below (see _pull), so this prunes
 * subtrees that cannot possibly hold `req` pages. */
if (req > sz || req > CUR->size) {
return MMAN_NO_PAGE;
}
/* Split point: left child spans the largest power-of-two prefix
 * (half the span when sz is itself a power of two). */
uint64_t m = l + ((msb64(sz) == sz) ? (sz >> 1) : msb64(sz));
switch (CUR->state) {
case PAGE_FREE:
if (req == sz) {
/* Exact fit: take the whole node. */
LOG("page allocated");
LOG(l);
DEBUG_MEM(r);
CUR->state = PAGE_ALLOCATED;
CUR->size = 0;
return l;
}
/* Too big: split into buddies and descend. */
LOG("page divided");
LOG(l);
DEBUG(r);
LCH->state = RCH->state = PAGE_FREE;
LCH->size = m - l;
RCH->size = r - m;
break;
case PAGE_DIVIDED:
break;
case PAGE_ALLOCATED:
case PAGE_RESERVED:
return MMAN_NO_PAGE;
default:
exit(ERR_UNREACHABLE);
}
uint64_t ret = MMAN_NO_PAGE;
// Goto child where size >= req
// If both children can handle, choose the one that has the least significant
// bit that can handle.
// It both children are the same, choose the left one
int is_lch_valid = (LCH->size >= req);
int is_rch_valid = (RCH->size >= req);
if (is_lch_valid && is_rch_valid) {
/* Best-fit heuristic: prefer the child whose smallest usable block
 * is smaller, to keep large blocks intact. */
if (lsb64(LCH->size & ~(req - 1)) <= lsb64(RCH->size & ~(req - 1)))
ret = _allocate_page(req, LIDX, l, m);
else
ret = _allocate_page(req, RIDX, m, r);
}
/* Fallback: the failed recursion ran _pull on its node, so the child
 * sizes re-checked here are up to date. */
if (ret == MMAN_NO_PAGE && LCH->size >= req)
ret = _allocate_page(req, LIDX, l, m);
if (ret == MMAN_NO_PAGE && RCH->size >= req)
ret = _allocate_page(req, RIDX, m, r);
_pull(idx, sz);
return ret;
}
/*
 * Allocate a contiguous block of `page_cnt` 4 KiB pages.
 *
 * page_cnt must be a non-zero power of two (the buddy tree only hands
 * out power-of-two blocks); anything else terminates via exit().
 * Out-of-memory also terminates via exit(); on success the block's
 * address within the managed range is returned.
 */
void *allocate_page(size_t page_cnt)
{
	/* msb64(x) == x checks "power of two", but 0 slips through it
	 * (both sides are 0) — reject 0 explicitly. */
	if (page_cnt == 0 || msb64(page_cnt) != page_cnt)
		exit(ERR_INVALID_OP);
	uint64_t offset = _allocate_page(page_cnt, 0, 0, mman_page_cnt);
	if (offset == MMAN_NO_PAGE)
		exit(ERR_NO_MEM);
	/* Page index -> byte address (4 KiB pages). */
	return mman_memory_start + offset * (1 << 12);
}
/*
 * Recursive worker for free_page(): walk from node `idx` (covering page
 * indices [l, r)) down toward page index `req`, mark the allocated
 * block starting exactly at `req` free again, and merge buddies on the
 * way back up via _pull().
 */
static inline
void _free_page(uint64_t req, int idx, uint64_t l, uint64_t r)
{
uint64_t sz = r - l;
switch (CUR->state) {
case PAGE_FREE:
/* Already free (double free / bad address): silently ignored. */
return;
case PAGE_ALLOCATED:
if (req == l) {
LOG("page freed");
LOG(l);
DEBUG_MEM(r);
CUR->state = PAGE_FREE;
CUR->size = sz;
return;
}
/* fallthrough — NOTE(review): req points *inside* an allocated
 * block here, and descending reads stale child nodes.  Looks like
 * an invalid-free path that should error out instead — confirm. */
case PAGE_DIVIDED:
break;
case PAGE_RESERVED:
default:
exit(ERR_UNREACHABLE);
}
/* Split point: left child spans the largest power-of-two prefix. */
uint64_t m = l + ((msb64(sz) == sz) ? (sz >> 1) : msb64(sz));
if (l <= req && req < m)
_free_page(req, LIDX, l, m);
if (m <= req && req < r)
_free_page(req, RIDX, m, r);
_pull(idx, sz);
}
void free_page(void *page)
{
if (ALIGN4K(page) != page)
exit(ERR_INVALID_OP);
uint64_t start = ((uint64_t)page - (uint64_t)mman_memory_start) >> 12;
_free_page(start, 0, 0, mman_page_cnt);
}
/*
 * Recursive worker for reserve_page(): mark every page in [ql, qr) as
 * PAGE_RESERVED inside node `idx`, which covers [l, r).  Free nodes are
 * split on the way down; a node is only reserved whole when it is fully
 * covered AND power-of-two sized, keeping the tree shape compatible
 * with the buddy allocator.  Parent summaries are fixed up by _pull().
 */
static inline
void _reserve_page(uint64_t ql, uint64_t qr, int idx, uint64_t l, uint64_t r)
{
/* No overlap with the query range: nothing to do. */
if (qr <= l || r <= ql)
return;
uint64_t sz = r - l;
/* Fully covered power-of-two node: reserve it outright. */
if (ql <= l && r <= qr && msb64(sz) == sz) {
if (CUR->state == PAGE_RESERVED)
return;
/* Reserving over allocated/divided memory is a setup error. */
if (CUR->state != PAGE_FREE)
exit(ERR_INVALID_MEM);
LOG("page reserved");
LOG(l);
DEBUG_MEM(r);
CUR->state = PAGE_RESERVED;
CUR->size = 0;
return;
}
/* Split point: left child spans the largest power-of-two prefix. */
uint64_t m = l + ((msb64(sz) == sz) ? (sz >> 1) : msb64(sz));
if (CUR->state == PAGE_FREE) {
LOG("page divided"); LOG(l); DEBUG_MEM(r);
CUR->state = PAGE_DIVIDED;
LCH->state = RCH->state = PAGE_FREE;
LCH->size = m - l;
RCH->size = r - m;
}
if (ql < m)
_reserve_page(ql, qr, LIDX, l, m);
if (m < qr)
_reserve_page(ql, qr, RIDX, m, r);
_pull(idx, sz);
}
/*
 * Reserve the byte range [begin, end) so the allocator never hands it
 * out.  The range is converted to page indices: the start rounds down
 * to its containing page, the end rounds up to the next page boundary.
 */
void reserve_page(void *begin, void *end)
{
	uint64_t base = (uint64_t)mman_memory_start;
	uint64_t ql = ((uint64_t)begin - base) >> 12;              /* first page */
	uint64_t qr = (((uint64_t)end - base - 1) >> 12) + 1;      /* one past last */
	LOG("reserve page");
	LOG(ql);
	DEBUG_MEM(qr);
	_reserve_page(ql, qr, 0, 0, mman_page_cnt);
}