#define PAGE_COUNT(s) (((s) / ARCH_PAGE_SIZE) + 1)
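// Note: integer division rounds down and one page is always added, so assuming
// ARCH_PAGE_SIZE == 4096, PAGE_COUNT(0x2000) == 3 and PAGE_COUNT(1) == 1.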
static Mutex pagingLock("paging");
static Physical::PhysicalManager physical;
static uintptr_t pageDirectoryAddress;
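// The escaped quotes and trailing '#' in the section attributes below trick GCC
// into emitting ".section .page_tables,"aw",@nobits" (the '#' comments out the
// flags GCC would otherwise append), so these large zero-initialized tables
// occupy no space in the kernel binary.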
[[gnu::section(".page_tables,\"aw\", @nobits#")]]
static struct Arch::Memory::DirectoryEntry pageDirectoryPhysical[ARCH_PAGE_DIR_ENTRIES];
[[gnu::section(".page_tables,\"aw\", @nobits#")]]
static struct Arch::Memory::Table pageTables[ARCH_PAGE_TABLE_ENTRIES];
static void pageFaultCallback(struct registers* regs);
static void initPhysical(MemoryMap* map);
static void initDirectory();
static void mapEarlyMem();
static void mapKernel();
static uintptr_t findNextFreeVirtualAddress(size_t seq);
static inline void mapKernelPageTable(size_t idx, struct Arch::Memory::Table* table);
static void argumentsCallback(const char* arg);
static bool is_mapping_output_enabled = false;
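// Kernel command-line flag handled by argumentsCallback() below; when present,
// mapKernelPage() logs every mapping it creates.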
#define MAPPING_OUTPUT_FLAG "--enable-mapping-output"
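    // Hand the MMU the page-aligned physical address of the page directory
    // built in initDirectory() below.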
    Arch::Memory::setPageDirectory(Arch::Memory::pageAlign(pageDirectoryAddress));
static void pageFaultCallback(struct registers* regs)
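// Walk the bootloader-provided memory map, tallying free vs. reserved bytes
// (presumably used to seed the physical frame manager declared above).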
static void initPhysical(MemoryMap* map)
    size_t reservedBytes = 0;
    for (size_t i = 0; i < map->Count(); i++) {
        auto section = map->Get(i);
        if (section.initialized()) {
            switch (section.type())
                freeBytes += section.size();
                reservedBytes += section.size();
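// Install a page table at directory index `idx` in both the kernel-visible
// (virtual) page directory and the physical one handed to the MMU.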
static inline void mapKernelPageTable(size_t idx, struct Arch::Memory::Table* table)
    pageDirectoryVirtual[idx] = table;
    pageDirectoryPhysical[idx] = {
static void initDirectory()
        mapKernelPageTable(i, &pageTables[i]);
            memset(&pageTables[i].pages[j], 0, sizeof(struct Arch::Memory::TableEntry));
    pageDirectoryAddress = KADDR_TO_PHYS((uintptr_t)&pageDirectoryPhysical[0]);
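// Map a single page-aligned virtual address onto a physical frame in the kernel
// page tables. Panics on an unaligned address or a conflicting existing mapping,
// and records the page in the mappedPages bitmap.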
void mapKernelPage(Arch::Memory::Address vaddr, Arch::Memory::Address paddr)
    size_t pde = vaddr.page().dirIndex;
    size_t pte = vaddr.page().tableIndex;
    if (is_mapping_output_enabled) {
        Logger::Debug(__func__, "map 0x%08lx to 0x%08lx, pde = 0x%08lx, pte = 0x%08lx", paddr.val(), vaddr.val(), pde, pte);
    if (vaddr.page().offset) {
        panicf("Attempted to map a non-page-aligned virtual address.\n(Address: 0x%08lX)\n", vaddr.val());
    Arch::Memory::TableEntry* entry = &(pageTables[pde].pages[pte]);
    if (entry->present) {
        if (entry->frame == paddr.frame().index) {
        panic("Attempted to map already mapped page.\n");
    pageTables[pde].pages[pte] = {
        .frame = paddr.frame().index,
    mappedPages.Set(vaddr.frame().index);
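// The two helpers below appear to pre-map the regions needed before the virtual
// memory manager is fully up: the early-boot memory and the kernel image itself.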
static void mapEarlyMem()

static void mapKernel()
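// Find `seq` consecutive unmapped virtual pages and return the index of the
// first, or SIZE_MAX if no sufficiently large run exists.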
static uintptr_t findNextFreeVirtualAddress(size_t seq)
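    // Allocation path: find a run of free virtual pages, then back each page in
    // it with a physical frame; SIZE_MAX from either lookup signals failure.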
    size_t free_idx = findNextFreeVirtualAddress(page_count);
    if (free_idx == SIZE_MAX)
    for (size_t i = free_idx; i < free_idx + page_count; i++) {
        if (phys_page_idx == SIZE_MAX)
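    // Release path: clear each freed page's bit in mappedPages, zero its table
    // entry, and invalidate the corresponding TLB entry.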
    Arch::Memory::Address addr((uintptr_t)page);
    for (size_t i = addr.page().tableIndex; i < addr.page().tableIndex + page_count; i++) {
        mappedPages.Reset(i);
        memset(pte, 0, sizeof(struct Arch::Memory::TableEntry));
        Arch::Memory::pageInvalidate(page);
    return pageDirectoryAddress;
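// Kernel argument hook: enables the verbose per-mapping debug output when
// MAPPING_OUTPUT_FLAG is passed.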
static void argumentsCallback(const char* arg)
        is_mapping_output_enabled = true;