Xyris  0.5
paging.cpp
/**
 * @file paging.cpp
 * @author Keeton Feavel ([email protected])
 * @author Micah Switzer ([email protected])
 * @brief Kernel paging and virtual memory management.
 * @version 0.3
 * @date 2019-11-22
 *
 * @copyright Copyright Keeton Feavel and Micah Switzer (c) 2019
 *
 */
#include <Arch/Arch.hpp>
#include <Arch/Memory.hpp>
#include <Bootloader/Arguments.hpp>
#include <Library/Bitset.hpp>
#include <Locking/RAII.hpp>
#include <Library/stdio.hpp>
#include <Library/string.hpp>
#include <Memory/Physical.hpp>
#include <Memory/paging.hpp>
#include <Support/sections.hpp>
#include <Panic.hpp>
#include <Logger.hpp>
#include <stddef.h>

#define PAGE_COUNT(s) (((s) / ARCH_PAGE_SIZE) + 1)
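
// Illustrative expansion (assuming ARCH_PAGE_SIZE == 4096):
//   PAGE_COUNT(1)    -> 1  (a sub-page request still costs a full page)
//   PAGE_COUNT(4096) -> 2  (an exact multiple rounds up by one extra page)
//   PAGE_COUNT(5000) -> 2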

namespace Memory {

static Mutex pagingLock("paging");

static Physical::PhysicalManager physical;
static Bitset<MEM_BITMAP_SIZE> mappedPages;

static uintptr_t pageDirectoryAddress;
static struct Arch::Memory::Table* pageDirectoryVirtual[ARCH_PAGE_DIR_ENTRIES];

// Both of these must be page aligned for anything to work right at all
[[gnu::section(".page_tables,\"aw\", @nobits#")]] static struct Arch::Memory::DirectoryEntry pageDirectoryPhysical[ARCH_PAGE_DIR_ENTRIES];
[[gnu::section(".page_tables,\"aw\", @nobits#")]] static struct Arch::Memory::Table pageTables[ARCH_PAGE_TABLE_ENTRIES];
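
// Note on the section attribute above (illustrative): the trailing '#' is a
// string-splicing trick. GCC emits `.section .page_tables,"aw", @nobits` and,
// on i686 where '#' starts an assembler comment, the '#' hides the flags the
// compiler would otherwise append, so these arrays land in a NOBITS
// (zero-initialized, not stored in the binary) section.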

// Function prototypes
static void pageFaultCallback(struct registers* regs);
static void initPhysical(MemoryMap* map);
static void initDirectory();
static void mapEarlyMem();
static void mapKernel();
static uintptr_t findNextFreeVirtualAddress(size_t seq);
static inline void mapKernelPageTable(size_t idx, struct Arch::Memory::Table* table);
static void argumentsCallback(const char* arg);

// Kernel cmdline arguments
static bool is_mapping_output_enabled = false;
#define MAPPING_OUTPUT_FLAG "--enable-mapping-output"
KERNEL_PARAM(enableMappingLogs, MAPPING_OUTPUT_FLAG, argumentsCallback);
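
// Usage sketch: booting with "--enable-mapping-output" on the kernel command
// line makes argumentsCallback() flip is_mapping_output_enabled, which turns
// on the per-page Logger::Debug trace in mapKernelPage() below.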

void init(MemoryMap* map)
{
    // register the page fault handler so we can set breakpoints or make a futile attempt to recover
    Interrupts::registerHandler(Interrupts::EXCEPTION_PAGE_FAULT, pageFaultCallback);
    // populate the physical memory map based on bootloader information
    initPhysical(map);
    // init our structures
    initDirectory();
    // identity map the first 1 MiB of RAM
    mapEarlyMem();
    // map in our higher-half kernel
    mapKernel();
    // use our new set of page tables
    Arch::Memory::setPageDirectory(Arch::Memory::pageAlign(pageDirectoryAddress));
    // flush the tlb and we're off to the races!
    Arch::Memory::pagingEnable();
}

static void pageFaultCallback(struct registers* regs)
{
    panic(regs);
}

static void initPhysical(MemoryMap* map)
{
    size_t freeBytes = 0;
    size_t reservedBytes = 0;

    for (size_t i = 0; i < map->Count(); i++) {
        auto section = map->Get(i);
        if (section.initialized()) {
            switch (section.type()) {
                case Available:
                    physical.setFree(section);
                    freeBytes += section.size();
                    break;
                default:
                    physical.setUsed(section);
                    reservedBytes += section.size();
                    break;
            }
        }
    }

    Logger::Info(__func__, "Available memory: %zu MB", B_TO_MB(freeBytes));
    Logger::Info(__func__, "Reserved memory: %zu MB", B_TO_MB(reservedBytes));
    Logger::Info(__func__, "Total memory: %zu MB", B_TO_MB(freeBytes + reservedBytes));
}

static inline void mapKernelPageTable(size_t idx, struct Arch::Memory::Table* table)
{
    pageDirectoryVirtual[idx] = table;
    pageDirectoryPhysical[idx] = {
        .present = 1,
        .readWrite = 1,
        .usermode = 0,
        .writeThrough = 0,
        .cacheDisable = 0,
        .accessed = 0,
        .ignoredA = 0,
        .size = 0,
        .ignoredB = 0,
        // Compute the physical address of this page table: the virtual address is
        // obtained with the & operator, and the offset from the kernel's load
        // address is applied. We must shift it over 12 bits because we only care
        // about the highest 20 bits for the page table.
        // TODO: Get rid of this shift by using ``union Address``
        .tableAddr = KADDR_TO_PHYS((uintptr_t)table) >> ARCH_PAGE_TABLE_ENTRY_SHIFT
    };
}
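
// Worked example (illustrative): a table whose physical load address is
// 0x00404000 yields tableAddr = 0x00404000 >> 12 = 0x404; the MMU later
// recombines it as tableAddr << 12 when walking the directory.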

static void initDirectory()
{
    // For every page in kernel memory
    for (size_t i = 0; i < ARCH_PAGE_DIR_ENTRIES - 1; i++) {
        mapKernelPageTable(i, &pageTables[i]);
        // clear out the page tables
        for (size_t j = 0; j < ARCH_PAGE_TABLE_ENTRIES; j++) {
            memset(&pageTables[i].pages[j], 0, sizeof(struct Arch::Memory::TableEntry));
        }
    }
    // recursively map the last page table to the page directory
    mapKernelPageTable(ARCH_PAGE_TABLE_ENTRIES - 1, (struct Arch::Memory::Table*)&pageDirectoryPhysical[0]);
    // mark the recursive mapping region as used in the virtual page bitmap
    for (size_t i = ARCH_PAGE_TABLE_ENTRIES * (ARCH_PAGE_DIR_ENTRIES - 1); i < ARCH_PAGE_TABLE_ENTRIES * ARCH_PAGE_DIR_ENTRIES; i++) {
        mappedPages.Set(i);
    }
    // store the physical address of the page directory for quick access
    pageDirectoryAddress = KADDR_TO_PHYS((uintptr_t)&pageDirectoryPhysical[0]);
}
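
// Recursive-mapping sketch (illustrative; assumes 4 KiB pages and 1024-entry
// tables): with the directory installed as its own final page table, every
// page table becomes visible through the top 4 MiB of the address space. The
// table covering virtual address V can then be read at:
//   0xFFC00000 + (V >> 22) * 0x1000
// and the directory itself appears at 0xFFFFF000.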

void mapKernelPage(Arch::Memory::Address vaddr, Arch::Memory::Address paddr)
{
    // Set the page directory entry (pde) and page table entry (pte)
    size_t pde = vaddr.page().dirIndex;
    size_t pte = vaddr.page().tableIndex;

    // Print a debug message to serial
    if (is_mapping_output_enabled) {
        Logger::Debug(__func__, "map 0x%08lx to 0x%08lx, pde = 0x%08lx, pte = 0x%08lx", paddr.val(), vaddr.val(), pde, pte);
    }

    // If the page's virtual address is not aligned
    if (vaddr.page().offset) {
        panicf("Attempted to map a non-page-aligned virtual address.\n(Address: 0x%08lX)\n", vaddr.val());
    }

    // If the page is already mapped into memory
    Arch::Memory::TableEntry* entry = &(pageTables[pde].pages[pte]);
    if (entry->present) {
        if (entry->frame == paddr.frame().index) {
            // this page was already mapped the same way
            return;
        }

        panic("Attempted to map already mapped page.\n");
    }
    // Set the page information
    pageTables[pde].pages[pte] = {
        .present = 1,                 // The page is present
        .readWrite = 1,               // The page has r/w permissions
        .usermode = 0,                // These are kernel pages
        .writeThrough = 0,            // Disable write through
        .cacheDisable = 0,            // The page is cached
        .accessed = 0,                // The page is unaccessed
        .dirty = 0,                   // The page is clean
        .pageAttrTable = 0,           // The page has no attribute table
        .global = 0,                  // The page is local
        .unused = 0,                  // Ignored
        .frame = paddr.frame().index, // The last 20 bits are the frame
    };
    // Set the associated bit in the bitmaps
    physical.setUsed(paddr);
    mappedPages.Set(vaddr.frame().index);
}
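
// A minimal sketch of the 32-bit split that Arch::Memory::Address::page() is
// assumed to perform above (4 KiB pages): bits 31..22 are the directory
// index, bits 21..12 the table index, bits 11..0 the byte offset. The helper
// below is purely illustrative and not used by the kernel.
static inline void addressSplitExample(uintptr_t vaddr, size_t& pde, size_t& pte, size_t& offset)
{
    pde = (vaddr >> 22) & 0x3FF; // top 10 bits: page directory index
    pte = (vaddr >> 12) & 0x3FF; // middle 10 bits: page table index
    offset = vaddr & 0xFFF;      // low 12 bits: offset within the page
}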

void mapKernelRangeVirtual(Section sect)
{
    for (Arch::Memory::Address a(sect.base()); a < sect.end(); a += ARCH_PAGE_SIZE) {
        mapKernelPage(a, a);
    }
}

void mapKernelRangePhysical(Section sect)
{
    for (Arch::Memory::Address a(sect.base()); a < sect.end(); a += ARCH_PAGE_SIZE) {
        Arch::Memory::Address phys(KADDR_TO_PHYS(a));
        mapKernelPage(a, phys);
    }
}
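
// The two range helpers differ only in the physical target: the virtual
// variant identity-maps (paddr == vaddr), while the physical variant
// translates each higher-half address back to its load address with
// KADDR_TO_PHYS before mapping.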

static void mapEarlyMem()
{
    Logger::Debug(__func__, "==== MAP EARLY MEM ====");
    mapKernelRangeVirtual(Section(EARLY_MEM_START, EARLY_KERNEL_START));
}

static void mapKernel()
{
    Logger::Debug(__func__, "==== MAP HH KERNEL ====");
    mapKernelRangePhysical(Section(Arch::Memory::pageAlign(KERNEL_START), KERNEL_END));
}

/**
 * note: this can't find more than 32 sequential pages
 * @param seq the number of sequential pages to get
 * @return the starting virtual page index, or SIZE_MAX if no range was found
 */
static uintptr_t findNextFreeVirtualAddress(size_t seq)
{
    return mappedPages.FindFirstRange(seq, false);
}

void* newPage(size_t size)
{
    RAIIMutex lock(pagingLock);
    size_t page_count = PAGE_COUNT(size);
    size_t free_idx = findNextFreeVirtualAddress(page_count);
    if (free_idx == SIZE_MAX)
        return NULL;
    for (size_t i = free_idx; i < free_idx + page_count; i++) {
        size_t phys_page_idx = physical.findNextFreePhysicalAddress();
        if (phys_page_idx == SIZE_MAX)
            return NULL;
        Arch::Memory::Address phys(phys_page_idx * ARCH_PAGE_SIZE);
        Arch::Memory::Address vaddr(i * ARCH_PAGE_SIZE);
        mapKernelPage(vaddr, phys);
    }
    return (void*)(free_idx * ARCH_PAGE_SIZE);
}
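
// Usage sketch (illustrative): requesting a little over one page reserves two
// pages under the PAGE_COUNT policy above.
//
//   void* buf = newPage(ARCH_PAGE_SIZE + 1);
//   if (buf != NULL) {
//       memset(buf, 0, 2 * ARCH_PAGE_SIZE);
//       freePage(buf, ARCH_PAGE_SIZE + 1);
//   }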

void freePage(void* page, size_t size)
{
    RAIIMutex lock(pagingLock);
    size_t page_count = PAGE_COUNT(size);
    Arch::Memory::Address addr((uintptr_t)page);
    for (size_t i = addr.page().tableIndex; i < addr.page().tableIndex + page_count; i++) {
        mappedPages.Reset(i);
        // get the page table entry for this page index
        struct Arch::Memory::TableEntry* pte = &(pageTables[i / ARCH_PAGE_TABLE_ENTRIES].pages[i % ARCH_PAGE_TABLE_ENTRIES]);
        // the frame field is the page frame's index, i.e. frame 0, 1, ... (2^20 - 1)
        physical.setFree(pte->frame);
        // zero it out to unmap it
        memset(pte, 0, sizeof(struct Arch::Memory::TableEntry));
        // clear that tlb
        Arch::Memory::pageInvalidate(page);
    }
}
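
// Index math sketch (assuming 1024 entries per table): for page index i, the
// owning table is i / ARCH_PAGE_TABLE_ENTRIES and the slot within it is
// i % ARCH_PAGE_TABLE_ENTRIES, e.g. i = 1500 -> table 1, slot 476.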

bool isPresent(uintptr_t addr)
{
    // Convert the address into an index and check whether the page is in the bitmap
    // TODO: Fix this function. It's inaccurate and can result in triple faults.
    return mappedPages[addr >> ARCH_PAGE_TABLE_ENTRY_SHIFT];
}

// TODO: maybe enforce access control here in the future
uintptr_t getPageDirPhysAddr()
{
    return pageDirectoryAddress;
}

static void argumentsCallback(const char* arg)
{
    if (strcmp(arg, MAPPING_OUTPUT_FLAG) == 0) {
        Logger::Debug(__func__, "is_mapping_output_enabled = true");
        is_mapping_output_enabled = true;
    }
}

} // !namespace Memory