Update kernel memory management to current coding conventions.

Jonas 'Sortie' Termansen 2014-01-04 01:04:29 +01:00
parent 71f9f882d1
commit a099f82890
7 changed files with 1274 additions and 1212 deletions

View File

@@ -1,6 +1,6 @@
 /*******************************************************************************

-    Copyright(C) Jonas 'Sortie' Termansen 2011, 2012.
+    Copyright(C) Jonas 'Sortie' Termansen 2011, 2012, 2014.

     This file is part of Sortix.
@@ -22,74 +22,80 @@
 *******************************************************************************/

-#ifndef SORTIX_MEMORYMANAGEMENT_H
-#define SORTIX_MEMORYMANAGEMENT_H
+#ifndef INCLUDE_SORTIX_KERNEL_MEMORYMANAGEMENT_H
+#define INCLUDE_SORTIX_KERNEL_MEMORYMANAGEMENT_H

+// Forward declarations.
 typedef struct multiboot_info multiboot_info_t;

-namespace Sortix
-{
-    class Process;
-
-    namespace Page
-    {
-        bool Reserve(size_t* counter, size_t amount);
-        bool ReserveUnlocked(size_t* counter, size_t amount);
-        bool Reserve(size_t* counter, size_t least, size_t ideal);
-        bool ReserveUnlocked(size_t* counter, size_t least, size_t ideal);
-        addr_t GetReserved(size_t* counter);
-        addr_t GetReservedUnlocked(size_t* counter);
-        addr_t Get();
-        addr_t GetUnlocked();
-        void Put(addr_t page);
-        void PutUnlocked(addr_t page);
-        void Lock();
-        void Unlock();
-
-        inline size_t Size() { return 4096UL; }
-
-        // Rounds a memory address down to nearest page.
-        inline addr_t AlignDown(addr_t page) { return page & ~(0xFFFUL); }
-
-        // Rounds a memory address up to nearest page.
-        inline addr_t AlignUp(addr_t page) { return AlignDown(page + 0xFFFUL); }
-
-        // Tests whether an address is page aligned.
-        inline bool IsAligned(addr_t page) { return AlignDown(page) == page; }
-    }
-
-    namespace Memory
-    {
-        void Init(multiboot_info_t* bootinfo);
-        void InvalidatePage(addr_t addr);
-        void Flush();
-        addr_t Fork();
-        addr_t GetAddressSpace();
-        addr_t SwitchAddressSpace(addr_t addrspace);
-        void DestroyAddressSpace(addr_t fallback = 0,
-                                 void (*func)(addr_t, void*) = NULL,
-                                 void* user = NULL);
-        bool Map(addr_t physical, addr_t mapto, int prot);
-        addr_t Unmap(addr_t mapto);
-        addr_t Physical(addr_t mapto);
-        int PageProtection(addr_t mapto);
-        bool LookUp(addr_t mapto, addr_t* physical, int* prot);
-        int ProvidedProtection(int prot);
-        void PageProtect(addr_t mapto, int protection);
-        void PageProtectAdd(addr_t mapto, int protection);
-        void PageProtectSub(addr_t mapto, int protection);
-        bool MapRange(addr_t where, size_t bytes, int protection);
-        bool UnmapRange(addr_t where, size_t bytes);
-        void Statistics(size_t* amountused, size_t* totalmem);
-        addr_t GetKernelStack();
-        size_t GetKernelStackSize();
-        void GetKernelVirtualArea(addr_t* from, size_t* size);
-        void GetUserVirtualArea(uintptr_t* from, size_t* size);
-        void UnmapMemory(Process* process, uintptr_t addr, size_t size);
-        bool ProtectMemory(Process* process, uintptr_t addr, size_t size, int prot);
-        bool MapMemory(Process* process, uintptr_t addr, size_t size, int prot);
-    }
-}
+namespace Sortix {
+
+class Process;
+
+} // namespace Sortix
+
+namespace Sortix {
+namespace Page {
+
+bool Reserve(size_t* counter, size_t amount);
+bool ReserveUnlocked(size_t* counter, size_t amount);
+bool Reserve(size_t* counter, size_t least, size_t ideal);
+bool ReserveUnlocked(size_t* counter, size_t least, size_t ideal);
+addr_t GetReserved(size_t* counter);
+addr_t GetReservedUnlocked(size_t* counter);
+addr_t Get();
+addr_t GetUnlocked();
+void Put(addr_t page);
+void PutUnlocked(addr_t page);
+void Lock();
+void Unlock();
+
+inline size_t Size() { return 4096UL; }
+
+// Rounds a memory address down to nearest page.
+inline addr_t AlignDown(addr_t page) { return page & ~(0xFFFUL); }
+
+// Rounds a memory address up to nearest page.
+inline addr_t AlignUp(addr_t page) { return AlignDown(page + 0xFFFUL); }
+
+// Tests whether an address is page aligned.
+inline bool IsAligned(addr_t page) { return AlignDown(page) == page; }
+
+} // namespace Page
+} // namespace Sortix
+
+namespace Sortix {
+namespace Memory {
+
+void Init(multiboot_info_t* bootinfo);
+void InvalidatePage(addr_t addr);
+void Flush();
+addr_t Fork();
+addr_t GetAddressSpace();
+addr_t SwitchAddressSpace(addr_t addrspace);
+void DestroyAddressSpace(addr_t fallback = 0,
+                         void (*func)(addr_t, void*) = NULL,
+                         void* user = NULL);
+bool Map(addr_t physical, addr_t mapto, int prot);
+addr_t Unmap(addr_t mapto);
+addr_t Physical(addr_t mapto);
+int PageProtection(addr_t mapto);
+bool LookUp(addr_t mapto, addr_t* physical, int* prot);
+int ProvidedProtection(int prot);
+void PageProtect(addr_t mapto, int protection);
+void PageProtectAdd(addr_t mapto, int protection);
+void PageProtectSub(addr_t mapto, int protection);
+bool MapRange(addr_t where, size_t bytes, int protection);
+bool UnmapRange(addr_t where, size_t bytes);
+void Statistics(size_t* amountused, size_t* totalmem);
+addr_t GetKernelStack();
+size_t GetKernelStackSize();
+void GetKernelVirtualArea(addr_t* from, size_t* size);
+void GetUserVirtualArea(uintptr_t* from, size_t* size);
+void UnmapMemory(Process* process, uintptr_t addr, size_t size);
+bool ProtectMemory(Process* process, uintptr_t addr, size_t size, int prot);
+bool MapMemory(Process* process, uintptr_t addr, size_t size, int prot);
+
+} // namespace Memory
+} // namespace Sortix

 #endif
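The Page helpers above are plain mask arithmetic over 4096-byte pages. A minimal standalone sketch of what they compute (illustrative, not part of this commit; addr_t is assumed here to be a pointer-sized unsigned integer):

// Standalone illustration of the Page alignment helpers; not Sortix code.
#include <assert.h>
#include <stdint.h>

typedef uintptr_t addr_t; // assumption: pointer-sized unsigned integer

inline addr_t AlignDown(addr_t page) { return page & ~(0xFFFUL); }
inline addr_t AlignUp(addr_t page) { return AlignDown(page + 0xFFFUL); }
inline bool IsAligned(addr_t page) { return AlignDown(page) == page; }

int main(void)
{
    assert(AlignDown(0x1234) == 0x1000); // clear the low 12 bits
    assert(AlignUp(0x1234) == 0x2000);   // round up to the next page boundary
    assert(AlignUp(0x2000) == 0x2000);   // already-aligned addresses are unchanged
    assert(IsAligned(0x3000) && !IsAligned(0x3001));
    return 0;
}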

View File

@@ -1,6 +1,6 @@
 /*******************************************************************************

-    Copyright(C) Jonas 'Sortie' Termansen 2011, 2012.
+    Copyright(C) Jonas 'Sortie' Termansen 2011, 2012, 2014.

     This file is part of Sortix.
@@ -31,181 +31,188 @@
 #include "multiboot.h"
 #include "x86-family/memorymanagement.h"

-namespace Sortix
-{
-    namespace Page
-    {
-        extern size_t stackused;
-        extern size_t stacklength;
-        void ExtendStack();
-    }
-
-    namespace Memory
-    {
-        extern addr_t currentdir;
-
-        void InitCPU()
-        {
-            // The x64 boot code already set up virtual memory and identity
-            // mapped the first 2 MiB. This code finishes the job such that
-            // virtual memory is fully usable and manageable.
-
-            // boot.s already initialized everything from 0x1000UL to 0xE000UL
-            // to zeroes. Since these structures are already in use, initializing
-            // them here would be very dangerous.
-
-            PML* const BOOTPML4 = (PML* const) 0x21000UL;
-            PML* const BOOTPML3 = (PML* const) 0x26000UL;
-            PML* const BOOTPML2 = (PML* const) 0x27000UL;
-            PML* const BOOTPML1 = (PML* const) 0x28000UL;
-
-            // First order of business is to map the virtual memory structures
-            // to the pre-defined locations in the virtual address space.
-            addr_t flags = PML_PRESENT | PML_WRITABLE;
-
-            // Fractal map the PML1s.
-            BOOTPML4->entry[511] = (addr_t) BOOTPML4 | flags;
-
-            // Fractal map the PML2s.
-            BOOTPML4->entry[510] = (addr_t) BOOTPML3 | flags | PML_FORK;
-            BOOTPML3->entry[511] = (addr_t) BOOTPML4 | flags;
-
-            // Fractal map the PML3s.
-            BOOTPML3->entry[510] = (addr_t) BOOTPML2 | flags | PML_FORK;
-            BOOTPML2->entry[511] = (addr_t) BOOTPML4 | flags;
-
-            // Fractal map the PML4s.
-            BOOTPML2->entry[510] = (addr_t) BOOTPML1 | flags | PML_FORK;
-            BOOTPML1->entry[511] = (addr_t) BOOTPML4 | flags;
-
-            // Add some predefined room for forking address spaces.
-            PML* const FORKPML2 = (PML* const) 0x29000UL;
-            PML* const FORKPML1 = (PML* const) 0x2A000UL;
-
-            BOOTPML3->entry[0] = (addr_t) FORKPML2 | flags | PML_FORK;
-            BOOTPML2->entry[0] = (addr_t) FORKPML1 | flags | PML_FORK;
-
-            currentdir = (addr_t) BOOTPML4;
-
-            // The virtual memory structures are now available at the predefined
-            // locations. This means the virtual memory code is bootstrapped. Of
-            // course, we still have no physical page allocator, so that's the
-            // next step.
-            PML* const PHYSPML3 = (PML* const) 0x2B000UL;
-            PML* const PHYSPML2 = (PML* const) 0x2C000UL;
-            PML* const PHYSPML1 = (PML* const) 0x2D000UL;
-            PML* const PHYSPML0 = (PML* const) 0x2E000UL;
-
-            BOOTPML4->entry[509] = (addr_t) PHYSPML3 | flags;
-            PHYSPML3->entry[0] = (addr_t) PHYSPML2 | flags;
-            PHYSPML2->entry[0] = (addr_t) PHYSPML1 | flags;
-            PHYSPML1->entry[0] = (addr_t) PHYSPML0 | flags;
-
-            Page::stackused = 0;
-            Page::stacklength = 4096UL / sizeof(addr_t);
-
-            // The physical memory allocator should now be ready for use. Next
-            // up, the calling function will fill up the physical allocator with
-            // plenty of nice physical pages. (see Page::InitPushRegion)
-        }
-
-        // Note that even though this function exists, you should still clean
-        // up the address space of a process _before_ calling
-        // DestroyAddressSpace. This is just a hack because it currently is
-        // impossible to clean up PML1s using the MM API!
-        // ---
-        // TODO: This function is duplicated in {x86,x64}/memorymanagement.cpp!
-        // ---
-        void RecursiveFreeUserspacePages(size_t level, size_t offset)
-        {
-            PML* pml = PMLS[level] + offset;
-            for ( size_t i = 0; i < ENTRIES; i++ )
-            {
-                addr_t entry = pml->entry[i];
-                if ( !(entry & PML_PRESENT) ) { continue; }
-                if ( !(entry & PML_USERSPACE) ) { continue; }
-                if ( !(entry & PML_FORK) ) { continue; }
-                if ( level > 1 ) { RecursiveFreeUserspacePages(level-1, offset * ENTRIES + i); }
-                addr_t addr = pml->entry[i] & PML_ADDRESS;
-                // No need to unmap the page, we just need to mark it as unused.
-                Page::PutUnlocked(addr);
-            }
-        }
-
-        void DestroyAddressSpace(addr_t fallback, void (*func)(addr_t, void*), void* user)
-        {
-            // Look up the last few entries used for the fractal mapping. These
-            // cannot be unmapped as that would destroy the world. Instead, we
-            // will remember them, switch to another address space, and safely
-            // mark them as unused. The fork-related pages are handled the same
-            // way.
-            addr_t fractal3 = (PMLS[4] + 0)->entry[510UL];
-            addr_t fork2 = (PMLS[3] + 510UL)->entry[0];
-            addr_t fractal2 = (PMLS[3] + 510UL)->entry[510];
-            addr_t fork1 = (PMLS[2] + 510UL * 512UL + 510UL)->entry[0];
-            addr_t fractal1 = (PMLS[2] + 510UL * 512UL + 510UL)->entry[510];
-            addr_t dir = currentdir;
-
-            // We want to free the pages, but we are still using them ourselves,
-            // so lock the page allocation structure until we are done.
-            Page::Lock();
-
-            // In case any pages weren't cleaned up at this point.
-            // TODO: Page::Put calls may internally Page::Get, thus reusing
-            // pages we are not done with just yet.
-            RecursiveFreeUserspacePages(TOPPMLLEVEL, 0);
-
-            // Switch to the address space from when the world was originally
-            // created. It should contain the kernel, the whole kernel, and
-            // nothing but the kernel.
-            PML* const BOOTPML4 = (PML* const) 0x21000UL;
-            if ( !fallback )
-                fallback = (addr_t) BOOTPML4;
-
-            if ( func )
-                func(fallback, user);
-            else
-                SwitchAddressSpace(fallback);
-
-            // Now that everything left behind is marked as unused, we can
-            // safely let other threads use the pages.
-            Page::Unlock();
-
-            // These are safe to free since we switched address space.
-            Page::Put(fractal3 & PML_ADDRESS);
-            Page::Put(fractal2 & PML_ADDRESS);
-            Page::Put(fractal1 & PML_ADDRESS);
-            Page::Put(fork2 & PML_ADDRESS);
-            Page::Put(fork1 & PML_ADDRESS);
-            Page::Put(dir & PML_ADDRESS);
-        }
-
-        const size_t KERNEL_STACK_SIZE = 256UL * 1024UL;
-        const addr_t KERNEL_STACK_END = 0xFFFF800000001000UL;
-        const addr_t KERNEL_STACK_START = KERNEL_STACK_END + KERNEL_STACK_SIZE;
-        const addr_t VIRTUAL_AREA_LOWER = KERNEL_STACK_START;
-        const addr_t VIRTUAL_AREA_UPPER = 0xFFFFFE8000000000UL;
-
-        void GetKernelVirtualArea(addr_t* from, size_t* size)
-        {
-            *from = KERNEL_STACK_END;
-            *size = VIRTUAL_AREA_UPPER - VIRTUAL_AREA_LOWER;
-        }
-
-        void GetUserVirtualArea(uintptr_t* from, size_t* size)
-        {
-            *from = 0x400000; // 4 MiB.
-            *size = 0x800000000000 - *from; // 128 TiB - 4 MiB.
-        }
-
-        addr_t GetKernelStack()
-        {
-            return KERNEL_STACK_START;
-        }
-
-        size_t GetKernelStackSize()
-        {
-            return KERNEL_STACK_SIZE;
-        }
-    }
-}
+namespace Sortix {
+namespace Page {
+
+extern size_t stackused;
+extern size_t stacklength;
+void ExtendStack();
+
+} // namespace Page
+} // namespace Sortix
+
+namespace Sortix {
+namespace Memory {
+
+extern addr_t currentdir;
+
+void InitCPU()
+{
+    // The x64 boot code already set up virtual memory and identity
+    // mapped the first 2 MiB. This code finishes the job such that
+    // virtual memory is fully usable and manageable.
+
+    // boot.s already initialized everything from 0x1000UL to 0xE000UL
+    // to zeroes. Since these structures are already in use, initializing
+    // them here would be very dangerous.
+
+    PML* const BOOTPML4 = (PML* const) 0x21000UL;
+    PML* const BOOTPML3 = (PML* const) 0x26000UL;
+    PML* const BOOTPML2 = (PML* const) 0x27000UL;
+    PML* const BOOTPML1 = (PML* const) 0x28000UL;
+
+    // First order of business is to map the virtual memory structures
+    // to the pre-defined locations in the virtual address space.
+    addr_t flags = PML_PRESENT | PML_WRITABLE;
+
+    // Fractal map the PML1s.
+    BOOTPML4->entry[511] = (addr_t) BOOTPML4 | flags;
+
+    // Fractal map the PML2s.
+    BOOTPML4->entry[510] = (addr_t) BOOTPML3 | flags | PML_FORK;
+    BOOTPML3->entry[511] = (addr_t) BOOTPML4 | flags;
+
+    // Fractal map the PML3s.
+    BOOTPML3->entry[510] = (addr_t) BOOTPML2 | flags | PML_FORK;
+    BOOTPML2->entry[511] = (addr_t) BOOTPML4 | flags;
+
+    // Fractal map the PML4s.
+    BOOTPML2->entry[510] = (addr_t) BOOTPML1 | flags | PML_FORK;
+    BOOTPML1->entry[511] = (addr_t) BOOTPML4 | flags;
+
+    // Add some predefined room for forking address spaces.
+    PML* const FORKPML2 = (PML* const) 0x29000UL;
+    PML* const FORKPML1 = (PML* const) 0x2A000UL;
+
+    BOOTPML3->entry[0] = (addr_t) FORKPML2 | flags | PML_FORK;
+    BOOTPML2->entry[0] = (addr_t) FORKPML1 | flags | PML_FORK;
+
+    currentdir = (addr_t) BOOTPML4;
+
+    // The virtual memory structures are now available at the predefined
+    // locations. This means the virtual memory code is bootstrapped. Of
+    // course, we still have no physical page allocator, so that's the
+    // next step.
+    PML* const PHYSPML3 = (PML* const) 0x2B000UL;
+    PML* const PHYSPML2 = (PML* const) 0x2C000UL;
+    PML* const PHYSPML1 = (PML* const) 0x2D000UL;
+    PML* const PHYSPML0 = (PML* const) 0x2E000UL;
+
+    BOOTPML4->entry[509] = (addr_t) PHYSPML3 | flags;
+    PHYSPML3->entry[0] = (addr_t) PHYSPML2 | flags;
+    PHYSPML2->entry[0] = (addr_t) PHYSPML1 | flags;
+    PHYSPML1->entry[0] = (addr_t) PHYSPML0 | flags;
+
+    Page::stackused = 0;
+    Page::stacklength = 4096UL / sizeof(addr_t);
+
+    // The physical memory allocator should now be ready for use. Next
+    // up, the calling function will fill up the physical allocator with
+    // plenty of nice physical pages. (see Page::InitPushRegion)
+}
+
+// Note that even though this function exists, you should still clean up
+// the address space of a process _before_ calling DestroyAddressSpace.
+// This is just a hack because it currently is impossible to clean up
+// PML1s using the MM API!
+// ---
+// TODO: This function is duplicated in {x86,x64}/memorymanagement.cpp!
+// ---
+void RecursiveFreeUserspacePages(size_t level, size_t offset)
+{
+    PML* pml = PMLS[level] + offset;
+    for ( size_t i = 0; i < ENTRIES; i++ )
+    {
+        addr_t entry = pml->entry[i];
+        if ( !(entry & PML_PRESENT) )
+            continue;
+        if ( !(entry & PML_USERSPACE) )
+            continue;
+        if ( !(entry & PML_FORK) )
+            continue;
+        if ( 1 < level )
+            RecursiveFreeUserspacePages(level-1, offset * ENTRIES + i);
+        addr_t addr = pml->entry[i] & PML_ADDRESS;
+        // No need to unmap the page, we just need to mark it as unused.
+        Page::PutUnlocked(addr);
+    }
+}
+
+void DestroyAddressSpace(addr_t fallback, void (*func)(addr_t, void*), void* user)
+{
+    // Look up the last few entries used for the fractal mapping. These
+    // cannot be unmapped as that would destroy the world. Instead, we
+    // will remember them, switch to another address space, and safely
+    // mark them as unused. The fork-related pages are handled the same way.
+    addr_t fractal3 = (PMLS[4] + 0)->entry[510UL];
+    addr_t fork2 = (PMLS[3] + 510UL)->entry[0];
+    addr_t fractal2 = (PMLS[3] + 510UL)->entry[510];
+    addr_t fork1 = (PMLS[2] + 510UL * 512UL + 510UL)->entry[0];
+    addr_t fractal1 = (PMLS[2] + 510UL * 512UL + 510UL)->entry[510];
+    addr_t dir = currentdir;
+
+    // We want to free the pages, but we are still using them ourselves,
+    // so lock the page allocation structure until we are done.
+    Page::Lock();
+
+    // In case any pages weren't cleaned up at this point.
+    // TODO: Page::Put calls may internally Page::Get, thus reusing pages
+    // we are not done with just yet.
+    RecursiveFreeUserspacePages(TOPPMLLEVEL, 0);
+
+    // Switch to the address space from when the world was originally
+    // created. It should contain the kernel, the whole kernel, and
+    // nothing but the kernel.
+    PML* const BOOTPML4 = (PML* const) 0x21000UL;
+    if ( !fallback )
+        fallback = (addr_t) BOOTPML4;
+
+    if ( func )
+        func(fallback, user);
+    else
+        SwitchAddressSpace(fallback);
+
+    // Now that everything left behind is marked as unused, we can safely
+    // let other threads use the pages.
+    Page::Unlock();
+
+    // These are safe to free since we switched address space.
+    Page::Put(fractal3 & PML_ADDRESS);
+    Page::Put(fractal2 & PML_ADDRESS);
+    Page::Put(fractal1 & PML_ADDRESS);
+    Page::Put(fork2 & PML_ADDRESS);
+    Page::Put(fork1 & PML_ADDRESS);
+    Page::Put(dir & PML_ADDRESS);
+}
+
+const size_t KERNEL_STACK_SIZE = 256UL * 1024UL;
+const addr_t KERNEL_STACK_END = 0xFFFF800000001000UL;
+const addr_t KERNEL_STACK_START = KERNEL_STACK_END + KERNEL_STACK_SIZE;
+const addr_t VIRTUAL_AREA_LOWER = KERNEL_STACK_START;
+const addr_t VIRTUAL_AREA_UPPER = 0xFFFFFE8000000000UL;
+
+void GetKernelVirtualArea(addr_t* from, size_t* size)
+{
+    *from = KERNEL_STACK_END;
+    *size = VIRTUAL_AREA_UPPER - VIRTUAL_AREA_LOWER;
+}
+
+void GetUserVirtualArea(uintptr_t* from, size_t* size)
+{
+    *from = 0x400000; // 4 MiB.
+    *size = 0x800000000000 - *from; // 128 TiB - 4 MiB.
+}
+
+addr_t GetKernelStack()
+{
+    return KERNEL_STACK_START;
+}
+
+size_t GetKernelStackSize()
+{
+    return KERNEL_STACK_SIZE;
+}
+
+} // namespace Memory
+} // namespace Sortix
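DestroyAddressSpace leaves the actual switch to the caller whenever a callback is supplied: func runs while the dying directory is still current, receiving the resolved fallback and the user pointer. A hypothetical caller sketch (SwitchAndRecord and ExampleTeardown are invented names for illustration, not part of this commit; assumed to sit inside namespace Sortix):

// Hypothetical illustration of DestroyAddressSpace's func/user parameters.
static void SwitchAndRecord(addr_t fallback, void* user)
{
    // Runs before the old directory's own pages are released, so the
    // switch and any caller-side bookkeeping happen together.
    *(addr_t*) user = Memory::SwitchAddressSpace(fallback);
}

void ExampleTeardown()
{
    addr_t olddir = 0;
    Memory::DestroyAddressSpace(0, SwitchAndRecord, &olddir);
    // olddir now names the directory that was just destroyed; passing a
    // fallback of 0 selected the kernel-only boot address space.
}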

View File

@@ -1,6 +1,6 @@
 /*******************************************************************************

-    Copyright(C) Jonas 'Sortie' Termansen 2011.
+    Copyright(C) Jonas 'Sortie' Termansen 2011, 2014.

     This file is part of Sortix.
@@ -25,32 +25,35 @@
 #ifndef SORTIX_X64_MEMORYMANAGEMENT_H
 #define SORTIX_X64_MEMORYMANAGEMENT_H

-namespace Sortix
-{
-    namespace Memory
-    {
-        const size_t TOPPMLLEVEL = 4;
-        const size_t ENTRIES = 4096UL / sizeof(addr_t);
-        const size_t TRANSBITS = 9;
-
-        PML* const PMLS[TOPPMLLEVEL + 1] =
-        {
-            (PML* const) 0x0,
-            (PML* const) 0xFFFFFF8000000000UL,
-            (PML* const) 0xFFFFFF7FC0000000UL,
-            (PML* const) 0xFFFFFF7FBFE00000UL,
-            (PML* const) 0xFFFFFF7FBFDFF000UL,
-        };
-
-        PML* const FORKPML = (PML* const) 0xFFFFFF0000000000UL;
-    }
-
-    namespace Page
-    {
-        addr_t* const STACK = (addr_t* const) 0xFFFFFE8000000000UL;
-        const size_t MAXSTACKSIZE = (512UL*1024UL*1024UL*1024UL);
-        const size_t MAXSTACKLENGTH = MAXSTACKSIZE / sizeof(addr_t);
-    }
-}
+namespace Sortix {
+namespace Memory {
+
+const size_t TOPPMLLEVEL = 4;
+const size_t ENTRIES = 4096UL / sizeof(addr_t);
+const size_t TRANSBITS = 9;
+
+PML* const PMLS[TOPPMLLEVEL + 1] =
+{
+    (PML* const) 0x0,
+    (PML* const) 0xFFFFFF8000000000UL,
+    (PML* const) 0xFFFFFF7FC0000000UL,
+    (PML* const) 0xFFFFFF7FBFE00000UL,
+    (PML* const) 0xFFFFFF7FBFDFF000UL,
+};
+
+PML* const FORKPML = (PML* const) 0xFFFFFF0000000000UL;
+
+} // namespace Memory
+} // namespace Sortix
+
+namespace Sortix {
+namespace Page {
+
+addr_t* const STACK = (addr_t* const) 0xFFFFFE8000000000UL;
+const size_t MAXSTACKSIZE = (512UL*1024UL*1024UL*1024UL);
+const size_t MAXSTACKLENGTH = MAXSTACKSIZE / sizeof(addr_t);
+
+} // namespace Page
+} // namespace Sortix

 #endif
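None of the PMLS addresses are arbitrary: InitCPU points entry 511 of the PML4 back at the PML4 itself, so the region selected by top-level slot 511 exposes every PML1 entry as one flat array, which is exactly PMLS[1]. A standalone sketch of that arithmetic (illustrative, not part of this commit):

// Standalone check of the fractal-mapping arithmetic behind PMLS[1].
#include <assert.h>
#include <stdint.h>

int main(void)
{
    // Top-level slot 511 covers 512 GiB starting at 511 << 39, sign-extended
    // into the canonical upper half of the 48-bit address space.
    uint64_t base = 0xFFFF000000000000UL | (511UL << 39);
    assert(base == 0xFFFFFF8000000000UL); // PMLS[1] above

    // Under such a self-referencing slot, the PML1 entry mapping a virtual
    // address V is the (V / 4096)'th 8-byte entry of that region.
    uint64_t v = 0x400000UL; // e.g. the start of the user virtual area
    assert(base + (v >> 12) * 8 == 0xFFFFFF8000002000UL);
    return 0;
}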

File diff suppressed because it is too large.

View File

@@ -1,6 +1,6 @@
 /*******************************************************************************

-    Copyright(C) Jonas 'Sortie' Termansen 2011, 2012.
+    Copyright(C) Jonas 'Sortie' Termansen 2011, 2012, 2014.

     This file is part of Sortix.
@@ -25,73 +25,81 @@
 #ifndef SORTIX_X86_FAMILY_MEMORYMANAGEMENT_H
 #define SORTIX_X86_FAMILY_MEMORYMANAGEMENT_H

-namespace Sortix
-{
-    struct PML
-    {
-        addr_t entry[4096 / sizeof(addr_t)];
-    };
-
-    namespace Memory
-    {
-        const addr_t PML_PRESENT = (1<<0);
-        const addr_t PML_WRITABLE = (1<<1);
-        const addr_t PML_USERSPACE = (1<<2);
-        const addr_t PML_WRTHROUGH = (1<<3);
-        const addr_t PML_NOCACHE = (1<<4);
-        const addr_t PML_PAT = (1<<7);
-        const addr_t PML_AVAILABLE1 = (1<<9);
-        const addr_t PML_AVAILABLE2 = (1<<10);
-        const addr_t PML_AVAILABLE3 = (1<<11);
-        const addr_t PML_FORK = PML_AVAILABLE1;
-        const addr_t PML_FLAGS = (0xFFFUL); // Bits used for the flags.
-        const addr_t PML_ADDRESS = (~0xFFFUL); // Bits used for the address.
-
-        const addr_t PAT_UC = 0x00; // Uncacheable
-        const addr_t PAT_WC = 0x01; // Write-Combine
-        const addr_t PAT_WT = 0x04; // Writethrough
-        const addr_t PAT_WP = 0x05; // Write-Protect
-        const addr_t PAT_WB = 0x06; // Writeback
-        const addr_t PAT_UCM = 0x07; // Uncacheable, overruled by MTRR.
-        const addr_t PAT_NUM = 0x08;
-
-        // Desired PAT-Register PA-Field Indexing (different from BIOS defaults)
-        const addr_t PA[PAT_NUM] =
-        {
-            PAT_WB,
-            PAT_WT,
-            PAT_UCM,
-            PAT_UC,
-            PAT_WC,
-            PAT_WP,
-            0,
-            0,
-        };
-
-        // Inverse function of the above.
-        const addr_t PAINV[PAT_NUM] =
-        {
-            3, // UC
-            4, // WC
-            7, // No such
-            8, // No such
-            1, // WT
-            5, // WP
-            0, // WB
-            2, // UCM
-        };
-
-        static inline addr_t EncodePATAsPMLFlag(addr_t pat)
-        {
-            pat = PAINV[pat];
-            addr_t result = 0;
-            if ( pat & 0x1 ) { result |= PML_WRTHROUGH; }
-            if ( pat & 0x2 ) { result |= PML_NOCACHE; }
-            if ( pat & 0x4 ) { result |= PML_PAT; }
-            return result;
-        }
-
-        bool MapPAT(addr_t physical, addr_t mapto, int prot, addr_t mtype);
-        addr_t ProtectionToPMLFlags(int prot);
-        int PMLFlagsToProtection(addr_t flags);
-    }
-}
+namespace Sortix {
+
+struct PML
+{
+    addr_t entry[4096 / sizeof(addr_t)];
+};
+
+} // namespace Sortix
+
+namespace Sortix {
+namespace Memory {
+
+const addr_t PML_PRESENT = 1 << 0;
+const addr_t PML_WRITABLE = 1 << 1;
+const addr_t PML_USERSPACE = 1 << 2;
+const addr_t PML_WRTHROUGH = 1 << 3;
+const addr_t PML_NOCACHE = 1 << 4;
+const addr_t PML_PAT = 1 << 7;
+const addr_t PML_AVAILABLE1 = 1 << 9;
+const addr_t PML_AVAILABLE2 = 1 << 10;
+const addr_t PML_AVAILABLE3 = 1 << 11;
+const addr_t PML_FORK = PML_AVAILABLE1;
+const addr_t PML_FLAGS = 0xFFFUL; // Bits used for the flags.
+const addr_t PML_ADDRESS = ~0xFFFUL; // Bits used for the address.
+
+const addr_t PAT_UC = 0x00; // Uncacheable
+const addr_t PAT_WC = 0x01; // Write-Combine
+const addr_t PAT_WT = 0x04; // Writethrough
+const addr_t PAT_WP = 0x05; // Write-Protect
+const addr_t PAT_WB = 0x06; // Writeback
+const addr_t PAT_UCM = 0x07; // Uncacheable, overruled by MTRR.
+const addr_t PAT_NUM = 0x08;
+
+// Desired PAT-Register PA-Field Indexing (different from BIOS defaults)
+const addr_t PA[PAT_NUM] =
+{
+    PAT_WB,
+    PAT_WT,
+    PAT_UCM,
+    PAT_UC,
+    PAT_WC,
+    PAT_WP,
+    0,
+    0,
+};
+
+// Inverse function of the above.
+const addr_t PAINV[PAT_NUM] =
+{
+    3, // UC
+    4, // WC
+    7, // No such
+    8, // No such
+    1, // WT
+    5, // WP
+    0, // WB
+    2, // UCM
+};
+
+static inline addr_t EncodePATAsPMLFlag(addr_t pat)
+{
+    pat = PAINV[pat];
+    addr_t result = 0;
+    if ( pat & 0x1 ) { result |= PML_WRTHROUGH; }
+    if ( pat & 0x2 ) { result |= PML_NOCACHE; }
+    if ( pat & 0x4 ) { result |= PML_PAT; }
+    return result;
+}
+
+bool MapPAT(addr_t physical, addr_t mapto, int prot, addr_t mtype);
+addr_t ProtectionToPMLFlags(int prot);
+int PMLFlagsToProtection(addr_t flags);
+
+} // namespace Memory
+} // namespace Sortix

 #if defined(__i386__)
 #include "../x86/memorymanagement.h"
 #elif defined(__x86_64__)
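PA assigns a memory type to each of the eight PAT register slots, PAINV maps a memory type back to the slot holding it, and EncodePATAsPMLFlag spreads that slot index over the PML_WRTHROUGH, PML_NOCACHE and PML_PAT bits of a page table entry. A standalone consistency check of the two tables (illustrative, not part of this commit):

// Standalone round-trip check of the PA/PAINV tables; not Sortix code.
#include <assert.h>
#include <stddef.h>

const unsigned PA[8]    = { 0x06, 0x04, 0x07, 0x00, 0x01, 0x05, 0, 0 };
const unsigned PAINV[8] = { 3, 4, 7, 8, 1, 5, 0, 2 };

int main(void)
{
    // Every defined memory type ends up in a slot that holds it.
    const unsigned types[] = { 0x00, 0x01, 0x04, 0x05, 0x06, 0x07 };
    for ( size_t i = 0; i < sizeof(types) / sizeof(types[0]); i++ )
        assert(PA[PAINV[types[i]]] == types[i]);

    // Worked instance of EncodePATAsPMLFlag: PAT_WC (0x01) lives in slot
    // PAINV[0x01] == 4 == 0b100, so only the PML_PAT bit gets set.
    assert(PAINV[0x01] == 4);
    return 0;
}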

View File

@@ -1,6 +1,6 @@
 /*******************************************************************************

-    Copyright(C) Jonas 'Sortie' Termansen 2011, 2012.
+    Copyright(C) Jonas 'Sortie' Termansen 2011, 2012, 2014.

     This file is part of Sortix.
@@ -32,167 +32,172 @@
 #include "multiboot.h"

-namespace Sortix
-{
-    namespace Page
-    {
-        extern size_t stackused;
-        extern size_t stacklength;
-        void ExtendStack();
-    }
-
-    namespace Memory
-    {
-        extern addr_t currentdir;
-
-        void InitCPU()
-        {
-            PML* const BOOTPML2 = (PML* const) 0x11000UL;
-            PML* const BOOTPML1 = (PML* const) 0x12000UL;
-            //PML* const FORKPML1 = (PML* const) 0x13000UL;
-            PML* const IDENPML1 = (PML* const) 0x14000UL;
-
-            // Initialize the memory structures with zeroes.
-            memset((PML* const) 0x11000UL, 0, 0x6000UL);
-
-            // Identity map the first 4 MiB.
-            addr_t flags = PML_PRESENT | PML_WRITABLE;
-            BOOTPML2->entry[0] = ((addr_t) IDENPML1) | flags;
-            for ( size_t i = 0; i < ENTRIES; i++ )
-            {
-                IDENPML1->entry[i] = (i * 4096UL) | flags;
-            }
-
-            // Next order of business is to map the virtual memory structures
-            // to the pre-defined locations in the virtual address space.
-
-            // Fractal map the PML1s.
-            BOOTPML2->entry[1023] = (addr_t) BOOTPML2 | flags;
-
-            // Fractal map the PML2s.
-            BOOTPML2->entry[1022] = (addr_t) BOOTPML1 | flags | PML_FORK;
-            BOOTPML1->entry[1023] = (addr_t) BOOTPML2 | flags;
-
-            // Add some predefined room for forking address spaces.
-            BOOTPML1->entry[0] = 0; // (addr_t) FORKPML1 | flags | PML_FORK;
-
-            // The virtual memory structures are now available at the predefined
-            // locations. This means the virtual memory code is bootstrapped. Of
-            // course, we still have no physical page allocator, so that's the
-            // next step.
-            PML* const PHYSPML1 = (PML* const) 0x15000UL;
-            PML* const PHYSPML0 = (PML* const) 0x16000UL;
-
-            BOOTPML2->entry[1021] = (addr_t) PHYSPML1 | flags;
-            PHYSPML1->entry[0] = (addr_t) PHYSPML0 | flags;
-
-            // Alright, enable virtual memory!
-            SwitchAddressSpace((addr_t) BOOTPML2);
-
-            size_t cr0;
-            asm volatile("mov %%cr0, %0": "=r"(cr0));
-            cr0 |= 0x80000000UL; /* Enable paging! */
-            asm volatile("mov %0, %%cr0":: "r"(cr0));
-
-            Page::stackused = 0;
-            Page::stacklength = 4096UL / sizeof(addr_t);
-
-            // The physical memory allocator should now be ready for use. Next
-            // up, the calling function will fill up the physical allocator with
-            // plenty of nice physical pages. (see Page::InitPushRegion)
-        }
-
-        // Note that even though this function exists, you should still clean
-        // up the address space of a process _before_ calling
-        // DestroyAddressSpace. This is just a hack because it currently is
-        // impossible to clean up PML1s using the MM API!
-        // ---
-        // TODO: This function is duplicated in {x86,x64}/memorymanagement.cpp!
-        // ---
-        void RecursiveFreeUserspacePages(size_t level, size_t offset)
-        {
-            PML* pml = PMLS[level] + offset;
-            for ( size_t i = 0; i < ENTRIES; i++ )
-            {
-                addr_t entry = pml->entry[i];
-                if ( !(entry & PML_PRESENT) ) { continue; }
-                if ( !(entry & PML_USERSPACE) ) { continue; }
-                if ( !(entry & PML_FORK) ) { continue; }
-                if ( level > 1 ) { RecursiveFreeUserspacePages(level-1, offset * ENTRIES + i); }
-                addr_t addr = pml->entry[i] & PML_ADDRESS;
-                // No need to unmap the page, we just need to mark it as unused.
-                Page::PutUnlocked(addr);
-            }
-        }
-
-        void DestroyAddressSpace(addr_t fallback, void (*func)(addr_t, void*), void* user)
-        {
-            // Look up the last few entries used for the fractal mapping. These
-            // cannot be unmapped as that would destroy the world. Instead, we
-            // will remember them, switch to another address space, and safely
-            // mark them as unused. The fork-related pages are handled the same
-            // way.
-            addr_t fractal1 = PMLS[2]->entry[1022];
-            addr_t dir = currentdir;
-
-            // We want to free the pages, but we are still using them ourselves,
-            // so lock the page allocation structure until we are done.
-            Page::Lock();
-
-            // In case any pages weren't cleaned up at this point.
-            // TODO: Page::Put calls may internally Page::Get, thus reusing
-            // pages we are not done with just yet.
-            RecursiveFreeUserspacePages(TOPPMLLEVEL, 0);
-
-            // Switch to the address space from when the world was originally
-            // created. It should contain the kernel, the whole kernel, and
-            // nothing but the kernel.
-            PML* const BOOTPML2 = (PML* const) 0x11000UL;
-            if ( !fallback )
-                fallback = (addr_t) BOOTPML2;
-
-            if ( func )
-                func(fallback, user);
-            else
-                SwitchAddressSpace(fallback);
-
-            // Now that everything left behind is marked as unused, we can
-            // safely let another thread use the pages.
-            Page::Unlock();
-
-            // These are safe to free since we switched address space.
-            Page::Put(fractal1 & PML_ADDRESS);
-            Page::Put(dir & PML_ADDRESS);
-        }
-
-        const size_t KERNEL_STACK_SIZE = 256UL * 1024UL;
-        const addr_t KERNEL_STACK_END = 0x80001000UL;
-        const addr_t KERNEL_STACK_START = KERNEL_STACK_END + KERNEL_STACK_SIZE;
-        const addr_t VIRTUAL_AREA_LOWER = KERNEL_STACK_START;
-        const addr_t VIRTUAL_AREA_UPPER = 0xFF400000UL;
-
-        void GetKernelVirtualArea(addr_t* from, size_t* size)
-        {
-            *from = KERNEL_STACK_END;
-            *size = VIRTUAL_AREA_UPPER - VIRTUAL_AREA_LOWER;
-        }
-
-        void GetUserVirtualArea(uintptr_t* from, size_t* size)
-        {
-            *from = 0x400000; // 4 MiB.
-            *size = 0x80000000 - *from; // 2 GiB - 4 MiB.
-        }
-
-        addr_t GetKernelStack()
-        {
-            return KERNEL_STACK_START;
-        }
-
-        size_t GetKernelStackSize()
-        {
-            return KERNEL_STACK_SIZE;
-        }
-    }
-}
+namespace Sortix {
+namespace Page {
+
+extern size_t stackused;
+extern size_t stacklength;
+void ExtendStack();
+
+} // namespace Page
+} // namespace Sortix
+
+namespace Sortix {
+namespace Memory {
+
+extern addr_t currentdir;
+
+void InitCPU()
+{
+    PML* const BOOTPML2 = (PML* const) 0x11000UL;
+    PML* const BOOTPML1 = (PML* const) 0x12000UL;
+    //PML* const FORKPML1 = (PML* const) 0x13000UL;
+    PML* const IDENPML1 = (PML* const) 0x14000UL;
+
+    // Initialize the memory structures with zeroes.
+    memset((PML* const) 0x11000UL, 0, 0x6000UL);
+
+    // Identity map the first 4 MiB.
+    addr_t flags = PML_PRESENT | PML_WRITABLE;
+    BOOTPML2->entry[0] = ((addr_t) IDENPML1) | flags;
+    for ( size_t i = 0; i < ENTRIES; i++ )
+        IDENPML1->entry[i] = (i * 4096UL) | flags;
+
+    // Next order of business is to map the virtual memory structures
+    // to the pre-defined locations in the virtual address space.
+
+    // Fractal map the PML1s.
+    BOOTPML2->entry[1023] = (addr_t) BOOTPML2 | flags;
+
+    // Fractal map the PML2s.
+    BOOTPML2->entry[1022] = (addr_t) BOOTPML1 | flags | PML_FORK;
+    BOOTPML1->entry[1023] = (addr_t) BOOTPML2 | flags;
+
+    // Add some predefined room for forking address spaces.
+    BOOTPML1->entry[0] = 0; // (addr_t) FORKPML1 | flags | PML_FORK;
+
+    // The virtual memory structures are now available at the predefined
+    // locations. This means the virtual memory code is bootstrapped. Of
+    // course, we still have no physical page allocator, so that's the
+    // next step.
+    PML* const PHYSPML1 = (PML* const) 0x15000UL;
+    PML* const PHYSPML0 = (PML* const) 0x16000UL;
+
+    BOOTPML2->entry[1021] = (addr_t) PHYSPML1 | flags;
+    PHYSPML1->entry[0] = (addr_t) PHYSPML0 | flags;
+
+    // Alright, enable virtual memory!
+    SwitchAddressSpace((addr_t) BOOTPML2);
+
+    size_t cr0;
+    asm volatile("mov %%cr0, %0": "=r"(cr0));
+    cr0 |= 0x80000000UL; /* Enable paging! */
+    asm volatile("mov %0, %%cr0":: "r"(cr0));
+
+    Page::stackused = 0;
+    Page::stacklength = 4096UL / sizeof(addr_t);
+
+    // The physical memory allocator should now be ready for use. Next
+    // up, the calling function will fill up the physical allocator with
+    // plenty of nice physical pages. (see Page::InitPushRegion)
+}
+
+// Note that even though this function exists, you should still clean up
+// the address space of a process _before_ calling DestroyAddressSpace.
+// This is just a hack because it currently is impossible to clean up
+// PML1s using the MM API!
+// ---
+// TODO: This function is duplicated in {x86,x64}/memorymanagement.cpp!
+// ---
+void RecursiveFreeUserspacePages(size_t level, size_t offset)
+{
+    PML* pml = PMLS[level] + offset;
+    for ( size_t i = 0; i < ENTRIES; i++ )
+    {
+        addr_t entry = pml->entry[i];
+        if ( !(entry & PML_PRESENT) )
+            continue;
+        if ( !(entry & PML_USERSPACE) )
+            continue;
+        if ( !(entry & PML_FORK) )
+            continue;
+        if ( 1 < level )
+            RecursiveFreeUserspacePages(level-1, offset * ENTRIES + i);
+        addr_t addr = pml->entry[i] & PML_ADDRESS;
+        // No need to unmap the page, we just need to mark it as unused.
+        Page::PutUnlocked(addr);
+    }
+}
+
+void DestroyAddressSpace(addr_t fallback, void (*func)(addr_t, void*), void* user)
+{
+    // Look up the last few entries used for the fractal mapping. These
+    // cannot be unmapped as that would destroy the world. Instead, we
+    // will remember them, switch to another address space, and safely
+    // mark them as unused. The fork-related pages are handled the same way.
+    addr_t fractal1 = PMLS[2]->entry[1022];
+    addr_t dir = currentdir;
+
+    // We want to free the pages, but we are still using them ourselves,
+    // so lock the page allocation structure until we are done.
+    Page::Lock();
+
+    // In case any pages weren't cleaned up at this point.
+    // TODO: Page::Put calls may internally Page::Get, thus reusing pages
+    // we are not done with just yet.
+    RecursiveFreeUserspacePages(TOPPMLLEVEL, 0);
+
+    // Switch to the address space from when the world was originally
+    // created. It should contain the kernel, the whole kernel, and
+    // nothing but the kernel.
+    PML* const BOOTPML2 = (PML* const) 0x11000UL;
+    if ( !fallback )
+        fallback = (addr_t) BOOTPML2;
+
+    if ( func )
+        func(fallback, user);
+    else
+        SwitchAddressSpace(fallback);
+
+    // Now that everything left behind is marked as unused, we can safely
+    // let other threads use the pages.
+    Page::Unlock();
+
+    // These are safe to free since we switched address space.
+    Page::Put(fractal1 & PML_ADDRESS);
+    Page::Put(dir & PML_ADDRESS);
+}
+
+const size_t KERNEL_STACK_SIZE = 256UL * 1024UL;
+const addr_t KERNEL_STACK_END = 0x80001000UL;
+const addr_t KERNEL_STACK_START = KERNEL_STACK_END + KERNEL_STACK_SIZE;
+const addr_t VIRTUAL_AREA_LOWER = KERNEL_STACK_START;
+const addr_t VIRTUAL_AREA_UPPER = 0xFF400000UL;
+
+void GetKernelVirtualArea(addr_t* from, size_t* size)
+{
+    *from = KERNEL_STACK_END;
+    *size = VIRTUAL_AREA_UPPER - VIRTUAL_AREA_LOWER;
+}
+
+void GetUserVirtualArea(uintptr_t* from, size_t* size)
+{
+    *from = 0x400000; // 4 MiB.
+    *size = 0x80000000 - *from; // 2 GiB - 4 MiB.
+}
+
+addr_t GetKernelStack()
+{
+    return KERNEL_STACK_START;
+}
+
+size_t GetKernelStackSize()
+{
+    return KERNEL_STACK_SIZE;
+}
+
+} // namespace Memory
+} // namespace Sortix
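Two constants in the x86 code above follow from simple arithmetic: with the 4-byte addr_t the x86 port implies, one PML1 holds 1024 entries and so identity maps exactly 4 MiB, and the 256 KiB kernel stack occupies the range from KERNEL_STACK_END up to KERNEL_STACK_START, the value GetKernelStack() hands out. A standalone check (illustrative, not part of this commit):

// Standalone arithmetic check of the x86 constants; not Sortix code.
#include <assert.h>

int main(void)
{
    unsigned long entries = 4096UL / 4UL; // ENTRIES with a 4-byte addr_t
    assert(entries * 4096UL == 4UL * 1024UL * 1024UL); // 4 MiB identity mapped

    unsigned long stack_end = 0x80001000UL;                 // KERNEL_STACK_END
    unsigned long stack_start = stack_end + 256UL * 1024UL; // + KERNEL_STACK_SIZE
    assert(stack_start == 0x80041000UL); // KERNEL_STACK_START, from GetKernelStack()
    return 0;
}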

View File

@@ -1,6 +1,6 @@
 /*******************************************************************************

-    Copyright(C) Jonas 'Sortie' Termansen 2011.
+    Copyright(C) Jonas 'Sortie' Termansen 2011, 2014.

     This file is part of Sortix.
@@ -25,30 +25,33 @@
 #ifndef SORTIX_X64_MEMORYMANAGEMENT_H
 #define SORTIX_X64_MEMORYMANAGEMENT_H

-namespace Sortix
-{
-    namespace Memory
-    {
-        const size_t TOPPMLLEVEL = 2;
-        const size_t ENTRIES = 4096UL / sizeof(addr_t);
-        const size_t TRANSBITS = 10;
-
-        PML* const PMLS[TOPPMLLEVEL + 1] =
-        {
-            (PML* const) 0x0,
-            (PML* const) 0xFFC00000UL,
-            (PML* const) 0xFFBFF000UL,
-        };
-
-        PML* const FORKPML = (PML* const) 0xFF800000UL;
-    }
-
-    namespace Page
-    {
-        addr_t* const STACK = (addr_t* const) 0xFF400000UL;
-        const size_t MAXSTACKSIZE = (4UL*1024UL*1024UL);
-        const size_t MAXSTACKLENGTH = MAXSTACKSIZE / sizeof(addr_t);
-    }
-}
+namespace Sortix {
+namespace Memory {
+
+const size_t TOPPMLLEVEL = 2;
+const size_t ENTRIES = 4096UL / sizeof(addr_t);
+const size_t TRANSBITS = 10;
+
+PML* const PMLS[TOPPMLLEVEL + 1] =
+{
+    (PML* const) 0x0,
+    (PML* const) 0xFFC00000UL,
+    (PML* const) 0xFFBFF000UL,
+};
+
+PML* const FORKPML = (PML* const) 0xFF800000UL;
+
+} // namespace Memory
+} // namespace Sortix
+
+namespace Sortix {
+namespace Page {
+
+addr_t* const STACK = (addr_t* const) 0xFF400000UL;
+const size_t MAXSTACKSIZE = (4UL*1024UL*1024UL);
+const size_t MAXSTACKLENGTH = MAXSTACKSIZE / sizeof(addr_t);
+
+} // namespace Page
+} // namespace Sortix

 #endif
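The MAXSTACKSIZE constants bound how much physical memory each port's free-page stack can track, since every slot is one addr_t naming a 4 KiB page: 4 MiB of 4-byte slots covers the full 4 GiB an x86 addr_t can address, while 512 GiB of 8-byte slots covers 256 TiB on x64. A standalone capacity calculation (illustrative, not part of this commit):

// Standalone capacity check of the free-page stacks; not Sortix code.
#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint64_t x86_slots = 4ULL * 1024 * 1024 / 4;          // MAXSTACKLENGTH, x86
    assert(x86_slots * 4096 == 4ULL << 30);               // tracks 4 GiB

    uint64_t x64_slots = 512ULL * 1024 * 1024 * 1024 / 8; // MAXSTACKLENGTH, x64
    assert(x64_slots * 4096 == 256ULL << 40);             // tracks 256 TiB
    return 0;
}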