implemented enough of the page allocator to fill in the gaps in the VM mapper

This commit is contained in:
Eric J. Bowersox 2013-04-27 23:33:29 -06:00
parent b23fd6b6e5
commit a7fb97cb3a
5 changed files with 769 additions and 21 deletions

View File

@@ -58,9 +58,19 @@ typedef struct tagVMCTXT {
PTTB pTTB; /* pointer to the TTB */
PTTBAUX pTTBAux; /* pointer to the TTB auxiliary data */
UINT32 uiMaxIndex; /* max index into the above tables */
PHYSADDR paTTB; /* physical address of the TTB */
RBTREE rbtPageTables; /* tree containing page tables this context owns */
} VMCTXT, *PVMCTXT;
/* Pointer to a function to update the page database with a PTE address. */
typedef void (*PFNSETPTEADDR)(UINT32, PHYSADDR, BOOL);
/* Invalid page return. */
#define INVALID_PAGE ((UINT32)(-1))
/* Page allocation flags. */
#define PGALLOC_ZERO 0x00000001 /* allocated page must be zeroed */
CDECL_BEGIN
/* Low-level maintenance functions */
@@ -86,6 +96,10 @@ extern HRESULT MmMapKernelPages(PHYSADDR paBase, UINT32 cpg, UINT32 uiTableFlags
UINT32 uiPageFlags, UINT32 uiAuxFlags, PKERNADDR pvmaLocation);
extern HRESULT MmDemapKernelPages(KERNADDR vmaBase, UINT32 cpg);
/* Page allocation functions */
extern HRESULT MmAllocatePage(UINT32 uiFlags, UINT32 tag, UINT32 subtag, PPHYSADDR ppaNewPage);
extern HRESULT MmFreePage(PHYSADDR paPage, UINT32 tag, UINT32 subtag);
/* Initialization functions only */
extern void _MmInit(PSTARTUP_INFO pstartup);

View File

@@ -34,9 +34,9 @@
#ifdef __COMROGUE_INTERNALS__
/*---------------------------------------------------------------------------------------------
* BCM2835 ARM Memory Management Unit constants (and other COMROGUE-specific memory constants)
*---------------------------------------------------------------------------------------------
*/
/* Memory system constants */
@@ -74,6 +74,9 @@
#define TTBSEC_SBASE 0xFF000000 /* supersection base address mask */
#define TTBSEC_SBASEHI 0x00F00000 /* supersection high base address mask */
/* Flags that are safe to alter for a section. */
#define TTBSEC_SAFEFLAGS (TTBSEC_ALLFLAGS & ~(TTBSEC_ALWAYS | TTBSEC_SUPER))
/* AP bits for the standard access control model */
#define TTBSEC_AP00 0x00000000 /* no access */
#define TTBSEC_AP01 0x00000400 /* supervisor only access */
@@ -94,6 +97,9 @@
#define TTBPGTBL_ALLFLAGS 0x000003FF /* "all flags" mask */
#define TTBPGTBL_BASE 0xFFFFFC00 /* page table base address mask */
/* Flags that are safe to alter for a TTB page table entry. */
#define TTBPGTBL_SAFEFLAGS (TTBPGTBL_ALLFLAGS & ~0x03)
/* Bits to query the type of TTB entry we're looking at */
#define TTBQUERY_MASK 0x00000003 /* bits we can query */
#define TTBQUERY_FAULT 0x00000000 /* indicates a fault */
@@ -104,6 +110,11 @@
/* TTB auxiliary descriptor bits */
#define TTBAUX_SACRED 0x00000001 /* sacred entry, do not deallocate */
#define TTBAUX_UNWRITEABLE 0x00000002 /* entry unwriteable */
#define TTBAUX_NOTPAGE 0x00000004 /* entry not mapped in page database */
#define TTBAUX_ALLFLAGS 0x00000007 /* "all flags" mask */
/* Flags that are safe to alter for the TTB auxiliary table. */
#define TTBAUX_SAFEFLAGS (TTBAUX_ALLFLAGS & ~TTBAUX_NOTPAGE)
/* Small page table entry bits */
#define PGTBLSM_XN 0x00000001 /* Execute-Never */
@@ -115,8 +126,12 @@
#define PGTBLSM_APX 0x00000200 /* access permission extended */
#define PGTBLSM_S 0x00000400 /* Shared */
#define PGTBLSM_NG 0x00000800 /* Not Global */
#define PGTBLSM_ALLFLAGS 0x00000FFF /* "all flags" mask */
#define PGTBLSM_PAGE 0xFFFFF000 /* page base address mask */
/* Flags that are safe to alter for a page table entry. */
#define PGTBLSM_SAFEFLAGS (PGTBLSM_ALLFLAGS & ~PGTBLSM_ALWAYS)
/* AP bits for the standard access control model */
#define PGTBLSM_AP00 0x00000000 /* no access */
#define PGTBLSM_AP01 0x00000010 /* supervisor only access */
@@ -133,6 +148,11 @@
/* Page auxiliary descriptor bits */
#define PGAUX_SACRED 0x00000001 /* sacred entry, do not deallocate */
#define PGAUX_UNWRITEABLE 0x00000002 /* entry unwriteable */
#define PGAUX_NOTPAGE 0x00000004 /* entry not mapped in page database */
#define PGAUX_ALLFLAGS 0x00000007 /* "all flags" mask */
/* Flags that are safe to alter for the page auxiliary table. */
#define PGAUX_SAFEFLAGS (PGAUX_ALLFLAGS & ~PGAUX_NOTPAGE)
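Expanded, the two auxiliary "safe flags" masks above work out the same way; a quick worked check (annotation only, not new definitions):

/* TTBAUX_SAFEFLAGS = 0x00000007 & ~0x00000004 = 0x00000003 (sacred | unwriteable)
* PGAUX_SAFEFLAGS = 0x00000007 & ~0x00000004 = 0x00000003 (sacred | unwriteable)
* i.e. every auxiliary bit except the NOTPAGE bookkeeping bit may be rewritten in place.
*/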
/* Combinations of flags we use regularly. */
#define TTBFLAGS_LIB_CODE TTBPGTBL_ALWAYS
@@ -152,7 +172,7 @@
#define PGAUXFLAGS_INIT_DATA 0
#define TTBFLAGS_MMIO TTBPGTBL_ALWAYS
#define PGTBLFLAGS_MMIO (PGTBLSM_ALWAYS | PGTBLSM_AP01)
#define PGAUXFLAGS_MMIO (PGAUX_SACRED | PGAUX_NOTPAGE)
#define TTBAUXFLAGS_PAGETABLE 0
#ifndef __ASM__
@@ -210,7 +230,8 @@ typedef union tagTTB {
typedef struct tagTTBAUXENTRY {
unsigned sacred : 1; /* sacred TTB - should never be deallocated */
unsigned unwriteable : 1; /* entry is not writeable */
unsigned notpage : 1; /* entry not mapped in the page database */
unsigned reserved : 29; /* reserved for future allocation */
} TTBAUXENTRY, *PTTBAUXENTRY;
/* TTB auxiliary table entry */
@@ -250,7 +271,8 @@ typedef union tagPGTBL {
typedef struct tagPGAUXENTRY {
unsigned sacred : 1; /* sacred page - should never be deallocated */
unsigned unwriteable : 1; /* entry is not writeable */
unsigned notpage : 1; /* entry not mapped in the page database */
unsigned reserved : 29; /* reserved for future allocation */
} PGAUXENTRY, *PPGAUXENTRY;
/* page table auxiliary entry */
@@ -272,17 +294,37 @@ typedef struct tagPAGETAB {
((((ttb) & ((1 << SYS_TTB_BITS) - 1)) << (SYS_PAGE_BITS + SYS_PGTBL_BITS)) | \
(((pgtbl) & ((1 << SYS_PGTBL_BITS) - 1)) << SYS_PAGE_BITS) | ((ofs) & (SYS_PAGE_SIZE - 1)))
/*-----------------------------------------------
* Data structures for the Master Page Database.
*-----------------------------------------------
*/
/* internal structure of a MPDB entry */
typedef struct tagMPDB1 {
PHYSADDR paPTE; /* PA of page table entry for the page */
unsigned next : 20; /* index of "next" entry in list */
unsigned sectionmap : 1; /* set if page is part of a section mapping */
unsigned tag : 3; /* page tag */
unsigned subtag : 8; /* page subtag */
} MPDB1;
/* MPDB tags */
#define MPDBTAG_UNKNOWN 0 /* unknown, should never be used */
#define MPDBTAG_NORMAL 1 /* normal user/free page */
#define MPDBTAG_SYSTEM 2 /* system allocation */
/* MPDB system subtags */
#define MPDBSYS_ZEROPAGE 0 /* zero page allocation */
#define MPDBSYS_LIBCODE 1 /* library code */
#define MPDBSYS_KCODE 2 /* kernel code */
#define MPDBSYS_KDATA 3 /* kernel data */
#define MPDBSYS_INIT 4 /* init code & data (to be freed later) */
#define MPDBSYS_TTB 5 /* the system TTB */
#define MPDBSYS_TTBAUX 6 /* the system auxiliary TTB table */
#define MPDBSYS_MPDB 7 /* the MPDB itself */
#define MPDBSYS_PGTBL 8 /* page tables */
#define MPDBSYS_GPU 9 /* GPU reserved pages */
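Together with paPTE, the bitfields above pack one MPDB entry into exactly 64 bits (32 + 20 + 1 + 3 + 8). As an illustration only -- the helper and its ETrace() call are hypothetical, not part of this commit -- decoding an entry might look like:

/* hypothetical debug decode of one MPDB entry; assumes a printf-style ETrace() hook */
static void dump_mpdb_entry(PMPDB pmpdb, UINT32 ndxPage)
{
  ETrace("page %u: PTE @ %08X, next=%u%s, tag=%u, subtag=%u\n", ndxPage,
         pmpdb[ndxPage].d.paPTE, pmpdb[ndxPage].d.next,
         pmpdb[ndxPage].d.sectionmap ? " (section)" : "",
         pmpdb[ndxPage].d.tag, pmpdb[ndxPage].d.subtag);
}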
/* The MPDB entry itself. */
typedef union tagMPDB {
UINT64 raw; /* raw data */

View File

@@ -105,5 +105,6 @@
#define MEMMGR_E_NOSACRED SCODE_CAST(0x86010005) /* tried to demap a "sacred" entry */
#define MEMMGR_E_NOKERNSPC SCODE_CAST(0x86010006) /* no kernel space */
#define MEMMGR_E_RECURSED SCODE_CAST(0x86010007) /* tried to recurse into page allocation */
#define MEMMGR_E_BADTAGS SCODE_CAST(0x86010008) /* invalid tags for freed page */
#endif /* __SCODE_H_INCLUDED */

View File

@@ -30,9 +30,18 @@
* "Raspberry Pi" is a trademark of the Raspberry Pi Foundation.
*/
#include <comrogue/types.h>
#include <comrogue/scode.h>
#include <comrogue/str.h>
#include <comrogue/internals/seg.h>
#include <comrogue/internals/mmu.h>
#include <comrogue/internals/memmgr.h>
#include <comrogue/internals/startup.h>
#include <comrogue/internals/trace.h>
#ifdef THIS_FILE
#undef THIS_FILE
DECLARE_THIS_FILE
#endif
/* Lists we keep track of various pages on. */
typedef struct tagPAGELIST {
@@ -42,17 +51,348 @@ typedef struct tagPAGELIST {
/* The Master Page Database */
static PMPDB g_pMasterPageDB = NULL;
static UINT32 g_cpgMaster = 0;
/* Individual page lists. */
static PAGELIST g_pglFree = { 0, 0 }; /* pages that are free */
static PAGELIST g_pglZeroed = { 0, 0 }; /* pages that are free and zeroed */
//static PAGELIST g_pglStandby = { 0, 0 }; /* pages removed but "in transition" */
//static PAGELIST g_pglModified = { 0, 0 }; /* pages removed but "in transition" and modified */
//static PAGELIST g_pglBad = { 0, 0 }; /* bad pages */
SEG_INIT_DATA static PAGELIST g_pglInit = { 0, 0 }; /* pages to be freed after initialization */
static KERNADDR g_kaZero = 0; /* kernel address where we map a page to zero it */
/*
* Zeroes a page of memory by index.
*
* Parameters:
* - ndxPage = Index of the page to be zeroed.
*
* Returns:
* Nothing.
*
* Side effects:
* Specified page is zeroed. TTB temporarily modified to map and unmap the page in memory.
*/
static void zero_page(UINT32 ndxPage)
{
HRESULT hr = MmMapPages(NULL, mmPageIndex2PA(ndxPage), g_kaZero, 1, TTBPGTBL_ALWAYS,
PGTBLSM_ALWAYS|PGTBLSM_AP01|PGTBLSM_XN, PGAUX_NOTPAGE);
ASSERT(SUCCEEDED(hr));
if (SUCCEEDED(hr))
{
StrSetMem(g_kaZero, 0, SYS_PAGE_SIZE);
VERIFY(SUCCEEDED(MmDemapPages(NULL, g_kaZero, 1)));
}
}
/*
* Sets the page table entry physical address pointer and the section-mapped flag for a given page.
*
* Parameters:
* - ndxPage = Index of the page to set the PTE and section flag for.
* - paPTE = Physical address of the page table entry that points to this page.
* - bIsSection = If TRUE, paPTE is actually the physical address of the TTB section entry that points
* to this page.
*
* Returns:
* Nothing.
*
* Side effects:
* Updates the MPDB entry indicated by ndxPage.
*/
static void set_pte_address(UINT32 ndxPage, PHYSADDR paPTE, BOOL bIsSection)
{
g_pMasterPageDB[ndxPage].d.paPTE = paPTE;
g_pMasterPageDB[ndxPage].d.sectionmap = (bIsSection ? 1 : 0);
}
/*
* Finds the given page's predecessor in a circular list.
*
* Parameters:
* - ndxPage = Index of the page to find the predecessor of. Assumes that the page is part of a circular list.
*
* Returns:
* Index of the page's predecessor in the circular list.
*/
static inline UINT32 find_predecessor(UINT32 ndxPage)
{
register UINT32 i = ndxPage; /* search page index */
while (g_pMasterPageDB[i].d.next != ndxPage)
i = g_pMasterPageDB[i].d.next;
return i;
}
/*
* Unchains the given page from the circular list it's in.
*
* Parameters:
* - ndxPage = Index of the page to be unchained from the list.
* - ndxStartForScan = Index of the page to start scanning for the ndxPage page at. Assumes that this page is
* part of a circular list.
*
* Returns:
* TRUE if the page was successfully unchained, FALSE if not.
*
* Side effects:
* Entries in the MPDB may have their "next" pointer modified.
*/
static BOOL unchain_page(UINT32 ndxPage, UINT32 ndxStartForScan)
{
register UINT32 i = ndxStartForScan; /* search page index */
do
{
if (g_pMasterPageDB[i].d.next == ndxPage)
{
g_pMasterPageDB[i].d.next = g_pMasterPageDB[ndxPage].d.next;
return TRUE;
}
i = g_pMasterPageDB[i].d.next;
} while (i != ndxStartForScan);
return FALSE;
}
/*
* Removes a page from a list.
*
* Parameters:
* - ppgl = Pointer to page list to remove the page from.
* - ndxPage = Index of the page to be removed from the list.
*
* Returns:
* Nothing.
*
* Side effects:
* Modifies fields of the page list, and possibly links in the MPDB.
*/
static void remove_from_list(PPAGELIST ppgl, UINT32 ndxPage)
{
if (ppgl->ndxLast == ndxPage)
ppgl->ndxLast = find_predecessor(ndxPage);
VERIFY(unchain_page(ndxPage, ppgl->ndxLast));
if (--ppgl->cpg == 0)
ppgl->ndxLast = 0;
}
/*
* Adds a page to the end of a list.
*
* Parameters:
* - ppgl = Pointer to page list to add the page to.
* - ndxPage = Index of the page to be added to the list.
*
* Returns:
* Nothing.
*
* Side effects:
* Modifies fields of the page list, and possibly links in the MPDB.
*/
static void add_to_list(PPAGELIST ppgl, UINT32 ndxPage)
{
if (ppgl->cpg++ == 0)
g_pMasterPageDB[ndxPage].d.next = ndxPage;
else
{
g_pMasterPageDB[ndxPage].d.next = g_pMasterPageDB[ppgl->ndxLast].d.next;
g_pMasterPageDB[ppgl->ndxLast].d.next = ndxPage;
}
ppgl->ndxLast = ndxPage;
}
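Taken together, find_predecessor, unchain_page, remove_from_list, and add_to_list maintain one invariant: each PAGELIST is a circular singly-linked list threaded through the MPDB "next" fields, with ndxLast naming the tail, so the head is always g_pMasterPageDB[ppgl->ndxLast].d.next and appending is O(1). A minimal standalone sketch of that discipline (plain arrays standing in for the MPDB; all names hypothetical):

#include <assert.h>
#define NPAGES 8
static unsigned next[NPAGES]; /* stands in for the MPDB "next" links */
static unsigned cpg = 0, ndxLast = 0; /* stands in for a PAGELIST */

static void push_tail(unsigned ndx)
{
  if (cpg++ == 0)
    next[ndx] = ndx; /* first node points at itself */
  else
  {
    next[ndx] = next[ndxLast]; /* new tail points at the head */
    next[ndxLast] = ndx; /* old tail points at the new tail */
  }
  ndxLast = ndx; /* new node becomes the tail */
}

int main(void)
{
  push_tail(3); push_tail(5); push_tail(7);
  assert(next[ndxLast] == 3); /* head is still the first page pushed */
  assert(next[3] == 5 && next[5] == 7 && next[7] == 3); /* list is circular */
  return 0;
}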
/*
* Allocates a page off one of our lists.
*
* Parameters:
* - uiFlags = Flags for the page allocation.
*
* Returns:
* INVALID_PAGE if the page could not be allocated, otherwise the index of the allocated page.
*/
static UINT32 allocate_page(UINT32 uiFlags)
{
UINT32 rc;
PPAGELIST ppgl = NULL;
BOOL bZero = FALSE;
if (uiFlags & PGALLOC_ZERO)
{ /* try zeroed list first, then free (but need to zero afterwards) */
if (g_pglZeroed.cpg > 0)
ppgl = &g_pglZeroed;
else if (g_pglFree.cpg > 0)
{
ppgl = &g_pglFree;
bZero = TRUE;
}
}
else
{ /* try free list first, then zeroed */
if (g_pglFree.cpg > 0)
ppgl = &g_pglFree;
else if (g_pglZeroed.cpg > 0)
ppgl = &g_pglZeroed;
}
/* TODO: apply additional strategy if we don't yet have a page list */
if (!ppgl)
return INVALID_PAGE;
rc = g_pMasterPageDB[ppgl->ndxLast].d.next; /* take first page on list */
remove_from_list(ppgl, rc);
if (bZero)
zero_page(rc);
return rc;
}
/*
* Allocate a memory page and return its physical address.
*
* Parameters:
* - uiFlags = Flags for page allocation.
* - tag = Tag to give the newly-allocated page.
* - subtag = Subtag to give the newly-allocated page.
* - ppaNewPage = Pointer to location that will receive the physical address of the new page.
*
* Returns:
* Standard HRESULT success/failure indication.
*/
HRESULT MmAllocatePage(UINT32 uiFlags, UINT32 tag, UINT32 subtag, PPHYSADDR ppaNewPage)
{
register UINT32 ndxPage; /* index of page to be allocated */
if (!ppaNewPage)
return E_POINTER;
ndxPage = allocate_page(uiFlags);
if (ndxPage == INVALID_PAGE)
return E_OUTOFMEMORY;
g_pMasterPageDB[ndxPage].d.tag = tag;
g_pMasterPageDB[ndxPage].d.subtag = subtag;
*ppaNewPage = mmPageIndex2PA(ndxPage);
return S_OK;
}
/*
* Frees up a previously-allocated memory page.
*
* Parameters:
* - paPage = Physical address of the page to be freed.
* - tag = Tag value we expect the page to have.
* - subtag = Subtag value we expect the page to have.
*
* Returns:
* Standard HRESULT success/failure indication.
*/
HRESULT MmFreePage(PHYSADDR paPage, UINT32 tag, UINT32 subtag)
{
register UINT32 ndxPage = mmPA2PageIndex(paPage);
if ((g_pMasterPageDB[ndxPage].d.tag != tag) || (g_pMasterPageDB[ndxPage].d.subtag != subtag))
return MEMMGR_E_BADTAGS;
g_pMasterPageDB[ndxPage].d.tag = MPDBTAG_NORMAL;
g_pMasterPageDB[ndxPage].d.subtag = 0;
add_to_list(&g_pglFree, ndxPage);
return S_OK;
}
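A caller-side sketch of the pair (hypothetical usage, not in this commit): the tag/subtag handed to MmFreePage must match the values the page was allocated under, otherwise MEMMGR_E_BADTAGS comes back and the page is left untouched.

/* hypothetical: grab a zeroed page for a page table, use it, then release it */
PHYSADDR paScratch;
HRESULT hr = MmAllocatePage(PGALLOC_ZERO, MPDBTAG_SYSTEM, MPDBSYS_PGTBL, &paScratch);
if (SUCCEEDED(hr))
{
  /* ... map paScratch into some VM context and use it ... */
  VERIFY(SUCCEEDED(MmFreePage(paScratch, MPDBTAG_SYSTEM, MPDBSYS_PGTBL)));
}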
/*
* Builds a "chain" of linked pages in the MPDB, setting their tags to known values, and optionally linking
* them into a page list.
*
* Parameters:
* - ndxFirstPage = First page of the chain to be built.
* - cpg = Count of pages to include in the chain.
* - tag = Tag value to give the pages in the chain.
* - subtag = Subtag value to give the pages in the chain.
* - ppglAddTo = Pointer to the page list we want to add the new page chain to. May be NULL.
*
* Returns:
* The index of the first page following the new chain that was built, i.e. the next start point for a chain.
*
* Side effects:
* Modifies the MPDB accordingly.
*/
SEG_INIT_CODE static UINT32 build_page_chain(UINT32 ndxFirstPage, UINT32 cpg, unsigned tag, unsigned subtag,
PPAGELIST ppglAddTo)
{
register UINT32 i; /* loop counter */
if (cpg == 0)
return ndxFirstPage; /* do nothing */
for (i=0; i < cpg; i++)
{
g_pMasterPageDB[ndxFirstPage + i].d.tag = tag;
g_pMasterPageDB[ndxFirstPage + i].d.subtag = subtag;
if (i<(cpg - 1))
g_pMasterPageDB[ndxFirstPage + i].d.next = ndxFirstPage + i + 1;
}
if (ppglAddTo)
{
if (ppglAddTo->cpg == 0)
/* link as a circular list */
g_pMasterPageDB[ndxFirstPage + cpg - 1].d.next = ndxFirstPage;
else
{
/* link into existing circular list */
g_pMasterPageDB[ndxFirstPage + cpg - 1].d.next = g_pMasterPageDB[ppglAddTo->ndxLast].d.next;
g_pMasterPageDB[ppglAddTo->ndxLast].d.next = ndxFirstPage;
}
ppglAddTo->ndxLast = ndxFirstPage + cpg - 1;
ppglAddTo->cpg += cpg;
}
return ndxFirstPage + cpg;
}
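Each call tags a contiguous run of page indexes and returns the index just past the run, so callers can walk the whole physical map with a running accumulator; _MmInitPageAlloc below does exactly that. A toy instance with hypothetical page counts:

/* hypothetical counts: chains of 1, 47, and 16 pages starting at index 0 */
UINT32 ndx = 0;
ndx = build_page_chain(ndx, 1, MPDBTAG_SYSTEM, MPDBSYS_ZEROPAGE, NULL); /* ndx is now 1 */
ndx = build_page_chain(ndx, 47, MPDBTAG_NORMAL, 0, &g_pglFree); /* ndx is now 48 */
ndx = build_page_chain(ndx, 16, MPDBTAG_SYSTEM, MPDBSYS_KCODE, NULL); /* ndx is now 64 */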
/* External references to symbols defined by the linker script. */
extern char cpgPrestartTotal, cpgLibraryCode, cpgKernelCode, cpgKernelData, cpgKernelBss, cpgInitCode,
cpgInitData, cpgInitBss;
/* secondary init function in the VM mapper */
extern void _MmInitPTEMappings(PFNSETPTEADDR pfnSetPTEAddr);
/*
* Initializes the page allocator and the Master Page Database.
*
* Parameters:
* - pstartup = Pointer to startup information data structure.
*
* Returns:
* Nothing.
*
* Side effects:
* Local variables and the Master Page Database initialized.
*/
SEG_INIT_CODE void _MmInitPageAlloc(PSTARTUP_INFO pstartup)
{
register UINT32 i; /* loop counter */
/* Setup the master data pointers and zero the MPDB. */
g_pMasterPageDB = (PMPDB)(pstartup->kaMPDB);
g_cpgMaster = pstartup->cpgSystemTotal;
StrSetMem(g_pMasterPageDB, 0, pstartup->cpgMPDB * SYS_PAGE_SIZE);
/* Classify all pages in the system and add them to lists. */
i = build_page_chain(0, 1, MPDBTAG_SYSTEM, MPDBSYS_ZEROPAGE, NULL);
i = build_page_chain(i, (INT32)(&cpgPrestartTotal) - 1, MPDBTAG_NORMAL, 0, &g_pglFree);
i = build_page_chain(i, (INT32)(&cpgLibraryCode), MPDBTAG_SYSTEM, MPDBSYS_LIBCODE, NULL);
i = build_page_chain(i, (INT32)(&cpgKernelCode), MPDBTAG_SYSTEM, MPDBSYS_KCODE, NULL);
i = build_page_chain(i, (INT32)(&cpgKernelData) + (INT32)(&cpgKernelBss), MPDBTAG_SYSTEM, MPDBSYS_KDATA, NULL);
i = build_page_chain(i, (INT32)(&cpgInitCode) + (INT32)(&cpgInitData) + (INT32)(&cpgInitBss), MPDBTAG_SYSTEM,
MPDBSYS_INIT, &g_pglInit);
i = build_page_chain(i, pstartup->cpgTTBGap, MPDBTAG_NORMAL, 0, &g_pglFree);
i = build_page_chain(i, SYS_TTB1_SIZE / SYS_PAGE_SIZE, MPDBTAG_SYSTEM, MPDBSYS_TTB, NULL);
i = build_page_chain(i, SYS_TTB1_SIZE / SYS_PAGE_SIZE, MPDBTAG_SYSTEM, MPDBSYS_TTBAUX, NULL);
i = build_page_chain(i, pstartup->cpgMPDB, MPDBTAG_SYSTEM, MPDBSYS_MPDB, NULL);
i = build_page_chain(i, pstartup->cpgPageTables, MPDBTAG_SYSTEM, MPDBSYS_PGTBL, NULL);
i = build_page_chain(i, pstartup->cpgSystemAvail - i, MPDBTAG_NORMAL, 0, &g_pglFree);
i = build_page_chain(i, pstartup->cpgSystemTotal - pstartup->cpgSystemAvail, MPDBTAG_SYSTEM, MPDBSYS_GPU, NULL);
ASSERT(i == g_cpgMaster);
/* Initialize the PTE mappings in the MPDB, and the VM mapper's hook function by which it keeps this up to date. */
_MmInitPTEMappings(set_pte_address);
/* Allocate the kernel address at which we map pages in order to zero them. */
g_kaZero = _MmAllocKernelAddr(1);
}

View File

@@ -55,9 +55,16 @@ static PMALLOC g_pMalloc = NULL; /* allocator used */
static VMCTXT g_vmctxtKernel = { /* kernel VM context */
.pTTB = NULL,
.pTTBAux = NULL,
.uiMaxIndex = SYS_TTB1_ENTRIES,
.paTTB = 0
};
static RBTREE g_rbtFreePageTables; /* tree containing free page tables */
static PFNSETPTEADDR g_pfnSetPTEAddr = NULL; /* hook function into page database */
/*------------------------------
* Inline resolution operations
*------------------------------
*/
/*
* Resolves a given page table reference for a TTB entry within a VM context.
@@ -93,6 +100,11 @@ static inline PVMCTXT resolve_vmctxt(PVMCTXT pvmctxt, KERNADDR vma)
return pvmctxt;
}
/*-----------------------------------------
* Virtual-to-physical functionality group
*-----------------------------------------
*/
/*
* Returns the physical address corresponding to a virtual memory address.
*
@@ -137,6 +149,11 @@ PHYSADDR MmGetPhysAddr(PVMCTXT pvmctxt, KERNADDR vma)
return virt_to_phys(resolve_vmctxt(pvmctxt, vma), vma);
}
/*---------------------------
* Demap functionality group
*---------------------------
*/
/*
* Determines whether or not the specified page table is empty.
*
@@ -209,6 +226,7 @@ static HRESULT demap_pages1(PVMCTXT pvmctxt, KERNADDR vmaStart, UINT32 ndxTTB, U
{
UINT32 cpgCurrent; /* number of pages we're mapping */
PPAGETAB pTab = NULL; /* pointer to page table */
PHYSADDR pa; /* temporary for physical address */
HRESULT hr; /* return from this function */
register INT32 i; /* loop counter */
@@ -222,8 +240,12 @@
{ /* we can kill off the whole section */
if (pvmctxt->pTTBAux[ndxTTB].aux.sacred && !(uiFlags & DEMAP_NOTHING_SACRED))
return MEMMGR_E_NOSACRED; /* can't demap a sacred mapping */
pa = pvmctxt->pTTB[ndxTTB].data & TTBSEC_BASE;
if (pvmctxt->pTTB[ndxTTB].sec.c)
_MmFlushCacheForSection(vmaStart, !(pvmctxt->pTTBAux[ndxTTB].aux.unwriteable));
if (g_pfnSetPTEAddr && !(pvmctxt->pTTBAux[ndxTTB].aux.notpage))
for (i = 0; i < SYS_SEC_PAGES; i++)
(*g_pfnSetPTEAddr)(mmPA2PageIndex(pa) + i, 0, FALSE);
pvmctxt->pTTB[ndxTTB].data = 0;
pvmctxt->pTTBAux[ndxTTB].data = 0;
_MmFlushTLBForSection(vmaStart);
@@ -242,6 +264,8 @@
{
if (pTab->pgtbl[ndxPage + i].pg.c) /* only flush cache if cacheable */
_MmFlushCacheForPage(vmaStart, !(pTab->pgaux[ndxPage + i].aux.unwriteable));
if (g_pfnSetPTEAddr && !(pTab->pgaux[ndxPage + i].aux.notpage))
(*g_pfnSetPTEAddr)(mmPA2PageIndex(pTab->pgtbl[ndxPage + i].data & PGTBLSM_PAGE), 0, FALSE);
pTab->pgtbl[ndxPage + i].data = 0;
pTab->pgaux[ndxPage + i].data = 0;
_MmFlushTLBForPage(vmaStart);
@@ -318,6 +342,11 @@ HRESULT MmDemapPages(PVMCTXT pvmctxt, KERNADDR vmaBase, UINT32 cpg)
return demap_pages0(resolve_vmctxt(pvmctxt, vmaBase), vmaBase, cpg, 0);
}
/*------------------------------------------------------
* Flag-morphing operations used for reflag and mapping
*------------------------------------------------------
*/
/*
* Morphs the "flags" bits used for a page table entry in the TTB and for a page entry in the page table
* into the "flags" bits used for a section entry in the TTB.
@@ -359,11 +388,245 @@
*/
static UINT32 make_section_aux_flags(UINT32 uiPageAuxFlags)
{
register UINT32 rc = uiPageAuxFlags & (PGAUX_SACRED|PGAUX_UNWRITEABLE|PGAUX_NOTPAGE);
/* TODO if we define any other flags */
return rc;
}
/*-------------------------
* Reflag operations group
*-------------------------
*/
/* Structure that defines flag operations on pages. */
typedef struct tagFLAG_OPERATIONS {
UINT32 uiTableFlags[2]; /* table flag alterations */
UINT32 uiPageFlags[2]; /* page flag alterations */
UINT32 uiAuxFlags[2]; /* auxiliary flag alterations */
} FLAG_OPERATIONS, *PFLAG_OPERATIONS;
typedef const FLAG_OPERATIONS *PCFLAG_OPERATIONS;
/* Reflag operation control bits. */
#define FLAGOP_TABLE_COPY0 0x00000001 /* copy uiTableFlags[0] to table flags */
#define FLAGOP_TABLE_SET0 0x00000002 /* set bits in uiTableFlags[0] in table flags */
#define FLAGOP_TABLE_CLEAR0 0x00000004 /* clear bits in uiTableFlags[0] in table flags */
#define FLAGOP_TABLE_CLEAR1 0x00000008 /* clear bits in uiTableFlags[1] in table flags */
#define FLAGOP_PAGE_COPY0 0x00000010 /* copy uiPageFlags[0] to page flags */
#define FLAGOP_PAGE_SET0 0x00000020 /* set bits in uiPageFlags[0] in page flags */
#define FLAGOP_PAGE_CLEAR0 0x00000040 /* clear bits in uiPageFlags[0] in page flags */
#define FLAGOP_PAGE_CLEAR1 0x00000080 /* clear bits in uiPageFlags[1] in page flags */
#define FLAGOP_AUX_COPY0 0x00000100 /* copy uiAuxFlags[0] to aux flags */
#define FLAGOP_AUX_SET0 0x00000200 /* set bits in uiAuxFlags[0] in aux flags */
#define FLAGOP_AUX_CLEAR0 0x00000400 /* clear bits in uiAuxFlags[0] in aux flags */
#define FLAGOP_AUX_CLEAR1 0x00000800 /* clear bits in uiAuxFlags[1] in aux flags */
#define FLAGOP_NOTHING_SACRED 0x80000000 /* reset bits even if marked "sacred" */
#define FLAGOP_PRECALCULATED 0x40000000 /* precalculation of set/clear masks already done */
/*
* Given a set of flag operations dictated by a FLAG_OPERATIONS structure and a set of control flags,
* turns them into another FLAG_OPERATIONS structure where the 0 element of each array represents bits
* to be cleared and the 1 element of each array represents bits to be set.
*
* Parameters:
* - pDest = Pointer to destination buffer. Will be filled with values by this function.
* - pSrc = Pointer to source buffer.
* - uiFlags = Control flags for the operation.
*
* Returns:
* Nothing.
*/
static void precalculate_masks(PFLAG_OPERATIONS pDest, PCFLAG_OPERATIONS pSrc, UINT32 uiFlags)
{
StrSetMem(pDest, 0, sizeof(FLAG_OPERATIONS));
/* Precalculate clear and set masks for table flags. */
if (uiFlags & FLAGOP_TABLE_COPY0)
pDest->uiTableFlags[0] = TTBPGTBL_SAFEFLAGS;
else if (uiFlags & FLAGOP_TABLE_CLEAR0)
pDest->uiTableFlags[0] = pSrc->uiTableFlags[0];
if (uiFlags & FLAGOP_TABLE_CLEAR1)
pDest->uiTableFlags[0] |= pSrc->uiTableFlags[1];
if (uiFlags & (FLAGOP_TABLE_COPY0|FLAGOP_TABLE_SET0))
pDest->uiTableFlags[1] = pSrc->uiTableFlags[0];
pDest->uiTableFlags[0] &= TTBPGTBL_SAFEFLAGS; /* restrict both masks to bits that are safe to alter */
pDest->uiTableFlags[1] &= TTBPGTBL_SAFEFLAGS;
/* Precalculate clear and set masks for page flags. */
if (uiFlags & FLAGOP_PAGE_COPY0)
pDest->uiPageFlags[0] = PGTBLSM_SAFEFLAGS;
else if (uiFlags & FLAGOP_PAGE_CLEAR0)
pDest->uiPageFlags[0] = pSrc->uiPageFlags[0];
if (uiFlags & FLAGOP_PAGE_CLEAR1)
pDest->uiPageFlags[0] |= pSrc->uiPageFlags[1];
if (uiFlags & (FLAGOP_PAGE_COPY0|FLAGOP_PAGE_SET0))
pDest->uiPageFlags[1] = pSrc->uiPageFlags[0];
pDest->uiPageFlags[0] &= PGTBLSM_SAFEFLAGS; /* restrict both masks to bits that are safe to alter */
pDest->uiPageFlags[1] &= PGTBLSM_SAFEFLAGS;
/* Precalculate clear and set masks for auxiliary flags. */
if (uiFlags & FLAGOP_AUX_COPY0)
pDest->uiAuxFlags[0] = PGAUX_SAFEFLAGS;
else if (uiFlags & FLAGOP_AUX_CLEAR0)
pDest->uiAuxFlags[0] = pSrc->uiAuxFlags[0];
if (uiFlags & FLAGOP_AUX_CLEAR1)
pDest->uiAuxFlags[0] |= pSrc->uiAuxFlags[1];
if (uiFlags & (FLAGOP_AUX_COPY0|FLAGOP_AUX_SET0))
pDest->uiAuxFlags[1] = pSrc->uiAuxFlags[0];
pDest->uiAuxFlags[0] &= PGAUX_SAFEFLAGS; /* restrict both masks to bits that are safe to alter */
pDest->uiAuxFlags[1] &= PGAUX_SAFEFLAGS;
}
/*
* Reflags page mapping entries within a single current entry in the TTB.
*
* Parameters:
* - pvmctxt = Pointer to the VM context.
* - vmaStart = The starting VMA of the region to reflag.
* - ndxTTB = Index in the TTB that we're manipulating.
* - ndxPage = Starting index in the page table of the first entry to reflag.
* - cpg = Count of the number of pages to reflag. Note that this function will not reflag more
* page mapping entries than remain on the page, as indicated by ndxPage.
* - ops = Flag operations, which should be precalculated.
* - uiFlags = Flags for operation, which should include FLAGOP_PRECALCULATED.
*
* Returns:
* Standard HRESULT success/failure. If the result is successful, the SCODE_CODE of the result will
* indicate the number of pages actually reflagged.
*
* Side effects:
* May modify the TTB entry/aux entry pointed to, and the page table it points to, where applicable.
*/
static HRESULT reflag_pages1(PVMCTXT pvmctxt, KERNADDR vmaStart, UINT32 ndxTTB, UINT32 ndxPage, UINT32 cpg,
PCFLAG_OPERATIONS ops, UINT32 uiFlags)
{
UINT32 cpgCurrent; /* number of pages we're reflagging */
PPAGETAB pTab = NULL; /* pointer to page table */
HRESULT hr; /* return from this function */
register INT32 i; /* loop counter */
BOOL bFlipSection = FALSE; /* are we flipping the entire section? */
UINT32 uiTemp; /* temporary for new table data */
ASSERT(uiFlags & FLAGOP_PRECALCULATED);
/* Figure out how many entries we're going to reflag. */
cpgCurrent = SYS_PGTBL_ENTRIES - ndxPage; /* total free slots on page */
if (cpg < cpgCurrent)
cpgCurrent = cpg; /* only reflag up to max requested */
hr = MAKE_SCODE(SEVERITY_SUCCESS, FACILITY_MEMMGR, cpgCurrent);
if (!(pvmctxt->pTTB[ndxTTB].data & TTBQUERY_MASK))
return hr; /* section not allocated - nothing to do */
if ((pvmctxt->pTTB[ndxTTB].data & TTBSEC_ALWAYS) && (cpgCurrent == SYS_PGTBL_ENTRIES) && (ndxPage == 0))
{ /* we can remap the section directly */
if (pvmctxt->pTTBAux[ndxTTB].aux.sacred && !(uiFlags & FLAGOP_NOTHING_SACRED))
return MEMMGR_E_NOSACRED; /* can't reflag a sacred mapping */
if (pvmctxt->pTTB[ndxTTB].sec.c)
_MmFlushCacheForSection(vmaStart, !(pvmctxt->pTTBAux[ndxTTB].aux.unwriteable));
pvmctxt->pTTB[ndxTTB].data = (pvmctxt->pTTB[ndxTTB].data
& ~make_section_flags(ops->uiTableFlags[0], ops->uiPageFlags[0]))
| make_section_flags(ops->uiTableFlags[1], ops->uiPageFlags[1]);
pvmctxt->pTTBAux[ndxTTB].data = (pvmctxt->pTTBAux[ndxTTB].data & ~make_section_aux_flags(ops->uiAuxFlags[0]))
| make_section_aux_flags(ops->uiAuxFlags[1]);
_MmFlushTLBForSection(vmaStart);
}
else if (pvmctxt->pTTB[ndxTTB].data & TTBPGTBL_ALWAYS)
{
pTab = resolve_pagetab(pvmctxt, pvmctxt->pTTB + ndxTTB);
if (!pTab)
return MEMMGR_E_NOPGTBL;
for (i = 0; i<cpgCurrent; i++)
{
if (pTab->pgaux[ndxPage + i].aux.sacred && !(uiFlags & FLAGOP_NOTHING_SACRED))
return MEMMGR_E_NOSACRED; /* can't reflag a sacred mapping */
}
/*
* If our remapping changes the table flags, then all the page table entries in this section that we're NOT
* changing had better be unallocated. If not, that's an error.
*/
uiTemp = (pvmctxt->pTTB[ndxTTB].data & ~(ops->uiTableFlags[0])) | ops->uiTableFlags[1];
if (pvmctxt->pTTB[ndxTTB].data != uiTemp)
{
for (i = 0; i < ndxPage; i++)
if (pTab->pgtbl[i].data & PGQUERY_MASK)
return MEMMGR_E_COLLIDED;
for (i = ndxPage + cpgCurrent; i < SYS_PGTBL_ENTRIES; i++)
if (pTab->pgtbl[i].data & PGQUERY_MASK)
return MEMMGR_E_COLLIDED;
bFlipSection = TRUE; /* flag it for later */
_MmFlushCacheForSection(mmIndices2VMA3(ndxTTB, 0, 0), !(pvmctxt->pTTBAux[ndxTTB].aux.unwriteable));
pvmctxt->pTTB[ndxTTB].data = uiTemp;
}
for (i = 0; i < cpgCurrent; i++)
{
if (!(pTab->pgtbl[ndxPage + i].data & PGQUERY_MASK))
continue; /* skip unallocated pages */
if (!bFlipSection && pTab->pgtbl[ndxPage + i].pg.c) /* only flush cache if cacheable */
_MmFlushCacheForPage(vmaStart, !(pTab->pgaux[ndxPage + i].aux.unwriteable));
pTab->pgtbl[ndxPage + i].data = (pTab->pgtbl[ndxPage + i].data & ~(ops->uiPageFlags[0])) | ops->uiPageFlags[1];
pTab->pgaux[ndxPage + i].data = (pTab->pgaux[ndxPage + i].data & ~(ops->uiAuxFlags[0])) | ops->uiAuxFlags[1];
if (!bFlipSection)
_MmFlushTLBForPage(vmaStart);
vmaStart += SYS_PAGE_SIZE;
}
if (bFlipSection)
_MmFlushTLBForSection(mmIndices2VMA3(ndxTTB, 0, 0));
}
return hr;
}
/*
* Reflags page mapping entries in the specified VM context.
*
* Parameters:
* - pvmctxt = Pointer to the VM context to use.
* - vmaBase = Base VM address of the region to reflag.
* - cpg = Count of the number of pages of memory to reflag.
* - ops = Flag operations structure.
* - uiFlags = Flags for operation.
*
* Returns:
* Standard HRESULT success/failure.
*/
static HRESULT reflag_pages0(PVMCTXT pvmctxt, KERNADDR vmaBase, UINT32 cpg, PCFLAG_OPERATIONS ops, UINT32 uiFlags)
{
UINT32 ndxTTB = mmVMA2TTBIndex(vmaBase); /* TTB entry index */
UINT32 ndxPage = mmVMA2PGTBLIndex(vmaBase); /* starting page entry index */
UINT32 cpgRemaining = cpg; /* number of pages remaining to reflag */
HRESULT hr; /* temporary result */
FLAG_OPERATIONS opsReal; /* real operations buffer (precalculated) */
if (!ops)
return E_POINTER;
if (uiFlags & FLAGOP_PRECALCULATED)
StrCopyMem(&opsReal, ops, sizeof(FLAG_OPERATIONS));
else
precalculate_masks(&opsReal, ops, uiFlags);
if ((cpgRemaining > 0) && (ndxPage > 0))
{ /* We are starting in the middle of a VM page. Reflag to the end of the VM page. */
hr = reflag_pages1(pvmctxt, vmaBase, ndxTTB, ndxPage, cpgRemaining, &opsReal, uiFlags|FLAGOP_PRECALCULATED);
if (FAILED(hr))
return hr;
cpgRemaining -= SCODE_CODE(hr);
if (++ndxTTB == pvmctxt->uiMaxIndex)
return MEMMGR_E_ENDTTB;
vmaBase = mmIndices2VMA3(ndxTTB, 0, 0);
}
while (cpgRemaining > 0)
{
hr = reflag_pages1(pvmctxt, vmaBase, ndxTTB, 0, cpgRemaining, &opsReal, uiFlags|FLAGOP_PRECALCULATED);
if (FAILED(hr))
return hr;
cpgRemaining -= SCODE_CODE(hr);
if (++ndxTTB == pvmctxt->uiMaxIndex)
return MEMMGR_E_ENDTTB;
vmaBase += SYS_SEC_SIZE;
}
return S_OK;
}
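A usage sketch (hypothetical caller, not in this commit): to mark a range of already-mapped pages unwriteable in the auxiliary tables, supply just the auxiliary bit to set and let precalculate_masks derive the clear/set masks; pages whose aux entries are marked sacred would bounce with MEMMGR_E_NOSACRED unless FLAGOP_NOTHING_SACRED is also passed.

/* hypothetical: set PGAUX_UNWRITEABLE across cpg pages starting at vmaBase */
static const FLAG_OPERATIONS opsMakeUnwriteable = {
  .uiAuxFlags = { PGAUX_UNWRITEABLE, 0 }
};
hr = reflag_pages0(pvmctxt, vmaBase, cpg, &opsMakeUnwriteable, FLAGOP_AUX_SET0);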
/* Flags for mapping. */
#define MAP_DONT_ALLOC 0x00000001 /* don't try to allocate new page tables */
@@ -401,9 +664,9 @@ static HRESULT alloc_page_table(PVMCTXT pvmctxt, PTTB pttbEntry, PTTBAUX pttbAux
if (rbtIsEmpty(&g_rbtFreePageTables))
{
if (!(uiFlags & MAP_DONT_ALLOC))
{ /* allocate a new page */
hr = MmAllocatePage(0, MPDBTAG_SYSTEM, MPDBSYS_PGTBL, &paNewPage);
if (SUCCEEDED(hr))
{ /* allocate kernel addresses to map it into */
kaNewPage = _MmAllocKernelAddr(1);
if (kaNewPage)
@@ -438,9 +701,9 @@
}
else
hr = MEMMGR_E_NOKERNSPC; /* no kernel space available */
if (FAILED(hr))
VERIFY(SUCCEEDED(MmFreePage(paNewPage, MPDBTAG_SYSTEM, MPDBSYS_PGTBL)));
}
}
else
hr = MEMMGR_E_RECURSED; /* recursive entry */
@@ -493,6 +756,7 @@ static HRESULT map_pages1(PVMCTXT pvmctxt, PHYSADDR paBase, UINT32 ndxTTB, UINT3
{
UINT32 cpgCurrent; /* number of pages we're mapping */
PPAGETAB pTab = NULL; /* pointer to current or new page table */
PHYSADDR paPTab; /* PA of the page table */
HRESULT hr; /* return from this function */
register INT32 i; /* loop counter */
@@ -502,6 +766,7 @@
hr = alloc_page_table(pvmctxt, pvmctxt->pTTB + ndxTTB, pvmctxt->pTTBAux + ndxTTB, uiTableFlags, uiFlags, &pTab);
if (FAILED(hr))
return hr;
paPTab = (PHYSADDR)(pvmctxt->pTTB[ndxTTB].data & TTBPGTBL_BASE);
break;
case TTBQUERY_PGTBL: /* existing page table */
@@ -510,6 +775,7 @@
pTab = resolve_pagetab(pvmctxt, pvmctxt->pTTB + ndxTTB);
if (!pTab)
return MEMMGR_E_NOPGTBL; /* could not map the page table */
paPTab = (PHYSADDR)(pvmctxt->pTTB[ndxTTB].data & TTBPGTBL_BASE);
break;
case TTBQUERY_SEC:
@@ -522,6 +788,7 @@
if ((pvmctxt->pTTB[ndxTTB].data & TTBSEC_BASE) != (paBase & TTBSEC_BASE))
return MEMMGR_E_COLLIDED;
pTab = NULL;
paPTab = pvmctxt->paTTB + (ndxTTB * sizeof(TTB));
break;
}
@@ -539,18 +806,25 @@
{
while (--i >= 0)
{ /* reverse any mapping we've done in this function */
if (g_pfnSetPTEAddr && !(uiAuxFlags & PGAUX_NOTPAGE))
(*g_pfnSetPTEAddr)(mmPA2PageIndex(pTab->pgtbl[ndxPage + i].data & PGTBLSM_PAGE), 0, FALSE);
pTab->pgtbl[ndxPage + i].data = 0;
pTab->pgaux[ndxPage + i].data = 0;
}
return MEMMGR_E_COLLIDED; /* stepping on existing mapping */
}
if (g_pfnSetPTEAddr && !(uiAuxFlags & PGAUX_NOTPAGE))
(*g_pfnSetPTEAddr)(mmPA2PageIndex(paBase), paPTab + ((ndxPage + i) * sizeof(PGTBL)), FALSE);
pTab->pgtbl[ndxPage + i].data = paBase | uiPageFlags;
pTab->pgaux[ndxPage + i].data = uiAuxFlags;
paBase += SYS_PAGE_SIZE;
}
}
else if (g_pfnSetPTEAddr && !(uiAuxFlags & PGAUX_NOTPAGE))
{
for (i=0; i < cpgCurrent; i++)
(*g_pfnSetPTEAddr)(mmPA2PageIndex(paBase & TTBSEC_BASE) + ndxPage + i, paPTab, TRUE);
}
return hr;
}
@@ -579,6 +853,7 @@ static HRESULT map_pages0(PVMCTXT pvmctxt, PHYSADDR paBase, KERNADDR vmaBase, UI
BOOL bCanMapBySection; /* can we map by section? */
UINT32 uiSecFlags = 0; /* section flags */
UINT32 uiSecAuxFlags = 0; /* section auxiliary flags */
register UINT32 i; /* loop counter */
HRESULT hr; /* temporary result */
if ((cpgRemaining > 0) && (ndxPage > 0))
@@ -595,6 +870,8 @@
goto errorExit;
}
}
if (cpgRemaining == 0)
return S_OK; /* bail out if we finished mapping in first stage */
bCanMapBySection = MAKEBOOL((cpgRemaining >= SYS_PGTBL_ENTRIES) && ((paBase & TTBSEC_BASE) == paBase));
if (bCanMapBySection)
@@ -610,6 +887,11 @@
switch (pvmctxt->pTTB[ndxTTB].data & TTBQUERY_MASK)
{
case TTBQUERY_FAULT: /* unmapped - map the section */
if (g_pfnSetPTEAddr && !(uiAuxFlags & PGAUX_NOTPAGE))
{
for (i = 0; i < SYS_SEC_PAGES; i++)
(*g_pfnSetPTEAddr)(mmPA2PageIndex(paBase) + i, pvmctxt->paTTB + (ndxTTB * sizeof(TTB)), TRUE);
}
pvmctxt->pTTB[ndxTTB].data = paBase | uiSecFlags;
pvmctxt->pTTBAux[ndxTTB].data = uiSecAuxFlags;
break;
@@ -749,6 +1031,9 @@ HRESULT MmDemapKernelPages(KERNADDR vmaBase, UINT32 cpg)
*---------------------
*/
/* External references to linker-defined symbols. */
extern char cpgPrestartTotal;
/*
* Initialize the virtual-memory mapping.
*
@@ -764,6 +1049,11 @@
*/
SEG_INIT_CODE void _MmInitVMMap(PSTARTUP_INFO pstartup, PMALLOC pmInitHeap)
{
SEG_INIT_DATA static FLAG_OPERATIONS opsReflagZeroPage = {
.uiTableFlags = { TTBPGTBL_SAFEFLAGS, TTBFLAGS_KERNEL_DATA },
.uiPageFlags = { PGTBLSM_SAFEFLAGS, PGTBLFLAGS_KERNEL_DATA, },
.uiAuxFlags = { PGAUX_SAFEFLAGS, PGAUXFLAGS_KERNEL_DATA|PGAUX_NOTPAGE }
};
PHYSADDR paPageTable; /* PA of current page table */
KERNADDR kaPageTable; /* KA of current page table */
PPAGENODE ppgn; /* pointer to node being allocated & inserted */
@@ -774,6 +1064,7 @@ SEG_INIT_CODE void _MmInitVMMap(PSTARTUP_INFO pstartup, PMALLOC pmInitHeap)
IUnknown_AddRef(g_pMalloc);
g_vmctxtKernel.pTTB = (PTTB)(pstartup->kaTTB);
g_vmctxtKernel.pTTBAux = (PTTBAUX)(pstartup->kaTTBAux);
g_vmctxtKernel.paTTB = pstartup->paTTB;
rbtInitTree(&(g_vmctxtKernel.rbtPageTables), RbtStdCompareByValue);
rbtInitTree(&g_rbtFreePageTables, RbtStdCompareByValue);
@@ -808,4 +1099,64 @@
paPageTable += SYS_PAGE_SIZE; /* advance to next page table page */
}
/*
* Undo the "temporary" low-memory mappings we created in the prestart code. But we keep the "zero page"
* in place, because that's where the exception handlers are. Note that these pages were not flagged as
* "sacred" at prestart time.
*/
VERIFY(SUCCEEDED(demap_pages0(&g_vmctxtKernel, SYS_PAGE_SIZE, (UINT32)(&cpgPrestartTotal) - 1, 0)));
VERIFY(SUCCEEDED(demap_pages0(&g_vmctxtKernel, PHYSADDR_IO_BASE, PAGE_COUNT_IO, 0)));
/* Reset page attributes on the zero page. */
VERIFY(SUCCEEDED(reflag_pages0(&g_vmctxtKernel, 0, 1, &opsReflagZeroPage,
FLAGOP_NOTHING_SACRED|FLAGOP_PRECALCULATED)));
}
/*
* Initialize the PTE mapping hook and the PTE mappings for all existing mapped pages.
*
* Parameters:
* - pfnSetPTEAddr = Pointer to the PTE mapping hook function. This function will be called multiple times
* to initialize the PTE mappings in the MPDB.
*
* Returns:
* Nothing.
*/
SEG_INIT_CODE void _MmInitPTEMappings(PFNSETPTEADDR pfnSetPTEAddr)
{
register UINT32 i, j; /* loop counters */
PHYSADDR paPTE; /* PA of the PTE */
PPAGETAB pTab; /* page table pointer */
g_pfnSetPTEAddr = pfnSetPTEAddr; /* set up hook function */
for (i = 0; i < SYS_TTB1_ENTRIES; i++)
{
switch (g_vmctxtKernel.pTTB[i].data & TTBQUERY_MASK)
{
case TTBQUERY_PGTBL:
/* walk page table and assign page table entry pointers to allocated entries */
paPTE = (PHYSADDR)(g_vmctxtKernel.pTTB[i].data & TTBPGTBL_BASE);
pTab = resolve_pagetab(&g_vmctxtKernel, g_vmctxtKernel.pTTB + i);
for (j = 0; j < SYS_PGTBL_ENTRIES; j++)
{ /* set PTE entry for each entry in turn */
if ((pTab->pgtbl[j].data & PGTBLSM_ALWAYS) && !(pTab->pgaux[j].aux.notpage))
(*pfnSetPTEAddr)(mmPA2PageIndex(pTab->pgtbl[j].data & PGTBLSM_PAGE), paPTE, FALSE);
paPTE += sizeof(PGTBL);
}
break;
case TTBQUERY_SEC:
case TTBQUERY_PXNSEC:
if (!(g_vmctxtKernel.pTTBAux[i].aux.notpage))
{ /* set PTE entry (actually pointer to TTB entry) for the entire section */
paPTE = g_vmctxtKernel.paTTB + (i * sizeof(TTB));
for (j = 0; j < SYS_SEC_PAGES; j++)
(*pfnSetPTEAddr)(mmPA2PageIndex(g_vmctxtKernel.pTTB[i].data & TTBSEC_BASE) + j, paPTE, TRUE);
}
break;
default:
break;
}
}
}