/*
 * This file is part of the COMROGUE Operating System for Raspberry Pi
 *
 * Copyright (c) 2013, Eric J. Bowersox / Erbosoft Enterprises
 * All rights reserved.
 *
 * This program is free for commercial and non-commercial use as long as the following conditions are
 * adhered to.
 *
 * Copyright in this file remains Eric J. Bowersox and/or Erbosoft, and as such any copyright notices
 * in the code are not to be removed.
 *
 * Redistribution and use in source and binary forms, with or without modification, are permitted
 * provided that the following conditions are met:
 *
 * * Redistributions of source code must retain the above copyright notice, this list of conditions and
 *   the following disclaimer.
 * * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and
 *   the following disclaimer in the documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 * PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * "Raspberry Pi" is a trademark of the Raspberry Pi Foundation.
 */
#include <comrogue/types.h>
#include <comrogue/scode.h>
#include <comrogue/str.h>
#include <comrogue/allocator.h>
#include <comrogue/internals/seg.h>
#include <comrogue/internals/layout.h>
#include <comrogue/internals/mmu.h>
#include <comrogue/internals/memmgr.h>
#include <comrogue/internals/rbtree.h>
#include <comrogue/internals/startup.h>
#include <comrogue/internals/trace.h>

#ifdef THIS_FILE
#undef THIS_FILE
DECLARE_THIS_FILE
#endif

/*-----------------------------------------------------------------------------------
 * Virtual-memory mapping code that is part of the COMROGUE memory management system
 *-----------------------------------------------------------------------------------
 */

static PMALLOC g_pMalloc = NULL;              /* allocator used */
static VMCTXT g_vmctxtKernel = {              /* kernel VM context */
  .pTTB = NULL,
  .pTTBAux = NULL,
  .uiMaxIndex = SYS_TTB1_ENTRIES,
  .paTTB = 0
};
static RBTREE g_rbtFreePageTables;            /* tree containing free page tables */
static PFNSETPTEADDR g_pfnSetPTEAddr = NULL;  /* hook function into page database */
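
/*
 * Page tables are tracked in red-black trees keyed by each table's physical address:
 * rbtPageTables in each VM context holds the tables in use, and g_rbtFreePageTables
 * holds tables available for reuse.  Keying by physical address lets a TTB entry
 * (which stores only a physical base address) be translated back into the
 * kernel-virtual pointer of the page table it references.
 */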

/*------------------------------
 * Inline resolution operations
 *------------------------------
 */

/*
 * Resolves a given page table reference for a TTB entry within a VM context.
 *
 * Parameters:
 * - pvmctxt = Pointer to the VM context.
 * - pTTBEntry = Pointer to the TTB entry containing the page table reference to resolve.
 *
 * Returns:
 * Pointer to the page table, or NULL if the reference could not be resolved.
 */
static inline PPAGETAB resolve_pagetab(PVMCTXT pvmctxt, PTTB pTTBEntry)
{
  register PPAGENODE ppgn = (PPAGENODE)RbtFind(&(pvmctxt->rbtPageTables), (TREEKEY)(pTTBEntry->data & TTBPGTBL_BASE));
  return ppgn ? ppgn->ppt : NULL;
}

/*
 * Resolves a specified VM context pointer to either itself or the kernel VM context, depending on whether one
 * was specified and on the virtual address to be worked with.
 *
 * Parameters:
 * - pvmctxt = The specified VM context pointer.
 * - vma = The base virtual address we're working with.
 *
 * Returns:
 * The pointer to the selected VM context, which may be a pointer to g_vmctxtKernel.
 */
static inline PVMCTXT resolve_vmctxt(PVMCTXT pvmctxt, KERNADDR vma)
{
  if (!pvmctxt || (vma & VMADDR_TTB_FENCE))
    return &g_vmctxtKernel;
  return pvmctxt;
}

/*-----------------------------------------
 * Virtual-to-physical functionality group
 *-----------------------------------------
 */

/*
 * Returns the physical address corresponding to a virtual memory address.
 *
 * Parameters:
 * - pvmctxt = The VM context to resolve the address against.
 * - vma = The virtual memory address to resolve.
 *
 * Returns:
 * The physical address corresponding to the virtual memory address, or NULL if the address could
 * not be resolved (is not mapped, or page table could not be mapped).
 */
static PHYSADDR virt_to_phys(PVMCTXT pvmctxt, KERNADDR vma)
{
  register PTTB pTTBEntry = pvmctxt->pTTB + mmVMA2TTBIndex(vma);  /* TTB entry pointer */
  register PPAGETAB pTab;                                         /* page table pointer */

  if ((pTTBEntry->data & TTBQUERY_MASK) == TTBQUERY_FAULT)
    return NULL;  /* we're not allocated */
  if (pTTBEntry->data & TTBSEC_ALWAYS)
    return (pTTBEntry->data & TTBSEC_BASE) | (vma & ~TTBSEC_BASE);  /* resolve section address */

  pTab = resolve_pagetab(pvmctxt, pTTBEntry);
  if (!pTab)
    return NULL;  /* could not map the page table */
  return (pTab->pgtbl[mmVMA2PGTBLIndex(vma)].pg.pgaddr << SYS_PAGE_BITS) | (vma & (SYS_PAGE_SIZE - 1));
}
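
/*
 * Translation sketch (assuming the standard ARM short-descriptor split used throughout
 * this module): a 32-bit VMA breaks down as bits [31:20] = TTB index (1Mb per entry),
 * bits [19:12] = page-table index (256 entries per table), bits [11:0] = byte offset
 * within a 4Kb page.  For example, VMA 0x00123456 would use TTB index 0x001,
 * page-table index 0x23, and offset 0x456.
 */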

/*
 * Returns the physical address corresponding to a virtual memory address.
 *
 * Parameters:
 * - pvmctxt = The VM context to resolve the address against.  If this is NULL or the address specified
 *             is above the TTB0 fence, the kernel VM context is used.
 * - vma = The virtual memory address to resolve.
 *
 * Returns:
 * The physical address corresponding to the virtual memory address, or NULL if the address could
 * not be resolved (is not mapped, or page table could not be mapped).
 */
PHYSADDR MmGetPhysAddr(PVMCTXT pvmctxt, KERNADDR vma)
{
  return virt_to_phys(resolve_vmctxt(pvmctxt, vma), vma);
}

/*---------------------------
 * Demap functionality group
 *---------------------------
 */

/*
 * Determines whether or not the specified page table is empty.
 *
 * Parameters:
 * - ppgt = Pointer to the page table.
 *
 * Returns:
 * TRUE if the page table is empty, FALSE otherwise.
 */
static BOOL is_pagetable_empty(PPAGETAB ppgt)
{
  register UINT32 i;  /* loop counter */

  for (i = 0; i < SYS_PGTBL_ENTRIES; i++)
    if ((ppgt->pgtbl[i].data & PGQUERY_MASK) != PGQUERY_FAULT)
      return FALSE;
  return TRUE;
}

/*
 * Frees a page table by returning it to the free list.
 *
 * Parameters:
 * - pvmctxt = Pointer to the VM context.
 * - ppgt = Pointer to the page table to be freed.
 *
 * Returns:
 * Nothing.
 *
 * Side effects:
 * May modify the VM context's page-table tree and g_rbtFreePageTables.
 */
static void free_page_table(PVMCTXT pvmctxt, PPAGETAB ppgt)
{
  PHYSADDR pa = virt_to_phys(pvmctxt, (KERNADDR)ppgt);
  PPAGENODE ppgn = (PPAGENODE)RbtFind(&(pvmctxt->rbtPageTables), (TREEKEY)pa);
  if (ppgn)
  {
    RbtDelete(&(pvmctxt->rbtPageTables), (TREEKEY)pa);
    rbtNewNode(&(ppgn->rbtn), ppgn->rbtn.treekey);
    RbtInsert(&g_rbtFreePageTables, (PRBTREENODE)ppgn);
  }
}
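
/*
 * A freed table keeps its kernel-virtual mapping and its physical-address tree key;
 * the node is simply reinitialized and moved over to g_rbtFreePageTables, so a later
 * alloc_page_table() can recycle it without remapping anything.
 */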

/* Flags for demapping. */
#define DEMAP_NOTHING_SACRED  0x00000001   /* disregard "sacred" flag */

/*
 * Deallocates page mapping entries within a single entry in the TTB.
 *
 * Parameters:
 * - pvmctxt = Pointer to the VM context.
 * - vmaStart = The starting VMA of the region to demap.
 * - ndxTTB = Index in the TTB that we're manipulating.
 * - ndxPage = Starting index in the page table of the first entry to deallocate.
 * - cpg = Count of the number of pages to deallocate.  Note that this function will not deallocate more
 *         page mapping entries than remain on the page, as indicated by ndxPage.
 * - uiFlags = Flags for operation.
 *
 * Returns:
 * Standard HRESULT success/failure.  If the result is successful, the SCODE_CODE of the result will
 * indicate the number of pages actually deallocated.
 *
 * Side effects:
 * May modify the TTB entry/aux entry pointed to, and the page table it points to, where applicable.  If the
 * page table is empty after we finish demapping entries, it may be deallocated.
 */
static HRESULT demap_pages1(PVMCTXT pvmctxt, KERNADDR vmaStart, UINT32 ndxTTB, UINT32 ndxPage, UINT32 cpg,
                            UINT32 uiFlags)
{
  UINT32 cpgCurrent;     /* number of pages we're demapping */
  PPAGETAB pTab = NULL;  /* pointer to page table */
  PHYSADDR pa;           /* temporary for physical address */
  HRESULT hr;            /* return from this function */
  register INT32 i;      /* loop counter */

  /* Figure out how many entries we're going to demap. */
  cpgCurrent = SYS_PGTBL_ENTRIES - ndxPage;  /* total free slots on page */
  if (cpg < cpgCurrent)
    cpgCurrent = cpg;  /* only demap up to max requested */
  hr = MAKE_SCODE(SEVERITY_SUCCESS, FACILITY_MEMMGR, cpgCurrent);

  if ((pvmctxt->pTTB[ndxTTB].data & TTBSEC_ALWAYS) && (cpgCurrent == SYS_PGTBL_ENTRIES) && (ndxPage == 0))
  { /* we can kill off the whole section */
    if (pvmctxt->pTTBAux[ndxTTB].aux.sacred && !(uiFlags & DEMAP_NOTHING_SACRED))
      return MEMMGR_E_NOSACRED;  /* can't demap a sacred mapping */
    pa = pvmctxt->pTTB[ndxTTB].data & TTBSEC_BASE;
    if (pvmctxt->pTTB[ndxTTB].sec.c)
      _MmFlushCacheForSection(vmaStart, !(pvmctxt->pTTBAux[ndxTTB].aux.unwriteable));
    if (g_pfnSetPTEAddr && !(pvmctxt->pTTBAux[ndxTTB].aux.notpage))
      for (i = 0; i < SYS_SEC_PAGES; i++)
        (*g_pfnSetPTEAddr)(mmPA2PageIndex(pa) + i, 0, FALSE);
    pvmctxt->pTTB[ndxTTB].data = 0;
    pvmctxt->pTTBAux[ndxTTB].data = 0;
    _MmFlushTLBForSection(vmaStart);
  }
  else if (pvmctxt->pTTB[ndxTTB].data & TTBPGTBL_ALWAYS)
  {
    pTab = resolve_pagetab(pvmctxt, pvmctxt->pTTB + ndxTTB);
    if (!pTab)
      return MEMMGR_E_NOPGTBL;
    for (i = 0; i < cpgCurrent; i++)
    {
      if (pTab->pgaux[ndxPage + i].aux.sacred && !(uiFlags & DEMAP_NOTHING_SACRED))
        return MEMMGR_E_NOSACRED;  /* can't demap a sacred mapping */
    }
    for (i = 0; i < cpgCurrent; i++)
    {
      if (pTab->pgtbl[ndxPage + i].pg.c)  /* only flush cache if cacheable */
        _MmFlushCacheForPage(vmaStart, !(pTab->pgaux[ndxPage + i].aux.unwriteable));
      if (g_pfnSetPTEAddr && !(pTab->pgaux[ndxPage + i].aux.notpage))
        (*g_pfnSetPTEAddr)(mmPA2PageIndex(pTab->pgtbl[ndxPage + i].data & PGTBLSM_PAGE), 0, FALSE);
      pTab->pgtbl[ndxPage + i].data = 0;
      pTab->pgaux[ndxPage + i].data = 0;
      _MmFlushTLBForPage(vmaStart);
      vmaStart += SYS_PAGE_SIZE;
    }
    if (is_pagetable_empty(pTab))
    { /* The page table is now empty; demap it and put it on our free list. */
      pvmctxt->pTTB[ndxTTB].data = 0;
      pvmctxt->pTTBAux[ndxTTB].data = 0;
      free_page_table(pvmctxt, pTab);
      _MmFlushTLBForSection(mmIndices2VMA3(ndxTTB, 0, 0));
    }
  }
  return hr;
}

/*
 * Deallocates page mapping entries in the specified VM context.
 *
 * Parameters:
 * - pvmctxt = Pointer to the VM context to use.
 * - vmaBase = Base VM address of the region to demap.
 * - cpg = Count of the number of pages of memory to demap.
 * - uiFlags = Flags for operation.
 *
 * Returns:
 * Standard HRESULT success/failure.
 */
static HRESULT demap_pages0(PVMCTXT pvmctxt, KERNADDR vmaBase, UINT32 cpg, UINT32 uiFlags)
{
  UINT32 ndxTTB = mmVMA2TTBIndex(vmaBase);     /* TTB entry index */
  UINT32 ndxPage = mmVMA2PGTBLIndex(vmaBase);  /* starting page entry index */
  UINT32 cpgRemaining = cpg;                   /* number of pages remaining to demap */
  HRESULT hr;                                  /* temporary result */

  if ((cpgRemaining > 0) && (ndxPage > 0))
  { /* We are starting in the middle of a section's page table.  Demap up to the end of that section. */
    hr = demap_pages1(pvmctxt, vmaBase, ndxTTB, ndxPage, cpgRemaining, uiFlags);
    if (FAILED(hr))
      return hr;
    cpgRemaining -= SCODE_CODE(hr);
    if (++ndxTTB == pvmctxt->uiMaxIndex)
      return MEMMGR_E_ENDTTB;
    vmaBase = mmIndices2VMA3(ndxTTB, 0, 0);
  }

  while (cpgRemaining > 0)
  {
    hr = demap_pages1(pvmctxt, vmaBase, ndxTTB, 0, cpgRemaining, uiFlags);
    if (FAILED(hr))
      return hr;
    cpgRemaining -= SCODE_CODE(hr);
    if (++ndxTTB == pvmctxt->uiMaxIndex)
      return MEMMGR_E_ENDTTB;
    vmaBase += SYS_SEC_SIZE;
  }
  return S_OK;
}

/*
 * Deallocates page mapping entries in the specified VM context.
 *
 * Parameters:
 * - pvmctxt = Pointer to the VM context to use.  If this is NULL or the vmaBase address specified is
 *             above the TTB0 fence, the kernel VM context is used.
 * - vmaBase = Base VM address of the region to demap.
 * - cpg = Count of the number of pages of memory to demap.
 *
 * Returns:
 * Standard HRESULT success/failure.
 */
HRESULT MmDemapPages(PVMCTXT pvmctxt, KERNADDR vmaBase, UINT32 cpg)
{
  return demap_pages0(resolve_vmctxt(pvmctxt, vmaBase), vmaBase, cpg, 0);
}
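
/*
 * Usage sketch (kaBuf is a hypothetical kernel address previously mapped with
 * MmMapPages); passing NULL for the context selects the kernel VM context via
 * resolve_vmctxt():
 *
 *   HRESULT hr = MmDemapPages(NULL, kaBuf, 4);   -- demap four pages
 */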

/*------------------------------------------------------
 * Flag-morphing operations used for reflag and mapping
 *------------------------------------------------------
 */

/*
 * Morphs the "flags" bits used for a page table entry in the TTB and for a page entry in the page table
 * into the "flags" bits used for a section entry in the TTB.
 *
 * Parameters:
 * - uiTableFlags = Flag bits that would be used for a page table entry in the TTB.
 * - uiPageFlags = Flag bits that would be used for a page entry in the page table.
 *
 * Returns:
 * The flag bits that would be used for a section entry in the TTB.  If a bit or option is set
 * in either uiTableFlags or uiPageFlags, it will be set in the appropriate place in the result.
 */
static UINT32 make_section_flags(UINT32 uiTableFlags, UINT32 uiPageFlags)
{
  register UINT32 rc = TTBSEC_ALWAYS;
  rc |= ((uiTableFlags & TTBPGTBL_PXN) >> 2);
  rc |= ((uiTableFlags & TTBPGTBL_NS) << 16);
  rc |= (uiTableFlags & TTBPGTBL_DOM_MASK);
  rc |= (uiTableFlags & TTBPGTBL_P);
  rc |= ((uiPageFlags & PGTBLSM_XN) << 4);
  rc |= (uiPageFlags & PGTBLSM_B);
  rc |= (uiPageFlags & PGTBLSM_C);
  rc |= ((uiPageFlags & PGTBLSM_AP) << 6);
  rc |= ((uiPageFlags & PGTBLSM_TEX) << 6);
  rc |= ((uiPageFlags & PGTBLSM_APX) << 6);
  rc |= ((uiPageFlags & PGTBLSM_S) << 6);
  rc |= ((uiPageFlags & PGTBLSM_NG) << 6);
  return rc;
}
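
/*
 * The shift counts above reflect how the ARM short-descriptor formats place the same
 * attribute at different bit positions in first-level section entries versus
 * second-level small-page entries (e.g. AP occupies bits [5:4] of a small-page entry
 * but bits [11:10] of a section entry, hence the << 6).  This assumes the standard
 * ARMv6/v7 descriptor layouts behind the PGTBLSM_* and TTBSEC_* constants.
 */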

/*
 * Morphs the "auxiliary flags" bits used for a page table entry into "auxiliary flags" used for a TTB entry.
 *
 * Parameters:
 * - uiPageAuxFlags = Page auxiliary flag bits that would be used for a page table entry.
 *
 * Returns:
 * TTB auxiliary flag bits that would be used for a TTB entry.
 */
static UINT32 make_section_aux_flags(UINT32 uiPageAuxFlags)
{
  register UINT32 rc = uiPageAuxFlags & (PGAUX_SACRED|PGAUX_UNWRITEABLE|PGAUX_NOTPAGE);
  /* TODO if we define any other flags */
  return rc;
}

/*-------------------------
 * Reflag operations group
 *-------------------------
 */

/* Structure that defines flag operations on pages. */
typedef struct tagFLAG_OPERATIONS {
  UINT32 uiTableFlags[2];  /* table flag alterations */
  UINT32 uiPageFlags[2];   /* page flag alterations */
  UINT32 uiAuxFlags[2];    /* auxiliary flag alterations */
} FLAG_OPERATIONS, *PFLAG_OPERATIONS;
typedef const FLAG_OPERATIONS *PCFLAG_OPERATIONS;
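
/*
 * In "precalculated" form (see precalculate_masks below), element [0] of each array
 * holds the bits to be cleared and element [1] the bits to be set; reflag_pages1
 * consumes the structure in that form only.
 */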

/* Reflag operation control bits. */
#define FLAGOP_TABLE_COPY0     0x00000001  /* copy uiTableFlags[0] to table flags */
#define FLAGOP_TABLE_SET0      0x00000002  /* set bits in uiTableFlags[0] in table flags */
#define FLAGOP_TABLE_CLEAR0    0x00000004  /* clear bits in uiTableFlags[0] in table flags */
#define FLAGOP_TABLE_CLEAR1    0x00000008  /* clear bits in uiTableFlags[1] in table flags */
#define FLAGOP_PAGE_COPY0      0x00000010  /* copy uiPageFlags[0] to page flags */
#define FLAGOP_PAGE_SET0       0x00000020  /* set bits in uiPageFlags[0] in page flags */
#define FLAGOP_PAGE_CLEAR0     0x00000040  /* clear bits in uiPageFlags[0] in page flags */
#define FLAGOP_PAGE_CLEAR1     0x00000080  /* clear bits in uiPageFlags[1] in page flags */
#define FLAGOP_AUX_COPY0       0x00000100  /* copy uiAuxFlags[0] to aux flags */
#define FLAGOP_AUX_SET0        0x00000200  /* set bits in uiAuxFlags[0] in aux flags */
#define FLAGOP_AUX_CLEAR0      0x00000400  /* clear bits in uiAuxFlags[0] in aux flags */
#define FLAGOP_AUX_CLEAR1      0x00000800  /* clear bits in uiAuxFlags[1] in aux flags */
#define FLAGOP_NOTHING_SACRED  0x80000000  /* reset bits even if marked "sacred" */
#define FLAGOP_PRECALCULATED   0x40000000  /* precalculation of set/clear masks already done */

/*
 * Given a set of flag operations dictated by a FLAG_OPERATIONS structure and a set of control flags,
 * turns them into another FLAG_OPERATIONS structure where the 0 element of each array represents bits
 * to be cleared and the 1 element of each array represents bits to be set.
 *
 * Parameters:
 * - pDest = Pointer to destination buffer.  Will be filled with values by this function.
 * - pSrc = Pointer to source buffer.
 * - uiFlags = Control flags for the operation.
 *
 * Returns:
 * Nothing.
 */
static void precalculate_masks(PFLAG_OPERATIONS pDest, PCFLAG_OPERATIONS pSrc, UINT32 uiFlags)
{
  StrSetMem(pDest, 0, sizeof(FLAG_OPERATIONS));

  /* Precalculate clear and set masks for table flags. */
  if (uiFlags & FLAGOP_TABLE_COPY0)
    pDest->uiTableFlags[0] = TTBPGTBL_SAFEFLAGS;
  else if (uiFlags & FLAGOP_TABLE_CLEAR0)
    pDest->uiTableFlags[0] = pSrc->uiTableFlags[0];
  if (uiFlags & FLAGOP_TABLE_CLEAR1)
    pDest->uiTableFlags[0] |= pSrc->uiTableFlags[1];
  if (uiFlags & (FLAGOP_TABLE_COPY0|FLAGOP_TABLE_SET0))
    pDest->uiTableFlags[1] = pSrc->uiTableFlags[0];
  pDest->uiTableFlags[0] &= ~TTBPGTBL_SAFEFLAGS;
  pDest->uiTableFlags[1] &= ~TTBPGTBL_SAFEFLAGS;

  /* Precalculate clear and set masks for page flags. */
  if (uiFlags & FLAGOP_PAGE_COPY0)
    pDest->uiPageFlags[0] = PGTBLSM_SAFEFLAGS;
  else if (uiFlags & FLAGOP_PAGE_CLEAR0)
    pDest->uiPageFlags[0] = pSrc->uiPageFlags[0];
  if (uiFlags & FLAGOP_PAGE_CLEAR1)
    pDest->uiPageFlags[0] |= pSrc->uiPageFlags[1];
  if (uiFlags & (FLAGOP_PAGE_COPY0|FLAGOP_PAGE_SET0))
    pDest->uiPageFlags[1] = pSrc->uiPageFlags[0];
  pDest->uiPageFlags[0] &= ~PGTBLSM_SAFEFLAGS;
  pDest->uiPageFlags[1] &= ~PGTBLSM_SAFEFLAGS;

  /* Precalculate clear and set masks for auxiliary flags. */
  if (uiFlags & FLAGOP_AUX_COPY0)
    pDest->uiAuxFlags[0] = PGAUX_SAFEFLAGS;
  else if (uiFlags & FLAGOP_AUX_CLEAR0)
    pDest->uiAuxFlags[0] = pSrc->uiAuxFlags[0];
  if (uiFlags & FLAGOP_AUX_CLEAR1)
    pDest->uiAuxFlags[0] |= pSrc->uiAuxFlags[1];
  if (uiFlags & (FLAGOP_AUX_COPY0|FLAGOP_AUX_SET0))
    pDest->uiAuxFlags[1] = pSrc->uiAuxFlags[0];
  pDest->uiAuxFlags[0] &= ~PGAUX_SAFEFLAGS;
  pDest->uiAuxFlags[1] &= ~PGAUX_SAFEFLAGS;
}

/*
 * Reflags page mapping entries within a single entry in the TTB.
 *
 * Parameters:
 * - pvmctxt = Pointer to the VM context.
 * - vmaStart = The starting VMA of the region to reflag.
 * - ndxTTB = Index in the TTB that we're manipulating.
 * - ndxPage = Starting index in the page table of the first entry to reflag.
 * - cpg = Count of the number of pages to reflag.  Note that this function will not reflag more
 *         page mapping entries than remain on the page, as indicated by ndxPage.
 * - ops = Flag operations, which should be precalculated.
 * - uiFlags = Flags for operation, which should include FLAGOP_PRECALCULATED.
 *
 * Returns:
 * Standard HRESULT success/failure.  If the result is successful, the SCODE_CODE of the result will
 * indicate the number of pages actually reflagged.
 *
 * Side effects:
 * May modify the TTB entry/aux entry pointed to, and the page table it points to, where applicable.
 */
static HRESULT reflag_pages1(PVMCTXT pvmctxt, KERNADDR vmaStart, UINT32 ndxTTB, UINT32 ndxPage, UINT32 cpg,
                             PCFLAG_OPERATIONS ops, UINT32 uiFlags)
{
  UINT32 cpgCurrent;          /* number of pages we're reflagging */
  PPAGETAB pTab = NULL;       /* pointer to page table */
  HRESULT hr;                 /* return from this function */
  register INT32 i;           /* loop counter */
  BOOL bFlipSection = FALSE;  /* are we flipping the entire section? */
  UINT32 uiTemp;              /* temporary for new table data */

  ASSERT(uiFlags & FLAGOP_PRECALCULATED);

  /* Figure out how many entries we're going to reflag. */
  cpgCurrent = SYS_PGTBL_ENTRIES - ndxPage;  /* total free slots on page */
  if (cpg < cpgCurrent)
    cpgCurrent = cpg;  /* only reflag up to max requested */
  hr = MAKE_SCODE(SEVERITY_SUCCESS, FACILITY_MEMMGR, cpgCurrent);

  if (!(pvmctxt->pTTB[ndxTTB].data & TTBQUERY_MASK))
    return hr;  /* section not allocated - nothing to do */

  if ((pvmctxt->pTTB[ndxTTB].data & TTBSEC_ALWAYS) && (cpgCurrent == SYS_PGTBL_ENTRIES) && (ndxPage == 0))
  { /* we can remap the section directly */
    if (pvmctxt->pTTBAux[ndxTTB].aux.sacred && !(uiFlags & FLAGOP_NOTHING_SACRED))
      return MEMMGR_E_NOSACRED;  /* can't reflag a sacred mapping */
    if (pvmctxt->pTTB[ndxTTB].sec.c)
      _MmFlushCacheForSection(vmaStart, !(pvmctxt->pTTBAux[ndxTTB].aux.unwriteable));
    pvmctxt->pTTB[ndxTTB].data = (pvmctxt->pTTB[ndxTTB].data
                                  & ~make_section_flags(ops->uiTableFlags[0], ops->uiPageFlags[0]))
                                 | make_section_flags(ops->uiTableFlags[1], ops->uiPageFlags[1]);
    pvmctxt->pTTBAux[ndxTTB].data = (pvmctxt->pTTBAux[ndxTTB].data & ~make_section_aux_flags(ops->uiAuxFlags[0]))
                                    | make_section_aux_flags(ops->uiAuxFlags[1]);
    _MmFlushTLBForSection(vmaStart);
  }
  else if (pvmctxt->pTTB[ndxTTB].data & TTBPGTBL_ALWAYS)
  {
    pTab = resolve_pagetab(pvmctxt, pvmctxt->pTTB + ndxTTB);
    if (!pTab)
      return MEMMGR_E_NOPGTBL;
    for (i = 0; i < cpgCurrent; i++)
    {
      if (pTab->pgaux[ndxPage + i].aux.sacred && !(uiFlags & FLAGOP_NOTHING_SACRED))
        return MEMMGR_E_NOSACRED;  /* can't reflag a sacred mapping */
    }
    /*
     * If our remapping changes the table flags, then all the page table entries in this section that we're NOT
     * changing had better be unallocated.  If not, that's an error.
     */
    uiTemp = (pvmctxt->pTTB[ndxTTB].data & ~(ops->uiTableFlags[0])) | ops->uiTableFlags[1];
    if (pvmctxt->pTTB[ndxTTB].data != uiTemp)
    {
      for (i = 0; i < ndxPage; i++)
        if (pTab->pgtbl[i].data & PGQUERY_MASK)
          return MEMMGR_E_COLLIDED;
      for (i = ndxPage + cpgCurrent; i < SYS_PGTBL_ENTRIES; i++)
        if (pTab->pgtbl[i].data & PGQUERY_MASK)
          return MEMMGR_E_COLLIDED;
      bFlipSection = TRUE;  /* flag it for later */
      _MmFlushCacheForSection(mmIndices2VMA3(ndxTTB, 0, 0), !(pvmctxt->pTTBAux[ndxTTB].aux.unwriteable));
      pvmctxt->pTTB[ndxTTB].data = uiTemp;
    }
    for (i = 0; i < cpgCurrent; i++)
    {
      if (!(pTab->pgtbl[ndxPage + i].data & PGQUERY_MASK))
        continue;  /* skip unallocated pages */
      if (!bFlipSection && pTab->pgtbl[ndxPage + i].pg.c)  /* only flush cache if cacheable */
        _MmFlushCacheForPage(vmaStart, !(pTab->pgaux[ndxPage + i].aux.unwriteable));
      pTab->pgtbl[ndxPage + i].data = (pTab->pgtbl[ndxPage + i].data & ~(ops->uiPageFlags[0])) | ops->uiPageFlags[1];
      pTab->pgaux[ndxPage + i].data = (pTab->pgaux[ndxPage + i].data & ~(ops->uiAuxFlags[0])) | ops->uiAuxFlags[1];
      if (!bFlipSection)
        _MmFlushTLBForPage(vmaStart);
      vmaStart += SYS_PAGE_SIZE;
    }
    if (bFlipSection)
      _MmFlushTLBForSection(mmIndices2VMA3(ndxTTB, 0, 0));
  }
  return hr;
}

/*
 * Reflags page mapping entries in the specified VM context.
 *
 * Parameters:
 * - pvmctxt = Pointer to the VM context to use.
 * - vmaBase = Base VM address of the region to reflag.
 * - cpg = Count of the number of pages of memory to reflag.
 * - ops = Flag operations structure.
 * - uiFlags = Flags for operation.
 *
 * Returns:
 * Standard HRESULT success/failure.
 */
static HRESULT reflag_pages0(PVMCTXT pvmctxt, KERNADDR vmaBase, UINT32 cpg, PCFLAG_OPERATIONS ops, UINT32 uiFlags)
{
  UINT32 ndxTTB = mmVMA2TTBIndex(vmaBase);     /* TTB entry index */
  UINT32 ndxPage = mmVMA2PGTBLIndex(vmaBase);  /* starting page entry index */
  UINT32 cpgRemaining = cpg;                   /* number of pages remaining to reflag */
  HRESULT hr;                                  /* temporary result */
  FLAG_OPERATIONS opsReal;                     /* real operations buffer (precalculated) */

  if (!ops)
    return E_POINTER;
  if (uiFlags & FLAGOP_PRECALCULATED)
    StrCopyMem(&opsReal, ops, sizeof(FLAG_OPERATIONS));
  else
    precalculate_masks(&opsReal, ops, uiFlags);

  if ((cpgRemaining > 0) && (ndxPage > 0))
  { /* We are starting in the middle of a section's page table.  Reflag up to the end of that section. */
    hr = reflag_pages1(pvmctxt, vmaBase, ndxTTB, ndxPage, cpgRemaining, &opsReal, uiFlags|FLAGOP_PRECALCULATED);
    if (FAILED(hr))
      return hr;
    cpgRemaining -= SCODE_CODE(hr);
    if (++ndxTTB == pvmctxt->uiMaxIndex)
      return MEMMGR_E_ENDTTB;
    vmaBase = mmIndices2VMA3(ndxTTB, 0, 0);
  }

  while (cpgRemaining > 0)
  {
    hr = reflag_pages1(pvmctxt, vmaBase, ndxTTB, 0, cpgRemaining, &opsReal, uiFlags|FLAGOP_PRECALCULATED);
    if (FAILED(hr))
      return hr;
    cpgRemaining -= SCODE_CODE(hr);
    if (++ndxTTB == pvmctxt->uiMaxIndex)
      return MEMMGR_E_ENDTTB;
    vmaBase += SYS_SEC_SIZE;
  }
  return S_OK;
}

/* Flags for mapping. */
#define MAP_DONT_ALLOC  0x00000001   /* don't try to allocate new page tables */

/* Forward declaration. */
static HRESULT map_pages0(PVMCTXT pvmctxt, PHYSADDR paBase, KERNADDR vmaBase, UINT32 cpg, UINT32 uiTableFlags,
                          UINT32 uiPageFlags, UINT32 uiAuxFlags, UINT32 uiFlags);
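
/*
 * Page tables are allocated two to a physical page: a PAGETAB (with its auxiliary
 * entries) is half a page, so each freshly-allocated page yields one table for
 * immediate use plus one that goes on the free list.  See the paNewPage + sizeof(PAGETAB)
 * and ((PPAGETAB)kaNewPage) + 1 arithmetic below, and the matching logic in _MmInitVMMap.
 */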

/*
 * Allocates a new page table and associates it with the given TTB entry.
 *
 * Parameters:
 * - pvmctxt = Pointer to the VM context.
 * - pttbEntry = Pointer to the TTB entry.  On successful return, this will be updated.
 * - pttbAuxEntry = Pointer to the TTB auxiliary table entry.  On successful return, this will be updated.
 * - uiTableFlags = Flags to apply to the TTB entry.
 * - uiFlags = Flags for the mapping operation.
 * - pppt = Pointer to variable to receive new page table pointer.
 *
 * Returns:
 * Standard HRESULT success/failure.
 *
 * Side effects:
 * The new page table is erased before it is returned.  May modify the VM context's page-table tree and
 * g_rbtFreePageTables.  May also allocate a new page of memory.
 */
static HRESULT alloc_page_table(PVMCTXT pvmctxt, PTTB pttbEntry, PTTBAUX pttbAuxEntry, UINT32 uiTableFlags,
                                UINT32 uiFlags, PPAGETAB *pppt)
{
  register PPAGENODE ppgn = NULL;  /* page node pointer */
  PPAGENODE ppgnFree;              /* additional pointer for new "free" entry */
  HRESULT hr = S_OK;               /* return from this function */
  PHYSADDR paNewPage = 0;          /* physical address of new page */
  KERNADDR kaNewPage = 0;          /* kernel address of new page */

  if (rbtIsEmpty(&g_rbtFreePageTables))
  {
    if (!(uiFlags & MAP_DONT_ALLOC))
    { /* allocate a new page */
      hr = MmAllocatePage(0, MPDBTAG_SYSTEM, MPDBSYS_PGTBL, &paNewPage);
      if (SUCCEEDED(hr))
      { /* allocate kernel addresses to map it into */
        kaNewPage = _MmAllocKernelAddr(1);
        if (kaNewPage)
        { /* map the new page in */
          hr = map_pages0(pvmctxt, paNewPage, kaNewPage, 1, TTBFLAGS_KERNEL_DATA, PGTBLFLAGS_KERNEL_DATA,
                          PGAUXFLAGS_KERNEL_DATA, MAP_DONT_ALLOC);
          if (SUCCEEDED(hr))
          { /* allocate heap memory for two nodes to describe the page tables */
            ppgnFree = IMalloc_Alloc(g_pMalloc, sizeof(PAGENODE));
            if (ppgnFree)
              ppgn = IMalloc_Alloc(g_pMalloc, sizeof(PAGENODE));
            if (ppgnFree && ppgn)
            { /* prepare the new nodes and insert them in their respective trees */
              rbtNewNode(&(ppgnFree->rbtn), paNewPage + sizeof(PAGETAB));
              ppgnFree->ppt = ((PPAGETAB)kaNewPage) + 1;
              RbtInsert(&g_rbtFreePageTables, (PRBTREENODE)ppgnFree);
              rbtNewNode(&(ppgn->rbtn), paNewPage);
              ppgn->ppt = (PPAGETAB)kaNewPage;
              RbtInsert(&(pvmctxt->rbtPageTables), (PRBTREENODE)ppgn);
            }
            else
            { /* could not allocate both, free one if it was allocated */
              if (ppgnFree)
                IMalloc_Free(g_pMalloc, ppgnFree);
              hr = E_OUTOFMEMORY;
            }
            if (FAILED(hr))
              demap_pages0(pvmctxt, kaNewPage, 1, 0);
          }
          if (FAILED(hr))
            _MmFreeKernelAddr(kaNewPage, 1);
        }
        else
          hr = MEMMGR_E_NOKERNSPC;  /* no kernel space available */
        if (FAILED(hr))
          VERIFY(SUCCEEDED(MmFreePage(paNewPage, MPDBTAG_SYSTEM, MPDBSYS_PGTBL)));
      }
    }
    else
      hr = MEMMGR_E_RECURSED;  /* recursive entry */
  }
  else
  { /* get the first item out of the free-pages tree and reinsert it into the current VM context */
    ppgn = (PPAGENODE)RbtFindMin(&g_rbtFreePageTables);
    RbtDelete(&g_rbtFreePageTables, ppgn->rbtn.treekey);
    rbtNewNode(&(ppgn->rbtn), ppgn->rbtn.treekey);
    RbtInsert(&(pvmctxt->rbtPageTables), (PRBTREENODE)ppgn);
  }

  if (SUCCEEDED(hr))
  { /* prepare new page table and insert it into the TTB */
    StrSetMem(ppgn->ppt, 0, sizeof(PAGETAB));
    pttbEntry->data = (PHYSADDR)(ppgn->rbtn.treekey) | uiTableFlags;  /* poke new entry */
    pttbAuxEntry->data = TTBAUXFLAGS_PAGETABLE;
    *pppt = ppgn->ppt;
  }
  else
    *pppt = NULL;
  return hr;
}

/*
 * Maps pages in the specified VM context within a single TTB entry.
 *
 * Parameters:
 * - pvmctxt = Pointer to the VM context.
 * - paBase = Base physical address to be mapped.
 * - ndxTTB = Index in the TTB that we're manipulating.
 * - ndxPage = Starting index in the page table of the first entry to allocate.
 * - cpg = Count of the number of pages to allocate.  Note that this function will not allocate more
 *         page mapping entries than remain on the page, as indicated by ndxPage.
 * - uiTableFlags = TTB-level flags to use for the page table entry.
 * - uiPageFlags = Page-level flags to use for the page table entry.
 * - uiAuxFlags = Auxiliary data flags to use for the page table entry.
 * - uiFlags = Flags for the mapping operation.
 *
 * Returns:
 * Standard HRESULT success/failure.  If the result is successful, the SCODE_CODE of the result will
 * indicate the number of pages actually mapped.
 *
 * Side effects:
 * May modify the TTB entry/aux entry pointed to, and the page table it points to, where applicable.  May
 * also allocate a new page table, which may modify other data structures.
 */
static HRESULT map_pages1(PVMCTXT pvmctxt, PHYSADDR paBase, UINT32 ndxTTB, UINT32 ndxPage,
                          UINT32 cpg, UINT32 uiTableFlags, UINT32 uiPageFlags, UINT32 uiAuxFlags, UINT32 uiFlags)
{
  UINT32 cpgCurrent;     /* number of pages we're mapping */
  PPAGETAB pTab = NULL;  /* pointer to current or new page table */
  PHYSADDR paPTab;       /* PA of the page table */
  HRESULT hr;            /* return from this function */
  register INT32 i;      /* loop counter */

  switch (pvmctxt->pTTB[ndxTTB].data & TTBQUERY_MASK)
  {
    case TTBQUERY_FAULT:  /* not allocated, allocate a new page table for the slot */
      hr = alloc_page_table(pvmctxt, pvmctxt->pTTB + ndxTTB, pvmctxt->pTTBAux + ndxTTB, uiTableFlags, uiFlags, &pTab);
      if (FAILED(hr))
        return hr;
      paPTab = (PHYSADDR)(pvmctxt->pTTB[ndxTTB].data & TTBPGTBL_BASE);
      break;

    case TTBQUERY_PGTBL:  /* existing page table */
      if ((pvmctxt->pTTB[ndxTTB].data & TTBPGTBL_ALLFLAGS) != uiTableFlags)
        return MEMMGR_E_BADTTBFLG;  /* table flags not compatible */
      pTab = resolve_pagetab(pvmctxt, pvmctxt->pTTB + ndxTTB);
      if (!pTab)
        return MEMMGR_E_NOPGTBL;  /* could not map the page table */
      paPTab = (PHYSADDR)(pvmctxt->pTTB[ndxTTB].data & TTBPGTBL_BASE);
      break;

    case TTBQUERY_SEC:
    case TTBQUERY_PXNSEC:
      /* this is a section, make sure its base address covers this mapping and its flags are compatible */
      if ((pvmctxt->pTTB[ndxTTB].data & TTBSEC_ALLFLAGS) != make_section_flags(uiTableFlags, uiPageFlags))
        return MEMMGR_E_BADTTBFLG;
      if (pvmctxt->pTTBAux[ndxTTB].data != make_section_aux_flags(uiAuxFlags))
        return MEMMGR_E_BADTTBFLG;
      if ((pvmctxt->pTTB[ndxTTB].data & TTBSEC_BASE) != (paBase & TTBSEC_BASE))
        return MEMMGR_E_COLLIDED;
      pTab = NULL;
      paPTab = pvmctxt->paTTB + (ndxTTB * sizeof(TTB));
      break;
  }

  /* Figure out how many entries we're going to map. */
  cpgCurrent = SYS_PGTBL_ENTRIES - ndxPage;  /* total free slots on page */
  if (cpg < cpgCurrent)
    cpgCurrent = cpg;  /* only map up to max requested */
  hr = MAKE_SCODE(SEVERITY_SUCCESS, FACILITY_MEMMGR, cpgCurrent);

  if (pTab)
  { /* fill in entries in the page table */
    for (i = 0; i < cpgCurrent; i++)
    {
      if ((pTab->pgtbl[ndxPage + i].data & PGQUERY_MASK) != PGQUERY_FAULT)
      {
        while (--i >= 0)
        { /* reverse any mapping we've done in this function */
          if (g_pfnSetPTEAddr && !(uiAuxFlags & PGAUX_NOTPAGE))
            (*g_pfnSetPTEAddr)(mmPA2PageIndex(pTab->pgtbl[ndxPage + i].data & PGTBLSM_PAGE), 0, FALSE);
          pTab->pgtbl[ndxPage + i].data = 0;
          pTab->pgaux[ndxPage + i].data = 0;
        }
        return MEMMGR_E_COLLIDED;  /* stepping on existing mapping */
      }
      if (g_pfnSetPTEAddr && !(uiAuxFlags & PGAUX_NOTPAGE))
        (*g_pfnSetPTEAddr)(mmPA2PageIndex(paBase), paPTab + ((ndxPage + i) * sizeof(PGTBL)), FALSE);
      pTab->pgtbl[ndxPage + i].data = paBase | uiPageFlags;
      pTab->pgaux[ndxPage + i].data = uiAuxFlags;
      paBase += SYS_PAGE_SIZE;
    }
  }
  else if (g_pfnSetPTEAddr && !(uiAuxFlags & PGAUX_NOTPAGE))
  {
    for (i = 0; i < cpgCurrent; i++)
      (*g_pfnSetPTEAddr)(mmPA2PageIndex(paBase & TTBSEC_BASE) + ndxPage + i, paPTab, TRUE);
  }
  return hr;
}

/*
 * Maps pages in the specified VM context.
 *
 * Parameters:
 * - pvmctxt = Pointer to the VM context.
 * - paBase = Base physical address to be mapped.
 * - vmaBase = Base virtual address to be mapped.
 * - cpg = Count of the number of pages to map.
 * - uiTableFlags = TTB-level flags to use for the page table entry.
 * - uiPageFlags = Page-level flags to use for the page table entry.
 * - uiAuxFlags = Auxiliary data flags to use for the page table entry.
 * - uiFlags = Flags for the mapping operation.
 *
 * Returns:
 * Standard HRESULT success/failure.
 */
static HRESULT map_pages0(PVMCTXT pvmctxt, PHYSADDR paBase, KERNADDR vmaBase, UINT32 cpg, UINT32 uiTableFlags,
                          UINT32 uiPageFlags, UINT32 uiAuxFlags, UINT32 uiFlags)
{
  UINT32 ndxTTB = mmVMA2TTBIndex(vmaBase);     /* TTB entry index */
  UINT32 ndxPage = mmVMA2PGTBLIndex(vmaBase);  /* starting page entry index */
  UINT32 cpgRemaining = cpg;                   /* number of pages remaining to map */
  BOOL bCanMapBySection;                       /* can we map by section? */
  UINT32 uiSecFlags = 0;                       /* section flags */
  UINT32 uiSecAuxFlags = 0;                    /* section auxiliary flags */
  register UINT32 i;                           /* loop counter */
  HRESULT hr;                                  /* temporary result */

  if ((cpgRemaining > 0) && (ndxPage > 0))
  {
    /* We are starting in the middle of a section's page table.  Map up to the end of that section. */
    hr = map_pages1(pvmctxt, paBase, ndxTTB, ndxPage, cpgRemaining, uiTableFlags, uiPageFlags, uiAuxFlags, uiFlags);
    if (FAILED(hr))
      return hr;
    cpgRemaining -= SCODE_CODE(hr);
    paBase += (SCODE_CODE(hr) << SYS_PAGE_BITS);
    if (++ndxTTB == pvmctxt->uiMaxIndex)
    {
      hr = MEMMGR_E_ENDTTB;
      goto errorExit;
    }
  }
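
  /*
   * From this point on, ndxTTB refers to a section-aligned virtual address; if paBase
   * is section-aligned too, whole 1Mb sections can be mapped with single TTB entries
   * instead of individual page-table entries.
   */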

  if (cpgRemaining == 0)
    return S_OK;  /* bail out if we finished mapping in first stage */

  bCanMapBySection = MAKEBOOL((cpgRemaining >= SYS_PGTBL_ENTRIES) && ((paBase & TTBSEC_BASE) == paBase));
  if (bCanMapBySection)
  {
    uiSecFlags = make_section_flags(uiTableFlags, uiPageFlags);
    uiSecAuxFlags = make_section_aux_flags(uiAuxFlags);
  }

  while (cpgRemaining >= SYS_PGTBL_ENTRIES)
  { /* try to map a whole section's worth at a time */
    if (bCanMapBySection)
    { /* paBase is section-aligned now as well, we can use a direct 1Mb section mapping */
      switch (pvmctxt->pTTB[ndxTTB].data & TTBQUERY_MASK)
      {
        case TTBQUERY_FAULT:  /* unmapped - map the section */
          if (g_pfnSetPTEAddr && !(uiAuxFlags & PGAUX_NOTPAGE))
          {
            for (i = 0; i < SYS_SEC_PAGES; i++)
              (*g_pfnSetPTEAddr)(mmPA2PageIndex(paBase) + i, pvmctxt->paTTB + (ndxTTB * sizeof(TTB)), TRUE);
          }
          pvmctxt->pTTB[ndxTTB].data = paBase | uiSecFlags;
          pvmctxt->pTTBAux[ndxTTB].data = uiSecAuxFlags;
          break;

        case TTBQUERY_PGTBL:  /* page table here */
          goto pageTableFallback;

        case TTBQUERY_SEC:  /* test existing section */
        case TTBQUERY_PXNSEC:
          if (   ((pvmctxt->pTTB[ndxTTB].data & TTBSEC_ALLFLAGS) != uiSecFlags)
              || (pvmctxt->pTTBAux[ndxTTB].data != uiSecAuxFlags))
          {
            hr = MEMMGR_E_BADTTBFLG;
            goto errorExit;
          }
          if ((pvmctxt->pTTB[ndxTTB].data & TTBSEC_BASE) != paBase)
          {
            hr = MEMMGR_E_COLLIDED;
            goto errorExit;
          }
          break;
      }
      /* we mapped a whole section's worth */
      hr = MAKE_SCODE(SEVERITY_SUCCESS, FACILITY_MEMMGR, SYS_PGTBL_ENTRIES);
    }
    else
    {
      /* just map 256 individual pages */
pageTableFallback:
      hr = map_pages1(pvmctxt, paBase, ndxTTB, 0, cpgRemaining, uiTableFlags, uiPageFlags, uiAuxFlags, uiFlags);
      if (FAILED(hr))
        goto errorExit;
    }
    /* adjust base physical address, page count, and TTB index */
    paBase += (SCODE_CODE(hr) << SYS_PAGE_BITS);
    cpgRemaining -= SCODE_CODE(hr);
    if (++ndxTTB == pvmctxt->uiMaxIndex)
    {
      hr = MEMMGR_E_ENDTTB;
      goto errorExit;
    }
  }

  if (cpgRemaining > 0)
  { /* map the "tail end" onto the next TTB */
    hr = map_pages1(pvmctxt, paBase, ndxTTB, 0, cpgRemaining, uiTableFlags, uiPageFlags, uiAuxFlags, uiFlags);
    if (FAILED(hr))
      goto errorExit;
  }
  return S_OK;

errorExit:
  /* demap everything we've managed to map thus far */
  demap_pages0(pvmctxt, vmaBase, cpg - cpgRemaining, DEMAP_NOTHING_SACRED);
  return hr;
}

/*
 * Maps pages in the specified VM context.
 *
 * Parameters:
 * - pvmctxt = Pointer to the VM context to use.  If this is NULL or the vmaBase address specified is
 *             above the TTB0 fence, the kernel VM context is used.
 * - paBase = Base physical address to be mapped.
 * - vmaBase = Base virtual address to be mapped.
 * - cpg = Count of the number of pages to map.
 * - uiTableFlags = TTB-level flags to use for the page table entry.
 * - uiPageFlags = Page-level flags to use for the page table entry.
 * - uiAuxFlags = Auxiliary data flags to use for the page table entry.
 *
 * Returns:
 * Standard HRESULT success/failure.
 */
HRESULT MmMapPages(PVMCTXT pvmctxt, PHYSADDR paBase, KERNADDR vmaBase, UINT32 cpg, UINT32 uiTableFlags,
                   UINT32 uiPageFlags, UINT32 uiAuxFlags)
{
  return map_pages0(resolve_vmctxt(pvmctxt, vmaBase), paBase, vmaBase, cpg, uiTableFlags, uiPageFlags, uiAuxFlags, 0);
}

/*
 * Maps pages into the kernel address space.  The mapping is done in the kernel VM context.
 *
 * Parameters:
 * - paBase = Base physical address to be mapped.
 * - cpg = Count of the number of pages to map.
 * - uiTableFlags = TTB-level flags to use for the page table entry.
 * - uiPageFlags = Page-level flags to use for the page table entry.
 * - uiAuxFlags = Auxiliary data flags to use for the page table entry.
 * - pvmaLocation = Pointer to a variable which will receive the VM address of the mapped pages.
 *
 * Returns:
 * Standard HRESULT success/failure.
 */
HRESULT MmMapKernelPages(PHYSADDR paBase, UINT32 cpg, UINT32 uiTableFlags,
                         UINT32 uiPageFlags, UINT32 uiAuxFlags, PKERNADDR pvmaLocation)
{
  register HRESULT hr;  /* return from this function */

  if (!pvmaLocation)
    return E_POINTER;
  *pvmaLocation = _MmAllocKernelAddr(cpg);
  if (!(*pvmaLocation))
    return MEMMGR_E_NOKERNSPC;
  hr = map_pages0(&g_vmctxtKernel, paBase, *pvmaLocation, cpg, uiTableFlags, uiPageFlags, uiAuxFlags, 0);
  if (FAILED(hr))
  {
    _MmFreeKernelAddr(*pvmaLocation, cpg);
    *pvmaLocation = NULL;
  }
  return hr;
}

/*
 * Unmaps pages from the kernel address space and reclaims that address space for later use.
 * The unmapping is done in the kernel VM context.
 *
 * Parameters:
 * - vmaBase = Base VM address of the region to be unmapped.
 * - cpg = Number of pages to be unmapped.
 *
 * Returns:
 * Standard HRESULT success/failure.
 */
HRESULT MmDemapKernelPages(KERNADDR vmaBase, UINT32 cpg)
{
  register HRESULT hr;

  if ((vmaBase & VMADDR_KERNEL_FENCE) != VMADDR_KERNEL_FENCE)
    return E_INVALIDARG;
  hr = demap_pages0(&g_vmctxtKernel, vmaBase, cpg, 0);
  if (SUCCEEDED(hr))
    _MmFreeKernelAddr(vmaBase, cpg);
  return hr;
}

/*---------------------
 * Initialization code
 *---------------------
 */

/* External references to linker-defined symbols. */
extern char cpgPrestartTotal;
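
/*
 * Note: as with other linker-defined symbols, it is the address of cpgPrestartTotal
 * that carries the value (a page count), which is why it is taken with & and cast to
 * UINT32 below rather than read as a variable.
 */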

/*
 * Initialize the virtual-memory mapping.
 *
 * Parameters:
 * - pstartup = Pointer to the STARTUP_INFO data structure.
 * - pmInitHeap = Pointer to the initialization heap's IMalloc interface.
 *
 * Returns:
 * Nothing.
 *
 * Side effects:
 * Sets up the data structures allocated statically in this file.
 */
SEG_INIT_CODE void _MmInitVMMap(PSTARTUP_INFO pstartup, PMALLOC pmInitHeap)
{
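  /*
   * Precalculated reflag operations for the zero page: element [0] of each pair is the
   * clear mask (all "safe" flags) and element [1] is the set mask (kernel-data flags,
   * plus PGAUX_NOTPAGE, which exempts the page from page-database PTE tracking).
   */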
  SEG_INIT_DATA static FLAG_OPERATIONS opsReflagZeroPage = {
    .uiTableFlags = { TTBPGTBL_SAFEFLAGS, TTBFLAGS_KERNEL_DATA },
    .uiPageFlags = { PGTBLSM_SAFEFLAGS, PGTBLFLAGS_KERNEL_DATA },
    .uiAuxFlags = { PGAUX_SAFEFLAGS, PGAUXFLAGS_KERNEL_DATA|PGAUX_NOTPAGE }
  };
  PHYSADDR paPageTable;  /* PA of current page table */
  KERNADDR kaPageTable;  /* KA of current page table */
  PPAGENODE ppgn;        /* pointer to node being allocated & inserted */
  register UINT32 i;     /* loop counter */

  /* Initialize the local variables in this module. */
  g_pMalloc = pmInitHeap;
  IUnknown_AddRef(g_pMalloc);
  g_vmctxtKernel.pTTB = (PTTB)(pstartup->kaTTB);
  g_vmctxtKernel.pTTBAux = (PTTBAUX)(pstartup->kaTTBAux);
  g_vmctxtKernel.paTTB = pstartup->paTTB;
  rbtInitTree(&(g_vmctxtKernel.rbtPageTables), RbtStdCompareByValue);
  rbtInitTree(&g_rbtFreePageTables, RbtStdCompareByValue);

  /*
   * Load all the page tables we know about.  They all get mapped in as part of the kernel context, except if
   * there's one free on the last page; it gets added to the free list.
   */
  paPageTable = pstartup->paFirstPageTable;
  for (i = 0; i < pstartup->cpgPageTables; i++)
  { /* map page table into kernel space */
    kaPageTable = _MmAllocKernelAddr(1);
    ASSERT(kaPageTable);
    VERIFY(SUCCEEDED(map_pages0(&g_vmctxtKernel, paPageTable, kaPageTable, 1, TTBFLAGS_KERNEL_DATA,
                                PGTBLFLAGS_KERNEL_DATA, PGAUXFLAGS_KERNEL_DATA, MAP_DONT_ALLOC)));

    /* allocate node for first page table on page */
    ppgn = IMalloc_Alloc(g_pMalloc, sizeof(PAGENODE));
    ASSERT(ppgn);
    rbtNewNode(&(ppgn->rbtn), paPageTable);
    ppgn->ppt = (PPAGETAB)kaPageTable;
    RbtInsert(&(g_vmctxtKernel.rbtPageTables), (PRBTREENODE)ppgn);

    /* allocate node for second page table on page */
    ppgn = IMalloc_Alloc(g_pMalloc, sizeof(PAGENODE));
    ASSERT(ppgn);
    rbtNewNode(&(ppgn->rbtn), paPageTable + sizeof(PAGETAB));
    ppgn->ppt = ((PPAGETAB)kaPageTable) + 1;
    if ((i == (pstartup->cpgPageTables - 1)) && pstartup->ctblFreeOnLastPage)
      RbtInsert(&g_rbtFreePageTables, (PRBTREENODE)ppgn);
    else
      RbtInsert(&(g_vmctxtKernel.rbtPageTables), (PRBTREENODE)ppgn);

    paPageTable += SYS_PAGE_SIZE;  /* advance to next page table page */
  }

  /*
   * Undo the "temporary" low-memory mappings we created in the prestart code.  But we keep the "zero page"
   * in place, because that's where the exception handlers are.  Note that these pages were not flagged as
   * "sacred" at prestart time.
   */
  VERIFY(SUCCEEDED(demap_pages0(&g_vmctxtKernel, SYS_PAGE_SIZE, (UINT32)(&cpgPrestartTotal) - 1, 0)));
  VERIFY(SUCCEEDED(demap_pages0(&g_vmctxtKernel, PHYSADDR_IO_BASE, PAGE_COUNT_IO, 0)));
  /* Reset page attributes on the zero page. */
  VERIFY(SUCCEEDED(reflag_pages0(&g_vmctxtKernel, 0, 1, &opsReflagZeroPage,
                                 FLAGOP_NOTHING_SACRED|FLAGOP_PRECALCULATED)));
}

/*
 * Initialize the PTE mapping hook and the PTE mappings for all existing mapped pages.
 *
 * Parameters:
 * - pfnSetPTEAddr = Pointer to the PTE mapping hook function.  This function will be called multiple times
 *                   to initialize the PTE mappings in the MPDB.
 *
 * Returns:
 * Nothing.
 */
SEG_INIT_CODE void _MmInitPTEMappings(PFNSETPTEADDR pfnSetPTEAddr)
{
  register UINT32 i, j;  /* loop counters */
  PHYSADDR paPTE;        /* PA of the PTE */
  PPAGETAB pTab;         /* page table pointer */

  g_pfnSetPTEAddr = pfnSetPTEAddr;  /* set up hook function */
  for (i = 0; i < SYS_TTB1_ENTRIES; i++)
  {
    switch (g_vmctxtKernel.pTTB[i].data & TTBQUERY_MASK)
    {
      case TTBQUERY_PGTBL:
        /* walk page table and assign page table entry pointers to allocated entries */
        paPTE = (PHYSADDR)(g_vmctxtKernel.pTTB[i].data & TTBPGTBL_BASE);
        pTab = resolve_pagetab(&g_vmctxtKernel, g_vmctxtKernel.pTTB + i);
        for (j = 0; j < SYS_PGTBL_ENTRIES; j++)
        { /* set PTE entry for each entry in turn */
          if ((pTab->pgtbl[j].data & PGTBLSM_ALWAYS) && !(pTab->pgaux[j].aux.notpage))
            (*pfnSetPTEAddr)(mmPA2PageIndex(pTab->pgtbl[j].data & PGTBLSM_PAGE), paPTE, FALSE);
          paPTE += sizeof(PGTBL);
        }
        break;

      case TTBQUERY_SEC:
      case TTBQUERY_PXNSEC:
        if (!(g_vmctxtKernel.pTTBAux[i].aux.notpage))
        { /* set PTE entry (actually pointer to TTB entry) for the entire section */
          paPTE = g_vmctxtKernel.paTTB + (i * sizeof(TTB));
          for (j = 0; j < SYS_SEC_PAGES; j++)
            (*pfnSetPTEAddr)(mmPA2PageIndex(g_vmctxtKernel.pTTB[i].data & TTBSEC_BASE) + j, paPTE, TRUE);
        }
        break;

      default:
        break;
    }
  }
}
|