finished the rework of vmmap.c etc.

parent 4c6b86ffbd
commit b23fd6b6e5
@@ -39,22 +39,52 @@
 #include <comrogue/types.h>
 #include <comrogue/compiler_macros.h>
 #include <comrogue/internals/mmu.h>
+#include <comrogue/internals/rbtree.h>
 #include <comrogue/internals/startup.h>
+
+/*------------------------------------------
+ * The COMROGUE memory management subsystem
+ *------------------------------------------
+ */
+
+/* Nodes in the page table tree. */
+typedef struct tagPAGENODE {
+  RBTREENODE rbtn;       /* RBT node containing physical address as key */
+  PPAGETAB ppt;          /* pointer to page table */
+} PAGENODE, *PPAGENODE;
+
+/* Virtual memory context. */
+typedef struct tagVMCTXT {
+  PTTB pTTB;             /* pointer to the TTB */
+  PTTBAUX pTTBAux;       /* pointer to the TTB auxiliary data */
+  UINT32 uiMaxIndex;     /* max index into the above tables */
+  RBTREE rbtPageTables;  /* tree containing page tables this context owns */
+} VMCTXT, *PVMCTXT;
+
 CDECL_BEGIN
+
+/* Low-level maintenance functions */
+extern void _MmFlushCacheForPage(KERNADDR vmaPage, BOOL bWriteback);
+extern void _MmFlushCacheForSection(KERNADDR vmaSection, BOOL bWriteback);
+extern void _MmFlushTLBForPage(KERNADDR vmaPage);
+extern void _MmFlushTLBForPageAndContext(KERNADDR vmaPage, UINT32 uiASID);
+extern void _MmFlushTLBForSection(KERNADDR vmaSection);
+extern void _MmFlushTLBForSectionAndContext(KERNADDR vmaSection, UINT32 uiASID);
+extern PTTB _MmGetTTB0(void);
+extern void _MmSetTTB0(PTTB pTTB);
 
 /* Kernel address space functions */
 extern KERNADDR _MmAllocKernelAddr(UINT32 cpgNeeded);
 extern void _MmFreeKernelAddr(KERNADDR kaBase, UINT32 cpgToFree);
 
 /* Page mapping functions */
-extern PHYSADDR MmGetPhysAddr(PTTB pTTB, KERNADDR vma);
+extern PHYSADDR MmGetPhysAddr(PVMCTXT pvmctxt, KERNADDR vma);
-extern HRESULT MmDemapPages(PTTB pTTB, PTTBAUX pTTBAux, KERNADDR vmaBase, UINT32 cpg);
+extern HRESULT MmDemapPages(PVMCTXT pvmctxt, KERNADDR vmaBase, UINT32 cpg);
-extern HRESULT MmMapPages(PTTB pTTB, PTTBAUX pTTBAux, PHYSADDR paBase, KERNADDR vmaBase, UINT32 cpg,
+extern HRESULT MmMapPages(PVMCTXT pvmctxt, PHYSADDR paBase, KERNADDR vmaBase, UINT32 cpg,
                           UINT32 uiTableFlags, UINT32 uiPageFlags, UINT32 uiAuxFlags);
-extern HRESULT MmMapKernelPages(PTTB pTTB, PTTBAUX pTTBAux, PHYSADDR paBase, UINT32 cpg, UINT32 uiTableFlags,
+extern HRESULT MmMapKernelPages(PHYSADDR paBase, UINT32 cpg, UINT32 uiTableFlags,
                                 UINT32 uiPageFlags, UINT32 uiAuxFlags, PKERNADDR pvmaLocation);
-extern HRESULT MmDemapKernelPages(PTTB pTTB, PTTBAUX pTTBAux, KERNADDR vmaBase, UINT32 cpg);
+extern HRESULT MmDemapKernelPages(KERNADDR vmaBase, UINT32 cpg);
 
 /* Initialization functions only */
 extern void _MmInit(PSTARTUP_INFO pstartup);
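The rework above replaces the raw (pTTB, pTTBAux) argument pairs with a single VMCTXT handle, and the kernel-space entry points drop the translation-table arguments entirely. A minimal caller sketch, assuming the flag conventions from mmu.h (the peripheral physical address is a made-up example value):

/* Hypothetical example: map four pages of a device into kernel space, then demap them. */
static HRESULT example_map_device_regs(void)
{
  KERNADDR kaRegs = 0;   /* receives the kernel VA chosen by _MmAllocKernelAddr */
  HRESULT hr = MmMapKernelPages(0x20200000 /* example PA */, 4, TTBFLAGS_KERNEL_DATA,
                                PGTBLFLAGS_KERNEL_DATA, PGAUXFLAGS_KERNEL_DATA, &kaRegs);
  if (FAILED(hr))
    return hr;           /* e.g. MEMMGR_E_NOKERNSPC when no kernel address space is left */
  /* ... access the device through kaRegs ... */
  return MmDemapKernelPages(kaRegs, 4);
}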
@@ -103,6 +103,7 @@
 
 /* TTB auxiliary descriptor bits */
 #define TTBAUX_SACRED       0x00000001  /* sacred entry, do not deallocate */
+#define TTBAUX_UNWRITEABLE  0x00000002  /* entry unwriteable */
 
 /* Small page table entry bits */
 #define PGTBLSM_XN          0x00000001  /* Execute-Never */
@@ -131,20 +132,21 @@
 
 /* Page auxiliary descriptor bits */
 #define PGAUX_SACRED        0x00000001  /* sacred entry, do not deallocate */
+#define PGAUX_UNWRITEABLE   0x00000002  /* entry unwriteable */
 
 /* Combinations of flags we use regularly. */
 #define TTBFLAGS_LIB_CODE      TTBPGTBL_ALWAYS
 #define PGTBLFLAGS_LIB_CODE    (PGTBLSM_ALWAYS | PGTBLSM_B | PGTBLSM_C | PGTBLSM_AP10)
-#define PGAUXFLAGS_LIB_CODE    PGAUX_SACRED
+#define PGAUXFLAGS_LIB_CODE    (PGAUX_SACRED | PGAUX_UNWRITEABLE)
 #define TTBFLAGS_KERNEL_CODE   TTBPGTBL_ALWAYS
 #define PGTBLFLAGS_KERNEL_CODE (PGTBLSM_ALWAYS | PGTBLSM_B | PGTBLSM_C | PGTBLSM_AP01)
-#define PGAUXFLAGS_KERNEL_CODE PGAUX_SACRED
+#define PGAUXFLAGS_KERNEL_CODE (PGAUX_SACRED | PGAUX_UNWRITEABLE)
 #define TTBFLAGS_KERNEL_DATA   TTBPGTBL_ALWAYS
 #define PGTBLFLAGS_KERNEL_DATA (PGTBLSM_XN | PGTBLSM_ALWAYS | PGTBLSM_B | PGTBLSM_C | PGTBLSM_AP01)
 #define PGAUXFLAGS_KERNEL_DATA PGAUX_SACRED
 #define TTBFLAGS_INIT_CODE     TTBFLAGS_KERNEL_CODE
 #define PGTBLFLAGS_INIT_CODE   PGTBLFLAGS_KERNEL_CODE
-#define PGAUXFLAGS_INIT_CODE   0
+#define PGAUXFLAGS_INIT_CODE   PGAUX_UNWRITEABLE
 #define TTBFLAGS_INIT_DATA     TTBFLAGS_KERNEL_DATA
 #define PGTBLFLAGS_INIT_DATA   PGTBLFLAGS_KERNEL_DATA
 #define PGAUXFLAGS_INIT_DATA   0
@@ -207,7 +209,8 @@ typedef union tagTTB {
 
 /* TTB auxiliary descriptor */
 typedef struct tagTTBAUXENTRY {
   unsigned sacred : 1;       /* sacred TTB - should never be deallocated */
-  unsigned reserved : 31;    /* reserved for future allocation */
+  unsigned unwriteable : 1;  /* entry is not writeable */
+  unsigned reserved : 30;    /* reserved for future allocation */
 } TTBAUXENTRY, *PTTBAUXENTRY;
 
 /* TTB auxiliary table entry */
@@ -246,7 +249,8 @@ typedef union tagPGTBL {
 
 /* page auxiliary descriptor */
 typedef struct tagPGAUXENTRY {
   unsigned sacred : 1;       /* sacred page - should never be deallocated */
-  unsigned reserved : 31;    /* reserved for future allocation */
+  unsigned unwriteable : 1;  /* entry is not writeable */
+  unsigned reserved : 30;    /* reserved for future allocation */
 } PGAUXENTRY, *PPGAUXENTRY;
 
 /* page table auxiliary entry */
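The new unwriteable bit occupies bit 1 in both aux descriptors, so the bitfield view (.aux.unwriteable, as tested in vmmap.c) and the mask view (TTBAUX_UNWRITEABLE/PGAUX_UNWRITEABLE) must name the same bit. A small consistency sketch, assuming the aux entry union follows the same data/bitfield pattern as the other descriptors in mmu.h:

/* Presumed union shape (the real definition lives in mmu.h). */
typedef union tagPGAUX_EXAMPLE {
  UINT32 data;      /* raw word, matches the PGAUX_* masks */
  PGAUXENTRY aux;   /* bitfield view: sacred, unwriteable, reserved */
} PGAUX_EXAMPLE;

static BOOL example_aux_views_agree(PGAUX_EXAMPLE e)
{
  /* bit 0 = sacred, bit 1 = unwriteable in both views */
  return MAKEBOOL((!!(e.data & PGAUX_UNWRITEABLE)) == e.aux.unwriteable);
}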
@@ -264,6 +268,9 @@ typedef struct tagPAGETAB {
 
 /* VMA index macros */
 #define mmVMA2TTBIndex(vma)   (((vma) >> (SYS_PAGE_BITS + SYS_PGTBL_BITS)) & ((1 << SYS_TTB_BITS) - 1))
 #define mmVMA2PGTBLIndex(vma) (((vma) >> SYS_PAGE_BITS) & ((1 << SYS_PGTBL_BITS) - 1))
+#define mmIndices2VMA3(ttb, pgtbl, ofs) \
+  ((((ttb) & ((1 << SYS_TTB_BITS) - 1)) << (SYS_PAGE_BITS + SYS_PGTBL_BITS)) | \
+   (((pgtbl) & ((1 << SYS_PGTBL_BITS) - 1)) << SYS_PAGE_BITS) | ((ofs) & (SYS_PAGE_SIZE - 1)))
 
 /*
  * Data structures for the Master Page Database.
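The new mmIndices2VMA3 macro is the inverse of the two index macros; vmmap.c uses it to rebuild a section base address after advancing the TTB index. A worked example, assuming the usual ARM small-page constants (SYS_PAGE_BITS = 12, SYS_PGTBL_BITS = 8, SYS_TTB_BITS = 12 - assumed here, not quoted from mmu.h):

static void example_vma_round_trip(void)
{
  KERNADDR vma = 0xC0123ABC;
  UINT32 ndxTTB   = mmVMA2TTBIndex(vma);         /* top 12 bits:   0xC01 */
  UINT32 ndxPgtbl = mmVMA2PGTBLIndex(vma);       /* middle 8 bits: 0x23  */
  UINT32 ofs      = vma & (SYS_PAGE_SIZE - 1);   /* low 12 bits:   0xABC */
  /* rebuilding gives the original address back */
  ASSERT(mmIndices2VMA3(ndxTTB, ndxPgtbl, ofs) == vma);
}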
@@ -82,9 +82,10 @@ typedef struct tagRBTREE {
   PRBTREENODE ptnRoot;   /* pointer to root of tree */
 } RBTREE, *PRBTREE;
 
-/* Macro to initialize the tree head. */
+/* Tree macros. */
 #define rbtInitTree(ptree, pfnCompare) \
   do { (ptree)->pfnTreeCompare = (pfnCompare); (ptree)->ptnRoot = NULL; } while (0)
+#define rbtIsEmpty(ptree) MAKEBOOL(!((ptree)->ptnRoot))
 
 /* Type of function used by RbtWalk. */
 typedef BOOL (*PFNRBTWALK)(PRBTREE, PRBTREENODE, PVOID);
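rbtIsEmpty is what the reworked alloc_page_table in vmmap.c uses to decide between reusing a cached page table and allocating a fresh page. A minimal usage sketch (the comparator name and its exact signature are assumptions for illustration):

static INT32 example_compare_keys(TREEKEY k1, TREEKEY k2)   /* hypothetical comparator */
{
  return (k1 < k2) ? -1 : ((k1 > k2) ? 1 : 0);
}

static void example_tree_use(void)
{
  RBTREE rbt;
  rbtInitTree(&rbt, example_compare_keys);
  if (rbtIsEmpty(&rbt))
  {
    /* no cached nodes yet - caller takes the slow allocation path */
  }
}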
@@ -104,5 +104,6 @@
 #define MEMMGR_E_ENDTTB    SCODE_CAST(0x86010004)  /* tried to "walk off" end of TTB */
 #define MEMMGR_E_NOSACRED  SCODE_CAST(0x86010005)  /* tried to demap a "sacred" entry */
 #define MEMMGR_E_NOKERNSPC SCODE_CAST(0x86010006)  /* no kernel space */
+#define MEMMGR_E_RECURSED  SCODE_CAST(0x86010007)  /* tried to recurse into page allocation */
 
 #endif /* __SCODE_H_INCLUDED */
@@ -95,7 +95,7 @@ SEG_INIT_CODE static UINT32 make_section_flags(UINT32 uiTableFlags, UINT32 uiPag
  */
 SEG_INIT_CODE static UINT32 make_section_aux_flags(UINT32 uiPageAuxFlags)
 {
-  register UINT32 rc = uiPageAuxFlags & (PGAUX_SACRED);
+  register UINT32 rc = uiPageAuxFlags & (PGAUX_SACRED|PGAUX_UNWRITEABLE);
   /* TODO if we define any other flags */
   return rc;
 }
@@ -29,6 +29,9 @@
  *
  * "Raspberry Pi" is a trademark of the Raspberry Pi Foundation.
  */
+#include <comrogue/internals/asm-macros.h>
+#include <comrogue/internals/mmu.h>
 
 .section ".text"
 
 /*------------------------
@@ -94,3 +97,154 @@ llIODelay:
     pop {lr}
 .delayreturn:
     bx lr
+
+/*
+ * Flushes the system cache of all data on a page. Optionally writes back writeable data before flushing.
+ *
+ * Parameters:
+ * - vmaPage = The page to be invalidated.
+ * - bWriteback = TRUE to write back before invalidating, FALSE to not do so.
+ *
+ * Returns:
+ * Nothing.
+ */
+.globl _MmFlushCacheForPage
+_MmFlushCacheForPage:
+    mov r2, # SYS_PAGE_SIZE
+    sub r2, r2, #1
+    orr ip, r0, r2               /* expand so that [r0, ip] is the range to invalidate */
+    bic r0, r0, r2
+    tst r1, r1                   /* is this a writeable page? */
+    mcrrne p15, 0, ip, r0, c14   /* yes, clean and invalidate */
+    mcrreq p15, 0, ip, r0, c6    /* no, just invalidate */
+    mcrr p15, 0, ip, r0, c5      /* either way, invalidate instruction cache */
+    bx lr
+
+/*
+ * Flushes the system cache of all data in a section. Optionally writes back writeable data before flushing.
+ *
+ * Parameters:
+ * - vmaSection = The section to be invalidated.
+ * - bWriteback = TRUE to write back before invalidating, FALSE to not do so.
+ *
+ * Returns:
+ * Nothing.
+ */
+.globl _MmFlushCacheForSection
+_MmFlushCacheForSection:
+    mov r2, # SYS_SEC_SIZE
+    sub r2, r2, #1
+    bic r0, r0, r2               /* expand so that [r0, ip] is the range to invalidate */
+    orr ip, r0, r2
+    tst r1, r1                   /* is this a writeable section? */
+    mcrrne p15, 0, ip, r0, c14   /* yes, clean and invalidate */
+    mcrreq p15, 0, ip, r0, c6    /* no, just invalidate */
+    mcrr p15, 0, ip, r0, c5      /* either way, invalidate instruction cache */
+    bx lr
+
+/*
+ * Flushes the TLB for this page in the current address-space context.
+ *
+ * Parameters:
+ * - vmaPage = The page to be invalidated.
+ *
+ * Returns:
+ * Nothing.
+ */
+.globl _MmFlushTLBForPage
+/*
+ * Flushes the TLB for this page in a specified address-space context.
+ *
+ * Parameters:
+ * - vmaPage = The page to be invalidated.
+ * - uiASID = Address-space identifier.
+ *
+ * Returns:
+ * Nothing.
+ */
+.globl _MmFlushTLBForPageAndContext
+_MmFlushTLBForPage:
+    mrc p15, 0, r1, c13, c0, 1   /* get current context */
+_MmFlushTLBForPageAndContext:
+    and r1, r1, #0xFF            /* get ASID */
+    mov ip, # SYS_PAGE_SIZE
+    sub ip, ip, #1
+    bic r0, r0, ip               /* mask off "page" bits */
+    orr r0, r0, r1               /* add in specified ASID */
+    mcr p15, 0, r0, c8, c5, 1    /* invalidate TLB by virtual address */
+    bx lr
+
+/*
+ * Flushes the TLB for this section in the current address-space context.
+ *
+ * Parameters:
+ * - vmaSection = The section to be invalidated.
+ *
+ * Returns:
+ * Nothing.
+ */
+.globl _MmFlushTLBForSection
+/*
+ * Flushes the TLB for this section in a specified address-space context.
+ *
+ * Parameters:
+ * - vmaSection = The section to be invalidated.
+ * - uiASID = Address-space identifier.
+ *
+ * Returns:
+ * Nothing.
+ */
+.globl _MmFlushTLBForSectionAndContext
+_MmFlushTLBForSection:
+    mrc p15, 0, r1, c13, c0, 1   /* get current context */
+_MmFlushTLBForSectionAndContext:
+    and r1, r1, #0xFF            /* get ASID */
+    mov ip, # SYS_SEC_SIZE
+    sub ip, ip, #1
+    bic r0, r0, ip               /* r0 = first page to invalidate */
+    orr r0, r0, r1
+    add ip, r0, # SYS_SEC_SIZE   /* ip = last page to invalidate */
+.flush1:
+    mcr p15, 0, r0, c8, c5, 1    /* invalidate TLB by virtual address */
+    add r0, r0, # SYS_PAGE_SIZE  /* next page */
+    cmp r0, ip                   /* are we done? */
+    bxeq lr                      /* yes, bug out */
+    b .flush1                    /* no, keep going */
+
+/*
+ * Returns the value of TTB0, the pointer to the process-level TTB.
+ *
+ * Parameters:
+ * None.
+ *
+ * Returns:
+ * A pointer to the process-level TTB.
+ */
+.globl _MmGetTTB0
+_MmGetTTB0:
+    mrc p15, 0, r0, c2, c0, 0
+    bx lr
+
+/*
+ * Sets the value of TTB0, the pointer to the process-level TTB.
+ *
+ * Parameters:
+ * - pTTB = Pointer to the new process-level TTB.
+ *
+ * Returns:
+ * Nothing.
+ *
+ * N.B.:
+ * Only call this from within kernel code, as otherwise the results can be unpredictable.
+ */
+.globl _MmSetTTB0
+_MmSetTTB0:
+    mov ip, #0
+    mcr p15, 0, ip, c7, c7, 0    /* clear caches */
+    mcr p15, 0, ip, c8, c7, 0    /* clear TLB */
+    instr_barrier
+    mcr p15, 0, r0, c2, c0, 0    /* set TTB0 */
+    mrc p15, 0, ip, c0, c0, 0    /* read ID register */
+    instr_barrier
+    bx lr
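The TLB routines above fold the ASID into the low byte of the MVA operand before issuing the MCR. A C rendering of the operand value the assembly builds in r0 (illustrative only; the actual invalidation must be the privileged "mcr p15, 0, r0, c8, c5, 1" instruction):

/* Sketch: what _MmFlushTLBForPageAndContext computes before the MCR. */
static inline UINT32 example_tlb_mva_asid(KERNADDR vmaPage, UINT32 uiASID)
{
  return (vmaPage & ~(SYS_PAGE_SIZE - 1))   /* page-aligned modified virtual address */
       | (uiASID & 0xFF);                   /* address-space identifier in bits 7:0 */
}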
kernel/vmmap.c (651 changed lines)
@@ -31,8 +31,10 @@
  */
 #include <comrogue/types.h>
 #include <comrogue/scode.h>
+#include <comrogue/str.h>
 #include <comrogue/allocator.h>
 #include <comrogue/internals/seg.h>
+#include <comrogue/internals/layout.h>
 #include <comrogue/internals/mmu.h>
 #include <comrogue/internals/memmgr.h>
 #include <comrogue/internals/rbtree.h>
@@ -44,166 +46,139 @@
 DECLARE_THIS_FILE
 #endif
 
-/* Tree node storing mapping of physical addresses of page table pages to their kernel addresses */
-typedef struct tagPGTMAP {
-  RBTREENODE rbtn;      /* tree node structure */
-  KERNADDR kaPGTPage;   /* page table page kernel address */
-  UINT32 uiRefCount;    /* reference count for mapping */
-} PGTMAP, *PPGTMAP;
-
-#define NMAPFRAMES  4   /* number of frame mappings */
+/*-----------------------------------------------------------------------------------
+ * Virtual-memory mapping code that is part of the COMROGUE memory management system
+ *-----------------------------------------------------------------------------------
+ */
 
 static PMALLOC g_pMalloc = NULL;   /* allocator used */
-static PTTB g_pttb1 = NULL;        /* pointer to TTB1 */
-static PTTBAUX g_pttb1Aux = NULL;  /* pointer to TTB1 aux data */
-static RBTREE g_rbtPageTables;     /* tree mapping page table PAs to KAs */
-
-/* Forward declaration. */
-static HRESULT map_pages0(PTTB pTTB, PTTBAUX pTTBAux, PHYSADDR paBase, KERNADDR vmaBase, UINT32 cpg,
-                          UINT32 uiTableFlags, UINT32 uiPageFlags, UINT32 uiAuxFlags);
+static VMCTXT g_vmctxtKernel = {   /* kernel VM context */
+  .pTTB = NULL,
+  .pTTBAux = NULL,
+  .uiMaxIndex = SYS_TTB1_ENTRIES
+};
+static RBTREE g_rbtFreePageTables; /* tree containing free page tables */
 
 /*
- * Maps a page table's page into kernel memory space where we can examine it.
+ * Resolves a given page table reference for a TTB entry within a VM context.
  *
  * Parameters:
- * - paPageTable = Physical address of the page table to map.
+ * - pvmctxt = Pointer to the VM context.
+ * - pTTBEntry = Pointer to the TTB entry containing the page table reference to resolve.
  *
  * Returns:
- * Pointer to the pagetable in kernel memory, or NULL if we weren't able to map it.
- *
- * Side effects:
- * May modify g_rbtPageTables, and may modify TTB1 if we map a page into memory. May allocate
- * memory from g_pMalloc.
+ * Pointer to the page table, or NULL if the reference could not be resolved.
  */
-static PPAGETAB map_pagetable(PHYSADDR paPageTable)
+static inline PPAGETAB resolve_pagetab(PVMCTXT pvmctxt, PTTB pTTBEntry)
 {
-  register PHYSADDR paOfPage = paPageTable & ~(SYS_PAGE_SIZE - 1);  /* actual page table page's PA */
-  register PPGTMAP ppgtmap;
-
-  ppgtmap = (PPGTMAP)RbtFind(&g_rbtPageTables, (TREEKEY)paOfPage);
-  if (!ppgtmap)
-  {
-    ppgtmap = IMalloc_Alloc(g_pMalloc, sizeof(PGTMAP));
-    ppgtmap->kaPGTPage = _MmAllocKernelAddr(1);
-    ASSERT(ppgtmap->kaPGTPage);
-    if (SUCCEEDED(map_pages0(g_pttb1, g_pttb1Aux, paOfPage, ppgtmap->kaPGTPage, 1, TTBFLAGS_KERNEL_DATA,
-                             PGTBLFLAGS_KERNEL_DATA, PGAUXFLAGS_KERNEL_DATA)))
-    {
-      ppgtmap->uiRefCount = 1;
-      rbtNewNode(&(ppgtmap->rbtn), paOfPage);
-      RbtInsert(&g_rbtPageTables, (PRBTREENODE)ppgtmap);
-    }
-    else
-    {
-      _MmFreeKernelAddr(ppgtmap->kaPGTPage, 1);
-      IMalloc_Free(g_pMalloc, ppgtmap);
-      return NULL;
-    }
-  }
-  else
-    ppgtmap->uiRefCount++;
-  return (PPAGETAB)(ppgtmap->kaPGTPage | (paPageTable & (SYS_PAGE_SIZE - 1)));
-}
-
-/* Forward declaration. */
-static HRESULT demap_pages0(PTTB pTTB, PTTBAUX pTTBAux, KERNADDR vmaBase, UINT32 cpg, UINT32 uiFlags);
-
-/*
- * Demaps a page table's page from kernel memory space.
- *
- * Parameters:
- * - ppgtbl = Pointer to the page table.
- *
- * Returns:
- * Nothing.
- *
- * Side effects:
- * May modify g_rbtPageTables, and may modify TTB1 if we unmap a page from memory. May free
- * memory in g_pMalloc.
- */
-static void demap_pagetable(PPAGETAB ppgtbl)
-{
-  register PHYSADDR paOfPage;
-  register PPGTMAP ppgtmap;
-
-  paOfPage = MmGetPhysAddr(g_pttb1, ((KERNADDR)ppgtbl) & ~(SYS_PAGE_SIZE - 1));
-  ppgtmap = (PPGTMAP)RbtFind(&g_rbtPageTables, (TREEKEY)paOfPage);
-  if (ppgtmap)
-  {
-    if (--(ppgtmap->uiRefCount) == 0)
-    {
-      RbtDelete(&g_rbtPageTables, (TREEKEY)paOfPage);
-      demap_pages0(g_pttb1, g_pttb1Aux, ppgtmap->kaPGTPage, 1, 0);
-      _MmFreeKernelAddr(ppgtmap->kaPGTPage, 1);
-      IMalloc_Free(g_pMalloc, ppgtmap);
-    }
-  }
+  register PPAGENODE ppgn = (PPAGENODE)RbtFind(&(pvmctxt->rbtPageTables),
+                                               (TREEKEY)(pTTBEntry->data & TTBPGTBL_BASE));
+  return ppgn ? ppgn->ppt : NULL;
 }
 
 /*
- * Resolves a specified TTB to either itself or the global TTB1, depending on whether one was specified
- * and on the virtual address to be worked with.
- *
- * Parameters:
- * - pTTB = The specified TTB pointer.
- * - vma = The base virtual address we're working with.
- *
- * Returns:
- * The pointer to the selected TTB, which may be the global variable g_pttb1.
- */
-static inline PTTB resolve_ttb(PTTB pTTB, KERNADDR vma)
-{
-  if (!pTTB || (vma & 0x80000000))
-    return g_pttb1;   /* if no TTB specified or address is out of range for TTB0, use TTB1 */
-  return pTTB;
-}
-
-/*
- * Resolves a specified TTB auxiliary table to either itself or the global TTB1Aux, depending on whether one
+ * Resolves a specified VM context pointer to either itself or the kernel VM context, depending on whether one
  * was specified and on the virtual address to be worked with.
  *
  * Parameters:
- * - pTTBAux = The specified TTB aux table pointer.
+ * - pvmctxt = The specified VM context pointer.
  * - vma = The base virtual address we're working with.
  *
  * Returns:
- * The pointer to the selected TTB aux table, which may be the global variable g_pttb1Aux.
+ * The pointer to the selected VM context, which may be to g_vmctxtKernel.
  */
-static inline PTTBAUX resolve_ttbaux(PTTBAUX pTTBAux, KERNADDR vma)
+static inline PVMCTXT resolve_vmctxt(PVMCTXT pvmctxt, KERNADDR vma)
 {
-  if (!pTTBAux || (vma & 0x80000000))
-    return g_pttb1Aux;
-  return pTTBAux;
+  if (!pvmctxt || (vma & VMADDR_TTB_FENCE))
+    return &g_vmctxtKernel;
+  return pvmctxt;
 }
 
 /*
  * Returns the physical address corresponding to a virtual memory address.
  *
  * Parameters:
- * - pTTB = The TTB to resolve the VM address against. If this is NULL or if the address specified
- *   is outside the TTB0 range, the system TTB is used.
+ * - pvmctxt = The VM context to resolve the address against.
+ * - vma = The virtual memory address to resolve.
  *
  * Returns:
  * The physical address corresponding to the virtual memory address, or NULL if the address could
  * not be resolved (is not mapped, or page table could not be mapped).
  */
-PHYSADDR MmGetPhysAddr(PTTB pTTB, KERNADDR vma)
+static PHYSADDR virt_to_phys(PVMCTXT pvmctxt, KERNADDR vma)
 {
-  PTTB pTTBEntry = resolve_ttb(pTTB, vma) + mmVMA2TTBIndex(vma);
-  PPAGETAB pTab;
-  PHYSADDR rc;
+  register PTTB pTTBEntry = pvmctxt->pTTB + mmVMA2TTBIndex(vma);   /* TTB entry pointer */
+  register PPAGETAB pTab;                                          /* page table pointer */
 
   if ((pTTBEntry->data & TTBQUERY_MASK) == TTBQUERY_FAULT)
     return NULL;   /* we're not allocated */
   if (pTTBEntry->data & TTBSEC_ALWAYS)
     return (pTTBEntry->data & TTBSEC_BASE) | (vma & ~TTBSEC_BASE);   /* resolve section address */
 
-  pTab = map_pagetable(pTTBEntry->data & TTBPGTBL_BASE);
+  pTab = resolve_pagetab(pvmctxt, pTTBEntry);
   if (!pTab)
     return NULL;   /* could not map the page table */
-  rc = (pTab->pgtbl[mmVMA2PGTBLIndex(vma)].pg.pgaddr << SYS_PAGE_BITS) | (vma & (SYS_PAGE_SIZE - 1));
-  demap_pagetable(pTab);
-  return rc;
+  return (pTab->pgtbl[mmVMA2PGTBLIndex(vma)].pg.pgaddr << SYS_PAGE_BITS) | (vma & (SYS_PAGE_SIZE - 1));
 }
+
+/*
+ * Returns the physical address corresponding to a virtual memory address.
+ *
+ * Parameters:
+ * - pvmctxt = The VM context to resolve the address against. If this is NULL or the address specified
+ *   is above the TTB0 fence, the kernel VM context is used.
+ * - vma = The virtual memory address to resolve.
+ *
+ * Returns:
+ * The physical address corresponding to the virtual memory address, or NULL if the address could
+ * not be resolved (is not mapped, or page table could not be mapped).
+ */
+PHYSADDR MmGetPhysAddr(PVMCTXT pvmctxt, KERNADDR vma)
+{
+  return virt_to_phys(resolve_vmctxt(pvmctxt, vma), vma);
+}
+
+/*
+ * Determines whether or not the specified page table is empty.
+ *
+ * Parameters:
+ * - ppgt = Pointer to the page table.
+ *
+ * Returns:
+ * TRUE if the page table is empty, FALSE otherwise.
+ */
+static BOOL is_pagetable_empty(PPAGETAB ppgt)
+{
+  register UINT32 i;   /* loop counter */
+
+  for (i = 0; i < SYS_PGTBL_ENTRIES; i++)
+    if ((ppgt->pgtbl[i].data & PGQUERY_MASK) != PGQUERY_FAULT)
+      return FALSE;
+  return TRUE;
+}
+
+/*
+ * Free a page table by returning it to the free list.
+ *
+ * Parameters:
+ * - pvmctxt = Pointer to the VM context.
+ * - ppgt = Pointer to the page table to be freed.
+ *
+ * Returns:
+ * Nothing.
+ *
+ * Side effects:
+ * May modify the VM context's page-table tree and g_rbtFreePageTables.
+ */
+static void free_page_table(PVMCTXT pvmctxt, PPAGETAB ppgt)
+{
+  PHYSADDR pa = virt_to_phys(pvmctxt, (KERNADDR)ppgt);
+  PPAGENODE ppgn = (PPAGENODE)RbtFind(&(pvmctxt->rbtPageTables), (TREEKEY)pa);
+  if (ppgn)
+  {
+    RbtDelete(&(pvmctxt->rbtPageTables), (TREEKEY)pa);
+    rbtNewNode(&(ppgn->rbtn), ppgn->rbtn.treekey);
+    RbtInsert(&g_rbtFreePageTables, (PRBTREENODE)ppgn);
+  }
+}
 
 /* Flags for demapping. */
|
||||||
* Deallocates page mapping entries within a single current entry in the TTB.
|
* Deallocates page mapping entries within a single current entry in the TTB.
|
||||||
*
|
*
|
||||||
* Parameters:
|
* Parameters:
|
||||||
* - pTTBEntry = Pointer to the TTB entry to deallocate in.
|
* - pvmctxt = Pointer to the VM context.
|
||||||
* - pTTBAuxEntry = Pointer to the TTB aux table entry to deallocate in.
|
* - vmaStart = The starting VMA of the region to demap.
|
||||||
|
* - ndxTTB = Index in the TTB that we're manipulating.
|
||||||
* - ndxPage = Starting index in the page table of the first entry to deallocate.
|
* - ndxPage = Starting index in the page table of the first entry to deallocate.
|
||||||
* - cpg = Count of the number of pages to deallocate. Note that this function will not deallocate more
|
* - cpg = Count of the number of pages to deallocate. Note that this function will not deallocate more
|
||||||
* page mapping entries than remain on the page, as indicated by ndxPage.
|
* page mapping entries than remain on the page, as indicated by ndxPage.
|
||||||
|
@ -228,87 +204,118 @@ PHYSADDR MmGetPhysAddr(PTTB pTTB, KERNADDR vma)
|
||||||
* May modify the TTB entry/aux entry pointed to, and the page table it points to, where applicable. If the
|
* May modify the TTB entry/aux entry pointed to, and the page table it points to, where applicable. If the
|
||||||
* page table is empty after we finish demapping entries, it may be deallocated.
|
* page table is empty after we finish demapping entries, it may be deallocated.
|
||||||
*/
|
*/
|
||||||
static HRESULT demap_pages1(PTTB pTTBEntry, PTTBAUX pTTBAuxEntry, UINT32 ndxPage, UINT32 cpg, UINT32 uiFlags)
|
static HRESULT demap_pages1(PVMCTXT pvmctxt, KERNADDR vmaStart, UINT32 ndxTTB, UINT32 ndxPage, UINT32 cpg,
|
||||||
|
UINT32 uiFlags)
|
||||||
{
|
{
|
||||||
UINT32 cpgCurrent; /* number of pages we're mapping */
|
UINT32 cpgCurrent; /* number of pages we're mapping */
|
||||||
PPAGETAB pTab = NULL; /* pointer to current or new page table */
|
PPAGETAB pTab = NULL; /* pointer to page table */
|
||||||
HRESULT hr; /* return from this function */
|
HRESULT hr; /* return from this function */
|
||||||
register INT32 i; /* loop counter */
|
register INT32 i; /* loop counter */
|
||||||
|
|
||||||
/* Figure out how many entries we're going to demap. */
|
/* Figure out how many entries we're going to demap. */
|
||||||
cpgCurrent = SYS_PGTBL_ENTRIES - ndxPage; /* total free slots on page */
|
cpgCurrent = SYS_PGTBL_ENTRIES - ndxPage; /* total free slots on page */
|
||||||
if (cpg < cpgCurrent)
|
if (cpg < cpgCurrent)
|
||||||
cpgCurrent = cpg; /* only map up to max requested */
|
cpgCurrent = cpg; /* only demap up to max requested */
|
||||||
hr = MAKE_SCODE(SEVERITY_SUCCESS, FACILITY_MEMMGR, cpgCurrent);
|
hr = MAKE_SCODE(SEVERITY_SUCCESS, FACILITY_MEMMGR, cpgCurrent);
|
||||||
|
|
||||||
if ((pTTBEntry->data & TTBSEC_ALWAYS) && (cpgCurrent == SYS_PGTBL_ENTRIES) && (ndxPage == 0))
|
if ((pvmctxt->pTTB[ndxTTB].data & TTBSEC_ALWAYS) && (cpgCurrent == SYS_PGTBL_ENTRIES) && (ndxPage == 0))
|
||||||
{ /* we can kill off the whole section */
|
{ /* we can kill off the whole section */
|
||||||
if (pTTBAuxEntry->aux.sacred && !(uiFlags & DEMAP_NOTHING_SACRED))
|
if (pvmctxt->pTTBAux[ndxTTB].aux.sacred && !(uiFlags & DEMAP_NOTHING_SACRED))
|
||||||
return MEMMGR_E_NOSACRED; /* can't demap a sacred mapping */
|
return MEMMGR_E_NOSACRED; /* can't demap a sacred mapping */
|
||||||
pTTBEntry->data = 0;
|
if (pvmctxt->pTTB[ndxTTB].sec.c)
|
||||||
pTTBAuxEntry->data = 0;
|
_MmFlushCacheForSection(vmaStart, !(pvmctxt->pTTBAux[ndxTTB].aux.unwriteable));
|
||||||
/* TODO: handle TLB and cache */
|
pvmctxt->pTTB[ndxTTB].data = 0;
|
||||||
|
pvmctxt->pTTBAux[ndxTTB].data = 0;
|
||||||
|
_MmFlushTLBForSection(vmaStart);
|
||||||
}
|
}
|
||||||
else if (pTTBEntry->data & TTBPGTBL_ALWAYS)
|
else if (pvmctxt->pTTB[ndxTTB].data & TTBPGTBL_ALWAYS)
|
||||||
{
|
{
|
||||||
pTab = map_pagetable(pTTBEntry->data & TTBPGTBL_BASE);
|
pTab = resolve_pagetab(pvmctxt, pvmctxt->pTTB + ndxTTB);
|
||||||
if (!pTab)
|
if (!pTab)
|
||||||
return MEMMGR_E_NOPGTBL;
|
return MEMMGR_E_NOPGTBL;
|
||||||
for (i = 0; i<cpgCurrent; i++)
|
for (i = 0; i<cpgCurrent; i++)
|
||||||
{
|
{
|
||||||
if (pTab->pgaux[ndxPage + i].aux.sacred && !(uiFlags & DEMAP_NOTHING_SACRED))
|
if (pTab->pgaux[ndxPage + i].aux.sacred && !(uiFlags & DEMAP_NOTHING_SACRED))
|
||||||
{ /* can't demap a sacred mapping */
|
return MEMMGR_E_NOSACRED; /* can't demap a sacred mapping */
|
||||||
hr = MEMMGR_E_NOSACRED;
|
|
||||||
goto pageError;
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
for (i = 0; i<cpgCurrent; i++)
|
for (i = 0; i<cpgCurrent; i++)
|
||||||
{
|
{
|
||||||
|
if (pTab->pgtbl[ndxPage + i].pg.c) /* only flush cache if cacheable */
|
||||||
|
_MmFlushCacheForPage(vmaStart, !(pTab->pgaux[ndxPage + i].aux.unwriteable));
|
||||||
pTab->pgtbl[ndxPage + i].data = 0;
|
pTab->pgtbl[ndxPage + i].data = 0;
|
||||||
pTab->pgaux[ndxPage + i].data = 0;
|
pTab->pgaux[ndxPage + i].data = 0;
|
||||||
/* TODO: handle TLB and cache */
|
_MmFlushTLBForPage(vmaStart);
|
||||||
|
vmaStart += SYS_PAGE_SIZE;
|
||||||
|
}
|
||||||
|
if (is_pagetable_empty(pTab))
|
||||||
|
{ /* The page table is now empty; demap it and put it on our free list. */
|
||||||
|
pvmctxt->pTTB[ndxTTB].data = 0;
|
||||||
|
pvmctxt->pTTBAux[ndxTTB].data = 0;
|
||||||
|
free_page_table(pvmctxt, pTab);
|
||||||
|
_MmFlushTLBForSection(mmIndices2VMA3(ndxTTB, 0, 0));
|
||||||
}
|
}
|
||||||
/* TODO: check to see if page table can be deallocated */
|
|
||||||
pageError:
|
|
||||||
demap_pagetable(pTab);
|
|
||||||
}
|
}
|
||||||
return hr;
|
return hr;
|
||||||
}
|
}
|
||||||
|
|
||||||
static HRESULT demap_pages0(PTTB pTTB, PTTBAUX pTTBAux, KERNADDR vmaBase, UINT32 cpg, UINT32 uiFlags)
|
/*
|
||||||
|
* Deallocates page mapping entries in the specified VM context.
|
||||||
|
*
|
||||||
|
* Parameters:
|
||||||
|
* - pvmctxt = Pointer to the VM context to use.
|
||||||
|
* - vmaBase = Base VM address of the region to demap.
|
||||||
|
* - cpg = Count of the number of pages of memory to demap.
|
||||||
|
* - uiFlags = Flags for operation.
|
||||||
|
*
|
||||||
|
* Returns:
|
||||||
|
* Standard HRESULT success/failure.
|
||||||
|
*/
|
||||||
|
static HRESULT demap_pages0(PVMCTXT pvmctxt, KERNADDR vmaBase, UINT32 cpg, UINT32 uiFlags)
|
||||||
{
|
{
|
||||||
UINT32 ndxTTBMax = (pTTB == g_pttb1) ? SYS_TTB1_ENTRIES : SYS_TTB0_ENTRIES;
|
|
||||||
UINT32 ndxTTB = mmVMA2TTBIndex(vmaBase); /* TTB entry index */
|
UINT32 ndxTTB = mmVMA2TTBIndex(vmaBase); /* TTB entry index */
|
||||||
UINT32 ndxPage = mmVMA2PGTBLIndex(vmaBase); /* starting page entry index */
|
UINT32 ndxPage = mmVMA2PGTBLIndex(vmaBase); /* starting page entry index */
|
||||||
UINT32 cpgRemaining = cpg; /* number of pages remaining to demap */
|
UINT32 cpgRemaining = cpg; /* number of pages remaining to demap */
|
||||||
HRESULT hr; /* temporary result */
|
HRESULT hr; /* temporary result */
|
||||||
|
|
||||||
if ((cpgRemaining > 0) && (ndxPage > 0))
|
if ((cpgRemaining > 0) && (ndxPage > 0))
|
||||||
{
|
{ /* We are starting in the middle of a VM page. Demap to the end of the VM page. */
|
||||||
/* We are starting in the middle of a VM page. Demap to the end of the VM page. */
|
hr = demap_pages1(pvmctxt, vmaBase, ndxTTB, ndxPage, cpgRemaining, uiFlags);
|
||||||
hr = demap_pages1(pTTB + ndxTTB, pTTBAux + ndxTTB, ndxPage, cpgRemaining, uiFlags);
|
|
||||||
if (FAILED(hr))
|
if (FAILED(hr))
|
||||||
return hr;
|
return hr;
|
||||||
cpgRemaining -= SCODE_CODE(hr);
|
cpgRemaining -= SCODE_CODE(hr);
|
||||||
if (++ndxTTB == ndxTTBMax)
|
if (++ndxTTB == pvmctxt->uiMaxIndex)
|
||||||
return MEMMGR_E_ENDTTB;
|
return MEMMGR_E_ENDTTB;
|
||||||
|
vmaBase = mmIndices2VMA3(ndxTTB, 0, 0);
|
||||||
}
|
}
|
||||||
|
|
||||||
while (cpgRemaining > 0)
|
while (cpgRemaining > 0)
|
||||||
{
|
{
|
||||||
hr = demap_pages1(pTTB + ndxTTB, pTTBAux + ndxTTB, 0, cpgRemaining, uiFlags);
|
hr = demap_pages1(pvmctxt, vmaBase, ndxTTB, 0, cpgRemaining, uiFlags);
|
||||||
if (FAILED(hr))
|
if (FAILED(hr))
|
||||||
return hr;
|
return hr;
|
||||||
cpgRemaining -= SCODE_CODE(hr);
|
cpgRemaining -= SCODE_CODE(hr);
|
||||||
if (++ndxTTB == ndxTTBMax)
|
if (++ndxTTB == pvmctxt->uiMaxIndex)
|
||||||
return MEMMGR_E_ENDTTB;
|
return MEMMGR_E_ENDTTB;
|
||||||
|
vmaBase += SYS_SEC_SIZE;
|
||||||
}
|
}
|
||||||
return S_OK;
|
return S_OK;
|
||||||
}
|
}
|
||||||
|
|
||||||
HRESULT MmDemapPages(PTTB pTTB, PTTBAUX pTTBAux, KERNADDR vmaBase, UINT32 cpg)
|
/*
|
||||||
|
* Deallocates page mapping entries in the specified VM context.
|
||||||
|
*
|
||||||
|
* Parameters:
|
||||||
|
* - pvmctxt = Pointer to the VM context to use. If this is NULL or the vmaBase address specified is
|
||||||
|
* above the TTB0 fence, the kernel VM context is used.
|
||||||
|
* - vmaBase = Base VM address of the region to demap.
|
||||||
|
* - cpg = Count of the number of pages of memory to demap.
|
||||||
|
*
|
||||||
|
* Returns:
|
||||||
|
* Standard HRESULT success/failure.
|
||||||
|
*/
|
||||||
|
HRESULT MmDemapPages(PVMCTXT pvmctxt, KERNADDR vmaBase, UINT32 cpg)
|
||||||
{
|
{
|
||||||
return demap_pages0(resolve_ttb(pTTB, vmaBase), resolve_ttbaux(pTTBAux, vmaBase), vmaBase, cpg, 0);
|
return demap_pages0(resolve_vmctxt(pvmctxt, vmaBase), vmaBase, cpg, 0);
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
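Note the return-value protocol shared by demap_pages1 and map_pages1: a successful HRESULT carries the per-TTB-entry page count in its code field, which the outer loops subtract from cpgRemaining. A sketch of that round trip, assuming the usual SCODE accessors:

/* Success carries a payload: the number of pages handled within one TTB entry. */
static void example_scode_payload(void)
{
  HRESULT hr = MAKE_SCODE(SEVERITY_SUCCESS, FACILITY_MEMMGR, 37);
  if (SUCCEEDED(hr))
  {
    UINT32 cpgHandled = SCODE_CODE(hr);   /* 37; caller subtracts from cpgRemaining */
    (void)cpgHandled;
  }
}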
@@ -352,51 +359,155 @@ static UINT32 make_section_flags(UINT32 uiTableFlags, UINT32 uiPageFlags)
  */
 static UINT32 make_section_aux_flags(UINT32 uiPageAuxFlags)
 {
-  register UINT32 rc = uiPageAuxFlags & (PGAUX_SACRED);
+  register UINT32 rc = uiPageAuxFlags & (PGAUX_SACRED|PGAUX_UNWRITEABLE);
   /* TODO if we define any other flags */
   return rc;
 }
 
-static PPAGETAB alloc_page_table(PTTB pttbEntry, PTTBAUX pttbAuxEntry, UINT32 uiTableFlags)
-{
-  PPAGETAB pTab = NULL;   /* new page table pointer */
-  register INT32 i;       /* loop counter */
-
-  /* TODO: pull pTab out of our ass somewhere */
-  if (pTab)
-  {
-    for (i=0; i<SYS_PGTBL_ENTRIES; i++)
-    {
-      pTab->pgtbl[i].data = 0;   /* blank out the new page table */
-      pTab->pgaux[i].data = 0;
-    }
-    /* TODO: use physical address of page here */
-    pttbEntry->data = MmGetPhysAddr(g_pttb1, (KERNADDR)pTab) | uiTableFlags;   /* poke new entry */
-    pttbAuxEntry->data = TTBAUXFLAGS_PAGETABLE;
-  }
-  return pTab;
-}
+/* Flags for mapping. */
+#define MAP_DONT_ALLOC  0x00000001   /* don't try to allocate new page tables */
+
+/* Forward declaration. */
+static HRESULT map_pages0(PVMCTXT pvmctxt, PHYSADDR paBase, KERNADDR vmaBase, UINT32 cpg, UINT32 uiTableFlags,
+                          UINT32 uiPageFlags, UINT32 uiAuxFlags, UINT32 uiFlags);
+
+/*
+ * Allocates a new page table and associates it with the given TTB entry.
+ *
+ * Parameters:
+ * - pvmctxt = Pointer to the VM context.
+ * - pttbEntry = Pointer to the TTB entry. On successful return, this will be updated.
+ * - pttbAuxEntry = Pointer to the TTB auxiliary table entry. On successful return, this will be updated.
+ * - uiTableFlags = Flags to apply to the TTB entry.
+ * - uiFlags = Flags for the mapping operation.
+ * - pppt = Pointer to variable to receive new page table pointer.
+ *
+ * Returns:
+ * Standard HRESULT success/failure.
+ *
+ * Side effects:
+ * The new page table is erased before it is returned. May modify the VM context's page-table tree and
+ * g_rbtFreePageTables. May also allocate a new page of memory.
+ */
+static HRESULT alloc_page_table(PVMCTXT pvmctxt, PTTB pttbEntry, PTTBAUX pttbAuxEntry, UINT32 uiTableFlags,
+                                UINT32 uiFlags, PPAGETAB *pppt)
+{
+  register PPAGENODE ppgn = NULL;   /* page node pointer */
+  PPAGENODE ppgnFree;               /* additional pointer for new "free" entry */
+  HRESULT hr = S_OK;                /* return from this function */
+  PHYSADDR paNewPage = 0;           /* physical address of new page */
+  KERNADDR kaNewPage = 0;           /* kernel address of new page */
+
+  if (rbtIsEmpty(&g_rbtFreePageTables))
+  {
+    if (!(uiFlags & MAP_DONT_ALLOC))
+    {
+      /* TODO: pull a new page out of our ass and assign its PA to paNewPage */
+      if (paNewPage)
+      { /* allocate kernel addresses to map it into */
+        kaNewPage = _MmAllocKernelAddr(1);
+        if (kaNewPage)
+        { /* map the new page in */
+          hr = map_pages0(pvmctxt, paNewPage, kaNewPage, 1, TTBFLAGS_KERNEL_DATA, PGTBLFLAGS_KERNEL_DATA,
+                          PGAUXFLAGS_KERNEL_DATA, MAP_DONT_ALLOC);
+          if (SUCCEEDED(hr))
+          { /* allocate heap memory for two nodes to describe the page tables */
+            ppgnFree = IMalloc_Alloc(g_pMalloc, sizeof(PAGENODE));
+            if (ppgnFree)
+              ppgn = IMalloc_Alloc(g_pMalloc, sizeof(PAGENODE));
+            if (ppgnFree && ppgn)
+            { /* prepare the new nodes and insert them in their respective trees */
+              rbtNewNode(&(ppgnFree->rbtn), paNewPage + sizeof(PAGETAB));
+              ppgnFree->ppt = ((PPAGETAB)kaNewPage) + 1;
+              RbtInsert(&g_rbtFreePageTables, (PRBTREENODE)ppgnFree);
+              rbtNewNode(&(ppgn->rbtn), paNewPage);
+              ppgn->ppt = (PPAGETAB)kaNewPage;
+              RbtInsert(&(pvmctxt->rbtPageTables), (PRBTREENODE)ppgn);
+            }
+            else
+            { /* could not allocate both, free one if was allocated */
+              if (ppgnFree)
+                IMalloc_Free(g_pMalloc, ppgnFree);
+              hr = E_OUTOFMEMORY;
+            }
+            if (FAILED(hr))
+              demap_pages0(pvmctxt, kaNewPage, 1, 0);
+          }
+          if (FAILED(hr))
+            _MmFreeKernelAddr(kaNewPage, 1);
+        }
+        else
+          hr = MEMMGR_E_NOKERNSPC;   /* no kernel space available */
+      }
+      else
+        hr = E_OUTOFMEMORY;          /* no memory to allocate new page table */
+    }
+    else
+      hr = MEMMGR_E_RECURSED;        /* recursive entry */
+  }
+  else
+  { /* get the first item out of the free-pages tree and reinsert it into the current VM context */
+    ppgn = (PPAGENODE)RbtFindMin(&g_rbtFreePageTables);
+    RbtDelete(&g_rbtFreePageTables, ppgn->rbtn.treekey);
+    rbtNewNode(&(ppgn->rbtn), ppgn->rbtn.treekey);
+    RbtInsert(&(pvmctxt->rbtPageTables), (PRBTREENODE)ppgn);
+  }
+
+  if (SUCCEEDED(hr))
+  { /* prepare new page table and insert it into the TTB */
+    StrSetMem(ppgn->ppt, 0, sizeof(PAGETAB));
+    pttbEntry->data = (PHYSADDR)(ppgn->rbtn.treekey) | uiTableFlags;   /* poke new entry */
+    pttbAuxEntry->data = TTBAUXFLAGS_PAGETABLE;
+    *pppt = ppgn->ppt;
+  }
+  else
+    *pppt = NULL;
+  return hr;
+}
 
-static HRESULT map_pages1(PTTB pttbEntry, PTTBAUX pttbAuxEntry, PHYSADDR paBase, UINT32 ndxPage, UINT32 cpg,
-                          UINT32 uiTableFlags, UINT32 uiPageFlags, UINT32 uiAuxFlags)
+/*
+ * Maps pages in the specified VM context within a single TTB entry.
+ *
+ * Parameters:
+ * - pvmctxt = Pointer to the VM context.
+ * - paBase = Base physical address to be mapped.
+ * - ndxTTB = Index in the TTB that we're manipulating.
+ * - ndxPage = Starting index in the page table of the first entry to allocate.
+ * - cpg = Count of the number of pages to allocate. Note that this function will not allocate more
+ *   page mapping entries than remain on the page, as indicated by ndxPage.
+ * - uiTableFlags = TTB-level flags to use for the page table entry.
+ * - uiPageFlags = Page-level flags to use for the page table entry.
+ * - uiAuxFlags = Auxiliary data flags to use for the page table entry.
+ * - uiFlags = Flags for the mapping operation.
+ *
+ * Returns:
+ * Standard HRESULT success/failure. If the result is successful, the SCODE_CODE of the result will
+ * indicate the number of pages actually mapped.
+ *
+ * Side effects:
+ * May modify the TTB entry/aux entry pointed to, and the page table it points to, where applicable. May
+ * also allocate a new page table, which may modify other data structures.
+ */
+static HRESULT map_pages1(PVMCTXT pvmctxt, PHYSADDR paBase, UINT32 ndxTTB, UINT32 ndxPage,
+                          UINT32 cpg, UINT32 uiTableFlags, UINT32 uiPageFlags, UINT32 uiAuxFlags, UINT32 uiFlags)
 {
   UINT32 cpgCurrent;      /* number of pages we're mapping */
   PPAGETAB pTab = NULL;   /* pointer to current or new page table */
   HRESULT hr;             /* return from this function */
   register INT32 i;       /* loop counter */
 
-  switch (pttbEntry->data & TTBQUERY_MASK)
+  switch (pvmctxt->pTTB[ndxTTB].data & TTBQUERY_MASK)
   {
     case TTBQUERY_FAULT:   /* not allocated, allocate a new page table for the slot */
-      pTab = alloc_page_table(pttbEntry, pttbAuxEntry, uiTableFlags);
-      if (!pTab)
-        return MEMMGR_E_NOPGTBL;
+      hr = alloc_page_table(pvmctxt, pvmctxt->pTTB + ndxTTB, pvmctxt->pTTBAux + ndxTTB, uiTableFlags,
+                            uiFlags, &pTab);
+      if (FAILED(hr))
+        return hr;
       break;
 
     case TTBQUERY_PGTBL:   /* existing page table */
-      if ((pttbEntry->data & TTBPGTBL_ALLFLAGS) != uiTableFlags)
+      if ((pvmctxt->pTTB[ndxTTB].data & TTBPGTBL_ALLFLAGS) != uiTableFlags)
        return MEMMGR_E_BADTTBFLG;   /* table flags not compatible */
-      pTab = map_pagetable(pttbEntry->data & TTBPGTBL_BASE);
+      pTab = resolve_pagetab(pvmctxt, pvmctxt->pTTB + ndxTTB);
       if (!pTab)
        return MEMMGR_E_NOPGTBL;     /* could not map the page table */
       break;
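A detail worth calling out in alloc_page_table: each newly allocated 4KB page is carved into two page tables, one handed to the requesting context and its twin parked in g_rbtFreePageTables. This assumes sizeof(PAGETAB) is half a page (256 4-byte descriptors plus 256 4-byte aux entries = 2KB); a sketch of the arithmetic the function performs:

/* Illustration only: the two tables carved from one page. */
static void example_split_page(PHYSADDR paNewPage, KERNADDR kaNewPage)
{
  PHYSADDR paFirst  = paNewPage;                     /* keyed into pvmctxt->rbtPageTables */
  PHYSADDR paSecond = paNewPage + sizeof(PAGETAB);   /* keyed into g_rbtFreePageTables */
  PPAGETAB pptFirst  = (PPAGETAB)kaNewPage;
  PPAGETAB pptSecond = pptFirst + 1;                 /* same +sizeof(PAGETAB) step, via pointer math */
  (void)paFirst; (void)paSecond; (void)pptFirst; (void)pptSecond;
}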
@@ -404,11 +515,11 @@ static HRESULT map_pages1(PTTB pttbEntry, PTTBAUX pttbAuxEntry, PHYSADDR paBase,
     case TTBQUERY_SEC:
     case TTBQUERY_PXNSEC:
       /* this is a section, make sure its base address covers this mapping and its flags are compatible */
-      if ((pttbEntry->data & TTBSEC_ALLFLAGS) != make_section_flags(uiTableFlags, uiPageFlags))
+      if ((pvmctxt->pTTB[ndxTTB].data & TTBSEC_ALLFLAGS) != make_section_flags(uiTableFlags, uiPageFlags))
         return MEMMGR_E_BADTTBFLG;
-      if (pttbAuxEntry->data != make_section_aux_flags(uiAuxFlags))
+      if (pvmctxt->pTTBAux[ndxTTB].data != make_section_aux_flags(uiAuxFlags))
         return MEMMGR_E_BADTTBFLG;
-      if ((pttbEntry->data & TTBSEC_BASE) != (paBase & TTBSEC_BASE))
+      if ((pvmctxt->pTTB[ndxTTB].data & TTBSEC_BASE) != (paBase & TTBSEC_BASE))
         return MEMMGR_E_COLLIDED;
       pTab = NULL;
       break;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
exit:
|
exit:
|
||||||
demap_pagetable(pTab);
|
|
||||||
return hr;
|
return hr;
|
||||||
}
|
}
|
||||||
|
|
||||||
static HRESULT map_pages0(PTTB pTTB, PTTBAUX pTTBAux, PHYSADDR paBase, KERNADDR vmaBase, UINT32 cpg,
|
/*
|
||||||
UINT32 uiTableFlags, UINT32 uiPageFlags, UINT32 uiAuxFlags)
|
* Maps pages in the specified VM context.
|
||||||
|
*
|
||||||
|
* Parameters:
|
||||||
|
* - pvmctxt = Pointer to the VM context.
|
||||||
|
* - paBase = Base physical address to be mapped.
|
||||||
|
* - vmaBase = Base virtual address to be mapped.
|
||||||
|
* - cpg = Count of the number of pages to map.
|
||||||
|
* - uiTableFlags = TTB-level flags to use for the page table entry.
|
||||||
|
* - uiPageFlags = Page-level flags to use for the page table entry.
|
||||||
|
* - uiAuxFlags = Auxiliary data flags to use for the page table entry.
|
||||||
|
* - uiFlags = Flags for the mapping operation.
|
||||||
|
*
|
||||||
|
* Returns:
|
||||||
|
* Standard HRESULT success/failure.
|
||||||
|
*/
|
||||||
|
static HRESULT map_pages0(PVMCTXT pvmctxt, PHYSADDR paBase, KERNADDR vmaBase, UINT32 cpg, UINT32 uiTableFlags,
|
||||||
|
UINT32 uiPageFlags, UINT32 uiAuxFlags, UINT32 uiFlags)
|
||||||
{
|
{
|
||||||
UINT32 ndxTTBMax = (pTTB == g_pttb1) ? SYS_TTB1_ENTRIES : SYS_TTB0_ENTRIES;
|
|
||||||
UINT32 ndxTTB = mmVMA2TTBIndex(vmaBase); /* TTB entry index */
|
UINT32 ndxTTB = mmVMA2TTBIndex(vmaBase); /* TTB entry index */
|
||||||
UINT32 ndxPage = mmVMA2PGTBLIndex(vmaBase); /* starting page entry index */
|
UINT32 ndxPage = mmVMA2PGTBLIndex(vmaBase); /* starting page entry index */
|
||||||
UINT32 cpgRemaining = cpg; /* number of pages remaining to map */
|
UINT32 cpgRemaining = cpg; /* number of pages remaining to map */
|
||||||
|
BOOL bCanMapBySection; /* can we map by section? */
|
||||||
|
UINT32 uiSecFlags = 0; /* section flags */
|
||||||
|
UINT32 uiSecAuxFlags = 0; /* section auxiliary flags */
|
||||||
HRESULT hr; /* temporary result */
|
HRESULT hr; /* temporary result */
|
||||||
|
|
||||||
if ((cpgRemaining > 0) && (ndxPage > 0))
|
if ((cpgRemaining > 0) && (ndxPage > 0))
|
||||||
{
|
{
|
||||||
/* We are starting in the middle of a VM page. Map to the end of the VM page. */
|
/* We are starting in the middle of a VM page. Map to the end of the VM page. */
|
||||||
hr = map_pages1(pTTB + ndxTTB, pTTBAux + ndxTTB, paBase, ndxPage, cpgRemaining, uiTableFlags,
|
hr = map_pages1(pvmctxt, paBase, ndxTTB, ndxPage, cpgRemaining, uiTableFlags, uiPageFlags, uiAuxFlags, uiFlags);
|
||||||
uiPageFlags, uiAuxFlags);
|
|
||||||
if (FAILED(hr))
|
if (FAILED(hr))
|
||||||
return hr;
|
return hr;
|
||||||
cpgRemaining -= SCODE_CODE(hr);
|
cpgRemaining -= SCODE_CODE(hr);
|
||||||
paBase += (SCODE_CODE(hr) << SYS_PAGE_BITS);
|
paBase += (SCODE_CODE(hr) << SYS_PAGE_BITS);
|
||||||
if (++ndxTTB == ndxTTBMax)
|
if (++ndxTTB == pvmctxt->uiMaxIndex)
|
||||||
{
|
{
|
||||||
hr = MEMMGR_E_ENDTTB;
|
hr = MEMMGR_E_ENDTTB;
|
||||||
goto errorExit;
|
goto errorExit;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
bCanMapBySection = MAKEBOOL((cpgRemaining >= SYS_PGTBL_ENTRIES) && ((paBase & TTBSEC_BASE) == paBase));
|
||||||
|
if (bCanMapBySection)
|
||||||
|
{
|
||||||
|
uiSecFlags = make_section_flags(uiTableFlags, uiPageFlags);
|
||||||
|
uiSecAuxFlags = make_section_aux_flags(uiAuxFlags);
|
||||||
|
}
|
||||||
|
|
||||||
while (cpgRemaining >= SYS_PGTBL_ENTRIES)
|
while (cpgRemaining >= SYS_PGTBL_ENTRIES)
|
||||||
{
|
{ /* try to map a whole section's worth at a time */
|
||||||
/* try to map a whole section's worth at a time */
|
if (bCanMapBySection)
|
||||||
if ((paBase & TTBSEC_BASE) == paBase)
|
{ /* paBase is section-aligned now as well, we can use a direct 1Mb section mapping */
|
||||||
{
|
switch (pvmctxt->pTTB[ndxTTB].data & TTBQUERY_MASK)
|
||||||
/* paBase is section-aligned now as well, we can use a direct 1Mb section mapping */
|
|
||||||
switch (pTTB[ndxTTB].data & TTBQUERY_MASK)
|
|
||||||
{
|
{
|
||||||
case TTBQUERY_FAULT: /* unmapped - map the section */
|
case TTBQUERY_FAULT: /* unmapped - map the section */
|
||||||
pTTB[ndxTTB].data = paBase | make_section_flags(uiTableFlags, uiPageFlags);
|
pvmctxt->pTTB[ndxTTB].data = paBase | uiSecFlags;
|
||||||
pTTBAux[ndxTTB].data = make_section_aux_flags(uiAuxFlags);
|
pvmctxt->pTTBAux[ndxTTB].data = uiSecAuxFlags;
|
||||||
break;
|
break;
|
||||||
|
|
||||||
case TTBQUERY_PGTBL: /* collided with a page table */
|
case TTBQUERY_PGTBL: /* page table here */
|
||||||
hr = MEMMGR_E_COLLIDED;
|
goto pageTableFallback;
|
||||||
goto errorExit;
|
|
||||||
|
|
||||||
case TTBQUERY_SEC: /* test existing section */
|
case TTBQUERY_SEC: /* test existing section */
|
||||||
case TTBQUERY_PXNSEC:
|
case TTBQUERY_PXNSEC:
|
||||||
if ((pTTB[ndxTTB].data & TTBSEC_ALLFLAGS) != make_section_flags(uiTableFlags, uiPageFlags))
|
if ( ((pvmctxt->pTTB[ndxTTB].data & TTBSEC_ALLFLAGS) != uiSecFlags)
|
||||||
|
|| (pvmctxt->pTTBAux[ndxTTB].data != uiSecAuxFlags))
|
||||||
{
|
{
|
||||||
hr = MEMMGR_E_BADTTBFLG;
|
hr = MEMMGR_E_BADTTBFLG;
|
||||||
goto errorExit;
|
goto errorExit;
|
||||||
}
|
}
|
||||||
if (pTTBAux[ndxTTB].data != make_section_aux_flags(uiAuxFlags))
|
if ((pvmctxt->pTTB[ndxTTB].data & TTBSEC_BASE) != paBase)
|
||||||
{
|
|
||||||
hr = MEMMGR_E_BADTTBFLG;
|
|
||||||
goto errorExit;
|
|
||||||
}
|
|
||||||
if ((pTTB[ndxTTB].data & TTBSEC_BASE) != paBase)
|
|
||||||
{
|
{
|
||||||
hr = MEMMGR_E_COLLIDED;
|
hr = MEMMGR_E_COLLIDED;
|
||||||
goto errorExit;
|
goto errorExit;
|
||||||
|
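The new bCanMapBySection test hoists the section-eligibility check out of the loop: mapping by 1MB section requires at least a section's worth of pages and a section-aligned physical base, and both properties are preserved as the loop advances. A worked check, assuming TTBSEC_BASE is the 1MB base mask 0xFFF00000:

static BOOL example_is_section_aligned(PHYSADDR paBase)
{
  /* 0x00200000 & 0xFFF00000 == 0x00200000 -> aligned;
   * 0x00208000 & 0xFFF00000 == 0x00200000 != 0x00208000 -> not aligned */
  return MAKEBOOL((paBase & TTBSEC_BASE) == paBase);
}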
@@ -511,14 +638,15 @@ static HRESULT map_pages0(PTTB pTTB, PTTBAUX pTTBAux, PHYSADDR paBase, KERNADDR
     else
     {
       /* just map 256 individual pages */
-      hr = map_pages1(pTTB + ndxTTB, pTTBAux + ndxTTB, paBase, 0, cpgRemaining, uiTableFlags, uiPageFlags, uiAuxFlags);
+pageTableFallback:
+      hr = map_pages1(pvmctxt, paBase, ndxTTB, 0, cpgRemaining, uiTableFlags, uiPageFlags, uiAuxFlags, uiFlags);
       if (FAILED(hr))
         goto errorExit;
     }
     /* adjust base physical address, page count, and TTB index */
     paBase += (SCODE_CODE(hr) << SYS_PAGE_BITS);
     cpgRemaining -= SCODE_CODE(hr);
-    if (++ndxTTB == ndxTTBMax)
+    if (++ndxTTB == pvmctxt->uiMaxIndex)
     {
       hr = MEMMGR_E_ENDTTB;
       goto errorExit;
@@ -526,37 +654,65 @@ static HRESULT map_pages0(PTTB pTTB, PTTBAUX pTTBAux, PHYSADDR paBase, KERNADDR
   }

   if (cpgRemaining > 0)
-  {
-    /* map the "tail end" onto the next TTB */
-    hr = map_pages1(pTTB + ndxTTB, pTTBAux + ndxTTB, paBase, 0, cpgRemaining, uiTableFlags, uiPageFlags, uiAuxFlags);
+  { /* map the "tail end" onto the next TTB */
+    hr = map_pages1(pvmctxt, paBase, ndxTTB, 0, cpgRemaining, uiTableFlags, uiPageFlags, uiAuxFlags, uiFlags);
     if (FAILED(hr))
       goto errorExit;
   }
   return S_OK;
 errorExit:
   /* demap everything we've managed to map thus far */
-  demap_pages0(pTTB, pTTBAux, vmaBase, cpg - cpgRemaining, DEMAP_NOTHING_SACRED);
+  demap_pages0(pvmctxt, vmaBase, cpg - cpgRemaining, DEMAP_NOTHING_SACRED);
   return hr;
 }

-HRESULT MmMapPages(PTTB pTTB, PTTBAUX pTTBAux, PHYSADDR paBase, KERNADDR vmaBase, UINT32 cpg, UINT32 uiTableFlags,
+/*
+ * Maps pages in the specified VM context.
+ *
+ * Parameters:
+ * - pvmctxt = Pointer to the VM context to use.  If this is NULL or the vmaBase address specified is
+ *             above the TTB0 fence, the kernel VM context is used.
+ * - paBase = Base physical address to be mapped.
+ * - vmaBase = Base virtual address to be mapped.
+ * - cpg = Count of the number of pages to map.
+ * - uiTableFlags = TTB-level flags to use for the page table entry.
+ * - uiPageFlags = Page-level flags to use for the page table entry.
+ * - uiAuxFlags = Auxiliary data flags to use for the page table entry.
+ *
+ * Returns:
+ * Standard HRESULT success/failure.
+ */
+HRESULT MmMapPages(PVMCTXT pvmctxt, PHYSADDR paBase, KERNADDR vmaBase, UINT32 cpg, UINT32 uiTableFlags,
                    UINT32 uiPageFlags, UINT32 uiAuxFlags)
 {
-  return map_pages0(resolve_ttb(pTTB, vmaBase), resolve_ttbaux(pTTBAux, vmaBase), paBase, vmaBase, cpg,
-                    uiTableFlags, uiPageFlags, uiAuxFlags);
+  return map_pages0(resolve_vmctxt(pvmctxt, vmaBase), paBase, vmaBase, cpg, uiTableFlags, uiPageFlags, uiAuxFlags, 0);
 }

-HRESULT MmMapKernelPages(PTTB pTTB, PTTBAUX pTTBAux, PHYSADDR paBase, UINT32 cpg, UINT32 uiTableFlags,
+/*
+ * Maps pages into the kernel address space.  The mapping is done in the kernel VM context.
+ *
+ * Parameters:
+ * - paBase = Base physical address to be mapped.
+ * - cpg = Count of the number of pages to map.
+ * - uiTableFlags = TTB-level flags to use for the page table entry.
+ * - uiPageFlags = Page-level flags to use for the page table entry.
+ * - uiAuxFlags = Auxiliary data flags to use for the page table entry.
+ * - pvmaLocation = Pointer to a variable which will receive the VM address of the mapped pages.
+ *
+ * Returns:
+ * Standard HRESULT success/failure.
+ */
+HRESULT MmMapKernelPages(PHYSADDR paBase, UINT32 cpg, UINT32 uiTableFlags,
                          UINT32 uiPageFlags, UINT32 uiAuxFlags, PKERNADDR pvmaLocation)
 {
-  register HRESULT hr;
+  register HRESULT hr;  /* return from this function */

   if (!pvmaLocation)
     return E_POINTER;
   *pvmaLocation = _MmAllocKernelAddr(cpg);
   if (!(*pvmaLocation))
     return MEMMGR_E_NOKERNSPC;
-  hr = MmMapPages(pTTB, pTTBAux, paBase, *pvmaLocation, cpg, uiTableFlags, uiPageFlags, uiAuxFlags);
+  hr = map_pages0(&g_vmctxtKernel, paBase, *pvmaLocation, cpg, uiTableFlags, uiPageFlags, uiAuxFlags, 0);
   if (FAILED(hr))
   {
     _MmFreeKernelAddr(*pvmaLocation, cpg);
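With the reworked signatures, a kernel caller no longer passes TTB pointers around; it asks for an address and gets one back. A usage sketch built only from the signatures and flag constants visible in this diff (paDeviceRegs is a hypothetical physical page address):

    /* Usage sketch, hypothetical caller: map one page into kernel space,
     * use it, then release both the mapping and the kernel address range. */
    KERNADDR kaRegs;
    HRESULT hr = MmMapKernelPages(paDeviceRegs, 1, TTBFLAGS_KERNEL_DATA,
                                  PGTBLFLAGS_KERNEL_DATA, PGAUXFLAGS_KERNEL_DATA, &kaRegs);
    if (SUCCEEDED(hr))
    { /* ... access the page through kaRegs ... */
      MmDemapKernelPages(kaRegs, 1);  /* also frees the kernel address range */
    }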
@@ -565,13 +721,24 @@ HRESULT MmMapKernelPages(PTTB pTTB, PTTBAUX pTTBAux, PHYSADDR paBase, UINT32 cpg
     return hr;
   }

-HRESULT MmDemapKernelPages(PTTB pTTB, PTTBAUX pTTBAux, KERNADDR vmaBase, UINT32 cpg)
+/*
+ * Unmaps pages from the kernel address space and reclaims that address space for later use.
+ * The mapping is done in the kernel VM context.
+ *
+ * Parameters:
+ * - vmaBase = Base VM address of the region to be unmapped.
+ * - cpg = Number of pages to be unmapped.
+ *
+ * Returns:
+ * Standard HRESULT success/failure.
+ */
+HRESULT MmDemapKernelPages(KERNADDR vmaBase, UINT32 cpg)
 {
   register HRESULT hr;

-  if ((vmaBase & 0xC0000000) != 0xC0000000)
+  if ((vmaBase & VMADDR_KERNEL_FENCE) != VMADDR_KERNEL_FENCE)
     return E_INVALIDARG;
-  hr = MmDemapPages(pTTB, pTTBAux, vmaBase, cpg);
+  hr = demap_pages0(&g_vmctxtKernel, vmaBase, cpg, 0);
   if (SUCCEEDED(hr))
     _MmFreeKernelAddr(vmaBase, cpg);
   return hr;
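MmMapPages above routes through resolve_vmctxt, which does not appear in these hunks. A minimal sketch consistent with the documented rule (NULL context or a kernel-side address resolves to the kernel context); the fence constant and the exact test are assumptions, only the rule comes from the comment above:

    /* Minimal sketch, not the actual implementation: select the kernel VM
     * context when no context is supplied or the address is kernel-side.
     * VMADDR_TTB0_FENCE is an assumed name for the TTB0/TTB1 split. */
    static PVMCTXT resolve_vmctxt(PVMCTXT pvmctxt, KERNADDR vma)
    {
      if (!pvmctxt || ((vma & VMADDR_TTB0_FENCE) == VMADDR_TTB0_FENCE))
        return &g_vmctxtKernel;
      return pvmctxt;
    }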
@@ -582,11 +749,63 @@ HRESULT MmDemapKernelPages(PTTB pTTB, PTTBAUX pTTBAux, KERNADDR vmaBase, UINT32
  *---------------------
  */

+/*
+ * Initialize the virtual-memory mapping.
+ *
+ * Parameters:
+ * - pstartup = Pointer to the STARTUP_INFO data structure.
+ * - pmInitHeap = Pointer to the initialization heap's IMalloc interface.
+ *
+ * Returns:
+ * Nothing.
+ *
+ * Side effects:
+ * Sets up the data structures allocated statically in this file.
+ */
 SEG_INIT_CODE void _MmInitVMMap(PSTARTUP_INFO pstartup, PMALLOC pmInitHeap)
 {
+  PHYSADDR paPageTable;   /* PA of current page table */
+  KERNADDR kaPageTable;   /* KA of current page table */
+  PPAGENODE ppgn;         /* pointer to node being allocated & inserted */
+  register UINT32 i;      /* loop counter */
+
+  /* Initialize the local variables in this module. */
   g_pMalloc = pmInitHeap;
   IUnknown_AddRef(g_pMalloc);
-  g_pttb1 = (PTTB)(pstartup->kaTTB);
-  g_pttb1Aux = (PTTBAUX)(pstartup->kaTTBAux);
-  rbtInitTree(&g_rbtPageTables, RbtStdCompareByValue);
+  g_vmctxtKernel.pTTB = (PTTB)(pstartup->kaTTB);
+  g_vmctxtKernel.pTTBAux = (PTTBAUX)(pstartup->kaTTBAux);
+  rbtInitTree(&(g_vmctxtKernel.rbtPageTables), RbtStdCompareByValue);
+  rbtInitTree(&g_rbtFreePageTables, RbtStdCompareByValue);
+
+  /*
+   * Load all the page tables we know about.  They all get mapped in as part of the kernel context, except if
+   * there's one free on the last page; it gets added to the free list.
+   */
+  paPageTable = pstartup->paFirstPageTable;
+  for (i = 0; i < pstartup->cpgPageTables; i++)
+  { /* map page table into kernel space */
+    kaPageTable = _MmAllocKernelAddr(1);
+    ASSERT(kaPageTable);
+    VERIFY(SUCCEEDED(map_pages0(&g_vmctxtKernel, paPageTable, kaPageTable, 1, TTBFLAGS_KERNEL_DATA,
+                                PGTBLFLAGS_KERNEL_DATA, PGAUXFLAGS_KERNEL_DATA, MAP_DONT_ALLOC)));
+
+    /* allocate node for first page table on page */
+    ppgn = IMalloc_Alloc(g_pMalloc, sizeof(PAGENODE));
+    ASSERT(ppgn);
+    rbtNewNode(&(ppgn->rbtn), paPageTable);
+    ppgn->ppt = (PPAGETAB)kaPageTable;
+    RbtInsert(&(g_vmctxtKernel.rbtPageTables), (PRBTREENODE)ppgn);
+
+    /* allocate node for second page table on page */
+    ppgn = IMalloc_Alloc(g_pMalloc, sizeof(PAGENODE));
+    ASSERT(ppgn);
+    rbtNewNode(&(ppgn->rbtn), paPageTable + sizeof(PAGETAB));
+    ppgn->ppt = ((PPAGETAB)kaPageTable) + 1;
+    if ((i == (pstartup->cpgPageTables - 1)) && pstartup->ctblFreeOnLastPage)
+      RbtInsert(&g_rbtFreePageTables, (PRBTREENODE)ppgn);
+    else
+      RbtInsert(&(g_vmctxtKernel.rbtPageTables), (PRBTREENODE)ppgn);
+
+    paPageTable += SYS_PAGE_SIZE;   /* advance to next page table page */
+  }
 }
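Note that the loop registers two PAGETAB structures per physical page, keying the second node at paPageTable + sizeof(PAGETAB); sizeof(PAGETAB) is evidently half a page. Once registered in the context's red-black tree, the physical base stored in a TTB entry can be turned back into a usable kernel-space pointer. A hypothetical lookup sketch (RbtFind and the TREEKEY cast are assumed names from the rbtree API; only the tree layout comes from the code above):

    /* Hypothetical sketch: recover the kernel-space pointer to a page table
     * from its physical address by searching the owning context's RB tree. */
    static PPAGETAB find_page_table(PVMCTXT pvmctxt, PHYSADDR paPageTable)
    {
      PPAGENODE ppgn = (PPAGENODE)RbtFind(&(pvmctxt->rbtPageTables), (TREEKEY)paPageTable);
      return ppgn ? ppgn->ppt : NULL;   /* NULL if this context doesn't own it */
    }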