begin rework of vmmap.c to use rbtree to track stuff, also added the
kernel_space manager code and a bunch of other bits and bobs

parent cf0e7ea2be
commit 4c6b86ffbd

@@ -45,7 +45,8 @@
 #define VMADDR_LIBRARY_FENCE  0xB0000000   /* base address for kernel "shared library" code */
 #define VMADDR_KERNEL_FENCE   0xC0000000   /* base address for the internal kernel code */
 #define VMADDR_IO_BASE        0xE0000000   /* base address for memory-mapped IO */
-#define PAGE_COUNT_IO         1024         /* 4 megabytes mapped for IO */
+#define PAGE_COUNT_IO         4096         /* 16 megabytes mapped for IO */
+#define VMADDR_KERNEL_NOMANS  0xFFFF0000   /* start of kernel "no man's land" */
 
 #endif /* __COMROGUE_INTERNALS__ */
 
@@ -43,11 +43,18 @@
 
 CDECL_BEGIN
 
+/* Kernel address space functions */
+extern KERNADDR _MmAllocKernelAddr(UINT32 cpgNeeded);
+extern void _MmFreeKernelAddr(KERNADDR kaBase, UINT32 cpgToFree);
+
 /* Page mapping functions */
 extern PHYSADDR MmGetPhysAddr(PTTB pTTB, KERNADDR vma);
-extern HRESULT MmDemapPages(PTTB pTTB, KERNADDR vmaBase, UINT32 cpg);
-extern HRESULT MmMapPages(PTTB pTTB, PHYSADDR paBase, KERNADDR vmaBase, UINT32 cpg, UINT32 uiTableFlags,
-                          UINT32 uiPageFlags);
+extern HRESULT MmDemapPages(PTTB pTTB, PTTBAUX pTTBAux, KERNADDR vmaBase, UINT32 cpg);
+extern HRESULT MmMapPages(PTTB pTTB, PTTBAUX pTTBAux, PHYSADDR paBase, KERNADDR vmaBase, UINT32 cpg,
+                          UINT32 uiTableFlags, UINT32 uiPageFlags, UINT32 uiAuxFlags);
+extern HRESULT MmMapKernelPages(PTTB pTTB, PTTBAUX pTTBAux, PHYSADDR paBase, UINT32 cpg, UINT32 uiTableFlags,
+                                UINT32 uiPageFlags, UINT32 uiAuxFlags, PKERNADDR pvmaLocation);
+extern HRESULT MmDemapKernelPages(PTTB pTTB, PTTBAUX pTTBAux, KERNADDR vmaBase, UINT32 cpg);
 
 /* Initialization functions only */
 extern void _MmInit(PSTARTUP_INFO pstartup);
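The new declarations split the job in two: _MmAllocKernelAddr/_MmFreeKernelAddr hand out kernel virtual address ranges, and the Mm*Pages calls put physical pages behind them (MmMapKernelPages bundles both steps and returns the chosen address through pvmaLocation). A rough caller sketch, not part of the commit; the physical address, page count, and S_OK-style error check are assumptions:

    /* Hypothetical usage of the new API; assumes the prototypes above and COM-style HRESULTs. */
    static HRESULT map_device_window(PTTB pTTB, PTTBAUX pTTBAux, PHYSADDR paDevice)
    {
      HRESULT hr;
      KERNADDR kaWindow = _MmAllocKernelAddr(4);   /* reserve 4 pages of kernel address space */
      if (kaWindow == 0)
        return MEMMGR_E_NOKERNSPC;                 /* error code added later in this commit */
      hr = MmMapPages(pTTB, pTTBAux, paDevice, kaWindow, 4,
                      TTBFLAGS_MMIO, PGTBLFLAGS_MMIO, PGAUXFLAGS_MMIO);
      if (hr != S_OK)                              /* assuming the usual COM-style success code */
        _MmFreeKernelAddr(kaWindow, 4);            /* give the addresses back on failure */
      return hr;
    }
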
@@ -48,6 +48,7 @@
 #define SYS_TTB1_ENTRIES  4096      /* SYS_TTB1_SIZE/4, number of entries in TTB1 */
 #define SYS_TTB_BITS      12        /* log2(SYS_TTB1_SIZE/4), number of bits in a TTB address */
 #define SYS_SEC_SIZE      1048576   /* standard section size */
+#define SYS_SEC_BITS      20        /* number of bits in a section address */
 #define SYS_SEC_PAGES     256       /* SYS_SEC_SIZE/SYS_PAGE_SIZE, number of pages equivalent to a section */
 #define SYS_PGTBL_SIZE    1024      /* page tables must be located on this boundary and are this size */
 #define SYS_PGTBL_BITS    8         /* log2(SYS_PGTBL_SIZE/4), number of bits in a page table address */

@@ -100,6 +101,9 @@
 #define TTBQUERY_SEC      0x00000002   /* indicates a section */
 #define TTBQUERY_PXNSEC   0x00000003   /* indicates a section with PXN (or reserved) */
 
+/* TTB auxiliary descriptor bits */
+#define TTBAUX_SACRED     0x00000001   /* sacred entry, do not deallocate */
+
 /* Small page table entry bits */
 #define PGTBLSM_XN        0x00000001   /* Execute-Never */
 #define PGTBLSM_ALWAYS    0x00000002   /* this bit must always be set for a page table entry */

@@ -125,15 +129,29 @@
 #define PGQUERY_SM               0x00000002   /* small page (4K) */
 #define PGQUERY_SM_XN            0x00000003   /* small page with Execute-Never set */
 
+/* Page auxiliary descriptor bits */
+#define PGAUX_SACRED             0x00000001   /* sacred entry, do not deallocate */
+
 /* Combinations of flags we use regularly. */
 #define TTBFLAGS_LIB_CODE        TTBPGTBL_ALWAYS
 #define PGTBLFLAGS_LIB_CODE      (PGTBLSM_ALWAYS | PGTBLSM_B | PGTBLSM_C | PGTBLSM_AP10)
+#define PGAUXFLAGS_LIB_CODE      PGAUX_SACRED
 #define TTBFLAGS_KERNEL_CODE     TTBPGTBL_ALWAYS
 #define PGTBLFLAGS_KERNEL_CODE   (PGTBLSM_ALWAYS | PGTBLSM_B | PGTBLSM_C | PGTBLSM_AP01)
+#define PGAUXFLAGS_KERNEL_CODE   PGAUX_SACRED
 #define TTBFLAGS_KERNEL_DATA     TTBPGTBL_ALWAYS
 #define PGTBLFLAGS_KERNEL_DATA   (PGTBLSM_XN | PGTBLSM_ALWAYS | PGTBLSM_B | PGTBLSM_C | PGTBLSM_AP01)
+#define PGAUXFLAGS_KERNEL_DATA   PGAUX_SACRED
+#define TTBFLAGS_INIT_CODE       TTBFLAGS_KERNEL_CODE
+#define PGTBLFLAGS_INIT_CODE     PGTBLFLAGS_KERNEL_CODE
+#define PGAUXFLAGS_INIT_CODE     0
+#define TTBFLAGS_INIT_DATA       TTBFLAGS_KERNEL_DATA
+#define PGTBLFLAGS_INIT_DATA     PGTBLFLAGS_KERNEL_DATA
+#define PGAUXFLAGS_INIT_DATA     0
 #define TTBFLAGS_MMIO            TTBPGTBL_ALWAYS
 #define PGTBLFLAGS_MMIO          (PGTBLSM_ALWAYS | PGTBLSM_AP01)
+#define PGAUXFLAGS_MMIO          PGAUX_SACRED
+#define TTBAUXFLAGS_PAGETABLE    0
 
 #ifndef __ASM__
 
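Every mapping now carries three flag words that travel together: a TTBFLAGS_* value for the first-level entry, a PGTBLFLAGS_* value for the second-level page table entries, and one of the new PGAUXFLAGS_* values for the auxiliary entries (which mark boot-time mappings "sacred" so they are never deallocated). Purely as an illustration, with made-up addresses, a kernel-data mapping would pass the matching triple:

    /* Illustrative only; the addresses are hypothetical, the prototype is the one added above. */
    hr = MmMapPages(pTTB, pTTBAux,
                    0x00200000,               /* paBase: some page-aligned physical address */
                    0xC0400000,               /* vmaBase: some kernel virtual address */
                    16,                       /* cpg: sixteen 4K pages */
                    TTBFLAGS_KERNEL_DATA,     /* first-level (TTB) flags */
                    PGTBLFLAGS_KERNEL_DATA,   /* page table entry flags */
                    PGAUXFLAGS_KERNEL_DATA);  /* auxiliary flags: PGAUX_SACRED */
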
@@ -186,6 +204,18 @@ typedef union tagTTB {
   TTBSEC sec;      /* 1Mb section data */
 } TTB, *PTTB;
 
+/* TTB auxiliary descriptor */
+typedef struct tagTTBAUXENTRY {
+  unsigned sacred : 1;      /* sacred TTB - should never be deallocated */
+  unsigned reserved : 31;   /* reserved for future allocation */
+} TTBAUXENTRY, *PTTBAUXENTRY;
+
+/* TTB auxiliary table entry */
+typedef union tagTTBAUX {
+  UINT32 data;              /* raw data for entry */
+  TTBAUXENTRY aux;          /* aux entry itself */
+} TTBAUX, *PTTBAUX;
+
 /* page table descriptor for a fault entry */
 typedef struct tagPGTBLFAULT {
   unsigned always0 : 2;     /* bits are always 0 for a fault entry */

@@ -213,10 +243,16 @@ typedef union tagPGTBL {
   PGTBLSM pg;      /* small page descriptor */
 } PGTBL, *PPGTBL;
 
+/* page auxiliary descriptor */
+typedef struct tagPGAUXENTRY {
+  unsigned sacred : 1;      /* sacred page - should never be deallocated */
+  unsigned reserved : 31;   /* reserved for future allocation */
+} PGAUXENTRY, *PPGAUXENTRY;
+
 /* page table auxiliary entry */
 typedef union tagPGAUX {
   UINT32 data;              /* raw data for entry */
-  /* TODO */
+  PGAUXENTRY aux;           /* the auxiliary entry itself */
 } PGAUX, *PPGAUX;
 
 /* complete structure of a page table, hardware + auxiliary */
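Each auxiliary entry is a 32-bit union: the raw data word is what the mapping code reads and writes, and the bitfield view names the same bit that TTBAUX_SACRED/PGAUX_SACRED cover. A tiny hypothetical helper (not in the commit) showing the two equivalent views:

    /* Hypothetical helper; assumes the PGAUX union above. */
    static BOOL page_is_sacred(const PGAUX *pAux)
    {
      return (pAux->data & PGAUX_SACRED) != 0;   /* equivalently: pAux->aux.sacred */
    }
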
@@ -66,14 +66,14 @@ typedef struct tagRBTREENODE {
 #define rbtNodeRight(ptn)    ((PRBTREENODE)((ptn)->ptnRightColor & ~1))
 #define rbtNodeColor(ptn)    ((ptn)->ptnRightColor & 1)
 #define rbtIsRed(ptn)        ((ptn) ? rbtNodeColor(ptn) : FALSE)
-#define rbtSetNodeRight(ptn, ptnRight) \
-  do { (ptn)->ptnRightColor = (((UINT_PTR)(ptnRight)) & ~1) | ((ptn)->ptnRightColor & 1); } while (0)
+#define rbtSetNodeRight(ptn, ptnR) \
+  do { (ptn)->ptnRightColor = (((UINT_PTR)(ptnR)) & ~1) | ((ptn)->ptnRightColor & 1); } while (0)
 #define rbtSetNodeColor(ptn, clr) \
   do { (ptn)->ptnRightColor = ((ptn)->ptnRightColor & ~1) | ((clr) ? 1 : 0); } while (0)
 #define rbtToggleColor(ptn)  do { if (ptn) (ptn)->ptnRightColor ^= 1; } while (0)
-#define rbtInitNode(ptn, ptnLeft, ptnRight, clr, key) \
-  do { (ptn)->ptnLeft = (ptnLeft); (ptn)->ptnRightColor = (((UINT_PTR)(ptnRight)) & ~1) | ((clr) ? 1 : 0); \
-       (ptn)->treekey = (key); } while (0)
+#define rbtInitNode(ptn, ptnL, ptnR, clr, key) \
+  do { (ptn)->ptnLeft = (ptnL); (ptn)->ptnRightColor = (((UINT_PTR)(ptnR)) & ~1) | ((clr) ? 1 : 0); \
+       (ptn)->treekey = (TREEKEY)(key); } while (0)
 #define rbtNewNode(ptn, key) rbtInitNode(ptn, NULL, NULL, RED, key)
 
 /* The head-of-tree structure. */

@@ -86,14 +86,20 @@ typedef struct tagRBTREE {
 #define rbtInitTree(ptree, pfnCompare) \
   do { (ptree)->pfnTreeCompare = (pfnCompare); (ptree)->ptnRoot = NULL; } while (0)
 
+/* Type of function used by RbtWalk. */
+typedef BOOL (*PFNRBTWALK)(PRBTREE, PRBTREENODE, PVOID);
+
 /* Function prototypes. */
 CDECL_BEGIN
 
 extern INT32 RbtStdCompareByValue(TREEKEY k1, TREEKEY k2);
 extern void RbtInsert(PRBTREE ptree, PRBTREENODE ptnNew);
 extern PRBTREENODE RbtFind(PRBTREE ptree, TREEKEY key);
+extern PRBTREENODE RbtFindPredecessor(PRBTREE ptree, TREEKEY key);
+extern PRBTREENODE RbtFindSuccessor(PRBTREE ptree, TREEKEY key);
 extern PRBTREENODE RbtFindMin(PRBTREE ptree);
 extern void RbtDelete(PRBTREE ptree, TREEKEY key);
+extern BOOL RbtWalk(PRBTREE ptree, PFNRBTWALK pfnWalk, PVOID pData);
 
 CDECL_END
 
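The walk callback's BOOL return value acts as a "keep going" flag: RbtWalk visits nodes until a callback returns FALSE, and itself returns FALSE when the walk was cut short, which is how the new kernel_space.c code (added below) searches for a large-enough free interval. A minimal hypothetical walker matching the PFNRBTWALK type:

    /* Hypothetical callback; pData points at a caller-supplied UINT32 counter. */
    static BOOL count_nodes(PRBTREE ptree, PRBTREENODE ptn, PVOID pData)
    {
      (*((UINT32 *)pData))++;
      return TRUE;           /* TRUE = keep walking the rest of the tree */
    }

    /* usage:  UINT32 c = 0;  RbtWalk(&tree, count_nodes, &c); */
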
@@ -177,6 +177,8 @@ typedef struct tagSTARTUP_INFO {
   UINT32 cpgSystemTotal;   /* total number of memory pages in the system */
   UINT32 cpgSystemAvail;   /* available memory pages in the system after GPU takes its bite */
   UINT32 cpgTTBGap;        /* number of pages in the "gap" between the end of kernel and TTB */
+  PHYSADDR paTTBAux;       /* physical address of the auxiliary TTB data */
+  KERNADDR kaTTBAux;       /* kernel address of the auxiliary TTB data */
   PHYSADDR paMPDB;         /* physical address of the Master Page Database */
   KERNADDR kaMPDB;         /* kernel address of the Master Page Database */
   UINT32 cpgMPDB;          /* number of pages we allocated for Master Page Database */

@@ -102,5 +102,7 @@
 #define MEMMGR_E_BADTTBFLG  SCODE_CAST(0x86010002)   /* bad TTB flags encountered */
 #define MEMMGR_E_COLLIDED   SCODE_CAST(0x86010003)   /* memory mapping collided */
 #define MEMMGR_E_ENDTTB     SCODE_CAST(0x86010004)   /* tried to "walk off" end of TTB */
+#define MEMMGR_E_NOSACRED   SCODE_CAST(0x86010005)   /* tried to demap a "sacred" entry */
+#define MEMMGR_E_NOKERNSPC  SCODE_CAST(0x86010006)   /* no kernel space */
 
 #endif /* __SCODE_H_INCLUDED */

@@ -90,6 +90,8 @@ typedef unsigned long long uint64_t;
 /* Internal system types */
 typedef UINT32 PHYSADDR;   /* physical address */
 typedef UINT32 KERNADDR;   /* kernel address */
+typedef PHYSADDR *PPHYSADDR;
+typedef KERNADDR *PKERNADDR;
 
 #endif /* __COMROGUE_INTERNALS__ */
 

@@ -49,7 +49,7 @@ ASM_CPP_FLAGS = $(INCLUDES) $(DEFS) -D__ASM__
 
 PRESTART_OBJS = prestart.o early_trace.o collect_startup.o early_mm.o
 LIB_OBJS = divide.o qdivrem.o intlib.o objhelp.o rbtree.o str.o strcopymem.o strcomparemem.o strsetmem.o lib_guids.o
-RES_OBJS = lowlevel.o trace.o memmgr.o vmmap.o pagealloc.o
+RES_OBJS = lowlevel.o trace.o memmgr.o vmmap.o pagealloc.o kernel_space.o
 INIT_OBJS = start.o kistart.o init_heap.o
 
 all: kernel.img

@@ -41,6 +41,8 @@ SEG_INIT_DATA static STARTUP_INFO startup_info = {
   .paTTB = 0,
   .kaTTB = 0,
   .cpgTTBGap = 0,
+  .paTTBAux = 0,
+  .kaTTBAux = 0,
   .paMPDB = 0,
   .kaMPDB = 0,
   .cpgMPDB = 0,

@@ -49,6 +49,7 @@ DECLARE_THIS_FILE
 
 /* Data stored in here temporarily and reflected back to startup info when we're done. */
 SEG_INIT_DATA static PTTB g_pTTB = NULL;               /* pointer to TTB */
+SEG_INIT_DATA static PTTBAUX g_pTTBAux = NULL;         /* pointer to TTB auxiliary data */
 SEG_INIT_DATA static UINT32 g_cpgForPageTables = 0;    /* number of pages being used for page tables */
 SEG_INIT_DATA static UINT32 g_ctblFreeonLastPage = 0;  /* number of page tables free on last page */
 SEG_INIT_DATA static PPAGETAB g_ptblNext = NULL;       /* pointer to next free page table */
@@ -84,35 +85,41 @@ SEG_INIT_CODE static UINT32 make_section_flags(UINT32 uiTableFlags, UINT32 uiPag
 }
 
 /*
- * Allocates page mapping entries within a single current entry in the TTB.
+ * Morphs the "auxiliary flags" bits used for a page table entry into "auxiliary flags" used for a TTB entry.
  *
  * Parameters:
- * - paBase = The page-aligned base physical address to map.
- * - pTTBEntry = Pointer to the TTB entry to be used.
- * - ndxPage = The "first" index within the current page to use.
- * - cpg = The maximum number of pages we want to map.  This function will only map as many pages as will
- *         fit in the current TTB entry, as indicated by ndxPage.
- * - uiTableFlags = Flags to be used or verified for the TTB entry.
- * - uiPageFlags = Flags to be used for new page table entries.
+ * - uiPageAuxFlags = Page auxiliary flag bits that would be used for a page table entry.
  *
  * Returns:
- * The number of pages that were actually mapped by this function call, or -1 if there was an error in the mapping.
+ * TTB auxiliary flag bits that would be used for a TTB entry.
+ */
+SEG_INIT_CODE static UINT32 make_section_aux_flags(UINT32 uiPageAuxFlags)
+{
+  register UINT32 rc = uiPageAuxFlags & (PGAUX_SACRED);
+  /* TODO if we define any other flags */
+  return rc;
+}
+
+/*
+ * Allocates a new page table, initializes it, and initializes the pointed-to TTB entry with a
+ * pointer to it.
+ *
+ * Parameters:
+ * - pTTBEntry = Pointer to the TTB entry to be filled; this entry is modified.
+ * - pAuxEntry = Pointer to the TTB aux entry to be filled; this entry is modified.
+ * - uiTableFlags = Flags to be used for the TTB entry.
+ *
+ * Returns:
+ * A pointer to the new page table.
  *
  * Side effects:
- * May modify the TTB entry we point to, if it was not previously allocated.  May modify the current page
- * table that the TTB entry points to, where applicable.  If we need to allocate a new page table, may modify the
- * global variables g_cpgForPageTables, g_ctblFreeonLastPage, and g_ptblNext.
+ * Modifies the global variables g_cpgForPageTables, g_ctblFreeonLastPage, and g_ptblNext.
  */
-SEG_INIT_CODE static INT32 alloc_pages(PHYSADDR paBase, PTTB pTTBEntry, INT32 ndxPage, INT32 cpg, UINT32 uiTableFlags,
-                                       UINT32 uiPageFlags)
+SEG_INIT_CODE static PPAGETAB alloc_page_table(PTTB pTTBEntry, PTTBAUX pAuxEntry, UINT32 uiTableFlags)
 {
-  INT32 cpgCurrent;        /* number of pages we're mapping */
-  PPAGETAB pTab;           /* pointer to current or new page table */
-  register INT32 i;        /* loop counter */
-
-  switch (pTTBEntry->data & TTBQUERY_MASK)
-  {
-    case TTBQUERY_FAULT:   /* not allocated, allocate a new page table for the slot */
-      if (g_ctblFreeonLastPage == 0)
-      {
-        g_cpgForPageTables++;
+  register PPAGETAB pTab;  /* pointer to new page table */
+  register UINT32 i;       /* loop counter */
+
+  if (g_ctblFreeonLastPage == 0)
+  {
+    g_cpgForPageTables++;
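alloc_page_table pokes the first-level entry as ((UINT32)pTab) | uiTableFlags, so the page table's base address sits in the upper bits and the flag bits in the low bits; the inverse operation is masking with TTBPGTBL_BASE, as the lookup code in this file does. A small decoding sketch (hypothetical helper, assuming the mmu.h masks used here):

    /* Hypothetical decode of a coarse page-table TTB entry; mirrors the encode above. */
    static PPAGETAB ttb_entry_table(PTTB pEntry)
    {
      if ((pEntry->data & TTBQUERY_MASK) != TTBQUERY_PGTBL)
        return NULL;                                     /* fault, section, or PXN section */
      return (PPAGETAB)(pEntry->data & TTBPGTBL_BASE);   /* strip flag bits to get the table */
    }
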
@@ -126,16 +133,60 @@ SEG_INIT_CODE static INT32 alloc_pages(PHYSADDR paBase, PTTB pTTBEntry, INT32 nd
       pTab->pgaux[i].data = 0;
     }
     pTTBEntry->data = ((UINT32)pTab) | uiTableFlags;   /* poke new entry */
+    pAuxEntry->data = TTBAUXFLAGS_PAGETABLE;
+  return pTab;
+}
+
+/*
+ * Allocates page mapping entries within a single current entry in the TTB.
+ *
+ * Parameters:
+ * - paBase = The page-aligned base physical address to map.
+ * - pTTBEntry = Pointer to the TTB entry to be used.
+ * - pAuxEntry = Pointer to the TTB auxiliary entry to be used.
+ * - ndxPage = The "first" index within the current page to use.
+ * - cpg = The maximum number of pages we want to map.  This function will only map as many pages as will
+ *         fit in the current TTB entry, as indicated by ndxPage.
+ * - uiTableFlags = Flags to be used or verified for the TTB entry.
+ * - uiPageFlags = Flags to be used for new page table entries.
+ * - uiAuxFlags = Flags to be used for new page table auxiliary entries.
+ *
+ * Returns:
+ * The number of pages that were actually mapped by this function call, or -1 if there was an error in the mapping.
+ *
+ * Side effects:
+ * May modify the TTB entry we point to, if it was not previously allocated.  May modify the current page
+ * table that the TTB entry points to, where applicable.  If we need to allocate a new page table, may modify the
+ * global variables g_cpgForPageTables, g_ctblFreeonLastPage, and g_ptblNext.
+ */
+SEG_INIT_CODE static INT32 alloc_pages(PHYSADDR paBase, PTTB pTTBEntry, PTTBAUX pAuxEntry, INT32 ndxPage,
+                                       INT32 cpg, UINT32 uiTableFlags, UINT32 uiPageFlags, UINT32 uiAuxFlags)
+{
+  INT32 cpgCurrent;        /* number of pages we're mapping */
+  PPAGETAB pTab;           /* pointer to current or new page table */
+  register INT32 i;        /* loop counter */
+
+  switch (pTTBEntry->data & TTBQUERY_MASK)
+  {
+    case TTBQUERY_FAULT:   /* not allocated, allocate a new page table for the slot */
+      pTab = alloc_page_table(pTTBEntry, pAuxEntry, uiTableFlags);
       break;
 
     case TTBQUERY_PGTBL:   /* existing page table */
       if ((pTTBEntry->data & TTBPGTBL_ALLFLAGS) != uiTableFlags)
         return -1;   /* table flags not compatible */
+      pTab = (PPAGETAB)(pTTBEntry->data & TTBPGTBL_BASE);
       break;
 
     case TTBQUERY_SEC:
-    case TTBQUERY_PXNSEC:
-      /* existing section, deal with this later */
+    case TTBQUERY_PXNSEC:  /* existing section */
+      if ((pTTBEntry->data & TTBSEC_ALLFLAGS) != make_section_flags(uiTableFlags, uiPageFlags))
+        return -1;
+      if (pAuxEntry->data != make_section_aux_flags(uiAuxFlags))
+        return -1;
+      if ((pTTBEntry->data & TTBSEC_BASE) != (paBase & TTBSEC_BASE))
+        return -1;
+      pTab = NULL;
       break;
   }
 

@@ -144,24 +195,14 @@ SEG_INIT_CODE static INT32 alloc_pages(PHYSADDR paBase, PTTB pTTBEntry, INT32 nd
   if (cpg < cpgCurrent)
     cpgCurrent = cpg;   /* only map up to max requested */
 
-  if (pTTBEntry->data & TTBSEC_ALWAYS)
-  {
-    /* this is a section, make sure its base address covers this mapping and its flags are compatible */
-    if ((pTTBEntry->data & TTBSEC_ALLFLAGS) != make_section_flags(uiTableFlags, uiPageFlags))
-      return -1;
-    if ((pTTBEntry->data & TTBSEC_BASE) != (paBase & TTBSEC_BASE))
-      return -1;
-  }
-  else
-  {
-    /* fill in entries in the page table */
-    pTab = (PPAGETAB)(pTTBEntry->data & TTBPGTBL_BASE);
+  if (pTab)
+  { /* fill in entries in the page table */
     for (i=0; i<cpgCurrent; i++)
     {
       if ((pTab->pgtbl[ndxPage + i].data & PGQUERY_MASK) != PGQUERY_FAULT)
         return -1;   /* stepping on existing mapping */
       pTab->pgtbl[ndxPage + i].data = paBase | uiPageFlags;
-      pTab->pgaux[ndxPage + i].data = 0;  /* TODO */
+      pTab->pgaux[ndxPage + i].data = uiAuxFlags;
       paBase += SYS_PAGE_SIZE;
     }
   }

@@ -178,6 +219,7 @@ SEG_INIT_CODE static INT32 alloc_pages(PHYSADDR paBase, PTTB pTTBEntry, INT32 nd
  * - cpg = The number of pages to be mapped.
  * - uiTableFlags = Flags to be used or verified for TTB entries.
  * - uiPageFlags = Flags to be used for new page table entries.
+ * - uiAuxFlags = Flags to be used for new auxiliary TTB entries.
  *
  * Returns:
  * TRUE if the mapping succeeded, FALSE if it failed.

@@ -188,13 +230,14 @@ SEG_INIT_CODE static INT32 alloc_pages(PHYSADDR paBase, PTTB pTTBEntry, INT32 nd
  * g_ctblFreeonLastPage, and g_ptblNext.
  */
 SEG_INIT_CODE static BOOL map_pages(PHYSADDR paBase, KERNADDR vmaBase, INT32 cpg, UINT32 uiTableFlags,
-                                    UINT32 uiPageFlags)
+                                    UINT32 uiPageFlags, UINT32 uiAuxFlags)
 {
   static DECLARE_INIT_STRING8_CONST(sz1, "Map ");
   static DECLARE_INIT_STRING8_CONST(sz2, "->");
   static DECLARE_INIT_STRING8_CONST(sz3, ",cpg=");
   static DECLARE_INIT_STRING8_CONST(sz4, ",tf=");
   static DECLARE_INIT_STRING8_CONST(sz5, ",pf=");
+  static DECLARE_INIT_STRING8_CONST(sz6, ",af=");
   INT32 ndxTTB = mmVMA2TTBIndex(vmaBase);      /* TTB entry index */
   INT32 ndxPage = mmVMA2PGTBLIndex(vmaBase);   /* starting page entry index */
   INT32 cpgCurrent;                            /* current number of pages mapped */
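map_pages starts by slicing the virtual address into a first-level index and a page-table index with mmVMA2TTBIndex and mmVMA2PGTBLIndex. With 4K pages and 1MB sections those are plain bit slices; the worked example below restates the assumed arithmetic (the macro bodies are not shown in this hunk):

    /* Assumed index arithmetic: bits 31..20 pick the TTB entry (one per 1MB section),
     * bits 19..12 pick one of the 256 page-table entries within that section.        */
    /*   vmaBase = 0xC0123000                                                         */
    /*   mmVMA2TTBIndex(vmaBase)   -> 0xC0123000 >> 20          = 0xC01 = 3073        */
    /*   mmVMA2PGTBLIndex(vmaBase) -> (0xC0123000 >> 12) & 0xFF = 0x23  = 35          */
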
@ -209,12 +252,15 @@ SEG_INIT_CODE static BOOL map_pages(PHYSADDR paBase, KERNADDR vmaBase, INT32 cpg
|
||||||
ETrWriteWord(uiTableFlags);
|
ETrWriteWord(uiTableFlags);
|
||||||
ETrWriteString8(sz5);
|
ETrWriteString8(sz5);
|
||||||
ETrWriteWord(uiPageFlags);
|
ETrWriteWord(uiPageFlags);
|
||||||
|
ETrWriteString8(sz6);
|
||||||
|
ETrWriteWord(uiAuxFlags);
|
||||||
ETrWriteChar8('\n');
|
ETrWriteChar8('\n');
|
||||||
|
|
||||||
if ((cpg > 0) && (ndxPage > 0))
|
if ((cpg > 0) && (ndxPage > 0))
|
||||||
{
|
{
|
||||||
/* We are starting in the middle of a VM page. Map to the end of the VM page. */
|
/* We are starting in the middle of a VM page. Map to the end of the VM page. */
|
||||||
cpgCurrent = alloc_pages(paBase, g_pTTB + ndxTTB, ndxPage, cpg, uiTableFlags, uiPageFlags);
|
cpgCurrent = alloc_pages(paBase, g_pTTB + ndxTTB, g_pTTBAux + ndxTTB, ndxPage, cpg, uiTableFlags,
|
||||||
|
uiPageFlags, uiAuxFlags);
|
||||||
if (cpgCurrent < 0)
|
if (cpgCurrent < 0)
|
||||||
{
|
{
|
||||||
/* ETrWriteChar8('a'); */
|
/* ETrWriteChar8('a'); */
|
||||||
|
@ -236,6 +282,7 @@ SEG_INIT_CODE static BOOL map_pages(PHYSADDR paBase, KERNADDR vmaBase, INT32 cpg
|
||||||
{
|
{
|
||||||
case TTBQUERY_FAULT: /* unmapped - map the section */
|
case TTBQUERY_FAULT: /* unmapped - map the section */
|
||||||
g_pTTB[ndxTTB].data = paBase | make_section_flags(uiTableFlags, uiPageFlags);
|
g_pTTB[ndxTTB].data = paBase | make_section_flags(uiTableFlags, uiPageFlags);
|
||||||
|
g_pTTBAux[ndxTTB].data = make_section_aux_flags(uiAuxFlags);
|
||||||
break;
|
break;
|
||||||
|
|
||||||
case TTBQUERY_PGTBL: /* collided with a page table */
|
case TTBQUERY_PGTBL: /* collided with a page table */
|
||||||
|
@ -249,6 +296,11 @@ SEG_INIT_CODE static BOOL map_pages(PHYSADDR paBase, KERNADDR vmaBase, INT32 cpg
|
||||||
/* ETrWriteChar8('c'); */
|
/* ETrWriteChar8('c'); */
|
||||||
return FALSE; /* invalid flags */
|
return FALSE; /* invalid flags */
|
||||||
}
|
}
|
||||||
|
if (g_pTTBAux[ndxTTB].data != make_section_aux_flags(uiAuxFlags))
|
||||||
|
{
|
||||||
|
/* ETrWriteChar8('!'); */
|
||||||
|
return FALSE; /* invalid aux flags */
|
||||||
|
}
|
||||||
if ((g_pTTB[ndxTTB].data & TTBSEC_BASE) != paBase)
|
if ((g_pTTB[ndxTTB].data & TTBSEC_BASE) != paBase)
|
||||||
{
|
{
|
||||||
/* ETrWriteChar8('d'); */
|
/* ETrWriteChar8('d'); */
|
||||||
|
@ -261,7 +313,8 @@ SEG_INIT_CODE static BOOL map_pages(PHYSADDR paBase, KERNADDR vmaBase, INT32 cpg
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
/* just map 256 individual pages */
|
/* just map 256 individual pages */
|
||||||
cpgCurrent = alloc_pages(paBase, g_pTTB + ndxTTB, 0, cpg, uiTableFlags, uiPageFlags);
|
cpgCurrent = alloc_pages(paBase, g_pTTB + ndxTTB, g_pTTBAux + ndxTTB, 0, cpg, uiTableFlags,
|
||||||
|
uiPageFlags, uiAuxFlags);
|
||||||
if (cpgCurrent < 0)
|
if (cpgCurrent < 0)
|
||||||
{
|
{
|
||||||
/* ETrWriteChar8('e'); */
|
/* ETrWriteChar8('e'); */
|
||||||
|
@ -277,7 +330,7 @@ SEG_INIT_CODE static BOOL map_pages(PHYSADDR paBase, KERNADDR vmaBase, INT32 cpg
|
||||||
if (cpg > 0)
|
if (cpg > 0)
|
||||||
{
|
{
|
||||||
/* map the "tail end" onto the next TTB */
|
/* map the "tail end" onto the next TTB */
|
||||||
if (alloc_pages(paBase, g_pTTB + ndxTTB, 0, cpg, uiTableFlags, uiPageFlags) < 0)
|
if (alloc_pages(paBase, g_pTTB + ndxTTB, g_pTTBAux + ndxTTB, 0, cpg, uiTableFlags, uiPageFlags, uiAuxFlags) < 0)
|
||||||
{
|
{
|
||||||
/* ETrWriteChar8('f'); */
|
/* ETrWriteChar8('f'); */
|
||||||
return FALSE;
|
return FALSE;
|
||||||
|
@ -308,8 +361,10 @@ extern char paFirstFree, cpgPrestartTotal, paLibraryCode, vmaLibraryCode, cpgLib
|
||||||
SEG_INIT_CODE PHYSADDR EMmInit(PSTARTUP_INFO pstartup)
|
SEG_INIT_CODE PHYSADDR EMmInit(PSTARTUP_INFO pstartup)
|
||||||
{
|
{
|
||||||
static DECLARE_INIT_STRING8_CONST(szTTBAt, "EMmInit: TTB1@");
|
static DECLARE_INIT_STRING8_CONST(szTTBAt, "EMmInit: TTB1@");
|
||||||
|
#if 0
|
||||||
static DECLARE_INIT_STRING8_CONST(szPageTable, "Page table pages:");
|
static DECLARE_INIT_STRING8_CONST(szPageTable, "Page table pages:");
|
||||||
static DECLARE_INIT_STRING8_CONST(szFree, "\nFree last page:");
|
static DECLARE_INIT_STRING8_CONST(szFree, "\nFree last page:");
|
||||||
|
#endif
|
||||||
PHYSADDR paTTB = (PHYSADDR)(&paFirstFree); /* location of the system TTB1 */
|
PHYSADDR paTTB = (PHYSADDR)(&paFirstFree); /* location of the system TTB1 */
|
||||||
UINT32 cbMPDB; /* number of bytes in the MPDB */
|
UINT32 cbMPDB; /* number of bytes in the MPDB */
|
||||||
register INT32 i; /* loop counter */
|
register INT32 i; /* loop counter */
|
||||||
|
@ -332,8 +387,14 @@ SEG_INIT_CODE PHYSADDR EMmInit(PSTARTUP_INFO pstartup)
|
||||||
for (i=0; i<SYS_TTB1_ENTRIES; i++)
|
for (i=0; i<SYS_TTB1_ENTRIES; i++)
|
||||||
g_pTTB[i].data = 0;
|
g_pTTB[i].data = 0;
|
||||||
|
|
||||||
|
/* Save off the TTB auxiliary data location and initialize it. */
|
||||||
|
pstartup->paTTBAux = paTTB + SYS_TTB1_SIZE;
|
||||||
|
g_pTTBAux = (PTTBAUX)(pstartup->paTTBAux);
|
||||||
|
for (i=0; i<SYS_TTB1_ENTRIES; i++)
|
||||||
|
g_pTTBAux[i].data = 0;
|
||||||
|
|
||||||
/* Allocate space for the Master Page Database but do not initialize it. */
|
/* Allocate space for the Master Page Database but do not initialize it. */
|
||||||
pstartup->paMPDB = paTTB + SYS_TTB1_SIZE;
|
pstartup->paMPDB = pstartup->paTTBAux + SYS_TTB1_SIZE;
|
||||||
cbMPDB = pstartup->cpgSystemTotal << 3; /* 8 bytes per entry */
|
cbMPDB = pstartup->cpgSystemTotal << 3; /* 8 bytes per entry */
|
||||||
pstartup->cpgMPDB = cbMPDB >> SYS_PAGE_BITS;
|
pstartup->cpgMPDB = cbMPDB >> SYS_PAGE_BITS;
|
||||||
if (cbMPDB & (SYS_PAGE_SIZE - 1))
|
if (cbMPDB & (SYS_PAGE_SIZE - 1))
|
||||||
|
@ -347,38 +408,53 @@ SEG_INIT_CODE PHYSADDR EMmInit(PSTARTUP_INFO pstartup)
|
||||||
g_ptblNext = (PPAGETAB)(pstartup->paFirstPageTable);
|
g_ptblNext = (PPAGETAB)(pstartup->paFirstPageTable);
|
||||||
|
|
||||||
/* Map the "prestart" area (everything below load address, plus prestart code & data) as identity. */
|
/* Map the "prestart" area (everything below load address, plus prestart code & data) as identity. */
|
||||||
VERIFY(map_pages(0, 0, (INT32)(&cpgPrestartTotal), TTBPGTBL_ALWAYS, PGTBLSM_ALWAYS | PGTBLSM_AP01));
|
VERIFY(map_pages(0, 0, (INT32)(&cpgPrestartTotal), TTBPGTBL_ALWAYS, PGTBLSM_ALWAYS | PGTBLSM_AP01, 0));
|
||||||
/* Map the IO area as identity. */
|
/* Map the IO area as identity. */
|
||||||
VERIFY(map_pages(PHYSADDR_IO_BASE, PHYSADDR_IO_BASE, PAGE_COUNT_IO, TTBFLAGS_MMIO, PGTBLFLAGS_MMIO));
|
VERIFY(map_pages(PHYSADDR_IO_BASE, PHYSADDR_IO_BASE, PAGE_COUNT_IO, TTBFLAGS_MMIO, PGTBLFLAGS_MMIO, 0));
|
||||||
/* Map the library area. */
|
/* Map the library area. */
|
||||||
VERIFY(map_pages((PHYSADDR)(&paLibraryCode), (KERNADDR)(&vmaLibraryCode), (INT32)(&cpgLibraryCode),
|
VERIFY(map_pages((PHYSADDR)(&paLibraryCode), (KERNADDR)(&vmaLibraryCode), (INT32)(&cpgLibraryCode),
|
||||||
TTBFLAGS_LIB_CODE, PGTBLFLAGS_LIB_CODE));
|
TTBFLAGS_LIB_CODE, PGTBLFLAGS_LIB_CODE, PGAUXFLAGS_LIB_CODE));
|
||||||
/* Map the kernel code area. */
|
/* Map the kernel code area. */
|
||||||
VERIFY(map_pages((PHYSADDR)(&paKernelCode), (KERNADDR)(&vmaKernelCode), (INT32)(&cpgKernelCode),
|
VERIFY(map_pages((PHYSADDR)(&paKernelCode), (KERNADDR)(&vmaKernelCode), (INT32)(&cpgKernelCode),
|
||||||
TTBFLAGS_KERNEL_CODE, PGTBLFLAGS_KERNEL_CODE));
|
TTBFLAGS_KERNEL_CODE, PGTBLFLAGS_KERNEL_CODE, PGAUXFLAGS_KERNEL_CODE));
|
||||||
/* Map the kernel data/BSS area. */
|
/* Map the kernel data/BSS area. */
|
||||||
VERIFY(map_pages((PHYSADDR)(&paKernelData), (KERNADDR)(&vmaKernelData),
|
VERIFY(map_pages((PHYSADDR)(&paKernelData), (KERNADDR)(&vmaKernelData),
|
||||||
(INT32)(&cpgKernelData) + (INT32)(&cpgKernelBss), TTBFLAGS_KERNEL_DATA, PGTBLFLAGS_KERNEL_DATA));
|
(INT32)(&cpgKernelData) + (INT32)(&cpgKernelBss), TTBFLAGS_KERNEL_DATA, PGTBLFLAGS_KERNEL_DATA,
|
||||||
|
PGAUXFLAGS_KERNEL_DATA));
|
||||||
/* Map the kernel init code area. */
|
/* Map the kernel init code area. */
|
||||||
VERIFY(map_pages((PHYSADDR)(&paInitCode), (KERNADDR)(&vmaInitCode), (INT32)(&cpgInitCode),
|
VERIFY(map_pages((PHYSADDR)(&paInitCode), (KERNADDR)(&vmaInitCode), (INT32)(&cpgInitCode),
|
||||||
TTBFLAGS_KERNEL_CODE, PGTBLFLAGS_KERNEL_CODE));
|
TTBFLAGS_INIT_CODE, PGTBLFLAGS_INIT_CODE, PGAUXFLAGS_INIT_CODE));
|
||||||
/* Map the kernel init data/BSS area. */
|
/* Map the kernel init data/BSS area. */
|
||||||
VERIFY(map_pages((PHYSADDR)(&paInitData), (KERNADDR)(&vmaInitData),
|
VERIFY(map_pages((PHYSADDR)(&paInitData), (KERNADDR)(&vmaInitData),
|
||||||
(INT32)(&cpgInitData) + (INT32)(&cpgInitBss), TTBFLAGS_KERNEL_DATA, PGTBLFLAGS_KERNEL_DATA));
|
(INT32)(&cpgInitData) + (INT32)(&cpgInitBss), TTBFLAGS_INIT_DATA, PGTBLFLAGS_INIT_DATA,
|
||||||
|
PGAUXFLAGS_INIT_DATA));
|
||||||
/* Map the TTB itself. */
|
/* Map the TTB itself. */
|
||||||
pstartup->kaTTB = (KERNADDR)(&vmaFirstFree);
|
pstartup->kaTTB = (KERNADDR)(&vmaFirstFree);
|
||||||
VERIFY(map_pages(paTTB, pstartup->kaTTB, SYS_TTB1_SIZE / SYS_PAGE_SIZE, TTBFLAGS_KERNEL_DATA,
|
VERIFY(map_pages(paTTB, pstartup->kaTTB, SYS_TTB1_SIZE / SYS_PAGE_SIZE, TTBFLAGS_KERNEL_DATA,
|
||||||
PGTBLFLAGS_KERNEL_DATA));
|
PGTBLFLAGS_KERNEL_DATA, PGAUXFLAGS_KERNEL_DATA));
|
||||||
|
/* Map the TTB auxiliary data. */
|
||||||
|
pstartup->kaTTBAux = pstartup->kaTTB + SYS_TTB1_SIZE;
|
||||||
|
VERIFY(map_pages(pstartup->paTTBAux, pstartup->kaTTBAux, SYS_TTB1_SIZE / SYS_PAGE_SIZE, TTBFLAGS_KERNEL_DATA,
|
||||||
|
PGTBLFLAGS_KERNEL_DATA, PGAUXFLAGS_KERNEL_DATA));
|
||||||
/* Map the Master Page Database. */
|
/* Map the Master Page Database. */
|
||||||
pstartup->kaMPDB = pstartup->kaTTB + SYS_TTB1_SIZE;
|
pstartup->kaMPDB = pstartup->kaTTBAux + SYS_TTB1_SIZE;
|
||||||
VERIFY(map_pages(pstartup->paMPDB, pstartup->kaTTB + SYS_TTB1_SIZE, pstartup->cpgMPDB, TTBFLAGS_KERNEL_DATA,
|
VERIFY(map_pages(pstartup->paMPDB, pstartup->kaTTB + SYS_TTB1_SIZE, pstartup->cpgMPDB, TTBFLAGS_KERNEL_DATA,
|
||||||
PGTBLFLAGS_KERNEL_DATA));
|
PGTBLFLAGS_KERNEL_DATA, PGAUXFLAGS_KERNEL_DATA));
|
||||||
/* Map the IO area into high memory as well. */
|
/* Map the IO area into high memory as well. */
|
||||||
VERIFY(map_pages(PHYSADDR_IO_BASE, VMADDR_IO_BASE, PAGE_COUNT_IO, TTBFLAGS_MMIO, PGTBLFLAGS_MMIO));
|
VERIFY(map_pages(PHYSADDR_IO_BASE, VMADDR_IO_BASE, PAGE_COUNT_IO, TTBFLAGS_MMIO, PGTBLFLAGS_MMIO, PGAUXFLAGS_MMIO));
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Allocate one extra page table, just to ensure that we have sufficient free page table entries when we get up
|
||||||
|
* to the startup code.
|
||||||
|
*/
|
||||||
|
i = mmVMA2TTBIndex(VMADDR_KERNEL_FENCE);
|
||||||
|
while ((g_pTTB[i].data & TTBQUERY_MASK) != TTBQUERY_FAULT)
|
||||||
|
i++;
|
||||||
|
alloc_page_table(g_pTTB + i, g_pTTBAux + i, TTBFLAGS_KERNEL_DATA);
|
||||||
|
|
||||||
#if 0
|
#if 0
|
||||||
/* Dump the TTB and page tables to trace output. */
|
/* Dump the TTB and page tables to trace output. */
|
||||||
ETrDumpWords((PUINT32)paTTB, (SYS_TTB1_SIZE + (g_cpgForPageTables << SYS_PAGE_BITS)) >> 2);
|
ETrDumpWords((PUINT32)paTTB, (SYS_TTB1_SIZE + SYS_TTB1_SIZE + (g_cpgForPageTables << SYS_PAGE_BITS)) >> 2);
|
||||||
ETrWriteString8(szPageTable);
|
ETrWriteString8(szPageTable);
|
||||||
ETrWriteWord(g_cpgForPageTables);
|
ETrWriteWord(g_cpgForPageTables);
|
||||||
ETrWriteString8(szFree);
|
ETrWriteString8(szFree);
|
||||||
|
|
|
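With the auxiliary table added, the physical layout EMmInit lays down starting at paFirstFree is: the 16K TTB1, an equal-sized block of auxiliary entries (SYS_TTB1_ENTRIES 4-byte TTBAUX words), then the Master Page Database, then the first page tables; the kernel-virtual mappings starting at vmaFirstFree repeat the same order. A worked size example, assuming a hypothetical machine with 65536 physical pages (256MB) and 4K pages:

    /* Hypothetical numbers, following the arithmetic in the hunk above. */
    cbMPDB   = 65536 << 3;                /* 524288 bytes of MPDB, 8 bytes per page      */
    cpgMPDB  = cbMPDB >> SYS_PAGE_BITS;   /* = 128 pages, already page-aligned           */
    paTTBAux = paTTB + SYS_TTB1_SIZE;     /* aux entries sit immediately after the TTB1  */
    paMPDB   = paTTBAux + SYS_TTB1_SIZE;  /* MPDB immediately after the aux entries      */
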
@@ -30,6 +30,7 @@
  * "Raspberry Pi" is a trademark of the Raspberry Pi Foundation.
  */
 #include <comrogue/types.h>
+#include <comrogue/intlib.h>
 #include <comrogue/str.h>
 #include <comrogue/allocator.h>
 #include <comrogue/objhelp.h>

@@ -60,6 +61,8 @@ typedef struct tagINITHEAP
   IMalloc hdr;            /* object header must be first */
   BLOCK blkBase;          /* base "zero" block */
   PBLOCK pblkLastAlloc;   /* last allocated block */
+  UINT32 cbAlloc;         /* number of bytes currently allocated */
+  UINT32 cbAllocHiWat;    /* high watermark for bytes currently allocated */
 } INITHEAP, *PINITHEAP;
 
 /*

@@ -92,6 +95,9 @@ SEG_INIT_CODE static PVOID init_heap_Alloc(IMalloc *pThis, SIZE_T cb)
         p->data.cblk = nBlocks;
       }
       pih->pblkLastAlloc = q;
+      pih->cbAlloc += (nBlocks * sizeof(BLOCK));
+      if (pih->cbAlloc > pih->cbAllocHiWat)
+        pih->cbAllocHiWat = pih->cbAlloc;
       return (PVOID)(p + 1);
     }
     if (p == pih->pblkLastAlloc)

@@ -137,10 +143,12 @@ SEG_INIT_CODE static void init_heap_Free(IMalloc *pThis, PVOID pv)
 {
   PINITHEAP pih = (PINITHEAP)pThis;
   register PBLOCK p, q;
+  register UINT32 nBlocks;
 
   if (init_heap_DidAlloc(pThis, pv) != 1)
     return;   /* not our business */
   p = ((PBLOCK)pv) - 1;
+  nBlocks = p->data.cblk;
   for (q = pih->pblkLastAlloc; !((p > q) && (p < q->data.pNextFree)); q = q->data.pNextFree)
     if ((q >= q->data.pNextFree) && ((p > q) || (p < q->data.pNextFree)))
       break;   /* at one end or another */

@@ -161,6 +169,7 @@ SEG_INIT_CODE static void init_heap_Free(IMalloc *pThis, PVOID pv)
   else
     q->data.pNextFree = p;   /* chain to previous free block */
   pih->pblkLastAlloc = q;
+  pih->cbAlloc -= (nBlocks * sizeof(BLOCK));
 }
 
 /*

@@ -182,6 +191,7 @@ SEG_INIT_CODE static PVOID init_heap_Realloc(IMalloc *pThis, PVOID pv, SIZE_T cb
   PINITHEAP pih = (PINITHEAP)pThis;
   SIZE_T nBlocksNew, nBlocksExtra;
   PVOID pNew;
+  UINT32 cbHiWatSave;
   register PBLOCK p, pNext, q, qp;
 
   /* Handle degenerate cases */

@@ -205,7 +215,7 @@ SEG_INIT_CODE static PVOID init_heap_Realloc(IMalloc *pThis, PVOID pv, SIZE_T cb
     pNext = p + nBlocksNew;
     pNext->data.cblk = p->data.cblk - nBlocksNew;
     p->data.cblk = nBlocksNew;
-    init_heap_Free(pThis, (PVOID)(pNext + 1));
+    init_heap_Free(pThis, (PVOID)(pNext + 1));   /* adjusts cbAlloc */
     return pv;
   }
 

@@ -220,6 +230,7 @@ SEG_INIT_CODE static PVOID init_heap_Realloc(IMalloc *pThis, PVOID pv, SIZE_T cb
       if (q->data.cblk < nBlocksExtra)
         break;   /* cannot get enough blocks by combining next free block */
       qp->data.pNextFree = q->data.pNextFree;   /* remove block from free list */
+      pih->cbAlloc += (q->data.cblk * sizeof(BLOCK));   /* act like we allocated it all for the nonce */
       if (q->data.cblk == nBlocksExtra)
       { /* take it all */
         pih->pblkLastAlloc = qp->data.pNextFree;

@@ -228,9 +239,11 @@ SEG_INIT_CODE static PVOID init_heap_Realloc(IMalloc *pThis, PVOID pv, SIZE_T cb
       { /* chop in two, add first block to existing, free second block */
         pNext += nBlocksExtra;
         pNext->data.cblk = q->data.cblk - nBlocksExtra;
-        init_heap_Free(pThis, (PVOID)(pNext + 1));
+        init_heap_Free(pThis, (PVOID)(pNext + 1));   /* also fixes cbAlloc */
       }
       p->data.cblk = nBlocksNew;
+      if (pih->cbAlloc > pih->cbAllocHiWat)
+        pih->cbAllocHiWat = pih->cbAlloc;
       return pv;
     }
     if (q == pih->pblkLastAlloc)

@@ -238,11 +251,13 @@ SEG_INIT_CODE static PVOID init_heap_Realloc(IMalloc *pThis, PVOID pv, SIZE_T cb
   }
 
   /* last ditch effort: allocate new block and copy old contents in */
+  cbHiWatSave = pih->cbAllocHiWat;
   pNew = init_heap_Alloc(pThis, cb);
   if (!pNew)
     return NULL;   /* cannot reallocate */
   StrCopyMem(pv, pNew, (p->data.cblk - 1) * sizeof(BLOCK));
   init_heap_Free(pThis, pv);
+  pih->cbAllocHiWat = intMax(cbHiWatSave, pih->cbAlloc);
   return pNew;
 }
 

@@ -303,6 +318,7 @@ SEG_INIT_CODE IMalloc *_MmGetInitHeap(void)
     p = (PBLOCK)g_pInitHeapBlock;
     p->data.cblk = SIZE_INIT_HEAP / sizeof(BLOCK);
     init_heap_Free((IMalloc *)(&initheap), (PVOID)(p + 1));
+    initheap.cbAlloc = initheap.cbAllocHiWat = 0;   /* start from zero now */
   }
   return (IMalloc *)(&initheap);
 }
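The init-heap changes thread a running byte count (cbAlloc) plus a high watermark (cbAllocHiWat) through Alloc, Free, and Realloc; the watermark only ratchets upward, except that the Realloc copy path saves and restores it so the temporary overlap of old and new blocks is not counted. A standalone sketch of that accounting rule (not COMROGUE code):

    /* Self-contained illustration of the watermark rule used above. */
    typedef struct { unsigned cbAlloc, cbAllocHiWat; } HEAPSTATS;

    static void stats_on_alloc(HEAPSTATS *ps, unsigned cb)
    {
      ps->cbAlloc += cb;
      if (ps->cbAlloc > ps->cbAllocHiWat)
        ps->cbAllocHiWat = ps->cbAlloc;   /* ratchet the watermark upward */
    }

    static void stats_on_free(HEAPSTATS *ps, unsigned cb)
    {
      ps->cbAlloc -= cb;                  /* frees never lower the watermark */
    }
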
326
kernel/kernel_space.c
Normal file
326
kernel/kernel_space.c
Normal file
|
@ -0,0 +1,326 @@
|
||||||
|
/*
|
||||||
|
* This file is part of the COMROGUE Operating System for Raspberry Pi
|
||||||
|
*
|
||||||
|
* Copyright (c) 2013, Eric J. Bowersox / Erbosoft Enterprises
|
||||||
|
* All rights reserved.
|
||||||
|
*
|
||||||
|
* This program is free for commercial and non-commercial use as long as the following conditions are
|
||||||
|
* adhered to.
|
||||||
|
*
|
||||||
|
* Copyright in this file remains Eric J. Bowersox and/or Erbosoft, and as such any copyright notices
|
||||||
|
* in the code are not to be removed.
|
||||||
|
*
|
||||||
|
* Redistribution and use in source and binary forms, with or without modification, are permitted
|
||||||
|
* provided that the following conditions are met:
|
||||||
|
*
|
||||||
|
* * Redistributions of source code must retain the above copyright notice, this list of conditions and
|
||||||
|
* the following disclaimer.
|
||||||
|
* * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and
|
||||||
|
* the following disclaimer in the documentation and/or other materials provided with the distribution.
|
||||||
|
*
|
||||||
|
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
|
||||||
|
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
|
||||||
|
* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
|
||||||
|
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
|
||||||
|
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||||
|
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||||
|
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
||||||
|
* POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
*
|
||||||
|
* "Raspberry Pi" is a trademark of the Raspberry Pi Foundation.
|
||||||
|
*/
|
||||||
|
#include <comrogue/types.h>
|
||||||
|
#include <comrogue/allocator.h>
|
||||||
|
#include <comrogue/internals/memmgr.h>
|
||||||
|
#include <comrogue/internals/rbtree.h>
|
||||||
|
#include <comrogue/internals/layout.h>
|
||||||
|
#include <comrogue/internals/mmu.h>
|
||||||
|
#include <comrogue/internals/seg.h>
|
||||||
|
#include <comrogue/internals/startup.h>
|
||||||
|
#include <comrogue/internals/trace.h>
|
||||||
|
|
||||||
|
#ifdef THIS_FILE
|
||||||
|
#undef THIS_FILE
|
||||||
|
DECLARE_THIS_FILE
|
||||||
|
#endif
|
||||||
|
|
||||||
|
/*------------------------------------------
|
||||||
|
* Operations with kernel address intervals
|
||||||
|
*------------------------------------------
|
||||||
|
*/
|
||||||
|
|
||||||
|
/* Definiton of an address interval */
|
||||||
|
typedef struct tagAINTERVAL {
|
||||||
|
KERNADDR kaFirst; /* first kernel address in the interval */
|
||||||
|
KERNADDR kaLast; /* first kernel address NOT in the interval */
|
||||||
|
} AINTERVAL, *PAINTERVAL;
|
||||||
|
typedef const AINTERVAL *PCAINTERVAL;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Compares two address intervals.
|
||||||
|
*
|
||||||
|
* Parameters:
|
||||||
|
* - paiLeft = Pointer to first address interval to compare.
|
||||||
|
* - paiRight = Pointer to second address interval to compare.
|
||||||
|
*
|
||||||
|
* Returns:
|
||||||
|
* - -1 = If the interval paiLeft is entirely before the interval paiRight.
|
||||||
|
* - 0 = If the interval paiLeft is entirely contained within (or equal to) the interval paiRight.
|
||||||
|
* - 1 = If the interval paiLeft is entirely after the interval paiRight.
|
||||||
|
*
|
||||||
|
* N.B.:
|
||||||
|
* It is an error if the intervals overlap without paiLeft being entirely contained within paiRight.
|
||||||
|
* (This should not happen.)
|
||||||
|
*/
|
||||||
|
static INT32 interval_compare(PCAINTERVAL paiLeft, PCAINTERVAL paiRight)
|
||||||
|
{
|
||||||
|
static DECLARE_STRING8_CONST(szFitCheck, "interval_compare fitcheck: [%08x,%08x] <?> [%08x,%08x]");
|
||||||
|
|
||||||
|
ASSERT(paiLeft->kaFirst < paiLeft->kaLast);
|
||||||
|
ASSERT(paiRight->kaFirst < paiRight->kaLast);
|
||||||
|
if ((paiLeft->kaFirst >= paiRight->kaFirst) && (paiLeft->kaLast <= paiRight->kaLast))
|
||||||
|
return 0;
|
||||||
|
if (paiLeft->kaLast <= paiRight->kaFirst)
|
||||||
|
return -1;
|
||||||
|
if (paiLeft->kaFirst >= paiRight->kaLast)
|
||||||
|
return 1;
|
||||||
|
/* if get here, bugbugbugbugbug */
|
||||||
|
TrPrintf8(szFitCheck, paiLeft->kaFirst, paiLeft->kaLast, paiRight->kaFirst, paiRight->kaLast);
|
||||||
|
/* TODO: bugcheck */
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Determines if two intervals are adjacent, that is, if the end of the first is the start of the next.
|
||||||
|
*
|
||||||
|
* Parameters:
|
||||||
|
* - paiLeft = Pointer to first address interval to compare.
|
||||||
|
* - paiRight = Pointer to second address interval to compare.
|
||||||
|
*
|
||||||
|
* Returns:
|
||||||
|
* TRUE if paiLeft is adjacent to paiRight, FALSE otherwise.
|
||||||
|
*/
|
||||||
|
static inline BOOL intervals_adjacent(PCAINTERVAL paiLeft, PCAINTERVAL paiRight)
|
||||||
|
{
|
||||||
|
return MAKEBOOL(paiLeft->kaLast == paiRight->kaFirst);
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Returns the number of pages described by an interval.
|
||||||
|
*
|
||||||
|
* Parameters:
|
||||||
|
* - pai = The interval to test.
|
||||||
|
*
|
||||||
|
* Returns:
|
||||||
|
* The number of pages described by this interval.
|
||||||
|
*/
|
||||||
|
static inline UINT32 interval_numpages(PCAINTERVAL pai)
|
||||||
|
{
|
||||||
|
return (pai->kaLast - pai->kaFirst) >> SYS_PAGE_BITS;
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Initializes an interval's start and end points.
|
||||||
|
*
|
||||||
|
* Parameters:
|
||||||
|
* - pai = Pointer to the interval to be initialized.
|
||||||
|
* - kaFirst = First address in the interval.
|
||||||
|
* - kaLast = Last address in the interval.
|
||||||
|
*
|
||||||
|
* Returns:
|
||||||
|
* pai.
|
||||||
|
*/
|
||||||
|
static inline PAINTERVAL init_interval(PAINTERVAL pai, KERNADDR kaFirst, KERNADDR kaLast)
|
||||||
|
{
|
||||||
|
pai->kaFirst = kaFirst;
|
||||||
|
pai->kaLast = kaLast;
|
||||||
|
return pai;
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Initializes an interval to start at a specified location and cover a specific number of pages.
|
||||||
|
*
|
||||||
|
* Parameters:
|
||||||
|
* - pai = Pointer to the interval to be initialized.
|
||||||
|
* - kaBase = Base address of the interval.
|
||||||
|
* - cpg = Number of pages the interval is to contain.
|
||||||
|
*
|
||||||
|
* Returns:
|
||||||
|
* pai.
|
||||||
|
*/
|
||||||
|
static inline PAINTERVAL init_interval_pages(PAINTERVAL pai, KERNADDR kaBase, UINT32 cpg)
|
||||||
|
{
|
||||||
|
pai->kaFirst = kaBase;
|
||||||
|
pai->kaLast = kaBase + (cpg << SYS_PAGE_BITS);
|
||||||
|
return pai;
|
||||||
|
}
|
||||||
|
|
||||||
|
/*----------------------------------------
|
||||||
|
* Kernel address manipulation operations
|
||||||
|
*----------------------------------------
|
||||||
|
*/
|
||||||
|
|
||||||
|
/* Tree structure in which we store "free" address intervals. */
|
||||||
|
typedef struct tagADDRTREENODE {
|
||||||
|
RBTREENODE rbtn; /* tree node structure */
|
||||||
|
AINTERVAL ai; /* address interval this represents */
|
||||||
|
} ADDRTREENODE, *PADDRTREENODE;
|
||||||
|
|
||||||
|
/* Structure used in allocating address space. */
|
||||||
|
typedef struct tagALLOC_STRUC {
|
||||||
|
UINT32 cpgNeeded; /* count of number of pages needed */
|
||||||
|
PADDRTREENODE patnFound; /* pointer to "found" tree node */
|
||||||
|
} ALLOC_STRUC, *PALLOC_STRUC;
|
||||||
|
|
||||||
|
static RBTREE g_rbtFreeAddrs; /* free address tree */
|
||||||
|
static PMALLOC g_pMalloc = NULL; /* allocator we use */
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Inserts a kernel address range into the tree.
|
||||||
|
*
|
||||||
|
* Parameters:
|
||||||
|
* - kaFirst = First address in the range to be inserted.
|
||||||
|
* - kaLast = Last address in the range to be inserted.
|
||||||
|
*
|
||||||
|
* Returns:
|
||||||
|
* - Nothing.
|
||||||
|
*
|
||||||
|
* Side effects:
|
||||||
|
* Modifies g_rbtFreeAddrs; allocates space from the g_pMalloc heap.
|
||||||
|
*/
|
||||||
|
static void insert_into_tree(KERNADDR kaFirst, KERNADDR kaLast)
|
||||||
|
{
|
||||||
|
PADDRTREENODE pnode = IMalloc_Alloc(g_pMalloc, sizeof(ADDRTREENODE));
|
||||||
|
ASSERT(pnode);
|
||||||
|
rbtNewNode(&(pnode->rbtn), init_interval(&(pnode->ai), kaFirst, kaLast));
|
||||||
|
RbtInsert(&g_rbtFreeAddrs, (PRBTREENODE)pnode);
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Subfunction called from a tree walk to find a free address interval in the tree that can supply us with
|
||||||
|
* the number of pages we need.
|
||||||
|
*
|
||||||
|
* Parameters:
|
||||||
|
* - pUnused = Not used.
|
||||||
|
* - pnode = Current tree node we're walking over.
|
||||||
|
* - palloc = Pointer to allocation structure.
|
||||||
|
*
|
||||||
|
* Returns:
|
||||||
|
* FALSE if we found a node containing enough space (written to palloc->patnFound), TRUE otherwise.
|
||||||
|
*/
|
||||||
|
static BOOL alloc_check_space(PVOID pUnused, PADDRTREENODE pnode, PALLOC_STRUC palloc)
|
||||||
|
{
|
||||||
|
if (interval_numpages(&(pnode->ai)) >= palloc->cpgNeeded)
|
||||||
|
{
|
||||||
|
palloc->patnFound = pnode;
|
||||||
|
return FALSE;
|
||||||
|
}
|
||||||
|
return TRUE;
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
 * Allocates a block of kernel addresses suitable to contain a certain number of pages.
 *
 * Parameters:
 * - cpgNeeded = Number of pages of kernel address space that are needed.
 *
 * Returns:
 * Base address of the block of address space we got.
 *
 * Side effects:
 * May modify g_rbtFreeAddrs and free space to the g_pMalloc heap.
 *
 * N.B.:
 * Running out of kernel address space should be a bug.
 */
KERNADDR _MmAllocKernelAddr(UINT32 cpgNeeded)
{
  register KERNADDR rc;                            /* return from this function */
  BOOL bResult;                                    /* result of tree walk */
  ALLOC_STRUC alloc_struc = { cpgNeeded, NULL };   /* allocation structure */

  /* Walk the tree to find a block of free addresses that are big enough. */
  bResult = RbtWalk(&g_rbtFreeAddrs, (PFNRBTWALK)alloc_check_space, &alloc_struc);
  ASSERT(!bResult);
  if (bResult)
  {
    /* TODO: bug check */
    return 0;
  }

  /* We allocate address space from the start of the interval we found. */
  rc = alloc_struc.patnFound->ai.kaFirst;
  if (interval_numpages(&(alloc_struc.patnFound->ai)) == cpgNeeded)
  {
    /* This node is all used up by this allocation.  Remove it from the tree and free it. */
    RbtDelete(&g_rbtFreeAddrs, (TREEKEY)(&(alloc_struc.patnFound->ai)));
    IMalloc_Free(g_pMalloc, alloc_struc.patnFound);
  }
  else
  {
    /*
     * Chop off the number of pages we're taking.  This does not change the ordering of nodes in the tree
     * because we're just shortening this one's interval.
     */
    alloc_struc.patnFound->ai.kaFirst += (cpgNeeded << SYS_PAGE_BITS);
  }
  return rc;
}
/*
 * Frees a block of kernel addresses that was previously allocated.
 *
 * Parameters:
 * - kaBase = Base address of the kernel address space region to be freed.
 * - cpgToFree = Number of pages of kernel address space to be freed.
 *
 * Returns:
 * Nothing.
 *
 * Side effects:
 * May modify g_rbtFreeAddrs and allocate or free space in the g_pMalloc heap.
 */
void _MmFreeKernelAddr(KERNADDR kaBase, UINT32 cpgToFree)
{
  register PADDRTREENODE patnPred, patnSucc;   /* predecessor and successor pointers */
  AINTERVAL aiFree;                            /* actual interval we're freeing */

  init_interval_pages(&aiFree, kaBase, cpgToFree);
  ASSERT(!RbtFind(&g_rbtFreeAddrs, (TREEKEY)(&aiFree)));
  patnPred = (PADDRTREENODE)RbtFindPredecessor(&g_rbtFreeAddrs, (TREEKEY)(&aiFree));
  patnSucc = (PADDRTREENODE)RbtFindSuccessor(&g_rbtFreeAddrs, (TREEKEY)(&aiFree));
  if (patnPred && intervals_adjacent(&(patnPred->ai), &aiFree))
  {
    if (patnSucc && intervals_adjacent(&aiFree, &(patnSucc->ai)))
    { /* combine predecessor, interval, and successor into one big node */
      RbtDelete(&g_rbtFreeAddrs, (TREEKEY)(&(patnSucc->ai)));  /* remove the successor from the tree */
      patnPred->ai.kaLast = patnSucc->ai.kaLast;               /* extend the predecessor to cover all three */
      IMalloc_Free(g_pMalloc, patnSucc);                       /* successor node is no longer needed */
    }
    else /* combine with predecessor */
      patnPred->ai.kaLast = aiFree.kaLast;
  }
  else if (patnSucc && intervals_adjacent(&aiFree, &(patnSucc->ai)))
    patnSucc->ai.kaFirst = aiFree.kaFirst;   /* combine with successor */
  else /* insert as a new address range */
    insert_into_tree(aiFree.kaFirst, aiFree.kaLast);
}
/*
 * Initializes the kernel address space management code.
 *
 * Parameters:
 * - pstartup = Pointer to startup information block.
 * - pmInitHeap = Pointer to initialization heap allocator.
 *
 * Returns:
 * Nothing.
 */
SEG_INIT_CODE void _MmInitKernelSpace(PSTARTUP_INFO pstartup, PMALLOC pmInitHeap)
{
  g_pMalloc = pmInitHeap;
  IUnknown_AddRef(g_pMalloc);
  rbtInitTree(&g_rbtFreeAddrs, (PFNTREECOMPARE)interval_compare);
  insert_into_tree(pstartup->vmaFirstFree, VMADDR_IO_BASE);
  insert_into_tree(VMADDR_IO_BASE + (PAGE_COUNT_IO * SYS_PAGE_SIZE), VMADDR_KERNEL_NOMANS);
}
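Taken together, these functions give the rest of the memory manager a page-granular reservation API for kernel virtual addresses: reserve a run of pages, map something into it, and hand the addresses back when done. A minimal usage sketch (the caller and page count here are illustrative only; nothing is mapped until physical pages are placed at the returned address):

/* Sketch only: reserve 4 pages of kernel address space, then release them later. */
KERNADDR kaBuffer = _MmAllocKernelAddr(4);   /* 4 * SYS_PAGE_SIZE bytes of address space */
if (kaBuffer)
{
  /* ... map physical pages at kaBuffer (e.g. with MmMapPages) and use them ... */
  _MmFreeKernelAddr(kaBuffer, 4);            /* freed range may coalesce with its neighbors */
}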

@@ -30,6 +30,8 @@
  * "Raspberry Pi" is a trademark of the Raspberry Pi Foundation.
  */
 #include <comrogue/types.h>
+#include <comrogue/objectbase.h>
+#include <comrogue/allocator.h>
 #include <comrogue/internals/seg.h>
 #include <comrogue/internals/memmgr.h>
 #include <comrogue/internals/startup.h>
@@ -39,11 +41,16 @@
  *---------------------
  */
 
-extern void _MmInitVMMap(PSTARTUP_INFO pstartup);
+extern IMalloc *_MmGetInitHeap(void);
+extern void _MmInitKernelSpace(PSTARTUP_INFO pstartup, PMALLOC pmInitHeap);
+extern void _MmInitVMMap(PSTARTUP_INFO pstartup, PMALLOC pmInitHeap);
 extern void _MmInitPageAlloc(PSTARTUP_INFO pstartup);
 
 SEG_INIT_CODE void _MmInit(PSTARTUP_INFO pstartup)
 {
-  _MmInitVMMap(pstartup);
+  IMalloc *pmInitHeap = _MmGetInitHeap();
+  _MmInitKernelSpace(pstartup, pmInitHeap);
+  _MmInitVMMap(pstartup, pmInitHeap);
   _MmInitPageAlloc(pstartup);
+  IUnknown_Release(pmInitHeap);
 }

122 kernel/rbtree.c

@@ -229,6 +229,76 @@ SEG_LIB_CODE PRBTREENODE RbtFind(PRBTREE ptree, TREEKEY key)
   return ptn;
 }
 
+/*
+ * Given a key, returns either the node that matches the key, if the key is in the tree, or the node
+ * that has a key that most immediately precedes the supplied key.  An O(log n) operation.
+ *
+ * Parameters:
+ * - ptree = Pointer to the tree head structure.
+ * - key = Key value to be looked up.
+ *
+ * Returns:
+ * Pointer to the node where the key is found, or pointer to the predecessor node, or NULL if the key
+ * is less than every key in the tree and hence has no predecessor.
+ */
+SEG_LIB_CODE PRBTREENODE RbtFindPredecessor(PRBTREE ptree, TREEKEY key)
+{
+  register PRBTREENODE ptn = ptree->ptnRoot;  /* current node */
+  register int cmp;                           /* compare result */
+
+  while (ptn)
+  {
+    cmp = (*(ptree->pfnTreeCompare))(key, ptn->treekey);
+    if (cmp == 0)
+      break;                    /* found */
+    else if (cmp > 0)
+    {
+      if (rbtNodeRight(ptn))
+        ptn = rbtNodeRight(ptn);
+      else
+        break;                  /* found predecessor */
+    }
+    else
+      ptn = ptn->ptnLeft;
+  }
+  return ptn;
+}
+
+/*
+ * Given a key, returns either the node that matches the key, if the key is in the tree, or the node
+ * that has a key that most immediately succeeds the supplied key.  An O(log n) operation.
+ *
+ * Parameters:
+ * - ptree = Pointer to the tree head structure.
+ * - key = Key value to be looked up.
+ *
+ * Returns:
+ * Pointer to the node where the key is found, or pointer to the successor node, or NULL if the key
+ * is greater than every key in the tree and hence has no successor.
+ */
+SEG_LIB_CODE PRBTREENODE RbtFindSuccessor(PRBTREE ptree, TREEKEY key)
+{
+  register PRBTREENODE ptn = ptree->ptnRoot;  /* current node */
+  register int cmp;                           /* compare result */
+
+  while (ptn)
+  {
+    cmp = (*(ptree->pfnTreeCompare))(key, ptn->treekey);
+    if (cmp == 0)
+      break;                    /* found */
+    else if (cmp < 0)
+    {
+      if (ptn->ptnLeft)
+        ptn = ptn->ptnLeft;
+      else
+        break;                  /* found successor */
+    }
+    else
+      ptn = rbtNodeRight(ptn);
+  }
+  return ptn;
+}
+
 /*
  * Finds the "minimum" node in the subtree (the one at the bottom end of the left spine of the subtree).
  *
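These two lookups are what let the kernel-address free list above coalesce a freed range with its neighbors without a full traversal. A rough illustration of the "exact match or nearest neighbor" behavior, using the value-keyed compare function this commit uses for the page-table tree (the keys are arbitrary example values, and the nodes are stack-allocated purely for the sketch):

/* Sketch only: three nodes keyed by the values 0x1000, 0x3000, 0x5000. */
RBTREE rbt;
RBTREENODE rbtnA, rbtnB, rbtnC;
PRBTREENODE ptnBelow, ptnAbove;

rbtInitTree(&rbt, RbtStdCompareByValue);
rbtNewNode(&rbtnA, (TREEKEY)0x1000);
rbtNewNode(&rbtnB, (TREEKEY)0x3000);
rbtNewNode(&rbtnC, (TREEKEY)0x5000);
RbtInsert(&rbt, &rbtnA);
RbtInsert(&rbt, &rbtnB);
RbtInsert(&rbt, &rbtnC);

/* 0x2000 is not in the tree, so each lookup falls back to the nearest key on that side;
 * an exact hit would return the matching node itself, just like RbtFind. */
ptnBelow = RbtFindPredecessor(&rbt, (TREEKEY)0x2000);   /* node keyed 0x1000 */
ptnAbove = RbtFindSuccessor(&rbt, (TREEKEY)0x2000);     /* node keyed 0x3000 */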

@@ -327,7 +397,8 @@ SEG_LIB_CODE static PRBTREENODE delete_min(PRBTREENODE ptn)
 }
 
 /*
- * Detetes the node in the subtree having an arbitrary key.  An O(log n) operation.
+ * Deletes the node in the subtree having an arbitrary key.  (Note that "deletes" means "removes from the tree."
+ * No memory delete operation is actually performed.)  An O(log n) operation.
  *
  * Parameters:
  * - ptree = Pointer to the tree head structure, containing the compare function.
@@ -386,7 +457,8 @@ SEG_LIB_CODE static PRBTREENODE delete_from_under(PRBTREE ptree, PRBTREENODE ptn
 }
 
 /*
- * Detetes the node in the tree having an arbitrary key.  An O(log n) operation.
+ * Deletes the node in the tree having an arbitrary key.  (Note that "deletes" means "removes from the tree."
+ * No memory delete operation is actually performed.)  An O(log n) operation.
  *
  * Parameters:
  * - ptree = Pointer to the tree head structure.
@@ -401,3 +473,49 @@ SEG_LIB_CODE void RbtDelete(PRBTREE ptree, TREEKEY key)
   if (ptree->ptnRoot)
     rbtSetNodeColor(ptree->ptnRoot, BLACK);
 }
+
+/*
+ * Performs an inorder traversal of the tree rooted at the specified node.  An O(n) operation.
+ *
+ * Parameters:
+ * - ptree = Pointer to the tree head structure.
+ * - ptn = Pointer to the root node of the current subtree.
+ * - pfnWalk = Pointer to a function called for each tree node we encounter.  This function returns TRUE
+ *             to continue the traversal or FALSE to stop it.
+ * - pData = Arbitrary data pointer that gets passed to the pfnWalk function.
+ *
+ * Returns:
+ * TRUE if the tree was entirely traversed, FALSE if the tree walk was interrupted.
+ *
+ * N.B.:
+ * This function is recursive; however, the nature of the tree guarantees that the stack space consumed
+ * by its stack frames will be O(log n).
+ */
+SEG_LIB_CODE static BOOL do_walk(PRBTREE ptree, PRBTREENODE ptn, PFNRBTWALK pfnWalk, PVOID pData)
+{
+  register BOOL rc = TRUE;
+  if (ptn->ptnLeft)
+    rc = do_walk(ptree, ptn->ptnLeft, pfnWalk, pData);
+  if (rc)
+    rc = (*pfnWalk)(ptree, ptn, pData);
+  if (rc && rbtNodeRight(ptn))
+    rc = do_walk(ptree, rbtNodeRight(ptn), pfnWalk, pData);
+  return rc;
+}
+
+/*
+ * Performs an inorder traversal of the tree.  An O(n) operation.
+ *
+ * Parameters:
+ * - ptree = Pointer to the tree head structure.
+ * - pfnWalk = Pointer to a function called for each tree node we encounter.  This function returns TRUE
+ *             to continue the traversal or FALSE to stop it.
+ * - pData = Arbitrary data pointer that gets passed to the pfnWalk function.
+ *
+ * Returns:
+ * TRUE if the tree was entirely traversed, FALSE if the tree walk was interrupted.
+ */
+SEG_LIB_CODE BOOL RbtWalk(PRBTREE ptree, PFNRBTWALK pfnWalk, PVOID pData)
+{
+  return (ptree->ptnRoot ? do_walk(ptree, ptree->ptnRoot, pfnWalk, pData) : TRUE);
+}
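The walk callback convention (return TRUE to keep going, FALSE to stop early) is the same one alloc_check_space relies on in the kernel-address allocator above. A second, trivial illustration of a callback, sketched against the free-address tree defined earlier:

/* Sketch only: count the nodes in a tree; pData points to a UINT32 counter. */
static BOOL count_node(PRBTREE ptree, PRBTREENODE ptn, PVOID pData)
{
  (*((UINT32 *)pData))++;
  return TRUE;   /* never stop early, so RbtWalk returns TRUE */
}

/* ... elsewhere ... */
UINT32 cNodes = 0;
RbtWalk(&g_rbtFreeAddrs, (PFNRBTWALK)count_node, &cNodes);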

@@ -68,7 +68,6 @@ COMROGUEStart:
 /* copy startup info to our local buffer */
 mov r1, r0
 ldr r0, =kiStartupInfo
-mov r11, r0                /* save for later */
 ldr r2, [r1]               /* get number of bytes to copy */
 ldr ip, =StrCopyMem        /* this is needed to branch to a library function */
 ldr lr, =.postCopy

293 kernel/vmmap.c

@@ -31,18 +31,36 @@
  */
 #include <comrogue/types.h>
 #include <comrogue/scode.h>
+#include <comrogue/allocator.h>
 #include <comrogue/internals/seg.h>
 #include <comrogue/internals/mmu.h>
 #include <comrogue/internals/memmgr.h>
+#include <comrogue/internals/rbtree.h>
 #include <comrogue/internals/startup.h>
+#include <comrogue/internals/trace.h>
+
+#ifdef THIS_FILE
+#undef THIS_FILE
+DECLARE_THIS_FILE
+#endif
+
+/* Tree node storing mapping of physical addresses of page table pages to their kernel addresses */
+typedef struct tagPGTMAP {
+  RBTREENODE rbtn;      /* tree node structure */
+  KERNADDR kaPGTPage;   /* page table page kernel address */
+  UINT32 uiRefCount;    /* reference count for mapping */
+} PGTMAP, *PPGTMAP;
 
 #define NMAPFRAMES  4    /* number of frame mappings */
 
+static PMALLOC g_pMalloc = NULL;    /* allocator used */
 static PTTB g_pttb1 = NULL;         /* pointer to TTB1 */
-static KERNADDR g_kaEndFence = 0;   /* end fence in kernel addresses, after we reserve some */
-static KERNADDR g_kaTableMap[NMAPFRAMES] = { 0 };   /* kernel addresses for page table mappings */
-static PHYSADDR g_paTableMap[NMAPFRAMES] = { 0 };   /* physical addresses for page table mappings */
-static UINT32 g_refTableMap[NMAPFRAMES] = { 0 };    /* reference counts for table mappings */
+static PTTBAUX g_pttb1Aux = NULL;   /* pointer to TTB1 aux data */
+static RBTREE g_rbtPageTables;      /* tree mapping page table PAs to KAs */
+
+/* Forward declaration. */
+static HRESULT map_pages0(PTTB pTTB, PTTBAUX pTTBAux, PHYSADDR paBase, KERNADDR vmaBase, UINT32 cpg,
+                          UINT32 uiTableFlags, UINT32 uiPageFlags, UINT32 uiAuxFlags);
 
 /*
  * Maps a page table's page into kernel memory space where we can examine it.
@@ -54,43 +72,41 @@ static UINT32 g_refTableMap[NMAPFRAMES] = { 0 };  /* reference counts for table mappings */
  * Pointer to the pagetable in kernel memory, or NULL if we weren't able to map it.
  *
  * Side effects:
- * May modify g_paTableMap and g_refTableMap, and may modify TTB1 if we map a page into memory.
+ * May modify g_rbtPageTables, and may modify TTB1 if we map a page into memory.  May allocate
+ * memory from g_pMalloc.
  */
 static PPAGETAB map_pagetable(PHYSADDR paPageTable)
 {
-  PHYSADDR paOfPage = paPageTable & ~(SYS_PAGE_SIZE - 1);  /* actual page table page's PA */
-  register UINT32 i;   /* loop counter */
-
-  for (i = 0; i < NMAPFRAMES; i++)
-  {
-    if (g_paTableMap[i] == paOfPage)
-    {
-      g_refTableMap[i]++;   /* already mapped, return it */
-      goto returnOK;
-    }
-  }
-  for (i = 0; i < NMAPFRAMES; i++)
-  {
-    if (!(g_paTableMap[i]))
-    {
-      g_paTableMap[i] = paOfPage;   /* claim slot and map into it */
-      g_refTableMap[i] = 1;
-      if (FAILED(MmMapPages(g_pttb1, g_paTableMap[i], g_kaTableMap[i], 1, TTBFLAGS_KERNEL_DATA,
-                            PGTBLFLAGS_KERNEL_DATA)))
-      {
-        g_refTableMap[i] = 0;
-        g_paTableMap[i] = 0;
-        return NULL;
-      }
-      break;
-    }
-  }
-  if (i == NMAPFRAMES)
-    return NULL;
-returnOK:
-  return (PPAGETAB)(g_kaTableMap[i] | (paPageTable & (SYS_PAGE_SIZE - 1)));
+  register PHYSADDR paOfPage = paPageTable & ~(SYS_PAGE_SIZE - 1);  /* actual page table page's PA */
+  register PPGTMAP ppgtmap;
+
+  ppgtmap = (PPGTMAP)RbtFind(&g_rbtPageTables, (TREEKEY)paOfPage);
+  if (!ppgtmap)
+  {
+    ppgtmap = IMalloc_Alloc(g_pMalloc, sizeof(PGTMAP));
+    ppgtmap->kaPGTPage = _MmAllocKernelAddr(1);
+    ASSERT(ppgtmap->kaPGTPage);
+    if (SUCCEEDED(map_pages0(g_pttb1, g_pttb1Aux, paOfPage, ppgtmap->kaPGTPage, 1, TTBFLAGS_KERNEL_DATA,
+                             PGTBLFLAGS_KERNEL_DATA, PGAUXFLAGS_KERNEL_DATA)))
+    {
+      ppgtmap->uiRefCount = 1;
+      rbtNewNode(&(ppgtmap->rbtn), paOfPage);
+      RbtInsert(&g_rbtPageTables, (PRBTREENODE)ppgtmap);
+    }
+    else
+    {
+      _MmFreeKernelAddr(ppgtmap->kaPGTPage, 1);
+      IMalloc_Free(g_pMalloc, ppgtmap);
+      return NULL;
+    }
+  }
+  else
+    ppgtmap->uiRefCount++;
+  return (PPAGETAB)(ppgtmap->kaPGTPage | (paPageTable & (SYS_PAGE_SIZE - 1)));
 }
+
+/* Forward declaration. */
+static HRESULT demap_pages0(PTTB pTTB, PTTBAUX pTTBAux, KERNADDR vmaBase, UINT32 cpg, UINT32 uiFlags);
 
 /*
  * Demaps a page table's page from kernel memory space.
@@ -102,23 +118,24 @@ returnOK:
  * Nothing.
  *
  * Side effects:
- * May modify g_paTableMap and g_refTableMap, and may modify TTB1 if we unmap a page from memory.
+ * May modify g_rbtPageTables, and may modify TTB1 if we unmap a page from memory.  May free
+ * memory in g_pMalloc.
  */
 static void demap_pagetable(PPAGETAB ppgtbl)
 {
-  KERNADDR kaOfPage = ((KERNADDR)ppgtbl) & ~(SYS_PAGE_SIZE - 1);
-  register UINT32 i;   /* loop counter */
-
-  for (i = 0; i < NMAPFRAMES; i++)
-  {
-    if (g_kaTableMap[i] == kaOfPage)
-    {
-      if (--g_refTableMap[i] == 0)
-      {
-        MmDemapPages(g_pttb1, g_kaTableMap[i], 1);
-        g_paTableMap[i] = 0;
-      }
-      break;
-    }
-  }
+  register PHYSADDR paOfPage;
+  register PPGTMAP ppgtmap;
+
+  paOfPage = MmGetPhysAddr(g_pttb1, ((KERNADDR)ppgtbl) & ~(SYS_PAGE_SIZE - 1));
+  ppgtmap = (PPGTMAP)RbtFind(&g_rbtPageTables, (TREEKEY)paOfPage);
+  if (ppgtmap)
+  {
+    if (--(ppgtmap->uiRefCount) == 0)
+    {
+      RbtDelete(&g_rbtPageTables, (TREEKEY)paOfPage);
+      demap_pages0(g_pttb1, g_pttb1Aux, ppgtmap->kaPGTPage, 1, 0);
+      _MmFreeKernelAddr(ppgtmap->kaPGTPage, 1);
+      IMalloc_Free(g_pMalloc, ppgtmap);
+    }
+  }
 }
@@ -134,13 +151,31 @@ static void demap_pagetable(PPAGETAB ppgtbl)
  * Returns:
  * The pointer to the selected TTB, which may be the global variable g_pttb1.
  */
-static PTTB resolve_ttb(PTTB pTTB, KERNADDR vma)
+static inline PTTB resolve_ttb(PTTB pTTB, KERNADDR vma)
 {
   if (!pTTB || (vma & 0x80000000))
     return g_pttb1;  /* if no TTB specified or address is out of range for TTB0, use TTB1 */
   return pTTB;
 }
 
+/*
+ * Resolves a specified TTB auxiliary table to either itself or the global TTB1Aux, depending on whether one
+ * was specified and on the virtual address to be worked with.
+ *
+ * Parameters:
+ * - pTTBAux = The specified TTB aux table pointer.
+ * - vma = The base virtual address we're working with.
+ *
+ * Returns:
+ * The pointer to the selected TTB aux table, which may be the global variable g_pttb1Aux.
+ */
+static inline PTTBAUX resolve_ttbaux(PTTBAUX pTTBAux, KERNADDR vma)
+{
+  if (!pTTBAux || (vma & 0x80000000))
+    return g_pttb1Aux;
+  return pTTBAux;
+}
+
 /*
  * Returns the physical address corresponding to a virtual memory address.
  *
@@ -171,24 +206,29 @@ PHYSADDR MmGetPhysAddr(PTTB pTTB, KERNADDR vma)
   return rc;
 }
 
+/* Flags for demapping. */
+#define DEMAP_NOTHING_SACRED  0x00000001   /* disregard "sacred" flag */
+
 /*
  * Deallocates page mapping entries within a single current entry in the TTB.
  *
  * Parameters:
  * - pTTBEntry = Pointer to the TTB entry to deallocate in.
+ * - pTTBAuxEntry = Pointer to the TTB aux table entry to deallocate in.
  * - ndxPage = Starting index in the page table of the first entry to deallocate.
  * - cpg = Count of the number of pages to deallocate.  Note that this function will not deallocate more
  *         page mapping entries than remain on the page, as indicated by ndxPage.
+ * - uiFlags = Flags for operation.
  *
  * Returns:
  * Standard HRESULT success/failure.  If the result is successful, the SCODE_CODE of the result will
  * indicate the number of pages actually deallocated.
  *
  * Side effects:
- * May modify the TTB entry pointed to, and the page table it points to, where applicable.  If the
+ * May modify the TTB entry/aux entry pointed to, and the page table it points to, where applicable.  If the
  * page table is empty after we finish demapping entries, it may be deallocated.
  */
-static HRESULT demap_pages1(PTTB pTTBEntry, UINT32 ndxPage, UINT32 cpg)
+static HRESULT demap_pages1(PTTB pTTBEntry, PTTBAUX pTTBAuxEntry, UINT32 ndxPage, UINT32 cpg, UINT32 uiFlags)
 {
   UINT32 cpgCurrent;      /* number of pages we're mapping */
   PPAGETAB pTab = NULL;   /* pointer to current or new page table */
@@ -203,7 +243,10 @@ static HRESULT demap_pages1(PTTB pTTBEntry, UINT32 ndxPage, UINT32 cpg)
 
   if ((pTTBEntry->data & TTBSEC_ALWAYS) && (cpgCurrent == SYS_PGTBL_ENTRIES) && (ndxPage == 0))
   { /* we can kill off the whole section */
+    if (pTTBAuxEntry->aux.sacred && !(uiFlags & DEMAP_NOTHING_SACRED))
+      return MEMMGR_E_NOSACRED;  /* can't demap a sacred mapping */
     pTTBEntry->data = 0;
+    pTTBAuxEntry->data = 0;
     /* TODO: handle TLB and cache */
   }
   else if (pTTBEntry->data & TTBPGTBL_ALWAYS)
@@ -212,21 +255,29 @@ static HRESULT demap_pages1(PTTB pTTBEntry, UINT32 ndxPage, UINT32 cpg)
     if (!pTab)
       return MEMMGR_E_NOPGTBL;
     for (i = 0; i<cpgCurrent; i++)
+    {
+      if (pTab->pgaux[ndxPage + i].aux.sacred && !(uiFlags & DEMAP_NOTHING_SACRED))
+      { /* can't demap a sacred mapping */
+        hr = MEMMGR_E_NOSACRED;
+        goto pageError;
+      }
+    }
+    for (i = 0; i<cpgCurrent; i++)
     {
       pTab->pgtbl[ndxPage + i].data = 0;
       pTab->pgaux[ndxPage + i].data = 0;
       /* TODO: handle TLB and cache */
     }
     /* TODO: check to see if page table can be deallocated */
+pageError:
     demap_pagetable(pTab);
   }
   return hr;
 }
 
-HRESULT MmDemapPages(PTTB pTTB, KERNADDR vmaBase, UINT32 cpg)
+static HRESULT demap_pages0(PTTB pTTB, PTTBAUX pTTBAux, KERNADDR vmaBase, UINT32 cpg, UINT32 uiFlags)
 {
-  PTTB pMyTTB = resolve_ttb(pTTB, vmaBase);   /* the TTB we use */
-  UINT32 ndxTTBMax = (pMyTTB == g_pttb1) ? SYS_TTB1_ENTRIES : SYS_TTB0_ENTRIES;
+  UINT32 ndxTTBMax = (pTTB == g_pttb1) ? SYS_TTB1_ENTRIES : SYS_TTB0_ENTRIES;
   UINT32 ndxTTB = mmVMA2TTBIndex(vmaBase);      /* TTB entry index */
   UINT32 ndxPage = mmVMA2PGTBLIndex(vmaBase);   /* starting page entry index */
   UINT32 cpgRemaining = cpg;                    /* number of pages remaining to demap */
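The new "sacred" auxiliary bit marks mappings that must never be torn down by an ordinary demap request: demap_pages1 refuses with MEMMGR_E_NOSACRED unless the caller passes DEMAP_NOTHING_SACRED, which only the internal cleanup path in map_pages0 does. A hedged sketch of what a caller would see (the addresses are made-up example values, and it is assumed here that the kernel-data aux flag set does not already include PGAUX_SACRED):

/* Sketch only: paFrame and vmaFixed are hypothetical page-aligned example values. */
PHYSADDR paFrame = 0x00200000;
KERNADDR vmaFixed = 0xC0400000;
HRESULT hr;

/* Map one page and mark it "sacred" by setting PGAUX_SACRED in its aux flags. */
hr = MmMapPages(NULL, NULL, paFrame, vmaFixed, 1, TTBFLAGS_KERNEL_DATA,
                PGTBLFLAGS_KERNEL_DATA, PGAUXFLAGS_KERNEL_DATA | PGAUX_SACRED);

/* The public demap path hands uiFlags == 0 to demap_pages0, so this is refused. */
hr = MmDemapPages(NULL, NULL, vmaFixed, 1);   /* expected to fail with MEMMGR_E_NOSACRED */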
@@ -235,7 +286,7 @@ HRESULT MmDemapPages(PTTB pTTB, KERNADDR vmaBase, UINT32 cpg)
   if ((cpgRemaining > 0) && (ndxPage > 0))
   {
     /* We are starting in the middle of a VM page.  Demap to the end of the VM page. */
-    hr = demap_pages1(pMyTTB + ndxTTB, ndxPage, cpgRemaining);
+    hr = demap_pages1(pTTB + ndxTTB, pTTBAux + ndxTTB, ndxPage, cpgRemaining, uiFlags);
     if (FAILED(hr))
       return hr;
     cpgRemaining -= SCODE_CODE(hr);
@@ -245,7 +296,7 @@ HRESULT MmDemapPages(PTTB pTTB, KERNADDR vmaBase, UINT32 cpg)
 
   while (cpgRemaining > 0)
   {
-    hr = demap_pages1(pMyTTB + ndxTTB, 0, cpgRemaining);
+    hr = demap_pages1(pTTB + ndxTTB, pTTBAux + ndxTTB, 0, cpgRemaining, uiFlags);
     if (FAILED(hr))
       return hr;
     cpgRemaining -= SCODE_CODE(hr);
@@ -255,6 +306,11 @@ HRESULT MmDemapPages(PTTB pTTB, KERNADDR vmaBase, UINT32 cpg)
   return S_OK;
 }
 
+HRESULT MmDemapPages(PTTB pTTB, PTTBAUX pTTBAux, KERNADDR vmaBase, UINT32 cpg)
+{
+  return demap_pages0(resolve_ttb(pTTB, vmaBase), resolve_ttbaux(pTTBAux, vmaBase), vmaBase, cpg, 0);
+}
+
 /*
  * Morphs the "flags" bits used for a page table entry in the TTB and for a page entry in the page table
  * into the "flags" bits used for a section entry in the TTB.
@@ -285,8 +341,44 @@ static UINT32 make_section_flags(UINT32 uiTableFlags, UINT32 uiPageFlags)
   return rc;
 }
 
-static HRESULT map_pages1(PTTB pttbEntry, PHYSADDR paBase, UINT32 ndxPage, UINT32 cpg, UINT32 uiTableFlags,
-                          UINT32 uiPageFlags)
+/*
+ * Morphs the "auxiliary flags" bits used for a page table entry into "auxiliary flags" used for a TTB entry.
+ *
+ * Parameters:
+ * - uiPageAuxFlags = Page auxiliary flag bits that would be used for a page table entry.
+ *
+ * Returns:
+ * TTB auxiliary flag bits that would be used for a TTB entry.
+ */
+static UINT32 make_section_aux_flags(UINT32 uiPageAuxFlags)
+{
+  register UINT32 rc = uiPageAuxFlags & (PGAUX_SACRED);
+  /* TODO if we define any other flags */
+  return rc;
+}
+
+static PPAGETAB alloc_page_table(PTTB pttbEntry, PTTBAUX pttbAuxEntry, UINT32 uiTableFlags)
+{
+  PPAGETAB pTab = NULL;   /* new page table pointer */
+  register INT32 i;       /* loop counter */
+
+  /* TODO: pull pTab out of our ass somewhere */
+  if (pTab)
+  {
+    for (i=0; i<SYS_PGTBL_ENTRIES; i++)
+    {
+      pTab->pgtbl[i].data = 0;   /* blank out the new page table */
+      pTab->pgaux[i].data = 0;
+    }
+    /* TODO: use physical address of page here */
+    pttbEntry->data = MmGetPhysAddr(g_pttb1, (KERNADDR)pTab) | uiTableFlags;   /* poke new entry */
+    pttbAuxEntry->data = TTBAUXFLAGS_PAGETABLE;
+  }
+  return pTab;
+}
+
+static HRESULT map_pages1(PTTB pttbEntry, PTTBAUX pttbAuxEntry, PHYSADDR paBase, UINT32 ndxPage, UINT32 cpg,
+                          UINT32 uiTableFlags, UINT32 uiPageFlags, UINT32 uiAuxFlags)
 {
   UINT32 cpgCurrent;      /* number of pages we're mapping */
   PPAGETAB pTab = NULL;   /* pointer to current or new page table */
@@ -296,16 +388,9 @@ static HRESULT map_pages1(PTTB pttbEntry, PHYSADDR paBase, UINT32 ndxPage, UINT32 cpg, UINT32 uiTableFlags,
   switch (pttbEntry->data & TTBQUERY_MASK)
   {
     case TTBQUERY_FAULT:   /* not allocated, allocate a new page table for the slot */
-      /* TODO: allocate something into pTab */
+      pTab = alloc_page_table(pttbEntry, pttbAuxEntry, uiTableFlags);
       if (!pTab)
        return MEMMGR_E_NOPGTBL;
-      for (i=0; i<SYS_PGTBL_ENTRIES; i++)
-      {
-       pTab->pgtbl[i].data = 0;   /* blank out the new page table */
-       pTab->pgaux[i].data = 0;
-      }
-      /* TODO: use physical address of page here */
-      pttbEntry->data = MmGetPhysAddr(g_pttb1, (KERNADDR)pTab) | uiTableFlags;   /* poke new entry */
       break;
 
     case TTBQUERY_PGTBL:   /* existing page table */
@@ -321,8 +406,11 @@ static HRESULT map_pages1(PTTB pttbEntry, PHYSADDR paBase, UINT32 ndxPage, UINT32 cpg, UINT32 uiTableFlags,
       /* this is a section, make sure its base address covers this mapping and its flags are compatible */
       if ((pttbEntry->data & TTBSEC_ALLFLAGS) != make_section_flags(uiTableFlags, uiPageFlags))
        return MEMMGR_E_BADTTBFLG;
+      if (pttbAuxEntry->data != make_section_aux_flags(uiAuxFlags))
+       return MEMMGR_E_BADTTBFLG;
      if ((pttbEntry->data & TTBSEC_BASE) != (paBase & TTBSEC_BASE))
        return MEMMGR_E_COLLIDED;
+      pTab = NULL;
      break;
   }
 
@@ -332,9 +420,8 @@ static HRESULT map_pages1(PTTB pttbEntry, PHYSADDR paBase, UINT32 ndxPage, UINT32 cpg, UINT32 uiTableFlags,
   cpgCurrent = cpg;   /* only map up to max requested */
   hr = MAKE_SCODE(SEVERITY_SUCCESS, FACILITY_MEMMGR, cpgCurrent);
 
-  if (!(pttbEntry->data & TTBSEC_ALWAYS))
-  {
-    /* fill in entries in the page table */
+  if (pTab)
+  { /* fill in entries in the page table */
     for (i=0; i < cpgCurrent; i++)
     {
       if ((pTab->pgtbl[ndxPage + i].data & PGQUERY_MASK) != PGQUERY_FAULT)
@@ -348,7 +435,7 @@ static HRESULT map_pages1(PTTB pttbEntry, PHYSADDR paBase, UINT32 ndxPage, UINT32 cpg, UINT32 uiTableFlags,
         goto exit;
       }
       pTab->pgtbl[ndxPage + i].data = paBase | uiPageFlags;
-      pTab->pgaux[ndxPage + i].data = 0;   /* TODO */
+      pTab->pgaux[ndxPage + i].data = uiAuxFlags;
       paBase += SYS_PAGE_SIZE;
     }
   }
@@ -357,11 +444,10 @@ exit:
   return hr;
 }
 
-HRESULT MmMapPages(PTTB pTTB, PHYSADDR paBase, KERNADDR vmaBase, UINT32 cpg, UINT32 uiTableFlags,
-                   UINT32 uiPageFlags)
+static HRESULT map_pages0(PTTB pTTB, PTTBAUX pTTBAux, PHYSADDR paBase, KERNADDR vmaBase, UINT32 cpg,
+                          UINT32 uiTableFlags, UINT32 uiPageFlags, UINT32 uiAuxFlags)
 {
-  PTTB pMyTTB = resolve_ttb(pTTB, vmaBase);   /* the TTB we use */
-  UINT32 ndxTTBMax = (pMyTTB == g_pttb1) ? SYS_TTB1_ENTRIES : SYS_TTB0_ENTRIES;
+  UINT32 ndxTTBMax = (pTTB == g_pttb1) ? SYS_TTB1_ENTRIES : SYS_TTB0_ENTRIES;
   UINT32 ndxTTB = mmVMA2TTBIndex(vmaBase);      /* TTB entry index */
   UINT32 ndxPage = mmVMA2PGTBLIndex(vmaBase);   /* starting page entry index */
   UINT32 cpgRemaining = cpg;                    /* number of pages remaining to map */
@@ -370,7 +456,8 @@ HRESULT MmMapPages(PTTB pTTB, PHYSADDR paBase, KERNADDR vmaBase, UINT32 cpg, UINT32 uiTableFlags,
   if ((cpgRemaining > 0) && (ndxPage > 0))
   {
     /* We are starting in the middle of a VM page.  Map to the end of the VM page. */
-    hr = map_pages1(pMyTTB + ndxTTB, paBase, ndxPage, cpgRemaining, uiTableFlags, uiPageFlags);
+    hr = map_pages1(pTTB + ndxTTB, pTTBAux + ndxTTB, paBase, ndxPage, cpgRemaining, uiTableFlags,
+                    uiPageFlags, uiAuxFlags);
     if (FAILED(hr))
       return hr;
     cpgRemaining -= SCODE_CODE(hr);
@@ -388,10 +475,11 @@ HRESULT MmMapPages(PTTB pTTB, PHYSADDR paBase, KERNADDR vmaBase, UINT32 cpg, UINT32 uiTableFlags,
     if ((paBase & TTBSEC_BASE) == paBase)
     {
       /* paBase is section-aligned now as well, we can use a direct 1Mb section mapping */
-      switch (pMyTTB[ndxTTB].data & TTBQUERY_MASK)
+      switch (pTTB[ndxTTB].data & TTBQUERY_MASK)
       {
        case TTBQUERY_FAULT:   /* unmapped - map the section */
-         pMyTTB[ndxTTB].data = paBase | make_section_flags(uiTableFlags, uiPageFlags);
+         pTTB[ndxTTB].data = paBase | make_section_flags(uiTableFlags, uiPageFlags);
+         pTTBAux[ndxTTB].data = make_section_aux_flags(uiAuxFlags);
          break;
 
        case TTBQUERY_PGTBL:   /* collided with a page table */
@@ -400,12 +488,17 @@ HRESULT MmMapPages(PTTB pTTB, PHYSADDR paBase, KERNADDR vmaBase, UINT32 cpg, UINT32 uiTableFlags,
 
        case TTBQUERY_SEC:     /* test existing section */
        case TTBQUERY_PXNSEC:
-         if ((pMyTTB[ndxTTB].data & TTBSEC_ALLFLAGS) != make_section_flags(uiTableFlags, uiPageFlags))
+         if ((pTTB[ndxTTB].data & TTBSEC_ALLFLAGS) != make_section_flags(uiTableFlags, uiPageFlags))
          {
            hr = MEMMGR_E_BADTTBFLG;
            goto errorExit;
          }
-         if ((pMyTTB[ndxTTB].data & TTBSEC_BASE) != paBase)
+         if (pTTBAux[ndxTTB].data != make_section_aux_flags(uiAuxFlags))
+         {
+           hr = MEMMGR_E_BADTTBFLG;
+           goto errorExit;
+         }
+         if ((pTTB[ndxTTB].data & TTBSEC_BASE) != paBase)
          {
            hr = MEMMGR_E_COLLIDED;
            goto errorExit;
@@ -418,7 +511,7 @@ HRESULT MmMapPages(PTTB pTTB, PHYSADDR paBase, KERNADDR vmaBase, UINT32 cpg, UINT32 uiTableFlags,
     else
     {
       /* just map 256 individual pages */
-      hr = map_pages1(pMyTTB + ndxTTB, paBase, 0, cpgRemaining, uiTableFlags, uiPageFlags);
+      hr = map_pages1(pTTB + ndxTTB, pTTBAux + ndxTTB, paBase, 0, cpgRemaining, uiTableFlags, uiPageFlags, uiAuxFlags);
       if (FAILED(hr))
        goto errorExit;
     }
@@ -435,14 +528,52 @@ HRESULT MmMapPages(PTTB pTTB, PHYSADDR paBase, KERNADDR vmaBase, UINT32 cpg, UINT32 uiTableFlags,
   if (cpgRemaining > 0)
   {
     /* map the "tail end" onto the next TTB */
-    hr = map_pages1(pMyTTB + ndxTTB, paBase, 0, cpgRemaining, uiTableFlags, uiPageFlags);
+    hr = map_pages1(pTTB + ndxTTB, pTTBAux + ndxTTB, paBase, 0, cpgRemaining, uiTableFlags, uiPageFlags, uiAuxFlags);
     if (FAILED(hr))
       goto errorExit;
   }
   return S_OK;
 errorExit:
   /* demap everything we've managed to map thusfar */
-  MmDemapPages(pMyTTB, vmaBase, cpg - cpgRemaining);
+  demap_pages0(pTTB, pTTBAux, vmaBase, cpg - cpgRemaining, DEMAP_NOTHING_SACRED);
+  return hr;
+}
+
+HRESULT MmMapPages(PTTB pTTB, PTTBAUX pTTBAux, PHYSADDR paBase, KERNADDR vmaBase, UINT32 cpg, UINT32 uiTableFlags,
+                   UINT32 uiPageFlags, UINT32 uiAuxFlags)
+{
+  return map_pages0(resolve_ttb(pTTB, vmaBase), resolve_ttbaux(pTTBAux, vmaBase), paBase, vmaBase, cpg,
+                    uiTableFlags, uiPageFlags, uiAuxFlags);
+}
+
+HRESULT MmMapKernelPages(PTTB pTTB, PTTBAUX pTTBAux, PHYSADDR paBase, UINT32 cpg, UINT32 uiTableFlags,
+                         UINT32 uiPageFlags, UINT32 uiAuxFlags, PKERNADDR pvmaLocation)
+{
+  register HRESULT hr;
+
+  if (!pvmaLocation)
+    return E_POINTER;
+  *pvmaLocation = _MmAllocKernelAddr(cpg);
+  if (!(*pvmaLocation))
+    return MEMMGR_E_NOKERNSPC;
+  hr = MmMapPages(pTTB, pTTBAux, paBase, *pvmaLocation, cpg, uiTableFlags, uiPageFlags, uiAuxFlags);
+  if (FAILED(hr))
+  {
+    _MmFreeKernelAddr(*pvmaLocation, cpg);
+    *pvmaLocation = NULL;
+  }
+  return hr;
+}
+
+HRESULT MmDemapKernelPages(PTTB pTTB, PTTBAUX pTTBAux, KERNADDR vmaBase, UINT32 cpg)
+{
+  register HRESULT hr;
+
+  if ((vmaBase & 0xC0000000) != 0xC0000000)
+    return E_INVALIDARG;
+  hr = MmDemapPages(pTTB, pTTBAux, vmaBase, cpg);
+  if (SUCCEEDED(hr))
+    _MmFreeKernelAddr(vmaBase, cpg);
   return hr;
 }
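MmMapKernelPages and MmDemapKernelPages tie the two halves of this commit together: the rbtree-backed kernel address allocator hands out the virtual range, and map_pages0 fills in the TTB and aux entries. A hedged usage sketch (the physical address and page count are made-up example values, and the kernel-data flag sets are used only for illustration; a real caller would pick flags appropriate to its mapping):

/* Sketch only: map 4 pages of some physical buffer into kernel space, use it, then unmap it. */
PHYSADDR paBuffer = 0x1F000000;   /* hypothetical page-aligned physical address */
KERNADDR kaBuffer = 0;
HRESULT hr;

/* Passing NULL for the TTB/aux pointers makes resolve_ttb/resolve_ttbaux fall back to TTB1. */
hr = MmMapKernelPages(NULL, NULL, paBuffer, 4, TTBFLAGS_KERNEL_DATA, PGTBLFLAGS_KERNEL_DATA,
                      PGAUXFLAGS_KERNEL_DATA, &kaBuffer);
if (SUCCEEDED(hr))
{
  /* ... kaBuffer now addresses the mapped pages ... */
  MmDemapKernelPages(NULL, NULL, kaBuffer, 4);   /* also returns the address range to the allocator */
}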
@@ -451,15 +582,11 @@ errorExit:
  *---------------------
  */
 
-SEG_INIT_CODE void _MmInitVMMap(PSTARTUP_INFO pstartup)
+SEG_INIT_CODE void _MmInitVMMap(PSTARTUP_INFO pstartup, PMALLOC pmInitHeap)
 {
-  UINT32 i;   /* loop counter */
+  g_pMalloc = pmInitHeap;
+  IUnknown_AddRef(g_pMalloc);
   g_pttb1 = (PTTB)(pstartup->kaTTB);
-  g_kaEndFence = pstartup->vmaFirstFree;
-  for (i=0; i<NMAPFRAMES; i++)
-  {
-    g_kaTableMap[i] = g_kaEndFence;
-    g_kaEndFence += SYS_PAGE_SIZE;
-  }
+  g_pttb1Aux = (PTTBAUX)(pstartup->kaTTBAux);
+  rbtInitTree(&g_rbtPageTables, RbtStdCompareByValue);
 }
@@ -232,10 +232,10 @@ sub ParseInterface($)
     my $args = GetArgumentList($d);
     if (defined($args)) {
       $res .= "#define $if->{NAME}_$d->{NAME}(pInterface, $args) \\\n";
-      $res .= "\t(*((pInterface)->pVTable->$d->{NAME}))(pInterface, $args)\n";
+      $res .= "\t(*((pInterface)->pVTable->$d->{NAME}))(($if->{NAME} *)(pInterface), $args)\n";
     } else {
       $res .= "#define $if->{NAME}_$d->{NAME}(pInterface) \\\n";
-      $res .= "\t(*((pInterface)->pVTable->$d->{NAME}))(pInterface)\n";
+      $res .= "\t(*((pInterface)->pVTable->$d->{NAME}))(($if->{NAME} *)(pInterface))\n";
     }
   }
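The effect of this generator change is that every interface-call macro now casts the caller's pointer to the exact interface type before passing it as the first argument. For the IMalloc interface used throughout this commit, the generated macro would now expand along these lines (the method and parameter names here are assumptions, not taken from the actual IDL):

/* Illustrative generator output after this change (names are assumed). */
#define IMalloc_Alloc(pInterface, cb) \
        (*((pInterface)->pVTable->Alloc))((IMalloc *)(pInterface), cb)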