More heap allocation work, plus a rearrangement of the HeapCreate function signature

This commit is contained in:
Eric J. Bowersox 2013-06-17 23:36:38 -06:00
parent dfac130868
commit 05ba968e82
5 changed files with 214 additions and 26 deletions

View File

@ -96,6 +96,8 @@ interface IChunkAllocator: IUnknown
interface IHeapConfiguration: IUnknown
{
[unique] typedef IHeapConfiguration *PHEAPCONFIGURATION;
cpp_quote("typedef void (*PFNHEAPABORT)(PVOID);") /* function called to abort on serious error */
HRESULT SetAbortProc([in] PFNHEAPABORT pfnHeapAbort, [in] PVOID pvArg); /* registers abort callback and its context argument */
HRESULT GetActiveDirtyRatio([out] SSIZE_T *pcbRatio); /* gets active:dirty page ratio (log2; -1 disables purging) */
HRESULT SetActiveDirtyRatio([in] SSIZE_T cbRatio); /* sets active:dirty page ratio (log2; -1 disables purging) */
}

View File

@ -47,11 +47,10 @@
/*
 * Raw backing storage for the heap's internal data.  Callers allocate this block and hand it to
 * HeapCreate; its contents are private to the heap implementation.
 */
typedef struct tagRAWHEAPDATA
{
  UINT32 opaque[128];   /* opaque data, do not modify */
} RAWHEAPDATA, *PRAWHEAPDATA;
typedef void (*PFNRAWHEAPDATAFREE)(PRAWHEAPDATA); /* function that optionally frees the heap data */
typedef void (*PFNHEAPABORT)(PVOID); /* function called to abort on serious error */
/*--------------------
* External functions
@ -60,10 +59,17 @@ typedef void (*PFNHEAPABORT)(PVOID); /* function called to abort on
#define STD_CHUNK_BITS 22 /* standard number of bits in a memory chunk - yields 4Mb chunks */
/* Flag definitions */
#define PHDFLAGS_REDZONE 0x00000001U /* use red zones? */
#define PHDFLAGS_JUNKFILL 0x00000002U /* fill junk in heap? */
#define PHDFLAGS_ZEROFILL 0x00000004U /* zero-fill allocations? */
#define PHDFLAGS_NOTCACHE 0x00000008U /* thread cache disabled? */
#define PHDFLAGS_PROFILE 0x00000010U /* profiling enabled? */
CDECL_BEGIN
extern HRESULT HeapCreate(PRAWHEAPDATA prhd, PFNRAWHEAPDATAFREE pfnFree, PFNHEAPABORT pfnAbort, PVOID pvAbortArg,
IChunkAllocator *pChunkAllocator, IMutexFactory *pMutexFactory, UINT32 nChunkBits,
extern HRESULT HeapCreate(PRAWHEAPDATA prhd, PFNRAWHEAPDATAFREE pfnFree, UINT32 uiFlags, UINT32 nChunkBits,
IChunkAllocator *pChunkAllocator, IMutexFactory *pMutexFactory,
IMalloc **ppHeap);
CDECL_END

View File

@ -208,7 +208,7 @@ SIZE_T _HeapArenaMapBitsBinIndexGet(PHEAPDATA phd, PARENACHUNK pChunk, SIZE_T nd
{
  register SIZE_T szMapBits = _HeapArenaMapPGet(phd, pChunk, ndxPage)->bits;
  register SIZE_T ndxBin = (szMapBits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT;
  /* comparison (==), not assignment: the index must be valid or explicitly BININD_INVALID */
  _H_ASSERT(phd, (ndxBin < NBINS) || (ndxBin == BININD_INVALID));
  return ndxBin;
}
@ -348,45 +348,207 @@ void _HeapArenaMapBitsLargeSet(PHEAPDATA phd, PARENACHUNK pChunk, SIZE_T ndxPage
| CHUNK_MAP_ALLOCATED;
}
/*
* Sets the bin index of the given page that is allocated as part of a large allocation.
*
* Parameters:
* - phd = Pointer to the HEAPDATA block.
* - pChunk = Pointer to the arena chunk.
* - ndxPage = Index of the page to set the bin index for.
* - ndxBin = Index of the bin to set into the page.
*
* Returns:
* Nothing.
*/
void _HeapArenaMapBitsLargeBinIndSet(PHEAPDATA phd, PARENACHUNK pChunk, SIZE_T ndxPage, SIZE_T ndxBin)
{
  register PSIZE_T pMapBits;  /* pointer to the map-bits word for this page */

  _H_ASSERT(phd, ndxBin <= BININD_INVALID);
  pMapBits = _HeapArenaMapBitsPGet(phd, pChunk, ndxPage);
  /* only a single-page large run may carry a bin index */
  _H_ASSERT(phd, _HeapArenaMapBitsLargeSizeGet(phd, pChunk, ndxPage) == SYS_PAGE_SIZE);
  /* replace only the bin-index field, preserving all other map bits */
  *pMapBits = (*pMapBits & ~CHUNK_MAP_BININD_MASK) | (ndxBin << CHUNK_MAP_BININD_SHIFT);
}
/*
* Sets the run/flags bits of the given page to mark it as part of a "small" allocation.
*
* Parameters:
* - phd = Pointer to the HEAPDATA block.
* - pChunk = Pointer to the arena chunk.
* - ndxPage = Index of the page to set the status for.
* - ndxRun = Index of the run the page belongs to.
* - ndxBin = Index of the bin to set into the page.
* - szFlags = May be either CHUNK_MAP_DIRTY or 0, to set the page "dirty" flag.
*
* Returns:
* Nothing.
*/
void _HeapArenaMapBitsSmallSet(PHEAPDATA phd, PARENACHUNK pChunk, SIZE_T ndxPage, SIZE_T ndxRun, SIZE_T ndxBin,
                               SIZE_T szFlags)
{
  register PSIZE_T pMapBits = _HeapArenaMapBitsPGet(phd, pChunk, ndxPage);

  _H_ASSERT(phd, ndxBin < BININD_INVALID);
  _H_ASSERT(phd, ndxPage - ndxRun >= phd->cpgMapBias);
  _H_ASSERT(phd, (szFlags & CHUNK_MAP_DIRTY) == szFlags);  /* only the dirty flag may be supplied */
  /* preserve the existing "unzeroed" flag */
  *pMapBits = (ndxRun << SYS_PAGE_BITS) | (ndxBin << CHUNK_MAP_BININD_SHIFT) | szFlags
              | (*pMapBits & CHUNK_MAP_UNZEROED) | CHUNK_MAP_ALLOCATED;
}
/*
* Sets the unzeroed bit of the given page.
*
* Parameters:
* - phd = Pointer to the HEAPDATA block.
* - pChunk = Pointer to the arena chunk.
* - ndxPage = Index of the page to set the status for.
* - szUnzeroed = Either 0 or CHUNK_MAP_UNZEROED, to set the status flag.
*
* Returns:
* Nothing.
*/
void _HeapArenaMapBitsUnzeroedSet(PHEAPDATA phd, PARENACHUNK pChunk, SIZE_T ndxPage, SIZE_T szUnzeroed)
{
  register PSIZE_T pMapBits = _HeapArenaMapBitsPGet(phd, pChunk, ndxPage);
  /* replace only the unzeroed bit, leaving all other map bits intact */
  *pMapBits = (*pMapBits & ~CHUNK_MAP_UNZEROED) | szUnzeroed;
}
/*
* Adds a byte count to the arena's profile accumulator, triggering some behavior if the profile accumulator
* rolls over the interval count.
*
* Parameters:
* - phd = Pointer to the HEAPDATA block.
* - pArena = Pointer to the arena to add a count to.
* - cbAccum = Number of bytes to add to the accumulator.
*
* Returns:
* - TRUE = The accumulator rolled over the profile interval and was adjusted.
* - FALSE = The accumulator did not roll over the profile interval.
*/
BOOL _HeapArenaProfAccumImpl(PHEAPDATA phd, PARENA pArena, UINT64 cbAccum)
{
  /* XXX assert the profile interval is non-zero */
  pArena->cbProfAccum += cbAccum;  /* accumulate the byte count */
  /* XXX interval-rollover handling awaits the prof_interval field:
  if (pArena->cbProfAccum >= phd->prof_interval)
  {
    pArena->cbProfAccum -= phd->prof_interval;
    return TRUE;
  }
  */
  return FALSE;
}
/*
* Adds a byte count to the arena's profile accumulator, triggering some behavior if the profile accumulator
* rolls over the interval count. Assumes the arena mutex is locked.
*
* Parameters:
* - phd = Pointer to the HEAPDATA block.
* - pArena = Pointer to the arena to add a count to.
* - cbAccum = Number of bytes to add to the accumulator.
*
* Returns:
* - TRUE = The accumulator rolled over the profile interval and was adjusted.
* - FALSE = The accumulator did not roll over the profile interval, or profiling was not enabled.
*/
BOOL _HeapArenaProfAccumLocked(PHEAPDATA phd, PARENA pArena, UINT64 cbAccum)
{
  /* profiling plumbing not yet wired in; always report no rollover */
  return FALSE; /* XXX if phd->prof_interval == 0 */
  /* XXX return _HeapArenaProfAccumImpl(phd, pArena, cbAccum); */
}
/*
* Adds a byte count to the arena's profile accumulator, triggering some behavior if the profile accumulator
* rolls over the interval count.
*
* Parameters:
* - phd = Pointer to the HEAPDATA block.
* - pArena = Pointer to the arena to add a count to.
* - cbAccum = Number of bytes to add to the accumulator.
*
* Returns:
* - TRUE = The accumulator rolled over the profile interval and was adjusted.
* - FALSE = The accumulator did not roll over the profile interval, or profiling was not enabled.
*/
BOOL _HeapArenaProfAccum(PHEAPDATA phd, PARENA pArena, UINT64 cbAccum)
{
  /* profiling plumbing not yet wired in; always report no rollover */
  return FALSE; /* XXX if phd->prof_interval == 0 */
  /* XXX
  {
    BOOL rc;
    IMutex_Lock(pArena->pmtxLock);
    rc = _HeapArenaProfAccumImpl(phd, pArena, cbAccum);
    IMutex_Unlock(pArena->pmtxLock);
    return rc;
  }
  */
}
/*
* Returns the bin index of the given page the pointer is on used for small allocations.
*
* Parameters:
* - phd = Pointer to the HEAPDATA block.
* - pv = Pointer to allocation to get the bin index for.
* - szMapBits = Map bits for the page the pointer points to.
*
* Returns:
* The associated bin index.
*/
SIZE_T _HeapArenaPtrSmallBinIndGet(PHEAPDATA phd, PCVOID pv, SIZE_T szMapBits)
{
  /* the bin index is stored directly in the page's map bits */
  SIZE_T ndxBin = (szMapBits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT;
#if 0 /* debugging code */
  {
    PARENACHUNK pChunk;        /* pointer to enclosing chunk */
    PARENA pArena;             /* pointer to arena for the chunk */
    SIZE_T ndxPage;            /* calculated page index */
    SIZE_T szMapBitsActual;    /* actual retrieved map bits */
    PARENARUN pRun;            /* pointer to run for this allocation */
    PARENABIN pBin;            /* pointer to bin for this allocation */
    SIZE_T ndxBinActual;       /* actual bin index for this allocation */
    PARENABININFO pBinInfo;    /* pointer to bin info */

    _H_ASSERT(phd, ndxBin != BININD_INVALID);
    _H_ASSERT(phd, ndxBin < NBINS);
    pChunk = (PARENACHUNK)CHUNK_ADDR2BASE(phd, pv);
    pArena = pChunk->parena;
    ndxPage = ((UINT_PTR)pv - (UINT_PTR)pChunk) >> SYS_PAGE_BITS;
    szMapBitsActual = _HeapArenaMapBitsGet(phd, pChunk, ndxPage);
    _H_ASSERT(phd, szMapBits == szMapBitsActual);
    _H_ASSERT(phd, _HeapArenaMapBitsLargeGet(phd, pChunk, ndxPage) == 0);
    _H_ASSERT(phd, _HeapArenaMapBitsAllocatedGet(phd, pChunk, ndxPage) != 0);
    /* shift by page *bits*, not page size (was SYS_PAGE_SIZE; cf. the >> SYS_PAGE_BITS above) */
    pRun = (PARENARUN)((UINT_PTR)pChunk + (UINT_PTR)((ndxPage - (szMapBitsActual >> SYS_PAGE_BITS)) << SYS_PAGE_BITS));
    pBin = pRun->pBin;
    ndxBinActual = pBin - pArena->aBins;
    _H_ASSERT(phd, ndxBin == ndxBinActual);
    pBinInfo = &(phd->aArenaBinInfo[ndxBinActual]);
    _H_ASSERT(phd, ((UINT_PTR)pv - ((UINT_PTR)pRun + (UINT_PTR)(pBinInfo->ofsRegion0))) % pBinInfo->cbInterval == 0);
  }
#endif
  return ndxBin;
}
/*
* Returns the index of a specific bin within an arena.
*
* Parameters:
* - phd = Pointer to the HEAPDATA block.
* - pArena = Pointer to the arena.
* - pBin = Pointer to the bin within the arena.
*
* Returns:
* Index of the bin within the arena.
*/
SIZE_T _HeapArenaBinIndex(PHEAPDATA phd, PARENA pArena, PARENABIN pBin)
{
  SIZE_T ndxBin = pBin - pArena->aBins;  /* pointer difference within the arena's bin array */
  _H_ASSERT(phd, ndxBin < NBINS);
  return ndxBin;
}
UINT32 _HeapArenaRunRegInd(PHEAPDATA phd, PARENARUN pRun, PARENABININFO pBinInfo, PCVOID pv)

View File

@ -342,9 +342,6 @@ struct tagARENA
*----------------------------------
*/
#define PHDFLAGS_REDZONE 0x00000001U /* use red zones? */
#define PHDFLAGS_JUNKFILL 0x00000002U /* fill junk in heap? */
typedef struct tagHEAPDATA {
IMalloc mallocInterface; /* pointer to IMalloc interface - MUST BE FIRST! */
IConnectionPointContainer cpContainerInterface; /* pointer to IConnectionPointContainer interface */

View File

@ -45,7 +45,10 @@
#include "heap_internals.h"
#include "enumgeneric.h"
#define PHDFLAGS_DELETING 0x80000000 /* deleting the heap */
#define PHDFLAGS_DELETING 0x80000000 /* deleting the heap */
#define PHDFLAGS_PROFILE_ACTIVE 0x40000000 /* profile is active */
#define PHDFLAGS_INIT (PHDFLAGS_REDZONE|PHDFLAGS_JUNKFILL|PHDFLAGS_ZEROFILL|PHDFLAGS_NOTCACHE|PHDFLAGS_PROFILE)
/*------------------------
* IMalloc implementation
@ -500,6 +503,25 @@ static UINT32 heapconf_Release(IUnknown *pThis)
return malloc_Release((IUnknown *)HeapDataPtr(pThis));
}
/*
* Sets the function to call if the heap decides to abort operation.
*
* Parameters:
* - pThis = Pointer to the HeapConfiguration interface in the heap data object.
* - pfnHeapAbort = Pointer to the function to call when the heap aborts.
* - pvArg = Pointer to argument to pass to the heap abort function.
*
* Returns:
* Standard HRESULT success/failure indicator.
*/
static HRESULT heapconf_SetAbortProc(IHeapConfiguration *pThis, PFNHEAPABORT pfnHeapAbort, PVOID pvArg)
{
  register PHEAPDATA phd = (PHEAPDATA)HeapDataPtr(pThis);  /* underlying heap data block */

  /* record the abort callback and the context argument passed to it */
  phd->pfnAbort = pfnHeapAbort;
  phd->pvAbortArg = pvArg;
  return S_OK;
}
/*
* Retrieves the current ratio of active to dirty pages in the heap, expressed as a base-2 logarithm value.
* A value of -1 disables dirty page purging.
@ -548,6 +570,7 @@ static const SEG_RODATA struct IHeapConfigurationVTable vtblHeapConfiguration =
.QueryInterface = heapconf_QueryInterface,
.AddRef = heapconf_AddRef,
.Release = heapconf_Release,
.SetAbortProc = heapconf_SetAbortProc,
.GetActiveDirtyRatio = heapconf_GetActiveDirtyRatio,
.SetActiveDirtyRatio = heapconf_SetActiveDirtyRatio
};
@ -567,26 +590,26 @@ static const SEG_RODATA struct IHeapConfigurationVTable vtblHeapConfiguration =
* for the heap.
* - pfnFree = Pointer to a function called as the last stage of releasing the heap, which frees the
* "prhd" block. May be NULL.
* - pfnAbort = Pointer to a function called when there's a serious error in the heap. May be NULL.
* - pvAbortArg = Argument passed to the pfnAbort function when it's called. May be NULL.
* - uiFlags = Flag bits for the heap.
* - nChunkBits = Number of "bits" in a memory chunk that gets allocated.
* - pChunkAllocator = Pointer to the IChunkAllocator interface used by the heap to allocate chunks of memory
* for carving up by the heap.
* - pMutexFactory = Pointer to the IMutexFactory interface used to allocate IMutex objects.
* - nChunkBits = Number of "bits" in a memory chunk that gets allocated.
* - ppHeap = Pointer location that will receive a pointer to the heap's IMalloc interface.
*
* Returns:
* Standard HRESULT success/failure.
*/
HRESULT HeapCreate(PRAWHEAPDATA prhd, PFNRAWHEAPDATAFREE pfnFree, PFNHEAPABORT pfnAbort, PVOID pvAbortArg,
IChunkAllocator *pChunkAllocator, IMutexFactory *pMutexFactory, UINT32 nChunkBits,
IMalloc **ppHeap)
HRESULT HeapCreate(PRAWHEAPDATA prhd, PFNRAWHEAPDATAFREE pfnFree, UINT32 uiFlags, UINT32 nChunkBits,
IChunkAllocator *pChunkAllocator, IMutexFactory *pMutexFactory, IMalloc **ppHeap)
{
PHEAPDATA phd; /* pointer to actual heap data */
HRESULT hr; /* HRESULT of intermediate operations */
if (sizeof(RAWHEAPDATA) < sizeof(HEAPDATA))
return MEMMGR_E_BADHEAPDATASIZE; /* bogus size of raw heap data */
if (uiFlags & ~PHDFLAGS_INIT)
return E_INVALIDARG; /* invalid flags */
if (!prhd || !pChunkAllocator || !ppHeap)
return E_POINTER; /* invalid pointers */
@ -597,10 +620,8 @@ HRESULT HeapCreate(PRAWHEAPDATA prhd, PFNRAWHEAPDATAFREE pfnFree, PFNHEAPABORT p
phd->cpContainerInterface.pVTable = &vtblConnectionPointContainer;
phd->heapConfInterface.pVTable = &vtblHeapConfiguration;
phd->uiRefCount = 1;
phd->uiFlags = 0;
phd->uiFlags = uiFlags | PHDFLAGS_PROFILE_ACTIVE;
phd->pfnFreeRawHeapData = pfnFree;
phd->pfnAbort = pfnAbort;
phd->pvAbortArg = pvAbortArg;
phd->nChunkBits = nChunkBits;
phd->szChunk = 1 << nChunkBits;
if (phd->szChunk < SYS_PAGE_SIZE)