Diffstat (limited to 'lib/system/alloc.nim')
-rw-r--r-- | lib/system/alloc.nim | 347
1 file changed, 223 insertions, 124 deletions
diff --git a/lib/system/alloc.nim b/lib/system/alloc.nim
index 9c8f70c11..3de6d8713 100644
--- a/lib/system/alloc.nim
+++ b/lib/system/alloc.nim
@@ -20,6 +20,37 @@ template track(op, address, size) =
 # We manage *chunks* of memory. Each chunk is a multiple of the page size.
 # Each chunk starts at an address that is divisible by the page size.
+# Small chunks may be divided into smaller cells of reusable pointers to reduce the number of page allocations.
+
+# An allocation of a small pointer looks approximately like this
+#[
+
+  alloc -> rawAlloc -> No free chunk available -> Request a new page from tlsf -> result = chunk.data ----------+
+              |                                                                                                 |
+              v                                                                                                 |
+     Free chunk available                                                                                       |
+              |                                                                                                 |
+              v                                                                                                 v
+  Fetch shared cells -> No free cells available -> Advance acc -> result = chunk.data + chunk.acc ------> return
+  (may not add new cells)                                                                                       ^
+              |                                                                                                 |
+              v                                                                                                 |
+  Free cells available -> result = chunk.freeList -> Advance chunk.freeList ------------------------------------+
+]#
+# so it is split into 3 paths, where the last path is preferred to prevent unnecessary allocations.
+#
+#
+# A deallocation of a small pointer then looks like this
+#[
+  dealloc -> rawDealloc -> chunk.owner == addr(a) ----> This thread owns the chunk ------> The current chunk is active -> Chunk is completely unused ----> Chunk references no foreign cells
+                 |                                      (Add cell into the current chunk)              |                                                   Return the current chunk back to tlsf
+                 |                                                  |                                  |                                                                   |
+                 v                                                  v                                  v                                                                   v
+  A different thread owns this chunk.       The current chunk is not active.               chunk.free was < size                                       Chunk references foreign cells, noop
+  Add the cell to a.sharedFreeLists         Add the cell into the active chunk             Activate the chunk                                                           (end)
+  (end)                                     (end)                                          (end)
+]#
+# So "true" deallocation is delayed for as long as possible in favor of reusing cells.
 
 const nimMinHeapPages {.intdefine.} = 128 # 0.5 MB
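The two diagrams compress well into code. Below is a minimal runnable sketch of the three allocation paths; `Cell`, `Chunk`, and `toyAlloc` are illustrative stand-ins, not types from alloc.nim:

```nim
# Toy model of the three rawAlloc paths: reuse a free cell if one exists,
# otherwise bump the accumulator, otherwise start a new chunk.
type
  Cell = ref object
    next: Cell
  Chunk = ref object
    freeList: Cell      # path 3: reuse a previously freed cell
    acc, capacity: int  # path 2: bump-allocate fresh space

proc toyAlloc(c: var Chunk; size: int): string =
  if c == nil:
    c = Chunk(acc: size, capacity: 4096)  # path 1: request a new chunk
    "new chunk"
  elif c.freeList != nil:
    c.freeList = c.freeList.next          # path 3: preferred, no new memory needed
    "reused cell"
  else:
    inc c.acc, size                       # path 2: advance the accumulator
    "bumped accumulator"

when isMainModule:
  var c: Chunk
  echo toyAlloc(c, 64)  # new chunk
  echo toyAlloc(c, 64)  # bumped accumulator
  c.freeList = Cell()   # simulate a dealloc into the chunk's free list
  echo toyAlloc(c, 64)  # reused cell
```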
@@ -71,6 +102,8 @@ const
 
 type
   FreeCell {.final, pure.} = object
+    # A free cell is a pointer that has been freed, meaning it became available for reuse.
+    # It may become foreign if it is lent to a chunk that did not create it; doing so reduces the number of pages needed.
     next: ptr FreeCell  # next free cell in chunk (overlaid with refcount)
     when not defined(gcDestructors):
       zeroField: int # 0 means cell is not used (overlaid with typ field)
@@ -90,22 +123,23 @@ type
 
   SmallChunk = object of BaseChunk
     next, prev: PSmallChunk  # chunks of the same size
-    freeList: ptr FreeCell
-    free: int            # how many bytes remain
-    acc: int             # accumulator for small object allocation
-    when defined(gcDestructors):
-      sharedFreeList: ptr FreeCell # make no attempt at avoiding false sharing for now for this object field
-    when defined(nimAlignPragma):
-      data {.align: MemAlign.}: UncheckedArray[byte]      # start of usable memory
-    else:
-      data: UncheckedArray[byte]
+    freeList: ptr FreeCell   # Singly linked list of cells. They may be from foreign chunks or from the current chunk.
+                             # Should be `nil` when the chunk isn't active in `a.freeSmallChunks`.
+    free: int32              # Bytes this chunk is able to provide using both the accumulator and free cells.
+                             # When a cell is considered foreign, its source chunk's free field is NOT adjusted until it
+                             # reaches dealloc while the source chunk is active.
+                             # Instead, the receiving chunk gains the capacity and thus reserves space in the foreign chunk.
+    acc: uint32              # Offset from data, used when there are no free cells available but the chunk is considered free.
+    foreignCells: int        # When a free cell is given to a chunk that is not its origin,
+                             # both the cell and the source chunk are considered foreign.
+                             # Receiving a foreign cell can happen either when deallocating from another thread or when
+                             # the active chunk in `a.freeSmallChunks` is not the current chunk.
+                             # Freeing a chunk while `foreignCells > 0` leaks memory as all references to it become lost.
+    data {.align: MemAlign.}: UncheckedArray[byte]      # start of usable memory
 
   BigChunk = object of BaseChunk # not necessarily > PageSize!
     next, prev: PBigChunk    # chunks of the same (or bigger) size
-    when defined(nimAlignPragma):
-      data {.align: MemAlign.}: UncheckedArray[byte]      # start of usable memory
-    else:
-      data: UncheckedArray[byte]
+    data {.align: MemAlign.}: UncheckedArray[byte]      # start of usable memory
 
   HeapLinks = object
     len: int
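`FreeCell` is an intrusive list node: the freed memory itself holds the `next` link, so the free list needs no storage of its own. A sketch of that overlay technique with a fixed pool; `Cell`, `pool`, `push`, and `pop` are hypothetical names:

```nim
# The freed bytes themselves store the link, exactly like FreeCell overlays
# the start of a freed cell.
type Cell = object
  next: ptr Cell

var
  pool: array[4, array[64, byte]]  # four 64-byte "cells"
  freeList: ptr Cell

proc push(p: pointer) =
  let c = cast[ptr Cell](p)  # overlay the link onto the freed memory
  c.next = freeList
  freeList = c

proc pop(): pointer =
  result = freeList
  if freeList != nil:
    freeList = freeList.next

when isMainModule:
  for i in 0 .. 3:
    push(addr pool[i])
  doAssert pop() == cast[pointer](addr pool[3])  # LIFO reuse, like chunk.freeList
```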
@@ -115,7 +149,12 @@ type
   MemRegion = object
     when not defined(gcDestructors):
       minLargeObj, maxLargeObj: int
-    freeSmallChunks: array[0..max(1,SmallChunkSize div MemAlign-1), PSmallChunk]
+    freeSmallChunks: array[0..max(1, SmallChunkSize div MemAlign-1), PSmallChunk]
+      # List of available chunks per size class. Only one is expected to be active per class.
+    when defined(gcDestructors):
+      sharedFreeLists: array[0..max(1, SmallChunkSize div MemAlign-1), ptr FreeCell]
+        # When a thread frees a pointer it did not create, it must not adjust the counters.
+        # Instead, the cell is placed here and deferred until the next allocation.
     flBitmap: uint32
     slBitmap: array[RealFli, uint32]
     matrix: array[RealFli, array[MaxSli, PBigChunk]]
@@ -274,6 +313,20 @@ proc getMaxMem(a: var MemRegion): int =
   # maximum of these both values here:
   result = max(a.currMem, a.maxMem)
 
+const nimMaxHeap {.intdefine.} = 0
+
+proc allocPages(a: var MemRegion, size: int): pointer =
+  when nimMaxHeap != 0:
+    if a.occ + size > nimMaxHeap * 1024 * 1024:
+      raiseOutOfMem()
+  osAllocPages(size)
+
+proc tryAllocPages(a: var MemRegion, size: int): pointer =
+  when nimMaxHeap != 0:
+    if a.occ + size > nimMaxHeap * 1024 * 1024:
+      raiseOutOfMem()
+  osTryAllocPages(size)
+
 proc llAlloc(a: var MemRegion, size: int): pointer =
   # *low-level* alloc for the memory managers data structures. Deallocation
   # is done at the end of the allocator's life time.
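`allocPages`/`tryAllocPages` centralize the `-d:nimMaxHeap=<MiB>` cap that previously lived only in `requestOsChunks`, so every OS page request is now checked. The `{.intdefine.}` pattern itself looks like this outside the allocator; `maxArenaMiB`, `occupied`, and `guardedAlloc` are made-up names for illustration:

```nim
# Compile with `-d:maxArenaMiB=256` to set a cap; 0 (the default) disables it.
const maxArenaMiB {.intdefine.} = 0

var occupied = 0  # bytes handed out so far

proc guardedAlloc(size: Natural): pointer =
  when maxArenaMiB != 0:
    if occupied + size > maxArenaMiB * 1024 * 1024:
      raise newException(OutOfMemDefect, "arena cap exceeded")
  inc occupied, size
  alloc(size)

when isMainModule:
  let p = guardedAlloc(1024)
  echo occupied  # 1024
  dealloc(p)
```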
@@ -283,14 +336,14 @@ proc llAlloc(a: var MemRegion, size: int): pointer =
     # is one page:
     sysAssert roundup(size+sizeof(LLChunk), PageSize) == PageSize, "roundup 6"
     var old = a.llmem # can be nil and is correct with nil
-    a.llmem = cast[PLLChunk](osAllocPages(PageSize))
+    a.llmem = cast[PLLChunk](allocPages(a, PageSize))
     when defined(nimAvlcorruption):
       trackLocation(a.llmem, PageSize)
     incCurrMem(a, PageSize)
     a.llmem.size = PageSize - sizeof(LLChunk)
     a.llmem.acc = sizeof(LLChunk)
     a.llmem.next = old
-  result = cast[pointer](cast[ByteAddress](a.llmem) + a.llmem.acc)
+  result = cast[pointer](cast[int](a.llmem) + a.llmem.acc)
   dec(a.llmem.size, size)
   inc(a.llmem.acc, size)
   zeroMem(result, size)
@@ -326,7 +379,7 @@ when not defined(gcDestructors):
     n.link[0] = a.freeAvlNodes
     a.freeAvlNodes = n
 
-proc addHeapLink(a: var MemRegion; p: PBigChunk, size: int) =
+proc addHeapLink(a: var MemRegion; p: PBigChunk, size: int): ptr HeapLinks =
   var it = addr(a.heapLinks)
   while it != nil and it.len >= it.chunks.len: it = it.next
   if it == nil:
@@ -335,10 +388,12 @@
     a.heapLinks.next = n
     n.chunks[0] = (p, size)
     n.len = 1
+    result = n
   else:
     let L = it.len
     it.chunks[L] = (p, size)
     inc it.len
+    result = it
 
 when not defined(gcDestructors):
   include "system/avltree"
@@ -422,8 +477,8 @@ iterator allObjects(m: var MemRegion): pointer {.inline.} =
           var c = cast[PSmallChunk](c)
           let size = c.size
-          var a = cast[ByteAddress](addr(c.data))
-          let limit = a + c.acc
+          var a = cast[int](addr(c.data))
+          let limit = a + c.acc.int
           while a <% limit:
             yield cast[pointer](a)
             a = a +% size
@@ -441,13 +496,13 @@ when not defined(gcDestructors):
 # ------------- chunk management ----------------------------------------------
 
 proc pageIndex(c: PChunk): int {.inline.} =
-  result = cast[ByteAddress](c) shr PageShift
+  result = cast[int](c) shr PageShift
 
 proc pageIndex(p: pointer): int {.inline.} =
-  result = cast[ByteAddress](p) shr PageShift
+  result = cast[int](p) shr PageShift
 
 proc pageAddr(p: pointer): PChunk {.inline.} =
-  result = cast[PChunk](cast[ByteAddress](p) and not PageMask)
+  result = cast[PChunk](cast[int](p) and not PageMask)
   #sysAssert(Contains(allocator.chunkStarts, pageIndex(result)))
 
 when false:
@@ -459,15 +514,10 @@ when false:
       it, it.next, it.prev, it.size)
       it = it.next
 
-const nimMaxHeap {.intdefine.} = 0
-
 proc requestOsChunks(a: var MemRegion, size: int): PBigChunk =
   when not defined(emscripten):
     if not a.blockChunkSizeIncrease:
       let usedMem = a.occ #a.currMem # - a.freeMem
-      when nimMaxHeap != 0:
-        if usedMem > nimMaxHeap * 1024 * 1024:
-          raiseOutOfMem()
       if usedMem < 64 * 1024:
         a.nextChunkSize = PageSize*4
       else:
@@ -476,32 +526,32 @@ proc requestOsChunks(a: var MemRegion, size: int): PBigChunk =
 
   var size = size
   if size > a.nextChunkSize:
-    result = cast[PBigChunk](osAllocPages(size))
+    result = cast[PBigChunk](allocPages(a, size))
   else:
-    result = cast[PBigChunk](osTryAllocPages(a.nextChunkSize))
+    result = cast[PBigChunk](tryAllocPages(a, a.nextChunkSize))
     if result == nil:
-      result = cast[PBigChunk](osAllocPages(size))
+      result = cast[PBigChunk](allocPages(a, size))
       a.blockChunkSizeIncrease = true
     else:
      size = a.nextChunkSize
   incCurrMem(a, size)
   inc(a.freeMem, size)
-  a.addHeapLink(result, size)
+  let heapLink = a.addHeapLink(result, size)
   when defined(debugHeapLinks):
     cprintf("owner: %p; result: %p; next pointer %p; size: %ld\n", addr(a),
-      result, result.heapLink, result.size)
+      result, heapLink, size)
 
   when defined(memtracker):
     trackLocation(addr result.size, sizeof(int))
 
-  sysAssert((cast[ByteAddress](result) and PageMask) == 0, "requestOsChunks 1")
+  sysAssert((cast[int](result) and PageMask) == 0, "requestOsChunks 1")
   #zeroMem(result, size)
   result.next = nil
   result.prev = nil
   result.size = size
   # update next.prevSize:
-  var nxt = cast[ByteAddress](result) +% size
+  var nxt = cast[int](result) +% size
   sysAssert((nxt and PageMask) == 0, "requestOsChunks 2")
   var next = cast[PChunk](nxt)
   if pageIndex(next) in a.chunkStarts:
@@ -509,7 +559,7 @@
     next.prevSize = size or (next.prevSize and 1)
   # set result.prevSize:
   var lastSize = if a.lastSize != 0: a.lastSize else: PageSize
-  var prv = cast[ByteAddress](result) -% lastSize
+  var prv = cast[int](result) -% lastSize
   sysAssert((nxt and PageMask) == 0, "requestOsChunks 3")
   var prev = cast[PChunk](prv)
   if pageIndex(prev) in a.chunkStarts and prev.size == lastSize:
@@ -555,13 +605,13 @@ proc listRemove[T](head: var T, c: T) {.inline.} =
 
 proc updatePrevSize(a: var MemRegion, c: PBigChunk,
                     prevSize: int) {.inline.} =
-  var ri = cast[PChunk](cast[ByteAddress](c) +% c.size)
-  sysAssert((cast[ByteAddress](ri) and PageMask) == 0, "updatePrevSize")
+  var ri = cast[PChunk](cast[int](c) +% c.size)
+  sysAssert((cast[int](ri) and PageMask) == 0, "updatePrevSize")
   if isAccessible(a, ri):
     ri.prevSize = prevSize or (ri.prevSize and 1)
 
 proc splitChunk2(a: var MemRegion, c: PBigChunk, size: int): PBigChunk =
-  result = cast[PBigChunk](cast[ByteAddress](c) +% size)
+  result = cast[PBigChunk](cast[int](c) +% size)
   result.size = c.size - size
   track("result.size", addr result.size, sizeof(int))
   when not defined(nimOptimizedSplitChunk):
@@ -590,8 +640,8 @@ proc freeBigChunk(a: var MemRegion, c: PBigChunk) =
   when coalescLeft:
     let prevSize = c.prevSize
     if prevSize != 0:
-      var le = cast[PChunk](cast[ByteAddress](c) -% prevSize)
-      sysAssert((cast[ByteAddress](le) and PageMask) == 0, "freeBigChunk 4")
+      var le = cast[PChunk](cast[int](c) -% prevSize)
+      sysAssert((cast[int](le) and PageMask) == 0, "freeBigChunk 4")
       if isAccessible(a, le) and chunkUnused(le):
         sysAssert(not isSmallChunk(le), "freeBigChunk 5")
         if not isSmallChunk(le) and le.size < MaxBigChunkSize:
@@ -607,8 +657,8 @@ proc freeBigChunk(a: var MemRegion, c: PBigChunk) =
         addChunkToMatrix(a, c)
         c = rest
   when coalescRight:
-    var ri = cast[PChunk](cast[ByteAddress](c) +% c.size)
-    sysAssert((cast[ByteAddress](ri) and PageMask) == 0, "freeBigChunk 2")
+    var ri = cast[PChunk](cast[int](c) +% c.size)
+    sysAssert((cast[int](ri) and PageMask) == 0, "freeBigChunk 2")
     if isAccessible(a, ri) and chunkUnused(ri):
       sysAssert(not isSmallChunk(ri), "freeBigChunk 3")
       if not isSmallChunk(ri) and c.size < MaxBigChunkSize:
@@ -660,7 +710,7 @@ proc getBigChunk(a: var MemRegion, size: int): PBigChunk =
   releaseSys a.lock
 
 proc getHugeChunk(a: var MemRegion; size: int): PBigChunk =
-  result = cast[PBigChunk](osAllocPages(size))
+  result = cast[PBigChunk](allocPages(a, size))
   when RegionHasLock:
     if not a.lockActive:
       a.lockActive = true
@@ -669,7 +719,7 @@ proc getHugeChunk(a: var MemRegion; size: int): PBigChunk =
   incCurrMem(a, size)
   # XXX add this to the heap links. But also remove it from it later.
   when false: a.addHeapLink(result, size)
-  sysAssert((cast[ByteAddress](result) and PageMask) == 0, "getHugeChunk")
+  sysAssert((cast[int](result) and PageMask) == 0, "getHugeChunk")
   result.next = nil
   result.prev = nil
   result.size = size
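The `ByteAddress` → `int` changes throughout are mechanical (`ByteAddress` is an alias for `int`). The invariant they rely on is that chunks start on page boundaries, so `pageAddr` can recover a chunk header from any interior pointer by masking off the low bits. A self-contained illustration with an assumed 4 KiB page size:

```nim
import std/strutils

# Assumed 4 KiB pages; alloc.nim defines its own PageShift/PageSize/PageMask.
const
  PageShift = 12
  PageSize = 1 shl PageShift
  PageMask = PageSize - 1

proc pageBase(p: uint): uint =
  # pageAddr in miniature: clearing the offset bits inside the page
  # yields the start of the enclosing, page-aligned chunk
  p and not uint(PageMask)

when isMainModule:
  let interior = 0x123450'u          # an address somewhere inside a page
  echo "0x", toHex(pageBase(interior), 6)  # 0x123000
```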
@@ -772,40 +822,42 @@ when defined(gcDestructors):
     sysAssert c.next == nil, "c.next pointer must be nil"
     atomicPrepend a.sharedFreeListBigChunks, c
 
-  proc addToSharedFreeList(c: PSmallChunk; f: ptr FreeCell) {.inline.} =
-    atomicPrepend c.sharedFreeList, f
+  proc addToSharedFreeList(c: PSmallChunk; f: ptr FreeCell; size: int) {.inline.} =
+    atomicPrepend c.owner.sharedFreeLists[size], f
+
+  const MaxSteps = 20
 
   proc compensateCounters(a: var MemRegion; c: PSmallChunk; size: int) =
     # rawDealloc did NOT do the usual:
     # `inc(c.free, size); dec(a.occ, size)` because it wasn't the owner of these
     # memory locations. We have to compensate here for these for the entire list.
-    # Well, not for the entire list, but for `max` elements of the list because
-    # we split the list in order to achieve bounded response times.
     var it = c.freeList
-    var x = 0
-    var maxIters = 20 # make it time-bounded
+    var total = 0
     while it != nil:
-      if maxIters == 0:
-        let rest = it.next.loada
-        it.next.storea nil
-        addToSharedFreeList(c, rest)
-        break
-      inc x, size
-      it = it.next.loada
-      dec maxIters
-    inc(c.free, x)
-    dec(a.occ, x)
+      inc total, size
+      let chunk = cast[PSmallChunk](pageAddr(it))
+      if c != chunk:
+        # The cell is foreign, potentially even from a foreign thread.
+        # It must block the current chunk from being freed, as doing so would leak memory.
+        inc c.foreignCells
+      it = it.next
+    # By not adjusting the foreign chunk we reserve space in it to prevent deallocation
+    inc(c.free, total)
+    dec(a.occ, total)
 
   proc freeDeferredObjects(a: var MemRegion; root: PBigChunk) =
     var it = root
-    var maxIters = 20 # make it time-bounded
+    var maxIters = MaxSteps # make it time-bounded
     while true:
+      let rest = it.next.loada
+      it.next.storea nil
+      deallocBigChunk(a, cast[PBigChunk](it))
       if maxIters == 0:
-        let rest = it.next.loada
-        it.next.storea nil
-        addToSharedFreeListBigChunks(a, rest)
+        if rest != nil:
+          addToSharedFreeListBigChunks(a, rest)
+          sysAssert a.sharedFreeListBigChunks != nil, "re-enqueueing failed"
         break
-      it = it.next.loada
+      it = rest
       dec maxIters
       if it == nil: break
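`a.sharedFreeLists[size]` behaves as a lock-free LIFO: foreign threads prepend cells with a CAS loop (`atomicPrepend`) and the owning thread later steals the entire list in one exchange (the `atomicExchangeN` in rawAlloc). A sketch of the same pattern with std/atomics; `Node`, `prepend`, and `stealAll` are illustrative, not alloc.nim's API:

```nim
import std/atomics

type Node = object
  next: ptr Node

var head: Atomic[ptr Node]

proc prepend(n: ptr Node) =
  # lock-free push: retry until our CAS of the list head succeeds
  var old = head.load(moRelaxed)
  n.next = old
  while not head.compareExchangeWeak(old, n, moRelease, moRelaxed):
    n.next = old  # `old` was refreshed by the failed CAS

proc stealAll(): ptr Node =
  # consumer side: detach the whole list with a single exchange
  head.exchange(nil, moAcquire)

when isMainModule:
  var nodes: array[3, Node]
  for i in 0 .. 2:
    prepend(addr nodes[i])
  var it = stealAll()
  var count = 0
  while it != nil:
    inc count
    it = it.next
  doAssert count == 3
```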
@@ -816,61 +868,90 @@ proc rawAlloc(a: var MemRegion, requestedSize: int): pointer =
   sysAssert(roundup(65, 8) == 72, "rawAlloc: roundup broken")
   var size = roundup(requestedSize, MemAlign)
   sysAssert(size >= sizeof(FreeCell), "rawAlloc: requested size too small")
-  sysAssert(size >= requestedSize, "insufficient allocated size!")
   #c_fprintf(stdout, "alloc; size: %ld; %ld\n", requestedSize, size)
+
   if size <= SmallChunkSize-smallChunkOverhead():
+    template fetchSharedCells(tc: PSmallChunk) =
+      # Consumes cells from (potentially) foreign threads from `a.sharedFreeLists[s]`
+      when defined(gcDestructors):
+        if tc.freeList == nil:
+          when hasThreadSupport:
+            # Steal the entire list from `sharedFreeList`:
+            tc.freeList = atomicExchangeN(addr a.sharedFreeLists[s], nil, ATOMIC_RELAXED)
+          else:
+            tc.freeList = a.sharedFreeLists[s]
+            a.sharedFreeLists[s] = nil
+          # if `tc.freeList` isn't nil, `tc` will gain capacity.
+          # We must calculate how much it gained and how many foreign cells are included.
+          compensateCounters(a, tc, size)
+
     # allocate a small block: for small chunks, we use only its next pointer
     let s = size div MemAlign
     var c = a.freeSmallChunks[s]
     if c == nil:
+      # There is no free chunk of the requested size available, we need a new one.
      c = getSmallChunk(a)
+      # init all fields in case memory didn't get zeroed
       c.freeList = nil
+      c.foreignCells = 0
       sysAssert c.size == PageSize, "rawAlloc 3"
       c.size = size
-      c.acc = size
-      when defined(gcDestructors):
-        c.sharedFreeList = nil
-      c.free = SmallChunkSize - smallChunkOverhead() - size
+      c.acc = size.uint32
+      c.free = SmallChunkSize - smallChunkOverhead() - size.int32
       sysAssert c.owner == addr(a), "rawAlloc: No owner set!"
       c.next = nil
       c.prev = nil
-      listAdd(a.freeSmallChunks[s], c)
+      # Shared cells are fetched here in case `c.size * 2 >= SmallChunkSize - smallChunkOverhead()`.
+      # For those single cell chunks, we would otherwise have to allocate a new one almost every time.
+      fetchSharedCells(c)
+      if c.free >= size:
+        # Because removals from `a.freeSmallChunks[s]` only happen in the other alloc branch and during dealloc,
+        # we must not add it to the list if it cannot be used the next time a pointer of `size` bytes is needed.
+        listAdd(a.freeSmallChunks[s], c)
       result = addr(c.data)
-      sysAssert((cast[ByteAddress](result) and (MemAlign-1)) == 0, "rawAlloc 4")
+      sysAssert((cast[int](result) and (MemAlign-1)) == 0, "rawAlloc 4")
     else:
+      # There is a free chunk of the requested size available, use it.
       sysAssert(allocInv(a), "rawAlloc: begin c != nil")
       sysAssert c.next != c, "rawAlloc 5"
       #if c.size != size:
       #  c_fprintf(stdout, "csize: %lld; size %lld\n", c.size, size)
       sysAssert c.size == size, "rawAlloc 6"
-      when defined(gcDestructors):
-        if c.freeList == nil:
-          when hasThreadSupport:
-            c.freeList = atomicExchangeN(addr c.sharedFreeList, nil, ATOMIC_RELAXED)
-          else:
-            c.freeList = c.sharedFreeList
-            c.sharedFreeList = nil
-        compensateCounters(a, c, size)
       if c.freeList == nil:
-        sysAssert(c.acc + smallChunkOverhead() + size <= SmallChunkSize,
+        sysAssert(c.acc.int + smallChunkOverhead() + size <= SmallChunkSize,
                   "rawAlloc 7")
-        result = cast[pointer](cast[ByteAddress](addr(c.data)) +% c.acc)
+        result = cast[pointer](cast[int](addr(c.data)) +% c.acc.int)
         inc(c.acc, size)
       else:
+        # There are free cells available, prefer them over the accumulator
        result = c.freeList
         when not defined(gcDestructors):
           sysAssert(c.freeList.zeroField == 0, "rawAlloc 8")
         c.freeList = c.freeList.next
+        if cast[PSmallChunk](pageAddr(result)) != c:
+          # This cell isn't a blocker for the current chunk's deallocation anymore
+          dec(c.foreignCells)
+        else:
+          sysAssert(c == cast[PSmallChunk](pageAddr(result)), "rawAlloc: Bad cell")
+      # Even if the cell we return is foreign, the local chunk's capacity decreases.
+      # The capacity was previously reserved in the source chunk (when it first got allocated),
+      # then added into the current chunk during dealloc,
+      # so the source chunk will not be freed or leak memory because of this.
       dec(c.free, size)
-      sysAssert((cast[ByteAddress](result) and (MemAlign-1)) == 0, "rawAlloc 9")
+      sysAssert((cast[int](result) and (MemAlign-1)) == 0, "rawAlloc 9")
       sysAssert(allocInv(a), "rawAlloc: end c != nil")
-    sysAssert(allocInv(a), "rawAlloc: before c.free < size")
-    if c.free < size:
-      sysAssert(allocInv(a), "rawAlloc: before listRemove test")
-      listRemove(a.freeSmallChunks[s], c)
-      sysAssert(allocInv(a), "rawAlloc: end listRemove test")
-    sysAssert(((cast[ByteAddress](result) and PageMask) - smallChunkOverhead()) %%
+      # We fetch deferred cells *after* advancing `c.freeList`/`acc` to adjust `c.free`.
+      # If after the adjustment it turns out there are free cells available,
+      # the chunk stays in `a.freeSmallChunks[s]` and the need for a new chunk is delayed.
+      fetchSharedCells(c)
+      sysAssert(allocInv(a), "rawAlloc: before c.free < size")
+      if c.free < size:
+        # Even after fetching shared cells the chunk has no usable memory left. It is no longer the active chunk.
+        sysAssert(allocInv(a), "rawAlloc: before listRemove test")
+        listRemove(a.freeSmallChunks[s], c)
+        sysAssert(allocInv(a), "rawAlloc: end listRemove test")
+    sysAssert(((cast[int](result) and PageMask) - smallChunkOverhead()) %%
         size == 0, "rawAlloc 21")
     sysAssert(allocInv(a), "rawAlloc: end small size")
     inc a.occ, size
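For the small path, a request is rounded up to `MemAlign` and mapped to the size class `size div MemAlign` in `a.freeSmallChunks`. A worked example of that arithmetic; the `MemAlign = 16` value is an assumption, alloc.nim derives its own constants:

```nim
const MemAlign = 16

proc roundup(x, v: int): int {.inline.} =
  # round x up to the next multiple of v (v must be a power of two)
  (x + v - 1) and not (v - 1)

when isMainModule:
  doAssert roundup(65, 8) == 72      # the self-test in rawAlloc's sysAssert
  let size = roundup(30, MemAlign)   # a 30-byte request becomes a 32-byte cell
  let s = size div MemAlign          # its size class index: 2
  echo "class ", s, " serves ", size, "-byte cells"
```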
@@ -892,16 +973,16 @@ proc rawAlloc(a: var MemRegion, requestedSize: int): pointer =
     sysAssert c.prev == nil, "rawAlloc 10"
     sysAssert c.next == nil, "rawAlloc 11"
     result = addr(c.data)
-    sysAssert((cast[ByteAddress](c) and (MemAlign-1)) == 0, "rawAlloc 13")
-    sysAssert((cast[ByteAddress](c) and PageMask) == 0, "rawAlloc: Not aligned on a page boundary")
+    sysAssert((cast[int](c) and (MemAlign-1)) == 0, "rawAlloc 13")
+    sysAssert((cast[int](c) and PageMask) == 0, "rawAlloc: Not aligned on a page boundary")
     when not defined(gcDestructors):
       if a.root == nil: a.root = getBottom(a)
-      add(a, a.root, cast[ByteAddress](result), cast[ByteAddress](result)+%size)
+      add(a, a.root, cast[int](result), cast[int](result)+%size)
     inc a.occ, c.size
     trackSize(c.size)
   sysAssert(isAccessible(a, result), "rawAlloc 14")
   sysAssert(allocInv(a), "rawAlloc: end")
-  when logAlloc: cprintf("var pointer_%p = alloc(%ld)\n", result, requestedSize)
+  when logAlloc: cprintf("var pointer_%p = alloc(%ld) # %p\n", result, requestedSize, addr a)
 
 proc rawAlloc0(a: var MemRegion, requestedSize: int): pointer =
   result = rawAlloc(a, requestedSize)
@@ -917,7 +998,7 @@ proc rawDealloc(a: var MemRegion, p: pointer) =
   if isSmallChunk(c):
     # `p` is within a small chunk:
     var c = cast[PSmallChunk](c)
-    var s = c.size
+    let s = c.size
+    #       ^ We might access thread foreign storage here.
+    # The other thread cannot possibly free this block as it's still alive.
     var f = cast[ptr FreeCell](p)
@@ -926,37 +1007,54 @@ proc rawDealloc(a: var MemRegion, p: pointer) =
       dec a.occ, s
       untrackSize(s)
       sysAssert a.occ >= 0, "rawDealloc: negative occupied memory (case A)"
-      sysAssert(((cast[ByteAddress](p) and PageMask) - smallChunkOverhead()) %%
+      sysAssert(((cast[int](p) and PageMask) - smallChunkOverhead()) %%
                 s == 0, "rawDealloc 3")
       when not defined(gcDestructors):
-        #echo("setting to nil: ", $cast[ByteAddress](addr(f.zeroField)))
+        #echo("setting to nil: ", $cast[int](addr(f.zeroField)))
         sysAssert(f.zeroField != 0, "rawDealloc 1")
         f.zeroField = 0
-      f.next = c.freeList
-      c.freeList = f
       when overwriteFree:
         # set to 0xff to check for usage after free bugs:
         nimSetMem(cast[pointer](cast[int](p) +% sizeof(FreeCell)), -1'i32,
                   s -% sizeof(FreeCell))
-      # check if it is not in the freeSmallChunks[s] list:
-      if c.free < s:
-        # add it to the freeSmallChunks[s] array:
-        listAdd(a.freeSmallChunks[s div MemAlign], c)
-        inc(c.free, s)
+      let activeChunk = a.freeSmallChunks[s div MemAlign]
+      if activeChunk != nil and c != activeChunk:
+        # This pointer is not part of the active chunk; lend it out
+        # and do not adjust the current chunk (same logic as compensateCounters).
+        # Putting the cell into the active chunk
+        # may prevent a queue of available chunks from forming in a.freeSmallChunks[s div MemAlign].
+        # This queue would otherwise waste memory in the form of free cells until we return to those chunks.
+        f.next = activeChunk.freeList
+        activeChunk.freeList = f # lend the cell
+        inc(activeChunk.free, s) # By not adjusting the current chunk's capacity it is prevented from being freed
+        inc(activeChunk.foreignCells) # The cell is now considered foreign from the perspective of the active chunk
       else:
-        inc(c.free, s)
-        if c.free == SmallChunkSize-smallChunkOverhead():
-          listRemove(a.freeSmallChunks[s div MemAlign], c)
-          c.size = SmallChunkSize
-          freeBigChunk(a, cast[PBigChunk](c))
+        f.next = c.freeList
+        c.freeList = f
+        if c.free < s:
+          # The chunk could not have been active as it didn't have enough space to give
+          listAdd(a.freeSmallChunks[s div MemAlign], c)
+          inc(c.free, s)
+        else:
+          inc(c.free, s)
+          # Free only if the entire chunk is unused and there are no borrowed cells.
+          # If the chunk were to be freed while it references foreign cells,
+          # the foreign chunks will leak memory and can never be freed.
+          if c.free == SmallChunkSize-smallChunkOverhead() and c.foreignCells == 0:
+            listRemove(a.freeSmallChunks[s div MemAlign], c)
+            c.size = SmallChunkSize
+            freeBigChunk(a, cast[PBigChunk](c))
     else:
+      when logAlloc: cprintf("dealloc(pointer_%p) # SMALL FROM %p CALLER %p\n", p, c.owner, addr(a))
+
       when defined(gcDestructors):
-        addToSharedFreeList(c, f)
-      sysAssert(((cast[ByteAddress](p) and PageMask) - smallChunkOverhead()) %%
+        addToSharedFreeList(c, f, s div MemAlign)
+      sysAssert(((cast[int](p) and PageMask) - smallChunkOverhead()) %%
             s == 0, "rawDealloc 2")
   else:
     # set to 0xff to check for usage after free bugs:
     when overwriteFree: nimSetMem(p, -1'i32, c.size -% bigChunkOverhead())
+    when logAlloc: cprintf("dealloc(pointer_%p) # BIG %p\n", p, c.owner)
     when defined(gcDestructors):
       if c.owner == addr(a):
         deallocBigChunk(a, cast[PBigChunk](c))
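The routing rule of this hunk, condensed into a toy model. `ToyCell`, `ToyChunk`, and `route` are hypothetical; the point is that lending a cell credits the receiver's `free` and increments its `foreignCells`, which in turn blocks the receiver's own release:

```nim
type
  ToyCell = ref object
    next: ToyCell
  ToyChunk = ref object
    freeList: ToyCell
    cellSize, free, capacity, foreignCells: int

proc route(active, origin: ToyChunk; cell: ToyCell): string =
  if active != nil and origin != active:
    # lend the cell: the active chunk gains the capacity and records
    # a foreign cell, which blocks its release later on
    cell.next = active.freeList
    active.freeList = cell
    inc active.free, origin.cellSize
    inc active.foreignCells
    "lent to the active chunk"
  else:
    cell.next = origin.freeList
    origin.freeList = cell
    inc origin.free, origin.cellSize
    if origin.free == origin.capacity and origin.foreignCells == 0:
      "chunk fully unused: return it to the big-chunk allocator"
    else:
      "kept for reuse"

when isMainModule:
  let active = ToyChunk(cellSize: 32, capacity: 128)
  let origin = ToyChunk(cellSize: 32, capacity: 128)
  echo route(active, origin, ToyCell())  # lent to the active chunk
  echo active.foreignCells               # 1
```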
@@ -964,8 +1062,9 @@ proc rawDealloc(a: var MemRegion, p: pointer) =
         addToSharedFreeListBigChunks(c.owner[], cast[PBigChunk](c))
     else:
       deallocBigChunk(a, cast[PBigChunk](c))
+
   sysAssert(allocInv(a), "rawDealloc: end")
-  when logAlloc: cprintf("dealloc(pointer_%p)\n", p)
+  #when logAlloc: cprintf("dealloc(pointer_%p)\n", p)
 
 when not defined(gcDestructors):
   proc isAllocatedPtr(a: MemRegion, p: pointer): bool =
@@ -974,9 +1073,9 @@ when not defined(gcDestructors):
     if not chunkUnused(c):
       if isSmallChunk(c):
         var c = cast[PSmallChunk](c)
-        var offset = (cast[ByteAddress](p) and (PageSize-1)) -%
-                     smallChunkOverhead()
-        result = (c.acc >% offset) and (offset %% c.size == 0) and
+        var offset = (cast[int](p) and (PageSize-1)) -%
+                     smallChunkOverhead()
+        result = (c.acc.int >% offset) and (offset %% c.size == 0) and
                  (cast[ptr FreeCell](p).zeroField >% 1)
       else:
         var c = cast[PBigChunk](c)
@@ -992,12 +1091,12 @@ when not defined(gcDestructors):
     if not chunkUnused(c):
       if isSmallChunk(c):
         var c = cast[PSmallChunk](c)
-        var offset = (cast[ByteAddress](p) and (PageSize-1)) -%
-                     smallChunkOverhead()
-        if c.acc >% offset:
-          sysAssert(cast[ByteAddress](addr(c.data)) +% offset ==
-                    cast[ByteAddress](p), "offset is not what you think it is")
-          var d = cast[ptr FreeCell](cast[ByteAddress](addr(c.data)) +%
+        var offset = (cast[int](p) and (PageSize-1)) -%
+                     smallChunkOverhead()
+        if c.acc.int >% offset:
+          sysAssert(cast[int](addr(c.data)) +% offset ==
+                    cast[int](p), "offset is not what you think it is")
+          var d = cast[ptr FreeCell](cast[int](addr(c.data)) +%
                     offset -% (offset %% c.size))
           if d.zeroField >% 1:
             result = d
@@ -1024,7 +1123,7 @@ when not defined(gcDestructors):
 
 proc ptrSize(p: pointer): int =
   when not defined(gcDestructors):
-    var x = cast[pointer](cast[ByteAddress](p) -% sizeof(FreeCell))
+    var x = cast[pointer](cast[int](p) -% sizeof(FreeCell))
     var c = pageAddr(p)
     sysAssert(not chunkUnused(c), "ptrSize")
     result = c.size -% sizeof(FreeCell)
@@ -1042,7 +1141,7 @@ proc alloc(allocator: var MemRegion, size: Natural): pointer {.gcsafe.} =
     result = rawAlloc(allocator, size+sizeof(FreeCell))
     cast[ptr FreeCell](result).zeroField = 1 # mark it as used
     sysAssert(not isAllocatedPtr(allocator, result), "alloc")
-    result = cast[pointer](cast[ByteAddress](result) +% sizeof(FreeCell))
+    result = cast[pointer](cast[int](result) +% sizeof(FreeCell))
     track("alloc", result, size)
   else:
     result = rawAlloc(allocator, size)
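In the non-gcDestructors configuration, `alloc` hides a `FreeCell`-sized header in front of every object and `dealloc` steps back to it to validate `zeroField`. The round trip, reduced to its essentials; `Header`, `wrap`, and `unwrap` are made-up names:

```nim
type Header = object
  zeroField: int  # 0 = free, >= 1 = in use (overlaid with type info)

proc wrap(raw: pointer): pointer =
  # what alloc does after rawAlloc: mark the block used, then return
  # the address just past the header
  cast[ptr Header](raw).zeroField = 1
  cast[pointer](cast[uint](raw) + uint(sizeof(Header)))

proc unwrap(user: pointer): ptr Header =
  # what dealloc does first: recover the header to validate it
  cast[ptr Header](cast[uint](user) - uint(sizeof(Header)))

when isMainModule:
  let raw = alloc(64 + sizeof(Header))
  let user = wrap(raw)
  doAssert unwrap(user).zeroField == 1
  dealloc(raw)
```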
@@ -1054,7 +1153,7 @@ proc alloc0(allocator: var MemRegion, size: Natural): pointer =
 proc dealloc(allocator: var MemRegion, p: pointer) =
   when not defined(gcDestructors):
     sysAssert(p != nil, "dealloc: p is nil")
-    var x = cast[pointer](cast[ByteAddress](p) -% sizeof(FreeCell))
+    var x = cast[pointer](cast[int](p) -% sizeof(FreeCell))
     sysAssert(x != nil, "dealloc: x is nil")
     sysAssert(isAccessible(allocator, x), "is not accessible")
     sysAssert(cast[ptr FreeCell](x).zeroField == 1, "dealloc: object header corrupted")
@@ -1087,7 +1186,7 @@ proc deallocOsPages(a: var MemRegion) =
       let (p, size) = it.chunks[i]
       when defined(debugHeapLinks):
         cprintf("owner %p; dealloc A: %p size: %ld; next: %p\n", addr(a),
-          it, it.size, next)
+          it, size, next)
       sysAssert size >= PageSize, "origSize too small"
       osDeallocPages(p, size)
     it = next
@@ -1115,7 +1214,7 @@ template instantiateForRegion(allocator: untyped) {.dirty.} =
       result = interiorAllocatedPtr(allocator, p)
 
     proc isAllocatedPtr*(p: pointer): bool =
-      let p = cast[pointer](cast[ByteAddress](p)-%ByteAddress(sizeof(Cell)))
+      let p = cast[pointer](cast[int](p)-%ByteAddress(sizeof(Cell)))
       result = isAllocatedPtr(allocator, p)
 
   proc deallocOsPages = deallocOsPages(allocator)
@@ -1135,7 +1234,7 @@ template instantiateForRegion(allocator: untyped) {.dirty.} =
   proc realloc0Impl(p: pointer, oldSize, newSize: Natural): pointer =
     result = realloc(allocator, p, newSize)
     if newSize > oldSize:
-      zeroMem(cast[pointer](cast[int](result) + oldSize), newSize - oldSize)
+      zeroMem(cast[pointer](cast[uint](result) + uint(oldSize)), newSize - oldSize)
 
   when false:
     proc countFreeMem(): int =
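The final hunk computes the tail-zeroing offset in `uint`, which keeps the pointer arithmetic free of signed-overflow checks. A stand-alone version of the same contract, built on system's `realloc`/`zeroMem`; `realloc0` here is a hypothetical helper, not the template's own proc:

```nim
# Grow the block and guarantee the newly exposed tail reads as zero.
proc realloc0(p: pointer; oldSize, newSize: Natural): pointer =
  result = realloc(p, newSize)
  if newSize > oldSize:
    zeroMem(cast[pointer](cast[uint](result) + uint(oldSize)), newSize - oldSize)

when isMainModule:
  var p = alloc0(8)
  p = realloc0(p, 8, 32)
  doAssert cast[ptr UncheckedArray[byte]](p)[31] == 0  # tail is zeroed
  dealloc(p)
```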