author     Andreas Rumpf <rumpf_a@web.de>  2017-04-02 21:06:10 +0200
committer  Andreas Rumpf <rumpf_a@web.de>  2017-04-02 21:06:10 +0200
commit     c785066ee343268c5ef9c19c4d334a0f1e8e8c48 (patch)
tree       609d71b0c190f256606e9bd0eaed6ef0201895a3
parent     846e51bb65671a6ff33840567a44883b1e4a221b (diff)
download   Nim-c785066ee343268c5ef9c19c4d334a0f1e8e8c48.tar.gz
memory manager: use less memory; corruption prevention
-rw-r--r--  lib/system/alloc.nim | 106
1 file changed, 47 insertions(+), 59 deletions(-)
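This commit makes two changes to the allocator's chunk headers. First, it saves memory: the origSize and heapLink fields are removed from every chunk, the 'used' flag moves into the 0th bit of prevSize (chunk sizes are multiples of PageSize, so the low bit of a stored size is always free), and the per-chunk heap link is replaced by a compact side table of (chunk, size) pairs. Second, it prevents corruption: every write to prevSize now explicitly preserves the flag bit, so recording a neighbor's size can no longer clobber the 'used' state.

A minimal sketch of the bit-tagging scheme (hypothetical helper names; illustration only, not the allocator's code):

  const PageSize = 4096              # chunk sizes are multiples of this

  proc markUsed(field: var int)    = field = field or 1
  proc markUnused(field: var int)  = field = field and not 1
  proc isUsed(field: int): bool    = (field and 1) == 1
  proc prevSizeOf(field: int): int = field and not 1

  var f = 0
  markUsed(f)
  f = 8 * PageSize or (f and 1)      # store a size, keep the flag
  assert isUsed(f) and prevSizeOf(f) == 8 * PageSize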
diff --git a/lib/system/alloc.nim b/lib/system/alloc.nim
index edd8ececb..bcbc5d92f 100644
--- a/lib/system/alloc.nim
+++ b/lib/system/alloc.nim
@@ -53,9 +53,8 @@ type
   PSmallChunk = ptr SmallChunk
   BaseChunk {.pure, inheritable.} = object
     prevSize: int        # size of previous chunk; for coalescing
+                         # 0th bit == 1 if 'used'
     size: int            # if < PageSize it is a small chunk
-    origSize: int        # 0th bit == 1 if 'used'
-    heapLink: PBigChunk      # linked list of all chunks for bulk 'deallocPages'
 
   SmallChunk = object of BaseChunk
     next, prev: PSmallChunk  # chunks of the same size
@@ -93,6 +92,11 @@ type
     key, upperBound: int
     level: int
 
+  HeapLinks = object
+    len: int
+    chunks: array[30, (PBigChunk, int)]
+    next: ptr HeapLinks
+
   MemRegion = object
     minLargeObj, maxLargeObj: int
     freeSmallChunks: array[0..SmallChunkSize div MemAlign-1, PSmallChunk]
@@ -100,12 +104,12 @@ type
     currMem, maxMem, freeMem: int # memory sizes (allocated from OS)
     lastSize: int # needed for the case that OS gives us pages linearly
     freeChunksList: PBigChunk # XXX make this a datastructure with O(1) access
-    heapLink: PBigChunk # used to link every chunk for bulk 'deallocPages'
     chunkStarts: IntSet
     root, deleted, last, freeAvlNodes: PAvlNode
     locked, blockChunkSizeIncrease: bool # if locked, we cannot free pages.
     nextChunkSize: int
     bottomData: AvlNode
+    heapLinks: HeapLinks
 
 {.deprecated: [TMemRegion: MemRegion].}
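The new HeapLinks type is an unrolled linked list: the head node is embedded directly in MemRegion, each node holds up to 30 (PBigChunk, int) pairs, and further nodes are chained through next. This replaces one heapLink pointer plus one origSize word in every chunk header. A sketch of the traversal pattern (hypothetical iterator, assuming the types above; not part of the commit):

  iterator allHeapLinks(a: var MemRegion): (PBigChunk, int) =
    var it = addr(a.heapLinks)   # head node is embedded, never nil
    while it != nil:
      for i in 0 ..< it.len:
        yield it.chunks[i]
      it = it.next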
 
@@ -177,6 +181,20 @@ proc deallocAvlNode(a: var MemRegion, n: PAvlNode) {.inline.} =
   n.link[0] = a.freeAvlNodes
   a.freeAvlNodes = n
 
+proc addHeapLink(a: var MemRegion; p: PBigChunk, size: int) =
+  var it = addr(a.heapLinks)
+  while it != nil and it.len >= it.chunks.len: it = it.next
+  if it == nil:
+    var n = cast[ptr HeapLinks](llAlloc(a, sizeof(HeapLinks)))
+    n.next = a.heapLinks.next
+    a.heapLinks.next = n
+    n.chunks[0] = (p, size)
+    n.len = 1
+  else:
+    let L = it.len
+    it.chunks[L] = (p, size)
+    inc it.len
+
 include "system/avltree"
 
 proc llDeallocAll(a: var MemRegion) =
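addHeapLink above does a first-fit scan for a node with a free slot; only when every node is full does it llAlloc a fresh node and splice it in directly behind the embedded head, so the head never moves and no tail pointer is needed. llAlloc'ed memory is reclaimed only in bulk by llDeallocAll, which matches the lifetime of the links: they are needed until deallocOsPages tears the region down.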
@@ -244,7 +262,7 @@ proc isSmallChunk(c: PChunk): bool {.inline.} =
   return c.size <= SmallChunkSize-smallChunkOverhead()
 
 proc chunkUnused(c: PChunk): bool {.inline.} =
-  result = (c.origSize and 1) == 0
+  result = (c.prevSize and 1) == 0
 
 iterator allObjects(m: var MemRegion): pointer {.inline.} =
   m.locked = true
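chunkUnused is the read side of the new encoding: with origSize gone, the flag is tested on the 0th bit of prevSize. Every prevSize write changed below is careful to preserve that bit.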
@@ -319,15 +337,13 @@ proc requestOsChunks(a: var MemRegion, size: int): PBigChunk =
 
   incCurrMem(a, size)
   inc(a.freeMem, size)
-  result.heapLink = a.heapLink
-  result.origSize = size
+  a.addHeapLink(result, size)
  when defined(debugHeapLinks):
    cprintf("owner: %p; result: %p; size: %ld\n", addr(a),
      result, size)

  when defined(memtracker):
    trackLocation(addr result.prevSize, sizeof(int))
-  a.heapLink = result
 
   sysAssert((cast[ByteAddress](result) and PageMask) == 0, "requestOsChunks 1")
   #zeroMem(result, size)
@@ -340,7 +356,7 @@ proc requestOsChunks(a: var MemRegion, size: int): PBigChunk =
   var next = cast[PChunk](nxt)
   if pageIndex(next) in a.chunkStarts:
     #echo("Next already allocated!")
-    next.prevSize = size
+    next.prevSize = size or (next.prevSize and 1)
   # set result.prevSize:
   var lastSize = if a.lastSize != 0: a.lastSize else: PageSize
   var prv = cast[ByteAddress](result) -% lastSize
@@ -348,27 +364,12 @@ proc requestOsChunks(a: var MemRegion, size: int): PBigChunk =
   var prev = cast[PChunk](prv)
   if pageIndex(prev) in a.chunkStarts and prev.size == lastSize:
     #echo("Prev already allocated!")
-    result.prevSize = lastSize
+    result.prevSize = lastSize or (result.prevSize and 1)
   else:
-    result.prevSize = 0 # unknown
+    result.prevSize = 0 or (result.prevSize and 1) # unknown
+    # but do not overwrite 'used' field
   a.lastSize = size # for next request
 
-when false:
-  # with the new linked list design this is not possible anymore:
-  proc freeOsChunks(a: var MemRegion, p: pointer, size: int) =
-    # update next.prevSize:
-    var c = cast[PChunk](p)
-    var nxt = cast[ByteAddress](p) +% c.size
-    sysAssert((nxt and PageMask) == 0, "freeOsChunks")
-    var next = cast[PChunk](nxt)
-    if pageIndex(next) in a.chunkStarts:
-      next.prevSize = 0 # XXX used
-    excl(a.chunkStarts, pageIndex(p))
-    osDeallocPages(p, size)
-    decCurrMem(a, size)
-    dec(a.freeMem, size)
-    #c_fprintf(stdout, "[Alloc] back to OS: %ld\n", size)
-
 proc isAccessible(a: MemRegion, p: pointer): bool {.inline.} =
   result = contains(a.chunkStarts, pageIndex(p))
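The masking pattern used twice above (and again in updatePrevSize below) writes a new size into prevSize while keeping bit 0 intact. As a standalone sketch (hypothetical helper, assuming page-aligned sizes; not code from the commit):

  proc storeSizeKeepFlag(field: var int; size: int) =
    assert (size and 1) == 0         # page-aligned sizes have bit 0 clear
    field = size or (field and 1)    # record the size, keep the 'used' bit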
 
@@ -406,7 +407,7 @@ proc updatePrevSize(a: var MemRegion, c: PBigChunk,
   var ri = cast[PChunk](cast[ByteAddress](c) +% c.size)
   sysAssert((cast[ByteAddress](ri) and PageMask) == 0, "updatePrevSize")
   if isAccessible(a, ri):
-    ri.prevSize = prevSize
+    ri.prevSize = prevSize or (ri.prevSize and 1)
 
 proc freeBigChunk(a: var MemRegion, c: PBigChunk) =
   var c = c
@@ -422,8 +423,9 @@ proc freeBigChunk(a: var MemRegion, c: PBigChunk) =
         inc(c.size, ri.size)
         excl(a.chunkStarts, pageIndex(ri))
   when coalescLeft:
-    if c.prevSize != 0:
-      var le = cast[PChunk](cast[ByteAddress](c) -% c.prevSize)
+    let prevSize = c.prevSize and not 1
+    if prevSize != 0:
+      var le = cast[PChunk](cast[ByteAddress](c) -% prevSize)
       sysAssert((cast[ByteAddress](le) and PageMask) == 0, "freeBigChunk 4")
       if isAccessible(a, le) and chunkUnused(le):
         sysAssert(not isSmallChunk(le), "freeBigChunk 5")
@@ -433,25 +435,22 @@ proc freeBigChunk(a: var MemRegion, c: PBigChunk) =
           excl(a.chunkStarts, pageIndex(c))
           c = cast[PBigChunk](le)
 
-  #if c.size < ChunkOsReturn or doNotUnmap or a.locked:
   incl(a, a.chunkStarts, pageIndex(c))
   updatePrevSize(a, c, c.size)
   listAdd(a.freeChunksList, c)
   # set 'used' to false:
-  c.origSize = c.origSize and not 1
-  track("setUsedToFalse", addr c.origSize, sizeof(int))
-  #else:
-  #  freeOsChunks(a, c, c.size)
+  c.prevSize = c.prevSize and not 1
 
 proc splitChunk(a: var MemRegion, c: PBigChunk, size: int) =
   var rest = cast[PBigChunk](cast[ByteAddress](c) +% size)
   sysAssert(rest notin a.freeChunksList, "splitChunk")
   rest.size = c.size - size
-  rest.origSize = rest.origSize and not 1 # not used
  track("rest.prevSize", addr rest.prevSize, sizeof(int))
   rest.next = nil
   rest.prev = nil
+  # the page-aligned size has bit 0 clear, so this also marks 'rest' unused:
   rest.prevSize = size
+  sysAssert((size and 1) == 0, "splitChunk 2")
   updatePrevSize(a, c, rest.size)
   c.size = size
   incl(a, a.chunkStarts, pageIndex(rest))
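In splitChunk, the assignment rest.prevSize = size does double duty: because size is page aligned its 0th bit is zero, so the store records the left neighbor's size and marks rest as unused at the same time. The new sysAssert pins down exactly that alignment assumption.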
@@ -482,9 +481,9 @@ proc getBigChunk(a: var MemRegion, size: int): PBigChunk =
       # if we over allocated split the chunk:
       if result.size > size:
         splitChunk(a, result, size)
-  result.prevSize = 0 # XXX why is this needed?
+
  # set 'used' to true:
-  result.origSize = result.origSize or 1
+  result.prevSize = 1
  track("setUsedToTrue", addr result.prevSize, sizeof(int))
 
   incl(a, a.chunkStarts, pageIndex(result))
@@ -729,29 +728,18 @@ proc realloc(allocator: var MemRegion, p: pointer, newsize: Natural): pointer =
 
 proc deallocOsPages(a: var MemRegion) =
   # we free every 'ordinarily' allocated page by iterating over the page bits:
-  var it = a.heapLink
-  while it != nil:
-    let next = it.heapLink
-    when defined(debugHeapLinks):
-      cprintf("owner %p; dealloc A: %p size: %ld; next: %p\n", addr(a),
-        it, it.origSize and not 1, next)
-    sysAssert it.origSize >= PageSize, "origSize too small"
-    # note:
-    osDeallocPages(it, it.origSize and not 1)
+  var it = addr(a.heapLinks)
+  while true:
+    let next = it.next
+    for i in 0..it.len-1:
+      let (p, size) = it.chunks[i]
+      when defined(debugHeapLinks):
+        cprintf("owner %p; dealloc A: %p size: %ld; next: %p\n", addr(a),
+          p, size, next)
+      sysAssert size >= PageSize, "size too small"
+      osDeallocPages(p, size)
     it = next
-  when false:
-    for p in elements(a.chunkStarts):
-      var page = cast[PChunk](p shl PageShift)
-      when not doNotUnmap:
-        var size = if page.size < PageSize: PageSize else: page.size
-        osDeallocPages(page, size)
-      else:
-        # Linux on PowerPC for example frees MORE than asked if 'munmap'
-        # receives the start of an originally mmap'ed memory block. This is not
-        # too bad, but we must not access 'page.size' then as that could trigger
-        # a segfault. But we don't need to access 'page.size' here anyway,
-        # because calling munmap with PageSize suffices:
-        osDeallocPages(page, PageSize)
+    if it == nil: break
   # And then we free the pages that are in use for the page bits:
   llDeallocAll(a)
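deallocOsPages now walks the HeapLinks nodes (the do-while shape means the embedded head node, which is never nil, needs no special case) and returns every recorded (p, size) pair to the OS; the nodes themselves go away with llDeallocAll. The old scheme read origSize and heapLink out of each chunk header while freeing it, so teardown depended on those headers being uncorrupted; keeping the bookkeeping in a side table removes that dependency.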