summary refs log tree commit diff stats
path: root/lib/system
diff options
context:
space:
mode:
Diffstat (limited to 'lib/system')
-rwxr-xr-xlib/system/alloc.nim89
-rw-r--r--lib/system/avltree.nim91
-rwxr-xr-xlib/system/gc.nim25
3 files changed, 192 insertions, 13 deletions
diff --git a/lib/system/alloc.nim b/lib/system/alloc.nim
index 6dee145c8..bc8124aca 100755
--- a/lib/system/alloc.nim
+++ b/lib/system/alloc.nim
@@ -151,15 +151,33 @@ type
     size: int                # remaining size
     acc: int                 # accumulator
     next: PLLChunk           # next low-level chunk; only needed for dealloc
+
+  PAvlNode = ptr TAvlNode
+  TAvlNode {.pure, final.} = object 
+    link: array[0..1, PAvlNode] # Left (0) and right (1) links 
+    key, upperBound: int
+    level: int
     
   TMemRegion {.final, pure.} = object
+    minLargeObj, maxLargeObj: int
+    freeSmallChunks: array[0..SmallChunkSize div MemAlign-1, PSmallChunk]
     llmem: PLLChunk
     currMem, maxMem, freeMem: int # memory sizes (allocated from OS)
     lastSize: int # needed for the case that OS gives us pages linearly 
-    freeSmallChunks: array[0..SmallChunkSize div MemAlign-1, PSmallChunk]
     freeChunksList: PBigChunk # XXX make this a datastructure with O(1) access
     chunkStarts: TIntSet
+    root, deleted, last, freeAvlNodes: PAvlNode
   
+# shared: the sentinel node below is a global, shared by all TMemRegion
+var
+  bottomData: TAvlNode
+  bottom: PAvlNode
+
+proc initAllocator() =
+  bottom = addr(bottomData)
+  bottom.link[0] = bottom
+  bottom.link[1] = bottom
+
 proc incCurrMem(a: var TMemRegion, bytes: int) {.inline.} = 
   inc(a.currMem, bytes)
 
@@ -171,7 +189,7 @@ proc getMaxMem(a: var TMemRegion): int =
   # Since we update maxPagesCount only when freeing pages, 
   # maxPagesCount may not be up to date. Thus we use the
   # maximum of these both values here:
-  return max(a.currMem, a.maxMem)
+  result = max(a.currMem, a.maxMem)
   
 proc llAlloc(a: var TMemRegion, size: int): pointer =
   # *low-level* alloc for the memory managers data structures. Deallocation
@@ -191,7 +209,25 @@ proc llAlloc(a: var TMemRegion, size: int): pointer =
   dec(a.llmem.size, size)
   inc(a.llmem.acc, size)
   zeroMem(result, size)
-  
+
+proc allocAvlNode(a: var TMemRegion, key, upperBound: int): PAvlNode =
+  if a.freeAvlNodes != nil:
+    result = a.freeAvlNodes
+    a.freeAvlNodes = a.freeAvlNodes.link[0]
+  else:
+    result = cast[PAvlNode](llAlloc(a, sizeof(TAvlNode)))
+  result.key = key
+  result.upperBound = upperBound
+  result.link[0] = bottom
+  result.link[1] = bottom
+  result.level = 0
+
+proc deallocAvlNode(a: var TMemRegion, n: PAvlNode) {.inline.} =
+  n.link[0] = a.freeAvlNodes
+  a.freeAvlNodes = n
+
+include "system/avltree"
+
 proc llDeallocAll(a: var TMemRegion) =
   var it = a.llmem
   while it != nil:
@@ -497,6 +533,8 @@ proc rawAlloc(a: var TMemRegion, requestedSize: int): pointer =
     sysAssert c.size == size, "rawAlloc 12"
     result = addr(c.data)
     sysAssert((cast[TAddress](result) and (MemAlign-1)) == 0, "rawAlloc 13")
+    if a.root == nil: a.root = bottom
+    add(a, a.root, cast[TAddress](result), cast[TAddress](result)+%size)
   sysAssert(isAccessible(a, result), "rawAlloc 14")
 
 proc rawAlloc0(a: var TMemRegion, requestedSize: int): pointer =
@@ -534,7 +572,10 @@ proc rawDealloc(a: var TMemRegion, p: pointer) =
     # set to 0xff to check for usage after free bugs:
     when overwriteFree: c_memset(p, -1'i32, c.size -% bigChunkOverhead())
     # free big chunk
-    freeBigChunk(a, cast[PBigChunk](c))
+    var c = cast[PBigChunk](c)
+    a.deleted = bottom
+    del(a, a.root, cast[int](addr(c.data)))
+    freeBigChunk(a, c)
 
 proc isAllocatedPtr(a: TMemRegion, p: pointer): bool = 
   if isAccessible(a, p):
@@ -550,6 +591,46 @@ proc isAllocatedPtr(a: TMemRegion, p: pointer): bool =
         var c = cast[PBigChunk](c)
         result = p == addr(c.data) and cast[ptr TFreeCell](p).zeroField >% 1
 
+proc prepareForInteriorPointerChecking(a: var TMemRegion) {.inline.} =
+  a.minLargeObj = lowGauge(a.root)
+  a.maxLargeObj = highGauge(a.root)
+
+proc interiorAllocatedPtr(a: TMemRegion, p: pointer): pointer =
+  if isAccessible(a, p):
+    var c = pageAddr(p)
+    if not chunkUnused(c):
+      if isSmallChunk(c):
+        var c = cast[PSmallChunk](c)
+        var offset = (cast[TAddress](p) and (PageSize-1)) -% 
+                     smallChunkOverhead()
+        if c.acc >% offset:
+          sysAssert(cast[TAddress](addr(c.data)) +% offset ==
+                    cast[TAddress](p), "offset is not what you think it is")
+          var d = cast[ptr TFreeCell](cast[TAddress](addr(c.data)) +% 
+                    offset -% (offset %% c.size))
+          if d.zeroField >% 1:
+            result = d
+            sysAssert isAllocatedPtr(a, result), " result wrong pointer!"
+      else:
+        var c = cast[PBigChunk](c)
+        var d = addr(c.data)
+        if p >= d and cast[ptr TFreeCell](d).zeroField >% 1:
+          result = d
+          sysAssert isAllocatedPtr(a, result), " result wrong pointer!"
+  else:
+    var q = cast[int](p)
+    if q >=% a.minLargeObj and q <=% a.maxLargeObj:
+      # this check is highly effective! The test fails for 99.96% of all
+      # checks on an x86-64.
+      var avlNode = inRange(a.root, q)
+      if avlNode != nil:
+        var k = cast[pointer](avlNode.key)
+        var c = cast[PBigChunk](pageAddr(k))
+        sysAssert(addr(c.data) == k, " k is not the same as addr(c.data)!")
+        if cast[ptr TFreeCell](k).zeroField >% 1:
+          result = k
+          sysAssert isAllocatedPtr(a, result), " result wrong pointer!"
+
 proc ptrSize(p: pointer): int =
   var x = cast[pointer](cast[TAddress](p) -% sizeof(TFreeCell))
   result = pageAddr(x).size - sizeof(TFreeCell)
diff --git a/lib/system/avltree.nim b/lib/system/avltree.nim
new file mode 100644
index 000000000..9bc2687f4
--- /dev/null
+++ b/lib/system/avltree.nim
@@ -0,0 +1,91 @@
+#
+#
+#            Nimrod's Runtime Library
+#        (c) Copyright 2011 Andreas Rumpf
+#
+#    See the file "copying.txt", included in this
+#    distribution, for details about the copyright.
+#
+
+## Not really an AVL tree anymore, but an AA tree: balance is kept via the
+
+template IsBottom(n: PAvlNode): bool = n == bottom
+
+proc lowGauge(n: PAvlNode): int =
+  var it = n
+  while not IsBottom(it):
+    result = it.key
+    it = it.link[0]
+  
+proc highGauge(n: PAvlNode): int =
+  result = -1
+  var it = n
+  while not IsBottom(it):
+    result = it.upperBound
+    it = it.link[1]
+
+proc find(root: PAvlNode, key: int): PAvlNode = 
+  var it = root
+  while not IsBottom(it):
+    if it.key == key: return it
+    it = it.link[ord(it.key <% key)]
+
+proc inRange(root: PAvlNode, key: int): PAvlNode =
+  var it = root
+  while not IsBottom(it):
+    if it.key <=% key and key <% it.upperBound: return it
+    it = it.link[ord(it.key <% key)]
+
+proc skew(t: var PAvlNode) =
+  if t.link[0].level == t.level:
+    var temp = t
+    t = t.link[0]
+    temp.link[0] = t.link[1]
+    t.link[1] = temp
+
+proc split(t: var PAvlNode) =
+  if t.link[1].link[1].level == t.level:
+    var temp = t
+    t = t.link[1]
+    temp.link[1] = t.link[0]
+    t.link[0] = temp
+    inc t.level
+
+proc add(a: var TMemRegion, t: var PAvlNode, key, upperBound: int) =
+  if t == bottom:
+    t = allocAvlNode(a, key, upperBound)
+  else:
+    if key <% t.key:
+      add(a, t.link[0], key, upperBound)
+    elif key >% t.key:
+      add(a, t.link[1], key, upperBound)
+    else:
+      sysAssert false, "key already exists"
+    skew(t)
+    split(t)
+
+proc del(a: var TMemRegion, t: var PAvlNode, x: int) =
+  if t == bottom: return
+  a.last = t
+  if x <% t.key:
+    del(a, t.link[0], x)
+  else:
+    a.deleted = t
+    del(a, t.link[1], x)
+  if t == a.last and a.deleted != bottom and x == a.deleted.key:
+    a.deleted.key = t.key
+    a.deleted.upperBound = t.upperBound
+    a.deleted = bottom
+    t = t.link[1]
+    deallocAvlNode(a, a.last)
+  elif t.link[0].level < t.level-1 or
+       t.link[1].level < t.level-1:
+    dec t.level
+    if t.link[1].level > t.level:
+      t.link[1].level = t.level
+    skew(t)
+    skew(t.link[1])
+    skew(t.link[1].link[1])
+    split(t)
+    split(t.link[1])
+
diff --git a/lib/system/gc.nim b/lib/system/gc.nim
index caab22e34..c477cadef 100755
--- a/lib/system/gc.nim
+++ b/lib/system/gc.nim
@@ -561,24 +561,30 @@ proc gcMark(gch: var TGcHeap, p: pointer) {.inline.} =
   var c = cast[TAddress](cell)
   if c >% PageSize and (c and (MemAlign-1)) == 0:
     # fast check: does it look like a cell?
-    if isAllocatedPtr(gch.region, cell): 
+    var objStart = cast[PCell](interiorAllocatedPtr(gch.region, cell))
+    if objStart != nil:
       # mark the cell:
-      cell.refcount = cell.refcount +% rcIncrement
-      add(gch.decStack, cell)
+      objStart.refcount = objStart.refcount +% rcIncrement
+      add(gch.decStack, objStart)
     when false:
-      # Care for string->cstring and seq->openArray conversions.
-      # Now the codegen deals with it, it generated ``nimKeepAlive`` calls.
-      var b = cast[PCell](c -% sizeof(TGenericSeq))
-      if isAllocatedPtr(gch.region, b):
+      if isAllocatedPtr(gch.region, cell):
+        sysAssert false, "allocated pointer but not interior?"
         # mark the cell:
-        b.refcount = b.refcount +% rcIncrement
-        add(gch.decStack, b)
+        cell.refcount = cell.refcount +% rcIncrement
+        add(gch.decStack, cell)
 
 proc nimKeepAlive(p: PGenericSeq) {.compilerRtl, noinline.} =
   var c = usrToCell(p)
   if isAllocatedPtr(gch.region, c):
     c.refcount = c.refcount or rcMarked
 
+proc nimGCFrame(p: pointer) {.compilerRtl, noinline.} =
+  # 'cast' is correct here! no offset to add:
+  var c = cast[PCell](p)
+  var x = cast[TAddress](c)
+  if x <% PageSize and (x and (MemAlign-1)) == 0:
+    c.refcount = c.refcount or rcMarked
+
 proc markThreadStacks(gch: var TGcHeap) = 
   when hasThreadSupport and hasSharedHeap:
     {.error: "not fully implemented".}
@@ -771,6 +777,7 @@ proc collectCT(gch: var TGcHeap) =
       gch.recGcLock == 0:
     gch.stat.maxStackSize = max(gch.stat.maxStackSize, stackSize())
     sysAssert(gch.decStack.len == 0, "collectCT")
+    prepareForInteriorPointerChecking(gch.region)
     markStackAndRegisters(gch)
     markThreadStacks(gch)
     gch.stat.maxStackCells = max(gch.stat.maxStackCells, gch.decStack.len)