author     Reimer Behrends <behrends@gmail.com>  2015-09-18 14:05:04 +0200
committer  Reimer Behrends <behrends@gmail.com>  2015-09-18 14:05:04 +0200
commit     9ea99dbf369c498215705650b111e78cd53c94da (patch)
tree       9b1d3538a3a19d0cb24760d804e0ea6b1a37f0f5 /lib/system/alloc.nim
parent     d93c612067462919fb1bb6acf5cc384b7043c760 (diff)
download   Nim-9ea99dbf369c498215705650b111e78cd53c94da.tar.gz
Add option to disable munmap() use in the allocator.
When compiling with '-d:nimAllocNoUnmap', the allocator will not
attempt to return large chunks to the OS. For certain allocation
patterns, this can be a significant speedup.
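
As a rough illustration only (not part of this commit), a -d: symbol passed on
the command line is visible to Nim code through defined() and can be folded
into a compile-time constant, which is the pattern the diff below uses; the
names in this sketch are placeholders:

  const noUnmap = defined(nimAllocNoUnmap)  # true when built with -d:nimAllocNoUnmap

  when noUnmap:
    echo "allocator keeps large chunks instead of calling munmap()"
  else:
    echo "allocator may return large chunks to the OS"

Building with 'nim c -d:nimAllocNoUnmap prog.nim' selects the first branch;
without the define, the second branch is compiled in.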
Diffstat (limited to 'lib/system/alloc.nim')
-rw-r--r--  lib/system/alloc.nim  11
1 file changed, 7 insertions, 4 deletions
diff --git a/lib/system/alloc.nim b/lib/system/alloc.nim
index 13a10e46f..3a1d7b666 100644
--- a/lib/system/alloc.nim
+++ b/lib/system/alloc.nim
@@ -27,8 +27,11 @@ sysAssert(roundup(65, 8) == 72, "roundup broken 2")
 # some platforms have really weird unmap behaviour: unmap(blockStart, PageSize)
 # really frees the whole block. Happens for Linux/PowerPC for example. Amd64
 # and x86 are safe though; Windows is special because MEM_RELEASE can only be
-# used with a size of 0:
-const weirdUnmap = not (defined(amd64) or defined(i386)) or defined(windows)
+# used with a size of 0. We also allow unmapping to be turned off with
+# -d:nimAllocNoUnmap:
+const doNotUnmap = not (defined(amd64) or defined(i386)) or
+                   defined(windows) or defined(nimAllocNoUnmap)
+
 
 when defined(posix):
   const
@@ -478,7 +481,7 @@ proc freeBigChunk(a: var MemRegion, c: PBigChunk) =
           excl(a.chunkStarts, pageIndex(c))
           c = cast[PBigChunk](le)
 
-  if c.size < ChunkOsReturn or weirdUnmap:
+  if c.size < ChunkOsReturn or doNotUnmap:
     incl(a, a.chunkStarts, pageIndex(c))
     updatePrevSize(a, c, c.size)
     listAdd(a.freeChunksList, c)
@@ -762,7 +765,7 @@ proc deallocOsPages(a: var MemRegion) =
   # we free every 'ordinarily' allocated page by iterating over the page bits:
   for p in elements(a.chunkStarts):
     var page = cast[PChunk](p shl PageShift)
-    when not weirdUnmap:
+    when not doNotUnmap:
       var size = if page.size < PageSize: PageSize else: page.size
       osDeallocPages(page, size)
     else:
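
A minimal sketch of the decision this patch changes, assuming a simplified
chunk type and an illustrative ChunkOsReturn value (the real freeBigChunk
additionally maintains chunk starts, previous sizes and the free list, as
shown above):

  const
    ChunkOsReturn = 256 * 1024  # illustrative threshold, not the allocator's actual constant
    doNotUnmap = not (defined(amd64) or defined(i386)) or
                 defined(windows) or defined(nimAllocNoUnmap)

  type BigChunk = object
    size: int

  proc keepInFreeList(c: BigChunk): bool =
    ## A big chunk stays in the region's free list when it is too small
    ## to be worth returning to the OS, or when unmapping is disabled
    ## for this platform or build.
    c.size < ChunkOsReturn or doNotUnmap

With -d:nimAllocNoUnmap, or on the platforms named in the comment at the top
of the first hunk, keepInFreeList is always true, so large chunks are kept in
the free list rather than unmapped.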