summary refs log tree commit diff stats
path: root/lib
diff options
context:
space:
mode:
Diffstat (limited to 'lib')
-rw-r--r--  lib/pure/concurrency/cpuload.nim  |  6
-rw-r--r--  lib/system/alloc.nim              | 24
2 files changed, 25 insertions(+), 5 deletions(-)
diff --git a/lib/pure/concurrency/cpuload.nim b/lib/pure/concurrency/cpuload.nim
index b0fd002ed..db5f47407 100644
--- a/lib/pure/concurrency/cpuload.nim
+++ b/lib/pure/concurrency/cpuload.nim
@@ -45,12 +45,12 @@ proc advice*(s: var ThreadPoolState): ThreadPoolAdvice =
         procKernelDiff = procKernel - s.prevProcKernel
         procUserDiff = procUser - s.prevProcUser
 
-        sysTotal = int(sysKernelDiff + sysUserDiff)
-        procTotal = int(procKernelDiff + procUserDiff)
+        sysTotal = sysKernelDiff + sysUserDiff
+        procTotal = procKernelDiff + procUserDiff
       # total CPU usage < 85% --> create a new worker thread.
       # Measurements show that 100% and often even 90% is not reached even
       # if all my cores are busy.
-      if sysTotal == 0 or procTotal / sysTotal < 0.85:
+      if sysTotal == 0 or procTotal.float / sysTotal.float < 0.85:
         result = doCreateThread
     s.prevSysKernel = sysKernel
     s.prevSysUser = sysUser
diff --git a/lib/system/alloc.nim b/lib/system/alloc.nim
index 3a8e8a1b6..5b0955132 100644
--- a/lib/system/alloc.nim
+++ b/lib/system/alloc.nim
@@ -100,7 +100,8 @@ type
     freeChunksList: PBigChunk # XXX make this a datastructure with O(1) access
     chunkStarts: IntSet
     root, deleted, last, freeAvlNodes: PAvlNode
-    locked: bool # if locked, we cannot free pages.
+    locked, blockChunkSizeIncrease: bool # if locked, we cannot free pages.
+    nextChunkSize: int
 {.deprecated: [TLLChunk: LLChunk, TAvlNode: AvlNode, TMemRegion: MemRegion].}
 
 # shared:
@@ -275,9 +276,25 @@ proc pageAddr(p: pointer): PChunk {.inline.} =
   #sysAssert(Contains(allocator.chunkStarts, pageIndex(result)))
 
 proc requestOsChunks(a: var MemRegion, size: int): PBigChunk =
+  if not a.blockChunkSizeIncrease:
+    a.nextChunkSize =
+      if a.currMem < 64 * 1024: PageSize*4
+      else: a.nextChunkSize*2
+  var size = size
+
+  if size > a.nextChunkSize:
+    result = cast[PBigChunk](osAllocPages(size))
+  else:
+    result = cast[PBigChunk](osTryAllocPages(a.nextChunkSize))
+    if result == nil:
+      result = cast[PBigChunk](osAllocPages(size))
+      a.blockChunkSizeIncrease = true
+    else:
+      size = a.nextChunkSize
+
   incCurrMem(a, size)
   inc(a.freeMem, size)
-  result = cast[PBigChunk](osAllocPages(size))
+
   sysAssert((cast[ByteAddress](result) and PageMask) == 0, "requestOsChunks 1")
   #zeroMem(result, size)
   result.next = nil
@@ -432,6 +449,9 @@ proc getBigChunk(a: var MemRegion, size: int): PBigChunk =
       splitChunk(a, result, size)
     else:
       result = requestOsChunks(a, size)
+      # if we over allocated split the chunk:
+      if result.size > size:
+        splitChunk(a, result, size)
   result.prevSize = 0 # XXX why is this needed?
   result.used = true
   incl(a, a.chunkStarts, pageIndex(result))