Diffstat (limited to 'lib/pure/concurrency/threadpool.nim')
-rw-r--r--  lib/pure/concurrency/threadpool.nim  106
1 file changed, 55 insertions, 51 deletions
diff --git a/lib/pure/concurrency/threadpool.nim b/lib/pure/concurrency/threadpool.nim
index a4460165d..06ed2fe54 100644
--- a/lib/pure/concurrency/threadpool.nim
+++ b/lib/pure/concurrency/threadpool.nim
@@ -7,20 +7,25 @@
 #    distribution, for details about the copyright.
 #
 
-## Implements Nim's `spawn <manual_experimental.html#parallel-amp-spawn>`_.
-##
-## **See also:**
-## * `threads module <threads.html>`_
-## * `channels module <channels.html>`_
-## * `locks module <locks.html>`_
-## * `asyncdispatch module <asyncdispatch.html>`_
+{.deprecated: "use the nimble packages `malebolgia`, `taskpools` or `weave` instead".}
+
+## Implements Nim's `parallel & spawn statements <manual_experimental.html#parallel-amp-spawn>`_.
 ##
 ## Unstable API.
+##
+## See also
+## ========
+## * `threads module <typedthreads.html>`_ for basic thread support
+## * `locks module <locks.html>`_ for locks and condition variables
+## * `asyncdispatch module <asyncdispatch.html>`_ for asynchronous IO
 
 when not compileOption("threads"):
   {.error: "Threadpool requires --threads:on option.".}
 
-import cpuinfo, cpuload, locks, os
+import std/[cpuinfo, cpuload, locks, os]
+
+when defined(nimPreviewSlimSystem):
+  import std/[assertions, typedthreads, sysatomics]
 
 {.push stackTrace:off.}
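The hunk above deprecates the whole module via a top-level `deprecated` pragma with a message, so every importer is pointed at `malebolgia`, `taskpools` or `weave` at compile time. A minimal sketch of the same mechanism, using the hypothetical module names `mylib`/`mynewlib` (not part of this commit):

# mylib.nim (hypothetical): a module-level `deprecated` pragma with a message
# marks the whole file, so `import mylib` emits this warning at compile time.
{.deprecated: "use `mynewlib` instead".}

proc hello*() = echo "still compiles and runs, but every importer is warned"

The `when not compileOption("threads")` guard kept below it works the same way in any user code that must not be built without thread support.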
 
@@ -51,17 +56,14 @@ proc signal(cv: var Semaphore) =
   release(cv.L)
   signal(cv.c)
 
-const CacheLineSize = 32 # true for most archs
+const CacheLineSize = 64 # true for most archs
 
 type
   Barrier {.compilerproc.} = object
     entered: int
     cv: Semaphore # Semaphore takes 3 words at least
-    when sizeof(int) < 8:
-      cacheAlign: array[CacheLineSize-4*sizeof(int), byte]
-    left: int
-    cacheAlign2: array[CacheLineSize-sizeof(int), byte]
-    interest: bool # whether the master is interested in the "all done" event
+    left {.align(CacheLineSize).}: int
+    interest {.align(CacheLineSize).} : bool # whether the master is interested in the "all done" event
 
 proc barrierEnter(b: ptr Barrier) {.compilerproc, inline.} =
   # due to the signaling between threads, it is ensured we are the only
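This hunk replaces the hand-computed padding arrays in `Barrier` with the `{.align.}` pragma and raises the assumed cache line size from 32 to 64 bytes. A minimal sketch of the same false-sharing precaution on a hypothetical `Counters` type (not from this module):

# Two counters written by different threads are forced onto separate cache
# lines so that writes to one do not invalidate the other's line.
const CacheLineSize = 64   # matches the constant used above

type
  Counters = object
    produced {.align(CacheLineSize).}: int   # written by the producer thread
    consumed {.align(CacheLineSize).}: int   # written by the consumer thread

var c: Counters
doAssert sizeof(c) >= 2 * CacheLineSize      # each counter sits on its own cache line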
@@ -101,8 +103,8 @@ type
     cv: Semaphore
     idx: int
 
-  FlowVarBase* = ref FlowVarBaseObj ## Untyped base class for ``FlowVar[T]``.
-  FlowVarBaseObj = object of RootObj
+  FlowVarBase* = ref FlowVarBaseObj ## Untyped base class for `FlowVar[T] <#FlowVar>`_.
+  FlowVarBaseObj {.acyclic.} = object of RootObj
     ready, usesSemaphore, awaited: bool
     cv: Semaphore  # for 'blockUntilAny' support
     ai: ptr AwaitInfo
@@ -111,10 +113,10 @@ type
                    # be RootRef here otherwise the wrong GC keeps track of it!
     owner: pointer # ptr Worker
 
-  FlowVarObj[T] = object of FlowVarBaseObj
+  FlowVarObj[T] {.acyclic.} = object of FlowVarBaseObj
     blob: T
 
-  FlowVar*{.compilerproc.}[T] = ref FlowVarObj[T] ## A data flow variable.
+  FlowVar*[T] {.compilerproc.} = ref FlowVarObj[T] ## A data flow variable.
 
   ToFreeQueue = object
     len: int
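`FlowVarBaseObj` and `FlowVarObj` are now marked `{.acyclic.}`, a promise to the memory manager that values of these ref types never form reference cycles and can be skipped by the cycle collector. A small sketch on a hypothetical `ResultBox` type, not part of threadpool:

type
  ResultBoxObj {.acyclic.} = object of RootObj   # never points back into a cycle
    value: string
  ResultBox = ref ResultBoxObj

let box = ResultBox(value: "done")
echo box.value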
@@ -138,7 +140,7 @@ type
 const threadpoolWaitMs {.intdefine.}: int = 100
 
 proc blockUntil*(fv: var FlowVarBaseObj) =
-  ## Waits until the value for the ``fv`` arrives.
+  ## Waits until the value for `fv` arrives.
   ##
   ## Usually it is not necessary to call this explicitly.
   if fv.usesSemaphore and not fv.awaited:
@@ -230,12 +232,12 @@ proc nimFlowVarSignal(fv: FlowVarBase) {.compilerproc.} =
     signal(fv.cv)
 
 proc awaitAndThen*[T](fv: FlowVar[T]; action: proc (x: T) {.closure.}) =
-  ## Blocks until the ``fv`` is available and then passes its value
-  ## to ``action``.
+  ## Blocks until `fv` is available and then passes its value
+  ## to `action`.
   ##
-  ## Note that due to Nim's parameter passing semantics this
-  ## means that ``T`` doesn't need to be copied so ``awaitAndThen`` can
-  ## sometimes be more efficient than `^ proc <#^,FlowVar[T]>`_.
+  ## Note that due to Nim's parameter passing semantics, this
+  ## means that `T` doesn't need to be copied, so `awaitAndThen` can
+  ## sometimes be more efficient than the `^ proc <#^,FlowVar[T]>`_.
   blockUntil(fv[])
   when defined(nimV2):
     action(fv.blob)
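As the reworded comment explains, `awaitAndThen` hands the finished value to a callback instead of returning it, so `T` need not be copied. A minimal sketch, assuming a build with `--threads:on`; `makeGreeting` is an illustrative helper, not part of the module:

import std/threadpool

proc makeGreeting(who: string): string = "hello, " & who

let fv = spawn makeGreeting("threadpool")
# The callback runs on the calling thread once the value has arrived;
# the value is passed as a parameter, so no extra copy is required.
awaitAndThen(fv, proc (s: string) = echo s)
sync()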
@@ -266,15 +268,15 @@ proc `^`*[T](fv: FlowVar[T]): T =
   finished(fv[])
 
 proc blockUntilAny*(flowVars: openArray[FlowVarBase]): int =
-  ## Awaits any of the given ``flowVars``. Returns the index of one ``flowVar``
+  ## Awaits any of the given `flowVars`. Returns the index of one `flowVar`
   ## for which a value arrived.
   ##
-  ## A ``flowVar`` only supports one call to ``blockUntilAny`` at the same time.
-  ## That means if you ``blockUntilAny([a,b])`` and ``blockUntilAny([b,c])``
-  ## the second call will only block until ``c``. If there is no ``flowVar`` left
+  ## A `flowVar` only supports one call to `blockUntilAny` at the same time.
+  ## That means if you `blockUntilAny([a,b])` and `blockUntilAny([b,c])`
+  ## the second call will only block until `c`. If there is no `flowVar` left
   ## to be able to wait on, -1 is returned.
   ##
-  ## **Note**: This results in non-deterministic behaviour and should be avoided.
+  ## **Note:** This results in non-deterministic behaviour and should be avoided.
   var ai: AwaitInfo
   ai.cv.initSemaphore()
   var conflicts = 0
@@ -295,9 +297,9 @@ proc blockUntilAny*(flowVars: openArray[FlowVarBase]): int =
   destroySemaphore(ai.cv)
 
 proc isReady*(fv: FlowVarBase): bool =
-  ## Determines whether the specified ``FlowVarBase``'s value is available.
+  ## Determines whether the specified `FlowVarBase`'s value is available.
   ##
-  ## If ``true``, awaiting ``fv`` will not block.
+  ## If `true`, awaiting `fv` will not block.
   if fv.usesSemaphore and not fv.awaited:
     acquire(fv.cv.L)
     result = fv.cv.counter > 0
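Together with `^`, the `isReady` query documented above allows non-blocking polling, while `blockUntilAny` waits for whichever of several tasks finishes first. A small sketch of the polling pattern, assuming `--threads:on`; `slowSquare` is an illustrative helper:

import std/threadpool

proc slowSquare(x: int): int = x * x

let fv = spawn slowSquare(7)
if not fv.isReady:
  echo "result not ready yet, doing other work ..."
echo ^fv   # `^` blocks until the value has arrived, then returns 49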
@@ -315,7 +317,7 @@ const
   MaxDistinguishedThread* {.intdefine.} = 32 ## Maximum number of "distinguished" threads.
 
 type
-  ThreadId* = range[0..MaxDistinguishedThread-1]
+  ThreadId* = range[0..MaxDistinguishedThread-1] ## A thread identifier.
 
 var
   currentPoolSize: int
@@ -402,7 +404,7 @@ proc setMinPoolSize*(size: range[1..MaxThreadPoolSize]) =
 
 proc setMaxPoolSize*(size: range[1..MaxThreadPoolSize]) =
   ## Sets the maximum thread pool size. The default value of this
-  ## is ``MaxThreadPoolSize`` (256).
+  ## is `MaxThreadPoolSize <#MaxThreadPoolSize>`_.
   maxPoolSize = size
   if currentPoolSize > maxPoolSize:
     for i in maxPoolSize..currentPoolSize-1:
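`setMaxPoolSize` now links to the `MaxThreadPoolSize` constant instead of hard-coding 256 in the comment. Sizing the pool is a plain setter call; a sketch with arbitrary example bounds:

import std/threadpool

# Keep at least 2 and at most 8 worker threads; both setters take a
# range[1..MaxThreadPoolSize], so out-of-range values are rejected.
setMinPoolSize(2)
setMaxPoolSize(8)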
@@ -442,43 +444,45 @@ proc setup() =
   for i in 0..<currentPoolSize: activateWorkerThread(i)
 
 proc preferSpawn*(): bool =
-  ## Use this proc to determine quickly if a ``spawn`` or a direct call is
+  ## Use this proc to determine quickly if a `spawn` or a direct call is
   ## preferable.
   ##
-  ## If it returns ``true``, a ``spawn`` may make sense. In general
-  ## it is not necessary to call this directly; use `spawnX template
+  ## If it returns `true`, a `spawn` may make sense. In general
+  ## it is not necessary to call this directly; use the `spawnX template
   ## <#spawnX.t>`_ instead.
   result = gSomeReady.counter > 0
 
-proc spawn*(call: sink typed): void {.magic: "Spawn".}
-  ## Always spawns a new task, so that the ``call`` is never executed on
+proc spawn*(call: sink typed) {.magic: "Spawn".} =
+  ## Always spawns a new task, so that the `call` is never executed on
   ## the calling thread.
   ##
-  ## ``call`` has to be proc call ``p(...)`` where ``p`` is gcsafe and has a
-  ## return type that is either ``void`` or compatible with ``FlowVar[T]``.
+  ## `call` has to be a proc call `p(...)` where `p` is gcsafe and has a
+  ## return type that is either `void` or compatible with `FlowVar[T]`.
+  discard "It uses `nimSpawn3` internally"
 
-proc pinnedSpawn*(id: ThreadId; call: sink typed): void {.magic: "Spawn".}
-  ## Always spawns a new task on the worker thread with ``id``, so that
-  ## the ``call`` is **always** executed on the thread.
+proc pinnedSpawn*(id: ThreadId; call: sink typed) {.magic: "Spawn".} =
+  ## Always spawns a new task on the worker thread with `id`, so that
+  ## the `call` is **always** executed on the thread.
   ##
-  ## ``call`` has to be proc call ``p(...)`` where ``p`` is gcsafe and has a
-  ## return type that is either ``void`` or compatible with ``FlowVar[T]``.
+  ## `call` has to be a proc call `p(...)` where `p` is gcsafe and has a
+  ## return type that is either `void` or compatible with `FlowVar[T]`.
+  discard "It uses `nimSpawn4` internally"
 
 template spawnX*(call) =
   ## Spawns a new task if a CPU core is ready, otherwise executes the
   ## call in the calling thread.
   ##
-  ## Usually it is advised to use `spawn proc <#spawn,sinktyped>`_ in order to
-  ## not block the producer for an unknown amount of time.
+  ## Usually, it is advised to use the `spawn proc <#spawn,sinktyped>`_
+  ## in order to not block the producer for an unknown amount of time.
   ##
-  ## ``call`` has to be proc call ``p(...)`` where ``p`` is gcsafe and has a
-  ## return type that is either 'void' or compatible with ``FlowVar[T]``.
+  ## `call` has to be a proc call `p(...)` where `p` is gcsafe and has a
+  ## return type that is either 'void' or compatible with `FlowVar[T]`.
   (if preferSpawn(): spawn call else: call)
 
 proc parallel*(body: untyped) {.magic: "Parallel".}
   ## A parallel section can be used to execute a block in parallel.
   ##
-  ## ``body`` has to be in a DSL that is a particular subset of the language.
+  ## `body` has to be in a DSL that is a particular subset of the language.
   ##
   ## Please refer to `the manual <manual_experimental.html#parallel-amp-spawn>`_
   ## for further information.
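The `spawn` family documented above turns a gcsafe call `p(...)` into a task whose result arrives in a `FlowVar[T]` (or nothing, for `void`); `pinnedSpawn` additionally fixes the worker thread and `spawnX` falls back to a direct call when no core is free. A minimal fan-out/fan-in sketch, assuming `--threads:on`; `term` and `piApprox` are illustrative helpers, not part of the module:

import std/threadpool

proc term(k: int): float =
  # k-th term of the Leibniz series for pi
  4.0 * (if k mod 2 == 0: 1.0 else: -1.0) / (2.0 * float(k) + 1.0)

proc piApprox(n: int): float =
  var parts = newSeq[FlowVar[float]](n)
  for k in 0 ..< n:
    parts[k] = spawn term(k)   # fan out: one task per term
  for k in 0 ..< n:
    result += ^parts[k]        # fan in: block on each result in turn

echo piApprox(500)

A `parallel` block additionally requires `{.experimental: "parallel".}` (or `--experimental:parallel`) and restricts `body` to the DSL described in the linked manual section.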
@@ -585,7 +589,7 @@ proc nimSpawn4(fn: WorkerProc; data: pointer; id: ThreadId) {.compilerproc.} =
 
 
 proc sync*() =
-  ## A simple barrier to wait for all ``spawn``'ed tasks.
+  ## A simple barrier to wait for all `spawn`ed tasks.
   ##
   ## If you need more elaborate waiting, you have to use an explicit barrier.
   while true:
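`sync` is the matching barrier for fire-and-forget `spawn`s that return `void` and therefore produce no `FlowVar` to wait on. A small sketch, assuming `--threads:on`; `report` is an illustrative helper:

import std/threadpool

proc report(id: int) = echo "task ", id, " done"

for i in 1 .. 4:
  spawn report(i)   # void result: nothing to read back
sync()              # simple barrier: returns once all spawned tasks have finished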