author | Miran <narimiran@disroot.org> | 2019-03-01 12:57:55 +0100 |
---|---|---|
committer | GitHub <noreply@github.com> | 2019-03-01 12:57:55 +0100 |
commit | ca7980f301104d2ac5cbf30b36f79f7b4a350f48 (patch) | |
tree | 721c39041121ee4382c15a8af775828f6796cfb1 /lib/pure/concurrency | |
parent | e9d3c5de1998e142b1653152f2a235fdd2652b74 (diff) | |
improved documentation for several modules (#10752)
More detailed documentation for:

* md5
* hashes

Mostly cosmetic improvements for:

* threadpool
* typetraits
* channels
* threads
Diffstat (limited to 'lib/pure/concurrency')
-rw-r--r-- | lib/pure/concurrency/threadpool.nim | 117 |
1 file changed, 69 insertions, 48 deletions
diff --git a/lib/pure/concurrency/threadpool.nim b/lib/pure/concurrency/threadpool.nim
index 4846c610f..15dbb10de 100644
--- a/lib/pure/concurrency/threadpool.nim
+++ b/lib/pure/concurrency/threadpool.nim
@@ -8,6 +8,12 @@
 #
 
 ## Implements Nim's `spawn <manual.html#parallel-amp-spawn>`_.
+##
+## **See also:**
+## * `threads module <threads.html>`_
+## * `channels module <channels.html>`_
+## * `locks module <locks.html>`_
+## * `asyncdispatch module <asyncdispatch.html>`_
 
 when not compileOption("threads"):
   {.error: "Threadpool requires --threads:on option.".}
@@ -53,7 +59,7 @@ type
       cacheAlign: array[CacheLineSize-4*sizeof(int), byte]
     left: int
     cacheAlign2: array[CacheLineSize-sizeof(int), byte]
-    interest: bool ## wether the master is interested in the "all done" event
+    interest: bool # whether the master is interested in the "all done" event
 
 proc barrierEnter(b: ptr Barrier) {.compilerProc, inline.} =
   # due to the signaling between threads, it is ensured we are the only
@@ -93,11 +99,10 @@ type
     cv: Semaphore
     idx: int
 
-  FlowVarBase* = ref FlowVarBaseObj ## untyped base class for 'FlowVar[T]'
+  FlowVarBase* = ref FlowVarBaseObj ## Untyped base class for ``FlowVar[T]``.
   FlowVarBaseObj = object of RootObj
     ready, usesSemaphore, awaited: bool
-    cv: Semaphore #\
-    # for 'blockUntilAny' support
+    cv: Semaphore # for 'blockUntilAny' support
     ai: ptr AwaitInfo
     idx: int
     data: pointer # we incRef and unref it to keep it alive; note this MUST NOT
@@ -107,7 +112,7 @@ type
   FlowVarObj[T] = object of FlowVarBaseObj
     blob: T
 
-  FlowVar*{.compilerProc.}[T] = ref FlowVarObj[T] ## a data flow variable
+  FlowVar*{.compilerProc.}[T] = ref FlowVarObj[T] ## A data flow variable.
 
   ToFreeQueue = object
     len: int
@@ -129,8 +134,9 @@ type
     readyForTask: Semaphore
 
 proc blockUntil*(fv: FlowVarBase) =
-  ## waits until the value for the flowVar arrives. Usually it is not necessary
-  ## to call this explicitly.
+  ## Waits until the value for ``fv`` arrives.
+  ##
+  ## Usually it is not necessary to call this explicitly.
   if fv.usesSemaphore and not fv.awaited:
     fv.awaited = true
     blockUntil(fv.cv)
@@ -216,10 +222,12 @@ proc nimFlowVarSignal(fv: FlowVarBase) {.compilerProc.} =
     signal(fv.cv)
 
 proc awaitAndThen*[T](fv: FlowVar[T]; action: proc (x: T) {.closure.}) =
-  ## blocks until the ``fv`` is available and then passes its value
-  ## to ``action``. Note that due to Nim's parameter passing semantics this
-  ## means that ``T`` doesn't need to be copied and so ``awaitAndThen`` can
-  ## sometimes be more efficient than ``^``.
+  ## Blocks until ``fv`` is available and then passes its value
+  ## to ``action``.
+  ##
+  ## Note that due to Nim's parameter passing semantics this
+  ## means that ``T`` doesn't need to be copied, so ``awaitAndThen`` can
+  ## sometimes be more efficient than `^ proc <#^,FlowVar[T]>`_.
   blockUntil(fv)
   when T is string or T is seq:
     action(cast[T](fv.data))
@@ -230,18 +238,18 @@ proc awaitAndThen*[T](fv: FlowVar[T]; action: proc (x: T) {.closure.}) =
   finished(fv)
 
 proc unsafeRead*[T](fv: FlowVar[ref T]): ptr T =
-  ## blocks until the value is available and then returns this value.
+  ## Blocks until the value is available and then returns this value.
   blockUntil(fv)
   result = cast[ptr T](fv.data)
 
 proc `^`*[T](fv: FlowVar[ref T]): ref T =
-  ## blocks until the value is available and then returns this value.
+  ## Blocks until the value is available and then returns this value.
   blockUntil(fv)
   let src = cast[ref T](fv.data)
   deepCopy result, src
 
 proc `^`*[T](fv: FlowVar[T]): T =
-  ## blocks until the value is available and then returns this value.
+  ## Blocks until the value is available and then returns this value.
   blockUntil(fv)
   when T is string or T is seq:
     # XXX closures? deepCopy?
@@ -250,11 +258,14 @@ proc `^`*[T](fv: FlowVar[T]): T =
     result = fv.blob
 
 proc blockUntilAny*(flowVars: openArray[FlowVarBase]): int =
-  ## awaits any of the given flowVars. Returns the index of one flowVar for
-  ## which a value arrived. A flowVar only supports one call to 'blockUntilAny' at
-  ## the same time. That means if you blockUntilAny([a,b]) and blockUntilAny([b,c]) the second
-  ## call will only blockUntil 'c'. If there is no flowVar left to be able to wait
-  ## on, -1 is returned.
+  ## Awaits any of the given ``flowVars``. Returns the index of one ``flowVar``
+  ## for which a value arrived.
+  ##
+  ## A ``flowVar`` only supports one call to ``blockUntilAny`` at the same time.
+  ## That means if you ``blockUntilAny([a,b])`` and ``blockUntilAny([b,c])``
+  ## the second call will only block until ``c``. If there is no ``flowVar`` left
+  ## to be able to wait on, -1 is returned.
+  ##
   ## **Note**: This results in non-deterministic behaviour and should be avoided.
   var ai: AwaitInfo
   ai.cv.initSemaphore()
@@ -278,7 +289,7 @@ proc blockUntilAny*(flowVars: openArray[FlowVarBase]): int =
 proc isReady*(fv: FlowVarBase): bool =
   ## Determines whether the specified ``FlowVarBase``'s value is available.
   ##
-  ## If ``true`` awaiting ``fv`` will not block.
+  ## If ``true``, awaiting ``fv`` will not block.
   if fv.usesSemaphore and not fv.awaited:
     acquire(fv.cv.L)
     result = fv.cv.counter > 0
@@ -291,9 +302,9 @@ proc nimArgsPassingDone(p: pointer) {.compilerProc.} =
   signal(w.taskStarted)
 
 const
-  MaxThreadPoolSize* = 256 ## maximal size of the thread pool. 256 threads
+  MaxThreadPoolSize* = 256 ## Maximum size of the thread pool. 256 threads
                            ## should be good enough for anybody ;-)
-  MaxDistinguishedThread* = 32 ## maximal number of "distinguished" threads.
+  MaxDistinguishedThread* = 32 ## Maximum number of "distinguished" threads.
 
 type
   ThreadId* = range[0..MaxDistinguishedThread-1]
@@ -368,12 +379,12 @@ when defined(nimPinToCpu):
   var gCpus: Natural
 
 proc setMinPoolSize*(size: range[1..MaxThreadPoolSize]) =
-  ## sets the minimal thread pool size. The default value of this is 4.
+  ## Sets the minimum thread pool size. The default value of this is 4.
   minPoolSize = size
 
 proc setMaxPoolSize*(size: range[1..MaxThreadPoolSize]) =
-  ## sets the maximal thread pool size. The default value of this
-  ## is ``MaxThreadPoolSize``.
+  ## Sets the maximum thread pool size. The default value of this
+  ## is ``MaxThreadPoolSize`` (256).
  maxPoolSize = size
   if currentPoolSize > maxPoolSize:
     for i in maxPoolSize..currentPoolSize-1:
@@ -413,37 +424,46 @@ proc setup() =
   for i in 0..<currentPoolSize: activateWorkerThread(i)
 
 proc preferSpawn*(): bool =
-  ## Use this proc to determine quickly if a 'spawn' or a direct call is
-  ## preferable. If it returns 'true' a 'spawn' may make sense. In general
-  ## it is not necessary to call this directly; use 'spawnX' instead.
+  ## Use this proc to determine quickly if a ``spawn`` or a direct call is
+  ## preferable.
+  ##
+  ## If it returns ``true``, a ``spawn`` may make sense. In general
+  ## it is not necessary to call this directly; use `spawnX template
+  ## <#spawnX.t>`_ instead.
   result = gSomeReady.counter > 0
 
 proc spawn*(call: typed): void {.magic: "Spawn".}
-  ## always spawns a new task, so that the 'call' is never executed on
-  ## the calling thread. 'call' has to be proc call 'p(...)' where 'p'
-  ## is gcsafe and has a return type that is either 'void' or compatible
-  ## with ``FlowVar[T]``.
+  ## Always spawns a new task, so that the ``call`` is never executed on
+  ## the calling thread.
+  ##
+  ## ``call`` has to be a proc call ``p(...)`` where ``p`` is gcsafe and has a
+  ## return type that is either ``void`` or compatible with ``FlowVar[T]``.
 
 proc pinnedSpawn*(id: ThreadId; call: typed): void {.magic: "Spawn".}
-  ## always spawns a new task on the worker thread with ``id``, so that
-  ## the 'call' is **always** executed on
-  ## the thread. 'call' has to be proc call 'p(...)' where 'p'
-  ## is gcsafe and has a return type that is either 'void' or compatible
-  ## with ``FlowVar[T]``.
+  ## Always spawns a new task on the worker thread with ``id``, so that
+  ## the ``call`` is **always** executed on that thread.
+  ##
+  ## ``call`` has to be a proc call ``p(...)`` where ``p`` is gcsafe and has a
+  ## return type that is either ``void`` or compatible with ``FlowVar[T]``.
 
 template spawnX*(call): void =
-  ## spawns a new task if a CPU core is ready, otherwise executes the
-  ## call in the calling thread. Usually it is advised to
-  ## use 'spawn' in order to not block the producer for an unknown
-  ## amount of time. 'call' has to be proc call 'p(...)' where 'p'
-  ## is gcsafe and has a return type that is either 'void' or compatible
-  ## with ``FlowVar[T]``.
+  ## Spawns a new task if a CPU core is ready, otherwise executes the
+  ## call in the calling thread.
+  ##
+  ## Usually it is advised to use `spawn proc <#spawn,typed>`_ in order to
+  ## not block the producer for an unknown amount of time.
+  ##
+  ## ``call`` has to be a proc call ``p(...)`` where ``p`` is gcsafe and has a
+  ## return type that is either ``void`` or compatible with ``FlowVar[T]``.
   (if preferSpawn(): spawn call else: call)
 
 proc parallel*(body: untyped) {.magic: "Parallel".}
-  ## a parallel section can be used to execute a block in parallel. ``body``
-  ## has to be in a DSL that is a particular subset of the language. Please
-  ## refer to the manual for further information.
+  ## A parallel section can be used to execute a block in parallel.
+  ##
+  ## ``body`` has to be in a DSL that is a particular subset of the language.
+  ##
+  ## Please refer to `the manual <manual.html#parallel-amp-spawn>`_
+  ## for further information.
 
 var
   state: ThreadPoolState
@@ -547,8 +567,9 @@ proc nimSpawn4(fn: WorkerProc; data: pointer; id: ThreadId) {.compilerProc.} =
 
 
 proc sync*() =
-  ## a simple barrier to wait for all spawn'ed tasks. If you need more elaborate
-  ## waiting, you have to use an explicit barrier.
+  ## A simple barrier to wait for all ``spawn``'ed tasks.
+  ##
+  ## If you need more elaborate waiting, you have to use an explicit barrier.
   var toRelease = 0
   while true:
     var allReady = true
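For orientation, here is a minimal usage sketch of the API whose documentation is touched above. It is not part of the commit; the helper procs `double` and `report` are made up for illustration, and the program assumes compilation with `--threads:on`, as the module itself requires.

import threadpool

proc double(x: int): int =
  # A gcsafe proc with a non-void result: spawning it yields a FlowVar[int].
  2 * x

proc report(n: int) =
  # A gcsafe void proc: spawning it yields no FlowVar.
  echo "task ", n

proc main() =
  let fv = spawn double(21)   # runs on a worker thread (or queues until one is free)
  echo(^fv)                   # `^` blocks until the value arrives; prints 42

  for i in 0 ..< 4:
    spawn report(i)           # fire-and-forget void tasks
  sync()                      # barrier: wait for all spawned tasks to finish

main()

For finer-grained control the module also exposes `blockUntilAny`, `awaitAndThen`, `isReady`, `pinnedSpawn`, and the pool-size setters documented in the diff.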