Diffstat (limited to 'lib/system')
-rw-r--r--  lib/system/ansi_c.nim    |  22
-rw-r--r--  lib/system/avltree.nim   |   4
-rw-r--r--  lib/system/cellsets.nim  |  10
-rw-r--r--  lib/system/channels.nim  | 522
-rw-r--r--  lib/system/deepcopy.nim  |  10
-rw-r--r--  lib/system/gc2.nim       | 140
-rw-r--r--  lib/system/gc_common.nim |   4
-rw-r--r--  lib/system/inclrtl.nim   |   8
-rw-r--r--  lib/system/profiler.nim  |   2
-rw-r--r--  lib/system/syslocks.nim  |  20
-rw-r--r--  lib/system/sysspawn.nim  |   2
-rw-r--r--  lib/system/timers.nim    |  24
-rw-r--r--  lib/system/widestrs.nim  |   6
13 files changed, 387 insertions, 387 deletions
diff --git a/lib/system/ansi_c.nim b/lib/system/ansi_c.nim index 9406f26c9..702559034 100644 --- a/lib/system/ansi_c.nim +++ b/lib/system/ansi_c.nim @@ -13,20 +13,20 @@ {.push hints:off} -proc c_strcmp(a, b: cstring): cint {.header: "<string.h>", +proc c_strcmp(a, b: cstring): cint {.header: "<string.h>", noSideEffect, importc: "strcmp".} -proc c_memcmp(a, b: cstring, size: int): cint {.header: "<string.h>", +proc c_memcmp(a, b: cstring, size: int): cint {.header: "<string.h>", noSideEffect, importc: "memcmp".} proc c_memcpy(a, b: cstring, size: int) {.header: "<string.h>", importc: "memcpy".} -proc c_strlen(a: cstring): int {.header: "<string.h>", +proc c_strlen(a: cstring): int {.header: "<string.h>", noSideEffect, importc: "strlen".} proc c_memset(p: pointer, value: cint, size: int) {. header: "<string.h>", importc: "memset".} type - C_TextFile {.importc: "FILE", header: "<stdio.h>", + C_TextFile {.importc: "FILE", header: "<stdio.h>", final, incompleteStruct.} = object - C_BinaryFile {.importc: "FILE", header: "<stdio.h>", + C_BinaryFile {.importc: "FILE", header: "<stdio.h>", final, incompleteStruct.} = object C_TextFileStar = ptr C_TextFile C_BinaryFileStar = ptr C_BinaryFile @@ -101,15 +101,15 @@ proc c_signal(sig: cint, handler: proc (a: cint) {.noconv.}) {. importc: "signal", header: "<signal.h>".} proc c_raise(sig: cint) {.importc: "raise", header: "<signal.h>".} -proc c_fputs(c: cstring, f: C_TextFileStar) {.importc: "fputs", +proc c_fputs(c: cstring, f: C_TextFileStar) {.importc: "fputs", header: "<stdio.h>".} proc c_fgets(c: cstring, n: int, f: C_TextFileStar): cstring {. importc: "fgets", header: "<stdio.h>".} -proc c_fgetc(stream: C_TextFileStar): int {.importc: "fgetc", +proc c_fgetc(stream: C_TextFileStar): int {.importc: "fgetc", header: "<stdio.h>".} -proc c_ungetc(c: int, f: C_TextFileStar) {.importc: "ungetc", +proc c_ungetc(c: int, f: C_TextFileStar) {.importc: "ungetc", header: "<stdio.h>".} -proc c_putc(c: char, stream: C_TextFileStar) {.importc: "putc", +proc c_putc(c: char, stream: C_TextFileStar) {.importc: "putc", header: "<stdio.h>".} proc c_fprintf(f: C_TextFileStar, frmt: cstring) {. importc: "fprintf", header: "<stdio.h>", varargs.} @@ -120,7 +120,7 @@ proc c_fopen(filename, mode: cstring): C_TextFileStar {. 
importc: "fopen", header: "<stdio.h>".} proc c_fclose(f: C_TextFileStar) {.importc: "fclose", header: "<stdio.h>".} -proc c_sprintf(buf, frmt: cstring): cint {.header: "<stdio.h>", +proc c_sprintf(buf, frmt: cstring): cint {.header: "<stdio.h>", importc: "sprintf", varargs, noSideEffect.} # we use it only in a way that cannot lead to security issues @@ -149,7 +149,7 @@ when hostOS != "standalone": when not declared(errno): when defined(NimrodVM): var vmErrnoWrapper {.importc.}: ptr cint - template errno: expr = + template errno: expr = bind vmErrnoWrapper vmErrnoWrapper[] else: diff --git a/lib/system/avltree.nim b/lib/system/avltree.nim index 5ee37d3eb..d5c901542 100644 --- a/lib/system/avltree.nim +++ b/lib/system/avltree.nim @@ -16,7 +16,7 @@ proc lowGauge(n: PAvlNode): int = while not isBottom(it): result = it.key it = it.link[0] - + proc highGauge(n: PAvlNode): int = result = -1 var it = n @@ -24,7 +24,7 @@ proc highGauge(n: PAvlNode): int = result = it.upperBound it = it.link[1] -proc find(root: PAvlNode, key: int): PAvlNode = +proc find(root: PAvlNode, key: int): PAvlNode = var it = root while not isBottom(it): if it.key == key: return it diff --git a/lib/system/cellsets.nim b/lib/system/cellsets.nim index 93c49483b..bb5de6f42 100644 --- a/lib/system/cellsets.nim +++ b/lib/system/cellsets.nim @@ -65,7 +65,7 @@ proc init(s: var CellSeq, cap: int = 1024) = s.cap = cap s.d = cast[PCellArray](alloc0(cap * sizeof(PCell))) -proc deinit(s: var CellSeq) = +proc deinit(s: var CellSeq) = dealloc(s.d) s.d = nil s.len = 0 @@ -98,7 +98,7 @@ proc nextTry(h, maxHash: int): int {.inline.} = # For any initial h in range(maxHash), repeating that maxHash times # generates each int in range(maxHash) exactly once (see any text on # random-number generation for proof). - + proc cellSetGet(t: CellSet, key: ByteAddress): PPageDesc = var h = cast[int](key) and t.max while t.data[h] != nil: @@ -170,16 +170,16 @@ proc excl(s: var CellSet, cell: PCell) = t.bits[u shr IntShift] = (t.bits[u shr IntShift] and not (1 shl (u and IntMask))) -proc containsOrIncl(s: var CellSet, cell: PCell): bool = +proc containsOrIncl(s: var CellSet, cell: PCell): bool = var u = cast[ByteAddress](cell) var t = cellSetGet(s, u shr PageShift) if t != nil: u = (u %% PageSize) /% MemAlign result = (t.bits[u shr IntShift] and (1 shl (u and IntMask))) != 0 - if not result: + if not result: t.bits[u shr IntShift] = t.bits[u shr IntShift] or (1 shl (u and IntMask)) - else: + else: incl(s, cell) result = false diff --git a/lib/system/channels.nim b/lib/system/channels.nim index 6739fb83e..68c0e32d2 100644 --- a/lib/system/channels.nim +++ b/lib/system/channels.nim @@ -1,267 +1,267 @@ -# -# -# Nim's Runtime Library +# +# +# Nim's Runtime Library # (c) Copyright 2015 Andreas Rumpf -# -# See the file "copying.txt", included in this -# distribution, for details about the copyright. -# - -## Channel support for threads. **Note**: This is part of the system module. -## Do not import it directly. To activate thread support you need to compile -## with the ``--threads:on`` command line switch. -## -## **Note:** The current implementation of message passing is slow and does -## not work with cyclic data structures. - -when not declared(NimString): - {.error: "You must not import this module explicitly".} - -type - pbytes = ptr array[0.. 
0xffff, byte] - RawChannel {.pure, final.} = object ## msg queue for a thread - rd, wr, count, mask: int - data: pbytes - lock: SysLock - cond: SysCond - elemType: PNimType - ready: bool - region: MemRegion - PRawChannel = ptr RawChannel - LoadStoreMode = enum mStore, mLoad +# +# See the file "copying.txt", included in this +# distribution, for details about the copyright. +# + +## Channel support for threads. **Note**: This is part of the system module. +## Do not import it directly. To activate thread support you need to compile +## with the ``--threads:on`` command line switch. +## +## **Note:** The current implementation of message passing is slow and does +## not work with cyclic data structures. + +when not declared(NimString): + {.error: "You must not import this module explicitly".} + +type + pbytes = ptr array[0.. 0xffff, byte] + RawChannel {.pure, final.} = object ## msg queue for a thread + rd, wr, count, mask: int + data: pbytes + lock: SysLock + cond: SysCond + elemType: PNimType + ready: bool + region: MemRegion + PRawChannel = ptr RawChannel + LoadStoreMode = enum mStore, mLoad Channel* {.gcsafe.}[TMsg] = RawChannel ## a channel for thread communication {.deprecated: [TRawChannel: RawChannel, TLoadStoreMode: LoadStoreMode, TChannel: Channel].} -const ChannelDeadMask = -2 - -proc initRawChannel(p: pointer) = - var c = cast[PRawChannel](p) - initSysLock(c.lock) - initSysCond(c.cond) - c.mask = -1 - -proc deinitRawChannel(p: pointer) = - var c = cast[PRawChannel](p) - # we need to grab the lock to be safe against sending threads! - acquireSys(c.lock) - c.mask = ChannelDeadMask - deallocOsPages(c.region) - deinitSys(c.lock) - deinitSysCond(c.cond) - -proc storeAux(dest, src: pointer, mt: PNimType, t: PRawChannel, - mode: LoadStoreMode) {.benign.} -proc storeAux(dest, src: pointer, n: ptr TNimNode, t: PRawChannel, - mode: LoadStoreMode) {.benign.} = - var - d = cast[ByteAddress](dest) - s = cast[ByteAddress](src) - case n.kind - of nkSlot: storeAux(cast[pointer](d +% n.offset), - cast[pointer](s +% n.offset), n.typ, t, mode) - of nkList: - for i in 0..n.len-1: storeAux(dest, src, n.sons[i], t, mode) - of nkCase: - copyMem(cast[pointer](d +% n.offset), cast[pointer](s +% n.offset), - n.typ.size) - var m = selectBranch(src, n) - if m != nil: storeAux(dest, src, m, t, mode) - of nkNone: sysAssert(false, "storeAux") - -proc storeAux(dest, src: pointer, mt: PNimType, t: PRawChannel, - mode: LoadStoreMode) = - var - d = cast[ByteAddress](dest) - s = cast[ByteAddress](src) - sysAssert(mt != nil, "mt == nil") - case mt.kind - of tyString: - if mode == mStore: - var x = cast[PPointer](dest) - var s2 = cast[PPointer](s)[] - if s2 == nil: - x[] = nil - else: - var ss = cast[NimString](s2) - var ns = cast[NimString](alloc(t.region, ss.len+1 + GenericSeqSize)) - copyMem(ns, ss, ss.len+1 + GenericSeqSize) - x[] = ns - else: - var x = cast[PPointer](dest) - var s2 = cast[PPointer](s)[] - if s2 == nil: - unsureAsgnRef(x, s2) - else: - unsureAsgnRef(x, copyString(cast[NimString](s2))) - dealloc(t.region, s2) - of tySequence: - var s2 = cast[PPointer](src)[] - var seq = cast[PGenericSeq](s2) - var x = cast[PPointer](dest) - if s2 == nil: - if mode == mStore: - x[] = nil - else: - unsureAsgnRef(x, nil) - else: - sysAssert(dest != nil, "dest == nil") - if mode == mStore: - x[] = alloc(t.region, seq.len *% mt.base.size +% GenericSeqSize) - else: - unsureAsgnRef(x, newObj(mt, seq.len * mt.base.size + GenericSeqSize)) - var dst = cast[ByteAddress](cast[PPointer](dest)[]) - for i in 0..seq.len-1: - 
storeAux( - cast[pointer](dst +% i*% mt.base.size +% GenericSeqSize), - cast[pointer](cast[ByteAddress](s2) +% i *% mt.base.size +% - GenericSeqSize), - mt.base, t, mode) - var dstseq = cast[PGenericSeq](dst) - dstseq.len = seq.len - dstseq.reserved = seq.len - if mode != mStore: dealloc(t.region, s2) - of tyObject: - # copy type field: - var pint = cast[ptr PNimType](dest) - # XXX use dynamic type here! - pint[] = mt - if mt.base != nil: - storeAux(dest, src, mt.base, t, mode) - storeAux(dest, src, mt.node, t, mode) - of tyTuple: - storeAux(dest, src, mt.node, t, mode) - of tyArray, tyArrayConstr: - for i in 0..(mt.size div mt.base.size)-1: - storeAux(cast[pointer](d +% i*% mt.base.size), - cast[pointer](s +% i*% mt.base.size), mt.base, t, mode) - of tyRef: - var s = cast[PPointer](src)[] - var x = cast[PPointer](dest) - if s == nil: - if mode == mStore: - x[] = nil - else: - unsureAsgnRef(x, nil) - else: - if mode == mStore: - x[] = alloc(t.region, mt.base.size) - else: - # XXX we should use the dynamic type here too, but that is not stored - # in the inbox at all --> use source[]'s object type? but how? we need - # a tyRef to the object! - var obj = newObj(mt, mt.base.size) - unsureAsgnRef(x, obj) - storeAux(x[], s, mt.base, t, mode) - if mode != mStore: dealloc(t.region, s) - else: - copyMem(dest, src, mt.size) # copy raw bits - -proc rawSend(q: PRawChannel, data: pointer, typ: PNimType) = - ## adds an `item` to the end of the queue `q`. - var cap = q.mask+1 - if q.count >= cap: - # start with capacity for 2 entries in the queue: - if cap == 0: cap = 1 - var n = cast[pbytes](alloc0(q.region, cap*2*typ.size)) - var z = 0 - var i = q.rd - var c = q.count - while c > 0: - dec c - copyMem(addr(n[z*typ.size]), addr(q.data[i*typ.size]), typ.size) - i = (i + 1) and q.mask - inc z - if q.data != nil: dealloc(q.region, q.data) - q.data = n - q.mask = cap*2 - 1 - q.wr = q.count - q.rd = 0 - storeAux(addr(q.data[q.wr * typ.size]), data, typ, q, mStore) - inc q.count - q.wr = (q.wr + 1) and q.mask - -proc rawRecv(q: PRawChannel, data: pointer, typ: PNimType) = - sysAssert q.count > 0, "rawRecv" - dec q.count - storeAux(data, addr(q.data[q.rd * typ.size]), typ, q, mLoad) - q.rd = (q.rd + 1) and q.mask - -template lockChannel(q: expr, action: stmt) {.immediate.} = - acquireSys(q.lock) - action - releaseSys(q.lock) - -template sendImpl(q: expr) {.immediate.} = - if q.mask == ChannelDeadMask: - sysFatal(DeadThreadError, "cannot send message; thread died") - acquireSys(q.lock) - var m: TMsg - shallowCopy(m, msg) - var typ = cast[PNimType](getTypeInfo(msg)) - rawSend(q, addr(m), typ) - q.elemType = typ - releaseSys(q.lock) - signalSysCond(q.cond) - -proc send*[TMsg](c: var Channel[TMsg], msg: TMsg) = - ## sends a message to a thread. `msg` is deeply copied. - var q = cast[PRawChannel](addr(c)) - sendImpl(q) - -proc llRecv(q: PRawChannel, res: pointer, typ: PNimType) = - # to save space, the generic is as small as possible - q.ready = true - while q.count <= 0: - waitSysCond(q.cond, q.lock) - q.ready = false - if typ != q.elemType: - releaseSys(q.lock) - sysFatal(ValueError, "cannot receive message of wrong type") - rawRecv(q, res, typ) - -proc recv*[TMsg](c: var Channel[TMsg]): TMsg = - ## receives a message from the channel `c`. This blocks until - ## a message has arrived! You may use ``peek`` to avoid the blocking. 
- var q = cast[PRawChannel](addr(c)) - acquireSys(q.lock) - llRecv(q, addr(result), cast[PNimType](getTypeInfo(result))) - releaseSys(q.lock) - -proc tryRecv*[TMsg](c: var Channel[TMsg]): tuple[dataAvailable: bool, - msg: TMsg] = - ## try to receives a message from the channel `c` if available. Otherwise - ## it returns ``(false, default(msg))``. - var q = cast[PRawChannel](addr(c)) - if q.mask != ChannelDeadMask: +const ChannelDeadMask = -2 + +proc initRawChannel(p: pointer) = + var c = cast[PRawChannel](p) + initSysLock(c.lock) + initSysCond(c.cond) + c.mask = -1 + +proc deinitRawChannel(p: pointer) = + var c = cast[PRawChannel](p) + # we need to grab the lock to be safe against sending threads! + acquireSys(c.lock) + c.mask = ChannelDeadMask + deallocOsPages(c.region) + deinitSys(c.lock) + deinitSysCond(c.cond) + +proc storeAux(dest, src: pointer, mt: PNimType, t: PRawChannel, + mode: LoadStoreMode) {.benign.} +proc storeAux(dest, src: pointer, n: ptr TNimNode, t: PRawChannel, + mode: LoadStoreMode) {.benign.} = + var + d = cast[ByteAddress](dest) + s = cast[ByteAddress](src) + case n.kind + of nkSlot: storeAux(cast[pointer](d +% n.offset), + cast[pointer](s +% n.offset), n.typ, t, mode) + of nkList: + for i in 0..n.len-1: storeAux(dest, src, n.sons[i], t, mode) + of nkCase: + copyMem(cast[pointer](d +% n.offset), cast[pointer](s +% n.offset), + n.typ.size) + var m = selectBranch(src, n) + if m != nil: storeAux(dest, src, m, t, mode) + of nkNone: sysAssert(false, "storeAux") + +proc storeAux(dest, src: pointer, mt: PNimType, t: PRawChannel, + mode: LoadStoreMode) = + var + d = cast[ByteAddress](dest) + s = cast[ByteAddress](src) + sysAssert(mt != nil, "mt == nil") + case mt.kind + of tyString: + if mode == mStore: + var x = cast[PPointer](dest) + var s2 = cast[PPointer](s)[] + if s2 == nil: + x[] = nil + else: + var ss = cast[NimString](s2) + var ns = cast[NimString](alloc(t.region, ss.len+1 + GenericSeqSize)) + copyMem(ns, ss, ss.len+1 + GenericSeqSize) + x[] = ns + else: + var x = cast[PPointer](dest) + var s2 = cast[PPointer](s)[] + if s2 == nil: + unsureAsgnRef(x, s2) + else: + unsureAsgnRef(x, copyString(cast[NimString](s2))) + dealloc(t.region, s2) + of tySequence: + var s2 = cast[PPointer](src)[] + var seq = cast[PGenericSeq](s2) + var x = cast[PPointer](dest) + if s2 == nil: + if mode == mStore: + x[] = nil + else: + unsureAsgnRef(x, nil) + else: + sysAssert(dest != nil, "dest == nil") + if mode == mStore: + x[] = alloc(t.region, seq.len *% mt.base.size +% GenericSeqSize) + else: + unsureAsgnRef(x, newObj(mt, seq.len * mt.base.size + GenericSeqSize)) + var dst = cast[ByteAddress](cast[PPointer](dest)[]) + for i in 0..seq.len-1: + storeAux( + cast[pointer](dst +% i*% mt.base.size +% GenericSeqSize), + cast[pointer](cast[ByteAddress](s2) +% i *% mt.base.size +% + GenericSeqSize), + mt.base, t, mode) + var dstseq = cast[PGenericSeq](dst) + dstseq.len = seq.len + dstseq.reserved = seq.len + if mode != mStore: dealloc(t.region, s2) + of tyObject: + # copy type field: + var pint = cast[ptr PNimType](dest) + # XXX use dynamic type here! 
+ pint[] = mt + if mt.base != nil: + storeAux(dest, src, mt.base, t, mode) + storeAux(dest, src, mt.node, t, mode) + of tyTuple: + storeAux(dest, src, mt.node, t, mode) + of tyArray, tyArrayConstr: + for i in 0..(mt.size div mt.base.size)-1: + storeAux(cast[pointer](d +% i*% mt.base.size), + cast[pointer](s +% i*% mt.base.size), mt.base, t, mode) + of tyRef: + var s = cast[PPointer](src)[] + var x = cast[PPointer](dest) + if s == nil: + if mode == mStore: + x[] = nil + else: + unsureAsgnRef(x, nil) + else: + if mode == mStore: + x[] = alloc(t.region, mt.base.size) + else: + # XXX we should use the dynamic type here too, but that is not stored + # in the inbox at all --> use source[]'s object type? but how? we need + # a tyRef to the object! + var obj = newObj(mt, mt.base.size) + unsureAsgnRef(x, obj) + storeAux(x[], s, mt.base, t, mode) + if mode != mStore: dealloc(t.region, s) + else: + copyMem(dest, src, mt.size) # copy raw bits + +proc rawSend(q: PRawChannel, data: pointer, typ: PNimType) = + ## adds an `item` to the end of the queue `q`. + var cap = q.mask+1 + if q.count >= cap: + # start with capacity for 2 entries in the queue: + if cap == 0: cap = 1 + var n = cast[pbytes](alloc0(q.region, cap*2*typ.size)) + var z = 0 + var i = q.rd + var c = q.count + while c > 0: + dec c + copyMem(addr(n[z*typ.size]), addr(q.data[i*typ.size]), typ.size) + i = (i + 1) and q.mask + inc z + if q.data != nil: dealloc(q.region, q.data) + q.data = n + q.mask = cap*2 - 1 + q.wr = q.count + q.rd = 0 + storeAux(addr(q.data[q.wr * typ.size]), data, typ, q, mStore) + inc q.count + q.wr = (q.wr + 1) and q.mask + +proc rawRecv(q: PRawChannel, data: pointer, typ: PNimType) = + sysAssert q.count > 0, "rawRecv" + dec q.count + storeAux(data, addr(q.data[q.rd * typ.size]), typ, q, mLoad) + q.rd = (q.rd + 1) and q.mask + +template lockChannel(q: expr, action: stmt) {.immediate.} = + acquireSys(q.lock) + action + releaseSys(q.lock) + +template sendImpl(q: expr) {.immediate.} = + if q.mask == ChannelDeadMask: + sysFatal(DeadThreadError, "cannot send message; thread died") + acquireSys(q.lock) + var m: TMsg + shallowCopy(m, msg) + var typ = cast[PNimType](getTypeInfo(msg)) + rawSend(q, addr(m), typ) + q.elemType = typ + releaseSys(q.lock) + signalSysCond(q.cond) + +proc send*[TMsg](c: var Channel[TMsg], msg: TMsg) = + ## sends a message to a thread. `msg` is deeply copied. + var q = cast[PRawChannel](addr(c)) + sendImpl(q) + +proc llRecv(q: PRawChannel, res: pointer, typ: PNimType) = + # to save space, the generic is as small as possible + q.ready = true + while q.count <= 0: + waitSysCond(q.cond, q.lock) + q.ready = false + if typ != q.elemType: + releaseSys(q.lock) + sysFatal(ValueError, "cannot receive message of wrong type") + rawRecv(q, res, typ) + +proc recv*[TMsg](c: var Channel[TMsg]): TMsg = + ## receives a message from the channel `c`. This blocks until + ## a message has arrived! You may use ``peek`` to avoid the blocking. + var q = cast[PRawChannel](addr(c)) + acquireSys(q.lock) + llRecv(q, addr(result), cast[PNimType](getTypeInfo(result))) + releaseSys(q.lock) + +proc tryRecv*[TMsg](c: var Channel[TMsg]): tuple[dataAvailable: bool, + msg: TMsg] = + ## try to receives a message from the channel `c` if available. Otherwise + ## it returns ``(false, default(msg))``. 
+ var q = cast[PRawChannel](addr(c)) + if q.mask != ChannelDeadMask: if tryAcquireSys(q.lock): - if q.count > 0: - llRecv(q, addr(result.msg), cast[PNimType](getTypeInfo(result.msg))) - result.dataAvailable = true - releaseSys(q.lock) - -proc peek*[TMsg](c: var Channel[TMsg]): int = - ## returns the current number of messages in the channel `c`. Returns -1 - ## if the channel has been closed. **Note**: This is dangerous to use - ## as it encourages races. It's much better to use ``tryRecv`` instead. - var q = cast[PRawChannel](addr(c)) - if q.mask != ChannelDeadMask: - lockChannel(q): - result = q.count - else: - result = -1 - -proc open*[TMsg](c: var Channel[TMsg]) = - ## opens a channel `c` for inter thread communication. - initRawChannel(addr(c)) - -proc close*[TMsg](c: var Channel[TMsg]) = - ## closes a channel `c` and frees its associated resources. - deinitRawChannel(addr(c)) - -proc ready*[TMsg](c: var Channel[TMsg]): bool = - ## returns true iff some thread is waiting on the channel `c` for - ## new messages. - var q = cast[PRawChannel](addr(c)) - result = q.ready - + if q.count > 0: + llRecv(q, addr(result.msg), cast[PNimType](getTypeInfo(result.msg))) + result.dataAvailable = true + releaseSys(q.lock) + +proc peek*[TMsg](c: var Channel[TMsg]): int = + ## returns the current number of messages in the channel `c`. Returns -1 + ## if the channel has been closed. **Note**: This is dangerous to use + ## as it encourages races. It's much better to use ``tryRecv`` instead. + var q = cast[PRawChannel](addr(c)) + if q.mask != ChannelDeadMask: + lockChannel(q): + result = q.count + else: + result = -1 + +proc open*[TMsg](c: var Channel[TMsg]) = + ## opens a channel `c` for inter thread communication. + initRawChannel(addr(c)) + +proc close*[TMsg](c: var Channel[TMsg]) = + ## closes a channel `c` and frees its associated resources. + deinitRawChannel(addr(c)) + +proc ready*[TMsg](c: var Channel[TMsg]): bool = + ## returns true iff some thread is waiting on the channel `c` for + ## new messages. + var q = cast[PRawChannel](addr(c)) + result = q.ready + diff --git a/lib/system/deepcopy.nim b/lib/system/deepcopy.nim index 093c0f3a7..03230e541 100644 --- a/lib/system/deepcopy.nim +++ b/lib/system/deepcopy.nim @@ -14,7 +14,7 @@ proc genericDeepCopyAux(dest, src: pointer, n: ptr TNimNode) {.benign.} = s = cast[ByteAddress](src) case n.kind of nkSlot: - genericDeepCopyAux(cast[pointer](d +% n.offset), + genericDeepCopyAux(cast[pointer](d +% n.offset), cast[pointer](s +% n.offset), n.typ) of nkList: for i in 0..n.len-1: @@ -24,7 +24,7 @@ proc genericDeepCopyAux(dest, src: pointer, n: ptr TNimNode) {.benign.} = var m = selectBranch(src, n) # reset if different branches are in use; note different branches also # imply that's not self-assignment (``x = x``)! 
- if m != dd and dd != nil: + if m != dd and dd != nil: genericResetAux(dest, dd) copyMem(cast[pointer](d +% n.offset), cast[pointer](s +% n.offset), n.typ.size) @@ -103,16 +103,16 @@ proc genericDeepCopyAux(dest, src: pointer, mt: PNimType) = else: let realType = x.typ let z = newObj(realType, realType.base.size) - + unsureAsgnRef(cast[PPointer](dest), z) x.typ = cast[PNimType](cast[int](z) or 1) genericDeepCopyAux(z, s2, realType.base) x.typ = realType else: let realType = mt - let z = newObj(realType, realType.base.size) + let z = newObj(realType, realType.base.size) unsureAsgnRef(cast[PPointer](dest), z) - genericDeepCopyAux(z, s2, realType.base) + genericDeepCopyAux(z, s2, realType.base) of tyPtr: # no cycle check here, but also not really required let s2 = cast[PPointer](src)[] diff --git a/lib/system/gc2.nim b/lib/system/gc2.nim index 015e08c9e..4ca0d144f 100644 --- a/lib/system/gc2.nim +++ b/lib/system/gc2.nim @@ -37,19 +37,19 @@ const rcAlive = 0b00000 # object is reachable. # color *black* in the original paper - + rcCycleCandidate = 0b00001 # possible root of a cycle. *purple* rcDecRefApplied = 0b00010 # the first dec-ref phase of the # collector was already applied to this # object. *gray* - + rcMaybeDead = 0b00011 # this object is a candidate for deletion # during the collect cycles algorithm. # *white*. - + rcReallyDead = 0b00100 # this is proved to be garbage - + rcRetiredBuffer = 0b00101 # this is a seq or string buffer that # was replaced by a resize operation. # see growObj for details @@ -80,14 +80,14 @@ const # The bit must also be set for new objects that are not rc1 and it must be # examined in the decref loop in collectCycles. # XXX: not implemented yet as tests didn't show any improvement from this - + MarkingSkipsAcyclicObjects = true - # Acyclic objects can be safely ignored in the mark and scan phases, + # Acyclic objects can be safely ignored in the mark and scan phases, # because they cannot contribute to the internal count. # XXX: if we generate specialized `markCyclic` and `markAcyclic` # procs we can further optimize this as there won't be need for any # checks in the code - + MinimumStackMarking = false # Try to scan only the user stack and ignore the part of the stack # belonging to the GC itself. see setStackTop for further info. @@ -110,9 +110,9 @@ type maxThreshold: int # max threshold that has been set maxStackSize: int # max stack size maxStackCells: int # max stack cells in ``decStack`` - cycleTableSize: int # max entries in cycle table + cycleTableSize: int # max entries in cycle table maxPause: int64 # max measured GC pause in nanoseconds - + GcHeap {.final, pure.} = object # this contains the zero count and # non-zero count table stackBottom: pointer @@ -124,7 +124,7 @@ type tempStack: CellSeq # temporary stack for recursion elimination freeStack: CellSeq # objects ready to be freed recGcLock: int # prevent recursion via finalizers; no thread lock - cycleRootsTrimIdx: int # Trimming is a light-weight collection of the + cycleRootsTrimIdx: int # Trimming is a light-weight collection of the # cycle roots table that uses a cheap linear scan # to find only possitively dead objects. 
# One strategy is to perform it only for new objects @@ -143,11 +143,11 @@ var when not defined(useNimRtl): instantiateForRegion(gch.region) -template acquire(gch: GcHeap) = +template acquire(gch: GcHeap) = when hasThreadSupport and hasSharedHeap: AcquireSys(HeapLock) -template release(gch: GcHeap) = +template release(gch: GcHeap) = when hasThreadSupport and hasSharedHeap: releaseSys(HeapLock) @@ -185,7 +185,7 @@ when debugGC: of rcRetiredBuffer: return "retired" of rcReallyDead: return "dead" else: return "unknown?" - + proc inCycleRootsStr(c: PCell): cstring = if c.isBitUp(rcInCycleRoots): result = "cycleroot" else: result = "" @@ -225,7 +225,7 @@ template setStackTop(gch) = template addCycleRoot(cycleRoots: var CellSeq, c: PCell) = if c.color != rcCycleCandidate: c.setColor rcCycleCandidate - + # the object may be buffered already. for example, consider: # decref; incref; decref if c.isBitDown(rcInCycleRoots): @@ -307,7 +307,7 @@ when traceGC: let startLen = gch.tempStack.len c.forAllChildren waPush - + while startLen != gch.tempStack.len: dec gch.tempStack.len var c = gch.tempStack.d[gch.tempStack.len] @@ -331,7 +331,7 @@ when traceGC: if c.isBitUp(rcMarkBit) and not isMarked: writecell("cyclic cell", cell) cprintf "Weight %d\n", cell.computeCellWeight - + proc writeLeakage(onlyRoots: bool) = if onlyRoots: for c in elements(states[csAllocated]): @@ -356,7 +356,7 @@ template WithHeapLock(blk: stmt): stmt = blk when hasThreadSupport and hasSharedHeap: ReleaseSys(HeapLock) -proc rtlAddCycleRoot(c: PCell) {.rtl, inl.} = +proc rtlAddCycleRoot(c: PCell) {.rtl, inl.} = # we MUST access gch as a global here, because this crosses DLL boundaries! WithHeapLock: addCycleRoot(gch.cycleRoots, c) @@ -423,7 +423,7 @@ template doIncRef(cc: PCell, elif IncRefRemovesCandidates: c.setColor rcAlive # XXX: this is not really atomic enough! - + proc nimGCref(p: pointer) {.compilerProc, inline.} = doIncRef(usrToCell(p)) proc nimGCunref(p: pointer) {.compilerProc, inline.} = doDecRef(usrToCell(p)) @@ -449,7 +449,7 @@ proc asgnRef(dest: PPointer, src: pointer) {.compilerProc, inline.} = doAsgnRef(dest, src, LocalHeap, MaybeCyclic) proc asgnRefNoCycle(dest: PPointer, src: pointer) {.compilerProc, inline.} = - # the code generator calls this proc if it is known at compile time that no + # the code generator calls this proc if it is known at compile time that no # cycle is possible. doAsgnRef(dest, src, LocalHeap, Acyclic) @@ -509,7 +509,7 @@ proc forAllSlotsAux(dest: pointer, n: ptr TNimNode, op: WalkOp) = if n.sons[i].typ.kind in {tyRef, tyString, tySequence}: doOperation(cast[PPointer](d +% n.sons[i].offset)[], op) else: - forAllChildrenAux(cast[pointer](d +% n.sons[i].offset), + forAllChildrenAux(cast[pointer](d +% n.sons[i].offset), n.sons[i].typ, op) else: forAllSlotsAux(dest, n.sons[i], op) @@ -557,7 +557,7 @@ proc addNewObjToZCT(res: PCell, gch: var GcHeap) {.inline.} = # we check the last 8 entries (cache line) for a slot that could be reused. # In 63% of all cases we succeed here! But we have to optimize the heck # out of this small linear search so that ``newObj`` is not slowed down. 
- # + # # Slots to try cache hit # 1 32% # 4 59% @@ -602,7 +602,7 @@ proc rawNewObj(typ: PNimType, size: int, gch: var GcHeap, rc1 = false): pointer acquire(gch) sysAssert(allocInv(gch.region), "rawNewObj begin") sysAssert(typ.kind in {tyRef, tyString, tySequence}, "newObj: 1") - + collectCT(gch) sysAssert(allocInv(gch.region), "rawNewObj after collect") @@ -610,16 +610,16 @@ proc rawNewObj(typ: PNimType, size: int, gch: var GcHeap, rc1 = false): pointer sysAssert(allocInv(gch.region), "rawNewObj after rawAlloc") sysAssert((cast[ByteAddress](res) and (MemAlign-1)) == 0, "newObj: 2") - + res.typ = typ - + when trackAllocationSource and not hasThreadSupport: if framePtr != nil and framePtr.prev != nil and framePtr.prev.prev != nil: res.filename = framePtr.prev.prev.filename res.line = framePtr.prev.prev.line else: res.filename = "nofile" - + if rc1: res.refcount = rcIncrement # refcount is 1 else: @@ -631,9 +631,9 @@ proc rawNewObj(typ: PNimType, size: int, gch: var GcHeap, rc1 = false): pointer res.setBit(rcInCycleRoots) res.setColor rcCycleCandidate gch.cycleRoots.add res - + sysAssert(isAllocatedPtr(gch.region, res), "newObj: 3") - + when logGC: writeCell("new cell", res) gcTrace(res, csAllocated) release(gch) @@ -711,9 +711,9 @@ proc growObj(old: pointer, newsize: int, gch: var GcHeap): pointer = var res = cast[PCell](rawAlloc(gch.region, newsize + sizeof(Cell))) var elemSize = if ol.typ.kind != tyString: ol.typ.base.size else: 1 - + var oldsize = cast[PGenericSeq](old).len*elemSize + GenericSeqSize - + # XXX: This should happen outside # call user-defined move code # call user-defined default constructor @@ -723,24 +723,24 @@ proc growObj(old: pointer, newsize: int, gch: var GcHeap): pointer = sysAssert((cast[ByteAddress](res) and (MemAlign-1)) == 0, "growObj: 3") sysAssert(res.refcount shr rcShift <=% 1, "growObj: 4") - + when false: if ol.isBitUp(rcZct): var j = gch.zct.len-1 var d = gch.zct.d - while j >= 0: + while j >= 0: if d[j] == ol: d[j] = res break dec(j) - + if ol.isBitUp(rcInCycleRoots): for i in 0 .. <gch.cycleRoots.len: if gch.cycleRoots.d[i] == ol: eraseAt(gch.cycleRoots, i) freeCell(gch, ol) - + else: # the new buffer inherits the GC state of the old one if res.isBitUp(rcZct): gch.zct.add res @@ -787,12 +787,12 @@ proc doOperation(p: pointer, op: WalkOp) = var c: PCell = usrToCell(p) sysAssert(c != nil, "doOperation: 1") gch.tempStack.add c - + proc nimGCvisit(d: pointer, op: int) {.compilerRtl.} = doOperation(d, WalkOp(op)) type - RecursionType = enum + RecursionType = enum FromChildren, FromRoot {.deprecated: [TRecursionType: RecursionType].} @@ -838,14 +838,14 @@ proc collectCycles(gch: var GcHeap) = let startLen = gch.tempStack.len cell.setColor rcAlive cell.forAllChildren waPush - + while startLen != gch.tempStack.len: dec gch.tempStack.len var c = gch.tempStack.d[gch.tempStack.len] if c.color != rcAlive: c.setColor rcAlive c.forAllChildren waPush - + template earlyMarkAlive(stackRoots) = # This marks all objects reachable from the stack as alive before any # of the other stages is executed. 
Such objects cannot be garbage and @@ -856,7 +856,7 @@ proc collectCycles(gch: var GcHeap) = earlyMarkAliveRec(c) earlyMarkAlive(gch.decStack) - + when CollectCyclesStats: let tAfterEarlyMarkAlive = getTicks() @@ -864,7 +864,7 @@ proc collectCycles(gch: var GcHeap) = let startLen = gch.tempStack.len cell.setColor rcDecRefApplied cell.forAllChildren waPush - + while startLen != gch.tempStack.len: dec gch.tempStack.len var c = gch.tempStack.d[gch.tempStack.len] @@ -876,7 +876,7 @@ proc collectCycles(gch: var GcHeap) = if c.color != rcDecRefApplied: c.setColor rcDecRefApplied c.forAllChildren waPush - + template markRoots(roots) = var i = 0 while i < roots.len: @@ -885,34 +885,34 @@ proc collectCycles(gch: var GcHeap) = inc i else: roots.trimAt i - + markRoots(gch.cycleRoots) - + when CollectCyclesStats: let tAfterMark = getTicks() c_printf "COLLECT CYCLES %d: %d/%d\n", gcCollectionIdx, gch.cycleRoots.len, l0 - + template recursiveMarkAlive(cell) = let startLen = gch.tempStack.len cell.setColor rcAlive cell.forAllChildren waPush - + while startLen != gch.tempStack.len: dec gch.tempStack.len var c = gch.tempStack.d[gch.tempStack.len] if ignoreObject(c): continue inc c.refcount, rcIncrement inc increfs - + if c.color != rcAlive: c.setColor rcAlive c.forAllChildren waPush - + template scanRoots(roots) = for i in 0 .. <roots.len: let startLen = gch.tempStack.len gch.tempStack.add roots.d[i] - + while startLen != gch.tempStack.len: dec gch.tempStack.len var c = gch.tempStack.d[gch.tempStack.len] @@ -928,9 +928,9 @@ proc collectCycles(gch: var GcHeap) = c.setColor rcMaybeDead inc maybedeads c.forAllChildren waPush - + scanRoots(gch.cycleRoots) - + when CollectCyclesStats: let tAfterScan = getTicks() @@ -941,7 +941,7 @@ proc collectCycles(gch: var GcHeap) = let startLen = gch.tempStack.len gch.tempStack.add c - + while startLen != gch.tempStack.len: dec gch.tempStack.len var c = gch.tempStack.d[gch.tempStack.len] @@ -965,7 +965,7 @@ proc collectCycles(gch: var GcHeap) = freeCell(gch, gch.freeStack.d[i]) collectDead(gch.cycleRoots) - + when CollectCyclesStats: let tFinal = getTicks() cprintf "times:\n early mark alive: %d ms\n mark: %d ms\n scan: %d ms\n collect: %d ms\n decrefs: %d\n increfs: %d\n marked dead: %d\n collected: %d\n", @@ -986,7 +986,7 @@ proc collectCycles(gch: var GcHeap) = when MarkingSkipsAcyclicObjects: # Collect the acyclic objects that became unreachable due to collected - # cyclic objects. + # cyclic objects. discard collectZCT(gch) # collectZCT may add new cycle candidates and we may decide to loop here # if gch.cycleRoots.len > 0: repeat @@ -1030,12 +1030,12 @@ proc gcMark(gch: var GcHeap, p: pointer) {.inline.} = add(gch.decStack, cell) sysAssert(allocInv(gch.region), "gcMark end") -proc markThreadStacks(gch: var GcHeap) = +proc markThreadStacks(gch: var GcHeap) = when hasThreadSupport and hasSharedHeap: {.error: "not fully implemented".} var it = threadList while it != nil: - # mark registers: + # mark registers: for i in 0 .. high(it.registers): gcMark(gch, it.registers[i]) var sp = cast[ByteAddress](it.stackBottom) var max = cast[ByteAddress](it.stackTop) @@ -1121,7 +1121,7 @@ elif stackIncreases: var b = cast[ByteAddress](stackTop) var x = cast[ByteAddress](p) result = a <=% x and x <=% b - + proc markStackAndRegisters(gch: var GcHeap) {.noinline, cdecl.} = var registers: C_JmpBuf if c_setjmp(registers) == 0'i32: # To fill the C stack with registers. 
@@ -1156,7 +1156,7 @@ else: # mark the registers var jmpbufPtr = cast[ByteAddress](addr(registers)) var jmpbufEnd = jmpbufPtr +% jmpbufSize - + while jmpbufPtr <=% jmpbufEnd: gcMark(gch, cast[PPointer](jmpbufPtr)[]) jmpbufPtr = jmpbufPtr +% sizeof(pointer) @@ -1218,18 +1218,18 @@ proc releaseCell(gch: var GcHeap, cell: PCell) = proc collectZCT(gch: var GcHeap): bool = const workPackage = 100 var L = addr(gch.zct.len) - + when withRealtime: var steps = workPackage var t0: Ticks if gch.maxPause > 0: t0 = getticks() - + while L[] > 0: var c = gch.zct.d[0] sysAssert c.isBitUp(rcZct), "collectZCT: rcZct missing!" sysAssert(isAllocatedPtr(gch.region, c), "collectZCT: isAllocatedPtr") - - # remove from ZCT: + + # remove from ZCT: c.clearBit(rcZct) gch.zct.d[0] = gch.zct.d[L[] - 1] dec(L[]) @@ -1237,7 +1237,7 @@ proc collectZCT(gch: var GcHeap): bool = if c.refcount <% rcIncrement: # It may have a RC > 0, if it is in the hardware stack or # it has not been removed yet from the ZCT. This is because - # ``incref`` does not bother to remove the cell from the ZCT + # ``incref`` does not bother to remove the cell from the ZCT # as this might be too slow. # In any case, it should be removed from the ZCT. But not # freed. **KEEP THIS IN MIND WHEN MAKING THIS INCREMENTAL!** @@ -1252,7 +1252,7 @@ proc collectZCT(gch: var GcHeap): bool = steps = workPackage if gch.maxPause > 0: let duration = getticks() - t0 - # the GC's measuring is not accurate and needs some cleanup actions + # the GC's measuring is not accurate and needs some cleanup actions # (stack unmarking), so subtract some short amount of time in to # order to miss deadlines less often: if duration >= gch.maxPause - 50_000: @@ -1269,7 +1269,7 @@ proc unmarkStackAndRegisters(gch: var GcHeap) = # XXX: just call doDecRef? 
var c = d[i] sysAssert c.typ != nil, "unmarkStackAndRegisters 2" - + if c.color == rcRetiredBuffer: continue @@ -1278,7 +1278,7 @@ proc unmarkStackAndRegisters(gch: var GcHeap) = # the object survived only because of a stack reference # it still doesn't have heap references addZCT(gch.zct, c) - + if canbeCycleRoot(c): # any cyclic object reachable from the stack can be turned into # a leak if it's orphaned through the stack reference @@ -1293,7 +1293,7 @@ proc collectCTBody(gch: var GcHeap) = let t0 = getticks() when debugGC: inc gcCollectionIdx sysAssert(allocInv(gch.region), "collectCT: begin") - + gch.stat.maxStackSize = max(gch.stat.maxStackSize, stackSize()) sysAssert(gch.decStack.len == 0, "collectCT") prepareForInteriorPointerChecking(gch.region) @@ -1312,7 +1312,7 @@ proc collectCTBody(gch: var GcHeap) = gch.stat.maxThreshold = max(gch.stat.maxThreshold, gch.cycleThreshold) unmarkStackAndRegisters(gch) sysAssert(allocInv(gch.region), "collectCT: end") - + when withRealtime: let duration = getticks() - t0 gch.stat.maxPause = max(gch.stat.maxPause, duration) @@ -1322,7 +1322,7 @@ proc collectCTBody(gch: var GcHeap) = proc collectCT(gch: var GcHeap) = if (gch.zct.len >= ZctThreshold or (cycleGC and - getOccupiedMem(gch.region)>=gch.cycleThreshold) or alwaysGC) and + getOccupiedMem(gch.region)>=gch.cycleThreshold) or alwaysGC) and gch.recGcLock == 0: collectCTBody(gch) @@ -1337,7 +1337,7 @@ when withRealtime: acquire(gch) gch.maxPause = us.toNano if (gch.zct.len >= ZctThreshold or (cycleGC and - getOccupiedMem(gch.region)>=gch.cycleThreshold) or alwaysGC) or + getOccupiedMem(gch.region)>=gch.cycleThreshold) or alwaysGC) or strongAdvice: collectCTBody(gch) release(gch) @@ -1345,13 +1345,13 @@ when withRealtime: proc GC_step*(us: int, strongAdvice = false) = GC_step(gch, us, strongAdvice) when not defined(useNimRtl): - proc GC_disable() = + proc GC_disable() = when hasThreadSupport and hasSharedHeap: discard atomicInc(gch.recGcLock, 1) else: inc(gch.recGcLock) proc GC_enable() = - if gch.recGcLock > 0: + if gch.recGcLock > 0: when hasThreadSupport and hasSharedHeap: discard atomicDec(gch.recGcLock, 1) else: diff --git a/lib/system/gc_common.nim b/lib/system/gc_common.nim index c7dd667e4..ceb362378 100644 --- a/lib/system/gc_common.nim +++ b/lib/system/gc_common.nim @@ -197,7 +197,7 @@ else: var x = cast[ByteAddress](p) if a <=% x and x <=% b: return true - + template forEachStackSlot(gch, gcMark: expr) {.immediate, dirty.} = # We use a jmp_buf buffer that is in the C stack. # Used to traverse the stack and registers assuming @@ -207,7 +207,7 @@ else: getRegisters(registers) for i in registers.low .. 
registers.high: gcMark(gch, cast[PPointer](registers[i])) - + for stack in items(gch.stack): stack.maxStackSize = max(stack.maxStackSize, stackSize(stack.starts)) var max = cast[ByteAddress](stack.starts) diff --git a/lib/system/inclrtl.nim b/lib/system/inclrtl.nim index d0dc38284..201a99ca7 100644 --- a/lib/system/inclrtl.nim +++ b/lib/system/inclrtl.nim @@ -20,21 +20,21 @@ when not defined(nimNewShared): {.pragma: gcsafe.} when defined(createNimRtl): - when defined(useNimRtl): + when defined(useNimRtl): {.error: "Cannot create and use nimrtl at the same time!".} elif appType != "lib": {.error: "nimrtl must be built as a library!".} -when defined(createNimRtl): +when defined(createNimRtl): {.pragma: rtl, exportc: "nimrtl_$1", dynlib, gcsafe.} {.pragma: inl.} {.pragma: compilerRtl, compilerproc, exportc: "nimrtl_$1", dynlib.} elif defined(useNimRtl): - when defined(windows): + when defined(windows): const nimrtl* = "nimrtl.dll" elif defined(macosx): const nimrtl* = "libnimrtl.dylib" - else: + else: const nimrtl* = "libnimrtl.so" {.pragma: rtl, importc: "nimrtl_$1", dynlib: nimrtl, gcsafe.} {.pragma: inl.} diff --git a/lib/system/profiler.nim b/lib/system/profiler.nim index c93456fb3..4f600417e 100644 --- a/lib/system/profiler.nim +++ b/lib/system/profiler.nim @@ -40,7 +40,7 @@ proc captureStackTrace(f: PFrame, st: var StackTrace) = while it != nil: inc(total) it = it.prev - for j in 1..total-i-(firstCalls-1): + for j in 1..total-i-(firstCalls-1): if b != nil: b = b.prev if total != i: st[i] = "..." diff --git a/lib/system/syslocks.nim b/lib/system/syslocks.nim index ec8c26275..7a113b9d4 100644 --- a/lib/system/syslocks.nim +++ b/lib/system/syslocks.nim @@ -23,7 +23,7 @@ when defined(Windows): SysCond = Handle {.deprecated: [THandle: Handle, TSysLock: SysLock, TSysCond: SysCond].} - + proc initSysLock(L: var SysLock) {.stdcall, noSideEffect, dynlib: "kernel32", importc: "InitializeCriticalSection".} ## Initializes the lock `L`. @@ -31,14 +31,14 @@ when defined(Windows): proc tryAcquireSysAux(L: var SysLock): int32 {.stdcall, noSideEffect, dynlib: "kernel32", importc: "TryEnterCriticalSection".} ## Tries to acquire the lock `L`. - - proc tryAcquireSys(L: var SysLock): bool {.inline.} = + + proc tryAcquireSys(L: var SysLock): bool {.inline.} = result = tryAcquireSysAux(L) != 0'i32 proc acquireSys(L: var SysLock) {.stdcall, noSideEffect, dynlib: "kernel32", importc: "EnterCriticalSection".} ## Acquires the lock `L`. - + proc releaseSys(L: var SysLock) {.stdcall, noSideEffect, dynlib: "kernel32", importc: "LeaveCriticalSection".} ## Releases the lock `L`. @@ -46,11 +46,11 @@ when defined(Windows): proc deinitSys(L: var SysLock) {.stdcall, noSideEffect, dynlib: "kernel32", importc: "DeleteCriticalSection".} - proc createEvent(lpEventAttributes: pointer, + proc createEvent(lpEventAttributes: pointer, bManualReset, bInitialState: int32, lpName: cstring): SysCond {.stdcall, noSideEffect, dynlib: "kernel32", importc: "CreateEventA".} - + proc closeHandle(hObject: Handle) {.stdcall, noSideEffect, dynlib: "kernel32", importc: "CloseHandle".} proc waitForSingleObject(hHandle: Handle, dwMilliseconds: int32): int32 {. 
@@ -58,7 +58,7 @@ when defined(Windows): proc signalSysCond(hEvent: SysCond) {.stdcall, noSideEffect, dynlib: "kernel32", importc: "SetEvent".} - + proc initSysCond(cond: var SysCond) {.inline.} = cond = createEvent(nil, 0'i32, 0'i32, nil) proc deinitSysCond(cond: var SysCond) {.inline.} = @@ -86,7 +86,7 @@ else: proc tryAcquireSysAux(L: var SysLock): cint {.noSideEffect, importc: "pthread_mutex_trylock", header: "<pthread.h>".} - proc tryAcquireSys(L: var SysLock): bool {.inline.} = + proc tryAcquireSys(L: var SysLock): bool {.inline.} = result = tryAcquireSysAux(L) == 0'i32 proc releaseSys(L: var SysLock) {.noSideEffect, @@ -100,7 +100,7 @@ else: importc: "pthread_cond_wait", header: "<pthread.h>", noSideEffect.} proc signalSysCond(cond: var SysCond) {. importc: "pthread_cond_signal", header: "<pthread.h>", noSideEffect.} - + proc deinitSysCond(cond: var SysCond) {.noSideEffect, importc: "pthread_cond_destroy", header: "<pthread.h>".} - + diff --git a/lib/system/sysspawn.nim b/lib/system/sysspawn.nim index 5f8f2b2c5..7aef86df9 100644 --- a/lib/system/sysspawn.nim +++ b/lib/system/sysspawn.nim @@ -9,7 +9,7 @@ ## Implements Nim's 'spawn'. -when not declared(NimString): +when not declared(NimString): {.error: "You must not import this module explicitly".} {.push stackTrace:off.} diff --git a/lib/system/timers.nim b/lib/system/timers.nim index 74748c541..ac8418824 100644 --- a/lib/system/timers.nim +++ b/lib/system/timers.nim @@ -34,8 +34,8 @@ when defined(windows): elif defined(macosx): type - MachTimebaseInfoData {.pure, final, - importc: "mach_timebase_info_data_t", + MachTimebaseInfoData {.pure, final, + importc: "mach_timebase_info_data_t", header: "<mach/mach_time.h>".} = object numer, denom: int32 {.deprecated: [TMachTimebaseInfoData: MachTimebaseInfoData].} @@ -46,10 +46,10 @@ elif defined(macosx): proc getTicks(): Ticks {.inline.} = result = Ticks(mach_absolute_time()) - + var timeBaseInfo: MachTimebaseInfoData mach_timebase_info(timeBaseInfo) - + proc `-`(a, b: Ticks): Nanos = result = (a.int64 - b.int64) * timeBaseInfo.numer div timeBaseInfo.denom @@ -57,10 +57,10 @@ elif defined(posixRealtime): type Clockid {.importc: "clockid_t", header: "<time.h>", final.} = object - TimeSpec {.importc: "struct timespec", header: "<time.h>", + TimeSpec {.importc: "struct timespec", header: "<time.h>", final, pure.} = object ## struct timespec - tv_sec: int ## Seconds. - tv_nsec: int ## Nanoseconds. + tv_sec: int ## Seconds. + tv_nsec: int ## Nanoseconds. {.deprecated: [TClockid: Clickid, TTimeSpec: TimeSpec].} var @@ -77,12 +77,12 @@ elif defined(posixRealtime): proc `-`(a, b: Ticks): Nanos {.borrow.} else: - # fallback Posix implementation: + # fallback Posix implementation: type - Timeval {.importc: "struct timeval", header: "<sys/select.h>", + Timeval {.importc: "struct timeval", header: "<sys/select.h>", final, pure.} = object ## struct timeval - tv_sec: int ## Seconds. - tv_usec: int ## Microseconds. + tv_sec: int ## Seconds. + tv_usec: int ## Microseconds. {.deprecated: [Ttimeval: Timeval].} proc posix_gettimeofday(tp: var Timeval, unused: pointer = nil) {. 
importc: "gettimeofday", header: "<sys/time.h>".} @@ -90,7 +90,7 @@ else: proc getTicks(): Ticks = var t: Timeval posix_gettimeofday(t) - result = Ticks(int64(t.tv_sec) * 1000_000_000'i64 + + result = Ticks(int64(t.tv_sec) * 1000_000_000'i64 + int64(t.tv_usec) * 1000'i64) proc `-`(a, b: Ticks): Nanos {.borrow.} diff --git a/lib/system/widestrs.nim b/lib/system/widestrs.nim index 77310b289..5a30a7c0f 100644 --- a/lib/system/widestrs.nim +++ b/lib/system/widestrs.nim @@ -124,7 +124,7 @@ proc `$`*(w: WideCString, estimate: int, replacement: int = 0xFFFD): string = if ch >= UNI_SUR_HIGH_START and ch <= UNI_SUR_HIGH_END: # If the 16 bits following the high surrogate are in the source buffer... let ch2 = int(cast[uint16](w[i])) - + # If it's a low surrogate, convert to UTF32: if ch2 >= UNI_SUR_LOW_START and ch2 <= UNI_SUR_LOW_END: ch = (((ch and halfMask) shl halfShift) + (ch2 and halfMask)) + halfBase @@ -135,7 +135,7 @@ proc `$`*(w: WideCString, estimate: int, replacement: int = 0xFFFD): string = elif ch >= UNI_SUR_LOW_START and ch <= UNI_SUR_LOW_END: #invalid UTF-16 ch = replacement - + if ch < 0x80: result.add chr(ch) elif ch < 0x800: @@ -155,6 +155,6 @@ proc `$`*(w: WideCString, estimate: int, replacement: int = 0xFFFD): string = result.add chr(0xFFFD shr 12 or 0b1110_0000) result.add chr(0xFFFD shr 6 and ones(6) or 0b10_0000_00) result.add chr(0xFFFD and ones(6) or 0b10_0000_00) - + proc `$`*(s: WideCString): string = result = s $ 80 |
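
For context on the largest file above: ``channels.nim`` documents a thread-communication ``Channel`` type whose basic use looks roughly like the following minimal sketch (assuming compilation with ``--threads:on`` as the module's doc comment requires; the ``worker`` proc and the ``string`` message type are illustrative only, not part of this change):

  # compile with: nim c --threads:on example.nim
  var chan: Channel[string]        # declared at module scope so every thread can reach it

  proc worker() {.thread.} =
    chan.send("hello from worker") # send() deep-copies the message into the channel

  chan.open()                      # initialize the channel's queue, lock and condition variable
  var t: Thread[void]
  createThread(t, worker)
  echo chan.recv()                 # recv() blocks until a message has arrived
  joinThread(t)
  chan.close()                     # free the channel's resources

As the doc comments above note, ``recv`` blocks until a message is available, while ``tryRecv`` and ``peek`` provide non-blocking alternatives.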