path: root/compiler/vmdef.nim
blob: 263ec8378879345b77ab6dedb9d3a5a86e782833
#
#
#           The Nim Compiler
#        (c) Copyright 2013 Andreas Rumpf
#
#    See the file "copying.txt", included in this
#    distribution, for details about the copyright.
#

## This module contains the type definitions for the new evaluation engine.
## An instruction occupies 1-3 int32s in memory; this is a register-based VM.

import ast, passes, msgs, idents, intsets

const
  byteExcess* = 128 # we use excess-K for immediates
  wordExcess* = 32768

  MaxLoopIterations* = 1_500_000 # max iterations of all loops

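# Worked example of the excess-K ("biased") encoding: an immediate i is
# stored as the unsigned value i + K, so a byte immediate of -3 is
# emitted as -3 + byteExcess = 125 and decoded as 125 - byteExcess = -3;
# ABx operands such as jump offsets use wordExcess the same way (see
# jmpDiff at the end of this module).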

type
  TRegister* = range[0..255]
  TDest* = range[-1 .. 255]
  TInstr* = distinct uint32
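  # Layout of a TInstr (see the decoding templates at the bottom of this
  # module): bits 0..7 hold the opcode, bits 8..15 register A, bits
  # 16..23 register B and bits 24..31 register C; instructions from
  # firstABxInstr onwards use bits 16..31 as a single Bx operand instead.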

  TOpcode* = enum
    opcEof,         # end of code
    opcRet,         # return
    opcYldVoid,     # yield with no value
    opcYldVal,      # yield with a value

    opcAsgnInt,
    opcAsgnStr,
    opcAsgnFloat,
    opcAsgnRef,
    opcAsgnComplex,
    opcRegToNode,
    opcNodeToReg,

    opcLdArr,  # a = b[c]
    opcWrArr,  # a[b] = c
    opcLdObj,  # a = b.c
    opcWrObj,  # a.b = c
    opcAddrReg,
    opcAddrNode,
    opcLdDeref,
    opcWrDeref,
    opcWrStrIdx,
    opcLdStrIdx, # a = b[c]

    opcAddInt,
    opcAddImmInt,
    opcSubInt,
    opcSubImmInt,
    opcLenSeq,
    opcLenStr,

    opcIncl, opcInclRange, opcExcl, opcCard, opcMulInt, opcDivInt, opcModInt,
    opcAddFloat, opcSubFloat, opcMulFloat, opcDivFloat, opcShrInt, opcShlInt,
    opcBitandInt, opcBitorInt, opcBitxorInt, opcAddu, opcSubu, opcMulu,
    opcDivu, opcModu, opcEqInt, opcLeInt, opcLtInt, opcEqFloat,
    opcLeFloat, opcLtFloat, opcLeu, opcLtu,
    opcEqRef, opcEqNimrodNode, opcSameNodeType,
    opcXor, opcNot, opcUnaryMinusInt, opcUnaryMinusFloat, opcBitnotInt,
    opcEqStr, opcLeStr, opcLtStr, opcEqSet, opcLeSet, opcLtSet,
    opcMulSet, opcPlusSet, opcMinusSet, opcSymdiffSet, opcConcatStr,
    opcContainsSet, opcRepr, opcSetLenStr, opcSetLenSeq,
    opcIsNil, opcOf, opcIs,
    opcSubStr, opcParseFloat, opcConv, opcCast,
    opcQuit, opcReset,
    opcNarrowS, opcNarrowU,

    opcAddStrCh,
    opcAddStrStr,
    opcAddSeqElem,
    opcRangeChck,

    opcNAdd,
    opcNAddMultiple,
    opcNKind,
    opcNIntVal,
    opcNFloatVal,
    opcNSymbol,
    opcNIdent,
    opcNGetType,
    opcNStrVal,

    opcNSetIntVal,
    opcNSetFloatVal, opcNSetSymbol, opcNSetIdent, opcNSetType, opcNSetStrVal,
    opcNNewNimNode, opcNCopyNimNode, opcNCopyNimTree, opcNDel, opcGenSym,

    opcSlurp,
    opcGorge,
    opcParseExprToAst,
    opcParseStmtToAst,
    opcQueryErrorFlag,
    opcNError,
    opcNWarning,
    opcNHint,
    opcNLineInfo,
    opcEqIdent,
    opcStrToIdent,
    opcIdentToStr,
    opcGetImpl,

    opcEcho,
    opcIndCall, # call regStart, n; where regStart = fn, arg1, ...; result is discarded
    opcIndCallAsgn, # dest = call regStart, n; where regStart = fn, arg1, ...

    opcRaise,
    opcNChild,
    opcNSetChild,
    opcCallSite,
    opcNewStr,

    opcTJmp,  # jump Bx if A != 0
    opcFJmp,  # jump Bx if A == 0
    opcJmp,   # jump Bx
    opcJmpBack, # jump Bx; resulting from a while loop
    opcBranch,  # branch for 'case'
    opcTry,
    opcExcept,
    opcFinally,
    opcFinallyEnd,
    opcNew,
    opcNewSeq,
    opcLdNull,    # dest = nullvalue(types[Bx])
    opcLdNullReg,
    opcLdConst,   # dest = constants[Bx]
    opcAsgnConst, # dest = copy(constants[Bx])
    opcLdGlobal,  # dest = globals[Bx]
    opcLdGlobalAddr, # dest = addr(globals[Bx])

    opcLdImmInt,  # dest = immediate value
    opcNBindSym,
    opcSetType,   # dest.typ = types[Bx]
    opcTypeTrait,
    opcMarshalLoad, opcMarshalStore

  TBlock* = object
    label*: PSym
    fixups*: seq[TPosition]

  TEvalMode* = enum           ## reason for evaluation
    emRepl,                   ## evaluate because in REPL mode
    emConst,                  ## evaluate for 'const' according to spec
    emOptimize,               ## evaluate for optimization purposes (same as
                              ## emConst?)
    emStaticExpr,             ## evaluate for enforced compile time eval
                              ## ('static' context)
    emStaticStmt              ## 'static' as a statement

  TSandboxFlag* = enum        ## what the evaluation engine should allow
    allowCast,                ## allow unsafe language feature: 'cast'
    allowFFI,                 ## allow the FFI
    allowInfiniteLoops        ## allow endless loops
  TSandboxFlags* = set[TSandboxFlag]

  TSlotKind* = enum   # We try to re-use slots in a smart way to
                      # minimize allocations; however the VM supports arbitrary
                      # temporary slot usage. This is required for the parameter
                      # passing implementation.
    slotEmpty,        # slot is unused
    slotFixedVar,     # slot is used for a fixed var/result (assignment then requires a copy)
    slotFixedLet,     # slot is used for a fixed param/let
    slotTempUnknown,  # temporary slot whose type is not yet known (argument of a proc call)
    slotTempInt,      # some temporary int
    slotTempFloat,    # some temporary float
    slotTempStr,      # some temporary string
    slotTempComplex,  # some complex temporary (s.node field is used)
    slotTempPerm      # slot is temporary but permanent (hack)

  PProc* = ref object
    blocks*: seq[TBlock]    # blocks; temp data structure
    sym*: PSym
    slots*: array[TRegister, tuple[inUse: bool, kind: TSlotKind]]
    maxSlots*: int

  VmArgs* = object
    ra*, rb*, rc*: Natural
    slots*: pointer
    currentException*: PNode
    currentLineInfo*: TLineInfo
  VmCallback* = proc (args: VmArgs) {.closure.}

  PCtx* = ref TCtx
  TCtx* = object of passes.TPassContext # code gen context
    code*: seq[TInstr]
    debug*: seq[TLineInfo]  # line info for every instruction; kept separate
                            # to not slow down interpretation
    globals*: PNode         # global data
    constants*: PNode       # constant data
    types*: seq[PType]      # some instructions reference types (e.g. 'except')
    currentExceptionA*, currentExceptionB*: PNode
    exceptionInstr*: int # index of instruction that raised the exception
    prc*: PProc
    module*: PSym
    callsite*: PNode
    mode*: TEvalMode
    features*: TSandboxFlags
    traceActive*: bool
    loopIterations*: int
    comesFromHeuristic*: TLineInfo # Heuristic for better macro stack traces
    callbacks*: seq[tuple[key: string, value: VmCallback]]
    errorFlag*: string
    cache*: IdentCache

  TPosition* = distinct int

  PEvalContext* = PCtx

proc newCtx*(module: PSym; cache: IdentCache): PCtx =
  PCtx(code: @[], debug: @[],
    globals: newNode(nkStmtListExpr), constants: newNode(nkStmtList), types: @[],
    prc: PProc(blocks: @[]), module: module, loopIterations: MaxLoopIterations,
    comesFromHeuristic: unknownLineInfo(), callbacks: @[], errorFlag: "",
    cache: cache)
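
# Note: a fresh context starts with empty code/debug streams and a full
# loop budget of MaxLoopIterations; `refresh` below resets only the
# per-module pieces (module, prc, loop budget) so one PCtx can be reused
# when compiling several modules.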

proc refresh*(c: PCtx, module: PSym) =
  c.module = module
  c.prc = PProc(blocks: @[])
  c.loopIterations = MaxLoopIterations

proc registerCallback*(c: PCtx; name: string; callback: VmCallback) =
  c.callbacks.add((name, callback))
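
# Usage sketch (illustrative only, assuming a `c: PCtx` built via newCtx):
# the VM routes calls to procs registered under a matching fully
# qualified name to the closure, passing the call's registers via
# VmArgs. The key below is a hypothetical example:
#
#   registerCallback c, "stdlib.times.getTime", proc (a: VmArgs) =
#     # read arguments via a.slots/a.rb, write the result to register a.ra
#     discard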

const
  firstABxInstr* = opcTJmp
  largeInstrs* = { # instructions which use 2 int32s instead of 1:
    opcSubStr, opcConv, opcCast, opcNewSeq, opcOf,
    opcMarshalLoad, opcMarshalStore}
  slotSomeTemp* = slotTempUnknown
  relativeJumps* = {opcTJmp, opcFJmp, opcJmp, opcJmpBack}

template opcode*(x: TInstr): TOpcode = TOpcode(x.uint32 and 0xff'u32)
template regA*(x: TInstr): TRegister = TRegister(x.uint32 shr 8'u32 and 0xff'u32)
template regB*(x: TInstr): TRegister = TRegister(x.uint32 shr 16'u32 and 0xff'u32)
template regC*(x: TInstr): TRegister = TRegister(x.uint32 shr 24'u32)
template regBx*(x: TInstr): int = (x.uint32 shr 16'u32).int

template jmpDiff*(x: TInstr): int = regBx(x) - wordExcess
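
# What follows is an illustrative self-check, not part of the compiler:
# a minimal sketch of how the accessor templates above take an
# instruction apart. The hand-rolled `abc` helper is a hypothetical
# encoder written for this example; the compiler's real instruction
# emitter lives elsewhere (vmgen).
when isMainModule:
  proc abc(opc: TOpcode; a, b, c: TRegister): TInstr =
    # opcode in bits 0..7, registers A/B/C in the next three bytes
    TInstr(opc.uint32 or (a.uint32 shl 8) or (b.uint32 shl 16) or
           (c.uint32 shl 24))
  let instr = abc(opcAddInt, 1, 2, 3)       # encodes r1 = r2 + r3
  doAssert instr.opcode == opcAddInt
  doAssert instr.regA == 1 and instr.regB == 2 and instr.regC == 3
  # ABx form: jump offsets are biased by wordExcess, so a backwards
  # jump of 2 instructions stores -2 + wordExcess = 32766 in bits 16..31:
  let jmp = TInstr(opcJmp.uint32 or ((-2 + wordExcess).uint32 shl 16))
  doAssert jmp.jmpDiff == -2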