"""
This page describes functions that plugins may implement to be called from Profanity on certain events. All functions are optional.
Examples:
::
def prof_on_start():
prof.cons_show("Profanity has started...")
def prof_pre_room_message_display(room, nick, message):
prof.cons_show("Manipulating chat room message before display...")
new_message = message + " (added by plugin)"
return new_message
def prof_on_contact_presence(barejid, resource, presence, status, priority):
notify_message = barejid + " is " + presence
prof.notify(notify_message, 5, "Presence")
"""
def prof_init(version, status, account_name, fulljid):
    """Called when a plugin is loaded, either when Profanity is started, or when the ``/plugins load`` or ``/plugins install`` commands are called.

    :param version: the version of Profanity
    :param status: the package status of Profanity, ``"development"`` or ``"release"``
    :param account_name: account name of the currently logged in account, or ``None`` if not logged in
    :param fulljid: the user's full Jabber ID (barejid and resource) if logged in, ``None`` otherwise
    :type version: str or unicode
    :type status: str or unicode
    :type account_name: str, unicode or None
    :type fulljid: str, unicode or None
    """
    pass

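# A minimal sketch of a prof_init implementation (the body is illustrative,
# not prescribed by the API; prof.cons_show is the same call used in the
# module docstring examples above):
#
#     def prof_init(version, status, account_name, fulljid):
#         if account_name is not None:
#             prof.cons_show("Plugin loaded, logged in as " + account_name)
#         else:
#             prof.cons_show("Plugin loaded on Profanity " + version)
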
def prof_on_start():
    """Called when Profanity is started
    """
    pass


def prof_on_shutdown():
    """Called when the user quits Profanity
    """
    pass


def prof_on_unload():
    """Called when a plugin is unloaded with the ``/plugins unload`` command
    """
    pass

def prof_on_connect(account_name, fulljid):
    """Called when the user connects with an account

    :param account_name: account name of the account used for logging in
    :param fulljid: the full Jabber ID (barejid and resource) of the account
    :type account_name: str or unicode
    :type fulljid: str or unicode
    """
    pass

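# A minimal sketch of a connect handler (illustrative; the prof.notify
# arguments besides the message mirror the Presence example in the module
# docstring):
#
#     def prof_on_connect(account_name, fulljid):
#         prof.notify(fulljid + " connected", 5, "Connection")
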
def prof_on_disconnect(account_name, fulljid):
    """Called when the user disconnects an account

    :param account_name: account name of the account being disconnected
    :param fulljid: the full Jabber ID (barejid and resource) of the account
    :type account_name: str or unicode
    :type fulljid: str or unicode
    """
    pass
#
#
#            Nimrod's Runtime Library
#        (c) Copyright 2011 Andreas Rumpf
#
#    See the file "copying.txt", included in this
#    distribution, for details about the copyright.
#

# Low level allocator for Nimrod. Has been designed to support the GC.
# TODO:
# - eliminate "used" field
# - make searching for block O(1)
# ------------ platform specific chunk allocation code -----------------------
when defined(posix):
  const
    PROT_READ = 1              # page can be read
    PROT_WRITE = 2             # page can be written
    MAP_PRIVATE = 2            # Changes are private

  when defined(linux) or defined(aix):
    const MAP_ANONYMOUS = 0x20 # don't use a file
  elif defined(macosx) or defined(bsd):
    const MAP_ANONYMOUS = 0x1000
  elif defined(solaris):
    const MAP_ANONYMOUS = 0x100
  else:
    {.error: "Port memory manager to your platform".}

  proc mmap(adr: pointer, len: int, prot, flags, fildes: cint,
            off: int): pointer {.header: "<sys/mman.h>".}

  proc munmap(adr: pointer, len: int) {.header: "<sys/mman.h>".}

  proc osAllocPages(size: int): pointer {.inline.} =
    result = mmap(nil, size, PROT_READ or PROT_WRITE,
                  MAP_PRIVATE or MAP_ANONYMOUS, -1, 0)
    if result == nil or result == cast[pointer](-1):
      raiseOutOfMem()

  proc osDeallocPages(p: pointer, size: int) {.inline.} =
    when reallyOsDealloc: munmap(p, size)

elif defined(windows):
  const
    MEM_RESERVE = 0x2000
    MEM_COMMIT = 0x1000
    MEM_TOP_DOWN = 0x100000
    PAGE_READWRITE = 0x04

    MEM_DECOMMIT = 0x4000
    MEM_RELEASE = 0x8000

  proc VirtualAlloc(lpAddress: pointer, dwSize: int, flAllocationType,
                    flProtect: int32): pointer {.
                    header: "<windows.h>", stdcall.}

  proc VirtualFree(lpAddress: pointer, dwSize: int,
                   dwFreeType: int32) {.header: "<windows.h>", stdcall.}

  proc osAllocPages(size: int): pointer {.inline.} =
    result = VirtualAlloc(nil, size, MEM_RESERVE or MEM_COMMIT,
                          PAGE_READWRITE)
    if result == nil: raiseOutOfMem()

  proc osDeallocPages(p: pointer, size: int) {.inline.} =
    # according to Microsoft, 0 is the only correct value for MEM_RELEASE:
    # This means that the OS has some different view over how big the block is
    # that we want to free! So, we cannot reliably release the memory back to
    # Windows :-(. We have to live with MEM_DECOMMIT instead.
    when reallyOsDealloc: VirtualFree(p, size, MEM_DECOMMIT)

else:
  {.error: "Port memory manager to your platform".}
# --------------------- end of non-portable code -----------------------------
# We manage *chunks* of memory. Each chunk is a multiple of the page size.
# Each chunk starts at an address that is divisible by the page size. Chunks
# that are bigger than ``ChunkOsReturn`` are returned back to the operating
# system immediately.
const
  ChunkOsReturn = 256 * PageSize             # 1 MB
  InitialMemoryRequest = ChunkOsReturn div 2 # < ChunkOsReturn!
  SmallChunkSize = PageSize
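# Worked sizes, assuming the common PageSize of 4096 bytes (PageSize itself
# is defined elsewhere in the runtime):
#   ChunkOsReturn        = 256 * 4096 = 1_048_576 bytes (1 MB)
#   InitialMemoryRequest = 524_288 bytes (512 KB)
#   SmallChunkSize       = 4096 bytes (one page)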
type
  PTrunk = ptr TTrunk
  TTrunk {.final.} = object
    next: PTrunk         # all nodes are connected with this pointer
    key: int             # start address at bit 0
    bits: array[0..IntsPerTrunk-1, int] # a bit vector

  TTrunkBuckets = array[0..255, PTrunk]
  TIntSet {.final.} = object
    data: TTrunkBuckets

type
  TAlignType = biggestFloat
  TFreeCell {.final, pure.} = object
    next: ptr TFreeCell  # next free cell in chunk (overlaid with refcount)
    zeroField: int       # 0 means cell is not used (overlaid with typ field)
                         # 1 means cell is manually managed pointer
                         # otherwise a PNimType is stored in there

  PChunk = ptr TBaseChunk
  PBigChunk = ptr TBigChunk
  PSmallChunk = ptr TSmallChunk

  TBaseChunk {.pure.} = object
    prevSize: int        # size of previous chunk; for coalescing
    size: int            # if < PageSize it is a small chunk
    used: bool           # later will be optimized into prevSize...

  TSmallChunk = object of TBaseChunk
    next, prev: PSmallChunk  # chunks of the same size
    freeList: ptr TFreeCell
    free: int            # how many bytes remain
    acc: int             # accumulator for small object allocation
    data: TAlignType     # start of usable memory

  TBigChunk = object of TBaseChunk # not necessarily > PageSize!
    next, prev: PBigChunk    # chunks of the same (or bigger) size
    align: int
    data: TAlignType     # start of usable memory
template smallChunkOverhead(): expr = sizeof(TSmallChunk)-sizeof(TAlignType)
template bigChunkOverhead(): expr = sizeof(TBigChunk)-sizeof(TAlignType)
proc roundup(x, v: int): int {.inline.} =
  result = (x + (v-1)) and not (v-1)
  sysAssert(result >= x)
  #return ((-x) and (v-1)) +% x

sysAssert(roundup(14, PageSize) == PageSize)
sysAssert(roundup(15, 8) == 16)
sysAssert(roundup(65, 8) == 72)
# ------------- chunk table ---------------------------------------------------
# We use a PtrSet of chunk starts and a table[Page, chunksize] for chunk
# endings of big chunks. This is needed by the merging operation. The only
# remaining operation is best-fit for big chunks. Since there is a size-limit
# for big chunks (because greater than the limit means they are returned back
# to the OS), a fixed size array can be used.
type
  PLLChunk = ptr TLLChunk
  TLLChunk {.pure.} = object ## *low-level* chunk
    size: int              # remaining size
    acc: int               # accumulator
    next: PLLChunk         # next low-level chunk; only needed for dealloc

  TMemRegion {.final, pure.} = object
    llmem: PLLChunk
    currMem, maxMem, freeMem: int # memory sizes (allocated from OS)
    lastSize: int # needed for the case that OS gives us pages linearly
    freeSmallChunks: array[0..SmallChunkSize div MemAlign-1, PSmallChunk]
    freeChunksList: PBigChunk # XXX make this a datastructure with O(1) access
    chunkStarts: TIntSet
proc incCurrMem(a: var TMemRegion, bytes: int) {.inline.} =
  inc(a.currMem, bytes)

proc decCurrMem(a: var TMemRegion, bytes: int) {.inline.} =
  a.maxMem = max(a.maxMem, a.currMem)
  dec(a.currMem, bytes)

proc getMaxMem(a: var TMemRegion): int =
  # Since we update maxPagesCount only when freeing pages,
  # maxPagesCount may not be up to date. Thus we use the
  # maximum of both values here:
  return max(a.currMem, a.maxMem)
proc llAlloc(a: var TMemRegion, size: int): pointer =
  # *low-level* alloc for the memory manager's data structures. Deallocation
  # is done at the end of the allocator's life time.
  if a.llmem == nil or size > a.llmem.size:
    # the requested size is ``roundup(size+sizeof(TLLChunk), PageSize)``, but
    # since we know ``size`` is a (small) constant, we know the requested size
    # is one page:
    sysAssert roundup(size+sizeof(TLLChunk), PageSize) == PageSize
    var old = a.llmem # can be nil and is correct with nil
    a.llmem = cast[PLLChunk](osAllocPages(PageSize))
    incCurrMem(a, PageSize)
    a.llmem.size = PageSize - sizeof(TLLChunk)
    a.llmem.acc = sizeof(TLLChunk)
    a.llmem.next = old
  result = cast[pointer](cast[TAddress](a.llmem) + a.llmem.acc)
  dec(a.llmem.size, size)
  inc(a.llmem.acc, size)
  zeroMem(result, size)
proc llDeallocAll(a: var TMemRegion) =
  var it = a.llmem
  while it != nil:
    # we know each block in the list has the size of 1 page:
    var next = it.next
    osDeallocPages(it, PageSize)
    it = next

proc IntSetGet(t: TIntSet, key: int): PTrunk =
  var it = t.data[key and high(t.data)]
  while it != nil:
    if it.key == key: return it
    it = it.next
  result = nil

proc IntSetPut(a: var TMemRegion, t: var TIntSet, key: int): PTrunk =
  result = IntSetGet(t, key)
  if result == nil:
    result = cast[PTrunk](llAlloc(a, sizeof(result[])))
    result.next = t.data[key and high(t.data)]
    t.data[key and high(t.data)] = result
    result.key = key

proc Contains(s: TIntSet, key: int): bool =
  var t = IntSetGet(s, key shr TrunkShift)
  if t != nil:
    var u = key and TrunkMask
    result = (t.bits[u shr IntShift] and (1 shl (u and IntMask))) != 0
  else:
    result = false

proc Incl(a: var TMemRegion, s: var TIntSet, key: int) =
  var t = IntSetPut(a, s, key shr TrunkShift)
  var u = key and TrunkMask
  t.bits[u shr IntShift] = t.bits[u shr IntShift] or (1 shl (u and IntMask))

proc Excl(s: var TIntSet, key: int) =
  var t = IntSetGet(s, key shr TrunkShift)
  if t != nil:
    var u = key and TrunkMask
    t.bits[u shr IntShift] = t.bits[u shr IntShift] and not
      (1 shl (u and IntMask))
iterator elements(t: TIntSet): int {.inline.} =
  # while traversing it is forbidden to change the set!
  for h in 0..high(t.data):
    var r = t.data[h]
    while r != nil:
      var i = 0
      while i <= high(r.bits):
        var w = r.bits[i] # taking a copy of r.bits[i] here is correct,
                          # because modifying operations are not allowed
                          # during traversal
        var j = 0
        while w != 0:        # test all remaining bits for zero
          if (w and 1) != 0: # the bit is set!
            yield (r.key shl TrunkShift) or (i shl IntShift +% j)
          inc(j)
          w = w shr 1
        inc(i)
      r = r.next
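# Decoding sketch for the iterator above: a set bit at word index i, bit j of
# trunk r stands for the value (r.key shl TrunkShift) or (i shl IntShift +% j).
# E.g. assuming 32-bit ints (so IntShift = 5; TrunkShift is defined elsewhere
# in the runtime), r.key = 2, i = 1, j = 3 yields (2 shl TrunkShift) or 35.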
# ------------- chunk management ----------------------------------------------
proc pageIndex(c: PChunk): int {.inline.} =
  result = cast[TAddress](c) shr PageShift

proc pageIndex(p: pointer): int {.inline.} =
  result = cast[TAddress](p) shr PageShift

proc pageAddr(p: pointer): PChunk {.inline.} =
  result = cast[PChunk](cast[TAddress](p) and not PageMask)
  #sysAssert(Contains(allocator.chunkStarts, pageIndex(result)))
proc requestOsChunks(a: var TMemRegion, size: int): PBigChunk =
  incCurrMem(a, size)
  inc(a.freeMem, size)
  result = cast[PBigChunk](osAllocPages(size))
  sysAssert((cast[TAddress](result) and PageMask) == 0)
  #zeroMem(result, size)
  result.next = nil
  result.prev = nil
  result.used = false
  result.size = size
  # update next.prevSize:
  var nxt = cast[TAddress](result) +% size
  sysAssert((nxt and PageMask) == 0)
  var next = cast[PChunk](nxt)
  if pageIndex(next) in a.chunkStarts:
    #echo("Next already allocated!")
    next.prevSize = size
  # set result.prevSize:
  var lastSize = if a.lastSize != 0: a.lastSize else: PageSize
  var prv = cast[TAddress](result) -% lastSize
  sysAssert((prv and PageMask) == 0)
  var prev = cast[PChunk](prv)
  if pageIndex(prev) in a.chunkStarts and prev.size == lastSize:
    #echo("Prev already allocated!")
    result.prevSize = lastSize
  else:
    result.prevSize = 0 # unknown
  a.lastSize = size # for next request
proc freeOsChunks(a: var TMemRegion, p: pointer, size: int) =
  # update next.prevSize:
  var c = cast[PChunk](p)
  var nxt = cast[TAddress](p) +% c.size
  sysAssert((nxt and PageMask) == 0)
  var next = cast[PChunk](nxt)
  if pageIndex(next) in a.chunkStarts:
    next.prevSize = 0 # XXX used
  excl(a.chunkStarts, pageIndex(p))
  osDeallocPages(p, size)
  decCurrMem(a, size)
  dec(a.freeMem, size)
  #c_fprintf(c_stdout, "[Alloc] back to OS: %ld\n", size)
proc isAccessible(a: TMemRegion, p: pointer): bool {.inline.} =
  result = Contains(a.chunkStarts, pageIndex(p))

proc contains[T](list, x: T): bool =
  var it = list
  while it != nil:
    if it == x: return true
    it = it.next

proc writeFreeList(a: TMemRegion) =
  var it = a.freeChunksList
  c_fprintf(c_stdout, "freeChunksList: %p\n", it)
  while it != nil:
    c_fprintf(c_stdout, "it: %p, next: %p, prev: %p\n",
              it, it.next, it.prev)
    it = it.next
proc ListAdd[T](head: var T, c: T) {.inline.} =
  sysAssert(c notin head)
  sysAssert c.prev == nil
  sysAssert c.next == nil
  c.next = head
  if head != nil:
    sysAssert head.prev == nil
    head.prev = c
  head = c

proc ListRemove[T](head: var T, c: T) {.inline.} =
  sysAssert(c in head)
  if c == head:
    head = c.next
    sysAssert c.prev == nil
    if head != nil: head.prev = nil
  else:
    sysAssert c.prev != nil
    c.prev.next = c.next
    if c.next != nil: c.next.prev = c.prev
  c.next = nil
  c.prev = nil
proc isSmallChunk(c: PChunk): bool {.inline.} =
  return c.size <= SmallChunkSize-smallChunkOverhead()

proc chunkUnused(c: PChunk): bool {.inline.} =
  result = not c.used

proc updatePrevSize(a: var TMemRegion, c: PBigChunk,
                    prevSize: int) {.inline.} =
  var ri = cast[PChunk](cast[TAddress](c) +% c.size)
  sysAssert((cast[TAddress](ri) and PageMask) == 0)
  if isAccessible(a, ri):
    ri.prevSize = prevSize
proc freeBigChunk(a: var TMemRegion, c: PBigChunk) =
  var c = c
  sysAssert(c.size >= PageSize)
  inc(a.freeMem, c.size)
  when coalescRight:
    var ri = cast[PChunk](cast[TAddress](c) +% c.size)
    sysAssert((cast[TAddress](ri) and PageMask) == 0)
    if isAccessible(a, ri) and chunkUnused(ri):
      sysAssert(not isSmallChunk(ri))
      if not isSmallChunk(ri):
        ListRemove(a.freeChunksList, cast[PBigChunk](ri))
        inc(c.size, ri.size)
        excl(a.chunkStarts, pageIndex(ri))
  when coalescLeft:
    if c.prevSize != 0:
      var le = cast[PChunk](cast[TAddress](c) -% c.prevSize)
      sysAssert((cast[TAddress](le) and PageMask) == 0)
      if isAccessible(a, le) and chunkUnused(le):
        sysAssert(not isSmallChunk(le))
        if not isSmallChunk(le):
          ListRemove(a.freeChunksList, cast[PBigChunk](le))
          inc(le.size, c.size)
          excl(a.chunkStarts, pageIndex(c))
          c = cast[PBigChunk](le)
  if c.size < ChunkOsReturn:
    incl(a, a.chunkStarts, pageIndex(c))
    updatePrevSize(a, c, c.size)
    ListAdd(a.freeChunksList, c)
    c.used = false
  else:
    freeOsChunks(a, c, c.size)
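# Coalescing sketch for freeBigChunk (illustrative sizes): freeing a 2-page
# chunk whose right neighbour is an unused big chunk of 3 pages merges them
# into one 5-page free chunk; the merged chunk is kept on freeChunksList only
# while its size stays below ChunkOsReturn, otherwise it is handed back to
# the OS via freeOsChunks.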
proc splitChunk(a: var TMemRegion, c: PBigChunk, size: int) =
  var rest = cast[PBigChunk](cast[TAddress](c) +% size)
  sysAssert(rest notin a.freeChunksList)
  rest.size = c.size - size
  rest.used = false
  rest.next = nil
  rest.prev = nil
  rest.prevSize = size
  updatePrevSize(a, c, rest.size)
  c.size = size
  incl(a, a.chunkStarts, pageIndex(rest))
  ListAdd(a.freeChunksList, rest)
proc getBigChunk(a: var TMemRegion, size: int): PBigChunk =
  # use first fit for now:
  sysAssert((size and PageMask) == 0)
  sysAssert(size > 0)
  result = a.freeChunksList
  block search:
    while result != nil:
      sysAssert chunkUnused(result)
      if result.size == size:
        ListRemove(a.freeChunksList, result)
        break search
      elif result.size > size:
        ListRemove(a.freeChunksList, result)
        splitChunk(a, result, size)
        break search
      result = result.next
      sysAssert result != a.freeChunksList
    if size < InitialMemoryRequest:
      result = requestOsChunks(a, InitialMemoryRequest)
      splitChunk(a, result, size)
    else:
      result = requestOsChunks(a, size)
  result.prevSize = 0 # XXX why is this needed?
  result.used = true
  incl(a, a.chunkStarts, pageIndex(result))
  dec(a.freeMem, size)
proc getSmallChunk(a: var TMemRegion): PSmallChunk =
  var res = getBigChunk(a, PageSize)
  sysAssert res.prev == nil
  sysAssert res.next == nil
  result = cast[PSmallChunk](res)
# -----------------------------------------------------------------------------
proc getCellSize(p: pointer): int {.inline.} =
  var c = pageAddr(p)
  result = c.size

proc memSize(a: TMemRegion, p: pointer): int {.inline.} =
  var c = pageAddr(p)
  result = c.size
proc rawAlloc(a: var TMemRegion, requestedSize: int): pointer =
  sysAssert(roundup(65, 8) == 72)
  sysAssert requestedSize >= sizeof(TFreeCell)
  var size = roundup(requestedSize, MemAlign)
  #c_fprintf(c_stdout, "alloc; size: %ld; %ld\n", requestedSize, size)
  if size <= SmallChunkSize-smallChunkOverhead():
    # allocate a small block: for small chunks, we use only its next pointer
    var s = size div MemAlign
    var c = a.freeSmallChunks[s]
    if c == nil:
      c = getSmallChunk(a)
      c.freeList = nil
      sysAssert c.size == PageSize
      c.size = size
      c.acc = size
      c.free = SmallChunkSize - smallChunkOverhead() - size
      c.next = nil
      c.prev = nil
      ListAdd(a.freeSmallChunks[s], c)
      result = addr(c.data)
      sysAssert((cast[TAddress](result) and (MemAlign-1)) == 0)
    else:
      sysAssert c.next != c
      #if c.size != size:
      #  c_fprintf(c_stdout, "csize: %lld; size %lld\n", c.size, size)
      sysAssert c.size == size
      if c.freeList == nil:
        sysAssert(c.acc + smallChunkOverhead() + size <= SmallChunkSize)
        result = cast[pointer](cast[TAddress](addr(c.data)) +% c.acc)
        inc(c.acc, size)
      else:
        result = c.freeList
        sysAssert(c.freeList.zeroField == 0)
        c.freeList = c.freeList.next
      dec(c.free, size)
      sysAssert((cast[TAddress](result) and (MemAlign-1)) == 0)
      if c.free < size:
        ListRemove(a.freeSmallChunks[s], c)
  else:
    size = roundup(requestedSize+bigChunkOverhead(), PageSize)
    # allocate a large block
    var c = getBigChunk(a, size)
    sysAssert c.prev == nil
    sysAssert c.next == nil
    sysAssert c.size == size
    result = addr(c.data)
    sysAssert((cast[TAddress](result) and (MemAlign-1)) == 0)
  sysAssert(isAccessible(a, result))
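# Worked example for the small path above, assuming MemAlign = 8 (MemAlign is
# defined elsewhere in the runtime): requestedSize = 13 rounds up to
# size = 16, so s = 2 and the request is served from a.freeSmallChunks[2];
# a freshly fetched page chunk then has
# free = SmallChunkSize - smallChunkOverhead() - 16 bytes left for further
# 16-byte cells.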
proc rawAlloc0(a: var TMemRegion, requestedSize: int): pointer =
  result = rawAlloc(a, requestedSize)
  zeroMem(result, requestedSize)
proc rawDealloc(a: var TMemRegion, p: pointer) =
  var c = pageAddr(p)
  if isSmallChunk(c):
    # `p` is within a small chunk:
    var c = cast[PSmallChunk](c)
    var s = c.size
    var f = cast[ptr TFreeCell](p)
    #echo("setting to nil: ", $cast[TAddress](addr(f.zeroField)))
    sysAssert(f.zeroField != 0)
    f.zeroField = 0
    f.next = c.freeList
    c.freeList = f
    when overwriteFree:
      # set to 0xff to check for usage after free bugs:
      c_memset(cast[pointer](cast[int](p) +% sizeof(TFreeCell)), -1'i32,
               s -% sizeof(TFreeCell))
    # check if it is not in the freeSmallChunks[s] list:
    if c.free < s:
      sysAssert c notin a.freeSmallChunks[s div memAlign]
      # add it to the freeSmallChunks[s] array:
      ListAdd(a.freeSmallChunks[s div memAlign], c)
      inc(c.free, s)
    else:
      inc(c.free, s)
      if c.free == SmallChunkSize-smallChunkOverhead():
        ListRemove(a.freeSmallChunks[s div memAlign], c)
        c.size = SmallChunkSize
        freeBigChunk(a, cast[PBigChunk](c))
  else:
    # set to 0xff to check for usage after free bugs:
    when overwriteFree: c_memset(p, -1'i32, c.size -% bigChunkOverhead())
    # free big chunk
    freeBigChunk(a, cast[PBigChunk](c))
proc isAllocatedPtr(a: TMemRegion, p: pointer): bool =
  if isAccessible(a, p):
    var c = pageAddr(p)
    if not chunkUnused(c):
      if isSmallChunk(c):
        var c = cast[PSmallChunk](c)
        var offset = (cast[TAddress](p) and (PageSize-1)) -%
                     smallChunkOverhead()
        result = (c.acc >% offset) and (offset %% c.size == 0) and
                 (cast[ptr TFreeCell](p).zeroField >% 1)
      else:
        var c = cast[PBigChunk](c)
        result = p == addr(c.data) and cast[ptr TFreeCell](p).zeroField >% 1
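# Worked example of the small-chunk test above (illustrative numbers): with
# c.size = 16, a pointer at page offset smallChunkOverhead() + 32 yields
# offset = 32; 32 is a multiple of 16, so the `offset %% c.size` test passes,
# and the `c.acc >% offset` test passes once at least three 16-byte cells have
# been handed out (c.acc >= 48). A mid-cell pointer giving offset = 40 would
# fail the modulo test.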
proc ptrSize(p: pointer): int =
  var x = cast[pointer](cast[TAddress](p) -% sizeof(TFreeCell))
  result = pageAddr(x).size - sizeof(TFreeCell)

proc alloc(allocator: var TMemRegion, size: int): pointer =
  result = rawAlloc(allocator, size+sizeof(TFreeCell))
  cast[ptr TFreeCell](result).zeroField = 1 # mark it as used
  sysAssert(not isAllocatedPtr(allocator, result))
  result = cast[pointer](cast[TAddress](result) +% sizeof(TFreeCell))
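# Layout produced by alloc/dealloc above: rawAlloc returns a block that starts
# with a TFreeCell header; alloc sets the header's zeroField to 1 and hands
# the caller the address just past the header, and dealloc subtracts
# sizeof(TFreeCell) again before calling rawDealloc.
#
#   [TFreeCell header][user data ...]
#   ^ rawAlloc result  ^ pointer seen by the caller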
proc alloc0(allocator: var TMemRegion, size: int): pointer =
  result = alloc(allocator, size)
  zeroMem(result, size)

proc dealloc(allocator: var TMemRegion, p: pointer) =
  var x = cast[pointer](cast[TAddress](p) -% sizeof(TFreeCell))
  sysAssert(cast[ptr TFreeCell](x).zeroField == 1)
  rawDealloc(allocator, x)
  sysAssert(not isAllocatedPtr(allocator, x))

proc realloc(allocator: var TMemRegion, p: pointer, newsize: int): pointer =
  if newsize > 0:
    result = alloc(allocator, newsize)
    if p != nil:
      copyMem(result, p, ptrSize(p))
      dealloc(allocator, p)
  elif p != nil:
    dealloc(allocator, p)
proc deallocOsPages(a: var TMemRegion) =
  # we free every 'ordinarily' allocated page by iterating over the page bits:
  for p in elements(a.chunkStarts):
    var page = cast[PChunk](p shl pageShift)
    var size = if page.size < PageSize: PageSize else: page.size
    osDeallocPages(page, size)
  # And then we free the pages that are in use for the page bits:
  llDeallocAll(a)

proc getFreeMem(a: TMemRegion): int {.inline.} = result = a.freeMem
proc getTotalMem(a: TMemRegion): int {.inline.} = result = a.currMem
proc getOccupiedMem(a: TMemRegion): int {.inline.} =
  result = a.currMem - a.freeMem
# ---------------------- thread memory region -------------------------------
template InstantiateForRegion(allocator: expr) =
  proc deallocOsPages = deallocOsPages(allocator)

  proc alloc(size: int): pointer =
    result = alloc(allocator, size)

  proc alloc0(size: int): pointer =
    result = alloc0(allocator, size)

  proc dealloc(p: pointer) =
    dealloc(allocator, p)

  proc realloc(p: pointer, newsize: int): pointer =
    result = realloc(allocator, p, newsize)

  when false:
    proc countFreeMem(): int =
      # only used for assertions
      var it = allocator.freeChunksList
      while it != nil:
        inc(result, it.size)
        it = it.next

  proc getFreeMem(): int =
    result = allocator.freeMem
    #sysAssert(result == countFreeMem())

  proc getTotalMem(): int = return allocator.currMem
  proc getOccupiedMem(): int = return getTotalMem() - getFreeMem()

  # -------------------- shared heap region ----------------------------------
  when hasThreadSupport:
    var sharedHeap: TMemRegion
    var heapLock: TSysLock
    InitSysLock(HeapLock)

  proc allocShared(size: int): pointer =
    when hasThreadSupport:
      AcquireSys(HeapLock)
      result = alloc(sharedHeap, size)
      ReleaseSys(HeapLock)
    else:
      result = alloc(size)

  proc allocShared0(size: int): pointer =
    result = allocShared(size)
    zeroMem(result, size)

  proc deallocShared(p: pointer) =
    when hasThreadSupport:
      AcquireSys(HeapLock)
      dealloc(sharedHeap, p)
      ReleaseSys(HeapLock)
    else:
      dealloc(p)

  proc reallocShared(p: pointer, newsize: int): pointer =
    when hasThreadSupport:
      AcquireSys(HeapLock)
      result = realloc(sharedHeap, p, newsize)
      ReleaseSys(HeapLock)
    else:
      result = realloc(p, newsize)
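# Usage sketch for InstantiateForRegion (hypothetical region name; the actual
# instantiation happens elsewhere in the runtime):
#
#   var myRegion: TMemRegion
#   InstantiateForRegion(myRegion)
#
# after which the parameterless alloc(size)/dealloc(p)/getFreeMem() procs all
# operate on myRegion.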