diff options
author | Araq <rumpf_a@web.de> | 2012-02-02 00:16:33 +0100 |
---|---|---|
committer | Araq <rumpf_a@web.de> | 2012-02-02 00:16:33 +0100 |
commit | 7efe817ca3ad2323a8200b2f73539da4b9bfad24 (patch) | |
tree | 47c2f30bd4354d24f9e80389bcd1dbb8a5ca1905 | |
parent | 4203aef827f44b86f399ee6bd2733f29028f4d16 (diff) | |
download | Nim-7efe817ca3ad2323a8200b2f73539da4b9bfad24.tar.gz |
bugfix: threading on PowerPC
-rwxr-xr-x | lib/system/alloc.nim | 19 | ||||
-rw-r--r-- | tests/mmaptest.nim | 48 |
2 files changed, 64 insertions(+), 3 deletions(-)
diff --git a/lib/system/alloc.nim b/lib/system/alloc.nim index f3474a527..981dac977 100755 --- a/lib/system/alloc.nim +++ b/lib/system/alloc.nim @@ -14,6 +14,11 @@ # ------------ platform specific chunk allocation code ----------------------- +# some platforms have really weird unmap behaviour: unmap(blockStart, PageSize) +# really frees the whole block. Happens for Linux/PowerPC for example. Amd64 +# and x86 are safe though: +const weirdUnmap = not (defined(amd64) or defined(i386)) + when defined(posix): const PROT_READ = 1 # page can be read @@ -425,7 +430,7 @@ proc freeBigChunk(a: var TMemRegion, c: PBigChunk) = excl(a.chunkStarts, pageIndex(c)) c = cast[PBigChunk](le) - if c.size < ChunkOsReturn: + if c.size < ChunkOsReturn or weirdUnmap: incl(a, a.chunkStarts, pageIndex(c)) updatePrevSize(a, c, c.size) ListAdd(a.freeChunksList, c) @@ -697,8 +702,16 @@ proc deallocOsPages(a: var TMemRegion) = # we free every 'ordinarily' allocated page by iterating over the page bits: for p in elements(a.chunkStarts): var page = cast[PChunk](p shl pageShift) - var size = if page.size < PageSize: PageSize else: page.size - osDeallocPages(page, size) + when not weirdUnmap: + var size = if page.size < PageSize: PageSize else: page.size + osDeallocPages(page, size) + else: + # Linux on PowerPC for example frees MORE than asked if 'munmap' + # receives the start of an originally mmap'ed memory block. This is not + # too bad, but we must not access 'page.size' then as that could trigger + # a segfault. But we don't need to access 'page.size' here anyway,
+ # because calling munmap with PageSize suffices: + osDeallocPages(page, PageSize) # And then we free the pages that are in use for the page bits: llDeallocAll(a) diff --git a/tests/mmaptest.nim b/tests/mmaptest.nim new file mode 100644 index 000000000..c304920af --- /dev/null +++ b/tests/mmaptest.nim @@ -0,0 +1,48 @@ +# Small test program to test for mmap() weirdnesses + +include "lib/system/ansi_c" + +const + PageSize = 4096 + PROT_READ = 1 # page can be read + PROT_WRITE = 2 # page can be written + MAP_PRIVATE = 2 # Changes are private + +when defined(macosx) or defined(bsd): + const MAP_ANONYMOUS = 0x1000 +elif defined(solaris): + const MAP_ANONYMOUS = 0x100 +else: + var + MAP_ANONYMOUS {.importc: "MAP_ANONYMOUS", header: "<sys/mman.h>".}: cint + +proc mmap(adr: pointer, len: int, prot, flags, fildes: cint, + off: int): pointer {.header: "<sys/mman.h>".} + +proc munmap(adr: pointer, len: int) {.header: "<sys/mman.h>".} + +proc osAllocPages(size: int): pointer {.inline.} = + result = mmap(nil, size, PROT_READ or PROT_WRITE, + MAP_PRIVATE or MAP_ANONYMOUS, -1, 0) + if result == nil or result == cast[pointer](-1): + quit 1 + cfprintf(c_stdout, "allocated pages %p..%p\n", result, + cast[int](result) + size) + +proc osDeallocPages(p: pointer, size: int) {.inline} = + cfprintf(c_stdout, "freed pages %p..%p\n", p, cast[int](p) + size) + munmap(p, size-1) + +proc `+!!`(p: pointer, size: int): pointer {.inline.} = + result = cast[pointer](cast[int](p) + size) + +var p = osAllocPages(3 * PageSize) + +osDeallocPages(p, PageSize) +# If this fails the OS has freed the whole block starting at 'p': +echo(cast[ptr int](p +!! (pageSize*2))[]) + +osDeallocPages(p +!! PageSize*2, PageSize) +osDeallocPages(p +!! PageSize, PageSize) + + |