author    Araq <rumpf_a@web.de>  2011-04-11 23:28:53 +0200
committer Araq <rumpf_a@web.de>  2011-04-11 23:28:53 +0200
commit    46c41e43690cba9bc1caff6a994bb6915df8a1b7 (patch)
tree      c96be792eceb1d189cdb5bcff6e1a06f9b51e76c /lib/system
parent    3d696c3da53e5c41d839d8265fbc94f1c64980bb (diff)
download  Nim-46c41e43690cba9bc1caff6a994bb6915df8a1b7.tar.gz
p[] instead of p^
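
The change is purely syntactic: every pointer dereference written with the old postfix `^` operator is rewritten to the bracket form `[]`. As a rough illustration (not taken from this commit; the `Node` type and `demo` proc below are made up, and the snippet assumes current Nim syntax), dereferencing a `ptr` in the new style looks like this:

    type
      Node = object
        value: int

    proc demo() =
      var n = Node(value: 41)
      var p: ptr Node = addr(n)   # take the address of a local variable
      p[].value = p[].value + 1   # dereference with `[]`; previously spelled `p^.value`
      echo p[].value              # prints 42

    demo()

The diff below applies exactly this mechanical rewrite throughout lib/system.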
Diffstat (limited to 'lib/system')
-rwxr-xr-x  lib/system/alloc.nim   |  2
-rwxr-xr-x  lib/system/assign.nim  | 10
-rwxr-xr-x  lib/system/gc.nim      | 30
-rwxr-xr-x  lib/system/repr.nim    | 46
4 files changed, 44 insertions(+), 44 deletions(-)
diff --git a/lib/system/alloc.nim b/lib/system/alloc.nim
index 0e0b450b4..c385aa6fe 100755
--- a/lib/system/alloc.nim
+++ b/lib/system/alloc.nim
@@ -195,7 +195,7 @@ proc IntSetGet(t: TIntSet, key: int): PTrunk =
 proc IntSetPut(t: var TIntSet, key: int): PTrunk = 
   result = IntSetGet(t, key)
   if result == nil:
-    result = cast[PTrunk](llAlloc(allocator, sizeof(result^)))
+    result = cast[PTrunk](llAlloc(allocator, sizeof(result[])))
     result.next = t.data[key and high(t.data)]
     t.data[key and high(t.data)] = result
     result.key = key
diff --git a/lib/system/assign.nim b/lib/system/assign.nim
index 9ac00434e..c71525a12 100755
--- a/lib/system/assign.nim
+++ b/lib/system/assign.nim
@@ -34,13 +34,13 @@ proc genericAssignAux(dest, src: Pointer, mt: PNimType, shallow: bool) =
   case mt.Kind
   of tyString:
     var x = cast[ppointer](dest)
-    var s2 = cast[ppointer](s)^
+    var s2 = cast[ppointer](s)[]
     if s2 == nil or shallow:
       unsureAsgnRef(x, s2)
     else:
       unsureAsgnRef(x, copyString(cast[NimString](s2)))
   of tySequence:
-    var s2 = cast[ppointer](src)^
+    var s2 = cast[ppointer](src)[]
     var seq = cast[PGenericSeq](s2)      
     var x = cast[ppointer](dest)
     if s2 == nil or shallow:
@@ -49,7 +49,7 @@ proc genericAssignAux(dest, src: Pointer, mt: PNimType, shallow: bool) =
       return
     assert(dest != nil)
     unsureAsgnRef(x, newObj(mt, seq.len * mt.base.size + GenericSeqSize))
-    var dst = cast[taddress](cast[ppointer](dest)^)
+    var dst = cast[taddress](cast[ppointer](dest)[])
     for i in 0..seq.len-1:
       genericAssignAux(
         cast[pointer](dst +% i*% mt.base.size +% GenericSeqSize),
@@ -67,7 +67,7 @@ proc genericAssignAux(dest, src: Pointer, mt: PNimType, shallow: bool) =
       genericAssignAux(cast[pointer](d +% i*% mt.base.size),
                        cast[pointer](s +% i*% mt.base.size), mt.base, shallow)
   of tyRef:
-    unsureAsgnRef(cast[ppointer](dest), cast[ppointer](s)^)
+    unsureAsgnRef(cast[ppointer](dest), cast[ppointer](s)[])
   else:
     copyMem(dest, src, mt.size) # copy raw bits
 
@@ -116,7 +116,7 @@ proc objectInit(dest: Pointer, typ: PNimType) =
     # iterate over any structural type
     # here we have to init the type field:
     var pint = cast[ptr PNimType](dest)
-    pint^ = typ
+    pint[] = typ
     objectInitAux(dest, typ.node)
   of tyTuple, tyPureObject:
     objectInitAux(dest, typ.node)
diff --git a/lib/system/gc.nim b/lib/system/gc.nim
index 882825f5e..eb4811bf5 100755
--- a/lib/system/gc.nim
+++ b/lib/system/gc.nim
@@ -1,7 +1,7 @@
 #
 #
 #            Nimrod's Runtime Library
-#        (c) Copyright 2010 Andreas Rumpf
+#        (c) Copyright 2011 Andreas Rumpf
 #
 #    See the file "copying.txt", included in this
 #    distribution, for details about the copyright.
@@ -244,8 +244,8 @@ proc asgnRef(dest: ppointer, src: pointer) {.compilerProc, inline.} =
   assert(not isOnStack(dest))
   # BUGFIX: first incRef then decRef!
   if src != nil: incRef(usrToCell(src))
-  if dest^ != nil: decRef(usrToCell(dest^))
-  dest^ = src
+  if dest[] != nil: decRef(usrToCell(dest[]))
+  dest[] = src
 
 proc asgnRefNoCycle(dest: ppointer, src: pointer) {.compilerProc, inline.} =
   # the code generator calls this proc if it is known at compile time that no 
@@ -253,11 +253,11 @@ proc asgnRefNoCycle(dest: ppointer, src: pointer) {.compilerProc, inline.} =
   if src != nil: 
     var c = usrToCell(src)
     discard atomicInc(c.refcount, rcIncrement)
-  if dest^ != nil: 
-    var c = usrToCell(dest^)
+  if dest[] != nil: 
+    var c = usrToCell(dest[])
     if atomicDec(c.refcount, rcIncrement) <% rcIncrement:
       rtlAddZCT(c)
-  dest^ = src
+  dest[] = src
 
 proc unsureAsgnRef(dest: ppointer, src: pointer) {.compilerProc.} =
   # unsureAsgnRef updates the reference counters only if dest is not on the
@@ -268,8 +268,8 @@ proc unsureAsgnRef(dest: ppointer, src: pointer) {.compilerProc.} =
     # XXX finally use assembler for the stack checking instead!
     # the test for '!= nil' is correct, but I got tired of the segfaults
     # resulting from the crappy stack checking:
-    if cast[int](dest^) >=% PageSize: decRef(usrToCell(dest^))
-  dest^ = src
+    if cast[int](dest[]) >=% PageSize: decRef(usrToCell(dest[]))
+  dest[] = src
 
 proc initGC() =
   when not defined(useNimRtl):
@@ -311,7 +311,7 @@ proc forAllChildrenAux(dest: Pointer, mt: PNimType, op: TWalkOp) =
       for i in 0..(mt.size div mt.base.size)-1:
         forAllChildrenAux(cast[pointer](d +% i *% mt.base.size), mt.base, op)
     of tyRef, tyString, tySequence: # leaf:
-      doOperation(cast[ppointer](d)^, op)
+      doOperation(cast[ppointer](d)[], op)
     of tyObject, tyTuple, tyPureObject:
       forAllSlotsAux(dest, mt.node, op)
     else: nil
@@ -545,7 +545,7 @@ when defined(sparc): # For SPARC architecture.
     sp = addr(stackTop[0])
     # Addresses decrease as the stack grows.
     while sp <= max:
-      gcMark(sp^)
+      gcMark(sp[])
       sp = cast[ppointer](cast[TAddress](sp) +% sizeof(pointer))
 
 elif defined(ELATE):
@@ -575,7 +575,7 @@ elif stackIncreases:
       # sp will traverse the JMP_BUF as well (jmp_buf size is added,
       # otherwise sp would be below the registers structure).
       while sp >=% max:
-        gcMark(cast[ppointer](sp)^)
+        gcMark(cast[ppointer](sp)[])
         sp = sp -% sizeof(pointer)
 
 else:
@@ -598,7 +598,7 @@ else:
       var max = cast[TAddress](stackBottom)
       var sp = cast[TAddress](addr(registers))
       while sp <=% max:
-        gcMark(cast[ppointer](sp)^)
+        gcMark(cast[ppointer](sp)[])
         sp = sp +% sizeof(pointer)
 
 # ----------------------------------------------------------------------------
@@ -611,13 +611,13 @@ proc CollectZCT(gch: var TGcHeap) =
   # avoid a deep stack, we move objects to keep the ZCT small.
   # This is performance critical!
   var L = addr(gch.zct.len)
-  while L^ > 0:
+  while L[] > 0:
     var c = gch.zct.d[0]
     # remove from ZCT:
     assert((c.refcount and colorMask) == rcZct)
     c.refcount = c.refcount and not colorMask
-    gch.zct.d[0] = gch.zct.d[L^ - 1]
-    dec(L^)
+    gch.zct.d[0] = gch.zct.d[L[] - 1]
+    dec(L[])
     if c.refcount <% rcIncrement: 
       # It may have a RC > 0, if it is in the hardware stack or
       # it has not been removed yet from the ZCT. This is because
diff --git a/lib/system/repr.nim b/lib/system/repr.nim
index b597cb0ce..a70989cad 100755
--- a/lib/system/repr.nim
+++ b/lib/system/repr.nim
@@ -78,10 +78,10 @@ proc reprSetAux(result: var string, p: pointer, typ: PNimType) =
   add result, "{"
   var u: int64
   case typ.size
-  of 1: u = ze64(cast[ptr int8](p)^)
-  of 2: u = ze64(cast[ptr int16](p)^)
-  of 4: u = ze64(cast[ptr int32](p)^)
-  of 8: u = cast[ptr int64](p)^
+  of 1: u = ze64(cast[ptr int8](p)[])
+  of 2: u = ze64(cast[ptr int16](p)[])
+  of 4: u = ze64(cast[ptr int32](p)[])
+  of 8: u = cast[ptr int64](p)[]
   else:
     var a = cast[pbyteArray](p)
     for i in 0 .. typ.size*8-1:
@@ -194,31 +194,31 @@ when not defined(useNimRtl):
     of tyArray: reprArray(result, p, typ, cl)
     of tyTuple, tyPureObject: reprRecord(result, p, typ, cl)
     of tyObject: 
-      var t = cast[ptr PNimType](p)^
+      var t = cast[ptr PNimType](p)[]
       reprRecord(result, p, t, cl)
     of tyRef, tyPtr:
       assert(p != nil)
-      if cast[ppointer](p)^ == nil: add result, "nil"
-      else: reprRef(result, cast[ppointer](p)^, typ, cl)
+      if cast[ppointer](p)[] == nil: add result, "nil"
+      else: reprRef(result, cast[ppointer](p)[], typ, cl)
     of tySequence:
-      reprSequence(result, cast[ppointer](p)^, typ, cl)
-    of tyInt: add result, $(cast[ptr int](p)^)
-    of tyInt8: add result, $int(cast[ptr Int8](p)^)
-    of tyInt16: add result, $int(cast[ptr Int16](p)^)
-    of tyInt32: add result, $int(cast[ptr Int32](p)^)
-    of tyInt64: add result, $(cast[ptr Int64](p)^)
-    of tyFloat: add result, $(cast[ptr float](p)^)
-    of tyFloat32: add result, $(cast[ptr float32](p)^)
-    of tyFloat64: add result, $(cast[ptr float64](p)^)
-    of tyEnum: add result, reprEnum(cast[ptr int](p)^, typ)
-    of tyBool: add result, reprBool(cast[ptr bool](p)^)
-    of tyChar: add result, reprChar(cast[ptr char](p)^)
-    of tyString: reprStrAux(result, cast[ptr string](p)^)
-    of tyCString: reprStrAux(result, $(cast[ptr cstring](p)^))
+      reprSequence(result, cast[ppointer](p)[], typ, cl)
+    of tyInt: add result, $(cast[ptr int](p)[])
+    of tyInt8: add result, $int(cast[ptr Int8](p)[])
+    of tyInt16: add result, $int(cast[ptr Int16](p)[])
+    of tyInt32: add result, $int(cast[ptr Int32](p)[])
+    of tyInt64: add result, $(cast[ptr Int64](p)[])
+    of tyFloat: add result, $(cast[ptr float](p)[])
+    of tyFloat32: add result, $(cast[ptr float32](p)[])
+    of tyFloat64: add result, $(cast[ptr float64](p)[])
+    of tyEnum: add result, reprEnum(cast[ptr int](p)[], typ)
+    of tyBool: add result, reprBool(cast[ptr bool](p)[])
+    of tyChar: add result, reprChar(cast[ptr char](p)[])
+    of tyString: reprStrAux(result, cast[ptr string](p)[])
+    of tyCString: reprStrAux(result, $(cast[ptr cstring](p)[]))
     of tyRange: reprAux(result, p, typ.base, cl)
     of tyProc, tyPointer:
-      if cast[ppointer](p)^ == nil: add result, "nil"
-      else: add result, reprPointer(cast[ppointer](p)^)
+      if cast[ppointer](p)[] == nil: add result, "nil"
+      else: add result, reprPointer(cast[ppointer](p)[])
     else:
       add result, "(invalid data!)"
     inc(cl.recdepth)