Diffstat (limited to 'nimlib/pure/lexbase.nim')
-rwxr-xr-x  nimlib/pure/lexbase.nim  166
1 file changed, 0 insertions, 166 deletions
diff --git a/nimlib/pure/lexbase.nim b/nimlib/pure/lexbase.nim
deleted file mode 100755
index bb207e92a..000000000
--- a/nimlib/pure/lexbase.nim
+++ /dev/null
@@ -1,166 +0,0 @@
-#
-#
-#           The Nimrod Compiler
-#        (c) Copyright 2009 Andreas Rumpf
-#
-#    See the file "copying.txt", included in this
-#    distribution, for details about the copyright.
-#
-
-## This module implements a base object for a lexer with efficient buffer
-## handling. Only at line endings is it necessary to check whether the
-## buffer needs refilling.
-
-import
-  strutils, streams
-
-const
-  EndOfFile* = '\0'           ## end of file marker
-  NewLines* = {'\c', '\L'}
-
-# Buffer handling:
-#  buf:
-#  "Example Text\n ha!"   bufLen = 17
-#   ^pos = 0     ^ sentinel = 12
-#
-
-type
-  TBaseLexer* = object of TObject ## the base lexer. Inherit your lexer from
-                                  ## this object.
-    bufpos*: int              ## the current position within the buffer
-    buf*: cstring             ## the buffer itself
-    bufLen*: int              ## length of buffer in characters
-    input: PStream            ## the input stream
-    LineNumber*: int          ## the current line number
-    sentinel: int
-    lineStart: int            # index of last line start in buffer
-    fileOpened: bool
-
-proc open*(L: var TBaseLexer, input: PStream, bufLen: int = 8192)
-  ## inits the TBaseLexer with a stream to read from
-
-proc close*(L: var TBaseLexer)
-  ## closes the base lexer. This closes `L`'s associated stream too.
-
-proc getCurrentLine*(L: TBaseLexer, marker: bool = true): string
-  ## retrieves the current line.
-
-proc getColNumber*(L: TBaseLexer, pos: int): int
-  ## retrieves the column number of the position `pos`.
-
-proc HandleCR*(L: var TBaseLexer, pos: int): int
-  ## Call this if you scanned over '\c' in the buffer; it returns the
-  ## position to continue the scanning from. `pos` must be the position
-  ## of the '\c'.
-proc HandleLF*(L: var TBaseLexer, pos: int): int
-  ## Call this if you scanned over '\L' in the buffer; it returns the
-  ## position to continue the scanning from. `pos` must be the position
-  ## of the '\L'.
-
-# implementation
-
-const
-  chrSize = sizeof(char)
-
-proc close(L: var TBaseLexer) =
-  dealloc(L.buf)
-  L.input.close(L.input)
-
-proc FillBuffer(L: var TBaseLexer) =
-  var
-    charsRead, toCopy, s: int # all are in characters,
-                              # not bytes (in case these
-                              # differ)
-    oldBufLen: int
-  # we know here that pos == L.sentinel, but not if this proc
-  # is called for the first time by open()
-  assert(L.sentinel < L.bufLen)
-  toCopy = L.BufLen - L.sentinel - 1
-  assert(toCopy >= 0)
-  if toCopy > 0:
-    MoveMem(L.buf, addr(L.buf[L.sentinel + 1]), toCopy * chrSize) # "moveMem" handles overlapping regions
-  charsRead = L.input.readData(L.input, addr(L.buf[toCopy]),
-                               (L.sentinel + 1) * chrSize) div chrSize
-  s = toCopy + charsRead
-  if charsRead < L.sentinel + 1:
-    L.buf[s] = EndOfFile      # set end marker
-    L.sentinel = s
-  else:
-    # compute sentinel:
-    dec(s)                    # BUGFIX (valgrind)
-    while true:
-      assert(s < L.bufLen)
-      while (s >= 0) and not (L.buf[s] in NewLines): Dec(s)
-      if s >= 0:
-        # we found an appropriate character for a sentinel:
-        L.sentinel = s
-        break
-      else:
-        # rather than to give up here because the line is too long,
-        # double the buffer's size and try again:
-        oldBufLen = L.BufLen
-        L.bufLen = L.BufLen * 2
-        L.buf = cast[cstring](realloc(L.buf, L.bufLen * chrSize))
-        assert(L.bufLen - oldBuflen == oldBufLen)
-        charsRead = L.input.ReadData(L.input, addr(L.buf[oldBufLen]),
-                                     oldBufLen * chrSize) div chrSize
-        if charsRead < oldBufLen:
-          L.buf[oldBufLen + charsRead] = EndOfFile
-          L.sentinel = oldBufLen + charsRead
-          break
-        s = L.bufLen - 1
-
-proc fillBaseLexer(L: var TBaseLexer, pos: int): int =
-  assert(pos <= L.sentinel)
-  if pos < L.sentinel:
-    result = pos + 1          # nothing to do
-  else:
-    fillBuffer(L)
-    L.bufpos = 0              # XXX: is this really correct?
-    result = 0
-  L.lineStart = result
-
-proc HandleCR(L: var TBaseLexer, pos: int): int =
-  assert(L.buf[pos] == '\c')
-  inc(L.linenumber)
-  result = fillBaseLexer(L, pos)
-  if L.buf[result] == '\L':
-    result = fillBaseLexer(L, result)
-
-proc HandleLF(L: var TBaseLexer, pos: int): int =
-  assert(L.buf[pos] == '\L')
-  inc(L.linenumber)
-  result = fillBaseLexer(L, pos) #L.lastNL := result-1; // BUGFIX: was: result;
-
-proc skip_UTF_8_BOM(L: var TBaseLexer) =
-  if (L.buf[0] == '\xEF') and (L.buf[1] == '\xBB') and (L.buf[2] == '\xBF'):
-    inc(L.bufpos, 3)
-    inc(L.lineStart, 3)
-
-proc open(L: var TBaseLexer, input: PStream, bufLen: int = 8192) =
-  assert(bufLen > 0)
-  assert(input != nil)
-  L.input = input
-  L.bufpos = 0
-  L.bufLen = bufLen
-  L.buf = cast[cstring](alloc(bufLen * chrSize))
-  L.sentinel = bufLen - 1
-  L.lineStart = 0
-  L.linenumber = 1            # lines start at 1
-  fillBuffer(L)
-  skip_UTF_8_BOM(L)
-
-proc getColNumber(L: TBaseLexer, pos: int): int =
-  result = abs(pos - L.lineStart)
-
-proc getCurrentLine(L: TBaseLexer, marker: bool = true): string =
-  var i: int
-  result = ""
-  i = L.lineStart
-  while not (L.buf[i] in {'\c', '\L', EndOfFile}):
-    add(result, L.buf[i])
-    inc(i)
-  add(result, "\n")
-  if marker:
-    add(result, RepeatChar(getColNumber(L, L.bufpos)) & "^\n")
-
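
For reference, here is a small usage sketch (not part of the deleted file or of this diff) showing how a lexer built on the API above was typically driven: inherit from TBaseLexer, call open with a stream, walk buf via bufpos, and let HandleCR/HandleLF refill the buffer at line endings. The type TMyLexer, the proc countLines and the file name parameter are hypothetical illustration names; everything else is the old Nimrod-era API declared in the file above.

import streams, lexbase

type
  TMyLexer = object of TBaseLexer   # your lexer state goes on top of the base

proc countLines(filename: string): int =
  # hypothetical example: counts lines by scanning the buffer and letting
  # the base lexer refill it whenever a line ending is reached
  var lexer: TMyLexer
  open(lexer, newFileStream(filename, fmRead))
  var pos = lexer.bufpos
  while lexer.buf[pos] != EndOfFile:
    case lexer.buf[pos]
    of '\c': pos = HandleCR(lexer, pos)   # may refill the buffer
    of '\L': pos = HandleLF(lexer, pos)   # may refill the buffer
    else: inc(pos)
  lexer.bufpos = pos
  result = lexer.LineNumber
  close(lexer)

The standard-library parsers that were built on this module (for example the configuration file parser) followed the same pattern: inherit from TBaseLexer, scan buf directly, and call HandleCR/HandleLF only when a '\c' or '\L' is seen.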