Diffstat (limited to 'compiler/lexer.nim')
-rw-r--r--  compiler/lexer.nim  49
1 file changed, 44 insertions, 5 deletions
diff --git a/compiler/lexer.nim b/compiler/lexer.nim
index 9b37b499b..bca07e500 100644
--- a/compiler/lexer.nim
+++ b/compiler/lexer.nim
@@ -33,13 +33,13 @@ type
   TTokType* = enum
     tkInvalid, tkEof,         # order is important here!
     tkSymbol, # keywords:
-    tkAddr, tkAnd, tkAs, tkAsm, tkAtomic,
+    tkAddr, tkAnd, tkAs, tkAsm,
     tkBind, tkBlock, tkBreak, tkCase, tkCast,
     tkConcept, tkConst, tkContinue, tkConverter,
     tkDefer, tkDiscard, tkDistinct, tkDiv, tkDo,
     tkElif, tkElse, tkEnd, tkEnum, tkExcept, tkExport,
     tkFinally, tkFor, tkFrom, tkFunc,
-    tkGeneric, tkIf, tkImport, tkIn, tkInclude, tkInterface,
+    tkIf, tkImport, tkIn, tkInclude, tkInterface,
     tkIs, tkIsnot, tkIterator,
     tkLet,
     tkMacro, tkMethod, tkMixin, tkMod, tkNil, tkNot, tkNotin,
@@ -75,12 +75,12 @@ const
   tokKeywordHigh* = pred(tkIntLit)
   TokTypeToStr*: array[TTokType, string] = ["tkInvalid", "[EOF]",
     "tkSymbol",
-    "addr", "and", "as", "asm", "atomic",
+    "addr", "and", "as", "asm",
     "bind", "block", "break", "case", "cast",
     "concept", "const", "continue", "converter",
     "defer", "discard", "distinct", "div", "do",
     "elif", "else", "end", "enum", "except", "export",
-    "finally", "for", "from", "func", "generic", "if",
+    "finally", "for", "from", "func", "if",
     "import", "in", "include", "interface", "is", "isnot", "iterator",
     "let",
     "macro", "method", "mixin", "mod",
@@ -129,6 +129,7 @@ type
     when defined(nimpretty):
       offsetA*, offsetB*: int   # used for pretty printing so that literals
                                 # like 0b01 or  r"\L" are unaffected
+      commentOffsetA*, commentOffsetB*: int
 
   TErrorHandler* = proc (info: TLineInfo; msg: TMsgKind; arg: string)
   TLexer* = object of TBaseLexer
@@ -144,6 +145,10 @@ type
     when defined(nimsuggest):
       previousToken: TLineInfo
 
+when defined(nimpretty):
+  var
+    gIndentationWidth*: int
+
 var gLinesCompiled*: int  # all lines that have been compiled
 
 proc getLineInfo*(L: TLexer, tok: TToken): TLineInfo {.inline.} =
@@ -151,6 +156,8 @@ proc getLineInfo*(L: TLexer, tok: TToken): TLineInfo {.inline.} =
   when defined(nimpretty):
     result.offsetA = tok.offsetA
     result.offsetB = tok.offsetB
+    result.commentOffsetA = tok.commentOffsetA
+    result.commentOffsetB = tok.commentOffsetB
 
 proc isKeyword*(kind: TTokType): bool =
   result = (kind >= tokKeywordLow) and (kind <= tokKeywordHigh)
@@ -198,6 +205,9 @@ proc initToken*(L: var TToken) =
   L.fNumber = 0.0
   L.base = base10
   L.ident = nil
+  when defined(nimpretty):
+    L.commentOffsetA = 0
+    L.commentOffsetB = 0
 
 proc fillToken(L: var TToken) =
   L.tokType = tkInvalid
@@ -208,6 +218,9 @@ proc fillToken(L: var TToken) =
   L.fNumber = 0.0
   L.base = base10
   L.ident = nil
+  when defined(nimpretty):
+    L.commentOffsetA = 0
+    L.commentOffsetB = 0
 
 proc openLexer*(lex: var TLexer, fileIdx: int32, inputstream: PLLStream;
                  cache: IdentCache) =
@@ -680,7 +693,7 @@ proc getEscapedChar(L: var TLexer, tok: var TToken) =
 proc newString(s: cstring, len: int): string =
   ## XXX, how come there is no support for this?
   result = newString(len)
-  for i in 0 .. <len:
+  for i in 0 ..< len:
     result[i] = s[i]
 
 proc handleCRLF(L: var TLexer, pos: int): int =
@@ -847,6 +860,23 @@ proc getOperator(L: var TLexer, tok: var TToken) =
   if buf[pos] in {CR, LF, nimlexbase.EndOfFile}:
     tok.strongSpaceB = -1
 
+proc newlineFollows*(L: var TLexer): bool =
+  var pos = L.bufpos
+  var buf = L.buf
+  while true:
+    case buf[pos]
+    of ' ', '\t':
+      inc(pos)
+    of CR, LF:
+      result = true
+      break
+    of '#':
+      inc(pos)
+      if buf[pos] == '#': inc(pos)
+      if buf[pos] != '[': return true
+    else:
+      break
+
 proc skipMultiLineComment(L: var TLexer; tok: var TToken; start: int;
                           isDoc: bool) =
   var pos = start
@@ -996,18 +1026,27 @@ proc skip(L: var TLexer, tok: var TToken) =
     of '#':
       # do not skip documentation comment:
       if buf[pos+1] == '#': break
+      when defined(nimpretty):
+        tok.commentOffsetA = L.offsetBase + pos
       if buf[pos+1] == '[':
         skipMultiLineComment(L, tok, pos+2, false)
         pos = L.bufpos
         buf = L.buf
+        when defined(nimpretty):
+          tok.commentOffsetB = L.offsetBase + pos
       else:
         tokenBegin(tok, pos)
         while buf[pos] notin {CR, LF, nimlexbase.EndOfFile}: inc(pos)
         tokenEndIgnore(tok, pos+1)
+        when defined(nimpretty):
+          tok.commentOffsetB = L.offsetBase + pos + 1
     else:
       break                   # EndOfFile also leaves the loop
   tokenEndPrevious(tok, pos-1)
   L.bufpos = pos
+  when defined(nimpretty):
+    if gIndentationWidth <= 0:
+      gIndentationWidth = tok.indent
 
 proc rawGetTok*(L: var TLexer, tok: var TToken) =
   template atTokenEnd() {.dirty.} =
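
Below is a small standalone sketch (not part of the patch or the compiler's API) of the look-ahead rule that the new newlineFollows helper implements: from a given position, spaces and tabs are skipped; a CR/LF means a newline follows; a `#` or `##` line comment counts as well, unless it opens a `#[`/`##[` multi-line comment. The name newlineFollowsAt and the string-based signature are hypothetical, chosen only so the sketch runs outside the lexer.

proc newlineFollowsAt(s: string; start: int): bool =
  # Skip horizontal whitespace; a CR/LF or a line comment means a newline
  # effectively follows, while a `#[`/`##[` multi-line comment does not.
  var pos = start
  while pos < s.len:
    case s[pos]
    of ' ', '\t':
      inc pos
    of '\c', '\L':
      return true
    of '#':
      inc pos
      if pos < s.len and s[pos] == '#': inc pos      # `##` doc comment marker
      if pos >= s.len or s[pos] != '[': return true  # plain line comment
      return false                                   # `#[` opens a block comment
    else:
      return false
  # end of input reached without a newline; result stays false

when isMainModule:
  doAssert newlineFollowsAt("  # trailing comment\L", 0)
  doAssert not newlineFollowsAt("  #[ block ]# code\L", 0)
  doAssert not newlineFollowsAt("x = 1\L", 0)

Like the helper in the diff, the sketch only peeks ahead: newlineFollows copies L.bufpos into a local pos, so the lexer's actual position is left untouched.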