author     narimiran <narimiran@disroot.org>   2019-09-27 11:01:51 +0200
committer  narimiran <narimiran@disroot.org>   2019-09-30 13:58:08 +0200
commit     b17ed2ca9cf5dda8aa47bc492dd32c11a6e013e8 (patch)
tree       3cca126d14837ce12db02fa29dfb93ad970095de /lib
parent     6c994b24980413bcef1808cbff47c9d80bc4fa91 (diff)
download   Nim-b17ed2ca9cf5dda8aa47bc492dd32c11a6e013e8.tar.gz
[backport] run nimpretty on parsers
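
nimpretty is the source formatter shipped with the Nim toolchain. A minimal sketch of how a pass like this could be reproduced over the touched modules, assuming nimpretty's default settings (its default maximum line length of 80 matches the wrapping visible in the hunks below); the exact invocation used for this commit is not recorded here:

    # format the eleven parser modules in place; nimpretty accepts one or more .nim files
    nimpretty lib/pure/htmlparser.nim lib/pure/json.nim lib/pure/lexbase.nim \
              lib/pure/parsecfg.nim lib/pure/parsecsv.nim lib/pure/parsejson.nim \
              lib/pure/parseopt.nim lib/pure/parsesql.nim lib/pure/parseutils.nim \
              lib/pure/parsexml.nim lib/pure/xmlparser.nim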
Diffstat (limited to 'lib')
-rw-r--r--  lib/pure/htmlparser.nim  |  5
-rw-r--r--  lib/pure/json.nim        | 51
-rw-r--r--  lib/pure/lexbase.nim     | 22
-rw-r--r--  lib/pure/parsecfg.nim    | 52
-rw-r--r--  lib/pure/parsecsv.nim    | 12
-rw-r--r--  lib/pure/parsejson.nim   | 52
-rw-r--r--  lib/pure/parseopt.nim    | 33
-rw-r--r--  lib/pure/parsesql.nim    | 50
-rw-r--r--  lib/pure/parseutils.nim  | 40
-rw-r--r--  lib/pure/parsexml.nim    | 80
-rw-r--r--  lib/pure/xmlparser.nim   |  5
11 files changed, 208 insertions(+), 194 deletions(-)
diff --git a/lib/pure/htmlparser.nim b/lib/pure/htmlparser.nim
index be61bb0d8..4b305cfd6 100644
--- a/lib/pure/htmlparser.nim
+++ b/lib/pure/htmlparser.nim
@@ -52,7 +52,7 @@
 import strutils, streams, parsexml, xmltree, unicode, strtabs
 
 type
-  HtmlTag* = enum ## list of all supported HTML tags; order will always be
+  HtmlTag* = enum  ## list of all supported HTML tags; order will always be
                    ## alphabetically
     tagUnknown,    ## unknown HTML element
     tagA,          ## the HTML ``a`` element
@@ -1945,7 +1945,8 @@ proc untilElementEnd(x: var XmlParser, result: XmlNode,
         adderr(expected(x, result))
         # this seems to do better match error corrections in browsers:
         while x.kind in {xmlElementEnd, xmlWhitespace}:
-          if x.kind == xmlElementEnd and cmpIgnoreCase(x.elemName, result.tag) == 0:
+          if x.kind == xmlElementEnd and cmpIgnoreCase(x.elemName,
+              result.tag) == 0:
             break
           next(x)
       next(x)
diff --git a/lib/pure/json.nim b/lib/pure/json.nim
index 530408b9f..4dad325bc 100644
--- a/lib/pure/json.nim
+++ b/lib/pure/json.nim
@@ -406,7 +406,7 @@ macro `%*`*(x: untyped): untyped =
   ## `%` for every element.
   result = toJson(x)
 
-proc `==`* (a, b: JsonNode): bool =
+proc `==`*(a, b: JsonNode): bool =
   ## Check two nodes for equality
   if a.isNil:
     if b.isNil: return true
@@ -428,13 +428,13 @@ proc `==`* (a, b: JsonNode): bool =
     of JArray:
       result = a.elems == b.elems
     of JObject:
-     # we cannot use OrderedTable's equality here as
-     # the order does not matter for equality here.
-     if a.fields.len != b.fields.len: return false
-     for key, val in a.fields:
-       if not b.fields.hasKey(key): return false
-       if b.fields[key] != val: return false
-     result = true
+      # we cannot use OrderedTable's equality here as
+      # the order does not matter for equality here.
+      if a.fields.len != b.fields.len: return false
+      for key, val in a.fields:
+        if not b.fields.hasKey(key): return false
+        if b.fields[key] != val: return false
+      result = true
 
 proc hash*(n: OrderedTable[string, JsonNode]): Hash {.noSideEffect.}
 
@@ -502,7 +502,8 @@ proc contains*(node: JsonNode, val: JsonNode): bool =
   assert(node.kind == JArray)
   find(node.elems, val) >= 0
 
-proc existsKey*(node: JsonNode, key: string): bool {.deprecated: "use 'hasKey' instead".} =
+proc existsKey*(node: JsonNode, key: string): bool {.
+    deprecated: "use 'hasKey' instead".} =
   node.hasKey(key)
 
 proc `{}`*(node: JsonNode, keys: varargs[string]): JsonNode =
@@ -538,7 +539,8 @@ proc getOrDefault*(node: JsonNode, key: string): JsonNode =
   if not isNil(node) and node.kind == JObject:
     result = node.fields.getOrDefault(key)
 
-template simpleGetOrDefault*{`{}`(node, [key])}(node: JsonNode, key: string): JsonNode = node.getOrDefault(key)
+template simpleGetOrDefault*{`{}`(node, [key])}(node: JsonNode,
+    key: string): JsonNode = node.getOrDefault(key)
 
 proc `{}=`*(node: JsonNode, keys: varargs[string], value: JsonNode) =
   ## Traverses the node and tries to set the value at the given location
@@ -691,7 +693,7 @@ proc pretty*(node: JsonNode, indent = 2): string =
   ## Similar to prettyprint in Python.
   runnableExamples:
     let j = %* {"name": "Isaac", "books": ["Robot Dreams"],
-                "details": {"age":35, "pi":3.1415}}
+                "details": {"age": 35, "pi": 3.1415}}
     doAssert pretty(j) == """
 {
   "name": "Isaac",
@@ -721,14 +723,14 @@ proc toUgly*(result: var string, node: JsonNode) =
     result.add "["
     for child in node.elems:
       if comma: result.add ","
-      else:     comma = true
+      else: comma = true
       result.toUgly child
     result.add "]"
   of JObject:
     result.add "{"
     for key, value in pairs(node.fields):
       if comma: result.add ","
-      else:     comma = true
+      else: comma = true
       key.escapeJson(result)
       result.add ":"
       result.toUgly value
@@ -1331,7 +1333,8 @@ proc createConstructor(typeSym, jsonNode: NimNode): NimNode =
         (
           var map = `tableInit`[`tableKeyType`, `tableValueType`]();
           verifyJsonKind(`jsonNode`, {JObject}, astToStr(`jsonNode`));
-          for `forLoopKey` in keys(`jsonNode`.fields): map[`forLoopKey`] = `constructorNode`;
+          for `forLoopKey` in keys(`jsonNode`.fields): map[
+              `forLoopKey`] = `constructorNode`;
           map
         )
     of "ref":
@@ -1374,7 +1377,8 @@ proc createConstructor(typeSym, jsonNode: NimNode): NimNode =
         (
           var list: `typeSym`;
           verifyJsonKind(`jsonNode`, {JArray}, astToStr(`jsonNode`));
-          for `forLoopI` in 0 ..< `jsonNode`.len: list[`forLoopI`] =`constructorNode`;
+          for `forLoopI` in 0 ..< `jsonNode`.len: list[
+              `forLoopI`] = `constructorNode`;
           list
         )
     of "tuple":
@@ -1640,11 +1644,11 @@ when isMainModule:
     except:
       doAssert(false, "IndexError thrown for valid index")
 
-  doAssert(testJson{"b"}.getStr()=="asd", "Couldn't fetch a singly nested key with {}")
+  doAssert(testJson{"b"}.getStr() == "asd", "Couldn't fetch a singly nested key with {}")
   doAssert(isNil(testJson{"nonexistent"}), "Non-existent keys should return nil")
   doAssert(isNil(testJson{"a", "b"}), "Indexing through a list should return nil")
   doAssert(isNil(testJson{"a", "b"}), "Indexing through a list should return nil")
-  doAssert(testJson{"a"}==parseJson"[1, 2, 3, 4]", "Didn't return a non-JObject when there was one to be found")
+  doAssert(testJson{"a"} == parseJson"[1, 2, 3, 4]", "Didn't return a non-JObject when there was one to be found")
   doAssert(isNil(parseJson("[1, 2, 3]"){"foo"}), "Indexing directly into a list should return nil")
 
   # Generator:
@@ -1669,10 +1673,10 @@ when isMainModule:
   const hisAge = 31
 
   var j3 = %*
-    [ { "name": "John"
+    [ {"name": "John"
       , "age": herAge
       }
-    , { "name": "Susan"
+    , {"name": "Susan"
       , "age": hisAge
       }
     ]
@@ -1708,7 +1712,8 @@ when isMainModule:
     except IndexError: doAssert(true)
 
     var parsed2 = parseFile("tests/testdata/jsontest2.json")
-    doAssert(parsed2{"repository", "description"}.str=="IRC Library for Haskell", "Couldn't fetch via multiply nested key using {}")
+    doAssert(parsed2{"repository", "description"}.str ==
+        "IRC Library for Haskell", "Couldn't fetch via multiply nested key using {}")
 
   doAssert escapeJsonUnquoted("\10Foo🎃barÄ") == "\\nFoo🎃barÄ"
   doAssert escapeJsonUnquoted("\0\7\20") == "\\u0000\\u0007\\u0014" # for #7887
@@ -1752,15 +1757,15 @@ when isMainModule:
   # Generate constructors for range[T] types
   block:
     type
-      Q1 = range[0'u8  .. 50'u8]
+      Q1 = range[0'u8 .. 50'u8]
       Q2 = range[0'u16 .. 50'u16]
       Q3 = range[0'u32 .. 50'u32]
-      Q4 = range[0'i8  .. 50'i8]
+      Q4 = range[0'i8 .. 50'i8]
       Q5 = range[0'i16 .. 50'i16]
       Q6 = range[0'i32 .. 50'i32]
       Q7 = range[0'f32 .. 50'f32]
       Q8 = range[0'f64 .. 50'f64]
-      Q9 = range[0     .. 50]
+      Q9 = range[0 .. 50]
 
       X = object
         m1: Q1
diff --git a/lib/pure/lexbase.nim b/lib/pure/lexbase.nim
index 0ef4a147a..8bc96c82c 100644
--- a/lib/pure/lexbase.nim
+++ b/lib/pure/lexbase.nim
@@ -15,7 +15,7 @@ import
   strutils, streams
 
 const
-  EndOfFile* = '\0'           ## end of file marker
+  EndOfFile* = '\0' ## end of file marker
   NewLines* = {'\c', '\L'}
 
 # Buffer handling:
@@ -27,13 +27,13 @@ const
 type
   BaseLexer* = object of RootObj ## the base lexer. Inherit your lexer from
                                  ## this object.
-    bufpos*: int              ## the current position within the buffer
-    buf*: string           ## the buffer itself
-    input: Stream            ## the input stream
-    lineNumber*: int          ## the current line number
+    bufpos*: int                 ## the current position within the buffer
+    buf*: string                 ## the buffer itself
+    input: Stream                ## the input stream
+    lineNumber*: int             ## the current line number
     sentinel: int
-    lineStart: int            # index of last line start in buffer
-    offsetBase*: int          # use ``offsetBase + bufpos`` to get the offset
+    lineStart: int               # index of last line start in buffer
+    offsetBase*: int             # use ``offsetBase + bufpos`` to get the offset
     refillChars: set[char]
 
 proc close*(L: var BaseLexer) =
@@ -65,11 +65,11 @@ proc fillBuffer(L: var BaseLexer) =
   charsRead = L.input.readDataStr(L.buf, toCopy ..< toCopy + L.sentinel + 1)
   s = toCopy + charsRead
   if charsRead < L.sentinel + 1:
-    L.buf[s] = EndOfFile      # set end marker
+    L.buf[s] = EndOfFile # set end marker
     L.sentinel = s
   else:
     # compute sentinel:
-    dec(s)                    # BUGFIX (valgrind)
+    dec(s) # BUGFIX (valgrind)
     while true:
       assert(s < L.buf.len)
       while s >= 0 and L.buf[s] notin L.refillChars: dec(s)
@@ -92,7 +92,7 @@ proc fillBuffer(L: var BaseLexer) =
 proc fillBaseLexer(L: var BaseLexer, pos: int): int =
   assert(pos <= L.sentinel)
   if pos < L.sentinel:
-    result = pos + 1          # nothing to do
+    result = pos + 1 # nothing to do
   else:
     fillBuffer(L)
     L.offsetBase += pos
@@ -142,7 +142,7 @@ proc open*(L: var BaseLexer, input: Stream, bufLen: int = 8192;
   L.buf = newString(bufLen)
   L.sentinel = bufLen - 1
   L.lineStart = 0
-  L.lineNumber = 1            # lines start at 1
+  L.lineNumber = 1 # lines start at 1
   fillBuffer(L)
   skipUtf8Bom(L)
 
diff --git a/lib/pure/parsecfg.nim b/lib/pure/parsecfg.nim
index 0fa666886..4fd5647f6 100644
--- a/lib/pure/parsecfg.nim
+++ b/lib/pure/parsecfg.nim
@@ -115,34 +115,34 @@ include "system/inclrtl"
 
 type
   CfgEventKind* = enum ## enumeration of all events that may occur when parsing
-    cfgEof,             ## end of file reached
-    cfgSectionStart,    ## a ``[section]`` has been parsed
-    cfgKeyValuePair,    ## a ``key=value`` pair has been detected
-    cfgOption,          ## a ``--key=value`` command line option
-    cfgError            ## an error occurred during parsing
+    cfgEof,            ## end of file reached
+    cfgSectionStart,   ## a ``[section]`` has been parsed
+    cfgKeyValuePair,   ## a ``key=value`` pair has been detected
+    cfgOption,         ## a ``--key=value`` command line option
+    cfgError           ## an error occurred during parsing
 
   CfgEvent* = object of RootObj ## describes a parsing event
     case kind*: CfgEventKind    ## the kind of the event
     of cfgEof: nil
     of cfgSectionStart:
-      section*: string           ## `section` contains the name of the
-                                 ## parsed section start (syntax: ``[section]``)
+      section*: string          ## `section` contains the name of the
+                                ## parsed section start (syntax: ``[section]``)
     of cfgKeyValuePair, cfgOption:
-      key*, value*: string       ## contains the (key, value) pair if an option
-                                 ## of the form ``--key: value`` or an ordinary
-                                 ## ``key= value`` pair has been parsed.
-                                 ## ``value==""`` if it was not specified in the
-                                 ## configuration file.
-    of cfgError:                 ## the parser encountered an error: `msg`
-      msg*: string               ## contains the error message. No exceptions
-                                 ## are thrown if a parse error occurs.
+      key*, value*: string      ## contains the (key, value) pair if an option
+                                ## of the form ``--key: value`` or an ordinary
+                                ## ``key= value`` pair has been parsed.
+                                ## ``value==""`` if it was not specified in the
+                                ## configuration file.
+    of cfgError:                ## the parser encountered an error: `msg`
+      msg*: string              ## contains the error message. No exceptions
+                                ## are thrown if a parse error occurs.
 
   TokKind = enum
     tkInvalid, tkEof,
     tkSymbol, tkEquals, tkColon, tkBracketLe, tkBracketRi, tkDashDash
-  Token = object             # a token
-    kind: TokKind            # the type of the token
-    literal: string          # the parsed (string) literal
+  Token = object    # a token
+    kind: TokKind   # the type of the token
+    literal: string # the parsed (string) literal
 
   CfgParser* = object of BaseLexer ## the parser object.
     tok: Token
@@ -203,7 +203,7 @@ proc handleDecChars(c: var CfgParser, xi: var int) =
     inc(c.bufpos)
 
 proc getEscapedChar(c: var CfgParser, tok: var Token) =
-  inc(c.bufpos)               # skip '\'
+  inc(c.bufpos) # skip '\'
   case c.buf[c.bufpos]
   of 'n', 'N':
     add(tok.literal, "\n")
@@ -258,11 +258,11 @@ proc handleCRLF(c: var CfgParser, pos: int): int =
   else: result = pos
 
 proc getString(c: var CfgParser, tok: var Token, rawMode: bool) =
-  var pos = c.bufpos + 1          # skip "
+  var pos = c.bufpos + 1 # skip "
   tok.kind = tkSymbol
   if (c.buf[pos] == '"') and (c.buf[pos + 1] == '"'):
     # long string literal:
-    inc(pos, 2)               # skip ""
+    inc(pos, 2) # skip ""
                               # skip leading newline:
     pos = handleCRLF(c, pos)
     while true:
@@ -280,13 +280,13 @@ proc getString(c: var CfgParser, tok: var Token, rawMode: bool) =
       else:
         add(tok.literal, c.buf[pos])
         inc(pos)
-    c.bufpos = pos + 3       # skip the three """
+    c.bufpos = pos + 3 # skip the three """
   else:
     # ordinary string literal
     while true:
       var ch = c.buf[pos]
       if ch == '"':
-        inc(pos)              # skip '"'
+        inc(pos) # skip '"'
         break
       if ch in {'\c', '\L', lexbase.EndOfFile}:
         tok.kind = tkInvalid
@@ -320,7 +320,7 @@ proc skip(c: var CfgParser) =
     of '\c', '\L':
       pos = handleCRLF(c, pos)
     else:
-      break                   # EndOfFile also leaves the loop
+      break # EndOfFile also leaves the loop
   c.bufpos = pos
 
 proc rawGetTok(c: var CfgParser, tok: var Token) =
@@ -370,13 +370,13 @@ proc errorStr*(c: CfgParser, msg: string): string {.rtl, extern: "npc$1".} =
   ## returns a properly formatted error message containing current line and
   ## column information.
   result = `%`("$1($2, $3) Error: $4",
-               [c.filename, $getLine(c), $getColumn(c), msg])
+                [c.filename, $getLine(c), $getColumn(c), msg])
 
 proc warningStr*(c: CfgParser, msg: string): string {.rtl, extern: "npc$1".} =
   ## returns a properly formatted warning message containing current line and
   ## column information.
   result = `%`("$1($2, $3) Warning: $4",
-               [c.filename, $getLine(c), $getColumn(c), msg])
+                [c.filename, $getLine(c), $getColumn(c), msg])
 
 proc ignoreMsg*(c: CfgParser, e: CfgEvent): string {.rtl, extern: "npc$1".} =
   ## returns a properly formatted warning message containing that
diff --git a/lib/pure/parsecsv.nim b/lib/pure/parsecsv.nim
index 91c878078..741ce33b8 100644
--- a/lib/pure/parsecsv.nim
+++ b/lib/pure/parsecsv.nim
@@ -71,12 +71,12 @@ import
 type
   CsvRow* = seq[string] ## A row in a CSV file.
   CsvParser* = object of BaseLexer ## The parser object.
-    ##
-    ## It consists of two public fields:
-    ## * `row` is the current row
-    ## * `headers` are the columns that are defined in the csv file
-    ##   (read using `readHeaderRow <#readHeaderRow,CsvParser>`_).
-    ##   Used with `rowEntry <#rowEntry,CsvParser,string>`_).
+                                   ##
+                                   ## It consists of two public fields:
+                                   ## * `row` is the current row
+                                   ## * `headers` are the columns that are defined in the csv file
+                                   ##   (read using `readHeaderRow <#readHeaderRow,CsvParser>`_).
+                                   ##   Used with `rowEntry <#rowEntry,CsvParser,string>`_).
     row*: CsvRow
     filename: string
     sep, quote, esc: char
diff --git a/lib/pure/parsejson.nim b/lib/pure/parsejson.nim
index 9893e434e..0d7d7093e 100644
--- a/lib/pure/parsejson.nim
+++ b/lib/pure/parsejson.nim
@@ -15,21 +15,21 @@ import
   strutils, lexbase, streams, unicode
 
 type
-  JsonEventKind* = enum  ## enumeration of all events that may occur when parsing
-    jsonError,           ## an error occurred during parsing
-    jsonEof,             ## end of file reached
-    jsonString,          ## a string literal
-    jsonInt,             ## an integer literal
-    jsonFloat,           ## a float literal
-    jsonTrue,            ## the value ``true``
-    jsonFalse,           ## the value ``false``
-    jsonNull,            ## the value ``null``
-    jsonObjectStart,     ## start of an object: the ``{`` token
-    jsonObjectEnd,       ## end of an object: the ``}`` token
-    jsonArrayStart,      ## start of an array: the ``[`` token
-    jsonArrayEnd         ## start of an array: the ``]`` token
+  JsonEventKind* = enum ## enumeration of all events that may occur when parsing
+    jsonError,          ## an error occurred during parsing
+    jsonEof,            ## end of file reached
+    jsonString,         ## a string literal
+    jsonInt,            ## an integer literal
+    jsonFloat,          ## a float literal
+    jsonTrue,           ## the value ``true``
+    jsonFalse,          ## the value ``false``
+    jsonNull,           ## the value ``null``
+    jsonObjectStart,    ## start of an object: the ``{`` token
+    jsonObjectEnd,      ## end of an object: the ``}`` token
+    jsonArrayStart,     ## start of an array: the ``[`` token
+    jsonArrayEnd        ## start of an array: the ``]`` token
 
-  TokKind* = enum         # must be synchronized with TJsonEventKind!
+  TokKind* = enum # must be synchronized with TJsonEventKind!
     tkError,
     tkEof,
     tkString,
@@ -45,18 +45,18 @@ type
     tkColon,
     tkComma
 
-  JsonError* = enum        ## enumeration that lists all errors that can occur
-    errNone,               ## no error
-    errInvalidToken,       ## invalid token
-    errStringExpected,     ## string expected
-    errColonExpected,      ## ``:`` expected
-    errCommaExpected,      ## ``,`` expected
-    errBracketRiExpected,  ## ``]`` expected
-    errCurlyRiExpected,    ## ``}`` expected
-    errQuoteExpected,      ## ``"`` or ``'`` expected
-    errEOC_Expected,       ## ``*/`` expected
-    errEofExpected,        ## EOF expected
-    errExprExpected        ## expr expected
+  JsonError* = enum       ## enumeration that lists all errors that can occur
+    errNone,              ## no error
+    errInvalidToken,      ## invalid token
+    errStringExpected,    ## string expected
+    errColonExpected,     ## ``:`` expected
+    errCommaExpected,     ## ``,`` expected
+    errBracketRiExpected, ## ``]`` expected
+    errCurlyRiExpected,   ## ``}`` expected
+    errQuoteExpected,     ## ``"`` or ``'`` expected
+    errEOC_Expected,      ## ``*/`` expected
+    errEofExpected,       ## EOF expected
+    errExprExpected       ## expr expected
 
   ParserState = enum
     stateEof, stateStart, stateObject, stateArray, stateExpectArrayComma,
diff --git a/lib/pure/parseopt.nim b/lib/pure/parseopt.nim
index 545f9f00a..23978c964 100644
--- a/lib/pure/parseopt.nim
+++ b/lib/pure/parseopt.nim
@@ -155,11 +155,11 @@ import
   os, strutils
 
 type
-  CmdLineKind* = enum         ## The detected command line token.
-    cmdEnd,                   ## End of command line reached
-    cmdArgument,              ## An argument such as a filename
-    cmdLongOption,            ## A long option such as --option
-    cmdShortOption            ## A short option such as -c
+  CmdLineKind* = enum ## The detected command line token.
+    cmdEnd,           ## End of command line reached
+    cmdArgument,      ## An argument such as a filename
+    cmdLongOption,    ## A long option such as --option
+    cmdShortOption    ## A short option such as -c
   OptParser* = object of RootObj ## \
     ## Implementation of the command line parser.
     ##
@@ -172,10 +172,10 @@ type
     longNoVal: seq[string]
     cmds: seq[string]
     idx: int
-    kind*: CmdLineKind        ## The detected command line token
-    key*, val*: TaintedString ## Key and value pair; the key is the option
-                              ## or the argument, and the value is not "" if
-                              ## the option was given a value
+    kind*: CmdLineKind           ## The detected command line token
+    key*, val*: TaintedString    ## Key and value pair; the key is the option
+                                 ## or the argument, and the value is not "" if
+                                 ## the option was given a value
 
 proc parseWord(s: string, i: int, w: var string,
                delim: set[char] = {'\t', ' '}): int =
@@ -197,7 +197,7 @@ when declared(os.paramCount):
   # we cannot provide this for NimRtl creation on Posix, because we can't
   # access the command line arguments then!
 
-  proc initOptParser*(cmdline = "", shortNoVal: set[char]={},
+  proc initOptParser*(cmdline = "", shortNoVal: set[char] = {},
                       longNoVal: seq[string] = @[];
                       allowWhitespaceAfterColon = true): OptParser =
     ## Initializes the command line parser.
@@ -235,7 +235,7 @@ when declared(os.paramCount):
     result.key = TaintedString""
     result.val = TaintedString""
 
-  proc initOptParser*(cmdline: seq[TaintedString], shortNoVal: set[char]={},
+  proc initOptParser*(cmdline: seq[TaintedString], shortNoVal: set[char] = {},
                       longNoVal: seq[string] = @[];
                       allowWhitespaceAfterColon = true): OptParser =
     ## Initializes the command line parser.
@@ -345,7 +345,8 @@ proc next*(p: var OptParser) {.rtl, extern: "npo$1".} =
         inc(i)
         while i < p.cmds[p.idx].len and p.cmds[p.idx][i] in {'\t', ' '}: inc(i)
         # if we're at the end, use the next command line option:
-        if i >= p.cmds[p.idx].len and p.idx < p.cmds.len and p.allowWhitespaceAfterColon:
+        if i >= p.cmds[p.idx].len and p.idx < p.cmds.len and
+            p.allowWhitespaceAfterColon:
           inc p.idx
           i = 0
         if p.idx < p.cmds.len:
@@ -403,7 +404,8 @@ proc remainingArgs*(p: OptParser): seq[TaintedString] {.rtl, extern: "npo$1".} =
   result = @[]
   for i in p.idx..<p.cmds.len: result.add TaintedString(p.cmds[i])
 
-iterator getopt*(p: var OptParser): tuple[kind: CmdLineKind, key, val: TaintedString] =
+iterator getopt*(p: var OptParser): tuple[kind: CmdLineKind, key,
+    val: TaintedString] =
   ## Convenience iterator for iterating over the given
   ## `OptParser<#OptParser>`_.
   ##
@@ -443,7 +445,7 @@ iterator getopt*(p: var OptParser): tuple[kind: CmdLineKind, key, val: TaintedSt
 
 when declared(initOptParser):
   iterator getopt*(cmdline: seq[TaintedString] = commandLineParams(),
-                   shortNoVal: set[char]={}, longNoVal: seq[string] = @[]):
+                   shortNoVal: set[char] = {}, longNoVal: seq[string] = @[]):
              tuple[kind: CmdLineKind, key, val: TaintedString] =
     ## Convenience iterator for iterating over command line arguments.
     ##
@@ -484,7 +486,8 @@ when declared(initOptParser):
     ##   if filename == "":
     ##     # no filename has been written, so we show the help
     ##     writeHelp()
-    var p = initOptParser(cmdline, shortNoVal=shortNoVal, longNoVal=longNoVal)
+    var p = initOptParser(cmdline, shortNoVal = shortNoVal,
+        longNoVal = longNoVal)
     while true:
       next(p)
       if p.kind == cmdEnd: break
diff --git a/lib/pure/parsesql.nim b/lib/pure/parsesql.nim
index 042d5374f..b84c1a744 100644
--- a/lib/pure/parsesql.nim
+++ b/lib/pure/parsesql.nim
@@ -18,12 +18,12 @@ import
 # ------------------- scanner -------------------------------------------------
 
 type
-  TokKind = enum       ## enumeration of all SQL tokens
-    tkInvalid,          ## invalid token
-    tkEof,              ## end of file reached
-    tkIdentifier,       ## abc
-    tkQuotedIdentifier, ## "abc"
-    tkStringConstant,   ## 'abc'
+  TokKind = enum            ## enumeration of all SQL tokens
+    tkInvalid,              ## invalid token
+    tkEof,                  ## end of file reached
+    tkIdentifier,           ## abc
+    tkQuotedIdentifier,     ## "abc"
+    tkStringConstant,       ## 'abc'
     tkEscapeConstant,       ## e'abc'
     tkDollarQuotedConstant, ## $tag$abc$tag$
     tkBitStringConstant,    ## B'00011'
@@ -40,9 +40,9 @@ type
     tkBracketRi,            ## ']'
     tkDot                   ## '.'
 
-  Token = object  # a token
-    kind: TokKind           # the type of the token
-    literal: string          # the parsed (string) literal
+  Token = object    # a token
+    kind: TokKind   # the type of the token
+    literal: string # the parsed (string) literal
 
   SqlLexer* = object of BaseLexer ## the parser object.
     filename: string
@@ -162,7 +162,7 @@ proc skip(c: var SqlLexer) =
         break
     of '/':
       if c.buf[pos+1] == '*':
-        inc(pos,2)
+        inc(pos, 2)
         while true:
           case c.buf[pos]
           of '\0': break
@@ -186,7 +186,7 @@ proc skip(c: var SqlLexer) =
     of '\c', '\L':
       pos = handleCRLF(c, pos)
     else:
-      break                   # EndOfFile also leaves the loop
+      break # EndOfFile also leaves the loop
   c.bufpos = pos
 
 proc getString(c: var SqlLexer, tok: var Token, kind: TokKind) =
@@ -264,12 +264,13 @@ proc getSymbol(c: var SqlLexer, tok: var Token) =
   while true:
     add(tok.literal, c.buf[pos])
     inc(pos)
-    if c.buf[pos] notin {'a'..'z','A'..'Z','0'..'9','_','$', '\128'..'\255'}:
+    if c.buf[pos] notin {'a'..'z', 'A'..'Z', '0'..'9', '_', '$',
+        '\128'..'\255'}:
       break
   c.bufpos = pos
   tok.kind = tkIdentifier
 
-proc getQuotedIdentifier(c: var SqlLexer, tok: var Token, quote='\"') =
+proc getQuotedIdentifier(c: var SqlLexer, tok: var Token, quote = '\"') =
   var pos = c.bufpos + 1
   tok.kind = tkQuotedIdentifier
   while true:
@@ -403,7 +404,7 @@ proc getTok(c: var SqlLexer, tok: var Token) =
   of 'x', 'X':
     if c.buf[c.bufpos + 1] == '\'':
       tok.kind = tkHexStringConstant
-      getBitHexString(c, tok, {'a'..'f','A'..'F','0'..'9'})
+      getBitHexString(c, tok, {'a'..'f', 'A'..'F', '0'..'9'})
     else:
       getSymbol(c, tok)
   of '$': getDollarString(c, tok)
@@ -504,7 +505,7 @@ type
     nkConstraint,
     nkUnique,
     nkIdentity,
-    nkColumnDef,        ## name, datatype, constraints
+    nkColumnDef,      ## name, datatype, constraints
     nkInsert,
     nkUpdate,
     nkDelete,
@@ -543,14 +544,14 @@ const
 
 type
   SqlParseError* = object of ValueError ## Invalid SQL encountered
-  SqlNode* = ref SqlNodeObj        ## an SQL abstract syntax tree node
-  SqlNodeObj* = object              ## an SQL abstract syntax tree node
-    case kind*: SqlNodeKind      ## kind of syntax tree
+  SqlNode* = ref SqlNodeObj ## an SQL abstract syntax tree node
+  SqlNodeObj* = object      ## an SQL abstract syntax tree node
+    case kind*: SqlNodeKind ## kind of syntax tree
     of LiteralNodes:
-      strVal*: string             ## AST leaf: the identifier, numeric literal
-                                  ## string literal, etc.
+      strVal*: string       ## AST leaf: the identifier, numeric literal
+                            ## string literal, etc.
     else:
-      sons*: seq[SqlNode]        ## the node's children
+      sons*: seq[SqlNode]   ## the node's children
 
   SqlParser* = object of SqlLexer ## SQL parser object
     tok: Token
@@ -714,7 +715,8 @@ proc identOrLiteral(p: var SqlParser): SqlNode =
       getTok(p) # we must consume a token here to prevent endless loops!
 
 proc primary(p: var SqlParser): SqlNode =
-  if (p.tok.kind == tkOperator and (p.tok.literal == "+" or p.tok.literal == "-")) or isKeyw(p, "not"):
+  if (p.tok.kind == tkOperator and (p.tok.literal == "+" or p.tok.literal ==
+      "-")) or isKeyw(p, "not"):
     result = newNode(nkPrefix)
     result.add(newNode(nkIdent, p.tok.literal))
     getTok(p)
@@ -1439,7 +1441,7 @@ proc ra(n: SqlNode, s: var SqlWriter) =
     s.addKeyw("enum")
     rs(n, s)
 
-proc renderSQL*(n: SqlNode, upperCase=false): string =
+proc renderSQL*(n: SqlNode, upperCase = false): string =
   ## Converts an SQL abstract syntax tree to its string representation.
   var s: SqlWriter
   s.buffer = ""
@@ -1493,7 +1495,7 @@ when not defined(js):
     finally:
       close(p)
 
-  proc parseSQL*(input: string, filename=""): SqlNode =
+  proc parseSQL*(input: string, filename = ""): SqlNode =
     ## parses the SQL from `input` into an AST and returns the AST.
     ## `filename` is only used for error messages.
     ## Syntax errors raise an `SqlParseError` exception.
diff --git a/lib/pure/parseutils.nim b/lib/pure/parseutils.nim
index e0561116e..053618b41 100644
--- a/lib/pure/parseutils.nim
+++ b/lib/pure/parseutils.nim
@@ -48,9 +48,9 @@
 ## * `other parsers<lib.html#pure-libraries-parsers>`_ for other parsers
 
 
-{.deadCodeElim: on.}  # dce option deprecated
+{.deadCodeElim: on.} # dce option deprecated
 
-{.push debugger:off .} # the user does not want to trace a part
+{.push debugger: off.} # the user does not want to trace a part
                        # of the standard library!
 
 include "system/inclrtl"
@@ -64,8 +64,8 @@ const
 proc toLower(c: char): char {.inline.} =
   result = if c in {'A'..'Z'}: chr(ord(c)-ord('A')+ord('a')) else: c
 
-proc parseBin*[T: SomeInteger](s: string, number: var T, start = 0, maxLen = 0): int
-  {.noSideEffect.} =
+proc parseBin*[T: SomeInteger](s: string, number: var T, start = 0,
+    maxLen = 0): int {.noSideEffect.} =
   ## Parses a binary number and stores its value in ``number``.
   ##
   ## Returns the number of the parsed characters or 0 in case of an error.
@@ -74,7 +74,7 @@ proc parseBin*[T: SomeInteger](s: string, number: var T, start = 0, maxLen = 0):
   ## If ``maxLen == 0``, the parsing continues until the first non-bin character
   ## or to the end of the string. Otherwise, no more than ``maxLen`` characters
   ## are parsed starting from the ``start`` position.
-  ## 
+  ##
   ## It does not check for overflow. If the value represented by the string is
   ## too big to fit into ``number``, only the value of last fitting characters
   ## will be stored in ``number`` without producing an error.
@@ -111,8 +111,8 @@ proc parseBin*[T: SomeInteger](s: string, number: var T, start = 0, maxLen = 0):
     number = output
     result = i - start
 
-proc parseOct*[T: SomeInteger](s: string, number: var T, start = 0, maxLen = 0): int
-  {.noSideEffect.} =
+proc parseOct*[T: SomeInteger](s: string, number: var T, start = 0,
+    maxLen = 0): int {.noSideEffect.} =
   ## Parses an octal number and stores its value in ``number``.
   ##
   ## Returns the number of the parsed characters or 0 in case of an error.
@@ -121,7 +121,7 @@ proc parseOct*[T: SomeInteger](s: string, number: var T, start = 0, maxLen = 0):
   ## If ``maxLen == 0``, the parsing continues until the first non-oct character
   ## or to the end of the string. Otherwise, no more than ``maxLen`` characters
   ## are parsed starting from the ``start`` position.
-  ## 
+  ##
   ## It does not check for overflow. If the value represented by the string is
   ## too big to fit into ``number``, only the value of last fitting characters
   ## will be stored in ``number`` without producing an error.
@@ -158,8 +158,8 @@ proc parseOct*[T: SomeInteger](s: string, number: var T, start = 0, maxLen = 0):
     number = output
     result = i - start
 
-proc parseHex*[T: SomeInteger](s: string, number: var T, start = 0, maxLen = 0): int
-  {.noSideEffect.} =
+proc parseHex*[T: SomeInteger](s: string, number: var T, start = 0,
+    maxLen = 0): int {.noSideEffect.} =
   ## Parses a hexadecimal number and stores its value in ``number``.
   ##
   ## Returns the number of the parsed characters or 0 in case of an error.
@@ -168,7 +168,7 @@ proc parseHex*[T: SomeInteger](s: string, number: var T, start = 0, maxLen = 0):
   ## If ``maxLen == 0``, the parsing continues until the first non-hex character
   ## or to the end of the string. Otherwise, no more than ``maxLen`` characters
   ## are parsed starting from the ``start`` position.
-  ## 
+  ##
   ## It does not check for overflow. If the value represented by the string is
   ## too big to fit into ``number``, only the value of last fitting characters
   ## will be stored in ``number`` without producing an error.
@@ -468,7 +468,7 @@ proc parseInt*(s: string, number: var int, start = 0): int {.
     number = int(res)
 
 proc parseSaturatedNatural*(s: string, b: var int, start = 0): int {.
-  raises: [].}=
+    raises: [].} =
   ## Parses a natural number into ``b``. This cannot raise an overflow
   ## error. ``high(int)`` is returned for an overflow.
   ## The number of processed character is returned.
@@ -573,13 +573,13 @@ proc parseFloat*(s: string, number: var float, start = 0): int {.
     number = bf
 
 type
-  InterpolatedKind* = enum   ## Describes for `interpolatedFragments`
-                             ## which part of the interpolated string is
-                             ## yielded; for example in "str$$$var${expr}"
-    ikStr,                   ## ``str`` part of the interpolated string
-    ikDollar,                ## escaped ``$`` part of the interpolated string
-    ikVar,                   ## ``var`` part of the interpolated string
-    ikExpr                   ## ``expr`` part of the interpolated string
+  InterpolatedKind* = enum ## Describes for `interpolatedFragments`
+                           ## which part of the interpolated string is
+                           ## yielded; for example in "str$$$var${expr}"
+    ikStr,                 ## ``str`` part of the interpolated string
+    ikDollar,              ## escaped ``$`` part of the interpolated string
+    ikVar,                 ## ``var`` part of the interpolated string
+    ikExpr                 ## ``expr`` part of the interpolated string
 
 iterator interpolatedFragments*(s: string): tuple[kind: InterpolatedKind,
   value: string] =
@@ -649,7 +649,7 @@ when isMainModule:
   import sequtils
   let input = "$test{}  $this is ${an{  example}}  "
   let expected = @[(ikVar, "test"), (ikStr, "{}  "), (ikVar, "this"),
-                   (ikStr, " is "), (ikExpr, "an{  example}"), (ikStr, "  ")]
+                    (ikStr, " is "), (ikExpr, "an{  example}"), (ikStr, "  ")]
   doAssert toSeq(interpolatedFragments(input)) == expected
 
   var value = 0
diff --git a/lib/pure/parsexml.nim b/lib/pure/parsexml.nim
index c1268bc1d..576484231 100644
--- a/lib/pure/parsexml.nim
+++ b/lib/pure/parsexml.nim
@@ -155,41 +155,41 @@ import
 
 type
   XmlEventKind* = enum ## enumeration of all events that may occur when parsing
-    xmlError,           ## an error occurred during parsing
-    xmlEof,             ## end of file reached
-    xmlCharData,        ## character data
-    xmlWhitespace,      ## whitespace has been parsed
-    xmlComment,         ## a comment has been parsed
-    xmlPI,              ## processing instruction (``<?name something ?>``)
-    xmlElementStart,    ## ``<elem>``
-    xmlElementEnd,      ## ``</elem>``
-    xmlElementOpen,     ## ``<elem
-    xmlAttribute,       ## ``key = "value"`` pair
-    xmlElementClose,    ## ``>``
-    xmlCData,           ## ``<![CDATA[`` ... data ... ``]]>``
-    xmlEntity,          ## &entity;
-    xmlSpecial          ## ``<! ... data ... >``
-
-  XmlErrorKind* = enum       ## enumeration that lists all errors that can occur
-    errNone,                 ## no error
-    errEndOfCDataExpected,   ## ``]]>`` expected
-    errNameExpected,         ## name expected
-    errSemicolonExpected,    ## ``;`` expected
-    errQmGtExpected,         ## ``?>`` expected
-    errGtExpected,           ## ``>`` expected
-    errEqExpected,           ## ``=`` expected
-    errQuoteExpected,        ## ``"`` or ``'`` expected
-    errEndOfCommentExpected  ## ``-->`` expected
+    xmlError,          ## an error occurred during parsing
+    xmlEof,            ## end of file reached
+    xmlCharData,       ## character data
+    xmlWhitespace,     ## whitespace has been parsed
+    xmlComment,        ## a comment has been parsed
+    xmlPI,             ## processing instruction (``<?name something ?>``)
+    xmlElementStart,   ## ``<elem>``
+    xmlElementEnd,     ## ``</elem>``
+    xmlElementOpen,    ## ``<elem
+    xmlAttribute,      ## ``key = "value"`` pair
+    xmlElementClose,   ## ``>``
+    xmlCData,          ## ``<![CDATA[`` ... data ... ``]]>``
+    xmlEntity,         ## &entity;
+    xmlSpecial         ## ``<! ... data ... >``
+
+  XmlErrorKind* = enum        ## enumeration that lists all errors that can occur
+    errNone,                  ## no error
+    errEndOfCDataExpected,    ## ``]]>`` expected
+    errNameExpected,          ## name expected
+    errSemicolonExpected,     ## ``;`` expected
+    errQmGtExpected,          ## ``?>`` expected
+    errGtExpected,            ## ``>`` expected
+    errEqExpected,            ## ``=`` expected
+    errQuoteExpected,         ## ``"`` or ``'`` expected
+    errEndOfCommentExpected   ## ``-->`` expected
     errAttributeValueExpected ## non-empty attribute value expected
 
   ParserState = enum
     stateStart, stateNormal, stateAttr, stateEmptyElementTag, stateError
 
-  XmlParseOption* = enum  ## options for the XML parser
-    reportWhitespace,      ## report whitespace
-    reportComments         ## report comments
-    allowUnquotedAttribs   ## allow unquoted attribute values (for HTML)
-    allowEmptyAttribs      ## allow empty attributes (without explicit value)
+  XmlParseOption* = enum ## options for the XML parser
+    reportWhitespace,    ## report whitespace
+    reportComments       ## report comments
+    allowUnquotedAttribs ## allow unquoted attribute values (for HTML)
+    allowEmptyAttribs    ## allow empty attributes (without explicit value)
 
   XmlParser* = object of BaseLexer ## the parser object.
     a, b, c: string
@@ -399,7 +399,7 @@ proc parseComment(my: var XmlParser) =
   my.bufpos = pos
   my.kind = xmlComment
 
-proc parseWhitespace(my: var XmlParser, skip=false) =
+proc parseWhitespace(my: var XmlParser, skip = false) =
   var pos = my.bufpos
   while true:
     case my.buf[pos]
@@ -562,7 +562,7 @@ proc parseTag(my: var XmlParser) =
     my.kind = xmlCharData
     add(my.a, '<')
     return
-  parseWhitespace(my, skip=true)
+  parseWhitespace(my, skip = true)
   if my.buf[my.bufpos] in NameStartChar:
     # an attribute follows:
     my.kind = xmlElementOpen
@@ -588,7 +588,7 @@ proc parseEndTag(my: var XmlParser) =
   my.bufpos = lexbase.handleRefillChar(my, my.bufpos+1)
   #inc(my.bufpos, 2)
   parseName(my, my.a)
-  parseWhitespace(my, skip=true)
+  parseWhitespace(my, skip = true)
   if my.buf[my.bufpos] == '>':
     inc(my.bufpos)
   else:
@@ -606,7 +606,7 @@ proc parseAttribute(my: var XmlParser) =
     return
 
   let startPos = my.bufpos
-  parseWhitespace(my, skip=true)
+  parseWhitespace(my, skip = true)
   if my.buf[my.bufpos] != '=':
     if allowEmptyAttribs notin my.options or
         (my.buf[my.bufpos] != '>' and my.bufpos == startPos):
@@ -614,7 +614,7 @@ proc parseAttribute(my: var XmlParser) =
     return
 
   inc(my.bufpos)
-  parseWhitespace(my, skip=true)
+  parseWhitespace(my, skip = true)
 
   var pos = my.bufpos
   if my.buf[pos] in {'\'', '"'}:
@@ -678,7 +678,7 @@ proc parseAttribute(my: var XmlParser) =
       add(my.b, my.buf[pos])
       inc pos
   my.bufpos = pos
-  parseWhitespace(my, skip=true)
+  parseWhitespace(my, skip = true)
 
 proc parseCharData(my: var XmlParser) =
   var pos = my.bufpos
@@ -711,8 +711,9 @@ proc rawGetTok(my: var XmlParser) =
     of '/':
       parseEndTag(my)
     of '!':
-      if my.buf[pos+2] == '[' and my.buf[pos+3] == 'C' and my.buf[pos+4] == 'D' and
-          my.buf[pos+5] == 'A' and my.buf[pos+6] == 'T' and my.buf[pos+7] == 'A' and
+      if my.buf[pos+2] == '[' and my.buf[pos+3] == 'C' and
+          my.buf[pos+4] == 'D' and my.buf[pos+5] == 'A' and
+          my.buf[pos+6] == 'T' and my.buf[pos+7] == 'A' and
           my.buf[pos+8] == '[':
         parseCDATA(my)
       elif my.buf[pos+2] == '-' and my.buf[pos+3] == '-':
@@ -742,7 +743,8 @@ proc getTok(my: var XmlParser) =
     of xmlComment:
       if my.options.contains(reportComments): break
     of xmlWhitespace:
-      if my.options.contains(reportWhitespace) or lastKind in {xmlCharData, xmlComment, xmlEntity}:
+      if my.options.contains(reportWhitespace) or lastKind in {xmlCharData,
+          xmlComment, xmlEntity}:
         break
     else: break
 
diff --git a/lib/pure/xmlparser.nim b/lib/pure/xmlparser.nim
index 597b80eb5..2a2d19dca 100644
--- a/lib/pure/xmlparser.nim
+++ b/lib/pure/xmlparser.nim
@@ -59,7 +59,7 @@ proc parse(x: var XmlParser, errors: var seq[string]): XmlNode =
   of xmlError:
     errors.add(errorMsg(x))
     next(x)
-  of xmlElementStart:    ## ``<elem>``
+  of xmlElementStart: ## ``<elem>``
     result = newElement(x.elementName)
     next(x)
     untilElementEnd(x, result, errors)
@@ -169,4 +169,5 @@ when isMainModule:
 
     block bug1518:
       var err: seq[string] = @[]
-      assert $parsexml(newStringStream"<tag>One &amp; two</tag>", "temp.xml", err) == "<tag>One &amp; two</tag>"
+      assert $parsexml(newStringStream"<tag>One &amp; two</tag>", "temp.xml",
+          err) == "<tag>One &amp; two</tag>"