Diffstat (limited to 'lib/packages')
-rw-r--r-- | lib/packages/docutils/dochelpers.nim      |  298
-rw-r--r-- | lib/packages/docutils/docutils.nimble     |    4
-rw-r--r-- | lib/packages/docutils/docutils.nimble.old |    7
-rw-r--r-- | lib/packages/docutils/highlite.nim        |  345
-rw-r--r-- | lib/packages/docutils/rst.nim             | 3891
-rw-r--r-- | lib/packages/docutils/rstast.nim          |  194
-rw-r--r-- | lib/packages/docutils/rstgen.nim          |  947
-rw-r--r-- | lib/packages/docutils/rstidx.nim          |  141
8 files changed, 4392 insertions, 1435 deletions
diff --git a/lib/packages/docutils/dochelpers.nim b/lib/packages/docutils/dochelpers.nim new file mode 100644 index 000000000..0a41d85b5 --- /dev/null +++ b/lib/packages/docutils/dochelpers.nim @@ -0,0 +1,298 @@ +# +# +# Nim's Runtime Library +# (c) Copyright 2021 Nim contributors +# +# See the file "copying.txt", included in this +# distribution, for details about the copyright. +# + +## Integration helpers between ``docgen.nim`` and ``rst.nim``. +## +## Function `toLangSymbol(linkText)`_ produces a signature `docLink` of +## `type LangSymbol`_ in ``rst.nim``, while `match(generated, docLink)`_ +## matches it with `generated`, produced from `PNode` by ``docgen.rst``. + +import rstast +import std/strutils + +when defined(nimPreviewSlimSystem): + import std/[assertions, syncio] + + +type + LangSymbol* = object ## symbol signature in Nim + symKind*: string ## "proc", "const", "type", etc + symTypeKind*: string ## ""|enum|object|tuple - + ## valid only when `symKind == "type"` + name*: string ## plain symbol name without any parameters + generics*: string ## generic parameters (without brackets) + isGroup*: bool ## is LangSymbol a group with overloads? + # the following fields are valid iff `isGroup` == false + # (always false when parsed by `toLangSymbol` because link like foo_ + # can point to just a single symbol foo, e.g. proc). + parametersProvided*: bool ## to disambiguate `proc f`_ and `proc f()`_ + parameters*: seq[tuple[name: string, `type`: string]] + ## name-type seq, e.g. for proc + outType*: string ## result type, e.g. for proc + +proc `$`*(s: LangSymbol): string = # for debug + ("(symkind=$1, symTypeKind=$2, name=$3, generics=$4, isGroup=$5, " & + "parametersProvided=$6, parameters=$7, outType=$8)") % [ + s.symKind, s.symTypeKind , s.name, s.generics, $s.isGroup, + $s.parametersProvided, $s.parameters, s.outType] + +func nimIdentBackticksNormalize*(s: string): string = + ## Normalizes the string `s` as a Nim identifier. + ## + ## Unlike `nimIdentNormalize` removes spaces and backticks. + ## + ## .. Warning:: No checking (e.g. that identifiers cannot start from + ## digits or '_', or that number of backticks is even) is performed. + runnableExamples: + doAssert nimIdentBackticksNormalize("Foo_bar") == "Foobar" + doAssert nimIdentBackticksNormalize("FoO BAr") == "Foobar" + doAssert nimIdentBackticksNormalize("`Foo BAR`") == "Foobar" + doAssert nimIdentBackticksNormalize("` Foo BAR `") == "Foobar" + # not a valid identifier: + doAssert nimIdentBackticksNormalize("`_x_y`") == "_xy" + result = newString(s.len) + var firstChar = true + var j = 0 + for i in 0..len(s) - 1: + if s[i] in {'A'..'Z'}: + if not firstChar: # to lowercase + result[j] = chr(ord(s[i]) + (ord('a') - ord('A'))) + else: + result[j] = s[i] + firstChar = false + inc j + elif s[i] notin {'_', ' ', '`'}: + result[j] = s[i] + inc j + firstChar = false + elif s[i] == '_' and firstChar: + result[j] = '_' + inc j + firstChar = false + else: discard # just omit '`' or ' ' + if j != s.len: setLen(result, j) + +proc langSymbolGroup*(kind: string, name: string): LangSymbol = + if kind notin ["proc", "func", "macro", "method", "iterator", + "template", "converter"]: + raise newException(ValueError, "unknown symbol kind $1" % [kind]) + result = LangSymbol(symKind: kind, name: name, isGroup: true) + +proc toLangSymbol*(linkText: PRstNode): LangSymbol = + ## Parses `linkText` into a more structured form using a state machine. 
+ ## + ## This proc is designed to allow link syntax with operators even + ## without escaped backticks inside: + ## + ## `proc *`_ + ## `proc []`_ + ## + ## This proc should be kept in sync with the `renderTypes` proc from + ## ``compiler/typesrenderer.nim``. + template fail(msg: string) = + raise newException(ValueError, msg) + if linkText.kind notin {rnRstRef, rnInner}: + fail("toLangSymbol: wrong input kind " & $linkText.kind) + + const NimDefs = ["proc", "func", "macro", "method", "iterator", + "template", "converter", "const", "type", "var", + "enum", "object", "tuple", "module"] + template resolveSymKind(x: string) = + if x in ["enum", "object", "tuple"]: + result.symKind = "type" + result.symTypeKind = x + else: + result.symKind = x + type + State = enum + inBeginning + afterSymKind + beforeSymbolName # auxiliary state to catch situations like `proc []`_ after space + atSymbolName + afterSymbolName + genericsPar + parameterName + parameterType + outType + var state = inBeginning + var curIdent = "" + template flushIdent() = + if curIdent != "": + case state + of inBeginning: fail("incorrect state inBeginning") + of afterSymKind: resolveSymKind curIdent + of beforeSymbolName: fail("incorrect state beforeSymbolName") + of atSymbolName: result.name = curIdent.nimIdentBackticksNormalize + of afterSymbolName: fail("incorrect state afterSymbolName") + of genericsPar: result.generics = curIdent + of parameterName: result.parameters.add (curIdent, "") + of parameterType: + for a in countdown(result.parameters.len - 1, 0): + if result.parameters[a].`type` == "": + result.parameters[a].`type` = curIdent + of outType: result.outType = curIdent + curIdent = "" + var parens = 0 + let L = linkText.sons.len + template s(i: int): string = linkText.sons[i].text + var i = 0 + template nextState = + case s(i) + of " ": + if state == afterSymKind: + flushIdent + state = beforeSymbolName + of "`": + curIdent.add "`" + inc i + while i < L: # add contents between ` ` as a whole + curIdent.add s(i) + if s(i) == "`": + break + inc i + curIdent = curIdent.nimIdentBackticksNormalize + if state in {inBeginning, afterSymKind, beforeSymbolName}: + state = atSymbolName + flushIdent + state = afterSymbolName + of "[": + if state notin {inBeginning, afterSymKind, beforeSymbolName}: + inc parens + if state in {inBeginning, afterSymKind, beforeSymbolName}: + state = atSymbolName + curIdent.add s(i) + elif state in {atSymbolName, afterSymbolName} and parens == 1: + flushIdent + state = genericsPar + curIdent.add s(i) + else: curIdent.add s(i) + of "]": + if state notin {inBeginning, afterSymKind, beforeSymbolName, atSymbolName}: + dec parens + if state == genericsPar and parens == 0: + curIdent.add s(i) + flushIdent + else: curIdent.add s(i) + of "(": + inc parens + if state in {inBeginning, afterSymKind, beforeSymbolName}: + result.parametersProvided = true + state = atSymbolName + flushIdent + state = parameterName + elif state in {atSymbolName, afterSymbolName, genericsPar} and parens == 1: + result.parametersProvided = true + flushIdent + state = parameterName + else: curIdent.add s(i) + of ")": + dec parens + if state in {parameterName, parameterType} and parens == 0: + flushIdent + state = outType + else: curIdent.add s(i) + of "{": # remove pragmas + while i < L: + if s(i) == "}": + break + inc i + of ",", ";": + if state in {parameterName, parameterType} and parens == 1: + flushIdent + state = parameterName + else: curIdent.add s(i) + of "*": # skip export symbol + if state == atSymbolName: + flushIdent + 
state = afterSymbolName + elif state == afterSymbolName: + discard + else: curIdent.add "*" + of ":": + if state == outType: discard + elif state == parameterName: + flushIdent + state = parameterType + else: curIdent.add ":" + else: + let isPostfixSymKind = i > 0 and i == L - 1 and + result.symKind == "" and s(i) in NimDefs + if isPostfixSymKind: # for links like `foo proc`_ + resolveSymKind s(i) + else: + case state + of inBeginning: + if s(i) in NimDefs: + state = afterSymKind + else: + state = atSymbolName + curIdent.add s(i) + of afterSymKind, beforeSymbolName: + state = atSymbolName + curIdent.add s(i) + of parameterType: + case s(i) + of "ref": curIdent.add "ref." + of "ptr": curIdent.add "ptr." + of "var": discard + else: curIdent.add s(i).nimIdentBackticksNormalize + of atSymbolName: + curIdent.add s(i) + else: + curIdent.add s(i).nimIdentBackticksNormalize + while i < L: + nextState + inc i + if state == afterSymKind: # treat `type`_ as link to symbol `type` + state = atSymbolName + flushIdent + result.isGroup = false + +proc match*(generated: LangSymbol, docLink: LangSymbol): bool = + ## Returns true if `generated` can be a target for `docLink`. + ## If `generated` is an overload group then only `symKind` and `name` + ## are compared for success. + result = true + if docLink.symKind != "": + if generated.symKind == "proc": + result = docLink.symKind in ["proc", "func"] + else: + result = generated.symKind == docLink.symKind + if result and docLink.symKind == "type" and docLink.symTypeKind != "": + result = generated.symTypeKind == docLink.symTypeKind + if not result: return + result = generated.name == docLink.name + if not result: return + if generated.isGroup: + # if `()` were added then it's not a reference to the whole group: + return not docLink.parametersProvided + if docLink.generics != "": + result = generated.generics == docLink.generics + if not result: return + if docLink.outType != "": + result = generated.outType == docLink.outType + if not result: return + if docLink.parametersProvided: + result = generated.parameters.len == docLink.parameters.len + if not result: return + var onlyType = false + for i in 0 ..< generated.parameters.len: + let g = generated.parameters[i] + let d = docLink.parameters[i] + if i == 0: + if g.`type` == d.name: + onlyType = true # only types, not names, are provided in `docLink` + if onlyType: + result = g.`type` == d.name + else: + if d.`type` != "": + result = g.`type` == d.`type` + if not result: return + result = g.name == d.name + if not result: return diff --git a/lib/packages/docutils/docutils.nimble b/lib/packages/docutils/docutils.nimble deleted file mode 100644 index f1683c515..000000000 --- a/lib/packages/docutils/docutils.nimble +++ /dev/null @@ -1,4 +0,0 @@ -version = "0.10.0" -author = "Andreas Rumpf" -description = "Nim's reStructuredText processor." -license = "MIT" diff --git a/lib/packages/docutils/docutils.nimble.old b/lib/packages/docutils/docutils.nimble.old new file mode 100644 index 000000000..f97c3bdde --- /dev/null +++ b/lib/packages/docutils/docutils.nimble.old @@ -0,0 +1,7 @@ +# xxx disabled this as this isn't really a nimble package and it affects logic +# used to compute canonical imports, refs https://github.com/nim-lang/Nim/pull/16999#issuecomment-805442914 + +version = "0.10.0" +author = "Andreas Rumpf" +description = "Nim's reStructuredText processor." 
+license = "MIT" diff --git a/lib/packages/docutils/highlite.nim b/lib/packages/docutils/highlite.nim index 796c17d7d..f8376f46c 100644 --- a/lib/packages/docutils/highlite.nim +++ b/lib/packages/docutils/highlite.nim @@ -11,11 +11,9 @@ ## Currently only few languages are supported, other languages may be added. ## The interface supports one language nested in another. ## -## **Note:** Import ``packages/docutils/highlite`` to use this module -## ## You can use this to build your own syntax highlighting, check this example: ## -## .. code::nim +## ```Nim ## let code = """for x in $int.high: echo x.ord mod 2 == 0""" ## var toknizr: GeneralTokenizer ## initGeneralTokenizer(toknizr, code) @@ -33,18 +31,43 @@ ## else: ## echo toknizr.kind # All the kinds of tokens can be processed here. ## echo substr(code, toknizr.start, toknizr.length + toknizr.start - 1) +## ``` ## -## The proc ``getSourceLanguage`` can get the language ``enum`` from a string: -## -## .. code::nim +## The proc `getSourceLanguage` can get the language `enum` from a string: +## ```Nim ## for l in ["C", "c++", "jAvA", "Nim", "c#"]: echo getSourceLanguage(l) +## ``` +## +## There is also a `Cmd` pseudo-language supported, which is a simple generic +## shell/cmdline tokenizer (UNIX shell/Powershell/Windows Command): +## no escaping, no programming language constructs besides variable definition +## at the beginning of line. It supports these operators: +## ```Cmd +## & && | || ( ) '' "" ; # for comments +## ``` +## +## Instead of escaping always use quotes like here +## `nimgrep --ext:'nim|nims' file.name`:cmd: shows how to input ``|``. +## Any argument that contains ``.`` or ``/`` or ``\`` will be treated +## as a file or directory. ## +## In addition to `Cmd` there is also `Console` language for +## displaying interactive sessions. +## Lines with a command should start with ``$``, other lines are considered +## as program output. 
import - strutils -from algorithm import binarySearch + std/strutils +from std/algorithm import binarySearch + +when defined(nimPreviewSlimSystem): + import std/[assertions, syncio] + type + SourceLanguage* = enum + langNone, langNim, langCpp, langCsharp, langC, langJava, + langYaml, langPython, langCmd, langConsole TokenClass* = enum gtEof, gtNone, gtWhitespace, gtDecNumber, gtBinNumber, gtHexNumber, gtOctNumber, gtFloatNumber, gtIdentifier, gtKeyword, gtStringLit, @@ -52,28 +75,31 @@ type gtOperator, gtPunctuation, gtComment, gtLongComment, gtRegularExpression, gtTagStart, gtTagEnd, gtKey, gtValue, gtRawData, gtAssembler, gtPreprocessor, gtDirective, gtCommand, gtRule, gtHyperlink, gtLabel, - gtReference, gtOther + gtReference, gtPrompt, gtProgramOutput, gtProgram, gtOption, gtOther GeneralTokenizer* = object of RootObj kind*: TokenClass start*, length*: int buf: cstring pos: int state: TokenClass - - SourceLanguage* = enum - langNone, langNim, langCpp, langCsharp, langC, langJava, - langYaml + lang: SourceLanguage const sourceLanguageToStr*: array[SourceLanguage, string] = ["none", - "Nim", "C++", "C#", "C", "Java", "Yaml"] + "Nim", "C++", "C#", "C", "Java", "Yaml", "Python", "Cmd", "Console"] + sourceLanguageToAlpha*: array[SourceLanguage, string] = ["none", + "Nim", "cpp", "csharp", "C", "Java", "Yaml", "Python", "Cmd", "Console"] + ## list of languages spelled with alpabetic characters tokenClassToStr*: array[TokenClass, string] = ["Eof", "None", "Whitespace", "DecNumber", "BinNumber", "HexNumber", "OctNumber", "FloatNumber", "Identifier", "Keyword", "StringLit", "LongStringLit", "CharLit", "EscapeSequence", "Operator", "Punctuation", "Comment", "LongComment", "RegularExpression", "TagStart", "TagEnd", "Key", "Value", "RawData", "Assembler", "Preprocessor", "Directive", "Command", "Rule", "Hyperlink", - "Label", "Reference", "Other"] + "Label", "Reference", "Prompt", "ProgramOutput", + # start from lower-case if there is a corresponding RST role (see rst.nim) + "program", "option", + "Other"] # The following list comes from doc/keywords.txt, make sure it is # synchronized with this array by running the module itself as a test case. @@ -90,9 +116,11 @@ const "xor", "yield"] proc getSourceLanguage*(name: string): SourceLanguage = - for i in countup(succ(low(SourceLanguage)), high(SourceLanguage)): + for i in succ(low(SourceLanguage)) .. 
high(SourceLanguage): if cmpIgnoreStyle(name, sourceLanguageToStr[i]) == 0: return i + if cmpIgnoreStyle(name, sourceLanguageToAlpha[i]) == 0: + return i result = langNone proc initGeneralTokenizer*(g: var GeneralTokenizer, buf: cstring) = @@ -101,9 +129,8 @@ proc initGeneralTokenizer*(g: var GeneralTokenizer, buf: cstring) = g.start = 0 g.length = 0 g.state = low(TokenClass) - var pos = 0 # skip initial whitespace: - while g.buf[pos] in {' ', '\x09'..'\x0D'}: inc(pos) - g.pos = pos + g.lang = low(SourceLanguage) + g.pos = 0 proc initGeneralTokenizer*(g: var GeneralTokenizer, buf: string) = initGeneralTokenizer(g, cstring(buf)) @@ -161,7 +188,10 @@ const OpChars = {'+', '-', '*', '/', '\\', '<', '>', '!', '?', '^', '.', '|', '=', '%', '&', '$', '@', '~', ':'} -proc nimNextToken(g: var GeneralTokenizer) = +proc isKeyword(x: openArray[string], y: string): int = + binarySearch(x, y) + +proc nimNextToken(g: var GeneralTokenizer, keywords: openArray[string] = @[]) = const hexChars = {'0'..'9', 'A'..'F', 'a'..'f', '_'} octChars = {'0'..'7', '_'} @@ -170,36 +200,38 @@ proc nimNextToken(g: var GeneralTokenizer) = var pos = g.pos g.start = g.pos if g.state == gtStringLit: - g.kind = gtStringLit - while true: + if g.buf[pos] == '\\': + g.kind = gtEscapeSequence + inc(pos) case g.buf[pos] - of '\\': - g.kind = gtEscapeSequence + of 'x', 'X': inc(pos) + if g.buf[pos] in hexChars: inc(pos) + if g.buf[pos] in hexChars: inc(pos) + of '0'..'9': + while g.buf[pos] in {'0'..'9'}: inc(pos) + of '\0': + g.state = gtNone + else: inc(pos) + else: + g.kind = gtStringLit + while true: case g.buf[pos] - of 'x', 'X': + of '\\': + break + of '\0', '\r', '\n': + g.state = gtNone + break + of '\"': inc(pos) - if g.buf[pos] in hexChars: inc(pos) - if g.buf[pos] in hexChars: inc(pos) - of '0'..'9': - while g.buf[pos] in {'0'..'9'}: inc(pos) - of '\0': g.state = gtNone + break else: inc(pos) - break - of '\0', '\x0D', '\x0A': - g.state = gtNone - break - of '\"': - inc(pos) - g.state = gtNone - break - else: inc(pos) else: case g.buf[pos] - of ' ', '\x09'..'\x0D': + of ' ', '\t'..'\r': g.kind = gtWhitespace - while g.buf[pos] in {' ', '\x09'..'\x0D'}: inc(pos) + while g.buf[pos] in {' ', '\t'..'\r'}: inc(pos) of '#': g.kind = gtComment inc(pos) @@ -207,7 +239,7 @@ proc nimNextToken(g: var GeneralTokenizer) = if g.buf[pos] == '#': inc(pos) isDoc = true - if g.buf[pos] == '[': + if g.buf[pos] == '[' and g.lang == langNim: g.kind = gtLongComment var nesting = 0 while true: @@ -236,7 +268,7 @@ proc nimNextToken(g: var GeneralTokenizer) = else: inc pos else: - while g.buf[pos] notin {'\0', '\x0A', '\x0D'}: inc(pos) + while g.buf[pos] notin {'\0', '\n', '\r'}: inc(pos) of 'a'..'z', 'A'..'Z', '_', '\x80'..'\xFF': var id = "" while g.buf[pos] in SymChars + {'_'}: @@ -260,12 +292,15 @@ proc nimNextToken(g: var GeneralTokenizer) = else: g.kind = gtRawData inc(pos) - while not (g.buf[pos] in {'\0', '\x0A', '\x0D'}): + while not (g.buf[pos] in {'\0', '\n', '\r'}): if g.buf[pos] == '"' and g.buf[pos+1] != '"': break inc(pos) if g.buf[pos] == '\"': inc(pos) else: - g.kind = nimGetKeyword(id) + if g.lang == langNim: + g.kind = nimGetKeyword(id) + elif isKeyword(keywords, id) >= 0: + g.kind = gtKeyword of '0': inc(pos) case g.buf[pos] @@ -289,17 +324,18 @@ proc nimNextToken(g: var GeneralTokenizer) = pos = nimNumber(g, pos) of '\'': inc(pos) - g.kind = gtCharLit - while true: - case g.buf[pos] - of '\0', '\x0D', '\x0A': - break - of '\'': - inc(pos) - break - of '\\': - inc(pos, 2) - else: inc(pos) + if g.kind != gtPunctuation: + g.kind 
= gtCharLit + while true: + case g.buf[pos] + of '\0', '\r', '\n': + break + of '\'': + inc(pos) + break + of '\\': + inc(pos, 2) + else: inc(pos) of '\"': inc(pos) if (g.buf[pos] == '\"') and (g.buf[pos + 1] == '\"'): @@ -320,7 +356,7 @@ proc nimNextToken(g: var GeneralTokenizer) = g.kind = gtStringLit while true: case g.buf[pos] - of '\0', '\x0D', '\x0A': + of '\0', '\r', '\n': break of '\"': inc(pos) @@ -394,12 +430,6 @@ proc generalStrLit(g: var GeneralTokenizer, position: int): int = inc(pos) result = pos -proc isKeyword(x: openArray[string], y: string): int = - binarySearch(x, y) - -proc isKeywordIgnoreCase(x: openArray[string], y: string): int = - binarySearch(x, y, cmpIgnoreCase) - type TokenizerFlag = enum hasPreprocessor, hasNestedComments @@ -432,7 +462,7 @@ proc clikeNextToken(g: var GeneralTokenizer, keywords: openArray[string], g.state = gtNone else: inc(pos) break - of '\0', '\x0D', '\x0A': + of '\0', '\r', '\n': g.state = gtNone break of '\"': @@ -442,14 +472,14 @@ proc clikeNextToken(g: var GeneralTokenizer, keywords: openArray[string], else: inc(pos) else: case g.buf[pos] - of ' ', '\x09'..'\x0D': + of ' ', '\t'..'\r': g.kind = gtWhitespace - while g.buf[pos] in {' ', '\x09'..'\x0D'}: inc(pos) + while g.buf[pos] in {' ', '\t'..'\r'}: inc(pos) of '/': inc(pos) if g.buf[pos] == '/': g.kind = gtComment - while not (g.buf[pos] in {'\0', '\x0A', '\x0D'}): inc(pos) + while not (g.buf[pos] in {'\0', '\n', '\r'}): inc(pos) elif g.buf[pos] == '*': g.kind = gtLongComment var nested = 0 @@ -469,6 +499,9 @@ proc clikeNextToken(g: var GeneralTokenizer, keywords: openArray[string], of '\0': break else: inc(pos) + else: + g.kind = gtOperator + while g.buf[pos] in OpChars: inc(pos) of '#': inc(pos) if hasPreprocessor in flags: @@ -589,9 +622,9 @@ proc javaNextToken(g: var GeneralTokenizer) = proc yamlPlainStrLit(g: var GeneralTokenizer, pos: var int) = g.kind = gtStringLit - while g.buf[pos] notin {'\0', '\x09'..'\x0D', ',', ']', '}'}: + while g.buf[pos] notin {'\0', '\t'..'\r', ',', ']', '}'}: if g.buf[pos] == ':' and - g.buf[pos + 1] in {'\0', '\x09'..'\x0D', ' '}: + g.buf[pos + 1] in {'\0', '\t'..'\r', ' '}: break inc(pos) @@ -604,14 +637,14 @@ proc yamlPossibleNumber(g: var GeneralTokenizer, pos: var int) = while g.buf[pos] in {'0'..'9'}: inc(pos) else: yamlPlainStrLit(g, pos) if g.kind == gtNone: - if g.buf[pos] in {'\0', '\x09'..'\x0D', ' ', ',', ']', '}'}: + if g.buf[pos] in {'\0', '\t'..'\r', ' ', ',', ']', '}'}: g.kind = gtDecNumber elif g.buf[pos] == '.': inc(pos) if g.buf[pos] notin {'0'..'9'}: yamlPlainStrLit(g, pos) else: while g.buf[pos] in {'0'..'9'}: inc(pos) - if g.buf[pos] in {'\0', '\x09'..'\x0D', ' ', ',', ']', '}'}: + if g.buf[pos] in {'\0', '\t'..'\r', ' ', ',', ']', '}'}: g.kind = gtFloatNumber if g.kind == gtNone: if g.buf[pos] in {'e', 'E'}: @@ -620,13 +653,13 @@ proc yamlPossibleNumber(g: var GeneralTokenizer, pos: var int) = if g.buf[pos] notin {'0'..'9'}: yamlPlainStrLit(g, pos) else: while g.buf[pos] in {'0'..'9'}: inc(pos) - if g.buf[pos] in {'\0', '\x09'..'\x0D', ' ', ',', ']', '}'}: + if g.buf[pos] in {'\0', '\t'..'\r', ' ', ',', ']', '}'}: g.kind = gtFloatNumber else: yamlPlainStrLit(g, pos) else: yamlPlainStrLit(g, pos) - while g.buf[pos] notin {'\0', ',', ']', '}', '\x0A', '\x0D'}: + while g.buf[pos] notin {'\0', ',', ']', '}', '\n', '\r'}: inc(pos) - if g.buf[pos] notin {'\x09'..'\x0D', ' ', ',', ']', '}'}: + if g.buf[pos] notin {'\t'..'\r', ' ', ',', ']', '}'}: yamlPlainStrLit(g, pos) break # theoretically, we would need to parse indentation (like 
with block scalars) @@ -651,19 +684,16 @@ proc yamlNextToken(g: var GeneralTokenizer) = of 'x': inc(pos) for i in 1..2: - {.unroll.} if g.buf[pos] in hexChars: inc(pos) break of 'u': inc(pos) for i in 1..4: - {.unroll.} if g.buf[pos] in hexChars: inc(pos) break of 'U': inc(pos) for i in 1..8: - {.unroll.} if g.buf[pos] in hexChars: inc(pos) break else: inc(pos) @@ -698,13 +728,13 @@ proc yamlNextToken(g: var GeneralTokenizer) = while g.buf[pos] in {' ', '\t'}: inc(pos) of '#': g.kind = gtComment - while g.buf[pos] notin {'\0', '\x0A', '\x0D'}: inc(pos) - of '\x0A', '\x0D': discard + while g.buf[pos] notin {'\0', '\n', '\r'}: inc(pos) + of '\n', '\r': discard else: # illegal here. just don't parse a block scalar g.kind = gtNone g.state = gtOther - if g.buf[pos] in {'\x0A', '\x0D'} and g.state == gtCommand: + if g.buf[pos] in {'\n', '\r'} and g.state == gtCommand: g.state = gtLongStringLit elif g.state == gtLongStringLit: # beware, this is the only token where we actually have to parse @@ -713,10 +743,10 @@ proc yamlNextToken(g: var GeneralTokenizer) = g.kind = gtLongStringLit # first, we have to find the parent indentation of the block scalar, so that # we know when to stop - assert g.buf[pos] in {'\x0A', '\x0D'} + assert g.buf[pos] in {'\n', '\r'} var lookbehind = pos - 1 var headerStart = -1 - while lookbehind >= 0 and g.buf[lookbehind] notin {'\x0A', '\x0D'}: + while lookbehind >= 0 and g.buf[lookbehind] notin {'\n', '\r'}: if headerStart == -1 and g.buf[lookbehind] in {'|', '>'}: headerStart = lookbehind dec(lookbehind) @@ -727,12 +757,12 @@ proc yamlNextToken(g: var GeneralTokenizer) = # when the header is alone in a line, this line does not show the parent's # indentation, so we must go further. search the first previous line with # non-whitespace content. - while lookbehind >= 0 and g.buf[lookbehind] in {'\x0A', '\x0D'}: + while lookbehind >= 0 and g.buf[lookbehind] in {'\n', '\r'}: dec(lookbehind) while lookbehind >= 0 and g.buf[lookbehind] in {' ', '\t'}: dec(lookbehind) # now, find the beginning of the line... - while lookbehind >= 0 and g.buf[lookbehind] notin {'\x0A', '\x0D'}: + while lookbehind >= 0 and g.buf[lookbehind] notin {'\n', '\r'}: dec(lookbehind) # ... and its indentation indentation = 1 @@ -740,7 +770,7 @@ proc yamlNextToken(g: var GeneralTokenizer) = if lookbehind == -1: indentation = 0 # top level elif g.buf[lookbehind + 1] == '-' and g.buf[lookbehind + 2] == '-' and g.buf[lookbehind + 3] == '-' and - g.buf[lookbehind + 4] in {'\x09'..'\x0D', ' '}: + g.buf[lookbehind + 4] in {'\t'..'\r', ' '}: # this is a document start, therefore, we are at top level indentation = 0 # because lookbehind was at newline char when calculating indentation, we're @@ -748,7 +778,7 @@ proc yamlNextToken(g: var GeneralTokenizer) = let parentIndentation = indentation - 1 # find first content - while g.buf[pos] in {' ', '\x0A', '\x0D'}: + while g.buf[pos] in {' ', '\n', '\r'}: if g.buf[pos] == ' ': inc(indentation) else: indentation = 0 inc(pos) @@ -765,12 +795,12 @@ proc yamlNextToken(g: var GeneralTokenizer) = if (indentation < minIndentation and g.buf[pos] == '#') or (indentation == 0 and g.buf[pos] == '.' and g.buf[pos + 1] == '.' and g.buf[pos + 2] == '.' 
and - g.buf[pos + 3] in {'\0', '\x09'..'\x0D', ' '}): + g.buf[pos + 3] in {'\0', '\t'..'\r', ' '}): # comment after end of block scalar, or end of document break minIndentation = min(indentation, minIndentation) - while g.buf[pos] notin {'\0', '\x0A', '\x0D'}: inc(pos) - while g.buf[pos] in {' ', '\x0A', '\x0D'}: + while g.buf[pos] notin {'\0', '\n', '\r'}: inc(pos) + while g.buf[pos] in {' ', '\n', '\r'}: if g.buf[pos] == ' ': inc(indentation) else: indentation = 0 inc(pos) @@ -779,30 +809,29 @@ proc yamlNextToken(g: var GeneralTokenizer) = elif g.state == gtOther: # gtOther means 'inside YAML document' case g.buf[pos] - of ' ', '\x09'..'\x0D': + of ' ', '\t'..'\r': g.kind = gtWhitespace - while g.buf[pos] in {' ', '\x09'..'\x0D'}: inc(pos) + while g.buf[pos] in {' ', '\t'..'\r'}: inc(pos) of '#': g.kind = gtComment inc(pos) - while g.buf[pos] notin {'\0', '\x0A', '\x0D'}: inc(pos) + while g.buf[pos] notin {'\0', '\n', '\r'}: inc(pos) of '-': inc(pos) - if g.buf[pos] in {'\0', ' ', '\x09'..'\x0D'}: + if g.buf[pos] in {'\0', ' ', '\t'..'\r'}: g.kind = gtPunctuation elif g.buf[pos] == '-' and - (pos == 1 or g.buf[pos - 2] in {'\x0A', '\x0D'}): # start of line + (pos == 1 or g.buf[pos - 2] in {'\n', '\r'}): # start of line inc(pos) - if g.buf[pos] == '-' and g.buf[pos + 1] in {'\0', '\x09'..'\x0D', ' '}: + if g.buf[pos] == '-' and g.buf[pos + 1] in {'\0', '\t'..'\r', ' '}: inc(pos) g.kind = gtKeyword else: yamlPossibleNumber(g, pos) else: yamlPossibleNumber(g, pos) of '.': - if pos == 0 or g.buf[pos - 1] in {'\x0A', '\x0D'}: + if pos == 0 or g.buf[pos - 1] in {'\n', '\r'}: inc(pos) for i in 1..2: - {.unroll.} if g.buf[pos] != '.': break inc(pos) if pos == g.start + 3: @@ -812,12 +841,12 @@ proc yamlNextToken(g: var GeneralTokenizer) = else: yamlPlainStrLit(g, pos) of '?': inc(pos) - if g.buf[pos] in {'\0', ' ', '\x09'..'\x0D'}: + if g.buf[pos] in {'\0', ' ', '\t'..'\r'}: g.kind = gtPunctuation else: yamlPlainStrLit(g, pos) of ':': inc(pos) - if g.buf[pos] in {'\0', '\x09'..'\x0D', ' ', '\'', '\"'} or + if g.buf[pos] in {'\0', '\t'..'\r', ' ', '\'', '\"'} or (pos > 0 and g.buf[pos - 2] in {'}', ']', '\"', '\''}): g.kind = gtPunctuation else: yamlPlainStrLit(g, pos) @@ -836,7 +865,7 @@ proc yamlNextToken(g: var GeneralTokenizer) = inc(pos) if g.buf[pos] == '<': # literal tag (e.g. `!<tag:yaml.org,2002:str>`) - while g.buf[pos] notin {'\0', '>', '\x09'..'\x0D', ' '}: inc(pos) + while g.buf[pos] notin {'\0', '>', '\t'..'\r', ' '}: inc(pos) if g.buf[pos] == '>': inc(pos) else: while g.buf[pos] in {'A'..'Z', 'a'..'z', '0'..'9', '-'}: inc(pos) @@ -845,17 +874,17 @@ proc yamlNextToken(g: var GeneralTokenizer) = # prefixed tag (e.g. `!!str`) inc(pos) while g.buf[pos] notin - {'\0', '\x09'..'\x0D', ' ', ',', '[', ']', '{', '}'}: inc(pos) - of '\0', '\x09'..'\x0D', ' ': discard + {'\0', '\t'..'\r', ' ', ',', '[', ']', '{', '}'}: inc(pos) + of '\0', '\t'..'\r', ' ': discard else: # local tag (e.g. `!nim:system:int`) - while g.buf[pos] notin {'\0', '\x09'..'\x0D', ' '}: inc(pos) + while g.buf[pos] notin {'\0', '\t'..'\r', ' '}: inc(pos) of '&': g.kind = gtLabel - while g.buf[pos] notin {'\0', '\x09'..'\x0D', ' '}: inc(pos) + while g.buf[pos] notin {'\0', '\t'..'\r', ' '}: inc(pos) of '*': g.kind = gtReference - while g.buf[pos] notin {'\0', '\x09'..'\x0D', ' '}: inc(pos) + while g.buf[pos] notin {'\0', '\t'..'\r', ' '}: inc(pos) of '|', '>': # this can lead to incorrect tokenization when | or > appear inside flow # content. 
checking whether we're inside flow content is not @@ -871,18 +900,18 @@ proc yamlNextToken(g: var GeneralTokenizer) = # outside document case g.buf[pos] of '%': - if pos == 0 or g.buf[pos - 1] in {'\x0A', '\x0D'}: + if pos == 0 or g.buf[pos - 1] in {'\n', '\r'}: g.kind = gtDirective - while g.buf[pos] notin {'\0', '\x0A', '\x0D'}: inc(pos) + while g.buf[pos] notin {'\0', '\n', '\r'}: inc(pos) else: g.state = gtOther yamlPlainStrLit(g, pos) - of ' ', '\x09'..'\x0D': + of ' ', '\t'..'\r': g.kind = gtWhitespace - while g.buf[pos] in {' ', '\x09'..'\x0D'}: inc(pos) + while g.buf[pos] in {' ', '\t'..'\r'}: inc(pos) of '#': g.kind = gtComment - while g.buf[pos] notin {'\0', '\x0A', '\x0D'}: inc(pos) + while g.buf[pos] notin {'\0', '\n', '\r'}: inc(pos) of '\0': g.kind = gtEof else: g.kind = gtNone @@ -890,7 +919,86 @@ proc yamlNextToken(g: var GeneralTokenizer) = g.length = pos - g.pos g.pos = pos +proc pythonNextToken(g: var GeneralTokenizer) = + const + keywords: array[0..34, string] = [ + "False", "None", "True", "and", "as", "assert", "async", "await", + "break", "class", "continue", "def", "del", "elif", "else", "except", + "finally", "for", "from", "global", "if", "import", "in", "is", "lambda", + "nonlocal", "not", "or", "pass", "raise", "return", "try", "while", + "with", "yield"] + nimNextToken(g, keywords) + +proc cmdNextToken(g: var GeneralTokenizer, dollarPrompt = false) = + var pos = g.pos + g.start = g.pos + if g.state == low(TokenClass): + g.state = if dollarPrompt: gtPrompt else: gtProgram + case g.buf[pos] + of ' ', '\t'..'\r': + g.kind = gtWhitespace + while g.buf[pos] in {' ', '\t'..'\r'}: + if g.buf[pos] == '\n': + g.state = if dollarPrompt: gtPrompt else: gtProgram + inc(pos) + of '\'', '"': + g.kind = gtOption + let q = g.buf[pos] + inc(pos) + while g.buf[pos] notin {q, '\0'}: + inc(pos) + if g.buf[pos] == q: inc(pos) + of '#': + g.kind = gtComment + while g.buf[pos] notin {'\n', '\0'}: + inc(pos) + of '&', '|': + g.kind = gtOperator + inc(pos) + if g.buf[pos] == g.buf[pos-1]: inc(pos) + g.state = gtProgram + of '(': + g.kind = gtOperator + g.state = gtProgram + inc(pos) + of ')': + g.kind = gtOperator + inc(pos) + of ';': + g.state = gtProgram + g.kind = gtOperator + inc(pos) + of '\0': g.kind = gtEof + elif dollarPrompt and g.state == gtPrompt: + if g.buf[pos] == '$' and g.buf[pos+1] in {' ', '\t'}: + g.kind = gtPrompt + inc pos, 2 + g.state = gtProgram + else: + g.kind = gtProgramOutput + while g.buf[pos] notin {'\n', '\0'}: + inc(pos) + else: + if g.state == gtProgram: + g.kind = gtProgram + g.state = gtOption + else: + g.kind = gtOption + while g.buf[pos] notin {' ', '\t'..'\r', '&', '|', '(', ')', '\'', '"', '\0'}: + if g.buf[pos] == ';' and g.buf[pos+1] == ' ': + # (check space because ';' can be used inside arguments in Win bat) + break + if g.kind == gtOption and g.buf[pos] in {'/', '\\', '.'}: + g.kind = gtIdentifier # for file/dir name + elif g.kind == gtProgram and g.buf[pos] == '=': + g.kind = gtIdentifier # for env variable setting at beginning of line + g.state = gtProgram + inc(pos) + g.length = pos - g.pos + g.pos = pos + proc getNextToken*(g: var GeneralTokenizer, lang: SourceLanguage) = + g.lang = lang case lang of langNone: assert false of langNim: nimNextToken(g) @@ -899,13 +1007,28 @@ proc getNextToken*(g: var GeneralTokenizer, lang: SourceLanguage) = of langC: cNextToken(g) of langJava: javaNextToken(g) of langYaml: yamlNextToken(g) + of langPython: pythonNextToken(g) + of langCmd: cmdNextToken(g) + of langConsole: cmdNextToken(g, dollarPrompt=true) + 
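With `langConsole` wired into `getNextToken`, an interactive session can also be split into prompt, command and output tokens via the `tokenize` helper defined just below. A small sketch under the same import-path assumption as above:

```Nim
import packages/docutils/highlite

# "Console" is resolved case-insensitively by getSourceLanguage:
doAssert getSourceLanguage("console") == langConsole

let session = """$ nim c -r hello.nim
Hello, world!"""
for (text, kind) in tokenize(session, langConsole):
  # the `$`-prefixed line yields gtPrompt, gtProgram and argument tokens;
  # the following line is emitted as gtProgramOutput
  if kind != gtWhitespace:
    echo kind, ": ", text
```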
+proc tokenize*(text: string, lang: SourceLanguage): seq[(string, TokenClass)] = + var g: GeneralTokenizer + initGeneralTokenizer(g, text) + var prevPos = 0 + while true: + getNextToken(g, lang) + if g.kind == gtEof: + break + var s = text[prevPos ..< g.pos] + result.add (s, g.kind) + prevPos = g.pos when isMainModule: var keywords: seq[string] # Try to work running in both the subdir or at the root. for filename in ["doc/keywords.txt", "../../../doc/keywords.txt"]: try: - let input = string(readFile(filename)) + let input = readFile(filename) keywords = input.splitWhitespace() break except: diff --git a/lib/packages/docutils/rst.nim b/lib/packages/docutils/rst.nim index 05b58c56b..706c50689 100644 --- a/lib/packages/docutils/rst.nim +++ b/lib/packages/docutils/rst.nim @@ -7,23 +7,46 @@ # distribution, for details about the copyright. # -## This module implements a `reStructuredText`:idx: parser. A large -## subset is implemented. Some features of the `markdown`:idx: wiki syntax are -## also supported. +## This module implements a `reStructuredText`:idx: (RST) and +## `Markdown`:idx: parser. +## User's manual on supported markup syntax and command line usage can be +## found in [Nim-flavored Markdown and reStructuredText](markdown_rst.html). ## -## **Note:** Import ``packages/docutils/rst`` to use this module +## * See also [Nim DocGen Tools Guide](docgen.html) for handling of +## ``.nim`` files. +## * See also [packages/docutils/rstgen module](rstgen.html) to know how to +## generate HTML or Latex strings (for embedding them into custom documents). +## +## Choice between Markdown and RST as well as optional additional features are +## turned on by passing ``options:`` [RstParseOptions] to [proc rstParse]. import - os, strutils, rstast + std/[os, strutils, enumutils, algorithm, lists, sequtils, + tables, strscans] +import dochelpers, rstidx, rstast +import std/private/miscdollars +from highlite import SourceLanguage, getSourceLanguage + +when defined(nimPreviewSlimSystem): + import std/[assertions, syncio] + type RstParseOption* = enum ## options for the RST parser - roSkipPounds, ## skip ``#`` at line beginning (documentation - ## embedded in Nim comments) roSupportSmilies, ## make the RST parser support smilies like ``:)`` roSupportRawDirective, ## support the ``raw`` directive (don't support ## it for sandboxing) - roSupportMarkdown ## support additional features of markdown + roSupportMarkdown, ## support additional features of Markdown + roPreferMarkdown, ## parse as Markdown (keeping RST as "extension" + ## to Markdown) -- implies `roSupportMarkdown` + roNimFile ## set for Nim files where default interpreted + ## text role should be :nim: + roSandboxDisabled ## this option enables certain options + ## (e.g. raw, include, importdoc) + ## which are disabled by default as they can + ## enable users to read arbitrary data and + ## perform XSS if the parser is used in a web + ## app. 
RstParseOptions* = set[RstParseOption] @@ -32,39 +55,41 @@ type mcWarning = "Warning", mcError = "Error" + # keep the order in sync with compiler/docgen.nim and compiler/lineinfos.nim: MsgKind* = enum ## the possible messages - meCannotOpenFile, - meExpected, - meGridTableNotImplemented, - meNewSectionExpected, - meGeneralParseError, - meInvalidDirective, - mwRedefinitionOfLabel, - mwUnknownSubstitution, - mwUnsupportedLanguage, - mwUnsupportedField + meCannotOpenFile = "cannot open '$1'", + meExpected = "'$1' expected", + meMissingClosing = "$1", + meGridTableNotImplemented = "grid table is not implemented", + meMarkdownIllformedTable = "illformed delimiter row of a Markdown table", + meIllformedTable = "Illformed table: $1", + meNewSectionExpected = "new section expected $1", + meGeneralParseError = "general parse error", + meInvalidDirective = "invalid directive: '$1'", + meInvalidField = "invalid field: $1", + meFootnoteMismatch = "mismatch in number of footnotes and their refs: $1", + mwRedefinitionOfLabel = "redefinition of label '$1'", + mwUnknownSubstitution = "unknown substitution '$1'", + mwAmbiguousLink = "ambiguous doc link $1", + mwBrokenLink = "broken link '$1'", + mwUnsupportedLanguage = "language '$1' not supported", + mwUnsupportedField = "field '$1' not supported", + mwRstStyle = "RST style: $1", + mwUnusedImportdoc = "importdoc for '$1' is not used", + meSandboxedDirective = "disabled directive: '$1'", MsgHandler* = proc (filename: string, line, col: int, msgKind: MsgKind, arg: string) {.closure, gcsafe.} ## what to do in case of an error FindFileHandler* = proc (filename: string): string {.closure, gcsafe.} - -const - messages: array[MsgKind, string] = [ - meCannotOpenFile: "cannot open '$1'", - meExpected: "'$1' expected", - meGridTableNotImplemented: "grid table is not implemented", - meNewSectionExpected: "new section expected", - meGeneralParseError: "general parse error", - meInvalidDirective: "invalid directive: '$1'", - mwRedefinitionOfLabel: "redefinition of label '$1'", - mwUnknownSubstitution: "unknown substitution '$1'", - mwUnsupportedLanguage: "language '$1' not supported", - mwUnsupportedField: "field '$1' not supported" - ] + FindRefFileHandler* = + proc (targetRelPath: string): + tuple[targetPath: string, linkRelPath: string] {.closure, gcsafe.} + ## returns where .html or .idx file should be found by its relative path; + ## `linkRelPath` is a prefix to be added before a link anchor from such file proc rstnodeToRefname*(n: PRstNode): string proc addNodes*(n: PRstNode): string -proc getFieldValue*(n: PRstNode, fieldname: string): string +proc getFieldValue*(n: PRstNode, fieldname: string): string {.gcsafe.} proc getArgument*(n: PRstNode): string # ----------------------------- scanner part -------------------------------- @@ -111,10 +136,19 @@ const ":geek:": "icon_e_geek", ":ugeek:": "icon_e_ugeek" } + SandboxDirAllowlist = [ + "image", "code", "code-block", "admonition", "attention", "caution", + "container", "contents", "danger", "default-role", "error", "figure", + "hint", "important", "index", "note", "role", "tip", "title", "warning"] type TokType = enum - tkEof, tkIndent, tkWhite, tkWord, tkAdornment, tkPunct, tkOther + tkEof, tkIndent, + tkWhite, tkWord, + tkAdornment, # used for chapter adornment, transitions and + # horizontal table borders + tkPunct, # one or many punctuation characters + tkOther Token = object # a RST token kind*: TokType # the type of the token ival*: int # the indentation or parsed integer value @@ -126,7 +160,8 @@ type 
buf*: cstring bufpos*: int line*, col*, baseIndent*: int - skipPounds*: bool + adornmentLine*: bool + escapeNext*: bool proc getThing(L: var Lexer, tok: var Token, s: set[char]) = tok.kind = tkWord @@ -134,57 +169,82 @@ proc getThing(L: var Lexer, tok: var Token, s: set[char]) = tok.col = L.col var pos = L.bufpos while true: - add(tok.symbol, L.buf[pos]) - inc(pos) + tok.symbol.add(L.buf[pos]) + inc pos if L.buf[pos] notin s: break - inc(L.col, pos - L.bufpos) + inc L.col, pos - L.bufpos L.bufpos = pos -proc getAdornment(L: var Lexer, tok: var Token) = - tok.kind = tkAdornment +proc isCurrentLineAdornment(L: var Lexer): bool = + var pos = L.bufpos + let c = L.buf[pos] + while true: + inc pos + if L.buf[pos] in {'\c', '\l', '\0'}: + break + if c == '+': # grid table + if L.buf[pos] notin {'-', '=', '+'}: + return false + else: # section adornment or table horizontal border + if L.buf[pos] notin {c, ' ', '\t', '\v', '\f'}: + return false + result = true + +proc getPunctAdornment(L: var Lexer, tok: var Token) = + if L.adornmentLine: + tok.kind = tkAdornment + else: + tok.kind = tkPunct tok.line = L.line tok.col = L.col var pos = L.bufpos - var c = L.buf[pos] - while true: - add(tok.symbol, L.buf[pos]) - inc(pos) - if L.buf[pos] != c: break - inc(L.col, pos - L.bufpos) + let c = L.buf[pos] + if not L.escapeNext and (c != '\\' or L.adornmentLine): + while true: + tok.symbol.add(L.buf[pos]) + inc pos + if L.buf[pos] != c: break + elif L.escapeNext: + tok.symbol.add(L.buf[pos]) + inc pos + else: # not L.escapeNext and c == '\\' and not L.adornmentLine + tok.symbol.add '\\' + inc pos + L.escapeNext = true + inc L.col, pos - L.bufpos L.bufpos = pos + if tok.symbol == "\\": tok.kind = tkPunct + # nim extension: standalone \ can not be adornment proc getBracket(L: var Lexer, tok: var Token) = tok.kind = tkPunct tok.line = L.line tok.col = L.col - add(tok.symbol, L.buf[L.bufpos]) + tok.symbol.add(L.buf[L.bufpos]) inc L.col inc L.bufpos proc getIndentAux(L: var Lexer, start: int): int = var pos = start # skip the newline (but include it in the token!) 
- if L.buf[pos] == '\x0D': - if L.buf[pos + 1] == '\x0A': inc(pos, 2) - else: inc(pos) - elif L.buf[pos] == '\x0A': - inc(pos) - if L.skipPounds: - if L.buf[pos] == '#': inc(pos) - if L.buf[pos] == '#': inc(pos) + if L.buf[pos] == '\r': + if L.buf[pos + 1] == '\n': inc pos, 2 + else: inc pos + elif L.buf[pos] == '\n': + inc pos while true: case L.buf[pos] - of ' ', '\x0B', '\x0C': - inc(pos) - inc(result) - of '\x09': - inc(pos) + of ' ', '\v', '\f': + inc pos + inc result + of '\t': + inc pos result = result - (result mod 8) + 8 else: break # EndOfFile also leaves the loop if L.buf[pos] == '\0': result = 0 - elif (L.buf[pos] == '\x0A') or (L.buf[pos] == '\x0D'): + elif L.buf[pos] == '\n' or L.buf[pos] == '\r': # look at the next line for proper indentation: result = getIndentAux(L, pos) L.bufpos = pos # no need to set back buf @@ -202,22 +262,26 @@ proc getIndent(L: var Lexer, tok: var Token) = proc rawGetTok(L: var Lexer, tok: var Token) = tok.symbol = "" tok.ival = 0 + if L.col == 0: + L.adornmentLine = false var c = L.buf[L.bufpos] case c of 'a'..'z', 'A'..'Z', '\x80'..'\xFF', '0'..'9': getThing(L, tok, SymChars) - of ' ', '\x09', '\x0B', '\x0C': - getThing(L, tok, {' ', '\x09'}) + of ' ', '\t', '\v', '\f': + getThing(L, tok, {' ', '\t'}) tok.kind = tkWhite - if L.buf[L.bufpos] in {'\x0D', '\x0A'}: + if L.buf[L.bufpos] in {'\r', '\n'}: rawGetTok(L, tok) # ignore spaces before \n - of '\x0D', '\x0A': + of '\r', '\n': getIndent(L, tok) + L.adornmentLine = false of '!', '\"', '#', '$', '%', '&', '\'', '*', '+', ',', '-', '.', '/', ':', ';', '<', '=', '>', '?', '@', '\\', '^', '_', '`', '|', '~': - getAdornment(L, tok) - if len(tok.symbol) <= 3: tok.kind = tkPunct + if L.col == 0: + L.adornmentLine = L.isCurrentLineAdornment() + getPunctAdornment(L, tok) of '(', ')', '[', ']', '{', '}': getBracket(L, tok) else: @@ -227,78 +291,174 @@ proc rawGetTok(L: var Lexer, tok: var Token) = tok.kind = tkEof else: tok.kind = tkOther - add(tok.symbol, c) - inc(L.bufpos) - inc(L.col) + tok.symbol.add(c) + inc L.bufpos + inc L.col tok.col = max(tok.col - L.baseIndent, 0) -proc getTokens(buffer: string, skipPounds: bool, tokens: var TokenSeq): int = +proc getTokens(buffer: string, tokens: var TokenSeq) = var L: Lexer - var length = len(tokens) + var length = tokens.len L.buf = cstring(buffer) L.line = 0 # skip UTF-8 BOM - if (L.buf[0] == '\xEF') and (L.buf[1] == '\xBB') and (L.buf[2] == '\xBF'): - inc(L.bufpos, 3) - L.skipPounds = skipPounds - if skipPounds: - if L.buf[L.bufpos] == '#': - inc(L.bufpos) - inc(result) - if L.buf[L.bufpos] == '#': - inc(L.bufpos) - inc(result) - L.baseIndent = 0 - while L.buf[L.bufpos] == ' ': - inc(L.bufpos) - inc(L.baseIndent) - inc(result) + if L.buf[0] == '\xEF' and L.buf[1] == '\xBB' and L.buf[2] == '\xBF': + inc L.bufpos, 3 while true: - inc(length) + inc length setLen(tokens, length) + let toEscape = L.escapeNext rawGetTok(L, tokens[length - 1]) + if toEscape: L.escapeNext = false if tokens[length - 1].kind == tkEof: break if tokens[0].kind == tkWhite: # BUGFIX - tokens[0].ival = len(tokens[0].symbol) + tokens[0].ival = tokens[0].symbol.len tokens[0].kind = tkIndent type - LevelMap = array[char, int] + LevelInfo = object + symbol: char # adornment character + hasOverline: bool # has also overline (besides underline)? + line: int # the last line of this style occurrence + # (for error message) + hasPeers: bool # has headings on the same level of hierarchy? 
+ LiteralBlockKind = enum # RST-style literal blocks after `::` + lbNone, + lbIndentedLiteralBlock, + lbQuotedLiteralBlock + LevelMap = seq[LevelInfo] # Saves for each possible title adornment + # style its level in the current document. + SubstitutionKind = enum + rstSubstitution = "substitution", + hyperlinkAlias = "hyperlink alias", + implicitHyperlinkAlias = "implicitly-generated hyperlink alias" Substitution = object + kind*: SubstitutionKind key*: string value*: PRstNode - - SharedState = object - options: RstParseOptions # parsing options - uLevel, oLevel: int # counters for the section levels + info*: TLineInfo # place where the substitution was defined + AnchorRule = enum + arInternalRst, ## For automatically generated RST anchors (from + ## headings, footnotes, inline internal targets): + ## case-insensitive, 1-space-significant (by RST spec) + arExternalRst, ## For external .nim doc comments or .rst/.md + arNim ## For anchors generated by ``docgen.nim``: Nim-style case + ## sensitivity, etc. (see `proc normalizeNimName`_ for details) + arHyperlink, ## For links with manually set anchors in + ## form `text <pagename.html#anchor>`_ + RstAnchorKind = enum + manualDirectiveAnchor = "manual directive anchor", + manualInlineAnchor = "manual inline anchor", + footnoteAnchor = "footnote anchor", + headlineAnchor = "implicitly-generated headline anchor" + AnchorSubst = object + info: TLineInfo # the file where the anchor was defined + priority: int + case kind: range[arInternalRst .. arNim] + of arInternalRst: + anchorType: RstAnchorKind + target: PRstNode + of arExternalRst: + anchorTypeExt: RstAnchorKind + refnameExt: string + of arNim: + module: FileIndex # anchor's module (generally not the same as file) + tooltip: string # displayed tooltip for Nim-generated anchors + langSym: LangSymbol + refname: string # A reference name that will be inserted directly + # into HTML/Latex. + external: bool + AnchorSubstTable = Table[string, seq[AnchorSubst]] + # use `seq` to account for duplicate anchors + FootnoteType = enum + fnManualNumber, # manually numbered footnote like [3] + fnAutoNumber, # auto-numbered footnote [#] + fnAutoNumberLabel, # auto-numbered with label [#label] + fnAutoSymbol, # auto-symbol footnote [*] + fnCitation # simple text label like [citation2021] + FootnoteSubst = tuple + kind: FootnoteType # discriminator + number: int # valid for fnManualNumber (always) and fnAutoNumber, + # fnAutoNumberLabel after resolveSubs is called + autoNumIdx: int # order of occurrence: fnAutoNumber, fnAutoNumberLabel + autoSymIdx: int # order of occurrence: fnAutoSymbol + label: string # valid for fnAutoNumberLabel + RstFileTable* = object + filenameToIdx*: Table[string, FileIndex] + idxToFilename*: seq[string] + ImportdocInfo = object + used: bool # was this import used? + fromInfo: TLineInfo # place of `.. importdoc::` directive + idxPath: string # full path to ``.idx`` file + linkRelPath: string # prefix before target anchor + title: string # document title obtained from ``.idx`` + RstSharedState = object + options*: RstParseOptions # parsing options + hLevels: LevelMap # hierarchy of heading styles + hTitleCnt: int # =0 if no title, =1 if only main title, + # =2 if both title and subtitle are present + hCurLevel: int # current section level + currRole: string # current interpreted text role + currRoleKind: RstNodeKind # ... 
and its node kind subs: seq[Substitution] # substitutions - refs: seq[Substitution] # references - underlineToLevel: LevelMap # Saves for each possible title adornment - # character its level in the - # current document. - # This is for single underline adornments. - overlineToLevel: LevelMap # Saves for each possible title adornment - # character its level in the current - # document. - # This is for over-underline adornments. + refs*: seq[Substitution] # references + anchors*: AnchorSubstTable + # internal target substitutions + lineFootnoteNum: seq[TLineInfo] # footnote line, auto numbers .. [#] + lineFootnoteNumRef: seq[TLineInfo] # footnote line, their reference [#]_ + currFootnoteNumRef: int # ... their counter for `resolveSubs` + lineFootnoteSym: seq[TLineInfo] # footnote line, auto symbols .. [*] + lineFootnoteSymRef: seq[TLineInfo] # footnote line, their reference [*]_ + currFootnoteSymRef: int # ... their counter for `resolveSubs` + footnotes: seq[FootnoteSubst] # correspondence b/w footnote label, + # number, order of occurrence msgHandler: MsgHandler # How to handle errors. - findFile: FindFileHandler # How to find files. - - PSharedState = ref SharedState + findFile: FindFileHandler # How to find files for include. + findRefFile: FindRefFileHandler + # How to find files imported by importdoc. + filenames*: RstFileTable # map file name <-> FileIndex (for storing + # file names for warnings after 1st stage) + currFileIdx*: FileIndex # current index in `filenames` + tocPart*: seq[PRstNode] # all the headings of a document + hasToc*: bool + idxImports*: Table[string, ImportdocInfo] + # map `importdoc`ed filename -> it's info + nimFileImported*: bool # Was any ``.nim`` module `importdoc`ed ? + + PRstSharedState* = ref RstSharedState + ManualAnchor = object + alias: string # a (short) name that can substitute the `anchor` + anchor: string # anchor = id = refname + info: TLineInfo RstParser = object of RootObj idx*: int tok*: TokenSeq - s*: PSharedState + s*: PRstSharedState indentStack*: seq[int] - filename*: string - line*, col*: int - hasToc*: bool + line*, col*: int ## initial line/column of whole text or + ## documenation fragment that will be added + ## in case of error/warning reporting to + ## (relative) line/column of the token. + curAnchors*: seq[ManualAnchor] + ## seq to accumulate aliases for anchors: + ## because RST can have >1 alias per 1 anchor EParseError* = object of ValueError + SectionParser = proc (p: var RstParser): PRstNode {.nimcall, gcsafe.} + +const + LineRstInit* = 1 ## Initial line number for standalone RST text + ColRstInit* = 0 ## Initial column number for standalone RST text + ## (Nim global reporting adds ColOffset=1) + ColRstOffset* = 1 ## 1: a replica of ColOffset for internal use + +template currentTok(p: RstParser): Token = p.tok[p.idx] +template prevTok(p: RstParser): Token = p.tok[p.idx - 1] +template nextTok(p: RstParser): Token = p.tok[p.idx + 1] proc whichMsgClass*(k: MsgKind): MsgClass = ## returns which message class `k` belongs to. 
- case ($k)[1] + case k.symbolName[1] of 'e', 'E': result = mcError of 'w', 'W': result = mcWarning of 'h', 'H': result = mcHint @@ -307,8 +467,10 @@ proc whichMsgClass*(k: MsgKind): MsgClass = proc defaultMsgHandler*(filename: string, line, col: int, msgkind: MsgKind, arg: string) = let mc = msgkind.whichMsgClass - let a = messages[msgkind] % arg - let message = "$1($2, $3) $4: $5" % [filename, $line, $col, $mc, a] + let a = $msgkind % arg + var message: string + toLocation(message, filename, line, col + ColRstOffset) + message.add " $1: $2" % [$mc, a] if mc == mcError: raise newException(EParseError, message) else: writeLine(stdout, message) @@ -316,90 +478,262 @@ proc defaultFindFile*(filename: string): string = if fileExists(filename): result = filename else: result = "" -proc newSharedState(options: RstParseOptions, - findFile: FindFileHandler, - msgHandler: MsgHandler): PSharedState = - new(result) - result.subs = @[] - result.refs = @[] - result.options = options - result.msgHandler = if not isNil(msgHandler): msgHandler else: defaultMsgHandler - result.findFile = if not isNil(findFile): findFile else: defaultFindFile +proc defaultFindRefFile*(filename: string): (string, string) = + (filename, "") + +proc defaultRole(options: RstParseOptions): string = + if roNimFile in options: "nim" else: "literal" + +proc whichRoleAux(sym: string): RstNodeKind = + let r = sym.toLowerAscii + case r + of "idx": result = rnIdx + of "literal": result = rnInlineLiteral + of "strong": result = rnStrongEmphasis + of "emphasis": result = rnEmphasis + of "sub", "subscript": result = rnSub + of "sup", "superscript": result = rnSup + # literal and code are the same in our implementation + of "code": result = rnInlineLiteral + of "program", "option", "tok": result = rnCodeFragment + # c++ currently can be spelled only as cpp, c# only as csharp + elif getSourceLanguage(r) != langNone: + result = rnInlineCode + else: # unknown role + result = rnUnknownRole + +proc len(filenames: RstFileTable): int = filenames.idxToFilename.len + +proc addFilename*(s: PRstSharedState, file1: string): FileIndex = + ## Returns index of filename, adding it if it has not been used before + let nextIdx = s.filenames.len.FileIndex + result = getOrDefault(s.filenames.filenameToIdx, file1, default = nextIdx) + if result == nextIdx: + s.filenames.filenameToIdx[file1] = result + s.filenames.idxToFilename.add file1 + +proc setCurrFilename*(s: PRstSharedState, file1: string) = + s.currFileIdx = addFilename(s, file1) + +proc getFilename(filenames: RstFileTable, fid: FileIndex): string = + doAssert(0 <= fid.int and fid.int < filenames.len, + "incorrect FileIndex $1 (range 0..$2)" % [ + $fid.int, $(filenames.len - 1)]) + result = filenames.idxToFilename[fid.int] + +proc getFilename(s: PRstSharedState, subst: AnchorSubst): string = + getFilename(s.filenames, subst.info.fileIndex) + +proc getModule(s: PRstSharedState, subst: AnchorSubst): string = + result = getFilename(s.filenames, subst.module) + +proc currFilename(s: PRstSharedState): string = + getFilename(s.filenames, s.currFileIdx) + +proc newRstSharedState*(options: RstParseOptions, + filename: string, + findFile: FindFileHandler, + findRefFile: FindRefFileHandler, + msgHandler: MsgHandler, + hasToc: bool): PRstSharedState = + let r = defaultRole(options) + result = PRstSharedState( + currRole: r, + currRoleKind: whichRoleAux(r), + options: options, + msgHandler: if not isNil(msgHandler): msgHandler else: defaultMsgHandler, + findFile: if not isNil(findFile): findFile else: 
defaultFindFile, + findRefFile: + if not isNil(findRefFile): findRefFile + else: defaultFindRefFile, + hasToc: hasToc + ) + setCurrFilename(result, filename) + +proc curLine(p: RstParser): int = p.line + currentTok(p).line proc findRelativeFile(p: RstParser; filename: string): string = - result = p.filename.splitFile.dir / filename + result = p.s.currFilename.splitFile.dir / filename if not fileExists(result): result = p.s.findFile(filename) proc rstMessage(p: RstParser, msgKind: MsgKind, arg: string) = - p.s.msgHandler(p.filename, p.line + p.tok[p.idx].line, - p.col + p.tok[p.idx].col, msgKind, arg) + p.s.msgHandler(p.s.currFilename, curLine(p), + p.col + currentTok(p).col, msgKind, arg) + +proc rstMessage(s: PRstSharedState, msgKind: MsgKind, arg: string) = + s.msgHandler(s.currFilename, LineRstInit, ColRstInit, msgKind, arg) + +proc rstMessage(s: PRstSharedState, msgKind: MsgKind, arg: string; + line, col: int) = + s.msgHandler(s.currFilename, line, col, msgKind, arg) + +proc rstMessage(s: PRstSharedState, filename: string, msgKind: MsgKind, + arg: string) = + s.msgHandler(filename, LineRstInit, ColRstInit, msgKind, arg) + +proc rstMessage*(filenames: RstFileTable, f: MsgHandler, + info: TLineInfo, msgKind: MsgKind, arg: string) = + ## Print warnings using `info`, i.e. in 2nd-pass warnings for + ## footnotes/substitutions/references or from ``rstgen.nim``. + let file = getFilename(filenames, info.fileIndex) + f(file, info.line.int, info.col.int, msgKind, arg) proc rstMessage(p: RstParser, msgKind: MsgKind, arg: string, line, col: int) = - p.s.msgHandler(p.filename, p.line + line, + p.s.msgHandler(p.s.currFilename, p.line + line, p.col + col, msgKind, arg) proc rstMessage(p: RstParser, msgKind: MsgKind) = - p.s.msgHandler(p.filename, p.line + p.tok[p.idx].line, - p.col + p.tok[p.idx].col, msgKind, - p.tok[p.idx].symbol) + p.s.msgHandler(p.s.currFilename, curLine(p), + p.col + currentTok(p).col, msgKind, + currentTok(p).symbol) + +# Functions `isPureRst` & `stopOrWarn` address differences between +# Markdown and RST: +# * Markdown always tries to continue working. If it is really impossible +# to parse a markup element, its proc just returns `nil` and parsing +# continues for it as for normal text paragraph. +# The downside is that real mistakes/typos are often silently ignored. +# The same applies to legacy `RstMarkdown` mode for nimforum. +# * RST really signals errors. The downside is that it's more intrusive - +# the user must escape special syntax with \ explicitly. +# +# TODO: we need to apply this strategy to all markup elements eventually. 
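As a rough illustration of the two strategies described in the comment above, a caller chooses between them through `RstParseOptions` and can observe the resulting message kinds with its own `MsgHandler`. This is a sketch only: the entry point that accepts these values (e.g. `rstParse`) is outside this excerpt, and the handler merely mirrors `defaultMsgHandler`'s formatting without raising:

```Nim
import packages/docutils/rst
import std/strutils

const
  strictRstOpts: RstParseOptions = {}   # pure RST: markup problems are errors
  markdownOpts = {roSupportMarkdown, roPreferMarkdown}
    # Markdown-first: RST-specific problems arrive as mwRstStyle warnings

proc loggingHandler(filename: string, line, col: int,
                    msgKind: MsgKind, arg: string) {.gcsafe.} =
  # MsgKind carries its format string as the enum's string value
  stderr.writeLine("$1($2, $3) $4: $5" %
    [filename, $line, $col, $whichMsgClass(msgKind), $msgKind % arg])
```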
+ +func isPureRst(p: RstParser): bool = roSupportMarkdown notin p.s.options +func isRst(p: RstParser): bool = roPreferMarkdown notin p.s.options +func isMd(p: RstParser): bool = roPreferMarkdown in p.s.options +func isMd(s: PRstSharedState): bool = roPreferMarkdown in s.options + +proc stopOrWarn(p: RstParser, errorType: MsgKind, arg: string) = + let realMsgKind = if isPureRst(p): errorType else: mwRstStyle + rstMessage(p, realMsgKind, arg) + +proc stopOrWarn(p: RstParser, errorType: MsgKind, arg: string, line, col: int) = + let realMsgKind = if isPureRst(p): errorType else: mwRstStyle + rstMessage(p, realMsgKind, arg, line, col) proc currInd(p: RstParser): int = result = p.indentStack[high(p.indentStack)] proc pushInd(p: var RstParser, ind: int) = - add(p.indentStack, ind) + p.indentStack.add(ind) proc popInd(p: var RstParser) = - if len(p.indentStack) > 1: setLen(p.indentStack, len(p.indentStack) - 1) + if p.indentStack.len > 1: setLen(p.indentStack, p.indentStack.len - 1) + +# Working with indentation in rst.nim +# ----------------------------------- +# +# Every line break has an associated tkIndent. +# The tokenizer writes back the first column of next non-blank line +# in all preceeding tkIndent tokens to the `ival` field of tkIndent. +# +# RST document is separated into body elements (B.E.), every of which +# has a dedicated handler proc (or block of logic when B.E. is a block quote) +# that should follow the next rule: +# Every B.E. handler proc should finish at tkIndent (newline) +# after its B.E. finishes. +# Then its callers (which is `parseSection` or another B.E. handler) +# check for tkIndent ival (without necessity to advance `p.idx`) +# and decide themselves whether they continue processing or also stop. +# +# An example:: +# +# L RST text fragment indentation +# +--------------------+ +# 1 | | <- (empty line at the start of file) no tokens +# 2 |First paragraph. | <- tkIndent has ival=0, and next tkWord has col=0 +# 3 | | <- tkIndent has ival=0 +# 4 |* bullet item and | <- tkIndent has ival=0, and next tkPunct has col=0 +# 5 | its continuation | <- tkIndent has ival=2, and next tkWord has col=2 +# 6 | | <- tkIndent has ival=4 +# 7 | Block quote | <- tkIndent has ival=4, and next tkWord has col=4 +# 8 | | <- tkIndent has ival=0 +# 9 | | <- tkIndent has ival=0 +# 10|Final paragraph | <- tkIndent has ival=0, and tkWord has col=0 +# +--------------------+ +# C:01234 +# +# Here parser starts with initial `indentStack=[0]` and then calls the +# 1st `parseSection`: +# +# - `parseSection` calls `parseParagraph` and "First paragraph" is parsed +# - bullet list handler is started at reaching ``*`` (L4 C0), it +# starts bullet item logic (L4 C2), which calls `pushInd(p, ind=2)`, +# then calls `parseSection` (2nd call, nested) which parses +# paragraph "bullet list and its continuation" and then starts +# a block quote logic (L7 C4). +# The block quote logic calls calls `pushInd(p, ind=4)` and +# calls `parseSection` again, so a (simplified) sequence of calls now is:: +# +# parseSection -> parseBulletList -> +# parseSection (+block quote logic) -> parseSection +# +# 3rd `parseSection` finishes, block quote logic calls `popInd(p)`, +# it returns to bullet item logic, which sees that next tkIndent has +# ival=0 and stops there since the required indentation for a bullet item +# is 2 and 0<2; the bullet item logic calls `popInd(p)`. +# Then bullet list handler checks that next tkWord (L10 C0) has the +# right indentation but does not have ``*`` so stops at tkIndent (L10). 
+# - 1st `parseSection` invocation calls `parseParagraph` and the +# "Final paragraph" is parsed. +# +# If a B.E. handler has advanced `p.idx` past tkIndent to check +# whether it should continue its processing or not, and decided not to, +# then this B.E. handler should step back (e.g. do `dec p.idx`). -proc initParser(p: var RstParser, sharedState: PSharedState) = +proc initParser(p: var RstParser, sharedState: PRstSharedState) = p.indentStack = @[0] p.tok = @[] p.idx = 0 - p.filename = "" - p.hasToc = false - p.col = 0 - p.line = 1 + p.col = ColRstInit + p.line = LineRstInit p.s = sharedState proc addNodesAux(n: PRstNode, result: var string) = + if n == nil: + return if n.kind == rnLeaf: - add(result, n.text) + result.add(n.text) else: - for i in countup(0, len(n) - 1): addNodesAux(n.sons[i], result) + for i in 0 ..< n.len: addNodesAux(n.sons[i], result) proc addNodes(n: PRstNode): string = - result = "" - addNodesAux(n, result) + n.addNodesAux(result) + +proc linkName(n: PRstNode): string = + ## Returns a normalized reference name, see: + ## https://docutils.sourceforge.io/docs/ref/rst/restructuredtext.html#reference-names + n.addNodes.toLowerAscii proc rstnodeToRefnameAux(n: PRstNode, r: var string, b: var bool) = template special(s) = if b: - add(r, '-') + r.add('-') b = false - add(r, s) + r.add(s) if n == nil: return if n.kind == rnLeaf: - for i in countup(0, len(n.text) - 1): + for i in 0 ..< n.text.len: case n.text[i] of '0'..'9': if b: - add(r, '-') + r.add('-') b = false - if len(r) == 0: add(r, 'Z') - add(r, n.text[i]) + if r.len == 0: r.add('Z') + r.add(n.text[i]) of 'a'..'z', '\128'..'\255': if b: - add(r, '-') + r.add('-') b = false - add(r, n.text[i]) + r.add(n.text[i]) of 'A'..'Z': if b: - add(r, '-') + r.add('-') b = false - add(r, chr(ord(n.text[i]) - ord('A') + ord('a'))) + r.add(chr(ord(n.text[i]) - ord('A') + ord('a'))) of '$': special "dollar" of '%': special "percent" of '&': special "amp" @@ -420,128 +754,446 @@ proc rstnodeToRefnameAux(n: PRstNode, r: var string, b: var bool) = of '@': special "at" of '|': special "bar" else: - if len(r) > 0: b = true + if r.len > 0: b = true else: - for i in countup(0, len(n) - 1): rstnodeToRefnameAux(n.sons[i], r, b) + for i in 0 ..< n.len: rstnodeToRefnameAux(n.sons[i], r, b) proc rstnodeToRefname(n: PRstNode): string = - result = "" var b = false rstnodeToRefnameAux(n, result, b) -proc findSub(p: var RstParser, n: PRstNode): int = +proc findSub(s: PRstSharedState, n: PRstNode): int = var key = addNodes(n) # the spec says: if no exact match, try one without case distinction: - for i in countup(0, high(p.s.subs)): - if key == p.s.subs[i].key: + for i in countup(0, high(s.subs)): + if key == s.subs[i].key: return i - for i in countup(0, high(p.s.subs)): - if cmpIgnoreStyle(key, p.s.subs[i].key) == 0: + for i in countup(0, high(s.subs)): + if cmpIgnoreStyle(key, s.subs[i].key) == 0: return i result = -1 +proc lineInfo(p: RstParser, iTok: int): TLineInfo = + result.col = int16(p.col + p.tok[iTok].col) + result.line = uint16(p.line + p.tok[iTok].line) + result.fileIndex = p.s.currFileIdx + +proc lineInfo(p: RstParser): TLineInfo = lineInfo(p, p.idx) +# TODO: we need this simplification because we don't preserve exact starting +# token of currently parsed element: +proc prevLineInfo(p: RstParser): TLineInfo = lineInfo(p, p.idx-1) + proc setSub(p: var RstParser, key: string, value: PRstNode) = - var length = len(p.s.subs) - for i in countup(0, length - 1): + var length = p.s.subs.len + for i in 0 ..< length: if key == 
p.s.subs[i].key: p.s.subs[i].value = value return - setLen(p.s.subs, length + 1) - p.s.subs[length].key = key - p.s.subs[length].value = value + p.s.subs.add(Substitution(key: key, value: value, info: prevLineInfo(p))) -proc setRef(p: var RstParser, key: string, value: PRstNode) = - var length = len(p.s.refs) - for i in countup(0, length - 1): +proc setRef(p: var RstParser, key: string, value: PRstNode, + refType: SubstitutionKind) = + var length = p.s.refs.len + for i in 0 ..< length: if key == p.s.refs[i].key: if p.s.refs[i].value.addNodes != value.addNodes: rstMessage(p, mwRedefinitionOfLabel, key) - p.s.refs[i].value = value return - setLen(p.s.refs, length + 1) - p.s.refs[length].key = key - p.s.refs[length].value = value + p.s.refs.add(Substitution(kind: refType, key: key, value: value, + info: prevLineInfo(p))) + +proc findRef(s: PRstSharedState, key: string): seq[Substitution] = + for i in countup(0, high(s.refs)): + if key == s.refs[i].key: + result.add s.refs[i] + +# Ambiguity in links: we don't follow procedure of removing implicit targets +# defined in https://docutils.sourceforge.io/docs/ref/rst/restructuredtext.html#implicit-hyperlink-targets +# Instead we just give explicit links a higher priority than to implicit ones +# and report ambiguities as warnings. Hopefully it is easy to remove +# ambiguities manually. Nim auto-generated links from ``docgen.nim`` +# have lowest priority: 1 (for procs) and below for other symbol types. + +proc refPriority(k: SubstitutionKind): int = + case k + of rstSubstitution: result = 8 + of hyperlinkAlias: result = 7 + of implicitHyperlinkAlias: result = 2 + +proc internalRefPriority(k: RstAnchorKind): int = + case k + of manualDirectiveAnchor: result = 6 + of manualInlineAnchor: result = 5 + of footnoteAnchor: result = 4 + of headlineAnchor: result = 3 + +proc `$`(subst: AnchorSubst): string = # for debug + let s = + case subst.kind + of arInternalRst: "type=" & $subst.anchorType + of arExternalRst: "type=" & $subst.anchorTypeExt + of arNim: "langsym=" & $subst.langSym + result = "(kind=$1, priority=$2, $3)" % [$subst.kind, $subst.priority, s] + +proc addAnchorRst(p: var RstParser, name: string, target: PRstNode, + anchorType: RstAnchorKind) = + ## Associates node `target` (which has field `anchor`) with an + ## alias `name` and updates the corresponding aliases in `p.curAnchors`. + let prio = internalRefPriority(anchorType) + for a in p.curAnchors: + p.s.anchors.mgetOrPut(a.alias, newSeq[AnchorSubst]()).add( + AnchorSubst(kind: arInternalRst, target: target, priority: prio, + info: a.info, anchorType: manualDirectiveAnchor)) + if name != "": + p.s.anchors.mgetOrPut(name, newSeq[AnchorSubst]()).add( + AnchorSubst(kind: arInternalRst, target: target, priority: prio, + info: prevLineInfo(p), anchorType: anchorType)) + p.curAnchors.setLen 0 + +proc addAnchorExtRst(s: var PRstSharedState, key: string, refn: string, + anchorType: RstAnchorKind, info: TLineInfo) = + let name = key.toLowerAscii + let prio = internalRefPriority(anchorType) + s.anchors.mgetOrPut(name, newSeq[AnchorSubst]()).add( + AnchorSubst(kind: arExternalRst, refnameExt: refn, priority: prio, + info: info, + anchorTypeExt: anchorType)) + +proc addAnchorNim*(s: var PRstSharedState, external: bool, refn: string, tooltip: string, + langSym: LangSymbol, priority: int, + info: TLineInfo, module: FileIndex) = + ## Adds an anchor `refn`, which follows + ## the rule `arNim` (i.e. 
a symbol in ``*.nim`` file) + s.anchors.mgetOrPut(langSym.name, newSeq[AnchorSubst]()).add( + AnchorSubst(kind: arNim, external: external, refname: refn, langSym: langSym, + tooltip: tooltip, priority: priority, + info: info)) + +proc findMainAnchorNim(s: PRstSharedState, signature: PRstNode, + info: TLineInfo): + seq[AnchorSubst] = + var langSym: LangSymbol + try: + langSym = toLangSymbol(signature) + except ValueError: # parsing failed, not a Nim symbol + return + let substitutions = s.anchors.getOrDefault(langSym.name, + newSeq[AnchorSubst]()) + if substitutions.len == 0: + return + # logic to select only groups instead of concrete symbols + # with overloads, note that the same symbol can be defined + # in multiple modules and `importdoc`ed: + type GroupKey = tuple[symKind: string, origModule: string] + # map (symKind, file) (like "proc", "os.nim") -> found symbols/groups: + var found: Table[GroupKey, seq[AnchorSubst]] + for subst in substitutions: + if subst.kind == arNim: + if match(subst.langSym, langSym): + let key: GroupKey = (subst.langSym.symKind, getModule(s, subst)) + found.mgetOrPut(key, newSeq[AnchorSubst]()).add subst + for key, sList in found: + if sList.len == 1: + result.add sList[0] + else: # > 1, there are overloads, potential ambiguity in this `symKind` + if langSym.parametersProvided: + # there are non-group signatures, select only them + for s in sList: + if not s.langSym.isGroup: + result.add s + else: # when there are many overloads a link like foo_ points to all + # of them, so selecting the group + var foundGroup = false + for s in sList: + if s.langSym.isGroup: + result.add s + foundGroup = true + break + doAssert(foundGroup, + "docgen has not generated the group for $1 (file $2)" % [ + langSym.name, getModule(s, sList[0]) ]) + +proc findMainAnchorRst(s: PRstSharedState, linkText: string, info: TLineInfo): + seq[AnchorSubst] = + let name = linkText.toLowerAscii + let substitutions = s.anchors.getOrDefault(name, newSeq[AnchorSubst]()) + for s in substitutions: + if s.kind in {arInternalRst, arExternalRst}: + result.add s + +proc addFootnoteNumManual(p: var RstParser, num: int) = + ## add manually-numbered footnote + for fnote in p.s.footnotes: + if fnote.number == num: + rstMessage(p, mwRedefinitionOfLabel, $num) + return + p.s.footnotes.add((fnManualNumber, num, -1, -1, $num)) + +proc addFootnoteNumAuto(p: var RstParser, label: string) = + ## add auto-numbered footnote. + ## Empty label [#] means it'll be resolved by the occurrence. + if label == "": # simple auto-numbered [#] + p.s.lineFootnoteNum.add lineInfo(p) + p.s.footnotes.add((fnAutoNumber, -1, p.s.lineFootnoteNum.len, -1, label)) + else: # auto-numbered with label [#label] + for fnote in p.s.footnotes: + if fnote.label == label: + rstMessage(p, mwRedefinitionOfLabel, label) + return + p.s.footnotes.add((fnAutoNumberLabel, -1, -1, -1, label)) + +proc addFootnoteSymAuto(p: var RstParser) = + p.s.lineFootnoteSym.add lineInfo(p) + p.s.footnotes.add((fnAutoSymbol, -1, -1, p.s.lineFootnoteSym.len, "")) + +proc orderFootnotes(s: PRstSharedState) = + ## numerate auto-numbered footnotes taking into account that all + ## manually numbered ones always have preference. + ## Save the result back to `s.footnotes`. 
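# A simplified standalone sketch of the numbering rule implemented below,
# operating on plain ints instead of `FootnoteSubst` (the helper name
# `numberAutoFootnotes` is illustrative):

proc numberAutoFootnotes(manual: seq[int]; autoCount: int): seq[int] =
  ## numbers assigned to auto-numbered footnotes: the smallest numbers
  ## not taken by manually numbered ones, assigned in ascending order
  var used = manual
  var candidate = 1
  for _ in 1 .. autoCount:
    while candidate in used: inc candidate
    result.add candidate
    used.add candidate

# e.g. the sequence [2]_ [#]_ [4]_ [#]_ gets the numbers 2, 1, 4, 3:
assert numberAutoFootnotes(@[2, 4], 2) == @[1, 3]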
+ + # Report an error if found any mismatch in number of automatic footnotes + proc listFootnotes(locations: seq[TLineInfo]): string = + var lines: seq[string] + for info in locations: + if s.filenames.len > 1: + let file = getFilename(s.filenames, info.fileIndex) + lines.add file & ":" + else: # no need to add file name here if there is only 1 + lines.add "" + lines[^1].add $info.line + result.add $lines.len & " (lines " & join(lines, ", ") & ")" + if s.lineFootnoteNum.len != s.lineFootnoteNumRef.len: + rstMessage(s, meFootnoteMismatch, + "$1 != $2" % [listFootnotes(s.lineFootnoteNum), + listFootnotes(s.lineFootnoteNumRef)] & + " for auto-numbered footnotes") + if s.lineFootnoteSym.len != s.lineFootnoteSymRef.len: + rstMessage(s, meFootnoteMismatch, + "$1 != $2" % [listFootnotes(s.lineFootnoteSym), + listFootnotes(s.lineFootnoteSymRef)] & + " for auto-symbol footnotes") + + var result: seq[FootnoteSubst] + var manuallyN, autoN, autoSymbol: seq[FootnoteSubst] + for fs in s.footnotes: + if fs.kind == fnManualNumber: manuallyN.add fs + elif fs.kind in {fnAutoNumber, fnAutoNumberLabel}: autoN.add fs + else: autoSymbol.add fs + + if autoN.len == 0: + result = manuallyN + else: + # fill gaps between manually numbered footnotes in ascending order + manuallyN.sort() # sort by number - its first field + var lst = initSinglyLinkedList[FootnoteSubst]() + for elem in manuallyN: lst.append(elem) + var firstAuto = 0 + if lst.head == nil or lst.head.value.number != 1: + # no manual footnote [1], start numeration from 1 for auto-numbered + lst.prepend (autoN[0].kind, 1, autoN[0].autoNumIdx, -1, autoN[0].label) + firstAuto = 1 + var curNode = lst.head + var nextNode: SinglyLinkedNode[FootnoteSubst] + # go simultaneously through `autoN` and `lst` looking for gaps + for (kind, x, autoNumIdx, y, label) in autoN[firstAuto .. ^1]: + while (nextNode = curNode.next; nextNode != nil): + if nextNode.value.number - curNode.value.number > 1: + # gap found, insert new node `n` between curNode and nextNode: + var n = newSinglyLinkedNode((kind, curNode.value.number + 1, + autoNumIdx, -1, label)) + curNode.next = n + n.next = nextNode + curNode = n + break + else: + curNode = nextNode + if nextNode == nil: # no gap found, just append + lst.append (kind, curNode.value.number + 1, autoNumIdx, -1, label) + curNode = lst.tail + result = lst.toSeq + + # we use ASCII symbols instead of those recommended in RST specification: + const footnoteAutoSymbols = ["*", "^", "+", "=", "~", "$", "@", "%", "&"] + for fs in autoSymbol: + # assignment order: *, **, ***, ^, ^^, ^^^, ... &&&, ****, *****, ... + let i = fs.autoSymIdx - 1 + let symbolNum = (i div 3) mod footnoteAutoSymbols.len + let nSymbols = (1 + i mod 3) + 3 * (i div (3 * footnoteAutoSymbols.len)) + let label = footnoteAutoSymbols[symbolNum].repeat(nSymbols) + result.add((fs.kind, -1, -1, fs.autoSymIdx, label)) + + s.footnotes = result + +proc getFootnoteNum(s: PRstSharedState, label: string): int = + ## get number from label. Must be called after `orderFootnotes`. + result = -1 + for fnote in s.footnotes: + if fnote.label == label: + return fnote.number -proc findRef(p: var RstParser, key: string): PRstNode = - for i in countup(0, high(p.s.refs)): - if key == p.s.refs[i].key: - return p.s.refs[i].value +proc getFootnoteNum(s: PRstSharedState, order: int): int = + ## get number from occurrence. Must be called after `orderFootnotes`. 
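# The auto-symbol label scheme from `orderFootnotes` above, as a small
# standalone helper (`autoSymLabel` is an illustrative name):

import std/strutils

const autoSymbols = ["*", "^", "+", "=", "~", "$", "@", "%", "&"]

proc autoSymLabel(occurrence: int): string =
  ## label of the `occurrence`-th auto-symbol footnote (1-based),
  ## assigned in the order *, **, ***, ^, ^^, ^^^, ... &&&, ****, ...
  let i = occurrence - 1
  let symbolNum = (i div 3) mod autoSymbols.len
  let nSymbols = (1 + i mod 3) + 3 * (i div (3 * autoSymbols.len))
  result = autoSymbols[symbolNum].repeat(nSymbols)

assert autoSymLabel(1) == "*"
assert autoSymLabel(3) == "***"
assert autoSymLabel(4) == "^"
assert autoSymLabel(28) == "****"   # wraps around after &&&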
+ result = -1 + for fnote in s.footnotes: + if fnote.autoNumIdx == order: + return fnote.number + +proc getAutoSymbol(s: PRstSharedState, order: int): string = + ## get symbol from occurrence of auto-symbol footnote. + result = "???" + for fnote in s.footnotes: + if fnote.autoSymIdx == order: + return fnote.label + +proc newRstNodeA(p: var RstParser, kind: RstNodeKind): PRstNode = + ## create node and consume the current anchor + result = newRstNode(kind) + if p.curAnchors.len > 0: + result.anchor = p.curAnchors[0].anchor + addAnchorRst(p, "", result, manualDirectiveAnchor) + +template newLeaf(s: string): PRstNode = newRstLeaf(s) proc newLeaf(p: var RstParser): PRstNode = - result = newRstNode(rnLeaf, p.tok[p.idx].symbol) + result = newLeaf(currentTok(p).symbol) + +proc validRefnamePunct(x: string): bool = + ## https://docutils.sourceforge.io/docs/ref/rst/restructuredtext.html#reference-names + x.len == 1 and x[0] in {'-', '_', '.', ':', '+'} + +func getRefnameIdx(p: RstParser, startIdx: int): int = + ## Gets last token index of a refname ("word" in RST terminology): + ## + ## reference names are single words consisting of alphanumerics plus + ## isolated (no two adjacent) internal hyphens, underscores, periods, + ## colons and plus signs; no whitespace or other characters are allowed. + ## + ## Refnames are used for: + ## - reference names + ## - role names + ## - directive names + ## - footnote labels + ## + # TODO: use this func in all other relevant places + var j = startIdx + if p.tok[j].kind == tkWord: + inc j + while p.tok[j].kind == tkPunct and validRefnamePunct(p.tok[j].symbol) and + p.tok[j+1].kind == tkWord: + inc j, 2 + result = j - 1 + +func getRefname(p: RstParser, startIdx: int): (string, int) = + let lastIdx = getRefnameIdx(p, startIdx) + result[1] = lastIdx + for j in startIdx..lastIdx: + result[0].add p.tok[j].symbol proc getReferenceName(p: var RstParser, endStr: string): PRstNode = var res = newRstNode(rnInner) while true: - case p.tok[p.idx].kind + case currentTok(p).kind of tkWord, tkOther, tkWhite: - add(res, newLeaf(p)) + res.add(newLeaf(p)) of tkPunct: - if p.tok[p.idx].symbol == endStr: - inc(p.idx) + if currentTok(p).symbol == endStr: + inc p.idx break else: - add(res, newLeaf(p)) + res.add(newLeaf(p)) else: rstMessage(p, meExpected, endStr) break - inc(p.idx) + inc p.idx result = res proc untilEol(p: var RstParser): PRstNode = result = newRstNode(rnInner) - while not (p.tok[p.idx].kind in {tkIndent, tkEof}): - add(result, newLeaf(p)) - inc(p.idx) + while currentTok(p).kind notin {tkIndent, tkEof}: + result.add(newLeaf(p)) + inc p.idx proc expect(p: var RstParser, tok: string) = - if p.tok[p.idx].symbol == tok: inc(p.idx) + if currentTok(p).symbol == tok: inc p.idx else: rstMessage(p, meExpected, tok) -proc isInlineMarkupEnd(p: RstParser, markup: string): bool = - result = p.tok[p.idx].symbol == markup - if not result: - return # Rule 3: - result = not (p.tok[p.idx - 1].kind in {tkIndent, tkWhite}) - if not result: - return # Rule 4: - result = (p.tok[p.idx + 1].kind in {tkIndent, tkWhite, tkEof}) or - (p.tok[p.idx + 1].symbol[0] in - {'\'', '\"', ')', ']', '}', '>', '-', '/', '\\', ':', '.', ',', ';', '!', - '?', '_'}) - if not result: - return # Rule 7: +proc inlineMarkdownEnd(p: RstParser): bool = + result = prevTok(p).kind notin {tkIndent, tkWhite} + ## (For a special case of ` we don't allow spaces surrounding it + ## unlike original Markdown because this behavior confusing/useless) + +proc inlineRstEnd(p: RstParser): bool = + # rst rules: 
https://docutils.sourceforge.io/docs/ref/rst/restructuredtext.html#inline-markup-recognition-rules + # Rule 2: + result = prevTok(p).kind notin {tkIndent, tkWhite} + if not result: return + # Rule 7: + result = nextTok(p).kind in {tkIndent, tkWhite, tkEof} or + nextTok(p).symbol[0] in + {'\'', '\"', ')', ']', '}', '>', '-', '/', '\\', ':', '.', ',', ';', '!', '?', '_'} + +proc isInlineMarkupEnd(p: RstParser, markup: string, exact: bool): bool = + if exact: + result = currentTok(p).symbol == markup + else: + result = currentTok(p).symbol.endsWith markup + if (not result) and markup == "``": + # check that escaping may have splitted `` to 2 tokens ` and ` + result = currentTok(p).symbol == "`" and prevTok(p).symbol == "`" + if not result: return + # surroundings check + if markup in ["_", "__"]: + result = inlineRstEnd(p) + else: + if roPreferMarkdown in p.s.options: result = inlineMarkdownEnd(p) + else: result = inlineRstEnd(p) + +proc rstRuleSurround(p: RstParser): bool = + result = true + # Rules 4 & 5: if p.idx > 0: - if (markup != "``") and (p.tok[p.idx - 1].symbol == "\\"): - result = false + var d: char + var c = prevTok(p).symbol[0] + case c + of '\'', '\"': d = c + of '(': d = ')' + of '[': d = ']' + of '{': d = '}' + of '<': d = '>' + else: d = '\0' + if d != '\0': result = nextTok(p).symbol[0] != d + +proc inlineMarkdownStart(p: RstParser): bool = + result = nextTok(p).kind notin {tkIndent, tkWhite, tkEof} + if not result: return + # this rst rule is really nice, let us use it in Markdown mode too. + result = rstRuleSurround(p) + +proc inlineRstStart(p: RstParser): bool = + ## rst rules: https://docutils.sourceforge.io/docs/ref/rst/restructuredtext.html#inline-markup-recognition-rules + # Rule 6 + result = p.idx == 0 or prevTok(p).kind in {tkIndent, tkWhite} or + prevTok(p).symbol[0] in {'\'', '\"', '(', '[', '{', '<', '-', '/', ':', '_'} + if not result: return + # Rule 1: + result = nextTok(p).kind notin {tkIndent, tkWhite, tkEof} + if not result: return + result = rstRuleSurround(p) proc isInlineMarkupStart(p: RstParser, markup: string): bool = - var d: char - result = p.tok[p.idx].symbol == markup - if not result: - return # Rule 1: - result = (p.idx == 0) or (p.tok[p.idx - 1].kind in {tkIndent, tkWhite}) or - (p.tok[p.idx - 1].symbol[0] in - {'\'', '\"', '(', '[', '{', '<', '-', '/', ':', '_'}) - if not result: - return # Rule 2: - result = not (p.tok[p.idx + 1].kind in {tkIndent, tkWhite, tkEof}) - if not result: - return # Rule 5 & 7: - if p.idx > 0: - if p.tok[p.idx - 1].symbol == "\\": - result = false - else: - var c = p.tok[p.idx - 1].symbol[0] - case c - of '\'', '\"': d = c - of '(': d = ')' - of '[': d = ']' - of '{': d = '}' - of '<': d = '>' - else: d = '\0' - if d != '\0': result = p.tok[p.idx + 1].symbol[0] != d + if markup != "_`": + result = currentTok(p).symbol == markup + else: # _` is a 2 token case + result = currentTok(p).symbol == "_" and nextTok(p).symbol == "`" + if not result: return + # surroundings check + if markup in ["_", "__", "[", "|"]: + # Note: we require space/punctuation even before [markdown link](...) 
+ result = inlineRstStart(p) + else: + if roPreferMarkdown in p.s.options: result = inlineMarkdownStart(p) + else: result = inlineRstStart(p) proc match(p: RstParser, start: int, expr: string): bool = # regular expressions are: @@ -550,97 +1202,159 @@ proc match(p: RstParser, start: int, expr: string): bool = # ' ' tkWhite # 'a' tkAdornment # 'i' tkIndent + # 'I' tkIndent or tkEof # 'p' tkPunct # 'T' always true # 'E' whitespace, indent or eof - # 'e' tkWord or '#' (for enumeration lists) + # 'e' any enumeration sequence or '#' (for enumeration lists) + # 'x' a..z or '#' (for enumeration lists) + # 'n' 0..9 or '#' (for enumeration lists) var i = 0 var j = start - var last = len(expr) - 1 + var last = expr.len - 1 while i <= last: case expr[i] - of 'w': result = p.tok[j].kind == tkWord + of 'w': + let lastIdx = getRefnameIdx(p, j) + result = lastIdx >= j + if result: j = lastIdx of ' ': result = p.tok[j].kind == tkWhite of 'i': result = p.tok[j].kind == tkIndent + of 'I': result = p.tok[j].kind in {tkIndent, tkEof} of 'p': result = p.tok[j].kind == tkPunct of 'a': result = p.tok[j].kind == tkAdornment of 'o': result = p.tok[j].kind == tkOther of 'T': result = true of 'E': result = p.tok[j].kind in {tkEof, tkWhite, tkIndent} - of 'e': - result = (p.tok[j].kind == tkWord) or (p.tok[j].symbol == "#") + of 'e', 'x', 'n': + result = p.tok[j].kind == tkWord or p.tok[j].symbol == "#" if result: case p.tok[j].symbol[0] - of 'a'..'z', 'A'..'Z', '#': result = len(p.tok[j].symbol) == 1 - of '0'..'9': result = allCharsInSet(p.tok[j].symbol, {'0'..'9'}) + of '#': result = true + of 'a'..'z', 'A'..'Z': + result = expr[i] in {'e', 'x'} and p.tok[j].symbol.len == 1 + of '0'..'9': + result = expr[i] in {'e', 'n'} and + allCharsInSet(p.tok[j].symbol, {'0'..'9'}) else: result = false else: var c = expr[i] var length = 0 - while (i <= last) and (expr[i] == c): - inc(i) - inc(length) - dec(i) - result = (p.tok[j].kind in {tkPunct, tkAdornment}) and - (len(p.tok[j].symbol) == length) and (p.tok[j].symbol[0] == c) + while i <= last and expr[i] == c: + inc i + inc length + dec i + result = p.tok[j].kind in {tkPunct, tkAdornment} and + p.tok[j].symbol.len == length and p.tok[j].symbol[0] == c if not result: return - inc(j) - inc(i) + inc j + inc i result = true -proc fixupEmbeddedRef(n, a, b: PRstNode) = +proc safeProtocol*(linkStr: var string): string = + # Returns link's protocol and, if it's not safe, clears `linkStr` + result = "" + if scanf(linkStr, "$w:", result): + # if it has a protocol at all, ensure that it's not 'javascript:' or worse: + if cmpIgnoreCase(result, "http") == 0 or + cmpIgnoreCase(result, "https") == 0 or + cmpIgnoreCase(result, "ftp") == 0: + discard "it's fine" + else: + linkStr = "" + +proc fixupEmbeddedRef(p: var RstParser, n, a, b: PRstNode): bool = + # Returns `true` if the link belongs to an allowed protocol var sep = - 1 - for i in countdown(len(n) - 2, 0): + for i in countdown(n.len - 2, 0): if n.sons[i].text == "<": sep = i break - var incr = if (sep > 0) and (n.sons[sep - 1].text[0] == ' '): 2 else: 1 - for i in countup(0, sep - incr): add(a, n.sons[i]) - for i in countup(sep + 1, len(n) - 2): add(b, n.sons[i]) + var incr = if sep > 0 and n.sons[sep - 1].text[0] == ' ': 2 else: 1 + for i in countup(0, sep - incr): a.add(n.sons[i]) + var linkStr = "" + for i in countup(sep + 1, n.len - 2): linkStr.add(n.sons[i].addNodes) + if linkStr != "": + let protocol = safeProtocol(linkStr) + result = linkStr != "" + if not result: + rstMessage(p, mwBrokenLink, protocol, + p.tok[p.idx-3].line, 
p.tok[p.idx-3].col) + b.add newLeaf(linkStr) + +proc whichRole(p: RstParser, sym: string): RstNodeKind = + result = whichRoleAux(sym) + if result == rnUnknownRole: + rstMessage(p, mwUnsupportedLanguage, sym) + +proc toInlineCode(n: PRstNode, language: string): PRstNode = + ## Creates rnInlineCode and attaches `n` contents as code (in 3rd son). + result = newRstNode(rnInlineCode, info=n.info) + let args = newRstNode(rnDirArg) + var lang = language + if language == "cpp": lang = "c++" + elif language == "csharp": lang = "c#" + args.add newLeaf(lang) + result.add args + result.add PRstNode(nil) + var lb = newRstNode(rnLiteralBlock) + var s: string + for i in n.sons: + assert i.kind == rnLeaf + s.add i.text + lb.add newLeaf(s) + result.add lb + +proc toOtherRole(n: PRstNode, kind: RstNodeKind, roleName: string): PRstNode = + let newN = newRstNode(rnInner, n.sons) + let newSons = @[newN, newLeaf(roleName)] + result = newRstNode(kind, newSons) proc parsePostfix(p: var RstParser, n: PRstNode): PRstNode = - result = n - if isInlineMarkupEnd(p, "_") or isInlineMarkupEnd(p, "__"): - inc(p.idx) + ## Finalizes node `n` that was tentatively determined as interpreted text. + var newKind = n.kind + var newSons = n.sons + + proc finalizeInterpreted(node: PRstNode, newKind: RstNodeKind, + newSons: seq[PRstNode], roleName: string): + PRstNode {.nimcall.} = + # fixes interpreted text (`x` or `y`:role:) to proper internal AST format + if newKind in {rnUnknownRole, rnCodeFragment}: + result = node.toOtherRole(newKind, roleName) + elif newKind == rnInlineCode: + result = node.toInlineCode(language=roleName) + else: + result = newRstNode(newKind, newSons) + + if isInlineMarkupEnd(p, "_", exact=true) or + isInlineMarkupEnd(p, "__", exact=true): + inc p.idx if p.tok[p.idx-2].symbol == "`" and p.tok[p.idx-3].symbol == ">": var a = newRstNode(rnInner) var b = newRstNode(rnInner) - fixupEmbeddedRef(n, a, b) - if len(a) == 0: - result = newRstNode(rnStandaloneHyperlink) - add(result, b) - else: - result = newRstNode(rnHyperlink) - add(result, a) - add(result, b) - setRef(p, rstnodeToRefname(a), b) - elif n.kind == rnInterpretedText: - n.kind = rnRef - else: - result = newRstNode(rnRef) - add(result, n) + if fixupEmbeddedRef(p, n, a, b): + if a.len == 0: # e.g. `<a_named_relative_link>`_ + newKind = rnStandaloneHyperlink + newSons = @[b] + else: # e.g. 
`link title <http://site>`_ + newKind = rnHyperlink + newSons = @[a, b] + setRef(p, rstnodeToRefname(a), b, implicitHyperlinkAlias) + else: # include as plain text, not a link + newKind = rnInner + newSons = n.sons + result = newRstNode(newKind, newSons) + else: # some link that will be resolved in `resolveSubs` + newKind = rnRstRef + result = newRstNode(newKind, sons=newSons, info=n.info) elif match(p, p.idx, ":w:"): # a role: - if p.tok[p.idx + 1].symbol == "idx": - n.kind = rnIdx - elif p.tok[p.idx + 1].symbol == "literal": - n.kind = rnInlineLiteral - elif p.tok[p.idx + 1].symbol == "strong": - n.kind = rnStrongEmphasis - elif p.tok[p.idx + 1].symbol == "emphasis": - n.kind = rnEmphasis - elif (p.tok[p.idx + 1].symbol == "sub") or - (p.tok[p.idx + 1].symbol == "subscript"): - n.kind = rnSub - elif (p.tok[p.idx + 1].symbol == "sup") or - (p.tok[p.idx + 1].symbol == "supscript"): - n.kind = rnSup - else: - result = newRstNode(rnGeneralRole) - n.kind = rnInner - add(result, n) - add(result, newRstNode(rnLeaf, p.tok[p.idx + 1].symbol)) - inc(p.idx, 3) + let (roleName, lastIdx) = getRefname(p, p.idx+1) + newKind = whichRole(p, roleName) + result = n.finalizeInterpreted(newKind, newSons, roleName) + p.idx = lastIdx + 2 + else: + result = n.finalizeInterpreted(p.s.currRoleKind, newSons, p.s.currRole) proc matchVerbatim(p: RstParser, start: int, expr: string): int = result = start @@ -652,7 +1366,7 @@ proc matchVerbatim(p: RstParser, start: int, expr: string): int = if j < expr.len: result = 0 proc parseSmiley(p: var RstParser): PRstNode = - if p.tok[p.idx].symbol[0] notin SmileyStartChars: return + if currentTok(p).symbol[0] notin SmileyStartChars: return for key, val in items(Smilies): let m = matchVerbatim(p, p.idx, key) if m > 0: @@ -661,306 +1375,683 @@ proc parseSmiley(p: var RstParser): PRstNode = result.text = val return -when false: - const - urlChars = {'A'..'Z', 'a'..'z', '0'..'9', ':', '#', '@', '%', '/', ';', - '$', '(', ')', '~', '_', '?', '+', '-', '=', '\\', '.', '&', - '\128'..'\255'} - proc isUrl(p: RstParser, i: int): bool = - result = (p.tok[i+1].symbol == ":") and (p.tok[i+2].symbol == "//") and - (p.tok[i+3].kind == tkWord) and - (p.tok[i].symbol in ["http", "https", "ftp", "telnet", "file"]) - -proc parseUrl(p: var RstParser, father: PRstNode) = - #if p.tok[p.idx].symbol[strStart] == '<': - if isUrl(p, p.idx): - var n = newRstNode(rnStandaloneHyperlink) - while true: - case p.tok[p.idx].kind - of tkWord, tkAdornment, tkOther: discard - of tkPunct: - if p.tok[p.idx+1].kind notin {tkWord, tkAdornment, tkOther, tkPunct}: + result = p.tok[i+1].symbol == ":" and p.tok[i+2].symbol == "//" and + p.tok[i+3].kind == tkWord and + p.tok[i].symbol in ["http", "https", "ftp", "telnet", "file"] + +proc checkParen(token: Token, parensStack: var seq[char]): bool {.inline.} = + ## Returns `true` iff `token` is a closing parenthesis for some + ## previous opening parenthesis saved in `parensStack`. + ## This is according Markdown balanced parentheses rule + ## (https://spec.commonmark.org/0.29/#link-destination) + ## to allow links like + ## https://en.wikipedia.org/wiki/APL_(programming_language), + ## we use it for RST also. 
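# A standalone sketch of how this balanced-parentheses rule combines with
# the "no trailing punctuation" rule in `parseUrl` below. The helper name
# `trimBareUrl` and the exact punctuation set are illustrative only:

proc trimBareUrl(s: string): string =
  const trailing = {'.', ',', ';', ':', '!', '?', ')', ']', '\''}
  var parens = 0
  var last = -1
  for i, c in s:
    if c == '(': inc parens
    if c == ')' and parens > 0:
      dec parens
      last = i                     # this ')' closes a '(' inside the link
    elif c notin trailing:
      last = i
  result = s[0 .. last]

assert trimBareUrl("https://en.wikipedia.org/wiki/APL_(programming_language),") ==
    "https://en.wikipedia.org/wiki/APL_(programming_language)"
assert trimBareUrl("http://example.com/page.") == "http://example.com/page"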
+ result = false + if token.kind == tkPunct: + let c = token.symbol[0] + if c in {'(', '[', '{'}: # push + parensStack.add c + elif c in {')', ']', '}'}: # try pop + # a case like ([) inside a link is allowed and [ is also `pop`ed: + for i in countdown(parensStack.len - 1, 0): + if (parensStack[i] == '(' and c == ')' or + parensStack[i] == '[' and c == ']' or + parensStack[i] == '{' and c == '}'): + parensStack.setLen i + result = true break - else: break - add(n, newLeaf(p)) - inc(p.idx) - add(father, n) + +proc parseUrl(p: var RstParser): PRstNode = + ## https://docutils.sourceforge.io/docs/ref/rst/restructuredtext.html#standalone-hyperlinks + result = newRstNode(rnStandaloneHyperlink) + var lastIdx = p.idx + var closedParenIdx = p.idx - 1 # for balanced parens rule + var parensStack: seq[char] + while p.tok[lastIdx].kind in {tkWord, tkPunct, tkOther}: + let isClosing = checkParen(p.tok[lastIdx], parensStack) + if isClosing: + closedParenIdx = lastIdx + inc lastIdx + dec lastIdx + # standalone URL can not end with punctuation in RST + while lastIdx > closedParenIdx and p.tok[lastIdx].kind == tkPunct and + p.tok[lastIdx].symbol != "/": + dec lastIdx + var s = "" + for i in p.idx .. lastIdx: s.add p.tok[i].symbol + result.add s + p.idx = lastIdx + 1 + +proc parseWordOrRef(p: var RstParser, father: PRstNode) = + ## Parses a normal word or may be a reference or URL. + if nextTok(p).kind != tkPunct: # <- main path, a normal word + father.add newLeaf(p) + inc p.idx + elif isUrl(p, p.idx): # URL http://something + father.add parseUrl(p) else: - var n = newLeaf(p) - inc(p.idx) - if p.tok[p.idx].symbol == "_": n = parsePostfix(p, n) - add(father, n) + # check for reference (probably, long one like some.ref.with.dots_ ) + var saveIdx = p.idx + var reference: PRstNode = nil + inc p.idx + while currentTok(p).kind in {tkWord, tkPunct}: + if currentTok(p).kind == tkPunct: + if isInlineMarkupEnd(p, "_", exact=true): + reference = newRstNode(rnRstRef, info=lineInfo(p, saveIdx)) + break + if not validRefnamePunct(currentTok(p).symbol): + break + inc p.idx + if reference != nil: + for i in saveIdx..p.idx-1: reference.add newLeaf(p.tok[i].symbol) + father.add reference + inc p.idx # skip final _ + else: # 1 normal word + father.add newLeaf(p.tok[saveIdx].symbol) + p.idx = saveIdx + 1 proc parseBackslash(p: var RstParser, father: PRstNode) = - assert(p.tok[p.idx].kind == tkPunct) - if p.tok[p.idx].symbol == "\\\\": - add(father, newRstNode(rnLeaf, "\\")) - inc(p.idx) - elif p.tok[p.idx].symbol == "\\": + assert(currentTok(p).kind == tkPunct) + if currentTok(p).symbol == "\\": # XXX: Unicode? 
- inc(p.idx) - if p.tok[p.idx].kind != tkWhite: add(father, newLeaf(p)) - if p.tok[p.idx].kind != tkEof: inc(p.idx) + inc p.idx + if currentTok(p).kind != tkWhite: father.add(newLeaf(p)) + if currentTok(p).kind != tkEof: inc p.idx else: - add(father, newLeaf(p)) - inc(p.idx) - -when false: - proc parseAdhoc(p: var RstParser, father: PRstNode, verbatim: bool) = - if not verbatim and isURL(p, p.idx): - var n = newRstNode(rnStandaloneHyperlink) - while true: - case p.tok[p.idx].kind - of tkWord, tkAdornment, tkOther: nil - of tkPunct: - if p.tok[p.idx+1].kind notin {tkWord, tkAdornment, tkOther, tkPunct}: - break - else: break - add(n, newLeaf(p)) - inc(p.idx) - add(father, n) - elif not verbatim and roSupportSmilies in p.sharedState.options: - let n = parseSmiley(p) - if s != nil: - add(father, n) - else: - var n = newLeaf(p) - inc(p.idx) - if p.tok[p.idx].symbol == "_": n = parsePostfix(p, n) - add(father, n) + father.add(newLeaf(p)) + inc p.idx proc parseUntil(p: var RstParser, father: PRstNode, postfix: string, interpretBackslash: bool) = let - line = p.tok[p.idx].line - col = p.tok[p.idx].col + line = currentTok(p).line + col = currentTok(p).col inc p.idx while true: - case p.tok[p.idx].kind + case currentTok(p).kind of tkPunct: - if isInlineMarkupEnd(p, postfix): - inc(p.idx) + if isInlineMarkupEnd(p, postfix, exact=false): + let l = currentTok(p).symbol.len + if l > postfix.len: + # handle cases like *emphasis with stars****. (It's valid RST!) + father.add newLeaf(currentTok(p).symbol[0 ..< l - postfix.len]) + elif postfix == "``" and currentTok(p).symbol == "`" and + prevTok(p).symbol == "`": + # handle cases like ``literal\`` - delete ` already added after \ + father.sons.setLen(father.sons.len - 1) + inc p.idx break - elif interpretBackslash: - parseBackslash(p, father) else: - add(father, newLeaf(p)) - inc(p.idx) + if postfix == "`": + if currentTok(p).symbol == "\\": + if nextTok(p).symbol == "\\": + father.add newLeaf("\\") + father.add newLeaf("\\") + inc p.idx, 2 + elif nextTok(p).symbol == "`": # escape ` + father.add newLeaf("`") + inc p.idx, 2 + else: + father.add newLeaf("\\") + inc p.idx + else: + father.add(newLeaf(p)) + inc p.idx + else: + if interpretBackslash: + parseBackslash(p, father) + else: + father.add(newLeaf(p)) + inc p.idx of tkAdornment, tkWord, tkOther: - add(father, newLeaf(p)) - inc(p.idx) + father.add(newLeaf(p)) + inc p.idx of tkIndent: - add(father, newRstNode(rnLeaf, " ")) - inc(p.idx) - if p.tok[p.idx].kind == tkIndent: + father.add newLeaf(" ") + inc p.idx + if currentTok(p).kind == tkIndent: rstMessage(p, meExpected, postfix, line, col) break of tkWhite: - add(father, newRstNode(rnLeaf, " ")) - inc(p.idx) + father.add newLeaf(" ") + inc p.idx else: rstMessage(p, meExpected, postfix, line, col) +proc parseMarkdownCodeblockFields(p: var RstParser): PRstNode = + ## Parses additional (after language string) code block parameters + ## in a format *suggested* in the `CommonMark Spec`_ with handling of `"`. 
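# A rough standalone sketch of the `key=value` fields accepted after the
# language name on a fenced code block, e.g. ```nim test="true" number-lines=10
# (simplified: quoted values containing spaces are not handled here, and the
# field names are just examples):

import std/strutils

proc parseInfoFields(info: string): seq[(string, string)] =
  for part in info.splitWhitespace()[1 .. ^1]:   # skip the language itself
    let kv = part.split('=', maxsplit = 1)
    if kv.len == 2:
      result.add((kv[0], kv[1].strip(chars = {'"'})))
    else:
      result.add((kv[0], ""))

assert parseInfoFields("nim test=\"true\" number-lines=10") ==
    @[("test", "true"), ("number-lines", "10")]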
+ if currentTok(p).kind == tkIndent: + result = nil + else: + result = newRstNode(rnFieldList) + while currentTok(p).kind notin {tkIndent, tkEof}: + if currentTok(p).kind == tkWhite: + inc p.idx + else: + let field = newRstNode(rnField) + var fieldName = "" + while currentTok(p).kind notin {tkWhite, tkIndent, tkEof} and + currentTok(p).symbol != "=": + fieldName.add currentTok(p).symbol + inc p.idx + field.add(newRstNode(rnFieldName, @[newLeaf(fieldName)])) + if currentTok(p).kind == tkWhite: inc p.idx + let fieldBody = newRstNode(rnFieldBody) + if currentTok(p).symbol == "=": + inc p.idx + if currentTok(p).kind == tkWhite: inc p.idx + var fieldValue = "" + if currentTok(p).symbol == "\"": + while true: + fieldValue.add currentTok(p).symbol + inc p.idx + if currentTok(p).kind == tkEof: + rstMessage(p, meExpected, "\"") + elif currentTok(p).symbol == "\"": + fieldValue.add "\"" + inc p.idx + break + else: + while currentTok(p).kind notin {tkWhite, tkIndent, tkEof}: + fieldValue.add currentTok(p).symbol + inc p.idx + fieldBody.add newLeaf(fieldValue) + field.add(fieldBody) + result.add(field) + +proc mayLoadFile(p: RstParser, result: var PRstNode) = + var filename = strip(getFieldValue(result, "file"), + chars = Whitespace + {'"'}) + if filename != "": + if roSandboxDisabled notin p.s.options: + let tok = p.tok[p.idx-2] + rstMessage(p, meSandboxedDirective, "file", tok.line, tok.col) + var path = p.findRelativeFile(filename) + if path == "": rstMessage(p, meCannotOpenFile, filename) + var n = newRstNode(rnLiteralBlock) + n.add newLeaf(readFile(path)) + result.sons[2] = n + +proc defaultCodeLangNim(p: RstParser, result: var PRstNode) = + # Create a field block if the input block didn't have any. + if result.sons[1].isNil: result.sons[1] = newRstNode(rnFieldList) + assert result.sons[1].kind == rnFieldList + # Hook the extra field and specify the Nim language as value. 
+ var extraNode = newRstNode(rnField, info=lineInfo(p)) + extraNode.add(newRstNode(rnFieldName)) + extraNode.add(newRstNode(rnFieldBody)) + extraNode.sons[0].add newLeaf("default-language") + extraNode.sons[1].add newLeaf("Nim") + result.sons[1].add(extraNode) + proc parseMarkdownCodeblock(p: var RstParser): PRstNode = + result = newRstNodeA(p, rnCodeBlock) + result.sons.setLen(3) + let line = curLine(p) + let baseCol = currentTok(p).col + let baseSym = currentTok(p).symbol # usually just ``` + inc p.idx + result.info = lineInfo(p) var args = newRstNode(rnDirArg) - if p.tok[p.idx].kind == tkWord: - add(args, newLeaf(p)) - inc(p.idx) + if currentTok(p).kind == tkWord: + args.add(newLeaf(p)) + inc p.idx + result.sons[1] = parseMarkdownCodeblockFields(p) + mayLoadFile(p, result) else: args = nil - var n = newRstNode(rnLeaf, "") + var n = newLeaf("") + var isFirstLine = true while true: - case p.tok[p.idx].kind - of tkEof: - rstMessage(p, meExpected, "```") + if currentTok(p).kind == tkEof: + rstMessage(p, meMissingClosing, + "$1 (started at line $2)" % [baseSym, $line]) break - of tkPunct: - if p.tok[p.idx].symbol == "```": - inc(p.idx) - break - else: - add(n.text, p.tok[p.idx].symbol) - inc(p.idx) + elif nextTok(p).kind in {tkPunct, tkAdornment} and + nextTok(p).symbol[0] == baseSym[0] and + nextTok(p).symbol.len >= baseSym.len: + inc p.idx, 2 + break + elif currentTok(p).kind == tkIndent: + if not isFirstLine: + n.text.add "\n" + if currentTok(p).ival > baseCol: + n.text.add " ".repeat(currentTok(p).ival - baseCol) + elif currentTok(p).ival < baseCol: + rstMessage(p, mwRstStyle, + "unexpected de-indentation in Markdown code block") + inc p.idx else: - add(n.text, p.tok[p.idx].symbol) - inc(p.idx) - var lb = newRstNode(rnLiteralBlock) - add(lb, n) - result = newRstNode(rnCodeBlock) - add(result, args) - add(result, PRstNode(nil)) - add(result, lb) + n.text.add(currentTok(p).symbol) + inc p.idx + isFirstLine = false + result.sons[0] = args + if result.sons[2] == nil: + var lb = newRstNode(rnLiteralBlock) + lb.add(n) + result.sons[2] = lb + if result.sons[0].isNil and roNimFile in p.s.options: + defaultCodeLangNim(p, result) proc parseMarkdownLink(p: var RstParser; father: PRstNode): bool = - result = true - var desc, link = "" + # Parses Markdown link. If it's Pandoc auto-link then its second + # son (target) will be in tokenized format (rnInner with leafs). 
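# A minimal sketch of the protocol check applied to the link destination
# parsed below (same idea as `safeProtocol` above): anything that is not
# plain http/https/ftp is rejected, so e.g. `javascript:` URIs are dropped.
# `isSafeLink` is an illustrative helper, not part of this module:

import std/[strscans, strutils]

proc isSafeLink(link: string): bool =
  var protocol = ""
  if scanf(link, "$w:", protocol):
    result = cmpIgnoreCase(protocol, "http") == 0 or
             cmpIgnoreCase(protocol, "https") == 0 or
             cmpIgnoreCase(protocol, "ftp") == 0
  else:
    result = true      # no protocol at all, e.g. a relative path

assert isSafeLink("https://nim-lang.org")
assert not isSafeLink("javascript:alert(1)")
assert isSafeLink("manual.html#anchors")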
+ var desc = newRstNode(rnInner) var i = p.idx + var parensStack: seq[char] template parse(endToken, dest) = + parensStack.setLen 0 inc i # skip begin token while true: - if p.tok[i].kind in {tkEof, tkIndent}: return false - if p.tok[i].symbol == endToken: break - dest.add p.tok[i].symbol + if p.tok[i].kind == tkEof: return false + if p.tok[i].kind == tkIndent and p.tok[i+1].kind == tkIndent: + return false + let isClosing = checkParen(p.tok[i], parensStack) + if p.tok[i].symbol == endToken and not isClosing: + break + let symbol = if p.tok[i].kind == tkIndent: " " else: p.tok[i].symbol + when dest is string: dest.add symbol + else: dest.add newLeaf(symbol) inc i inc i # skip end token parse("]", desc) - if p.tok[i].symbol != "(": return false - parse(")", link) - let child = newRstNode(rnHyperlink) - child.add desc - child.add link - # only commit if we detected no syntax error: - father.add child + if p.tok[i].symbol == "(": + var link = "" + let linkIdx = i + 1 + parse(")", link) + # only commit if we detected no syntax error: + let protocol = safeProtocol(link) + if link == "": + result = false + rstMessage(p, mwBrokenLink, protocol, + p.tok[linkIdx].line, p.tok[linkIdx].col) + else: + let child = newRstNode(rnHyperlink) + child.add newLeaf(desc.addNodes) + child.add link + father.add child + p.idx = i + result = true + elif roPreferMarkdown in p.s.options: + # Use Pandoc's implicit_header_references extension + var n = newRstNode(rnPandocRef) + if p.tok[i].symbol == "[": + var link = newRstNode(rnInner) + let targetIdx = i + 1 + parse("]", link) + n.add desc + if link.len != 0: # [description][target] + n.add link + n.info = lineInfo(p, targetIdx) + else: # [description=target][] + n.add desc + n.info = lineInfo(p, p.idx + 1) + else: # [description=target] + n.add desc + n.add desc # target is the same as description + n.info = lineInfo(p, p.idx + 1) + father.add n + p.idx = i + result = true + else: + result = false + +proc getRstFootnoteType(label: PRstNode): (FootnoteType, int) = + if label.sons.len >= 1 and label.sons[0].kind == rnLeaf and + label.sons[0].text == "#": + if label.sons.len == 1: + result = (fnAutoNumber, -1) + else: + result = (fnAutoNumberLabel, -1) + elif label.len == 1 and label.sons[0].kind == rnLeaf and + label.sons[0].text == "*": + result = (fnAutoSymbol, -1) + elif label.len == 1 and label.sons[0].kind == rnLeaf: + try: + result = (fnManualNumber, parseInt(label.sons[0].text)) + except ValueError: + result = (fnCitation, -1) + else: + result = (fnCitation, -1) + +proc getMdFootnoteType(label: PRstNode): (FootnoteType, int) = + try: + result = (fnManualNumber, parseInt(label.sons[0].text)) + except ValueError: + result = (fnAutoNumberLabel, -1) + +proc getFootnoteType(s: PRstSharedState, label: PRstNode): (FootnoteType, int) = + ## Returns footnote/citation type and manual number (if present). + if isMd(s): getMdFootnoteType(label) + else: getRstFootnoteType(label) + +proc parseRstFootnoteName(p: var RstParser, reference: bool): PRstNode = + ## parse footnote/citation label. Precondition: start at `[`. + ## Label text should be valid ref. name symbol, otherwise nil is returned. 
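# A standalone, ASCII-only sketch of the "reference name" rule that such
# labels follow: alphanumerics plus isolated (no two adjacent) internal
# '-', '_', '.', ':', '+' characters. `isValidRefname` is an illustrative
# helper, not part of this module:

import std/strutils

proc isValidRefname(s: string): bool =
  const alnum = Letters + Digits
  if s.len == 0 or s[0] notin alnum or s[^1] notin alnum:
    return false
  for i in 1 ..< s.len:
    if s[i] notin alnum + {'-', '_', '.', ':', '+'}:
      return false
    if s[i] notin alnum and s[i-1] notin alnum:
      return false                 # two adjacent punctuation characters
  result = true

assert isValidRefname("some.ref.with.dots")
assert not isValidRefname("foo..bar")
assert not isValidRefname("foo bar")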
+ var i = p.idx + 1 + result = newRstNode(rnInner) + while true: + if p.tok[i].kind in {tkEof, tkIndent, tkWhite}: + return nil + if p.tok[i].kind == tkPunct: + case p.tok[i].symbol: + of "]": + if i > p.idx + 1 and (not reference or (p.tok[i+1].kind == tkPunct and p.tok[i+1].symbol == "_")): + inc i # skip ] + if reference: inc i # skip _ + break # to succeed, it's a footnote/citation indeed + else: + return nil + of "#": + if i != p.idx + 1: + return nil + of "*": + if i != p.idx + 1 and p.tok[i].kind != tkPunct and p.tok[i+1].symbol != "]": + return nil + else: + if not validRefnamePunct(p.tok[i].symbol): + return nil + result.add newLeaf(p.tok[i].symbol) + inc i p.idx = i - result = true + +proc isMdFootnoteName(p: RstParser, reference: bool): bool = + ## Pandoc Markdown footnote extension. + let j = p.idx + result = p.tok[j].symbol == "[" and p.tok[j+1].symbol == "^" and + p.tok[j+2].kind == tkWord + +proc parseMdFootnoteName(p: var RstParser, reference: bool): PRstNode = + if isMdFootnoteName(p, reference): + result = newRstNode(rnInner) + var j = p.idx + 2 + while p.tok[j].kind in {tkWord, tkOther} or + validRefnamePunct(p.tok[j].symbol): + result.add newLeaf(p.tok[j].symbol) + inc j + if j == p.idx + 2: + return nil + if p.tok[j].symbol == "]": + if reference: + p.idx = j + 1 # skip ] + else: + if p.tok[j+1].symbol == ":": + p.idx = j + 2 # skip ]: + else: + result = nil + else: + result = nil + else: + result = nil + +proc parseFootnoteName(p: var RstParser, reference: bool): PRstNode = + if isMd(p): parseMdFootnoteName(p, reference) + else: + if isInlineMarkupStart(p, "["): parseRstFootnoteName(p, reference) + else: nil + +proc isMarkdownCodeBlock(p: RstParser, idx: int): bool = + let tok = p.tok[idx] + template allowedSymbol: bool = + (tok.symbol[0] == '`' or + roPreferMarkdown in p.s.options and tok.symbol[0] == '~') + result = (roSupportMarkdown in p.s.options and + tok.kind in {tkPunct, tkAdornment} and + allowedSymbol and + tok.symbol.len >= 3) + +proc isMarkdownCodeBlock(p: RstParser): bool = + isMarkdownCodeBlock(p, p.idx) proc parseInline(p: var RstParser, father: PRstNode) = - case p.tok[p.idx].kind + var n: PRstNode # to be used in `if` condition + let saveIdx = p.idx + case currentTok(p).kind of tkPunct: if isInlineMarkupStart(p, "***"): var n = newRstNode(rnTripleEmphasis) parseUntil(p, n, "***", true) - add(father, n) + father.add(n) elif isInlineMarkupStart(p, "**"): var n = newRstNode(rnStrongEmphasis) parseUntil(p, n, "**", true) - add(father, n) + father.add(n) elif isInlineMarkupStart(p, "*"): var n = newRstNode(rnEmphasis) parseUntil(p, n, "*", true) - add(father, n) - elif roSupportMarkdown in p.s.options and p.tok[p.idx].symbol == "```": - inc(p.idx) - add(father, parseMarkdownCodeblock(p)) + father.add(n) + elif isInlineMarkupStart(p, "_`"): + var n = newRstNode(rnInlineTarget) + inc p.idx + parseUntil(p, n, "`", false) + n.anchor = rstnodeToRefname(n) + addAnchorRst(p, name = linkName(n), target = n, + anchorType=manualInlineAnchor) + father.add(n) + elif isMarkdownCodeBlock(p): + father.add(parseMarkdownCodeblock(p)) elif isInlineMarkupStart(p, "``"): var n = newRstNode(rnInlineLiteral) parseUntil(p, n, "``", false) - add(father, n) + father.add(n) + elif match(p, p.idx, ":w:") and + (var lastIdx = getRefnameIdx(p, p.idx + 1); + p.tok[lastIdx+2].symbol == "`"): + let (roleName, _) = getRefname(p, p.idx+1) + let k = whichRole(p, roleName) + var n = newRstNode(k) + p.idx = lastIdx + 2 + if k == rnInlineCode: + n = n.toInlineCode(language=roleName) + 
parseUntil(p, n, "`", false) # bug #17260 + if k in {rnUnknownRole, rnCodeFragment}: + n = n.toOtherRole(k, roleName) + father.add(n) elif isInlineMarkupStart(p, "`"): - var n = newRstNode(rnInterpretedText) - parseUntil(p, n, "`", true) + var n = newRstNode(rnInterpretedText, info=lineInfo(p, p.idx+1)) + parseUntil(p, n, "`", false) # bug #17260 n = parsePostfix(p, n) - add(father, n) + father.add(n) elif isInlineMarkupStart(p, "|"): - var n = newRstNode(rnSubstitutionReferences) + var n = newRstNode(rnSubstitutionReferences, info=lineInfo(p, p.idx+1)) parseUntil(p, n, "|", false) - add(father, n) + father.add(n) + elif currentTok(p).symbol == "[" and nextTok(p).symbol != "[" and + (n = parseFootnoteName(p, reference=true); n != nil): + var nn = newRstNode(rnFootnoteRef) + nn.info = lineInfo(p, saveIdx+1) + nn.add n + let (fnType, _) = getFootnoteType(p.s, n) + case fnType + of fnAutoSymbol: + p.s.lineFootnoteSymRef.add lineInfo(p) + of fnAutoNumber: + p.s.lineFootnoteNumRef.add lineInfo(p) + else: discard + father.add(nn) elif roSupportMarkdown in p.s.options and - p.tok[p.idx].symbol == "[" and p.tok[p.idx+1].symbol != "[" and + currentTok(p).symbol == "[" and nextTok(p).symbol != "[" and parseMarkdownLink(p, father): discard "parseMarkdownLink already processed it" else: if roSupportSmilies in p.s.options: let n = parseSmiley(p) if n != nil: - add(father, n) + father.add(n) return parseBackslash(p, father) of tkWord: if roSupportSmilies in p.s.options: let n = parseSmiley(p) if n != nil: - add(father, n) + father.add(n) return - parseUrl(p, father) + parseWordOrRef(p, father) of tkAdornment, tkOther, tkWhite: + if isMarkdownCodeBlock(p): + father.add(parseMarkdownCodeblock(p)) + return if roSupportSmilies in p.s.options: let n = parseSmiley(p) if n != nil: - add(father, n) + father.add(n) return - add(father, newLeaf(p)) - inc(p.idx) + father.add(newLeaf(p)) + inc p.idx else: discard proc getDirective(p: var RstParser): string = - if p.tok[p.idx].kind == tkWhite and p.tok[p.idx+1].kind == tkWord: - var j = p.idx - inc(p.idx) - result = p.tok[p.idx].symbol - inc(p.idx) - while p.tok[p.idx].kind in {tkWord, tkPunct, tkAdornment, tkOther}: - if p.tok[p.idx].symbol == "::": break - add(result, p.tok[p.idx].symbol) - inc(p.idx) - if p.tok[p.idx].kind == tkWhite: inc(p.idx) - if p.tok[p.idx].symbol == "::": - inc(p.idx) - if (p.tok[p.idx].kind == tkWhite): inc(p.idx) - else: - p.idx = j # set back - result = "" # error - else: - result = "" - -proc parseComment(p: var RstParser): PRstNode = - case p.tok[p.idx].kind - of tkIndent, tkEof: - if p.tok[p.idx].kind != tkEof and p.tok[p.idx + 1].kind == tkIndent: - inc(p.idx) # empty comment - else: - var indent = p.tok[p.idx].ival - while true: - case p.tok[p.idx].kind - of tkEof: - break - of tkIndent: - if (p.tok[p.idx].ival < indent): break - else: - discard - inc(p.idx) + result = "" + if currentTok(p).kind == tkWhite: + let (name, lastIdx) = getRefname(p, p.idx + 1) + let afterIdx = lastIdx + 1 + if name.len > 0: + if p.tok[afterIdx].symbol == "::": + result = name + p.idx = afterIdx + 1 + if currentTok(p).kind == tkWhite: + inc p.idx + elif currentTok(p).kind != tkIndent: + rstMessage(p, mwRstStyle, + "whitespace or newline expected after directive " & name) + result = result.toLowerAscii() + elif p.tok[afterIdx].symbol == ":": + rstMessage(p, mwRstStyle, + "double colon :: may be missing at end of '" & name & "'", + p.tok[afterIdx].line, p.tok[afterIdx].col) + elif p.tok[afterIdx].kind == tkPunct and p.tok[afterIdx].symbol[0] == ':': + 
rstMessage(p, mwRstStyle, + "too many colons for a directive (should be ::)", + p.tok[afterIdx].line, p.tok[afterIdx].col) + +proc parseComment(p: var RstParser, col: int): PRstNode = + if currentTok(p).kind != tkEof and nextTok(p).kind == tkIndent: + inc p.idx # empty comment else: - while p.tok[p.idx].kind notin {tkIndent, tkEof}: inc(p.idx) + while currentTok(p).kind != tkEof: + if currentTok(p).kind == tkIndent and currentTok(p).ival > col or + currentTok(p).kind != tkIndent and currentTok(p).col > col: + inc p.idx + else: + break result = nil -type - DirKind = enum # must be ordered alphabetically! - dkNone, dkAuthor, dkAuthors, dkCode, dkCodeBlock, dkContainer, dkContents, - dkFigure, dkImage, dkInclude, dkIndex, dkRaw, dkTitle - -const - DirIds: array[0..12, string] = ["", "author", "authors", "code", - "code-block", "container", "contents", "figure", "image", "include", - "index", "raw", "title"] - -proc getDirKind(s: string): DirKind = - let i = find(DirIds, s) - if i >= 0: result = DirKind(i) - else: result = dkNone - proc parseLine(p: var RstParser, father: PRstNode) = while true: - case p.tok[p.idx].kind + case currentTok(p).kind of tkWhite, tkWord, tkOther, tkPunct: parseInline(p, father) else: break proc parseUntilNewline(p: var RstParser, father: PRstNode) = while true: - case p.tok[p.idx].kind + case currentTok(p).kind of tkWhite, tkWord, tkAdornment, tkOther, tkPunct: parseInline(p, father) of tkEof, tkIndent: break proc parseSection(p: var RstParser, result: PRstNode) {.gcsafe.} + +proc tokenAfterNewline(p: RstParser, start: int): int = + result = start + while true: + case p.tok[result].kind + of tkEof: + break + of tkIndent: + inc result + break + else: inc result + +proc tokenAfterNewline(p: RstParser): int {.inline.} = + result = tokenAfterNewline(p, p.idx) + +proc getWrappableIndent(p: RstParser): int = + ## Gets baseline indentation for bodies of field lists and directives. + ## Handles situations like this (with possible de-indent in [case.3]):: + ## + ## :field: definition [case.1] + ## + ## currInd currentTok(p).col + ## | | + ## v v + ## + ## .. Note:: defItem: [case.2] + ## definition + ## + ## ^ + ## | + ## nextIndent + ## + ## .. Note:: - point1 [case.3] + ## - point 2 + ## + ## ^ + ## | + ## nextIndent + if currentTok(p).kind == tkIndent: + result = currentTok(p).ival + else: + var nextIndent = p.tok[tokenAfterNewline(p)-1].ival + if nextIndent <= currInd(p): # parse only this line [case.1] + result = currentTok(p).col + elif nextIndent >= currentTok(p).col: # may be a definition list [case.2] + result = currentTok(p).col + else: + result = nextIndent # allow parsing next lines [case.3] + +proc getMdBlockIndent(p: RstParser): int = + ## Markdown version of `getWrappableIndent`. + if currentTok(p).kind == tkIndent: + result = currentTok(p).ival + else: + var nextIndent = p.tok[tokenAfterNewline(p)-1].ival + # TODO: Markdown-compliant definition should allow nextIndent == currInd(p): + if nextIndent <= currInd(p): # parse only this line + result = currentTok(p).col + else: + result = nextIndent # allow parsing next lines [case.3] + +proc indFollows(p: RstParser): bool = + result = currentTok(p).kind == tkIndent and currentTok(p).ival > currInd(p) + +proc parseBlockContent(p: var RstParser, father: var PRstNode, + contentParser: SectionParser): bool {.gcsafe.} = + ## parse the final content part of explicit markup blocks (directives, + ## footnotes, etc). Returns true if succeeded. 
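# The three cases from the `getWrappableIndent` comment above, condensed
# into plain arithmetic (no tokens involved; `wrappableIndent` is an
# illustrative helper):

proc wrappableIndent(currInd, curCol, nextIndent: int): int =
  if nextIndent <= currInd:  result = curCol      # only this line   [case.1]
  elif nextIndent >= curCol: result = curCol      # definition list  [case.2]
  else:                      result = nextIndent  # wrapped body     [case.3]

assert wrappableIndent(0, 8, 0) == 8     # ":field: definition" on one line
assert wrappableIndent(0, 11, 14) == 11  # ".. Note:: defItem:" + indented body
assert wrappableIndent(0, 11, 3) == 3    # ".. Note:: - point1" + de-indented "- point 2"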
+ if currentTok(p).kind != tkIndent or indFollows(p): + let blockIndent = getWrappableIndent(p) + pushInd(p, blockIndent) + let content = contentParser(p) + popInd(p) + father.add content + result = true + +proc parseSectionWrapper(p: var RstParser): PRstNode = + result = newRstNode(rnInner) + parseSection(p, result) + while result.kind == rnInner and result.len == 1: + result = result.sons[0] + proc parseField(p: var RstParser): PRstNode = ## Returns a parsed rnField node. ## ## rnField nodes have two children nodes, a rnFieldName and a rnFieldBody. - result = newRstNode(rnField) - var col = p.tok[p.idx].col + result = newRstNode(rnField, info=lineInfo(p)) + var col = currentTok(p).col var fieldname = newRstNode(rnFieldName) parseUntil(p, fieldname, ":", false) var fieldbody = newRstNode(rnFieldBody) - if p.tok[p.idx].kind != tkIndent: parseLine(p, fieldbody) - if p.tok[p.idx].kind == tkIndent: - var indent = p.tok[p.idx].ival - if indent > col: - pushInd(p, indent) - parseSection(p, fieldbody) - popInd(p) - add(result, fieldname) - add(result, fieldbody) + if currentTok(p).kind == tkWhite: inc p.idx + let indent = getWrappableIndent(p) + if indent > col: + pushInd(p, indent) + parseSection(p, fieldbody) + popInd(p) + result.add(fieldname) + result.add(fieldbody) proc parseFields(p: var RstParser): PRstNode = ## Parses fields for a section or directive block. @@ -969,16 +2060,16 @@ proc parseFields(p: var RstParser): PRstNode = ## otherwise it will return a node of rnFieldList type with children. result = nil var atStart = p.idx == 0 and p.tok[0].symbol == ":" - if (p.tok[p.idx].kind == tkIndent) and (p.tok[p.idx + 1].symbol == ":") or + if currentTok(p).kind == tkIndent and nextTok(p).symbol == ":" or atStart: - var col = if atStart: p.tok[p.idx].col else: p.tok[p.idx].ival - result = newRstNode(rnFieldList) - if not atStart: inc(p.idx) + var col = if atStart: currentTok(p).col else: currentTok(p).ival + result = newRstNodeA(p, rnFieldList) + if not atStart: inc p.idx while true: - add(result, parseField(p)) - if (p.tok[p.idx].kind == tkIndent) and (p.tok[p.idx].ival == col) and - (p.tok[p.idx + 1].symbol == ":"): - inc(p.idx) + result.add(parseField(p)) + if currentTok(p).kind == tkIndent and currentTok(p).ival == col and + nextTok(p).symbol == ":": + inc p.idx else: break @@ -995,13 +2086,12 @@ proc getFieldValue*(n: PRstNode): string = result = addNodes(n.sons[1]).strip proc getFieldValue(n: PRstNode, fieldname: string): string = - result = "" if n.sons[1] == nil: return - if (n.sons[1].kind != rnFieldList): + if n.sons[1].kind != rnFieldList: #InternalError("getFieldValue (2): " & $n.sons[1].kind) # We don't like internal errors here anymore as that would break the forum! 
return - for i in countup(0, len(n.sons[1]) - 1): + for i in 0 ..< n.sons[1].len: var f = n.sons[1].sons[i] if cmpIgnoreStyle(addNodes(f.sons[0]), fieldname) == 0: result = addNodes(f.sons[1]) @@ -1014,64 +2104,215 @@ proc getArgument(n: PRstNode): string = proc parseDotDot(p: var RstParser): PRstNode {.gcsafe.} proc parseLiteralBlock(p: var RstParser): PRstNode = - result = newRstNode(rnLiteralBlock) - var n = newRstNode(rnLeaf, "") - if p.tok[p.idx].kind == tkIndent: - var indent = p.tok[p.idx].ival - inc(p.idx) + result = newRstNodeA(p, rnLiteralBlock) + var n = newLeaf("") + if currentTok(p).kind == tkIndent: + var indent = currentTok(p).ival + while currentTok(p).kind == tkIndent: inc p.idx # skip blank lines while true: - case p.tok[p.idx].kind + case currentTok(p).kind of tkEof: break of tkIndent: - if (p.tok[p.idx].ival < indent): + if currentTok(p).ival < indent: break else: - add(n.text, "\n") - add(n.text, spaces(p.tok[p.idx].ival - indent)) - inc(p.idx) + n.text.add("\n") + n.text.add(spaces(currentTok(p).ival - indent)) + inc p.idx else: - add(n.text, p.tok[p.idx].symbol) - inc(p.idx) + n.text.add(currentTok(p).symbol) + inc p.idx else: - while not (p.tok[p.idx].kind in {tkIndent, tkEof}): - add(n.text, p.tok[p.idx].symbol) - inc(p.idx) - add(result, n) - -proc getLevel(map: var LevelMap, lvl: var int, c: char): int = - if map[c] == 0: - inc(lvl) - map[c] = lvl - result = map[c] - -proc tokenAfterNewline(p: RstParser): int = - result = p.idx - while true: - case p.tok[result].kind - of tkEof: - break - of tkIndent: - inc(result) - break - else: inc(result) + while currentTok(p).kind notin {tkIndent, tkEof}: + n.text.add(currentTok(p).symbol) + inc p.idx + result.add(n) + +proc parseQuotedLiteralBlock(p: var RstParser): PRstNode = + result = newRstNodeA(p, rnLiteralBlock) + var n = newLeaf("") + if currentTok(p).kind == tkIndent: + var indent = currInd(p) + while currentTok(p).kind == tkIndent: inc p.idx # skip blank lines + var quoteSym = currentTok(p).symbol[0] + while true: + case currentTok(p).kind + of tkEof: + break + of tkIndent: + if currentTok(p).ival < indent: + break + elif currentTok(p).ival == indent: + if nextTok(p).kind == tkPunct and nextTok(p).symbol[0] == quoteSym: + n.text.add("\n") + inc p.idx + elif nextTok(p).kind == tkIndent: + break + else: + rstMessage(p, mwRstStyle, "no newline after quoted literal block") + break + else: + rstMessage(p, mwRstStyle, + "unexpected indentation in quoted literal block") + break + else: + n.text.add(currentTok(p).symbol) + inc p.idx + result.add(n) + +proc parseRstLiteralBlock(p: var RstParser, kind: LiteralBlockKind): PRstNode = + if kind == lbIndentedLiteralBlock: + result = parseLiteralBlock(p) + else: + result = parseQuotedLiteralBlock(p) + +proc getLevel(p: var RstParser, c: char, hasOverline: bool): int = + ## Returns (preliminary) heading level corresponding to `c` and + ## `hasOverline`. If level does not exist, add it first. 
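# Editorial sketch, not from the diff: the heading-level registry used by
# getLevel here is first-seen-wins, keyed on the adornment character plus
# whether an overline is present; the line/hasPeers bookkeeping is omitted and
# HeadingKey/levelOf are illustrative names only.
type HeadingKey = tuple[symbol: char, hasOverline: bool]

proc levelOf(levels: var seq[HeadingKey], key: HeadingKey): int =
  for i, k in levels:
    if k == key: return i      # known adornment: reuse its level
  levels.add key                # new adornment: gets the next level number
  result = levels.high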
+ for i, hType in p.s.hLevels: + if hType.symbol == c and hType.hasOverline == hasOverline: + p.s.hLevels[i].line = curLine(p) + p.s.hLevels[i].hasPeers = true + return i + p.s.hLevels.add LevelInfo(symbol: c, hasOverline: hasOverline, + line: curLine(p), hasPeers: false) + result = p.s.hLevels.len - 1 + +proc countTitles(s: PRstSharedState, n: PRstNode) = + ## Fill `s.hTitleCnt` + if n == nil: return + for node in n.sons: + if node != nil: + if node.kind notin {rnOverline, rnSubstitutionDef, rnDefaultRole}: + break + if node.kind == rnOverline: + if s.hLevels[s.hTitleCnt].hasPeers: + break + inc s.hTitleCnt + if s.hTitleCnt >= 2: + break + +proc isAdornmentHeadline(p: RstParser, adornmentIdx: int): bool = + ## check that underline/overline length is enough for the heading. + ## No support for Unicode. + if p.tok[adornmentIdx].symbol in ["::", "..", "|"]: + return false + if isMarkdownCodeBlock(p, adornmentIdx): + return false + var headlineLen = 0 + var failure = "" + if p.idx < adornmentIdx: # check for underline + if p.idx > 0: + headlineLen = currentTok(p).col - p.tok[adornmentIdx].col + if headlineLen > 0: + rstMessage(p, mwRstStyle, "indentation of heading text allowed" & + " only for overline titles") + for i in p.idx ..< adornmentIdx-1: # adornmentIdx-1 is a linebreak + headlineLen += p.tok[i].symbol.len + result = p.tok[adornmentIdx].symbol.len >= headlineLen and headlineLen != 0 + if not result: + failure = "(underline '" & p.tok[adornmentIdx].symbol & "' is too short)" + else: # p.idx == adornmentIdx, at overline. Check overline and underline + var i = p.idx + 2 + headlineLen = p.tok[i].col - p.tok[adornmentIdx].col + while p.tok[i].kind notin {tkEof, tkIndent}: + headlineLen += p.tok[i].symbol.len + inc i + if p.tok[i].kind == tkIndent and + p.tok[i+1].kind == tkAdornment and + p.tok[i+1].symbol[0] == p.tok[adornmentIdx].symbol[0]: + result = p.tok[adornmentIdx].symbol.len >= headlineLen and + headlineLen != 0 + if result: + result = p.tok[i+1].symbol == p.tok[adornmentIdx].symbol + if not result: + failure = "(underline '" & p.tok[i+1].symbol & "' does not match " & + "overline '" & p.tok[adornmentIdx].symbol & "')" + else: + failure = "(overline '" & p.tok[adornmentIdx].symbol & "' is too short)" + else: # it's not overline/underline section, not reporting error + return false + if not result: + rstMessage(p, meNewSectionExpected, failure) proc isLineBlock(p: RstParser): bool = var j = tokenAfterNewline(p) - result = (p.tok[p.idx].col == p.tok[j].col) and (p.tok[j].symbol == "|") or - (p.tok[j].col > p.tok[p.idx].col) + result = currentTok(p).col == p.tok[j].col and p.tok[j].symbol == "|" or + p.tok[j].col > currentTok(p).col or + p.tok[j].symbol == "\n" + +proc isMarkdownBlockQuote(p: RstParser): bool = + result = currentTok(p).symbol[0] == '>' + +proc whichRstLiteralBlock(p: RstParser): LiteralBlockKind = + ## Checks that the following tokens are either Indented Literal Block or + ## Quoted Literal Block (which is not quite the same as Markdown quote block). 
+ ## https://docutils.sourceforge.io/docs/ref/rst/restructuredtext.html#quoted-literal-blocks + if currentTok(p).symbol == "::" and nextTok(p).kind == tkIndent: + if currInd(p) > nextTok(p).ival: + result = lbNone + if currInd(p) < nextTok(p).ival: + result = lbIndentedLiteralBlock + elif currInd(p) == nextTok(p).ival: + var i = p.idx + 1 + while p.tok[i].kind == tkIndent: inc i + const validQuotingCharacters = { + '!', '"', '#', '$', '%', '&', '\'', '(', ')', '*', '+', ',', '-', + '.', '/', ':', ';', '<', '=', '>', '?', '@', '[', '\\', ']', '^', + '_', '`', '{', '|', '}', '~'} + if p.tok[i].kind in {tkPunct, tkAdornment} and + p.tok[i].symbol[0] in validQuotingCharacters: + result = lbQuotedLiteralBlock + else: + result = lbNone + else: + result = lbNone proc predNL(p: RstParser): bool = result = true if p.idx > 0: - result = p.tok[p.idx-1].kind == tkIndent and - p.tok[p.idx-1].ival == currInd(p) + result = prevTok(p).kind == tkIndent and + prevTok(p).ival == currInd(p) proc isDefList(p: RstParser): bool = var j = tokenAfterNewline(p) - result = (p.tok[p.idx].col < p.tok[j].col) and - (p.tok[j].kind in {tkWord, tkOther, tkPunct}) and - (p.tok[j - 2].symbol != "::") + result = currentTok(p).col < p.tok[j].col and + p.tok[j].kind in {tkWord, tkOther, tkPunct} and + p.tok[j - 2].symbol != "::" + +proc `$`(t: Token): string = # for debugging only + result = "(" & $t.kind & " line=" & $t.line & " col=" & $t.col + if t.kind == tkIndent: result = result & " ival=" & $t.ival & ")" + else: result = result & " symbol=" & t.symbol & ")" + +proc skipNewlines(p: RstParser, j: int): int = + result = j + while p.tok[result].kind != tkEof and p.tok[result].kind == tkIndent: + inc result # skip blank lines + +proc skipNewlines(p: var RstParser) = + p.idx = skipNewlines(p, p.idx) + +const maxMdRelInd = 3 ## In Markdown: maximum indentation that does not yet + ## make the indented block a code + +proc isMdRelInd(outerInd, nestedInd: int): bool = + result = outerInd <= nestedInd and nestedInd <= outerInd + maxMdRelInd + +proc isMdDefBody(p: RstParser, j: int, termCol: int): bool = + let defCol = p.tok[j].col + result = p.tok[j].symbol == ":" and + isMdRelInd(termCol, defCol) and + p.tok[j+1].kind == tkWhite and + p.tok[j+2].kind in {tkWord, tkOther, tkPunct} + +proc isMdDefListItem(p: RstParser, idx: int): bool = + var j = tokenAfterNewline(p, idx) + j = skipNewlines(p, j) + let termCol = p.tok[j].col + result = isMdRelInd(currInd(p), termCol) and + isMdDefBody(p, j, termCol) proc isOptionList(p: RstParser): bool = result = match(p, p.idx, "-w") or match(p, p.idx, "--w") or @@ -1085,99 +2326,240 @@ proc isMarkdownHeadlinePattern(s: string): bool = proc isMarkdownHeadline(p: RstParser): bool = if roSupportMarkdown in p.s.options: - if isMarkdownHeadlinePattern(p.tok[p.idx].symbol) and p.tok[p.idx+1].kind == tkWhite: + if isMarkdownHeadlinePattern(currentTok(p).symbol) and nextTok(p).kind == tkWhite: if p.tok[p.idx+2].kind in {tkWord, tkOther, tkPunct}: result = true +proc findPipe(p: RstParser, start: int): bool = + var i = start + while true: + if p.tok[i].symbol == "|": return true + if p.tok[i].kind in {tkIndent, tkEof}: return false + inc i + proc whichSection(p: RstParser): RstNodeKind = - case p.tok[p.idx].kind + if currentTok(p).kind in {tkAdornment, tkPunct}: + # for punctuation sequences that can be both tkAdornment and tkPunct + if isMarkdownCodeBlock(p): + return rnCodeBlock + elif isRst(p) and currentTok(p).symbol == "::": + return rnLiteralBlock + elif currentTok(p).symbol == ".." 
and + nextTok(p).kind in {tkWhite, tkIndent}: + return rnDirective + case currentTok(p).kind of tkAdornment: - if match(p, p.idx + 1, "ii"): result = rnTransition + if match(p, p.idx + 1, "iI") and currentTok(p).symbol.len >= 4: + result = rnTransition + elif match(p, p.idx, "+a+"): + result = rnGridTable + rstMessage(p, meGridTableNotImplemented) elif match(p, p.idx + 1, " a"): result = rnTable - elif match(p, p.idx + 1, "i"): result = rnOverline - elif isMarkdownHeadline(p): - result = rnHeadline + elif currentTok(p).symbol == "|" and isLineBlock(p): + result = rnLineBlock + elif roSupportMarkdown in p.s.options and isMarkdownBlockQuote(p): + result = rnMarkdownBlockQuote + elif (match(p, p.idx + 1, "i") and not match(p, p.idx + 2, "I")) and + isAdornmentHeadline(p, p.idx): + result = rnOverline else: - result = rnLeaf + result = rnParagraph of tkPunct: if isMarkdownHeadline(p): + result = rnMarkdownHeadline + elif roSupportMarkdown in p.s.options and predNL(p) and + match(p, p.idx, "| w") and findPipe(p, p.idx+3): + result = rnMarkdownTable + elif isMd(p) and isMdFootnoteName(p, reference=false): + result = rnFootnote + elif currentTok(p).symbol == "|" and isLineBlock(p): + result = rnLineBlock + elif roSupportMarkdown in p.s.options and isMarkdownBlockQuote(p): + result = rnMarkdownBlockQuote + elif match(p, tokenAfterNewline(p), "aI") and + isAdornmentHeadline(p, tokenAfterNewline(p)): result = rnHeadline - elif match(p, tokenAfterNewline(p), "ai"): - result = rnHeadline - elif p.tok[p.idx].symbol == "::": - result = rnLiteralBlock - elif predNL(p) and - ((p.tok[p.idx].symbol == "+") or (p.tok[p.idx].symbol == "*") or - (p.tok[p.idx].symbol == "-")) and (p.tok[p.idx + 1].kind == tkWhite): + elif currentTok(p).symbol in ["+", "*", "-"] and nextTok(p).kind == tkWhite: result = rnBulletList - elif (p.tok[p.idx].symbol == "|") and isLineBlock(p): - result = rnLineBlock - elif (p.tok[p.idx].symbol == "..") and predNL(p): - result = rnDirective - elif match(p, p.idx, ":w:") and predNL(p): - # (p.tok[p.idx].symbol == ":") + elif match(p, p.idx, ":w:E"): + # (currentTok(p).symbol == ":") result = rnFieldList - elif match(p, p.idx, "(e) ") or match(p, p.idx, "e. "): + elif match(p, p.idx, "(e) ") or match(p, p.idx, "e) ") or + match(p, p.idx, "e. "): result = rnEnumList - elif match(p, p.idx, "+a+"): - result = rnGridTable - rstMessage(p, meGridTableNotImplemented) - elif isDefList(p): - result = rnDefList elif isOptionList(p): result = rnOptionList + elif isRst(p) and isDefList(p): + result = rnDefList + elif isMd(p) and isMdDefListItem(p, p.idx): + result = rnMdDefList else: result = rnParagraph of tkWord, tkOther, tkWhite: - if match(p, tokenAfterNewline(p), "ai"): result = rnHeadline + let tokIdx = tokenAfterNewline(p) + if match(p, tokIdx, "aI"): + if isAdornmentHeadline(p, tokIdx): result = rnHeadline + else: result = rnParagraph elif match(p, p.idx, "e) ") or match(p, p.idx, "e. 
"): result = rnEnumList - elif isDefList(p): result = rnDefList + elif isRst(p) and isDefList(p): result = rnDefList + elif isMd(p) and isMdDefListItem(p, p.idx): + result = rnMdDefList else: result = rnParagraph else: result = rnLeaf proc parseLineBlock(p: var RstParser): PRstNode = + ## Returns rnLineBlock with all sons of type rnLineBlockItem result = nil - if p.tok[p.idx + 1].kind == tkWhite: - var col = p.tok[p.idx].col - result = newRstNode(rnLineBlock) - pushInd(p, p.tok[p.idx + 2].col) - inc(p.idx, 2) + if nextTok(p).kind in {tkWhite, tkIndent}: + var col = currentTok(p).col + result = newRstNodeA(p, rnLineBlock) while true: var item = newRstNode(rnLineBlockItem) - parseSection(p, item) - add(result, item) - if (p.tok[p.idx].kind == tkIndent) and (p.tok[p.idx].ival == col) and - (p.tok[p.idx + 1].symbol == "|") and - (p.tok[p.idx + 2].kind == tkWhite): - inc(p.idx, 3) + if nextTok(p).kind == tkWhite: + if nextTok(p).symbol.len > 1: # pass additional indentation after '| ' + item.lineIndent = nextTok(p).symbol + inc p.idx, 2 + pushInd(p, p.tok[p.idx].col) + parseSection(p, item) + popInd(p) + else: # tkIndent => add an empty line + item.lineIndent = "\n" + inc p.idx, 1 + result.add(item) + if currentTok(p).kind == tkIndent and currentTok(p).ival == col and + nextTok(p).symbol == "|" and + p.tok[p.idx + 2].kind in {tkWhite, tkIndent}: + inc p.idx, 1 else: break - popInd(p) + +proc parseDoc(p: var RstParser): PRstNode {.gcsafe.} + +proc getQuoteSymbol(p: RstParser, idx: int): tuple[sym: string, depth: int, tokens: int] = + result = ("", 0, 0) + var i = idx + result.sym &= p.tok[i].symbol + result.depth += p.tok[i].symbol.len + inc result.tokens + inc i + while p.tok[i].kind == tkWhite and i+1 < p.tok.len and + p.tok[i+1].kind == tkPunct and p.tok[i+1].symbol[0] == '>': + result.sym &= p.tok[i].symbol + result.sym &= p.tok[i+1].symbol + result.depth += p.tok[i+1].symbol.len + inc result.tokens, 2 + inc i, 2 + +proc parseMarkdownQuoteSegment(p: var RstParser, curSym: string, col: int): + PRstNode = + ## We define *segment* as a group of lines that starts with exactly the + ## same quote symbol. If the following lines don't contain any `>` (*lazy* + ## continuation) they considered as continuation of the current segment. 
+ var q: RstParser # to delete `>` at a start of line and then parse normally + initParser(q, p.s) + q.col = p.col + q.line = p.line + var minCol = int.high # minimum colum num in the segment + while true: # move tokens of segment from `p` to `q` skipping `curSym` + case currentTok(p).kind + of tkEof: + break + of tkIndent: + if nextTok(p).kind in {tkIndent, tkEof}: + break + else: + if nextTok(p).symbol[0] == '>': + var (quoteSym, _, quoteTokens) = getQuoteSymbol(p, p.idx + 1) + if quoteSym == curSym: # the segment continues + var iTok = tokenAfterNewline(p, p.idx+1) + if p.tok[iTok].kind notin {tkEof, tkIndent} and + p.tok[iTok].symbol[0] != '>': + rstMessage(p, mwRstStyle, + "two or more quoted lines are followed by unquoted line " & + $(curLine(p) + 1)) + break + q.tok.add currentTok(p) + var ival = currentTok(p).ival + quoteSym.len + inc p.idx, (1 + quoteTokens) # skip newline and > > > + if currentTok(p).kind == tkWhite: + ival += currentTok(p).symbol.len + inc p.idx + # fix up previous `tkIndent`s to ival (as if >>> were not there) + var j = q.tok.len - 1 + while j >= 0 and q.tok[j].kind == tkIndent: + q.tok[j].ival = ival + dec j + else: # next segment started + break + elif currentTok(p).ival < col: + break + else: # the segment continues, a case like: + # > beginning + # continuation + q.tok.add currentTok(p) + inc p.idx + else: + if currentTok(p).col < minCol: minCol = currentTok(p).col + q.tok.add currentTok(p) + inc p.idx + q.indentStack = @[minCol] + # if initial indentation `minCol` is > 0 then final newlines + # should be omitted so that parseDoc could advance to the end of tokens: + var j = q.tok.len - 1 + while q.tok[j].kind == tkIndent: dec j + q.tok.setLen (j+1) + q.tok.add Token(kind: tkEof, line: currentTok(p).line) + result = parseDoc(q) + +proc parseMarkdownBlockQuote(p: var RstParser): PRstNode = + var (curSym, quotationDepth, quoteTokens) = getQuoteSymbol(p, p.idx) + let col = currentTok(p).col + result = newRstNodeA(p, rnMarkdownBlockQuote) + inc p.idx, quoteTokens # skip first > + while true: + var item = newRstNode(rnMarkdownBlockQuoteItem) + item.quotationDepth = quotationDepth + if currentTok(p).kind == tkWhite: inc p.idx + item.add parseMarkdownQuoteSegment(p, curSym, col) + result.add(item) + if currentTok(p).kind == tkIndent and currentTok(p).ival == col and + nextTok(p).kind != tkEof and nextTok(p).symbol[0] == '>': + (curSym, quotationDepth, quoteTokens) = getQuoteSymbol(p, p.idx + 1) + inc p.idx, (1 + quoteTokens) # skip newline and > > > + else: + break proc parseParagraph(p: var RstParser, result: PRstNode) = while true: - case p.tok[p.idx].kind + case currentTok(p).kind of tkIndent: - if p.tok[p.idx + 1].kind == tkIndent: - inc(p.idx) - break - elif (p.tok[p.idx].ival == currInd(p)): - inc(p.idx) + if nextTok(p).kind == tkIndent: + inc p.idx + break # blank line breaks paragraph for both Md & Rst + elif currentTok(p).ival == currInd(p) or ( + isMd(p) and currentTok(p).ival > currInd(p)): + # (Md allows adding additional indentation inside paragraphs) + inc p.idx case whichSection(p) - of rnParagraph, rnLeaf, rnHeadline, rnOverline, rnDirective: - add(result, newRstNode(rnLeaf, " ")) + of rnParagraph, rnLeaf, rnHeadline, rnMarkdownHeadline, + rnOverline, rnDirective: + result.add newLeaf(" ") of rnLineBlock: - addIfNotNil(result, parseLineBlock(p)) - else: break + result.addIfNotNil(parseLineBlock(p)) + of rnMarkdownBlockQuote: + result.addIfNotNil(parseMarkdownBlockQuote(p)) + else: + dec p.idx # allow subsequent block to be parsed as another section 
+ break else: break of tkPunct: - if (p.tok[p.idx].symbol == "::") and - (p.tok[p.idx + 1].kind == tkIndent) and - (currInd(p) < p.tok[p.idx + 1].ival): - add(result, newRstNode(rnLeaf, ":")) - inc(p.idx) # skip '::' - add(result, parseLiteralBlock(p)) + if isRst(p) and ( + let literalBlockKind = whichRstLiteralBlock(p); + literalBlockKind != lbNone): + result.add newLeaf(":") + inc p.idx # skip '::' + result.add(parseRstLiteralBlock(p, literalBlockKind)) break else: parseInline(p, result) @@ -1185,375 +2567,725 @@ proc parseParagraph(p: var RstParser, result: PRstNode) = parseInline(p, result) else: break +proc checkHeadingHierarchy(p: RstParser, lvl: int) = + if lvl - p.s.hCurLevel > 1: # broken hierarchy! + proc descr(l: int): string = + (if p.s.hLevels[l].hasOverline: "overline " else: "underline ") & + repeat(p.s.hLevels[l].symbol, 5) + var msg = "(section level inconsistent: " + msg.add descr(lvl) & " unexpectedly found, " & + "while the following intermediate section level(s) are missing on lines " + msg.add $p.s.hLevels[p.s.hCurLevel].line & ".." & $curLine(p) & ":" + for l in p.s.hCurLevel+1 .. lvl-1: + msg.add " " & descr(l) + if l != lvl-1: msg.add "," + rstMessage(p, meNewSectionExpected, msg & ")") + proc parseHeadline(p: var RstParser): PRstNode = - result = newRstNode(rnHeadline) if isMarkdownHeadline(p): - result.level = p.tok[p.idx].symbol.len - assert(p.tok[p.idx+1].kind == tkWhite) + result = newRstNode(rnMarkdownHeadline) + # Note that level hierarchy is not checked for markdown headings + result.level = currentTok(p).symbol.len + assert(nextTok(p).kind == tkWhite) inc p.idx, 2 parseUntilNewline(p, result) else: + result = newRstNode(rnHeadline) + parseUntilNewline(p, result) + assert(currentTok(p).kind == tkIndent) + assert(nextTok(p).kind == tkAdornment) + var c = nextTok(p).symbol[0] + inc p.idx, 2 + result.level = getLevel(p, c, hasOverline=false) + checkHeadingHierarchy(p, result.level) + p.s.hCurLevel = result.level + addAnchorRst(p, linkName(result), result, anchorType=headlineAnchor) + p.s.tocPart.add result + +proc parseOverline(p: var RstParser): PRstNode = + var c = currentTok(p).symbol[0] + inc p.idx, 2 + result = newRstNode(rnOverline) + while true: parseUntilNewline(p, result) - assert(p.tok[p.idx].kind == tkIndent) - assert(p.tok[p.idx + 1].kind == tkAdornment) - var c = p.tok[p.idx + 1].symbol[0] - inc(p.idx, 2) - result.level = getLevel(p.s.underlineToLevel, p.s.uLevel, c) + if currentTok(p).kind == tkIndent: + inc p.idx + if prevTok(p).ival > currInd(p): + result.add newLeaf(" ") + else: + break + else: + break + result.level = getLevel(p, c, hasOverline=true) + checkHeadingHierarchy(p, result.level) + p.s.hCurLevel = result.level + if currentTok(p).kind == tkAdornment: + inc p.idx + if currentTok(p).kind == tkIndent: inc p.idx + addAnchorRst(p, linkName(result), result, anchorType=headlineAnchor) + p.s.tocPart.add result + +proc fixHeadlines(s: PRstSharedState) = + # Fix up section levels depending on presence of a title and subtitle: + for n in s.tocPart: + if n.kind in {rnHeadline, rnOverline}: + if s.hTitleCnt == 2: + if n.level == 1: # it's the subtitle + n.level = 0 + elif n.level >= 2: # normal sections, start numbering from 1 + n.level -= 1 + elif s.hTitleCnt == 0: + n.level += 1 + # Set headline anchors: + for iHeading in 0 .. s.tocPart.high: + let n: PRstNode = s.tocPart[iHeading] + if n.level >= 1: + n.anchor = rstnodeToRefname(n) + # Fix anchors for uniqueness if `.. 
contents::` is present + if s.hasToc: + # Find the last higher level section for unique reference name + var sectionPrefix = "" + for i in countdown(iHeading - 1, 0): + if s.tocPart[i].level >= 1 and s.tocPart[i].level < n.level: + sectionPrefix = rstnodeToRefname(s.tocPart[i]) & "-" + break + if sectionPrefix != "": + n.anchor = sectionPrefix & n.anchor + s.tocPart.setLen 0 type - IntSeq = seq[int] + ColSpec = object + start, stop: int + RstCols = seq[ColSpec] + ColumnLimits = tuple # for Markdown + first, last: int + ColSeq = seq[ColumnLimits] + +proc tokStart(p: RstParser, idx: int): int = + result = p.tok[idx].col + +proc tokStart(p: RstParser): int = + result = tokStart(p, p.idx) + +proc tokEnd(p: RstParser, idx: int): int = + result = p.tok[idx].col + p.tok[idx].symbol.len - 1 proc tokEnd(p: RstParser): int = - result = p.tok[p.idx].col + len(p.tok[p.idx].symbol) - 1 + result = tokEnd(p, p.idx) -proc getColumns(p: var RstParser, cols: var IntSeq) = +proc getColumns(p: RstParser, cols: var RstCols, startIdx: int): int = + # Fills table column specification (or separator) `cols` and returns + # the next parser index after it. var L = 0 + result = startIdx while true: - inc(L) + inc L setLen(cols, L) - cols[L - 1] = tokEnd(p) - assert(p.tok[p.idx].kind == tkAdornment) - inc(p.idx) - if p.tok[p.idx].kind != tkWhite: break - inc(p.idx) - if p.tok[p.idx].kind != tkAdornment: break - if p.tok[p.idx].kind == tkIndent: inc(p.idx) - # last column has no limit: - cols[L - 1] = 32000 + cols[L - 1].start = tokStart(p, result) + cols[L - 1].stop = tokEnd(p, result) + assert(p.tok[result].kind == tkAdornment) + inc result + if p.tok[result].kind != tkWhite: break + inc result + if p.tok[result].kind != tkAdornment: break + if p.tok[result].kind == tkIndent: inc result -proc parseDoc(p: var RstParser): PRstNode {.gcsafe.} +proc checkColumns(p: RstParser, cols: RstCols) = + var i = p.idx + if p.tok[i].symbol[0] != '=': + stopOrWarn(p, meIllformedTable, + "only tables with `=` columns specification are allowed") + for col in 0 ..< cols.len: + if tokEnd(p, i) != cols[col].stop: + stopOrWarn(p, meIllformedTable, + "end of table column #$1 should end at position $2" % [ + $(col+1), $(cols[col].stop+ColRstOffset)], + p.tok[i].line, tokEnd(p, i)) + inc i + if col == cols.len - 1: + if p.tok[i].kind == tkWhite: + inc i + if p.tok[i].kind notin {tkIndent, tkEof}: + stopOrWarn(p, meIllformedTable, "extraneous column specification") + elif p.tok[i].kind == tkWhite: + inc i + else: + stopOrWarn(p, meIllformedTable, + "not enough table columns", p.tok[i].line, p.tok[i].col) + +proc getSpans(p: RstParser, nextLine: int, + cols: RstCols, unitedCols: RstCols): seq[int] = + ## Calculates how many columns a joined cell occupies. + if unitedCols.len > 0: + result = newSeq[int](unitedCols.len) + var + iCell = 0 + jCell = 0 + uCell = 0 + while jCell < cols.len: + if cols[jCell].stop < unitedCols[uCell].stop: + inc jCell + elif cols[jCell].stop == unitedCols[uCell].stop: + result[uCell] = jCell - iCell + 1 + iCell = jCell + 1 + jCell = jCell + 1 + inc uCell + else: + rstMessage(p, meIllformedTable, + "spanning underline does not match main table columns", + p.tok[nextLine].line, p.tok[nextLine].col) + +proc parseSimpleTableRow(p: var RstParser, cols: RstCols, colChar: char): PRstNode = + ## Parses one row of an RST simple table.
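# Editorial illustration: the canonical docutils simple table that the code
# below must handle; the '=' lines are the column specification verified by
# checkColumns, and the '------------' underline joins the first two columns
# of the header row into one spanning cell (handled via getSpans/unitedCols):
#
#   =====  =====  ======
#      Inputs     Output
#   ------------  ------
#     A      B    A or B
#   =====  =====  ======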
+ # Consider that columns may be spanning (united by using underline like ----): + let nextLine = tokenAfterNewline(p) + var unitedCols: RstCols + var afterSpan: int + if p.tok[nextLine].kind == tkAdornment and p.tok[nextLine].symbol[0] == '-': + afterSpan = getColumns(p, unitedCols, nextLine) + if unitedCols == cols and p.tok[nextLine].symbol[0] == colChar: + # legacy rst.nim compat.: allow punctuation like `----` in main boundaries + afterSpan = nextLine + unitedCols.setLen 0 + else: + afterSpan = nextLine + template colEnd(i): int = + if i == cols.len - 1: high(int) # last column has no limit + elif unitedCols.len > 0: unitedCols[i].stop else: cols[i].stop + template colStart(i): int = + if unitedCols.len > 0: unitedCols[i].start else: cols[i].start + var row = newSeq[string](if unitedCols.len > 0: unitedCols.len else: cols.len) + var spans: seq[int] = getSpans(p, nextLine, cols, unitedCols) + + let line = currentTok(p).line + # Iterate over the lines a single cell may span: + while true: + var nCell = 0 + # distribute tokens between cells in the current line: + while currentTok(p).kind notin {tkIndent, tkEof}: + if tokEnd(p) <= colEnd(nCell): + if tokStart(p) < colStart(nCell): + if currentTok(p).kind != tkWhite: + stopOrWarn(p, meIllformedTable, + "this word crosses table column from the left") + row[nCell].add(currentTok(p).symbol) + else: + row[nCell].add(currentTok(p).symbol) + inc p.idx + else: + if tokStart(p) < colEnd(nCell) and currentTok(p).kind != tkWhite: + stopOrWarn(p, meIllformedTable, + "this word crosses table column from the right") + row[nCell].add(currentTok(p).symbol) + inc p.idx + inc nCell + if currentTok(p).kind == tkIndent: inc p.idx + if tokEnd(p) <= colEnd(0): break + # Continued current cells because the 1st column is empty. 
+ if currentTok(p).kind in {tkEof, tkAdornment}: + break + for nCell in countup(1, high(row)): row[nCell].add('\n') + result = newRstNode(rnTableRow) + var q: RstParser + for uCell in 0 ..< row.len: + initParser(q, p.s) + q.col = colStart(uCell) + q.line = line - 1 + getTokens(row[uCell], q.tok) + let cell = newRstNode(rnTableDataCell) + cell.span = if spans.len == 0: 0 else: spans[uCell] + cell.add(parseDoc(q)) + result.add(cell) + if afterSpan > p.idx: + p.idx = afterSpan proc parseSimpleTable(p: var RstParser): PRstNode = - var - cols: IntSeq - row: seq[string] - i, last, line: int - c: char - q: RstParser - a, b: PRstNode - result = newRstNode(rnTable) - cols = @[] - row = @[] - a = nil - c = p.tok[p.idx].symbol[0] + var cols: RstCols + result = newRstNodeA(p, rnTable) + let startIdx = getColumns(p, cols, p.idx) + let colChar = currentTok(p).symbol[0] + checkColumns(p, cols) + p.idx = startIdx + result.colCount = cols.len while true: - if p.tok[p.idx].kind == tkAdornment: - last = tokenAfterNewline(p) - if p.tok[last].kind in {tkEof, tkIndent}: + if currentTok(p).kind == tkAdornment: + checkColumns(p, cols) + p.idx = tokenAfterNewline(p) + if currentTok(p).kind in {tkEof, tkIndent}: # skip last adornment line: - p.idx = last break - getColumns(p, cols) - setLen(row, len(cols)) - if a != nil: - for j in 0..len(a)-1: a.sons[j].kind = rnTableHeaderCell - if p.tok[p.idx].kind == tkEof: break - for j in countup(0, high(row)): row[j] = "" - # the following while loop iterates over the lines a single cell may span: - line = p.tok[p.idx].line - while true: - i = 0 - while not (p.tok[p.idx].kind in {tkIndent, tkEof}): - if (tokEnd(p) <= cols[i]): - add(row[i], p.tok[p.idx].symbol) - inc(p.idx) - else: - if p.tok[p.idx].kind == tkWhite: inc(p.idx) - inc(i) - if p.tok[p.idx].kind == tkIndent: inc(p.idx) - if tokEnd(p) <= cols[0]: break - if p.tok[p.idx].kind in {tkEof, tkAdornment}: break - for j in countup(1, high(row)): add(row[j], '\x0A') + if result.sons.len > 0: result.sons[^1].endsHeader = true + # fix rnTableDataCell -> rnTableHeaderCell for previous table rows: + for nRow in 0 ..< result.sons.len: + for nCell in 0 ..< result.sons[nRow].len: + template cell: PRstNode = result.sons[nRow].sons[nCell] + cell = PRstNode(kind: rnTableHeaderCell, sons: cell.sons, + span: cell.span, anchor: cell.anchor) + if currentTok(p).kind == tkEof: break + let tabRow = parseSimpleTableRow(p, cols, colChar) + result.add tabRow + +proc readTableRow(p: var RstParser): ColSeq = + if currentTok(p).symbol == "|": inc p.idx + while currentTok(p).kind notin {tkIndent, tkEof}: + var limits: ColumnLimits + limits.first = p.idx + while currentTok(p).kind notin {tkIndent, tkEof}: + if currentTok(p).symbol == "|" and prevTok(p).symbol != "\\": break + inc p.idx + limits.last = p.idx + result.add(limits) + if currentTok(p).kind in {tkIndent, tkEof}: break + inc p.idx + p.idx = tokenAfterNewline(p) + +proc getColContents(p: var RstParser, colLim: ColumnLimits): string = + for i in colLim.first ..< colLim.last: + result.add(p.tok[i].symbol) + result.strip + +proc isValidDelimiterRow(p: var RstParser, colNum: int): bool = + let row = readTableRow(p) + if row.len != colNum: return false + for limits in row: + let content = getColContents(p, limits) + if content.len < 3 or not (content.startsWith("--") or content.startsWith(":-")): + return false + return true + +proc parseMarkdownTable(p: var RstParser): PRstNode = + var + row: ColSeq + a, b: PRstNode + q: RstParser + result = newRstNodeA(p, rnMarkdownTable) + + proc 
parseRow(p: var RstParser, cellKind: RstNodeKind, result: PRstNode) = + row = readTableRow(p) + if result.colCount == 0: result.colCount = row.len # table header + elif row.len < result.colCount: row.setLen(result.colCount) a = newRstNode(rnTableRow) - for j in countup(0, high(row)): + for j in 0 ..< result.colCount: + b = newRstNode(cellKind) initParser(q, p.s) - q.col = cols[j] - q.line = line - 1 - q.filename = p.filename - q.col += getTokens(row[j], false, q.tok) - b = newRstNode(rnTableDataCell) - add(b, parseDoc(q)) - add(a, b) - add(result, a) + q.col = p.col + q.line = currentTok(p).line - 1 + getTokens(getColContents(p, row[j]), q.tok) + b.add(parseDoc(q)) + a.add(b) + result.add(a) + + parseRow(p, rnTableHeaderCell, result) + if not isValidDelimiterRow(p, result.colCount): + rstMessage(p, meMarkdownIllformedTable) + while predNL(p) and currentTok(p).symbol == "|": + parseRow(p, rnTableDataCell, result) proc parseTransition(p: var RstParser): PRstNode = - result = newRstNode(rnTransition) - inc(p.idx) - if p.tok[p.idx].kind == tkIndent: inc(p.idx) - if p.tok[p.idx].kind == tkIndent: inc(p.idx) - -proc parseOverline(p: var RstParser): PRstNode = - var c = p.tok[p.idx].symbol[0] - inc(p.idx, 2) - result = newRstNode(rnOverline) - while true: - parseUntilNewline(p, result) - if p.tok[p.idx].kind == tkIndent: - inc(p.idx) - if p.tok[p.idx - 1].ival > currInd(p): - add(result, newRstNode(rnLeaf, " ")) - else: - break - else: - break - result.level = getLevel(p.s.overlineToLevel, p.s.oLevel, c) - if p.tok[p.idx].kind == tkAdornment: - inc(p.idx) # XXX: check? - if p.tok[p.idx].kind == tkIndent: inc(p.idx) + result = newRstNodeA(p, rnTransition) + inc p.idx + if currentTok(p).kind == tkIndent: inc p.idx + if currentTok(p).kind == tkIndent: inc p.idx proc parseBulletList(p: var RstParser): PRstNode = result = nil - if p.tok[p.idx + 1].kind == tkWhite: - var bullet = p.tok[p.idx].symbol - var col = p.tok[p.idx].col - result = newRstNode(rnBulletList) + if nextTok(p).kind == tkWhite: + var bullet = currentTok(p).symbol + var col = currentTok(p).col + result = newRstNodeA(p, rnBulletList) pushInd(p, p.tok[p.idx + 2].col) - inc(p.idx, 2) + inc p.idx, 2 while true: var item = newRstNode(rnBulletItem) parseSection(p, item) - add(result, item) - if (p.tok[p.idx].kind == tkIndent) and (p.tok[p.idx].ival == col) and - (p.tok[p.idx + 1].symbol == bullet) and - (p.tok[p.idx + 2].kind == tkWhite): - inc(p.idx, 3) + result.add(item) + if currentTok(p).kind == tkIndent and currentTok(p).ival == col and + nextTok(p).symbol == bullet and + p.tok[p.idx + 2].kind == tkWhite: + inc p.idx, 3 else: break popInd(p) proc parseOptionList(p: var RstParser): PRstNode = - result = newRstNode(rnOptionList) + result = newRstNodeA(p, rnOptionList) + let col = currentTok(p).col + var order = 1 while true: - if isOptionList(p): + if currentTok(p).col == col and isOptionList(p): var a = newRstNode(rnOptionGroup) var b = newRstNode(rnDescription) var c = newRstNode(rnOptionListItem) - if match(p, p.idx, "//w"): inc(p.idx) - while not (p.tok[p.idx].kind in {tkIndent, tkEof}): - if (p.tok[p.idx].kind == tkWhite) and (len(p.tok[p.idx].symbol) > 1): - inc(p.idx) + if match(p, p.idx, "//w"): inc p.idx + while currentTok(p).kind notin {tkIndent, tkEof}: + if currentTok(p).kind == tkWhite and currentTok(p).symbol.len > 1: + inc p.idx break - add(a, newLeaf(p)) - inc(p.idx) + a.add(newLeaf(p)) + inc p.idx var j = tokenAfterNewline(p) - if (j > 0) and (p.tok[j - 1].kind == tkIndent) and - (p.tok[j - 1].ival > currInd(p)): + if j > 
0 and p.tok[j - 1].kind == tkIndent and p.tok[j - 1].ival > currInd(p): pushInd(p, p.tok[j - 1].ival) parseSection(p, b) popInd(p) else: parseLine(p, b) - if (p.tok[p.idx].kind == tkIndent): inc(p.idx) - add(c, a) - add(c, b) - add(result, c) + while currentTok(p).kind == tkIndent: inc p.idx + c.add(a) + c.add(b) + c.order = order; inc order + result.add(c) + else: + if currentTok(p).kind != tkEof: dec p.idx # back to tkIndent + break + +proc parseMdDefinitionList(p: var RstParser): PRstNode = + ## Parses (Pandoc/kramdown/PHPextra) Markdown definition lists. + result = newRstNodeA(p, rnMdDefList) + let termCol = currentTok(p).col + while true: + var item = newRstNode(rnDefItem) + var term = newRstNode(rnDefName) + parseLine(p, term) + skipNewlines(p) + inc p.idx, 2 # skip ":" and space + item.add(term) + while true: + var def = newRstNode(rnDefBody) + let indent = getMdBlockIndent(p) + pushInd(p, indent) + parseSection(p, def) + popInd(p) + item.add(def) + let j = skipNewlines(p, p.idx) + if isMdDefBody(p, j, termCol): # parse next definition body + p.idx = j + 2 # skip ":" and space + else: + break + result.add(item) + let j = skipNewlines(p, p.idx) + if p.tok[j].col == termCol and isMdDefListItem(p, j): + p.idx = j # parse next item else: break proc parseDefinitionList(p: var RstParser): PRstNode = result = nil var j = tokenAfterNewline(p) - 1 - if (j >= 1) and (p.tok[j].kind == tkIndent) and - (p.tok[j].ival > currInd(p)) and (p.tok[j - 1].symbol != "::"): - var col = p.tok[p.idx].col - result = newRstNode(rnDefList) + if j >= 1 and p.tok[j].kind == tkIndent and + p.tok[j].ival > currInd(p) and p.tok[j - 1].symbol != "::": + var col = currentTok(p).col + result = newRstNodeA(p, rnDefList) while true: + if isOptionList(p): + break # option list has priority over def.list j = p.idx var a = newRstNode(rnDefName) parseLine(p, a) - if (p.tok[p.idx].kind == tkIndent) and - (p.tok[p.idx].ival > currInd(p)) and - (p.tok[p.idx + 1].symbol != "::") and - not (p.tok[p.idx + 1].kind in {tkIndent, tkEof}): - pushInd(p, p.tok[p.idx].ival) + if currentTok(p).kind == tkIndent and + currentTok(p).ival > currInd(p) and + nextTok(p).symbol != "::" and + nextTok(p).kind notin {tkIndent, tkEof}: + pushInd(p, currentTok(p).ival) var b = newRstNode(rnDefBody) parseSection(p, b) var c = newRstNode(rnDefItem) - add(c, a) - add(c, b) - add(result, c) + c.add(a) + c.add(b) + result.add(c) popInd(p) else: p.idx = j break - if (p.tok[p.idx].kind == tkIndent) and (p.tok[p.idx].ival == col): - inc(p.idx) + if currentTok(p).kind == tkIndent and currentTok(p).ival == col: + inc p.idx j = tokenAfterNewline(p) - 1 if j >= 1 and p.tok[j].kind == tkIndent and p.tok[j].ival > col and p.tok[j-1].symbol != "::" and p.tok[j+1].kind != tkIndent: discard else: break - if len(result) == 0: result = nil + if result.len == 0: result = nil proc parseEnumList(p: var RstParser): PRstNode = const - wildcards: array[0..2, string] = ["(e) ", "e) ", "e. "] - wildpos: array[0..2, int] = [1, 0, 0] - result = nil + wildcards: array[0..5, string] = ["(n) ", "n) ", "n. ", + "(x) ", "x) ", "x. 
"] + # enumerator patterns, where 'x' means letter and 'n' means number + wildToken: array[0..5, int] = [4, 3, 3, 4, 3, 3] # number of tokens + wildIndex: array[0..5, int] = [1, 0, 0, 1, 0, 0] + # position of enumeration sequence (number/letter) in enumerator + let col = currentTok(p).col var w = 0 - while w <= 2: + while w < wildcards.len: if match(p, p.idx, wildcards[w]): break - inc(w) - if w <= 2: - var col = p.tok[p.idx].col - result = newRstNode(rnEnumList) - inc(p.idx, wildpos[w] + 3) - var j = tokenAfterNewline(p) - if (p.tok[j].col == p.tok[p.idx].col) or match(p, j, wildcards[w]): - pushInd(p, p.tok[p.idx].col) - while true: - var item = newRstNode(rnEnumItem) - parseSection(p, item) - add(result, item) - if (p.tok[p.idx].kind == tkIndent) and (p.tok[p.idx].ival == col) and - match(p, p.idx + 1, wildcards[w]): - inc(p.idx, wildpos[w] + 4) - else: + inc w + assert w < wildcards.len + + proc checkAfterNewline(p: RstParser, report: bool): bool = + ## If no indentation on the next line then parse as a normal paragraph + ## according to the RST spec. And report a warning with suggestions + let j = tokenAfterNewline(p, start=p.idx+1) + let requiredIndent = p.tok[p.idx+wildToken[w]].col + if p.tok[j].kind notin {tkIndent, tkEof} and + p.tok[j].col < requiredIndent and + (p.tok[j].col > col or + (p.tok[j].col == col and not match(p, j, wildcards[w]))): + if report: + let n = p.line + p.tok[j].line + let msg = "\n" & """ + not enough indentation on line $2 + (should be at column $3 if it's a continuation of enum. list), + or no blank line after line $1 (if it should be the next paragraph), + or no escaping \ at the beginning of line $1 + (if lines $1..$2 are a normal paragraph, not enum. list)""".dedent + let c = p.col + requiredIndent + ColRstOffset + rstMessage(p, mwRstStyle, msg % [$(n-1), $n, $c], + p.tok[j].line, p.tok[j].col) + result = false + else: + result = true + + if not checkAfterNewline(p, report = true): + return nil + result = newRstNodeA(p, rnEnumList) + let autoEnums = if roSupportMarkdown in p.s.options: @["#", "1"] else: @["#"] + var prevAE = "" # so as not allow mixing auto-enumerators `1` and `#` + var curEnum = 1 + for i in 0 ..< wildToken[w]-1: # add first enumerator with (, ), and . + if p.tok[p.idx + i].symbol == "#": + prevAE = "#" + result.labelFmt.add "1" + else: + result.labelFmt.add p.tok[p.idx + i].symbol + var prevEnum = p.tok[p.idx + wildIndex[w]].symbol + inc p.idx, wildToken[w] + while true: + var item = newRstNode(rnEnumItem) + pushInd(p, currentTok(p).col) + parseSection(p, item) + popInd(p) + result.add(item) + if currentTok(p).kind == tkIndent and currentTok(p).ival == col and + match(p, p.idx+1, wildcards[w]): + # don't report to avoid duplication of warning since for + # subsequent enum. 
items parseEnumList will be called second time: + if not checkAfterNewline(p, report = false): + break + let enumerator = p.tok[p.idx + 1 + wildIndex[w]].symbol + # check that it's in sequence: enumerator == next(prevEnum) + if "n" in wildcards[w]: # arabic numeral + let prevEnumI = try: parseInt(prevEnum) except ValueError: 1 + if enumerator in autoEnums: + if prevAE != "" and enumerator != prevAE: + break + prevAE = enumerator + curEnum = prevEnumI + 1 + else: curEnum = (try: parseInt(enumerator) except ValueError: 1) + if curEnum - prevEnumI != 1: break - popInd(p) + prevEnum = enumerator + else: # a..z + let prevEnumI = ord(prevEnum[0]) + if enumerator == "#": curEnum = prevEnumI + 1 + else: curEnum = ord(enumerator[0]) + if curEnum - prevEnumI != 1: + break + prevEnum = $chr(curEnum) + inc p.idx, 1 + wildToken[w] else: - dec(p.idx, wildpos[w] + 3) - result = nil + break + +proc prefix(ftnType: FootnoteType): string = + case ftnType + of fnManualNumber: result = "footnote-" + of fnAutoNumber: result = "footnoteauto-" + of fnAutoNumberLabel: result = "footnote-" + of fnAutoSymbol: result = "footnotesym-" + of fnCitation: result = "citation-" + +proc parseFootnote(p: var RstParser): PRstNode {.gcsafe.} = + ## Parses footnotes and citations, always returns 2 sons: + ## + ## 1) footnote label, always containing rnInner with 1 or more sons + ## 2) footnote body, which may be nil + var label: PRstNode + if isRst(p): + inc p.idx # skip space after `..` + label = parseFootnoteName(p, reference=false) + if label == nil: + if isRst(p): + dec p.idx + return nil + result = newRstNode(rnFootnote) + result.add label + let (fnType, i) = getFootnoteType(p.s, label) + var name = "" + var anchor = fnType.prefix + case fnType + of fnManualNumber: + addFootnoteNumManual(p, i) + anchor.add $i + of fnAutoNumber, fnAutoNumberLabel: + name = rstnodeToRefname(label) + addFootnoteNumAuto(p, name) + if fnType == fnAutoNumberLabel: + anchor.add name + else: # fnAutoNumber + result.order = p.s.lineFootnoteNum.len + anchor.add $result.order + of fnAutoSymbol: + addFootnoteSymAuto(p) + result.order = p.s.lineFootnoteSym.len + anchor.add $p.s.lineFootnoteSym.len + of fnCitation: + anchor.add rstnodeToRefname(label) + addAnchorRst(p, anchor, target = result, anchorType = footnoteAnchor) + result.anchor = anchor + if currentTok(p).kind == tkWhite: inc p.idx + discard parseBlockContent(p, result, parseSectionWrapper) + if result.len < 2: + result.add nil proc sonKind(father: PRstNode, i: int): RstNodeKind = result = rnLeaf - if i < len(father): result = father.sons[i].kind + if i < father.len: result = father.sons[i].kind proc parseSection(p: var RstParser, result: PRstNode) = + ## parse top-level RST elements: sections, transitions and body elements. 
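# Editorial summary, not from the diff: anchors produced by prefix and
# parseFootnote above for the usual RST footnote/citation markers (label
# normalization by rstnodeToRefname is not reproduced here):
#
#   .. [3] text        fnManualNumber    -> "footnote-3"
#   .. [#] text        fnAutoNumber      -> "footnoteauto-" & its order
#   .. [#note] text    fnAutoNumberLabel -> "footnote-" & refname of "note"
#   .. [*] text        fnAutoSymbol      -> "footnotesym-" & its order
#   .. [CIT2002] text  fnCitation        -> "citation-" & refname of "CIT2002"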
while true: var leave = false assert(p.idx >= 0) - while p.tok[p.idx].kind == tkIndent: - if currInd(p) == p.tok[p.idx].ival: - inc(p.idx) - elif p.tok[p.idx].ival > currInd(p): - pushInd(p, p.tok[p.idx].ival) - var a = newRstNode(rnBlockQuote) - parseSection(p, a) - add(result, a) - popInd(p) + while currentTok(p).kind == tkIndent: + if currInd(p) == currentTok(p).ival: + inc p.idx + elif currentTok(p).ival > currInd(p): + if roPreferMarkdown in p.s.options: # Markdown => normal paragraphs + if currentTok(p).ival - currInd(p) >= 4: + result.add parseLiteralBlock(p) + else: + pushInd(p, currentTok(p).ival) + parseSection(p, result) + popInd(p) + else: # RST mode => block quotes + pushInd(p, currentTok(p).ival) + var a = newRstNodeA(p, rnBlockQuote) + parseSection(p, a) + result.add(a) + popInd(p) else: + while currentTok(p).kind != tkEof and nextTok(p).kind == tkIndent: + inc p.idx # skip blank lines leave = true break - if leave or p.tok[p.idx].kind == tkEof: break + if leave or currentTok(p).kind == tkEof: break var a: PRstNode = nil var k = whichSection(p) case k of rnLiteralBlock: - inc(p.idx) # skip '::' + inc p.idx # skip '::' a = parseLiteralBlock(p) of rnBulletList: a = parseBulletList(p) of rnLineBlock: a = parseLineBlock(p) + of rnMarkdownBlockQuote: a = parseMarkdownBlockQuote(p) of rnDirective: a = parseDotDot(p) + of rnFootnote: a = parseFootnote(p) of rnEnumList: a = parseEnumList(p) - of rnLeaf: rstMessage(p, meNewSectionExpected) + of rnLeaf: rstMessage(p, meNewSectionExpected, "(syntax error)") of rnParagraph: discard of rnDefList: a = parseDefinitionList(p) + of rnMdDefList: a = parseMdDefinitionList(p) of rnFieldList: - if p.idx > 0: dec(p.idx) + if p.idx > 0: dec p.idx a = parseFields(p) of rnTransition: a = parseTransition(p) - of rnHeadline: a = parseHeadline(p) + of rnHeadline, rnMarkdownHeadline: a = parseHeadline(p) of rnOverline: a = parseOverline(p) of rnTable: a = parseSimpleTable(p) + of rnMarkdownTable: a = parseMarkdownTable(p) of rnOptionList: a = parseOptionList(p) else: #InternalError("rst.parseSection()") discard if a == nil and k != rnDirective: - a = newRstNode(rnParagraph) + a = newRstNodeA(p, rnParagraph) parseParagraph(p, a) - addIfNotNil(result, a) + result.addIfNotNil(a) if sonKind(result, 0) == rnParagraph and sonKind(result, 1) != rnParagraph: - result.sons[0].kind = rnInner - -proc parseSectionWrapper(p: var RstParser): PRstNode = - result = newRstNode(rnInner) - parseSection(p, result) - while (result.kind == rnInner) and (len(result) == 1): - result = result.sons[0] - -proc `$`(t: Token): string = - result = $t.kind & ' ' & t.symbol + result.sons[0] = newRstNode(rnInner, result.sons[0].sons, + anchor=result.sons[0].anchor) proc parseDoc(p: var RstParser): PRstNode = result = parseSectionWrapper(p) - if p.tok[p.idx].kind != tkEof: - when false: - assert isAllocatedPtr(cast[pointer](p.tok)) - for i in 0 .. 
high(p.tok): - assert isNil(p.tok[i].symbol) or - isAllocatedPtr(cast[pointer](p.tok[i].symbol)) - echo "index: ", p.idx, " length: ", high(p.tok), "##", - p.tok[p.idx-1], p.tok[p.idx], p.tok[p.idx+1] - #assert isAllocatedPtr(cast[pointer](p.indentStack)) + if currentTok(p).kind != tkEof: rstMessage(p, meGeneralParseError) type DirFlag = enum hasArg, hasOptions, argIsFile, argIsWord DirFlags = set[DirFlag] - SectionParser = proc (p: var RstParser): PRstNode {.nimcall.} -proc parseDirective(p: var RstParser, flags: DirFlags): PRstNode = +proc parseDirective(p: var RstParser, k: RstNodeKind, flags: DirFlags): PRstNode = ## Parses arguments and options for a directive block. ## ## A directive block will always have three sons: the arguments for the - ## directive (rnDirArg), the options (rnFieldList) and the block - ## (rnLineBlock). This proc parses the two first nodes, the block is left to + ## directive (rnDirArg), the options (rnFieldList) and the directive + ## content block. This proc parses the two first nodes, the 3rd is left to ## the outer `parseDirective` call. ## ## Both rnDirArg and rnFieldList children nodes might be nil, so you need to ## check them before accessing. - result = newRstNode(rnDirective) + result = newRstNodeA(p, k) + if k == rnCodeBlock: result.info = lineInfo(p) var args: PRstNode = nil var options: PRstNode = nil if hasArg in flags: args = newRstNode(rnDirArg) if argIsFile in flags: while true: - case p.tok[p.idx].kind + case currentTok(p).kind of tkWord, tkOther, tkPunct, tkAdornment: - add(args, newLeaf(p)) - inc(p.idx) + args.add(newLeaf(p)) + inc p.idx else: break elif argIsWord in flags: - while p.tok[p.idx].kind == tkWhite: inc(p.idx) - if p.tok[p.idx].kind == tkWord: - add(args, newLeaf(p)) - inc(p.idx) + while currentTok(p).kind == tkWhite: inc p.idx + if currentTok(p).kind == tkWord: + args.add(newLeaf(p)) + inc p.idx else: args = nil else: parseLine(p, args) - add(result, args) + result.add(args) if hasOptions in flags: - if (p.tok[p.idx].kind == tkIndent) and (p.tok[p.idx].ival >= 3) and - (p.tok[p.idx + 1].symbol == ":"): + if currentTok(p).kind == tkIndent and currentTok(p).ival > currInd(p) and + nextTok(p).symbol == ":": + pushInd(p, currentTok(p).ival) options = parseFields(p) - add(result, options) - -proc indFollows(p: RstParser): bool = - result = p.tok[p.idx].kind == tkIndent and p.tok[p.idx].ival > currInd(p) + popInd(p) + result.add(options) -proc parseDirective(p: var RstParser, flags: DirFlags, +proc parseDirective(p: var RstParser, k: RstNodeKind, flags: DirFlags, contentParser: SectionParser): PRstNode = - ## Returns a generic rnDirective tree. + ## A helper proc that does main work for specific directive procs. + ## Always returns a generic rnDirective tree with these 3 children: ## - ## The children are rnDirArg, rnFieldList and rnLineBlock. Any might be nil. - result = parseDirective(p, flags) - if not isNil(contentParser) and indFollows(p): - pushInd(p, p.tok[p.idx].ival) - var content = contentParser(p) - popInd(p) - add(result, content) + ## 1) rnDirArg + ## 2) rnFieldList + ## 3) a node returned by `contentParser`. + ## + ## .. warning:: Any of the 3 children may be nil. 
+ result = parseDirective(p, k, flags) + if not isNil(contentParser) and + parseBlockContent(p, result, contentParser): + discard "result is updated by parseBlockContent" else: - add(result, PRstNode(nil)) + result.add(PRstNode(nil)) proc parseDirBody(p: var RstParser, contentParser: SectionParser): PRstNode = if indFollows(p): - pushInd(p, p.tok[p.idx].ival) + pushInd(p, currentTok(p).ival) result = contentParser(p) popInd(p) @@ -1580,7 +3312,7 @@ proc dirInclude(p: var RstParser): PRstNode = # encoding (if specified). # result = nil - var n = parseDirective(p, {hasArg, argIsFile, hasOptions}, nil) + var n = parseDirective(p, rnDirective, {hasArg, argIsFile, hasOptions}, nil) var filename = strip(addNodes(n.sons[0])) var path = p.findRelativeFile(filename) if path == "": @@ -1589,15 +3321,15 @@ proc dirInclude(p: var RstParser): PRstNode = # XXX: error handling; recursive file inclusion! if getFieldValue(n, "literal") != "": result = newRstNode(rnLiteralBlock) - add(result, newRstNode(rnLeaf, readFile(path))) + result.add newLeaf(readFile(path)) else: - let inputString = readFile(path).string() + let inputString = readFile(path) let startPosition = block: let searchFor = n.getFieldValue("start-after").strip() if searchFor != "": let pos = inputString.find(searchFor) - if pos != -1: pos + searchFor.len() + if pos != -1: pos + searchFor.len else: 0 else: 0 @@ -1614,15 +3346,16 @@ proc dirInclude(p: var RstParser): PRstNode = var q: RstParser initParser(q, p.s) - q.filename = path - q.col += getTokens( - inputString[startPosition..endPosition].strip(), - false, + let saveFileIdx = p.s.currFileIdx + setCurrFilename(p.s, path) + getTokens( + inputString[startPosition..endPosition], q.tok) # workaround a GCC bug; more like the interior pointer bug? #if find(q.tok[high(q.tok)].symbol, "\0\x01\x02") > 0: # InternalError("Too many binary zeros in include file") result = parseDoc(q) + p.s.currFileIdx = saveFileIdx proc dirCodeBlock(p: var RstParser, nimExtension = false): PRstNode = ## Parses a code block. @@ -1640,57 +3373,54 @@ proc dirCodeBlock(p: var RstParser, nimExtension = false): PRstNode = ## ## As an extension this proc will process the ``file`` extension field and if ## present will replace the code block with the contents of the referenced - ## file. - result = parseDirective(p, {hasArg, hasOptions}, parseLiteralBlock) - var filename = strip(getFieldValue(result, "file")) - if filename != "": - var path = p.findRelativeFile(filename) - if path == "": rstMessage(p, meCannotOpenFile, filename) - var n = newRstNode(rnLiteralBlock) - add(n, newRstNode(rnLeaf, readFile(path))) - result.sons[2] = n + ## file. This behaviour is disabled in sandboxed mode and can be re-enabled + ## with the `roSandboxDisabled` flag. + result = parseDirective(p, rnCodeBlock, {hasArg, hasOptions}, parseLiteralBlock) + mayLoadFile(p, result) - # Extend the field block if we are using our custom extension. + # Extend the field block if we are using our custom Nim extension. if nimExtension: - # Create a field block if the input block didn't have any. - if result.sons[1].isNil: result.sons[1] = newRstNode(rnFieldList) - assert result.sons[1].kind == rnFieldList - # Hook the extra field and specify the Nim language as value. 
- var extraNode = newRstNode(rnField) - extraNode.add(newRstNode(rnFieldName)) - extraNode.add(newRstNode(rnFieldBody)) - extraNode.sons[0].add(newRstNode(rnLeaf, "default-language")) - extraNode.sons[1].add(newRstNode(rnLeaf, "Nim")) - result.sons[1].add(extraNode) - - result.kind = rnCodeBlock + defaultCodeLangNim(p, result) proc dirContainer(p: var RstParser): PRstNode = - result = parseDirective(p, {hasArg}, parseSectionWrapper) - assert(result.kind == rnDirective) - assert(len(result) == 3) - result.kind = rnContainer + result = parseDirective(p, rnContainer, {hasArg}, parseSectionWrapper) + assert(result.len == 3) proc dirImage(p: var RstParser): PRstNode = - result = parseDirective(p, {hasOptions, hasArg, argIsFile}, nil) - result.kind = rnImage + result = parseDirective(p, rnImage, {hasOptions, hasArg, argIsFile}, nil) proc dirFigure(p: var RstParser): PRstNode = - result = parseDirective(p, {hasOptions, hasArg, argIsFile}, + result = parseDirective(p, rnFigure, {hasOptions, hasArg, argIsFile}, parseSectionWrapper) - result.kind = rnFigure proc dirTitle(p: var RstParser): PRstNode = - result = parseDirective(p, {hasArg}, nil) - result.kind = rnTitle + result = parseDirective(p, rnTitle, {hasArg}, nil) proc dirContents(p: var RstParser): PRstNode = - result = parseDirective(p, {hasArg}, nil) - result.kind = rnContents + result = parseDirective(p, rnContents, {hasArg}, nil) + p.s.hasToc = true proc dirIndex(p: var RstParser): PRstNode = - result = parseDirective(p, {}, parseSectionWrapper) - result.kind = rnIndex + result = parseDirective(p, rnIndex, {}, parseSectionWrapper) + +proc dirAdmonition(p: var RstParser, d: string): PRstNode = + result = parseDirective(p, rnAdmonition, {}, parseSectionWrapper) + result.adType = d + +proc dirDefaultRole(p: var RstParser): PRstNode = + result = parseDirective(p, rnDefaultRole, {hasArg}, nil) + if result.sons[0].len == 0: p.s.currRole = defaultRole(p.s.options) + else: + assert result.sons[0].sons[0].kind == rnLeaf + p.s.currRole = result.sons[0].sons[0].text + p.s.currRoleKind = whichRole(p, p.s.currRole) + +proc dirRole(p: var RstParser): PRstNode = + result = parseDirective(p, rnDirective, {hasArg, hasOptions}, nil) + # just check that language is supported, TODO: real role association + let lang = getFieldValue(result, "language").strip + if lang != "" and getSourceLanguage(lang) == langNone: + rstMessage(p, mwUnsupportedLanguage, lang) proc dirRawAux(p: var RstParser, result: var PRstNode, kind: RstNodeKind, contentParser: SectionParser) = @@ -1702,10 +3432,10 @@ proc dirRawAux(p: var RstParser, result: var PRstNode, kind: RstNodeKind, else: var f = readFile(path) result = newRstNode(kind) - add(result, newRstNode(rnLeaf, f)) + result.add newLeaf(f) else: - result.kind = kind - add(result, parseDirBody(p, contentParser)) + result = newRstNode(kind, result.sons) + result.add(parseDirBody(p, contentParser)) proc dirRaw(p: var RstParser): PRstNode = # @@ -1716,7 +3446,7 @@ proc dirRaw(p: var RstParser): PRstNode = # # html # latex - result = parseDirective(p, {hasOptions, hasArg, argIsWord}) + result = parseDirective(p, rnDirective, {hasOptions, hasArg, argIsWord}) if result.sons[0] != nil: if cmpIgnoreCase(result.sons[0].sons[0].text, "html") == 0: dirRawAux(p, result, rnRawHtml, parseLiteralBlock) @@ -1727,99 +3457,430 @@ proc dirRaw(p: var RstParser): PRstNode = else: dirRawAux(p, result, rnRaw, parseSectionWrapper) +proc dirImportdoc(p: var RstParser): PRstNode = + result = parseDirective(p, rnDirective, {}, parseLiteralBlock) + assert 
result.sons[2].kind == rnLiteralBlock + assert result.sons[2].sons[0].kind == rnLeaf + let filenames: seq[string] = split(result.sons[2].sons[0].text, seps = {','}) + proc rmSpaces(s: string): string = s.split.join("") + for origFilename in filenames: + p.s.idxImports[origFilename.rmSpaces] = ImportdocInfo(fromInfo: lineInfo(p)) + +proc selectDir(p: var RstParser, d: string): PRstNode = + result = nil + let tok = p.tok[p.idx-2] # report on directive in ".. directive::" + if roSandboxDisabled notin p.s.options: + if d notin SandboxDirAllowlist: + rstMessage(p, meSandboxedDirective, d, tok.line, tok.col) + + case d + of "admonition", "attention", "caution": result = dirAdmonition(p, d) + of "code": result = dirCodeBlock(p) + of "code-block": result = dirCodeBlock(p, nimExtension = true) + of "container": result = dirContainer(p) + of "contents": result = dirContents(p) + of "danger": result = dirAdmonition(p, d) + of "default-role": result = dirDefaultRole(p) + of "error": result = dirAdmonition(p, d) + of "figure": result = dirFigure(p) + of "hint": result = dirAdmonition(p, d) + of "image": result = dirImage(p) + of "important": result = dirAdmonition(p, d) + of "importdoc": result = dirImportdoc(p) + of "include": result = dirInclude(p) + of "index": result = dirIndex(p) + of "note": result = dirAdmonition(p, d) + of "raw": + if roSupportRawDirective in p.s.options: + result = dirRaw(p) + else: + rstMessage(p, meInvalidDirective, d) + of "role": result = dirRole(p) + of "tip": result = dirAdmonition(p, d) + of "title": result = dirTitle(p) + of "warning": result = dirAdmonition(p, d) + else: + rstMessage(p, meInvalidDirective, d, tok.line, tok.col) + proc parseDotDot(p: var RstParser): PRstNode = + # parse "explicit markup blocks" result = nil - var col = p.tok[p.idx].col - inc(p.idx) + var n: PRstNode # to store result, workaround for bug 16855 + var col = currentTok(p).col + inc p.idx var d = getDirective(p) if d != "": pushInd(p, col) - case getDirKind(d) - of dkInclude: result = dirInclude(p) - of dkImage: result = dirImage(p) - of dkFigure: result = dirFigure(p) - of dkTitle: result = dirTitle(p) - of dkContainer: result = dirContainer(p) - of dkContents: result = dirContents(p) - of dkRaw: - if roSupportRawDirective in p.s.options: - result = dirRaw(p) - else: - rstMessage(p, meInvalidDirective, d) - of dkCode: result = dirCodeBlock(p) - of dkCodeBlock: result = dirCodeBlock(p, nimExtension = true) - of dkIndex: result = dirIndex(p) - else: rstMessage(p, meInvalidDirective, d) + result = selectDir(p, d) popInd(p) elif match(p, p.idx, " _"): # hyperlink target: - inc(p.idx, 2) - var a = getReferenceName(p, ":") - if p.tok[p.idx].kind == tkWhite: inc(p.idx) + inc p.idx, 2 + var ending = ":" + if currentTok(p).symbol == "`": + inc p.idx + ending = "`" + var a = getReferenceName(p, ending) + if ending == "`": + if currentTok(p).symbol == ":": + inc p.idx + else: + rstMessage(p, meExpected, ":") + if currentTok(p).kind == tkWhite: inc p.idx var b = untilEol(p) - setRef(p, rstnodeToRefname(a), b) + if len(b) == 0: # set internal anchor + p.curAnchors.add ManualAnchor( + alias: linkName(a), anchor: rstnodeToRefname(a), info: prevLineInfo(p) + ) + else: # external hyperlink + setRef(p, rstnodeToRefname(a), b, refType=hyperlinkAlias) elif match(p, p.idx, " |"): # substitution definitions: - inc(p.idx, 2) + inc p.idx, 2 var a = getReferenceName(p, "|") var b: PRstNode - if p.tok[p.idx].kind == tkWhite: inc(p.idx) - if cmpIgnoreStyle(p.tok[p.idx].symbol, "replace") == 0: - inc(p.idx) + if 
currentTok(p).kind == tkWhite: inc p.idx + if cmpIgnoreStyle(currentTok(p).symbol, "replace") == 0: + inc p.idx expect(p, "::") b = untilEol(p) - elif cmpIgnoreStyle(p.tok[p.idx].symbol, "image") == 0: - inc(p.idx) + elif cmpIgnoreStyle(currentTok(p).symbol, "image") == 0: + inc p.idx b = dirImage(p) else: - rstMessage(p, meInvalidDirective, p.tok[p.idx].symbol) + rstMessage(p, meInvalidDirective, currentTok(p).symbol) setSub(p, addNodes(a), b) - elif match(p, p.idx, " ["): - # footnotes, citations - inc(p.idx, 2) - var a = getReferenceName(p, "]") - if p.tok[p.idx].kind == tkWhite: inc(p.idx) - var b = untilEol(p) - setRef(p, rstnodeToRefname(a), b) + elif match(p, p.idx, " [") and + (n = parseFootnote(p); n != nil): + result = n else: - result = parseComment(p) - -proc resolveSubs(p: var RstParser, n: PRstNode): PRstNode = + result = parseComment(p, col) + +proc rstParsePass1*(fragment: string, + line, column: int, + sharedState: PRstSharedState): PRstNode = + ## Parses an RST `fragment`. + ## The result should be further processed by + ## preparePass2_ and resolveSubs_ (which is pass 2). + var p: RstParser + initParser(p, sharedState) + p.line = line + p.col = column + getTokens(fragment, p.tok) + result = parseDoc(p) + +proc extractLinkEnd(x: string): string = + ## From links like `path/to/file.html#/%` extract `file.html#/%`. + let i = find(x, '#') + let last = + if i >= 0: i + else: x.len - 1 + let j = rfind(x, '/', start=0, last=last) + if j >= 0: + result = x[j+1 .. ^1] + else: + result = x + +proc loadIdxFile(s: var PRstSharedState, origFilename: string) = + doAssert roSandboxDisabled in s.options + var info: TLineInfo + info.fileIndex = addFilename(s, origFilename) + var (dir, basename, ext) = origFilename.splitFile + if ext notin [".md", ".rst", ".nim", ""]: + rstMessage(s.filenames, s.msgHandler, s.idxImports[origFilename].fromInfo, + meCannotOpenFile, origFilename & ": unknown extension") + let idxFilename = dir / basename & ".idx" + let (idxPath, linkRelPath) = s.findRefFile(idxFilename) + s.idxImports[origFilename].linkRelPath = linkRelPath + var + fileEntries: seq[IndexEntry] + title: IndexEntry + try: + (fileEntries, title) = parseIdxFile(idxPath) + except IOError: + rstMessage(s.filenames, s.msgHandler, s.idxImports[origFilename].fromInfo, + meCannotOpenFile, idxPath) + except ValueError as e: + s.msgHandler(idxPath, LineRstInit, ColRstInit, meInvalidField, e.msg) + + var isMarkup = false # for sanity check to avoid mixing .md <-> .nim + for entry in fileEntries: + # Though target .idx already has inside it the path to HTML relative + # project's root, we won't rely on it and use `linkRelPath` instead. 
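A minimal sketch (not part of the patch) of how the directives dispatched by `selectDir` above are reached through the public `rstParse` entry point, whose new tuple-returning signature appears further down in this diff. The import path assumes the Nim repository layout, and "note"/"code-block" are assumed to be in `SandboxDirAllowlist`, so no `roSandboxDisabled` option is needed.

```nim
# Sketch only: the import path and the SandboxDirAllowlist membership of
# "note"/"code-block" are assumptions, not verified against the allowlist.
import packages/docutils/[rst, rstast]

let src = """
.. note:: routed to dirAdmonition by selectDir

.. code-block:: nim
   :number-lines:

   echo "routed to dirCodeBlock"
"""
let (node, filenames, hasToc) = rstParse(src, "example.rst",
                                         line = 1, column = 1, options = {})
echo node.treeRepr   # dumps the resulting rnAdmonition/rnCodeBlock nodes
                     # (treeRepr is added to rstast.nim later in this diff)
```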
+ let refn = extractLinkEnd(entry.link) + # select either markup (rst/md) or Nim cases: + if entry.kind in {ieMarkupTitle, ieNimTitle}: + s.idxImports[origFilename].title = entry.keyword + case entry.kind + of ieIdxRole, ieHeading, ieMarkupTitle: + if ext == ".nim" and entry.kind == ieMarkupTitle: + rstMessage(s, idxPath, meInvalidField, + $ieMarkupTitle & " in supposedly .nim-derived file") + if entry.kind == ieMarkupTitle: + isMarkup = true + info.line = entry.line.uint16 + addAnchorExtRst(s, key = entry.keyword, refn = refn, + anchorType = headlineAnchor, info=info) + of ieNim, ieNimGroup, ieNimTitle: + if ext in [".md", ".rst"] or isMarkup: + rstMessage(s, idxPath, meInvalidField, + $entry.kind & " in supposedly markup-derived file") + s.nimFileImported = true + var langSym: LangSymbol + if entry.kind in {ieNim, ieNimTitle}: + var q: RstParser + initParser(q, s) + info.line = entry.line.uint16 + setLen(q.tok, 0) + q.idx = 0 + getTokens(entry.linkTitle, q.tok) + var sons = newSeq[PRstNode](q.tok.len) + for i in 0 ..< q.tok.len: sons[i] = newLeaf(q.tok[i].symbol) + let linkTitle = newRstNode(rnInner, sons) + langSym = linkTitle.toLangSymbol + else: # entry.kind == ieNimGroup + langSym = langSymbolGroup(kind=entry.linkTitle, name=entry.keyword) + addAnchorNim(s, external = true, refn = refn, tooltip = entry.linkDesc, + langSym = langSym, priority = -4, # lowest + info = info, module = info.fileIndex) + doAssert s.idxImports[origFilename].title != "" + +proc preparePass2*(s: var PRstSharedState, mainNode: PRstNode, importdoc = true) = + ## Records titles in node `mainNode` and orders footnotes. + countTitles(s, mainNode) + fixHeadlines(s) + orderFootnotes(s) + if importdoc: + for origFilename in s.idxImports.keys: + loadIdxFile(s, origFilename) + +proc resolveLink(s: PRstSharedState, n: PRstNode) : PRstNode = + # Associate this link alias with its target and change node kind to + # rnHyperlink or rnInternalRef appropriately. 
+ var desc, alias: PRstNode + if n.kind == rnPandocRef: # link like [desc][alias] + desc = n.sons[0] + alias = n.sons[1] + else: # n.kind == rnRstRef, link like `desc=alias`_ + desc = n + alias = n + type LinkDef = object + ar: AnchorRule + priority: int + tooltip: string + target: PRstNode + info: TLineInfo + externFilename: string + # when external anchor: origin filename where anchor was defined + isTitle: bool + proc cmp(x, y: LinkDef): int = + result = cmp(x.priority, y.priority) + if result == 0: + result = cmp(x.target, y.target) + var foundLinks: seq[LinkDef] + let refn = rstnodeToRefname(alias) + var hyperlinks = findRef(s, refn) + for y in hyperlinks: + foundLinks.add LinkDef(ar: arHyperlink, priority: refPriority(y.kind), + target: y.value, info: y.info, + tooltip: "(" & $y.kind & ")") + let substRst = findMainAnchorRst(s, alias.addNodes, n.info) + template getExternFilename(subst: AnchorSubst): string = + if subst.kind == arExternalRst or + (subst.kind == arNim and subst.external): + getFilename(s, subst) + else: "" + for subst in substRst: + var refname, fullRefname: string + if subst.kind == arInternalRst: + refname = subst.target.anchor + fullRefname = refname + else: # arExternalRst + refname = subst.refnameExt + fullRefname = s.idxImports[getFilename(s, subst)].linkRelPath & + "/" & refname + let anchorType = + if subst.kind == arInternalRst: subst.anchorType + else: subst.anchorTypeExt # arExternalRst + foundLinks.add LinkDef(ar: subst.kind, priority: subst.priority, + target: newLeaf(fullRefname), + info: subst.info, + externFilename: getExternFilename(subst), + isTitle: isDocumentationTitle(refname), + tooltip: "(" & $anchorType & ")") + # find anchors automatically generated from Nim symbols + if roNimFile in s.options or s.nimFileImported: + let substNim = findMainAnchorNim(s, signature=alias, n.info) + for subst in substNim: + let fullRefname = + if subst.external: + s.idxImports[getFilename(s, subst)].linkRelPath & + "/" & subst.refname + else: subst.refname + foundLinks.add LinkDef(ar: subst.kind, priority: subst.priority, + target: newLeaf(fullRefname), + externFilename: getExternFilename(subst), + isTitle: isDocumentationTitle(subst.refname), + info: subst.info, tooltip: subst.tooltip) + foundLinks.sort(cmp = cmp, order = Descending) + let aliasStr = addNodes(alias) + if foundLinks.len >= 1: + if foundLinks[0].externFilename != "": + s.idxImports[foundLinks[0].externFilename].used = true + let kind = if foundLinks[0].ar in {arHyperlink, arExternalRst}: rnHyperlink + elif foundLinks[0].ar == arNim: + if foundLinks[0].externFilename == "": rnNimdocRef + else: rnHyperlink + else: rnInternalRef + result = newRstNode(kind) + let documentName = # filename without ext for `.nim`, title for `.md` + if foundLinks[0].ar == arNim: + changeFileExt(foundLinks[0].externFilename.extractFilename, "") + elif foundLinks[0].externFilename != "": + s.idxImports[foundLinks[0].externFilename].title + else: foundLinks[0].externFilename.extractFilename + let linkText = + if foundLinks[0].externFilename != "": + if foundLinks[0].isTitle: newLeaf(addNodes(desc)) + else: newLeaf(documentName & ": " & addNodes(desc)) + else: + newRstNode(rnInner, desc.sons) + result.sons = @[linkText, foundLinks[0].target] + if kind == rnNimdocRef: result.tooltip = foundLinks[0].tooltip + if foundLinks.len > 1: # report ambiguous link + var targets = newSeq[string]() + for l in foundLinks: + var t = " " + if s.filenames.len > 1: + t.add getFilename(s.filenames, l.info.fileIndex) + let n = l.info.line + let 
c = l.info.col + ColRstOffset + t.add "($1, $2): $3" % [$n, $c, l.tooltip] + targets.add t + rstMessage(s.filenames, s.msgHandler, n.info, mwAmbiguousLink, + "`$1`\n clash:\n$2" % [ + aliasStr, targets.join("\n")]) + else: # nothing found + result = n + rstMessage(s.filenames, s.msgHandler, n.info, mwBrokenLink, aliasStr) + +proc resolveSubs*(s: PRstSharedState, n: PRstNode): PRstNode = + ## Makes pass 2 of RST parsing. + ## Resolves substitutions and anchor aliases, groups footnotes. + ## Takes input node `n` and returns the same node with recursive + ## substitutions in `n.sons` to `result`. result = n if n == nil: return case n.kind of rnSubstitutionReferences: - var x = findSub(p, n) + var x = findSub(s, n) if x >= 0: - result = p.s.subs[x].value + result = s.subs[x].value else: var key = addNodes(n) var e = getEnv(key) - if e != "": result = newRstNode(rnLeaf, e) - else: rstMessage(p, mwUnknownSubstitution, key) - of rnRef: - var y = findRef(p, rstnodeToRefname(n)) - if y != nil: - result = newRstNode(rnHyperlink) - n.kind = rnInner - add(result, n) - add(result, y) + if e != "": result = newLeaf(e) + else: rstMessage(s.filenames, s.msgHandler, n.info, + mwUnknownSubstitution, key) + of rnRstRef, rnPandocRef: + result = resolveLink(s, n) + of rnFootnote: + var (fnType, num) = getFootnoteType(s, n.sons[0]) + case fnType + of fnManualNumber, fnCitation: + discard "no need to alter fixed text" + of fnAutoNumberLabel, fnAutoNumber: + if fnType == fnAutoNumberLabel: + let labelR = rstnodeToRefname(n.sons[0]) + num = getFootnoteNum(s, labelR) + else: + num = getFootnoteNum(s, n.order) + var nn = newRstNode(rnInner) + nn.add newLeaf($num) + result.sons[0] = nn + of fnAutoSymbol: + let sym = getAutoSymbol(s, n.order) + n.sons[0].sons[0].text = sym + n.sons[1] = resolveSubs(s, n.sons[1]) + of rnFootnoteRef: + var (fnType, num) = getFootnoteType(s, n.sons[0]) + template addLabel(number: int | string) = + var nn = newRstNode(rnInner) + nn.add newLeaf($number) + result.add(nn) + var refn = fnType.prefix + # create new rnFootnoteRef, add final label, and finalize target refn: + result = newRstNode(rnFootnoteRef, info = n.info) + case fnType + of fnManualNumber: + addLabel num + refn.add $num + of fnAutoNumber: + inc s.currFootnoteNumRef + addLabel getFootnoteNum(s, s.currFootnoteNumRef) + refn.add $s.currFootnoteNumRef + of fnAutoNumberLabel: + addLabel getFootnoteNum(s, rstnodeToRefname(n)) + refn.add rstnodeToRefname(n) + of fnAutoSymbol: + inc s.currFootnoteSymRef + addLabel getAutoSymbol(s, s.currFootnoteSymRef) + refn.add $s.currFootnoteSymRef + of fnCitation: + result.add n.sons[0] + refn.add rstnodeToRefname(n) + # TODO: correctly report ambiguities + let anchorInfo = findMainAnchorRst(s, refn, n.info) + if anchorInfo.len != 0: + result.add newLeaf(anchorInfo[0].target.anchor) # add link + else: + rstMessage(s.filenames, s.msgHandler, n.info, mwBrokenLink, refn) + result.add newLeaf(refn) # add link of rnLeaf: discard - of rnContents: - p.hasToc = true else: - for i in countup(0, len(n) - 1): n.sons[i] = resolveSubs(p, n.sons[i]) + var regroup = false + for i in 0 ..< n.len: + n.sons[i] = resolveSubs(s, n.sons[i]) + if n.sons[i] != nil and n.sons[i].kind == rnFootnote: + regroup = true + if regroup: # group footnotes together into rnFootnoteGroup + var newSons: seq[PRstNode] + var i = 0 + while i < n.len: + if n.sons[i] != nil and n.sons[i].kind == rnFootnote: + var grp = newRstNode(rnFootnoteGroup) + while i < n.len and n.sons[i].kind == rnFootnote: + grp.sons.add n.sons[i] + inc i + 
newSons.add grp + else: + newSons.add n.sons[i] + inc i + result.sons = newSons + +proc completePass2*(s: PRstSharedState) = + for (filename, importdocInfo) in s.idxImports.pairs: + if not importdocInfo.used: + rstMessage(s.filenames, s.msgHandler, importdocInfo.fromInfo, + mwUnusedImportdoc, filename) proc rstParse*(text, filename: string, - line, column: int, hasToc: var bool, + line, column: int, options: RstParseOptions, findFile: FindFileHandler = nil, - msgHandler: MsgHandler = nil): PRstNode = - var p: RstParser - initParser(p, newSharedState(options, findFile, msgHandler)) - p.filename = filename - p.line = line - p.col = column + getTokens(text, roSkipPounds in options, p.tok) - result = resolveSubs(p, parseDoc(p)) - hasToc = p.hasToc + findRefFile: FindRefFileHandler = nil, + msgHandler: MsgHandler = nil): + tuple[node: PRstNode, filenames: RstFileTable, hasToc: bool] = + ## Parses the whole `text`. The result is ready for `rstgen.renderRstToOut`, + ## note that 2nd tuple element should be fed to `initRstGenerator` + ## argument `filenames` (it is being filled here at least with `filename` + ## and possibly with other files from RST ``.. include::`` statement). + var sharedState = newRstSharedState(options, filename, findFile, findRefFile, + msgHandler, hasToc=false) + let unresolved = rstParsePass1(text, line, column, sharedState) + preparePass2(sharedState, unresolved) + result.node = resolveSubs(sharedState, unresolved) + completePass2(sharedState) + result.filenames = sharedState.filenames + result.hasToc = sharedState.hasToc diff --git a/lib/packages/docutils/rstast.nim b/lib/packages/docutils/rstast.nim index 044ea2c14..2bbb0d0b8 100644 --- a/lib/packages/docutils/rstast.nim +++ b/lib/packages/docutils/rstast.nim @@ -8,23 +8,26 @@ # ## This module implements an AST for the `reStructuredText`:idx: parser. -## -## **Note:** Import ``packages/docutils/rstast`` to use this module -import strutils, json +import std/[strutils, json] + +when defined(nimPreviewSlimSystem): + import std/assertions + type RstNodeKind* = enum ## the possible node kinds of an PRstNode rnInner, # an inner node or a root rnHeadline, # a headline rnOverline, # an over- and underlined headline + rnMarkdownHeadline, # a Markdown headline rnTransition, # a transition (the ------------- <hr> thingie) rnParagraph, # a paragraph rnBulletList, # a bullet list rnBulletItem, # a bullet item rnEnumList, # an enumerated list rnEnumItem, # an enumerated item - rnDefList, # a definition list + rnDefList, rnMdDefList, # a definition list (RST/Markdown) rnDefItem, # an item of a definition list consisting of ... rnDefName, # ... a name part ... rnDefBody, # ... and a body part ... @@ -33,16 +36,28 @@ type rnFieldName, # consisting of a field name ... rnFieldBody, # ... and a field body rnOptionList, rnOptionListItem, rnOptionGroup, rnOption, rnOptionString, - rnOptionArgument, rnDescription, rnLiteralBlock, rnQuotedLiteralBlock, + rnOptionArgument, rnDescription, rnLiteralBlock, + rnMarkdownBlockQuote, # a quote starting from punctuation like >>> + rnMarkdownBlockQuoteItem, # a quotation block, quote lines starting with + # the same number of chars rnLineBlock, # the | thingie - rnLineBlockItem, # sons of the | thing + rnLineBlockItem, # a son of rnLineBlock - one line inside it. 
+ # When `RstNode` lineIndent="\n" the line's empty rnBlockQuote, # text just indented - rnTable, rnGridTable, rnTableRow, rnTableHeaderCell, rnTableDataCell, - rnLabel, # used for footnotes and other things + rnTable, rnGridTable, rnMarkdownTable, rnTableRow, rnTableHeaderCell, rnTableDataCell, rnFootnote, # a footnote - rnCitation, # similar to footnote - rnStandaloneHyperlink, rnHyperlink, rnRef, rnDirective, # a directive - rnDirArg, rnRaw, rnTitle, rnContents, rnImage, rnFigure, rnCodeBlock, + rnCitation, # similar to footnote, so use rnFootnote instead + rnFootnoteGroup, # footnote group - exists for a purely stylistic + # reason: to display a few footnotes as 1 block + rnStandaloneHyperlink, rnHyperlink, + rnRstRef, # RST reference like `section name`_ + rnPandocRef, # Pandoc Markdown reference like [section name] + rnInternalRef, rnFootnoteRef, + rnNimdocRef, # reference to automatically generated Nim symbol + rnDirective, # a general directive + rnDirArg, # a directive argument (for some directives). + # here are directives that are not rnDirective: + rnRaw, rnTitle, rnContents, rnImage, rnFigure, rnCodeBlock, rnAdmonition, rnRawHtml, rnRawLatex, rnContainer, # ``container`` directive rnIndex, # index directve: @@ -51,40 +66,94 @@ type # * `file#id <file#id>`_ # * `file#id <file#id>'_ rnSubstitutionDef, # a definition of a substitution - rnGeneralRole, # Inline markup: + # Inline markup: + rnInlineCode, # interpreted text with code in a known language + rnCodeFragment, # inline code for highlighting with the specified + # class (which cannot be inferred from context) + rnUnknownRole, # interpreted text with an unknown role rnSub, rnSup, rnIdx, rnEmphasis, # "*" rnStrongEmphasis, # "**" rnTripleEmphasis, # "***" - rnInterpretedText, # "`" + rnInterpretedText, # "`" an auxiliary role for parsing that will + # be converted into other kinds like rnInlineCode rnInlineLiteral, # "``" + rnInlineTarget, # "_`target`" rnSubstitutionReferences, # "|" rnSmiley, # some smiley + rnDefaultRole, # .. default-role:: code rnLeaf # a leaf; the node's text field contains the # leaf val + FileIndex* = distinct int32 + TLineInfo* = object + line*: uint16 + col*: int16 + fileIndex*: FileIndex PRstNode* = ref RstNode ## an RST node RstNodeSeq* = seq[PRstNode] - RstNode* {.acyclic, final.} = object ## an RST node's description - kind*: RstNodeKind ## the node's kind - text*: string ## valid for leafs in the AST; and the title of - ## the document or the section - level*: int ## valid for some node kinds + RstNode* {.acyclic, final.} = object ## AST node (result of RST parsing) + case kind*: RstNodeKind ## the node's kind + of rnLeaf, rnSmiley: + text*: string ## string that is expected to be displayed + of rnEnumList: + labelFmt*: string ## label format like "(1)" + of rnLineBlockItem: + lineIndent*: string ## a few spaces or newline at the line beginning + of rnAdmonition: + adType*: string ## admonition type: "note", "caution", etc. 
This + ## text will set the style and also be displayed + of rnOverline, rnHeadline, rnMarkdownHeadline: + level*: int ## level of headings starting from 1 (main + ## chapter) to larger ones (minor sub-sections) + ## level=0 means it's document title or subtitle + of rnFootnote, rnCitation, rnOptionListItem: + order*: int ## footnote order (for auto-symbol footnotes and + ## auto-numbered ones without a label) + of rnMarkdownBlockQuoteItem: + quotationDepth*: int ## number of characters in line prefix + of rnRstRef, rnPandocRef, rnSubstitutionReferences, + rnInterpretedText, rnField, rnInlineCode, rnCodeBlock, rnFootnoteRef: + info*: TLineInfo ## To have line/column info for warnings at + ## nodes that are post-processed after parsing + of rnNimdocRef: + tooltip*: string + of rnTable, rnGridTable, rnMarkdownTable: + colCount*: int ## Number of (not-united) cells in the table + of rnTableRow: + endsHeader*: bool ## Is last row in the header of table? + of rnTableHeaderCell, rnTableDataCell: + span*: int ## Number of table columns that the cell occupies + else: + discard + anchor*: string ## anchor, internal link target + ## (aka HTML id tag, aka Latex label/hypertarget) sons*: RstNodeSeq ## the node's sons +proc `==`*(a, b: FileIndex): bool {.borrow.} + proc len*(n: PRstNode): int = result = len(n.sons) -proc newRstNode*(kind: RstNodeKind): PRstNode = - new(result) - result.sons = @[] - result.kind = kind +proc newRstNode*(kind: RstNodeKind, sons: seq[PRstNode] = @[], + anchor = ""): PRstNode = + result = PRstNode(kind: kind, sons: sons, anchor: anchor) + +proc newRstNode*(kind: RstNodeKind, info: TLineInfo, + sons: seq[PRstNode] = @[]): PRstNode = + result = PRstNode(kind: kind, sons: sons) + result.info = info -proc newRstNode*(kind: RstNodeKind, s: string): PRstNode = +proc newRstNode*(kind: RstNodeKind, s: string): PRstNode {.deprecated.} = + assert kind in {rnLeaf, rnSmiley} result = newRstNode(kind) result.text = s +proc newRstLeaf*(s: string): PRstNode = + result = newRstNode(rnLeaf) + result.text = s + proc lastSon*(n: PRstNode): PRstNode = result = n.sons[len(n.sons)-1] @@ -92,7 +161,7 @@ proc add*(father, son: PRstNode) = add(father.sons, son) proc add*(father: PRstNode; s: string) = - add(father.sons, newRstNode(rnLeaf, s)) + add(father.sons, newRstLeaf(s)) proc addIfNotNil*(father, son: PRstNode) = if son != nil: add(father, son) @@ -215,7 +284,7 @@ proc renderRstToRst(d: var RenderContext, n: PRstNode, result: var string) = inc(d.indent, 2) renderRstSons(d, n, result) dec(d.indent, 2) - of rnRef: + of rnRstRef: result.add("`") renderRstSons(d, n, result) result.add("`_") @@ -225,7 +294,7 @@ proc renderRstToRst(d: var RenderContext, n: PRstNode, result: var string) = result.add(" <") renderRstToRst(d, n.sons[1], result) result.add(">`_") - of rnGeneralRole: + of rnUnknownRole: result.add('`') renderRstToRst(d, n.sons[0],result) result.add("`:") @@ -298,7 +367,7 @@ proc renderRstToJsonNode(node: PRstNode): JsonNode = (key: "kind", val: %($node.kind)), (key: "level", val: %BiggestInt(node.level)) ] - if node.text.len > 0: + if node.kind in {rnLeaf, rnSmiley} and node.text.len > 0: result.add("text", %node.text) if len(node.sons) > 0: var accm = newSeq[JsonNode](len(node.sons)) @@ -308,11 +377,68 @@ proc renderRstToJsonNode(node: PRstNode): JsonNode = proc renderRstToJson*(node: PRstNode): string = ## Writes the given RST node as JSON that is in the form - ## :: - ## { - ## "kind":string node.kind, - ## "text":optional string node.text, - ## "level":optional int node.level, - ## 
"sons":optional node array - ## } + ## + ## { + ## "kind":string node.kind, + ## "text":optional string node.text, + ## "level":optional int node.level, + ## "sons":optional node array + ## } renderRstToJsonNode(node).pretty + +proc renderRstToText*(node: PRstNode): string = + ## minimal text representation of markup node + const code = {rnCodeFragment, rnInterpretedText, rnInlineLiteral, rnInlineCode} + if node == nil: + return "" + case node.kind + of rnLeaf, rnSmiley: + result.add node.text + else: + if node.kind in code: result.add "`" + for i in 0 ..< node.sons.len: + if node.kind in {rnInlineCode, rnCodeBlock} and i == 0: + continue # omit language specifier + result.add renderRstToText(node.sons[i]) + if node.kind in code: result.add "`" + +proc treeRepr*(node: PRstNode, indent=0): string = + ## Writes the parsed RST `node` into an AST tree with compact string + ## representation in the format (one line per every sub-node): + ## ``indent - kind - [text|level|order|adType] - anchor (if non-zero)`` + ## (suitable for debugging of RST parsing). + if node == nil: + result.add " ".repeat(indent) & "[nil]\n" + return + result.add " ".repeat(indent) & $node.kind + case node.kind + of rnLeaf, rnSmiley: + result.add (if node.text == "": "" else: " '" & node.text & "'") + of rnEnumList: + result.add " labelFmt=" & node.labelFmt + of rnLineBlockItem: + var txt: string + if node.lineIndent == "\n": txt = " (blank line)" + else: txt = " lineIndent=" & $node.lineIndent.len + result.add txt + of rnAdmonition: + result.add " adType=" & node.adType + of rnHeadline, rnOverline, rnMarkdownHeadline: + result.add " level=" & $node.level + of rnFootnote, rnCitation, rnOptionListItem: + result.add (if node.order == 0: "" else: " order=" & $node.order) + of rnMarkdownBlockQuoteItem: + result.add " quotationDepth=" & $node.quotationDepth + of rnTable, rnGridTable, rnMarkdownTable: + result.add " colCount=" & $node.colCount + of rnTableHeaderCell, rnTableDataCell: + if node.span > 0: + result.add " span=" & $node.span + of rnTableRow: + if node.endsHeader: result.add " endsHeader" + else: + discard + result.add (if node.anchor == "": "" else: " anchor='" & node.anchor & "'") + result.add "\n" + for son in node.sons: + result.add treeRepr(son, indent=indent+2) diff --git a/lib/packages/docutils/rstgen.nim b/lib/packages/docutils/rstgen.nim index 2ec25cc6e..7fc0ac03a 100644 --- a/lib/packages/docutils/rstgen.nim +++ b/lib/packages/docutils/rstgen.nim @@ -23,12 +23,31 @@ ## many options and tweaking, but you are not limited to snippets and can ## generate `LaTeX documents <https://en.wikipedia.org/wiki/LaTeX>`_ too. ## -## **Note:** Import ``packages/docutils/rstgen`` to use this module +## `Docutils configuration files`_ are not supported. Instead HTML generation +## can be tweaked by editing file ``config/nimdoc.cfg``. +## +## .. _Docutils configuration files: https://docutils.sourceforge.io/docs/user/config.htm +## +## There are stylistic difference between how this module renders some elements +## and how original Python Docutils does: +## +## * Backreferences to TOC in section headings are not generated. +## In HTML each section is also a link that points to the section itself: +## this is done for user to be able to copy the link into clipboard. +## +## * The same goes for footnotes/citations links: they point to themselves. +## No backreferences are generated since finding all references of a footnote +## can be done by simply searching for ``[footnoteName]``. 
+ +import std/[strutils, os, hashes, strtabs, tables, sequtils, + algorithm, parseutils, strbasics] + +import rstast, rst, rstidx, highlite + +when defined(nimPreviewSlimSystem): + import std/[assertions, syncio, formatfloat] + -import strutils, os, hashes, strtabs, rstast, rst, highlite, tables, sequtils, - algorithm, parseutils -import "$lib/../compiler/nimpaths" -import "$lib/../compiler/pathutils" import ../../std/private/since const @@ -40,26 +59,26 @@ type outHtml, # output is HTML outLatex # output is Latex - TocEntry = object - n*: PRstNode - refname*, header*: string - MetaEnum* = enum - metaNone, metaTitle, metaSubtitle, metaAuthor, metaVersion + metaNone, metaTitleRaw, metaTitle, metaSubtitle, metaAuthor, metaVersion + + EscapeMode* = enum # in Latex text inside options [] and URLs is + # escaped slightly differently than in normal text + emText, emOption, emUrl # emText is currently used for code also RstGenerator* = object of RootObj target*: OutputTarget config*: StringTableRef splitAfter*: int # split too long entries in the TOC listingCounter*: int - tocPart*: seq[TocEntry] + tocPart*: seq[PRstNode] # headings for Table of Contents hasToc*: bool theIndex: string # Contents of the index file to be dumped at the end. - options*: RstParseOptions findFile*: FindFileHandler msgHandler*: MsgHandler - outDir*: AbsoluteDir ## output directory, initialized by docgen.nim - destFile*: AbsoluteFile ## output (HTML) file, initialized by docgen.nim + outDir*: string ## output directory, initialized by docgen.nim + destFile*: string ## output (HTML) file, initialized by docgen.nim + filenames*: RstFileTable filename*: string ## source Nim or Rst file meta*: array[MetaEnum, string] currentSection: string ## \ @@ -70,7 +89,9 @@ type ## for hyperlinks. See renderIndexTerm proc for details. id*: int ## A counter useful for generating IDs. onTestSnippet*: proc (d: var RstGenerator; filename, cmd: string; status: int; - content: string) + content: string) {.gcsafe.} + escMode*: EscapeMode + curQuotationDepth: int PDoc = var RstGenerator ## Alias to type less. @@ -84,7 +105,7 @@ type status: int proc prettyLink*(file: string): string = - changeFileExt(file, "").replace(dotdotMangle, "..") + changeFileExt(file, "").replace("_._", "..") proc init(p: var CodeBlockParams) = ## Default initialisation of CodeBlockParams to sane values. @@ -94,9 +115,10 @@ proc init(p: var CodeBlockParams) = proc initRstGenerator*(g: var RstGenerator, target: OutputTarget, config: StringTableRef, filename: string, - options: RstParseOptions, findFile: FindFileHandler = nil, - msgHandler: MsgHandler = nil) = + msgHandler: MsgHandler = nil, + filenames = default(RstFileTable), + hasToc = false) = ## Initializes a ``RstGenerator``. ## ## You need to call this before using a ``RstGenerator`` with any other @@ -113,7 +135,7 @@ proc initRstGenerator*(g: var RstGenerator, target: OutputTarget, ## it helps to prettify the generated index if no title is found. ## ## The ``RstParseOptions``, ``FindFileHandler`` and ``MsgHandler`` types - ## are defined in the the `packages/docutils/rst module <rst.html>`_. + ## are defined in the `packages/docutils/rst module <rst.html>`_. ## ``options`` selects the behaviour of the rst parser. ## ## ``findFile`` is a proc used by the rst ``include`` directive among others. @@ -132,22 +154,25 @@ proc initRstGenerator*(g: var RstGenerator, target: OutputTarget, ## ## Example: ## - ## .. 
code-block:: nim - ## + ## ```nim ## import packages/docutils/rstgen ## ## var gen: RstGenerator ## gen.initRstGenerator(outHtml, defaultConfig(), "filename", {}) + ## ``` g.config = config g.target = target g.tocPart = @[] + g.hasToc = hasToc g.filename = filename + g.filenames = filenames g.splitAfter = 20 g.theIndex = "" - g.options = options g.findFile = findFile g.currentSection = "" g.id = 0 + g.escMode = emText + g.curQuotationDepth = 0 let fileParts = filename.splitFile if fileParts.ext == ".nim": g.currentSection = "Module " & fileParts.name @@ -166,7 +191,9 @@ proc writeIndexFile*(g: var RstGenerator, outfile: string) = ## If the index is empty the file won't be created. if g.theIndex.len > 0: writeFile(outfile, g.theIndex) -proc addXmlChar(dest: var string, c: char) = +proc addHtmlChar(dest: var string, c: char) = + # Escapes HTML characters. Note that single quote ' is not escaped as + # ' -- unlike XML (for standards pre HTML5 it was even forbidden). case c of '&': add(dest, "&") of '<': add(dest, "<") @@ -174,35 +201,36 @@ proc addXmlChar(dest: var string, c: char) = of '\"': add(dest, """) else: add(dest, c) -proc addRtfChar(dest: var string, c: char) = - case c - of '{': add(dest, "\\{") - of '}': add(dest, "\\}") - of '\\': add(dest, "\\\\") - else: add(dest, c) - -proc addTexChar(dest: var string, c: char) = +proc addTexChar(dest: var string, c: char, escMode: EscapeMode) = + ## Escapes 10 special Latex characters and sometimes ` and [, ]. + ## TODO: @ is always a normal symbol (besides the header), am I wrong? + ## All escapes that need to work in text and code blocks (`emText` mode) + ## should start from \ (to be compatible with fancyvrb/fvextra). case c - of '_': add(dest, "\\_") - of '{': add(dest, "\\symbol{123}") - of '}': add(dest, "\\symbol{125}") - of '[': add(dest, "\\symbol{91}") - of ']': add(dest, "\\symbol{93}") - of '\\': add(dest, "\\symbol{92}") - of '$': add(dest, "\\$") - of '&': add(dest, "\\&") - of '#': add(dest, "\\#") - of '%': add(dest, "\\%") - of '~': add(dest, "\\symbol{126}") - of '@': add(dest, "\\symbol{64}") - of '^': add(dest, "\\symbol{94}") - of '`': add(dest, "\\symbol{96}") + of '_', '&', '#', '%': add(dest, "\\" & c) + # commands \label and \pageref don't accept \$ by some reason but OK with $: + of '$': (if escMode == emUrl: add(dest, c) else: add(dest, "\\" & c)) + # \~ and \^ have a special meaning unless they are followed by {} + of '~', '^': add(dest, "\\" & c & "{}") + # Latex loves to substitute ` to opening quote, even in texttt mode! + of '`': add(dest, "\\textasciigrave{}") + # add {} to avoid gobbling up space by \textbackslash + of '\\': add(dest, "\\textbackslash{}") + # Using { and } in URL in Latex: https://tex.stackexchange.com/a/469175 + of '{': + add(dest, if escMode == emUrl: "\\%7B" else: "\\{") + of '}': + add(dest, if escMode == emUrl: "\\%7D" else: "\\}") + of ']': + # escape ] inside an optional argument in e.g. \section[static[T]]{.. 
+ add(dest, if escMode == emOption: "\\text{]}" else: "]") else: add(dest, c) -proc escChar*(target: OutputTarget, dest: var string, c: char) {.inline.} = +proc escChar*(target: OutputTarget, dest: var string, + c: char, escMode: EscapeMode) {.inline.} = case target - of outHtml: addXmlChar(dest, c) - of outLatex: addTexChar(dest, c) + of outHtml: addHtmlChar(dest, c) + of outLatex: addTexChar(dest, c, escMode) proc addSplitter(target: OutputTarget; dest: var string) {.inline.} = case target @@ -221,7 +249,7 @@ proc nextSplitPoint*(s: string, start: int): int = inc(result) dec(result) # last valid index -proc esc*(target: OutputTarget, s: string, splitAfter = -1): string = +proc esc*(target: OutputTarget, s: string, splitAfter = -1, escMode = emText): string = ## Escapes the HTML. result = "" if splitAfter >= 0: @@ -232,11 +260,11 @@ proc esc*(target: OutputTarget, s: string, splitAfter = -1): string = #if (splitter != " ") or (partLen + k - j + 1 > splitAfter): partLen = 0 addSplitter(target, result) - for i in countup(j, k): escChar(target, result, s[i]) + for i in countup(j, k): escChar(target, result, s[i], escMode) inc(partLen, k - j + 1) j = k + 1 else: - for i in countup(0, len(s) - 1): escChar(target, result, s[i]) + for i in countup(0, len(s) - 1): escChar(target, result, s[i], escMode) proc disp(target: OutputTarget, xml, tex: string): string = @@ -256,58 +284,46 @@ proc dispA(target: OutputTarget, dest: var string, proc `or`(x, y: string): string {.inline.} = result = if x.len == 0: y else: x -proc renderRstToOut*(d: var RstGenerator, n: PRstNode, result: var string) +proc renderRstToOut*(d: var RstGenerator, n: PRstNode, result: var string) {.gcsafe.} ## Writes into ``result`` the rst ast ``n`` using the ``d`` configuration. ## ## Before using this proc you need to initialise a ``RstGenerator`` with ## ``initRstGenerator`` and parse a rst file with ``rstParse`` from the ## `packages/docutils/rst module <rst.html>`_. Example: - ## - ## .. code-block:: nim - ## + ## ```nim ## # ...configure gen and rst vars... ## var generatedHtml = "" ## renderRstToOut(gen, rst, generatedHtml) ## echo generatedHtml + ## ``` proc renderAux(d: PDoc, n: PRstNode, result: var string) = for i in countup(0, len(n)-1): renderRstToOut(d, n.sons[i], result) -proc renderAux(d: PDoc, n: PRstNode, frmtA, frmtB: string, result: var string) = +template idS(txt: string): string = + if txt == "": "" + else: + case d.target + of outHtml: + " id=\"" & txt & "\"" + of outLatex: + "\\label{" & txt & "}\\hypertarget{" & txt & "}{}" + # we add \label for page number references via \pageref, while + # \hypertarget is for clickable links via \hyperlink. + +proc renderAux(d: PDoc, n: PRstNode, html, tex: string, result: var string) = + # formats sons of `n` as substitution variable $1 inside strings `html` and + # `tex`, internal target (anchor) is provided as substitute $2. var tmp = "" for i in countup(0, len(n)-1): renderRstToOut(d, n.sons[i], tmp) - if d.target != outLatex: - result.addf(frmtA, [tmp]) - else: - result.addf(frmtB, [tmp]) + case d.target + of outHtml: result.addf(html, [tmp, n.anchor.idS]) + of outLatex: result.addf(tex, [tmp, n.anchor.idS]) # ---------------- index handling -------------------------------------------- -proc quoteIndexColumn(text: string): string = - ## Returns a safe version of `text` for serialization to the ``.idx`` file. - ## - ## The returned version can be put without worries in a line based tab - ## separated column text file. 
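The new `EscapeMode` handling above can be exercised directly through the exported `esc` helper. A small sketch; the expected outputs in the comments were derived by hand from `addHtmlChar`/`addTexChar` and are illustrative rather than verified.

```nim
# Sketch of EscapeMode-aware escaping; outputs in comments are hand-derived.
import packages/docutils/rstgen

echo esc(outHtml,  "<b> & \"x\"")                    # &lt;b&gt; &amp; &quot;x&quot;
echo esc(outLatex, "50% of $total_sum")              # 50\% of \$total\_sum
echo esc(outLatex, "static[T]", escMode = emOption)  # static[T\text{]}
```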
The following character sequence replacements - ## will be performed for that goal: - ## - ## * ``"\\"`` => ``"\\\\"`` - ## * ``"\n"`` => ``"\\n"`` - ## * ``"\t"`` => ``"\\t"`` - result = newStringOfCap(text.len + 3) - for c in text: - case c - of '\\': result.add "\\" - of '\L': result.add "\\n" - of '\C': discard - of '\t': result.add "\\t" - else: result.add c - -proc unquoteIndexColumn(text: string): string = - ## Returns the unquoted version generated by ``quoteIndexColumn``. - result = text.multiReplace(("\\t", "\t"), ("\\n", "\n"), ("\\\\", "\\")) - -proc setIndexTerm*(d: var RstGenerator, htmlFile, id, term: string, - linkTitle, linkDesc = "") = +proc setIndexTerm*(d: var RstGenerator; k: IndexEntryKind, htmlFile, id, term: string, + linkTitle, linkDesc = "", line = 0) = ## Adds a `term` to the index using the specified hyperlink identifier. ## ## A new entry will be added to the index using the format @@ -330,21 +346,8 @@ proc setIndexTerm*(d: var RstGenerator, htmlFile, id, term: string, ## <#writeIndexFile,RstGenerator,string>`_. The purpose of the index is ## documented in the `docgen tools guide ## <docgen.html#related-options-index-switch>`_. - var - entry = term - isTitle = false - entry.add('\t') - entry.add(htmlFile) - if id.len > 0: - entry.add('#') - entry.add(id) - else: - isTitle = true - if linkTitle.len > 0 or linkDesc.len > 0: - entry.add('\t' & linkTitle.quoteIndexColumn) - entry.add('\t' & linkDesc.quoteIndexColumn) - entry.add("\n") - + let (entry, isTitle) = formatIndexEntry(k, htmlFile, id, term, + linkTitle, linkDesc, line) if isTitle: d.theIndex.insert(entry) else: d.theIndex.add(entry) @@ -357,6 +360,15 @@ proc hash(n: PRstNode): int = result = result !& hash(n.sons[i]) result = !$result +proc htmlFileRelPath(d: PDoc): string = + if d.outDir.len == 0: + # /foo/bar/zoo.nim -> zoo.html + changeFileExt(extractFilename(d.filename), HtmlExt) + else: # d is initialized in docgen.nim + # outDir = /foo -\ + # destFile = /foo/bar/zoo.html -|-> bar/zoo.html + d.destFile.relativePath(d.outDir, '/') + proc renderIndexTerm*(d: PDoc, n: PRstNode, result: var string) = ## Renders the string decorated within \`foobar\`\:idx\: markers. ## @@ -373,18 +385,13 @@ proc renderIndexTerm*(d: PDoc, n: PRstNode, result: var string) = var term = "" renderAux(d, n, term) - setIndexTerm(d, changeFileExt(extractFilename(d.filename), HtmlExt), id, term, d.currentSection) - dispA(d.target, result, "<span id=\"$1\">$2</span>", "$2\\label{$1}", + setIndexTerm(d, ieIdxRole, + htmlFileRelPath(d), id, term, d.currentSection) + dispA(d.target, result, "<span id=\"$1\">$2</span>", "\\nimindexterm{$1}{$2}", [id, term]) type - IndexEntry = object - keyword: string - link: string - linkTitle: string ## contains a prettier text for the href - linkDesc: string ## the title attribute of the final href - - IndexedDocs = Table[IndexEntry, seq[IndexEntry]] ## \ + IndexedDocs* = Table[IndexEntry, seq[IndexEntry]] ## \ ## Contains the index sequences for doc types. ## ## The key is a *fake* IndexEntry which will contain the title of the @@ -394,21 +401,6 @@ type ## The value indexed by this IndexEntry is a sequence with the real index ## entries found in the ``.idx`` file. -proc cmp(a, b: IndexEntry): int = - ## Sorts two ``IndexEntry`` first by `keyword` field, then by `link`. - result = cmpIgnoreStyle(a.keyword, b.keyword) - if result == 0: - result = cmpIgnoreStyle(a.link, b.link) - -proc hash(x: IndexEntry): Hash = - ## Returns the hash for the combined fields of the type. 
- ## - ## The hash is computed as the chained hash of the individual string hashes. - result = x.keyword.hash !& x.link.hash - result = result !& x.linkTitle.hash - result = result !& x.linkDesc.hash - result = !$result - when defined(gcDestructors): template `<-`(a, b: var IndexEntry) = a = move(b) else: @@ -417,6 +409,7 @@ else: shallowCopy a.link, b.link shallowCopy a.linkTitle, b.linkTitle shallowCopy a.linkDesc, b.linkDesc + shallowCopy a.module, b.module proc sortIndex(a: var openArray[IndexEntry]) = # we use shellsort here; fast and simple @@ -456,16 +449,20 @@ proc generateSymbolIndex(symbols: seq[IndexEntry]): string = result = "<dl>" var i = 0 while i < symbols.len: - let keyword = symbols[i].keyword + let keyword = esc(outHtml, symbols[i].keyword) let cleanedKeyword = keyword.escapeLink result.addf("<dt><a name=\"$2\" href=\"#$2\"><span>$1:</span></a></dt><dd><ul class=\"simple\">\n", [keyword, cleanedKeyword]) var j = i - while j < symbols.len and keyword == symbols[j].keyword: + while j < symbols.len and symbols[i].keyword == symbols[j].keyword: let url = symbols[j].link.escapeLink - text = if symbols[j].linkTitle.len > 0: symbols[j].linkTitle else: url - desc = if symbols[j].linkDesc.len > 0: symbols[j].linkDesc else: "" + module = symbols[j].module + text = + if symbols[j].linkTitle.len > 0: + esc(outHtml, module & ": " & symbols[j].linkTitle) + else: url + desc = symbols[j].linkDesc if desc.len > 0: result.addf("""<li><a class="reference external" title="$3" data-doc-search-tag="$2" href="$1">$2</a></li> @@ -479,13 +476,6 @@ proc generateSymbolIndex(symbols: seq[IndexEntry]): string = i = j result.add("</dl>") -proc isDocumentationTitle(hyperlink: string): bool = - ## Returns true if the hyperlink is actually a documentation title. - ## - ## Documentation titles lack the hash. See `mergeIndexes() - ## <#mergeIndexes,string>`_ for a more detailed explanation. - result = hyperlink.find('#') < 0 - proc stripTocLevel(s: string): tuple[level: int, text: string] = ## Returns the *level* of the toc along with the text without it. for c in 0 ..< s.len: @@ -519,17 +509,15 @@ proc generateDocumentationToc(entries: seq[IndexEntry]): string = level = 1 levels.newSeq(entries.len) for entry in entries: - let (rawLevel, rawText) = stripTocLevel(entry.linkTitle or entry.keyword) + let (rawLevel, rawText) = stripTocLevel(entry.linkTitle) if rawLevel < 1: # This is a normal symbol, push it *inside* one level from the last one. levels[L].level = level + 1 - # Also, ignore the linkTitle and use directly the keyword. - levels[L].text = entry.keyword else: # The level did change, update the level indicator. level = rawLevel levels[L].level = rawLevel - levels[L].text = rawText + levels[L].text = rawText inc L # Now generate hierarchical lists based on the precalculated levels. @@ -560,7 +548,7 @@ proc generateDocumentationIndex(docs: IndexedDocs): string = for title in titles: let tocList = generateDocumentationToc(docs.getOrDefault(title)) result.add("<ul><li><a href=\"" & - title.link & "\">" & title.keyword & "</a>\n" & tocList & "</li></ul>\n") + title.link & "\">" & title.linkTitle & "</a>\n" & tocList & "</li></ul>\n") proc generateDocumentationJumps(docs: IndexedDocs): string = ## Returns a plain list of hyperlinks to documentation TOCs in HTML. 
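The `.idx` reading now delegates to `rstidx.parseIdxFile`, but the exported surface stays small. A sketch of merging per-module index files into the body of `theindex.html`; the directory name is illustrative only.

```nim
# Sketch: mergeIndexes scans a directory recursively for *.idx files
# (written earlier via writeIndexFile) and returns the combined index HTML.
# "htmldocs" is an example path, not something this patch prescribes.
import packages/docutils/rstgen

let indexBody = mergeIndexes("htmldocs")
echo indexBody.len, " bytes of generated index HTML"
```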
@@ -572,7 +560,7 @@ proc generateDocumentationJumps(docs: IndexedDocs): string = var chunks: seq[string] = @[] for title in titles: - chunks.add("<a href=\"" & title.link & "\">" & title.keyword & "</a>") + chunks.add("<a href=\"" & title.link & "\">" & title.linkTitle & "</a>") result.add(chunks.join(", ") & ".<br/>") @@ -586,7 +574,7 @@ proc generateModuleJumps(modules: seq[string]): string = result.add(chunks.join(", ") & ".<br/>") -proc readIndexDir(dir: string): +proc readIndexDir*(dir: string): tuple[modules: seq[string], symbols: seq[IndexEntry], docs: IndexedDocs] = ## Walks `dir` reading ``.idx`` files converting them in IndexEntry items. ## @@ -601,39 +589,12 @@ proc readIndexDir(dir: string): # Scan index files and build the list of symbols. for path in walkDirRec(dir): if path.endsWith(IndexExt): - var - fileEntries: seq[IndexEntry] - title: IndexEntry - f = 0 - newSeq(fileEntries, 500) - setLen(fileEntries, 0) - for line in lines(path): - let s = line.find('\t') - if s < 0: continue - setLen(fileEntries, f+1) - fileEntries[f].keyword = line.substr(0, s-1) - fileEntries[f].link = line.substr(s+1) - # See if we detect a title, a link without a `#foobar` trailing part. - if title.keyword.len == 0 and fileEntries[f].link.isDocumentationTitle: - title.keyword = fileEntries[f].keyword - title.link = fileEntries[f].link - - if fileEntries[f].link.find('\t') > 0: - let extraCols = fileEntries[f].link.split('\t') - fileEntries[f].link = extraCols[0] - assert extraCols.len == 3 - fileEntries[f].linkTitle = extraCols[1].unquoteIndexColumn - fileEntries[f].linkDesc = extraCols[2].unquoteIndexColumn - else: - fileEntries[f].linkTitle = "" - fileEntries[f].linkDesc = "" - inc f + var (fileEntries, title) = parseIdxFile(path) # Depending on type add this to the list of symbols or table of APIs. - if title.keyword.len == 0: - for i in 0 ..< f: - # Don't add to symbols TOC entries (they start with a whitespace). - let toc = fileEntries[i].linkTitle - if toc.len > 0 and toc[0] == ' ': + + if title.kind == ieNimTitle: + for i in 0 ..< fileEntries.len: + if fileEntries[i].kind != ieNim: continue # Ok, non TOC entry, add it. setLen(result.symbols, L + 1) @@ -643,16 +604,22 @@ proc readIndexDir(dir: string): var x = fileEntries[0].link let i = find(x, '#') if i > 0: - x = x.substr(0, i-1) + x.setLen(i) if i != 0: # don't add entries starting with '#' result.modules.add(x.changeFileExt("")) else: # Generate the symbolic anchor for index quickjumps. - title.linkTitle = "doc_toc_" & $result.docs.len + title.aux = "doc_toc_" & $result.docs.len result.docs[title] = fileEntries - sort(result.modules, system.cmp) + for i in 0 ..< fileEntries.len: + if fileEntries[i].kind != ieIdxRole: + continue + + setLen(result.symbols, L + 1) + result.symbols[L] = fileEntries[i] + inc L proc mergeIndexes*(dir: string): string = ## Merges all index files in `dir` and returns the generated index as HTML. @@ -683,6 +650,7 @@ proc mergeIndexes*(dir: string): string = ## Returns the merged and sorted indices into a single HTML block which can ## be further embedded into nimdoc templates. var (modules, symbols, docs) = readIndexDir(dir) + sort(modules, system.cmp) result = "" # Generate a quick jump list of documents. @@ -710,71 +678,36 @@ proc mergeIndexes*(dir: string): string = # ---------------------------------------------------------------------------- -proc stripTocHtml(s: string): string = - ## Ugly quick hack to remove HTML tags from TOC titles. - ## - ## A TocEntry.header field already contains rendered HTML tags. 
Instead of - ## implementing a proper version of renderRstToOut() which recursively - ## renders an rst tree to plain text, we simply remove text found between - ## angled brackets. Given the limited possibilities of rst inside TOC titles - ## this should be enough. - result = s - var first = result.find('<') - while first >= 0: - let last = result.find('>', first) - if last < 0: - # Abort, since we didn't found a closing angled bracket. - return - result.delete(first, last) - first = result.find('<', first) - proc renderHeadline(d: PDoc, n: PRstNode, result: var string) = var tmp = "" for i in countup(0, len(n) - 1): renderRstToOut(d, n.sons[i], tmp) d.currentSection = tmp - # Find the last higher level section for unique reference name - var sectionPrefix = "" - for i in countdown(d.tocPart.high, 0): - let n2 = d.tocPart[i].n - if n2.level < n.level: - sectionPrefix = rstnodeToRefname(n2) & "-" - break - var refname = sectionPrefix & rstnodeToRefname(n) + var tocName = esc(d.target, renderRstToText(n), escMode = emOption) + # for Latex: simple text without commands that may break TOC/hyperref if d.hasToc: - var length = len(d.tocPart) - setLen(d.tocPart, length + 1) - d.tocPart[length].refname = refname - d.tocPart[length].n = n - d.tocPart[length].header = tmp - - dispA(d.target, result, "\n<h$1><a class=\"toc-backref\" " & - "id=\"$2\" href=\"#$2\">$3</a></h$1>", "\\rsth$4{$3}\\label{$2}\n", - [$n.level, d.tocPart[length].refname, tmp, $chr(n.level - 1 + ord('A'))]) + d.tocPart.add n + dispA(d.target, result, "\n<h$1><a class=\"toc-backref\"" & + "$2 href=\"#$5\">$3</a></h$1>", "\\rsth$4[$6]{$3}$2\n", + [$n.level, n.anchor.idS, tmp, + $chr(n.level - 1 + ord('A')), n.anchor, tocName]) else: - dispA(d.target, result, "\n<h$1 id=\"$2\">$3</h$1>", - "\\rsth$4{$3}\\label{$2}\n", [ - $n.level, refname, tmp, - $chr(n.level - 1 + ord('A'))]) + dispA(d.target, result, "\n<h$1$2>$3</h$1>", + "\\rsth$4[$5]{$3}$2\n", [ + $n.level, n.anchor.idS, tmp, + $chr(n.level - 1 + ord('A')), tocName]) # Generate index entry using spaces to indicate TOC level for the output HTML. 
assert n.level >= 0 - let - htmlFileRelPath = if d.outDir.isEmpty(): - # /foo/bar/zoo.nim -> zoo.html - changeFileExt(extractFilename(d.filename), HtmlExt) - else: # d is initialized in docgen.nim - # outDir = /foo -\ - # destFile = /foo/bar/zoo.html -|-> bar/zoo.html - d.destFile.relativeTo(d.outDir, '/').string - setIndexTerm(d, htmlFileRelPath, refname, tmp.stripTocHtml, - spaces(max(0, n.level)) & tmp) + setIndexTerm(d, ieHeading, htmlFile = d.htmlFileRelPath, id = n.anchor, + term = n.addNodes, linkTitle = spaces(max(0, n.level)) & tmp) proc renderOverline(d: PDoc, n: PRstNode, result: var string) = - if d.meta[metaTitle].len == 0: + if n.level == 0 and d.meta[metaTitle].len == 0: + d.meta[metaTitleRaw] = n.addNodes for i in countup(0, len(n)-1): renderRstToOut(d, n.sons[i], d.meta[metaTitle]) d.currentSection = d.meta[metaTitle] - elif d.meta[metaSubtitle].len == 0: + elif n.level == 0 and d.meta[metaSubtitle].len == 0: for i in countup(0, len(n)-1): renderRstToOut(d, n.sons[i], d.meta[metaSubtitle]) d.currentSection = d.meta[metaSubtitle] @@ -782,21 +715,25 @@ proc renderOverline(d: PDoc, n: PRstNode, result: var string) = var tmp = "" for i in countup(0, len(n) - 1): renderRstToOut(d, n.sons[i], tmp) d.currentSection = tmp - dispA(d.target, result, "<h$1 id=\"$2\"><center>$3</center></h$1>", - "\\rstov$4{$3}\\label{$2}\n", [$n.level, - rstnodeToRefname(n), tmp, $chr(n.level - 1 + ord('A'))]) - - -proc renderTocEntry(d: PDoc, e: TocEntry, result: var string) = + var tocName = esc(d.target, renderRstToText(n), escMode=emOption) + dispA(d.target, result, "<h$1$2><center>$3</center></h$1>", + "\\rstov$4[$5]{$3}$2\n", [$n.level, + n.anchor.idS, tmp, $chr(n.level - 1 + ord('A')), tocName]) + setIndexTerm(d, ieHeading, htmlFile = d.htmlFileRelPath, id = n.anchor, + term = n.addNodes, linkTitle = spaces(max(0, n.level)) & tmp) + +proc renderTocEntry(d: PDoc, n: PRstNode, result: var string) = + var header = "" + for i in countup(0, len(n) - 1): renderRstToOut(d, n.sons[i], header) dispA(d.target, result, "<li><a class=\"reference\" id=\"$1_toc\" href=\"#$1\">$2</a></li>\n", - "\\item\\label{$1_toc} $2\\ref{$1}\n", [e.refname, e.header]) + "\\item\\label{$1_toc} $2\\ref{$1}\n", [n.anchor, header]) proc renderTocEntries*(d: var RstGenerator, j: var int, lvl: int, result: var string) = var tmp = "" while j <= high(d.tocPart): - var a = abs(d.tocPart[j].n.level) + var a = abs(d.tocPart[j].level) if a == lvl: renderTocEntry(d, d.tocPart[j], tmp) inc(j) @@ -842,14 +779,28 @@ proc renderImage(d: PDoc, n: PRstNode, result: var string) = if arg.endsWith(".mp4") or arg.endsWith(".ogg") or arg.endsWith(".webm"): htmlOut = """ - <video src="$1"$2 autoPlay='true' loop='true' muted='true'> + <video$3 src="$1"$2 autoPlay='true' loop='true' muted='true'> Sorry, your browser doesn't support embedded videos </video> """ else: - htmlOut = "<img src=\"$1\"$2/>" - dispA(d.target, result, htmlOut, "\\includegraphics$2{$1}", - [esc(d.target, arg), options]) + htmlOut = "<img$3 src=\"$1\"$2/>" + + # support for `:target:` links for images: + var target = esc(d.target, getFieldValue(n, "target").strip(), escMode=emUrl) + discard safeProtocol(target) + + if target.len > 0: + # `htmlOut` needs to be of the following format for link to work for images: + # <a class="reference external" href="target"><img src=\"$1\"$2/></a> + var htmlOutWithLink = "" + dispA(d.target, htmlOutWithLink, + "<a class=\"reference external\" href=\"$2\">$1</a>", + "\\href{$2}{$1}", [htmlOut, target]) + htmlOut = htmlOutWithLink + + 
dispA(d.target, result, htmlOut, "$3\\includegraphics$2{$1}", + [esc(d.target, arg), options, n.anchor.idS]) if len(n) >= 3: renderRstToOut(d, n.sons[2], result) proc renderSmiley(d: PDoc, n: PRstNode, result: var string) = @@ -859,6 +810,25 @@ proc renderSmiley(d: PDoc, n: PRstNode, result: var string) = "\\includegraphics{$1}", [d.config.getOrDefault"doc.smiley_format" % n.text]) +proc getField1Int(d: PDoc, n: PRstNode, fieldName: string): int = + template err(msg: string) = + rstMessage(d.filenames, d.msgHandler, n.info, meInvalidField, msg) + let value = n.getFieldValue + var number: int + let nChars = parseInt(value, number) + if nChars == 0: + if value.len == 0: + # use a good default value: + result = 1 + else: + err("field $1 requires an integer, but '$2' was given" % + [fieldName, value]) + elif nChars < value.len: + err("extra arguments were given to $1: '$2'" % + [fieldName, value[nChars..^1]]) + else: + result = number + proc parseCodeBlockField(d: PDoc, n: PRstNode, params: var CodeBlockParams) = ## Parses useful fields which can appear before a code block. ## @@ -868,9 +838,7 @@ proc parseCodeBlockField(d: PDoc, n: PRstNode, params: var CodeBlockParams) = of "number-lines": params.numberLines = true # See if the field has a parameter specifying a different line than 1. - var number: int - if parseInt(n.getFieldValue, number) > 0: - params.startLine = number + params.startLine = getField1Int(d, n, "number-lines") of "file", "filename": # The ``file`` option is a Nim extension to the official spec, it acts # like it would for other directives like ``raw`` or ``cvs-table``. This @@ -881,18 +849,20 @@ proc parseCodeBlockField(d: PDoc, n: PRstNode, params: var CodeBlockParams) = of "test": params.testCmd = n.getFieldValue.strip if params.testCmd.len == 0: - params.testCmd = "$nim r --backend:$backend $options" # see `interpSnippetCmd` + # factor with D20210224T221756. Note that `$docCmd` should appear before `$file` + # but after all other options, but currently `$options` merges both options and `$file` so it's tricky. + params.testCmd = "$nim r --backend:$backend --lib:$libpath $docCmd $options" else: + # consider whether `$docCmd` should be appended here too params.testCmd = unescape(params.testCmd) of "status", "exitcode": - var status: int - if parseInt(n.getFieldValue, status) > 0: - params.status = status + params.status = getField1Int(d, n, n.getArgument) of "default-language": params.langStr = n.getFieldValue.strip params.lang = params.langStr.getSourceLanguage else: - d.msgHandler(d.filename, 1, 0, mwUnsupportedField, n.getArgument) + rstMessage(d.filenames, d.msgHandler, n.info, mwUnsupportedField, + n.getArgument) proc parseCodeBlockParams(d: PDoc, n: PRstNode): CodeBlockParams = ## Iterates over all code block fields and returns processed params. @@ -902,8 +872,7 @@ proc parseCodeBlockParams(d: PDoc, n: PRstNode): CodeBlockParams = result.init if n.isNil: return - assert n.kind == rnCodeBlock - assert(not n.sons[2].isNil) + assert n.kind in {rnCodeBlock, rnInlineCode} # Parse the field list for rendering parameters if there are any. 
if not n.sons[1].isNil: @@ -914,7 +883,8 @@ proc parseCodeBlockParams(d: PDoc, n: PRstNode): CodeBlockParams = if result.langStr != "": result.lang = getSourceLanguage(result.langStr) -proc buildLinesHtmlTable(d: PDoc; params: CodeBlockParams, code: string): +proc buildLinesHtmlTable(d: PDoc; params: CodeBlockParams, code: string, + idStr: string): tuple[beginTable, endTable: string] = ## Returns the necessary tags to start/end a code block in HTML. ## @@ -926,13 +896,14 @@ proc buildLinesHtmlTable(d: PDoc; params: CodeBlockParams, code: string): let id = $d.listingCounter if not params.numberLines: result = (d.config.getOrDefault"doc.listing_start" % - [id, sourceLanguageToStr[params.lang]], + [id, sourceLanguageToStr[params.lang], idStr], d.config.getOrDefault"doc.listing_end" % id) return var codeLines = code.strip.countLines assert codeLines > 0 - result.beginTable = """<table class="line-nums-table"><tbody><tr><td class="blob-line-nums"><pre class="line-nums">""" + result.beginTable = """<table$1 class="line-nums-table">""" % [idStr] & + """<tbody><tr><td class="blob-line-nums"><pre class="line-nums">""" var line = params.startLine while codeLines > 0: result.beginTable.add($line & "\n") @@ -940,13 +911,32 @@ proc buildLinesHtmlTable(d: PDoc; params: CodeBlockParams, code: string): codeLines.dec result.beginTable.add("</pre></td><td>" & ( d.config.getOrDefault"doc.listing_start" % - [id, sourceLanguageToStr[params.lang]])) + [id, sourceLanguageToStr[params.lang], idStr])) result.endTable = (d.config.getOrDefault"doc.listing_end" % id) & "</td></tr></tbody></table>" & ( d.config.getOrDefault"doc.listing_button" % id) -proc renderCodeBlock(d: PDoc, n: PRstNode, result: var string) = - ## Renders a code block, appending it to `result`. +proc renderCodeLang*(result: var string, lang: SourceLanguage, code: string, + target: OutputTarget) = + var g: GeneralTokenizer + initGeneralTokenizer(g, code) + while true: + getNextToken(g, lang) + case g.kind + of gtEof: break + of gtNone, gtWhitespace: + add(result, substr(code, g.start, g.length + g.start - 1)) + else: + dispA(target, result, "<span class=\"$2\">$1</span>", "\\span$2{$1}", [ + esc(target, substr(code, g.start, g.length+g.start-1)), + tokenClassToStr[g.kind]]) + deinitGeneralTokenizer(g) + +proc renderNimCode*(result: var string, code: string, target: OutputTarget) = + renderCodeLang(result, langNim, code, target) + +proc renderCode(d: PDoc, n: PRstNode, result: var string) {.gcsafe.} = + ## Renders a code (code block or inline code), appending it to `result`. ## ## If the code block uses the ``number-lines`` option, a table will be ## generated with two columns, the first being a list of numbers and the @@ -955,37 +945,40 @@ proc renderCodeBlock(d: PDoc, n: PRstNode, result: var string) = ## may also come from the parser through the internal ``default-language`` ## option to differentiate between a plain code block and Nim's code block ## extension. 
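Since `renderCodeLang`/`renderNimCode` are now exported, highlighting can be reused outside `renderCode`. A short sketch; the span class names come from `tokenClassToStr` in `highlite`.

```nim
# Sketch using the newly exported highlighting helpers.
import packages/docutils/rstgen

var buf = ""
renderNimCode(buf, "let x = 42", outHtml)
echo buf   # HTML wrapped in <span class="Keyword">, <span class="DecNumber">, ... spans
```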
- assert n.kind == rnCodeBlock - if n.sons[2] == nil: return + assert n.kind in {rnCodeBlock, rnInlineCode} var params = d.parseCodeBlockParams(n) + if n.sons[2] == nil: return var m = n.sons[2].sons[0] assert m.kind == rnLeaf if params.testCmd.len > 0 and d.onTestSnippet != nil: d.onTestSnippet(d, params.filename, params.testCmd, params.status, m.text) - let (blockStart, blockEnd) = buildLinesHtmlTable(d, params, m.text) - - dispA(d.target, result, blockStart, "\\begin{rstpre}\n", []) + var blockStart, blockEnd: string + case d.target + of outHtml: + if n.kind == rnCodeBlock: + (blockStart, blockEnd) = buildLinesHtmlTable(d, params, m.text, + n.anchor.idS) + else: # rnInlineCode + blockStart = "<tt class=\"docutils literal\"><span class=\"pre\">" + blockEnd = "</span></tt>" + of outLatex: + if n.kind == rnCodeBlock: + blockStart = "\n\n" & n.anchor.idS & "\\begin{rstpre}\n" + blockEnd = "\n\\end{rstpre}\n\n" + else: # rnInlineCode + blockStart = "\\rstcode{" + blockEnd = "}" + dispA(d.target, result, blockStart, blockStart, []) if params.lang == langNone: - if len(params.langStr) > 0: - d.msgHandler(d.filename, 1, 0, mwUnsupportedLanguage, params.langStr) - for letter in m.text: escChar(d.target, result, letter) + if len(params.langStr) > 0 and params.langStr.toLowerAscii != "none": + rstMessage(d.filenames, d.msgHandler, n.info, mwUnsupportedLanguage, + params.langStr) + for letter in m.text: escChar(d.target, result, letter, emText) else: - var g: GeneralTokenizer - initGeneralTokenizer(g, m.text) - while true: - getNextToken(g, params.lang) - case g.kind - of gtEof: break - of gtNone, gtWhitespace: - add(result, substr(m.text, g.start, g.length + g.start - 1)) - else: - dispA(d.target, result, "<span class=\"$2\">$1</span>", "\\span$2{$1}", [ - esc(d.target, substr(m.text, g.start, g.length+g.start-1)), - tokenClassToStr[g.kind]]) - deinitGeneralTokenizer(g) - dispA(d.target, result, blockEnd, "\n\\end{rstpre}\n") + renderCodeLang(result, params.lang, m.text, d.target) + dispA(d.target, result, blockEnd, blockEnd) proc renderContainer(d: PDoc, n: PRstNode, result: var string) = var tmp = "" @@ -996,10 +989,6 @@ proc renderContainer(d: PDoc, n: PRstNode, result: var string) = else: dispA(d.target, result, "<div class=\"$1\">$2</div>", "$2", [arg, tmp]) -proc texColumns(n: PRstNode): string = - result = "" - for i in countup(1, len(n)): add(result, "|X") - proc renderField(d: PDoc, n: PRstNode, result: var string) = var b = false if d.target == outLatex: @@ -1017,41 +1006,150 @@ proc renderField(d: PDoc, n: PRstNode, result: var string) = if not b: renderAux(d, n, "<tr>$1</tr>\n", "$1", result) +proc renderEnumList(d: PDoc, n: PRstNode, result: var string) = + var + specifier = "" + specStart = "" + i1 = 0 + pre = "" + i2 = n.labelFmt.len - 1 + post = "" + if n.labelFmt[0] == '(': + i1 = 1 + pre = "(" + if n.labelFmt[^1] == ')' or n.labelFmt[^1] == '.': + i2 = n.labelFmt.len - 2 + post = $n.labelFmt[^1] + let enumR = i1 .. i2 # enumerator range without surrounding (, ), . 
+ if d.target == outLatex: + result.add ("\n%" & n.labelFmt & "\n") + # use enumerate parameters from package enumitem + if n.labelFmt[i1].isDigit: + var labelDef = "" + if pre != "" or post != "": + labelDef = "label=" & pre & "\\arabic*" & post & "," + if n.labelFmt[enumR] != "1": + specStart = "start=$1" % [n.labelFmt[enumR]] + if labelDef != "" or specStart != "": + specifier = "[$1$2]" % [labelDef, specStart] + else: + let (first, labelDef) = + if n.labelFmt[i1].isUpperAscii: ('A', "label=" & pre & "\\Alph*" & post) + else: ('a', "label=" & pre & "\\alph*" & post) + if n.labelFmt[i1] != first: + specStart = ",start=" & $(ord(n.labelFmt[i1]) - ord(first) + 1) + specifier = "[$1$2]" % [labelDef, specStart] + else: # HTML + # TODO: implement enumerator formatting using pre and post ( and ) for HTML + if n.labelFmt[i1].isDigit: + if n.labelFmt[enumR] != "1": + specStart = " start=\"$1\"" % [n.labelFmt[enumR]] + specifier = "class=\"simple\"" & specStart + else: + let (first, labelDef) = + if n.labelFmt[i1].isUpperAscii: ('A', "class=\"upperalpha simple\"") + else: ('a', "class=\"loweralpha simple\"") + if n.labelFmt[i1] != first: + specStart = " start=\"$1\"" % [ $(ord(n.labelFmt[i1]) - ord(first) + 1) ] + specifier = labelDef & specStart + renderAux(d, n, "<ol$2 " & specifier & ">$1</ol>\n", + "\\begin{enumerate}" & specifier & "$2$1\\end{enumerate}\n", + result) + +proc renderAdmonition(d: PDoc, n: PRstNode, result: var string) = + var + htmlCls = "admonition_warning" + texSz = "\\large" + texColor = "orange" + case n.adType + of "hint", "note", "tip": + htmlCls = "admonition-info"; texSz = "\\normalsize"; texColor = "green" + of "attention", "admonition", "important", "warning", "caution": + htmlCls = "admonition-warning"; texSz = "\\large"; texColor = "orange" + of "danger", "error": + htmlCls = "admonition-error"; texSz = "\\Large"; texColor = "red" + else: discard + let txt = n.adType.capitalizeAscii() + let htmlHead = "<div class=\"admonition " & htmlCls & "\">" + renderAux(d, n, + htmlHead & "<span$2 class=\"" & htmlCls & "-text\"><b>" & txt & + ":</b></span>\n" & "$1</div>\n", + "\n\n\\begin{rstadmonition}[borderline west={0.2em}{0pt}{" & + texColor & "}]$2\n" & + "{" & texSz & "\\color{" & texColor & "}{\\textbf{" & txt & ":}}} " & + "$1\n\\end{rstadmonition}\n", + result) + +proc renderHyperlink(d: PDoc, text, link: PRstNode, result: var string, + external: bool, nimdoc = false, tooltip="") = + var linkStr = "" + block: + let mode = d.escMode + d.escMode = emUrl + renderRstToOut(d, link, linkStr) + d.escMode = mode + discard safeProtocol(linkStr) + var textStr = "" + renderRstToOut(d, text, textStr) + let nimDocStr = if nimdoc: " nimdoc" else: "" + var tooltipStr = "" + if tooltip != "": + tooltipStr = """ title="$1"""" % [ esc(d.target, tooltip) ] + if external: + dispA(d.target, result, + "<a class=\"reference external$3\"$4 href=\"$2\">$1</a>", + "\\href{$2}{$1}", [textStr, linkStr, nimDocStr, tooltipStr]) + else: + dispA(d.target, result, + "<a class=\"reference internal$3\"$4 href=\"#$2\">$1</a>", + "\\hyperlink{$2}{$1} (p.~\\pageref{$2})", + [textStr, linkStr, nimDocStr, tooltipStr]) + +proc traverseForIndex*(d: PDoc, n: PRstNode) = + ## A version of [renderRstToOut] that only fills entries for ``.idx`` files. 
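`renderAdmonition` above maps the admonition name onto one of three CSS classes (`admonition-info`, `admonition-warning`, `admonition-error`) and a matching LaTeX color. One way to inspect the generated markup is through the public `rstToHtml` helper defined further down in this file; a sketch, with the class names taken from the mapping above:

import packages/docutils/rstgen
import std/strtabs

let src = """
.. note:: note, hint and tip render with the admonition-info class.

.. danger:: danger and error render with the admonition-error class.
"""
# Each directive becomes a <div class="admonition admonition-..."> wrapper
# with a bold "Note:"/"Danger:" label span in front of the body text.
echo rstToHtml(src, {}, newStringTable(modeStyleInsensitive))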
+ var discarded: string + if n == nil: return + case n.kind + of rnIdx: renderIndexTerm(d, n, discarded) + of rnHeadline, rnMarkdownHeadline: renderHeadline(d, n, discarded) + of rnOverline: renderOverline(d, n, discarded) + else: + for i in 0 ..< len(n): + traverseForIndex(d, n.sons[i]) + proc renderRstToOut(d: PDoc, n: PRstNode, result: var string) = if n == nil: return case n.kind of rnInner: renderAux(d, n, result) - of rnHeadline: renderHeadline(d, n, result) + of rnHeadline, rnMarkdownHeadline: renderHeadline(d, n, result) of rnOverline: renderOverline(d, n, result) - of rnTransition: renderAux(d, n, "<hr />\n", "\\hrule\n", result) - of rnParagraph: renderAux(d, n, "<p>$1</p>\n", "$1\n\n", result) + of rnTransition: renderAux(d, n, "<hr$2 />\n", "\n\n\\vspace{0.6em}\\hrule$2\n", result) + of rnParagraph: renderAux(d, n, "<p$2>$1</p>\n", "\n\n$2\n$1\n\n", result) of rnBulletList: - renderAux(d, n, "<ul class=\"simple\">$1</ul>\n", - "\\begin{itemize}$1\\end{itemize}\n", result) + renderAux(d, n, "<ul$2 class=\"simple\">$1</ul>\n", + "\\begin{itemize}\n$2\n$1\\end{itemize}\n", result) of rnBulletItem, rnEnumItem: - renderAux(d, n, "<li>$1</li>\n", "\\item $1\n", result) - of rnEnumList: - renderAux(d, n, "<ol class=\"simple\">$1</ol>\n", - "\\begin{enumerate}$1\\end{enumerate}\n", result) - of rnDefList: - renderAux(d, n, "<dl class=\"docutils\">$1</dl>\n", - "\\begin{description}$1\\end{description}\n", result) + renderAux(d, n, "<li$2>$1</li>\n", "\\item $2$1\n", result) + of rnEnumList: renderEnumList(d, n, result) + of rnDefList, rnMdDefList: + renderAux(d, n, "<dl$2 class=\"docutils\">$1</dl>\n", + "\\begin{description}\n$2\n$1\\end{description}\n", result) of rnDefItem: renderAux(d, n, result) - of rnDefName: renderAux(d, n, "<dt>$1</dt>\n", "\\item[$1] ", result) - of rnDefBody: renderAux(d, n, "<dd>$1</dd>\n", "$1\n", result) + of rnDefName: renderAux(d, n, "<dt$2>$1</dt>\n", "$2\\item[$1]\\ ", result) + of rnDefBody: renderAux(d, n, "<dd$2>$1</dd>\n", "$2\n$1\n", result) of rnFieldList: var tmp = "" for i in countup(0, len(n) - 1): renderRstToOut(d, n.sons[i], tmp) if tmp.len != 0: dispA(d.target, result, - "<table class=\"docinfo\" frame=\"void\" rules=\"none\">" & + "<table$2 class=\"docinfo\" frame=\"void\" rules=\"none\">" & "<col class=\"docinfo-name\" />" & "<col class=\"docinfo-content\" />" & "<tbody valign=\"top\">$1" & "</tbody></table>", - "\\begin{description}$1\\end{description}\n", - [tmp]) + "\\begin{description}\n$2\n$1\\end{description}\n", + [tmp, n.anchor.idS]) of rnField: renderField(d, n, result) of rnFieldName: renderAux(d, n, "<th class=\"docinfo-name\">$1:</th>", @@ -1061,74 +1159,159 @@ proc renderRstToOut(d: PDoc, n: PRstNode, result: var string) = of rnIndex: renderRstToOut(d, n.sons[2], result) of rnOptionList: - renderAux(d, n, "<table frame=\"void\">$1</table>", - "\\begin{description}\n$1\\end{description}\n", result) + renderAux(d, n, "<div$2 class=\"option-list\">$1</div>", + "\\begin{rstoptlist}$2\n$1\\end{rstoptlist}", result) of rnOptionListItem: - renderAux(d, n, "<tr>$1</tr>\n", "$1", result) + var addclass = if n.order mod 2 == 1: " odd" else: "" + renderAux(d, n, + "<div class=\"option-list-item" & addclass & "\">$1</div>\n", + "$1", result) of rnOptionGroup: - renderAux(d, n, "<th align=\"left\">$1</th>", "\\item[$1]", result) + renderAux(d, n, + "<div class=\"option-list-label\"><tt><span class=\"option\">" & + "$1</span></tt></div>", + "\\item[\\rstcodeitem{\\spanoption{$1}}]", result) of rnDescription: - renderAux(d, n, "<td 
align=\"left\">$1</td>\n", " $1\n", result) + renderAux(d, n, "<div class=\"option-list-description\">$1</div>", + " $1\n", result) of rnOption, rnOptionString, rnOptionArgument: - doAssert false, "renderRstToOut" + raiseAssert "renderRstToOut" of rnLiteralBlock: - renderAux(d, n, "<pre>$1</pre>\n", - "\\begin{rstpre}\n$1\n\\end{rstpre}\n", result) - of rnQuotedLiteralBlock: - doAssert false, "renderRstToOut" + renderAux(d, n, "<pre$2>$1</pre>\n", + "\n\n$2\\begin{rstpre}\n$1\n\\end{rstpre}\n\n", result) + of rnMarkdownBlockQuote: + d.curQuotationDepth = 1 + var tmp = "" + renderAux(d, n, "$1", "$1", tmp) + let itemEnding = + if d.target == outHtml: "</blockquote>" else: "\\end{rstquote}" + tmp.add itemEnding.repeat(d.curQuotationDepth - 1) + dispA(d.target, result, + "<blockquote$2 class=\"markdown-quote\">$1</blockquote>\n", + "\n\\begin{rstquote}\n$2\n$1\\end{rstquote}\n", [tmp, n.anchor.idS]) + of rnMarkdownBlockQuoteItem: + let addQuotationDepth = n.quotationDepth - d.curQuotationDepth + var itemPrefix: string # start or ending (quotation grey bar on the left) + if addQuotationDepth >= 0: + let s = + if d.target == outHtml: "<blockquote class=\"markdown-quote\">" + else: "\\begin{rstquote}" + itemPrefix = s.repeat(addQuotationDepth) + else: + let s = + if d.target == outHtml: "</blockquote>" + else: "\\end{rstquote}" + itemPrefix = s.repeat(-addQuotationDepth) + renderAux(d, n, itemPrefix & "<p>$1</p>", itemPrefix & "\n$1", result) + d.curQuotationDepth = n.quotationDepth of rnLineBlock: - renderAux(d, n, "<p>$1</p>", "$1\n\n", result) + if n.sons.len == 1 and n.sons[0].lineIndent == "\n": + # whole line block is one empty line, no need to add extra spacing + renderAux(d, n, "<p$2>$1</p> ", "\n\n$2\n$1", result) + else: # add extra spacing around the line block for Latex + renderAux(d, n, "<p$2>$1</p>", + "\n\\vspace{0.5em}$2\n$1\\vspace{0.5em}\n", result) of rnLineBlockItem: - renderAux(d, n, "$1<br />", "$1\\\\\n", result) + if n.lineIndent.len == 0: # normal case - no additional indentation + renderAux(d, n, "$1<br/>", "\\noindent $1\n\n", result) + elif n.lineIndent == "\n": # add one empty line + renderAux(d, n, "<br/>", "\\vspace{1em}\n", result) + else: # additional indentation w.r.t. 
'| ' + let indent = $(0.5 * (n.lineIndent.len - 1).toFloat) & "em" + renderAux(d, n, + "<span style=\"margin-left: " & indent & "\">$1</span><br/>", + "\\noindent\\hspace{" & indent & "}$1\n\n", result) of rnBlockQuote: - renderAux(d, n, "<blockquote><p>$1</p></blockquote>\n", - "\\begin{quote}$1\\end{quote}\n", result) - of rnTable, rnGridTable: + renderAux(d, n, "<blockquote$2><p>$1</p></blockquote>\n", + "\\begin{quote}\n$2\n$1\\end{quote}\n", result) + of rnAdmonition: renderAdmonition(d, n, result) + of rnTable, rnGridTable, rnMarkdownTable: renderAux(d, n, - "<table border=\"1\" class=\"docutils\">$1</table>", - "\\begin{table}\\begin{rsttab}{" & - texColumns(n) & "|}\n\\hline\n$1\\end{rsttab}\\end{table}", result) + "<table$2 border=\"1\" class=\"docutils\">$1</table>", + "\n$2\n\\begin{rsttab}{" & + "L".repeat(n.colCount) & "}\n\\toprule\n$1" & + "\\addlinespace[0.1em]\\bottomrule\n\\end{rsttab}", result) of rnTableRow: if len(n) >= 1: - if d.target == outLatex: - #var tmp = "" - renderRstToOut(d, n.sons[0], result) - for i in countup(1, len(n) - 1): - result.add(" & ") - renderRstToOut(d, n.sons[i], result) - result.add("\\\\\n\\hline\n") - else: + case d.target + of outHtml: result.add("<tr>") renderAux(d, n, result) result.add("</tr>\n") - of rnTableDataCell: - renderAux(d, n, "<td>$1</td>", "$1", result) - of rnTableHeaderCell: - renderAux(d, n, "<th>$1</th>", "\\textbf{$1}", result) - of rnLabel: - doAssert false, "renderRstToOut" # used for footnotes and other - of rnFootnote: - doAssert false, "renderRstToOut" # a footnote - of rnCitation: - doAssert false, "renderRstToOut" # similar to footnote - of rnRef: - var tmp = "" - renderAux(d, n, tmp) + of outLatex: + if n.sons[0].kind == rnTableHeaderCell: + result.add "\\rowcolor{gray!15} " + var spanLines: seq[(int, int)] + var nCell = 0 + for uCell in 0 .. 
n.len - 1: + renderRstToOut(d, n.sons[uCell], result) + if n.sons[uCell].span > 0: + spanLines.add (nCell + 1, nCell + n.sons[uCell].span) + nCell += n.sons[uCell].span + else: + nCell += 1 + if uCell != n.len - 1: + result.add(" & ") + result.add("\\\\") + if n.endsHeader: result.add("\\midrule\n") + for (start, stop) in spanLines: + result.add("\\cmidrule(lr){$1-$2}" % [$start, $stop]) + result.add("\n") + of rnTableHeaderCell, rnTableDataCell: + case d.target + of outHtml: + let tag = if n.kind == rnTableHeaderCell: "th" else: "td" + var spanSpec: string + if n.span <= 1: spanSpec = "" + else: + spanSpec = " colspan=\"" & $n.span & "\" style=\"text-align: center\"" + renderAux(d, n, "<$1$2>$$1</$1>" % [tag, spanSpec], "", result) + of outLatex: + let text = if n.kind == rnTableHeaderCell: "\\textbf{$1}" else: "$1" + var latexStr: string + if n.span <= 1: latexStr = text + else: latexStr = "\\multicolumn{" & $n.span & "}{c}{" & text & "}" + renderAux(d, n, "", latexStr, result) + of rnFootnoteGroup: + renderAux(d, n, + "<hr class=\"footnote\">" & + "<div class=\"footnote-group\">\n$1</div>\n", + "\n\n\\noindent\\rule{0.25\\linewidth}{.4pt}\n" & + "\\begin{rstfootnote}\n$1\\end{rstfootnote}\n\n", + result) + of rnFootnote, rnCitation: + var mark = "" + renderAux(d, n.sons[0], mark) + var body = "" + renderRstToOut(d, n.sons[1], body) dispA(d.target, result, - "<a class=\"reference external\" href=\"#$2\">$1</a>", - "$1\\ref{$2}", [tmp, rstnodeToRefname(n)]) + "<div$2><div class=\"footnote-label\">" & + "<sup><strong><a href=\"#$4\">[$3]</a></strong></sup>" & + "</div>   $1\n</div>\n", + "\\item[\\textsuperscript{[$3]}]$2 $1\n", + [body, n.anchor.idS, mark, n.anchor]) + of rnPandocRef: + renderHyperlink(d, text=n.sons[0], link=n.sons[1], result, external=false) + of rnRstRef: + renderHyperlink(d, text=n.sons[0], link=n.sons[0], result, external=false) of rnStandaloneHyperlink: - renderAux(d, n, - "<a class=\"reference external\" href=\"$1\">$1</a>", - "\\href{$1}{$1}", result) + renderHyperlink(d, text=n.sons[0], link=n.sons[0], result, external=true) + of rnInternalRef: + renderHyperlink(d, text=n.sons[0], link=n.sons[1], result, external=false) + of rnNimdocRef: + renderHyperlink(d, text=n.sons[0], link=n.sons[1], result, external=false, + nimdoc=true, tooltip=n.tooltip) of rnHyperlink: - var tmp0 = "" - var tmp1 = "" - renderRstToOut(d, n.sons[0], tmp0) - renderRstToOut(d, n.sons[1], tmp1) + renderHyperlink(d, text=n.sons[0], link=n.sons[1], result, external=true) + of rnFootnoteRef: + var tmp = "[" + renderAux(d, n.sons[0], tmp) + tmp.add "]" dispA(d.target, result, - "<a class=\"reference external\" href=\"$2\">$1</a>", - "\\href{$2}{$1}", [tmp0, tmp1]) + "<sup><strong><a class=\"reference internal\" href=\"#$2\">" & + "$1</a></strong></sup>", + "\\textsuperscript{\\hyperlink{$2}{\\textbf{$1}}}", + [tmp, n.sons[1].text]) of rnDirArg, rnRaw: renderAux(d, n, result) of rnRawHtml: if d.target != outLatex and not lastSon(n).isNil: @@ -1138,19 +1321,28 @@ proc renderRstToOut(d: PDoc, n: PRstNode, result: var string) = result.add addNodes(lastSon(n)) of rnImage, rnFigure: renderImage(d, n, result) - of rnCodeBlock: renderCodeBlock(d, n, result) + of rnCodeBlock, rnInlineCode: renderCode(d, n, result) of rnContainer: renderContainer(d, n, result) of rnSubstitutionReferences, rnSubstitutionDef: renderAux(d, n, "|$1|", "|$1|", result) of rnDirective: renderAux(d, n, "", "", result) - of rnGeneralRole: + of rnUnknownRole, rnCodeFragment: var tmp0 = "" var tmp1 = "" renderRstToOut(d, n.sons[0], 
tmp0) renderRstToOut(d, n.sons[1], tmp1) - dispA(d.target, result, "<span class=\"$2\">$1</span>", "\\span$2{$1}", - [tmp0, tmp1]) + var class = tmp1 + # don't allow missing role break latex compilation: + if d.target == outLatex and n.kind == rnUnknownRole: class = "Other" + if n.kind == rnCodeFragment: + dispA(d.target, result, + "<tt class=\"docutils literal\"><span class=\"pre $2\">" & + "$1</span></tt>", + "\\rstcode{\\span$2{$1}}", [tmp0, class]) + else: # rnUnknownRole, not necessarily code/monospace font + dispA(d.target, result, "<span class=\"$2\">$1</span>", "\\span$2{$1}", + [tmp0, class]) of rnSub: renderAux(d, n, "<sub>$1</sub>", "\\rstsub{$1}", result) of rnSup: renderAux(d, n, "<sup>$1</sup>", "\\rstsup{$1}", result) of rnEmphasis: renderAux(d, n, "<em>$1</em>", "\\emph{$1}", result) @@ -1164,13 +1356,22 @@ proc renderRstToOut(d: PDoc, n: PRstNode, result: var string) = of rnInlineLiteral, rnInterpretedText: renderAux(d, n, "<tt class=\"docutils literal\"><span class=\"pre\">$1</span></tt>", - "\\texttt{$1}", result) + "\\rstcode{$1}", result) + of rnInlineTarget: + var tmp = "" + renderAux(d, n, tmp) + dispA(d.target, result, + "<span class=\"target\" id=\"$2\">$1</span>", + "\\label{$2}\\hypertarget{$2}{$1}", + [tmp, rstnodeToRefname(n)]) of rnSmiley: renderSmiley(d, n, result) - of rnLeaf: result.add(esc(d.target, n.text)) + of rnLeaf: result.add(esc(d.target, n.text, escMode=d.escMode)) of rnContents: d.hasToc = true + of rnDefaultRole: discard of rnTitle: d.meta[metaTitle] = "" renderRstToOut(d, n.sons[0], d.meta[metaTitle]) + d.meta[metaTitleRaw] = n.sons[0].addNodes # ----------------------------------------------------------------------------- @@ -1300,7 +1501,7 @@ $moduledesc $content </div> """) - setConfigVar("doc.listing_start", "<pre class = \"listing\">") + setConfigVar("doc.listing_start", "<pre$3 class = \"listing\">") setConfigVar("doc.listing_end", "</pre>") setConfigVar("doc.listing_button", "</pre>") setConfigVar("doc.body_no_toc", "$moduledesc $content") @@ -1310,7 +1511,8 @@ $content # ---------- forum --------------------------------------------------------- proc rstToHtml*(s: string, options: RstParseOptions, - config: StringTableRef): string = + config: StringTableRef, + msgHandler: MsgHandler = rst.defaultMsgHandler): string {.gcsafe.} = ## Converts an input rst string into embeddable HTML. ## ## This convenience proc parses any input string using rst markup (it doesn't @@ -1320,12 +1522,13 @@ proc rstToHtml*(s: string, options: RstParseOptions, ## work. For an explanation of the ``config`` parameter see the ## ``initRstGenerator`` proc. Example: ## - ## .. code-block:: nim + ## ```nim ## import packages/docutils/rstgen, strtabs ## ## echo rstToHtml("*Hello* **world**!", {}, ## newStringTable(modeStyleInsensitive)) ## # --> <em>Hello</em> <strong>world</strong>! 
+ ## ``` ## ## If you need to allow the rst ``include`` directive or tweak the generated ## output you have to create your own ``RstGenerator`` with @@ -1334,28 +1537,30 @@ proc rstToHtml*(s: string, options: RstParseOptions, proc myFindFile(filename: string): string = # we don't find any files in online mode: result = "" + proc myFindRefFile(filename: string): (string, string) = + result = ("", "") const filen = "input" + let (rst, filenames, t) = rstParse(s, filen, + line=LineRstInit, column=ColRstInit, + options, myFindFile, myFindRefFile, msgHandler) var d: RstGenerator - initRstGenerator(d, outHtml, config, filen, options, myFindFile, - rst.defaultMsgHandler) - var dummyHasToc = false - var rst = rstParse(s, filen, 0, 1, dummyHasToc, options) + initRstGenerator(d, outHtml, config, filen, myFindFile, msgHandler, + filenames, hasToc = t) result = "" renderRstToOut(d, rst, result) + strbasics.strip(result) proc rstToLatex*(rstSource: string; options: RstParseOptions): string {.inline, since: (1, 3).} = ## Convenience proc for `renderRstToOut` and `initRstGenerator`. runnableExamples: doAssert rstToLatex("*Hello* **world**", {}) == """\emph{Hello} \textbf{world}""" if rstSource.len == 0: return - var option: bool + let (rst, filenames, t) = rstParse(rstSource, "", + line=LineRstInit, column=ColRstInit, + options) var rstGenera: RstGenerator - rstGenera.initRstGenerator(outLatex, defaultConfig(), "input", options) - rstGenera.renderRstToOut(rstParse(rstSource, "", 1, 1, option, options), result) - - -when isMainModule: - assert rstToHtml("*Hello* **world**!", {}, - newStringTable(modeStyleInsensitive)) == - "<em>Hello</em> <strong>world</strong>!" + rstGenera.initRstGenerator(outLatex, defaultConfig(), "input", + filenames=filenames, hasToc = t) + rstGenera.renderRstToOut(rst, result) + strbasics.strip(result) diff --git a/lib/packages/docutils/rstidx.nim b/lib/packages/docutils/rstidx.nim new file mode 100644 index 000000000..1472d28fd --- /dev/null +++ b/lib/packages/docutils/rstidx.nim @@ -0,0 +1,141 @@ +# +# Nim's Runtime Library +# (c) Copyright 2022 Andreas Rumpf +# +# See the file "copying.txt", included in this +# distribution, for details about the copyright. + +## Nim `idx`:idx: file format related definitions. + +import std/[strutils, syncio, hashes] +from std/os import splitFile + +type + IndexEntryKind* = enum ## discriminator tag + ieMarkupTitle = "markupTitle" + ## RST/Markdown title, text in `keyword` + + ## HTML text in `linkTitle` + ieNimTitle = "nimTitle" + ## Nim title + ieHeading = "heading" ## RST/Markdown markup heading, escaped + ieIdxRole = "idx" ## RST :idx: definition, escaped + ieNim = "nim" ## Nim symbol, unescaped + ieNimGroup = "nimgrp" ## Nim overload group, unescaped + IndexEntry* = object + kind*: IndexEntryKind ## 0. + keyword*: string ## 1. + link*: string ## 2. + linkTitle*: string ## 3. contains a prettier text for the href + linkDesc*: string ## 4. the title attribute of the final href + line*: int ## 5. + module*: string ## origin file, NOT a field in ``.idx`` file + aux*: string ## auxuliary field, NOT a field in ``.idx`` file + +proc isDocumentationTitle*(hyperlink: string): bool = + ## Returns true if the hyperlink is actually a documentation title. + ## + ## Documentation titles lack the hash. See `mergeIndexes() + ## <#mergeIndexes,string>`_ for a more detailed explanation. 
+ result = hyperlink.find('#') < 0 + +proc `$`*(e: IndexEntry): string = + """("$1", "$2", "$3", "$4", $5)""" % [ + e.keyword, e.link, e.linkTitle, e.linkDesc, $e.line] + +proc quoteIndexColumn(text: string): string = + ## Returns a safe version of `text` for serialization to the ``.idx`` file. + ## + ## The returned version can be put without worries in a line based tab + ## separated column text file. The following character sequence replacements + ## will be performed for that goal: + ## + ## * ``"\\"`` => ``"\\\\"`` + ## * ``"\n"`` => ``"\\n"`` + ## * ``"\t"`` => ``"\\t"`` + result = newStringOfCap(text.len + 3) + for c in text: + case c + of '\\': result.add "\\" + of '\L': result.add "\\n" + of '\C': discard + of '\t': result.add "\\t" + else: result.add c + +proc unquoteIndexColumn*(text: string): string = + ## Returns the unquoted version generated by ``quoteIndexColumn``. + result = text.multiReplace(("\\t", "\t"), ("\\n", "\n"), ("\\\\", "\\")) + +proc formatIndexEntry*(kind: IndexEntryKind; htmlFile, id, term, linkTitle, + linkDesc: string, line: int): + tuple[entry: string, isTitle: bool] = + result.entry = $kind + result.entry.add('\t') + result.entry.add term + result.entry.add('\t') + result.entry.add(htmlFile) + if id.len > 0: + result.entry.add('#') + result.entry.add(id) + result.isTitle = false + else: + result.isTitle = true + result.entry.add('\t' & linkTitle.quoteIndexColumn) + result.entry.add('\t' & linkDesc.quoteIndexColumn) + result.entry.add('\t' & $line) + result.entry.add("\n") + +proc parseIndexEntryKind(s: string): IndexEntryKind = + result = case s: + of "nim": ieNim + of "nimgrp": ieNimGroup + of "heading": ieHeading + of "idx": ieIdxRole + of "nimTitle": ieNimTitle + of "markupTitle": ieMarkupTitle + else: raise newException(ValueError, "unknown index entry value $1" % [s]) + +proc parseIdxFile*(path: string): + tuple[fileEntries: seq[IndexEntry], title: IndexEntry] = + var + f = 0 + newSeq(result.fileEntries, 500) + setLen(result.fileEntries, 0) + let (_, base, _) = path.splitFile + for line in lines(path): + let s = line.find('\t') + if s < 0: continue + setLen(result.fileEntries, f+1) + let cols = line.split('\t') + result.fileEntries[f].kind = parseIndexEntryKind(cols[0]) + result.fileEntries[f].keyword = cols[1] + result.fileEntries[f].link = cols[2] + if result.fileEntries[f].kind == ieIdxRole: + result.fileEntries[f].module = base + else: + if result.title.keyword.len == 0: + result.fileEntries[f].module = base + else: + result.fileEntries[f].module = result.title.keyword + + result.fileEntries[f].linkTitle = cols[3].unquoteIndexColumn + result.fileEntries[f].linkDesc = cols[4].unquoteIndexColumn + result.fileEntries[f].line = parseInt(cols[5]) + + if result.fileEntries[f].kind in {ieNimTitle, ieMarkupTitle}: + result.title = result.fileEntries[f] + inc f + +proc cmp*(a, b: IndexEntry): int = + ## Sorts two ``IndexEntry`` first by `keyword` field, then by `link`. + result = cmpIgnoreStyle(a.keyword, b.keyword) + if result == 0: + result = cmpIgnoreStyle(a.link, b.link) + +proc hash*(x: IndexEntry): Hash = + ## Returns the hash for the combined fields of the type. + ## + ## The hash is computed as the chained hash of the individual string hashes. + result = x.keyword.hash !& x.link.hash + result = result !& x.linkTitle.hash + result = result !& x.linkDesc.hash + result = !$result |
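The new `rstidx.nim` module makes the ``.idx`` format round-trippable: `formatIndexEntry` emits one tab-separated line per entry (kind, keyword, link, linkTitle, linkDesc, line), and `parseIdxFile` reads a whole file back into `IndexEntry` values while singling out the title entry. A minimal round-trip sketch, not part of the diff (assuming `packages/docutils/rstidx` is importable and using a hypothetical `demo.idx` scratch file):

import packages/docutils/rstidx
import std/syncio

# One markup title (empty id => title entry) and one Nim symbol entry.
let title = formatIndexEntry(ieMarkupTitle, "demo.html", "", "Demo module",
                             "Demo module", "", 1)
let sym = formatIndexEntry(ieNim, "demo.html", "foo,int", "foo",
                           "proc foo(x: int)", "computes foo", 12)
doAssert title.isTitle and not sym.isTitle
writeFile("demo.idx", title.entry & sym.entry)

let (entries, titleEntry) = parseIdxFile("demo.idx")
doAssert titleEntry.keyword == "Demo module"
doAssert entries.len == 2
doAssert entries[1].link == "demo.html#foo,int"
doAssert entries[1].linkTitle == "proc foo(x: int)"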