Diffstat (limited to 'lib')
46 files changed, 2112 insertions, 2838 deletions
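The database-module diffs below converge db_mysql, db_odbc, db_postgres and db_sqlite on a shared ``db_common`` module (``SqlQuery``, ``DbError``, the ``ReadDbEffect``/``WriteDbEffect`` tags) and add ``instantRows`` overloads that expose column metadata through ``DbColumns``. A minimal usage sketch of that common interface, assuming ``db_sqlite`` as patched here and an in-memory database; the table ``t`` and its columns are purely illustrative:

  import db_sqlite   # re-exports db_common (sql, DbError, DbColumns, ...)

  let db = open(":memory:", "", "", "")   # sqlite only uses the connection string
  db.exec(sql"CREATE TABLE t (id INTEGER PRIMARY KEY, name TEXT)")
  db.exec(sql"INSERT INTO t (name) VALUES (?)", "nim")

  # the new overload fills in column metadata alongside the row handle
  var columns: DbColumns
  for row in db.instantRows(columns, sql"SELECT id, name FROM t"):
    echo columns[0].name, " = ", row[0]
    echo columns[1].name, " = ", row[1]

  db.close()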
diff --git a/lib/core/macros.nim b/lib/core/macros.nim index 552c0dbff..872d4848d 100644 --- a/lib/core/macros.nim +++ b/lib/core/macros.nim @@ -97,7 +97,7 @@ type nskUnknown, nskConditional, nskDynLib, nskParam, nskGenericParam, nskTemp, nskModule, nskType, nskVar, nskLet, nskConst, nskResult, - nskProc, nskMethod, nskIterator, nskClosureIterator, + nskProc, nskMethod, nskIterator, nskConverter, nskMacro, nskTemplate, nskField, nskEnumField, nskForVar, nskLabel, nskStub @@ -416,8 +416,7 @@ proc newLit*(i: BiggestInt): NimNode {.compileTime.} = proc newLit*(b: bool): NimNode {.compileTime.} = ## produces a new boolean literal node. - result = newNimNode(nnkIntLit) - result.intVal = ord(b) + result = if b: bindSym"true" else: bindSym"false" proc newLit*(f: BiggestFloat): NimNode {.compileTime.} = ## produces a new float literal node. diff --git a/lib/core/typeinfo.nim b/lib/core/typeinfo.nim index 1f9fb1072..db5a83755 100644 --- a/lib/core/typeinfo.nim +++ b/lib/core/typeinfo.nim @@ -61,7 +61,10 @@ type ## wrapped value and **must not** live longer than ## its wrapped value. value: pointer - rawType: PNimType + when defined(js): + rawType: PNimType + else: + rawTypePtr: pointer ppointer = ptr pointer pbyteArray = ptr array[0.. 0xffff, int8] @@ -71,6 +74,14 @@ type when defined(gogc): elemSize: int PGenSeq = ptr TGenericSeq + +when not defined(js): + template rawType(x: Any): PNimType = + cast[PNimType](x.rawTypePtr) + + template `rawType=`(x: var Any, p: PNimType) = + x.rawTypePtr = cast[pointer](p) + {.deprecated: [TAny: Any, TAnyKind: AnyKind].} when defined(gogc): @@ -108,7 +119,7 @@ proc selectBranch(aa: pointer, n: ptr TNimNode): ptr TNimNode = else: result = n.sons[n.len] -proc newAny(value: pointer, rawType: PNimType): Any = +proc newAny(value: pointer, rawType: PNimType): Any {.inline.} = result.value = value result.rawType = rawType @@ -126,8 +137,7 @@ proc toAny*[T](x: var T): Any {.inline.} = ## constructs a ``Any`` object from `x`. This captures `x`'s address, so ## `x` can be modified with its ``Any`` wrapper! The client needs to ensure ## that the wrapper **does not** live longer than `x`! - result.value = addr(x) - result.rawType = cast[PNimType](getTypeInfo(x)) + newAny(addr(x), cast[PNimType](getTypeInfo(x))) proc kind*(x: Any): AnyKind {.inline.} = ## get the type kind @@ -345,7 +355,7 @@ proc `[]`*(x: Any, fieldName: string): Any = result.value = x.value +!! n.offset result.rawType = n.typ elif x.rawType.kind == tyObject and x.rawType.base != nil: - return `[]`(Any(value: x.value, rawType: x.rawType.base), fieldName) + return `[]`(newAny(x.value, x.rawType.base), fieldName) else: raise newException(ValueError, "invalid field name: " & fieldName) diff --git a/lib/impure/db_mysql.nim b/lib/impure/db_mysql.nim index 7f7511264..170fee8b8 100644 --- a/lib/impure/db_mysql.nim +++ b/lib/impure/db_mysql.nim @@ -1,7 +1,7 @@ # # # Nim's Runtime Library -# (c) Copyright 2012 Andreas Rumpf +# (c) Copyright 2015 Andreas Rumpf # # See the file "copying.txt", included in this # distribution, for details about the copyright. @@ -43,45 +43,26 @@ import strutils, mysql +import db_common +export db_common + type - DbConn* = PMySQL ## encapsulates a database connection + DbConn* = PMySQL ## encapsulates a database connection Row* = seq[string] ## a row of a dataset. NULL database values will be - ## transformed always to the empty string. 
- InstantRow* = tuple[row: cstringArray, len: int] ## a handle that can be - ## used to get a row's - ## column text on demand - EDb* = object of IOError ## exception that is raised if a database error occurs - - SqlQuery* = distinct string ## an SQL query string - - FDb* = object of IOEffect ## effect that denotes a database operation - FReadDb* = object of FDb ## effect that denotes a read operation - FWriteDb* = object of FDb ## effect that denotes a write operation -{.deprecated: [TRow: Row, TSqlQuery: SqlQuery, TDbConn: DbConn].} - -proc sql*(query: string): SqlQuery {.noSideEffect, inline.} = - ## constructs a SqlQuery from the string `query`. This is supposed to be - ## used as a raw-string-literal modifier: - ## ``sql"update user set counter = counter + 1"`` - ## - ## If assertions are turned off, it does nothing. If assertions are turned - ## on, later versions will check the string for valid syntax. - result = SqlQuery(query) + ## converted to nil. + InstantRow* = object ## a handle that can be used to get a row's + ## column text on demand + row: cstringArray + len: int +{.deprecated: [TRow: Row, TDbConn: DbConn].} -proc dbError(db: DbConn) {.noreturn.} = - ## raises an EDb exception. - var e: ref EDb +proc dbError*(db: DbConn) {.noreturn.} = + ## raises a DbError exception. + var e: ref DbError new(e) e.msg = $mysql.error(db) raise e -proc dbError*(msg: string) {.noreturn.} = - ## raises an EDb exception with message `msg`. - var e: ref EDb - new(e) - e.msg = msg - raise e - when false: proc dbQueryOpt*(db: DbConn, query: string, args: varargs[string, `$`]) = var stmt = mysql_stmt_init(db) @@ -114,7 +95,7 @@ proc dbFormat(formatstr: SqlQuery, args: varargs[string]): string = add(result, c) proc tryExec*(db: DbConn, query: SqlQuery, args: varargs[string, `$`]): bool {. - tags: [FReadDB, FWriteDb].} = + tags: [ReadDbEffect, WriteDbEffect].} = ## tries to execute the query and returns true if successful, false otherwise. var q = dbFormat(query, args) return mysql.realQuery(db, q, q.len) == 0'i32 @@ -124,7 +105,7 @@ proc rawExec(db: DbConn, query: SqlQuery, args: varargs[string, `$`]) = if mysql.realQuery(db, q, q.len) != 0'i32: dbError(db) proc exec*(db: DbConn, query: SqlQuery, args: varargs[string, `$`]) {. - tags: [FReadDB, FWriteDb].} = + tags: [ReadDbEffect, WriteDbEffect].} = ## executes the query and raises EDB if not successful. var q = dbFormat(query, args) if mysql.realQuery(db, q, q.len) != 0'i32: dbError(db) @@ -139,7 +120,7 @@ proc properFreeResult(sqlres: mysql.PRES, row: cstringArray) = mysql.freeResult(sqlres) iterator fastRows*(db: DbConn, query: SqlQuery, - args: varargs[string, `$`]): Row {.tags: [FReadDB].} = + args: varargs[string, `$`]): Row {.tags: [ReadDbEffect].} = ## executes the query and iterates over the result dataset. ## ## This is very fast, but potentially dangerous. Use this iterator only @@ -167,31 +148,113 @@ iterator fastRows*(db: DbConn, query: SqlQuery, iterator instantRows*(db: DbConn, query: SqlQuery, args: varargs[string, `$`]): InstantRow - {.tags: [FReadDb].} = - ## same as fastRows but returns a handle that can be used to get column text - ## on demand using []. Returned handle is valid only within the interator body. + {.tags: [ReadDbEffect].} = + ## Same as fastRows but returns a handle that can be used to get column text + ## on demand using []. Returned handle is valid only within the iterator body. 
+ rawExec(db, query, args) + var sqlres = mysql.useResult(db) + if sqlres != nil: + let L = int(mysql.numFields(sqlres)) + var row: cstringArray + while true: + row = mysql.fetchRow(sqlres) + if row == nil: break + yield InstantRow(row: row, len: L) + properFreeResult(sqlres, row) + +proc setTypeName(t: var DbType; f: PFIELD) = + shallowCopy(t.name, $f.name) + t.maxReprLen = Natural(f.max_length) + if (NOT_NULL_FLAG and f.flags) != 0: t.notNull = true + case f.ftype + of TYPE_DECIMAL: + t.kind = dbDecimal + of TYPE_TINY: + t.kind = dbInt + t.size = 1 + of TYPE_SHORT: + t.kind = dbInt + t.size = 2 + of TYPE_LONG: + t.kind = dbInt + t.size = 4 + of TYPE_FLOAT: + t.kind = dbFloat + t.size = 4 + of TYPE_DOUBLE: + t.kind = dbFloat + t.size = 8 + of TYPE_NULL: + t.kind = dbNull + of TYPE_TIMESTAMP: + t.kind = dbTimestamp + of TYPE_LONGLONG: + t.kind = dbInt + t.size = 8 + of TYPE_INT24: + t.kind = dbInt + t.size = 3 + of TYPE_DATE: + t.kind = dbDate + of TYPE_TIME: + t.kind = dbTime + of TYPE_DATETIME: + t.kind = dbDatetime + of TYPE_YEAR: + t.kind = dbDate + of TYPE_NEWDATE: + t.kind = dbDate + of TYPE_VARCHAR, TYPE_VAR_STRING, TYPE_STRING: + t.kind = dbVarchar + of TYPE_BIT: + t.kind = dbBit + of TYPE_NEWDECIMAL: + t.kind = dbDecimal + of TYPE_ENUM: t.kind = dbEnum + of TYPE_SET: t.kind = dbSet + of TYPE_TINY_BLOB, TYPE_MEDIUM_BLOB, TYPE_LONG_BLOB, + TYPE_BLOB: t.kind = dbBlob + of TYPE_GEOMETRY: + t.kind = dbGeometry + +proc setColumnInfo(columns: var DbColumns; res: PRES; L: int) = + setLen(columns, L) + for i in 0..<L: + let fp = mysql.fetch_field_direct(res, cint(i)) + setTypeName(columns[i].typ, fp) + columns[i].name = $fp.name + columns[i].tableName = $fp.table + columns[i].primaryKey = (fp.flags and PRI_KEY_FLAG) != 0 + #columns[i].foreignKey = there is no such thing in mysql + +iterator instantRows*(db: DbConn; columns: var DbColumns; query: SqlQuery; + args: varargs[string, `$`]): InstantRow = + ## Same as fastRows but returns a handle that can be used to get column text + ## on demand using []. Returned handle is valid only within the iterator body. rawExec(db, query, args) var sqlres = mysql.useResult(db) if sqlres != nil: let L = int(mysql.numFields(sqlres)) + setColumnInfo(columns, sqlres, L) var row: cstringArray while true: row = mysql.fetchRow(sqlres) if row == nil: break - yield (row: row, len: L) + yield InstantRow(row: row, len: L) properFreeResult(sqlres, row) + proc `[]`*(row: InstantRow, col: int): string {.inline.} = - ## returns text for given column of the row + ## Returns text for given column of the row. $row.row[col] proc len*(row: InstantRow): int {.inline.} = - ## returns number of columns in the row + ## Returns number of columns in the row. row.len proc getRow*(db: DbConn, query: SqlQuery, - args: varargs[string, `$`]): Row {.tags: [FReadDB].} = - ## retrieves a single row. If the query doesn't return any rows, this proc + args: varargs[string, `$`]): Row {.tags: [ReadDbEffect].} = + ## Retrieves a single row. If the query doesn't return any rows, this proc ## will return a Row with empty strings for each column. rawExec(db, query, args) var sqlres = mysql.useResult(db) @@ -209,7 +272,7 @@ proc getRow*(db: DbConn, query: SqlQuery, properFreeResult(sqlres, row) proc getAllRows*(db: DbConn, query: SqlQuery, - args: varargs[string, `$`]): seq[Row] {.tags: [FReadDB].} = + args: varargs[string, `$`]): seq[Row] {.tags: [ReadDbEffect].} = ## executes the query and returns the whole result dataset. 
result = @[] rawExec(db, query, args) @@ -232,19 +295,19 @@ proc getAllRows*(db: DbConn, query: SqlQuery, mysql.freeResult(sqlres) iterator rows*(db: DbConn, query: SqlQuery, - args: varargs[string, `$`]): Row {.tags: [FReadDB].} = + args: varargs[string, `$`]): Row {.tags: [ReadDbEffect].} = ## same as `fastRows`, but slower and safe. for r in items(getAllRows(db, query, args)): yield r proc getValue*(db: DbConn, query: SqlQuery, - args: varargs[string, `$`]): string {.tags: [FReadDB].} = + args: varargs[string, `$`]): string {.tags: [ReadDbEffect].} = ## executes the query and returns the first column of the first row of the ## result dataset. Returns "" if the dataset contains no rows or the database ## value is NULL. result = getRow(db, query, args)[0] proc tryInsertId*(db: DbConn, query: SqlQuery, - args: varargs[string, `$`]): int64 {.tags: [FWriteDb].} = + args: varargs[string, `$`]): int64 {.tags: [WriteDbEffect].} = ## executes the query (typically "INSERT") and returns the ## generated ID for the row or -1 in case of an error. var q = dbFormat(query, args) @@ -254,7 +317,7 @@ proc tryInsertId*(db: DbConn, query: SqlQuery, result = mysql.insertId(db) proc insertId*(db: DbConn, query: SqlQuery, - args: varargs[string, `$`]): int64 {.tags: [FWriteDb].} = + args: varargs[string, `$`]): int64 {.tags: [WriteDbEffect].} = ## executes the query (typically "INSERT") and returns the ## generated ID for the row. result = tryInsertID(db, query, args) @@ -262,18 +325,18 @@ proc insertId*(db: DbConn, query: SqlQuery, proc execAffectedRows*(db: DbConn, query: SqlQuery, args: varargs[string, `$`]): int64 {. - tags: [FReadDB, FWriteDb].} = + tags: [ReadDbEffect, WriteDbEffect].} = ## runs the query (typically "UPDATE") and returns the ## number of affected rows rawExec(db, query, args) result = mysql.affectedRows(db) -proc close*(db: DbConn) {.tags: [FDb].} = +proc close*(db: DbConn) {.tags: [DbEffect].} = ## closes the database connection. if db != nil: mysql.close(db) proc open*(connection, user, password, database: string): DbConn {. - tags: [FDb].} = + tags: [DbEffect].} = ## opens a database connection. Raises `EDb` if the connection could not ## be established. result = mysql.init(nil) @@ -291,7 +354,7 @@ proc open*(connection, user, password, database: string): DbConn {. dbError(errmsg) proc setEncoding*(connection: DbConn, encoding: string): bool {. - tags: [FDb].} = + tags: [DbEffect].} = ## sets the encoding of a database connection, returns true for ## success, false for failure. result = mysql.set_character_set(connection, encoding) == 0 diff --git a/lib/impure/db_odbc.nim b/lib/impure/db_odbc.nim new file mode 100644 index 000000000..6af69d842 --- /dev/null +++ b/lib/impure/db_odbc.nim @@ -0,0 +1,463 @@ +# +# +# Nim's Runtime Library +# (c) Copyright 2015 Nim Contributors +# +# See the file "copying.txt", included in this +# distribution, for details about the copyright. +# + +## A higher level `ODBC` database wrapper. +## +## This is the same interface that is implemented for other databases. +## +## This has NOT yet been (extensively) tested agains ODBC drivers for +## Teradata, Oracle, Sybase, MSSqlvSvr, et. al. databases +## +## Currently all queries are ANSI calls, not Unicode. +## +## Example: +## +## .. 
code-block:: Nim +## +## import db_odbc, math +## +## let theDb = open("localhost", "nim", "nim", "test") +## +## theDb.exec(sql"Drop table if exists myTestTbl") +## theDb.exec(sql("create table myTestTbl (" & +## " Id INT(11) NOT NULL AUTO_INCREMENT PRIMARY KEY, " & +## " Name VARCHAR(50) NOT NULL, " & +## " i INT(11), " & +## " f DECIMAL(18,10))")) +## +## theDb.exec(sql"START TRANSACTION") +## for i in 1..1000: +## theDb.exec(sql"INSERT INTO myTestTbl (name,i,f) VALUES (?,?,?)", +## "Item#" & $i, i, sqrt(i.float)) +## theDb.exec(sql"COMMIT") +## +## for x in theDb.fastRows(sql"select * from myTestTbl"): +## echo x +## +## let id = theDb.tryInsertId(sql"INSERT INTO myTestTbl (name,i,f) VALUES (?,?,?)", +## "Item#1001", 1001, sqrt(1001.0)) +## echo "Inserted item: ", theDb.getValue(sql"SELECT name FROM myTestTbl WHERE id=?", id) +## +## theDb.close() + + +import strutils, odbcsql + +import db_common +export db_common + +type + OdbcConnTyp = tuple[hDb: SqlHDBC, env: SqlHEnv, stmt: SqlHStmt] + DbConn* = OdbcConnTyp ## encapsulates a database connection + Row* = seq[string] ## a row of a dataset. NULL database values will be + ## converted to nil. + InstantRow* = tuple[row: seq[string], len: int] ## a handle that can be + ## used to get a row's + ## column text on demand + +{.deprecated: [TRow: Row, TSqlQuery: SqlQuery, TDbConn: DbConn].} + +var + buf: array[0..4096, char] + +proc properFreeResult(hType: int, sqlres: var SqlHandle) {. + tags: [WriteDbEffect], raises: [].} = + try: + discard SQLFreeHandle(hType.TSqlSmallInt, sqlres) + sqlres = nil + except: discard + +proc getErrInfo(db: var DbConn): tuple[res: int, ss, ne, msg: string] {. + tags: [ReadDbEffect], raises: [].} = + ## Returns ODBC error information + var + sqlState: array[0..512, char] + nativeErr: array[0..512, char] + errMsg: array[0..512, char] + retSz: TSqlSmallInt = 0 + res: TSqlSmallInt = 0 + try: + sqlState[0] = '\0' + nativeErr[0] = '\0' + errMsg[0] = '\0' + res = SQLErr(db.env, db.hDb, db.stmt, + cast[PSQLCHAR](sqlState.addr), + cast[PSQLCHAR](nativeErr.addr), + cast[PSQLCHAR](errMsg.addr), + 511.TSqlSmallInt, retSz.addr.PSQLSMALLINT) + except: + discard + return (res.int, $sqlState, $nativeErr, $errMsg) + +proc dbError*(db: var DbConn) {. + tags: [ReadDbEffect, WriteDbEffect], raises: [DbError] .} = + ## Raises an `[DbError]` exception with ODBC error information + var + e: ref DbError + ss, ne, msg: string = "" + isAnError = false + res: int = 0 + prevSs = "" + while true: + prevSs = ss + (res, ss, ne, msg) = db.getErrInfo() + if prevSs == ss: + break + # sqlState of 00000 is not an error + elif ss == "00000": + break + elif ss == "01000": + echo "\nWarning: ", ss, " ", msg + continue + else: + isAnError = true + echo "\nError: ", ss, " ", msg + if isAnError: + new(e) + e.msg = "ODBC Error" + if db.stmt != nil: + properFreeResult(SQL_HANDLE_STMT, db.stmt) + properFreeResult(SQL_HANDLE_DBC, db.hDb) + properFreeResult(SQL_HANDLE_ENV, db.env) + raise e + +proc SqlCheck(db: var DbConn, resVal: TSqlSmallInt) {.raises: [DbError]} = + ## Wrapper that checks if ``resVal`` is not SQL_SUCCESS and if so, raises [EDb] + if resVal != SQL_SUCCESS: dbError(db) + +proc SqlGetDBMS(db: var DbConn): string {. 
+ tags: [ReadDbEffect, WriteDbEffect], raises: [] .} = + ## Returns the ODBC SQL_DBMS_NAME string + const + SQL_DBMS_NAME = 17.SqlUSmallInt + var + sz: TSqlSmallInt = 0 + buf[0] = '\0' + try: + db.SqlCheck(SQLGetInfo(db.hDb, SQL_DBMS_NAME, cast[SqlPointer](buf.addr), + 4095.TSqlSmallInt, sz.addr)) + except: discard + return $buf.cstring + +proc dbQuote*(s: string): string {.noSideEffect.} = + ## DB quotes the string. + result = "'" + for c in items(s): + if c == '\'': add(result, "''") + else: add(result, c) + add(result, '\'') + +proc dbFormat(formatstr: SqlQuery, args: varargs[string]): string {. + noSideEffect.} = + ## Replace any ``?`` placeholders with `args`, + ## and quotes the arguments + result = "" + var a = 0 + for c in items(string(formatstr)): + if c == '?': + if args[a] == nil: + add(result, "NULL") + else: + add(result, dbQuote(args[a])) + inc(a) + else: + add(result, c) + +proc prepareFetch(db: var DbConn, query: SqlQuery, + args: varargs[string, `$`]) {. + tags: [ReadDbEffect, WriteDbEffect], raises: [DbError].} = + # Prepare a statement, execute it and fetch the data to the driver + # ready for retrieval of the data + # Used internally by iterators and retrieval procs + # requires calling + # properFreeResult(SQL_HANDLE_STMT, db.stmt) + # when finished + db.SqlCheck(SQLAllocHandle(SQL_HANDLE_STMT, db.hDb, db.stmt)) + var q = dbFormat(query, args) + db.SqlCheck(SQLPrepare(db.stmt, q.PSQLCHAR, q.len.TSqlSmallInt)) + db.SqlCheck(SQLExecute(db.stmt)) + db.SqlCheck(SQLFetch(db.stmt)) + +proc prepareFetchDirect(db: var DbConn, query: SqlQuery, + args: varargs[string, `$`]) {. + tags: [ReadDbEffect, WriteDbEffect], raises: [DbError].} = + # Prepare a statement, execute it and fetch the data to the driver + # ready for retrieval of the data + # Used internally by iterators and retrieval procs + # requires calling + # properFreeResult(SQL_HANDLE_STMT, db.stmt) + # when finished + db.SqlCheck(SQLAllocHandle(SQL_HANDLE_STMT, db.hDb, db.stmt)) + var q = dbFormat(query, args) + db.SqlCheck(SQLExecDirect(db.stmt, q.PSQLCHAR, q.len.TSqlSmallInt)) + db.SqlCheck(SQLFetch(db.stmt)) + +proc tryExec*(db: var DbConn, query: SqlQuery, args: varargs[string, `$`]): bool {. + tags: [ReadDbEffect, WriteDbEffect], raises: [].} = + ## Tries to execute the query and returns true if successful, false otherwise. + var + res:TSqlSmallInt = -1 + try: + db.prepareFetchDirect(query, args) + var + rCnt = -1 + res = SQLRowCount(db.stmt, rCnt) + if res != SQL_SUCCESS: dbError(db) + properFreeResult(SQL_HANDLE_STMT, db.stmt) + except: discard + return res == SQL_SUCCESS + +proc rawExec(db: var DbConn, query: SqlQuery, args: varargs[string, `$`]) {. + tags: [ReadDbEffect, WriteDbEffect], raises: [DbError].} = + db.prepareFetchDirect(query, args) + +proc exec*(db: var DbConn, query: SqlQuery, args: varargs[string, `$`]) {. + tags: [ReadDbEffect, WriteDbEffect], raises: [DbError].} = + ## Executes the query and raises EDB if not successful. + db.prepareFetchDirect(query, args) + properFreeResult(SQL_HANDLE_STMT, db.stmt) + +proc newRow(L: int): Row {.noSideEFfect.} = + newSeq(result, L) + for i in 0..L-1: result[i] = "" + +iterator fastRows*(db: var DbConn, query: SqlQuery, + args: varargs[string, `$`]): Row {. + tags: [ReadDbEffect, WriteDbEffect], raises: [DbError].} = + ## Executes the query and iterates over the result dataset. + ## + ## This is very fast, but potentially dangerous. Use this iterator only + ## if you require **ALL** the rows. 
+ ## + ## Breaking the fastRows() iterator during a loop may cause a driver error + ## for subsequenct queries + ## + ## Rows are retrieved from the server at each iteration. + var + rowRes: Row + sz: TSqlSmallInt = 0 + cCnt: TSqlSmallInt = 0.TSqlSmallInt + rCnt = -1 + + db.prepareFetch(query, args) + db.SqlCheck(SQLNumResultCols(db.stmt, cCnt)) + db.SqlCheck(SQLRowCount(db.stmt, rCnt)) + rowRes = newRow(cCnt) + for rNr in 1..rCnt: + for colId in 1..cCnt: + buf[0] = '\0' + db.SqlCheck(SQLGetData(db.stmt, colId.SqlUSmallInt, SQL_C_CHAR, + cast[cstring](buf.addr), 4095.TSqlSmallInt, sz.addr)) + rowRes[colId-1] = $buf.cstring + db.SqlCheck(SQLFetchScroll(db.stmt, SQL_FETCH_NEXT, 1)) + yield rowRes + properFreeResult(SQL_HANDLE_STMT, db.stmt) + +iterator instantRows*(db: var DbConn, query: SqlQuery, + args: varargs[string, `$`]): InstantRow + {.tags: [ReadDbEffect, WriteDbEffect].} = + ## Same as fastRows but returns a handle that can be used to get column text + ## on demand using []. Returned handle is valid only within the interator body. + var + rowRes: Row + sz: TSqlSmallInt = 0 + cCnt: TSqlSmallInt = 0.TSqlSmallInt + rCnt = -1 + db.prepareFetch(query, args) + db.SqlCheck(SQLNumResultCols(db.stmt, cCnt)) + db.SqlCheck(SQLRowCount(db.stmt, rCnt)) + rowRes = newRow(cCnt) + for rNr in 1..rCnt: + for colId in 1..cCnt: + buf[0] = '\0' + db.SqlCheck(SQLGetData(db.stmt, colId.SqlUSmallInt, SQL_C_CHAR, + cast[cstring](buf.addr), 4095.TSqlSmallInt, sz.addr)) + rowRes[colId-1] = $buf.cstring + db.SqlCheck(SQLFetchScroll(db.stmt, SQL_FETCH_NEXT, 1)) + yield (row: rowRes, len: cCnt.int) + properFreeResult(SQL_HANDLE_STMT, db.stmt) + +proc `[]`*(row: InstantRow, col: int): string {.inline.} = + ## Returns text for given column of the row + row.row[col] + +proc len*(row: InstantRow): int {.inline.} = + ## Returns number of columns in the row + row.len + +proc getRow*(db: var DbConn, query: SqlQuery, + args: varargs[string, `$`]): Row {. + tags: [ReadDbEffect, WriteDbEffect], raises: [DbError].} = + ## Retrieves a single row. If the query doesn't return any rows, this proc + ## will return a Row with empty strings for each column. + var + sz: TSqlSmallInt = 0.TSqlSmallInt + cCnt: TSqlSmallInt = 0.TSqlSmallInt + rCnt = -1 + result = @[] + db.prepareFetch(query, args) + db.SqlCheck(SQLNumResultCols(db.stmt, cCnt)) + + db.SqlCheck(SQLRowCount(db.stmt, rCnt)) + for colId in 1..cCnt: + db.SqlCheck(SQLGetData(db.stmt, colId.SqlUSmallInt, SQL_C_CHAR, + cast[cstring](buf.addr), 4095.TSqlSmallInt, sz.addr)) + result.add($buf.cstring) + db.SqlCheck(SQLFetchScroll(db.stmt, SQL_FETCH_NEXT, 1)) + properFreeResult(SQL_HANDLE_STMT, db.stmt) + +proc getAllRows*(db: var DbConn, query: SqlQuery, + args: varargs[string, `$`]): seq[Row] {. + tags: [ReadDbEffect, WriteDbEffect], raises: [DbError].} = + ## Executes the query and returns the whole result dataset. + var + rowRes: Row + sz: TSqlSmallInt = 0 + cCnt: TSqlSmallInt = 0.TSqlSmallInt + rCnt = -1 + db.prepareFetch(query, args) + db.SqlCheck(SQLNumResultCols(db.stmt, cCnt)) + db.SqlCheck(SQLRowCount(db.stmt, rCnt)) + result = @[] + for rNr in 1..rCnt: + rowRes = @[] + buf[0] = '\0' + for colId in 1..cCnt: + db.SqlCheck(SQLGetData(db.stmt, colId.SqlUSmallInt, SQL_C_CHAR, + cast[SqlPointer](buf.addr), 4095.TSqlSmallInt, sz.addr)) + rowRes.add($buf.cstring) + db.SqlCheck(SQLFetchScroll(db.stmt, SQL_FETCH_NEXT, 1)) + result.add(rowRes) + properFreeResult(SQL_HANDLE_STMT, db.stmt) + +iterator rows*(db: var DbConn, query: SqlQuery, + args: varargs[string, `$`]): Row {. 
+ tags: [ReadDbEffect, WriteDbEffect], raises: [DbError].} = + ## Same as `fastRows`, but slower and safe. + ## + ## This retrieves ALL rows into memory before + ## iterating through the rows. + ## Large dataset queries will impact on memory usage. + for r in items(getAllRows(db, query, args)): yield r + +proc getValue*(db: var DbConn, query: SqlQuery, + args: varargs[string, `$`]): string {. + tags: [ReadDbEffect, WriteDbEffect], raises: [].} = + ## Executes the query and returns the first column of the first row of the + ## result dataset. Returns "" if the dataset contains no rows or the database + ## value is NULL. + result = "" + try: + result = getRow(db, query, args)[0] + except: discard + +proc tryInsertId*(db: var DbConn, query: SqlQuery, + args: varargs[string, `$`]): int64 {. + tags: [ReadDbEffect, WriteDbEffect], raises: [].} = + ## Executes the query (typically "INSERT") and returns the + ## generated ID for the row or -1 in case of an error. + if not tryExec(db, query, args): + result = -1'i64 + else: + echo "DBMS: ",SqlGetDBMS(db).toLower() + result = -1'i64 + try: + case SqlGetDBMS(db).toLower(): + of "postgresql": + result = getValue(db, sql"SELECT LASTVAL();", []).parseInt + of "mysql": + result = getValue(db, sql"SELECT LAST_INSERT_ID();", []).parseInt + of "sqlite": + result = getValue(db, sql"SELECT LAST_INSERT_ROWID();", []).parseInt + of "microsoft sql server": + result = getValue(db, sql"SELECT SCOPE_IDENTITY();", []).parseInt + of "oracle": + result = getValue(db, sql"SELECT id.currval FROM DUAL;", []).parseInt + else: result = -1'i64 + except: discard + +proc insertId*(db: var DbConn, query: SqlQuery, + args: varargs[string, `$`]): int64 {. + tags: [ReadDbEffect, WriteDbEffect], raises: [DbError].} = + ## Executes the query (typically "INSERT") and returns the + ## generated ID for the row. + result = tryInsertID(db, query, args) + if result < 0: dbError(db) + +proc execAffectedRows*(db: var DbConn, query: SqlQuery, + args: varargs[string, `$`]): int64 {. + tags: [ReadDbEffect, WriteDbEffect], raises: [DbError].} = + ## Runs the query (typically "UPDATE") and returns the + ## number of affected rows + result = -1 + var res = SQLAllocHandle(SQL_HANDLE_STMT, db.hDb, db.stmt.SqlHandle) + if res != SQL_SUCCESS: dbError(db) + var q = dbFormat(query, args) + res = SQLPrepare(db.stmt, q.PSQLCHAR, q.len.TSqlSmallInt) + if res != SQL_SUCCESS: dbError(db) + rawExec(db, query, args) + var rCnt = -1 + result = SQLRowCount(db.hDb, rCnt) + if res != SQL_SUCCESS: dbError(db) + properFreeResult(SQL_HANDLE_STMT, db.stmt) + result = rCnt + +proc close*(db: var DbConn) {. + tags: [WriteDbEffect], raises: [].} = + ## Closes the database connection. + if db.hDb != nil: + try: + var res = SQLDisconnect(db.hDb) + if db.stmt != nil: + res = SQLFreeHandle(SQL_HANDLE_STMT, db.stmt) + res = SQLFreeHandle(SQL_HANDLE_DBC, db.hDb) + res = SQLFreeHandle(SQL_HANDLE_ENV, db.env) + db = (hDb: nil, env: nil, stmt: nil) + except: + discard + +proc open*(connection, user, password, database: string): DbConn {. + tags: [ReadDbEffect, WriteDbEffect], raises: [DbError].} = + ## Opens a database connection. + ## + ## Raises `EDb` if the connection could not be established. + ## + ## Currently the database parameter is ignored, + ## but included to match ``open()`` in the other db_xxxxx library modules. 
+ var + val: TSqlInteger = SQL_OV_ODBC3 + resLen = 0 + result = (hDb: nil, env: nil, stmt: nil) + # allocate environment handle + var res = SQLAllocHandle(SQL_HANDLE_ENV, result.env, result.env) + if res != SQL_SUCCESS: dbError("Error: unable to initialise ODBC environment.") + res = SQLSetEnvAttr(result.env, + SQL_ATTR_ODBC_VERSION.TSqlInteger, + val, resLen.TSqlInteger) + if res != SQL_SUCCESS: dbError("Error: unable to set ODBC driver version.") + # allocate hDb handle + res = SQLAllocHandle(SQL_HANDLE_DBC, result.env, result.hDb) + if res != SQL_SUCCESS: dbError("Error: unable to allocate connection handle.") + + # Connect: connection = dsn str, + res = SQLConnect(result.hDb, + connection.PSQLCHAR , connection.len.TSqlSmallInt, + user.PSQLCHAR, user.len.TSqlSmallInt, + password.PSQLCHAR, password.len.TSqlSmallInt) + if res != SQL_SUCCESS: + result.dbError() + +proc setEncoding*(connection: DbConn, encoding: string): bool {. + tags: [ReadDbEffect, WriteDbEffect], raises: [DbError].} = + ## Currently not implemented for ODBC. + ## + ## Sets the encoding of a database connection, returns true for + ## success, false for failure. + #result = set_character_set(connection, encoding) == 0 + dbError("setEncoding() is currently not implemented by the db_odbc module") diff --git a/lib/impure/db_postgres.nim b/lib/impure/db_postgres.nim index 7e6219465..9bdbae4c2 100644 --- a/lib/impure/db_postgres.nim +++ b/lib/impure/db_postgres.nim @@ -62,47 +62,28 @@ ## "Dominik") import strutils, postgres +import db_common +export db_common + type DbConn* = PPGconn ## encapsulates a database connection Row* = seq[string] ## a row of a dataset. NULL database values will be - ## transformed always to the empty string. + ## converted to nil. InstantRow* = tuple[res: PPGresult, line: int32] ## a handle that can be ## used to get a row's ## column text on demand - EDb* = object of IOError ## exception that is raised if a database error occurs - - SqlQuery* = distinct string ## an SQL query string SqlPrepared* = distinct string ## a identifier for the prepared queries - FDb* = object of IOEffect ## effect that denotes a database operation - FReadDb* = object of FDb ## effect that denotes a read operation - FWriteDb* = object of FDb ## effect that denotes a write operation -{.deprecated: [TRow: Row, TSqlQuery: SqlQuery, TDbConn: DbConn, +{.deprecated: [TRow: Row, TDbConn: DbConn, TSqlPrepared: SqlPrepared].} -proc sql*(query: string): SqlQuery {.noSideEffect, inline.} = - ## constructs a SqlQuery from the string `query`. This is supposed to be - ## used as a raw-string-literal modifier: - ## ``sql"update user set counter = counter + 1"`` - ## - ## If assertions are turned off, it does nothing. If assertions are turned - ## on, later versions will check the string for valid syntax. - result = SqlQuery(query) - proc dbError*(db: DbConn) {.noreturn.} = - ## raises an EDb exception. - var e: ref EDb + ## raises a DbError exception. + var e: ref DbError new(e) e.msg = $pqErrorMessage(db) raise e -proc dbError*(msg: string) {.noreturn.} = - ## raises an EDb exception with message `msg`. - var e: ref EDb - new(e) - e.msg = msg - raise e - proc dbQuote*(s: string): string = ## DB quotes the string. 
result = "'" @@ -127,7 +108,7 @@ proc dbFormat(formatstr: SqlQuery, args: varargs[string]): string = add(result, c) proc tryExec*(db: DbConn, query: SqlQuery, - args: varargs[string, `$`]): bool {.tags: [FReadDB, FWriteDb].} = + args: varargs[string, `$`]): bool {.tags: [ReadDbEffect, WriteDbEffect].} = ## tries to execute the query and returns true if successful, false otherwise. var res = pqexecParams(db, dbFormat(query, args), 0, nil, nil, nil, nil, 0) @@ -135,7 +116,8 @@ proc tryExec*(db: DbConn, query: SqlQuery, pqclear(res) proc tryExec*(db: DbConn, stmtName: SqlPrepared, - args: varargs[string, `$`]): bool {.tags: [FReadDB, FWriteDb].} = + args: varargs[string, `$`]): bool {.tags: [ + ReadDbEffect, WriteDbEffect].} = ## tries to execute the query and returns true if successful, false otherwise. var arr = allocCStringArray(args) var res = pqexecPrepared(db, stmtName.string, int32(args.len), arr, @@ -145,7 +127,7 @@ proc tryExec*(db: DbConn, stmtName: SqlPrepared, pqclear(res) proc exec*(db: DbConn, query: SqlQuery, args: varargs[string, `$`]) {. - tags: [FReadDB, FWriteDb].} = + tags: [ReadDbEffect, WriteDbEffect].} = ## executes the query and raises EDB if not successful. var res = pqexecParams(db, dbFormat(query, args), 0, nil, nil, nil, nil, 0) @@ -153,7 +135,7 @@ proc exec*(db: DbConn, query: SqlQuery, args: varargs[string, `$`]) {. pqclear(res) proc exec*(db: DbConn, stmtName: SqlPrepared, - args: varargs[string]) {.tags: [FReadDB, FWriteDb].} = + args: varargs[string]) {.tags: [ReadDbEffect, WriteDbEffect].} = var arr = allocCStringArray(args) var res = pqexecPrepared(db, stmtName.string, int32(args.len), arr, nil, nil, 0) @@ -167,11 +149,7 @@ proc newRow(L: int): Row = proc setupQuery(db: DbConn, query: SqlQuery, args: varargs[string]): PPGresult = - # s is a dummy unique id str for each setupQuery query - let s = "setupQuery_Query_" & string(query) - var res = pqprepare(db, s, dbFormat(query, args), 0, nil) - result = pqexecPrepared(db, s, 0, nil, - nil, nil, 0) + result = pqexec(db, dbFormat(query, args)) if pqResultStatus(result) != PGRES_TUPLES_OK: dbError(db) proc setupQuery(db: DbConn, stmtName: SqlPrepared, @@ -200,7 +178,7 @@ proc setRow(res: PPGresult, r: var Row, line, cols: int32) = add(r[col], x) iterator fastRows*(db: DbConn, query: SqlQuery, - args: varargs[string, `$`]): Row {.tags: [FReadDB].} = + args: varargs[string, `$`]): Row {.tags: [ReadDbEffect].} = ## executes the query and iterates over the result dataset. This is very ## fast, but potenially dangerous: If the for-loop-body executes another ## query, the results can be undefined. For Postgres it is safe though. @@ -213,7 +191,7 @@ iterator fastRows*(db: DbConn, query: SqlQuery, pqclear(res) iterator fastRows*(db: DbConn, stmtName: SqlPrepared, - args: varargs[string, `$`]): Row {.tags: [FReadDB].} = + args: varargs[string, `$`]): Row {.tags: [ReadDbEffect].} = ## executes the prepared query and iterates over the result dataset. var res = setupQuery(db, stmtName, args) var L = pqNfields(res) @@ -225,9 +203,9 @@ iterator fastRows*(db: DbConn, stmtName: SqlPrepared, iterator instantRows*(db: DbConn, query: SqlQuery, args: varargs[string, `$`]): InstantRow - {.tags: [FReadDb].} = + {.tags: [ReadDbEffect].} = ## same as fastRows but returns a handle that can be used to get column text - ## on demand using []. Returned handle is valid only within interator body. + ## on demand using []. Returned handle is valid only within iterator body. 
var res = setupQuery(db, query, args) for i in 0..pqNtuples(res)-1: yield (res: res, line: i) @@ -235,9 +213,9 @@ iterator instantRows*(db: DbConn, query: SqlQuery, iterator instantRows*(db: DbConn, stmtName: SqlPrepared, args: varargs[string, `$`]): InstantRow - {.tags: [FReadDb].} = + {.tags: [ReadDbEffect].} = ## same as fastRows but returns a handle that can be used to get column text - ## on demand using []. Returned handle is valid only within interator body. + ## on demand using []. Returned handle is valid only within iterator body. var res = setupQuery(db, stmtName, args) for i in 0..pqNtuples(res)-1: yield (res: res, line: i) @@ -252,7 +230,7 @@ proc len*(row: InstantRow): int32 {.inline.} = pqNfields(row.res) proc getRow*(db: DbConn, query: SqlQuery, - args: varargs[string, `$`]): Row {.tags: [FReadDB].} = + args: varargs[string, `$`]): Row {.tags: [ReadDbEffect].} = ## retrieves a single row. If the query doesn't return any rows, this proc ## will return a Row with empty strings for each column. var res = setupQuery(db, query, args) @@ -262,7 +240,7 @@ proc getRow*(db: DbConn, query: SqlQuery, pqclear(res) proc getRow*(db: DbConn, stmtName: SqlPrepared, - args: varargs[string, `$`]): Row {.tags: [FReadDB].} = + args: varargs[string, `$`]): Row {.tags: [ReadDbEffect].} = var res = setupQuery(db, stmtName, args) var L = pqNfields(res) result = newRow(L) @@ -270,31 +248,34 @@ proc getRow*(db: DbConn, stmtName: SqlPrepared, pqClear(res) proc getAllRows*(db: DbConn, query: SqlQuery, - args: varargs[string, `$`]): seq[Row] {.tags: [FReadDB].} = + args: varargs[string, `$`]): seq[Row] {. + tags: [ReadDbEffect].} = ## executes the query and returns the whole result dataset. result = @[] for r in fastRows(db, query, args): result.add(r) proc getAllRows*(db: DbConn, stmtName: SqlPrepared, - args: varargs[string, `$`]): seq[Row] {.tags: [FReadDB].} = + args: varargs[string, `$`]): seq[Row] {.tags: + [ReadDbEffect].} = ## executes the prepared query and returns the whole result dataset. result = @[] for r in fastRows(db, stmtName, args): result.add(r) iterator rows*(db: DbConn, query: SqlQuery, - args: varargs[string, `$`]): Row {.tags: [FReadDB].} = + args: varargs[string, `$`]): Row {.tags: [ReadDbEffect].} = ## same as `fastRows`, but slower and safe. for r in items(getAllRows(db, query, args)): yield r iterator rows*(db: DbConn, stmtName: SqlPrepared, - args: varargs[string, `$`]): Row {.tags: [FReadDB].} = + args: varargs[string, `$`]): Row {.tags: [ReadDbEffect].} = ## same as `fastRows`, but slower and safe. for r in items(getAllRows(db, stmtName, args)): yield r proc getValue*(db: DbConn, query: SqlQuery, - args: varargs[string, `$`]): string {.tags: [FReadDB].} = + args: varargs[string, `$`]): string {. + tags: [ReadDbEffect].} = ## executes the query and returns the first column of the first row of the ## result dataset. Returns "" if the dataset contains no rows or the database ## value is NULL. @@ -302,7 +283,8 @@ proc getValue*(db: DbConn, query: SqlQuery, result = if isNil(x): "" else: $x proc tryInsertID*(db: DbConn, query: SqlQuery, - args: varargs[string, `$`]): int64 {.tags: [FWriteDb].}= + args: varargs[string, `$`]): int64 {. + tags: [WriteDbEffect].}= ## executes the query (typically "INSERT") and returns the ## generated ID for the row or -1 in case of an error. 
For Postgre this adds ## ``RETURNING id`` to the query, so it only works if your primary key is @@ -315,7 +297,8 @@ proc tryInsertID*(db: DbConn, query: SqlQuery, result = -1 proc insertID*(db: DbConn, query: SqlQuery, - args: varargs[string, `$`]): int64 {.tags: [FWriteDb].} = + args: varargs[string, `$`]): int64 {. + tags: [WriteDbEffect].} = ## executes the query (typically "INSERT") and returns the ## generated ID for the row. For Postgre this adds ## ``RETURNING id`` to the query, so it only works if your primary key is @@ -325,7 +308,7 @@ proc insertID*(db: DbConn, query: SqlQuery, proc execAffectedRows*(db: DbConn, query: SqlQuery, args: varargs[string, `$`]): int64 {.tags: [ - FReadDB, FWriteDb].} = + ReadDbEffect, WriteDbEffect].} = ## executes the query (typically "UPDATE") and returns the ## number of affected rows. var q = dbFormat(query, args) @@ -336,7 +319,7 @@ proc execAffectedRows*(db: DbConn, query: SqlQuery, proc execAffectedRows*(db: DbConn, stmtName: SqlPrepared, args: varargs[string, `$`]): int64 {.tags: [ - FReadDB, FWriteDb].} = + ReadDbEffect, WriteDbEffect].} = ## executes the query (typically "UPDATE") and returns the ## number of affected rows. var arr = allocCStringArray(args) @@ -347,12 +330,12 @@ proc execAffectedRows*(db: DbConn, stmtName: SqlPrepared, result = parseBiggestInt($pqcmdTuples(res)) pqclear(res) -proc close*(db: DbConn) {.tags: [FDb].} = +proc close*(db: DbConn) {.tags: [DbEffect].} = ## closes the database connection. if db != nil: pqfinish(db) proc open*(connection, user, password, database: string): DbConn {. - tags: [FDb].} = + tags: [DbEffect].} = ## opens a database connection. Raises `EDb` if the connection could not ## be established. ## @@ -374,10 +357,10 @@ proc open*(connection, user, password, database: string): DbConn {. if pqStatus(result) != CONNECTION_OK: dbError(result) # result = nil proc setEncoding*(connection: DbConn, encoding: string): bool {. - tags: [FDb].} = + tags: [DbEffect].} = ## sets the encoding of a database connection, returns true for ## success, false for failure. return pqsetClientEncoding(connection, encoding) == 0 -# Tests are in ../../tests/untestable/tpostgres. \ No newline at end of file +# Tests are in ../../tests/untestable/tpostgres. diff --git a/lib/impure/db_sqlite.nim b/lib/impure/db_sqlite.nim index 8366fdadc..c0d221a0d 100644 --- a/lib/impure/db_sqlite.nim +++ b/lib/impure/db_sqlite.nim @@ -1,7 +1,7 @@ # # # Nim's Runtime Library -# (c) Copyright 2012 Andreas Rumpf +# (c) Copyright 2015 Andreas Rumpf # # See the file "copying.txt", included in this # distribution, for details about the copyright. @@ -40,47 +40,30 @@ ## ## theDb.close() +{.deadCodeElim:on.} + import strutils, sqlite3 +import db_common +export db_common + type DbConn* = PSqlite3 ## encapsulates a database connection Row* = seq[string] ## a row of a dataset. NULL database values will be - ## transformed always to the empty string. + ## converted to nil. 
InstantRow* = Pstmt ## a handle that can be used to get a row's column ## text on demand - EDb* = object of IOError ## exception that is raised if a database error occurs - - SqlQuery* = distinct string ## an SQL query string +{.deprecated: [TRow: Row, TDbConn: DbConn].} - FDb* = object of IOEffect ## effect that denotes a database operation - FReadDb* = object of FDb ## effect that denotes a read operation - FWriteDb* = object of FDb ## effect that denotes a write operation -{.deprecated: [TRow: Row, TSqlQuery: SqlQuery, TDbConn: DbConn].} - -proc sql*(query: string): SqlQuery {.noSideEffect, inline.} = - ## constructs a SqlQuery from the string `query`. This is supposed to be - ## used as a raw-string-literal modifier: - ## ``sql"update user set counter = counter + 1"`` - ## - ## If assertions are turned off, it does nothing. If assertions are turned - ## on, later versions will check the string for valid syntax. - result = SqlQuery(query) - -proc dbError(db: DbConn) {.noreturn.} = - ## raises an EDb exception. - var e: ref EDb +proc dbError*(db: DbConn) {.noreturn.} = + ## raises a DbError exception. + var e: ref DbError new(e) e.msg = $sqlite3.errmsg(db) raise e -proc dbError*(msg: string) {.noreturn.} = - ## raises an EDb exception with message `msg`. - var e: ref EDb - new(e) - e.msg = msg - raise e - -proc dbQuote(s: string): string = +proc dbQuote*(s: string): string = + ## DB quotes the string. if s.isNil: return "NULL" result = "'" for c in items(s): @@ -99,7 +82,8 @@ proc dbFormat(formatstr: SqlQuery, args: varargs[string]): string = add(result, c) proc tryExec*(db: DbConn, query: SqlQuery, - args: varargs[string, `$`]): bool {.tags: [FReadDb, FWriteDb].} = + args: varargs[string, `$`]): bool {. + tags: [ReadDbEffect, WriteDbEffect].} = ## tries to execute the query and returns true if successful, false otherwise. var q = dbFormat(query, args) var stmt: sqlite3.Pstmt @@ -108,8 +92,8 @@ proc tryExec*(db: DbConn, query: SqlQuery, result = finalize(stmt) == SQLITE_OK proc exec*(db: DbConn, query: SqlQuery, args: varargs[string, `$`]) {. - tags: [FReadDb, FWriteDb].} = - ## executes the query and raises EDB if not successful. + tags: [ReadDbEffect, WriteDbEffect].} = + ## executes the query and raises DbError if not successful. if not tryExec(db, query, args): dbError(db) proc newRow(L: int): Row = @@ -129,14 +113,14 @@ proc setRow(stmt: Pstmt, r: var Row, cols: cint) = if not isNil(x): add(r[col], x) iterator fastRows*(db: DbConn, query: SqlQuery, - args: varargs[string, `$`]): Row {.tags: [FReadDb].} = + args: varargs[string, `$`]): Row {.tags: [ReadDbEffect].} = ## Executes the query and iterates over the result dataset. ## ## This is very fast, but potentially dangerous. Use this iterator only ## if you require **ALL** the rows. ## ## Breaking the fastRows() iterator during a loop will cause the next - ## database query to raise an [EDb] exception ``unable to close due to ...``. + ## database query to raise a DbError exception ``unable to close due to ...``. var stmt = setupQuery(db, query, args) var L = (column_count(stmt)) var result = newRow(L) @@ -147,10 +131,43 @@ iterator fastRows*(db: DbConn, query: SqlQuery, iterator instantRows*(db: DbConn, query: SqlQuery, args: varargs[string, `$`]): InstantRow - {.tags: [FReadDb].} = + {.tags: [ReadDbEffect].} = + ## same as fastRows but returns a handle that can be used to get column text + ## on demand using []. Returned handle is valid only within the iterator body. 
+ var stmt = setupQuery(db, query, args) + while step(stmt) == SQLITE_ROW: + yield stmt + if finalize(stmt) != SQLITE_OK: dbError(db) + +proc toTypeKind(t: var DbType; x: int32) = + case x + of SQLITE_INTEGER: + t.kind = dbInt + t.size = 8 + of SQLITE_FLOAT: + t.kind = dbFloat + t.size = 8 + of SQLITE_BLOB: t.kind = dbBlob + of SQLITE_NULL: t.kind = dbNull + of SQLITE_TEXT: t.kind = dbVarchar + else: t.kind = dbUnknown + +proc setColumns(columns: var DbColumns; x: PStmt) = + let L = column_count(x) + setLen(columns, L) + for i in 0'i32 ..< L: + columns[i].name = $column_name(x, i) + columns[i].typ.name = $column_decltype(x, i) + toTypeKind(columns[i].typ, column_type(x, i)) + columns[i].tableName = $column_table_name(x, i) + +iterator instantRows*(db: DbConn; columns: var DbColumns; query: SqlQuery, + args: varargs[string, `$`]): InstantRow + {.tags: [ReadDbEffect].} = ## same as fastRows but returns a handle that can be used to get column text - ## on demand using []. Returned handle is valid only within the interator body. + ## on demand using []. Returned handle is valid only within the iterator body. var stmt = setupQuery(db, query, args) + setColumns(columns, stmt) while step(stmt) == SQLITE_ROW: yield stmt if finalize(stmt) != SQLITE_OK: dbError(db) @@ -164,7 +181,7 @@ proc len*(row: InstantRow): int32 {.inline.} = column_count(row) proc getRow*(db: DbConn, query: SqlQuery, - args: varargs[string, `$`]): Row {.tags: [FReadDb].} = + args: varargs[string, `$`]): Row {.tags: [ReadDbEffect].} = ## retrieves a single row. If the query doesn't return any rows, this proc ## will return a Row with empty strings for each column. var stmt = setupQuery(db, query, args) @@ -175,19 +192,19 @@ proc getRow*(db: DbConn, query: SqlQuery, if finalize(stmt) != SQLITE_OK: dbError(db) proc getAllRows*(db: DbConn, query: SqlQuery, - args: varargs[string, `$`]): seq[Row] {.tags: [FReadDb].} = + args: varargs[string, `$`]): seq[Row] {.tags: [ReadDbEffect].} = ## executes the query and returns the whole result dataset. result = @[] for r in fastRows(db, query, args): result.add(r) iterator rows*(db: DbConn, query: SqlQuery, - args: varargs[string, `$`]): Row {.tags: [FReadDb].} = + args: varargs[string, `$`]): Row {.tags: [ReadDbEffect].} = ## same as `FastRows`, but slower and safe. for r in fastRows(db, query, args): yield r proc getValue*(db: DbConn, query: SqlQuery, - args: varargs[string, `$`]): string {.tags: [FReadDb].} = + args: varargs[string, `$`]): string {.tags: [ReadDbEffect].} = ## executes the query and returns the first column of the first row of the ## result dataset. Returns "" if the dataset contains no rows or the database ## value is NULL. @@ -205,7 +222,7 @@ proc getValue*(db: DbConn, query: SqlQuery, proc tryInsertID*(db: DbConn, query: SqlQuery, args: varargs[string, `$`]): int64 - {.tags: [FWriteDb], raises: [].} = + {.tags: [WriteDbEffect], raises: [].} = ## executes the query (typically "INSERT") and returns the ## generated ID for the row or -1 in case of an error. var q = dbFormat(query, args) @@ -218,7 +235,7 @@ proc tryInsertID*(db: DbConn, query: SqlQuery, result = -1 proc insertID*(db: DbConn, query: SqlQuery, - args: varargs[string, `$`]): int64 {.tags: [FWriteDb].} = + args: varargs[string, `$`]): int64 {.tags: [WriteDbEffect].} = ## executes the query (typically "INSERT") and returns the ## generated ID for the row. 
For Postgre this adds ## ``RETURNING id`` to the query, so it only works if your primary key is @@ -228,18 +245,18 @@ proc insertID*(db: DbConn, query: SqlQuery, proc execAffectedRows*(db: DbConn, query: SqlQuery, args: varargs[string, `$`]): int64 {. - tags: [FReadDb, FWriteDb].} = + tags: [ReadDbEffect, WriteDbEffect].} = ## executes the query (typically "UPDATE") and returns the ## number of affected rows. exec(db, query, args) result = changes(db) -proc close*(db: DbConn) {.tags: [FDb].} = +proc close*(db: DbConn) {.tags: [DbEffect].} = ## closes the database connection. if sqlite3.close(db) != SQLITE_OK: dbError(db) proc open*(connection, user, password, database: string): DbConn {. - tags: [FDb].} = + tags: [DbEffect].} = ## opens a database connection. Raises `EDb` if the connection could not ## be established. Only the ``connection`` parameter is used for ``sqlite``. var db: DbConn @@ -249,7 +266,7 @@ proc open*(connection, user, password, database: string): DbConn {. dbError(db) proc setEncoding*(connection: DbConn, encoding: string): bool {. - tags: [FDb].} = + tags: [DbEffect].} = ## sets the encoding of a database connection, returns true for ## success, false for failure. ## diff --git a/lib/js/dom.nim b/lib/js/dom.nim index b063fa838..11df959d7 100644 --- a/lib/js/dom.nim +++ b/lib/js/dom.nim @@ -14,36 +14,35 @@ when not defined(js) and not defined(Nimdoc): {.error: "This module only works on the JavaScript platform".} type - TEventHandlers* {.importc.} = object of RootObj - onabort*: proc (event: ref TEvent) {.nimcall.} - onblur*: proc (event: ref TEvent) {.nimcall.} - onchange*: proc (event: ref TEvent) {.nimcall.} - onclick*: proc (event: ref TEvent) {.nimcall.} - ondblclick*: proc (event: ref TEvent) {.nimcall.} - onerror*: proc (event: ref TEvent) {.nimcall.} - onfocus*: proc (event: ref TEvent) {.nimcall.} - onkeydown*: proc (event: ref TEvent) {.nimcall.} - onkeypress*: proc (event: ref TEvent) {.nimcall.} - onkeyup*: proc (event: ref TEvent) {.nimcall.} - onload*: proc (event: ref TEvent) {.nimcall.} - onmousedown*: proc (event: ref TEvent) {.nimcall.} - onmousemove*: proc (event: ref TEvent) {.nimcall.} - onmouseout*: proc (event: ref TEvent) {.nimcall.} - onmouseover*: proc (event: ref TEvent) {.nimcall.} - onmouseup*: proc (event: ref TEvent) {.nimcall.} - onreset*: proc (event: ref TEvent) {.nimcall.} - onselect*: proc (event: ref TEvent) {.nimcall.} - onsubmit*: proc (event: ref TEvent) {.nimcall.} - onunload*: proc (event: ref TEvent) {.nimcall.} - - addEventListener*: proc(ev: cstring, cb: proc(ev: ref TEvent), useCapture: bool = false) {.nimcall.} + EventTarget* = ref EventTargetObj + EventTargetObj {.importc.} = object of RootObj + onabort*: proc (event: Event) {.nimcall.} + onblur*: proc (event: Event) {.nimcall.} + onchange*: proc (event: Event) {.nimcall.} + onclick*: proc (event: Event) {.nimcall.} + ondblclick*: proc (event: Event) {.nimcall.} + onerror*: proc (event: Event) {.nimcall.} + onfocus*: proc (event: Event) {.nimcall.} + onkeydown*: proc (event: Event) {.nimcall.} + onkeypress*: proc (event: Event) {.nimcall.} + onkeyup*: proc (event: Event) {.nimcall.} + onload*: proc (event: Event) {.nimcall.} + onmousedown*: proc (event: Event) {.nimcall.} + onmousemove*: proc (event: Event) {.nimcall.} + onmouseout*: proc (event: Event) {.nimcall.} + onmouseover*: proc (event: Event) {.nimcall.} + onmouseup*: proc (event: Event) {.nimcall.} + onreset*: proc (event: Event) {.nimcall.} + onselect*: proc (event: Event) {.nimcall.} + onsubmit*: proc (event: 
Event) {.nimcall.} + onunload*: proc (event: Event) {.nimcall.} Window* = ref WindowObj - WindowObj {.importc.} = object of TEventHandlers + WindowObj {.importc.} = object of EventTargetObj document*: Document - event*: ref TEvent - history*: ref THistory - location*: ref TLocation + event*: Event + history*: History + location*: Location closed*: bool defaultStatus*: cstring innerHeight*, innerWidth*: int @@ -57,50 +56,15 @@ type statusbar*: ref TStatusBar status*: cstring toolbar*: ref TToolBar - - alert*: proc (msg: cstring) {.nimcall.} - back*: proc () {.nimcall.} - blur*: proc () {.nimcall.} - captureEvents*: proc (eventMask: int) {.nimcall.} - clearInterval*: proc (interval: ref TInterval) {.nimcall.} - clearTimeout*: proc (timeout: ref TTimeOut) {.nimcall.} - close*: proc () {.nimcall.} - confirm*: proc (msg: cstring): bool {.nimcall.} - disableExternalCapture*: proc () {.nimcall.} - enableExternalCapture*: proc () {.nimcall.} - find*: proc (text: cstring, caseSensitive = false, - backwards = false) {.nimcall.} - focus*: proc () {.nimcall.} - forward*: proc () {.nimcall.} - handleEvent*: proc (e: ref TEvent) {.nimcall.} - home*: proc () {.nimcall.} - moveBy*: proc (x, y: int) {.nimcall.} - moveTo*: proc (x, y: int) {.nimcall.} - open*: proc (uri, windowname: cstring, - properties: cstring = nil): Window {.nimcall.} - print*: proc () {.nimcall.} - prompt*: proc (text, default: cstring): cstring {.nimcall.} - releaseEvents*: proc (eventMask: int) {.nimcall.} - resizeBy*: proc (x, y: int) {.nimcall.} - resizeTo*: proc (x, y: int) {.nimcall.} - routeEvent*: proc (event: ref TEvent) {.nimcall.} - scrollBy*: proc (x, y: int) {.nimcall.} - scrollTo*: proc (x, y: int) {.nimcall.} - setInterval*: proc (code: cstring, pause: int): ref TInterval {.nimcall.} - setTimeout*: proc (code: cstring, pause: int): ref TTimeOut {.nimcall.} - stop*: proc () {.nimcall.} frames*: seq[TFrame] Frame* = ref FrameObj FrameObj {.importc.} = object of WindowObj - ClassList* {.importc.} = object of RootObj - add*: proc (class: cstring) {.nimcall.} - remove*: proc (class: cstring) {.nimcall.} - contains*: proc (class: cstring):bool {.nimcall.} - toggle*: proc (class: cstring) {.nimcall.} + ClassList* = ref ClassListObj + ClassListObj {.importc.} = object of RootObj - TNodeType* = enum + NodeType* = enum ElementNode = 1, AttributeNode, TextNode, @@ -115,7 +79,7 @@ type NotationNode Node* = ref NodeObj - NodeObj {.importc.} = object of TEventHandlers + NodeObj {.importc.} = object of EventTargetObj attributes*: seq[Node] childNodes*: seq[Node] children*: seq[Node] @@ -124,29 +88,12 @@ type lastChild*: Node nextSibling*: Node nodeName*: cstring - nodeType*: TNodeType + nodeType*: NodeType nodeValue*: cstring parentNode*: Node previousSibling*: Node - appendChild*: proc (child: Node) {.nimcall.} - appendData*: proc (data: cstring) {.nimcall.} - cloneNode*: proc (copyContent: bool): Node {.nimcall.} - deleteData*: proc (start, len: int) {.nimcall.} - getAttribute*: proc (attr: cstring): cstring {.nimcall.} - getAttributeNode*: proc (attr: cstring): Node {.nimcall.} - hasChildNodes*: proc (): bool {.nimcall.} innerHTML*: cstring - insertBefore*: proc (newNode, before: Node) {.nimcall.} - insertData*: proc (position: int, data: cstring) {.nimcall.} - removeAttribute*: proc (attr: cstring) {.nimcall.} - removeAttributeNode*: proc (attr: Node) {.nimcall.} - removeChild*: proc (child: Node) {.nimcall.} - replaceChild*: proc (newNode, oldNode: Node) {.nimcall.} - replaceData*: proc (start, len: int, text: cstring) 
{.nimcall.} - scrollIntoView*: proc () {.nimcall.} - setAttribute*: proc (name, value: cstring) {.nimcall.} - setAttributeNode*: proc (attr: Node) {.nimcall.} - style*: ref TStyle + style*: Style Document* = ref DocumentObj DocumentObj {.importc.} = object of NodeObj @@ -164,31 +111,16 @@ type title*: cstring URL*: cstring vlinkColor*: cstring - captureEvents*: proc (eventMask: int) {.nimcall.} - createAttribute*: proc (identifier: cstring): Node {.nimcall.} - createElement*: proc (identifier: cstring): Element {.nimcall.} - createTextNode*: proc (identifier: cstring): Node {.nimcall.} - getElementById*: proc (id: cstring): Element {.nimcall.} - getElementsByName*: proc (name: cstring): seq[Element] {.nimcall.} - getElementsByTagName*: proc (name: cstring): seq[Element] {.nimcall.} - getElementsByClassName*: proc (name: cstring): seq[Element] {.nimcall.} - getSelection*: proc (): cstring {.nimcall.} - handleEvent*: proc (event: ref TEvent) {.nimcall.} - open*: proc () {.nimcall.} - releaseEvents*: proc (eventMask: int) {.nimcall.} - routeEvent*: proc (event: ref TEvent) {.nimcall.} - write*: proc (text: cstring) {.nimcall.} - writeln*: proc (text: cstring) {.nimcall.} anchors*: seq[AnchorElement] forms*: seq[FormElement] images*: seq[ImageElement] - applets*: seq[ref TApplet] + applets*: seq[Element] embeds*: seq[EmbedElement] links*: seq[LinkElement] Element* = ref ElementObj ElementObj {.importc.} = object of NodeObj - classList*: ref Classlist + classList*: Classlist checked*: bool defaultChecked*: bool defaultValue*: cstring @@ -196,14 +128,7 @@ type form*: FormElement name*: cstring readOnly*: bool - blur*: proc () {.nimcall.} - click*: proc () {.nimcall.} - focus*: proc () {.nimcall.} - handleEvent*: proc (event: ref TEvent) {.nimcall.} - select*: proc () {.nimcall.} options*: seq[OptionElement] - getElementsByTagName*: proc (name: cstring): seq[Element] {.nimcall.} - getElementsByClassName*: proc (name: cstring): seq[Element] {.nimcall.} LinkElement* = ref LinkObj LinkObj {.importc.} = object of ElementObj @@ -220,16 +145,12 @@ type width*: int `type`*: cstring vspace*: int - play*: proc () {.nimcall.} - stop*: proc () {.nimcall.} AnchorElement* = ref AnchorObj AnchorObj {.importc.} = object of ElementObj text*: cstring x*, y*: int - TApplet* {.importc.} = object of RootObj - OptionElement* = ref OptionObj OptionObj {.importc.} = object of ElementObj defaultSelected*: bool @@ -244,8 +165,6 @@ type encoding*: cstring `method`*: cstring target*: cstring - reset*: proc () {.nimcall.} - submit*: proc () {.nimcall.} elements*: seq[Element] ImageElement* = ref ImageObj @@ -259,8 +178,8 @@ type vspace*: int width*: int - - TStyle* {.importc.} = object of RootObj + Style = ref StyleObj + StyleObj {.importc.} = object of RootObj background*: cstring backgroundAttachment*: cstring backgroundColor*: cstring @@ -350,11 +269,9 @@ type width*: cstring wordSpacing*: cstring zIndex*: int - getAttribute*: proc (attr: cstring, caseSensitive=false): cstring {.nimcall.} - removeAttribute*: proc (attr: cstring, caseSensitive=false) {.nimcall.} - setAttribute*: proc (attr, value: cstring, caseSensitive=false) {.nimcall.} - TEvent* {.importc.} = object of RootObj + Event* = ref EventObj + EventObj {.importc.} = object of RootObj target*: Node altKey*, ctrlKey*, shiftKey*: bool button*: int @@ -393,7 +310,8 @@ type SUBMIT*: int UNLOAD*: int - TLocation* {.importc.} = object of RootObj + Location* = ref LocationObj + LocationObj {.importc.} = object of RootObj hash*: cstring host*: cstring hostname*: 
cstring @@ -402,16 +320,13 @@ type port*: cstring protocol*: cstring search*: cstring - reload*: proc () {.nimcall.} - replace*: proc (s: cstring) {.nimcall.} - THistory* {.importc.} = object of RootObj + History* = ref HistoryObj + HistoryObj {.importc.} = object of RootObj length*: int - back*: proc () {.nimcall.} - forward*: proc () {.nimcall.} - go*: proc (pagesToJump: int) {.nimcall.} - TNavigator* {.importc.} = object of RootObj + Navigator* = ref NavigatorObj + NavigatorObj {.importc.} = object of RootObj appCodeName*: cstring appName*: cstring appVersion*: cstring @@ -419,7 +334,6 @@ type language*: cstring platform*: cstring userAgent*: cstring - javaEnabled*: proc (): bool {.nimcall.} mimeTypes*: seq[ref TMimeType] TPlugin* {.importc.} = object of RootObj @@ -441,7 +355,8 @@ type TToolBar* = TLocationBar TStatusBar* = TLocationBar - TScreen* {.importc.} = object of RootObj + Screen = ref ScreenObj + ScreenObj {.importc.} = object of RootObj availHeight*: int availWidth*: int colorDepth*: int @@ -452,11 +367,127 @@ type TTimeOut* {.importc.} = object of RootObj TInterval* {.importc.} = object of RootObj +{.push importcpp.} + +# EventTarget "methods" +proc addEventListener*(et: EventTarget, ev: cstring, cb: proc(ev: Event), useCapture: bool = false) + +# Window "methods" +proc alert*(w: Window, msg: cstring) +proc back*(w: Window) +proc blur*(w: Window) +proc captureEvents*(w: Window, eventMask: int) {.deprecated.} +proc clearInterval*(w: Window, interval: ref TInterval) +proc clearTimeout*(w: Window, timeout: ref TTimeOut) +proc close*(w: Window) +proc confirm*(w: Window, msg: cstring): bool +proc disableExternalCapture*(w: Window) +proc enableExternalCapture*(w: Window) +proc find*(w: Window, text: cstring, caseSensitive = false, + backwards = false) +proc focus*(w: Window) +proc forward*(w: Window) +proc handleEvent*(w: Window, e: Event) +proc home*(w: Window) +proc moveBy*(w: Window, x, y: int) +proc moveTo*(w: Window, x, y: int) +proc open*(w: Window, uri, windowname: cstring, + properties: cstring = nil): Window +proc print*(w: Window) +proc prompt*(w: Window, text, default: cstring): cstring +proc releaseEvents*(w: Window, eventMask: int) {.deprecated.} +proc resizeBy*(w: Window, x, y: int) +proc resizeTo*(w: Window, x, y: int) +proc routeEvent*(w: Window, event: Event) +proc scrollBy*(w: Window, x, y: int) +proc scrollTo*(w: Window, x, y: int) +proc setInterval*(w: Window, code: cstring, pause: int): ref TInterval +proc setTimeout*(w: Window, code: cstring, pause: int): ref TTimeOut +proc stop*(w: Window) + +# Node "methods" +proc appendChild*(n, child: Node) +proc appendData*(n: Node, data: cstring) +proc cloneNode*(n: Node, copyContent: bool): Node +proc deleteData*(n: Node, start, len: int) +proc getAttribute*(n: Node, attr: cstring): cstring +proc getAttributeNode*(n: Node, attr: cstring): Node +proc hasChildNodes*(n: Node): bool +proc insertBefore*(n, newNode, before: Node) +proc insertData*(n: Node, position: int, data: cstring) +proc removeAttribute*(n: Node, attr: cstring) +proc removeAttributeNode*(n, attr: Node) +proc removeChild*(n, child: Node) +proc replaceChild*(n, newNode, oldNode: Node) +proc replaceData*(n: Node, start, len: int, text: cstring) +proc scrollIntoView*(n: Node) +proc setAttribute*(n: Node, name, value: cstring) +proc setAttributeNode*(n: Node, attr: Node) + +# Document "methods" +proc captureEvents*(d: Document, eventMask: int) {.deprecated.} +proc createAttribute*(d: Document, identifier: cstring): Node +proc createElement*(d: Document, 
identifier: cstring): Element +proc createTextNode*(d: Document, identifier: cstring): Node +proc getElementById*(d: Document, id: cstring): Element +proc getElementsByName*(d: Document, name: cstring): seq[Element] +proc getElementsByTagName*(d: Document, name: cstring): seq[Element] +proc getElementsByClassName*(d: Document, name: cstring): seq[Element] +proc getSelection*(d: Document): cstring +proc handleEvent*(d: Document, event: Event) +proc open*(d: Document) +proc releaseEvents*(d: Document, eventMask: int) {.deprecated.} +proc routeEvent*(d: Document, event: Event) +proc write*(d: Document, text: cstring) +proc writeln*(d: Document, text: cstring) + +# Element "methods" +proc blur*(e: Element) +proc click*(e: Element) +proc focus*(e: Element) +proc handleEvent*(e: Element, event: Event) +proc select*(e: Element) +proc getElementsByTagName*(e: Element, name: cstring): seq[Element] +proc getElementsByClassName*(e: Element, name: cstring): seq[Element] + +# FormElement "methods" +proc reset*(f: FormElement) +proc submit*(f: FormElement) + +# EmbedElement "methods" +proc play*(e: EmbedElement) +proc stop*(e: EmbedElement) + +# Location "methods" +proc reload*(loc: Location) +proc replace*(loc: Location, s: cstring) + +# History "methods" +proc back*(h: History) +proc forward*(h: History) +proc go*(h: History, pagesToJump: int) + +# Navigator "methods" +proc javaEnabled*(h: Navigator): bool + +# ClassList "methods" +proc add*(c: ClassList, class: cstring) +proc remove*(c: ClassList, class: cstring) +proc contains*(c: ClassList, class: cstring):bool +proc toggle*(c: ClassList, class: cstring) + +# Style "methods" +proc getAttribute*(s: Style, attr: cstring, caseSensitive=false): cstring +proc removeAttribute*(s: Style, attr: cstring, caseSensitive=false) +proc setAttribute*(s: Style, attr, value: cstring, caseSensitive=false) + +{.pop.} + var window* {.importc, nodecl.}: Window document* {.importc, nodecl.}: Document - navigator* {.importc, nodecl.}: ref TNavigator - screen* {.importc, nodecl.}: ref TScreen + navigator* {.importc, nodecl.}: Navigator + screen* {.importc, nodecl.}: Screen proc decodeURI*(uri: cstring): cstring {.importc, nodecl.} proc encodeURI*(uri: cstring): cstring {.importc, nodecl.} @@ -474,6 +505,7 @@ proc parseInt*(s: cstring, radix: int):int {.importc, nodecl.} type + TEventHandlers* {.deprecated.} = EventTargetObj TWindow* {.deprecated.} = WindowObj TFrame* {.deprecated.} = FrameObj TNode* {.deprecated.} = NodeObj @@ -485,3 +517,11 @@ type TOption* {.deprecated.} = OptionObj TForm* {.deprecated.} = FormObj TImage* {.deprecated.} = ImageObj + TNodeType* {.deprecated.} = NodeType + TEvent* {.deprecated.} = EventObj + TLocation* {.deprecated.} = LocationObj + THistory* {.deprecated.} = HistoryObj + TNavigator* {.deprecated.} = NavigatorObj + TStyle* {.deprecated.} = StyleObj + TScreen* {.deprecated.} = ScreenObj + TApplet* {.importc, deprecated.} = object of RootObj diff --git a/lib/nimbase.h b/lib/nimbase.h index 0946b9a1f..bba5ac023 100644 --- a/lib/nimbase.h +++ b/lib/nimbase.h @@ -418,10 +418,6 @@ typedef int assert_numbits[sizeof(NI) == sizeof(void*) && NIM_INTBITS == sizeof( # define NIM_EXTERNC #endif -/* we have to tinker with TNimType as it's both part of system.nim and - typeinfo.nim but system.nim doesn't export it cleanly... 
*/ -typedef struct TNimType TNimType; - /* ---------------- platform specific includes ----------------------- */ /* VxWorks related includes */ diff --git a/lib/packages/docutils/highlite.nim b/lib/packages/docutils/highlite.nim index 640b8cd5a..1bc0af1b6 100644 --- a/lib/packages/docutils/highlite.nim +++ b/lib/packages/docutils/highlite.nim @@ -173,7 +173,41 @@ proc nimNextToken(g: var GeneralTokenizer) = while g.buf[pos] in {' ', '\x09'..'\x0D'}: inc(pos) of '#': g.kind = gtComment - while not (g.buf[pos] in {'\0', '\x0A', '\x0D'}): inc(pos) + inc(pos) + var isDoc = false + if g.buf[pos] == '#': + inc(pos) + isDoc = true + if g.buf[pos] == '[': + g.kind = gtLongComment + var nesting = 0 + while true: + case g.buf[pos] + of '\0': break + of '#': + if isDoc: + if g.buf[pos+1] == '#' and g.buf[pos+2] == '[': + inc nesting + elif g.buf[pos+1] == '[': + inc nesting + inc pos + of ']': + if isDoc: + if g.buf[pos+1] == '#' and g.buf[pos+2] == '#': + if nesting == 0: + inc(pos, 3) + break + dec nesting + elif g.buf[pos+1] == '#': + if nesting == 0: + inc(pos, 2) + break + dec nesting + inc pos + else: + inc pos + else: + while g.buf[pos] notin {'\0', '\x0A', '\x0D'}: inc(pos) of 'a'..'z', 'A'..'Z', '_', '\x80'..'\xFF': var id = "" while g.buf[pos] in SymChars + {'_'}: diff --git a/lib/packages/docutils/rstgen.nim b/lib/packages/docutils/rstgen.nim index 4a0304a7c..22d944597 100644 --- a/lib/packages/docutils/rstgen.nim +++ b/lib/packages/docutils/rstgen.nim @@ -534,7 +534,7 @@ proc generateDocumentationJumps(docs: IndexedDocs): string = for title in titles: chunks.add("<a href=\"" & title.link & "\">" & title.keyword & "</a>") - result.add(chunks.join(", ") & ".<br>") + result.add(chunks.join(", ") & ".<br/>") proc generateModuleJumps(modules: seq[string]): string = ## Returns a plain list of hyperlinks to the list of modules. @@ -544,7 +544,7 @@ proc generateModuleJumps(modules: seq[string]): string = for name in modules: chunks.add("<a href=\"" & name & ".html\">" & name & "</a>") - result.add(chunks.join(", ") & ".<br>") + result.add(chunks.join(", ") & ".<br/>") proc readIndexDir(dir: string): tuple[modules: seq[string], symbols: seq[IndexEntry], docs: IndexedDocs] = diff --git a/lib/pure/asyncftpclient.nim b/lib/pure/asyncftpclient.nim index b806f4235..fd899e080 100644 --- a/lib/pure/asyncftpclient.nim +++ b/lib/pure/asyncftpclient.nim @@ -288,7 +288,7 @@ proc defaultOnProgressChanged*(total, progress: BiggestInt, result.complete() proc retrFile*(ftp: AsyncFtpClient, file, dest: string, - onProgressChanged = defaultOnProgressChanged) {.async.} = + onProgressChanged: ProgressChangedProc = defaultOnProgressChanged) {.async.} = ## Downloads ``file`` and saves it to ``dest``. ## The ``EvRetr`` event is passed to the specified ``handleEvent`` function ## when the download is finished. The event's ``filename`` field will be equal @@ -339,7 +339,7 @@ proc doUpload(ftp: AsyncFtpClient, file: File, await countdownFut or sendFut proc store*(ftp: AsyncFtpClient, file, dest: string, - onProgressChanged = defaultOnProgressChanged) {.async.} = + onProgressChanged: ProgressChangedProc = defaultOnProgressChanged) {.async.} = ## Uploads ``file`` to ``dest`` on the remote FTP server. Usage of this ## function asynchronously is recommended to view the progress of ## the download. 
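The ``retrFile`` and ``store`` signatures above now spell out the type of the ``onProgressChanged`` parameter (``ProgressChangedProc``) instead of relying on the default's inferred type. A minimal usage sketch for the new signature; the host, credentials and file names below are placeholders, not part of the diff:

  import asyncdispatch, asyncftpclient

  proc report(total, progress: BiggestInt, speed: float): Future[void] {.async.} =
    # called with progress updates while the transfer runs
    echo progress, " of ", total, " bytes (", speed.int, " B/s)"

  proc main() {.async.} =
    let ftp = newAsyncFtpClient("example.com", user = "test", pass = "test")
    await ftp.connect()
    await ftp.retrFile("example.txt", "example.txt", onProgressChanged = report)

  waitFor main()

Any proc matching ``ProgressChangedProc`` can be passed; ``defaultOnProgressChanged`` remains the default and, as shown above, just completes its future without reporting anything.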
diff --git a/lib/pure/collections/critbits.nim b/lib/pure/collections/critbits.nim index 8c507d4fb..bb234565b 100644 --- a/lib/pure/collections/critbits.nim +++ b/lib/pure/collections/critbits.nim
@@ -232,7 +232,7 @@ iterator mpairs*[T](c: var CritBitTree[T]): tuple[key: string, val: var T] =
  ## yields all (key, value)-pairs of `c`. The yielded values can be modified.
  for x in leaves(c.root): yield (x.key, x.val)

-proc allprefixedAux[T](c: CritBitTree[T], key: string): Node[T] =
+proc allprefixedAux[T](c: CritBitTree[T], key: string; longestMatch: bool): Node[T] =
  var p = c.root
  var top = p
  if p != nil:
@@ -242,43 +242,51 @@ proc allprefixedAux[T](c: CritBitTree[T], key: string): Node[T] =
      let dir = (1 + (ch.ord or p.otherBits.ord)) shr 8
      p = p.child[dir]
      if q.byte < key.len: top = p
-    for i in 0 .. <key.len:
-      if p.key[i] != key[i]: return
+    if not longestMatch:
+      for i in 0 .. <key.len:
+        if p.key[i] != key[i]: return
    result = top

-iterator itemsWithPrefix*[T](c: CritBitTree[T], prefix: string): string =
-  ## yields all keys starting with `prefix`.
-  let top = allprefixedAux(c, prefix)
+iterator itemsWithPrefix*[T](c: CritBitTree[T], prefix: string;
+                             longestMatch=false): string =
+  ## yields all keys starting with `prefix`. If `longestMatch` is true,
+  ## the longest match is returned; it does not have to be a complete match.
+  let top = allprefixedAux(c, prefix, longestMatch)
  for x in leaves(top): yield x.key

-iterator keysWithPrefix*[T](c: CritBitTree[T], prefix: string): string =
+iterator keysWithPrefix*[T](c: CritBitTree[T], prefix: string;
+                            longestMatch=false): string =
  ## yields all keys starting with `prefix`.
-  let top = allprefixedAux(c, prefix)
+  let top = allprefixedAux(c, prefix, longestMatch)
  for x in leaves(top): yield x.key

-iterator valuesWithPrefix*[T](c: CritBitTree[T], prefix: string): T =
+iterator valuesWithPrefix*[T](c: CritBitTree[T], prefix: string;
+                              longestMatch=false): T =
  ## yields all values of `c` starting with `prefix` of the
  ## corresponding keys.
-  let top = allprefixedAux(c, prefix)
+  let top = allprefixedAux(c, prefix, longestMatch)
  for x in leaves(top): yield x.val

-iterator mvaluesWithPrefix*[T](c: var CritBitTree[T], prefix: string): var T =
+iterator mvaluesWithPrefix*[T](c: var CritBitTree[T], prefix: string;
+                               longestMatch=false): var T =
  ## yields all values of `c` starting with `prefix` of the
  ## corresponding keys. The values can be modified.
-  let top = allprefixedAux(c, prefix)
+  let top = allprefixedAux(c, prefix, longestMatch)
  for x in leaves(top): yield x.val

 iterator pairsWithPrefix*[T](c: CritBitTree[T],
-                             prefix: string): tuple[key: string, val: T] =
+                             prefix: string;
+                             longestMatch=false): tuple[key: string, val: T] =
  ## yields all (key, value)-pairs of `c` starting with `prefix`.
-  let top = allprefixedAux(c, prefix)
+  let top = allprefixedAux(c, prefix, longestMatch)
  for x in leaves(top): yield (x.key, x.val)

 iterator mpairsWithPrefix*[T](c: var CritBitTree[T],
-                              prefix: string): tuple[key: string, val: var T] =
+                              prefix: string;
+                              longestMatch=false): tuple[key: string, val: var T] =
  ## yields all (key, value)-pairs of `c` starting with `prefix`.
  ## The yielded values can be modified.
- let top = allprefixedAux(c, prefix) + let top = allprefixedAux(c, prefix, longestMatch) for x in leaves(top): yield (x.key, x.val) proc `$`*[T](c: CritBitTree[T]): string = diff --git a/lib/pure/collections/sequtils.nim b/lib/pure/collections/sequtils.nim index 71babe93b..b72face91 100644 --- a/lib/pure/collections/sequtils.nim +++ b/lib/pure/collections/sequtils.nim @@ -10,12 +10,9 @@ ## :Author: Alexander Mitchell-Robinson (Amrykid) ## ## This module implements operations for the built-in `seq`:idx: type which -## were inspired by functional programming languages. If you are looking for -## the typical `map` function which applies a function to every element in a -## sequence, it already exists in the `system <system.html>`_ module in both -## mutable and immutable styles. +## were inspired by functional programming languages. ## -## Also, for functional style programming you may want to pass `anonymous procs +## For functional style programming you may want to pass `anonymous procs ## <manual.html#anonymous-procs>`_ to procs like ``filter`` to reduce typing. ## Anonymous procs can use `the special do notation <manual.html#do-notation>`_ ## which is more convenient in certain situations. @@ -471,7 +468,7 @@ template toSeq*(iter: expr): expr {.immediate.} = ## if x mod 2 == 1: ## result = true) ## assert odd_numbers == @[1, 3, 5, 7, 9] - + when compiles(iter.len): var i = 0 var result = newSeq[type(iter)](iter.len) diff --git a/lib/pure/db_common.nim b/lib/pure/db_common.nim new file mode 100644 index 000000000..957389605 --- /dev/null +++ b/lib/pure/db_common.nim @@ -0,0 +1,103 @@ +# +# +# Nim's Runtime Library +# (c) Copyright 2015 Andreas Rumpf +# +# See the file "copying.txt", included in this +# distribution, for details about the copyright. +# + +## Common datatypes and definitions for all ``db_*.nim`` ( +## `db_mysql <db_mysql.html>`_, `db_postgres <db_postgres.html>`_, +## and `db_sqlite <db_sqlite.html>`_) modules. + +type + DbError* = object of IOError ## exception that is raised if a database error occurs + + SqlQuery* = distinct string ## an SQL query string + + + DbEffect* = object of IOEffect ## effect that denotes a database operation + ReadDbEffect* = object of DbEffect ## effect that denotes a read operation + WriteDbEffect* = object of DbEffect ## effect that denotes a write operation + + DbTypeKind* = enum ## a superset of datatypes that might be supported. + dbUnknown, ## unknown datatype + dbSerial, ## datatype used for primary auto-increment keys + dbNull, ## datatype used for the NULL value + dbBit, ## bit datatype + dbBool, ## boolean datatype + dbBlob, ## blob datatype + dbFixedChar, ## string of fixed length + dbVarchar, ## string datatype + dbJson, ## JSON datatype + dbXml, ## XML datatype + dbInt, ## some integer type + dbUInt, ## some unsigned integer type + dbDecimal, ## decimal numbers (fixed-point number) + dbFloat, ## some floating point type + dbDate, ## a year-month-day description + dbTime, ## HH:MM:SS information + dbDatetime, ## year-month-day and HH:MM:SS information, + ## plus optional time or timezone information + dbTimestamp, ## Timestamp values are stored as the number of seconds + ## since the epoch ('1970-01-01 00:00:00' UTC). 
+ dbTimeInterval, ## an interval [a,b] of times + dbEnum, ## some enum + dbSet, ## set of enum values + dbArray, ## an array of values + dbComposite, ## composite type (record, struct, etc) + dbUrl, ## a URL + dbUuid, ## a UUID + dbInet, ## an IP address + dbMacAddress, ## a MAC address + dbGeometry, ## some geometric type + dbPoint, ## Point on a plane (x,y) + dbLine, ## Infinite line ((x1,y1),(x2,y2)) + dbLseg, ## Finite line segment ((x1,y1),(x2,y2)) + dbBox, ## Rectangular box ((x1,y1),(x2,y2)) + dbPath, ## Closed or open path (similar to polygon) ((x1,y1),...) + dbPolygon, ## Polygon (similar to closed path) ((x1,y1),...) + dbCircle, ## Circle <(x,y),r> (center point and radius) + dbUser1, ## user definable datatype 1 (for unknown extensions) + dbUser2, ## user definable datatype 2 (for unknown extensions) + dbUser3, ## user definable datatype 3 (for unknown extensions) + dbUser4, ## user definable datatype 4 (for unknown extensions) + dbUser5 ## user definable datatype 5 (for unknown extensions) + + DbType* = object ## describes a database type + kind*: DbTypeKind ## the kind of the described type + notNull*: bool ## does the type contain NULL? + name*: string ## the name of the type + size*: Natural ## the size of the datatype; 0 if of variable size + maxReprLen*: Natural ## maximal length required for the representation + precision*, scale*: Natural ## precision and scale of the number + min*, max*: BiggestInt ## the minimum and maximum of allowed values + validValues*: seq[string] ## valid values of an enum or a set + + DbColumn* = object ## information about a database column + name*: string ## name of the column + tableName*: string ## name of the table the column belongs to (optional) + typ*: DbType ## type of the column + primaryKey*: bool ## is this a primary key? + foreignKey*: bool ## is this a foreign key? + DbColumns* = seq[DbColumn] + +{.deprecated: [EDb: DbError, TSqlQuery: SqlQuery, FDb: DbEffect, + FReadDb: ReadDbEffect, FWriteDb: WriteDbEffect].} + +template sql*(query: string): SqlQuery = + ## constructs a SqlQuery from the string `query`. This is supposed to be + ## used as a raw-string-literal modifier: + ## ``sql"update user set counter = counter + 1"`` + ## + ## If assertions are turned off, it does nothing. If assertions are turned + ## on, later versions will check the string for valid syntax. + SqlQuery(query) + +proc dbError*(msg: string) {.noreturn, noinline.} = + ## raises an DbError exception with message `msg`. + var e: ref DbError + new(e) + e.msg = msg + raise e diff --git a/lib/pure/events.nim b/lib/pure/events.nim index 62800c5c8..23a8a2c58 100644 --- a/lib/pure/events.nim +++ b/lib/pure/events.nim @@ -57,7 +57,7 @@ proc addHandler*(handler: var EventHandler, fn: proc(e: EventArgs) {.closure.}) proc removeHandler*(handler: var EventHandler, fn: proc(e: EventArgs) {.closure.}) = ## Removes the callback from the specified event handler. - for i in countup(0, len(handler.handlers) -1): + for i in countup(0, len(handler.handlers)-1): if fn == handler.handlers[i]: handler.handlers.del(i) break diff --git a/lib/pure/fsmonitor.nim b/lib/pure/fsmonitor.nim index 787acb5d4..115c4739e 100644 --- a/lib/pure/fsmonitor.nim +++ b/lib/pure/fsmonitor.nim @@ -34,8 +34,8 @@ type MonitorEventType* = enum ## Monitor event type MonitorAccess, ## File was accessed. MonitorAttrib, ## Metadata changed. - MonitorCloseWrite, ## Writtable file was closed. - MonitorCloseNoWrite, ## Unwrittable file closed. + MonitorCloseWrite, ## Writable file was closed. 
+ MonitorCloseNoWrite, ## Non-writable file closed. MonitorCreate, ## Subfile was created. MonitorDelete, ## Subfile was deleted. MonitorDeleteSelf, ## Watched file/directory was itself deleted. @@ -78,21 +78,21 @@ proc add*(monitor: FSMonitor, target: string, ## watched paths of ``monitor``. ## You can specify the events to report using the ``filters`` parameter. - var INFilter = -1 + var INFilter = 0 for f in filters: case f - of MonitorAccess: INFilter = INFilter and IN_ACCESS - of MonitorAttrib: INFilter = INFilter and IN_ATTRIB - of MonitorCloseWrite: INFilter = INFilter and IN_CLOSE_WRITE - of MonitorCloseNoWrite: INFilter = INFilter and IN_CLOSE_NO_WRITE - of MonitorCreate: INFilter = INFilter and IN_CREATE - of MonitorDelete: INFilter = INFilter and IN_DELETE - of MonitorDeleteSelf: INFilter = INFilter and IN_DELETE_SELF - of MonitorModify: INFilter = INFilter and IN_MODIFY - of MonitorMoveSelf: INFilter = INFilter and IN_MOVE_SELF - of MonitorMoved: INFilter = INFilter and IN_MOVED_FROM and IN_MOVED_TO - of MonitorOpen: INFilter = INFilter and IN_OPEN - of MonitorAll: INFilter = INFilter and IN_ALL_EVENTS + of MonitorAccess: INFilter = INFilter or IN_ACCESS + of MonitorAttrib: INFilter = INFilter or IN_ATTRIB + of MonitorCloseWrite: INFilter = INFilter or IN_CLOSE_WRITE + of MonitorCloseNoWrite: INFilter = INFilter or IN_CLOSE_NO_WRITE + of MonitorCreate: INFilter = INFilter or IN_CREATE + of MonitorDelete: INFilter = INFilter or IN_DELETE + of MonitorDeleteSelf: INFilter = INFilter or IN_DELETE_SELF + of MonitorModify: INFilter = INFilter or IN_MODIFY + of MonitorMoveSelf: INFilter = INFilter or IN_MOVE_SELF + of MonitorMoved: INFilter = INFilter or IN_MOVED_FROM or IN_MOVED_TO + of MonitorOpen: INFilter = INFilter or IN_OPEN + of MonitorAll: INFilter = INFilter or IN_ALL_EVENTS result = inotifyAddWatch(monitor.fd, target, INFilter.uint32) if result < 0: @@ -200,9 +200,18 @@ proc register*(d: Dispatcher, monitor: FSMonitor, when not defined(testing) and isMainModule: proc main = - var disp = newDispatcher() - var monitor = newMonitor() - echo monitor.add("/home/dom/inotifytests/") + var + disp = newDispatcher() + monitor = newMonitor() + n = 0 + n = monitor.add("/tmp") + assert n == 1 + n = monitor.add("/tmp", {MonitorAll}) + assert n == 1 + n = monitor.add("/tmp", {MonitorCloseWrite, MonitorCloseNoWrite}) + assert n == 1 + n = monitor.add("/tmp", {MonitorMoved, MonitorOpen, MonitorAccess}) + assert n == 1 disp.register(monitor, proc (m: FSMonitor, ev: MonitorEvent) = echo("Got event: ", ev.kind) diff --git a/lib/pure/httpclient.nim b/lib/pure/httpclient.nim index 8e182e274..1b91132db 100644 --- a/lib/pure/httpclient.nim +++ b/lib/pure/httpclient.nim @@ -110,7 +110,7 @@ type EInvalidProtocol: ProtocolError, EHttpRequestErr: HttpRequestError ].} -const defUserAgent* = "Nim httpclient/0.1" +const defUserAgent* = "Nim httpclient/" & NimVersion proc httpError(msg: string) = var e: ref ProtocolError @@ -389,6 +389,7 @@ proc request*(url: string, httpMethod: string, extraHeaders = "", ## | An optional timeout can be specified in milliseconds, if reading from the ## server takes longer than specified an ETimeout exception will be raised. var r = if proxy == nil: parseUri(url) else: proxy.url + var hostUrl = if proxy == nil: r else: parseUri(url) var headers = substr(httpMethod, len("http")) # TODO: Use generateHeaders further down once it supports proxies. 
if proxy == nil: @@ -402,10 +403,10 @@ proc request*(url: string, httpMethod: string, extraHeaders = "", headers.add(" HTTP/1.1\c\L") - if r.port == "": - add(headers, "Host: " & r.hostname & "\c\L") + if hostUrl.port == "": + add(headers, "Host: " & hostUrl.hostname & "\c\L") else: - add(headers, "Host: " & r.hostname & ":" & r.port & "\c\L") + add(headers, "Host: " & hostUrl.hostname & ":" & hostUrl.port & "\c\L") if userAgent != "": add(headers, "User-Agent: " & userAgent & "\c\L") @@ -414,7 +415,6 @@ proc request*(url: string, httpMethod: string, extraHeaders = "", add(headers, "Proxy-Authorization: basic " & auth & "\c\L") add(headers, extraHeaders) add(headers, "\c\L") - var s = newSocket() if s == nil: raiseOSError(osLastError()) var port = net.Port(80) diff --git a/lib/pure/lexbase.nim b/lib/pure/lexbase.nim index bfecf6a58..cf2e8bb89 100644 --- a/lib/pure/lexbase.nim +++ b/lib/pure/lexbase.nim @@ -28,7 +28,10 @@ type BaseLexer* = object of RootObj ## the base lexer. Inherit your lexer from ## this object. bufpos*: int ## the current position within the buffer - buf*: cstring ## the buffer itself + when defined(js): ## the buffer itself + buf*: string + else: + buf*: cstring bufLen*: int ## length of buffer in characters input: Stream ## the input stream lineNumber*: int ## the current line number @@ -43,7 +46,8 @@ const proc close*(L: var BaseLexer) = ## closes the base lexer. This closes `L`'s associated stream too. - dealloc(L.buf) + when not defined(js): + dealloc(L.buf) close(L.input) proc fillBuffer(L: var BaseLexer) = @@ -58,8 +62,11 @@ proc fillBuffer(L: var BaseLexer) = toCopy = L.bufLen - L.sentinel - 1 assert(toCopy >= 0) if toCopy > 0: - moveMem(L.buf, addr(L.buf[L.sentinel + 1]), toCopy * chrSize) - # "moveMem" handles overlapping regions + when defined(js): + for i in 0 ..< toCopy: L.buf[i] = L.buf[L.sentinel + 1 + i] + else: + # "moveMem" handles overlapping regions + moveMem(L.buf, addr L.buf[L.sentinel + 1], toCopy * chrSize) charsRead = readData(L.input, addr(L.buf[toCopy]), (L.sentinel + 1) * chrSize) div chrSize s = toCopy + charsRead @@ -81,7 +88,10 @@ proc fillBuffer(L: var BaseLexer) = # double the buffer's size and try again: oldBufLen = L.bufLen L.bufLen = L.bufLen * 2 - L.buf = cast[cstring](realloc(L.buf, L.bufLen * chrSize)) + when defined(js): + L.buf.setLen(L.bufLen) + else: + L.buf = cast[cstring](realloc(L.buf, L.bufLen * chrSize)) assert(L.bufLen - oldBufLen == oldBufLen) charsRead = readData(L.input, addr(L.buf[oldBufLen]), oldBufLen * chrSize) div chrSize @@ -139,7 +149,10 @@ proc open*(L: var BaseLexer, input: Stream, bufLen: int = 8192; L.bufpos = 0 L.bufLen = bufLen L.refillChars = refillChars - L.buf = cast[cstring](alloc(bufLen * chrSize)) + when defined(js): + L.buf = newString(bufLen) + else: + L.buf = cast[cstring](alloc(bufLen * chrSize)) L.sentinel = bufLen - 1 L.lineStart = 0 L.lineNumber = 1 # lines start at 1 diff --git a/lib/pure/nativesockets.nim b/lib/pure/nativesockets.nim index c9e067a3e..b5a8d5777 100644 --- a/lib/pure/nativesockets.nim +++ b/lib/pure/nativesockets.nim @@ -203,9 +203,12 @@ proc getAddrInfo*(address: string, port: Port, domain: Domain = AF_INET, hints.ai_family = toInt(domain) hints.ai_socktype = toInt(sockType) hints.ai_protocol = toInt(protocol) + # OpenBSD doesn't support AI_V4MAPPED and doesn't define the macro AI_V4MAPPED. + # FreeBSD doesn't support AI_V4MAPPED but defines the macro. 
# https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=198092 - when not defined(freebsd): - hints.ai_flags = AI_V4MAPPED + when not defined(freebsd) and not defined(openbsd) and not defined(netbsd): + if domain == AF_INET6: + hints.ai_flags = AI_V4MAPPED var gaiResult = getaddrinfo(address, $port, addr(hints), result) if gaiResult != 0'i32: when useWinVersion: diff --git a/lib/pure/nimprof.nim b/lib/pure/nimprof.nim index cfe6bc40d..e2397b91c 100644 --- a/lib/pure/nimprof.nim +++ b/lib/pure/nimprof.nim @@ -1,7 +1,7 @@ # # # Nim's Runtime Library -# (c) Copyright 2012 Andreas Rumpf +# (c) Copyright 2015 Andreas Rumpf # # See the file "copying.txt", included in this # distribution, for details about the copyright. @@ -117,24 +117,38 @@ when defined(memProfiler): var gTicker {.threadvar.}: int - proc hook(st: StackTrace, size: int) {.nimcall.} = + proc requestedHook(): bool {.nimcall.} = if gTicker == 0: - gTicker = -1 - when defined(ignoreAllocationSize): - hookAux(st, 1) - else: - hookAux(st, size) gTicker = SamplingInterval + result = true dec gTicker + proc hook(st: StackTrace, size: int) {.nimcall.} = + when defined(ignoreAllocationSize): + hookAux(st, 1) + else: + hookAux(st, size) + else: var t0 {.threadvar.}: Ticks + gTicker: int # we use an additional counter to + # avoid calling 'getTicks' too frequently + + proc requestedHook(): bool {.nimcall.} = + if interval == 0: result = true + elif gTicker == 0: + gTicker = 500 + if getTicks() - t0 > interval: + result = true + else: + dec gTicker proc hook(st: StackTrace) {.nimcall.} = + #echo "profiling! ", interval if interval == 0: hookAux(st, 1) - elif int64(t0) == 0 or getTicks() - t0 > interval: + else: hookAux(st, 1) t0 = getTicks() @@ -145,9 +159,10 @@ proc cmpEntries(a, b: ptr ProfileEntry): int = result = b.getTotal - a.getTotal proc `//`(a, b: int): string = - result = format("$1/$2 = $3%", a, b, formatFloat(a / b * 100.0, ffDefault, 2)) + result = format("$1/$2 = $3%", a, b, formatFloat(a / b * 100.0, ffDecimal, 2)) proc writeProfile() {.noconv.} = + system.profilingRequestedHook = nil when declared(system.StackTrace): system.profilerHook = nil const filename = "profile_results.txt" @@ -193,14 +208,15 @@ var proc disableProfiling*() = when declared(system.StackTrace): atomicDec disabled - system.profilerHook = nil + system.profilingRequestedHook = nil proc enableProfiling*() = when declared(system.StackTrace): if atomicInc(disabled) >= 0: - system.profilerHook = hook + system.profilingRequestedHook = requestedHook when declared(system.StackTrace): + system.profilingRequestedHook = requestedHook system.profilerHook = hook addQuitProc(writeProfile) diff --git a/lib/pure/options.nim b/lib/pure/options.nim index 3122d58b1..2abb80016 100644 --- a/lib/pure/options.nim +++ b/lib/pure/options.nim @@ -28,7 +28,7 @@ ## ## .. 
code-block:: nim ## -## import optionals +## import options ## ## proc find(haystack: string, needle: char): Option[int] = ## for i, c in haystack: @@ -156,7 +156,7 @@ proc `$`*[T]( self: Option[T] ): string = when isMainModule: import unittest, sequtils - suite "optionals": + suite "options": # work around a bug in unittest let intNone = none(int) let stringNone = none(string) diff --git a/lib/pure/os.nim b/lib/pure/os.nim index c01228563..1e00f92b1 100644 --- a/lib/pure/os.nim +++ b/lib/pure/os.nim @@ -810,6 +810,10 @@ type {.deprecated: [TPathComponent: PathComponent].} +proc staticWalkDir(dir: string; relative: bool): seq[ + tuple[kind: PathComponent, path: string]] = + discard + iterator walkDir*(dir: string; relative=false): tuple[kind: PathComponent, path: string] {. tags: [ReadDirEffect].} = ## walks over the directory `dir` and yields for each directory or file in @@ -833,49 +837,53 @@ iterator walkDir*(dir: string; relative=false): tuple[kind: PathComponent, path: ## dirA/dirC ## dirA/fileA1.txt ## dirA/fileA2.txt - when defined(windows): - var f: WIN32_FIND_DATA - var h = findFirstFile(dir / "*", f) - if h != -1: - while true: - var k = pcFile - if not skipFindData(f): - if (f.dwFileAttributes and FILE_ATTRIBUTE_DIRECTORY) != 0'i32: - k = pcDir - if (f.dwFileAttributes and FILE_ATTRIBUTE_REPARSE_POINT) != 0'i32: - k = succ(k) - let xx = if relative: extractFilename(getFilename(f)) - else: dir / extractFilename(getFilename(f)) - yield (k, xx) - if findNextFile(h, f) == 0'i32: break - findClose(h) + when nimvm: + for k, v in items(staticWalkDir(dir, relative)): + yield (k, v) else: - var d = opendir(dir) - if d != nil: - while true: - var x = readdir(d) - if x == nil: break - var y = $x.d_name - if y != "." and y != "..": - var s: Stat - if not relative: - y = dir / y + when defined(windows): + var f: WIN32_FIND_DATA + var h = findFirstFile(dir / "*", f) + if h != -1: + while true: var k = pcFile - - when defined(linux) or defined(macosx) or defined(bsd): - if x.d_type != DT_UNKNOWN: - if x.d_type == DT_DIR: k = pcDir - if x.d_type == DT_LNK: - if dirExists(y): k = pcLinkToDir - else: k = succ(k) - yield (k, y) - continue - - if lstat(y, s) < 0'i32: break - if S_ISDIR(s.st_mode): k = pcDir - if S_ISLNK(s.st_mode): k = succ(k) - yield (k, y) - discard closedir(d) + if not skipFindData(f): + if (f.dwFileAttributes and FILE_ATTRIBUTE_DIRECTORY) != 0'i32: + k = pcDir + if (f.dwFileAttributes and FILE_ATTRIBUTE_REPARSE_POINT) != 0'i32: + k = succ(k) + let xx = if relative: extractFilename(getFilename(f)) + else: dir / extractFilename(getFilename(f)) + yield (k, xx) + if findNextFile(h, f) == 0'i32: break + findClose(h) + else: + var d = opendir(dir) + if d != nil: + while true: + var x = readdir(d) + if x == nil: break + var y = $x.d_name + if y != "." and y != "..": + var s: Stat + if not relative: + y = dir / y + var k = pcFile + + when defined(linux) or defined(macosx) or defined(bsd): + if x.d_type != DT_UNKNOWN: + if x.d_type == DT_DIR: k = pcDir + if x.d_type == DT_LNK: + if dirExists(y): k = pcLinkToDir + else: k = succ(k) + yield (k, y) + continue + + if lstat(y, s) < 0'i32: break + if S_ISDIR(s.st_mode): k = pcDir + if S_ISLNK(s.st_mode): k = succ(k) + yield (k, y) + discard closedir(d) iterator walkDirRec*(dir: string, filter={pcFile, pcDir}): string {. 
tags: [ReadDirEffect].} = @@ -1353,7 +1361,7 @@ proc getAppFilename*(): string {.rtl, extern: "nos$1", tags: [ReadIOEffect].} = # /proc/<pid>/file when defined(windows): when useWinUnicode: - var buf = cast[WideCString](alloc(256*2)) + var buf = newWideCString("", 256) var len = getModuleFileNameW(0, buf, 256) result = buf$len else: diff --git a/lib/pure/osproc.nim b/lib/pure/osproc.nim index de9e63909..8560c3ee4 100644 --- a/lib/pure/osproc.nim +++ b/lib/pure/osproc.nim @@ -886,7 +886,7 @@ elif not defined(useNimRtl): discard write(data.pErrorPipe[writeIdx], addr error, sizeof(error)) exitnow(1) - when defined(macosx) or defined(freebsd): + when defined(macosx) or defined(freebsd) or defined(netbsd) or defined(android): var environ {.importc.}: cstringArray proc startProcessAfterFork(data: ptr StartProcessData) = @@ -916,7 +916,7 @@ elif not defined(useNimRtl): discard fcntl(data.pErrorPipe[writeIdx], F_SETFD, FD_CLOEXEC) if data.optionPoUsePath: - when defined(macosx) or defined(freebsd): + when defined(macosx) or defined(freebsd) or defined(netbsd) or defined(android): # MacOSX doesn't have execvpe, so we need workaround. # On MacOSX we can arrive here only from fork, so this is safe: environ = data.sysEnv @@ -937,9 +937,10 @@ elif not defined(useNimRtl): if p.inStream != nil: close(p.inStream) if p.outStream != nil: close(p.outStream) if p.errStream != nil: close(p.errStream) - discard close(p.inHandle) - discard close(p.outHandle) - discard close(p.errHandle) + if poParentStreams notin p.options: + discard close(p.inHandle) + discard close(p.outHandle) + discard close(p.errHandle) proc suspend(p: Process) = if kill(p.id, SIGSTOP) != 0'i32: raiseOsError(osLastError()) diff --git a/lib/pure/oswalkdir.nim b/lib/pure/oswalkdir.nim new file mode 100644 index 000000000..000fe25a3 --- /dev/null +++ b/lib/pure/oswalkdir.nim @@ -0,0 +1,27 @@ + +## Compile-time only version for walkDir if you need it at compile-time +## for JavaScript. + +type + PathComponent* = enum ## Enumeration specifying a path component. + pcFile, ## path refers to a file + pcLinkToFile, ## path refers to a symbolic link to a file + pcDir, ## path refers to a directory + pcLinkToDir ## path refers to a symbolic link to a directory + +proc staticWalkDir(dir: string; relative: bool): seq[ + tuple[kind: PathComponent, path: string]] = + discard + +iterator walkDir*(dir: string; relative=false): tuple[kind: PathComponent, path: string] = + for k, v in items(staticWalkDir(dir, relative)): + yield (k, v) + +iterator walkDirRec*(dir: string, filter={pcFile, pcDir}): string = + var stack = @[dir] + while stack.len > 0: + for k,p in walkDir(stack.pop()): + if k in filter: + case k + of pcFile, pcLinkToFile: yield p + of pcDir, pcLinkToDir: stack.add(p) diff --git a/lib/pure/parseopt2.nim b/lib/pure/parseopt2.nim index 73b498fe0..7fd9c60fe 100644 --- a/lib/pure/parseopt2.nim +++ b/lib/pure/parseopt2.nim @@ -70,7 +70,7 @@ when not defined(createNimRtl): ## Initializes option parser from current command line arguments. 
return initOptParser(commandLineParams()) -proc next*(p: var OptParser) {.rtl, extern: "npo$1".} +proc next*(p: var OptParser) {.rtl, extern: "npo2$1".} proc nextOption(p: var OptParser, token: string, allowEmpty: bool) = for splitchar in [':', '=']: @@ -113,7 +113,7 @@ proc next(p: var OptParser) = p.key = token p.val = "" -proc cmdLineRest*(p: OptParser): TaintedString {.rtl, extern: "npo$1", deprecated.} = +proc cmdLineRest*(p: OptParser): TaintedString {.rtl, extern: "npo2$1", deprecated.} = ## Returns part of command line string that has not been parsed yet. ## Do not use - does not correctly handle whitespace. return p.cmd[p.pos..p.cmd.len-1].join(" ") diff --git a/lib/pure/parseutils.nim b/lib/pure/parseutils.nim index b3708838a..698bde42a 100644 --- a/lib/pure/parseutils.nim +++ b/lib/pure/parseutils.nim @@ -25,7 +25,7 @@ const proc toLower(c: char): char {.inline.} = result = if c in {'A'..'Z'}: chr(ord(c)-ord('A')+ord('a')) else: c -proc parseHex*(s: string, number: var int, start = 0): int {. +proc parseHex*(s: string, number: var int, start = 0; maxLen = 0): int {. rtl, extern: "npuParseHex", noSideEffect.} = ## Parses a hexadecimal number and stores its value in ``number``. ## @@ -45,11 +45,14 @@ proc parseHex*(s: string, number: var int, start = 0): int {. ## discard parseHex("0x38", value) ## assert value == -200 ## + ## If 'maxLen==0' the length of the hexadecimal number has no + ## upper bound. Not more than ```maxLen`` characters are parsed. var i = start var foundDigit = false if s[i] == '0' and (s[i+1] == 'x' or s[i+1] == 'X'): inc(i, 2) elif s[i] == '#': inc(i) - while true: + let last = if maxLen == 0: s.len else: i+maxLen + while i < last: case s[i] of '_': discard of '0'..'9': diff --git a/lib/pure/redis.nim b/lib/pure/redis.nim deleted file mode 100644 index e3f18a496..000000000 --- a/lib/pure/redis.nim +++ /dev/null @@ -1,1096 +0,0 @@ -# -# -# Nim's Runtime Library -# (c) Copyright 2012 Dominik Picheta -# -# See the file "copying.txt", included in this -# distribution, for details about the copyright. -# - -## This module implements a redis client. It allows you to connect to a -## redis-server instance, send commands and receive replies. -## -## **Beware**: Most (if not all) functions that return a ``RedisString`` may -## return ``redisNil``, and functions which return a ``RedisList`` -## may return ``nil``. - -import sockets, os, strutils, parseutils - -const - redisNil* = "\0\0" - -type - Pipeline = ref object - enabled: bool - buffer: string - expected: int ## number of replies expected if pipelined - -type - SendMode = enum - normal, pipelined, multiple - -type - Redis* = object - socket: Socket - connected: bool - pipeline: Pipeline - - RedisStatus* = string - RedisInteger* = BiggestInt - RedisString* = string ## Bulk reply - RedisList* = seq[RedisString] ## Multi-bulk reply - - ReplyError* = object of IOError ## Invalid reply from redis - RedisError* = object of IOError ## Error in redis - -{.deprecated: [TSendMode: SendMode, TRedis: Redis, TRedisStatus: RedisStatus, - TRedisInteger: RedisInteger, TRedisString: RedisString, - TRedisList: RedisList, EInvalidReply: ReplyError, ERedis: RedisError].} - -proc newPipeline(): Pipeline = - new(result) - result.buffer = "" - result.enabled = false - result.expected = 0 - -proc open*(host = "localhost", port = 6379.Port): Redis = - ## Opens a connection to the redis server. 
- result.socket = socket(buffered = false) - if result.socket == invalidSocket: - raiseOSError(osLastError()) - result.socket.connect(host, port) - result.pipeline = newPipeline() - -proc raiseInvalidReply(expected, got: char) = - raise newException(ReplyError, - "Expected '$1' at the beginning of a status reply got '$2'" % - [$expected, $got]) - -proc raiseNoOK(status: string, pipelineEnabled: bool) = - if pipelineEnabled and not (status == "QUEUED" or status == "PIPELINED"): - raise newException(ReplyError, "Expected \"QUEUED\" or \"PIPELINED\" got \"$1\"" % status) - elif not pipelineEnabled and status != "OK": - raise newException(ReplyError, "Expected \"OK\" got \"$1\"" % status) - -template readSocket(r: Redis, dummyVal:expr): stmt = - var line {.inject.}: TaintedString = "" - if r.pipeline.enabled: - return dummyVal - else: - readLine(r.socket, line) - -proc parseStatus(r: Redis, line: string = ""): RedisStatus = - if r.pipeline.enabled: - return "PIPELINED" - - if line == "": - raise newException(RedisError, "Server closed connection prematurely") - - if line[0] == '-': - raise newException(RedisError, strip(line)) - if line[0] != '+': - raiseInvalidReply('+', line[0]) - - return line.substr(1) # Strip '+' - -proc readStatus(r:Redis): RedisStatus = - r.readSocket("PIPELINED") - return r.parseStatus(line) - -proc parseInteger(r: Redis, line: string = ""): RedisInteger = - if r.pipeline.enabled: return -1 - - #if line == "+QUEUED": # inside of multi - # return -1 - - if line == "": - raise newException(RedisError, "Server closed connection prematurely") - - if line[0] == '-': - raise newException(RedisError, strip(line)) - if line[0] != ':': - raiseInvalidReply(':', line[0]) - - # Strip ':' - if parseBiggestInt(line, result, 1) == 0: - raise newException(ReplyError, "Unable to parse integer.") - -proc readInteger(r: Redis): RedisInteger = - r.readSocket(-1) - return r.parseInteger(line) - -proc recv(sock: Socket, size: int): TaintedString = - result = newString(size).TaintedString - if sock.recv(cstring(result), size) != size: - raise newException(ReplyError, "recv failed") - -proc parseSingleString(r: Redis, line:string, allowMBNil = false): RedisString = - if r.pipeline.enabled: return "" - - # Error. - if line[0] == '-': - raise newException(RedisError, strip(line)) - - # Some commands return a /bulk/ value or a /multi-bulk/ nil. Odd. 
- if allowMBNil: - if line == "*-1": - return redisNil - - if line[0] != '$': - raiseInvalidReply('$', line[0]) - - var numBytes = parseInt(line.substr(1)) - if numBytes == -1: - return redisNil - - var s = r.socket.recv(numBytes+2) - result = strip(s.string) - -proc readSingleString(r: Redis): RedisString = - r.readSocket("") - return r.parseSingleString(line) - -proc readNext(r: Redis): RedisList - -proc parseArrayLines(r: Redis, countLine:string): RedisList = - if countLine.string[0] != '*': - raiseInvalidReply('*', countLine.string[0]) - - var numElems = parseInt(countLine.string.substr(1)) - if numElems == -1: return nil - result = @[] - - for i in 1..numElems: - var parsed = r.readNext() - if not isNil(parsed): - for item in parsed: - result.add(item) - -proc readArrayLines(r: Redis): RedisList = - r.readSocket(nil) - return r.parseArrayLines(line) - -proc parseBulkString(r: Redis, allowMBNil = false, line:string = ""): RedisString = - if r.pipeline.enabled: return "" - - return r.parseSingleString(line, allowMBNil) - -proc readBulkString(r: Redis, allowMBNil = false): RedisString = - r.readSocket("") - return r.parseBulkString(allowMBNil, line) - -proc readArray(r: Redis): RedisList = - r.readSocket(@[]) - return r.parseArrayLines(line) - -proc readNext(r: Redis): RedisList = - r.readSocket(@[]) - - var res = case line[0] - of '+', '-': @[r.parseStatus(line)] - of ':': @[$(r.parseInteger(line))] - of '$': @[r.parseBulkString(true,line)] - of '*': r.parseArrayLines(line) - else: - raise newException(ReplyError, "readNext failed on line: " & line) - nil - r.pipeline.expected -= 1 - return res - -proc flushPipeline*(r: Redis, wasMulti = false): RedisList = - ## Send buffered commands, clear buffer, return results - if r.pipeline.buffer.len > 0: - r.socket.send(r.pipeline.buffer) - r.pipeline.buffer = "" - - r.pipeline.enabled = false - result = @[] - - var tot = r.pipeline.expected - - for i in 0..tot-1: - var ret = r.readNext() - for item in ret: - if not (item.contains("OK") or item.contains("QUEUED")): - result.add(item) - - r.pipeline.expected = 0 - -proc startPipelining*(r: Redis) = - ## Enable command pipelining (reduces network roundtrips). - ## Note that when enabled, you must call flushPipeline to actually send commands, except - ## for multi/exec() which enable and flush the pipeline automatically. 
- ## Commands return immediately with dummy values; actual results returned from - ## flushPipeline() or exec() - r.pipeline.expected = 0 - r.pipeline.enabled = true - -proc sendCommand(r: Redis, cmd: string, args: varargs[string]) = - var request = "*" & $(1 + args.len()) & "\c\L" - request.add("$" & $cmd.len() & "\c\L") - request.add(cmd & "\c\L") - for i in items(args): - request.add("$" & $i.len() & "\c\L") - request.add(i & "\c\L") - - if r.pipeline.enabled: - r.pipeline.buffer.add(request) - r.pipeline.expected += 1 - else: - r.socket.send(request) - -proc sendCommand(r: Redis, cmd: string, arg1: string, - args: varargs[string]) = - var request = "*" & $(2 + args.len()) & "\c\L" - request.add("$" & $cmd.len() & "\c\L") - request.add(cmd & "\c\L") - request.add("$" & $arg1.len() & "\c\L") - request.add(arg1 & "\c\L") - for i in items(args): - request.add("$" & $i.len() & "\c\L") - request.add(i & "\c\L") - - if r.pipeline.enabled: - r.pipeline.expected += 1 - r.pipeline.buffer.add(request) - else: - r.socket.send(request) - -# Keys - -proc del*(r: Redis, keys: varargs[string]): RedisInteger = - ## Delete a key or multiple keys - r.sendCommand("DEL", keys) - return r.readInteger() - -proc exists*(r: Redis, key: string): bool = - ## Determine if a key exists - r.sendCommand("EXISTS", key) - return r.readInteger() == 1 - -proc expire*(r: Redis, key: string, seconds: int): bool = - ## Set a key's time to live in seconds. Returns `false` if the key could - ## not be found or the timeout could not be set. - r.sendCommand("EXPIRE", key, $seconds) - return r.readInteger() == 1 - -proc expireAt*(r: Redis, key: string, timestamp: int): bool = - ## Set the expiration for a key as a UNIX timestamp. Returns `false` - ## if the key could not be found or the timeout could not be set. - r.sendCommand("EXPIREAT", key, $timestamp) - return r.readInteger() == 1 - -proc keys*(r: Redis, pattern: string): RedisList = - ## Find all keys matching the given pattern - r.sendCommand("KEYS", pattern) - return r.readArray() - -proc scan*(r: Redis, cursor: var BiggestInt): RedisList = - ## Find all keys matching the given pattern and yield it to client in portions - ## using default Redis values for MATCH and COUNT parameters - r.sendCommand("SCAN", $cursor) - let reply = r.readArray() - cursor = strutils.parseBiggestInt(reply[0]) - return reply[1..high(reply)] - -proc scan*(r: Redis, cursor: var BiggestInt, pattern: string): RedisList = - ## Find all keys matching the given pattern and yield it to client in portions - ## using cursor as a client query identifier. Using default Redis value for COUNT argument - r.sendCommand("SCAN", $cursor, ["MATCH", pattern]) - let reply = r.readArray() - cursor = strutils.parseBiggestInt(reply[0]) - return reply[1..high(reply)] - -proc scan*(r: Redis, cursor: var BiggestInt, pattern: string, count: int): RedisList = - ## Find all keys matching the given pattern and yield it to client in portions - ## using cursor as a client query identifier. - r.sendCommand("SCAN", $cursor, ["MATCH", pattern, "COUNT", $count]) - let reply = r.readArray() - cursor = strutils.parseBiggestInt(reply[0]) - return reply[1..high(reply)] - -proc move*(r: Redis, key: string, db: int): bool = - ## Move a key to another database. Returns `true` on a successful move. - r.sendCommand("MOVE", key, $db) - return r.readInteger() == 1 - -proc persist*(r: Redis, key: string): bool = - ## Remove the expiration from a key. - ## Returns `true` when the timeout was removed. 
- r.sendCommand("PERSIST", key) - return r.readInteger() == 1 - -proc randomKey*(r: Redis): RedisString = - ## Return a random key from the keyspace - r.sendCommand("RANDOMKEY") - return r.readBulkString() - -proc rename*(r: Redis, key, newkey: string): RedisStatus = - ## Rename a key. - ## - ## **WARNING:** Overwrites `newkey` if it exists! - r.sendCommand("RENAME", key, newkey) - raiseNoOK(r.readStatus(), r.pipeline.enabled) - -proc renameNX*(r: Redis, key, newkey: string): bool = - ## Same as ``rename`` but doesn't continue if `newkey` exists. - ## Returns `true` if key was renamed. - r.sendCommand("RENAMENX", key, newkey) - return r.readInteger() == 1 - -proc ttl*(r: Redis, key: string): RedisInteger = - ## Get the time to live for a key - r.sendCommand("TTL", key) - return r.readInteger() - -proc keyType*(r: Redis, key: string): RedisStatus = - ## Determine the type stored at key - r.sendCommand("TYPE", key) - return r.readStatus() - - -# Strings - -proc append*(r: Redis, key, value: string): RedisInteger = - ## Append a value to a key - r.sendCommand("APPEND", key, value) - return r.readInteger() - -proc decr*(r: Redis, key: string): RedisInteger = - ## Decrement the integer value of a key by one - r.sendCommand("DECR", key) - return r.readInteger() - -proc decrBy*(r: Redis, key: string, decrement: int): RedisInteger = - ## Decrement the integer value of a key by the given number - r.sendCommand("DECRBY", key, $decrement) - return r.readInteger() - -proc get*(r: Redis, key: string): RedisString = - ## Get the value of a key. Returns `redisNil` when `key` doesn't exist. - r.sendCommand("GET", key) - return r.readBulkString() - -proc getBit*(r: Redis, key: string, offset: int): RedisInteger = - ## Returns the bit value at offset in the string value stored at key - r.sendCommand("GETBIT", key, $offset) - return r.readInteger() - -proc getRange*(r: Redis, key: string, start, stop: int): RedisString = - ## Get a substring of the string stored at a key - r.sendCommand("GETRANGE", key, $start, $stop) - return r.readBulkString() - -proc getSet*(r: Redis, key: string, value: string): RedisString = - ## Set the string value of a key and return its old value. Returns `redisNil` - ## when key doesn't exist. - r.sendCommand("GETSET", key, value) - return r.readBulkString() - -proc incr*(r: Redis, key: string): RedisInteger = - ## Increment the integer value of a key by one. - r.sendCommand("INCR", key) - return r.readInteger() - -proc incrBy*(r: Redis, key: string, increment: int): RedisInteger = - ## Increment the integer value of a key by the given number - r.sendCommand("INCRBY", key, $increment) - return r.readInteger() - -proc setk*(r: Redis, key, value: string) = - ## Set the string value of a key. - ## - ## NOTE: This function had to be renamed due to a clash with the `set` type. - r.sendCommand("SET", key, value) - raiseNoOK(r.readStatus(), r.pipeline.enabled) - -proc setNX*(r: Redis, key, value: string): bool = - ## Set the value of a key, only if the key does not exist. Returns `true` - ## if the key was set. 
- r.sendCommand("SETNX", key, value) - return r.readInteger() == 1 - -proc setBit*(r: Redis, key: string, offset: int, - value: string): RedisInteger = - ## Sets or clears the bit at offset in the string value stored at key - r.sendCommand("SETBIT", key, $offset, value) - return r.readInteger() - -proc setEx*(r: Redis, key: string, seconds: int, value: string): RedisStatus = - ## Set the value and expiration of a key - r.sendCommand("SETEX", key, $seconds, value) - raiseNoOK(r.readStatus(), r.pipeline.enabled) - -proc setRange*(r: Redis, key: string, offset: int, - value: string): RedisInteger = - ## Overwrite part of a string at key starting at the specified offset - r.sendCommand("SETRANGE", key, $offset, value) - return r.readInteger() - -proc strlen*(r: Redis, key: string): RedisInteger = - ## Get the length of the value stored in a key. Returns 0 when key doesn't - ## exist. - r.sendCommand("STRLEN", key) - return r.readInteger() - -# Hashes -proc hDel*(r: Redis, key, field: string): bool = - ## Delete a hash field at `key`. Returns `true` if the field was removed. - r.sendCommand("HDEL", key, field) - return r.readInteger() == 1 - -proc hExists*(r: Redis, key, field: string): bool = - ## Determine if a hash field exists. - r.sendCommand("HEXISTS", key, field) - return r.readInteger() == 1 - -proc hGet*(r: Redis, key, field: string): RedisString = - ## Get the value of a hash field - r.sendCommand("HGET", key, field) - return r.readBulkString() - -proc hGetAll*(r: Redis, key: string): RedisList = - ## Get all the fields and values in a hash - r.sendCommand("HGETALL", key) - return r.readArray() - -proc hIncrBy*(r: Redis, key, field: string, incr: int): RedisInteger = - ## Increment the integer value of a hash field by the given number - r.sendCommand("HINCRBY", key, field, $incr) - return r.readInteger() - -proc hKeys*(r: Redis, key: string): RedisList = - ## Get all the fields in a hash - r.sendCommand("HKEYS", key) - return r.readArray() - -proc hLen*(r: Redis, key: string): RedisInteger = - ## Get the number of fields in a hash - r.sendCommand("HLEN", key) - return r.readInteger() - -proc hMGet*(r: Redis, key: string, fields: varargs[string]): RedisList = - ## Get the values of all the given hash fields - r.sendCommand("HMGET", key, fields) - return r.readArray() - -proc hMSet*(r: Redis, key: string, - fieldValues: openArray[tuple[field, value: string]]) = - ## Set multiple hash fields to multiple values - var args = @[key] - for field, value in items(fieldValues): - args.add(field) - args.add(value) - r.sendCommand("HMSET", args) - raiseNoOK(r.readStatus(), r.pipeline.enabled) - -proc hSet*(r: Redis, key, field, value: string): RedisInteger = - ## Set the string value of a hash field - r.sendCommand("HSET", key, field, value) - return r.readInteger() - -proc hSetNX*(r: Redis, key, field, value: string): RedisInteger = - ## Set the value of a hash field, only if the field does **not** exist - r.sendCommand("HSETNX", key, field, value) - return r.readInteger() - -proc hVals*(r: Redis, key: string): RedisList = - ## Get all the values in a hash - r.sendCommand("HVALS", key) - return r.readArray() - -# Lists - -proc bLPop*(r: Redis, keys: varargs[string], timeout: int): RedisList = - ## Remove and get the *first* element in a list, or block until - ## one is available - var args: seq[string] = @[] - for i in items(keys): args.add(i) - args.add($timeout) - r.sendCommand("BLPOP", args) - return r.readArray() - -proc bRPop*(r: Redis, keys: varargs[string], timeout: int): RedisList = - ## 
Remove and get the *last* element in a list, or block until one - ## is available. - var args: seq[string] = @[] - for i in items(keys): args.add(i) - args.add($timeout) - r.sendCommand("BRPOP", args) - return r.readArray() - -proc bRPopLPush*(r: Redis, source, destination: string, - timeout: int): RedisString = - ## Pop a value from a list, push it to another list and return it; or - ## block until one is available. - ## - ## http://redis.io/commands/brpoplpush - r.sendCommand("BRPOPLPUSH", source, destination, $timeout) - return r.readBulkString(true) # Multi-Bulk nil allowed. - -proc lIndex*(r: Redis, key: string, index: int): RedisString = - ## Get an element from a list by its index - r.sendCommand("LINDEX", key, $index) - return r.readBulkString() - -proc lInsert*(r: Redis, key: string, before: bool, pivot, value: string): - RedisInteger = - ## Insert an element before or after another element in a list - var pos = if before: "BEFORE" else: "AFTER" - r.sendCommand("LINSERT", key, pos, pivot, value) - return r.readInteger() - -proc lLen*(r: Redis, key: string): RedisInteger = - ## Get the length of a list - r.sendCommand("LLEN", key) - return r.readInteger() - -proc lPop*(r: Redis, key: string): RedisString = - ## Remove and get the first element in a list - r.sendCommand("LPOP", key) - return r.readBulkString() - -proc lPush*(r: Redis, key, value: string, create: bool = true): RedisInteger = - ## Prepend a value to a list. Returns the length of the list after the push. - ## The ``create`` param specifies whether a list should be created if it - ## doesn't exist at ``key``. More specifically if ``create`` is true, `LPUSH` - ## will be used, otherwise `LPUSHX`. - if create: - r.sendCommand("LPUSH", key, value) - else: - r.sendCommand("LPUSHX", key, value) - return r.readInteger() - -proc lRange*(r: Redis, key: string, start, stop: int): RedisList = - ## Get a range of elements from a list. Returns `nil` when `key` - ## doesn't exist. - r.sendCommand("LRANGE", key, $start, $stop) - return r.readArray() - -proc lRem*(r: Redis, key: string, value: string, count: int = 0): RedisInteger = - ## Remove elements from a list. Returns the number of elements that have been - ## removed. - r.sendCommand("LREM", key, $count, value) - return r.readInteger() - -proc lSet*(r: Redis, key: string, index: int, value: string) = - ## Set the value of an element in a list by its index - r.sendCommand("LSET", key, $index, value) - raiseNoOK(r.readStatus(), r.pipeline.enabled) - -proc lTrim*(r: Redis, key: string, start, stop: int) = - ## Trim a list to the specified range - r.sendCommand("LTRIM", key, $start, $stop) - raiseNoOK(r.readStatus(), r.pipeline.enabled) - -proc rPop*(r: Redis, key: string): RedisString = - ## Remove and get the last element in a list - r.sendCommand("RPOP", key) - return r.readBulkString() - -proc rPopLPush*(r: Redis, source, destination: string): RedisString = - ## Remove the last element in a list, append it to another list and return it - r.sendCommand("RPOPLPUSH", source, destination) - return r.readBulkString() - -proc rPush*(r: Redis, key, value: string, create: bool = true): RedisInteger = - ## Append a value to a list. Returns the length of the list after the push. - ## The ``create`` param specifies whether a list should be created if it - ## doesn't exist at ``key``. More specifically if ``create`` is true, `RPUSH` - ## will be used, otherwise `RPUSHX`. 
- if create: - r.sendCommand("RPUSH", key, value) - else: - r.sendCommand("RPUSHX", key, value) - return r.readInteger() - -# Sets - -proc sadd*(r: Redis, key: string, member: string): RedisInteger = - ## Add a member to a set - r.sendCommand("SADD", key, member) - return r.readInteger() - -proc scard*(r: Redis, key: string): RedisInteger = - ## Get the number of members in a set - r.sendCommand("SCARD", key) - return r.readInteger() - -proc sdiff*(r: Redis, keys: varargs[string]): RedisList = - ## Subtract multiple sets - r.sendCommand("SDIFF", keys) - return r.readArray() - -proc sdiffstore*(r: Redis, destination: string, - keys: varargs[string]): RedisInteger = - ## Subtract multiple sets and store the resulting set in a key - r.sendCommand("SDIFFSTORE", destination, keys) - return r.readInteger() - -proc sinter*(r: Redis, keys: varargs[string]): RedisList = - ## Intersect multiple sets - r.sendCommand("SINTER", keys) - return r.readArray() - -proc sinterstore*(r: Redis, destination: string, - keys: varargs[string]): RedisInteger = - ## Intersect multiple sets and store the resulting set in a key - r.sendCommand("SINTERSTORE", destination, keys) - return r.readInteger() - -proc sismember*(r: Redis, key: string, member: string): RedisInteger = - ## Determine if a given value is a member of a set - r.sendCommand("SISMEMBER", key, member) - return r.readInteger() - -proc smembers*(r: Redis, key: string): RedisList = - ## Get all the members in a set - r.sendCommand("SMEMBERS", key) - return r.readArray() - -proc smove*(r: Redis, source: string, destination: string, - member: string): RedisInteger = - ## Move a member from one set to another - r.sendCommand("SMOVE", source, destination, member) - return r.readInteger() - -proc spop*(r: Redis, key: string): RedisString = - ## Remove and return a random member from a set - r.sendCommand("SPOP", key) - return r.readBulkString() - -proc srandmember*(r: Redis, key: string): RedisString = - ## Get a random member from a set - r.sendCommand("SRANDMEMBER", key) - return r.readBulkString() - -proc srem*(r: Redis, key: string, member: string): RedisInteger = - ## Remove a member from a set - r.sendCommand("SREM", key, member) - return r.readInteger() - -proc sunion*(r: Redis, keys: varargs[string]): RedisList = - ## Add multiple sets - r.sendCommand("SUNION", keys) - return r.readArray() - -proc sunionstore*(r: Redis, destination: string, - key: varargs[string]): RedisInteger = - ## Add multiple sets and store the resulting set in a key - r.sendCommand("SUNIONSTORE", destination, key) - return r.readInteger() - -# Sorted sets - -proc zadd*(r: Redis, key: string, score: int, member: string): RedisInteger = - ## Add a member to a sorted set, or update its score if it already exists - r.sendCommand("ZADD", key, $score, member) - return r.readInteger() - -proc zcard*(r: Redis, key: string): RedisInteger = - ## Get the number of members in a sorted set - r.sendCommand("ZCARD", key) - return r.readInteger() - -proc zcount*(r: Redis, key: string, min: string, max: string): RedisInteger = - ## Count the members in a sorted set with scores within the given values - r.sendCommand("ZCOUNT", key, min, max) - return r.readInteger() - -proc zincrby*(r: Redis, key: string, increment: string, - member: string): RedisString = - ## Increment the score of a member in a sorted set - r.sendCommand("ZINCRBY", key, increment, member) - return r.readBulkString() - -proc zinterstore*(r: Redis, destination: string, numkeys: string, - keys: openArray[string], weights: 
openArray[string] = [], - aggregate: string = ""): RedisInteger = - ## Intersect multiple sorted sets and store the resulting sorted set in - ## a new key - var args = @[destination, numkeys] - for i in items(keys): args.add(i) - - if weights.len != 0: - args.add("WITHSCORE") - for i in items(weights): args.add(i) - if aggregate.len != 0: - args.add("AGGREGATE") - args.add(aggregate) - - r.sendCommand("ZINTERSTORE", args) - - return r.readInteger() - -proc zrange*(r: Redis, key: string, start: string, stop: string, - withScores: bool): RedisList = - ## Return a range of members in a sorted set, by index - if not withScores: - r.sendCommand("ZRANGE", key, start, stop) - else: - r.sendCommand("ZRANGE", "WITHSCORES", key, start, stop) - return r.readArray() - -proc zrangebyscore*(r: Redis, key: string, min: string, max: string, - withScore: bool = false, limit: bool = false, - limitOffset: int = 0, limitCount: int = 0): RedisList = - ## Return a range of members in a sorted set, by score - var args = @[key, min, max] - - if withScore: args.add("WITHSCORE") - if limit: - args.add("LIMIT") - args.add($limitOffset) - args.add($limitCount) - - r.sendCommand("ZRANGEBYSCORE", args) - return r.readArray() - -proc zrank*(r: Redis, key: string, member: string): RedisString = - ## Determine the index of a member in a sorted set - r.sendCommand("ZRANK", key, member) - return r.readBulkString() - -proc zrem*(r: Redis, key: string, member: string): RedisInteger = - ## Remove a member from a sorted set - r.sendCommand("ZREM", key, member) - return r.readInteger() - -proc zremrangebyrank*(r: Redis, key: string, start: string, - stop: string): RedisInteger = - ## Remove all members in a sorted set within the given indexes - r.sendCommand("ZREMRANGEBYRANK", key, start, stop) - return r.readInteger() - -proc zremrangebyscore*(r: Redis, key: string, min: string, - max: string): RedisInteger = - ## Remove all members in a sorted set within the given scores - r.sendCommand("ZREMRANGEBYSCORE", key, min, max) - return r.readInteger() - -proc zrevrange*(r: Redis, key: string, start: string, stop: string, - withScore: bool): RedisList = - ## Return a range of members in a sorted set, by index, - ## with scores ordered from high to low - if withScore: - r.sendCommand("ZREVRANGE", "WITHSCORE", key, start, stop) - else: r.sendCommand("ZREVRANGE", key, start, stop) - return r.readArray() - -proc zrevrangebyscore*(r: Redis, key: string, min: string, max: string, - withScore: bool = false, limit: bool = false, - limitOffset: int = 0, limitCount: int = 0): RedisList = - ## Return a range of members in a sorted set, by score, with - ## scores ordered from high to low - var args = @[key, min, max] - - if withScore: args.add("WITHSCORE") - if limit: - args.add("LIMIT") - args.add($limitOffset) - args.add($limitCount) - - r.sendCommand("ZREVRANGEBYSCORE", args) - return r.readArray() - -proc zrevrank*(r: Redis, key: string, member: string): RedisString = - ## Determine the index of a member in a sorted set, with - ## scores ordered from high to low - r.sendCommand("ZREVRANK", key, member) - return r.readBulkString() - -proc zscore*(r: Redis, key: string, member: string): RedisString = - ## Get the score associated with the given member in a sorted set - r.sendCommand("ZSCORE", key, member) - return r.readBulkString() - -proc zunionstore*(r: Redis, destination: string, numkeys: string, - keys: openArray[string], weights: openArray[string] = [], - aggregate: string = ""): RedisInteger = - ## Add multiple sorted sets and store the 
resulting sorted set in a new key - var args = @[destination, numkeys] - for i in items(keys): args.add(i) - - if weights.len != 0: - args.add("WEIGHTS") - for i in items(weights): args.add(i) - if aggregate.len != 0: - args.add("AGGREGATE") - args.add(aggregate) - - r.sendCommand("ZUNIONSTORE", args) - - return r.readInteger() - -# HyperLogLog - -proc pfadd*(r: Redis, key: string, elements: varargs[string]): RedisInteger = - ## Add variable number of elements into special 'HyperLogLog' set type - r.sendCommand("PFADD", key, elements) - return r.readInteger() - -proc pfcount*(r: Redis, key: string): RedisInteger = - ## Count approximate number of elements in 'HyperLogLog' - r.sendCommand("PFCOUNT", key) - return r.readInteger() - -proc pfmerge*(r: Redis, destination: string, sources: varargs[string]) = - ## Merge several source HyperLogLog's into one specified by destKey - r.sendCommand("PFMERGE", destination, sources) - raiseNoOK(r.readStatus(), r.pipeline.enabled) - -# Pub/Sub - -# TODO: pub/sub -- I don't think this will work synchronously. -discard """ -proc psubscribe*(r: Redis, pattern: openarray[string]): ???? = - ## Listen for messages published to channels matching the given patterns - r.socket.send("PSUBSCRIBE $#\c\L" % pattern) - return ??? - -proc publish*(r: Redis, channel: string, message: string): RedisInteger = - ## Post a message to a channel - r.socket.send("PUBLISH $# $#\c\L" % [channel, message]) - return r.readInteger() - -proc punsubscribe*(r: Redis, [pattern: openarray[string], : string): ???? = - ## Stop listening for messages posted to channels matching the given patterns - r.socket.send("PUNSUBSCRIBE $# $#\c\L" % [[pattern.join(), ]) - return ??? - -proc subscribe*(r: Redis, channel: openarray[string]): ???? = - ## Listen for messages published to the given channels - r.socket.send("SUBSCRIBE $#\c\L" % channel.join) - return ??? - -proc unsubscribe*(r: Redis, [channel: openarray[string], : string): ???? = - ## Stop listening for messages posted to the given channels - r.socket.send("UNSUBSCRIBE $# $#\c\L" % [[channel.join(), ]) - return ??? 
- -""" - -# Transactions - -proc discardMulti*(r: Redis) = - ## Discard all commands issued after MULTI - r.sendCommand("DISCARD") - raiseNoOK(r.readStatus(), r.pipeline.enabled) - -proc exec*(r: Redis): RedisList = - ## Execute all commands issued after MULTI - r.sendCommand("EXEC") - r.pipeline.enabled = false - # Will reply with +OK for MULTI/EXEC and +QUEUED for every command - # between, then with the results - return r.flushPipeline(true) - - -proc multi*(r: Redis) = - ## Mark the start of a transaction block - r.startPipelining() - r.sendCommand("MULTI") - raiseNoOK(r.readStatus(), r.pipeline.enabled) - -proc unwatch*(r: Redis) = - ## Forget about all watched keys - r.sendCommand("UNWATCH") - raiseNoOK(r.readStatus(), r.pipeline.enabled) - -proc watch*(r: Redis, key: varargs[string]) = - ## Watch the given keys to determine execution of the MULTI/EXEC block - r.sendCommand("WATCH", key) - raiseNoOK(r.readStatus(), r.pipeline.enabled) - -# Connection - -proc auth*(r: Redis, password: string) = - ## Authenticate to the server - r.sendCommand("AUTH", password) - raiseNoOK(r.readStatus(), r.pipeline.enabled) - -proc echoServ*(r: Redis, message: string): RedisString = - ## Echo the given string - r.sendCommand("ECHO", message) - return r.readBulkString() - -proc ping*(r: Redis): RedisStatus = - ## Ping the server - r.sendCommand("PING") - return r.readStatus() - -proc quit*(r: Redis) = - ## Close the connection - r.sendCommand("QUIT") - raiseNoOK(r.readStatus(), r.pipeline.enabled) - -proc select*(r: Redis, index: int): RedisStatus = - ## Change the selected database for the current connection - r.sendCommand("SELECT", $index) - return r.readStatus() - -# Server - -proc bgrewriteaof*(r: Redis) = - ## Asynchronously rewrite the append-only file - r.sendCommand("BGREWRITEAOF") - raiseNoOK(r.readStatus(), r.pipeline.enabled) - -proc bgsave*(r: Redis) = - ## Asynchronously save the dataset to disk - r.sendCommand("BGSAVE") - raiseNoOK(r.readStatus(), r.pipeline.enabled) - -proc configGet*(r: Redis, parameter: string): RedisList = - ## Get the value of a configuration parameter - r.sendCommand("CONFIG", "GET", parameter) - return r.readArray() - -proc configSet*(r: Redis, parameter: string, value: string) = - ## Set a configuration parameter to the given value - r.sendCommand("CONFIG", "SET", parameter, value) - raiseNoOK(r.readStatus(), r.pipeline.enabled) - -proc configResetStat*(r: Redis) = - ## Reset the stats returned by INFO - r.sendCommand("CONFIG", "RESETSTAT") - raiseNoOK(r.readStatus(), r.pipeline.enabled) - -proc dbsize*(r: Redis): RedisInteger = - ## Return the number of keys in the selected database - r.sendCommand("DBSIZE") - return r.readInteger() - -proc debugObject*(r: Redis, key: string): RedisStatus = - ## Get debugging information about a key - r.sendCommand("DEBUG", "OBJECT", key) - return r.readStatus() - -proc debugSegfault*(r: Redis) = - ## Make the server crash - r.sendCommand("DEBUG", "SEGFAULT") - -proc flushall*(r: Redis): RedisStatus = - ## Remove all keys from all databases - r.sendCommand("FLUSHALL") - raiseNoOK(r.readStatus(), r.pipeline.enabled) - -proc flushdb*(r: Redis): RedisStatus = - ## Remove all keys from the current database - r.sendCommand("FLUSHDB") - raiseNoOK(r.readStatus(), r.pipeline.enabled) - -proc info*(r: Redis): RedisString = - ## Get information and statistics about the server - r.sendCommand("INFO") - return r.readBulkString() - -proc lastsave*(r: Redis): RedisInteger = - ## Get the UNIX time stamp of the last successful save to disk - 
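A sketch tying the transaction, configuration and server commands together, assuming a locally running Redis server and the `setk`/`incr` string commands used by `someTests` further down:

  import redis
  var r = open()                     # assumes a local Redis server
  r.multi()                          # start a MULTI block; enables pipelining
  r.setk("visits", "0")              # queued, not yet executed
  discard r.incr("visits")           # queued
  for reply in r.exec(): echo reply  # run the queued commands, print the replies
  r.configSet("timeout", "300")
  echo r.dbsize()                    # number of keys in the selected database
  echo r.lastsave()                  # UNIX time stamp of the last successful save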
r.sendCommand("LASTSAVE") - return r.readInteger() - -discard """ -proc monitor*(r: Redis) = - ## Listen for all requests received by the server in real time - r.socket.send("MONITOR\c\L") - raiseNoOK(r.readStatus(), r.pipeline.enabled) -""" - -proc save*(r: Redis) = - ## Synchronously save the dataset to disk - r.sendCommand("SAVE") - raiseNoOK(r.readStatus(), r.pipeline.enabled) - -proc shutdown*(r: Redis) = - ## Synchronously save the dataset to disk and then shut down the server - r.sendCommand("SHUTDOWN") - var s = "".TaintedString - r.socket.readLine(s) - if s.string.len != 0: raise newException(RedisError, s.string) - -proc slaveof*(r: Redis, host: string, port: string) = - ## Make the server a slave of another instance, or promote it as master - r.sendCommand("SLAVEOF", host, port) - raiseNoOK(r.readStatus(), r.pipeline.enabled) - -iterator hPairs*(r: Redis, key: string): tuple[key, value: string] = - ## Iterator for keys and values in a hash. - var - contents = r.hGetAll(key) - k = "" - for i in items(contents): - if k == "": - k = i - else: - yield (k, i) - k = "" - -proc someTests(r: Redis, how: SendMode):seq[string] = - var list:seq[string] = @[] - - if how == pipelined: - r.startPipelining() - elif how == multiple: - r.multi() - - r.setk("nim:test", "Testing something.") - r.setk("nim:utf8", "こんにちは") - r.setk("nim:esc", "\\ths ągt\\") - r.setk("nim:int", "1") - list.add(r.get("nim:esc")) - list.add($(r.incr("nim:int"))) - list.add(r.get("nim:int")) - list.add(r.get("nim:utf8")) - list.add($(r.hSet("test1", "name", "A Test"))) - var res = r.hGetAll("test1") - for r in res: - list.add(r) - list.add(r.get("invalid_key")) - list.add($(r.lPush("mylist","itema"))) - list.add($(r.lPush("mylist","itemb"))) - r.lTrim("mylist",0,1) - var p = r.lRange("mylist", 0, -1) - - for i in items(p): - if not isNil(i): - list.add(i) - - list.add(r.debugObject("mylist")) - - r.configSet("timeout", "299") - var g = r.configGet("timeout") - for i in items(g): - list.add(i) - - list.add(r.echoServ("BLAH")) - - case how - of normal: - return list - of pipelined: - return r.flushPipeline() - of multiple: - return r.exec() - -proc assertListsIdentical(listA, listB: seq[string]) = - assert(listA.len == listB.len) - var i = 0 - for item in listA: - assert(item == listB[i]) - i = i + 1 - -when not defined(testing) and isMainModule: - when false: - var r = open() - - # Test with no pipelining - var listNormal = r.someTests(normal) - - # Test with pipelining enabled - var listPipelined = r.someTests(pipelined) - assertListsIdentical(listNormal, listPipelined) - - # Test with multi/exec() (automatic pipelining) - var listMulti = r.someTests(multiple) - assertListsIdentical(listNormal, listMulti) diff --git a/lib/pure/strutils.nim b/lib/pure/strutils.nim index b61df6086..a446f85b4 100644 --- a/lib/pure/strutils.nim +++ b/lib/pure/strutils.nim @@ -1210,22 +1210,21 @@ proc unescape*(s: string, prefix = "\"", suffix = "\""): string {.noSideEffect, ## If `s` does not begin with ``prefix`` and end with ``suffix`` a ## ValueError exception will be raised. 
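Two standalone illustrations of that contract, assuming this module's `escape` counterpart and its `\xHH` hexadecimal escapes:

  import strutils
  doAssert unescape(escape("1\x013")) == "1\x013"                  # escape/unescape round-trip
  doAssert unescape("<\\x41>", prefix = "<", suffix = ">") == "A"  # \x41 encodes 'A'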
result = newStringOfCap(s.len) - var i = 0 + var i = prefix.len if not s.startsWith(prefix): raise newException(ValueError, "String does not start with a prefix of: " & prefix) - inc(i) while true: if i == s.len-suffix.len: break case s[i] of '\\': case s[i+1]: of 'x': - inc i + inc i, 2 var c: int - i += parseutils.parseHex(s, c, i) + i += parseutils.parseHex(s, c, i, maxLen=2) result.add(chr(c)) - inc(i, 2) + dec i, 2 of '\\': result.add('\\') of '\'': @@ -1281,7 +1280,7 @@ proc editDistance*(a, b: string): int {.noSideEffect, # another special case: if len1 == 1: - for j in s..len2-1: + for j in s..s+len2-1: if a[s] == b[j]: return len2 - 1 return len2 @@ -1344,8 +1343,8 @@ proc editDistance*(a, b: string): int {.noSideEffect, # floating point formating: - -proc c_sprintf(buf, frmt: cstring): cint {.header: "<stdio.h>", +when not defined(js): + proc c_sprintf(buf, frmt: cstring): cint {.header: "<stdio.h>", importc: "sprintf", varargs, noSideEffect.} type @@ -1370,29 +1369,44 @@ proc formatBiggestFloat*(f: BiggestFloat, format: FloatFormatMode = ffDefault, ## after the decimal point for Nim's ``biggestFloat`` type. ## ## If ``precision == 0``, it tries to format it nicely. - const floatFormatToChar: array[FloatFormatMode, char] = ['g', 'f', 'e'] - var - frmtstr {.noinit.}: array[0..5, char] - buf {.noinit.}: array[0..2500, char] - L: cint - frmtstr[0] = '%' - if precision > 0: - frmtstr[1] = '#' - frmtstr[2] = '.' - frmtstr[3] = '*' - frmtstr[4] = floatFormatToChar[format] - frmtstr[5] = '\0' - L = c_sprintf(buf, frmtstr, precision, f) + when defined(js): + var res: cstring + case format + of ffDefault: + {.emit: "`res` = `f`.toString();".} + of ffDecimal: + {.emit: "`res` = `f`.toFixed(`precision`);".} + of ffScientific: + {.emit: "`res` = `f`.toExponential(`precision`);".} + result = $res + for i in 0 ..< result.len: + # Depending on the locale either dot or comma is produced, + # but nothing else is possible: + if result[i] in {'.', ','}: result[i] = decimalsep else: - frmtstr[1] = floatFormatToChar[format] - frmtstr[2] = '\0' - L = c_sprintf(buf, frmtstr, f) - result = newString(L) - for i in 0 ..< L: - # Depending on the locale either dot or comma is produced, - # but nothing else is possible: - if buf[i] in {'.', ','}: result[i] = decimalsep - else: result[i] = buf[i] + const floatFormatToChar: array[FloatFormatMode, char] = ['g', 'f', 'e'] + var + frmtstr {.noinit.}: array[0..5, char] + buf {.noinit.}: array[0..2500, char] + L: cint + frmtstr[0] = '%' + if precision > 0: + frmtstr[1] = '#' + frmtstr[2] = '.' + frmtstr[3] = '*' + frmtstr[4] = floatFormatToChar[format] + frmtstr[5] = '\0' + L = c_sprintf(buf, frmtstr, precision, f) + else: + frmtstr[1] = floatFormatToChar[format] + frmtstr[2] = '\0' + L = c_sprintf(buf, frmtstr, f) + result = newString(L) + for i in 0 ..< L: + # Depending on the locale either dot or comma is produced, + # but nothing else is possible: + if buf[i] in {'.', ','}: result[i] = decimalsep + else: result[i] = buf[i] proc formatFloat*(f: float, format: FloatFormatMode = ffDefault, precision: range[0..32] = 16; decimalSep = '.'): string {. 
@@ -1706,3 +1720,4 @@ when isMainModule: doAssert isUpper("ABC") doAssert(not isUpper("AAcc")) doAssert(not isUpper("A#$")) + doAssert(unescape(r"\x013", "", "") == "\x013") diff --git a/lib/pure/times.nim b/lib/pure/times.nim index a478b9d65..03745d54e 100644 --- a/lib/pure/times.nim +++ b/lib/pure/times.nim @@ -29,7 +29,7 @@ ## echo "epochTime() float value: ", epochTime() ## echo "getTime() float value: ", toSeconds(getTime()) ## echo "cpuTime() float value: ", cpuTime() -## echo "An hour from now : ", getLocalTime(getTime()) + initInterval(0,0,0,1) +## echo "An hour from now : ", getLocalTime(getTime()) + 1.hours ## echo "An hour from (UTC) now: ", getGmTime(getTime()) + initInterval(0,0,0,1) {.push debugger:off.} # the user does not want to trace a part @@ -171,11 +171,6 @@ type {.deprecated: [TMonth: Month, TWeekDay: WeekDay, TTime: Time, TTimeInterval: TimeInterval, TTimeInfo: TimeInfo].} -proc miliseconds*(t: TimeInterval): int {.deprecated.} = t.milliseconds - -proc `miliseconds=`*(t:var TimeInterval, milliseconds: int) {.deprecated.} = - t.milliseconds = milliseconds - proc getTime*(): Time {.tags: [TimeEffect], benign.} ## gets the current calendar time as a UNIX epoch value (number of seconds ## elapsed since 1970) with integer precission. Use epochTime for higher @@ -245,13 +240,59 @@ proc getStartMilsecs*(): int {.deprecated, tags: [TimeEffect], benign.} proc initInterval*(milliseconds, seconds, minutes, hours, days, months, years: int = 0): TimeInterval = ## creates a new ``TimeInterval``. - result.milliseconds = milliseconds - result.seconds = seconds - result.minutes = minutes - result.hours = hours - result.days = days - result.months = months - result.years = years + ## + ## You can also use the convenience procedures called ``milliseconds``, + ## ``seconds``, ``minutes``, ``hours``, ``days``, ``months``, and ``years``. + ## + ## Example: + ## + ## .. code-block:: nim + ## + ## let day = initInterval(hours=24) + ## let tomorrow = getTime() + day + ## echo(tomorrow) + var carryO = 0 + result.milliseconds = `mod`(milliseconds, 1000) + carryO = `div`(milliseconds, 1000) + result.seconds = `mod`(carryO + seconds, 60) + carryO = `div`(seconds, 60) + result.minutes = `mod`(carryO + minutes, 60) + carryO = `div`(minutes, 60) + result.hours = `mod`(carryO + hours, 24) + carryO = `div`(hours, 24) + result.days = carryO + days + carryO = 0 + result.months = `mod`(months, 12) + carryO = `div`(months, 12) + result.years = carryO + years + +proc `+`*(ti1, ti2: TimeInterval): TimeInterval = + ## Adds two ``TimeInterval`` objects together. + var carryO = 0 + result.milliseconds = `mod`(ti1.milliseconds + ti2.milliseconds, 1000) + carryO = `div`(ti1.milliseconds + ti2.milliseconds, 1000) + result.seconds = `mod`(carryO + ti1.seconds + ti2.seconds, 60) + carryO = `div`(ti1.seconds + ti2.seconds, 60) + result.minutes = `mod`(carryO + ti1.minutes + ti2.minutes, 60) + carryO = `div`(ti1.minutes + ti2.minutes, 60) + result.hours = `mod`(carryO + ti1.hours + ti2.hours, 24) + carryO = `div`(ti1.hours + ti2.hours, 24) + result.days = carryO + ti1.days + ti2.days + carryO = 0 + result.months = `mod`(ti1.months + ti2.months, 12) + carryO = `div`(ti1.months + ti2.months, 12) + result.years = carryO + ti1.years + ti2.years + +proc `-`*(ti1, ti2: TimeInterval): TimeInterval = + ## Subtracts TimeInterval ``ti1`` from ``ti2``. 
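`initInterval` and `+` carry overflowing units into the next larger unit, while `-` subtracts field by field, so a difference may contain negative components. A standalone sketch using the exported `TimeInterval` fields:

  import times
  let a = initInterval(seconds = 90)                    # carried: 1 minute, 30 seconds
  doAssert a.minutes == 1 and a.seconds == 30
  let b = initInterval(minutes = 45) + initInterval(minutes = 30)
  doAssert b.hours == 1 and b.minutes == 15             # 75 minutes --> 1 hour 15 minutes
  let c = initInterval(hours = 2) - initInterval(minutes = 30)
  doAssert c.hours == 2 and c.minutes == -30            # `-` does not normalize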
+ result = ti1 + result.milliseconds -= ti2.milliseconds + result.seconds -= ti2.seconds + result.minutes -= ti2.minutes + result.hours -= ti2.hours + result.days -= ti2.days + result.months -= ti2.months + result.years -= ti2.years proc isLeapYear*(year: int): bool = ## returns true if ``year`` is a leap year @@ -288,13 +329,22 @@ proc toSeconds(a: TimeInfo, interval: TimeInterval): float = newinterv.months += interval.years * 12 var curMonth = anew.month - for mth in 1 .. newinterv.months: - result += float(getDaysInMonth(curMonth, anew.year) * 24 * 60 * 60) - if curMonth == mDec: - curMonth = mJan - anew.year.inc() - else: - curMonth.inc() + if newinterv.months < 0: # subtracting + for mth in countDown(-1 * newinterv.months, 1): + result -= float(getDaysInMonth(curMonth, anew.year) * 24 * 60 * 60) + if curMonth == mJan: + curMonth = mDec + anew.year.dec() + else: + curMonth.dec() + else: # adding + for mth in 1 .. newinterv.months: + result += float(getDaysInMonth(curMonth, anew.year) * 24 * 60 * 60) + if curMonth == mDec: + curMonth = mJan + anew.year.inc() + else: + curMonth.inc() result += float(newinterv.days * 24 * 60 * 60) result += float(newinterv.hours * 60 * 60) result += float(newinterv.minutes * 60) @@ -302,28 +352,39 @@ proc toSeconds(a: TimeInfo, interval: TimeInterval): float = result += newinterv.milliseconds / 1000 proc `+`*(a: TimeInfo, interval: TimeInterval): TimeInfo = - ## adds ``interval`` time. + ## adds ``interval`` time from TimeInfo ``a``. ## ## **Note:** This has been only briefly tested and it may not be ## very accurate. let t = toSeconds(timeInfoToTime(a)) let secs = toSeconds(a, interval) - #if a.tzname == "UTC": - # result = getGMTime(fromSeconds(t + secs)) - #else: result = getLocalTime(fromSeconds(t + secs)) proc `-`*(a: TimeInfo, interval: TimeInterval): TimeInfo = - ## subtracts ``interval`` time. + ## subtracts ``interval`` time from TimeInfo ``a``. ## ## **Note:** This has been only briefly tested, it is inaccurate especially ## when you subtract so much that you reach the Julian calendar. let t = toSeconds(timeInfoToTime(a)) - let secs = toSeconds(a, interval) - #if a.tzname == "UTC": - # result = getGMTime(fromSeconds(t - secs)) - #else: - result = getLocalTime(fromSeconds(t - secs)) + var intval: TimeInterval + intval.milliseconds = - interval.milliseconds + intval.seconds = - interval.seconds + intval.minutes = - interval.minutes + intval.hours = - interval.hours + intval.days = - interval.days + intval.months = - interval.months + intval.years = - interval.years + let secs = toSeconds(a, intval) + result = getLocalTime(fromSeconds(t + secs)) + +proc miliseconds*(t: TimeInterval): int {.deprecated.} = t.milliseconds + +proc `miliseconds=`*(t: var TimeInterval, milliseconds: int) {.deprecated.} = + ## An alias for a misspelled field in ``TimeInterval``. + ## + ## **Warning:** This should not be used! It will be removed in the next + ## version. 
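The `TimeInfo` operators go through `toSeconds`, so month and year components respect actual calendar lengths; a standalone sketch:

  import times
  let info = getLocalTime(getTime())
  echo info + initInterval(months = 1)               # one month later, using real month lengths
  echo info - initInterval(hours = 1, minutes = 30)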
+ t.milliseconds = milliseconds when not defined(JS): proc epochTime*(): float {.rtl, extern: "nt$1", tags: [TimeEffect].} @@ -603,6 +664,69 @@ proc `$`*(m: Month): string = "November", "December"] return lookup[m] +proc milliseconds*(ms: int): TimeInterval {.inline.} = + ## TimeInterval of `ms` milliseconds + ## + ## Note: not all time functions have millisecond resolution + initInterval(`mod`(ms,1000), `div`(ms,1000)) + +proc seconds*(s: int): TimeInterval {.inline.} = + ## TimeInterval of `s` seconds + ## + ## ``echo getTime() + 5.seconds`` + initInterval(0,`mod`(s,60), `div`(s,60)) + +proc minutes*(m: int): TimeInterval {.inline.} = + ## TimeInterval of `m` minutes + ## + ## ``echo getTime() + 5.minutes`` + initInterval(0,0,`mod`(m,60), `div`(m,60)) + +proc hours*(h: int): TimeInterval {.inline.} = + ## TimeInterval of `h` hours + ## + ## ``echo getTime() + 2.hours`` + initInterval(0,0,0,`mod`(h,24),`div`(h,24)) + +proc days*(d: int): TimeInterval {.inline.} = + ## TimeInterval of `d` days + ## + ## ``echo getTime() + 2.days`` + initInterval(0,0,0,0,d) + +proc months*(m: int): TimeInterval {.inline.} = + ## TimeInterval of `m` months + ## + ## ``echo getTime() + 2.months`` + initInterval(0,0,0,0,0,`mod`(m,12),`div`(m,12)) + +proc years*(y: int): TimeInterval {.inline.} = + ## TimeInterval of `y` years + ## + ## ``echo getTime() + 2.years`` + initInterval(0,0,0,0,0,0,y) + +proc `+=`*(t: var Time, ti: TimeInterval) = + ## modifies `t` by adding the interval `ti` + t = timeInfoToTime(getLocalTime(t) + ti) + +proc `+`*(t: Time, ti: TimeInterval): Time = + ## adds the interval `ti` to Time `t` + ## by converting to localTime, adding the interval, and converting back + ## + ## ``echo getTime() + 1.days`` + result = timeInfoToTime(getLocalTime(t) + ti) + +proc `-=`*(t: var Time, ti: TimeInterval) = + ## modifies `t` by subtracting the interval `ti` + t = timeInfoToTime(getLocalTime(t) - ti) + +proc `-`*(t: Time, ti: TimeInterval): Time = + ## subtracts the interval `ti` from Time `t` + ## + ## ``echo getTime() - 1.days`` + result = timeInfoToTime(getLocalTime(t) - ti) + proc formatToken(info: TimeInfo, token: string, buf: var string) = ## Helper of the format proc to parse individual tokens.
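Together with the shorthand constructors this gives compact arithmetic on `Time` values; a standalone sketch that round-trips through the local calendar time just as the procs above do:

  import times
  let t = getTime()
  let later = t + 2.days + 3.hours            # Time + TimeInterval
  let back = later - 2.days - 3.hours
  doAssert toSeconds(back) == toSeconds(t)    # day/hour units round-trip, barring DST corner cases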
## @@ -1192,112 +1316,10 @@ proc timeToTimeInterval*(t: Time): TimeInterval = # Milliseconds not available from Time when isMainModule: - # $ date --date='@2147483647' - # Tue 19 Jan 03:14:07 GMT 2038 - - var t = getGMTime(fromSeconds(2147483647)) - assert t.format("ddd dd MMM hh:mm:ss ZZZ yyyy") == "Tue 19 Jan 03:14:07 UTC 2038" - assert t.format("ddd ddMMMhh:mm:ssZZZyyyy") == "Tue 19Jan03:14:07UTC2038" - - assert t.format("d dd ddd dddd h hh H HH m mm M MM MMM MMMM s" & - " ss t tt y yy yyy yyyy yyyyy z zz zzz ZZZ") == - "19 19 Tue Tuesday 3 03 3 03 14 14 1 01 Jan January 7 07 A AM 8 38 038 2038 02038 0 00 00:00 UTC" - - assert t.format("yyyyMMddhhmmss") == "20380119031407" - - var t2 = getGMTime(fromSeconds(160070789)) # Mon 27 Jan 16:06:29 GMT 1975 - assert t2.format("d dd ddd dddd h hh H HH m mm M MM MMM MMMM s" & - " ss t tt y yy yyy yyyy yyyyy z zz zzz ZZZ") == - "27 27 Mon Monday 4 04 16 16 6 06 1 01 Jan January 29 29 P PM 5 75 975 1975 01975 0 00 00:00 UTC" - - when not defined(JS): - when sizeof(Time) == 8: - var t3 = getGMTime(fromSeconds(889067643645)) # Fri 7 Jun 19:20:45 BST 30143 - assert t3.format("d dd ddd dddd h hh H HH m mm M MM MMM MMMM s" & - " ss t tt y yy yyy yyyy yyyyy z zz zzz ZZZ") == - "7 07 Fri Friday 6 06 18 18 20 20 6 06 Jun June 45 45 P PM 3 43 143 0143 30143 0 00 00:00 UTC" - assert t3.format(":,[]()-/") == ":,[]()-/" - - var t4 = getGMTime(fromSeconds(876124714)) # Mon 6 Oct 08:58:34 BST 1997 - assert t4.format("M MM MMM MMMM") == "10 10 Oct October" - - # Interval tests - assert((t4 - initInterval(years = 2)).format("yyyy") == "1995") - assert((t4 - initInterval(years = 7, minutes = 34, seconds = 24)).format("yyyy mm ss") == "1990 24 10") - - var s = "Tuesday at 09:04am on Dec 15, 2015" - var f = "dddd at hh:mmtt on MMM d, yyyy" - assert($s.parse(f) == "Tue Dec 15 09:04:00 2015") - # ANSIC = "Mon Jan _2 15:04:05 2006" - s = "Thu Jan 12 15:04:05 2006" - f = "ddd MMM dd HH:mm:ss yyyy" - assert($s.parse(f) == "Thu Jan 12 15:04:05 2006") - # UnixDate = "Mon Jan _2 15:04:05 MST 2006" - s = "Thu Jan 12 15:04:05 MST 2006" - f = "ddd MMM dd HH:mm:ss ZZZ yyyy" - assert($s.parse(f) == "Thu Jan 12 15:04:05 2006") - # RubyDate = "Mon Jan 02 15:04:05 -0700 2006" - s = "Thu Jan 12 15:04:05 -07:00 2006" - f = "ddd MMM dd HH:mm:ss zzz yyyy" - assert($s.parse(f) == "Thu Jan 12 15:04:05 2006") - # RFC822 = "02 Jan 06 15:04 MST" - s = "12 Jan 16 15:04 MST" - f = "dd MMM yy HH:mm ZZZ" - assert($s.parse(f) == "Tue Jan 12 15:04:00 2016") - # RFC822Z = "02 Jan 06 15:04 -0700" # RFC822 with numeric zone - s = "12 Jan 16 15:04 -07:00" - f = "dd MMM yy HH:mm zzz" - assert($s.parse(f) == "Tue Jan 12 15:04:00 2016") - # RFC850 = "Monday, 02-Jan-06 15:04:05 MST" - s = "Monday, 12-Jan-06 15:04:05 MST" - f = "dddd, dd-MMM-yy HH:mm:ss ZZZ" - assert($s.parse(f) == "Thu Jan 12 15:04:05 2006") - # RFC1123 = "Mon, 02 Jan 2006 15:04:05 MST" - s = "Thu, 12 Jan 2006 15:04:05 MST" - f = "ddd, dd MMM yyyy HH:mm:ss ZZZ" - assert($s.parse(f) == "Thu Jan 12 15:04:05 2006") - # RFC1123Z = "Mon, 02 Jan 2006 15:04:05 -0700" # RFC1123 with numeric zone - s = "Thu, 12 Jan 2006 15:04:05 -07:00" - f = "ddd, dd MMM yyyy HH:mm:ss zzz" - assert($s.parse(f) == "Thu Jan 12 15:04:05 2006") - # RFC3339 = "2006-01-02T15:04:05Z07:00" - s = "2006-01-12T15:04:05Z-07:00" - f = "yyyy-MM-ddTHH:mm:ssZzzz" - assert($s.parse(f) == "Thu Jan 12 15:04:05 2006") - f = "yyyy-MM-dd'T'HH:mm:ss'Z'zzz" - assert($s.parse(f) == "Thu Jan 12 15:04:05 2006") - # RFC3339Nano = "2006-01-02T15:04:05.999999999Z07:00" - s = 
"2006-01-12T15:04:05.999999999Z-07:00" - f = "yyyy-MM-ddTHH:mm:ss.999999999Zzzz" - assert($s.parse(f) == "Thu Jan 12 15:04:05 2006") - # Kitchen = "3:04PM" - s = "3:04PM" - f = "h:mmtt" - assert "15:04:00" in $s.parse(f) - when not defined(testing): - echo "Kitchen: " & $s.parse(f) - var ti = timeToTimeInfo(getTime()) - echo "Todays date after decoding: ", ti - var tint = timeToTimeInterval(getTime()) - echo "Todays date after decoding to interval: ", tint - # checking dayOfWeek matches known days - assert getDayOfWeek(21, 9, 1900) == dFri - assert getDayOfWeek(1, 1, 1970) == dThu - assert getDayOfWeek(21, 9, 1970) == dMon - assert getDayOfWeek(1, 1, 2000) == dSat - assert getDayOfWeek(1, 1, 2021) == dFri - # Julian tests - assert getDayOfWeekJulian(21, 9, 1900) == dFri - assert getDayOfWeekJulian(21, 9, 1970) == dMon - assert getDayOfWeekJulian(1, 1, 2000) == dSat - assert getDayOfWeekJulian(1, 1, 2021) == dFri - - # toSeconds tests with GM and Local timezones - #var t4 = getGMTime(fromSeconds(876124714)) # Mon 6 Oct 08:58:34 BST 1997 - var t4L = getLocalTime(fromSeconds(876124714)) - assert toSeconds(timeInfoToTime(t4L)) == 876124714 # fromSeconds is effectively "localTime" - assert toSeconds(timeInfoToTime(t4L)) + t4L.timezone.float == toSeconds(timeInfoToTime(t4)) - + # this is testing non-exported function + var + t4 = getGMTime(fromSeconds(876124714)) # Mon 6 Oct 08:58:34 BST 1997 + t4L = getLocalTime(fromSeconds(876124714)) assert toSeconds(t4, initInterval(seconds=0)) == 0.0 assert toSeconds(t4L, initInterval(milliseconds=1)) == toSeconds(t4, initInterval(milliseconds=1)) assert toSeconds(t4L, initInterval(seconds=1)) == toSeconds(t4, initInterval(seconds=1)) @@ -1307,12 +1329,5 @@ when isMainModule: assert toSeconds(t4L, initInterval(months=1)) == toSeconds(t4, initInterval(months=1)) assert toSeconds(t4L, initInterval(years=1)) == toSeconds(t4, initInterval(years=1)) - # adding intervals - var - a1L = toSeconds(timeInfoToTime(t4L + initInterval(hours = 1))) + t4L.timezone.float - a1G = toSeconds(timeInfoToTime(t4)) + 60.0 * 60.0 - assert a1L == a1G - # subtracting intervals - a1L = toSeconds(timeInfoToTime(t4L - initInterval(hours = 1))) + t4L.timezone.float - a1G = toSeconds(timeInfoToTime(t4)) - (60.0 * 60.0) - assert a1L == a1G + # Further tests are in tests/stdlib/ttime.nim + # koch test c stdlib diff --git a/lib/pure/unicode.nim b/lib/pure/unicode.nim index b059a7315..45f52eb7f 100644 --- a/lib/pure/unicode.nim +++ b/lib/pure/unicode.nim @@ -114,6 +114,7 @@ proc validateUtf8*(s: string): int = if ord(s[i]) <=% 127: inc(i) elif ord(s[i]) shr 5 == 0b110: + if ord(s[i]) < 0xc2: return i # Catch overlong ascii representations. 
if i+1 < L and ord(s[i+1]) shr 6 == 0b10: inc(i, 2) else: return i elif ord(s[i]) shr 4 == 0b1110: diff --git a/lib/pure/xmlparser.nim b/lib/pure/xmlparser.nim index 56b122000..2a2c3e1dd 100644 --- a/lib/pure/xmlparser.nim +++ b/lib/pure/xmlparser.nim @@ -96,7 +96,7 @@ proc parse(x: var XmlParser, errors: var seq[string]): XmlNode = next(x) of xmlEntity: ## &entity; - errors.add(errorMsg(x, "unknown entity: " & x.entityName)) + result = newEntity(x.entityName) next(x) of xmlEof: discard @@ -143,17 +143,24 @@ proc loadXml*(path: string): XmlNode = result = loadXml(path, errors) if errors.len > 0: raiseInvalidXml(errors) -when not defined(testing) and isMainModule: - import os +when isMainModule: + when not defined(testing): + import os - var errors: seq[string] = @[] - var x = loadXml(paramStr(1), errors) - for e in items(errors): echo e + var errors: seq[string] = @[] + var x = loadXml(paramStr(1), errors) + for e in items(errors): echo e - var f: File - if open(f, "xmltest.txt", fmWrite): - f.write($x) - f.close() + var f: File + if open(f, "xmltest.txt", fmWrite): + f.write($x) + f.close() + else: + quit("cannot write test.txt") else: - quit("cannot write test.txt") + block: # correctly parse ../../tests/testdata/doc1.xml + let filePath = "tests/testdata/doc1.xml" + var errors: seq[string] = @[] + var xml = loadXml(filePath, errors) + assert(errors.len == 0, "The file tests/testdata/doc1.xml should be parsed without errors.") diff --git a/lib/system.nim b/lib/system.nim index c5dd58c7b..bb8254364 100644 --- a/lib/system.nim +++ b/lib/system.nim @@ -232,8 +232,8 @@ proc low*[T](x: T): T {.magic: "Low", noSideEffect.} ## ## .. code-block:: nim ## var arr = [1,2,3,4,5,6,7] - ## high(arr) #=> 0 - ## high(2) #=> -9223372036854775808 + ## low(arr) #=> 0 + ## low(2) #=> -9223372036854775808 type range*{.magic: "Range".}[T] ## Generic type to construct range types. @@ -840,7 +840,7 @@ proc `div` *(x, y: int32): int32 {.magic: "DivI", noSideEffect.} ## 1 div 2 == 0 ## 2 div 2 == 1 ## 3 div 2 == 1 - ## 7 div 5 == 2 + ## 7 div 5 == 1 when defined(nimnomagic64): proc `div` *(x, y: int64): int64 {.magic: "DivI", noSideEffect.} @@ -2584,11 +2584,7 @@ when not defined(JS): #and not defined(nimscript): when hasAlloc: var - strDesc: TNimType - - strDesc.size = sizeof(string) - strDesc.kind = tyString - strDesc.flags = {ntfAcyclic} + strDesc = TNimType(size: sizeof(string), kind: tyString, flags: {ntfAcyclic}) when not defined(nimscript): include "system/ansi_c" diff --git a/lib/system/alloc.nim b/lib/system/alloc.nim index 3ebbc8c1e..b4462ed83 100644 --- a/lib/system/alloc.nim +++ b/lib/system/alloc.nim @@ -40,7 +40,7 @@ when defined(emscripten): MAP_PRIVATE = 2'i32 # Changes are private var MAP_ANONYMOUS {.importc: "MAP_ANONYMOUS", header: "<sys/mman.h>".}: cint - type + type PEmscriptenMMapBlock = ptr EmscriptenMMapBlock EmscriptenMMapBlock {.pure, inheritable.} = object realSize: int # size of previous chunk; for coalescing @@ -399,6 +399,9 @@ iterator allObjects(m: MemRegion): pointer {.inline.} = let c = cast[PBigChunk](c) yield addr(c.data) +proc iterToProc*(iter: typed, envType: typedesc; procName: untyped) {. 
+ magic: "Plugin", compileTime.} + proc isCell(p: pointer): bool {.inline.} = result = cast[ptr FreeCell](p).zeroField >% 1 diff --git a/lib/system/cellsets.nim b/lib/system/cellsets.nim index bb5de6f42..776a2b7ec 100644 --- a/lib/system/cellsets.nim +++ b/lib/system/cellsets.nim @@ -201,6 +201,41 @@ iterator elements(t: CellSet): PCell {.inline.} = inc(i) r = r.next +when false: + type + CellSetIter = object + p: PPageDesc + i, w, j: int + + proc next(it: var CellSetIter): PCell = + while true: + while it.w != 0: # test all remaining bits for zero + if (it.w and 1) != 0: # the bit is set! + result = cast[PCell]((it.p.key shl PageShift) or + (it.i shl IntShift +% it.j) *% MemAlign) + + inc(it.j) + it.w = it.w shr 1 + return + else: + inc(it.j) + it.w = it.w shr 1 + # load next w: + if it.i >= high(it.p.bits): + it.i = 0 + it.j = 0 + it.p = it.p.next + if it.p == nil: return nil + else: + inc it.i + it.w = it.p.bits[i] + + proc init(it: var CellSetIter; t: CellSet): PCell = + it.p = t.head + it.i = -1 + it.w = 0 + result = it.next + iterator elementsExcept(t, s: CellSet): PCell {.inline.} = var r = t.head while r != nil: diff --git a/lib/system/dyncalls.nim b/lib/system/dyncalls.nim index 908aa551b..6dc8999d1 100644 --- a/lib/system/dyncalls.nim +++ b/lib/system/dyncalls.nim @@ -68,7 +68,10 @@ when defined(posix): proc nimLoadLibrary(path: string): LibHandle = result = dlopen(path, RTLD_NOW) - #c_fprintf(c_stdout, "%s\n", dlerror()) + when defined(nimDebugDlOpen): + let error = dlerror() + if error != nil: + c_fprintf(c_stdout, "%s\n", error) proc nimGetProcAddr(lib: LibHandle, name: cstring): ProcAddr = result = dlsym(lib, name) @@ -105,7 +108,12 @@ elif defined(windows) or defined(dos): proc nimGetProcAddr(lib: LibHandle, name: cstring): ProcAddr = result = getProcAddress(cast[THINSTANCE](lib), name) - if result == nil: procAddrError(name) + if result != nil: return + for i in countup(0, 50): + var decorated = "_" & $name & "@" & $(i * 4) + result = getProcAddress(cast[THINSTANCE](lib), cstring(decorated)) + if result != nil: return + procAddrError(name) else: {.error: "no implementation for dyncalls".} diff --git a/lib/system/gc.nim b/lib/system/gc.nim index 0c632aeb1..c25cf4606 100644 --- a/lib/system/gc.nim +++ b/lib/system/gc.nim @@ -558,7 +558,7 @@ proc growObj(old: pointer, newsize: int, gch: var GcHeap): pointer = # we split the old refcount in 2 parts. XXX This is still not entirely # correct if the pointer that receives growObj's result is on the stack. # A better fix would be to emit the location specific write barrier for - # 'growObj', but this is lost of more work and who knows what new problems + # 'growObj', but this is lots of more work and who knows what new problems # this would create. res.refcount = rcIncrement decRef(ol) diff --git a/lib/system/gc2.nim b/lib/system/gc2.nim index 4ca0d144f..e68a8586e 100644 --- a/lib/system/gc2.nim +++ b/lib/system/gc2.nim @@ -1,7 +1,7 @@ # # # Nim's Runtime Library -# (c) Copyright 2012 Andreas Rumpf +# (c) Copyright 2015 Andreas Rumpf # # See the file "copying.txt", included in this # distribution, for details about the copyright. @@ -9,13 +9,15 @@ # Garbage Collector # -# The basic algorithm is *Deferrent Reference Counting* with cycle detection. -# This is achieved by combining a Deutsch-Bobrow garbage collector -# together with Christoper's partial mark-sweep garbage collector. -# -# Special care has been taken to avoid recursion as far as possible to avoid -# stack overflows when traversing deep datastructures. 
It is well-suited -# for soft real time applications (like games). +# The basic algorithm is *Deferred Reference Counting* with an incremental mark +# and sweep GC to free cycles. It is hard realtime in that if you play +# according to its rules, no deadline will ever be missed. + +# XXX Ensure by smart color masking that the object is not in the ZCT. + +when defined(nimCoroutines): + import arch + {.push profiler:off.} const @@ -29,82 +31,36 @@ const when withRealTime and not declared(getTicks): include "system/timers" when defined(memProfiler): - proc nimProfile(requestedSize: int) - -const - rcShift = 6 # the reference count is shifted so we can use - # the least significat bits for additinal flags: - - rcAlive = 0b00000 # object is reachable. - # color *black* in the original paper - - rcCycleCandidate = 0b00001 # possible root of a cycle. *purple* - - rcDecRefApplied = 0b00010 # the first dec-ref phase of the - # collector was already applied to this - # object. *gray* - - rcMaybeDead = 0b00011 # this object is a candidate for deletion - # during the collect cycles algorithm. - # *white*. - - rcReallyDead = 0b00100 # this is proved to be garbage - - rcRetiredBuffer = 0b00101 # this is a seq or string buffer that - # was replaced by a resize operation. - # see growObj for details + proc nimProfile(requestedSize: int) {.benign.} - rcColorMask = RefCount(0b00111) - - rcZct = 0b01000 # already added to ZCT - rcInCycleRoots = 0b10000 # already buffered as cycle candidate - rcHasStackRef = 0b100000 # the object had a stack ref in the last - # cycle collection - - rcMarkBit = rcHasStackRef # this is currently used for leak detection - # when traceGC is on - - rcBufferedAnywhere = rcZct or rcInCycleRoots +type + ObjectSpaceIter = object + state: range[-1..0] - rcIncrement = 1 shl rcShift # don't touch the color bits +iterToProc(allObjects, ptr ObjectSpaceIter, allObjectsAsProc) const - NewObjectsAreCycleRoots = true - # the alternative is to use the old strategy of adding cycle roots - # in incRef (in the compiler itself, this doesn't change much) - - IncRefRemovesCandidates = false - # this is safe only if we can reliably track the fact that the object - # has stack references. This could be easily done by adding another bit - # to the refcount field and setting it up in unmarkStackAndRegisters. - # The bit must also be set for new objects that are not rc1 and it must be - # examined in the decref loop in collectCycles. - # XXX: not implemented yet as tests didn't show any improvement from this - - MarkingSkipsAcyclicObjects = true - # Acyclic objects can be safely ignored in the mark and scan phases, - # because they cannot contribute to the internal count. - # XXX: if we generate specialized `markCyclic` and `markAcyclic` - # procs we can further optimize this as there won't be need for any - # checks in the code - - MinimumStackMarking = false - # Try to scan only the user stack and ignore the part of the stack - # belonging to the GC itself. see setStackTop for further info. - # XXX: still has problems in release mode in the compiler itself. 
- # investigate how it affects growObj - - CollectCyclesStats = false - + rcIncrement = 0b1000 # so that lowest 3 bits are not touched + rcBlackOrig = 0b000 + rcWhiteOrig = 0b001 + rcGrey = 0b010 # traditional color for incremental mark&sweep + rcUnused = 0b011 + ZctFlag = 0b100 # in ZCT + rcShift = 3 # shift by rcShift to get the reference counter + colorMask = 0b011 type WalkOp = enum - waPush + waMarkGlobal, # part of the backup mark&sweep + waMarkGrey, + waZctDecRef #, waDebug - Finalizer {.compilerproc.} = proc (self: pointer) {.nimcall.} + Phase {.pure.} = enum + None, Marking, Sweeping + Finalizer {.compilerproc.} = proc (self: pointer) {.nimcall, benign.} # A ref type can have a finalizer that is called before the object's # storage is freed. - GcStat {.final, pure.} = object + GcStat = object stackScans: int # number of performed stack scans (for statistics) cycleCollections: int # number of performed full collections maxThreshold: int # max threshold that has been set @@ -113,134 +69,78 @@ type cycleTableSize: int # max entries in cycle table maxPause: int64 # max measured GC pause in nanoseconds - GcHeap {.final, pure.} = object # this contains the zero count and - # non-zero count table + GcStack = object + prev: ptr GcStack + next: ptr GcStack + starts: pointer + pos: pointer + maxStackSize: int + + GcHeap = object # this contains the zero count and + # non-zero count table + black: int # either 0 or 1. + stack: ptr GcStack stackBottom: pointer - stackTop: pointer + phase: Phase cycleThreshold: int + when useCellIds: + idGenerator: int zct: CellSeq # the zero count table decStack: CellSeq # cells in the stack that are to decref again - cycleRoots: CellSeq - tempStack: CellSeq # temporary stack for recursion elimination - freeStack: CellSeq # objects ready to be freed + greyStack: CellSeq recGcLock: int # prevent recursion via finalizers; no thread lock - cycleRootsTrimIdx: int # Trimming is a light-weight collection of the - # cycle roots table that uses a cheap linear scan - # to find only possitively dead objects. - # One strategy is to perform it only for new objects - # allocated between the invocations of collectZCT. - # This index indicates the start of the range of - # such new objects within the table. 
when withRealTime: maxPause: Nanos # max allowed pause in nanoseconds; active if > 0 region: MemRegion # garbage collected region stat: GcStat -{.deprecated: [TWalkOp: WalkOp, TFinalizer: Finalizer, TGcStat: GcStat, - TGcHeap: GcHeap].} + additionalRoots: CellSeq # dummy roots for GC_ref/unref + spaceIter: ObjectSpaceIter + var - gch* {.rtlThreadVar.}: GcHeap + gch {.rtlThreadVar.}: GcHeap when not defined(useNimRtl): instantiateForRegion(gch.region) -template acquire(gch: GcHeap) = - when hasThreadSupport and hasSharedHeap: - AcquireSys(HeapLock) - -template release(gch: GcHeap) = - when hasThreadSupport and hasSharedHeap: - releaseSys(HeapLock) - -template setColor(c: PCell, color) = - c.refcount = (c.refcount and not rcColorMask) or color - -template color(c: PCell): expr = - c.refcount and rcColorMask - -template isBitDown(c: PCell, bit): expr = - (c.refcount and bit) == 0 - -template isBitUp(c: PCell, bit): expr = - (c.refcount and bit) != 0 - -template setBit(c: PCell, bit): expr = - c.refcount = c.refcount or bit - -template isDead(c: Pcell): expr = - c.isBitUp(rcReallyDead) # also covers rcRetiredBuffer - -template clearBit(c: PCell, bit): expr = - c.refcount = c.refcount and (not RefCount(bit)) - -when debugGC: - var gcCollectionIdx = 0 - - proc colorStr(c: PCell): cstring = - let color = c.color - case color - of rcAlive: return "alive" - of rcMaybeDead: return "maybedead" - of rcCycleCandidate: return "candidate" - of rcDecRefApplied: return "marked" - of rcRetiredBuffer: return "retired" - of rcReallyDead: return "dead" - else: return "unknown?" - - proc inCycleRootsStr(c: PCell): cstring = - if c.isBitUp(rcInCycleRoots): result = "cycleroot" - else: result = "" - - proc inZctStr(c: PCell): cstring = - if c.isBitUp(rcZct): result = "zct" - else: result = "" - - proc writeCell*(msg: CString, c: PCell, force = false) = - var kind = -1 - if c.typ != nil: kind = ord(c.typ.kind) - when trackAllocationSource: - c_fprintf(c_stdout, "[GC %d] %s: %p %d rc=%ld %s %s %s from %s(%ld)\n", - gcCollectionIdx, - msg, c, kind, c.refcount shr rcShift, - c.colorStr, c.inCycleRootsStr, c.inZctStr, - c.filename, c.line) - else: - c_fprintf(c_stdout, "[GC] %s: %p %d rc=%ld\n", - msg, c, kind, c.refcount shr rcShift) - -proc addZCT(zct: var CellSeq, c: PCell) {.noinline.} = - if c.isBitDown(rcZct): - c.setBit rcZct - zct.add c - -template setStackTop(gch) = - # This must be called immediately after we enter the GC code - # to minimize the size of the scanned stack. The stack consumed - # by the GC procs may amount to 200-400 bytes depending on the - # build settings and this contributes to false-positives - # in the conservative stack marking - when MinimumStackMarking: - var stackTop {.volatile.}: pointer - gch.stackTop = addr(stackTop) - -template addCycleRoot(cycleRoots: var CellSeq, c: PCell) = - if c.color != rcCycleCandidate: - c.setColor rcCycleCandidate - - # the object may be buffered already. 
for example, consider: - # decref; incref; decref - if c.isBitDown(rcInCycleRoots): - c.setBit rcInCycleRoots - cycleRoots.add c +proc initGC() = + when not defined(useNimRtl): + when traceGC: + for i in low(CellState)..high(CellState): init(states[i]) + gch.cycleThreshold = InitialCycleThreshold + gch.stat.stackScans = 0 + gch.stat.cycleCollections = 0 + gch.stat.maxThreshold = 0 + gch.stat.maxStackSize = 0 + gch.stat.maxStackCells = 0 + gch.stat.cycleTableSize = 0 + # init the rt + init(gch.zct) + init(gch.decStack) + init(gch.additionalRoots) + init(gch.greyStack) + +template gcAssert(cond: bool, msg: string) = + when defined(useGcAssert): + if not cond: + echo "[GCASSERT] ", msg + GC_disable() + writeStackTrace() + quit 1 + +proc addZCT(s: var CellSeq, c: PCell) {.noinline.} = + if (c.refcount and ZctFlag) == 0: + c.refcount = c.refcount or ZctFlag + add(s, c) proc cellToUsr(cell: PCell): pointer {.inline.} = # convert object (=pointer to refcount) to pointer to userdata result = cast[pointer](cast[ByteAddress](cell)+%ByteAddress(sizeof(Cell))) -proc usrToCell*(usr: pointer): PCell {.inline.} = +proc usrToCell(usr: pointer): PCell {.inline.} = # convert pointer to userdata to object (=pointer to refcount) result = cast[PCell](cast[ByteAddress](usr)-%ByteAddress(sizeof(Cell))) -proc canbeCycleRoot(c: PCell): bool {.inline.} = +proc canBeCycleRoot(c: PCell): bool {.inline.} = result = ntfAcyclic notin c.typ.flags proc extGetCellType(c: pointer): PNimType {.compilerproc.} = @@ -254,14 +154,40 @@ proc internRefcount(p: pointer): int {.exportc: "getRefcount".} = when BitsPerPage mod (sizeof(int)*8) != 0: {.error: "(BitsPerPage mod BitsPerUnit) should be zero!".} +template color(c): expr = c.refCount and colorMask +template setColor(c, col) = + c.refcount = c.refcount and not colorMask or col + +proc writeCell(msg: cstring, c: PCell) = + var kind = -1 + if c.typ != nil: kind = ord(c.typ.kind) + when leakDetector: + c_fprintf(c_stdout, "[GC] %s: %p %d rc=%ld from %s(%ld)\n", + msg, c, kind, c.refcount shr rcShift, c.filename, c.line) + else: + c_fprintf(c_stdout, "[GC] %s: %p %d rc=%ld; color=%ld\n", + msg, c, kind, c.refcount shr rcShift, c.color) + +template gcTrace(cell, state: expr): stmt {.immediate.} = + when traceGC: traceCell(cell, state) + # forward declarations: -proc collectCT(gch: var GcHeap) -proc isOnStack*(p: pointer): bool {.noinline.} -proc forAllChildren(cell: PCell, op: WalkOp) -proc doOperation(p: pointer, op: WalkOp) -proc forAllChildrenAux(dest: pointer, mt: PNimType, op: WalkOp) +proc collectCT(gch: var GcHeap) {.benign.} +proc isOnStack(p: pointer): bool {.noinline, benign.} +proc forAllChildren(cell: PCell, op: WalkOp) {.benign.} +proc doOperation(p: pointer, op: WalkOp) {.benign.} +proc forAllChildrenAux(dest: pointer, mt: PNimType, op: WalkOp) {.benign.} # we need the prototype here for debugging purposes +when hasThreadSupport and hasSharedHeap: + template `--`(x: expr): expr = atomicDec(x, rcIncrement) <% rcIncrement + template `++`(x: expr): stmt = discard atomicInc(x, rcIncrement) +else: + template `--`(x: expr): expr = + dec(x, rcIncrement) + x <% rcIncrement + template `++`(x: expr): stmt = inc(x, rcIncrement) + proc prepareDealloc(cell: PCell) = if cell.typ.finalizer != nil: # the finalizer could invoke something that @@ -273,246 +199,127 @@ proc prepareDealloc(cell: PCell) = (cast[Finalizer](cell.typ.finalizer))(cellToUsr(cell)) dec(gch.recGcLock) -when traceGC: - # traceGC is a special switch to enable extensive debugging - type - CellState = enum - 
csAllocated, csFreed - {.deprecated: [TCellState: CellState].} - var - states: array[CellState, CellSet] - - proc traceCell(c: PCell, state: CellState) = - case state - of csAllocated: - if c in states[csAllocated]: - writeCell("attempt to alloc an already allocated cell", c) - sysAssert(false, "traceCell 1") - excl(states[csFreed], c) - # writecell("allocated", c) - of csFreed: - if c in states[csFreed]: - writeCell("attempt to free a cell twice", c) - sysAssert(false, "traceCell 2") - if c notin states[csAllocated]: - writeCell("attempt to free not an allocated cell", c) - sysAssert(false, "traceCell 3") - excl(states[csAllocated], c) - # writecell("freed", c) - incl(states[state], c) - - proc computeCellWeight(c: PCell): int = - var x: CellSet - x.init - - let startLen = gch.tempStack.len - c.forAllChildren waPush - - while startLen != gch.tempStack.len: - dec gch.tempStack.len - var c = gch.tempStack.d[gch.tempStack.len] - if c in states[csFreed]: continue - inc result - if c notin x: - x.incl c - c.forAllChildren waPush - - template markChildrenRec(cell) = - let startLen = gch.tempStack.len - cell.forAllChildren waPush - let isMarked = cell.isBitUp(rcMarkBit) - while startLen != gch.tempStack.len: - dec gch.tempStack.len - var c = gch.tempStack.d[gch.tempStack.len] - if c in states[csFreed]: continue - if c.isBitDown(rcMarkBit): - c.setBit rcMarkBit - c.forAllChildren waPush - if c.isBitUp(rcMarkBit) and not isMarked: - writecell("cyclic cell", cell) - cprintf "Weight %d\n", cell.computeCellWeight - - proc writeLeakage(onlyRoots: bool) = - if onlyRoots: - for c in elements(states[csAllocated]): - if c notin states[csFreed]: - markChildrenRec(c) - var f = 0 - var a = 0 - for c in elements(states[csAllocated]): - inc a - if c in states[csFreed]: inc f - elif c.isBitDown(rcMarkBit): - writeCell("leak", c) - cprintf "Weight %d\n", c.computeCellWeight - cfprintf(cstdout, "Allocations: %ld; freed: %ld\n", a, f) - -template gcTrace(cell, state: expr): stmt {.immediate.} = - when logGC: writeCell($state, cell) - when traceGC: traceCell(cell, state) - -template WithHeapLock(blk: stmt): stmt = - when hasThreadSupport and hasSharedHeap: AcquireSys(HeapLock) - blk - when hasThreadSupport and hasSharedHeap: ReleaseSys(HeapLock) - proc rtlAddCycleRoot(c: PCell) {.rtl, inl.} = # we MUST access gch as a global here, because this crosses DLL boundaries! - WithHeapLock: addCycleRoot(gch.cycleRoots, c) + discard proc rtlAddZCT(c: PCell) {.rtl, inl.} = # we MUST access gch as a global here, because this crosses DLL boundaries! 
- WithHeapLock: addZCT(gch.zct, c) + addZCT(gch.zct, c) -type - CyclicMode = enum - Cyclic, - Acyclic, - MaybeCyclic - - ReleaseType = enum - AddToZTC - FreeImmediately - - HeapType = enum - LocalHeap - SharedHeap -{.deprecated: [TCyclicMode: CyclicMode, TReleaseType: ReleaseType, - THeapType: HeapType].} - -template `++` (rc: RefCount, heapType: HeapType): stmt = - when heapType == SharedHeap: - discard atomicInc(rc, rcIncrement) - else: - inc rc, rcIncrement - -template `--`(rc: RefCount): expr = - dec rc, rcIncrement - rc <% rcIncrement - -template `--` (rc: RefCount, heapType: HeapType): expr = - (when heapType == SharedHeap: atomicDec(rc, rcIncrement) <% rcIncrement else: --rc) - -template doDecRef(cc: PCell, - heapType = LocalHeap, - cycleFlag = MaybeCyclic): stmt = - var c = cc - sysAssert(isAllocatedPtr(gch.region, c), "decRef: interiorPtr") - # XXX: move this elesewhere - - sysAssert(c.refcount >=% rcIncrement, "decRef") - if c.refcount--(heapType): - # this is the last reference from the heap - # add to a zero-count-table that will be matched against stack pointers +proc decRef(c: PCell) {.inline.} = + gcAssert(isAllocatedPtr(gch.region, c), "decRef: interiorPtr") + gcAssert(c.refcount >=% rcIncrement, "decRef") + if --c.refcount: rtlAddZCT(c) - else: - when cycleFlag != Acyclic: - if cycleFlag == Cyclic or canBeCycleRoot(c): - # a cycle may have been broken - rtlAddCycleRoot(c) - -template doIncRef(cc: PCell, - heapType = LocalHeap, - cycleFlag = MaybeCyclic): stmt = - var c = cc - c.refcount++(heapType) - when cycleFlag != Acyclic: - when NewObjectsAreCycleRoots: - if canbeCycleRoot(c): - addCycleRoot(gch.cycleRoots, c) - elif IncRefRemovesCandidates: - c.setColor rcAlive - # XXX: this is not really atomic enough! - -proc nimGCref(p: pointer) {.compilerProc, inline.} = doIncRef(usrToCell(p)) -proc nimGCunref(p: pointer) {.compilerProc, inline.} = doDecRef(usrToCell(p)) + +proc incRef(c: PCell) {.inline.} = + gcAssert(isAllocatedPtr(gch.region, c), "incRef: interiorPtr") + c.refcount = c.refcount +% rcIncrement + +proc nimGCref(p: pointer) {.compilerProc.} = + let cell = usrToCell(p) + incRef(cell) + add(gch.additionalRoots, cell) + +proc nimGCunref(p: pointer) {.compilerProc.} = + let cell = usrToCell(p) + decRef(cell) + var L = gch.additionalRoots.len-1 + var i = L + let d = gch.additionalRoots.d + while i >= 0: + if d[i] == cell: + d[i] = d[L] + dec gch.additionalRoots.len + break + dec(i) + +template markGrey(x: PCell) = + if x.color == 1-gch.black and gch.phase == Phase.Marking: + x.setColor(rcGrey) + add(gch.greyStack, x) + +proc GC_addCycleRoot*[T](p: ref T) {.inline.} = + ## adds 'p' to the cycle candidate set for the cycle collector. It is + ## necessary if you used the 'acyclic' pragma for optimization + ## purposes and need to break cycles manually. + rtlAddCycleRoot(usrToCell(cast[pointer](p))) proc nimGCunrefNoCycle(p: pointer) {.compilerProc, inline.} = sysAssert(allocInv(gch.region), "begin nimGCunrefNoCycle") var c = usrToCell(p) - sysAssert(isAllocatedPtr(gch.region, c), "nimGCunrefNoCycle: isAllocatedPtr") - if c.refcount--(LocalHeap): + gcAssert(isAllocatedPtr(gch.region, c), "nimGCunrefNoCycle: isAllocatedPtr") + if --c.refcount: rtlAddZCT(c) sysAssert(allocInv(gch.region), "end nimGCunrefNoCycle 2") sysAssert(allocInv(gch.region), "end nimGCunrefNoCycle 5") -template doAsgnRef(dest: PPointer, src: pointer, - heapType = LocalHeap, cycleFlag = MaybeCyclic): stmt = - sysAssert(not isOnStack(dest), "asgnRef") - # BUGFIX: first incRef then decRef! 
- if src != nil: doIncRef(usrToCell(src), heapType, cycleFlag) - if dest[] != nil: doDecRef(usrToCell(dest[]), heapType, cycleFlag) - dest[] = src - proc asgnRef(dest: PPointer, src: pointer) {.compilerProc, inline.} = # the code generator calls this proc! - doAsgnRef(dest, src, LocalHeap, MaybeCyclic) + gcAssert(not isOnStack(dest), "asgnRef") + # BUGFIX: first incRef then decRef! + if src != nil: + let s = usrToCell(src) + incRef(s) + markGrey(s) + if dest[] != nil: decRef(usrToCell(dest[])) + dest[] = src proc asgnRefNoCycle(dest: PPointer, src: pointer) {.compilerProc, inline.} = # the code generator calls this proc if it is known at compile time that no # cycle is possible. - doAsgnRef(dest, src, LocalHeap, Acyclic) + gcAssert(not isOnStack(dest), "asgnRefNoCycle") + if src != nil: + var c = usrToCell(src) + ++c.refcount + markGrey(c) + if dest[] != nil: + var c = usrToCell(dest[]) + if --c.refcount: + rtlAddZCT(c) + dest[] = src proc unsureAsgnRef(dest: PPointer, src: pointer) {.compilerProc.} = # unsureAsgnRef updates the reference counters only if dest is not on the # stack. It is used by the code generator if it cannot decide wether a # reference is in the stack or not (this can happen for var parameters). if not isOnStack(dest): - if src != nil: doIncRef(usrToCell(src)) - # XXX we must detect a shared heap here - # better idea may be to just eliminate the need for unsureAsgnRef - # + if src != nil: + let s = usrToCell(src) + incRef(s) + markGrey(s) # XXX finally use assembler for the stack checking instead! # the test for '!= nil' is correct, but I got tired of the segfaults # resulting from the crappy stack checking: - if cast[int](dest[]) >=% PageSize: doDecRef(usrToCell(dest[])) + if cast[int](dest[]) >=% PageSize: decRef(usrToCell(dest[])) else: # can't be an interior pointer if it's a stack location! - sysAssert(interiorAllocatedPtr(gch.region, dest)==nil, - "stack loc AND interior pointer") + gcAssert(interiorAllocatedPtr(gch.region, dest) == nil, + "stack loc AND interior pointer") dest[] = src -when hasThreadSupport and hasSharedHeap: - # shared heap version of the above procs - proc asgnRefSh(dest: PPointer, src: pointer) {.compilerProc, inline.} = - doAsgnRef(dest, src, SharedHeap, MaybeCyclic) - - proc asgnRefNoCycleSh(dest: PPointer, src: pointer) {.compilerProc, inline.} = - doAsgnRef(dest, src, SharedHeap, Acyclic) +type + GlobalMarkerProc = proc () {.nimcall, benign.} +var + globalMarkersLen: int + globalMarkers: array[0.. 
7_000, GlobalMarkerProc] -proc initGC() = - when not defined(useNimRtl): - when traceGC: - for i in low(CellState)..high(CellState): init(states[i]) - gch.cycleThreshold = InitialCycleThreshold - gch.stat.stackScans = 0 - gch.stat.cycleCollections = 0 - gch.stat.maxThreshold = 0 - gch.stat.maxStackSize = 0 - gch.stat.maxStackCells = 0 - gch.stat.cycleTableSize = 0 - # init the rt - init(gch.zct) - init(gch.tempStack) - init(gch.freeStack) - init(gch.cycleRoots) - init(gch.decStack) +proc nimRegisterGlobalMarker(markerProc: GlobalMarkerProc) {.compilerProc.} = + if globalMarkersLen <= high(globalMarkers): + globalMarkers[globalMarkersLen] = markerProc + inc globalMarkersLen + else: + echo "[GC] cannot register global variable; too many global variables" + quit 1 -proc forAllSlotsAux(dest: pointer, n: ptr TNimNode, op: WalkOp) = +proc forAllSlotsAux(dest: pointer, n: ptr TNimNode, op: WalkOp) {.benign.} = var d = cast[ByteAddress](dest) case n.kind of nkSlot: forAllChildrenAux(cast[pointer](d +% n.offset), n.typ, op) of nkList: for i in 0..n.len-1: - # inlined for speed - if n.sons[i].kind == nkSlot: - if n.sons[i].typ.kind in {tyRef, tyString, tySequence}: - doOperation(cast[PPointer](d +% n.sons[i].offset)[], op) - else: - forAllChildrenAux(cast[pointer](d +% n.sons[i].offset), - n.sons[i].typ, op) - else: - forAllSlotsAux(dest, n.sons[i], op) + forAllSlotsAux(dest, n.sons[i], op) of nkCase: var m = selectBranch(dest, n) if m != nil: forAllSlotsAux(dest, m, op) @@ -533,9 +340,10 @@ proc forAllChildrenAux(dest: pointer, mt: PNimType, op: WalkOp) = else: discard proc forAllChildren(cell: PCell, op: WalkOp) = - sysAssert(cell != nil, "forAllChildren: 1") - sysAssert(cell.typ != nil, "forAllChildren: 2") - sysAssert cell.typ.kind in {tyRef, tySequence, tyString}, "forAllChildren: 3" + gcAssert(cell != nil, "forAllChildren: 1") + gcAssert(isAllocatedPtr(gch.region, cell), "forAllChildren: 2") + gcAssert(cell.typ != nil, "forAllChildren: 3") + gcAssert cell.typ.kind in {tyRef, tySequence, tyString}, "forAllChildren: 4" let marker = cell.typ.marker if marker != nil: marker(cellToUsr(cell), op.int) @@ -547,10 +355,9 @@ proc forAllChildren(cell: PCell, op: WalkOp) = var d = cast[ByteAddress](cellToUsr(cell)) var s = cast[PGenericSeq](d) if s != nil: - let baseAddr = d +% GenericSeqSize for i in 0..s.len-1: - forAllChildrenAux(cast[pointer](baseAddr +% i *% cell.typ.base.size), - cell.typ.base, op) + forAllChildrenAux(cast[pointer](d +% i *% cell.typ.base.size +% + GenericSeqSize), cell.typ.base, op) else: discard proc addNewObjToZCT(res: PCell, gch: var GcHeap) {.inline.} = @@ -571,7 +378,7 @@ proc addNewObjToZCT(res: PCell, gch: var GcHeap) {.inline.} = template replaceZctEntry(i: expr) = c = d[i] if c.refcount >=% rcIncrement: - c.clearBit(rcZct) + c.refcount = c.refcount and not ZctFlag d[i] = res return if L > 8: @@ -592,408 +399,335 @@ proc addNewObjToZCT(res: PCell, gch: var GcHeap) {.inline.} = for i in countdown(L-1, max(0, L-8)): var c = d[i] if c.refcount >=% rcIncrement: - c.clearBit(rcZct) + c.refcount = c.refcount and not ZctFlag d[i] = res return add(gch.zct, res) -proc rawNewObj(typ: PNimType, size: int, gch: var GcHeap, rc1 = false): pointer = +{.push stackTrace: off, profiler:off.} +proc gcInvariant*() = + sysAssert(allocInv(gch.region), "injected") + when declared(markForDebug): + markForDebug(gch) +{.pop.} + +proc rawNewObj(typ: PNimType, size: int, gch: var GcHeap): pointer = # generates a new object and sets its reference counter to 0 - acquire(gch) 
sysAssert(allocInv(gch.region), "rawNewObj begin") - sysAssert(typ.kind in {tyRef, tyString, tySequence}, "newObj: 1") - + gcAssert(typ.kind in {tyRef, tyString, tySequence}, "newObj: 1") collectCT(gch) - sysAssert(allocInv(gch.region), "rawNewObj after collect") - var res = cast[PCell](rawAlloc(gch.region, size + sizeof(Cell))) - sysAssert(allocInv(gch.region), "rawNewObj after rawAlloc") - - sysAssert((cast[ByteAddress](res) and (MemAlign-1)) == 0, "newObj: 2") - + gcAssert((cast[ByteAddress](res) and (MemAlign-1)) == 0, "newObj: 2") + # now it is buffered in the ZCT res.typ = typ - - when trackAllocationSource and not hasThreadSupport: - if framePtr != nil and framePtr.prev != nil and framePtr.prev.prev != nil: - res.filename = framePtr.prev.prev.filename - res.line = framePtr.prev.prev.line - else: - res.filename = "nofile" - - if rc1: - res.refcount = rcIncrement # refcount is 1 - else: - # its refcount is zero, so add it to the ZCT: - res.refcount = rcZct - addNewObjToZCT(res, gch) - - if NewObjectsAreCycleRoots and canBeCycleRoot(res): - res.setBit(rcInCycleRoots) - res.setColor rcCycleCandidate - gch.cycleRoots.add res - + when leakDetector and not hasThreadSupport: + if framePtr != nil and framePtr.prev != nil: + res.filename = framePtr.prev.filename + res.line = framePtr.prev.line + # refcount is zero, color is black, but mark it to be in the ZCT + res.refcount = ZctFlag or gch.black sysAssert(isAllocatedPtr(gch.region, res), "newObj: 3") - + # its refcount is zero, so add it to the ZCT: + addNewObjToZCT(res, gch) when logGC: writeCell("new cell", res) gcTrace(res, csAllocated) - release(gch) + when useCellIds: + inc gch.idGenerator + res.id = gch.idGenerator result = cellToUsr(res) sysAssert(allocInv(gch.region), "rawNewObj end") {.pop.} -proc freeCell(gch: var GcHeap, c: PCell) = - # prepareDealloc(c) - gcTrace(c, csFreed) - - when reallyDealloc: rawDealloc(gch.region, c) - else: - sysAssert(c.typ != nil, "collectCycles") - zeroMem(c, sizeof(Cell)) - -template eraseAt(cells: var CellSeq, at: int): stmt = - cells.d[at] = cells.d[cells.len - 1] - dec cells.len - -template trimAt(roots: var CellSeq, at: int): stmt = - # This will remove a cycle root candidate during trimming. - # a candidate is removed either because it received a refup and - # it's no longer a candidate or because it received further refdowns - # and now it's dead for sure. - let c = roots.d[at] - c.clearBit(rcInCycleRoots) - roots.eraseAt(at) - if c.isBitUp(rcReallyDead) and c.refcount <% rcIncrement: - # This case covers both dead objects and retired buffers - # That's why we must also check the refcount (it may be - # kept possitive by stack references). - freeCell(gch, c) +proc newObjNoInit(typ: PNimType, size: int): pointer {.compilerRtl.} = + result = rawNewObj(typ, size, gch) + when defined(memProfiler): nimProfile(size) proc newObj(typ: PNimType, size: int): pointer {.compilerRtl.} = - setStackTop(gch) - result = rawNewObj(typ, size, gch, false) + result = rawNewObj(typ, size, gch) zeroMem(result, size) when defined(memProfiler): nimProfile(size) -proc newObjNoInit(typ: PNimType, size: int): pointer {.compilerRtl.} = - setStackTop(gch) - result = rawNewObj(typ, size, gch, false) - when defined(memProfiler): nimProfile(size) - proc newSeq(typ: PNimType, len: int): pointer {.compilerRtl.} = - setStackTop(gch) # `newObj` already uses locks, so no need for them here. 
let size = addInt(mulInt(len, typ.base.size), GenericSeqSize) result = newObj(typ, size) cast[PGenericSeq](result).len = len cast[PGenericSeq](result).reserved = len + when defined(memProfiler): nimProfile(size) proc newObjRC1(typ: PNimType, size: int): pointer {.compilerRtl.} = - setStackTop(gch) - result = rawNewObj(typ, size, gch, true) + # generates a new object and sets its reference counter to 1 + sysAssert(allocInv(gch.region), "newObjRC1 begin") + gcAssert(typ.kind in {tyRef, tyString, tySequence}, "newObj: 1") + collectCT(gch) + sysAssert(allocInv(gch.region), "newObjRC1 after collectCT") + + var res = cast[PCell](rawAlloc(gch.region, size + sizeof(Cell))) + sysAssert(allocInv(gch.region), "newObjRC1 after rawAlloc") + sysAssert((cast[ByteAddress](res) and (MemAlign-1)) == 0, "newObj: 2") + # now it is buffered in the ZCT + res.typ = typ + when leakDetector and not hasThreadSupport: + if framePtr != nil and framePtr.prev != nil: + res.filename = framePtr.prev.filename + res.line = framePtr.prev.line + res.refcount = rcIncrement or gch.black # refcount is 1 + sysAssert(isAllocatedPtr(gch.region, res), "newObj: 3") + when logGC: writeCell("new cell", res) + gcTrace(res, csAllocated) + when useCellIds: + inc gch.idGenerator + res.id = gch.idGenerator + result = cellToUsr(res) + zeroMem(result, size) + sysAssert(allocInv(gch.region), "newObjRC1 end") when defined(memProfiler): nimProfile(size) proc newSeqRC1(typ: PNimType, len: int): pointer {.compilerRtl.} = - setStackTop(gch) let size = addInt(mulInt(len, typ.base.size), GenericSeqSize) result = newObjRC1(typ, size) cast[PGenericSeq](result).len = len cast[PGenericSeq](result).reserved = len + when defined(memProfiler): nimProfile(size) proc growObj(old: pointer, newsize: int, gch: var GcHeap): pointer = - acquire(gch) collectCT(gch) var ol = usrToCell(old) + gcAssert(isAllocatedPtr(gch.region, ol), "growObj: freed pointer?") + sysAssert(ol.typ != nil, "growObj: 1") - sysAssert(ol.typ.kind in {tyString, tySequence}, "growObj: 2") + gcAssert(ol.typ.kind in {tyString, tySequence}, "growObj: 2") sysAssert(allocInv(gch.region), "growObj begin") var res = cast[PCell](rawAlloc(gch.region, newsize + sizeof(Cell))) - var elemSize = if ol.typ.kind != tyString: ol.typ.base.size - else: 1 + var elemSize = 1 + if ol.typ.kind != tyString: elemSize = ol.typ.base.size - var oldsize = cast[PGenericSeq](old).len*elemSize + GenericSeqSize - - # XXX: This should happen outside - # call user-defined move code - # call user-defined default constructor + let oldsize = cast[PGenericSeq](old).len*elemSize + GenericSeqSize copyMem(res, ol, oldsize + sizeof(Cell)) - zeroMem(cast[pointer](cast[ByteAddress](res)+% oldsize +% sizeof(Cell)), + zeroMem(cast[pointer](cast[ByteAddress](res) +% oldsize +% sizeof(Cell)), newsize-oldsize) - sysAssert((cast[ByteAddress](res) and (MemAlign-1)) == 0, "growObj: 3") - sysAssert(res.refcount shr rcShift <=% 1, "growObj: 4") - - when false: - if ol.isBitUp(rcZct): - var j = gch.zct.len-1 - var d = gch.zct.d - while j >= 0: - if d[j] == ol: - d[j] = res - break - dec(j) - - if ol.isBitUp(rcInCycleRoots): - for i in 0 .. <gch.cycleRoots.len: - if gch.cycleRoots.d[i] == ol: - eraseAt(gch.cycleRoots, i) - - freeCell(gch, ol) - - else: - # the new buffer inherits the GC state of the old one - if res.isBitUp(rcZct): gch.zct.add res - if res.isBitUp(rcInCycleRoots): gch.cycleRoots.add res - - # Pay attention to what's going on here! We're not releasing the old memory. 
- # This is because at this point there may be an interior pointer pointing - # into this buffer somewhere on the stack (due to `var` parameters now and - # and `let` and `var:var` stack locations in the future). - # We'll release the memory in the next GC cycle. If we release it here, - # we cannot guarantee that no memory will be corrupted when only safe - # language features are used. Accessing the memory after the seq/string - # has been invalidated may still result in logic errors in the user code. - # We may improve on that by protecting the page in debug builds or - # by providing a warning when we detect a stack pointer into it. - let bufferFlags = ol.refcount and rcBufferedAnywhere - if bufferFlags == 0: - # we need this in order to collect it safely later - ol.refcount = rcRetiredBuffer or rcZct - gch.zct.add ol - else: - ol.refcount = rcRetiredBuffer or bufferFlags - - when logGC: - writeCell("growObj old cell", ol) - writeCell("growObj new cell", res) - + # This can be wrong for intermediate temps that are nevertheless on the + # heap because of lambda lifting: + #gcAssert(res.refcount shr rcShift <=% 1, "growObj: 4") + when logGC: + writeCell("growObj old cell", ol) + writeCell("growObj new cell", res) + gcTrace(ol, csZctFreed) gcTrace(res, csAllocated) - release(gch) + when reallyDealloc: + sysAssert(allocInv(gch.region), "growObj before dealloc") + if ol.refcount shr rcShift <=% 1: + # free immediately to save space: + if (ol.refcount and ZctFlag) != 0: + var j = gch.zct.len-1 + var d = gch.zct.d + while j >= 0: + if d[j] == ol: + d[j] = res + break + dec(j) + rawDealloc(gch.region, ol) + else: + # we split the old refcount in 2 parts. XXX This is still not entirely + # correct if the pointer that receives growObj's result is on the stack. + # A better fix would be to emit the location specific write barrier for + # 'growObj', but this is lots of more work and who knows what new problems + # this would create. 
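      # (illustration of the split with made-up counts: if `ol` held
      # 3*rcIncrement before growObj, afterwards `res` carries 1*rcIncrement
      # and `decRef(ol)` leaves `ol` with 2*rcIncrement, so the total number
      # of heap references is preserved; the old buffer is then reclaimed
      # through the normal ZCT path once its remaining references go away.)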
+ res.refcount = rcIncrement or gch.black + decRef(ol) + else: + sysAssert(ol.typ != nil, "growObj: 5") + zeroMem(ol, sizeof(Cell)) + when useCellIds: + inc gch.idGenerator + res.id = gch.idGenerator result = cellToUsr(res) sysAssert(allocInv(gch.region), "growObj end") when defined(memProfiler): nimProfile(newsize-oldsize) proc growObj(old: pointer, newsize: int): pointer {.rtl.} = - setStackTop(gch) result = growObj(old, newsize, gch) {.push profiler:off.} -# ---------------- cycle collector ------------------------------------------- -proc doOperation(p: pointer, op: WalkOp) = - if p == nil: return - var c: PCell = usrToCell(p) - sysAssert(c != nil, "doOperation: 1") - gch.tempStack.add c +template takeStartTime(workPackageSize) {.dirty.} = + const workPackage = workPackageSize + var debugticker = 1000 + when withRealTime: + var steps = workPackage + var t0: Ticks + if gch.maxPause > 0: t0 = getticks() -proc nimGCvisit(d: pointer, op: int) {.compilerRtl.} = - doOperation(d, WalkOp(op)) +template takeTime {.dirty.} = + when withRealTime: dec steps + dec debugticker + +template checkTime {.dirty.} = + if debugticker <= 0: + echo "in loop" + debugticker = 1000 + when withRealTime: + if steps == 0: + steps = workPackage + if gch.maxPause > 0: + let duration = getticks() - t0 + # the GC's measuring is not accurate and needs some cleanup actions + # (stack unmarking), so subtract some short amount of time in + # order to miss deadlines less often: + if duration >= gch.maxPause - 50_000: + return false -type - RecursionType = enum - FromChildren, - FromRoot -{.deprecated: [TRecursionType: RecursionType].} +# ---------------- cycle collector ------------------------------------------- -proc collectZCT(gch: var GcHeap): bool +proc freeCyclicCell(gch: var GcHeap, c: PCell) = + gcAssert(isAllocatedPtr(gch.region, c), "freeCyclicCell: freed pointer?") -template pseudoRecursion(typ: RecursionType, body: stmt): stmt = - discard + var d = gch.decStack.d + for i in 0..gch.decStack.len-1: + gcAssert d[i] != c, "wtf man, freeing obviously alive stuff?!!" + + prepareDealloc(c) + gcTrace(c, csCycFreed) + when logGC: writeCell("cycle collector dealloc cell", c) + when reallyDealloc: + sysAssert(allocInv(gch.region), "free cyclic cell") + rawDealloc(gch.region, c) + else: + gcAssert(c.typ != nil, "freeCyclicCell") + zeroMem(c, sizeof(Cell)) -proc trimCycleRoots(gch: var GcHeap, startIdx = gch.cycleRootsTrimIdx) = - var i = startIdx - while i < gch.cycleRoots.len: - if gch.cycleRoots.d[i].color != rcCycleCandidate: - gch.cycleRoots.trimAt i - else: - inc i +proc sweep(gch: var GcHeap): bool = + takeStartTime(100) + echo "loop start" + let black = gch.black + while true: + let x = allObjectsAsProc(gch.region, addr gch.spaceIter) + if gch.spaceIter.state < 0: break + takeTime() + if isCell(x): + # cast to PCell is correct here: + var c = cast[PCell](x) + gcAssert c.color != rcGrey, "cell is still grey?" + if c.color != black: freeCyclicCell(gch, c) + # Since this is incremental, we MUST not set the object to 'white' here. + # We could set all the remaining objects to white after the 'sweep' + # completed but instead we flip the meaning of black/white to save one + # traversal over the heap! 
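      # (sketch of the colour flip, assuming the two colours are encoded as 0
      # and 1 as `gch.black = 1 - gch.black` below suggests: cells surviving
      # this sweep keep the current `black` value, and once the meaning of
      # black/white is flipped that very value reads as white for the next
      # marking phase, so no extra repainting traversal over the heap is
      # needed.)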
+ checkTime() + # prepare for next iteration: + echo "loop end" + gch.spaceIter = ObjectSpaceIter() + result = true - gch.cycleRootsTrimIdx = gch.cycleRoots.len +proc markRoot(gch: var GcHeap, c: PCell) = + # since we start with 'black' cells, we need to mark them here too: + if c.color != rcGrey: + c.setColor(rcGrey) + add(gch.greyStack, c) -# we now use a much simpler and non-recursive algorithm for cycle removal -proc collectCycles(gch: var GcHeap) = - if gch.cycleRoots.len == 0: return - gch.stat.cycleTableSize = max(gch.stat.cycleTableSize, gch.cycleRoots.len) +proc markIncremental(gch: var GcHeap): bool = + var L = addr(gch.greyStack.len) + takeStartTime(100) + while L[] > 0: + var c = gch.greyStack.d[0] + sysAssert(isAllocatedPtr(gch.region, c), "markIncremental: isAllocatedPtr") + gch.greyStack.d[0] = gch.greyStack.d[L[] - 1] + dec(L[]) + takeTime() + if c.color == rcGrey: + c.setColor(gch.black) + forAllChildren(c, waMarkGrey) + checkTime() + gcAssert gch.greyStack.len == 0, "markIncremental: greystack not empty " + result = true - when CollectCyclesStats: - let l0 = gch.cycleRoots.len - let tStart = getTicks() +proc markGlobals(gch: var GcHeap) = + for i in 0 .. < globalMarkersLen: globalMarkers[i]() +proc markLocals(gch: var GcHeap) = + var d = gch.decStack.d + for i in 0 .. < gch.decStack.len: + sysAssert isAllocatedPtr(gch.region, d[i]), "markLocals" + markRoot(gch, d[i]) + +when logGC: var - decrefs = 0 - increfs = 0 - collected = 0 - maybedeads = 0 - - template ignoreObject(c: PCell): expr = - # This controls which objects will be ignored in the mark and scan stages - (when MarkingSkipsAcyclicObjects: not canbeCycleRoot(c) else: false) - # not canbeCycleRoot(c) - # false - # c.isBitUp(rcHasStackRef) - - template earlyMarkAliveRec(cell) = - let startLen = gch.tempStack.len - cell.setColor rcAlive - cell.forAllChildren waPush - - while startLen != gch.tempStack.len: - dec gch.tempStack.len - var c = gch.tempStack.d[gch.tempStack.len] - if c.color != rcAlive: - c.setColor rcAlive - c.forAllChildren waPush - - template earlyMarkAlive(stackRoots) = - # This marks all objects reachable from the stack as alive before any - # of the other stages is executed. Such objects cannot be garbage and - # they don't need to participate in the recursive decref/incref. - for i in 0 .. 
<stackRoots.len: - var c = stackRoots.d[i] - # c.setBit rcHasStackRef - earlyMarkAliveRec(c) - - earlyMarkAlive(gch.decStack) - - when CollectCyclesStats: - let tAfterEarlyMarkAlive = getTicks() - - template recursiveDecRef(cell) = - let startLen = gch.tempStack.len - cell.setColor rcDecRefApplied - cell.forAllChildren waPush - - while startLen != gch.tempStack.len: - dec gch.tempStack.len - var c = gch.tempStack.d[gch.tempStack.len] - if ignoreObject(c): continue - - sysAssert(c.refcount >=% rcIncrement, "recursive dec ref") - dec c.refcount, rcIncrement - inc decrefs - if c.color != rcDecRefApplied: - c.setColor rcDecRefApplied - c.forAllChildren waPush - - template markRoots(roots) = - var i = 0 - while i < roots.len: - if roots.d[i].color == rcCycleCandidate: - recursiveDecRef(roots.d[i]) - inc i - else: - roots.trimAt i - - markRoots(gch.cycleRoots) - - when CollectCyclesStats: - let tAfterMark = getTicks() - c_printf "COLLECT CYCLES %d: %d/%d\n", gcCollectionIdx, gch.cycleRoots.len, l0 - - template recursiveMarkAlive(cell) = - let startLen = gch.tempStack.len - cell.setColor rcAlive - cell.forAllChildren waPush - - while startLen != gch.tempStack.len: - dec gch.tempStack.len - var c = gch.tempStack.d[gch.tempStack.len] - if ignoreObject(c): continue - inc c.refcount, rcIncrement - inc increfs - - if c.color != rcAlive: - c.setColor rcAlive - c.forAllChildren waPush - - template scanRoots(roots) = - for i in 0 .. <roots.len: - let startLen = gch.tempStack.len - gch.tempStack.add roots.d[i] - - while startLen != gch.tempStack.len: - dec gch.tempStack.len - var c = gch.tempStack.d[gch.tempStack.len] - if ignoreObject(c): continue - if c.color == rcDecRefApplied: - if c.refcount >=% rcIncrement: - recursiveMarkAlive(c) - else: - # note that this is not necessarily the ultimate - # destiny of the object. we may still mark it alive - # later if we encounter another node from where it's - # reachable. - c.setColor rcMaybeDead - inc maybedeads - c.forAllChildren waPush - - scanRoots(gch.cycleRoots) - - when CollectCyclesStats: - let tAfterScan = getTicks() - - template collectDead(roots) = - for i in 0 .. <roots.len: - var c = roots.d[i] - c.clearBit(rcInCycleRoots) - - let startLen = gch.tempStack.len - gch.tempStack.add c - - while startLen != gch.tempStack.len: - dec gch.tempStack.len - var c = gch.tempStack.d[gch.tempStack.len] - when MarkingSkipsAcyclicObjects: - if not canbeCycleRoot(c): - # This is an acyclic object reachable from a dead cyclic object - # We must do a normal decref here that may add the acyclic object - # to the ZCT - doDecRef(c, LocalHeap, Cyclic) - continue - if c.color == rcMaybeDead and not c.isBitUp(rcInCycleRoots): - c.setColor(rcReallyDead) - inc collected - c.forAllChildren waPush - # we need to postpone the actual deallocation in order to allow - # the finalizers to run while the data structures are still intact - gch.freeStack.add c - prepareDealloc(c) - - for i in 0 .. 
<gch.freeStack.len: - freeCell(gch, gch.freeStack.d[i]) - - collectDead(gch.cycleRoots) - - when CollectCyclesStats: - let tFinal = getTicks() - cprintf "times:\n early mark alive: %d ms\n mark: %d ms\n scan: %d ms\n collect: %d ms\n decrefs: %d\n increfs: %d\n marked dead: %d\n collected: %d\n", - (tAfterEarlyMarkAlive - tStart) div 1_000_000, - (tAfterMark - tAfterEarlyMarkAlive) div 1_000_000, - (tAfterScan - tAfterMark) div 1_000_000, - (tFinal - tAfterScan) div 1_000_000, - decrefs, - increfs, - maybedeads, - collected - - deinit(gch.cycleRoots) - init(gch.cycleRoots) - - deinit(gch.freeStack) - init(gch.freeStack) - - when MarkingSkipsAcyclicObjects: - # Collect the acyclic objects that became unreachable due to collected - # cyclic objects. - discard collectZCT(gch) - # collectZCT may add new cycle candidates and we may decide to loop here - # if gch.cycleRoots.len > 0: repeat - -var gcDebugging* = false - -var seqdbg* : proc (s: PGenericSeq) {.cdecl.} + cycleCheckA: array[100, PCell] + cycleCheckALen = 0 + + proc alreadySeen(c: PCell): bool = + for i in 0 .. <cycleCheckALen: + if cycleCheckA[i] == c: return true + if cycleCheckALen == len(cycleCheckA): + gcAssert(false, "cycle detection overflow") + quit 1 + cycleCheckA[cycleCheckALen] = c + inc cycleCheckALen + + proc debugGraph(s: PCell) = + if alreadySeen(s): + writeCell("child cell (already seen) ", s) + else: + writeCell("cell {", s) + forAllChildren(s, waDebug) + c_fprintf(c_stdout, "}\n") + +proc doOperation(p: pointer, op: WalkOp) = + if p == nil: return + var c: PCell = usrToCell(p) + gcAssert(c != nil, "doOperation: 1") + # the 'case' should be faster than function pointers because of easy + # prediction: + case op + of waZctDecRef: + #if not isAllocatedPtr(gch.region, c): + # c_fprintf(c_stdout, "[GC] decref bug: %p", c) + gcAssert(isAllocatedPtr(gch.region, c), "decRef: waZctDecRef") + gcAssert(c.refcount >=% rcIncrement, "doOperation 2") + #c.refcount = c.refcount -% rcIncrement + when logGC: writeCell("decref (from doOperation)", c) + decRef(c) + #if c.refcount <% rcIncrement: addZCT(gch.zct, c) + of waMarkGlobal: + when hasThreadSupport: + # could point to a cell which we don't own and don't want to touch/trace + if isAllocatedPtr(gch.region, c): + markRoot(gch, c) + else: + markRoot(gch, c) + of waMarkGrey: + if c.color == 1-gch.black: + c.setColor(rcGrey) + add(gch.greyStack, c) + #of waDebug: debugGraph(c) + +proc nimGCvisit(d: pointer, op: int) {.compilerRtl.} = + doOperation(d, WalkOp(op)) + +proc collectZCT(gch: var GcHeap): bool {.benign.} + +proc collectCycles(gch: var GcHeap): bool = + # ensure the ZCT 'color' is not used: + while gch.zct.len > 0: discard collectZCT(gch) + case gch.phase + of Phase.None, Phase.Marking: + #if gch.phase == Phase.None: + gch.phase = Phase.Marking + markGlobals(gch) + markLocals(gch) + if markIncremental(gch): + gch.phase = Phase.Sweeping + of Phase.Sweeping: + gcAssert gch.greyStack.len == 0, "greystack not empty" + if sweep(gch): + gch.phase = Phase.None + # flip black/white meanings: + gch.black = 1 - gch.black + result = true proc gcMark(gch: var GcHeap, p: pointer) {.inline.} = # the addresses are not as cells on the stack, so turn them to cells: @@ -1005,235 +739,33 @@ proc gcMark(gch: var GcHeap, p: pointer) {.inline.} = var objStart = cast[PCell](interiorAllocatedPtr(gch.region, cell)) if objStart != nil: # mark the cell: - if objStart.color != rcReallyDead: - if gcDebugging: - # writeCell("marking ", objStart) - discard - else: - inc objStart.refcount, rcIncrement - 
gch.decStack.add objStart - else: - # With incremental clean-up, objects spend some time - # in various lists before being deallocated. - # We just found a reference on the stack to an object, - # which we have previously labeled as unreachable. - # This is either a bug in the GC or a pure accidental - # coincidence due to the conservative stack marking. - when debugGC: - # writeCell("marking dead object", objStart) - discard - when false: - if isAllocatedPtr(gch.region, cell): - sysAssert false, "allocated pointer but not interior?" - # mark the cell: - inc cell.refcount, rcIncrement - add(gch.decStack, cell) + objStart.refcount = objStart.refcount +% rcIncrement + add(gch.decStack, objStart) sysAssert(allocInv(gch.region), "gcMark end") -proc markThreadStacks(gch: var GcHeap) = - when hasThreadSupport and hasSharedHeap: - {.error: "not fully implemented".} - var it = threadList - while it != nil: - # mark registers: - for i in 0 .. high(it.registers): gcMark(gch, it.registers[i]) - var sp = cast[ByteAddress](it.stackBottom) - var max = cast[ByteAddress](it.stackTop) - # XXX stack direction? - # XXX unroll this loop: - while sp <=% max: - gcMark(gch, cast[PPointer](sp)[]) - sp = sp +% sizeof(pointer) - it = it.next - -# ----------------- stack management -------------------------------------- -# inspired from Smart Eiffel - -when defined(sparc): - const stackIncreases = false -elif defined(hppa) or defined(hp9000) or defined(hp9000s300) or - defined(hp9000s700) or defined(hp9000s800) or defined(hp9000s820): - const stackIncreases = true -else: - const stackIncreases = false - -when not defined(useNimRtl): - {.push stack_trace: off.} - proc setStackBottom(theStackBottom: pointer) = - #c_fprintf(c_stdout, "stack bottom: %p;\n", theStackBottom) - # the first init must be the one that defines the stack bottom: - if gch.stackBottom == nil: gch.stackBottom = theStackBottom - else: - var a = cast[ByteAddress](theStackBottom) # and not PageMask - PageSize*2 - var b = cast[ByteAddress](gch.stackBottom) - #c_fprintf(c_stdout, "old: %p new: %p;\n",gch.stackBottom,theStackBottom) - when stackIncreases: - gch.stackBottom = cast[pointer](min(a, b)) - else: - gch.stackBottom = cast[pointer](max(a, b)) - {.pop.} - -proc stackSize(): int {.noinline.} = - var stackTop {.volatile.}: pointer - result = abs(cast[int](addr(stackTop)) - cast[int](gch.stackBottom)) +include gc_common -var - jmpbufSize {.importc: "sizeof(jmp_buf)", nodecl.}: int - # a little hack to get the size of a JmpBuf in the generated C code - # in a platform independent way - -when defined(sparc): # For SPARC architecture. - proc isOnStack(p: pointer): bool = - var stackTop {.volatile.}: pointer - stackTop = addr(stackTop) - var b = cast[ByteAddress](gch.stackBottom) - var a = cast[ByteAddress](stackTop) - var x = cast[ByteAddress](p) - result = a <=% x and x <=% b - - proc markStackAndRegisters(gch: var GcHeap) {.noinline, cdecl.} = - when defined(sparcv9): - asm """"flushw \n" """ - else: - asm """"ta 0x3 ! ST_FLUSH_WINDOWS\n" """ - - var - max = gch.stackBottom - sp: PPointer - stackTop: array[0..1, pointer] - sp = addr(stackTop[0]) - # Addresses decrease as the stack grows. 
- while sp <= max: - gcMark(gch, sp[]) - sp = cast[PPointer](cast[ByteAddress](sp) +% sizeof(pointer)) - -elif defined(ELATE): - {.error: "stack marking code is to be written for this architecture".} - -elif stackIncreases: - # --------------------------------------------------------------------------- - # Generic code for architectures where addresses increase as the stack grows. - # --------------------------------------------------------------------------- - proc isOnStack(p: pointer): bool = - var stackTop {.volatile.}: pointer - stackTop = addr(stackTop) - var a = cast[ByteAddress](gch.stackBottom) - var b = cast[ByteAddress](stackTop) - var x = cast[ByteAddress](p) - result = a <=% x and x <=% b - - proc markStackAndRegisters(gch: var GcHeap) {.noinline, cdecl.} = - var registers: C_JmpBuf - if c_setjmp(registers) == 0'i32: # To fill the C stack with registers. - var max = cast[ByteAddress](gch.stackBottom) - var sp = cast[ByteAddress](addr(registers)) +% jmpbufSize -% sizeof(pointer) - # sp will traverse the JMP_BUF as well (jmp_buf size is added, - # otherwise sp would be below the registers structure). - while sp >=% max: - gcMark(gch, cast[PPointer](sp)[]) - sp = sp -% sizeof(pointer) - -else: - # --------------------------------------------------------------------------- - # Generic code for architectures where addresses decrease as the stack grows. - # --------------------------------------------------------------------------- - proc isOnStack(p: pointer): bool = - var stackTop {.volatile.}: pointer - stackTop = addr(stackTop) - var b = cast[ByteAddress](gch.stackBottom) - var a = cast[ByteAddress](stackTop) - var x = cast[ByteAddress](p) - result = a <=% x and x <=% b - - proc markStackAndRegisters(gch: var GcHeap) {.noinline, cdecl.} = - # We use a jmp_buf buffer that is in the C stack. - # Used to traverse the stack and registers assuming - # that 'setjmp' will save registers in the C stack. - type PStackSlice = ptr array [0..7, pointer] - var registers: C_JmpBuf - if c_setjmp(registers) == 0'i32: # To fill the C stack with registers. - when MinimumStackMarking: - # mark the registers - var jmpbufPtr = cast[ByteAddress](addr(registers)) - var jmpbufEnd = jmpbufPtr +% jmpbufSize - - while jmpbufPtr <=% jmpbufEnd: - gcMark(gch, cast[PPointer](jmpbufPtr)[]) - jmpbufPtr = jmpbufPtr +% sizeof(pointer) - - var sp = cast[ByteAddress](gch.stackTop) - else: - var sp = cast[ByteAddress](addr(registers)) - # mark the user stack - var max = cast[ByteAddress](gch.stackBottom) - # loop unrolled: - while sp <% max - 8*sizeof(pointer): - gcMark(gch, cast[PStackSlice](sp)[0]) - gcMark(gch, cast[PStackSlice](sp)[1]) - gcMark(gch, cast[PStackSlice](sp)[2]) - gcMark(gch, cast[PStackSlice](sp)[3]) - gcMark(gch, cast[PStackSlice](sp)[4]) - gcMark(gch, cast[PStackSlice](sp)[5]) - gcMark(gch, cast[PStackSlice](sp)[6]) - gcMark(gch, cast[PStackSlice](sp)[7]) - sp = sp +% sizeof(pointer)*8 - # last few entries: - while sp <=% max: - gcMark(gch, cast[PPointer](sp)[]) - sp = sp +% sizeof(pointer) - -# ---------------------------------------------------------------------------- -# end of non-portable code -# ---------------------------------------------------------------------------- - -proc releaseCell(gch: var GcHeap, cell: PCell) = - if cell.color != rcReallyDead: - prepareDealloc(cell) - cell.setColor rcReallyDead - - let l1 = gch.tempStack.len - cell.forAllChildren waPush - let l2 = gch.tempStack.len - for i in l1 .. 
<l2: - var cc = gch.tempStack.d[i] - if cc.refcount--(LocalHeap): - releaseCell(gch, cc) - else: - if canbeCycleRoot(cc): - addCycleRoot(gch.cycleRoots, cc) - - gch.tempStack.len = l1 - - if cell.isBitDown(rcBufferedAnywhere): - freeCell(gch, cell) - # else: - # This object is either buffered in the cycleRoots list and we'll leave - # it there to be collected in the next collectCycles or it's pending in - # the ZCT: - # (e.g. we are now cleaning the 15th object, but this one is 18th in the - # list. Note that this can happen only if we reached this point by the - # recursion). - # We can ignore it now as the ZCT cleaner will reach it soon. +proc markStackAndRegisters(gch: var GcHeap) {.noinline, cdecl.} = + forEachStackSlot(gch, gcMark) proc collectZCT(gch: var GcHeap): bool = - const workPackage = 100 + # Note: Freeing may add child objects to the ZCT! So essentially we do + # deep freeing, which is bad for incremental operation. In order to + # avoid a deep stack, we move objects to keep the ZCT small. + # This is performance critical! var L = addr(gch.zct.len) - - when withRealtime: - var steps = workPackage - var t0: Ticks - if gch.maxPause > 0: t0 = getticks() + takeStartTime(100) while L[] > 0: var c = gch.zct.d[0] - sysAssert c.isBitUp(rcZct), "collectZCT: rcZct missing!" - sysAssert(isAllocatedPtr(gch.region, c), "collectZCT: isAllocatedPtr") - + sysAssert(isAllocatedPtr(gch.region, c), "CollectZCT: isAllocatedPtr") # remove from ZCT: - c.clearBit(rcZct) + gcAssert((c.refcount and ZctFlag) == ZctFlag, "collectZCT") + + c.refcount = c.refcount and not ZctFlag gch.zct.d[0] = gch.zct.d[L[] - 1] dec(L[]) - when withRealtime: dec steps + takeTime() if c.refcount <% rcIncrement: # It may have a RC > 0, if it is in the hardware stack or # it has not been removed yet from the ZCT. This is because @@ -1241,92 +773,78 @@ proc collectZCT(gch: var GcHeap): bool = # as this might be too slow. # In any case, it should be removed from the ZCT. But not # freed. **KEEP THIS IN MIND WHEN MAKING THIS INCREMENTAL!** - if c.color == rcRetiredBuffer: - if c.isBitDown(rcInCycleRoots): - freeCell(gch, c) + when logGC: writeCell("zct dealloc cell", c) + gcTrace(c, csZctFreed) + # We are about to free the object, call the finalizer BEFORE its + # children are deleted as well, because otherwise the finalizer may + # access invalid memory. This is done by prepareDealloc(): + prepareDealloc(c) + forAllChildren(c, waZctDecRef) + when reallyDealloc: + sysAssert(allocInv(gch.region), "collectZCT: rawDealloc") + rawDealloc(gch.region, c) else: - # if c.color == rcReallyDead: writeCell("ReallyDead in ZCT?", c) - releaseCell(gch, c) - when withRealtime: - if steps == 0: - steps = workPackage - if gch.maxPause > 0: - let duration = getticks() - t0 - # the GC's measuring is not accurate and needs some cleanup actions - # (stack unmarking), so subtract some short amount of time in to - # order to miss deadlines less often: - if duration >= gch.maxPause - 50_000: - return false + sysAssert(c.typ != nil, "collectZCT 2") + zeroMem(c, sizeof(Cell)) + checkTime() result = true - gch.trimCycleRoots - #deInit(gch.zct) - #init(gch.zct) proc unmarkStackAndRegisters(gch: var GcHeap) = var d = gch.decStack.d - for i in 0 .. <gch.decStack.len: + for i in 0..gch.decStack.len-1: sysAssert isAllocatedPtr(gch.region, d[i]), "unmarkStackAndRegisters" - # XXX: just call doDecRef? 
- var c = d[i] - sysAssert c.typ != nil, "unmarkStackAndRegisters 2" - - if c.color == rcRetiredBuffer: - continue - - # XXX no need for an atomic dec here: - if c.refcount--(LocalHeap): - # the object survived only because of a stack reference - # it still doesn't have heap references - addZCT(gch.zct, c) - - if canbeCycleRoot(c): - # any cyclic object reachable from the stack can be turned into - # a leak if it's orphaned through the stack reference - # that's because the write-barrier won't be executed for stack - # locations - addCycleRoot(gch.cycleRoots, c) - + decRef(d[i]) gch.decStack.len = 0 proc collectCTBody(gch: var GcHeap) = - when withRealtime: + when withRealTime: let t0 = getticks() - when debugGC: inc gcCollectionIdx sysAssert(allocInv(gch.region), "collectCT: begin") - gch.stat.maxStackSize = max(gch.stat.maxStackSize, stackSize()) + when not defined(nimCoroutines): + gch.stat.maxStackSize = max(gch.stat.maxStackSize, stackSize()) sysAssert(gch.decStack.len == 0, "collectCT") prepareForInteriorPointerChecking(gch.region) markStackAndRegisters(gch) - markThreadStacks(gch) gch.stat.maxStackCells = max(gch.stat.maxStackCells, gch.decStack.len) inc(gch.stat.stackScans) if collectZCT(gch): when cycleGC: if getOccupiedMem(gch.region) >= gch.cycleThreshold or alwaysCycleGC: - collectCycles(gch) - sysAssert gch.zct.len == 0, "zct is not null after collect cycles" - inc(gch.stat.cycleCollections) - gch.cycleThreshold = max(InitialCycleThreshold, getOccupiedMem() * - CycleIncrease) - gch.stat.maxThreshold = max(gch.stat.maxThreshold, gch.cycleThreshold) + if collectCycles(gch): + inc(gch.stat.cycleCollections) + gch.cycleThreshold = max(InitialCycleThreshold, getOccupiedMem() * + CycleIncrease) + gch.stat.maxThreshold = max(gch.stat.maxThreshold, gch.cycleThreshold) unmarkStackAndRegisters(gch) sysAssert(allocInv(gch.region), "collectCT: end") - when withRealtime: + when withRealTime: let duration = getticks() - t0 gch.stat.maxPause = max(gch.stat.maxPause, duration) when defined(reportMissedDeadlines): if gch.maxPause > 0 and duration > gch.maxPause: c_fprintf(c_stdout, "[GC] missed deadline: %ld\n", duration) +when defined(nimCoroutines): + proc currentStackSizes(): int = + for stack in items(gch.stack): + result = result + stackSize(stack.starts, stack.pos) + proc collectCT(gch: var GcHeap) = - if (gch.zct.len >= ZctThreshold or (cycleGC and + # stackMarkCosts prevents some pathological behaviour: Stack marking + # becomes more expensive with large stacks and large stacks mean that + # cells with RC=0 are more likely to be kept alive by the stack. 
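  # (worked example with assumed numbers: on a 64-bit target 16*sizeof(int)
  # is 128, so a 1 MiB stack gives 1_048_576 div 128 == 8192; if that exceeds
  # ZctThreshold, the ZCT may grow to roughly 8192 entries before a collection
  # is triggered, amortising the now more expensive conservative stack scan.)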
+ when defined(nimCoroutines): + let stackMarkCosts = max(currentStackSizes() div (16*sizeof(int)), ZctThreshold) + else: + let stackMarkCosts = max(stackSize() div (16*sizeof(int)), ZctThreshold) + if (gch.zct.len >= stackMarkCosts or (cycleGC and getOccupiedMem(gch.region)>=gch.cycleThreshold) or alwaysGC) and gch.recGcLock == 0: collectCTBody(gch) -when withRealtime: +when withRealTime: proc toNano(x: int): Nanos {.inline.} = result = x * 1000 @@ -1334,13 +852,11 @@ when withRealtime: gch.maxPause = MaxPauseInUs.toNano proc GC_step(gch: var GcHeap, us: int, strongAdvice: bool) = - acquire(gch) gch.maxPause = us.toNano if (gch.zct.len >= ZctThreshold or (cycleGC and getOccupiedMem(gch.region)>=gch.cycleThreshold) or alwaysGC) or strongAdvice: collectCTBody(gch) - release(gch) proc GC_step*(us: int, strongAdvice = false) = GC_step(gch, us, strongAdvice) @@ -1358,11 +874,7 @@ when not defined(useNimRtl): dec(gch.recGcLock) proc GC_setStrategy(strategy: GC_Strategy) = - case strategy - of gcThroughput: discard - of gcResponsiveness: discard - of gcOptimizeSpace: discard - of gcOptimizeTime: discard + discard proc GC_enableMarkAndSweep() = gch.cycleThreshold = InitialCycleThreshold @@ -1372,13 +884,10 @@ when not defined(useNimRtl): # set to the max value to suppress the cycle detector proc GC_fullCollect() = - setStackTop(gch) - acquire(gch) var oldThreshold = gch.cycleThreshold gch.cycleThreshold = 0 # forces cycle collection collectCT(gch) gch.cycleThreshold = oldThreshold - release(gch) proc GC_getStatistics(): string = GC_disable() @@ -1390,9 +899,13 @@ when not defined(useNimRtl): "[GC] max threshold: " & $gch.stat.maxThreshold & "\n" & "[GC] zct capacity: " & $gch.zct.cap & "\n" & "[GC] max cycle table size: " & $gch.stat.cycleTableSize & "\n" & - "[GC] max stack size: " & $gch.stat.maxStackSize & "\n" & "[GC] max pause time [ms]: " & $(gch.stat.maxPause div 1000_000) - when traceGC: writeLeakage(true) + when defined(nimCoroutines): + result = result & "[GC] number of stacks: " & $gch.stack.len & "\n" + for stack in items(gch.stack): + result = result & "[GC] stack " & stack.starts.repr & "[GC] max stack size " & $stack.maxStackSize & "\n" + else: + result = result & "[GC] max stack size: " & $gch.stat.maxStackSize & "\n" GC_enable() {.pop.} diff --git a/lib/system/jssys.nim b/lib/system/jssys.nim index 5bcddc5e6..5bac54772 100644 --- a/lib/system/jssys.nim +++ b/lib/system/jssys.nim @@ -33,6 +33,9 @@ type lineNumber {.importc.}: int message {.importc.}: cstring stack {.importc.}: cstring + + JSRef = ref RootObj # Fake type. 
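    # JSRef is only an opaque placeholder on the Nim side: it lets raw
    # JavaScript values be kept in Nim variables and spliced back into
    # {.emit.} blocks via backticks, e.g. (as used by ewriteln below):
    #   var node: JSRef
    #   {.emit: "`node` = document.getElementsByTagName('body')[0];".}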
+ {.deprecated: [TSafePoint: SafePoint, TCallFrame: CallFrame].} var @@ -282,61 +285,6 @@ proc eqStrings(a, b: string): bool {.asmNoStackFrame, compilerProc.} = return true; """ -type - Document {.importc.} = object of RootObj - write: proc (text: cstring) {.nimcall.} - writeln: proc (text: cstring) {.nimcall.} - createAttribute: proc (identifier: cstring): ref Node {.nimcall.} - createElement: proc (identifier: cstring): ref Node {.nimcall.} - createTextNode: proc (identifier: cstring): ref Node {.nimcall.} - getElementById: proc (id: cstring): ref Node {.nimcall.} - getElementsByName: proc (name: cstring): seq[ref Node] {.nimcall.} - getElementsByTagName: proc (name: cstring): seq[ref Node] {.nimcall.} - - NodeType* = enum - ElementNode = 1, - AttributeNode, - TextNode, - CDATANode, - EntityRefNode, - EntityNode, - ProcessingInstructionNode, - CommentNode, - DocumentNode, - DocumentTypeNode, - DocumentFragmentNode, - NotationNode - Node* {.importc.} = object of RootObj - attributes*: seq[ref Node] - childNodes*: seq[ref Node] - data*: cstring - firstChild*: ref Node - lastChild*: ref Node - nextSibling*: ref Node - nodeName*: cstring - nodeType*: NodeType - nodeValue*: cstring - parentNode*: ref Node - previousSibling*: ref Node - appendChild*: proc (child: ref Node) {.nimcall.} - appendData*: proc (data: cstring) {.nimcall.} - cloneNode*: proc (copyContent: bool) {.nimcall.} - deleteData*: proc (start, len: int) {.nimcall.} - getAttribute*: proc (attr: cstring): cstring {.nimcall.} - getAttributeNode*: proc (attr: cstring): ref Node {.nimcall.} - getElementsByTagName*: proc (): seq[ref Node] {.nimcall.} - hasChildNodes*: proc (): bool {.nimcall.} - insertBefore*: proc (newNode, before: ref Node) {.nimcall.} - insertData*: proc (position: int, data: cstring) {.nimcall.} - removeAttribute*: proc (attr: cstring) {.nimcall.} - removeAttributeNode*: proc (attr: ref Node) {.nimcall.} - removeChild*: proc (child: ref Node) {.nimcall.} - replaceChild*: proc (newNode, oldNode: ref Node) {.nimcall.} - replaceData*: proc (start, len: int, text: cstring) {.nimcall.} - setAttribute*: proc (name, value: cstring) {.nimcall.} - setAttributeNode*: proc (attr: ref Node) {.nimcall.} -{.deprecated: [TNode: Node, TNodeType: NodeType, TDocument: Document].} - when defined(kwin): proc rawEcho {.compilerproc, asmNoStackFrame.} = asm """ @@ -360,28 +308,28 @@ elif defined(nodejs): """ else: - var - document {.importc, nodecl.}: ref Document - proc ewriteln(x: cstring) = - var node = document.getElementsByTagName("body")[0] - if node != nil: - node.appendChild(document.createTextNode(x)) - node.appendChild(document.createElement("br")) - else: + var node : JSRef + {.emit: "`node` = document.getElementsByTagName('body')[0];".} + if node.isNil: raise newException(ValueError, "<body> element does not exist yet!") + {.emit: """ + `node`.appendChild(document.createTextNode(`x`)); + `node`.appendChild(document.createElement("br")); + """.} proc rawEcho {.compilerproc.} = - var node = document.getElementsByTagName("body")[0] - if node == nil: + var node : JSRef + {.emit: "`node` = document.getElementsByTagName('body')[0];".} + if node.isNil: raise newException(IOError, "<body> element does not exist yet!") - asm """ - for (var i = 0; i < arguments.length; ++i) { - var x = `toJSStr`(arguments[i]); - `node`.appendChild(document.createTextNode(x)) - } - """ - node.appendChild(document.createElement("br")) + {.emit: """ + for (var i = 0; i < arguments.length; ++i) { + var x = `toJSStr`(arguments[i]); + 
`node`.appendChild(document.createTextNode(x)); + } + `node`.appendChild(document.createElement("br")); + """.} # Arithmetic: proc addInt(a, b: int): int {.asmNoStackFrame, compilerproc.} = @@ -532,8 +480,6 @@ proc nimMax(a, b: int): int {.compilerproc.} = return if a >= b: a else: b type NimString = string # hack for hti.nim include "system/hti" -type JSRef = ref RootObj # Fake type. - proc isFatPointer(ti: PNimType): bool = # This has to be consistent with the code generator! return ti.base.kind notin {tyObject, @@ -587,15 +533,20 @@ proc nimCopy(dest, src: JSRef, ti: PNimType): JSRef = nimCopyAux(result, src, ti.node) of tySequence, tyArrayConstr, tyOpenArray, tyArray: asm """ - if (`dest` === null || `dest` === undefined) { - `dest` = new Array(`src`.length); + if (`src` === null) { + `result` = null; } else { - `dest`.length = `src`.length; - } - `result` = `dest`; - for (var i = 0; i < `src`.length; ++i) { - `result`[i] = nimCopy(`result`[i], `src`[i], `ti`.base); + if (`dest` === null || `dest` === undefined) { + `dest` = new Array(`src`.length); + } + else { + `dest`.length = `src`.length; + } + `result` = `dest`; + for (var i = 0; i < `src`.length; ++i) { + `result`[i] = nimCopy(`result`[i], `src`[i], `ti`.base); + } } """ of tyString: diff --git a/lib/system/nimscript.nim b/lib/system/nimscript.nim index aaba11324..772d25343 100644 --- a/lib/system/nimscript.nim +++ b/lib/system/nimscript.nim @@ -242,7 +242,7 @@ template task*(name: untyped; description: string; body: untyped): untyped = ## .. code-block:: nim ## task build, "default build is via the C backend": ## setCommand "c" - proc `name Task`() = body + proc `name Task`*() = body let cmd = getCommand() if cmd.len == 0 or cmd ==? "help": diff --git a/lib/system/profiler.nim b/lib/system/profiler.nim index 4f600417e..ae8ff4e19 100644 --- a/lib/system/profiler.nim +++ b/lib/system/profiler.nim @@ -50,10 +50,15 @@ proc captureStackTrace(f: PFrame, st: var StackTrace) = inc(i) b = b.prev +var + profilingRequestedHook*: proc (): bool {.nimcall, benign.} + ## set this variable to provide a procedure that implements a profiler in + ## user space. See the `nimprof` module for a reference implementation. + when defined(memProfiler): type MemProfilerHook* = proc (st: StackTrace, requestedSize: int) {.nimcall, benign.} - {.deprecated: [TMemProfilerHook: MemProfilerHook].} + var profilerHook*: MemProfilerHook ## set this variable to provide a procedure that implements a profiler in @@ -65,17 +70,13 @@ when defined(memProfiler): hook(st, requestedSize) proc nimProfile(requestedSize: int) = - if not isNil(profilerHook): + if not isNil(profilingRequestedHook) and profilingRequestedHook(): callProfilerHook(profilerHook, requestedSize) else: - const - SamplingInterval = 50_000 - # set this to change the default sampling interval var profilerHook*: ProfilerHook ## set this variable to provide a procedure that implements a profiler in ## user space. See the `nimprof` module for a reference implementation. - gTicker {.threadvar.}: int proc callProfilerHook(hook: ProfilerHook) {.noinline.} = # 'noinline' so that 'nimProfile' does not perform the stack allocation @@ -86,16 +87,7 @@ else: proc nimProfile() = ## This is invoked by the compiler in every loop and on every proc entry! - if gTicker == 0: - gTicker = -1 - if not isNil(profilerHook): - # disable recursive calls: XXX should use try..finally, - # but that's too expensive! 
- let oldHook = profilerHook - profilerHook = nil - callProfilerHook(oldHook) - profilerHook = oldHook - gTicker = SamplingInterval - dec gTicker + if not isNil(profilingRequestedHook) and profilingRequestedHook(): + callProfilerHook(profilerHook) {.pop.} diff --git a/lib/system/repr.nim b/lib/system/repr.nim index 1f81a0813..986994203 100644 --- a/lib/system/repr.nim +++ b/lib/system/repr.nim @@ -259,8 +259,10 @@ when not defined(useNimRtl): of tyInt16: add result, $int(cast[ptr int16](p)[]) of tyInt32: add result, $int(cast[ptr int32](p)[]) of tyInt64: add result, $(cast[ptr int64](p)[]) - of tyUInt8: add result, $ze(cast[ptr int8](p)[]) - of tyUInt16: add result, $ze(cast[ptr int16](p)[]) + of tyUInt8: add result, $(cast[ptr uint8](p)[]) + of tyUInt16: add result, $(cast[ptr uint16](p)[]) + of tyUInt32: add result, $(cast[ptr uint32](p)[]) + of tyUInt64: add result, $(cast[ptr uint64](p)[]) of tyFloat: add result, $(cast[ptr float](p)[]) of tyFloat32: add result, $(cast[ptr float32](p)[]) diff --git a/lib/system/sysstr.nim b/lib/system/sysstr.nim index 326c601bd..e2137e8f4 100644 --- a/lib/system/sysstr.nim +++ b/lib/system/sysstr.nim @@ -229,7 +229,7 @@ proc setLengthSeq(seq: PGenericSeq, elemSize, newLen: int): PGenericSeq {. # we need to decref here, otherwise the GC leaks! when not defined(boehmGC) and not defined(nogc) and not defined(gcMarkAndSweep) and not defined(gogc): - when compileOption("gc", "v2"): + when false: # compileOption("gc", "v2"): for i in newLen..result.len-1: let len0 = gch.tempStack.len forAllChildrenAux(cast[pointer](cast[ByteAddress](result) +% diff --git a/lib/wrappers/linenoise/clinenoise.c b/lib/wrappers/linenoise/clinenoise.c index b4ae32472..dd3aa736c 100644 --- a/lib/wrappers/linenoise/clinenoise.c +++ b/lib/wrappers/linenoise/clinenoise.c @@ -417,10 +417,10 @@ void linenoiseAddCompletion(linenoiseCompletions *lc, const char *str) { size_t len = strlen(str); char *copy, **cvec; - copy = malloc(len+1); + copy = (char*)malloc(len+1); if (copy == NULL) return; memcpy(copy,str,len+1); - cvec = realloc(lc->cvec,sizeof(char*)*(lc->len+1)); + cvec = (char**)realloc(lc->cvec,sizeof(char*)*(lc->len+1)); if (cvec == NULL) { free(copy); return; @@ -446,11 +446,11 @@ static void abInit(struct abuf *ab) { } static void abAppend(struct abuf *ab, const char *s, int len) { - char *new = realloc(ab->b,ab->len+len); + char *neww = (char*)realloc(ab->b,ab->len+len); - if (new == NULL) return; - memcpy(new+ab->len,s,len); - ab->b = new; + if (neww == NULL) return; + memcpy(neww+ab->len,s,len); + ab->b = neww; ab->len += len; } @@ -1016,7 +1016,7 @@ int linenoiseHistoryAdd(const char *line) { /* Initialization on first call. */ if (history == NULL) { - history = malloc(sizeof(char*)*history_max_len); + history = (char**)malloc(sizeof(char*)*history_max_len); if (history == NULL) return 0; memset(history,0,(sizeof(char*)*history_max_len)); } @@ -1043,14 +1043,14 @@ int linenoiseHistoryAdd(const char *line) { * just the latest 'len' elements if the new history length value is smaller * than the amount of items already inside the history. */ int linenoiseHistorySetMaxLen(int len) { - char **new; + char **neww; if (len < 1) return 0; if (history) { int tocopy = history_len; - new = malloc(sizeof(char*)*len); - if (new == NULL) return 0; + neww = (char**)malloc(sizeof(char*)*len); + if (neww == NULL) return 0; /* If we can't copy everything, free the elements we'll not use. 
*/ if (len < tocopy) { @@ -1059,10 +1059,10 @@ int linenoiseHistorySetMaxLen(int len) { for (j = 0; j < tocopy-len; j++) free(history[j]); tocopy = len; } - memset(new,0,sizeof(char*)*len); - memcpy(new,history+(history_len-tocopy), sizeof(char*)*tocopy); + memset(neww,0,sizeof(char*)*len); + memcpy(neww,history+(history_len-tocopy), sizeof(char*)*tocopy); free(history); - history = new; + history = neww; } history_max_len = len; if (history_len > history_max_len) diff --git a/lib/wrappers/mysql.nim b/lib/wrappers/mysql.nim index 8253e53a5..af504864d 100644 --- a/lib/wrappers/mysql.nim +++ b/lib/wrappers/mysql.nim @@ -418,6 +418,7 @@ type decimals*: cuint # Number of decimals in field charsetnr*: cuint # Character set ftype*: Enum_field_types # Type of field. See mysql_com.h for types + extension*: pointer FIELD* = St_mysql_field PFIELD* = ptr FIELD diff --git a/lib/wrappers/odbcsql.nim b/lib/wrappers/odbcsql.nim index 43ad80f76..1b2544ec0 100644 --- a/lib/wrappers/odbcsql.nim +++ b/lib/wrappers/odbcsql.nim @@ -641,11 +641,42 @@ const ODBC_CONFIG_SYS_DSN* = 5 ODBC_REMOVE_SYS_DSN* = 6 + SQL_ACTIVE_CONNECTIONS* = 0 # SQLGetInfo + SQL_DATA_SOURCE_NAME* = 2 + SQL_DATA_SOURCE_READ_ONLY* = 25 + SQL_DATABASE_NAME* = 2 + SQL_DBMS_NAME* = 17 + SQL_DBMS_VERSION* = 18 + SQL_DRIVER_HDBC* = 3 + SQL_DRIVER_HENV* = 4 + SQL_DRIVER_HSTMT* = 5 + SQL_DRIVER_NAME* = 6 + SQL_DRIVER_VER* = 7 + SQL_FETCH_DIRECTION* = 8 + SQL_ODBC_VER* = 10 + SQL_DRIVER_ODBC_VER* = 77 + SQL_SERVER_NAME* = 13 + SQL_ACTIVE_ENVIRONMENTS* = 116 + SQL_ACTIVE_STATEMENTS* = 1 + SQL_SQL_CONFORMANCE* = 118 + SQL_DATETIME_LITERALS* = 119 + SQL_ASYNC_MODE* = 10021 + SQL_BATCH_ROW_COUNT* = 120 + SQL_BATCH_SUPPORT* = 121 + SQL_CATALOG_LOCATION* = 114 + #SQL_CATALOG_NAME* = 10003 + SQL_CATALOG_NAME_SEPARATOR* = 41 + SQL_CATALOG_TERM* = 42 + SQL_CATALOG_USAGE* = 92 + #SQL_COLLATION_SEQ* = 10004 + SQL_COLUMN_ALIAS* = 87 + #SQL_USER_NAME* = 47 + proc SQLAllocHandle*(HandleType: TSqlSmallInt, InputHandle: SqlHandle, OutputHandlePtr: var SqlHandle): TSqlSmallInt{. dynlib: odbclib, importc.} proc SQLSetEnvAttr*(EnvironmentHandle: SqlHEnv, Attribute: TSqlInteger, - Value: SqlPointer, StringLength: TSqlInteger): TSqlSmallInt{. + Value: TSqlInteger, StringLength: TSqlInteger): TSqlSmallInt{. dynlib: odbclib, importc.} proc SQLGetEnvAttr*(EnvironmentHandle: SqlHEnv, Attribute: TSqlInteger, Value: SqlPointer, BufferLength: TSqlInteger, @@ -807,5 +838,10 @@ proc SQLStatistics*(hstmt: SqlHStmt, CatalogName: PSQLCHAR, NameLength3: TSqlSmallInt, Unique: SqlUSmallInt, Reserved: SqlUSmallInt): TSqlSmallInt {. dynlib: odbclib, importc.} +proc SQLErr*(henv: SqlHEnv, hdbc: SqlHDBC, hstmt: SqlHStmt, + szSqlState, pfNativeError, szErrorMsg: PSQLCHAR, + cbErrorMsgMax: TSqlSmallInt, + pcbErrorMsg: PSQLINTEGER): TSqlSmallInt {. + dynlib: odbclib, importc: "SQLError".} {.pop.} diff --git a/lib/wrappers/sqlite3.nim b/lib/wrappers/sqlite3.nim index c5019960c..e7fd2bc36 100644 --- a/lib/wrappers/sqlite3.nim +++ b/lib/wrappers/sqlite3.nim @@ -239,6 +239,8 @@ proc column_count*(pStmt: Pstmt): int32{.cdecl, dynlib: Lib, importc: "sqlite3_column_count".} proc column_name*(para1: Pstmt, para2: int32): cstring{.cdecl, dynlib: Lib, importc: "sqlite3_column_name".} +proc column_table_name*(para1: Pstmt; para2: int32): cstring{.cdecl, dynlib: Lib, + importc: "sqlite3_column_table_name".} proc column_name16*(para1: Pstmt, para2: int32): pointer{.cdecl, dynlib: Lib, importc: "sqlite3_column_name16".} proc column_decltype*(para1: Pstmt, i: int32): cstring{.cdecl, dynlib: Lib, |
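A minimal usage sketch for the newly wrapped column_table_name, assuming an existing test.db with a people table and an SQLite build with column-metadata support (database name and query are made up):

  import sqlite3

  var db: PSqlite3
  if open("test.db", db) == SQLITE_OK:
    var stmt: Pstmt
    if prepare_v2(db, "SELECT id FROM people", -1, stmt, nil) == SQLITE_OK:
      if step(stmt) == SQLITE_ROW:
        # prints the table the first result column originates from ("people")
        echo column_table_name(stmt, 0)
      discard finalize(stmt)
    discard close(db)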