path: root/tools/iso/kernel.soso/vmm.c
blob: 7dad53be20c97c24ee6c4f5bf3f6512263641f44
#include "vmm.h"
#include "common.h"
#include "screen.h"
#include "alloc.h"
#include "isr.h"
#include "process.h"
#include "list.h"
#include "debugprint.h"

uint32 *gKernelPageDirectory = (uint32 *)KERN_PAGE_DIRECTORY;
uint8 gPhysicalPageFrameBitmap[RAM_AS_4M_PAGES / 8];
uint8 gKernelPageHeapBitmap[RAM_AS_4K_PAGES / 8];

static int gTotalPageCount = 0;

static void handlePageFault(Registers *regs);
static void syncPageDirectoriesKernelMemory();

void initializeMemory(uint32 high_mem) {
    int pg;
    unsigned long i;

    registerInterruptHandler(14, handlePageFault);

    gTotalPageCount = (high_mem * 1024) / PAGESIZE_4M;

    for (pg = 0; pg < gTotalPageCount / 8; ++pg) {
        gPhysicalPageFrameBitmap[pg] = 0;
    }

    for (pg = gTotalPageCount / 8; pg < RAM_AS_4M_PAGES / 8; ++pg) {
        gPhysicalPageFrameBitmap[pg] = 0xFF;
    }

    //Pages reserved for the kernel
    for (pg = PAGE_INDEX_4M(0x0); pg < (int)(PAGE_INDEX_4M(RESERVED_AREA)); ++pg) {
        SET_PAGEFRAME_USED(gPhysicalPageFrameBitmap, pg);
    }

    //Mark all page heap pages as used initially; the reserved PD area is freed just below
    for (pg = 0; pg < RAM_AS_4K_PAGES / 8; ++pg) {
        gKernelPageHeapBitmap[pg] = 0xFF;
    }

    for (pg = PAGE_INDEX_4K(KERN_PD_AREA_BEGIN); pg < (int)(PAGE_INDEX_4K(KERN_PD_AREA_END)); ++pg) {
        SET_PAGEHEAP_UNUSED(pg * PAGESIZE_4K);
    }

    //Identity map
    for (i = 0; i < 4; ++i) {
        gKernelPageDirectory[i] = (i * PAGESIZE_4M | (PG_PRESENT | PG_WRITE | PG_4MB));//add PG_USER for accessing kernel code in user mode
    }

    for (i = 4; i < 1024; ++i) {
        gKernelPageDirectory[i] = 0;
    }

    //Enable paging
    asm("	mov %0, %%eax \n \
        mov %%eax, %%cr3 \n \
        mov %%cr4, %%eax \n \
        or %2, %%eax \n \
        mov %%eax, %%cr4 \n \
        mov %%cr0, %%eax \n \
        or %1, %%eax \n \
        mov %%eax, %%cr0"::"m"(gKernelPageDirectory), "i"(PAGING_FLAG), "i"(PSE_FLAG));

    initializeKernelHeap();
}
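
//Worked example: with 128 MB of RAM the high_mem value (reported in KB) is
//131072, so gTotalPageCount = (131072 * 1024) / PAGESIZE_4M = 32 four-megabyte
//page frames. Only the first 32 bits of gPhysicalPageFrameBitmap are cleared;
//the rest are set to 0xFF so that nonexistent memory can never be handed out.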

char* getPageFrame4M() {
    int byte, bit;
    uint32 page = -1;

    for (byte = 0; byte < RAM_AS_4M_PAGES / 8; byte++) {
        if (gPhysicalPageFrameBitmap[byte] != 0xFF) {
            for (bit = 0; bit < 8; bit++) {
                if (!(gPhysicalPageFrameBitmap[byte] & (1 << bit))) {
                    page = 8 * byte + bit;
                    SET_PAGEFRAME_USED(gPhysicalPageFrameBitmap, page);
                    Debug_PrintF("DEBUG: got 4M on physical %x\n", page * PAGESIZE_4M);
                    return (char *) (page * PAGESIZE_4M);
                }
            }
        }
    }

    PANIC("Memory is full!");
    return (char *) -1;
}

void releasePageFrame4M(uint32 p_addr) {
    Debug_PrintF("DEBUG: released 4M on physical %x\n", p_addr);

    SET_PAGEFRAME_UNUSED(gPhysicalPageFrameBitmap, p_addr);
}

uint32* getPdFromReservedArea4K() {
    int byte, bit;
    int page = -1;

    //printkf("DEBUG: getPdFromReservedArea4K() begin\n");

    for (byte = 0; byte < RAM_AS_4K_PAGES / 8; byte++) {
        if (gKernelPageHeapBitmap[byte] != 0xFF) {
            for (bit = 0; bit < 8; bit++) {
                if (!(gKernelPageHeapBitmap[byte] & (1 << bit))) {
                    page = 8 * byte + bit;
                    SET_PAGEHEAP_USED(page);
                    //printkf("DEBUG: getPdFromReservedArea4K() found pageIndex:%d\n", page);
                    return (uint32 *) (page * PAGESIZE_4K);
                }
            }
        }
    }

    PANIC("Reserved Page Directory Area is Full!!!");
    return (uint32 *) -1;
}

void releasePdFromReservedArea4K(uint32 *v_addr) {
    SET_PAGEHEAP_UNUSED(v_addr);
}

uint32 *createPd() {
    int i;

    uint32* pd = getPdFromReservedArea4K();


    for (i = 0; i < KERNELMEMORY_PAGE_COUNT; ++i) {
        pd[i] = gKernelPageDirectory[i];
    }


    for (i = KERNELMEMORY_PAGE_COUNT; i < 1024; ++i) {
        pd[i] = 0;
    }

    return pd;
}

void destroyPd(uint32 *pd) {
    int startIndex = PAGE_INDEX_4M(USER_OFFSET);
    int lastIndex = PAGE_INDEX_4M(USER_OFFSET_END);

    ///we don't touch mmapped areas

    for (int i = startIndex; i < lastIndex; ++i) {
        uint32 p_addr = pd[i] & 0xFFC00000;

        if (p_addr) {
            releasePageFrame4M(p_addr);
        }

        pd[i] = 0;
    }

    releasePdFromReservedArea4K(pd);
}
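
//copyPd below duplicates a page directory: the kernel range is filled from
//gKernelPageDirectory, and every present user 4 MB page gets a freshly
//allocated physical frame plus a full memcpy (an eager copy, no
//copy-on-write). Paging is disabled while the physical frames are copied.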

uint32 *copyPd(uint32* pd) {
    int i;

    uint32* newPd = getPdFromReservedArea4K();


    for (i = 0; i < KERNELMEMORY_PAGE_COUNT; ++i) {
        newPd[i] = gKernelPageDirectory[i];
    }

    disablePaging();

    for (i = KERNELMEMORY_PAGE_COUNT; i < 1024; ++i) {
        newPd[i] = 0;

        if ((pd[i] & PG_PRESENT) == PG_PRESENT) {
            uint32 pagePhysical = pd[i] & 0xFFC00000;
            char* newPagePhysical = getPageFrame4M();

            memcpy((uint8*)newPagePhysical, (uint8*)pagePhysical, PAGESIZE_4M);

            uint32 vAddr =  (i * 4) << 20;

            //printkf("Copied page virtual %x\n", vAddr);

            addPageToPd(newPd, (char*)vAddr, newPagePhysical, PG_USER);
        }
    }

    enablePaging();

    return newPd;
}

//When calling this function:
//If it is intended to allocate kernel memory, v_addr must be < KERN_HEAP_END.
//If it is intended to allocate user memory, v_addr must be > KERN_HEAP_END.
BOOL addPageToPd(uint32* pd, char *v_addr, char *p_addr, int flags) {
    uint32 *pde = NULL;

    //printkf("DEBUG: addPageToPd(): v_addr:%x p_addr:%x flags:%x\n", v_addr, p_addr, flags);


    int index = (((uint32) v_addr & 0xFFC00000) >> 22);
    pde = pd + index;
    if ((*pde & PG_PRESENT) == PG_PRESENT) {
        //Already assigned!
        Debug_PrintF("ERROR: addPageToPd(): pde:%x is already assigned!!\n", pde);
        return FALSE;
    }

    //printkf("addPageToPd(): index:%d pde:%x\n", index, pde);

    *pde = ((uint32) p_addr) | (PG_PRESENT | PG_4MB | PG_WRITE | flags);
    //printkf("pde:%x *pde:%x\n", pde, *pde);

    SET_PAGEFRAME_USED(gPhysicalPageFrameBitmap, PAGE_INDEX_4M((uint32)p_addr));

    asm("invlpg %0"::"m"(v_addr));

    if (v_addr <= (char*)(KERN_HEAP_END - PAGESIZE_4M)) {
        if (pd == gKernelPageDirectory) {
            syncPageDirectoriesKernelMemory();
        }
        else {
            PANIC("Attempted to allocate kernel memory to a page directory which is not the kernel page directory!!!\n");
        }
    }
    else {
        if (pd == gKernelPageDirectory) {
            //No panic here, because the kernel is allowed to map anywhere!
        }
    }

    return TRUE;
}
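
//Illustrative usage sketch (not part of the original source, never compiled):
//allocating one 4 MB frame and mapping it at a user-space virtual address,
//following the KERN_HEAP_END rule documented above. The function name is
//hypothetical.
#if 0
static BOOL exampleMapUserPage(uint32* pd) {
    char* v_addr = (char*)USER_OFFSET;  //assumed to lie above KERN_HEAP_END, as user memory must
    char* frame = getPageFrame4M();     //grab a free 4 MB physical frame
    return addPageToPd(pd, v_addr, frame, PG_USER);
}
#endif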

BOOL removePageFromPd(uint32* pd, char *v_addr, BOOL releasePageFrame) {
    int index = (((uint32) v_addr & 0xFFC00000) >> 22);
    uint32* pde = pd + index;
    if ((*pde & PG_PRESENT) == PG_PRESENT) {
        uint32 p_addr = *pde & 0xFFC00000;

        if (releasePageFrame) {
            releasePageFrame4M(p_addr);
        }

        *pde = 0;

        asm("invlpg %0"::"m"(v_addr));

        if (v_addr <= (char*)(KERN_HEAP_END - PAGESIZE_4M)) {
            if (pd == gKernelPageDirectory) {
                syncPageDirectoriesKernelMemory();
            }
        }

        return TRUE;
    }

    return FALSE;
}

static void syncPageDirectoriesKernelMemory() {
    //Refresh the kernel PDEs in every page directory allocated from the
    //reserved area, found via the page heap bitmap. It might be easier to
    //traverse the processes (and access their pd) here instead.
    for (int byte = 0; byte < RAM_AS_4M_PAGES / 8; byte++) {
        if (gKernelPageHeapBitmap[byte] != 0xFF) {
            for (int bit = 0; bit < 8; bit++) {
                if ((gKernelPageHeapBitmap[byte] & (1 << bit))) {
                    int page = 8 * byte + bit;

                    uint32* pd = (uint32*)(page * PAGESIZE_4K);

                    for (int i = 0; i < KERNELMEMORY_PAGE_COUNT; ++i) {
                        pd[i] = gKernelPageDirectory[i];
                    }
                }
            }
        }
    }
}

uint32 getTotalPageCount() {
    return gTotalPageCount;
}

uint32 getUsedPageCount() {
    int count = 0;
    for (int i = 0; i < gTotalPageCount; ++i) {
        if(IS_PAGEFRAME_USED(gPhysicalPageFrameBitmap, i)) {
            ++count;
        }
    }

    return count;
}

uint32 getFreePageCount() {
    return gTotalPageCount - getUsedPageCount();
}

static void printPageFaultInfo(uint32 faultingAddress, Registers *regs) {
    int present = regs->errorCode & 0x1;
    int rw = regs->errorCode & 0x2;
    int us = regs->errorCode & 0x4;
    int reserved = regs->errorCode & 0x8;
    int id = regs->errorCode & 0x10;

    printkf("Page fault!!! When trying to %s %x - IP:%x\n", rw ? "write to" : "read from", faultingAddress, regs->eip);
    printkf("The page was %s\n", present ? "present" : "not present");

    if (reserved) {
        printkf("Reserved bit was set\n");
    }

    if (id) {
        printkf("Caused by an instruction fetch\n");
    }

    printkf("CPU was in %s\n", us ? "user-mode" : "supervisor mode");
}

static void handlePageFault(Registers *regs) {
    // A page fault has occurred.

    // The faulting address is stored in the CR2 register.
    uint32 faultingAddress;
    asm volatile("mov %%cr2, %0" : "=r" (faultingAddress));

    //Debug_PrintF("page_fault()\n");
    //Debug_PrintF("stack of handler is %x\n", &faultingAddress);

    Thread* faultingThread = getCurrentThread();
    if (NULL != faultingThread) {
        Thread* mainThread = getMainKernelThread();

        if (mainThread == faultingThread) {
            printPageFaultInfo(faultingAddress, regs);

            PANIC("Page fault in Kernel main thread!!!");
        }
        else {
            printPageFaultInfo(faultingAddress, regs);

            Debug_PrintF("Faulting thread is %d\n", faultingThread->threadId);

            if (faultingThread->userMode) {
                Debug_PrintF("Destroying process %d\n", faultingThread->owner->pid);

                destroyProcess(faultingThread->owner);
            }
            else {
                Debug_PrintF("Destroying kernel thread %d\n", faultingThread->threadId);

                destroyThread(faultingThread);
            }

            waitForSchedule();
        }
    }
    else {
        printPageFaultInfo(faultingAddress, regs);

        PANIC("Page fault!!!");
    }
}

void initializeProcessMmap(Process* process) {
    int page = 0;

    for (page = 0; page < RAM_AS_4M_PAGES / 8; ++page) {
        process->mmappedVirtualMemory[page] = 0xFF;
    }

    //Virtual pages reserved for mmap
    for (page = PAGE_INDEX_4M(USER_OFFSET_MMAP); page < (int)(PAGE_INDEX_4M(USER_OFFSET_MMAP_END)); ++page) {
//?         printkf("reserving for mmap: %x\n", page*PAGESIZE_4M);
        SET_PAGEFRAME_UNUSED(process->mmappedVirtualMemory, page * PAGESIZE_4M);
    }
}

//This function uses either pAddress or pAddressList.
//At least one of them must be provided (non-null / non-zero)!
void* mapMemory(Process* process, uint32 nBytes, uint32 pAddress, List* pAddressList) {
    if (nBytes == 0) {
        return NULL;
    }

    int pageIndex = 0;

    int neededPages = (nBytes / PAGESIZE_4M) + 1;

    if (pAddressList) {
        if (List_GetCount(pAddressList) < neededPages) {
            return NULL;
        }
    }
    else if (0 == pAddress) {
        return NULL;
    }

    int foundAdjacent = 0;

    uint32 vMem = 0;

    for (pageIndex = PAGE_INDEX_4M(USER_OFFSET_MMAP); pageIndex < (int)(PAGE_INDEX_4M(USER_OFFSET_MMAP_END)); ++pageIndex) {
        if (IS_PAGEFRAME_USED(process->mmappedVirtualMemory, pageIndex)) {
            foundAdjacent = 0;
            vMem = 0;
        }
        else {
            if (0 == foundAdjacent) {
                vMem = pageIndex * PAGESIZE_4M;
            }
            ++foundAdjacent;
        }

        if (foundAdjacent == neededPages) {
            break;
        }
    }

    //Debug_PrintF("mapMemory: needed:%d foundAdjacent:%d vMem:%x\n", neededPages, foundAdjacent, vMem);

    if (foundAdjacent == neededPages) {
        uint32 p = 0;
        ListNode* pListNode = NULL;
        if (pAddressList) {
            pListNode = List_GetFirstNode(pAddressList);
            p = (uint32)(uint32*)pListNode->data;
        }
        else {
            p = pAddress;
        }
        p = p & 0xFFC00000;
        uint32 v = vMem;
        for (int i = 0; i < neededPages; ++i) {
            addPageToPd(process->pd, (char*)v, (char*)p, PG_USER);

            SET_PAGEFRAME_USED(process->mmappedVirtualMemory, PAGE_INDEX_4M(v));

            v += PAGESIZE_4M;

            if (pAddressList) {
                pListNode = pListNode->next;
                if (pListNode) { //the list may hold exactly neededPages nodes; don't read past its end
                    p = (uint32)(uint32*)pListNode->data;
                    p = p & 0xFFC00000;
                }
            }
            else {
                p += PAGESIZE_4M;
            }
        }

        return (void*)vMem;
    }

    return NULL;
}
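
//Illustrative usage sketch (not part of the original source, never compiled):
//mapping a physically contiguous region, e.g. a framebuffer, into a process
//using a single physical start address and no address list. The physical
//address and size below are hypothetical placeholders.
#if 0
static void* exampleMapFramebuffer(Process* process) {
    uint32 physical = 0xE0000000;    //hypothetical linear framebuffer physical address
    uint32 nBytes = 8 * 1024 * 1024; //8 MB, mapped as adjacent 4 MB pages
    return mapMemory(process, nBytes, physical, NULL);
}
#endif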

BOOL unmapMemory(Process* process, uint32 nBytes, uint32 vAddress) {
    if (nBytes == 0) {
        return FALSE;
    }

    if (vAddress < USER_OFFSET_MMAP) {
        return FALSE;
    }

    int pageIndex = 0;

    int neededPages = (nBytes / PAGESIZE_4M) + 1;

    int startIndex = PAGE_INDEX_4M(vAddress);
    int endIndex = startIndex + neededPages;

    BOOL result = FALSE;

    for (pageIndex = startIndex; pageIndex < endIndex; ++pageIndex) {
        if (IS_PAGEFRAME_USED(process->mmappedVirtualMemory, pageIndex)) {
            char* vAddr = (char*)(pageIndex * PAGESIZE_4M);

            removePageFromPd(process->pd, vAddr, FALSE);

            SET_PAGEFRAME_UNUSED(process->mmappedVirtualMemory, vAddr);

            result = TRUE;
        }
    }

    return result;
}