# Create a new segment (pool of memory for allocating chunks from) in the form
# of an *allocation descriptor* that can be passed to the memory allocator
# (defined in a later layer).
#
# Currently an allocation descriptor consists of just the bounds of the pool of
# available memory:
#
#   curr: address
#   end: address
#
# This isn't enough information to reclaim individual allocations. We can't
# support arbitrary reclamation yet.
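#
# A hedged C-style sketch of the descriptor's layout (hypothetical name; SubX
# has no struct syntax, so this is just two consecutive 32-bit words):
#
#   struct allocation_descriptor {
#     void* curr;  // next unallocated byte in the segment
#     void* end;   // one past the last byte of the segment
#   };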

== code
#   instruction                     effective address                                                   register    displacement    immediate
# . op          subop               mod             rm32          base        index         scale       r32
# . 1-3 bytes   3 bits              2 bits          3 bits        3 bits      3 bits        2 bits      2 bits      0/1/2/4 bytes   0/1/2/4 bytes
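# As a worked example (a hedged decoding, not itself part of the program): the
# line "89/copy 3/mod/direct 1/rm32/ecx ... 4/r32/esp" below assembles to the
# two bytes "89 e1": opcode 0x89, then a ModR/M byte 0b11_100_001 packing
# mod=3 (direct), r32=4 (esp) and rm32=1 (ecx), i.e. "copy esp to ecx".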

Entry:   # manual test
    # var ad/ecx: allocation-descriptor
    68/push  0/imm32/limit
    68/push  0/imm32/curr
    89/copy                         3/mod/direct    1/rm32/ecx    .           .             .           4/r32/esp   .               .                 # copy esp to ecx
    # new-segment(0x1000, ad)
    # . . push args
    51/push-ecx
    68/push  0x1000/imm32
    # . . call
    e8/call  new-segment/disp32
    # . . discard args
    81          0/subop/add         3/mod/direct    4/rm32/esp    .           .             .           .           .               8/imm32           # add to esp
    # var eax: (addr _) = ad->curr
    8b/copy                         0/mod/indirect  1/rm32/ecx    .           .             .           0/r32/eax   .               .                 # copy *ecx to eax
    # write to *eax to check that we have access to the newly-allocated segment
    c7          0/subop/copy        0/mod/indirect  0/rm32/eax    .           .             .           .           .               0x34/imm32        # copy to *eax
    # syscall(exit, eax)
    89/copy                         3/mod/direct    3/rm32/ebx    .           .             .           0/r32/eax   .               .                 # copy eax to ebx
    b8/copy-to-eax  1/imm32/exit
    cd/syscall  0x80/imm8
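
# A hedged C-style sketch of the Entry test above (hypothetical names, for
# illustration only; the real test is the hand-encoded instructions):
#   allocation_descriptor ad = {0, 0};  // the two words pushed on the stack
#   new_segment(0x1000, &ad);
#   *(int*) ad.curr = 0x34;             // faults unless the segment got mapped
#   exit(...);                          // exit status comes from eax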

new-segment:  # len: int, ad: (addr allocation-descriptor)
    # . prologue
    55/push-ebp
    89/copy                         3/mod/direct    5/rm32/ebp    .           .             .           4/r32/esp   .               .                 # copy esp to ebp
    # . save registers
    50/push-eax
    53/push-ebx
    # copy len to _mmap-new-segment->len
    8b/copy                         1/mod/*+disp8   5/rm32/ebp    .           .             .           0/r32/eax   8/disp8         .                 # copy *(ebp+8) to eax
    89/copy                         0/mod/indirect  5/rm32/.disp32            .             .           0/r32/eax   $_mmap-new-segment:len/disp32     # copy eax to *$_mmap-new-segment:len
    # mmap(_mmap-new-segment)
    bb/copy-to-ebx  _mmap-new-segment/imm32
    b8/copy-to-eax  0x5a/imm32/mmap
    cd/syscall  0x80/imm8
    # copy {eax, eax+len} to *ad
    # . ebx = ad
    8b/copy                         1/mod/*+disp8   5/rm32/ebp    .           .             .           3/r32/ebx   0xc/disp8       .                 # copy *(ebp+12) to ebx
    # . ad->curr = eax
    89/copy                         0/mod/indirect  3/rm32/ebx    .           .             .           0/r32/eax   .               .                 # copy eax to *ebx
    # . ad->end = eax+len
    03/add                          1/mod/*+disp8   5/rm32/ebp    .           .             .           0/r32/eax   8/disp8         .                 # add *(ebp+8) to eax
    89/copy                         1/mod/*+disp8   3/rm32/ebx    .           .             .           0/r32/eax   4/disp8         .                 # copy eax to *(ebx+4)
$new-segment:end:
    # . restore registers
    5b/pop-to-ebx
    58/pop-to-eax
    # . epilogue
    89/copy                         3/mod/direct    4/rm32/esp    .           .             .           5/r32/ebp   .               .                 # copy ebp to esp
    5d/pop-to-ebp
    c3/return
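
# A hedged C equivalent of new-segment (hypothetical names; the real code
# issues the old 32-bit mmap syscall 0x5a through _mmap-new-segment below):
#   void new_segment(int len, allocation_descriptor* ad) {
#     char* result = mmap(0, len, PROT_READ | PROT_WRITE,
#                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#     ad->curr = result;
#     ad->end  = result + len;
#   }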

== data

# various constants used here were found in the Linux sources (search for file mman-common.h)
_mmap-new-segment:  # mmap_arg_struct
    # addr
    0/imm32
$_mmap-new-segment:len:
    # len
    0/imm32
    # protection flags
    3/imm32  # PROT_READ | PROT_WRITE
    # sharing flags
    0x22/imm32  # MAP_PRIVATE | MAP_ANONYMOUS
    # fd
    -1/imm32  # since MAP_ANONYMOUS is specified
    # offset
    0/imm32  # since MAP_ANONYMOUS is specified
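
# For reference, a hedged sketch of the kernel's mmap_arg_struct that the six
# words above mirror (based on the Linux sources; the 32-bit mmap syscall takes
# a single pointer to this struct rather than six separate register arguments):
#   struct mmap_arg_struct {
#     unsigned long addr;    // 0: let the kernel choose the address
#     unsigned long len;     // patched in by new-segment before the syscall
#     unsigned long prot;    // 3: PROT_READ | PROT_WRITE
#     unsigned long flags;   // 0x22: MAP_PRIVATE | MAP_ANONYMOUS
#     unsigned long fd;      // -1: unused with MAP_ANONYMOUS
#     unsigned long offset;  // 0: unused with MAP_ANONYMOUS
#   };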

# . . vim:nowrap:textwidth=0