ref: dc7a19d14394fa3bd8e832c1be557478cf390285
dir: /alloc.myr/
use "die.use"
use "sys.use"
use "types.use"
pkg std =
	generic alloc : ( -> @a*)
	generic free : (v:@a* -> void)
	const bytealloc : (sz:size -> byte*)
	const bytefree : (ptr:byte* -> void)
;;
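
/*
   Typical use of the exported interface (a sketch; `foo` stands in for a
   hypothetical caller-defined type, not part of this file):

	var p : foo*

	p = alloc()	/* size is picked from p's type */
	...
	free(p)

   bytealloc/bytefree are the untyped equivalents for raw byte buffers.
*/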
/* null pointers */
const Zbyte = 0 castto(byte*)
const Zslab = 0 castto(slab*)
const Zbin = 0 castto(bin*)
const Pagesz = 4096 /* on the systems this supports, anyways... */
type bucket = struct
	sz : size /* aligned size */
	nper : size /* max number of elements per slab */
	slabs : slab* /* partially filled or free slabs */
	cache : slab* /* cache of empty slabs, to prevent thrashing */
	ncache : size /* size of cache */
;;
type slab = struct
	freehd : bin* /* the nodes we're allocating */
	next : slab* /* the next slab on the chain */
	nfree : size /* the number of free nodes */
;;
type bin = struct /* NB: must be smaller than sizeof(slab) */
	next : bin* /* the next bin in the free list */
;;
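
/*
   Each slab is one Pagesz page returned by mmap. The slab header sits at
   the start of the page, and the rest of the page is carved into bkt.nper
   bins of bkt.sz bytes each (a sketch; the header size is target-dependent):

	| slab header | pad to 16 | bin 0 | bin 1 | ... | bin nper-1 |
	0             sizeof(slab)                                 Pagesz

   Free bins are threaded through their own storage via bin.next, with the
   head of the list kept in slab.freehd.
*/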
const allocinit = {b : bucket*, sz
	b.sz = align(sz, 16)
	b.nper = (Pagesz - sizeof(slab))/b.sz
	b.slabs = Zslab
	b.cache = Zslab
	b.ncache = 0
}
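
/*
   Worked example: on a 64-bit target sizeof(slab) is 24 bytes (three 8-byte
   fields), so allocinit(b, 10) gives b.sz = align(10, 16) = 16 and
   b.nper = (4096 - 24)/16 = 254 bins per slab.
*/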
const mkslab = {bkt : bucket*
	var p
	var s
	var b
	var i
	var bprev
	var off /* offset of bin head */

	p = mmap(Zbyte, Pagesz, Mprotrw, Mpriv | Manon, -1, 0)
	if p == Mapbad
		die("Unable to mmap")
	;;

	s = p castto(slab*)
	s.next = Zslab
	s.nfree = bkt.nper
	/* skip past the slab header */
	off = align(sizeof(slab), 16)
	/* the first bin heads the free list; thread the remaining bins behind it */
	bprev = nextbin(s castto(bin*), off)
	s.freehd = bprev
	for i = 0; i < bkt.nper - 1; i++
		b = nextbin(bprev, bkt.sz)
		bprev.next = b
		bprev = b
	;;
	bprev.next = Zbin
	-> s
}
const nextbin = {b, sz
	-> ((b castto(intptr)) + sz) castto(bin*)
}
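
/* e.g. a bin at address 0x1000 in a bucket with sz = 32 is followed by the
   bin at 0x1020; nextbin just bumps the pointer by sz bytes. */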
const delslab = {s : slab*
	/* return the slab's page to the OS (assumes munmap from sys.use) */
	munmap(s castto(byte*), Pagesz)
}
const align = {v, align
	-> (v + align - 1) & ~(align - 1)
}
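
/* e.g. align(10, 16) = 16 and align(32, 16) = 32; the mask trick only works
   when the alignment is a power of two. */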