shithub: mc

ref: 1c61d05cba15d7e93d3207d4f3ab3c6043d63b77
dir: /lib/std/bytealloc.myr/

use sys
use "die"
use "extremum"
use "memops"
use "syswrap"
use "threadhooks"
use "types"
use "units"
use "result"
use "slfill"
use "backtrace"

pkg std =
	const startalloctrace	: (f : byte[:] -> void)
	const endalloctrace	: (-> void)

	/* public for testing */
	pkglocal const zbytealloc	: (sz:size -> byte#)
	const bytealloc			: (sz:size -> byte#)
	const bytefree			: (m:byte#, sz:size -> void)

	/* null pointers. only used internally. */
	pkglocal const Zsliceptr	= (0 : byte#)
	pkglocal const Align		= 16	/* minimum allocation alignment */

	pkglocal const align	: (m : std.size, align : std.size -> std.size)
	pkglocal const allocsz	: (sz : std.size -> std.size)

;;
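
/*
Rough usage sketch; most code reaches these through the slice
allocation routines rather than calling them directly:

	var p : byte#
	p = std.bytealloc(64)
	std.bytefree(p, 64)

bytealloc dies on failure, so the result needs no check, and the
size passed to bytefree must match the size that was allocated.
*/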

const Zslab	= (0 : slab#)
const Zchunk	= (0 : chunk#)
const Slabsz 	= 4*MiB
const Cachemax	= 4
const Bktmax	= 128*KiB	/* a balance between wasted space and falling back to mmap */
const Pagesz	= 4*KiB

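/* allocator state: the per-size buckets, tracing state, and a small cache of freed big allocations */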
var buckets	: bucket[32] /* excessive */
var trace	: bool
var tracefd	: std.fd
var cache	: cacheelt[32]

type bucket = struct
	sz	: size	/* aligned size */
	nper	: size	/* max number of elements per slab */
	slabs	: slab#	/* partially filled or free slabs */
	cache	: slab# /* cache of empty slabs, to prevent thrashing */
	ncache	: size  /* size of cache */
;;

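/*
A slab is a single Slabsz-aligned, Slabsz-sized region: the slab header
sits at the front, and the rest is carved into bkt.sz-sized chunks that
are threaded onto freehd while they're free.
*/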
type slab = struct
	head	: byte#	/* base of the raw mapping, kept so we can unmap it without leaking address space */
	next	: slab#	/* the next slab on the chain */
	prev	: slab#	/* the prev slab on the chain */
	freehd	: chunk#	/* the nodes we're allocating */
	nfree	: size	/* the number of free nodes */
;;

/* NB: must fit in the smallest chunk size (Align bytes), since chunks overlay freed memory */
type chunk = struct
	next	: chunk#	/* the next chunk in the free list */
;;

type cacheelt = struct
	sz	: std.size
	p	: byte#
;;

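/* set up one bucket for each power of two size from Align (16 bytes) up to Bktmax */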
const __init__ = {
	for var i = 0; i < buckets.len && (Align << i) <= Bktmax; i++
		bktinit(&buckets[i], Align << i)
	;;
}

const startalloctrace = {path
	match openmode(path, Owronly | Ocreat, 0o644)
	| `Ok fd:	tracefd = fd
	| `Err e:	-> void
	;;
	trace = true
}

const endalloctrace = {
	std.close(tracefd)
	trace = false
}

const zbytealloc = {sz
	var p

	p = bytealloc(sz)
	memfill(p, 0, sz)
	-> p
}

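/* records an allocation in the trace: a tag of 0, the address, the size, and up to 10 frames of backtrace */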
const tracealloc = {p, sz
	var stk	: void#[13]	/* [type, addr, sz, 10 stack slots] */

	slfill(stk[:], (0 : void#))
	stk[0] = (0 : void#)
	stk[1] = (p : void#)
	stk[2] = (sz : void#)
	backtrace(stk[3:])
	writealloctrace(stk[:])
}

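/* records a free in the trace: a tag of 1, the address, and the size */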
const tracefree = {p, sz
	var stk	: void#[3]

	stk[0] = (1 : void#)
	stk[1] = (p : void#)
	stk[2] = (sz : void#)
	writealloctrace(stk[:])
}

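/* dumps a trace record to the trace fd as raw pointer-sized words */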
const writealloctrace = {sl
	var len, p

	len = sl.len * sizeof(void#)
	p = (sl : byte#)
	write(tracefd, p[:len])
}

/* Allocates a blob that is 'sz' bytes long. Dies if the allocation fails */
const bytealloc = {sz
	var bkt, p

	if sz < Bktmax
		bkt = &buckets[bktnum(sz)]
		lock(memlck)
		p = bktalloc(bkt)
		unlock(memlck)
	else
		p = bigalloc(sz)
	;;
	if trace
		lock(memlck)
		tracealloc(p, sz)
		unlock(memlck)
	;;
	-> p
}

/* frees a blob that is 'sz' bytes long. */
const bytefree = {p, sz
	var bkt

	if trace
		lock(memlck)
		tracefree(p, sz)
		unlock(memlck)
	;;
	memfill(p, 0xa8, sz)
	if sz < Bktmax
		bkt = &buckets[bktnum(sz)]
		lock(memlck)
		bktfree(bkt, p)
		unlock(memlck)
	else
		bigfree(p, sz)
	;;
}

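/*
Allocates a blob too big for the buckets, either by carving it out of
a cached block or by mapping fresh memory from the OS. Dies if no
memory can be had.
*/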
const bigalloc = {sz
	var p

	p = Failmem
	sz = align(sz, Align)

	/* check our cache */
	lock(memlck)
	for var i = 0; i < cache.len; i++
		if sz > cache[i].sz
			continue
		;;
		p = cache[i].p
		if cache[i].sz - sz >= Bktmax
			/* carve our block off the front, and keep the rest cached */
			cache[i].sz -= sz
			cache[i].p = ((p : intptr) + (sz : intptr) : byte#)
		else
			/* too little would be left to be worth keeping: hand out the whole block */
			cache[i].p = Zsliceptr
			cache[i].sz = 0
		;;
		break
	;;
	unlock(memlck)
	if p != Failmem
		-> p
	;;

	/* ok, let's give up and get memory from the os */
	p = getmem(sz)
	if p != Failmem
		-> p
	;;
	die("could not get memory\n")
}

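/*
Releases a big allocation: merges it into an adjacent cached block when
possible, otherwise caches it (evicting the smallest cached block if no
slot is free), or unmaps it if everything cached is already bigger.
*/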
const bigfree = {p, sz
	var minsz, minp, minidx
	var endp, endblk

	minp = p
	minidx = -1
	minsz = align(sz, Align)
	endp = ((p : intptr) + (sz : intptr) : byte#)
	lock(memlck)
	for var i = 0; i < cache.len; i++
		endblk = ((cache[i].p : intptr) + (cache[i].sz : intptr) : byte#)
		/* try to merge with a saved block */
		if cache[i].p == endp
			cache[i].sz += sz
			cache[i].p = p
			minsz = 0
			minidx = -1
			break
		elif endblk == p
			cache[i].sz += sz
			minsz = 0
			minidx = -1
			break
		;;
		/* otherwise, remember the smallest saved block as an eviction candidate */
		if cache[i].sz < minsz
			minsz = cache[i].sz
			minp = cache[i].p
			minidx = i
		;;
	;;
	/* save this block, evicting the candidate if its slot was in use */
	if minidx >= 0
		cache[minidx].p = p
		cache[minidx].sz = sz
	;;
	unlock(memlck)

	/* a size of 0 means we merged or used an empty slot: nothing left to unmap */
	if minsz == 0
		-> void
	;;
	freemem(minp, minsz)
}

/* Sets up a single empty bucket */
const bktinit = {b, sz
	b.sz = align(sz, Align)
	b.nper = (Slabsz - sizeof(slab))/b.sz
	b.slabs = Zslab
	b.cache = Zslab
	b.ncache = 0
}

/* Creates a slab for bucket 'bkt', and fills the chunk list */
const mkslab = {bkt
	var p, s
	var b, bnext
	var off /* offset of chunk head */

	if bkt.ncache > 0
		/* reuse a cached empty slab instead of mapping a new one */
		s = bkt.cache
		bkt.cache = s.next
		bkt.ncache--
		s.next = Zslab
		s.prev = Zslab
		-> s
	;;
	/*
	tricky: we need power of two alignment, so we allocate double the
	needed size, chop off the unaligned ends, and waste the address
	space. Since the OS is "smart enough", this shouldn't actually
	cost us memory, and 64 bits of address space means that we're not
	going to have issues with running out of address space for a
	while. On a 32 bit system this would be a bad idea.
	*/
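	/*
	e.g. if getmem returns 0x1234000, the slab header lands at
	align(0x1234000, 4*MiB) = 0x1400000, and the parts of the 8MiB
	mapping outside [0x1400000, 0x1800000) are simply never touched.
	*/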
	p = getmem(Slabsz*2)
	if p == Failmem
		die("Unable to get memory")
	;;

	s = (align((p : size), Slabsz) : slab#)
	s.head = p
	s.nfree = bkt.nper
	s.next = Zslab
	s.prev = Zslab
	/* skip past the slab header */
	off = align(sizeof(slab), Align)
	bnext = nextchunk((s : chunk#), off)
	s.freehd = bnext
	for var i = 0; i < bkt.nper; i++
		b = bnext
		bnext = nextchunk(b, bkt.sz)
		b.next = bnext
	;;
	b.next = Zchunk
	-> s
}

/* 
Allocates a node from bucket 'bkt', crashing if the
allocation cannot be satisfied. Will create a new slab
if there are no slabs on the freelist.
*/
const bktalloc = {bkt
	var s
	var b

	/* find a slab */
	s = bkt.slabs
	if s == Zslab
		s = mkslab(bkt)
		bkt.slabs = s
		if s == Zslab
			die("No memory left")
		;;
	;;

	/* grab the first chunk on the slab */
	b = s.freehd
	s.freehd = b.next
	s.nfree--
	if s.nfree == 0
		bkt.slabs = s.next
		if s.next != Zslab
			s.next.prev = Zslab
		;;
	;;

	-> (b : byte#)
}

/*
Frees a chunk of memory 'm' into bucket 'bkt'.
Assumes that the memory already came from a slab
that was created for bucket 'bkt'. Will crash
if this is not the case.
*/
const bktfree = {bkt, m
	var s, b

	s = (mtrunc(m, Slabsz) : slab#)
	b = (m : chunk#)
	if s.nfree == 0
		if bkt.slabs != Zslab
			bkt.slabs.prev = s
		;;
		s.next = bkt.slabs
		s.prev = Zslab
		bkt.slabs = s
	elif s.nfree == bkt.nper - 1
		/* the slab is now fully free: unlink it from the partial slab list */
		if s.next != Zslab
			s.next.prev = s.prev
		;;
		if s.prev != Zslab
			s.prev.next = s.next
		;;
		if bkt.slabs == s
			bkt.slabs = s.next
		;;
		/*
		HACK HACK HACK: if we can't unmap, keep an infinite cache per slab size.
		We should solve this better somehow.
		*/
		if bkt.ncache < Cachemax || !Canunmap
			s.next = bkt.cache
			s.prev = Zslab
			bkt.cache = s
			bkt.ncache++
		else
			/* we mapped 2*Slabsz so we could align it,
			 so we need to unmap the same */
			freemem(s.head, Slabsz*2)
		;;
		-> void
	;;
	s.nfree++
	b.next = s.freehd
	s.freehd = b
}

/*
Finds the correct bucket index to allocate from
for allocations of size 'sz'
*/
const bitpos : byte[32] = [
  0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30,
  8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31
]
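
/*
This is the standard multiply and lookup log2 trick: the or-shifts smear
v out to (2^k)-1, and the top five bits of v*0x07c4acdd then index bitpos
to recover k. For sz >= 8 the result is floor(log2(sz >> 3)); e.g. sz = 100
gives v = 12, which smears to 15 and lands in bucket 3 (128 bytes).
*/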
const bktnum = {sz
	var n, v

	v = (sz >> 3 : uint32)
	v |= v >> 1
	v |= v >> 2
	v |= v >> 4
	v |= v >> 8
	v |= v >> 16

	n = bitpos[((v * 0x07c4acdd) & 0xffff_ffffui) >> 27]
	-> (n : size)
}

/*
returns the actual size we allocated for a given
size request
*/
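/* e.g. allocsz(100) = 128 (the smallest bucket that fits), and allocsz(129*KiB) = 132*KiB (rounded up to a page) */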
const allocsz = {sz
	var bktsz

	if sz <= Bktmax
		bktsz = Align
		for var i = 0; bktsz <= Bktmax; i++
			if bktsz >= sz
				-> bktsz
			;;
			bktsz *= 2
		;;
	else
		-> align(sz, Pagesz)
	;;
	die("Size does not match any buckets")
}

/*
aligns a size to a requested alignment.
'align' must be a power of two
*/
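/* e.g. align(100, 16) = 112, and align(112, 16) = 112 */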
const align = {v, align
	-> (v + align - 1) & ~(align - 1)
}

/*
chunks are variable sizes, so we can't just
index to get to the next one
*/
const nextchunk = {b, sz : size
	-> ((b : intptr) + (sz : intptr) : chunk#)
}

/*
truncates a pointer to 'align'. 'align' must
be a power of two.
*/
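/* e.g. bktfree uses mtrunc(m, Slabsz) to get from a chunk pointer back to its slab header */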
const mtrunc = {m, align
	-> ((m : intptr) & ~((align : intptr) - 1) : byte#)
}