/*
 * TWCOS Kernel - slab allocator with tri-colour mark/sweep garbage collector.
 * (Fossil artifact adb598b623fa85559e3cf501c04b583f8a7f84aa)
 */


#include "slab.h"

#if INTERFACE
#include <sys/types.h>
#include <stddef.h>
#include <stdint.h>

typedef struct slab_t slab_t;

/**
 * Slab type descriptor - one per distinct allocation type/size.
 * Lazily initialized on first allocation (see slab_new).
 */
struct slab_type_t {
	/** Slab magic value - pseudo-random, derived from the descriptor address; 0 until initialized */
	uint32_t magic;

	/** Allocation size - usable bytes per allocation */
	size_t esize;

	/** Size of an allocation slot - esize plus slot header, rounded up to pointer alignment */
	size_t slotsize;

	/** Number of slots per slab page */
	int count;

#ifdef DEBUG
	/** In use - count of live allocations of this type */
	int inuse;
#endif

	/** Definition source file (__FILE__ at the SLAB_TYPE site) */
	char * source;
	/** Definition source line (__LINE__ at the SLAB_TYPE site) */
	int line;

	/** First slab with free slot */
	slab_t * first;

	/** Next type on the global types list */
	slab_type_t * next;
	/** Prev type on the global types list */
	slab_type_t * prev;

	/** Custom mark function - called by the GC with the user pointer; 0 selects the generic pointer scan */
	void (*mark)(void *);

	/** Custom finalize function - called before an object is reclaimed; may be 0 */
	void (*finalize)(void *);

	/** Mutex lock serializing allocation for this type */
	mutex_t lock[1];
};

/**
 * Weak reference - does not keep its target alive across GC.
 */
struct slab_weakref_t {
	/** Reference - the (possibly stale) target pointer */
	void * p;

	/** Remaining GC chances before a weak reference is removed */
	int chances;

	/** Sequence number of the allocation - compared against the slot's
	 * current sequence number to detect a reclaimed target */
	uint64_t seq;
};

/**
 * Garbage collector statistics.
 */
struct gc_stats_t
{
	/** Bytes handed out since the current GC cycle began */
	size_t allocated;
	/** Bytes found reachable during the mark phase */
	size_t inuse;
	/** Peak of inuse across GC cycles */
	size_t peak;
	size_t total;
	/** Bytes reclaimed by the sweep */
	size_t cleaned;
	/** Deepest mark context stack seen (see slab_gc_mark_range) */
	int maxcontext;
};

/** Static initializer for a slab_type_t; magic stays 0 until first use */
#define SLAB_TYPE(s, m, f) {.magic=0, .esize=s, .mark=m, .finalize=f, .source=__FILE__, .line=__LINE__}

#define mclone(p) mclone_sized(p, sizeof(*p))

/** Place a variable in the .gcroot section so the GC scans it as a root */
#define GCROOT __attribute__((section(".gcroot")))

#endif

/* Exceptions raised by the allocator */
exception_def OutOfMemoryException = { "OutOfMemoryException", &Exception };
exception_def AllocationTooBigException = { "AllocationTooBigException", &Exception };

/** GC object colours: free, unmarked, marked-unscanned, marked-scanned */
typedef enum { gcfree, gcwhite, gcgrey, gcblack } gccolour;
typedef struct slab_slot_t slab_slot_t;

/**
 * Slab slot - the unit of allocation. The user data area follows
 * immediately after this header.
 */
struct slab_slot_t {
	/** Sequence number of the allocation - used by the weak reference to detect stale references */
	uint64_t seq;

	/** GC prev pointer */
	slab_slot_t * prev;
	/** GC next pointer */
	slab_slot_t * next;

	/** GC state - indicates which GC list this slot is on */
	gccolour colour;
};

typedef struct slab_t slab_t;

/**
 * Slab - a page-aligned header followed by an array of slots
 */
struct slab_t {
	/** Magic number to validate against the type information */
	uint32_t magic;
	/** Next slab */
	slab_t * next;
	/** Prev slab */
	slab_t * prev;

	/** Slab type information */
	slab_type_t * type;

	/** First free slot of this slab */
	slab_slot_t * free;
};

/** Global GC statistics */
gc_stats_t gc_stats = {0};

/** Head of the global list of registered slab types */
static slab_type_t * types;

/* Disabled: dedicated cleaner thread that runs a GC cycle when signalled */
#if 0
static thread_t * cleaner_thread;
int is_cleaner()
{
	return arch_get_thread() == cleaner_thread;
}

static void cleaner()
{
	cleaner_thread = arch_get_thread();
	int pages = page_count();
	while(1) {
		MONITOR_AUTOLOCK(cleansignal) {
			monitor_wait(cleansignal);
		}
		int prefree = page_count_free();
		thread_gc();
		int postfree = page_count_free();
		MONITOR_AUTOLOCK(freesignal) {
			monitor_broadcast(freesignal);
		}
	}
}
#endif

/**
 * Report whether an idle-time GC cycle would be worthwhile.
 * @return non-zero when more has been allocated than was last found in use.
 */
int slab_idle_gc()
{
	return gc_stats.inuse < gc_stats.allocated;
}

/** Initialize the slab allocator - currently nothing to do */
void slab_init()
{
}

static mutex_t slablock[1];

/**
 * Acquire the global slab allocator lock.
 *
 * @param gc non-zero when locking on behalf of the garbage collector.
 *           Currently unused: both branches of the original were
 *           byte-identical, so the distinction is collapsed. The
 *           parameter is kept for callers and future GC-specific
 *           locking.
 */
static void slab_lock(int gc)
{
	(void)gc;
	mutex_lock(slablock);
}

/** Release the global slab allocator lock */
static void slab_unlock()
{
	mutex_unlock(slablock);
}

/* Address range spanned by all registered slab type descriptors */
static slab_type_t * slab_min = 0;
static slab_type_t * slab_max = 0;

/**
 * Record a newly initialized type descriptor, widening the known
 * [slab_min, slab_max] address range as needed.
 */
static void slab_register(slab_type_t * stype)
{
	if (!slab_min || stype < slab_min) {
		slab_min = stype;
	}
	if (!slab_max || slab_max < stype) {
		slab_max = stype;
	}
}

/**
 * Cheap sanity check: does stype fall within the address range of
 * registered type descriptors?
 */
static int slab_valid(slab_type_t * stype)
{
	if (stype < slab_min) {
		return 0;
	}
	return stype <= slab_max;
}

/**
 * In DEBUG builds, fill a buffer with a known byte pattern so later
 * corruption can be detected. No-op in release builds.
 */
static void debug_fillbuf(void * buf, size_t len, int pattern)
{
#ifdef DEBUG
	memset(buf, pattern, len);
#endif
}

/** IRQ-safe lock serializing all GC/free list manipulation */
static interrupt_monitor_t list_move_lock[1];

/**
 * Return a slot to its slab's free list.
 * All callers hold list_move_lock (via INTERRUPT_MONITOR_AUTOLOCK).
 */
static void slab_slot_free(slab_slot_t * slot)
{
	/* The slab header sits at the start of the slot's page */
	slab_t * slab = ARCH_PAGE_ALIGN(slot);
	if (0 == slab->free) {
		/* Previously empty, now has available slots */
		LIST_APPEND(slab->type->first, slab);
	}
	slot->seq = 0;	
	slot->colour = gcfree;
	/* Poison the user area in DEBUG builds */
	debug_fillbuf(slot+1, slab->type->esize, 0xf8);
	LIST_APPEND(slab->free, slot);
}

static slab_t * slab_new(slab_type_t * stype)
{
	if (0 == stype->magic) {
		/* Initialize type */
		stype->first = 0;
		stype->magic = 997 * (uint32_t)stype;
		stype->slotsize = ROUNDUP(stype->esize+sizeof(slab_slot_t), sizeof(intptr_t));
		stype->count = (ARCH_PAGE_SIZE-sizeof(slab_t)) / stype->slotsize;
		INTERRUPT_MONITOR_AUTOLOCK(list_move_lock) {
			LIST_APPEND(types, stype);
		}
		slab_register(stype);
	}

	/* Allocate and map page */
	slab_t * slab = page_heap_alloc();
	if (slab) {
		memset(slab, 0, sizeof(*slab));
		slab->magic = stype->magic;
		slab->type = stype;

		/* Put each slot onto the free list */
		slab_slot_t * slot = (slab_slot_t*)(slab+1);
		INTERRUPT_MONITOR_AUTOLOCK(list_move_lock) {
			for(int i=0; i<stype->count; i++, slot = PTR_BYTE_ADDRESS(slot, stype->slotsize)) {
				slab_slot_free(slot);
			}
		}
	}

	return slab;
}

/**
 * In DEBUG builds, verify that a buffer still holds a uniform fill
 * pattern (as written by debug_fillbuf); panics on corruption.
 * No-op in release builds.
 *
 * Fixes the original loop, which never advanced through the buffer and
 * compared the first byte against itself - the check was a no-op. Also
 * avoids reading the first byte of a zero-length buffer.
 */
static void debug_checkbuf(void * p, size_t l)
{
#ifdef DEBUG
	const char * cp = p;

	if (0 == l) {
		return;
	}

	const char c = *cp;
	for(size_t i=0; i<l; i++)
	{
		if (cp[i] != c) {
			kernel_panic("Corrupted buffer: %p", p);
		}
	}
#endif
}


/* Address of slot header number `slot` within `slab` */
#define SLAB_SLOT(slab, slot) ((slab_slot_t*)(PTR_BYTE_ADDRESS(slab+1, slab->type->slotsize*slot)))
/* User data pointer for slot number `slot` (just past the slot header) */
#define SLAB_SLOT_USER(slab, slot) (SLAB_SLOT(slab, slot)+1)
/* Slot number containing pointer `p` within `slab` */
#define SLAB_SLOT_NUM(slab, p) (PTR_BYTE_DIFF(slab+1, p) / slab->type->slotsize)

/** Allocated but not marked yet - candidates for collection */
static slab_slot_t * white;

/** Allocated, marked, but not scanned */
static slab_slot_t * grey;

/** Allocated, marked, and scanned */
static slab_slot_t * black;

/** Garbage, but not yet swept/finalized */
static slab_slot_t * garbage;

static int slab_gc_incremental_sweep_locked(slab_type_t * until);

/**
 * Allocate a single object of the given type.
 *
 * If the type has no slab with a free slot, first tries an incremental
 * sweep of pending garbage for this type, then falls back to allocating
 * a fresh slab page.
 *
 * NOTE(review): if slab_new repeatedly returns 0 (no pages left) this
 * loop spins forever; OutOfMemoryException is declared above but never
 * thrown - confirm the intended out-of-memory behaviour.
 */
void * slab_alloc(slab_type_t * stype)
{
	void * p = 0;
	slab_lock(0);
	MUTEX_AUTOLOCK(stype->lock)
	{
		int swept = 0;
		while(!stype->first) {
			if (swept) {
				stype->first = slab_new(stype);
				if (!stype->first) {
					/* Need to GC */
				}
			} else {
				/* Try to reclaim a slot of this type before
				 * spending a fresh page */
				slab_gc_incremental_sweep_locked(stype);
				swept = 1;
			}
		}
		slab_t * slab = stype->first;

		assert(slab->free);
		slab_slot_t * slot = slab->free;
		assert(slot);
		assert(slot->colour == gcfree);
		/* TODO: Concurrent GC would put this on the grey list */
		if (1) {
			INTERRUPT_MONITOR_AUTOLOCK(list_move_lock) {
				/* Move the slot from the slab free list to the
				 * GC white (unmarked) list */
				LIST_DELETE(slab->free, slot);
				slot->colour = gcwhite;
				LIST_APPEND(white, slot);

				/* Check if this was the last free slot */
				if (!slab->free) {
					LIST_DELETE(stype->first, slab);
				}
			}
		}
		gc_stats.allocated += slab->type->esize;

		/* Sequence number for weak reference support */
		static uint64_t seq = 0;
		slot->seq = ++seq;

		/* p is the returned buffer pointer - user data follows the
		 * slot header */
		p = slot+1;
		debug_checkbuf(p, slab->type->esize);
		debug_fillbuf(p, slab->type->esize, 0x0a);
#ifdef DEBUG
		stype->inuse++;
#endif
	}
	slab_unlock();
	return p;
}

/**
 * Allocate a single object of the given type, zero-initialized.
 */
void * slab_calloc(slab_type_t * stype)
{
	void * zeroed = slab_alloc(stype);
	memset(zeroed, 0, stype->esize);
	return zeroed;
}

/**
 * Check whether every slot in the slab is on its free list.
 * Walks the free list and compares its length to the type's slot count.
 * Caller holds list_move_lock.
 * @return non-zero when the slab holds no live allocations.
 */
static int slab_all_free(slab_t * slab)
{
	int freecount = 0;
	slab_slot_t * free = slab->free;

	while(free) {
		assert(free->colour == gcfree);
		freecount++;
		LIST_NEXT(slab->free, free);
	}

	assert(freecount<=slab->type->count);

	return freecount == slab->type->count;
}

/**
 * No-op mark callback for slab types whose instances contain no
 * heap references.
 */
void slab_nomark(void * p)
{
	(void)p; /* intentionally empty */
}

/**
 * Stack of pending root ranges to scan; each entry is a [from, to)
 * block of candidate pointers.
 */
static struct gccontext {
	/* The block to be scanned */
	void ** from;
	void ** to;
} context[16];

/** Current depth into context[]; 0 means no pending ranges */
static int gclevel = 0;

#ifdef DEBUG
/** Debug dump of per-type usage counts (body currently compiled out) */
static void slab_gc_dump_types()
{
#if 0
	slab_type_t * stype = types;
	while(stype) {
		kernel_debug("stype %s:%d: esize %d - inuse %d\n", stype->source, stype->line, stype->esize, stype->inuse);
		LIST_NEXT(types, stype);
	}
#endif
}
#endif

/**
 * Start a GC cycle: take the allocator lock (held until slab_gc_end),
 * reset per-cycle statistics, and queue the .gcroot section as the
 * initial root range.
 */
void slab_gc_begin()
{
	slab_lock(1);

	gc_stats.allocated = 0;
	gc_stats.inuse = 0;
	gc_stats.total = 0;
	gc_stats.cleaned = 0;

#ifdef DEBUG
	slab_gc_dump_types();
#endif

	/* Put the roots in the queue - linker-provided bounds of .gcroot */
	extern char gcroot_start[];
	extern char gcroot_end[];
	slab_gc_mark_range(gcroot_start, gcroot_end);
}

/**
 * Map an arbitrary pointer to its containing slab, if any.
 * @return the slab header, or 0 when p is not a valid slab heap pointer.
 */
static slab_t * slab_get(void * p)
{
	if (!arch_is_heap_pointer(p)) {
		return 0;
	}

	/* The slab header sits at the start of the page; validate the
	 * type pointer and the magic numbers before trusting it */
	slab_t * slab = ARCH_PAGE_ALIGN(p);
	if (!slab_valid(slab->type)) {
		return 0;
	}
	if (slab->magic != slab->type->magic) {
		return 0;
	}

	return slab;
}

/**
 * Mark a candidate root pointer.
 *
 * Ignores values that are not valid heap pointers, pointers that fall
 * inside a slot header, and already-marked slots. A white (unmarked)
 * slot is moved to the grey list for later scanning by slab_gc_step.
 */
void slab_gc_mark(void * root)
{
	/* DEBUG_TRAP_PTR(root, kas); */
	slab_t * slab = slab_get(root);
	if (slab) {
		/* Entry within the slab */
		int slotnum = SLAB_SLOT_NUM(slab, root);
		if (slotnum>=slab->type->count) {
			/* Not a valid pointer - don't mark */
			return;
		}
		assert(slotnum<slab->type->count);

		slab_slot_t * slot = SLAB_SLOT(slab, slotnum);
		if ((void*)(slot+1) > root) {
			/* Reference into the slot header - ignore */
		} else if(slot->colour == gcwhite) {
			gc_stats.inuse += slab->type->esize;
			assert(slot->seq);
			INTERRUPT_MONITOR_AUTOLOCK(list_move_lock) {
				/* white -> grey: reachable, not yet scanned */
				LIST_DELETE(white, slot);
				slot->colour = gcgrey;
				LIST_APPEND(grey, slot);
			}
		}
	}
}

/**
 * Queue a [from, to) range of candidate pointers for marking.
 * The range is drained by slab_gc_step.
 */
void slab_gc_mark_range(void * from, void * to)
{
	/* The original incremented gclevel unchecked - nesting deeper than
	 * the fixed-size context[] stack would write out of bounds */
	assert(gclevel+1 < (int)(sizeof(context)/sizeof(context[0])));
	gclevel++;
	if (gclevel>gc_stats.maxcontext) {
		gc_stats.maxcontext = gclevel;
	}
	context[gclevel].from = from;
	context[gclevel].to = to;
}

/**
 * Run one bounded chunk of the mark phase:
 * 1. drain the pending root ranges, marking every word in each range;
 * 2. scan grey (reachable, unscanned) objects, marking what they
 *    reference, then move them to the black list.
 * Type-specific mark callbacks may push new ranges; those are drained
 * before scanning resumes (the !gclevel guard below).
 */
static void slab_gc_step()
{
	/* Mark non-heap items */
	while(gclevel) {
		/* Check the next pointer in the block */
		while(context[gclevel].from && context[gclevel].from < context[gclevel].to) {
			slab_gc_mark(*context[gclevel].from++);
		}
		context[gclevel].from = context[gclevel].to = 0;
		gclevel--;
	}

	/* Mark heap items */
	while(grey && !gclevel) {
		slab_slot_t * const slot = grey;
		assert(slot->colour == gcgrey);
		slab_t * const slab = ARCH_PAGE_ALIGN(slot);
		if (slab->type->mark) {
			/* Type specific mark */
			slab->type->mark(slot+1);
		} else {
			/* Generic mark - treat the whole object as an array
			 * of potential pointers */
			void ** pp = (void**)(slot+1);
			int pcount = slab->type->esize/sizeof(*pp);
			for(int i=0; i<pcount; i++) {
				slab_gc_mark(pp[i]);
			}
		}

		INTERRUPT_MONITOR_AUTOLOCK(list_move_lock) {
			/* grey -> black: fully scanned */
			LIST_DELETE(grey, slot);
			slot->colour = gcblack;
			LIST_APPEND(black, slot);
		}
	}
}

/**
 * Run the mark phase to completion: step until no root ranges remain
 * and the grey list is empty.
 */
void slab_gc()
{
	for (;;) {
		if (!gclevel && !grey) {
			break;
		}
		slab_gc_step();
	}
}

/**
 * Queue a block of `size` bytes for marking, treated as an array of
 * potential pointers (trailing partial word ignored).
 */
void slab_gc_mark_block(void ** block, size_t size)
{
	size_t nptrs = size / sizeof(*block);
	slab_gc_mark_range(block, block + nptrs);
}

/**
 * Sweep pending garbage; caller holds the slab lock.
 *
 * Finalizes and frees garbage slots one at a time. With `until` set,
 * keeps going until a slot of that type is freed (returning 1 so the
 * caller can retry its allocation); with `until` 0, frees a single
 * slot per call. Once the garbage list is drained, releases any slab
 * pages that have become completely free.
 *
 * @return 1 if a matching slot was freed, 0 when the sweep completed.
 */
static int slab_gc_incremental_sweep_locked(slab_type_t * until)
{
	if (!garbage) {
		return 0;
	}

	while(garbage) {
		slab_slot_t * tofree = garbage;
		assert(garbage->colour == gcwhite);
		slab_t * slab = ARCH_PAGE_ALIGN(tofree);
		gc_stats.cleaned += slab->type->esize;
		if (slab->type->finalize) {
			slab->type->finalize(tofree+1);
		}
		INTERRUPT_MONITOR_AUTOLOCK(list_move_lock) {
			LIST_DELETE(garbage, tofree);
			slab_slot_free(tofree);
#ifdef DEBUG
			slab->type->inuse--;
#endif
		}
		/* Have we found what we're looking for? */
		if (until && until == slab->type) {
			/* NOTE(review): overwrites the type's first-free list
			 * head with this slab - confirm this cannot orphan
			 * other slabs already on the list */
			slab->type->first = slab;
			return 1;
		} else if (!until) {
			return 1;
		}
	}

	/* Sweep finished, free any pages now empty */
	slab_type_t * stype = types;
	while(stype) {
		slab_t * slab = stype->first;

		while(slab) {
			/* Release page if now empty */
			INTERRUPT_MONITOR_AUTOLOCK(list_move_lock) {
				if (slab_all_free(slab)) {
					slab_t * empty = slab;
					LIST_NEXT(stype->first, slab);
					LIST_DELETE(stype->first, empty);
					page_heap_free(empty);
				} else {
					LIST_NEXT(stype->first, slab);
				}
			}
		}

		LIST_NEXT(types, stype);
	}
	return 0;
}

/**
 * Locking wrapper around slab_gc_incremental_sweep_locked.
 * @return 1 if a slot was freed, 0 when the sweep is complete.
 */
int slab_gc_incremental_sweep(slab_type_t * until)
{
	slab_lock(0);
	const int found = slab_gc_incremental_sweep_locked(until);
	slab_unlock();

	return found;
}

/**
 * Finish a GC cycle: anything still white is unreachable and becomes
 * garbage awaiting finalization; black (reachable) objects become the
 * next cycle's white set. Releases the lock taken by slab_gc_begin.
 */
void slab_gc_end()
{
	assert(!grey);
	if (gc_stats.inuse > gc_stats.peak) {
		gc_stats.peak = gc_stats.inuse;
	}

	/* Unreached (still white) objects are garbage, finalized later by
	 * the incremental sweep */
	garbage = white;
	white = 0;

	/* Move everything back to the white list */
	slab_slot_t * slot = white = black;
	while(slot) {
		slot->colour = gcwhite;
		LIST_NEXT(white, slot);
	}
	black = 0;

	slab_unlock();
}

/** Test finalizer - logs the pointer being reclaimed */
static void slab_test_finalize(void * p)
{
	kernel_printk("Finalizing: %p\n", p);
}

/** Test mark callback - logs the pointer being marked */
static void slab_test_mark(void *p)
{
	kernel_printk("Marking: %p\n", p);
}

/**
 * Mark callback for weak references: keeps the target alive only while
 * the reference still has chances left, consuming one per GC cycle.
 */
static void slab_weakref_mark(void * p)
{
	slab_weakref_t * ref = (slab_weakref_t*)p;
	if (!ref->chances) {
		return;
	}
	ref->chances--;
	slab_gc_mark(ref->p);
}

/**
 * Create a weak reference to p.
 *
 * For a slab heap pointer, the target slot's sequence number is
 * captured so slab_weakref_get can detect reclamation. For any other
 * pointer, seq stays 0 and the reference is always considered valid.
 *
 * Fixes the original non-heap branch, which left ref->seq and
 * ref->chances uninitialized - ref->chances is read unconditionally by
 * slab_weakref_mark, so that was an uninitialized read.
 */
slab_weakref_t * slab_weakref(void * p)
{
	static slab_type_t weakrefs[] = {
		SLAB_TYPE(sizeof(slab_weakref_t), slab_weakref_mark, 0),
	};
	static slab_weakref_t nullref[] = {0};

	slab_weakref_t * ref = slab_alloc(weakrefs);
	ref->p = p;
	ref->chances = 0;
	ref->seq = 0;

	slab_t * slab = slab_get(p);
	if (slab) {
		/* Capture the current allocation sequence number */
		int slot = SLAB_SLOT_NUM(slab, p);
		ref->seq = SLAB_SLOT(slab, slot)->seq;
	}

	return ref;
}

/**
 * Dereference a weak reference.
 * @return the target pointer if still valid, 0 if it has been reclaimed.
 *         Non-heap targets are always returned as-is.
 */
void * slab_weakref_get(slab_weakref_t * ref)
{
	slab_t * slab = slab_get(ref->p);
	if (!slab) {
		/* Not a slab heap pointer - nothing to go stale */
		return ref->p;
	}

	/* Valid only while the slot still carries the captured sequence */
	int slot = SLAB_SLOT_NUM(slab, ref->p);
	if (SLAB_SLOT(slab, slot)->seq != ref->seq) {
		return 0;
	}
	return ref->p;
}

/** Largest data size such that `slots` slots (headers included) fit in one page */
#define SLAB_MAX_DATA_AREA(slots) ROUNDDOWN((((ARCH_PAGE_SIZE-sizeof(slab_t)-2*sizeof(uint32_t))/slots)-sizeof(slab_slot_t)),8)
/** General purpose malloc pools, ordered smallest to largest esize */
static slab_type_t pools[] = {
#if 0
	SLAB_TYPE(8, 0, 0),
	SLAB_TYPE(12, 0, 0),
	SLAB_TYPE(16, 0, 0),
	SLAB_TYPE(24, 0, 0),
	SLAB_TYPE(32, 0, 0),
	SLAB_TYPE(48, 0, 0),
	SLAB_TYPE(64, 0, 0),
	SLAB_TYPE(96, 0, 0),
	SLAB_TYPE(128, 0, 0),
	SLAB_TYPE(196, 0, 0),
	SLAB_TYPE(256, 0, 0),
	SLAB_TYPE(384, 0, 0),
	SLAB_TYPE(512, 0, 0),
	SLAB_TYPE(768, 0, 0),
	SLAB_TYPE(1024, 0, 0),
#else
	SLAB_TYPE(SLAB_MAX_DATA_AREA(128), 0, 0),
	SLAB_TYPE(SLAB_MAX_DATA_AREA(96), 0, 0),
	SLAB_TYPE(SLAB_MAX_DATA_AREA(78), 0, 0),
	SLAB_TYPE(SLAB_MAX_DATA_AREA(64), 0, 0),
	SLAB_TYPE(SLAB_MAX_DATA_AREA(48), 0, 0),
	SLAB_TYPE(SLAB_MAX_DATA_AREA(32), 0, 0),
	SLAB_TYPE(SLAB_MAX_DATA_AREA(24), 0, 0),
	SLAB_TYPE(SLAB_MAX_DATA_AREA(16), 0, 0),
	SLAB_TYPE(SLAB_MAX_DATA_AREA(12), 0, 0),
	SLAB_TYPE(SLAB_MAX_DATA_AREA(8), 0, 0),
	SLAB_TYPE(SLAB_MAX_DATA_AREA(4), 0, 0),
	SLAB_TYPE(SLAB_MAX_DATA_AREA(2), 0, 0),
	SLAB_TYPE(SLAB_MAX_DATA_AREA(1), 0, 0),
#endif
};

/**
 * Allocate `size` bytes from the best-fitting GC pool.
 * @throws AllocationTooBigException when size exceeds the largest pool.
 */
void * slab_malloc(size_t size)
{
	for(size_t i=0; i<sizeof(pools)/sizeof(pools[0]); i++) {
		/* >= (not >) so a request exactly matching a pool size uses
		 * that pool rather than the next larger one - and a request
		 * equal to the largest pool no longer throws spuriously */
		if (pools[i].esize >= size) {
			return slab_alloc(pools+i);
		}
	}

	KTHROWF(AllocationTooBigException, "Allocation too big for malloc: %d", size);

	/* Never reached */
	return 0;
}

/**
 * Resize an allocation. The old buffer is not explicitly freed - the
 * garbage collector reclaims it once unreferenced.
 * @return p itself when the existing slot is big enough, otherwise a
 *         new buffer holding a copy of the old data.
 */
void * slab_realloc(void *p, size_t size)
{
	if (0 == p) {
		/* realloc(0, n) behaves as malloc(n) */
		return slab_malloc(size);
	}

	slab_t * slab = slab_get(p);
	if (!slab) {
		/* FIXME: We should do something here to warn of misuse */
		kernel_panic("realloc: Invalid heap pointer: %p\n", p);
		kernel_panic("realloc: we shouldn't get here!");
		return 0;
	}

	if (size <= slab->type->esize) {
		/* Nothing to do, new memory fits in existing slot */
		return p;
	}

	/* Copy old data (of old size) to a freshly allocated buffer */
	void * new = slab_malloc(size);
	return memcpy(new, p, slab->type->esize);
}

/** Smoke test for allocation, GC, and finalization callbacks */
void slab_test()
{
	static slab_type_t t[1] = {SLAB_TYPE(1270, slab_test_mark, slab_test_finalize)};
	void * p[4];

	p[0] = slab_alloc(t);
	p[1] = slab_alloc(t);
	p[2] = slab_alloc(t);
	p[3] = slab_alloc(t);

	/* Nothing should be finalized here - p[] still references all four */
	thread_gc();
	p[0] = p[1] = p[2] = p[3] = malloc(653);

	/* p array should be finalized here - the four allocations are now
	 * unreachable */
	thread_gc();

	p[0] = p[1] = p[2] = p[3] = realloc(p[0], 736);
	p[0] = p[1] = p[2] = p[3] = realloc(p[0], 1736);

	thread_gc();
}