shithub: riscv

ref: 7592fa3e68f6f42510a5a4f72c4d18c4a1879d1f
parent: 14d663b1695df1144ee19b6481f9c68bb8be21b2
author: cinap_lenrek <[email protected]>
date: Sat Jan 26 12:25:23 EST 2013

kernel: add portable uncached memory allocator (ucalloc) (from sources)
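
The patch adds two layers: ucalloc.c, a pool allocator ("Uncached", grown in
1MiB arenas remapped through the port's mmuuncache()) that exposes ucalloc(),
ucallocalign() and ucfree(); and ucallocb.c, which builds qio Blocks on top of
it with ucallocb(), uciallocb() and ucfreeb(). As a rough sketch of the
intended use, a driver doing non-cache-coherent DMA might carve its descriptor
ring out of uncached memory along these lines (Desc, Nring, ringinit and the
64-byte alignment are invented for illustration; only the uc* calls come from
this patch):

	enum { Nring = 256 };

	typedef struct Desc Desc;
	struct Desc {
		ulong	addr;		/* physical address of the data buffer */
		ulong	ctl;		/* length and ownership bits */
	};

	static Desc *ring;

	static void
	ringinit(void)
	{
		/* the device reads and writes these, so keep them uncached */
		ring = ucallocalign(Nring*sizeof(Desc), 64, 0);
		if(ring == nil)
			panic("ringinit: out of uncached memory");
		/* ... hand PADDR(ring) to the controller ... */
	}

ucallocalign() zeroes the memory it returns, so the ring starts out clean.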

--- /dev/null
+++ b/sys/src/9/port/ucalloc.c
@@ -1,0 +1,137 @@
+/*
+ * allocate uncached memory
+ */
+#include "u.h"
+#include "../port/lib.h"
+#include "mem.h"
+#include "dat.h"
+#include "fns.h"
+
+#include <pool.h>
+
+typedef struct Private Private;
+struct Private {
+	Lock;
+	char	msg[256];
+	char*	cur;
+};
+
+static Private ucprivate;
+
+static void
+ucpoolpanic(Pool* p, char* fmt, ...)
+{
+	va_list v;
+	Private *pv;
+	char msg[sizeof pv->msg];
+
+	pv = p->private;
+	va_start(v, fmt);
+	vseprint(pv->cur, &pv->msg[sizeof pv->msg], fmt, v);
+	va_end(v);
+	memmove(msg, pv->msg, sizeof msg);
+	iunlock(pv);
+	panic("%s", msg);
+}
+
+static void
+ucpoolprint(Pool* p, char* fmt, ...)
+{
+	va_list v;
+	Private *pv;
+
+	pv = p->private;
+	va_start(v, fmt);
+	pv->cur = vseprint(pv->cur, &pv->msg[sizeof pv->msg], fmt, v);
+	va_end(v);
+}
+
+static void
+ucpoolunlock(Pool* p)
+{
+	Private *pv;
+	char msg[sizeof pv->msg];
+
+	pv = p->private;
+	if(pv->cur == pv->msg){
+		iunlock(pv);
+		return;
+	}
+
+	memmove(msg, pv->msg, sizeof msg);
+	pv->cur = pv->msg;
+	iunlock(pv);
+
+	iprint("%.*s", sizeof pv->msg, msg);
+}
+
+static void
+ucpoollock(Pool* p)
+{
+	Private *pv;
+
+	pv = p->private;
+	ilock(pv);
+	pv->pc = getcallerpc(&p);
+	pv->cur = pv->msg;
+}
+
+static void*
+ucarena(usize size)
+{
+	void *uv, *v;
+
+	assert(size == 1*MiB);
+
+	mainmem->maxsize += 1*MiB;
+	if((v = mallocalign(1*MiB, 1*MiB, 0, 0)) == nil ||
+	    (uv = mmuuncache(v, 1*MiB)) == nil){
+		free(v);
+		mainmem->maxsize -= 1*MiB;
+		return nil;
+	}
+	return uv;
+}
+
+static Pool ucpool = {
+	.name		= "Uncached",
+	.maxsize	= 4*MiB,
+	.minarena	= 1*MiB-32,
+	.quantum	= 32,
+	.alloc		= ucarena,
+	.merge		= nil,
+	.flags		= /*POOL_TOLERANCE|POOL_ANTAGONISM|POOL_PARANOIA|*/0,
+
+	.lock		= ucpoollock,
+	.unlock		= ucpoolunlock,
+	.print		= ucpoolprint,
+	.panic		= ucpoolpanic,
+
+	.private	= &ucprivate,
+};
+
+void
+ucfree(void* v)
+{
+	if(v == nil)
+		return;
+	poolfree(&ucpool, v);
+}
+
+void*
+ucallocalign(usize size, int align, int span)
+{
+	void *v;
+
+	assert(size < ucpool.minarena-128);
+	v = poolallocalign(&ucpool, size, align, 0, span);
+	if(v)
+		memset(v, 0, size);
+	return v;
+}
+
+void*
+ucalloc(usize size)
+{
+	return ucallocalign(size, 32, 0);
+}
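
In the file above, the pool's lock, print and panic callbacks collect their
output in Private.msg while the ilock is held and flush it with iprint() only
when the pool is unlocked, and ucarena() grows the pool one 1MiB arena at a
time by taking aligned memory from mainmem and remapping it with mmuuncache().
mmuuncache() is assumed to be supplied by the port; its shape, inferred from
the call in ucarena(), together with the declarations this patch introduces
(which would live somewhere like portfns.h), is roughly:

	/*
	 * port-provided: remap v..v+size with caching disabled,
	 * returning the uncached virtual address, or nil on failure
	 */
	void*	mmuuncache(void* v, usize size);

	/* added by ucalloc.c and ucallocb.c */
	void*	ucalloc(usize);
	void*	ucallocalign(usize size, int align, int span);
	void	ucfree(void*);
	Block*	ucallocb(int);
	Block*	uciallocb(int);
	void	ucfreeb(Block*);
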
--- /dev/null
+++ b/sys/src/9/port/ucallocb.c
@@ -1,0 +1,152 @@
+/*
+ * allocate Blocks from uncached memory
+ */
+#include	"u.h"
+#include	"../port/lib.h"
+#include	"mem.h"
+#include	"dat.h"
+#include	"fns.h"
+#include	"error.h"
+
+enum
+{
+	Hdrspc		= 64,		/* leave room for high-level headers */
+	Bdead		= 0x51494F42,	/* "QIOB" */
+};
+
+struct
+{
+	Lock;
+	ulong	bytes;
+} ucialloc;
+
+static Block*
+_ucallocb(int size)
+{
+	Block *b;
+	ulong addr;
+
+	if((b = ucalloc(sizeof(Block)+size+Hdrspc)) == nil)
+		return nil;
+
+	b->next = nil;
+	b->list = nil;
+	b->free = 0;
+	b->flag = 0;
+	b->ref = 0;
+	_xinc(&b->ref);
+
+	/* align start of data portion by rounding up */
+	addr = (ulong)b;
+	addr = ROUND(addr + sizeof(Block), BLOCKALIGN);
+	b->base = (uchar*)addr;
+
+	/* align end of data portion by rounding down */
+	b->lim = ((uchar*)b) + msize(b);
+	addr = (ulong)(b->lim);
+	addr = addr & ~(BLOCKALIGN-1);
+	b->lim = (uchar*)addr;
+
+	/* leave sluff at beginning for added headers */
+	b->rp = b->lim - ROUND(size, BLOCKALIGN);
+	if(b->rp < b->base)
+		panic("_ucallocb");
+	b->wp = b->rp;
+
+	return b;
+}
+
+Block*
+ucallocb(int size)
+{
+	Block *b;
+
+	/*
+	 * Must be called from a process; panics rather than
+	 * waiting if no uncached memory is available.
+	 */
+	if(up == nil)
+		panic("ucallocb without up: %#p", getcallerpc(&size));
+	if((b = _ucallocb(size)) == nil)
+		panic("ucallocb: no memory for %d bytes", size);
+	setmalloctag(b, getcallerpc(&size));
+
+	return b;
+}
+
+Block*
+uciallocb(int size)
+{
+	Block *b;
+	static int m1, m2, mp;
+
+	if(0 && ucialloc.bytes > conf.ialloc){
+		if((m1++%10000)==0){
+			if(mp++ > 1000){
+				active.exiting = 1;
+				exit(0);
+			}
+			iprint("uciallocb: limited %lud/%lud\n",
+				ucialloc.bytes, conf.ialloc);
+		}
+		return nil;
+	}
+
+	if((b = _ucallocb(size)) == nil){
+		if(0 && (m2++%10000)==0){
+			if(mp++ > 1000){
+				active.exiting = 1;
+				exit(0);
+			}
+			iprint("uciallocb: no memory %lud/%lud\n",
+				ucialloc.bytes, conf.ialloc);
+		}
+		return nil;
+	}
+	setmalloctag(b, getcallerpc(&size));
+	b->flag = BINTR;
+
+	ilock(&ucialloc);
+	ucialloc.bytes += b->lim - b->base;
+	iunlock(&ucialloc);
+
+	return b;
+}
+
+void
+ucfreeb(Block *b)
+{
+	void *dead = (void*)Bdead;
+	long ref;
+
+	if(b == nil || (ref = _xdec(&b->ref)) > 0)
+		return;
+
+	if(ref < 0){
+		dumpstack();
+		panic("ucfreeb: ref %ld; caller pc %#p", ref, getcallerpc(&b));
+	}
+
+	/*
+	 * drivers which perform non cache coherent DMA manage their own buffer
+	 * pool of uncached buffers and provide their own free routine.
+	 */
+	if(b->free) {
+		b->free(b);
+		return;
+	}
+	if(b->flag & BINTR) {
+		ilock(&ucialloc);
+		ucialloc.bytes -= b->lim - b->base;
+		iunlock(&ucialloc);
+	}
+
+	/* poison the block in case someone is still holding onto it */
+	b->next = dead;
+	b->rp = dead;
+	b->wp = dead;
+	b->lim = dead;
+	b->base = dead;
+
+	ucfree(b);
+}
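
As the comment in ucfreeb() suggests, a driver doing non-cache-coherent DMA
would typically keep its own pool of uncached Blocks and install a free
routine, so that a Block handed up the stack comes back to the driver instead
of being freed as if it were ordinary cached memory. A rough sketch of that
pattern (the rx* names and the freelist are invented; only ucallocb() and the
b->free hook come from this patch):

	static Block	*rxfree;	/* driver's freelist of uncached Blocks */
	static Lock	rxlock;

	static void
	rxreclaim(Block *b)
	{
		/* called by freeb()/ucfreeb() with b->ref already at 0 */
		b->rp = b->wp = b->base;
		ilock(&rxlock);
		b->next = rxfree;
		rxfree = b;
		iunlock(&rxlock);
	}

	static Block*
	rxget(int size)
	{
		Block *b;

		ilock(&rxlock);
		if((b = rxfree) != nil){
			rxfree = b->next;
			b->next = nil;
			b->ref = 1;	/* the previous freeb() dropped it to 0 */
		}
		iunlock(&rxlock);
		if(b == nil){
			b = ucallocb(size);	/* panics rather than returning nil */
			b->free = rxreclaim;
		}
		return b;
	}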