#define page_isfree(i) GET_BIT(free_pages_bitmap, i)
+#define RESERVEDMAGIC 0x6e4c74d5
+#define MAXRESERVEDPAGES 100
+#define MAXRESERVEDQUEUES 15
+
+/* A reserved-pages queue: a pool of pre-allocated page runs that VM can
+ * hand out even when normal allocation paths are unavailable.
+ */
+static struct reserved_pages {
+ struct reserved_pages *next; /* next queue in use (list head: first_reserved_inuse) */
+ int max_available; /* queue depth (slots to keep filled); 0 if queue not in use */
+ int npages; /* number of consecutive pages per slot */
+ int mappedin; /* must reserved pages also be mapped? */
+ int n_available; /* number of slots currently filled */
+ int allocflags; /* allocflags for alloc_mem */
+ struct reserved_pageslot {
+ phys_bytes phys;
+ void *vir;
+ } slots[MAXRESERVEDPAGES];
+ u32_t magic; /* RESERVEDMAGIC once initialized; checked by sanitycheck_rq */
+} reservedqueues[MAXRESERVEDQUEUES], *first_reserved_inuse = NULL;
+
+/* Total unfilled slots across all in-use queues; alloc_cycle() runs while > 0. */
+int missing_spares = 0;
+
+/* Verify that the sum of unfilled slots over all in-use queues matches
+ * the global missing_spares counter.
+ */
+static void sanitycheck_queues(void)
+{
+ struct reserved_pages *mrq;
+ int m = 0;
+
+ /* The list is NULL-terminated; a relational '> 0' comparison on a
+  * pointer is invalid C (C11 6.5.8), so test against NULL instead.
+  */
+ for(mrq = first_reserved_inuse; mrq != NULL; mrq = mrq->next) {
+ assert(mrq->max_available > 0);
+ assert(mrq->max_available >= mrq->n_available);
+ m += mrq->max_available - mrq->n_available;
+ }
+
+ assert(m == missing_spares);
+}
+
+/* Sanity-check one queue's invariants, then cross-check all queues
+ * against the global missing_spares counter.
+ */
+static void sanitycheck_rq(struct reserved_pages *rq)
+{
+ assert(rq->magic == RESERVEDMAGIC);
+ assert(rq->n_available >= 0);
+ assert(rq->n_available <= MAXRESERVEDPAGES);
+ assert(rq->n_available <= rq->max_available);
+
+ sanitycheck_queues();
+}
+
+/* Create a new reserved-pages queue.
+ *   max_available: queue depth, i.e. number of slots to keep filled
+ *   npages:        consecutive pages per slot
+ *   mapped:        nonzero if each slot must also be mapped into VM's space
+ *   allocflags:    passed through to alloc_mem when refilling
+ * Returns an opaque queue handle, or NULL if all queues are taken.
+ * Slots start empty; alloc_cycle()/reservedqueue_add() fill them later.
+ */
+void *reservedqueue_new(int max_available, int npages, int mapped, int allocflags)
+{
+ int r;
+ struct reserved_pages *rq;
+
+ assert(max_available > 0);
+ assert(max_available < MAXRESERVEDPAGES);
+ assert(npages > 0);
+ assert(npages < 10);
+
+ /* Find an unused queue; max_available == 0 marks a free entry. */
+ for(r = 0; r < MAXRESERVEDQUEUES; r++)
+ if(!reservedqueues[r].max_available)
+ break;
+
+ if(r >= MAXRESERVEDQUEUES) {
+ printf("VM: %d reserved queues in use\n", MAXRESERVEDQUEUES);
+ return NULL;
+ }
+
+ rq = &reservedqueues[r];
+
+ memset(rq, 0, sizeof(*rq));
+ /* Link onto the head of the in-use list. */
+ rq->next = first_reserved_inuse;
+ first_reserved_inuse = rq;
+
+ rq->max_available = max_available;
+ rq->npages = npages;
+ rq->mappedin = mapped;
+ rq->allocflags = allocflags;
+ rq->magic = RESERVEDMAGIC;
+
+ /* All max_available slots are initially unfilled. */
+ missing_spares += max_available;
+
+ return rq;
+}
+
+/* Record a page run (physical address ph, optionally mapped at vir) in
+ * the given slot and update queue and global spare accounting.
+ */
+static void
+reservedqueue_fillslot(struct reserved_pages *rq,
+ struct reserved_pageslot *rps, phys_bytes ph, void *vir)
+{
+ rps->phys = ph;
+ rps->vir = vir;
+ assert(missing_spares > 0);
+ /* Queues created with mappedin must always supply a virtual address. */
+ if(rq->mappedin) assert(vir);
+ missing_spares--;
+ rq->n_available++;
+}
+
+/* Allocate one slot's worth of memory for the queue (and map it into
+ * VM's address space if the queue requires it). Returns OK or ENOMEM.
+ */
+static int
+reservedqueue_addslot(struct reserved_pages *rq)
+{
+ phys_bytes cl, cl_addr;
+ void *vir;
+ struct reserved_pageslot *rps;
+
+ sanitycheck_rq(rq);
+
+ if((cl = alloc_mem(rq->npages, rq->allocflags)) == NO_MEM)
+ return ENOMEM;
+
+ cl_addr = CLICK2ABS(cl);
+
+ vir = NULL;
+
+ if(rq->mappedin) {
+ /* On mapping failure, give the physical memory back before bailing. */
+ if(!(vir = vm_mappages(cl_addr, rq->npages))) {
+ free_mem(cl, rq->npages);
+ printf("reservedqueue_addslot: vm_mappages failed\n");
+ return ENOMEM;
+ }
+ }
+
+ rps = &rq->slots[rq->n_available];
+
+ reservedqueue_fillslot(rq, rps, cl_addr, vir);
+
+ return OK;
+}
+
+/* Donate an externally obtained page run (vir/ph) to the queue, e.g. the
+ * static boot-time spare pages during pt_init.
+ * NOTE(review): there is no explicit bound check against max_available
+ * here; only the missing_spares assert in fillslot guards against
+ * overfilling -- callers must not add more than max_available slots.
+ */
+void reservedqueue_add(void *rq_v, void *vir, phys_bytes ph)
+{
+ struct reserved_pages *rq = rq_v;
+ struct reserved_pageslot *rps;
+
+ sanitycheck_rq(rq);
+
+ rps = &rq->slots[rq->n_available];
+
+ reservedqueue_fillslot(rq, rps, ph, vir);
+}
+
+/* Top up the queue until all max_available slots are filled.
+ * Returns OK, or the first allocation error (the queue may then be
+ * partially filled; a later alloc_cycle() will retry).
+ */
+int reservedqueue_fill(void *rq_v)
+{
+ struct reserved_pages *rq = rq_v;
+ int r;
+
+ sanitycheck_rq(rq);
+
+ while(rq->n_available < rq->max_available)
+ if((r=reservedqueue_addslot(rq)) != OK)
+ return r;
+
+ return OK;
+}
+
+/* Pop one slot off the queue. On success, stores the physical address in
+ * *ph and the virtual address (NULL for unmapped queues) in *vir and
+ * returns OK; returns ENOMEM if the queue is empty.
+ */
+int
+reservedqueue_alloc(void *rq_v, phys_bytes *ph, void **vir)
+{
+ struct reserved_pages *rq = rq_v;
+ struct reserved_pageslot *rps;
+
+ sanitycheck_rq(rq);
+
+ if(rq->n_available < 1) return ENOMEM;
+
+ /* Consuming a slot creates a missing spare to be refilled later. */
+ rq->n_available--;
+ missing_spares++;
+ rps = &rq->slots[rq->n_available];
+
+ *ph = rps->phys;
+ *vir = rps->vir;
+
+ sanitycheck_rq(rq);
+
+ return OK;
+}
+
+/* Periodic replenish pass, called from the main loop while
+ * missing_spares > 0: refill every in-use reserved queue.
+ */
+void alloc_cycle(void)
+{
+ struct reserved_pages *rq;
+ sanitycheck_queues();
+ for(rq = first_reserved_inuse; rq && missing_spares > 0; rq = rq->next) {
+ sanitycheck_rq(rq);
+ reservedqueue_fill(rq);
+ sanitycheck_rq(rq);
+ }
+ sanitycheck_queues();
+}
+
/*===========================================================================*
* alloc_mem *
*===========================================================================*/
phys_bytes phys;
} sparepagedirs[SPAREPAGEDIRS];
-int missing_spares = SPAREPAGES;
-static struct {
- void *page;
- phys_bytes phys;
-} sparepages[SPAREPAGES];
-
extern char _end;
#define is_staticaddr(v) ((vir_bytes) (v) < (vir_bytes) &_end)
#error CLICK_SIZE must be page size.
#endif
+static void *spare_pagequeue;
static char static_sparepages[VM_PAGE_SIZE*STATIC_SPAREPAGES]
__aligned(VM_PAGE_SIZE);
*===========================================================================*/
static void *vm_getsparepage(phys_bytes *phys)
{
- int s;
- assert(missing_spares >= 0 && missing_spares <= SPAREPAGES);
- for(s = 0; s < SPAREPAGES; s++) {
- if(sparepages[s].page) {
- void *sp;
- sp = sparepages[s].page;
- *phys = sparepages[s].phys;
- sparepages[s].page = NULL;
- missing_spares++;
- assert(missing_spares >= 0 && missing_spares <= SPAREPAGES);
- return sp;
- }
+ void *ptr;
+ if(reservedqueue_alloc(spare_pagequeue, phys, &ptr) != OK) {
+ printf("vm_getsparepage: no spare found\n");
+ return NULL;
}
- printf("no spare found, %d missing\n", missing_spares);
- return NULL;
+ assert(ptr);
+ return ptr;
}
/*===========================================================================*
return NULL;
}
-/*===========================================================================*
- * vm_checkspares *
- *===========================================================================*/
-static void *vm_checkspares(void)
+void *vm_mappages(phys_bytes p, int pages)
{
- int s, n = 0;
- static int total = 0, worst = 0;
- assert(missing_spares >= 0 && missing_spares <= SPAREPAGES);
- for(s = 0; s < SPAREPAGES && missing_spares > 0; s++) {
- if(!sparepages[s].page) {
- n++;
- if((sparepages[s].page = vm_allocpage(&sparepages[s].phys,
- VMP_SPARE))) {
- missing_spares--;
- assert(missing_spares >= 0);
- assert(missing_spares <= SPAREPAGES);
- } else {
- printf("VM: warning: couldn't get new spare page\n");
- }
- }
+ vir_bytes loc;
+ int r;
+ pt_t *pt = &vmprocess->vm_pt;
+
+ /* Where in our virtual address space can we put it? */
+ loc = findhole(pages);
+ if(loc == NO_MEM) {
+ printf("vm_mappages: findhole failed\n");
+ return NULL;
}
- if(worst < n) worst = n;
- total += n;
- return NULL;
+ /* Map this page into our address space. */
+ if((r=pt_writemap(vmprocess, pt, loc, p, VM_PAGE_SIZE*pages,
+ ARCH_VM_PTE_PRESENT | ARCH_VM_PTE_USER | ARCH_VM_PTE_RW
+#if defined(__arm__)
+ | ARM_VM_PTE_WB
+#endif
+ , 0)) != OK) {
+ printf("vm_mappages writemap failed\n");
+ return NULL;
+ }
+
+ if((r=sys_vmctl(SELF, VMCTL_FLUSHTLB, 0)) != OK) {
+ panic("VMCTL_FLUSHTLB failed: %d", r);
+ }
+
+ assert(loc);
+
+ return (void *) loc;
}
static int pt_init_done;
{
/* Allocate a page for use by VM itself. */
phys_bytes newpage;
- vir_bytes loc;
- pt_t *pt;
- int r;
static int level = 0;
void *ret;
u32_t mem_flags = 0;
- pt = &vmprocess->vm_pt;
assert(reason >= 0 && reason < VMP_CATEGORIES);
assert(pages > 0);
}
#endif
- /* VM does have a pagetable, so get a page and map it in there.
- * Where in our virtual address space can we put it?
- */
- loc = findhole(pages);
- if(loc == NO_MEM) {
- level--;
- printf("VM: vm_allocpage: findhole failed\n");
- return NULL;
- }
-
/* Allocate page of memory for use by VM. As VM
* is trusted, we don't have to pre-clear it.
*/
*phys = CLICK2ABS(newpage);
- /* Map this page into our address space. */
- if((r=pt_writemap(vmprocess, pt, loc, *phys, VM_PAGE_SIZE*pages,
- ARCH_VM_PTE_PRESENT | ARCH_VM_PTE_USER | ARCH_VM_PTE_RW
-#if defined(__arm__)
- | ARM_VM_PTE_WT
-#endif
- , 0)) != OK) {
- free_mem(newpage, pages);
- printf("vm_allocpage writemap failed\n");
+ if(!(ret = vm_mappages(*phys, pages))) {
level--;
+ printf("VM: vm_allocpage: vm_mappages failed\n");
return NULL;
}
- if((r=sys_vmctl(SELF, VMCTL_FLUSHTLB, 0)) != OK) {
- panic("VMCTL_FLUSHTLB failed: %d", r);
- }
-
level--;
-
- /* Return user-space-ready pointer to it. */
- ret = (void *) loc;
-
vm_self_pages++;
+
return ret;
}
}
#endif
- missing_spares = 0;
+ if(!(spare_pagequeue = reservedqueue_new(SPAREPAGES, 1, 1, 0)))
+ panic("reservedqueue_new for single pages failed");
+
assert(STATIC_SPAREPAGES < SPAREPAGES);
- for(s = 0; s < SPAREPAGES; s++) {
- vir_bytes v = (sparepages_mem + s*VM_PAGE_SIZE);;
+ for(s = 0; s < STATIC_SPAREPAGES; s++) {
+ void *v = (void *) (sparepages_mem + s*VM_PAGE_SIZE);
phys_bytes ph;
if((r=sys_umap(SELF, VM_D, (vir_bytes) v,
VM_PAGE_SIZE*SPAREPAGES, &ph)) != OK)
panic("pt_init: sys_umap failed: %d", r);
- if(s >= STATIC_SPAREPAGES) {
- sparepages[s].page = NULL;
- missing_spares++;
- continue;
- }
- sparepages[s].page = (void *) v;
- sparepages[s].phys = ph;
+ reservedqueue_add(spare_pagequeue, v, ph);
}
#if defined(__i386__)
pt_init_done = 1;
- vm_checkspares();
-
/* All OK. */
return;
}
return OK;
}
-/*===========================================================================*
- * pt_cycle *
- *===========================================================================*/
-void pt_cycle(void)
-{
- vm_checkspares();
-}
-
int get_vm_self_pages(void) { return vm_self_pages; }
SANITYCHECK(SCL_TOP);
if(missing_spares > 0) {
- pt_cycle(); /* pagetable code wants to be called */
+ alloc_cycle(); /* mem alloc code wants to be called */
}
if ((r=sef_receive_status(ANY, &msg, &rcv_sts)) != OK)
* though.
*/
if(missing_spares > 0) {
- pt_cycle(); /* pagetable code wants to be called */
+ alloc_cycle(); /* mem alloc code wants to be called */
}
pt_clearmapcache();
#include "yielded.h"
/* alloc.c */
+void *reservedqueue_new(int, int, int, int);
+int reservedqueue_alloc(void *, phys_bytes *, void **);
+void reservedqueue_add(void *, void *, phys_bytes);
+void alloc_cycle(void);
void mem_sanitycheck(char *file, int line);
phys_clicks alloc_mem(phys_clicks clicks, u32_t flags);
void memstats(int *nodes, int *pages, int *largest);
physaddr, size_t bytes, u32_t flags, u32_t writemapflags);
int pt_checkrange(pt_t *pt, vir_bytes v, size_t bytes, int write);
int pt_bind(pt_t *pt, struct vmproc *who);
+void *vm_mappages(phys_bytes p, int pages);
void *vm_allocpage(phys_bytes *p, int cat);
void *vm_allocpages(phys_bytes *p, int cat, int pages);
void *vm_allocpagedir(phys_bytes *p);
-void pt_cycle(void);
int pt_mapkernel(pt_t *pt);
void vm_pagelock(void *vir, int lockflag);
int vm_addrok(void *vir, int write);