#define MAP_ALIGN64K 0x0040 /* physically aligned at 64kB */
#define MAP_LOWER1M 0x0080 /* physically below 1MB */
#define MAP_ALIGNMENT_64KB MAP_ALIGN64K
-#define MAP_IPC_SHARED 0x0100 /* share changes */
#define MAP_FIXED 0x0200 /* require mapping to happen at hint */
#define MAP_THIRDPARTY 0x0400 /* perform on behalf of any process */
int id;
struct shmid_ds shmid_ds;
vir_bytes page;
- phys_bytes phys;
+ int vm_id;
};
static struct shm_struct shm_list[MAX_SHM_NR];
static int shm_list_nr = 0;
shm = &shm_list[shm_list_nr];
memset(shm, 0, sizeof(struct shm_struct));
shm->page = (vir_bytes) minix_mmap(0, size,
- PROT_READ|PROT_WRITE,
- MAP_CONTIG|MAP_PREALLOC|MAP_ANON|MAP_IPC_SHARED,
- -1, 0);
+ PROT_READ|PROT_WRITE, MAP_ANON, -1, 0);
if (shm->page == (vir_bytes) MAP_FAILED)
return ENOMEM;
- shm->phys = vm_getphys(SELF_E, (void *) shm->page);
+ shm->vm_id = vm_getphys(SELF_E, (void *) shm->page);
memset((void *)shm->page, 0, size);
shm->shmid_ds.shm_perm.cuid =
int do_shmdt(message *m)
{
vir_bytes addr;
- phys_bytes paddr;
+ phys_bytes vm_id;
int i;
addr = m->SHMDT_ADDR;
- if ((paddr = vm_getphys(who_e, (void *) addr)) == 0)
+ if ((vm_id = vm_getphys(who_e, (void *) addr)) == 0)
return EINVAL;
for (i = 0; i < shm_list_nr; i++) {
- if (shm_list[i].phys == paddr) {
+ if (shm_list[i].vm_id == vm_id) {
struct shm_struct *shm = &shm_list[i];
shm->shmid_ds.shm_atime = time(NULL);
}
}
if (i == shm_list_nr)
- fprintf(stderr, "IPC: do_shmdt impossible error!\n");
+ printf("IPC: do_shmdt impossible error! could not find id %lu to unmap\n",
+ vm_id);
update_refcount_and_destroy();
/* NULL indicates the end of a list of mappings, nothing else to do */
if (!vri) return;
- printf(" %08lx-%08lx %c%c%c %c (%lu kB)\n", vri->vri_addr,
+ printf(" %08lx-%08lx %c%c%c (%lu kB)\n", vri->vri_addr,
vri->vri_addr + vri->vri_length,
(vri->vri_prot & PROT_READ) ? 'r' : '-',
(vri->vri_prot & PROT_WRITE) ? 'w' : '-',
(vri->vri_prot & PROT_EXEC) ? 'x' : '-',
- (vri->vri_flags & MAP_IPC_SHARED) ? 's' : 'p',
vri->vri_length / 1024L);
(*n)++;
}
break;
for (i = 0; i < r; i++) {
- buf_printf("%08lx-%08lx %c%c%c %c\n",
+ buf_printf("%08lx-%08lx %c%c%c\n",
vri[i].vri_addr, vri[i].vri_addr + vri[i].vri_length,
(vri[i].vri_prot & PROT_READ) ? 'r' : '-',
(vri[i].vri_prot & PROT_WRITE) ? 'w' : '-',
- (vri[i].vri_prot & PROT_EXEC) ? 'x' : '-',
- (vri[i].vri_flags & MAP_IPC_SHARED) ? 's' : 'p');
+ (vri[i].vri_prot & PROT_EXEC) ? 'x' : '-');
count++;
}
PROG= vm
SRCS= main.c alloc.c utility.c exit.c fork.c break.c \
mmap.c slaballoc.c region.c pagefaults.c \
- physravl.c rs.c queryexit.c yieldedavl.c regionavl.c pb.c
+ physravl.c rs.c queryexit.c yieldedavl.c regionavl.c pb.c \
+ mem_anon.c mem_directphys.c mem_anon_contig.c mem_shared.c
DPADD+= ${LIBSYS}
LDADD+= -lsys -lexec
#endif
-/*===========================================================================*
- * alloc_mem_in_list *
- *===========================================================================*/
-struct memlist *alloc_mem_in_list(phys_bytes bytes, u32_t flags, phys_bytes known)
-{
- phys_bytes rempages, phys_count;
- struct memlist *head = NULL, *tail = NULL;
-
- assert(bytes > 0);
- assert(!(bytes % VM_PAGE_SIZE));
-
- rempages = bytes / VM_PAGE_SIZE;
-
- assert(!(flags & PAF_CONTIG));
-
- if(known != MAP_NONE) {
- phys_count = known;
- }
-
- do {
- struct memlist *ml;
- phys_bytes mem;
- vir_bytes freed = 0;
-
- do {
- if(known == MAP_NONE) {
- mem = alloc_pages(1, flags);
-
- if(mem == NO_MEM) {
- freed = free_yielded(rempages * VM_PAGE_SIZE);
- }
- } else {
- mem = ABS2CLICK(phys_count);
- phys_count += VM_PAGE_SIZE;
- assert(mem != NO_MEM);
- }
- } while(mem == NO_MEM && freed > 0);
-
- if(mem == NO_MEM) {
- printf("alloc_mem_in_list: giving up, %lukB missing\n",
- rempages * VM_PAGE_SIZE/1024);
- printmemstats();
- free_mem_list(head, 1);
- return NULL;
- }
-
- if(!(SLABALLOC(ml))) {
- free_mem_list(head, 1);
- free_pages(mem, VM_PAGE_SIZE);
- return NULL;
- }
-
- USE(ml, ml->phys = CLICK2ABS(mem); ml->next = NULL;);
- if(tail) {
- USE(tail,
- tail->next = ml;);
- }
- tail = ml;
- if(!head)
- head = ml;
- rempages--;
- } while(rempages > 0);
-
- return head;
-}
-
-/*===========================================================================*
- * free_mem_list *
- *===========================================================================*/
-void free_mem_list(struct memlist *list, int all)
-{
- while(list) {
- struct memlist *next;
- next = list->next;
- assert(!(list->phys % VM_PAGE_SIZE));
- if(all)
- free_pages(list->phys / VM_PAGE_SIZE, 1);
- SLABFREE(list);
- list = next;
- }
-}
-
-/*===========================================================================*
- * print_mem_list *
- *===========================================================================*/
-void print_mem_list(struct memlist *list)
-{
- while(list) {
- printf("0x%lx-0x%lx", list->phys, list->phys+VM_PAGE_SIZE-1);
- printf(" ");
- list = list->next;
- }
- printf("\n");
-}
-
EXTERN char *sc_lastfile;
#endif
+/* mem types */
+EXTERN mem_type_t mem_type_anon, /* anonymous memory */
+ mem_type_directphys, /* direct physical mapping memory */
+ mem_type_anon_contig, /* physically contig anon memory */
+ mem_type_shared; /* memory shared by multiple processes */
+
/* total number of memory pages */
EXTERN int total_pages;
{
struct vmproc *vmp = ((struct vm_exec_info *) execi->opaque)->vmp;
- if(!(map_page_region(vmp, vaddr, 0,
- len, MAP_NONE, VR_ANON | VR_WRITABLE | VR_UNINITIALIZED, flags))) {
+ if(!(map_page_region(vmp, vaddr, 0, len,
+ VR_ANON | VR_WRITABLE | VR_UNINITIALIZED, flags,
+ &mem_type_anon))) {
panic("VM: exec: map_page_region for boot process failed");
}
}
CALLMAP(VM_MMAP, do_mmap);
CALLMAP(VM_MUNMAP, do_munmap);
CALLMAP(VM_MAP_PHYS, do_map_phys);
- CALLMAP(VM_UNMAP_PHYS, do_unmap_phys);
+ CALLMAP(VM_UNMAP_PHYS, do_munmap);
/* Calls from PM. */
CALLMAP(VM_EXIT, do_exit);
CALLMAP(VM_REMAP, do_remap);
CALLMAP(VM_REMAP_RO, do_remap);
CALLMAP(VM_GETPHYS, do_get_phys);
- CALLMAP(VM_SHM_UNMAP, do_shared_unmap);
+ CALLMAP(VM_SHM_UNMAP, do_munmap);
CALLMAP(VM_GETREF, do_get_refcount);
CALLMAP(VM_INFO, do_info);
CALLMAP(VM_QUERY_EXIT, do_query_exit);
--- /dev/null
+
+/* This file implements the methods of anonymous memory.
+ *
+ * Anonymous memory is memory that is private to a process
+ * and is not backed by a file (hence anonymous).
+ */
+
+#include <assert.h>
+
+#include "proto.h"
+#include "vm.h"
+#include "region.h"
+#include "glo.h"
+
+/* These functions are static so as to not pollute the
+ * global namespace, and are accessed through their function
+ * pointers.
+ */
+
+static int anon_reference(struct phys_region *pr);
+static int anon_unreference(struct phys_region *pr);
+static int anon_pagefault(struct vmproc *vmp, struct vir_region *region,
+ struct phys_region *ph, int write);
+static int anon_sanitycheck(struct phys_region *pr, char *file, int line);
+static int anon_writable(struct phys_region *pr);
+static int anon_resize(struct vmproc *vmp, struct vir_region *vr, vir_bytes l);
+static u32_t anon_regionid(struct vir_region *region);
+static int anon_refcount(struct vir_region *vr);
+
+struct mem_type mem_type_anon = {
+ .name = "anonymous memory",
+ .ev_reference = anon_reference,
+ .ev_unreference = anon_unreference,
+ .ev_pagefault = anon_pagefault,
+ .ev_resize = anon_resize,
+ .ev_sanitycheck = anon_sanitycheck,
+ .regionid = anon_regionid,
+ .writable = anon_writable,
+ .refcount = anon_refcount
+};
+
+static int anon_reference(struct phys_region *pr)
+{
+ return OK;
+}
+
+/* Drop the final reference to an anonymous physical block: return its
+ * backing page to the free pool. Only called once the refcount is 0.
+ */
+static int anon_unreference(struct phys_region *pr)
+{
+	assert(pr->ph->refcount == 0);
+	/* MAP_NONE means the block was never faulted in and so never got
+	 * a physical page; in that case there is nothing to free.
+	 */
+	if(pr->ph->phys != MAP_NONE)
+		free_mem(ABS2CLICK(pr->ph->phys), 1);
+	return OK;
+}
+
+/* Handle a page fault on anonymous memory.
+ *
+ * Two cases are served with a freshly allocated page:
+ *  - first touch: the phys_block has no physical page yet (MAP_NONE);
+ *    the new page simply becomes its backing store;
+ *  - copy-on-write: a write fault on a block shared by >= 2 regions;
+ *    the page contents are copied into the new page and this region
+ *    is relinked to a new private phys_block.
+ * Returns OK, ENOMEM on allocation failure, or EFAULT on a write to
+ * read-only shared (non-COW) memory.
+ */
+static int anon_pagefault(struct vmproc *vmp, struct vir_region *region,
+	struct phys_region *ph, int write)
+{
+	phys_bytes new_page, new_page_cl;
+	struct phys_block *pb;
+	u32_t allocflags;
+
+	allocflags = vrallocflags(region->flags);
+
+	assert(ph->ph->refcount > 0);
+
+	/* Allocate the page up front; both the first-touch and the
+	 * copy-on-write paths need one.
+	 */
+	if((new_page_cl = alloc_mem(1, allocflags)) == NO_MEM)
+		return ENOMEM;
+	new_page = CLICK2ABS(new_page_cl);
+
+	/* Totally new block? Create it. */
+	if(ph->ph->phys == MAP_NONE) {
+		ph->ph->phys = new_page;
+		assert(ph->ph->phys != MAP_NONE);
+
+		return OK;
+	}
+
+	if(ph->ph->refcount < 2 || !write) {
+		printf("anon_pagefault: %d refcount, %d write - not handling pagefault\n",
+			ph->ph->refcount, write);
+		/* Nothing to do for this fault; release the page allocated
+		 * above so it is not leaked.
+		 */
+		free_mem(new_page_cl, 1);
+		return OK;
+	}
+
+	assert(region->flags & VR_WRITABLE);
+
+	/* A write fault on a shared block that is not marked COW means
+	 * someone is writing read-only shared memory.
+	 */
+	if(ph->ph->share_flag != PBSH_COW) {
+		printf("VM: write RO mapped pages.\n");
+		free_mem(new_page_cl, 1);
+		return EFAULT;
+	}
+
+	/* Copy the shared page into our private copy. */
+	if(sys_abscopy(ph->ph->phys, new_page, VM_PAGE_SIZE) != OK) {
+		panic("VM: abscopy failed\n");
+		return EFAULT;	/* not reached; panic does not return */
+	}
+
+	if(!(pb = pb_new(new_page))) {
+		free_mem(new_page_cl, 1);
+		return ENOMEM;
+	}
+
+	/* Detach from the shared block (without freeing it; other
+	 * references remain) and link to the new private block.
+	 */
+	pb_unreferenced(region, ph, 0);
+	pb_link(ph, pb, ph->offset, region);
+
+	return OK;
+}
+
+static int anon_sanitycheck(struct phys_region *pr, char *file, int line)
+{
+ MYASSERT(usedpages_add(pr->ph->phys, VM_PAGE_SIZE) == OK);
+ return OK;
+}
+
+/* Should this anonymous page be mapped in writable?
+ * Remapped (shared-out) regions are always mapped writable; otherwise a
+ * page is writable only if it is present and not shared (refcount == 1),
+ * so that shared pages stay read-only and fault for copy-on-write.
+ */
+static int anon_writable(struct phys_region *pr)
+{
+	assert(pr->ph->refcount > 0);
+	if(pr->parent->remaps > 0)
+		return 1;
+	return pr->ph->phys != MAP_NONE && pr->ph->refcount == 1;
+}
+
+/* Grow an anonymous region to length l. Shrinking is not implemented
+ * and is silently ignored, which is ok for brk().
+ * Note: vr must be valid before we read vr->length, so the null check
+ * comes first.
+ */
+static int anon_resize(struct vmproc *vmp, struct vir_region *vr, vir_bytes l)
+{
+	assert(vr);
+
+	/* Shrinking not implemented; silently ignored.
+	 * (Which is ok for brk().)
+	 */
+	if(l <= vr->length)
+		return OK;
+
+	assert(vr->flags & VR_ANON);
+	assert(!(l % VM_PAGE_SIZE));
+
+	USE(vr, vr->length = l;);
+
+	return OK;
+}
+
+static u32_t anon_regionid(struct vir_region *region)
+{
+ return region->id;
+}
+
+static int anon_refcount(struct vir_region *vr)
+{
+ return 1 + vr->remaps;
+}
+
--- /dev/null
+
+/* This file implements the methods of physically contiguous anonymous memory. */
+
+#include <assert.h>
+
+#include "proto.h"
+#include "vm.h"
+#include "region.h"
+#include "glo.h"
+
+static int anon_contig_reference(struct phys_region *pr);
+static int anon_contig_unreference(struct phys_region *pr);
+static int anon_contig_pagefault(struct vmproc *vmp, struct vir_region *region,
+ struct phys_region *ph, int write);
+static int anon_contig_sanitycheck(struct phys_region *pr, char *file, int line);
+static int anon_contig_writable(struct phys_region *pr);
+static int anon_contig_resize(struct vmproc *vmp, struct vir_region *vr, vir_bytes l);
+static int anon_contig_new(struct vir_region *vr);
+
+struct mem_type mem_type_anon_contig = {
+ .name = "anonymous memory (physically contiguous)",
+ .ev_new = anon_contig_new,
+ .ev_reference = anon_contig_reference,
+ .ev_unreference = anon_contig_unreference,
+ .ev_pagefault = anon_contig_pagefault,
+ .ev_resize = anon_contig_resize,
+ .ev_sanitycheck = anon_contig_sanitycheck,
+ .writable = anon_contig_writable
+};
+
+static int anon_contig_pagefault(struct vmproc *vmp, struct vir_region *region,
+ struct phys_region *ph, int write)
+{
+ panic("anon_contig_pagefault: pagefault cannot happen");
+}
+
+/* Initialize a physically contiguous anonymous region.
+ * Works in two phases: first pre-create an (unbacked) phys_block for
+ * every page of the region, then allocate one contiguous physical range
+ * and assign consecutive addresses from it to the blocks in order.
+ * Pre-allocating everything here is what makes a pagefault on this
+ * memory type impossible (see anon_contig_pagefault).
+ * Returns OK or ENOMEM; on failure the region is freed via map_free().
+ */
+static int anon_contig_new(struct vir_region *region)
+{
+	u32_t allocflags;
+	phys_bytes new_pages, new_page_cl, cur_ph;
+	int p, pages;
+	physr_iter iter;
+
+	allocflags = vrallocflags(region->flags);
+
+	pages = region->length/VM_PAGE_SIZE;
+
+	assert(physregions(region) == 0);
+
+	/* Phase 1: create one phys_block + phys_region per page, not yet
+	 * backed by physical memory (MAP_NONE).
+	 */
+	for(p = 0; p < pages; p++) {
+		struct phys_block *pb = pb_new(MAP_NONE);
+		struct phys_region *pr = NULL;
+		if(pb)
+			pr = pb_reference(pb, p * VM_PAGE_SIZE, region);
+		if(!pr) {
+			if(pb) pb_free(pb);
+			map_free(region);
+			return ENOMEM;
+		}
+	}
+
+	assert(physregions(region) == pages);
+
+	/* Phase 2: one contiguous allocation for the whole region. */
+	if((new_page_cl = alloc_mem(pages, allocflags)) == NO_MEM) {
+		map_free(region);
+		return ENOMEM;
+	}
+
+	cur_ph = new_pages = CLICK2ABS(new_page_cl);
+
+	physr_start_iter_least(region->phys, &iter);
+
+	/* Hand each block its address: base + its offset in the region. */
+	for(p = 0; p < pages; p++) {
+		struct phys_region *pr = physr_get_iter(&iter);
+		assert(pr);
+		assert(pr->ph);
+		assert(pr->ph->phys == MAP_NONE);
+		assert(pr->offset == p * VM_PAGE_SIZE);
+		pr->ph->phys = cur_ph + pr->offset;
+		physr_incr_iter(&iter);
+	}
+
+	assert(!physr_get_iter(&iter));
+
+	return OK;
+}
+
+static int anon_contig_resize(struct vmproc *vmp, struct vir_region *vr, vir_bytes l)
+{
+ printf("VM: cannot resize physically contiguous memory.\n");
+ return ENOMEM;
+}
+
+static int anon_contig_reference(struct phys_region *pr)
+{
+ printf("VM: cannot fork with physically contig memory.\n");
+ return ENOMEM;
+}
+
+/* Methods inherited from the anonymous memory methods. */
+
+static int anon_contig_unreference(struct phys_region *pr)
+{
+ return mem_type_anon.ev_unreference(pr);
+}
+
+static int anon_contig_sanitycheck(struct phys_region *pr, char *file, int line)
+{
+ return mem_type_anon.ev_sanitycheck(pr, file, line);
+}
+
+static int anon_contig_writable(struct phys_region *pr)
+{
+ return mem_type_anon.writable(pr);
+}
+
--- /dev/null
+
+/* This file implements the methods of direct physical mapping.
+ *
+ * A direct physical mapping is done by accepting the physical
+ * memory address and range from the caller and allowing direct
+ * access to it. Most significantly, no physical memory is allocated
+ * when it's mapped or freed when it's unmapped. E.g. device memory.
+ */
+
+#include "vm.h"
+
+/* These functions are static so as to not pollute the
+ * global namespace, and are accessed through their function
+ * pointers.
+ */
+
+static int phys_reference(struct phys_region *pr);
+static int phys_unreference(struct phys_region *pr);
+static int phys_writable(struct phys_region *pr);
+static int phys_pagefault(struct vmproc *vmp, struct vir_region *region,
+ struct phys_region *ph, int write);
+static int phys_copy(struct vir_region *vr, struct vir_region *newvr);
+
+struct mem_type mem_type_directphys = {
+ .name = "physical memory mapping",
+ .ev_reference = phys_reference,
+ .ev_copy = phys_copy,
+ .ev_unreference = phys_unreference,
+ .writable = phys_writable,
+ .ev_pagefault = phys_pagefault
+};
+
+static int phys_reference(struct phys_region *pr)
+{
+ panic("%s", __FUNCTION__);
+ return OK;
+}
+
+static int phys_unreference(struct phys_region *pr)
+{
+ return OK;
+}
+
+/* Page fault on a direct physical mapping: no memory is allocated;
+ * the page's address is simply the caller-supplied physical base
+ * (region->param.phys, set via phys_setphys) plus the page's offset
+ * within the region.
+ */
+static int phys_pagefault(struct vmproc *vmp, struct vir_region *region,
+	struct phys_region *ph, int write)
+{
+	phys_bytes arg = region->param.phys, phmem;
+	assert(arg != MAP_NONE);
+	assert(ph->ph->phys == MAP_NONE);
+	phmem = arg + ph->offset;
+	assert(phmem != MAP_NONE);
+	ph->ph->phys = phmem;
+	return OK;
+}
+
+static int phys_writable(struct phys_region *pr)
+{
+ assert(pr->ph->refcount > 0);
+ return pr->ph->phys != MAP_NONE;
+}
+
+void phys_setphys(struct vir_region *vr, phys_bytes phys)
+{
+ vr->param.phys = phys;
+}
+
+static int phys_copy(struct vir_region *vr, struct vir_region *newvr)
+{
+ newvr->param.phys = vr->param.phys;
+
+ return OK;
+}
--- /dev/null
+
+/* This file implements the methods of shared memory. */
+
+#include <assert.h>
+
+#include "proto.h"
+#include "vm.h"
+#include "region.h"
+#include "glo.h"
+
+/* These functions are static so as to not pollute the
+ * global namespace, and are accessed through their function
+ * pointers.
+ */
+
+static int shared_reference(struct phys_region *pr);
+static int shared_unreference(struct phys_region *pr);
+static int shared_pagefault(struct vmproc *vmp, struct vir_region *region,
+ struct phys_region *ph, int write);
+static int shared_sanitycheck(struct phys_region *pr, char *file, int line);
+static int shared_writable(struct phys_region *pr);
+static void shared_delete(struct vir_region *region);
+static u32_t shared_regionid(struct vir_region *region);
+static int shared_copy(struct vir_region *vr, struct vir_region *newvr);
+static int shared_refcount(struct vir_region *vr);
+
+struct mem_type mem_type_shared = {
+ .name = "shared memory",
+ .ev_reference = shared_reference,
+ .ev_copy = shared_copy,
+ .ev_unreference = shared_unreference,
+ .ev_pagefault = shared_pagefault,
+ .ev_sanitycheck = shared_sanitycheck,
+ .ev_delete = shared_delete,
+ .regionid = shared_regionid,
+ .refcount = shared_refcount,
+ .writable = shared_writable
+};
+
+static int shared_reference(struct phys_region *pr)
+{
+ return OK;
+}
+
+static int shared_unreference(struct phys_region *pr)
+{
+ return mem_type_anon.ev_unreference(pr);
+}
+
+/* Resolve the source of a shared region: the process (*vmp) and the
+ * anonymous region (*r) that own the actual memory. Validates the
+ * whole chain - the region really is shared-typed, its source
+ * parameters are set, the source endpoint still exists, the source
+ * vaddr still maps to a region, that region is anonymous, and its id
+ * still matches (guards against the source being unmapped and the
+ * address space reused). Returns OK or EINVAL.
+ */
+static int getsrc(struct vir_region *region,
+	struct vmproc **vmp, struct vir_region **r)
+{
+	int srcproc;
+
+	if(region->memtype != &mem_type_shared) {
+		printf("shared region hasn't shared type but %s.\n",
+			region->memtype->name);
+		return EINVAL;
+	}
+
+	if(!region->param.shared.ep || !region->param.shared.vaddr) {
+		printf("shared region has not defined source region.\n");
+		util_stacktrace();
+		return EINVAL;
+	}
+
+	if(vm_isokendpt((endpoint_t) region->param.shared.ep, &srcproc) != OK) {
+		printf("VM: shared memory with missing source process.\n");
+		util_stacktrace();
+		return EINVAL;
+	}
+
+	*vmp = &vmproc[srcproc];
+
+	if(!(*r=map_lookup(*vmp, region->param.shared.vaddr, NULL))) {
+		printf("VM: shared memory with missing vaddr 0x%lx.\n",
+			region->param.shared.vaddr);
+		return EINVAL;
+	}
+
+	/* Shared memory can only be shared out of anonymous memory. */
+	if((*r)->memtype != &mem_type_anon) {
+		printf("source region hasn't anon type but %s.\n",
+			(*r)->memtype->name);
+		return EINVAL;
+	}
+
+	if(region->param.shared.id != (*r)->id) {
+		printf("source region has no matching id\n");
+		return EINVAL;
+	}
+
+	return OK;
+}
+
+static u32_t shared_regionid(struct vir_region *vr)
+{
+ struct vir_region *src_region;
+ struct vmproc *src_vmp;
+
+ if(getsrc(vr, &src_vmp, &src_region) != OK)
+ return 0;
+
+ return src_region->id;
+}
+
+/* A shared region is being deleted: drop the remap reference it held
+ * on its source region (taken in shared_setsource). If the source can
+ * no longer be resolved there is nothing to release.
+ */
+static void shared_delete(struct vir_region *region)
+{
+	struct vir_region *src_region;
+	struct vmproc *src_vmp;
+
+	if(getsrc(region, &src_vmp, &src_region) != OK)
+		return;
+
+	assert(src_region->remaps > 0);
+	src_region->remaps--;
+}
+
+/* Page fault on shared memory: the page is owned by the source region,
+ * so make sure the source has a page at this offset (faulting it in on
+ * the source's behalf if necessary) and then link our phys_region to
+ * the source's phys_block instead of allocating memory of our own.
+ */
+static int shared_pagefault(struct vmproc *vmp, struct vir_region *region,
+	struct phys_region *ph, int write)
+{
+	struct vir_region *src_region;
+	struct vmproc *src_vmp;
+	struct phys_region *pr;
+
+	if(getsrc(region, &src_vmp, &src_region) != OK) {
+		return EINVAL;
+	}
+
+	/* Discard our placeholder block; we will share the source's. */
+	assert(ph->ph->phys == MAP_NONE);
+	pb_free(ph->ph);
+
+	/* If the source has no page here yet, fault it in first. */
+	if(!(pr = physr_search(src_region->phys, ph->offset, AVL_EQUAL))) {
+		int r;
+		if((r=map_pf(src_vmp, src_region, ph->offset, write)) != OK)
+			return r;
+		if(!(pr = physr_search(src_region->phys, ph->offset,
+			AVL_EQUAL))) {
+			panic("missing region after pagefault handling");
+		}
+	}
+
+	pb_link(ph, pr->ph, ph->offset, region);
+
+	return OK;
+}
+
+static int shared_sanitycheck(struct phys_region *pr, char *file, int line)
+{
+ return OK;
+}
+
+static int shared_writable(struct phys_region *pr)
+{
+ assert(pr->ph->refcount > 0);
+ return pr->ph->phys != MAP_NONE;
+}
+
+/* Record where a shared region gets its memory from: the endpoint ep
+ * plus the source region's vaddr and id (the id lets getsrc() detect a
+ * stale source later). Takes a remap reference on the source region,
+ * released again by shared_delete().
+ */
+void shared_setsource(struct vir_region *vr, endpoint_t ep,
+	struct vir_region *src_vr)
+{
+	struct vmproc *vmp;
+	struct vir_region *srcvr;
+	int id = src_vr->id;
+	vir_bytes vaddr = src_vr->vaddr;
+
+	assert(vr->memtype == &mem_type_shared);
+
+	if(!ep || !vaddr || !id) {
+		printf("VM: shared_setsource: zero ep/vaddr/id - ignoring\n");
+		return;
+	}
+
+	vr->param.shared.ep = ep;
+	vr->param.shared.vaddr = vaddr;
+	vr->param.shared.id = id;
+
+	/* Sanity check: the parameters just stored must resolve back to
+	 * the very region we were given.
+	 */
+	if(getsrc(vr, &vmp, &srcvr) != OK)
+		panic("initial getsrc failed");
+
+	assert(srcvr == src_vr);
+
+	srcvr->remaps++;
+}
+
+static int shared_copy(struct vir_region *vr, struct vir_region *newvr)
+{
+ struct vmproc *vmp;
+ struct vir_region *srcvr;
+
+ if(getsrc(vr, &vmp, &srcvr) != OK)
+ panic("copy: original getsrc failed");
+
+ shared_setsource(newvr, vr->param.shared.ep, srcvr);
+
+ return OK;
+}
+
+static int shared_refcount(struct vir_region *vr)
+{
+ return 1 + vr->remaps;
+}
+
--- /dev/null
+
+#ifndef _MEMTYPE_H
+#define _MEMTYPE_H 1
+
+struct vmproc;
+struct vir_region;
+struct phys_region;
+
+typedef struct mem_type {
+ char *name; /* human-readable name */
+ int (*ev_new)(struct vir_region *region);
+ void (*ev_delete)(struct vir_region *region);
+ int (*ev_reference)(struct phys_region *pr);
+ int (*ev_unreference)(struct phys_region *pr);
+ int (*ev_pagefault)(struct vmproc *vmp, struct vir_region *region,
+ struct phys_region *ph, int write);
+ int (*ev_resize)(struct vmproc *vmp, struct vir_region *vr, vir_bytes len);
+ int (*writable)(struct phys_region *pr);
+ int (*ev_sanitycheck)(struct phys_region *pr, char *file, int line);
+ int (*ev_copy)(struct vir_region *vr, struct vir_region *newvr);
+ u32_t (*regionid)(struct vir_region *vr);
+ int (*refcount)(struct vir_region *vr);
+} mem_type_t;
+
+#endif
+
vmp = &vmproc[n];
if(m->VMM_FD == -1 || (m->VMM_FLAGS & MAP_ANON)) {
+ mem_type_t *mt;
u32_t vrflags = VR_ANON | VR_WRITABLE;
size_t len = (vir_bytes) m->VMM_LEN;
if(!execpriv) return EPERM;
vrflags |= VR_UNINITIALIZED;
}
- if(m->VMM_FLAGS & MAP_IPC_SHARED) {
- vrflags |= VR_SHARED;
- /* Shared memory has to be preallocated. */
- if((m->VMM_FLAGS & (MAP_PREALLOC|MAP_ANON)) !=
- (MAP_PREALLOC|MAP_ANON)) {
- return EINVAL;
- }
- }
- if(m->VMM_FLAGS & MAP_CONTIG) vrflags |= VR_CONTIG;
+ if(m->VMM_FLAGS & MAP_CONTIG) {
+ vrflags |= VR_CONTIG;
+ mt = &mem_type_anon_contig;
+ } else mt = &mem_type_anon;
if(len % VM_PAGE_SIZE)
len += VM_PAGE_SIZE - (len % VM_PAGE_SIZE);
if (m->VMM_ADDR || (m->VMM_FLAGS & MAP_FIXED)) {
/* An address is given, first try at that address. */
addr = m->VMM_ADDR;
- vr = map_page_region(vmp, addr, 0, len, MAP_NONE,
- vrflags, mfflags);
+ vr = map_page_region(vmp, addr, 0, len,
+ vrflags, mfflags, mt);
if(!vr && (m->VMM_FLAGS & MAP_FIXED))
return ENOMEM;
}
if (!vr) {
/* No address given or address already in use. */
vr = map_page_region(vmp, 0, VM_DATATOP, len,
- MAP_NONE, vrflags, mfflags);
+ vrflags, mfflags, mt);
}
if (!vr) {
return ENOMEM;
if(len % VM_PAGE_SIZE)
len += VM_PAGE_SIZE - (len % VM_PAGE_SIZE);
- if(!(vr = map_page_region(vmp, 0, VM_DATATOP, len, startaddr,
- VR_DIRECT | VR_NOPF | VR_WRITABLE, 0))) {
+ if(!(vr = map_page_region(vmp, 0, VM_DATATOP, len,
+ VR_DIRECT | VR_WRITABLE, 0, &mem_type_directphys))) {
return ENOMEM;
}
- m->VMMP_VADDR_REPLY = (void *) (vr->vaddr + offset);
-
- return OK;
-}
+ phys_setphys(vr, startaddr);
-/*===========================================================================*
- * do_unmap_phys *
- *===========================================================================*/
-int do_unmap_phys(message *m)
-{
- int r, n;
- struct vmproc *vmp;
- endpoint_t target;
- struct vir_region *region;
-
- target = m->VMUP_EP;
- if(target == SELF)
- target = m->m_source;
-
- if((r=vm_isokendpt(target, &n)) != OK)
- return EINVAL;
-
- vmp = &vmproc[n];
-
- if(!(region = map_lookup(vmp, (vir_bytes) m->VMUM_ADDR, NULL))) {
- return EINVAL;
- }
-
- if(!(region->flags & VR_DIRECT)) {
- return EINVAL;
- }
-
- if(map_unmap_region(vmp, region, 0, region->length) != OK) {
- return EINVAL;
- }
+ m->VMMP_VADDR_REPLY = (void *) (vr->vaddr + offset);
return OK;
}
int do_remap(message *m)
{
int dn, sn;
- vir_bytes da, sa, startv;
+ vir_bytes da, sa;
size_t size;
- struct vir_region *region;
+ u32_t flags;
+ struct vir_region *src_region, *vr;
struct vmproc *dvmp, *svmp;
int r;
int readonly;
dvmp = &vmproc[dn];
svmp = &vmproc[sn];
- /* da is not translated by arch_vir2map(),
- * it's handled a little differently,
- * since in map_remap(), we have to know
- * about whether the user needs to bind to
- * THAT address or be chosen by the system.
- */
- if (!(region = map_lookup(svmp, sa, NULL)))
+ if (!(src_region = map_lookup(svmp, sa, NULL)))
return EINVAL;
- if(region->vaddr != sa) {
+ if(src_region->vaddr != sa) {
printf("VM: do_remap: not start of region.\n");
return EFAULT;
}
- if(!(region->flags & VR_SHARED)) {
- printf("VM: do_remap: not shared.\n");
- return EFAULT;
- }
-
if (size % VM_PAGE_SIZE)
size += VM_PAGE_SIZE - size % VM_PAGE_SIZE;
- if(size != region->length) {
+ if(size != src_region->length) {
printf("VM: do_remap: not size of region.\n");
return EFAULT;
}
- if ((r = map_remap(dvmp, da, size, region, &startv, readonly)) != OK)
- return r;
+ flags = VR_SHARED;
+ if(!readonly)
+ flags |= VR_WRITABLE;
- m->VMRE_RETA = (char *) startv;
- return OK;
-}
-
-/*===========================================================================*
- * do_shared_unmap *
- *===========================================================================*/
-int do_shared_unmap(message *m)
-{
- int r, n;
- struct vmproc *vmp;
- endpoint_t target;
- struct vir_region *vr;
- vir_bytes addr;
-
- target = m->VMUN_ENDPT;
- if (target == SELF)
- target = m->m_source;
-
- if ((r = vm_isokendpt(target, &n)) != OK)
- return EINVAL;
+ if(da)
+ vr = map_page_region(dvmp, da, 0, size, flags, 0,
+ &mem_type_shared);
+ else
+ vr = map_page_region(dvmp, 0, VM_DATATOP, size, flags, 0,
+ &mem_type_shared);
- vmp = &vmproc[n];
-
- addr = m->VMUN_ADDR;
-
- if(!(vr = map_lookup(vmp, addr, NULL))) {
- printf("VM: addr 0x%lx not found.\n", m->VMUN_ADDR);
- return EFAULT;
- }
-
- if(vr->vaddr != addr) {
- printf("VM: wrong address for shared_unmap.\n");
- return EFAULT;
- }
-
- if(!(vr->flags & VR_SHARED)) {
- printf("VM: address does not point to shared region.\n");
- return EFAULT;
+ if(!vr) {
+ printf("VM: re-map of shared area failed\n");
+ return ENOMEM;
}
- if(map_unmap_region(vmp, vr, 0, vr->length) != OK)
- panic("do_shared_unmap: map_unmap_region failed");
+ shared_setsource(vr, svmp->vm_endpoint, src_region);
+ m->VMRE_RETA = (char *) vr->vaddr;
return OK;
}
struct vmproc *vmp;
vir_bytes addr, len, offset;
struct vir_region *vr;
+ endpoint_t target = SELF;
+
+ if(m->m_type == VM_UNMAP_PHYS) {
+ target = m->VMUP_EP;
+ } else if(m->m_type == VM_SHM_UNMAP) {
+ target = m->VMUN_ENDPT;
+ }
+
+ if(target == SELF)
+ target = m->m_source;
- if((r=vm_isokendpt(m->m_source, &n)) != OK) {
+ if((r=vm_isokendpt(target, &n)) != OK) {
panic("do_mmap: message from strange source: %d", m->m_source);
}
vmp = &vmproc[n];
- assert(m->m_type == VM_MUNMAP);
- addr = (vir_bytes) (vir_bytes) m->VMUM_ADDR;
+ if(m->m_type == VM_SHM_UNMAP) {
+ addr = (vir_bytes) m->VMUN_ADDR;
+ } else addr = (vir_bytes) m->VMUM_ADDR;
if(!(vr = map_lookup(vmp, addr, NULL))) {
printf("VM: unmap: virtual address %p not found in %d\n",
if(addr % VM_PAGE_SIZE)
return EFAULT;
- len = roundup(m->VMUM_LEN, VM_PAGE_SIZE);
+ if(m->m_type == VM_SHM_UNMAP) {
+ len = vr->length;
+ } else len = roundup(m->VMUM_LEN, VM_PAGE_SIZE);
offset = addr - vr->vaddr;
return;
}
- /* Make sure this isn't a region that isn't supposed
- * to cause pagefaults.
- */
- assert(!(region->flags & VR_NOPF));
-
/* If process was writing, see if it's writable. */
if(!(region->flags & VR_WRITABLE) && wr) {
printf("VM: pagefault: SIGSEGV %d ro map 0x%x %s\n",
} else {
vir_bytes offset, sublen;
assert(region->vaddr <= mem);
- assert(!(region->flags & VR_NOPF));
assert(!(region->vaddr % VM_PAGE_SIZE));
offset = mem - region->vaddr;
sublen = len;
return NULL;
}
- assert(!(phys % VM_PAGE_SIZE));
- assert(phys != MAP_NONE);
+ if(phys != MAP_NONE)
+ assert(!(phys % VM_PAGE_SIZE));
USE(newpb,
newpb->phys = phys;
return newpb;
}
-struct phys_region *pb_reference(struct phys_block *newpb, vir_bytes offset, struct vir_region *region)
+/* Free a physical block outright: release its backing page (if it ever
+ * got one; MAP_NONE means it was never faulted in) and the block
+ * structure itself. Caller must ensure no phys_region references it.
+ */
+void pb_free(struct phys_block *pb)
+{
+	if(pb->phys != MAP_NONE)
+		free_mem(ABS2CLICK(pb->phys), 1);
+	SLABFREE(pb);
+}
+
+/* Attach an existing phys_region to a phys_block: set up the region's
+ * fields, push it onto the block's list of referencing regions, and
+ * bump the block's refcount. The region inherits its parent's memtype.
+ */
+void pb_link(struct phys_region *newphysr, struct phys_block *newpb,
+	vir_bytes offset, struct vir_region *parent)
+{
+USE(newphysr,
+	newphysr->offset = offset;
+	newphysr->ph = newpb;
+	newphysr->parent = parent;
+	newphysr->next_ph_list = newpb->firstregion;
+	newphysr->memtype = parent->memtype;
+	newpb->firstregion = newphysr;);
+	newpb->refcount++;
+}
+
+struct phys_region *pb_reference(struct phys_block *newpb,
+ vir_bytes offset, struct vir_region *region)
{
struct phys_region *newphysr;
}
/* New physical region. */
-USE(newphysr,
- newphysr->offset = offset;
- newphysr->ph = newpb;
- newphysr->parent = region;
- newphysr->next_ph_list = newpb->firstregion;
- newpb->firstregion = newphysr;);
+ pb_link(newphysr, newpb, offset, region);
- newpb->refcount++;
physr_insert(region->phys, newphysr);
return newphysr;
if(pb->refcount == 0) {
assert(!pb->firstregion);
- if(region->flags & VR_ANON) {
- free_mem(ABS2CLICK(pb->phys), 1);
- } else if(region->flags & VR_DIRECT) {
- ; /* No action required. */
- } else {
- panic("strange phys flags");
- }
+ int r;
+ if((r = region->memtype->ev_unreference(pr)) != OK)
+ panic("unref failed, %d", r);
+
SLABFREE(pb);
}
+ pr->ph = NULL;
+
if(rm) physr_remove(region->phys, pr->offset);
}
#include <stddef.h>
+#include "memtype.h"
+
typedef struct phys_region {
struct phys_block *ph;
struct vir_region *parent; /* vir_region or NULL if yielded */
int written; /* written to pagetable */
#endif
+ mem_type_t *memtype;
+
/* list of phys_regions that reference the same phys_block */
struct phys_region *next_ph_list;
#include "sanitycheck.h"
#include "phys_region.h"
#include "physravl_defs.h"
-#include "cavl_if.h"
#include "cavl_impl.h"
+
#include <minix/u64.h>
#define AVL_UNIQUE(id) physr_ ## id
#define AVL_COMPARE_KEY_NODE(k, h) AVL_COMPARE_KEY_KEY((k), (h)->offset)
#define AVL_COMPARE_NODE_NODE(h1, h2) AVL_COMPARE_KEY_KEY((h1)->offset, (h2)->offset)
#define AVL_INSIDE_STRUCT char pad[4];
+
/* alloc.c */
void mem_sanitycheck(char *file, int line);
phys_clicks alloc_mem(phys_clicks clicks, u32_t flags);
-struct memlist *alloc_mem_in_list(phys_bytes bytes, u32_t flags, phys_bytes known);
void memstats(int *nodes, int *pages, int *largest);
void printmemstats(void);
void usedpages_reset(void);
int usedpages_add_f(phys_bytes phys, phys_bytes len, char *file, int
line);
void free_mem(phys_clicks base, phys_clicks clicks);
-void free_mem_list(struct memlist *list, int all);
-void print_mem_list(struct memlist *ml);
#define usedpages_add(a, l) usedpages_add_f(a, l, __FILE__, __LINE__)
void mem_init(struct memory *chunks);
int do_unmap_phys(message *msg);
int do_remap(message *m);
int do_get_phys(message *m);
-int do_shared_unmap(message *m);
int do_get_refcount(message *m);
/* pagefaults.c */
/* region.c */
void map_region_init(void);
struct vir_region * map_page_region(struct vmproc *vmp, vir_bytes min,
- vir_bytes max, vir_bytes length, vir_bytes what, u32_t flags, int
- mapflags);
+ vir_bytes max, vir_bytes length, u32_t flags, int mapflags,
+ mem_type_t *memtype);
struct vir_region * map_proc_kernel(struct vmproc *dst);
int map_region_extend(struct vmproc *vmp, struct vir_region *vr,
vir_bytes delta);
int map_region_extend_upto_v(struct vmproc *vmp, vir_bytes vir);
-int map_region_shrink(struct vir_region *vr, vir_bytes delta);
int map_unmap_region(struct vmproc *vmp, struct vir_region *vr,
vir_bytes offset, vir_bytes len);
int map_free_proc(struct vmproc *vmp);
void printregionstats(struct vmproc *vmp);
void map_setparent(struct vmproc *vmp);
int yielded_block_cmp(struct block_id *, struct block_id *);
+struct phys_region *map_clone_ph_block(struct vmproc *vmp,
+ struct vir_region *region, struct phys_region *ph, physr_iter *iter);
+u32_t vrallocflags(u32_t flags);
+int map_free(struct vir_region *region);
struct vir_region * map_region_lookup_tag(struct vmproc *vmp, u32_t
tag);
void map_region_set_tag(struct vir_region *vr, u32_t tag);
u32_t map_region_get_tag(struct vir_region *vr);
-int map_remap(struct vmproc *dvmp, vir_bytes da, size_t size, struct
- vir_region *region, vir_bytes *r, int ro);
int map_get_phys(struct vmproc *vmp, vir_bytes addr, phys_bytes *r);
int map_get_ref(struct vmproc *vmp, vir_bytes addr, u8_t *cnt);
+int physregions(struct vir_region *vr);
void get_stats_info(struct vm_stats_info *vsi);
void get_usage_info(struct vmproc *vmp, struct vm_usage_info *vui);
/* pb.c */
struct phys_block *pb_new(phys_bytes phys);
+void pb_free(struct phys_block *);
struct phys_region *pb_reference(struct phys_block *newpb,
vir_bytes offset, struct vir_region *region);
void pb_unreferenced(struct vir_region *region, struct phys_region *pr, int rm);
+void pb_link(struct phys_region *newphysr, struct phys_block *newpb,
+ vir_bytes offset, struct vir_region *parent);
+
+/* mem_directphys.c */
+void phys_setphys(struct vir_region *vr, phys_bytes startaddr);
+
+/* mem_shared.c */
+void shared_setsource(struct vir_region *vr, endpoint_t ep, struct vir_region *src);
-#define _SYSTEM 1
-
#include <minix/com.h>
#include <minix/callnr.h>
#include <minix/type.h>
#include <limits.h>
#include <string.h>
-#include <errno.h>
#include <assert.h>
#include <stdint.h>
#include <memory.h>
#include "sanitycheck.h"
#include "physravl.h"
#include "memlist.h"
+#include "memtype.h"
/* LRU list. */
static yielded_t *lru_youngest = NULL, *lru_oldest = NULL;
-/* Should a physblock be mapped writable? */
-#define WRITABLE(r, pb) \
- (((r)->flags & VR_WRITABLE) && \
- (((r)->flags & (VR_DIRECT | VR_SHARED)) || \
- (pb)->refcount == 1))
-
-static int map_new_physblock(struct vmproc *vmp, struct vir_region
- *region, vir_bytes offset, vir_bytes length, phys_bytes what, u32_t
- allocflags, int written);
-
static int map_ph_writept(struct vmproc *vmp, struct vir_region *vr,
struct phys_region *pr);
static struct vir_region *map_copy_region(struct vmproc *vmp, struct
vir_region *vr);
-static struct phys_region *map_clone_ph_block(struct vmproc *vmp,
- struct vir_region *region, struct phys_region *ph, physr_iter *iter);
-
#if SANITYCHECKS
static void lrucheck(void);
#endif
return &vm_yielded_blocks[h];
}
-static char *map_name(struct vir_region *vr)
-{
- static char name[100];
- char *typename, *tag;
- int type = vr->flags & (VR_ANON|VR_DIRECT);
- switch(type) {
- case VR_ANON:
- typename = "anonymous";
- break;
- case VR_DIRECT:
- typename = "direct";
- break;
- default:
- panic("unknown mapping type: %d", type);
- }
-
- switch(vr->tag) {
- case VRT_TEXT:
- tag = "text";
- break;
- case VRT_STACK:
- tag = "stack";
- break;
- case VRT_HEAP:
- tag = "heap";
- break;
- case VRT_NONE:
- tag = "untagged";
- break;
- default:
- tag = "unknown tag value";
- break;
- }
-
- sprintf(name, "%s, %s", typename, tag);
-
- return name;
-}
-
void map_printregion(struct vmproc *vmp, struct vir_region *vr)
{
physr_iter iter;
struct phys_region *ph;
- printf("map_printmap: map_name: %s\n", map_name(vr));
+ printf("map_printmap: map_name: %s\n", vr->memtype->name);
printf("\t%lx (len 0x%lx, %lukB), %p\n",
- vr->vaddr, vr->length, vr->length/1024, map_name(vr));
+ vr->vaddr, vr->length, vr->length/1024, vr->memtype->name);
printf("\t\tphysblocks:\n");
physr_start_iter_least(vr->phys, &iter);
while((ph = physr_get_iter(&iter))) {
return nextvr;
}
+/* Should 'pr' be mapped writable in region 'vr'? True only if the
+ * region itself is writable (VR_WRITABLE) and the region's memory
+ * type agrees via its writable() hook (memory-type-specific check,
+ * e.g. shared vs. still-to-be-copied state — depends on the hook).
+ */
+int pr_writable(struct vir_region *vr, struct phys_region *pr)
+{
+	assert(vr->memtype->writable);
+	return ((vr->flags & VR_WRITABLE) && vr->memtype->writable(pr));
+}
+
#if SANITYCHECKS
/*===========================================================================*
int rw;
int r;
- if(WRITABLE(vr, pb))
+ if(pr_writable(vr, pr))
rw = PTF_WRITE;
else
rw = PTF_READ;
ALLREGIONS(;,USE(pr->ph, pr->ph->seencount = 0;););
ALLREGIONS(;,USE(pr->ph, pr->ph->seencount++;);
if(pr->ph->seencount == 1) {
- if(!(pr->parent->flags & VR_DIRECT)) {
- MYASSERT(usedpages_add(pr->ph->phys,
- VM_PAGE_SIZE) == OK);
- }
+ if(pr->parent->memtype->ev_sanitycheck)
+ pr->parent->memtype->ev_sanitycheck(pr, file, line);
}
);
int rw;
struct phys_block *pb = pr->ph;
+ assert(vr);
+ assert(pr);
+ assert(pb);
+
assert(!(vr->vaddr % VM_PAGE_SIZE));
assert(!(pr->offset % VM_PAGE_SIZE));
assert(pb->refcount > 0);
- if(WRITABLE(vr, pb))
+ if(pr_writable(vr, pr))
rw = PTF_WRITE;
else
rw = PTF_READ;
return region_find_slot_range(vmp, minv, maxv, length);
}
-struct vir_region *region_new(struct vmproc *vmp, vir_bytes startv, vir_bytes length, int flags)
+struct vir_region *region_new(struct vmproc *vmp, vir_bytes startv, vir_bytes length,
+ int flags, mem_type_t *memtype)
{
physr_avl *phavl;
struct vir_region *newregion;
+ static u32_t id;
if(!(SLABALLOC(newregion))) {
printf("vm: region_new: could not allocate\n");
/* Fill in node details. */
USE(newregion,
+ memset(newregion, 0, sizeof(*newregion));
newregion->vaddr = startv;
newregion->length = length;
newregion->flags = flags;
- newregion->tag = VRT_NONE;
+ newregion->memtype = memtype;
+ newregion->remaps = 0;
+ newregion->id = id++;
newregion->lower = newregion->higher = NULL;
newregion->parent = vmp;);
* map_page_region *
*===========================================================================*/
struct vir_region *map_page_region(vmp, minv, maxv, length,
- what, flags, mapflags)
+ flags, mapflags, memtype)
struct vmproc *vmp;
vir_bytes minv;
vir_bytes maxv;
vir_bytes length;
-vir_bytes what;
u32_t flags;
int mapflags;
+mem_type_t *memtype;
{
struct vir_region *newregion;
vir_bytes startv;
SANITYCHECK(SCL_FUNCTIONS);
- if((flags & VR_CONTIG) && !(mapflags & MF_PREALLOC)) {
- printf("map_page_region: can't make contiguous allocation without preallocating\n");
- return NULL;
- }
-
startv = region_find_slot(vmp, minv, maxv, length);
if (startv == SLOT_FAIL)
return NULL;
/* Now we want a new region. */
- if(!(newregion = region_new(vmp, startv, length, flags))) {
+ if(!(newregion = region_new(vmp, startv, length, flags, memtype))) {
printf("VM: map_page_region: allocating region failed\n");
return NULL;
}
- /* If we know what we're going to map to, map it right away. */
- if(what != MAP_NONE) {
- assert(!(what % VM_PAGE_SIZE));
- assert(!(startv % VM_PAGE_SIZE));
- assert(!(mapflags & MF_PREALLOC));
- if(map_new_physblock(vmp, newregion, 0, length,
- what, PAF_CLEAR, 0) != OK) {
- printf("VM: map_new_physblock failed\n");
- USE(newregion,
- SLABFREE(newregion->phys););
- SLABFREE(newregion);
- return NULL;
- }
- }
+ /* If a new event is specified, invoke it. */
+ if(newregion->memtype->ev_new)
+ newregion->memtype->ev_new(newregion);
- if((flags & VR_ANON) && (mapflags & MF_PREALLOC)) {
+ if(mapflags & MF_PREALLOC) {
if(map_handle_memory(vmp, newregion, 0, length, 1) != OK) {
printf("VM: map_page_region: prealloc failed\n");
USE(newregion,
return newregion;
}
-static struct phys_region *reset_physr_iter(struct vir_region *region,
- physr_iter *iter, vir_bytes offset)
-{
- struct phys_region *ph;
-
- physr_start_iter(region->phys, iter, offset, AVL_EQUAL);
- ph = physr_get_iter(iter);
- assert(ph);
- assert(ph->offset == offset);
-
- return ph;
-}
-
/*===========================================================================*
* map_subfree *
*===========================================================================*/
/*===========================================================================*
* map_free *
*===========================================================================*/
-static int map_free(struct vir_region *region)
+int map_free(struct vir_region *region)
{
int r;
return r;
}
+ if(region->memtype->ev_delete)
+ region->memtype->ev_delete(region);
+
USE(region,
SLABFREE(region->phys););
SLABFREE(region);
return NULL;
}
-static u32_t vrallocflags(u32_t flags)
+u32_t vrallocflags(u32_t flags)
{
u32_t allocflags = 0;
allocflags |= PAF_LOWER1MB;
if(flags & VR_CONTIG)
allocflags |= PAF_CONTIG;
+ if(!(flags & VR_UNINITIALIZED))
+ allocflags |= PAF_CLEAR;
return allocflags;
}
-/*===========================================================================*
- * map_new_physblock *
- *===========================================================================*/
-static int map_new_physblock(vmp, region, start_offset, length,
- what_mem, allocflags, written)
-struct vmproc *vmp;
-struct vir_region *region;
-vir_bytes start_offset;
-vir_bytes length;
-phys_bytes what_mem;
-u32_t allocflags;
-int written;
-{
- struct memlist *memlist, *ml;
- int r;
- vir_bytes mapped = 0;
- vir_bytes offset = start_offset;
-
- SANITYCHECK(SCL_FUNCTIONS);
-
- assert(!(length % VM_PAGE_SIZE));
-
- if((region->flags & VR_CONTIG) &&
- (start_offset > 0 || length < region->length)) {
- printf("VM: region length 0x%lx, offset 0x%lx length 0x%lx\n",
- region->length, start_offset, length);
- map_printmap(vmp);
- printf("VM: map_new_physblock: non-full contig allocation requested\n");
- return EFAULT;
- }
-
- /* Memory for new physical block. */
- allocflags |= vrallocflags(region->flags);
-
- if(allocflags & PAF_CONTIG) {
- assert(what_mem == MAP_NONE);
- if((what_mem = alloc_mem(length/VM_PAGE_SIZE, allocflags)) == NO_MEM) {
- return ENOMEM;
- }
- what_mem = CLICK2ABS(what_mem);
- allocflags &= ~PAF_CONTIG;
- assert(what_mem != MAP_NONE);
- }
-
- if(!(memlist = alloc_mem_in_list(length, allocflags, what_mem))) {
- printf("map_new_physblock: couldn't allocate\n");
- return ENOMEM;
- }
-
- r = OK;
-
- for(ml = memlist; ml; ml = ml->next) {
- struct phys_region *newphysr = NULL;
- struct phys_block *newpb = NULL;
-
- /* Allocate things necessary for this chunk of memory. */
- if(!(newpb = pb_new(ml->phys)) ||
- !(newphysr = pb_reference(newpb, offset, region))) {
- printf("map_new_physblock: no memory for the ph slabs\n");
- assert(!newphysr);
- if(newpb) SLABFREE(newpb);
- r = ENOMEM;
- break;
- }
-
- /* Update pagetable. */
- if(map_ph_writept(vmp, region, newphysr) != OK) {
- printf("map_new_physblock: map_ph_writept failed\n");
- r = ENOMEM;
- break;
- }
-
- offset += VM_PAGE_SIZE;
- mapped += VM_PAGE_SIZE;
- }
-
- if(r != OK) {
- offset = start_offset;
- /* Things did not go well. Undo everything. */
- for(ml = memlist; ml; ml = ml->next) {
- struct phys_region *physr;
- if((physr = physr_search(region->phys, offset,
- AVL_EQUAL))) {
- assert(physr->ph->refcount == 1);
- pb_unreferenced(region, physr, 1);
- SLABFREE(physr);
- }
- offset += VM_PAGE_SIZE;
- }
- } else assert(mapped == length);
-
- /* Always clean up the memlist itself, even if everything
- * worked we're not using the memlist nodes any more. And
- * the memory they reference is either freed above or in use.
- */
- free_mem_list(memlist, 0);
-
- SANITYCHECK(SCL_FUNCTIONS);
-
- return r;
-}
-
/*===========================================================================*
* map_clone_ph_block *
*===========================================================================*/
-static struct phys_region *map_clone_ph_block(vmp, region, ph, iter)
+struct phys_region *map_clone_ph_block(vmp, region, ph, iter)
struct vmproc *vmp;
struct vir_region *region;
struct phys_region *ph;
phys_bytes physaddr;
struct phys_region *newpr;
int region_has_single_block;
- int written = 0;
-#if SANITYCHECKS
- written = ph->written;
-#endif
SANITYCHECK(SCL_FUNCTIONS);
/* Warning: this function will free the passed
SLABSANE(ph->ph);
assert(ph->ph->refcount > 1);
pb_unreferenced(region, ph, 1);
- assert(ph->ph->refcount >= 1);
SLABFREE(ph);
SANITYCHECK(SCL_DETAIL);
/* Put new free memory in. */
- allocflags = vrallocflags(region->flags);
+ allocflags = vrallocflags(region->flags | VR_UNINITIALIZED);
region_has_single_block = (offset == 0 && region->length == VM_PAGE_SIZE);
assert(region_has_single_block || !(allocflags & PAF_CONTIG));
assert(!(allocflags & PAF_CLEAR));
- if(map_new_physblock(vmp, region, offset, VM_PAGE_SIZE,
- MAP_NONE, allocflags, written) != OK) {
+ if(map_pf(vmp, region, offset, 1) != OK) {
/* XXX original range now gone. */
- printf("VM: map_clone_ph_block: map_new_physblock failed.\n");
+ printf("VM: map_clone_ph_block: map_pf failed.\n");
return NULL;
}
vir_bytes offset;
int write;
{
- vir_bytes virpage;
struct phys_region *ph;
int r = OK;
+ offset -= offset % VM_PAGE_SIZE;
+
assert(offset >= 0);
assert(offset < region->length);
- assert(region->flags & VR_ANON);
assert(!(region->vaddr % VM_PAGE_SIZE));
-
- virpage = offset - offset % VM_PAGE_SIZE;
+ assert(!(write && !(region->flags & VR_WRITABLE)));
SANITYCHECK(SCL_FUNCTIONS);
- if((ph = physr_search(region->phys, offset, AVL_LESS_EQUAL)) &&
- (ph->offset <= offset && offset < ph->offset + VM_PAGE_SIZE)) {
- /* Pagefault in existing block. Do copy-on-write. */
- assert(write);
- assert(region->flags & VR_WRITABLE);
- assert(ph->ph->refcount > 0);
-
- if(WRITABLE(region, ph->ph)) {
- r = map_ph_writept(vmp, region, ph);
- if(r != OK)
- printf("map_ph_writept failed\n");
- } else {
- if(ph->ph->refcount > 0
- && ph->ph->share_flag != PBSH_COW) {
- printf("VM: write RO mapped pages.\n");
- return EFAULT;
- } else {
- if(!map_clone_ph_block(vmp, region, ph, NULL))
- r = ENOMEM;
- }
+ if(!(ph = physr_search(region->phys, offset, AVL_EQUAL))) {
+ struct phys_block *pb;
+
+ /* New block. */
+
+ if(!(pb = pb_new(MAP_NONE))) {
+ printf("map_pf: pb_new failed\n");
+ return ENOMEM;
}
- } else {
- /* Pagefault in non-existing block. Map in new block. */
- if(map_new_physblock(vmp, region, virpage,
- VM_PAGE_SIZE, MAP_NONE, PAF_CLEAR, 0) != OK) {
- printf("map_new_physblock failed\n");
- r = ENOMEM;
+
+ if(!(ph = pb_reference(pb, offset, region))) {
+ printf("map_pf: pb_reference failed\n");
+ pb_free(pb);
+ return ENOMEM;
+ }
+ }
+
+ assert(ph);
+ assert(ph->ph);
+
+ /* If we're writing and the block is already
+ * writable, nothing to do.
+ */
+
+ assert(region->memtype->writable);
+
+ if(!write || !region->memtype->writable(ph)) {
+ assert(region->memtype->ev_pagefault);
+ assert(ph->ph);
+ if((r = region->memtype->ev_pagefault(vmp,
+ region, ph, write)) == SUSPEND) {
+ panic("map_pf: memtype->ev_pagefault returned SUSPEND\n");
+ return SUSPEND;
+ }
+
+ if(r != OK) {
+ printf("map_pf: memtype->ev_pagefault failed\n");
+ if(ph)
+ pb_unreferenced(region, ph, 1);
+ return r;
}
+
+ assert(ph);
+ assert(ph->ph);
}
- SANITYCHECK(SCL_FUNCTIONS);
+ assert(ph->ph);
- if(r != OK) {
- printf("VM: map_pf: failed (%d)\n", r);
+ if((r = map_ph_writept(vmp, region, ph)) != OK) {
+ printf("map_pf: writept failed\n");
return r;
}
+ SANITYCHECK(SCL_FUNCTIONS);
+
#if SANITYCHECKS
- if(OK != pt_checkrange(&vmp->vm_pt, region->vaddr+virpage,
+ if(OK != pt_checkrange(&vmp->vm_pt, region->vaddr+offset,
VM_PAGE_SIZE, write)) {
panic("map_pf: pt_checkrange failed: %d", r);
}
return r;
}
+/*===========================================================================*
+ *				map_handle_memory			     *
+ *===========================================================================*/
+/* Make the range [start_offset, start_offset+length) of 'region'
+ * present (and writable, if 'write' is set) by invoking map_pf()
+ * for every page in the range.  Returns OK, or the first non-OK
+ * result from map_pf().
+ */
+int map_handle_memory(vmp, region, start_offset, length, write)
+struct vmproc *vmp;
+struct vir_region *region;
+vir_bytes start_offset;
+vir_bytes length;
+int write;
+{
+	vir_bytes offset, lim;
+	int r;
+
+	assert(length > 0);
+	lim = start_offset + length;
+	/* Guard against vir_bytes wrap-around in start_offset+length. */
+	assert(lim > start_offset);
+
+	for(offset = start_offset; offset < lim; offset += VM_PAGE_SIZE)
+		if((r = map_pf(vmp, region, offset, write)) != OK)
+			return r;
+
+	return OK;
+}
+
+
/*===========================================================================*
* map_pin_memory *
*===========================================================================*/
return OK;
}
-/*===========================================================================*
- * map_handle_memory *
- *===========================================================================*/
-int map_handle_memory(vmp, region, offset, length, write)
-struct vmproc *vmp;
-struct vir_region *region;
-vir_bytes offset, length;
-int write;
-{
- struct phys_region *physr, *nextphysr;
- int changes = 0;
- physr_iter iter;
- u32_t allocflags = 0;
-
- if(!(region->flags & VR_UNINITIALIZED)) {
- allocflags = PAF_CLEAR;
- }
-
-#define FREE_RANGE_HERE(er1, er2) { \
- struct phys_region *r1 = (er1), *r2 = (er2); \
- vir_bytes start = offset, end = offset + length; \
- if(r1) { \
- start = MAX(start, r1->offset + VM_PAGE_SIZE); } \
- if(r2) { \
- end = MIN(end, r2->offset); } \
- if(start < end) { \
- SANITYCHECK(SCL_DETAIL); \
- if(map_new_physblock(vmp, region, start, \
- end-start, MAP_NONE, allocflags, 0) != OK) { \
- SANITYCHECK(SCL_DETAIL); \
- return ENOMEM; \
- } \
- changes++; \
- } }
-
-
- SANITYCHECK(SCL_FUNCTIONS);
-
- assert(region->flags & VR_ANON);
- assert(!(region->vaddr % VM_PAGE_SIZE));
- assert(!(offset % VM_PAGE_SIZE));
- assert(!(length % VM_PAGE_SIZE));
- assert(!write || (region->flags & VR_WRITABLE));
-
- physr_start_iter(region->phys, &iter, offset, AVL_LESS_EQUAL);
- physr = physr_get_iter(&iter);
-
- if(!physr) {
- physr_start_iter(region->phys, &iter, offset, AVL_GREATER_EQUAL);
- physr = physr_get_iter(&iter);
- }
-
- FREE_RANGE_HERE(NULL, physr);
-
- if(physr) {
- physr = reset_physr_iter(region, &iter, physr->offset);
- if(physr->offset + VM_PAGE_SIZE <= offset) {
- physr_incr_iter(&iter);
- physr = physr_get_iter(&iter);
-
- FREE_RANGE_HERE(NULL, physr);
- if(physr) {
- physr = reset_physr_iter(region, &iter,
- physr->offset);
- }
- }
- }
-
- while(physr) {
- int r;
-
- SANITYCHECK(SCL_DETAIL);
-
- if(write) {
- assert(physr->ph->refcount > 0);
- if(!WRITABLE(region, physr->ph)) {
- if(!(physr = map_clone_ph_block(vmp, region,
- physr, &iter))) {
- printf("VM: map_handle_memory: no copy\n");
- return ENOMEM;
- }
- changes++;
- } else {
- SANITYCHECK(SCL_DETAIL);
- if((r=map_ph_writept(vmp, region, physr)) != OK) {
- printf("VM: map_ph_writept failed\n");
- return r;
- }
- changes++;
- SANITYCHECK(SCL_DETAIL);
- }
- }
-
- SANITYCHECK(SCL_DETAIL);
- physr_incr_iter(&iter);
- nextphysr = physr_get_iter(&iter);
- FREE_RANGE_HERE(physr, nextphysr);
- SANITYCHECK(SCL_DETAIL);
- if(nextphysr) {
- if(nextphysr->offset >= offset + length)
- break;
- nextphysr = reset_physr_iter(region, &iter,
- nextphysr->offset);
- }
- physr = nextphysr;
- }
-
- SANITYCHECK(SCL_FUNCTIONS);
-
- if(changes < 1) {
-#if VERBOSE
- printf("region start at 0x%lx offset 0x%lx len 0x%lx write %d\n",
- region->vaddr, offset, length, write);
- printf("no changes in map_handle_memory\n");
-#endif
- return EFAULT;
- }
-
-#if SANITYCHECKS
- if(OK != pt_checkrange(&vmp->vm_pt, region->vaddr+offset, length, write)) {
- printf("handle mem 0x%lx-0x%lx failed\n",
- region->vaddr+offset,region->vaddr+offset+length);
- map_printregion(vmp, region);
- panic("checkrange failed");
- }
-#endif
-
- return OK;
-}
-
#if SANITYCHECKS
static int count_phys_regions(struct vir_region *vr)
{
*/
struct vir_region *newvr;
struct phys_region *ph;
+ int r;
physr_iter iter;
#if SANITYCHECKS
int cr;
cr = count_phys_regions(vr);
#endif
- if(!(newvr = region_new(vr->parent, vr->vaddr, vr->length, vr->flags)))
+ if(!(newvr = region_new(vr->parent, vr->vaddr, vr->length, vr->flags, vr->memtype)))
return NULL;
+ if(vr->memtype->ev_copy && (r=vr->memtype->ev_copy(vr, newvr)) != OK) {
+ map_free(newvr);
+ printf("VM: memtype-specific copy failed (%d)\n", r);
+ return NULL;
+ }
+
physr_start_iter_least(vr->phys, &iter);
while((ph = physr_get_iter(&iter))) {
struct phys_region *newph = pb_reference(ph->ph, ph->offset, newvr);
int map_region_extend_upto_v(struct vmproc *vmp, vir_bytes v)
{
- vir_bytes offset = v, end;
+ vir_bytes offset = v;
struct vir_region *vr, *nextvr;
- int r = OK;
+
+ offset = roundup(offset, VM_PAGE_SIZE);
if(!(vr = region_search(&vmp->vm_regions_avl, offset, AVL_LESS))) {
printf("VM: nothing to extend\n");
return ENOMEM;
}
- if(!(vr->flags & VR_ANON)) {
- printf("VM: memory range to extend not anonymous\n");
- return ENOMEM;
- }
-
assert(vr->vaddr <= offset);
if((nextvr = getnextvr(vr))) {
assert(offset <= nextvr->vaddr);
}
- end = vr->vaddr + vr->length;
-
- offset = roundup(offset, VM_PAGE_SIZE);
-
- if(end < offset)
- r = map_region_extend(vmp, vr, offset - end);
-
- return r;
-}
-
-/*========================================================================*
- * map_region_extend *
- *========================================================================*/
-int map_region_extend(struct vmproc *vmp, struct vir_region *vr,
- vir_bytes delta)
-{
- vir_bytes end;
- struct vir_region *nextvr;
-
- assert(vr);
- assert(vr->flags & VR_ANON);
- assert(!(delta % VM_PAGE_SIZE));
- if(vr->flags & VR_CONTIG) {
- printf("VM: can't grow contig region\n");
- return EFAULT;
- }
-
- if(!delta) return OK;
- end = vr->vaddr + vr->length;
- assert(end >= vr->vaddr);
-
- if(end + delta <= end) {
- printf("VM: strange delta 0x%lx\n", delta);
+ if(nextvr && nextvr->vaddr < offset) {
+ printf("VM: can't grow into next region\n");
return ENOMEM;
}
- nextvr = getnextvr(vr);
-
- if(!nextvr || end + delta <= nextvr->vaddr) {
- USE(vr, vr->length += delta;);
- return OK;
- }
-
- return ENOMEM;
-}
-
-/*========================================================================*
- * map_region_shrink *
- *========================================================================*/
-int map_region_shrink(struct vir_region *vr, vir_bytes delta)
-{
- assert(vr);
- assert(vr->flags & VR_ANON);
- assert(!(delta % VM_PAGE_SIZE));
-
-#if 0
- printf("VM: ignoring region shrink\n");
-#endif
-
- return OK;
-}
-
-struct vir_region *map_region_lookup_tag(vmp, tag)
-struct vmproc *vmp;
-u32_t tag;
-{
- struct vir_region *vr;
- region_iter v_iter;
- region_start_iter_least(&vmp->vm_regions_avl, &v_iter);
-
- while((vr = region_get_iter(&v_iter))) {
- if(vr->tag == tag)
- return vr;
- region_incr_iter(&v_iter);
+ if(!vr->memtype->ev_resize) {
+ printf("VM: can't resize this type of memory\n");
+ return ENOMEM;
}
- return NULL;
-}
-
-void map_region_set_tag(struct vir_region *vr, u32_t tag)
-{
- USE(vr, vr->tag = tag;);
-}
-
-u32_t map_region_get_tag(struct vir_region *vr)
-{
- return vr->tag;
+ return vr->memtype->ev_resize(vmp, vr, offset - vr->vaddr);
}
/*========================================================================*
return EINVAL;
}
- if(!(r->flags & (VR_ANON|VR_DIRECT))) {
- printf("VM: only unmap anonymous or direct memory\n");
- return EINVAL;
- }
-
regionstart = r->vaddr + offset;
/* unreference its memory */
return OK;
}
-/*========================================================================*
- * map_remap *
- *========================================================================*/
-int map_remap(struct vmproc *dvmp, vir_bytes da, size_t size,
- struct vir_region *region, vir_bytes *r, int readonly)
-{
- struct vir_region *vr;
- struct phys_region *ph;
- vir_bytes startv, dst_addr;
- physr_iter iter;
-
- SANITYCHECK(SCL_FUNCTIONS);
-
- assert(region->flags & VR_SHARED);
-
- /* da is handled differently */
- if (!da)
- dst_addr = 0;
- else
- dst_addr = da;
-
- /* round up to page size */
- assert(!(size % VM_PAGE_SIZE));
- startv = region_find_slot(dvmp, dst_addr, VM_DATATOP, size);
- if (startv == SLOT_FAIL) {
- return ENOMEM;
- }
- /* when the user specifies the address, we cannot change it */
- if (da && (startv != dst_addr))
- return EINVAL;
-
- vr = map_copy_region(dvmp, region);
- if(!vr)
- return ENOMEM;
-
- USE(vr,
- vr->vaddr = startv;
- vr->length = size;
- vr->flags = region->flags;
- vr->tag = VRT_NONE;
- vr->parent = dvmp;
- if(readonly) {
- vr->flags &= ~VR_WRITABLE;
- }
- );
- assert(vr->flags & VR_SHARED);
-
- region_insert(&dvmp->vm_regions_avl, vr);
-
- physr_start_iter_least(vr->phys, &iter);
- while((ph = physr_get_iter(&iter))) {
- if(map_ph_writept(dvmp, vr, ph) != OK) {
- panic("map_remap: map_ph_writept failed");
- }
- physr_incr_iter(&iter);
- }
-
- *r = startv;
-
- SANITYCHECK(SCL_FUNCTIONS);
-
- return OK;
-}
-
/*========================================================================*
* map_get_phys *
*========================================================================*/
int map_get_phys(struct vmproc *vmp, vir_bytes addr, phys_bytes *r)
{
struct vir_region *vr;
- struct phys_region *ph;
- physr_iter iter;
if (!(vr = map_lookup(vmp, addr, NULL)) ||
(vr->vaddr != addr))
return EINVAL;
- if (!(vr->flags & VR_SHARED))
+ if (!vr->memtype->regionid)
return EINVAL;
- physr_start_iter_least(vr->phys, &iter);
- ph = physr_get_iter(&iter);
-
- assert(ph);
- assert(ph->ph);
- if (r)
- *r = ph->ph->phys;
+ if(r)
+ *r = vr->memtype->regionid(vr);
return OK;
}
int map_get_ref(struct vmproc *vmp, vir_bytes addr, u8_t *cnt)
{
struct vir_region *vr;
- struct phys_region *ph;
- physr_iter iter;
if (!(vr = map_lookup(vmp, addr, NULL)) ||
- (vr->vaddr != addr))
- return EINVAL;
-
- if (!(vr->flags & VR_SHARED))
+ (vr->vaddr != addr) || !vr->memtype->refcount)
return EINVAL;
- physr_start_iter_least(vr->phys, &iter);
- ph = physr_get_iter(&iter);
-
- assert(ph);
- assert(ph->ph);
if (cnt)
- *cnt = ph->ph->refcount;
+ *cnt = vr->memtype->refcount(vr);
return OK;
}
if (!(vr->flags & VR_WRITABLE))
vri->vri_prot &= ~PROT_WRITE;
- vri->vri_flags = (vr->flags & VR_SHARED) ? MAP_IPC_SHARED : 0;
-
next = vr->vaddr + vr->length;
region_incr_iter(&v_iter);
}
u32_t pt_flag = PTF_PRESENT | PTF_USER;
vir_bytes end;
+ if(map_handle_memory(vms, vrs, offset_s, length, 0) != OK) {
+ printf("do_map_memory: source cleaning up didn't work\n");
+ return EFAULT;
+ }
+
/* Search for the first phys region in the source process. */
physr_start_iter(vrs->phys, &iter, offset_s, AVL_EQUAL);
prs = physr_get_iter(&iter);
if(!prs)
- panic("do_map_memory: no aligned phys region: %d", 0);
+ panic("do_map_memory: no aligned phys region");
/* flag: 0 -> read-only
* 1 -> writable
physr_start_iter(vrd->phys, &iter, off, AVL_EQUAL);
pr = physr_get_iter(&iter);
if(!pr)
- panic("unmap_memory: no aligned phys region: %d", 0);
+ panic("unmap_memory: no aligned phys region");
/* Copy the phys block now rather than doing COW. */
end = off + length;
return OK;
}
-
-/*===========================================================================*
- * rm_phys_regions *
- *===========================================================================*/
-static void rm_phys_regions(struct vir_region *region,
- vir_bytes begin, vir_bytes length)
-{
-/* Remove all phys regions between @begin and @begin+length.
- *
- * Don't update the page table, because we will update it at map_memory()
- * later.
- */
- struct phys_region *pr;
- physr_iter iter;
-
- physr_start_iter(region->phys, &iter, begin, AVL_GREATER_EQUAL);
- while((pr = physr_get_iter(&iter)) && pr->offset < begin + length) {
- pb_unreferenced(region, pr, 1);
- physr_start_iter(region->phys, &iter, begin,
- AVL_GREATER_EQUAL);
- SLABFREE(pr);
- }
-}
-
/*===========================================================================*
* map_memory *
*===========================================================================*/
map_handle_memory(vms, vrs, offset_s, length, 0);
/* Prepare work. */
- rm_phys_regions(vrd, offset_d, length);
+
+ if((r=map_subfree(vrd, offset_d, length)) != OK) {
+ return r;
+ }
/* Map memory. */
r = do_map_memory(vms, vmd, vrs, vrd, offset_s, offset_d, length, flag);
region_incr_iter(&iter);
}
}
+
+/* Count the phys_region entries in 'vr's physical-block AVL tree by
+ * walking it from least to greatest.  Diagnostic/statistics helper.
+ */
+int physregions(struct vir_region *vr)
+{
+	int n = 0;
+	physr_iter iter;
+	physr_start_iter_least(vr->phys, &iter);
+	while(physr_get_iter(&iter)) {
+		n++;
+		physr_incr_iter(&iter);
+	}
+	return n;
+}
#include "phys_region.h"
#include "physravl.h"
+#include "memtype.h"
+#include "vm.h"
struct phys_block {
#if SANITYCHECKS
#define PBSH_SMAP 2
u8_t share_flag; /* PBSH_COW or PBSH_SMAP */
+ /* what kind of memory is it? */
+ mem_type_t *memtype;
+
/* first in list of phys_regions that reference this block */
struct phys_region *firstregion;
};
vir_bytes length; /* length in bytes */
physr_avl *phys; /* avl tree of physical memory blocks */
u16_t flags;
- u32_t tag; /* Opaque to mapping code. */
struct vmproc *parent; /* Process that owns this vir_region. */
+ mem_type_t *memtype; /* Default instantiated memory type. */
+ int remaps;
+ u32_t id; /* unique id */
+
+ union {
+ phys_bytes phys;
+ struct {
+ endpoint_t ep;
+ vir_bytes vaddr;
+ int id;
+ } shared;
+ } param;
/* AVL fields */
struct vir_region *lower, *higher;
/* Mapping flags: */
#define VR_WRITABLE 0x001 /* Process may write here. */
-#define VR_NOPF 0x002 /* May not generate page faults. */
#define VR_PHYS64K 0x004 /* Physical memory must be 64k aligned. */
#define VR_LOWER16MB 0x008
#define VR_LOWER1MB 0x010
#define VR_ANON 0x100 /* Memory to be cleared and allocated */
#define VR_DIRECT 0x200 /* Mapped, but not managed by VM */
-/* Tag values: */
-#define VRT_NONE 0xBEEF0000
-#define VRT_HEAP 0xBEEF0001
-#define VRT_TEXT 0xBEEF0002
-#define VRT_STACK 0xBEEF0003
-
/* map_page_region flags */
#define MF_PREALLOC 0x01
#define SANITYCHECK(l)
#define SLABSANITYCHECK(l)
#define SLABSANE(ptr)
+#define MYASSERT(c)
#endif
#if MEMPROTECT
+#ifndef _VM_H
+#define _VM_H 1
+
+#define _SYSTEM 1
+
+/* Compile in asserts and custom sanity checks at all? */
+#define SANITYCHECKS 0
+#define VMSTATS 0
+
+/* VM behaviour */
+#define MEMPROTECT 0 /* Slab objects not mapped. Access with USE() */
+#define JUNKFREE 0 /* Fill freed pages with junk */
+
+#include <sys/errno.h>
+#include <memory.h>
+
+#include "sanitycheck.h"
+#include "region.h"
+
/* Memory flags to pt_allocmap() and alloc_mem(). */
#define PAF_CLEAR 0x01 /* Clear physical memory. */
#define PAF_CONTIG 0x02 /* Physically contiguous. */
/* special value for v in pt_allocmap */
#define AM_AUTO ((u32_t) -1)
-/* Compile in asserts and custom sanity checks at all? */
-#define SANITYCHECKS 0
-#define VMSTATS 0
-
-/* VM behaviour */
-#define MEMPROTECT 0 /* Slab objects not mapped. Access with USE() */
-#define JUNKFREE 0 /* Fill freed pages with junk */
-
/* How noisy are we supposed to be? */
#define VERBOSE 0
#define LU_DEBUG 0
#define MAP_NONE 0xFFFFFFFE
#define NO_MEM ((phys_clicks) MAP_NONE) /* returned by alloc_mem() with mem is up */
+#endif
+