PROG= vm
SRCS= main.c alloc.c utility.c exec.c exit.c fork.c break.c \
signal.c mmap.c slaballoc.c region.c pagefaults.c addravl.c \
- physravl.c rs.c queryexit.c map_mem.c
+ physravl.c rs.c queryexit.c
DPADD+= ${LIBSYS}
LDADD+= -lsys
#include "pagerange.h"
#include "addravl.h"
#include "sanitycheck.h"
+#include "memlist.h"
/* AVL tree of free pages. */
addr_avl addravl;
/* Used for sanity check. */
PRIVATE phys_bytes mem_low, mem_high;
-#define vm_assert_range(addr, len) \
+#define assert_range(addr, len) \
vm_assert((addr) >= mem_low); \
vm_assert((addr) + (len) - 1 <= mem_high);
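+/* Note that assert_range() expands to two statements, not a single
+ * do { ... } while(0) block, so it is only safe at statement level.
+ * An un-braced conditional (hypothetical, not from this code) such as
+ *
+ *	if(checking)
+ *		assert_range(addr, len);
+ *
+ * would guard only the first vm_assert() with the if.
+ */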
FORWARD _PROTOTYPE( void del_slot, (struct hole *prev_ptr, struct hole *hp) );
FORWARD _PROTOTYPE( void merge, (struct hole *hp) );
FORWARD _PROTOTYPE( void free_pages, (phys_bytes addr, int pages) );
-FORWARD _PROTOTYPE( phys_bytes alloc_pages, (int pages, int flags) );
+FORWARD _PROTOTYPE( phys_bytes alloc_pages, (int pages, int flags,
+ phys_bytes *ret));
#if SANITYCHECKS
FORWARD _PROTOTYPE( void holes_sanity_f, (char *fn, int line) );
#define CHECKHOLES holes_sanity_f(__FILE__, __LINE__)
-#define MAXPAGES (1024*1024*1024/VM_PAGE_SIZE) /* 1GB of memory */
+#define PAGESPERGB (1024*1024*1024/VM_PAGE_SIZE) /* 1GB of memory */
+#define MAXPAGES (2*PAGESPERGB)
#define CHUNKS BITMAP_CHUNKS(MAXPAGES)
PRIVATE bitchunk_t pagemap[CHUNKS];
#define CHECKHOLES
#endif
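+/* For concreteness: assuming 4 KiB pages (VM_PAGE_SIZE == 4096) and a
+ * 32-bit bitchunk_t, these constants work out to
+ *
+ *	PAGESPERGB = 1024*1024*1024 / 4096 = 262144
+ *	MAXPAGES   = 2 * PAGESPERGB        = 524288
+ *	CHUNKS     = 524288 / 32           = 16384, i.e. a 64 KiB pagemap
+ *
+ * The exact figures depend on the architecture headers.
+ */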
-/* Sanity check for parameters of node p. */
-#define vm_assert_params(p, bytes, next) { \
- vm_assert((p) != NO_MEM); \
- vm_assert(!((bytes) % VM_PAGE_SIZE)); \
- vm_assert(!((next) % VM_PAGE_SIZE)); \
- vm_assert((bytes) > 0); \
- vm_assert((p) + (bytes) > (p)); \
- vm_assert((next) == NO_MEM || ((p) + (bytes) <= (next))); \
- vm_assert_range((p), (bytes)); \
- vm_assert_range((next), 1); \
-}
-
-/* Retrieve size of free block and pointer to next block from physical
- * address (page) p.
- */
-#define GET_PARAMS(p, bytes, next) { \
- phys_readaddr((p), &(bytes), &(next)); \
- vm_assert_params((p), (bytes), (next)); \
-}
-
-/* Write parameters to physical page p. */
-#define SET_PARAMS(p, bytes, next) { \
- vm_assert_params((p), (bytes), (next)); \
- phys_writeaddr((p), (bytes), (next)); \
-}
-
#if SANITYCHECKS
if(!(c)) { \
printf("holes_sanity_f:%s:%d: %s failed\n", file, line, #c); \
util_stacktrace(); \
- panic("assert failed"); } \
+ panic("vm_assert failed"); } \
}
int h, c = 0, n = 0;
#endif
/*===========================================================================*
- * alloc_mem_f *
+ * alloc_mem *
*===========================================================================*/
-PUBLIC phys_clicks alloc_mem_f(phys_clicks clicks, u32_t memflags)
+PUBLIC phys_clicks alloc_mem(phys_clicks clicks, u32_t memflags)
{
/* Allocate a block of memory from the free list using first fit. The block
* consists of a sequence of contiguous bytes, whose length in clicks is
if(vm_paged) {
vm_assert(CLICK_SIZE == VM_PAGE_SIZE);
- mem = alloc_pages(clicks, memflags);
+ mem = alloc_pages(clicks, memflags, NULL);
} else {
CHECKHOLES;
prev_ptr = NIL_HOLE;
if(o > 0) {
phys_clicks e;
e = align_clicks - o;
- FREE_MEM(mem, e);
+ free_mem(mem, e);
mem += e;
}
}
}
/*===========================================================================*
- * free_mem_f *
+ * free_mem *
*===========================================================================*/
-PUBLIC void free_mem_f(phys_clicks base, phys_clicks clicks)
+PUBLIC void free_mem(phys_clicks base, phys_clicks clicks)
{
/* Return a block of free memory to the hole list. The parameters tell where
* the block starts in physical memory and how big it is. The block is added
to = CLICK2ABS(chunks[i].base+chunks[i].size)-1;
if(first || from < mem_low) mem_low = from;
if(first || to > mem_high) mem_high = to;
- FREE_MEM(chunks[i].base, chunks[i].size);
+ free_mem(chunks[i].base, chunks[i].size);
total_pages += chunks[i].size;
first = 0;
}
/*===========================================================================*
* alloc_pages *
*===========================================================================*/
-PRIVATE PUBLIC phys_bytes alloc_pages(int pages, int memflags)
+PRIVATE PUBLIC phys_bytes alloc_pages(int pages, int memflags, phys_bytes *len)
{
addr_iter iter;
pagerange_t *pr;
while((pr = addr_get_iter(&iter))) {
SLABSANE(pr);
- if(pr->size >= pages) {
+ vm_assert(pr->size > 0);
+ if(pr->size >= pages || (memflags & PAF_FIRSTBLOCK)) {
if(memflags & PAF_LOWER16MB) {
if(pr->addr + pages > boundary16)
return NO_MEM;
printf("VM: alloc_pages: alloc failed of %d pages\n", pages);
util_stacktrace();
printmemstats();
+ if(len)
+ *len = 0;
#if SANITYCHECKS
if(largest >= pages) {
panic("no memory but largest was enough");
SLABSANE(pr);
+ if(memflags & PAF_FIRSTBLOCK) {
+ vm_assert(len);
+ /* The block doesn't have to be as big as requested;
+ * return its actual size though.
+ */
+ if(pr->size < pages) {
+ pages = pr->size;
+#if SANITYCHECKS
+ wantpages = firstpages - pages;
+#endif
+ }
+ }
+
+ if(len)
+ *len = pages;
+
/* Allocated chunk is off the end. */
mem = pr->addr + pr->size - pages;
memstats(&finalnodes, &finalpages, &largest);
sanitycheck();
+ if(finalpages != wantpages) {
+ printf("pages start: %d req: %d final: %d\n",
+ firstpages, pages, finalpages);
+ }
vm_assert(finalnodes == wantnodes);
vm_assert(finalpages == wantpages);
#endif
if (j >= NR_DMA)
{
/* Last segment */
- FREE_MEM(dmatab[i].dt_seg_base,
+ free_mem(dmatab[i].dt_seg_base,
dmatab[i].dt_seg_size);
}
}
}
if (!found_one)
- FREE_MEM(base, size);
+ free_mem(base, size);
msg->VMRD_FOUND = found_one;
#endif
vm_assert(!(addr % VM_PAGE_SIZE));
vm_assert(!(len % VM_PAGE_SIZE));
vm_assert(len > 0);
- vm_assert_range(addr, len);
+ assert_range(addr, len);
pagestart = addr / VM_PAGE_SIZE;
pages = len / VM_PAGE_SIZE;
}
#endif
+
+/*===========================================================================*
+ * alloc_mem_in_list *
+ *===========================================================================*/
+struct memlist *alloc_mem_in_list(phys_bytes bytes, u32_t flags)
+{
+ phys_bytes rempages;
+ struct memlist *head = NULL, *ml;
+
+ vm_assert(bytes > 0);
+ vm_assert(!(bytes % VM_PAGE_SIZE));
+
+ rempages = bytes / VM_PAGE_SIZE;
+
+ /* Unless we are told to allocate all memory
+ * contiguously, tell the allocation function to grab
+ * whatever block it can find.
+ */
+ if(!(flags & PAF_CONTIG))
+ flags |= PAF_FIRSTBLOCK;
+
+ do {
+ phys_bytes mem, gotpages;
+ mem = alloc_pages(rempages, flags, &gotpages);
+
+ if(mem == NO_MEM) {
+ free_mem_list(head, 1);
+ return NULL;
+ }
+
+ vm_assert(gotpages <= rempages);
+ vm_assert(gotpages > 0);
+
+ if(!(SLABALLOC(ml))) {
+ free_mem_list(head, 1);
+ free_pages(mem, gotpages);
+ return NULL;
+ }
+
+ USE(ml,
+ ml->phys = CLICK2ABS(mem);
+ ml->length = CLICK2ABS(gotpages);
+ ml->next = head;);
+ head = ml;
+ rempages -= gotpages;
+ } while(rempages > 0);
+
+ for(ml = head; ml; ml = ml->next) {
+ vm_assert(ml->phys);
+ vm_assert(ml->length);
+ }
+
+ return head;
+}
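+/* A sketch of how the list allocator is meant to be used (illustrative,
+ * not an actual caller in this change): allocate possibly-fragmented
+ * memory, walk the chunks, then free the list nodes while keeping the
+ * memory itself in use, as map_new_physblock() does in region.c:
+ *
+ *	struct memlist *head, *ml;
+ *	if(!(head = alloc_mem_in_list(bytes, PAF_CLEAR)))
+ *		return ENOMEM;
+ *	for(ml = head; ml; ml = ml->next)
+ *		... map ml->phys up to ml->phys + ml->length - 1 ...
+ *	free_mem_list(head, 0);
+ */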
+
+/*===========================================================================*
+ * free_mem_list *
+ *===========================================================================*/
+void free_mem_list(struct memlist *list, int all)
+{
+ while(list) {
+ struct memlist *next;
+ next = list->next;
+ vm_assert(!(list->phys % VM_PAGE_SIZE));
+ vm_assert(!(list->length % VM_PAGE_SIZE));
+ if(all)
+ free_pages(list->phys / VM_PAGE_SIZE,
+ list->length / VM_PAGE_SIZE);
+ SLABFREE(list);
+ list = next;
+ }
+}
+
+/*===========================================================================*
+ * print_mem_list *
+ *===========================================================================*/
+void print_mem_list(struct memlist *list)
+{
+ while(list) {
+ vm_assert(list->length > 0);
+ printf("0x%lx-0x%lx", list->phys, list->phys+list->length-1);
+ printf(" ");
+ list = list->next;
+ }
+ printf("\n");
+}
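+/* Example print_mem_list() output for a two-chunk list (addresses
+ * illustrative): "0x1000-0x1fff 0x3000-0x4fff".
+ */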
+
#include <minix/safecopies.h>
#include <minix/cpufeature.h>
#include <minix/bitmap.h>
+#include <minix/debug.h>
#include <errno.h>
#include <stdlib.h>
PRIVATE int bigpage_ok = 0;
/* Our process table entry. */
-struct vmproc *vmp = &vmproc[VM_PROC_NR];
+struct vmproc *vmprocess = &vmproc[VM_PROC_NR];
/* Spare memory, ready to go after initialization, to avoid a
* circular dependency on allocating memory and writing it into VM's
for(i = proc_pde; i < I386_VM_DIR_ENTRIES; i++) {
if(pt->pt_pt[i]) {
+ int pte;
+ MYASSERT(vm_addrok(pt->pt_pt[i], 1));
if(!(pt->pt_dir[i] & I386_VM_PRESENT)) {
printf("slot %d: pt->pt_pt[%d] = 0x%lx, but pt_dir entry 0x%lx\n",
slot, i, pt->pt_pt[i], pt->pt_dir[i]);
PRIVATE void vm_freepages(vir_bytes vir, vir_bytes phys, int pages, int reason)
{
vm_assert(reason >= 0 && reason < VMP_CATEGORIES);
- if(vir >= vmp->vm_stacktop) {
+ if(vir >= vmprocess->vm_stacktop) {
vm_assert(!(vir % I386_PAGE_SIZE));
vm_assert(!(phys % I386_PAGE_SIZE));
- FREE_MEM(ABS2CLICK(phys), pages);
- if(pt_writemap(&vmp->vm_pt, arch_vir2map(vmp, vir),
+ free_mem(ABS2CLICK(phys), pages);
+ if(pt_writemap(&vmprocess->vm_pt, arch_vir2map(vmprocess, vir),
MAP_NONE, pages*I386_PAGE_SIZE, 0, WMF_OVERWRITE) != OK)
panic("vm_freepages: pt_writemap failed");
} else {
printf("VM: vm_freepages not freeing VM heap pages (%d)\n",
pages);
}
+
+#if SANITYCHECKS
+ /* If SANITYCHECKS is enabled, flush the TLB so that accesses to
+ * freed pages always trap, even when a stale entry is still cached.
+ */
+ if((sys_vmctl(SELF, VMCTL_FLUSHTLB, 0)) != OK) {
+ panic("VMCTL_FLUSHTLB failed");
+ }
+#endif
}
/*===========================================================================*
static int level = 0;
void *ret;
- pt = &vmp->vm_pt;
+ pt = &vmprocess->vm_pt;
vm_assert(reason >= 0 && reason < VMP_CATEGORIES);
level++;
vm_assert(level >= 1);
vm_assert(level <= 2);
- if(level > 1 || !(vmp->vm_flags & VMF_HASPT) || !meminit_done) {
+ if(level > 1 || !(vmprocess->vm_flags & VMF_HASPT) || !meminit_done) {
int r;
void *s;
s=vm_getsparepage(phys);
/* VM does have a pagetable, so get a page and map it in there.
* Where in our virtual address space can we put it?
*/
- loc = findhole(pt, arch_vir2map(vmp, vmp->vm_stacktop),
- vmp->vm_arch.vm_data_top);
+ loc = findhole(pt, arch_vir2map(vmprocess, vmprocess->vm_stacktop),
+ vmprocess->vm_arch.vm_data_top);
if(loc == NO_MEM) {
level--;
printf("VM: vm_allocpage: findhole failed\n");
/* Allocate page of memory for use by VM. As VM
* is trusted, we don't have to pre-clear it.
*/
- if((newpage = ALLOC_MEM(CLICKSPERPAGE, 0)) == NO_MEM) {
+ if((newpage = alloc_mem(CLICKSPERPAGE, 0)) == NO_MEM) {
level--;
- printf("VM: vm_allocpage: ALLOC_MEM failed\n");
+ printf("VM: vm_allocpage: alloc_mem failed\n");
return NULL;
}
/* Map this page into our address space. */
if((r=pt_writemap(pt, loc, *phys, I386_PAGE_SIZE,
I386_VM_PRESENT | I386_VM_USER | I386_VM_WRITE, 0)) != OK) {
- FREE_MEM(newpage, CLICKSPERPAGE);
+ free_mem(newpage, CLICKSPERPAGE);
printf("vm_allocpage writemap failed\n");
level--;
return NULL;
level--;
/* Return user-space-ready pointer to it. */
- ret = (void *) arch_map2vir(vmp, loc);
+ ret = (void *) arch_map2vir(vmprocess, loc);
return ret;
}
u32_t flags = I386_VM_PRESENT | I386_VM_USER;
pt_t *pt;
- pt = &vmp->vm_pt;
- m = arch_vir2map(vmp, (vir_bytes) vir);
+ pt = &vmprocess->vm_pt;
+ m = arch_vir2map(vmprocess, (vir_bytes) vir);
vm_assert(!(m % I386_PAGE_SIZE));
return;
}
+/*===========================================================================*
+ * vm_addrok *
+ *===========================================================================*/
+PUBLIC int vm_addrok(void *vir, int writeflag)
+{
+/* Check that a virtual address is mapped into VM's own address space,
+ * and writable if writeflag is set. Intended for sanity checks.
+ */
+ pt_t *pt = &vmprocess->vm_pt;
+ int pde, pte;
+ vir_bytes v = arch_vir2map(vmprocess, (vir_bytes) vir);
+
+ /* No PT yet? Don't bother looking. */
+ if(!(vmprocess->vm_flags & VMF_HASPT)) {
+ return 1;
+ }
+
+ pde = I386_VM_PDE(v);
+ pte = I386_VM_PTE(v);
+
+ if(!(pt->pt_dir[pde] & I386_VM_PRESENT)) {
+ printf("addr not ok: missing pde %d\n", pde);
+ return 0;
+ }
+
+ if(writeflag &&
+ !(pt->pt_dir[pde] & I386_VM_WRITE)) {
+ printf("addr not ok: pde %d present but pde unwritable\n", pde);
+ return 0;
+ }
+
+ if(!(pt->pt_pt[pde][pte] & I386_VM_PRESENT)) {
+ printf("addr not ok: missing pde %d / pte %d\n",
+ pde, pte);
+ return 0;
+ }
+
+ if(writeflag &&
+ !(pt->pt_pt[pde][pte] & I386_VM_WRITE)) {
+ printf("addr not ok: pde %d / pte %d present but unwritable\n",
+ pde, pte);
+ return 0;
+ }
+
+ return 1;
+}
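+/* vm_addrok() is meant for sanity checks, like the MYASSERT() in
+ * pt_sanitycheck() above: verify that a pointer VM is about to write
+ * through is actually mapped and writable. Sketch (hypothetical):
+ *
+ *	u32_t *ptable = pt->pt_pt[pde];
+ *	assert(vm_addrok(ptable, 1));
+ *	ptable[pte] = entry;
+ */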
+
/*===========================================================================*
* pt_ptalloc *
*===========================================================================*/
return OK;
}
+PRIVATE char *ptestr(u32_t pte)
+{
+#define FLAG(constant, name) { \
+ if(pte & (constant)) { strcat(str, name); strcat(str, " "); } \
+}
+
+ static char str[64];	/* large enough for all flag names combined */
+ if(!(pte & I386_VM_PRESENT)) {
+ return "not present";
+ }
+ str[0] = '\0';
+ FLAG(I386_VM_WRITE, "W");
+ FLAG(I386_VM_USER, "U");
+ FLAG(I386_VM_PWT, "PWT");
+ FLAG(I386_VM_PCD, "PCD");
+ FLAG(I386_VM_ACC, "ACC");
+ FLAG(I386_VM_DIRTY, "DIRTY");
+ FLAG(I386_VM_PS, "PS");
+ FLAG(I386_VM_GLOBAL, "G");
+ FLAG(I386_VM_PTAVAIL1, "AV1");
+ FLAG(I386_VM_PTAVAIL2, "AV2");
+ FLAG(I386_VM_PTAVAIL3, "AV3");
+
+ return str;
+}
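+/* Note that ptestr() formats into a single static buffer, so two calls
+ * may not be mixed in one printf() argument list; the verify path in
+ * pt_writemap() below prints each result with its own printf(). The
+ * pattern to avoid (illustrative):
+ *
+ *	printf("found %s expected %s\n", ptestr(found), ptestr(expected));
+ *
+ * would print the same string twice.
+ */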
+
/*===========================================================================*
* pt_writemap *
*===========================================================================*/
* what's actually written into the PTE if I386_VM_PRESENT
* isn't on, so we can just write MAP_NONE into it.
*/
-#if SANITYCHECKS
- if(physaddr != MAP_NONE && !(flags & I386_VM_PRESENT)) {
- panic("pt_writemap: writing dir with !P");
- }
- if(physaddr == MAP_NONE && flags) {
- panic("pt_writemap: writing 0 with flags");
- }
-#endif
+ vm_assert(physaddr == MAP_NONE || (flags & I386_VM_PRESENT));
+ vm_assert(physaddr != MAP_NONE || !flags);
finalpde = I386_VM_PDE(v + I386_PAGE_SIZE * pages);
*/
for(pdecheck = I386_VM_PDE(v); pdecheck <= finalpde; pdecheck++) {
vm_assert(pdecheck >= 0 && pdecheck < I386_VM_DIR_ENTRIES);
- if(pt->pt_dir[pdecheck] & I386_VM_BIGPAGE) {
- printf("pt_writemap: trying to write 0x%lx into 0x%lx\n",
- physaddr, v);
- panic("pt_writemap: BIGPAGE found");
- }
+ vm_assert(!(pt->pt_dir[pdecheck] & I386_VM_BIGPAGE));
if(!(pt->pt_dir[pdecheck] & I386_VM_PRESENT)) {
int r;
if(verify) {
/* Make sure page directory entry for this page table
* is marked present and page table entry is available.
*/
- vm_assert((pt->pt_dir[pde] & I386_VM_PRESENT) && pt->pt_pt[pde]);
+ vm_assert((pt->pt_dir[pde] & I386_VM_PRESENT));
+ vm_assert(pt->pt_pt[pde]);
#if SANITYCHECKS
/* We don't expect to overwrite a page. */
}
if(writemapflags & WMF_FREE) {
- FREE_MEM(ABS2CLICK(physaddr), 1);
+ free_mem(ABS2CLICK(physaddr), 1);
}
/* Entry we will write. */
maskedentry &= ~(I386_VM_ACC|I386_VM_DIRTY);
/* Verify pagetable entry. */
if(maskedentry != entry) {
- printf("pt_writemap: 0x%lx found, masked 0x%lx, 0x%lx expected\n",
- pt->pt_pt[pde][pte], maskedentry, entry);
+ printf("pt_writemap: mismatch: ");
+ if((entry & I386_VM_ADDR_MASK) !=
+ (maskedentry & I386_VM_ADDR_MASK)) {
+ printf("pt_writemap: physaddr mismatch (0x%lx, 0x%lx); ", entry, maskedentry);
+ } else printf("phys ok; ");
+ printf(" flags: found %s; ",
+ ptestr(pt->pt_pt[pde][pte]));
+ printf(" masked %s; ",
+ ptestr(maskedentry));
+ printf(" expected %s\n", ptestr(entry));
return EFAULT;
}
} else {
/* Write pagetable entry. */
+#if SANITYCHECKS
+ vm_assert(vm_addrok(pt->pt_pt[pde], 1));
+#endif
pt->pt_pt[pde][pte] = entry;
}
phys_bytes sparepages_ph;
/* Shorthand. */
- newpt = &vmp->vm_pt;
-
+ newpt = &vmprocess->vm_pt;
/* Get ourselves spare pages. */
if(!(sparepages_mem = (vir_bytes) aalloc(I386_PAGE_SIZE*SPAREPAGES)))
free_pde = id_map_high_pde+1;
/* Initial (current) range of our virtual address space. */
- lo = CLICK2ABS(vmp->vm_arch.vm_seg[T].mem_phys);
- hi = CLICK2ABS(vmp->vm_arch.vm_seg[S].mem_phys +
- vmp->vm_arch.vm_seg[S].mem_len);
+ lo = CLICK2ABS(vmprocess->vm_arch.vm_seg[T].mem_phys);
+ hi = CLICK2ABS(vmprocess->vm_arch.vm_seg[S].mem_phys +
+ vmprocess->vm_arch.vm_seg[S].mem_len);
vm_assert(!(lo % I386_PAGE_SIZE));
vm_assert(!(hi % I386_PAGE_SIZE));
}
/* Move segments up too. */
- vmp->vm_arch.vm_seg[T].mem_phys += ABS2CLICK(moveup);
- vmp->vm_arch.vm_seg[D].mem_phys += ABS2CLICK(moveup);
- vmp->vm_arch.vm_seg[S].mem_phys += ABS2CLICK(moveup);
+ vmprocess->vm_arch.vm_seg[T].mem_phys += ABS2CLICK(moveup);
+ vmprocess->vm_arch.vm_seg[D].mem_phys += ABS2CLICK(moveup);
+ vmprocess->vm_arch.vm_seg[S].mem_phys += ABS2CLICK(moveup);
/* Allocate us a page table in which to remember page directory
* pointers.
* like regular processes have.
*/
extra_clicks = ABS2CLICK(VM_DATATOP - hi);
- vmp->vm_arch.vm_seg[S].mem_len += extra_clicks;
+ vmprocess->vm_arch.vm_seg[S].mem_len += extra_clicks;
/* We pretend to the kernel we have a huge stack segment to
* increase our data segment.
*/
- vmp->vm_arch.vm_data_top =
- (vmp->vm_arch.vm_seg[S].mem_vir +
- vmp->vm_arch.vm_seg[S].mem_len) << CLICK_SHIFT;
+ vmprocess->vm_arch.vm_data_top =
+ (vmprocess->vm_arch.vm_seg[S].mem_vir +
+ vmprocess->vm_arch.vm_seg[S].mem_len) << CLICK_SHIFT;
/* Where our free virtual address space starts.
* This is only a hint to the VM system.
newpt->pt_virtop = 0;
/* Let other functions know VM now has a private page table. */
- vmp->vm_flags |= VMF_HASPT;
+ vmprocess->vm_flags |= VMF_HASPT;
/* Now reserve another pde for kernel's own mappings. */
{
/* Give our process the new, copied, private page table. */
pt_mapkernel(newpt); /* didn't know about vm_dir pages earlier */
- pt_bind(newpt, vmp);
+ pt_bind(newpt, vmprocess);
/* new segment limit for the kernel after paging is enabled */
ep_data.data_seg_limit = free_pde*I386_BIG_PAGE_SIZE;
/* the memory map which must be installed after paging is enabled */
- ep_data.mem_map = vmp->vm_arch.vm_seg;
+ ep_data.mem_map = vmprocess->vm_arch.vm_seg;
/* Now actually enable paging. */
if(sys_vmctl_enable_paging(&ep_data) != OK)
panic("pt_init: enable paging failed");
/* Back to reality - this is where the stack actually is. */
- vmp->vm_arch.vm_seg[S].mem_len -= extra_clicks;
+ vmprocess->vm_arch.vm_seg[S].mem_len -= extra_clicks;
/* All OK. */
return;
#include <sys/mman.h>
#include <errno.h>
+#include <assert.h>
#include <env.h>
#include "proto.h"
vmp= &vmproc[proc_n];
ptr= msg->VMEN_ARGSPTR;
- NOTRUNNABLE(vmp->vm_endpoint);
-
if(msg->VMEN_ARGSSIZE != sizeof(args)) {
printf("VM: exec_newmem: args size %d != %ld\n",
msg->VMEN_ARGSSIZE, sizeof(args));
if (!sh_mp) /* Load text if sh_mp = NULL */
msg->VMEN_FLAGS |= EXC_NM_RF_LOAD_TEXT;
- NOTRUNNABLE(vmp->vm_endpoint);
-
return OK;
}
*/
SANITYCHECK(SCL_DETAIL);
if(hadpt) {
- pt_free(&rmp->vm_pt);
rmp->vm_flags &= ~VMF_HASPT;
+ pt_free(&rmp->vm_pt);
}
vm_assert(!(vmpold->vm_flags & VMF_INUSE));
*vmpold = *rmp; /* copy current state. */
if(!hadpt) {
if (find_share(rmp, rmp->vm_ino, rmp->vm_dev, rmp->vm_ctime) == NULL) {
/* No other process shares the text segment, so free it. */
- FREE_MEM(rmp->vm_arch.vm_seg[T].mem_phys, rmp->vm_arch.vm_seg[T].mem_len);
+ free_mem(rmp->vm_arch.vm_seg[T].mem_phys, rmp->vm_arch.vm_seg[T].mem_len);
}
/* Free the data and stack segments. */
- FREE_MEM(rmp->vm_arch.vm_seg[D].mem_phys,
+ free_mem(rmp->vm_arch.vm_seg[D].mem_phys,
rmp->vm_arch.vm_seg[S].mem_vir
+ rmp->vm_arch.vm_seg[S].mem_len
- rmp->vm_arch.vm_seg[D].mem_vir);
SANITYCHECK(SCL_DETAIL);
printf("VM: new_mem: failed\n");
if(ptok) {
+ rmp->vm_flags &= ~VMF_HASPT;
pt_free(&rmp->vm_pt);
}
*rmp = *vmpold; /* undo. */
} else {
phys_clicks new_base;
- new_base = ALLOC_MEM(text_clicks + tot_clicks, 0);
+ new_base = alloc_mem(text_clicks + tot_clicks, 0);
if (new_base == NO_MEM) {
- printf("VM: new_mem: ALLOC_MEM failed\n");
+ printf("VM: new_mem: alloc_mem failed\n");
return(ENOMEM);
}
#include <minix/bitmap.h>
#include <errno.h>
+#include <assert.h>
#include <env.h>
#include "glo.h"
PUBLIC void free_proc(struct vmproc *vmp)
{
+ map_free_proc(vmp);
if(vmp->vm_flags & VMF_HASPT) {
vmp->vm_flags &= ~VMF_HASPT;
pt_free(&vmp->vm_pt);
}
- map_free_proc(vmp);
vmp->vm_regions = NULL;
#if VMSTATS
vmp->vm_bytecopies = 0;
SANITYCHECK(SCL_DETAIL);
} else {
/* Free the data and stack segments. */
- FREE_MEM(vmp->vm_arch.vm_seg[D].mem_phys,
+ free_mem(vmp->vm_arch.vm_seg[D].mem_phys,
vmp->vm_arch.vm_seg[S].mem_vir +
vmp->vm_arch.vm_seg[S].mem_len -
vmp->vm_arch.vm_seg[D].mem_vir);
/* No other process shares the text segment,
* so free it.
*/
- FREE_MEM(vmp->vm_arch.vm_seg[T].mem_phys,
+ free_mem(vmp->vm_arch.vm_seg[T].mem_phys,
vmp->vm_arch.vm_seg[T].mem_len);
}
}
vmc = &vmproc[childproc]; /* child */
vm_assert(vmc->vm_slot == childproc);
- NOTRUNNABLE(vmp->vm_endpoint);
-
if(vmp->vm_flags & VMF_HAS_DMA) {
printf("VM: %d has DMA memory and may not fork\n", msg->VMF_ENDPOINT);
return EINVAL;
vmc->vm_regions = NULL;
vmc->vm_endpoint = NONE; /* In case someone tries to use it. */
vmc->vm_pt = origpt;
- vmc->vm_flags |= VMF_HASPT;
+ vmc->vm_flags &= ~VMF_HASPT;
#if VMSTATS
vmc->vm_bytecopies = 0;
return ENOMEM;
}
+ vmc->vm_flags |= VMF_HASPT;
+
if(fullvm) {
SANITYCHECK(SCL_DETAIL);
SANITYCHECK(SCL_DETAIL);
} else {
vir_bytes sp;
- phys_bytes d_abs, s_abs;
+ struct vir_region *heap, *stack;
vir_bytes text_bytes, data_bytes, stack_bytes, parent_gap_bytes,
child_gap_bytes;
return r;
}
- if((d_abs = map_lookup_phys(vmc, VRT_HEAP)) == MAP_NONE)
- panic("couldn't lookup data");
- if((s_abs = map_lookup_phys(vmc, VRT_STACK)) == MAP_NONE)
+ if(!(heap = map_region_lookup_tag(vmc, VRT_HEAP)))
+ panic("couldn't lookup heap");
+ vm_assert(heap->phys);
+ if(!(stack = map_region_lookup_tag(vmc, VRT_STACK)))
panic("couldn't lookup stack");
+ vm_assert(stack->phys);
/* Now copy the memory regions. */
if(vmc->vm_arch.vm_seg[T].mem_len > 0) {
- phys_bytes t_abs;
- if((t_abs = map_lookup_phys(vmc, VRT_TEXT)) == MAP_NONE)
+ struct vir_region *text;
+ if(!(text = map_region_lookup_tag(vmc, VRT_TEXT)))
panic("couldn't lookup text");
- if(sys_abscopy(CLICK2ABS(vmp->vm_arch.vm_seg[T].mem_phys),
- t_abs, text_bytes) != OK)
+ vm_assert(text->phys);
+ if(copy_abs2region(CLICK2ABS(vmp->vm_arch.vm_seg[T].mem_phys),
+ text, 0, text_bytes) != OK)
panic("couldn't copy text");
}
- if(sys_abscopy(CLICK2ABS(vmp->vm_arch.vm_seg[D].mem_phys),
- d_abs, data_bytes) != OK)
- panic("couldn't copy data");
+ if(copy_abs2region(CLICK2ABS(vmp->vm_arch.vm_seg[D].mem_phys),
+ heap, 0, data_bytes) != OK)
+ panic("couldn't copy heap");
- if(sys_abscopy(
- CLICK2ABS(vmp->vm_arch.vm_seg[D].mem_phys +
+ if(copy_abs2region(CLICK2ABS(vmp->vm_arch.vm_seg[D].mem_phys +
vmc->vm_arch.vm_seg[D].mem_len) + parent_gap_bytes,
- s_abs + child_gap_bytes, stack_bytes) != OK)
+ stack, child_gap_bytes, stack_bytes) != OK)
panic("couldn't copy stack");
}
panic("do_fork can't sys_fork: %d", r);
}
- NOTRUNNABLE(vmp->vm_endpoint);
- NOTRUNNABLE(vmc->vm_endpoint);
-
if(fullvm) {
vir_bytes vir;
/* making these messages writable is an optimisation
EXTERN long vm_paged;
EXTERN int meminit_done;
+
#include <string.h>
#include <env.h>
#include <stdio.h>
+#include <assert.h>
#include <memory.h>
/* SEF local startup. */
sef_local_startup();
+ SANITYCHECK(SCL_TOP);
+
/* This is VM's main loop. */
while (TRUE) {
int r, c;
#if SANITYCHECKS
incheck = nocheck = 0;
- FIXME("VM SANITYCHECKS are on");
#endif
vm_paged = 1;
panic("VM: vmctl for new stack failed");
}
- FREE_MEM(vmp->vm_arch.vm_seg[D].mem_phys +
+ free_mem(vmp->vm_arch.vm_seg[D].mem_phys +
vmp->vm_arch.vm_seg[D].mem_len,
old_stack);
+++ /dev/null
-
-#define _SYSTEM 1
-
-#include <minix/type.h>
-#include <minix/config.h>
-#include <minix/const.h>
-#include <minix/sysutil.h>
-#include <minix/syslib.h>
-
-#include <limits.h>
-#include <errno.h>
-#include <assert.h>
-#include <stdint.h>
-#include <memory.h>
-
-#include "vm.h"
-#include "proto.h"
-#include "util.h"
-#include "glo.h"
-#include "region.h"
-#include "sanitycheck.h"
-
-/*===========================================================================*
- * split_phys *
- *===========================================================================*/
-PRIVATE int split_phys(struct phys_region *pr, vir_bytes point)
-{
- struct phys_region *newpr, *q, *prev;
- struct phys_block *newpb;
- struct phys_block *pb = pr->ph;
-/* Split the phys region into 2 parts by @point. */
-
- if(pr->offset >= point || pr->offset + pb->length <= point)
- return OK;
- if(!SLABALLOC(newpb))
- return ENOMEM;
-
- /* Split phys block. */
- *newpb = *pb;
- pb->length = point - pr->offset;
- newpb->length -= pb->length;
- newpb->phys += pb->length;
-
- /* Split phys regions in a list. */
- for(q = pb->firstregion; q; q = q->next_ph_list) {
- if(!SLABALLOC(newpr))
- return ENOMEM;
-
- *newpr = *q;
- newpr->ph = newpb;
- newpr->offset += pb->length;
-
- /* Link to the vir region's phys region list. */
- physr_insert(newpr->parent->phys, newpr);
-
- /* Link to the next_ph_list. */
- if(q == pb->firstregion) {
- newpb->firstregion = newpr;
- prev = newpr;
- } else {
- prev->next_ph_list = newpr;
- prev = newpr;
- }
- }
- prev->next_ph_list = NULL;
-
- return OK;
-}
-
-/*===========================================================================*
- * rm_phys_regions *
- *===========================================================================*/
-PRIVATE void rm_phys_regions(struct vir_region *region,
- vir_bytes begin, vir_bytes length)
-{
-/* Remove all phys regions between @begin and @begin+length.
- *
- * Don't update the page table, because we will update it at map_memory()
- * later.
- */
- struct phys_region *pr;
- physr_iter iter;
-
- physr_start_iter(region->phys, &iter, begin, AVL_GREATER_EQUAL);
- while((pr = physr_get_iter(&iter)) && pr->offset < begin + length) {
- pb_unreferenced(region, pr);
- physr_remove(region->phys, pr->offset);
- physr_start_iter(region->phys, &iter, begin,
- AVL_GREATER_EQUAL);
- SLABFREE(pr);
- }
-}
-
-/*===========================================================================*
- * clean_phys_regions *
- *===========================================================================*/
-PRIVATE void clean_phys_regions(struct vir_region *region,
- vir_bytes offset, vir_bytes length)
-{
-/* Consider @offset as the start address and @offset+length as the end address.
- * If there are phys regions crossing the start address or the end address,
- * split them into 2 parts.
- *
- * We assume that the phys regions are listed in order and don't overlap.
- */
- struct phys_region *pr;
- physr_iter iter;
-
- physr_start_iter_least(region->phys, &iter);
- while((pr = physr_get_iter(&iter))) {
- /* If this phys region crosses the start address, split it. */
- if(pr->offset < offset
- && pr->offset + pr->ph->length > offset) {
- split_phys(pr, offset);
- physr_start_iter_least(region->phys, &iter);
- }
- /* If this phys region crosses the end address, split it. */
- else if(pr->offset < offset + length
- && pr->offset + pr->ph->length > offset + length) {
- split_phys(pr, offset + length);
- physr_start_iter_least(region->phys, &iter);
- }
- else {
- physr_incr_iter(&iter);
- }
- }
-}
-
-/*===========================================================================*
- * do_map_memory *
- *===========================================================================*/
-PRIVATE int do_map_memory(struct vmproc *vms, struct vmproc *vmd,
- struct vir_region *vrs, struct vir_region *vrd,
- vir_bytes offset_s, vir_bytes offset_d,
- vir_bytes length, int flag)
-{
- struct phys_region *prs;
- struct phys_region *newphysr;
- struct phys_block *pb;
- physr_iter iter;
- u32_t pt_flag = PTF_PRESENT | PTF_USER;
- vir_bytes end;
-
- /* Search for the first phys region in the source process. */
- physr_start_iter(vrs->phys, &iter, offset_s, AVL_EQUAL);
- prs = physr_get_iter(&iter);
- if(!prs)
- panic("do_map_memory: no aligned phys region: %d", 0);
-
- /* flag: 0 -> read-only
- * 1 -> writable
- * -1 -> share as COW, so read-only
- */
- if(flag > 0)
- pt_flag |= PTF_WRITE;
-
- /* Map phys blocks in the source process to the destination process. */
- end = offset_d + length;
- while((prs = physr_get_iter(&iter)) && offset_d < end) {
- /* If a SMAP share was requested but the phys block has already
- * been shared as COW, copy the block for the source phys region
- * first.
- */
- pb = prs->ph;
- if(flag >= 0 && pb->refcount > 1
- && pb->share_flag == PBSH_COW) {
- map_copy_ph_block(vms, vrs, prs);
- pb = prs->ph;
- }
-
- /* Allocate a new phys region. */
- if(!SLABALLOC(newphysr))
- return ENOMEM;
-
- /* Set and link the new phys region to the block. */
- newphysr->ph = pb;
- newphysr->offset = offset_d;
- newphysr->parent = vrd;
- newphysr->next_ph_list = pb->firstregion;
- pb->firstregion = newphysr;
- physr_insert(newphysr->parent->phys, newphysr);
- pb->refcount++;
-
- /* If a COW share was requested but the phys block has already
- * been shared as SMAP, give up on COW and copy the block for
- * the destination phys region now.
- */
- if(flag < 0 && pb->refcount > 1
- && pb->share_flag == PBSH_SMAP) {
- map_copy_ph_block(vmd, vrd, newphysr);
- }
- else {
- /* See if this is a COW share or SMAP share. */
- if(flag < 0) { /* COW share */
- pb->share_flag = PBSH_COW;
- /* Update the page table for the src process. */
- pt_writemap(&vms->vm_pt, offset_s + vrs->vaddr,
- pb->phys, pb->length,
- pt_flag, WMF_OVERWRITE);
- }
- else { /* SMAP share */
- pb->share_flag = PBSH_SMAP;
- }
- /* Update the page table for the destination process. */
- pt_writemap(&vmd->vm_pt, offset_d + vrd->vaddr,
- pb->phys, pb->length, pt_flag, WMF_OVERWRITE);
- }
-
- physr_incr_iter(&iter);
- offset_d += pb->length;
- offset_s += pb->length;
- }
- return OK;
-}
-
-/*===========================================================================*
- * map_memory *
- *===========================================================================*/
-PUBLIC int map_memory(endpoint_t sour, endpoint_t dest,
- vir_bytes virt_s, vir_bytes virt_d, vir_bytes length, int flag)
-{
-/* This is the entry point. This function will be called by handle_memory() when
- * VM recieves a map-memory request.
- */
- struct vmproc *vms, *vmd;
- struct vir_region *vrs, *vrd;
- physr_iter iterd;
- vir_bytes offset_s, offset_d;
- int p;
- int r;
-
- if(vm_isokendpt(sour, &p) != OK)
- panic("map_memory: bad endpoint: %d", sour);
- vms = &vmproc[p];
- if(vm_isokendpt(dest, &p) != OK)
- panic("map_memory: bad endpoint: %d", dest);
- vmd = &vmproc[p];
-
- vrs = map_lookup(vms, virt_s);
- vm_assert(vrs);
- vrd = map_lookup(vmd, virt_d);
- vm_assert(vrd);
-
- /* Linear address -> offset from start of vir region. */
- offset_s = virt_s - vrs->vaddr;
- offset_d = virt_d - vrd->vaddr;
-
- /* Make sure that the range in the source process has been mapped
- * to physical memory.
- */
- map_handle_memory(vms, vrs, offset_s, length, 0);
-
- /* Prepare work. */
- clean_phys_regions(vrs, offset_s, length);
- clean_phys_regions(vrd, offset_d, length);
- rm_phys_regions(vrd, offset_d, length);
-
- /* Map memory. */
- r = do_map_memory(vms, vmd, vrs, vrd, offset_s, offset_d, length, flag);
-
- return r;
-}
-
-/*===========================================================================*
- * unmap_memory *
- *===========================================================================*/
-PUBLIC int unmap_memory(endpoint_t sour, endpoint_t dest,
- vir_bytes virt_s, vir_bytes virt_d, vir_bytes length, int flag)
-{
- struct vmproc *vmd;
- struct vir_region *vrd;
- struct phys_region *pr;
- struct phys_block *pb;
- physr_iter iter;
- vir_bytes off, end;
- int p;
-
- /* Use information on the destination process to unmap. */
- if(vm_isokendpt(dest, &p) != OK)
- panic("unmap_memory: bad endpoint: %d", dest);
- vmd = &vmproc[p];
-
- vrd = map_lookup(vmd, virt_d);
- vm_assert(vrd);
-
- /* Search for the first phys region in the destination process. */
- off = virt_d - vrd->vaddr;
- physr_start_iter(vrd->phys, &iter, off, AVL_EQUAL);
- pr = physr_get_iter(&iter);
- if(!pr)
- panic("unmap_memory: no aligned phys region: %d", 0);
-
- /* Copy the phys block now rather than doing COW. */
- end = off + length;
- while((pr = physr_get_iter(&iter)) && off < end) {
- pb = pr->ph;
- vm_assert(pb->refcount > 1);
- vm_assert(pb->share_flag == PBSH_SMAP);
-
- map_copy_ph_block(vmd, vrd, pr);
-
- physr_incr_iter(&iter);
- off += pb->length;
- }
-
- return OK;
-}
-
--- /dev/null
+
+#ifndef _MEMLIST_H
+#define _MEMLIST_H 1
+
+struct memlist {
+ struct memlist *next;
+ phys_bytes phys; /* physical address in bytes */
+ phys_bytes length; /* length in bytes */
+};
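+/* Invariants, as asserted in alloc.c: phys and length are multiples of
+ * VM_PAGE_SIZE, length > 0, and the list ends with next == NULL. A
+ * sketch that sums a list's total size under those assumptions:
+ *
+ *	phys_bytes total = 0;
+ *	struct memlist *ml;
+ *	for(ml = list; ml; ml = ml->next)
+ *		total += ml->length;
+ */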
+
+#endif
#include <minix/syslib.h>
#include <minix/safecopies.h>
#include <minix/bitmap.h>
+#include <minix/debug.h>
#include <sys/mman.h>
#include <errno.h>
+#include <assert.h>
#include <string.h>
#include <env.h>
#include <stdio.h>
return EINVAL;
}
- if(m->VMM_FLAGS & MAP_CONTIG) mfflags |= MF_CONTIG;
if(m->VMM_FLAGS & MAP_PREALLOC) mfflags |= MF_PREALLOC;
if(m->VMM_FLAGS & MAP_LOWER16M) vrflags |= VR_LOWER16MB;
if(m->VMM_FLAGS & MAP_LOWER1M) vrflags |= VR_LOWER1MB;
if(m->VMM_FLAGS & MAP_ALIGN64K) vrflags |= VR_PHYS64K;
if(m->VMM_FLAGS & MAP_SHARED) vrflags |= VR_SHARED;
+ if(m->VMM_FLAGS & MAP_CONTIG) vrflags |= VR_CONTIG;
if(len % VM_PAGE_SIZE)
len += VM_PAGE_SIZE - (len % VM_PAGE_SIZE);
if (!(region = map_lookup(svmp, sa)))
return EINVAL;
+ if(region->vaddr != sa) {
+ printf("VM: do_remap: not start of region.\n");
+ return EFAULT;
+ }
+
+ if(!(region->flags & VR_SHARED)) {
+ printf("VM: do_remap: not shared.\n");
+ return EFAULT;
+ }
+
+ if (size % VM_PAGE_SIZE)
+ size += VM_PAGE_SIZE - size % VM_PAGE_SIZE;
+
+ if(size != region->length) {
+ printf("VM: do_remap: not size of region.\n");
+ return EFAULT;
+ }
+
if ((r = map_remap(dvmp, da, size, region, &startv)) != OK)
return r;
#include <stdio.h>
#include <fcntl.h>
#include <signal.h>
+#include <assert.h>
#include <pagefaults.h>
#include "vm.h"
/* alloc.c */
-_PROTOTYPE( phys_clicks alloc_mem_f, (phys_clicks clicks, u32_t flags) );
+_PROTOTYPE( phys_clicks alloc_mem, (phys_clicks clicks, u32_t flags) );
+_PROTOTYPE( struct memlist *alloc_mem_in_list, (phys_bytes bytes, u32_t flags));
_PROTOTYPE( int do_adddma, (message *msg) );
_PROTOTYPE( int do_deldma, (message *msg) );
_PROTOTYPE( int do_getdma, (message *msg) );
_PROTOTYPE( void usedpages_reset, (void) );
_PROTOTYPE( int usedpages_add_f, (phys_bytes phys, phys_bytes len,
char *file, int line) );
-_PROTOTYPE( void free_mem_f, (phys_clicks base, phys_clicks clicks) );
+_PROTOTYPE( void free_mem, (phys_clicks base, phys_clicks clicks) );
+_PROTOTYPE( void free_mem_list, (struct memlist *list, int all));
+_PROTOTYPE( void print_mem_list, (struct memlist *ml));
#define usedpages_add(a, l) usedpages_add_f(a, l, __FILE__, __LINE__)
-#define ALLOC_MEM(clicks, flags) alloc_mem_f(clicks, flags)
-#define FREE_MEM(base, clicks) free_mem_f(base, clicks)
-
_PROTOTYPE( void mem_init, (struct memory *chunks) );
/* utility.c */
_PROTOTYPE( void pt_cycle, (void));
_PROTOTYPE( int pt_mapkernel, (pt_t *pt));
_PROTOTYPE( void vm_pagelock, (void *vir, int lockflag) );
+_PROTOTYPE( int vm_addrok, (void *vir, int write) );
#if SANITYCHECKS
_PROTOTYPE( void pt_sanitycheck, (pt_t *pt, char *file, int line) );
_PROTOTYPE(void slabstats,(void));
_PROTOTYPE(void slab_sanitycheck, (char *file, int line));
#define SLABALLOC(var) (var = slaballoc(sizeof(*var)))
-#define SLABFREE(ptr) slabfree(ptr, sizeof(*(ptr)))
+#define SLABFREE(ptr) do { slabfree(ptr, sizeof(*(ptr))); (ptr) = NULL; } while(0)
#if SANITYCHECKS
_PROTOTYPE(void slabunlock,(void *mem, int bytes));
_PROTOTYPE(int map_get_phys, (struct vmproc *vmp, vir_bytes addr, phys_bytes *r));
_PROTOTYPE(int map_get_ref, (struct vmproc *vmp, vir_bytes addr, u8_t *cnt));
-_PROTOTYPE(int map_copy_ph_block, (struct vmproc *vmp,
- struct vir_region *region, struct phys_region *ph));
_PROTOTYPE(void pb_unreferenced, (struct vir_region *region,
struct phys_region *pr));
_PROTOTYPE(void get_usage_info, (struct vmproc *vmp,
struct vm_usage_info *vui));
_PROTOTYPE(int get_region_info, (struct vmproc *vmp,
struct vm_region_info *vri, int count, vir_bytes *nextp));
+_PROTOTYPE(int copy_abs2region, (phys_bytes abs,
+ struct vir_region *destregion, phys_bytes offset, phys_bytes len));
#if SANITYCHECKS
_PROTOTYPE(void map_sanitycheck,(char *file, int line));
#endif
#include "region.h"
#include "sanitycheck.h"
#include "physravl.h"
+#include "memlist.h"
/* Should a physblock be mapped writable? */
#define WRITABLE(r, pb) \
(((r)->flags & (VR_DIRECT | VR_SHARED)) || \
(((r)->flags & VR_WRITABLE) && (pb)->refcount == 1))
-FORWARD _PROTOTYPE(struct phys_region *map_new_physblock, (struct vmproc *vmp,
+FORWARD _PROTOTYPE(int map_new_physblock, (struct vmproc *vmp,
struct vir_region *region, vir_bytes offset, vir_bytes length,
- phys_bytes what));
+ phys_bytes what, u32_t allocflags, int written));
FORWARD _PROTOTYPE(int map_ph_writept, (struct vmproc *vmp, struct vir_region *vr,
struct phys_region *pr));
FORWARD _PROTOTYPE(struct vir_region *map_copy_region, (struct vmproc *vmp, struct vir_region *vr));
+FORWARD _PROTOTYPE(struct phys_region *map_clone_ph_block, (struct vmproc *vmp,
+ struct vir_region *region, struct phys_region *ph, physr_iter *iter));
+
PRIVATE char *map_name(struct vir_region *vr)
{
static char name[100];
{
struct phys_block *pb = pr->ph;
int rw;
+ int r;
if(!(vmp->vm_flags & VMF_HASPT))
return OK;
else
rw = 0;
- return pt_writemap(&vmp->vm_pt, vr->vaddr + pr->offset,
+ r = pt_writemap(&vmp->vm_pt, vr->vaddr + pr->offset,
pb->phys, pb->length, PTF_PRESENT | PTF_USER | rw, WMF_VERIFY);
+
+ if(r != OK) {
+ printf("proc %d phys_region 0x%lx sanity check failed\n",
+ vmp->vm_endpoint, pr->offset);
+ map_printregion(vmp, vr);
+ }
+
+ return r;
}
/*===========================================================================*
struct phys_region *ph;
physr_avl *phavl;
+ vm_assert(!(length % VM_PAGE_SIZE));
+
SANITYCHECK(SCL_FUNCTIONS);
startv = region_find_slot(vmp, minv, maxv, length, &prevregion);
/* If we know what we're going to map to, map it right away. */
if(what != MAP_NONE) {
- struct phys_region *pr;
+ vm_assert(what); /* mapping in 0 is unlikely to be right */
vm_assert(!(what % VM_PAGE_SIZE));
- vm_assert(!(length % VM_PAGE_SIZE));
vm_assert(!(startv % VM_PAGE_SIZE));
vm_assert(!(mapflags & MF_PREALLOC));
- if(!(pr=map_new_physblock(vmp, newregion, 0, length, what))) {
+ if(map_new_physblock(vmp, newregion, 0, length,
+ what, PAF_CLEAR, 0) != OK) {
printf("VM: map_new_physblock failed\n");
- SLABFREE(newregion->phys);
- SLABFREE(newregion);
- return NULL;
- }
- if(map_ph_writept(vmp, newregion, pr) != OK) {
- printf("VM: map_region_writept failed\n");
+ USE(newregion,
+ SLABFREE(newregion->phys););
SLABFREE(newregion);
return NULL;
}
if((flags & VR_ANON) && (mapflags & MF_PREALLOC)) {
if(map_handle_memory(vmp, newregion, 0, length, 1) != OK) {
printf("VM: map_page_region: prealloc failed\n");
- SLABFREE(newregion->phys);
+ USE(newregion,
+ SLABFREE(newregion->phys););
SLABFREE(newregion);
return NULL;
}
if(pb->refcount == 0) {
vm_assert(!pb->firstregion);
if(region->flags & VR_ANON) {
- FREE_MEM(ABS2CLICK(pb->phys),
+ free_mem(ABS2CLICK(pb->phys),
ABS2CLICK(pb->length));
} else if(region->flags & VR_DIRECT) {
; /* No action required. */
panic("strange phys flags");
}
SLABFREE(pb);
+ } else {
+ struct phys_region *others;
+ int n = 0;
+
+ for(others = pb->firstregion; others;
+ others = others->next_ph_list) {
+ if(WRITABLE(region, others->ph)) {
+ if(map_ph_writept(others->parent->parent,
+ others->parent, others) != OK) {
+ printf("VM: map_ph_writept failed unexpectedly\n");
+ }
+ }
+ n++;
+ }
+ vm_assert(n == pb->refcount);
}
}
+PRIVATE struct phys_region *reset_physr_iter(struct vir_region *region,
+ physr_iter *iter, vir_bytes offset)
+{
+ struct phys_region *ph;
+
+ physr_start_iter(region->phys, iter, offset, AVL_EQUAL);
+ ph = physr_get_iter(iter);
+ vm_assert(ph);
+ vm_assert(ph->offset == offset);
+
+ return ph;
+}
+
/*===========================================================================*
* map_subfree *
*===========================================================================*/
struct phys_region *pr, *nextpr;
physr_iter iter;
+
#if SANITYCHECKS
{
physr_start_iter_least(region->phys, &iter);
vm_assert(sublen < pr->ph->length);
if(pr->ph->refcount > 1) {
int r;
- r = map_copy_ph_block(vmp, region, pr);
- if(r != OK)
- return r;
+ if(!(pr = map_clone_ph_block(vmp, region,
+ pr, &iter)))
+ return ENOMEM;
}
vm_assert(pr->ph->refcount == 1);
if(!(region->flags & VR_DIRECT)) {
- FREE_MEM(ABS2CLICK(pr->ph->phys), ABS2CLICK(sublen));
+ free_mem(ABS2CLICK(pr->ph->phys), ABS2CLICK(sublen));
}
USE(pr, pr->offset += sublen;);
USE(pr->ph,
{
int r;
- if((r=map_subfree(vmp, region, region->length)) != OK)
+ if((r=map_subfree(vmp, region, region->length)) != OK) {
+ printf("%d\n", __LINE__);
return r;
+ }
- SLABFREE(region->phys);
+ USE(region,
+ SLABFREE(region->phys););
SLABFREE(region);
return OK;
return NULL;
}
+PRIVATE u32_t vrallocflags(u32_t flags)
+{
+ u32_t allocflags = 0;
+
+ if(flags & VR_PHYS64K)
+ allocflags |= PAF_ALIGN64K;
+ if(flags & VR_LOWER16MB)
+ allocflags |= PAF_LOWER16MB;
+ if(flags & VR_LOWER1MB)
+ allocflags |= PAF_LOWER1MB;
+ if(flags & VR_CONTIG)
+ allocflags |= PAF_CONTIG;
+
+ return allocflags;
+}
/*===========================================================================*
* map_new_physblock *
*===========================================================================*/
-PRIVATE struct phys_region *map_new_physblock(vmp, region, offset, length, what_mem)
+PRIVATE int map_new_physblock(vmp, region, start_offset, length,
+ what_mem, allocflags, written)
struct vmproc *vmp;
struct vir_region *region;
-vir_bytes offset;
+vir_bytes start_offset;
vir_bytes length;
phys_bytes what_mem;
+u32_t allocflags;
+int written;
{
- struct phys_region *newphysr;
- struct phys_block *newpb;
- phys_bytes mem_clicks, clicks;
- vir_bytes mem;
+ struct memlist *memlist, given, *ml;
+ int used_memlist, r;
+ vir_bytes mapped = 0;
+ vir_bytes offset = start_offset;
SANITYCHECK(SCL_FUNCTIONS);
vm_assert(!(length % VM_PAGE_SIZE));
- NOTRUNNABLE(vmp->vm_endpoint);
-
- /* Allocate things necessary for this chunk of memory. */
- if(!SLABALLOC(newphysr))
- return NULL;
- if(!SLABALLOC(newpb)) {
- SLABFREE(newphysr);
- return NULL;
+ if((region->flags & VR_CONTIG) &&
+ (start_offset > 0 || length < region->length)) {
+ printf("VM: map_new_physblock: non-full allocation requested\n");
+ return EFAULT;
}
/* Memory for new physical block. */
- clicks = CLICKSPERPAGE * length / VM_PAGE_SIZE;
if(what_mem == MAP_NONE) {
- u32_t af = PAF_CLEAR;
- if(region->flags & VR_PHYS64K)
- af |= PAF_ALIGN64K;
- if(region->flags & VR_LOWER16MB)
- af |= PAF_LOWER16MB;
- if(region->flags & VR_LOWER1MB)
- af |= PAF_LOWER1MB;
- if((mem_clicks = ALLOC_MEM(clicks, af)) == NO_MEM) {
- SLABFREE(newpb);
- SLABFREE(newphysr);
+ allocflags |= vrallocflags(region->flags);
+
+ if(!(memlist = alloc_mem_in_list(length, allocflags))) {
printf("map_new_physblock: couldn't allocate\n");
- return NULL;
+ return ENOMEM;
}
- mem = CLICK2ABS(mem_clicks);
+ used_memlist = 1;
} else {
- mem = what_mem;
+ given.phys = what_mem;
+ given.length = length;
+ given.next = NULL;
+ memlist = &given;
+ used_memlist = 0;
+ vm_assert(given.phys);
+ vm_assert(given.length);
}
- SANITYCHECK(SCL_DETAIL);
- /* New physical block. */
- USE(newpb,
- newpb->phys = mem;
- newpb->refcount = 1;
- newpb->length = length;
- newpb->firstregion = newphysr;);
-
- /* New physical region. */
- USE(newphysr,
- newphysr->offset = offset;
- newphysr->ph = newpb;
- newphysr->parent = region;
- newphysr->next_ph_list = NULL; /* No other references to this block. */);
+ r = OK;
+
+ for(ml = memlist; ml; ml = ml->next) {
+ vm_assert(ml->phys);
+ vm_assert(ml->length);
+ }
+
+ for(ml = memlist; ml; ml = ml->next) {
+ struct phys_region *newphysr = NULL;
+ struct phys_block *newpb = NULL;
+
+ /* Allocate things necessary for this chunk of memory. */
+ if(!SLABALLOC(newphysr) || !SLABALLOC(newpb)) {
+ printf("map_new_physblock: no memory for the ph slabs\n");
+ if(newphysr) SLABFREE(newphysr);
+ if(newpb) SLABFREE(newpb);
+ r = ENOMEM;
+ break;
+ }
+
+ vm_assert(ml->phys);
+ vm_assert(ml->length);
+
+ /* New physical block. */
+ vm_assert(!(ml->phys % VM_PAGE_SIZE));
+
+ USE(newpb,
+ newpb->phys = ml->phys;
+ newpb->refcount = 1;
+ newpb->length = ml->length;
+ newpb->firstregion = newphysr;);
+
+ /* New physical region. */
+ USE(newphysr,
+ newphysr->offset = offset;
+ newphysr->ph = newpb;
+ newphysr->parent = region;
+ /* No other references to this block. */
+ newphysr->next_ph_list = NULL;);
#if SANITYCHECKS
- USE(newphysr, newphysr->written = 0;);
+ USE(newphysr, newphysr->written = written;);
#endif
- /* Update pagetable. */
- vm_assert(!(length % VM_PAGE_SIZE));
- vm_assert(!(newpb->length % VM_PAGE_SIZE));
- SANITYCHECK(SCL_DETAIL);
- if(map_ph_writept(vmp, region, newphysr) != OK) {
- if(what_mem == MAP_NONE)
- FREE_MEM(mem_clicks, clicks);
- SLABFREE(newpb);
- SLABFREE(newphysr);
- printf("map_new_physblock: map_ph_writept failed\n");
- return NULL;
+ /* Update pagetable. */
+ if(map_ph_writept(vmp, region, newphysr) != OK) {
+ printf("map_new_physblock: map_ph_writept failed\n");
+ r = ENOMEM;
+ break;
+ }
+
+ physr_insert(region->phys, newphysr);
+
+ offset += ml->length;
+ mapped += ml->length;
}
- physr_insert(region->phys, newphysr);
+ if(used_memlist) {
+ if(r != OK) {
+ offset = start_offset;
+ /* Things did not go well. Undo everything. */
+ for(ml = memlist; ml; ml = ml->next) {
+ struct phys_region *physr;
+ /* Blocks were inserted starting at start_offset;
+ * undo at the start of each chunk, then advance.
+ */
+ if((physr = physr_search(region->phys, offset,
+ AVL_EQUAL))) {
+ vm_assert(physr->ph->refcount == 1);
+ pb_unreferenced(region, physr);
+ physr_remove(region->phys, physr->offset);
+ SLABFREE(physr);
+ }
+ offset += ml->length;
+ }
+ } else vm_assert(mapped == length);
+
+ /* Always clean up the memlist itself; even if everything
+ * worked, we're not using the memlist nodes any more. The
+ * memory they reference is either freed above or in use.
+ */
+ free_mem_list(memlist, 0);
+ }
SANITYCHECK(SCL_FUNCTIONS);
- return newphysr;
+ return r;
}
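+/* Note: on failure map_new_physblock() undoes its own partial work and
+ * frees the memlist, so callers only need to check the return value.
+ */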
-
/*===========================================================================*
- * map_copy_ph_block *
+ * map_clone_ph_block *
*===========================================================================*/
-PUBLIC int map_copy_ph_block(vmp, region, ph)
+PRIVATE struct phys_region *map_clone_ph_block(vmp, region, ph, iter)
struct vmproc *vmp;
struct vir_region *region;
struct phys_region *ph;
+physr_iter *iter;
{
- int r;
- phys_bytes newmem, newmem_cl, clicks;
- struct phys_block *newpb;
- u32_t af = 0;
-
- /* This is only to be done if there is more than one copy. */
- vm_assert(ph->ph->refcount > 1);
+ vir_bytes offset, length;
+ u32_t allocflags;
+ phys_bytes physaddr;
+ struct phys_region *newpr;
+ int written = 0;
+#if SANITYCHECKS
+ written = ph->written;
+#endif
+ SANITYCHECK(SCL_FUNCTIONS);
- /* Do actual copy on write; allocate new physblock. */
- if(!SLABALLOC(newpb)) {
- printf("VM: map_copy_ph_block: couldn't allocate newpb\n");
- return ENOMEM;
- }
+ /* Warning: this function will free the passed
+ * phys_region *ph and replace it (in the same offset)
+ * with one or more others! So both the pointer to it
+ * and any iterators over the phys_regions in the vir_region
+ * will be invalid on successful return. (Iterators over
+ * the vir_region could be invalid on unsuccessful return too.)
+ */
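+ /* The safe calling pattern is therefore to re-assign both the
+ * pointer and the iterator from the return value, as
+ * map_handle_memory() does below:
+ *
+ *	if(!(physr = map_clone_ph_block(vmp, region, physr, &iter)))
+ *		return ENOMEM;
+ */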
- clicks = CLICKSPERPAGE * ph->ph->length / VM_PAGE_SIZE;
- vm_assert(CLICK2ABS(clicks) == ph->ph->length);
- if(region->flags & VR_PHYS64K)
- af |= PAF_ALIGN64K;
+ /* This function takes a physical block, copies its contents
+ * into newly allocated memory, and replaces the single physical
+ * block by one or more physical blocks with refcount 1 with the
+ * same contents as the original. In other words, a fragmentable
+ * version of map_copy_ph_block().
+ */
- NOTRUNNABLE(vmp->vm_endpoint);
+ /* Remember where and how much. */
+ offset = ph->offset;
+ length = ph->ph->length;
+ physaddr = ph->ph->phys;
- if((newmem_cl = ALLOC_MEM(clicks, af)) == NO_MEM) {
- printf("VM: map_copy_ph_block: couldn't allocate new block\n");
- SLABFREE(newpb);
- return ENOMEM;
- }
- newmem = CLICK2ABS(newmem_cl);
- vm_assert(ABS2CLICK(newmem) == newmem_cl);
+ /* Now unlink the original physical block so we can replace
+ * it with new ones.
+ */
+ SANITYCHECK(SCL_DETAIL);
+ SLABSANE(ph);
+ SLABSANE(ph->ph);
+ vm_assert(ph->ph->refcount > 1);
pb_unreferenced(region, ph);
- vm_assert(ph->ph->refcount > 0);
-
-USE(newpb,
- newpb->length = ph->ph->length;
- newpb->refcount = 1;
- newpb->phys = newmem;
- newpb->firstregion = ph;);
+ vm_assert(ph->ph->refcount >= 1);
+ physr_remove(region->phys, offset);
+ SLABFREE(ph);
- USE(ph, ph->next_ph_list = NULL;);
+ SANITYCHECK(SCL_DETAIL);
- NOTRUNNABLE(vmp->vm_endpoint);
+ /* Put new free memory in. */
+ allocflags = vrallocflags(region->flags);
+ vm_assert(!(allocflags & PAF_CONTIG));
+ vm_assert(!(allocflags & PAF_CLEAR));
- /* Copy old memory to new memory. */
- if((r=sys_abscopy(ph->ph->phys, newpb->phys, newpb->length)) != OK) {
- printf("VM: map_copy_ph_block: sys_abscopy failed\n");
- return r;
+ if(map_new_physblock(vmp, region, offset, length,
+ MAP_NONE, allocflags, written) != OK) {
+ /* XXX original range now gone. */
+ printf("VM: map_clone_ph_block: map_new_physblock failed.\n");
+ return NULL;
}
-#if VMSTATS
- vmp->vm_bytecopies += newpb->length;
-#endif
+ /* Copy the block to the new memory.
+ * Can only fail if map_new_physblock didn't do what we asked.
+ */
+ if(copy_abs2region(physaddr, region, offset, length) != OK)
+ panic("copy_abs2region failed, no good reason for that");
- /* Reference new block. */
- USE(ph, ph->ph = newpb;);
+ newpr = physr_search(region->phys, offset, AVL_EQUAL);
+ vm_assert(newpr);
+ vm_assert(newpr->offset == offset);
- /* Update pagetable with new address.
- * This will also make it writable.
- */
- r = map_ph_writept(vmp, region, ph);
- if(r != OK)
- panic("map_copy_ph_block: map_ph_writept failed: %d", r);
+ if(iter) {
+ physr_start_iter(region->phys, iter, offset, AVL_EQUAL);
+ vm_assert(physr_get_iter(iter) == newpr);
+ }
- return OK;
+ SANITYCHECK(SCL_FUNCTIONS);
+
+ return newpr;
}
+
/*===========================================================================*
* map_pf *
*===========================================================================*/
SANITYCHECK(SCL_FUNCTIONS);
- NOTRUNNABLE(vmp->vm_endpoint);
-
if((ph = physr_search(region->phys, offset, AVL_LESS_EQUAL)) &&
(ph->offset <= offset && offset < ph->offset + ph->ph->length)) {
+ phys_bytes blockoffset = ph->offset;
/* Pagefault in existing block. Do copy-on-write. */
vm_assert(write);
vm_assert(region->flags & VR_WRITABLE);
printf("VM: write RO mapped pages.\n");
return EFAULT;
} else {
- r = map_copy_ph_block(vmp, region, ph);
+ if(!map_clone_ph_block(vmp, region, ph, NULL))
+ r = ENOMEM;
}
}
} else {
/* Pagefault in non-existing block. Map in new block. */
- if(!map_new_physblock(vmp, region, virpage, VM_PAGE_SIZE, MAP_NONE)) {
+ if(map_new_physblock(vmp, region, virpage,
+ VM_PAGE_SIZE, MAP_NONE, PAF_CLEAR, 0) != OK) {
printf("map_new_physblock failed\n");
r = ENOMEM;
}
}
#if SANITYCHECKS
- if(OK != pt_checkrange(&vmp->vm_pt, region->vaddr+offset, VM_PAGE_SIZE, write)) {
+ if(OK != pt_checkrange(&vmp->vm_pt, region->vaddr+virpage,
+ VM_PAGE_SIZE, write)) {
panic("map_pf: pt_checkrange failed: %d", r);
}
#endif
int changes = 0;
physr_iter iter;
- NOTRUNNABLE(vmp->vm_endpoint);
-
#define FREE_RANGE_HERE(er1, er2) { \
struct phys_region *r1 = (er1), *r2 = (er2); \
vir_bytes start = offset, end = offset + length; \
if(start < end) { \
int r; \
SANITYCHECK(SCL_DETAIL); \
- if(!map_new_physblock(vmp, region, start, \
- end-start, MAP_NONE) != OK) { \
+ if(map_new_physblock(vmp, region, start, \
+ end-start, MAP_NONE, PAF_CLEAR, 0) != OK) { \
SANITYCHECK(SCL_DETAIL); \
return ENOMEM; \
} \
physr = physr_get_iter(&iter);
}
-#define RESET_ITER(it, where, what) { \
- physr_start_iter(region->phys, &it, where, AVL_EQUAL); \
- what = physr_get_iter(&it); \
- if(!what) panic("thing missing"); \
- if(what->offset != where) panic("thing wrong"); \
-}
-
FREE_RANGE_HERE(NULL, physr);
if(physr) {
- RESET_ITER(iter, physr->offset, physr);
+ physr = reset_physr_iter(region, &iter, physr->offset);
if(physr->offset + physr->ph->length <= offset) {
physr_incr_iter(&iter);
physr = physr_get_iter(&iter);
FREE_RANGE_HERE(NULL, physr);
if(physr) {
- RESET_ITER(iter, physr->offset, physr);
+ physr = reset_physr_iter(region, &iter,
+ physr->offset);
}
}
}
if(write) {
vm_assert(physr->ph->refcount > 0);
if(!WRITABLE(region, physr->ph)) {
- SANITYCHECK(SCL_DETAIL);
- r = map_copy_ph_block(vmp, region, physr);
- if(r != OK) {
+ if(!(physr = map_clone_ph_block(vmp, region,
+ physr, &iter))) {
printf("VM: map_handle_memory: no copy\n");
- return r;
+ return ENOMEM;
}
changes++;
- SANITYCHECK(SCL_DETAIL);
} else {
SANITYCHECK(SCL_DETAIL);
if((r=map_ph_writept(vmp, region, physr)) != OK) {
if(nextphysr) {
if(nextphysr->offset >= offset + length)
break;
- RESET_ITER(iter, nextphysr->offset, nextphysr);
+ nextphysr = reset_physr_iter(region, &iter,
+ nextphysr->offset);
}
physr = nextphysr;
}
USE(newph, newph->written = 0;);
#endif
physr_insert(newvr->phys, newph);
+#if SANITYCHECKS
vm_assert(countregions(vr) == cr);
+#endif
physr_incr_iter(&iter);
}
+#if SANITYCHECKS
vm_assert(countregions(vr) == countregions(newvr));
+#endif
return newvr;
}
+/*===========================================================================*
+ * copy_abs2region *
+ *===========================================================================*/
+PUBLIC int copy_abs2region(phys_bytes abs, struct vir_region *destregion,
+ phys_bytes offset, phys_bytes len)
+
+{
+ vm_assert(destregion);
+ vm_assert(destregion->phys);
+ while(len > 0) {
+ phys_bytes sublen, suboffset;
+ struct phys_region *ph;
+ vm_assert(destregion);
+ vm_assert(destregion->phys);
+ if(!(ph = physr_search(destregion->phys, offset, AVL_LESS_EQUAL))) {
+ printf("VM: copy_abs2region: no phys region found (1).\n");
+ return EFAULT;
+ }
+ vm_assert(ph->offset <= offset);
+ if(ph->offset+ph->ph->length <= offset) {
+ printf("VM: copy_abs2region: no phys region found (2).\n");
+ return EFAULT;
+ }
+ suboffset = offset - ph->offset;
+ vm_assert(suboffset < ph->ph->length);
+ sublen = len;
+ if(sublen > ph->ph->length - suboffset)
+ sublen = ph->ph->length - suboffset;
+ vm_assert(suboffset + sublen <= ph->ph->length);
+ if(ph->ph->refcount != 1) {
+ printf("VM: copy_abs2region: no phys region found (3).\n");
+ return EFAULT;
+ }
+
+ if(sys_abscopy(abs, ph->ph->phys + suboffset, sublen) != OK) {
+ printf("VM: copy_abs2region: abscopy failed.\n");
+ return EFAULT;
+ }
+ abs += sublen;
+ offset += sublen;
+ len -= sublen;
+ }
+
+ return OK;
+}
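+/* copy_abs2region() replaces plain sys_abscopy() for region-backed
+ * destinations: with the list allocator a region may span several
+ * physical blocks, so the copy has to walk the phys regions. fork.c
+ * above uses it to copy the parent's text, heap and stack, e.g.:
+ *
+ *	copy_abs2region(CLICK2ABS(vmp->vm_arch.vm_seg[T].mem_phys),
+ *		text, 0, text_bytes);
+ */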
+
/*=========================================================================*
* map_writept *
*=========================================================================*/
SANITYCHECK(SCL_FUNCTIONS);
- PT_SANE(&src->vm_pt);
for(vr = src->vm_regions; vr; vr = vr->next) {
physr_iter iter_orig, iter_new;
/* If the phys block has been shared as SMAP,
* do the regular copy. */
if(pb->refcount > 2 && pb->share_flag == PBSH_SMAP) {
- map_copy_ph_block(dst, newvr, new_ph);
+ map_clone_ph_block(dst, newvr,new_ph,
+ &iter_new);
} else {
- pb->share_flag = PBSH_COW;
+ USE(pb, pb->share_flag = PBSH_COW;);
}
/* Get next new physregion */
SANITYCHECK(SCL_FUNCTIONS);
+ vm_assert(region->flags & VR_SHARED);
+
/* da is handled differently */
if (!da)
dst_addr = dvmp->vm_stacktop;
prev = NULL;
/* round up to page size */
- if (size % I386_PAGE_SIZE)
- size += I386_PAGE_SIZE - size % I386_PAGE_SIZE;
+ vm_assert(!(size % VM_PAGE_SIZE));
startv = region_find_slot(dvmp, dst_addr, VM_DATATOP, size, &prev);
if (startv == (vir_bytes) -1) {
- printf("map_remap: search %x...\n", dst_addr);
+ printf("map_remap: search 0x%x...\n", dst_addr);
map_printmap(dvmp);
return ENOMEM;
}
physr_start_iter_least(vr->phys, &iter);
while((ph = physr_get_iter(&iter))) {
struct phys_block *pb = ph->ph;
+ vm_assert(!ph->next_ph_list);
+ USE(ph, ph->next_ph_list = pb->firstregion;);
+ USE(pb, pb->firstregion = ph;);
USE(pb, pb->refcount++;);
if(map_ph_writept(dvmp, vr, ph) != OK) {
panic("map_remap: map_ph_writept failed");
}
-
physr_incr_iter(&iter);
}
return;
}
+/*===========================================================================*
+ * do_map_memory *
+ *===========================================================================*/
+PRIVATE int do_map_memory(struct vmproc *vms, struct vmproc *vmd,
+ struct vir_region *vrs, struct vir_region *vrd,
+ vir_bytes offset_s, vir_bytes offset_d,
+ vir_bytes length, int flag)
+{
+ struct phys_region *prs;
+ struct phys_region *newphysr;
+ struct phys_block *pb;
+ physr_iter iter;
+ u32_t pt_flag = PTF_PRESENT | PTF_USER;
+ vir_bytes end;
+
+ SANITYCHECK(SCL_FUNCTIONS);
+
+ /* Search for the first phys region in the source process. */
+ physr_start_iter(vrs->phys, &iter, offset_s, AVL_EQUAL);
+ prs = physr_get_iter(&iter);
+ if(!prs)
+		panic("do_map_memory: no aligned phys region");
+
+ /* flag: 0 -> read-only
+ * 1 -> writable
+ * -1 -> share as COW, so read-only
+ */
+ if(flag > 0)
+ pt_flag |= PTF_WRITE;
+
+ /* Map phys blocks in the source process to the destination process. */
+ end = offset_d + length;
+ while((prs = physr_get_iter(&iter)) && offset_d < end) {
+ /* If a SMAP share was requested but the phys block has already
+ * been shared as COW, copy the block for the source phys region
+ * first.
+ */
+ pb = prs->ph;
+ if(flag >= 0 && pb->refcount > 1
+ && pb->share_flag == PBSH_COW) {
+ if(!(prs = map_clone_ph_block(vms, vrs, prs, &iter)))
+ return ENOMEM;
+ pb = prs->ph;
+ }
+
+ /* Allocate a new phys region. */
+ if(!SLABALLOC(newphysr))
+ return ENOMEM;
+
+ /* Set and link the new phys region to the block. */
+ newphysr->ph = pb;
+ newphysr->offset = offset_d;
+ newphysr->parent = vrd;
+ newphysr->next_ph_list = pb->firstregion;
+ pb->firstregion = newphysr;
+ physr_insert(newphysr->parent->phys, newphysr);
+ pb->refcount++;
+
+ /* If a COW share was requested but the phys block has already
+ * been shared as SMAP, give up on COW and copy the block for
+ * the destination phys region now.
+ */
+ if(flag < 0 && pb->refcount > 1
+ && pb->share_flag == PBSH_SMAP) {
+ if(!(newphysr = map_clone_ph_block(vmd, vrd,
+ newphysr, NULL))) {
+ return ENOMEM;
+ }
+		} else {
+ /* See if this is a COW share or SMAP share. */
+ if(flag < 0) { /* COW share */
+ pb->share_flag = PBSH_COW;
+ /* Update the page table for the src process. */
+ pt_writemap(&vms->vm_pt, offset_s + vrs->vaddr,
+ pb->phys, pb->length,
+ pt_flag, WMF_OVERWRITE);
+			} else {	/* SMAP share */
+ pb->share_flag = PBSH_SMAP;
+ }
+ /* Update the page table for the destination process. */
+ pt_writemap(&vmd->vm_pt, offset_d + vrd->vaddr,
+ pb->phys, pb->length, pt_flag, WMF_OVERWRITE);
+ }
+
+ physr_incr_iter(&iter);
+ offset_d += pb->length;
+ offset_s += pb->length;
+ }
+
+ SANITYCHECK(SCL_FUNCTIONS);
+
+ return OK;
+}
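
The flag convention above maps onto page-table bits in one place. A minimal
model of that computation, assuming illustrative PTF_* values rather than
the ones from VM's pagetable headers:

#include <stdio.h>

/* flag 0 -> read-only SMAP share, 1 -> writable SMAP share,
 * -1 -> COW share, which must stay read-only so that the first write
 * faults and triggers the copy.
 */
#define PTF_PRESENT	0x01
#define PTF_WRITE	0x02
#define PTF_USER	0x04

static unsigned int share_pt_flags(int flag)
{
	unsigned int pt_flag = PTF_PRESENT | PTF_USER;

	if(flag > 0)
		pt_flag |= PTF_WRITE;
	return pt_flag;
}

int main(void)
{
	printf("ro=0x%x rw=0x%x cow=0x%x\n",
		share_pt_flags(0), share_pt_flags(1), share_pt_flags(-1));
	return 0;
}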
+
+/*===========================================================================*
+ * unmap_memory *
+ *===========================================================================*/
+PUBLIC int unmap_memory(endpoint_t sour, endpoint_t dest,
+ vir_bytes virt_s, vir_bytes virt_d, vir_bytes length, int flag)
+{
+ struct vmproc *vmd;
+ struct vir_region *vrd;
+ struct phys_region *pr;
+ struct phys_block *pb;
+ physr_iter iter;
+ vir_bytes off, end;
+ int p;
+
+ /* Use information on the destination process to unmap. */
+ if(vm_isokendpt(dest, &p) != OK)
+ panic("unmap_memory: bad endpoint: %d", dest);
+ vmd = &vmproc[p];
+
+ vrd = map_lookup(vmd, virt_d);
+ vm_assert(vrd);
+
+ /* Search for the first phys region in the destination process. */
+ off = virt_d - vrd->vaddr;
+ physr_start_iter(vrd->phys, &iter, off, AVL_EQUAL);
+ pr = physr_get_iter(&iter);
+ if(!pr)
+		panic("unmap_memory: no aligned phys region");
+
+ /* Copy the phys block now rather than doing COW. */
+ end = off + length;
+ while((pr = physr_get_iter(&iter)) && off < end) {
+ pb = pr->ph;
+ vm_assert(pb->refcount > 1);
+ vm_assert(pb->share_flag == PBSH_SMAP);
+
+ if(!(pr = map_clone_ph_block(vmd, vrd, pr, &iter)))
+ return ENOMEM;
+
+ physr_incr_iter(&iter);
+ off += pb->length;
+ }
+
+ return OK;
+}
+
+/*===========================================================================*
+ * split_phys *
+ *===========================================================================*/
+PRIVATE int split_phys(struct phys_region *pr, vir_bytes point)
+{
+/* Split the phys region into two parts at @point. */
+	struct phys_region *newpr, *q, *prev = NULL;
+	struct phys_block *newpb;
+	struct phys_block *pb = pr->ph;
+
+ if(pr->offset >= point || pr->offset + pb->length <= point)
+ return OK;
+ if(!SLABALLOC(newpb))
+ return ENOMEM;
+
+ /* Split phys block. */
+ *newpb = *pb;
+ pb->length = point - pr->offset;
+ newpb->length -= pb->length;
+ newpb->phys += pb->length;
+
+	/* Split all phys regions that refer to this block. */
+ for(q = pb->firstregion; q; q = q->next_ph_list) {
+ if(!SLABALLOC(newpr))
+ return ENOMEM;
+
+ *newpr = *q;
+ newpr->ph = newpb;
+ newpr->offset += pb->length;
+
+ /* Link to the vir region's phys region list. */
+ physr_insert(newpr->parent->phys, newpr);
+
+ /* Link to the next_ph_list. */
+ if(q == pb->firstregion) {
+ newpb->firstregion = newpr;
+ prev = newpr;
+ } else {
+ prev->next_ph_list = newpr;
+ prev = newpr;
+ }
+ }
+	vm_assert(prev);
+	prev->next_ph_list = NULL;
+
+ return OK;
+}
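
The split arithmetic keeps both halves consistent: the low half keeps the
original physical address and shrinks to point - offset bytes, and the high
half starts right after it. A stand-alone model of just that arithmetic
(struct blk and split_at are illustrative, not VM's phys_block):

#include <assert.h>
#include <stdio.h>

struct blk { unsigned long phys, length; };

static void split_at(struct blk *lo, struct blk *hi, unsigned long offset,
	unsigned long point)
{
	assert(point > offset && point < offset + lo->length);
	*hi = *lo;
	lo->length = point - offset;	/* low half shrinks */
	hi->length -= lo->length;	/* high half gets the remainder */
	hi->phys += lo->length;		/* ...starting right after the cut */
}

int main(void)
{
	struct blk lo = { 0x100000, 0x4000 }, hi;

	split_at(&lo, &hi, 0x8000, 0x9000);	/* cut 0x1000 bytes in */
	printf("lo: phys=0x%lx len=0x%lx\n", lo.phys, lo.length);
	printf("hi: phys=0x%lx len=0x%lx\n", hi.phys, hi.length);
	return 0;
}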
+
+/*===========================================================================*
+ * clean_phys_regions *
+ *===========================================================================*/
+PRIVATE void clean_phys_regions(struct vir_region *region,
+ vir_bytes offset, vir_bytes length)
+{
+/* Consider @offset as the start address and @offset+length as the end address.
+ * If there are phys regions crossing the start address or the end address,
+ * split them into 2 parts.
+ *
+ * We assume that the phys regions are listed in order and don't overlap.
+ */
+ struct phys_region *pr;
+ physr_iter iter;
+
+ physr_start_iter_least(region->phys, &iter);
+ while((pr = physr_get_iter(&iter))) {
+ /* If this phys region crosses the start address, split it. */
+ if(pr->offset < offset
+ && pr->offset + pr->ph->length > offset) {
+ split_phys(pr, offset);
+ physr_start_iter_least(region->phys, &iter);
+ }
+ /* If this phys region crosses the end address, split it. */
+ else if(pr->offset < offset + length
+ && pr->offset + pr->ph->length > offset + length) {
+ split_phys(pr, offset + length);
+ physr_start_iter_least(region->phys, &iter);
+ }
+ else {
+ physr_incr_iter(&iter);
+ }
+ }
+}
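
The crossing test applied at both boundaries is strict: a phys region whose
edge falls exactly on a boundary needs no split. A runnable model of the
predicate (crosses is an illustrative name):

#include <stdio.h>

/* Does a region [off, off+len) strictly cross address a? */
static int crosses(unsigned long off, unsigned long len, unsigned long a)
{
	return off < a && off + len > a;
}

int main(void)
{
	/* Region [0x2000, 0x6000) crosses 0x3000, but a boundary at
	 * either edge leaves it intact. */
	printf("%d %d %d\n",
		crosses(0x2000, 0x4000, 0x3000),	/* 1 */
		crosses(0x2000, 0x4000, 0x2000),	/* 0 */
		crosses(0x2000, 0x4000, 0x6000));	/* 0 */
	return 0;
}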
+
+/*===========================================================================*
+ * rm_phys_regions *
+ *===========================================================================*/
+PRIVATE void rm_phys_regions(struct vir_region *region,
+ vir_bytes begin, vir_bytes length)
+{
+/* Remove all phys regions between @begin and @begin+length.
+ *
+ * Don't update the page table here; do_map_memory() will rewrite the
+ * mappings afterwards.
+ */
+ struct phys_region *pr;
+ physr_iter iter;
+
+ physr_start_iter(region->phys, &iter, begin, AVL_GREATER_EQUAL);
+ while((pr = physr_get_iter(&iter)) && pr->offset < begin + length) {
+ pb_unreferenced(region, pr);
+ physr_remove(region->phys, pr->offset);
+ physr_start_iter(region->phys, &iter, begin,
+ AVL_GREATER_EQUAL);
+ SLABFREE(pr);
+ }
+}
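
Both clean_phys_regions() and rm_phys_regions() restart their iterator after
every mutation instead of advancing it, because splitting or removing a node
invalidates the iteration state. A sketch of the same remove-and-restart
pattern over a singly linked list standing in for the physr AVL tree (all
names below are illustrative):

#include <stdio.h>
#include <stdlib.h>

struct node { unsigned long offset; struct node *next; };

static struct node *first_at_or_after(struct node *head, unsigned long key)
{
	while(head && head->offset < key)
		head = head->next;
	return head;
}

static void remove_range(struct node **head, unsigned long begin,
	unsigned long length)
{
	struct node *n;

	while((n = first_at_or_after(*head, begin)) &&
	    n->offset < begin + length) {
		/* Unlink and free, then restart the search: the old
		 * position is stale once the structure has changed. */
		struct node **pp = head;
		while(*pp != n)
			pp = &(*pp)->next;
		*pp = n->next;
		free(n);
	}
}

int main(void)
{
	static unsigned long offs[] = { 0x1000, 0x2000, 0x3000, 0x4000 };
	struct node *head = NULL, **pp = &head;
	int i;

	for(i = 0; i < 4; i++) {
		struct node *n = malloc(sizeof(*n));
		n->offset = offs[i];
		n->next = NULL;
		*pp = n;
		pp = &n->next;
	}
	remove_range(&head, 0x2000, 0x2000);	/* drops 0x2000, 0x3000 */
	for(; head; head = head->next)
		printf("0x%lx\n", head->offset);
	return 0;
}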
+
+/*===========================================================================*
+ * map_memory *
+ *===========================================================================*/
+PUBLIC int map_memory(endpoint_t sour, endpoint_t dest,
+ vir_bytes virt_s, vir_bytes virt_d, vir_bytes length, int flag)
+{
+/* Entry point, called by handle_memory() when VM receives a map-memory
+ * request.
+ */
+ struct vmproc *vms, *vmd;
+ struct vir_region *vrs, *vrd;
+ physr_iter iterd;
+ vir_bytes offset_s, offset_d;
+ int p;
+ int r;
+
+ if(vm_isokendpt(sour, &p) != OK)
+ panic("map_memory: bad endpoint: %d", sour);
+ vms = &vmproc[p];
+ if(vm_isokendpt(dest, &p) != OK)
+ panic("map_memory: bad endpoint: %d", dest);
+ vmd = &vmproc[p];
+
+ vrs = map_lookup(vms, virt_s);
+ vm_assert(vrs);
+ vrd = map_lookup(vmd, virt_d);
+ vm_assert(vrd);
+
+ /* Linear address -> offset from start of vir region. */
+ offset_s = virt_s - vrs->vaddr;
+ offset_d = virt_d - vrd->vaddr;
+
+ /* Make sure that the range in the source process has been mapped
+ * to physical memory.
+ */
+ map_handle_memory(vms, vrs, offset_s, length, 0);
+
+	/* Prepare: split phys regions crossing the range boundaries, then
+	 * remove the destination's phys regions inside the range.
+	 */
+ clean_phys_regions(vrs, offset_s, length);
+ clean_phys_regions(vrd, offset_d, length);
+ rm_phys_regions(vrd, offset_d, length);
+
+ /* Map memory. */
+ r = do_map_memory(vms, vmd, vrs, vrd, offset_s, offset_d, length, flag);
+
+ return r;
+}
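
A hypothetical usage sketch, to show the flag plumbing end to end
(share_one_page and PAGE_SZ are made up for illustration; the real callers
are VM's request handlers):

#define PAGE_SZ 4096

static int share_one_page(endpoint_t src_ep, endpoint_t dst_ep,
	vir_bytes src_va, vir_bytes dst_va)
{
	int r;

	/* flag 1: writable SMAP share (see do_map_memory() above). */
	if((r = map_memory(src_ep, dst_ep, src_va, dst_va,
		PAGE_SZ, 1)) != OK)
		return r;

	/* Later: give the destination private copies and detach. */
	return unmap_memory(src_ep, dst_ep, src_va, dst_va, PAGE_SZ, 1);
}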
+
/*========================================================================*
* map_lookup_phys *
*========================================================================*/
return pr->ph->phys;
}
+
#define VR_PHYS64K 0x004 /* Physical memory must be 64k aligned. */
#define VR_LOWER16MB 0x008
#define VR_LOWER1MB 0x010
+#define VR_CONTIG 0x020 /* Must be physically contiguous. */
/* Mapping type: */
#define VR_ANON 0x100 /* Memory to be cleared and allocated */
/* map_page_region flags */
#define MF_PREALLOC 0x01
-#define MF_CONTIG 0x02
#endif
#include <string.h>
#include <env.h>
#include <stdio.h>
+#include <assert.h>
#include "glo.h"
#include "proto.h"
#ifndef _SANITYCHECK_H
#define _SANITYCHECK_H 1
+#include <assert.h>
+
#include "vm.h"
#include "glo.h"
slab_sanitycheck(__FILE__, __LINE__); }
#define SANITYCHECK(l) if(!nocheck && ((l) <= vm_sanitychecklevel)) { \
- struct vmproc *vmp; \
+ struct vmproc *vmpr; \
vm_assert(incheck == 0); \
incheck = 1; \
usedpages_reset(); \
slab_sanitycheck(__FILE__, __LINE__); \
- for(vmp = vmproc; vmp < &vmproc[VMP_NR]; vmp++) { \
- if((vmp->vm_flags & (VMF_INUSE | VMF_HASPT)) == \
+ for(vmpr = vmproc; vmpr < &vmproc[VMP_NR]; vmpr++) { \
+ if((vmpr->vm_flags & (VMF_INUSE | VMF_HASPT)) == \
(VMF_INUSE | VMF_HASPT)) { \
- PT_SANE(&vmp->vm_pt); \
+ PT_SANE(&vmpr->vm_pt); \
} \
} \
map_sanitycheck(__FILE__, __LINE__); \
} \
}
-#define NOTRUNNABLE(ep) { \
- struct proc pr; \
- if(sys_getproc(&pr, ep) != OK) { \
- panic("VM: sys_getproc failed: %d", ep); \
- } \
- if(!pr.p_rts_flags) { \
- panic("VM: runnable: %d", ep); \
- } \
-}
-
#else
#define SANITYCHECK
#define SLABSANITYCHECK(l)
#define USE(obj, code) do { code } while(0)
#define SLABSANE(ptr)
-#define NOTRUNNABLE(ep)
#endif
#endif
#include <minix/bitmap.h>
#include <minix/debug.h>
+#include <assert.h>
#include <errno.h>
#include <string.h>
#include <env.h>
vm_assert(s);
firstused = LH(s, LIST_USED);
vm_assert(firstused);
+#if SANITYCHECKS
vm_assert(firstused->sdh.magic1 == MAGIC1);
vm_assert(firstused->sdh.magic2 == MAGIC2);
+#endif
vm_assert(firstused->sdh.nused < ITEMSPERPAGE(bytes));
for(i = firstused->sdh.freeguess;
SLABDATAUNWRITABLE(f);
- FIXME("verify new contents");
-
return;
}
#if SANITYCHECKS
#define vm_assert(cond) { \
if(vm_sanitychecklevel > 0 && !(cond)) { \
- printf("VM:%s:%d: assert failed: %s\n", \
+ printf("VM:%s:%d: vm_assert failed: %s\n", \
__FILE__, __LINE__, #cond); \
- panic("assert failed"); \
+ panic("vm_assert failed"); \
} \
}
#else
#define PAF_ALIGN64K 0x04 /* Aligned to 64k boundary. */
#define PAF_LOWER16MB 0x08
#define PAF_LOWER1MB 0x10
+#define PAF_FIRSTBLOCK 0x20 /* alloc_mem: return first block */
+
+#define MARK do { if(mark) { printf("%d\n", __LINE__); } } while(0)
/* special value for v in pt_allocmap */
#define AM_AUTO ((u32_t) -1)
#include <pagetable.h>
#include <arch_vmproc.h>
#include <minix/bitmap.h>
+#include <machine/archtypes.h>
#include "vm.h"