vmc->vm_regions = NULL;
vmc->vm_endpoint = NONE; /* In case someone tries to use it. */
vmc->vm_pt = origpt;
- vmc->vm_flags &= ~VMF_HASPT;
+ vmc->vm_flags |= VMF_HASPT;
#if VMSTATS
vmc->vm_bytecopies = 0;
return ENOMEM;
}
- vmc->vm_flags |= VMF_HASPT;
-
SANITYCHECK(SCL_DETAIL);
if(map_proc_copy(vmc, vmp) != OK) {
/* Create a copy of the parent's core image for the child. */
child_abs = (phys_bytes) child_base << CLICK_SHIFT;
parent_abs = (phys_bytes) vmp->vm_arch.vm_seg[D].mem_phys << CLICK_SHIFT;
- FIXME("VM uses kernel for abscopy");
s = sys_abscopy(parent_abs, child_abs, prog_bytes);
if (s < 0) vm_panic("do_fork can't copy", s);
vmc->vm_arch.vm_seg[D].mem_phys = child_base;
vmc->vm_arch.vm_seg[S].mem_phys = vmc->vm_arch.vm_seg[D].mem_phys +
(vmp->vm_arch.vm_seg[S].mem_vir - vmp->vm_arch.vm_seg[D].mem_vir);
+
+ if(pt_identity(&vmc->vm_pt) != OK) {
+ printf("VM: fork: pt_identity failed\n");
+ return ENOMEM;
+ }
}
/* Only inherit these flags. */
/* Tell kernel about the (now successful) FORK. */
if((r=sys_fork(vmp->vm_endpoint, childproc,
&vmc->vm_endpoint, vmc->vm_arch.vm_seg,
- fullvm ? PFF_VMINHIBIT : 0, &msgaddr)) != OK) {
+ PFF_VMINHIBIT, &msgaddr)) != OK) {
vm_panic("do_fork can't sys_fork", r);
}
handle_memory(vmc, vir, sizeof(message), 1);
vir = arch_vir2map(vmp, msgaddr);
handle_memory(vmp, vir, sizeof(message), 1);
- if((r=pt_bind(&vmc->vm_pt, vmc)) != OK)
- vm_panic("fork can't pt_bind", r);
}
+ if((r=pt_bind(&vmc->vm_pt, vmc)) != OK)
+ vm_panic("fork can't pt_bind", r);
+
/* Inform caller of new child endpoint. */
msg->VMF_CHILD_ENDPOINT = vmc->vm_endpoint;
#include "memory.h"
/* PDE used to map in kernel, kernel physical address. */
-PRIVATE int kernel_pde = -1, pagedir_pde = -1;
-PRIVATE u32_t kern_pde_val = 0, global_bit = 0, pagedir_pde_val;
+PRIVATE int id_map_high_pde = -1, pagedir_pde = -1;
+PRIVATE u32_t global_bit = 0, pagedir_pde_val;
PRIVATE int proc_pde = 0;
return OK;
}
+/*===========================================================================*
+ * pt_identity *
+ *===========================================================================*/
+PUBLIC int pt_identity(pt_t *pt)
+{
+/* Build a page table whose directory maps the entire linear address space
+ * 1:1 using one 4MB "big page" PDE per slot, with no second-level page
+ * tables. Entries are marked present, user, and writable.
+ * NOTE(review): relies on PSE (4MB big pages) being available — confirm
+ * callers only use this path when bigpage_ok is set.
+ * Returns OK on success, ENOMEM if the page directory can't be allocated.
+ */
+ int i;
+
+ /* Allocate page directory (an already-present pt_dir is reused). */
+ if(!pt->pt_dir &&
+ !(pt->pt_dir = vm_allocpage(&pt->pt_dir_phys, VMP_PAGEDIR))) {
+ return ENOMEM;
+ }
+
+ /* Fill every directory slot with an identity-mapped 4MB big page;
+ * since big pages need no second-level table, pt_pt stays NULL.
+ */
+ for(i = 0; i < I386_VM_DIR_ENTRIES; i++) {
+ phys_bytes addr;
+ addr = I386_BIG_PAGE_SIZE*i;
+ pt->pt_dir[i] = (addr & I386_VM_ADDR_MASK_4MB) |
+ I386_VM_BIGPAGE|
+ I386_VM_USER|
+ I386_VM_PRESENT|I386_VM_WRITE;
+ pt->pt_pt[i] = NULL;
+ }
+
+ /* Where to start looking for free virtual address space? */
+ pt->pt_virtop = 0;
+
+ return OK;
+}
+
/*===========================================================================*
* pt_init *
*===========================================================================*/
-PUBLIC void pt_init(void)
+PUBLIC void pt_init(phys_bytes usedlimit)
{
/* By default, the kernel gives us a data segment with pre-allocated
* memory that then can't grow. We want to be able to allocate memory
/* Shorthand. */
newpt = &vmp->vm_pt;
+
/* Get ourselves spare pages. */
if(!(sparepages_mem = (vir_bytes) aalloc(I386_PAGE_SIZE*SPAREPAGES)))
vm_panic("pt_init: aalloc for spare failed", NO_NUM);
if(global_bit_ok)
global_bit = I386_VM_GLOBAL;
- /* Figure out kernel pde slot. */
- {
- int pde1, pde2;
- pde1 = I386_VM_PDE(KERNEL_TEXT);
- pde2 = I386_VM_PDE(KERNEL_DATA+KERNEL_DATA_LEN);
- if(pde1 != pde2)
- vm_panic("pt_init: kernel too big", NO_NUM);
-
- /* Map in kernel with this single pde value if 4MB pages
- * supported.
- */
- kern_pde_val = (KERNEL_TEXT & I386_VM_ADDR_MASK_4MB) |
- I386_VM_BIGPAGE|
- I386_VM_USER|
- I386_VM_PRESENT|I386_VM_WRITE|global_bit;
- kernel_pde = pde1;
- vm_assert(kernel_pde >= 0);
- free_pde = kernel_pde+1;
- }
+ /* The kernel and boot time processes need an identity mapping.
+ * We use full PDE's for this without separate page tables.
+ * Figure out which pde we can start using for other purposes.
+ */
+ id_map_high_pde = usedlimit / I386_BIG_PAGE_SIZE;
+
+ /* We have to make mappings up till here. */
+ free_pde = id_map_high_pde+1;
+
+ printf("map high pde: %d for limit: 0x%lx\n",
+ id_map_high_pde, usedlimit);
- /* First unused pde. */
- proc_pde = free_pde;
-
/* Initial (current) range of our virtual address space. */
lo = CLICK2ABS(vmp->vm_arch.vm_seg[T].mem_phys);
hi = CLICK2ABS(vmp->vm_arch.vm_seg[S].mem_phys +
if(pt_new(newpt) != OK)
vm_panic("pt_init: pt_new failed", NO_NUM);
- /* Old position mapped in? */
- pt_check(vmp);
-
/* Set up mappings for VM process. */
for(v = lo; v < hi; v += I386_PAGE_SIZE) {
phys_bytes addr;
kern_mappings[index].flags = flags;
kern_mappings[index].lin_addr = offset;
kern_mappings[index].flags =
- I386_VM_PRESENT | I386_VM_USER | I386_VM_WRITE;
+ I386_VM_PRESENT | I386_VM_USER | I386_VM_WRITE |
+ global_bit;
if(flags & VMMF_UNCACHED)
kern_mappings[index].flags |=
I386_VM_PWT | I386_VM_PCD;
PUBLIC int pt_mapkernel(pt_t *pt)
{
int r, i;
- static int printed = 0;
/* Any i386 page table needs to map in the kernel address space. */
vm_assert(vmproc[VMP_SYSTEM].vm_flags & VMF_INUSE);
if(bigpage_ok) {
- if(kernel_pde >= 0) {
- pt->pt_dir[kernel_pde] = kern_pde_val;
- } else
- vm_panic("VM: pt_mapkernel: no kernel pde", NO_NUM);
+ int pde;
+ for(pde = 0; pde <= id_map_high_pde; pde++) {
+ phys_bytes addr;
+ addr = pde * I386_BIG_PAGE_SIZE;
+ vm_assert((addr & I386_VM_ADDR_MASK) == addr);
+ pt->pt_dir[pde] = addr | I386_VM_PRESENT |
+ I386_VM_BIGPAGE | I386_VM_USER |
+ I386_VM_WRITE | global_bit;
+ }
} else {
vm_panic("VM: pt_mapkernel: no bigpage", NO_NUM);
-
- /* Map in text. flags: don't write, supervisor only */
- if((r=pt_writemap(pt, KERNEL_TEXT, KERNEL_TEXT, KERNEL_TEXT_LEN,
- I386_VM_PRESENT|global_bit, 0)) != OK)
- return r;
-
- /* Map in data. flags: read-write, supervisor only */
- if((r=pt_writemap(pt, KERNEL_DATA, KERNEL_DATA, KERNEL_DATA_LEN,
- I386_VM_PRESENT|I386_VM_WRITE, 0)) != OK)
- return r;
}
if(pagedir_pde >= 0) {
return OK;
}
-/*===========================================================================*
- * pt_check *
- *===========================================================================*/
-PUBLIC void pt_check(struct vmproc *vmp)
-{
- phys_bytes hi;
- hi = CLICK2ABS(vmp->vm_arch.vm_seg[S].mem_phys +
- vmp->vm_arch.vm_seg[S].mem_len);
- if(hi > (kernel_pde+1) * I386_BIG_PAGE_SIZE) {
- printf("VM: %d doesn't fit in kernel range (0x%lx)\n",
- vmp->vm_endpoint, hi);
- vm_panic("boot time processes too big", NO_NUM);
- }
-}
-
/*===========================================================================*
* pt_cycle *
*===========================================================================*/
struct memory mem_chunks[NR_MEMS];
struct boot_image image[NR_BOOT_PROCS];
struct boot_image *ip;
+ phys_bytes limit = 0;
/* Get chunks of available memory. */
get_mem_chunks(mem_chunks);
* now and make valid slot entries for them.
*/
for (ip = &image[0]; ip < &image[NR_BOOT_PROCS]; ip++) {
+ phys_bytes proclimit;
struct vmproc *vmp;
if(ip->proc_nr >= _NR_PROCS) { vm_panic("proc", ip->proc_nr); }
/* Remove this memory from the free list. */
reserve_proc_mem(mem_chunks, vmp->vm_arch.vm_seg);
+ /* Set memory limit. */
+ proclimit = CLICK2ABS(vmp->vm_arch.vm_seg[S].mem_phys +
+ vmp->vm_arch.vm_seg[S].mem_len) - 1;
+
+ if(proclimit > limit)
+ limit = proclimit;
+
vmp->vm_flags = VMF_INUSE;
vmp->vm_endpoint = ip->endpoint;
vmp->vm_stacktop =
}
/* Architecture-dependent initialization. */
- pt_init();
+ pt_init(limit);
/* Initialize tables to all physical memory. */
mem_init(mem_chunks);
GETVMP(vmp, ip->proc_nr);
- if(!(ip->flags & PROC_FULLVM)) {
- /* See if this process fits in kernel
- * mapping. VM has its own pagetable,
- * don't check it.
- */
- if(!(vmp->vm_flags & VMF_HASPT)) {
- pt_check(vmp);
- }
- continue;
- }
+ if(!(ip->flags & PROC_FULLVM))
+ continue;
old_stack =
vmp->vm_arch.vm_seg[S].mem_vir +