CLICK2ABS(stack_clicks),/* how big is stack, page-aligned */
CLICK2ABS(gap_clicks), /* how big is gap, page-aligned */
0,0, /* not preallocated */
- VM_STACKTOP /* regular stack top */
- )) != OK) {
+ VM_STACKTOP, /* regular stack top */
+ 0)) != OK) {
SANITYCHECK(SCL_DETAIL);
printf("VM: new_mem: failed\n");
if(ptok) {
phys_bytes gap_bytes, /* gap bytes, page aligned */
phys_bytes text_start, /* text starts here, if preallocated, otherwise 0 */
phys_bytes data_start, /* data starts here, if preallocated, otherwise 0 */
- phys_bytes stacktop
+ phys_bytes stacktop,
+ int prealloc_stack
)
{
int s;
vir_bytes hole_bytes;
int prealloc;
+ struct vir_region *reg;
vm_assert(!(vstart % VM_PAGE_SIZE));
vm_assert(!(text_bytes % VM_PAGE_SIZE));
#define TEXTFLAGS (PTF_PRESENT | PTF_USER)
SANITYCHECK(SCL_DETAIL);
if(text_bytes > 0) {
- if(!map_page_region(vmp, vstart, 0, text_bytes,
+ if(!(reg=map_page_region(vmp, vstart, 0, text_bytes,
text_start ? text_start : MAP_NONE,
- VR_ANON | VR_WRITABLE, text_start ? 0 : MF_PREALLOC)) {
+ VR_ANON | VR_WRITABLE, text_start ? 0 : MF_PREALLOC))) {
SANITYCHECK(SCL_DETAIL);
printf("VM: proc_new: map_page_region failed (text)\n");
map_free_proc(vmp);
SANITYCHECK(SCL_DETAIL);
return(ENOMEM);
}
+ map_region_set_tag(reg, VRT_TEXT);
SANITYCHECK(SCL_DETAIL);
}
SANITYCHECK(SCL_DETAIL);
*/
hole_bytes = stacktop - data_bytes - stack_bytes - gap_bytes;
- if(!map_page_region(vmp, vstart + text_bytes + data_bytes + hole_bytes,
+ if(!(reg=map_page_region(vmp,
+ vstart + text_bytes + data_bytes + hole_bytes,
0, stack_bytes + gap_bytes, MAP_NONE,
- VR_ANON | VR_WRITABLE, 0) != OK) {
+ VR_ANON | VR_WRITABLE, prealloc_stack ? MF_PREALLOC : 0))) {
panic("map_page_region failed for stack");
}
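+ /* Tag the stack region so it can be found again by tag lookup,
+ * e.g. map_lookup_phys(vmc, VRT_STACK) in do_fork().
+ */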
+ map_region_set_tag(reg, VRT_STACK);
+
vmp->vm_arch.vm_seg[D].mem_phys = ABS2CLICK(vstart + text_bytes);
vmp->vm_arch.vm_seg[D].mem_vir = 0;
vmp->vm_arch.vm_seg[D].mem_len = ABS2CLICK(data_bytes);
text_bytes + data_bytes + gap_bytes + hole_bytes);
vmp->vm_arch.vm_seg[S].mem_vir = ABS2CLICK(data_bytes + gap_bytes + hole_bytes);
+ /* Where are we allowed to start using the rest of the virtual
+ * address space?
+ */
+ vmp->vm_stacktop = stacktop;
+
+ vmp->vm_flags |= VMF_HASPT;
+
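+ /* sys_newmap() and pt_bind() act on a kernel process, so only
+ * install the new map if this slot is bound to an endpoint.
+ */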
+ if(vmp->vm_endpoint != NONE) {
+
/* Pretend the stack is the full size of the data segment, so
* we get a full-sized data segment, up to VM_DATATOP.
* After sys_newmap(), change the stack to what we know the
vmp->vm_arch.vm_seg[S].mem_len = (VM_DATATOP >> CLICK_SHIFT) -
vmp->vm_arch.vm_seg[S].mem_vir - ABS2CLICK(vstart) - ABS2CLICK(text_bytes);
- /* Where are we allowed to start using the rest of the virtual
- * address space?
- */
- vmp->vm_stacktop = stacktop;
-
/* What is the final size of the data segment in bytes? */
vmp->vm_arch.vm_data_top =
(vmp->vm_arch.vm_seg[S].mem_vir +
vmp->vm_arch.vm_seg[S].mem_len) << CLICK_SHIFT;
- vmp->vm_flags |= VMF_HASPT;
-
- if((s=sys_newmap(vmp->vm_endpoint, vmp->vm_arch.vm_seg)) != OK)
- panic("sys_newmap (vm) failed: %d", s);
-
- if((s=pt_bind(&vmp->vm_pt, vmp)) != OK)
- panic("exec_newmem: pt_bind failed: %d", s);
+ if((s=sys_newmap(vmp->vm_endpoint, vmp->vm_arch.vm_seg)) != OK)
+ panic("sys_newmap (vm) failed: %d", s);
+ if((s=pt_bind(&vmp->vm_pt, vmp)) != OK)
+ panic("exec_newmem: pt_bind failed: %d", s);
+ }
return OK;
}
#include <string.h>
#include <errno.h>
#include <env.h>
+#include <assert.h>
#include "glo.h"
#include "vm.h"
#include "util.h"
#include "sanitycheck.h"
#include "region.h"
+#include "memory.h"
/*===========================================================================*
* do_fork *
vmc->vm_bytecopies = 0;
#endif
- SANITYCHECK(SCL_DETAIL);
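+ /* Create the child's page table up front; both the full-VM copy
+ * and the region-based path below need it before regions can be
+ * mapped into the child.
+ */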
+ if(pt_new(&vmc->vm_pt) != OK) {
+ printf("VM: fork: pt_new failed\n");
+ return ENOMEM;
+ }
if(fullvm) {
SANITYCHECK(SCL_DETAIL);
- if(pt_new(&vmc->vm_pt) != OK) {
- printf("VM: fork: pt_new failed\n");
- return ENOMEM;
- }
-
- SANITYCHECK(SCL_DETAIL);
-
if(map_proc_copy(vmc, vmp) != OK) {
printf("VM: fork: map_proc_copy failed\n");
pt_free(&vmc->vm_pt);
SANITYCHECK(SCL_DETAIL);
} else {
- phys_bytes prog_bytes, parent_abs, child_abs; /* Intel only */
- phys_clicks prog_clicks, child_base;
+ vir_bytes sp;
+ phys_bytes d_abs, s_abs;
+ vir_bytes text_bytes, data_bytes, stack_bytes, parent_gap_bytes,
+ child_gap_bytes;
+
+ /* Get SP of new process (using parent). */
+ if(get_stack_ptr(vmp->vm_endpoint, &sp) != OK) {
+ printf("VM: fork: get_stack_ptr failed for %d\n",
+ vmp->vm_endpoint);
+ return ENOMEM;
+ }
+
+ /* Update size of stack segment using current SP. */
+ if(adjust(vmp, vmp->vm_arch.vm_seg[D].mem_len, sp) != OK) {
+ printf("VM: fork: adjust failed for %d\n",
+ vmp->vm_endpoint);
+ return ENOMEM;
+ }
- /* Determine how much memory to allocate. Only the data and stack
- * need to be copied, because the text segment is either shared or
- * of zero length.
+ /* Copy newly adjust()ed stack segment size to child. */
+ vmc->vm_arch.vm_seg[S] = vmp->vm_arch.vm_seg[S];
+
+ text_bytes = CLICK2ABS(vmc->vm_arch.vm_seg[T].mem_len);
+ data_bytes = CLICK2ABS(vmc->vm_arch.vm_seg[D].mem_len);
+ stack_bytes = CLICK2ABS(vmc->vm_arch.vm_seg[S].mem_len);
+
+ /* How much space is there after the break and before the lower end
+ * (which is the logical top) of the stack, for the parent?
*/
+ parent_gap_bytes = CLICK2ABS(vmc->vm_arch.vm_seg[S].mem_vir -
+ vmc->vm_arch.vm_seg[D].mem_len);
- prog_clicks = (phys_clicks) vmp->vm_arch.vm_seg[S].mem_len;
- prog_clicks += (vmp->vm_arch.vm_seg[S].mem_vir - vmp->vm_arch.vm_seg[D].mem_vir);
- prog_bytes = (phys_bytes) prog_clicks << CLICK_SHIFT;
- if ( (child_base = ALLOC_MEM(prog_clicks, 0)) == NO_MEM) {
- SANITYCHECK(SCL_FUNCTIONS);
- return(ENOMEM);
+ /* how much space can the child stack grow downwards, below
+ * the current SP? The rest of the gap is available for the
+ * heap to grow upwards.
+ */
+ child_gap_bytes = VM_PAGE_SIZE;
+
+ if((r=proc_new(vmc, VM_PROCSTART,
+ text_bytes, data_bytes, stack_bytes, child_gap_bytes, 0, 0,
+ CLICK2ABS(vmc->vm_arch.vm_seg[S].mem_vir +
+ vmc->vm_arch.vm_seg[S].mem_len), 1)) != OK) {
+ printf("VM: fork: proc_new failed\n");
+ return r;
}
- /* Create a copy of the parent's core image for the child. */
- child_abs = (phys_bytes) child_base << CLICK_SHIFT;
- parent_abs = (phys_bytes) vmp->vm_arch.vm_seg[D].mem_phys << CLICK_SHIFT;
- s = sys_abscopy(parent_abs, child_abs, prog_bytes);
- if (s < 0) panic("do_fork can't copy: %d", s);
-
- /* A separate I&D child keeps the parents text segment. The data and stack
- * segments must refer to the new copy.
- */
- if (!(vmc->vm_flags & VMF_SEPARATE))
- vmc->vm_arch.vm_seg[T].mem_phys = child_base;
- vmc->vm_arch.vm_seg[D].mem_phys = child_base;
- vmc->vm_arch.vm_seg[S].mem_phys = vmc->vm_arch.vm_seg[D].mem_phys +
- (vmp->vm_arch.vm_seg[S].mem_vir - vmp->vm_arch.vm_seg[D].mem_vir);
-
- if(pt_identity(&vmc->vm_pt) != OK) {
- printf("VM: fork: pt_identity failed\n");
- return ENOMEM;
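+ /* proc_new() preallocated and tagged the child's regions, so their
+ * physical base addresses can be looked up by tag for the copies
+ * below.
+ */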
+ if((d_abs = map_lookup_phys(vmc, VRT_HEAP)) == MAP_NONE)
+ panic("couldn't lookup data");
+ if((s_abs = map_lookup_phys(vmc, VRT_STACK)) == MAP_NONE)
+ panic("couldn't lookup stack");
+
+ /* Now copy the memory regions. */
+
+ if(vmc->vm_arch.vm_seg[T].mem_len > 0) {
+ phys_bytes t_abs;
+ if((t_abs = map_lookup_phys(vmc, VRT_TEXT)) == MAP_NONE)
+ panic("couldn't lookup text");
+ if(sys_abscopy(CLICK2ABS(vmp->vm_arch.vm_seg[T].mem_phys),
+ t_abs, text_bytes) != OK)
+ panic("couldn't copy text");
}
+
+ if(sys_abscopy(CLICK2ABS(vmp->vm_arch.vm_seg[D].mem_phys),
+ d_abs, data_bytes) != OK)
+ panic("couldn't copy data");
+
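+ /* The parent's in-use stack starts parent_gap_bytes above its break;
+ * place it child_gap_bytes above the base of the child's stack
+ * region, leaving that much room to grow downwards.
+ */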
+ if(sys_abscopy(
+ CLICK2ABS(vmp->vm_arch.vm_seg[D].mem_phys +
+ vmc->vm_arch.vm_seg[D].mem_len) + parent_gap_bytes,
+ s_abs + child_gap_bytes, stack_bytes) != OK)
+ panic("couldn't copy stack");
}
/* Only inherit these flags. */
return OK;
}
-/*===========================================================================*
- * pt_identity *
- *===========================================================================*/
-PUBLIC int pt_identity(pt_t *pt)
-{
-/* Allocate a pagetable that does a 1:1 mapping. */
- int i;
-
- /* Allocate page directory. */
- if(!pt->pt_dir &&
- !(pt->pt_dir = vm_allocpage(&pt->pt_dir_phys, VMP_PAGEDIR))) {
- return ENOMEM;
- }
-
- for(i = 0; i < I386_VM_DIR_ENTRIES; i++) {
- phys_bytes addr;
- addr = I386_BIG_PAGE_SIZE*i;
- pt->pt_dir[i] = (addr & I386_VM_ADDR_MASK_4MB) |
- I386_VM_BIGPAGE|
- I386_VM_USER|
- I386_VM_PRESENT|I386_VM_WRITE;
- pt->pt_pt[i] = NULL;
- }
-
- /* Where to start looking for free virtual address space? */
- pt->pt_virtop = 0;
-
- return OK;
-}
-
/*===========================================================================*
* pt_init *
*===========================================================================*/
vmp->vm_arch.vm_seg[D].mem_len) - BASICSTACK,
CLICK2ABS(vmp->vm_arch.vm_seg[T].mem_phys),
CLICK2ABS(vmp->vm_arch.vm_seg[D].mem_phys),
- VM_STACKTOP) != OK) {
+ VM_STACKTOP, 0) != OK) {
panic("failed proc_new for boot process");
}
}
_PROTOTYPE( int do_exec_newmem, (message *msg) );
_PROTOTYPE( int proc_new, (struct vmproc *vmp, phys_bytes start,
phys_bytes text, phys_bytes data, phys_bytes stack, phys_bytes gap,
- phys_bytes text_here, phys_bytes data_here, vir_bytes stacktop));
+ phys_bytes text_here, phys_bytes data_here, vir_bytes stacktop,
+ int prealloc_stack));
_PROTOTYPE( phys_bytes find_kernel_top, (void) );
/* break.c */
_PROTOTYPE( void pt_init, (phys_bytes limit) );
_PROTOTYPE( void pt_check, (struct vmproc *vmp) );
_PROTOTYPE( int pt_new, (pt_t *pt) );
-_PROTOTYPE( int pt_identity, (pt_t *pt) );
_PROTOTYPE( void pt_free, (pt_t *pt) );
_PROTOTYPE( int pt_writemap, (pt_t *pt, vir_bytes v, phys_bytes physaddr,
size_t bytes, u32_t flags, u32_t writemapflags));
_PROTOTYPE(void map_printmap, (struct vmproc *vmp));
_PROTOTYPE(int map_writept, (struct vmproc *vmp));
_PROTOTYPE(void printregionstats, (struct vmproc *vmp));
+_PROTOTYPE(phys_bytes map_lookup_phys, (struct vmproc *vmp, u32_t tag));
_PROTOTYPE(struct vir_region * map_region_lookup_tag, (struct vmproc *vmp, u32_t tag));
_PROTOTYPE(void map_region_set_tag, (struct vir_region *vr, u32_t tag));
PRIVATE char *map_name(struct vir_region *vr)
{
+ static char name[100];
+ char *typename, *tag;
int type = vr->flags & (VR_ANON|VR_DIRECT);
switch(type) {
case VR_ANON:
- return "anonymous";
+ typename = "anonymous";
+ break;
case VR_DIRECT:
- return "direct";
+ typename = "direct";
+ break;
default:
panic("unknown mapping type: %d", type);
}
- return "NOTREACHED";
+ switch(vr->tag) {
+ case VRT_TEXT:
+ tag = "text";
+ break;
+ case VRT_STACK:
+ tag = "stack";
+ break;
+ case VRT_HEAP:
+ tag = "heap";
+ break;
+ case VRT_NONE:
+ tag = "untagged";
+ break;
+ default:
+ tag = "unknown tag value";
+ break;
+ }
+
+ sprintf(name, "%s, %s", typename, tag);
+
+ return name;
}
PUBLIC void map_printregion(struct vmproc *vmp, struct vir_region *vr)
physr_iter iter;
struct phys_region *ph;
printf("map_printmap: map_name: %s\n", map_name(vr));
- printf("\t%s (len 0x%lx), %s\n",
+ printf("\t%s (len 0x%lx, %dkB), %s\n",
arch_map2str(vmp, vr->vaddr), vr->length,
- map_name(vr));
+ vr->length/1024, map_name(vr));
printf("\t\tphysblocks:\n");
physr_start_iter_least(vr->phys, &iter);
while((ph = physr_get_iter(&iter))) {
return;
}
+/*========================================================================*
+ * map_lookup_phys *
+ *========================================================================*/
+PUBLIC phys_bytes map_lookup_phys(struct vmproc *vmp, u32_t tag)
+{
+ struct vir_region *vr;
+ struct phys_region *pr;
+ physr_iter iter;
+
+ if(!(vr = map_region_lookup_tag(vmp, tag))) {
+ printf("VM: request for phys of missing region\n");
+ return MAP_NONE;
+ }
+
+ physr_start_iter_least(vr->phys, &iter);
+
+ if(!(pr = physr_get_iter(&iter))) {
+ printf("VM: request for phys of unmapped region\n");
+ return MAP_NONE;
+ }
+
+ if(pr->offset != 0 || pr->ph->length != vr->length) {
+ printf("VM: request for phys of partially mapped region\n");
+ return MAP_NONE;
+ }
+
+ return pr->ph->phys;
+}
+
/* Tag values: */
#define VRT_NONE 0xBEEF0000
#define VRT_HEAP 0xBEEF0001
-#define VRT_CODE 0xBEEF0002
+#define VRT_TEXT 0xBEEF0002
+#define VRT_STACK 0xBEEF0003
/* map_page_region flags */
#define MF_PREALLOC 0x01