#include <minix/sys_config.h>
#include <machine/stackframe.h>
#include <machine/fpu.h>
+#include <sys/cdefs.h>
struct segdesc_s { /* segment descriptor for protected mode */
u16_t limit_low;
# define GET_WHOAMI 19 /* get own name, endpoint, and privileges */
# define GET_RANDOMNESS_BIN 20 /* get one randomness bin */
# define GET_IDLETSC 21 /* get cumulative idle time stamp counter */
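+/* The a.out header request only exists when the boot image uses a.out format. */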
+#if !defined(__ELF__)
# define GET_AOUTHEADER 22 /* get a.out headers from the boot image */
+#endif
# define GET_CPUINFO 23 /* get information about cpus */
#define I_ENDPT m7_i4 /* calling process (may only be SELF) */
#define I_VAL_PTR m7_p1 /* virtual address at caller */
#define sys_getschedinfo(v1,v2) sys_getinfo(GET_SCHEDINFO, v1,0, v2,0)
#define sys_getpriv(dst, nr) sys_getinfo(GET_PRIV, dst, 0,0, nr)
#define sys_getidletsc(dst) sys_getinfo(GET_IDLETSC, dst, 0,0,0)
+#if !defined(__ELF__)
#define sys_getaoutheader(dst,nr) sys_getinfo(GET_AOUTHEADER, dst, 0,0,nr)
+#endif
_PROTOTYPE(int sys_getinfo, (int request, void *val_ptr, int val_len,
void *val_ptr2, int val_len2) );
_PROTOTYPE(int sys_whoami, (endpoint_t *ep, char *name, int namelen,
#include <machine/bios.h>
#include <minix/portio.h>
#include <minix/cpufeature.h>
+#if !defined(__ELF__)
#include <a.out.h>
+#endif
#include <assert.h>
#include <signal.h>
#include <machine/vm.h>
NOT_REACHABLE;
}
+#if !defined(__ELF__)
/* address of a.out headers, set in mpx386.s */
phys_bytes aout;
*/
phys_copy(aout + i * A_MINHDR, vir2phys(h), (phys_bytes) A_MINHDR);
}
+#endif
PUBLIC void fpu_init(void)
{
{ {0},
{0,0,0,0}, /* GDT descriptor */
{0,0,0,0}, /* IDT descriptor */
- {0xffff,0,0,0x92,0x4f,0}, /* kernel DS */
- {0xffff,0,0,0x92,0xcf,0}, /* kernel ES (386: flag 4 Gb at startup) */
- {0xffff,0,0,0x92,0x4f,0}, /* kernel SS (386: monitor SS at startup) */
- {0xffff,0,0,0x9a,0x4f,0}, /* kernel CS */
- {0xffff,0,0,0x9a,0x0f,0}, /* temp for BIOS (386: monitor CS at startup) */
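+ /* All kernel segments now get a flat 4 GB limit (flags byte 0xcf) and have
+  * the accessed bit preset (access byte 0x93 for data, 0x9b for code). */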
+ {0xffff,0,0,0x93,0xcf,0}, /* kernel DS */
+ {0xffff,0,0,0x93,0xcf,0}, /* kernel ES (386: flag 4 Gb at startup) */
+ {0xffff,0,0,0x93,0xcf,0}, /* kernel SS (386: monitor SS at startup) */
+ {0xffff,0,0,0x9b,0xcf,0}, /* kernel CS */
+ {0xffff,0,0,0x9b,0xcf,0}, /* temp for BIOS (386: monitor CS at startup) */
};
PRIVATE struct gatedesc_s idt[IDT_SIZE]; /* zero-init so none present */
PUBLIC struct tss_s tss[CONFIG_MAX_CPUS]; /* zero init */
*/
phys_bytes code_bytes;
phys_bytes data_bytes;
+ phys_bytes text_vaddr, data_vaddr;
+ phys_bytes text_segbase, data_segbase;
int privilege;
data_bytes = (phys_bytes) (rp->p_memmap[S].mem_vir +
else
code_bytes = (phys_bytes) rp->p_memmap[T].mem_len << CLICK_SHIFT;
privilege = USER_PRIVILEGE;
+
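+ /* The text and data segments may be loaded at a non-zero virtual address
+  * (e.g. ELF), so base each LDT descriptor at (phys - vir) and extend its
+  * limit to cover vaddr + size.
+  */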
+ text_vaddr = rp->p_memmap[T].mem_vir << CLICK_SHIFT;
+ data_vaddr = rp->p_memmap[D].mem_vir << CLICK_SHIFT;
+ text_segbase = (rp->p_memmap[T].mem_phys -
+ rp->p_memmap[T].mem_vir) << CLICK_SHIFT;
+ data_segbase = (rp->p_memmap[D].mem_phys -
+ rp->p_memmap[D].mem_vir) << CLICK_SHIFT;
+
init_codeseg(&rp->p_seg.p_ldt[CS_LDT_INDEX],
- (phys_bytes) rp->p_memmap[T].mem_phys << CLICK_SHIFT,
- code_bytes, privilege);
+ text_segbase,
+ text_vaddr + code_bytes, privilege);
init_dataseg(&rp->p_seg.p_ldt[DS_LDT_INDEX],
- (phys_bytes) rp->p_memmap[D].mem_phys << CLICK_SHIFT,
- data_bytes, privilege);
+ data_segbase,
+ data_vaddr + data_bytes, privilege);
rp->p_reg.cs = (CS_LDT_INDEX * DESC_SIZE) | TI | privilege;
rp->p_reg.gs =
rp->p_reg.fs =
#define PROC_H
#include <minix/const.h>
+#include <sys/cdefs.h>
#ifndef __ASSEMBLY__
_PROTOTYPE( void ser_putc, (char) );
_PROTOTYPE( __dead void arch_shutdown, (int) );
_PROTOTYPE( __dead void arch_monitor, (void) );
+#if !defined(__ELF__)
_PROTOTYPE( void arch_get_aout_headers, (int i, struct exec *h) );
+#endif
_PROTOTYPE( void restore_user_context, (struct proc * p) );
_PROTOTYPE( void read_tsc, (unsigned long *high, unsigned long *low) );
_PROTOTYPE( int arch_init_profile_clock, (u32_t freq) );
src_vir = (vir_bytes) &idl->p_cycles;
break;
}
+#if !defined(__ELF__)
case GET_AOUTHEADER: {
int hdrindex, index = m_ptr->I_VAL_LEN2_E;
if(index < 0 || index >= NR_BOOT_PROCS) {
src_vir = (vir_bytes) &e_hdr;
break;
}
-
+#endif
default:
printf("do_getinfo: invalid request %d\n", m_ptr->I_REQUEST);
return(EINVAL);
#include "syslib.h"
-PUBLIC int sys_newmap(proc_ep, ptr)
-endpoint_t proc_ep; /* process whose map is to be changed */
-struct mem_map *ptr; /* pointer to new map */
+PUBLIC int sys_newmap(
+endpoint_t proc_ep, /* process whose map is to be changed */
+struct mem_map *ptr /* pointer to new map */
+)
{
/* A process has been assigned a new memory map. Tell the kernel. */
* what the ranges are of the filler data.
*/
unmap_ok = 1;
+#if !defined(__ELF__)
_minix_unmapzero();
+#endif
/* Initialize user-space scheduling. */
sched_init();
/* Map out our own text and data. */
unmap_ok = 1;
+#if !defined(__ELF__)
_minix_unmapzero();
+#endif
/* Ask VM to pin memory for the new RS instance. */
if((s = vm_memctl(RS_PROC_NR, VM_RS_MEM_PIN)) != OK) {
{
vir_bytes textstart = CLICK2ABS(vmp->vm_arch.vm_seg[T].mem_phys);
vir_bytes datastart = CLICK2ABS(vmp->vm_arch.vm_seg[D].mem_phys);
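+ /* mem_vir may be non-zero (the segment's load address), so translate using
+  * the segment base (phys - vir) instead of the segment's physical start. */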
+ vir_bytes datasegbase = CLICK2ABS(vmp->vm_arch.vm_seg[D].mem_phys -
+ vmp->vm_arch.vm_seg[D].mem_vir);
/* Could be a text address. */
assert(datastart <= addr || textstart <= addr);
- return addr - datastart;
+ return addr - datasegbase;
}
/*===========================================================================*
vir_bytes textstart = CLICK2ABS(vmp->vm_arch.vm_seg[T].mem_phys);
vir_bytes textend = textstart + CLICK2ABS(vmp->vm_arch.vm_seg[T].mem_len);
vir_bytes datastart = CLICK2ABS(vmp->vm_arch.vm_seg[D].mem_phys);
+ vir_bytes textsegbase = CLICK2ABS(vmp->vm_arch.vm_seg[T].mem_phys -
+ vmp->vm_arch.vm_seg[T].mem_vir);
+ vir_bytes datasegbase = CLICK2ABS(vmp->vm_arch.vm_seg[D].mem_phys -
+ vmp->vm_arch.vm_seg[D].mem_vir);
if(addr < textstart) {
sprintf(bufstr, "<lin:0x%lx>", addr);
} else if(addr < datastart) {
- sprintf(bufstr, "0x%lx (codeseg)", addr - textstart);
+ sprintf(bufstr, "0x%lx (codeseg)", addr - textsegbase);
} else {
- sprintf(bufstr, "0x%lx (dataseg)", addr - datastart);
+ sprintf(bufstr, "0x%lx (dataseg)", addr - datasegbase);
}
return bufstr;
vir_bytes textend = textstart +
CLICK2ABS(vmp->vm_arch.vm_seg[T].mem_len);
vir_bytes datastart = CLICK2ABS(vmp->vm_arch.vm_seg[D].mem_phys);
+ vir_bytes textsegbase = CLICK2ABS(vmp->vm_arch.vm_seg[T].mem_phys -
+ vmp->vm_arch.vm_seg[T].mem_vir);
+ vir_bytes datasegbase = CLICK2ABS(vmp->vm_arch.vm_seg[D].mem_phys -
+ vmp->vm_arch.vm_seg[D].mem_vir);
/* The protection to be returned here is that of the segment. */
if(addr < textstart) {
} else if(addr < datastart) {
*seg = T;
*prot = PROT_READ | PROT_EXEC;
- return addr - textstart;
+ return addr - textsegbase;
} else {
*seg = D;
if (textstart == textend) /* common I&D? */
*prot = PROT_READ | PROT_WRITE | PROT_EXEC;
else
*prot = PROT_READ | PROT_WRITE;
- return addr - datastart;
+ return addr - datasegbase;
}
}
*===========================================================================*/
PUBLIC vir_bytes arch_vir2map(struct vmproc *vmp, vir_bytes addr)
{
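+ /* Translate a process virtual address to the corresponding map address by
+  * adding the data segment base (phys - vir). */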
- vir_bytes bottom = CLICK2ABS(vmp->vm_arch.vm_seg[D].mem_phys);
+ vir_bytes datasegbase = CLICK2ABS(vmp->vm_arch.vm_seg[D].mem_phys -
+ vmp->vm_arch.vm_seg[D].mem_vir);
- return addr + bottom;
+ return addr + datasegbase;
}
/*===========================================================================*
*===========================================================================*/
PUBLIC vir_bytes arch_vir2map_text(struct vmproc *vmp, vir_bytes addr)
{
- vir_bytes bottom = CLICK2ABS(vmp->vm_arch.vm_seg[T].mem_phys);
+ vir_bytes textsegbase = CLICK2ABS(vmp->vm_arch.vm_seg[T].mem_phys -
+ vmp->vm_arch.vm_seg[T].mem_vir);
- return addr + bottom;
+ return addr + textsegbase;
}
#include "memory.h"
FORWARD _PROTOTYPE( int new_mem, (struct vmproc *vmp,
- vir_bytes text_bytes, vir_bytes data_bytes,
- vir_bytes stk_bytes, phys_bytes tot_bytes, vir_bytes *stack_top));
+ vir_bytes text_addr, vir_bytes text_bytes,
+ vir_bytes data_addr, vir_bytes data_bytes,
+ vir_bytes stk_bytes, phys_bytes tot_bytes, vir_bytes *stack_top,
+ int is_elf));
static int failcount;
/* Allocate new memory and release old memory. Fix map and tell
* kernel.
*/
- r = new_mem(vmp, args.text_bytes, args.data_bytes,
- args.args_bytes, args.tot_bytes, &stack_top);
+ r = new_mem(vmp, args.text_addr, args.text_bytes,
+ args.data_addr, args.data_bytes,
+ args.args_bytes, args.tot_bytes, &stack_top,
+ args.is_elf);
if (r != OK) {
printf("VM: newmem: new_mem failed\n");
return(r);
/*===========================================================================*
* new_mem *
*===========================================================================*/
-PRIVATE int new_mem(rmp, text_bytes, data_bytes,
- stk_bytes,tot_bytes,stack_top)
-struct vmproc *rmp; /* process to get a new memory map */
-vir_bytes text_bytes; /* text segment size in bytes */
-vir_bytes data_bytes; /* size of data (incl bss) in bytes */
-vir_bytes stk_bytes; /* size of initial stack segment in bytes */
-phys_bytes tot_bytes; /* total memory to allocate, including gap */
-vir_bytes *stack_top; /* top of process stack */
+PRIVATE int new_mem(
+ struct vmproc *rmp, /* process to get a new memory map */
+ vir_bytes text_addr, /* text segment load address */
+ vir_bytes text_bytes, /* text segment size in bytes */
+ vir_bytes data_addr, /* data segment load address */
+ vir_bytes data_bytes, /* size of data (incl bss) in bytes */
+ vir_bytes stk_bytes, /* size of initial stack segment in bytes */
+ phys_bytes tot_bytes, /* total memory to allocate, including gap */
+ vir_bytes *stack_top, /* top of process stack */
+ int is_elf
+)
{
/* Allocate new memory and release the old memory. Change the map and report
* the new map to the kernel. Zero the new core image's bss, gap and stack.
SANITYCHECK(SCL_DETAIL);
if(r != OK || (r=proc_new(rmp,
VM_PROCSTART, /* where to start the process in the page table */
+ text_addr, /* text load address */
CLICK2ABS(text_clicks),/* how big is the text in bytes, page-aligned */
+ data_addr, /* data load address */
CLICK2ABS(data_clicks),/* how big is data+bss, page-aligned */
CLICK2ABS(stack_clicks),/* how big is stack, page-aligned */
CLICK2ABS(gap_clicks), /* how big is gap, page-aligned */
0,0, /* not preallocated */
VM_STACKTOP, /* regular stack top */
- 0)) != OK) {
+ 0, is_elf)) != OK) {
SANITYCHECK(SCL_DETAIL);
printf("VM: new_mem: failed\n");
if(ptok) {
*===========================================================================*/
PUBLIC int proc_new(struct vmproc *vmp,
phys_bytes vstart, /* where to start the process in page table */
+ phys_bytes text_addr, /* address at which to load code */
phys_bytes text_bytes, /* how much code, in bytes but page aligned */
+ phys_bytes data_addr, /* address at which to load data */
phys_bytes data_bytes, /* how much data + bss, in bytes but page aligned */
phys_bytes stack_bytes, /* stack space to reserve, in bytes, page aligned */
phys_bytes gap_bytes, /* gap bytes, page aligned */
phys_bytes text_start, /* text starts here, if preallocated, otherwise 0 */
phys_bytes data_start, /* data starts here, if preallocated, otherwise 0 */
phys_bytes stacktop,
- int prealloc_stack
+ int prealloc_stack,
+ int is_elf
)
{
int s;
vir_bytes hole_bytes;
int prealloc;
struct vir_region *reg;
+ phys_bytes map_text_addr, map_data_addr, map_stack_addr;
assert(!(vstart % VM_PAGE_SIZE));
+ assert(!(text_addr % VM_PAGE_SIZE));
assert(!(text_bytes % VM_PAGE_SIZE));
+ assert(!(data_addr % VM_PAGE_SIZE));
assert(!(data_bytes % VM_PAGE_SIZE));
assert(!(stack_bytes % VM_PAGE_SIZE));
assert(!(gap_bytes % VM_PAGE_SIZE));
assert((!text_start && !data_start) || (text_start && data_start));
/* Place text at start of process. */
- vmp->vm_arch.vm_seg[T].mem_phys = ABS2CLICK(vstart);
- vmp->vm_arch.vm_seg[T].mem_vir = 0;
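+ /* text_addr is the text segment's load (virtual) address; map the text
+  * region at vstart + text_addr and record that address in mem_vir. */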
+ map_text_addr = vstart + text_addr;
+ vmp->vm_arch.vm_seg[T].mem_phys = ABS2CLICK(map_text_addr);
+ vmp->vm_arch.vm_seg[T].mem_vir = ABS2CLICK(text_addr);
vmp->vm_arch.vm_seg[T].mem_len = ABS2CLICK(text_bytes);
vmp->vm_offset = vstart;
#define TEXTFLAGS (PTF_PRESENT | PTF_USER)
SANITYCHECK(SCL_DETAIL);
if(text_bytes > 0) {
- if(!(reg=map_page_region(vmp, vstart, 0, text_bytes,
+ if(!(reg=map_page_region(vmp, map_text_addr, 0, text_bytes,
text_start ? text_start : MAP_NONE,
VR_ANON | VR_WRITABLE, text_start ? 0 : MF_PREALLOC))) {
SANITYCHECK(SCL_DETAIL);
SANITYCHECK(SCL_DETAIL);
return(ENOMEM);
}
+
map_region_set_tag(reg, VRT_TEXT);
SANITYCHECK(SCL_DETAIL);
}
* or stack), make sure it's cleared, and map it in after text
* (if any).
*/
- if(!(vmp->vm_heap = map_page_region(vmp, vstart + text_bytes, 0,
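+ /* An ELF image supplies its own data load address; for a.out the data
+  * segment simply follows the text. */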
+ if (is_elf) {
+ map_data_addr = vstart + data_addr;
+ } else {
+ map_data_addr = vstart + text_bytes;
+ }
+
+ if(!(vmp->vm_heap = map_page_region(vmp, map_data_addr, 0,
data_bytes, data_start ? data_start : MAP_NONE, VR_ANON | VR_WRITABLE,
data_start ? 0 : MF_PREALLOC))) {
printf("VM: exec: map_page_region for data failed\n");
* stacktop is the first address after the stack, as addressed
* from within the user process.
*/
- hole_bytes = stacktop - data_bytes - stack_bytes - gap_bytes;
+ hole_bytes = stacktop - data_bytes - stack_bytes
+ - gap_bytes - data_addr;
+
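+ /* Place the stack + gap region so that it ends exactly at vstart + stacktop. */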
+ map_stack_addr = map_data_addr + data_bytes + hole_bytes;
if(!(reg=map_page_region(vmp,
- vstart + text_bytes + data_bytes + hole_bytes,
+ map_stack_addr,
0, stack_bytes + gap_bytes, MAP_NONE,
VR_ANON | VR_WRITABLE, prealloc_stack ? MF_PREALLOC : 0)) != OK) {
panic("map_page_region failed for stack");
map_region_set_tag(reg, VRT_STACK);
- vmp->vm_arch.vm_seg[D].mem_phys = ABS2CLICK(vstart + text_bytes);
- vmp->vm_arch.vm_seg[D].mem_vir = 0;
+ vmp->vm_arch.vm_seg[D].mem_phys = ABS2CLICK(map_data_addr);
+ vmp->vm_arch.vm_seg[D].mem_vir = ABS2CLICK(data_addr);
vmp->vm_arch.vm_seg[D].mem_len = ABS2CLICK(data_bytes);
- vmp->vm_arch.vm_seg[S].mem_phys = ABS2CLICK(vstart +
- text_bytes + data_bytes + gap_bytes + hole_bytes);
- vmp->vm_arch.vm_seg[S].mem_vir = ABS2CLICK(data_bytes + gap_bytes + hole_bytes);
+ vmp->vm_arch.vm_seg[S].mem_phys = ABS2CLICK(map_data_addr +
+ data_bytes + gap_bytes + hole_bytes);
+ vmp->vm_arch.vm_seg[S].mem_vir = ABS2CLICK(data_addr +
+ data_bytes + gap_bytes + hole_bytes);
/* Where are we allowed to start using the rest of the virtual
* address space?
if(vmp->vm_endpoint != NONE) {
- /* Pretend the stack is the full size of the data segment, so
- * we get a full-sized data segment, up to VM_DATATOP.
- * After sys_newmap(), change the stack to what we know the
- * stack to be (up to stacktop).
- */
- vmp->vm_arch.vm_seg[S].mem_len = (VM_DATATOP >> CLICK_SHIFT) -
- vmp->vm_arch.vm_seg[S].mem_vir - ABS2CLICK(vstart) - ABS2CLICK(text_bytes);
-
- /* What is the final size of the data segment in bytes? */
- vmp->vm_arch.vm_data_top =
- (vmp->vm_arch.vm_seg[S].mem_vir +
- vmp->vm_arch.vm_seg[S].mem_len) << CLICK_SHIFT;
+ /* Pretend the stack is the full size of the data segment, so
+ * we get a full-sized data segment, up to VM_DATATOP.
+ * After sys_newmap(), change the stack to what we know the
+ * stack to be (up to stacktop).
+ */
+ vmp->vm_arch.vm_seg[S].mem_len = (VM_DATATOP >> CLICK_SHIFT) -
+ vmp->vm_arch.vm_seg[S].mem_vir - ABS2CLICK(map_data_addr);
+
+ /* What is the final size of the data segment in bytes? */
+ vmp->vm_arch.vm_data_top =
+ (vmp->vm_arch.vm_seg[S].mem_vir +
+ vmp->vm_arch.vm_seg[S].mem_len) << CLICK_SHIFT;
if((s=sys_newmap(vmp->vm_endpoint, vmp->vm_arch.vm_seg)) != OK)
panic("sys_newmap (vm) failed: %d", s);
struct vir_region *heap, *stack;
vir_bytes text_bytes, data_bytes, stack_bytes, parent_gap_bytes,
child_gap_bytes;
+ vir_bytes text_addr, data_addr;
+ int is_elf = 0;
/* Get SP of new process (using parent). */
if(get_stack_ptr(vmp->vm_endpoint, &sp) != OK) {
/* Copy newly adjust()ed stack segment size to child. */
vmc->vm_arch.vm_seg[S] = vmp->vm_arch.vm_seg[S];
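+ /* mem_vir holds each segment's load address; pass it along so the child
+  * is laid out exactly like the parent. */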
+ text_addr = CLICK2ABS(vmc->vm_arch.vm_seg[T].mem_vir);
text_bytes = CLICK2ABS(vmc->vm_arch.vm_seg[T].mem_len);
+ data_addr = CLICK2ABS(vmc->vm_arch.vm_seg[D].mem_vir);
data_bytes = CLICK2ABS(vmc->vm_arch.vm_seg[D].mem_len);
stack_bytes = CLICK2ABS(vmc->vm_arch.vm_seg[S].mem_len);
* logical top) of stack for the parent
*/
parent_gap_bytes = CLICK2ABS(vmc->vm_arch.vm_seg[S].mem_vir -
- vmc->vm_arch.vm_seg[D].mem_len);
+ vmc->vm_arch.vm_seg[D].mem_len -
+ vmc->vm_arch.vm_seg[D].mem_vir);
/* how much space can the child stack grow downwards, below
* the current SP? The rest of the gap is available for the
*/
child_gap_bytes = VM_PAGE_SIZE;
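+ /* The executable format is decided at build time, so the child simply
+  * uses the same layout rules as every other process. */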
+#if defined(__ELF__)
+ is_elf = 1;
+#endif
+
if((r=proc_new(vmc, VM_PROCSTART,
- text_bytes, data_bytes, stack_bytes, child_gap_bytes, 0, 0,
+ text_addr, text_bytes,
+ data_addr, data_bytes,
+ stack_bytes, child_gap_bytes, 0, 0,
CLICK2ABS(vmc->vm_arch.vm_seg[S].mem_vir +
- vmc->vm_arch.vm_seg[S].mem_len), 1)) != OK) {
+ vmc->vm_arch.vm_seg[S].mem_len),
+ 1, is_elf)) != OK) {
printf("VM: fork: proc_new failed\n");
return r;
}
* and its return value needn't be checked.
*/
vir = arch_vir2map(vmc, msgaddr);
- handle_memory(vmc, vir, sizeof(message), 1);
+ if (handle_memory(vmc, vir, sizeof(message), 1) != OK)
+ panic("do_fork: handle_memory for child failed\n");
vir = arch_vir2map(vmp, msgaddr);
- handle_memory(vmp, vir, sizeof(message), 1);
+ if (handle_memory(vmp, vir, sizeof(message), 1) != OK)
+ panic("do_fork: handle_memory for parent failed\n");
}
/* Inform caller of new child endpoint. */
struct boot_image *ip;
struct rprocpub rprocpub[NR_BOOT_PROCS];
phys_bytes limit = 0;
+ int is_elf = 0;
#if SANITYCHECKS
incheck = nocheck = 0;
for (ip = &image[0]; ip < &image[NR_BOOT_PROCS]; ip++) {
int s;
struct vmproc *vmp;
- vir_bytes old_stacktop, old_stack;
+ vir_bytes old_stacktop, old_stacklen;
if(ip->proc_nr < 0) continue;
if(!(ip->flags & PROC_FULLVM))
continue;
- old_stack =
- vmp->vm_arch.vm_seg[S].mem_vir +
- vmp->vm_arch.vm_seg[S].mem_len -
- vmp->vm_arch.vm_seg[D].mem_len;
-
if(pt_new(&vmp->vm_pt) != OK)
panic("VM: no new pagetable");
#define BASICSTACK VM_PAGE_SIZE
panic("VM: vmctl for new stack failed");
}
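+ /* Length of the area between the end of the data segment and the top of
+  * the boot-time stack; it is freed here before the process is re-created
+  * with proc_new. */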
+ old_stacklen =
+ vmp->vm_arch.vm_seg[S].mem_vir +
+ vmp->vm_arch.vm_seg[S].mem_len -
+ vmp->vm_arch.vm_seg[D].mem_len -
+ vmp->vm_arch.vm_seg[D].mem_vir;
+
free_mem(vmp->vm_arch.vm_seg[D].mem_phys +
vmp->vm_arch.vm_seg[D].mem_len,
- old_stack);
+ old_stacklen);
+
+#if defined(__ELF__)
+ is_elf = 1;
+#endif
if(proc_new(vmp,
VM_PROCSTART,
+ CLICK2ABS(vmp->vm_arch.vm_seg[T].mem_vir),
CLICK2ABS(vmp->vm_arch.vm_seg[T].mem_len),
+ CLICK2ABS(vmp->vm_arch.vm_seg[D].mem_vir),
CLICK2ABS(vmp->vm_arch.vm_seg[D].mem_len),
BASICSTACK,
CLICK2ABS(vmp->vm_arch.vm_seg[S].mem_vir +
vmp->vm_arch.vm_seg[S].mem_len -
- vmp->vm_arch.vm_seg[D].mem_len) - BASICSTACK,
+ vmp->vm_arch.vm_seg[D].mem_len -
+ vmp->vm_arch.vm_seg[D].mem_vir) - BASICSTACK,
CLICK2ABS(vmp->vm_arch.vm_seg[T].mem_phys),
CLICK2ABS(vmp->vm_arch.vm_seg[D].mem_phys),
- VM_STACKTOP, 0) != OK) {
+ VM_STACKTOP, 0, is_elf) != OK) {
panic("failed proc_new for boot process");
}
}
/* Unmap our own low pages. */
unmap_ok = 1;
+#if !defined(__ELF__)
_minix_unmapzero();
+#endif
/* Map all the services in the boot image. */
if((s = sys_safecopyfrom(RS_PROC_NR, info->rproctab_gid, 0,
/* exec.c */
_PROTOTYPE( int do_exec_newmem, (message *msg) );
_PROTOTYPE( int proc_new, (struct vmproc *vmp, phys_bytes start,
- phys_bytes text, phys_bytes data, phys_bytes stack, phys_bytes gap,
+ phys_bytes text_addr, phys_bytes text_bytes,
+ phys_bytes data_addr, phys_bytes data_bytes,
+ phys_bytes stack, phys_bytes gap,
phys_bytes text_here, phys_bytes data_here, vir_bytes stacktop,
- int prealloc_stack));
+ int prealloc_stack, int is_elf));
_PROTOTYPE( phys_bytes find_kernel_top, (void) );
/* break.c */