#endif
#define _NR_PROCS 256
-#define _NR_SYS_PROCS 32
+#define _NR_SYS_PROCS 64
/* Set the CHIP type based on the machine selected. The symbol CHIP is actually
* indicative of more than just the CPU. For example, machines for which
#endif /* _MINIX || __minix */
+/*XXX trace hook (see _vmmcall): arg 1 = 0x123456xx channel tag, arg 2 = value to report, arg 3 = checkpoint number */
+/*XXX*/ void vmmcall(u32_t eax, u32_t ebx, u32_t ecx);
+
#endif /* _TYPES_H */
return OK;
case VMCTL_INCSP:
/* Increase process SP. */
+vmmcall(0x12345601, 0, 40);
p->p_reg.sp += m_ptr->SVMCTL_VALUE;
+vmmcall(0x12345601, 0, 41);
return OK;
case VMCTL_I386_KERNELLIMIT:
{
}
case VMCTL_FLUSHTLB:
{
+vmmcall(0x12345601, 0, 42);
reload_cr3();
+vmmcall(0x12345601, 0, 43);
return OK;
}
}
PUBLIC void vm_init(struct proc *newptproc)
{
+vmmcall(0x12345602, vm_running, 5);
if(vm_running)
panic("vm_init: vm_running");
+vmmcall(0x12345602, (unsigned) newptproc, 6);
switch_address_space(newptproc);
+vmmcall(0x12345602, (unsigned) ptproc, 7);
assert(ptproc == newptproc);
+vmmcall(0x12345602, 0, 8);
vm_enable_paging();
+vmmcall(0x12345602, 0, 9);
vm_running = 1;
}
u32_t cr0, cr4;
int pgeok;
+vmmcall(0x12345602, 0, 10);
psok = _cpufeature(_CPUF_I386_PSE);
+vmmcall(0x12345602, psok, 11);
pgeok = _cpufeature(_CPUF_I386_PGE);
+vmmcall(0x12345602, pgeok, 12);
cr0= read_cr0();
+vmmcall(0x12345602, cr0, 13);
cr4= read_cr4();
/* First clear PG and PGE flag, as PGE must be enabled after PG. */
+vmmcall(0x12345602, cr4, 14);
write_cr0(cr0 & ~I386_CR0_PG);
+vmmcall(0x12345602, 0, 15);
write_cr4(cr4 & ~(I386_CR4_PGE | I386_CR4_PSE));
+vmmcall(0x12345602, 0, 16);
cr0= read_cr0();
+vmmcall(0x12345602, cr0, 17);
cr4= read_cr4();
/* Our first page table contains 4MB entries. */
if(psok)
cr4 |= I386_CR4_PSE;
+vmmcall(0x12345602, cr4, 18);
write_cr4(cr4);
/* First enable paging, then enable global page flag. */
+vmmcall(0x12345602, 0, 19);
cr0 |= I386_CR0_PG;
write_cr0(cr0 );
+vmmcall(0x12345602, 0, 20);
cr0 |= I386_CR0_WP;
write_cr0(cr0);
/* May we enable these features? */
+vmmcall(0x12345602, 0, 21);
if(pgeok)
cr4 |= I386_CR4_PGE;
write_cr4(cr4);
+vmmcall(0x12345602, 0, 22);
}
PUBLIC vir_bytes alloc_remote_segment(u32_t *selector,
/*
* copy the extra data associated with the call from userspace
*/
+vmmcall(0x12345602, 0, 23);
if((r=data_copy(caller->p_endpoint, (vir_bytes)m_ptr->SVMCTL_VALUE,
KERNEL, (vir_bytes) &ep_data, sizeof(ep_data))) != OK) {
printf("vmctl_enable_paging: data_copy failed! (%d)\n", r);
* when turning paging on i386 we also change the segment limits to make
* the special mappings requested by the kernel reachable
*/
+vmmcall(0x12345602, 0, 24);
if ((r = prot_set_kern_seg_limit(ep_data.data_seg_limit)) != OK)
return r;
/*
* install the new map provided by the call
*/
+vmmcall(0x12345602, 0, 25);
if (newmap(caller, caller, ep_data.mem_map) != OK)
panic("arch_enable_paging: newmap failed");
+vmmcall(0x12345602, 0, 26);
FIXLINMSG(caller);
+vmmcall(0x12345602, 0, 27);
assert(caller->p_delivermsg_lin == umap_local(caller, D,
caller->p_delivermsg_vir, sizeof(message)));
#ifdef CONFIG_APIC
/* if local APIC is enabled */
+vmmcall(0x12345602, 0, 28);
if (lapic_addr) {
lapic_addr = lapic_addr_vaddr;
lapic_eoi_addr = LAPIC_EOI;
* lapic address. Bad things would happen. It is unfortunate but such is
* life
*/
+vmmcall(0x12345602, 0, 29);
i386_watchdog_start();
#endif
+vmmcall(0x12345602, 0, 30);
return OK;
}
phys_bytes data_bytes;
int privilege;
+/*XXX*/vmmcall(0x12345603, 0, 110);
data_bytes = (phys_bytes) (rp->p_memmap[S].mem_vir +
rp->p_memmap[S].mem_len) << CLICK_SHIFT;
+/*XXX*/vmmcall(0x12345603, 0, 111);
if (rp->p_memmap[T].mem_len == 0)
code_bytes = data_bytes; /* common I&D, poor protect */
else
code_bytes = (phys_bytes) rp->p_memmap[T].mem_len << CLICK_SHIFT;
+/*XXX*/vmmcall(0x12345603, 0, 112);
privilege = USER_PRIVILEGE;
+/*XXX*/vmmcall(0x12345603, 0, 113);
init_codeseg(&rp->p_seg.p_ldt[CS_LDT_INDEX],
(phys_bytes) rp->p_memmap[T].mem_phys << CLICK_SHIFT,
code_bytes, privilege);
+/*XXX*/vmmcall(0x12345603, 0, 114);
init_dataseg(&rp->p_seg.p_ldt[DS_LDT_INDEX],
(phys_bytes) rp->p_memmap[D].mem_phys << CLICK_SHIFT,
data_bytes, privilege);
+/*XXX*/vmmcall(0x12345603, 0, 115);
rp->p_reg.cs = (CS_LDT_INDEX * DESC_SIZE) | TI | privilege;
rp->p_reg.gs =
rp->p_reg.fs =
rp->p_reg.ss =
rp->p_reg.es =
rp->p_reg.ds = (DS_LDT_INDEX*DESC_SIZE) | TI | privilege;
+/*XXX*/vmmcall(0x12345603, 0, 116);
}
/*===========================================================================*
int orig_click;
int incr_clicks;
+vmmcall(0x12345603, limit, 31);
if(limit <= kinfo.data_base) {
+vmmcall(0x12345603, kinfo.data_base, 38);
printf("prot_set_kern_seg_limit: limit bogus\n");
return EINVAL;
}
/* Do actual increase. */
+vmmcall(0x12345603, 0, 32);
orig_click = kinfo.data_size / CLICK_SIZE;
kinfo.data_size = limit - kinfo.data_base;
incr_clicks = kinfo.data_size / CLICK_SIZE - orig_click;
+vmmcall(0x12345603, 0, 33);
prot_init();
/* Increase kernel processes too. */
+vmmcall(0x12345603, 0, 34);
for (rp = BEG_PROC_ADDR; rp < END_PROC_ADDR; ++rp) {
+vmmcall(0x12345603, 0, 35);
if (isemptyp(rp) || !iskernelp(rp))
continue;
rp->p_memmap[S].mem_len += incr_clicks;
+vmmcall(0x12345603, 0, 36);
alloc_segments(rp);
rp->p_memmap[S].mem_len -= incr_clicks;
}
+vmmcall(0x12345603, 0, 37);
return OK;
}
struct mem_map *map_ptr; /* virtual address of map inside caller */
int proc_nr;
+/*XXX*/vmmcall(0x12345604, 0, 100);
map_ptr = (struct mem_map *) m_ptr->PR_MEM_PTR;
+/*XXX*/vmmcall(0x12345604, 0, 101);
if (! isokendpt(m_ptr->PR_ENDPT, &proc_nr)) return(EINVAL);
+/*XXX*/vmmcall(0x12345604, 0, 102);
if (iskerneln(proc_nr)) return(EPERM);
+/*XXX*/vmmcall(0x12345604, 0, 103);
rp = proc_addr(proc_nr);
+/*XXX*/vmmcall(0x12345604, 0, 104);
return newmap(caller, rp, map_ptr);
}
{
int r;
/* Fetch the memory map. */
+/*XXX*/vmmcall(0x12345604, 0, 105);
if((r=data_copy(caller->p_endpoint, (vir_bytes) map_ptr,
KERNEL, (vir_bytes) rp->p_memmap, sizeof(rp->p_memmap))) != OK) {
printf("newmap: data_copy failed! (%d)\n", r);
return r;
}
+/*XXX*/vmmcall(0x12345604, 0, 106);
alloc_segments(rp);
+/*XXX*/vmmcall(0x12345604, 0, 107);
return(OK);
}
*===========================================================================*/
PUBLIC int do_vmctl(struct proc * caller, message * m_ptr)
{
- int proc_nr;
+ int proc_nr, r;
endpoint_t ep = m_ptr->SVMCTL_WHO;
struct proc *p, *rp, *target;
return OK;
case VMCTL_ENABLE_PAGING:
+vmmcall(0x12345605, vm_running, 1);
if(vm_running)
panic("do_vmctl: paging already enabled");
+vmmcall(0x12345605, (unsigned) p, 2);
vm_init(p);
+vmmcall(0x12345605, vm_running, 3);
if(!vm_running)
panic("do_vmctl: paging enabling failed");
- return arch_enable_paging(caller, m_ptr);
+vmmcall(0x12345605, (unsigned) caller, 4);
+ r = arch_enable_paging(caller, m_ptr);
+vmmcall(0x12345605, r, 39);
+ return r;
case VMCTL_KERN_PHYSMAP:
{
int i = m_ptr->SVMCTL_VALUE;
{
va_list arg;
/* The system has run aground of a fatal kernel error. Terminate execution. */
+/*XXX*/vmmcall(0x12345610, ((unsigned *) &fmt)[-1], 1);
if (minix_panicing == ARE_PANICING) {
arch_monitor();
}
/* Accumulate a single character for a kernel message. Send a notification
* to the output driver if an END_OF_KMESS is encountered.
*/
+/*XXX*/vmmcall(0x12345612, c, 1);
if (c != END_OF_KMESS) {
if (do_serial_debug) {
if(c == '\n')
xorl %edx, %eax /* See if the bit changed */
testl %ecx, %eax
ret
+
+.globl _vmmcall
+_vmmcall:
+ push %ebp
+ mov %esp, %ebp
+ push %eax /* preserve the registers used to pass the hypercall arguments */
+ push %ebx
+ push %ecx
+ push %edx
+ movl 8(%ebp), %eax /* first argument: 0x123456xx channel tag */
+ movl 12(%ebp), %ebx /* second argument: value to report */
+ movl 16(%ebp), %ecx /* third argument: checkpoint number */
+ movl 4(%ebp), %edx /* return address of the caller */
+.byte 0x0F, 0x01, 0xD9 /* VMMCALL (AMD SVM), intercepted by the VMM */
+ pop %edx
+ pop %ecx
+ pop %ebx
+ pop %eax
+ pop %ebp
+ ret
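The markers above are only useful once something on the host side decodes the VMMCALL exits. Below is a minimal sketch of such a decoder, assuming a VMM or emulator that hands a handler the guest's general-purpose registers on a VMMCALL intercept; the function name, signature, and logging sink are illustrative, not from any particular VMM API.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical host-side decoder for the markers injected by this patch.
 * eax/ebx/ecx/edx are the guest's register values at the VMMCALL exit. */
void trace_vmmcall(uint32_t eax, uint32_t ebx, uint32_t ecx, uint32_t edx)
{
	if ((eax & 0xffffff00) != 0x12345600)
		return;		/* not one of the 0x123456xx trace channels */
	fprintf(stderr, "guest: chan %02x value 0x%08x checkpoint %u from 0x%08x\n",
		(unsigned)(eax & 0xffu), (unsigned)ebx, (unsigned)ecx, (unsigned)edx);
}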
/* Accumulate another character. If 0 or buffer full, print it. */
static int buf_count; /* # characters in the buffer */
+/*XXX*/vmmcall(0x12345613, c, 1);
if ((c == 0 && buf_count > 0) || buf_count == sizeof(print_buf)) {
sys_sysctl(SYSCTL_CODE_DIAG, print_buf, buf_count);
buf_count = 0;
static int panicing= 0;
va_list args;
+/*XXX*/vmmcall(0x12345611, ((unsigned *) &fmt)[-1], 1);
if(panicing) return;
panicing= 1;
int r, status;
/* Get information about self. */
+/*XXX*/vmmcall(0x12345606, 0, 1);
r = sys_whoami(&sef_self_endpoint, sef_self_name, SEF_SELF_NAME_MAXLEN);
+/*XXX*/vmmcall(0x12345606, r, 2);
if ( r != OK) {
sef_self_endpoint = SELF;
sprintf(sef_self_name, "%s", "Unknown");
#if INTERCEPT_SEF_INIT_REQUESTS
/* Intercept SEF Init requests. */
+/*XXX*/vmmcall(0x12345606, sef_self_endpoint, 3);
if(sef_self_endpoint == RS_PROC_NR) {
if((r = do_sef_rs_init()) != OK) {
panic("unable to complete init: %d", r);
else {
message m;
+/*XXX*/vmmcall(0x12345606, 0, 4);
r = receive(RS_PROC_NR, &m, &status);
+/*XXX*/vmmcall(0x12345606, r, 5);
+/*XXX*/vmmcall(0x12345606, status, 6);
+/*XXX*/vmmcall(0x12345606, m.m_type, 7);
if(r != OK) {
panic("unable to receive from RS: %d", r);
}
+/*XXX*/vmmcall(0x12345606, 0, 8);
if(IS_SEF_INIT_REQUEST(&m)) {
+/*XXX*/vmmcall(0x12345606, 0, 9);
if((r = do_sef_init_request(&m)) != OK) {
panic("unable to process init request: %d", r);
}
+/*XXX*/vmmcall(0x12345606, 0, 10);
}
else {
+/*XXX*/vmmcall(0x12345606, 0, 11);
panic("got an unexpected message type %d", m.m_type);
}
+/*XXX*/vmmcall(0x12345606, 0, 12);
}
#endif
+/*XXX*/vmmcall(0x12345606, 0, 13);
}
/*===========================================================================*
sef_init_info_t info;
/* Debug. */
+/*XXX*/vmmcall(0x12345607, 0, 14);
#if SEF_INIT_DEBUG
sef_init_debug_begin();
sef_init_dprint("%s. Got a SEF Init request of type: %d. About to init.\n",
#endif
/* Let the callback code handle the request. */
+/*XXX*/vmmcall(0x12345607, 0, 15);
type = m_ptr->RS_INIT_TYPE;
+/*XXX*/vmmcall(0x12345607, type, 16);
info.rproctab_gid = m_ptr->RS_INIT_RPROCTAB_GID;
info.old_endpoint = m_ptr->RS_INIT_OLD_ENDPOINT;
switch(type) {
case SEF_INIT_FRESH:
+/*XXX*/vmmcall(0x12345607, 0, 17);
r = sef_cbs.sef_cb_init_fresh(type, &info);
+/*XXX*/vmmcall(0x12345607, r, 18);
break;
case SEF_INIT_LU:
+/*XXX*/vmmcall(0x12345607, 0, 19);
r = sef_cbs.sef_cb_init_lu(type, &info);
+/*XXX*/vmmcall(0x12345607, r, 20);
break;
case SEF_INIT_RESTART:
+/*XXX*/vmmcall(0x12345607, 0, 21);
r = sef_cbs.sef_cb_init_restart(type, &info);
+/*XXX*/vmmcall(0x12345607, r, 22);
break;
default:
/* Not a valid SEF init type. */
+/*XXX*/vmmcall(0x12345607, 0, 23);
r = EINVAL;
break;
}
/* Report back to RS. */
m_ptr->RS_INIT_RESULT = r;
+/*XXX*/vmmcall(0x12345607, 0, 24);
r = sendrec(RS_PROC_NR, m_ptr);
+/*XXX*/vmmcall(0x12345607, 0, 25);
return r;
}
PUBLIC void sef_setcb_init_fresh(sef_cb_init_t cb)
{
assert(cb != NULL);
+/*XXX*/vmmcall(0x12345607, (unsigned) cb, 26);
sef_cbs.sef_cb_init_fresh = cb;
+/*XXX*/vmmcall(0x12345607, 0, 27);
}
/*===========================================================================*
struct sigaction sa;
struct stat stb;
+/*XXX*/vmmcall(0x12345608, 0, 1);
#define OPENFDS \
if (fstat(0, &stb) < 0) { \
/* Open standard input, output & error. */ \
dup(1); \
}
+/*XXX*/vmmcall(0x12345608, 0, 2);
sigemptyset(&sa.sa_mask);
sa.sa_flags = 0;
sigaction(SIGABRT, &sa, NULL);
/* Execute the /etc/rc file. */
+/*XXX*/vmmcall(0x12345608, 0, 3);
if ((pid = fork()) != 0) {
+/*XXX*/vmmcall(0x12345608, 0, 4);
/* Parent just waits. */
while (wait(NULL) != pid) {
+/*XXX*/vmmcall(0x12345608, 0, 5);
if (gotabrt) reboot(RBT_HALT);
}
+/*XXX*/vmmcall(0x12345608, 0, 6);
} else {
#if ! SYS_GETKENV
struct sysgetenv sysgetenv;
static char *rc_command[] = { "sh", "/etc/rc", NULL, NULL, NULL };
char **rcp = rc_command + 2;
+/*XXX*/vmmcall(0x12345608, 0, 7);
/* Get the boot options from the boot environment. */
sysgetenv.key = "bootopts";
sysgetenv.keylen = 8+1;
if (svrctl(MMGETPARAM, &sysgetenv) == 0) *rcp++ = bootopts;
*rcp = "start";
+/*XXX*/vmmcall(0x12345608, 0, 8);
execute(rc_command);
+/*XXX*/vmmcall(0x12345608, 0, 9);
report(2, "sh /etc/rc");
_exit(1); /* impossible, we hope */
}
+/*XXX*/vmmcall(0x12345608, 0, 10);
OPENFDS;
/* Clear /etc/utmp if it exists. */
check = 1;
while (1) {
+/*XXX*/vmmcall(0x12345608, 0, 11);
while ((pid = waitpid(-1, NULL, check ? WNOHANG : 0)) > 0) {
+/*XXX*/vmmcall(0x12345608, 0, 12);
/* Search to see which line terminated. */
for (linenr = 0; linenr < PIDSLOTS; linenr++) {
slotp = &slots[linenr];
}
}
}
+/*XXX*/vmmcall(0x12345608, 0, 13);
/* If a signal 1 (SIGHUP) is received, simply reset error counts. */
if (gothup) {
endttyent();
}
check = 0;
+/*XXX*/vmmcall(0x12345608, 0, 14);
}
}
sigset_t sigset;
/* SEF local startup. */
+/*XXX*/vmmcall(0x12345609, 0, 1);
sef_local_startup();
+/*XXX*/vmmcall(0x12345609, 0, 2);
sched_init(); /* initialize user-space scheduling */
/* This is PM's main loop- get work and do it, forever and forever. */
+/*XXX*/vmmcall(0x12345609, 0, 3);
while (TRUE) {
int ipc_status;
/* Wait for the next message and extract useful information from it. */
+/*XXX*/vmmcall(0x12345609, 0, 4);
if (sef_receive_status(ANY, &m_in, &ipc_status) != OK)
panic("PM sef_receive_status error");
who_e = m_in.m_source; /* who sent the message */
+/*XXX*/vmmcall(0x12345609, m_in.m_type, 5);
if(pm_isokendpt(who_e, &who_p) != OK)
panic("PM got message from invalid endpoint: %d", who_e);
call_nr = m_in.m_type; /* system call number */
 * calling. This can happen in case of synchronous alarms (CLOCK) or
* event like pending kernel signals (SYSTEM).
*/
+/*XXX*/vmmcall(0x12345609, call_nr, 6);
mp = &mproc[who_p < 0 ? PM_PROC_NR : who_p];
if(who_p >= 0 && mp->mp_endpoint != who_e) {
panic("PM endpoint number out of sync with source: %d",
}
/* Drop delayed calls from exiting processes. */
+/*XXX*/vmmcall(0x12345609, 0, 7);
if (mp->mp_flags & EXITING)
continue;
continue;
}
+/*XXX*/vmmcall(0x12345609, 0, 8);
switch(call_nr)
{
case PM_SETUID_REPLY:
}
/* Send reply. */
+/*XXX*/vmmcall(0x12345609, result, 9);
if (result != SUSPEND) setreply(who_p, result);
sendreply();
+/*XXX*/vmmcall(0x12345609, 0, 10);
}
return(OK);
}
int result; /* result to return */
/* SEF local startup. */
+/*XXX*/vmmcall(0x1234560a, 0, 1);
sef_local_startup();
/* Main loop - get work and do it, forever. */
+/*XXX*/vmmcall(0x1234560a, 0, 2);
while (TRUE) {
+/*XXX*/vmmcall(0x1234560a, 0, 3);
/* Wait for request message. */
get_work(&m, &ipc_status);
who_e = m.m_source;
panic("message from bogus source: %d", who_e);
}
+/*XXX*/vmmcall(0x1234560a, m.m_type, 4);
call_nr = m.m_type;
/* Now determine what to do. Four types of requests are expected:
/* Notification messages are control messages and do not need a reply.
* These include heartbeat messages and system notifications.
*/
+/*XXX*/vmmcall(0x1234560a, call_nr, 5);
if (is_ipc_notify(ipc_status)) {
switch (who_p) {
case CLOCK:
}
/* Handler functions are responsible for permission checking. */
+/*XXX*/vmmcall(0x1234560a, 0, 6);
switch(call_nr) {
/* User requests. */
case RS_UP: result = do_up(&m); break;
result = EINVAL;
}
+/*XXX*/vmmcall(0x1234560a, result, 7);
/* Finally send reply message, unless disabled. */
if (result != EDONTREPLY) {
m.m_type = result;
reply(who_e, &m);
}
+/*XXX*/vmmcall(0x1234560a, 0, 8);
}
}
}
PRIVATE void sef_local_startup()
{
/* Register init callbacks. */
+/*XXX*/vmmcall(0x1234560a, 9, 9);
sef_setcb_init_fresh(sef_cb_init_fresh); /* RS can only start fresh. */
/* Register signal callbacks. */
+/*XXX*/vmmcall(0x1234560a, 10, 10);
sef_setcb_signal_handler(sef_cb_signal_handler);
+/*XXX*/vmmcall(0x1234560a, 11, 11);
sef_setcb_signal_manager(sef_cb_signal_manager);
+/*XXX*/vmmcall(0x1234560a, 12, 12);
/* Let SEF perform startup. */
sef_startup();
+/*XXX*/vmmcall(0x1234560a, 13, 13);
}
/*===========================================================================*
struct boot_image_dev *boot_image_dev;
/* See if we run in verbose mode. */
+/*XXX*/vmmcall(0x1234560a, 14, 14);
env_parse("rs_verbose", "d", 0, &rs_verbose, 0, 1);
+/*XXX*/vmmcall(0x1234560a, 15, 15);
if ((s = sys_getinfo(GET_HZ, &system_hz, sizeof(system_hz), 0, 0)) != OK)
panic("Cannot get system timer frequency\n");
/* Initialize the global init descriptor. */
+/*XXX*/vmmcall(0x1234560a, 16, 16);
rinit.rproctab_gid = cpf_grant_direct(ANY, (vir_bytes) rprocpub,
sizeof(rprocpub), CPF_READ);
+/*XXX*/vmmcall(0x1234560a, 17, 17);
if(!GRANT_VALID(rinit.rproctab_gid)) {
panic("unable to create rprocpub table grant: %d", rinit.rproctab_gid);
}
/* Initialize some global variables. */
+/*XXX*/vmmcall(0x1234560a, 18, 18);
rupdate.flags = 0;
shutting_down = FALSE;
if ((s = sys_getimage(image)) != OK) {
panic("unable to get copy of boot image table: %d", s);
}
+/*XXX*/vmmcall(0x1234560a, 19, 19);
/* Determine the number of system services in the boot image table and
* compute the size required for the boot image buffer.
ip = &image[i];
/* System services only. */
+/*XXX*/vmmcall(0x1234560a, 20, 20);
if(iskerneln(_ENDPOINT_P(ip->endpoint))) {
continue;
}
nr_image_srvs++;
/* Lookup the corresponding entry in the boot image sys table. */
+/*XXX*/vmmcall(0x1234560a, 21, 21);
boot_image_info_lookup(ip->endpoint, image,
NULL, NULL, &boot_image_sys, NULL);
/* If we must keep a copy of this system service, read the header
* and increase the size of the boot image buffer.
*/
+/*XXX*/vmmcall(0x1234560a, 22, 22);
if(boot_image_sys->flags & SF_USE_REPL) {
boot_image_sys->flags |= SF_USE_COPY;
}
boot_image_buffer_size += header.a_hdrlen
+ header.a_text + header.a_data;
}
+/*XXX*/vmmcall(0x1234560a, 23, 23);
}
/* Determine the number of entries in the boot image priv table and make sure
* it matches the number of system services in the boot image table.
*/
+/*XXX*/vmmcall(0x1234560a, 24, 24);
nr_image_priv_srvs = 0;
for (i=0; boot_image_priv_table[i].endpoint != NULL_BOOT_NR; i++) {
boot_image_priv = &boot_image_priv_table[i];
}
/* Allocate boot image buffer. */
+/*XXX*/vmmcall(0x1234560a, 25, 25);
if(boot_image_buffer_size > 0) {
boot_image_buffer = rs_startup_sbrk(boot_image_buffer_size);
if(boot_image_buffer == (char *) -1) {
}
/* Reset the system process table. */
+/*XXX*/vmmcall(0x1234560a, 26, 26);
for (rp=BEG_RPROC_ADDR; rp<END_RPROC_ADDR; rp++) {
rp->r_flags = 0;
rp->r_pub = &rprocpub[rp - rproc];
* In addition, set priviliges, sys properties, and dev properties (if any)
* for every system service.
*/
+/*XXX*/vmmcall(0x1234560a, 27, 27);
for (i=0; boot_image_priv_table[i].endpoint != NULL_BOOT_NR; i++) {
boot_image_priv = &boot_image_priv_table[i];
/* System services only. */
+/*XXX*/vmmcall(0x1234560a, 28, 28);
if(iskerneln(_ENDPOINT_P(boot_image_priv->endpoint))) {
continue;
}
/*
* Get a copy of the executable image if required.
*/
+/*XXX*/vmmcall(0x1234560a, 29, 29);
rp->r_exec_len = 0;
rp->r_exec = NULL;
if(boot_image_sys->flags & SF_USE_COPY) {
rpub->period = boot_image_priv->period;
if(boot_image_priv->endpoint != RS_PROC_NR) {
+/*XXX*/vmmcall(0x1234560a, 30, 30);
/* Force a static priv id for system services in the boot image. */
rp->r_priv.s_id = static_priv_id(
_ENDPOINT_P(boot_image_priv->endpoint));
rp->r_priv.s_k_call_mask, KERNEL_CALL, TRUE);
/* Set the privilege structure. */
+/*XXX*/vmmcall(0x1234560a, 31, 31);
if ((s = sys_privctl(ip->endpoint, SYS_PRIV_SET_SYS, &(rp->r_priv)))
!= OK) {
panic("unable to set privilege structure: %d", s);
}
+/*XXX*/vmmcall(0x1234560a, 32, 32);
}
/* Synch the privilege structure with the kernel. */
+/*XXX*/vmmcall(0x1234560a, 33, 33);
if ((s = sys_getpriv(&(rp->r_priv), ip->endpoint)) != OK) {
panic("unable to synch privilege structure: %d", s);
}
/*
* Set sys properties.
*/
+/*XXX*/vmmcall(0x1234560a, 34, 34);
rpub->sys_flags = boot_image_sys->flags; /* sys flags */
/*
build_cmd_dep(rp);
/* Initialize vm call mask bitmap from unordered set. */
+/*XXX*/vmmcall(0x1234560a, 35, 35);
fill_call_mask(boot_image_priv->vm_calls, NR_VM_CALLS,
rpub->vm_call_mask, VM_RQ_BASE, TRUE);
rp->r_flags = RS_IN_USE | RS_ACTIVE;
rproc_ptr[_ENDPOINT_P(rpub->endpoint)]= rp;
rpub->in_use = TRUE;
+/*XXX*/vmmcall(0x1234560a, 36, 36);
}
/* - Step 2: allow every system service in the boot image to run.
*/
nr_uncaught_init_srvs = 0;
+/*XXX*/vmmcall(0x1234560a, 37, 37);
for (i=0; boot_image_priv_table[i].endpoint != NULL_BOOT_NR; i++) {
+/*XXX*/vmmcall(0x1234560a, 38, 38);
boot_image_priv = &boot_image_priv_table[i];
/* System services only. */
rpub = rp->r_pub;
/* Allow the service to run. */
+/*XXX*/vmmcall(0x1234560a, 39, 39);
if ((s = sys_privctl(rpub->endpoint, SYS_PRIV_ALLOW, NULL)) != OK) {
panic("unable to initialize privileges: %d", s);
}
/* Initialize service. We assume every service will always get
* back to us here at boot time.
*/
+/*XXX*/vmmcall(0x1234560a, 40, 40);
if(boot_image_priv->flags & SYS_PROC) {
+/*XXX*/vmmcall(0x1234560a, 41, 41);
if ((s = init_service(rp, SEF_INIT_FRESH)) != OK) {
panic("unable to initialize service: %d", s);
}
+/*XXX*/vmmcall(0x1234560a, 42, 42);
if(rpub->sys_flags & SF_SYNCH_BOOT) {
/* Catch init ready message now to synchronize. */
catch_boot_init_ready(rpub->endpoint);
/* Catch init ready message later. */
nr_uncaught_init_srvs++;
}
+/*XXX*/vmmcall(0x1234560a, 43, 43);
}
+/*XXX*/vmmcall(0x1234560a, 44, 44);
}
/* - Step 3: let every system service complete initialization by
* catching all the init ready messages left.
*/
+/*XXX*/vmmcall(0x1234560a, 45, 45);
while(nr_uncaught_init_srvs) {
+/*XXX*/vmmcall(0x1234560a, 46, 46);
catch_boot_init_ready(ANY);
nr_uncaught_init_srvs--;
}
* Complete the initialization of the system process table in collaboration
* with other system services.
*/
+/*XXX*/vmmcall(0x1234560a, 47, 47);
if ((s = getsysinfo(PM_PROC_NR, SI_PROC_TAB, mproc)) != OK) {
panic("unable to get copy of PM process table: %d", s);
}
+/*XXX*/vmmcall(0x1234560a, 48, 48);
for (i=0; boot_image_priv_table[i].endpoint != NULL_BOOT_NR; i++) {
boot_image_priv = &boot_image_priv_table[i];
+/*XXX*/vmmcall(0x1234560a, 49, 49);
/* System services only. */
if(iskerneln(_ENDPOINT_P(boot_image_priv->endpoint))) {
}
}
}
+/*XXX*/vmmcall(0x1234560a, 50, 50);
/*
* Now complete RS initialization process in collaboration with other
*/
/* Let the rest of the system know about our dynamically allocated buffer. */
if(boot_image_buffer_size > 0) {
+/*XXX*/vmmcall(0x1234560a, 51, 51);
boot_image_buffer = rs_startup_sbrk_synch(boot_image_buffer_size);
if(boot_image_buffer == (char *) -1) {
panic("unable to synch boot image buffer");
}
}
+/*XXX*/vmmcall(0x1234560a, 52, 52);
/* Set alarm to periodically check service status. */
if (OK != (s=sys_setalarm(RS_DELTA_T, 0)))
panic("couldn't set alarm: %d", s);
+/*XXX*/vmmcall(0x1234560a, 53, 53);
/* Map out our own text and data. This is normally done in crtso.o
* but RS is an exception - we don't get to talk to VM so early on.
*/
unmap_ok = 1;
_minix_unmapzero();
+/*XXX*/vmmcall(0x1234560a, 54, 54);
return(OK);
}
static int level = 0;
void *ret;
+/*XXX*/vmmcall(0x1234560e, 0, 86);
pt = &vmprocess->vm_pt;
assert(reason >= 0 && reason < VMP_CATEGORIES);
level++;
+/*XXX*/vmmcall(0x1234560e, level, 87);
assert(level >= 1);
assert(level <= 2);
+/*XXX*/vmmcall(0x1234560e, level, 88);
if(level > 1 || !(vmprocess->vm_flags & VMF_HASPT) || !meminit_done) {
int r;
void *s;
+/*XXX*/vmmcall(0x1234560e, level, 89);
s=vm_getsparepage(phys);
+/*XXX*/vmmcall(0x1234560e, level, 90);
level--;
if(!s) {
+/*XXX*/vmmcall(0x1234560e, level, 91);
util_stacktrace();
printf("VM: warning: out of spare pages\n");
}
+/*XXX*/vmmcall(0x1234560e, level, 92);
return s;
}
/* VM does have a pagetable, so get a page and map it in there.
* Where in our virtual address space can we put it?
*/
+/*XXX*/vmmcall(0x1234560e, level, 93);
loc = findhole(pt, arch_vir2map(vmprocess, vmprocess->vm_stacktop),
vmprocess->vm_arch.vm_data_top);
+/*XXX*/vmmcall(0x1234560e, loc, 94);
if(loc == NO_MEM) {
+/*XXX*/vmmcall(0x1234560e, level, 95);
level--;
printf("VM: vm_allocpage: findhole failed\n");
return NULL;
/* Allocate page of memory for use by VM. As VM
* is trusted, we don't have to pre-clear it.
*/
+/*XXX*/vmmcall(0x1234560e, level, 96);
if((newpage = alloc_mem(CLICKSPERPAGE, 0)) == NO_MEM) {
level--;
printf("VM: vm_allocpage: alloc_mem failed\n");
*phys = CLICK2ABS(newpage);
/* Map this page into our address space. */
+/*XXX*/vmmcall(0x1234560e, 0, 97);
if((r=pt_writemap(pt, loc, *phys, I386_PAGE_SIZE,
I386_VM_PRESENT | I386_VM_USER | I386_VM_WRITE, 0)) != OK) {
+/*XXX*/vmmcall(0x1234560e, 0, 98);
free_mem(newpage, CLICKSPERPAGE);
printf("vm_allocpage writemap failed\n");
level--;
return NULL;
}
+/*XXX*/vmmcall(0x1234560e, 0, 99);
if((r=sys_vmctl(SELF, VMCTL_FLUSHTLB, 0)) != OK) {
+/*XXX*/vmmcall(0x1234560e, 0, 100);
panic("VMCTL_FLUSHTLB failed: %d", r);
}
+/*XXX*/vmmcall(0x1234560e, 0, 101);
level--;
/* Return user-space-ready pointer to it. */
+/*XXX*/vmmcall(0x1234560e, 0, 102);
ret = (void *) arch_map2vir(vmprocess, loc);
+/*XXX*/vmmcall(0x1234560e, (unsigned) ret, 103);
return ret;
}
* mappings from in-kernel page tables pointing to
* the page directories (the page_directories data).
*/
+/*XXX*/vmmcall(0x1234560e, 0, 80);
if(!pt->pt_dir &&
!(pt->pt_dir = vm_allocpage(&pt->pt_dir_phys, VMP_PAGEDIR))) {
+/*XXX*/vmmcall(0x1234560e, 0, 81);
return ENOMEM;
}
+/*XXX*/vmmcall(0x1234560e, 0, 82);
for(i = 0; i < I386_VM_DIR_ENTRIES; i++) {
+/*XXX*/vmmcall(0x1234560e, 0, 83);
pt->pt_dir[i] = 0; /* invalid entry (I386_VM_PRESENT bit = 0) */
pt->pt_pt[i] = NULL;
}
pt->pt_virtop = 0;
/* Map in kernel. */
+/*XXX*/vmmcall(0x1234560e, 0, 84);
if(pt_mapkernel(pt) != OK)
panic("pt_new: pt_mapkernel failed");
+/*XXX*/vmmcall(0x1234560e, 0, 85);
return OK;
}
newpt = &vmprocess->vm_pt;
/* Get ourselves spare pages. */
+/*XXX*/vmmcall(0x1234560e, 0, 54);
if(!(sparepages_mem = (vir_bytes) aalloc(I386_PAGE_SIZE*SPAREPAGES)))
panic("pt_init: aalloc for spare failed");
+/*XXX*/vmmcall(0x1234560e, 0, 55);
if((r=sys_umap(SELF, VM_D, (vir_bytes) sparepages_mem,
I386_PAGE_SIZE*SPAREPAGES, &sparepages_ph)) != OK)
panic("pt_init: sys_umap failed: %d", r);
+/*XXX*/vmmcall(0x1234560e, 0, 56);
for(s = 0; s < SPAREPAGES; s++) {
sparepages[s].page = (void *) (sparepages_mem + s*I386_PAGE_SIZE);
sparepages[s].phys = sparepages_ph + s*I386_PAGE_SIZE;
/* global bit and 4MB pages available? */
global_bit_ok = _cpufeature(_CPUF_I386_PGE);
bigpage_ok = _cpufeature(_CPUF_I386_PSE);
+/*XXX*/vmmcall(0x1234560e, 0, 57);
/* Set bit for PTE's and PDE's if available. */
if(global_bit_ok)
/* Make new page table for ourselves, partly copied
* from the current one.
*/
+/*XXX*/vmmcall(0x1234560e, 0, 58);
if(pt_new(newpt) != OK)
panic("pt_init: pt_new failed");
/* Set up mappings for VM process. */
+/*XXX*/vmmcall(0x1234560e, 0, 59);
for(v = lo; v < hi; v += I386_PAGE_SIZE) {
phys_bytes addr;
u32_t flags;
/* We have to write the new position in the PT,
* so we can move our segments.
*/
+/*XXX*/vmmcall(0x1234560e, 0, 60);
if(pt_writemap(newpt, v+moveup, v, I386_PAGE_SIZE,
I386_VM_PRESENT|I386_VM_WRITE|I386_VM_USER, 0) != OK)
panic("pt_init: pt_writemap failed");
}
/* Move segments up too. */
+/*XXX*/vmmcall(0x1234560e, 0, 61);
vmprocess->vm_arch.vm_seg[T].mem_phys += ABS2CLICK(moveup);
vmprocess->vm_arch.vm_seg[D].mem_phys += ABS2CLICK(moveup);
vmprocess->vm_arch.vm_seg[S].mem_phys += ABS2CLICK(moveup);
/* Allocate us a page table in which to remember page directory
* pointers.
*/
+/*XXX*/vmmcall(0x1234560e, 0, 62);
if(!(page_directories = vm_allocpage(&page_directories_phys,
VMP_PAGETABLE)))
panic("no virt addr for vm mappings");
+/*XXX*/vmmcall(0x1234560e, 0, 63);
memset(page_directories, 0, I386_PAGE_SIZE);
/* Increase our hardware data segment to create virtual address
kernmap_pde = free_pde++;
offset = kernmap_pde * I386_BIG_PAGE_SIZE;
+/*XXX*/vmmcall(0x1234560e, 0, 64);
while(sys_vmctl_get_mapping(index, &addr, &len,
&flags) == OK) {
vir_bytes vir;
+/*XXX*/vmmcall(0x1234560e, 0, 65);
if(index >= MAX_KERNMAPPINGS)
panic("VM: too many kernel mappings: %d", index);
kern_mappings[index].phys_addr = addr;
kern_mappings[index].flags =
I386_VM_PRESENT | I386_VM_USER | I386_VM_WRITE |
global_bit;
+/*XXX*/vmmcall(0x1234560e, 0, 66);
if(flags & VMMF_UNCACHED)
kern_mappings[index].flags |= PTF_NOCACHE;
if(addr % I386_PAGE_SIZE)
panic("VM: addr unaligned: %d", addr);
if(len % I386_PAGE_SIZE)
panic("VM: len unaligned: %d", len);
+/*XXX*/vmmcall(0x1234560e, 0, 67);
vir = arch_map2vir(&vmproc[VMP_SYSTEM], offset);
if(sys_vmctl_reply_mapping(index, vir) != OK)
panic("VM: reply failed");
offset += len;
index++;
kernmappings++;
+/*XXX*/vmmcall(0x1234560e, 0, 68);
}
}
/* Find a PDE below processes available for mapping in the
* page directories (readonly).
*/
+/*XXX*/vmmcall(0x1234560e, 0, 69);
pagedir_pde = free_pde++;
pagedir_pde_val = (page_directories_phys & I386_VM_ADDR_MASK) |
I386_VM_PRESENT | I386_VM_USER | I386_VM_WRITE;
/* Tell kernel about free pde's. */
+/*XXX*/vmmcall(0x1234560e, 0, 70);
while(free_pde*I386_BIG_PAGE_SIZE < VM_PROCSTART) {
+/*XXX*/vmmcall(0x1234560e, 0, 71);
if((r=sys_vmctl(SELF, VMCTL_I386_FREEPDE, free_pde++)) != OK) {
panic("VMCTL_I386_FREEPDE failed: %d", r);
}
proc_pde = free_pde;
/* Give our process the new, copied, private page table. */
+/*XXX*/vmmcall(0x1234560e, 0, 72);
pt_mapkernel(newpt); /* didn't know about vm_dir pages earlier */
+/*XXX*/vmmcall(0x1234560e, 0, 73);
pt_bind(newpt, vmprocess);
/* new segment limit for the kernel after paging is enabled */
+/*XXX*/vmmcall(0x1234560e, 0, 74);
ep_data.data_seg_limit = free_pde*I386_BIG_PAGE_SIZE;
/* the memory map which must be installed after paging is enabled */
ep_data.mem_map = vmprocess->vm_arch.vm_seg;
/* Now actually enable paging. */
+/*XXX*/vmmcall(0x1234560e, 0, 75);
if(sys_vmctl_enable_paging(&ep_data) != OK)
panic("pt_init: enable paging failed");
/* Back to reality - this is where the stack actually is. */
vmprocess->vm_arch.vm_seg[S].mem_len -= extra_clicks;
+/*XXX*/vmmcall(0x1234560e, 0, 76);
/* All OK. */
return;
int prealloc;
struct vir_region *reg;
+/*XXX*/vmmcall(0x1234560b, 0, 1);
assert(!(vstart % VM_PAGE_SIZE));
assert(!(text_bytes % VM_PAGE_SIZE));
assert(!(data_bytes % VM_PAGE_SIZE));
assert((!text_start && !data_start) || (text_start && data_start));
/* Place text at start of process. */
+/*XXX*/vmmcall(0x1234560b, 0, 2);
vmp->vm_arch.vm_seg[T].mem_phys = ABS2CLICK(vstart);
vmp->vm_arch.vm_seg[T].mem_vir = 0;
vmp->vm_arch.vm_seg[T].mem_len = ABS2CLICK(text_bytes);
/* page mapping flags for code */
#define TEXTFLAGS (PTF_PRESENT | PTF_USER)
SANITYCHECK(SCL_DETAIL);
+/*XXX*/vmmcall(0x1234560b, 0, 3);
if(text_bytes > 0) {
+/*XXX*/vmmcall(0x1234560b, 0, 4);
if(!(reg=map_page_region(vmp, vstart, 0, text_bytes,
text_start ? text_start : MAP_NONE,
VR_ANON | VR_WRITABLE, text_start ? 0 : MF_PREALLOC))) {
+/*XXX*/vmmcall(0x1234560b, 0, 5);
SANITYCHECK(SCL_DETAIL);
printf("VM: proc_new: map_page_region failed (text)\n");
map_free_proc(vmp);
SANITYCHECK(SCL_DETAIL);
+/*XXX*/vmmcall(0x1234560b, 0, 6);
return(ENOMEM);
}
+/*XXX*/vmmcall(0x1234560b, 0, 7);
map_region_set_tag(reg, VRT_TEXT);
+/*XXX*/vmmcall(0x1234560b, 0, 8);
SANITYCHECK(SCL_DETAIL);
+/*XXX*/vmmcall(0x1234560b, 0, 9);
}
+/*XXX*/vmmcall(0x1234560b, 0, 10);
SANITYCHECK(SCL_DETAIL);
+/*XXX*/vmmcall(0x1234560b, 0, 11);
/* Allocate memory for data (including bss, but not including gap
* or stack), make sure it's cleared, and map it in after text
* (if any).
*/
+/*XXX*/vmmcall(0x1234560b, 0, 12);
if(!(vmp->vm_heap = map_page_region(vmp, vstart + text_bytes, 0,
data_bytes, data_start ? data_start : MAP_NONE, VR_ANON | VR_WRITABLE,
data_start ? 0 : MF_PREALLOC))) {
+/*XXX*/vmmcall(0x1234560b, 0, 13);
printf("VM: exec: map_page_region for data failed\n");
+/*XXX*/vmmcall(0x1234560b, 0, 14);
map_free_proc(vmp);
+/*XXX*/vmmcall(0x1234560b, 0, 15);
SANITYCHECK(SCL_DETAIL);
+/*XXX*/vmmcall(0x1234560b, 0, 16);
return ENOMEM;
}
/* Tag the heap so brk() call knows which region to extend. */
+/*XXX*/vmmcall(0x1234560b, 0, 17);
map_region_set_tag(vmp->vm_heap, VRT_HEAP);
/* How many address space clicks between end of data
* stacktop is the first address after the stack, as addressed
* from within the user process.
*/
+/*XXX*/vmmcall(0x1234560b, 0, 18);
hole_bytes = stacktop - data_bytes - stack_bytes - gap_bytes;
+/*XXX*/vmmcall(0x1234560b, 0, 19);
if(!(reg=map_page_region(vmp,
vstart + text_bytes + data_bytes + hole_bytes,
0, stack_bytes + gap_bytes, MAP_NONE,
VR_ANON | VR_WRITABLE, prealloc_stack ? MF_PREALLOC : 0)) != OK) {
+/*XXX*/vmmcall(0x1234560b, 0, 20);
panic("map_page_region failed for stack");
}
+/*XXX*/vmmcall(0x1234560b, 0, 21);
map_region_set_tag(reg, VRT_STACK);
+/*XXX*/vmmcall(0x1234560b, 0, 22);
vmp->vm_arch.vm_seg[D].mem_phys = ABS2CLICK(vstart + text_bytes);
vmp->vm_arch.vm_seg[D].mem_vir = 0;
vmp->vm_arch.vm_seg[D].mem_len = ABS2CLICK(data_bytes);
vmp->vm_flags |= VMF_HASPT;
+/*XXX*/vmmcall(0x1234560b, 0, 23);
if(vmp->vm_endpoint != NONE) {
/* Pretend the stack is the full size of the data segment, so
* After sys_newmap(), change the stack to what we know the
* stack to be (up to stacktop).
*/
+/*XXX*/vmmcall(0x1234560b, 0, 24);
vmp->vm_arch.vm_seg[S].mem_len = (VM_DATATOP >> CLICK_SHIFT) -
vmp->vm_arch.vm_seg[S].mem_vir - ABS2CLICK(vstart) - ABS2CLICK(text_bytes);
/* What is the final size of the data segment in bytes? */
+/*XXX*/vmmcall(0x1234560b, 0, 25);
vmp->vm_arch.vm_data_top =
(vmp->vm_arch.vm_seg[S].mem_vir +
vmp->vm_arch.vm_seg[S].mem_len) << CLICK_SHIFT;
+/*XXX*/vmmcall(0x1234560b, 0, 26);
if((s=sys_newmap(vmp->vm_endpoint, vmp->vm_arch.vm_seg)) != OK)
panic("sys_newmap (vm) failed: %d", s);
+/*XXX*/vmmcall(0x1234560b, 0, 27);
if((s=pt_bind(&vmp->vm_pt, vmp)) != OK)
panic("exec_newmem: pt_bind failed: %d", s);
+/*XXX*/vmmcall(0x1234560b, 0, 28);
}
/* No yielded memory blocks. */
+/*XXX*/vmmcall(0x1234560b, 0, 29);
yielded_init(&vmp->vm_yielded_blocks);
+/*XXX*/vmmcall(0x1234560b, 0, 30);
return OK;
}
int caller_slot;
struct vmproc *vmp_caller;
+/*XXX*/vmmcall(0x1234560c, 0, 0);
+
/* SEF local startup. */
sef_local_startup();
+/*XXX*/vmmcall(0x1234560c, 0, 1);
SANITYCHECK(SCL_TOP);
/* This is VM's main loop. */
+/*XXX*/vmmcall(0x1234560c, 0, 2);
while (TRUE) {
int r, c;
+/*XXX*/vmmcall(0x1234560c, 0, 3);
SANITYCHECK(SCL_TOP);
if(missing_spares > 0) {
pt_cycle(); /* pagetable code wants to be called */
}
+/*XXX*/vmmcall(0x1234560c, 0, 4);
if ((r=sef_receive_status(ANY, &msg, &rcv_sts)) != OK)
panic("sef_receive_status() error: %d", r);
+/*XXX*/vmmcall(0x1234560c, msg.m_type, 5);
if (is_ipc_notify(rcv_sts)) {
/* Unexpected notify(). */
printf("VM: ignoring notify() from %d\n", msg.m_source);
panic("invalid caller", who_e);
vmp_caller = &vmproc[caller_slot];
c = CALLNUMBER(msg.m_type);
+/*XXX*/vmmcall(0x1234560c, c, 6);
result = ENOSYS; /* Out of range or restricted calls return this. */
if (msg.m_type == VM_PAGEFAULT) {
if (!IPC_STATUS_FLAGS_TEST(rcv_sts, IPC_FLG_MSG_FROM_KERNEL)) {
SANITYCHECK(SCL_FUNCTIONS);
}
}
+/*XXX*/vmmcall(0x1234560c, result, 7);
/* Send reply message, unless the return code is SUSPEND,
* which is a pseudo-result suppressing the reply message.
panic("send() error");
}
}
+/*XXX*/vmmcall(0x1234560c, 0, 8);
}
+/*XXX*/vmmcall(0x1234560c, 0, 9);
return(OK);
}
PRIVATE void sef_local_startup()
{
/* Register init callbacks. */
+/*XXX*/vmmcall(0x1234560c, 10, 10);
sef_setcb_init_fresh(sef_cb_init_fresh);
+/*XXX*/vmmcall(0x1234560c, 11, 11);
sef_setcb_init_restart(sef_cb_init_fail);
/* No live update support for now. */
/* Register signal callbacks. */
+/*XXX*/vmmcall(0x1234560c, 12, 12);
sef_setcb_signal_handler(sef_cb_signal_handler);
/* Let SEF perform startup. */
+/*XXX*/vmmcall(0x1234560c, 13, 13);
sef_startup();
+/*XXX*/vmmcall(0x1234560c, 14, 14);
}
/*===========================================================================*
#if SANITYCHECKS
incheck = nocheck = 0;
#endif
-
+
+/*XXX*/vmmcall(0x1234560c, 0, 27);
vm_paged = 1;
env_parse("vm_paged", "d", 0, &vm_paged, 0, 1);
#if SANITYCHECKS
#endif
/* Get chunks of available memory. */
+/*XXX*/vmmcall(0x1234560c, 0, 28);
get_mem_chunks(mem_chunks);
/* Initialize VM's process table. Request a copy of the system
* image table that is defined at the kernel level to see which
* slots to fill in.
*/
+/*XXX*/vmmcall(0x1234560c, 0, 28);
if (OK != (s=sys_getimage(image)))
panic("couldn't get image table: %d", s);
/* Set table to 0. This invalidates all slots (clear VMF_INUSE). */
+/*XXX*/vmmcall(0x1234560c, 0, 29);
memset(vmproc, 0, sizeof(vmproc));
+/*XXX*/vmmcall(0x1234560c, 0, 30);
for(i = 0; i < ELEMENTS(vmproc); i++) {
vmproc[i].vm_slot = i;
}
/* Walk through boot-time system processes that are alive
* now and make valid slot entries for them.
*/
+/*XXX*/vmmcall(0x1234560c, 0, 31);
for (ip = &image[0]; ip < &image[NR_BOOT_PROCS]; ip++) {
phys_bytes proclimit;
struct vmproc *vmp;
+/*XXX*/vmmcall(0x1234560c, 0, 32);
if(ip->proc_nr >= _NR_PROCS) { panic("proc: %d", ip->proc_nr); }
if(ip->proc_nr < 0 && ip->proc_nr != SYSTEM) continue;
/* Initialize normal process table slot or special SYSTEM
* table slot. Kernel memory is already reserved.
*/
+/*XXX*/vmmcall(0x1234560c, 0, 33);
GETVMP(vmp, ip->proc_nr);
/* reset fields as if exited */
+/*XXX*/vmmcall(0x1234560c, 0, 34);
clear_proc(vmp);
/* Get memory map for this process from the kernel. */
+/*XXX*/vmmcall(0x1234560c, 0, 35);
if ((s=get_mem_map(ip->proc_nr, vmp->vm_arch.vm_seg)) != OK)
panic("couldn't get process mem_map: %d", s);
/* Remove this memory from the free list. */
+/*XXX*/vmmcall(0x1234560c, 0, 36);
reserve_proc_mem(mem_chunks, vmp->vm_arch.vm_seg);
/* Set memory limit. */
+/*XXX*/vmmcall(0x1234560c, 0, 37);
proclimit = CLICK2ABS(vmp->vm_arch.vm_seg[S].mem_phys +
vmp->vm_arch.vm_seg[S].mem_len) - 1;
+/*XXX*/vmmcall(0x1234560c, 0, 38);
if(proclimit > limit)
limit = proclimit;
if (vmp->vm_arch.vm_seg[T].mem_len != 0)
vmp->vm_flags |= VMF_SEPARATE;
+/*XXX*/vmmcall(0x1234560c, 0, 39);
}
/* Architecture-dependent initialization. */
+/*XXX*/vmmcall(0x1234560c, 0, 40);
pt_init(limit);
/* Initialize tables to all physical memory. */
+/*XXX*/vmmcall(0x1234560c, 0, 41);
mem_init(mem_chunks);
meminit_done = 1;
/* Give these processes their own page table. */
+/*XXX*/vmmcall(0x1234560c, 0, 42);
for (ip = &image[0]; ip < &image[NR_BOOT_PROCS]; ip++) {
int s;
struct vmproc *vmp;
vir_bytes old_stacktop, old_stack;
+/*XXX*/vmmcall(0x1234560c, 0, 43);
if(ip->proc_nr < 0) continue;
GETVMP(vmp, ip->proc_nr);
vmp->vm_arch.vm_seg[S].mem_len -
vmp->vm_arch.vm_seg[D].mem_len;
+/*XXX*/vmmcall(0x1234560c, 0, 44);
if(pt_new(&vmp->vm_pt) != OK)
panic("VM: no new pagetable");
#define BASICSTACK VM_PAGE_SIZE
+/*XXX*/vmmcall(0x1234560c, 0, 77);
old_stacktop = CLICK2ABS(vmp->vm_arch.vm_seg[S].mem_vir +
vmp->vm_arch.vm_seg[S].mem_len);
+/*XXX*/vmmcall(0x1234560c, old_stacktop, 78);
if(sys_vmctl(vmp->vm_endpoint, VMCTL_INCSP,
VM_STACKTOP - old_stacktop) != OK) {
+/*XXX*/vmmcall(0x1234560c, 0, 79);
panic("VM: vmctl for new stack failed");
}
+/*XXX*/vmmcall(0x1234560c, 0, 45);
free_mem(vmp->vm_arch.vm_seg[D].mem_phys +
vmp->vm_arch.vm_seg[D].mem_len,
old_stack);
+/*XXX*/vmmcall(0x1234560c, 0, 46);
if(proc_new(vmp,
VM_PROCSTART,
CLICK2ABS(vmp->vm_arch.vm_seg[T].mem_len),
VM_STACKTOP, 0) != OK) {
panic("failed proc_new for boot process");
}
+/*XXX*/vmmcall(0x1234560c, 0, 47);
}
/* Set up table of calls. */
/* Set call table to 0. This invalidates all calls (clear
* vmc_func).
*/
+/*XXX*/vmmcall(0x1234560c, 0, 48);
memset(vm_calls, 0, sizeof(vm_calls));
/* Basic VM calls. */
CALLMAP(VM_YIELDBLOCKGETBLOCK, do_yieldblockgetblock);
/* Sanity checks */
+/*XXX*/vmmcall(0x1234560c, 0, 49);
if(find_kernel_top() >= VM_PROCSTART)
panic("kernel loaded too high");
/* Initialize the structures for queryexit */
+/*XXX*/vmmcall(0x1234560c, 0, 50);
init_query_exit();
/* Unmap our own low pages. */
+/*XXX*/vmmcall(0x1234560c, 0, 51);
unmap_ok = 1;
_minix_unmapzero();
/* Map all the services in the boot image. */
+/*XXX*/vmmcall(0x1234560c, 0, 52);
if((s = sys_safecopyfrom(RS_PROC_NR, info->rproctab_gid, 0,
(vir_bytes) rprocpub, sizeof(rprocpub), S)) != OK) {
panic("sys_safecopyfrom failed: %d", s);
}
}
}
+/*XXX*/vmmcall(0x1234560c, 0, 53);
return(OK);
}
struct phys_region *ph;
physr_avl *phavl;
+/*XXX*/vmmcall(0x1234560d, 0, 31);
assert(!(length % VM_PAGE_SIZE));
SANITYCHECK(SCL_FUNCTIONS);
+/*XXX*/vmmcall(0x1234560d, 0, 32);
startv = region_find_slot(vmp, minv, maxv, length, &prevregion);
if (startv == (vir_bytes) -1)
return NULL;
/* Now we want a new region. */
+/*XXX*/vmmcall(0x1234560d, 0, 33);
if(!SLABALLOC(newregion)) {
printf("VM: map_page_region: allocating region failed\n");
return NULL;
}
/* Fill in node details. */
+/*XXX*/vmmcall(0x1234560d, 0, 34);
USE(newregion,
newregion->vaddr = startv;
newregion->length = length;
newregion->tag = VRT_NONE;
newregion->parent = vmp;);
+/*XXX*/vmmcall(0x1234560d, 0, 35);
SLABALLOC(phavl);
if(!phavl) {
+/*XXX*/vmmcall(0x1234560d, 0, 36);
printf("VM: map_page_region: allocating phys avl failed\n");
SLABFREE(newregion);
return NULL;
}
USE(newregion, newregion->phys = phavl;);
+/*XXX*/vmmcall(0x1234560d, 0, 37);
physr_init(newregion->phys);
/* If we know what we're going to map to, map it right away. */
+/*XXX*/vmmcall(0x1234560d, 0, 38);
if(what != MAP_NONE) {
assert(!(what % VM_PAGE_SIZE));
assert(!(startv % VM_PAGE_SIZE));
assert(!(mapflags & MF_PREALLOC));
+/*XXX*/vmmcall(0x1234560d, 0, 39);
if(map_new_physblock(vmp, newregion, 0, length,
what, PAF_CLEAR, 0) != OK) {
+/*XXX*/vmmcall(0x1234560d, 0, 40);
printf("VM: map_new_physblock failed\n");
USE(newregion,
SLABFREE(newregion->phys););
}
}
+/*XXX*/vmmcall(0x1234560d, 0, 41);
if((flags & VR_ANON) && (mapflags & MF_PREALLOC)) {
+/*XXX*/vmmcall(0x1234560d, 0, 42);
if(map_handle_memory(vmp, newregion, 0, length, 1) != OK) {
+/*XXX*/vmmcall(0x1234560d, 0, 43);
printf("VM: map_page_region: prealloc failed\n");
USE(newregion,
SLABFREE(newregion->phys););
}
/* Link it. */
+/*XXX*/vmmcall(0x1234560d, 0, 44);
if(prevregion) {
+/*XXX*/vmmcall(0x1234560d, 0, 45);
assert(prevregion->vaddr < newregion->vaddr);
USE(newregion, newregion->next = prevregion->next;);
USE(prevregion, prevregion->next = newregion;);
} else {
+/*XXX*/vmmcall(0x1234560d, 0, 46);
USE(newregion, newregion->next = vmp->vm_regions;);
vmp->vm_regions = newregion;
}
+/*XXX*/vmmcall(0x1234560d, 0, 47);
#if SANITYCHECKS
assert(startv == newregion->vaddr);
}
#endif
+/*XXX*/vmmcall(0x1234560d, 0, 48);
SANITYCHECK(SCL_FUNCTIONS);
+/*XXX*/vmmcall(0x1234560d, 0, 49);
return newregion;
}