#define VMCTL_MEMREQ_REPLY 15
#define VMCTL_INCSP 16
#define VMCTL_NOPAGEZERO 18
+#define VMCTL_I386_KERNELLIMIT 19
+#define VMCTL_I386_PAGEDIRS 20
+#define VMCTL_I386_PDE 21
+#define VMCTL_I386_PDEVAL 22
+#define VMCTL_I386_FREEPDE 23
/*===========================================================================*
* Messages for the Reincarnation Server *
#define CPROFILE 0 /* call profiling */
/* Compile kernel so that first page of code and data can be unmapped. */
-#define VM_KERN_NOPAGEZERO 1
+#define VM_KERN_NOPAGEZERO 0
#endif /* _CONFIG_H */
#define I386_VM_ACC 0x020 /* Accessed */
#define I386_VM_ADDR_MASK 0xFFFFF000 /* physical address */
#define I386_VM_ADDR_MASK_4MB 0xFFC00000 /* physical address */
+#define I386_VM_OFFSET_MASK_4MB	0x003FFFFF	/* offset within 4MB page */
/* Page directory specific flags. */
#define I386_VM_BIGPAGE 0x080 /* 4MB page */
#include "../../system.h"
#include <minix/type.h>
+#include "proto.h"
+
extern u32_t kernel_cr3;
+extern u32_t *vm_pagedirs;
/*===========================================================================*
* arch_do_vmctl *
register message *m_ptr; /* pointer to request message */
struct proc *p;
{
+
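+	/* State for the temporary VMCTL_I386_PDE/PDEVAL interface: VM first
+	 * passes a page directory slot, then the value to store in that
+	 * slot of the kernel's own page directory.
+	 */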
+ static int vmpde = -1;
+ static u32_t pdeval = -1;
+
switch(m_ptr->SVMCTL_PARAM) {
case VMCTL_I386_GETCR3:
/* Get process CR3. */
m_ptr->SVMCTL_PF_I386_ERR = rp->p_pagefault.pf_flags;
return OK;
}
+ case VMCTL_I386_KERNELLIMIT:
+ {
+ /* VM wants kernel to increase its segment. */
+ kprintf("kernel: increase limit to 0x%x\n",
+ m_ptr->SVMCTL_VALUE);
+ return prot_set_kern_seg_limit(m_ptr->SVMCTL_VALUE);
+ }
+ case VMCTL_I386_PAGEDIRS:
+ {
+ vm_pagedirs = (u32_t *) m_ptr->SVMCTL_VALUE;
+ kprintf("kernel: pagedirs now 0x%lx\n", vm_pagedirs);
+ return OK;
+ }
+ case VMCTL_I386_PDE:
+ {
+ vmpde = m_ptr->SVMCTL_VALUE;
+ kprintf("kernel: HACK: vmpde %d\n", vmpde);
+ return OK;
+ }
+ case VMCTL_I386_PDEVAL:
+ {
+ pdeval = m_ptr->SVMCTL_VALUE;
+ kprintf("kernel: HACK: vmpde %d, set val 0x%x\n",
+ vmpde, pdeval);
+ i386_updatepde(vmpde, pdeval);
+ kprintf("kernel: HACK: vmpde %d, set val 0x%x done\n",
+ vmpde, pdeval);
+ return OK;
+ }
+ case VMCTL_I386_FREEPDE:
+ {
+ i386_freepde(m_ptr->SVMCTL_VALUE);
+ return OK;
+ }
}
kprintf("arch_do_vmctl: strange param %d\n", m_ptr->SVMCTL_PARAM);
#define IOPL_MASK 0x003000
#define vir2phys(vir) (kinfo.data_base + (vir_bytes) (vir))
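+/* Inverse of vir2phys; only meaningful for physical addresses that fall
+ * inside the kernel data segment.
+ */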
+#define phys2vir(ph) ((vir_bytes) (ph) - kinfo.data_base)
#endif /* _I386_ACONST_H */
.define _write_cr0 ! write a value in cr0
.define _read_cr4
.define _write_cr4
+.define _i386_invlpg_addr
+.define _i386_invlpg_level0
.define _kernel_cr3
pop ebp
ret
+!*===========================================================================*
+!*				i386_invlpg_level0			     *
+!*===========================================================================*
+! PUBLIC void i386_invlpg_level0(void);
+! Invalidate the TLB entry for the page containing the linear address in
+! i386_invlpg_addr. To be called through level0().
+_i386_invlpg_level0:
+	push	ebp
+	mov	ebp, (_i386_invlpg_addr)	! linear address to invalidate
+	invlpg	(ebp)
+ pop ebp
+ ret
+
+
+
extern u32_t cswitch;
u32_t last_cr3 = 0;
+u32_t *vm_pagedirs = NULL;
+
+u32_t i386_invlpg_addr = 0;
+
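+/* Page directory slots donated to the kernel by VM (VMCTL_I386_FREEPDE);
+ * lin_lin_copy() uses them as temporary 4MB mapping windows.
+ */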
+#define WANT_FREEPDES 4
+PRIVATE int nfreepdes = 0, freepdes[WANT_FREEPDES];
+
#define HASPT(procptr) ((procptr)->p_seg.p_cr3 != 0)
FORWARD _PROTOTYPE( void phys_put32, (phys_bytes addr, u32_t value) );
return;
}
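+
+/* Invalidate TLB entries for the linear address range [lin, lin+bytes),
+ * page by page, starting at the page containing lin. (The caller pads
+ * 'bytes' by a page to cover a partial last page.)
+ */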
+void invlpg_range(u32_t lin, u32_t bytes)
+{
+ u32_t o;
+ o = lin % I386_PAGE_SIZE;
+ lin -= o;
+ bytes += o;
+ while(bytes >= I386_PAGE_SIZE) {
+ i386_invlpg_addr = lin;
+ level0(i386_invlpg_level0);
+ lin += I386_PAGE_SIZE;
+ bytes -= I386_PAGE_SIZE;
+ }
+}
+
+/*===========================================================================*
+ * lin_lin_copy *
+ *===========================================================================*/
+int lin_lin_copy(struct proc *srcproc, vir_bytes srclinaddr, u8_t *vsrc,
+ struct proc *dstproc, vir_bytes dstlinaddr, u8_t *vdst,
+ vir_bytes bytes)
+{
+
+ if(nfreepdes < 2)
+ minix_panic("vm: not enough free PDE's", NO_NUM);
+
+ util_stacktrace();
+
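+/* CREATEPDE: map the 4MB region containing LINADDR through the kernel's
+ * page directory slot FREEPDE, and set PTR/OFFSET so that PTR+OFFSET
+ * addresses LINADDR. If PROC is the kernel itself, VIRT can be used
+ * directly and no mapping is needed.
+ */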
+#define CREATEPDE(PROC, PTR, LINADDR, OFFSET, FREEPDE, VIRT) { \
+ if(iskernelp(PROC)) { \
+ PTR = VIRT; \
+ OFFSET = 0; \
+ } else { \
+ u32_t *pdevalptr; \
+ u32_t myphysaddr; \
+ int pde_index; \
+ pde_index = I386_VM_PDE(LINADDR); \
+ pdevalptr = (u32_t *) ((u8_t *) vm_pagedirs + \
+ I386_PAGE_SIZE * PROC->p_nr + \
+ I386_VM_PT_ENT_SIZE * pde_index); \
+ kprintf("pagedirs: 0x%x p_nr: %d linaddr: 0x%x pde: %d\n", \
+ vm_pagedirs, PROC->p_nr, LINADDR, pde_index); \
+ kprintf("pde ptr: 0x%x\n", pdevalptr); \
+ kprintf("value: 0x%x\n", *pdevalptr); \
+ myphysaddr = kernel_cr3 + FREEPDE*I386_VM_PT_ENT_SIZE; \
+ phys_put32(myphysaddr, *pdevalptr); \
+ PTR = (u8_t *) phys2vir(I386_BIG_PAGE_SIZE*FREEPDE); \
+ kprintf("ptr: 0x%x\n", PTR); \
+ OFFSET = LINADDR & I386_VM_OFFSET_MASK_4MB; \
+ kprintf("offset: 0x%lx & 0x%lx -> 0x%lx\n", LINADDR, \
+ I386_VM_OFFSET_MASK_4MB, OFFSET); \
+ invlpg_range(LINADDR, bytes + I386_PAGE_SIZE); \
+ } \
+}
+
+ while(bytes > 0) {
+ u8_t *srcptr, *dstptr;
+ vir_bytes srcoffset, dstoffset, chunk, remain;
+
+ /* Set up 4MB ranges. */
+ CREATEPDE(srcproc, srcptr, srclinaddr, srcoffset, freepdes[0], vsrc);
+ CREATEPDE(dstproc, dstptr, dstlinaddr, dstoffset, freepdes[1], vdst);
+
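+		/* Copy no more than what is left of both 4MB windows. */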
+ remain = I386_BIG_PAGE_SIZE - MAX(srcoffset, dstoffset);
+ chunk = MIN(bytes, remain);
+
+		/* Copy this chunk in one go; the outer loop advances the
+		 * addresses and decrements 'bytes' by 'chunk' below.
+		 */
+		kprintf("copy %d -> %d %d/%d using 0x%lx+0x%lx -> 0x%lx+0x%lx\n",
+			srcproc->p_endpoint, dstproc->p_endpoint,
+			chunk, bytes, srcptr, srcoffset,
+			dstptr, dstoffset);
+		memcpy(dstptr + dstoffset, srcptr + srcoffset, chunk);
+		kprintf("done\n");
+
+ /* Update counter and addresses for next iteration, if any. */
+ bytes -= chunk;
+ srclinaddr += chunk;
+ dstlinaddr += chunk;
+ vsrc += chunk;
+ vdst += chunk;
+ }
+
+ return OK;
+}
+
/*===========================================================================*
* virtual_copy_f *
*===========================================================================*/
}
}
+#if 0
+ /* Special case: vir to vir copy */
+ if(vm_pagedirs && procs[_SRC_] && procs[_DST_] &&
+ HASPT(procs[_SRC_]) && HASPT(procs[_DST_]) &&
+	   src_addr->segment == D && dst_addr->segment == D) {
+ lin_lin_copy(procs[_SRC_], phys_addr[_SRC_], (u8_t *) src_addr->offset,
+ procs[_DST_], phys_addr[_DST_], (u8_t *) dst_addr->offset,
+ bytes);
+ }
+#endif
+
if(vmcheck && procs[_SRC_])
CHECKRANGE_OR_SUSPEND(procs[_SRC_], phys_addr[_SRC_], bytes, 0);
if(vmcheck && procs[_DST_])
return EINVAL;
}
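+
+/* Write 'val' into slot 'pde' of the kernel's own page directory
+ * (addressed through kernel_cr3).
+ */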
+void i386_updatepde(int pde, u32_t val)
+{
+ u32_t physaddr;
+ physaddr = kernel_cr3 + pde*I386_VM_PT_ENT_SIZE;
+ kprintf("kernel: i386_updatepde: cr3 0x%lx; phys addr 0x%lx; pde %d, val 0x%lx\n",
+ kernel_cr3, physaddr, pde, val);
+ kprintf("previous entry was: 0x%lx\n", phys_get32(physaddr));
+ phys_put32(physaddr, val);
+ kprintf("entry is now: 0x%lx\n", phys_get32(physaddr));
+}
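+
+/* Remember a page directory slot that VM has donated for kernel use. */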
+void i386_freepde(int pde)
+{
+ if(nfreepdes >= WANT_FREEPDES)
+ return;
+ freepdes[nfreepdes++] = pde;
+	kprintf("kernel: free pde: %d\n", pde);
+}
{ level0_call, LEVEL0_VECTOR, TASK_PRIVILEGE },
};
+	/* Round the kernel data size up to a whole number of clicks. */
+ if(kinfo.data_base % CLICK_SIZE)
+ minix_panic("kinfo.data_base not aligned", NO_NUM);
+ kinfo.data_size = ((kinfo.data_size+CLICK_SIZE-1)/CLICK_SIZE) * CLICK_SIZE;
+
/* Build gdt and idt pointers in GDT where the BIOS expects them. */
dtp= (struct desctableptr_s *) &gdt[GDT_INDEX];
* (u16_t *) dtp->limit = (sizeof gdt) - 1;
rp->p_reg.ds = (DS_LDT_INDEX*DESC_SIZE) | TI | privilege;
}
+/*===========================================================================*
+ * prot_set_kern_seg_limit *
+ *===========================================================================*/
+PUBLIC int prot_set_kern_seg_limit(vir_bytes limit)
+{
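+/* Grow the kernel data segment so the kernel can address linear memory up
+ * to 'limit'; the memory maps of kernel tasks are grown accordingly.
+ */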
+ struct proc *rp;
+ vir_bytes prev;
+ int orig_click;
+ int incr_clicks;
+
+ kprintf("prot_set_kern_seg_limit: limit 0x%lx\n", limit);
+
+ if(limit <= kinfo.data_base) {
+ kprintf("prot_set_kern_seg_limit: limit bogus\n");
+ return EINVAL;
+ }
+
+ kprintf("size: 0x%lx -> ", kinfo.data_size);
+
+ /* Do actual increase. */
+ orig_click = kinfo.data_size / CLICK_SIZE;
+ kinfo.data_size = limit - kinfo.data_base;
+ incr_clicks = kinfo.data_size / CLICK_SIZE - orig_click;
+
+ kprintf("0x%lx\n", kinfo.data_size);
+
+ kprintf("prot_set_kern_seg_limit: prot_init\n");
+
+ prot_init();
+
+ kprintf("prot_set_kern_seg_limit: prot_init done\n");
+
+ /* Increase kernel processes too. */
+ for (rp = BEG_PROC_ADDR; rp < END_PROC_ADDR; ++rp) {
+ if (RTS_ISSET(rp, SLOT_FREE) || !iskernelp(rp))
+ continue;
+ kprintf("prot_set_kern_seg_limit: increase %d 0x%x ->\n",
+ rp->p_endpoint, rp->p_memmap[S].mem_len);
+ rp->p_memmap[S].mem_len += incr_clicks;
+ alloc_segments(rp);
+ kprintf("prot_set_kern_seg_limit: increase %d done -> 0x%x\n",
+ rp->p_endpoint, rp->p_memmap[S].mem_len);
+ }
+ kprintf("prot_set_kern_seg_limit: done\n");
+
+ return OK;
+}
+
_PROTOTYPE( void vir_outsb, (u16_t port, struct proc *proc, u32_t vir, size_t count));
_PROTOTYPE( void vir_insw, (u16_t port, struct proc *proc, u32_t vir, size_t count));
_PROTOTYPE( void vir_outsw, (u16_t port, struct proc *proc, u32_t vir, size_t count));
+_PROTOTYPE( void i386_updatepde, (int pde, u32_t val));
+_PROTOTYPE( void i386_freepde, (int pde));
/* exception.c */
_PROTOTYPE( void phys_insw, (U16_t port, phys_bytes buf, size_t count) );
_PROTOTYPE( void phys_outsb, (U16_t port, phys_bytes buf, size_t count) );
_PROTOTYPE( void phys_outsw, (U16_t port, phys_bytes buf, size_t count) );
-_PROTOTYPE( void i386_invlpg, (U32_t addr) );
+_PROTOTYPE( void i386_invlpg_level0, (void) );
/* protect.c */
_PROTOTYPE( void prot_init, (void) );
_PROTOTYPE( void init_dataseg, (struct segdesc_s *segdp, phys_bytes base,
vir_bytes size, int privilege) );
_PROTOTYPE( void enable_iop, (struct proc *pp) );
+_PROTOTYPE( int prot_set_kern_seg_limit, (vir_bytes limit) );
/* functions defined in architecture-independent kernel source. */
#include "../../proto.h"
/* vm operation mode state and values */
EXTERN long vm_paged;
-EXTERN phys_bytes kernel_top_bytes;
#include "memory.h"
-int global_bit_ok = 0;
-int bigpage_ok = 0;
-
/* Location in our virtual address space where we can map in
* any physical page we want.
*/
-static unsigned char *varmap = NULL; /* Our address space. */
-static u32_t varmap_loc; /* Our page table. */
+PRIVATE unsigned char *varmap = NULL; /* Our address space. */
+PRIVATE u32_t varmap_loc; /* Our page table. */
+
+/* PDE slots and values used to map in the kernel (with a 4MB page) and
+ * to expose the page directories page; global_bit is I386_VM_GLOBAL if
+ * the CPU supports global pages.
+ */
+PRIVATE int kernel_pde = -1, pagedir_pde = -1;
+PRIVATE u32_t kern_pde_val = 0, global_bit = 0, pagedir_pde_val;
+
+/* 4MB page size available in hardware? */
+PRIVATE int bigpage_ok = 0;
/* Our process table entry. */
struct vmproc *vmp = &vmproc[VM_PROC_NR];
*/
#define SPAREPAGES 5
int missing_spares = SPAREPAGES;
-static struct {
+PRIVATE struct {
void *page;
u32_t phys;
} sparepages[SPAREPAGES];
}
if(worst < n) worst = n;
total += n;
-#if 0
- if(n > 0)
- printf("VM: made %d spares, total %d, worst %d\n", n, total, worst);
-#endif
+
return NULL;
}
*/
pt_t *newpt;
int s, r;
- vir_bytes v;
+ vir_bytes v, kpagedir;
phys_bytes lo, hi;
vir_bytes extra_clicks;
u32_t moveup = 0;
-
- global_bit_ok = _cpufeature(_CPUF_I386_PGE);
- bigpage_ok = _cpufeature(_CPUF_I386_PSE);
+ int global_bit_ok = 0;
+ int free_pde;
+ int p;
+ vir_bytes kernlimit;
/* Shorthand. */
newpt = &vmp->vm_pt;
}
missing_spares = 0;
+
+ /* global bit and 4MB pages available? */
+ global_bit_ok = _cpufeature(_CPUF_I386_PGE);
+ bigpage_ok = _cpufeature(_CPUF_I386_PSE);
+
+ /* Set bit for PTE's and PDE's if available. */
+ if(global_bit_ok)
+ global_bit = I386_VM_GLOBAL;
+
+ /* Figure out kernel pde slot. */
+ {
+ int pde1, pde2;
+ pde1 = I386_VM_PDE(KERNEL_TEXT);
+ pde2 = I386_VM_PDE(KERNEL_DATA+KERNEL_DATA_LEN);
+ if(pde1 != pde2)
+ vm_panic("pt_init: kernel too big", NO_NUM);
+
+ /* Map in kernel with this single pde value if 4MB pages
+ * supported.
+ */
+ kern_pde_val = (KERNEL_TEXT & I386_VM_ADDR_MASK_4MB) |
+ I386_VM_BIGPAGE|
+ I386_VM_PRESENT|I386_VM_WRITE|global_bit;
+ kernel_pde = pde1;
+ vm_assert(kernel_pde >= 0);
+ free_pde = kernel_pde+1;
+ }
/* Make new page table for ourselves, partly copied
* from the current one.
vmp->vm_arch.vm_seg[D].mem_phys += ABS2CLICK(moveup);
vmp->vm_arch.vm_seg[S].mem_phys += ABS2CLICK(moveup);
-#if 0
- /* Map in kernel. */
- if(pt_mapkernel(newpt) != OK)
- vm_panic("pt_init: pt_mapkernel failed", NO_NUM);
-
/* Allocate us a page table in which to remember page directory
* pointers.
*/
if(!(page_directories = vm_allocpages(&page_directories_phys,
1, VMP_PAGETABLE)))
vm_panic("no virt addr for vm mappings", NO_NUM);
-#endif
+
+ memset(page_directories, 0, I386_PAGE_SIZE);
/* Give our process the new, copied, private page table. */
pt_bind(newpt, vmp);
}
varmap = (unsigned char *) arch_map2vir(vmp, varmap_loc);
+	/* Find a free PDE slot below the process area that can be used
+	 * to map in the page directories (read-only).
+	 */
+ pagedir_pde = free_pde++;
+ pagedir_pde_val = (page_directories_phys & I386_VM_ADDR_MASK) |
+ I386_VM_PRESENT | I386_VM_USER;
+
+ printf("VM: HACK: pagedir pde val is 0x%x (phys 0x%x)\n",
+ pagedir_pde_val, page_directories_phys);
+
+ /* Temporary hack while kernel still maintains own pagetable */
+ if((r=sys_vmctl(SELF, VMCTL_I386_PDE, pagedir_pde)) != OK) {
+ vm_panic("VMCTL_I386_PDE failed", r);
+ }
+
+ printf("VM: HACK: pagedir pde val is 0x%x\n", pagedir_pde_val);
+
+ /* Temporary hack while kernel still maintains own pagetable */
+ if((r=sys_vmctl(SELF, VMCTL_I386_PDEVAL, pagedir_pde_val)) != OK) {
+ vm_panic("VMCTL_I386_PDEVAL failed", r);
+ }
+
+ /* Tell kernel about free pde's. */
+ while(free_pde*I386_BIG_PAGE_SIZE < VM_PROCSTART) {
+ printf("VM: telling kernel about free pde %d\n", free_pde);
+ if((r=sys_vmctl(SELF, VMCTL_I386_FREEPDE, free_pde++)) != OK) {
+ vm_panic("VMCTL_I386_FREEPDE failed", r);
+ }
+ }
+
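+	/* The kernel segment has to reach up to the last free pde window,
+	 * so the kernel can actually address the memory mapped in there.
+	 */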
+ kernlimit = free_pde*I386_BIG_PAGE_SIZE;
+
+ printf("VM: set limit to 0x%x\n", kernlimit);
+
+ /* Increase kernel segment to address this memory. */
+ if((r=sys_vmctl(SELF, VMCTL_I386_KERNELLIMIT, kernlimit)) != OK) {
+ vm_panic("VMCTL_I386_KERNELLIMIT failed", r);
+ }
+
+ kpagedir = arch_map2vir(&vmproc[VMP_SYSTEM],
+ pagedir_pde*I386_BIG_PAGE_SIZE);
+ printf("VM: pagedir linear 0x%x, in kernel 0x%x\n",
+ pagedir_pde*I386_BIG_PAGE_SIZE, kpagedir);
+
+ /* Tell kernel how to get at the page directories. */
+ if((r=sys_vmctl(SELF, VMCTL_I386_PAGEDIRS, kpagedir)) != OK) {
+		vm_panic("VMCTL_I386_PAGEDIRS failed", r);
+ }
+
/* All OK. */
return;
}
PUBLIC int pt_bind(pt_t *pt, struct vmproc *who)
{
int slot;
+ u32_t phys;
/* Basic sanity checks. */
vm_assert(who);
if(pt) PT_SANE(pt);
vm_assert(pt);
-#if 0
slot = who->vm_slot;
vm_assert(slot >= 0);
vm_assert(slot < ELEMENTS(vmproc));
- vm_assert(!(pt->pt_dir_phys & ~I386_VM_ADDR_MASK));
+ vm_assert(slot < I386_VM_PT_ENTRIES);
- page_directories[slot] = (pt->pt_dir_phys & I386_VM_ADDR_MASK) |
- (I386_VM_PRESENT|I386_VM_WRITE);
-#endif
+ phys = pt->pt_dir_phys & I386_VM_ADDR_MASK;
+ vm_assert(pt->pt_dir_phys == phys);
+
+ /* Update "page directory pagetable." */
+ page_directories[slot] = phys | I386_VM_PRESENT|I386_VM_WRITE;
/* Tell kernel about new page table root. */
return sys_vmctl(who->vm_endpoint, VMCTL_I386_SETCR3,
PUBLIC int pt_mapkernel(pt_t *pt)
{
int r;
- static int pde = -1, do_bigpage = 0;
- u32_t global = 0;
- static u32_t kern_phys;
static int printed = 0;
- if(global_bit_ok) global = I386_VM_GLOBAL;
-
/* Any i386 page table needs to map in the kernel address space. */
vm_assert(vmproc[VMP_SYSTEM].vm_flags & VMF_INUSE);
- if(pde == -1 && bigpage_ok) {
- int pde1, pde2;
- pde1 = I386_VM_PDE(KERNEL_TEXT);
- pde2 = I386_VM_PDE(KERNEL_DATA+KERNEL_DATA_LEN);
- if(pde1 != pde2) {
- printf("VM: pt_mapkernel: kernel too big?");
- bigpage_ok = 0;
- } else {
- kern_phys = KERNEL_TEXT & I386_VM_ADDR_MASK_4MB;
- pde = pde1;
- do_bigpage = 1;
- vm_assert(pde >= 0);
- }
- }
-
- if(do_bigpage) {
- pt->pt_dir[pde] = kern_phys |
- I386_VM_BIGPAGE|I386_VM_PRESENT|I386_VM_WRITE|global;
+ if(bigpage_ok) {
+ pt->pt_dir[kernel_pde] = kern_pde_val;
} else {
/* Map in text. flags: don't write, supervisor only */
if((r=pt_writemap(pt, KERNEL_TEXT, KERNEL_TEXT, KERNEL_TEXT_LEN,
- I386_VM_PRESENT|global, 0)) != OK)
+ I386_VM_PRESENT|global_bit, 0)) != OK)
return r;
/* Map in data. flags: read-write, supervisor only */
if((r=pt_writemap(pt, KERNEL_DATA, KERNEL_DATA, KERNEL_DATA_LEN,
- I386_VM_PRESENT|I386_VM_WRITE|global, 0)) != OK)
+			I386_VM_PRESENT|I386_VM_WRITE|global_bit, 0)) != OK)
return r;
}
+ /* Kernel also wants to know about all page directories. */
+ pt->pt_dir[pagedir_pde] = pagedir_pde_val;
+
return OK;
}
PRIVATE void vm_init(void)
{
int s, i;
+ int click, clicksforgotten = 0;
struct memory mem_chunks[NR_MEMS];
struct boot_image image[NR_BOOT_PROCS];
struct boot_image *ip;
/* Initialize tables to all physical memory. */
mem_init(mem_chunks);
- /* Bits of code need to know where a process can
- * start in a pagetable.
- */
- kernel_top_bytes = find_kernel_top();
-
+#if 0
/* Can first kernel pages of code and data be (left) mapped out?
* If so, change the SYSTEM process' memory map to reflect this
* (future mappings of SYSTEM into other processes will not include
vmp->vm_arch.vm_seg[D].mem_phys += DIFF;
vmp->vm_arch.vm_seg[D].mem_len -= DIFF;
}
+#endif
/* Give these processes their own page table. */
for (ip = &image[0]; ip < &image[NR_BOOT_PROCS]; ip++) {
VM_STACKTOP);
}
+ /* Temporary hack; throw away all lower memory. */
+ while((click=ALLOC_MEM(1, 0)) <= ABS2CLICK(VM_PROCSTART)) {
+ clicksforgotten++;
+ }
+
+ printf("VM: HACK: clicks forgotten: %d last one: 0x%x\n", clicksforgotten, click);
+
/* Set up table of calls. */
#define CALLMAP(code, func, thecaller) { int i; \
if((i=CALLNUMBER(code)) < 0) { vm_panic(#code " invalid", (code)); } \
vm_panic("kernel loaded too high", NO_NUM);
}
-#if 0
-void kputc(int c)
-{
- if(c == '\n')
- ser_putc('\r');
- ser_putc(c);
-}
-#endif
}
if (memp >= &mem_chunks[NR_MEMS])
{
+ printf("VM: looking for memory at 0x%x, length 0x%x\n",
+ CLICK2ABS(map_ptr[T].mem_phys),
+ CLICK2ABS(map_ptr[T].mem_len));
vm_panic("reserve_proc_mem: can't find map in mem_chunks ",
map_ptr[T].mem_phys);
}