}
else
{
+ printf("lance buf: 0x%lx\n", vir2phys(lance_buf));
report( "LANCE", "DMA denied because address out of range", NO_NUM );
}
random_update(RND_TIMING, &r, 1);
/* Schedule new alarm for next m_random call. */
- if (OK != (s=sys_setalarm(KRANDOM_PERIOD, 0)))
+ if (OK != (s=sys_setalarm(random_isseeded() ? KRANDOM_PERIOD : sys_hz(), 0)))
report("RANDOM", "sys_setalarm failed", s);
}
PRIVATE unsigned scr_width; /* # characters on a line */
PRIVATE unsigned scr_lines; /* # lines on the screen */
PRIVATE unsigned scr_size; /* # characters on the screen */
-PUBLIC unsigned info_location; /* location in video memory of struct */
/* tells mem_vid_copy() to blank the screen */
#define BLANK_MEM ((vir_bytes) 0)
char *console_memory = NULL;
-/* boot_tty_info we use to communicate with the boot code. */
-struct boot_tty_info boot_tty_info;
-
/* Per console data. */
typedef struct console {
tty_t *c_tty; /* associated TTY struct */
int c_line; /* line no */
} console_t;
-#define UPDATEBOOTINFO(ccons, infofield, value) { \
- if(ccons->c_line == 0) { \
- boot_tty_info.infofield = value; \
- mem_vid_copy((vir_bytes) &boot_tty_info, \
- info_location/2, sizeof(boot_tty_info)/2); \
- } \
-}
-
#define UPDATE_CURSOR(ccons, cursor) { \
ccons->c_cur = cursor; \
- UPDATEBOOTINFO(ccons, conscursor, ccons->c_cur); \
if(curcons && ccons == curcons) \
set_6845(CURSOR, ccons->c_cur); \
}
#define UPDATE_ORIGIN(ccons, origin) { \
ccons->c_org = origin; \
- UPDATEBOOTINFO(ccons, consorigin, ccons->c_org); \
if (curcons && ccons == curcons) \
set_6845(VID_ORG, ccons->c_org); \
}
}
if (machine.vdu_ega) vid_size = EGA_SIZE;
wrap = ! machine.vdu_ega;
- info_location = vid_size - sizeof(struct boot_tty_info);
console_memory = vm_map_phys(SELF, (void *) vid_base, vid_size);
if(console_memory == MAP_FAILED)
panic("TTY","Console couldn't map video memory", NO_NUM);
+ printf("TTY: vm_map_phys of 0x%lx OK, result 0x%lx",
+ vid_base, console_memory);
+
vid_size >>= 1; /* word count */
vid_mask = vid_size - 1;
scr_size = scr_lines * scr_width;
/* There can be as many consoles as video memory allows. */
- nr_cons = (vid_size - sizeof(struct boot_tty_info)/2) / scr_size;
+ nr_cons = vid_size / scr_size;
if (nr_cons > NR_CONS) nr_cons = NR_CONS;
if (nr_cons > 1) wrap = 0;
scroll_screen(cons, SCROLL_UP);
cons->c_row = scr_lines - 1;
cons->c_column = 0;
-
- memset(&boot_tty_info, 0, sizeof(boot_tty_info));
- UPDATE_CURSOR(cons, cons->c_cur);
- boot_tty_info.flags = BTIF_CONSCURSOR | BTIF_CONSORIGIN;
- boot_tty_info.magic = TTYMAGIC;
- UPDATE_ORIGIN(cons, cons->c_org);
}
select_console(0);
cons_ioctl(tp, 0);
i386_freepde(m_ptr->SVMCTL_VALUE);
return OK;
}
- case VMCTL_I386_INVLPG:
- {
- invlpg_range(m_ptr->SVMCTL_VALUE, 1);
- return OK;
- }
}
extern int vm_copy_in_progress, catch_pagefaults;
extern struct proc *vm_copy_from, *vm_copy_to;
-extern u32_t npagefaults;
-
-PUBLIC u32_t pagefault_count = 0;
void pagefault(vir_bytes old_eip, struct proc *pr, int trap_errno,
u32_t *old_eipptr, u32_t *old_eaxptr, u32_t pagefaultcr2)
vmassert(*old_eipptr == old_eip);
vmassert(old_eipptr != &old_eip);
- vmassert(pagefault_count == 1);
-
#if 0
printf("kernel: pagefault in pr %d, addr 0x%lx, his cr3 0x%lx, actual cr3 0x%lx\n",
pr->p_endpoint, pagefaultcr2, pr->p_seg.p_cr3, read_cr3());
#endif
vmassert(pr->p_seg.p_cr3 == read_cr3());
} else {
+ u32_t cr3;
+ lock;
+ cr3 = read_cr3();
vmassert(ptproc);
- vmassert(ptproc->p_seg.p_cr3 == read_cr3());
+ if(ptproc->p_seg.p_cr3 != cr3) {
+ util_stacktrace();
+ printf("cr3 wrong in pagefault; value 0x%lx, ptproc %s / %d, his cr3 0x%lx, pr %s / %d\n",
+ cr3,
+ ptproc->p_name, ptproc->p_endpoint,
+ ptproc->p_seg.p_cr3,
+ pr->p_name, pr->p_endpoint);
+ ser_dump_proc();
+ vm_print(cr3);
+ vm_print(ptproc->p_seg.p_cr3);
+ }
+ unlock;
}
test_eip = k_reenter ? old_eip : pr->p_reg.pc;
*old_eipptr = phys_copy_fault;
*old_eaxptr = pagefaultcr2;
- pagefault_count = 0;
-
return;
}
- npagefaults++;
-
/* System processes that don't have their own page table can't
* have page faults. VM does have its own page table but also
* can't have page faults (because VM has to handle them).
lock_notify(HARDWARE, VM_PROC_NR);
- pagefault_count = 0;
-
return;
}
u32_t pf_flags; /* Pagefault flags on stack. */
};
+#define INMEMORY(p) (!p->p_seg.p_cr3 || ptproc == p)
+
#endif /* #ifndef _I386_TYPES_H */
.define _read_cr4
.define _thecr3
.define _write_cr4
-.define _i386_invlpg_addr
-.define _i386_invlpg_level0
-.define __memcpy_k
-.define __memcpy_k_fault
.define _catch_pagefaults
! The routines only guarantee to preserve the registers the C compiler
mov (_thecr3), eax
ret
-!*===========================================================================*
-!* i386_invlpg *
-!*===========================================================================*
-! PUBLIC void i386_invlpg(void);
-_i386_invlpg_level0:
- mov eax, (_i386_invlpg_addr)
- invlpg (eax)
- ret
-
-
-!*===========================================================================*
-!* _memcpy_k *
-!*===========================================================================*
-! _memcpy_k() Original Author: Kees J. Bot
-! 2 Jan 1994
-! void *_memcpy_k(void *s1, const void *s2, size_t n)
-! Copy a chunk of memory that the kernel can use to trap pagefaults.
-.define __memcpy_k
-.define __memcpy_k_fault
- .align 16
-__memcpy_k:
- push ebp
- mov ebp, esp
- push esi
- push edi
- mov edi, 8(ebp) ! String s1
- mov esi, 12(ebp) ! String s2
- mov ecx, 16(ebp) ! Length
- cld ! Clear direction bit: upwards
- cmp ecx, 16
- jb upbyte ! Don't bother being smart with short arrays
- mov eax, esi
- or eax, edi
- testb al, 1
- jnz upbyte ! Bit 0 set, use byte copy
- testb al, 2
- jnz upword ! Bit 1 set, use word copy
-uplword:shrd eax, ecx, 2 ! Save low 2 bits of ecx in eax
- shr ecx, 2
- rep
- movs ! Copy longwords.
- shld ecx, eax, 2 ! Restore excess count
-upword: shr ecx, 1
- rep
- o16 movs ! Copy words
- adc ecx, ecx ! One more byte?
-upbyte: rep
- movsb ! Copy bytes
-done: mov eax, 0
-__memcpy_k_fault: ! Kernel can send us here with pf cr2 in eax
- pop edi
- pop esi
- pop ebp
- ret
int verifyrange = 0;
-extern u32_t newpde, overwritepde, linlincopies,
- physzero, invlpgs, straightpdes;
-
#define PROCPDEPTR(pr, pi) ((u32_t *) ((u8_t *) vm_pagedirs +\
I386_PAGE_SIZE * pr->p_nr + \
I386_VM_PT_ENT_SIZE * pi))
-/* Signal to exception handler that pagefaults can happen. */
-int catch_pagefaults = 0;
-
u8_t *vm_pagedirs = NULL;
-u32_t i386_invlpg_addr = 0;
-
#define WANT_FREEPDES 100
#define NOPDE -1
#define PDEMASK(n) (1L << (n))
}
+
+
+/* This macro sets up a mapping from within the kernel's address
+ * space to any other area of memory, either straight physical
+ * memory (PROC == NULL) or a process view of memory, in 4MB chunks.
+ * It recognizes PROC having kernel address space as a special case.
+ *
+ * It sets PTR to the address within kernel address space that
+ * corresponds to LINADDR, and clamps REMAIN to the number of bytes
+ * left in the 4MB chunk that LINADDR falls in.
+ *
+ * It records the free PDE slot it borrows for the mapping (if any)
+ * in PDE, so that DONEPDE can mark that slot dirty and remove the
+ * mapping again.
+ */
+#define CREATEPDE(PROC, PTR, LINADDR, REMAIN, BYTES, PDE) { \
+ int proc_pde_index; \
+ proc_pde_index = I386_VM_PDE(LINADDR); \
+ PDE = NOPDE; \
+ if((PROC) && (((PROC) == ptproc) || !HASPT(PROC))) { \
+ PTR = LINADDR; \
+ } else { \
+ int fp; \
+ int mustinvl; \
+ u32_t pdeval, *pdevalptr, mask; \
+ phys_bytes offset; \
+ vmassert(psok); \
+ if(PROC) { \
+ u32_t *pdeptr; \
+ vmassert(!iskernelp(PROC)); \
+ vmassert(HASPT(PROC)); \
+ pdeptr = PROCPDEPTR(PROC, proc_pde_index); \
+ pdeval = *pdeptr; \
+ } else { \
+ vmassert(!iskernelp(PROC)); \
+ pdeval = (LINADDR & I386_VM_ADDR_MASK_4MB) | \
+ I386_VM_BIGPAGE | I386_VM_PRESENT | \
+ I386_VM_WRITE | I386_VM_USER; \
+ } \
+ for(fp = 0; fp < nfreepdes; fp++) { \
+ int k = freepdes[fp]; \
+ if(inusepde == k) \
+ continue; \
+ PDE = k; \
+ mask = PDEMASK(k); \
+ vmassert(mask); \
+ if(dirtypde & mask) \
+ continue; \
+ break; \
+ } \
+ vmassert(PDE != NOPDE); \
+ vmassert(mask); \
+ if(dirtypde & mask) { \
+ mustinvl = 1; \
+ } else { \
+ mustinvl = 0; \
+ } \
+ inusepde = PDE; \
+ *PROCPDEPTR(ptproc, PDE) = pdeval; \
+ offset = LINADDR & I386_VM_OFFSET_MASK_4MB; \
+ PTR = I386_BIG_PAGE_SIZE*PDE + offset; \
+ REMAIN = MIN(REMAIN, I386_BIG_PAGE_SIZE - offset); \
+ if(mustinvl) { \
+ level0(reload_cr3); \
+ } \
+ } \
+}
+
+#define DONEPDE(PDE) { \
+ if(PDE != NOPDE) { \
+ dirtypde |= PDEMASK(PDE); \
+ *PROCPDEPTR(ptproc, PDE) = 0; \
+ } \
+}
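
The CREATEPDE/DONEPDE pair is easiest to read as a map/copy/unmap window
mechanism. A simplified usage sketch (lin_lin_copy below is the real
consumer; p, lin and len are hypothetical stand-ins for a target process,
a linear address and a byte count):

	phys_bytes ptr;
	vir_bytes chunk = len;
	int pde;

	inusepde = NOPDE;			/* no window reserved yet */
	CREATEPDE(p, ptr, lin, chunk, len, pde); /* map a <=4MB window, clamp chunk */
	/* ... touch at most chunk bytes through ptr ... */
	DONEPDE(pde);				/* mark borrowed PDE dirty, unmap */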
+
+
+
+/*===========================================================================*
+ * lin_lin_copy *
+ *===========================================================================*/
+int lin_lin_copy(struct proc *srcproc, vir_bytes srclinaddr,
+ struct proc *dstproc, vir_bytes dstlinaddr, vir_bytes bytes)
+{
+ u32_t addr;
+ int procslot;
+ NOREC_ENTER(linlincopy);
+
+ FIXME("lin_lin_copy requires big pages");
+ vmassert(vm_running);
+ vmassert(!catch_pagefaults);
+ vmassert(nfreepdes >= 3);
+
+ vmassert(ptproc);
+ vmassert(proc_ptr);
+ vmassert(read_cr3() == ptproc->p_seg.p_cr3);
+
+ procslot = ptproc->p_nr;
+
+ vmassert(procslot >= 0 && procslot < I386_VM_DIR_ENTRIES);
+
+ while(bytes > 0) {
+ phys_bytes srcptr, dstptr;
+ vir_bytes chunk = bytes;
+ int srcpde, dstpde;
+
+ /* Set up 4MB ranges. */
+ inusepde = NOPDE;
+ CREATEPDE(srcproc, srcptr, srclinaddr, chunk, bytes, srcpde);
+ CREATEPDE(dstproc, dstptr, dstlinaddr, chunk, bytes, dstpde);
+
+ /* Copy pages. */
+ PHYS_COPY_CATCH(srcptr, dstptr, chunk, addr);
+
+ DONEPDE(srcpde);
+ DONEPDE(dstpde);
+
+ if(addr) {
+ if(addr >= srcptr && addr < (srcptr + chunk)) {
+ NOREC_RETURN(linlincopy, EFAULT_SRC);
+ }
+ if(addr >= dstptr && addr < (dstptr + chunk)) {
+ NOREC_RETURN(linlincopy, EFAULT_DST);
+ }
+ minix_panic("lin_lin_copy fault out of range", NO_NUM);
+
+ /* Not reached. */
+ NOREC_RETURN(linlincopy, EFAULT);
+ }
+
+ /* Update counter and addresses for next iteration, if any. */
+ bytes -= chunk;
+ srclinaddr += chunk;
+ dstlinaddr += chunk;
+ }
+
+ NOREC_RETURN(linlincopy, OK);
+}
+
+
PRIVATE u32_t phys_get32(addr)
phys_bytes addr;
{
return 1;
}
-extern u32_t vmreqs;
-
/*===========================================================================*
* vm_suspend *
*===========================================================================*/
util_stacktrace_strcat(caller->p_vmrequest.stacktrace);
#endif
- vmreqs++;
-
caller->p_vmrequest.writeflag = 1;
caller->p_vmrequest.start = linaddr;
caller->p_vmrequest.length = len;
vmassert(rp->p_delivermsg.m_source != NONE);
vmassert(rp->p_delivermsg_lin);
- vmassert(rp->p_delivermsg_lin ==
+#if DEBUG_VMASSERT
+ if(rp->p_delivermsg_lin !=
+ umap_local(rp, D, rp->p_delivermsg_vir, sizeof(message))) {
+ printf("vir: 0x%lx lin was: 0x%lx umap now: 0x%lx\n",
+ rp->p_delivermsg_vir, rp->p_delivermsg_lin,
umap_local(rp, D, rp->p_delivermsg_vir, sizeof(message)));
+ minix_panic("that's wrong", NO_NUM);
+ }
+
+#endif
vm_set_cr3(rp);
- vmassert(intr_disabled());
- vmassert(!catch_pagefaults);
- catch_pagefaults = 1;
- addr = phys_copy(vir2phys(&rp->p_delivermsg),
- rp->p_delivermsg_lin, sizeof(message));
- vmassert(catch_pagefaults);
- catch_pagefaults = 0;
+ PHYS_COPY_CATCH(vir2phys(&rp->p_delivermsg),
+ rp->p_delivermsg_lin, sizeof(message), addr);
if(addr) {
printf("phys_copy failed - addr 0x%lx\n", addr);
return;
}
-/*===========================================================================*
- * invlpg_range *
- *===========================================================================*/
-void invlpg_range(u32_t lin, u32_t bytes)
-{
-/* Remove a range of translated addresses from the TLB.
- * Addresses are in linear, i.e., post-segment, pre-pagetable
- * form. Parameters are byte values, any offset and any multiple.
- */
- u32_t cr3;
- u32_t o, limit, addr;
- limit = lin + bytes - 1;
- o = lin % I386_PAGE_SIZE;
- lin -= o;
- limit = (limit + o) & I386_VM_ADDR_MASK;
-#if 1
- for(i386_invlpg_addr = lin; i386_invlpg_addr <= limit;
- i386_invlpg_addr += I386_PAGE_SIZE) {
- invlpgs++;
- level0(i386_invlpg_level0);
- }
-#else
- level0(reload_cr3);
-#endif
-}
-
u32_t thecr3;
u32_t read_cr3(void)
return thecr3;
}
-/* This macro sets up a mapping from within the kernel's address
- * space to any other area of memory, either straight physical
- * memory (PROC == NULL) or a process view of memory, in 4MB chunks.
- * It recognizes PROC having kernel address space as a special case.
- *
- * It sets PTR to the pointer within kernel address space at the start
- * of the 4MB chunk, and OFFSET to the offset within that chunk
- * that corresponds to LINADDR.
- *
- * It needs FREEPDE (available and addressable PDE within kernel
- * address space), SEG (hardware segment), VIRT (in-datasegment
- * address if known).
- */
-#define CREATEPDE(PROC, PTR, LINADDR, REMAIN, BYTES, PDE) { \
- int proc_pde_index; \
- FIXME("CREATEPDE: check if invlpg is necessary"); \
- proc_pde_index = I386_VM_PDE(LINADDR); \
- PDE = NOPDE; \
- if((PROC) && (((PROC) == ptproc) || iskernelp(PROC))) { \
- PTR = LINADDR; \
- straightpdes++; \
- } else { \
- int fp; \
- int mustinvl; \
- u32_t pdeval, *pdevalptr, mask; \
- phys_bytes offset; \
- vmassert(psok); \
- if(PROC) { \
- u32_t *pdeptr; \
- vmassert(!iskernelp(PROC)); \
- vmassert(HASPT(PROC)); \
- pdeptr = PROCPDEPTR(PROC, proc_pde_index); \
- pdeval = *pdeptr; \
- } else { \
- vmassert(!iskernelp(PROC)); \
- pdeval = (LINADDR & I386_VM_ADDR_MASK_4MB) | \
- I386_VM_BIGPAGE | I386_VM_PRESENT | \
- I386_VM_WRITE | I386_VM_USER; \
- } \
- for(fp = 0; fp < nfreepdes; fp++) { \
- int k = freepdes[fp]; \
- if(inusepde == k) \
- continue; \
- PDE = k; \
- mask = PDEMASK(k); \
- vmassert(mask); \
- if(dirtypde & mask) \
- continue; \
- break; \
- } \
- vmassert(PDE != NOPDE); \
- vmassert(mask); \
- if(dirtypde & mask) { \
- mustinvl = 1; \
- overwritepde++; \
- } else { \
- mustinvl = 0; \
- newpde++; \
- } \
- inusepde = PDE; \
- *PROCPDEPTR(ptproc, PDE) = pdeval; \
- offset = LINADDR & I386_VM_OFFSET_MASK_4MB; \
- PTR = I386_BIG_PAGE_SIZE*PDE + offset; \
- REMAIN = MIN(REMAIN, I386_BIG_PAGE_SIZE - offset); \
- if(mustinvl) { \
- invlpg_range(PTR, REMAIN); \
- } \
- } \
-}
-
-#define DONEPDE(PDE) { \
- if(PDE != NOPDE) { \
- dirtypde |= PDEMASK(PDE); \
- *PROCPDEPTR(ptproc, PDE) = 0; \
- } \
-}
-
-
-
-/*===========================================================================*
- * lin_lin_copy *
- *===========================================================================*/
-int lin_lin_copy(struct proc *srcproc, vir_bytes srclinaddr,
- struct proc *dstproc, vir_bytes dstlinaddr, vir_bytes bytes)
-{
- u32_t addr;
- int procslot;
- NOREC_ENTER(linlincopy);
-
- linlincopies++;
-
- FIXME("lin_lin_copy requires big pages");
- vmassert(vm_running);
- vmassert(!catch_pagefaults);
- vmassert(nfreepdes >= 3);
-
- vmassert(ptproc);
- vmassert(proc_ptr);
- vmassert(read_cr3() == ptproc->p_seg.p_cr3);
-
- procslot = ptproc->p_nr;
-
- vmassert(procslot >= 0 && procslot < I386_VM_DIR_ENTRIES);
-
- while(bytes > 0) {
- phys_bytes srcptr, dstptr;
- vir_bytes chunk = bytes;
- int srcpde, dstpde;
-
- /* Set up 4MB ranges. */
- inusepde = NOPDE;
- CREATEPDE(srcproc, srcptr, srclinaddr, chunk, bytes, srcpde);
- CREATEPDE(dstproc, dstptr, dstlinaddr, chunk, bytes, dstpde);
-
- /* Copy pages. */
- vmassert(intr_disabled());
- vmassert(!catch_pagefaults);
- catch_pagefaults = 1;
- addr=phys_copy(srcptr, dstptr, chunk);
- vmassert(intr_disabled());
- vmassert(catch_pagefaults);
- catch_pagefaults = 0;
-
- DONEPDE(srcpde);
- DONEPDE(dstpde);
-
- if(addr) {
- if(addr >= srcptr && addr < (srcptr + chunk)) {
- NOREC_RETURN(linlincopy, EFAULT_SRC);
- }
- if(addr >= dstptr && addr < (dstptr + chunk)) {
- NOREC_RETURN(linlincopy, EFAULT_DST);
- }
- minix_panic("lin_lin_copy fault out of range", NO_NUM);
-
- /* Not reached. */
- NOREC_RETURN(linlincopy, EFAULT);
- }
-
- /* Update counter and addresses for next iteration, if any. */
- bytes -= chunk;
- srclinaddr += chunk;
- dstlinaddr += chunk;
- }
-
- NOREC_RETURN(linlincopy, OK);
-}
/*===========================================================================*
* lin_memset *
u32_t p;
p = c | (c << 8) | (c << 16) | (c << 24);
- physzero++;
-
if(!vm_running) {
phys_memset(ph, p, bytes);
return OK;
.define _restart
.define save
-.define _pagefault_count
-.define _cr3_test
-.define _cr3_reload
.define _reload_cr3
.define _write_cr3 ! write cr3
call _schedcheck ! ask C function who we're running
mov esp, (_proc_ptr) ! will assume P_STACKBASE == 0
lldt P_LDT_SEL(esp) ! enable process' segment descriptors
- inc (_cr3_test)
cmp P_CR3(esp), 0 ! process does not have its own PT
jz 0f
mov eax, P_CR3(esp)
cmp eax, (loadedcr3)
jz 0f
- inc (_cr3_reload)
mov cr3, eax
mov (loadedcr3), eax
mov eax, (_proc_ptr)
push eax
mov eax, cr2
sseg mov (pagefaultcr2), eax
-sseg inc (_pagefault_count)
pop eax
jmp errexception
push ebp
mov ebp, esp
mov eax, 8(ebp)
- inc (_cr3_test)
cmp eax, (loadedcr3)
jz 0f
- inc (_cr3_reload)
mov cr3, eax
mov (loadedcr3), eax
mov (_dirtypde), 0
_reload_cr3:
push ebp
mov ebp, esp
- inc (_cr3_reload)
mov (_dirtypde), 0
mov eax, cr3
mov cr3, eax
#define DEBUG_TIME_LOCKS 1
/* Runtime sanity checking. */
-#define DEBUG_VMASSERT 0
-#define DEBUG_SCHED_CHECK 0
-#define DEBUG_STACK_CHECK 0
+#define DEBUG_VMASSERT 1
+#define DEBUG_SCHED_CHECK 1
+#define DEBUG_STACK_CHECK 1
#define DEBUG_TRACE 1
#if DEBUG_TRACE
/* VM */
EXTERN int vm_running;
+EXTERN int catch_pagefaults;
EXTERN struct proc *ptproc;
/* Timing */
PRIVATE int QueueMess(endpoint_t ep, vir_bytes msg_lin, struct proc *dst)
{
int k;
+ phys_bytes addr;
NOREC_ENTER(queuemess);
/* Queue a message from the src process (in memory) to the dst
* process (using dst process table entry). Do actual copy to
vmassert(!(dst->p_misc_flags & MF_DELIVERMSG));
vmassert(dst->p_delivermsg_lin);
vmassert(isokendpt(ep, &k));
+ FIXME("copy messages directly if in memory");
+ FIXME("possibly also msgcopy specific function");
+
+ if(INMEMORY(dst)) {
+ PHYS_COPY_CATCH(msg_lin, dst->p_delivermsg_lin,
+ sizeof(message), addr);
+ if(!addr) {
+ PHYS_COPY_CATCH(vir2phys(&ep), dst->p_delivermsg_lin,
+ sizeof(ep), addr);
+ if(!addr) {
+ NOREC_RETURN(queuemess, OK);
+ }
+ }
+ }
- if(phys_copy(msg_lin, vir2phys(&dst->p_delivermsg),
- sizeof(message))) {
+ PHYS_COPY_CATCH(msg_lin, vir2phys(&dst->p_delivermsg), sizeof(message), addr);
+ if(addr) {
NOREC_RETURN(queuemess, EFAULT);
}
dst->p_delivermsg.m_source = ep;
dst->p_misc_flags |= MF_DELIVERMSG;
+#if 0
+ if(INMEMORY(dst)) {
+ delivermsg(dst);
+ }
+#endif
+
NOREC_RETURN(queuemess, OK);
}
register struct proc **xpp;
int dst_p;
phys_bytes linaddr;
+ vir_bytes addr;
int r;
if(!(linaddr = umap_local(caller_ptr, D, (vir_bytes) m_ptr,
}
/* Destination is not waiting. Block and dequeue caller. */
- if(phys_copy(linaddr, vir2phys(&caller_ptr->p_sendmsg), sizeof(message))) {
- return EFAULT;
- }
+ PHYS_COPY_CATCH(linaddr, vir2phys(&caller_ptr->p_sendmsg),
+ sizeof(message), addr);
+
+ if(addr) { return EFAULT; }
RTS_SET(caller_ptr, SENDING);
caller_ptr->p_sendto_e = dst_e;
/* This is where we want our message. */
caller_ptr->p_delivermsg_lin = linaddr;
- caller_ptr->p_delivermsg_vir = m_ptr;
+ caller_ptr->p_delivermsg_vir = (vir_bytes) m_ptr;
if(src_e == ANY) src_p = ANY;
else
* process yet or current process isn't ready any more, or
* it's PREEMPTIBLE.
*/
- FIXME("PREEMPTIBLE test?");
vmassert(proc_ptr);
#if 0
if(!proc_ptr || proc_ptr->p_rts_flags)
* struct proc, be sure to change sconst.h to match.
*/
#include <minix/com.h>
+#include <minix/portio.h>
#include "const.h"
#include "priv.h"
#include <sys/sigcontext.h>
#include <minix/endpoint.h>
#include <minix/safecopies.h>
+#include <minix/portio.h>
#include <minix/u64.h>
#include <sys/vm_i386.h>
FORWARD _PROTOTYPE( void initialize, (void));
FORWARD _PROTOTYPE( struct proc *vmrestart_check, (message *));
-u32_t cr3_test, cr3_reload, newpde, overwritepde,
- linlincopies, physzero, invlpgs, npagefaults, vmreqs, straightpdes;
-
/*===========================================================================*
* sys_task *
*===========================================================================*/
minix_panic("receive() failed", r);
}
-#if 1
- {
- struct proc *stp;
- static int prevu;
- int u, dt;
- u = get_uptime();
- dt = u - prevu;
- if(dt >= 5*system_hz) {
-#define PERSEC(n) ((n)*system_hz/dt)
- printf("%6d cr3 tests: %5lu cr3: %5lu straightpdes: %5lu newpde: %5lu overwritepde %5lu linlincopies: %5lu physzero: %5lu invlpgs: %5lu pagefaults: %5lu vmreq: %5lu\n",
- u/system_hz,
- PERSEC(cr3_test), PERSEC(cr3_reload),
- PERSEC(straightpdes), PERSEC(newpde),
- PERSEC(overwritepde),
- PERSEC(linlincopies), PERSEC(physzero),
- PERSEC(invlpgs), PERSEC(npagefaults),
- PERSEC(vmreqs));
- cr3_reload = 0;
- cr3_test = 0;
- newpde = overwritepde = linlincopies =
- physzero = invlpgs = straightpdes = 0;
- npagefaults = 0;
- vmreqs = 0;
- prevu = u;
-#if DEBUG_TRACE
- for (stp = BEG_PROC_ADDR; stp < END_PROC_ADDR; stp++) {
- int ps = PERSEC(stp->p_schedules);
- if(isemptyp(stp))
- continue;
- if(ps > 10) {
- printf("%s %d ", stp->p_name, ps);
- stp->p_schedules = 0;
- }
- }
- printf("\n");
-#endif
- }
- }
-#endif
-
sys_call_code = (unsigned) m.m_type;
call_nr = sys_call_code - KERNEL_CALL;
who_e = m.m_source;
*/
#include "../system.h"
+#include "../vm.h"
#include <signal.h>
#include <minix/endpoint.h>
/* Install new map */
r = newmap(rpc, map_ptr);
+ FIXLINMSG(rpc);
/* Don't schedule process in VM mode until it has a new pagetable. */
if(m_ptr->PR_FORK_FLAGS & PFF_VMINHIBIT) {
printf("type %d\n", p->p_vmrequest.type);
#endif
-#if DEBUG_VMASSERT
- {
- vmassert(target->p_rts_flags);
-
- /* Sanity check. */
- if(p->p_vmrequest.vmresult == OK) {
- int r;
- vmassert(!verifyrange);
- verifyrange = 1;
- r = CHECKRANGE(target,
- p->p_vmrequest.start,
- p->p_vmrequest.length,
- p->p_vmrequest.writeflag);
- vmassert(verifyrange);
- verifyrange = 0;
-
- if(r != OK) {
-
-kprintf("SYSTEM: request by %d: on ep %d: 0x%lx-0x%lx, wrflag %d, stack %s, failed\n",
- p->p_endpoint, target->p_endpoint,
- p->p_vmrequest.start, p->p_vmrequest.start + p->p_vmrequest.length,
- p->p_vmrequest.writeflag,
- p->p_vmrequest.stacktrace);
-
- printf("printing pt of %d (0x%lx)\n",
- vm_print(target->p_endpoint),
- target->p_seg.p_cr3
- );
- vm_print(target->p_seg.p_cr3);
- minix_panic("SYSTEM: fail but VM said OK", NO_NUM);
- }
- }
- }
-#endif
-
vmassert(RTS_ISSET(target, VMREQTARGET));
RTS_LOCK_UNSET(target, VMREQTARGET);
minix_panic("do_vmctl: paging enabling failed", NO_NUM);
vmassert(p->p_delivermsg_lin ==
umap_local(p, D, p->p_delivermsg_vir, sizeof(message)));
- if(newmap(p, m_ptr->SVMCTL_VALUE) != OK)
+ if(newmap(p, (struct mem_map *) m_ptr->SVMCTL_VALUE) != OK)
minix_panic("do_vmctl: newmap failed", NO_NUM);
- p->p_delivermsg_lin =
- umap_local(p, D, p->p_delivermsg_vir, sizeof(message));
+ FIXLINMSG(p);
vmassert(p->p_delivermsg_lin);
return OK;
}
{CLOCK,clock_task,TSK_F, 8, TASK_Q, TSK_S, TSK_T, 0, no_c,"clock" },
{SYSTEM, sys_task,TSK_F, 8, TASK_Q, TSK_S, TSK_T, 0, no_c,"system"},
{HARDWARE, 0,TSK_F, 8, TASK_Q, HRD_S, 0, 0, no_c,"kernel"},
-{PM_PROC_NR, 0,SVM_F, 32, 4, 0, SRV_T, SRV_M, c(pm_c),"pm" },
-{FS_PROC_NR, 0,SVM_F, 32, 5, 0, SRV_T, SRV_M, c(fs_c),"vfs" },
+{PM_PROC_NR, 0,SRV_F, 32, 4, 0, SRV_T, SRV_M, c(pm_c),"pm" },
+{FS_PROC_NR, 0,SRV_F, 32, 5, 0, SRV_T, SRV_M, c(fs_c),"vfs" },
{RS_PROC_NR, 0,SVM_F, 4, 4, 0, SRV_T, SYS_M, c(rs_c),"rs" },
-{DS_PROC_NR, 0,SVM_F, 4, 4, 0, SRV_T, SYS_M, c(ds_c),"ds" },
+{DS_PROC_NR, 0,SRV_F, 4, 4, 0, SRV_T, SYS_M, c(ds_c),"ds" },
{TTY_PROC_NR, 0,SVM_F, 4, 1, 0, SRV_T, SYS_M,c(tty_c),"tty" },
{MEM_PROC_NR, 0,SVM_F, 4, 3, 0, SRV_T, SYS_M,c(mem_c),"memory"},
-{LOG_PROC_NR, 0,SVM_F, 4, 2, 0, SRV_T, SYS_M,c(drv_c),"log" },
+{LOG_PROC_NR, 0,SRV_F, 4, 2, 0, SRV_T, SYS_M,c(drv_c),"log" },
{MFS_PROC_NR, 0,SVM_F, 32, 5, 0, SRV_T, SRV_M, c(fs_c),"mfs" },
{VM_PROC_NR, 0,SRV_F, 32, 2, 0, SRV_T, SRV_M, c(vm_c),"vm" },
{INIT_PROC_NR, 0,USR_F, 8, USER_Q, 0, USR_T, USR_M, c(usr_c),"init" },
#define EFAULT_SRC -995
#define EFAULT_DST -994
+#define FIXLINMSG(prp) { prp->p_delivermsg_lin = umap_local(prp, D, prp->p_delivermsg_vir, sizeof(message)); }
+
+#define PHYS_COPY_CATCH(src, dst, size, a) { \
+ vmassert(intr_disabled()); \
+ catch_pagefaults++; \
+ a = phys_copy(src, dst, size); \
+ catch_pagefaults--; \
+ }
+
#endif
all build: $(SERVER)
$(SERVER): $(OBJ)
$(CC) -o $@ $(LDFLAGS) $(OBJ) $(LIBS)
- install -S `expr $(NR_BUFS) \* $(BS) \* 2.2` $(SERVER)
install: $(SERVER)
-mv $(DEST) $(DEST).prev
rmp->mp_sigstatus = (char) signo;
if (sigismember(&core_sset, signo) && slot != FS_PROC_NR) {
- printf("PM: signal %d for %d / %s\n", signo, rmp->mp_pid, rmp->mp_name);
+ printf("PM: signal %d for pid %d / %s\n",
+ signo, rmp->mp_pid, rmp->mp_name);
s= dump_core(rmp);
if (s == SUSPEND) {
return;
include /etc/make.conf
OBJ = main.o alloc.o utility.o exec.o exit.o fork.o break.o \
- signal.o vfs.o mmap.o slaballoc.o region.o pagefaults.o
+ signal.o vfs.o mmap.o slaballoc.o region.o pagefaults.o addravl.o
ARCHOBJ = $(ARCH)/vm.o $(ARCH)/pagetable.o $(ARCH)/arch_pagefaults.o $(ARCH)/util.o
CPPFLAGS=-I../../kernel/arch/$(ARCH)/include -I$(ARCH)
--- /dev/null
+
+#include "pagerange.h"
+#include "addravl.h"
+#include "cavl_impl.h"
+
--- /dev/null
+
+#define AVL_UNIQUE(id) addr_ ## id
+#define AVL_HANDLE pagerange_t *
+#define AVL_KEY phys_bytes
+#define AVL_MAX_DEPTH 30 /* good for 2 million nodes */
+#define AVL_NULL NULL
+#define AVL_GET_LESS(h, a) (h)->less
+#define AVL_GET_GREATER(h, a) (h)->greater
+#define AVL_SET_LESS(h1, h2) (h1)->less = h2;
+#define AVL_SET_GREATER(h1, h2) (h1)->greater = h2;
+#define AVL_GET_BALANCE_FACTOR(h) (h)->factor
+#define AVL_SET_BALANCE_FACTOR(h, f) (h)->factor = f;
+#define AVL_COMPARE_KEY_KEY(k1, k2) ((k1) > (k2) ? 1 : ((k1) < (k2) ? -1 : 0))
+#define AVL_COMPARE_KEY_NODE(k, h) AVL_COMPARE_KEY_KEY((k), (h)->addr)
+#define AVL_COMPARE_NODE_NODE(h1, h2) AVL_COMPARE_KEY_KEY((h1)->addr, (h2)->addr)
+
+#include "cavl_if.h"
#include "proto.h"
#include "util.h"
#include "glo.h"
+#include "pagerange.h"
+#include "addravl.h"
-/* Initially, no free pages are known. */
-PRIVATE phys_bytes free_pages_head = NO_MEM; /* Physical address in bytes. */
+/* AVL tree of free pages. */
+addr_avl addravl;
/* Used for sanity check. */
PRIVATE phys_bytes mem_low, mem_high;
}
-void availbytes(vir_bytes *bytes, vir_bytes *chunks)
-{
- phys_bytes p, nextp;
- *bytes = 0;
- *chunks = 0;
- for(p = free_pages_head; p != NO_MEM; p = nextp) {
- phys_bytes thissize, ret;
- GET_PARAMS(p, thissize, nextp);
- (*bytes) += thissize;
- (*chunks)++;
- if(nextp != NO_MEM) {
- vm_assert(nextp > p);
- vm_assert(nextp > p + thissize);
- }
- }
-
- return;
-}
-
-
#if SANITYCHECKS
/*===========================================================================*
hole_head = NIL_HOLE;
free_slots = &hole[0];
+ addr_init(&addravl);
+
/* Use the chunks of physical memory to allocate holes. */
for (i=NR_MEMS-1; i>=0; i--) {
if (chunks[i].size > 0) {
*===========================================================================*/
PRIVATE PUBLIC phys_bytes alloc_pages(int pages, int memflags)
{
- phys_bytes bytes, p, nextp, prevp = NO_MEM;
- phys_bytes prevsize = 0;
-
-#if SANITYCHECKS
- vir_bytes avail1, avail2, chunks1, chunks2;
- availbytes(&avail1, &chunks1);
-#endif
-
- vm_assert(pages > 0);
- bytes = CLICK2ABS(pages);
- vm_assert(ABS2CLICK(bytes) == pages);
+ addr_iter iter;
+ pagerange_t *pr;
+ int incr;
+ phys_bytes boundary16 = 16 * 1024 * 1024 / VM_PAGE_SIZE;
+ phys_bytes mem;
+
+ if(memflags & PAF_LOWER16MB) {
+ addr_start_iter_least(&addravl, &iter);
+ incr = 1;
+ } else {
+ addr_start_iter_greatest(&addravl, &iter);
+ incr = 0;
+ }
-#if SANITYCHECKS
-#define ALLOCRETURNCHECK \
- availbytes(&avail2, &chunks2); \
- vm_assert(avail1 - bytes == avail2); \
- vm_assert(chunks1 == chunks2 || chunks1-1 == chunks2);
-#else
-#define ALLOCRETURNCHECK
-#endif
+ while((pr = addr_get_iter(&iter))) {
+ SLABSANE(pr);
+ if(pr->size >= pages) {
+ if(memflags & PAF_LOWER16MB) {
+ if(pr->addr + pages > boundary16)
+ return NO_MEM;
+ }
+ /* good block found! */
+ break;
+ }
+ if(incr)
+ addr_incr_iter(&iter);
+ else
+ addr_decr_iter(&iter);
+ }
- for(p = free_pages_head; p != NO_MEM; p = nextp) {
- phys_bytes thissize, ret;
- GET_PARAMS(p, thissize, nextp);
- if(thissize >= bytes) {
- /* We found a chunk that's big enough. */
-
- ret = p + thissize - bytes;
- thissize -= bytes;
-
- if(thissize == 0) {
- /* Special case: remove this link entirely. */
- if(prevp == NO_MEM)
- free_pages_head = nextp;
- else {
- vm_assert(prevsize > 0);
- SET_PARAMS(prevp, prevsize, nextp);
- }
- } else {
- /* Remove memory from this chunk. */
- SET_PARAMS(p, thissize, nextp);
- }
+ if(!pr)
+ return NO_MEM;
- /* Clear memory if requested. */
- if(memflags & PAF_CLEAR) {
- int s;
- if ((s= sys_memset(0, ret, bytes)) != OK) {
- vm_panic("alloc_pages: sys_memset failed", s);
- }
- }
+ SLABSANE(pr);
- /* Check if returned range is actual good memory. */
- vm_assert_range(ret, bytes);
+ mem = pr->addr;
- ALLOCRETURNCHECK;
+ vm_assert(pr->size >= pages);
+ if(pr->size == pages) {
+ pagerange_t *prr;
+ prr = addr_remove(&addravl, pr->addr);
+ vm_assert(prr);
+ vm_assert(prr == pr);
+ SLABFREE(pr);
+ } else {
+ pr->addr += pages;
+ pr->size -= pages;
+ }
- /* Return it in clicks. */
- return ABS2CLICK(ret);
- }
- prevp = p;
- prevsize = thissize;
+ if(memflags & PAF_CLEAR) {
+ int s;
+ if ((s= sys_memset(0, CLICK_SIZE*mem,
+ VM_PAGE_SIZE*pages)) != OK)
+ vm_panic("alloc_mem: sys_memset failed", s);
}
- return NO_MEM;
+
+ return mem;
}
/*===========================================================================*
* free_pages *
*===========================================================================*/
-PRIVATE PUBLIC void free_pages(phys_bytes pageno, int npages)
+PRIVATE void free_pages(phys_bytes pageno, int npages)
{
- phys_bytes p, origsize,
- size, nextaddr, thissize, prevp = NO_MEM, pageaddr;
+ pagerange_t *pr;
-#if SANITYCHECKS
- vir_bytes avail1, avail2, chunks1, chunks2;
- availbytes(&avail1, &chunks1);
-#endif
-
-#if SANITYCHECKS
-#define FREERETURNCHECK \
- availbytes(&avail2, &chunks2); \
- vm_assert(avail1 + origsize == avail2); \
- vm_assert(chunks1 == chunks2 || chunks1+1 == chunks2 || chunks1-1 == chunks2);
-#else
-#define FREERETURNCHECK
-#endif
-
- /* Basic sanity check. */
+ if(!SLABALLOC(pr))
+ vm_panic("alloc_pages: can't alloc", NO_NUM);
+ SLABSANE(pr);
vm_assert(npages > 0);
- vm_assert(pageno != NO_MEM); /* Page number must be reasonable. */
-
- /* Convert page and pages to bytes. */
- pageaddr = CLICK2ABS(pageno);
- origsize = size = npages * VM_PAGE_SIZE; /* Size in bytes. */
- vm_assert(pageaddr != NO_MEM);
- vm_assert(ABS2CLICK(pageaddr) == pageno);
- vm_assert_range(pageaddr, size);
-
- /* More sanity checks. */
- vm_assert(ABS2CLICK(size) == npages); /* Sanity. */
- vm_assert(pageaddr + size > pageaddr); /* Must not overflow. */
-
- /* Special case: no free pages. */
- if(free_pages_head == NO_MEM) {
- free_pages_head = pageaddr;
- SET_PARAMS(pageaddr, size, NO_MEM);
- FREERETURNCHECK;
- return;
- }
-
- /* Special case: the free block is before the current head. */
- if(pageaddr < free_pages_head) {
- phys_bytes newsize, newnext, headsize, headnext;
- vm_assert(pageaddr + size <= free_pages_head);
- GET_PARAMS(free_pages_head, headsize, headnext);
- newsize = size;
- if(pageaddr + size == free_pages_head) {
- /* Special case: contiguous. */
- newsize += headsize;
- newnext = headnext;
- } else {
- newnext = free_pages_head;
- }
- SET_PARAMS(pageaddr, newsize, newnext);
- free_pages_head = pageaddr;
- FREERETURNCHECK;
- return;
- }
-
- /* Find where to put the block in the free list. */
- for(p = free_pages_head; p < pageaddr; p = nextaddr) {
- GET_PARAMS(p, thissize, nextaddr);
-
- if(nextaddr == NO_MEM) {
- /* Special case: page is at the end of the list. */
- if(p + thissize == pageaddr) {
- /* Special case: contiguous. */
- SET_PARAMS(p, thissize + size, NO_MEM);
- FREERETURNCHECK;
- } else {
- SET_PARAMS(p, thissize, pageaddr);
- SET_PARAMS(pageaddr, size, NO_MEM);
- FREERETURNCHECK;
- }
- return;
- }
-
- prevp = p;
- }
-
- /* Normal case: insert page block between two others.
- * The first block starts at 'prevp' and is 'thissize'.
- * The second block starts at 'p' and is 'nextsize'.
- * The block that has to come in between starts at
- * 'pageaddr' and is size 'size'.
- */
- vm_assert(p != NO_MEM);
- vm_assert(prevp != NO_MEM);
- vm_assert(prevp < p);
- vm_assert(p == nextaddr);
-
-#if SANITYCHECKS
- {
- vir_bytes prevpsize, prevpnext;
- GET_PARAMS(prevp, prevpsize, prevpnext);
- vm_assert(prevpsize == thissize);
- vm_assert(prevpnext == p);
-
- availbytes(&avail2, &chunks2);
- vm_assert(avail1 == avail2);
- }
-#endif
-
- if(prevp + thissize == pageaddr) {
- /* Special case: first block is contiguous with freed one. */
- phys_bytes newsize = thissize + size;
- SET_PARAMS(prevp, newsize, p);
- pageaddr = prevp;
- size = newsize;
- } else {
- SET_PARAMS(prevp, thissize, pageaddr);
- }
-
- /* The block has been inserted (and possibly merged with the
- * first one). Check if it has to be merged with the second one.
- */
-
- if(pageaddr + size == p) {
- phys_bytes nextsize, nextnextaddr;
- /* Special case: freed block is contiguous with next one. */
- GET_PARAMS(p, nextsize, nextnextaddr);
- SET_PARAMS(pageaddr, size+nextsize, nextnextaddr);
- FREERETURNCHECK;
- } else {
- SET_PARAMS(pageaddr, size, p);
- FREERETURNCHECK;
- }
-
- return;
+ pr->addr = pageno;
+ pr->size = npages;
+ addr_insert(&addravl, pr);
}
-
#define NR_DMA 16
PRIVATE struct dmatab
--- /dev/null
+/* Abstract AVL Tree Generic C Package.
+** Interface generation header file.
+**
+** This code is in the public domain. See cavl_tree.html for interface
+** documentation.
+**
+** Version: 1.5 Author: Walt Karas
+*/
+
+/* This header contains the definition of CHAR_BIT (number of bits in a
+** char). */
+#include <limits.h>
+
+#undef L__
+#undef L__EST_LONG_BIT
+#undef L__SIZE
+#undef L__SC
+#undef L__LONG_BIT
+#undef L__BIT_ARR_DEFN
+
+#ifndef AVL_SEARCH_TYPE_DEFINED_
+#define AVL_SEARCH_TYPE_DEFINED_
+
+typedef enum
+ {
+ AVL_EQUAL = 1,
+ AVL_LESS = 2,
+ AVL_GREATER = 4,
+ AVL_LESS_EQUAL = AVL_EQUAL | AVL_LESS,
+ AVL_GREATER_EQUAL = AVL_EQUAL | AVL_GREATER
+ }
+avl_search_type;
+
+#endif
+
+#ifdef AVL_UNIQUE
+
+#define L__ AVL_UNIQUE
+
+#else
+
+#define L__(X) X
+
+#endif
+
+/* Determine storage class for function prototypes. */
+#ifdef AVL_PRIVATE
+
+#define L__SC static
+
+#else
+
+#define L__SC extern
+
+#endif
+
+#ifdef AVL_SIZE
+
+#define L__SIZE AVL_SIZE
+
+#else
+
+#define L__SIZE unsigned long
+
+#endif
+
+typedef struct
+ {
+ #ifdef AVL_INSIDE_STRUCT
+
+ AVL_INSIDE_STRUCT
+
+ #endif
+
+ AVL_HANDLE root;
+ }
+L__(avl);
+
+/* Function prototypes. */
+
+L__SC void L__(init)(L__(avl) *tree);
+
+L__SC int L__(is_empty)(L__(avl) *tree);
+
+L__SC AVL_HANDLE L__(insert)(L__(avl) *tree, AVL_HANDLE h);
+
+L__SC AVL_HANDLE L__(search)(L__(avl) *tree, AVL_KEY k, avl_search_type st);
+
+L__SC AVL_HANDLE L__(search_least)(L__(avl) *tree);
+
+L__SC AVL_HANDLE L__(search_greatest)(L__(avl) *tree);
+
+L__SC AVL_HANDLE L__(remove)(L__(avl) *tree, AVL_KEY k);
+
+L__SC AVL_HANDLE L__(subst)(L__(avl) *tree, AVL_HANDLE new_node);
+
+#ifdef AVL_BUILD_ITER_TYPE
+
+L__SC int L__(build)(
+ L__(avl) *tree, AVL_BUILD_ITER_TYPE p, L__SIZE num_nodes);
+
+#endif
+
+/* ANSI C/ISO C++ require that a long have at least 32 bits. Set
+** L__EST_LONG_BIT to be the greatest multiple of 8 in the range
+** 32 - 64 (inclusive) that is less than or equal to the number of
+** bits in a long.
+*/
+
+#if (((LONG_MAX >> 31) >> 7) == 0)
+
+#define L__EST_LONG_BIT 32
+
+#elif (((LONG_MAX >> 31) >> 15) == 0)
+
+#define L__EST_LONG_BIT 40
+
+#elif (((LONG_MAX >> 31) >> 23) == 0)
+
+#define L__EST_LONG_BIT 48
+
+#elif (((LONG_MAX >> 31) >> 31) == 0)
+
+#define L__EST_LONG_BIT 56
+
+#else
+
+#define L__EST_LONG_BIT 64
+
+#endif
+
+/* Number of bits in a long. */
+#define L__LONG_BIT (sizeof(long) * CHAR_BIT)
+
+/* The macro L__BIT_ARR_DEFN defines a bit array whose index is a (0-based)
+** node depth. The definition depends on whether the maximum depth is more
+** or less than the number of bits in a single long.
+*/
+
+#if ((AVL_MAX_DEPTH) > L__EST_LONG_BIT)
+
+/* Maximum depth may be more than number of bits in a long. */
+
+#define L__BIT_ARR_DEFN(NAME) \
+ unsigned long NAME[((AVL_MAX_DEPTH) + L__LONG_BIT - 1) / L__LONG_BIT];
+
+#else
+
+/* Maximum depth is definitely less than number of bits in a long. */
+
+#define L__BIT_ARR_DEFN(NAME) unsigned long NAME;
+
+#endif
+
+/* Iterator structure. */
+typedef struct
+ {
+ /* Tree being iterated over. */
+ L__(avl) *tree_;
+
+ /* Records a path into the tree. If bit n is true, indicates
+ ** take greater branch from the nth node in the path, otherwise
+ ** take the less branch. bit 0 gives branch from root, and
+ ** so on. */
+ L__BIT_ARR_DEFN(branch)
+
+ /* Zero-based depth of path into tree. */
+ unsigned depth;
+
+ /* Handles of nodes in path from root to current node (returned by *). */
+ AVL_HANDLE path_h[(AVL_MAX_DEPTH) - 1];
+ }
+L__(iter);
+
+/* Iterator function prototypes. */
+
+L__SC void L__(start_iter)(
+ L__(avl) *tree, L__(iter) *iter, AVL_KEY k, avl_search_type st);
+
+L__SC void L__(start_iter_least)(L__(avl) *tree, L__(iter) *iter);
+
+L__SC void L__(start_iter_greatest)(L__(avl) *tree, L__(iter) *iter);
+
+L__SC AVL_HANDLE L__(get_iter)(L__(iter) *iter);
+
+L__SC void L__(incr_iter)(L__(iter) *iter);
+
+L__SC void L__(decr_iter)(L__(iter) *iter);
+
+L__SC void L__(init_iter)(L__(iter) *iter);
+
+#define AVL_IMPL_INIT 1
+#define AVL_IMPL_IS_EMPTY (1 << 1)
+#define AVL_IMPL_INSERT (1 << 2)
+#define AVL_IMPL_SEARCH (1 << 3)
+#define AVL_IMPL_SEARCH_LEAST (1 << 4)
+#define AVL_IMPL_SEARCH_GREATEST (1 << 5)
+#define AVL_IMPL_REMOVE (1 << 6)
+#define AVL_IMPL_BUILD (1 << 7)
+#define AVL_IMPL_START_ITER (1 << 8)
+#define AVL_IMPL_START_ITER_LEAST (1 << 9)
+#define AVL_IMPL_START_ITER_GREATEST (1 << 10)
+#define AVL_IMPL_GET_ITER (1 << 11)
+#define AVL_IMPL_INCR_ITER (1 << 12)
+#define AVL_IMPL_DECR_ITER (1 << 13)
+#define AVL_IMPL_INIT_ITER (1 << 14)
+#define AVL_IMPL_SUBST (1 << 15)
+
+#define AVL_IMPL_ALL (~0)
+
+#undef L__
+#undef L__EST_LONG_BIT
+#undef L__SIZE
+#undef L__SC
+#undef L__LONG_BIT
+#undef L__BIT_ARR_DEFN
--- /dev/null
+/* Abstract AVL Tree Generic C Package.
+** Implementation generation header file.
+**
+** This code is in the public domain. See cavl_tree.html for interface
+** documentation.
+**
+** Version: 1.5 Author: Walt Karas
+*/
+
+#undef L__
+#undef L__EST_LONG_BIT
+#undef L__SIZE
+#undef L__tree
+#undef L__MASK_HIGH_BIT
+#undef L__LONG_BIT
+#undef L__BIT_ARR_DEFN
+#undef L__BIT_ARR_VAL
+#undef L__BIT_ARR_0
+#undef L__BIT_ARR_1
+#undef L__BIT_ARR_ALL
+#undef L__BIT_ARR_LONGS
+#undef L__IMPL_MASK
+#undef L__CHECK_READ_ERROR
+#undef L__CHECK_READ_ERROR_INV_DEPTH
+#undef L__SC
+#undef L__BALANCE_PARAM_PREFIX
+
+#ifdef AVL_UNIQUE
+
+#define L__ AVL_UNIQUE
+
+#else
+
+#define L__(X) X
+
+#endif
+
+/* Determine correct storage class for functions */
+#ifdef AVL_PRIVATE
+
+#define L__SC static
+
+#else
+
+#define L__SC
+
+#endif
+
+#ifdef AVL_SIZE
+
+#define L__SIZE AVL_SIZE
+
+#else
+
+#define L__SIZE unsigned long
+
+#endif
+
+#define L__MASK_HIGH_BIT ((int) ~ ((~ (unsigned) 0) >> 1))
+
+/* ANSI C/ISO C++ require that a long have at least 32 bits. Set
+** L__EST_LONG_BIT to be the greatest multiple of 8 in the range
+** 32 - 64 (inclusive) that is less than or equal to the number of
+** bits in a long.
+*/
+
+#if (((LONG_MAX >> 31) >> 7) == 0)
+
+#define L__EST_LONG_BIT 32
+
+#elif (((LONG_MAX >> 31) >> 15) == 0)
+
+#define L__EST_LONG_BIT 40
+
+#elif (((LONG_MAX >> 31) >> 23) == 0)
+
+#define L__EST_LONG_BIT 48
+
+#elif (((LONG_MAX >> 31) >> 31) == 0)
+
+#define L__EST_LONG_BIT 56
+
+#else
+
+#define L__EST_LONG_BIT 64
+
+#endif
+
+#define L__LONG_BIT (sizeof(long) * CHAR_BIT)
+
+#if ((AVL_MAX_DEPTH) > L__EST_LONG_BIT)
+
+/* The maximum depth may be greater than the number of bits in a long,
+** so multiple longs are needed to hold a bit array indexed by node
+** depth. */
+
+#define L__BIT_ARR_LONGS (((AVL_MAX_DEPTH) + L__LONG_BIT - 1) / L__LONG_BIT)
+
+#define L__BIT_ARR_DEFN(NAME) unsigned long NAME[L__BIT_ARR_LONGS];
+
+#define L__BIT_ARR_VAL(BIT_ARR, BIT_NUM) \
+ ((BIT_ARR)[(BIT_NUM) / L__LONG_BIT] & (1L << ((BIT_NUM) % L__LONG_BIT)))
+
+#define L__BIT_ARR_0(BIT_ARR, BIT_NUM) \
+ (BIT_ARR)[(BIT_NUM) / L__LONG_BIT] &= ~(1L << ((BIT_NUM) % L__LONG_BIT));
+
+#define L__BIT_ARR_1(BIT_ARR, BIT_NUM) \
+ (BIT_ARR)[(BIT_NUM) / L__LONG_BIT] |= 1L << ((BIT_NUM) % L__LONG_BIT);
+
+#define L__BIT_ARR_ALL(BIT_ARR, BIT_VAL) \
+ { int i = L__BIT_ARR_LONGS; do (BIT_ARR)[--i] = 0L - (BIT_VAL); while(i); }
+
+#else /* The bit array can definitely fit in one long */
+
+#define L__BIT_ARR_DEFN(NAME) unsigned long NAME;
+
+#define L__BIT_ARR_VAL(BIT_ARR, BIT_NUM) ((BIT_ARR) & (1L << (BIT_NUM)))
+
+#define L__BIT_ARR_0(BIT_ARR, BIT_NUM) (BIT_ARR) &= ~(1L << (BIT_NUM));
+
+#define L__BIT_ARR_1(BIT_ARR, BIT_NUM) (BIT_ARR) |= 1L << (BIT_NUM);
+
+#define L__BIT_ARR_ALL(BIT_ARR, BIT_VAL) (BIT_ARR) = 0L - (BIT_VAL);
+
+#endif
+
+#ifdef AVL_READ_ERRORS_HAPPEN
+
+#define L__CHECK_READ_ERROR(ERROR_RETURN) \
+{ if (AVL_READ_ERROR) return(ERROR_RETURN); }
+
+#else
+
+#define L__CHECK_READ_ERROR(ERROR_RETURN)
+
+#endif
+
+/* The presumed reason that an instantiation places additional fields
+** inside the AVL tree structure is that the SET_ and GET_ macros
+** need these fields. The "balance" function does not explicitly use
+** any fields in the AVL tree structure, so only pass an AVL tree
+** structure pointer to "balance" if it has instantiation-specific
+** fields that are (presumably) needed by the SET_/GET_ calls within
+** "balance".
+*/
+#ifdef AVL_INSIDE_STRUCT
+
+#define L__BALANCE_PARAM_CALL_PREFIX L__tree,
+#define L__BALANCE_PARAM_DECL_PREFIX L__(avl) *L__tree,
+
+#else
+
+#define L__BALANCE_PARAM_CALL_PREFIX
+#define L__BALANCE_PARAM_DECL_PREFIX
+
+#endif
+
+#ifdef AVL_IMPL_MASK
+
+#define L__IMPL_MASK (AVL_IMPL_MASK)
+
+#else
+
+/* Define all functions. */
+#define L__IMPL_MASK AVL_IMPL_ALL
+
+#endif
+
+#if (L__IMPL_MASK & AVL_IMPL_INIT)
+
+L__SC void L__(init)(L__(avl) *L__tree) { L__tree->root = AVL_NULL; }
+
+#endif
+
+#if (L__IMPL_MASK & AVL_IMPL_IS_EMPTY)
+
+L__SC int L__(is_empty)(L__(avl) *L__tree)
+ { return(L__tree->root == AVL_NULL); }
+
+#endif
+
+/* Put the private balance function in the same compilation module as
+** the insert function. */
+#if (L__IMPL_MASK & AVL_IMPL_INSERT)
+
+/* Balances subtree, returns handle of root node of subtree after balancing.
+*/
+L__SC AVL_HANDLE L__(balance)(L__BALANCE_PARAM_DECL_PREFIX AVL_HANDLE bal_h)
+ {
+ AVL_HANDLE deep_h;
+
+ /* Either the "greater than" or the "less than" subtree of
+ ** this node has to be 2 levels deeper (or else it wouldn't
+ ** need balancing).
+ */
+ if (AVL_GET_BALANCE_FACTOR(bal_h) > 0)
+ {
+ /* "Greater than" subtree is deeper. */
+
+ deep_h = AVL_GET_GREATER(bal_h, 1);
+
+ L__CHECK_READ_ERROR(AVL_NULL)
+
+ if (AVL_GET_BALANCE_FACTOR(deep_h) < 0)
+ {
+ int bf;
+
+ AVL_HANDLE old_h = bal_h;
+ bal_h = AVL_GET_LESS(deep_h, 1);
+ L__CHECK_READ_ERROR(AVL_NULL)
+ AVL_SET_GREATER(old_h, AVL_GET_LESS(bal_h, 1))
+ AVL_SET_LESS(deep_h, AVL_GET_GREATER(bal_h, 1))
+ AVL_SET_LESS(bal_h, old_h)
+ AVL_SET_GREATER(bal_h, deep_h)
+
+ bf = AVL_GET_BALANCE_FACTOR(bal_h);
+ if (bf != 0)
+ {
+ if (bf > 0)
+ {
+ AVL_SET_BALANCE_FACTOR(old_h, -1)
+ AVL_SET_BALANCE_FACTOR(deep_h, 0)
+ }
+ else
+ {
+ AVL_SET_BALANCE_FACTOR(deep_h, 1)
+ AVL_SET_BALANCE_FACTOR(old_h, 0)
+ }
+ AVL_SET_BALANCE_FACTOR(bal_h, 0)
+ }
+ else
+ {
+ AVL_SET_BALANCE_FACTOR(old_h, 0)
+ AVL_SET_BALANCE_FACTOR(deep_h, 0)
+ }
+ }
+ else
+ {
+ AVL_SET_GREATER(bal_h, AVL_GET_LESS(deep_h, 0))
+ AVL_SET_LESS(deep_h, bal_h)
+ if (AVL_GET_BALANCE_FACTOR(deep_h) == 0)
+ {
+ AVL_SET_BALANCE_FACTOR(deep_h, -1)
+ AVL_SET_BALANCE_FACTOR(bal_h, 1)
+ }
+ else
+ {
+ AVL_SET_BALANCE_FACTOR(deep_h, 0)
+ AVL_SET_BALANCE_FACTOR(bal_h, 0)
+ }
+ bal_h = deep_h;
+ }
+ }
+ else
+ {
+ /* "Less than" subtree is deeper. */
+
+ deep_h = AVL_GET_LESS(bal_h, 1);
+ L__CHECK_READ_ERROR(AVL_NULL)
+
+ if (AVL_GET_BALANCE_FACTOR(deep_h) > 0)
+ {
+ int bf;
+ AVL_HANDLE old_h = bal_h;
+ bal_h = AVL_GET_GREATER(deep_h, 1);
+ L__CHECK_READ_ERROR(AVL_NULL)
+ AVL_SET_LESS(old_h, AVL_GET_GREATER(bal_h, 0))
+ AVL_SET_GREATER(deep_h, AVL_GET_LESS(bal_h, 0))
+ AVL_SET_GREATER(bal_h, old_h)
+ AVL_SET_LESS(bal_h, deep_h)
+
+ bf = AVL_GET_BALANCE_FACTOR(bal_h);
+ if (bf != 0)
+ {
+ if (bf < 0)
+ {
+ AVL_SET_BALANCE_FACTOR(old_h, 1)
+ AVL_SET_BALANCE_FACTOR(deep_h, 0)
+ }
+ else
+ {
+ AVL_SET_BALANCE_FACTOR(deep_h, -1)
+ AVL_SET_BALANCE_FACTOR(old_h, 0)
+ }
+ AVL_SET_BALANCE_FACTOR(bal_h, 0)
+ }
+ else
+ {
+ AVL_SET_BALANCE_FACTOR(old_h, 0)
+ AVL_SET_BALANCE_FACTOR(deep_h, 0)
+ }
+ }
+ else
+ {
+ AVL_SET_LESS(bal_h, AVL_GET_GREATER(deep_h, 0))
+ AVL_SET_GREATER(deep_h, bal_h)
+ if (AVL_GET_BALANCE_FACTOR(deep_h) == 0)
+ {
+ AVL_SET_BALANCE_FACTOR(deep_h, 1)
+ AVL_SET_BALANCE_FACTOR(bal_h, -1)
+ }
+ else
+ {
+ AVL_SET_BALANCE_FACTOR(deep_h, 0)
+ AVL_SET_BALANCE_FACTOR(bal_h, 0)
+ }
+ bal_h = deep_h;
+ }
+ }
+
+ return(bal_h);
+ }
+
+L__SC AVL_HANDLE L__(insert)(L__(avl) *L__tree, AVL_HANDLE h)
+ {
+ AVL_SET_LESS(h, AVL_NULL)
+ AVL_SET_GREATER(h, AVL_NULL)
+ AVL_SET_BALANCE_FACTOR(h, 0)
+
+ if (L__tree->root == AVL_NULL)
+ L__tree->root = h;
+ else
+ {
+ /* Last unbalanced node encountered in search for insertion point. */
+ AVL_HANDLE unbal = AVL_NULL;
+ /* Parent of last unbalanced node. */
+ AVL_HANDLE parent_unbal = AVL_NULL;
+ /* Balance factor of last unbalanced node. */
+ int unbal_bf;
+
+ /* Zero-based depth in tree. */
+ unsigned depth = 0, unbal_depth = 0;
+
+ /* Records a path into the tree. If bit n is true, indicates
+ ** take greater branch from the nth node in the path, otherwise
+ ** take the less branch. bit 0 gives branch from root, and
+ ** so on. */
+ L__BIT_ARR_DEFN(branch)
+
+ AVL_HANDLE hh = L__tree->root;
+ AVL_HANDLE parent = AVL_NULL;
+ int cmp;
+
+ do
+ {
+ if (AVL_GET_BALANCE_FACTOR(hh) != 0)
+ {
+ unbal = hh;
+ parent_unbal = parent;
+ unbal_depth = depth;
+ }
+ cmp = AVL_COMPARE_NODE_NODE(h, hh);
+ if (cmp == 0)
+ /* Duplicate key. */
+ return(hh);
+ parent = hh;
+ if (cmp > 0)
+ {
+ hh = AVL_GET_GREATER(hh, 1);
+ L__BIT_ARR_1(branch, depth)
+ }
+ else
+ {
+ hh = AVL_GET_LESS(hh, 1);
+ L__BIT_ARR_0(branch, depth)
+ }
+ L__CHECK_READ_ERROR(AVL_NULL)
+ depth++;
+ }
+ while (hh != AVL_NULL);
+
+ /* Add node to insert as leaf of tree. */
+ if (cmp < 0)
+ AVL_SET_LESS(parent, h)
+ else
+ AVL_SET_GREATER(parent, h)
+
+ depth = unbal_depth;
+
+ if (unbal == AVL_NULL)
+ hh = L__tree->root;
+ else
+ {
+ cmp = L__BIT_ARR_VAL(branch, depth) ? 1 : -1;
+ depth++;
+ unbal_bf = AVL_GET_BALANCE_FACTOR(unbal);
+ if (cmp < 0)
+ unbal_bf--;
+ else /* cmp > 0 */
+ unbal_bf++;
+ hh = cmp < 0 ? AVL_GET_LESS(unbal, 1) : AVL_GET_GREATER(unbal, 1);
+ L__CHECK_READ_ERROR(AVL_NULL)
+ if ((unbal_bf != -2) && (unbal_bf != 2))
+ {
+ /* No rebalancing of tree is necessary. */
+ AVL_SET_BALANCE_FACTOR(unbal, unbal_bf)
+ unbal = AVL_NULL;
+ }
+ }
+
+ if (hh != AVL_NULL)
+ while (h != hh)
+ {
+ cmp = L__BIT_ARR_VAL(branch, depth) ? 1 : -1;
+ depth++;
+ if (cmp < 0)
+ {
+ AVL_SET_BALANCE_FACTOR(hh, -1)
+ hh = AVL_GET_LESS(hh, 1);
+ }
+ else /* cmp > 0 */
+ {
+ AVL_SET_BALANCE_FACTOR(hh, 1)
+ hh = AVL_GET_GREATER(hh, 1);
+ }
+ L__CHECK_READ_ERROR(AVL_NULL)
+ }
+
+ if (unbal != AVL_NULL)
+ {
+ unbal = L__(balance)(L__BALANCE_PARAM_CALL_PREFIX unbal);
+ L__CHECK_READ_ERROR(AVL_NULL)
+ if (parent_unbal == AVL_NULL)
+ L__tree->root = unbal;
+ else
+ {
+ depth = unbal_depth - 1;
+ cmp = L__BIT_ARR_VAL(branch, depth) ? 1 : -1;
+ if (cmp < 0)
+ AVL_SET_LESS(parent_unbal, unbal)
+ else /* cmp > 0 */
+ AVL_SET_GREATER(parent_unbal, unbal)
+ }
+ }
+
+ }
+
+ return(h);
+ }
+
+#endif
+
+#if (L__IMPL_MASK & AVL_IMPL_SEARCH)
+
+L__SC AVL_HANDLE L__(search)(L__(avl) *L__tree, AVL_KEY k, avl_search_type st)
+ {
+ int cmp, target_cmp;
+ AVL_HANDLE match_h = AVL_NULL;
+ AVL_HANDLE h = L__tree->root;
+
+ if (st & AVL_LESS)
+ target_cmp = 1;
+ else if (st & AVL_GREATER)
+ target_cmp = -1;
+ else
+ target_cmp = 0;
+
+ while (h != AVL_NULL)
+ {
+ cmp = AVL_COMPARE_KEY_NODE(k, h);
+ if (cmp == 0)
+ {
+ if (st & AVL_EQUAL)
+ {
+ match_h = h;
+ break;
+ }
+ cmp = -target_cmp;
+ }
+ else if (target_cmp != 0)
+ if (!((cmp ^ target_cmp) & L__MASK_HIGH_BIT))
+ /* cmp and target_cmp are both positive or both negative. */
+ match_h = h;
+ h = cmp < 0 ? AVL_GET_LESS(h, 1) : AVL_GET_GREATER(h, 1);
+ L__CHECK_READ_ERROR(AVL_NULL)
+ }
+
+ return(match_h);
+ }
+
+#endif
+
+#if (L__IMPL_MASK & AVL_IMPL_SEARCH_LEAST)
+
+L__SC AVL_HANDLE L__(search_least)(L__(avl) *L__tree)
+ {
+ AVL_HANDLE h = L__tree->root;
+ AVL_HANDLE parent = AVL_NULL;
+
+ while (h != AVL_NULL)
+ {
+ parent = h;
+ h = AVL_GET_LESS(h, 1);
+ L__CHECK_READ_ERROR(AVL_NULL)
+ }
+
+ return(parent);
+ }
+
+#endif
+
+#if (L__IMPL_MASK & AVL_IMPL_SEARCH_GREATEST)
+
+L__SC AVL_HANDLE L__(search_greatest)(L__(avl) *L__tree)
+ {
+ AVL_HANDLE h = L__tree->root;
+ AVL_HANDLE parent = AVL_NULL;
+
+ while (h != AVL_NULL)
+ {
+ parent = h;
+ h = AVL_GET_GREATER(h, 1);
+ L__CHECK_READ_ERROR(AVL_NULL)
+ }
+
+ return(parent);
+ }
+
+#endif
+
+#if (L__IMPL_MASK & AVL_IMPL_REMOVE)
+
+/* Prototype of balance function (called by remove) in case not in
+** same compilation unit.
+*/
+L__SC AVL_HANDLE L__(balance)(L__BALANCE_PARAM_DECL_PREFIX AVL_HANDLE bal_h);
+
+L__SC AVL_HANDLE L__(remove)(L__(avl) *L__tree, AVL_KEY k)
+ {
+ /* Zero-based depth in tree. */
+ unsigned depth = 0, rm_depth;
+
+ /* Records a path into the tree. If bit n is true, indicates
+ ** take greater branch from the nth node in the path, otherwise
+ ** take the less branch. bit 0 gives branch from root, and
+ ** so on. */
+ L__BIT_ARR_DEFN(branch)
+
+ AVL_HANDLE h = L__tree->root;
+ AVL_HANDLE parent = AVL_NULL;
+ AVL_HANDLE child;
+ AVL_HANDLE path;
+ int cmp, cmp_shortened_sub_with_path;
+ int reduced_depth;
+ int bf;
+ AVL_HANDLE rm;
+ AVL_HANDLE parent_rm;
+
+ for ( ; ; )
+ {
+ if (h == AVL_NULL)
+ /* No node in tree with given key. */
+ return(AVL_NULL);
+ cmp = AVL_COMPARE_KEY_NODE(k, h);
+ if (cmp == 0)
+ /* Found node to remove. */
+ break;
+ parent = h;
+ if (cmp > 0)
+ {
+ h = AVL_GET_GREATER(h, 1);
+ L__BIT_ARR_1(branch, depth)
+ }
+ else
+ {
+ h = AVL_GET_LESS(h, 1);
+ L__BIT_ARR_0(branch, depth)
+ }
+ L__CHECK_READ_ERROR(AVL_NULL)
+ depth++;
+ cmp_shortened_sub_with_path = cmp;
+ }
+ rm = h;
+ parent_rm = parent;
+ rm_depth = depth;
+
+ /* If the node to remove is not a leaf node, we need to get a
+ ** leaf node, or a node with a single leaf as its child, to put
+ ** in the place of the node to remove. We will get the greatest
+ ** node in the less subtree (of the node to remove), or the least
+ ** node in the greater subtree. We take the leaf node from the
+ ** deeper subtree, if there is one. */
+
+ if (AVL_GET_BALANCE_FACTOR(h) < 0)
+ {
+ child = AVL_GET_LESS(h, 1);
+ L__BIT_ARR_0(branch, depth)
+ cmp = -1;
+ }
+ else
+ {
+ child = AVL_GET_GREATER(h, 1);
+ L__BIT_ARR_1(branch, depth)
+ cmp = 1;
+ }
+ L__CHECK_READ_ERROR(AVL_NULL)
+ depth++;
+
+ if (child != AVL_NULL)
+ {
+ cmp = -cmp;
+ do
+ {
+ parent = h;
+ h = child;
+ if (cmp < 0)
+ {
+ child = AVL_GET_LESS(h, 1);
+ L__BIT_ARR_0(branch, depth)
+ }
+ else
+ {
+ child = AVL_GET_GREATER(h, 1);
+ L__BIT_ARR_1(branch, depth)
+ }
+ L__CHECK_READ_ERROR(AVL_NULL)
+ depth++;
+ }
+ while (child != AVL_NULL);
+
+ if (parent == rm)
+ /* Only went through do loop once. Deleted node will be replaced
+ ** in the tree structure by one of its immediate children. */
+ cmp_shortened_sub_with_path = -cmp;
+ else
+ cmp_shortened_sub_with_path = cmp;
+
+ /* Get the handle of the opposite child, which may not be null. */
+ child = cmp > 0 ? AVL_GET_LESS(h, 0) : AVL_GET_GREATER(h, 0);
+ }
+
+ if (parent == AVL_NULL)
+ /* There were only 1 or 2 nodes in this tree. */
+ L__tree->root = child;
+ else if (cmp_shortened_sub_with_path < 0)
+ AVL_SET_LESS(parent, child)
+ else
+ AVL_SET_GREATER(parent, child)
+
+ /* "path" is the parent of the subtree being eliminated or reduced
+ ** from a depth of 2 to 1. If "path" is the node to be removed, we
+ ** set path to the node we're about to poke into the position of the
+ ** node to be removed. */
+ path = parent == rm ? h : parent;
+
+ if (h != rm)
+ {
+ /* Poke in the replacement for the node to be removed. */
+ AVL_SET_LESS(h, AVL_GET_LESS(rm, 0))
+ AVL_SET_GREATER(h, AVL_GET_GREATER(rm, 0))
+ AVL_SET_BALANCE_FACTOR(h, AVL_GET_BALANCE_FACTOR(rm))
+ if (parent_rm == AVL_NULL)
+ L__tree->root = h;
+ else
+ {
+ depth = rm_depth - 1;
+ if (L__BIT_ARR_VAL(branch, depth))
+ AVL_SET_GREATER(parent_rm, h)
+ else
+ AVL_SET_LESS(parent_rm, h)
+ }
+ }
+
+ if (path != AVL_NULL)
+ {
+ /* Create a temporary linked list from the parent of the path node
+ ** to the root node. */
+ h = L__tree->root;
+ parent = AVL_NULL;
+ depth = 0;
+ while (h != path)
+ {
+ if (L__BIT_ARR_VAL(branch, depth))
+ {
+ child = AVL_GET_GREATER(h, 1);
+ AVL_SET_GREATER(h, parent)
+ }
+ else
+ {
+ child = AVL_GET_LESS(h, 1);
+ AVL_SET_LESS(h, parent)
+ }
+ L__CHECK_READ_ERROR(AVL_NULL)
+ depth++;
+ parent = h;
+ h = child;
+ }
+
+ /* Climb from the path node to the root node using the linked
+ ** list, restoring the tree structure and rebalancing as necessary.
+ */
+ reduced_depth = 1;
+ cmp = cmp_shortened_sub_with_path;
+ for ( ; ; )
+ {
+ if (reduced_depth)
+ {
+ bf = AVL_GET_BALANCE_FACTOR(h);
+ if (cmp < 0)
+ bf++;
+ else /* cmp > 0 */
+ bf--;
+ if ((bf == -2) || (bf == 2))
+ {
+ h = L__(balance)(L__BALANCE_PARAM_CALL_PREFIX h);
+ L__CHECK_READ_ERROR(AVL_NULL)
+ bf = AVL_GET_BALANCE_FACTOR(h);
+ }
+ else
+ AVL_SET_BALANCE_FACTOR(h, bf)
+ reduced_depth = (bf == 0);
+ }
+ if (parent == AVL_NULL)
+ break;
+ child = h;
+ h = parent;
+ depth--;
+ cmp = L__BIT_ARR_VAL(branch, depth) ? 1 : -1;
+ if (cmp < 0)
+ {
+ parent = AVL_GET_LESS(h, 1);
+ AVL_SET_LESS(h, child)
+ }
+ else
+ {
+ parent = AVL_GET_GREATER(h, 1);
+ AVL_SET_GREATER(h, child)
+ }
+ L__CHECK_READ_ERROR(AVL_NULL)
+ }
+ L__tree->root = h;
+ }
+
+ return(rm);
+ }
+
+#endif
+
+#if (L__IMPL_MASK & AVL_IMPL_SUBST)
+
+L__SC AVL_HANDLE L__(subst)(L__(avl) *L__tree, AVL_HANDLE new_node)
+ {
+ AVL_HANDLE h = L__tree->root;
+ AVL_HANDLE parent = AVL_NULL;
+ int cmp, last_cmp;
+
+ /* Search for node already in tree with same key. */
+ for ( ; ; )
+ {
+ if (h == AVL_NULL)
+ /* No node in tree with same key as new node. */
+ return(AVL_NULL);
+ cmp = AVL_COMPARE_NODE_NODE(new_node, h);
+ if (cmp == 0)
+ /* Found the node to substitute new one for. */
+ break;
+ last_cmp = cmp;
+ parent = h;
+ h = cmp < 0 ? AVL_GET_LESS(h, 1) : AVL_GET_GREATER(h, 1);
+ L__CHECK_READ_ERROR(AVL_NULL)
+ }
+
+ /* Copy tree housekeeping fields from node in tree to new node. */
+ AVL_SET_LESS(new_node, AVL_GET_LESS(h, 0))
+ AVL_SET_GREATER(new_node, AVL_GET_GREATER(h, 0))
+ AVL_SET_BALANCE_FACTOR(new_node, AVL_GET_BALANCE_FACTOR(h))
+
+ if (parent == AVL_NULL)
+ /* New node is also new root. */
+ L__tree->root = new_node;
+ else
+ {
+ /* Make parent point to new node. */
+ if (last_cmp < 0)
+ AVL_SET_LESS(parent, new_node)
+ else
+ AVL_SET_GREATER(parent, new_node)
+ }
+
+ return(h);
+ }
+
+#endif
+
+#ifdef AVL_BUILD_ITER_TYPE
+
+#if (L__IMPL_MASK & AVL_IMPL_BUILD)
+
+L__SC int L__(build)(
+ L__(avl) *L__tree, AVL_BUILD_ITER_TYPE p, L__SIZE num_nodes)
+ {
+ /* Gives path to subtree being built. If bit n is false, branch
+ ** less from the node at depth n, if true branch greater. */
+ L__BIT_ARR_DEFN(branch)
+
+ /* If bit n is true, then for the current subtree at depth n, its
+ ** greater subtree has one more node than its less subtree. */
+ L__BIT_ARR_DEFN(rem)
+
+ /* Depth of root node of current subtree. */
+ unsigned depth = 0;
+
+ /* Number of nodes in current subtree. */
+ L__SIZE num_sub = num_nodes;
+
+ /* The algorithm relies on a stack of nodes whose less subtree has
+ ** been built, but whose greater subtree has not yet been built.
+ ** The stack is implemented as linked list. The nodes are linked
+ ** together by having the "greater" handle of a node set to the
+ ** next node in the list. "less_parent" is the handle of the first
+ ** node in the list. */
+ AVL_HANDLE less_parent = AVL_NULL;
+
+ /* h is root of current subtree, child is one of its children. */
+ AVL_HANDLE h;
+ AVL_HANDLE child;
+
+ if (num_nodes == 0)
+ {
+ L__tree->root = AVL_NULL;
+ return(1);
+ }
+
+ for ( ; ; )
+ {
+ while (num_sub > 2)
+ {
+ /* Subtract one for root of subtree. */
+ num_sub--;
+ if (num_sub & 1)
+ L__BIT_ARR_1(rem, depth)
+ else
+ L__BIT_ARR_0(rem, depth)
+ L__BIT_ARR_0(branch, depth)
+ depth++;
+ num_sub >>= 1;
+ }
+
+ if (num_sub == 2)
+ {
+ /* Build a subtree with two nodes, slanting to greater.
+ ** I arbitrarily chose to always have the extra node in the
+ ** greater subtree when there is an odd number of nodes to
+ ** split between the two subtrees. */
+
+ h = AVL_BUILD_ITER_VAL(p);
+ L__CHECK_READ_ERROR(0)
+ AVL_BUILD_ITER_INCR(p)
+ child = AVL_BUILD_ITER_VAL(p);
+ L__CHECK_READ_ERROR(0)
+ AVL_BUILD_ITER_INCR(p)
+ AVL_SET_LESS(child, AVL_NULL)
+ AVL_SET_GREATER(child, AVL_NULL)
+ AVL_SET_BALANCE_FACTOR(child, 0)
+ AVL_SET_GREATER(h, child)
+ AVL_SET_LESS(h, AVL_NULL)
+ AVL_SET_BALANCE_FACTOR(h, 1)
+ }
+ else /* num_sub == 1 */
+ {
+ /* Build a subtree with one node. */
+
+ h = AVL_BUILD_ITER_VAL(p);
+ L__CHECK_READ_ERROR(0)
+ AVL_BUILD_ITER_INCR(p)
+ AVL_SET_LESS(h, AVL_NULL)
+ AVL_SET_GREATER(h, AVL_NULL)
+ AVL_SET_BALANCE_FACTOR(h, 0)
+ }
+
+ while (depth)
+ {
+ depth--;
+ if (!L__BIT_ARR_VAL(branch, depth))
+ /* We've completed a less subtree. */
+ break;
+
+ /* We've completed a greater subtree, so attach it to
+ ** its parent (that is less than it). We pop the parent
+ ** off the stack of less parents. */
+ child = h;
+ h = less_parent;
+ less_parent = AVL_GET_GREATER(h, 1);
+ L__CHECK_READ_ERROR(0)
+ AVL_SET_GREATER(h, child)
+ /* num_sub = 2 * (num_sub - rem[depth]) + rem[depth] + 1 */
+ num_sub <<= 1;
+ num_sub += L__BIT_ARR_VAL(rem, depth) ? 0 : 1;
+ if (num_sub & (num_sub - 1))
+ /* num_sub is not a power of 2. */
+ AVL_SET_BALANCE_FACTOR(h, 0)
+ else
+ /* num_sub is a power of 2. */
+ AVL_SET_BALANCE_FACTOR(h, 1)
+ }
+
+ if (num_sub == num_nodes)
+ /* We've completed the full tree. */
+ break;
+
+ /* The subtree we've completed is the less subtree of the
+ ** next node in the sequence. */
+
+ child = h;
+ h = AVL_BUILD_ITER_VAL(p);
+ L__CHECK_READ_ERROR(0)
+ AVL_BUILD_ITER_INCR(p)
+ AVL_SET_LESS(h, child)
+
+ /* Put h into stack of less parents. */
+ AVL_SET_GREATER(h, less_parent)
+ less_parent = h;
+
+ /* Proceed to creating the greater subtree of h. */
+ L__BIT_ARR_1(branch, depth)
+ num_sub += L__BIT_ARR_VAL(rem, depth) ? 1 : 0;
+ depth++;
+
+ } /* end for ( ; ; ) */
+
+ L__tree->root = h;
+
+ return(1);
+ }
+
+#endif
+
+#endif
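+
+/* Illustrative sketch, not part of the imported library: one way to supply
+** the build iterator.  The iterator must yield the nodes in ascending key
+** order; here it is assumed to be a cursor into an array of node pointers,
+** with the three macros defined by the user before this file is included.
+** handle_read_error() is a hypothetical stand-in for the caller's error path.
+*/
+#if 0
+#define AVL_BUILD_ITER_TYPE	struct node **
+#define AVL_BUILD_ITER_VAL(p)	(*(p))
+#define AVL_BUILD_ITER_INCR(p)	((p)++)
+
+/* sorted[] holds num node pointers, already ordered by key.  build links
+** them into a balanced tree in O(num) time, with no rebalancing passes; it
+** returns 0 only if AVL_READ_ERROR fires during construction.
+*/
+if (!avl_build(&tree, sorted, num))
+	handle_read_error();
+#endif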
+
+#if (L__IMPL_MASK & AVL_IMPL_INIT_ITER)
+
+/* Initialize depth to an invalid value, to indicate that the iterator is
+** invalid. (Depth is zero-based.) It's not necessary to initialize
+** iterators prior to passing them to the "start" function.
+*/
+L__SC void L__(init_iter)(L__(iter) *iter) { iter->depth = ~0; }
+
+#endif
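+
+/* Illustrative sketch, not part of the imported library: ~0 is the "no
+** current node" marker, so a freshly initialized iterator reports AVL_NULL
+** until one of the start functions is called (hypothetical "avl_" prefix).
+*/
+#if 0
+avl_iter it;
+
+avl_init_iter(&it);
+/* avl_get_iter(&it) now returns AVL_NULL. */
+#endif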
+
+#ifdef AVL_READ_ERRORS_HAPPEN
+
+#define L__CHECK_READ_ERROR_INV_DEPTH \
+{ if (AVL_READ_ERROR) { iter->depth = ~0; return; } }
+
+#else
+
+#define L__CHECK_READ_ERROR_INV_DEPTH
+
+#endif
+
+#if (L__IMPL_MASK & AVL_IMPL_START_ITER)
+
+L__SC void L__(start_iter)(
+ L__(avl) *L__tree, L__(iter) *iter, AVL_KEY k, avl_search_type st)
+ {
+ AVL_HANDLE h = L__tree->root;
+ unsigned d = 0;
+ int cmp, target_cmp;
+
+ /* Save the tree that we're going to iterate through in a
+ ** member variable. */
+ iter->tree_ = L__tree;
+
+ iter->depth = ~0;
+
+ if (h == AVL_NULL)
+ /* Tree is empty. */
+ return;
+
+ if (st & AVL_LESS)
+ /* Key can be greater than key of starting node. */
+ target_cmp = 1;
+ else if (st & AVL_GREATER)
+ /* Key can be less than key of starting node. */
+ target_cmp = -1;
+ else
+ /* Key must be same as key of starting node. */
+ target_cmp = 0;
+
+ for ( ; ; )
+ {
+ cmp = AVL_COMPARE_KEY_NODE(k, h);
+ if (cmp == 0)
+ {
+ if (st & AVL_EQUAL)
+ {
+ /* Equal node was sought and found as starting node. */
+ iter->depth = d;
+ break;
+ }
+ cmp = -target_cmp;
+ }
+ else if (target_cmp != 0)
+ if (!((cmp ^ target_cmp) & L__MASK_HIGH_BIT))
+ /* cmp and target_cmp are both negative or both positive. */
+ iter->depth = d;
+ h = cmp < 0 ? AVL_GET_LESS(h, 1) : AVL_GET_GREATER(h, 1);
+ L__CHECK_READ_ERROR_INV_DEPTH
+ if (h == AVL_NULL)
+ break;
+ if (cmp > 0)
+ L__BIT_ARR_1(iter->branch, d)
+ else
+ L__BIT_ARR_0(iter->branch, d)
+ iter->path_h[d++] = h;
+ }
+ }
+
+#endif
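+
+/* Illustrative sketch, not part of the imported library: the search type
+** selects where iteration starts relative to key k.  As the bit tests above
+** show, the values act as flags and can be combined (hypothetical "avl_"
+** prefix).
+*/
+#if 0
+avl_iter it;
+
+/* Start exactly at the node with key k, if present. */
+avl_start_iter(&tree, &it, k, AVL_EQUAL);
+
+/* Start at the least node with key >= k (useful for range scans). */
+avl_start_iter(&tree, &it, k, AVL_GREATER | AVL_EQUAL);
+
+/* Start at the greatest node with key strictly less than k. */
+avl_start_iter(&tree, &it, k, AVL_LESS);
+#endif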
+
+#if (L__IMPL_MASK & AVL_IMPL_START_ITER_LEAST)
+
+L__SC void L__(start_iter_least)(L__(avl) *L__tree, L__(iter) *iter)
+ {
+ AVL_HANDLE h = L__tree->root;
+
+ iter->tree_ = L__tree;
+
+ iter->depth = ~0;
+
+ L__BIT_ARR_ALL(iter->branch, 0)
+
+ while (h != AVL_NULL)
+ {
+ if (iter->depth != ~0)
+ iter->path_h[iter->depth] = h;
+ iter->depth++;
+ h = AVL_GET_LESS(h, 1);
+ L__CHECK_READ_ERROR_INV_DEPTH
+ }
+ }
+
+#endif
+
+#if (L__IMPL_MASK & AVL_IMPL_START_ITER_GREATEST)
+
+L__SC void L__(start_iter_greatest)(L__(avl) *L__tree, L__(iter) *iter)
+ {
+ AVL_HANDLE h = L__tree->root;
+
+ iter->tree_ = L__tree;
+
+ iter->depth = ~0;
+
+ L__BIT_ARR_ALL(iter->branch, 1)
+
+ while (h != AVL_NULL)
+ {
+ if (iter->depth != ~0)
+ iter->path_h[iter->depth] = h;
+ iter->depth++;
+ h = AVL_GET_GREATER(h, 1);
+ L__CHECK_READ_ERROR_INV_DEPTH
+ }
+ }
+
+#endif
+
+#if (L__IMPL_MASK & AVL_IMPL_GET_ITER)
+
+L__SC AVL_HANDLE L__(get_iter)(L__(iter) *iter)
+ {
+ if (iter->depth == ~0)
+ return(AVL_NULL);
+
+ return(iter->depth == 0 ?
+ iter->tree_->root : iter->path_h[iter->depth - 1]);
+ }
+
+#endif
+
+#if (L__IMPL_MASK & AVL_IMPL_INCR_ITER)
+
+L__SC void L__(incr_iter)(L__(iter) *iter)
+ {
+ #define L__tree (iter->tree_)
+
+ if (iter->depth != ~0)
+ {
+ AVL_HANDLE h =
+ AVL_GET_GREATER((iter->depth == 0 ?
+ iter->tree_->root : iter->path_h[iter->depth - 1]), 1);
+ L__CHECK_READ_ERROR_INV_DEPTH
+
+ if (h == AVL_NULL)
+ do
+ {
+ if (iter->depth == 0)
+ {
+ iter->depth = ~0;
+ break;
+ }
+ iter->depth--;
+ }
+ while (L__BIT_ARR_VAL(iter->branch, iter->depth));
+ else
+ {
+ L__BIT_ARR_1(iter->branch, iter->depth)
+ iter->path_h[iter->depth++] = h;
+ for ( ; ; )
+ {
+ h = AVL_GET_LESS(h, 1);
+ L__CHECK_READ_ERROR_INV_DEPTH
+ if (h == AVL_NULL)
+ break;
+ L__BIT_ARR_0(iter->branch, iter->depth)
+ iter->path_h[iter->depth++] = h;
+ }
+ }
+ }
+
+ #undef L__tree
+ }
+
+#endif
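+
+/* Illustrative sketch, not part of the imported library: a complete
+** in-order walk using the iterator primitives above (hypothetical "avl_"
+** prefix; visit() stands in for whatever per-node work the caller does).
+*/
+#if 0
+avl_iter it;
+struct node *n;
+
+for (avl_start_iter_least(&tree, &it);
+     (n = avl_get_iter(&it)) != AVL_NULL;
+     avl_incr_iter(&it))
+	visit(n);
+#endif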
+
+#if (L__IMPL_MASK & AVL_IMPL_DECR_ITER)
+
+L__SC void L__(decr_iter)(L__(iter) *iter)
+ {
+ #define L__tree (iter->tree_)
+
+ if (iter->depth != ~0)
+ {
+ AVL_HANDLE h =
+ AVL_GET_LESS((iter->depth == 0 ?
+ iter->tree_->root : iter->path_h[iter->depth - 1]), 1);
+ L__CHECK_READ_ERROR_INV_DEPTH
+
+ if (h == AVL_NULL)
+ do
+ {
+ if (iter->depth == 0)
+ {
+ iter->depth = ~0;
+ break;
+ }
+ iter->depth--;
+ }
+ while (!L__BIT_ARR_VAL(iter->branch, iter->depth));
+ else
+ {
+ L__BIT_ARR_0(iter->branch, iter->depth)
+ iter->path_h[iter->depth++] = h;
+ for ( ; ; )
+ {
+ h = AVL_GET_GREATER(h, 1);
+ L__CHECK_READ_ERROR_INV_DEPTH
+ if (h == AVL_NULL)
+ break;
+ L__BIT_ARR_1(iter->branch, iter->depth)
+ iter->path_h[iter->depth++] = h;
+ }
+ }
+ }
+
+ #undef L__tree
+ }
+
+#endif
+
+/* Tidy up the preprocessor symbol name space. */
+#undef L__
+#undef L__EST_LONG_BIT
+#undef L__SIZE
+#undef L__MASK_HIGH_BIT
+#undef L__LONG_BIT
+#undef L__BIT_ARR_DEFN
+#undef L__BIT_ARR_VAL
+#undef L__BIT_ARR_0
+#undef L__BIT_ARR_1
+#undef L__BIT_ARR_ALL
+#undef L__CHECK_READ_ERROR
+#undef L__CHECK_READ_ERROR_INV_DEPTH
+#undef L__BIT_ARR_LONGS
+#undef L__IMPL_MASK
+#undef L__SC
+#undef L__BALANCE_PARAM_CALL_PREFIX
+#undef L__BALANCE_PARAM_DECL_PREFIX
/* vm operation mode state and values */
EXTERN long vm_paged;
+
+EXTERN int meminit_done;
#include "memory.h"
-/* Location in our virtual address space where we can map in
- * any physical page we want.
-*/
-PRIVATE unsigned char *varmap = NULL; /* Our address space. */
-PRIVATE u32_t varmap_loc; /* Our page table. */
-
/* PDE used to map in kernel, kernel physical address. */
PRIVATE int kernel_pde = -1, pagedir_pde = -1;
PRIVATE u32_t kern_pde_val = 0, global_bit = 0, pagedir_pde_val;
vm_assert(level >= 1);
vm_assert(level <= 2);
- if(level > 1 || !(vmp->vm_flags & VMF_HASPT)) {
+ if(level > 1 || !(vmp->vm_flags & VMF_HASPT) || !meminit_done) {
int r;
void *s;
vm_assert(pages == 1);
for(pde = I386_VM_PDE(v); pde <= finalpde; pde++) {
vm_assert(pde >= 0 && pde < I386_VM_DIR_ENTRIES);
if(pt->pt_dir[pde] & I386_VM_BIGPAGE) {
+ printf("pt_writemap: trying to write 0x%lx into 0x%lx\n",
+ physaddr, v);
vm_panic("pt_writemap: BIGPAGE found", NO_NUM);
}
if(!(pt->pt_dir[pde] & I386_VM_PRESENT)) {
*/
if(pt_new(newpt) != OK)
vm_panic("pt_init: pt_new failed", NO_NUM);
+
+ /* Check that the old position still fits in the kernel-mapped range. */
+ pt_check(vmp);
/* Set up mappings for VM process. */
for(v = lo; v < hi; v += I386_PAGE_SIZE) {
phys_bytes addr;
u32_t flags;
- /* We have to write the old and new position in the PT,
+ /* We have to write the new position in the PT,
* so we can move our segments.
*/
if(pt_writemap(newpt, v+moveup, v, I386_PAGE_SIZE,
I386_VM_PRESENT|I386_VM_WRITE|I386_VM_USER, 0) != OK)
vm_panic("pt_init: pt_writemap failed", NO_NUM);
- if(pt_writemap(newpt, v, v, I386_PAGE_SIZE,
- I386_VM_PRESENT|I386_VM_WRITE|I386_VM_USER, 0) != OK)
- vm_panic("pt_init: pt_writemap failed", NO_NUM);
}
/* Move segments up too. */
/* Let other functions know VM now has a private page table. */
vmp->vm_flags |= VMF_HASPT;
- /* Reserve a page in our virtual address space that we
- * can use to map in arbitrary physical pages.
- */
- varmap_loc = findhole(newpt, I386_PAGE_SIZE,
- arch_vir2map(vmp, vmp->vm_stacktop),
- vmp->vm_arch.vm_data_top);
- if(varmap_loc == NO_MEM) {
- vm_panic("no virt addr for vm mappings", NO_NUM);
- }
- varmap = (unsigned char *) arch_map2vir(vmp, varmap_loc);
-
/* Find a PDE below the process address range that is available
* for mapping in the page directories (read-only).
*/
/* Back to reality - this is where the stack actually is. */
vmp->vm_arch.vm_seg[S].mem_len -= extra_clicks;
- /* Wipe old mappings from VM. */
- for(v = lo; v < hi; v += I386_PAGE_SIZE) {
- if(pt_writemap(newpt, v, MAP_NONE, I386_PAGE_SIZE,
- 0, WMF_OVERWRITE) != OK)
- vm_panic("pt_init: pt_writemap failed", NO_NUM);
- }
-
/* All OK. */
return;
}
*===========================================================================*/
PUBLIC int pt_bind(pt_t *pt, struct vmproc *who)
{
- int slot;
+ int slot, ispt;
u32_t phys;
/* Basic sanity checks. */
#if 0
printf("VM: slot %d has pde val 0x%lx\n", slot, page_directories[slot]);
#endif
-
/* Tell kernel about new page table root. */
return sys_vmctl(who->vm_endpoint, VMCTL_I386_SETCR3,
pt ? pt->pt_dir_phys : 0);
}
/*===========================================================================*
- * pt_cycle *
+ * pt_check *
*===========================================================================*/
-PUBLIC void pt_cycle(void)
+PUBLIC void pt_check(struct vmproc *vmp)
{
- vm_checkspares();
-}
-
-/* In sanity check mode, pages are mapped and unmapped explicitly, so
- * unexpected double mappings (overwriting a page table entry) are caught.
- * If not sanity checking, simply keep the page mapped in and overwrite
- * the mapping entry; we need WMF_OVERWRITE for that in PHYS_MAP though.
- */
-#if SANITYCHECKS
-#define MAPFLAGS 0
-#else
-#define MAPFLAGS WMF_OVERWRITE
-#endif
-
-static u32_t ismapped = MAP_NONE;
-
-#define PHYS_MAP(a, o) \
-{ int r; \
- u32_t wantmapped; \
- vm_assert(varmap); \
- (o) = (a) % I386_PAGE_SIZE; \
- wantmapped = (a) - (o); \
- if(wantmapped != ismapped || ismapped == MAP_NONE) { \
- r = pt_writemap(&vmp->vm_pt, (vir_bytes) varmap_loc, \
- wantmapped, I386_PAGE_SIZE, \
- I386_VM_PRESENT | I386_VM_USER | I386_VM_WRITE, \
- MAPFLAGS); \
- if(r != OK) \
- vm_panic("PHYS_MAP: pt_writemap", NO_NUM); \
- ismapped = wantmapped; \
- /* Invalidate TLB for this page. */ \
- if((r=sys_vmctl(SELF, VMCTL_I386_INVLPG, varmap_loc)) != OK) { \
- vm_panic("VM: vmctl failed", r); \
- } \
- } \
+ phys_bytes hi;
+ hi = CLICK2ABS(vmp->vm_arch.vm_seg[S].mem_phys +
+ vmp->vm_arch.vm_seg[S].mem_len);
+ if(hi >= (kernel_pde+1) * I386_BIG_PAGE_SIZE) {
+ printf("VM: %d doesn't fit in kernel range\n",
+ vmp->vm_endpoint, hi);
+ vm_panic("boot time processes too big", NO_NUM);
+ }
}
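+
+/* Illustrative sketch, not part of the patch: the limit pt_check enforces.
+ * A boot-time process that still runs on the kernel-provided mapping must
+ * end below (kernel_pde+1) big pages.  With 4 MB big pages
+ * (I386_BIG_PAGE_SIZE) and, say, kernel_pde == 3, that is the first 16 MB.
+ */
+#if 0
+	phys_bytes limit;
+
+	/* hi is the top of the stack segment, as computed above. */
+	limit = (kernel_pde + 1) * I386_BIG_PAGE_SIZE;	/* e.g. 4 * 4 MB */
+	vm_assert(hi < limit);	/* otherwise pt_check() panics */
+#endif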
-#define PHYSMAGIC 0x7b9a0590
-
-#if SANITYCHECKS
-#define PHYS_UNMAP if(OK != pt_writemap(&vmp->vm_pt, varmap_loc, MAP_NONE,\
- I386_PAGE_SIZE, 0, WMF_OVERWRITE)) { \
- vm_panic("PHYS_UNMAP: pt_writemap failed", NO_NUM); } \
- ismapped = MAP_NONE;
-#endif
-
-#define PHYS_VAL(o) (* (phys_bytes *) (varmap + (o)))
-
-
/*===========================================================================*
- * phys_writeaddr *
+ * pt_cycle *
*===========================================================================*/
-PUBLIC void phys_writeaddr(phys_bytes addr, phys_bytes v1, phys_bytes v2)
+PUBLIC void pt_cycle(void)
{
- phys_bytes offset;
-
- SANITYCHECK(SCL_DETAIL);
- PHYS_MAP(addr, offset);
- PHYS_VAL(offset) = v1;
- PHYS_VAL(offset + sizeof(phys_bytes)) = v2;
-#if SANITYCHECKS
- PHYS_VAL(offset + 2*sizeof(phys_bytes)) = PHYSMAGIC;
- PHYS_UNMAP;
-#endif
- SANITYCHECK(SCL_DETAIL);
+ vm_checkspares();
}
-/*===========================================================================*
- * phys_readaddr *
- *===========================================================================*/
-PUBLIC void phys_readaddr(phys_bytes addr, phys_bytes *v1, phys_bytes *v2)
-{
- phys_bytes offset;
-
- SANITYCHECK(SCL_DETAIL);
- PHYS_MAP(addr, offset);
- *v1 = PHYS_VAL(offset);
- *v2 = PHYS_VAL(offset + sizeof(phys_bytes));
-#if SANITYCHECKS
- vm_assert(PHYS_VAL(offset + 2*sizeof(phys_bytes)) == PHYSMAGIC);
- PHYS_UNMAP;
-#endif
- SANITYCHECK(SCL_DETAIL);
-}
/* Initialize tables to all physical memory. */
mem_init(mem_chunks);
-
-#if 0
- /* Can first kernel pages of code and data be (left) mapped out?
- * If so, change the SYSTEM process' memory map to reflect this
- * (future mappings of SYSTEM into other processes will not include
- * first pages), and free the first pages.
- */
- if(vm_paged && sys_vmctl(SELF, VMCTL_NOPAGEZERO, 0) == OK) {
- struct vmproc *vmp;
- vmp = &vmproc[VMP_SYSTEM];
- if(vmp->vm_arch.vm_seg[T].mem_len > 0) {
-#define DIFF CLICKSPERPAGE
- vmp->vm_arch.vm_seg[T].mem_phys += DIFF;
- vmp->vm_arch.vm_seg[T].mem_len -= DIFF;
- }
- vmp->vm_arch.vm_seg[D].mem_phys += DIFF;
- vmp->vm_arch.vm_seg[D].mem_len -= DIFF;
- }
-#endif
+ meminit_done = 1;
/* Give these processes their own page table. */
for (ip = &image[0]; ip < &image[NR_BOOT_PROCS]; ip++) {
GETVMP(vmp, ip->proc_nr);
+ if(!(ip->flags & PROC_FULLVM)) {
+ /* See if this process fits in the kernel
+ * mapping. VM itself has its own page table,
+ * so don't check it.
+ */
+ printf("VM: not giving %d its own pt\n",
+ vmp->vm_endpoint);
+ if(!(vmp->vm_flags & VMF_HASPT)) {
+ pt_check(vmp);
+ }
+ continue;
+ }
+ printf("VM: giving %d its own pt\n", vmp->vm_endpoint);
+
old_stack =
vmp->vm_arch.vm_seg[S].mem_vir +
vmp->vm_arch.vm_seg[S].mem_len -
vmp->vm_arch.vm_seg[D].mem_len;
- if(!(ip->flags & PROC_FULLVM))
- continue;
-
if(pt_new(&vmp->vm_pt) != OK)
vm_panic("vm_init: no new pagetable", NO_NUM);
#define BASICSTACK VM_PAGE_SIZE
vmp = &vmproc[n];
- if(m->VMM_FLAGS & MAP_LOWER16M)
- printf("VM: warning for %d: MAP_LOWER16M not implemented\n",
- m->m_source);
-
if(!(vmp->vm_flags & VMF_HASPT))
return ENXIO;
if(m->VMM_FLAGS & MAP_CONTIG) mfflags |= MF_CONTIG;
if(m->VMM_FLAGS & MAP_PREALLOC) mfflags |= MF_PREALLOC;
+ if(m->VMM_FLAGS & MAP_LOWER16M) vrflags |= VR_LOWER16MB;
if(m->VMM_FLAGS & MAP_ALIGN64K) vrflags |= VR_PHYS64K;
if(len % VM_PAGE_SIZE)
len += VM_PAGE_SIZE - (len % VM_PAGE_SIZE);
if(!(vr = map_page_region(vmp,
- arch_vir2map(vmp, vmp->vm_stacktop), VM_DATATOP, len, MAP_NONE,
- vrflags, mfflags))) {
+ arch_vir2map(vmp, vmp->vm_stacktop),
+ VM_DATATOP, len, MAP_NONE, vrflags, mfflags))) {
return ENOMEM;
}
} else {
vm_assert(vr);
m->VMM_RETADDR = arch_map2vir(vmp, vr->vaddr);
+
return OK;
}
vmp = &vmproc[p];
vm_assert(vmp->vm_flags & VMF_INUSE);
+#if 0
+ map_printmap(vmp);
+#endif
+
/* See if address is valid at all. */
if(!(region = map_lookup(vmp, addr))) {
vm_assert(PFERR_NOPAGE(err));
/* $(ARCH)/pagetable.c */
_PROTOTYPE( void pt_init, (void) );
+_PROTOTYPE( void pt_check, (struct vmproc *vmp) );
_PROTOTYPE( int pt_new, (pt_t *pt) );
_PROTOTYPE( void pt_free, (pt_t *pt) );
_PROTOTYPE( void pt_freerange, (pt_t *pt, vir_bytes lo, vir_bytes hi) );
_PROTOTYPE( void *vm_allocpages, (phys_bytes *p, int pages, int cat));
_PROTOTYPE( void pt_cycle, (void));
_PROTOTYPE( int pt_mapkernel, (pt_t *pt));
-_PROTOTYPE( void phys_readaddr, (phys_bytes addr, phys_bytes *v1, phys_bytes *v2));
-_PROTOTYPE( void phys_writeaddr, (phys_bytes addr, phys_bytes v1, phys_bytes v2));
#if SANITYCHECKS
_PROTOTYPE( void pt_sanitycheck, (pt_t *pt, char *file, int line) );
#endif
u32_t af = PAF_CLEAR;
if(region->flags & VR_PHYS64K)
af |= PAF_ALIGN64K;
+ if(region->flags & VR_LOWER16MB)
+ af |= PAF_LOWER16MB;
if((mem_clicks = ALLOC_MEM(clicks, af)) == NO_MEM) {
SLABFREE(newpb);
SLABFREE(newphysr);
#define VR_WRITABLE 0x01 /* Process may write here. */
#define VR_NOPF 0x02 /* May not generate page faults. */
#define VR_PHYS64K 0x04 /* Physical memory must be 64k aligned. */
+#define VR_LOWER16MB 0x08 /* Physical memory must be below 16 MB. */
/* Mapping type: */
#define VR_ANON 0x10 /* Memory to be cleared and allocated */
#define PAF_CLEAR 0x01 /* Clear physical memory. */
#define PAF_CONTIG 0x02 /* Physically contiguous. */
#define PAF_ALIGN64K 0x04 /* Aligned to 64k boundary. */
+#define PAF_LOWER16MB 0x08 /* Allocated below the 16 MB boundary. */
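+
+/* Illustrative sketch, not part of the patch: how the 16 MB constraint
+ * travels.  do_mmap() turns MAP_LOWER16M into VR_LOWER16MB on the region,
+ * and the region allocator turns VR_LOWER16MB into PAF_LOWER16MB so that
+ * ALLOC_MEM returns memory below 16 MB.  A caller that needs such memory
+ * (e.g. for ISA DMA) would pass the mmap flag; the userland wrapper shown
+ * here is only an assumption.
+ */
+#if 0
+	buf = mmap(0, size, PROT_READ | PROT_WRITE,
+		MAP_ANON | MAP_PREALLOC | MAP_CONTIG | MAP_LOWER16M, -1, 0);
+#endif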
/* special value for v in pt_allocmap */
#define AM_AUTO ((u32_t) -1)