/* Used for sanity check. */
PRIVATE phys_bytes mem_low, mem_high;
#define assert_range(addr, len) \
- vm_assert((addr) >= mem_low); \
- vm_assert((addr) + (len) - 1 <= mem_high);
+ assert((addr) >= mem_low); \
+ assert((addr) + (len) - 1 <= mem_high);
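A minimal sketch (not the MINIX code) of why a two-statement macro like assert_range is fragile: without a do { } while(0) wrapper, only the first assert binds to an unbraced if. Names and values here are illustrative assumptions.

    #include <assert.h>

    static unsigned long mem_low, mem_high;

    /* Single-statement version; safe under an unbraced if. */
    #define assert_range(addr, len) do { \
            assert((addr) >= mem_low); \
            assert((addr) + (len) - 1 <= mem_high); \
    } while(0)

    int main(void)
    {
            mem_low = 0x1000; mem_high = 0xffff;
            if (mem_high)                   /* no braces needed */
                    assert_range(0x2000, 0x100);
            return 0;
    }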
struct hole {
struct hole *h_next; /* pointer to next entry on the list */
if(!(c)) { \
printf("holes_sanity_f:%s:%d: %s failed\n", file, line, #c); \
util_stacktrace(); \
- panic("vm_assert failed"); } \
+ panic("assert failed"); } \
}
int h, c = 0, n = 0;
}
if(vm_paged) {
- vm_assert(CLICK_SIZE == VM_PAGE_SIZE);
+ assert(CLICK_SIZE == VM_PAGE_SIZE);
mem = alloc_pages(clicks, memflags, NULL);
} else {
CHECKHOLES;
if (clicks == 0) return;
if(vm_paged) {
- vm_assert(CLICK_SIZE == VM_PAGE_SIZE);
+ assert(CLICK_SIZE == VM_PAGE_SIZE);
free_pages(base, clicks);
return;
}
addr_start_iter_least(&addravl, &iter);
while((p=addr_get_iter(&iter))) {
SLABSANE(p);
- vm_assert(p->size > 0);
+ assert(p->size > 0);
if(prevp) {
- vm_assert(prevp->addr < p->addr);
- vm_assert(prevp->addr + prevp->size < p->addr);
+ assert(prevp->addr < p->addr);
+ assert(prevp->addr + prevp->size < p->addr);
}
addr_incr_iter(&iter);
}
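A sketch of the invariant the loop above checks, on a plain array instead of the addravl iterator; the struct and types are assumptions. Adjacent free ranges are kept coalesced, so the previous range must end strictly below the next one's start.

    #include <assert.h>
    #include <stddef.h>

    struct range { unsigned long addr, size; };

    static void check_sorted(const struct range *r, size_t n)
    {
            for (size_t i = 1; i < n; i++) {
                    assert(r[i].size > 0);
                    assert(r[i-1].addr < r[i].addr);
                    /* coalesced list: previous range ends strictly below */
                    assert(r[i-1].addr + r[i-1].size < r[i].addr);
            }
    }

    int main(void)
    {
            struct range r[] = { {0x1000, 2}, {0x2000, 4} };
            check_sorted(r, 2);
            return 0;
    }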
while((pr = addr_get_iter(&iter))) {
SLABSANE(pr);
- vm_assert(pr->size > 0);
+ assert(pr->size > 0);
if(pr->size >= pages || (memflags & PAF_FIRSTBLOCK)) {
if(memflags & PAF_LOWER16MB) {
if(pr->addr + pages > boundary16)
SLABSANE(pr);
if(memflags & PAF_FIRSTBLOCK) {
- vm_assert(len);
+ assert(len);
/* Block doesn't have to be as big as requested;
* return its size though.
*/
/* Allocated chunk is off the end. */
mem = pr->addr + pr->size - pages;
- vm_assert(pr->size >= pages);
+ assert(pr->size >= pages);
if(pr->size == pages) {
pagerange_t *prr;
prr = addr_remove(&addravl, pr->addr);
- vm_assert(prr);
- vm_assert(prr == pr);
+ assert(prr);
+ assert(prr == pr);
SLABFREE(pr);
#if SANITYCHECKS
wantnodes--;
printf("pages start: %d req: %d final: %d\n",
firstpages, pages, finalpages);
}
- vm_assert(finalnodes == wantnodes);
- vm_assert(finalpages == wantpages);
+ assert(finalnodes == wantnodes);
+ assert(finalpages == wantpages);
#endif
return mem;
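A sketch of the allocation step in the hunk above: the request is carved off the top of a free range, and the range node is only removed when it is fully consumed. The struct is an assumption standing in for pagerange_t.

    #include <assert.h>

    struct range { unsigned long addr, size; };

    static unsigned long take_from_top(struct range *pr, unsigned long pages)
    {
            assert(pr->size >= pages);
            pr->size -= pages;              /* shrink the node in place... */
            return pr->addr + pr->size;     /* ...and hand out the tail */
    }

    int main(void)
    {
            struct range r = { 0x100, 8 };
            assert(take_from_top(&r, 3) == 0x105);
            assert(r.size == 5);
            return 0;
    }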
wantpages = firstpages + npages;
#endif
- vm_assert(!addr_search(&addravl, pageno, AVL_EQUAL));
+ assert(!addr_search(&addravl, pageno, AVL_EQUAL));
/* try to merge with higher neighbour */
if((pr=addr_search(&addravl, pageno+npages, AVL_EQUAL))) {
sanitycheck();
#endif
- vm_assert(npages > 0);
+ assert(npages > 0);
USE(pr, pr->addr = pageno;
pr->size = npages;);
addr_insert(&addravl, pr);
addr_start_iter(&addravl, &iter, pr->addr, AVL_EQUAL);
p = addr_get_iter(&iter);
- vm_assert(p);
- vm_assert(p == pr);
+ assert(p);
+ assert(p == pr);
addr_decr_iter(&iter);
if((p = addr_get_iter(&iter))) {
memstats(&finalnodes, &finalpages, &largest);
sanitycheck();
- vm_assert(finalnodes == wantnodes);
- vm_assert(finalpages == wantpages);
+ assert(finalnodes == wantnodes);
+ assert(finalpages == wantpages);
#endif
}
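A sketch of the coalescing idea behind free_pages above: on free, a block merges with a neighbour that starts exactly where it ends (the code probes pageno+npages for the higher neighbour, then decrements the iterator for the lower one). Simplified to two explicit nodes.

    #include <assert.h>

    struct range { unsigned long addr, size; };

    /* Merge hi into lo if adjacent; returns 1 on merge. */
    static int try_merge(struct range *lo, const struct range *hi)
    {
            if (lo->addr + lo->size != hi->addr)
                    return 0;
            lo->size += hi->size;
            return 1;
    }

    int main(void)
    {
            struct range a = { 0x10, 4 }, b = { 0x14, 8 };
            assert(try_merge(&a, &b) && a.size == 12);
            return 0;
    }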
if(!incheck)
return OK;
- vm_assert(!(addr % VM_PAGE_SIZE));
- vm_assert(!(len % VM_PAGE_SIZE));
- vm_assert(len > 0);
+ assert(!(addr % VM_PAGE_SIZE));
+ assert(!(len % VM_PAGE_SIZE));
+ assert(len > 0);
assert_range(addr, len);
pagestart = addr / VM_PAGE_SIZE;
while(pages > 0) {
phys_bytes thisaddr;
- vm_assert(pagestart > 0);
- vm_assert(pagestart < MAXPAGES);
+ assert(pagestart > 0);
+ assert(pagestart < MAXPAGES);
thisaddr = pagestart * VM_PAGE_SIZE;
if(GET_BIT(pagemap, pagestart)) {
int i;
phys_bytes rempages;
struct memlist *head = NULL, *ml;
- vm_assert(bytes > 0);
- vm_assert(!(bytes % VM_PAGE_SIZE));
+ assert(bytes > 0);
+ assert(!(bytes % VM_PAGE_SIZE));
rempages = bytes / VM_PAGE_SIZE;
return NULL;
}
- vm_assert(gotpages <= rempages);
- vm_assert(gotpages > 0);
+ assert(gotpages <= rempages);
+ assert(gotpages > 0);
if(!(SLABALLOC(ml))) {
free_mem_list(head, 1);
} while(rempages > 0);
for(ml = head; ml; ml = ml->next) {
- vm_assert(ml->phys);
- vm_assert(ml->length);
+ assert(ml->phys);
+ assert(ml->length);
}
return head;
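A sketch of the list-building pattern above: a page-aligned byte count is split into chunks, each kept as a memlist node with a base and length; every node must satisfy the asserts in the loop. The page size value is an assumption.

    #include <assert.h>
    #include <stdlib.h>

    #define VM_PAGE_SIZE 4096

    struct memlist { struct memlist *next; unsigned long phys, length; };

    static struct memlist *prepend(struct memlist *head,
            unsigned long phys, unsigned long length)
    {
            struct memlist *ml = malloc(sizeof(*ml));
            if (!ml) return NULL;
            ml->phys = phys;
            ml->length = length;
            ml->next = head;
            return ml;
    }

    int main(void)
    {
            struct memlist *head = prepend(NULL, 0x10000, 4 * VM_PAGE_SIZE);
            for (struct memlist *ml = head; ml; ml = ml->next) {
                    assert(ml->phys);
                    assert(ml->length && !(ml->length % VM_PAGE_SIZE));
            }
            free(head);
            return 0;
    }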
while(list) {
struct memlist *next;
next = list->next;
- vm_assert(!(list->phys % VM_PAGE_SIZE));
- vm_assert(!(list->length % VM_PAGE_SIZE));
+ assert(!(list->phys % VM_PAGE_SIZE));
+ assert(!(list->length % VM_PAGE_SIZE));
if(all)
free_pages(list->phys / VM_PAGE_SIZE,
list->length / VM_PAGE_SIZE);
void print_mem_list(struct memlist *list)
{
while(list) {
- vm_assert(list->length > 0);
+ assert(list->length > 0);
printf("0x%lx-0x%lx", list->phys, list->phys+list->length-1);
printf(" ");
list = list->next;
static u32_t lastv = 0;
/* Input sanity check. */
- vm_assert(vmin + I386_PAGE_SIZE >= vmin);
- vm_assert(vmax >= vmin + I386_PAGE_SIZE);
- vm_assert((vmin % I386_PAGE_SIZE) == 0);
- vm_assert((vmax % I386_PAGE_SIZE) == 0);
+ assert(vmin + I386_PAGE_SIZE >= vmin);
+ assert(vmax >= vmin + I386_PAGE_SIZE);
+ assert((vmin % I386_PAGE_SIZE) == 0);
+ assert((vmax % I386_PAGE_SIZE) == 0);
#if SANITYCHECKS
curv = ((u32_t) random()) % ((vmax - vmin)/I386_PAGE_SIZE);
while(curv < vmax) {
int pte;
- vm_assert(curv >= vmin);
- vm_assert(curv < vmax);
+ assert(curv >= vmin);
+ assert(curv < vmax);
pde = I386_VM_PDE(curv);
pte = I386_VM_PTE(curv);
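A sketch of the PDE/PTE split used above: on i386 with 4 KB pages, bits 31..22 of a linear address index the page directory and bits 21..12 the page table. The macro definitions here are abbreviations, not the MINIX headers.

    #include <assert.h>
    #include <stdint.h>

    #define I386_VM_PDE(v)  (((uint32_t)(v) >> 22) & 0x3ff)
    #define I386_VM_PTE(v)  (((uint32_t)(v) >> 12) & 0x3ff)

    int main(void)
    {
            uint32_t v = 0x00801234;        /* pde 2, pte 1, offset 0x234 */
            assert(I386_VM_PDE(v) == 2);
            assert(I386_VM_PTE(v) == 1);
            return 0;
    }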
*===========================================================================*/
PRIVATE void vm_freepages(vir_bytes vir, vir_bytes phys, int pages, int reason)
{
- vm_assert(reason >= 0 && reason < VMP_CATEGORIES);
+ assert(reason >= 0 && reason < VMP_CATEGORIES);
if(vir >= vmprocess->vm_stacktop) {
- vm_assert(!(vir % I386_PAGE_SIZE));
- vm_assert(!(phys % I386_PAGE_SIZE));
+ assert(!(vir % I386_PAGE_SIZE));
+ assert(!(phys % I386_PAGE_SIZE));
free_mem(ABS2CLICK(phys), pages);
if(pt_writemap(&vmprocess->vm_pt, arch_vir2map(vmprocess, vir),
MAP_NONE, pages*I386_PAGE_SIZE, 0, WMF_OVERWRITE) != OK)
PRIVATE void *vm_getsparepage(u32_t *phys)
{
int s;
- vm_assert(missing_spares >= 0 && missing_spares <= SPAREPAGES);
+ assert(missing_spares >= 0 && missing_spares <= SPAREPAGES);
for(s = 0; s < SPAREPAGES; s++) {
if(sparepages[s].page) {
void *sp;
*phys = sparepages[s].phys;
sparepages[s].page = NULL;
missing_spares++;
- vm_assert(missing_spares >= 0 && missing_spares <= SPAREPAGES);
+ assert(missing_spares >= 0 && missing_spares <= SPAREPAGES);
return sp;
}
}
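A sketch of the spare-page pool bookkeeping in vm_getsparepage above: taking a slot bumps missing_spares, and the counter must stay within [0, SPAREPAGES] at every step. Pool size and page size are assumptions.

    #include <assert.h>
    #include <stddef.h>

    #define SPAREPAGES 5

    static void *sparepages[SPAREPAGES];
    static int missing_spares;

    static void *get_spare(void)
    {
            assert(missing_spares >= 0 && missing_spares <= SPAREPAGES);
            for (int s = 0; s < SPAREPAGES; s++) {
                    if (sparepages[s]) {
                            void *sp = sparepages[s];
                            sparepages[s] = NULL;
                            missing_spares++;
                            return sp;
                    }
            }
            return NULL;    /* pool exhausted; counter unchanged */
    }

    int main(void)
    {
            static char page[4096];
            sparepages[0] = page;
            assert(get_spare() == page && missing_spares == 1);
            assert(get_spare() == NULL);
            return 0;
    }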
{
int s, n = 0;
static int total = 0, worst = 0;
- vm_assert(missing_spares >= 0 && missing_spares <= SPAREPAGES);
+ assert(missing_spares >= 0 && missing_spares <= SPAREPAGES);
for(s = 0; s < SPAREPAGES && missing_spares > 0; s++)
if(!sparepages[s].page) {
n++;
if((sparepages[s].page = vm_allocpage(&sparepages[s].phys,
VMP_SPARE))) {
missing_spares--;
- vm_assert(missing_spares >= 0);
- vm_assert(missing_spares <= SPAREPAGES);
+ assert(missing_spares >= 0);
+ assert(missing_spares <= SPAREPAGES);
} else {
printf("VM: warning: couldn't get new spare page\n");
}
void *ret;
pt = &vmprocess->vm_pt;
- vm_assert(reason >= 0 && reason < VMP_CATEGORIES);
+ assert(reason >= 0 && reason < VMP_CATEGORIES);
level++;
- vm_assert(level >= 1);
- vm_assert(level <= 2);
+ assert(level >= 1);
+ assert(level <= 2);
if(level > 1 || !(vmprocess->vm_flags & VMF_HASPT) || !meminit_done) {
int r;
pt = &vmprocess->vm_pt;
m = arch_vir2map(vmprocess, (vir_bytes) vir);
- vm_assert(!(m % I386_PAGE_SIZE));
+ assert(!(m % I386_PAGE_SIZE));
if(!lockflag)
flags |= I386_VM_WRITE;
u32_t pt_phys;
/* Argument must make sense. */
- vm_assert(pde >= 0 && pde < I386_VM_DIR_ENTRIES);
- vm_assert(!(flags & ~(PTF_ALLFLAGS)));
+ assert(pde >= 0 && pde < I386_VM_DIR_ENTRIES);
+ assert(!(flags & ~(PTF_ALLFLAGS)));
/* We don't expect to overwrite page directory entry, nor
* storage for the page table.
*/
- vm_assert(!(pt->pt_dir[pde] & I386_VM_PRESENT));
- vm_assert(!pt->pt_pt[pde]);
+ assert(!(pt->pt_dir[pde] & I386_VM_PRESENT));
+ assert(!pt->pt_pt[pde]);
/* Get storage for the page table. */
if(!(pt->pt_pt[pde] = vm_allocpage(&pt_phys, VMP_PAGETABLE)))
if(writemapflags & WMF_VERIFY)
verify = 1;
- vm_assert(!(bytes % I386_PAGE_SIZE));
- vm_assert(!(flags & ~(PTF_ALLFLAGS)));
+ assert(!(bytes % I386_PAGE_SIZE));
+ assert(!(flags & ~(PTF_ALLFLAGS)));
pages = bytes / I386_PAGE_SIZE;
* what's actually written into the PTE if I386_VM_PRESENT
* isn't on, so we can just write MAP_NONE into it.
*/
- vm_assert(physaddr == MAP_NONE || (flags & I386_VM_PRESENT));
- vm_assert(physaddr != MAP_NONE || !flags);
+ assert(physaddr == MAP_NONE || (flags & I386_VM_PRESENT));
+ assert(physaddr != MAP_NONE || !flags);
finalpde = I386_VM_PDE(v + I386_PAGE_SIZE * pages);
* sized leaps.
*/
for(pdecheck = I386_VM_PDE(v); pdecheck <= finalpde; pdecheck++) {
- vm_assert(pdecheck >= 0 && pdecheck < I386_VM_DIR_ENTRIES);
- vm_assert(!(pt->pt_dir[pdecheck] & I386_VM_BIGPAGE));
+ assert(pdecheck >= 0 && pdecheck < I386_VM_DIR_ENTRIES);
+ assert(!(pt->pt_dir[pdecheck] & I386_VM_BIGPAGE));
if(!(pt->pt_dir[pdecheck] & I386_VM_PRESENT)) {
int r;
if(verify) {
printf("pt_writemap verify: no pde %d\n", pdecheck);
return EFAULT;
}
- vm_assert(!pt->pt_dir[pdecheck]);
+ assert(!pt->pt_dir[pdecheck]);
if((r=pt_ptalloc(pt, pdecheck, flags)) != OK) {
/* Couldn't do (complete) mapping.
* Don't bother freeing any previously
return r;
}
}
- vm_assert(pt->pt_dir[pdecheck] & I386_VM_PRESENT);
+ assert(pt->pt_dir[pdecheck] & I386_VM_PRESENT);
}
/* Now write in them. */
int pde = I386_VM_PDE(v);
int pte = I386_VM_PTE(v);
- vm_assert(!(v % I386_PAGE_SIZE));
- vm_assert(pte >= 0 && pte < I386_VM_PT_ENTRIES);
- vm_assert(pde >= 0 && pde < I386_VM_DIR_ENTRIES);
+ assert(!(v % I386_PAGE_SIZE));
+ assert(pte >= 0 && pte < I386_VM_PT_ENTRIES);
+ assert(pde >= 0 && pde < I386_VM_DIR_ENTRIES);
/* Page table has to be there. */
- vm_assert(pt->pt_dir[pde] & I386_VM_PRESENT);
+ assert(pt->pt_dir[pde] & I386_VM_PRESENT);
/* Make sure page directory entry for this page table
* is marked present and page table entry is available.
*/
- vm_assert((pt->pt_dir[pde] & I386_VM_PRESENT));
- vm_assert(pt->pt_pt[pde]);
+ assert((pt->pt_dir[pde] & I386_VM_PRESENT));
+ assert(pt->pt_pt[pde]);
#if SANITYCHECKS
/* We don't expect to overwrite a page. */
if(!(writemapflags & (WMF_OVERWRITE|WMF_VERIFY)))
- vm_assert(!(pt->pt_pt[pde][pte] & I386_VM_PRESENT));
+ assert(!(pt->pt_pt[pde][pte] & I386_VM_PRESENT));
#endif
if(writemapflags & (WMF_WRITEFLAGSONLY|WMF_FREE)) {
physaddr = pt->pt_pt[pde][pte] & I386_VM_ADDR_MASK;
} else {
/* Write pagetable entry. */
#if SANITYCHECKS
- vm_assert(vm_addrok(pt->pt_pt[pde], 1));
+ assert(vm_addrok(pt->pt_pt[pde], 1));
#endif
pt->pt_pt[pde][pte] = entry;
}
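A sketch of the entry written by pt_writemap above: a page-aligned physical address OR'd with permission bits in the low 12 bits, recoverable via the address mask. Flag values match the usual i386 layout but are restated here as assumptions.

    #include <assert.h>
    #include <stdint.h>

    #define I386_VM_PRESENT   0x001
    #define I386_VM_WRITE     0x002
    #define I386_VM_ADDR_MASK 0xfffff000u

    int main(void)
    {
            uint32_t phys = 0x12345000, entry;
            entry = phys | I386_VM_PRESENT | I386_VM_WRITE;
            assert((entry & I386_VM_ADDR_MASK) == phys);
            assert(entry & I386_VM_PRESENT);
            return 0;
    }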
{
int p, pages, pde;
- vm_assert(!(bytes % I386_PAGE_SIZE));
+ assert(!(bytes % I386_PAGE_SIZE));
pages = bytes / I386_PAGE_SIZE;
int pde = I386_VM_PDE(v);
int pte = I386_VM_PTE(v);
- vm_assert(!(v % I386_PAGE_SIZE));
- vm_assert(pte >= 0 && pte < I386_VM_PT_ENTRIES);
- vm_assert(pde >= 0 && pde < I386_VM_DIR_ENTRIES);
+ assert(!(v % I386_PAGE_SIZE));
+ assert(pte >= 0 && pte < I386_VM_PT_ENTRIES);
+ assert(pde >= 0 && pde < I386_VM_DIR_ENTRIES);
/* Page table has to be there. */
if(!(pt->pt_dir[pde] & I386_VM_PRESENT))
/* Make sure page directory entry for this page table
* is marked present and page table entry is available.
*/
- vm_assert((pt->pt_dir[pde] & I386_VM_PRESENT) && pt->pt_pt[pde]);
+ assert((pt->pt_dir[pde] & I386_VM_PRESENT) && pt->pt_pt[pde]);
if(!(pt->pt_pt[pde][pte] & I386_VM_PRESENT)) {
return EFAULT;
hi = CLICK2ABS(vmprocess->vm_arch.vm_seg[S].mem_phys +
vmprocess->vm_arch.vm_seg[S].mem_len);
- vm_assert(!(lo % I386_PAGE_SIZE));
- vm_assert(!(hi % I386_PAGE_SIZE));
+ assert(!(lo % I386_PAGE_SIZE));
+ assert(!(hi % I386_PAGE_SIZE));
if(lo < VM_PROCSTART) {
moveup = VM_PROCSTART - lo;
- vm_assert(!(VM_PROCSTART % I386_PAGE_SIZE));
- vm_assert(!(lo % I386_PAGE_SIZE));
- vm_assert(!(moveup % I386_PAGE_SIZE));
+ assert(!(VM_PROCSTART % I386_PAGE_SIZE));
+ assert(!(lo % I386_PAGE_SIZE));
+ assert(!(moveup % I386_PAGE_SIZE));
}
/* Make new page table for ourselves, partly copied
u32_t phys;
/* Basic sanity checks. */
- vm_assert(who);
- vm_assert(who->vm_flags & VMF_INUSE);
- vm_assert(pt);
+ assert(who);
+ assert(who->vm_flags & VMF_INUSE);
+ assert(pt);
slot = who->vm_slot;
- vm_assert(slot >= 0);
- vm_assert(slot < ELEMENTS(vmproc));
- vm_assert(slot < I386_VM_PT_ENTRIES);
+ assert(slot >= 0);
+ assert(slot < ELEMENTS(vmproc));
+ assert(slot < I386_VM_PT_ENTRIES);
phys = pt->pt_dir_phys & I386_VM_ADDR_MASK;
- vm_assert(pt->pt_dir_phys == phys);
+ assert(pt->pt_dir_phys == phys);
/* Update "page directory pagetable." */
page_directories[slot] = phys | I386_VM_PRESENT|I386_VM_WRITE;
int r, i;
/* Any i386 page table needs to map in the kernel address space. */
- vm_assert(vmproc[VMP_SYSTEM].vm_flags & VMF_INUSE);
+ assert(vmproc[VMP_SYSTEM].vm_flags & VMF_INUSE);
if(bigpage_ok) {
int pde;
for(pde = 0; pde <= id_map_high_pde; pde++) {
phys_bytes addr;
addr = pde * I386_BIG_PAGE_SIZE;
- vm_assert((addr & I386_VM_ADDR_MASK) == addr);
+ assert((addr & I386_VM_ADDR_MASK) == addr);
pt->pt_dir[pde] = addr | I386_VM_PRESENT |
I386_VM_BIGPAGE | I386_VM_USER |
I386_VM_WRITE | global_bit;
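A sketch of the identity mapping built above: with 4 MB big pages, directory entry n maps the physical range starting at n * I386_BIG_PAGE_SIZE, so each address fed into a PDE is necessarily aligned and survives the address mask, which is what the assert checks.

    #include <assert.h>
    #include <stdint.h>

    #define I386_BIG_PAGE_SIZE (4u * 1024 * 1024)
    #define I386_VM_ADDR_MASK  0xfffff000u

    int main(void)
    {
            for (uint32_t pde = 0; pde < 4; pde++) {
                    uint32_t addr = pde * I386_BIG_PAGE_SIZE;
                    /* 4 MB alignment implies the mask drops nothing */
                    assert((addr & I386_VM_ADDR_MASK) == addr);
            }
            return 0;
    }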
vir_bytes datastart = CLICK2ABS(vmp->vm_arch.vm_seg[D].mem_phys);
/* Could be a text address. */
- vm_assert(datastart <= addr || textstart <= addr);
+ assert(datastart <= addr || textstart <= addr);
return addr - datastart;
}
/* No need to allocate text if it can be shared. */
if (sh_mp != NULL) {
text_bytes = 0;
- vm_assert(!vm_paged);
+ assert(!vm_paged);
}
/* Acquire the new memory. Each of the 4 parts: text, (data+bss), gap,
rmp->vm_flags &= ~VMF_HASPT;
pt_free(&rmp->vm_pt);
}
- vm_assert(!(vmpold->vm_flags & VMF_INUSE));
+ assert(!(vmpold->vm_flags & VMF_INUSE));
*vmpold = *rmp; /* copy current state. */
rmp->vm_regions = NULL; /* exec()ing process regions thrown out. */
SANITYCHECK(SCL_DETAIL);
u32_t kernel_top = 0;
#define MEMTOP(v, i) \
(vmproc[v].vm_arch.vm_seg[i].mem_phys + vmproc[v].vm_arch.vm_seg[i].mem_len)
- vm_assert(vmproc[VMP_SYSTEM].vm_flags & VMF_INUSE);
+ assert(vmproc[VMP_SYSTEM].vm_flags & VMF_INUSE);
kernel_top = MEMTOP(VMP_SYSTEM, T);
kernel_top = MAX(kernel_top, MEMTOP(VMP_SYSTEM, D));
kernel_top = MAX(kernel_top, MEMTOP(VMP_SYSTEM, S));
- vm_assert(kernel_top);
+ assert(kernel_top);
return CLICK2ABS(kernel_top);
}
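A sketch of the click arithmetic behind CLICK2ABS above: MINIX sizes memory in clicks, converted to byte addresses by a shift. The shift value of 12 (4 KB clicks on i386) is an assumption for illustration.

    #include <assert.h>

    #define CLICK_SHIFT 12
    #define CLICK2ABS(v) ((v) << CLICK_SHIFT)
    #define ABS2CLICK(a) ((a) >> CLICK_SHIFT)

    int main(void)
    {
            unsigned long clicks = 0x300;
            assert(ABS2CLICK(CLICK2ABS(clicks)) == clicks);
            assert(CLICK2ABS(1) == 4096);
            return 0;
    }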
int prealloc;
struct vir_region *reg;
- vm_assert(!(vstart % VM_PAGE_SIZE));
- vm_assert(!(text_bytes % VM_PAGE_SIZE));
- vm_assert(!(data_bytes % VM_PAGE_SIZE));
- vm_assert(!(stack_bytes % VM_PAGE_SIZE));
- vm_assert(!(gap_bytes % VM_PAGE_SIZE));
- vm_assert(!(text_start % VM_PAGE_SIZE));
- vm_assert(!(data_start % VM_PAGE_SIZE));
- vm_assert((!text_start && !data_start) || (text_start && data_start));
+ assert(!(vstart % VM_PAGE_SIZE));
+ assert(!(text_bytes % VM_PAGE_SIZE));
+ assert(!(data_bytes % VM_PAGE_SIZE));
+ assert(!(stack_bytes % VM_PAGE_SIZE));
+ assert(!(gap_bytes % VM_PAGE_SIZE));
+ assert(!(text_start % VM_PAGE_SIZE));
+ assert(!(data_start % VM_PAGE_SIZE));
+ assert((!text_start && !data_start) || (text_start && data_start));
/* Place text at start of process. */
vmp->vm_arch.vm_seg[T].mem_phys = ABS2CLICK(vstart);
vmp = &vmproc[proc]; /* parent */
vmc = &vmproc[childproc]; /* child */
- vm_assert(vmc->vm_slot == childproc);
+ assert(vmc->vm_slot == childproc);
if(vmp->vm_flags & VMF_HAS_DMA) {
printf("VM: %d has DMA memory and may not fork\n", msg->VMF_ENDPOINT);
if(vmp->vm_heap) {
vmc->vm_heap = map_region_lookup_tag(vmc, VRT_HEAP);
- vm_assert(vmc->vm_heap);
+ assert(vmc->vm_heap);
}
SANITYCHECK(SCL_DETAIL);
if(!(heap = map_region_lookup_tag(vmc, VRT_HEAP)))
panic("couldn't lookup heap");
- vm_assert(heap->phys);
+ assert(heap->phys);
if(!(stack = map_region_lookup_tag(vmc, VRT_STACK)))
panic("couldn't lookup stack");
- vm_assert(stack->phys);
+ assert(stack->phys);
/* Now copy the memory regions. */
struct vir_region *text;
if(!(text = map_region_lookup_tag(vmc, VRT_TEXT)))
panic("couldn't lookup text");
- vm_assert(text->phys);
+ assert(text->phys);
if(copy_abs2region(CLICK2ABS(vmp->vm_arch.vm_seg[T].mem_phys),
text, 0, text_bytes) != OK)
panic("couldn't copy text");
}
/* Return mapping, as seen from process. */
- vm_assert(vr);
+ assert(vr);
m->VMM_RETADDR = arch_map2vir(vmp, vr->vaddr);
panic("do_pagefaults: endpoint wrong: %d", ep);
vmp = &vmproc[p];
- vm_assert(vmp->vm_flags & VMF_INUSE);
+ assert(vmp->vm_flags & VMF_INUSE);
/* See if address is valid at all. */
if(!(region = map_lookup(vmp, addr))) {
- vm_assert(PFERR_NOPAGE(err));
+ assert(PFERR_NOPAGE(err));
printf("VM: pagefault: SIGSEGV %d bad addr 0x%lx %s\n",
ep, arch_map2vir(vmp, addr), pf_errstr(err));
if((s=sys_kill(vmp->vm_endpoint, SIGSEGV)) != OK)
/* Make sure this isn't a region that isn't supposed
* to cause pagefaults.
*/
- vm_assert(!(region->flags & VR_NOPF));
+ assert(!(region->flags & VR_NOPF));
/* We do not allow shared memory to cause pagefaults.
* These pages have to be pre-allocated.
*/
- vm_assert(!(region->flags & VR_SHARED));
+ assert(!(region->flags & VR_SHARED));
/* If process was writing, see if it's writable. */
if(!(region->flags & VR_WRITABLE) && wr) {
continue;
}
- vm_assert(addr >= region->vaddr);
+ assert(addr >= region->vaddr);
offset = addr - region->vaddr;
/* Access is allowed; handle it. */
r = EFAULT;
} else {
vir_bytes offset, sublen;
- vm_assert(region->vaddr <= mem);
- vm_assert(!(region->flags & VR_NOPF));
- vm_assert(!(region->vaddr % VM_PAGE_SIZE));
+ assert(region->vaddr <= mem);
+ assert(!(region->flags & VR_NOPF));
+ assert(!(region->vaddr % VM_PAGE_SIZE));
offset = mem - region->vaddr;
sublen = len;
if(offset + sublen > region->length)
int rw;
struct phys_block *pb = pr->ph;
- vm_assert(!(vr->vaddr % VM_PAGE_SIZE));
- vm_assert(!(pb->length % VM_PAGE_SIZE));
- vm_assert(!(pr->offset % VM_PAGE_SIZE));
- vm_assert(pb->refcount > 0);
+ assert(!(vr->vaddr % VM_PAGE_SIZE));
+ assert(!(pb->length % VM_PAGE_SIZE));
+ assert(!(pr->offset % VM_PAGE_SIZE));
+ assert(pb->refcount > 0);
if(WRITABLE(vr, pb))
rw = PTF_WRITE;
SANITYCHECK(SCL_FUNCTIONS);
/* We must be in paged mode to be able to do this. */
- vm_assert(vm_paged);
+ assert(vm_paged);
/* Length must be reasonable. */
- vm_assert(length > 0);
+ assert(length > 0);
/* Special case: allow caller to set maxv to 0 meaning 'I want
* it to be mapped in right here.'
}
/* Basic input sanity checks. */
- vm_assert(!(length % VM_PAGE_SIZE));
+ assert(!(length % VM_PAGE_SIZE));
if(minv >= maxv) {
printf("VM: 1 minv: 0x%lx maxv: 0x%lx length: 0x%lx\n",
minv, maxv, length);
}
- vm_assert(minv < maxv);
- vm_assert(minv + length <= maxv);
+ assert(minv < maxv);
+ assert(minv + length <= maxv);
#define FREEVRANGE(rangestart, rangeend, foundcode) { \
vir_bytes frstart = (rangestart), frend = (rangeend); \
}
#if SANITYCHECKS
- if(prevregion) vm_assert(prevregion->vaddr < startv);
+ if(prevregion) assert(prevregion->vaddr < startv);
#endif
/* However we got it, startv must be in the requested range. */
- vm_assert(startv >= minv);
- vm_assert(startv < maxv);
- vm_assert(startv + length <= maxv);
+ assert(startv >= minv);
+ assert(startv < maxv);
+ assert(startv + length <= maxv);
if (prev)
*prev = prevregion;
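A sketch of the postcondition asserted above for a found slot: the whole [startv, startv+length) window must fit inside [minv, maxv). Top-down placement is an assumption used to keep the example short; the real slot search walks the region list.

    #include <assert.h>

    static unsigned long find_slot(unsigned long minv, unsigned long maxv,
            unsigned long length)
    {
            unsigned long startv = maxv - length;   /* place at the top */
            assert(startv >= minv);
            assert(startv < maxv);
            assert(startv + length <= maxv);
            return startv;
    }

    int main(void)
    {
            assert(find_slot(0x1000, 0x9000, 0x2000) == 0x7000);
            return 0;
    }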
struct phys_region *ph;
physr_avl *phavl;
- vm_assert(!(length % VM_PAGE_SIZE));
+ assert(!(length % VM_PAGE_SIZE));
SANITYCHECK(SCL_FUNCTIONS);
/* If we know what we're going to map to, map it right away. */
if(what != MAP_NONE) {
- vm_assert(what); /* mapping in 0 is unlikely to be right */
- vm_assert(!(what % VM_PAGE_SIZE));
- vm_assert(!(startv % VM_PAGE_SIZE));
- vm_assert(!(mapflags & MF_PREALLOC));
+ assert(what); /* mapping in 0 is unlikely to be right */
+ assert(!(what % VM_PAGE_SIZE));
+ assert(!(startv % VM_PAGE_SIZE));
+ assert(!(mapflags & MF_PREALLOC));
if(map_new_physblock(vmp, newregion, 0, length,
what, PAF_CLEAR, 0) != OK) {
printf("VM: map_new_physblock failed\n");
/* Link it. */
if(prevregion) {
- vm_assert(prevregion->vaddr < newregion->vaddr);
+ assert(prevregion->vaddr < newregion->vaddr);
USE(newregion, newregion->next = prevregion->next;);
USE(prevregion, prevregion->next = newregion;);
} else {
}
#if SANITYCHECKS
- vm_assert(startv == newregion->vaddr);
+ assert(startv == newregion->vaddr);
if(newregion->next) {
- vm_assert(newregion->vaddr < newregion->next->vaddr);
+ assert(newregion->vaddr < newregion->next->vaddr);
}
#endif
int remap = 0;
pb = pr->ph;
- vm_assert(pb->refcount > 0);
+ assert(pb->refcount > 0);
USE(pb, pb->refcount--;);
- vm_assert(pb->refcount >= 0);
+ assert(pb->refcount >= 0);
if(pb->firstregion == pr) {
USE(pb, pb->firstregion = pr->next_ph_list;);
for(others = pb->firstregion; others;
others = others->next_ph_list) {
- vm_assert(others->ph == pb);
+ assert(others->ph == pb);
if(others->next_ph_list == pr) {
USE(others, others->next_ph_list = pr->next_ph_list;);
break;
}
}
- vm_assert(others); /* Otherwise, wasn't on the list. */
+ assert(others); /* Otherwise, wasn't on the list. */
}
if(pb->refcount == 0) {
- vm_assert(!pb->firstregion);
+ assert(!pb->firstregion);
if(region->flags & VR_ANON) {
free_mem(ABS2CLICK(pb->phys),
ABS2CLICK(pb->length));
}
n++;
}
- vm_assert(n == pb->refcount);
+ assert(n == pb->refcount);
}
}
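A sketch of the refcount discipline in pb_unreferenced above: a physical block is freed only when the last referencing region drops it. The freed flag stands in for the free_mem() call; types are assumptions.

    #include <assert.h>

    struct phys_block { int refcount; int freed; };

    static void unref(struct phys_block *pb)
    {
            assert(pb->refcount > 0);
            if (--pb->refcount == 0)
                    pb->freed = 1;  /* stand-in for free_mem() */
    }

    int main(void)
    {
            struct phys_block pb = { 2, 0 };
            unref(&pb);
            assert(!pb.freed);      /* still referenced elsewhere */
            unref(&pb);
            assert(pb.freed);
            return 0;
    }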
physr_start_iter(region->phys, iter, offset, AVL_EQUAL);
ph = physr_get_iter(iter);
- vm_assert(ph);
- vm_assert(ph->offset == offset);
+ assert(ph);
+ assert(ph->offset == offset);
return ph;
}
for(others = pb->firstregion; others;
others = others->next_ph_list) {
- vm_assert(others->ph == pb);
+ assert(others->ph == pb);
}
physr_incr_iter(&iter);
}
SLABFREE(pr);
} else {
vir_bytes sublen;
- vm_assert(len > pr->offset);
- vm_assert(len < pr->offset + pr->ph->length);
- vm_assert(pr->ph->refcount > 0);
+ assert(len > pr->offset);
+ assert(len < pr->offset + pr->ph->length);
+ assert(pr->ph->refcount > 0);
sublen = len - pr->offset;
- vm_assert(!(sublen % VM_PAGE_SIZE));
- vm_assert(sublen < pr->ph->length);
+ assert(!(sublen % VM_PAGE_SIZE));
+ assert(sublen < pr->ph->length);
if(pr->ph->refcount > 1) {
int r;
if(!(pr = map_clone_ph_block(vmp, region,
pr, &iter)))
return ENOMEM;
}
- vm_assert(pr->ph->refcount == 1);
+ assert(pr->ph->refcount == 1);
if(!(region->flags & VR_DIRECT)) {
free_mem(ABS2CLICK(pr->ph->phys), ABS2CLICK(sublen));
}
USE(pr->ph,
pr->ph->phys += sublen;
pr->ph->length -= sublen;);
- vm_assert(!(pr->offset % VM_PAGE_SIZE));
- vm_assert(!(pr->ph->phys % VM_PAGE_SIZE));
- vm_assert(!(pr->ph->length % VM_PAGE_SIZE));
+ assert(!(pr->offset % VM_PAGE_SIZE));
+ assert(!(pr->ph->phys % VM_PAGE_SIZE));
+ assert(!(pr->ph->length % VM_PAGE_SIZE));
}
}
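A sketch of the low-end shrink above: freeing the first sublen bytes of a block advances its base and shortens it, preserving page alignment throughout, which is exactly what the trailing asserts verify.

    #include <assert.h>

    #define VM_PAGE_SIZE 4096

    struct phys_block { unsigned long phys, length; };

    static void shrink_low(struct phys_block *pb, unsigned long sublen)
    {
            assert(!(sublen % VM_PAGE_SIZE) && sublen < pb->length);
            pb->phys += sublen;
            pb->length -= sublen;
            assert(!(pb->phys % VM_PAGE_SIZE) && !(pb->length % VM_PAGE_SIZE));
    }

    int main(void)
    {
            struct phys_block pb = { 0x10000, 4 * VM_PAGE_SIZE };
            shrink_low(&pb, VM_PAGE_SIZE);
            assert(pb.phys == 0x11000 && pb.length == 3 * VM_PAGE_SIZE);
            return 0;
    }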
SANITYCHECK(SCL_FUNCTIONS);
- vm_assert(!(length % VM_PAGE_SIZE));
+ assert(!(length % VM_PAGE_SIZE));
if((region->flags & VR_CONTIG) &&
(start_offset > 0 || length < region->length)) {
given.next = NULL;
memlist = &given;
used_memlist = 0;
- vm_assert(given.phys);
- vm_assert(given.length);
+ assert(given.phys);
+ assert(given.length);
}
r = OK;
for(ml = memlist; ml; ml = ml->next) {
- vm_assert(ml->phys);
- vm_assert(ml->length);
+ assert(ml->phys);
+ assert(ml->length);
}
for(ml = memlist; ml; ml = ml->next) {
break;
}
- vm_assert(ml->phys);
- vm_assert(ml->length);
+ assert(ml->phys);
+ assert(ml->length);
/* New physical block. */
- vm_assert(!(ml->phys % VM_PAGE_SIZE));
+ assert(!(ml->phys % VM_PAGE_SIZE));
USE(newpb,
newpb->phys = ml->phys;
offset += ml->length;
if((physr = physr_search(region->phys, offset,
AVL_EQUAL))) {
- vm_assert(physr->ph->refcount == 1);
+ assert(physr->ph->refcount == 1);
pb_unreferenced(region, physr);
physr_remove(region->phys, physr->offset);
SLABFREE(physr);
}
}
- } else vm_assert(mapped == length);
+ } else assert(mapped == length);
/* Always clean up the memlist itself; even if everything
* worked, we're not using the memlist nodes any more. And
SANITYCHECK(SCL_DETAIL);
SLABSANE(ph);
SLABSANE(ph->ph);
- vm_assert(ph->ph->refcount > 1);
+ assert(ph->ph->refcount > 1);
pb_unreferenced(region, ph);
- vm_assert(ph->ph->refcount >= 1);
+ assert(ph->ph->refcount >= 1);
physr_remove(region->phys, offset);
SLABFREE(ph);
/* Put new free memory in. */
allocflags = vrallocflags(region->flags);
- vm_assert(!(allocflags & PAF_CONTIG));
- vm_assert(!(allocflags & PAF_CLEAR));
+ assert(!(allocflags & PAF_CONTIG));
+ assert(!(allocflags & PAF_CLEAR));
if(map_new_physblock(vmp, region, offset, length,
MAP_NONE, allocflags, written) != OK) {
panic("copy_abs2region failed, no good reason for that");
newpr = physr_search(region->phys, offset, AVL_EQUAL);
- vm_assert(newpr);
- vm_assert(newpr->offset == offset);
+ assert(newpr);
+ assert(newpr->offset == offset);
if(iter) {
physr_start_iter(region->phys, iter, offset, AVL_EQUAL);
- vm_assert(physr_get_iter(iter) == newpr);
+ assert(physr_get_iter(iter) == newpr);
}
SANITYCHECK(SCL_FUNCTIONS);
struct phys_region *ph;
int r = OK;
- vm_assert(offset >= 0);
- vm_assert(offset < region->length);
+ assert(offset >= 0);
+ assert(offset < region->length);
- vm_assert(region->flags & VR_ANON);
- vm_assert(!(region->vaddr % VM_PAGE_SIZE));
+ assert(region->flags & VR_ANON);
+ assert(!(region->vaddr % VM_PAGE_SIZE));
virpage = offset - offset % VM_PAGE_SIZE;
(ph->offset <= offset && offset < ph->offset + ph->ph->length)) {
phys_bytes blockoffset = ph->offset;
/* Pagefault in existing block. Do copy-on-write. */
- vm_assert(write);
- vm_assert(region->flags & VR_WRITABLE);
- vm_assert(ph->ph->refcount > 0);
+ assert(write);
+ assert(region->flags & VR_WRITABLE);
+ assert(ph->ph->refcount > 0);
if(WRITABLE(region, ph->ph)) {
r = map_ph_writept(vmp, region, ph);
SANITYCHECK(SCL_FUNCTIONS);
- vm_assert(region->flags & VR_ANON);
- vm_assert(!(region->vaddr % VM_PAGE_SIZE));
- vm_assert(!(offset % VM_PAGE_SIZE));
- vm_assert(!(length % VM_PAGE_SIZE));
- vm_assert(!write || (region->flags & VR_WRITABLE));
+ assert(region->flags & VR_ANON);
+ assert(!(region->vaddr % VM_PAGE_SIZE));
+ assert(!(offset % VM_PAGE_SIZE));
+ assert(!(length % VM_PAGE_SIZE));
+ assert(!write || (region->flags & VR_WRITABLE));
physr_start_iter(region->phys, &iter, offset, AVL_LESS_EQUAL);
physr = physr_get_iter(&iter);
SANITYCHECK(SCL_DETAIL);
if(write) {
- vm_assert(physr->ph->refcount > 0);
+ assert(physr->ph->refcount > 0);
if(!WRITABLE(region, physr->ph)) {
if(!(physr = map_clone_ph_block(vmp, region,
physr, &iter))) {
#endif
physr_insert(newvr->phys, newph);
#if SANITYCHECKS
- vm_assert(countregions(vr) == cr);
+ assert(countregions(vr) == cr);
#endif
physr_incr_iter(&iter);
}
#if SANITYCHECKS
- vm_assert(countregions(vr) == countregions(newvr));
+ assert(countregions(vr) == countregions(newvr));
#endif
return newvr;
phys_bytes offset, phys_bytes len)
{
- vm_assert(destregion);
- vm_assert(destregion->phys);
+ assert(destregion);
+ assert(destregion->phys);
while(len > 0) {
phys_bytes sublen, suboffset;
struct phys_region *ph;
- vm_assert(destregion);
- vm_assert(destregion->phys);
+ assert(destregion);
+ assert(destregion->phys);
if(!(ph = physr_search(destregion->phys, offset, AVL_LESS_EQUAL))) {
printf("VM: copy_abs2region: no phys region found (1).\n");
return EFAULT;
}
- vm_assert(ph->offset <= offset);
+ assert(ph->offset <= offset);
if(ph->offset+ph->ph->length <= offset) {
printf("VM: copy_abs2region: no phys region found (2).\n");
return EFAULT;
}
suboffset = offset - ph->offset;
- vm_assert(suboffset < ph->ph->length);
+ assert(suboffset < ph->ph->length);
sublen = len;
if(sublen > ph->ph->length - suboffset)
sublen = ph->ph->length - suboffset;
- vm_assert(suboffset + sublen <= ph->ph->length);
+ assert(suboffset + sublen <= ph->ph->length);
if(ph->ph->refcount != 1) {
printf("VM: copy_abs2region: no phys region found (3).\n");
return EFAULT;
/* Check two physregions both are nonnull,
* are different, and match physblocks.
*/
- vm_assert(new_ph);
- vm_assert(orig_ph);
- vm_assert(orig_ph != new_ph);
+ assert(new_ph);
+ assert(orig_ph);
+ assert(orig_ph != new_ph);
pb = orig_ph->ph;
- vm_assert(pb == new_ph->ph);
+ assert(pb == new_ph->ph);
/* Link in new physregion. */
- vm_assert(!new_ph->next_ph_list);
+ assert(!new_ph->next_ph_list);
USE(new_ph, new_ph->next_ph_list = pb->firstregion;);
USE(pb, pb->firstregion = new_ph;);
/* Increase phys block refcount */
- vm_assert(pb->refcount > 0);
+ assert(pb->refcount > 0);
USE(pb, pb->refcount++;);
- vm_assert(pb->refcount > 1);
+ assert(pb->refcount > 1);
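A sketch of the sharing step above: a cloned phys_region is pushed onto the block's region list and the block's refcount is bumped, so afterwards the count must exceed one. Struct layouts are trimmed-down assumptions.

    #include <assert.h>
    #include <stddef.h>

    struct phys_region;
    struct phys_block { int refcount; struct phys_region *firstregion; };
    struct phys_region { struct phys_block *ph; struct phys_region *next_ph_list; };

    static void link_region(struct phys_block *pb, struct phys_region *new_ph)
    {
            assert(!new_ph->next_ph_list);
            new_ph->next_ph_list = pb->firstregion; /* push onto list */
            pb->firstregion = new_ph;
            assert(pb->refcount > 0);
            pb->refcount++;
    }

    int main(void)
    {
            struct phys_block pb = { 1, NULL };
            struct phys_region a = { &pb, NULL };
            link_region(&pb, &a);
            assert(pb.refcount == 2 && pb.firstregion == &a);
            return 0;
    }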
/* If the phys block has been shared as SMAP,
* do the regular copy. */
physr_incr_iter(&iter_orig);
physr_incr_iter(&iter_new);
}
- vm_assert(!physr_get_iter(&iter_new));
+ assert(!physr_get_iter(&iter_new));
prevvr = newvr;
}
/* We assume these are the first regions to be mapped to
* make the function a bit simpler (free all regions on error).
*/
- vm_assert(!vmp->vm_regions);
- vm_assert(vmproc[VMP_SYSTEM].vm_flags & VMF_INUSE);
- vm_assert(!(KERNEL_TEXT % VM_PAGE_SIZE));
- vm_assert(!(KERNEL_TEXT_LEN % VM_PAGE_SIZE));
- vm_assert(!(KERNEL_DATA % VM_PAGE_SIZE));
- vm_assert(!(KERNEL_DATA_LEN % VM_PAGE_SIZE));
+ assert(!vmp->vm_regions);
+ assert(vmproc[VMP_SYSTEM].vm_flags & VMF_INUSE);
+ assert(!(KERNEL_TEXT % VM_PAGE_SIZE));
+ assert(!(KERNEL_TEXT_LEN % VM_PAGE_SIZE));
+ assert(!(KERNEL_DATA % VM_PAGE_SIZE));
+ assert(!(KERNEL_DATA_LEN % VM_PAGE_SIZE));
if(!(vr = map_page_region(vmp, KERNEL_TEXT, 0, KERNEL_TEXT_LEN,
KERNEL_TEXT, VR_DIRECT | VR_WRITABLE | VR_NOPF, 0)) ||
{
vir_bytes end;
- vm_assert(vr);
- vm_assert(vr->flags & VR_ANON);
- vm_assert(!(delta % VM_PAGE_SIZE));
+ assert(vr);
+ assert(vr->flags & VR_ANON);
+ assert(!(delta % VM_PAGE_SIZE));
if(!delta) return OK;
end = vr->vaddr + vr->length;
- vm_assert(end >= vr->vaddr);
+ assert(end >= vr->vaddr);
if(end + delta <= end) {
printf("VM: strange delta 0x%lx\n", delta);
*========================================================================*/
PUBLIC int map_region_shrink(struct vir_region *vr, vir_bytes delta)
{
- vm_assert(vr);
- vm_assert(vr->flags & VR_ANON);
- vm_assert(!(delta % VM_PAGE_SIZE));
+ assert(vr);
+ assert(vr->flags & VR_ANON);
+ assert(!(delta % VM_PAGE_SIZE));
#if 0
printf("VM: ignoring region shrink\n");
* same amount.
*/
while((pr = physr_get_iter(&iter))) {
- vm_assert(pr->offset >= len);
+ assert(pr->offset >= len);
USE(pr, pr->offset -= len;);
physr_incr_iter(&iter);
}
SANITYCHECK(SCL_FUNCTIONS);
- vm_assert(region->flags & VR_SHARED);
+ assert(region->flags & VR_SHARED);
/* da is handled differently */
if (!da)
prev = NULL;
/* round up to page size */
- vm_assert(!(size % VM_PAGE_SIZE));
+ assert(!(size % VM_PAGE_SIZE));
startv = region_find_slot(dvmp, dst_addr, VM_DATATOP, size, &prev);
if (startv == (vir_bytes) -1) {
printf("map_remap: search 0x%x...\n", dst_addr);
vr->flags = region->flags;
vr->tag = VRT_NONE;
vr->parent = dvmp;);
- vm_assert(vr->flags & VR_SHARED);
+ assert(vr->flags & VR_SHARED);
if (prev) {
USE(vr,
physr_start_iter_least(vr->phys, &iter);
while((ph = physr_get_iter(&iter))) {
struct phys_block *pb = ph->ph;
- vm_assert(!ph->next_ph_list);
+ assert(!ph->next_ph_list);
USE(ph, ph->next_ph_list = pb->firstregion;);
USE(pb, pb->firstregion = ph;);
USE(pb, pb->refcount++;);
physr_start_iter_least(vr->phys, &iter);
ph = physr_get_iter(&iter);
- vm_assert(ph);
- vm_assert(ph->ph);
+ assert(ph);
+ assert(ph->ph);
if (r)
*r = ph->ph->phys;
physr_start_iter_least(vr->phys, &iter);
ph = physr_get_iter(&iter);
- vm_assert(ph);
- vm_assert(ph->ph);
+ assert(ph);
+ assert(ph->ph);
if (cnt)
*cnt = ph->ph->refcount;
vmd = &vmproc[p];
vrd = map_lookup(vmd, virt_d);
- vm_assert(vrd);
+ assert(vrd);
/* Search for the first phys region in the destination process. */
off = virt_d - vrd->vaddr;
end = off + length;
while((pr = physr_get_iter(&iter)) && off < end) {
pb = pr->ph;
- vm_assert(pb->refcount > 1);
- vm_assert(pb->share_flag == PBSH_SMAP);
+ assert(pb->refcount > 1);
+ assert(pb->share_flag == PBSH_SMAP);
if(!(pr = map_clone_ph_block(vmd, vrd, pr, &iter)))
return ENOMEM;
vmd = &vmproc[p];
vrs = map_lookup(vms, virt_s);
- vm_assert(vrs);
+ assert(vrs);
vrd = map_lookup(vmd, virt_d);
- vm_assert(vrd);
+ assert(vrd);
/* Linear address -> offset from start of vir region. */
offset_s = virt_s - vrs->vaddr;
}
/* Adjust page tables. */
- vm_assert(src_vmp->vm_flags & VMF_HASPT);
- vm_assert(dst_vmp->vm_flags & VMF_HASPT);
+ assert(src_vmp->vm_flags & VMF_HASPT);
+ assert(dst_vmp->vm_flags & VMF_HASPT);
pt_bind(&src_vmp->vm_pt, src_vmp);
pt_bind(&dst_vmp->vm_pt, dst_vmp);
if((r=sys_vmctl(SELF, VMCTL_FLUSHTLB, 0)) != OK) {
#define SANITYCHECK(l) if(!nocheck && ((l) <= vm_sanitychecklevel)) { \
struct vmproc *vmpr; \
- vm_assert(incheck == 0); \
+ assert(incheck == 0); \
incheck = 1; \
usedpages_reset(); \
slab_sanitycheck(__FILE__, __LINE__); \
} \
} \
map_sanitycheck(__FILE__, __LINE__); \
- vm_assert(incheck == 1); \
+ assert(incheck == 1); \
incheck = 0; \
}
#define BITEL(f, b) (f)->sdh.usebits[(b)/ELBITS]
-#define OFF(f, b) vm_assert(!GETBIT(f, b))
-#define ON(f, b) vm_assert(GETBIT(f, b))
+#define OFF(f, b) assert(!GETBIT(f, b))
+#define ON(f, b) assert(GETBIT(f, b))
#if SANITYCHECKS
#define SLABDATAWRITABLE(data, wr) do { \
- vm_assert(data->sdh.writable == WRITABLE_NONE); \
- vm_assert(wr != WRITABLE_NONE); \
+ assert(data->sdh.writable == WRITABLE_NONE); \
+ assert(wr != WRITABLE_NONE); \
vm_pagelock(data, 0); \
data->sdh.writable = wr; \
} while(0)
#define SLABDATAUNWRITABLE(data) do { \
- vm_assert(data->sdh.writable != WRITABLE_NONE); \
+ assert(data->sdh.writable != WRITABLE_NONE); \
data->sdh.writable = WRITABLE_NONE; \
vm_pagelock(data, 1); \
} while(0)
#define GETSLAB(b, s) { \
int i; \
- vm_assert((b) >= MINSIZE); \
+ assert((b) >= MINSIZE); \
i = (b) - MINSIZE; \
- vm_assert((i) < SLABSIZES); \
- vm_assert((i) >= 0); \
+ assert((i) < SLABSIZES); \
+ assert((i) >= 0); \
s = &slabs[i]; \
}
/* move head of list l1 to list of l2 in slabheader sl. */
#define MOVEHEAD(sl, l1, l2) { \
struct slabdata *t; \
- vm_assert(LH(sl,l1)); \
+ assert(LH(sl,l1)); \
REMOVEHEAD(sl, l1, t); \
ADDHEAD(t, sl, l2); \
}
#define REMOVEHEAD(sl, list, to) { \
struct slabdata *dat; \
dat = (to) = LH(sl, list); \
- vm_assert(dat); \
+ assert(dat); \
LH(sl, list) = dat->sdh.next; \
UNLINKNODE(dat); \
}
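A sketch of what REMOVEHEAD above does, as a function rather than a macro: pop the head of a slab list, asserting non-emptiness first. The node type is a stand-in for struct slabdata.

    #include <assert.h>
    #include <stddef.h>

    struct slabdata { struct slabdata *next; };

    static struct slabdata *removehead(struct slabdata **list)
    {
            struct slabdata *dat = *list;
            assert(dat);                    /* list must be non-empty */
            *list = dat->next;
            dat->next = NULL;               /* unlink the node */
            return dat;
    }

    int main(void)
    {
            struct slabdata a = { NULL }, b = { &a };
            struct slabdata *list = &b;
            assert(removehead(&list) == &b && list == &a);
            return 0;
    }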
struct slabdata *n;
phys_bytes p;
- vm_assert(sizeof(*n) == VM_PAGE_SIZE);
+ assert(sizeof(*n) == VM_PAGE_SIZE);
if(!(n = vm_allocpage(&p, VMP_SLAB))) {
printf("newslabdata: vm_allocpage failed\n");
/* Retrieve entry in slabs[]. */
GETSLAB(bytes, s);
- vm_assert(s);
+ assert(s);
/* To make the common case more common, make space in the 'used'
* queue first.
}
SLABSANITYCHECK(SCL_DETAIL);
- vm_assert(s);
+ assert(s);
firstused = LH(s, LIST_USED);
- vm_assert(firstused);
+ assert(firstused);
#if SANITYCHECKS
- vm_assert(firstused->sdh.magic1 == MAGIC1);
- vm_assert(firstused->sdh.magic2 == MAGIC2);
+ assert(firstused->sdh.magic1 == MAGIC1);
+ assert(firstused->sdh.magic2 == MAGIC2);
#endif
- vm_assert(firstused->sdh.nused < ITEMSPERPAGE(bytes));
+ assert(firstused->sdh.nused < ITEMSPERPAGE(bytes));
for(i = firstused->sdh.freeguess;
count < ITEMSPERPAGE(bytes); count++, i++) {
nojunkwarning++;
slabunlock(ret, bytes);
nojunkwarning--;
- vm_assert(!nojunkwarning);
+ assert(!nojunkwarning);
*(u32_t *) ret = NOJUNK;
slablock(ret, bytes);
#endif
nojunkwarning++;
slablock(mem, bytes);
nojunkwarning--;
- vm_assert(!nojunkwarning);
+ assert(!nojunkwarning);
#endif
/* Free this data. */
/* Check if this slab changes lists. */
if(f->sdh.nused == 0) {
/* Now become FREE; must've been USED */
- vm_assert(f->sdh.list == LIST_USED);
+ assert(f->sdh.list == LIST_USED);
UNLINKNODE(f);
if(f == LH(s, LIST_USED))
LH(s, LIST_USED) = f->sdh.next;
SLABSANITYCHECK(SCL_DETAIL);
} else if(f->sdh.nused == ITEMSPERPAGE(bytes)-1) {
/* Now become USED; must've been FULL */
- vm_assert(f->sdh.list == LIST_FULL);
+ assert(f->sdh.list == LIST_FULL);
UNLINKNODE(f);
if(f == LH(s, LIST_FULL))
LH(s, LIST_FULL) = f->sdh.next;
SLABSANITYCHECK(SCL_DETAIL);
} else {
/* Stay USED */
- vm_assert(f->sdh.list == LIST_USED);
+ assert(f->sdh.list == LIST_USED);
}
SLABSANITYCHECK(SCL_FUNCTIONS);
#define ELEMENTS(a) (sizeof(a)/sizeof((a)[0]))
-#if SANITYCHECKS
-#define vm_assert(cond) { \
- if(vm_sanitychecklevel > 0 && !(cond)) { \
- printf("VM:%s:%d: vm_assert failed: %s\n", \
- __FILE__, __LINE__, #cond); \
- panic("vm_assert failed"); \
- } \
- }
-#else
-#define vm_assert(cond) ;
-#endif
-
#endif
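For reference, a sketch of the behaviour the rename relies on: the standard assert() from <assert.h> compiles away when NDEBUG is defined, which is what the removed SANITYCHECKS-guarded vm_assert() emulated.

    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
    #ifdef NDEBUG
            puts("asserts compiled out");
    #else
            assert(1 + 1 == 2);
            puts("asserts active");
    #endif
            return 0;
    }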
#define MINSTACKREGION (64*1024*1024)
/* If so, this level: */
-#define SCL_NONE 0 /* No sanity checks - vm_assert()s only. */
+#define SCL_NONE 0 /* No sanity checks - assert()s only. */
#define SCL_TOP 1 /* Main loop and other high-level places. */
#define SCL_FUNCTIONS 2 /* Function entry/exit. */
#define SCL_DETAIL 3 /* Detailed steps. */