vm: use assert() instead of vm_assert(); remove vm_assert().
author Ben Gras <ben@minix3.org>
Mon, 12 Apr 2010 12:37:28 +0000 (12:37 +0000)
committer Ben Gras <ben@minix3.org>
Mon, 12 Apr 2010 12:37:28 +0000 (12:37 +0000)
13 files changed:
servers/vm/alloc.c
servers/vm/arch/i386/pagetable.c
servers/vm/arch/i386/vm.c
servers/vm/exec.c
servers/vm/fork.c
servers/vm/mmap.c
servers/vm/pagefaults.c
servers/vm/region.c
servers/vm/rs.c
servers/vm/sanitycheck.h
servers/vm/slaballoc.c
servers/vm/util.h
servers/vm/vm.h
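
For reference, the vm_assert() removed by this commit was a VM-local macro: it checked its condition only when the server was built with SANITYCHECKS and vm_sanitychecklevel was positive, whereas the standard assert(3) that replaces it is controlled by NDEBUG at compile time. The sketch below contrasts the two behaviors; it is illustrative only, the vm_assert body mirrors the definition deleted from servers/vm/util.h further down, and abort() stands in for VM's panic().

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

static int vm_sanitychecklevel = 1;	/* runtime knob consulted by the old macro */

#if SANITYCHECKS
#define vm_assert(cond) {						\
	if(vm_sanitychecklevel > 0 && !(cond)) {			\
		printf("VM:%s:%d: vm_assert failed: %s\n",		\
			__FILE__, __LINE__, #cond);			\
		abort();	/* VM calls panic() here */		\
	}								\
	}
#else
#define vm_assert(cond)	;	/* no check at all without SANITYCHECKS */
#endif

int main(void)
{
	vm_assert(2 + 2 == 4);	/* checked only when built with -DSANITYCHECKS=1 */
	assert(2 + 2 == 4);	/* checked unless compiled with -DNDEBUG */
	return 0;
}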

servers/vm/alloc.c
index 8a903ae86bb238b0b0c2dc8481ee493e145e1e19..4e1cb65deb22ff5f9244f07e5412782bfcaef153 100644
@@ -49,8 +49,8 @@ addr_avl addravl;
 /* Used for sanity check. */
 PRIVATE phys_bytes mem_low, mem_high;
 #define assert_range(addr, len)                        \
-       vm_assert((addr) >= mem_low);                   \
-       vm_assert((addr) + (len) - 1 <= mem_high);
+       assert((addr) >= mem_low);                      \
+       assert((addr) + (len) - 1 <= mem_high);
 
 struct hole {
        struct hole *h_next;          /* pointer to next entry on the list */
@@ -104,7 +104,7 @@ int line;
   if(!(c)) { \
        printf("holes_sanity_f:%s:%d: %s failed\n", file, line, #c); \
        util_stacktrace();      \
-       panic("vm_assert failed"); } \
+       panic("assert failed"); } \
   }    
 
        int h, c = 0, n = 0;
@@ -184,7 +184,7 @@ PUBLIC phys_clicks alloc_mem(phys_clicks clicks, u32_t memflags)
   }
 
   if(vm_paged) {
-       vm_assert(CLICK_SIZE == VM_PAGE_SIZE);
+       assert(CLICK_SIZE == VM_PAGE_SIZE);
        mem = alloc_pages(clicks, memflags, NULL);
   } else {
 CHECKHOLES;
@@ -255,7 +255,7 @@ CHECKHOLES;
   if (clicks == 0) return;
 
   if(vm_paged) {
-       vm_assert(CLICK_SIZE == VM_PAGE_SIZE);
+       assert(CLICK_SIZE == VM_PAGE_SIZE);
        free_pages(base, clicks);
        return;
   }
@@ -408,10 +408,10 @@ PRIVATE void sanitycheck(void)
        addr_start_iter_least(&addravl, &iter);
        while((p=addr_get_iter(&iter))) {
                SLABSANE(p);
-               vm_assert(p->size > 0);
+               assert(p->size > 0);
                if(prevp) {
-                       vm_assert(prevp->addr < p->addr);
-                       vm_assert(prevp->addr + p->addr < p->addr);
+                       assert(prevp->addr < p->addr);
+                       assert(prevp->addr + p->addr < p->addr);
                }
                addr_incr_iter(&iter);
        }
@@ -471,7 +471,7 @@ PRIVATE PUBLIC phys_bytes alloc_pages(int pages, int memflags, phys_bytes *len)
 
        while((pr = addr_get_iter(&iter))) {
                SLABSANE(pr);
-               vm_assert(pr->size > 0);
+               assert(pr->size > 0);
                if(pr->size >= pages || (memflags & PAF_FIRSTBLOCK)) {
                        if(memflags & PAF_LOWER16MB) {
                                if(pr->addr + pages > boundary16)
@@ -509,7 +509,7 @@ PRIVATE PUBLIC phys_bytes alloc_pages(int pages, int memflags, phys_bytes *len)
        SLABSANE(pr);
 
        if(memflags & PAF_FIRSTBLOCK) {
-               vm_assert(len);
+               assert(len);
                /* block doesn't have to be as big as requested;
                 * return its size though.
                 */
@@ -527,12 +527,12 @@ PRIVATE PUBLIC phys_bytes alloc_pages(int pages, int memflags, phys_bytes *len)
        /* Allocated chunk is off the end. */
        mem = pr->addr + pr->size - pages;
 
-       vm_assert(pr->size >= pages);
+       assert(pr->size >= pages);
        if(pr->size == pages) {
                pagerange_t *prr;
                prr = addr_remove(&addravl, pr->addr);
-               vm_assert(prr);
-               vm_assert(prr == pr);
+               assert(prr);
+               assert(prr == pr);
                SLABFREE(pr);
 #if SANITYCHECKS
                wantnodes--;
@@ -556,8 +556,8 @@ PRIVATE PUBLIC phys_bytes alloc_pages(int pages, int memflags, phys_bytes *len)
                printf("pages start: %d req: %d final: %d\n",
                        firstpages, pages, finalpages);
        }
-       vm_assert(finalnodes == wantnodes);
-       vm_assert(finalpages == wantpages);
+       assert(finalnodes == wantnodes);
+       assert(finalpages == wantpages);
 #endif
 
        return mem;
@@ -581,7 +581,7 @@ PRIVATE void free_pages(phys_bytes pageno, int npages)
        wantpages = firstpages + npages;
 #endif
 
-       vm_assert(!addr_search(&addravl, pageno, AVL_EQUAL));
+       assert(!addr_search(&addravl, pageno, AVL_EQUAL));
 
        /* try to merge with higher neighbour */
        if((pr=addr_search(&addravl, pageno+npages, AVL_EQUAL))) {
@@ -598,7 +598,7 @@ PRIVATE void free_pages(phys_bytes pageno, int npages)
 
                sanitycheck();
 #endif
-               vm_assert(npages > 0);
+               assert(npages > 0);
                USE(pr, pr->addr = pageno;
                         pr->size = npages;);
                addr_insert(&addravl, pr);
@@ -609,8 +609,8 @@ PRIVATE void free_pages(phys_bytes pageno, int npages)
 
        addr_start_iter(&addravl, &iter, pr->addr, AVL_EQUAL);
        p = addr_get_iter(&iter);
-       vm_assert(p);
-       vm_assert(p == pr);
+       assert(p);
+       assert(p == pr);
 
        addr_decr_iter(&iter);
        if((p = addr_get_iter(&iter))) {
@@ -630,8 +630,8 @@ PRIVATE void free_pages(phys_bytes pageno, int npages)
        memstats(&finalnodes, &finalpages,  &largest);
        sanitycheck();
 
-       vm_assert(finalnodes == wantnodes);
-       vm_assert(finalpages == wantpages);
+       assert(finalnodes == wantnodes);
+       assert(finalpages == wantpages);
 #endif
 }
 
@@ -864,9 +864,9 @@ int usedpages_add_f(phys_bytes addr, phys_bytes len, char *file, int line)
        if(!incheck)
                return OK;
 
-       vm_assert(!(addr % VM_PAGE_SIZE));
-       vm_assert(!(len % VM_PAGE_SIZE));
-       vm_assert(len > 0);
+       assert(!(addr % VM_PAGE_SIZE));
+       assert(!(len % VM_PAGE_SIZE));
+       assert(len > 0);
        assert_range(addr, len);
 
        pagestart = addr / VM_PAGE_SIZE;
@@ -874,8 +874,8 @@ int usedpages_add_f(phys_bytes addr, phys_bytes len, char *file, int line)
 
        while(pages > 0) {
                phys_bytes thisaddr;
-               vm_assert(pagestart > 0);
-               vm_assert(pagestart < MAXPAGES);
+               assert(pagestart > 0);
+               assert(pagestart < MAXPAGES);
                thisaddr = pagestart * VM_PAGE_SIZE;
                if(GET_BIT(pagemap, pagestart)) {
                        int i;
@@ -901,8 +901,8 @@ struct memlist *alloc_mem_in_list(phys_bytes bytes, u32_t flags)
        phys_bytes rempages;
        struct memlist *head = NULL, *ml;
 
-       vm_assert(bytes > 0);
-       vm_assert(!(bytes % VM_PAGE_SIZE));
+       assert(bytes > 0);
+       assert(!(bytes % VM_PAGE_SIZE));
 
        rempages = bytes / VM_PAGE_SIZE;
 
@@ -923,8 +923,8 @@ struct memlist *alloc_mem_in_list(phys_bytes bytes, u32_t flags)
                        return NULL;
                }
 
-               vm_assert(gotpages <= rempages);
-               vm_assert(gotpages > 0);
+               assert(gotpages <= rempages);
+               assert(gotpages > 0);
 
                if(!(SLABALLOC(ml))) {
                        free_mem_list(head, 1);
@@ -941,8 +941,8 @@ struct memlist *alloc_mem_in_list(phys_bytes bytes, u32_t flags)
        } while(rempages > 0);
 
        for(ml = head; ml; ml = ml->next) {
-               vm_assert(ml->phys);
-               vm_assert(ml->length);
+               assert(ml->phys);
+               assert(ml->length);
        }
 
        return head;
@@ -956,8 +956,8 @@ void free_mem_list(struct memlist *list, int all)
        while(list) {
                struct memlist *next;
                next = list->next;
-               vm_assert(!(list->phys % VM_PAGE_SIZE));
-               vm_assert(!(list->length % VM_PAGE_SIZE));
+               assert(!(list->phys % VM_PAGE_SIZE));
+               assert(!(list->length % VM_PAGE_SIZE));
                if(all)
                        free_pages(list->phys / VM_PAGE_SIZE,
                        list->length / VM_PAGE_SIZE);
@@ -972,7 +972,7 @@ void free_mem_list(struct memlist *list, int all)
 void print_mem_list(struct memlist *list)
 {
        while(list) {
-               vm_assert(list->length > 0);
+               assert(list->length > 0);
                printf("0x%lx-0x%lx", list->phys, list->phys+list->length-1);
                printf(" ");
                list = list->next;
servers/vm/arch/i386/pagetable.c
index 5946b8986270724e52b3ea5f1cd344b9f7564bb5..cc3e8f776c968877c3da641cf50b11abf213da8f 100644
@@ -160,10 +160,10 @@ PRIVATE u32_t findhole(pt_t *pt, u32_t vmin, u32_t vmax)
        static u32_t lastv = 0;
 
        /* Input sanity check. */
-       vm_assert(vmin + I386_PAGE_SIZE >= vmin);
-       vm_assert(vmax >= vmin + I386_PAGE_SIZE);
-       vm_assert((vmin % I386_PAGE_SIZE) == 0);
-       vm_assert((vmax % I386_PAGE_SIZE) == 0);
+       assert(vmin + I386_PAGE_SIZE >= vmin);
+       assert(vmax >= vmin + I386_PAGE_SIZE);
+       assert((vmin % I386_PAGE_SIZE) == 0);
+       assert((vmax % I386_PAGE_SIZE) == 0);
 
 #if SANITYCHECKS
        curv = ((u32_t) random()) % ((vmax - vmin)/I386_PAGE_SIZE);
@@ -180,8 +180,8 @@ PRIVATE u32_t findhole(pt_t *pt, u32_t vmin, u32_t vmax)
        while(curv < vmax) {
                int pte;
 
-               vm_assert(curv >= vmin);
-               vm_assert(curv < vmax);
+               assert(curv >= vmin);
+               assert(curv < vmax);
 
                pde = I386_VM_PDE(curv);
                pte = I386_VM_PTE(curv);
@@ -210,10 +210,10 @@ PRIVATE u32_t findhole(pt_t *pt, u32_t vmin, u32_t vmax)
  *===========================================================================*/
 PRIVATE void vm_freepages(vir_bytes vir, vir_bytes phys, int pages, int reason)
 {
-       vm_assert(reason >= 0 && reason < VMP_CATEGORIES);
+       assert(reason >= 0 && reason < VMP_CATEGORIES);
        if(vir >= vmprocess->vm_stacktop) {
-               vm_assert(!(vir % I386_PAGE_SIZE)); 
-               vm_assert(!(phys % I386_PAGE_SIZE)); 
+               assert(!(vir % I386_PAGE_SIZE)); 
+               assert(!(phys % I386_PAGE_SIZE)); 
                free_mem(ABS2CLICK(phys), pages);
                if(pt_writemap(&vmprocess->vm_pt, arch_vir2map(vmprocess, vir),
                        MAP_NONE, pages*I386_PAGE_SIZE, 0, WMF_OVERWRITE) != OK)
@@ -239,7 +239,7 @@ PRIVATE void vm_freepages(vir_bytes vir, vir_bytes phys, int pages, int reason)
 PRIVATE void *vm_getsparepage(u32_t *phys)
 {
        int s;
-       vm_assert(missing_spares >= 0 && missing_spares <= SPAREPAGES);
+       assert(missing_spares >= 0 && missing_spares <= SPAREPAGES);
        for(s = 0; s < SPAREPAGES; s++) {
                if(sparepages[s].page) {
                        void *sp;
@@ -247,7 +247,7 @@ PRIVATE void *vm_getsparepage(u32_t *phys)
                        *phys = sparepages[s].phys;
                        sparepages[s].page = NULL;
                        missing_spares++;
-                       vm_assert(missing_spares >= 0 && missing_spares <= SPAREPAGES);
+                       assert(missing_spares >= 0 && missing_spares <= SPAREPAGES);
                        return sp;
                }
        }
@@ -261,15 +261,15 @@ PRIVATE void *vm_checkspares(void)
 {
        int s, n = 0;
        static int total = 0, worst = 0;
-       vm_assert(missing_spares >= 0 && missing_spares <= SPAREPAGES);
+       assert(missing_spares >= 0 && missing_spares <= SPAREPAGES);
        for(s = 0; s < SPAREPAGES && missing_spares > 0; s++)
            if(!sparepages[s].page) {
                n++;
                if((sparepages[s].page = vm_allocpage(&sparepages[s].phys, 
                        VMP_SPARE))) {
                        missing_spares--;
-                       vm_assert(missing_spares >= 0);
-                       vm_assert(missing_spares <= SPAREPAGES);
+                       assert(missing_spares >= 0);
+                       assert(missing_spares <= SPAREPAGES);
                } else {
                        printf("VM: warning: couldn't get new spare page\n");
                }
@@ -294,12 +294,12 @@ PUBLIC void *vm_allocpage(phys_bytes *phys, int reason)
        void *ret;
 
        pt = &vmprocess->vm_pt;
-       vm_assert(reason >= 0 && reason < VMP_CATEGORIES);
+       assert(reason >= 0 && reason < VMP_CATEGORIES);
 
        level++;
 
-       vm_assert(level >= 1);
-       vm_assert(level <= 2);
+       assert(level >= 1);
+       assert(level <= 2);
 
        if(level > 1 || !(vmprocess->vm_flags & VMF_HASPT) || !meminit_done) {
                int r;
@@ -370,7 +370,7 @@ PUBLIC void vm_pagelock(void *vir, int lockflag)
        pt = &vmprocess->vm_pt;
        m = arch_vir2map(vmprocess, (vir_bytes) vir);
 
-       vm_assert(!(m % I386_PAGE_SIZE));
+       assert(!(m % I386_PAGE_SIZE));
 
        if(!lockflag)
                flags |= I386_VM_WRITE;
@@ -443,14 +443,14 @@ PRIVATE int pt_ptalloc(pt_t *pt, int pde, u32_t flags)
        u32_t pt_phys;
 
        /* Argument must make sense. */
-       vm_assert(pde >= 0 && pde < I386_VM_DIR_ENTRIES);
-       vm_assert(!(flags & ~(PTF_ALLFLAGS)));
+       assert(pde >= 0 && pde < I386_VM_DIR_ENTRIES);
+       assert(!(flags & ~(PTF_ALLFLAGS)));
 
        /* We don't expect to overwrite page directory entry, nor
         * storage for the page table.
         */
-       vm_assert(!(pt->pt_dir[pde] & I386_VM_PRESENT));
-       vm_assert(!pt->pt_pt[pde]);
+       assert(!(pt->pt_dir[pde] & I386_VM_PRESENT));
+       assert(!pt->pt_pt[pde]);
 
        /* Get storage for the page table. */
         if(!(pt->pt_pt[pde] = vm_allocpage(&pt_phys, VMP_PAGETABLE)))
@@ -510,8 +510,8 @@ PUBLIC int pt_writemap(pt_t *pt, vir_bytes v, phys_bytes physaddr,
        if(writemapflags & WMF_VERIFY)
                verify = 1;
 
-       vm_assert(!(bytes % I386_PAGE_SIZE));
-       vm_assert(!(flags & ~(PTF_ALLFLAGS)));
+       assert(!(bytes % I386_PAGE_SIZE));
+       assert(!(flags & ~(PTF_ALLFLAGS)));
 
        pages = bytes / I386_PAGE_SIZE;
 
@@ -519,8 +519,8 @@ PUBLIC int pt_writemap(pt_t *pt, vir_bytes v, phys_bytes physaddr,
         * what's actually written into the PTE if I386_VM_PRESENT
         * isn't on, so we can just write MAP_NONE into it.
         */
-       vm_assert(physaddr == MAP_NONE || (flags & I386_VM_PRESENT));
-       vm_assert(physaddr != MAP_NONE || !flags);
+       assert(physaddr == MAP_NONE || (flags & I386_VM_PRESENT));
+       assert(physaddr != MAP_NONE || !flags);
 
        finalpde = I386_VM_PDE(v + I386_PAGE_SIZE * pages);
 
@@ -530,15 +530,15 @@ PUBLIC int pt_writemap(pt_t *pt, vir_bytes v, phys_bytes physaddr,
         * sized leaps.
         */
        for(pdecheck = I386_VM_PDE(v); pdecheck <= finalpde; pdecheck++) {
-               vm_assert(pdecheck >= 0 && pdecheck < I386_VM_DIR_ENTRIES);
-               vm_assert(!(pt->pt_dir[pdecheck] & I386_VM_BIGPAGE));
+               assert(pdecheck >= 0 && pdecheck < I386_VM_DIR_ENTRIES);
+               assert(!(pt->pt_dir[pdecheck] & I386_VM_BIGPAGE));
                if(!(pt->pt_dir[pdecheck] & I386_VM_PRESENT)) {
                        int r;
                        if(verify) {
                                printf("pt_writemap verify: no pde %d\n", pdecheck);
                                return EFAULT;
                        }
-                       vm_assert(!pt->pt_dir[pdecheck]);
+                       assert(!pt->pt_dir[pdecheck]);
                        if((r=pt_ptalloc(pt, pdecheck, flags)) != OK) {
                                /* Couldn't do (complete) mapping.
                                 * Don't bother freeing any previously
@@ -551,7 +551,7 @@ PUBLIC int pt_writemap(pt_t *pt, vir_bytes v, phys_bytes physaddr,
                                return r;
                        }
                }
-               vm_assert(pt->pt_dir[pdecheck] & I386_VM_PRESENT);
+               assert(pt->pt_dir[pdecheck] & I386_VM_PRESENT);
        }
 
        /* Now write in them. */
@@ -560,23 +560,23 @@ PUBLIC int pt_writemap(pt_t *pt, vir_bytes v, phys_bytes physaddr,
                int pde = I386_VM_PDE(v);
                int pte = I386_VM_PTE(v);
 
-               vm_assert(!(v % I386_PAGE_SIZE));
-               vm_assert(pte >= 0 && pte < I386_VM_PT_ENTRIES);
-               vm_assert(pde >= 0 && pde < I386_VM_DIR_ENTRIES);
+               assert(!(v % I386_PAGE_SIZE));
+               assert(pte >= 0 && pte < I386_VM_PT_ENTRIES);
+               assert(pde >= 0 && pde < I386_VM_DIR_ENTRIES);
 
                /* Page table has to be there. */
-               vm_assert(pt->pt_dir[pde] & I386_VM_PRESENT);
+               assert(pt->pt_dir[pde] & I386_VM_PRESENT);
 
                /* Make sure page directory entry for this page table
                 * is marked present and page table entry is available.
                 */
-               vm_assert((pt->pt_dir[pde] & I386_VM_PRESENT));
-               vm_assert(pt->pt_pt[pde]);
+               assert((pt->pt_dir[pde] & I386_VM_PRESENT));
+               assert(pt->pt_pt[pde]);
 
 #if SANITYCHECKS
                /* We don't expect to overwrite a page. */
                if(!(writemapflags & (WMF_OVERWRITE|WMF_VERIFY)))
-                       vm_assert(!(pt->pt_pt[pde][pte] & I386_VM_PRESENT));
+                       assert(!(pt->pt_pt[pde][pte] & I386_VM_PRESENT));
 #endif
                if(writemapflags & (WMF_WRITEFLAGSONLY|WMF_FREE)) {
                        physaddr = pt->pt_pt[pde][pte] & I386_VM_ADDR_MASK;
@@ -610,7 +610,7 @@ PUBLIC int pt_writemap(pt_t *pt, vir_bytes v, phys_bytes physaddr,
                } else {
                        /* Write pagetable entry. */
 #if SANITYCHECKS
-                       vm_assert(vm_addrok(pt->pt_pt[pde], 1));
+                       assert(vm_addrok(pt->pt_pt[pde], 1));
 #endif
                        pt->pt_pt[pde][pte] = entry;
                }
@@ -630,7 +630,7 @@ PUBLIC int pt_checkrange(pt_t *pt, vir_bytes v,  size_t bytes,
 {
        int p, pages, pde;
 
-       vm_assert(!(bytes % I386_PAGE_SIZE));
+       assert(!(bytes % I386_PAGE_SIZE));
 
        pages = bytes / I386_PAGE_SIZE;
 
@@ -639,9 +639,9 @@ PUBLIC int pt_checkrange(pt_t *pt, vir_bytes v,  size_t bytes,
                int pde = I386_VM_PDE(v);
                int pte = I386_VM_PTE(v);
 
-               vm_assert(!(v % I386_PAGE_SIZE));
-               vm_assert(pte >= 0 && pte < I386_VM_PT_ENTRIES);
-               vm_assert(pde >= 0 && pde < I386_VM_DIR_ENTRIES);
+               assert(!(v % I386_PAGE_SIZE));
+               assert(pte >= 0 && pte < I386_VM_PT_ENTRIES);
+               assert(pde >= 0 && pde < I386_VM_DIR_ENTRIES);
 
                /* Page table has to be there. */
                if(!(pt->pt_dir[pde] & I386_VM_PRESENT))
@@ -650,7 +650,7 @@ PUBLIC int pt_checkrange(pt_t *pt, vir_bytes v,  size_t bytes,
                /* Make sure page directory entry for this page table
                 * is marked present and page table entry is available.
                 */
-               vm_assert((pt->pt_dir[pde] & I386_VM_PRESENT) && pt->pt_pt[pde]);
+               assert((pt->pt_dir[pde] & I386_VM_PRESENT) && pt->pt_pt[pde]);
 
                if(!(pt->pt_pt[pde][pte] & I386_VM_PRESENT)) {
                        return EFAULT;
@@ -767,14 +767,14 @@ PUBLIC void pt_init(phys_bytes usedlimit)
         hi = CLICK2ABS(vmprocess->vm_arch.vm_seg[S].mem_phys +
                 vmprocess->vm_arch.vm_seg[S].mem_len);
                   
-        vm_assert(!(lo % I386_PAGE_SIZE)); 
-        vm_assert(!(hi % I386_PAGE_SIZE));
+        assert(!(lo % I386_PAGE_SIZE)); 
+        assert(!(hi % I386_PAGE_SIZE));
  
         if(lo < VM_PROCSTART) {
                 moveup = VM_PROCSTART - lo;
-                vm_assert(!(VM_PROCSTART % I386_PAGE_SIZE));
-                vm_assert(!(lo % I386_PAGE_SIZE));
-                vm_assert(!(moveup % I386_PAGE_SIZE));
+                assert(!(VM_PROCSTART % I386_PAGE_SIZE));
+                assert(!(lo % I386_PAGE_SIZE));
+                assert(!(moveup % I386_PAGE_SIZE));
         }
         
         /* Make new page table for ourselves, partly copied
@@ -925,17 +925,17 @@ PUBLIC int pt_bind(pt_t *pt, struct vmproc *who)
        u32_t phys;
 
        /* Basic sanity checks. */
-       vm_assert(who);
-       vm_assert(who->vm_flags & VMF_INUSE);
-       vm_assert(pt);
+       assert(who);
+       assert(who->vm_flags & VMF_INUSE);
+       assert(pt);
 
        slot = who->vm_slot;
-       vm_assert(slot >= 0);
-       vm_assert(slot < ELEMENTS(vmproc));
-       vm_assert(slot < I386_VM_PT_ENTRIES);
+       assert(slot >= 0);
+       assert(slot < ELEMENTS(vmproc));
+       assert(slot < I386_VM_PT_ENTRIES);
 
        phys = pt->pt_dir_phys & I386_VM_ADDR_MASK;
-       vm_assert(pt->pt_dir_phys == phys);
+       assert(pt->pt_dir_phys == phys);
 
        /* Update "page directory pagetable." */
        page_directories[slot] = phys | I386_VM_PRESENT|I386_VM_WRITE;
@@ -972,14 +972,14 @@ PUBLIC int pt_mapkernel(pt_t *pt)
        int r, i;
 
         /* Any i386 page table needs to map in the kernel address space. */
-        vm_assert(vmproc[VMP_SYSTEM].vm_flags & VMF_INUSE);
+        assert(vmproc[VMP_SYSTEM].vm_flags & VMF_INUSE);
 
        if(bigpage_ok) {
                int pde;
                for(pde = 0; pde <= id_map_high_pde; pde++) {
                        phys_bytes addr;
                        addr = pde * I386_BIG_PAGE_SIZE;
-                       vm_assert((addr & I386_VM_ADDR_MASK) == addr);
+                       assert((addr & I386_VM_ADDR_MASK) == addr);
                        pt->pt_dir[pde] = addr | I386_VM_PRESENT |
                                I386_VM_BIGPAGE | I386_VM_USER |
                                I386_VM_WRITE | global_bit;
servers/vm/arch/i386/vm.c
index 804ea334bf11b40a8777106753e5ebb087914549..16079e6dad0625c05237e9c5eb4295ccb87e9490 100644
@@ -36,7 +36,7 @@ PUBLIC vir_bytes arch_map2vir(struct vmproc *vmp, vir_bytes addr)
        vir_bytes datastart = CLICK2ABS(vmp->vm_arch.vm_seg[D].mem_phys);
 
        /* Could be a text address. */
-       vm_assert(datastart <= addr || textstart <= addr);
+       assert(datastart <= addr || textstart <= addr);
 
        return addr - datastart;
 }
servers/vm/exec.c
index 5bcac2c0871b0cfd2de4f416871f4cbe841a6e6d..7c8465f630830dd2687a412ca482281e2dd22826 100644
@@ -193,7 +193,7 @@ vir_bytes *stack_top;               /* top of process stack */
   /* No need to allocate text if it can be shared. */
   if (sh_mp != NULL) {
        text_bytes = 0;
-       vm_assert(!vm_paged);
+       assert(!vm_paged);
   }
 
   /* Acquire the new memory.  Each of the 4 parts: text, (data+bss), gap,
@@ -224,7 +224,7 @@ SANITYCHECK(SCL_DETAIL);
          rmp->vm_flags &= ~VMF_HASPT;
          pt_free(&rmp->vm_pt);
   }
-  vm_assert(!(vmpold->vm_flags & VMF_INUSE));
+  assert(!(vmpold->vm_flags & VMF_INUSE));
   *vmpold = *rmp;      /* copy current state. */
   rmp->vm_regions = NULL; /* exec()ing process regions thrown out. */
 SANITYCHECK(SCL_DETAIL);
@@ -385,11 +385,11 @@ PUBLIC phys_bytes find_kernel_top(void)
        u32_t kernel_top = 0;
 #define MEMTOP(v, i) \
   (vmproc[v].vm_arch.vm_seg[i].mem_phys + vmproc[v].vm_arch.vm_seg[i].mem_len)
-       vm_assert(vmproc[VMP_SYSTEM].vm_flags & VMF_INUSE);
+       assert(vmproc[VMP_SYSTEM].vm_flags & VMF_INUSE);
        kernel_top = MEMTOP(VMP_SYSTEM, T);
        kernel_top = MAX(kernel_top, MEMTOP(VMP_SYSTEM, D));
        kernel_top = MAX(kernel_top, MEMTOP(VMP_SYSTEM, S));
-       vm_assert(kernel_top);
+       assert(kernel_top);
 
        return CLICK2ABS(kernel_top);
 }
@@ -414,14 +414,14 @@ PUBLIC int proc_new(struct vmproc *vmp,
        int prealloc;
        struct vir_region *reg;
 
-       vm_assert(!(vstart % VM_PAGE_SIZE));
-       vm_assert(!(text_bytes % VM_PAGE_SIZE));
-       vm_assert(!(data_bytes % VM_PAGE_SIZE));
-       vm_assert(!(stack_bytes % VM_PAGE_SIZE));
-       vm_assert(!(gap_bytes % VM_PAGE_SIZE));
-       vm_assert(!(text_start % VM_PAGE_SIZE));
-       vm_assert(!(data_start % VM_PAGE_SIZE));
-       vm_assert((!text_start && !data_start) || (text_start && data_start));
+       assert(!(vstart % VM_PAGE_SIZE));
+       assert(!(text_bytes % VM_PAGE_SIZE));
+       assert(!(data_bytes % VM_PAGE_SIZE));
+       assert(!(stack_bytes % VM_PAGE_SIZE));
+       assert(!(gap_bytes % VM_PAGE_SIZE));
+       assert(!(text_start % VM_PAGE_SIZE));
+       assert(!(data_start % VM_PAGE_SIZE));
+       assert((!text_start && !data_start) || (text_start && data_start));
 
        /* Place text at start of process. */
        vmp->vm_arch.vm_seg[T].mem_phys = ABS2CLICK(vstart);
servers/vm/fork.c
index 2ab6dac957b12610a1140a9c03c84b22db27d53a..aeafa0cbc6dff072fce48795c13f86f359872d5f 100644
@@ -56,7 +56,7 @@ PUBLIC int do_fork(message *msg)
 
   vmp = &vmproc[proc];         /* parent */
   vmc = &vmproc[childproc];    /* child */
-  vm_assert(vmc->vm_slot == childproc);
+  assert(vmc->vm_slot == childproc);
 
   if(vmp->vm_flags & VMF_HAS_DMA) {
        printf("VM: %d has DMA memory and may not fork\n", msg->VMF_ENDPOINT);
@@ -96,7 +96,7 @@ PUBLIC int do_fork(message *msg)
 
        if(vmp->vm_heap) {
                vmc->vm_heap = map_region_lookup_tag(vmc, VRT_HEAP);
-               vm_assert(vmc->vm_heap);
+               assert(vmc->vm_heap);
        }
 
        SANITYCHECK(SCL_DETAIL);
@@ -149,10 +149,10 @@ PUBLIC int do_fork(message *msg)
 
        if(!(heap = map_region_lookup_tag(vmc, VRT_HEAP)))
                panic("couldn't lookup heap");
-       vm_assert(heap->phys);
+       assert(heap->phys);
        if(!(stack = map_region_lookup_tag(vmc, VRT_STACK)))
                panic("couldn't lookup stack");
-       vm_assert(stack->phys);
+       assert(stack->phys);
 
        /* Now copy the memory regions. */
 
@@ -160,7 +160,7 @@ PUBLIC int do_fork(message *msg)
                struct vir_region *text;
                if(!(text = map_region_lookup_tag(vmc, VRT_TEXT)))
                        panic("couldn't lookup text");
-               vm_assert(text->phys);
+               assert(text->phys);
                if(copy_abs2region(CLICK2ABS(vmp->vm_arch.vm_seg[T].mem_phys),
                        text, 0, text_bytes) != OK)
                                panic("couldn't copy text");
servers/vm/mmap.c
index 8a5507ca2b4b764ecd07b190866ce15728744b7e..c85847a6dfd1337f8568486e366fa37509bae9a5 100644
@@ -82,7 +82,7 @@ PUBLIC int do_mmap(message *m)
        }
 
        /* Return mapping, as seen from process. */
-       vm_assert(vr);
+       assert(vr);
        m->VMM_RETADDR = arch_map2vir(vmp, vr->vaddr);
 
 
servers/vm/pagefaults.c
index 4cac55aa47604164c4d35282a3dd3c2ce296c0e5..3624d5f89d2e8b7fbdf0ad82a94691736f17cec8 100644
@@ -67,11 +67,11 @@ PUBLIC void do_pagefaults(void)
                        panic("do_pagefaults: endpoint wrong: %d", ep);
 
                vmp = &vmproc[p];
-               vm_assert(vmp->vm_flags & VMF_INUSE);
+               assert(vmp->vm_flags & VMF_INUSE);
 
                /* See if address is valid at all. */
                if(!(region = map_lookup(vmp, addr))) {
-                       vm_assert(PFERR_NOPAGE(err));
+                       assert(PFERR_NOPAGE(err));
                        printf("VM: pagefault: SIGSEGV %d bad addr 0x%lx %s\n", 
                                ep, arch_map2vir(vmp, addr), pf_errstr(err));
                        if((s=sys_kill(vmp->vm_endpoint, SIGSEGV)) != OK)
@@ -84,12 +84,12 @@ PUBLIC void do_pagefaults(void)
                /* Make sure this isn't a region that isn't supposed
                 * to cause pagefaults.
                 */
-               vm_assert(!(region->flags & VR_NOPF));
+               assert(!(region->flags & VR_NOPF));
 
                /* We do not allow shared memory to cause pagefaults.
                 * These pages have to be pre-allocated.
                 */
-               vm_assert(!(region->flags & VR_SHARED));
+               assert(!(region->flags & VR_SHARED));
 
                /* If process was writing, see if it's writable. */
                if(!(region->flags & VR_WRITABLE) && wr) {
@@ -102,7 +102,7 @@ PUBLIC void do_pagefaults(void)
                        continue;
                }
 
-               vm_assert(addr >= region->vaddr);
+               assert(addr >= region->vaddr);
                offset = addr - region->vaddr;
 
                /* Access is allowed; handle it. */
@@ -195,9 +195,9 @@ int handle_memory(struct vmproc *vmp, vir_bytes mem, vir_bytes len, int wrflag)
                        r = EFAULT;
                } else {
                        vir_bytes offset, sublen;
-                       vm_assert(region->vaddr <= mem);
-                       vm_assert(!(region->flags & VR_NOPF));
-                       vm_assert(!(region->vaddr % VM_PAGE_SIZE));
+                       assert(region->vaddr <= mem);
+                       assert(!(region->flags & VR_NOPF));
+                       assert(!(region->vaddr % VM_PAGE_SIZE));
                        offset = mem - region->vaddr;
                        sublen = len;
                        if(offset + sublen > region->length)
servers/vm/region.c
index 4dda6609ed8c55fda582250b318aa9bb4489565d..b3f2b35650bbd1a8b64ca0847b07375561b92002 100644
@@ -242,10 +242,10 @@ PRIVATE int map_ph_writept(struct vmproc *vmp, struct vir_region *vr,
        int rw;
        struct phys_block *pb = pr->ph;
 
-       vm_assert(!(vr->vaddr % VM_PAGE_SIZE));
-       vm_assert(!(pb->length % VM_PAGE_SIZE));
-       vm_assert(!(pr->offset % VM_PAGE_SIZE));
-       vm_assert(pb->refcount > 0);
+       assert(!(vr->vaddr % VM_PAGE_SIZE));
+       assert(!(pb->length % VM_PAGE_SIZE));
+       assert(!(pr->offset % VM_PAGE_SIZE));
+       assert(pb->refcount > 0);
 
        if(WRITABLE(vr, pb))
                rw = PTF_WRITE;
@@ -283,10 +283,10 @@ PRIVATE vir_bytes region_find_slot(struct vmproc *vmp,
        SANITYCHECK(SCL_FUNCTIONS);
 
        /* We must be in paged mode to be able to do this. */
-       vm_assert(vm_paged);
+       assert(vm_paged);
 
        /* Length must be reasonable. */
-       vm_assert(length > 0);
+       assert(length > 0);
 
        /* Special case: allow caller to set maxv to 0 meaning 'I want
         * it to be mapped in right here.'
@@ -304,13 +304,13 @@ PRIVATE vir_bytes region_find_slot(struct vmproc *vmp,
         }
 
        /* Basic input sanity checks. */
-       vm_assert(!(length % VM_PAGE_SIZE));
+       assert(!(length % VM_PAGE_SIZE));
        if(minv >= maxv) {
                printf("VM: 1 minv: 0x%lx maxv: 0x%lx length: 0x%lx\n",
                        minv, maxv, length);
        }
-       vm_assert(minv < maxv);
-       vm_assert(minv + length <= maxv);
+       assert(minv < maxv);
+       assert(minv + length <= maxv);
 
 #define FREEVRANGE(rangestart, rangeend, foundcode) {          \
        vir_bytes frstart = (rangestart), frend = (rangeend);   \
@@ -342,13 +342,13 @@ PRIVATE vir_bytes region_find_slot(struct vmproc *vmp,
        }
 
 #if SANITYCHECKS
-       if(prevregion) vm_assert(prevregion->vaddr < startv);
+       if(prevregion) assert(prevregion->vaddr < startv);
 #endif
 
        /* However we got it, startv must be in the requested range. */
-       vm_assert(startv >= minv);
-       vm_assert(startv < maxv);
-       vm_assert(startv + length <= maxv);
+       assert(startv >= minv);
+       assert(startv < maxv);
+       assert(startv + length <= maxv);
 
        if (prev)
                *prev = prevregion;
@@ -373,7 +373,7 @@ int mapflags;
        struct phys_region *ph;
        physr_avl *phavl;
 
-       vm_assert(!(length % VM_PAGE_SIZE));
+       assert(!(length % VM_PAGE_SIZE));
 
        SANITYCHECK(SCL_FUNCTIONS);
 
@@ -407,10 +407,10 @@ USE(newregion,
 
        /* If we know what we're going to map to, map it right away. */
        if(what != MAP_NONE) {
-               vm_assert(what);        /* mapping in 0 is unlikely to be right */
-               vm_assert(!(what % VM_PAGE_SIZE));
-               vm_assert(!(startv % VM_PAGE_SIZE));
-               vm_assert(!(mapflags & MF_PREALLOC));
+               assert(what);   /* mapping in 0 is unlikely to be right */
+               assert(!(what % VM_PAGE_SIZE));
+               assert(!(startv % VM_PAGE_SIZE));
+               assert(!(mapflags & MF_PREALLOC));
                if(map_new_physblock(vmp, newregion, 0, length,
                        what, PAF_CLEAR, 0) != OK) {
                        printf("VM: map_new_physblock failed\n");
@@ -433,7 +433,7 @@ USE(newregion,
 
        /* Link it. */
        if(prevregion) {
-               vm_assert(prevregion->vaddr < newregion->vaddr);
+               assert(prevregion->vaddr < newregion->vaddr);
                USE(newregion, newregion->next = prevregion->next;);
                USE(prevregion, prevregion->next = newregion;);
        } else {
@@ -442,9 +442,9 @@ USE(newregion,
        }
 
 #if SANITYCHECKS
-       vm_assert(startv == newregion->vaddr);
+       assert(startv == newregion->vaddr);
        if(newregion->next) {
-               vm_assert(newregion->vaddr < newregion->next->vaddr);
+               assert(newregion->vaddr < newregion->next->vaddr);
        }
 #endif
 
@@ -462,9 +462,9 @@ PUBLIC void pb_unreferenced(struct vir_region *region, struct phys_region *pr)
        int remap = 0;
 
        pb = pr->ph;
-       vm_assert(pb->refcount > 0);
+       assert(pb->refcount > 0);
        USE(pb, pb->refcount--;);
-       vm_assert(pb->refcount >= 0);
+       assert(pb->refcount >= 0);
 
        if(pb->firstregion == pr) {
                USE(pb, pb->firstregion = pr->next_ph_list;);
@@ -473,18 +473,18 @@ PUBLIC void pb_unreferenced(struct vir_region *region, struct phys_region *pr)
 
                for(others = pb->firstregion; others;
                        others = others->next_ph_list) {
-                       vm_assert(others->ph == pb);
+                       assert(others->ph == pb);
                        if(others->next_ph_list == pr) {
                                USE(others, others->next_ph_list = pr->next_ph_list;);
                                break;
                        }
                }
 
-               vm_assert(others); /* Otherwise, wasn't on the list. */
+               assert(others); /* Otherwise, wasn't on the list. */
        }
 
        if(pb->refcount == 0) {
-               vm_assert(!pb->firstregion);
+               assert(!pb->firstregion);
                if(region->flags & VR_ANON) {
                        free_mem(ABS2CLICK(pb->phys),
                                ABS2CLICK(pb->length));
@@ -508,7 +508,7 @@ PUBLIC void pb_unreferenced(struct vir_region *region, struct phys_region *pr)
                        } 
                        n++;
                }
-               vm_assert(n == pb->refcount);
+               assert(n == pb->refcount);
        }
 }
 
@@ -519,8 +519,8 @@ PRIVATE struct phys_region *reset_physr_iter(struct vir_region *region,
 
        physr_start_iter(region->phys, iter, offset, AVL_EQUAL);
        ph = physr_get_iter(iter);
-       vm_assert(ph);
-       vm_assert(ph->offset == offset);
+       assert(ph);
+       assert(ph->offset == offset);
 
        return ph;
 }
@@ -546,7 +546,7 @@ PRIVATE int map_subfree(struct vmproc *vmp,
 
                for(others = pb->firstregion; others;
                        others = others->next_ph_list) {
-                       vm_assert(others->ph == pb);
+                       assert(others->ph == pb);
                }
                physr_incr_iter(&iter);
        }
@@ -565,19 +565,19 @@ PRIVATE int map_subfree(struct vmproc *vmp,
                        SLABFREE(pr);
                } else {
                        vir_bytes sublen;
-                       vm_assert(len > pr->offset);
-                       vm_assert(len < pr->offset + pr->ph->length);
-                       vm_assert(pr->ph->refcount > 0);
+                       assert(len > pr->offset);
+                       assert(len < pr->offset + pr->ph->length);
+                       assert(pr->ph->refcount > 0);
                        sublen = len - pr->offset;
-                       vm_assert(!(sublen % VM_PAGE_SIZE));
-                       vm_assert(sublen < pr->ph->length);
+                       assert(!(sublen % VM_PAGE_SIZE));
+                       assert(sublen < pr->ph->length);
                        if(pr->ph->refcount > 1) {
                                int r;
                                if(!(pr = map_clone_ph_block(vmp, region,
                                        pr, &iter)))
                                        return ENOMEM;
                        }
-                       vm_assert(pr->ph->refcount == 1);
+                       assert(pr->ph->refcount == 1);
                        if(!(region->flags & VR_DIRECT)) {
                                free_mem(ABS2CLICK(pr->ph->phys), ABS2CLICK(sublen));
                        }
@@ -585,9 +585,9 @@ PRIVATE int map_subfree(struct vmproc *vmp,
                        USE(pr->ph,
                                pr->ph->phys += sublen;
                                pr->ph->length -= sublen;);
-                       vm_assert(!(pr->offset % VM_PAGE_SIZE));
-                       vm_assert(!(pr->ph->phys % VM_PAGE_SIZE));
-                       vm_assert(!(pr->ph->length % VM_PAGE_SIZE));
+                       assert(!(pr->offset % VM_PAGE_SIZE));
+                       assert(!(pr->ph->phys % VM_PAGE_SIZE));
+                       assert(!(pr->ph->length % VM_PAGE_SIZE));
                }
        }
 
@@ -704,7 +704,7 @@ int written;
 
        SANITYCHECK(SCL_FUNCTIONS);
 
-       vm_assert(!(length % VM_PAGE_SIZE));
+       assert(!(length % VM_PAGE_SIZE));
 
        if((region->flags & VR_CONTIG) &&
                (start_offset > 0 || length < region->length)) {
@@ -727,15 +727,15 @@ int written;
                given.next = NULL;
                memlist = &given;
                used_memlist = 0;
-               vm_assert(given.phys);
-               vm_assert(given.length);
+               assert(given.phys);
+               assert(given.length);
        }
 
        r = OK;
 
        for(ml = memlist; ml; ml = ml->next) {
-               vm_assert(ml->phys);
-               vm_assert(ml->length);
+               assert(ml->phys);
+               assert(ml->length);
        }
 
        for(ml = memlist; ml; ml = ml->next) {
@@ -751,11 +751,11 @@ int written;
                        break;
                }
 
-               vm_assert(ml->phys);
-               vm_assert(ml->length);
+               assert(ml->phys);
+               assert(ml->length);
 
                /* New physical block. */
-               vm_assert(!(ml->phys % VM_PAGE_SIZE));
+               assert(!(ml->phys % VM_PAGE_SIZE));
 
                USE(newpb,
                newpb->phys = ml->phys;
@@ -796,13 +796,13 @@ int written;
                                offset += ml->length;
                                if((physr = physr_search(region->phys, offset,
                                        AVL_EQUAL))) {
-                                       vm_assert(physr->ph->refcount == 1);
+                                       assert(physr->ph->refcount == 1);
                                        pb_unreferenced(region, physr);
                                        physr_remove(region->phys, physr->offset);
                                        SLABFREE(physr);
                                }
                        }
-               } else vm_assert(mapped == length);
+               } else assert(mapped == length);
 
                /* Always clean up the memlist itself, even if everything
                 * worked we're not using the memlist nodes any more. And
@@ -863,9 +863,9 @@ physr_iter *iter;
        SANITYCHECK(SCL_DETAIL);
        SLABSANE(ph);
        SLABSANE(ph->ph);
-       vm_assert(ph->ph->refcount > 1);
+       assert(ph->ph->refcount > 1);
        pb_unreferenced(region, ph);
-       vm_assert(ph->ph->refcount >= 1);
+       assert(ph->ph->refcount >= 1);
        physr_remove(region->phys, offset);
        SLABFREE(ph);
 
@@ -873,8 +873,8 @@ physr_iter *iter;
 
        /* Put new free memory in. */
        allocflags = vrallocflags(region->flags);
-       vm_assert(!(allocflags & PAF_CONTIG));
-       vm_assert(!(allocflags & PAF_CLEAR));
+       assert(!(allocflags & PAF_CONTIG));
+       assert(!(allocflags & PAF_CLEAR));
 
        if(map_new_physblock(vmp, region, offset, length,
                MAP_NONE, allocflags, written) != OK) {
@@ -891,12 +891,12 @@ physr_iter *iter;
                panic("copy_abs2region failed, no good reason for that");
 
        newpr = physr_search(region->phys, offset, AVL_EQUAL);
-       vm_assert(newpr);
-       vm_assert(newpr->offset == offset);
+       assert(newpr);
+       assert(newpr->offset == offset);
 
        if(iter) {
                physr_start_iter(region->phys, iter, offset, AVL_EQUAL);
-               vm_assert(physr_get_iter(iter) == newpr);
+               assert(physr_get_iter(iter) == newpr);
        }
 
        SANITYCHECK(SCL_FUNCTIONS);
@@ -918,11 +918,11 @@ int write;
        struct phys_region *ph;
        int r = OK;
 
-       vm_assert(offset >= 0);
-       vm_assert(offset < region->length);
+       assert(offset >= 0);
+       assert(offset < region->length);
 
-       vm_assert(region->flags & VR_ANON);
-       vm_assert(!(region->vaddr % VM_PAGE_SIZE));
+       assert(region->flags & VR_ANON);
+       assert(!(region->vaddr % VM_PAGE_SIZE));
 
        virpage = offset - offset % VM_PAGE_SIZE;
 
@@ -932,9 +932,9 @@ int write;
           (ph->offset <= offset && offset < ph->offset + ph->ph->length)) {
                phys_bytes blockoffset = ph->offset;
                /* Pagefault in existing block. Do copy-on-write. */
-               vm_assert(write);
-               vm_assert(region->flags & VR_WRITABLE);
-               vm_assert(ph->ph->refcount > 0);
+               assert(write);
+               assert(region->flags & VR_WRITABLE);
+               assert(ph->ph->refcount > 0);
 
                if(WRITABLE(region, ph->ph)) {
                        r = map_ph_writept(vmp, region, ph);
@@ -1010,11 +1010,11 @@ int write;
 
        SANITYCHECK(SCL_FUNCTIONS);
 
-       vm_assert(region->flags & VR_ANON);
-       vm_assert(!(region->vaddr % VM_PAGE_SIZE));
-       vm_assert(!(offset % VM_PAGE_SIZE));
-       vm_assert(!(length % VM_PAGE_SIZE));
-       vm_assert(!write || (region->flags & VR_WRITABLE));
+       assert(region->flags & VR_ANON);
+       assert(!(region->vaddr % VM_PAGE_SIZE));
+       assert(!(offset % VM_PAGE_SIZE));
+       assert(!(length % VM_PAGE_SIZE));
+       assert(!write || (region->flags & VR_WRITABLE));
 
        physr_start_iter(region->phys, &iter, offset, AVL_LESS_EQUAL);
        physr = physr_get_iter(&iter);
@@ -1046,7 +1046,7 @@ int write;
                SANITYCHECK(SCL_DETAIL);
 
                if(write) {
-                 vm_assert(physr->ph->refcount > 0);
+                 assert(physr->ph->refcount > 0);
                  if(!WRITABLE(region, physr->ph)) {
                        if(!(physr = map_clone_ph_block(vmp, region,
                                physr, &iter))) {
@@ -1170,13 +1170,13 @@ PRIVATE struct vir_region *map_copy_region(struct vmproc *vmp, struct vir_region
 #endif
                physr_insert(newvr->phys, newph);
 #if SANITYCHECKS
-               vm_assert(countregions(vr) == cr);
+               assert(countregions(vr) == cr);
 #endif
                physr_incr_iter(&iter);
        }
 
 #if SANITYCHECKS
-       vm_assert(countregions(vr) == countregions(newvr));
+       assert(countregions(vr) == countregions(newvr));
 #endif
 
        return newvr;
@@ -1189,28 +1189,28 @@ PUBLIC int copy_abs2region(phys_bytes abs, struct vir_region *destregion,
        phys_bytes offset, phys_bytes len)
 
 {
-       vm_assert(destregion);
-       vm_assert(destregion->phys);
+       assert(destregion);
+       assert(destregion->phys);
        while(len > 0) {
                phys_bytes sublen, suboffset;
                struct phys_region *ph;
-               vm_assert(destregion);
-               vm_assert(destregion->phys);
+               assert(destregion);
+               assert(destregion->phys);
                if(!(ph = physr_search(destregion->phys, offset, AVL_LESS_EQUAL))) {
                        printf("VM: copy_abs2region: no phys region found (1).\n");
                        return EFAULT;
                }
-               vm_assert(ph->offset <= offset);
+               assert(ph->offset <= offset);
                if(ph->offset+ph->ph->length <= offset) {
                        printf("VM: copy_abs2region: no phys region found (2).\n");
                        return EFAULT;
                }
                suboffset = offset - ph->offset;
-               vm_assert(suboffset < ph->ph->length);
+               assert(suboffset < ph->ph->length);
                sublen = len;
                if(sublen > ph->ph->length - suboffset)
                        sublen = ph->ph->length - suboffset;
-               vm_assert(suboffset + sublen <= ph->ph->length);
+               assert(suboffset + sublen <= ph->ph->length);
                if(ph->ph->refcount != 1) {
                        printf("VM: copy_abs2region: no phys region found (3).\n");
                        return EFAULT;
@@ -1292,21 +1292,21 @@ struct vmproc *src;
                        /* Check two physregions both are nonnull,
                         * are different, and match physblocks.
                         */
-                       vm_assert(new_ph);
-                       vm_assert(orig_ph);
-                       vm_assert(orig_ph != new_ph);
+                       assert(new_ph);
+                       assert(orig_ph);
+                       assert(orig_ph != new_ph);
                        pb = orig_ph->ph;
-                       vm_assert(pb == new_ph->ph);
+                       assert(pb == new_ph->ph);
 
                        /* Link in new physregion. */
-                       vm_assert(!new_ph->next_ph_list);
+                       assert(!new_ph->next_ph_list);
                        USE(new_ph, new_ph->next_ph_list = pb->firstregion;);
                        USE(pb, pb->firstregion = new_ph;);
 
                        /* Increase phys block refcount */
-                       vm_assert(pb->refcount > 0);
+                       assert(pb->refcount > 0);
                        USE(pb, pb->refcount++;);
-                       vm_assert(pb->refcount > 1);
+                       assert(pb->refcount > 1);
 
                        /* If the phys block has been shared as SMAP,
                         * do the regular copy. */
@@ -1321,7 +1321,7 @@ struct vmproc *src;
                        physr_incr_iter(&iter_orig);
                        physr_incr_iter(&iter_new);
                }
-               vm_assert(!physr_get_iter(&iter_new));
+               assert(!physr_get_iter(&iter_new));
                prevvr = newvr;
        }
 
@@ -1342,12 +1342,12 @@ PUBLIC struct vir_region *map_proc_kernel(struct vmproc *vmp)
        /* We assume these are the first regions to be mapped to
         * make the function a bit simpler (free all regions on error).
         */
-       vm_assert(!vmp->vm_regions);
-       vm_assert(vmproc[VMP_SYSTEM].vm_flags & VMF_INUSE);
-       vm_assert(!(KERNEL_TEXT % VM_PAGE_SIZE));
-       vm_assert(!(KERNEL_TEXT_LEN % VM_PAGE_SIZE));
-       vm_assert(!(KERNEL_DATA % VM_PAGE_SIZE));
-       vm_assert(!(KERNEL_DATA_LEN % VM_PAGE_SIZE));
+       assert(!vmp->vm_regions);
+       assert(vmproc[VMP_SYSTEM].vm_flags & VMF_INUSE);
+       assert(!(KERNEL_TEXT % VM_PAGE_SIZE));
+       assert(!(KERNEL_TEXT_LEN % VM_PAGE_SIZE));
+       assert(!(KERNEL_DATA % VM_PAGE_SIZE));
+       assert(!(KERNEL_DATA_LEN % VM_PAGE_SIZE));
 
        if(!(vr = map_page_region(vmp, KERNEL_TEXT, 0, KERNEL_TEXT_LEN, 
                KERNEL_TEXT, VR_DIRECT | VR_WRITABLE | VR_NOPF, 0)) ||
@@ -1368,13 +1368,13 @@ PUBLIC int map_region_extend(struct vmproc *vmp, struct vir_region *vr,
 {
        vir_bytes end;
 
-       vm_assert(vr);
-       vm_assert(vr->flags & VR_ANON);
-       vm_assert(!(delta % VM_PAGE_SIZE));
+       assert(vr);
+       assert(vr->flags & VR_ANON);
+       assert(!(delta % VM_PAGE_SIZE));
 
        if(!delta) return OK;
        end = vr->vaddr + vr->length;
-       vm_assert(end >= vr->vaddr);
+       assert(end >= vr->vaddr);
 
        if(end + delta <= end) {
                printf("VM: strange delta 0x%lx\n", delta);
@@ -1396,9 +1396,9 @@ PUBLIC int map_region_extend(struct vmproc *vmp, struct vir_region *vr,
  *========================================================================*/
 PUBLIC int map_region_shrink(struct vir_region *vr, vir_bytes delta)
 {
-       vm_assert(vr);
-       vm_assert(vr->flags & VR_ANON);
-       vm_assert(!(delta % VM_PAGE_SIZE));
+       assert(vr);
+       assert(vr->flags & VR_ANON);
+       assert(!(delta % VM_PAGE_SIZE));
 
 #if 0
        printf("VM: ignoring region shrink\n");
@@ -1493,7 +1493,7 @@ PUBLIC int map_unmap_region(struct vmproc *vmp, struct vir_region *region,
                 * same amount.
                 */
                while((pr = physr_get_iter(&iter))) {
-                       vm_assert(pr->offset >= len);
+                       assert(pr->offset >= len);
                        USE(pr, pr->offset -= len;);
                        physr_incr_iter(&iter);
                }
@@ -1525,7 +1525,7 @@ PUBLIC int map_remap(struct vmproc *dvmp, vir_bytes da, size_t size,
 
        SANITYCHECK(SCL_FUNCTIONS);
 
-       vm_assert(region->flags & VR_SHARED);
+       assert(region->flags & VR_SHARED);
 
        /* da is handled differently */
        if (!da)
@@ -1536,7 +1536,7 @@ PUBLIC int map_remap(struct vmproc *dvmp, vir_bytes da, size_t size,
 
        prev = NULL;
        /* round up to page size */
-       vm_assert(!(size % VM_PAGE_SIZE));
+       assert(!(size % VM_PAGE_SIZE));
        startv = region_find_slot(dvmp, dst_addr, VM_DATATOP, size, &prev);
        if (startv == (vir_bytes) -1) {
                printf("map_remap: search 0x%x...\n", dst_addr);
@@ -1557,7 +1557,7 @@ PUBLIC int map_remap(struct vmproc *dvmp, vir_bytes da, size_t size,
        vr->flags = region->flags;
        vr->tag = VRT_NONE;
        vr->parent = dvmp;);
-       vm_assert(vr->flags & VR_SHARED);
+       assert(vr->flags & VR_SHARED);
 
        if (prev) {
                USE(vr,
@@ -1572,7 +1572,7 @@ PUBLIC int map_remap(struct vmproc *dvmp, vir_bytes da, size_t size,
        physr_start_iter_least(vr->phys, &iter);
        while((ph = physr_get_iter(&iter))) {
                struct phys_block *pb = ph->ph;
-               vm_assert(!ph->next_ph_list);
+               assert(!ph->next_ph_list);
                USE(ph, ph->next_ph_list = pb->firstregion;);
                USE(pb, pb->firstregion = ph;);
                USE(pb, pb->refcount++;);
@@ -1608,8 +1608,8 @@ PUBLIC int map_get_phys(struct vmproc *vmp, vir_bytes addr, phys_bytes *r)
        physr_start_iter_least(vr->phys, &iter);
        ph = physr_get_iter(&iter);
 
-       vm_assert(ph);
-       vm_assert(ph->ph);
+       assert(ph);
+       assert(ph->ph);
        if (r)
                *r = ph->ph->phys;
 
@@ -1635,8 +1635,8 @@ PUBLIC int map_get_ref(struct vmproc *vmp, vir_bytes addr, u8_t *cnt)
        physr_start_iter_least(vr->phys, &iter);
        ph = physr_get_iter(&iter);
 
-       vm_assert(ph);
-       vm_assert(ph->ph);
+       assert(ph);
+       assert(ph->ph);
        if (cnt)
                *cnt = ph->ph->refcount;
 
@@ -1856,7 +1856,7 @@ PUBLIC int unmap_memory(endpoint_t sour, endpoint_t dest,
        vmd = &vmproc[p];
 
        vrd = map_lookup(vmd, virt_d);
-       vm_assert(vrd);
+       assert(vrd);
 
        /* Search for the first phys region in the destination process. */
        off = virt_d - vrd->vaddr;
@@ -1869,8 +1869,8 @@ PUBLIC int unmap_memory(endpoint_t sour, endpoint_t dest,
        end = off + length;
        while((pr = physr_get_iter(&iter)) && off < end) {
                pb = pr->ph;
-               vm_assert(pb->refcount > 1);
-               vm_assert(pb->share_flag == PBSH_SMAP);
+               assert(pb->refcount > 1);
+               assert(pb->share_flag == PBSH_SMAP);
 
                if(!(pr = map_clone_ph_block(vmd, vrd, pr, &iter)))
                        return ENOMEM;
@@ -2012,9 +2012,9 @@ PUBLIC int map_memory(endpoint_t sour, endpoint_t dest,
        vmd = &vmproc[p];
 
        vrs = map_lookup(vms, virt_s);
-       vm_assert(vrs);
+       assert(vrs);
        vrd = map_lookup(vmd, virt_d);
-       vm_assert(vrd);
+       assert(vrd);
 
        /* Linear address -> offset from start of vir region. */
        offset_s = virt_s - vrs->vaddr;
servers/vm/rs.c
index 623240eca35543637b53dc3ab95420bbb3579f0d..fd453eb404b71bff8452215aebaf50034915d172 100644
@@ -123,8 +123,8 @@ PUBLIC int do_rs_update(message *m_ptr)
        }
 
        /* Adjust page tables. */
-       vm_assert(src_vmp->vm_flags & VMF_HASPT);
-       vm_assert(dst_vmp->vm_flags & VMF_HASPT);
+       assert(src_vmp->vm_flags & VMF_HASPT);
+       assert(dst_vmp->vm_flags & VMF_HASPT);
        pt_bind(&src_vmp->vm_pt, src_vmp);
        pt_bind(&dst_vmp->vm_pt, dst_vmp);
        if((r=sys_vmctl(SELF, VMCTL_FLUSHTLB, 0)) != OK) {
servers/vm/sanitycheck.h
index f341ff531951e9143eba088c249eedc34555d430..730786923e1f5fe05ea744aae2544336c61fc601 100644
@@ -20,7 +20,7 @@
 
 #define SANITYCHECK(l) if(!nocheck && ((l) <= vm_sanitychecklevel)) {  \
                struct vmproc *vmpr;    \
-               vm_assert(incheck == 0);        \
+               assert(incheck == 0);   \
                incheck = 1;            \
                usedpages_reset();      \
        slab_sanitycheck(__FILE__, __LINE__);   \
@@ -31,7 +31,7 @@
                } \
        } \
        map_sanitycheck(__FILE__, __LINE__); \
-       vm_assert(incheck == 1);        \
+       assert(incheck == 1);   \
        incheck = 0;            \
        } 
 
servers/vm/slaballoc.c
index 7072cc3721af367e286910203819c17c4655ac7f..101ca6f0152ccc78c9df2787e3e60f850cd7b5ed 100644
 #define BITEL(f, b)    (f)->sdh.usebits[(b)/ELBITS]
 
 
-#define OFF(f, b) vm_assert(!GETBIT(f, b))
-#define ON(f, b)  vm_assert(GETBIT(f, b))
+#define OFF(f, b) assert(!GETBIT(f, b))
+#define ON(f, b)  assert(GETBIT(f, b))
 
 #if SANITYCHECKS
 #define SLABDATAWRITABLE(data, wr) do {                        \
-       vm_assert(data->sdh.writable == WRITABLE_NONE); \
-       vm_assert(wr != WRITABLE_NONE);                 \
+       assert(data->sdh.writable == WRITABLE_NONE);    \
+       assert(wr != WRITABLE_NONE);                    \
        vm_pagelock(data, 0);                           \
        data->sdh.writable = wr;                        \
 } while(0)
 
 #define SLABDATAUNWRITABLE(data) do {                  \
-       vm_assert(data->sdh.writable != WRITABLE_NONE); \
+       assert(data->sdh.writable != WRITABLE_NONE);    \
        data->sdh.writable = WRITABLE_NONE;             \
        vm_pagelock(data, 1);                           \
 } while(0)
@@ -133,10 +133,10 @@ FORWARD _PROTOTYPE( int objstats, (void *, int, struct slabheader **, struct sla
 
 #define GETSLAB(b, s) {                        \
        int i;                          \
-       vm_assert((b) >= MINSIZE);      \
+       assert((b) >= MINSIZE); \
        i = (b) - MINSIZE;              \
-       vm_assert((i) < SLABSIZES);     \
-       vm_assert((i) >= 0);            \
+       assert((i) < SLABSIZES);        \
+       assert((i) >= 0);               \
        s = &slabs[i];                  \
 }
 
@@ -145,7 +145,7 @@ FORWARD _PROTOTYPE( int objstats, (void *, int, struct slabheader **, struct sla
 /* move head of list l1 to list of l2 in slabheader sl. */
 #define MOVEHEAD(sl, l1, l2) {         \
        struct slabdata *t;             \
-       vm_assert(LH(sl,l1));           \
+       assert(LH(sl,l1));              \
        REMOVEHEAD(sl, l1, t);          \
        ADDHEAD(t, sl, l2);             \
 }
@@ -154,7 +154,7 @@ FORWARD _PROTOTYPE( int objstats, (void *, int, struct slabheader **, struct sla
 #define REMOVEHEAD(sl, list, to) {     \
        struct slabdata *dat;           \
        dat = (to) = LH(sl, list);      \
-       vm_assert(dat);                 \
+       assert(dat);                    \
        LH(sl, list) = dat->sdh.next;   \
        UNLINKNODE(dat);                \
 }
@@ -185,7 +185,7 @@ struct slabdata *newslabdata(int list)
        struct slabdata *n;
        phys_bytes p;
 
-       vm_assert(sizeof(*n) == VM_PAGE_SIZE);
+       assert(sizeof(*n) == VM_PAGE_SIZE);
 
        if(!(n = vm_allocpage(&p, VMP_SLAB))) {
                printf("newslabdata: vm_allocpage failed\n");
@@ -290,7 +290,7 @@ PUBLIC void *slaballoc(int bytes)
 
        /* Retrieve entry in slabs[]. */
        GETSLAB(bytes, s);
-       vm_assert(s);
+       assert(s);
 
        /* To make the common case more common, make space in the 'used'
         * queue first.
@@ -314,14 +314,14 @@ PUBLIC void *slaballoc(int bytes)
        }
        SLABSANITYCHECK(SCL_DETAIL);
 
-       vm_assert(s);
+       assert(s);
        firstused = LH(s, LIST_USED);
-       vm_assert(firstused);
+       assert(firstused);
 #if SANITYCHECKS
-       vm_assert(firstused->sdh.magic1 == MAGIC1);
-       vm_assert(firstused->sdh.magic2 == MAGIC2);
+       assert(firstused->sdh.magic1 == MAGIC1);
+       assert(firstused->sdh.magic2 == MAGIC2);
 #endif
-       vm_assert(firstused->sdh.nused < ITEMSPERPAGE(bytes));
+       assert(firstused->sdh.nused < ITEMSPERPAGE(bytes));
 
        for(i = firstused->sdh.freeguess;
                count < ITEMSPERPAGE(bytes); count++, i++) {
@@ -345,7 +345,7 @@ PUBLIC void *slaballoc(int bytes)
                        nojunkwarning++;
                        slabunlock(ret, bytes);
                        nojunkwarning--;
-                       vm_assert(!nojunkwarning);
+                       assert(!nojunkwarning);
                        *(u32_t *) ret = NOJUNK;
                        slablock(ret, bytes);
 #endif
@@ -458,7 +458,7 @@ PUBLIC void slabfree(void *mem, int bytes)
        nojunkwarning++;
        slablock(mem, bytes);
        nojunkwarning--;
-       vm_assert(!nojunkwarning);
+       assert(!nojunkwarning);
 #endif
 
        /* Free this data. */
@@ -467,7 +467,7 @@ PUBLIC void slabfree(void *mem, int bytes)
        /* Check if this slab changes lists. */
        if(f->sdh.nused == 0) {
                /* Now become FREE; must've been USED */
-               vm_assert(f->sdh.list == LIST_USED);
+               assert(f->sdh.list == LIST_USED);
                UNLINKNODE(f);
                if(f == LH(s, LIST_USED))
                        LH(s, LIST_USED) = f->sdh.next;
@@ -475,7 +475,7 @@ PUBLIC void slabfree(void *mem, int bytes)
                SLABSANITYCHECK(SCL_DETAIL);
        } else if(f->sdh.nused == ITEMSPERPAGE(bytes)-1) {
                /* Now become USED; must've been FULL */
-               vm_assert(f->sdh.list == LIST_FULL);
+               assert(f->sdh.list == LIST_FULL);
                UNLINKNODE(f);
                if(f == LH(s, LIST_FULL))
                        LH(s, LIST_FULL) = f->sdh.next;
@@ -483,7 +483,7 @@ PUBLIC void slabfree(void *mem, int bytes)
                SLABSANITYCHECK(SCL_DETAIL);
        } else {
                /* Stay USED */
-               vm_assert(f->sdh.list == LIST_USED);
+               assert(f->sdh.list == LIST_USED);
        }
 
        SLABSANITYCHECK(SCL_FUNCTIONS);
servers/vm/util.h
index d93ec296d0c43975a6192491d775966348ec94b9..d5d165cae05ee4011593da64ee505e5a55c2627d 100644
@@ -7,17 +7,5 @@
 
 #define ELEMENTS(a) (sizeof(a)/sizeof((a)[0]))
 
-#if SANITYCHECKS
-#define vm_assert(cond) {                              \
-       if(vm_sanitychecklevel > 0 && !(cond)) {        \
-               printf("VM:%s:%d: vm_assert failed: %s\n",      \
-                       __FILE__, __LINE__, #cond);     \
-               panic("vm_assert failed");              \
-       }                                               \
-       }
-#else
-#define vm_assert(cond)        ;
-#endif
-
 #endif
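
With the hunk above, the old macro is gone from util.h and every check in VM goes through <assert.h>. A minimal, generic illustration of the convention that now applies (not VM code): defining NDEBUG at compile time removes the checks, and there is no runtime switch comparable to vm_sanitychecklevel.

#include <assert.h>

/* assert() prints file, line and expression and aborts when the
 * condition is false; compiling with -DNDEBUG turns it into a no-op.
 */
int checked_div(int a, int b)
{
	assert(b != 0);
	return a / b;
}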
 
servers/vm/vm.h
index c711dd617ea3dff259ccd89e9145b7b856df38b3..0a20d6ee4b89a3a68bfec8c4534d1ac0dcda7c86 100644
@@ -28,7 +28,7 @@
 #define MINSTACKREGION (64*1024*1024)
 
 /* If so, this level: */
-#define SCL_NONE       0       /* No sanity checks - vm_assert()s only. */
+#define SCL_NONE       0       /* No sanity checks - assert()s only. */
 #define SCL_TOP                1       /* Main loop and other high-level places. */
 #define SCL_FUNCTIONS  2       /* Function entry/exit. */
 #define SCL_DETAIL     3       /* Detailed steps. */