minix.git/commitdiff
vm: merge i386 and arm pagetable code
author	Ben Gras <ben@minix3.org>
Wed, 31 Oct 2012 18:24:14 +0000 (19:24 +0100)
committer	Ben Gras <ben@minix3.org>
Fri, 9 Nov 2012 17:46:03 +0000 (18:46 +0100)
24 files changed:
servers/vm/arch/arm/Makefile.inc
servers/vm/arch/arm/memory.h [deleted file]
servers/vm/arch/arm/pagefaults.h [deleted file]
servers/vm/arch/arm/pagetable.c [deleted file]
servers/vm/arch/arm/pagetable.h
servers/vm/arch/i386/memory.h [deleted file]
servers/vm/arch/i386/pagefaults.h [deleted file]
servers/vm/arch/i386/pagetable.c
servers/vm/arch/i386/pagetable.h
servers/vm/arch/i386/util.S [deleted file]
servers/vm/fork.c
servers/vm/main.c
servers/vm/mmap.c
servers/vm/pagefaults.c
servers/vm/pb.c
servers/vm/proto.h
servers/vm/pt.h [new file with mode: 0644]
servers/vm/region.c
servers/vm/rs.c
servers/vm/sanitycheck.h
servers/vm/slaballoc.c
servers/vm/utility.c
servers/vm/vm.h
servers/vm/vmproc.h
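
The diff below keeps one shared pagetable.c and pushes the differences into the
per-architecture headers: each arch's pagetable.h maps its native names onto
common ARCH_*/VM_* aliases (see the arm/pagetable.h hunk further down), and the
places where the code genuinely diverges are wrapped in
#if defined(__i386__) / #elif defined(__arm__) blocks. A minimal sketch of that
pattern, using placeholder values rather than the real <machine/vm.h> constants:

    /* Each arch header aliases its own names; values here are placeholders. */
    #if defined(__i386__)
    #define VM_PAGE_SIZE         4096     /* stands in for I386_PAGE_SIZE */
    #define ARCH_VM_PDE_PRESENT  0x001    /* stands in for I386_VM_PRESENT */
    #elif defined(__arm__)
    #define VM_PAGE_SIZE         4096     /* stands in for ARM_PAGE_SIZE */
    #define ARCH_VM_PDE_PRESENT  0x001    /* stands in for ARM_VM_PDE_PRESENT */
    #endif

    /* The shared pagetable.c can then test entries without arch knowledge: */
    static int pde_present(unsigned int pde)
    {
            return !!(pde & ARCH_VM_PDE_PRESENT);
    }
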

index f3d71c80b5046f8b978bb7eb75909dfb606fdc7a..a6a6603ba1d198f5a55c8aec13d586302ca3db08 100644 (file)
@@ -1,5 +1,5 @@
 .include <bsd.own.mk>
 
 #Arch-specific sources
-.PATH: ${.CURDIR}/arch/${MACHINE_ARCH}
-SRCS+= pagetable.c #util.S
+.PATH: ${.CURDIR}/arch/i386
+SRCS+= pagetable.c
diff --git a/servers/vm/arch/arm/memory.h b/servers/vm/arch/arm/memory.h
deleted file mode 100644 (file)
index 4ceb046..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
-#include <machine/vm.h>
-
-/* And what is the highest addressable piece of memory, when in paged
- * mode?
- */
-#define VM_DATATOP     kernel_boot_info.user_end
-#define VM_STACKTOP    kernel_boot_info.user_sp
-
-#define SLAB_PAGESIZE  ARM_PAGE_SIZE
-#define VM_PAGE_SIZE   ARM_PAGE_SIZE
-
-#define CLICKSPERPAGE (ARM_PAGE_SIZE/CLICK_SIZE)
diff --git a/servers/vm/arch/arm/pagefaults.h b/servers/vm/arch/arm/pagefaults.h
deleted file mode 100644 (file)
index 757cb7f..0000000
+++ /dev/null
@@ -1,14 +0,0 @@
-
-#ifndef _PAGEFAULTS_H
-#define _PAGEFAULTS_H 1
-
-#include <machine/vm.h>
-
-#define PFERR_PROT(e)  ((ARM_VM_PFE_FS(e) == ARM_VM_PFE_L1PERM) \
-                        || (ARM_VM_PFE_FS(e) == ARM_VM_PFE_L2PERM))
-#define PFERR_NOPAGE(e) (!PFERR_PROT(e))
-#define PFERR_WRITE(e) ((e) & ARM_VM_PFE_W)
-#define PFERR_READ(e)  (!((e) & ARM_VM_PFE_W))
-
-#endif
-
diff --git a/servers/vm/arch/arm/pagetable.c b/servers/vm/arch/arm/pagetable.c
deleted file mode 100644 (file)
index 841fd41..0000000
+++ /dev/null
@@ -1,1261 +0,0 @@
-
-#define _SYSTEM 1
-#define _POSIX_SOURCE 1
-
-#include <minix/callnr.h>
-#include <minix/com.h>
-#include <minix/config.h>
-#include <minix/const.h>
-#include <minix/ds.h>
-#include <minix/endpoint.h>
-#include <minix/keymap.h>
-#include <minix/minlib.h>
-#include <minix/type.h>
-#include <minix/ipc.h>
-#include <minix/sysutil.h>
-#include <minix/syslib.h>
-#include <minix/safecopies.h>
-#include <minix/cpufeature.h>
-#include <minix/bitmap.h>
-#include <minix/debug.h>
-
-#include <errno.h>
-#include <stdlib.h>
-#include <assert.h>
-#include <string.h>
-#include <env.h>
-#include <stdio.h>
-#include <fcntl.h>
-#include <stdlib.h>
-
-#include "proto.h"
-#include "glo.h"
-#include "util.h"
-#include "vm.h"
-#include "sanitycheck.h"
-
-#include "memory.h"
-
-static int vm_self_pages;
-
-/* PDE used to map in kernel, kernel physical address. */
-static int pagedir_pde = -1;
-static u32_t pagedir_pde_val;
-
-static multiboot_module_t *kern_mb_mod = NULL;
-static size_t kern_size = 0;
-static int kern_start_pde = -1;
-
-/* 1MB page size available in hardware? */
-static int bigpage_ok = 1;
-
-/* Our process table entry. */
-struct vmproc *vmprocess = &vmproc[VM_PROC_NR];
-
-/* Spare memory, ready to go after initialization, to avoid a
- * circular dependency on allocating memory and writing it into VM's
- * page table.
- */
-#define SPAREPAGEDIRS 11
-#define STATIC_SPAREPAGEDIRS 10
-#define SPAREPAGES 250
-#define STATIC_SPAREPAGES 100
-int missing_sparedirs = SPAREPAGEDIRS;
-static struct {
-       void *pagedir;
-       phys_bytes phys;
-} sparepagedirs[SPAREPAGEDIRS];
-
-int missing_spares = SPAREPAGES;
-static struct {
-       void *page;
-       phys_bytes phys;
-} sparepages[SPAREPAGES];
-
-extern char _end;      
-#define is_staticaddr(v) ((vir_bytes) (v) < (vir_bytes) &_end)
-
-#define MAX_KERNMAPPINGS 10
-static struct {
-       phys_bytes      phys_addr;      /* Physical addr. */
-       phys_bytes      len;            /* Length in bytes. */
-       vir_bytes       vir_addr;       /* Offset in page table. */
-       int             flags;
-} kern_mappings[MAX_KERNMAPPINGS];
-int kernmappings = 0;
-
-/* Clicks must be pages, as
- *  - they must be page aligned to map them
- *  - they must be a multiple of the page size
- *  - it's inconvenient to have them bigger than pages, because we often want
- *    just one page
- * May as well require them to be equal then.
- */
-#if CLICK_SIZE != ARM_PAGE_SIZE
-#error CLICK_SIZE must be page size.
-#endif
-
-/* Page table that contains pointers to all page directories. */
-phys_bytes page_directories_phys;
-u32_t *page_directories = NULL;
-
-static char static_sparepagedirs[ARM_PAGEDIR_SIZE*STATIC_SPAREPAGEDIRS + ARM_PAGEDIR_SIZE] __aligned(ARM_PAGEDIR_SIZE);
-
-static char static_sparepages[ARM_PAGE_SIZE*STATIC_SPAREPAGES] __aligned(ARM_PAGE_SIZE);
-
-#if SANITYCHECKS
-/*===========================================================================*
- *                             pt_sanitycheck                               *
- *===========================================================================*/
-void pt_sanitycheck(pt_t *pt, char *file, int line)
-{
-/* Basic pt sanity check. */
-       int slot;
-
-       MYASSERT(pt);
-       MYASSERT(pt->pt_dir);
-       MYASSERT(pt->pt_dir_phys);
-
-       for(slot = 0; slot < ELEMENTS(vmproc); slot++) {
-               if(pt == &vmproc[slot].vm_pt)
-                       break;
-       }
-
-       if(slot >= ELEMENTS(vmproc)) {
-               panic("pt_sanitycheck: passed pt not in any proc");
-       }
-
-       MYASSERT(usedpages_add(pt->pt_dir_phys, ARM_PAGE_SIZE) == OK);
-}
-#endif
-
-/*===========================================================================*
- *                             findhole                                     *
- *===========================================================================*/
-static u32_t findhole(int pages)
-{
-/* Find a space in the virtual address space of VM. */
-       u32_t curv;
-       int pde = 0, try_restart;
-       static u32_t lastv = 0;
-       pt_t *pt = &vmprocess->vm_pt;
-       vir_bytes vmin, vmax;
-       u32_t holev;
-
-       vmin = (vir_bytes) (&_end) & ARM_VM_ADDR_MASK; /* marks end of VM BSS */
-       vmax = VM_STACKTOP;
-
-       /* Input sanity check. */
-       assert(vmin + ARM_PAGE_SIZE >= vmin);
-       assert(vmax >= vmin + ARM_PAGE_SIZE);
-       assert((vmin % ARM_PAGE_SIZE) == 0);
-       assert((vmax % ARM_PAGE_SIZE) == 0);
-       assert(pages > 0);
-
-#if SANITYCHECKS
-       curv = ((u32_t) random()) % ((vmax - vmin)/ARM_PAGE_SIZE);
-       curv *= ARM_PAGE_SIZE;
-       curv += vmin;
-#else
-       curv = lastv;
-       if(curv < vmin || curv >= vmax)
-               curv = vmin;
-#endif
-       try_restart = 1;
-
-       /* Start looking for a free page starting at vmin. */
-       while(curv < vmax) {
-               int pte;
-               int i, nohole;
-
-               assert(curv >= vmin);
-               assert(curv < vmax);
-
-               holev = curv; /* the candidate hole */
-               nohole = 0;
-               for (i = 0; i < pages && !nohole; ++i) {
-                   if(curv >= vmax) {
-                       break;
-                   }
-
-                   pde = ARM_VM_PDE(curv);
-                   pte = ARM_VM_PTE(curv);
-
-                   /* if page present, no hole */
-                   if((pt->pt_dir[pde] & ARM_VM_PDE_PRESENT) &&
-                      (pt->pt_pt[pde][pte] & ARM_VM_PTE_PRESENT))
-                       nohole = 1;
-
-                   /* if not contiguous, no hole */
-                   if (curv != holev + i * ARM_PAGE_SIZE)
-                       nohole = 1;
-
-                   curv+=ARM_PAGE_SIZE;
-               }
-
-               /* there's a large enough hole */
-               if (!nohole && i == pages) {
-                       lastv = curv;
-                       return holev;
-               }
-
-               /* Reset curv */
-               if(curv >= vmax && try_restart) {
-                       curv = vmin;
-                       try_restart = 0;
-               }
-       }
-
-       printf("VM: out of virtual address space in vm\n");
-
-       return NO_MEM;
-}
-
-/*===========================================================================*
- *                             vm_freepages                                 *
- *===========================================================================*/
-void vm_freepages(vir_bytes vir, int pages)
-{
-       assert(!(vir % ARM_PAGE_SIZE)); 
-
-       if(is_staticaddr(vir)) {
-               printf("VM: not freeing static page\n");
-               return;
-       }
-
-       if(pt_writemap(vmprocess, &vmprocess->vm_pt, vir,
-               MAP_NONE, pages*ARM_PAGE_SIZE, 0,
-               WMF_OVERWRITE | WMF_FREE) != OK)
-               panic("vm_freepages: pt_writemap failed");
-
-       vm_self_pages--;
-
-#if SANITYCHECKS
-       /* If SANITYCHECKS are on, flush tlb so accessing freed pages is
-        * always trapped, also if not in tlb.
-        */
-       if((sys_vmctl(SELF, VMCTL_FLUSHTLB, 0)) != OK) {
-               panic("VMCTL_FLUSHTLB failed");
-       }
-#endif
-}
-
-/*===========================================================================*
- *                             vm_getsparepage                              *
- *===========================================================================*/
-static void *vm_getsparepage(phys_bytes *phys)
-{
-       int s;
-       assert(missing_spares >= 0 && missing_spares <= SPAREPAGES);
-       for(s = 0; s < SPAREPAGES; s++) {
-               if(sparepages[s].page) {
-                       void *sp;
-                       sp = sparepages[s].page;
-                       *phys = sparepages[s].phys;
-                       sparepages[s].page = NULL;
-                       missing_spares++;
-                       assert(missing_spares >= 0 && missing_spares <= SPAREPAGES);
-                       return sp;
-               }
-       }
-       return NULL;
-}
-
-/*===========================================================================*
- *                             vm_getsparepagedir                           *
- *===========================================================================*/
-static void *vm_getsparepagedir(phys_bytes *phys)
-{
-       int s;
-       assert(missing_sparedirs >= 0 && missing_sparedirs <= SPAREPAGEDIRS);
-       for(s = 0; s < SPAREPAGEDIRS; s++) {
-               if(sparepagedirs[s].pagedir) {
-                       void *sp;
-                       sp = sparepagedirs[s].pagedir;
-                       *phys = sparepagedirs[s].phys;
-                       sparepagedirs[s].pagedir = NULL;
-                       missing_sparedirs++;
-                       assert(missing_sparedirs >= 0 && missing_sparedirs <= SPAREPAGEDIRS);
-                       return sp;
-               }
-       }
-       return NULL;
-}
-
-/*===========================================================================*
- *                             vm_checkspares                               *
- *===========================================================================*/
-static void *vm_checkspares(void)
-{
-       int s, n = 0;
-       static int total = 0, worst = 0;
-       assert(missing_spares >= 0 && missing_spares <= SPAREPAGES);
-       for(s = 0; s < SPAREPAGES && missing_spares > 0; s++)
-           if(!sparepages[s].page) {
-               n++;
-               if((sparepages[s].page = vm_allocpage(&sparepages[s].phys, 
-                       VMP_SPARE))) {
-                       missing_spares--;
-                       assert(missing_spares >= 0);
-                       assert(missing_spares <= SPAREPAGES);
-               } else {
-                       printf("VM: warning: couldn't get new spare page\n");
-               }
-       }
-       if(worst < n) worst = n;
-       total += n;
-
-       return NULL;
-}
-
-/*===========================================================================*
- *                             vm_checksparedirs                            *
- *===========================================================================*/
-static void *vm_checksparedirs(void)
-{
-       int s, n = 0;
-       static int total = 0, worst = 0;
-       assert(missing_sparedirs >= 0 && missing_sparedirs <= SPAREPAGEDIRS);
-       for(s = 0; s < SPAREPAGEDIRS && missing_sparedirs > 0; s++)
-           if(!sparepagedirs[s].pagedir) {
-               n++;
-               if((sparepagedirs[s].pagedir = vm_allocpage(&sparepagedirs[s].phys,
-                       VMP_SPARE))) {
-                       missing_sparedirs--;
-                       assert(missing_sparedirs >= 0);
-                       assert(missing_sparedirs <= SPAREPAGEDIRS);
-               } else {
-                       printf("VM: warning: couldn't get new spare pagedir\n");
-               }
-       }
-       if(worst < n) worst = n;
-       total += n;
-
-       return NULL;
-}
-
-static int pt_init_done;
-
-/*===========================================================================*
- *                             vm_allocpage                                 *
- *===========================================================================*/
-void *vm_allocpage(phys_bytes *phys, int reason)
-{
-/* Allocate a page for use by VM itself. */
-       phys_bytes newpage;
-       vir_bytes loc;
-       pt_t *pt;
-       int r;
-       static int level = 0;
-       void *ret;
-       u32_t mem_bytes, mem_clicks, mem_flags;
-
-       pt = &vmprocess->vm_pt;
-       assert(reason >= 0 && reason < VMP_CATEGORIES);
-
-       level++;
-
-       assert(level >= 1);
-       assert(level <= 2);
-
-       if(level > 1 || !pt_init_done) {
-               void *s;
-
-               if (reason == VMP_PAGEDIR)
-                       s=vm_getsparepagedir(phys);
-               else
-                       s=vm_getsparepage(phys);
-
-               level--;
-               if(!s) {
-                       util_stacktrace();
-                       printf("VM: warning: out of spare pages\n");
-               }
-               if(!is_staticaddr(s)) vm_self_pages++;
-               return s;
-       }
-
-       if (reason == VMP_PAGEDIR) {
-               mem_bytes = ARM_PAGEDIR_SIZE;
-               mem_flags = PAF_ALIGN16K;
-       } else {
-               mem_bytes = ARM_PAGE_SIZE;
-               mem_flags = 0;
-       }
-       mem_clicks = mem_bytes / ARM_PAGE_SIZE * CLICKSPERPAGE;
-
-       /* VM does have a pagetable, so get a page and map it in there.
-        * Where in our virtual address space can we put it?
-        */
-       loc = findhole(mem_bytes / ARM_PAGE_SIZE);
-       if(loc == NO_MEM) {
-               level--;
-               printf("VM: vm_allocpage: findhole failed\n");
-               return NULL;
-       }
-
-       /* Allocate page of memory for use by VM. As VM
-        * is trusted, we don't have to pre-clear it.
-        */
-       if((newpage = alloc_mem(mem_clicks, mem_flags)) == NO_MEM) {
-               level--;
-               printf("VM: vm_allocpage: alloc_mem failed\n");
-               return NULL;
-       }
-
-       *phys = CLICK2ABS(newpage);
-
-       /* Map this page into our address space. */
-       if((r=pt_writemap(vmprocess, pt, loc, *phys, mem_bytes,
-               ARM_VM_PTE_PRESENT | ARM_VM_PTE_USER | ARM_VM_PTE_RW |
-               ARM_VM_PTE_WB | ARM_VM_PTE_SHAREABLE, 0)) != OK) {
-               free_mem(newpage, mem_clicks);
-               printf("vm_allocpage writemap failed\n");
-               level--;
-               return NULL;
-       }
-
-       if((r=sys_vmctl(SELF, VMCTL_FLUSHTLB, 0)) != OK) {
-               panic("VMCTL_FLUSHTLB failed: %d", r);
-       }
-
-       level--;
-
-       /* Return user-space-ready pointer to it. */
-       ret = (void *) loc;
-
-       vm_self_pages++;
-       return ret;
-}
-
-/*===========================================================================*
- *                             vm_pagelock                                  *
- *===========================================================================*/
-void vm_pagelock(void *vir, int lockflag)
-{
-/* Mark a page allocated by vm_allocpage() unwritable, i.e. only for VM. */
-       vir_bytes m = (vir_bytes) vir;
-       int r;
-       u32_t flags = ARM_VM_PTE_PRESENT | ARM_VM_PTE_USER;
-       pt_t *pt;
-
-       pt = &vmprocess->vm_pt;
-
-       assert(!(m % ARM_PAGE_SIZE));
-
-       if(!lockflag)
-               flags |= ARM_VM_PTE_RW;
-       else
-               flags |= ARM_VM_PTE_RO;
-       flags |= ARM_VM_PTE_WB | ARM_VM_PTE_SHAREABLE;
-
-       /* Update flags. */
-       if((r=pt_writemap(vmprocess, pt, m, 0, ARM_PAGE_SIZE,
-               flags, WMF_OVERWRITE | WMF_WRITEFLAGSONLY)) != OK) {
-               panic("vm_lockpage: pt_writemap failed");
-       }
-
-       if((r=sys_vmctl(SELF, VMCTL_FLUSHTLB, 0)) != OK) {
-               panic("VMCTL_FLUSHTLB failed: %d", r);
-       }
-
-       return;
-}
-
-/*===========================================================================*
- *                             vm_addrok                                    *
- *===========================================================================*/
-int vm_addrok(void *vir, int writeflag)
-{
-       pt_t *pt = &vmprocess->vm_pt;
-       int pde, pte;
-       vir_bytes v = (vir_bytes) vir;
-
-       pde = ARM_VM_PDE(v);
-       pte = ARM_VM_PTE(v);
-
-       if(!(pt->pt_dir[pde] & ARM_VM_PDE_PRESENT)) {
-               printf("addr not ok: missing pde %d\n", pde);
-               return 0;
-       }
-
-       if(!(pt->pt_pt[pde][pte] & ARM_VM_PTE_PRESENT)) {
-               printf("addr not ok: missing pde %d / pte %d\n",
-                       pde, pte);
-               return 0;
-       }
-
-       if(!writeflag &&
-               !(pt->pt_pt[pde][pte] & ARM_VM_PTE_RO)) {
-               printf("addr not ok: pde %d / pte %d present but writable\n",
-                       pde, pte);
-               return 0;
-       }
-
-       return 1;
-}
-
-/*===========================================================================*
- *                             pt_ptalloc                                   *
- *===========================================================================*/
-static int pt_ptalloc(pt_t *pt, int pde, u32_t flags)
-{
-/* Allocate a page table and write its address into the page directory. */
-       int i;
-       phys_bytes pt_phys;
-
-       /* Argument must make sense. */
-       assert(pde >= 0 && pde < ARM_VM_DIR_ENTRIES);
-       assert(!(flags & ~(PTF_ALLFLAGS)));
-
-       /* We don't expect to overwrite page directory entry, nor
-        * storage for the page table.
-        */
-       assert(!(pt->pt_dir[pde] & ARM_VM_PDE_PRESENT));
-       assert(!pt->pt_pt[pde]);
-
-       /* Get storage for the page table. */
-        if(!(pt->pt_pt[pde] = vm_allocpage(&pt_phys, VMP_PAGETABLE)))
-               return ENOMEM;
-
-       for(i = 0; i < ARM_VM_PT_ENTRIES; i++)
-               pt->pt_pt[pde][i] = 0;  /* Empty entry. */
-
-       /* Make page directory entry.
-        * The PDE is always 'present,' 'writable,' and 'user accessible,'
-        * relying on the PTE for protection.
-        */
-       pt->pt_dir[pde] = (pt_phys & ARM_VM_PDE_MASK)
-               | ARM_VM_PDE_PRESENT | ARM_VM_PDE_DOMAIN;
-
-       return OK;
-}
-
-/*===========================================================================*
- *                         pt_ptalloc_in_range                              *
- *===========================================================================*/
-int pt_ptalloc_in_range(pt_t *pt, vir_bytes start, vir_bytes end,
-       u32_t flags, int verify)
-{
-/* Allocate all the page tables in the range specified. */
-       int pde, first_pde, last_pde;
-
-       first_pde = ARM_VM_PDE(start);
-       last_pde = ARM_VM_PDE(end-1);
-       assert(first_pde >= 0);
-       assert(last_pde < ARM_VM_DIR_ENTRIES);
-
-       /* Scan all page-directory entries in the range. */
-       for(pde = first_pde; pde <= last_pde; pde++) {
-               assert(!(pt->pt_dir[pde] & ARM_VM_BIGPAGE));
-               if(!(pt->pt_dir[pde] & ARM_VM_PDE_PRESENT)) {
-                       int r;
-                       if(verify) {
-                               printf("pt_ptalloc_in_range: no pde %d\n", pde);
-                               return EFAULT;
-                       }
-                       assert(!pt->pt_dir[pde]);
-                       if((r=pt_ptalloc(pt, pde, flags)) != OK) {
-                               /* Couldn't do (complete) mapping.
-                                * Don't bother freeing any previously
-                                * allocated page tables, they're
-                                * still writable, don't point to nonsense,
-                                * and pt_ptalloc leaves the directory
-                                * and other data in a consistent state.
-                                */
-                               printf("pt_ptalloc_in_range: pt_ptalloc failed\n");
-                               return r;
-                       }
-               }
-               assert(pt->pt_dir[pde]);
-               assert(pt->pt_dir[pde] & ARM_VM_PDE_PRESENT);
-       }
-
-       return OK;
-}
-
-static char *ptestr(u32_t pte)
-{
-#define FLAG(constant, name) {                                         \
-       if(pte & (constant)) { strcat(str, name); strcat(str, " "); }   \
-}
-
-       static char str[30];
-       if(!(pte & ARM_VM_PTE_PRESENT)) {
-               return "not present";
-       }
-       str[0] = '\0';
-       if(pte & ARM_VM_PTE_RO) {
-           strcat(str, "R ");
-       } else {
-           strcat(str, "W ");
-       }
-       FLAG(ARM_VM_PTE_USER, "U");
-       FLAG(ARM_VM_PTE_SUPER, "S");
-       FLAG(ARM_VM_PTE_SHAREABLE, "SH");
-       FLAG(ARM_VM_PTE_WB, "WB");
-       FLAG(ARM_VM_PTE_WT, "WT");
-
-       return str;
-}
-
-/*===========================================================================*
- *                          pt_map_in_range                                 *
- *===========================================================================*/
-int pt_map_in_range(struct vmproc *src_vmp, struct vmproc *dst_vmp,
-       vir_bytes start, vir_bytes end)
-{
-/* Transfer all the mappings from the pt of the source process to the pt of
- * the destination process in the range specified.
- */
-       int pde, pte;
-       vir_bytes viraddr;
-       pt_t *pt, *dst_pt;
-
-       pt = &src_vmp->vm_pt;
-       dst_pt = &dst_vmp->vm_pt;
-
-       end = end ? end : VM_DATATOP;
-       assert(start % ARM_PAGE_SIZE == 0);
-       assert(end % ARM_PAGE_SIZE == 0);
-       assert(ARM_VM_PDE(start) >= 0 && start <= end);
-       assert(ARM_VM_PDE(end) < ARM_VM_DIR_ENTRIES);
-
-#if LU_DEBUG
-       printf("VM: pt_map_in_range: src = %d, dst = %d\n",
-               src_vmp->vm_endpoint, dst_vmp->vm_endpoint);
-       printf("VM: pt_map_in_range: transferring from 0x%08x (pde %d pte %d) to 0x%08x (pde %d pte %d)\n",
-               start, ARM_VM_PDE(start), ARM_VM_PTE(start),
-               end, ARM_VM_PDE(end), ARM_VM_PTE(end));
-#endif
-
-       /* Scan all page-table entries in the range. */
-       for(viraddr = start; viraddr <= end; viraddr += ARM_PAGE_SIZE) {
-               pde = ARM_VM_PDE(viraddr);
-               if(!(pt->pt_dir[pde] & ARM_VM_PDE_PRESENT)) {
-                       if(viraddr == VM_DATATOP) break;
-                       continue;
-               }
-               pte = ARM_VM_PTE(viraddr);
-               if(!(pt->pt_pt[pde][pte] & ARM_VM_PTE_PRESENT)) {
-                       if(viraddr == VM_DATATOP) break;
-                       continue;
-               }
-
-               /* Transfer the mapping. */
-               dst_pt->pt_pt[pde][pte] = pt->pt_pt[pde][pte];
-
-                if(viraddr == VM_DATATOP) break;
-       }
-
-       return OK;
-}
-
-/*===========================================================================*
- *                             pt_ptmap                                     *
- *===========================================================================*/
-int pt_ptmap(struct vmproc *src_vmp, struct vmproc *dst_vmp)
-{
-/* Transfer mappings to page dir and page tables from source process and
- * destination process. Make sure all the mappings are above the stack, not
- * to corrupt valid mappings in the data segment of the destination process.
- */
-       int pde, r;
-       phys_bytes physaddr;
-       vir_bytes viraddr;
-       pt_t *pt;
-
-       pt = &src_vmp->vm_pt;
-
-#if LU_DEBUG
-       printf("VM: pt_ptmap: src = %d, dst = %d\n",
-               src_vmp->vm_endpoint, dst_vmp->vm_endpoint);
-#endif
-
-       /* Transfer mapping to the page directory. */
-       viraddr = (vir_bytes) pt->pt_dir;
-       physaddr = pt->pt_dir_phys & ARM_VM_ADDR_MASK;
-       if((r=pt_writemap(dst_vmp, &dst_vmp->vm_pt, viraddr, physaddr, ARM_PAGEDIR_SIZE,
-               ARM_VM_PTE_PRESENT | ARM_VM_PTE_USER | ARM_VM_PTE_RW |
-               ARM_VM_PTE_WB | ARM_VM_PTE_SHAREABLE,
-               WMF_OVERWRITE)) != OK) {
-               return r;
-       }
-#if LU_DEBUG
-       printf("VM: pt_ptmap: transferred mapping to page dir: 0x%08x (0x%08x)\n",
-               viraddr, physaddr);
-#endif
-
-       /* Scan all non-reserved page-directory entries. */
-       for(pde=0; pde < ARM_VM_DIR_ENTRIES; pde++) {
-               if(!(pt->pt_dir[pde] & ARM_VM_PDE_PRESENT)) {
-                       continue;
-               }
-
-               /* Transfer mapping to the page table. */
-               viraddr = (vir_bytes) pt->pt_pt[pde];
-               physaddr = pt->pt_dir[pde] & ARM_VM_PDE_MASK;
-               if((r=pt_writemap(dst_vmp, &dst_vmp->vm_pt, viraddr, physaddr, ARM_PAGE_SIZE,
-                       ARM_VM_PTE_PRESENT | ARM_VM_PTE_USER | ARM_VM_PTE_RW |
-                       ARM_VM_PTE_WB | ARM_VM_PTE_SHAREABLE,
-                       WMF_OVERWRITE)) != OK) {
-                       return r;
-               }
-       }
-
-       return OK;
-}
-
-void pt_clearmapcache(void)
-{
-       /* Make sure kernel will invalidate tlb when using current
-        * pagetable (i.e. vm's) to make new mappings before new cr3
-        * is loaded.
-        */
-       if(sys_vmctl(SELF, VMCTL_CLEARMAPCACHE, 0) != OK)
-               panic("VMCTL_CLEARMAPCACHE failed");
-}
-
-/*===========================================================================*
- *                             pt_writemap                                  *
- *===========================================================================*/
-int pt_writemap(struct vmproc * vmp,
-                       pt_t *pt,
-                       vir_bytes v,
-                       phys_bytes physaddr,
-                       size_t bytes,
-                       u32_t flags,
-                       u32_t writemapflags)
-{
-/* Write mapping into page table. Allocate a new page table if necessary. */
-/* Page directory and table entries for this virtual address. */
-       int p, pages;
-       int verify = 0;
-       int ret = OK;
-
-#ifdef CONFIG_SMP
-       int vminhibit_clear = 0;
-       /* FIXME
-        * don't do it everytime, stop the process only on the first change and
-        * resume the execution on the last change. Do in a wrapper of this
-        * function
-        */
-       if (vmp && vmp->vm_endpoint != NONE && vmp->vm_endpoint != VM_PROC_NR &&
-                       !(vmp->vm_flags & VMF_EXITING)) {
-               sys_vmctl(vmp->vm_endpoint, VMCTL_VMINHIBIT_SET, 0);
-               vminhibit_clear = 1;
-       }
-#endif
-
-       if(writemapflags & WMF_VERIFY)
-               verify = 1;
-
-       assert(!(bytes % ARM_PAGE_SIZE));
-       assert(!(flags & ~(PTF_ALLFLAGS)));
-
-       pages = bytes / ARM_PAGE_SIZE;
-
-       /* MAP_NONE means to clear the mapping. It doesn't matter
-        * what's actually written into the PTE if ARM_VM_PRESENT
-        * isn't on, so we can just write MAP_NONE into it.
-        */
-       assert(physaddr == MAP_NONE || (flags & ARM_VM_PTE_PRESENT));
-       assert(physaddr != MAP_NONE || !flags);
-
-       /* First make sure all the necessary page tables are allocated,
-        * before we start writing in any of them, because it's a pain
-        * to undo our work properly.
-        */
-       ret = pt_ptalloc_in_range(pt, v, v + ARM_PAGE_SIZE*pages, flags, verify);
-       if(ret != OK) {
-               printf("VM: writemap: pt_ptalloc_in_range failed\n");
-               goto resume_exit;
-       }
-
-       /* Now write in them. */
-       for(p = 0; p < pages; p++) {
-               u32_t entry;
-               int pde = ARM_VM_PDE(v);
-               int pte = ARM_VM_PTE(v);
-
-               if(!v) { printf("VM: warning: making zero page for %d\n",
-                       vmp->vm_endpoint); }
-
-               assert(!(v % ARM_PAGE_SIZE));
-               assert(pte >= 0 && pte < ARM_VM_PT_ENTRIES);
-               assert(pde >= 0 && pde < ARM_VM_DIR_ENTRIES);
-
-               /* Page table has to be there. */
-               assert(pt->pt_dir[pde] & ARM_VM_PDE_PRESENT);
-
-               /* We do not expect it to be a bigpage. */
-               assert(!(pt->pt_dir[pde] & ARM_VM_BIGPAGE));
-
-               /* Make sure page directory entry for this page table
-                * is marked present and page table entry is available.
-                */
-               assert(pt->pt_pt[pde]);
-
-#if SANITYCHECKS
-               /* We don't expect to overwrite a page. */
-               if(!(writemapflags & (WMF_OVERWRITE|WMF_VERIFY)))
-                       assert(!(pt->pt_pt[pde][pte] & ARM_VM_PTE_PRESENT));
-#endif
-               if(writemapflags & (WMF_WRITEFLAGSONLY|WMF_FREE)) {
-                       physaddr = pt->pt_pt[pde][pte] & ARM_VM_PTE_MASK;
-               }
-
-               if(writemapflags & WMF_FREE) {
-                       free_mem(ABS2CLICK(physaddr), 1);
-               }
-
-               /* Entry we will write. */
-               entry = (physaddr & ARM_VM_PTE_MASK) | flags;
-
-               if(verify) {
-                       u32_t maskedentry;
-                       maskedentry = pt->pt_pt[pde][pte];
-                       /* Verify pagetable entry. */
-                       if(entry & ARM_VM_PTE_RW) {
-                               /* If we expect a writable page, allow a readonly page. */
-                               maskedentry |= ARM_VM_PTE_RW;
-                       }
-                       if(maskedentry != entry) {
-                               printf("pt_writemap: mismatch: ");
-                               if((entry & ARM_VM_PTE_MASK) !=
-                                       (maskedentry & ARM_VM_PTE_MASK)) {
-                                       printf("pt_writemap: physaddr mismatch (0x%lx, 0x%lx); ",
-                                               (long)entry, (long)maskedentry);
-                               } else printf("phys ok; ");
-                               printf(" flags: found %s; ",
-                                       ptestr(pt->pt_pt[pde][pte]));
-                               printf(" masked %s; ",
-                                       ptestr(maskedentry));
-                               printf(" expected %s\n", ptestr(entry));
-                               ret = EFAULT;
-                               goto resume_exit;
-                       }
-               } else {
-                       /* Write pagetable entry. */
-                       pt->pt_pt[pde][pte] = entry;
-               }
-
-               physaddr += ARM_PAGE_SIZE;
-               v += ARM_PAGE_SIZE;
-       }
-
-resume_exit:
-
-#ifdef CONFIG_SMP
-       if (vminhibit_clear) {
-               assert(vmp && vmp->vm_endpoint != NONE && vmp->vm_endpoint != VM_PROC_NR &&
-                       !(vmp->vm_flags & VMF_EXITING));
-               sys_vmctl(vmp->vm_endpoint, VMCTL_VMINHIBIT_CLEAR, 0);
-       }
-#endif
-
-       return ret;
-}
-
-/*===========================================================================*
- *                             pt_checkrange                                *
- *===========================================================================*/
-int pt_checkrange(pt_t *pt, vir_bytes v,  size_t bytes,
-       int write)
-{
-       int p, pages;
-
-       assert(!(bytes % ARM_PAGE_SIZE));
-
-       pages = bytes / ARM_PAGE_SIZE;
-
-       for(p = 0; p < pages; p++) {
-               int pde = ARM_VM_PDE(v);
-               int pte = ARM_VM_PTE(v);
-
-               assert(!(v % ARM_PAGE_SIZE));
-               assert(pte >= 0 && pte < ARM_VM_PT_ENTRIES);
-               assert(pde >= 0 && pde < ARM_VM_DIR_ENTRIES);
-
-               /* Page table has to be there. */
-               if(!(pt->pt_dir[pde] & ARM_VM_PDE_PRESENT))
-                       return EFAULT;
-
-               /* Make sure page directory entry for this page table
-                * is marked present and page table entry is available.
-                */
-               assert((pt->pt_dir[pde] & ARM_VM_PDE_PRESENT) && pt->pt_pt[pde]);
-
-               if(!(pt->pt_pt[pde][pte] & ARM_VM_PTE_PRESENT)) {
-                       return EFAULT;
-               }
-
-               if(write && (pt->pt_pt[pde][pte] & ARM_VM_PTE_RO)) {
-                       return EFAULT;
-               }
-
-               v += ARM_PAGE_SIZE;
-       }
-
-       return OK;
-}
-
-/*===========================================================================*
- *                             pt_new                                       *
- *===========================================================================*/
-int pt_new(pt_t *pt)
-{
-/* Allocate a pagetable root. On ARM, allocate a page-aligned page directory
- * and set them to 0 (indicating no page tables are allocated). Lookup
- * its physical address as we'll need that in the future. Verify it's
- * page-aligned.
- */
-       int i;
-
-       /* Don't ever re-allocate/re-move a certain process slot's
-        * page directory once it's been created. This is a fraction
-        * faster, but also avoids having to invalidate the page
-        * mappings from in-kernel page tables pointing to
-        * the page directories (the page_directories data).
-        */
-        if(!pt->pt_dir &&
-          !(pt->pt_dir = vm_allocpage((phys_bytes *)&pt->pt_dir_phys, VMP_PAGEDIR))) {
-               return ENOMEM;
-       }
-       assert(!((u32_t)pt->pt_dir_phys % ARM_PAGEDIR_SIZE));
-
-       for(i = 0; i < ARM_VM_DIR_ENTRIES; i++) {
-               pt->pt_dir[i] = 0; /* invalid entry (ARM_VM_PRESENT bit = 0) */
-               pt->pt_pt[i] = NULL;
-       }
-
-       /* Where to start looking for free virtual address space? */
-       pt->pt_virtop = 0;
-
-        /* Map in kernel. */
-        if(pt_mapkernel(pt) != OK)
-                panic("pt_new: pt_mapkernel failed");
-
-       return OK;
-}
-
-static int freepde(void)
-{
-       int p = kernel_boot_info.freepde_start++;
-       assert(kernel_boot_info.freepde_start < ARM_VM_DIR_ENTRIES);
-       return p;
-}
-
-/*===========================================================================*
- *                              pt_init                                      *
- *===========================================================================*/
-void pt_init(void)
-{
-        pt_t *newpt;
-        int s, r, p;
-       vir_bytes sparepages_mem;
-       vir_bytes sparepagedirs_mem;
-       static u32_t currentpagedir[ARM_VM_DIR_ENTRIES];
-       int m = kernel_boot_info.kern_mod;
-       u32_t myttbr;
-
-       /* Find what the physical location of the kernel is. */
-       assert(m >= 0);
-       assert(m < kernel_boot_info.mods_with_kernel);
-       assert(kernel_boot_info.mods_with_kernel < MULTIBOOT_MAX_MODS);
-       kern_mb_mod = &kernel_boot_info.module_list[m];
-       kern_size = kern_mb_mod->mod_end - kern_mb_mod->mod_start;
-       assert(!(kern_mb_mod->mod_start % ARM_BIG_PAGE_SIZE));
-       assert(!(kernel_boot_info.vir_kern_start % ARM_BIG_PAGE_SIZE));
-       kern_start_pde = kernel_boot_info.vir_kern_start / ARM_BIG_PAGE_SIZE;
-
-        /* Get ourselves spare pages. */
-        sparepages_mem = (vir_bytes) static_sparepages;
-       assert(!(sparepages_mem % ARM_PAGE_SIZE));
-
-        /* Get ourselves spare pagedirs. */
-       sparepagedirs_mem = (vir_bytes) static_sparepagedirs;
-       assert(!(sparepagedirs_mem % ARM_PAGEDIR_SIZE));
-
-       /* Spare pages are used to allocate memory before VM has its own page
-        * table that things (i.e. arbitrary physical memory) can be mapped into.
-        * We get it by pre-allocating it in our bss (allocated and mapped in by
-        * the kernel) in static_sparepages. We also need the physical addresses
-        * though; we look them up now so they are ready for use.
-        */
-        missing_sparedirs = 0;
-        assert(STATIC_SPAREPAGEDIRS < SPAREPAGEDIRS);
-        for(s = 0; s < SPAREPAGEDIRS; s++) {
-               vir_bytes v = (sparepagedirs_mem + s*ARM_PAGEDIR_SIZE);;
-               phys_bytes ph;
-               if((r=sys_umap(SELF, VM_D, (vir_bytes) v,
-                       ARM_PAGEDIR_SIZE, &ph)) != OK)
-                               panic("pt_init: sys_umap failed: %d", r);
-               if(s >= STATIC_SPAREPAGEDIRS) {
-                       sparepagedirs[s].pagedir = NULL;
-                       missing_sparedirs++;
-                       continue;
-               }
-               sparepagedirs[s].pagedir = (void *) v;
-               sparepagedirs[s].phys = ph;
-        }
-
-        missing_spares = 0;
-        assert(STATIC_SPAREPAGES < SPAREPAGES);
-        for(s = 0; s < SPAREPAGES; s++) {
-               vir_bytes v = (sparepages_mem + s*ARM_PAGE_SIZE);;
-               phys_bytes ph;
-               if((r=sys_umap(SELF, VM_D, (vir_bytes) v,
-                       ARM_PAGE_SIZE*SPAREPAGES, &ph)) != OK)
-                               panic("pt_init: sys_umap failed: %d", r);
-               if(s >= STATIC_SPAREPAGES) {
-                       sparepages[s].page = NULL;
-                       missing_spares++;
-                       continue;
-               }
-               sparepages[s].page = (void *) v;
-               sparepages[s].phys = ph;
-        }
-
-       /* 1MB pages available? */
-       bigpage_ok = 1;
-
-       /* Allocate us a page table in which to remember page directory
-        * pointers.
-        */
-       if(!(page_directories = vm_allocpage(&page_directories_phys,
-               VMP_PAGETABLE)))
-                panic("no virt addr for vm mappings");
-
-       memset(page_directories, 0, ARM_PAGE_SIZE);
-
-       /* Now reserve another pde for kernel's own mappings. */
-       {
-               int kernmap_pde;
-               phys_bytes addr, len;
-               int flags, index = 0;
-               u32_t offset = 0;
-
-               kernmap_pde = freepde();
-               offset = kernmap_pde * ARM_BIG_PAGE_SIZE;
-
-               while(sys_vmctl_get_mapping(index, &addr, &len,
-                       &flags) == OK)  {
-                       vir_bytes vir;
-                       if(index >= MAX_KERNMAPPINGS)
-                               panic("VM: too many kernel mappings: %d", index);
-                       kern_mappings[index].phys_addr = addr;
-                       kern_mappings[index].len = len;
-                       kern_mappings[index].flags = flags;
-                       kern_mappings[index].vir_addr = addr;
-                       kern_mappings[index].flags =
-                               ARM_VM_PTE_PRESENT;
-                       if(flags & VMMF_UNCACHED)
-                               kern_mappings[index].flags |= ARM_VM_PTE_DEVICE;
-                       else
-                               kern_mappings[index].flags |=
-                                   ARM_VM_PTE_WB | ARM_VM_PTE_SHAREABLE;
-                       if(flags & VMMF_USER)
-                               kern_mappings[index].flags |= ARM_VM_PTE_USER;
-                       else
-                               kern_mappings[index].flags |= ARM_VM_PTE_SUPER;
-                       if(flags & VMMF_WRITE)
-                               kern_mappings[index].flags |= ARM_VM_PTE_RW;
-                       else
-                               kern_mappings[index].flags |= ARM_VM_PTE_RO;
-                       if(addr % ARM_PAGE_SIZE)
-                               panic("VM: addr unaligned: %d", addr);
-                       if(len % ARM_PAGE_SIZE)
-                               panic("VM: len unaligned: %d", len);
-                       vir = offset;
-                       if(sys_vmctl_reply_mapping(index, vir) != OK)
-                               panic("VM: reply failed");
-                       offset += len;
-                       index++;
-                       kernmappings++;
-               }
-       }
-
-       /* Find a PDE below processes available for mapping in the
-        * page directories.
-        */
-       pagedir_pde = freepde();
-       pagedir_pde_val = (page_directories_phys & ARM_VM_PDE_MASK) |
-                       ARM_VM_PDE_PRESENT | ARM_VM_PDE_DOMAIN;
-
-       /* Allright. Now. We have to make our own page directory and page tables,
-        * that the kernel has already set up, accessible to us. It's easier to
-        * understand if we just copy all the required pages (i.e. page directory
-        * and page tables), and set up the pointers as if VM had done it itself.
-        *
-        * This allocation will happen without using any page table, and just
-        * uses spare pages.
-        */
-        newpt = &vmprocess->vm_pt;
-       if(pt_new(newpt) != OK)
-               panic("vm pt_new failed");
-
-       /* Get our current pagedir so we can see it. */
-       if(sys_vmctl_get_pdbr(SELF, &myttbr) != OK)
-               panic("VM: sys_vmctl_get_pdbr failed");
-       if(sys_vircopy(NONE, myttbr, SELF,
-               (vir_bytes) currentpagedir, ARM_PAGEDIR_SIZE) != OK)
-               panic("VM: sys_vircopy failed");
-
-       /* We have mapped in kernel ourselves; now copy mappings for VM
-        * that kernel made, including allocations for BSS. Skip identity
-        * mapping bits; just map in VM.
-        */
-       for(p = 0; p < ARM_VM_DIR_ENTRIES; p++) {
-               u32_t entry = currentpagedir[p];
-               phys_bytes ptaddr_kern, ptaddr_us;
-
-               /* BIGPAGEs are kernel mapping (do ourselves) or boot
-                * identity mapping (don't want).
-                */
-               if(!(entry & ARM_VM_PDE_PRESENT)) continue;
-               if((entry & ARM_VM_BIGPAGE)) continue;
-
-               if(pt_ptalloc(newpt, p, 0) != OK)
-                       panic("pt_ptalloc failed");
-               assert(newpt->pt_dir[p] & ARM_VM_PDE_PRESENT);
-
-               ptaddr_kern = entry & ARM_VM_PDE_MASK;
-               ptaddr_us = newpt->pt_dir[p] & ARM_VM_PDE_MASK;
-
-               /* Copy kernel-initialized pagetable contents into our
-                * normally accessible pagetable.
-                */
-                if(sys_abscopy(ptaddr_kern, ptaddr_us, ARM_PAGETABLE_SIZE) != OK)
-                       panic("pt_init: abscopy failed");
-       }
-
-       /* Inform kernel vm has a newly built page table. */
-       assert(vmproc[VM_PROC_NR].vm_endpoint == VM_PROC_NR);
-       pt_bind(newpt, &vmproc[VM_PROC_NR]);
-
-       pt_init_done = 1;
-
-        /* All OK. */
-        return;
-}
-
-/*===========================================================================*
- *                             pt_bind                                      *
- *===========================================================================*/
-int pt_bind(pt_t *pt, struct vmproc *who)
-{
-       int slot;
-       u32_t phys;
-       void *pdes;
-       int i;
-       int pages_per_pagedir = ARM_PAGEDIR_SIZE/ARM_PAGE_SIZE;
-
-       /* Basic sanity checks. */
-       assert(who);
-       assert(who->vm_flags & VMF_INUSE);
-       assert(pt);
-
-       assert(pagedir_pde >= 0);
-
-       slot = who->vm_slot;
-       assert(slot >= 0);
-       assert(slot < ELEMENTS(vmproc));
-       assert(slot < ARM_VM_PT_ENTRIES / pages_per_pagedir);
-
-       phys = pt->pt_dir_phys & ARM_VM_PTE_MASK;
-       assert(pt->pt_dir_phys == phys);
-       assert(!(pt->pt_dir_phys % ARM_PAGEDIR_SIZE));
-
-       /* Update "page directory pagetable." */
-       for (i = 0; i < pages_per_pagedir; i++)
-           page_directories[slot*pages_per_pagedir+i] =
-               (phys+i*ARM_PAGE_SIZE) |
-               ARM_VM_PTE_PRESENT | ARM_VM_PTE_RW |
-               ARM_VM_PTE_USER;
-
-       /* This is where the PDE's will be visible to the kernel
-        * in its address space.
-        */
-       pdes = (void *) (pagedir_pde*ARM_BIG_PAGE_SIZE + 
-                       slot * ARM_PAGEDIR_SIZE);
-
-#if 0
-       printf("VM: slot %d endpoint %d has pde val 0x%lx at kernel address 0x%lx\n",
-               slot, who->vm_endpoint, page_directories[slot], pdes);
-#endif
-       /* Tell kernel about new page table root. */
-       return sys_vmctl_set_addrspace(who->vm_endpoint, pt->pt_dir_phys, pdes);
-}
-
-/*===========================================================================*
- *                             pt_free                                      *
- *===========================================================================*/
-void pt_free(pt_t *pt)
-{
-/* Free memory associated with this pagetable. */
-       int i;
-
-       for(i = 0; i < ARM_VM_DIR_ENTRIES; i++)
-               if(pt->pt_pt[i])
-                       vm_freepages((vir_bytes) pt->pt_pt[i], 1);
-
-       return;
-}
-
-/*===========================================================================*
- *                             pt_mapkernel                                 *
- *===========================================================================*/
-int pt_mapkernel(pt_t *pt)
-{
-       int i;
-       int kern_pde = kern_start_pde;
-       phys_bytes addr, mapped = 0;
-
-        /* Any ARM page table needs to map in the kernel address space. */
-       assert(bigpage_ok);
-       assert(pagedir_pde >= 0);
-       assert(kern_pde >= 0);
-
-       /* pt_init() has made sure this is ok. */
-       addr = kern_mb_mod->mod_start;
-
-       /* Actually mapping in kernel */
-       while(mapped < kern_size) {
-               pt->pt_dir[kern_pde] = (addr & ARM_VM_PDE_MASK) |
-                       ARM_VM_SECTION |
-                       ARM_VM_SECTION_DOMAIN | ARM_VM_SECTION_WB |
-                       ARM_VM_SECTION_SHAREABLE | ARM_VM_SECTION_SUPER;
-               kern_pde++;
-               mapped += ARM_BIG_PAGE_SIZE;
-               addr += ARM_BIG_PAGE_SIZE;
-       }
-
-       /* Kernel also wants to know about all page directories. */
-       assert(pagedir_pde > kern_pde);
-       pt->pt_dir[pagedir_pde] = pagedir_pde_val;
-
-       /* Kernel also wants various mappings of its own. */
-       for(i = 0; i < kernmappings; i++) {
-               if(pt_writemap(NULL, pt,
-                       kern_mappings[i].vir_addr,
-                       kern_mappings[i].phys_addr,
-                       kern_mappings[i].len,
-                       kern_mappings[i].flags, 0) != OK) {
-                       panic("pt_mapkernel: pt_writemap failed");
-               }
-       }
-
-       return OK;
-}
-
-/*===========================================================================*
- *                             pt_cycle                                     *
- *===========================================================================*/
-void pt_cycle(void)
-{
-       vm_checkspares();
-       vm_checksparedirs();
-}
-
-int get_vm_self_pages(void) { return vm_self_pages; }
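
The ARM findhole() deleted above differs from its i386 counterpart in one way
that the merged file preserves: an ARM page directory is 16KB (hence the
PAF_ALIGN16K allocation in vm_allocpage()), so findhole() takes a page count
and must locate a run of contiguous free pages in VM's own address space rather
than a single page. A stripped-down sketch of that scan, with a hypothetical
is_free() predicate standing in for the real pt_dir/pt_pt presence checks:

    /* Simplified version of the contiguous-hole scan; is_free() is a
     * hypothetical stand-in for the PDE/PTE presence test in findhole(). */
    static unsigned long find_run(unsigned long vmin, unsigned long vmax,
            int pages, unsigned long page_size,
            int (*is_free)(unsigned long addr))
    {
            unsigned long curv = vmin;

            while (curv + (unsigned long)pages * page_size <= vmax) {
                    int i;
                    for (i = 0; i < pages; i++)
                            if (!is_free(curv + i * page_size))
                                    break;
                    if (i == pages)
                            return curv;    /* found a hole of 'pages' pages */
                    /* skip past the first used page and try again */
                    curv += (i + 1) * page_size;
            }
            return (unsigned long)-1;       /* caller treats this as NO_MEM */
    }
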
index f78be1578dd7b56d2d7a71f242c915e4dc3b14cd..b7f0fb91114382c84a54e8ea67934c3b495ad6ea 100644 (file)
@@ -7,23 +7,6 @@
 
 #include "vm.h"
 
-/* An ARM pagetable. */
-typedef struct {
-       /* Directory entries in VM addr space - root of page table.  */
-       u32_t *pt_dir;          /* 16KB aligned (ARM_VM_DIR_ENTRIES) */
-       u32_t pt_dir_phys;      /* physical address of pt_dir */
-
-       /* Pointers to page tables in VM address space. */
-       u32_t *pt_pt[ARM_VM_DIR_ENTRIES];
-
-       /* When looking for a hole in virtual address space, start
-        * looking here. This is in linear addresses, i.e.,
-        * not as the process sees it but the position in the page
-        * page table. This is just a hint.
-        */
-       u32_t pt_virtop;
-} pt_t;
-
 /* Mapping flags. */
 #define PTF_WRITE      ARM_VM_PTE_RW
 #define PTF_READ       ARM_VM_PTE_RO
@@ -35,17 +18,31 @@ typedef struct {
 #define PTF_CACHEWT    ARM_VM_PTE_WT
 #define PTF_SHARE      ARM_VM_PTE_SHAREABLE
 
+#define ARCH_VM_DIR_ENTRIES     ARM_VM_DIR_ENTRIES
+#define ARCH_BIG_PAGE_SIZE      ARM_BIG_PAGE_SIZE
+#define ARCH_VM_ADDR_MASK       ARM_VM_ADDR_MASK
+#define ARCH_VM_PDE_MASK       ARM_VM_PDE_MASK
+#define ARCH_VM_PDE_PRESENT    ARM_VM_PDE_PRESENT
+#define ARCH_VM_PTE_PRESENT    ARM_VM_PTE_PRESENT
+#define ARCH_VM_PTE_USER       ARM_VM_PTE_USER
+#define ARCH_PAGEDIR_SIZE      ARM_PAGEDIR_SIZE
+#define ARCH_VM_PTE_RW         ARM_VM_PTE_RW
+#define ARCH_VM_BIGPAGE                ARM_VM_BIGPAGE
+#define ARCH_VM_PT_ENTRIES     ARM_VM_PT_ENTRIES
+#define ARCH_VM_PTE_RO         ARM_VM_PTE_RO
+
 /* For arch-specific PT routines to check if no bits outside
  * the regular flags are set.
  */
 #define PTF_ALLFLAGS   (PTF_READ|PTF_WRITE|PTF_PRESENT|PTF_SUPER|PTF_USER|PTF_NOCACHE|PTF_CACHEWB|PTF_CACHEWT|PTF_SHARE)
 
-#if SANITYCHECKS
-#define PT_SANE(p) { pt_sanitycheck((p), __FILE__, __LINE__); }
-#else
-#define PT_SANE(p)
-#endif
+#define PFERR_PROT(e)  ((ARM_VM_PFE_FS(e) == ARM_VM_PFE_L1PERM) \
+                        || (ARM_VM_PFE_FS(e) == ARM_VM_PFE_L2PERM))
+#define PFERR_NOPAGE(e) (!PFERR_PROT(e))
+#define PFERR_WRITE(e) ((e) & ARM_VM_PFE_W)
+#define PFERR_READ(e)  (!((e) & ARM_VM_PFE_W))
 
-#endif
+#define VM_PAGE_SIZE    ARM_PAGE_SIZE
 
+#endif
 
diff --git a/servers/vm/arch/i386/memory.h b/servers/vm/arch/i386/memory.h
deleted file mode 100644 (file)
index d72de15..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
-#include <machine/vm.h>
-
-/* And what is the highest addressable piece of memory, when in paged
- * mode?
- */
-#define VM_DATATOP     kernel_boot_info.user_end
-#define VM_STACKTOP    kernel_boot_info.user_sp
-
-#define SLAB_PAGESIZE  I386_PAGE_SIZE
-#define VM_PAGE_SIZE   I386_PAGE_SIZE
-
-#define CLICKSPERPAGE (I386_PAGE_SIZE/CLICK_SIZE)
diff --git a/servers/vm/arch/i386/pagefaults.h b/servers/vm/arch/i386/pagefaults.h
deleted file mode 100644 (file)
index 4cdc9a6..0000000
+++ /dev/null
@@ -1,13 +0,0 @@
-
-#ifndef _PAGEFAULTS_H
-#define _PAGEFAULTS_H 1
-
-#include <machine/vm.h>
-
-#define PFERR_NOPAGE(e)        (!((e) & I386_VM_PFE_P))
-#define PFERR_PROT(e)  (((e) & I386_VM_PFE_P))
-#define PFERR_WRITE(e) ((e) & I386_VM_PFE_W)
-#define PFERR_READ(e)  (!((e) & I386_VM_PFE_W))
-
-#endif
-
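
Both deleted pagefaults.h headers did nothing more than classify the hardware
fault code; that classification now lives in each architecture's pagetable.h
(the PFERR_* macros added in the arm/pagetable.h hunk above). A small sketch of
how such macros are typically consumed; the return codes are made up for this
illustration and are not VM's actual fault-handling protocol:

    /* Illustrative consumer of the PFERR_* macros from the arch header. */
    static int classify_fault(unsigned int err)
    {
            int is_write = PFERR_WRITE(err);   /* nonzero if the access was a write */

            if (PFERR_NOPAGE(err))
                    return is_write ? 1 : 2;   /* unmapped page, write vs. read */
            return 3;                          /* PFERR_PROT(err): access not permitted */
    }
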
index 67a21ed4b1d30e4e71e73bfb6aa2ef8a3da44fc0..424b452e4f3e23926d976dab99c3b63bc390abf9 100644 (file)
 #include "vm.h"
 #include "sanitycheck.h"
 
-#include "memory.h"
-
 static int vm_self_pages;
 
 /* PDE used to map in kernel, kernel physical address. */
 static int pagedir_pde = -1;
+#if defined(__i386__)
 static u32_t global_bit = 0, pagedir_pde_val;
+#elif defined(__arm__)
+static u32_t pagedir_pde_val;
+#endif
 
 static multiboot_module_t *kern_mb_mod = NULL;
 static size_t kern_size = 0;
 static int kern_start_pde = -1;
 
+#if defined(__i386__)
 /* 4MB page size available in hardware? */
 static int bigpage_ok = 0;
+#elif defined(__arm__)
+/* 1MB page size available in hardware? */
+static int bigpage_ok = 1;
+#endif
 
 /* Our process table entry. */
 struct vmproc *vmprocess = &vmproc[VM_PROC_NR];
@@ -56,12 +63,25 @@ struct vmproc *vmprocess = &vmproc[VM_PROC_NR];
  * circular dependency on allocating memory and writing it into VM's
  * page table.
  */
+#if defined(__i386__)
 #if SANITYCHECKS
 #define SPAREPAGES 100
 #define STATIC_SPAREPAGES 90
 #else
 #define SPAREPAGES 15
 #define STATIC_SPAREPAGES 10
+#endif
+#elif defined(__arm__)
+#define SPAREPAGEDIRS 11
+#define STATIC_SPAREPAGEDIRS 10
+#define SPAREPAGES 250
+#define STATIC_SPAREPAGES 100
+int missing_sparedirs = SPAREPAGEDIRS;
+static struct {
+       void *pagedir;
+       phys_bytes phys;
+} sparepagedirs[SPAREPAGEDIRS];
+
 #endif
 int missing_spares = SPAREPAGES;
 static struct {
@@ -88,7 +108,7 @@ int kernmappings = 0;
  *    just one page
  * May as well require them to be equal then.
  */
-#if CLICK_SIZE != I386_PAGE_SIZE
+#if CLICK_SIZE != VM_PAGE_SIZE
 #error CLICK_SIZE must be page size.
 #endif
 
@@ -96,9 +116,17 @@ int kernmappings = 0;
 phys_bytes page_directories_phys;
 u32_t *page_directories = NULL;
 
-static char static_sparepages[I386_PAGE_SIZE*STATIC_SPAREPAGES] 
-       __aligned(I386_PAGE_SIZE);
+#if defined(__i386__)
+static char static_sparepages[VM_PAGE_SIZE*STATIC_SPAREPAGES] 
+       __aligned(VM_PAGE_SIZE);
+#elif defined(__arm__)
+static char static_sparepagedirs[ARCH_PAGEDIR_SIZE*STATIC_SPAREPAGEDIRS + ARCH_PAGEDIR_SIZE] __aligned(ARCH_PAGEDIR_SIZE);
+#endif
+
+#if defined(__arm__)
+static char static_sparepages[VM_PAGE_SIZE*STATIC_SPAREPAGES] __aligned(VM_PAGE_SIZE);
 
+#endif
 #if SANITYCHECKS
 /*===========================================================================*
  *                             pt_sanitycheck                               *
@@ -121,14 +149,18 @@ void pt_sanitycheck(pt_t *pt, char *file, int line)
                panic("pt_sanitycheck: passed pt not in any proc");
        }
 
-       MYASSERT(usedpages_add(pt->pt_dir_phys, I386_PAGE_SIZE) == OK);
+       MYASSERT(usedpages_add(pt->pt_dir_phys, VM_PAGE_SIZE) == OK);
 }
 #endif
 
 /*===========================================================================*
  *                             findhole                                     *
  *===========================================================================*/
+#if defined(__i386__)
 static u32_t findhole(void)
+#elif defined(__arm__)
+static u32_t findhole(int pages)
+#endif
 {
 /* Find a space in the virtual address space of VM. */
        u32_t curv;
@@ -136,19 +168,25 @@ static u32_t findhole(void)
        static u32_t lastv = 0;
        pt_t *pt = &vmprocess->vm_pt;
        vir_bytes vmin, vmax;
+#if defined(__arm__)
+       u32_t holev;
+#endif
 
-       vmin = (vir_bytes) (&_end) & I386_VM_ADDR_MASK; /* marks end of VM BSS */
+       vmin = (vir_bytes) (&_end) & ARCH_VM_ADDR_MASK; /* marks end of VM BSS */
        vmax = VM_STACKTOP;
 
        /* Input sanity check. */
-       assert(vmin + I386_PAGE_SIZE >= vmin);
-       assert(vmax >= vmin + I386_PAGE_SIZE);
-       assert((vmin % I386_PAGE_SIZE) == 0);
-       assert((vmax % I386_PAGE_SIZE) == 0);
+       assert(vmin + VM_PAGE_SIZE >= vmin);
+       assert(vmax >= vmin + VM_PAGE_SIZE);
+       assert((vmin % VM_PAGE_SIZE) == 0);
+       assert((vmax % VM_PAGE_SIZE) == 0);
+#if defined(__arm__)
+       assert(pages > 0);
+#endif
 
 #if SANITYCHECKS
-       curv = ((u32_t) random()) % ((vmax - vmin)/I386_PAGE_SIZE);
-       curv *= I386_PAGE_SIZE;
+       curv = ((u32_t) random()) % ((vmax - vmin)/VM_PAGE_SIZE);
+       curv *= VM_PAGE_SIZE;
        curv += vmin;
 #else
        curv = lastv;
@@ -160,21 +198,61 @@ static u32_t findhole(void)
        /* Start looking for a free page starting at vmin. */
        while(curv < vmax) {
                int pte;
+#if defined(__arm__)
+               int i, nohole;
+#endif
 
                assert(curv >= vmin);
                assert(curv < vmax);
 
+#if defined(__i386__)
                pde = I386_VM_PDE(curv);
                pte = I386_VM_PTE(curv);
+#elif defined(__arm__)
+               holev = curv; /* the candidate hole */
+               nohole = 0;
+               for (i = 0; i < pages && !nohole; ++i) {
+                   if(curv >= vmax) {
+                       break;
+                   }
+#endif
 
-               if(!(pt->pt_dir[pde] & I386_VM_PRESENT) ||
-                  !(pt->pt_pt[pde][pte] & I386_VM_PRESENT)) {
+#if defined(__i386__)
+               if(!(pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT) ||
+                  !(pt->pt_pt[pde][pte] & ARCH_VM_PAGE_PRESENT)) {
+#elif defined(__arm__)
+                   pde = ARM_VM_PDE(curv);
+                   pte = ARM_VM_PTE(curv);
+
+                   /* if page present, no hole */
+                   if((pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT) &&
+                      (pt->pt_pt[pde][pte] & ARCH_VM_PTE_PRESENT))
+                       nohole = 1;
+
+                   /* if not contiguous, no hole */
+                   if (curv != holev + i * VM_PAGE_SIZE)
+                       nohole = 1;
+
+                   curv+=VM_PAGE_SIZE;
+               }
+
+               /* there's a large enough hole */
+               if (!nohole && i == pages) {
+#endif
                        lastv = curv;
+#if defined(__i386__)
                        return curv;
+#elif defined(__arm__)
+                       return holev;
+#endif
                }
 
-               curv+=I386_PAGE_SIZE;
+#if defined(__i386__)
+               curv+=VM_PAGE_SIZE;
 
+#elif defined(__arm__)
+               /* Reset curv */
+#endif
                if(curv >= vmax && try_restart) {
                        curv = vmin;
                        try_restart = 0;
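
On ARM, findhole() now takes a page count and has to find that many contiguous free virtual pages (a 16KB page directory needs four), while the i386 path still returns the first single free page it sees. A minimal standalone sketch of such a contiguous scan, with a hypothetical page_is_free() standing in for the PDE/PTE present checks in the hunk above:

    #include <stdint.h>

    #define PAGE_SIZE 4096
    #define NO_MEM    ((uint32_t) -1)

    /* Hypothetical predicate; in VM this is the PDE/PTE "present" test. */
    int page_is_free(uint32_t vaddr);

    uint32_t find_contiguous_hole(uint32_t vmin, uint32_t vmax, int pages)
    {
        uint32_t curv = vmin;

        while (curv + pages * PAGE_SIZE <= vmax) {
            int i;

            for (i = 0; i < pages; i++)
                if (!page_is_free(curv + i * PAGE_SIZE))
                    break;
            if (i == pages)
                return curv;                    /* large enough hole found */
            curv += (i + 1) * PAGE_SIZE;        /* resume past the used page */
        }
        return NO_MEM;
    }

The ARM branch above resumes its outer scan at the page after the conflicting one, which is what the (i + 1) step models here.
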
@@ -191,7 +269,7 @@ static u32_t findhole(void)
  *===========================================================================*/
 void vm_freepages(vir_bytes vir, int pages)
 {
-       assert(!(vir % I386_PAGE_SIZE)); 
+       assert(!(vir % VM_PAGE_SIZE)); 
 
        if(is_staticaddr(vir)) {
                printf("VM: not freeing static page\n");
@@ -199,7 +277,7 @@ void vm_freepages(vir_bytes vir, int pages)
        }
 
        if(pt_writemap(vmprocess, &vmprocess->vm_pt, vir,
-               MAP_NONE, pages*I386_PAGE_SIZE, 0,
+               MAP_NONE, pages*VM_PAGE_SIZE, 0,
                WMF_OVERWRITE | WMF_FREE) != OK)
                panic("vm_freepages: pt_writemap failed");
 
@@ -236,6 +314,29 @@ static void *vm_getsparepage(phys_bytes *phys)
        return NULL;
 }
 
+#if defined(__arm__)
+/*===========================================================================*
+ *                             vm_getsparepagedir                           *
+ *===========================================================================*/
+static void *vm_getsparepagedir(phys_bytes *phys)
+{
+       int s;
+       assert(missing_sparedirs >= 0 && missing_sparedirs <= SPAREPAGEDIRS);
+       for(s = 0; s < SPAREPAGEDIRS; s++) {
+               if(sparepagedirs[s].pagedir) {
+                       void *sp;
+                       sp = sparepagedirs[s].pagedir;
+                       *phys = sparepagedirs[s].phys;
+                       sparepagedirs[s].pagedir = NULL;
+                       missing_sparedirs++;
+                       assert(missing_sparedirs >= 0 && missing_sparedirs <= SPAREPAGEDIRS);
+                       return sp;
+               }
+       }
+       return NULL;
+}
+#endif
+
 /*===========================================================================*
  *                             vm_checkspares                               *
  *===========================================================================*/
@@ -262,6 +363,34 @@ static void *vm_checkspares(void)
        return NULL;
 }
 
+#if defined(__arm__)
+/*===========================================================================*
+ *                             vm_checksparedirs                            *
+ *===========================================================================*/
+static void *vm_checksparedirs(void)
+{
+       int s, n = 0;
+       static int total = 0, worst = 0;
+       assert(missing_sparedirs >= 0 && missing_sparedirs <= SPAREPAGEDIRS);
+       for(s = 0; s < SPAREPAGEDIRS && missing_sparedirs > 0; s++)
+           if(!sparepagedirs[s].pagedir) {
+               n++;
+               if((sparepagedirs[s].pagedir = vm_allocpage(&sparepagedirs[s].phys,
+                       VMP_SPARE))) {
+                       missing_sparedirs--;
+                       assert(missing_sparedirs >= 0);
+                       assert(missing_sparedirs <= SPAREPAGEDIRS);
+               } else {
+                       printf("VM: warning: couldn't get new spare pagedir\n");
+               }
+       }
+       if(worst < n) worst = n;
+       total += n;
+
+       return NULL;
+}
+
+#endif
 static int pt_init_done;
 
 /*===========================================================================*
@@ -276,6 +405,9 @@ void *vm_allocpage(phys_bytes *phys, int reason)
        int r;
        static int level = 0;
        void *ret;
+#if defined(__arm__)
+       u32_t mem_bytes, mem_clicks, mem_flags;
+#endif
 
        pt = &vmprocess->vm_pt;
        assert(reason >= 0 && reason < VMP_CATEGORIES);
@@ -285,9 +417,22 @@ void *vm_allocpage(phys_bytes *phys, int reason)
        assert(level >= 1);
        assert(level <= 2);
 
+#if defined(__i386__)
        if((level > 1) || !pt_init_done) {
+#elif defined(__arm__)
+       if(level > 1 || !pt_init_done) {
+#endif
                void *s;
+#if defined(__i386__)
                s=vm_getsparepage(phys);
+#elif defined(__arm__)
+
+               if (reason == VMP_PAGEDIR)
+                       s=vm_getsparepagedir(phys);
+               else
+                       s=vm_getsparepage(phys);
+
+#endif
                level--;
                if(!s) {
                        util_stacktrace();
@@ -297,10 +442,25 @@ void *vm_allocpage(phys_bytes *phys, int reason)
                return s;
        }
 
+#if defined(__arm__)
+       if (reason == VMP_PAGEDIR) {
+               mem_bytes = ARCH_PAGEDIR_SIZE;
+               mem_flags = PAF_ALIGN16K;
+       } else {
+               mem_bytes = VM_PAGE_SIZE;
+               mem_flags = 0;
+       }
+       mem_clicks = mem_bytes / VM_PAGE_SIZE * CLICKSPERPAGE;
+
+#endif
        /* VM does have a pagetable, so get a page and map it in there.
         * Where in our virtual address space can we put it?
         */
+#if defined(__i386__)
        loc = findhole();
+#elif defined(__arm__)
+       loc = findhole(mem_bytes / VM_PAGE_SIZE);
+#endif
        if(loc == NO_MEM) {
                level--;
                printf("VM: vm_allocpage: findhole failed\n");
@@ -310,7 +470,11 @@ void *vm_allocpage(phys_bytes *phys, int reason)
        /* Allocate page of memory for use by VM. As VM
         * is trusted, we don't have to pre-clear it.
         */
+#if defined(__i386__)
        if((newpage = alloc_mem(CLICKSPERPAGE, 0)) == NO_MEM) {
+#elif defined(__arm__)
+       if((newpage = alloc_mem(mem_clicks, mem_flags)) == NO_MEM) {
+#endif
                level--;
                printf("VM: vm_allocpage: alloc_mem failed\n");
                return NULL;
@@ -319,9 +483,16 @@ void *vm_allocpage(phys_bytes *phys, int reason)
        *phys = CLICK2ABS(newpage);
 
        /* Map this page into our address space. */
-       if((r=pt_writemap(vmprocess, pt, loc, *phys, I386_PAGE_SIZE,
-               I386_VM_PRESENT | I386_VM_USER | I386_VM_WRITE, 0)) != OK) {
+#if defined(__i386__)
+       if((r=pt_writemap(vmprocess, pt, loc, *phys, VM_PAGE_SIZE,
+               ARCH_VM_PTE_PRESENT | ARCH_VM_PTE_USER | ARCH_VM_PTE_RW, 0)) != OK) {
                free_mem(newpage, CLICKSPERPAGE);
+#elif defined(__arm__)
+       if((r=pt_writemap(vmprocess, pt, loc, *phys, mem_bytes,
+               ARCH_VM_PTE_PRESENT | ARCH_VM_PTE_USER | ARCH_VM_PTE_RW |
+               ARM_VM_PTE_WB | ARM_VM_PTE_SHAREABLE, 0)) != OK) {
+               free_mem(newpage, mem_clicks);
+#endif
                printf("vm_allocpage writemap failed\n");
                level--;
                return NULL;
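
vm_allocpage() now sizes its request by allocation reason: an ARM first-level page directory is ARCH_PAGEDIR_SIZE (16KB) and must be 16KB-aligned, hence PAF_ALIGN16K, while every other allocation stays a single page. A quick check of the click arithmetic, assuming the usual 4KB page and the CLICK_SIZE == VM_PAGE_SIZE equality enforced earlier in this file:

    #include <assert.h>

    #define VM_PAGE_SIZE      4096
    #define CLICK_SIZE        4096                  /* equal to the page size, as checked above */
    #define CLICKSPERPAGE     (VM_PAGE_SIZE / CLICK_SIZE)
    #define ARCH_PAGEDIR_SIZE (4 * VM_PAGE_SIZE)    /* ARM first-level table: 16KB */

    int main(void)
    {
        int pagedir_clicks = ARCH_PAGEDIR_SIZE / VM_PAGE_SIZE * CLICKSPERPAGE;
        int page_clicks    = VM_PAGE_SIZE / VM_PAGE_SIZE * CLICKSPERPAGE;

        assert(pagedir_clicks == 4);    /* VMP_PAGEDIR: four clicks, 16KB-aligned */
        assert(page_clicks == 1);       /* any other reason: a single click */
        return 0;
    }
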
@@ -348,18 +519,23 @@ void vm_pagelock(void *vir, int lockflag)
 /* Mark a page allocated by vm_allocpage() unwritable, i.e. only for VM. */
        vir_bytes m = (vir_bytes) vir;
        int r;
-       u32_t flags = I386_VM_PRESENT | I386_VM_USER;
+       u32_t flags = ARCH_VM_PTE_PRESENT | ARCH_VM_PTE_USER;
        pt_t *pt;
 
        pt = &vmprocess->vm_pt;
 
-       assert(!(m % I386_PAGE_SIZE));
+       assert(!(m % VM_PAGE_SIZE));
 
        if(!lockflag)
-               flags |= I386_VM_WRITE;
+               flags |= ARCH_VM_PTE_RW;
+#if defined(__arm__)
+       else
+               flags |= ARCH_VM_PTE_RO;
+       flags |= ARM_VM_PTE_WB | ARM_VM_PTE_SHAREABLE;
+#endif
 
        /* Update flags. */
-       if((r=pt_writemap(vmprocess, pt, m, 0, I386_PAGE_SIZE,
+       if((r=pt_writemap(vmprocess, pt, m, 0, VM_PAGE_SIZE,
                flags, WMF_OVERWRITE | WMF_WRITEFLAGSONLY)) != OK) {
                panic("vm_lockpage: pt_writemap failed");
        }
@@ -380,29 +556,42 @@ int vm_addrok(void *vir, int writeflag)
        int pde, pte;
        vir_bytes v = (vir_bytes) vir;
 
+#if defined(__i386__)
        pde = I386_VM_PDE(v);
        pte = I386_VM_PTE(v);
+#elif defined(__arm__)
+       pde = ARM_VM_PDE(v);
+       pte = ARM_VM_PTE(v);
+#endif
 
-       if(!(pt->pt_dir[pde] & I386_VM_PRESENT)) {
+       if(!(pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT)) {
                printf("addr not ok: missing pde %d\n", pde);
                return 0;
        }
 
+#if defined(__i386__)
        if(writeflag &&
-               !(pt->pt_dir[pde] & I386_VM_WRITE)) {
+               !(pt->pt_dir[pde] & ARCH_VM_PTE_RW)) {
                printf("addr not ok: pde %d present but pde unwritable\n", pde);
                return 0;
        }
 
-       if(!(pt->pt_pt[pde][pte] & I386_VM_PRESENT)) {
+#endif
+       if(!(pt->pt_pt[pde][pte] & ARCH_VM_PTE_PRESENT)) {
                printf("addr not ok: missing pde %d / pte %d\n",
                        pde, pte);
                return 0;
        }
 
+#if defined(__i386__)
        if(writeflag &&
-               !(pt->pt_pt[pde][pte] & I386_VM_WRITE)) {
+               !(pt->pt_pt[pde][pte] & ARCH_VM_PTE_RW)) {
                printf("addr not ok: pde %d / pte %d present but unwritable\n",
+#elif defined(__arm__)
+       if(!writeflag &&
+               !(pt->pt_pt[pde][pte] & ARCH_VM_PTE_RO)) {
+               printf("addr not ok: pde %d / pte %d present but writable\n",
+#endif
                        pde, pte);
                return 0;
        }
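
Note the inverted sense in the ARM branch of vm_addrok() above: i386 entries carry a write-enable bit, while the ARM entries used here carry a read-only bit, so writable means that bit is clear. A pair of hypothetical helpers with illustrative flag values captures the difference:

    #include <stdint.h>

    /* Illustrative flag values only; the real ones come from <machine/vm.h>. */
    #define X86_PTE_RW  0x002u   /* set means the page is writable  */
    #define ARM_PTE_RO  0x200u   /* set means the page is read-only */

    static inline int pte_writable_i386(uint32_t pte) { return (pte & X86_PTE_RW) != 0; }
    static inline int pte_writable_arm(uint32_t pte)  { return (pte & ARM_PTE_RO) == 0; }
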
@@ -420,28 +609,33 @@ static int pt_ptalloc(pt_t *pt, int pde, u32_t flags)
        phys_bytes pt_phys;
 
        /* Argument must make sense. */
-       assert(pde >= 0 && pde < I386_VM_DIR_ENTRIES);
+       assert(pde >= 0 && pde < ARCH_VM_DIR_ENTRIES);
        assert(!(flags & ~(PTF_ALLFLAGS)));
 
        /* We don't expect to overwrite page directory entry, nor
         * storage for the page table.
         */
-       assert(!(pt->pt_dir[pde] & I386_VM_PRESENT));
+       assert(!(pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT));
        assert(!pt->pt_pt[pde]);
 
        /* Get storage for the page table. */
         if(!(pt->pt_pt[pde] = vm_allocpage(&pt_phys, VMP_PAGETABLE)))
                return ENOMEM;
 
-       for(i = 0; i < I386_VM_PT_ENTRIES; i++)
+       for(i = 0; i < ARCH_VM_PT_ENTRIES; i++)
                pt->pt_pt[pde][i] = 0;  /* Empty entry. */
 
        /* Make page directory entry.
         * The PDE is always 'present,' 'writable,' and 'user accessible,'
         * relying on the PTE for protection.
         */
-       pt->pt_dir[pde] = (pt_phys & I386_VM_ADDR_MASK) | flags
-               | I386_VM_PRESENT | I386_VM_USER | I386_VM_WRITE;
+#if defined(__i386__)
+       pt->pt_dir[pde] = (pt_phys & ARCH_VM_ADDR_MASK) | flags
+               | ARCH_VM_PDE_PRESENT | ARCH_VM_PTE_USER | ARCH_VM_PTE_RW;
+#elif defined(__arm__)
+       pt->pt_dir[pde] = (pt_phys & ARCH_VM_PDE_MASK)
+               | ARCH_VM_PDE_PRESENT | ARM_VM_PDE_DOMAIN;
+#endif
 
        return OK;
 }
@@ -455,15 +649,20 @@ int pt_ptalloc_in_range(pt_t *pt, vir_bytes start, vir_bytes end,
 /* Allocate all the page tables in the range specified. */
        int pde, first_pde, last_pde;
 
+#if defined(__i386__)
        first_pde = I386_VM_PDE(start);
        last_pde = I386_VM_PDE(end-1);
+#elif defined(__arm__)
+       first_pde = ARM_VM_PDE(start);
+       last_pde = ARM_VM_PDE(end-1);
+#endif
        assert(first_pde >= 0);
-       assert(last_pde < I386_VM_DIR_ENTRIES);
+       assert(last_pde < ARCH_VM_DIR_ENTRIES);
 
        /* Scan all page-directory entries in the range. */
        for(pde = first_pde; pde <= last_pde; pde++) {
-               assert(!(pt->pt_dir[pde] & I386_VM_BIGPAGE));
-               if(!(pt->pt_dir[pde] & I386_VM_PRESENT)) {
+               assert(!(pt->pt_dir[pde] & ARCH_VM_BIGPAGE));
+               if(!(pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT)) {
                        int r;
                        if(verify) {
                                printf("pt_ptalloc_in_range: no pde %d\n", pde);
@@ -482,7 +681,7 @@ int pt_ptalloc_in_range(pt_t *pt, vir_bytes start, vir_bytes end,
                        }
                }
                assert(pt->pt_dir[pde]);
-               assert(pt->pt_dir[pde] & I386_VM_PRESENT);
+               assert(pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT);
        }
 
        return OK;
@@ -495,12 +694,21 @@ static char *ptestr(u32_t pte)
 }
 
        static char str[30];
-       if(!(pte & I386_VM_PRESENT)) {
+       if(!(pte & ARCH_VM_PTE_PRESENT)) {
                return "not present";
        }
        str[0] = '\0';
-       FLAG(I386_VM_WRITE, "W");
-       FLAG(I386_VM_USER, "U");
+#if defined(__i386__)
+       FLAG(ARCH_VM_PTE_RW, "W");
+#elif defined(__arm__)
+       if(pte & ARCH_VM_PTE_RO) {
+           strcat(str, "R ");
+       } else {
+           strcat(str, "W ");
+       }
+#endif
+       FLAG(ARCH_VM_PTE_USER, "U");
+#if defined(__i386__)
        FLAG(I386_VM_PWT, "PWT");
        FLAG(I386_VM_PCD, "PCD");
        FLAG(I386_VM_ACC, "ACC");
@@ -510,6 +718,12 @@ static char *ptestr(u32_t pte)
        FLAG(I386_VM_PTAVAIL1, "AV1");
        FLAG(I386_VM_PTAVAIL2, "AV2");
        FLAG(I386_VM_PTAVAIL3, "AV3");
+#elif defined(__arm__)
+       FLAG(ARM_VM_PTE_SUPER, "S");
+       FLAG(ARM_VM_PTE_SHAREABLE, "SH");
+       FLAG(ARM_VM_PTE_WB, "WB");
+       FLAG(ARM_VM_PTE_WT, "WT");
+#endif
 
        return str;
 }
@@ -531,28 +745,46 @@ int pt_map_in_range(struct vmproc *src_vmp, struct vmproc *dst_vmp,
        dst_pt = &dst_vmp->vm_pt;
 
        end = end ? end : VM_DATATOP;
-       assert(start % I386_PAGE_SIZE == 0);
-       assert(end % I386_PAGE_SIZE == 0);
+       assert(start % VM_PAGE_SIZE == 0);
+       assert(end % VM_PAGE_SIZE == 0);
+#if defined(__i386__)
        assert(start <= end);
-       assert(I386_VM_PDE(end) < I386_VM_DIR_ENTRIES);
+       assert(I386_VM_PDE(end) < ARCH_VM_DIR_ENTRIES);
+#elif defined(__arm__)
+       assert(ARM_VM_PDE(start) >= 0 && start <= end);
+       assert(ARM_VM_PDE(end) < ARCH_VM_DIR_ENTRIES);
+#endif
 
 #if LU_DEBUG
        printf("VM: pt_map_in_range: src = %d, dst = %d\n",
                src_vmp->vm_endpoint, dst_vmp->vm_endpoint);
        printf("VM: pt_map_in_range: transferring from 0x%08x (pde %d pte %d) to 0x%08x (pde %d pte %d)\n",
+#if defined(__i386__)
                start, I386_VM_PDE(start), I386_VM_PTE(start),
                end, I386_VM_PDE(end), I386_VM_PTE(end));
+#elif defined(__arm__)
+               start, ARM_VM_PDE(start), ARM_VM_PTE(start),
+               end, ARM_VM_PDE(end), ARM_VM_PTE(end));
+#endif
 #endif
 
        /* Scan all page-table entries in the range. */
-       for(viraddr = start; viraddr <= end; viraddr += I386_PAGE_SIZE) {
+       for(viraddr = start; viraddr <= end; viraddr += VM_PAGE_SIZE) {
+#if defined(__i386__)
                pde = I386_VM_PDE(viraddr);
-               if(!(pt->pt_dir[pde] & I386_VM_PRESENT)) {
+#elif defined(__arm__)
+               pde = ARM_VM_PDE(viraddr);
+#endif
+               if(!(pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT)) {
                        if(viraddr == VM_DATATOP) break;
                        continue;
                }
+#if defined(__i386__)
                pte = I386_VM_PTE(viraddr);
-               if(!(pt->pt_pt[pde][pte] & I386_VM_PRESENT)) {
+#elif defined(__arm__)
+               pte = ARM_VM_PTE(viraddr);
+#endif
+               if(!(pt->pt_pt[pde][pte] & ARCH_VM_PTE_PRESENT)) {
                        if(viraddr == VM_DATATOP) break;
                        continue;
                }
@@ -589,9 +821,15 @@ int pt_ptmap(struct vmproc *src_vmp, struct vmproc *dst_vmp)
 
        /* Transfer mapping to the page directory. */
        viraddr = (vir_bytes) pt->pt_dir;
-       physaddr = pt->pt_dir_phys & I386_VM_ADDR_MASK;
-       if((r=pt_writemap(dst_vmp, &dst_vmp->vm_pt, viraddr, physaddr, I386_PAGE_SIZE,
-               I386_VM_PRESENT | I386_VM_USER | I386_VM_WRITE,
+       physaddr = pt->pt_dir_phys & ARCH_VM_ADDR_MASK;
+#if defined(__i386__)
+       if((r=pt_writemap(dst_vmp, &dst_vmp->vm_pt, viraddr, physaddr, VM_PAGE_SIZE,
+               ARCH_VM_PTE_PRESENT | ARCH_VM_PTE_USER | ARCH_VM_PTE_RW,
+#elif defined(__arm__)
+       if((r=pt_writemap(dst_vmp, &dst_vmp->vm_pt, viraddr, physaddr, ARCH_PAGEDIR_SIZE,
+               ARCH_VM_PTE_PRESENT | ARCH_VM_PTE_USER | ARCH_VM_PTE_RW |
+               ARM_VM_PTE_WB | ARM_VM_PTE_SHAREABLE,
+#endif
                WMF_OVERWRITE)) != OK) {
                return r;
        }
@@ -601,16 +839,25 @@ int pt_ptmap(struct vmproc *src_vmp, struct vmproc *dst_vmp)
 #endif
 
        /* Scan all non-reserved page-directory entries. */
-       for(pde=0; pde < I386_VM_DIR_ENTRIES; pde++) {
-               if(!(pt->pt_dir[pde] & I386_VM_PRESENT)) {
+       for(pde=0; pde < ARCH_VM_DIR_ENTRIES; pde++) {
+               if(!(pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT)) {
                        continue;
                }
 
                /* Transfer mapping to the page table. */
                viraddr = (vir_bytes) pt->pt_pt[pde];
-               physaddr = pt->pt_dir[pde] & I386_VM_ADDR_MASK;
-               if((r=pt_writemap(dst_vmp, &dst_vmp->vm_pt, viraddr, physaddr, I386_PAGE_SIZE,
-                       I386_VM_PRESENT | I386_VM_USER | I386_VM_WRITE,
+#if defined(__i386__)
+               physaddr = pt->pt_dir[pde] & ARCH_VM_ADDR_MASK;
+#elif defined(__arm__)
+               physaddr = pt->pt_dir[pde] & ARCH_VM_PDE_MASK;
+#endif
+               if((r=pt_writemap(dst_vmp, &dst_vmp->vm_pt, viraddr, physaddr, VM_PAGE_SIZE,
+#if defined(__i386__)
+                       ARCH_VM_PTE_PRESENT | ARCH_VM_PTE_USER | ARCH_VM_PTE_RW,
+#elif defined(__arm__)
+                       ARCH_VM_PTE_PRESENT | ARCH_VM_PTE_USER | ARCH_VM_PTE_RW |
+                       ARM_VM_PTE_WB | ARM_VM_PTE_SHAREABLE,
+#endif
                        WMF_OVERWRITE)) != OK) {
                        return r;
                }
@@ -663,23 +910,23 @@ int pt_writemap(struct vmproc * vmp,
        if(writemapflags & WMF_VERIFY)
                verify = 1;
 
-       assert(!(bytes % I386_PAGE_SIZE));
+       assert(!(bytes % VM_PAGE_SIZE));
        assert(!(flags & ~(PTF_ALLFLAGS)));
 
-       pages = bytes / I386_PAGE_SIZE;
+       pages = bytes / VM_PAGE_SIZE;
 
        /* MAP_NONE means to clear the mapping. It doesn't matter
-        * what's actually written into the PTE if I386_VM_PRESENT
+        * what's actually written into the PTE if PRESENT
         * isn't on, so we can just write MAP_NONE into it.
         */
-       assert(physaddr == MAP_NONE || (flags & I386_VM_PRESENT));
+       assert(physaddr == MAP_NONE || (flags & ARCH_VM_PTE_PRESENT));
        assert(physaddr != MAP_NONE || !flags);
 
        /* First make sure all the necessary page tables are allocated,
         * before we start writing in any of them, because it's a pain
         * to undo our work properly.
         */
-       ret = pt_ptalloc_in_range(pt, v, v + I386_PAGE_SIZE*pages, flags, verify);
+       ret = pt_ptalloc_in_range(pt, v, v + VM_PAGE_SIZE*pages, flags, verify);
        if(ret != OK) {
                printf("VM: writemap: pt_ptalloc_in_range failed\n");
                goto resume_exit;
@@ -688,18 +935,23 @@ int pt_writemap(struct vmproc * vmp,
        /* Now write in them. */
        for(p = 0; p < pages; p++) {
                u32_t entry;
+#if defined(__i386__)
                int pde = I386_VM_PDE(v);
                int pte = I386_VM_PTE(v);
+#elif defined(__arm__)
+               int pde = ARM_VM_PDE(v);
+               int pte = ARM_VM_PTE(v);
+#endif
 
-               assert(!(v % I386_PAGE_SIZE));
-               assert(pte >= 0 && pte < I386_VM_PT_ENTRIES);
-               assert(pde >= 0 && pde < I386_VM_DIR_ENTRIES);
+               assert(!(v % VM_PAGE_SIZE));
+               assert(pte >= 0 && pte < ARCH_VM_PT_ENTRIES);
+               assert(pde >= 0 && pde < ARCH_VM_DIR_ENTRIES);
 
                /* Page table has to be there. */
-               assert(pt->pt_dir[pde] & I386_VM_PRESENT);
+               assert(pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT);
 
                /* We do not expect it to be a bigpage. */
-               assert(!(pt->pt_dir[pde] & I386_VM_BIGPAGE));
+               assert(!(pt->pt_dir[pde] & ARCH_VM_BIGPAGE));
 
                /* Make sure page directory entry for this page table
                 * is marked present and page table entry is available.
@@ -709,10 +961,14 @@ int pt_writemap(struct vmproc * vmp,
 #if SANITYCHECKS
                /* We don't expect to overwrite a page. */
                if(!(writemapflags & (WMF_OVERWRITE|WMF_VERIFY)))
-                       assert(!(pt->pt_pt[pde][pte] & I386_VM_PRESENT));
+                       assert(!(pt->pt_pt[pde][pte] & ARCH_VM_PTE_PRESENT));
 #endif
                if(writemapflags & (WMF_WRITEFLAGSONLY|WMF_FREE)) {
-                       physaddr = pt->pt_pt[pde][pte] & I386_VM_ADDR_MASK;
+#if defined(__i386__)
+                       physaddr = pt->pt_pt[pde][pte] & ARCH_VM_ADDR_MASK;
+#elif defined(__arm__)
+                       physaddr = pt->pt_pt[pde][pte] & ARM_VM_PTE_MASK;
+#endif
                }
 
                if(writemapflags & WMF_FREE) {
@@ -720,21 +976,32 @@ int pt_writemap(struct vmproc * vmp,
                }
 
                /* Entry we will write. */
-               entry = (physaddr & I386_VM_ADDR_MASK) | flags;
+#if defined(__i386__)
+               entry = (physaddr & ARCH_VM_ADDR_MASK) | flags;
+#elif defined(__arm__)
+               entry = (physaddr & ARM_VM_PTE_MASK) | flags;
+#endif
 
                if(verify) {
                        u32_t maskedentry;
                        maskedentry = pt->pt_pt[pde][pte];
+#if defined(__i386__)
                        maskedentry &= ~(I386_VM_ACC|I386_VM_DIRTY);
+#endif
                        /* Verify pagetable entry. */
-                       if(entry & I386_VM_WRITE) {
+                       if(entry & ARCH_VM_PTE_RW) {
                                /* If we expect a writable page, allow a readonly page. */
-                               maskedentry |= I386_VM_WRITE;
+                               maskedentry |= ARCH_VM_PTE_RW;
                        }
                        if(maskedentry != entry) {
                                printf("pt_writemap: mismatch: ");
-                               if((entry & I386_VM_ADDR_MASK) !=
-                                       (maskedentry & I386_VM_ADDR_MASK)) {
+#if defined(__i386__)
+                               if((entry & ARCH_VM_ADDR_MASK) !=
+                                       (maskedentry & ARCH_VM_ADDR_MASK)) {
+#elif defined(__arm__)
+                               if((entry & ARM_VM_PTE_MASK) !=
+                                       (maskedentry & ARM_VM_PTE_MASK)) {
+#endif
                                        printf("pt_writemap: physaddr mismatch (0x%lx, 0x%lx); ",
                                                (long)entry, (long)maskedentry);
                                } else printf("phys ok; ");
@@ -751,8 +1018,8 @@ int pt_writemap(struct vmproc * vmp,
                        pt->pt_pt[pde][pte] = entry;
                }
 
-               physaddr += I386_PAGE_SIZE;
-               v += I386_PAGE_SIZE;
+               physaddr += VM_PAGE_SIZE;
+               v += VM_PAGE_SIZE;
        }
 
 resume_exit:
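
In the WMF_VERIFY path of pt_writemap(), the entry already in the page table is compared against the one the caller would have written, after masking out bits the hardware may set on its own (the i386 accessed and dirty bits) and after tolerating a read-only page where a writable one was requested. A compact sketch of that comparison, using made-up bit values:

    #include <stdint.h>

    /* Made-up bit values; the real ones are the I386_VM_ and ARCH_VM_ flags. */
    #define PTE_RW    0x002u
    #define PTE_ACC   0x020u   /* hardware-set "accessed" bit (i386) */
    #define PTE_DIRTY 0x040u   /* hardware-set "dirty" bit (i386)    */

    /* Nonzero when the entry already in the page table matches what the
     * caller was about to write.
     */
    static int pte_matches(uint32_t existing, uint32_t expected)
    {
        uint32_t masked = existing & ~(PTE_ACC | PTE_DIRTY);

        if (expected & PTE_RW)
            masked |= PTE_RW;   /* a writable request tolerates a read-only page */

        return masked == expected;
    }
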
@@ -776,36 +1043,45 @@ int pt_checkrange(pt_t *pt, vir_bytes v,  size_t bytes,
 {
        int p, pages;
 
-       assert(!(bytes % I386_PAGE_SIZE));
+       assert(!(bytes % VM_PAGE_SIZE));
 
-       pages = bytes / I386_PAGE_SIZE;
+       pages = bytes / VM_PAGE_SIZE;
 
        for(p = 0; p < pages; p++) {
+#if defined(__i386__)
                int pde = I386_VM_PDE(v);
                int pte = I386_VM_PTE(v);
+#elif defined(__arm__)
+               int pde = ARM_VM_PDE(v);
+               int pte = ARM_VM_PTE(v);
+#endif
 
-               assert(!(v % I386_PAGE_SIZE));
-               assert(pte >= 0 && pte < I386_VM_PT_ENTRIES);
-               assert(pde >= 0 && pde < I386_VM_DIR_ENTRIES);
+               assert(!(v % VM_PAGE_SIZE));
+               assert(pte >= 0 && pte < ARCH_VM_PT_ENTRIES);
+               assert(pde >= 0 && pde < ARCH_VM_DIR_ENTRIES);
 
                /* Page table has to be there. */
-               if(!(pt->pt_dir[pde] & I386_VM_PRESENT))
+               if(!(pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT))
                        return EFAULT;
 
                /* Make sure page directory entry for this page table
                 * is marked present and page table entry is available.
                 */
-               assert((pt->pt_dir[pde] & I386_VM_PRESENT) && pt->pt_pt[pde]);
+               assert((pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT) && pt->pt_pt[pde]);
 
-               if(!(pt->pt_pt[pde][pte] & I386_VM_PRESENT)) {
+               if(!(pt->pt_pt[pde][pte] & ARCH_VM_PTE_PRESENT)) {
                        return EFAULT;
                }
 
-               if(write && !(pt->pt_pt[pde][pte] & I386_VM_WRITE)) {
+#if defined(__i386__)
+               if(write && !(pt->pt_pt[pde][pte] & ARCH_VM_PTE_RW)) {
+#elif defined(__arm__)
+               if(write && (pt->pt_pt[pde][pte] & ARCH_VM_PTE_RO)) {
+#endif
                        return EFAULT;
                }
 
-               v += I386_PAGE_SIZE;
+               v += VM_PAGE_SIZE;
        }
 
        return OK;
@@ -816,7 +1092,7 @@ int pt_checkrange(pt_t *pt, vir_bytes v,  size_t bytes,
  *===========================================================================*/
 int pt_new(pt_t *pt)
 {
-/* Allocate a pagetable root. On i386, allocate a page-aligned page directory
+/* Allocate a pagetable root. Allocate a page-aligned page directory
  * and set them to 0 (indicating no page tables are allocated). Lookup
  * its physical address as we'll need that in the future. Verify it's
  * page-aligned.
@@ -833,9 +1109,12 @@ int pt_new(pt_t *pt)
           !(pt->pt_dir = vm_allocpage((phys_bytes *)&pt->pt_dir_phys, VMP_PAGEDIR))) {
                return ENOMEM;
        }
+#if defined(__arm__)
+       assert(!((u32_t)pt->pt_dir_phys % ARCH_PAGEDIR_SIZE));
+#endif
 
-       for(i = 0; i < I386_VM_DIR_ENTRIES; i++) {
-               pt->pt_dir[i] = 0; /* invalid entry (I386_VM_PRESENT bit = 0) */
+       for(i = 0; i < ARCH_VM_DIR_ENTRIES; i++) {
+               pt->pt_dir[i] = 0; /* invalid entry (PRESENT bit = 0) */
                pt->pt_pt[i] = NULL;
        }
 
@@ -852,7 +1131,7 @@ int pt_new(pt_t *pt)
 static int freepde(void)
 {
        int p = kernel_boot_info.freepde_start++;
-       assert(kernel_boot_info.freepde_start < I386_VM_DIR_ENTRIES);
+       assert(kernel_boot_info.freepde_start < ARCH_VM_DIR_ENTRIES);
        return p;
 }
 
@@ -863,11 +1142,20 @@ void pt_init(void)
 {
         pt_t *newpt;
         int s, r, p;
+#if defined(__i386__)
        int global_bit_ok = 0;
+#endif
        vir_bytes sparepages_mem;
-       static u32_t currentpagedir[I386_VM_DIR_ENTRIES];
+#if defined(__arm__)
+       vir_bytes sparepagedirs_mem;
+#endif
+       static u32_t currentpagedir[ARCH_VM_DIR_ENTRIES];
        int m = kernel_boot_info.kern_mod;
+#if defined(__i386__)
        u32_t mypdbr; /* Page Directory Base Register (cr3) value */
+#elif defined(__arm__)
+       u32_t myttbr;
+#endif
 
        /* Find what the physical location of the kernel is. */
        assert(m >= 0);
@@ -875,30 +1163,62 @@ void pt_init(void)
        assert(kernel_boot_info.mods_with_kernel < MULTIBOOT_MAX_MODS);
        kern_mb_mod = &kernel_boot_info.module_list[m];
        kern_size = kern_mb_mod->mod_end - kern_mb_mod->mod_start;
-       assert(!(kern_mb_mod->mod_start % I386_BIG_PAGE_SIZE));
-       assert(!(kernel_boot_info.vir_kern_start % I386_BIG_PAGE_SIZE));
-       kern_start_pde = kernel_boot_info.vir_kern_start / I386_BIG_PAGE_SIZE;
+       assert(!(kern_mb_mod->mod_start % ARCH_BIG_PAGE_SIZE));
+       assert(!(kernel_boot_info.vir_kern_start % ARCH_BIG_PAGE_SIZE));
+       kern_start_pde = kernel_boot_info.vir_kern_start / ARCH_BIG_PAGE_SIZE;
 
         /* Get ourselves spare pages. */
         sparepages_mem = (vir_bytes) static_sparepages;
-       assert(!(sparepages_mem % I386_PAGE_SIZE));
+       assert(!(sparepages_mem % VM_PAGE_SIZE));
+
+#if defined(__arm__)
+        /* Get ourselves spare pagedirs. */
+       sparepagedirs_mem = (vir_bytes) static_sparepagedirs;
+       assert(!(sparepagedirs_mem % ARCH_PAGEDIR_SIZE));
 
+#endif
        /* Spare pages are used to allocate memory before VM has its own page
         * table that things (i.e. arbitrary physical memory) can be mapped into.
         * We get it by pre-allocating it in our bss (allocated and mapped in by
         * the kernel) in static_sparepages. We also need the physical addresses
         * though; we look them up now so they are ready for use.
         */
+#if defined(__arm__)
+        missing_sparedirs = 0;
+        assert(STATIC_SPAREPAGEDIRS < SPAREPAGEDIRS);
+        for(s = 0; s < SPAREPAGEDIRS; s++) {
+               vir_bytes v = (sparepagedirs_mem + s*ARCH_PAGEDIR_SIZE);;
+               phys_bytes ph;
+               if((r=sys_umap(SELF, VM_D, (vir_bytes) v,
+                       ARCH_PAGEDIR_SIZE, &ph)) != OK)
+                               panic("pt_init: sys_umap failed: %d", r);
+               if(s >= STATIC_SPAREPAGEDIRS) {
+                       sparepagedirs[s].pagedir = NULL;
+                       missing_sparedirs++;
+                       continue;
+               }
+               sparepagedirs[s].pagedir = (void *) v;
+               sparepagedirs[s].phys = ph;
+        }
+#endif
 
         missing_spares = 0;
         assert(STATIC_SPAREPAGES < SPAREPAGES);
         for(s = 0; s < SPAREPAGES; s++) {
-               vir_bytes v = (sparepages_mem + s*I386_PAGE_SIZE);;
+               vir_bytes v = (sparepages_mem + s*VM_PAGE_SIZE);;
                phys_bytes ph;
+#if defined(__i386__)
                if((r=sys_umap(SELF, VM_D, (vir_bytes) v,
-                       I386_PAGE_SIZE*SPAREPAGES, &ph)) != OK)
+#elif defined(__arm__)
+               if((r=sys_umap(SELF, VM_D, (vir_bytes) v,
+#endif
+                       VM_PAGE_SIZE*SPAREPAGES, &ph)) != OK)
                                panic("pt_init: sys_umap failed: %d", r);
+#if defined(__i386__)
                if(s >= STATIC_SPAREPAGES) {
+#elif defined(__arm__)
+               if(s >= STATIC_SPAREPAGES) {
+#endif
                        sparepages[s].page = NULL;
                        missing_spares++;
                        continue;
@@ -907,14 +1227,21 @@ void pt_init(void)
                sparepages[s].phys = ph;
         }
 
+#if defined(__i386__)
        /* global bit and 4MB pages available? */
        global_bit_ok = _cpufeature(_CPUF_I386_PGE);
        bigpage_ok = _cpufeature(_CPUF_I386_PSE);
+#elif defined(__arm__)
+       /* 1MB pages available? */
+       bigpage_ok = 1;
+#endif
 
+#if defined(__i386__)
        /* Set bit for PTE's and PDE's if available. */
        if(global_bit_ok)
                global_bit = I386_VM_GLOBAL;
 
+#endif
        /* Allocate us a page table in which to remember page directory
         * pointers.
         */
@@ -922,7 +1249,7 @@ void pt_init(void)
                VMP_PAGETABLE)))
                 panic("no virt addr for vm mappings");
 
-       memset(page_directories, 0, I386_PAGE_SIZE);
+       memset(page_directories, 0, VM_PAGE_SIZE);
 
        /* Now reserve another pde for kernel's own mappings. */
        {
@@ -932,7 +1259,7 @@ void pt_init(void)
                u32_t offset = 0;
 
                kernmap_pde = freepde();
-               offset = kernmap_pde * I386_BIG_PAGE_SIZE;
+               offset = kernmap_pde * ARCH_BIG_PAGE_SIZE;
 
                while(sys_vmctl_get_mapping(index, &addr, &len,
                        &flags) == OK)  {
@@ -942,20 +1269,40 @@ void pt_init(void)
                        kern_mappings[index].phys_addr = addr;
                        kern_mappings[index].len = len;
                        kern_mappings[index].flags = flags;
+#if defined(__i386__)
                        kern_mappings[index].vir_addr = offset;
+#elif defined(__arm__)
+                       kern_mappings[index].vir_addr = addr;
+#endif
                        kern_mappings[index].flags =
-                               I386_VM_PRESENT;
+                               ARCH_VM_PTE_PRESENT;
                        if(flags & VMMF_UNCACHED)
+#if defined(__i386__)
                                kern_mappings[index].flags |= PTF_NOCACHE;
+#elif defined(__arm__)
+                               kern_mappings[index].flags |= ARM_VM_PTE_DEVICE;
+                       else
+                               kern_mappings[index].flags |=
+                                   ARM_VM_PTE_WB | ARM_VM_PTE_SHAREABLE;
+#endif
                        if(flags & VMMF_USER)
-                               kern_mappings[index].flags |= I386_VM_USER;
+                               kern_mappings[index].flags |= ARCH_VM_PTE_USER;
+#if defined(__arm__)
+                       else
+                               kern_mappings[index].flags |= ARM_VM_PTE_SUPER;
+#endif
                        if(flags & VMMF_WRITE)
-                               kern_mappings[index].flags |= I386_VM_WRITE;
+                               kern_mappings[index].flags |= ARCH_VM_PTE_RW;
+#if defined(__i386__)
                        if(flags & VMMF_GLO)
                                kern_mappings[index].flags |= I386_VM_GLOBAL;
-                       if(addr % I386_PAGE_SIZE)
+#elif defined(__arm__)
+                       else
+                               kern_mappings[index].flags |= ARCH_VM_PTE_RO;
+#endif
+                       if(addr % VM_PAGE_SIZE)
                                panic("VM: addr unaligned: %d", addr);
-                       if(len % I386_PAGE_SIZE)
+                       if(len % VM_PAGE_SIZE)
                                panic("VM: len unaligned: %d", len);
                        vir = offset;
                        if(sys_vmctl_reply_mapping(index, vir) != OK)
@@ -970,8 +1317,13 @@ void pt_init(void)
         * page directories.
         */
        pagedir_pde = freepde();
-       pagedir_pde_val = (page_directories_phys & I386_VM_ADDR_MASK) |
-                       I386_VM_PRESENT | I386_VM_WRITE;
+#if defined(__i386__)
+       pagedir_pde_val = (page_directories_phys & ARCH_VM_ADDR_MASK) |
+                       ARCH_VM_PDE_PRESENT | ARCH_VM_PTE_RW;
+#elif defined(__arm__)
+       pagedir_pde_val = (page_directories_phys & ARCH_VM_PDE_MASK) |
+                       ARCH_VM_PDE_PRESENT | ARM_VM_PDE_DOMAIN;
+#endif
 
        /* Allright. Now. We have to make our own page directory and page tables,
         * that the kernel has already set up, accessible to us. It's easier to
@@ -986,37 +1338,55 @@ void pt_init(void)
                panic("vm pt_new failed");
 
        /* Get our current pagedir so we can see it. */
+#if defined(__i386__)
        if(sys_vmctl_get_pdbr(SELF, &mypdbr) != OK)
+#elif defined(__arm__)
+       if(sys_vmctl_get_pdbr(SELF, &myttbr) != OK)
+#endif
                panic("VM: sys_vmctl_get_pdbr failed");
+#if defined(__i386__)
        if(sys_vircopy(NONE, mypdbr, SELF,
-               (vir_bytes) currentpagedir, I386_PAGE_SIZE) != OK)
+               (vir_bytes) currentpagedir, VM_PAGE_SIZE) != OK)
+#elif defined(__arm__)
+       if(sys_vircopy(NONE, myttbr, SELF,
+               (vir_bytes) currentpagedir, ARCH_PAGEDIR_SIZE) != OK)
+#endif
                panic("VM: sys_vircopy failed");
 
        /* We have mapped in kernel ourselves; now copy mappings for VM
         * that kernel made, including allocations for BSS. Skip identity
         * mapping bits; just map in VM.
         */
-       for(p = 0; p < I386_VM_DIR_ENTRIES; p++) {
+       for(p = 0; p < ARCH_VM_DIR_ENTRIES; p++) {
                u32_t entry = currentpagedir[p];
                phys_bytes ptaddr_kern, ptaddr_us;
 
                /* BIGPAGEs are kernel mapping (do ourselves) or boot
                 * identity mapping (don't want).
                 */
-               if(!(entry & I386_VM_PRESENT)) continue;
-               if((entry & I386_VM_BIGPAGE)) continue;
+               if(!(entry & ARCH_VM_PDE_PRESENT)) continue;
+               if((entry & ARCH_VM_BIGPAGE)) continue;
 
                if(pt_ptalloc(newpt, p, 0) != OK)
                        panic("pt_ptalloc failed");
-               assert(newpt->pt_dir[p] & I386_VM_PRESENT);
-
-               ptaddr_kern = entry & I386_VM_ADDR_MASK;
-               ptaddr_us = newpt->pt_dir[p] & I386_VM_ADDR_MASK;
+               assert(newpt->pt_dir[p] & ARCH_VM_PDE_PRESENT);
+
+#if defined(__i386__)
+               ptaddr_kern = entry & ARCH_VM_ADDR_MASK;
+               ptaddr_us = newpt->pt_dir[p] & ARCH_VM_ADDR_MASK;
+#elif defined(__arm__)
+               ptaddr_kern = entry & ARCH_VM_PDE_MASK;
+               ptaddr_us = newpt->pt_dir[p] & ARCH_VM_PDE_MASK;
+#endif
 
                /* Copy kernel-initialized pagetable contents into our
                 * normally accessible pagetable.
                 */
-                if(sys_abscopy(ptaddr_kern, ptaddr_us, I386_PAGE_SIZE) != OK)
+#if defined(__i386__)
+                if(sys_abscopy(ptaddr_kern, ptaddr_us, VM_PAGE_SIZE) != OK)
+#elif defined(__arm__)
+                if(sys_abscopy(ptaddr_kern, ptaddr_us, ARM_PAGETABLE_SIZE) != OK)
+#endif
                        panic("pt_init: abscopy failed");
        }
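
The loop above copies each kernel-built second-level table into one that VM can reach through its own mappings; on ARM only ARM_PAGETABLE_SIZE bytes are copied because a coarse second-level table is 1KB (256 word-sized entries covering 1MB), whereas an i386 page table fills a whole 4KB page. A small consistency check under those assumed sizes:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        /* Assumed sizes: 4KB pages, 4-byte table entries. */
        unsigned arm_l2_entries  = 256;     /* one ARM coarse table maps 256 * 4KB = 1MB  */
        unsigned i386_pt_entries = 1024;    /* one i386 page table maps 1024 * 4KB = 4MB  */

        assert(arm_l2_entries  * sizeof(uint32_t) == 1024);    /* ARM_PAGETABLE_SIZE: 1KB     */
        assert(i386_pt_entries * sizeof(uint32_t) == 4096);    /* a full VM_PAGE_SIZE on i386 */
        return 0;
    }
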
 
@@ -1038,6 +1408,10 @@ int pt_bind(pt_t *pt, struct vmproc *who)
        int slot;
        u32_t phys;
        void *pdes;
+#if defined(__arm__)
+       int i;
+       int pages_per_pagedir = ARCH_PAGEDIR_SIZE/VM_PAGE_SIZE;
+#endif
 
        /* Basic sanity checks. */
        assert(who);
@@ -1049,19 +1423,42 @@ int pt_bind(pt_t *pt, struct vmproc *who)
        slot = who->vm_slot;
        assert(slot >= 0);
        assert(slot < ELEMENTS(vmproc));
-       assert(slot < I386_VM_PT_ENTRIES);
+#if defined(__i386__)
+       assert(slot < ARCH_VM_PT_ENTRIES);
+#elif defined(__arm__)
+       assert(slot < ARCH_VM_PT_ENTRIES / pages_per_pagedir);
+#endif
 
-       phys = pt->pt_dir_phys & I386_VM_ADDR_MASK;
+#if defined(__i386__)
+       phys = pt->pt_dir_phys & ARCH_VM_ADDR_MASK;
+#elif defined(__arm__)
+       phys = pt->pt_dir_phys & ARM_VM_PTE_MASK;
+#endif
        assert(pt->pt_dir_phys == phys);
+#if defined(__arm__)
+       assert(!(pt->pt_dir_phys % ARCH_PAGEDIR_SIZE));
+#endif
 
        /* Update "page directory pagetable." */
-       page_directories[slot] = phys | I386_VM_PRESENT|I386_VM_WRITE;
+#if defined(__i386__)
+       page_directories[slot] = phys | ARCH_VM_PDE_PRESENT|ARCH_VM_PTE_RW;
+#elif defined(__arm__)
+       for (i = 0; i < pages_per_pagedir; i++)
+           page_directories[slot*pages_per_pagedir+i] =
+               (phys+i*VM_PAGE_SIZE) |
+               ARCH_VM_PTE_PRESENT | ARCH_VM_PTE_RW |
+               ARCH_VM_PTE_USER;
+#endif
 
        /* This is where the PDE's will be visible to the kernel
         * in its address space.
         */
-       pdes = (void *) (pagedir_pde*I386_BIG_PAGE_SIZE + 
-                       slot * I386_PAGE_SIZE);
+       pdes = (void *) (pagedir_pde*ARCH_BIG_PAGE_SIZE + 
+#if defined(__i386__)
+                       slot * VM_PAGE_SIZE);
+#elif defined(__arm__)
+                       slot * ARCH_PAGEDIR_SIZE);
+#endif
 
 #if 0
        printf("VM: slot %d endpoint %d has pde val 0x%lx at kernel address 0x%lx\n",
@@ -1079,7 +1476,7 @@ void pt_free(pt_t *pt)
 /* Free memory associated with this pagetable. */
        int i;
 
-       for(i = 0; i < I386_VM_DIR_ENTRIES; i++)
+       for(i = 0; i < ARCH_VM_DIR_ENTRIES; i++)
                if(pt->pt_pt[i])
                        vm_freepages((vir_bytes) pt->pt_pt[i], 1);
 
@@ -1095,7 +1492,11 @@ int pt_mapkernel(pt_t *pt)
        int kern_pde = kern_start_pde;
        phys_bytes addr, mapped = 0;
 
+#if defined(__i386__)
         /* Any i386 page table needs to map in the kernel address space. */
+#elif defined(__arm__)
+        /* Any ARM page table needs to map in the kernel address space. */
+#endif
        assert(bigpage_ok);
        assert(pagedir_pde >= 0);
        assert(kern_pde >= 0);
@@ -1105,11 +1506,18 @@ int pt_mapkernel(pt_t *pt)
 
        /* Actually mapping in kernel */
        while(mapped < kern_size) {
-               pt->pt_dir[kern_pde] = addr | I386_VM_PRESENT |
-                       I386_VM_BIGPAGE | I386_VM_WRITE | global_bit;
+#if defined(__i386__)
+               pt->pt_dir[kern_pde] = addr | ARCH_VM_PDE_PRESENT |
+                       ARCH_VM_BIGPAGE | ARCH_VM_PTE_RW | global_bit;
+#elif defined(__arm__)
+               pt->pt_dir[kern_pde] = (addr & ARCH_VM_PDE_MASK) |
+                       ARM_VM_SECTION |
+                       ARM_VM_SECTION_DOMAIN | ARM_VM_SECTION_WB |
+                       ARM_VM_SECTION_SHAREABLE | ARM_VM_SECTION_SUPER;
+#endif
                kern_pde++;
-               mapped += I386_BIG_PAGE_SIZE;
-               addr += I386_BIG_PAGE_SIZE;
+               mapped += ARCH_BIG_PAGE_SIZE;
+               addr += ARCH_BIG_PAGE_SIZE;
        }
 
        /* Kernel also wants to know about all page directories. */
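
pt_mapkernel() maps the kernel with big pages on both architectures, 4MB PSE pages on i386 and 1MB sections on ARM, writing one page-directory entry per big page until kern_size is covered. A minimal sketch of that loop, assuming a hypothetical make_bigpage_pde() that builds the arch-specific entry:

    #include <stdint.h>

    #define BIG_PAGE_SIZE (1024u * 1024u)   /* 1MB section on ARM; 4MB with PSE on i386 */

    /* Hypothetical: build one directory entry covering a big page at 'phys'. */
    uint32_t make_bigpage_pde(uint32_t phys);

    void map_kernel(uint32_t *pt_dir, int first_pde, uint32_t kern_phys, uint32_t kern_size)
    {
        uint32_t mapped = 0;

        while (mapped < kern_size) {
            pt_dir[first_pde++] = make_bigpage_pde(kern_phys + mapped);
            mapped += BIG_PAGE_SIZE;
        }
    }
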
@@ -1137,6 +1545,9 @@ int pt_mapkernel(pt_t *pt)
 void pt_cycle(void)
 {
        vm_checkspares();
+#if defined(__arm__)
+       vm_checksparedirs();
+#endif
 }
 
 int get_vm_self_pages(void) { return vm_self_pages; }
index acb12a02af1f66e7962413446ce50f8ed601bbb0..69bb1cd1ddf9cdf616f473729103723f7edf416c 100644 (file)
@@ -7,23 +7,6 @@
 
 #include "vm.h"
 
-/* An i386 pagetable. */
-typedef struct {
-       /* Directory entries in VM addr space - root of page table.  */
-       u32_t *pt_dir;          /* page aligned (I386_VM_DIR_ENTRIES) */
-       u32_t pt_dir_phys;      /* physical address of pt_dir */
-
-       /* Pointers to page tables in VM address space. */
-       u32_t *pt_pt[I386_VM_DIR_ENTRIES];
-
-       /* When looking for a hole in virtual address space, start
-        * looking here. This is in linear addresses, i.e.,
-        * not as the process sees it but the position in the page
-        * page table. This is just a hint.
-        */
-       u32_t pt_virtop;
-} pt_t;
-
 /* Mapping flags. */
 #define PTF_WRITE      I386_VM_WRITE
 #define PTF_READ       I386_VM_READ
@@ -33,17 +16,29 @@ typedef struct {
 #define PTF_MAPALLOC   I386_VM_PTAVAIL1 /* Page allocated by pt code. */
 #define PTF_NOCACHE    (I386_VM_PWT | I386_VM_PCD)
 
+#define ARCH_VM_DIR_ENTRIES    I386_VM_DIR_ENTRIES
+#define ARCH_BIG_PAGE_SIZE     I386_BIG_PAGE_SIZE
+#define ARCH_VM_ADDR_MASK      I386_VM_ADDR_MASK
+#define ARCH_VM_PAGE_PRESENT    I386_VM_PRESENT
+#define ARCH_VM_PDE_MASK        I386_VM_PDE_MASK
+#define ARCH_VM_PDE_PRESENT     I386_VM_PRESENT
+#define ARCH_VM_PTE_PRESENT    I386_VM_PRESENT
+#define ARCH_VM_PTE_USER       I386_VM_USER
+#define ARCH_VM_PTE_RW         I386_VM_WRITE
+#define ARCH_PAGEDIR_SIZE      I386_PAGEDIR_SIZE
+#define ARCH_VM_BIGPAGE                I386_VM_BIGPAGE
+#define ARCH_VM_PT_ENTRIES      I386_VM_PT_ENTRIES
+
 /* For arch-specific PT routines to check if no bits outside
  * the regular flags are set.
  */
 #define PTF_ALLFLAGS   (PTF_READ|PTF_WRITE|PTF_PRESENT|PTF_USER|PTF_GLOBAL|PTF_NOCACHE)
 
-#if SANITYCHECKS
-#define PT_SANE(p) { pt_sanitycheck((p), __FILE__, __LINE__); }
-#else
-#define PT_SANE(p)
-#endif
-
-#endif
+#define PFERR_NOPAGE(e)        (!((e) & I386_VM_PFE_P))
+#define PFERR_PROT(e)  (((e) & I386_VM_PFE_P))
+#define PFERR_WRITE(e) ((e) & I386_VM_PFE_W)
+#define PFERR_READ(e)  (!((e) & I386_VM_PFE_W))
 
+#define VM_PAGE_SIZE   I386_PAGE_SIZE
 
+#endif
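
The ARCH_VM_* and VM_PAGE_SIZE aliases added above are what let the merged pagetable.c stay largely arch-neutral: generic code spells only the ARCH_* names, and each arch header maps them onto its own constants. The i386 side is shown in the hunk above; the ARM header is assumed to mirror it, roughly as in this sketch:

    #include <machine/vm.h>

    /* i386 side taken from the hunk above; the ARM header is assumed to
     * provide the same ARCH_* names on top of its own constants.
     */
    #if defined(__i386__)
    #define ARCH_VM_PTE_PRESENT I386_VM_PRESENT
    #define ARCH_VM_PTE_RW      I386_VM_WRITE
    #define VM_PAGE_SIZE        I386_PAGE_SIZE
    #elif defined(__arm__)
    /* ... ARM equivalents, defined in servers/vm/arch/arm/pagetable.h ... */
    #endif
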
diff --git a/servers/vm/arch/i386/util.S b/servers/vm/arch/i386/util.S
deleted file mode 100644 (file)
index eb8c6a3..0000000
+++ /dev/null
@@ -1,22 +0,0 @@
-#include <machine/asm.h>
-
-/**===========================================================================* */
-/**                              i386_invlpg                                  * */
-/**===========================================================================* */
-/* PUBLIC void i386_invlpg(u32_t addr) */
-/* Tell the processor to invalidate a tlb entry at virtual address addr. */
-ENTRY(i386_invlpg)
-       push    %ebp
-       mov     %esp, %ebp
-       push    %eax
-
-       mov     8(%ebp), %eax
-#ifdef __ACK__
-       invlpg  %eax
-#else
-       invlpg  (%eax)
-#endif
-
-       pop     %eax
-       pop     %ebp
-       ret
index 5a8c57e066650f8876deba15b11c8d671d9f9c26..39dcd9d8b4b7244bcb6fa855940835fc7e2fa162 100644 (file)
@@ -27,7 +27,6 @@
 #include "util.h"
 #include "sanitycheck.h"
 #include "region.h"
-#include "memory.h"
 
 /*===========================================================================*
  *                             do_fork                                      *
index 10124586c05a8c301f94d6afd93b93a835f783cf..d40a0b62170e716533c358b9a682289b0a346a0e 100644 (file)
@@ -28,8 +28,6 @@
 #include <stdio.h>
 #include <assert.h>
 
-#include <memory.h>
-
 #define _MAIN 1
 #include "glo.h"
 #include "proto.h"
index cc63523b11c70bab4da542936553ebf5199e360d..8a9bf22e6ce6c5da0fb5eabdbf3092f1f400b8a7 100644 (file)
@@ -26,7 +26,6 @@
 #include <env.h>
 #include <stdio.h>
 #include <fcntl.h>
-#include <memory.h>
 
 #include "glo.h"
 #include "proto.h"
index 04b3037af31b619ad09eedeb89864708843b0fab..97d9678da4d7290b2893b18df885a175acc06103 100644 (file)
 #include <signal.h>
 #include <assert.h>
 
-#include <pagefaults.h>
-
 #include "glo.h"
 #include "proto.h"
-#include "memory.h"
 #include "util.h"
 #include "region.h"
 
index 3cf06f1766c37e1069a83d4637ce139ed6d52334..736c7f9861f8a035da5a3bfdf1b55d2e8708af00 100644 (file)
@@ -19,7 +19,6 @@
 #include <errno.h>
 #include <assert.h>
 #include <stdint.h>
-#include <memory.h>
 #include <sys/param.h>
 
 #include "vm.h"
index 2f3ee76c68e726796f10999b08f8dcf40b6424d6..d38f590222184b617e89e0205b6a3a36d73c462e 100644 (file)
@@ -12,8 +12,8 @@ struct phys_region;
 #include <minix/vm.h>
 #include <timers.h>
 #include <stdio.h>
-#include <pagetable.h>
 
+#include "pt.h"
 #include "vm.h"
 #include "yielded.h"
 
diff --git a/servers/vm/pt.h b/servers/vm/pt.h
new file mode 100644 (file)
index 0000000..6d43758
--- /dev/null
@@ -0,0 +1,29 @@
+
+#ifndef _PT_H
+#define _PT_H 1
+
+#include <machine/vm.h>
+
+#include "vm.h"
+#include "pagetable.h"
+
+/* A pagetable. */
+typedef struct {
+       /* Directory entries in VM addr space - root of page table.  */
+       u32_t *pt_dir;          /* page aligned (ARCH_VM_DIR_ENTRIES) */
+       u32_t pt_dir_phys;      /* physical address of pt_dir */
+
+       /* Pointers to page tables in VM address space. */
+       u32_t *pt_pt[ARCH_VM_DIR_ENTRIES];
+
+       /* When looking for a hole in virtual address space, start
+        * looking here. This is in linear addresses, i.e.,
+        * not as the process sees it but the position in the
+        * page table. This is just a hint.
+        */
+       u32_t pt_virtop;
+} pt_t;
+
+#define CLICKSPERPAGE (VM_PAGE_SIZE/CLICK_SIZE)
+
+#endif
index 373b96bbdc86edb0587c89c54ac8cef2dd0a50eb..ee54cef69bb77e6f08e1ffa9fd12ffe73a9e2377 100644 (file)
@@ -17,7 +17,6 @@
 #include <string.h>
 #include <assert.h>
 #include <stdint.h>
-#include <memory.h>
 #include <sys/param.h>
 
 #include "vm.h"
index 5fef8c4470ec9a5c1938a98608e4efa90a1f07e7..e932f6b14a6acc487f296bcb338108111138b0f2 100644 (file)
@@ -21,7 +21,6 @@
 #include <env.h>
 #include <stdio.h>
 #include <assert.h>
-#include <memory.h>
 
 #include "glo.h"
 #include "proto.h"
index 78ddc86356073154567ef54d994775be5bdd45d4..1528a3ec16b50b6d13b34159bcbd981ec429b45e 100644 (file)
@@ -7,6 +7,8 @@
 
 #if SANITYCHECKS
 
+#define PT_SANE(p) { pt_sanitycheck((p), __FILE__, __LINE__); }
+
 /* This macro is used in the sanity check functions, where file and 
  * line are function arguments.
  */
@@ -48,6 +50,7 @@
 #define SLABSANITYCHECK(l)
 #define SLABSANE(ptr)
 #define MYASSERT(c)
+#define PT_SANE(p)
 #endif
 
 #if MEMPROTECT
index e7f1d65863f19343b532dcc463eb0f478d4f8ff0..b7dd8bc02709e2fbdcf1f981e64e949b9a4dce64 100644 (file)
@@ -21,8 +21,6 @@
 #include <string.h>
 #include <env.h>
 
-#include <memory.h>
-
 #include <sys/param.h>
 
 #include "glo.h"
index d4178bb07e5bd7ef0a28672adbcc748baabf0a7b..a23a1996c43885c9ecfc051a8cb33a66f22d6c77 100644 (file)
@@ -22,7 +22,6 @@
 #include <errno.h>
 #include <env.h>
 #include <unistd.h>
-#include <memory.h>
 #include <assert.h>
 #include <sys/param.h>
 
index bc40d565cf917690c1917f030a253f6d43b3ecb1..b6d91f6fa7960a454160be0200f3c28ae8c4b36d 100644 (file)
@@ -13,7 +13,6 @@
 #define JUNKFREE       0       /* Fill freed pages with junk */
 
 #include <sys/errno.h>
-#include <memory.h>
 
 #include "sanitycheck.h"
 #include "region.h"
@@ -61,5 +60,9 @@
 #define MAP_NONE       0xFFFFFFFE
 #define NO_MEM ((phys_clicks) MAP_NONE)  /* returned by alloc_mem() with mem is up */
 
+/* And what is the highest addressable piece of memory? */
+#define VM_DATATOP      kernel_boot_info.user_end
+#define VM_STACKTOP     kernel_boot_info.user_sp
+
 #endif
 
index 8cb00e1558931653e6a7e57da08521912f2e634c..1c9c488bcadbaa9de30786e53f18c83e1669c4c2 100644 (file)
@@ -2,10 +2,10 @@
 #ifndef _VMPROC_H 
 #define _VMPROC_H 1
 
-#include <pagetable.h>
 #include <minix/bitmap.h>
 #include <machine/archtypes.h>
 
+#include "pt.h"
 #include "vm.h"
 #include "physravl.h"
 #include "yieldedavl.h"