Zhao Yanbai Git Server - minix.git/commitdiff
vm: restartability improvements (#1) 21/3121/1
authorBen Gras <ben@minix3.org>
Thu, 15 Jan 2015 15:47:46 +0000 (16:47 +0100)
committerDavid van Moolenbroek <david@minix3.org>
Thu, 17 Sep 2015 13:41:26 +0000 (13:41 +0000)
Two bugs fixed with respect to VM restartability.

. make sure pagetable data is only allocated
  using dynamic data instead of static spare pages
  (bootstrap pages). The static pages are needed during
  bootstrap, but we now repeat part of the initialization
  afterwards so that only dynamically allocated data remains.
  This solves the problem of physical addresses changing
  (as static pages are re-allocated for the new instance)
  after a live update.
. the pt_ptalloc range has to be specified in bytes instead
  of pde slot numbers. Leaving pt_pt NULL causes mapping
  transfers to fail, because the NULL address happens to be
  mapped in at that point, and the updates then land there.
. added some sanity checks against the above happening.

The new state is that VM can update many times, but the system
isn't fully reliable afterwards yet.

Change-Id: I7313602c740cdae8590589132291116ed921aed7

minix/servers/vm/pagetable.c
minix/servers/vm/proto.h
minix/servers/vm/region.c
minix/servers/vm/rs.c

index e7885bc64d968ba97300c8e9f1218dea5f339977..66dd9da0f29a5f9d4b9589eb5180920be368159b 100644 (file)
@@ -113,6 +113,17 @@ static char static_sparepages[VM_PAGE_SIZE*STATIC_SPAREPAGES]
 static char static_sparepagedirs[ARCH_PAGEDIR_SIZE*STATIC_SPAREPAGEDIRS + ARCH_PAGEDIR_SIZE] __aligned(ARCH_PAGEDIR_SIZE);
 #endif
 
+void pt_assert(pt_t *pt)
+{
+       char dir[4096];
+       pt_clearmapcache();
+       if((sys_vmctl(SELF, VMCTL_FLUSHTLB, 0)) != OK) {
+               panic("VMCTL_FLUSHTLB failed");
+       }
+       sys_physcopy(NONE, pt->pt_dir_phys, SELF, (vir_bytes) dir, sizeof(dir), 0);
+       assert(!memcmp(dir, pt->pt_dir, sizeof(dir)));
+}
+
 #if SANITYCHECKS
 /*===========================================================================*
  *                             pt_sanitycheck                               *
@@ -255,7 +266,6 @@ static void *vm_getsparepage(phys_bytes *phys)
 {
        void *ptr;
        if(reservedqueue_alloc(spare_pagequeue, phys, &ptr) != OK) {
-               printf("vm_getsparepage: no spare found\n");
                return NULL;
        }
        assert(ptr);
@@ -662,6 +672,7 @@ int pt_map_in_range(struct vmproc *src_vmp, struct vmproc *dst_vmp,
 
                /* Transfer the mapping. */
                dst_pt->pt_pt[pde][pte] = pt->pt_pt[pde][pte];
+               assert(dst_pt->pt_pt[pde]);
 
                 if(viraddr == VM_DATATOP) break;
        }
@@ -709,11 +720,13 @@ int pt_ptmap(struct vmproc *src_vmp, struct vmproc *dst_vmp)
 #endif
 
        /* Scan all non-reserved page-directory entries. */
-       for(pde=0; pde < ARCH_VM_DIR_ENTRIES; pde++) {
+       for(pde=0; pde < kern_start_pde; pde++) {
                if(!(pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT)) {
                        continue;
                }
 
+               if(!pt->pt_pt[pde]) { panic("pde %d empty\n", pde); }
+
                /* Transfer mapping to the page table. */
                viraddr = (vir_bytes) pt->pt_pt[pde];
 #if defined(__i386__)
@@ -721,6 +734,7 @@ int pt_ptmap(struct vmproc *src_vmp, struct vmproc *dst_vmp)
 #elif defined(__arm__)
                physaddr = pt->pt_dir[pde] & ARCH_VM_PDE_MASK;
 #endif
+               assert(viraddr);
                if((r=pt_writemap(dst_vmp, &dst_vmp->vm_pt, viraddr, physaddr, VM_PAGE_SIZE,
                        ARCH_VM_PTE_PRESENT | ARCH_VM_PTE_USER | ARCH_VM_PTE_RW
 #ifdef __arm__
@@ -1024,6 +1038,40 @@ static int freepde(void)
        return p;
 }
 
+void pt_allocate_kernel_mapped_pagetables(void)
+{
+       /* Reserve PDEs available for mapping in the page directories. */
+       int pd;
+       for(pd = 0; pd < MAX_PAGEDIR_PDES; pd++) {
+               struct pdm *pdm = &pagedir_mappings[pd];
+               if(!pdm->pdeno)  {
+                       pdm->pdeno = freepde();
+                       assert(pdm->pdeno);
+               }
+               phys_bytes ph;
+
+               /* Allocate us a page table in which to
+                * remember page directory pointers.
+                */
+               if(!(pdm->page_directories =
+                       vm_allocpage(&ph, VMP_PAGETABLE))) {
+                       panic("no virt addr for vm mappings");
+               }
+               memset(pdm->page_directories, 0, VM_PAGE_SIZE);
+               pdm->phys = ph;
+
+#if defined(__i386__)
+               pdm->val = (ph & ARCH_VM_ADDR_MASK) |
+                       ARCH_VM_PDE_PRESENT | ARCH_VM_PTE_RW;
+#elif defined(__arm__)
+               pdm->val = (ph & ARCH_VM_PDE_MASK)
+                       | ARCH_VM_PDE_PRESENT
+                       | ARM_VM_PTE_CACHED
+                       | ARM_VM_PDE_DOMAIN; //LSC FIXME
+#endif
+       }
+}
+
 /*===========================================================================*
  *                              pt_init                                      *
  *===========================================================================*/
@@ -1031,6 +1079,7 @@ void pt_init(void)
 {
         pt_t *newpt;
         int s, r, p;
+       phys_bytes phys;
        vir_bytes sparepages_mem;
 #if defined(__arm__)
        vir_bytes sparepagedirs_mem;
@@ -1181,35 +1230,7 @@ void pt_init(void)
                }
        }
 
-       /* Reserve PDEs available for mapping in the page directories. */
-       {
-               int pd;
-               for(pd = 0; pd < MAX_PAGEDIR_PDES; pd++) {
-                       struct pdm *pdm = &pagedir_mappings[pd];
-                       pdm->pdeno = freepde();
-                       phys_bytes ph;
-
-                       /* Allocate us a page table in which to
-                        * remember page directory pointers.
-                        */
-                       if(!(pdm->page_directories =
-                               vm_allocpage(&ph, VMP_PAGETABLE))) {
-                               panic("no virt addr for vm mappings");
-                       }
-                       memset(pdm->page_directories, 0, VM_PAGE_SIZE);
-                       pdm->phys = ph;
-
-#if defined(__i386__)
-                       pdm->val = (ph & ARCH_VM_ADDR_MASK) |
-                               ARCH_VM_PDE_PRESENT | ARCH_VM_PTE_RW;
-#elif defined(__arm__)
-                       pdm->val = (ph & ARCH_VM_PDE_MASK)
-                               | ARCH_VM_PDE_PRESENT
-                               | ARM_VM_PTE_CACHED
-                               | ARM_VM_PDE_DOMAIN; //LSC FIXME
-#endif
-               }
-       }
+       pt_allocate_kernel_mapped_pagetables();
 
        /* Allright. Now. We have to make our own page directory and page tables,
         * that the kernel has already set up, accessible to us. It's easier to
@@ -1279,6 +1300,27 @@ void pt_init(void)
 
        pt_init_done = 1;
 
+       /* VM is now fully functional in that it can dynamically allocate memory
+        * for itself.
+        *
+        * We don't want to keep using the bootstrap statically allocated spare
+        * pages though, as the physical addresses will change on liveupdate. So we
+        * re-do part of the initialization now with purely dynamically allocated
+        * memory. First throw out the static pool.
+        */
+
+       alloc_cycle();                          /* Make sure allocating works */
+       while(vm_getsparepage(&phys)) ;         /* Use up all static pages */
+       alloc_cycle();                          /* Refill spares with dynamic */
+       pt_allocate_kernel_mapped_pagetables(); /* Reallocate in-kernel pages */
+       pt_bind(newpt, &vmproc[VM_PROC_NR]);    /* Recalculate */
+       pt_mapkernel(newpt);                    /* Rewrite pagetable info */
+
+       /* Flush TLB just in case any of those mappings have been touched */
+       if((sys_vmctl(SELF, VMCTL_FLUSHTLB, 0)) != OK) {
+               panic("VMCTL_FLUSHTLB failed");
+       }
+
         /* All OK. */
         return;
 }
index 79d3ac4c6902f83e07190dbb8c89e35b00cbc41d..546fe1734f7234ab70a66aea8c3f839eaacdd30b 100644 (file)
@@ -118,6 +118,7 @@ void vm_pagelock(void *vir, int lockflag);
 int vm_addrok(void *vir, int write);
 int get_vm_self_pages(void);
 int pt_writable(struct vmproc *vmp, vir_bytes v);
+void pt_assert(pt_t *pt);
 
 #if SANITYCHECKS
 void pt_sanitycheck(pt_t *pt, const char *file, int line);
index 5816b747c47358d87e356af260fdb6e1e9d2926c..2afc4080abf475b892037ed40a9008e20233b2c7 100644 (file)
@@ -783,6 +783,7 @@ int map_pin_memory(struct vmproc *vmp)
        region_iter iter;
        region_start_iter_least(&vmp->vm_regions_avl, &iter);
        /* Scan all memory regions. */
+       pt_assert(&vmp->vm_pt);
        while((vr = region_get_iter(&iter))) {
                /* Make sure region is mapped to physical memory and writable.*/
                r = map_handle_memory(vmp, vr, 0, vr->length, 1, NULL, 0, 0);
@@ -791,6 +792,7 @@ int map_pin_memory(struct vmproc *vmp)
                }
                region_incr_iter(&iter);
        }
+       pt_assert(&vmp->vm_pt);
        return OK;
 }
 
index 38d56bd25af133aa9ecc76d1a0bb3393e0d9beb4..d6c2ac8be8c0273fba9a7a002387608a584ba68d 100644 (file)
@@ -146,6 +146,8 @@ static int rs_memctl_make_vm_instance(struct vmproc *new_vm_vmp)
 
        this_vm_vmp = &vmproc[VM_PROC_NR];
 
+       pt_assert(&this_vm_vmp->vm_pt);
+
        /* Check if the operation is allowed. */
        assert(num_vm_instances == 1 || num_vm_instances == 2);
        if(num_vm_instances == 2) {
@@ -169,12 +171,12 @@ static int rs_memctl_make_vm_instance(struct vmproc *new_vm_vmp)
        flags = 0;
        verify = FALSE;
        r = pt_ptalloc_in_range(&this_vm_vmp->vm_pt,
-               kernel_boot_info.freepde_start, ARCH_VM_DIR_ENTRIES, flags, verify);
+               VM_OWN_HEAPBASE, VM_DATATOP, flags, verify);
        if(r != OK) {
                return r;
        }
        r = pt_ptalloc_in_range(&new_vm_vmp->vm_pt,
-               kernel_boot_info.freepde_start, ARCH_VM_DIR_ENTRIES, flags, verify);
+               VM_OWN_HEAPBASE, VM_DATATOP, flags, verify);
        if(r != OK) {
                return r;
        }
@@ -189,6 +191,9 @@ static int rs_memctl_make_vm_instance(struct vmproc *new_vm_vmp)
                return r;
        }
 
+       pt_assert(&this_vm_vmp->vm_pt);
+       pt_assert(&new_vm_vmp->vm_pt);
+
        return OK;
 }