From: Ben Gras
Date: Fri, 8 Feb 2013 18:11:42 +0000 (+0100)
Subject: improve memory accounting
X-Git-Tag: v3.2.1~14
X-Git-Url: http://zhaoyanbai.com/repos/named-checkzone.html?a=commitdiff_plain;h=3bc6d7df0683c9d9ee2bf39f0e2dc44438506ec0;p=minix.git

improve memory accounting

	. the total amount of memory in the system didn't include the
	  memory used by the boot-time modules and some dynamic allocation
	  by the kernel at boot time (to map in VM). especially apparent
	  on our ARM board with 'only' 512MB of memory and a huge ramdisk.
	. also: *add* the VM loaded module to the freelist after it has
	  been allocated for & mapped in, instead of cutting it *out* of
	  the freelist! so we get a few more MB free..

Change-Id: If37ac32b21c9d38610830e21421264da4f20bc4f
---
diff --git a/include/minix/param.h b/include/minix/param.h
index 104d7b016..90a8b1676 100644
--- a/include/minix/param.h
+++ b/include/minix/param.h
@@ -41,7 +41,8 @@ typedef struct kinfo {
         char release[6];                        /* kernel release number */
         char version[6];                        /* kernel version number */
         int vm_allocated_bytes;                 /* allocated by kernel to load vm */
-        int kernel_allocated_bytes;             /* used by kernel */
+        int kernel_allocated_bytes;             /* used by kernel */
+        int kernel_allocated_bytes_dynamic;     /* used by kernel (runtime) */
 } kinfo_t;
 
 #endif
diff --git a/kernel/arch/earm/pg_utils.c b/kernel/arch/earm/pg_utils.c
index ad333a9a0..2908a7a80 100644
--- a/kernel/arch/earm/pg_utils.c
+++ b/kernel/arch/earm/pg_utils.c
@@ -67,26 +67,6 @@ void cut_memmap(kinfo_t *cbi, phys_bytes start, phys_bytes end)
         }
 }
 
-phys_bytes alloc_lowest(kinfo_t *cbi, phys_bytes len)
-{
-        /* Allocate the lowest physical page we have. */
-        int m;
-#define EMPTY 0xffffffff
-        phys_bytes lowest = EMPTY;
-        assert(len > 0);
-        len = roundup(len, ARM_PAGE_SIZE);
-
-        assert(kernel_may_alloc);
-
-        for(m = 0; m < cbi->mmap_size; m++) {
-                if(cbi->memmap[m].len < len) continue;
-                if(cbi->memmap[m].addr < lowest) lowest = cbi->memmap[m].addr;
-        }
-        assert(lowest != EMPTY);
-        cut_memmap(cbi, lowest, len);
-        return lowest;
-}
-
 void add_memmap(kinfo_t *cbi, u64_t addr, u64_t len)
 {
         int m;
@@ -157,6 +137,8 @@ phys_bytes pg_alloc_page(kinfo_t *cbi)
                 mmap->addr += ARM_PAGE_SIZE;
                 mmap->len -= ARM_PAGE_SIZE;
 
+                cbi->kernel_allocated_bytes_dynamic += ARM_PAGE_SIZE;
+
                 return addr;
         }
diff --git a/kernel/arch/earm/pre_init.c b/kernel/arch/earm/pre_init.c
index 4d9663434..626aa6a53 100644
--- a/kernel/arch/earm/pre_init.c
+++ b/kernel/arch/earm/pre_init.c
@@ -106,8 +106,8 @@ int overlaps(multiboot_module_t *mod, int n, int cmp_mod)
 #define MB_PARAM_MOD      0x96000000
 #define MB_MODS_ALIGN     0x00800000 /* 8 MB */
 #define MB_MODS_SIZE      0x00004000 /* 16 KB */
-#define MB_MMAP_START     MB_MODS_BASE
-#define MB_MMAP_SIZE      0x10000000 /* 256 MB */
+#define MB_MMAP_START     0x80000000
+#define MB_MMAP_SIZE      0x20000000 /* 512 MB */
 
 multiboot_module_t mb_modlist[MB_MODS_NR];
 multiboot_memory_map_t mb_memmap;
@@ -204,6 +204,7 @@ void get_parameters(u32_t ebx, kinfo_t *cbi)
          * still needed but will be freed after bootstrapping.
          */
         kinfo.kernel_allocated_bytes = (phys_bytes) &_kern_size;
+        kinfo.kernel_allocated_bytes -= cbi->bootstrap_len;
 
         assert(!(cbi->bootstrap_start % ARM_PAGE_SIZE));
         cbi->bootstrap_len = rounddown(cbi->bootstrap_len, ARM_PAGE_SIZE);
diff --git a/kernel/arch/earm/protect.c b/kernel/arch/earm/protect.c
index 7a6f759b2..fd8b41617 100644
--- a/kernel/arch/earm/protect.c
+++ b/kernel/arch/earm/protect.c
@@ -151,7 +151,8 @@ void arch_boot_proc(struct boot_image *ip, struct proc *rp)
         arch_proc_init(rp, execi.pc, kinfo.user_sp - 3*4, ip->proc_name);
 
         /* Free VM blob that was just copied into existence. */
-        cut_memmap(&kinfo, mod->mod_start, mod->mod_end);
+        add_memmap(&kinfo, mod->mod_start, mod->mod_end-mod->mod_start);
+        mod->mod_end = mod->mod_start = 0;
 
         /* Remember them */
         kinfo.vm_allocated_bytes = alloc_for_vm;
diff --git a/kernel/arch/i386/pg_utils.c b/kernel/arch/i386/pg_utils.c
index f76ba516a..444df1f90 100644
--- a/kernel/arch/i386/pg_utils.c
+++ b/kernel/arch/i386/pg_utils.c
@@ -81,6 +81,7 @@ phys_bytes alloc_lowest(kinfo_t *cbi, phys_bytes len)
         }
         assert(lowest != EMPTY);
         cut_memmap(cbi, lowest, len);
+        cbi->kernel_allocated_bytes_dynamic += len;
         return lowest;
 }
 
@@ -152,6 +153,8 @@ phys_bytes pg_alloc_page(kinfo_t *cbi)
                 mmap->len -= I386_PAGE_SIZE;
 
+                cbi->kernel_allocated_bytes_dynamic += I386_PAGE_SIZE;
+
                 return mmap->addr + mmap->len;
         }
diff --git a/kernel/arch/i386/pre_init.c b/kernel/arch/i386/pre_init.c
index 03e068fc2..8701a21a7 100644
--- a/kernel/arch/i386/pre_init.c
+++ b/kernel/arch/i386/pre_init.c
@@ -164,6 +164,7 @@ void get_parameters(u32_t ebx, kinfo_t *cbi)
          * still needed but will be freed after bootstrapping.
          */
         kinfo.kernel_allocated_bytes = (phys_bytes) &_kern_size;
+        kinfo.kernel_allocated_bytes -= cbi->bootstrap_len;
 
         assert(!(cbi->bootstrap_start % I386_PAGE_SIZE));
         cbi->bootstrap_len = rounddown(cbi->bootstrap_len, I386_PAGE_SIZE);
diff --git a/kernel/arch/i386/protect.c b/kernel/arch/i386/protect.c
index 7594e2361..fe9d23f3f 100644
--- a/kernel/arch/i386/protect.c
+++ b/kernel/arch/i386/protect.c
@@ -428,7 +428,8 @@ void arch_boot_proc(struct boot_image *ip, struct proc *rp)
         arch_proc_init(rp, execi.pc, kinfo.user_sp - 3*4, ip->proc_name);
 
         /* Free VM blob that was just copied into existence. */
-        cut_memmap(&kinfo, mod->mod_start, mod->mod_end);
+        add_memmap(&kinfo, mod->mod_start, mod->mod_end-mod->mod_start);
+        mod->mod_end = mod->mod_start = 0;
 
         /* Remember them */
         kinfo.vm_allocated_bytes = alloc_for_vm;
diff --git a/servers/vm/alloc.c b/servers/vm/alloc.c
index bc296b6a4..fcd9eb0c9 100644
--- a/servers/vm/alloc.c
+++ b/servers/vm/alloc.c
@@ -97,6 +97,11 @@ phys_clicks alloc_mem(phys_clicks clicks, u32_t memflags)
         return mem;
 }
 
+void mem_add_total_pages(int pages)
+{
+        total_pages += pages;
+}
+
 /*===========================================================================*
  *                                free_mem                                   *
  *===========================================================================*/
diff --git a/servers/vm/main.c b/servers/vm/main.c
index f2b7c2f5f..3a64b2e32 100644
--- a/servers/vm/main.c
+++ b/servers/vm/main.c
@@ -306,6 +306,8 @@ void init_vm(void)
         static struct memory mem_chunks[NR_MEMS];
         static struct boot_image *ip;
         extern void __minix_init(void);
+        multiboot_module_t *mod;
+        vir_bytes kern_dyn, kern_static;
 
 #if SANITYCHECKS
         incheck = nocheck = 0;
@@ -344,6 +346,21 @@ void init_vm(void)
         init_proc(VM_PROC_NR);
 
         pt_init();
+        /* The kernel's freelist does not include boot-time modules; let
+         * the allocator know that the total memory is bigger.
+         */
+        for (mod = &kernel_boot_info.module_list[0];
+                mod < &kernel_boot_info.module_list[kernel_boot_info.mods_with_kernel-1]; mod++) {
+                phys_bytes len = mod->mod_end-mod->mod_start+1;
+                len = roundup(len, VM_PAGE_SIZE);
+                mem_add_total_pages(len/VM_PAGE_SIZE);
+        }
+
+        kern_dyn = kernel_boot_info.kernel_allocated_bytes_dynamic;
+        kern_static = kernel_boot_info.kernel_allocated_bytes;
+        kern_static = roundup(kern_static, VM_PAGE_SIZE);
+        mem_add_total_pages((kern_dyn + kern_static)/VM_PAGE_SIZE);
+
         /* Give these processes their own page table. */
         for (ip = &kernel_boot_info.boot_procs[0];
                 ip < &kernel_boot_info.boot_procs[NR_BOOT_PROCS]; ip++) {
diff --git a/servers/vm/proto.h b/servers/vm/proto.h
index 1882cafe9..d49057de5 100644
--- a/servers/vm/proto.h
+++ b/servers/vm/proto.h
@@ -26,6 +26,7 @@ void usedpages_reset(void);
 int usedpages_add_f(phys_bytes phys, phys_bytes len, char *file, int line);
 void free_mem(phys_clicks base, phys_clicks clicks);
+void mem_add_total_pages(int pages);
 #define usedpages_add(a, l) usedpages_add_f(a, l, __FILE__, __LINE__)
 void mem_init(struct memory *chunks);
diff --git a/servers/vm/region.c b/servers/vm/region.c
index efb9bc415..16f76a20c 100644
--- a/servers/vm/region.c
+++ b/servers/vm/region.c
@@ -1462,7 +1462,8 @@ void get_stats_info(struct vm_stats_info *vsi)
 void get_usage_info_kernel(struct vm_usage_info *vui)
 {
         memset(vui, 0, sizeof(*vui));
-        vui->vui_total = kernel_boot_info.kernel_allocated_bytes;
+        vui->vui_total = kernel_boot_info.kernel_allocated_bytes +
+                kernel_boot_info.kernel_allocated_bytes_dynamic;
 }
 
 static void get_usage_info_vm(struct vm_usage_info *vui)
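
The change to servers/vm/main.c above boils down to simple page arithmetic: module
sizes and the kernel's static allocation are rounded up to whole pages, the dynamic
allocation is already page-granular, and everything is fed to VM's total-pages counter.
The standalone sketch below (not part of the commit) mirrors that bookkeeping;
VM_PAGE_SIZE, the ROUNDUP macro, the local mem_add_total_pages() stand-in and the
sample module/kernel sizes are illustrative assumptions only.

        /* Sketch of the accounting init_vm() now performs: every boot-time
         * module plus the kernel's static and dynamic allocations is counted
         * toward VM's idea of total memory, rounded up to whole pages.
         */
        #include <stdio.h>

        #define VM_PAGE_SIZE 4096UL
        #define ROUNDUP(x, sz) ((((x) + (sz) - 1) / (sz)) * (sz))

        static unsigned long total_pages;   /* stands in for VM's counter */

        static void mem_add_total_pages(unsigned long pages)
        {
                total_pages += pages;
        }

        int main(void)
        {
                /* e.g. a large ramdisk module plus a couple of smaller ones */
                unsigned long module_bytes[] = { 200UL << 20, 1UL << 20, 300UL << 10 };
                unsigned long kern_static = 3UL << 20;  /* kernel_allocated_bytes */
                unsigned long kern_dyn = 64UL * 1024;   /* kernel_allocated_bytes_dynamic */
                unsigned long i;

                /* boot-time modules, rounded up to whole pages */
                for (i = 0; i < sizeof(module_bytes)/sizeof(module_bytes[0]); i++)
                        mem_add_total_pages(ROUNDUP(module_bytes[i], VM_PAGE_SIZE) / VM_PAGE_SIZE);

                /* kernel static image (rounded up) plus page-granular dynamic allocations */
                kern_static = ROUNDUP(kern_static, VM_PAGE_SIZE);
                mem_add_total_pages((kern_dyn + kern_static) / VM_PAGE_SIZE);

                printf("pages previously missing from the total: %lu\n", total_pages);
                return 0;
        }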