int nr_tasks; /* number of kernel tasks */
char release[6]; /* kernel release number */
char version[6]; /* kernel version number */
+ int vm_allocated_bytes; /* allocated by kernel to load vm */
+ int kernel_allocated_bytes; /* used by kernel */
} kinfo_t;
#endif
cbi->user_sp &= 0xF0000000;
cbi->user_end = cbi->user_sp;
+ /* kernel bytes without bootstrap code/data that is currently
+ * still needed but will be freed after bootstrapping.
+ */
+ kinfo.kernel_allocated_bytes = (phys_bytes) &_kern_size;
+
assert(!(cbi->bootstrap_start % I386_PAGE_SIZE));
cbi->bootstrap_len = rounddown(cbi->bootstrap_len, I386_PAGE_SIZE);
assert(mbi->flags & MULTIBOOT_INFO_MODS);
prot_init_done = 1;
}
+static int alloc_for_vm = 0;
+
void arch_post_init(void)
{
/* Let memory mapping code know what's going on at bootstrap time */
pg_map(PG_ALLOCATEME, vaddr, vaddr+len, &kinfo);
pg_load();
memset((char *) vaddr, 0, len);
+ alloc_for_vm += len;
return OK;
}
/* Free VM blob that was just copied into existence. */
cut_memmap(&kinfo, mod->mod_start, mod->mod_end);
+
+ /* Remember them */
+ kinfo.vm_allocated_bytes = alloc_for_vm;
}
}
ex64lo(proc[i].p_cycles)
);
- /* If the process is not a kernel task, we add some extra info. */
- if (!task) {
- memset(&vui, 0, sizeof(vui));
+ memset(&vui, 0, sizeof(vui));
- if (!is_zombie(i)) {
- /* We don't care if this fails. */
- (void) vm_info_usage(proc[i].p_endpoint, &vui);
- }
+ if (!is_zombie(i)) {
+ /* We don't care if this fails. */
+ (void) vm_info_usage(proc[i].p_endpoint, &vui);
+ }
+ /* If the process is not a kernel task, we add some extra info. */
+ if (!task) {
if (mproc[pi].mp_flags & PAUSED)
p_state = PSTATE_PAUSED;
else if (mproc[pi].mp_flags & WAITING)
ex64hi(proc[i].p_kcall_cycles),
ex64lo(proc[i].p_kcall_cycles));
+ /* add total memory for tasks at the end */
+ if(task) buf_printf(" %lu", vui.vui_total);
+
/* Newline at the end of the file. */
buf_printf("\n");
}
#include "memory.h"
+static int vm_self_pages;
+
/* PDE used to map in kernel, kernel physical address. */
static int pagedir_pde = -1;
static u32_t global_bit = 0, pagedir_pde_val;
phys_bytes phys;
} sparepages[SPAREPAGES];
+extern char _end;
+#define is_staticaddr(v) ((vir_bytes) (v) < (vir_bytes) &_end)
+
#define MAX_KERNMAPPINGS 10
static struct {
phys_bytes phys_addr; /* Physical addr. */
int pde = 0, try_restart;
static u32_t lastv = 0;
pt_t *pt = &vmprocess->vm_pt;
- extern char _end;
vir_bytes vmin, vmax;
vmin = (vir_bytes) (&_end) & I386_VM_ADDR_MASK; /* marks end of VM BSS */
void vm_freepages(vir_bytes vir, int pages)
{
assert(!(vir % I386_PAGE_SIZE));
- extern char _end;
- if(vir < (vir_bytes) &_end) {
+ if(is_staticaddr(vir)) {
printf("VM: not freeing static page\n");
return;
}
WMF_OVERWRITE | WMF_FREE) != OK)
panic("vm_freepages: pt_writemap failed");
+ vm_self_pages--;
+
#if SANITYCHECKS
/* If SANITYCHECKS are on, flush tlb so accessing freed pages is
* always trapped, also if not in tlb.
util_stacktrace();
printf("VM: warning: out of spare pages\n");
}
+ if(!is_staticaddr(s)) vm_self_pages++;
return s;
}
/* Return user-space-ready pointer to it. */
ret = (void *) loc;
+ vm_self_pages++;
return ret;
}
vm_checkspares();
}
+/* Return the number of pages VM has allocated for its own use
+ * (incremented/decremented by the page alloc/free paths above).
+ */
+int get_vm_self_pages(void)
+{
+	return vm_self_pages;
+}
int pt_mapkernel(pt_t *pt);
void vm_pagelock(void *vir, int lockflag);
int vm_addrok(void *vir, int write);
+int get_vm_self_pages(void);
#if SANITYCHECKS
void pt_sanitycheck(pt_t *pt, char *file, int line);
void get_stats_info(struct vm_stats_info *vsi);
void get_usage_info(struct vmproc *vmp, struct vm_usage_info *vui);
+void get_usage_info_kernel(struct vm_usage_info *vui);
int get_region_info(struct vmproc *vmp, struct vm_region_info *vri, int
count, vir_bytes *nextp);
int copy_abs2region(phys_bytes abs, struct vir_region *destregion,
#include <minix/debug.h>
#include <minix/bitmap.h>
#include <minix/hash.h>
+#include <machine/multiboot.h>
#include <sys/mman.h>
vsi->vsi_cached++;
}
+/* Fill *vui with the kernel's memory usage.  The kernel has no VM
+ * regions to walk, so only the total is reported: the byte count the
+ * kernel recorded for itself at boot time.
+ */
+void get_usage_info_kernel(struct vm_usage_info *vui)
+{
+	memset(vui, 0, sizeof *vui);
+	vui->vui_total = kernel_boot_info.kernel_allocated_bytes;
+}
+
+/* Fill *vui with VM's own memory usage: the bytes the kernel set aside
+ * to load VM at boot, plus whatever VM has since allocated for itself.
+ */
+static void get_usage_info_vm(struct vm_usage_info *vui)
+{
+	int self_bytes;
+
+	self_bytes = get_vm_self_pages() * VM_PAGE_SIZE;
+	memset(vui, 0, sizeof *vui);
+	vui->vui_total = kernel_boot_info.vm_allocated_bytes + self_bytes;
+}
+
/*========================================================================*
* get_usage_info *
*========================================================================*/
memset(vui, 0, sizeof(*vui));
+ if(vmp->vm_endpoint == VM_PROC_NR) {
+ get_usage_info_vm(vui);
+ return;
+ }
+
+ if(vmp->vm_endpoint < 0) {
+ get_usage_info_kernel(vui);
+ return;
+ }
+
while((vr = region_get_iter(&v_iter))) {
physr_start_iter_least(vr->phys, &iter);
while((ph = physr_get_iter(&iter))) {
break;
case VMIW_USAGE:
- if (vm_isokendpt(m->VMI_EP, &pr) != OK)
+ if(m->VMI_EP < 0)
+ get_usage_info_kernel(&vui);
+ else if (vm_isokendpt(m->VMI_EP, &pr) != OK)
return EINVAL;
-
- get_usage_info(&vmproc[pr], &vui);
+ else get_usage_info(&vmproc[pr], &vui);
addr = (vir_bytes) &vui;
size = sizeof(vui);
}
}
+ if ((p->p_flags & IS_TASK)) {
+ fscanf(fp, " %lu ", &p->p_memory);
+ }
+
p->p_flags |= USED;
fclose(fp);
}
if(p-NR_TASKS == KERNEL) {
kernelticks = uticks;
- continue;
}
if(!(proc2[p].p_flags & IS_TASK)) {
if(proc2[p].p_flags & IS_SYSTEM)
pr = tick_procs[p].p;
- if(pr->p_flags & IS_TASK) {
+ if((pr->p_flags & IS_TASK) && pr->p_pid != KERNEL) {
/* skip old kernel tasks as they don't run anymore */
continue;
}