-int env_parse(char *env, char *fmt, int field, long *param, long min,
+int env_parse(const char *env, const char *fmt, int field, long *param, long min,
long max);
-void env_panic(char *env);
+void env_panic(const char *env);
int env_prefix(char *env, char *prefix);
extern char **env_argv;
void env_setargs(int argc, char *argv[]);
-int env_get_param(char *key, char *value, int max_size);
+int env_get_param(const char *key, char *value, int max_size);
int env_prefix(char *env, char *prefix);
-void env_panic(char *key);
-int env_parse(char *env, char *fmt, int field, long *param, long min,
- long max);
+void env_panic(const char *key);
+int env_parse(const char *env, const char *fmt, int field,
+ long *param, long min, long max);
#define fkey_map(fkeys, sfkeys) fkey_ctl(FKEY_MAP, (fkeys), (sfkeys))
#define fkey_unmap(fkeys, sfkeys) fkey_ctl(FKEY_UNMAP, (fkeys), (sfkeys))
/*===========================================================================*
* env_get_param *
*===========================================================================*/
-int env_get_param(key, value, max_len)
-char *key; /* which key to look up */
-char *value; /* where to store value */
-int max_len; /* maximum length of value */
+int env_get_param(const char *key, char *value, int max_len)
{
message m;
static char mon_params[MULTIBOOT_PARAM_BUF_SIZE]; /* copy parameters here */
/*=========================================================================*
* env_panic *
*=========================================================================*/
-void env_panic(key)
-char *key; /* environment variable whose value is bogus */
+void env_panic(const char *key)
{
static char value[EP_BUF_SIZE] = "<unknown>";
int s;
/*=========================================================================*
* env_parse *
*=========================================================================*/
-int env_parse(env, fmt, field, param, min, max)
-char *env; /* environment variable to inspect */
-char *fmt; /* template to parse it with */
-int field; /* field number of value to return */
-long *param; /* address of parameter to get */
-long min, max; /* minimum and maximum values for the parameter */
+int env_parse(const char *env, const char *fmt,
+ int field, long *param, long min, long max)
{
/* Parse an environment variable setting, something like "DPETH0=300:3".
* Panic if the parsing fails. Return EP_UNSET if the environment variable
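For illustration, a caller-side sketch of this interface (the key, format string
and bounds below are invented for the example; "x:d" reads a hex field, a colon,
then a decimal field, so field 0 of "DPETH0=300:3" yields 0x300):

	long port = 0x280;	/* default, kept unless the field is given */
	switch (env_parse("DPETH0", "x:d", 0, &port, 0x000L, 0x3FFL)) {
	case EP_UNSET:	/* variable not set: keep the default */
	case EP_ON:	/* set, but field left blank: keep the default */
	case EP_SET:	/* field parsed and range-checked into port */
		break;
	case EP_OFF:	/* "DPETH0=off": caller should disable itself */
		return;
	}

A syntactically malformed value does not return at all: env_parse() panics on
it, per the comment above.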
DPADD+= ${LIBSYS} ${LIBEXEC}
LDADD+= -lsys -lexec
+WARNS=5
+
MAN=
BINDIR?= /usr/sbin
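WARNS is the NetBSD-style warning knob that the MINIX build inherits from
<bsd.sys.mk>; level 5 is the strictest, adding (roughly, the exact set depends
on the mk files) -Wshadow, -Wcast-qual, -Wwrite-strings and old-style-definition
checks on top of -Wall. That setting is what the rest of this patch is chasing:
the const qualifiers, the K&R-to-ANSI conversions, and the renames that avoid
shadowed identifiers all exist to make the server build cleanly at this level.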
#include "memlist.h"
/* Number of physical pages in a 32-bit address space */
-#define NUMBER_PHYSICAL_PAGES (0x100000000ULL/VM_PAGE_SIZE)
+#define NUMBER_PHYSICAL_PAGES (int)(0x100000000ULL/VM_PAGE_SIZE)
#define PAGE_BITMAP_CHUNKS BITMAP_CHUNKS(NUMBER_PHYSICAL_PAGES)
static bitchunk_t free_pages_bitmap[PAGE_BITMAP_CHUNKS];
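To put numbers on the define (assuming the usual 4 KiB VM_PAGE_SIZE and a
32-bit bitchunk_t): 0x100000000ULL / 4096 = 1048576 pages, so the bitmap needs
1048576 / 32 = 32768 chunks, a fixed 128 KiB of static data. That also shows
why the new int cast is safe: 1048576 is far below INT_MAX, and making the
macro int-typed avoids signedness and width warnings when it is compared
against int variables such as maxpage below.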
#define PAGE_CACHE_MAX 10000
struct reserved_pages *mrq;
int m = 0;
- for(mrq = first_reserved_inuse; mrq > 0; mrq = mrq->next) {
+ for(mrq = first_reserved_inuse; mrq; mrq = mrq->next) {
assert(mrq->max_available > 0);
assert(mrq->max_available >= mrq->n_available);
m += mrq->max_available - mrq->n_available;
reservedqueue_fillslot(rq, rps, ph, vir);
}
-int reservedqueue_fill(void *rq_v)
+static int reservedqueue_fill(void *rq_v)
{
struct reserved_pages *rq = rq_v;
int r;
/*===========================================================================*
* mem_init *
*===========================================================================*/
-void mem_init(chunks)
-struct memory *chunks; /* list of free memory chunks */
+void mem_init(struct memory *chunks)
{
/* Initialize hole lists. There are two lists: 'hole_head' points to a linked
* list of all the holes (unused memory) in the system; 'free_slots' points to
{
phys_bytes boundary16 = 16 * 1024 * 1024 / VM_PAGE_SIZE;
phys_bytes boundary1 = 1 * 1024 * 1024 / VM_PAGE_SIZE;
- phys_bytes mem = NO_MEM;
- int maxpage = NUMBER_PHYSICAL_PAGES - 1, i;
+ phys_bytes mem = NO_MEM;
+ phys_bytes i; /* page number */
+ int maxpage = NUMBER_PHYSICAL_PAGES - 1;
static int lastscan = -1;
int startscan, run_length;
return OK;
}
-static char *ptestr(u32_t pte)
+static const char *ptestr(u32_t pte)
{
#define FLAG(constant, name) { \
if(pte & (constant)) { strcat(str, name); strcat(str, " "); } \
assert(start % VM_PAGE_SIZE == 0);
assert(end % VM_PAGE_SIZE == 0);
- assert(ARCH_VM_PDE(start) >= 0 && start <= end);
+ assert( /* ARCH_VM_PDE(start) >= 0 && */ start <= end);
assert(ARCH_VM_PDE(end) < ARCH_VM_DIR_ENTRIES);
#if LU_DEBUG
{
int kernmap_pde;
phys_bytes addr, len;
- int flags, index = 0;
+ int flags, pindex = 0;
u32_t offset = 0;
kernmap_pde = freepde();
offset = kernmap_pde * ARCH_BIG_PAGE_SIZE;
- while(sys_vmctl_get_mapping(index, &addr, &len,
+ while(sys_vmctl_get_mapping(pindex, &addr, &len,
&flags) == OK) {
int usedpde;
vir_bytes vir;
- if(index >= MAX_KERNMAPPINGS)
- panic("VM: too many kernel mappings: %d", index);
- kern_mappings[index].phys_addr = addr;
- kern_mappings[index].len = len;
- kern_mappings[index].flags = flags;
- kern_mappings[index].vir_addr = offset;
- kern_mappings[index].flags =
+ if(pindex >= MAX_KERNMAPPINGS)
+ panic("VM: too many kernel mappings: %d", pindex);
+ kern_mappings[pindex].phys_addr = addr;
+ kern_mappings[pindex].len = len;
+ kern_mappings[pindex].flags = flags;
+ kern_mappings[pindex].vir_addr = offset;
+ kern_mappings[pindex].flags =
ARCH_VM_PTE_PRESENT;
if(flags & VMMF_UNCACHED)
#if defined(__i386__)
- kern_mappings[index].flags |= PTF_NOCACHE;
+ kern_mappings[pindex].flags |= PTF_NOCACHE;
#elif defined(__arm__)
- kern_mappings[index].flags |= ARM_VM_PTE_DEVICE;
+ kern_mappings[pindex].flags |= ARM_VM_PTE_DEVICE;
#endif
if(flags & VMMF_USER)
- kern_mappings[index].flags |= ARCH_VM_PTE_USER;
+ kern_mappings[pindex].flags |= ARCH_VM_PTE_USER;
#if defined(__arm__)
else
- kern_mappings[index].flags |= ARM_VM_PTE_SUPER;
+ kern_mappings[pindex].flags |= ARM_VM_PTE_SUPER;
#endif
if(flags & VMMF_WRITE)
- kern_mappings[index].flags |= ARCH_VM_PTE_RW;
+ kern_mappings[pindex].flags |= ARCH_VM_PTE_RW;
#if defined(__i386__)
if(flags & VMMF_GLO)
- kern_mappings[index].flags |= I386_VM_GLOBAL;
+ kern_mappings[pindex].flags |= I386_VM_GLOBAL;
#elif defined(__arm__)
else
- kern_mappings[index].flags |= ARCH_VM_PTE_RO;
+ kern_mappings[pindex].flags |= ARCH_VM_PTE_RO;
#endif
if(addr % VM_PAGE_SIZE)
panic("VM: addr unaligned: %lu", addr);
if(len % VM_PAGE_SIZE)
panic("VM: len unaligned: %lu", len);
vir = offset;
- if(sys_vmctl_reply_mapping(index, vir) != OK)
+ if(sys_vmctl_reply_mapping(pindex, vir) != OK)
panic("VM: reply failed");
offset += len;
- index++;
+ pindex++;
kernmappings++;
usedpde = ARCH_VM_PDE(offset);
/*===========================================================================*
* real_brk *
*===========================================================================*/
-int real_brk(vmp, v)
-struct vmproc *vmp;
-vir_bytes v;
+int real_brk(struct vmproc *vmp, vir_bytes v)
{
if(map_region_extend_upto_v(vmp, v) == OK) {
return OK;
L__BIT_ARR_DEFN(branch)
/* Zero-based depth of path into tree. */
- unsigned depth;
+ int depth;
/* Handles of nodes in path from root to current node (returned by *). */
AVL_HANDLE path_h[(AVL_MAX_DEPTH) - 1];
/* Balances subtree, returns handle of root node of subtree after balancing.
*/
-L__SC AVL_HANDLE L__(balance)(L__BALANCE_PARAM_DECL_PREFIX AVL_HANDLE bal_h)
+static L__SC AVL_HANDLE L__(balance)(L__BALANCE_PARAM_DECL_PREFIX AVL_HANDLE bal_h)
{
AVL_HANDLE deep_h;
struct fdref *fdref_new(struct vmproc *owner, ino_t ino, dev_t dev, int fd)
{
- struct fdref *fdref;
+ struct fdref *nfdref;
- if(!SLABALLOC(fdref)) return NULL;
+ if(!SLABALLOC(nfdref)) return NULL;
- fdref->fd = fd;
- fdref->refcount = 0;
- fdref->dev = dev;
- fdref->ino = ino;
- fdref->next = fdrefs;
- fdrefs = fdref;
+ nfdref->fd = fd;
+ nfdref->refcount = 0;
+ nfdref->dev = dev;
+ nfdref->ino = ino;
+ nfdref->next = fdrefs;
+ fdrefs = nfdref;
- return fdref;
+ return nfdref;
}
void fdref_ref(struct fdref *ref, struct vir_region *region)
ino_t ino;
struct fdref *next;
int counting; /* sanity check */
-} *fdref;
+};
#endif
/* Table of calls and a macro to test for being in range. */
struct {
int (*vmc_func)(message *); /* Call handles message. */
- char *vmc_name; /* Human-readable string. */
+ const char *vmc_name; /* Human-readable string. */
} vm_calls[NR_VM_CALLS];
/* Macro to verify call range and map 'high' range to 'base' range
/* This is VM's main loop. */
while (TRUE) {
int r, c;
- u32_t type, param;
+ int type, param;
SANITYCHECK(SCL_TOP);
if(missing_spares > 0) {
return(SUSPEND);
}
-struct vmproc *init_proc(endpoint_t ep_nr)
+static struct vmproc *init_proc(endpoint_t ep_nr)
{
static struct boot_image *ip;
return OK;
}
-void exec_bootproc(struct vmproc *vmp, struct boot_image *ip)
+static void exec_bootproc(struct vmproc *vmp, struct boot_image *ip)
{
struct vm_exec_info vmexeci;
struct exec_info *execi = &vmexeci.execi;
}
/* Set up table of calls. */
-#define CALLMAP(code, func) { int i; \
- i=CALLNUMBER(code); \
- assert(i >= 0); \
- assert(i < NR_VM_CALLS); \
- vm_calls[i].vmc_func = (func); \
- vm_calls[i].vmc_name = #code; \
+#define CALLMAP(code, func) { int _cmi; \
+ _cmi=CALLNUMBER(code); \
+ assert(_cmi >= 0); \
+ assert(_cmi < NR_VM_CALLS); \
+ vm_calls[_cmi].vmc_func = (func); \
+ vm_calls[_cmi].vmc_name = #code; \
}
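The rename from i to _cmi is shadow avoidance: CALLMAP expands inside functions
that often declare their own i, and -Wshadow (enabled at the higher WARNS
levels) would fire on every expansion otherwise. A typical expansion site,
using names from the VM call table, looks like:

	CALLMAP(VM_MMAP, do_mmap);	/* vm_calls[slot].vmc_name becomes "VM_MMAP" */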
/* Set call table to 0. This invalidates all calls (clear
/*===========================================================================*
* map_service *
*===========================================================================*/
-static int map_service(rpub)
-struct rprocpub *rpub;
+static int map_service(struct rprocpub *rpub)
{
/* Map a new service by initializing its call mask. */
int r, proc_nr;
{
u32_t allocflags;
phys_bytes new_pages, new_page_cl, cur_ph;
- int p, pages;
+ phys_bytes p, pages;
allocflags = vrallocflags(region->flags);
u64_t dev_off = (u64_t) msg->m_u.m_vmmcp.dev_offset_pages * VM_PAGE_SIZE;
u64_t ino_off = (u64_t) msg->m_u.m_vmmcp.ino_offset_pages * VM_PAGE_SIZE;
int n;
- int bytes = msg->m_u.m_vmmcp.pages * VM_PAGE_SIZE;
+ phys_bytes bytes = msg->m_u.m_vmmcp.pages * VM_PAGE_SIZE;
struct vir_region *vr;
struct vmproc *caller;
vir_bytes offset;
u64_t ino_off = (u64_t) msg->m_u.m_vmmcp.ino_offset_pages * VM_PAGE_SIZE;
int n;
struct vmproc *caller;
- vir_bytes offset;
- int bytes = msg->m_u.m_vmmcp.pages * VM_PAGE_SIZE;
+ phys_bytes offset;
+ phys_bytes bytes = msg->m_u.m_vmmcp.pages * VM_PAGE_SIZE;
if(bytes < VM_PAGE_SIZE) return EINVAL;
*/
#include "vm.h"
+#include "proto.h"
/* These functions are static so as to not pollute the
* global namespace, and are accessed through their function
void *, void *);
typedef struct mem_type {
- char *name; /* human-readable name */
+ const char *name; /* human-readable name */
int (*ev_new)(struct vir_region *region);
void (*ev_delete)(struct vir_region *region);
int (*ev_reference)(struct phys_region *pr, struct phys_region *newpr);
/*===========================================================================*
* map_perm_check *
*===========================================================================*/
-int map_perm_check(endpoint_t caller, endpoint_t target,
+static int map_perm_check(endpoint_t caller, endpoint_t target,
phys_bytes physaddr, phys_bytes len)
{
int r;
sublen, wrflag, NULL, NULL, 0);
} else {
r = map_handle_memory(vmp, region, offset,
- sublen, wrflag, callback, state, sizeof(*state));
+ sublen, wrflag, callback, state, statelen);
}
len -= sublen;
pb = pr->ph;
assert(pb->refcount > 0);
USE(pb, pb->refcount--;);
- assert(pb->refcount >= 0);
+/* assert(pb->refcount >= 0); */ /* always true */
if(pb->firstregion == pr) {
USE(pb, pb->firstregion = pr->next_ph_list;);
wrflag, vfs_callback_t cb, void *state, int statelen);
/* $(ARCH)/pagetable.c */
-void pt_init();
+void pt_init(void);
void vm_freepages(vir_bytes vir, int pages);
void pt_init_mem(void);
void pt_check(struct vmproc *vmp);
u32_t map_region_get_tag(struct vir_region *vr);
int map_get_phys(struct vmproc *vmp, vir_bytes addr, phys_bytes *r);
int map_get_ref(struct vmproc *vmp, vir_bytes addr, u8_t *cnt);
-int physregions(struct vir_region *vr);
+unsigned int physregions(struct vir_region *vr);
void get_usage_info(struct vmproc *vmp, struct vm_usage_info *vui);
void get_usage_info_kernel(struct vm_usage_info *vui);
{
}
-void map_printregion(struct vir_region *vr)
+static void map_printregion(struct vir_region *vr)
{
- int i;
+ unsigned int i;
struct phys_region *ph;
printf("map_printmap: map_name: %s\n", vr->def_memtype->name);
printf("\t%lx (len 0x%lx, %lukB), %p, %s\n",
int i;
struct phys_region *foundregion;
assert(!(offset % VM_PAGE_SIZE));
- assert(offset >= 0 && offset < region->length);
+ assert( /* offset >= 0 && */ offset < region->length);
i = offset/VM_PAGE_SIZE;
if((foundregion = region->physblocks[i]))
assert(foundregion->offset == offset);
int i;
struct vmproc *proc;
assert(!(offset % VM_PAGE_SIZE));
- assert(offset >= 0 && offset < region->length);
+ assert( /* offset >= 0 && */ offset < region->length);
i = offset/VM_PAGE_SIZE;
proc = region->parent;
assert(proc);
/*===========================================================================*
* map_printmap *
*===========================================================================*/
-void map_printmap(vmp)
-struct vmproc *vmp;
+void map_printmap(struct vmproc *vmp)
{
struct vir_region *vr;
region_iter iter;
return nextvr;
}
-int pr_writable(struct vir_region *vr, struct phys_region *pr)
+static int pr_writable(struct vir_region *vr, struct phys_region *pr)
{
assert(pr->memtype->writable);
return ((vr->flags & VR_WRITABLE) && pr->memtype->writable(pr));
return region_find_slot_range(vmp, minv, maxv, length);
}
-static int phys_slot(vir_bytes len)
+static unsigned int phys_slot(vir_bytes len)
{
assert(!(len % VM_PAGE_SIZE));
return len / VM_PAGE_SIZE;
}
-struct vir_region *region_new(struct vmproc *vmp, vir_bytes startv, vir_bytes length,
+static struct vir_region *region_new(struct vmproc *vmp, vir_bytes startv, vir_bytes length,
int flags, mem_type_t *memtype)
{
struct vir_region *newregion;
- struct phys_region **physregions;
+ struct phys_region **newphysregions;
static u32_t id;
int slots = phys_slot(length);
newregion->lower = newregion->higher = NULL;
newregion->parent = vmp;);
- if(!(physregions = calloc(slots, sizeof(struct phys_region *)))) {
+ if(!(newphysregions = calloc(slots, sizeof(struct phys_region *)))) {
printf("VM: region_new: allocating phys blocks failed\n");
SLABFREE(newregion);
return NULL;
}
- USE(newregion, newregion->physblocks = physregions;);
+ USE(newregion, newregion->physblocks = newphysregions;);
return newregion;
}
/*===========================================================================*
* map_page_region *
*===========================================================================*/
-struct vir_region *map_page_region(vmp, minv, maxv, length,
- flags, mapflags, memtype)
-struct vmproc *vmp;
-vir_bytes minv;
-vir_bytes maxv;
-vir_bytes length;
-u32_t flags;
-int mapflags;
-mem_type_t *memtype;
+struct vir_region *map_page_region(struct vmproc *vmp, vir_bytes minv,
+ vir_bytes maxv, vir_bytes length, u32_t flags, int mapflags,
+ mem_type_t *memtype)
{
struct vir_region *newregion;
vir_bytes startv;
/*========================================================================*
* map_free_proc *
*========================================================================*/
-int map_free_proc(vmp)
-struct vmproc *vmp;
+int map_free_proc(struct vmproc *vmp)
{
struct vir_region *r;
/*===========================================================================*
* map_lookup *
*===========================================================================*/
-struct vir_region *map_lookup(vmp, offset, physr)
-struct vmproc *vmp;
-vir_bytes offset;
-struct phys_region **physr;
+struct vir_region *map_lookup(struct vmproc *vmp,
+ vir_bytes offset, struct phys_region **physr)
{
struct vir_region *r;
/*===========================================================================*
* map_pf *
*===========================================================================*/
-int map_pf(vmp, region, offset, write, pf_callback, state, len, io)
-struct vmproc *vmp;
-struct vir_region *region;
-vir_bytes offset;
-int write;
-vfs_callback_t pf_callback;
-void *state;
-int len;
-int *io;
+int map_pf(struct vmproc *vmp, struct vir_region *region, vir_bytes offset,
+ int write, vfs_callback_t pf_callback, void *state, int len, int *io)
{
struct phys_region *ph;
int r = OK;
offset -= offset % VM_PAGE_SIZE;
- assert(offset >= 0);
+/* assert(offset >= 0); */ /* always true */
assert(offset < region->length);
assert(!(region->vaddr % VM_PAGE_SIZE));
return r;
}
-int map_handle_memory(vmp, region, start_offset, length, write,
- cb, state, statelen)
-struct vmproc *vmp;
-struct vir_region *region;
-vir_bytes start_offset;
-vir_bytes length;
-int write;
-vfs_callback_t cb;
-void *state;
-int statelen;
+int map_handle_memory(struct vmproc *vmp,
+ struct vir_region *region, vir_bytes start_offset, vir_bytes length,
+ int write, vfs_callback_t cb, void *state, int statelen)
{
vir_bytes offset, lim;
int r;
/*===========================================================================*
* copy_abs2region *
*===========================================================================*/
-int copy_abs2region(phys_bytes abs, struct vir_region *destregion,
+int copy_abs2region(phys_bytes absaddr, struct vir_region *destregion,
phys_bytes offset, phys_bytes len)
{
return EFAULT;
}
- if(sys_abscopy(abs, ph->ph->phys + suboffset, sublen) != OK) {
+ if(sys_abscopy(absaddr, ph->ph->phys + suboffset, sublen) != OK) {
printf("VM: copy_abs2region: abscopy failed.\n");
return EFAULT;
}
- abs += sublen;
+ absaddr += sublen;
offset += sublen;
len -= sublen;
}
/*========================================================================*
* map_proc_copy *
*========================================================================*/
-int map_proc_copy(dst, src)
-struct vmproc *dst;
-struct vmproc *src;
+int map_proc_copy(struct vmproc *dst, struct vmproc *src)
{
/* Copy all the memory regions from the src process to the dst process. */
region_init(&dst->vm_regions_avl);
/*========================================================================*
* map_proc_copy_from *
*========================================================================*/
-int map_proc_copy_from(dst, src, start_src_vr)
-struct vmproc *dst;
-struct vmproc *src;
-struct vir_region *start_src_vr;
+int map_proc_copy_from(struct vmproc *dst, struct vmproc *src,
+ struct vir_region *start_src_vr)
{
struct vir_region *vr;
region_iter v_iter;
return OK;
}
-int split_region(struct vmproc *vmp, struct vir_region *vr,
+static int split_region(struct vmproc *vmp, struct vir_region *vr,
struct vir_region **vr1, struct vir_region **vr2, vir_bytes split_len)
{
struct vir_region *r1 = NULL, *r2 = NULL;
if(this_unmap_start >= this_unmap_limit) continue;
if(this_unmap_start > vr->vaddr && this_unmap_limit < thislimit) {
- int r;
struct vir_region *vr1, *vr2;
vir_bytes split_len = this_unmap_limit - vr->vaddr;
assert(split_len > 0);
}
}
-int physregions(struct vir_region *vr)
+unsigned int physregions(struct vir_region *vr)
{
- int n = 0;
+ unsigned int n = 0;
vir_bytes voffset;
for(voffset = 0; voffset < vr->length; voffset += VM_PAGE_SIZE) {
if(physblock_get(vr, voffset))
struct vmproc *parent; /* Process that owns this vir_region. */
mem_type_t *def_memtype; /* Default instantiated memory type. */
int remaps;
- u32_t id; /* unique id */
+ int id; /* unique id */
union {
phys_bytes phys; /* VR_DIRECT */
#define SLABSIZES 200
-#define ITEMSPERPAGE(bytes) (DATABYTES / (bytes))
+#define ITEMSPERPAGE(bytes) (int)(DATABYTES / (bytes))
#define ELBITS (sizeof(element_t)*8)
#define BITPAT(b) (1UL << ((b) % ELBITS))
**, int *);
#define GETSLAB(b, s) { \
- int i; \
+ int _gsi; \
assert((b) >= MINSIZE); \
- i = (b) - MINSIZE; \
- assert((i) < SLABSIZES); \
- assert((i) >= 0); \
- s = &slabs[i]; \
+ _gsi = (b) - MINSIZE; \
+ assert((_gsi) < SLABSIZES); \
+ assert((_gsi) >= 0); \
+ s = &slabs[_gsi]; \
}
/* move slabdata nw to slabheader sl under list number l. */
if(next) { SLABDATAUSE(next, next->sdh.prev = prev;); } \
}
-static struct slabdata *newslabdata()
+static struct slabdata *newslabdata(void)
{
struct slabdata *n;
phys_bytes p;
return;
}
+#if MEMPROTECT
/*===========================================================================*
* void *slablock *
*===========================================================================*/
return;
}
+#endif
#if SANITYCHECKS
/*===========================================================================*
#include "vm.h"
#include "glo.h"
-#define ELEMENTS(a) (sizeof(a)/sizeof((a)[0]))
+#define ELEMENTS(a) (int)(sizeof(a)/sizeof((a)[0]))
#endif
#define _SYSTEM 1
+#define brk _brk /* get rid of no previous prototype warning */
+
#include <minix/callnr.h>
#include <minix/com.h>
#include <minix/config.h>
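Presumably the define works because it is in effect before the system headers
are read: the brk() prototype that <unistd.h> supplies is thereby renamed to
_brk(), so it matches VM's own definition further down (the patch renames that
definition from _brk to brk, which the macro expands straight back to _brk),
and the missing-prototype warning goes away without adding a declaration by
hand.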
/*===========================================================================*
* vm_isokendpt *
*===========================================================================*/
-int vm_isokendpt(endpoint_t endpoint, int *proc)
+int vm_isokendpt(endpoint_t endpoint, int *procn)
{
- *proc = _ENDPOINT_P(endpoint);
- if(*proc < 0 || *proc >= NR_PROCS)
+ *procn = _ENDPOINT_P(endpoint);
+ if(*procn < 0 || *procn >= NR_PROCS)
return EINVAL;
- if(*proc >= 0 && endpoint != vmproc[*proc].vm_endpoint)
+ if(*procn >= 0 && endpoint != vmproc[*procn].vm_endpoint)
return EDEADEPT;
- if(*proc >= 0 && !(vmproc[*proc].vm_flags & VMF_INUSE))
+ if(*procn >= 0 && !(vmproc[*procn].vm_flags & VMF_INUSE))
return EDEADEPT;
return OK;
}
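A sketch of the usual call pattern (hypothetical surrounding code, error
handling abbreviated):

	int slot;
	struct vmproc *vmp;

	if (vm_isokendpt(m.m_source, &slot) != OK)
		return EINVAL;	/* stale or out-of-range endpoint */
	vmp = &vmproc[slot];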
return 0;
}
-int _brk(void *addr)
+int brk(void *addr)
{
vir_bytes target = roundup((vir_bytes)addr, VM_PAGE_SIZE), v;
extern char _end;
char reqstate[STATELEN];
void *opaque;
endpoint_t who;
- u32_t req_id;
+ int req_id;
vfs_callback_t callback;
struct vfs_request_node *next;
} *first_queued, *active;
panic("VM: asynsend to VFS failed");
}
+#define ID_MAX LONG_MAX
+
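ID_MAX is presumably the wrap point for the reqid counter below. Note that it
is defined as LONG_MAX while reqid is an int; on MINIX's ILP32 targets the two
types have the same width, but INT_MAX would be the more self-describing choice.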
/*===========================================================================*
* vfs_request *
*===========================================================================*/
* and then handle the reply as if it were a VM_VFS_REPLY request.
*/
message *m;
- static u32_t reqid = 0;
+ static int reqid = 0;
struct vfs_request_node *reqnode;
reqid++;