#define VM_RS_SET_PRIV (VM_RQ_BASE+37)
# define VM_RS_NR m2_i1
# define VM_RS_BUF m2_l1
+# define VM_RS_SYS m2_i2
#define VM_QUERY_EXIT (VM_RQ_BASE+38)
# define VM_QUERY_RET_PT m2_i1
int devman_id;
};
+/* Return whether the given boot process is a user process, as opposed to a
+ * system process. Only usable by core services during SEF initialization.
+ */
+#define IS_RPUB_BOOT_USR(rpub) ((rpub)->endpoint == INIT_PROC_NR)
+
int minix_rs_lookup(const char *name, endpoint_t *value);
#endif
int vm_unmap_phys(endpoint_t who, void *vaddr, size_t len);
int vm_notify_sig(endpoint_t ep, endpoint_t ipc_ep);
-int vm_set_priv(int procnr, void *buf);
+int vm_set_priv(endpoint_t ep, void *buf, int sys_proc);
int vm_update(endpoint_t src_e, endpoint_t dst_e);
int vm_memctl(endpoint_t ep, int req);
int vm_query_exit(endpoint_t *endpt);
#include <lib.h>
#include <unistd.h>
-int vm_set_priv(int nr, void *buf)
+int vm_set_priv(endpoint_t ep, void *buf, int sys_proc)
{
message m;
- m.VM_RS_NR = nr;
+ m.VM_RS_NR = ep;
m.VM_RS_BUF = (long) buf;
+ m.VM_RS_SYS = sys_proc;
return _syscall(VM_PROC_NR, VM_RS_SET_PRIV, &m);
}
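
For reference, a minimal caller-side sketch of the two conventions this extended wrapper supports. The helper name and its parameters are illustrative only; endpoint_t and bitchunk_t are assumed to come from the usual MINIX headers (<minix/types.h>, <minix/bitmap.h>).

/* A system process passes its own call mask and receives a private ACL slot
 * in VM; a user process passes NULL and shares VM's single user ACL entry.
 */
static int set_vm_privs_example(endpoint_t ep, bitchunk_t *mask, int sys)
{
	return vm_set_priv(ep, sys ? (void *) mask : NULL, sys);
}
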
}
/* Tell VM about allowed calls. */
- if ((s = vm_set_priv(rpub->endpoint, &rpub->vm_call_mask[0])) != OK) {
+ if ((s = vm_set_priv(rpub->endpoint, &rpub->vm_call_mask[0], TRUE)) != OK) {
printf("RS: vm_set_priv failed: %d\n", s);
cleanup_service(rp);
return s;
!= OK) {
return kill_service(rp,"can't set script privileges",r);
}
+ /* Set the script's privileges on other servers. */
+ if ((r = vm_set_priv(endpoint, NULL, FALSE)) != OK) {
+ return kill_service(rp,"can't set script VM privs",r);
+ }
/* Allow the script to run. */
if ((r = sys_privctl(endpoint, SYS_PRIV_ALLOW, NULL)) != OK) {
return kill_service(rp,"can't let the script run",r);
}
/* Update VM calls. */
- if ((r = vm_set_priv(rpub->endpoint, &rpub->vm_call_mask[0])) != OK) {
+ if ((r = vm_set_priv(rpub->endpoint, &rpub->vm_call_mask[0],
+ !!(rp->r_priv.s_flags & SYS_PROC))) != OK) {
printf("RS: do_edit: failed: %d\n", r);
return r;
}
struct dmap *fdp, *sdp;
struct fproc *rfp;
+ if (IS_RPUB_BOOT_USR(rpub)) return(OK);
+
/* Process is a service */
if (isokendpt(rpub->endpoint, &slot) != OK) {
printf("VFS: can't map service with unknown endpoint %d\n",
mmap.c slaballoc.c region.c pagefaults.c \
rs.c queryexit.c pb.c regionavl.c \
mem_anon.c mem_directphys.c mem_anon_contig.c mem_shared.c \
- mem_cache.c cache.c vfs.c mem_file.c fdref.c
+ mem_cache.c cache.c vfs.c mem_file.c fdref.c acl.c
.if ${MACHINE_ARCH} == "earm"
LDFLAGS+= -T ${.CURDIR}/arch/${MACHINE_ARCH}/vm.lds
--- /dev/null
+
+/* Call mask ACL management. */
+
+#include <minix/drivers.h>
+
+#include "proto.h"
+#include "glo.h"
+#include "util.h"
+
+#define NO_ACL -1
+#define USER_ACL 0
+#define FIRST_SYS_ACL 1
+
+static bitchunk_t acl_mask[NR_SYS_PROCS][VM_CALL_MASK_SIZE];
+static bitchunk_t acl_inuse[BITMAP_CHUNKS(NR_SYS_PROCS)];
+
+/*
+ * Initialize ACL data structures.
+ */
+void
+acl_init(void)
+{
+ int i;
+
+ for (i = 0; i < ELEMENTS(vmproc); i++)
+ vmproc[i].vm_acl = NO_ACL;
+
+ memset(acl_mask, 0, sizeof(acl_mask));
+ memset(acl_inuse, 0, sizeof(acl_inuse));
+}
+
+/*
+ * Check whether a process is allowed to make a certain (zero-based) call.
+ * Return OK or an error.
+ */
+int
+acl_check(struct vmproc *vmp, int call)
+{
+ /* If the process has no ACL, all calls are allowed, for now. */
+ if (vmp->vm_acl == NO_ACL) {
+ printf("VM: calling process %u has no ACL!\n",
+ vmp->vm_endpoint);
+
+ return OK;
+ }
+
+ /* See if the call is allowed. */
+ if (!GET_BIT(acl_mask[vmp->vm_acl], call))
+ return EPERM;
+
+ return OK;
+}
+
+/*
+ * Assign a call mask to a process. User processes share the first ACL entry.
+ * System processes are each assigned one of the other slots. For user
+ * processes, no call mask needs to be provided: the shared mask will simply
+ * be inherited in that case.
+ */
+void
+acl_set(struct vmproc *vmp, bitchunk_t *mask, int sys_proc)
+{
+ int i;
+
+ acl_clear(vmp);
+
+ if (sys_proc) {
+ for (i = FIRST_SYS_ACL; i < NR_SYS_PROCS; i++)
+ if (!GET_BIT(acl_inuse, i))
+ break;
+
+ /*
+ * This should never happen. If it does, then individual user
+ * processes have been assigned their own call masks. It is
+ * RS's responsibility to prevent that.
+ */
+ if (i == NR_SYS_PROCS) {
+ printf("VM: no ACL entries available!\n");
+ return;
+ }
+ } else
+ i = USER_ACL;
+
+ if (!GET_BIT(acl_inuse, i) && mask == NULL)
+ printf("VM: WARNING: inheriting uninitialized ACL mask\n");
+
+ SET_BIT(acl_inuse, i);
+ vmp->vm_acl = i;
+
+ if (mask != NULL)
+ memcpy(&acl_mask[vmp->vm_acl], mask, sizeof(acl_mask[0]));
+}
+
+/*
+ * A process has forked. User processes inherit their parent's ACL by default,
+ * although they may be turned into system processes later. System processes
+ * do not inherit an ACL, and will have to be assigned one before getting to
+ * run.
+ */
+void
+acl_fork(struct vmproc *vmp)
+{
+ if (vmp->vm_acl != USER_ACL)
+ vmp->vm_acl = NO_ACL;
+}
+
+/*
+ * A process has exited. Release its ACL slot, unless it is the shared user
+ * entry, and mark the process as having no ACL.
+ */
+void
+acl_clear(struct vmproc *vmp)
+{
+ if (vmp->vm_acl != NO_ACL) {
+ if (vmp->vm_acl != USER_ACL)
+ UNSET_BIT(acl_inuse, vmp->vm_acl);
+
+ vmp->vm_acl = NO_ACL;
+ }
+}
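
For reference, a compact illustrative sketch of how the five entry points above line up with the call sites added elsewhere in this change (VM initialization, do_rs_set_priv, the request dispatch loop, fork handling, and clear_proc). The function name and its arguments are hypothetical.

static void acl_lifecycle_example(struct vmproc *parent, struct vmproc *child,
	bitchunk_t *mask)
{
	acl_init();			/* VM start-up: clear all ACL slots */
	acl_set(parent, mask, TRUE);	/* RS registers a system service */

	if (acl_check(parent, 0) != OK)	/* dispatch: zero-based call number */
		printf("VM call 0 not permitted\n");

	acl_fork(child);		/* fork: only the shared user ACL is inherited */
	acl_clear(parent);		/* exit: release the per-process slot */
}
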
void clear_proc(struct vmproc *vmp)
{
region_init(&vmp->vm_regions_avl);
+ acl_clear(vmp);
vmp->vm_flags = 0; /* Clear INUSE, so slot is free. */
#if VMSTATS
vmp->vm_bytecopies = 0;
/* Only inherit these flags. */
vmc->vm_flags &= VMF_INUSE;
- /* inherit the priv call bitmaps */
- memcpy(&vmc->vm_call_mask, &vmp->vm_call_mask, sizeof(vmc->vm_call_mask));
+ /* Deal with ACLs. */
+ acl_fork(vmc);
/* Tell kernel about the (now successful) FORK. */
if((r=sys_fork(vmp->vm_endpoint, childproc,
((c) - VM_RQ_BASE) : -1)
static int map_service(struct rprocpub *rpub);
-static int vm_acl_ok(endpoint_t caller, int call);
static int do_rs_init(message *m);
/* SEF functions and variables. */
} else if(c < 0 || !vm_calls[c].vmc_func) {
/* out of range or missing callnr */
} else {
- if (vm_acl_ok(who_e, c) != OK) {
+ if (acl_check(&vmproc[caller_slot], c) != OK) {
printf("VM: unauthorized %s by %d\n",
vm_calls[c].vmc_name, who_e);
} else {
vmproc[i].vm_slot = i;
}
+ /* Initialize ACL data structures. */
+ acl_init();
+
/* region management initialization. */
map_region_init();
}
/* Copy the call mask. */
- memcpy(&vmproc[proc_nr].vm_call_mask, &rpub->vm_call_mask,
- sizeof(vmproc[proc_nr].vm_call_mask));
+ acl_set(&vmproc[proc_nr], rpub->vm_call_mask, !IS_RPUB_BOOT_USR(rpub));
return(OK);
}
-
-/*===========================================================================*
- * vm_acl_ok *
- *===========================================================================*/
-static int vm_acl_ok(endpoint_t caller, int call)
-{
- int n, r;
-
- if ((r = vm_isokendpt(caller, &n)) != OK)
- panic("VM: from strange source: %d", caller);
-
- /* See if the call is allowed. */
- if (!GET_BIT(vmproc[n].vm_call_mask, call)) {
- return EPERM;
- }
-
- return OK;
-}
-
#include "pt.h"
#include "vm.h"
+/* acl.c */
+void acl_init(void);
+int acl_check(struct vmproc *vmp, int call);
+void acl_set(struct vmproc *vmp, bitchunk_t *mask, int sys_proc);
+void acl_fork(struct vmproc *vmp);
+void acl_clear(struct vmproc *vmp);
+
/* alloc.c */
void *reservedqueue_new(int, int, int, int);
int reservedqueue_alloc(void *, phys_bytes *, void **);
{
int r, n, nr;
struct vmproc *vmp;
+ bitchunk_t call_mask[VM_CALL_MASK_SIZE], *call_mask_p;
nr = m->VM_RS_NR;
vmp = &vmproc[n];
if (m->VM_RS_BUF) {
- r = sys_datacopy(m->m_source, (vir_bytes) m->VM_RS_BUF,
- SELF, (vir_bytes) vmp->vm_call_mask,
- sizeof(vmp->vm_call_mask));
+ r = sys_datacopy(m->m_source, (vir_bytes) m->VM_RS_BUF, SELF,
+ (vir_bytes) call_mask, sizeof(call_mask));
if (r != OK)
return r;
+ call_mask_p = call_mask;
+ } else {
+ if (m->VM_RS_SYS) {
+ printf("VM: do_rs_set_priv: sys procs don't share!\n");
+ return EINVAL;
+ }
+ call_mask_p = NULL;
}
+ acl_set(vmp, call_mask_p, m->VM_RS_SYS);
+
return OK;
}
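
For reference, a rough sketch of how such a call mask can be populated on the caller side before it reaches do_rs_set_priv(). The function name is illustrative, VM_MMAP serves only as an example request type, and SET_BIT plus <string.h> are assumed to be available.

/* Calls are indexed zero-based (request type minus VM_RQ_BASE), matching the
 * 'call' argument that acl_check() receives from VM's dispatch loop.
 */
static void build_call_mask_example(bitchunk_t mask[VM_CALL_MASK_SIZE])
{
	memset(mask, 0, sizeof(bitchunk_t) * VM_CALL_MASK_SIZE);
	SET_BIT(mask, VM_MMAP - VM_RQ_BASE);	/* permit this one call only */
}
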
r_usage.ru_minflt = vmp->vm_minor_page_fault;
r_usage.ru_majflt = vmp->vm_major_page_fault;
- return sys_datacopy(SELF, &r_usage, m->m_source,
+ return sys_datacopy(SELF, (vir_bytes) &r_usage, m->m_source,
(vir_bytes) m->RU_RUSAGE_ADDR, (vir_bytes) sizeof(r_usage));
}
/* Regions in virtual address space. */
region_avl vm_regions_avl;
vir_bytes vm_region_top; /* highest vaddr last inserted */
- bitchunk_t vm_call_mask[VM_CALL_MASK_SIZE];
+ int vm_acl;
int vm_slot; /* process table slot */
#if VMSTATS
int vm_bytecopies;