#define MINEPM 0
#define MAXMASK (sizeof(mask_t)*8)
#define ANYEPM (MINEPM+MAXMASK-1)
-#define MAXEPM (ANYEPM-1)
+#define NEEDACL (MINEPM+MAXMASK-2)
+#define MAXEPM (NEEDACL-1)
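+/* ANYEPM: the call is open to every endpoint. NEEDACL: the call is open to
+ * every endpoint, but only if the caller's per-process ACL bit for it is set
+ * (see vm_acl_ok() below). All other bits in a vmc_callers mask stand for one
+ * specific endpoint in the MINEPM..MAXEPM range.
+ */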
#define EPM(e) ((1L) << ((e)-MINEPM))
#define EPMOK(mask, ep) (((mask) & EPM(ANYEPM)) || ((ep) >= MINEPM && (ep) <= MAXEPM && (EPM(ep) & (mask))))
-#define EPMANYOK(mask, ep) ((mask) & EPM(ANYEPM))
/* Table of calls and a macro to test for being in range. */
struct {
((c) - VM_RQ_BASE) : -1)
FORWARD _PROTOTYPE(void vm_init, (void));
+FORWARD _PROTOTYPE(int vm_acl_ok, (endpoint_t caller, int call));
-#if SANITYCHECKS
-extern int kputc_use_private_grants;
-#endif
/*===========================================================================*
* main *
continue;
}
who_e = msg.m_source;
- c = msg.m_type - VM_RQ_BASE;
+ c = CALLNUMBER(msg.m_type);
result = ENOSYS; /* Out of range or restricted calls return this. */
- if((c=CALLNUMBER(msg.m_type)) < 0 || !vm_calls[c].vmc_func) {
+ if(c < 0 || !vm_calls[c].vmc_func) {
printf("VM: out of range or missing callnr %d from %d\n",
- msg.m_type, msg.m_source);
- } else if(!EPMOK(vm_calls[c].vmc_callers, msg.m_source)) {
- printf("VM: restricted call %s from %d instead of 0x%lx\n",
- vm_calls[c].vmc_name, msg.m_source,
- vm_calls[c].vmc_callers);
- } else if (EPMANYOK(vm_calls[c].vmc_callers, who_e) &&
- c != VM_MMAP-VM_RQ_BASE &&
- c != VM_MUNMAP_TEXT-VM_RQ_BASE &&
- c != VM_MUNMAP-VM_RQ_BASE) {
- /* check VM acl, we care ANYEPM only,
- * and omit other hard-coded permission checks.
- */
- int n;
-
- if ((r = vm_isokendpt(who_e, &n)) != OK)
- vm_panic("VM: from strange source.", who_e);
-
- if (!GET_BIT(vmproc[n].vm_call_priv_mask, c))
- printf("VM: restricted call %s from %d\n",
- vm_calls[c].vmc_name, who_e);
- else {
- SANITYCHECK(SCL_FUNCTIONS);
- result = vm_calls[c].vmc_func(&msg);
- SANITYCHECK(SCL_FUNCTIONS);
- }
+ msg.m_type, who_e);
+ } else if (vm_acl_ok(who_e, c) != OK) {
+ printf("VM: unauthorized %s by %d\n",
+ vm_calls[c].vmc_name, who_e);
} else {
SANITYCHECK(SCL_FUNCTIONS);
result = vm_calls[c].vmc_func(&msg);
vm_calls[i].vmc_func = (func); \
vm_calls[i].vmc_name = #code; \
if(((thecaller) < MINEPM || (thecaller) > MAXEPM) \
- && (thecaller) != ANYEPM) { \
+ && (thecaller) != ANYEPM \
+ && (thecaller) != NEEDACL) { \
vm_panic(#thecaller " invalid", (code)); \
} \
vm_calls[i].vmc_callers |= EPM(thecaller); \
CALLMAP(VM_ALLOCMEM, do_allocmem, PM_PROC_NR);
CALLMAP(VM_NOTIFY_SIG, do_notify_sig, PM_PROC_NR);
- /* Physical mapping requests.
- * tty (for /dev/video) does this.
- * memory (for /dev/mem) does this.
- */
- CALLMAP(VM_MAP_PHYS, do_map_phys, TTY_PROC_NR);
- CALLMAP(VM_UNMAP_PHYS, do_unmap_phys, TTY_PROC_NR);
- CALLMAP(VM_MAP_PHYS, do_map_phys, MEM_PROC_NR);
- CALLMAP(VM_UNMAP_PHYS, do_unmap_phys, MEM_PROC_NR);
+ /* Requests from RS */
+ CALLMAP(VM_RS_SET_PRIV, do_rs_set_priv, RS_PROC_NR);
/* Requests from userland (source unrestricted). */
CALLMAP(VM_MMAP, do_mmap, ANYEPM);
CALLMAP(VM_MUNMAP, do_munmap, ANYEPM);
CALLMAP(VM_MUNMAP_TEXT, do_munmap, ANYEPM);
- CALLMAP(VM_REMAP, do_remap, ANYEPM);
- CALLMAP(VM_GETPHYS, do_get_phys, ANYEPM);
- CALLMAP(VM_SHM_UNMAP, do_shared_unmap, ANYEPM);
- CALLMAP(VM_GETREF, do_get_refcount, ANYEPM);
- CALLMAP(VM_CTL, do_ctl, ANYEPM);
-
- /* Request only from IPC server */
- CALLMAP(VM_QUERY_EXIT, do_query_exit, ANYEPM);
-
- /* Requests (actually replies) from VFS (restricted to VFS only). */
- CALLMAP(VM_VFS_REPLY_OPEN, do_vfs_reply, VFS_PROC_NR);
- CALLMAP(VM_VFS_REPLY_MMAP, do_vfs_reply, VFS_PROC_NR);
- CALLMAP(VM_VFS_REPLY_CLOSE, do_vfs_reply, VFS_PROC_NR);
+ CALLMAP(VM_MAP_PHYS, do_map_phys, ANYEPM); /* Does its own checking. */
+ CALLMAP(VM_UNMAP_PHYS, do_unmap_phys, ANYEPM);
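+ /* ("Its own checking" here is map_perm_check(), called from
+ * do_map_phys().) */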
- /* Requests from RS */
- CALLMAP(VM_RS_SET_PRIV, do_rs_set_priv, RS_PROC_NR);
+ /* Requests from userland (anyone may call, but an ACL bit is required). */
+ CALLMAP(VM_REMAP, do_remap, NEEDACL);
+ CALLMAP(VM_GETPHYS, do_get_phys, NEEDACL);
+ CALLMAP(VM_SHM_UNMAP, do_shared_unmap, NEEDACL);
+ CALLMAP(VM_GETREF, do_get_refcount, NEEDACL);
+ CALLMAP(VM_CTL, do_ctl, NEEDACL);
+ CALLMAP(VM_QUERY_EXIT, do_query_exit, NEEDACL);
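+ /* The ACL itself is the per-process vm_call_priv_mask bitmap, indexed by
+ * zero-based call number (e.g. VM_REMAP - VM_RQ_BASE) and tested in
+ * vm_acl_ok(); it is expected to be filled in by RS via VM_RS_SET_PRIV
+ * (do_rs_set_priv()).
+ */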
/* Sanity checks */
if(find_kernel_top() >= VM_PROCSTART)
_minix_unmapzero();
}
+/*===========================================================================*
+ * vm_acl_ok *
+ *===========================================================================*/
+PRIVATE int vm_acl_ok(endpoint_t caller, int call)
+{
+ int n, r;
+
+ /* Some calls are always allowed for certain callers, or for everyone (ANYEPM). */
+ if(EPMOK(vm_calls[call].vmc_callers, caller)) {
+ return OK;
+ }
+
+ if ((r = vm_isokendpt(caller, &n)) != OK)
+ vm_panic("VM: from strange source.", caller);
+
+ /* Other calls need an ACL bit. */
+ if (!(vm_calls[call].vmc_callers & EPM(NEEDACL))) {
+ return EPERM;
+ }
+ if (!GET_BIT(vmproc[n].vm_call_priv_mask, call)) {
+ printf("VM: no ACL for %s for %d\n",
+ vm_calls[call].vmc_name, caller);
+ return EPERM;
+ }
+
+ return OK;
+}
return OK;
}
+/*===========================================================================*
+ * map_perm_check *
+ *===========================================================================*/
+PUBLIC int map_perm_check(endpoint_t caller, endpoint_t target,
+ phys_bytes physaddr, phys_bytes len)
+{
+ int r;
+
+ /* TTY and MEM are special cases and may map anything: TTY even
+ * on behalf of any other process (it needs this for the
+ * TIOCMAPMEM ioctl), MEM only for itself.
+ */
+ if(caller == TTY_PROC_NR)
+ return OK;
+ if(caller != target)
+ return EPERM;
+ if(caller == MEM_PROC_NR)
+ return OK;
+
+ /* Anyone else needs explicit permission from the kernel (ultimately
+ * set by PCI).
+ */
+ r = sys_privquery_mem(caller, physaddr, len);
+
+ return r;
+}
+
/*===========================================================================*
* do_map_phys *
*===========================================================================*/
endpoint_t target;
struct vir_region *vr;
vir_bytes len;
+ phys_bytes startaddr;
target = m->VMMP_EP;
+ len = m->VMMP_LEN;
+
if(target == SELF)
target = m->m_source;
if((r=vm_isokendpt(target, &n)) != OK)
return EINVAL;
+ startaddr = (phys_bytes)m->VMMP_PHADDR;
+
+ /* Check permission against the exact requested range first, then
+ * round the length up to page granularity; the caller can't help
+ * it that we can't map at less than page granularity.
+ */
+ if(map_perm_check(m->m_source, target, startaddr, len) != OK) {
+ printf("VM: unauthorized mapping of 0x%lx by %d\n",
+ startaddr, m->m_source);
+ return EPERM;
+ }
+
vmp = &vmproc[n];
if(!(vmp->vm_flags & VMF_HASPT))
return ENXIO;
- len = m->VMMP_LEN;
if(len % VM_PAGE_SIZE)
len += VM_PAGE_SIZE - (len % VM_PAGE_SIZE);
if(!(vr = map_page_region(vmp, arch_vir2map(vmp, vmp->vm_stacktop),
- VM_DATATOP, len, (vir_bytes)m->VMMP_PHADDR,
+ VM_DATATOP, len, startaddr,
VR_DIRECT | VR_NOPF | VR_WRITABLE, 0))) {
return ENOMEM;
}