int check_ipc_filter(struct ipc_filter_s *ipcf, int fill_flags);
int allow_ipc_filtered_msg(struct proc *rp, endpoint_t src_e,
vir_bytes m_src_v, message *m_src_p);
+int allow_ipc_filtered_memreq(struct proc *src_rp, struct proc *dst_rp);
int priv_add_irq(struct proc *rp, int irq);
int priv_add_io(struct proc *rp, struct io_range *ior);
int priv_add_mem(struct proc *rp, struct minix_mem_range *memr);
}
}
+/*===========================================================================*
+ * clear_memreq *
+ *===========================================================================*/
+static void clear_memreq(struct proc *rp)
+{
+ struct proc **rpp;
+
+ if (!RTS_ISSET(rp, RTS_VMREQUEST))
+ return; /* nothing to do */
+
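+ /* Pending requests form a singly linked list, headed by the global
+ * 'vmrequest' and chained through p_vmrequest.nextrequestor; walking
+ * it with a pointer-to-pointer lets us unlink 'rp' in place.
+ */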
+ for (rpp = &vmrequest; *rpp != NULL;
+ rpp = &(*rpp)->p_vmrequest.nextrequestor) {
+ if (*rpp == rp) {
+ *rpp = rp->p_vmrequest.nextrequestor;
+ break;
+ }
+ }
+
+ RTS_UNSET(rp, RTS_VMREQUEST);
+}
+
/*===========================================================================*
* clear_ipc *
*===========================================================================*/
*/
clear_ipc_refs(rc, EDEADSRCDST);
+ /* Finally, if the process was blocked on a VM request, remove it from the
+ * queue of processes waiting to be processed by VM.
+ */
+ clear_memreq(rc);
}
/*===========================================================================*
}
priv(rp)->s_ipcf = NULL;
+
+ /* VM is a special case here: since the cleared IPC filter may have
+ * blocked memory handling requests, we may now have to tell VM that
+ * there are "new" requests pending.
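+ * SIGKMEM is the notification that prompts VM to fetch the pending
+ * requests from the kernel.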
+ */
+ if (rp->p_endpoint == VM_PROC_NR && vmrequest != NULL)
+ if (send_sig(VM_PROC_NR, SIGKMEM) != OK)
+ panic("send_sig failed");
}
/*===========================================================================*
return allow;
}
+/*===========================================================================*
+ * allow_ipc_filtered_memreq *
+ *===========================================================================*/
+int allow_ipc_filtered_memreq(struct proc *src_rp, struct proc *dst_rp)
+{
+ /* Determine whether VM should receive a request to handle memory
+ * that is the result of process 'src_rp' trying to access currently
+ * unavailable memory in process 'dst_rp'. Return TRUE if VM should
+ * be given the request, FALSE otherwise.
+ */
+
+ struct proc *vmp;
+ message m_buf;
+ int allow_src, allow_dst;
+
+ vmp = proc_addr(VM_PROC_NR);
+
+ /* If VM has no filter in place, all requests should go through. */
+ if (priv(vmp)->s_ipcf == NULL)
+ return TRUE;
+
+ /* VM obtains memory requests in response to a SIGKMEM signal, which
+ * is a notification sent from SYSTEM. Thus, if VM blocks such
+ * notifications, it also should not get any memory requests. Of
+ * course, VM should not be asking for requests in that case either,
+ * but the extra check doesn't hurt.
+ */
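+ /* Only the message type is filled in here; the source of the
+ * notification (SYSTEM) is passed separately to
+ * allow_ipc_filtered_msg() below.
+ */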
+ m_buf.m_type = NOTIFY_MESSAGE;
+ if (!allow_ipc_filtered_msg(vmp, SYSTEM, 0, &m_buf))
+ return FALSE;
+
+ /* A more refined policy could be implemented here, for example to
+ * ensure that both the source and the destination (if different)
+ * are in the group of processes that VM wants to talk to. However,
+ * VM is basically unable to handle any memory requests during an
+ * update, so in that case we never even get this far, and none of
+ * that refinement is currently needed.
+ */
+ return TRUE;
+}
+
/*===========================================================================*
* priv_add_irq *
*===========================================================================*/
static void adjust_asyn_table(struct priv *src_privp, struct priv *dst_privp);
static void swap_proc_slot_pointer(struct proc **rpp, struct proc
*src_rp, struct proc *dst_rp);
+static void swap_memreq(struct proc *src_rp, struct proc *dst_rp);
/*===========================================================================*
* do_update *
/* Swap global process slot addresses. */
swap_proc_slot_pointer(get_cpulocal_var_ptr(ptproc), src_rp, dst_rp);
+ /* Swap VM request entries. */
+ swap_memreq(src_rp, dst_rp);
+
#if DEBUG
printf("do_update: updated %d (%s, %d, %d) into %d (%s, %d, %d)\n",
src_rp->p_endpoint, src_rp->p_name, src_rp->p_nr, priv(src_rp)->s_proc_nr,
}
}
+/*===========================================================================*
+ * swap_memreq *
+ *===========================================================================*/
+static void swap_memreq(struct proc *src_rp, struct proc *dst_rp)
+{
+/* If either the source or the destination process is part of the VM request
+ * chain, but not both, then swap the process pointers in the chain.
+ */
+ struct proc **rpp;
+
+ if (RTS_ISSET(src_rp, RTS_VMREQUEST) == RTS_ISSET(dst_rp, RTS_VMREQUEST))
+ return; /* nothing to do */
+
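+ /* At this point exactly one of the two processes is in the request
+ * chain, so the first match below determines the swap direction.
+ */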
+ for (rpp = &vmrequest; *rpp != NULL;
+ rpp = &(*rpp)->p_vmrequest.nextrequestor) {
+ if (*rpp == src_rp) {
+ dst_rp->p_vmrequest.nextrequestor =
+ src_rp->p_vmrequest.nextrequestor;
+ *rpp = dst_rp;
+ break;
+ } else if (*rpp == dst_rp) {
+ src_rp->p_vmrequest.nextrequestor =
+ dst_rp->p_vmrequest.nextrequestor;
+ *rpp = src_rp;
+ break;
+ }
+ }
+}
+
#endif /* USE_UPDATE */
{
int proc_nr;
endpoint_t ep = m_ptr->SVMCTL_WHO;
- struct proc *p, *rp, *target;
+ struct proc *p, *rp, **rpp, *target;
if(ep == SELF) { ep = caller->p_endpoint; }
RTS_UNSET(p, RTS_PAGEFAULT);
return OK;
case VMCTL_MEMREQ_GET:
- /* Send VM the information about the memory request. */
- if(!(rp = vmrequest))
- return ESRCH;
- assert(RTS_ISSET(rp, RTS_VMREQUEST));
+ /* Send VM the information about the memory request. We cannot
+ * simply send the first request on the list, because IPC
+ * filters may forbid VM from getting requests for particular
+ * sources. However, IPC filters are used only in rare cases.
+ */
+ for (rpp = &vmrequest; *rpp != NULL;
+ rpp = &(*rpp)->p_vmrequest.nextrequestor) {
+ rp = *rpp;
- okendpt(rp->p_vmrequest.target, &proc_nr);
- target = proc_addr(proc_nr);
+ assert(RTS_ISSET(rp, RTS_VMREQUEST));
+
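+ /* Resolve the target endpoint of this request to its process
+ * slot, so that the destination can be checked against VM's
+ * IPC filter below.
+ */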
+ okendpt(rp->p_vmrequest.target, &proc_nr);
+ target = proc_addr(proc_nr);
+
+ /* Check against IPC filters. */
+ if (!allow_ipc_filtered_memreq(rp, target))
+ continue;
+
+ /* Reply with request fields. */
+ if (rp->p_vmrequest.req_type != VMPTYPE_CHECK)
+ panic("VMREQUEST wrong type");
- /* Reply with request fields. */
- switch(rp->p_vmrequest.req_type) {
- case VMPTYPE_CHECK:
m_ptr->SVMCTL_MRG_TARGET =
rp->p_vmrequest.target;
m_ptr->SVMCTL_MRG_ADDR =
rp->p_vmrequest.params.check.start;
m_ptr->SVMCTL_MRG_FLAG =
rp->p_vmrequest.params.check.writeflag;
m_ptr->SVMCTL_MRG_REQUESTOR =
(void *) rp->p_endpoint;
- break;
- default:
- panic("VMREQUEST wrong type");
- }
- rp->p_vmrequest.vmresult = VMSUSPEND;
+ rp->p_vmrequest.vmresult = VMSUSPEND;
+
+ /* Remove from request chain. */
+ *rpp = rp->p_vmrequest.nextrequestor;
+
+ return rp->p_vmrequest.req_type;
+ }
- /* Remove from request chain. */
- vmrequest = vmrequest->p_vmrequest.nextrequestor;
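+ /* Either no requests are pending, or VM's IPC filter currently
+ * blocks all of them.
+ */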
+ return ENOENT;
- return rp->p_vmrequest.req_type;
case VMCTL_MEMREQ_REPLY:
assert(RTS_ISSET(p, RTS_VMREQUEST));
assert(p->p_vmrequest.vmresult == VMSUSPEND);