# Makefile for the VirtualBox backdoor driver (VBOX)
PROG= vbox
-SRCS= vbox.c
+SRCS= vbox.c hgcm.c err.c
DPADD+= ${LIBSYS}
LDADD+= -lsys
--- /dev/null
+/* VirtualBox driver - by D.C. van Moolenbroek */
+#include <minix/drivers.h>
+#include <errno.h>
+
+#include "vmmdev.h"
+#include "proto.h"
+
+static int codes[] = {
+ OK, /* 0: success */
+ EGENERIC, /* -1: general failure */
+ EINVAL, /* -2: invalid parameter */
+ EINVAL, /* -3: invalid magic */
+ EBADF, /* -4: invalid handle */
+ ENOLCK, /* -5: lock failed */
+ EFAULT, /* -6: invalid pointer */
+ EGENERIC, /* -7: patching IDT failed */
+ ENOMEM, /* -8: memory allocation failed */
+ EEXIST, /* -9: already loaded */
+ EPERM, /* -10: permission denied */
+ EINVAL, /* -11: version mismatch */
+ ENOSYS, /* -12: function not implemented */
+ EGENERIC, /* -13 */
+ EGENERIC, /* -14 */
+ EGENERIC, /* -15 */
+ EGENERIC, /* -16 */
+ EGENERIC, /* -17 */
+ EGENERIC, /* -18: not equal */
+ EINVAL, /* -19: not a symlink */
+ ENOMEM, /* -20: temporary memory allocation failed */
+ EINVAL, /* -21: invalid file mode */
+ EINVAL, /* -22: incorrect call order */
+ EGENERIC, /* -23: no TLS available */
+ EGENERIC, /* -24: failed to set TLS */
+ EGENERIC, /* -25 */
+ ENOMEM, /* -26: contiguous memory allocation failed */
+ ENOMEM, /* -27: no memory available for page table */
+ EGENERIC, /* -28 */
+ ESRCH, /* -29: thread is dead */
+ EINVAL, /* -30: thread is not waitable */
+ EGENERIC, /* -31: page table not present */
+ EINVAL, /* -32: invalid context */
+ EBUSY, /* -33: timer is busy */
+ EGENERIC, /* -34: address conflict */
+ EGENERIC, /* -35: unresolved error */
+ ENOTTY, /* -36: invalid function */
+ EINVAL, /* -37: not supported */
+ EACCES, /* -38: access denied */
+ EINTR, /* -39: interrupted */
+ ETIMEDOUT, /* -40: timeout */
+ E2BIG, /* -41: buffer overflow */
+ E2BIG, /* -42: too much data */
+ EAGAIN, /* -43: max thread number reached */
+ EAGAIN, /* -44: max process number reached */
+ EGENERIC, /* -45: signal refused */
+ EBUSY, /* -46: signal already pending */
+ EINVAL, /* -47: invalid signal */
+ EGENERIC, /* -48: state changed */
+ EINVAL, /* -49: invalid UUID format */
+ ESRCH, /* -50: process not found */
+ OK, /* -51: waited-for process is still running */
+ EAGAIN, /* -52: try again */
+ EGENERIC, /* -53: generic parse error */
+ ERANGE, /* -54: value out of range */
+ EOVERFLOW, /* -55: value too big */
+ EGENERIC, /* -56: no digits in string */
+ ERANGE, /* -57: minus sign in unsigned value */
+ EILSEQ, /* -58: character translation failed */
+ EGENERIC, /* -59: encountered unicode byte order mark */
+ EGENERIC, /* -60: encountered unicode surrogate */
+ EILSEQ, /* -61: invalid UTF8 encoding */
+ EILSEQ, /* -62: invalid UTF16 encoding */
+ EGENERIC, /* -63: no UTF16 for character */
+ ENOMEM, /* -64: string memory allocation failed */
+ ENOMEM, /* -65: UTF16 string memory allocation failed */
+ ENOMEM, /* -66: code point array allocation failed */
+ EBUSY, /* -67: cannot free in-use memory */
+ EGENERIC, /* -68: timer already active */
+ EGENERIC, /* -69: timer already suspended */
+ ECANCELED, /* -70: operation cancelled */
+ ENOMEM, /* -71: failed to initialize memory object */
+ ENOMEM, /* -72: low physical memory allocation failed */
+ ENOMEM, /* -73: physical memory allocation failed */
+ EGENERIC, /* -74: address too big */
+ EGENERIC, /* -75: memory mapping failed */
+ EGENERIC, /* -76: trailing characters */
+ EGENERIC, /* -77: trailing spaces */
+ ESRCH, /* -78: not found */
+ EGENERIC, /* -79: invalid state */
+ ENOMEM, /* -80: out of resources */
+ ENFILE, /* -81: no more handles */
+ EGENERIC, /* -82: preemption disabled */
+ EGENERIC, /* -83: end of string */
+ EGENERIC, /* -84: page count out of range */
+ EGENERIC, /* -85: object destroyed */
+ EGENERIC, /* -86: dangling objects */
+ EGENERIC, /* -87: invalid Base64 encoding */
+ EGENERIC, /* -88: return triggered by callback */
+ EGENERIC, /* -89: authentication failure */
+ EGENERIC, /* -90: not a power of two */
+ EGENERIC, /* -91: ignored */
+ EGENERIC, /* -92: concurrent access not allowed */
+ EGENERIC, /* -93: invalid reference sharing */
+ EGENERIC, /* -94 */
+ EGENERIC, /* -95: no change */
+ ENOMEM, /* -96: executable memory allocation failed */
+ EINVAL, /* -97: unsupported alignment */
+ EGENERIC, /* -98: duplicate */
+ EGENERIC, /* -99: missing */
+ EIO, /* -100: I/O error */
+ ENXIO, /* -101: open failed */
+ ENOENT, /* -102: file not found */
+ ENOTDIR, /* -103: path not found (may also mean ENOENT) */
+ EINVAL, /* -104: invalid name */
+ EEXIST, /* -105: already exists */
+ ENFILE, /* -106: too many open files */
+ EIO, /* -107: seek error */
+ EINVAL, /* -108: negative seek */
+ ESPIPE, /* -109: seek on device */
+ EGENERIC, /* -110: end of file */
+ EIO, /* -111: generic read error */
+ EIO, /* -112: generic write error */
+ EROFS, /* -113: write protected */
+ ETXTBSY, /* -114: sharing violation */
+ ENOLCK, /* -115: file lock failed */
+ EAGAIN, /* -116: file lock violation */
+ EIO, /* -117: cannot create file */
+ EIO, /* -118: cannot delete directory */
+ EXDEV, /* -119: not the same device */
+ ENAMETOOLONG, /* -120: file name too long */
+ ENXIO, /* -121: media not present */
+ EIO, /* -122: media not recognized */
+ OK, /* -123: nothing to unlock */
+ EGENERIC, /* -124: lock lost */
+ ENOTEMPTY, /* -125: directory not empty */
+ ENOTDIR, /* -126: not a directory */
+ EISDIR, /* -127: is a directory */
+ EFBIG, /* -128: file too big */
+ EGENERIC, /* -129: no asynchronous I/O request */
+ EGENERIC, /* -130: asynchronous I/O in progress */
+ EGENERIC, /* -131: asynchronous I/O completed */
+ EGENERIC, /* -132: asynchronous I/O busy */
+ EGENERIC, /* -133: asynchronous I/O limit exceeded */
+ EGENERIC, /* -134: asynchronous I/O canceled */
+ EGENERIC, /* -135: asynchronous I/O not submitted */
+ EGENERIC, /* -136: asynchronous I/O not prepared */
+ EGENERIC, /* -137: asynchronous I/O out of resources */
+ EBUSY, /* -138: device or resource busy */
+ EGENERIC, /* -139: not a file */
+ EGENERIC, /* -140: is a file */
+ EGENERIC, /* -141: unexpected file type */
+ EGENERIC, /* -142: missing path root specification */
+ EGENERIC, /* -143: path is relative */
+ EGENERIC, /* -144: path is not relative */
+ EGENERIC, /* -145 */
+ EGENERIC, /* -146 */
+ EGENERIC, /* -147 */
+ EGENERIC, /* -148 */
+ EGENERIC, /* -149 */
+ EIO, /* -150: disk I/O error */
+ ENXIO, /* -151: invalid drive number */
+ ENOSPC, /* -152: disk full */
+ EIO, /* -153: disk changed */
+ EGENERIC, /* -154: drive locked */
+ ENXIO, /* -155: invalid disk format */
+ ELOOP, /* -156: too many symlinks */
+ EOPNOTSUPP, /* -157: can not set symlink file times */
+ EOPNOTSUPP, /* -158: can not change symlink owner */
+ EGENERIC, /* -159 */
+ EGENERIC, /* -160 */
+ EGENERIC, /* -161 */
+ EGENERIC, /* -162 */
+ EGENERIC, /* -163 */
+ EGENERIC, /* -164 */
+ EGENERIC, /* -165 */
+ EGENERIC, /* -166 */
+ EGENERIC, /* -167 */
+ EGENERIC, /* -168 */
+ EGENERIC, /* -169 */
+ EGENERIC, /* -170 */
+ EGENERIC, /* -171 */
+ EGENERIC, /* -172 */
+ EGENERIC, /* -173 */
+ EGENERIC, /* -174 */
+ EGENERIC, /* -175 */
+ EGENERIC, /* -176 */
+ EGENERIC, /* -177 */
+ EGENERIC, /* -178 */
+ EGENERIC, /* -179 */
+ EGENERIC, /* -180 */
+ EGENERIC, /* -181 */
+ EGENERIC, /* -182 */
+ EGENERIC, /* -183 */
+ EGENERIC, /* -184 */
+ EGENERIC, /* -185 */
+ EGENERIC, /* -186 */
+ EGENERIC, /* -187 */
+ EGENERIC, /* -188 */
+ EGENERIC, /* -189 */
+ EGENERIC, /* -190 */
+ EGENERIC, /* -191 */
+ EGENERIC, /* -192 */
+ EGENERIC, /* -193 */
+ EGENERIC, /* -194 */
+ EGENERIC, /* -195 */
+ EGENERIC, /* -196 */
+ EGENERIC, /* -197 */
+ EGENERIC, /* -198 */
+ EGENERIC, /* -199 */
+ EGENERIC, /* -200: search error */
+ OK, /* -201: no more files */
+ ENFILE, /* -202: no more search handles available */
+};
+
+int convert_err(int code)
+{
+/* Return a POSIX error code for the given VirtualBox error code.
+ */
+ unsigned int index;
+
+ index = -code;
+
+ if (index < sizeof(codes) / sizeof(codes[0]))
+ return codes[index];
+
+ return EGENERIC;
+}
--- /dev/null
+/* VirtualBox driver - by D.C. van Moolenbroek */
+#include <minix/drivers.h>
+#include <minix/vboxtype.h>
+#include <minix/vboxif.h>
+#include <assert.h>
+
+#include "vmmdev.h"
+#include "proto.h"
+
+#define MAX_CONNS 4 /* maximum number of HGCM connections */
+#define MAX_REQS 2 /* number of concurrent requests per conn. */
+#define MAX_PARAMS 8 /* maximum number of parameters per request */
+
+/* HGCM connection states. */
+enum {
+ STATE_FREE,
+ STATE_OPENING,
+ STATE_OPEN,
+ STATE_CLOSING
+};
+
+/* HGCM connection information. */
+static struct {
+ int state; /* connection state */
+ endpoint_t endpt; /* caller endpoint */
+ u32_t client_id; /* VMMDev-given client ID */
+ struct {
+ int busy; /* is this request ongoing? */
+ struct VMMDevHGCMHeader *ptr; /* request buffer */
+ phys_bytes addr; /* buffer's physical address */
+
+ int status; /* IPC status of request */
+ long id; /* request ID */
+
+ cp_grant_id_t grant; /* grant for parameters */
+ int count; /* number of parameters */
+ vbox_param_t param[MAX_PARAMS]; /* local copy of parameters */
+ } req[MAX_REQS]; /* concurrent requests */
+} hgcm_conn[MAX_CONNS];
+
+/*===========================================================================*
+ * convert_result *
+ *===========================================================================*/
+static int convert_result(int res)
+{
+ /* Convert a VirtualBox result code to a POSIX error code.
+ */
+
+ /* HGCM transport error codes. */
+ switch (res) {
+ case VMMDEV_ERR_HGCM_NOT_FOUND: return ESRCH;
+ case VMMDEV_ERR_HGCM_DENIED: return EPERM;
+ case VMMDEV_ERR_HGCM_INVALID_ADDR: return EFAULT;
+ case VMMDEV_ERR_HGCM_ASYNC_EXEC: return EDONTREPLY;
+ case VMMDEV_ERR_HGCM_INTERNAL: return EGENERIC;
+ case VMMDEV_ERR_HGCM_INVALID_ID: return EINVAL;
+ }
+
+ /* Positive codes are success codes. */
+ if (res >= 0)
+ return OK;
+
+ /* Unsupported negative codes are translated to EGENERIC; it is up to
+ * the caller to check the actual VirtualBox result code in that case.
+ */
+ return convert_err(res);
+}
+
+/*===========================================================================*
+ * send_reply *
+ *===========================================================================*/
+static void send_reply(endpoint_t endpt, int ipc_status, int result, int code,
+ long id)
+{
+ /* Send a reply to an earlier request. */
+ message m;
+ int r;
+
+ memset(&m, 0, sizeof(m));
+ m.m_type = VBOX_REPLY;
+ m.VBOX_RESULT = result;
+ m.VBOX_CODE = code;
+ m.VBOX_ID = id;
+
+ if (IPC_STATUS_CALL(ipc_status) == SENDREC)
+ r = sendnb(endpt, &m);
+ else
+ r = asynsend3(endpt, &m, AMF_NOREPLY);
+
+ if (r != OK)
+ printf("VBOX: unable to send reply to %d: %d\n", endpt, r);
+}
+
+/*===========================================================================*
+ * alloc_req *
+ *===========================================================================*/
+static int alloc_req(int conn)
+{
+ /* Allocate a request for the given connection. Allocate memory as
+ * necessary. Do not mark the request as busy, as it may end up not
+ * being used.
+ */
+ phys_bytes addr;
+ void *ptr;
+ int req;
+
+ for (req = 0; req < MAX_REQS; req++)
+ if (!hgcm_conn[conn].req[req].busy)
+ break;
+
+ if (req == MAX_REQS)
+ return EMFILE;
+
+ if (hgcm_conn[conn].req[req].ptr == NULL) {
+ if ((ptr = alloc_contig(VMMDEV_BUF_SIZE, 0, &addr)) == NULL)
+ return ENOMEM;
+
+ hgcm_conn[conn].req[req].ptr = (struct VMMDevHGCMHeader *) ptr;
+ hgcm_conn[conn].req[req].addr = addr;
+ }
+
+ return req;
+}
+
+/*===========================================================================*
+ * free_conn *
+ *===========================================================================*/
+static void free_conn(int conn)
+{
+ /* Free the memory for all requests of the given connection, and mark
+ * the connection as free.
+ */
+ void *ptr;
+ int req;
+
+ for (req = 0; req < MAX_REQS; req++) {
+ if ((ptr = (void *) hgcm_conn[conn].req[req].ptr) != NULL) {
+ assert(!hgcm_conn[conn].req[req].busy);
+
+ free_contig(ptr, VMMDEV_BUF_SIZE);
+
+ hgcm_conn[conn].req[req].ptr = NULL;
+ }
+ }
+
+ hgcm_conn[conn].state = STATE_FREE;
+}
+
+/*===========================================================================*
+ * start_req *
+ *===========================================================================*/
+static int start_req(int conn, int req, int type, size_t size, int ipc_status,
+ long id, int *code)
+{
+ /* Start a request. */
+ int r, res;
+
+ hgcm_conn[conn].req[req].ptr->flags = 0;
+ hgcm_conn[conn].req[req].ptr->result = VMMDEV_ERR_GENERIC;
+
+ *code = res = vbox_request(&hgcm_conn[conn].req[req].ptr->header,
+ hgcm_conn[conn].req[req].addr, type, size);
+
+ r = convert_result(res);
+
+ if (r != OK && r != EDONTREPLY)
+ return r;
+
+ /* The request may be processed either immediately or asynchronously.
+ * The caller of this function must be able to cope with both
+ * situations. In either case, mark the current request as ongoing.
+ */
+ hgcm_conn[conn].req[req].busy = TRUE;
+ hgcm_conn[conn].req[req].status = ipc_status;
+ hgcm_conn[conn].req[req].id = id;
+
+ return r;
+}
+
+/*===========================================================================*
+ * cancel_req *
+ *===========================================================================*/
+static void cancel_req(int conn, int req)
+{
+ /* Cancel an ongoing request. */
+
+ assert(hgcm_conn[conn].req[req].ptr != NULL);
+
+ /* The cancel request consists only of the HGCM header. The physical
+ * location determines the request to cancel. Note that request
+ * cancellation is full of race conditions, so we simply ignore
+ * the return value and assume all went well.
+ */
+ hgcm_conn[conn].req[req].ptr->flags = 0;
+ hgcm_conn[conn].req[req].ptr->result = VMMDEV_ERR_GENERIC;
+
+ vbox_request(&hgcm_conn[conn].req[req].ptr->header,
+ hgcm_conn[conn].req[req].addr, VMMDEV_REQ_HGCMCANCEL,
+ sizeof(struct VMMDevHGCMCancel));
+
+ hgcm_conn[conn].req[req].busy = FALSE;
+}
+
+/*===========================================================================*
+ * finish_req *
+ *===========================================================================*/
+static int finish_req(int conn, int req, int *code)
+{
+ /* The given request has finished. Take the appropriate action.
+ */
+ struct VMMDevHGCMConnect *connreq;
+ struct VMMDevHGCMCall *callreq;
+ struct VMMDevHGCMParam *inp;
+ vbox_param_t *outp;
+ int i, count, res, r = OK;
+
+ hgcm_conn[conn].req[req].busy = FALSE;
+
+ *code = res = hgcm_conn[conn].req[req].ptr->result;
+
+ r = convert_result(res);
+
+ /* The request has finished, so it cannot still be in progress. */
+ if (r == EDONTREPLY)
+ r = EGENERIC;
+
+ switch (hgcm_conn[conn].state) {
+ case STATE_FREE:
+ assert(0);
+
+ break;
+
+ case STATE_OPENING:
+ if (r == OK) {
+ connreq = (struct VMMDevHGCMConnect *)
+ hgcm_conn[conn].req[req].ptr;
+ hgcm_conn[conn].client_id = connreq->client_id;
+ hgcm_conn[conn].state = STATE_OPEN;
+
+ r = conn;
+ } else {
+ free_conn(conn);
+ }
+
+ break;
+
+ case STATE_CLOSING:
+ /* Neither we nor the caller can do anything with failures. */
+ if (r != OK)
+ printf("VBOX: disconnection failure #2 (%d)\n", res);
+
+ free_conn(conn);
+
+ r = OK;
+
+ break;
+
+ case STATE_OPEN:
+ /* On success, extract and copy back parameters to the caller.
+ */
+ if (r == OK) {
+ callreq = (struct VMMDevHGCMCall *)
+ hgcm_conn[conn].req[req].ptr;
+ inp = (struct VMMDevHGCMParam *) &callreq[1];
+ outp = &hgcm_conn[conn].req[req].param[0];
+ count = hgcm_conn[conn].req[req].count;
+
+ for (i = 0; i < count; i++) {
+ switch (outp->type) {
+ case VBOX_TYPE_U32:
+ outp->u32 = inp->u32;
+ break;
+
+ case VBOX_TYPE_U64:
+ outp->u64 = inp->u64;
+ break;
+
+ default:
+ break;
+ }
+
+ inp++;
+ outp++;
+ }
+
+ if (count > 0) {
+ r = sys_safecopyto(hgcm_conn[conn].endpt,
+ hgcm_conn[conn].req[req].grant, 0,
+ (vir_bytes)
+ hgcm_conn[conn].req[req].param,
+ count * sizeof(vbox_param_t), D);
+ }
+ }
+
+ break;
+ }
+
+ return r;
+}
+
+/*===========================================================================*
+ * check_conn *
+ *===========================================================================*/
+static void check_conn(int conn)
+{
+ /* Check all requests for the given connection for completion. */
+ int r, req, code;
+
+ for (req = 0; req < MAX_REQS; req++) {
+ if (!hgcm_conn[conn].req[req].busy) continue;
+
+ if (!(hgcm_conn[conn].req[req].ptr->flags &
+ VMMDEV_HGCM_REQ_DONE))
+ continue;
+
+ r = finish_req(conn, req, &code);
+
+ assert(r != EDONTREPLY);
+
+ send_reply(hgcm_conn[conn].endpt,
+ hgcm_conn[conn].req[req].status, r, code,
+ hgcm_conn[conn].req[req].id);
+ }
+}
+
+/*===========================================================================*
+ * do_open *
+ *===========================================================================*/
+static int do_open(message *m_ptr, int ipc_status, int *code)
+{
+ /* Process a connection request. */
+ struct VMMDevHGCMConnect *connreq;
+ int i, r, conn, count;
+
+ if (m_ptr->VBOX_COUNT < 0 || m_ptr->VBOX_COUNT > VMMDEV_HGCM_NAME_SIZE)
+ return EINVAL;
+
+ /* Find a free connection slot. Make sure the sending endpoint is not
+ * already using up half of the connection slots.
+ */
+ conn = -1;
+ count = 0;
+ for (i = 0; i < MAX_CONNS; i++) {
+ if (conn < 0 && hgcm_conn[i].state == STATE_FREE)
+ conn = i;
+ if (hgcm_conn[i].endpt == m_ptr->m_source)
+ count++;
+ }
+
+ if (count >= MAX(MAX_CONNS / 2, 2))
+ return EMFILE;
+
+ if (conn < 0)
+ return ENFILE;
+
+ /* Initialize the connection and request structures. */
+ hgcm_conn[conn].state = STATE_OPENING;
+ hgcm_conn[conn].endpt = m_ptr->m_source;
+
+ for (i = 0; i < MAX_REQS; i++) {
+ hgcm_conn[conn].req[i].busy = FALSE;
+ hgcm_conn[conn].req[i].ptr = NULL;
+ }
+
+ /* Set up and start the connection request. */
+ r = alloc_req(conn);
+
+ if (r < 0)
+ return r;
+ assert(r == 0);
+
+ connreq = (struct VMMDevHGCMConnect *) hgcm_conn[conn].req[0].ptr;
+ connreq->type = VMMDEV_HGCM_SVCLOC_LOCALHOST_EXISTING;
+ if ((r = sys_safecopyfrom(m_ptr->m_source, m_ptr->VBOX_GRANT, 0,
+ (vir_bytes) connreq->name, m_ptr->VBOX_COUNT, D)) !=
+ OK) {
+ free_conn(conn);
+
+ return r;
+ }
+ connreq->name[VMMDEV_HGCM_NAME_SIZE-1] = 0;
+
+ r = start_req(conn, 0, VMMDEV_REQ_HGCMCONNECT, sizeof(*connreq),
+ ipc_status, m_ptr->VBOX_ID, code);
+
+ if (r != OK && r != EDONTREPLY) {
+ free_conn(conn);
+
+ return r;
+ }
+
+ return (r == OK) ? finish_req(conn, 0, code) : r;
+}
+
+/*===========================================================================*
+ * do_close *
+ *===========================================================================*/
+static int do_close(message *m_ptr, int ipc_status, int *code)
+{
+ /* Process a disconnection request. */
+ struct VMMDevHGCMDisconnect *discreq;
+ int r, conn, req;
+
+ conn = m_ptr->VBOX_CONN;
+
+ /* Sanity checks. */
+ if (conn < 0 || conn >= MAX_CONNS)
+ return EINVAL;
+ if (hgcm_conn[conn].endpt != m_ptr->m_source ||
+ hgcm_conn[conn].state != STATE_OPEN)
+ return EINVAL;
+
+ /* Cancel any ongoing requests. */
+ for (req = 0; req < MAX_REQS; req++)
+ if (hgcm_conn[conn].req[req].busy)
+ cancel_req(conn, req);
+
+ assert(hgcm_conn[conn].req[0].ptr != NULL);
+
+ discreq = (struct VMMDevHGCMDisconnect *) hgcm_conn[conn].req[0].ptr;
+ discreq->client_id = hgcm_conn[conn].client_id;
+
+ r = start_req(conn, 0, VMMDEV_REQ_HGCMDISCONNECT, sizeof(*discreq),
+ ipc_status, m_ptr->VBOX_ID, code);
+
+ if (r != OK && r != EDONTREPLY) {
+ /* Neither we nor the caller can do anything with failures. */
+ printf("VBOX: disconnection failure #1 (%d)\n", r);
+
+ free_conn(conn);
+
+ return OK;
+ }
+
+ hgcm_conn[conn].state = STATE_CLOSING;
+
+ return (r == OK) ? finish_req(conn, 0, code) : r;
+}
+
+/*===========================================================================*
+ * store_pages *
+ *===========================================================================*/
+static int store_pages(int conn, int req, vbox_param_t *inp, size_t *offp)
+{
+ /* Create a page list of physical pages that make up the provided
+ * buffer area.
+ */
+ struct vumap_vir vvec;
+ struct vumap_phys pvec[MAPVEC_NR];
+ struct VMMDevHGCMPageList *pagelist;
+ size_t offset, size, skip;
+ int i, j, r, first, access, count, pages;
+
+ /* Empty strings are allowed. */
+ if (inp->ptr.size == 0)
+ return OK;
+
+ pagelist = (struct VMMDevHGCMPageList *)
+ (((u8_t *) hgcm_conn[conn].req[req].ptr) + *offp);
+
+ pagelist->flags = 0;
+ if (inp->ptr.dir & VBOX_DIR_IN)
+ pagelist->flags |= VMMDEV_HGCM_FLAG_FROM_HOST;
+ if (inp->ptr.dir & VBOX_DIR_OUT)
+ pagelist->flags |= VMMDEV_HGCM_FLAG_TO_HOST;
+ pagelist->count = 0;
+
+ /* Make sure there is room for the header (but no actual pages yet). */
+ *offp += sizeof(*pagelist) - sizeof(pagelist->addr[0]);
+ if (*offp > VMMDEV_BUF_SIZE)
+ return ENOMEM;
+
+ access = 0;
+ if (inp->ptr.dir & VBOX_DIR_IN) access |= VUA_WRITE;
+ if (inp->ptr.dir & VBOX_DIR_OUT) access |= VUA_READ;
+
+ offset = 0;
+ first = TRUE;
+ do {
+ /* If the caller gives us a huge buffer, we might need multiple
+ * calls to sys_vumap(). Note that the caller currently has no
+ * reliable way to know whether such a buffer will fit in our
+ * request page. In the future, we may dynamically reallocate
+ * the request area to make more room as necessary; for now we
+ * just return an ENOMEM error in such cases.
+ */
+ vvec.vv_grant = inp->ptr.grant;
+ vvec.vv_size = inp->ptr.off + inp->ptr.size;
+ count = MAPVEC_NR;
+ if ((r = sys_vumap(hgcm_conn[conn].endpt, &vvec, 1,
+ inp->ptr.off + offset, access, pvec,
+ &count)) != OK)
+ return r;
+
+ /* First get the number of bytes processed, before (possibly)
+ * adjusting the size of the first element.
+ */
+ for (i = size = 0; i < count; i++)
+ size += pvec[i].vp_size;
+
+ /* VirtualBox wants aligned page addresses only, and an offset
+ * into the first page. All other pages except the last are
+ * full pages, and the last page is cut off using the size.
+ */
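+ /* For instance (illustrative numbers, assuming one physically
+ * contiguous mapping): a buffer of 0x2000 bytes starting at physical
+ * address 0x10345 yields offset 0x345 and the page addresses 0x10000,
+ * 0x11000 and 0x12000 in the page list.
+ */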
+ skip = 0;
+ if (first) {
+ skip = pvec[0].vp_addr & (PAGE_SIZE - 1);
+ pvec[0].vp_addr -= skip;
+ pvec[0].vp_size += skip;
+ pagelist->offset = skip;
+ first = FALSE;
+ }
+
+ /* How many pages were mapped? */
+ pages = (skip + size + PAGE_SIZE - 1) / PAGE_SIZE;
+
+ /* Make sure there is room to store this many extra pages. */
+ *offp += sizeof(pagelist->addr[0]) * pages;
+ if (*offp > VMMDEV_BUF_SIZE)
+ return ENOMEM;
+
+ /* Actually store the pages in the page list. */
+ for (i = j = 0; i < pages; i++) {
+ assert(!(pvec[j].vp_addr & (PAGE_SIZE - 1)));
+
+ pagelist->addr[pagelist->count++] =
+ cvul64(pvec[j].vp_addr);
+
+ if (pvec[j].vp_size > PAGE_SIZE) {
+ pvec[j].vp_addr += PAGE_SIZE;
+ pvec[j].vp_size -= PAGE_SIZE;
+ }
+ else j++;
+ }
+ assert(j == count);
+
+ offset += size;
+ } while (offset < inp->ptr.size);
+
+ assert(offset == inp->ptr.size);
+
+ return OK;
+}
+
+/*===========================================================================*
+ * do_call *
+ *===========================================================================*/
+static int do_call(message *m_ptr, int ipc_status, int *code)
+{
+ /* Perform an HGCM call. */
+ vbox_param_t *inp;
+ struct VMMDevHGCMParam *outp;
+ struct VMMDevHGCMCall *callreq;
+ size_t size;
+ int i, r, conn, req, count;
+
+ conn = m_ptr->VBOX_CONN;
+ count = m_ptr->VBOX_COUNT;
+
+ /* Sanity checks. */
+ if (conn < 0 || conn >= MAX_CONNS)
+ return EINVAL;
+ if (hgcm_conn[conn].endpt != m_ptr->m_source ||
+ hgcm_conn[conn].state != STATE_OPEN)
+ return EINVAL;
+
+ /* Allocate a request, and copy in the parameters. */
+ req = alloc_req(conn);
+
+ if (req < 0)
+ return req;
+
+ hgcm_conn[conn].req[req].grant = m_ptr->VBOX_GRANT;
+ hgcm_conn[conn].req[req].count = count;
+
+ if (count > 0) {
+ if ((r = sys_safecopyfrom(m_ptr->m_source, m_ptr->VBOX_GRANT,
+ 0, (vir_bytes) hgcm_conn[conn].req[req].param,
+ count * sizeof(vbox_param_t), D)) != OK)
+ return r;
+ }
+
+ /* Set up the basic request. */
+ callreq = (struct VMMDevHGCMCall *) hgcm_conn[conn].req[req].ptr;
+ callreq->client_id = hgcm_conn[conn].client_id;
+ callreq->function = m_ptr->VBOX_FUNCTION;
+ callreq->count = count;
+
+ /* Rewrite and convert the parameters. */
+ inp = &hgcm_conn[conn].req[req].param[0];
+ outp = (struct VMMDevHGCMParam *) &callreq[1];
+
+ size = sizeof(*callreq) + sizeof(*outp) * count;
+ assert(size < VMMDEV_BUF_SIZE);
+
+ for (i = 0; i < count; i++) {
+ switch (inp->type) {
+ case VBOX_TYPE_U32:
+ outp->type = VMMDEV_HGCM_PARAM_U32;
+ outp->u32 = inp->u32;
+ break;
+
+ case VBOX_TYPE_U64:
+ outp->type = VMMDEV_HGCM_PARAM_U64;
+ outp->u64 = inp->u64;
+ break;
+
+ case VBOX_TYPE_PTR:
+ outp->type = VMMDEV_HGCM_PARAM_PAGELIST;
+ outp->pagelist.offset = size;
+ outp->pagelist.size = inp->ptr.size;
+
+ if ((r = store_pages(conn, req, inp, &size)) != OK)
+ return r;
+
+ break;
+
+ default:
+ return EINVAL;
+ }
+
+ inp++;
+ outp++;
+ }
+
+ /* Start the request. */
+ r = start_req(conn, req, VMMDEV_REQ_HGCMCALL, size, ipc_status,
+ m_ptr->VBOX_ID, code);
+
+ if (r != OK && r != EDONTREPLY)
+ return r;
+
+ return (r == OK) ? finish_req(conn, req, code) : r;
+}
+
+/*===========================================================================*
+ * do_cancel *
+ *===========================================================================*/
+static int do_cancel(message *m_ptr, int ipc_status)
+{
+ /* Cancel an ongoing call. */
+ int conn, req;
+
+ conn = m_ptr->VBOX_CONN;
+
+ /* Sanity checks. Note that connection and disconnection requests
+ * cannot be cancelled.
+ */
+ if (conn < 0 || conn >= MAX_CONNS)
+ return EINVAL;
+ if (hgcm_conn[conn].endpt != m_ptr->m_source ||
+ hgcm_conn[conn].state != STATE_OPEN)
+ return EINVAL;
+
+ /* Find the request. */
+ for (req = 0; req < MAX_REQS; req++) {
+ if (hgcm_conn[conn].req[req].busy &&
+ hgcm_conn[conn].req[req].id == m_ptr->VBOX_ID)
+ break;
+ }
+
+ /* If no such request was ongoing, then our behavior depends on the
+ * way the request was made: we do not want to send two asynchronous
+ * replies for one request, but if the caller used SENDREC, we have to
+ * reply with something or the caller would deadlock.
+ */
+ if (req == MAX_REQS) {
+ if (IPC_STATUS_CALL(ipc_status) == SENDREC)
+ return EINVAL;
+ else
+ return EDONTREPLY;
+ }
+
+ /* Actually cancel the request, and send a reply. */
+ cancel_req(conn, req);
+
+ return EINTR;
+}
+
+/*===========================================================================*
+ * hgcm_message *
+ *===========================================================================*/
+void hgcm_message(message *m_ptr, int ipc_status)
+{
+ /* Process a request message. */
+ int r, code = VMMDEV_ERR_GENERIC;
+
+ switch (m_ptr->m_type) {
+ case VBOX_OPEN: r = do_open(m_ptr, ipc_status, &code); break;
+ case VBOX_CLOSE: r = do_close(m_ptr, ipc_status, &code); break;
+ case VBOX_CALL: r = do_call(m_ptr, ipc_status, &code); break;
+ case VBOX_CANCEL: r = do_cancel(m_ptr, ipc_status); break;
+ default: r = ENOSYS; break;
+ }
+
+ if (r != EDONTREPLY)
+ send_reply(m_ptr->m_source, ipc_status, r, code,
+ m_ptr->VBOX_ID);
+}
+
+/*===========================================================================*
+ * hgcm_intr *
+ *===========================================================================*/
+void hgcm_intr(void)
+{
+ /* We received an HGCM event. Check ongoing requests for completion. */
+ int conn;
+
+ for (conn = 0; conn < MAX_CONNS; conn++)
+ if (hgcm_conn[conn].state != STATE_FREE)
+ check_conn(conn);
+}
--- /dev/null
+#ifndef _VBOX_PROTO_H
+#define _VBOX_PROTO_H
+
+/* err.c */
+extern int convert_err(int code);
+
+/* hgcm.c */
+extern void hgcm_message(message *m_ptr, int ipc_status);
+extern void hgcm_intr(void);
+
+/* vbox.c */
+extern int vbox_request(struct VMMDevRequestHeader *header, phys_bytes addr,
+ int type, size_t size);
+
+#endif /* _VBOX_PROTO_H */
-/* VirtualBox driver - only does regular time sync - by D.C. van Moolenbroek */
+/* VirtualBox driver - by D.C. van Moolenbroek */
+/*
+ * This driver currently performs two tasks:
+ * - synchronizing to the host system time;
+ * - providing an interface for HGCM communication with the host system.
+ */
#include <minix/drivers.h>
#include <minix/driver.h>
#include <minix/optset.h>
#include <machine/pci.h>
#include <sys/time.h>
-#include "vbox.h"
+#include "vmmdev.h"
+#include "proto.h"
#define DEFAULT_INTERVAL 1 /* check host time every second */
#define DEFAULT_DRIFT 2 /* update time if delta is >= 2 secs */
static int interval;
static int drift;
+static unsigned int irq;
+static int hook_id;
+
static struct optset optset_table[] = {
{ "interval", OPT_INT, &interval, 10 },
{ "drift", OPT_INT, &drift, 10 },
/*===========================================================================*
* vbox_request *
*===========================================================================*/
-static int vbox_request(int req_nr, size_t size)
+int vbox_request(struct VMMDevRequestHeader *header, phys_bytes addr,
+ int type, size_t size)
{
/* Perform a VirtualBox backdoor request. */
- struct VMMDevRequestHeader *hdr;
int r;
- hdr = (struct VMMDevRequestHeader *) vir_ptr;
- hdr->size = size;
- hdr->version = VMMDEV_BACKDOOR_VERSION;
- hdr->type = req_nr;
- hdr->rc = VMMDEV_ERR_PERM;
+ header->size = size;
+ header->version = VMMDEV_BACKDOOR_VERSION;
+ header->type = type;
+ header->result = VMMDEV_ERR_GENERIC;
- if ((r = sys_outl(port, phys_ptr)) != OK)
+ if ((r = sys_outl(port, addr)) != OK)
panic("device I/O failed: %d", r);
- return hdr->rc;
+ return header->result;
}
/*===========================================================================*
if (r != 1)
panic("backdoor device not found");
- if (vid == VBOX_PCI_VID && did == VBOX_PCI_DID)
+ if (vid == VMMDEV_PCI_VID && did == VMMDEV_PCI_DID)
break;
r = pci_next_dev(&devind, &vid, &did);
port = pci_attr_r32(devind, PCI_BAR) & PCI_BAR_IO_MASK;
+ irq = pci_attr_r8(devind, PCI_ILR);
+
+ if ((r = sys_irqsetpolicy(irq, 0 /* IRQ_REENABLE */, &hook_id)) != OK)
+ panic("unable to register IRQ: %d", r);
+
+ if ((r = sys_irqenable(&hook_id)) != OK)
+ panic("unable to enable IRQ: %d", r);
+
if ((vir_ptr = alloc_contig(VMMDEV_BUF_SIZE, 0, &phys_ptr)) == NULL)
panic("unable to allocate memory");
req = (struct VMMDevReportGuestInfo *) vir_ptr;
- req->guest_info.add_version = VMMDEV_GUEST_VERSION;
- req->guest_info.os_type = VMMDEV_GUEST_OS_OTHER;
+ req->add_version = VMMDEV_GUEST_VERSION;
+ req->os_type = VMMDEV_GUEST_OS_OTHER;
- if ((r = vbox_request(VMMDEV_REQ_REPORTGUESTINFO, sizeof(*req))) !=
+ if ((r = vbox_request(&req->header, phys_ptr,
+ VMMDEV_REQ_REPORTGUESTINFO, sizeof(*req))) !=
VMMDEV_ERR_OK)
panic("backdoor device not functioning");
return OK;
}
+/*===========================================================================*
+ * vbox_intr *
+ *===========================================================================*/
+static void vbox_intr(void)
+{
+ /* Process an interrupt. */
+ struct VMMDevEvents *req;
+ int r;
+
+ req = (struct VMMDevEvents *) vir_ptr;
+ req->events = 0;
+
+ /* If we cannot retrieve the event mask, we cannot do anything with
+ * this or any future interrupt either, so return without reenabling
+ * interrupts.
+ */
+ if ((r = vbox_request(&req->header, phys_ptr,
+ VMMDEV_REQ_ACKNOWLEDGEEVENTS, sizeof(*req))) !=
+ VMMDEV_ERR_OK) {
+ printf("VBOX: unable to retrieve event mask (%d)\n", r);
+
+ return;
+ }
+
+ if (req->events & VMMDEV_EVENT_HGCM)
+ hgcm_intr();
+
+ if ((r = sys_irqenable(&hook_id)) != OK)
+ panic("unable to reenable IRQ: %d", r);
+}
+
/*===========================================================================*
* vbox_update_time *
*===========================================================================*/
req = (struct VMMDevReqHostTime *) vir_ptr;
- if (vbox_request(VMMDEV_REQ_HOSTTIME, sizeof(*req)) == VMMDEV_ERR_OK) {
+ if (vbox_request(&req->header, phys_ptr, VMMDEV_REQ_HOSTTIME,
+ sizeof(*req)) == VMMDEV_ERR_OK) {
time(&otime); /* old time */
ntime = div64u(req->time, 1000); /* new time */
if (is_ipc_notify(ipc_status)) {
switch (m.m_source) {
+ case HARDWARE:
+ vbox_intr();
+
+ break;
+
case CLOCK:
vbox_update_time();
continue;
}
- printf("VBOX: received message %d from %d\n",
- m.m_type, m.m_source);
+ hgcm_message(&m, ipc_status);
}
return 0;
+++ /dev/null
-#ifndef _VBOX_H
-#define _VBOX_H
-
-#define VBOX_PCI_VID 0x80ee
-#define VBOX_PCI_DID 0xcafe
-
-struct VMMDevRequestHeader {
- u32_t size;
- u32_t version;
- u32_t type;
- i32_t rc;
- u32_t reserved[2];
-};
-
-struct VBoxGuestInfo {
- u32_t add_version;
- u32_t os_type;
-};
-
-struct VMMDevReportGuestInfo {
- struct VMMDevRequestHeader header;
- struct VBoxGuestInfo guest_info;
-};
-
-struct VMMDevReqHostTime {
- struct VMMDevRequestHeader header;
- u64_t time;
-};
-
-#define VMMDEV_MAKEWORD(m,n) (((m) << 16) | (n))
-
-#define VMMDEV_BACKDOOR_VERSION VMMDEV_MAKEWORD(1, 1)
-#define VMMDEV_GUEST_VERSION VMMDEV_MAKEWORD(1, 4)
-#define VMMDEV_GUEST_OS_OTHER 0x90000 /* this is L4 - close enough */
-
-#define VMMDEV_REQ_REPORTGUESTINFO 50
-#define VMMDEV_REQ_HOSTTIME 10
-
-#define VMMDEV_ERR_OK 0
-#define VMMDEV_ERR_PERM (-10)
-
-#define VMMDEV_BUF_SIZE 4096 /* just one page */
-
-#endif /* _VBOX_H */
--- /dev/null
+#ifndef _VBOX_VMMDEV_H
+#define _VBOX_VMMDEV_H
+
+#define VMMDEV_PCI_VID 0x80ee
+#define VMMDEV_PCI_DID 0xcafe
+
+#define VMMDEV_REQ_HOSTTIME 10
+#define VMMDEV_REQ_ACKNOWLEDGEEVENTS 41
+#define VMMDEV_REQ_REPORTGUESTINFO 50
+#define VMMDEV_REQ_HGCMCONNECT 60
+#define VMMDEV_REQ_HGCMDISCONNECT 61
+#define VMMDEV_REQ_HGCMCALL 62
+#define VMMDEV_REQ_HGCMCANCEL 64
+
+#define VMMDEV_ERR_OK 0 /* success */
+#define VMMDEV_ERR_GENERIC (-1) /* general failure */
+#define VMMDEV_ERR_HGCM_NOT_FOUND (-2900) /* service not found */
+#define VMMDEV_ERR_HGCM_DENIED 2901 /* client rejected */
+#define VMMDEV_ERR_HGCM_INVALID_ADDR (-2902) /* invalid address */
+#define VMMDEV_ERR_HGCM_ASYNC_EXEC 2903 /* call in progress */
+#define VMMDEV_ERR_HGCM_INTERNAL (-2904) /* internal error */
+#define VMMDEV_ERR_HGCM_INVALID_ID (-2905) /* invalid client ID */
+
+#define VMMDEV_MAKEWORD(m,n) (((m) << 16) | (n))
+
+#define VMMDEV_BACKDOOR_VERSION VMMDEV_MAKEWORD(1, 1)
+#define VMMDEV_GUEST_VERSION VMMDEV_MAKEWORD(1, 4)
+#define VMMDEV_GUEST_OS_OTHER 0x90000 /* this is L4 - close enough */
+
+struct VMMDevRequestHeader {
+ u32_t size;
+ u32_t version;
+ u32_t type;
+ i32_t result;
+ u32_t reserved[2];
+};
+
+struct VMMDevReportGuestInfo {
+ struct VMMDevRequestHeader header;
+ u32_t add_version;
+ u32_t os_type;
+};
+
+struct VMMDevReqHostTime {
+ struct VMMDevRequestHeader header;
+ u64_t time;
+};
+
+#define VMMDEV_EVENT_HGCM (1 << 1)
+
+struct VMMDevEvents {
+ struct VMMDevRequestHeader header;
+ u32_t events;
+};
+
+#define VMMDEV_HGCM_REQ_DONE (1 << 0) /* request has completed */
+
+struct VMMDevHGCMHeader {
+ struct VMMDevRequestHeader header;
+ u32_t flags;
+ i32_t result;
+};
+
+#define VMMDEV_HGCM_SVCLOC_LOCALHOST_EXISTING 2
+
+#define VMMDEV_HGCM_NAME_SIZE 128
+
+struct VMMDevHGCMConnect {
+ struct VMMDevHGCMHeader header;
+ u32_t type;
+ char name[VMMDEV_HGCM_NAME_SIZE];
+ u32_t client_id;
+};
+
+struct VMMDevHGCMDisconnect {
+ struct VMMDevHGCMHeader header;
+ u32_t client_id;
+};
+
+#define VMMDEV_HGCM_FLAG_TO_HOST 0x01
+#define VMMDEV_HGCM_FLAG_FROM_HOST 0x02
+
+struct VMMDevHGCMPageList {
+ u32_t flags;
+ u16_t offset;
+ u16_t count;
+ u64_t addr[1];
+};
+
+#define VMMDEV_HGCM_PARAM_U32 1
+#define VMMDEV_HGCM_PARAM_U64 2
+#define VMMDEV_HGCM_PARAM_PAGELIST 10
+
+struct VMMDevHGCMParam {
+ u32_t type;
+ union {
+ u32_t u32;
+ u64_t u64;
+ struct {
+ u32_t size;
+ union {
+ u32_t phys;
+ void *vir;
+ } addr;
+ } ptr;
+ struct {
+ u32_t size;
+ u32_t offset;
+ } pagelist;
+ };
+};
+
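+/* As set up by do_call() in hgcm.c, a VMMDevHGCMCall request is followed
+ * directly by 'count' VMMDevHGCMParam structures; any page lists referenced
+ * from those parameters are stored after the parameter array, within the
+ * same request buffer.
+ */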
+struct VMMDevHGCMCall {
+ struct VMMDevHGCMHeader header;
+ u32_t client_id;
+ u32_t function;
+ u32_t count;
+};
+
+struct VMMDevHGCMCancel {
+ struct VMMDevHGCMHeader header;
+};
+
+#define VMMDEV_BUF_SIZE 4096 /* just one page */
+
+#endif /* _VBOX_VMMDEV_H */
{
system
UMAP # 14
+ VUMAP # 18
+ IRQCTL # 19
DEVIO # 21
;
pci device 80ee/cafe;
minix/rs.h minix/safecopies.h minix/sched.h minix/sef.h \
minix/sound.h minix/spin.h minix/sys_config.h minix/sysinfo.h \
minix/syslib.h minix/sysutil.h minix/timers.h minix/type.h \
- minix/tty.h minix/u64.h minix/usb.h minix/usb_ch9.h minix/vm.h \
- minix/vfsif.h minix/vtreefs.h minix/libminixfs.h \
- minix/netsock.h
+ minix/tty.h minix/u64.h minix/usb.h minix/usb_ch9.h minix/vbox.h \
+ minix/vboxif.h minix/vboxtype.h minix/vm.h \
+ minix/vfsif.h minix/vtreefs.h minix/libminixfs.h minix/netsock.h
INCS+= net/gen/arp_io.h net/gen/dhcp.h net/gen/ether.h \
net/gen/eth_hdr.h net/gen/eth_io.h net/gen/icmp.h \
* 0x1300 - 0x13FF TTY Input
* 0x1400 - 0x14FF VFS-FS transaction IDs
* 0x1500 - 0x15FF Block device requests and responses
+ * 0x1600 - 0x16FF VirtualBox (VBOX) requests (see vboxif.h)
*
* Zero and negative values are widely used for OK and error responses.
*/
--- /dev/null
+#ifndef _MINIX_VBOX_H
+#define _MINIX_VBOX_H
+
+#include <minix/vboxtype.h>
+
+typedef int vbox_conn_t;
+
+extern int vbox_init(void);
+
+extern vbox_conn_t vbox_open(char *name);
+extern int vbox_close(vbox_conn_t conn);
+extern int vbox_call(vbox_conn_t conn, u32_t function, vbox_param_t *param,
+ int count, int *code);
+
+extern void vbox_set_u32(vbox_param_t *param, u32_t value);
+extern void vbox_set_u64(vbox_param_t *param, u64_t value);
+extern void vbox_set_ptr(vbox_param_t *param, void *ptr, size_t size,
+ unsigned int dir);
+extern void vbox_set_grant(vbox_param_t *param, endpoint_t endpt,
+ cp_grant_id_t grant, size_t off, size_t size, unsigned int dir);
+
+extern u32_t vbox_get_u32(vbox_param_t *param);
+extern u64_t vbox_get_u64(vbox_param_t *param);
+
+extern void vbox_put(vbox_param_t *param, int count);
+
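+/* Illustrative usage sketch (not part of the interface). The service name
+ * and function number below are hypothetical placeholders:
+ *
+ *	vbox_param_t param[1];
+ *	vbox_conn_t conn;
+ *	int r, code;
+ *
+ *	if (vbox_init() != OK) return;
+ *	if ((conn = vbox_open("HypotheticalService")) < 0) return;
+ *
+ *	vbox_set_u32(&param[0], 123);
+ *	if ((r = vbox_call(conn, 1, param, 1, &code)) == OK)
+ *		printf("value: %u\n", vbox_get_u32(&param[0]));
+ *
+ *	vbox_close(conn);
+ */
+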
+#endif /* _MINIX_VBOX_H */
--- /dev/null
+#ifndef _MINIX_VBOXIF_H
+#define _MINIX_VBOXIF_H
+
+/*===========================================================================*
+ * Messages for VBOX device *
+ *===========================================================================*/
+
+/* Base type for VBOX requests and responses. */
+#define VBOX_RQ_BASE 0x1600
+#define VBOX_RS_BASE 0x1680
+
+#define IS_VBOX_RQ(type) (((type) & ~0x7f) == VBOX_RQ_BASE)
+#define IS_VBOX_RS(type) (((type) & ~0x7f) == VBOX_RS_BASE)
+
+/* Message types for VBOX requests. */
+#define VBOX_OPEN (VBOX_RQ_BASE + 0) /* open a connection */
+#define VBOX_CLOSE (VBOX_RQ_BASE + 1) /* close a connection */
+#define VBOX_CALL (VBOX_RQ_BASE + 2) /* perform a call */
+#define VBOX_CANCEL (VBOX_RQ_BASE + 3) /* cancel an ongoing call */
+
+/* Message types for VBOX responses. */
+#define VBOX_REPLY (VBOX_RS_BASE + 0) /* general reply code */
+
+/* Field names for VBOX messages. */
+#define VBOX_CONN m2_i1 /* connection identifier */
+#define VBOX_GRANT m2_i2 /* grant ID of buffer or name */
+#define VBOX_COUNT m2_i3 /* number of bytes or elements */
+#define VBOX_RESULT m2_i1 /* result or error code */
+#define VBOX_CODE m2_i2 /* VirtualBox result code */
+#define VBOX_FUNCTION m2_l1 /* function call number */
+#define VBOX_ID m2_l2 /* opaque request ID */
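+
+/* The VBOX_ID value of a request is echoed in the corresponding VBOX_REPLY
+ * message, so that a caller with multiple outstanding requests can match
+ * replies to requests.
+ */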
+
+#endif /* _MINIX_VBOXIF_H */
--- /dev/null
+#ifndef _MINIX_VBOXTYPE_H
+#define _MINIX_VBOXTYPE_H
+
+/* This header declares the type definitions shared between the VBOX driver,
+ * interface in libsys, and any caller of those interface functions.
+ */
+
+/* Call parameter type. */
+typedef enum {
+ VBOX_TYPE_INVALID, /* invalid type */
+ VBOX_TYPE_U32, /* 32-bit value */
+ VBOX_TYPE_U64, /* 64-bit value */
+ VBOX_TYPE_PTR /* pointer to granted memory area */
+} vbox_type_t;
+
+/* Call parameter transfer direction. */
+#define VBOX_DIR_IN 0x01 /* from host to guest */
+#define VBOX_DIR_OUT 0x02 /* from guest to host */
+#define VBOX_DIR_INOUT (VBOX_DIR_IN | VBOX_DIR_OUT)
+
+/* Call parameter. */
+typedef struct {
+ vbox_type_t type;
+ union {
+ u32_t u32;
+ u64_t u64;
+ struct {
+ cp_grant_id_t grant;
+ size_t off;
+ size_t size;
+ unsigned int dir;
+ } ptr;
+ };
+} vbox_param_t;
+
+#endif /* _MINIX_VBOXTYPE_H */
timers.c \
timing.c \
tsc_util.c \
+ vbox.c \
vm_brk.c \
vm_dmacalls.c \
vm_exec_newmem.c \
--- /dev/null
+/* VBOX driver interface - synchronous version - by D.C. van Moolenbroek */
+
+#include <minix/drivers.h>
+#include <minix/vboxtype.h>
+#include <minix/vboxif.h>
+#include <minix/vbox.h>
+#include <minix/ds.h>
+#include <assert.h>
+
+static endpoint_t vbox_endpt = NONE;
+
+int vbox_init(void)
+{
+/* Initialize the library by resolving the VBOX driver endpoint.
+ */
+ int r;
+
+ if ((r = ds_retrieve_label_endpt("vbox", &vbox_endpt)) != OK) {
+ printf("libvbox: unable to obtain VBOX endpoint (%d)\n", r);
+
+ return EINVAL;
+ }
+
+ return OK;
+}
+
+vbox_conn_t vbox_open(char *name)
+{
+/* Open a VirtualBox HGCM connection.
+ */
+ message m;
+ cp_grant_id_t grant;
+ size_t len;
+ int r;
+
+ if (vbox_endpt == NONE)
+ return EDEADSRCDST;
+
+ len = strlen(name) + 1;
+ grant = cpf_grant_direct(vbox_endpt, (vir_bytes) name, len, CPF_READ);
+ if (!GRANT_VALID(grant))
+ return ENOMEM;
+
+ memset(&m, 0, sizeof(m));
+ m.m_type = VBOX_OPEN;
+ m.VBOX_GRANT = grant;
+ m.VBOX_COUNT = len;
+ m.VBOX_ID = 0;
+
+ r = sendrec(vbox_endpt, &m);
+
+ cpf_revoke(grant);
+
+ if (r != OK)
+ return r;
+
+ if (m.VBOX_ID != 0)
+ return EINVAL;
+
+ return m.VBOX_RESULT;
+}
+
+int vbox_close(vbox_conn_t conn)
+{
+/* Close a VirtualBox HGCM connection.
+ */
+ message m;
+ int r;
+
+ if (vbox_endpt == NONE)
+ return EDEADSRCDST;
+
+ memset(&m, 0, sizeof(m));
+ m.m_type = VBOX_CLOSE;
+ m.VBOX_CONN = conn;
+ m.VBOX_ID = 0;
+
+ r = sendrec(vbox_endpt, &m);
+
+ if (r != OK)
+ return r;
+
+ if (m.VBOX_ID != 0)
+ return EINVAL;
+
+ return m.VBOX_RESULT;
+}
+
+int vbox_call(vbox_conn_t conn, u32_t function, vbox_param_t *param, int count,
+ int *code)
+{
+/* Call a VirtualBox HGCM function. The caller must set up all buffer grants.
+ */
+ cp_grant_id_t grant = GRANT_INVALID;
+ message m;
+ int i, r;
+
+ if (vbox_endpt == NONE) {
+ vbox_put(param, count);
+
+ return EDEADSRCDST;
+ }
+
+ /* Check whether all parameters are initialized correctly. */
+ for (i = 0; i < count; i++) {
+ switch (param[i].type) {
+ case VBOX_TYPE_U32:
+ case VBOX_TYPE_U64:
+ case VBOX_TYPE_PTR:
+ break;
+
+ default:
+ vbox_put(param, count);
+
+ return ENOMEM;
+ }
+ }
+
+ if (count > 0) {
+ grant = cpf_grant_direct(vbox_endpt, (vir_bytes) param,
+ sizeof(param[0]) * count, CPF_READ | CPF_WRITE);
+ if (!GRANT_VALID(grant)) {
+ vbox_put(param, count);
+
+ return ENOMEM;
+ }
+ }
+
+ memset(&m, 0, sizeof(m));
+ m.m_type = VBOX_CALL;
+ m.VBOX_CONN = conn;
+ m.VBOX_GRANT = grant;
+ m.VBOX_COUNT = count;
+ m.VBOX_ID = 0;
+ m.VBOX_FUNCTION = function;
+
+ r = sendrec(vbox_endpt, &m);
+
+ if (GRANT_VALID(grant))
+ cpf_revoke(grant);
+
+ vbox_put(param, count);
+
+ if (r != OK)
+ return r;
+
+ if (m.VBOX_ID != 0)
+ return EINVAL;
+
+ if (code != NULL)
+ *code = m.VBOX_CODE;
+
+ return m.VBOX_RESULT;
+}
+
+void vbox_set_u32(vbox_param_t *param, u32_t value)
+{
+/* Set the given parameter to a 32-bit value.
+ */
+
+ param->type = VBOX_TYPE_U32;
+ param->u32 = value;
+}
+
+void vbox_set_u64(vbox_param_t *param, u64_t value)
+{
+/* Set the given parameter to a 64-bit value.
+ */
+
+ param->type = VBOX_TYPE_U64;
+ param->u64 = value;
+}
+
+void vbox_set_ptr(vbox_param_t *param, void *ptr, size_t size,
+ unsigned int dir)
+{
+/* Set the given parameter to a grant for the given local pointer.
+ */
+ cp_grant_id_t grant = GRANT_INVALID;
+ int flags;
+
+ flags = 0;
+ if (dir & VBOX_DIR_IN) flags |= CPF_WRITE;
+ if (dir & VBOX_DIR_OUT) flags |= CPF_READ;
+
+ if (size > 0) {
+ grant = cpf_grant_direct(vbox_endpt, (vir_bytes) ptr, size, flags);
+ if (!GRANT_VALID(grant)) {
+ param->type = VBOX_TYPE_INVALID;
+
+ return;
+ }
+ }
+
+ param->type = VBOX_TYPE_PTR;
+ param->ptr.grant = grant;
+ param->ptr.off = 0;
+ param->ptr.size = size;
+ param->ptr.dir = dir;
+}
+
+void vbox_set_grant(vbox_param_t *param, endpoint_t endpt, cp_grant_id_t grant,
+ size_t off, size_t size, unsigned int dir)
+{
+/* Set the given parameter to an indirect grant for the given grant.
+ */
+ cp_grant_id_t indir_grant;
+
+ /* Unfortunately, the current implementation of indirect grants does not
+ * support making smaller subgrants out of larger original grants. Therefore,
+ * we are forced to grant more access than we would like.
+ */
+ indir_grant = cpf_grant_indirect(vbox_endpt, endpt, grant);
+ if (!GRANT_VALID(indir_grant)) {
+ param->type = VBOX_TYPE_INVALID;
+
+ return;
+ }
+
+ param->type = VBOX_TYPE_PTR;
+ param->ptr.grant = indir_grant;
+ param->ptr.off = off;
+ param->ptr.size = size;
+ param->ptr.dir = dir;
+}
+
+void vbox_put(vbox_param_t *param, int count)
+{
+/* Free all resources used for the given parameters.
+ */
+
+ while (count--) {
+ if (param->type == VBOX_TYPE_PTR && GRANT_VALID(param->ptr.grant))
+ cpf_revoke(param->ptr.grant);
+
+ param++;
+ }
+}
+
+u32_t vbox_get_u32(vbox_param_t *param)
+{
+/* Retrieve the 32-bit value from the given parameter.
+ */
+
+ assert(param->type == VBOX_TYPE_U32);
+ return param->u32;
+}
+
+u64_t vbox_get_u64(vbox_param_t *param)
+{
+/* Retrieve the 64-bit value from the given parameter.
+ */
+
+ assert(param->type == VBOX_TYPE_U64);
+ return param->u64;
+}