pid_t getnpid(endpoint_t proc_ep);
uid_t getnuid(endpoint_t proc_ep);
gid_t getngid(endpoint_t proc_ep);
+int dupfrom(endpoint_t endpt, int fd);
ssize_t pread64(int fd, void *buf, size_t count, u64_t where);
ssize_t pwrite64(int fd, const void *buf, size_t count, u64_t where);
#define FSTAT 52
#define LSTAT 53
#define IOCTL 54
+#define DUPFROM 56
#define FS_READY 57
#define PIPE2 58
#define EXEC 59
#define VFS_PFS_FD m2_i3
#define VFS_PFS_FILP m2_p1
+/* Field names for the dupfrom(2) call. */
+#define VFS_DUPFROM_ENDPT m1_i1
+#define VFS_DUPFROM_FD m1_i2
+
/* Field names for GETRUSAGE related calls */
#define RU_ENDPT m1_i1 /* indicates a process for sys_getrusage */
#define RU_WHO m1_i1 /* who argument in getrusage call */
# queryparam
SRCS+= paramvalue.c
-# Minix servers/drivers syscall.
-SRCS+= getngid.c getnpid.c getnprocnr.c getnucred.c getnuid.c getprocnr.c \
- mapdriver.c vm_memctl.c vm_set_priv.c vm_query_exit.c vm_update.c
+# Minix servers/drivers syscalls. FIXME: these should be moved into libsys.
+SRCS+= dupfrom.c getngid.c getnpid.c getnprocnr.c getnucred.c getnuid.c \
+ getprocnr.c mapdriver.c vm_memctl.c vm_set_priv.c vm_query_exit.c \
+ vm_update.c
SRCS+= oneC_sum.c
--- /dev/null
+#include <lib.h>
+#include <string.h>
+
+int
+dupfrom(endpoint_t endpt, int fd)
+{
+ message m;
+
+ memset(&m, 0, sizeof(m));
+ m.VFS_DUPFROM_ENDPT = endpt;
+ m.VFS_DUPFROM_FD = fd;
+
+ return _syscall(VFS_PROC_NR, DUPFROM, &m);
+}
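/*
 * Illustration only, not part of this change: the user-process side of the
 * exchange that dupfrom(2) enables.  As the VFS handler further below
 * explains, the call was added for the VND driver: a user process opens a
 * backing file and passes the descriptor number to the driver in an ioctl,
 * after which the driver calls dupfrom() to obtain its own copy of that
 * descriptor.  The ioctl code, argument structure, and device path used here
 * are hypothetical placeholders, not definitions from this change.
 */
#include <sys/ioctl.h>
#include <err.h>
#include <fcntl.h>
#include <unistd.h>

#define HYPO_VNDIOCATTACH	_IOW('h', 1, struct hypo_vnd_attach)
struct hypo_vnd_attach {
	int	fd;		/* descriptor of the backing file */
};

int
main(void)
{
	struct hypo_vnd_attach req;
	int vndfd;

	if ((vndfd = open("/dev/vnd0", O_RDWR)) < 0)
		err(1, "open /dev/vnd0");
	if ((req.fd = open("/root/backing.img", O_RDWR)) < 0)
		err(1, "open backing file");

	/* Hand over the descriptor number; while servicing this ioctl the
	 * driver duplicates the descriptor into its own process. */
	if (ioctl(vndfd, HYPO_VNDIOCATTACH, &req) < 0)
		err(1, "ioctl");

	/* The local copy is no longer needed; the driver holds its own. */
	close(req.fd);
	return 0;
}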
no_sys, /* 53 = (lstat) */
no_sys, /* 54 = ioctl */
no_sys, /* 55 = fcntl */
- no_sys, /* 56 = unused */
+ no_sys, /* 56 = dupfrom */
no_sys, /* 57 = unused */
no_sys, /* 58 = unused */
do_exec, /* 59 = execve */
* do_put_filp: marks a filp as not in-flight anymore.
* do_cancel_fd: cancel the transaction when something goes wrong for
* the receiver.
+ * do_dupfrom: copies a filp from another endpoint.
*/
#include <sys/select.h>
err_code = EIO; /* disallow all use except close(2) */
else if ((filp = rfp->fp_filp[fild]) == NULL)
err_code = EBADF;
- else
+ else if (locktype != VNODE_NONE) /* Only lock the filp if requested */
lock_filp(filp, locktype); /* All is fine */
return(filp); /* may also be NULL */
mutex_unlock(&f->filp_lock);
}
+
+/*===========================================================================*
+ * do_dupfrom *
+ *===========================================================================*/
+int do_dupfrom(message *UNUSED(m_out))
+{
+/* Duplicate a file descriptor from another process into the calling process.
+ * The other process is identified by a magic grant created for it to make the
+ * initial (IOCTL) request to the calling process. This call has been added
+ * specifically for the VND driver.
+ */
+ struct fproc *rfp;
+ struct filp *rfilp;
+ struct vnode *vp;
+ endpoint_t endpt;
+ int r, fd, slot;
+
+ /* This should be replaced with an ACL check. */
+ if (!super_user) return(EPERM);
+
+ endpt = (endpoint_t) job_m_in.VFS_DUPFROM_ENDPT;
+ fd = job_m_in.VFS_DUPFROM_FD;
+
+ if (isokendpt(endpt, &slot) != OK) return(EINVAL);
+ rfp = &fproc[slot];
+
+ /* Obtain the filp, but do not lock it yet: we first need to make sure that
+ * locking it will not result in a deadlock.
+ */
+ if ((rfilp = get_filp2(rfp, fd, VNODE_NONE)) == NULL)
+ return(err_code);
+
+ /* For now, we do not allow remote duplication of device nodes. In practice,
+ * only a block-special file can cause a deadlock for the caller (currently
+ * only the VND driver). This would happen if a user process passes in the
+ * file descriptor to the device node on which it is performing the IOCTL.
+ * This would cause two VFS threads to deadlock on the same filp. Since the
+ * VND driver does not allow device nodes to be used anyway, this somewhat
+ * rudimentary check eliminates such deadlocks. A better solution would be
+ * to check if the given endpoint holds a lock to the target filp, but we
+ * currently do not have this information within VFS.
+ */
+ vp = rfilp->filp_vno;
+ if (S_ISCHR(vp->v_mode) || S_ISBLK(vp->v_mode))
+ return(EINVAL);
+
+ /* Now we can safely lock the filp, copy it, and unlock it again. */
+ lock_filp(rfilp, VNODE_READ);
+
+ r = copy_filp(who_e, (filp_id_t) rfilp);
+
+ unlock_filp(rfilp);
+
+ return(r);
+}
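/*
 * Illustration only, not part of this change: a sketch of the driver's side
 * of the exchange described above.  While servicing the user's ioctl, a
 * vnd-style driver makes dupfrom(2) as a back-call to VFS to obtain a local
 * copy of the descriptor the user passed in.  The function name, the way the
 * user's endpoint and descriptor number reach the driver, and the assumption
 * that the MINIX headers declaring endpoint_t and the dupfrom() prototype are
 * already in scope all depend on the driver framework in use.
 */
#include <errno.h>
#include <unistd.h>

static int hypo_backing_fd = -1;	/* driver-local copy of the descriptor */

static int
hypo_attach(endpoint_t user_endpt, int user_fd)
{
	int fd;

	/* Ask VFS (do_dupfrom above) to copy the user's open file into this
	 * process' descriptor table; the new descriptor number is returned,
	 * or -1 with errno set on failure. */
	if ((fd = dupfrom(user_endpt, user_fd)) < 0)
		return -errno;

	hypo_backing_fd = fd;

	/* The driver can now serve transfers with pread64()/pwrite64() on
	 * hypo_backing_fd, even after the user closes its own descriptor. */
	return 0;
}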
int cancel_fd(endpoint_t ep, int fd);
int do_cancel_fd(message *m_out);
void close_filp(struct filp *fp);
+int do_dupfrom(message *m_out);
/* fscall.c */
void nested_fs_call(message *m);
do_lstat, /* 53 = lstat */
do_ioctl, /* 54 = ioctl */
do_fcntl, /* 55 = fcntl */
- no_sys, /* 56 = (mpx) */
+ do_dupfrom, /* 56 = dupfrom */
do_fsready, /* 57 = FS proc ready */
do_pipe2, /* 58 = pipe2 */
no_sys, /* 59 = (execve)*/
} vnode[NR_VNODES];
/* vnode lock types mapping */
+#define VNODE_NONE TLL_NONE /* used only for get_filp2 to avoid locking */
#define VNODE_READ TLL_READ
#define VNODE_OPCL TLL_READSER
#define VNODE_WRITE TLL_WRITE