assert(filp->filp_softlock == NULL);
filp->filp_softlock = fp;
} else {
+ /* We have to make an exception for vnodes belonging to pipes. Even
+ * read(2) operations on pipes change the vnode and therefore require
+ * exclusive access.
+ */
+ if (S_ISFIFO(vp->v_mode) && locktype == VNODE_READ)
+ locktype = VNODE_WRITE;
lock_vnode(vp, locktype);
}
}
assert(vmp != NULL);
- tll_upgrade(&vmp->m_lock);
+ upgrade_vmnt_lock(vmp);
if (job_call_nr == UNLINK)
r = req_unlink(dirp->v_fs_e, dirp->v_inode_nr, fullpath);
(r1 = forbidden(fp, new_dirp, W_BIT|X_BIT)) != OK) r = r1;
if (r == OK) {
- tll_upgrade(&oldvmp->m_lock); /* Upgrade to exclusive access */
+ upgrade_vmnt_lock(oldvmp); /* Upgrade to exclusive access */
r = req_rename(old_dirp->v_fs_e, old_dirp->v_inode_nr, old_name,
new_dirp->v_inode_nr, fullpath);
}
int r = OK;
for (vmp = &vmnt[0]; vmp < &vmnt[NR_MNTS]; ++vmp) {
- if ((r = lock_vmnt(vmp, VMNT_EXCL)) != OK)
+ if ((r = lock_vmnt(vmp, VMNT_READ)) != OK)
break;
if (vmp->m_dev != NO_DEV && vmp->m_fs_e != NONE &&
vmp->m_root_node != NULL) {
if ((rfilp = get_filp(scratch(fp).file.fd_nr, VNODE_READ)) == NULL)
return(err_code);
+
dev = rfilp->filp_vno->v_dev;
+ unlock_filp(rfilp);
+
for (vmp = &vmnt[0]; vmp < &vmnt[NR_MNTS]; ++vmp) {
+ if (vmp->m_dev != dev) continue;
+ if ((r = lock_vmnt(vmp, VMNT_READ)) != OK)
+ break;
if (vmp->m_dev != NO_DEV && vmp->m_dev == dev &&
vmp->m_fs_e != NONE && vmp->m_root_node != NULL) {
- if ((r = lock_vmnt(vmp, VMNT_EXCL)) != OK)
- break;
req_sync(vmp->m_fs_e);
- unlock_vmnt(vmp);
}
+ unlock_vmnt(vmp);
}
- unlock_filp(rfilp);
-
return(r);
}
if (oflags & O_TRUNC) {
if ((r = forbidden(fp, vp, W_BIT)) != OK)
break;
+ upgrade_vnode_lock(vp);
truncate_vnode(vp, 0);
}
break;
case S_IFIFO:
/* Create a mapped inode on PFS which handles reads
and writes to this named pipe. */
- tll_upgrade(&vp->v_lock);
+ upgrade_vnode_lock(vp);
r = map_vnode(vp, PFS_PROC_NR);
if (r == OK) {
if (vp->v_ref_count == 1) {
}
lock_vnode(vp, VNODE_OPCL);
+ upgrade_vmnt_lock(dir_vmp); /* Creating file, need exclusive access */
if ((r = forbidden(fp, dirp, W_BIT|X_BIT)) != OK ||
(r = req_create(dirp->v_fs_e, dirp->v_inode_nr,bits, fp->fp_effuid,
/* Can't create inode either due to permissions or some other
* problem. In case r is EEXIST, we might be dealing with a
* dangling symlink.*/
+
+ /* Downgrade lock to prevent deadlock during symlink resolving */
+ downgrade_vmnt_lock(dir_vmp);
+
if (r == EEXIST) {
struct vnode *slp, *old_wd;
+
/* Resolve path up to symlink */
findnode.l_flags = PATH_RET_SYMLINK;
findnode.l_vnode_lock = VNODE_READ;
struct vnode *dir_vp;
struct vmnt *vmp, *vmpres;
struct lookup_res res;
+ tll_access_t mnt_lock_type;
assert(resolve->l_vmp);
assert(resolve->l_vnode);
symloop = 0; /* Number of symlinks seen so far */
/* Lock vmnt */
- if ((r = lock_vmnt(vmpres, resolve->l_vmnt_lock)) != OK) {
+ if (resolve->l_vmnt_lock == VMNT_READ)
+ mnt_lock_type = VMNT_WRITE;
+ else
+ mnt_lock_type = resolve->l_vmnt_lock;
+
+ if ((r = lock_vmnt(vmpres, mnt_lock_type)) != OK) {
if (r == EBUSY) /* vmnt already locked */
vmpres = NULL;
else
if (vmpres) unlock_vmnt(vmpres);
vmpres = find_vmnt(fs_e);
if (vmpres == NULL) return(EIO); /* mount point vanished? */
- if ((r = lock_vmnt(vmpres, resolve->l_vmnt_lock)) != OK) {
+ if ((r = lock_vmnt(vmpres, mnt_lock_type)) != OK) {
if (r == EBUSY)
vmpres = NULL; /* Already locked */
else
}
}
+ if (*(resolve->l_vmp) != NULL && resolve->l_vmnt_lock != mnt_lock_type) {
+ /* downgrade VMNT_WRITE to VMNT_READ */
+ downgrade_vmnt_lock(*(resolve->l_vmp));
+ }
+
/* Fill in response fields */
result_node->inode_nr = res.inode_nr;
result_node->fmode = res.fmode;
/* Get a lock on PFS */
if ((vmp = find_vmnt(PFS_PROC_NR)) == NULL) panic("PFS gone");
- if ((r = lock_vmnt(vmp, VMNT_WRITE)) != OK) return(r);
+ if ((r = lock_vmnt(vmp, VMNT_READ)) != OK) return(r);
/* See if a free vnode is available */
if ((vp = get_free_vnode()) == NULL) {
void unlock_vmnt(struct vmnt *vp);
void vmnt_unmap_by_endpt(endpoint_t proc_e);
void fetch_vmnt_paths(void);
+void upgrade_vmnt_lock(struct vmnt *vmp);
+void downgrade_vmnt_lock(struct vmnt *vmp);
/* vnode.c */
void check_vnode_locks(void);
void dup_vnode(struct vnode *vp);
void put_vnode(struct vnode *vp);
void vnode_clean_refs(struct vnode *vp);
+void upgrade_vnode_lock(struct vnode *vp);
/* write.c */
int do_write(void);
u64_t position, new_pos;
/* Must make sure we're operating on locked filp and vnode */
- assert(tll_islocked(&f->filp_vno->v_lock));
+ assert(tll_locked_by_me(&f->filp_vno->v_lock));
assert(mutex_trylock(&f->filp_lock) == -EDEADLK);
oflags = f->filp_flags;
if (len == 0) len = (size_t) job_m_in.utime_strlen;
lookup_init(&resolve, fullpath, PATH_NOFLAGS, &vmp, &vp);
- resolve.l_vmnt_lock = VMNT_WRITE;
+ resolve.l_vmnt_lock = VMNT_READ;
resolve.l_vnode_lock = VNODE_READ;
/* Temporarily open the file */
* request queued ("write bias") or when a read-serialized lock is trying to
* upgrade to write-only. The current lock for this tll is either read or
* read-serialized. */
- if (tllp->t_write != NULL || (tllp->t_status & TLL_UPGR))
+ if (tllp->t_write != NULL || (tllp->t_status & TLL_UPGR)) {
+ assert(!(tllp->t_status & TLL_PEND));
return tll_append(tllp, locktype);
+ }
/* If this lock is in read-serialized mode, we can allow read requests and
* queue read-serialized requests */
if (tllp->t_current == TLL_READSER) {
- if (locktype == TLL_READ) {
+ if (locktype == TLL_READ && !(tllp->t_status & TLL_UPGR)) {
tllp->t_readonly++;
return(OK);
} else
if (r == EBUSY) return(r);
if (initial_locktype != locktype) {
- tll_upgrade(&vmp->m_lock);
+ upgrade_vmnt_lock(vmp);
}
#if LOCK_DEBUG
}
+/*===========================================================================*
+ * downgrade_vmnt_lock *
+ *===========================================================================*/
+void downgrade_vmnt_lock(struct vmnt *vmp)
+{
+ ASSERTVMP(vmp);
+ tll_downgrade(&vmp->m_lock);
+
+#if LOCK_DEBUG
+ /* If we're no longer the owner of a lock, we downgraded to VMNT_READ */
+ if (!tll_locked_by_me(&vmp->m_lock)) {
+ fp->fp_vmnt_rdlocks++;
+ }
+#endif
+}
+
+/*===========================================================================*
+ * upgrade_vmnt_lock *
+ *===========================================================================*/
+void upgrade_vmnt_lock(struct vmnt *vmp)
+{
+ ASSERTVMP(vmp);
+ tll_upgrade(&vmp->m_lock);
+}
+
/*===========================================================================*
* fetch_vmnt_paths *
*===========================================================================*/
tll_unlock(&vp->v_lock);
}
+/*===========================================================================*
+ *                              upgrade_vnode_lock                          *
+ *===========================================================================*/
+void upgrade_vnode_lock(struct vnode *vp)
+{
+ ASSERTVP(vp);
+ tll_upgrade(&vp->v_lock);
+}
+
/*===========================================================================*
* dup_vnode *
*===========================================================================*/
/* If we already had a lock, there is a consistency problem */
assert(lock_vp != EBUSY);
- tll_upgrade(&vp->v_lock); /* Make sure nobody else accesses this vnode */
+ upgrade_vnode_lock(vp); /* Acquire exclusive access */
/* A vnode that's not in use can't be put back. */
if (vp->v_ref_count <= 0)