if ((vmp = find_vmnt(fs_e)) == NULL)
panic("Trying to talk to non-existent FS");
+ if (fs_e == fp->fp_endpoint) return(EDEADLK);
if (!force_sync) {
fp->fp_sendrec = reqmp; /* Where to store request and reply */
if ((vmp = find_vmnt(who_e)) != NULL) {
/* A back call or dev result from an FS endpoint */
+
+	/* When an FS endpoint has to make a callback in order to mount, force
+ * its device to a "none device" so block reads/writes will be handled
+ * by ROOT_FS_E.
+ */
+ if (vmp->m_flags & VMNT_MOUNTING)
+ vmp->m_flags |= VMNT_FORCEROOTBSF;
+
if (worker_available() == 0) {
/* No worker threads available to handle call */
if (deadlock_resolving) {
/* Already trying to resolve a deadlock, can't
* handle more, sorry */
-
reply(who_e, EAGAIN);
return;
}
deadlock_resolving = 1;
- vmp->m_flags |= VMNT_BACKCALL;
dl_worker_start(func);
return;
}
select_reply2(m_in.m_source, m_in.DEV_MINOR, m_in.DEV_SEL_OPS);
if (deadlock_resolving) {
- struct vmnt *vmp;
- if ((vmp = find_vmnt(who_e)) != NULL)
- vmp->m_flags &= ~VMNT_BACKCALL;
-
if (fp != NULL && fp->fp_wtid == dl_worker.w_tid)
deadlock_resolving = 0;
}
int error, i;
struct job my_job;
struct fproc *rfp;
- struct vmnt *vmp;
my_job = *((struct job *) arg);
fp = my_job.j_fp;
/* Copy the results back to the user and send reply. */
if (error != SUSPEND) {
if (deadlock_resolving) {
- if ((vmp = find_vmnt(who_e)) != NULL)
- vmp->m_flags &= ~VMNT_BACKCALL;
-
if (fp->fp_wtid == dl_worker.w_tid)
deadlock_resolving = 0;
}
/* XXX: move this upwards before lookup after proper locking. */
/* We'll need a vnode for the root inode */
if ((root_node = get_free_vnode()) == NULL || dev == 266) {
- if (vp != NULL) {
- unlock_vnode(vp);
- put_vnode(vp);
- }
+ unlock_vnode(vp);
unlock_vmnt(new_vmp);
+ put_vnode(vp);
return(err_code);
}
else new_vmp->m_flags &= ~VMNT_READONLY;
/* Tell FS which device to mount */
+ new_vmp->m_flags |= VMNT_MOUNTING;
r = req_readsuper(fs_e, label, dev, rdonly, isroot, &res, &con_reqs);
+ new_vmp->m_flags &= ~VMNT_MOUNTING;
+
if (r != OK) {
- if (vp != NULL) {
- unlock_vnode(vp);
- put_vnode(vp);
- }
new_vmp->m_fs_e = NONE;
new_vmp->m_dev = NO_DEV;
unlock_vnode(root_node);
+ unlock_vnode(vp);
unlock_vmnt(new_vmp);
+ put_vnode(vp);
return(r);
}
+ lock_bsf();
+
/* Fill in root node's fields */
root_node->v_fs_e = res.fs_e;
root_node->v_inode_nr = res.inode_nr;
new_vmp->m_comm.c_max_reqs = con_reqs;
new_vmp->m_comm.c_cur_reqs = 0;
- lock_bsf();
-
if (mount_root) {
/* Superblock and root node already read.
* Nothing else can go wrong. Perform the mount. */
put_vnode(vp);
put_vnode(root_node);
new_vmp->m_dev = NO_DEV;
+ new_vmp->m_flags = 0;
unlock_bsf();
return(r);
}
if (is_nonedev(dev)) alloc_nonedev(dev);
/* The new FS will handle block I/O requests for its device now. */
- update_bspec(dev, fs_e, 0 /* Don't send new driver endpoint */);
+ if (!(new_vmp->m_flags & VMNT_FORCEROOTBSF))
+ update_bspec(dev, fs_e, 0 /* Don't send new driver endpoint */);
unlock_vnode(vp);
unlock_vnode(root_node);
unlock_vmnt(new_vmp);
unlock_bsf();
- return(r);
+ return(OK);
}
vp->v_bfs_e = ROOT_FS_E; /* By default */
for (vmp = &vmnt[0]; vmp < &vmnt[NR_MNTS]; ++vmp)
if (vmp->m_dev == vp->v_sdev &&
- !is_nonedev(vmp->m_dev))
+ !(vmp->m_flags & VMNT_FORCEROOTBSF)) {
vp->v_bfs_e = vmp->m_fs_e;
+ }
/* Get the driver endpoint of the block spec device */
major_dev = major(vp->v_sdev);
/* vmnt flags */
#define VMNT_READONLY 01 /* Device mounted readonly */
#define VMNT_BACKCALL 02 /* FS did back call */
+#define VMNT_MOUNTING 04 /* Device is being mounted */
+#define VMNT_FORCEROOTBSF 010 /* Force usage of none-device */
/* vmnt lock types mapping */
#define VMNT_READ TLL_READ
PUBLIC void worker_stop(struct worker_thread *worker)
{
ASSERTW(worker); /* Make sure we have a valid thread */
- worker->w_job.j_m_in.m_type = EIO;
+ if (worker->w_job.j_fp)
+ worker->w_job.j_fp->fp_sendrec->m_type = EIO;
+ else
+ worker->w_job.j_m_in.m_type = EIO;
worker_wake(worker);
}