/* SEF functions and variables. */
static void sef_local_startup(void);
static int sef_cb_init_fresh(int type, sef_init_info_t *info);
-static endpoint_t receive_from;
/*===========================================================================*
* main *
message mess;
struct rprocpub rprocpub[NR_BOOT_PROCS];
- receive_from = NONE;
self = NULL;
verbose = 0;
char *mount_type, *mount_label;
int r;
- /* Mount the pipe file server. */
- receive_from = PFS_PROC_NR;
+ /* Disallow requests from e.g. init(8) while doing the initial mounting. */
+ worker_allow(FALSE);
+ /* Mount the pipe file server. */
mount_pfs();
/* Mount the root file system. */
- receive_from = MFS_PROC_NR;
-
mount_type = "mfs"; /* FIXME: use boot image process name instead */
mount_label = "fs_imgrd"; /* FIXME: obtain this from RS */
mount_label);
if (r != OK)
panic("Failed to initialize root");
- receive_from = ANY;
+
+ /* All done with mounting, allow requests now. */
+ worker_allow(TRUE);
}
/*===========================================================================*
}
for(;;) {
- assert(receive_from != NONE);
-
/* Normal case. No one to revive. Get a useful request. */
- if ((r = sef_receive(receive_from, &m_in)) != OK) {
+ if ((r = sef_receive(ANY, &m_in)) != OK) {
panic("VFS: sef_receive error: %d", r);
}
static void *worker_main(void *arg);
static void worker_sleep(void);
static void worker_wake(struct worker_thread *worker);
+
static mthread_attr_t tattr;
+static unsigned int pending; /* number of procs with pending, unassigned work */
+static unsigned int busy; /* number of worker threads with work assigned */
+static int block_all; /* disallow assigning new work to threads? */
#ifdef MKCOVERAGE
# define TH_STACKSIZE (40 * 1024)
if (mthread_attr_setdetachstate(&tattr, MTHREAD_CREATE_DETACHED) != 0)
panic("couldn't set default thread detach state");
pending = 0;
+ busy = 0;
+ block_all = FALSE;
for (i = 0; i < NR_WTHREADS; i++) {
wp = &workers[i];
yield_all();
}
+/*===========================================================================*
+ * worker_assign *
+ *===========================================================================*/
+static void worker_assign(struct fproc *rfp)
+{
+/* Assign the work for the given process to a free thread. The caller must
+ * ensure that there is in fact at least one free thread.
+ */
+ struct worker_thread *worker;
+ int i;
+
+ /* Find a free worker thread; the caller guarantees that one exists. */
+ worker = NULL;
+ for (i = 0; i < NR_WTHREADS; i++) {
+ if (workers[i].w_fp == NULL) {
+ worker = &workers[i];
+ break;
+ }
+ }
+ assert(worker != NULL);
+
+ /* Assign work to it. */
+ rfp->fp_worker = worker;
+ worker->w_fp = rfp;
+ busy++;
+
+ worker_wake(worker);
+}
+
+/*===========================================================================*
+ * worker_may_do_pending *
+ *===========================================================================*/
+static int worker_may_do_pending(void)
+{
+/* Return whether there is a free thread that may do pending work. This is the
+ * case only if there is pending work in the first place, there is a free
+ * non-spare thread (the spare thread is never used for pending work), and VFS
+ * is currently accepting new requests (which may not be the case during
+ * initialization).
+ */
+
+ /* Ordered by likelihood to be false. */
+ return (pending > 0 && worker_available() > 1 && !block_all);
+}
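
To make the "free non-spare thread" threshold concrete, here is a small worked sketch of the predicate; it is not part of the patch, assumes NR_WTHREADS == 4 purely for the sake of numbers, and pokes the counters directly, which real code never does.

	/* Sketch only: worker_available() == NR_WTHREADS - busy (see below). */
	pending = 3; block_all = FALSE;
	busy = 4; assert(!worker_may_do_pending());	/* no thread is free at all */
	busy = 3; assert(!worker_may_do_pending());	/* only the spare is free */
	busy = 2; assert(worker_may_do_pending());	/* a non-spare thread is free */
	block_all = TRUE; assert(!worker_may_do_pending());	/* blocked during init */
	block_all = FALSE; pending = 0;
	assert(!worker_may_do_pending());	/* nothing is queued */
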
+
+/*===========================================================================*
+ * worker_allow *
+ *===========================================================================*/
+void worker_allow(int allow)
+{
+/* Allow or disallow workers to process new work. If disallowed, any new work
+ * will be stored as pending, even when there are free worker threads. There is
+ * no facility to stop active workers. To be used only during initialization!
+ */
+ struct fproc *rfp;
+
+ block_all = !allow;
+
+ if (!worker_may_do_pending())
+ return;
+
+ /* Assign any pending work to workers. */
+ for (rfp = &fproc[0]; rfp < &fproc[NR_PROCS]; rfp++) {
+ if (rfp->fp_flags & FP_PENDING) {
+ rfp->fp_flags &= ~FP_PENDING; /* No longer pending */
+ assert(pending > 0);
+ pending--;
+ worker_assign(rfp);
+
+ if (!worker_may_do_pending())
+ return;
+ }
+ }
+}
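
For reference, the initialization code in the main.c hunk earlier in this patch is the intended caller; condensed into a sketch here, with the mount calls abbreviated and the enclosing initialization function left unnamed:

	worker_allow(FALSE);	/* queue incoming work (e.g. from init(8)) as FP_PENDING */
	mount_pfs();		/* mount the pipe file server */
	/* ... mount the root file system ... */
	worker_allow(TRUE);	/* unblock and hand any queued work to free threads */
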
+
/*===========================================================================*
* worker_get_work *
*===========================================================================*/
*/
struct fproc *rfp;
- /* Do we have queued work to do? */
- if (pending > 0) {
+ assert(self->w_fp == NULL);
+
+ /* Is there pending work, and should we do it? */
+ if (worker_may_do_pending()) {
/* Find pending work */
for (rfp = &fproc[0]; rfp < &fproc[NR_PROCS]; rfp++) {
if (rfp->fp_flags & FP_PENDING) {
self->w_fp = rfp;
rfp->fp_worker = self;
+ busy++;
rfp->fp_flags &= ~FP_PENDING; /* No longer pending */
+ assert(pending > 0);
pending--;
- assert(pending >= 0);
return;
}
}
*===========================================================================*/
int worker_available(void)
{
- int busy, i;
-
- busy = 0;
- for (i = 0; i < NR_WTHREADS; i++) {
- if (workers[i].w_fp != NULL)
- busy++;
- }
+/* Return the number of threads that are available, including the spare thread.
+ */
return(NR_WTHREADS - busy);
}
fp->fp_worker = NULL;
self->w_fp = NULL;
+ assert(busy > 0);
+ busy--;
}
return(NULL); /* Unreachable */
/* See if we can wake up a thread to do the work scheduled for the given
* process. If not, mark the process as having pending work for later.
*/
- int i, available, needed;
- struct worker_thread *worker;
+ int needed;
/* Use the last available thread only if requested. Otherwise, leave at least
* one spare thread for deadlock resolution.
*/
needed = use_spare ? 1 : 2;
- worker = NULL;
- for (i = available = 0; i < NR_WTHREADS; i++) {
- if (workers[i].w_fp == NULL) {
- if (worker == NULL)
- worker = &workers[i];
- if (++available >= needed)
- break;
- }
- }
-
- if (available >= needed) {
- assert(worker != NULL);
- rfp->fp_worker = worker;
- worker->w_fp = rfp;
- worker_wake(worker);
+ /* Also make sure that doing new work is allowed at all right now, which may
+ * not be the case during VFS initialization. Callback calls, i.e., calls that
+ * may use the spare thread, are always allowed: they cannot be marked as
+ * pending, so letting them through even while new work is blocked avoids a
+ * (purely theoretical) deadlock.
+ */
+ if (needed <= worker_available() && (!block_all || use_spare)) {
+ worker_assign(rfp);
} else {
rfp->fp_flags |= FP_PENDING;
pending++;