/* Prototype declarations for PRIVATE functions. */
FORWARD _PROTOTYPE( void announce, (void));
+void ser_dump_queues(void);
+/*
+ * Finish booting on the bootstrap processor: release the stopped boot
+ * processes so they can be scheduled, start clock accounting, and make
+ * the first switch to user mode. Never returns (ends in switch_to_user()).
+ */
PUBLIC void bsp_finish_booting(void)
{
+ int i;
#if SPROFILE
 sprofiling = 0; /* we're not profiling until instructed to */
#endif /* SPROFILE */
 get_cpulocal_var(bill_ptr) = proc_addr(IDLE); /* it has to point somewhere */
 announce(); /* print MINIX startup banner */
+ /*
+ * Only now that the CPU-local run queues are set up do we schedule the
+ * boot processes. The slots for the former kernel tasks are skipped.
+ */
+ for (i=0; i < NR_BOOT_PROCS - NR_TASKS; i++) {
+ RTS_UNSET(proc_addr(i), RTS_PROC_STOP);
+ }
 /*
 * enable timer interrupts and clock task on the boot CPU
 */
 cycles_accounting_init();
 DEBUGEXTRA(("done\n"));
- assert(runqueues_ok());
-
+#ifdef CONFIG_SMP
+ cpu_set_flag(bsp_cpu_id, CPU_IS_READY); /* mark BSP ready; presumably APs wait on this — confirm */
+#endif
+
 switch_to_user();
 NOT_REACHABLE;
}
* done this; until then, don't let it run.
*/
if(ip->flags & PROC_FULLVM)
- RTS_SET(rp, RTS_VMINHIBIT);
+ rp->p_rts_flags |= RTS_VMINHIBIT;
- /* None of the kernel tasks run */
- if (rp->p_nr < 0) RTS_SET(rp, RTS_PROC_STOP);
- RTS_UNSET(rp, RTS_SLOT_FREE); /* remove RTS_SLOT_FREE and schedule */
+ rp->p_rts_flags |= RTS_PROC_STOP;
+ rp->p_rts_flags &= ~RTS_SLOT_FREE;
alloc_segments(rp);
DEBUGEXTRA(("done\n"));
}
}
}
+/*
+ * Before the CPU idles, switch to VM's address space (SMP builds only) so
+ * a valid mapping is in place while the CPU sleeps.
+ */
+PRIVATE void switch_address_space_idle(void)
+{
+#ifdef CONFIG_SMP
+ /*
+ * Currently we bet that VM is always alive and its pages are available,
+ * so that when the CPU wakes up the kernel is mapped and no surprises
+ * happen. This is only a concern when more than one CPU is available.
+ */
+ switch_address_space(proc_addr(VM_PROC_NR));
+#endif
+}
+
/*===========================================================================*
* idle *
*===========================================================================*/
* the CPU utiliziation of certain workloads with high precision.
*/
+ switch_address_space_idle();
+
/* start accounting for the idle time */
context_stop(proc_addr(KERNEL));
halt_cpu();
* responsible for inserting a process into one of the scheduling queues.
* The mechanism is implemented here. The actual scheduling policy is
* defined in sched() and pick_proc().
+ *
+ * This function can be used cross-CPU, as it always uses the run queues
+ * of the CPU that the process is assigned to.
*/
int q = rp->p_priority; /* scheduling queue to use */
struct proc * p;
-
-#if DEBUG_RACE
- /* With DEBUG_RACE, schedule everyone at the same priority level. */
- rp->p_priority = q = MIN_USER_Q;
-#endif
-
+ struct proc **rdy_head, **rdy_tail;
+
assert(proc_is_runnable(rp));
assert(q >= 0);
+ rdy_head = get_cpu_var(rp->p_cpu, run_q_head);
+ rdy_tail = get_cpu_var(rp->p_cpu, run_q_tail);
+
/* Now add the process to the queue. */
if (!rdy_head[q]) { /* add to empty queue */
rdy_head[q] = rdy_tail[q] = rp; /* create a new queue */
RTS_SET(p, RTS_PREEMPTED); /* calls dequeue() */
#if DEBUG_SANITYCHECKS
- assert(runqueues_ok());
+ assert(runqueues_ok_local());
#endif
}
{
const int q = rp->p_priority; /* scheduling queue to use */
+ struct proc **rdy_head, **rdy_tail;
+
assert(proc_ptr_ok(rp));
assert(proc_is_runnable(rp));
assert(q >= 0);
+ rdy_head = get_cpu_var(rp->p_cpu, run_q_head);
+ rdy_tail = get_cpu_var(rp->p_cpu, run_q_tail);
+
/* Now add the process to the queue. */
if (!rdy_head[q]) { /* add to empty queue */
rdy_head[q] = rdy_tail[q] = rp; /* create a new queue */
rdy_head[q] = rp; /* set new queue head */
#if DEBUG_SANITYCHECKS
- assert(runqueues_ok());
+ assert(runqueues_ok_local());
#endif
}
/* A process must be removed from the scheduling queues, for example, because
* it has blocked. If the currently active process is removed, a new process
* is picked to run by calling pick_proc().
+ *
+ * This function can operate x-cpu as it always removes the process from the
+ * queue of the cpu the process is currently assigned to.
*/
register int q = rp->p_priority; /* queue to use */
register struct proc **xpp; /* iterate over queue */
register struct proc *prev_xp;
+ struct proc **rdy_tail;
+
assert(proc_ptr_ok(rp));
assert(!proc_is_runnable(rp));
/* Side-effect for kernel: check if the task's stack still is ok? */
assert (!iskernelp(rp) || *priv(rp)->s_stack_guard == STACK_GUARD);
+ rdy_tail = get_cpu_var(rp->p_cpu, run_q_tail);
+
/* Now make sure that the process is not in its ready queue. Remove the
* process if it is found. A process can be made unready even if it is not
* running by being sent a signal that kills it.
*/
prev_xp = NULL;
- for (xpp = &rdy_head[q]; *xpp; xpp = &(*xpp)->p_nextready) {
+ for (xpp = get_cpu_var_ptr(rp->p_cpu, run_q_head[q]); *xpp;
+ xpp = &(*xpp)->p_nextready) {
if (*xpp == rp) { /* found process to remove */
*xpp = (*xpp)->p_nextready; /* replace with next chain */
if (rp == rdy_tail[q]) { /* queue tail removed */
}
#if DEBUG_SANITYCHECKS
- assert(runqueues_ok());
+ assert(runqueues_ok_local());
#endif
}
-#if DEBUG_RACE
-/*===========================================================================*
- * random_process *
- *===========================================================================*/
-PRIVATE struct proc *random_process(struct proc *head)
-{
- int i, n = 0;
- struct proc *rp;
- u64_t r;
- read_tsc_64(&r);
-
- for(rp = head; rp; rp = rp->p_nextready)
- n++;
-
- /* Use low-order word of TSC as pseudorandom value. */
- i = r.lo % n;
-
- for(rp = head; i--; rp = rp->p_nextready)
- ;
-
- assert(rp);
-
- return rp;
-}
-#endif
-
/*===========================================================================*
* pick_proc *
*===========================================================================*/
/* Decide who to run now. A new process is selected an returned.
* When a billable process is selected, record it in 'bill_ptr', so that the
* clock task can tell who to bill for system time.
+ *
+ * This function always uses the run queues of the local CPU!
*/
register struct proc *rp; /* process to run */
+ struct proc **rdy_head;
int q; /* iterate over queues */
/* Check each of the scheduling queues for ready processes. The number of
* queues is defined in proc.h, and priorities are set in the task table.
* The lowest queue contains IDLE, which is always ready.
*/
+ rdy_head = get_cpulocal_var(run_q_head);
for (q=0; q < NR_SCHED_QUEUES; q++) {
if(!(rp = rdy_head[q])) {
TRACE(VF_PICKPROC, printf("queue %d empty\n", q););
continue;
}
-
-#if DEBUG_RACE
- rp = random_process(rdy_head[q]);
-#endif
-
- TRACE(VF_PICKPROC, printf("found %s / %d on queue %d\n",
- rp->p_name, rp->p_endpoint, q););
assert(proc_is_runnable(rp));
if (priv(rp)->s_flags & BILLABLE)
get_cpulocal_var(bill_ptr) = rp; /* bill for system time */