Round-robin within one priority queue works fine.
Ageing algorithm to be done.
* is called on those clock ticks when a lot of work needs to be done.
*/
+ /* A process used up a full quantum. The interrupt handler stored this
+ * process in 'prev_ptr'. First make sure that the process is not on the
+ * scheduling queues. Then announce the process ready again. Since it has
+ * scheduling queues. Then announce the process ready again. Since it has
+ * place in the queues. As a side-effect a new process will be scheduled.
+ */
+ if (prev_ptr->p_sched_ticks <= 0 && priv(prev_ptr)->s_flags & PREEMPTIBLE) {
+ lock_dequeue(prev_ptr); /* take it off the queues */
+ lock_enqueue(prev_ptr); /* and reinsert it again */
+ }
+
/* Check if a clock timer expired and run its watchdog function. */
if (next_timeout <= realtime) {
tmrs_exptimers(&clock_timers, realtime, NULL);
TMR_NEVER : clock_timers->tmr_exp_time;
}
- /* A process used up a full quantum. The interrupt handler stored this
- * process in 'prev_ptr'. Reset the quantum and schedule another process.
- */
- if (prev_ptr->p_sched_ticks <= 0) {
- lock_sched(prev_ptr);
- }
-
/* Inhibit sending a reply. */
return(EDONTREPLY);
}
* Thus the unbillable process' user time is the billable user's system time.
*/
proc_ptr->p_user_time += ticks;
- if (proc_ptr != bill_ptr) bill_ptr->p_sys_time += ticks;
- if (priv(proc_ptr)->s_flags & PREEMPTIBLE) proc_ptr->p_sched_ticks -= ticks;
+ if (priv(proc_ptr)->s_flags & PREEMPTIBLE) {
+ proc_ptr->p_sched_ticks -= ticks;
+ }
+ if (! (priv(proc_ptr)->s_flags & BILLABLE)) {
+ bill_ptr->p_sys_time += ticks;
+ bill_ptr->p_sched_ticks -= ticks;
+ }
/* Check if do_clocktick() must be called. Done for alarms and scheduling.
* Some processes, such as the kernel tasks, cannot be preempted.
}
/* Set ready. The HARDWARE task is never ready. */
- if (rp->p_nr != HARDWARE) lock_ready(rp);
- rp->p_rts_flags = 0;
+ if (rp->p_nr != HARDWARE) {
+ rp->p_rts_flags = 0; /* runnable if no flags */
+ lock_enqueue(rp); /* add to scheduling queues */
+ } else {
+ rp->p_rts_flags = NO_MAP; /* prevent from running */
+ }
/* Code and data segments must be allocated in protected mode. */
alloc_segments(rp);
/* Bits for the system property flags. */
#define PREEMPTIBLE 0x01 /* kernel tasks are not preemptible */
-#define RDY_Q_HEAD 0x02 /* add to queue head instead of tail */
#define BILLABLE 0x04 /* some processes are not billable */
#define SYS_PROC 0x10 /* system processes are privileged */
#define SENDREC_BUSY 0x20 /* sendrec() in progress */
*
* lock_notify: notify a process of a system event
* lock_send: send a message to a process
- * lock_ready: put a process on one of the ready queues
- * lock_unready: remove a process from the ready queues
- * lock_sched: a process has run too long; schedule another one
+ * lock_enqueue: put a process on one of the scheduling queues
+ * lock_dequeue: remove a process from the scheduling queues
*
* Changes:
+ * Aug 19, 2005 rewrote multilevel scheduling code (Jorrit N. Herder)
* Jul 25, 2005 protection and checks in sys_call() (Jorrit N. Herder)
- * May 26, 2005 rewrite of message passing functions (Jorrit N. Herder)
- * May 24, 2005 new, queued NOTIFY system call (Jorrit N. Herder)
- * Oct 28, 2004 new, non-blocking SEND and RECEIVE (Jorrit N. Herder)
- * Oct 28, 2004 rewrite of sys_call() function (Jorrit N. Herder)
- * Aug 19, 2004 rewrite of multilevel scheduling (Jorrit N. Herder)
+ * May 26, 2005 rewrote message passing functions (Jorrit N. Herder)
+ * May 24, 2005 new notification system call (Jorrit N. Herder)
+ * Oct 28, 2004 nonblocking send and receive calls (Jorrit N. Herder)
*
* The code here is critical to make everything work and is important for the
* overall performance of the system. A large fraction of the code deals with
message *m_ptr, unsigned flags) );
FORWARD _PROTOTYPE( int mini_notify, (struct proc *caller_ptr, int dst) );
-FORWARD _PROTOTYPE( void ready, (struct proc *rp) );
-FORWARD _PROTOTYPE( void unready, (struct proc *rp) );
-FORWARD _PROTOTYPE( void sched, (struct proc *rp) );
+FORWARD _PROTOTYPE( void enqueue, (struct proc *rp) );
+FORWARD _PROTOTYPE( void dequeue, (struct proc *rp) );
+FORWARD _PROTOTYPE( void sched, (struct proc *rp, int *queue, int *front) );
FORWARD _PROTOTYPE( void pick_proc, (void) );
-
+FORWARD _PROTOTYPE( void balance_queues, (struct proc *rp) );
#define BuildMess(m_ptr, src, dst_ptr) \
(m_ptr)->m_source = (src); \
#endif /* (CHIP == M68000) */
-
/*===========================================================================*
* sys_call *
*===========================================================================*/
/* Destination is indeed waiting for this message. */
CopyMess(caller_ptr->p_nr, caller_ptr, m_ptr, dst_ptr,
dst_ptr->p_messbuf);
- if ((dst_ptr->p_rts_flags &= ~RECEIVING) == 0) ready(dst_ptr);
+ if ((dst_ptr->p_rts_flags &= ~RECEIVING) == 0) enqueue(dst_ptr);
} else if ( ! (flags & NON_BLOCKING)) {
/* Destination is not waiting. Block and queue caller. */
caller_ptr->p_messbuf = m_ptr;
- if (caller_ptr->p_rts_flags == 0) unready(caller_ptr);
+ if (caller_ptr->p_rts_flags == 0) dequeue(caller_ptr);
caller_ptr->p_rts_flags |= SENDING;
caller_ptr->p_sendto = dst;
/* Process is now blocked. Put in on the destination's queue. */
xpp = &dst_ptr->p_caller_q; /* find end of list */
- while (*xpp != NIL_PROC) xpp = &(*xpp)->p_q_link;
+ while (*xpp != NIL_PROC) xpp = &(*xpp)->p_q_link;
*xpp = caller_ptr; /* add caller to end */
caller_ptr->p_q_link = NIL_PROC; /* mark new end of list */
} else {
if (src == ANY || src == proc_nr(*xpp)) {
/* Found acceptable message. Copy it and update status. */
CopyMess((*xpp)->p_nr, *xpp, (*xpp)->p_messbuf, caller_ptr, m_ptr);
- if (((*xpp)->p_rts_flags &= ~SENDING) == 0) ready(*xpp);
+ if (((*xpp)->p_rts_flags &= ~SENDING) == 0) enqueue(*xpp);
*xpp = (*xpp)->p_q_link; /* remove from queue */
return(OK); /* report success */
}
if ( ! (flags & NON_BLOCKING)) {
caller_ptr->p_getfrom = src;
caller_ptr->p_messbuf = m_ptr;
- if (caller_ptr->p_rts_flags == 0) unready(caller_ptr);
+ if (caller_ptr->p_rts_flags == 0) dequeue(caller_ptr);
caller_ptr->p_rts_flags |= RECEIVING;
return(OK);
} else {
CopyMess(proc_nr(caller_ptr), proc_addr(HARDWARE), &m,
dst_ptr, dst_ptr->p_messbuf);
dst_ptr->p_rts_flags &= ~RECEIVING; /* deblock destination */
- if (dst_ptr->p_rts_flags == 0) ready(dst_ptr);
+ if (dst_ptr->p_rts_flags == 0) enqueue(dst_ptr);
return(OK);
}
/*===========================================================================*
- * ready *
+ * enqueue *
*===========================================================================*/
-PRIVATE void ready(rp)
+PRIVATE void enqueue(rp)
register struct proc *rp; /* this process is now runnable */
{
-/* Add 'rp' to one of the queues of runnable processes. */
- register int q = rp->p_priority; /* scheduling queue to use */
+/* Add 'rp' to one of the queues of runnable processes. We need to decide
+ * where to put the process based on its quantum. If there is time left, it
+ * is added to the front of its queue, so that it can immediately run.
+ * Otherwise it is given a new quantum and added to the rear of the queue.
+ */
+ register int q; /* scheduling queue to use */
+ int time_left; /* quantum fully used? */
+
+ /* Check if the process has time left and determine what queue to use. A
+ * process that consumed a full quantum is given a lower priority, so that
+ * the CPU-bound processes cannot starve I/O-bound processes. When the
+ * threshold is reached, the scheduling queues are balanced to prevent all
+ * processes from ending up in the lowest queue.
+ */
+ time_left = (rp->p_sched_ticks > 0); /* check ticks left */
+ if ( ! time_left) { /* quantum consumed ? */
+ rp->p_sched_ticks = rp->p_quantum_size; /* give new quantum */
+#if DEAD_CODE
+ if (proc_nr(rp) != IDLE) { /* already lowest priority */
+ rp->p_priority ++; /* lower the priority */
+ if (rp->p_priority >= IDLE_Q) /* threshold exceeded */
+ balance_queues(rp); /* rebalance queues */
+ }
+#endif
+ }
+ q = rp->p_priority; /* scheduling queue to use */
#if DEBUG_SCHED_CHECK
- check_runqueues("ready");
- if(rp->p_ready) kprintf("ready() already ready process\n");
+ check_runqueues("enqueue");
+ if(rp->p_ready) kprintf("enqueue() already ready process\n");
#endif
- /* Processes, in principle, are added to the end of the queue. However,
- * user processes are added in front of the queue, because this is a bit
- * fairer to I/O bound processes.
- */
+ /* Now add the process to the queue. */
if (rdy_head[q] == NIL_PROC) { /* add to empty queue */
rdy_head[q] = rdy_tail[q] = rp; /* create a new queue */
rp->p_nextready = NIL_PROC; /* mark new end */
}
- else if (priv(rp)->s_flags & RDY_Q_HEAD) { /* add to head of queue */
+ else if (time_left) { /* add to head of queue */
rp->p_nextready = rdy_head[q]; /* chain head of queue */
rdy_head[q] = rp; /* set new queue head */
}
#if DEBUG_SCHED_CHECK
rp->p_ready = 1;
- check_runqueues("ready");
+ check_runqueues("enqueue");
#endif
}
/*===========================================================================*
- * unready *
+ * dequeue *
*===========================================================================*/
-PRIVATE void unready(rp)
+PRIVATE void dequeue(rp)
register struct proc *rp; /* this process is no longer runnable */
{
/* A process has blocked. See ready for a description of the queues. */
}
#if DEBUG_SCHED_CHECK
- check_runqueues("unready");
- if (! rp->p_ready) kprintf("unready() already unready process\n");
+ check_runqueues("dequeue");
+ if (! rp->p_ready) kprintf("dequeue() already unready process\n");
#endif
/* Now make sure that the process is not in its ready queue. Remove the
prev_xp = *xpp; /* save previous in chain */
}
+#if DEAD_CODE
/* The caller blocked. Reset the scheduling priority and quantums allowed.
* The process' priority may have been lowered if a process consumed too
* many full quantums in a row to prevent damage from infinite loops
*/
rp->p_priority = rp->p_max_priority;
rp->p_full_quantums = QUANTUMS(rp->p_priority);
+#endif
#if DEBUG_SCHED_CHECK
rp->p_ready = 0;
- check_runqueues("unready");
+ check_runqueues("dequeue");
#endif
}
+
/*===========================================================================*
* sched *
*===========================================================================*/
-PRIVATE void sched(sched_ptr)
-struct proc *sched_ptr; /* quantum eating process */
+PRIVATE void sched(sched_ptr, queue, front)
+struct proc *sched_ptr; /* process to be scheduled */
+int *queue; /* return: queue to use */
+int *front; /* return: front or back */
{
- int q;
- /* Check if this process is preemptible, otherwise leave it as is. */
- if (! (priv(sched_ptr)->s_flags & PREEMPTIBLE)) return;
+}
- /* Process exceeded the maximum number of full quantums it is allowed
- * to use in a row. Lower the process' priority, but make sure we don't
- * end up in the IDLE queue. This helps to limit the damage caused by
- * for example infinite loops in high-priority processes.
- * This is a rare situation, so the overhead is acceptable.
- */
- if (-- sched_ptr->p_full_quantums <= 0) { /* exceeded threshold */
- if (sched_ptr->p_priority + 1 < IDLE_Q ) {
- q = sched_ptr->p_priority + 1; /* backup new priority */
- unready(sched_ptr); /* remove from queues */
- sched_ptr->p_priority = q; /* lower priority */
- ready(sched_ptr); /* add to new queue */
- }
- sched_ptr->p_full_quantums = QUANTUMS(sched_ptr->p_priority);
+/*===========================================================================*
+ * balance_queues *
+ *===========================================================================*/
+PRIVATE void balance_queues(pp)
+struct proc *pp; /* process that caused this */
+{
+/* To balance the scheduling queues, they will be rebuilt whenever a process
+ * is put in the lowest queue where IDLE resides. All processes get their
+ * priority raised up to their maximum priority.
+ */
+ register struct proc *rp;
+ register int q;
+ int penalty = pp->p_priority - pp->p_max_priority;
+
+ /* First clean up the old scheduling queues. */
+ for (q=0; q<NR_SCHED_QUEUES; q++) {
+ rdy_head[q] = rdy_tail[q] = NIL_PROC;
}
- /* The current process has run too long. If another low priority (user)
- * process is runnable, put the current process on the tail of its queue,
- * possibly promoting another user to head of the queue. Don't do anything
- * if the queue is empty, or the process to be scheduled is not the head.
+ /* Then rebuild the queues, while balancing priorities. Each process that is
+ * in use may get a higher priority and gets a new quantum. Processes that
+ * are runnable are added to the scheduling queues, unless it concerns the
+ * process that caused this function to be called (it will be added after
+ * returning from this function).
*/
- q = sched_ptr->p_priority; /* convenient shorthand */
- if (rdy_head[q] == sched_ptr) {
- rdy_tail[q]->p_nextready = rdy_head[q]; /* add expired to end */
- rdy_tail[q] = rdy_head[q]; /* set new queue tail */
- rdy_head[q] = rdy_head[q]->p_nextready; /* set new queue head */
- rdy_tail[q]->p_nextready = NIL_PROC; /* mark new queue end */
+ for (rp=BEG_PROC_ADDR; rp<END_PROC_ADDR; rp++) {
+ if (! (rp->p_rts_flags & SLOT_FREE)) { /* update in-use slots */
+ rp->p_priority = MAX(rp->p_priority - penalty, rp->p_max_priority);
+ rp->p_sched_ticks = rp->p_quantum_size;
+ if (rp->p_rts_flags == 0) { /* process is runnable */
+ if (rp != pp) enqueue(rp); /* add it to a queue */
+ }
+ }
}
-
- /* Give the expired process a new quantum and see who is next to run. */
- sched_ptr->p_sched_ticks = sched_ptr->p_quantum_size;
- pick_proc();
}
/*==========================================================================*
- * lock_ready *
+ * lock_enqueue *
*==========================================================================*/
-PUBLIC void lock_ready(rp)
+PUBLIC void lock_enqueue(rp)
struct proc *rp; /* this process is now runnable */
{
-/* Safe gateway to ready() for tasks. */
- lock(3, "ready");
- ready(rp);
+/* Safe gateway to enqueue() for tasks. */
+ lock(3, "enqueue");
+ enqueue(rp);
unlock(3);
}
/*==========================================================================*
- * lock_unready *
+ * lock_dequeue *
*==========================================================================*/
-PUBLIC void lock_unready(rp)
+PUBLIC void lock_dequeue(rp)
struct proc *rp; /* this process is no longer runnable */
{
-/* Safe gateway to unready() for tasks. */
- lock(4, "unready");
- unready(rp);
+/* Safe gateway to dequeue() for tasks. */
+ lock(4, "dequeue");
+ dequeue(rp);
unlock(4);
}
-/*==========================================================================*
- * lock_sched *
- *==========================================================================*/
-PUBLIC void lock_sched(sched_ptr)
-struct proc *sched_ptr;
-{
-/* Safe gateway to sched() for tasks. */
- lock(5, "sched");
- sched(sched_ptr);
- unlock(5);
-}
#define NO_PRIV 0x80 /* keep forked system process from running */
/* Scheduling priorities for p_priority. Values must start at zero (highest
- * priority) and increment. Priorities of the processes in the boot image can
- * be set in table.c.
+ * priority) and increment. Priorities of the processes in the boot image
+ * can be set in table.c. IDLE must have a queue for itself, to prevent low
+ * priority user processes from running round-robin with IDLE.
*/
#define NR_SCHED_QUEUES 16 /* MUST equal minimum priority + 1 */
-#define TASK_Q 0 /* highest, reserved for kernel tasks */
-#define MAX_USER_Q 8 /* highest priority for user processes */
-#define USER_Q 11 /* user default (should correspond to nice 0) */
+#define TASK_Q 0 /* highest, used for kernel tasks */
+#define MAX_USER_Q 0 /* highest priority for user processes */
+#define USER_Q 7 /* default (should correspond to nice 0) */
#define MIN_USER_Q 14 /* minimum priority for user processes */
#define IDLE_Q 15 /* lowest, only IDLE process goes here */
_PROTOTYPE( int sys_call, (int function, int src_dest, message *m_ptr) );
_PROTOTYPE( int lock_notify, (int src, int dst) );
_PROTOTYPE( int lock_send, (int dst, message *m_ptr) );
-_PROTOTYPE( void lock_ready, (struct proc *rp) );
-_PROTOTYPE( void lock_sched, (struct proc *rp) );
-_PROTOTYPE( void lock_unready, (struct proc *rp) );
+_PROTOTYPE( void lock_enqueue, (struct proc *rp) );
+_PROTOTYPE( void lock_dequeue, (struct proc *rp) );
/* start.c */
_PROTOTYPE( void cstart, (U16_t cs, U16_t ds, U16_t mds,
if (! sigismember(&rp->p_pending, sig_nr)) {
sigaddset(&rp->p_pending, sig_nr);
if (! (rp->p_rts_flags & SIGNALED)) { /* other pending */
- if (rp->p_rts_flags == 0) lock_unready(rp); /* make not ready */
+ if (rp->p_rts_flags == 0) lock_dequeue(rp); /* make not ready */
rp->p_rts_flags |= SIGNALED | SIG_PENDING; /* update flags */
send_sig(PM_PROC_NR, SIGKSIG);
}
/* PM has finished one kernel signal. Perhaps process is ready now? */
if (! (rp->p_rts_flags & SIGNALED)) /* new signal arrived */
if ((rp->p_rts_flags &= ~SIG_PENDING)==0) /* remove pending flag */
- lock_ready(rp); /* ready if no flags */
+ lock_enqueue(rp); /* ready if no flags */
return(OK);
}
#endif
rp->p_reg.pc = (reg_t) m_ptr->PR_IP_PTR; /* set pc */
rp->p_rts_flags &= ~RECEIVING; /* PM does not reply to EXEC call */
- if (rp->p_rts_flags == 0) lock_ready(rp);
+ if (rp->p_rts_flags == 0) lock_enqueue(rp);
/* Save command name for debugging, ps(1) output, etc. */
phys_name = numap_local(m_ptr->m_source, (vir_bytes) m_ptr->PR_NAME_PTR,
reset_timer(&priv(rc)->s_alarm_timer);
/* Make sure that the exiting process is no longer scheduled. */
- if (rc->p_rts_flags == 0) lock_unready(rc);
+ if (rc->p_rts_flags == 0) lock_dequeue(rc);
/* If the process being terminated happens to be queued trying to send a
* message (e.g., the process was killed by a signal, rather than it doing
rpc->p_user_time = 0; /* set all the accounting times to 0 */
rpc->p_sys_time = 0;
+ rpc->p_sched_ticks /= 2; /* parent and child have to share quantum */
+ rpp->p_sched_ticks /= 2;
+
/* If the parent is a privileged process, take away the privileges from the
* child process and inhibit it from running by setting the NO_PRIV flag.
* The caller should explicitely set the new privileges before executing.
#endif
old_flags = rp->p_rts_flags; /* save the previous value of the flags */
rp->p_rts_flags &= ~NO_MAP;
- if (old_flags != 0 && rp->p_rts_flags == 0) lock_ready(rp);
+ if (old_flags != 0 && rp->p_rts_flags == 0) lock_enqueue(rp);
return(OK);
}
* queue if it is runnable.
*/
rp = proc_addr(proc_nr);
- lock_unready(rp);
+ lock_dequeue(rp);
rp->p_max_priority = rp->p_priority = new_q;
- if (! rp->p_rts_flags) lock_ready(rp);
+ if (! rp->p_rts_flags) lock_enqueue(rp);
return(OK);
}
/* Done. Privileges have been set. Allow process to run again. */
old_flags = rp->p_rts_flags; /* save value of the flags */
rp->p_rts_flags &= ~NO_PRIV;
- if (old_flags != 0 && rp->p_rts_flags == 0) lock_ready(rp);
+ if (old_flags != 0 && rp->p_rts_flags == 0) lock_enqueue(rp);
return(OK);
}
if (isemptyp(rp)) return(EIO);
switch (tr_request) {
case T_STOP: /* stop process */
- if (rp->p_rts_flags == 0) lock_unready(rp);
+ if (rp->p_rts_flags == 0) lock_dequeue(rp);
rp->p_rts_flags |= P_STOP;
rp->p_reg.psw &= ~TRACEBIT; /* clear trace bit */
return(OK);
case T_RESUME: /* resume execution */
rp->p_rts_flags &= ~P_STOP;
- if (rp->p_rts_flags == 0) lock_ready(rp);
+ if (rp->p_rts_flags == 0) lock_enqueue(rp);
m_ptr->CTL_DATA = 0;
break;
case T_STEP: /* set trace bit */
rp->p_reg.psw |= TRACEBIT;
rp->p_rts_flags &= ~P_STOP;
- if (rp->p_rts_flags == 0) lock_ready(rp);
+ if (rp->p_rts_flags == 0) lock_enqueue(rp);
m_ptr->CTL_DATA = 0;
break;
PUBLIC char *t_stack[TOT_STACK_SPACE / sizeof(char *)];
/* Define flags for the various process types. */
-#define IDL_F (BILLABLE | SYS_PROC) /* idle task */
+#define IDL_F (SYS_PROC | PREEMPTIBLE | BILLABLE) /* idle task */
#define TSK_F (SYS_PROC) /* kernel tasks */
-#define SRV_F (BILLABLE | PREEMPTIBLE | SYS_PROC) /* system services */
-#define USR_F (PREEMPTIBLE | BILLABLE) /* user processes */
+#define SRV_F (SYS_PROC | PREEMPTIBLE) /* system services */
+#define USR_F (BILLABLE | PREEMPTIBLE) /* user processes */
/* Define system call traps for the various process types. These call masks
* determine what system call traps a process is allowed to make.
#define FS_C (c(SYS_KILL) | c(SYS_VIRCOPY) | c(SYS_VIRVCOPY) | c(SYS_UMAP) \
| c(SYS_GETINFO) | c(SYS_EXIT) | c(SYS_TIMES) | c(SYS_SETALARM))
#define DRV_C (FS_C | c(SYS_SEGCTL) | c(SYS_IRQCTL) | c(SYS_INT86) \
- | c(SYS_DEVIO) | c(SYS_VDEVIO) | c(SYS_SDEVIO) | c(SYS_PHYSCOPY) | c(SYS_PHYSVCOPY))
+ | c(SYS_DEVIO) | c(SYS_VDEVIO) | c(SYS_SDEVIO))
+#define MEM_C (DRV_C | c(SYS_PHYSCOPY) | c(SYS_PHYSVCOPY))
/* The system image table lists all programs that are part of the boot image.
* The order of the entries here MUST agree with the order of the programs
*/
PUBLIC struct boot_image image[] = {
/* process nr, pc, flags, qs, queue, stack, traps, ipcto, call, name */
- { IDLE, idle_task, IDL_F, 32, IDLE_Q, IDL_S, 0, 0, 0, "IDLE" },
- { CLOCK,clock_task, TSK_F, 0, TASK_Q, TSK_S, TSK_T, 0, 0, "CLOCK" },
- { SYSTEM, sys_task, TSK_F, 0, TASK_Q, TSK_S, TSK_T, 0, 0, "SYSTEM"},
- { HARDWARE, 0, TSK_F, 0, TASK_Q, HRD_S, 0, 0, 0, "KERNEL"},
- { PM_PROC_NR, 0, SRV_F, 16, 3, 0, SRV_T, SRV_M, PM_C, "pm" },
- { FS_PROC_NR, 0, SRV_F, 16, 4, 0, SRV_T, SRV_M, FS_C, "fs" },
- { SM_PROC_NR, 0, SRV_F, 16, 3, 0, SRV_T, SYS_M, SM_C, "sm" },
+ { IDLE, idle_task, IDL_F, 8, IDLE_Q, IDL_S, 0, 0, 0, "IDLE" },
+ { CLOCK,clock_task, TSK_F, 64, TASK_Q, TSK_S, TSK_T, 0, 0, "CLOCK" },
+ { SYSTEM, sys_task, TSK_F, 64, TASK_Q, TSK_S, TSK_T, 0, 0, "SYSTEM"},
+ { HARDWARE, 0, TSK_F, 64, TASK_Q, HRD_S, 0, 0, 0, "KERNEL"},
+ { PM_PROC_NR, 0, SRV_F, 32, 3, 0, SRV_T, SRV_M, PM_C, "pm" },
+ { FS_PROC_NR, 0, SRV_F, 32, 4, 0, SRV_T, SRV_M, FS_C, "fs" },
+ { SM_PROC_NR, 0, SRV_F, 32, 3, 0, SRV_T, SYS_M, SM_C, "sm" },
{ TTY_PROC_NR, 0, SRV_F, 16, 1, 0, SRV_T, SYS_M, DRV_C, "tty" },
- { MEM_PROC_NR, 0, SRV_F, 16, 2, 0, SRV_T, DRV_M, DRV_C, "memory"},
+ { MEM_PROC_NR, 0, SRV_F, 16, 2, 0, SRV_T, DRV_M, MEM_C, "memory"},
{ LOG_PROC_NR, 0, SRV_F, 16, 2, 0, SRV_T, SYS_M, DRV_C, "log" },
{ DRVR_PROC_NR, 0, SRV_F, 16, 2, 0, SRV_T, SYS_M, DRV_C, "driver"},
{ INIT_PROC_NR, 0, USR_F, 8, USER_Q, 0, USR_T, USR_M, 0, "init" },
proc_nr_t proc_nr; /* process number to use */
task_t *initial_pc; /* start function for tasks */
int flags; /* process flags */
- char quantum; /* quantum (tick count) */
+ unsigned char quantum; /* quantum (tick count) */
int priority; /* scheduling priority */
int stksize; /* stack size for tasks */
short trap_mask; /* allowed system call traps */
{
static char str[10];
str[0] = (flags & PREEMPTIBLE) ? 'P' : '-';
- str[1] = (flags & RDY_Q_HEAD) ? 'Q' : '-';
+ str[1] = '-';
str[2] = (flags & BILLABLE) ? 'B' : '-';
str[3] = (flags & SYS_PROC) ? 'S' : '-';
str[4] = '-';