From a01645b7882f2f69cf96e18430c508f39c1c74b3 Mon Sep 17 00:00:00 2001 From: Jorrit Herder Date: Fri, 19 Aug 2005 16:43:28 +0000 Subject: [PATCH] New scheduling code in kernel. Work in progress. Round-robin within one priority queue works fine. Ageing algorithm to be done. --- kernel/clock.c | 27 ++++-- kernel/main.c | 8 +- kernel/priv.h | 1 - kernel/proc.c | 192 ++++++++++++++++++++----------------- kernel/proc.h | 11 ++- kernel/proto.h | 5 +- kernel/system.c | 2 +- kernel/system/do_endksig.c | 2 +- kernel/system/do_exec.c | 2 +- kernel/system/do_exit.c | 2 +- kernel/system/do_fork.c | 3 + kernel/system/do_newmap.c | 2 +- kernel/system/do_nice.c | 4 +- kernel/system/do_privctl.c | 2 +- kernel/system/do_trace.c | 6 +- kernel/table.c | 25 ++--- kernel/type.h | 2 +- servers/is/dmp_kernel.c | 2 +- 18 files changed, 163 insertions(+), 135 deletions(-) diff --git a/kernel/clock.c b/kernel/clock.c index d1e378fe5..fbf2d8430 100755 --- a/kernel/clock.c +++ b/kernel/clock.c @@ -112,6 +112,17 @@ message *m_ptr; /* pointer to request message */ * is called on those clock ticks when a lot of work needs to be done. */ + /* A process used up a full quantum. The interrupt handler stored this + * process in 'prev_ptr'. First make sure that the process is not on the + * scheduling queues. Then announce the process ready again. Since it has + * no more time left, it will get a new quantum and inserted at the right + * place in the queues. As a side-effect a new process will be scheduled. + */ + if (prev_ptr->p_sched_ticks <= 0 && priv(prev_ptr)->s_flags & PREEMPTIBLE) { + lock_dequeue(prev_ptr); /* take it off the queues */ + lock_enqueue(prev_ptr); /* and reinsert it again */ + } + /* Check if a clock timer expired and run its watchdog function. */ if (next_timeout <= realtime) { tmrs_exptimers(&clock_timers, realtime, NULL); @@ -119,13 +130,6 @@ message *m_ptr; /* pointer to request message */ TMR_NEVER : clock_timers->tmr_exp_time; } - /* A process used up a full quantum. The interrupt handler stored this - * process in 'prev_ptr'. Reset the quantum and schedule another process. - */ - if (prev_ptr->p_sched_ticks <= 0) { - lock_sched(prev_ptr); - } - /* Inhibit sending a reply. */ return(EDONTREPLY); } @@ -177,8 +181,13 @@ irq_hook_t *hook; * Thus the unbillable process' user time is the billable user's system time. */ proc_ptr->p_user_time += ticks; - if (proc_ptr != bill_ptr) bill_ptr->p_sys_time += ticks; - if (priv(proc_ptr)->s_flags & PREEMPTIBLE) proc_ptr->p_sched_ticks -= ticks; + if (priv(proc_ptr)->s_flags & PREEMPTIBLE) { + proc_ptr->p_sched_ticks -= ticks; + } + if (! (priv(proc_ptr)->s_flags & BILLABLE)) { + bill_ptr->p_sys_time += ticks; + bill_ptr->p_sched_ticks -= ticks; + } /* Check if do_clocktick() must be called. Done for alarms and scheduling. * Some processes, such as the kernel tasks, cannot be preempted. diff --git a/kernel/main.c b/kernel/main.c index 5c6805a1e..8af1329d7 100755 --- a/kernel/main.c +++ b/kernel/main.c @@ -133,8 +133,12 @@ PUBLIC void main() } /* Set ready. The HARDWARE task is never ready. */ - if (rp->p_nr != HARDWARE) lock_ready(rp); - rp->p_rts_flags = 0; + if (rp->p_nr != HARDWARE) { + rp->p_rts_flags = 0; /* runnable if no flags */ + lock_enqueue(rp); /* add to scheduling queues */ + } else { + rp->p_rts_flags = NO_MAP; /* prevent from running */ + } /* Code and data segments must be allocated in protected mode. 
*/ alloc_segments(rp); diff --git a/kernel/priv.h b/kernel/priv.h index 6c54a00fd..db541b3ac 100755 --- a/kernel/priv.h +++ b/kernel/priv.h @@ -40,7 +40,6 @@ struct priv { /* Bits for the system property flags. */ #define PREEMPTIBLE 0x01 /* kernel tasks are not preemptible */ -#define RDY_Q_HEAD 0x02 /* add to queue head instead of tail */ #define BILLABLE 0x04 /* some processes are not billable */ #define SYS_PROC 0x10 /* system processes are privileged */ #define SENDREC_BUSY 0x20 /* sendrec() in progress */ diff --git a/kernel/proc.c b/kernel/proc.c index 570cb9d8d..618dbb19a 100755 --- a/kernel/proc.c +++ b/kernel/proc.c @@ -8,17 +8,15 @@ * * lock_notify: notify a process of a system event * lock_send: send a message to a process - * lock_ready: put a process on one of the ready queues - * lock_unready: remove a process from the ready queues - * lock_sched: a process has run too long; schedule another one + * lock_enqueue: put a process on one of the scheduling queues + * lock_dequeue: remove a process from the scheduling queues * * Changes: + * Aug 19, 2005 rewrote multilevel scheduling code (Jorrit N. Herder) * Jul 25, 2005 protection and checks in sys_call() (Jorrit N. Herder) - * May 26, 2005 rewrite of message passing functions (Jorrit N. Herder) - * May 24, 2005 new, queued NOTIFY system call (Jorrit N. Herder) - * Oct 28, 2004 new, non-blocking SEND and RECEIVE (Jorrit N. Herder) - * Oct 28, 2004 rewrite of sys_call() function (Jorrit N. Herder) - * Aug 19, 2004 rewrite of multilevel scheduling (Jorrit N. Herder) + * May 26, 2005 rewrote message passing functions (Jorrit N. Herder) + * May 24, 2005 new notification system call (Jorrit N. Herder) + * Oct 28, 2004 nonblocking send and receive calls (Jorrit N. Herder) * * The code here is critical to make everything work and is important for the * overall performance of the system. A large fraction of the code deals with @@ -54,11 +52,11 @@ FORWARD _PROTOTYPE( int mini_receive, (struct proc *caller_ptr, int src, message *m_ptr, unsigned flags) ); FORWARD _PROTOTYPE( int mini_notify, (struct proc *caller_ptr, int dst) ); -FORWARD _PROTOTYPE( void ready, (struct proc *rp) ); -FORWARD _PROTOTYPE( void unready, (struct proc *rp) ); -FORWARD _PROTOTYPE( void sched, (struct proc *rp) ); +FORWARD _PROTOTYPE( void enqueue, (struct proc *rp) ); +FORWARD _PROTOTYPE( void dequeue, (struct proc *rp) ); +FORWARD _PROTOTYPE( void sched, (struct proc *rp, int *queue, int *front) ); FORWARD _PROTOTYPE( void pick_proc, (void) ); - +FORWARD _PROTOTYPE( void balance_queues, (struct proc *rp) ); #define BuildMess(m_ptr, src, dst_ptr) \ (m_ptr)->m_source = (src); \ @@ -87,7 +85,6 @@ FORWARD _PROTOTYPE( void pick_proc, (void) ); #endif /* (CHIP == M68000) */ - /*===========================================================================* * sys_call * *===========================================================================*/ @@ -231,17 +228,17 @@ unsigned flags; /* system call flags */ /* Destination is indeed waiting for this message. */ CopyMess(caller_ptr->p_nr, caller_ptr, m_ptr, dst_ptr, dst_ptr->p_messbuf); - if ((dst_ptr->p_rts_flags &= ~RECEIVING) == 0) ready(dst_ptr); + if ((dst_ptr->p_rts_flags &= ~RECEIVING) == 0) enqueue(dst_ptr); } else if ( ! (flags & NON_BLOCKING)) { /* Destination is not waiting. Block and queue caller. 
*/ caller_ptr->p_messbuf = m_ptr; - if (caller_ptr->p_rts_flags == 0) unready(caller_ptr); + if (caller_ptr->p_rts_flags == 0) dequeue(caller_ptr); caller_ptr->p_rts_flags |= SENDING; caller_ptr->p_sendto = dst; /* Process is now blocked. Put in on the destination's queue. */ xpp = &dst_ptr->p_caller_q; /* find end of list */ - while (*xpp != NIL_PROC) xpp = &(*xpp)->p_q_link; + while (*xpp != NIL_PROC) xpp = &(*xpp)->p_q_link; *xpp = caller_ptr; /* add caller to end */ caller_ptr->p_q_link = NIL_PROC; /* mark new end of list */ } else { @@ -305,7 +302,7 @@ unsigned flags; /* system call flags */ if (src == ANY || src == proc_nr(*xpp)) { /* Found acceptable message. Copy it and update status. */ CopyMess((*xpp)->p_nr, *xpp, (*xpp)->p_messbuf, caller_ptr, m_ptr); - if (((*xpp)->p_rts_flags &= ~SENDING) == 0) ready(*xpp); + if (((*xpp)->p_rts_flags &= ~SENDING) == 0) enqueue(*xpp); *xpp = (*xpp)->p_q_link; /* remove from queue */ return(OK); /* report success */ } @@ -319,7 +316,7 @@ unsigned flags; /* system call flags */ if ( ! (flags & NON_BLOCKING)) { caller_ptr->p_getfrom = src; caller_ptr->p_messbuf = m_ptr; - if (caller_ptr->p_rts_flags == 0) unready(caller_ptr); + if (caller_ptr->p_rts_flags == 0) dequeue(caller_ptr); caller_ptr->p_rts_flags |= RECEIVING; return(OK); } else { @@ -354,7 +351,7 @@ int dst; /* which process to notify */ CopyMess(proc_nr(caller_ptr), proc_addr(HARDWARE), &m, dst_ptr, dst_ptr->p_messbuf); dst_ptr->p_rts_flags &= ~RECEIVING; /* deblock destination */ - if (dst_ptr->p_rts_flags == 0) ready(dst_ptr); + if (dst_ptr->p_rts_flags == 0) enqueue(dst_ptr); return(OK); } @@ -399,28 +396,49 @@ int dst; /* who is to be notified */ /*===========================================================================* - * ready * + * enqueue * *===========================================================================*/ -PRIVATE void ready(rp) +PRIVATE void enqueue(rp) register struct proc *rp; /* this process is now runnable */ { -/* Add 'rp' to one of the queues of runnable processes. */ - register int q = rp->p_priority; /* scheduling queue to use */ +/* Add 'rp' to one of the queues of runnable processes. We need to decide + * where to put the process based on its quantum. If there is time left, it + * is added to the front of its queue, so that it can immediately run. + * Otherwise its is given a new quantum and added to the rear of the queue. + */ + register int q; /* scheduling queue to use */ + int time_left; /* quantum fully used? */ + + /* Check if the process has time left and determine what queue to use. A + * process that consumed a full quantum is given a lower priority, so that + * the CPU-bound processes cannot starve I/O-bound processes. When the + * threshold is reached, the scheduling queues are balanced to prevent all + * processes from ending up in the lowest queue. + */ + time_left = (rp->p_sched_ticks > 0); /* check ticks left */ + if ( ! time_left) { /* quantum consumed ? 
*/ + rp->p_sched_ticks = rp->p_quantum_size; /* give new quantum */ +#if DEAD_CODE + if (proc_nr(rp) != IDLE) { /* already lowest priority */ + rp->p_priority ++; /* lower the priority */ + if (rp->p_priority >= IDLE_Q) /* threshold exceeded */ + balance_queues(rp); /* rebalance queues */ + } +#endif + } + q = rp->p_priority; /* scheduling queue to use */ #if DEBUG_SCHED_CHECK - check_runqueues("ready"); - if(rp->p_ready) kprintf("ready() already ready process\n"); + check_runqueues("enqueue"); + if(rp->p_ready) kprintf("enqueue() already ready process\n"); #endif - /* Processes, in principle, are added to the end of the queue. However, - * user processes are added in front of the queue, because this is a bit - * fairer to I/O bound processes. - */ + /* Now add the process to the queue. */ if (rdy_head[q] == NIL_PROC) { /* add to empty queue */ rdy_head[q] = rdy_tail[q] = rp; /* create a new queue */ rp->p_nextready = NIL_PROC; /* mark new end */ } - else if (priv(rp)->s_flags & RDY_Q_HEAD) { /* add to head of queue */ + else if (time_left) { /* add to head of queue */ rp->p_nextready = rdy_head[q]; /* chain head of queue */ rdy_head[q] = rp; /* set new queue head */ } @@ -433,14 +451,14 @@ register struct proc *rp; /* this process is now runnable */ #if DEBUG_SCHED_CHECK rp->p_ready = 1; - check_runqueues("ready"); + check_runqueues("enqueue"); #endif } /*===========================================================================* - * unready * + * dequeue * *===========================================================================*/ -PRIVATE void unready(rp) +PRIVATE void dequeue(rp) register struct proc *rp; /* this process is no longer runnable */ { /* A process has blocked. See ready for a description of the queues. */ @@ -456,8 +474,8 @@ register struct proc *rp; /* this process is no longer runnable */ } #if DEBUG_SCHED_CHECK - check_runqueues("unready"); - if (! rp->p_ready) kprintf("unready() already unready process\n"); + check_runqueues("dequeue"); + if (! rp->p_ready) kprintf("dequeue() already unready process\n"); #endif /* Now make sure that the process is not in its ready queue. Remove the @@ -478,62 +496,67 @@ register struct proc *rp; /* this process is no longer runnable */ prev_xp = *xpp; /* save previous in chain */ } +#if DEAD_CODE /* The caller blocked. Reset the scheduling priority and quantums allowed. * The process' priority may have been lowered if a process consumed too * many full quantums in a row to prevent damage from infinite loops */ rp->p_priority = rp->p_max_priority; rp->p_full_quantums = QUANTUMS(rp->p_priority); +#endif #if DEBUG_SCHED_CHECK rp->p_ready = 0; - check_runqueues("unready"); + check_runqueues("dequeue"); #endif } + /*===========================================================================* * sched * *===========================================================================*/ -PRIVATE void sched(sched_ptr) -struct proc *sched_ptr; /* quantum eating process */ +PRIVATE void sched(sched_ptr, queue, front) +struct proc *sched_ptr; /* process to be scheduled */ +int *queue; /* return: queue to use */ +int *front; /* return: front or back */ { - int q; - /* Check if this process is preemptible, otherwise leave it as is. */ - if (! (priv(sched_ptr)->s_flags & PREEMPTIBLE)) return; +} - /* Process exceeded the maximum number of full quantums it is allowed - * to use in a row. Lower the process' priority, but make sure we don't - * end up in the IDLE queue. 
This helps to limit the damage caused by - * for example infinite loops in high-priority processes. - * This is a rare situation, so the overhead is acceptable. - */ - if (-- sched_ptr->p_full_quantums <= 0) { /* exceeded threshold */ - if (sched_ptr->p_priority + 1 < IDLE_Q ) { - q = sched_ptr->p_priority + 1; /* backup new priority */ - unready(sched_ptr); /* remove from queues */ - sched_ptr->p_priority = q; /* lower priority */ - ready(sched_ptr); /* add to new queue */ - } - sched_ptr->p_full_quantums = QUANTUMS(sched_ptr->p_priority); +/*===========================================================================* + * balance_queues * + *===========================================================================*/ +PRIVATE void balance_queues(pp) +struct proc *pp; /* process that caused this */ +{ +/* To balance the scheduling queues, they will be rebuild whenever a process + * is put in the lowest queues where IDLE resides. All processes get their + * priority raised up to their maximum priority. + */ + register struct proc *rp; + register int q; + int penalty = pp->p_priority - pp->p_max_priority; + + /* First clean up the old scheduling queues. */ + for (q=0; qp_priority; /* convenient shorthand */ - if (rdy_head[q] == sched_ptr) { - rdy_tail[q]->p_nextready = rdy_head[q]; /* add expired to end */ - rdy_tail[q] = rdy_head[q]; /* set new queue tail */ - rdy_head[q] = rdy_head[q]->p_nextready; /* set new queue head */ - rdy_tail[q]->p_nextready = NIL_PROC; /* mark new queue end */ + for (rp=BEG_PROC_ADDR; rpp_rts_flags & SLOT_FREE)) { /* update in-use slots */ + rp->p_priority = MAX(rp->p_priority - penalty, rp->p_max_priority); + rp->p_sched_ticks = rp->p_quantum_size; + if (rp->p_rts_flags == 0) { /* process is runnable */ + if (rp != pp) enqueue(rp); /* add it to a queue */ + } + } } - - /* Give the expired process a new quantum and see who is next to run. */ - sched_ptr->p_sched_ticks = sched_ptr->p_quantum_size; - pick_proc(); } @@ -581,38 +604,27 @@ message *m_ptr; /* pointer to message buffer */ /*==========================================================================* - * lock_ready * + * lock_enqueue * *==========================================================================*/ -PUBLIC void lock_ready(rp) +PUBLIC void lock_enqueue(rp) struct proc *rp; /* this process is now runnable */ { -/* Safe gateway to ready() for tasks. */ - lock(3, "ready"); - ready(rp); +/* Safe gateway to enqueue() for tasks. */ + lock(3, "enqueue"); + enqueue(rp); unlock(3); } /*==========================================================================* - * lock_unready * + * lock_dequeue * *==========================================================================*/ -PUBLIC void lock_unready(rp) +PUBLIC void lock_dequeue(rp) struct proc *rp; /* this process is no longer runnable */ { -/* Safe gateway to unready() for tasks. */ - lock(4, "unready"); - unready(rp); +/* Safe gateway to dequeue() for tasks. */ + lock(4, "dequeue"); + dequeue(rp); unlock(4); } -/*==========================================================================* - * lock_sched * - *==========================================================================*/ -PUBLIC void lock_sched(sched_ptr) -struct proc *sched_ptr; -{ -/* Safe gateway to sched() for tasks. 
*/ - lock(5, "sched"); - sched(sched_ptr); - unlock(5); -} diff --git a/kernel/proc.h b/kernel/proc.h index 6e5bc8da3..12b88583e 100755 --- a/kernel/proc.h +++ b/kernel/proc.h @@ -69,13 +69,14 @@ struct proc { #define NO_PRIV 0x80 /* keep forked system process from running */ /* Scheduling priorities for p_priority. Values must start at zero (highest - * priority) and increment. Priorities of the processes in the boot image can - * be set in table.c. + * priority) and increment. Priorities of the processes in the boot image + * can be set in table.c. IDLE must have a queue for itself, to prevent low + * priority user processes to run round-robin with IDLE. */ #define NR_SCHED_QUEUES 16 /* MUST equal minimum priority + 1 */ -#define TASK_Q 0 /* highest, reserved for kernel tasks */ -#define MAX_USER_Q 8 /* highest priority for user processes */ -#define USER_Q 11 /* user default (should correspond to nice 0) */ +#define TASK_Q 0 /* highest, used for kernel tasks */ +#define MAX_USER_Q 0 /* highest priority for user processes */ +#define USER_Q 7 /* default (should correspond to nice 0) */ #define MIN_USER_Q 14 /* minimum priority for user processes */ #define IDLE_Q 15 /* lowest, only IDLE process goes here */ diff --git a/kernel/proto.h b/kernel/proto.h index 7a1e3c320..b940b5def 100755 --- a/kernel/proto.h +++ b/kernel/proto.h @@ -27,9 +27,8 @@ _PROTOTYPE( void panic, (_CONST char *s, int n) ); _PROTOTYPE( int sys_call, (int function, int src_dest, message *m_ptr) ); _PROTOTYPE( int lock_notify, (int src, int dst) ); _PROTOTYPE( int lock_send, (int dst, message *m_ptr) ); -_PROTOTYPE( void lock_ready, (struct proc *rp) ); -_PROTOTYPE( void lock_sched, (struct proc *rp) ); -_PROTOTYPE( void lock_unready, (struct proc *rp) ); +_PROTOTYPE( void lock_enqueue, (struct proc *rp) ); +_PROTOTYPE( void lock_dequeue, (struct proc *rp) ); /* start.c */ _PROTOTYPE( void cstart, (U16_t cs, U16_t ds, U16_t mds, diff --git a/kernel/system.c b/kernel/system.c index 438a72091..a2f237bbb 100755 --- a/kernel/system.c +++ b/kernel/system.c @@ -280,7 +280,7 @@ int sig_nr; /* signal to be sent, 1 to _NSIG */ if (! sigismember(&rp->p_pending, sig_nr)) { sigaddset(&rp->p_pending, sig_nr); if (! (rp->p_rts_flags & SIGNALED)) { /* other pending */ - if (rp->p_rts_flags == 0) lock_unready(rp); /* make not ready */ + if (rp->p_rts_flags == 0) lock_dequeue(rp); /* make not ready */ rp->p_rts_flags |= SIGNALED | SIG_PENDING; /* update flags */ send_sig(PM_PROC_NR, SIGKSIG); } diff --git a/kernel/system/do_endksig.c b/kernel/system/do_endksig.c index 5311bf570..1dca1d432 100644 --- a/kernel/system/do_endksig.c +++ b/kernel/system/do_endksig.c @@ -32,7 +32,7 @@ message *m_ptr; /* pointer to request message */ /* PM has finished one kernel signal. Perhaps process is ready now? */ if (! (rp->p_rts_flags & SIGNALED)) /* new signal arrived */ if ((rp->p_rts_flags &= ~SIG_PENDING)==0) /* remove pending flag */ - lock_ready(rp); /* ready if no flags */ + lock_enqueue(rp); /* ready if no flags */ return(OK); } diff --git a/kernel/system/do_exec.c b/kernel/system/do_exec.c index fa5828cb3..14d7707bc 100644 --- a/kernel/system/do_exec.c +++ b/kernel/system/do_exec.c @@ -41,7 +41,7 @@ register message *m_ptr; /* pointer to request message */ #endif rp->p_reg.pc = (reg_t) m_ptr->PR_IP_PTR; /* set pc */ rp->p_rts_flags &= ~RECEIVING; /* PM does not reply to EXEC call */ - if (rp->p_rts_flags == 0) lock_ready(rp); + if (rp->p_rts_flags == 0) lock_enqueue(rp); /* Save command name for debugging, ps(1) output, etc. 
*/ phys_name = numap_local(m_ptr->m_source, (vir_bytes) m_ptr->PR_NAME_PTR, diff --git a/kernel/system/do_exit.c b/kernel/system/do_exit.c index 3eac7891b..1d15e1c8d 100644 --- a/kernel/system/do_exit.c +++ b/kernel/system/do_exit.c @@ -55,7 +55,7 @@ register struct proc *rc; /* slot of process to clean up */ reset_timer(&priv(rc)->s_alarm_timer); /* Make sure that the exiting process is no longer scheduled. */ - if (rc->p_rts_flags == 0) lock_unready(rc); + if (rc->p_rts_flags == 0) lock_dequeue(rc); /* If the process being terminated happens to be queued trying to send a * message (e.g., the process was killed by a signal, rather than it doing diff --git a/kernel/system/do_fork.c b/kernel/system/do_fork.c index a1b0b6008..6a1ed6a99 100644 --- a/kernel/system/do_fork.c +++ b/kernel/system/do_fork.c @@ -51,6 +51,9 @@ register message *m_ptr; /* pointer to request message */ rpc->p_user_time = 0; /* set all the accounting times to 0 */ rpc->p_sys_time = 0; + rpc->p_sched_ticks /= 2; /* parent and child have to share quantum */ + rpp->p_sched_ticks /= 2; + /* If the parent is a privileged process, take away the privileges from the * child process and inhibit it from running by setting the NO_PRIV flag. * The caller should explicitely set the new privileges before executing. diff --git a/kernel/system/do_newmap.c b/kernel/system/do_newmap.c index c9d70ce7b..8fc80f778 100644 --- a/kernel/system/do_newmap.c +++ b/kernel/system/do_newmap.c @@ -42,7 +42,7 @@ message *m_ptr; /* pointer to request message */ #endif old_flags = rp->p_rts_flags; /* save the previous value of the flags */ rp->p_rts_flags &= ~NO_MAP; - if (old_flags != 0 && rp->p_rts_flags == 0) lock_ready(rp); + if (old_flags != 0 && rp->p_rts_flags == 0) lock_enqueue(rp); return(OK); } diff --git a/kernel/system/do_nice.c b/kernel/system/do_nice.c index 879ae32ff..c879f7e97 100644 --- a/kernel/system/do_nice.c +++ b/kernel/system/do_nice.c @@ -40,9 +40,9 @@ PUBLIC int do_nice(message *m_ptr) * queue if it is runnable. */ rp = proc_addr(proc_nr); - lock_unready(rp); + lock_dequeue(rp); rp->p_max_priority = rp->p_priority = new_q; - if (! rp->p_rts_flags) lock_ready(rp); + if (! rp->p_rts_flags) lock_enqueue(rp); return(OK); } diff --git a/kernel/system/do_privctl.c b/kernel/system/do_privctl.c index 8c2e8011c..bfd06a445 100644 --- a/kernel/system/do_privctl.c +++ b/kernel/system/do_privctl.c @@ -76,7 +76,7 @@ message *m_ptr; /* pointer to request message */ /* Done. Privileges have been set. Allow process to run again. 
*/ old_flags = rp->p_rts_flags; /* save value of the flags */ rp->p_rts_flags &= ~NO_PRIV; - if (old_flags != 0 && rp->p_rts_flags == 0) lock_ready(rp); + if (old_flags != 0 && rp->p_rts_flags == 0) lock_enqueue(rp); return(OK); } diff --git a/kernel/system/do_trace.c b/kernel/system/do_trace.c index 278ce9bd5..7b7328a07 100644 --- a/kernel/system/do_trace.c +++ b/kernel/system/do_trace.c @@ -54,7 +54,7 @@ register message *m_ptr; if (isemptyp(rp)) return(EIO); switch (tr_request) { case T_STOP: /* stop process */ - if (rp->p_rts_flags == 0) lock_unready(rp); + if (rp->p_rts_flags == 0) lock_dequeue(rp); rp->p_rts_flags |= P_STOP; rp->p_reg.psw &= ~TRACEBIT; /* clear trace bit */ return(OK); @@ -126,14 +126,14 @@ register message *m_ptr; case T_RESUME: /* resume execution */ rp->p_rts_flags &= ~P_STOP; - if (rp->p_rts_flags == 0) lock_ready(rp); + if (rp->p_rts_flags == 0) lock_enqueue(rp); m_ptr->CTL_DATA = 0; break; case T_STEP: /* set trace bit */ rp->p_reg.psw |= TRACEBIT; rp->p_rts_flags &= ~P_STOP; - if (rp->p_rts_flags == 0) lock_ready(rp); + if (rp->p_rts_flags == 0) lock_enqueue(rp); m_ptr->CTL_DATA = 0; break; diff --git a/kernel/table.c b/kernel/table.c index 0b474c30c..2a9477d0f 100755 --- a/kernel/table.c +++ b/kernel/table.c @@ -46,10 +46,10 @@ PUBLIC char *t_stack[TOT_STACK_SPACE / sizeof(char *)]; /* Define flags for the various process types. */ -#define IDL_F (BILLABLE | SYS_PROC) /* idle task */ +#define IDL_F (SYS_PROC | PREEMPTIBLE | BILLABLE) /* idle task */ #define TSK_F (SYS_PROC) /* kernel tasks */ -#define SRV_F (BILLABLE | PREEMPTIBLE | SYS_PROC) /* system services */ -#define USR_F (PREEMPTIBLE | BILLABLE) /* user processes */ +#define SRV_F (SYS_PROC | PREEMPTIBLE) /* system services */ +#define USR_F (BILLABLE | PREEMPTIBLE) /* user processes */ /* Define system call traps for the various process types. These call masks * determine what system call traps a process is allowed to make. @@ -83,7 +83,8 @@ PUBLIC char *t_stack[TOT_STACK_SPACE / sizeof(char *)]; #define FS_C (c(SYS_KILL) | c(SYS_VIRCOPY) | c(SYS_VIRVCOPY) | c(SYS_UMAP) \ | c(SYS_GETINFO) | c(SYS_EXIT) | c(SYS_TIMES) | c(SYS_SETALARM)) #define DRV_C (FS_C | c(SYS_SEGCTL) | c(SYS_IRQCTL) | c(SYS_INT86) \ - | c(SYS_DEVIO) | c(SYS_VDEVIO) | c(SYS_SDEVIO) | c(SYS_PHYSCOPY) | c(SYS_PHYSVCOPY)) + | c(SYS_DEVIO) | c(SYS_VDEVIO) | c(SYS_SDEVIO)) +#define MEM_C (DRV_C | c(SYS_PHYSCOPY) | c(SYS_PHYSVCOPY)) /* The system image table lists all programs that are part of the boot image. 
* The order of the entries here MUST agree with the order of the programs @@ -94,15 +95,15 @@ PUBLIC char *t_stack[TOT_STACK_SPACE / sizeof(char *)]; */ PUBLIC struct boot_image image[] = { /* process nr, pc, flags, qs, queue, stack, traps, ipcto, call, name */ - { IDLE, idle_task, IDL_F, 32, IDLE_Q, IDL_S, 0, 0, 0, "IDLE" }, - { CLOCK,clock_task, TSK_F, 0, TASK_Q, TSK_S, TSK_T, 0, 0, "CLOCK" }, - { SYSTEM, sys_task, TSK_F, 0, TASK_Q, TSK_S, TSK_T, 0, 0, "SYSTEM"}, - { HARDWARE, 0, TSK_F, 0, TASK_Q, HRD_S, 0, 0, 0, "KERNEL"}, - { PM_PROC_NR, 0, SRV_F, 16, 3, 0, SRV_T, SRV_M, PM_C, "pm" }, - { FS_PROC_NR, 0, SRV_F, 16, 4, 0, SRV_T, SRV_M, FS_C, "fs" }, - { SM_PROC_NR, 0, SRV_F, 16, 3, 0, SRV_T, SYS_M, SM_C, "sm" }, + { IDLE, idle_task, IDL_F, 8, IDLE_Q, IDL_S, 0, 0, 0, "IDLE" }, + { CLOCK,clock_task, TSK_F, 64, TASK_Q, TSK_S, TSK_T, 0, 0, "CLOCK" }, + { SYSTEM, sys_task, TSK_F, 64, TASK_Q, TSK_S, TSK_T, 0, 0, "SYSTEM"}, + { HARDWARE, 0, TSK_F, 64, TASK_Q, HRD_S, 0, 0, 0, "KERNEL"}, + { PM_PROC_NR, 0, SRV_F, 32, 3, 0, SRV_T, SRV_M, PM_C, "pm" }, + { FS_PROC_NR, 0, SRV_F, 32, 4, 0, SRV_T, SRV_M, FS_C, "fs" }, + { SM_PROC_NR, 0, SRV_F, 32, 3, 0, SRV_T, SYS_M, SM_C, "sm" }, { TTY_PROC_NR, 0, SRV_F, 16, 1, 0, SRV_T, SYS_M, DRV_C, "tty" }, - { MEM_PROC_NR, 0, SRV_F, 16, 2, 0, SRV_T, DRV_M, DRV_C, "memory"}, + { MEM_PROC_NR, 0, SRV_F, 16, 2, 0, SRV_T, DRV_M, MEM_C, "memory"}, { LOG_PROC_NR, 0, SRV_F, 16, 2, 0, SRV_T, SYS_M, DRV_C, "log" }, { DRVR_PROC_NR, 0, SRV_F, 16, 2, 0, SRV_T, SYS_M, DRV_C, "driver"}, { INIT_PROC_NR, 0, USR_F, 8, USER_Q, 0, USR_T, USR_M, 0, "init" }, diff --git a/kernel/type.h b/kernel/type.h index a8b75c63b..1d4a3b648 100755 --- a/kernel/type.h +++ b/kernel/type.h @@ -14,7 +14,7 @@ struct boot_image { proc_nr_t proc_nr; /* process number to use */ task_t *initial_pc; /* start function for tasks */ int flags; /* process flags */ - char quantum; /* quantum (tick count) */ + unsigned char quantum; /* quantum (tick count) */ int priority; /* scheduling priority */ int stksize; /* stack size for tasks */ short trap_mask; /* allowed system call traps */ diff --git a/servers/is/dmp_kernel.c b/servers/is/dmp_kernel.c index 488fa9323..fb9df8622 100644 --- a/servers/is/dmp_kernel.c +++ b/servers/is/dmp_kernel.c @@ -313,7 +313,7 @@ PRIVATE char *s_flags_str(int flags) { static char str[10]; str[0] = (flags & PREEMPTIBLE) ? 'P' : '-'; - str[1] = (flags & RDY_Q_HEAD) ? 'Q' : '-'; + str[1] = '-'; str[2] = (flags & BILLABLE) ? 'B' : '-'; str[3] = (flags & SYS_PROC) ? 'S' : '-'; str[4] = '-'; -- 2.44.0
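
The heart of the change is the replacement of ready()/unready()/sched() by enqueue()/dequeue(): a runnable process that still has quantum left is put at the head of its priority queue, while one that has consumed its quantum gets a fresh quantum and goes to the tail, which yields the round-robin behaviour within one priority level that the commit message says already works. Below is a minimal user-space sketch of that policy, not the kernel code itself; struct sproc, its field names, and the demo values are simplified stand-ins for the real proc table in kernel/proc.h.

#include <stdio.h>

#define NR_QUEUES 16                       /* mirrors NR_SCHED_QUEUES; queue 15 is IDLE_Q */

struct sproc {                             /* simplified stand-in for struct proc */
    const char   *name;
    int           priority;                /* current scheduling queue */
    int           quantum_size;            /* ticks in a fresh quantum */
    int           sched_ticks;             /* ticks left in the current quantum */
    struct sproc *nextready;               /* link in the ready queue */
};

static struct sproc *rdy_head[NR_QUEUES];
static struct sproc *rdy_tail[NR_QUEUES];

/* Add a runnable process to its queue: head if it still has time left,
 * otherwise give it a new quantum and append it at the tail. */
static void enqueue(struct sproc *rp)
{
    int q = rp->priority;
    int time_left = (rp->sched_ticks > 0);

    if (!time_left)
        rp->sched_ticks = rp->quantum_size;    /* fresh quantum */

    if (rdy_head[q] == NULL) {                 /* empty queue */
        rdy_head[q] = rdy_tail[q] = rp;
        rp->nextready = NULL;
    } else if (time_left) {                    /* add to head */
        rp->nextready = rdy_head[q];
        rdy_head[q] = rp;
    } else {                                   /* add to tail */
        rdy_tail[q]->nextready = rp;
        rdy_tail[q] = rp;
        rp->nextready = NULL;
    }
}

/* Remove a process from its queue (it blocked or must be repositioned). */
static void dequeue(struct sproc *rp)
{
    int q = rp->priority;
    struct sproc **xpp, *prev = NULL;

    for (xpp = &rdy_head[q]; *xpp != NULL; xpp = &(*xpp)->nextready) {
        if (*xpp == rp) {
            *xpp = rp->nextready;              /* unlink from the chain */
            if (rp == rdy_tail[q]) rdy_tail[q] = prev;
            break;
        }
        prev = *xpp;
    }
}

/* Pick the head of the highest-priority non-empty queue. */
static struct sproc *pick_proc(void)
{
    for (int q = 0; q < NR_QUEUES; q++)
        if (rdy_head[q] != NULL) return rdy_head[q];
    return NULL;
}

int main(void)
{
    struct sproc a = { "a", 7, 2, 0, NULL };   /* two processes in USER_Q */
    struct sproc b = { "b", 7, 2, 0, NULL };
    enqueue(&a);
    enqueue(&b);

    /* Simulate clock ticks: charge the running process and, once its quantum
     * is gone, take it off the queue and reinsert it -- the same
     * lock_dequeue()/lock_enqueue() pair that do_clocktick() now uses. */
    for (int tick = 0; tick < 8; tick++) {
        struct sproc *cur = pick_proc();
        printf("tick %d: running %s\n", tick, cur->name);
        if (--cur->sched_ticks <= 0) {
            dequeue(cur);
            enqueue(cur);                      /* tail of its queue, new quantum */
        }
    }
    return 0;
}

Running this prints a, a, b, b, a, a, ... : each process runs for its two-tick quantum and then rotates to the back of the queue.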
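
The clock changes split the work that lock_sched() used to do: clock_handler() now only charges time, and do_clocktick() requeues a preempted process once its quantum is gone. The charging itself turns on two flags: only PREEMPTIBLE processes burn quantum, and when the interrupted process is not BILLABLE (a kernel task or a server working on someone else's behalf) the system time and the quantum are charged to bill_ptr instead; do_fork() additionally splits the remaining quantum between parent and child. The following is a small sketch of that charging rule only, again with stand-in types and one-tick granularity.

#include <stdio.h>

#define PREEMPTIBLE 0x01
#define BILLABLE    0x04

struct sproc {
    const char *name;
    int  flags;
    long user_time, sys_time;   /* accounting, in ticks */
    int  sched_ticks;           /* quantum left */
};

/* One clock tick: the interrupted process is charged the user time; only
 * preemptible processes burn quantum; if the interrupted process is not
 * billable, its system time and quantum are charged to the billed process. */
static void charge_tick(struct sproc *proc_ptr, struct sproc *bill_ptr)
{
    proc_ptr->user_time += 1;
    if (proc_ptr->flags & PREEMPTIBLE)
        proc_ptr->sched_ticks -= 1;
    if (!(proc_ptr->flags & BILLABLE)) {
        bill_ptr->sys_time += 1;
        bill_ptr->sched_ticks -= 1;
    }
}

int main(void)
{
    struct sproc fs   = { "fs",   PREEMPTIBLE,            0, 0, 32 };
    struct sproc user = { "user", PREEMPTIBLE | BILLABLE, 0, 0, 8 };

    charge_tick(&fs, &user);    /* fs runs on behalf of user */
    charge_tick(&user, &user);  /* user runs on its own behalf */

    printf("fs:   user %ld  sys %ld  quantum %d\n",
           fs.user_time, fs.sys_time, fs.sched_ticks);
    printf("user: user %ld  sys %ld  quantum %d\n",
           user.user_time, user.sys_time, user.sched_ticks);
    return 0;
}

The point of the extra bill_ptr->p_sched_ticks line in the patch is visible here: a user process now pays quantum for the server ticks spent on its request, not just system time.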
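
The ageing step that the commit message leaves unfinished is outlined in enqueue()'s comment and in the new balance_queues(): a process that keeps eating full quanta would have its priority lowered one queue at a time, and once something reaches IDLE_Q all queues are rebuilt, raising every process back toward its own maximum priority and handing out fresh quanta (in this revision that path is still fenced off behind DEAD_CODE). What follows is only a hedged sketch of the balancing idea, with a made-up four-entry process table instead of the kernel's slot iteration, and without the re-enqueue of runnable slots that the real function also performs.

#include <stdio.h>

#define NR_PROCS 4

struct sproc {
    const char *name;
    int priority;        /* current queue (larger number = lower priority) */
    int max_priority;    /* best queue this process may use */
    int quantum_size;
    int sched_ticks;
};

static struct sproc table[NR_PROCS] = {   /* made-up snapshot after some ageing */
    { "pm",   3,  3, 32, 0 },
    { "fs",   6,  4, 32, 0 },             /* demoted twice */
    { "init", 9,  7,  8, 0 },             /* demoted twice */
    { "loop", 15, 7,  8, 0 },             /* reached IDLE_Q, triggers the balance */
};

/* Rebalance: subtract the penalty accumulated by the triggering process from
 * every slot, never raising a process above its own maximum priority, and
 * give everyone a fresh quantum. */
static void balance_queues(struct sproc *pp)
{
    int penalty = pp->priority - pp->max_priority;

    for (int i = 0; i < NR_PROCS; i++) {
        struct sproc *rp = &table[i];
        int raised = rp->priority - penalty;
        rp->priority = (raised > rp->max_priority) ? raised : rp->max_priority;
        rp->sched_ticks = rp->quantum_size;
    }
}

int main(void)
{
    balance_queues(&table[3]);
    for (int i = 0; i < NR_PROCS; i++)
        printf("%-5s priority %2d (max %d)\n",
               table[i].name, table[i].priority, table[i].max_priority);
    return 0;
}

Everyone ends up at or near its maximum priority again, so a CPU-bound process can still be demoted later, but the whole system never collects permanently in the lowest queue next to IDLE.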