From: Ben Gras
Date: Sat, 8 May 2010 18:00:03 +0000 (+0000)
Subject: kernel: new DEBUG_RACE option. try to provoke race conditions between processes.
X-Git-Tag: v3.1.7~72
X-Git-Url: http://zhaoyanbai.com/repos/%22http:/www.isc.org/icons/zlib_tech.html?a=commitdiff_plain;h=a1636b85b7381ac17fd593aa8db59a7487a1fbea;p=minix.git

kernel: new DEBUG_RACE option. try to provoke race conditions between processes.

it does this by
 - making all processes interruptible by running out of quantum
 - giving all processes a single tick of quantum
 - picking a random runnable process instead of in order, and from a
   single pool of runnable processes (no priorities)

This together with very high HZ values currently provokes some race
conditions seen earlier only when running with SMP.
---

diff --git a/kernel/clock.c b/kernel/clock.c
index b5c853934..e55d6624b 100644
--- a/kernel/clock.c
+++ b/kernel/clock.c
@@ -35,6 +35,7 @@
 #include
 #include "clock.h"
+#include "debug.h"
 
 #ifdef CONFIG_WATCHDOG
 #include "watchdog.h"
@@ -199,9 +200,15 @@ PUBLIC int ap_timer_int_handler(void)
 		billp = bill_ptr;
 
 	p->p_user_time += ticks;
+
+#if DEBUG_RACE
+	/* With DEBUG_RACE, every process gets interrupted. */
+	p->p_ticks_left = 0;
+#else
 	if (priv(p)->s_flags & PREEMPTIBLE) {
 		p->p_ticks_left -= ticks;
 	}
+#endif
 	if (! (priv(p)->s_flags & BILLABLE)) {
 		billp->p_sys_time += ticks;
 	}
diff --git a/kernel/debug.h b/kernel/debug.h
index 4f0ef0ab3..0c9e60fc5 100644
--- a/kernel/debug.h
+++ b/kernel/debug.h
@@ -30,6 +30,13 @@
 /* Verbose messages. */
 #define DEBUG_TRACE 0
 
+/* DEBUG_RACE makes every process preemptible, schedules
+ * every process on the same priority queue, and randomizes
+ * the next process to run, in order to help catch race
+ * conditions that could otherwise be masked.
+ */
+#define DEBUG_RACE 0
+
 #if DEBUG_TRACE
 
 #define VF_SCHEDULING (1L << 1)
diff --git a/kernel/proc.c b/kernel/proc.c
index c2b96edc3..da3e04e5b 100644
--- a/kernel/proc.c
+++ b/kernel/proc.c
@@ -1144,6 +1144,11 @@ PUBLIC void enqueue(
  */
   int q = rp->p_priority;		/* scheduling queue to use */
 
+#if DEBUG_RACE
+  /* With DEBUG_RACE, schedule everyone at the same priority level. */
+  rp->p_priority = q = MIN_USER_Q;
+#endif
+
   assert(proc_is_runnable(rp));
 
   assert(q >= 0);
@@ -1255,6 +1260,32 @@ PUBLIC void dequeue(const struct proc *rp)
 #endif
 }
 
+#if DEBUG_RACE
+/*===========================================================================*
+ *				random_process				     *
+ *===========================================================================*/
+PRIVATE struct proc *random_process(struct proc *head)
+{
+	int i, n = 0;
+	struct proc *rp;
+	u64_t r;
+	read_tsc_64(&r);
+
+	for(rp = head; rp; rp = rp->p_nextready)
+		n++;
+
+	/* Use low-order word of TSC as pseudorandom value. */
+	i = r.lo % n;
+
+	for(rp = head; i--; rp = rp->p_nextready)
+		;
+
+	assert(rp);
+
+	return rp;
+}
+#endif
+
 /*===========================================================================*
  *				pick_proc				     *
  *===========================================================================*/
@@ -1276,6 +1307,11 @@ PRIVATE struct proc * pick_proc(void)
 		TRACE(VF_PICKPROC, printf("queue %d empty\n", q););
 		continue;
 	}
+
+#if DEBUG_RACE
+	rp = random_process(rdy_head[q]);
+#endif
+
 	TRACE(VF_PICKPROC, printf("found %s / %d on queue %d\n",
 		rp->p_name, rp->p_endpoint, q););
 	assert(proc_is_runnable(rp));
@@ -1393,6 +1429,10 @@ PUBLIC void check_ticks_left(struct proc * p)
 		 * be renewed. In fact, they by pass scheduling
 		 */
 		p->p_ticks_left = p->p_quantum_size;
+#if DEBUG_RACE
+		RTS_SET(proc_ptr, RTS_PREEMPTED);
+		RTS_UNSET(proc_ptr, RTS_PREEMPTED);
+#endif
 		}
 	}
 }
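
Note that the option ships disabled (#define DEBUG_RACE 0). Since every new
code path is guarded by #if DEBUG_RACE, enabling it presumably amounts to
flipping the define in kernel/debug.h to 1 and rebuilding the kernel; per the
commit message, a very high HZ value shortens the tick and makes the forced
preemption correspondingly more frequent:

    -#define DEBUG_RACE 0
    +#define DEBUG_RACE 1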
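For readers without the kernel tree at hand, here is a minimal user-space
sketch of the selection technique random_process() uses: one pass over the
ready list to count entries, a pseudorandom index, then a second pass to walk
to that entry. The kernel draws its randomness from the low word of the TSC;
rand() and a plain linked list (struct node, random_node) stand in for that
here and are illustrative only:

    #include <assert.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <time.h>

    struct node { int id; struct node *next; };

    /* Pick a pseudorandom element from a non-empty singly linked list,
     * mirroring random_process(): one pass to count, one pass to select. */
    static struct node *random_node(struct node *head)
    {
        int i, n = 0;
        struct node *p;

        for (p = head; p != NULL; p = p->next)
            n++;
        assert(n > 0);          /* caller must pass a non-empty list */

        i = rand() % n;         /* pseudorandom index in 0 .. n-1 */

        for (p = head; i-- > 0; p = p->next)
            ;
        assert(p != NULL);
        return p;
    }

    int main(void)
    {
        struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };

        srand((unsigned) time(NULL));
        printf("picked node %d\n", random_node(&a)->id);
        return 0;
    }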
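Purely for illustration (none of this is part of the commit), the kind of bug
the option is meant to flush out: two processes doing unlocked read-modify-write
updates on shared memory. With long quanta a process usually finishes its burst
before being preempted and the lost updates go unnoticed; with one-tick quanta,
a high HZ and randomized picking, the interleaving surfaces as a short count far
more often. A generic Unix sketch, assuming mmap with MAP_ANONYMOUS is
available:

    #include <stdio.h>
    #include <sys/mman.h>
    #include <sys/wait.h>
    #include <unistd.h>

    #define ITERATIONS 1000000

    int main(void)
    {
        /* Shared, unsynchronized counter mapped into both processes. */
        volatile int *counter = mmap(NULL, sizeof(*counter),
            PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
        int i;

        if (counter == MAP_FAILED)
            return 1;
        *counter = 0;

        if (fork() == 0) {                      /* child */
            for (i = 0; i < ITERATIONS; i++)
                (*counter)++;                   /* racy read-modify-write */
            _exit(0);
        }
        for (i = 0; i < ITERATIONS; i++)        /* parent */
            (*counter)++;
        wait(NULL);

        /* Any shortfall from 2 * ITERATIONS is a lost update. */
        printf("expected %d, got %d\n", 2 * ITERATIONS, *counter);
        return 0;
    }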