p->p_cycles = add64(p->p_cycles, sub64(tsc, *__tsc_ctr_switch));
BKL_UNLOCK();
} else {
+ u64_t bkl_tsc, tmp;
+ unsigned cpu = cpuid;
+ atomic_t succ;
+
+ read_tsc_64(&bkl_tsc);
+ /* this only gives a good estimate */
+ succ = big_kernel_lock.val;
+
BKL_LOCK();
+
read_tsc_64(&tsc);
- p->p_cycles = add64(p->p_cycles, sub64(tsc, *__tsc_ctr_switch));
+
+ bkl_ticks[cpu] = add64(bkl_ticks[cpu], sub64(tsc, bkl_tsc));
+ bkl_tries[cpu]++;
+ bkl_succ[cpu] += !(!(succ == 0));
+
+ tmp = sub64(tsc, *__tsc_ctr_switch);
+ kernel_ticks[cpu] = add64(kernel_ticks[cpu], tmp);
+ p->p_cycles = add64(p->p_cycles, tmp);
}
*__tsc_ctr_switch = tsc;
}
}
+#ifdef CONFIG_SMP
+/*
+ * Print per-CPU big-kernel-lock statistics gathered at each BKL
+ * acquisition: TSC ticks spent in the kernel, TSC ticks spent waiting
+ * for the BKL, and how often the lock was observed free (succ) out of
+ * all acquisition attempts (tries).
+ */
+PRIVATE void dump_bkl_usage(void)
+{
+	unsigned cpu;
+
+	printf("--- BKL usage ---\n");
+	for (cpu = 0; cpu < ncpus; cpu++) {
+		/* cpu, bkl_succ and bkl_tries are unsigned; %u matches their type */
+		printf("cpu %3u kernel ticks 0x%x%08x bkl ticks 0x%x%08x succ %u tries %u\n", cpu,
+				kernel_ticks[cpu].hi, kernel_ticks[cpu].lo,
+				bkl_ticks[cpu].hi, bkl_ticks[cpu].lo,
+				bkl_succ[cpu], bkl_tries[cpu]);
+	}
+}
+
+/*
+ * Zero all per-CPU BKL statistics so a fresh measurement interval can
+ * begin (triggered from the serial debug handler).
+ */
+PRIVATE void reset_bkl_usage(void)
+{
+	memset(kernel_ticks, 0, sizeof(kernel_ticks));
+	memset(bkl_ticks, 0, sizeof(bkl_ticks));
+	memset(bkl_tries, 0, sizeof(bkl_tries));
+	memset(bkl_succ, 0, sizeof(bkl_succ));
+}
+#endif
+
PRIVATE void ser_debug(const int c)
{
serial_debug_active = 1;
case 'Q':
minix_shutdown(NULL);
NOT_REACHABLE;
+#ifdef CONFIG_SMP
+ case 'B':
+ dump_bkl_usage();
+ break;
+ case 'b':
+ reset_bkl_usage();
+ break;
+#endif
case '1':
ser_dump_proc();
break;
EXTERN volatile int serial_debug_active;
+/* Per-CPU big-kernel-lock statistics (accumulated in the context-switch
+ * accounting path; dumped/reset via the serial debug handler). */
+EXTERN u64_t kernel_ticks[CONFIG_MAX_CPUS];	/* TSC ticks spent in the kernel */
+EXTERN u64_t bkl_ticks[CONFIG_MAX_CPUS];	/* TSC ticks spent acquiring the BKL */
+EXTERN unsigned bkl_tries[CONFIG_MAX_CPUS];	/* BKL acquisition attempts */
+EXTERN unsigned bkl_succ[CONFIG_MAX_CPUS];	/* attempts that observed the lock free */
+
#endif /* GLO_H */