#include "kernel/kernel.h"
#include "kernel/clock.h"
-#include "kernel/proc.h"
#include "kernel/interrupt.h"
#include <minix/u64.h>
#include <minix/board.h>
#include "kernel/glo.h"
#include "kernel/profile.h"
-#include <assert.h>
-
#include <sys/sched.h> /* for CP_*, CPUSTATES */
#if CPUSTATES != MINIX_CPUSTATES
/* If this breaks, the code in this file may have to be adapted accordingly. */
#error "MINIX_CPUSTATES value is out of sync with NetBSD's!"
#endif
#ifdef CONFIG_SMP
#include "kernel/smp.h"
+#error CONFIG_SMP is unsupported on ARM
#endif
#include "bsp_timer.h"
void cycles_accounting_init(void)
{
+#ifdef CONFIG_SMP
+ unsigned cpu = cpuid;
+#endif
+
read_tsc_64(get_cpu_var_ptr(cpu, tsc_ctr_switch));
get_cpu_var(cpu, cpu_last_tsc) = 0;
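	/* With cpu_last_tsc zeroed, the first cpu_load() call below reports no load. */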
void context_stop(struct proc * p)
{
- u64_t tsc;
- u32_t tsc_delta;
- unsigned int counter, tpt;
+ u64_t tsc, tsc_delta;
u64_t * __tsc_ctr_switch = get_cpulocal_var_ptr(tsc_ctr_switch);
+ unsigned int cpu, tpt, counter;
+#ifdef CONFIG_SMP
+#error CONFIG_SMP is unsupported on ARM
+#else
read_tsc_64(&tsc);
- assert(tsc >= *__tsc_ctr_switch);
+ p->p_cycles = p->p_cycles + tsc - *__tsc_ctr_switch;
+ cpu = 0;
+#endif
+
tsc_delta = tsc - *__tsc_ctr_switch;
- p->p_cycles += tsc_delta;
- if(kbill_ipc) {
+ if (kbill_ipc) {
kbill_ipc->p_kipc_cycles += tsc_delta;
kbill_ipc = NULL;
}
- if(kbill_kcall) {
+ if (kbill_kcall) {
kbill_kcall->p_kcall_cycles += tsc_delta;
kbill_kcall = NULL;
}
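	/*
	 * The two blocks above bill the kernel cycles just measured to the
	 * process on whose behalf the kernel performed IPC or kernel-call
	 * work, then clear the billing hook until the next such event.
	 */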
	 * the code below is a loop, but in the vast majority of cases the loop
	 * will not be executed more than once, and it will often be skipped
	 * entirely.
	 */
- tpt = tsc_per_tick[0];
+ tpt = tsc_per_tick[cpu];
p->p_tick_cycles += tsc_delta;
while (tpt > 0 && p->p_tick_cycles >= tpt) {
/*
* deduct the just consumed cpu cycles from the cpu time left for this
* process during its current quantum. Skip IDLE and other pseudo kernel
- * tasks, except for accounting purposes.
+ * tasks, except for global accounting purposes.
*/
if (p->p_endpoint >= 0) {
/* On MINIX3, the "system" counter covers system processes. */
#else
if (tsc_delta < p->p_cpu_time_left) {
p->p_cpu_time_left -= tsc_delta;
- } else p->p_cpu_time_left = 0;
+ } else {
+ p->p_cpu_time_left = 0;
+ }
#endif
	} else {
		/* On MINIX3, the "interrupts" counter covers the kernel. */
		if (p->p_endpoint == IDLE)
			counter = CP_IDLE;
		else
			counter = CP_INTR;
-
	}
+ tsc_per_state[cpu][counter] += tsc_delta;
+
*__tsc_ctr_switch = tsc;
}
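/*
 * Convert a span of milliseconds into the equivalent number of TSC cycles
 * on this CPU, using the calibrated tsc_per_ms rate: e.g. at a hypothetical
 * 500 MHz counter rate, tsc_per_ms is 500000 and ms_2_cpu_time(10) yields
 * 5000000 cycles.
 */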
u64_t ms_2_cpu_time(unsigned ms)
{
- return (u64_t)(tsc_per_ms[cpuid]) * ms;
+ return (u64_t)tsc_per_ms[cpuid] * ms;
}
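/*
 * The inverse conversion: turn a TSC cycle count back into milliseconds.
 */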
unsigned cpu_time_2_ms(u64_t cpu_time)
short cpu_load(void)
{
- return 0;
+ u64_t current_tsc, *current_idle;
+ u64_t tsc_delta, idle_delta, busy;
+ struct proc *idle;
+ short load;
+#ifdef CONFIG_SMP
+ unsigned cpu = cpuid;
+#endif
+
+ u64_t *last_tsc, *last_idle;
+
+ last_tsc = get_cpu_var_ptr(cpu, cpu_last_tsc);
+ last_idle = get_cpu_var_ptr(cpu, cpu_last_idle);
+
+	idle = get_cpu_var_ptr(cpu, idle_proc);
+	read_tsc_64(&current_tsc);
+	current_idle = &idle->p_cycles; /* ptr to the idle proc's cycle counter */
+
+ /* calculate load since last cpu_load invocation */
+ if (*last_tsc) {
+ tsc_delta = current_tsc - *last_tsc;
+ idle_delta = *current_idle - *last_idle;
+
+ busy = tsc_delta - idle_delta;
+ busy = busy * 100;
+ load = ex64lo(busy / tsc_delta);
+
+ if (load > 100)
+ load = 100;
+ } else
+ load = 0;
+
+ *last_tsc = current_tsc;
+ *last_idle = *current_idle;
+ return load;
}
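/*
 * cpu_load() example: if 1000000 cycles elapsed since the previous call and
 * the idle process accumulated 250000 of them, busy = 750000, so the
 * reported load is 75 percent.
 */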
/*
 * Return the number of clock ticks spent in each of a predefined number of
 * CPU states.
 */
void get_cpu_ticks(unsigned int cpu, uint64_t ticks[CPUSTATES])
{
int i;
+ /* TODO: make this inter-CPU safe! */
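+	/*
+	 * tsc_per_state accumulates raw TSC cycles for each CPU state;
+	 * dividing by the per-tick cycle count converts those totals into
+	 * clock ticks for the caller.
+	 */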
for (i = 0; i < CPUSTATES; i++)
- ticks[i] = tsc_per_state[0][i] / tsc_per_tick[0];
+ ticks[i] = tsc_per_state[cpu][i] / tsc_per_tick[cpu];
}
-
/* i386-specific clock functions. */
#include <machine/ports.h>
#include "kernel/clock.h"
#include "kernel/interrupt.h"
#include <minix/u64.h>
-#include "glo.h"
+#include "kernel/glo.h"
#include "kernel/profile.h"
#include <sys/sched.h> /* for CP_*, CPUSTATES */
read_tsc_64(get_cpu_var_ptr(cpu, tsc_ctr_switch));
	get_cpu_var(cpu, cpu_last_tsc) = 0;
	get_cpu_var(cpu, cpu_last_idle) = 0;
}
void context_stop(struct proc * p)
p->p_cycles = p->p_cycles + tsc - *__tsc_ctr_switch;
cpu = 0;
#endif
tsc_delta = tsc - *__tsc_ctr_switch;
if (kbill_ipc) {
- kbill_ipc->p_kipc_cycles =
- kbill_ipc->p_kipc_cycles + tsc_delta;
+ kbill_ipc->p_kipc_cycles += tsc_delta;
kbill_ipc = NULL;
}
if (kbill_kcall) {
- kbill_kcall->p_kcall_cycles =
- kbill_kcall->p_kcall_cycles + tsc_delta;
+ kbill_kcall->p_kcall_cycles += tsc_delta;
kbill_kcall = NULL;
}
#if DEBUG_RACE
p->p_cpu_time_left = 0;
#else
- /* if (tsc_delta < p->p_cpu_time_left) in 64bit */
- if (ex64hi(tsc_delta) < ex64hi(p->p_cpu_time_left) ||
- (ex64hi(tsc_delta) == ex64hi(p->p_cpu_time_left) &&
- ex64lo(tsc_delta) < ex64lo(p->p_cpu_time_left)))
- p->p_cpu_time_left = p->p_cpu_time_left - tsc_delta;
- else {
+ if (tsc_delta < p->p_cpu_time_left) {
+ p->p_cpu_time_left -= tsc_delta;
+ } else {
p->p_cpu_time_left = 0;
}
#endif
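	/*
	 * u64_t is a native 64-bit integer type here, so the comparison and
	 * subtraction above can be done directly rather than via ex64hi()
	 * and ex64lo().
	 */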
#ifdef CONFIG_SMP
unsigned cpu = cpuid;
#endif
is_idle = get_cpu_var(cpu, cpu_is_idle);
get_cpu_var(cpu, cpu_is_idle) = 0;
load = 100;
} else
load = 0;
*last_tsc = current_tsc;
*last_idle = *current_idle;
return load;