TEST_INT_IN_KERNEL(4, 0f) ;\
\
SAVE_PROCESS_CTX(0) ;\
+ push %ebp ;\
+ call cycles_accounting_stop ;\
+ add $4, %esp ;\
movl $0, %ebp /* for stack trace */ ;\
APIC_IRQ_HANDLER(irq) ;\
jmp restart ;\
\
0: \
pusha ;\
+ call cycles_accounting_stop_idle ;\
APIC_IRQ_HANDLER(irq) ;\
popa ;\
iret ;
TEST_INT_IN_KERNEL(4, 0f) ;\
\
SAVE_PROCESS_CTX(0) ;\
+ push %ebp ;\
+ call cycles_accounting_stop ;\
+ add $4, %esp ;\
movl $0, %ebp /* for stack trace */ ;\
LAPIC_INTR_HANDLER(func) ;\
jmp restart ;\
\
0: \
pusha ;\
+ call cycles_accounting_stop_idle ;\
LAPIC_INTR_HANDLER(func) ;\
popa ;\
iret ;
#include "../../kernel.h"
#include "../../clock.h"
+#include "../../proc.h"
+#include <minix/u64.h>
+
#ifdef CONFIG_APIC
#include "apic.h"
#define TIMER_FREQ 1193182 /* clock frequency for timer in PC and AT */
#define TIMER_COUNT(freq) (TIMER_FREQ/(freq)) /* initial value for counter*/
+/* FIXME make it cpu local! */
+PRIVATE u64_t tsc_ctr_switch; /* when did we switched time accounting */
+
PRIVATE irq_hook_t pic_timer_hook; /* interrupt handler hook */
/*===========================================================================*
return 0;
}
+
+/*
+ * Initialize cycle accounting: record the current TSC value as the
+ * start of the first accounting interval. Must run once at boot,
+ * before any cycles are charged via cycles_accounting_stop().
+ */
+PUBLIC void cycles_accounting_init(void)
+{
+ read_tsc_64(&tsc_ctr_switch);
+}
+
+/*
+ * Charge all TSC cycles elapsed since the last switch point
+ * (tsc_ctr_switch) to process 'p', then restart the interval at the
+ * current TSC value so the next caller is charged only for its own
+ * time. Called on every kernel entry/exit path (see the assembly
+ * stubs), so 'p' may be a user process, KERNEL, or IDLE.
+ */
+PUBLIC void cycles_accounting_stop(struct proc * p)
+{
+ u64_t tsc;
+
+ read_tsc_64(&tsc);
+ p->p_cycles = add64(p->p_cycles, sub64(tsc, tsc_ctr_switch));
+ tsc_ctr_switch = tsc;
+}
+
+/*
+ * Assembly-callable wrapper: charge the elapsed cycles to the IDLE
+ * pseudo-process. Used by the interrupt stubs when the interrupt
+ * arrived while the CPU was idling (no argument needs to be pushed).
+ */
+PUBLIC void cycles_accounting_stop_idle(void)
+{
+ cycles_accounting_stop(proc_addr(IDLE));
+}
TEST_INT_IN_KERNEL(4, 0f) ;\
\
SAVE_PROCESS_CTX(0) ;\
+ push %ebp ;\
+ call cycles_accounting_stop ;\
+ add $4, %esp ;\
movl $0, %ebp /* for stack trace */ ;\
PIC_IRQ_HANDLER(irq) ;\
movb $END_OF_INT, %al ;\
\
0: \
pusha ;\
+ call cycles_accounting_stop_idle ;\
PIC_IRQ_HANDLER(irq) ;\
movb $END_OF_INT, %al ;\
outb $INT_CTL /* reenable interrupts in master pic */ ;\
TEST_INT_IN_KERNEL(4, 0f) ;\
\
SAVE_PROCESS_CTX(0) ;\
+ push %ebp ;\
+ call cycles_accounting_stop ;\
+ add $4, %esp ;\
movl $0, %ebp /* for stack trace */ ;\
PIC_IRQ_HANDLER(irq) ;\
movb $END_OF_INT, %al ;\
\
0: \
pusha ;\
+ call cycles_accounting_stop_idle ;\
PIC_IRQ_HANDLER(irq) ;\
movb $END_OF_INT, %al ;\
outb $INT_CTL /* reenable interrupts in master pic */ ;\
push %eax
push %ecx
+ /* stop user process cycles */
+ push %ebp
+ call cycles_accounting_stop
+ add $4, %esp
+
/* for stack trace */
movl $0, %ebp
/* save the pointer to the current process */
push %ebp
- /* for stack trace */
- movl $0, %ebp
-
/*
* pass the syscall arguments from userspace to the handler.
* SAVE_PROCESS_CTX() does not clobber these registers, they are still
*/
push %eax
+ /* stop user process cycles */
+ push %ebp
+ call cycles_accounting_stop
+ add $4, %esp
+
+ /* for stack trace */
+ movl $0, %ebp
+
call kernel_call
/* restore the current process pointer and save the return value */
SAVE_PROCESS_CTX(8)
+ /* stop user process cycles */
+ push %ebp
+ call cycles_accounting_stop
+ add $4, %esp
+
/* for stack trace clear %ebp */
movl $0, %ebp
jnz 0f /* jump if FPU is already initialized */
orw $MF_FPU_INITIALIZED, (%ebx)
fninit
+ /* stop user process cycles */
+ push %ebp
+ call cycles_accounting_stop
+ add $4, %esp
jmp copr_return
0: /* load FPU context for current process */
mov %ss:FP_SAVE_AREA_P(%ebp), %eax
COL
- kprintf("%d: %s %d prio %d/%d time %d/%d cr3 0x%lx rts %s misc %s",
+ kprintf("%d: %s %d prio %d/%d time %d/%d cycles 0x%x%08x cr3 0x%lx rts %s misc %s",
proc_nr(pp), pp->p_name, pp->p_endpoint,
pp->p_priority, pp->p_max_priority, pp->p_user_time,
- pp->p_sys_time, pp->p_seg.p_cr3,
+ pp->p_sys_time, pp->p_cycles.hi, pp->p_cycles.lo, pp->p_seg.p_cr3,
rtsflagstr(pp->p_rts_flags), miscflagstr(pp->p_misc_flags));
if(pp->p_rts_flags & RTS_SENDING) {
{
unsigned ticks;
- IDLE_STOP;
-
if(minix_panicing)
return 0;
int expired = 0;
struct proc * p, * billp;
- IDLE_STOP;
-
#ifdef CONFIG_WATCHDOG
/*
* we need to know whether local timer ticks are happening or whether
/* Clear bit 'bit' in the chunked system bitmap 'map'.
 * NOTE(review): the original expansion was missing one closing
 * parenthesis after CHUNK_OFFSET(bit), which is a compile error at
 * every expansion site; balanced here. */
#define unset_sys_bit(map,bit) \
( MAP_CHUNK(map.chunk,bit) &= ~(1 << CHUNK_OFFSET(bit)) )
-#ifdef CONFIG_IDLE_TSC
-#define IDLE_STOP if(idle_active) { read_tsc_64(&idle_stop); idle_active = 0; }
-#else
-#define IDLE_STOP
-#endif
-
/* args to intr_init() */
#define INTS_ORIG 0 /* restore interrupts */
#define INTS_MINIX 1 /* initialize interrupts for minix */
EXTERN int config_no_apic; /* optionaly turn off apic */
#endif
-#ifdef CONFIG_IDLE_TSC
-EXTERN u64_t idle_tsc;
-EXTERN u64_t idle_stop;
-EXTERN int idle_active;
-#endif
-
EXTERN unsigned cpu_hz[CONFIG_MAX_CPUS];
#define cpu_set_freq(cpu, freq) do {cpu_hz[cpu] = freq;} while (0)
{
irq_hook_t * hook;
- IDLE_STOP;
-
/* here we need not to get this IRQ until all the handlers had a say */
hw_intr_mask(irq);
hook = irq_handlers[irq];
/* We only support 1 cpu now */
#define CONFIG_MAX_CPUS 1
#define cpuid 0
-/* measure cumulative idle timestamp counter ticks */
-#undef CONFIG_IDLE_TSC
/* This is the master header for the kernel. It includes some other files
* and defines the principal constants.
#endif /* SPROFILE */
cprof_procs_no = 0; /* init nr of hash table slots used */
-#ifdef CONFIG_IDLE_TSC
- idle_tsc = cvu64(0);
-#endif
-
vm_running = 0;
krandom.random_sources = RANDOM_SOURCES;
krandom.random_elements = RANDOM_ELEMENTS;
FIXME("PROC check enabled");
#endif
+ cycles_accounting_init();
+
restart();
NOT_REACHABLE;
}
/*===========================================================================*
* idle *
*===========================================================================*/
-PRIVATE void idle()
+PRIVATE void idle(void)
{
/* This function is called whenever there is no work to do.
 * Halt the CPU, and measure how many timestamp counter ticks are
 * spent not doing anything. This allows test setups to measure
 * the CPU utiliziation of certain workloads with high precision.
 */
-#ifdef CONFIG_IDLE_TSC
- u64_t idle_start;
-
- read_tsc_64(&idle_start);
- idle_active = 1;
-#endif
+ /*
+  * start accounting for the idle time: cycles spent so far in this
+  * trip through the kernel are charged to the KERNEL pseudo-process,
+  * so the time halted below accrues to IDLE at the next stop call
+  */
+ cycles_accounting_stop(proc_addr(KERNEL));
halt_cpu();
-
-#ifdef CONFIG_IDLE_TSC
- if (idle_active) {
- IDLE_STOP;
- printf("Kernel: idle active after resuming CPU\n");
- }
-
- idle_tsc = add64(idle_tsc, sub64(idle_stop, idle_start));
-#endif
+ /*
+  * end of accounting for the idle task does not happen here, the kernel
+  * is handling stuff for quite a while before it gets back here!
+  */
}
/*===========================================================================*
#endif
proc_ptr = arch_finish_schedcheck();
+ cycles_accounting_stop(proc_addr(KERNEL));
NOREC_RETURN(schedch, proc_ptr);
}
clock_t p_virt_left; /* number of ticks left on virtual timer */
clock_t p_prof_left; /* number of ticks left on profile timer */
+ u64_t p_cycles; /* how many cycles did the process use */
+
struct proc *p_nextready; /* pointer to next ready process */
struct proc *p_caller_q; /* head of list of procs wishing to send */
struct proc *p_q_link; /* link to next proc wishing to send */
_PROTOTYPE( void reset_timer, (struct timer *tp) );
_PROTOTYPE( void ser_dump_proc, (void) );
+_PROTOTYPE( void cycles_accounting_init, (void) );
+/*
+ * These functions start and stop accounting for process, kernel or idle
+ * cycles. They inherently have to account some kernel cycles to processes
+ * too, therefore they should be called as soon as possible after trapping
+ * to the kernel and as late as possible before returning to userspace.
+ * These functions are architecture dependent.
+ */
+_PROTOTYPE( void cycles_accounting_stop, (struct proc * p) );
+/* this is a wrapper to make calling it from assembly easier */
+_PROTOTYPE( void cycles_accounting_stop_idle, (void) );
+
/* main.c */
_PROTOTYPE( void main, (void) );
_PROTOTYPE( void prepare_shutdown, (int how) );
break;
}
case GET_IDLETSC: {
-#ifdef CONFIG_IDLE_TSC
- length = sizeof(idle_tsc);
- src_vir = (vir_bytes) &idle_tsc;
+ struct proc * idl;
+
+ idl = proc_addr(IDLE);
+ length = sizeof(idl->p_cycles);
+ src_vir = (vir_bytes) &idl->p_cycles;
break;
-#else
- kprintf("do_getinfo: kernel not compiled with CONFIG_IDLE_TSC\n");
- return(EINVAL);
-#endif
}
case GET_AOUTHEADER: {
int hdrindex, index = m_ptr->I_VAL_LEN2_E;