servers/sched/sched
servers/vfs/vfs
servers/vm/vm
commands/service/service
+drivers/acpi/acpi
drivers/ahci/ahci
drivers/amddev/amddev
drivers/at_wini/at_wini
drivers/atl2/atl2
#define SCHEDULING_BASE 0xF00
#define SCHEDULING_NO_QUANTUM (SCHEDULING_BASE+1)
+# define SCHEDULING_ACNT_DEQS m9_l1 /* # of dequeues */
+# define SCHEDULING_ACNT_IPC_SYNC m9_l2 /* # of synchronous ipc calls */
+# define SCHEDULING_ACNT_IPC_ASYNC m9_l3 /* # of asynchronous ipc calls */
+# define SCHEDULING_ACNT_PREEMPT m9_l4 /* # of preemptions */
+# define SCHEDULING_ACNT_QUEUE m9_l5 /* time spent on the run queue (ms) */
+# define SCHEDULING_ACNT_CPU m9_s1 /* cpu the process last ran on */
+# define SCHEDULING_ACNT_CPU_LOAD m9_s2 /* cpu load in percent */
+/* These fields are used for SYS_SCHEDULE, the reply to SCHEDULING_NO_QUANTUM */
# define SCHEDULING_ENDPOINT m9_l1
# define SCHEDULING_QUANTUM m9_l2
# define SCHEDULING_PRIORITY m9_s1
# define SCHEDULING_CPU m9_l4
-/* SCHEDULING_START uses _ENDPOINT, _PRIORITY and _QUANTUM from
- * SCHEDULING_NO_QUANTUM */
+/*
+ * SCHEDULING_START uses _ENDPOINT, _PRIORITY and _QUANTUM from
+ * SCHEDULING_NO_QUANTUM/SYS_SCHEDULE
+ */
#define SCHEDULING_START (SCHEDULING_BASE+2)
# define SCHEDULING_SCHEDULER m9_l1 /* Overrides _ENDPOINT on return*/
# define SCHEDULING_PARENT m9_l3
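
A minimal sketch of the consumer side, assuming the standard MINIX message
layout: a scheduler receiving an out-of-quantum message could dump the new
accounting fields like this. The log_stats() helper is illustrative only, not
part of the patch:

	#include <minix/com.h>
	#include <minix/ipc.h>
	#include <stdio.h>

	static void log_stats(const message *m_ptr)
	{
		printf("SCHED: %d: %ld ms queued, %ld dequeues, "
		    "%ld preemptions, %ld/%ld sync/async ipc, "
		    "cpu %d at %d%% load\n",
		    m_ptr->m_source,
		    m_ptr->SCHEDULING_ACNT_QUEUE,
		    m_ptr->SCHEDULING_ACNT_DEQS,
		    m_ptr->SCHEDULING_ACNT_PREEMPT,
		    m_ptr->SCHEDULING_ACNT_IPC_SYNC,
		    m_ptr->SCHEDULING_ACNT_IPC_ASYNC,
		    m_ptr->SCHEDULING_ACNT_CPU,
		    m_ptr->SCHEDULING_ACNT_CPU_LOAD);
	}
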
PUBLIC void cycles_accounting_init(void)
{
- read_tsc_64(get_cpulocal_var_ptr(tsc_ctr_switch));
+ unsigned cpu = cpuid;
+
+ read_tsc_64(get_cpu_var_ptr(cpu, tsc_ctr_switch));
+
+ make_zero64(get_cpu_var(cpu, cpu_last_tsc));
+ make_zero64(get_cpu_var(cpu, cpu_last_idle));
}
PUBLIC void context_stop(struct proc * p)
{
return mul64u(tsc_per_ms[cpuid], ms);
}
+
+PUBLIC unsigned cpu_time_2_ms(u64_t cpu_time)
+{
+ return div64u(cpu_time, tsc_per_ms[cpuid]);
+}
+
+PUBLIC short cpu_load(void)
+{
+ u64_t current_tsc, *current_idle;
+ u64_t tsc_delta, idle_delta, busy;
+ struct proc *idle;
+ short load;
+ unsigned cpu = cpuid;
+
+ u64_t *last_tsc, *last_idle;
+
+ last_tsc = get_cpu_var_ptr(cpu, cpu_last_tsc);
+ last_idle = get_cpu_var_ptr(cpu, cpu_last_idle);
+
+ idle = get_cpu_var_ptr(cpu, idle_proc);
+ read_tsc_64(&current_tsc);
+ current_idle = &idle->p_cycles; /* ptr to idle proc */
+
+ /* calculate load since last cpu_load invocation */
+ if (!is_zero64(*last_tsc)) {
+ tsc_delta = sub64(current_tsc, *last_tsc);
+ idle_delta = sub64(*current_idle, *last_idle);
+
+ busy = sub64(tsc_delta, idle_delta);
+ busy = mul64(busy, make64(100, 0));
+ load = ex64lo(div64(busy, tsc_delta));
+
+ if (load > 100)
+ load = 100;
+ } else
+ load = 0;
+
+ *last_tsc = current_tsc;
+ *last_idle = *current_idle;
+ return load;
+}
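
The load figure is the busy share of the cycles elapsed since the previous
call, scaled to a percentage: load = 100 * (tsc_delta - idle_delta) /
tsc_delta. For example, if 2e9 cycles elapsed and the idle process accumulated
1.5e9 of them, the busy delta is 0.5e9 and the reported load is 25. The same
calculation in isolation, using plain 64-bit arithmetic instead of the
kernel's u64_t helpers:

	#include <stdint.h>

	/* Percentage of an interval spent busy, given two hypothetical
	 * cycle deltas; mirrors cpu_load() minus the cpulocal bookkeeping. */
	static short load_pct(uint64_t tsc_delta, uint64_t idle_delta)
	{
		uint64_t busy = tsc_delta - idle_delta;
		short load = (short)((busy * 100) / tsc_delta);

		return load > 100 ? 100 : load;
	}

The clamp to 100 is not cosmetic: the TSC and the idle process' p_cycles are
sampled at slightly different instants, so idle_delta can overshoot tsc_delta
and the unsigned subtraction then yields a nonsense quotient.
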
_PROTOTYPE(int register_local_timer_handler, (irq_handler_t handler));
_PROTOTYPE( u64_t ms_2_cpu_time, (unsigned ms));
+_PROTOTYPE( unsigned cpu_time_2_ms, (u64_t cpu_time));
+_PROTOTYPE( short cpu_load, (void));
#endif /* __CLOCK_H__ */
DECLARE_CPULOCAL(u64_t ,tsc_ctr_switch); /* when we last switched time accounting */
+/* Last values read from the CPU when sending an out-of-quantum message to
+ * the scheduler.
+ */
+DECLARE_CPULOCAL(u64_t, cpu_last_tsc);
+DECLARE_CPULOCAL(u64_t, cpu_last_idle);
+
DECLARE_CPULOCAL(char ,fpu_presence); /* whether the cpu has FPU or not */
DECLARE_CPULOCAL(struct proc * ,fpu_owner); /* who owns the FPU of the local cpu */
ip->endpoint = rp->p_endpoint; /* ipc endpoint */
make_zero64(rp->p_cpu_time_left);
strncpy(rp->p_name, ip->proc_name, P_NAME_LEN); /* set process name */
+
+ reset_proc_accounting(rp);
/* See if this process is immediately schedulable.
* In that case, set its privileges now and allow it to run.
case RECEIVE:
case NOTIFY:
case SENDNB:
+ {
+ /* Process accounting for scheduling */
+ caller_ptr->p_accounting.ipc_sync++;
+
return do_sync_ipc(caller_ptr, call_nr, (endpoint_t) r2,
(message *) r3);
+ }
case SENDA:
{
- /*
+ /*
* Get and check the size of the argument in bytes as it is a
* table
*/
size_t msg_size = (size_t) r2;
+ /* Process accounting for scheduling */
+ caller_ptr->p_accounting.ipc_async++;
+
/* Limit size to something reasonable. An arbitrary choice is 16
* times the number of process table entries.
*/
}
#endif
+ /* Make note of when this process was added to queue */
+ read_tsc_64(&(rp->p_accounting.enter_queue));
+
#if DEBUG_SANITYCHECKS
assert(runqueues_ok_local());
#endif
rp->p_nextready = rdy_head[q]; /* chain head of queue */
rdy_head[q] = rp; /* set new queue head */
+ /* Make note of when this process was added to queue */
+ read_tsc_64(&(rp->p_accounting.enter_queue));
+
+ /* Process accounting for scheduling: enqueue_head() puts a preempted
+  * process back on the queue, so undo the dequeue count and record a
+  * preemption instead.
+  */
+ rp->p_accounting.dequeues--;
+ rp->p_accounting.preempted++;
+
#if DEBUG_SANITYCHECKS
assert(runqueues_ok_local());
#endif
/*===========================================================================*
* dequeue *
*===========================================================================*/
-PUBLIC void dequeue(const struct proc *rp)
+PUBLIC void dequeue(struct proc *rp)
/* this process is no longer runnable */
{
/* A process must be removed from the scheduling queues, for example, because
* This function can operate x-cpu as it always removes the process from the
* queue of the cpu the process is currently assigned to.
*/
- register int q = rp->p_priority; /* queue to use */
- register struct proc **xpp; /* iterate over queue */
- register struct proc *prev_xp;
+ int q = rp->p_priority; /* queue to use */
+ struct proc **xpp; /* iterate over queue */
+ struct proc *prev_xp;
+ u64_t tsc, tsc_delta;
struct proc **rdy_tail;
prev_xp = *xpp; /* save previous in chain */
}
+
+ /* Process accounting for scheduling */
+ rp->p_accounting.dequeues++;
+
+ /* This is not all that accurate on virtual machines, especially with
+  * IO bound processes that only spend a short amount of time in the queue
+  * at a time.
+  */
+ if (!is_zero64(rp->p_accounting.enter_queue)) {
+ read_tsc_64(&tsc);
+ tsc_delta = sub64(tsc, rp->p_accounting.enter_queue);
+ rp->p_accounting.time_in_queue = add64(rp->p_accounting.time_in_queue,
+ tsc_delta);
+ make_zero64(rp->p_accounting.enter_queue);
+ }
+
#if DEBUG_SANITYCHECKS
assert(runqueues_ok_local());
#endif
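
Together, enqueue() and dequeue() implement a stamp-and-accumulate pattern:
enqueueing stamps enter_queue with the current TSC, dequeueing adds the
elapsed cycles to time_in_queue and clears the stamp, and the is_zero64()
guard keeps a dequeue without a matching stamp from charging bogus time. The
same pattern in isolation, with plain integers instead of u64_t:

	#include <stdint.h>

	struct acct {
		uint64_t enter_queue;	/* 0 means no stamp outstanding */
		uint64_t time_in_queue;	/* accumulated runnable cycles */
	};

	static void on_enqueue(struct acct *a, uint64_t now)
	{
		a->enter_queue = now;	/* stamp the entry time */
	}

	static void on_dequeue(struct acct *a, uint64_t now)
	{
		if (a->enter_queue != 0) {	/* only if stamped */
			a->time_in_queue += now - a->enter_queue;
			a->enter_queue = 0;	/* consume the stamp */
		}
	}
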
*/
m_no_quantum.m_source = p->p_endpoint;
m_no_quantum.m_type = SCHEDULING_NO_QUANTUM;
+ m_no_quantum.SCHEDULING_ACNT_QUEUE =
+	cpu_time_2_ms(p->p_accounting.time_in_queue);
+ m_no_quantum.SCHEDULING_ACNT_DEQS = p->p_accounting.dequeues;
+ m_no_quantum.SCHEDULING_ACNT_IPC_SYNC = p->p_accounting.ipc_sync;
+ m_no_quantum.SCHEDULING_ACNT_IPC_ASYNC = p->p_accounting.ipc_async;
+ m_no_quantum.SCHEDULING_ACNT_PREEMPT = p->p_accounting.preempted;
+ m_no_quantum.SCHEDULING_ACNT_CPU = cpuid;
+ m_no_quantum.SCHEDULING_ACNT_CPU_LOAD = cpu_load();
+
+ /* Reset accounting */
+ reset_proc_accounting(p);
if ((err = mini_send(p, p->p_scheduler->p_endpoint,
&m_no_quantum, FROM_KERNEL))) {
#endif
}
}
+
+PUBLIC void reset_proc_accounting(struct proc *p)
+{
+ p->p_accounting.preempted = 0;
+ p->p_accounting.ipc_sync = 0;
+ p->p_accounting.ipc_async = 0;
+ p->p_accounting.dequeues = 0;
+ make_zero64(p->p_accounting.time_in_queue);
+ make_zero64(p->p_accounting.enter_queue);
+}
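
On the receiving end, the userspace scheduler can weigh these statistics when
it hands out the next quantum. A hypothetical out-of-quantum handler follows;
the schedproc table, sched_isokendpt(), schedule_process() and MIN_USER_Q are
assumed sched-server helpers and are not part of this kernel patch:

	static int do_noquantum(message *m_ptr)
	{
		struct schedproc *rmp;
		int rv, proc_nr_n;

		if (sched_isokendpt(m_ptr->m_source, &proc_nr_n) != OK)
			return EBADEPT;
		rmp = &schedproc[proc_nr_n];

		/* A process that burned through its whole quantum without
		 * being preempted looks cpu bound: drop it one priority
		 * queue. Frequently preempted processes keep theirs. */
		if (m_ptr->SCHEDULING_ACNT_PREEMPT == 0 &&
				rmp->priority < MIN_USER_Q)
			rmp->priority += 1;

		if ((rv = schedule_process(rmp)) != OK)
			return rv;
		return OK;
	}
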
PUBLIC void copr_not_available_handler(void)
{
run on */
#endif
+ /* Accounting statistics that get passed to the process' scheduler */
+ struct {
+ u64_t enter_queue; /* time when enqueued (cycles) */
+ u64_t time_in_queue; /* time spent in queue (cycles) */
+ unsigned long dequeues; /* # of times removed from the run queue */
+ unsigned long ipc_sync; /* # of synchronous ipc calls made */
+ unsigned long ipc_async; /* # of asynchronous ipc calls made */
+ unsigned long preempted; /* # of times preempted */
+ } p_accounting;
+
struct mem_map p_memmap[NR_LOCAL_SEGS]; /* memory map (T, D, S) */
clock_t p_user_time; /* user time in ticks */
_PROTOTYPE( void proc_init, (void) );
_PROTOTYPE( int mini_notify, (const struct proc *src, endpoint_t dst) );
_PROTOTYPE( void enqueue, (struct proc *rp) );
-_PROTOTYPE( void dequeue, (const struct proc *rp) );
+_PROTOTYPE( void dequeue, (struct proc *rp) );
_PROTOTYPE( void switch_to_user, (void) );
_PROTOTYPE( struct proc * arch_finish_switch_to_user, (void) );
_PROTOTYPE( struct proc *endpoint_lookup, (endpoint_t ep) );
#define isokendpt_d(e, p, f) isokendpt_f((e), (p), (f))
#endif
_PROTOTYPE( void proc_no_time, (struct proc *p));
+_PROTOTYPE( void reset_proc_accounting, (struct proc *p));
+_PROTOTYPE( void flag_account, (struct proc *p, int flag));
/* start.c */
_PROTOTYPE( void cstart, (u16_t cs, u16_t ds, u16_t mds,
/* the child process is not runnable until it's scheduled. */
RTS_SET(rpc, RTS_NO_QUANTUM);
+ reset_proc_accounting(rpc);
make_zero64(rpc->p_cpu_time_left);
make_zero64(rpc->p_cycles);