if(!size_known) {
disk_size = part.size;
size_known = 1;
- sectors = div64u(disk_size, SECTOR_SIZE);
- if(cmp64(mul64u(sectors, SECTOR_SIZE), disk_size)) {
+ sectors = (unsigned long)(disk_size / SECTOR_SIZE);
+ if ((u64_t)sectors * SECTOR_SIZE != disk_size) {
printf("Filter: partition too large\n");
return RET_REDO;
disk_size, sectors);
#endif
} else {
- if(cmp64(disk_size, part.size)) {
+ if (disk_size != part.size) {
printf("Filter: partition size mismatch "
"(0x%"PRIx64" != 0x%"PRIx64")\n",
part.size, disk_size);
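A worked example of the size check above (illustrative; a 32-bit unsigned long and SECTOR_SIZE = 512 are assumed): a disk_size of 0x20000000000 (2 TiB) divided by 512 gives 0x100000000 sectors, which truncates to 0 when stored in a 32-bit unsigned long; multiplying back yields 0 != disk_size, so the partition is rejected as too large. The same comparison also rejects a disk_size that is not a multiple of SECTOR_SIZE.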
* report the driver for acting strangely!
*/
if (m1.BDEV_STATUS > (ssize_t) *sizep ||
- cmp64(add64u(pos, m1.BDEV_STATUS), disk_size) < 0)
+ (pos + (unsigned int) m1.BDEV_STATUS < disk_size))
return bad_driver(DRIVER_MAIN, BD_PROTO, EFAULT);
/* Return the actual size. */
/* As above */
if (m2.BDEV_STATUS > (ssize_t) *sizep ||
- cmp64(add64u(pos, m2.BDEV_STATUS),
- disk_size) < 0)
+ (pos + (unsigned int) m2.BDEV_STATUS <
+ disk_size))
return bad_driver(DRIVER_BACKUP, BD_PROTO,
EFAULT);
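Reading of the bounds checks above (interpretation, not stated in the patch): a driver may legitimately return fewer bytes than requested only when the request ran past the end of the partition, so a short result that still leaves pos + status below disk_size, or any result larger than what was asked for, is reported as a protocol violation against the main or backup driver respectively.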
sizeof(*req)) == VMMDEV_ERR_OK) {
time(&otime); /* old time */
- ntime = div64u(req->time, 1000); /* new time */
+ ntime = (unsigned long)(req->time / 1000); /* new time */
/* Make time go forward, if the difference exceeds the drift
* threshold. Never make time go backward.
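A minimal sketch of the forward-only adjustment described in the comment above (illustrative; drift_threshold and set_system_time are hypothetical names, not from the patch):

        /* Step the clock only forward, and only when the gap exceeds the
         * allowed drift; never adjust it backward.
         */
        if (ntime > otime && ntime - otime >= drift_threshold)
                set_system_time(ntime);         /* hypothetical setter */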
#include <limits.h>
-#define is_zero64(i) ((i) == 0)
-#define make_zero64(i) ((i) = 0)
-#define neg64(i) ((i) = -(i))
-
static inline u64_t add64(u64_t i, u64_t j)
{
return i + j;
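A minimal standalone sketch of the rewrite pattern this header change enables (illustrative, not part of the patch): with u64_t now a native 64-bit integer, the retired helper calls reduce to plain C arithmetic.

        #include <stdint.h>

        typedef uint64_t u64_t;         /* assumed: u64_t is a native 64-bit type */

        /* Accumulate the cycles elapsed since 'then' into '*total'. */
        static u64_t account(u64_t now, u64_t then, u64_t *total)
        {
                u64_t delta = now - then;       /* was: sub64(now, then) */

                *total = *total + delta;        /* was: add64(*total, delta) */
                return delta;
        }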
/* remove the probe */
rm_irq_handler(&calib_cpu);
- tsc_delta = sub64(tsc1, tsc0);
+ tsc_delta = tsc1 - tsc0;
- cpu_freq = mul64(div64u64(tsc_delta, PROBE_TICKS - 1), make64(system_hz, 0));
+ cpu_freq = (tsc_delta / (PROBE_TICKS - 1)) * system_hz;
cpu_set_freq(cpuid, cpu_freq);
- cpu_info[cpuid].freq = div64u(cpu_freq, 1000000);
+ cpu_info[cpuid].freq = (unsigned long)(cpu_freq / 1000000);
BOOT_VERBOSE(cpu_print_freq(cpuid));
}
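Worked example of the frequency estimate above (illustrative, values assumed): if the probe spans PROBE_TICKS - 1 = 5 timer ticks, system_hz = 60, and tsc_delta = 200,000,000 cycles, then 200,000,000 / 5 * 60 = 2,400,000,000 cycles per second, i.e. a 2.4 GHz TSC.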
/* if we know the address, lapic is enabled and we should use it */
if (lapic_addr) {
unsigned cpu = cpuid;
- tsc_per_ms[cpu] = div64u(cpu_get_freq(cpu), 1000);
- lapic_set_timer_one_shot(1000000/system_hz);
- } else
- {
+ tsc_per_ms[cpu] = (unsigned long)(cpu_get_freq(cpu) / 1000);
+ lapic_set_timer_one_shot(1000000 / system_hz);
+ } else {
BOOT_VERBOSE(printf("Initiating legacy i8253 timer\n"));
#else
{
init_8253A_timer(freq);
estimate_cpu_freq();
/* always only 1 cpu in the system */
- tsc_per_ms[0] = div64u(cpu_get_freq(0), 1000);
+ tsc_per_ms[0] = (unsigned long)(cpu_get_freq(0) / 1000);
}
return 0;
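Continuing the example (illustrative): at 2.4 GHz, cpu_get_freq() / 1000 gives tsc_per_ms = 2,400,000 TSC cycles per millisecond, which fits the unsigned long that the cast truncates to.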
read_tsc_64(get_cpu_var_ptr(cpu, tsc_ctr_switch));
- make_zero64(get_cpu_var(cpu, cpu_last_tsc));
- make_zero64(get_cpu_var(cpu, cpu_last_idle));
+ get_cpu_var(cpu, cpu_last_tsc) = 0;
+ get_cpu_var(cpu, cpu_last_idle) = 0;
}
void context_stop(struct proc * p)
u64_t tmp;
read_tsc_64(&tsc);
- tmp = sub64(tsc, *__tsc_ctr_switch);
- kernel_ticks[cpu] = add64(kernel_ticks[cpu], tmp);
- p->p_cycles = add64(p->p_cycles, tmp);
+ tmp = tsc - *__tsc_ctr_switch;
+ kernel_ticks[cpu] = kernel_ticks[cpu] + tmp;
+ p->p_cycles = p->p_cycles + tmp;
must_bkl_unlock = 1;
} else {
u64_t bkl_tsc;
read_tsc_64(&tsc);
- bkl_ticks[cpu] = add64(bkl_ticks[cpu], sub64(tsc, bkl_tsc));
+ bkl_ticks[cpu] = bkl_ticks[cpu] + tsc - bkl_tsc;
bkl_tries[cpu]++;
bkl_succ[cpu] += !(!(succ == 0));
- p->p_cycles = add64(p->p_cycles, sub64(tsc, *__tsc_ctr_switch));
+ p->p_cycles = p->p_cycles + tsc - *__tsc_ctr_switch;
#ifdef CONFIG_SMP
/*
}
#else
read_tsc_64(&tsc);
- p->p_cycles = add64(p->p_cycles, sub64(tsc, *__tsc_ctr_switch));
+ p->p_cycles = p->p_cycles + tsc - *__tsc_ctr_switch;
#endif
- tsc_delta = sub64(tsc, *__tsc_ctr_switch);
+ tsc_delta = tsc - *__tsc_ctr_switch;
- if(kbill_ipc) {
+ if (kbill_ipc) {
kbill_ipc->p_kipc_cycles =
- add64(kbill_ipc->p_kipc_cycles, tsc_delta);
+ kbill_ipc->p_kipc_cycles + tsc_delta;
kbill_ipc = NULL;
}
- if(kbill_kcall) {
+ if (kbill_kcall) {
kbill_kcall->p_kcall_cycles =
- add64(kbill_kcall->p_kcall_cycles, tsc_delta);
+ kbill_kcall->p_kcall_cycles + tsc_delta;
kbill_kcall = NULL;
}
*/
if (p->p_endpoint >= 0) {
#if DEBUG_RACE
- make_zero64(p->p_cpu_time_left);
+ p->p_cpu_time_left = 0;
#else
/* if (tsc_delta < p->p_cpu_time_left) in 64bit */
if (ex64hi(tsc_delta) < ex64hi(p->p_cpu_time_left) ||
(ex64hi(tsc_delta) == ex64hi(p->p_cpu_time_left) &&
ex64lo(tsc_delta) < ex64lo(p->p_cpu_time_left)))
- p->p_cpu_time_left = sub64(p->p_cpu_time_left, tsc_delta);
+ p->p_cpu_time_left = p->p_cpu_time_left - tsc_delta;
else {
- make_zero64(p->p_cpu_time_left);
+ p->p_cpu_time_left = 0;
}
#endif
}
u64_t ms_2_cpu_time(unsigned ms)
{
- return mul64u(tsc_per_ms[cpuid], ms);
+ return (u64_t)tsc_per_ms[cpuid] * ms;
}
unsigned cpu_time_2_ms(u64_t cpu_time)
{
- return div64u(cpu_time, tsc_per_ms[cpuid]);
+ return (unsigned long)(cpu_time / tsc_per_ms[cpuid]);
}
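Round-trip check for the two helpers above (illustrative, reusing tsc_per_ms = 2,400,000 from the earlier example): ms_2_cpu_time(5) = 2,400,000 * 5 = 12,000,000 cycles, and cpu_time_2_ms(12,000,000) = 12,000,000 / 2,400,000 = 5 ms; the (unsigned long) cast merely truncates the 64-bit quotient before it is narrowed to the return type.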
short cpu_load(void)
current_idle = &idle->p_cycles; /* ptr to idle proc */
/* calculate load since last cpu_load invocation */
- if (!is_zero64(*last_tsc)) {
- tsc_delta = sub64(current_tsc, *last_tsc);
- idle_delta = sub64(*current_idle, *last_idle);
+ if (*last_tsc) {
+ tsc_delta = current_tsc - *last_tsc;
+ idle_delta = *current_idle - *last_idle;
- busy = sub64(tsc_delta, idle_delta);
- busy = mul64(busy, make64(100, 0));
- load = ex64lo(div64(busy, tsc_delta));
+ busy = tsc_delta - idle_delta;
+ busy = busy * 100;
+ load = ex64lo(busy / tsc_delta);
if (load > 100)
load = 100;
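Worked example of the load calculation above (illustrative): with tsc_delta = 1,000,000 cycles of which idle_delta = 250,000 were spent in the idle process, busy = 750,000, busy * 100 = 75,000,000, and 75,000,000 / 1,000,000 gives a load of 75.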
val = 1 << 20 | 1 << 17 | 1 << 16 | 0x76;
ia32_msr_write(AMD_MSR_EVENT_SEL0, 0, val);
- cpuf = cpu_get_freq(cpu);
- neg64(cpuf);
+ cpuf = -cpu_get_freq(cpu);
watchdog->resetval = watchdog->watchdog_resetval = cpuf;
ia32_msr_write(AMD_MSR_EVENT_CTR0,
/* FIXME works only if all CPUs have the same freq */
cpuf = cpu_get_freq(cpuid);
- cpuf = div64u64(cpuf, freq);
- neg64(cpuf);
+ cpuf = -div64u64(cpuf, freq);
watchdog->profile_resetval = cpuf;
return OK;
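Note on the negations above (interpretation, not stated in the patch): the AMD performance counter counts upward and raises its interrupt on overflow, so loading the negated cycle count makes it fire after exactly that many unhalted cycles, once per watchdog period or profiling tick.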
DEBUGEXTRA(("initializing %s... ", ip->proc_name));
rp = proc_addr(ip->proc_nr); /* get process pointer */
ip->endpoint = rp->p_endpoint; /* ipc endpoint */
- make_zero64(rp->p_cpu_time_left);
+ rp->p_cpu_time_left = 0;
if(i < NR_TASKS) /* name (tasks only) */
strlcpy(rp->p_name, ip->proc_name, sizeof(rp->p_name));
if (proc_is_preempted(p)) {
p->p_rts_flags &= ~RTS_PREEMPTED;
if (proc_is_runnable(p)) {
- if (!is_zero64(p->p_cpu_time_left))
+ if (p->p_cpu_time_left)
enqueue_head(p);
else
enqueue(p);
* as we are sure that a possible out-of-quantum message to the
* scheduler will not collide with the regular ipc
*/
- if (is_zero64(p->p_cpu_time_left))
+ if (!p->p_cpu_time_left)
proc_no_time(p);
/*
* After handling the misc flags the selected process might not be
#endif
p = arch_finish_switch_to_user();
- assert(!is_zero64(p->p_cpu_time_left));
+ assert(p->p_cpu_time_left);
context_stop(proc_addr(KERNEL));
/* If the process isn't the owner of FPU, enable the FPU exception */
- if(get_cpulocal_var(fpu_owner) != p)
+ if (get_cpulocal_var(fpu_owner) != p)
enable_fpu_exception();
else
disable_fpu_exception();
* the process was runnable without its quantum expired when dequeued. A
* process with no time left should have been handled else and differently
*/
- assert(!is_zero64(rp->p_cpu_time_left));
+ assert(rp->p_cpu_time_left);
assert(q >= 0);
/* this is not all that accurate on virtual machines, especially with
IO bound processes that only spend a short amount of time in the queue
at a time. */
- if (!is_zero64(rp->p_accounting.enter_queue)) {
+ if (rp->p_accounting.enter_queue) {
read_tsc_64(&tsc);
- tsc_delta = sub64(tsc, rp->p_accounting.enter_queue);
- rp->p_accounting.time_in_queue = add64(rp->p_accounting.time_in_queue,
- tsc_delta);
- make_zero64(rp->p_accounting.enter_queue);
+ tsc_delta = tsc - rp->p_accounting.enter_queue;
+ rp->p_accounting.time_in_queue = rp->p_accounting.time_in_queue +
+ tsc_delta;
+ rp->p_accounting.enter_queue = 0;
}
p->p_accounting.ipc_sync = 0;
p->p_accounting.ipc_async = 0;
p->p_accounting.dequeues = 0;
- make_zero64(p->p_accounting.time_in_queue);
- make_zero64(p->p_accounting.enter_queue);
+ p->p_accounting.time_in_queue = 0;
+ p->p_accounting.enter_queue = 0;
}
void copr_not_available_handler(void)
RTS_SET(rpc, RTS_NO_QUANTUM);
reset_proc_accounting(rpc);
- make_zero64(rpc->p_cpu_time_left);
- make_zero64(rpc->p_cycles);
- make_zero64(rpc->p_kcall_cycles);
- make_zero64(rpc->p_kipc_cycles);
+ rpc->p_cpu_time_left = 0;
+ rpc->p_cycles = 0;
+ rpc->p_kcall_cycles = 0;
+ rpc->p_kipc_cycles = 0;
rpc->p_signal_received = 0;
/* If the parent is a privileged process, take away the privileges from the