/*
 * hello_prepare - prepare for a transfer on minor device <dev>.
 *
 * Returns a pointer to the (single, static) device geometry structure.
 * The whole "device" is the static greeting string: base offset 0,
 * size strlen(HELLO_MESSAGE) bytes.  The 64-bit fields are built with
 * make64(lo, hi) rather than by poking the u64_t members directly.
 *
 * NOTE(review): the <dev> minor number is ignored here — presumably
 * this driver exposes exactly one minor; confirm against the caller.
 */
PRIVATE struct device * hello_prepare(dev)
int dev;
{
	hello_device.dv_base = make64(0, 0);
	hello_device.dv_size = make64(strlen(HELLO_MESSAGE), 0);

	return &hello_device;
}
printf("hello_transfer()\n");
- bytes = strlen(HELLO_MESSAGE) - position.lo < iov->iov_size ?
- strlen(HELLO_MESSAGE) - position.lo : iov->iov_size;
+ bytes = strlen(HELLO_MESSAGE) - ex64lo(position) < iov->iov_size ?
+ strlen(HELLO_MESSAGE) - ex64lo(position) : iov->iov_size;
if (bytes <= 0)
{
{
case DEV_GATHER_S:
ret = sys_safecopyto(proc_nr, iov->iov_addr, 0,
- (vir_bytes) (HELLO_MESSAGE + position.lo),
+ (vir_bytes) (HELLO_MESSAGE + ex64lo(position)),
bytes, D);
iov->iov_size -= bytes;
break;
PRIVATE int lapic_disable_in_msr(void)
{
- u64_t msr;
- u32_t addr;
+ u32_t addr, msr_hi, msr_lo;
- ia32_msr_read(IA32_APIC_BASE, &msr.hi, &msr.lo);
+ ia32_msr_read(IA32_APIC_BASE, &msr_hi, &msr_lo);
- msr.lo &= ~(1 << IA32_APIC_BASE_ENABLE_BIT);
- ia32_msr_write(IA32_APIC_BASE, msr.hi, msr.lo);
+ msr_lo &= ~(1 << IA32_APIC_BASE_ENABLE_BIT);
+ ia32_msr_write(IA32_APIC_BASE, msr_hi, msr_lo);
return 1;
}
PRIVATE int lapic_enable_in_msr(void)
{
- u64_t msr;
- u32_t addr;
+ u32_t addr, msr_hi, msr_lo;
- ia32_msr_read(IA32_APIC_BASE, &msr.hi, &msr.lo);
+ ia32_msr_read(IA32_APIC_BASE, &msr_hi, &msr_lo);
#if 0
/*FIXME this is a problem on AP */
* FIXME if the location is different (unlikely) then the one we expect,
* update it
*/
- addr = (msr.lo >> 12) | ((msr.hi & 0xf) << 20);
+ addr = (msr_lo >> 12) | ((msr_hi & 0xf) << 20);
if (phys2vir(addr) != (lapic_addr >> 12)) {
- if (msr.hi & 0xf) {
+ if (msr_hi & 0xf) {
printf("ERROR : APIC address needs more then 32 bits\n");
return 0;
}
- lapic_addr = phys2vir(msr.lo & ~((1 << 12) - 1));
+ lapic_addr = phys2vir(msr_lo & ~((1 << 12) - 1));
}
#endif
- msr.lo |= (1 << IA32_APIC_BASE_ENABLE_BIT);
- ia32_msr_write(IA32_APIC_BASE, msr.hi, msr.lo);
+ msr_lo |= (1 << IA32_APIC_BASE_ENABLE_BIT);
+ ia32_msr_write(IA32_APIC_BASE, msr_hi, msr_lo);
return 1;
}
make_zero64(p->p_cpu_time_left);
#else
/* if (tsc_delta < p->p_cpu_time_left) in 64bit */
- if (tsc_delta.hi < p->p_cpu_time_left.hi ||
- (tsc_delta.hi == p->p_cpu_time_left.hi &&
- tsc_delta.lo < p->p_cpu_time_left.lo))
+ if (ex64hi(tsc_delta) < ex64hi(p->p_cpu_time_left) ||
+ (ex64hi(tsc_delta) == ex64hi(p->p_cpu_time_left) &&
+ ex64lo(tsc_delta) < ex64lo(p->p_cpu_time_left)))
p->p_cpu_time_left = sub64(p->p_cpu_time_left, tsc_delta);
else {
make_zero64(p->p_cpu_time_left);
busy = sub64(tsc_delta, idle_delta);
busy = mul64(busy, make64(100, 0));
- load = div64(busy, tsc_delta).lo;
+ load = ex64lo(div64(busy, tsc_delta));
if (load > 100)
load = 100;
printf("--- BKL usage ---\n");
for (cpu = 0; cpu < ncpus; cpu++) {
printf("cpu %3d kernel ticks 0x%x%08x bkl ticks 0x%x%08x succ %d tries %d\n", cpu,
- kernel_ticks[cpu].hi, kernel_ticks[cpu].lo,
- bkl_ticks[cpu].hi, bkl_ticks[cpu].lo,
+ ex64hi(kernel_ticks[cpu]),
+ ex64lo(kernel_ticks[cpu]),
+ ex64hi(bkl_ticks[cpu]),
+ ex64lo(bkl_ticks[cpu]),
bkl_succ[cpu], bkl_tries[cpu]);
}
}
* lowest 31 bits writable :(
*/
cpuf = cpu_get_freq(cpu);
- while (cpuf.hi || cpuf.lo > 0x7fffffffU)
+ while (ex64hi(cpuf) || ex64lo(cpuf) > 0x7fffffffU)
cpuf = div64u64(cpuf, 2);
- cpuf.lo = -cpuf.lo;
+ cpuf = make64(-ex64lo(cpuf), ex64hi(cpuf));
watchdog->resetval = watchdog->watchdog_resetval = cpuf;
- ia32_msr_write(INTEL_MSR_PERFMON_CRT0, 0, cpuf.lo);
+ ia32_msr_write(INTEL_MSR_PERFMON_CRT0, 0, ex64lo(cpuf));
ia32_msr_write(INTEL_MSR_PERFMON_SEL0, 0,
val | INTEL_MSR_PERFMON_SEL0_ENABLE);
/*
 * intel_arch_watchdog_reinit - re-arm the NMI watchdog after it fired.
 *
 * Re-enables the performance-counter NMI delivery in the local APIC
 * and reloads counter 0 with the saved (negated) reset value so it
 * overflows again after the same interval.  Only the low 32 bits of
 * the reset value are written; the counter's writable range is
 * limited (see the 31-bit note where resetval is computed).
 *
 * NOTE(review): the <cpu> argument is unused here — the MSR write
 * implicitly targets the current CPU; confirm callers run on <cpu>.
 */
PRIVATE void intel_arch_watchdog_reinit(const unsigned cpu)
{
	lapic_write(LAPIC_LVTPCR, APIC_ICR_DM_NMI);
	ia32_msr_write(INTEL_MSR_PERFMON_CRT0, 0, ex64lo(watchdog->resetval));
}
PUBLIC int arch_watchdog_init(void)
* if freq is too low and the cpu freq too high we may get in a range of
* insane value which cannot be handled by the 31bit CPU perf counter
*/
- if (cpuf.hi != 0 || cpuf.lo > 0x7fffffffU) {
+ if (ex64hi(cpuf) != 0 || ex64lo(cpuf) > 0x7fffffffU) {
printf("ERROR : nmi watchdog ticks exceed 31bits, use higher frequency\n");
return EINVAL;
}
- cpuf.lo = -cpuf.lo;
+ cpuf = make64(-ex64lo(cpuf), ex64hi(cpuf));
watchdog->profile_resetval = cpuf;
return OK;
watchdog->resetval = watchdog->watchdog_resetval = cpuf;
ia32_msr_write(AMD_MSR_EVENT_CTR0,
- watchdog->resetval.hi, watchdog->resetval.lo);
+ ex64hi(watchdog->resetval), ex64lo(watchdog->resetval));
ia32_msr_write(AMD_MSR_EVENT_SEL0, 0,
val | AMD_MSR_EVENT_SEL0_ENABLE);
{
lapic_write(LAPIC_LVTPCR, APIC_ICR_DM_NMI);
ia32_msr_write(AMD_MSR_EVENT_CTR0,
- watchdog->resetval.hi, watchdog->resetval.lo);
+ ex64hi(watchdog->resetval), ex64lo(watchdog->resetval));
}
PRIVATE int amd_watchdog_profile_init(const unsigned freq)
"cr3 0x%lx rts %s misc %s sched %s ",
proc_nr(pp), pp->p_name, pp->p_endpoint,
pp->p_priority, pp->p_user_time,
- pp->p_sys_time, pp->p_cycles.hi, pp->p_cycles.lo, pp->p_cpu,
+ pp->p_sys_time, ex64hi(pp->p_cycles),
+ ex64lo(pp->p_cycles), pp->p_cpu,
pp->p_seg.p_cr3,
rtsflagstr(pp->p_rts_flags), miscflagstr(pp->p_misc_flags),
schedulerstr(pp->p_scheduler));
read_tsc_64(&tsc);
totalsize = block_get_totalsize(size);
page_index_max = (ptr_max - ptr_min - totalsize) / PAGE_SIZE;
- page_index = (page_index_max > 0) ? (tsc.lo % page_index_max) : 0;
+ page_index = (page_index_max > 0) ? (ex64lo(tsc) % page_index_max) : 0;
ptr = ptr_min + page_index * PAGE_SIZE;
/* allocate block */
PUBLIC void procexit (char *UNUSED(name))
{
u64_t stop, spent;
+ u32_t tsc_lo, tsc_hi;
/* Procexit is not reentrant. */
if (cprof_locked) return; else cprof_locked = 1;
/* First thing: read CPU cycle count into local variable. */
- read_tsc(&stop.hi, &stop.lo);
+ read_tsc(&tsc_hi, &tsc_lo);
+ stop = make64(tsc_lo, tsc_hi);
/* Only continue if sane. */
if (control.err) return;
*/
/* Read CPU cycle count. */
- read_tsc(&stop.hi, &stop.lo);
+ read_tsc(&tsc_hi, &tsc_lo);
+ stop = make64(tsc_lo, tsc_hi);
/* Calculate "big" difference. */
spent = sub64(stop, cprof_stk[cprof_stk_top].start_1);
memset(cprof_tbl[i].cpath, '\0', CPROF_CPATH_MAX_LEN);
cprof_tbl[i].next = 0;
cprof_tbl[i].calls = 0;
- cprof_tbl[i].cycles.lo = 0;
- cprof_tbl[i].cycles.hi = 0;
+ cprof_tbl[i].cycles = make64(0, 0);
}
}