minix.git/commitdiff
Some more 64bit function eradication.
author Lukasz Hryniuk <lukequaint@gmail.com>
Wed, 7 Aug 2013 10:17:09 +0000 (12:17 +0200)
committer Ben Gras <ben@minix3.org>
Wed, 7 Aug 2013 12:35:53 +0000 (12:35 +0000)
  . Replace 64bit functions with operators in arch_clock.c
  . Replace 64bit functions with operators in proc.c
  . Replace 64bit functions with operators in vbox.c
  . Replace 64bit functions with operators in driver.c
  . Eradicate is_zero64, make_zero64, neg64

Change-Id: Ie4e1242a73534f114725271b2e2365b2004cb7b9
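
For reference, the conversion pattern applied throughout the diff below is a direct swap of the <minix/u64.h> helper calls for native integer operators. A minimal standalone sketch of that pattern, assuming u64_t is a plain 64-bit integer (modeled here as uint64_t) and using an illustrative SECTOR_SIZE value:

/*
 * Sketch only: u64_t and SECTOR_SIZE are stand-ins; the helper names in
 * the "old" comments (div64u, mul64u, cmp64, make_zero64, is_zero64) are
 * the ones replaced or removed by this change.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64_t;		/* stand-in for the MINIX u64_t type */
#define SECTOR_SIZE 512		/* illustrative value only */

int main(void)
{
	u64_t disk_size = 1234567890ULL;
	unsigned long sectors;

	/* old: sectors = div64u(disk_size, SECTOR_SIZE); */
	sectors = (unsigned long)(disk_size / SECTOR_SIZE);

	/* old: if (cmp64(mul64u(sectors, SECTOR_SIZE), disk_size)) ... */
	if ((u64_t)sectors * SECTOR_SIZE != disk_size)
		printf("size is not a whole number of sectors\n");

	/* old: make_zero64(disk_size); ... if (is_zero64(disk_size)) ... */
	disk_size = 0;
	if (!disk_size)
		printf("cleared\n");

	return 0;
}

The operator form only needs an explicit cast where a 64-bit result is narrowed back to unsigned long, as in the div64u replacements in arch_clock.c below.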

drivers/filter/driver.c
drivers/vbox/vbox.c
include/minix/u64.h
kernel/arch/earm/arch_clock.c
kernel/arch/i386/arch_clock.c
kernel/arch/i386/arch_watchdog.c
kernel/main.c
kernel/proc.c
kernel/system/do_fork.c

index 35464b10efd0ac33ce068ffd384fab40fc84c9e6..d0a959078bb0bd423e7eb3cb00c26756a38709a2 100644 (file)
@@ -77,8 +77,8 @@ static int driver_open(int which)
        if(!size_known) {
                disk_size = part.size;
                size_known = 1;
-               sectors = div64u(disk_size, SECTOR_SIZE);
-               if(cmp64(mul64u(sectors, SECTOR_SIZE), disk_size)) {
+               sectors = (unsigned long)(disk_size / SECTOR_SIZE);
+               if ((u64_t)sectors * SECTOR_SIZE != disk_size) {
                        printf("Filter: partition too large\n");
 
                        return RET_REDO;
@@ -88,7 +88,7 @@ static int driver_open(int which)
                        disk_size, sectors);
 #endif
        } else {
-               if(cmp64(disk_size, part.size)) {
+               if (disk_size != part.size) {
                        printf("Filter: partition size mismatch "
                                "(0x%"PRIx64" != 0x%"PRIx64")\n",
                                part.size, disk_size);
@@ -954,7 +954,7 @@ int read_write(u64_t pos, char *bufa, char *bufb, size_t *sizep, int request)
                 * report the driver for acting strangely!
                 */
                if (m1.BDEV_STATUS > (ssize_t) *sizep ||
-                       cmp64(add64u(pos, m1.BDEV_STATUS), disk_size) < 0)
+                       (pos + (unsigned int) m1.BDEV_STATUS < disk_size))
                        return bad_driver(DRIVER_MAIN, BD_PROTO, EFAULT);
 
                /* Return the actual size. */
@@ -976,8 +976,8 @@ int read_write(u64_t pos, char *bufa, char *bufb, size_t *sizep, int request)
 
                        /* As above */
                        if (m2.BDEV_STATUS > (ssize_t) *sizep ||
-                                       cmp64(add64u(pos, m2.BDEV_STATUS),
-                                       disk_size) < 0)
+                                       (pos + (unsigned int) m2.BDEV_STATUS <
+                                       disk_size))
                                return bad_driver(DRIVER_BACKUP, BD_PROTO,
                                        EFAULT);
 
index 8a1a95bbb5f2a03af64f0a9da8a7b6446efbb09a..f19def0981305111552e7dd99f835861910c98c3 100644 (file)
@@ -161,7 +161,7 @@ static void vbox_update_time(void)
                        sizeof(*req)) == VMMDEV_ERR_OK) {
                time(&otime);                           /* old time */
 
-               ntime = div64u(req->time, 1000);        /* new time */
+               ntime = (unsigned long)(req->time / 1000);      /* new time */
 
                /* Make time go forward, if the difference exceeds the drift
                 * threshold. Never make time go backward.
index 807abfe7c3b4cd301bddec6fac0d661345bef17b..a03cae37c6c992db498be36875d11083a17dcf66 100644 (file)
@@ -9,10 +9,6 @@
 
 #include <limits.h>
 
-#define is_zero64(i)   ((i) == 0)
-#define make_zero64(i)  ((i) = 0)
-#define neg64(i)       ((i) = -(i))
-
 static inline u64_t add64(u64_t i, u64_t j)
 {
        return i + j;
index 8a0e7b1d4b5833af7d10c9b8cdaf629f48df9e3e..4d2c2164fc97adb5571d3237d8a0cc6bf07846c4 100644 (file)
@@ -52,8 +52,8 @@ void cycles_accounting_init(void)
 {
        read_tsc_64(get_cpu_var_ptr(cpu, tsc_ctr_switch));
 
-       make_zero64(get_cpu_var(cpu, cpu_last_tsc));
-       make_zero64(get_cpu_var(cpu, cpu_last_idle));
+       get_cpu_var(cpu, cpu_last_tsc) = 0;
+       get_cpu_var(cpu, cpu_last_idle) = 0;
 }
 
 void context_stop(struct proc * p)
index ee20d2961855701802e477bc6bc506e315c237bc..ba7af4d0f70dbc9a483acf610e64a52625ac2a14 100644 (file)
@@ -119,11 +119,11 @@ static void estimate_cpu_freq(void)
        /* remove the probe */
        rm_irq_handler(&calib_cpu);
 
-       tsc_delta = sub64(tsc1, tsc0);
+       tsc_delta = tsc1 - tsc0;
 
-       cpu_freq = mul64(div64u64(tsc_delta, PROBE_TICKS - 1), make64(system_hz, 0));
+       cpu_freq = (tsc_delta / (PROBE_TICKS - 1)) * system_hz;
        cpu_set_freq(cpuid, cpu_freq);
-       cpu_info[cpuid].freq = div64u(cpu_freq, 1000000);
+       cpu_info[cpuid].freq = (unsigned long)(cpu_freq / 1000000);
        BOOT_VERBOSE(cpu_print_freq(cpuid));
 }
 
@@ -133,10 +133,9 @@ int init_local_timer(unsigned freq)
        /* if we know the address, lapic is enabled and we should use it */
        if (lapic_addr) {
                unsigned cpu = cpuid;
-               tsc_per_ms[cpu] = div64u(cpu_get_freq(cpu), 1000);
-               lapic_set_timer_one_shot(1000000/system_hz);
-       } else
-       {
+               tsc_per_ms[cpu] = (unsigned long)(cpu_get_freq(cpu) / 1000);
+               lapic_set_timer_one_shot(1000000 / system_hz);
+       } else {
                BOOT_VERBOSE(printf("Initiating legacy i8253 timer\n"));
 #else
        {
@@ -144,7 +143,7 @@ int init_local_timer(unsigned freq)
                init_8253A_timer(freq);
                estimate_cpu_freq();
                /* always only 1 cpu in the system */
-               tsc_per_ms[0] = div64u(cpu_get_freq(0), 1000);
+               tsc_per_ms[0] = (unsigned long)(cpu_get_freq(0) / 1000);
        }
 
        return 0;
@@ -199,8 +198,8 @@ void cycles_accounting_init(void)
 
        read_tsc_64(get_cpu_var_ptr(cpu, tsc_ctr_switch));
 
-       make_zero64(get_cpu_var(cpu, cpu_last_tsc));
-       make_zero64(get_cpu_var(cpu, cpu_last_idle));
+       get_cpu_var(cpu, cpu_last_tsc) = 0;
+       get_cpu_var(cpu, cpu_last_idle) = 0;
 }
 
 void context_stop(struct proc * p)
@@ -223,9 +222,9 @@ void context_stop(struct proc * p)
                u64_t tmp;
 
                read_tsc_64(&tsc);
-               tmp = sub64(tsc, *__tsc_ctr_switch);
-               kernel_ticks[cpu] = add64(kernel_ticks[cpu], tmp);
-               p->p_cycles = add64(p->p_cycles, tmp);
+               tmp = tsc - *__tsc_ctr_switch;
+               kernel_ticks[cpu] = kernel_ticks[cpu] + tmp;
+               p->p_cycles = p->p_cycles + tmp;
                must_bkl_unlock = 1;
        } else {
                u64_t bkl_tsc;
@@ -239,11 +238,11 @@ void context_stop(struct proc * p)
                
                read_tsc_64(&tsc);
 
-               bkl_ticks[cpu] = add64(bkl_ticks[cpu], sub64(tsc, bkl_tsc));
+               bkl_ticks[cpu] = bkl_ticks[cpu] + tsc - bkl_tsc;
                bkl_tries[cpu]++;
                bkl_succ[cpu] += !(!(succ == 0));
 
-               p->p_cycles = add64(p->p_cycles, sub64(tsc, *__tsc_ctr_switch));
+               p->p_cycles = p->p_cycles + tsc - *__tsc_ctr_switch;
 
 #ifdef CONFIG_SMP
                /*
@@ -261,20 +260,20 @@ void context_stop(struct proc * p)
        }
 #else
        read_tsc_64(&tsc);
-       p->p_cycles = add64(p->p_cycles, sub64(tsc, *__tsc_ctr_switch));
+       p->p_cycles = p->p_cycles + tsc - *__tsc_ctr_switch;
 #endif
        
-       tsc_delta = sub64(tsc, *__tsc_ctr_switch);
+       tsc_delta = tsc - *__tsc_ctr_switch;
 
-       if(kbill_ipc) {
+       if (kbill_ipc) {
                kbill_ipc->p_kipc_cycles =
-                       add64(kbill_ipc->p_kipc_cycles, tsc_delta);
+                       kbill_ipc->p_kipc_cycles + tsc_delta;
                kbill_ipc = NULL;
        }
 
-       if(kbill_kcall) {
+       if (kbill_kcall) {
                kbill_kcall->p_kcall_cycles =
-                       add64(kbill_kcall->p_kcall_cycles, tsc_delta);
+                       kbill_kcall->p_kcall_cycles + tsc_delta;
                kbill_kcall = NULL;
        }
 
@@ -285,15 +284,15 @@ void context_stop(struct proc * p)
         */
        if (p->p_endpoint >= 0) {
 #if DEBUG_RACE
-               make_zero64(p->p_cpu_time_left);
+               p->p_cpu_time_left = 0;
 #else
                /* if (tsc_delta < p->p_cpu_time_left) in 64bit */
                if (ex64hi(tsc_delta) < ex64hi(p->p_cpu_time_left) ||
                                (ex64hi(tsc_delta) == ex64hi(p->p_cpu_time_left) &&
                                 ex64lo(tsc_delta) < ex64lo(p->p_cpu_time_left)))
-                       p->p_cpu_time_left = sub64(p->p_cpu_time_left, tsc_delta);
+                       p->p_cpu_time_left = p->p_cpu_time_left - tsc_delta;
                else {
-                       make_zero64(p->p_cpu_time_left);
+                       p->p_cpu_time_left = 0;
                }
 #endif
        }
@@ -329,12 +328,12 @@ void context_stop_idle(void)
 
 u64_t ms_2_cpu_time(unsigned ms)
 {
-       return mul64u(tsc_per_ms[cpuid], ms);
+       return (u64_t)tsc_per_ms[cpuid] * ms;
 }
 
 unsigned cpu_time_2_ms(u64_t cpu_time)
 {
-       return div64u(cpu_time, tsc_per_ms[cpuid]);
+       return (unsigned long)(cpu_time / tsc_per_ms[cpuid]);
 }
 
 short cpu_load(void)
@@ -357,13 +356,13 @@ short cpu_load(void)
        current_idle = &idle->p_cycles; /* ptr to idle proc */
 
        /* calculate load since last cpu_load invocation */
-       if (!is_zero64(*last_tsc)) {
-               tsc_delta = sub64(current_tsc, *last_tsc);
-               idle_delta = sub64(*current_idle, *last_idle);
+       if (*last_tsc) {
+               tsc_delta = current_tsc - *last_tsc;
+               idle_delta = *current_idle - *last_idle;
 
-               busy = sub64(tsc_delta, idle_delta);
-               busy = mul64(busy, make64(100, 0));
-               load = ex64lo(div64(busy, tsc_delta));
+               busy = tsc_delta - idle_delta;
+               busy = busy * 100;
+               load = ex64lo(busy / tsc_delta);
 
                if (load > 100)
                        load = 100;
index 80647af0dee542157e3bfb4f8c5ee50e76240c6d..4dd3dcc8d93b82dfe6724d5976d7f31cedcc4d4a 100644 (file)
@@ -197,8 +197,7 @@ static void amd_watchdog_init(const unsigned cpu)
        val = 1 << 20 | 1 << 17 | 1 << 16 | 0x76;
        ia32_msr_write(AMD_MSR_EVENT_SEL0, 0, val);
 
-       cpuf = cpu_get_freq(cpu);
-       neg64(cpuf);
+       cpuf = -cpu_get_freq(cpu);
        watchdog->resetval = watchdog->watchdog_resetval = cpuf;
 
        ia32_msr_write(AMD_MSR_EVENT_CTR0,
@@ -224,9 +223,8 @@ static int amd_watchdog_profile_init(const unsigned freq)
 
        /* FIXME works only if all CPUs have the same freq */
        cpuf = cpu_get_freq(cpuid);
-       cpuf = div64u64(cpuf, freq);
+       cpuf = -div64u64(cpuf, freq);
 
-       neg64(cpuf);
        watchdog->profile_resetval = cpuf;
 
        return OK;
index 20bd64ac789d1d62e922d87d599577a689cb721d..2ab0953441003209abd6ca4930f305b75758f269 100644 (file)
@@ -161,7 +161,7 @@ void kmain(kinfo_t *local_cbi)
        DEBUGEXTRA(("initializing %s... ", ip->proc_name));
        rp = proc_addr(ip->proc_nr);            /* get process pointer */
        ip->endpoint = rp->p_endpoint;          /* ipc endpoint */
-       make_zero64(rp->p_cpu_time_left);
+       rp->p_cpu_time_left = 0;
        if(i < NR_TASKS)                        /* name (tasks only) */
                strlcpy(rp->p_name, ip->proc_name, sizeof(rp->p_name));
 
index 140ac4724f510210dd24184319a3490f502f04b5..9a1c3e1150d71417a2f8a44e87e222b906acbd47 100644 (file)
@@ -252,7 +252,7 @@ not_runnable_pick_new:
        if (proc_is_preempted(p)) {
                p->p_rts_flags &= ~RTS_PREEMPTED;
                if (proc_is_runnable(p)) {
-                       if (!is_zero64(p->p_cpu_time_left))
+                       if (p->p_cpu_time_left)
                                enqueue_head(p);
                        else
                                enqueue(p);
@@ -348,7 +348,7 @@ check_misc_flags:
         * as we are sure that a possible out-of-quantum message to the
         * scheduler will not collide with the regular ipc
         */
-       if (is_zero64(p->p_cpu_time_left))
+       if (!p->p_cpu_time_left)
                proc_no_time(p);
        /*
         * After handling the misc flags the selected process might not be
@@ -365,12 +365,12 @@ check_misc_flags:
 #endif
 
        p = arch_finish_switch_to_user();
-       assert(!is_zero64(p->p_cpu_time_left));
+       assert(p->p_cpu_time_left);
 
        context_stop(proc_addr(KERNEL));
 
        /* If the process isn't the owner of FPU, enable the FPU exception */
-       if(get_cpulocal_var(fpu_owner) != p)
+       if (get_cpulocal_var(fpu_owner) != p)
                enable_fpu_exception();
        else
                disable_fpu_exception();
@@ -1606,7 +1606,7 @@ static void enqueue_head(struct proc *rp)
    * the process was runnable without its quantum expired when dequeued. A
    * process with no time left should have been handled else and differently
    */
-  assert(!is_zero64(rp->p_cpu_time_left));
+  assert(rp->p_cpu_time_left);
 
   assert(q >= 0);
 
@@ -1689,12 +1689,12 @@ void dequeue(struct proc *rp)
   /* this is not all that accurate on virtual machines, especially with
      IO bound processes that only spend a short amount of time in the queue
      at a time. */
-  if (!is_zero64(rp->p_accounting.enter_queue)) {
+  if (rp->p_accounting.enter_queue) {
        read_tsc_64(&tsc);
-       tsc_delta = sub64(tsc, rp->p_accounting.enter_queue);
-       rp->p_accounting.time_in_queue = add64(rp->p_accounting.time_in_queue,
-               tsc_delta);
-       make_zero64(rp->p_accounting.enter_queue);
+       tsc_delta = tsc - rp->p_accounting.enter_queue;
+       rp->p_accounting.time_in_queue = rp->p_accounting.time_in_queue +
+               tsc_delta;
+       rp->p_accounting.enter_queue = 0;
   }
 
 
@@ -1843,8 +1843,8 @@ void reset_proc_accounting(struct proc *p)
   p->p_accounting.ipc_sync  = 0;
   p->p_accounting.ipc_async = 0;
   p->p_accounting.dequeues  = 0;
-  make_zero64(p->p_accounting.time_in_queue);
-  make_zero64(p->p_accounting.enter_queue);
+  p->p_accounting.time_in_queue = 0;
+  p->p_accounting.enter_queue = 0;
 }
        
 void copr_not_available_handler(void)
index c58622102ccca521b3e7784bb0e1095e465af1d5..7a1504a5cb3d72abbd2369bcf91b3772408ad293 100644 (file)
@@ -86,10 +86,10 @@ int do_fork(struct proc * caller, message * m_ptr)
   RTS_SET(rpc, RTS_NO_QUANTUM);
   reset_proc_accounting(rpc);
 
-  make_zero64(rpc->p_cpu_time_left);
-  make_zero64(rpc->p_cycles);
-  make_zero64(rpc->p_kcall_cycles);
-  make_zero64(rpc->p_kipc_cycles);
+  rpc->p_cpu_time_left = 0;
+  rpc->p_cycles = 0;
+  rpc->p_kcall_cycles = 0;
+  rpc->p_kipc_cycles = 0;
   rpc->p_signal_received = 0;
 
   /* If the parent is a privileged process, take away the privileges from the