PUBLIC void cycles_accounting_init(void)
{
+#ifdef CONFIG_SMP
+ unsigned cpu = cpuid;
+#endif
+
read_tsc_64(get_cpu_var_ptr(cpu, tsc_ctr_switch));
make_zero64(get_cpu_var(cpu, cpu_last_tsc));
#include "arch_proto.h"
#include "kernel/glo.h"
#include <unistd.h>
+#include <stdlib.h>
#include <machine/cmos.h>
#include <machine/bios.h>
#include <minix/portio.h>
#include "kernel/smp.h"
#include "apic.h"
#include "acpi.h"
+#include "clock.h"
#include "glo.h"
SPINLOCK_DEFINE(smp_cpu_lock)
SPINLOCK_DEFINE(dispq_lock)
-FORWARD _PROTOTYPE(void smp_init_vars, (void));
FORWARD _PROTOTYPE(void smp_reinit_vars, (void));
/*
PRIVATE phys_bytes copy_trampoline(void)
{
char * s, *end;
- phys_bytes tramp_base;
+ phys_bytes tramp_base = 0;
unsigned tramp_size;
tramp_size = (unsigned) &__trampoline_end - (unsigned)&trampoline;
s = env_get("memory");
- s = (char *) get_value(params_buffer, "memory");
if (!s)
return 0;
-
+
while (*s != 0) {
phys_bytes base = 0xfffffff;
unsigned size;
PUBLIC void smp_shutdown_aps(void)
{
unsigned cpu;
- unsigned aid = apicid();
- unsigned local_cpu = cpuid;
-
+
if (ncpus == 1)
goto exit_shutdown_aps;
-
+
/* we must let the other cpus enter the kernel mode */
BKL_UNLOCK();
PRIVATE void smp_reinit_vars(void)
{
- int i;
lapic_addr = lapic_eoi_addr = 0;
ioapic_enabled = 0;
PRIVATE void reset_bkl_usage(void)
{
- unsigned cpu;
-
memset(kernel_ticks, 0, sizeof(kernel_ticks));
memset(bkl_ticks, 0, sizeof(bkl_ticks));
memset(bkl_tries, 0, sizeof(bkl_tries));
#define get_k_stack_top(cpu) ((void *)(((char*)(k_stacks)) \
+ 2 * ((cpu) + 1) * K_STACK_SIZE))
+_PROTOTYPE(void mfence, (void));
#define barrier() do { mfence(); } while(0)
if(lapic_addr)
lapic_mapping_index = freeidx++;
if (ioapic_enabled) {
- int i;
ioapic_first_index = freeidx;
assert(nioapics > 0);
freeidx += nioapics;
PRIVATE void get_parameters(multiboot_info_t *mbi)
{
char mem_value[40], temp[ITOA_BUFFER_SIZE];
- int i, processor;
+ int i;
int dev;
int ctrlr;
int disk, prim, sub;
#include "smp.h"
#include "interrupt.h"
+#include "clock.h"
unsigned ncpus;
unsigned ht_per_core;
_PROTOTYPE(void wait_for_APs_to_finish_booting, (void));
_PROTOTYPE(void ap_boot_finished, (unsigned cpu));
+_PROTOTYPE(void smp_shutdown_aps, (void ));
/* IPI handlers */
_PROTOTYPE(void smp_ipi_halt_handler, (void));
#define spinlock_lock(sl)
#define spinlock_unlock(sl)
#else
+void arch_spinlock_lock(atomic_t * sl);
+void arch_spinlock_unlock(atomic_t * sl);
#define spinlock_lock(sl) arch_spinlock_lock((atomic_t*) sl)
#define spinlock_unlock(sl) arch_spinlock_unlock((atomic_t*) sl)
#endif