From: Tomas Hruby
Date: Wed, 15 Sep 2010 14:10:12 +0000 (+0000)
Subject: SMP - BSP waits until the APs finish their booting
X-Git-Tag: v3.2.0~861
X-Git-Url: http://zhaoyanbai.com/repos/man.dig.html?a=commitdiff_plain;h=9b6d66c787c2c9136f5af372bbf9cb9f0be8c53b;p=minix.git

SMP - BSP waits until the APs finish their booting

- APs configure local timers

- while configuring local APIC timer the CPUs fiddle with the interrupt
  handlers. As the interrupt table is shared the BSP must not run
---

diff --git a/kernel/arch/i386/apic.c b/kernel/arch/i386/apic.c
index 979325be2..fc6a313ef 100644
--- a/kernel/arch/i386/apic.c
+++ b/kernel/arch/i386/apic.c
@@ -127,11 +127,7 @@ PRIVATE struct irq io_apic_irq[NR_IRQ_VECTORS];
  * to make APIC work if SMP is not configured, we need to set the maximal number
  * of CPUS to 1, cpuid to return 0 and the current cpu is always BSP
  */
-#ifndef CONFIG_SMP
-/* this is always true on an uniprocessor */
-#define cpu_is_bsp(x) 1
-
-#else
+#ifdef CONFIG_SMP
 
 #include "kernel/smp.h"
 
diff --git a/kernel/arch/i386/arch_smp.c b/kernel/arch/i386/arch_smp.c
index b10abbaa9..360786a33 100644
--- a/kernel/arch/i386/arch_smp.c
+++ b/kernel/arch/i386/arch_smp.c
@@ -190,15 +190,35 @@ PRIVATE void ap_finish_booting(void)
 	while(!i386_paging_enabled)
 		arch_pause();
 
+	/*
+	 * Finish processor initialisation. CPUs must be excluded from running.
+	 * lapic timer calibration locks and unlocks the BKL because of the
+	 * nested interrupts used for calibration. Therefore BKL is not good
+	 * enough, the boot_lock must be held.
+	 */
+	spinlock_lock(&boot_lock);
+	BKL_LOCK();
+
 	/*
 	 * we must load some page tables befre we turn paging on. As VM is
 	 * always present we use those
 	 */
 	segmentation2paging(proc_addr(VM_PROC_NR));
+
+	printf("CPU %d paging is on\n", cpu);
+
+	lapic_enable(cpu);
+
+	if (app_cpu_init_timer(system_hz)) {
+		panic("FATAL : failed to initialize timer interrupts CPU %d, "
+				"cannot continue without any clock source!", cpuid);
+	}
+	printf("CPU %d local APIC timer is ticking\n", cpu);
 
-	BKL_LOCK();
-	printf("CPU %d is running\n", cpu);
 	BKL_UNLOCK();
+
+	ap_boot_finished(cpu);
+	spinlock_unlock(&boot_lock);
 
 	for(;;);
 
 	/* finish processor initialisation. */
diff --git a/kernel/arch/i386/include/arch_proto.h b/kernel/arch/i386/include/arch_proto.h
index 189d879ce..0d9238999 100644
--- a/kernel/arch/i386/include/arch_proto.h
+++ b/kernel/arch/i386/include/arch_proto.h
@@ -172,6 +172,9 @@ extern void * k_stacks;
 #define get_k_stack_top(cpu)	((void *)(((char*)(k_stacks)) \
 					+ 2 * ((cpu) + 1) * K_STACK_SIZE))
 
+#define barrier()	do { mfence(); } while(0)
+
+
 #ifndef __GNUC__
 /* call a function to read the stack fram pointer (%ebp) */
 _PROTOTYPE(reg_t read_ebp, (void));
diff --git a/kernel/arch/i386/klib.S b/kernel/arch/i386/klib.S
index 4687f782a..b0519631f 100644
--- a/kernel/arch/i386/klib.S
+++ b/kernel/arch/i386/klib.S
@@ -999,6 +999,8 @@ ENTRY(arch_spinlock_unlock)
 	mfence
 	ret
 
+#endif /* CONFIG_SMP */
+
 /*===========================================================================*/
 /*				mfence					     */
 /*===========================================================================*/
@@ -1008,8 +1010,6 @@ ENTRY(mfence)
 	mfence
 	ret
 
-#endif /* CONFIG_SMP */
-
 /*===========================================================================*/
 /*				arch_pause				     */
 /*===========================================================================*/
diff --git a/kernel/arch/i386/memory.c b/kernel/arch/i386/memory.c
index 10c432126..35fa5f687 100644
--- a/kernel/arch/i386/memory.c
+++ b/kernel/arch/i386/memory.c
@@ -1083,12 +1083,15 @@ PUBLIC int arch_enable_paging(struct proc * caller, const message * m_ptr)
 			io_apic[i].addr = io_apic[i].vaddr;
 		}
 	}
-
-	/* TODO APs are still waiting, release them */
-#endif
+#if CONFIG_SMP
+	barrier();
 
 	i386_paging_enabled = 1;
 
+	wait_for_APs_to_finish_booting();
+#endif
+#endif
+
 #ifdef CONFIG_WATCHDOG
 	/*
 	 * We make sure that we don't enable the watchdog until paging is turned
diff --git a/kernel/kernel.h b/kernel/kernel.h
index 6247f2c29..ff80d1d4a 100644
--- a/kernel/kernel.h
+++ b/kernel/kernel.h
@@ -61,6 +61,8 @@
 /* We only support 1 cpu now */
 #define CONFIG_MAX_CPUS	1
 #define cpuid	0
+/* this is always true on an uniprocessor */
+#define cpu_is_bsp(x) 1
 
 #else
 
diff --git a/kernel/smp.c b/kernel/smp.c
index b0fafc266..367be5947 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -6,4 +6,24 @@ unsigned bsp_cpu_id;
 
 struct cpu cpus[CONFIG_MAX_CPUS];
 
+static volatile unsigned ap_cpus_booted;
+
 SPINLOCK_DEFINE(big_kernel_lock)
+SPINLOCK_DEFINE(boot_lock)
+
+void wait_for_APs_to_finish_booting(void)
+{
+	/* we must let the other CPUs to run in kernel mode first */
+	BKL_UNLOCK();
+	while (ap_cpus_booted != (ncpus - 1))
+		arch_pause();
+	/* now we have to take the lock again as we continu execution */
+	BKL_LOCK();
+}
+
+void ap_boot_finished(unsigned cpu)
+{
+	printf("CPU %d is running\n", cpu);
+
+	ap_cpus_booted++;
+}
diff --git a/kernel/smp.h b/kernel/smp.h
index 405db60e8..312dd144f 100644
--- a/kernel/smp.h
+++ b/kernel/smp.h
@@ -49,7 +49,17 @@ EXTERN struct cpu cpus[CONFIG_MAX_CPUS];
 #define cpu_test_flag(cpu, flag)	(cpus[cpu].flags & (flag))
 #define cpu_is_ready(cpu)	cpu_test_flag(cpu, CPU_IS_READY)
 
+/*
+ * Big Kernel Lock prevents more then one cpu executing the kernel code
+ */
 SPINLOCK_DECLARE(big_kernel_lock)
+/*
+ * to sync the booting APs
+ */
+SPINLOCK_DECLARE(boot_lock)
+
+_PROTOTYPE(void wait_for_APs_to_finish_booting, (void));
+_PROTOTYPE(void ap_boot_finished, (unsigned cpu));
 
 #endif /* __ASSEMBLY__ */
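
Editor's note on the handshake the patch introduces: each AP finishes its local
initialisation (paging, local APIC, timer calibration) while holding boot_lock
and the BKL, then calls ap_boot_finished() to bump ap_cpus_booted; the BSP sits
in wait_for_APs_to_finish_booting() with the BKL released until the counter
reaches ncpus - 1. The stand-alone sketch below is NOT part of the commit; it
models that handshake with POSIX threads, where main() plays the BSP and the
worker threads play the APs. The function and variable names mirror the patch,
but the pthread mutex standing in for the BKL, the fixed NCPUS, and the atomic
increment are illustrative assumptions, and boot_lock (which in the patch
serialises the APs among themselves) is omitted for brevity.

#include <pthread.h>
#include <stdio.h>

#define NCPUS 4

static pthread_mutex_t big_kernel_lock = PTHREAD_MUTEX_INITIALIZER;
static volatile unsigned ap_cpus_booted;	/* how many APs have reported in */

static void ap_boot_finished(unsigned cpu)
{
	printf("CPU %u is running\n", cpu);
	__sync_fetch_and_add(&ap_cpus_booted, 1);	/* atomic stand-in for the kernel's increment */
}

static void *ap_finish_booting(void *arg)
{
	unsigned cpu = (unsigned)(unsigned long)arg;

	/* per-CPU init (paging, lapic, timer calibration) runs with the lock held */
	pthread_mutex_lock(&big_kernel_lock);
	/* ... local initialisation would go here ... */
	pthread_mutex_unlock(&big_kernel_lock);

	ap_boot_finished(cpu);
	return NULL;
}

static void wait_for_APs_to_finish_booting(void)
{
	/* the BSP must release the lock so the APs can run their init ... */
	pthread_mutex_unlock(&big_kernel_lock);
	while (ap_cpus_booted != NCPUS - 1)
		;	/* arch_pause() in the kernel */
	/* ... and takes it back once every AP has checked in */
	pthread_mutex_lock(&big_kernel_lock);
}

int main(void)
{
	pthread_t ap[NCPUS];
	unsigned cpu;

	pthread_mutex_lock(&big_kernel_lock);	/* the BSP boots holding the lock */
	for (cpu = 1; cpu < NCPUS; cpu++)
		pthread_create(&ap[cpu], NULL, ap_finish_booting,
		    (void *)(unsigned long)cpu);

	wait_for_APs_to_finish_booting();
	printf("BSP: all %d APs finished booting\n", NCPUS - 1);
	pthread_mutex_unlock(&big_kernel_lock);

	for (cpu = 1; cpu < NCPUS; cpu++)
		pthread_join(ap[cpu], NULL);
	return 0;
}

The counter-based wait mirrors why the BSP drops the BKL before spinning: the
APs themselves need the lock to complete their initialisation, so holding it
while waiting would deadlock the boot.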