From 963e5d3b76d657f1ebcf3561446d2ba1872bbfa2 Mon Sep 17 00:00:00 2001
From: Benjamin Herrenschmidt
Date: Tue, 29 Mar 2011 14:51:10 +1100
Subject: powerpc: Make decrementer interrupt robust against offlined CPUs

With some implementations, it is possible that a timer interrupt occurs
every few seconds on an offline CPU. In this case, just re-arm the
decrementer and return immediately.

Signed-off-by: Benjamin Herrenschmidt

diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index aa92696..375480c 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -577,14 +577,21 @@ void timer_interrupt(struct pt_regs * regs)
 	struct clock_event_device *evt = &decrementer->event;
 	u64 now;
 
+	/* Ensure a positive value is written to the decrementer, or else
+	 * some CPUs will continue to take decrementer exceptions.
+	 */
+	set_dec(DECREMENTER_MAX);
+
+	/* Some implementations of hotplug will get timer interrupts while
+	 * offline, just ignore these
+	 */
+	if (!cpu_online(smp_processor_id()))
+		return;
+
 	trace_timer_interrupt_entry(regs);
 
 	__get_cpu_var(irq_stat).timer_irqs++;
 
-	/* Ensure a positive value is written to the decrementer, or else
-	 * some CPUs will continuue to take decrementer exceptions */
-	set_dec(DECREMENTER_MAX);
-
 #if defined(CONFIG_PPC32) && defined(CONFIG_PMAC)
 	if (atomic_read(&ppc_n_lost_interrupts) != 0)
 		do_IRQ(regs);
-- cgit v0.10.2

From fa3f82c8bb7acbe049ea71f258b3ae0a33d9d40b Mon Sep 17 00:00:00 2001
From: Benjamin Herrenschmidt
Date: Thu, 10 Feb 2011 18:45:24 +1100
Subject: powerpc/smp: soft-replugged CPUs must go back to start_secondary

Various things are torn down when a CPU is hot-unplugged. That CPU is
expected to go back to start_secondary when re-plugged to re-initialize
everything, such as clock sources, maps, ...

Some implementations just return from cpu_die() callback in the idle
loop when the CPU is "re-plugged". This is not enough. We fix it using
a little asm trampoline which resets the stack and calls back into
start_secondary as if we were all fresh from boot. The trampoline
already existed on ppc64, but we add it for ppc32.

Signed-off-by: Benjamin Herrenschmidt

diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index 66e237b..1de0e97 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -36,6 +36,7 @@ extern void cpu_die(void);
 
 extern void smp_send_debugger_break(int cpu);
 extern void smp_message_recv(int);
+extern void start_secondary_resume(void);
 
 DECLARE_PER_CPU(unsigned int, cpu_pvr);
 

diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S
index 98c4b29..c5c24be 100644
--- a/arch/powerpc/kernel/head_32.S
+++ b/arch/powerpc/kernel/head_32.S
@@ -890,6 +890,15 @@ __secondary_start:
 	mtspr	SPRN_SRR1,r4
 	SYNC
 	RFI
+
+_GLOBAL(start_secondary_resume)
+	/* Reset stack */
+	rlwinm	r1,r1,0,0,(31-THREAD_SHIFT)	/* current_thread_info() */
+	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
+	li	r3,0
+	std	r3,0(r1)	/* Zero the stack frame pointer */
+	bl	start_secondary
+	b	.
 #endif /* CONFIG_SMP */
 
 #ifdef CONFIG_KVM_BOOK3S_HANDLER

diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 9813605..1c9956c 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -502,7 +502,7 @@ static struct device_node *cpu_to_l2cache(int cpu)
 }
 
 /* Activate a secondary processor. */
-int __devinit start_secondary(void *unused)
+void __devinit start_secondary(void *unused)
 {
 	unsigned int cpu = smp_processor_id();
 	struct device_node *l2_cache;
@@ -558,7 +558,8 @@ int __devinit start_secondary(void *unused)
 	local_irq_enable();
 
 	cpu_idle();
-	return 0;
+
+	BUG();
 }
 
 int setup_profiling_timer(unsigned int multiplier)
@@ -660,5 +661,9 @@ void cpu_die(void)
 {
 	if (ppc_md.cpu_die)
 		ppc_md.cpu_die();
+
+	/* If we return, we re-enter start_secondary */
+	start_secondary_resume();
 }
+
 #endif

diff --git a/arch/powerpc/platforms/pseries/offline_states.h b/arch/powerpc/platforms/pseries/offline_states.h
index 75a6f48..08672d9 100644
--- a/arch/powerpc/platforms/pseries/offline_states.h
+++ b/arch/powerpc/platforms/pseries/offline_states.h
@@ -34,6 +34,4 @@ static inline void set_default_offline_state(int cpu)
 #endif
 
 extern enum cpu_state_vals get_preferred_offline_state(int cpu);
-extern int start_secondary(void);
-extern void start_secondary_resume(void);
 #endif
-- cgit v0.10.2

From 4fcb8833af3355065bd8bffcd338eabc6f3a38a0 Mon Sep 17 00:00:00 2001
From: Benjamin Herrenschmidt
Date: Thu, 10 Feb 2011 18:46:50 +1100
Subject: powerpc/smp: Fix generic_mach_cpu_die()

This is used by some "soft" hotplug implementations. It needs to call
idle_task_exit() when the CPU is going away, and we remove the now
no-longer needed set_cpu_online() and local_irq_enable(), which are
handled by the return to start_secondary.

Signed-off-by: Benjamin Herrenschmidt

diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 1c9956c..3c0fab5 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -362,14 +362,13 @@ void generic_mach_cpu_die(void)
 	unsigned int cpu;
 
 	local_irq_disable();
+	idle_task_exit();
 	cpu = smp_processor_id();
 	printk(KERN_DEBUG "CPU%d offline\n", cpu);
 	__get_cpu_var(cpu_state) = CPU_DEAD;
 	smp_wmb();
 	while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)
 		cpu_relax();
-	set_cpu_online(cpu, true);
-	local_irq_enable();
 }
 
 #endif
-- cgit v0.10.2

From b527d07114fdab83f39040c69b4b0a4b1b232c16 Mon Sep 17 00:00:00 2001
From: Benjamin Herrenschmidt
Date: Fri, 11 Feb 2011 12:46:41 +1100
Subject: powerpc/smp: Remove unused generic_cpu_enable()

Nobody uses it; besides, we should always use the normal __cpu_up
path anyway.

Signed-off-by: Benjamin Herrenschmidt

diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index 1de0e97..a629b6f 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -43,7 +43,6 @@ DECLARE_PER_CPU(unsigned int, cpu_pvr);
 #ifdef CONFIG_HOTPLUG_CPU
 extern void fixup_irqs(const struct cpumask *map);
 int generic_cpu_disable(void);
-int generic_cpu_enable(unsigned int cpu);
 void generic_cpu_die(unsigned int cpu);
 void generic_mach_cpu_die(void);
 #endif

diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 3c0fab5..19d0c25 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -322,28 +322,6 @@ int generic_cpu_disable(void)
 	return 0;
 }
 
-int generic_cpu_enable(unsigned int cpu)
-{
-	/* Do the normal bootup if we haven't
-	 * already bootstrapped. */
-	if (system_state != SYSTEM_RUNNING)
-		return -ENOSYS;
-
-	/* get the target out of it's holding state */
-	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
-	smp_wmb();
-
-	while (!cpu_online(cpu))
-		cpu_relax();
-
-#ifdef CONFIG_PPC64
-	fixup_irqs(cpu_online_mask);
-	/* counter the irq disable in fixup_irqs */
-	local_irq_enable();
-#endif
-	return 0;
-}
-
 void generic_cpu_die(unsigned int cpu)
 {
 	int i;
-- cgit v0.10.2

From 7a53a4fe707a93a33f6c5d42173bf213cb6ff71d Mon Sep 17 00:00:00 2001
From: Benjamin Herrenschmidt
Date: Fri, 11 Feb 2011 12:49:01 +1100
Subject: powerpc/smp: Remove unused smp_ops->cpu_enable()

Remove the last remnants of cpu_enable(); everybody uses the normal
__cpu_up() path now.

Signed-off-by: Benjamin Herrenschmidt

diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index fe56a23..bcfc0da 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -37,7 +37,6 @@ struct smp_ops_t {
 	void  (*setup_cpu)(int nr);
 	void  (*take_timebase)(void);
 	void  (*give_timebase)(void);
-	int   (*cpu_enable)(unsigned int nr);
 	int   (*cpu_disable)(void);
 	void  (*cpu_die)(unsigned int nr);
 	int   (*cpu_bootable)(unsigned int nr);

diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 19d0c25..be7d728 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -350,21 +350,11 @@ void generic_mach_cpu_die(void)
 }
 #endif
 
-static int __devinit cpu_enable(unsigned int cpu)
-{
-	if (smp_ops && smp_ops->cpu_enable)
-		return smp_ops->cpu_enable(cpu);
-
-	return -ENOSYS;
-}
-
 int __cpuinit __cpu_up(unsigned int cpu)
 {
 	int c;
 
 	secondary_ti = current_set[cpu];
-	if (!cpu_enable(cpu))
-		return 0;
 
 	if (smp_ops == NULL ||
 	    (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))

diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index c95215f..ebd2b7e 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -923,8 +923,6 @@ struct smp_ops_t core99_smp_ops = {
 # if defined(CONFIG_PPC64)
 	.cpu_disable	= generic_cpu_disable,
 	.cpu_die	= generic_cpu_die,
-	/* intentionally do *NOT* assign cpu_enable,
-	 * the generic code will use kick_cpu then! */
 # endif
 #endif
 };
-- cgit v0.10.2

From fb49f864c3c3f8ac5b68563774171fe43634ffeb Mon Sep 17 00:00:00 2001
From: Benjamin Herrenschmidt
Date: Fri, 11 Feb 2011 14:09:32 +1100
Subject: powerpc/pmac/smp: Fix 32-bit PowerMac cpu_die

Use generic cpu_state, call idle_task_exit() properly, and remove
smp_core99_cpu_die() which isn't useful; the generic function does the
job just fine.

diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index a629b6f..9fe5597 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -45,6 +45,7 @@ extern void fixup_irqs(const struct cpumask *map);
 int generic_cpu_disable(void);
 void generic_cpu_die(unsigned int cpu);
 void generic_mach_cpu_die(void);
+DECLARE_PER_CPU(int, cpu_state);
 #endif
 
 #ifdef CONFIG_PPC64

diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index ebd2b7e..e0ac7bb 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -880,31 +880,17 @@ int smp_core99_cpu_disable(void)
 	return 0;
 }
 
-static int cpu_dead[NR_CPUS];
-
 void pmac32_cpu_die(void)
 {
 	local_irq_disable();
-	cpu_dead[smp_processor_id()] = 1;
+	idle_task_exit();
+	printk(KERN_DEBUG "CPU%d offline\n", smp_processor_id());
+	__get_cpu_var(cpu_state) = CPU_DEAD;
+	smp_wmb();
 	mb();
 	low_cpu_die();
 }
 
-void smp_core99_cpu_die(unsigned int cpu)
-{
-	int timeout;
-
-	timeout = 1000;
-	while (!cpu_dead[cpu]) {
-		if (--timeout == 0) {
-			printk("CPU %u refused to die!\n", cpu);
-			break;
-		}
-		msleep(1);
-	}
-	cpu_dead[cpu] = 0;
-}
-
 #endif /* CONFIG_HOTPLUG_CPU && CONFIG_PP32 */
 
 /* Core99 Macs (dual G4s and G5s) */
@@ -918,12 +904,11 @@ struct smp_ops_t core99_smp_ops = {
 #if defined(CONFIG_HOTPLUG_CPU)
 # if defined(CONFIG_PPC32)
 	.cpu_disable	= smp_core99_cpu_disable,
-	.cpu_die	= smp_core99_cpu_die,
 # endif
 # if defined(CONFIG_PPC64)
 	.cpu_disable	= generic_cpu_disable,
-	.cpu_die	= generic_cpu_die,
 # endif
+	.cpu_die	= generic_cpu_die,
 #endif
 };
-- cgit v0.10.2

From 1c91cc570576dfd0f288d664c095d64d11aaace4 Mon Sep 17 00:00:00 2001
From: Benjamin Herrenschmidt
Date: Fri, 11 Feb 2011 13:05:17 +1100
Subject: powerpc/pmac/smp: Rename fixup_irqs() to migrate_irqs() and use it on ppc32

Signed-off-by: Benjamin Herrenschmidt

diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index 9fe5597..7e99771 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -41,7 +41,7 @@ extern void start_secondary_resume(void);
 DECLARE_PER_CPU(unsigned int, cpu_pvr);
 
 #ifdef CONFIG_HOTPLUG_CPU
-extern void fixup_irqs(const struct cpumask *map);
+extern void migrate_irqs(void);
 int generic_cpu_disable(void);
 void generic_cpu_die(unsigned int cpu);
 void generic_mach_cpu_die(void);

diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 63625e0..f621b7d 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -246,12 +246,13 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-void fixup_irqs(const struct cpumask *map)
+void migrate_irqs(void)
 {
 	struct irq_desc *desc;
 	unsigned int irq;
 	static int warned;
 	cpumask_var_t mask;
+	const struct cpumask *map = cpu_online_mask;
 
 	alloc_cpumask_var(&mask, GFP_KERNEL);
 

diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index be7d728..f6cc5c1 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -317,8 +317,8 @@ int generic_cpu_disable(void)
 
 	set_cpu_online(cpu, false);
 #ifdef CONFIG_PPC64
 	vdso_data->processorCount--;
-	fixup_irqs(cpu_online_mask);
 #endif
+	migrate_irqs();
 	return 0;
 }
-- cgit v0.10.2

From 45e07fd045153c0049c99b0cf6cf7254c164d37b Mon Sep 17 00:00:00 2001
From: Benjamin Herrenschmidt
Date: Mon, 21 Feb 2011 16:31:49 +1100
Subject: powerpc/pmac/smp: Fixup smp_core99_cpu_disable() and use it on 64-bit

Use the generic code, just add the MPIC priority setting; I don't see
any use in mucking
around with the decrementer, as 32-bit will have EE off all along, and
64-bit will be able to deal with it.

Signed-off-by: Benjamin Herrenschmidt

diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index e0ac7bb..eda4709 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -867,16 +867,14 @@ static void __devinit smp_core99_setup_cpu(int cpu_nr)
 
 #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PPC32)
 
-int smp_core99_cpu_disable(void)
+static int smp_core99_cpu_disable(void)
 {
-	set_cpu_online(smp_processor_id(), false);
+	int rc = generic_cpu_disable();
+	if (rc)
+		return rc;
 
-	/* XXX reset cpu affinity here */
 	mpic_cpu_set_priority(0xf);
-	asm volatile("mtdec %0" : : "r" (0x7fffffff));
-	mb();
-	udelay(20);
-	asm volatile("mtdec %0" : : "r" (0x7fffffff));
+	return 0;
 }
 
@@ -902,12 +900,7 @@ struct smp_ops_t core99_smp_ops = {
 	.give_timebase	= smp_core99_give_timebase,
 	.take_timebase	= smp_core99_take_timebase,
 #if defined(CONFIG_HOTPLUG_CPU)
-# if defined(CONFIG_PPC32)
 	.cpu_disable	= smp_core99_cpu_disable,
-# endif
-# if defined(CONFIG_PPC64)
-	.cpu_disable	= generic_cpu_disable,
-# endif
 	.cpu_die	= generic_cpu_die,
 #endif
 };
-- cgit v0.10.2

From 4c6130d9bba78e7ac44584378660a1204c5297d1 Mon Sep 17 00:00:00 2001
From: Benjamin Herrenschmidt
Date: Fri, 11 Feb 2011 14:03:20 +1100
Subject: powerpc/pmac/smp: Consolidate 32-bit and 64-bit PowerMac cpu_die in one file

Signed-off-by: Benjamin Herrenschmidt

diff --git a/arch/powerpc/platforms/powermac/pmac.h b/arch/powerpc/platforms/powermac/pmac.h
index f0bc08f..20468f4 100644
--- a/arch/powerpc/platforms/powermac/pmac.h
+++ b/arch/powerpc/platforms/powermac/pmac.h
@@ -33,7 +33,6 @@ extern void pmac_setup_pci_dma(void);
 extern void pmac_check_ht_link(void);
 
 extern void pmac_setup_smp(void);
-extern void pmac32_cpu_die(void);
 extern void low_cpu_die(void) __attribute__((noreturn));
 
 extern int pmac_nvram_init(void);

diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c
index d5aceb7..aa45281 100644
--- a/arch/powerpc/platforms/powermac/setup.c
+++ b/arch/powerpc/platforms/powermac/setup.c
@@ -650,51 +650,6 @@ static int pmac_pci_probe_mode(struct pci_bus *bus)
 		return PCI_PROBE_NORMAL;
 	return PCI_PROBE_DEVTREE;
 }
-
-#ifdef CONFIG_HOTPLUG_CPU
-/* access per cpu vars from generic smp.c */
-DECLARE_PER_CPU(int, cpu_state);
-
-static void pmac64_cpu_die(void)
-{
-	/*
-	 * turn off as much as possible, we'll be
-	 * kicked out as this will only be invoked
-	 * on core99 platforms for now ...
-	 */
-
-	printk(KERN_INFO "CPU#%d offline\n", smp_processor_id());
-	__get_cpu_var(cpu_state) = CPU_DEAD;
-	smp_wmb();
-
-	/*
-	 * during the path that leads here preemption is disabled,
-	 * reenable it now so that when coming up preempt count is
-	 * zero correctly
-	 */
-	preempt_enable();
-
-	/*
-	 * hard-disable interrupts for the non-NAP case, the NAP code
-	 * needs to re-enable interrupts (but soft-disables them)
-	 */
-	hard_irq_disable();
-
-	while (1) {
-		/* let's not take timer interrupts too often ... */
-		set_dec(0x7fffffff);
-
-		/* should always be true at this point */
-		if (cpu_has_feature(CPU_FTR_CAN_NAP))
-			power4_cpu_offline_powersave();
-		else {
-			HMT_low();
-			HMT_very_low();
-		}
-	}
-}
-#endif /* CONFIG_HOTPLUG_CPU */
-
 #endif /* CONFIG_PPC64 */
 
 define_machine(powermac) {
@@ -726,15 +681,4 @@ define_machine(powermac) {
 	.pcibios_after_init	= pmac_pcibios_after_init,
 	.phys_mem_access_prot	= pci_phys_mem_access_prot,
 #endif
-#ifdef CONFIG_HOTPLUG_CPU
-#ifdef CONFIG_PPC64
-	.cpu_die	= pmac64_cpu_die,
-#endif
-#ifdef CONFIG_PPC32
-	.cpu_die	= pmac32_cpu_die,
-#endif
-#endif
-#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PPC32)
-	.cpu_die	= generic_mach_cpu_die,
-#endif
 };

diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index eda4709..50e15b4 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -865,7 +865,7 @@ static void __devinit smp_core99_setup_cpu(int cpu_nr)
 }
 
 
-#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PPC32)
+#ifdef CONFIG_HOTPLUG_CPU
 
 static int smp_core99_cpu_disable(void)
 {
@@ -878,7 +878,9 @@ static int smp_core99_cpu_disable(void)
 	return 0;
 }
 
-void pmac32_cpu_die(void)
+#ifdef CONFIG_PPC32
+
+static void pmac_cpu_die(void)
 {
 	local_irq_disable();
 	idle_task_exit();
@@ -889,7 +891,52 @@ void pmac32_cpu_die(void)
 	low_cpu_die();
 }
 
-#endif /* CONFIG_HOTPLUG_CPU && CONFIG_PP32 */
+#else /* CONFIG_PPC32 */
+
+static void pmac_cpu_die(void)
+{
+	local_irq_disable();
+	idle_task_exit();
+
+	/*
+	 * turn off as much as possible, we'll be
+	 * kicked out as this will only be invoked
+	 * on core99 platforms for now ...
+	 */
+
+	printk(KERN_INFO "CPU#%d offline\n", smp_processor_id());
+	__get_cpu_var(cpu_state) = CPU_DEAD;
+	smp_wmb();
+
+	/*
+	 * during the path that leads here preemption is disabled,
+	 * reenable it now so that when coming up preempt count is
+	 * zero correctly
+	 */
+	preempt_enable();
+
+	/*
+	 * hard-disable interrupts for the non-NAP case, the NAP code
+	 * needs to re-enable interrupts (but soft-disables them)
+	 */
+	hard_irq_disable();
+
+	while (1) {
+		/* let's not take timer interrupts too often ... */
+		set_dec(0x7fffffff);
+
+		/* should always be true at this point */
+		if (cpu_has_feature(CPU_FTR_CAN_NAP))
+			power4_cpu_offline_powersave();
+		else {
+			HMT_low();
+			HMT_very_low();
+		}
+	}
+}
+
+#endif /* else CONFIG_PPC32 */
+#endif /* CONFIG_HOTPLUG_CPU */
 
 /* Core99 Macs (dual G4s and G5s) */
 struct smp_ops_t core99_smp_ops = {
@@ -933,5 +980,10 @@ void __init pmac_setup_smp(void)
 		smp_ops = &psurge_smp_ops;
 	}
 #endif /* CONFIG_PPC32 */
+
+#ifdef CONFIG_HOTPLUG_CPU
+	ppc_md.cpu_die = pmac_cpu_die;
+#endif
 }
+
-- cgit v0.10.2

From e872e41b79f61dbc22f85577613925a4a7de1c1a Mon Sep 17 00:00:00 2001
From: Benjamin Herrenschmidt
Date: Fri, 11 Feb 2011 14:55:42 +1100
Subject: powerpc/pmac/smp: Remove HMT changes for PowerMac offline code

Those instructions do nothing on non-threaded processors such as 970's
used on those machines.

Signed-off-by: Benjamin Herrenschmidt

diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index 50e15b4..53bee66 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -928,10 +928,6 @@ static void pmac_cpu_die(void)
 		/* should always be true at this point */
 		if (cpu_has_feature(CPU_FTR_CAN_NAP))
 			power4_cpu_offline_powersave();
-		else {
-			HMT_low();
-			HMT_very_low();
-		}
 	}
 }
-- cgit v0.10.2

From 62cc67b9df273be18fcb09a071592dedf751c90a Mon Sep 17 00:00:00 2001
From: Benjamin Herrenschmidt
Date: Mon, 21 Feb 2011 16:49:58 +1100
Subject: powerpc/pmac/smp: Properly NAP offlined CPU on G5

The current code soft-disables, and then goes to NAP mode which turns
interrupts on. That means that if an interrupt occurs, we will hit the
masked interrupt code path which isn't what we want, as it will return
with EE off, which will either get us out of NAP mode, or fail to enter
it (according to spec).

Instead, let's just rely on the fact that it is safe to take decrementer
interrupts on an offline CPU and leave interrupts enabled. We can also
get rid of the special case in asm for power4_cpu_offline_powersave()
and just use power4_idle().

Signed-off-by: Benjamin Herrenschmidt

diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index bcfc0da..578d330 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -266,7 +266,6 @@ struct machdep_calls {
 
 extern void e500_idle(void);
 extern void power4_idle(void);
-extern void power4_cpu_offline_powersave(void);
 extern void ppc6xx_idle(void);
 extern void book3e_idle(void);
 

diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 782f23d..271140b 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -536,6 +536,13 @@ _GLOBAL(pmac_secondary_start)
 	add	r13,r13,r4	/* for this processor.		*/
 	mtspr	SPRN_SPRG_PACA,r13	/* Save vaddr of paca in an SPRG*/
 
+	/* Mark interrupts soft and hard disabled (they might be enabled
+	 * in the PACA when doing hotplug)
+	 */
+	li	r0,0
+	stb	r0,PACASOFTIRQEN(r13)
+	stb	r0,PACAHARDIRQEN(r13)
+
 	/* Create a temp kernel stack for use before relocation is on.	*/
 	ld	r1,PACAEMERGSP(r13)
 	subi	r1,r1,STACK_FRAME_OVERHEAD

diff --git a/arch/powerpc/kernel/idle_power4.S b/arch/powerpc/kernel/idle_power4.S
index 5328709..ba31954 100644
--- a/arch/powerpc/kernel/idle_power4.S
+++ b/arch/powerpc/kernel/idle_power4.S
@@ -53,24 +53,3 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 	isync
 	b	1b
 
-_GLOBAL(power4_cpu_offline_powersave)
-	/* Go to NAP now */
-	mfmsr	r7
-	rldicl	r0,r7,48,1
-	rotldi	r0,r0,16
-	mtmsrd	r0,1	/* hard-disable interrupts */
-	li	r0,1
-	li	r6,0
-	stb	r0,PACAHARDIRQEN(r13)	/* we'll hard-enable shortly */
-	stb	r6,PACASOFTIRQEN(r13)	/* soft-disable irqs */
-BEGIN_FTR_SECTION
-	DSSALL
-	sync
-END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
-	ori	r7,r7,MSR_EE
-	oris	r7,r7,MSR_POW@h
-	sync
-	isync
-	mtmsrd	r7
-	isync
-	blr

diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index 53bee66..837989e 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -916,18 +916,20 @@ static void pmac_cpu_die(void)
 	preempt_enable();
 
 	/*
-	 * hard-disable interrupts for the non-NAP case, the NAP code
-	 * needs to re-enable interrupts (but soft-disables them)
+	 * Re-enable interrupts. The NAP code needs to enable them
+	 * anyways, do it now so we deal with the case where one already
+	 * happened while soft-disabled.
+	 * We shouldn't get any external interrupts, only decrementer, and the
+	 * decrementer handler is safe for use on offline CPUs
 	 */
-	hard_irq_disable();
+	local_irq_enable();
 
 	while (1) {
 		/* let's not take timer interrupts too often ... */
 		set_dec(0x7fffffff);
 
-		/* should always be true at this point */
-		if (cpu_has_feature(CPU_FTR_CAN_NAP))
-			power4_cpu_offline_powersave();
+		/* Enter NAP mode */
+		power4_idle();
 	}
 }
-- cgit v0.10.2

From 65e646ac509ff45e9f35e7fc1fea0d2b6723679f Mon Sep 17 00:00:00 2001
From: Benjamin Herrenschmidt
Date: Mon, 7 Mar 2011 17:02:49 +1100
Subject: powerpc/pmac: Rename cpu_state in therm_pm72 to avoid collision

This collides with the cpu_state in our SMP code, use processor_state
instead.

Signed-off-by: Benjamin Herrenschmidt

diff --git a/drivers/macintosh/therm_pm72.c b/drivers/macintosh/therm_pm72.c
index bca2af2..c987033 100644
--- a/drivers/macintosh/therm_pm72.c
+++ b/drivers/macintosh/therm_pm72.c
@@ -153,7 +153,7 @@ static struct i2c_adapter * u3_0;
 static struct i2c_adapter * u3_1;
 static struct i2c_adapter * k2;
 static struct i2c_client * fcu;
-static struct cpu_pid_state cpu_state[2];
+static struct cpu_pid_state processor_state[2];
 static struct basckside_pid_params backside_params;
 static struct backside_pid_state backside_state;
 static struct drives_pid_state drives_state;
@@ -664,8 +664,8 @@ static int read_eeprom(int cpu, struct mpu_data *out)
 
 static void fetch_cpu_pumps_minmax(void)
 {
-	struct cpu_pid_state *state0 = &cpu_state[0];
-	struct cpu_pid_state *state1 = &cpu_state[1];
+	struct cpu_pid_state *state0 = &processor_state[0];
+	struct cpu_pid_state *state1 = &processor_state[1];
 	u16 pump_min = 0, pump_max = 0xffff;
 	u16 tmp[4];
 
@@ -717,17 +717,17 @@ static ssize_t show_##name(struct device *dev, struct device_attribute *attr, ch
 	return sprintf(buf, "%d", data);	\
 }
 
-BUILD_SHOW_FUNC_FIX(cpu0_temperature, cpu_state[0].last_temp)
-BUILD_SHOW_FUNC_FIX(cpu0_voltage, cpu_state[0].voltage)
-BUILD_SHOW_FUNC_FIX(cpu0_current, cpu_state[0].current_a)
-BUILD_SHOW_FUNC_INT(cpu0_exhaust_fan_rpm, cpu_state[0].rpm)
-BUILD_SHOW_FUNC_INT(cpu0_intake_fan_rpm, cpu_state[0].intake_rpm)
+BUILD_SHOW_FUNC_FIX(cpu0_temperature, processor_state[0].last_temp)
+BUILD_SHOW_FUNC_FIX(cpu0_voltage, processor_state[0].voltage)
+BUILD_SHOW_FUNC_FIX(cpu0_current, processor_state[0].current_a)
+BUILD_SHOW_FUNC_INT(cpu0_exhaust_fan_rpm, processor_state[0].rpm)
+BUILD_SHOW_FUNC_INT(cpu0_intake_fan_rpm, processor_state[0].intake_rpm)
 
-BUILD_SHOW_FUNC_FIX(cpu1_temperature, cpu_state[1].last_temp)
-BUILD_SHOW_FUNC_FIX(cpu1_voltage, cpu_state[1].voltage)
-BUILD_SHOW_FUNC_FIX(cpu1_current, cpu_state[1].current_a)
-BUILD_SHOW_FUNC_INT(cpu1_exhaust_fan_rpm, cpu_state[1].rpm)
-BUILD_SHOW_FUNC_INT(cpu1_intake_fan_rpm, cpu_state[1].intake_rpm)
+BUILD_SHOW_FUNC_FIX(cpu1_temperature, processor_state[1].last_temp)
+BUILD_SHOW_FUNC_FIX(cpu1_voltage, processor_state[1].voltage)
+BUILD_SHOW_FUNC_FIX(cpu1_current, processor_state[1].current_a)
+BUILD_SHOW_FUNC_INT(cpu1_exhaust_fan_rpm, processor_state[1].rpm)
+BUILD_SHOW_FUNC_INT(cpu1_intake_fan_rpm, processor_state[1].intake_rpm)
 
 BUILD_SHOW_FUNC_FIX(backside_temperature, backside_state.last_temp)
 BUILD_SHOW_FUNC_INT(backside_fan_pwm, backside_state.pwm)
@@ -919,8 +919,8 @@ static void do_cpu_pid(struct cpu_pid_state *state, s32 temp, s32 power)
 
 static void do_monitor_cpu_combined(void)
 {
-	struct cpu_pid_state *state0 = &cpu_state[0];
-	struct cpu_pid_state *state1 = &cpu_state[1];
+	struct cpu_pid_state *state0 = &processor_state[0];
+	struct cpu_pid_state *state1 = &processor_state[1];
 	s32 temp0, power0, temp1, power1;
 	s32 temp_combi, power_combi;
 	int rc, intake, pump;
@@ -1150,7 +1150,7 @@ static void do_monitor_cpu_rack(struct cpu_pid_state *state)
 /*
  * Initialize the state structure for one CPU control loop
  */
-static int init_cpu_state(struct cpu_pid_state *state, int index)
+static int init_processor_state(struct cpu_pid_state *state, int index)
 {
 	int err;
 
@@ -1205,7 +1205,7 @@ static int init_cpu_state(struct cpu_pid_state *state, int index)
 /*
  * Dispose of the state data for one CPU control loop
  */
-static void dispose_cpu_state(struct cpu_pid_state *state)
+static void dispose_processor_state(struct cpu_pid_state *state)
 {
 	if (state->monitor == NULL)
 		return;
@@ -1804,9 +1804,9 @@ static int main_control_loop(void *x)
 	set_pwm_fan(SLOTS_FAN_PWM_INDEX, SLOTS_FAN_DEFAULT_PWM);
 
 	/* Initialize ADCs */
-	initialize_adc(&cpu_state[0]);
-	if (cpu_state[1].monitor != NULL)
-		initialize_adc(&cpu_state[1]);
+	initialize_adc(&processor_state[0]);
+	if (processor_state[1].monitor != NULL)
+		initialize_adc(&processor_state[1]);
 
 	fcu_tickle_ticks = FCU_TICKLE_TICKS;
 
@@ -1833,14 +1833,14 @@ static int main_control_loop(void *x)
 		if (cpu_pid_type == CPU_PID_TYPE_COMBINED)
 			do_monitor_cpu_combined();
 		else if (cpu_pid_type == CPU_PID_TYPE_RACKMAC) {
-			do_monitor_cpu_rack(&cpu_state[0]);
-			if (cpu_state[1].monitor != NULL)
-				do_monitor_cpu_rack(&cpu_state[1]);
+			do_monitor_cpu_rack(&processor_state[0]);
+			if (processor_state[1].monitor != NULL)
+				do_monitor_cpu_rack(&processor_state[1]);
 			// better deal with UP
 		} else {
-			do_monitor_cpu_split(&cpu_state[0]);
-			if (cpu_state[1].monitor != NULL)
-				do_monitor_cpu_split(&cpu_state[1]);
+			do_monitor_cpu_split(&processor_state[0]);
+			if (processor_state[1].monitor != NULL)
+				do_monitor_cpu_split(&processor_state[1]);
 			// better deal with UP
 		}
 
 		/* Then, the rest */
@@ -1885,8 +1885,8 @@ static int main_control_loop(void *x)
  */
 static void dispose_control_loops(void)
 {
-	dispose_cpu_state(&cpu_state[0]);
-	dispose_cpu_state(&cpu_state[1]);
+	dispose_processor_state(&processor_state[0]);
+	dispose_processor_state(&processor_state[1]);
 	dispose_backside_state(&backside_state);
 	dispose_drives_state(&drives_state);
 	dispose_slots_state(&slots_state);
@@ -1928,12 +1928,12 @@ static int create_control_loops(void)
 	/* Create control loops for everything. If any fail, everything
 	 * fails */
-	if (init_cpu_state(&cpu_state[0], 0))
+	if (init_processor_state(&processor_state[0], 0))
 		goto fail;
 	if (cpu_pid_type == CPU_PID_TYPE_COMBINED)
 		fetch_cpu_pumps_minmax();
-	if (cpu_count > 1 && init_cpu_state(&cpu_state[1], 1))
+	if (cpu_count > 1 && init_processor_state(&processor_state[1], 1))
 		goto fail;
 	if (init_backside_state(&backside_state))
 		goto fail;
-- cgit v0.10.2

From d72944457bb7d5c4be43aa1b741cb93c69484c20 Mon Sep 17 00:00:00 2001
From: Benjamin Herrenschmidt
Date: Tue, 8 Mar 2011 13:50:37 +1100
Subject: powerpc/smp: Add a smp_ops->bringup_done() callback

This allows us to stop abusing smp_ops->setup_cpu() for cleanup tasks
that have to take place after the initial boot time CPU bringup.

Signed-off-by: Benjamin Herrenschmidt

diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index 578d330..e4f0191 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -35,6 +35,7 @@ struct smp_ops_t {
 	int   (*probe)(void);
 	void  (*kick_cpu)(int nr);
 	void  (*setup_cpu)(int nr);
+	void  (*bringup_done)(void);
 	void  (*take_timebase)(void);
 	void  (*give_timebase)(void);
 	int   (*cpu_disable)(void);

diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index f6cc5c1..df37397 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -553,7 +553,11 @@ void __init smp_cpus_done(unsigned int max_cpus)
 
 	free_cpumask_var(old_mask);
 
+	if (smp_ops && smp_ops->bringup_done)
+		smp_ops->bringup_done();
+
 	dump_numa_cpu_topology();
+
 }
 
 int arch_sd_sibling_asym_packing(void)
-- cgit v0.10.2

From 734796f12351f9a0f38c47b981414f82d852f222 Mon Sep 17 00:00:00 2001
From: Benjamin Herrenschmidt
Date: Tue, 8 Mar 2011 13:54:50 +1100
Subject: powerpc/pmac/smp: Fix CPU hotplug crashes on some machines

On some machines that use i2c to synchronize the timebases (such as
PowerMac7,2/7,3 G5 machines), CPU hotplug would crash when putting a
CPU back online due to the underlying i2c bus being closed.

This uses the newly added bringup_done() callback to move the i2c close
along with other housekeeping calls, and adds a CPU notifier to re-open
the i2c bus around subsequent hotplug operations.

Signed-off-by: Benjamin Herrenschmidt

diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index 837989e..74a43c6 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -840,30 +840,68 @@ static void __devinit smp_core99_setup_cpu(int cpu_nr)
 
 	/* Setup openpic */
 	mpic_setup_this_cpu();
+}
 
-	if (cpu_nr == 0) {
-#ifdef CONFIG_PPC64
-		extern void g5_phy_disable_cpu1(void);
+#ifdef CONFIG_HOTPLUG_CPU
+static int smp_core99_cpu_notify(struct notifier_block *self,
+				 unsigned long action, void *hcpu)
+{
+	int rc;
 
-		/* Close i2c bus if it was used for tb sync */
+	switch(action) {
+	case CPU_UP_PREPARE:
+	case CPU_UP_PREPARE_FROZEN:
+		/* Open i2c bus if it was used for tb sync */
 		if (pmac_tb_clock_chip_host) {
-			pmac_i2c_close(pmac_tb_clock_chip_host);
-			pmac_tb_clock_chip_host = NULL;
+			rc = pmac_i2c_open(pmac_tb_clock_chip_host, 1);
+			if (rc) {
+				pr_err("Failed to open i2c bus for time sync\n");
+				return notifier_from_errno(rc);
+			}
 		}
+		break;
+	case CPU_ONLINE:
+	case CPU_UP_CANCELED:
+		/* Close i2c bus if it was used for tb sync */
+		if (pmac_tb_clock_chip_host)
+			pmac_i2c_close(pmac_tb_clock_chip_host);
+		break;
+	default:
+		break;
+	}
+	return NOTIFY_OK;
+}
 
-		/* If we didn't start the second CPU, we must take
-		 * it off the bus
-		 */
-		if (of_machine_is_compatible("MacRISC4") &&
-		    num_online_cpus() < 2)
-			g5_phy_disable_cpu1();
-#endif /* CONFIG_PPC64 */
+static struct notifier_block __cpuinitdata smp_core99_cpu_nb = {
+	.notifier_call	= smp_core99_cpu_notify,
+};
+#endif /* CONFIG_HOTPLUG_CPU */
 
-		if (ppc_md.progress)
-			ppc_md.progress("core99_setup_cpu 0 done", 0x349);
+static void __init smp_core99_bringup_done(void)
+{
+#ifdef CONFIG_PPC64
+	extern void g5_phy_disable_cpu1(void);
+
+	/* Close i2c bus if it was used for tb sync */
+	if (pmac_tb_clock_chip_host)
+		pmac_i2c_close(pmac_tb_clock_chip_host);
+
+	/* If we didn't start the second CPU, we must take
+	 * it off the bus.
+	 */
+	if (of_machine_is_compatible("MacRISC4") &&
+	    num_online_cpus() < 2) {
+		set_cpu_present(1, false);
+		g5_phy_disable_cpu1();
 	}
-}
+#endif /* CONFIG_PPC64 */
+#ifdef CONFIG_HOTPLUG_CPU
+	register_cpu_notifier(&smp_core99_cpu_nb);
+#endif
+	if (ppc_md.progress)
+		ppc_md.progress("smp_core99_bringup_done", 0x349);
+}
 
 #ifdef CONFIG_HOTPLUG_CPU
 
@@ -940,6 +978,7 @@ static void pmac_cpu_die(void)
 struct smp_ops_t core99_smp_ops = {
 	.message_pass	= smp_mpic_message_pass,
 	.probe		= smp_core99_probe,
+	.bringup_done	= smp_core99_bringup_done,
 	.kick_cpu	= smp_core99_kick_cpu,
 	.setup_cpu	= smp_core99_setup_cpu,
 	.give_timebase	= smp_core99_give_timebase,
-- cgit v0.10.2

From 105765f451d3ff007bb4ae3761e825686d9615db Mon Sep 17 00:00:00 2001
From: Benjamin Herrenschmidt
Date: Fri, 1 Apr 2011 09:23:37 +1100
Subject: powerpc/smp: Don't expose per-cpu "cpu_state" array

Instead, keep it static, expose an accessor and use that from the
PowerMac code. Avoids easy namespace collisions and will make it
easier to consolidate with other implementations.

Signed-off-by: Benjamin Herrenschmidt

diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index 7e99771..a902a0d 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -45,7 +45,7 @@ extern void migrate_irqs(void);
 int generic_cpu_disable(void);
 void generic_cpu_die(unsigned int cpu);
 void generic_mach_cpu_die(void);
-DECLARE_PER_CPU(int, cpu_state);
+void generic_set_cpu_dead(unsigned int cpu);
 #endif
 
 #ifdef CONFIG_PPC64

diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index df37397..d7f8cc1 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -305,7 +305,7 @@ void __devinit smp_prepare_boot_cpu(void)
 
 #ifdef CONFIG_HOTPLUG_CPU
 /* State of each CPU during hotplug phases */
-DEFINE_PER_CPU(int, cpu_state) = { 0 };
+static DEFINE_PER_CPU(int, cpu_state) = { 0 };
 
 int generic_cpu_disable(void)
 {
@@ -348,6 +348,11 @@ void generic_mach_cpu_die(void)
 	while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)
 		cpu_relax();
 }
+
+void generic_set_cpu_dead(unsigned int cpu)
+{
+	per_cpu(cpu_state, cpu) = CPU_DEAD;
+}
 #endif
 
 int __cpuinit __cpu_up(unsigned int cpu)

diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index 74a43c6..ce5b4f5 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -920,10 +920,12 @@ static int smp_core99_cpu_disable(void)
 
 static void pmac_cpu_die(void)
 {
+	int cpu = smp_processor_id();
+
 	local_irq_disable();
 	idle_task_exit();
-	printk(KERN_DEBUG "CPU%d offline\n", smp_processor_id());
-	__get_cpu_var(cpu_state) = CPU_DEAD;
+	pr_debug("CPU%d offline\n", cpu);
+	generic_set_cpu_dead(cpu);
 	smp_wmb();
 	mb();
 	low_cpu_die();
@@ -933,6 +935,8 @@ static void pmac_cpu_die(void)
 
 static void pmac_cpu_die(void)
 {
+	int cpu = smp_processor_id();
+
 	local_irq_disable();
 	idle_task_exit();
 
 	/*
 	 * turn off as much as possible, we'll be
 	 * kicked out as this will only be invoked
 	 * on core99 platforms for now ...
 	 */
-	printk(KERN_INFO "CPU#%d offline\n", smp_processor_id());
-	__get_cpu_var(cpu_state) = CPU_DEAD;
+	printk(KERN_INFO "CPU#%d offline\n", cpu);
+	generic_set_cpu_dead(cpu);
 	smp_wmb();
 
 	/*
-- cgit v0.10.2

From c56e58537d504706954a06570b4034c04e5b7500 Mon Sep 17 00:00:00 2001
From: Benjamin Herrenschmidt
Date: Tue, 8 Mar 2011 14:40:04 +1100
Subject: powerpc/smp: Create idle threads on demand and properly reset them

Instead of creating idle threads at boot for all possible CPUs, we
create them on demand, like x86 or ARM, and we properly call init_idle
to re-initialize an idle thread when a CPU was unplugged and is now
re-plugged.

Signed-off-by: Benjamin Herrenschmidt

diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index d7f8cc1..54faff9 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -57,6 +57,25 @@
 #define DBG(fmt...)
 #endif
 
+
+/* Store all idle threads, this can be reused instead of creating
+* a new thread. Also avoids complicated thread destroy functionality
+* for idle threads.
+*/
+#ifdef CONFIG_HOTPLUG_CPU
+/*
+ * Needed only for CONFIG_HOTPLUG_CPU because __cpuinitdata is
+ * removed after init for !CONFIG_HOTPLUG_CPU.
+ */
+static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
+#define get_idle_for_cpu(x)      (per_cpu(idle_thread_array, x))
+#define set_idle_for_cpu(x, p)   (per_cpu(idle_thread_array, x) = (p))
+#else
+static struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ;
+#define get_idle_for_cpu(x)      (idle_thread_array[(x)])
+#define set_idle_for_cpu(x, p)   (idle_thread_array[(x)] = (p))
+#endif
+
 struct thread_info *secondary_ti;
 
 DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
@@ -238,23 +257,6 @@ static void __devinit smp_store_cpu_info(int id)
 	per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
 }
 
-static void __init smp_create_idle(unsigned int cpu)
-{
-	struct task_struct *p;
-
-	/* create a process for the processor */
-	p = fork_idle(cpu);
-	if (IS_ERR(p))
-		panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
-#ifdef CONFIG_PPC64
-	paca[cpu].__current = p;
-	paca[cpu].kstack = (unsigned long) task_thread_info(p)
-		+ THREAD_SIZE - STACK_FRAME_OVERHEAD;
-#endif
-	current_set[cpu] = task_thread_info(p);
-	task_thread_info(p)->cpu = cpu;
-}
-
 void __init smp_prepare_cpus(unsigned int max_cpus)
 {
 	unsigned int cpu;
@@ -288,10 +290,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 		max_cpus = NR_CPUS;
 	else
 		max_cpus = 1;
-
-	for_each_possible_cpu(cpu)
-		if (cpu != boot_cpuid)
-			smp_create_idle(cpu);
 }
 
 void __devinit smp_prepare_boot_cpu(void)
@@ -355,9 +353,62 @@ void generic_set_cpu_dead(unsigned int cpu)
 }
 #endif
 
+struct create_idle {
+	struct work_struct work;
+	struct task_struct *idle;
+	struct completion done;
+	int cpu;
+};
+
+static void __cpuinit do_fork_idle(struct work_struct *work)
+{
+	struct create_idle *c_idle =
+		container_of(work, struct create_idle, work);
+
+	c_idle->idle = fork_idle(c_idle->cpu);
+	complete(&c_idle->done);
+}
+
+static int __cpuinit create_idle(unsigned int cpu)
+{
+	struct thread_info *ti;
+	struct create_idle c_idle = {
+		.cpu	= cpu,
+		.done	= COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
+	};
+	INIT_WORK_ONSTACK(&c_idle.work, do_fork_idle);
+
+	c_idle.idle = get_idle_for_cpu(cpu);
+
+	/* We can't use kernel_thread since we must avoid to
+	 * reschedule the child. We use a workqueue because
+	 * we want to fork from a kernel thread, not whatever
+	 * userspace process happens to be trying to online us.
+	 */
+	if (!c_idle.idle) {
+		schedule_work(&c_idle.work);
+		wait_for_completion(&c_idle.done);
+	} else
+		init_idle(c_idle.idle, cpu);
+	if (IS_ERR(c_idle.idle)) {
+		pr_err("Failed fork for CPU %u: %li", cpu, PTR_ERR(c_idle.idle));
+		return PTR_ERR(c_idle.idle);
+	}
+	ti = task_thread_info(c_idle.idle);
+
+#ifdef CONFIG_PPC64
+	paca[cpu].__current = c_idle.idle;
+	paca[cpu].kstack = (unsigned long)ti + THREAD_SIZE - STACK_FRAME_OVERHEAD;
+#endif
+	ti->cpu = cpu;
+	current_set[cpu] = ti;
+
+	return 0;
+}
+
 int __cpuinit __cpu_up(unsigned int cpu)
 {
-	int c;
+	int rc, c;
 
 	secondary_ti = current_set[cpu];
 
@@ -365,6 +416,11 @@ int __cpuinit __cpu_up(unsigned int cpu)
 	    (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
 		return -EINVAL;
 
+	/* Make sure we have an idle thread */
+	rc = create_idle(cpu);
+	if (rc)
+		return rc;
+
 	/* Make sure callin-map entry is 0 (can be leftover a CPU
 	 * hotplug
 	 */
-- cgit v0.10.2

From aeeafbfa7a5692c68d306043878aa2dd785e5230 Mon Sep 17 00:00:00 2001
From: Benjamin Herrenschmidt
Date: Tue, 8 Mar 2011 14:49:33 +1100
Subject: powerpc/smp: Increase vdso_data->processorCount, not just decrease it

Signed-off-by: Benjamin Herrenschmidt

diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 54faff9..cbdbb14 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -551,6 +551,10 @@ void __devinit start_secondary(void *unused)
 
 	secondary_cpu_time_init();
 
+#ifdef CONFIG_PPC64
+	if (system_state == SYSTEM_RUNNING)
+		vdso_data->processorCount++;
+#endif
 	ipi_call_lock();
 	notify_cpu_starting(cpu);
 	set_cpu_online(cpu, true);
-- cgit v0.10.2

From 76d479a7caff58b1e5f31d80805f7f65f9177696 Mon Sep 17 00:00:00 2001
From: Benjamin Herrenschmidt
Date: Tue, 8 Mar 2011 14:57:26 +1100
Subject: powerpc/pmac/smp: Remove no-longer needed preempt workaround

The generic code properly re-initializes the preempt count in the idle
thread now

Signed-off-by: Benjamin Herrenschmidt

diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index ce5b4f5..a830c5e 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -951,13 +951,6 @@ static void pmac_cpu_die(void)
 	smp_wmb();
 
 	/*
-	 * during the path that leads here preemption is disabled,
-	 * reenable it now so that when coming up preempt count is
-	 * zero correctly
-	 */
-	preempt_enable();
-
-	/*
 	 * Re-enable interrupts. The NAP code needs to enable them
 	 * anyways, do it now so we deal with the case where one already
 	 * happened while soft-disabled.
-- cgit v0.10.2

From c0bb9e45f3a7f67fc358946727bc3d5f23d0f55d Mon Sep 17 00:00:00 2001
From: Anton Blanchard
Date: Wed, 25 Aug 2010 10:22:58 +1000
Subject: kdump: Allow shrinking of kdump region to be overridden

On ppc64 the crashkernel region almost always overlaps an area of
firmware. This works fine except when using the sysfs interface to
reduce the kdump region. If we free the firmware area we are guaranteed
to crash.

Rename free_reserved_phys_range to crash_free_reserved_phys_range and
make it a weak function so we can override it.

Signed-off-by: Anton Blanchard
Signed-off-by: Benjamin Herrenschmidt

diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index 03e8e8d..c2478a3 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -208,6 +208,7 @@ int __init parse_crashkernel(char *cmdline, unsigned long long system_ram,
 		unsigned long long *crash_size, unsigned long long *crash_base);
 int crash_shrink_memory(unsigned long new_size);
 size_t crash_get_memory_size(void);
+void crash_free_reserved_phys_range(unsigned long begin, unsigned long end);
 
 #else /* !CONFIG_KEXEC */
 struct pt_regs;

diff --git a/kernel/kexec.c b/kernel/kexec.c
index ec19b92..4e240a3 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -1099,7 +1099,8 @@ size_t crash_get_memory_size(void)
 	return size;
 }
 
-static void free_reserved_phys_range(unsigned long begin, unsigned long end)
+void __weak crash_free_reserved_phys_range(unsigned long begin,
+					   unsigned long end)
 {
 	unsigned long addr;
 
@@ -1135,7 +1136,7 @@ int crash_shrink_memory(unsigned long new_size)
 	start = roundup(start, PAGE_SIZE);
 	end = roundup(start + new_size, PAGE_SIZE);
 
-	free_reserved_phys_range(end, crashk_res.end);
+	crash_free_reserved_phys_range(end, crashk_res.end);
 
 	if ((start == end) && (crashk_res.parent != NULL))
 		release_resource(&crashk_res);
-- cgit v0.10.2
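
For readers wondering what an architecture-side override of the new weak hook
might look like: the sketch below is illustrative only and is not the actual
powerpc implementation. The helper overlaps_firmware_region() is hypothetical,
standing in for whatever firmware-reservation check the platform really needs,
and the per-page release mirrors the generic loop found in kernel/kexec.c of
this era.

#include <linux/kexec.h>
#include <linux/mm.h>
#include <asm/page.h>

/*
 * Hypothetical strong definition overriding the __weak generic one:
 * skip pages that still belong to firmware reservations and hand the
 * rest back to the page allocator.
 */
void crash_free_reserved_phys_range(unsigned long begin, unsigned long end)
{
	unsigned long addr;

	for (addr = begin; addr < end; addr += PAGE_SIZE) {
		/* overlaps_firmware_region() is a placeholder check */
		if (overlaps_firmware_region(addr, addr + PAGE_SIZE))
			continue;

		/* Per-page release mirroring the generic weak version */
		ClearPageReserved(pfn_to_page(addr >> PAGE_SHIFT));
		init_page_count(pfn_to_page(addr >> PAGE_SHIFT));
		free_page((unsigned long)__va(addr));
		totalram_pages++;
	}
}

Because crash_shrink_memory() calls this function through the weak symbol, no
other changes are needed for such an override to take effect once the strong
definition is linked in.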