Diffstat (limited to 'arch/powerpc/platforms/85xx/smp.c')
-rw-r--r--  arch/powerpc/platforms/85xx/smp.c  257
1 file changed, 234 insertions(+), 23 deletions(-)
diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c
index 6a17599..43cc5c9 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -26,6 +26,7 @@
#include <asm/cacheflush.h>
#include <asm/dbell.h>
#include <asm/fsl_guts.h>
+#include <asm/cputhreads.h>
#include <sysdev/fsl_soc.h>
#include <sysdev/mpic.h>
@@ -40,14 +41,61 @@ struct epapr_spin_table {
u32 pir;
};
-static struct ccsr_guts __iomem *guts;
+static void __iomem *guts_regs;
static u64 timebase;
static int tb_req;
static int tb_valid;
+static u32 cur_booting_core;
+static bool rcpmv2;
-static void mpc85xx_timebase_freeze(int freeze)
+extern void fsl_enable_threads(void);
+
+#ifdef CONFIG_PPC_E500MC
+/* get a physical mask of the online cores and the core being booted */
+static inline u32 get_phy_cpu_mask(void)
+{
+ u32 mask;
+ int cpu;
+
+ if (smt_capable()) {
+ /* two threads in one core share one time base */
+ mask = 1 << cpu_core_index_of_thread(cur_booting_core);
+ for_each_online_cpu(cpu)
+ mask |= 1 << cpu_core_index_of_thread(
+ get_hard_smp_processor_id(cpu));
+ } else {
+ mask = 1 << cur_booting_core;
+ for_each_online_cpu(cpu)
+ mask |= 1 << get_hard_smp_processor_id(cpu);
+ }
+
+ return mask;
+}
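/*
 * A worked example of the mapping above, assuming threads_per_core == 2
 * as on e6500: cpu_core_index_of_thread() is simply
 * cpu / threads_per_core, so hw threads 4 and 5 both select core 2:
 *
 *     mask  = 1 << (4 / 2);   // 1 << 2
 *     mask |= 1 << (5 / 2);   // still 1 << 2 -- one bit per core
 */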
+
+static void __cpuinit mpc85xx_timebase_freeze(int freeze)
+{
+ u32 *addr;
+ u32 mask = get_phy_cpu_mask();
+
+ if (rcpmv2)
+ addr = &((struct ccsr_rcpm_v2 *)guts_regs)->pctbenr;
+ else
+ addr = &((struct ccsr_rcpm *)guts_regs)->ctbenr;
+
+ if (freeze)
+ clrbits32(addr, mask);
+ else
+ setbits32(addr, mask);
+
+ /* read back to push the previous write */
+ in_be32(addr);
+}
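/*
 * The MMIO helpers above are the powerpc read-modify-write accessors
 * from arch/powerpc/include/asm/io.h, roughly:
 *
 *     setbits32(addr, v);   // out_be32(addr, in_be32(addr) | (v))
 *     clrbits32(addr, v);   // out_be32(addr, in_be32(addr) & ~(v))
 *
 * so freezing clears the per-core timebase-enable bits and thawing
 * sets them again.
 */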
+
+#else
+static void __cpuinit mpc85xx_timebase_freeze(int freeze)
{
- uint32_t mask;
+ struct ccsr_guts __iomem *guts = guts_regs;
+ u32 mask;
mask = CCSR_GUTS_DEVDISR_TB0 | CCSR_GUTS_DEVDISR_TB1;
if (freeze)
@@ -55,13 +103,26 @@ static void mpc85xx_timebase_freeze(int freeze)
else
clrbits32(&guts->devdisr, mask);
+ /* read back to push the previous write */
in_be32(&guts->devdisr);
}
+#endif
-static void mpc85xx_give_timebase(void)
+static void __cpuinit mpc85xx_give_timebase(void)
{
unsigned long flags;
+ /* only do time base sync when system is running */
+ if (system_state == SYSTEM_BOOTING)
+ return;
+ /*
+ * If the booting thread is not the first thread of the core,
+ * skip time base sync.
+ */
+ if (smt_capable() &&
+ cur_booting_core != cpu_first_thread_sibling(cur_booting_core))
+ return;
+
local_irq_save(flags);
while (!tb_req)
@@ -69,7 +130,30 @@ static void mpc85xx_give_timebase(void)
tb_req = 0;
mpc85xx_timebase_freeze(1);
+#ifdef CONFIG_PPC64
+ /*
+ * e5500/e6500 have a workaround for erratum A-006958 in place
+ * that will reread the timebase until TBL is non-zero.
+ * That would be a bad thing when the timebase is frozen.
+ *
+ * Thus, we read it manually, and instead of checking that
+ * TBL is non-zero, we ensure that TB does not change. We don't
+ * do that for the main mftb implementation, because it requires
+ * a scratch register.
+ */
+ {
+ u64 prev;
+
+ asm volatile("mftb %0" : "=r" (timebase));
+
+ do {
+ prev = timebase;
+ asm volatile("mftb %0" : "=r" (timebase));
+ } while (prev != timebase);
+ }
+#else
timebase = get_tb();
+#endif
mb();
tb_valid = 1;
@@ -81,10 +165,17 @@ static void mpc85xx_give_timebase(void)
local_irq_restore(flags);
}
-static void mpc85xx_take_timebase(void)
+static void __cpuinit mpc85xx_take_timebase(void)
{
unsigned long flags;
+ if (system_state == SYSTEM_BOOTING)
+ return;
+
+ if (smt_capable() &&
+ cur_booting_core != cpu_first_thread_sibling(cur_booting_core))
+ return;
+
local_irq_save(flags);
tb_req = 1;
@@ -99,6 +190,59 @@ static void mpc85xx_take_timebase(void)
}
#ifdef CONFIG_HOTPLUG_CPU
+#ifdef CONFIG_PPC_E500MC
+static inline bool is_core_down(unsigned int thread)
+{
+ cpumask_t thd_mask;
+
+ if (!smt_capable())
+ return true;
+
+ cpumask_shift_left(&thd_mask, &threads_core_mask,
+ cpu_core_index_of_thread(thread) * threads_per_core);
+
+ return !cpumask_intersects(&thd_mask, cpu_online_mask);
+}
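/*
 * A sketch of the arithmetic above, assuming threads_per_core == 2:
 * threads_core_mask is 0b11, and for hw thread 5 (core 2) the shift
 * count is 2 * 2 = 4, giving thd_mask == 0b110000. The core counts as
 * down only if neither thread bit intersects cpu_online_mask.
 */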
+
+static void __cpuinit smp_85xx_mach_cpu_die(void)
+{
+ unsigned int cpu = smp_processor_id();
+
+ local_irq_disable();
+ idle_task_exit();
+ mb();
+
+ mtspr(SPRN_TCR, 0);
+
+ if (is_core_down(cpu))
+ __flush_disable_L1();
+
+ if (cur_cpu_spec->l2cache_type == PPC_L2_CACHE_CORE)
+ disable_backside_L2_cache();
+
+ generic_set_cpu_dead(cpu);
+
+ while (1)
+ ;
+}
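/*
 * The dead CPU parks in the loop above with interrupts hard-disabled;
 * it leaves only when platform_cpu_die() drops the core into a
 * low-power state or a later smp_85xx_kick_cpu() resets the core.
 */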
+
+void platform_cpu_die(unsigned int cpu)
+{
+ unsigned int hw_cpu = get_hard_smp_processor_id(cpu);
+ struct ccsr_rcpm __iomem *rcpm;
+
+ if (rcpmv2 && is_core_down(cpu)) {
+ /* enter the PH20 state */
+ setbits32(&((struct ccsr_rcpm_v2 *)guts_regs)->pcph20setr,
+ 1 << cpu_core_index_of_thread(hw_cpu));
+ } else if (!rcpmv2 && guts_regs) {
+ rcpm = guts_regs;
+ /* Core Nap Operation */
+ setbits32(&rcpm->cnapcr, 1 << hw_cpu);
+ }
+}
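/*
 * On RCPM v2 parts, PH20 appears to be a deeper per-core low-power
 * state entered through PCPH20SETR, while RCPM v1 parts only offer
 * per-core nap via CNAPCR; hence the two paths above.
 */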
+#else
+/* for e500v1 and e500v2 */
static void __cpuinit smp_85xx_mach_cpu_die(void)
{
unsigned int cpu = smp_processor_id();
@@ -126,6 +270,7 @@ static void __cpuinit smp_85xx_mach_cpu_die(void)
while (1)
;
}
+#endif /* CONFIG_PPC_E500MC */
#endif
static inline void flush_spin_table(void *spin_table)
@@ -150,12 +295,62 @@ static int __cpuinit smp_85xx_kick_cpu(int nr)
int hw_cpu = get_hard_smp_processor_id(nr);
int ioremappable;
int ret = 0;
+#ifdef CONFIG_PPC_E500MC
+ struct ccsr_rcpm __iomem *rcpm = guts_regs;
+ struct ccsr_rcpm_v2 __iomem *rcpm_v2 = guts_regs;
+#endif
WARN_ON(nr < 0 || nr >= NR_CPUS);
WARN_ON(hw_cpu < 0 || hw_cpu >= NR_CPUS);
pr_debug("smp_85xx_kick_cpu: kick CPU #%d\n", nr);
+#ifdef CONFIG_PPC64
+ /* If the cpu we're kicking is a secondary thread, kick it and return */
+ if (smt_capable() && (cpu_thread_in_core(nr) != 0)) {
+ /*
+ * Since Thread 1 cannot start Thread 0 in the same core,
+ * Thread 0 of each core must already be running before
+ * Thread 1 can be started.
+ */
+ if (cpu_online(cpu_first_thread_sibling(nr))) {
+
+ local_irq_save(flags);
+ /*
+ * In the cpu hotplug case, Thread 1 of Core 0 must be
+ * started by calling fsl_enable_threads(). Thread 1 of
+ * the other cores can be started by Thread 0 after the
+ * core resets.
+ */
+ if (nr == 1 && system_state == SYSTEM_RUNNING)
+ fsl_enable_threads();
+
+ smp_generic_kick_cpu(nr);
+
+ generic_set_cpu_up(nr);
+ cur_booting_core = hw_cpu;
+
+ local_irq_restore(flags);
+
+ return 0;
+ } else {
+ pr_err("%s: Can not start CPU #%d. Start CPU #%d first.\n",
+ __func__, nr, cpu_first_thread_sibling(nr));
+ return -ENOENT;
+ }
+ }
+
+#ifdef CONFIG_HOTPLUG_CPU
+ /* Starting Thread 0 will reset the core, so put both threads down first */
+ if (smt_capable() && system_state == SYSTEM_RUNNING &&
+ cpu_thread_in_core(nr) == 0 && !is_core_down(nr)) {
+ pr_err("%s: Can not start CPU #%d. Put CPU #%d down first.",
+ __func__, nr, cpu_last_thread_sibling(nr));
+ return -ENOENT;
+ }
+#endif
+#endif
+
np = of_get_cpu_node(nr, NULL);
cpu_rel_addr = of_get_property(np, "cpu-release-addr", NULL);
@@ -180,10 +375,6 @@ static int __cpuinit smp_85xx_kick_cpu(int nr)
spin_table = phys_to_virt(*cpu_rel_addr);
local_irq_save(flags);
-#ifdef CONFIG_PPC32
-#ifdef CONFIG_HOTPLUG_CPU
- /* Corresponding to generic_set_cpu_dead() */
- generic_set_cpu_up(nr);
if (system_state == SYSTEM_RUNNING) {
/*
@@ -197,6 +388,15 @@ static int __cpuinit smp_85xx_kick_cpu(int nr)
out_be32(&spin_table->addr_l, 0);
flush_spin_table(spin_table);
+#ifdef CONFIG_PPC_E500MC
+ /* Due to an erratum, wake the core before reset. */
+ if (rcpmv2)
+ setbits32(&rcpm_v2->pcph20clrr,
+ 1 << cpu_core_index_of_thread(hw_cpu));
+ else
+ clrbits32(&rcpm->cnapcr, 1 << hw_cpu);
+#endif
+
/*
* We don't set the BPTR register here since it already points
* to the boot page properly.
@@ -220,12 +420,19 @@ static int __cpuinit smp_85xx_kick_cpu(int nr)
/* clear the acknowledge status */
__secondary_hold_acknowledge = -1;
}
-#endif
flush_spin_table(spin_table);
out_be32(&spin_table->pir, hw_cpu);
+#ifdef CONFIG_PPC32
out_be32(&spin_table->addr_l, __pa(__early_start));
+#else
+ out_be32(&spin_table->addr_h,
+ __pa(*(u64 *)generic_secondary_smp_init) >> 32);
+ out_be32(&spin_table->addr_l,
+ __pa(*(u64 *)generic_secondary_smp_init) & 0xffffffff);
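/*
 * On ppc64 with the ELFv1 ABI in use when this patch was written,
 * generic_secondary_smp_init names a function descriptor, so the
 * *(u64 *) dereference above extracts the real entry address before
 * it is split into the spin table's addr_h/addr_l words.
 */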
+#endif
flush_spin_table(spin_table);
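/*
 * The addr_l store is what actually releases the held core, which is
 * why it is written after pir (and addr_h on ppc64) and why
 * flush_spin_table() pushes the table to memory on either side: the
 * boot program may poll the spin table with caches disabled.
 */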
+#ifdef CONFIG_PPC32
/* Wait a bit for the CPU to ack. */
if (!spin_event_timeout(__secondary_hold_acknowledge == hw_cpu,
10000, 100)) {
@@ -234,17 +441,14 @@ static int __cpuinit smp_85xx_kick_cpu(int nr)
ret = -ENOENT;
goto out;
}
-out:
#else
smp_generic_kick_cpu(nr);
-
- flush_spin_table(spin_table);
- out_be32(&spin_table->pir, hw_cpu);
- out_be64((u64 *)(&spin_table->addr_h),
- __pa((u64)*((unsigned long long *)generic_secondary_smp_init)));
- flush_spin_table(spin_table);
#endif
+ /* Corresponding to generic_set_cpu_dead() */
+ generic_set_cpu_up(nr);
+ cur_booting_core = hw_cpu;
+out:
local_irq_restore(flags);
if (ioremappable)
@@ -255,14 +459,11 @@ out:
struct smp_ops_t smp_85xx_ops = {
.kick_cpu = smp_85xx_kick_cpu,
+ .cpu_bootable = smp_generic_cpu_bootable,
#ifdef CONFIG_HOTPLUG_CPU
.cpu_disable = generic_cpu_disable,
.cpu_die = generic_cpu_die,
#endif
-#ifdef CONFIG_KEXEC
- .give_timebase = smp_generic_give_timebase,
- .take_timebase = smp_generic_take_timebase,
-#endif
};
#ifdef CONFIG_KEXEC
@@ -378,6 +579,9 @@ static const struct of_device_id mpc85xx_smp_guts_ids[] = {
{ .compatible = "fsl,p1022-guts", },
{ .compatible = "fsl,p1023-guts", },
{ .compatible = "fsl,p2020-guts", },
+ { .compatible = "fsl,qoriq-rcpm-1.0", },
+ { .compatible = "fsl,qoriq-rcpm-2.0", },
+ { .compatible = "fsl,bsc9132-guts", },
{},
};
@@ -402,11 +606,18 @@ void __init mpc85xx_smp_init(void)
smp_85xx_ops.cause_ipi = doorbell_cause_ipi;
}
+#ifdef CONFIG_HOTPLUG_CPU
+ ppc_md.cpu_die = generic_mach_cpu_die;
+#endif
+
np = of_find_matching_node(NULL, mpc85xx_smp_guts_ids);
if (np) {
- guts = of_iomap(np, 0);
+ if (of_device_is_compatible(np, "fsl,qoriq-rcpm-2.0"))
+ rcpmv2 = true;
+
+ guts_regs = of_iomap(np, 0);
of_node_put(np);
- if (!guts) {
+ if (!guts_regs) {
pr_err("%s: Could not map guts node address\n",
__func__);
return;