author    Zhang Zhuoyu <Zhuoyu.Zhang@freescale.com>  2014-10-28 10:26:13 (GMT)
committer Matthew Weigel <Matthew.Weigel@freescale.com>  2014-12-11 18:39:03 (GMT)
commit    ce8f25a1f2483918dc88838599ea1c6e70811bfa (patch)
tree      f198ebe968c424bb13ffcb42ce034838b4cb3623 /arch
parent    7287030178f83068e4fc8e30688ab50124df321a (diff)
download  linux-fsl-qoriq-ce8f25a1f2483918dc88838599ea1c6e70811bfa.tar.xz
powerpc/cpu-hotplug: Support PCL10 state for e6500
PCL10 is a cluster low power state in which the cluster clock is gated off. On e6500-based platforms, a cluster enters PCL10 automatically once all cores of that cluster are offline.

Signed-off-by: Hongtao Jia <hongtao.jia@freescale.com>
Signed-off-by: Zhang Zhuoyu <Zhuoyu.Zhang@freescale.com>
Change-Id: Ibac7138ff685bbaeaed139629e9f2833e3148379
Reviewed-on: http://git.am.freescale.net:8181/22315
Tested-by: Review Code-CDREVIEW <CDREVIEW@freescale.com>
Reviewed-by: Yang Li <LeoLi@freescale.com>
Diffstat (limited to 'arch')
-rw-r--r--  arch/powerpc/include/asm/fsl_pm.h |   7
-rw-r--r--  arch/powerpc/platforms/85xx/smp.c | 196
-rw-r--r--  arch/powerpc/platforms/85xx/smp.h |   6
-rw-r--r--  arch/powerpc/sysdev/fsl_rcpm.c    | 114
-rw-r--r--  arch/powerpc/sysdev/fsl_soc.c     |  17
-rw-r--r--  arch/powerpc/sysdev/fsl_soc.h     |   1
6 files changed, 340 insertions(+), 1 deletion(-)
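
In outline, the offline path added by this patch behaves as in the sketch below. This is an illustrative condensation of platform_cpu_die() and cluster_offline() from the diff that follows, not a drop-in excerpt; maybe_enter_pcl10() is a hypothetical name, and the timeouts and error paths of the real code are elided.

/* Illustrative sketch only: when the last core of an e6500 cluster
 * goes offline, gate the whole cluster's clock via PCL10.
 */
static void maybe_enter_pcl10(unsigned int cpu)
{
	int i;

	/* Only e6500-based clusters support PCL10. */
	if (PVR_VER(cur_cpu_spec->pvr_value) != PVR_VER_E6500)
		return;

	/* If any sibling core is still online, the cluster stays up. */
	for_each_cpu(i, cpu_cluster_mask(cpu))
		if (cpu_online(i))
			return;

	/* Last core down: flush and disable the shared L2, park the
	 * cores in PH20, then request PCL10 for the cluster.
	 */
	cluster_offline(cpu);
}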
diff --git a/arch/powerpc/include/asm/fsl_pm.h b/arch/powerpc/include/asm/fsl_pm.h
index 4e6fd0e..80484d6 100644
--- a/arch/powerpc/include/asm/fsl_pm.h
+++ b/arch/powerpc/include/asm/fsl_pm.h
@@ -19,7 +19,9 @@
#define E500_PM_PW10 2
#define E500_PM_PH15 3
#define E500_PM_PH20 4
-#define E500_PM_PH30 5
+#define E500_PM_PW20 5
+#define E500_PM_PH30 6
+#define E500_PM_PCL10 7
#define E500_PM_DOZE E500_PM_PH10
#define E500_PM_NAP E500_PM_PH15
@@ -31,9 +33,12 @@ struct fsl_pm_ops {
void (*irq_unmask)(int cpu);
void (*cpu_enter_state)(int cpu, int state);
void (*cpu_exit_state)(int cpu, int state);
+ void (*cluster_enter_state)(int cpu, int state);
+ void (*cluster_exit_state)(int cpu, int state);
int (*plat_enter_state)(int state);
void (*freeze_time_base)(int freeze);
void (*set_ip_power)(int enable, u32 mask);
+ bool (*cpu_ready)(unsigned int cpu, int state);
};
extern const struct fsl_pm_ops *qoriq_pm_ops;
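
Callers confirm a state transition by polling the new cpu_ready() hook; the 85xx code below wraps it in the powerpc spin_event_timeout() primitive with 10000/100 bounds. That pattern, isolated as a minimal sketch (wait_for_pm_state() is a hypothetical helper, not part of the patch):

/* Hypothetical helper: poll until @cpu reports @state or time out,
 * mirroring the pattern used in cluster_offline()/cluster_online().
 */
static int wait_for_pm_state(unsigned int cpu, int state)
{
	if (!spin_event_timeout(qoriq_pm_ops->cpu_ready(cpu, state),
				10000, 100))
		return -ETIMEDOUT;
	return 0;
}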
diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c
index 5b12f4b..81f6899 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -28,6 +28,7 @@
#include <asm/fsl_guts.h>
#include <asm/cputhreads.h>
#include <asm/fsl_pm.h>
+#include <asm/cacheflush.h>
#include <sysdev/fsl_soc.h>
#include <sysdev/mpic.h>
@@ -247,6 +248,191 @@ static void smp_85xx_mach_cpu_die(void)
#endif /* CONFIG_PPC_E500MC */
#endif
+#if defined(CONFIG_PPC_E500MC) && defined(CONFIG_HOTPLUG_CPU)
+static int cluster_offline(unsigned int cpu)
+{
+ unsigned long flags;
+ int i;
+ const struct cpumask *mask;
+ void __iomem *cluster_l2_base;
+ int ret = 0;
+
+ mask = cpu_cluster_mask(cpu);
+ cluster_l2_base = get_cpu_l2_base(cpu);
+
+ /* Wait until all CPUs of the cluster have entered the wait state. */
+ for_each_cpu(i, mask) {
+ if (!spin_event_timeout(
+ qoriq_pm_ops->cpu_ready(i, E500_PM_PW10),
+ 10000, 100)) {
+ pr_err("%s: cpu enter wait state timeout.\n",
+ __func__);
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+ }
+
+ /* Flush and invalidate the L2 cache */
+ local_irq_save(flags);
+ cluster_flush_invalidate_L2_cache(cluster_l2_base);
+
+ /* Let all cores of the same cluster enter PH20 state */
+ for_each_cpu(i, mask) {
+ qoriq_pm_ops->cpu_enter_state(i, E500_PM_PH20);
+ }
+
+ /* Wait until all cores have entered the PH20 state */
+ for_each_cpu(i, mask) {
+ if (!spin_event_timeout(
+ qoriq_pm_ops->cpu_ready(i, E500_PM_PH20),
+ 10000, 100)) {
+ pr_err("%s: core enter PH20 timeout\n", __func__);
+ ret = -ETIMEDOUT;
+ goto irq_restore_out;
+ }
+ }
+
+ /* Disable L2 cache */
+ cluster_disable_L2_cache(cluster_l2_base);
+
+ /* Cluster enters the PCL10 state */
+ qoriq_pm_ops->cluster_enter_state(cpu, E500_PM_PCL10);
+
+ if (!spin_event_timeout(qoriq_pm_ops->cpu_ready(cpu, E500_PM_PCL10),
+ 10000, 100)) {
+ /* Entering PCL10 failed; re-enable the L2 cache */
+ cluster_invalidate_enable_L2(cluster_l2_base);
+ pr_err("%s: cluster enter PCL10 timeout\n", __func__);
+ ret = -ETIMEDOUT;
+ }
+
+irq_restore_out:
+ local_irq_restore(flags);
+out:
+ iounmap(cluster_l2_base);
+ return ret;
+}
+
+static int cluster_online(unsigned int cpu)
+{
+ unsigned long flags;
+ int i;
+ const struct cpumask *mask;
+ void __iomem *cluster_l2_base;
+ int ret = 0;
+
+ mask = cpu_cluster_mask(cpu);
+ cluster_l2_base = get_cpu_l2_base(cpu);
+
+ local_irq_save(flags);
+
+ qoriq_pm_ops->cluster_exit_state(cpu, E500_PM_PCL10);
+
+ /* Wait until the cluster exits the PCL10 state */
+ if (!spin_event_timeout(
+ !qoriq_pm_ops->cpu_ready(cpu, E500_PM_PCL10),
+ 10000, 100)) {
+ pr_err("%s: cluster exit PCL10 timeout\n", __func__);
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ /* Invalidate and enable L2 cache */
+ cluster_invalidate_enable_L2(cluster_l2_base);
+
+ /* Let all cores of a cluster exit PH20 state */
+ for_each_cpu(i, mask) {
+ qoriq_pm_ops->cpu_exit_state(i, E500_PM_PH20);
+ }
+
+ /* Wait until all cores of a cluster exit PH20 state */
+ for_each_cpu(i, mask) {
+ if (!spin_event_timeout(
+ !qoriq_pm_ops->cpu_ready(i, E500_PM_PH20),
+ 10000, 100)) {
+ pr_err("%s: core exit PH20 timeout\n", __func__);
+ ret = -ETIMEDOUT;
+ break;
+ }
+ }
+
+out:
+ local_irq_restore(flags);
+ iounmap(cluster_l2_base);
+ return ret;
+}
+
+void platform_cpu_die(unsigned int cpu)
+{
+ int i;
+ const struct cpumask *cluster_mask;
+
+ if (PVR_VER(cur_cpu_spec->pvr_value) == PVR_VER_E6500) {
+ cluster_mask = cpu_cluster_mask(cpu);
+ for_each_cpu(i, cluster_mask) {
+ if (cpu_online(i))
+ return;
+ }
+
+ cluster_offline(cpu);
+ }
+}
+#endif
+
+static struct device_node *cpu_to_l2cache(int cpu)
+{
+ struct device_node *np;
+ struct device_node *cache;
+
+ if (!cpu_present(cpu))
+ return NULL;
+
+ np = of_get_cpu_node(cpu, NULL);
+ if (np == NULL)
+ return NULL;
+
+ cache = of_find_next_cache_node(np);
+
+ of_node_put(np);
+
+ return cache;
+}
+
+DEFINE_PER_CPU(cpumask_t, cpu_cluster_map);
+EXPORT_PER_CPU_SYMBOL(cpu_cluster_map);
+
+static void init_cpu_cluster_map(void)
+{
+ struct device_node *l2_cache, *np;
+ int cpu, i;
+ char buf[20];
+ ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf;
+
+ for_each_cpu(cpu, cpu_present_mask) {
+ l2_cache = cpu_to_l2cache(cpu);
+ if (!l2_cache)
+ continue;
+
+ for_each_cpu(i, cpu_present_mask) {
+ np = cpu_to_l2cache(i);
+ if (!np)
+ continue;
+ if (np == l2_cache) {
+ cpumask_set_cpu(cpu, cpu_cluster_mask(i));
+ cpumask_set_cpu(i, cpu_cluster_mask(cpu));
+ }
+ of_node_put(np);
+ }
+ of_node_put(l2_cache);
+ cpumask_scnprintf(buf, len-2, cpu_cluster_mask(cpu));
+ }
+}
+
+static void smp_85xx_bringup_done(void)
+{
+ init_cpu_cluster_map();
+}
+
static inline void flush_spin_table(void *spin_table)
{
flush_dcache_range((ulong)spin_table,
@@ -339,6 +525,15 @@ static int smp_85xx_kick_cpu(int nr)
#endif
#endif
+#if defined(CONFIG_PPC_E500MC) && defined(CONFIG_HOTPLUG_CPU)
+ /* If cluster is in PCL10, exit PCL10 first */
+ if (system_state == SYSTEM_RUNNING &&
+ (PVR_VER(cur_cpu_spec->pvr_value) == PVR_VER_E6500)) {
+ if (qoriq_pm_ops->cpu_ready(nr, E500_PM_PCL10))
+ cluster_online(nr);
+ }
+#endif
+
np = of_get_cpu_node(nr, NULL);
cpu_rel_addr = of_get_property(np, "cpu-release-addr", NULL);
@@ -462,6 +657,7 @@ out:
struct smp_ops_t smp_85xx_ops = {
.kick_cpu = smp_85xx_kick_cpu,
.cpu_bootable = smp_generic_cpu_bootable,
+ .bringup_done = smp_85xx_bringup_done,
#ifdef CONFIG_HOTPLUG_CPU
.cpu_disable = generic_cpu_disable,
.cpu_die = generic_cpu_die,
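
The wake side is symmetric: once the system is running, smp_85xx_kick_cpu() checks whether the target CPU's cluster is parked in PCL10 and brings it back first. A sketch of that check (maybe_exit_pcl10() is a hypothetical name; the real test is inline in smp_85xx_kick_cpu() above):

/* Sketch of the wake-side check done in smp_85xx_kick_cpu(). */
static void maybe_exit_pcl10(int nr)
{
	if (system_state == SYSTEM_RUNNING &&
	    PVR_VER(cur_cpu_spec->pvr_value) == PVR_VER_E6500 &&
	    qoriq_pm_ops->cpu_ready(nr, E500_PM_PCL10))
		cluster_online(nr);	/* exit PCL10, re-enable L2, wake cores */
}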
diff --git a/arch/powerpc/platforms/85xx/smp.h b/arch/powerpc/platforms/85xx/smp.h
index 8983009..8b4efc3 100644
--- a/arch/powerpc/platforms/85xx/smp.h
+++ b/arch/powerpc/platforms/85xx/smp.h
@@ -4,6 +4,12 @@
#include <linux/init.h>
#ifdef CONFIG_SMP
+DECLARE_PER_CPU(cpumask_t, cpu_cluster_map);
+static inline struct cpumask *cpu_cluster_mask(int cpu)
+{
+ return &per_cpu(cpu_cluster_map, cpu);
+}
+
void __init mpc85xx_smp_init(void);
#else
static inline void mpc85xx_smp_init(void)
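
cpu_cluster_mask() exposes the per-CPU cluster map that init_cpu_cluster_map() builds at bring-up via smp_85xx_bringup_done(). A hypothetical caller (not part of the patch) counting a CPU's online cluster siblings:

/* Hypothetical example: how many cores of @cpu's cluster are online. */
static int online_cluster_siblings(int cpu)
{
	int i, n = 0;

	for_each_cpu(i, cpu_cluster_mask(cpu))
		if (cpu_online(i))
			n++;
	return n;
}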
diff --git a/arch/powerpc/sysdev/fsl_rcpm.c b/arch/powerpc/sysdev/fsl_rcpm.c
index 6b26678..8b8befb 100644
--- a/arch/powerpc/sysdev/fsl_rcpm.c
+++ b/arch/powerpc/sysdev/fsl_rcpm.c
@@ -21,6 +21,8 @@
#include <asm/fsl_pm.h>
#include <asm/machdep.h>
+#include <platforms/85xx/smp.h>
+
const struct fsl_pm_ops *qoriq_pm_ops;
static struct ccsr_rcpm_v1 __iomem *rcpm_v1_regs;
@@ -240,6 +242,46 @@ static void rcpm_v2_cpu_exit_state(int cpu, int state)
}
}
+static void rcpm_v2_cluster_enter_state(int cpu, int state)
+{
+ u32 cpu_on_cluster, cluster_mask;
+ const struct cpumask *cpumask;
+
+ cpumask = cpu_cluster_mask(cpu);
+ cpu_on_cluster = cpu / cpumask_weight(cpumask);
+ cluster_mask = 1 << cpu_on_cluster;
+
+ switch (state) {
+ case E500_PM_PCL10:
+ /* one bit corresponds to one cluster */
+ out_be32(&rcpm_v2_regs->clpcl10setr, cluster_mask);
+
+ break;
+ default:
+ pr_err("%s: Unknown cluster PM state (%d)\n", __func__, state);
+ }
+}
+
+static void rcpm_v2_cluster_exit_state(int cpu, int state)
+{
+ u32 cpu_on_cluster, cluster_mask;
+ const struct cpumask *cpumask;
+
+ cpumask = cpu_cluster_mask(cpu);
+ cpu_on_cluster = cpu / cpumask_weight(cpumask);
+ cluster_mask = 1 << cpu_on_cluster;
+
+ switch (state) {
+ case E500_PM_PCL10:
+ /* one bit corresponds to one cluster */
+ out_be32(&rcpm_v2_regs->clpcl10clrr, cluster_mask);
+ break;
+ default:
+ pr_err("%s: Unknown cluster PM state (%d)\n", __func__, state);
+ }
+}
+
+
static int rcpm_v2_plat_enter_state(int state)
{
u32 *pmcsr_reg = &rcpm_v2_regs->powmgtcsr;
@@ -273,6 +315,75 @@ static int rcpm_v2_plat_enter_state(int state)
return ret;
}
+bool rcpm_v2_cpu_ready(unsigned int cpu, int state)
+{
+ unsigned int hw_cpu;
+ u32 mask;
+ bool ret = false;
+ const struct cpumask *cluster_mask;
+ u32 cpu_on_cluster = 0;
+ int tmp_cpu = 0;
+
+ hw_cpu = get_hard_smp_processor_id(cpu);
+
+ switch (state) {
+ case E500_PM_PH10:
+ if (in_be32(&rcpm_v2_regs->tph10sr0) & (1 << hw_cpu))
+ ret = true;
+ break;
+ case E500_PM_PW10:
+ if (in_be32(&rcpm_v2_regs->twaitsr0) & (1 << hw_cpu))
+ ret = true;
+ break;
+ case E500_PM_PH15:
+ mask = 1 << cpu_core_index_of_thread(hw_cpu);
+
+ if (in_be32(&rcpm_v2_regs->pcph15sr) & mask)
+ ret = true;
+ break;
+ case E500_PM_PH20:
+ mask = 1 << cpu_core_index_of_thread(hw_cpu);
+
+ if (in_be32(&rcpm_v2_regs->pcph20sr) & mask)
+ ret = true;
+ break;
+ case E500_PM_PW20:
+ mask = 1 << cpu_core_index_of_thread(hw_cpu);
+
+ if (in_be32(&rcpm_v2_regs->pcpw20sr) & mask)
+ ret = true;
+ break;
+ case E500_PM_PH30:
+ mask = 1 << cpu_core_index_of_thread(hw_cpu);
+
+ if (in_be32(&rcpm_v2_regs->pcph30sr) & mask)
+ ret = true;
+ break;
+ case E500_PM_PCL10:
+ cluster_mask = cpu_cluster_mask(boot_cpuid);
+ tmp_cpu += cpumask_weight(cluster_mask);
+
+ while (cpu >= tmp_cpu) {
+ cpu_on_cluster++;
+ cluster_mask = cpu_cluster_mask(tmp_cpu);
+ tmp_cpu += cpumask_weight(cluster_mask);
+ }
+
+ mask = 1 << cpu_on_cluster;
+
+ if (in_be32(&rcpm_v2_regs->clpcl10sr) & mask)
+ ret = true;
+ break;
+ default:
+ pr_err("%s: Unknown PM state (%d)\n",
+ __func__, state);
+ ret = false;
+
+ }
+
+ return ret;
+}
+
static const struct fsl_pm_ops qoriq_rcpm_v1_ops = {
.irq_mask = rcpm_v1_irq_mask,
.irq_unmask = rcpm_v1_irq_unmask,
@@ -288,9 +399,12 @@ static const struct fsl_pm_ops qoriq_rcpm_v2_ops = {
.irq_unmask = rcpm_v2_irq_unmask,
.cpu_enter_state = rcpm_v2_cpu_enter_state,
.cpu_exit_state = rcpm_v2_cpu_exit_state,
+ .cluster_enter_state = rcpm_v2_cluster_enter_state,
+ .cluster_exit_state = rcpm_v2_cluster_exit_state,
.plat_enter_state = rcpm_v2_plat_enter_state,
.set_ip_power = rcpm_v2_set_ip_power,
.freeze_time_base = rcpm_v2_freeze_time_base,
+ .cpu_ready = rcpm_v2_cpu_ready,
};
static const struct of_device_id rcpm_matches[] = {
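
Note that rcpm_v2_cluster_enter_state()/rcpm_v2_cluster_exit_state() derive the cluster index by simple division, which assumes equally sized clusters, while the PCL10 branch of rcpm_v2_cpu_ready() instead walks the cluster masks and accumulates their weights. That walk, isolated as a sketch (cpu_to_cluster_index() is a hypothetical name):

/* Sketch of the cpu-to-cluster-index walk in rcpm_v2_cpu_ready():
 * accumulate cluster sizes until @cpu falls inside one.
 */
static u32 cpu_to_cluster_index(unsigned int cpu)
{
	u32 cluster = 0;
	unsigned int next = cpumask_weight(cpu_cluster_mask(boot_cpuid));

	while (cpu >= next) {
		cluster++;
		next += cpumask_weight(cpu_cluster_mask(next));
	}
	return cluster;	/* bit position in clpcl10sr */
}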
diff --git a/arch/powerpc/sysdev/fsl_soc.c b/arch/powerpc/sysdev/fsl_soc.c
index 0c9e3a1..c93848c 100644
--- a/arch/powerpc/sysdev/fsl_soc.c
+++ b/arch/powerpc/sysdev/fsl_soc.c
@@ -108,6 +108,23 @@ phys_addr_t get_immrbase(void)
EXPORT_SYMBOL(get_immrbase);
+/* map the cluster-shared L2 cache controller; caller must iounmap() */
+void __iomem *get_cpu_l2_base(int cpu)
+{
+ void __iomem *cpu_l2_base;
+ struct device_node *np, *cache;
+
+ np = of_get_cpu_node(cpu, NULL);
+ cache = of_find_next_cache_node(np);
+
+ of_node_put(np);
+
+ cpu_l2_base = of_iomap(cache, 0);
+
+ return cpu_l2_base;
+}
+EXPORT_SYMBOL(get_cpu_l2_base);
+
static u32 sysfreq = -1;
u32 fsl_get_sys_freq(void)
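
Each call to get_cpu_l2_base() creates a fresh of_iomap() mapping, so every caller owns the returned pointer and must iounmap() it, as cluster_offline()/cluster_online() above do. A minimal usage sketch (dump_l2_reg0() is hypothetical, and reading offset 0 is purely illustrative):

/* Hypothetical example: map @cpu's L2 controller, read the first
 * register word, then release the mapping.
 */
static void dump_l2_reg0(int cpu)
{
	void __iomem *l2 = get_cpu_l2_base(cpu);

	if (!l2)
		return;
	pr_debug("cpu%d L2 reg0: %08x\n", cpu, in_be32(l2));
	iounmap(l2);	/* the mapping is per-call, not cached */
}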
diff --git a/arch/powerpc/sysdev/fsl_soc.h b/arch/powerpc/sysdev/fsl_soc.h
index a158c18..c50e38a 100644
--- a/arch/powerpc/sysdev/fsl_soc.h
+++ b/arch/powerpc/sysdev/fsl_soc.h
@@ -8,6 +8,7 @@ struct spi_device;
extern phys_addr_t get_dcsrbase(void);
extern phys_addr_t get_immrbase(void);
+extern void __iomem *get_cpu_l2_base(int cpu);
#if defined(CONFIG_CPM2) || defined(CONFIG_QUICC_ENGINE) || defined(CONFIG_8xx)
extern u32 get_brgfreq(void);
extern u32 get_baudrate(void);