summary refs log tree commit diff
path: root/drivers
diff options
context:
space:
mode:
authorHaiying Wang <Haiying.Wang@freescale.com>2013-10-03 15:54:51 (GMT)
committerJ. German Rivera <German.Rivera@freescale.com>2013-10-12 00:40:00 (GMT)
commit068b20945b044e6a28af76db7c98ca0cf676d9cd (patch)
tree91031df11afb38a8c83dbc6dc541eb0649918ee1 /drivers
parent9819511160678c246f1825693b2156cbda06950c (diff)
downloadlinux-fsl-qoriq-068b20945b044e6a28af76db7c98ca0cf676d9cd.tar.xz
fsl_qman: add cpu hotplug support
- Initialize all the portals at boot time based on cpu_possible_mask. Migrate the portals of offline cpus to the boot cpu when fewer cpus are online at boot time.
- Register a cpu hotplug notifier to migrate the portals to the boot cpu when cpus are offlined, and migrate each portal back to its original cpu when that cpu is onlined again.
- Re-direct the irq and sdest of the portal in the migrate functions.

Signed-off-by: Haiying Wang <Haiying.Wang@freescale.com>
Change-Id: Ica4d1b2b0fd3c3ae5e043663febd9f4cb7c762cf
Reviewed-on: http://git.am.freescale.net:8181/5452
Tested-by: Review Code-CDREVIEW <CDREVIEW@freescale.com>
Reviewed-by: Ladouceur Jeffrey-R11498 <Jeffrey.Ladouceur@freescale.com>
Reviewed-by: Thorpe Geoff-R01361 <Geoff.Thorpe@freescale.com>
Reviewed-by: Rivera Jose-B46482 <German.Rivera@freescale.com>
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/staging/fsl_qbman/qman_driver.c   101
-rw-r--r--  drivers/staging/fsl_qbman/qman_high.c      81
-rw-r--r--  drivers/staging/fsl_qbman/qman_private.h   10
3 files changed, 156 insertions, 36 deletions
diff --git a/drivers/staging/fsl_qbman/qman_driver.c b/drivers/staging/fsl_qbman/qman_driver.c
index 4afa340..9b3bb42 100644
--- a/drivers/staging/fsl_qbman/qman_driver.c
+++ b/drivers/staging/fsl_qbman/qman_driver.c
@@ -31,8 +31,10 @@
#include "qman_private.h"
-#include <linux/iommu.h>
#include <asm/smp.h> /* hard_smp_processor_id() if !CONFIG_SMP */
+#ifdef CONFIG_HOTPLUG_CPU
+#include <linux/cpu.h>
+#endif
/* Global variable containing revision id (even on non-control plane systems
* where CCSR isn't available) */
@@ -471,7 +473,6 @@ static struct qm_portal_config *get_pcfg_idx(struct list_head *list, u32 idx)
return NULL;
}
-
static void portal_set_cpu(struct qm_portal_config *pcfg, int cpu)
{
int ret;
@@ -585,10 +586,8 @@ void qm_put_unused_portal(struct qm_portal_config *pcfg)
static struct qman_portal *init_pcfg(struct qm_portal_config *pcfg)
{
struct qman_portal *p;
- struct cpumask oldmask = *tsk_cpus_allowed(current);
portal_set_cpu(pcfg, pcfg->public_cfg.cpu);
- set_cpus_allowed_ptr(current, get_cpu_mask(pcfg->public_cfg.cpu));
p = qman_create_affine_portal(pcfg, NULL);
if (p) {
u32 irq_sources = 0;
@@ -600,14 +599,13 @@ static struct qman_portal *init_pcfg(struct qm_portal_config *pcfg)
#ifdef CONFIG_FSL_DPA_PIRQ_FAST
irq_sources |= QM_PIRQ_DQRI;
#endif
- qman_irqsource_add(irq_sources);
+ qman_p_irqsource_add(p, irq_sources);
pr_info("Qman portal %sinitialised, cpu %d\n",
pcfg->public_cfg.is_shared ? "(shared) " : "",
pcfg->public_cfg.cpu);
} else
pr_crit("Qman portal failure on cpu %d\n",
pcfg->public_cfg.cpu);
- set_cpus_allowed_ptr(current, &oldmask);
return p;
}
@@ -616,7 +614,7 @@ static void init_slave(int cpu)
struct qman_portal *p;
struct cpumask oldmask = *tsk_cpus_allowed(current);
set_cpus_allowed_ptr(current, get_cpu_mask(cpu));
- p = qman_create_affine_slave(shared_portals[shared_portals_idx++]);
+ p = qman_create_affine_slave(shared_portals[shared_portals_idx++], cpu);
if (!p)
pr_err("Qman slave portal failure on cpu %d\n", cpu);
else
@@ -636,6 +634,47 @@ static int __init parse_qportals(char *str)
}
__setup("qportals=", parse_qportals);
+static void qman_offline_cpu(unsigned int cpu)
+{
+ struct qman_portal *p;
+ p = (struct qman_portal *)affine_portals[cpu];
+ if (p && (!qman_portal_is_sharing_redirect(p)))
+ qman_migrate_portal(p);
+}
+
+static void qman_online_cpu(unsigned int cpu)
+{
+ struct qman_portal *p;
+ p = (struct qman_portal *)affine_portals[cpu];
+ if (p && (!qman_portal_is_sharing_redirect(p)))
+ qman_migrate_portal_back(p, cpu);
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static int __cpuinit qman_hotplug_cpu_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ unsigned int cpu = (unsigned long)hcpu;
+
+ switch (action) {
+ case CPU_ONLINE:
+ case CPU_ONLINE_FROZEN:
+ qman_online_cpu(cpu);
+ break;
+ case CPU_DOWN_PREPARE:
+ case CPU_DOWN_PREPARE_FROZEN:
+ qman_offline_cpu(cpu);
+ default:
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block qman_hotplug_cpu_notifier = {
+ .notifier_call = qman_hotplug_cpu_callback,
+};
+#endif /* CONFIG_HOTPLUG_CPU */
+
__init int qman_init(void)
{
struct cpumask slave_cpus;
@@ -648,6 +687,7 @@ __init int qman_init(void)
struct qman_portal *p;
int cpu, ret;
const u32 *clk;
+ struct cpumask offline_cpus;
/* Initialise the Qman (CCSR) device */
for_each_compatible_node(dn, NULL, "fsl,qman") {
@@ -685,6 +725,7 @@ __init int qman_init(void)
return ret;
}
+ memset(&affine_portals, 0, sizeof(uintptr_t) * num_possible_cpus());
/* Initialise portals. See bman_driver.c for comments */
for_each_compatible_node(dn, NULL, "fsl,qman-portal") {
if (!of_device_is_available(dn))
@@ -695,26 +736,28 @@ __init int qman_init(void)
list_add_tail(&pcfg->list, &unused_pcfgs);
}
}
- for_each_cpu(cpu, &want_shared) {
- pcfg = get_pcfg(&unused_pcfgs);
- if (!pcfg)
- break;
- pcfg->public_cfg.cpu = cpu;
- list_add_tail(&pcfg->list, &shared_pcfgs);
- cpumask_set_cpu(cpu, &shared_cpus);
- }
- for_each_cpu(cpu, &want_unshared) {
- if (cpumask_test_cpu(cpu, &shared_cpus))
- continue;
- pcfg = get_pcfg(&unused_pcfgs);
- if (!pcfg)
- break;
- pcfg->public_cfg.cpu = cpu;
- list_add_tail(&pcfg->list, &unshared_pcfgs);
- cpumask_set_cpu(cpu, &unshared_cpus);
+ for_each_possible_cpu(cpu) {
+ if (cpumask_test_cpu(cpu, &want_shared)) {
+ pcfg = get_pcfg(&unused_pcfgs);
+ if (!pcfg)
+ break;
+ pcfg->public_cfg.cpu = cpu;
+ list_add_tail(&pcfg->list, &shared_pcfgs);
+ cpumask_set_cpu(cpu, &shared_cpus);
+ }
+ if (cpumask_test_cpu(cpu, &want_unshared)) {
+ if (cpumask_test_cpu(cpu, &shared_cpus))
+ continue;
+ pcfg = get_pcfg(&unused_pcfgs);
+ if (!pcfg)
+ break;
+ pcfg->public_cfg.cpu = cpu;
+ list_add_tail(&pcfg->list, &unshared_pcfgs);
+ cpumask_set_cpu(cpu, &unshared_cpus);
+ }
}
if (list_empty(&shared_pcfgs) && list_empty(&unshared_pcfgs)) {
- for_each_online_cpu(cpu) {
+ for_each_possible_cpu(cpu) {
pcfg = get_pcfg(&unused_pcfgs);
if (!pcfg)
break;
@@ -723,7 +766,7 @@ __init int qman_init(void)
cpumask_set_cpu(cpu, &unshared_cpus);
}
}
- cpumask_andnot(&slave_cpus, cpu_online_mask, &shared_cpus);
+ cpumask_andnot(&slave_cpus, cpu_possible_mask, &shared_cpus);
cpumask_andnot(&slave_cpus, &slave_cpus, &unshared_cpus);
if (cpumask_empty(&slave_cpus)) {
if (!list_empty(&shared_pcfgs)) {
@@ -759,6 +802,12 @@ __init int qman_init(void)
for_each_cpu(cpu, &slave_cpus)
init_slave(cpu);
pr_info("Qman portals initialised\n");
+ cpumask_andnot(&offline_cpus, cpu_possible_mask, cpu_online_mask);
+ for_each_cpu(cpu, &offline_cpus)
+ qman_offline_cpu(cpu);
+#ifdef CONFIG_HOTPLUG_CPU
+ register_hotcpu_notifier(&qman_hotplug_cpu_notifier);
+#endif
return 0;
}
diff --git a/drivers/staging/fsl_qbman/qman_high.c b/drivers/staging/fsl_qbman/qman_high.c
index 59e9a8a..37fbaaa 100644
--- a/drivers/staging/fsl_qbman/qman_high.c
+++ b/drivers/staging/fsl_qbman/qman_high.c
@@ -154,6 +154,7 @@ static cpumask_t affine_mask;
static DEFINE_SPINLOCK(affine_mask_lock);
static u16 affine_channels[NR_CPUS];
static DEFINE_PER_CPU(struct qman_portal, qman_affine_portal);
+uintptr_t affine_portals[NR_CPUS];
/* "raw" gets the cpu-local struct whether it's a redirect or not. */
static inline struct qman_portal *get_raw_affine_portal(void)
@@ -550,19 +551,16 @@ struct qman_portal *qman_create_affine_portal(
const struct qman_cgrs *cgrs)
{
struct qman_portal *res;
- struct qman_portal *portal = get_raw_affine_portal();
- /* A criteria for calling this function (from qman_driver.c) is that
- * we're already affine to the cpu and won't schedule onto another cpu.
- * This means we can put_affine_portal() and yet continue to use
- * "portal", which in turn means aspects of this routine can sleep. */
- put_affine_portal();
+ struct qman_portal *portal;
+ portal = &per_cpu(qman_affine_portal, config->public_cfg.cpu);
res = qman_create_portal(portal, config, cgrs);
if (res) {
spin_lock(&affine_mask_lock);
cpumask_set_cpu(config->public_cfg.cpu, &affine_mask);
affine_channels[config->public_cfg.cpu] =
config->public_cfg.channel;
+ affine_portals[config->public_cfg.cpu] = (uintptr_t)portal;
spin_unlock(&affine_mask_lock);
}
return res;
@@ -570,10 +568,12 @@ struct qman_portal *qman_create_affine_portal(
/* These checks are BUG_ON()s because the driver is already supposed to avoid
* these cases. */
-struct qman_portal *qman_create_affine_slave(struct qman_portal *redirect)
+struct qman_portal *qman_create_affine_slave(struct qman_portal *redirect,
+ int cpu)
{
#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
- struct qman_portal *p = get_raw_affine_portal();
+ struct qman_portal *p;
+ p = &per_cpu(qman_affine_portal, cpu);
/* Check that we don't already have our own portal */
BUG_ON(p->config);
/* Check that we aren't already slaving to another portal */
@@ -583,7 +583,7 @@ struct qman_portal *qman_create_affine_slave(struct qman_portal *redirect)
/* These are the only elements to initialise when redirecting */
p->irq_sources = 0;
p->sharing_redirect = redirect;
- put_affine_portal();
+ affine_portals[cpu] = (uintptr_t)p;
return p;
#else
BUG();
@@ -1090,6 +1090,12 @@ u16 qman_affine_channel(int cpu)
}
EXPORT_SYMBOL(qman_affine_channel);
+uintptr_t qman_get_affine_portal(int cpu)
+{
+ return affine_portals[cpu];
+}
+EXPORT_SYMBOL(qman_affine_portal);
+
int qman_p_poll_dqrr(struct qman_portal *p, unsigned int limit)
{
int ret;
@@ -4826,3 +4832,60 @@ int qman_shutdown_fq(u32 fqid)
PORTAL_IRQ_UNLOCK(p, irqflags);
put_affine_portal();
}
+
+static void qman_portal_update_sdest(const struct qm_portal_config *pcfg,
+ unsigned int cpu)
+{
+ struct iommu_stash_attribute stash_attr;
+ int ret;
+
+ if (!pcfg->iommu_domain) {
+ pr_err(KBUILD_MODNAME ":%s(): iommu_domain_alloc() failed",
+ __func__);
+ goto _no_iommu;
+ }
+
+ stash_attr.cpu = cpu;
+ stash_attr.cache = IOMMU_ATTR_CACHE_L1;
+ stash_attr.window = ~(u32)0;
+ ret = iommu_domain_set_attr(pcfg->iommu_domain, DOMAIN_ATTR_PAMU_STASH,
+ &stash_attr);
+ if (ret < 0) {
+ pr_err(KBUILD_MODNAME ":%s(): iommu_domain_set_attr() = %d",
+ __func__, ret);
+ return;
+ }
+
+_no_iommu:
+#ifdef CONFIG_FSL_QMAN_CONFIG
+ if (qman_set_sdest(pcfg->public_cfg.channel, cpu))
+#endif
+ pr_warn("Failed to update portal's stash request queue\n");
+
+ return;
+}
+
+int qman_portal_is_sharing_redirect(struct qman_portal *portal)
+{
+ return portal->sharing_redirect ? 1 : 0;
+}
+
+/* Migrate the portal to the boot cpu(cpu0) for offline cpu */
+void qman_migrate_portal(struct qman_portal *portal)
+{
+ unsigned long irqflags __maybe_unused;
+ PORTAL_IRQ_LOCK(portal, irqflags);
+ irq_set_affinity(portal->config->public_cfg.irq, cpumask_of(0));
+ qman_portal_update_sdest(portal->config, 0);
+ PORTAL_IRQ_UNLOCK(portal, irqflags);
+}
+
+/* Migrate the portal back to the affined cpu once that cpu appears.*/
+void qman_migrate_portal_back(struct qman_portal *portal, unsigned int cpu)
+{
+ unsigned long irqflags __maybe_unused;
+ PORTAL_IRQ_LOCK(portal, irqflags);
+ qman_portal_update_sdest(portal->config, cpu);
+ irq_set_affinity(portal->config->public_cfg.irq, cpumask_of(cpu));
+ PORTAL_IRQ_UNLOCK(portal, irqflags);
+}
diff --git a/drivers/staging/fsl_qbman/qman_private.h b/drivers/staging/fsl_qbman/qman_private.h
index df96ce5..dafbce1 100644
--- a/drivers/staging/fsl_qbman/qman_private.h
+++ b/drivers/staging/fsl_qbman/qman_private.h
@@ -31,6 +31,7 @@
#include "dpa_sys.h"
#include <linux/fsl_qman.h>
+#include <linux/iommu.h>
#if !defined(CONFIG_FSL_QMAN_FQ_LOOKUP) && defined(CONFIG_PPC64)
#error "_PPC64 requires _FSL_QMAN_FQ_LOOKUP"
@@ -212,7 +213,8 @@ struct qman_portal *qman_create_portal(
struct qman_portal *qman_create_affine_portal(
const struct qm_portal_config *config,
const struct qman_cgrs *cgrs);
-struct qman_portal *qman_create_affine_slave(struct qman_portal *redirect);
+struct qman_portal *qman_create_affine_slave(struct qman_portal *redirect,
+ int cpu);
const struct qm_portal_config *qman_destroy_affine_portal(void);
void qman_destroy_portal(struct qman_portal *qm);
@@ -387,3 +389,9 @@ int qman_ceetm_query_cq(unsigned int cqid, unsigned int dcpid,
int qman_ceetm_query_ccgr(struct qm_mcc_ceetm_ccgr_query *ccgr_query,
struct qm_mcr_ceetm_ccgr_query *response);
int qman_ceetm_get_xsfdr(enum qm_dc_portal portal, unsigned int *num);
+
+/* Portal migration */
+extern uintptr_t affine_portals[NR_CPUS];
+int qman_portal_is_sharing_redirect(struct qman_portal *portal);
+void qman_migrate_portal(struct qman_portal *portal);
+void qman_migrate_portal_back(struct qman_portal *portal, unsigned int cpu);