author     Roy Pledge <Roy.Pledge@freescale.com>    2013-07-03 18:28:36 (GMT)
committer  Fleming Andrew-AFLEMING <AFLEMING@freescale.com>    2013-07-23 21:04:18 (GMT)
commit     0028e6f867816411aac6bbf57740286d12114bcb (patch)
tree       73faa7c9c6eebd09e02acbb032c7a7bde19ecb88 /drivers
parent     286d9a0debaee7e159808aa41a40c5c863295365 (diff)
download   linux-fsl-qoriq-0028e6f867816411aac6bbf57740286d12114bcb.tar.xz
Set SDQCR to ensure frame queues can reach the retired state
In some situations frame queues may still be on a work queue when a retirement command is sent during cleanup. In order to reach the retired state these frame queues must be scheduled. This patch sets an appropriate SDQCR value to ensure that the Frame Queue is scheduled. Since Frame Queues could be 'locked' to a particular portal if the Hold Active feature is enabled, the shutdown routine must service all portals to ensure that the FQ reaches the retired state.

Signed-off-by: Roy Pledge <Roy.Pledge@freescale.com>
Change-Id: I9faab0039e4bb7f8c740a271154e7756ce014d98
Reviewed-on: http://git.am.freescale.net:8181/3229
Reviewed-by: Wang Haiying-R54964 <Haiying.Wang@freescale.com>
Reviewed-by: Fleming Andrew-AFLEMING <AFLEMING@freescale.com>
Tested-by: Fleming Andrew-AFLEMING <AFLEMING@freescale.com>
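[Editor's note] The heart of the fix is visible in the qman_low.h hunk below: before polling for the FQRN (retirement notification) message, qm_shutdown_fq() now programs the SDQCR of every supplied portal so that a frame queue still parked on a work queue gets scheduled and can actually retire. A condensed sketch of that loop follows; drain_until_fqrn() is an illustrative name, not part of the patch, and the qm_dqrr_* and qm_mr_* calls are the existing portal accessors from qman_low.h:

/*
 * Sketch only: the scheduling step this patch adds to qm_shutdown_fq(),
 * condensed from the qman_low.h hunk below.
 */
static void drain_until_fqrn(struct qm_portal **portal, int portal_count,
                             u32 channel)
{
        int i, found_fqrn = 0;

        /* Program SDQCR on every portal: with Hold Active enabled a FQ
         * can be locked to one portal, so all of them must be serviced. */
        for (i = 0; i < portal_count; i++)
                qm_dqrr_sdqcr_set(portal[i], QM_SDQCR_TYPE_ACTIVE |
                                  (channel < qm_channel_pool1 ?
                                   QM_SDQCR_CHANNELS_DEDICATED :
                                   QM_SDQCR_CHANNELS_POOL_CONV(channel)));

        while (!found_fqrn) {
                for (i = 0; i < portal_count; i++) {
                        const struct qm_dqrr_entry *dqrr;
                        const struct qm_mr_entry *msg;

                        /* Keep the dequeue ring drained... */
                        qm_dqrr_pvb_update(portal[i]);
                        while ((dqrr = qm_dqrr_current(portal[i]))) {
                                qm_dqrr_cdc_consume_1ptr(portal[i], dqrr, 0);
                                qm_dqrr_pvb_update(portal[i]);
                                qm_dqrr_next(portal[i]);
                        }
                        /* ...while watching the message ring for the FQRN
                         * that signals the queue has reached Retired. */
                        qm_mr_pvb_update(portal[i]);
                        while ((msg = qm_mr_current(portal[i]))) {
                                if ((msg->verb & QM_MR_VERB_TYPE_MASK) ==
                                    QM_MR_VERB_FQRN)
                                        found_fqrn = 1;
                                qm_mr_next(portal[i]);
                                qm_mr_cci_consume_to_current(portal[i]);
                                qm_mr_pvb_update(portal[i]);
                        }
                        cpu_relax();
                }
        }
}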
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/staging/fsl_qbman/dpa_alloc.c     |  15
-rw-r--r--  drivers/staging/fsl_qbman/fsl_usdpaa.c    |  67
-rw-r--r--  drivers/staging/fsl_qbman/qman_driver.c   |   1
-rw-r--r--  drivers/staging/fsl_qbman/qman_high.c     |   4
-rw-r--r--  drivers/staging/fsl_qbman/qman_low.h      | 134
-rw-r--r--  drivers/staging/fsl_qbman/qman_private.h  |   1
6 files changed, 141 insertions, 81 deletions
diff --git a/drivers/staging/fsl_qbman/dpa_alloc.c b/drivers/staging/fsl_qbman/dpa_alloc.c
index 8656ec3..40b1cbf 100644
--- a/drivers/staging/fsl_qbman/dpa_alloc.c
+++ b/drivers/staging/fsl_qbman/dpa_alloc.c
@@ -112,6 +112,11 @@ void bman_seed_bpid_range(u32 bpid, u32 count)
}
EXPORT_SYMBOL(bman_seed_bpid_range);
+int bman_reserve_bpid_range(u32 bpid, u32 count)
+{
+ return dpa_alloc_reserve(&bpalloc, bpid, count);
+}
+EXPORT_SYMBOL(bman_reserve_bpid_range);
/* FQID allocator front-end */
@@ -190,9 +195,7 @@ static int qpool_cleanup(u32 qp)
void qman_release_pool_range(u32 qp, u32 count)
{
u32 total_invalid = release_id_range(&qpalloc, qp,
- count, NULL);
- /* Temporarly disable QMan Pool recovery due to a frequent
- hang in qpool_cleanup() */
+ count, qpool_cleanup);
if (total_invalid) {
/* Pool channels are almost always used individually */
if (count == 1)
@@ -213,6 +216,12 @@ void qman_seed_pool_range(u32 poolid, u32 count)
}
EXPORT_SYMBOL(qman_seed_pool_range);
+int qman_reserve_pool_range(u32 poolid, u32 count)
+{
+ return dpa_alloc_reserve(&qpalloc, poolid, count);
+}
+EXPORT_SYMBOL(qman_reserve_pool_range);
+
/* CGR ID allocator front-end */
diff --git a/drivers/staging/fsl_qbman/fsl_usdpaa.c b/drivers/staging/fsl_qbman/fsl_usdpaa.c
index c19f3f3..f2891ef 100644
--- a/drivers/staging/fsl_qbman/fsl_usdpaa.c
+++ b/drivers/staging/fsl_qbman/fsl_usdpaa.c
@@ -123,12 +123,14 @@ static const struct alloc_backend {
.id_type = usdpaa_id_bpid,
.alloc = bman_alloc_bpid_range,
.release = bman_release_bpid_range,
+ .reserve = bman_reserve_bpid_range,
.acronym = "BPID"
},
{
.id_type = usdpaa_id_qpool,
.alloc = qman_alloc_pool_range,
.release = qman_release_pool_range,
+ .reserve = qman_reserve_pool_range,
.acronym = "QPOOL"
},
{
@@ -313,6 +315,9 @@ static int init_qm_portal(struct qm_portal_config *config,
portal->addr.addr_ce = config->addr_virt[DPA_PORTAL_CE];
portal->addr.addr_ci = config->addr_virt[DPA_PORTAL_CI];
+ /* Make sure interrupts are inhibited */
+ qm_out(IIR, 1);
+
/* Initialize the DQRR. This will stop any dequeue
commands that are in progress */
if (qm_dqrr_init(portal, config, qm_dqrr_dpush, qm_dqrr_pvb,
@@ -322,7 +327,10 @@ static int init_qm_portal(struct qm_portal_config *config,
return 1;
}
/* Consume any items in the dequeue ring */
- qm_dqrr_cdc_consume_n(portal, 0xffff);
+ while (qm_dqrr_cdc_cci(portal) != qm_dqrr_cursor(portal)) {
+ qm_dqrr_cdc_consume_n(portal, 0xffff);
+ qm_dqrr_cdc_cce_prefetch(portal);
+ }
/* Initialize the EQCR */
if (qm_eqcr_init(portal, qm_eqcr_pvb, qm_eqcr_cce)) {
@@ -370,8 +378,7 @@ static int init_bm_portal(struct bm_portal_config *config,
be torn down. If the check_channel helper returns true the FQ will be
transitioned to the OOS state */
static int qm_check_and_destroy_fqs(struct qm_portal *portal, void *ctx,
- bool (*check_channel)
- (void *ctx, u32 channel))
+ bool (*check_channel)(void*, u32))
{
u32 fq_id = 0;
while (1) {
@@ -406,7 +413,7 @@ static int qm_check_and_destroy_fqs(struct qm_portal *portal, void *ctx,
goto next;
if (check_channel(ctx, channel))
- qm_shutdown_fq(portal, fq_id);
+ qm_shutdown_fq(&portal, 1, fq_id);
next:
++fq_id;
}
@@ -439,6 +446,17 @@ static bool check_channel_device(void *_ctx, u32 channel)
return false;
}
+static bool check_portal_channel(void *ctx, u32 channel)
+{
+ u32 portal_channel = *(u32 *)ctx;
+ if (portal_channel == channel) {
+ /* This FQs destination is a portal
+ we're cleaning, send a retire */
+ return true;
+ }
+ return false;
+}
+
static int usdpaa_release(struct inode *inode, struct file *filp)
{
struct ctx *ctx = filp->private_data;
@@ -451,6 +469,9 @@ static int usdpaa_release(struct inode *inode, struct file *filp)
struct qm_portal_config *qm_alloced_portal = NULL;
struct bm_portal_config *bm_alloced_portal = NULL;
+ struct qm_portal *portal_array[qman_portal_max];
+ int portal_count = 0;
+
/* The following logic is used to recover resources that were not
correctly released by the process that is closing the FD.
Step 1: syncronize the HW with the qm_portal/bm_portal structures
@@ -460,10 +481,19 @@ static int usdpaa_release(struct inode *inode, struct file *filp)
list_for_each_entry_safe(portal, tmpportal, &ctx->portals, list) {
/* Try to recover any portals that weren't shut down */
if (portal->user.type == usdpaa_portal_qman) {
+ portal_array[portal_count] = &portal->qman_portal_low;
+ ++portal_count;
init_qm_portal(portal->qportal,
&portal->qman_portal_low);
- if (!qm_cleanup_portal)
+ if (!qm_cleanup_portal) {
qm_cleanup_portal = &portal->qman_portal_low;
+ } else {
+ /* Clean FQs on the dedicated channel */
+ u32 chan = portal->qportal->public_cfg.channel;
+ qm_check_and_destroy_fqs(
+ &portal->qman_portal_low, &chan,
+ check_portal_channel);
+ }
} else {
/* BMAN */
init_bm_portal(portal->bportal,
@@ -506,6 +536,15 @@ static int usdpaa_release(struct inode *inode, struct file *filp)
int leaks = 0;
list_for_each_entry(res, &ctx->resources[backend->id_type],
list) {
+ if (backend->id_type == usdpaa_id_fqid) {
+ int i = 0;
+ for (; i < res->num; i++) {
+ /* Clean FQs with the cleanup portal */
+ qm_shutdown_fq(portal_array,
+ portal_count,
+ res->id + i);
+ }
+ }
leaks += res->num;
backend->release(res->id, res->num);
}
@@ -615,7 +654,7 @@ static int check_mmap_portal(struct ctx *ctx, struct vm_area_struct *vma,
static int usdpaa_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct ctx *ctx = filp->private_data;
- unsigned long pfn;
+ unsigned long pfn = 0;
int match, ret;
spin_lock(&mem_lock);
@@ -733,6 +772,8 @@ static long ioctl_id_release(struct ctx *ctx, void __user *arg)
}
/* Failed to find the resource */
spin_unlock(&ctx->lock);
+ pr_err("Couldn't find resource type %d base 0x%x num %d\n",
+ i.id_type, i.base, i.num);
return -EINVAL;
found:
/* Release the resource to the backend */
@@ -1093,17 +1134,6 @@ err_copy_from_user:
return ret;
}
-static bool check_portal_channel(void *ctx, u32 channel)
-{
- u32 portal_channel = *(u32 *)ctx;
- if (portal_channel == channel) {
- /* This FQs destination is a portal
- we're cleaning, send a retire */
- return true;
- }
- return false;
-}
-
static long ioctl_portal_unmap(struct ctx *ctx, struct usdpaa_portal_map *i)
{
struct portal_mapping *mapping;
@@ -1142,7 +1172,8 @@ found:
/* Tear down any FQs this portal is referencing */
channel = mapping->qportal->public_cfg.channel;
- qm_check_and_destroy_fqs(&mapping->qman_portal_low, &channel,
+ qm_check_and_destroy_fqs(&mapping->qman_portal_low,
+ &channel,
check_portal_channel);
qm_put_unused_portal(mapping->qportal);
} else if (mapping->user.type == usdpaa_portal_bman) {
diff --git a/drivers/staging/fsl_qbman/qman_driver.c b/drivers/staging/fsl_qbman/qman_driver.c
index c40e24a..683a442 100644
--- a/drivers/staging/fsl_qbman/qman_driver.c
+++ b/drivers/staging/fsl_qbman/qman_driver.c
@@ -45,6 +45,7 @@ EXPORT_SYMBOL(qm_channel_caam);
u16 qm_channel_pme = QMAN_CHANNEL_PME;
EXPORT_SYMBOL(qm_channel_pme);
u16 qman_portal_max;
+EXPORT_SYMBOL(qman_portal_max);
u32 qman_clk;
struct qm_ceetm qman_ceetms[QMAN_CEETM_MAX];
diff --git a/drivers/staging/fsl_qbman/qman_high.c b/drivers/staging/fsl_qbman/qman_high.c
index 5fcd709..85cdcf1 100644
--- a/drivers/staging/fsl_qbman/qman_high.c
+++ b/drivers/staging/fsl_qbman/qman_high.c
@@ -4511,9 +4511,11 @@ int qman_shutdown_fq(u32 fqid)
struct qman_portal *p;
unsigned long irqflags __maybe_unused;
int ret;
+ struct qm_portal *low_p;
p = get_affine_portal();
PORTAL_IRQ_LOCK(p, irqflags);
- ret = qm_shutdown_fq(&p->p, fqid);
+ low_p = &p->p;
+ ret = qm_shutdown_fq(&low_p, 1, fqid);
PORTAL_IRQ_UNLOCK(p, irqflags);
put_affine_portal();
return ret;
diff --git a/drivers/staging/fsl_qbman/qman_low.h b/drivers/staging/fsl_qbman/qman_low.h
index 1205ac7..d63c722 100644
--- a/drivers/staging/fsl_qbman/qman_low.h
+++ b/drivers/staging/fsl_qbman/qman_low.h
@@ -1175,21 +1175,22 @@ static inline void __qm_isr_write(struct qm_portal *portal, enum qm_isr_reg n,
}
/* Cleanup FQs */
-static inline int qm_shutdown_fq(struct qm_portal *portal, u32 fqid)
+static inline int qm_shutdown_fq(struct qm_portal **portal, int portal_count,
+ u32 fqid)
{
struct qm_mc_command *mcc;
struct qm_mc_result *mcr;
u8 state;
- int orl_empty, fq_empty, count, drain = 0;
+ int orl_empty, fq_empty, i, drain = 0;
u32 result;
u32 channel, wq;
/* Determine the state of the FQID */
- mcc = qm_mc_start(portal);
+ mcc = qm_mc_start(portal[0]);
mcc->queryfq_np.fqid = fqid;
- qm_mc_commit(portal, QM_MCC_VERB_QUERYFQ_NP);
- while (!(mcr = qm_mc_result(portal)))
+ qm_mc_commit(portal[0], QM_MCC_VERB_QUERYFQ_NP);
+ while (!(mcr = qm_mc_result(portal[0])))
cpu_relax();
DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
state = mcr->queryfq_np.state & QM_MCR_NP_STATE_MASK;
@@ -1197,10 +1198,10 @@ static inline int qm_shutdown_fq(struct qm_portal *portal, u32 fqid)
return 0; /* Already OOS, no need to do anymore checks */
/* Query which channel the FQ is using */
- mcc = qm_mc_start(portal);
+ mcc = qm_mc_start(portal[0]);
mcc->queryfq.fqid = fqid;
- qm_mc_commit(portal, QM_MCC_VERB_QUERYFQ);
- while (!(mcr = qm_mc_result(portal)))
+ qm_mc_commit(portal[0], QM_MCC_VERB_QUERYFQ);
+ while (!(mcr = qm_mc_result(portal[0])))
cpu_relax();
DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
@@ -1214,10 +1215,10 @@ static inline int qm_shutdown_fq(struct qm_portal *portal, u32 fqid)
case QM_MCR_NP_STATE_ACTIVE:
case QM_MCR_NP_STATE_PARKED:
orl_empty = 0;
- mcc = qm_mc_start(portal);
+ mcc = qm_mc_start(portal[0]);
mcc->alterfq.fqid = fqid;
- qm_mc_commit(portal, QM_MCC_VERB_ALTER_RETIRE);
- while (!(mcr = qm_mc_result(portal)))
+ qm_mc_commit(portal[0], QM_MCC_VERB_ALTER_RETIRE);
+ while (!(mcr = qm_mc_result(portal[0])))
cpu_relax();
DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
QM_MCR_VERB_ALTER_RETIRE);
@@ -1250,34 +1251,47 @@ static inline int qm_shutdown_fq(struct qm_portal *portal, u32 fqid)
fqid, channel);
return -EBUSY;
}
-
+ /* Set the sdqcr to drain this channel */
+ if (channel < qm_channel_pool1)
+ for (i = 0; i < portal_count; i++)
+ qm_dqrr_sdqcr_set(portal[i],
+ QM_SDQCR_TYPE_ACTIVE |
+ QM_SDQCR_CHANNELS_DEDICATED);
+ else
+ for (i = 0; i < portal_count; i++)
+ qm_dqrr_sdqcr_set(portal[i],
+ QM_SDQCR_TYPE_ACTIVE |
+ QM_SDQCR_CHANNELS_POOL_CONV
+ (channel));
while (!found_fqrn) {
/* Keep draining DQRR while checking the MR*/
- qm_dqrr_sdqcr_set(portal,
- 0x41000000 | dequeue_wq);
- qm_dqrr_pvb_update(portal);
- dqrr = qm_dqrr_current(portal);
- while (dqrr) {
- qm_dqrr_cdc_consume_1ptr(portal,
- dqrr, 0);
- qm_dqrr_pvb_update(portal);
- qm_dqrr_next(portal);
- dqrr = qm_dqrr_current(portal);
+ for (i = 0; i < portal_count; i++) {
+ qm_dqrr_pvb_update(portal[i]);
+ dqrr = qm_dqrr_current(portal[i]);
+ while (dqrr) {
+ qm_dqrr_cdc_consume_1ptr(
+ portal[i], dqrr, 0);
+ qm_dqrr_pvb_update(portal[i]);
+ qm_dqrr_next(portal[i]);
+ dqrr = qm_dqrr_current(
+ portal[i]);
+ }
+ /* Process message ring too */
+ qm_mr_pvb_update(portal[i]);
+ msg = qm_mr_current(portal[i]);
+ while (msg) {
+ if ((msg->verb &
+ QM_MR_VERB_TYPE_MASK)
+ == QM_MR_VERB_FQRN)
+ found_fqrn = 1;
+ qm_mr_next(portal[i]);
+ qm_mr_cci_consume_to_current(
+ portal[i]);
+ qm_mr_pvb_update(portal[i]);
+ msg = qm_mr_current(portal[i]);
+ }
+ cpu_relax();
}
-
- /* Process message ring too */
- qm_mr_pvb_update(portal);
- msg = qm_mr_current(portal);
- while (msg) {
- if ((msg->verb & QM_MR_VERB_TYPE_MASK)
- == QM_MR_VERB_FQRN)
- found_fqrn = 1;
- qm_mr_next(portal);
- qm_mr_cci_consume_to_current(portal);
- qm_mr_pvb_update(portal);
- msg = qm_mr_current(portal);
- }
- cpu_relax();
}
}
if (result != QM_MCR_RESULT_OK &&
@@ -1300,55 +1314,57 @@ static inline int qm_shutdown_fq(struct qm_portal *portal, u32 fqid)
do {
const struct qm_dqrr_entry *dqrr = NULL;
u32 vdqcr = fqid | QM_VDQCR_NUMFRAMES_SET(3);
- qm_dqrr_vdqcr_set(portal, vdqcr);
+ qm_dqrr_vdqcr_set(portal[0], vdqcr);
/* Wait for a dequeue to occur */
while (dqrr == NULL) {
- qm_dqrr_pvb_update(portal);
- dqrr = qm_dqrr_current(portal);
+ qm_dqrr_pvb_update(portal[0]);
+ dqrr = qm_dqrr_current(portal[0]);
if (!dqrr)
cpu_relax();
}
/* Process the dequeues, making sure to
empty the ring completely */
while (dqrr) {
- if (dqrr->stat & QM_DQRR_STAT_FQ_EMPTY)
+ if (dqrr->fqid == fqid &&
+ dqrr->stat & QM_DQRR_STAT_FQ_EMPTY)
fq_empty = 1;
- qm_dqrr_cdc_consume_1ptr(portal,
+ qm_dqrr_cdc_consume_1ptr(portal[0],
dqrr, 0);
- qm_dqrr_pvb_update(portal);
- qm_dqrr_next(portal);
- dqrr = qm_dqrr_current(portal);
+ qm_dqrr_pvb_update(portal[0]);
+ qm_dqrr_next(portal[0]);
+ dqrr = qm_dqrr_current(portal[0]);
}
} while (fq_empty == 0);
}
+ for (i = 0; i < portal_count; i++)
+ qm_dqrr_sdqcr_set(portal[i], 0);
+
/* Wait for the ORL to have been completely drained */
- count = 0;
while (orl_empty == 0) {
const struct qm_mr_entry *msg;
- qm_mr_pvb_update(portal);
- msg = qm_mr_current(portal);
+ qm_mr_pvb_update(portal[0]);
+ msg = qm_mr_current(portal[0]);
while (msg) {
- ++count;
if ((msg->verb & QM_MR_VERB_TYPE_MASK) ==
QM_MR_VERB_FQRL)
orl_empty = 1;
- qm_mr_next(portal);
- qm_mr_cci_consume_to_current(portal);
- qm_mr_pvb_update(portal);
- msg = qm_mr_current(portal);
+ qm_mr_next(portal[0]);
+ qm_mr_cci_consume_to_current(portal[0]);
+ qm_mr_pvb_update(portal[0]);
+ msg = qm_mr_current(portal[0]);
}
cpu_relax();
}
- mcc = qm_mc_start(portal);
+ mcc = qm_mc_start(portal[0]);
mcc->alterfq.fqid = fqid;
- qm_mc_commit(portal, QM_MCC_VERB_ALTER_OOS);
- while (!(mcr = qm_mc_result(portal)))
+ qm_mc_commit(portal[0], QM_MCC_VERB_ALTER_OOS);
+ while (!(mcr = qm_mc_result(portal[0])))
cpu_relax();
DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
QM_MCR_VERB_ALTER_OOS);
if (mcr->result != QM_MCR_RESULT_OK) {
- pr_err("OOS Failed on FQID 0x%x, result 0x%x\n",
+ pr_err("OOS after drain Failed on FQID 0x%x, result 0x%x\n",
fqid, mcr->result);
return -1;
}
@@ -1356,10 +1372,10 @@ static inline int qm_shutdown_fq(struct qm_portal *portal, u32 fqid)
break;
case QM_MCR_NP_STATE_RETIRED:
/* Send OOS Command */
- mcc = qm_mc_start(portal);
+ mcc = qm_mc_start(portal[0]);
mcc->alterfq.fqid = fqid;
- qm_mc_commit(portal, QM_MCC_VERB_ALTER_OOS);
- while (!(mcr = qm_mc_result(portal)))
+ qm_mc_commit(portal[0], QM_MCC_VERB_ALTER_OOS);
+ while (!(mcr = qm_mc_result(portal[0])))
cpu_relax();
DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
QM_MCR_VERB_ALTER_OOS);
diff --git a/drivers/staging/fsl_qbman/qman_private.h b/drivers/staging/fsl_qbman/qman_private.h
index 6738b02..7f35dcf 100644
--- a/drivers/staging/fsl_qbman/qman_private.h
+++ b/drivers/staging/fsl_qbman/qman_private.h
@@ -192,6 +192,7 @@ struct qm_portal_config {
#define QMAN_REV31 0x0301
extern u16 qman_ip_rev; /* 0 if uninitialised, otherwise QMAN_REVx */
extern u32 qman_clk;
+extern u16 qman_portal_max;
#ifdef CONFIG_FSL_QMAN_CONFIG
/* Hooks from qman_driver.c to qman_config.c */
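
[Editor's note] Since qm_shutdown_fq() now takes an array of portals rather than a single one, callers collect every portal the closing process owned and pass the whole set, as the usdpaa_release() hunk above does for leaked FQID ranges. A minimal usage sketch; shutdown_leaked_fqs() is a hypothetical wrapper, not part of the patch:

/* Sketch only: drives the new three-argument qm_shutdown_fq() the way
 * usdpaa_release() above does for each leaked FQID range. */
static void shutdown_leaked_fqs(struct qm_portal **portal_array,
                                int portal_count, u32 base_fqid, u32 num)
{
        u32 i;

        for (i = 0; i < num; i++)
                qm_shutdown_fq(portal_array, portal_count, base_fqid + i);
}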