author     Haiying Wang <Haiying.Wang@freescale.com>          2013-10-03 15:38:53 (GMT)
committer  J. German Rivera <German.Rivera@freescale.com>     2013-10-12 00:31:04 (GMT)
commit     9819511160678c246f1825693b2156cbda06950c (patch)
tree       0e296940047a50c868a6d3148b2757678b138ca3 /drivers
parent     98672ff00ffd786e06d1205d47a271f11fa8ed5c (diff)
download   linux-fsl-qoriq-9819511160678c246f1825693b2156cbda06950c.tar.xz
fsl_qman: extend some QMan APIs with portal parameter
Some APIs might be called in a migration situation (e.g. CPU hotplug) and may
need to continue accessing the portal that execution was affine to prior to
the migration. We extend those APIs with qman_p_*** variants that take the
portal as an explicit parameter.

Signed-off-by: Haiying Wang <Haiying.Wang@freescale.com>
Change-Id: I9b1422d1dfbd59561e3bf072af5c009047e4f47b
Reviewed-on: http://git.am.freescale.net:8181/5451
Tested-by: Review Code-CDREVIEW <CDREVIEW@freescale.com>
Reviewed-by: Thorpe Geoff-R01361 <Geoff.Thorpe@freescale.com>
Reviewed-by: Rivera Jose-B46482 <German.Rivera@freescale.com>
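As a hedged usage sketch of the new variants: my_portal and
drain_on_saved_portal below are hypothetical, standing in for a portal
pointer saved at init time (e.g. the return value of
qman_create_affine_portal()) and for whatever hotplug-path code the
caller has.

	/* Placeholder: portal pointer saved when this context was set up. */
	static struct qman_portal *my_portal;

	/* Hypothetical CPU-hotplug path: keep servicing the portal this
	 * work was affine to before migration, by explicit reference
	 * rather than through the per-cpu affine lookup. */
	static void drain_on_saved_portal(void)
	{
		int ret;

		/* Fast-path DQRR processing on the saved portal. */
		ret = qman_p_poll_dqrr(my_portal, CONFIG_FSL_QMAN_POLL_LIMIT);
		if (ret == -EINVAL)
			return;	/* portal is a sharing redirect */

		/* Slow-path (MR, state changes, etc.) on the same portal. */
		qman_p_poll_slow(my_portal);
	}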
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/staging/fsl_qbman/qman_high.c | 402
1 file changed, 345 insertions(+), 57 deletions(-)
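Every conversion in the diff below follows the same shape: the body of a
legacy affine-portal API moves into a new qman_p_*() variant that takes
the portal explicitly, and the legacy API becomes a lookup-and-delegate
wrapper. A minimal sketch of that shape, with qman_some_op purely
illustrative (the real variants below also handle IRQ locking and
sharing redirects):

	/* New: caller supplies the portal; no affine lookup, so no
	 * migration hazard between lookup and use. */
	int qman_p_some_op(struct qman_portal *p, u32 arg)
	{
		/* ... body formerly inlined in qman_some_op() ... */
		return 0;
	}
	EXPORT_SYMBOL(qman_p_some_op);

	/* Legacy: resolve the calling cpu's affine portal, delegate. */
	int qman_some_op(u32 arg)
	{
		struct qman_portal *p = get_affine_portal();
		int ret = qman_p_some_op(p, arg);

		put_affine_portal();
		return ret;
	}
	EXPORT_SYMBOL(qman_some_op);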
diff --git a/drivers/staging/fsl_qbman/qman_high.c b/drivers/staging/fsl_qbman/qman_high.c
index 8c019e7..59e9a8a 100644
--- a/drivers/staging/fsl_qbman/qman_high.c
+++ b/drivers/staging/fsl_qbman/qman_high.c
@@ -154,6 +154,7 @@ static cpumask_t affine_mask;
static DEFINE_SPINLOCK(affine_mask_lock);
static u16 affine_channels[NR_CPUS];
static DEFINE_PER_CPU(struct qman_portal, qman_affine_portal);
+
/* "raw" gets the cpu-local struct whether it's a redirect or not. */
static inline struct qman_portal *get_raw_affine_portal(void)
{
@@ -544,8 +545,6 @@ fail_eqcr:
return NULL;
}
-
-
struct qman_portal *qman_create_affine_portal(
const struct qm_portal_config *config,
const struct qman_cgrs *cgrs)
@@ -569,7 +568,6 @@ struct qman_portal *qman_create_affine_portal(
return res;
}
-
/* These checks are BUG_ON()s because the driver is already supposed to avoid
* these cases. */
struct qman_portal *qman_create_affine_slave(struct qman_portal *redirect)
@@ -593,8 +591,6 @@ struct qman_portal *qman_create_affine_slave(struct qman_portal *redirect)
#endif
}
-
-
void qman_destroy_portal(struct qman_portal *qm)
{
const struct qm_portal_config *pcfg;
@@ -660,10 +656,16 @@ const struct qm_portal_config *qman_destroy_affine_portal(void)
return pcfg;
}
+const struct qman_portal_config *qman_p_get_portal_config(struct qman_portal *p)
+{
+ return &p->config->public_cfg;
+}
+EXPORT_SYMBOL(qman_p_get_portal_config);
+
const struct qman_portal_config *qman_get_portal_config(void)
{
struct qman_portal *p = get_affine_portal();
- const struct qman_portal_config *ret = &p->config->public_cfg;
+ const struct qman_portal_config *ret = qman_p_get_portal_config(p);
put_affine_portal();
return ret;
}
@@ -998,30 +1000,36 @@ u32 qman_irqsource_get(void)
}
EXPORT_SYMBOL(qman_irqsource_get);
-int qman_irqsource_add(u32 bits __maybe_unused)
+int qman_p_irqsource_add(struct qman_portal *p, u32 bits __maybe_unused)
{
- struct qman_portal *p = get_raw_affine_portal();
- int ret = 0;
+ __maybe_unused unsigned long irqflags;
#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
if (p->sharing_redirect)
- ret = -EINVAL;
+ return -EINVAL;
else
#endif
{
- __maybe_unused unsigned long irqflags;
PORTAL_IRQ_LOCK(p, irqflags);
set_bits(bits & QM_PIRQ_VISIBLE, &p->irq_sources);
qm_isr_enable_write(&p->p, p->irq_sources);
PORTAL_IRQ_UNLOCK(p, irqflags);
}
+ return 0;
+}
+EXPORT_SYMBOL(qman_p_irqsource_add);
+
+int qman_irqsource_add(u32 bits __maybe_unused)
+{
+ struct qman_portal *p = get_raw_affine_portal();
+ int ret;
+ ret = qman_p_irqsource_add(p, bits);
put_affine_portal();
return ret;
}
EXPORT_SYMBOL(qman_irqsource_add);
-int qman_irqsource_remove(u32 bits)
+int qman_p_irqsource_remove(struct qman_portal *p, u32 bits)
{
- struct qman_portal *p = get_raw_affine_portal();
__maybe_unused unsigned long irqflags;
u32 ier;
#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
@@ -1047,9 +1055,18 @@ int qman_irqsource_remove(u32 bits)
* data-dependency, ie. to protect against re-ordering. */
qm_isr_status_clear(&p->p, ~ier);
PORTAL_IRQ_UNLOCK(p, irqflags);
- put_affine_portal();
return 0;
}
+EXPORT_SYMBOL(qman_p_irqsource_remove);
+
+int qman_irqsource_remove(u32 bits)
+{
+ struct qman_portal *p = get_raw_affine_portal();
+ int ret;
+ ret = qman_p_irqsource_remove(p, bits);
+ put_affine_portal();
+ return ret;
+}
EXPORT_SYMBOL(qman_irqsource_remove);
const cpumask_t *qman_affine_cpus(void)
@@ -1073,10 +1090,10 @@ u16 qman_affine_channel(int cpu)
}
EXPORT_SYMBOL(qman_affine_channel);
-int qman_poll_dqrr(unsigned int limit)
+int qman_p_poll_dqrr(struct qman_portal *p, unsigned int limit)
{
- struct qman_portal *p = get_poll_portal();
int ret;
+
#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
if (unlikely(p->sharing_redirect))
ret = -EINVAL;
@@ -1086,14 +1103,22 @@ int qman_poll_dqrr(unsigned int limit)
BUG_ON(p->irq_sources & QM_PIRQ_DQRI);
ret = __poll_portal_fast(p, limit);
}
+ return ret;
+}
+EXPORT_SYMBOL(qman_p_poll_dqrr);
+
+int qman_poll_dqrr(unsigned int limit)
+{
+ struct qman_portal *p = get_poll_portal();
+ int ret;
+ ret = qman_p_poll_dqrr(p, limit);
put_poll_portal();
return ret;
}
EXPORT_SYMBOL(qman_poll_dqrr);
-u32 qman_poll_slow(void)
+u32 qman_p_poll_slow(struct qman_portal *p)
{
- struct qman_portal *p = get_poll_portal();
u32 ret;
#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
if (unlikely(p->sharing_redirect))
@@ -1105,18 +1130,26 @@ u32 qman_poll_slow(void)
ret = __poll_portal_slow(p, is);
qm_isr_status_clear(&p->p, ret);
}
+ return ret;
+}
+EXPORT_SYMBOL(qman_p_poll_slow);
+
+u32 qman_poll_slow(void)
+{
+ struct qman_portal *p = get_poll_portal();
+ u32 ret;
+ ret = qman_p_poll_slow(p);
put_poll_portal();
return ret;
}
EXPORT_SYMBOL(qman_poll_slow);
/* Legacy wrapper */
-void qman_poll(void)
+void qman_p_poll(struct qman_portal *p)
{
- struct qman_portal *p = get_poll_portal();
#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
if (unlikely(p->sharing_redirect))
- goto done;
+ return;
#endif
if ((~p->irq_sources) & QM_PIRQ_SLOW) {
if (!(p->slowpoll--)) {
@@ -1131,73 +1164,114 @@ void qman_poll(void)
}
if ((~p->irq_sources) & QM_PIRQ_DQRI)
__poll_portal_fast(p, CONFIG_FSL_QMAN_POLL_LIMIT);
-#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
-done:
-#endif
+}
+EXPORT_SYMBOL(qman_p_poll);
+
+void qman_poll(void)
+{
+ struct qman_portal *p = get_poll_portal();
+ qman_p_poll(p);
put_poll_portal();
}
EXPORT_SYMBOL(qman_poll);
+void qman_p_stop_dequeues(struct qman_portal *p)
+{
+ qman_stop_dequeues_ex(p);
+}
+EXPORT_SYMBOL(qman_p_stop_dequeues);
+
void qman_stop_dequeues(void)
{
struct qman_portal *p = get_affine_portal();
- qman_stop_dequeues_ex(p);
+ qman_p_stop_dequeues(p);
put_affine_portal();
}
EXPORT_SYMBOL(qman_stop_dequeues);
-void qman_start_dequeues(void)
+void qman_p_start_dequeues(struct qman_portal *p)
{
- struct qman_portal *p = get_affine_portal();
unsigned long irqflags __maybe_unused;
PORTAL_IRQ_LOCK(p, irqflags);
DPA_ASSERT(p->dqrr_disable_ref > 0);
if (!(--p->dqrr_disable_ref))
qm_dqrr_set_maxfill(&p->p, DQRR_MAXFILL);
PORTAL_IRQ_UNLOCK(p, irqflags);
+}
+EXPORT_SYMBOL(qman_p_start_dequeues);
+
+void qman_start_dequeues(void)
+{
+ struct qman_portal *p = get_affine_portal();
+ qman_p_start_dequeues(p);
put_affine_portal();
}
EXPORT_SYMBOL(qman_start_dequeues);
-void qman_static_dequeue_add(u32 pools)
+void qman_p_static_dequeue_add(struct qman_portal *p, u32 pools)
{
unsigned long irqflags __maybe_unused;
- struct qman_portal *p = get_affine_portal();
PORTAL_IRQ_LOCK(p, irqflags);
pools &= p->config->public_cfg.pools;
p->sdqcr |= pools;
qm_dqrr_sdqcr_set(&p->p, p->sdqcr);
PORTAL_IRQ_UNLOCK(p, irqflags);
+}
+EXPORT_SYMBOL(qman_p_static_dequeue_add);
+
+void qman_static_dequeue_add(u32 pools)
+{
+ struct qman_portal *p = get_affine_portal();
+ qman_p_static_dequeue_add(p, pools);
put_affine_portal();
}
EXPORT_SYMBOL(qman_static_dequeue_add);
-void qman_static_dequeue_del(u32 pools)
+void qman_p_static_dequeue_del(struct qman_portal *p, u32 pools)
{
- struct qman_portal *p = get_affine_portal();
unsigned long irqflags __maybe_unused;
PORTAL_IRQ_LOCK(p, irqflags);
pools &= p->config->public_cfg.pools;
p->sdqcr &= ~pools;
qm_dqrr_sdqcr_set(&p->p, p->sdqcr);
PORTAL_IRQ_UNLOCK(p, irqflags);
+}
+EXPORT_SYMBOL(qman_p_static_dequeue_del);
+
+void qman_static_dequeue_del(u32 pools)
+{
+ struct qman_portal *p = get_affine_portal();
+ qman_p_static_dequeue_del(p, pools);
put_affine_portal();
}
EXPORT_SYMBOL(qman_static_dequeue_del);
+u32 qman_p_static_dequeue_get(struct qman_portal *p)
+{
+ return p->sdqcr;
+}
+EXPORT_SYMBOL(qman_p_static_dequeue_get);
+
u32 qman_static_dequeue_get(void)
{
struct qman_portal *p = get_affine_portal();
- u32 ret = p->sdqcr;
+ u32 ret = qman_p_static_dequeue_get(p);
put_affine_portal();
return ret;
}
EXPORT_SYMBOL(qman_static_dequeue_get);
+void qman_p_dca(struct qman_portal *p, struct qm_dqrr_entry *dq,
+ int park_request)
+{
+ qm_dqrr_cdc_consume_1ptr(&p->p, dq, park_request);
+}
+EXPORT_SYMBOL(qman_p_dca);
+
void qman_dca(struct qm_dqrr_entry *dq, int park_request)
{
struct qman_portal *p = get_affine_portal();
- qm_dqrr_cdc_consume_1ptr(&p->p, dq, park_request);
+ qman_p_dca(p, dq, park_request);
put_affine_portal();
}
EXPORT_SYMBOL(qman_dca);
@@ -1849,30 +1923,49 @@ int qman_query_congestion(struct qm_mcr_querycongestion *congestion)
EXPORT_SYMBOL(qman_query_congestion);
/* internal function used as a wait_event() expression */
-static int set_vdqcr(struct qman_portal **p, struct qman_fq *fq, u32 vdqcr)
+static int set_p_vdqcr(struct qman_portal *p, struct qman_fq *fq, u32 vdqcr)
{
unsigned long irqflags __maybe_unused;
int ret = -EBUSY;
- *p = get_affine_portal();
- PORTAL_IRQ_LOCK(*p, irqflags);
- if (!(*p)->vdqcr_owned) {
+ PORTAL_IRQ_LOCK(p, irqflags);
+ if (!p->vdqcr_owned) {
FQLOCK(fq);
if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
goto escape;
fq_set(fq, QMAN_FQ_STATE_VDQCR);
FQUNLOCK(fq);
- (*p)->vdqcr_owned = fq;
+ p->vdqcr_owned = fq;
ret = 0;
}
escape:
- PORTAL_IRQ_UNLOCK(*p, irqflags);
+ PORTAL_IRQ_UNLOCK(p, irqflags);
if (!ret)
- qm_dqrr_vdqcr_set(&(*p)->p, vdqcr);
+ qm_dqrr_vdqcr_set(&p->p, vdqcr);
+ return ret;
+}
+
+static int set_vdqcr(struct qman_portal **p, struct qman_fq *fq, u32 vdqcr)
+{
+ int ret;
+ *p = get_affine_portal();
+ ret = set_p_vdqcr(*p, fq, vdqcr);
put_affine_portal();
return ret;
}
#ifdef CONFIG_FSL_DPA_CAN_WAIT
+static int wait_p_vdqcr_start(struct qman_portal *p, struct qman_fq *fq,
+ u32 vdqcr, u32 flags)
+{
+ int ret = 0;
+ if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
+ ret = wait_event_interruptible(affine_queue,
+ !(ret = set_p_vdqcr(p, fq, vdqcr)));
+ else
+ wait_event(affine_queue, !(ret = set_p_vdqcr(p, fq, vdqcr)));
+ return ret;
+}
+
static int wait_vdqcr_start(struct qman_portal **p, struct qman_fq *fq,
u32 vdqcr, u32 flags)
{
@@ -1886,6 +1979,46 @@ static int wait_vdqcr_start(struct qman_portal **p, struct qman_fq *fq,
}
#endif
+int qman_p_volatile_dequeue(struct qman_portal *p, struct qman_fq *fq,
+ u32 flags __maybe_unused, u32 vdqcr)
+{
+ int ret;
+
+ if ((fq->state != qman_fq_state_parked) &&
+ (fq->state != qman_fq_state_retired))
+ return -EINVAL;
+ if (vdqcr & QM_VDQCR_FQID_MASK)
+ return -EINVAL;
+ if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
+ return -EBUSY;
+ vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid;
+#ifdef CONFIG_FSL_DPA_CAN_WAIT
+ if (flags & QMAN_VOLATILE_FLAG_WAIT)
+ ret = wait_p_vdqcr_start(p, fq, vdqcr, flags);
+ else
+#endif
+ ret = set_p_vdqcr(p, fq, vdqcr);
+ if (ret)
+ return ret;
+ /* VDQCR is set */
+#ifdef CONFIG_FSL_DPA_CAN_WAIT
+ if (flags & QMAN_VOLATILE_FLAG_FINISH) {
+ if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
+ /* NB: don't propagate any error - the caller wouldn't
+ * know whether the VDQCR was issued or not. A signal
+ * could arrive after returning anyway, so the caller
+ * can check signal_pending() if that's an issue. */
+ wait_event_interruptible(affine_queue,
+ !fq_isset(fq, QMAN_FQ_STATE_VDQCR));
+ else
+ wait_event(affine_queue,
+ !fq_isset(fq, QMAN_FQ_STATE_VDQCR));
+ }
+#endif
+ return 0;
+}
+EXPORT_SYMBOL(qman_p_volatile_dequeue);
+
int qman_volatile_dequeue(struct qman_fq *fq, u32 flags __maybe_unused,
u32 vdqcr)
{
@@ -1964,7 +2097,7 @@ void qman_set_dc_ern(qman_cb_dc_ern handler, int affine)
}
EXPORT_SYMBOL(qman_set_dc_ern);
-static inline struct qm_eqcr_entry *try_eq_start(struct qman_portal **p,
+static inline struct qm_eqcr_entry *try_p_eq_start(struct qman_portal *p,
unsigned long *irqflags __maybe_unused,
struct qman_fq *fq,
const struct qm_fd *fd,
@@ -1972,45 +2105,41 @@ static inline struct qm_eqcr_entry *try_eq_start(struct qman_portal **p,
{
struct qm_eqcr_entry *eq;
u8 avail;
-
- *p = get_affine_portal();
- PORTAL_IRQ_LOCK(*p, (*irqflags));
+ PORTAL_IRQ_LOCK(p, (*irqflags));
#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) &&
(flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) {
- if ((*p)->eqci_owned) {
- PORTAL_IRQ_UNLOCK(*p, (*irqflags));
- put_affine_portal();
+ if (p->eqci_owned) {
+ PORTAL_IRQ_UNLOCK(p, (*irqflags));
return NULL;
}
- (*p)->eqci_owned = fq;
+ p->eqci_owned = fq;
}
#endif
- if ((*p)->use_eqcr_ci_stashing) {
+ if (p->use_eqcr_ci_stashing) {
/*
* The stashing case is easy, only update if we need to in
* order to try and liberate ring entries.
*/
- eq = qm_eqcr_start_stash(&(*p)->p);
+ eq = qm_eqcr_start_stash(&p->p);
} else {
/*
* The non-stashing case is harder, need to prefetch ahead of
* time.
*/
- avail = qm_eqcr_get_avail(&(*p)->p);
+ avail = qm_eqcr_get_avail(&p->p);
if (avail < 2)
- update_eqcr_ci(*p, avail);
- eq = qm_eqcr_start_no_stash(&(*p)->p);
+ update_eqcr_ci(p, avail);
+ eq = qm_eqcr_start_no_stash(&p->p);
}
if (unlikely(!eq)) {
#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) &&
(flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC)))
- (*p)->eqci_owned = NULL;
+ p->eqci_owned = NULL;
#endif
- PORTAL_IRQ_UNLOCK(*p, (*irqflags));
- put_affine_portal();
+ PORTAL_IRQ_UNLOCK(p, (*irqflags));
return NULL;
}
if (flags & QMAN_ENQUEUE_FLAG_DCA)
@@ -2028,6 +2157,20 @@ static inline struct qm_eqcr_entry *try_eq_start(struct qman_portal **p,
return eq;
}
+static inline struct qm_eqcr_entry *try_eq_start(struct qman_portal **p,
+ unsigned long *irqflags __maybe_unused,
+ struct qman_fq *fq,
+ const struct qm_fd *fd,
+ u32 flags)
+{
+ struct qm_eqcr_entry *eq;
+ *p = get_affine_portal();
+ eq = try_p_eq_start(*p, irqflags, fq, fd, flags);
+ if (!eq)
+ put_affine_portal();
+ return eq;
+}
+
#ifdef CONFIG_FSL_DPA_CAN_WAIT
static noinline struct qm_eqcr_entry *__wait_eq_start(struct qman_portal **p,
unsigned long *irqflags __maybe_unused,
@@ -2055,8 +2198,67 @@ static noinline struct qm_eqcr_entry *wait_eq_start(struct qman_portal **p,
(eq = __wait_eq_start(p, irqflags, fq, fd, flags)));
return eq;
}
+static noinline struct qm_eqcr_entry *__wait_p_eq_start(struct qman_portal *p,
+ unsigned long *irqflags __maybe_unused,
+ struct qman_fq *fq,
+ const struct qm_fd *fd,
+ u32 flags)
+{
+ struct qm_eqcr_entry *eq = try_p_eq_start(p, irqflags, fq, fd, flags);
+ if (!eq)
+ qm_eqcr_set_ithresh(&p->p, EQCR_ITHRESH);
+ return eq;
+}
+static noinline struct qm_eqcr_entry *wait_p_eq_start(struct qman_portal *p,
+ unsigned long *irqflags __maybe_unused,
+ struct qman_fq *fq,
+ const struct qm_fd *fd,
+ u32 flags)
+{
+ struct qm_eqcr_entry *eq;
+ if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT)
+ wait_event_interruptible(affine_queue,
+ (eq = __wait_p_eq_start(p, irqflags, fq, fd, flags)));
+ else
+ wait_event(affine_queue,
+ (eq = __wait_p_eq_start(p, irqflags, fq, fd, flags)));
+ return eq;
+}
#endif
+int qman_p_enqueue(struct qman_portal *p, struct qman_fq *fq,
+ const struct qm_fd *fd, u32 flags)
+{
+ struct qm_eqcr_entry *eq;
+ unsigned long irqflags __maybe_unused;
+
+#ifdef CONFIG_FSL_DPA_CAN_WAIT
+ if (flags & QMAN_ENQUEUE_FLAG_WAIT)
+ eq = wait_p_eq_start(p, &irqflags, fq, fd, flags);
+ else
+#endif
+ eq = try_p_eq_start(p, &irqflags, fq, fd, flags);
+ if (!eq)
+ return -EBUSY;
+ /* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */
+ qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE |
+ (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT)));
+ /* Factor the below out, it's used from qman_enqueue_orp() too */
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
+ if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) &&
+ (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) {
+ if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT)
+ wait_event_interruptible(affine_queue,
+ (p->eqci_owned != fq));
+ else
+ wait_event(affine_queue, (p->eqci_owned != fq));
+ }
+#endif
+ return 0;
+}
+EXPORT_SYMBOL(qman_p_enqueue);
+
int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd, u32 flags)
{
struct qman_portal *p;
@@ -2091,6 +2293,54 @@ int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd, u32 flags)
}
EXPORT_SYMBOL(qman_enqueue);
+int qman_p_enqueue_orp(struct qman_portal *p, struct qman_fq *fq,
+ const struct qm_fd *fd, u32 flags,
+ struct qman_fq *orp, u16 orp_seqnum)
+{
+ struct qm_eqcr_entry *eq;
+ unsigned long irqflags __maybe_unused;
+
+#ifdef CONFIG_FSL_DPA_CAN_WAIT
+ if (flags & QMAN_ENQUEUE_FLAG_WAIT)
+ eq = wait_p_eq_start(p, &irqflags, fq, fd, flags);
+ else
+#endif
+ eq = try_p_eq_start(p, &irqflags, fq, fd, flags);
+ if (!eq)
+ return -EBUSY;
+ /* Process ORP-specifics here */
+ if (flags & QMAN_ENQUEUE_FLAG_NLIS)
+ orp_seqnum |= QM_EQCR_SEQNUM_NLIS;
+ else {
+ orp_seqnum &= ~QM_EQCR_SEQNUM_NLIS;
+ if (flags & QMAN_ENQUEUE_FLAG_NESN)
+ orp_seqnum |= QM_EQCR_SEQNUM_NESN;
+ else
+ /* No need to check for QMAN_ENQUEUE_FLAG_HOLE */
+ orp_seqnum &= ~QM_EQCR_SEQNUM_NESN;
+ }
+ eq->seqnum = orp_seqnum;
+ eq->orp = orp->fqid;
+ /* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */
+ qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_ORP |
+ ((flags & (QMAN_ENQUEUE_FLAG_HOLE | QMAN_ENQUEUE_FLAG_NESN)) ?
+ 0 : QM_EQCR_VERB_CMD_ENQUEUE) |
+ (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT)));
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
+ if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) &&
+ (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) {
+ if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT)
+ wait_event_interruptible(affine_queue,
+ (p->eqci_owned != fq));
+ else
+ wait_event(affine_queue, (p->eqci_owned != fq));
+ }
+#endif
+ return 0;
+}
+EXPORT_SYMBOL(qman_p_enqueue_orp);
+
int qman_enqueue_orp(struct qman_fq *fq, const struct qm_fd *fd, u32 flags,
struct qman_fq *orp, u16 orp_seqnum)
{
@@ -2140,6 +2390,45 @@ int qman_enqueue_orp(struct qman_fq *fq, const struct qm_fd *fd, u32 flags,
}
EXPORT_SYMBOL(qman_enqueue_orp);
+int qman_p_enqueue_precommit(struct qman_portal *p, struct qman_fq *fq,
+ const struct qm_fd *fd, u32 flags,
+ qman_cb_precommit cb, void *cb_arg)
+{
+ struct qm_eqcr_entry *eq;
+ unsigned long irqflags __maybe_unused;
+
+#ifdef CONFIG_FSL_DPA_CAN_WAIT
+ if (flags & QMAN_ENQUEUE_FLAG_WAIT)
+ eq = wait_p_eq_start(p, &irqflags, fq, fd, flags);
+ else
+#endif
+ eq = try_p_eq_start(p, &irqflags, fq, fd, flags);
+ if (!eq)
+ return -EBUSY;
+ /* invoke user supplied callback function before writing commit verb */
+ if (cb(cb_arg)) {
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ return -EINVAL;
+ }
+ /* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */
+ qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE |
+ (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT)));
+ /* Factor the below out, it's used from qman_enqueue_orp() too */
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
+ if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) &&
+ (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) {
+ if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT)
+ wait_event_interruptible(affine_queue,
+ (p->eqci_owned != fq));
+ else
+ wait_event(affine_queue, (p->eqci_owned != fq));
+ }
+#endif
+ return 0;
+}
+EXPORT_SYMBOL(qman_p_enqueue_precommit);
+
int qman_enqueue_precommit(struct qman_fq *fq, const struct qm_fd *fd,
u32 flags, qman_cb_precommit cb, void *cb_arg)
{
@@ -4536,5 +4825,4 @@ int qman_shutdown_fq(u32 fqid)
ret = qm_shutdown_fq(&low_p, 1, fqid);
PORTAL_IRQ_UNLOCK(p, irqflags);
put_affine_portal();
- return ret;
}
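For completeness, a hedged usage sketch of one of the new entry points,
qman_p_enqueue(); my_portal, my_fq and my_fd are placeholders for
objects initialised elsewhere, and send_frame is hypothetical:

	/* Hypothetical TX path pinned to an explicitly held portal. */
	static int send_frame(struct qman_portal *my_portal,
			      struct qman_fq *my_fq,
			      const struct qm_fd *my_fd)
	{
		int ret = qman_p_enqueue(my_portal, my_fq, my_fd, 0);

		if (ret == -EBUSY) {
			/* EQCR full: the caller may retry, or rebuild
			 * with QMAN_ENQUEUE_FLAG_WAIT where
			 * CONFIG_FSL_DPA_CAN_WAIT is enabled. */
		}
		return ret;
	}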