author    Mike Marciniszyn <mike.marciniszyn@qlogic.com>  2011-01-11 01:42:22 (GMT)
committer Roland Dreier <rolandd@cisco.com>  2011-01-11 01:42:22 (GMT)
commit    dd04e43d46ad7a4e625a9ff3b270dc0db9abe81d (patch)
tree      9cb5e6c86c1ee179cb03b7335bf1ecb2c58ebb7d /drivers/infiniband/hw
parent    994bcd28a36af1413381dfe0aac065e2cbc2af40 (diff)
IB/qib: Unnecessary delayed completions on RC connection
Currently, on receipt of a response message (ACK, RDMA read response, atomic response, etc.), if the SDMA completion counter has not advanced, the driver delays the completion of the WQE. In most cases this is overly pessimistic, since the response (ACK) to a previously transmitted send implies that the send is complete.

Ensure that the SDMA queue is progressed appropriately before determining whether a send has delayed completions.

Signed-off-by: Mike Marciniszyn <mike.marciniszyn@qlogic.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
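For reference, the PSN window test added by this patch relies on the driver's 24-bit circular sequence comparison. A minimal self-contained sketch of that comparison, modeled on the qib_cmp24() helper (check qib_verbs.h in your tree for the authoritative definition):

    /*
     * 24-bit circular PSN comparison. PSNs occupy the low 24 bits,
     * so shifting the signed difference left by 8 discards the high
     * byte and turns 24-bit wraparound into ordinary signed
     * comparison: the result is > 0, == 0, or < 0 as 'a' is after,
     * equal to, or before 'b' in the circular PSN space.
     */
    typedef unsigned int u32;   /* stand-in for the kernel type */

    static inline int qib_cmp24(u32 a, u32 b)
    {
            return (((int) a) - ((int) b)) << 8;
    }

With that, qib_cmp24(psn, qp->s_sending_psn) >= 0 in the hunk below asks whether the ACK'd PSN has caught up with the oldest send still on the SDMA busy list, and qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0 checks that such in-flight sends exist at all.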
Diffstat (limited to 'drivers/infiniband/hw')
 drivers/infiniband/hw/qib/qib_rc.c | 24 ++++++++++++++++++++++++
 1 file changed, 24 insertions(+), 0 deletions(-)
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
index 955fb71..8245237 100644
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -1407,6 +1407,7 @@ static void qib_rc_rcv_resp(struct qib_ibport *ibp,
                             struct qib_ctxtdata *rcd)
 {
         struct qib_swqe *wqe;
+        struct qib_pportdata *ppd = ppd_from_ibp(ibp);
         enum ib_wc_status status;
         unsigned long flags;
         int diff;
@@ -1414,6 +1415,29 @@ static void qib_rc_rcv_resp(struct qib_ibport *ibp,
         u32 aeth;
         u64 val;
 
+        if (opcode != OP(RDMA_READ_RESPONSE_MIDDLE)) {
+                /*
+                 * If ACK'd PSN on SDMA busy list try to make progress to
+                 * reclaim SDMA credits.
+                 */
+                if ((qib_cmp24(psn, qp->s_sending_psn) >= 0) &&
+                    (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)) {
+
+                        /*
+                         * If send tasklet not running attempt to progress
+                         * SDMA queue.
+                         */
+                        if (!(qp->s_flags & QIB_S_BUSY)) {
+                                /* Acquire SDMA Lock */
+                                spin_lock_irqsave(&ppd->sdma_lock, flags);
+                                /* Invoke sdma make progress */
+                                qib_sdma_make_progress(ppd);
+                                /* Release SDMA Lock */
+                                spin_unlock_irqrestore(&ppd->sdma_lock, flags);
+                        }
+                }
+        }
+
         spin_lock_irqsave(&qp->s_lock, flags);
 
         /* Ignore invalid responses. */
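The added block is an instance of a more general pattern: before treating a completion as delayed, opportunistically drive the descriptor queue yourself if no other context is already doing so. A minimal userspace sketch of that pattern, with hypothetical names (struct ring, ring_make_progress, wqe_can_complete) standing in for the driver's SDMA queue, send-tasklet flag, and spinlock:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct ring {
            pthread_mutex_t lock;   /* stands in for ppd->sdma_lock */
            unsigned head;          /* next descriptor to be retired */
            unsigned hw_done;       /* descriptors the "hardware" finished */
            bool busy;              /* a tasklet-like context is draining */
    };

    /* Reclaim finished descriptors; caller holds ring->lock. */
    static void ring_make_progress(struct ring *r)
    {
            while (r->head < r->hw_done)
                    r->head++;      /* retire one completed descriptor */
    }

    /* Returns true if the WQE covering 'idx' can complete immediately. */
    static bool wqe_can_complete(struct ring *r, unsigned idx)
    {
            bool done;

            pthread_mutex_lock(&r->lock);
            if (!r->busy)           /* no tasklet running: drive progress */
                    ring_make_progress(r);
            done = idx < r->head;   /* descriptor already retired? */
            pthread_mutex_unlock(&r->lock);
            return done;
    }

    int main(void)
    {
            struct ring r = { PTHREAD_MUTEX_INITIALIZER, 0, 3, false };

            /*
             * An ACK for descriptor 2 arrives; without the opportunistic
             * progress call, head would still be 0 and the completion
             * would be needlessly delayed.
             */
            printf("WQE complete now? %s\n",
                   wqe_can_complete(&r, 2) ? "yes" : "no (delayed)");
            return 0;
    }

The busy check mirrors the QIB_S_BUSY test above: if the send tasklet is already draining the queue, taking the lock to repeat the same work would only add contention.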