author     Parav Pandit <pandit.parav@gmail.com>    2016-09-28 20:24:12 (GMT)
committer  Doug Ledford <dledford@redhat.com>       2016-10-06 17:50:04 (GMT)
commit     063af59597492d31c44e549d6c773b6485f7dc53 (patch)
tree       da38a219f014a9fe8966faad735e53b966e8e0e0 /drivers/infiniband/sw
parent     61347fa6087884305ea4a3a04501839fdb68dc76 (diff)
IB/rxe: Avoid scheduling tasklet for userspace QP
This patch avoids scheduling a tasklet for WQE and protocol processing for userspace QPs; the work is instead performed in the calling process context. To improve code readability, the kernel-specific post_send handling is moved into the rxe_post_send_kernel() function.

Signed-off-by: Parav Pandit <pandit.parav@gmail.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
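For context, the second argument of rxe_run_task() selects between deferred and inline execution, which is why passing 0 below runs the requester in the caller's context. A minimal sketch of that dispatch, assuming the rxe_task helpers in drivers/infiniband/sw/rxe/rxe_task.c as of this kernel (the _sketch name is ours; tasklet_schedule() and rxe_do_task() are the calls the helper of this era builds on):

/* Sketch: how the sched flag of rxe_run_task() picks the execution context. */
static void rxe_run_task_sketch(struct rxe_task *task, int sched)
{
	if (sched)
		tasklet_schedule(&task->tasklet);  /* defer to softirq context */
	else
		rxe_do_task((unsigned long)task);  /* run now, in the caller's context */
}

Passing 0 for a userspace QP therefore drives the requester state machine directly in the posting process instead of bouncing through a tasklet; the WQEs were already written by userspace into the queue mapped into the process, so the kernel has nothing to copy first.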
Diffstat (limited to 'drivers/infiniband/sw')
-rw-r--r--    drivers/infiniband/sw/rxe/rxe_verbs.c    38
1 file changed, 25 insertions, 13 deletions
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 4552be9..a5af691 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -801,26 +801,15 @@ err1:
 	return err;
 }
 
-static int rxe_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
-			 struct ib_send_wr **bad_wr)
+static int rxe_post_send_kernel(struct rxe_qp *qp, struct ib_send_wr *wr,
+				struct ib_send_wr **bad_wr)
 {
 	int err = 0;
-	struct rxe_qp *qp = to_rqp(ibqp);
 	unsigned int mask;
 	unsigned int length = 0;
 	int i;
 	int must_sched;
 
-	if (unlikely(!qp->valid)) {
-		*bad_wr = wr;
-		return -EINVAL;
-	}
-
-	if (unlikely(qp->req.state < QP_STATE_READY)) {
-		*bad_wr = wr;
-		return -EINVAL;
-	}
-
 	while (wr) {
 		mask = wr_opcode_mask(wr->opcode, qp);
 		if (unlikely(!mask)) {
@@ -861,6 +850,29 @@ static int rxe_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 	return err;
 }
 
+static int rxe_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+			 struct ib_send_wr **bad_wr)
+{
+	struct rxe_qp *qp = to_rqp(ibqp);
+
+	if (unlikely(!qp->valid)) {
+		*bad_wr = wr;
+		return -EINVAL;
+	}
+
+	if (unlikely(qp->req.state < QP_STATE_READY)) {
+		*bad_wr = wr;
+		return -EINVAL;
+	}
+
+	if (qp->is_user) {
+		/* Utilize process context to do protocol processing */
+		rxe_run_task(&qp->req.task, 0);
+		return 0;
+	} else
+		return rxe_post_send_kernel(qp, wr, bad_wr);
+}
+
 static int rxe_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 			 struct ib_recv_wr **bad_wr)
 {