From f400e5b38a5eeb8a91b481e4f3059611fa4ddce2 Mon Sep 17 00:00:00 2001 From: Roland Dreier Date: Wed, 14 Jul 2010 13:25:04 -0700 Subject: IB/umad: Remove unused-but-set variable 'already_dead' Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c index 6babb72..5fa8569 100644 --- a/drivers/infiniband/core/user_mad.c +++ b/drivers/infiniband/core/user_mad.c @@ -1085,7 +1085,6 @@ err_cdev: static void ib_umad_kill_port(struct ib_umad_port *port) { struct ib_umad_file *file; - int already_dead; int id; dev_set_drvdata(port->dev, NULL); @@ -1103,7 +1102,6 @@ static void ib_umad_kill_port(struct ib_umad_port *port) list_for_each_entry(file, &port->file_list, port_list) { mutex_lock(&file->mutex); - already_dead = file->agents_dead; file->agents_dead = 1; mutex_unlock(&file->mutex); -- cgit v0.10.2 From ea9f3bc6d1d8e2c56225e5be912d035b04740682 Mon Sep 17 00:00:00 2001 From: Roland Dreier Date: Wed, 14 Jul 2010 13:29:21 -0700 Subject: RDMA/nes: Rewrite expression to avoid undefined semantics Change code like x = expr(++x) that assigns to x twice without a sequence point in between to the intended (and well-defined) x = expr(x + 1) Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c index 57874a1..f41d890 100644 --- a/drivers/infiniband/hw/nes/nes_hw.c +++ b/drivers/infiniband/hw/nes/nes_hw.c @@ -1970,7 +1970,7 @@ void nes_destroy_nic_qp(struct nes_vnic *nesvnic) dev_kfree_skb( nesvnic->nic.tx_skb[nesvnic->nic.sq_tail]); - nesvnic->nic.sq_tail = (++nesvnic->nic.sq_tail) + nesvnic->nic.sq_tail = (nesvnic->nic.sq_tail + 1) & (nesvnic->nic.sq_size - 1); } -- cgit v0.10.2 From c1d7356c8572f3fe0445336d8e75914bdcadad59 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Mon, 31 May 2010 14:00:53 +0000 Subject: RDMA/cxgb4: Remove unneeded assignment We don't need to assign rpl here, we do that later on. Signed-off-by: Dan Carpenter [ Indeed this assignment makes no sense, since skb is set to NULL a couple of lines before. - Roland ] Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index 855ee44..b5e676c 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c @@ -2244,7 +2244,7 @@ static void process_work(struct work_struct *work) { struct sk_buff *skb = NULL; struct c4iw_dev *dev; - struct cpl_act_establish *rpl = cplhdr(skb); + struct cpl_act_establish *rpl; unsigned int opcode; int ret; -- cgit v0.10.2 From 85963e4cbcf11c00b1d27ea0e0fcab8cb3d7a69b Mon Sep 17 00:00:00 2001 From: Roland Dreier Date: Mon, 19 Jul 2010 13:13:09 -0700 Subject: RDMA/cxgb4: Remove unneeded NULL check The rest of the code seems to assume that ep->com.cm_id can't be NULL, so remove an unneeded test. 
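The pattern removed here is a dead NULL test: the pointer is checked before one use but dereferenced unconditionally a few lines later, so the check guards nothing. A minimal sketch of the shape (illustrative only, not the driver code):

	if (ep->com.cm_id)                              /* guarded use...   */
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
	if (status < 0)
		ep->com.cm_id->rem_ref(ep->com.cm_id);  /* ...unguarded use */

If cm_id could really be NULL at this point, the second dereference would crash anyway, so the test only obscured the actual invariant.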
Reported-by: Dan Carpenter Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index b5e676c..4185c3b 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c @@ -780,11 +780,11 @@ static void connect_reply_upcall(struct c4iw_ep *ep, int status) event.private_data_len = ep->plen; event.private_data = ep->mpa_pkt + sizeof(struct mpa_message); } - if (ep->com.cm_id) { - PDBG("%s ep %p tid %u status %d\n", __func__, ep, - ep->hwtid, status); - ep->com.cm_id->event_handler(ep->com.cm_id, &event); - } + + PDBG("%s ep %p tid %u status %d\n", __func__, ep, + ep->hwtid, status); + ep->com.cm_id->event_handler(ep->com.cm_id, &event); + if (status < 0) { ep->com.cm_id->rem_ref(ep->com.cm_id); ep->com.cm_id = NULL; -- cgit v0.10.2 From cc323b2aaa3921c4eeec309ff64256b0c43ca752 Mon Sep 17 00:00:00 2001 From: Ralph Campbell Date: Thu, 3 Jun 2010 00:21:07 +0000 Subject: IB/qib: Avoid variable-length array Rather than use a variable size array allocation on the stack, define a constant for the maximum array size possible. Signed-off-by: Ralph Campbell Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h index 3593983..e67dba4 100644 --- a/drivers/infiniband/hw/qib/qib.h +++ b/drivers/infiniband/hw/qib/qib.h @@ -326,6 +326,9 @@ struct qib_verbs_txreq { #define QIB_DEFAULT_MTU 4096 +/* max number of IB ports supported per HCA */ +#define QIB_MAX_IB_PORTS 2 + /* * Possible IB config parameters for f_get/set_ib_table() */ diff --git a/drivers/infiniband/hw/qib/qib_tx.c b/drivers/infiniband/hw/qib/qib_tx.c index af30232..7f36454 100644 --- a/drivers/infiniband/hw/qib/qib_tx.c +++ b/drivers/infiniband/hw/qib/qib_tx.c @@ -170,7 +170,7 @@ static int find_ctxt(struct qib_devdata *dd, unsigned bufn) void qib_disarm_piobufs_set(struct qib_devdata *dd, unsigned long *mask, unsigned cnt) { - struct qib_pportdata *ppd, *pppd[dd->num_pports]; + struct qib_pportdata *ppd, *pppd[QIB_MAX_IB_PORTS]; unsigned i; unsigned long flags; -- cgit v0.10.2 From 91fb0dd9cb71ab1a90ab1f48c34b935fdbca55b9 Mon Sep 17 00:00:00 2001 From: Alexander Schmidt Date: Thu, 10 Jun 2010 09:06:53 +0000 Subject: IB/ehca: Fix bitmask handling for lock_hcalls Fix reading hcall locking capability bit from device capabilities. Signed-off-by: Alexander Schmidt Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c index ecb51b3..cfc4de7 100644 --- a/drivers/infiniband/hw/ehca/ehca_main.c +++ b/drivers/infiniband/hw/ehca/ehca_main.c @@ -360,7 +360,8 @@ static int ehca_sense_attributes(struct ehca_shca *shca) * a firmware property, so it's valid across all adapters */ if (ehca_lock_hcalls == -1) - ehca_lock_hcalls = !(shca->hca_cap & HCA_CAP_H_ALLOC_RES_SYNC); + ehca_lock_hcalls = !EHCA_BMASK_GET(HCA_CAP_H_ALLOC_RES_SYNC, + shca->hca_cap); /* translate supported MR page sizes; always support 4K */ shca->hca_cap_mr_pgsize = EHCA_PAGESIZE; -- cgit v0.10.2 From dccb816de38efe72becc5d7f90a5dfedcd6fd053 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Sat, 12 Jun 2010 02:25:09 +0000 Subject: IB/ipath: Fix probe failure path The failure path in ipath_init_one() does not match the cleanup code in ipath_remove_one() and appears to leave interrupts enabled in some cases. Change it to match. 
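The rule the fix applies: an init function's error unwind must tear down exactly what the remove path tears down, in the same order, or a late probe failure leaves hardware state (here, enabled interrupts) behind. A condensed sketch of the resulting structure, with details elided:

	static int ipath_init_one(struct pci_dev *pdev, ...)
	{
		...
	bail_irqsetup:
		cleanup_device(dd);              /* same helper the remove path uses */
		if (dd->ipath_irq)
			dd->ipath_f_free_irq(dd);
		if (dd->ipath_f_cleanup)
			dd->ipath_f_cleanup(dd); /* chip-specific teardown */
	bail_iounmap:
		iounmap(...);
		...
	}

Forward-declaring cleanup_device() (and dropping its __devexit annotation) is what lets the probe path share it with ipath_remove_one().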
Signed-off-by: Ben Hutchings Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c index 2133746..765f0fc 100644 --- a/drivers/infiniband/hw/ipath/ipath_driver.c +++ b/drivers/infiniband/hw/ipath/ipath_driver.c @@ -390,6 +390,8 @@ done: ipath_enable_armlaunch(dd); } +static void cleanup_device(struct ipath_devdata *dd); + static int __devinit ipath_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { @@ -616,8 +618,13 @@ static int __devinit ipath_init_one(struct pci_dev *pdev, goto bail; bail_irqsetup: - if (pdev->irq) - free_irq(pdev->irq, dd); + cleanup_device(dd); + + if (dd->ipath_irq) + dd->ipath_f_free_irq(dd); + + if (dd->ipath_f_cleanup) + dd->ipath_f_cleanup(dd); bail_iounmap: iounmap((volatile void __iomem *) dd->ipath_kregbase); @@ -635,7 +642,7 @@ bail: return ret; } -static void __devexit cleanup_device(struct ipath_devdata *dd) +static void cleanup_device(struct ipath_devdata *dd) { int port; struct ipath_portdata **tmp; -- cgit v0.10.2 From ba6d39256bed87a0e8ee1770b5f7638bb3e0cfe4 Mon Sep 17 00:00:00 2001 From: Steve Wise Date: Wed, 23 Jun 2010 15:46:49 +0000 Subject: RDMA/cxgb4: Add module option to tweak delayed ack Signed-off-by: Steve Wise Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index 4185c3b..e7b08dc 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c @@ -61,6 +61,10 @@ static char *states[] = { NULL, }; +static int dack_mode; +module_param(dack_mode, int, 0644); +MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=0)"); + int c4iw_max_read_depth = 8; module_param(c4iw_max_read_depth, int, 0644); MODULE_PARM_DESC(c4iw_max_read_depth, "Per-connection max ORD/IRD (default=8)"); @@ -474,6 +478,7 @@ static int send_connect(struct c4iw_ep *ep) cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx); wscale = compute_wscale(rcv_win); opt0 = KEEP_ALIVE(1) | + DELACK(1) | WND_SCALE(wscale) | MSS_IDX(mtu_idx) | L2T_IDX(ep->l2t->idx) | @@ -845,7 +850,9 @@ static int update_rx_credits(struct c4iw_ep *ep, u32 credits) INIT_TP_WR(req, ep->hwtid); OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK, ep->hwtid)); - req->credit_dack = cpu_to_be32(credits); + req->credit_dack = cpu_to_be32(credits | RX_FORCE_ACK(1) | + F_RX_DACK_CHANGE | + V_RX_DACK_MODE(dack_mode)); set_wr_txq(skb, CPL_PRIORITY_ACK, ep->txq_idx); c4iw_ofld_send(&ep->com.dev->rdev, skb); return credits; @@ -1264,6 +1271,7 @@ static void accept_cr(struct c4iw_ep *ep, __be32 peer_ip, struct sk_buff *skb, cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx); wscale = compute_wscale(rcv_win); opt0 = KEEP_ALIVE(1) | + DELACK(1) | WND_SCALE(wscale) | MSS_IDX(mtu_idx) | L2T_IDX(ep->l2t->idx) | diff --git a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h index fc706bd..dc193c2 100644 --- a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h +++ b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h @@ -826,4 +826,14 @@ struct ulptx_idata { #define S_ULPTX_NSGE 0 #define M_ULPTX_NSGE 0xFFFF #define V_ULPTX_NSGE(x) ((x) << S_ULPTX_NSGE) + +#define S_RX_DACK_MODE 29 +#define M_RX_DACK_MODE 0x3 +#define V_RX_DACK_MODE(x) ((x) << S_RX_DACK_MODE) +#define G_RX_DACK_MODE(x) (((x) >> S_RX_DACK_MODE) & M_RX_DACK_MODE) + +#define S_RX_DACK_CHANGE 31 +#define V_RX_DACK_CHANGE(x) ((x) << S_RX_DACK_CHANGE) +#define F_RX_DACK_CHANGE V_RX_DACK_CHANGE(1U) + #endif /* _T4FW_RI_API_H_ */ -- cgit v0.10.2 From 
d3c814e8b2a094dc3bcbe6a0d93ec4824b26e86a Mon Sep 17 00:00:00 2001 From: David Rientjes Date: Wed, 21 Jul 2010 02:44:56 +0000 Subject: RDMA/cxgb4: Remove dependency on __GFP_NOFAIL The alloc_skb() in various allocations are failable, so remove __GFP_NOFAIL from their masks. Signed-off-by: David Rientjes Acked-by: Steve Wise Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c index fac5c6e..b3daf39 100644 --- a/drivers/infiniband/hw/cxgb4/cq.c +++ b/drivers/infiniband/hw/cxgb4/cq.c @@ -43,7 +43,7 @@ static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq, int ret; wr_len = sizeof *res_wr + sizeof *res; - skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL); + skb = alloc_skb(wr_len, GFP_KERNEL); if (!skb) return -ENOMEM; set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0); @@ -118,7 +118,7 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq, /* build fw_ri_res_wr */ wr_len = sizeof *res_wr + sizeof *res; - skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL); + skb = alloc_skb(wr_len, GFP_KERNEL); if (!skb) { ret = -ENOMEM; goto err4; diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c index 82b5703..269373a 100644 --- a/drivers/infiniband/hw/cxgb4/mem.c +++ b/drivers/infiniband/hw/cxgb4/mem.c @@ -59,7 +59,7 @@ static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len, wr_len = roundup(sizeof *req + sizeof *sc + roundup(copy_len, T4_ULPTX_MIN_IO), 16); - skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL); + skb = alloc_skb(wr_len, GFP_KERNEL); if (!skb) return -ENOMEM; set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0); diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c index 7065cb3..b88b1af 100644 --- a/drivers/infiniband/hw/cxgb4/qp.c +++ b/drivers/infiniband/hw/cxgb4/qp.c @@ -130,7 +130,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq, /* build fw_ri_res_wr */ wr_len = sizeof *res_wr + 2 * sizeof *res; - skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL); + skb = alloc_skb(wr_len, GFP_KERNEL); if (!skb) { ret = -ENOMEM; goto err7; @@ -961,7 +961,7 @@ static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp) PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid, qhp->ep->hwtid); - skb = alloc_skb(sizeof *wqe, GFP_KERNEL | __GFP_NOFAIL); + skb = alloc_skb(sizeof *wqe, GFP_KERNEL); if (!skb) return -ENOMEM; set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx); @@ -1035,7 +1035,7 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp) PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid, qhp->ep->hwtid); - skb = alloc_skb(sizeof *wqe, GFP_KERNEL | __GFP_NOFAIL); + skb = alloc_skb(sizeof *wqe, GFP_KERNEL); if (!skb) return -ENOMEM; set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx); -- cgit v0.10.2 From d37ac31ddc24c1a0beed134278bc074c98812210 Mon Sep 17 00:00:00 2001 From: Steve Wise Date: Thu, 10 Jun 2010 19:03:00 +0000 Subject: RDMA/cxgb4: Support variable sized work requests T4 EQ entries are in multiples of 64 bytes. Currently the RDMA SQ and RQ use fixed sized entries composed of 4 EQ entries for the SQ and 2 EQ entries for the RQ. For optimial latency with small IO, we need to change this so the HW only needs to DMA the EQ entries actually used by a given work request. Implementation: - add wq_pidx counter to track where we are in the EQ. cidx/pidx are used for the sw sq/rq tracking and flow control. - the variable part of work requests is the SGL. 
Add new functions to build the SGL and/or immediate data directly in the EQ memory wrapping when needed. - adjust the min burst size for the EQ contexts to 64B. Signed-off-by: Steve Wise Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c index b88b1af..657a5b3 100644 --- a/drivers/infiniband/hw/cxgb4/qp.c +++ b/drivers/infiniband/hw/cxgb4/qp.c @@ -162,7 +162,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq, res->u.sqrq.dcaen_to_eqsize = cpu_to_be32( V_FW_RI_RES_WR_DCAEN(0) | V_FW_RI_RES_WR_DCACPU(0) | - V_FW_RI_RES_WR_FBMIN(3) | + V_FW_RI_RES_WR_FBMIN(2) | V_FW_RI_RES_WR_FBMAX(3) | V_FW_RI_RES_WR_CIDXFTHRESHO(0) | V_FW_RI_RES_WR_CIDXFTHRESH(0) | @@ -185,7 +185,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq, res->u.sqrq.dcaen_to_eqsize = cpu_to_be32( V_FW_RI_RES_WR_DCAEN(0) | V_FW_RI_RES_WR_DCACPU(0) | - V_FW_RI_RES_WR_FBMIN(3) | + V_FW_RI_RES_WR_FBMIN(2) | V_FW_RI_RES_WR_FBMAX(3) | V_FW_RI_RES_WR_CIDXFTHRESHO(0) | V_FW_RI_RES_WR_CIDXFTHRESH(0) | @@ -235,12 +235,78 @@ err1: return -ENOMEM; } -static int build_rdma_send(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16) +static int build_immd(struct t4_sq *sq, struct fw_ri_immd *immdp, + struct ib_send_wr *wr, int max, u32 *plenp) { + u8 *dstp, *srcp; + u32 plen = 0; int i; + int rem, len; + + dstp = (u8 *)immdp->data; + for (i = 0; i < wr->num_sge; i++) { + if ((plen + wr->sg_list[i].length) > max) + return -EMSGSIZE; + srcp = (u8 *)(unsigned long)wr->sg_list[i].addr; + plen += wr->sg_list[i].length; + rem = wr->sg_list[i].length; + while (rem) { + if (dstp == (u8 *)&sq->queue[sq->size]) + dstp = (u8 *)sq->queue; + if (rem <= (u8 *)&sq->queue[sq->size] - dstp) + len = rem; + else + len = (u8 *)&sq->queue[sq->size] - dstp; + memcpy(dstp, srcp, len); + dstp += len; + srcp += len; + rem -= len; + } + } + immdp->op = FW_RI_DATA_IMMD; + immdp->r1 = 0; + immdp->r2 = 0; + immdp->immdlen = cpu_to_be32(plen); + *plenp = plen; + return 0; +} + +static int build_isgl(__be64 *queue_start, __be64 *queue_end, + struct fw_ri_isgl *isglp, struct ib_sge *sg_list, + int num_sge, u32 *plenp) + +{ + int i; + u32 plen = 0; + __be64 *flitp = (__be64 *)isglp->sge; + + for (i = 0; i < num_sge; i++) { + if ((plen + sg_list[i].length) < plen) + return -EMSGSIZE; + plen += sg_list[i].length; + *flitp = cpu_to_be64(((u64)sg_list[i].lkey << 32) | + sg_list[i].length); + if (++flitp == queue_end) + flitp = queue_start; + *flitp = cpu_to_be64(sg_list[i].addr); + if (++flitp == queue_end) + flitp = queue_start; + } + isglp->op = FW_RI_DATA_ISGL; + isglp->r1 = 0; + isglp->nsge = cpu_to_be16(num_sge); + isglp->r2 = 0; + if (plenp) + *plenp = plen; + return 0; +} + +static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe, + struct ib_send_wr *wr, u8 *len16) +{ u32 plen; int size; - u8 *datap; + int ret; if (wr->num_sge > T4_MAX_SEND_SGE) return -EINVAL; @@ -267,43 +333,23 @@ static int build_rdma_send(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16) default: return -EINVAL; } + plen = 0; if (wr->num_sge) { if (wr->send_flags & IB_SEND_INLINE) { - datap = (u8 *)wqe->send.u.immd_src[0].data; - for (i = 0; i < wr->num_sge; i++) { - if ((plen + wr->sg_list[i].length) > - T4_MAX_SEND_INLINE) { - return -EMSGSIZE; - } - plen += wr->sg_list[i].length; - memcpy(datap, - (void *)(unsigned long)wr->sg_list[i].addr, - wr->sg_list[i].length); - datap += wr->sg_list[i].length; - } - wqe->send.u.immd_src[0].op = FW_RI_DATA_IMMD; - wqe->send.u.immd_src[0].r1 = 0; - 
wqe->send.u.immd_src[0].r2 = 0; - wqe->send.u.immd_src[0].immdlen = cpu_to_be32(plen); + ret = build_immd(sq, wqe->send.u.immd_src, wr, + T4_MAX_SEND_INLINE, &plen); + if (ret) + return ret; size = sizeof wqe->send + sizeof(struct fw_ri_immd) + plen; } else { - for (i = 0; i < wr->num_sge; i++) { - if ((plen + wr->sg_list[i].length) < plen) - return -EMSGSIZE; - plen += wr->sg_list[i].length; - wqe->send.u.isgl_src[0].sge[i].stag = - cpu_to_be32(wr->sg_list[i].lkey); - wqe->send.u.isgl_src[0].sge[i].len = - cpu_to_be32(wr->sg_list[i].length); - wqe->send.u.isgl_src[0].sge[i].to = - cpu_to_be64(wr->sg_list[i].addr); - } - wqe->send.u.isgl_src[0].op = FW_RI_DATA_ISGL; - wqe->send.u.isgl_src[0].r1 = 0; - wqe->send.u.isgl_src[0].nsge = cpu_to_be16(wr->num_sge); - wqe->send.u.isgl_src[0].r2 = 0; + ret = build_isgl((__be64 *)sq->queue, + (__be64 *)&sq->queue[sq->size], + wqe->send.u.isgl_src, + wr->sg_list, wr->num_sge, &plen); + if (ret) + return ret; size = sizeof wqe->send + sizeof(struct fw_ri_isgl) + wr->num_sge * sizeof(struct fw_ri_sge); } @@ -313,62 +359,40 @@ static int build_rdma_send(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16) wqe->send.u.immd_src[0].r2 = 0; wqe->send.u.immd_src[0].immdlen = 0; size = sizeof wqe->send + sizeof(struct fw_ri_immd); + plen = 0; } *len16 = DIV_ROUND_UP(size, 16); wqe->send.plen = cpu_to_be32(plen); return 0; } -static int build_rdma_write(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16) +static int build_rdma_write(struct t4_sq *sq, union t4_wr *wqe, + struct ib_send_wr *wr, u8 *len16) { - int i; u32 plen; int size; - u8 *datap; + int ret; - if (wr->num_sge > T4_MAX_WRITE_SGE) + if (wr->num_sge > T4_MAX_SEND_SGE) return -EINVAL; wqe->write.r2 = 0; wqe->write.stag_sink = cpu_to_be32(wr->wr.rdma.rkey); wqe->write.to_sink = cpu_to_be64(wr->wr.rdma.remote_addr); - plen = 0; if (wr->num_sge) { if (wr->send_flags & IB_SEND_INLINE) { - datap = (u8 *)wqe->write.u.immd_src[0].data; - for (i = 0; i < wr->num_sge; i++) { - if ((plen + wr->sg_list[i].length) > - T4_MAX_WRITE_INLINE) { - return -EMSGSIZE; - } - plen += wr->sg_list[i].length; - memcpy(datap, - (void *)(unsigned long)wr->sg_list[i].addr, - wr->sg_list[i].length); - datap += wr->sg_list[i].length; - } - wqe->write.u.immd_src[0].op = FW_RI_DATA_IMMD; - wqe->write.u.immd_src[0].r1 = 0; - wqe->write.u.immd_src[0].r2 = 0; - wqe->write.u.immd_src[0].immdlen = cpu_to_be32(plen); + ret = build_immd(sq, wqe->write.u.immd_src, wr, + T4_MAX_WRITE_INLINE, &plen); + if (ret) + return ret; size = sizeof wqe->write + sizeof(struct fw_ri_immd) + plen; } else { - for (i = 0; i < wr->num_sge; i++) { - if ((plen + wr->sg_list[i].length) < plen) - return -EMSGSIZE; - plen += wr->sg_list[i].length; - wqe->write.u.isgl_src[0].sge[i].stag = - cpu_to_be32(wr->sg_list[i].lkey); - wqe->write.u.isgl_src[0].sge[i].len = - cpu_to_be32(wr->sg_list[i].length); - wqe->write.u.isgl_src[0].sge[i].to = - cpu_to_be64(wr->sg_list[i].addr); - } - wqe->write.u.isgl_src[0].op = FW_RI_DATA_ISGL; - wqe->write.u.isgl_src[0].r1 = 0; - wqe->write.u.isgl_src[0].nsge = - cpu_to_be16(wr->num_sge); - wqe->write.u.isgl_src[0].r2 = 0; + ret = build_isgl((__be64 *)sq->queue, + (__be64 *)&sq->queue[sq->size], + wqe->write.u.isgl_src, + wr->sg_list, wr->num_sge, &plen); + if (ret) + return ret; size = sizeof wqe->write + sizeof(struct fw_ri_isgl) + wr->num_sge * sizeof(struct fw_ri_sge); } @@ -378,6 +402,7 @@ static int build_rdma_write(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16) wqe->write.u.immd_src[0].r2 = 0; 
wqe->write.u.immd_src[0].immdlen = 0; size = sizeof wqe->write + sizeof(struct fw_ri_immd); + plen = 0; } *len16 = DIV_ROUND_UP(size, 16); wqe->write.plen = cpu_to_be32(plen); @@ -416,29 +441,13 @@ static int build_rdma_read(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16) static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe, struct ib_recv_wr *wr, u8 *len16) { - int i; - int plen = 0; + int ret; - for (i = 0; i < wr->num_sge; i++) { - if ((plen + wr->sg_list[i].length) < plen) - return -EMSGSIZE; - plen += wr->sg_list[i].length; - wqe->recv.isgl.sge[i].stag = - cpu_to_be32(wr->sg_list[i].lkey); - wqe->recv.isgl.sge[i].len = - cpu_to_be32(wr->sg_list[i].length); - wqe->recv.isgl.sge[i].to = - cpu_to_be64(wr->sg_list[i].addr); - } - for (; i < T4_MAX_RECV_SGE; i++) { - wqe->recv.isgl.sge[i].stag = 0; - wqe->recv.isgl.sge[i].len = 0; - wqe->recv.isgl.sge[i].to = 0; - } - wqe->recv.isgl.op = FW_RI_DATA_ISGL; - wqe->recv.isgl.r1 = 0; - wqe->recv.isgl.nsge = cpu_to_be16(wr->num_sge); - wqe->recv.isgl.r2 = 0; + ret = build_isgl((__be64 *)qhp->wq.rq.queue, + (__be64 *)&qhp->wq.rq.queue[qhp->wq.rq.size], + &wqe->recv.isgl, wr->sg_list, wr->num_sge, NULL); + if (ret) + return ret; *len16 = DIV_ROUND_UP(sizeof wqe->recv + wr->num_sge * sizeof(struct fw_ri_sge), 16); return 0; @@ -547,7 +556,9 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, *bad_wr = wr; break; } - wqe = &qhp->wq.sq.queue[qhp->wq.sq.pidx]; + wqe = (union t4_wr *)((u8 *)qhp->wq.sq.queue + + qhp->wq.sq.wq_pidx * T4_EQ_ENTRY_SIZE); + fw_flags = 0; if (wr->send_flags & IB_SEND_SOLICITED) fw_flags |= FW_RI_SOLICITED_EVENT_FLAG; @@ -564,12 +575,12 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, swsqe->opcode = FW_RI_SEND; else swsqe->opcode = FW_RI_SEND_WITH_INV; - err = build_rdma_send(wqe, wr, &len16); + err = build_rdma_send(&qhp->wq.sq, wqe, wr, &len16); break; case IB_WR_RDMA_WRITE: fw_opcode = FW_RI_RDMA_WRITE_WR; swsqe->opcode = FW_RI_RDMA_WRITE; - err = build_rdma_write(wqe, wr, &len16); + err = build_rdma_write(&qhp->wq.sq, wqe, wr, &len16); break; case IB_WR_RDMA_READ: case IB_WR_RDMA_READ_WITH_INV: @@ -619,8 +630,8 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, swsqe->opcode, swsqe->read_len); wr = wr->next; num_wrs--; - t4_sq_produce(&qhp->wq); - idx++; + t4_sq_produce(&qhp->wq, len16); + idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE); } if (t4_wq_db_enabled(&qhp->wq)) t4_ring_sq_db(&qhp->wq, idx); @@ -656,7 +667,9 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, *bad_wr = wr; break; } - wqe = &qhp->wq.rq.queue[qhp->wq.rq.pidx]; + wqe = (union t4_recv_wr *)((u8 *)qhp->wq.rq.queue + + qhp->wq.rq.wq_pidx * + T4_EQ_ENTRY_SIZE); if (num_wrs) err = build_rdma_recv(qhp, wqe, wr, &len16); else @@ -675,15 +688,12 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, wqe->recv.r2[1] = 0; wqe->recv.r2[2] = 0; wqe->recv.len16 = len16; - if (len16 < 5) - wqe->flits[8] = 0; - PDBG("%s cookie 0x%llx pidx %u\n", __func__, (unsigned long long) wr->wr_id, qhp->wq.rq.pidx); - t4_rq_produce(&qhp->wq); + t4_rq_produce(&qhp->wq, len16); + idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE); wr = wr->next; num_wrs--; - idx++; } if (t4_wq_db_enabled(&qhp->wq)) t4_ring_rq_db(&qhp->wq, idx); diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h index 9cf8d85..aef55f4 100644 --- a/drivers/infiniband/hw/cxgb4/t4.h +++ b/drivers/infiniband/hw/cxgb4/t4.h @@ -65,10 +65,10 @@ struct t4_status_page { u8 db_off; }; -#define 
T4_EQ_SIZE 64 +#define T4_EQ_ENTRY_SIZE 64 #define T4_SQ_NUM_SLOTS 4 -#define T4_SQ_NUM_BYTES (T4_EQ_SIZE * T4_SQ_NUM_SLOTS) +#define T4_SQ_NUM_BYTES (T4_EQ_ENTRY_SIZE * T4_SQ_NUM_SLOTS) #define T4_MAX_SEND_SGE ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_send_wr) - \ sizeof(struct fw_ri_isgl)) / sizeof(struct fw_ri_sge)) #define T4_MAX_SEND_INLINE ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_send_wr) - \ @@ -84,7 +84,7 @@ struct t4_status_page { #define T4_MAX_FR_DEPTH (T4_MAX_FR_IMMD / sizeof(u64)) #define T4_RQ_NUM_SLOTS 2 -#define T4_RQ_NUM_BYTES (T4_EQ_SIZE * T4_RQ_NUM_SLOTS) +#define T4_RQ_NUM_BYTES (T4_EQ_ENTRY_SIZE * T4_RQ_NUM_SLOTS) #define T4_MAX_RECV_SGE 4 union t4_wr { @@ -97,20 +97,18 @@ union t4_wr { struct fw_ri_fr_nsmr_wr fr; struct fw_ri_inv_lstag_wr inv; struct t4_status_page status; - __be64 flits[T4_EQ_SIZE / sizeof(__be64) * T4_SQ_NUM_SLOTS]; + __be64 flits[T4_EQ_ENTRY_SIZE / sizeof(__be64) * T4_SQ_NUM_SLOTS]; }; union t4_recv_wr { struct fw_ri_recv_wr recv; struct t4_status_page status; - __be64 flits[T4_EQ_SIZE / sizeof(__be64) * T4_RQ_NUM_SLOTS]; + __be64 flits[T4_EQ_ENTRY_SIZE / sizeof(__be64) * T4_RQ_NUM_SLOTS]; }; static inline void init_wr_hdr(union t4_wr *wqe, u16 wrid, enum fw_wr_opcodes opcode, u8 flags, u8 len16) { - int slots_used; - wqe->send.opcode = (u8)opcode; wqe->send.flags = flags; wqe->send.wrid = wrid; @@ -118,12 +116,6 @@ static inline void init_wr_hdr(union t4_wr *wqe, u16 wrid, wqe->send.r1[1] = 0; wqe->send.r1[2] = 0; wqe->send.len16 = len16; - - slots_used = DIV_ROUND_UP(len16*16, T4_EQ_SIZE); - while (slots_used < T4_SQ_NUM_SLOTS) { - wqe->flits[slots_used * T4_EQ_SIZE / sizeof(__be64)] = 0; - slots_used++; - } } /* CQE/AE status codes */ @@ -289,6 +281,7 @@ struct t4_sq { u16 size; u16 cidx; u16 pidx; + u16 wq_pidx; }; struct t4_swrqe { @@ -310,6 +303,7 @@ struct t4_rq { u16 size; u16 cidx; u16 pidx; + u16 wq_pidx; }; struct t4_wq { @@ -340,11 +334,14 @@ static inline u32 t4_rq_avail(struct t4_wq *wq) return wq->rq.size - 1 - wq->rq.in_use; } -static inline void t4_rq_produce(struct t4_wq *wq) +static inline void t4_rq_produce(struct t4_wq *wq, u8 len16) { wq->rq.in_use++; if (++wq->rq.pidx == wq->rq.size) wq->rq.pidx = 0; + wq->rq.wq_pidx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE); + if (wq->rq.wq_pidx >= wq->rq.size * T4_RQ_NUM_SLOTS) + wq->rq.wq_pidx %= wq->rq.size * T4_RQ_NUM_SLOTS; } static inline void t4_rq_consume(struct t4_wq *wq) @@ -370,11 +367,14 @@ static inline u32 t4_sq_avail(struct t4_wq *wq) return wq->sq.size - 1 - wq->sq.in_use; } -static inline void t4_sq_produce(struct t4_wq *wq) +static inline void t4_sq_produce(struct t4_wq *wq, u8 len16) { wq->sq.in_use++; if (++wq->sq.pidx == wq->sq.size) wq->sq.pidx = 0; + wq->sq.wq_pidx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE); + if (wq->sq.wq_pidx >= wq->sq.size * T4_SQ_NUM_SLOTS) + wq->sq.wq_pidx %= wq->sq.size * T4_SQ_NUM_SLOTS; } static inline void t4_sq_consume(struct t4_wq *wq) @@ -386,14 +386,12 @@ static inline void t4_sq_consume(struct t4_wq *wq) static inline void t4_ring_sq_db(struct t4_wq *wq, u16 inc) { - inc *= T4_SQ_NUM_SLOTS; wmb(); writel(QID(wq->sq.qid) | PIDX(inc), wq->db); } static inline void t4_ring_rq_db(struct t4_wq *wq, u16 inc) { - inc *= T4_RQ_NUM_SLOTS; wmb(); writel(QID(wq->rq.qid) | PIDX(inc), wq->db); } -- cgit v0.10.2 From dd378c2102cf4bf81144db70771d09ecead2cd15 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 7 Jul 2010 21:40:06 +0000 Subject: IB/qib: Use generic_file_llseek When the default llseek action gets changed to no_llseek, all file systems 
relying on the current behaviour need to set explicit .llseek operations. In case of qib_fs, we want the files to be seekable, so generic_file_llseek fits best. Signed-off-by: Arnd Bergmann Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c index 844954b..9f989c0 100644 --- a/drivers/infiniband/hw/qib/qib_fs.c +++ b/drivers/infiniband/hw/qib/qib_fs.c @@ -135,8 +135,8 @@ static ssize_t driver_names_read(struct file *file, char __user *buf, } static const struct file_operations driver_ops[] = { - { .read = driver_stats_read, }, - { .read = driver_names_read, }, + { .read = driver_stats_read, .llseek = generic_file_llseek, }, + { .read = driver_names_read, .llseek = generic_file_llseek, }, }; /* read the per-device counters */ @@ -164,8 +164,8 @@ static ssize_t dev_names_read(struct file *file, char __user *buf, } static const struct file_operations cntr_ops[] = { - { .read = dev_counters_read, }, - { .read = dev_names_read, }, + { .read = dev_counters_read, .llseek = generic_file_llseek, }, + { .read = dev_names_read, .llseek = generic_file_llseek, }, }; /* @@ -210,9 +210,9 @@ static ssize_t portcntrs_2_read(struct file *file, char __user *buf, } static const struct file_operations portcntr_ops[] = { - { .read = portnames_read, }, - { .read = portcntrs_1_read, }, - { .read = portcntrs_2_read, }, + { .read = portnames_read, .llseek = generic_file_llseek, }, + { .read = portcntrs_1_read, .llseek = generic_file_llseek, }, + { .read = portcntrs_2_read, .llseek = generic_file_llseek, }, }; /* @@ -261,8 +261,8 @@ static ssize_t qsfp_2_read(struct file *file, char __user *buf, } static const struct file_operations qsfp_ops[] = { - { .read = qsfp_1_read, }, - { .read = qsfp_2_read, }, + { .read = qsfp_1_read, .llseek = generic_file_llseek, }, + { .read = qsfp_2_read, .llseek = generic_file_llseek, }, }; static ssize_t flash_read(struct file *file, char __user *buf, -- cgit v0.10.2 From 2d978a953b874bac418e0b9475edd1b9125281f6 Mon Sep 17 00:00:00 2001 From: Ralph Campbell Date: Wed, 23 Jun 2010 15:58:31 +0000 Subject: IB/qib: Turn off IB latency mode Turn off IB latency mode. This improves link quality for slower process chips. Signed-off-by: Ralph Campbell Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c index 5eedf83..fc14ef8 100644 --- a/drivers/infiniband/hw/qib/qib_iba7322.c +++ b/drivers/infiniband/hw/qib/qib_iba7322.c @@ -7271,6 +7271,8 @@ static int serdes_7322_init(struct qib_pportdata *ppd) ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */ data = qib_read_kreg_port(ppd, krp_serdesctrl); + /* Turn off IB latency mode */ + data &= ~SYM_MASK(IBSerdesCtrl_0, IB_LAT_MODE); qib_write_kreg_port(ppd, krp_serdesctrl, data | SYM_MASK(IBSerdesCtrl_0, RXLOSEN)); -- cgit v0.10.2 From bdf8edcb5732df554ef53096092e94d22ab5ffc9 Mon Sep 17 00:00:00 2001 From: Dave Olson Date: Thu, 17 Jun 2010 23:13:49 +0000 Subject: IB/qib: Allow PSM to select from multiple port assignment algorithms We used to allow only full specification, or using all contexts within an HCA before moving to the next HCA. We now allow an additional method -- round-robining through HCAs -- and make that the default. 
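In outline, the new default (QIB_PORT_ALG_ACROSS) scans every unit and picks the one with the fewest user contexts already in use, while the old behaviour (QIB_PORT_ALG_WITHIN) fills one HCA completely before moving to the next. A simplified sketch of the new selection loop, condensed from the hunk below:

	/* QIB_PORT_ALG_ACROSS: prefer the HCA with the fewest busy contexts */
	unsigned inuse = ~0U;
	for (ndev = 0; ndev < devmax; ndev++) {
		struct qib_devdata *dd = qib_lookup(ndev);
		unsigned cused = 0, cfree = 0;

		if (!dd)
			continue;
		for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++)
			dd->rcd[ctxt] ? cused++ : cfree++;
		if (cfree && cused < inuse) {	/* fewest in use so far */
			udd = dd;
			inuse = cused;
		}
	}

Userspace opts in via the new spu_port_alg field, which is only honored for user minor version 11 and up.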
Signed-off-by: Dave Olson Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/hw/qib/qib_common.h b/drivers/infiniband/hw/qib/qib_common.h index b3955ed..145da40 100644 --- a/drivers/infiniband/hw/qib/qib_common.h +++ b/drivers/infiniband/hw/qib/qib_common.h @@ -279,7 +279,7 @@ struct qib_base_info { * may not be implemented; the user code must deal with this if it * cares, or it must abort after initialization reports the difference. */ -#define QIB_USER_SWMINOR 10 +#define QIB_USER_SWMINOR 11 #define QIB_USER_SWVERSION ((QIB_USER_SWMAJOR << 16) | QIB_USER_SWMINOR) @@ -302,6 +302,18 @@ struct qib_base_info { #define QIB_KERN_SWVERSION ((QIB_KERN_TYPE << 31) | QIB_USER_SWVERSION) /* + * If the unit is specified via open, HCA choice is fixed. If port is + * specified, it's also fixed. Otherwise we try to spread contexts + * across ports and HCAs, using different algorithims. WITHIN is + * the old default, prior to this mechanism. + */ +#define QIB_PORT_ALG_ACROSS 0 /* round robin contexts across HCAs, then + * ports; this is the default */ +#define QIB_PORT_ALG_WITHIN 1 /* use all contexts on an HCA (round robin + * active ports within), then next HCA */ +#define QIB_PORT_ALG_COUNT 2 /* number of algorithm choices */ + +/* * This structure is passed to qib_userinit() to tell the driver where * user code buffers are, sizes, etc. The offsets and sizes of the * fields must remain unchanged, for binary compatibility. It can @@ -319,7 +331,7 @@ struct qib_user_info { /* size of struct base_info to write to */ __u32 spu_base_info_size; - __u32 _spu_unused3; + __u32 spu_port_alg; /* which QIB_PORT_ALG_*; unused user minor < 11 */ /* * If two or more processes wish to share a context, each process diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c index a142a9e..6b11645 100644 --- a/drivers/infiniband/hw/qib/qib_file_ops.c +++ b/drivers/infiniband/hw/qib/qib_file_ops.c @@ -1294,128 +1294,130 @@ bail: return ret; } -static inline int usable(struct qib_pportdata *ppd, int active_only) +static inline int usable(struct qib_pportdata *ppd) { struct qib_devdata *dd = ppd->dd; - u32 linkok = active_only ? QIBL_LINKACTIVE : - (QIBL_LINKINIT | QIBL_LINKARMED | QIBL_LINKACTIVE); return dd && (dd->flags & QIB_PRESENT) && dd->kregbase && ppd->lid && - (ppd->lflags & linkok); + (ppd->lflags & QIBL_LINKACTIVE); } -static int find_free_ctxt(int unit, struct file *fp, - const struct qib_user_info *uinfo) +/* + * Select a context on the given device, either using a requested port + * or the port based on the context number. + */ +static int choose_port_ctxt(struct file *fp, struct qib_devdata *dd, u32 port, + const struct qib_user_info *uinfo) { - struct qib_devdata *dd = qib_lookup(unit); struct qib_pportdata *ppd = NULL; - int ret; - u32 ctxt; + int ret, ctxt; - if (!dd || (uinfo->spu_port && uinfo->spu_port > dd->num_pports)) { - ret = -ENODEV; - goto bail; - } - - /* - * If users requests specific port, only try that one port, else - * select "best" port below, based on context. 
- */ - if (uinfo->spu_port) { - ppd = dd->pport + uinfo->spu_port - 1; - if (!usable(ppd, 0)) { + if (port) { + if (!usable(dd->pport + port - 1)) { ret = -ENETDOWN; - goto bail; - } + goto done; + } else + ppd = dd->pport + port - 1; } - - for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) { - if (dd->rcd[ctxt]) - continue; - /* - * The setting and clearing of user context rcd[x] protected - * by the qib_mutex - */ - if (!ppd) { - /* choose port based on ctxt, if up, else 1st up */ - ppd = dd->pport + (ctxt % dd->num_pports); - if (!usable(ppd, 0)) { - int i; - for (i = 0; i < dd->num_pports; i++) { - ppd = dd->pport + i; - if (usable(ppd, 0)) - break; - } - if (i == dd->num_pports) { - ret = -ENETDOWN; - goto bail; - } - } + for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts && dd->rcd[ctxt]; + ctxt++) + ; + if (ctxt == dd->cfgctxts) { + ret = -EBUSY; + goto done; + } + if (!ppd) { + u32 pidx = ctxt % dd->num_pports; + if (usable(dd->pport + pidx)) + ppd = dd->pport + pidx; + else { + for (pidx = 0; pidx < dd->num_pports && !ppd; + pidx++) + if (usable(dd->pport + pidx)) + ppd = dd->pport + pidx; } - ret = setup_ctxt(ppd, ctxt, fp, uinfo); - goto bail; } - ret = -EBUSY; + ret = ppd ? setup_ctxt(ppd, ctxt, fp, uinfo) : -ENETDOWN; +done: + return ret; +} + +static int find_free_ctxt(int unit, struct file *fp, + const struct qib_user_info *uinfo) +{ + struct qib_devdata *dd = qib_lookup(unit); + int ret; + + if (!dd || (uinfo->spu_port && uinfo->spu_port > dd->num_pports)) + ret = -ENODEV; + else + ret = choose_port_ctxt(fp, dd, uinfo->spu_port, uinfo); -bail: return ret; } -static int get_a_ctxt(struct file *fp, const struct qib_user_info *uinfo) +static int get_a_ctxt(struct file *fp, const struct qib_user_info *uinfo, + unsigned alg) { - struct qib_pportdata *ppd; - int ret = 0, devmax; - int npresent, nup; - int ndev; + struct qib_devdata *udd = NULL; + int ret = 0, devmax, npresent, nup, ndev, dusable = 0, i; u32 port = uinfo->spu_port, ctxt; devmax = qib_count_units(&npresent, &nup); + if (!npresent) { + ret = -ENXIO; + goto done; + } + if (nup == 0) { + ret = -ENETDOWN; + goto done; + } - for (ndev = 0; ndev < devmax; ndev++) { - struct qib_devdata *dd = qib_lookup(ndev); - - /* device portion of usable() */ - if (!(dd && (dd->flags & QIB_PRESENT) && dd->kregbase)) - continue; - for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) { - if (dd->rcd[ctxt]) + if (alg == QIB_PORT_ALG_ACROSS) { + unsigned inuse = ~0U; + /* find device (with ACTIVE ports) with fewest ctxts in use */ + for (ndev = 0; ndev < devmax; ndev++) { + struct qib_devdata *dd = qib_lookup(ndev); + unsigned cused = 0, cfree = 0; + if (!dd) continue; - if (port) { - if (port > dd->num_pports) - continue; - ppd = dd->pport + port - 1; - if (!usable(ppd, 0)) - continue; - } else { - /* - * choose port based on ctxt, if up, else - * first port that's up for multi-port HCA - */ - ppd = dd->pport + (ctxt % dd->num_pports); - if (!usable(ppd, 0)) { - int j; - - ppd = NULL; - for (j = 0; j < dd->num_pports && - !ppd; j++) - if (usable(dd->pport + j, 0)) - ppd = dd->pport + j; - if (!ppd) - continue; /* to next unit */ - } + if (port && port <= dd->num_pports && + usable(dd->pport + port - 1)) + dusable = 1; + else + for (i = 0; i < dd->num_pports; i++) + if (usable(dd->pport + i)) + dusable++; + if (!dusable) + continue; + for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; + ctxt++) + if (dd->rcd[ctxt]) + cused++; + else + cfree++; + if (cfree && cused < inuse) { + udd = dd; + inuse = cused; } - ret = 
setup_ctxt(ppd, ctxt, fp, uinfo); + } + if (udd) { + ret = choose_port_ctxt(fp, udd, port, uinfo); goto done; } + } else { + for (ndev = 0; ndev < devmax; ndev++) { + struct qib_devdata *dd = qib_lookup(ndev); + if (dd) { + ret = choose_port_ctxt(fp, dd, port, uinfo); + if (!ret) + goto done; + if (ret == -EBUSY) + dusable++; + } + } } - - if (npresent) { - if (nup == 0) - ret = -ENETDOWN; - else - ret = -EBUSY; - } else - ret = -ENXIO; + ret = dusable ? -EBUSY : -ENETDOWN; done: return ret; @@ -1481,7 +1483,7 @@ static int qib_assign_ctxt(struct file *fp, const struct qib_user_info *uinfo) { int ret; int i_minor; - unsigned swmajor, swminor; + unsigned swmajor, swminor, alg = QIB_PORT_ALG_ACROSS; /* Check to be sure we haven't already initialized this file */ if (ctxt_fp(fp)) { @@ -1498,6 +1500,9 @@ static int qib_assign_ctxt(struct file *fp, const struct qib_user_info *uinfo) swminor = uinfo->spu_userversion & 0xffff; + if (swminor >= 11 && uinfo->spu_port_alg < QIB_PORT_ALG_COUNT) + alg = uinfo->spu_port_alg; + mutex_lock(&qib_mutex); if (qib_compatible_subctxts(swmajor, swminor) && @@ -1514,7 +1519,7 @@ static int qib_assign_ctxt(struct file *fp, const struct qib_user_info *uinfo) if (i_minor) ret = find_free_ctxt(i_minor - 1, fp, uinfo); else - ret = get_a_ctxt(fp, uinfo); + ret = get_a_ctxt(fp, uinfo, alg); done_chk_sdma: if (!ret) { @@ -1862,7 +1867,7 @@ static int disarm_req_delay(struct qib_ctxtdata *rcd) { int ret = 0; - if (!usable(rcd->ppd, 1)) { + if (!usable(rcd->ppd)) { int i; /* * if link is down, or otherwise not usable, delay @@ -1881,7 +1886,7 @@ static int disarm_req_delay(struct qib_ctxtdata *rcd) set_bit(_QIB_EVENT_DISARM_BUFS_BIT, &rcd->user_event_mask[i]); } - for (i = 0; !usable(rcd->ppd, 1) && i < 300; i++) + for (i = 0; !usable(rcd->ppd) && i < 300; i++) msleep(100); ret = -ENETDOWN; } -- cgit v0.10.2 From e675b6db1245649f91bb1bfb10baef9af6d3f8e6 Mon Sep 17 00:00:00 2001 From: Alexander Schmidt Date: Mon, 5 Jul 2010 16:19:25 +0000 Subject: IB/ehca: Catch failing ioremap() When ioremap() fails with a NULL pointer, catch the error and pass it to the caller of create_qp() or create_cq() instead of trying to dereference the NULL pointer later on. Signed-off-by: Alexander Schmidt Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/hw/ehca/hcp_if.c b/drivers/infiniband/hw/ehca/hcp_if.c index 4d5dc33..e6f9cdd 100644 --- a/drivers/infiniband/hw/ehca/hcp_if.c +++ b/drivers/infiniband/hw/ehca/hcp_if.c @@ -269,6 +269,7 @@ u64 hipz_h_alloc_resource_cq(const struct ipz_adapter_handle adapter_handle, struct ehca_cq *cq, struct ehca_alloc_cq_parms *param) { + int rc; u64 ret; unsigned long outs[PLPAR_HCALL9_BUFSIZE]; @@ -283,8 +284,19 @@ u64 hipz_h_alloc_resource_cq(const struct ipz_adapter_handle adapter_handle, param->act_nr_of_entries = (u32)outs[3]; param->act_pages = (u32)outs[4]; - if (ret == H_SUCCESS) - hcp_galpas_ctor(&cq->galpas, 0, outs[5], outs[6]); + if (ret == H_SUCCESS) { + rc = hcp_galpas_ctor(&cq->galpas, 0, outs[5], outs[6]); + if (rc) { + ehca_gen_err("Could not establish HW access. rc=%d paddr=%#lx", + rc, outs[5]); + + ehca_plpar_hcall_norets(H_FREE_RESOURCE, + adapter_handle.handle, /* r4 */ + cq->ipz_cq_handle.handle, /* r5 */ + 0, 0, 0, 0, 0); + ret = H_NO_MEM; + } + } if (ret == H_NOT_ENOUGH_RESOURCES) ehca_gen_err("Not enough resources. 
ret=%lli", ret); @@ -295,6 +307,7 @@ u64 hipz_h_alloc_resource_cq(const struct ipz_adapter_handle adapter_handle, u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle, struct ehca_alloc_qp_parms *parms, int is_user) { + int rc; u64 ret; u64 allocate_controls, max_r10_reg, r11, r12; unsigned long outs[PLPAR_HCALL9_BUFSIZE]; @@ -358,8 +371,19 @@ u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle, parms->rqueue.queue_size = (u32)EHCA_BMASK_GET(H_ALL_RES_QP_RQUEUE_SIZE_PAGES, outs[4]); - if (ret == H_SUCCESS) - hcp_galpas_ctor(&parms->galpas, is_user, outs[6], outs[6]); + if (ret == H_SUCCESS) { + rc = hcp_galpas_ctor(&parms->galpas, is_user, outs[6], outs[6]); + if (rc) { + ehca_gen_err("Could not establish HW access. rc=%d paddr=%#lx", + rc, outs[6]); + + ehca_plpar_hcall_norets(H_FREE_RESOURCE, + adapter_handle.handle, /* r4 */ + parms->qp_handle.handle, /* r5 */ + 0, 0, 0, 0, 0); + ret = H_NO_MEM; + } + } if (ret == H_NOT_ENOUGH_RESOURCES) ehca_gen_err("Not enough resources. ret=%lli", ret); diff --git a/drivers/infiniband/hw/ehca/hcp_phyp.c b/drivers/infiniband/hw/ehca/hcp_phyp.c index b3e0e72..077376f 100644 --- a/drivers/infiniband/hw/ehca/hcp_phyp.c +++ b/drivers/infiniband/hw/ehca/hcp_phyp.c @@ -42,10 +42,9 @@ #include "ehca_classes.h" #include "hipz_hw.h" -int hcall_map_page(u64 physaddr, u64 *mapaddr) +u64 hcall_map_page(u64 physaddr) { - *mapaddr = (u64)(ioremap(physaddr, EHCA_PAGESIZE)); - return 0; + return (u64)ioremap(physaddr, EHCA_PAGESIZE); } int hcall_unmap_page(u64 mapaddr) @@ -58,9 +57,9 @@ int hcp_galpas_ctor(struct h_galpas *galpas, int is_user, u64 paddr_kernel, u64 paddr_user) { if (!is_user) { - int ret = hcall_map_page(paddr_kernel, &galpas->kernel.fw_handle); - if (ret) - return ret; + galpas->kernel.fw_handle = hcall_map_page(paddr_kernel); + if (!galpas->kernel.fw_handle) + return -ENOMEM; } else galpas->kernel.fw_handle = 0; diff --git a/drivers/infiniband/hw/ehca/hcp_phyp.h b/drivers/infiniband/hw/ehca/hcp_phyp.h index 204227d..d1b0299 100644 --- a/drivers/infiniband/hw/ehca/hcp_phyp.h +++ b/drivers/infiniband/hw/ehca/hcp_phyp.h @@ -83,7 +83,7 @@ int hcp_galpas_ctor(struct h_galpas *galpas, int is_user, int hcp_galpas_dtor(struct h_galpas *galpas); -int hcall_map_page(u64 physaddr, u64 * mapaddr); +u64 hcall_map_page(u64 physaddr); int hcall_unmap_page(u64 mapaddr); -- cgit v0.10.2 From 33085bb8da9c77c2d5572125385efa87627d8b59 Mon Sep 17 00:00:00 2001 From: Peter Huewe Date: Thu, 15 Jul 2010 19:01:21 +0000 Subject: RDMA/nes: Convert pci_table entries to PCI_VDEVICE This patch converts pci_table entries, where .subvendor=PCI_ANY_ID and .subdevice=PCI_ANY_ID, .class=0 and .class_mask=0, to use the PCI_VDEVICE macro, and thus improves readability. 
Signed-off-by: Peter Huewe Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c index de7b9d7..16b7832 100644 --- a/drivers/infiniband/hw/nes/nes.c +++ b/drivers/infiniband/hw/nes/nes.c @@ -110,8 +110,8 @@ static unsigned int sysfs_nonidx_addr; static unsigned int sysfs_idx_addr; static struct pci_device_id nes_pci_table[] = { - {PCI_VENDOR_ID_NETEFFECT, PCI_DEVICE_ID_NETEFFECT_NE020, PCI_ANY_ID, PCI_ANY_ID}, - {PCI_VENDOR_ID_NETEFFECT, PCI_DEVICE_ID_NETEFFECT_NE020_KR, PCI_ANY_ID, PCI_ANY_ID}, + { PCI_VDEVICE(NETEFFECT, PCI_DEVICE_ID_NETEFFECT_NE020), }, + { PCI_VDEVICE(NETEFFECT, PCI_DEVICE_ID_NETEFFECT_NE020_KR), }, {0} }; -- cgit v0.10.2 From 0eec495ee69e3fbbe2ef2b244a0a988a4fe2e887 Mon Sep 17 00:00:00 2001 From: Chien Tung Date: Wed, 9 Jun 2010 20:19:53 +0000 Subject: RDMA/nes: Store and print eeprom version Read and print eeprom version and save it off for later use. Also delete a tab. Signed-off-by: Chien Tung Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/hw/nes/nes_hw.h b/drivers/infiniband/hw/nes/nes_hw.h index bbbfe9f..aa9183d 100644 --- a/drivers/infiniband/hw/nes/nes_hw.h +++ b/drivers/infiniband/hw/nes/nes_hw.h @@ -1100,11 +1100,12 @@ struct nes_adapter { u32 wqm_wat; u32 core_clock; u32 firmware_version; + u32 eeprom_version; u32 nic_rx_eth_route_err; u32 et_rx_coalesce_usecs; - u32 et_rx_max_coalesced_frames; + u32 et_rx_max_coalesced_frames; u32 et_rx_coalesce_usecs_irq; u32 et_rx_max_coalesced_frames_irq; u32 et_pkt_rate_low; diff --git a/drivers/infiniband/hw/nes/nes_utils.c b/drivers/infiniband/hw/nes/nes_utils.c index a9f5dd2..f9c417c 100644 --- a/drivers/infiniband/hw/nes/nes_utils.c +++ b/drivers/infiniband/hw/nes/nes_utils.c @@ -190,6 +190,11 @@ int nes_read_eeprom_values(struct nes_device *nesdev, struct nes_adapter *nesada nesadapter->firmware_version = (((u32)(u8)(eeprom_data>>8)) << 16) + (u32)((u8)eeprom_data); + eeprom_data = nes_read16_eeprom(nesdev->regs, next_section_address + 10); + printk(PFX "EEPROM version %u.%u\n", (u8)(eeprom_data>>8), (u8)eeprom_data); + nesadapter->eeprom_version = (((u32)(u8)(eeprom_data>>8)) << 16) + + (u32)((u8)eeprom_data); + no_fw_rev: /* eeprom is valid */ eeprom_offset = nesadapter->software_eeprom_offset; -- cgit v0.10.2 From cd6860eb036ab4320d591fdd056f86172438fae4 Mon Sep 17 00:00:00 2001 From: Faisal Latif Date: Sun, 4 Jul 2010 00:17:59 +0000 Subject: RDMA/nes: Fix hangs on ifdown When ib_unregister_device() is called from netdev stop during ifdown, it sometimes hangs. Changes made to indicate port_err to ib_dispatch_event() during netdev stop and port_active during netdev open. The ib_unregister_device() is only called during remove of the module. 
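The shape of the fix: netdev open/stop no longer create and destroy the OFA device; they just flip iw_status and dispatch a port event, leaving ib_unregister_device() to module removal. Condensed from the hunks below:

	static int nes_netdev_stop(struct net_device *netdev)
	{
		...
		if (nesvnic->of_device_registered) {
			nesdev->nesadapter->send_term_ok = 0;
			nesdev->iw_status = 0;
			nes_port_ibevent(nesvnic);  /* reports IB_EVENT_PORT_ERR */
		}
		...
	}

The terminate path also checks iw_status, so a downed port moves the QP straight to ERROR instead of trying to send a terminate message.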
Signed-off-by: Faisal Latif Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h index cc78fee..b3d145e 100644 --- a/drivers/infiniband/hw/nes/nes.h +++ b/drivers/infiniband/hw/nes/nes.h @@ -262,6 +262,7 @@ struct nes_device { u16 base_doorbell_index; u16 currcq_count; u16 deepcq_count; + u8 iw_status; u8 msi_enabled; u8 netdev_count; u8 napi_isr_ran; @@ -527,6 +528,7 @@ void nes_cm_disconn_worker(void *); int nes_hw_modify_qp(struct nes_device *, struct nes_qp *, u32, u32, u32); int nes_modify_qp(struct ib_qp *, struct ib_qp_attr *, int, struct ib_udata *); struct nes_ib_device *nes_init_ofa_device(struct net_device *); +void nes_port_ibevent(struct nes_vnic *nesvnic); void nes_destroy_ofa_device(struct nes_ib_device *); int nes_register_ofa_device(struct nes_ib_device *); diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c index f41d890..199107a 100644 --- a/drivers/infiniband/hw/nes/nes_hw.c +++ b/drivers/infiniband/hw/nes/nes_hw.c @@ -3283,9 +3283,15 @@ static void nes_terminate_connection(struct nes_device *nesdev, struct nes_qp *n else mod_qp_flags |= NES_CQP_QP_TERM_DONT_SEND_TERM_MSG; - nes_terminate_start_timer(nesqp); - nesqp->term_flags |= NES_TERM_SENT; - nes_hw_modify_qp(nesdev, nesqp, mod_qp_flags, termlen, 0); + if (!nesdev->iw_status) { + nesqp->term_flags = NES_TERM_DONE; + nes_hw_modify_qp(nesdev, nesqp, NES_CQP_QP_IWARP_STATE_ERROR, 0, 0); + nes_cm_disconn(nesqp); + } else { + nes_terminate_start_timer(nesqp); + nesqp->term_flags |= NES_TERM_SENT; + nes_hw_modify_qp(nesdev, nesqp, mod_qp_flags, termlen, 0); + } } static void nes_terminate_send_fin(struct nes_device *nesdev, diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c index 5cc0a9a..c0c404e 100644 --- a/drivers/infiniband/hw/nes/nes_nic.c +++ b/drivers/infiniband/hw/nes/nes_nic.c @@ -232,6 +232,13 @@ static int nes_netdev_open(struct net_device *netdev) NES_MAC_INT_TX_UNDERFLOW | NES_MAC_INT_TX_ERROR)); first_nesvnic = nesvnic; } + + if (nesvnic->of_device_registered) { + nesdev->iw_status = 1; + nesdev->nesadapter->send_term_ok = 1; + nes_port_ibevent(nesvnic); + } + if (first_nesvnic->linkup) { /* Enable network packets */ nesvnic->linkup = 1; @@ -309,9 +316,9 @@ static int nes_netdev_stop(struct net_device *netdev) if (nesvnic->of_device_registered) { - nes_destroy_ofa_device(nesvnic->nesibdev); - nesvnic->nesibdev = NULL; - nesvnic->of_device_registered = 0; + nesdev->nesadapter->send_term_ok = 0; + nesdev->iw_status = 0; + nes_port_ibevent(nesvnic); } nes_destroy_nic_qp(nesvnic); diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c index 9bc2d74..3b95d04 100644 --- a/drivers/infiniband/hw/nes/nes_verbs.c +++ b/drivers/infiniband/hw/nes/nes_verbs.c @@ -3058,6 +3058,7 @@ int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, nesqp->hte_added = 0; } if ((nesqp->hw_tcp_state > NES_AEQE_TCP_STATE_CLOSED) && + (nesdev->iw_status) && (nesqp->hw_tcp_state != NES_AEQE_TCP_STATE_TIME_WAIT)) { next_iwarp_state |= NES_CQP_QP_RESET; } else { @@ -3936,6 +3937,17 @@ struct nes_ib_device *nes_init_ofa_device(struct net_device *netdev) return nesibdev; } +void nes_port_ibevent(struct nes_vnic *nesvnic) +{ + struct nes_ib_device *nesibdev = nesvnic->nesibdev; + struct nes_device *nesdev = nesvnic->nesdev; + struct ib_event event; + event.device = &nesibdev->ibdev; + event.element.port_num = nesvnic->logical_port + 1; + event.event = nesdev->iw_status ? 
IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR; + ib_dispatch_event(&event); +} + /** * nes_destroy_ofa_device -- cgit v0.10.2 From 50a025c69ee749d822c301f9bf63dee13c113680 Mon Sep 17 00:00:00 2001 From: Sean Hefty Date: Wed, 21 Jul 2010 23:36:52 +0000 Subject: IB/cm: Check LAP state before sending an MRA NULL pointer dereferences in ib_cm_init_qp_attr() were seen by some users. From a crash dump, I determined that we died in cm_init_qp_rts_attr() (it's inlined, so it doesn't show up in the traceback) on the line labeled below: static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv, struct ib_qp_attr *qp_attr, int *qp_attr_mask) { ........ if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT) { ..... } else { *qp_attr_mask = IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE; qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num; <-die The problem is that the rdma_cm can call ib_send_cm_mra() after a connection has been established. The ib_cm incorrectly assumes that the MRA is in response to a LAP (load alternate path) message, even though no LAP message has been received. The ib_cm needs to check the lap_state before sending an MRA if the cm_id state is established. Reported-by: Arthur Kepner Reported-by: Josh England Signed-off-by: Sean Hefty Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index ad63b79..64e0903 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c @@ -2409,10 +2409,12 @@ int ib_send_cm_mra(struct ib_cm_id *cm_id, msg_response = CM_MSG_RESPONSE_REP; break; case IB_CM_ESTABLISHED: - cm_state = cm_id->state; - lap_state = IB_CM_MRA_LAP_SENT; - msg_response = CM_MSG_RESPONSE_OTHER; - break; + if (cm_id->lap_state == IB_CM_LAP_RCVD) { + cm_state = cm_id->state; + lap_state = IB_CM_MRA_LAP_SENT; + msg_response = CM_MSG_RESPONSE_OTHER; + break; + } default: ret = -EINVAL; goto error1; -- cgit v0.10.2 From 73d6fcad2ac84b6fad326d87dc1dd0b29aabbd34 Mon Sep 17 00:00:00 2001 From: Steve Wise Date: Fri, 23 Jul 2010 19:12:27 +0000 Subject: RDMA/cxgb4: Fix race in fini path There exists a race condition where the app disconnects, which initiates an orderly close (via rdma_fini()), concurrently with an ingress abort condition, which initiates an abortive close operation. Since rdma_fini() must be called without IRQs disabled, the fini can be called after the QP has been transitioned to ERROR. This is ok, but we need to protect against qp->ep getting NULLed. 
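The core of the fix: snapshot qhp->ep while still holding the QP lock, pin it, and hand that stable pointer to rdma_fini() instead of re-reading qhp->ep after the lock is dropped (by which point a concurrent abortive close may have cleared it). Condensed from the hunk below:

	ep = qhp->ep;                           /* snapshot under qhp->lock  */
	if (!internal)
		c4iw_get_ep(&ep->com);          /* pin for the external case */
	spin_unlock_irqrestore(&qhp->lock, flag);
	ret = rdma_fini(rhp, qhp, ep);          /* use the snapshot          */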
Signed-off-by: Steve Wise Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c index 657a5b3..c9aaf24 100644 --- a/drivers/infiniband/hw/cxgb4/qp.c +++ b/drivers/infiniband/hw/cxgb4/qp.c @@ -961,7 +961,8 @@ static void flush_qp(struct c4iw_qp *qhp, unsigned long *flag) __flush_qp(qhp, rchp, schp, flag); } -static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp) +static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp, + struct c4iw_ep *ep) { struct fw_ri_wr *wqe; int ret; @@ -969,12 +970,12 @@ static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp) struct sk_buff *skb; PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid, - qhp->ep->hwtid); + ep->hwtid); skb = alloc_skb(sizeof *wqe, GFP_KERNEL); if (!skb) return -ENOMEM; - set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx); + set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe)); memset(wqe, 0, sizeof *wqe); @@ -982,7 +983,7 @@ static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp) FW_WR_OP(FW_RI_INIT_WR) | FW_WR_COMPL(1)); wqe->flowid_len16 = cpu_to_be32( - FW_WR_FLOWID(qhp->ep->hwtid) | + FW_WR_FLOWID(ep->hwtid) | FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16))); wqe->cookie = (u64)&wr_wait; @@ -1212,17 +1213,16 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp, case C4IW_QP_STATE_CLOSING: BUG_ON(atomic_read(&qhp->ep->com.kref.refcount) < 2); qhp->attr.state = C4IW_QP_STATE_CLOSING; + ep = qhp->ep; if (!internal) { abort = 0; disconnect = 1; - ep = qhp->ep; c4iw_get_ep(&ep->com); } spin_unlock_irqrestore(&qhp->lock, flag); - ret = rdma_fini(rhp, qhp); + ret = rdma_fini(rhp, qhp, ep); spin_lock_irqsave(&qhp->lock, flag); if (ret) { - ep = qhp->ep; c4iw_get_ep(&ep->com); disconnect = abort = 1; goto err; -- cgit v0.10.2 From d4f1a5c6efabccd4b787a8b5907a5df9204ad2f6 Mon Sep 17 00:00:00 2001 From: Steve Wise Date: Fri, 23 Jul 2010 19:12:32 +0000 Subject: RDMA/cxgb4: Use correct control txq There is only one control txq per tx channel. So use the port number as the queue index when sending. 
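Because there is exactly one control queue per tx channel, the control-queue index is simply the port index, while data txq indices are still spread across the per-channel range. Condensed from the hunks below:

	step = dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan;
	txq_idx   = cxgb4_port_idx(pdev) * step;  /* data: one of 'step' queues */
	ctrlq_idx = cxgb4_port_idx(pdev);         /* control: one per channel   */

Connection-setup and ack CPLs are then steered with set_wr_txq(skb, CPL_PRIORITY_SETUP/ACK, ep->ctrlq_idx).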
Signed-off-by: Steve Wise Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index e7b08dc..ffdc308 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c @@ -473,7 +473,7 @@ static int send_connect(struct c4iw_ep *ep) __func__); return -ENOMEM; } - set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->txq_idx); + set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx); cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx); wscale = compute_wscale(rcv_win); @@ -853,7 +853,7 @@ static int update_rx_credits(struct c4iw_ep *ep, u32 credits) req->credit_dack = cpu_to_be32(credits | RX_FORCE_ACK(1) | F_RX_DACK_CHANGE | V_RX_DACK_MODE(dack_mode)); - set_wr_txq(skb, CPL_PRIORITY_ACK, ep->txq_idx); + set_wr_txq(skb, CPL_PRIORITY_ACK, ep->ctrlq_idx); c4iw_ofld_send(&ep->com.dev->rdev, skb); return credits; } @@ -1295,7 +1295,7 @@ static void accept_cr(struct c4iw_ep *ep, __be32 peer_ip, struct sk_buff *skb, ep->hwtid)); rpl->opt0 = cpu_to_be64(opt0); rpl->opt2 = cpu_to_be32(opt2); - set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->txq_idx); + set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx); c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); return; @@ -1352,7 +1352,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb) u16 rss_qid; u32 mtu; int step; - int txq_idx; + int txq_idx, ctrlq_idx; parent_ep = lookup_stid(t, stid); PDBG("%s parent ep %p tid %u\n", __func__, parent_ep, hwtid); @@ -1384,6 +1384,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb) smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1; step = dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan; txq_idx = cxgb4_port_idx(pdev) * step; + ctrlq_idx = cxgb4_port_idx(pdev); step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan; rss_qid = dev->rdev.lldi.rxq_ids[cxgb4_port_idx(pdev) * step]; dev_put(pdev); @@ -1395,6 +1396,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb) smac_idx = (cxgb4_port_viid(dst->neighbour->dev) & 0x7F) << 1; step = dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan; txq_idx = cxgb4_port_idx(dst->neighbour->dev) * step; + ctrlq_idx = cxgb4_port_idx(dst->neighbour->dev); step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan; rss_qid = dev->rdev.lldi.rxq_ids[ cxgb4_port_idx(dst->neighbour->dev) * step]; @@ -1434,6 +1436,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb) child_ep->rss_qid = rss_qid; child_ep->mtu = mtu; child_ep->txq_idx = txq_idx; + child_ep->ctrlq_idx = ctrlq_idx; PDBG("%s tx_chan %u smac_idx %u rss_qid %u\n", __func__, tx_chan, smac_idx, rss_qid); @@ -1965,6 +1968,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) ep->txq_idx = cxgb4_port_idx(pdev) * step; step = ep->com.dev->rdev.lldi.nrxq / ep->com.dev->rdev.lldi.nchan; + ep->ctrlq_idx = cxgb4_port_idx(pdev); ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[ cxgb4_port_idx(pdev) * step]; dev_put(pdev); @@ -1979,6 +1983,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) step = ep->com.dev->rdev.lldi.ntxq / ep->com.dev->rdev.lldi.nchan; ep->txq_idx = cxgb4_port_idx(ep->dst->neighbour->dev) * step; + ep->ctrlq_idx = cxgb4_port_idx(ep->dst->neighbour->dev); step = ep->com.dev->rdev.lldi.nrxq / ep->com.dev->rdev.lldi.nchan; ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[ diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h index d33e1a6..ed459b8 100644 --- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h +++ 
b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h @@ -619,6 +619,7 @@ struct c4iw_ep { u16 plen; u16 rss_qid; u16 txq_idx; + u16 ctrlq_idx; u8 tos; }; -- cgit v0.10.2 From ca5a22028d0845dd6bcce0dce12a7beda315baf0 Mon Sep 17 00:00:00 2001 From: Steve Wise Date: Fri, 23 Jul 2010 19:12:37 +0000 Subject: RDMA/cxgb4: Set/reset the EP timer inside EP lock Endpoint timer manipulation needs to be done inside the lock. Otherwise we can get into a situation where a timer is stopped before it is started, which hits the WARN_ON() in stop_ep_timer(). Signed-off-by: Steve Wise Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index ffdc308..3eff5df 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c @@ -1484,8 +1484,6 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb) int closing = 0; struct tid_info *t = dev->rdev.lldi.tids; unsigned int tid = GET_TID(hdr); - int start_timer = 0; - int stop_timer = 0; ep = lookup_tid(t, tid); PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); @@ -1522,7 +1520,7 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb) wake_up(&ep->com.waitq); break; case FPDU_MODE: - start_timer = 1; + start_ep_timer(ep); __state_set(&ep->com, CLOSING); closing = 1; peer_close_upcall(ep); @@ -1535,7 +1533,7 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb) disconnect = 0; break; case MORIBUND: - stop_timer = 1; + stop_ep_timer(ep); if (ep->com.cm_id && ep->com.qp) { attrs.next_state = C4IW_QP_STATE_IDLE; c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, @@ -1558,10 +1556,6 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb) c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); } - if (start_timer) - start_ep_timer(ep); - if (stop_timer) - stop_ep_timer(ep); if (disconnect) c4iw_ep_disconnect(ep, 0, GFP_KERNEL); if (release) @@ -1590,7 +1584,6 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb) unsigned long flags; struct tid_info *t = dev->rdev.lldi.tids; unsigned int tid = GET_TID(req); - int stop_timer = 0; ep = lookup_tid(t, tid); if (is_neg_adv_abort(req->status)) { @@ -1605,10 +1598,10 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb) case CONNECTING: break; case MPA_REQ_WAIT: - stop_timer = 1; + stop_ep_timer(ep); break; case MPA_REQ_SENT: - stop_timer = 1; + stop_ep_timer(ep); connect_reply_upcall(ep, -ECONNRESET); break; case MPA_REP_SENT: @@ -1632,7 +1625,7 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb) break; case MORIBUND: case CLOSING: - stop_timer = 1; + stop_ep_timer(ep); /*FALLTHROUGH*/ case FPDU_MODE: if (ep->com.cm_id && ep->com.qp) { @@ -1678,8 +1671,6 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb) rpl->cmd = CPL_ABORT_NO_RST; c4iw_ofld_send(&ep->com.dev->rdev, rpl_skb); out: - if (stop_timer) - stop_ep_timer(ep); if (release) release_ep_resources(ep); return 0; @@ -1694,7 +1685,6 @@ static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb) int release = 0; struct tid_info *t = dev->rdev.lldi.tids; unsigned int tid = GET_TID(rpl); - int stop_timer = 0; ep = lookup_tid(t, tid); @@ -1708,7 +1698,7 @@ static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb) __state_set(&ep->com, MORIBUND); break; case MORIBUND: - stop_timer = 1; + stop_ep_timer(ep); if ((ep->com.cm_id) && (ep->com.qp)) { attrs.next_state = C4IW_QP_STATE_IDLE; c4iw_modify_qp(ep->com.qp->rhp, @@ -1728,8 +1718,6 @@ static int close_con_rpl(struct 
c4iw_dev *dev, struct sk_buff *skb) break; } spin_unlock_irqrestore(&ep->com.lock, flags); - if (stop_timer) - stop_ep_timer(ep); if (release) release_ep_resources(ep); return 0; @@ -2108,8 +2096,6 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp) int close = 0; int fatal = 0; struct c4iw_rdev *rdev; - int start_timer = 0; - int stop_timer = 0; spin_lock_irqsave(&ep->com.lock, flags); @@ -2133,7 +2119,7 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp) ep->com.state = ABORTING; else { ep->com.state = CLOSING; - start_timer = 1; + start_ep_timer(ep); } set_bit(CLOSE_SENT, &ep->com.flags); break; @@ -2141,7 +2127,7 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp) if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) { close = 1; if (abrupt) { - stop_timer = 1; + stop_ep_timer(ep); ep->com.state = ABORTING; } else ep->com.state = MORIBUND; @@ -2159,10 +2145,6 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp) } spin_unlock_irqrestore(&ep->com.lock, flags); - if (start_timer) - start_ep_timer(ep); - if (stop_timer) - stop_ep_timer(ep); if (close) { if (abrupt) ret = abort_connection(ep, NULL, gfp); -- cgit v0.10.2 From 0502f94c62be79d1f4ae6f53ceaefde67ef3cea2 Mon Sep 17 00:00:00 2001 From: Ralph Campbell Date: Wed, 21 Jul 2010 22:46:11 +0000 Subject: IB/qib: Set cfgctxts to number of CPUs by default Up to now, we have set the number of available user contexts based on the number of hardware contexts which is set according to the number of available CPUs. This was fine since most CPUs had a power of two number of cores and the chip supported 4, 8, or 16 user contexts. Now that some systems have 12 cores, the default isn't optimal and should be set to 12 even though 16 hardware contexts need to be enabled. Signed-off-by: Ralph Campbell Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c index fc14ef8..584d443 100644 --- a/drivers/infiniband/hw/qib/qib_iba7322.c +++ b/drivers/infiniband/hw/qib/qib_iba7322.c @@ -5864,7 +5864,7 @@ static void write_7322_initregs(struct qib_devdata *dd) * Doesn't clear any of the error bits that might be set. 
*/ val = TIDFLOW_ERRBITS; /* these are W1C */ - for (i = 0; i < dd->ctxtcnt; i++) { + for (i = 0; i < dd->cfgctxts; i++) { int flow; for (flow = 0; flow < NUM_TIDFLOWS_CTXT; flow++) qib_write_ureg(dd, ur_rcvflowtable+flow, val, i); diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c index a873dd5..f1d16d3 100644 --- a/drivers/infiniband/hw/qib/qib_init.c +++ b/drivers/infiniband/hw/qib/qib_init.c @@ -93,7 +93,7 @@ unsigned long *qib_cpulist; void qib_set_ctxtcnt(struct qib_devdata *dd) { if (!qib_cfgctxts) - dd->cfgctxts = dd->ctxtcnt; + dd->cfgctxts = dd->first_user_ctxt + num_online_cpus(); else if (qib_cfgctxts < dd->num_pports) dd->cfgctxts = dd->ctxtcnt; else if (qib_cfgctxts <= dd->ctxtcnt) -- cgit v0.10.2 From 4c6931f5d4f423238ae6e93423081c6ff9753d26 Mon Sep 17 00:00:00 2001 From: Ira Weiny Date: Wed, 14 Jul 2010 01:53:18 +0000 Subject: IB/qib: Allow writes to the diag_counters to be able to clear them Signed-off-by: Ira Weiny Acked-by: Ralph Campbell Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c index dab4d9f..d50a33f 100644 --- a/drivers/infiniband/hw/qib/qib_sysfs.c +++ b/drivers/infiniband/hw/qib/qib_sysfs.c @@ -347,7 +347,7 @@ static struct kobj_type qib_sl2vl_ktype = { #define QIB_DIAGC_ATTR(N) \ static struct qib_diagc_attr qib_diagc_attr_##N = { \ - .attr = { .name = __stringify(N), .mode = 0444 }, \ + .attr = { .name = __stringify(N), .mode = 0664 }, \ .counter = offsetof(struct qib_ibport, n_##N) \ } @@ -403,8 +403,27 @@ static ssize_t diagc_attr_show(struct kobject *kobj, struct attribute *attr, return sprintf(buf, "%u\n", *(u32 *)((char *)qibp + dattr->counter)); } +static ssize_t diagc_attr_store(struct kobject *kobj, struct attribute *attr, + const char *buf, size_t size) +{ + struct qib_diagc_attr *dattr = + container_of(attr, struct qib_diagc_attr, attr); + struct qib_pportdata *ppd = + container_of(kobj, struct qib_pportdata, diagc_kobj); + struct qib_ibport *qibp = &ppd->ibport_data; + char *endp; + long val = simple_strtol(buf, &endp, 0); + + if (val < 0 || endp == buf) + return -EINVAL; + + *(u32 *)((char *) qibp + dattr->counter) = val; + return size; +} + static const struct sysfs_ops qib_diagc_ops = { .show = diagc_attr_show, + .store = diagc_attr_store, }; static struct kobj_type qib_diagc_ktype = { -- cgit v0.10.2 From 3e3aed0b88f680fed5c604caf7b10d77b2ec45c4 Mon Sep 17 00:00:00 2001 From: Ralph Campbell Date: Mon, 2 Aug 2010 22:39:24 +0000 Subject: IB/qib: Limit the number of packets processed per interrupt Don't process too many packets without allowing other IRQ functions a chance to run. Otherwise, there is a chance of getting "soft lockup" messages and poor application response times. 
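[ Ed. note: the qib_driver.c hunk below caps the receive loop at 64 packets per call. A self-contained sketch of that budgeted-polling pattern follows; the names (rx_ring, RX_BUDGET, rx_poll) are hypothetical, not taken from the driver. ]

#include <stdbool.h>

#define RX_BUDGET 64    /* same bound the patch adds to qib_kreceive() */

struct rx_ring { unsigned int head, tail; };

static bool packet_pending(const struct rx_ring *r)
{
        return r->head != r->tail;
}

static void process_one_packet(struct rx_ring *r)
{
        r->tail++;      /* stand-in for real packet handling */
}

/* Handle at most RX_BUDGET packets per invocation; anything left over is
 * picked up on the next interrupt, so other IRQ work is not starved. */
static int rx_poll(struct rx_ring *r)
{
        int work = 0;

        while (packet_pending(r) && work < RX_BUDGET) {
                process_one_packet(r);
                work++;
        }
        return work;
}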
Signed-off-by: Ralph Campbell Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/hw/qib/qib_driver.c b/drivers/infiniband/hw/qib/qib_driver.c index f15ce07..9cd1936 100644 --- a/drivers/infiniband/hw/qib/qib_driver.c +++ b/drivers/infiniband/hw/qib/qib_driver.c @@ -335,7 +335,7 @@ u32 qib_kreceive(struct qib_ctxtdata *rcd, u32 *llic, u32 *npkts) smp_rmb(); /* prevent speculative reads of dma'ed hdrq */ } - for (last = 0, i = 1; !last; i += !last) { + for (last = 0, i = 1; !last && i <= 64; i += !last) { hdr = dd->f_get_msgheader(dd, rhf_addr); eflags = qib_hdrget_err_flags(rhf_addr); etype = qib_hdrget_rcv_type(rhf_addr); -- cgit v0.10.2 From a5210c12b7c4e34e904f4820a4abd048a2d75db5 Mon Sep 17 00:00:00 2001 From: Ralph Campbell Date: Mon, 2 Aug 2010 22:39:30 +0000 Subject: IB/qib: Fix race between qib_error_qp() and receive packet processing When transitioning a QP to the error state, in progress RWQEs need to be marked complete. This also involves releasing the reference count to the memory regions referenced in the SGEs. The locking in the receive packet processing wasn't sufficient to prevent qib_error_qp() from modifying the r_sge state at the same time, thus leading to kernel panics. Signed-off-by: Ralph Campbell Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c index e0f65e3..6c39851 100644 --- a/drivers/infiniband/hw/qib/qib_qp.c +++ b/drivers/infiniband/hw/qib/qib_qp.c @@ -450,7 +450,7 @@ static void clear_mr_refs(struct qib_qp *qp, int clr_sends) * * Flushes both send and receive work queues. * Returns true if last WQE event should be generated. - * The QP s_lock should be held and interrupts disabled. + * The QP r_lock and s_lock should be held and interrupts disabled. * If we are already in error state, just return. */ int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err) diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c index 40c0a37..a093111 100644 --- a/drivers/infiniband/hw/qib/qib_rc.c +++ b/drivers/infiniband/hw/qib/qib_rc.c @@ -868,7 +868,7 @@ done: /* * Back up requester to resend the last un-ACKed request. - * The QP s_lock should be held and interrupts disabled. + * The QP r_lock and s_lock should be held and interrupts disabled. */ static void qib_restart_rc(struct qib_qp *qp, u32 psn, int wait) { @@ -911,7 +911,8 @@ static void rc_timeout(unsigned long arg) struct qib_ibport *ibp; unsigned long flags; - spin_lock_irqsave(&qp->s_lock, flags); + spin_lock_irqsave(&qp->r_lock, flags); + spin_lock(&qp->s_lock); if (qp->s_flags & QIB_S_TIMER) { ibp = to_iport(qp->ibqp.device, qp->port_num); ibp->n_rc_timeouts++; @@ -920,7 +921,8 @@ static void rc_timeout(unsigned long arg) qib_restart_rc(qp, qp->s_last_psn + 1, 1); qib_schedule_send(qp); } - spin_unlock_irqrestore(&qp->s_lock, flags); + spin_unlock(&qp->s_lock); + spin_unlock_irqrestore(&qp->r_lock, flags); } /* @@ -1414,10 +1416,6 @@ static void qib_rc_rcv_resp(struct qib_ibport *ibp, spin_lock_irqsave(&qp->s_lock, flags); - /* Double check we can process this now that we hold the s_lock. */ - if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) - goto ack_done; - /* Ignore invalid responses. */ if (qib_cmp24(psn, qp->s_next_psn) >= 0) goto ack_done; @@ -1661,9 +1659,6 @@ static int qib_rc_rcv_error(struct qib_other_headers *ohdr, ibp->n_rc_dupreq++; spin_lock_irqsave(&qp->s_lock, flags); - /* Double check we can process this now that we hold the s_lock. 
*/ - if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) - goto unlock_done; for (i = qp->r_head_ack_queue; ; i = prev) { if (i == qp->s_tail_ack_queue) @@ -1878,9 +1873,6 @@ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr, psn = be32_to_cpu(ohdr->bth[2]); opcode >>= 24; - /* Prevent simultaneous processing after APM on different CPUs */ - spin_lock(&qp->r_lock); - /* * Process responses (ACKs) before anything else. Note that the * packet sequence number will be for something in the send work @@ -1891,14 +1883,14 @@ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr, opcode <= OP(ATOMIC_ACKNOWLEDGE)) { qib_rc_rcv_resp(ibp, ohdr, data, tlen, qp, opcode, psn, hdrsize, pmtu, rcd); - goto runlock; + return; } /* Compute 24 bits worth of difference. */ diff = qib_cmp24(psn, qp->r_psn); if (unlikely(diff)) { if (qib_rc_rcv_error(ohdr, data, qp, opcode, psn, diff, rcd)) - goto runlock; + return; goto send_ack; } @@ -2090,9 +2082,6 @@ send_last: if (next > QIB_MAX_RDMA_ATOMIC) next = 0; spin_lock_irqsave(&qp->s_lock, flags); - /* Double check we can process this while holding the s_lock. */ - if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) - goto srunlock; if (unlikely(next == qp->s_tail_ack_queue)) { if (!qp->s_ack_queue[next].sent) goto nack_inv_unlck; @@ -2146,7 +2135,7 @@ send_last: qp->s_flags |= QIB_S_RESP_PENDING; qib_schedule_send(qp); - goto srunlock; + goto sunlock; } case OP(COMPARE_SWAP): @@ -2165,9 +2154,6 @@ send_last: if (next > QIB_MAX_RDMA_ATOMIC) next = 0; spin_lock_irqsave(&qp->s_lock, flags); - /* Double check we can process this while holding the s_lock. */ - if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) - goto srunlock; if (unlikely(next == qp->s_tail_ack_queue)) { if (!qp->s_ack_queue[next].sent) goto nack_inv_unlck; @@ -2213,7 +2199,7 @@ send_last: qp->s_flags |= QIB_S_RESP_PENDING; qib_schedule_send(qp); - goto srunlock; + goto sunlock; } default: @@ -2227,7 +2213,7 @@ send_last: /* Send an ACK if requested or required. */ if (psn & (1 << 31)) goto send_ack; - goto runlock; + return; rnr_nak: qp->r_nak_state = IB_RNR_NAK | qp->r_min_rnr_timer; @@ -2238,7 +2224,7 @@ rnr_nak: atomic_inc(&qp->refcount); list_add_tail(&qp->rspwait, &rcd->qp_wait_list); } - goto runlock; + return; nack_op_err: qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR); @@ -2250,7 +2236,7 @@ nack_op_err: atomic_inc(&qp->refcount); list_add_tail(&qp->rspwait, &rcd->qp_wait_list); } - goto runlock; + return; nack_inv_unlck: spin_unlock_irqrestore(&qp->s_lock, flags); @@ -2264,7 +2250,7 @@ nack_inv: atomic_inc(&qp->refcount); list_add_tail(&qp->rspwait, &rcd->qp_wait_list); } - goto runlock; + return; nack_acc_unlck: spin_unlock_irqrestore(&qp->s_lock, flags); @@ -2274,13 +2260,6 @@ nack_acc: qp->r_ack_psn = qp->r_psn; send_ack: qib_send_rc_ack(qp); -runlock: - spin_unlock(&qp->r_lock); - return; - -srunlock: - spin_unlock_irqrestore(&qp->s_lock, flags); - spin_unlock(&qp->r_lock); return; sunlock: diff --git a/drivers/infiniband/hw/qib/qib_sdma.c b/drivers/infiniband/hw/qib/qib_sdma.c index b845688..cad4449 100644 --- a/drivers/infiniband/hw/qib/qib_sdma.c +++ b/drivers/infiniband/hw/qib/qib_sdma.c @@ -656,6 +656,7 @@ unmap: } qp = tx->qp; qib_put_txreq(tx); + spin_lock(&qp->r_lock); spin_lock(&qp->s_lock); if (qp->ibqp.qp_type == IB_QPT_RC) { /* XXX what about error sending RDMA read responses? 
*/ @@ -664,6 +665,7 @@ unmap: } else if (qp->s_wqe) qib_send_complete(qp, qp->s_wqe, IB_WC_GENERAL_ERR); spin_unlock(&qp->s_lock); + spin_unlock(&qp->r_lock); /* return zero to process the next send work request */ goto unlock; diff --git a/drivers/infiniband/hw/qib/qib_uc.c b/drivers/infiniband/hw/qib/qib_uc.c index 6c7fe78..b9c8b63 100644 --- a/drivers/infiniband/hw/qib/qib_uc.c +++ b/drivers/infiniband/hw/qib/qib_uc.c @@ -272,9 +272,6 @@ void qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, opcode >>= 24; memset(&wc, 0, sizeof wc); - /* Prevent simultaneous processing after APM on different CPUs */ - spin_lock(&qp->r_lock); - /* Compare the PSN verses the expected PSN. */ if (unlikely(qib_cmp24(psn, qp->r_psn) != 0)) { /* @@ -534,7 +531,6 @@ rdma_last: } qp->r_psn++; qp->r_state = opcode; - spin_unlock(&qp->r_lock); return; rewind: @@ -542,12 +538,10 @@ rewind: qp->r_sge.num_sge = 0; drop: ibp->n_pkt_drops++; - spin_unlock(&qp->r_lock); return; op_err: qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR); - spin_unlock(&qp->r_lock); return; sunlock: diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c index c838cda..e1b3da2 100644 --- a/drivers/infiniband/hw/qib/qib_ud.c +++ b/drivers/infiniband/hw/qib/qib_ud.c @@ -535,13 +535,6 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, wc.byte_len = tlen + sizeof(struct ib_grh); /* - * We need to serialize getting a receive work queue entry and - * generating a completion for it against QPs sending to this QP - * locally. - */ - spin_lock(&qp->r_lock); - - /* * Get the next work request entry to find where to put the data. */ if (qp->r_flags & QIB_R_REUSE_SGE) @@ -552,19 +545,19 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, ret = qib_get_rwqe(qp, 0); if (ret < 0) { qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR); - goto bail_unlock; + return; } if (!ret) { if (qp->ibqp.qp_num == 0) ibp->n_vl15_dropped++; - goto bail_unlock; + return; } } /* Silently drop packets which are too big. */ if (unlikely(wc.byte_len > qp->r_len)) { qp->r_flags |= QIB_R_REUSE_SGE; ibp->n_pkt_drops++; - goto bail_unlock; + return; } if (has_grh) { qib_copy_sge(&qp->r_sge, &hdr->u.l.grh, @@ -579,7 +572,7 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, qp->r_sge.sge = *qp->r_sge.sg_list++; } if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags)) - goto bail_unlock; + return; wc.wr_id = qp->r_wr_id; wc.status = IB_WC_SUCCESS; wc.opcode = IB_WC_RECV; @@ -601,7 +594,5 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, (ohdr->bth[0] & cpu_to_be32(IB_BTH_SOLICITED)) != 0); -bail_unlock: - spin_unlock(&qp->r_lock); bail:; } diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c index cda8f41..9fab404 100644 --- a/drivers/infiniband/hw/qib/qib_verbs.c +++ b/drivers/infiniband/hw/qib/qib_verbs.c @@ -550,10 +550,12 @@ static void qib_qp_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr, { struct qib_ibport *ibp = &rcd->ppd->ibport_data; + spin_lock(&qp->r_lock); + /* Check for valid receive state. 
*/ if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) { ibp->n_pkt_drops++; - return; + goto unlock; } switch (qp->ibqp.qp_type) { @@ -577,6 +579,9 @@ static void qib_qp_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr, default: break; } + +unlock: + spin_unlock(&qp->r_lock); } /** -- cgit v0.10.2 From a5f4a07820ebc60b21c984d893f48402c4b4a4a2 Mon Sep 17 00:00:00 2001 From: Steve Wise Date: Fri, 23 Jul 2010 19:12:43 +0000 Subject: RDMA/cxgb4: Add timeouts when waiting for FW responses Don't hang a host thread if the FW stops responding. Signed-off-by: Steve Wise Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index 3eff5df..6c40779 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c @@ -2050,8 +2050,15 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog) goto fail3; /* wait for pass_open_rpl */ - wait_event(ep->com.waitq, ep->com.rpl_done); - err = ep->com.rpl_err; + wait_event_timeout(ep->com.waitq, ep->com.rpl_done, C4IW_WR_TO); + if (ep->com.rpl_done) + err = ep->com.rpl_err; + else { + printk(KERN_ERR MOD "Device %s not responding!\n", + pci_name(ep->com.dev->rdev.lldi.pdev)); + ep->com.dev->rdev.flags = T4_FATAL_ERROR; + err = -EIO; + } if (!err) { cm_id->provider_data = ep; goto out; @@ -2080,10 +2087,17 @@ int c4iw_destroy_listen(struct iw_cm_id *cm_id) err = listen_stop(ep); if (err) goto done; - wait_event(ep->com.waitq, ep->com.rpl_done); + wait_event_timeout(ep->com.waitq, ep->com.rpl_done, C4IW_WR_TO); + if (ep->com.rpl_done) + err = ep->com.rpl_err; + else { + printk(KERN_ERR MOD "Device %s not responding!\n", + pci_name(ep->com.dev->rdev.lldi.pdev)); + ep->com.dev->rdev.flags = T4_FATAL_ERROR; + err = -EIO; + } cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET); done: - err = ep->com.rpl_err; cm_id->rem_ref(cm_id); c4iw_put_ep(&ep->com); return err; -- cgit v0.10.2 From 48d8fcebb7abf64843314672c1208b730be911bb Mon Sep 17 00:00:00 2001 From: Or Gerlitz Date: Tue, 20 Jul 2010 12:00:20 +0000 Subject: IB/iser: Make needlessly global iser_alloc_rx_descriptors() static Signed-off-by: Or Gerlitz Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c index 0b9ef07..95a08a8 100644 --- a/drivers/infiniband/ulp/iser/iser_initiator.c +++ b/drivers/infiniband/ulp/iser/iser_initiator.c @@ -170,7 +170,7 @@ static void iser_create_send_desc(struct iser_conn *ib_conn, } -int iser_alloc_rx_descriptors(struct iser_conn *ib_conn) +static int iser_alloc_rx_descriptors(struct iser_conn *ib_conn) { int i, j; u64 dma_addr; -- cgit v0.10.2 From 812d8672218e71fe3ea249f16ad990978dc3bb84 Mon Sep 17 00:00:00 2001 From: Or Gerlitz Date: Tue, 20 Jul 2010 12:00:20 +0000 Subject: RDMA/nes: Fix two sparse warnings Simple changes to fix warnings: CHECK drivers/infiniband/hw/nes/nes_verbs.c nes_verbs.c:1944:45: warning: Using plain integer as NULL pointer nes_verbs.c:1944:48: warning: Using plain integer as NULL pointer CHECK drivers/infiniband/hw/nes/nes_cm.c nes_cm.c:2645:43: warning: mixing different enum types nes_cm.c:2645:43: int enum iw_cm_event_type versus nes_cm.c:2645:43: int enum iw_cm_event_status Signed-off-by: Or Gerlitz Acked-by: Chien Tung Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c index 986d6f3..d400fa9 100644 --- a/drivers/infiniband/hw/nes/nes_cm.c +++ b/drivers/infiniband/hw/nes/nes_cm.c @@ -2565,7 +2565,7 @@ static 
int nes_cm_disconn_true(struct nes_qp *nesqp) u16 last_ae; u8 original_hw_tcp_state; u8 original_ibqp_state; - enum iw_cm_event_type disconn_status = IW_CM_EVENT_STATUS_OK; + enum iw_cm_event_status disconn_status = IW_CM_EVENT_STATUS_OK; int issue_disconn = 0; int issue_close = 0; int issue_flush = 0; diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c index 3b95d04..3b50dc5 100644 --- a/drivers/infiniband/hw/nes/nes_verbs.c +++ b/drivers/infiniband/hw/nes/nes_verbs.c @@ -1941,7 +1941,7 @@ static int nes_reg_mr(struct nes_device *nesdev, struct nes_pd *nespd, u8 use_256_pbls = 0; u8 use_4k_pbls = 0; u16 use_two_level = (pbl_count_4k > 1) ? 1 : 0; - struct nes_root_vpbl new_root = {0, 0, 0}; + struct nes_root_vpbl new_root = { 0, NULL, NULL }; u32 opcode = 0; u16 major_code; -- cgit v0.10.2 From a2ebf07ae53e65bd073f96877e4818f2e89271ae Mon Sep 17 00:00:00 2001 From: Aleksey Senin Date: Sun, 4 Jul 2010 13:55:57 +0000 Subject: IB: Rename RAW_ETY to RAW_ETHERTYPE Change abbreviated IB_QPT_RAW_ETY to IB_QPT_RAW_ETHERTYPE to make the special QP type easier to understand. cf http://www.mail-archive.com/linux-rdma@vger.kernel.org/msg04530.html Signed-off-by: Aleksey Senin Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index a7da9be..e0fa222 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c @@ -310,8 +310,8 @@ EXPORT_SYMBOL(ib_create_qp); static const struct { int valid; - enum ib_qp_attr_mask req_param[IB_QPT_RAW_ETY + 1]; - enum ib_qp_attr_mask opt_param[IB_QPT_RAW_ETY + 1]; + enum ib_qp_attr_mask req_param[IB_QPT_RAW_ETHERTYPE + 1]; + enum ib_qp_attr_mask opt_param[IB_QPT_RAW_ETHERTYPE + 1]; } qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = { [IB_QPS_RESET] = { [IB_QPS_RESET] = { .valid = 1 }, diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c index 47d388e..32fb342 100644 --- a/drivers/infiniband/hw/ehca/ehca_qp.c +++ b/drivers/infiniband/hw/ehca/ehca_qp.c @@ -251,7 +251,7 @@ static inline int ibqptype2servicetype(enum ib_qp_type ibqptype) return ST_UD; case IB_QPT_RAW_IPV6: return -EINVAL; - case IB_QPT_RAW_ETY: + case IB_QPT_RAW_ETHERTYPE: return -EINVAL; default: ehca_gen_err("Invalid ibqptype=%x", ibqptype); diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c index 3603ae8..f4ceecd 100644 --- a/drivers/infiniband/hw/mthca/mthca_cmd.c +++ b/drivers/infiniband/hw/mthca/mthca_cmd.c @@ -1817,7 +1817,7 @@ int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn, case IB_QPT_RAW_IPV6: op_mod = 2; break; - case IB_QPT_RAW_ETY: + case IB_QPT_RAW_ETHERTYPE: op_mod = 3; break; default: diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index f3e8f3c..857b3b9 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -555,7 +555,7 @@ enum ib_qp_type { IB_QPT_UC, IB_QPT_UD, IB_QPT_RAW_IPV6, - IB_QPT_RAW_ETY + IB_QPT_RAW_ETHERTYPE }; enum ib_qp_create_flags { -- cgit v0.10.2 From 7a7008110b94dfaa90db4b0cc5b0c3f964c80506 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Thu, 29 Jul 2010 15:56:37 +0000 Subject: IB/srp: Use print_hex_dump() Replace an open-coded dump of the receive buffer with a call to print_hex_dump(). 
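[ Ed. note: a usage sketch of print_hex_dump() from <linux/printk.h> for reference; the buffer and prefix string here are made up. One subtlety: hex_dump_to_buffer() falls back to 16 for row sizes other than 16 or 32, so the rowsize of 8 passed in the hunk below should still print 16 bytes per line. ]

u8 buf[16] = { 0xde, 0xad, 0xbe, 0xef };

/* args: level, prefix, prefix type, rowsize, groupsize, buf, len, ascii */
print_hex_dump(KERN_DEBUG, "recv: ", DUMP_PREFIX_OFFSET, 16, 1,
               buf, sizeof(buf), true);

/* emits something like:
 *   recv: 00000000: de ad be ef 00 00 00 00 00 00 00 00 00 00 00 00  ................
 */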
Signed-off-by: Bart Van Assche Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index ed3f9eb..4675def 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c @@ -879,21 +879,10 @@ static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc) opcode = *(u8 *) iu->buf; if (0) { - int i; - shost_printk(KERN_ERR, target->scsi_host, PFX "recv completion, opcode 0x%02x\n", opcode); - - for (i = 0; i < wc->byte_len; ++i) { - if (i % 8 == 0) - printk(KERN_ERR " [%02x] ", i); - printk(" %02x", ((u8 *) iu->buf)[i]); - if ((i + 1) % 8 == 0) - printk("\n"); - } - - if (wc->byte_len % 8) - printk("\n"); + print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1, + iu->buf, wc->byte_len, true); } switch (opcode) { -- cgit v0.10.2 From c996bb47bb419b7c2f75499e11750142775e5da9 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Fri, 30 Jul 2010 10:59:05 +0000 Subject: IB/srp: Make receive buffer handling more robust The current strategy in ib_srp for posting receive buffers is: * Post one buffer after channel establishment. * Post one buffer before sending an SRP_CMD or SRP_TSK_MGMT to the target. As a result, only the first non-SRP_RSP information unit from the target will be processed. If that first information unit is an SRP_T_LOGOUT, it will be processed. On the other hand, if the initiator receives an SRP_CRED_REQ or SRP_AER_REQ before it receives a SRP_T_LOGOUT, the SRP_T_LOGOUT won't be processed. We can fix this inconsistency by changing the strategy for posting receive buffers to: * Post all receive buffers after channel establishment. * After a receive buffer has been consumed and processed, post it again. A side effect is that the ib_post_recv() call is moved out of the SCSI command processing path. Since __srp_post_recv() is not called directly any more, get rid of it and move the code directly into srp_post_recv(). Also, move srp_post_recv() up in the file to avoid a forward declaration. 
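[ Ed. note: a compact sketch of the strategy described above, using hypothetical names (conn, post_recv, handle_iu) rather than the ib_srp ones: fill the whole receive queue once when the channel comes up, then repost each buffer as soon as its information unit has been processed, so the target never finds the queue empty. ]

struct conn;                            /* hypothetical connection object */
int post_recv(struct conn *c);          /* posts one receive ring slot */
void handle_iu(struct conn *c, void *iu);

#define RQ_SIZE 64                      /* SRP_RQ_SIZE plays this role */

/* Called once, right after the channel is established. */
static int post_all_recv_buffers(struct conn *c)
{
        int i, ret;

        for (i = 0; i < RQ_SIZE; i++) {
                ret = post_recv(c);
                if (ret)
                        return ret;
        }
        return 0;
}

/* Receive completion: consume the IU, then immediately repost its slot. */
static void consume_and_repost(struct conn *c, void *iu)
{
        handle_iu(c, iu);       /* SRP_RSP, SRP_CRED_REQ, SRP_T_LOGOUT, ... */
        post_recv(c);           /* slot becomes available to the HCA again */
}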
Signed-off-by: Bart Van Assche Acked-by: David Dillow Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index 4675def..ffdd2d1 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c @@ -811,6 +811,38 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target, return len; } +static int srp_post_recv(struct srp_target_port *target) +{ + unsigned long flags; + struct srp_iu *iu; + struct ib_sge list; + struct ib_recv_wr wr, *bad_wr; + unsigned int next; + int ret; + + spin_lock_irqsave(target->scsi_host->host_lock, flags); + + next = target->rx_head & (SRP_RQ_SIZE - 1); + wr.wr_id = next; + iu = target->rx_ring[next]; + + list.addr = iu->dma; + list.length = iu->size; + list.lkey = target->srp_host->srp_dev->mr->lkey; + + wr.next = NULL; + wr.sg_list = &list; + wr.num_sge = 1; + + ret = ib_post_recv(target->qp, &wr, &bad_wr); + if (!ret) + ++target->rx_head; + + spin_unlock_irqrestore(target->scsi_host->host_lock, flags); + + return ret; +} + static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp) { struct srp_request *req; @@ -868,6 +900,7 @@ static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc) { struct ib_device *dev; struct srp_iu *iu; + int res; u8 opcode; iu = target->rx_ring[wc->wr_id]; @@ -904,6 +937,11 @@ static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc) ib_dma_sync_single_for_device(dev, iu->dma, target->max_ti_iu_len, DMA_FROM_DEVICE); + + res = srp_post_recv(target); + if (res != 0) + shost_printk(KERN_ERR, target->scsi_host, + PFX "Recv failed with error code %d\n", res); } static void srp_recv_completion(struct ib_cq *cq, void *target_ptr) @@ -943,45 +981,6 @@ static void srp_send_completion(struct ib_cq *cq, void *target_ptr) } } -static int __srp_post_recv(struct srp_target_port *target) -{ - struct srp_iu *iu; - struct ib_sge list; - struct ib_recv_wr wr, *bad_wr; - unsigned int next; - int ret; - - next = target->rx_head & (SRP_RQ_SIZE - 1); - wr.wr_id = next; - iu = target->rx_ring[next]; - - list.addr = iu->dma; - list.length = iu->size; - list.lkey = target->srp_host->srp_dev->mr->lkey; - - wr.next = NULL; - wr.sg_list = &list; - wr.num_sge = 1; - - ret = ib_post_recv(target->qp, &wr, &bad_wr); - if (!ret) - ++target->rx_head; - - return ret; -} - -static int srp_post_recv(struct srp_target_port *target) -{ - unsigned long flags; - int ret; - - spin_lock_irqsave(target->scsi_host->host_lock, flags); - ret = __srp_post_recv(target); - spin_unlock_irqrestore(target->scsi_host->host_lock, flags); - - return ret; -} - /* * Must be called with target->scsi_host->host_lock held to protect * req_lim and tx_head. 
Lock cannot be dropped between call here and @@ -1091,11 +1090,6 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd, goto err; } - if (__srp_post_recv(target)) { - shost_printk(KERN_ERR, target->scsi_host, PFX "Recv failed\n"); - goto err_unmap; - } - ib_dma_sync_single_for_device(dev, iu->dma, srp_max_iu_len, DMA_TO_DEVICE); @@ -1238,6 +1232,7 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event) int attr_mask = 0; int comp = 0; int opcode = 0; + int i; switch (event->event) { case IB_CM_REQ_ERROR: @@ -1287,7 +1282,11 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event) if (target->status) break; - target->status = srp_post_recv(target); + for (i = 0; i < SRP_RQ_SIZE; i++) { + target->status = srp_post_recv(target); + if (target->status) + break; + } if (target->status) break; -- cgit v0.10.2 From 89de74866b846cc48780fda3de7fd223296aaca9 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Tue, 3 Aug 2010 14:08:45 +0000 Subject: IB/srp: Export req_lim via sysfs Export req_lim via sysfs for debugging. Signed-off-by: Bart Van Assche Acked-by: David Dillow Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index ffdd2d1..7f8f16b 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c @@ -1552,6 +1552,18 @@ static ssize_t show_orig_dgid(struct device *dev, return sprintf(buf, "%pI6\n", target->orig_dgid); } +static ssize_t show_req_lim(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct srp_target_port *target = host_to_target(class_to_shost(dev)); + + if (target->state == SRP_TARGET_DEAD || + target->state == SRP_TARGET_REMOVED) + return -ENODEV; + + return sprintf(buf, "%d\n", target->req_lim); +} + static ssize_t show_zero_req_lim(struct device *dev, struct device_attribute *attr, char *buf) { @@ -1586,6 +1598,7 @@ static DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL); static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL); static DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL); static DEVICE_ATTR(orig_dgid, S_IRUGO, show_orig_dgid, NULL); +static DEVICE_ATTR(req_lim, S_IRUGO, show_req_lim, NULL); static DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL); static DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL); static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL); @@ -1597,6 +1610,7 @@ static struct device_attribute *srp_host_attrs[] = { &dev_attr_pkey, &dev_attr_dgid, &dev_attr_orig_dgid, + &dev_attr_req_lim, &dev_attr_zero_req_lim, &dev_attr_local_ib_port, &dev_attr_local_ib_device, -- cgit v0.10.2 From ff0380ce3928e162d5b417ac88124d87b34d63a7 Mon Sep 17 00:00:00 2001 From: Miroslaw Walukiewicz Date: Thu, 15 Jul 2010 14:53:29 +0000 Subject: RDMA/nes: Read firmware version from correct place Signed-off-by: Mirek Walukiewicz Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c index 3b50dc5..3638b83 100644 --- a/drivers/infiniband/hw/nes/nes_verbs.c +++ b/drivers/infiniband/hw/nes/nes_verbs.c @@ -518,7 +518,7 @@ static int nes_query_device(struct ib_device *ibdev, struct ib_device_attr *prop memset(props, 0, sizeof(*props)); memcpy(&props->sys_image_guid, nesvnic->netdev->dev_addr, 6); - props->fw_ver = nesdev->nesadapter->fw_ver; + props->fw_ver = nesdev->nesadapter->firmware_version; props->device_cap_flags = nesdev->nesadapter->device_cap_flags; props->vendor_id = nesdev->nesadapter->vendor_id; 
props->vendor_part_id = nesdev->nesadapter->vendor_part_id; -- cgit v0.10.2 From 69d510238300c0888ec17e4a811279fe7760b527 Mon Sep 17 00:00:00 2001 From: Roland Dreier Date: Wed, 4 Aug 2010 14:25:40 -0700 Subject: RDMA/nes: Get rid of "set but not used" variables Delete dead code in various places that is shown by gcc 4.6's new -Wunused-but-set-variable warnings. Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c index 16b7832..969a01c 100644 --- a/drivers/infiniband/hw/nes/nes.c +++ b/drivers/infiniband/hw/nes/nes.c @@ -259,13 +259,11 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r unsigned long flags; struct nes_qp *nesqp = cqp_request->cqp_callback_pointer; struct nes_adapter *nesadapter = nesdev->nesadapter; - u32 qp_id; atomic_inc(&qps_destroyed); /* Free the control structures */ - qp_id = nesqp->hwqp.qp_id; if (nesqp->pbl_vbase) { pci_free_consistent(nesdev->pcidev, nesqp->qp_mem_size, nesqp->hwqp.q2_vbase, nesqp->hwqp.q2_pbase); @@ -441,7 +439,6 @@ static int __devinit nes_probe(struct pci_dev *pcidev, const struct pci_device_i struct net_device *netdev = NULL; struct nes_device *nesdev = NULL; int ret = 0; - struct nes_vnic *nesvnic = NULL; void __iomem *mmio_regs = NULL; u8 hw_rev; @@ -677,8 +674,6 @@ static int __devinit nes_probe(struct pci_dev *pcidev, const struct pci_device_i } nes_print_macaddr(netdev); - /* create a CM core for this netdev */ - nesvnic = netdev_priv(netdev); nesdev->netdev_count++; nesdev->nesadapter->netdev_count++; diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c index d400fa9..8bc11e1 100644 --- a/drivers/infiniband/hw/nes/nes_cm.c +++ b/drivers/infiniband/hw/nes/nes_cm.c @@ -1719,8 +1719,6 @@ static int handle_ack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, { int datasize = 0; u32 inc_sequence; - u32 rem_seq_ack; - u32 rem_seq; int ret = 0; int optionsize; optionsize = (tcph->doff << 2) - sizeof(struct tcphdr); @@ -1730,8 +1728,6 @@ static int handle_ack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, skb_pull(skb, tcph->doff << 2); inc_sequence = ntohl(tcph->seq); - rem_seq = ntohl(tcph->seq); - rem_seq_ack = ntohl(tcph->ack_seq); datasize = skb->len; switch (cm_node->state) { case NES_CM_STATE_SYN_RCVD: @@ -3128,17 +3124,15 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog) struct nes_vnic *nesvnic; struct nes_cm_listener *cm_node; struct nes_cm_info cm_info; - struct nes_adapter *adapter; int err; - nes_debug(NES_DBG_CM, "cm_id = %p, local port = 0x%04X.\n", cm_id, ntohs(cm_id->local_addr.sin_port)); nesvnic = to_nesvnic(cm_id->device); if (!nesvnic) return -EINVAL; - adapter = nesvnic->nesdev->nesadapter; + nes_debug(NES_DBG_CM, "nesvnic=%p, netdev=%p, %s\n", nesvnic, nesvnic->netdev, nesvnic->netdev->name); diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c index 199107a..07c4004 100644 --- a/drivers/infiniband/hw/nes/nes_hw.c +++ b/drivers/infiniband/hw/nes/nes_hw.c @@ -2999,11 +2999,8 @@ static void nes_cqp_ce_handler(struct nes_device *nesdev, struct nes_hw_cq *cq) static u8 *locate_mpa(u8 *pkt, u32 aeq_info) { - u16 pkt_len; - if (aeq_info & NES_AEQE_Q2_DATA_ETHERNET) { /* skip over ethernet header */ - pkt_len = be16_to_cpu(*(u16 *)(pkt + ETH_HLEN - 2)); pkt += ETH_HLEN; /* Skip over IP and TCP headers */ diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c index c0c404e..1901ec7 100644 --- 
a/drivers/infiniband/hw/nes/nes_nic.c +++ b/drivers/infiniband/hw/nes/nes_nic.c @@ -470,7 +470,6 @@ static int nes_netdev_start_xmit(struct sk_buff *skb, struct net_device *netdev) u16 nhoffset; u16 wqes_needed; u16 wqes_available; - u32 old_head; u32 wqe_misc; /* @@ -510,7 +509,6 @@ sq_no_longer_full: if (skb_is_gso(skb)) { nesvnic->segmented_tso_requests++; nesvnic->tso_requests++; - old_head = nesnic->sq_head; /* Basically 4 fragments available per WQE with extended fragments */ wqes_needed = nr_frags >> 2; wqes_needed += (nr_frags&3)?1:0; diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c index 3638b83..9046e66 100644 --- a/drivers/infiniband/hw/nes/nes_verbs.c +++ b/drivers/infiniband/hw/nes/nes_verbs.c @@ -2112,13 +2112,12 @@ static struct ib_mr *nes_reg_phys_mr(struct ib_pd *ib_pd, u32 driver_key = 0; u32 root_pbl_index = 0; u32 cur_pbl_index = 0; - int err = 0, pbl_depth = 0; + int err = 0; int ret = 0; u16 pbl_count = 0; u8 single_page = 1; u8 stag_key = 0; - pbl_depth = 0; region_length = 0; vpbl.pbl_vbase = NULL; root_vpbl.pbl_vbase = NULL; @@ -2931,7 +2930,6 @@ int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int ret; u16 original_last_aeq; u8 issue_modify_qp = 0; - u8 issue_disconnect = 0; u8 dont_wait = 0; nes_debug(NES_DBG_MOD_QP, "QP%u: QP State=%u, cur QP State=%u," @@ -3083,7 +3081,6 @@ int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, nesqp->iwarp_state = next_iwarp_state & NES_CQP_QP_IWARP_STATE_MASK; nes_debug(NES_DBG_MOD_QP, "Change nesqp->iwarp_state=%08x\n", nesqp->iwarp_state); - issue_disconnect = 1; } else { nesqp->iwarp_state = next_iwarp_state & NES_CQP_QP_IWARP_STATE_MASK; nes_debug(NES_DBG_MOD_QP, "Change nesqp->iwarp_state=%08x\n", -- cgit v0.10.2 From df924f833c0389c9f5206b073e87e9f5f3e857d3 Mon Sep 17 00:00:00 2001 From: Roland Dreier Date: Wed, 4 Aug 2010 14:27:01 -0700 Subject: RDMA/nes: Fix showing wqm_quanta In nes_show_wqm_quanta(), the wrong value is printed. Fix this. Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c index 969a01c..d53480e 100644 --- a/drivers/infiniband/hw/nes/nes.c +++ b/drivers/infiniband/hw/nes/nes.c @@ -1099,7 +1099,7 @@ static ssize_t nes_show_wqm_quanta(struct device_driver *ddp, char *buf) i++; } - return snprintf(buf, PAGE_SIZE, "0x%X\n", wqm_quanta); + return snprintf(buf, PAGE_SIZE, "0x%X\n", wqm_quanta_value); } -- cgit v0.10.2 From b2a899eaf3ab78cab380ef7e331c549d771cc2ca Mon Sep 17 00:00:00 2001 From: Roland Dreier Date: Wed, 4 Aug 2010 14:29:31 -0700 Subject: RDMA/nes: Fix misindented code In nes_probe(), a bit of code is indented one tab stop too far. Fix this. 
Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c index d53480e..0c9f0aa 100644 --- a/drivers/infiniband/hw/nes/nes.c +++ b/drivers/infiniband/hw/nes/nes.c @@ -661,23 +661,21 @@ static int __devinit nes_probe(struct pci_dev *pcidev, const struct pci_device_i nes_notifiers_registered++; /* Initialize network devices */ - if ((netdev = nes_netdev_init(nesdev, mmio_regs)) == NULL) { - goto bail7; - } - - /* Register network device */ - ret = register_netdev(netdev); - if (ret) { - printk(KERN_ERR PFX "Unable to register netdev, ret = %d\n", ret); - nes_netdev_destroy(netdev); - goto bail7; - } + if ((netdev = nes_netdev_init(nesdev, mmio_regs)) == NULL) + goto bail7; - nes_print_macaddr(netdev); + /* Register network device */ + ret = register_netdev(netdev); + if (ret) { + printk(KERN_ERR PFX "Unable to register netdev, ret = %d\n", ret); + nes_netdev_destroy(netdev); + goto bail7; + } - nesdev->netdev_count++; - nesdev->nesadapter->netdev_count++; + nes_print_macaddr(netdev); + nesdev->netdev_count++; + nesdev->nesadapter->netdev_count++; printk(KERN_ERR PFX "%s: NetEffect RNIC driver successfully loaded.\n", pci_name(pcidev)); -- cgit v0.10.2 From bd5d0ccbef9f2565e76dba4ff291da6a2cb8b1b4 Mon Sep 17 00:00:00 2001 From: Alexander Schmidt Date: Mon, 5 Jul 2010 11:41:56 +0000 Subject: IB/ehca: Init irq tasklet before irq can happen Initialize tasklet before interrupts are requested to prevent scheduling of an uninitialized tasklet. Signed-off-by: Alexander Schmidt Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/hw/ehca/ehca_eq.c b/drivers/infiniband/hw/ehca/ehca_eq.c index 3b87589..d9b1bb4 100644 --- a/drivers/infiniband/hw/ehca/ehca_eq.c +++ b/drivers/infiniband/hw/ehca/ehca_eq.c @@ -122,21 +122,21 @@ int ehca_create_eq(struct ehca_shca *shca, /* register interrupt handlers and initialize work queues */ if (type == EHCA_EQ) { + tasklet_init(&eq->interrupt_task, ehca_tasklet_eq, (long)shca); + ret = ibmebus_request_irq(eq->ist, ehca_interrupt_eq, IRQF_DISABLED, "ehca_eq", (void *)shca); if (ret < 0) ehca_err(ib_dev, "Can't map interrupt handler."); - - tasklet_init(&eq->interrupt_task, ehca_tasklet_eq, (long)shca); } else if (type == EHCA_NEQ) { + tasklet_init(&eq->interrupt_task, ehca_tasklet_neq, (long)shca); + ret = ibmebus_request_irq(eq->ist, ehca_interrupt_neq, IRQF_DISABLED, "ehca_neq", (void *)shca); if (ret < 0) ehca_err(ib_dev, "Can't map interrupt handler."); - - tasklet_init(&eq->interrupt_task, ehca_tasklet_neq, (long)shca); } eq->is_initialized = 1; -- cgit v0.10.2 From 817979ac45679f0cb3f9fea6dc444e4097aa6892 Mon Sep 17 00:00:00 2001 From: Roland Dreier Date: Thu, 5 Aug 2010 14:21:31 -0700 Subject: RDMA/nes: Fix confusing if statement indentation Fix confusing indentation that makes a statement look as if it's part of an if statement when in fact it isn't. 
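[ Ed. note: the hazard in miniature. Without braces, only the first statement is guarded by the if; indentation that suggests otherwise invites exactly the misreading this patch removes. ]

if (cond)
        do_a();
        do_b();         /* indented as if conditional, but always executes */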
Reported-by: Julia Lawall Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c index 07c4004..f8233c8 100644 --- a/drivers/infiniband/hw/nes/nes_hw.c +++ b/drivers/infiniband/hw/nes/nes_hw.c @@ -2737,9 +2737,9 @@ void nes_nic_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq) nesnic->sq_tail &= nesnic->sq_size-1; if (sq_cqes > 128) { barrier(); - /* restart the queue if it had been stopped */ - if (netif_queue_stopped(nesvnic->netdev)) - netif_wake_queue(nesvnic->netdev); + /* restart the queue if it had been stopped */ + if (netif_queue_stopped(nesvnic->netdev)) + netif_wake_queue(nesvnic->netdev); sq_cqes = 0; } } else { -- cgit v0.10.2 From 2db00321815e20f4a7ff9df43f7cf2073085683d Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Tue, 3 Aug 2010 21:35:36 +0000 Subject: IB/ehca: Drop unnecessary NULL test list_for_each_entry binds its first argument to a non-null value, and thus any null test on the value of that argument is superfluous. The semantic patch that makes this change is as follows: (http://coccinelle.lip6.fr/) // <smpl> @@ iterator I; expression x; statement S,S1,S2; @@ I(x,...) { <... - if (x == NULL && ...) S ...> } // </smpl> Signed-off-by: Julia Lawall Acked-by: Alexander Schmidt Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.c b/drivers/infiniband/hw/ehca/ehca_mrmw.c index 31a68b9..53f4cd4 100644 --- a/drivers/infiniband/hw/ehca/ehca_mrmw.c +++ b/drivers/infiniband/hw/ehca/ehca_mrmw.c @@ -933,11 +933,6 @@ int ehca_unmap_fmr(struct list_head *fmr_list) /* check all FMR belong to same SHCA, and check internal flag */ list_for_each_entry(ib_fmr, fmr_list, list) { prev_shca = shca; - if (!ib_fmr) { - ehca_gen_err("bad fmr=%p in list", ib_fmr); - ret = -EINVAL; - goto unmap_fmr_exit0; - } shca = container_of(ib_fmr->device, struct ehca_shca, ib_device); e_fmr = container_of(ib_fmr, struct ehca_mr, ib.ib_fmr); -- cgit v0.10.2 From ba818afdc62590e95e45d63be96954ea568925bf Mon Sep 17 00:00:00 2001 From: David Miller Date: Thu, 5 Aug 2010 05:55:52 +0000 Subject: IB/qib: Add missing <linux/slab.h> include Fix build failure on sparc64 which is missing the include of <linux/slab.h> via other headers that x86, powerpc, ia64, etc. have. Signed-off-by: David S. Miller Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h index e67dba4..61de065 100644 --- a/drivers/infiniband/hw/qib/qib.h +++ b/drivers/infiniband/hw/qib/qib.h @@ -45,6 +45,7 @@ #include #include #include +#include <linux/slab.h> #include #include #include -- cgit v0.10.2
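[ Ed. note on the IB/ehca commit above: a minimal sketch, with a made-up item type, of why the removed test was dead. list_for_each_entry() computes its cursor via container_of() on each list node, so inside the loop body the cursor always points at a real entry; an empty list simply never enters the body, and a NULL check there can never fire. ]

#include <linux/list.h>

struct item {
        int val;
        struct list_head list;
};

static int sum_items(struct list_head *items)
{
        struct item *it;
        int sum = 0;

        list_for_each_entry(it, items, list) {
                /* 'it' is never NULL here; a NULL test would be dead code */
                sum += it->val;
        }
        return sum;
}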