From 8c5122e45a10a9262f872b53f151a592e870f905 Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Wed, 8 Jun 2016 17:28:29 -0600 Subject: IB/mlx4: Properly initialize GRH TClass and FlowLabel in AHs When this code was reworked for IBoE support the order of assignments for the sl_tclass_flowlabel got flipped around resulting in TClass & FlowLabel being permanently set to 0 in the packet headers. This breaks IB routers that rely on these headers, but only affects kernel users - libmlx4 does this properly for user space. Cc: stable@vger.kernel.org Fixes: fa417f7b520e ("IB/mlx4: Add support for IBoE") Signed-off-by: Jason Gunthorpe Signed-off-by: Doug Ledford diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c index 105246f..5fc6233 100644 --- a/drivers/infiniband/hw/mlx4/ah.c +++ b/drivers/infiniband/hw/mlx4/ah.c @@ -47,6 +47,7 @@ static struct ib_ah *create_ib_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr, ah->av.ib.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24)); ah->av.ib.g_slid = ah_attr->src_path_bits; + ah->av.ib.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28); if (ah_attr->ah_flags & IB_AH_GRH) { ah->av.ib.g_slid |= 0x80; ah->av.ib.gid_index = ah_attr->grh.sgid_index; @@ -64,7 +65,6 @@ static struct ib_ah *create_ib_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr, !(1 << ah->av.ib.stat_rate & dev->caps.stat_rate_support)) --ah->av.ib.stat_rate; } - ah->av.ib.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28); return &ah->ibah; } -- cgit v0.10.2 From 37e07cdafc111dfb7ce27e70e73d900d7cf2920c Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Fri, 10 Jun 2016 11:08:25 -0700 Subject: IB/cma: Make the code easier to verify Static source code analysis tools like smatch cannot handle functions that lock or not lock a mutex depending on the value of the arguments. Hence inline the function cma_disable_callback(). Additionally, this patch realizes a small performance optimization by reducing the number of mutex_lock() and mutex_unlock() calls in the modified functions. With this patch applied smatch no longer complains about source file cma.c. Without this patch smatch reports the following for this source file: drivers/infiniband/core/cma.c:1959: cma_req_handler() warn: inconsistent returns 'mutex:&listen_id->handler_mutex'. Locked on: line 1880 line 1959 Unlocked on: line 1941 drivers/infiniband/core/cma.c:2112: iw_conn_req_handler() warn: inconsistent returns 'mutex:&listen_id->handler_mutex'. 
Locked on: line 2048 Unlocked on: line 2112 Signed-off-by: Bart Van Assche Cc: Sean Hefty Cc: Steve Wise Cc: Leon Romanovsky Acked-by: Sean Hefty Reviewed-by: Steve Wise Reviewed-by: Leon Romanovsky Signed-off-by: Doug Ledford diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index f0c91ba..c58ee77 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -708,17 +708,6 @@ static void cma_deref_id(struct rdma_id_private *id_priv) complete(&id_priv->comp); } -static int cma_disable_callback(struct rdma_id_private *id_priv, - enum rdma_cm_state state) -{ - mutex_lock(&id_priv->handler_mutex); - if (id_priv->state != state) { - mutex_unlock(&id_priv->handler_mutex); - return -EINVAL; - } - return 0; -} - struct rdma_cm_id *rdma_create_id(struct net *net, rdma_cm_event_handler event_handler, void *context, enum rdma_port_space ps, @@ -1671,11 +1660,12 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event) struct rdma_cm_event event; int ret = 0; + mutex_lock(&id_priv->handler_mutex); if ((ib_event->event != IB_CM_TIMEWAIT_EXIT && - cma_disable_callback(id_priv, RDMA_CM_CONNECT)) || + id_priv->state != RDMA_CM_CONNECT) || (ib_event->event == IB_CM_TIMEWAIT_EXIT && - cma_disable_callback(id_priv, RDMA_CM_DISCONNECT))) - return 0; + id_priv->state != RDMA_CM_DISCONNECT)) + goto out; memset(&event, 0, sizeof event); switch (ib_event->event) { @@ -1870,7 +1860,7 @@ static int cma_check_req_qp_type(struct rdma_cm_id *id, struct ib_cm_event *ib_e static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event) { - struct rdma_id_private *listen_id, *conn_id; + struct rdma_id_private *listen_id, *conn_id = NULL; struct rdma_cm_event event; struct net_device *net_dev; int offset, ret; @@ -1884,9 +1874,10 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event) goto net_dev_put; } - if (cma_disable_callback(listen_id, RDMA_CM_LISTEN)) { + mutex_lock(&listen_id->handler_mutex); + if (listen_id->state != RDMA_CM_LISTEN) { ret = -ECONNABORTED; - goto net_dev_put; + goto err1; } memset(&event, 0, sizeof event); @@ -1976,8 +1967,9 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event) struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr; struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr; - if (cma_disable_callback(id_priv, RDMA_CM_CONNECT)) - return 0; + mutex_lock(&id_priv->handler_mutex); + if (id_priv->state != RDMA_CM_CONNECT) + goto out; memset(&event, 0, sizeof event); switch (iw_event->event) { @@ -2029,6 +2021,7 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event) return ret; } +out: mutex_unlock(&id_priv->handler_mutex); return ret; } @@ -2039,13 +2032,15 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id, struct rdma_cm_id *new_cm_id; struct rdma_id_private *listen_id, *conn_id; struct rdma_cm_event event; - int ret; + int ret = -ECONNABORTED; struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr; struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr; listen_id = cm_id->context; - if (cma_disable_callback(listen_id, RDMA_CM_LISTEN)) - return -ECONNABORTED; + + mutex_lock(&listen_id->handler_mutex); + if (listen_id->state != RDMA_CM_LISTEN) + goto out; /* Create a new RDMA id for the new IW CM ID */ new_cm_id = rdma_create_id(listen_id->id.route.addr.dev_addr.net, @@ -3216,8 +3211,9 @@ static int cma_sidr_rep_handler(struct ib_cm_id *cm_id, struct 
ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd; int ret = 0; - if (cma_disable_callback(id_priv, RDMA_CM_CONNECT)) - return 0; + mutex_lock(&id_priv->handler_mutex); + if (id_priv->state != RDMA_CM_CONNECT) + goto out; memset(&event, 0, sizeof event); switch (ib_event->event) { @@ -3673,12 +3669,13 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast) struct rdma_id_private *id_priv; struct cma_multicast *mc = multicast->context; struct rdma_cm_event event; - int ret; + int ret = 0; id_priv = mc->id_priv; - if (cma_disable_callback(id_priv, RDMA_CM_ADDR_BOUND) && - cma_disable_callback(id_priv, RDMA_CM_ADDR_RESOLVED)) - return 0; + mutex_lock(&id_priv->handler_mutex); + if (id_priv->state != RDMA_CM_ADDR_BOUND && + id_priv->state != RDMA_CM_ADDR_RESOLVED) + goto out; if (!status) status = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey)); @@ -3720,6 +3717,7 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast) return 0; } +out: mutex_unlock(&id_priv->handler_mutex); return 0; } -- cgit v0.10.2 From b4ba6633ea153266429f16614029ab1578815390 Mon Sep 17 00:00:00 2001 From: Jubin John Date: Thu, 9 Jun 2016 07:51:08 -0700 Subject: IB/hfi1: Fix credit return threshold adjustment The credit return threshold adjustment on mtu change algorithm does not take into account all the kernel send contexts that are assigned per VL. Use the pio send context map to adjust the credit return thresholds for all the allocated and assigned kernel send contexts based on the MTU adjustment per VL. The pio send context map can be changed dynamically based on the actual number of operational vls which is set by the fabric manager. When this happens update the credit return threshold values for all the remapped kernel send contexts. 
Reviewed-by: Dennis Dalessandro Reviewed-by: Mike Marciniszyn Reviewed-by: Jianxin Xiong Signed-off-by: Jubin John Signed-off-by: Doug Ledford diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c index 81619fb..c533d44 100644 --- a/drivers/infiniband/hw/hfi1/chip.c +++ b/drivers/infiniband/hw/hfi1/chip.c @@ -9777,7 +9777,7 @@ static void set_send_length(struct hfi1_pportdata *ppd) u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2) & SEND_LEN_CHECK1_LEN_VL15_MASK) << SEND_LEN_CHECK1_LEN_VL15_SHIFT; - int i; + int i, j; u32 thres; for (i = 0; i < ppd->vls_supported; i++) { @@ -9801,7 +9801,10 @@ static void set_send_length(struct hfi1_pportdata *ppd) sc_mtu_to_threshold(dd->vld[i].sc, dd->vld[i].mtu, dd->rcd[0]->rcvhdrqentsize)); - sc_set_cr_threshold(dd->vld[i].sc, thres); + for (j = 0; j < INIT_SC_PER_VL; j++) + sc_set_cr_threshold( + pio_select_send_context_vl(dd, j, i), + thres); } thres = min(sc_percent_to_threshold(dd->vld[15].sc, 50), sc_mtu_to_threshold(dd->vld[15].sc, diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c index d5edb1a..e0e1ff2 100644 --- a/drivers/infiniband/hw/hfi1/pio.c +++ b/drivers/infiniband/hw/hfi1/pio.c @@ -1798,6 +1798,21 @@ static void pio_map_rcu_callback(struct rcu_head *list) } /* + * Set credit return threshold for the kernel send context + */ +static void set_threshold(struct hfi1_devdata *dd, int scontext, int i) +{ + u32 thres; + + thres = min(sc_percent_to_threshold(dd->kernel_send_context[scontext], + 50), + sc_mtu_to_threshold(dd->kernel_send_context[scontext], + dd->vld[i].mtu, + dd->rcd[0]->rcvhdrqentsize)); + sc_set_cr_threshold(dd->kernel_send_context[scontext], thres); +} + +/* * pio_map_init - called when #vls change * @dd: hfi1_devdata * @port: port number @@ -1872,11 +1887,16 @@ int pio_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls, u8 *vl_scontexts) if (!newmap->map[i]) goto bail; newmap->map[i]->mask = (1 << ilog2(sz)) - 1; - /* assign send contexts */ + /* + * assign send contexts and + * adjust credit return threshold + */ for (j = 0; j < sz; j++) { - if (dd->kernel_send_context[scontext]) + if (dd->kernel_send_context[scontext]) { newmap->map[i]->ksc[j] = dd->kernel_send_context[scontext]; + set_threshold(dd, scontext, i); + } if (++scontext >= first_scontext + vl_scontexts[i]) /* wrap back to first send context */ -- cgit v0.10.2 From 96605672a4172f6e31f31ce29ee27fef68011de0 Mon Sep 17 00:00:00 2001 From: Brian Welty Date: Thu, 9 Jun 2016 07:51:14 -0700 Subject: IB/rdmavt: Correct required callback functions for MODIFY_QP Functions required for MODIFY_PORT were incorrectly being required for MODIFY_QP. 
Reviewed-by: Dennis Dalessandro Signed-off-by: Brian Welty Signed-off-by: Doug Ledford diff --git a/drivers/infiniband/sw/rdmavt/vt.c b/drivers/infiniband/sw/rdmavt/vt.c index e1cc2cc..30c4fda 100644 --- a/drivers/infiniband/sw/rdmavt/vt.c +++ b/drivers/infiniband/sw/rdmavt/vt.c @@ -501,9 +501,7 @@ static noinline int check_support(struct rvt_dev_info *rdi, int verb) !rdi->driver_f.quiesce_qp || !rdi->driver_f.notify_error_qp || !rdi->driver_f.mtu_from_qp || - !rdi->driver_f.mtu_to_path_mtu || - !rdi->driver_f.shut_down_port || - !rdi->driver_f.cap_mask_chg) + !rdi->driver_f.mtu_to_path_mtu) return -EINVAL; break; -- cgit v0.10.2 From 501edc42446e89fa67fb6ef2c9afb50792c310c0 Mon Sep 17 00:00:00 2001 From: Brian Welty Date: Thu, 9 Jun 2016 07:51:20 -0700 Subject: IB/rdmavt: Correct warning during QPN allocation Correct calculation of the low order bits which should be unset based on use of qos_shift parameter when assigning QPN. Reviewed-by: Dennis Dalessandro Signed-off-by: Brian Welty Signed-off-by: Doug Ledford diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c index 7de5134..c3e0d61 100644 --- a/drivers/infiniband/sw/rdmavt/qp.c +++ b/drivers/infiniband/sw/rdmavt/qp.c @@ -369,8 +369,8 @@ static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt, /* wrap to first map page, invert bit 0 */ offset = qpt->incr | ((offset & 1) ^ 1); } - /* there can be no bits at shift and below */ - WARN_ON(offset & (rdi->dparms.qos_shift - 1)); + /* there can be no set bits in low-order QoS bits */ + WARN_ON(offset & (BIT(rdi->dparms.qos_shift) - 1)); qpn = mk_qpn(qpt, map, offset); } -- cgit v0.10.2 From c3c64a951cbdb6096d02dcc339a9c807ce1e976a Mon Sep 17 00:00:00 2001 From: Jubin John Date: Thu, 9 Jun 2016 07:51:27 -0700 Subject: IB/hfi1: Increase packet egress timeout The current value of 500us for the packet egress timeout is too small, which causes the host to declare failure on draining packets too early and unnecessarily bounces the link. Increase this to 50ms taking into account the switch packet discard timer default and the worst case per-VL packet drainage rate. Reviewed-by: Dean Luick Signed-off-by: Jubin John Signed-off-by: Doug Ledford diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c index e0e1ff2..d402245 100644 --- a/drivers/infiniband/hw/hfi1/pio.c +++ b/drivers/infiniband/hw/hfi1/pio.c @@ -995,7 +995,7 @@ static void sc_wait_for_packet_egress(struct send_context *sc, int pause) /* counter is reset if occupancy count changes */ if (reg != reg_prev) loop = 0; - if (loop > 500) { + if (loop > 50000) { /* timed out - bounce the link */ dd_dev_err(dd, "%s: context %u(%u) timeout waiting for packets to egress, remaining count %u, bouncing link\n", -- cgit v0.10.2 From ca2f30a0a6786e6b08eeb8abb2bbb8df58709d6e Mon Sep 17 00:00:00 2001 From: Ira Weiny Date: Thu, 9 Jun 2016 07:51:33 -0700 Subject: IB/hfi1: Prevent context loss If a context has already been assigned to an FD, prevent another assignment.
Reviewed-by: Dennis Dalessandro Signed-off-by: Ira Weiny Signed-off-by: Doug Ledford diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c index 7a5b0e6..c702a00 100644 --- a/drivers/infiniband/hw/hfi1/file_ops.c +++ b/drivers/infiniband/hw/hfi1/file_ops.c @@ -203,6 +203,9 @@ static long hfi1_file_ioctl(struct file *fp, unsigned int cmd, switch (cmd) { case HFI1_IOCTL_ASSIGN_CTXT: + if (uctxt) + return -EINVAL; + if (copy_from_user(&uinfo, (struct hfi1_user_info __user *)arg, sizeof(uinfo))) -- cgit v0.10.2 From 5e9ef24619486213223053678eb9175630ef6bf9 Mon Sep 17 00:00:00 2001 From: Ira Weiny Date: Thu, 9 Jun 2016 07:51:39 -0700 Subject: IB/qib: Prevent context loss If a context has already been assigned to an FD, prevent another assignment. Reviewed-by: Dennis Dalessandro Signed-off-by: Ira Weiny Signed-off-by: Doug Ledford diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c index ff946d5..382466a 100644 --- a/drivers/infiniband/hw/qib/qib_file_ops.c +++ b/drivers/infiniband/hw/qib/qib_file_ops.c @@ -2178,6 +2178,11 @@ static ssize_t qib_write(struct file *fp, const char __user *data, switch (cmd.type) { case QIB_CMD_ASSIGN_CTXT: + if (rcd) { + ret = -EINVAL; + goto bail; + } + ret = qib_assign_ctxt(fp, &cmd.cmd.user_info); if (ret) goto bail; -- cgit v0.10.2 From 93dd0a097859a174817ea94ec55bfc29c5706454 Mon Sep 17 00:00:00 2001 From: Tadeusz Struk Date: Thu, 9 Jun 2016 07:51:45 -0700 Subject: IB/hfi1: Fix potential NULL ptr dereference This fixes potential NULL ptr dereference because IS_ERR(dd) doesn't handle NULL. Fix the issue by initializing the pointer with a not NULL error code. Reviewed-by: Dennis Dalessandro Signed-off-by: Tadeusz Struk Signed-off-by: Doug Ledford diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c index 0d28a5a..eed971c 100644 --- a/drivers/infiniband/hw/hfi1/init.c +++ b/drivers/infiniband/hw/hfi1/init.c @@ -1383,7 +1383,7 @@ static void postinit_cleanup(struct hfi1_devdata *dd) static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { int ret = 0, j, pidx, initfail; - struct hfi1_devdata *dd = NULL; + struct hfi1_devdata *dd = ERR_PTR(-EINVAL); struct hfi1_pportdata *ppd; /* First, lock the non-writable module parameters */ -- cgit v0.10.2 From c078f0dd01b73c70b92a660cb1ce3dfc3cbf2903 Mon Sep 17 00:00:00 2001 From: Tadeusz Struk Date: Thu, 9 Jun 2016 07:51:51 -0700 Subject: IB/hfi1: Fix potential buffer overflow This fixes potential buffer overflow because the sprintf function doesn't check buffer boundaries. Use snprintf instead. 
Reviewed-by: Dennis Dalessandro Signed-off-by: Tadeusz Struk Signed-off-by: Doug Ledford diff --git a/drivers/infiniband/hw/hfi1/qsfp.c b/drivers/infiniband/hw/hfi1/qsfp.c index 2441669..9fb5616 100644 --- a/drivers/infiniband/hw/hfi1/qsfp.c +++ b/drivers/infiniband/hw/hfi1/qsfp.c @@ -579,7 +579,8 @@ int qsfp_dump(struct hfi1_pportdata *ppd, char *buf, int len) if (ppd->qsfp_info.cache_valid) { if (QSFP_IS_CU(cache[QSFP_MOD_TECH_OFFS])) - sprintf(lenstr, "%dM ", cache[QSFP_MOD_LEN_OFFS]); + snprintf(lenstr, sizeof(lenstr), "%dM ", + cache[QSFP_MOD_LEN_OFFS]); power_byte = cache[QSFP_MOD_PWR_OFFS]; sofar += scnprintf(buf + sofar, len - sofar, "PWR:%.3sW\n", -- cgit v0.10.2 From 3ec5fa28c9ea54fb172859d2612a0be6526e4dc1 Mon Sep 17 00:00:00 2001 From: Sebastian Sanchez Date: Thu, 9 Jun 2016 07:51:57 -0700 Subject: IB/hfi1: Remove FULL_MGMT_P_KEY from pkey table at link up FULL_MGMT_P_KEY doesn't get cleared from the pkey table at link bounce because the link down and link bounce code paths are different when moving a QSFP cable on a switch. This causes an HFI unit connected to a switch to try to be initialized to an FM node when the QSFP cable is moved from a MgmtAllowed=NO port to a MgmtAllowed=YES port and back to a MgmtAllowed=NO port. Remove FULL_MGMT_P_KEY from pkey table at link up. Reviewed-by: Dean Luick Signed-off-by: Sebastian Sanchez Signed-off-by: Doug Ledford diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c index c533d44..3487fdf 100644 --- a/drivers/infiniband/hw/hfi1/chip.c +++ b/drivers/infiniband/hw/hfi1/chip.c @@ -1037,7 +1037,7 @@ static void dc_shutdown(struct hfi1_devdata *); static void dc_start(struct hfi1_devdata *); static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp, unsigned int *np); -static void remove_full_mgmt_pkey(struct hfi1_pportdata *ppd); +static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd); /* * Error interrupt table entry. This is used as input to the interrupt @@ -6962,8 +6962,6 @@ void handle_link_down(struct work_struct *work) } reset_neighbor_info(ppd); - if (ppd->mgmt_allowed) - remove_full_mgmt_pkey(ppd); /* disable the port */ clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK); @@ -7072,10 +7070,12 @@ static void add_full_mgmt_pkey(struct hfi1_pportdata *ppd) (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0); } -static void remove_full_mgmt_pkey(struct hfi1_pportdata *ppd) +static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd) { - ppd->pkeys[2] = 0; - (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0); + if (ppd->pkeys[2] != 0) { + ppd->pkeys[2] = 0; + (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0); + } } /* @@ -9168,6 +9168,13 @@ int start_link(struct hfi1_pportdata *ppd) return 0; } + /* + * FULL_MGMT_P_KEY is cleared from the pkey table, so that the + * pkey table can be configured properly if the HFI unit is connected + * to switch port with MgmtAllowed=NO + */ + clear_full_mgmt_pkey(ppd); + return set_link_state(ppd, HLS_DN_POLL); } -- cgit v0.10.2 From 34d351f8ddf6dee24d739c4b00a4404e48089a04 Mon Sep 17 00:00:00 2001 From: Sebastian Sanchez Date: Thu, 9 Jun 2016 07:52:03 -0700 Subject: IB/hfi1: Send a pkey change event on driver pkey update Swapping a cable from a "Mgmt Allowed=No" switch port to a "Mgmt Allowed=Yes" switch port doesn't send a pkey change notification. Therefore, the link doesn't become active as the oib_utils layer uses an old pkey table cache. 
Fix by ensuring the pkey change notification is sent when the table is changed both explicitly by the FM and implicitly by the driver via a cable swap. Reviewed-by: Mike Marciniszyn Signed-off-by: Sebastian Sanchez Signed-off-by: Doug Ledford diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c index 3487fdf..f5de851 100644 --- a/drivers/infiniband/hw/hfi1/chip.c +++ b/drivers/infiniband/hw/hfi1/chip.c @@ -7068,6 +7068,7 @@ static void add_full_mgmt_pkey(struct hfi1_pportdata *ppd) __func__, ppd->pkeys[2], FULL_MGMT_P_KEY); ppd->pkeys[2] = FULL_MGMT_P_KEY; (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0); + hfi1_event_pkey_change(ppd->dd, ppd->port); } static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd) @@ -7075,6 +7076,7 @@ static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd) if (ppd->pkeys[2] != 0) { ppd->pkeys[2] = 0; (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0); + hfi1_event_pkey_change(ppd->dd, ppd->port); } } diff --git a/drivers/infiniband/hw/hfi1/mad.c b/drivers/infiniband/hw/hfi1/mad.c index 2190295..fca07a1 100644 --- a/drivers/infiniband/hw/hfi1/mad.c +++ b/drivers/infiniband/hw/hfi1/mad.c @@ -78,6 +78,16 @@ static inline void clear_opa_smp_data(struct opa_smp *smp) memset(data, 0, size); } +void hfi1_event_pkey_change(struct hfi1_devdata *dd, u8 port) +{ + struct ib_event event; + + event.event = IB_EVENT_PKEY_CHANGE; + event.device = &dd->verbs_dev.rdi.ibdev; + event.element.port_num = port; + ib_dispatch_event(&event); +} + static void send_trap(struct hfi1_ibport *ibp, void *data, unsigned len) { struct ib_mad_send_buf *send_buf; @@ -1418,15 +1428,10 @@ static int set_pkeys(struct hfi1_devdata *dd, u8 port, u16 *pkeys) } if (changed) { - struct ib_event event; - (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0); - - event.event = IB_EVENT_PKEY_CHANGE; - event.device = &dd->verbs_dev.rdi.ibdev; - event.element.port_num = port; - ib_dispatch_event(&event); + hfi1_event_pkey_change(dd, port); } + return 0; } diff --git a/drivers/infiniband/hw/hfi1/mad.h b/drivers/infiniband/hw/hfi1/mad.h index 55ee086..8b734aa 100644 --- a/drivers/infiniband/hw/hfi1/mad.h +++ b/drivers/infiniband/hw/hfi1/mad.h @@ -434,4 +434,6 @@ struct sc2vlnt { COUNTER_MASK(1, 3) | \ COUNTER_MASK(1, 4)) +void hfi1_event_pkey_change(struct hfi1_devdata *dd, u8 port); + #endif /* _HFI1_MAD_H */ -- cgit v0.10.2 From f336ae03149725bb5844166c7b04f7f65f17eec9 Mon Sep 17 00:00:00 2001 From: Talat Batheesh Date: Wed, 22 Jun 2016 17:27:22 +0300 Subject: IB/core: Fix no default GIDs when netdevice reregisters Currently, when the netdevice returned by get_netdev is unregistered, we delete all GIDs (including the default GIDs) and reset their attributes. Therefore, when we re-register it, no default GIDs will be assigned (as their "default GID" attribute will be reset). Fix this by keeping the "default GID" attribute.
Fixes: 03db3a2d81e6 ('IB/core: Add RoCE GID table management') Signed-off-by: Talat Batheesh Signed-off-by: Leon Romanovsky Signed-off-by: Doug Ledford diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c index 0409667..1a2984c 100644 --- a/drivers/infiniband/core/cache.c +++ b/drivers/infiniband/core/cache.c @@ -411,7 +411,9 @@ int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port, for (ix = 0; ix < table->sz; ix++) if (table->data_vec[ix].attr.ndev == ndev) - if (!del_gid(ib_dev, port, table, ix, false)) + if (!del_gid(ib_dev, port, table, ix, + !!(table->data_vec[ix].props & + GID_TABLE_ENTRY_DEFAULT))) deleted = true; write_unlock_irq(&table->rwlock); -- cgit v0.10.2 From c65f6c5a3650876a69d1041a9d3c90986e9ca233 Mon Sep 17 00:00:00 2001 From: Alex Vesker Date: Wed, 22 Jun 2016 17:27:23 +0300 Subject: IB/core: Fix RoCE v1 multicast join logic issue During multicast join of RoCEv1, IGMP join state and max hop limit were updated incorrectly. IGMP join should be sent and marked as joined only on RoCEv2 after a successful join. Max hops should be updated to the hop limit on RoCEv2 regardless of the join state. Fixes: bee3c3c91865 ('IB/cma: Join and leave multicast groups...') Signed-off-by: Alex Vesker Signed-off-by: Leon Romanovsky Signed-off-by: Doug Ledford diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index f0c91ba..dcde870 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -3878,12 +3878,12 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv, gid_type = id_priv->cma_dev->default_gid_type[id_priv->id.port_num - rdma_start_port(id_priv->cma_dev->device)]; if (addr->sa_family == AF_INET) { - if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) + if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) { + mc->multicast.ib->rec.hop_limit = IPV6_DEFAULT_HOPLIMIT; err = cma_igmp_send(ndev, &mc->multicast.ib->rec.mgid, true); - if (!err) { - mc->igmp_joined = true; - mc->multicast.ib->rec.hop_limit = IPV6_DEFAULT_HOPLIMIT; + if (!err) + mc->igmp_joined = true; } } else { if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) -- cgit v0.10.2 From b3556005c5f319285610e3eae88b4078e1a4d3bc Mon Sep 17 00:00:00 2001 From: Eli Cohen Date: Wed, 22 Jun 2016 17:27:24 +0300 Subject: IB/core: Fix false search of the IB_SA_WELL_KNOWN_GUID When virtualization is supported, VFs may send SA MADs to a GID formed by the concatenation of the subnet prefix with the IB_SA_WELL_KNOWN_GUID. When a response is required, the current code will search the local HCA's port for the received GID to figure out the GID index of the entry containing this GID. However, since this is not a real GID it will not be found and an error will be printed. We change the logic to check if the destination GID is this special GID and avoid lookup in this case and use GID index 0.
Fixes: a0c1b2a35087 ('IB/core: Support accessing SA in virtualized environment') Signed-off-by: Eli Cohen Signed-off-by: Leon Romanovsky Signed-off-by: Doug Ledford diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index 1d7d4cf..6298f54 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c @@ -511,12 +511,16 @@ int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, ah_attr->grh.dgid = sgid; if (!rdma_cap_eth_ah(device, port_num)) { - ret = ib_find_cached_gid_by_port(device, &dgid, - IB_GID_TYPE_IB, - port_num, NULL, - &gid_index); - if (ret) - return ret; + if (dgid.global.interface_id != cpu_to_be64(IB_SA_WELL_KNOWN_GUID)) { + ret = ib_find_cached_gid_by_port(device, &dgid, + IB_GID_TYPE_IB, + port_num, NULL, + &gid_index); + if (ret) + return ret; + } else { + gid_index = 0; + } } ah_attr->grh.sgid_index = (u8) gid_index; -- cgit v0.10.2 From b57141c1abe41e26c09b1b1b7ece463464ba6de2 Mon Sep 17 00:00:00 2001 From: Maor Gottlieb Date: Wed, 22 Jun 2016 17:27:25 +0300 Subject: IB/uverbs: Initialize ib_qp_init_attr with zeros Initialize ib_qp_init_attr with zeros in order to avoid garbage in fields that won't be set with user values. Fixes: a060b5629ab06 ('IB/core: generic RDMA READ/WRITE API') Signed-off-by: Maor Gottlieb Signed-off-by: Leon Romanovsky Signed-off-by: Doug Ledford diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index 1a8babb..825021d 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -1747,7 +1747,7 @@ static int create_qp(struct ib_uverbs_file *file, struct ib_srq *srq = NULL; struct ib_qp *qp; char *buf; - struct ib_qp_init_attr attr; + struct ib_qp_init_attr attr = {}; struct ib_uverbs_ex_create_qp_resp resp; int ret; -- cgit v0.10.2 From c9b254955b9f8814966f5dabd34c39d0e0a2b437 Mon Sep 17 00:00:00 2001 From: Eli Cohen Date: Wed, 22 Jun 2016 17:27:26 +0300 Subject: IB/mlx5: Fix post send fence logic If the caller specified IB_SEND_FENCE in the send flags of the work request and no previous work request stated that the successive one should be fenced, the work request would be executed without a fence. This could result in RDMA read or atomic operation failures due to an MR being invalidated. Fix this by adding the mlx5 enumeration for fencing RDMA/atomic operations and fix the logic to apply this.
Fixes: e126ba97dba9 ('mlx5: Add driver for Mellanox Connect-IB adapters') Signed-off-by: Eli Cohen Signed-off-by: Leon Romanovsky Signed-off-by: Doug Ledford diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index ce43422..ce0a7ab 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -3332,10 +3332,11 @@ static u8 get_fence(u8 fence, struct ib_send_wr *wr) return MLX5_FENCE_MODE_SMALL_AND_FENCE; else return fence; - - } else { - return 0; + } else if (unlikely(wr->send_flags & IB_SEND_FENCE)) { + return MLX5_FENCE_MODE_FENCE; } + + return 0; } static int begin_wqe(struct mlx5_ib_qp *qp, void **seg, diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h index e4e2988..630f66a 100644 --- a/include/linux/mlx5/qp.h +++ b/include/linux/mlx5/qp.h @@ -172,6 +172,7 @@ enum { enum { MLX5_FENCE_MODE_NONE = 0 << 5, MLX5_FENCE_MODE_INITIATOR_SMALL = 1 << 5, + MLX5_FENCE_MODE_FENCE = 2 << 5, MLX5_FENCE_MODE_STRONG_ORDERING = 3 << 5, MLX5_FENCE_MODE_SMALL_AND_FENCE = 4 << 5, }; -- cgit v0.10.2 From 00bf534fce23048aa0e6fd8dbceedf097ee65508 Mon Sep 17 00:00:00 2001 From: Talat Batheesh Date: Wed, 22 Jun 2016 17:27:27 +0300 Subject: IB/mlx5: Fix wrong naming of port_rcv_data counter port_xmit_data is written instead of port_rcv_data. Fixes: 3efd9a11212d ('IB/mlx5: Modify MAD reading counters method to use counter registers') Signed-off-by: Talat Batheesh Signed-off-by: Leon Romanovsky Signed-off-by: Doug Ledford diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c index 1534af1..364aab9 100644 --- a/drivers/infiniband/hw/mlx5/mad.c +++ b/drivers/infiniband/hw/mlx5/mad.c @@ -121,7 +121,7 @@ static void pma_cnt_ext_assign(struct ib_pma_portcounters_ext *pma_cnt_ext, pma_cnt_ext->port_xmit_data = cpu_to_be64(MLX5_SUM_CNT(out, transmitted_ib_unicast.octets, transmitted_ib_multicast.octets) >> 2); - pma_cnt_ext->port_xmit_data = + pma_cnt_ext->port_rcv_data = cpu_to_be64(MLX5_SUM_CNT(out, received_ib_unicast.octets, received_ib_multicast.octets) >> 2); pma_cnt_ext->port_xmit_packets = -- cgit v0.10.2 From f2940e2c76bb554a7fbdd28ca5b90904117a9e96 Mon Sep 17 00:00:00 2001 From: Yishai Hadas Date: Wed, 22 Jun 2016 17:27:28 +0300 Subject: IB/mlx4: Fix the SQ size of an RC QP When calculating the required size of an RC QP send queue, leave enough space for masked atomic operations, which require more space than "regular" atomic operation. Fixes: 6fa8f719844b ("IB/mlx4: Add support for masked atomic operations") Signed-off-by: Yishai Hadas Reviewed-by: Jack Morgenstein Reviewed-by: Eran Ben Elisha Signed-off-by: Leon Romanovsky Signed-off-by: Doug Ledford diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index 81b0e1f..5196117 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c @@ -362,7 +362,7 @@ static int send_wqe_overhead(enum mlx4_ib_qp_type type, u32 flags) sizeof (struct mlx4_wqe_raddr_seg); case MLX4_IB_QPT_RC: return sizeof (struct mlx4_wqe_ctrl_seg) + - sizeof (struct mlx4_wqe_atomic_seg) + + sizeof (struct mlx4_wqe_masked_atomic_seg) + sizeof (struct mlx4_wqe_raddr_seg); case MLX4_IB_QPT_SMI: case MLX4_IB_QPT_GSI: -- cgit v0.10.2 From a6100603a4a87fc436199362bdb81cb849faaf6e Mon Sep 17 00:00:00 2001 From: Yishai Hadas Date: Wed, 22 Jun 2016 17:27:29 +0300 Subject: IB/mlx4: Fix error flow when sending mads under SRIOV Fix mad send error flow to prevent double freeing address handles, and leaking tx_ring entries when SRIOV is active. 
If ib_mad_post_send fails, the address handle pointer in the tx_ring entry must be set to NULL (or there will be a double-free) and tx_tail must be incremented (or there will be a leak of tx_ring entries). The tx_ring is handled the same way in the send-completion handler. Fixes: 37bfc7c1e83f ("IB/mlx4: SR-IOV multiplex and demultiplex MADs") Signed-off-by: Yishai Hadas Reviewed-by: Jack Morgenstein Signed-off-by: Leon Romanovsky Signed-off-by: Doug Ledford diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c index d68f506..9c2e53d 100644 --- a/drivers/infiniband/hw/mlx4/mad.c +++ b/drivers/infiniband/hw/mlx4/mad.c @@ -527,7 +527,7 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port, tun_tx_ix = (++tun_qp->tx_ix_head) & (MLX4_NUM_TUNNEL_BUFS - 1); spin_unlock(&tun_qp->tx_lock); if (ret) - goto out; + goto end; tun_mad = (struct mlx4_rcv_tunnel_mad *) (tun_qp->tx_ring[tun_tx_ix].buf.addr); if (tun_qp->tx_ring[tun_tx_ix].ah) @@ -596,9 +596,15 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port, wr.wr.send_flags = IB_SEND_SIGNALED; ret = ib_post_send(src_qp, &wr.wr, &bad_wr); -out: - if (ret) - ib_destroy_ah(ah); + if (!ret) + return 0; + out: + spin_lock(&tun_qp->tx_lock); + tun_qp->tx_ix_tail++; + spin_unlock(&tun_qp->tx_lock); + tun_qp->tx_ring[tun_tx_ix].ah = NULL; +end: + ib_destroy_ah(ah); return ret; } @@ -1326,9 +1332,15 @@ int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port, ret = ib_post_send(send_qp, &wr.wr, &bad_wr); + if (!ret) + return 0; + + spin_lock(&sqp->tx_lock); + sqp->tx_ix_tail++; + spin_unlock(&sqp->tx_lock); + sqp->tx_ring[wire_tx_ix].ah = NULL; out: - if (ret) - ib_destroy_ah(ah); + ib_destroy_ah(ah); return ret; } -- cgit v0.10.2 From 5533c18ab02b17a7f2ac11908e2d97d4b421617d Mon Sep 17 00:00:00 2001 From: Yishai Hadas Date: Wed, 22 Jun 2016 17:27:30 +0300 Subject: IB/mlx4: Verify port number in flow steering create flow In procedure mlx4_ib_create_flow, passing an invalid port number will cause an out-of-bounds array access. Data passed to this procedure can come from user-space. Therefore, need to validate port number before proceeding onwards. Note that we check against the number of physical ports declared at the verbs (ib core) level; When bonding is active, the verbs level sees one physical port, even though the low-level driver sees two ports. Fixes: f77c0162a339 ("IB/mlx4: Add receive flow steering support") Signed-off-by: Yishai Hadas Reviewed-by: Jack Morgenstein Reviewed-by: Moni Shoua Signed-off-by: Leon Romanovsky Signed-off-by: Doug Ledford diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 0eb09e1..42a4607 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -1704,6 +1704,9 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp, struct mlx4_dev *dev = (to_mdev(qp->device))->dev; int is_bonded = mlx4_is_bonded(dev); + if (flow_attr->port < 1 || flow_attr->port > qp->device->phys_port_cnt) + return ERR_PTR(-EINVAL); + if ((flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) && (flow_attr->type != IB_FLOW_ATTR_NORMAL)) return ERR_PTR(-EOPNOTSUPP); -- cgit v0.10.2 From 5b420d9cf7382c6e1512e96e02d18842d272049c Mon Sep 17 00:00:00 2001 From: Dotan Barak Date: Wed, 22 Jun 2016 17:27:31 +0300 Subject: IB/mlx4: Fix memory leak if QP creation failed When RC, UC, or RAW QPs are created, a qp object is allocated (kzalloc). 
If at a later point (in procedure create_qp_common) the qp creation fails, this qp object must be freed. Fixes: 1ffeb2eb8be99 ("IB/mlx4: SR-IOV IB context objects and proxy/tunnel SQP support") Signed-off-by: Dotan Barak Signed-off-by: Jack Morgenstein Signed-off-by: Leon Romanovsky Signed-off-by: Doug Ledford diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index 5196117..8db8405 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c @@ -1191,8 +1191,10 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd, { err = create_qp_common(to_mdev(pd->device), pd, init_attr, udata, 0, &qp, gfp); - if (err) + if (err) { + kfree(qp); return ERR_PTR(err); + } qp->ibqp.qp_num = qp->mqp.qpn; qp->xrcdn = xrcdn; -- cgit v0.10.2 From cbc9355a939b90263c58beb855f8151b56634c42 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Wed, 22 Jun 2016 17:27:32 +0300 Subject: IB/mlx4: Prevent cross page boundary allocation Prevent cross page boundary allocation by allocating new page, this is required to be aligned with ConnectX-3 HW requirements. Not doing that might cause to "RDMA read local protection" error. Fixes: 1b2cd0fc673c ('IB/mlx4: Support the new memory registration API') Suggested-by: Christoph Hellwig Signed-off-by: Chuck Lever Reviewed-by: Sagi Grimberg Signed-off-by: Yishai Hadas Signed-off-by: Leon Romanovsky Signed-off-by: Leon Romanovsky Signed-off-by: Doug Ledford diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h index 6c5ac5d..29acda2 100644 --- a/drivers/infiniband/hw/mlx4/mlx4_ib.h +++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h @@ -139,7 +139,7 @@ struct mlx4_ib_mr { u32 max_pages; struct mlx4_mr mmr; struct ib_umem *umem; - void *pages_alloc; + size_t page_map_size; }; struct mlx4_ib_mw { diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c index 6312721..5d73989 100644 --- a/drivers/infiniband/hw/mlx4/mr.c +++ b/drivers/infiniband/hw/mlx4/mr.c @@ -277,20 +277,23 @@ mlx4_alloc_priv_pages(struct ib_device *device, struct mlx4_ib_mr *mr, int max_pages) { - int size = max_pages * sizeof(u64); - int add_size; int ret; - add_size = max_t(int, MLX4_MR_PAGES_ALIGN - ARCH_KMALLOC_MINALIGN, 0); + /* Ensure that size is aligned to DMA cacheline + * requirements. + * max_pages is limited to MLX4_MAX_FAST_REG_PAGES + * so page_map_size will never cross PAGE_SIZE. + */ + mr->page_map_size = roundup(max_pages * sizeof(u64), + MLX4_MR_PAGES_ALIGN); - mr->pages_alloc = kzalloc(size + add_size, GFP_KERNEL); - if (!mr->pages_alloc) + /* Prevent cross page boundary allocation. 
*/ + mr->pages = (__be64 *)get_zeroed_page(GFP_KERNEL); + if (!mr->pages) return -ENOMEM; - mr->pages = PTR_ALIGN(mr->pages_alloc, MLX4_MR_PAGES_ALIGN); - mr->page_map = dma_map_single(device->dma_device, mr->pages, - size, DMA_TO_DEVICE); + mr->page_map_size, DMA_TO_DEVICE); if (dma_mapping_error(device->dma_device, mr->page_map)) { ret = -ENOMEM; @@ -298,9 +301,9 @@ mlx4_alloc_priv_pages(struct ib_device *device, } return 0; -err: - kfree(mr->pages_alloc); +err: + free_page((unsigned long)mr->pages); return ret; } @@ -309,11 +312,10 @@ mlx4_free_priv_pages(struct mlx4_ib_mr *mr) { if (mr->pages) { struct ib_device *device = mr->ibmr.device; - int size = mr->max_pages * sizeof(u64); dma_unmap_single(device->dma_device, mr->page_map, - size, DMA_TO_DEVICE); - kfree(mr->pages_alloc); + mr->page_map_size, DMA_TO_DEVICE); + free_page((unsigned long)mr->pages); mr->pages = NULL; } } @@ -537,14 +539,12 @@ int mlx4_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents, mr->npages = 0; ib_dma_sync_single_for_cpu(ibmr->device, mr->page_map, - sizeof(u64) * mr->max_pages, - DMA_TO_DEVICE); + mr->page_map_size, DMA_TO_DEVICE); rc = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, mlx4_set_page); ib_dma_sync_single_for_device(ibmr->device, mr->page_map, - sizeof(u64) * mr->max_pages, - DMA_TO_DEVICE); + mr->page_map_size, DMA_TO_DEVICE); return rc; } -- cgit v0.10.2 From 2aee309d3e01447c55fdf89cef05a0e2be372655 Mon Sep 17 00:00:00 2001 From: Mike Marciniszyn Date: Fri, 17 Jun 2016 19:17:49 -0700 Subject: IB/hfi1: Fix deadlock with txreq allocation slow path A failure in the get_txreq() inline will result in a slow path retry using __get_txreq(). __get_txreq() attempts to procure the qp s_lock, which is already held in all callers. Fix by deleting the s_lock maintenance in __get_txreq() and add sparse syntax hooks to future proof the code. 
Cc: Stable # 4.6+ Reviewed-by: Dennis Dalessandro Signed-off-by: Mike Marciniszyn Signed-off-by: Dennis Dalessandro Signed-off-by: Doug Ledford diff --git a/drivers/infiniband/hw/hfi1/verbs_txreq.c b/drivers/infiniband/hw/hfi1/verbs_txreq.c index bc95c41..d8fb056 100644 --- a/drivers/infiniband/hw/hfi1/verbs_txreq.c +++ b/drivers/infiniband/hw/hfi1/verbs_txreq.c @@ -92,11 +92,10 @@ void hfi1_put_txreq(struct verbs_txreq *tx) struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev, struct rvt_qp *qp) + __must_hold(&qp->s_lock) { struct verbs_txreq *tx = ERR_PTR(-EBUSY); - unsigned long flags; - spin_lock_irqsave(&qp->s_lock, flags); write_seqlock(&dev->iowait_lock); if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) { struct hfi1_qp_priv *priv; @@ -116,7 +115,6 @@ struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev, } out: write_sequnlock(&dev->iowait_lock); - spin_unlock_irqrestore(&qp->s_lock, flags); return tx; } diff --git a/drivers/infiniband/hw/hfi1/verbs_txreq.h b/drivers/infiniband/hw/hfi1/verbs_txreq.h index 1cf69b2..a1d6e08 100644 --- a/drivers/infiniband/hw/hfi1/verbs_txreq.h +++ b/drivers/infiniband/hw/hfi1/verbs_txreq.h @@ -73,6 +73,7 @@ struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev, static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev, struct rvt_qp *qp) + __must_hold(&qp->slock) { struct verbs_txreq *tx; struct hfi1_qp_priv *priv = qp->priv; -- cgit v0.10.2 From 8ae84f7c56044ee17ef6b700cb34d11ad9428a2e Mon Sep 17 00:00:00 2001 From: Ashutosh Dixit Date: Fri, 17 Jun 2016 19:17:54 -0700 Subject: IB/hfi1: Don't zero out qp->s_ack_queue in rvt_reset_qp Since rvt_reset_qp already zero's out qp->s_ack_queue head and tail pointers, there is no need to zero out qp->s_ack_queue itself. Reviewed-by: Dennis Dalessandro Reviewed-by: Mike Marciniszyn Signed-off-by: Ashutosh Dixit Signed-off-by: Dennis Dalessandro Signed-off-by: Doug Ledford diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c index c3e0d61..d04f4fe 100644 --- a/drivers/infiniband/sw/rdmavt/qp.c +++ b/drivers/infiniband/sw/rdmavt/qp.c @@ -576,12 +576,6 @@ static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, qp->s_ssn = 1; qp->s_lsn = 0; qp->s_mig_state = IB_MIG_MIGRATED; - if (qp->s_ack_queue) - memset( - qp->s_ack_queue, - 0, - rvt_max_atomic(rdi) * - sizeof(*qp->s_ack_queue)); qp->r_head_ack_queue = 0; qp->s_tail_ack_queue = 0; qp->s_num_rd_atomic = 0; -- cgit v0.10.2 From c755f4afa66ad3ed98870bd3254f37c47fb2c800 Mon Sep 17 00:00:00 2001 From: Mike Marciniszyn Date: Wed, 22 Jun 2016 13:29:33 -0700 Subject: IB/rdmavt: Correct qp_priv_alloc() return value test The current drivers return errors from this calldown wrapped in an ERR_PTR(). The rdmavt code incorrectly tests for NULL. The code is fixed to use IS_ERR() and change ret according to the driver return value. Cc: Stable # 4.6+ Reviewed-by: Dennis Dalessandro Signed-off-by: Mike Marciniszyn Signed-off-by: Dennis Dalessandro Signed-off-by: Doug Ledford diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c index d04f4fe..41ba7e9 100644 --- a/drivers/infiniband/sw/rdmavt/qp.c +++ b/drivers/infiniband/sw/rdmavt/qp.c @@ -699,8 +699,10 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd, * initialization that is needed. 
*/ priv = rdi->driver_f.qp_priv_alloc(rdi, qp, gfp); - if (!priv) + if (IS_ERR(priv)) { + ret = priv; goto bail_qp; + } qp->priv = priv; qp->timeout_jiffies = usecs_to_jiffies((4096UL * (1UL << qp->timeout)) / diff --git a/include/rdma/rdma_vt.h b/include/rdma/rdma_vt.h index 16274e2..9c9a27d 100644 --- a/include/rdma/rdma_vt.h +++ b/include/rdma/rdma_vt.h @@ -203,7 +203,9 @@ struct rvt_driver_provided { /* * Allocate a private queue pair data structure for driver specific - * information which is opaque to rdmavt. + * information which is opaque to rdmavt. Errors are returned via + * ERR_PTR(err). The driver is free to return NULL or a valid + * pointer. */ void * (*qp_priv_alloc)(struct rvt_dev_info *rdi, struct rvt_qp *qp, gfp_t gfp); -- cgit v0.10.2 From 747f1c6d9be749a29612fc78c321b97099906008 Mon Sep 17 00:00:00 2001 From: Shiraz Saleem Date: Tue, 14 Jun 2016 16:54:16 -0500 Subject: i40iw: Correct CQ arming CQ is armed for solicited events only, ignoring other notification flags. Correct this by arming for next and arming for solicited event if IB_CQ_SOLICITED is set. Also protect CQ shadow area update with spinlock. Signed-off-by: Shiraz Saleem Signed-off-by: Doug Ledford diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c index 02a735b..c6e75ac 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c +++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c @@ -2327,13 +2327,16 @@ static int i40iw_req_notify_cq(struct ib_cq *ibcq, { struct i40iw_cq *iwcq; struct i40iw_cq_uk *ukcq; - enum i40iw_completion_notify cq_notify = IW_CQ_COMPL_SOLICITED; + unsigned long flags; + enum i40iw_completion_notify cq_notify = IW_CQ_COMPL_EVENT; iwcq = (struct i40iw_cq *)ibcq; ukcq = &iwcq->sc_cq.cq_uk; - if (notify_flags == IB_CQ_NEXT_COMP) - cq_notify = IW_CQ_COMPL_EVENT; + if (notify_flags == IB_CQ_SOLICITED) + cq_notify = IW_CQ_COMPL_SOLICITED; + spin_lock_irqsave(&iwcq->lock, flags); ukcq->ops.iw_cq_request_notification(ukcq, cq_notify); + spin_unlock_irqrestore(&iwcq->lock, flags); return 0; } -- cgit v0.10.2 From ee23abd75c5076e51061c275e8f659d754a63c9d Mon Sep 17 00:00:00 2001 From: Faisal Latif Date: Tue, 14 Jun 2016 16:54:17 -0500 Subject: i40iw: Correct status check on i40iw_get_pble i40iw_get_pble returns 0 on success. Correct the check on return code. Signed-off-by: Faisal Latif Signed-off-by: Shiraz Saleem Signed-off-by: Doug Ledford diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c index c6e75ac..65bea9c 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c +++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c @@ -1527,7 +1527,7 @@ static struct ib_mr *i40iw_alloc_mr(struct ib_pd *pd, mutex_lock(&iwdev->pbl_mutex); status = i40iw_get_pble(&iwdev->sc_dev, iwdev->pble_rsrc, palloc, iwmr->page_cnt); mutex_unlock(&iwdev->pbl_mutex); - if (!status) + if (status) goto err1; if (palloc->level != I40IW_LEVEL_1) -- cgit v0.10.2 From 0477e18145c565f9ca74c6df4112f818f673fcaa Mon Sep 17 00:00:00 2001 From: Faisal Latif Date: Tue, 14 Jun 2016 16:54:18 -0500 Subject: i40iw: Return correct max_fast_reg_page_list_len Return correct value for max_fast_reg_page_list_len from i40iw_query_device(). 
Signed-off-by: Faisal Latif Signed-off-by: Shiraz Saleem Signed-off-by: Doug Ledford diff --git a/drivers/infiniband/hw/i40iw/i40iw.h b/drivers/infiniband/hw/i40iw/i40iw.h index 8b95320..14f16a2 100644 --- a/drivers/infiniband/hw/i40iw/i40iw.h +++ b/drivers/infiniband/hw/i40iw/i40iw.h @@ -113,6 +113,7 @@ #define IW_HMC_OBJ_TYPE_NUM ARRAY_SIZE(iw_hmc_obj_types) #define IW_CFG_FPM_QP_COUNT 32768 +#define I40IW_MAX_PAGES_PER_FMR 512 #define I40IW_MTU_TO_MSS 40 #define I40IW_DEFAULT_MSS 1460 diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c index 65bea9c..31eda323 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c +++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c @@ -79,6 +79,7 @@ static int i40iw_query_device(struct ib_device *ibdev, props->max_qp_init_rd_atom = props->max_qp_rd_atom; props->atomic_cap = IB_ATOMIC_NONE; props->max_map_per_fmr = 1; + props->max_fast_reg_page_list_len = I40IW_MAX_PAGES_PER_FMR; return 0; } -- cgit v0.10.2 From 7748e4990de42ea796543c0ffd34118c3a5e6a98 Mon Sep 17 00:00:00 2001 From: Shiraz Saleem Date: Tue, 14 Jun 2016 16:54:19 -0500 Subject: i40iw: Enable level-1 PBL for fast memory registration Set the chunk_size to enable level-1 PBL support when the fast memory page count is more than one. Signed-off-by: Shiraz Saleem Signed-off-by: Faisal Latif Signed-off-by: Doug Ledford diff --git a/drivers/infiniband/hw/i40iw/i40iw.h b/drivers/infiniband/hw/i40iw/i40iw.h index 14f16a2..b738acd 100644 --- a/drivers/infiniband/hw/i40iw/i40iw.h +++ b/drivers/infiniband/hw/i40iw/i40iw.h @@ -114,6 +114,7 @@ #define IW_HMC_OBJ_TYPE_NUM ARRAY_SIZE(iw_hmc_obj_types) #define IW_CFG_FPM_QP_COUNT 32768 #define I40IW_MAX_PAGES_PER_FMR 512 +#define I40IW_MIN_PAGES_PER_FMR 1 #define I40IW_MTU_TO_MSS 40 #define I40IW_DEFAULT_MSS 1460 diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c index 31eda323..33959ed 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c +++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c @@ -2150,6 +2150,7 @@ static int i40iw_post_send(struct ib_qp *ibqp, struct i40iw_sc_dev *dev = &iwqp->iwdev->sc_dev; struct i40iw_fast_reg_stag_info info; + memset(&info, 0, sizeof(info)); info.access_rights = I40IW_ACCESS_FLAGS_LOCALREAD; info.access_rights |= i40iw_get_user_access(flags); info.stag_key = reg_wr(ib_wr)->key & 0xff; @@ -2159,10 +2160,14 @@ static int i40iw_post_send(struct ib_qp *ibqp, info.addr_type = I40IW_ADDR_TYPE_VA_BASED; info.va = (void *)(uintptr_t)iwmr->ibmr.iova; info.total_len = iwmr->ibmr.length; + info.reg_addr_pa = *(u64 *)palloc->level1.addr; info.first_pm_pbl_index = palloc->level1.idx; info.local_fence = ib_wr->send_flags & IB_SEND_FENCE; info.signaled = ib_wr->send_flags & IB_SEND_SIGNALED; + if (iwmr->npages > I40IW_MIN_PAGES_PER_FMR) + info.chunk_size = 1; + if (page_shift == 21) info.page_size = 1; /* 2M page */ -- cgit v0.10.2 From c0cf4512a31eb3cec70b066bc36ed55f7d05b8c0 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Thu, 23 Jun 2016 09:35:48 +0200 Subject: IB/srpt: Reduce QP buffer size The memory needed for the send and receive queues associated with a QP is proportional to the max_sge parameter. The current value of that parameter is such that with an mlx4 HCA the QP buffer size is 8 MB. Since DMA is used for communication between HCA and CPU that buffer either has to be allocated coherently or map_single() must succeed for that buffer. Since large contiguous allocations are fragile and since the maximum segment size for e.g. 
swiotlb is 256 KB, reduce the max_sge parameter. This patch avoids that the following text appears on the console after SRP logout and relogin on a system equipped with multiple IB HCAs: mlx4_core 0000:05:00.0: swiotlb buffer is full (sz: 8388608 bytes) swiotlb: coherent allocation failed for device 0000:05:00.0 size=8388608 CPU: 11 PID: 148 Comm: kworker/11:1 Not tainted 4.7.0-rc4-dbg+ #1 Call Trace: [] dump_stack+0x67/0x92 [] swiotlb_alloc_coherent+0x141/0x150 [] x86_swiotlb_alloc_coherent+0x3e/0x50 [] mlx4_buf_direct_alloc.isra.5+0x9a/0x120 [mlx4_core] [] mlx4_buf_alloc+0x165/0x1a0 [mlx4_core] [] create_qp_common.isra.29+0x57d/0xff0 [mlx4_ib] [] mlx4_ib_create_qp+0x12a/0x3f0 [mlx4_ib] [] ib_create_qp+0x3a/0x250 [ib_core] [] srpt_cm_handler+0x4bb/0xcad [ib_srpt] [] cm_process_work+0x20/0xf0 [ib_cm] [] cm_work_handler+0x1ac0/0x2059 [ib_cm] [] process_one_work+0x19d/0x490 [] worker_thread+0x49/0x490 [] kthread+0xea/0x100 [] ret_from_fork+0x1f/0x40 Fixes: b99f8e4d7bcd ("IB/srpt: convert to the generic RDMA READ/WRITE API") Signed-off-by: Bart Van Assche Cc: Laurence Oberman Cc: Christoph Hellwig Cc: Sagi Grimberg Signed-off-by: Doug Ledford diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c index e68b20cb..4a41556 100644 --- a/drivers/infiniband/ulp/srpt/ib_srpt.c +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c @@ -1638,8 +1638,7 @@ retry: */ qp_init->cap.max_send_wr = srp_sq_size / 2; qp_init->cap.max_rdma_ctxs = srp_sq_size / 2; - qp_init->cap.max_send_sge = max(sdev->device->attrs.max_sge_rd, - sdev->device->attrs.max_sge); + qp_init->cap.max_send_sge = SRPT_DEF_SG_PER_WQE; qp_init->port_num = ch->sport->port; ch->qp = ib_create_qp(sdev->pd, qp_init); diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.h b/drivers/infiniband/ulp/srpt/ib_srpt.h index fee6bfd..3890304 100644 --- a/drivers/infiniband/ulp/srpt/ib_srpt.h +++ b/drivers/infiniband/ulp/srpt/ib_srpt.h @@ -106,6 +106,7 @@ enum { SRP_LOGIN_RSP_MULTICHAN_MAINTAINED = 0x2, SRPT_DEF_SG_TABLESIZE = 128, + SRPT_DEF_SG_PER_WQE = 16, MIN_SRPT_SQ_SIZE = 16, DEF_SRPT_SQ_SIZE = 4096, -- cgit v0.10.2
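
The last patch works because the verbs layer sizes a QP's send queue roughly as max_send_wr times the per-WQE size, and the per-WQE size grows with max_send_sge, so capping max_send_sge at a small constant keeps the mlx4 SQ buffer far below the 256 KB swiotlb segment limit. The following is a minimal, hedged sketch (not part of any patch above) of how a ULP might fill struct ib_qp_init_attr along those lines; the helper name create_small_sq_qp, the single shared CQ, and the absence of an SRQ are illustrative assumptions, while the srp_sq_size halving, max_rdma_ctxs, and SRPT_DEF_SG_PER_WQE value mirror the ib_srpt change.

/*
 * Sketch: create an RC QP whose send-queue buffer stays small by
 * bounding max_send_sge, in the spirit of the ib_srpt fix above.
 * Assumes the caller already has a PD and a CQ.
 */
#include <rdma/ib_verbs.h>

#define SRPT_DEF_SG_PER_WQE	16	/* value taken from the patch above */

static struct ib_qp *create_small_sq_qp(struct ib_pd *pd, struct ib_cq *cq,
					u32 srp_sq_size, u8 port_num)
{
	/* Zero-initialize so unused fields hold no garbage,
	 * as in the uverbs fix earlier in this series. */
	struct ib_qp_init_attr qp_init = {};

	qp_init.qp_type = IB_QPT_RC;
	qp_init.sq_sig_type = IB_SIGNAL_REQ_WR;
	qp_init.send_cq = cq;
	qp_init.recv_cq = cq;
	qp_init.port_num = port_num;
	qp_init.cap.max_send_wr = srp_sq_size / 2;
	qp_init.cap.max_rdma_ctxs = srp_sq_size / 2;
	/* A small fixed SGE count, rather than the device maximum,
	 * keeps max_send_wr * WQE-size well under the swiotlb
	 * 256 KB segment limit on mlx4 hardware. */
	qp_init.cap.max_send_sge = SRPT_DEF_SG_PER_WQE;

	return ib_create_qp(pd, &qp_init);	/* ERR_PTR() on failure */
}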