From 91018f8632e09e3a617c9fc2efbbdaa2922d2fe7 Mon Sep 17 00:00:00 2001 From: Kumar Sanghvi Date: Sat, 25 Feb 2012 17:45:02 -0800 Subject: RDMA/cxgb4: Add missing peer2peer check in MPAv2 code Don't worry about p2p_type if peer2peer itself is not requested in the first place. Signed-off-by: Kumar Sanghvi Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index 0668bb3..006a353 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c @@ -1114,7 +1114,7 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb) * generated when moving QP to RTS state. * A TERM message will be sent after QP has moved to RTS state */ - if ((ep->mpa_attr.version == 2) && + if ((ep->mpa_attr.version == 2) && peer2peer && (ep->mpa_attr.p2p_type != p2p_type)) { ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED; rtr_mismatch = 1; -- cgit v0.10.2 From 8dd87fba939370e729b0ee72c163f279d310de06 Mon Sep 17 00:00:00 2001 From: Tatyana Nikolova Date: Sat, 25 Feb 2012 17:45:37 -0800 Subject: RDMA/nes: Fixes for sparse endianness warnings Fix endianness problems detected by sparse, introduced with the enhanced MPA patch. Reported-by: Dan Carpenter Signed-off-by: Tatyana Nikolova Signed-off-by: Faisal Latif Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c index a4972ab..da2c67db 100644 --- a/drivers/infiniband/hw/nes/nes_cm.c +++ b/drivers/infiniband/hw/nes/nes_cm.c @@ -338,18 +338,21 @@ static int parse_mpa(struct nes_cm_node *cm_node, u8 *buffer, u32 *type, case IETF_MPA_V2: { u16 ird_size; u16 ord_size; + u16 rtr_ctrl_ird; + u16 rtr_ctrl_ord; + mpa_v2_frame = (struct ietf_mpa_v2 *)buffer; mpa_hdr_len += IETF_RTR_MSG_SIZE; cm_node->mpa_frame_size -= IETF_RTR_MSG_SIZE; rtr_msg = &mpa_v2_frame->rtr_msg; /* parse rtr message */ - rtr_msg->ctrl_ird = ntohs(rtr_msg->ctrl_ird); - rtr_msg->ctrl_ord = ntohs(rtr_msg->ctrl_ord); - ird_size = rtr_msg->ctrl_ird & IETF_NO_IRD_ORD; - ord_size = rtr_msg->ctrl_ord & IETF_NO_IRD_ORD; + rtr_ctrl_ird = ntohs(rtr_msg->ctrl_ird); + rtr_ctrl_ord = ntohs(rtr_msg->ctrl_ord); + ird_size = rtr_ctrl_ird & IETF_NO_IRD_ORD; + ord_size = rtr_ctrl_ord & IETF_NO_IRD_ORD; - if (!(rtr_msg->ctrl_ird & IETF_PEER_TO_PEER)) { + if (!(rtr_ctrl_ird & IETF_PEER_TO_PEER)) { /* send reset */ return -EINVAL; } @@ -370,9 +373,9 @@ static int parse_mpa(struct nes_cm_node *cm_node, u8 *buffer, u32 *type, } } - if (rtr_msg->ctrl_ord & IETF_RDMA0_READ) { + if (rtr_ctrl_ord & IETF_RDMA0_READ) { cm_node->send_rdma0_op = SEND_RDMA_READ_ZERO; - } else if (rtr_msg->ctrl_ord & IETF_RDMA0_WRITE) { + } else if (rtr_ctrl_ord & IETF_RDMA0_WRITE) { cm_node->send_rdma0_op = SEND_RDMA_WRITE_ZERO; } else { /* Not supported RDMA0 operation */ return -EINVAL; } @@ -543,6 +546,8 @@ static void build_mpa_v2(struct nes_cm_node *cm_node, { struct ietf_mpa_v2 *mpa_frame = (struct ietf_mpa_v2 *)start_addr; struct ietf_rtr_msg *rtr_msg = &mpa_frame->rtr_msg; + u16 ctrl_ird; + u16 ctrl_ord; /* initialize the upper 5 bytes of the frame */ build_mpa_v1(cm_node, start_addr, mpa_key); @@ -550,31 +555,31 @@ mpa_frame->priv_data_len += htons(IETF_RTR_MSG_SIZE); /* initialize RTR msg */ - rtr_msg->ctrl_ird = (cm_node->ird_size > IETF_NO_IRD_ORD) ? + ctrl_ird = (cm_node->ird_size > IETF_NO_IRD_ORD) ? IETF_NO_IRD_ORD : cm_node->ird_size; - rtr_msg->ctrl_ord = (cm_node->ord_size > IETF_NO_IRD_ORD) ?
+ ctrl_ord = (cm_node->ord_size > IETF_NO_IRD_ORD) ? IETF_NO_IRD_ORD : cm_node->ord_size; - rtr_msg->ctrl_ird |= IETF_PEER_TO_PEER; - rtr_msg->ctrl_ird |= IETF_FLPDU_ZERO_LEN; + ctrl_ird |= IETF_PEER_TO_PEER; + ctrl_ird |= IETF_FLPDU_ZERO_LEN; switch (mpa_key) { case MPA_KEY_REQUEST: - rtr_msg->ctrl_ord |= IETF_RDMA0_WRITE; - rtr_msg->ctrl_ord |= IETF_RDMA0_READ; + ctrl_ord |= IETF_RDMA0_WRITE; + ctrl_ord |= IETF_RDMA0_READ; break; case MPA_KEY_REPLY: switch (cm_node->send_rdma0_op) { case SEND_RDMA_WRITE_ZERO: - rtr_msg->ctrl_ord |= IETF_RDMA0_WRITE; + ctrl_ord |= IETF_RDMA0_WRITE; break; case SEND_RDMA_READ_ZERO: - rtr_msg->ctrl_ord |= IETF_RDMA0_READ; + ctrl_ord |= IETF_RDMA0_READ; break; } } - rtr_msg->ctrl_ird = htons(rtr_msg->ctrl_ird); - rtr_msg->ctrl_ord = htons(rtr_msg->ctrl_ord); + rtr_msg->ctrl_ird = htons(ctrl_ird); + rtr_msg->ctrl_ord = htons(ctrl_ord); } /** -- cgit v0.10.2 From a778f3fddc6fc2ed4c065f6e160d517a5959f949 Mon Sep 17 00:00:00 2001 From: Mike Marciniszyn Date: Sat, 25 Feb 2012 17:45:49 -0800 Subject: IB/qib: Add logic for affinity hint Call irq_set_affinity_hint() to give userspace programs such as irqbalance the information to be able to distribute qib interrupts appropriately. The logic allocates all non-receive interrupts to the first CPU local to the HCA. Receive interrupts are allocated round robin starting with the second CPU local to the HCA with potential wrap back to the second CPU. This patch also adds a refinement to the name registered for MSI-X interrupts so that user level scripts can determine the device associated with the IRQs when there are multiple HCAs with a potentially different set of local CPUs. Signed-off-by: Mike Marciniszyn Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h index b881bdc..6b811e3 100644 --- a/drivers/infiniband/hw/qib/qib.h +++ b/drivers/infiniband/hw/qib/qib.h @@ -427,6 +427,14 @@ struct qib_verbs_txreq { /* how often we check for packet activity for "power on hours (in seconds) */ #define ACTIVITY_TIMER 5 +#define MAX_NAME_SIZE 64 +struct qib_msix_entry { + struct msix_entry msix; + void *arg; + char name[MAX_NAME_SIZE]; + cpumask_var_t mask; +}; + /* Below is an opaque struct. Each chip (device) can maintain * private data needed for its operation, but not germane to the * rest of the driver. 
For convenience, we define another that @@ -1355,7 +1363,7 @@ int qib_pcie_init(struct pci_dev *, const struct pci_device_id *); int qib_pcie_ddinit(struct qib_devdata *, struct pci_dev *, const struct pci_device_id *); void qib_pcie_ddcleanup(struct qib_devdata *); -int qib_pcie_params(struct qib_devdata *, u32, u32 *, struct msix_entry *); +int qib_pcie_params(struct qib_devdata *, u32, u32 *, struct qib_msix_entry *); int qib_reinit_intr(struct qib_devdata *); void qib_enable_intx(struct pci_dev *); void qib_nomsi(struct qib_devdata *); diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c index 41e9208..060b960 100644 --- a/drivers/infiniband/hw/qib/qib_iba7322.c +++ b/drivers/infiniband/hw/qib/qib_iba7322.c @@ -541,8 +541,7 @@ struct qib_chip_specific { u32 lastbuf_for_pio; u32 stay_in_freeze; u32 recovery_ports_initted; - struct msix_entry *msix_entries; - void **msix_arg; + struct qib_msix_entry *msix_entries; unsigned long *sendchkenable; unsigned long *sendgrhchk; unsigned long *sendibchk; @@ -639,24 +638,24 @@ static struct { int lsb; int port; /* 0 if not port-specific, else port # */ } irq_table[] = { - { QIB_DRV_NAME, qib_7322intr, -1, 0 }, - { QIB_DRV_NAME " (buf avail)", qib_7322bufavail, + { "", qib_7322intr, -1, 0 }, + { " (buf avail)", qib_7322bufavail, SYM_LSB(IntStatus, SendBufAvail), 0 }, - { QIB_DRV_NAME " (sdma 0)", sdma_intr, + { " (sdma 0)", sdma_intr, SYM_LSB(IntStatus, SDmaInt_0), 1 }, - { QIB_DRV_NAME " (sdma 1)", sdma_intr, + { " (sdma 1)", sdma_intr, SYM_LSB(IntStatus, SDmaInt_1), 2 }, - { QIB_DRV_NAME " (sdmaI 0)", sdma_idle_intr, + { " (sdmaI 0)", sdma_idle_intr, SYM_LSB(IntStatus, SDmaIdleInt_0), 1 }, - { QIB_DRV_NAME " (sdmaI 1)", sdma_idle_intr, + { " (sdmaI 1)", sdma_idle_intr, SYM_LSB(IntStatus, SDmaIdleInt_1), 2 }, - { QIB_DRV_NAME " (sdmaP 0)", sdma_progress_intr, + { " (sdmaP 0)", sdma_progress_intr, SYM_LSB(IntStatus, SDmaProgressInt_0), 1 }, - { QIB_DRV_NAME " (sdmaP 1)", sdma_progress_intr, + { " (sdmaP 1)", sdma_progress_intr, SYM_LSB(IntStatus, SDmaProgressInt_1), 2 }, - { QIB_DRV_NAME " (sdmaC 0)", sdma_cleanup_intr, + { " (sdmaC 0)", sdma_cleanup_intr, SYM_LSB(IntStatus, SDmaCleanupDone_0), 1 }, - { QIB_DRV_NAME " (sdmaC 1)", sdma_cleanup_intr, + { " (sdmaC 1)", sdma_cleanup_intr, SYM_LSB(IntStatus, SDmaCleanupDone_1), 2 }, }; @@ -2567,9 +2566,13 @@ static void qib_7322_nomsix(struct qib_devdata *dd) int i; dd->cspec->num_msix_entries = 0; - for (i = 0; i < n; i++) - free_irq(dd->cspec->msix_entries[i].vector, - dd->cspec->msix_arg[i]); + for (i = 0; i < n; i++) { + irq_set_affinity_hint( + dd->cspec->msix_entries[i].msix.vector, NULL); + free_cpumask_var(dd->cspec->msix_entries[i].mask); + free_irq(dd->cspec->msix_entries[i].msix.vector, + dd->cspec->msix_entries[i].arg); + } qib_nomsix(dd); } /* make sure no MSIx interrupts are left pending */ @@ -2597,7 +2600,6 @@ static void qib_setup_7322_cleanup(struct qib_devdata *dd) kfree(dd->cspec->sendgrhchk); kfree(dd->cspec->sendibchk); kfree(dd->cspec->msix_entries); - kfree(dd->cspec->msix_arg); for (i = 0; i < dd->num_pports; i++) { unsigned long flags; u32 mask = QSFP_GPIO_MOD_PRS_N | @@ -3070,6 +3072,8 @@ static void qib_setup_7322_interrupt(struct qib_devdata *dd, int clearpend) int ret, i, msixnum; u64 redirect[6]; u64 mask; + const struct cpumask *local_mask; + int firstcpu, secondcpu = 0, currrcvcpu = 0; if (!dd->num_pports) return; @@ -3118,13 +3122,28 @@ try_intx: memset(redirect, 0, sizeof redirect); mask = ~0ULL; msixnum = 0; + local_mask = 
cpumask_of_pcibus(dd->pcidev->bus); + firstcpu = cpumask_first(local_mask); + if (firstcpu >= nr_cpu_ids || + cpumask_weight(local_mask) == num_online_cpus()) { + local_mask = topology_core_cpumask(0); + firstcpu = cpumask_first(local_mask); + } + if (firstcpu < nr_cpu_ids) { + secondcpu = cpumask_next(firstcpu, local_mask); + if (secondcpu >= nr_cpu_ids) + secondcpu = firstcpu; + currrcvcpu = secondcpu; + } for (i = 0; msixnum < dd->cspec->num_msix_entries; i++) { irq_handler_t handler; - const char *name; void *arg; u64 val; int lsb, reg, sh; + dd->cspec->msix_entries[msixnum]. + name[sizeof(dd->cspec->msix_entries[msixnum].name) - 1] + = '\0'; if (i < ARRAY_SIZE(irq_table)) { if (irq_table[i].port) { /* skip if for a non-configured port */ @@ -3135,7 +3154,11 @@ try_intx: arg = dd; lsb = irq_table[i].lsb; handler = irq_table[i].handler; - name = irq_table[i].name; + snprintf(dd->cspec->msix_entries[msixnum].name, + sizeof(dd->cspec->msix_entries[msixnum].name) + - 1, + QIB_DRV_NAME "%d%s", dd->unit, + irq_table[i].name); } else { unsigned ctxt; @@ -3148,23 +3171,28 @@ try_intx: continue; lsb = QIB_I_RCVAVAIL_LSB + ctxt; handler = qib_7322pintr; - name = QIB_DRV_NAME " (kctx)"; + snprintf(dd->cspec->msix_entries[msixnum].name, + sizeof(dd->cspec->msix_entries[msixnum].name) + - 1, + QIB_DRV_NAME "%d (kctx)", dd->unit); } - ret = request_irq(dd->cspec->msix_entries[msixnum].vector, - handler, 0, name, arg); + ret = request_irq( + dd->cspec->msix_entries[msixnum].msix.vector, + handler, 0, dd->cspec->msix_entries[msixnum].name, + arg); if (ret) { /* * Shouldn't happen since the enable said we could * have as many as we are trying to setup here. */ qib_dev_err(dd, "Couldn't setup MSIx " - "interrupt (vec=%d, irq=%d): %d\n", msixnum, - dd->cspec->msix_entries[msixnum].vector, - ret); + "interrupt (vec=%d, irq=%d): %d\n", msixnum, + dd->cspec->msix_entries[msixnum].msix.vector, + ret); qib_7322_nomsix(dd); goto try_intx; } - dd->cspec->msix_arg[msixnum] = arg; + dd->cspec->msix_entries[msixnum].arg = arg; if (lsb >= 0) { reg = lsb / IBA7322_REDIRECT_VEC_PER_REG; sh = (lsb % IBA7322_REDIRECT_VEC_PER_REG) * @@ -3174,6 +3202,25 @@ try_intx: } val = qib_read_kreg64(dd, 2 * msixnum + 1 + (QIB_7322_MsixTable_OFFS / sizeof(u64))); + if (firstcpu < nr_cpu_ids && + zalloc_cpumask_var( + &dd->cspec->msix_entries[msixnum].mask, + GFP_KERNEL)) { + if (handler == qib_7322pintr) { + cpumask_set_cpu(currrcvcpu, + dd->cspec->msix_entries[msixnum].mask); + currrcvcpu = cpumask_next(currrcvcpu, + local_mask); + if (currrcvcpu >= nr_cpu_ids) + currrcvcpu = secondcpu; + } else { + cpumask_set_cpu(firstcpu, + dd->cspec->msix_entries[msixnum].mask); + } + irq_set_affinity_hint( + dd->cspec->msix_entries[msixnum].msix.vector, + dd->cspec->msix_entries[msixnum].mask); + } msixnum++; } /* Initialize the vector mapping */ @@ -3365,7 +3412,7 @@ static int qib_do_7322_reset(struct qib_devdata *dd) if (msix_entries) { /* restore the MSIx vector address and data if saved above */ for (i = 0; i < msix_entries; i++) { - dd->cspec->msix_entries[i].entry = i; + dd->cspec->msix_entries[i].msix.entry = i; if (!msix_vecsave || !msix_vecsave[2 * i]) continue; qib_write_kreg(dd, 2 * i + @@ -6865,15 +6912,13 @@ struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *pdev, tabsize = actual_cnt; dd->cspec->msix_entries = kmalloc(tabsize * - sizeof(struct msix_entry), GFP_KERNEL); - dd->cspec->msix_arg = kmalloc(tabsize * - sizeof(void *), GFP_KERNEL); - if (!dd->cspec->msix_entries || !dd->cspec->msix_arg) { + sizeof(struct 
qib_msix_entry), GFP_KERNEL); + if (!dd->cspec->msix_entries) { qib_dev_err(dd, "No memory for MSIx table\n"); tabsize = 0; } for (i = 0; i < tabsize; i++) - dd->cspec->msix_entries[i].entry = i; + dd->cspec->msix_entries[i].msix.entry = i; if (qib_pcie_params(dd, 8, &tabsize, dd->cspec->msix_entries)) qib_dev_err(dd, "Failed to setup PCIe or interrupts; " diff --git a/drivers/infiniband/hw/qib/qib_pcie.c b/drivers/infiniband/hw/qib/qib_pcie.c index 0fde788..790646e 100644 --- a/drivers/infiniband/hw/qib/qib_pcie.c +++ b/drivers/infiniband/hw/qib/qib_pcie.c @@ -194,11 +194,24 @@ void qib_pcie_ddcleanup(struct qib_devdata *dd) } static void qib_msix_setup(struct qib_devdata *dd, int pos, u32 *msixcnt, - struct msix_entry *msix_entry) + struct qib_msix_entry *qib_msix_entry) { int ret; u32 tabsize = 0; u16 msix_flags; + struct msix_entry *msix_entry; + int i; + + /* We can't pass qib_msix_entry array to qib_msix_setup + * so use a dummy msix_entry array and copy the allocated + * irq back to the qib_msix_entry array. */ + msix_entry = kmalloc(*msixcnt * sizeof(*msix_entry), GFP_KERNEL); + if (!msix_entry) { + ret = -ENOMEM; + goto do_intx; + } + for (i = 0; i < *msixcnt; i++) + msix_entry[i] = qib_msix_entry[i].msix; pci_read_config_word(dd->pcidev, pos + PCI_MSIX_FLAGS, &msix_flags); tabsize = 1 + (msix_flags & PCI_MSIX_FLAGS_QSIZE); @@ -209,11 +222,15 @@ static void qib_msix_setup(struct qib_devdata *dd, int pos, u32 *msixcnt, tabsize = ret; ret = pci_enable_msix(dd->pcidev, msix_entry, tabsize); } +do_intx: if (ret) { qib_dev_err(dd, "pci_enable_msix %d vectors failed: %d, " "falling back to INTx\n", tabsize, ret); tabsize = 0; } + for (i = 0; i < tabsize; i++) + qib_msix_entry[i].msix = msix_entry[i]; + kfree(msix_entry); *msixcnt = tabsize; if (ret) @@ -251,7 +268,7 @@ static int qib_msi_setup(struct qib_devdata *dd, int pos) } int qib_pcie_params(struct qib_devdata *dd, u32 minw, u32 *nent, - struct msix_entry *entry) + struct qib_msix_entry *entry) { u16 linkstat, speed; int pos = 0, pose, ret = 1; -- cgit v0.10.2 From 520b3ee70527cb47f0b08ceb25ace02aed71eab7 Mon Sep 17 00:00:00 2001 From: Mike Marciniszyn Date: Sat, 25 Feb 2012 17:45:50 -0800 Subject: IB/qib: Avoid filtering LID on SMA portinfo The current get portinfo handling filters the LID being sent, changing zero to 0xffff. This causes OpenSM to log excessive warning messages. Reviewed-by: Edward Mascarenhas Signed-off-by: Mike Marciniszyn Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c index 3b3745f..c4ff788 100644 --- a/drivers/infiniband/hw/qib/qib_mad.c +++ b/drivers/infiniband/hw/qib/qib_mad.c @@ -433,7 +433,6 @@ static int subn_get_portinfo(struct ib_smp *smp, struct ib_device *ibdev, struct qib_pportdata *ppd; struct qib_ibport *ibp; struct ib_port_info *pip = (struct ib_port_info *)smp->data; - u16 lid; u8 mtu; int ret; u32 state; @@ -469,8 +468,7 @@ static int subn_get_portinfo(struct ib_smp *smp, struct ib_device *ibdev, ibp->mkeyprot == 1)) pip->mkey = ibp->mkey; pip->gid_prefix = ibp->gid_prefix; - lid = ppd->lid; - pip->lid = lid ? 
cpu_to_be16(lid) : IB_LID_PERMISSIVE; + pip->lid = cpu_to_be16(ppd->lid); pip->sm_lid = cpu_to_be16(ibp->sm_lid); pip->cap_mask = cpu_to_be32(ibp->port_cap_flags); /* pip->diag_code; */ -- cgit v0.10.2 From 6aeaa48b0dd3a4261804be1cccaea46d82be3fcb Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Sat, 25 Feb 2012 17:47:21 -0800 Subject: IB/ehca: Use kthread_create_on_node() Since create_comp_task() creates a percpu kthread, it makes sense to use kthread_create_on_node() to get proper NUMA affinity for the kthread stack. Signed-off-by: Eric Dumazet Acked-by: Hoang-Nam Nguyen Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c index e571e60..5358900 100644 --- a/drivers/infiniband/hw/ehca/ehca_irq.c +++ b/drivers/infiniband/hw/ehca/ehca_irq.c @@ -786,7 +786,8 @@ static struct task_struct *create_comp_task(struct ehca_comp_pool *pool, spin_lock_init(&cct->task_lock); INIT_LIST_HEAD(&cct->cq_list); init_waitqueue_head(&cct->wait_queue); - cct->task = kthread_create(comp_task, cct, "ehca_comp/%d", cpu); + cct->task = kthread_create_on_node(comp_task, cct, cpu_to_node(cpu), + "ehca_comp/%d", cpu); return cct->task; } -- cgit v0.10.2 From d144b650c635b941c3d73ef995ec16984594157f Mon Sep 17 00:00:00 2001 From: Swapna Thete Date: Sat, 25 Feb 2012 17:47:31 -0800 Subject: IB/mad: Add MAD error codes from IBA spec Add defines for MAD error codes so that they can be used when returning error responses. Signed-off-by: Swapna Thete Signed-off-by: Roland Dreier diff --git a/include/rdma/ib_mad.h b/include/rdma/ib_mad.h index d3b9401..b513f57 100644 --- a/include/rdma/ib_mad.h +++ b/include/rdma/ib_mad.h @@ -77,6 +77,15 @@ #define IB_MGMT_MAX_METHODS 128 +/* MAD Status field bit masks */ +#define IB_MGMT_MAD_STATUS_SUCCESS 0x0000 +#define IB_MGMT_MAD_STATUS_BUSY 0x0001 +#define IB_MGMT_MAD_STATUS_REDIRECT_REQD 0x0002 +#define IB_MGMT_MAD_STATUS_BAD_VERSION 0x0004 +#define IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD 0x0008 +#define IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB 0x000c +#define IB_MGMT_MAD_STATUS_INVALID_ATTRIB_VALUE 0x001c + /* RMPP information */ #define IB_MGMT_RMPP_VERSION 1 -- cgit v0.10.2 From 0b307043049f34211affdde46f82e7abbe8c4590 Mon Sep 17 00:00:00 2001 From: Swapna Thete Date: Sat, 25 Feb 2012 17:47:32 -0800 Subject: IB/mad: Return error response for unsupported MADs Set up a response with appropriate error status and send it for MADs that are not supported by a specific class/version.
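For illustration (a minimal sketch, not part of the patch): one way a MAD consumer might decode the status word built from the IB_MGMT_MAD_STATUS_* masks defined above. The helper name mad_status_str() is hypothetical.

static const char *mad_status_str(__be16 be_status)
{
	u16 status = be16_to_cpu(be_status);	/* status travels big-endian on the wire */

	if (status == IB_MGMT_MAD_STATUS_SUCCESS)
		return "success";
	if (status & IB_MGMT_MAD_STATUS_BUSY)
		return "busy";
	if (status & IB_MGMT_MAD_STATUS_REDIRECT_REQD)
		return "redirect required";
	/* Bits 2-4 encode a single "invalid field" code (the 0x0004, 0x0008,
	 * 0x000c and 0x001c values above), so compare the masked value
	 * rather than testing individual bits. */
	switch (status & IB_MGMT_MAD_STATUS_INVALID_ATTRIB_VALUE) {
	case IB_MGMT_MAD_STATUS_BAD_VERSION:
		return "unsupported class version";
	case IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD:
		return "method not supported";
	case IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB:
		return "method/attribute combination not supported";
	case IB_MGMT_MAD_STATUS_INVALID_ATTRIB_VALUE:
		return "invalid attribute or attribute value";
	}
	return "unknown status";
}

With the patch below applied, "method/attribute combination not supported" is the status a sender would see come back for a Get/Set that no local agent had registered for.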
Reviewed-by: Hal Rosenstock Signed-off-by: Swapna Thete Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c index 2fe428b..426bb76 100644 --- a/drivers/infiniband/core/mad.c +++ b/drivers/infiniband/core/mad.c @@ -1842,6 +1842,24 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv, } } +static bool generate_unmatched_resp(struct ib_mad_private *recv, + struct ib_mad_private *response) +{ + if (recv->mad.mad.mad_hdr.method == IB_MGMT_METHOD_GET || + recv->mad.mad.mad_hdr.method == IB_MGMT_METHOD_SET) { + memcpy(response, recv, sizeof *response); + response->header.recv_wc.wc = &response->header.wc; + response->header.recv_wc.recv_buf.mad = &response->mad.mad; + response->header.recv_wc.recv_buf.grh = &response->grh; + response->mad.mad.mad_hdr.method = IB_MGMT_METHOD_GET_RESP; + response->mad.mad.mad_hdr.status = + cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB); + + return true; + } else { + return false; + } +} static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv, struct ib_wc *wc) { @@ -1963,6 +1981,9 @@ local: * or via recv_handler in ib_mad_complete_recv() */ recv = NULL; + } else if (generate_unmatched_resp(recv, response)) { + agent_send_response(&response->mad.mad, &recv->grh, wc, + port_priv->device, port_num, qp_info->qp->qp_num); } out: -- cgit v0.10.2 From a776ce7cfc905a1f8ebf9f7e87f0ba705e7efaef Mon Sep 17 00:00:00 2001 From: Masanari Iida Date: Thu, 9 Feb 2012 23:37:43 +0900 Subject: IB/srpt: Fix typo "alocate" -> "allocate" Signed-off-by: Masanari Iida Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c index 2b73d43..ebe33d9 100644 --- a/drivers/infiniband/ulp/srpt/ib_srpt.c +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c @@ -3450,7 +3450,7 @@ static struct se_node_acl *srpt_alloc_fabric_acl(struct se_portal_group *se_tpg) nacl = kzalloc(sizeof(struct srpt_node_acl), GFP_KERNEL); if (!nacl) { - printk(KERN_ERR "Unable to alocate struct srpt_node_acl\n"); + printk(KERN_ERR "Unable to allocate struct srpt_node_acl\n"); return NULL; } -- cgit v0.10.2 From 4ba6b8eaa9d67d03fb653cb8a13afca3236d4d70 Mon Sep 17 00:00:00 2001 From: Eli Cohen Date: Thu, 9 Feb 2012 18:52:50 +0200 Subject: IB/mlx4: Set bad_wr for invalid send opcode If the opcode of a work request exceeds the range of valid opcodes, return the pointer to the offending work request. Signed-off-by: Eli Cohen Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index aa2aefa..3a78489 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c @@ -1884,6 +1884,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, wmb(); if (wr->opcode < 0 || wr->opcode >= ARRAY_SIZE(mlx4_ib_opcode)) { + *bad_wr = wr; err = -EINVAL; goto out; } -- cgit v0.10.2 From a5bbe892da9441835cb6fece26d9bbd95fc820be Mon Sep 17 00:00:00 2001 From: Eli Cohen Date: Thu, 9 Feb 2012 18:10:06 +0200 Subject: mlx4: Enforce device max FMR maps in FMR alloc ConnectX devices have a limit on the number of mappings that can be done on an FMR before having to call sync_tpt. The current mlx4_ib driver reports the limit correctly in max_map_per_fmr in .query_device(), but mlx4_core doesn't check it when actually allocating FMRs. Add a max_fmr_maps field to struct mlx4_caps and enforce this maximum value on FMR allocations. 
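For context, a minimal consumer-side sketch (not from the patch; the 'pd' and 'desired_remaps' variables and the surrounding function are assumed) of how a kernel ULP of this era could clamp its FMR parameters to the limit that mlx4_fmr_alloc() now enforces:

	struct ib_device_attr dev_attr;
	struct ib_fmr_attr fmr_attr;
	struct ib_fmr *fmr;
	int err;

	err = ib_query_device(pd->device, &dev_attr);
	if (err)
		return err;

	fmr_attr.max_pages = 64;
	fmr_attr.page_shift = PAGE_SHIFT;
	/* Clamp to the device limit reported via max_map_per_fmr so the
	 * allocation cannot trip the new max_fmr_maps check. */
	fmr_attr.max_maps = min_t(int, desired_remaps, dev_attr.max_map_per_fmr);

	fmr = ib_alloc_fmr(pd, IB_ACCESS_LOCAL_WRITE, &fmr_attr);
	if (IS_ERR(fmr))
		return PTR_ERR(fmr);

With the patch, a request for more remaps than the device supports now fails up front with -EINVAL instead of being silently accepted.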
Signed-off-by: Eli Cohen Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 7b445df..e152837 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -163,7 +163,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev, props->max_mcast_qp_attach = dev->dev->caps.num_qp_per_mgm; props->max_total_mcast_qp_attach = props->max_mcast_qp_attach * props->max_mcast_grp; - props->max_map_per_fmr = (1 << (32 - ilog2(dev->dev->caps.num_mpts))) - 1; + props->max_map_per_fmr = dev->dev->caps.max_fmr_maps; out: kfree(in_mad); diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 678558b..3e593ae 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -1131,6 +1131,8 @@ static int mlx4_init_hca(struct mlx4_dev *dev) goto err_stop_fw; } + dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1; + init_hca.log_uar_sz = ilog2(dev->caps.num_uars); init_hca.uar_page_sz = PAGE_SHIFT - 12; diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c index 25a80d7..5b7c06e 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mr.c +++ b/drivers/net/ethernet/mellanox/mlx4/mr.c @@ -816,6 +816,9 @@ int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages, u64 mtt_offset; int err = -ENOMEM; + if (max_maps > dev->caps.max_fmr_maps) + return -EINVAL; + if (page_shift < (ffs(dev->caps.page_size_cap) - 1) || page_shift >= 32) return -EINVAL; diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index aea6190..263d2ae 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -273,6 +273,7 @@ struct mlx4_caps { int num_comp_vectors; int comp_pool; int num_mpts; + int max_fmr_maps; int num_mtts; int fmr_reserved_mtts; int reserved_mtts; -- cgit v0.10.2 From e9319b0cb00d4d68792fdae37e81e316cb632cb9 Mon Sep 17 00:00:00 2001 From: Roland Dreier Date: Mon, 27 Feb 2012 09:15:08 -0800 Subject: IB/core: Fix SDR rates in sysfs Commit 71eeba16 ("IB: Add new InfiniBand link speeds") introduced a bug where e.g. the rate for IB 4X SDR links is displayed as "8.5 Gb/sec" instead of "10 Gb/sec" as it used to be. Fix that. Reported-by: Or Gerlitz Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c index c61bca3..a860b6d 100644 --- a/drivers/infiniband/core/sysfs.c +++ b/drivers/infiniband/core/sysfs.c @@ -179,33 +179,37 @@ static ssize_t rate_show(struct ib_port *p, struct port_attribute *unused, { struct ib_port_attr attr; char *speed = ""; - int rate; + int rate = -1; /* in deci-Gb/sec */ ssize_t ret; ret = ib_query_port(p->ibdev, p->port_num, &attr); if (ret) return ret; - rate = (25 * attr.active_speed) / 10; - switch (attr.active_speed) { + case 1: + /* SDR */ + rate = 25; + break; case 2: speed = " DDR"; + rate = 50; break; case 4: speed = " QDR"; + rate = 100; break; case 8: speed = " FDR10"; - rate = 10; + rate = 100; break; case 16: speed = " FDR"; - rate = 14; + rate = 140; break; case 32: speed = " EDR"; - rate = 25; + rate = 250; break; } @@ -214,7 +218,7 @@ static ssize_t rate_show(struct ib_port *p, struct port_attribute *unused, return -EINVAL; return sprintf(buf, "%d%s Gb/sec (%dX%s)\n", - rate, (attr.active_speed == 1) ? ".5" : "", + rate / 10, rate % 10 ?
".5" : "", ib_width_enum_to_int(attr.active_width), speed); } -- cgit v0.10.2 From e0bda7d8c33e60fb08cc0b5522cd86346313722c Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Sat, 14 Jan 2012 12:39:44 +0000 Subject: IB/srp: Use pr_fmt() and pr_err()/pr_warn() Use pr_fmt() and pr_xxx() instead of more verbose printk() equivalents. Signed-off-by: Bart Van Assche Acked-by: David Dillow Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index 0bfa545..895a9ff 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c @@ -30,6 +30,8 @@ * SOFTWARE. */ +#define pr_fmt(fmt) PFX fmt + #include #include #include @@ -165,7 +167,7 @@ static void srp_free_iu(struct srp_host *host, struct srp_iu *iu) static void srp_qp_event(struct ib_event *event, void *context) { - printk(KERN_ERR PFX "QP event %d\n", event->event); + pr_debug("QP event %d\n", event->event); } static int srp_init_qp(struct srp_target_port *target, @@ -1989,7 +1991,7 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target) goto out; } if (strlen(p) != 32) { - printk(KERN_WARNING PFX "bad dest GID parameter '%s'\n", p); + pr_warn("bad dest GID parameter '%s'\n", p); kfree(p); goto out; } @@ -2004,7 +2006,7 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target) case SRP_OPT_PKEY: if (match_hex(args, &token)) { - printk(KERN_WARNING PFX "bad P_Key parameter '%s'\n", p); + pr_warn("bad P_Key parameter '%s'\n", p); goto out; } target->path.pkey = cpu_to_be16(token); @@ -2023,7 +2025,7 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target) case SRP_OPT_MAX_SECT: if (match_int(args, &token)) { - printk(KERN_WARNING PFX "bad max sect parameter '%s'\n", p); + pr_warn("bad max sect parameter '%s'\n", p); goto out; } target->scsi_host->max_sectors = token; @@ -2031,7 +2033,8 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target) case SRP_OPT_MAX_CMD_PER_LUN: if (match_int(args, &token)) { - printk(KERN_WARNING PFX "bad max cmd_per_lun parameter '%s'\n", p); + pr_warn("bad max cmd_per_lun parameter '%s'\n", + p); goto out; } target->scsi_host->cmd_per_lun = min(token, SRP_CMD_SQ_SIZE); @@ -2039,14 +2042,14 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target) case SRP_OPT_IO_CLASS: if (match_hex(args, &token)) { - printk(KERN_WARNING PFX "bad IO class parameter '%s' \n", p); + pr_warn("bad IO class parameter '%s'\n", p); goto out; } if (token != SRP_REV10_IB_IO_CLASS && token != SRP_REV16A_IB_IO_CLASS) { - printk(KERN_WARNING PFX "unknown IO class parameter value" - " %x specified (use %x or %x).\n", - token, SRP_REV10_IB_IO_CLASS, SRP_REV16A_IB_IO_CLASS); + pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n", + token, SRP_REV10_IB_IO_CLASS, + SRP_REV16A_IB_IO_CLASS); goto out; } target->io_class = token; @@ -2064,7 +2067,8 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target) case SRP_OPT_CMD_SG_ENTRIES: if (match_int(args, &token) || token < 1 || token > 255) { - printk(KERN_WARNING PFX "bad max cmd_sg_entries parameter '%s'\n", p); + pr_warn("bad max cmd_sg_entries parameter '%s'\n", + p); goto out; } target->cmd_sg_cnt = token; @@ -2072,7 +2076,7 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target) case SRP_OPT_ALLOW_EXT_SG: if (match_int(args, &token)) { - printk(KERN_WARNING PFX "bad allow_ext_sg parameter '%s'\n", p); + pr_warn("bad 
allow_ext_sg parameter '%s'\n", p); goto out; } target->allow_ext_sg = !!token; @@ -2081,15 +2085,16 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target) case SRP_OPT_SG_TABLESIZE: if (match_int(args, &token) || token < 1 || token > SCSI_MAX_SG_CHAIN_SEGMENTS) { - printk(KERN_WARNING PFX "bad max sg_tablesize parameter '%s'\n", p); + pr_warn("bad max sg_tablesize parameter '%s'\n", + p); goto out; } target->sg_tablesize = token; break; default: - printk(KERN_WARNING PFX "unknown parameter or missing value " - "'%s' in target creation request\n", p); + pr_warn("unknown parameter or missing value '%s' in target creation request\n", + p); goto out; } } @@ -2100,9 +2105,8 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target) for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i) if ((srp_opt_tokens[i].token & SRP_OPT_ALL) && !(srp_opt_tokens[i].token & opt_mask)) - printk(KERN_WARNING PFX "target creation request is " - "missing parameter '%s'\n", - srp_opt_tokens[i].pattern); + pr_warn("target creation request is missing parameter '%s'\n", + srp_opt_tokens[i].pattern); out: kfree(options); @@ -2149,7 +2153,7 @@ static ssize_t srp_create_target(struct device *dev, if (!host->srp_dev->fmr_pool && !target->allow_ext_sg && target->cmd_sg_cnt < target->sg_tablesize) { - printk(KERN_WARNING PFX "No FMR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n"); + pr_warn("No FMR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n"); target->sg_tablesize = target->cmd_sg_cnt; } @@ -2309,8 +2313,7 @@ static void srp_add_one(struct ib_device *device) return; if (ib_query_device(device, dev_attr)) { - printk(KERN_WARNING PFX "Query device failed for %s\n", - device->name); + pr_warn("Query device failed for %s\n", device->name); goto free_attr; } @@ -2459,7 +2462,7 @@ static int __init srp_init_module(void) BUILD_BUG_ON(FIELD_SIZEOF(struct ib_wc, wr_id) < sizeof(void *)); if (srp_sg_tablesize) { - printk(KERN_WARNING PFX "srp_sg_tablesize is deprecated, please use cmd_sg_entries\n"); + pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n"); if (!cmd_sg_entries) cmd_sg_entries = srp_sg_tablesize; } @@ -2468,14 +2471,15 @@ static int __init srp_init_module(void) cmd_sg_entries = SRP_DEF_SG_TABLESIZE; if (cmd_sg_entries > 255) { - printk(KERN_WARNING PFX "Clamping cmd_sg_entries to 255\n"); + pr_warn("Clamping cmd_sg_entries to 255\n"); cmd_sg_entries = 255; } if (!indirect_sg_entries) indirect_sg_entries = cmd_sg_entries; else if (indirect_sg_entries < cmd_sg_entries) { - printk(KERN_WARNING PFX "Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n", cmd_sg_entries); + pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n", + cmd_sg_entries); indirect_sg_entries = cmd_sg_entries; } @@ -2486,7 +2490,7 @@ static int __init srp_init_module(void) ret = class_register(&srp_class); if (ret) { - printk(KERN_ERR PFX "couldn't register class infiniband_srp\n"); + pr_err("couldn't register class infiniband_srp\n"); srp_release_transport(ib_srp_transport_template); return ret; } @@ -2495,7 +2499,7 @@ static int __init srp_init_module(void) ret = ib_register_client(&srp_client); if (ret) { - printk(KERN_ERR PFX "couldn't register IB client\n"); + pr_err("couldn't register IB client\n"); srp_release_transport(ib_srp_transport_template); ib_sa_unregister_client(&srp_sa_client); class_unregister(&srp_class); -- cgit v0.10.2 From 683b159a2eef6544d49020eb9c88a94157df7d2d Mon Sep 
17 00:00:00 2001 From: Bart Van Assche Date: Sat, 14 Jan 2012 12:40:44 +0000 Subject: IB/srp: Consolidate repetitive sysfs code Remove sysfs attributes before removing a target instead of testing the target state in every sysfs attribute callback method. Note: it is safe to invoke a sysfs attribute removal method like device_remove_file() twice on the same attribute. Signed-off-by: Bart Van Assche Acked-by: David Dillow Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index 895a9ff..bcbf22e 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c @@ -474,6 +474,21 @@ static void srp_free_req_data(struct srp_target_port *target) } } +/** + * srp_del_scsi_host_attr() - Remove attributes defined in the host template. + * @shost: SCSI host whose attributes to remove from sysfs. + * + * Note: Any attributes defined in the host template and that did not exist + * before invocation of this function will be ignored. + */ +static void srp_del_scsi_host_attr(struct Scsi_Host *shost) +{ + struct device_attribute **attr; + + for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr) + device_remove_file(&shost->shost_dev, *attr); +} + static void srp_remove_work(struct work_struct *work) { struct srp_target_port *target = @@ -486,6 +501,7 @@ static void srp_remove_work(struct work_struct *work) list_del(&target->list); spin_unlock(&target->srp_host->target_lock); + srp_del_scsi_host_attr(target->scsi_host); srp_remove_host(target->scsi_host); scsi_remove_host(target->scsi_host); ib_destroy_cm_id(target->cm_id); @@ -1678,10 +1694,6 @@ static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr, { struct srp_target_port *target = host_to_target(class_to_shost(dev)); - if (target->state == SRP_TARGET_DEAD || - target->state == SRP_TARGET_REMOVED) - return -ENODEV; - return sprintf(buf, "0x%016llx\n", (unsigned long long) be64_to_cpu(target->id_ext)); } @@ -1691,10 +1703,6 @@ static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr, { struct srp_target_port *target = host_to_target(class_to_shost(dev)); - if (target->state == SRP_TARGET_DEAD || - target->state == SRP_TARGET_REMOVED) - return -ENODEV; - return sprintf(buf, "0x%016llx\n", (unsigned long long) be64_to_cpu(target->ioc_guid)); } @@ -1704,10 +1712,6 @@ static ssize_t show_service_id(struct device *dev, { struct srp_target_port *target = host_to_target(class_to_shost(dev)); - if (target->state == SRP_TARGET_DEAD || - target->state == SRP_TARGET_REMOVED) - return -ENODEV; - return sprintf(buf, "0x%016llx\n", (unsigned long long) be64_to_cpu(target->service_id)); } @@ -1717,10 +1721,6 @@ static ssize_t show_pkey(struct device *dev, struct device_attribute *attr, { struct srp_target_port *target = host_to_target(class_to_shost(dev)); - if (target->state == SRP_TARGET_DEAD || - target->state == SRP_TARGET_REMOVED) - return -ENODEV; - return sprintf(buf, "0x%04x\n", be16_to_cpu(target->path.pkey)); } @@ -1729,10 +1729,6 @@ static ssize_t show_dgid(struct device *dev, struct device_attribute *attr, { struct srp_target_port *target = host_to_target(class_to_shost(dev)); - if (target->state == SRP_TARGET_DEAD || - target->state == SRP_TARGET_REMOVED) - return -ENODEV; - return sprintf(buf, "%pI6\n", target->path.dgid.raw); } @@ -1741,10 +1737,6 @@ static ssize_t show_orig_dgid(struct device *dev, { struct srp_target_port *target = host_to_target(class_to_shost(dev)); - if (target->state == SRP_TARGET_DEAD || - 
target->state == SRP_TARGET_REMOVED) - return -ENODEV; - return sprintf(buf, "%pI6\n", target->orig_dgid); } @@ -1753,10 +1745,6 @@ static ssize_t show_req_lim(struct device *dev, { struct srp_target_port *target = host_to_target(class_to_shost(dev)); - if (target->state == SRP_TARGET_DEAD || - target->state == SRP_TARGET_REMOVED) - return -ENODEV; - return sprintf(buf, "%d\n", target->req_lim); } @@ -1765,10 +1753,6 @@ static ssize_t show_zero_req_lim(struct device *dev, { struct srp_target_port *target = host_to_target(class_to_shost(dev)); - if (target->state == SRP_TARGET_DEAD || - target->state == SRP_TARGET_REMOVED) - return -ENODEV; - return sprintf(buf, "%d\n", target->zero_req_lim); } @@ -2432,6 +2416,7 @@ static void srp_remove_one(struct ib_device *device) list_for_each_entry_safe(target, tmp_target, &host->target_list, list) { + srp_del_scsi_host_attr(target->scsi_host); srp_remove_host(target->scsi_host); scsi_remove_host(target->scsi_host); srp_disconnect_target(target); -- cgit v0.10.2 From d474186f19d7ac1c7fbb293fdcfa46103e45e2ca Mon Sep 17 00:00:00 2001 From: Doug Ledford Date: Thu, 1 Mar 2012 19:55:21 +0200 Subject: IB/iser: Free IB connection resources in the proper place We allocate the login dma buffers in iser_verbs.c as part of alloc_ib_conn_resources(), however we are freeing them in iser_initiator.c as part of iser_free_rx_descriptors(). This is needlessly confusing. We have an alloc_rx_descriptors() and it doesn't alloc something that the free_rx_descriptors() frees, and we have an alloc_ib_conn_resources() that allocs something not freed by free_ib_conn_resources(). Clean that up. Signed-off-by: Doug Ledford [ Fix build error in iser_free_ib_conn_res(). - Or ] Signed-off-by: Or Gerlitz Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c index a607542..622e985 100644 --- a/drivers/infiniband/ulp/iser/iser_initiator.c +++ b/drivers/infiniband/ulp/iser/iser_initiator.c @@ -220,18 +220,6 @@ void iser_free_rx_descriptors(struct iser_conn *ib_conn) struct iser_rx_desc *rx_desc; struct iser_device *device = ib_conn->device; - if (ib_conn->login_buf) { - if (ib_conn->login_req_dma) - ib_dma_unmap_single(device->ib_device, - ib_conn->login_req_dma, - ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_TO_DEVICE); - if (ib_conn->login_resp_dma) - ib_dma_unmap_single(device->ib_device, - ib_conn->login_resp_dma, - ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE); - kfree(ib_conn->login_buf); - } - if (!ib_conn->rx_descs) return; diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c index e28877c..14224ba 100644 --- a/drivers/infiniband/ulp/iser/iser_verbs.c +++ b/drivers/infiniband/ulp/iser/iser_verbs.c @@ -274,6 +274,18 @@ static int iser_free_ib_conn_res(struct iser_conn *ib_conn, int can_destroy_id) ib_conn->cma_id = NULL; kfree(ib_conn->page_vec); + if (ib_conn->login_buf) { + if (ib_conn->login_req_dma) + ib_dma_unmap_single(ib_conn->device->ib_device, + ib_conn->login_req_dma, + ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_TO_DEVICE); + if (ib_conn->login_resp_dma) + ib_dma_unmap_single(ib_conn->device->ib_device, + ib_conn->login_resp_dma, + ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE); + kfree(ib_conn->login_buf); + } + return 0; } -- cgit v0.10.2 From 89e984e2c2cd14f77ccb26c47726ac7f13b70ae8 Mon Sep 17 00:00:00 2001 From: Or Gerlitz Date: Mon, 5 Mar 2012 18:21:44 +0200 Subject: IB/iser: Post initial receive buffers before sending the final login request An iser target may send iscsi NO-OP PDUs 
as soon as it marks the iSER iSCSI session as fully operative. This means that there is a window where there are no posted receive buffers on the initiator side, so it's possible for the iSER RC connection to break because of RNR NAK / retry errors. To fix this, rely on the flags bits in the login request to have FFP (0x3) in the lower nibble as a marker for the final login request, and post an initial chunk of receive buffers before sending that login request instead of after getting the login response. Signed-off-by: Or Gerlitz Cc: Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c index 9a43cb0..db43b31 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.c +++ b/drivers/infiniband/ulp/iser/iscsi_iser.c @@ -364,6 +364,9 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session, } ib_conn = ep->dd_data; + if (iser_alloc_rx_descriptors(ib_conn)) + return -ENOMEM; + /* binds the iSER connection retrieved from the previously * connected ep_handle to the iSCSI layer connection. exchanges * connection pointers */ @@ -398,19 +401,6 @@ iscsi_iser_conn_stop(struct iscsi_cls_conn *cls_conn, int flag) iser_conn->ib_conn = NULL; } -static int -iscsi_iser_conn_start(struct iscsi_cls_conn *cls_conn) -{ - struct iscsi_conn *conn = cls_conn->dd_data; - int err; - - err = iser_conn_set_full_featured_mode(conn); - if (err) - return err; - - return iscsi_conn_start(cls_conn); -} - static void iscsi_iser_session_destroy(struct iscsi_cls_session *cls_session) { struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); @@ -724,7 +714,7 @@ static struct iscsi_transport iscsi_iser_transport = { .get_conn_param = iscsi_conn_get_param, .get_ep_param = iscsi_iser_get_ep_param, .get_session_param = iscsi_session_get_param, - .start_conn = iscsi_iser_conn_start, + .start_conn = iscsi_conn_start, .stop_conn = iscsi_iser_conn_stop, /* iscsi host params */ .get_host_param = iscsi_host_get_param, diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h index db7ea37..296be43 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.h +++ b/drivers/infiniband/ulp/iser/iscsi_iser.h @@ -366,4 +366,5 @@ int iser_dma_map_task_data(struct iscsi_iser_task *iser_task, void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task); int iser_initialize_task_headers(struct iscsi_task *task, struct iser_tx_desc *tx_desc); +int iser_alloc_rx_descriptors(struct iser_conn *ib_conn); #endif diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c index 622e985..a00ccd1 100644 --- a/drivers/infiniband/ulp/iser/iser_initiator.c +++ b/drivers/infiniband/ulp/iser/iser_initiator.c @@ -170,7 +170,7 @@ static void iser_create_send_desc(struct iser_conn *ib_conn, } -static int iser_alloc_rx_descriptors(struct iser_conn *ib_conn) +int iser_alloc_rx_descriptors(struct iser_conn *ib_conn) { int i, j; u64 dma_addr; @@ -230,23 +230,24 @@ void iser_free_rx_descriptors(struct iser_conn *ib_conn) kfree(ib_conn->rx_descs); } -/** - * iser_conn_set_full_featured_mode - (iSER API) - */ -int iser_conn_set_full_featured_mode(struct iscsi_conn *conn) +static int iser_post_rx_bufs(struct iscsi_conn *conn, struct iscsi_hdr *req) { struct iscsi_iser_conn *iser_conn = conn->dd_data; - iser_dbg("Initially post: %d\n", ISER_MIN_POSTED_RX); - - /* Check that there is no posted recv or send buffers left - */ /* they must be consumed during the login phase */ -
BUG_ON(iser_conn->ib_conn->post_recv_buf_count != 0); - BUG_ON(atomic_read(&iser_conn->ib_conn->post_send_buf_count) != 0); + iser_dbg("req op %x flags %x\n", req->opcode, req->flags); + /* check if this is the last login - going to full feature phase */ + if ((req->flags & ISCSI_FULL_FEATURE_PHASE) != ISCSI_FULL_FEATURE_PHASE) + return 0; - if (iser_alloc_rx_descriptors(iser_conn->ib_conn)) - return -ENOMEM; + /* + * Check that there is one posted recv buffer (for the last login + * response) and no posted send buffers left - they must have been + * consumed during previous login phases. + */ + WARN_ON(iser_conn->ib_conn->post_recv_buf_count != 1); + WARN_ON(atomic_read(&iser_conn->ib_conn->post_send_buf_count) != 0); + iser_dbg("Initially post: %d\n", ISER_MIN_POSTED_RX); /* Initial post receive buffers */ if (iser_post_recvm(iser_conn->ib_conn, ISER_MIN_POSTED_RX)) return -ENOMEM; @@ -426,6 +427,9 @@ int iser_send_control(struct iscsi_conn *conn, err = iser_post_recvl(iser_conn->ib_conn); if (err) goto send_control_error; + err = iser_post_rx_bufs(conn, task->hdr); + if (err) + goto send_control_error; } err = iser_post_send(iser_conn->ib_conn, mdesc); -- cgit v0.10.2 From 2e96691c31ecf749f48aa94ea837b95dd656f5c2 Mon Sep 17 00:00:00 2001 From: Or Gerlitz Date: Tue, 28 Feb 2012 18:49:50 +0200 Subject: IB: Use central enum for speed instead of hard-coded values The kernel IB stack uses one enumeration for IB speed, which wasn't explicitly specified in the verbs header file. Add that enum, and use it all over the code. The IB speed/width notation is also used by iWARP and IBoE HW drivers, which use the convention of rate = speed * width to advertise their port link rate. Signed-off-by: Or Gerlitz Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c index a860b6d..83b720e 100644 --- a/drivers/infiniband/core/sysfs.c +++ b/drivers/infiniband/core/sysfs.c @@ -187,27 +187,26 @@ static ssize_t rate_show(struct ib_port *p, struct port_attribute *unused, return ret; switch (attr.active_speed) { - case 1: - /* SDR */ + case IB_SPEED_SDR: rate = 25; break; - case 2: + case IB_SPEED_DDR: speed = " DDR"; rate = 50; break; - case 4: + case IB_SPEED_QDR: speed = " QDR"; rate = 100; break; - case 8: + case IB_SPEED_FDR10: speed = " FDR10"; rate = 100; break; - case 16: + case IB_SPEED_FDR: speed = " FDR"; rate = 140; break; - case 32: + case IB_SPEED_EDR: speed = " EDR"; rate = 250; break; diff --git a/drivers/infiniband/hw/amso1100/c2_provider.c b/drivers/infiniband/hw/amso1100/c2_provider.c index 12f923d..07eb3a8 100644 --- a/drivers/infiniband/hw/amso1100/c2_provider.c +++ b/drivers/infiniband/hw/amso1100/c2_provider.c @@ -94,7 +94,7 @@ static int c2_query_port(struct ib_device *ibdev, props->pkey_tbl_len = 1; props->qkey_viol_cntr = 0; props->active_width = 1; - props->active_speed = 1; + props->active_speed = IB_SPEED_SDR; return 0; } diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c index 37c224f..0bdf09a 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_provider.c +++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c @@ -1227,7 +1227,7 @@ static int iwch_query_port(struct ib_device *ibdev, props->gid_tbl_len = 1; props->pkey_tbl_len = 1; props->active_width = 2; - props->active_speed = 2; + props->active_speed = IB_SPEED_DDR; props->max_msg_sz = -1; return 0; diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c index 247fe70..be1c18f 100644 --- 
a/drivers/infiniband/hw/cxgb4/provider.c +++ b/drivers/infiniband/hw/cxgb4/provider.c @@ -329,7 +329,7 @@ static int c4iw_query_port(struct ib_device *ibdev, u8 port, props->gid_tbl_len = 1; props->pkey_tbl_len = 1; props->active_width = 2; - props->active_speed = 2; + props->active_speed = IB_SPEED_DDR; props->max_msg_sz = -1; return 0; diff --git a/drivers/infiniband/hw/ehca/ehca_hca.c b/drivers/infiniband/hw/ehca/ehca_hca.c index 73edc36..9ed4d25 100644 --- a/drivers/infiniband/hw/ehca/ehca_hca.c +++ b/drivers/infiniband/hw/ehca/ehca_hca.c @@ -233,7 +233,7 @@ int ehca_query_port(struct ib_device *ibdev, props->phys_state = 5; props->state = rblock->state; props->active_width = IB_WIDTH_12X; - props->active_speed = 0x1; + props->active_speed = IB_SPEED_SDR; } query_port1: diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 7b445df..6ff6bdf 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -215,16 +215,16 @@ static int ib_link_query_port(struct ib_device *ibdev, u8 port, switch (ext_active_speed) { case 1: - props->active_speed = 16; /* FDR */ + props->active_speed = IB_SPEED_FDR; break; case 2: - props->active_speed = 32; /* EDR */ + props->active_speed = IB_SPEED_EDR; break; } } /* If reported active speed is QDR, check if is FDR-10 */ - if (props->active_speed == 4) { + if (props->active_speed == IB_SPEED_QDR) { if (to_mdev(ibdev)->dev->caps.ext_port_cap[port] & MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO) { init_query_mad(in_mad); @@ -238,7 +238,7 @@ static int ib_link_query_port(struct ib_device *ibdev, u8 port, /* Checking LinkSpeedActive for FDR-10 */ if (out_mad->data[15] & 0x1) - props->active_speed = 8; + props->active_speed = IB_SPEED_FDR10; } } @@ -259,7 +259,7 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port, enum ib_mtu tmp; props->active_width = IB_WIDTH_1X; - props->active_speed = 4; + props->active_speed = IB_SPEED_QDR; props->port_cap_flags = IB_PORT_CM_SUP; props->gid_tbl_len = to_mdev(ibdev)->dev->caps.gid_table_len[port]; props->max_msg_sz = to_mdev(ibdev)->dev->caps.max_msg_sz; diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c index 0927b5c..8b8812d 100644 --- a/drivers/infiniband/hw/nes/nes_verbs.c +++ b/drivers/infiniband/hw/nes/nes_verbs.c @@ -597,7 +597,7 @@ static int nes_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr props->pkey_tbl_len = 1; props->qkey_viol_cntr = 0; props->active_width = IB_WIDTH_4X; - props->active_speed = 1; + props->active_speed = IB_SPEED_SDR; props->max_msg_sz = 0x80000000; return 0; diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index bf5daaf..ff22a73 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -239,6 +239,15 @@ static inline int ib_width_enum_to_int(enum ib_port_width width) } } +enum ib_port_speed { + IB_SPEED_SDR = 1, + IB_SPEED_DDR = 2, + IB_SPEED_QDR = 4, + IB_SPEED_FDR10 = 8, + IB_SPEED_FDR = 16, + IB_SPEED_EDR = 32 +}; + struct ib_protocol_stats { /* TBD... */ }; -- cgit v0.10.2 From bd50f8924c684f84416fb58c11eb24619b041f25 Mon Sep 17 00:00:00 2001 From: Kyle McMartin Date: Mon, 27 Feb 2012 17:02:56 -0500 Subject: IB/ehca: Fix ilog2() compile failure I'm getting compile failures building this driver, which I narrowed down to the ilog2 call in ehca_get_max_hwpage_size... ERROR: ".____ilog2_NaN" [drivers/infiniband/hw/ehca/ib_ehca.ko] undefined! 
make[1]: *** [__modpost] Error 1 make: *** [modules] Error 2 The use of shca->hca_cap_mr_pgsize is confusing the compiler, and resulting in the __builtin_constant_p in ilog2 going insane. I tried making it take the u32 pgsize as an argument and the expansion of shca->_pgsize in the caller, but that failed as well. With this patch in place, the driver compiles on my GCC 4.6.2 here. Suggested-by: Roland Dreier Signed-off-by: Kyle McMartin Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.c b/drivers/infiniband/hw/ehca/ehca_mrmw.c index 43cae84..b781b2c 100644 --- a/drivers/infiniband/hw/ehca/ehca_mrmw.c +++ b/drivers/infiniband/hw/ehca/ehca_mrmw.c @@ -112,7 +112,7 @@ static u32 ehca_encode_hwpage_size(u32 pgsize) static u64 ehca_get_max_hwpage_size(struct ehca_shca *shca) { - return 1UL << ilog2(shca->hca_cap_mr_pgsize); + return rounddown_pow_of_two(shca->hca_cap_mr_pgsize); } static struct ehca_mr *ehca_mr_new(void) -- cgit v0.10.2 From 186834b5de69a89ba6cc846e7259451ced689b64 Mon Sep 17 00:00:00 2001 From: "Hefty, Sean" Date: Fri, 2 Mar 2012 00:01:19 +0000 Subject: RDMA/ucma: Fix AB-BA deadlock When we destroy a cm_id, we must purge associated events from the event queue. If the cm_id is for a listen request, we also purge corresponding pending connect requests. This requires destroying the cm_id's associated with the connect requests by calling rdma_destroy_id(). rdma_destroy_id() blocks until all outstanding callbacks have completed. The issue is that we hold file->mut while purging events from the event queue. We also acquire file->mut in our event handler. Calling rdma_destroy_id() while holding file->mut can lead to a deadlock, since the event handler callback cannot acquire file->mut, which prevents rdma_destroy_id() from completing. Fix this by moving events to purge from the event queue to a temporary list. We can then release file->mut and call rdma_destroy_id() outside of holding any locks. Bug report by Or Gerlitz : [ INFO: possible circular locking dependency detected ] 3.3.0-rc5-00008-g79f1e43-dirty #34 Tainted: G I tgtd/9018 is trying to acquire lock: (&id_priv->handler_mutex){+.+.+.}, at: [] rdma_destroy_id+0x33/0x1f0 [rdma_cm] but task is already holding lock: (&file->mut){+.+.+.}, at: [] ucma_free_ctx+0xb6/0x196 [rdma_ucm] which lock already depends on the new lock. 
the existing dependency chain (in reverse order) is: -> #1 (&file->mut){+.+.+.}: [] lock_acquire+0xf0/0x116 [] mutex_lock_nested+0x64/0x2e6 [] ucma_event_handler+0x148/0x1dc [rdma_ucm] [] cma_ib_handler+0x1a7/0x1f7 [rdma_cm] [] cm_process_work+0x32/0x119 [ib_cm] [] cm_work_handler+0xfb8/0xfe5 [ib_cm] [] process_one_work+0x2bd/0x4a6 [] worker_thread+0x1d6/0x350 [] kthread+0x84/0x8c [] kernel_thread_helper+0x4/0x10 -> #0 (&id_priv->handler_mutex){+.+.+.}: [] __lock_acquire+0x10d5/0x1752 [] lock_acquire+0xf0/0x116 [] mutex_lock_nested+0x64/0x2e6 [] rdma_destroy_id+0x33/0x1f0 [rdma_cm] [] ucma_free_ctx+0x117/0x196 [rdma_ucm] [] ucma_close+0x77/0xb4 [rdma_ucm] [] fput+0x117/0x1cf [] filp_close+0x6d/0x78 [] put_files_struct+0xbd/0x17d [] exit_files+0x46/0x4e [] do_exit+0x299/0x75d [] do_group_exit+0x7e/0xa9 [] get_signal_to_deliver+0x536/0x555 [] do_signal+0x39/0x634 [] do_notify_resume+0x27/0x69 [] retint_signal+0x46/0x83 other info that might help us debug this: Possible unsafe locking scenario: CPU0 CPU1 ---- ---- lock(&file->mut); lock(&id_priv->handler_mutex); lock(&file->mut); lock(&id_priv->handler_mutex); *** DEADLOCK *** 1 lock held by tgtd/9018: #0: (&file->mut){+.+.+.}, at: [] ucma_free_ctx+0xb6/0x196 [rdma_ucm] stack backtrace: Pid: 9018, comm: tgtd Tainted: G I 3.3.0-rc5-00008-g79f1e43-dirty #34 Call Trace: [] ? console_unlock+0x18e/0x207 [] print_circular_bug+0x28e/0x29f [] __lock_acquire+0x10d5/0x1752 [] lock_acquire+0xf0/0x116 [] ? rdma_destroy_id+0x33/0x1f0 [rdma_cm] [] mutex_lock_nested+0x64/0x2e6 [] ? rdma_destroy_id+0x33/0x1f0 [rdma_cm] [] ? trace_hardirqs_on_caller+0x11e/0x155 [] ? trace_hardirqs_on+0xd/0xf [] rdma_destroy_id+0x33/0x1f0 [rdma_cm] [] ucma_free_ctx+0x117/0x196 [rdma_ucm] [] ucma_close+0x77/0xb4 [rdma_ucm] [] fput+0x117/0x1cf [] filp_close+0x6d/0x78 [] put_files_struct+0xbd/0x17d [] ? put_files_struct+0x22/0x17d [] exit_files+0x46/0x4e [] do_exit+0x299/0x75d [] do_group_exit+0x7e/0xa9 [] get_signal_to_deliver+0x536/0x555 [] ? trace_hardirqs_on+0xd/0xf [] do_signal+0x39/0x634 [] ? printk+0x3c/0x45 [] ? trace_hardirqs_on_caller+0x11e/0x155 [] ? trace_hardirqs_on+0xd/0xf [] ? _raw_spin_unlock_irq+0x2b/0x40 [] ? set_current_blocked+0x44/0x49 [] ? retint_signal+0x11/0x83 [] do_notify_resume+0x27/0x69 [] ? trace_hardirqs_on_thunk+0x3a/0x3f [] retint_signal+0x46/0x83 Signed-off-by: Sean Hefty Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c index 5034a87..5861cdb 100644 --- a/drivers/infiniband/core/ucma.c +++ b/drivers/infiniband/core/ucma.c @@ -449,24 +449,6 @@ static void ucma_cleanup_multicast(struct ucma_context *ctx) mutex_unlock(&mut); } -static void ucma_cleanup_events(struct ucma_context *ctx) -{ - struct ucma_event *uevent, *tmp; - - list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) { - if (uevent->ctx != ctx) - continue; - - list_del(&uevent->list); - - /* clear incoming connections. */ - if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) - rdma_destroy_id(uevent->cm_id); - - kfree(uevent); - } -} - static void ucma_cleanup_mc_events(struct ucma_multicast *mc) { struct ucma_event *uevent, *tmp; @@ -480,9 +462,16 @@ static void ucma_cleanup_mc_events(struct ucma_multicast *mc) } } +/* + * We cannot hold file->mut when calling rdma_destroy_id() or we can + * deadlock. We also acquire file->mut in ucma_event_handler(), and + * rdma_destroy_id() will wait until all callbacks have completed. 
+ */ static int ucma_free_ctx(struct ucma_context *ctx) { int events_reported; + struct ucma_event *uevent, *tmp; + LIST_HEAD(list); /* No new events will be generated after destroying the id. */ rdma_destroy_id(ctx->cm_id); @@ -491,10 +480,20 @@ static int ucma_free_ctx(struct ucma_context *ctx) /* Cleanup events not yet reported to the user. */ mutex_lock(&ctx->file->mut); - ucma_cleanup_events(ctx); + list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) { + if (uevent->ctx == ctx) + list_move_tail(&uevent->list, &list); + } list_del(&ctx->list); mutex_unlock(&ctx->file->mut); + list_for_each_entry_safe(uevent, tmp, &list, list) { + list_del(&uevent->list); + if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) + rdma_destroy_id(uevent->cm_id); + kfree(uevent); + } + events_reported = ctx->events_reported; kfree(ctx); return events_reported; -- cgit v0.10.2 From 8154c07fe14e387c5a7c7f2eb70534813634e45e Mon Sep 17 00:00:00 2001 From: Or Gerlitz Date: Tue, 6 Mar 2012 15:50:50 +0200 Subject: mlx4_core: Get rid of redundant ext_port_cap flags While doing the work for commit a6f7feae6d ("IB/mlx4: pass SMP vendor-specific attribute MADs to firmware") we realized that the firmware would respond on all sorts of vendor-specific MADs. Therefore commit 97285b7817 ("mlx4_core: Add extended port capabilities support") adds redundant code into the driver, since there's no real reason to maintain the extended capabilities of the port, as they can be queried on demand (e.g. the FDR10 capability). This patch reverts commit 97285b7817 and removes the check for extended caps from the mlx4_ib driver port query flow. Signed-off-by: Or Gerlitz Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 6ff6bdf..abce99e 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -225,21 +225,18 @@ static int ib_link_query_port(struct ib_device *ibdev, u8 port, /* If reported active speed is QDR, check if is FDR-10 */ if (props->active_speed == IB_SPEED_QDR) { - if (to_mdev(ibdev)->dev->caps.ext_port_cap[port] & - MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO) { - init_query_mad(in_mad); - in_mad->attr_id = MLX4_ATTR_EXTENDED_PORT_INFO; - in_mad->attr_mod = cpu_to_be32(port); - - err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, - NULL, NULL, in_mad, out_mad); - if (err) - return err; + init_query_mad(in_mad); + in_mad->attr_id = MLX4_ATTR_EXTENDED_PORT_INFO; + in_mad->attr_mod = cpu_to_be32(port); - /* Checking LinkSpeedActive for FDR-10 */ - if (out_mad->data[15] & 0x1) - props->active_speed = IB_SPEED_FDR10; - } + err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, + NULL, NULL, in_mad, out_mad); + if (err) + return err; + + /* Checking LinkSpeedActive for FDR-10 */ + if (out_mad->data[15] & 0x1) + props->active_speed = IB_SPEED_FDR10; } return 0; diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 678558b..2f94d30 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -1361,13 +1361,6 @@ static int mlx4_setup_hca(struct mlx4_dev *dev) "with caps = 0\n", port, err); dev->caps.ib_port_def_cap[port] = ib_port_default_caps; - err = mlx4_check_ext_port_caps(dev, port); - if (err) - mlx4_warn(dev, "failed to get port %d extended " - " port capabilities support info (%d)."
- " Assuming not supported\n", - port, err); - err = mlx4_SET_PORT(dev, port); if (err) { mlx4_err(dev, "Failed to set port %d, aborting\n", diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index c92269f..c34d30a 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -1025,7 +1025,6 @@ int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_cmd_mailbox *outbox, struct mlx4_cmd_info *cmd); int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps); -int mlx4_check_ext_port_caps(struct mlx4_dev *dev, u8 port); int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave, diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c index f44ae55..51708dd 100644 --- a/drivers/net/ethernet/mellanox/mlx4/port.c +++ b/drivers/net/ethernet/mellanox/mlx4/port.c @@ -590,49 +590,6 @@ int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps) return err; } -int mlx4_check_ext_port_caps(struct mlx4_dev *dev, u8 port) -{ - struct mlx4_cmd_mailbox *inmailbox, *outmailbox; - u8 *inbuf, *outbuf; - int err, packet_error; - - inmailbox = mlx4_alloc_cmd_mailbox(dev); - if (IS_ERR(inmailbox)) - return PTR_ERR(inmailbox); - - outmailbox = mlx4_alloc_cmd_mailbox(dev); - if (IS_ERR(outmailbox)) { - mlx4_free_cmd_mailbox(dev, inmailbox); - return PTR_ERR(outmailbox); - } - - inbuf = inmailbox->buf; - outbuf = outmailbox->buf; - memset(inbuf, 0, 256); - memset(outbuf, 0, 256); - inbuf[0] = 1; - inbuf[1] = 1; - inbuf[2] = 1; - inbuf[3] = 1; - - *(__be16 *) (&inbuf[16]) = MLX4_ATTR_EXTENDED_PORT_INFO; - *(__be32 *) (&inbuf[20]) = cpu_to_be32(port); - - err = mlx4_cmd_box(dev, inmailbox->dma, outmailbox->dma, port, 3, - MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C, - MLX4_CMD_NATIVE); - - packet_error = be16_to_cpu(*(__be16 *) (outbuf + 4)); - - dev->caps.ext_port_cap[port] = (!err && !packet_error) ? - MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO - : 0; - - mlx4_free_cmd_mailbox(dev, inmailbox); - mlx4_free_cmd_mailbox(dev, outmailbox); - return err; -} - static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod, u8 op_mod, struct mlx4_cmd_mailbox *inbox) { diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index aea6190..be7f235 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -101,10 +101,6 @@ enum { #define MLX4_ATTR_EXTENDED_PORT_INFO cpu_to_be16(0xff90) enum { - MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO = 1 << 0 -}; - -enum { MLX4_BMME_FLAG_LOCAL_INV = 1 << 6, MLX4_BMME_FLAG_REMOTE_INV = 1 << 7, MLX4_BMME_FLAG_TYPE_2_WIN = 1 << 9, @@ -308,7 +304,6 @@ struct mlx4_caps { u32 port_mask[MLX4_MAX_PORTS + 1]; enum mlx4_port_type possible_type[MLX4_MAX_PORTS + 1]; u32 max_counters; - u8 ext_port_cap[MLX4_MAX_PORTS + 1]; }; struct mlx4_buf_list { -- cgit v0.10.2 From db4106ce635830201fad1bfca731a635beab6a72 Mon Sep 17 00:00:00 2001 From: Steve Wise Date: Wed, 7 Mar 2012 16:48:46 -0600 Subject: RDMA/cxgb3: Don't pass irq flags to flush_qp() Since flush_qp() is always called with irqs disabled, all the locking inside flush_qp() and __flush_qp() doesn't need irq save/restore. Further, passing the flag variable from iwch_modify_qp() is just wrong and causes a WARN_ON() in local_bh_enable(). 
Signed-off-by: Steve Wise Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c index bea5839..6de8463 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_qp.c +++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c @@ -803,7 +803,7 @@ int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg) * Assumes qhp lock is held. */ static void __flush_qp(struct iwch_qp *qhp, struct iwch_cq *rchp, - struct iwch_cq *schp, unsigned long *flag) + struct iwch_cq *schp) { int count; int flushed; @@ -812,44 +812,44 @@ static void __flush_qp(struct iwch_qp *qhp, struct iwch_cq *rchp, PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp); /* take a ref on the qhp since we must release the lock */ atomic_inc(&qhp->refcnt); - spin_unlock_irqrestore(&qhp->lock, *flag); + spin_unlock(&qhp->lock); /* locking hierarchy: cq lock first, then qp lock. */ - spin_lock_irqsave(&rchp->lock, *flag); + spin_lock(&rchp->lock); spin_lock(&qhp->lock); cxio_flush_hw_cq(&rchp->cq); cxio_count_rcqes(&rchp->cq, &qhp->wq, &count); flushed = cxio_flush_rq(&qhp->wq, &rchp->cq, count); spin_unlock(&qhp->lock); - spin_unlock_irqrestore(&rchp->lock, *flag); + spin_unlock(&rchp->lock); if (flushed) { - spin_lock_irqsave(&rchp->comp_handler_lock, *flag); + spin_lock(&rchp->comp_handler_lock); (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context); - spin_unlock_irqrestore(&rchp->comp_handler_lock, *flag); + spin_unlock(&rchp->comp_handler_lock); } /* locking hierarchy: cq lock first, then qp lock. */ - spin_lock_irqsave(&schp->lock, *flag); + spin_lock(&schp->lock); spin_lock(&qhp->lock); cxio_flush_hw_cq(&schp->cq); cxio_count_scqes(&schp->cq, &qhp->wq, &count); flushed = cxio_flush_sq(&qhp->wq, &schp->cq, count); spin_unlock(&qhp->lock); - spin_unlock_irqrestore(&schp->lock, *flag); + spin_unlock(&schp->lock); if (flushed) { - spin_lock_irqsave(&schp->comp_handler_lock, *flag); + spin_lock(&schp->comp_handler_lock); (*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context); - spin_unlock_irqrestore(&schp->comp_handler_lock, *flag); + spin_unlock(&schp->comp_handler_lock); } /* deref */ if (atomic_dec_and_test(&qhp->refcnt)) wake_up(&qhp->wait); - spin_lock_irqsave(&qhp->lock, *flag); + spin_lock(&qhp->lock); } -static void flush_qp(struct iwch_qp *qhp, unsigned long *flag) +static void flush_qp(struct iwch_qp *qhp) { struct iwch_cq *rchp, *schp; @@ -859,19 +859,19 @@ static void flush_qp(struct iwch_qp *qhp, unsigned long *flag) if (qhp->ibqp.uobject) { cxio_set_wq_in_error(&qhp->wq); cxio_set_cq_in_error(&rchp->cq); - spin_lock_irqsave(&rchp->comp_handler_lock, *flag); + spin_lock(&rchp->comp_handler_lock); (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context); - spin_unlock_irqrestore(&rchp->comp_handler_lock, *flag); + spin_unlock(&rchp->comp_handler_lock); if (schp != rchp) { cxio_set_cq_in_error(&schp->cq); - spin_lock_irqsave(&schp->comp_handler_lock, *flag); + spin_lock(&schp->comp_handler_lock); (*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context); - spin_unlock_irqrestore(&schp->comp_handler_lock, *flag); + spin_unlock(&schp->comp_handler_lock); } return; } - __flush_qp(qhp, rchp, schp, flag); + __flush_qp(qhp, rchp, schp); } @@ -1030,7 +1030,7 @@ int iwch_modify_qp(struct iwch_dev *rhp, struct iwch_qp *qhp, break; case IWCH_QP_STATE_ERROR: qhp->attr.state = IWCH_QP_STATE_ERROR; - flush_qp(qhp, &flag); + flush_qp(qhp); break; default: ret = -EINVAL; @@ -1078,7 +1078,7 @@ int iwch_modify_qp(struct iwch_dev *rhp, struct 
iwch_qp *qhp, } switch (attrs->next_state) { case IWCH_QP_STATE_IDLE: - flush_qp(qhp, &flag); + flush_qp(qhp); qhp->attr.state = IWCH_QP_STATE_IDLE; qhp->attr.llp_stream_handle = NULL; put_ep(&qhp->ep->com);
@@ -1132,7 +1132,7 @@ err: free=1; wake_up(&qhp->wait); BUG_ON(!ep); - flush_qp(qhp, &flag); + flush_qp(qhp); out: spin_unlock_irqrestore(&qhp->lock, flag);
-- cgit v0.10.2

From 3eae7c9f97415ac1f5ab5db4eeb03cde689eb427 Mon Sep 17 00:00:00 2001
From: Steve Wise
Date: Wed, 7 Mar 2012 16:48:11 -0600
Subject: RDMA/iwcm: Reject connect requests if cmid is not in LISTEN state

When destroying a listening cmid, the iwcm first marks the state of the cmid as DESTROYING, then releases the lock and calls into the iWARP provider to destroy the endpoint. Since the cmid is not locked, it's possible for the iWARP provider to pass a connection request event to the iwcm, which will be silently dropped by the iwcm. This causes the iWARP provider to never free up the resources from this connection, because it assumes the iwcm will accept or reject the connection.

The solution is to reject these connection requests.

Signed-off-by: Steve Wise
Signed-off-by: Roland Dreier

diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c index 1a696f7..0bb99bb 100644 --- a/drivers/infiniband/core/iwcm.c +++ b/drivers/infiniband/core/iwcm.c
@@ -624,17 +624,6 @@ static void cm_conn_req_handler(struct iwcm_id_private *listen_id_priv, */ BUG_ON(iw_event->status); - /* - * We could be destroying the listening id. If so, ignore this - * upcall. - */ - spin_lock_irqsave(&listen_id_priv->lock, flags); - if (listen_id_priv->state != IW_CM_STATE_LISTEN) { - spin_unlock_irqrestore(&listen_id_priv->lock, flags); - goto out; - } - spin_unlock_irqrestore(&listen_id_priv->lock, flags); - cm_id = iw_create_cm_id(listen_id_priv->id.device, listen_id_priv->id.cm_handler, listen_id_priv->id.context);
@@ -649,6 +638,19 @@ static void cm_conn_req_handler(struct iwcm_id_private *listen_id_priv, cm_id_priv = container_of(cm_id, struct iwcm_id_private, id); cm_id_priv->state = IW_CM_STATE_CONN_RECV; + /* + * We could be destroying the listening id. If so, ignore this - * upcall. + */ + spin_lock_irqsave(&listen_id_priv->lock, flags); + if (listen_id_priv->state != IW_CM_STATE_LISTEN) { + spin_unlock_irqrestore(&listen_id_priv->lock, flags); + iw_cm_reject(cm_id, NULL, 0); + iw_destroy_cm_id(cm_id); + goto out; + } + spin_unlock_irqrestore(&listen_id_priv->lock, flags); + ret = alloc_work_entries(cm_id_priv, 3); if (ret) { iw_cm_reject(cm_id, NULL, 0);
-- cgit v0.10.2

From d927d505c59a0c7353343174e6225c43c61fba6d Mon Sep 17 00:00:00 2001
From: Or Gerlitz
Date: Wed, 11 Jan 2012 19:03:51 +0200
Subject: IB: Change CQE "csum_ok" field to a bit flag

Use a bit in wc_flags rather than a whole integer to hold the "checksum OK" flag.

By itself, this change doesn't reduce the size of struct ib_wc on 64-bit machines -- it stays at 56 bytes because of padding. However, it will allow adding more fields in the future without enlarging the struct. Also, it will let us have a unified approach with future libibverbs checksum offload reporting, because a bit flag doesn't break the library ABI.

This patch was suggested during conversation with Liran Liss.
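For reference, a sketch of the consumer-side idiom this enables (the cq argument and the two handle_rx* helpers are assumed context, not part of this patch; the ipoib hunk below is the in-tree example): consumers test a bit in wc_flags instead of a dedicated field, so future flags cost neither struct growth nor an ABI break.

	#include <rdma/ib_verbs.h>

	/* Sketch: drain a CQ, honoring hardware checksum validation. */
	static void drain_cq(struct ib_cq *cq)
	{
		struct ib_wc wc;

		while (ib_poll_cq(cq, 1, &wc) > 0) {
			if (wc.status != IB_WC_SUCCESS)
				continue;
			if (wc.wc_flags & IB_WC_IP_CSUM_OK)
				handle_rx_csum_ok(&wc);	/* assumed helper: skip SW checksum */
			else
				handle_rx(&wc);		/* assumed helper: verify in SW */
		}
	}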
Signed-off-by: Or Gerlitz Reviewed-by: Sean Hefty Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c index 5ecf38d..09eb1d1 100644 --- a/drivers/infiniband/hw/mlx4/cq.c +++ b/drivers/infiniband/hw/mlx4/cq.c @@ -720,7 +720,8 @@ repoll: wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f; wc->wc_flags |= g_mlpath_rqpn & 0x80000000 ? IB_WC_GRH : 0; wc->pkey_index = be32_to_cpu(cqe->immed_rss_invalid) & 0x7f; - wc->csum_ok = mlx4_ib_ipoib_csum_ok(cqe->status, cqe->checksum); + wc->wc_flags |= mlx4_ib_ipoib_csum_ok(cqe->status, + cqe->checksum) ? IB_WC_IP_CSUM_OK : 0; if (rdma_port_get_link_layer(wc->qp->device, (*cur_qp)->port) == IB_LINK_LAYER_ETHERNET) wc->sl = be16_to_cpu(cqe->sl_vid) >> 13; diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c index 53157b8..40ba833 100644 --- a/drivers/infiniband/hw/mthca/mthca_cq.c +++ b/drivers/infiniband/hw/mthca/mthca_cq.c @@ -643,7 +643,8 @@ static inline int mthca_poll_one(struct mthca_dev *dev, entry->wc_flags |= cqe->g_mlpath & 0x80 ? IB_WC_GRH : 0; checksum = (be32_to_cpu(cqe->rqpn) >> 24) | ((be32_to_cpu(cqe->my_ee) >> 16) & 0xff00); - entry->csum_ok = (cqe->sl_ipok & 1 && checksum == 0xffff); + entry->wc_flags |= (cqe->sl_ipok & 1 && checksum == 0xffff) ? + IB_WC_IP_CSUM_OK : 0; } entry->status = IB_WC_SUCCESS; diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c index 894afac..765b4cb 100644 --- a/drivers/infiniband/hw/qib/qib_rc.c +++ b/drivers/infiniband/hw/qib/qib_rc.c @@ -2048,7 +2048,6 @@ send_last: wc.pkey_index = 0; wc.dlid_path_bits = 0; wc.port_num = 0; - wc.csum_ok = 0; /* Signal completion event if the solicited bit is set. */ qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, (ohdr->bth[0] & diff --git a/drivers/infiniband/hw/qib/qib_uc.c b/drivers/infiniband/hw/qib/qib_uc.c index 847e7af..7ce2ac2 100644 --- a/drivers/infiniband/hw/qib/qib_uc.c +++ b/drivers/infiniband/hw/qib/qib_uc.c @@ -422,7 +422,6 @@ last_imm: wc.pkey_index = 0; wc.dlid_path_bits = 0; wc.port_num = 0; - wc.csum_ok = 0; /* Signal completion event if the solicited bit is set. 
*/ qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, (ohdr->bth[0] & diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index 4115be5..5c1bc99 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c @@ -296,7 +296,8 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc) dev->stats.rx_bytes += skb->len; skb->dev = dev; - if ((dev->features & NETIF_F_RXCSUM) && likely(wc->csum_ok)) + if ((dev->features & NETIF_F_RXCSUM) && + likely(wc->wc_flags & IB_WC_IP_CSUM_OK)) skb->ip_summed = CHECKSUM_UNNECESSARY; napi_gro_receive(&priv->napi, skb); diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index ff22a73..c3cca5a 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -518,6 +518,7 @@ enum ib_wc_flags { IB_WC_GRH = 1, IB_WC_WITH_IMM = (1<<1), IB_WC_WITH_INVALIDATE = (1<<2), + IB_WC_IP_CSUM_OK = (1<<3), }; struct ib_wc { @@ -538,7 +539,6 @@ struct ib_wc { u8 sl; u8 dlid_path_bits; u8 port_num; /* valid only for DR SMPs on switches */ - int csum_ok; }; enum ib_cq_notify_flags { -- cgit v0.10.2 From e10903b087e425298fb86c6ad4b1a88735480db7 Mon Sep 17 00:00:00 2001 From: Roland Dreier Date: Sun, 26 Feb 2012 01:48:12 -0800 Subject: mlx4_core: Fix one more static exported function Commit 22c8bff6face ("mlx4_core: Exported functions can't be static") fixed most of this up, but forgot about mlx4_is_slave_active(). Fix this one too. Cc: Signed-off-by: Roland Dreier diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 3e593ae..f057896 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -394,7 +394,7 @@ static int mlx4_how_many_lives_vf(struct mlx4_dev *dev) return ret; } -static int mlx4_is_slave_active(struct mlx4_dev *dev, int slave) +int mlx4_is_slave_active(struct mlx4_dev *dev, int slave) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_slave_state *s_slave; -- cgit v0.10.2 From 5984be90046fa978d94a5ec08bbf8f760cff2b30 Mon Sep 17 00:00:00 2001 From: Jack Morgenstein Date: Tue, 6 Mar 2012 15:50:49 +0200 Subject: mlx4_core: Report thermal error events Print an error message when a thermal error async event is reported by the HW. Signed-off-by: Jack Morgenstein Signed-off-by: Dotan Barak Signed-off-by: Roland Dreier diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c index 8fa41f3..780b5ad 100644 --- a/drivers/net/ethernet/mellanox/mlx4/eq.c +++ b/drivers/net/ethernet/mellanox/mlx4/eq.c @@ -79,7 +79,8 @@ enum { (1ull << MLX4_EVENT_TYPE_SRQ_LIMIT) | \ (1ull << MLX4_EVENT_TYPE_CMD) | \ (1ull << MLX4_EVENT_TYPE_COMM_CHANNEL) | \ - (1ull << MLX4_EVENT_TYPE_FLR_EVENT)) + (1ull << MLX4_EVENT_TYPE_FLR_EVENT) | \ + (1ull << MLX4_EVENT_TYPE_FATAL_WARNING)) static void eq_set_ci(struct mlx4_eq *eq, int req_not) { @@ -443,6 +444,35 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) queue_work(priv->mfunc.master.comm_wq, &priv->mfunc.master.slave_flr_event_work); break; + + case MLX4_EVENT_TYPE_FATAL_WARNING: + if (eqe->subtype == MLX4_FATAL_WARNING_SUBTYPE_WARMING) { + if (mlx4_is_master(dev)) + for (i = 0; i < dev->num_slaves; i++) { + mlx4_dbg(dev, "%s: Sending " + "MLX4_FATAL_WARNING_SUBTYPE_WARMING" + " to slave: %d\n", __func__, i); + if (i == dev->caps.function) + continue; + mlx4_slave_event(dev, i, eqe); + } + mlx4_err(dev, "Temperature Threshold was reached! 
" + "Threshold: %d celsius degrees; " + "Current Temperature: %d\n", + be16_to_cpu(eqe->event.warming.warning_threshold), + be16_to_cpu(eqe->event.warming.current_temperature)); + } else + mlx4_warn(dev, "Unhandled event FATAL WARNING (%02x), " + "subtype %02x on EQ %d at index %u. owner=%x, " + "nent=0x%x, slave=%x, ownership=%s\n", + eqe->type, eqe->subtype, eq->eqn, + eq->cons_index, eqe->owner, eq->nent, + eqe->slave_id, + !!(eqe->owner & 0x80) ^ + !!(eq->cons_index & eq->nent) ? "HW" : "SW"); + + break; + case MLX4_EVENT_TYPE_EEC_CATAS_ERROR: case MLX4_EVENT_TYPE_ECC_DETECT: default: diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index c92269f..ac2d606 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -363,6 +363,10 @@ struct mlx4_eqe { struct { __be32 slave_id; } __packed flr_event; + struct { + __be16 current_temperature; + __be16 warning_threshold; + } __packed warming; } event; u8 slave_id; u8 reserved3[2]; diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 263d2ae..4b3fbf1 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -133,6 +133,7 @@ enum mlx4_event { MLX4_EVENT_TYPE_CMD = 0x0a, MLX4_EVENT_TYPE_VEP_UPDATE = 0x19, MLX4_EVENT_TYPE_COMM_CHANNEL = 0x18, + MLX4_EVENT_TYPE_FATAL_WARNING = 0x1b, MLX4_EVENT_TYPE_FLR_EVENT = 0x1c, MLX4_EVENT_TYPE_NONE = 0xff, }; @@ -143,6 +144,10 @@ enum { }; enum { + MLX4_FATAL_WARNING_SUBTYPE_WARMING = 0, +}; + +enum { MLX4_PERM_LOCAL_READ = 1 << 10, MLX4_PERM_LOCAL_WRITE = 1 << 11, MLX4_PERM_REMOTE_READ = 1 << 12, -- cgit v0.10.2 From 3616f9cead935d4e4c35915600d5e4d1384219cd Mon Sep 17 00:00:00 2001 From: Eli Cohen Date: Tue, 6 Mar 2012 15:50:51 +0200 Subject: IB/mlx4: Fix possible missed completion event If an erroneous CQE is polled in the first iteration (i.e. npolled == 0), we don't update the consumer index and hence the hardware could get a wrong notion of how many CQEs software polled. Fix this by unconditionally updating the doorbell record. We could change the check to be something like if (npolled || err != -EAGAIN) ... but it does not seem worth the effort since a posted write to memory should not cost too much. Signed-off-by: Eli Cohen Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c index 5ecf38d..275861b 100644 --- a/drivers/infiniband/hw/mlx4/cq.c +++ b/drivers/infiniband/hw/mlx4/cq.c @@ -747,8 +747,7 @@ int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) break; } - if (npolled) - mlx4_cq_set_ci(&cq->mcq); + mlx4_cq_set_ci(&cq->mcq); spin_unlock_irqrestore(&cq->lock, flags); -- cgit v0.10.2 From a9c766bb75ee2caad2735e41784387784ffd87db Mon Sep 17 00:00:00 2001 From: Or Gerlitz Date: Wed, 11 Jan 2012 19:00:29 +0200 Subject: IB/mlx4: Fix info returned when querying IBoE ports To issue a port query, use the QUERY_(Ethernet)_PORT command instead of the MAD_IFC command, since MAD_IFC attempts to query the firmware IB SMA, which is irrelevant for IBoE ports. This allows us to handle both 10Gb/s and 40Gb/s rates (e.g in sysfs), using QDR speed (10Gb/s) and width of 1X or 4X. 
Signed-off-by: Dotan Barak Signed-off-by: Or Gerlitz Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index e152837..1349cb1 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -182,12 +182,27 @@ mlx4_ib_port_link_layer(struct ib_device *device, u8 port_num) } static int ib_link_query_port(struct ib_device *ibdev, u8 port, - struct ib_port_attr *props, - struct ib_smp *in_mad, - struct ib_smp *out_mad) + struct ib_port_attr *props) { + struct ib_smp *in_mad = NULL; + struct ib_smp *out_mad = NULL; int ext_active_speed; - int err; + int err = -ENOMEM; + + in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL); + out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL); + if (!in_mad || !out_mad) + goto out; + + init_query_mad(in_mad); + in_mad->attr_id = IB_SMP_ATTR_PORT_INFO; + in_mad->attr_mod = cpu_to_be32(port); + + err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, + in_mad, out_mad); + if (err) + goto out; + props->lid = be16_to_cpup((__be16 *) (out_mad->data + 16)); props->lmc = out_mad->data[34] & 0x7; @@ -234,15 +249,17 @@ static int ib_link_query_port(struct ib_device *ibdev, u8 port, err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad); if (err) - return err; + goto out; /* Checking LinkSpeedActive for FDR-10 */ if (out_mad->data[15] & 0x1) props->active_speed = 8; } } - - return 0; +out: + kfree(in_mad); + kfree(out_mad); + return err; } static u8 state_to_phys_state(enum ib_port_state state) @@ -251,32 +268,42 @@ static u8 state_to_phys_state(enum ib_port_state state) } static int eth_link_query_port(struct ib_device *ibdev, u8 port, - struct ib_port_attr *props, - struct ib_smp *out_mad) + struct ib_port_attr *props) { - struct mlx4_ib_iboe *iboe = &to_mdev(ibdev)->iboe; + + struct mlx4_ib_dev *mdev = to_mdev(ibdev); + struct mlx4_ib_iboe *iboe = &mdev->iboe; struct net_device *ndev; enum ib_mtu tmp; + struct mlx4_cmd_mailbox *mailbox; + int err = 0; + + mailbox = mlx4_alloc_cmd_mailbox(mdev->dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, port, 0, + MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B, + MLX4_CMD_WRAPPED); + if (err) + goto out; - props->active_width = IB_WIDTH_1X; + props->active_width = (((u8 *)mailbox->buf)[5] == 0x40) ? + IB_WIDTH_4X : IB_WIDTH_1X; props->active_speed = 4; props->port_cap_flags = IB_PORT_CM_SUP; - props->gid_tbl_len = to_mdev(ibdev)->dev->caps.gid_table_len[port]; - props->max_msg_sz = to_mdev(ibdev)->dev->caps.max_msg_sz; + props->gid_tbl_len = mdev->dev->caps.gid_table_len[port]; + props->max_msg_sz = mdev->dev->caps.max_msg_sz; props->pkey_tbl_len = 1; - props->bad_pkey_cntr = be16_to_cpup((__be16 *) (out_mad->data + 46)); - props->qkey_viol_cntr = be16_to_cpup((__be16 *) (out_mad->data + 48)); props->max_mtu = IB_MTU_4096; - props->subnet_timeout = 0; - props->max_vl_num = out_mad->data[37] >> 4; - props->init_type_reply = 0; + props->max_vl_num = 2; props->state = IB_PORT_DOWN; props->phys_state = state_to_phys_state(props->state); props->active_mtu = IB_MTU_256; spin_lock(&iboe->lock); ndev = iboe->netdevs[port - 1]; if (!ndev) - goto out; + goto out_unlock; tmp = iboe_get_mtu(ndev->mtu); props->active_mtu = tmp ? min(props->max_mtu, tmp) : IB_MTU_256; @@ -284,41 +311,23 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port, props->state = (netif_running(ndev) && netif_carrier_ok(ndev)) ? 
IB_PORT_ACTIVE : IB_PORT_DOWN; props->phys_state = state_to_phys_state(props->state); - -out: +out_unlock: spin_unlock(&iboe->lock); - return 0; +out: + mlx4_free_cmd_mailbox(mdev->dev, mailbox); + return err; } static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *props) { - struct ib_smp *in_mad = NULL; - struct ib_smp *out_mad = NULL; - int err = -ENOMEM; - - in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL); - out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL); - if (!in_mad || !out_mad) - goto out; + int err; memset(props, 0, sizeof *props); - init_query_mad(in_mad); - in_mad->attr_id = IB_SMP_ATTR_PORT_INFO; - in_mad->attr_mod = cpu_to_be32(port); - - err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad); - if (err) - goto out; - err = mlx4_ib_port_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND ? - ib_link_query_port(ibdev, port, props, in_mad, out_mad) : - eth_link_query_port(ibdev, port, props, out_mad); - -out: - kfree(in_mad); - kfree(out_mad); + ib_link_query_port(ibdev, port, props) : + eth_link_query_port(ibdev, port, props); return err; }
-- cgit v0.10.2

From 096335b3f9830b90d13aee77252cf6f5f12a258c Mon Sep 17 00:00:00 2001
From: Or Gerlitz
Date: Wed, 11 Jan 2012 19:02:17 +0200
Subject: mlx4_core: Allow dynamic MTU configuration for IB ports

Set the MTU for IB ports in the driver instead of using the firmware default of 2KB (the driver defaults to 4KB). Allow for dynamic MTU configuration through a new, per-port sysfs entry.

Since there's a dependency between the port MTU and the max number of HW VLs the port can support, apply a min/max approach: loop down from the highest possible number of VLs to the lowest, and use the firmware return status to learn whether the requested number of VLs is possible with a given MTU.

For now, as with the dynamic link type change / VPI support, the sysfs entry to change the MTU is exposed only when NOT running in SR-IOV mode. To allow changing the MTU for the master in SR-IOV mode, primary-function-initiated FLR (Function Level Reset) needs to be implemented.

Signed-off-by: Or Gerlitz
Signed-off-by: Roland Dreier

diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index f057896..e92cfae 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -647,6 +647,99 @@ out: return err ?
err : count; } +enum ibta_mtu { + IB_MTU_256 = 1, + IB_MTU_512 = 2, + IB_MTU_1024 = 3, + IB_MTU_2048 = 4, + IB_MTU_4096 = 5 +}; + +static inline int int_to_ibta_mtu(int mtu) +{ + switch (mtu) { + case 256: return IB_MTU_256; + case 512: return IB_MTU_512; + case 1024: return IB_MTU_1024; + case 2048: return IB_MTU_2048; + case 4096: return IB_MTU_4096; + default: return -1; + } +} + +static inline int ibta_mtu_to_int(enum ibta_mtu mtu) +{ + switch (mtu) { + case IB_MTU_256: return 256; + case IB_MTU_512: return 512; + case IB_MTU_1024: return 1024; + case IB_MTU_2048: return 2048; + case IB_MTU_4096: return 4096; + default: return -1; + } +} + +static ssize_t show_port_ib_mtu(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info, + port_mtu_attr); + struct mlx4_dev *mdev = info->dev; + + if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH) + mlx4_warn(mdev, "port level mtu is only used for IB ports\n"); + + sprintf(buf, "%d\n", + ibta_mtu_to_int(mdev->caps.port_ib_mtu[info->port])); + return strlen(buf); +} + +static ssize_t set_port_ib_mtu(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info, + port_mtu_attr); + struct mlx4_dev *mdev = info->dev; + struct mlx4_priv *priv = mlx4_priv(mdev); + int err, port, mtu, ibta_mtu = -1; + + if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH) { + mlx4_warn(mdev, "port level mtu is only used for IB ports\n"); + return -EINVAL; + } + + err = sscanf(buf, "%d", &mtu); + if (err > 0) + ibta_mtu = int_to_ibta_mtu(mtu); + + if (err <= 0 || ibta_mtu < 0) { + mlx4_err(mdev, "%s is invalid IBTA mtu\n", buf); + return -EINVAL; + } + + mdev->caps.port_ib_mtu[info->port] = ibta_mtu; + + mlx4_stop_sense(mdev); + mutex_lock(&priv->port_mutex); + mlx4_unregister_device(mdev); + for (port = 1; port <= mdev->caps.num_ports; port++) { + mlx4_CLOSE_PORT(mdev, port); + err = mlx4_SET_PORT(mdev, port); + if (err) { + mlx4_err(mdev, "Failed to set port %d, " + "aborting\n", port); + goto err_set_port; + } + } + err = mlx4_register_device(mdev); +err_set_port: + mutex_unlock(&priv->port_mutex); + mlx4_start_sense(mdev); + return err ? err : count; +} + static int mlx4_load_fw(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); @@ -1362,7 +1455,10 @@ static int mlx4_setup_hca(struct mlx4_dev *dev) "ib capabilities (%d). 
Continuing " "with caps = 0\n", port, err); dev->caps.ib_port_def_cap[port] = ib_port_default_caps; - + if (mlx4_is_mfunc(dev)) + dev->caps.port_ib_mtu[port] = IB_MTU_2048; + else + dev->caps.port_ib_mtu[port] = IB_MTU_4096; err = mlx4_check_ext_port_caps(dev, port); if (err) mlx4_warn(dev, "failed to get port %d extended " @@ -1524,6 +1620,24 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port) info->port = -1; } + sprintf(info->dev_mtu_name, "mlx4_port%d_mtu", port); + info->port_mtu_attr.attr.name = info->dev_mtu_name; + if (mlx4_is_mfunc(dev)) + info->port_mtu_attr.attr.mode = S_IRUGO; + else { + info->port_mtu_attr.attr.mode = S_IRUGO | S_IWUSR; + info->port_mtu_attr.store = set_port_ib_mtu; + } + info->port_mtu_attr.show = show_port_ib_mtu; + sysfs_attr_init(&info->port_mtu_attr.attr); + + err = device_create_file(&dev->pdev->dev, &info->port_mtu_attr); + if (err) { + mlx4_err(dev, "Failed to create mtu file for port %d\n", port); + device_remove_file(&info->dev->pdev->dev, &info->port_attr); + info->port = -1; + } + return err; } @@ -1533,6 +1647,7 @@ static void mlx4_cleanup_port_info(struct mlx4_port_info *info) return; device_remove_file(&info->dev->pdev->dev, &info->port_attr); + device_remove_file(&info->dev->pdev->dev, &info->port_mtu_attr); } static int mlx4_init_steering(struct mlx4_dev *dev) diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index ac2d606..1aa3621 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -686,6 +686,8 @@ struct mlx4_port_info { char dev_name[16]; struct device_attribute port_attr; enum mlx4_port_type tmp_type; + char dev_mtu_name[16]; + struct device_attribute port_mtu_attr; struct mlx4_mac_table mac_table; struct radix_tree_root mac_tree; struct mlx4_vlan_table vlan_table; diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c index f44ae55..a6fd564 100644 --- a/drivers/net/ethernet/mellanox/mlx4/port.c +++ b/drivers/net/ethernet/mellanox/mlx4/port.c @@ -766,10 +766,18 @@ int mlx4_SET_PORT_wrapper(struct mlx4_dev *dev, int slave, vhcr->op_modifier, inbox); } +/* bit locations for set port command with zero op modifier */ +enum { + MLX4_SET_PORT_VL_CAP = 4, /* bits 7:4 */ + MLX4_SET_PORT_MTU_CAP = 12, /* bits 15:12 */ + MLX4_CHANGE_PORT_VL_CAP = 21, + MLX4_CHANGE_PORT_MTU_CAP = 22, +}; + int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port) { struct mlx4_cmd_mailbox *mailbox; - int err; + int err, vl_cap; if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) return 0; @@ -781,8 +789,19 @@ int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port) memset(mailbox->buf, 0, 256); ((__be32 *) mailbox->buf)[1] = dev->caps.ib_port_def_cap[port]; - err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_SET_PORT, - MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED); + + /* IB VL CAP enum isn't used by the firmware, just numerical values */ + for (vl_cap = 8; vl_cap >= 1; vl_cap >>= 1) { + ((__be32 *) mailbox->buf)[0] = cpu_to_be32( + (1 << MLX4_CHANGE_PORT_MTU_CAP) | + (1 << MLX4_CHANGE_PORT_VL_CAP) | + (dev->caps.port_ib_mtu[port] << MLX4_SET_PORT_MTU_CAP) | + (vl_cap << MLX4_SET_PORT_VL_CAP)); + err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_SET_PORT, + MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED); + if (err != -ENOMEM) + break; + } mlx4_free_cmd_mailbox(dev, mailbox); return err; diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 4b3fbf1..b19fb9b 100644 --- a/include/linux/mlx4/device.h +++ 
b/include/linux/mlx4/device.h
@@ -315,6 +315,7 @@ struct mlx4_caps { enum mlx4_port_type possible_type[MLX4_MAX_PORTS + 1]; u32 max_counters; u8 ext_port_cap[MLX4_MAX_PORTS + 1]; + u8 port_ib_mtu[MLX4_MAX_PORTS + 1]; }; struct mlx4_buf_list {
-- cgit v0.10.2

From db5a7a65c05867cb6ff5cb6d556a0edfce631d2d Mon Sep 17 00:00:00 2001
From: Roland Dreier
Date: Mon, 5 Mar 2012 10:05:28 -0800
Subject: mlx4_core: Scale size of MTT table with system RAM

The current driver defaults to 1M MTT segments, where each segment holds 8 MTT entries. This limits the total memory registered to 8M * PAGE_SIZE, which is 32GB with 4K pages. Since systems that have much more memory are pretty common now (at least among systems with InfiniBand hardware), this limit ends up getting hit in practice quite a bit.

Handle this by having the driver allocate at least enough MTT entries to cover 2 * totalram pages.

Signed-off-by: Roland Dreier

diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index 1aa3621..cb17af4 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -403,7 +403,7 @@ struct mlx4_profile { int num_cq; int num_mcg; int num_mpt; - int num_mtt; + unsigned num_mtt; }; struct mlx4_fw {
diff --git a/drivers/net/ethernet/mellanox/mlx4/profile.c b/drivers/net/ethernet/mellanox/mlx4/profile.c index 1129677..06e5ade 100644 --- a/drivers/net/ethernet/mellanox/mlx4/profile.c +++ b/drivers/net/ethernet/mellanox/mlx4/profile.c
@@ -83,12 +83,31 @@ u64 mlx4_make_profile(struct mlx4_dev *dev, u64 total_size = 0; struct mlx4_resource *profile; struct mlx4_resource tmp; + struct sysinfo si; int i, j; profile = kcalloc(MLX4_RES_NUM, sizeof(*profile), GFP_KERNEL); if (!profile) return -ENOMEM; + /* + * We want to scale the number of MTTs with the size of the + * system memory, since it makes sense to register a lot of + * memory on a system with a lot of memory. As a heuristic, + * make sure we have enough MTTs to cover twice the system + * memory (with PAGE_SIZE entries). + * + * This number has to be a power of two and fit into 32 bits + * due to device limitations, so cap this at 2^31 as well. + * That limits us to 8TB of memory registration per HCA with + * 4KB pages, which is probably OK for the next few months. + */ + si_meminfo(&si); + request->num_mtt = + roundup_pow_of_two(max_t(unsigned, request->num_mtt, + min(1UL << 31, + si.totalram >> (log_mtts_per_seg - 1)))); + profile[MLX4_RES_QP].size = dev_cap->qpc_entry_sz; profile[MLX4_RES_RDMARC].size = dev_cap->rdmarc_entry_sz; profile[MLX4_RES_ALTC].size = dev_cap->altc_entry_sz;
-- cgit v0.10.2
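A worked example of the sizing heuristic in this last patch (a standalone sketch assuming the driver default log_mtts_per_seg = 3, i.e. 8 MTT entries per allocation unit): covering 2 * totalram pages takes 2 * totalram entries, and dividing by 2^log_mtts_per_seg gives totalram >> (log_mtts_per_seg - 1) units, which is exactly the shift in the patch; roundup_pow_of_two() and the 1UL << 31 cap then bound the request.

	#include <stdio.h>

	int main(void)
	{
		unsigned long totalram = (64UL << 30) >> 12;	/* 64 GB of RAM in 4 KB pages */
		int log_mtts_per_seg = 3;
		unsigned long num_mtt = totalram >> (log_mtts_per_seg - 1);

		/* 16M pages -> 32M entries to cover RAM twice -> 4M units */
		printf("pages=%lu num_mtt=%lu\n", totalram, num_mtt);
		return 0;
	}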