author    Scott Wood <scottwood@freescale.com>  2014-04-07 23:49:35 (GMT)
committer Scott Wood <scottwood@freescale.com>  2014-04-07 23:49:35 (GMT)
commit    62b8c978ee6b8d135d9e7953221de58000dba986 (patch)
tree      683b04b2e627f6710c22c151b23c8cc9a165315e /drivers/infiniband/ulp
parent    78fd82238d0e5716578c326404184a27ba67fd6e (diff)
download  linux-fsl-qoriq-62b8c978ee6b8d135d9e7953221de58000dba986.tar.xz
Rewind v3.13-rc3+ (78fd82238d0e5716) to v3.12
Diffstat (limited to 'drivers/infiniband/ulp')
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib.h             4
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_cm.c         14
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_ib.c         24
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c       12
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_multicast.c  29
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_netlink.c     4
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_vlan.c       10
-rw-r--r--  drivers/infiniband/ulp/isert/Kconfig             4
-rw-r--r--  drivers/infiniband/ulp/isert/ib_isert.c         99
-rw-r--r--  drivers/infiniband/ulp/isert/ib_isert.h          6
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.c            500
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.h             21
-rw-r--r--  drivers/infiniband/ulp/srpt/ib_srpt.c           10
13 files changed, 186 insertions, 551 deletions
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index c639f90..eb71aaa 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -101,7 +101,6 @@ enum {
IPOIB_MCAST_FLAG_SENDONLY = 1,
IPOIB_MCAST_FLAG_BUSY = 2, /* joining or already joined */
IPOIB_MCAST_FLAG_ATTACHED = 3,
- IPOIB_MCAST_JOIN_STARTED = 4,
MAX_SEND_CQE = 16,
IPOIB_CM_COPYBREAK = 256,
@@ -152,7 +151,6 @@ struct ipoib_mcast {
struct sk_buff_head pkt_queue;
struct net_device *dev;
- struct completion done;
};
struct ipoib_rx_buf {
@@ -301,7 +299,7 @@ struct ipoib_dev_priv {
unsigned long flags;
- struct rw_semaphore vlan_rwsem;
+ struct mutex vlan_mutex;
struct rb_root path_tree;
struct list_head path_list;
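For reference, the ipoib.h hunk above swaps the reader/writer semaphore vlan_rwsem (v3.13-rc3+) back to the plain vlan_mutex of v3.12, so traversals of the child-interface list are fully serialized again. A minimal sketch of the difference, with hypothetical demo_* names rather than the driver's own:

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>

/* Carries both locks only to contrast them; the real struct has one. */
struct demo_parent {
	struct list_head child_intfs;
	struct rw_semaphore vlan_rwsem;	/* v3.13-rc3+ flavor */
	struct mutex vlan_mutex;	/* v3.12 flavor */
};

static void demo_walk_rwsem(struct demo_parent *p)
{
	down_read(&p->vlan_rwsem);	/* several readers may walk at once */
	/* list_for_each_entry(...) over p->child_intfs */
	up_read(&p->vlan_rwsem);
}

static void demo_walk_mutex(struct demo_parent *p)
{
	mutex_lock(&p->vlan_mutex);	/* one walker at a time, even read-only */
	/* list_for_each_entry(...) over p->child_intfs */
	mutex_unlock(&p->vlan_mutex);
}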
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 1377f85..7a31754 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -140,8 +140,7 @@ static int ipoib_cm_post_receive_nonsrq(struct net_device *dev,
static struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev,
struct ipoib_cm_rx_buf *rx_ring,
int id, int frags,
- u64 mapping[IPOIB_CM_RX_SG],
- gfp_t gfp)
+ u64 mapping[IPOIB_CM_RX_SG])
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct sk_buff *skb;
@@ -165,7 +164,7 @@ static struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev,
}
for (i = 0; i < frags; i++) {
- struct page *page = alloc_page(gfp);
+ struct page *page = alloc_page(GFP_ATOMIC);
if (!page)
goto partial_error;
@@ -383,8 +382,7 @@ static int ipoib_cm_nonsrq_init_rx(struct net_device *dev, struct ib_cm_id *cm_i
for (i = 0; i < ipoib_recvq_size; ++i) {
if (!ipoib_cm_alloc_rx_skb(dev, rx->rx_ring, i, IPOIB_CM_RX_SG - 1,
- rx->rx_ring[i].mapping,
- GFP_KERNEL)) {
+ rx->rx_ring[i].mapping)) {
ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
ret = -ENOMEM;
goto err_count;
@@ -641,8 +639,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
frags = PAGE_ALIGN(wc->byte_len - min(wc->byte_len,
(unsigned)IPOIB_CM_HEAD_SIZE)) / PAGE_SIZE;
- newskb = ipoib_cm_alloc_rx_skb(dev, rx_ring, wr_id, frags,
- mapping, GFP_ATOMIC);
+ newskb = ipoib_cm_alloc_rx_skb(dev, rx_ring, wr_id, frags, mapping);
if (unlikely(!newskb)) {
/*
* If we can't allocate a new RX buffer, dump
@@ -1559,8 +1556,7 @@ int ipoib_cm_dev_init(struct net_device *dev)
for (i = 0; i < ipoib_recvq_size; ++i) {
if (!ipoib_cm_alloc_rx_skb(dev, priv->cm.srq_ring, i,
priv->cm.num_frags - 1,
- priv->cm.srq_ring[i].mapping,
- GFP_KERNEL)) {
+ priv->cm.srq_ring[i].mapping)) {
ipoib_warn(priv, "failed to allocate "
"receive buffer %d\n", i);
ipoib_cm_dev_cleanup(dev);
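For reference, every ipoib_cm.c hunk above removes the gfp_t argument from the rx-skb allocator, so fragment pages are again allocated GFP_ATOMIC even during ring setup, where the newer code could afford a sleeping GFP_KERNEL allocation. A minimal sketch of the context distinction, with hypothetical demo_* names:

#include <linux/errno.h>
#include <linux/gfp.h>

static struct page *demo_alloc_frag(gfp_t gfp)
{
	return alloc_page(gfp);		/* may sleep only if gfp allows it */
}

static int demo_ring_init(void)		/* process context: device init */
{
	return demo_alloc_frag(GFP_KERNEL) ? 0 : -ENOMEM;
}

static int demo_rx_refill(void)		/* atomic context: completion path */
{
	return demo_alloc_frag(GFP_ATOMIC) ? 0 : -ENOMEM;
}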
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 6a7003d..196b1d1 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -685,13 +685,15 @@ int ipoib_ib_dev_open(struct net_device *dev)
ret = ipoib_ib_post_receives(dev);
if (ret) {
ipoib_warn(priv, "ipoib_ib_post_receives returned %d\n", ret);
- goto dev_stop;
+ ipoib_ib_dev_stop(dev, 1);
+ return -1;
}
ret = ipoib_cm_dev_open(dev);
if (ret) {
ipoib_warn(priv, "ipoib_cm_dev_open returned %d\n", ret);
- goto dev_stop;
+ ipoib_ib_dev_stop(dev, 1);
+ return -1;
}
clear_bit(IPOIB_STOP_REAPER, &priv->flags);
@@ -702,11 +704,6 @@ int ipoib_ib_dev_open(struct net_device *dev)
napi_enable(&priv->napi);
return 0;
-dev_stop:
- if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
- napi_enable(&priv->napi);
- ipoib_ib_dev_stop(dev, 1);
- return -1;
}
static void ipoib_pkey_dev_check_presence(struct net_device *dev)
@@ -749,8 +746,10 @@ int ipoib_ib_dev_down(struct net_device *dev, int flush)
if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
mutex_lock(&pkey_mutex);
set_bit(IPOIB_PKEY_STOP, &priv->flags);
- cancel_delayed_work_sync(&priv->pkey_poll_task);
+ cancel_delayed_work(&priv->pkey_poll_task);
mutex_unlock(&pkey_mutex);
+ if (flush)
+ flush_workqueue(ipoib_workqueue);
}
ipoib_mcast_stop_thread(dev, flush);
@@ -975,7 +974,7 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
u16 new_index;
int result;
- down_read(&priv->vlan_rwsem);
+ mutex_lock(&priv->vlan_mutex);
/*
* Flush any child interfaces too -- they might be up even if
@@ -984,7 +983,7 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
list_for_each_entry(cpriv, &priv->child_intfs, list)
__ipoib_ib_dev_flush(cpriv, level);
- up_read(&priv->vlan_rwsem);
+ mutex_unlock(&priv->vlan_mutex);
if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) {
/* for non-child devices must check/update the pkey value here */
@@ -1082,11 +1081,6 @@ void ipoib_ib_dev_cleanup(struct net_device *dev)
struct ipoib_dev_priv *priv = netdev_priv(dev);
ipoib_dbg(priv, "cleaning up ib_dev\n");
- /*
- * We must make sure there are no more (path) completions
- * that may wish to touch priv fields that are no longer valid
- */
- ipoib_flush_paths(dev);
ipoib_mcast_stop_thread(dev, 1);
ipoib_mcast_dev_flush(dev);
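For reference, the first ipoib_ib.c hunks above undo a consolidated dev_stop: error label in ipoib_ib_dev_open(), going back to duplicating the stop-and-return sequence at each failure site. A minimal sketch of the label-based unwind idiom being removed, with hypothetical demo_* stubs:

static int demo_post_receives(void) { return 0; }
static int demo_cm_open(void) { return 0; }
static void demo_dev_stop(void) { }

static int demo_dev_open(void)
{
	int ret;

	ret = demo_post_receives();
	if (ret)
		goto dev_stop;		/* one unwind path... */

	ret = demo_cm_open();
	if (ret)
		goto dev_stop;		/* ...shared by every failure site */

	return 0;

dev_stop:
	demo_dev_stop();
	return -1;
}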
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index d64ed05..82cec1a 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -119,7 +119,7 @@ int ipoib_open(struct net_device *dev)
struct ipoib_dev_priv *cpriv;
/* Bring up any child interfaces too */
- down_read(&priv->vlan_rwsem);
+ mutex_lock(&priv->vlan_mutex);
list_for_each_entry(cpriv, &priv->child_intfs, list) {
int flags;
@@ -129,7 +129,7 @@ int ipoib_open(struct net_device *dev)
dev_change_flags(cpriv->dev, flags | IFF_UP);
}
- up_read(&priv->vlan_rwsem);
+ mutex_unlock(&priv->vlan_mutex);
}
netif_start_queue(dev);
@@ -162,7 +162,7 @@ static int ipoib_stop(struct net_device *dev)
struct ipoib_dev_priv *cpriv;
/* Bring down any child interfaces too */
- down_read(&priv->vlan_rwsem);
+ mutex_lock(&priv->vlan_mutex);
list_for_each_entry(cpriv, &priv->child_intfs, list) {
int flags;
@@ -172,7 +172,7 @@ static int ipoib_stop(struct net_device *dev)
dev_change_flags(cpriv->dev, flags & ~IFF_UP);
}
- up_read(&priv->vlan_rwsem);
+ mutex_unlock(&priv->vlan_mutex);
}
return 0;
@@ -1350,7 +1350,7 @@ void ipoib_setup(struct net_device *dev)
ipoib_set_ethtool_ops(dev);
- netif_napi_add(dev, &priv->napi, ipoib_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(dev, &priv->napi, ipoib_poll, 100);
dev->watchdog_timeo = HZ;
@@ -1372,7 +1372,7 @@ void ipoib_setup(struct net_device *dev)
spin_lock_init(&priv->lock);
- init_rwsem(&priv->vlan_rwsem);
+ mutex_init(&priv->vlan_mutex);
INIT_LIST_HEAD(&priv->path_list);
INIT_LIST_HEAD(&priv->child_intfs);
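For reference, besides the vlan_mutex revert, the ipoib_main.c hunk above replaces the NAPI_POLL_WEIGHT constant (64) with the driver's historical hard-coded weight of 100. A minimal sketch of the registration call as it existed in this era, with a hypothetical demo_poll handler:

#include <linux/netdevice.h>

static int demo_poll(struct napi_struct *napi, int budget)
{
	/* process up to 'budget' packets, return how many were handled */
	return 0;
}

static void demo_napi_setup(struct net_device *dev, struct napi_struct *napi)
{
	/* fourth argument is the poll weight; 64 was the suggested default */
	netif_napi_add(dev, napi, demo_poll, NAPI_POLL_WEIGHT);
}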
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index d4e0057..cecb98a 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -386,10 +386,8 @@ static int ipoib_mcast_join_complete(int status,
mcast->mcmember.mgid.raw, status);
/* We trap for port events ourselves. */
- if (status == -ENETRESET) {
- status = 0;
- goto out;
- }
+ if (status == -ENETRESET)
+ return 0;
if (!status)
status = ipoib_mcast_join_finish(mcast, &multicast->rec);
@@ -409,8 +407,7 @@ static int ipoib_mcast_join_complete(int status,
if (mcast == priv->broadcast)
queue_work(ipoib_workqueue, &priv->carrier_on_task);
- status = 0;
- goto out;
+ return 0;
}
if (mcast->logcount++ < 20) {
@@ -437,8 +434,7 @@ static int ipoib_mcast_join_complete(int status,
mcast->backoff * HZ);
spin_unlock_irq(&priv->lock);
mutex_unlock(&mcast_mutex);
-out:
- complete(&mcast->done);
+
return status;
}
@@ -488,15 +484,11 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast,
}
set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
- init_completion(&mcast->done);
- set_bit(IPOIB_MCAST_JOIN_STARTED, &mcast->flags);
-
mcast->mc = ib_sa_join_multicast(&ipoib_sa_client, priv->ca, priv->port,
&rec, comp_mask, GFP_KERNEL,
ipoib_mcast_join_complete, mcast);
if (IS_ERR(mcast->mc)) {
clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
- complete(&mcast->done);
ret = PTR_ERR(mcast->mc);
ipoib_warn(priv, "ib_sa_join_multicast failed, status %d\n", ret);
@@ -518,18 +510,10 @@ void ipoib_mcast_join_task(struct work_struct *work)
struct ipoib_dev_priv *priv =
container_of(work, struct ipoib_dev_priv, mcast_task.work);
struct net_device *dev = priv->dev;
- struct ib_port_attr port_attr;
if (!test_bit(IPOIB_MCAST_RUN, &priv->flags))
return;
- if (ib_query_port(priv->ca, priv->port, &port_attr) ||
- port_attr.state != IB_PORT_ACTIVE) {
- ipoib_dbg(priv, "port state is not ACTIVE (state = %d) suspending join task\n",
- port_attr.state);
- return;
- }
-
if (ib_query_gid(priv->ca, priv->port, 0, &priv->local_gid))
ipoib_warn(priv, "ib_query_gid() failed\n");
else
@@ -767,11 +751,6 @@ void ipoib_mcast_dev_flush(struct net_device *dev)
spin_unlock_irqrestore(&priv->lock, flags);
- /* seperate between the wait to the leave*/
- list_for_each_entry_safe(mcast, tmcast, &remove_list, list)
- if (test_bit(IPOIB_MCAST_JOIN_STARTED, &mcast->flags))
- wait_for_completion(&mcast->done);
-
list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
ipoib_mcast_leave(dev, mcast);
ipoib_mcast_free(mcast);
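For reference, the ipoib_multicast.c hunks above remove a completion-based handshake: the join path no longer arms mcast->done, the join callback no longer signals it, and ipoib_mcast_dev_flush() no longer waits for in-flight joins before freeing. A minimal sketch of that pattern, with hypothetical demo_* names:

#include <linux/completion.h>

struct demo_mcast {
	struct completion done;
};

static void demo_start_join(struct demo_mcast *m)
{
	init_completion(&m->done);
	/* hand m to an asynchronous join whose callback runs later */
}

static void demo_join_callback(struct demo_mcast *m)
{
	complete(&m->done);		/* wake any flusher blocked below */
}

static void demo_flush(struct demo_mcast *m)
{
	wait_for_completion(&m->done);	/* don't free under a live callback */
	/* now safe to leave the group and free m */
}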
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
index c29b5c8..f81abe1 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
@@ -142,10 +142,10 @@ static void ipoib_unregister_child_dev(struct net_device *dev, struct list_head
priv = netdev_priv(dev);
ppriv = netdev_priv(priv->parent);
- down_write(&ppriv->vlan_rwsem);
+ mutex_lock(&ppriv->vlan_mutex);
unregister_netdevice_queue(dev, head);
list_del(&priv->list);
- up_write(&ppriv->vlan_rwsem);
+ mutex_unlock(&ppriv->vlan_mutex);
}
static size_t ipoib_get_size(const struct net_device *dev)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
index 9fad7b5..8292554 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
@@ -140,7 +140,7 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
if (!rtnl_trylock())
return restart_syscall();
- down_write(&ppriv->vlan_rwsem);
+ mutex_lock(&ppriv->vlan_mutex);
/*
* First ensure this isn't a duplicate. We check the parent device and
@@ -163,7 +163,7 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
result = __ipoib_vlan_add(ppriv, priv, pkey, IPOIB_LEGACY_CHILD);
out:
- up_write(&ppriv->vlan_rwsem);
+ mutex_unlock(&ppriv->vlan_mutex);
if (result)
free_netdev(priv->dev);
@@ -185,8 +185,7 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
if (!rtnl_trylock())
return restart_syscall();
-
- down_write(&ppriv->vlan_rwsem);
+ mutex_lock(&ppriv->vlan_mutex);
list_for_each_entry_safe(priv, tpriv, &ppriv->child_intfs, list) {
if (priv->pkey == pkey &&
priv->child_type == IPOIB_LEGACY_CHILD) {
@@ -196,8 +195,7 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
break;
}
}
- up_write(&ppriv->vlan_rwsem);
-
+ mutex_unlock(&ppriv->vlan_mutex);
rtnl_unlock();
if (dev) {
diff --git a/drivers/infiniband/ulp/isert/Kconfig b/drivers/infiniband/ulp/isert/Kconfig
index 02f9759..ce3fd32 100644
--- a/drivers/infiniband/ulp/isert/Kconfig
+++ b/drivers/infiniband/ulp/isert/Kconfig
@@ -1,5 +1,5 @@
config INFINIBAND_ISERT
- tristate "iSCSI Extensions for RDMA (iSER) target support"
+ tristate "iSCSI Extentions for RDMA (iSER) target support"
depends on INET && INFINIBAND_ADDR_TRANS && TARGET_CORE && ISCSI_TARGET
---help---
- Support for iSCSI Extensions for RDMA (iSER) Target on Infiniband fabrics.
+ Support for iSCSI Extentions for RDMA (iSER) Target on Infiniband fabrics.
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 6be57c3..6df2350 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -22,7 +22,6 @@
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/in6.h>
-#include <linux/llist.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <target/target_core_base.h>
@@ -490,7 +489,6 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
kref_init(&isert_conn->conn_kref);
kref_get(&isert_conn->conn_kref);
mutex_init(&isert_conn->conn_mutex);
- mutex_init(&isert_conn->conn_comp_mutex);
spin_lock_init(&isert_conn->conn_lock);
cma_id->context = isert_conn;
@@ -845,32 +843,14 @@ isert_init_tx_hdrs(struct isert_conn *isert_conn,
}
static void
-isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
- struct ib_send_wr *send_wr, bool coalesce)
+isert_init_send_wr(struct isert_cmd *isert_cmd, struct ib_send_wr *send_wr)
{
- struct iser_tx_desc *tx_desc = &isert_cmd->tx_desc;
-
isert_cmd->rdma_wr.iser_ib_op = ISER_IB_SEND;
send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
send_wr->opcode = IB_WR_SEND;
- send_wr->sg_list = &tx_desc->tx_sg[0];
- send_wr->num_sge = isert_cmd->tx_desc.num_sge;
- /*
- * Coalesce send completion interrupts by only setting IB_SEND_SIGNALED
- * bit for every ISERT_COMP_BATCH_COUNT number of ib_post_send() calls.
- */
- mutex_lock(&isert_conn->conn_comp_mutex);
- if (coalesce &&
- ++isert_conn->conn_comp_batch < ISERT_COMP_BATCH_COUNT) {
- llist_add(&tx_desc->comp_llnode, &isert_conn->conn_comp_llist);
- mutex_unlock(&isert_conn->conn_comp_mutex);
- return;
- }
- isert_conn->conn_comp_batch = 0;
- tx_desc->comp_llnode_batch = llist_del_all(&isert_conn->conn_comp_llist);
- mutex_unlock(&isert_conn->conn_comp_mutex);
-
send_wr->send_flags = IB_SEND_SIGNALED;
+ send_wr->sg_list = &isert_cmd->tx_desc.tx_sg[0];
+ send_wr->num_sge = isert_cmd->tx_desc.num_sge;
}
static int
@@ -1602,8 +1582,8 @@ isert_response_completion(struct iser_tx_desc *tx_desc,
}
static void
-__isert_send_completion(struct iser_tx_desc *tx_desc,
- struct isert_conn *isert_conn)
+isert_send_completion(struct iser_tx_desc *tx_desc,
+ struct isert_conn *isert_conn)
{
struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
@@ -1644,24 +1624,6 @@ __isert_send_completion(struct iser_tx_desc *tx_desc,
}
static void
-isert_send_completion(struct iser_tx_desc *tx_desc,
- struct isert_conn *isert_conn)
-{
- struct llist_node *llnode = tx_desc->comp_llnode_batch;
- struct iser_tx_desc *t;
- /*
- * Drain coalesced completion llist starting from comp_llnode_batch
- * setup in isert_init_send_wr(), and then complete trailing tx_desc.
- */
- while (llnode) {
- t = llist_entry(llnode, struct iser_tx_desc, comp_llnode);
- llnode = llist_next(llnode);
- __isert_send_completion(t, isert_conn);
- }
- __isert_send_completion(tx_desc, isert_conn);
-}
-
-static void
isert_cq_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn)
{
struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
@@ -1831,7 +1793,7 @@ isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
isert_cmd->tx_desc.num_sge = 2;
}
- isert_init_send_wr(isert_conn, isert_cmd, send_wr, true);
+ isert_init_send_wr(isert_cmd, send_wr);
pr_debug("Posting SCSI Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
@@ -1851,7 +1813,7 @@ isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
&isert_cmd->tx_desc.iscsi_header,
nopout_response);
isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
- isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
+ isert_init_send_wr(isert_cmd, send_wr);
pr_debug("Posting NOPIN Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
@@ -1869,7 +1831,7 @@ isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
iscsit_build_logout_rsp(cmd, conn, (struct iscsi_logout_rsp *)
&isert_cmd->tx_desc.iscsi_header);
isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
- isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
+ isert_init_send_wr(isert_cmd, send_wr);
pr_debug("Posting Logout Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
@@ -1887,7 +1849,7 @@ isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
iscsit_build_task_mgt_rsp(cmd, conn, (struct iscsi_tm_rsp *)
&isert_cmd->tx_desc.iscsi_header);
isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
- isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
+ isert_init_send_wr(isert_cmd, send_wr);
pr_debug("Posting Task Management Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
@@ -1919,7 +1881,7 @@ isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
tx_dsg->lkey = isert_conn->conn_mr->lkey;
isert_cmd->tx_desc.num_sge = 2;
- isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
+ isert_init_send_wr(isert_cmd, send_wr);
pr_debug("Posting Reject IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
@@ -1959,7 +1921,7 @@ isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
tx_dsg->lkey = isert_conn->conn_mr->lkey;
isert_cmd->tx_desc.num_sge = 2;
}
- isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
+ isert_init_send_wr(isert_cmd, send_wr);
pr_debug("Posting Text Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
@@ -2029,6 +1991,8 @@ isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
data_left = se_cmd->data_length;
+ iscsit_increment_maxcmdsn(cmd, conn->sess);
+ cmd->stat_sn = conn->stat_sn++;
} else {
sg_off = cmd->write_data_done / PAGE_SIZE;
data_left = se_cmd->data_length - cmd->write_data_done;
@@ -2240,6 +2204,8 @@ isert_reg_rdma_frwr(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
data_left = se_cmd->data_length;
+ iscsit_increment_maxcmdsn(cmd, conn->sess);
+ cmd->stat_sn = conn->stat_sn++;
} else {
sg_off = cmd->write_data_done / PAGE_SIZE;
data_left = se_cmd->data_length - cmd->write_data_done;
@@ -2293,26 +2259,18 @@ isert_reg_rdma_frwr(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
data_len = min(data_left, rdma_write_max);
wr->cur_rdma_length = data_len;
- /* if there is a single dma entry, dma mr is sufficient */
- if (count == 1) {
- ib_sge->addr = ib_sg_dma_address(ib_dev, &sg_start[0]);
- ib_sge->length = ib_sg_dma_len(ib_dev, &sg_start[0]);
- ib_sge->lkey = isert_conn->conn_mr->lkey;
- wr->fr_desc = NULL;
- } else {
- spin_lock_irqsave(&isert_conn->conn_lock, flags);
- fr_desc = list_first_entry(&isert_conn->conn_frwr_pool,
- struct fast_reg_descriptor, list);
- list_del(&fr_desc->list);
- spin_unlock_irqrestore(&isert_conn->conn_lock, flags);
- wr->fr_desc = fr_desc;
+ spin_lock_irqsave(&isert_conn->conn_lock, flags);
+ fr_desc = list_first_entry(&isert_conn->conn_frwr_pool,
+ struct fast_reg_descriptor, list);
+ list_del(&fr_desc->list);
+ spin_unlock_irqrestore(&isert_conn->conn_lock, flags);
+ wr->fr_desc = fr_desc;
- ret = isert_fast_reg_mr(fr_desc, isert_cmd, isert_conn,
- ib_sge, offset, data_len);
- if (ret) {
- list_add_tail(&fr_desc->list, &isert_conn->conn_frwr_pool);
- goto unmap_sg;
- }
+ ret = isert_fast_reg_mr(fr_desc, isert_cmd, isert_conn,
+ ib_sge, offset, data_len);
+ if (ret) {
+ list_add_tail(&fr_desc->list, &isert_conn->conn_frwr_pool);
+ goto unmap_sg;
}
return 0;
@@ -2348,11 +2306,10 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
* Build isert_conn->tx_desc for iSCSI response PDU and attach
*/
isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
- iscsit_build_rsp_pdu(cmd, conn, true, (struct iscsi_scsi_rsp *)
+ iscsit_build_rsp_pdu(cmd, conn, false, (struct iscsi_scsi_rsp *)
&isert_cmd->tx_desc.iscsi_header);
isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
- isert_init_send_wr(isert_conn, isert_cmd,
- &isert_cmd->tx_desc.send_wr, true);
+ isert_init_send_wr(isert_cmd, &isert_cmd->tx_desc.send_wr);
atomic_inc(&isert_conn->post_send_buf_count);
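For reference, the ib_isert.c hunks above strip out send-completion coalescing: instead of marking only every ISERT_COMP_BATCH_COUNT-th send IB_SEND_SIGNALED and parking the rest on a lock-free llist, every send is signaled again. A minimal sketch of the batching idea, with hypothetical demo_* names (the real code serialized the counter with conn_comp_mutex, omitted here):

#include <linux/llist.h>
#include <linux/types.h>

#define DEMO_BATCH 8

struct demo_desc {
	struct llist_node node;
	struct llist_node *batch;	/* predecessors drained on completion */
};

static int demo_batch_count;
static LLIST_HEAD(demo_pending);

/* Returns true when this descriptor must be posted IB_SEND_SIGNALED. */
static bool demo_queue_tx(struct demo_desc *d)
{
	if (++demo_batch_count < DEMO_BATCH) {
		llist_add(&d->node, &demo_pending);	/* completion deferred */
		return false;
	}
	demo_batch_count = 0;
	d->batch = llist_del_all(&demo_pending);	/* complete these too */
	return true;
}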
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index 691f90f..631f209 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -43,8 +43,6 @@ struct iser_tx_desc {
struct ib_sge tx_sg[2];
int num_sge;
struct isert_cmd *isert_cmd;
- struct llist_node *comp_llnode_batch;
- struct llist_node comp_llnode;
struct ib_send_wr send_wr;
} __packed;
@@ -123,10 +121,6 @@ struct isert_conn {
int conn_frwr_pool_size;
/* lock to protect frwr_pool */
spinlock_t conn_lock;
-#define ISERT_COMP_BATCH_COUNT 8
- int conn_comp_batch;
- struct llist_head conn_comp_llist;
- struct mutex conn_comp_mutex;
};
#define ISERT_MAX_CQ 64
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index a886319..f93baf8 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -46,7 +46,6 @@
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
-#include <scsi/scsi_tcq.h>
#include <scsi/srp.h>
#include <scsi/scsi_transport_srp.h>
@@ -87,32 +86,6 @@ module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
"Enable workarounds for Topspin/Cisco SRP target bugs if != 0");
-static struct kernel_param_ops srp_tmo_ops;
-
-static int srp_reconnect_delay = 10;
-module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
- S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");
-
-static int srp_fast_io_fail_tmo = 15;
-module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
- S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(fast_io_fail_tmo,
- "Number of seconds between the observation of a transport"
- " layer error and failing all I/O. \"off\" means that this"
- " functionality is disabled.");
-
-static int srp_dev_loss_tmo = 600;
-module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
- S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(dev_loss_tmo,
- "Maximum number of seconds that the SRP transport should"
- " insulate transport layer errors. After this time has been"
- " exceeded the SCSI host is removed. Should be"
- " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
- " if fast_io_fail_tmo has not been set. \"off\" means that"
- " this functionality is disabled.");
-
static void srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device);
static void srp_recv_completion(struct ib_cq *cq, void *target_ptr);
@@ -129,48 +102,6 @@ static struct ib_client srp_client = {
static struct ib_sa_client srp_sa_client;
-static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
-{
- int tmo = *(int *)kp->arg;
-
- if (tmo >= 0)
- return sprintf(buffer, "%d", tmo);
- else
- return sprintf(buffer, "off");
-}
-
-static int srp_tmo_set(const char *val, const struct kernel_param *kp)
-{
- int tmo, res;
-
- if (strncmp(val, "off", 3) != 0) {
- res = kstrtoint(val, 0, &tmo);
- if (res)
- goto out;
- } else {
- tmo = -1;
- }
- if (kp->arg == &srp_reconnect_delay)
- res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
- srp_dev_loss_tmo);
- else if (kp->arg == &srp_fast_io_fail_tmo)
- res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
- else
- res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
- tmo);
- if (res)
- goto out;
- *(int *)kp->arg = tmo;
-
-out:
- return res;
-}
-
-static struct kernel_param_ops srp_tmo_ops = {
- .get = srp_tmo_get,
- .set = srp_tmo_set,
-};
-
static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
{
return (struct srp_target_port *) host->hostdata;
@@ -300,16 +231,16 @@ static int srp_create_target_ib(struct srp_target_port *target)
return -ENOMEM;
recv_cq = ib_create_cq(target->srp_host->srp_dev->dev,
- srp_recv_completion, NULL, target,
- target->queue_size, target->comp_vector);
+ srp_recv_completion, NULL, target, SRP_RQ_SIZE,
+ target->comp_vector);
if (IS_ERR(recv_cq)) {
ret = PTR_ERR(recv_cq);
goto err;
}
send_cq = ib_create_cq(target->srp_host->srp_dev->dev,
- srp_send_completion, NULL, target,
- target->queue_size, target->comp_vector);
+ srp_send_completion, NULL, target, SRP_SQ_SIZE,
+ target->comp_vector);
if (IS_ERR(send_cq)) {
ret = PTR_ERR(send_cq);
goto err_recv_cq;
@@ -318,8 +249,8 @@ static int srp_create_target_ib(struct srp_target_port *target)
ib_req_notify_cq(recv_cq, IB_CQ_NEXT_COMP);
init_attr->event_handler = srp_qp_event;
- init_attr->cap.max_send_wr = target->queue_size;
- init_attr->cap.max_recv_wr = target->queue_size;
+ init_attr->cap.max_send_wr = SRP_SQ_SIZE;
+ init_attr->cap.max_recv_wr = SRP_RQ_SIZE;
init_attr->cap.max_recv_sge = 1;
init_attr->cap.max_send_sge = 1;
init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
@@ -365,10 +296,6 @@ err:
return ret;
}
-/*
- * Note: this function may be called without srp_alloc_iu_bufs() having been
- * invoked. Hence the target->[rt]x_ring checks.
- */
static void srp_free_target_ib(struct srp_target_port *target)
{
int i;
@@ -380,18 +307,10 @@ static void srp_free_target_ib(struct srp_target_port *target)
target->qp = NULL;
target->send_cq = target->recv_cq = NULL;
- if (target->rx_ring) {
- for (i = 0; i < target->queue_size; ++i)
- srp_free_iu(target->srp_host, target->rx_ring[i]);
- kfree(target->rx_ring);
- target->rx_ring = NULL;
- }
- if (target->tx_ring) {
- for (i = 0; i < target->queue_size; ++i)
- srp_free_iu(target->srp_host, target->tx_ring[i]);
- kfree(target->tx_ring);
- target->tx_ring = NULL;
- }
+ for (i = 0; i < SRP_RQ_SIZE; ++i)
+ srp_free_iu(target->srp_host, target->rx_ring[i]);
+ for (i = 0; i < SRP_SQ_SIZE; ++i)
+ srp_free_iu(target->srp_host, target->tx_ring[i]);
}
static void srp_path_rec_completion(int status,
@@ -471,7 +390,7 @@ static int srp_send_req(struct srp_target_port *target)
req->param.responder_resources = 4;
req->param.remote_cm_response_timeout = 20;
req->param.local_cm_response_timeout = 20;
- req->param.retry_count = target->tl_retry_count;
+ req->param.retry_count = 7;
req->param.rnr_retry_count = 7;
req->param.max_cm_retries = 15;
@@ -577,11 +496,7 @@ static void srp_free_req_data(struct srp_target_port *target)
struct srp_request *req;
int i;
- if (!target->req_ring)
- return;
-
- for (i = 0; i < target->req_ring_size; ++i) {
- req = &target->req_ring[i];
+ for (i = 0, req = target->req_ring; i < SRP_CMD_SQ_SIZE; ++i, ++req) {
kfree(req->fmr_list);
kfree(req->map_page);
if (req->indirect_dma_addr) {
@@ -591,50 +506,6 @@ static void srp_free_req_data(struct srp_target_port *target)
}
kfree(req->indirect_desc);
}
-
- kfree(target->req_ring);
- target->req_ring = NULL;
-}
-
-static int srp_alloc_req_data(struct srp_target_port *target)
-{
- struct srp_device *srp_dev = target->srp_host->srp_dev;
- struct ib_device *ibdev = srp_dev->dev;
- struct srp_request *req;
- dma_addr_t dma_addr;
- int i, ret = -ENOMEM;
-
- INIT_LIST_HEAD(&target->free_reqs);
-
- target->req_ring = kzalloc(target->req_ring_size *
- sizeof(*target->req_ring), GFP_KERNEL);
- if (!target->req_ring)
- goto out;
-
- for (i = 0; i < target->req_ring_size; ++i) {
- req = &target->req_ring[i];
- req->fmr_list = kmalloc(target->cmd_sg_cnt * sizeof(void *),
- GFP_KERNEL);
- req->map_page = kmalloc(SRP_FMR_SIZE * sizeof(void *),
- GFP_KERNEL);
- req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
- if (!req->fmr_list || !req->map_page || !req->indirect_desc)
- goto out;
-
- dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
- target->indirect_size,
- DMA_TO_DEVICE);
- if (ib_dma_mapping_error(ibdev, dma_addr))
- goto out;
-
- req->indirect_dma_addr = dma_addr;
- req->index = i;
- list_add_tail(&req->list, &target->free_reqs);
- }
- ret = 0;
-
-out:
- return ret;
}
/**
@@ -657,20 +528,12 @@ static void srp_remove_target(struct srp_target_port *target)
WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
srp_del_scsi_host_attr(target->scsi_host);
- srp_rport_get(target->rport);
srp_remove_host(target->scsi_host);
scsi_remove_host(target->scsi_host);
srp_disconnect_target(target);
ib_destroy_cm_id(target->cm_id);
srp_free_target_ib(target);
- cancel_work_sync(&target->tl_err_work);
- srp_rport_put(target->rport);
srp_free_req_data(target);
-
- spin_lock(&target->srp_host->target_lock);
- list_del(&target->list);
- spin_unlock(&target->srp_host->target_lock);
-
scsi_host_put(target->scsi_host);
}
@@ -682,6 +545,10 @@ static void srp_remove_work(struct work_struct *work)
WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
srp_remove_target(target);
+
+ spin_lock(&target->srp_host->target_lock);
+ list_del(&target->list);
+ spin_unlock(&target->srp_host->target_lock);
}
static void srp_rport_delete(struct srp_rport *rport)
@@ -819,43 +686,24 @@ static void srp_free_req(struct srp_target_port *target,
spin_unlock_irqrestore(&target->lock, flags);
}
-static void srp_finish_req(struct srp_target_port *target,
- struct srp_request *req, int result)
+static void srp_reset_req(struct srp_target_port *target, struct srp_request *req)
{
struct scsi_cmnd *scmnd = srp_claim_req(target, req, NULL);
if (scmnd) {
srp_free_req(target, req, scmnd, 0);
- scmnd->result = result;
+ scmnd->result = DID_RESET << 16;
scmnd->scsi_done(scmnd);
}
}
-static void srp_terminate_io(struct srp_rport *rport)
+static int srp_reconnect_target(struct srp_target_port *target)
{
- struct srp_target_port *target = rport->lld_data;
- int i;
-
- for (i = 0; i < target->req_ring_size; ++i) {
- struct srp_request *req = &target->req_ring[i];
- srp_finish_req(target, req, DID_TRANSPORT_FAILFAST << 16);
- }
-}
-
-/*
- * It is up to the caller to ensure that srp_rport_reconnect() calls are
- * serialized and that no concurrent srp_queuecommand(), srp_abort(),
- * srp_reset_device() or srp_reset_host() calls will occur while this function
- * is in progress. One way to realize that is not to call this function
- * directly but to call srp_reconnect_rport() instead since that last function
- * serializes calls of this function via rport->mutex and also blocks
- * srp_queuecommand() calls before invoking this function.
- */
-static int srp_rport_reconnect(struct srp_rport *rport)
-{
- struct srp_target_port *target = rport->lld_data;
+ struct Scsi_Host *shost = target->scsi_host;
int i, ret;
+ scsi_target_block(&shost->shost_gendev);
+
srp_disconnect_target(target);
/*
* Now get a new local CM ID so that we avoid confusing the target in
@@ -873,21 +721,41 @@ static int srp_rport_reconnect(struct srp_rport *rport)
else
srp_create_target_ib(target);
- for (i = 0; i < target->req_ring_size; ++i) {
+ for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
struct srp_request *req = &target->req_ring[i];
- srp_finish_req(target, req, DID_RESET << 16);
+ if (req->scmnd)
+ srp_reset_req(target, req);
}
INIT_LIST_HEAD(&target->free_tx);
- for (i = 0; i < target->queue_size; ++i)
+ for (i = 0; i < SRP_SQ_SIZE; ++i)
list_add(&target->tx_ring[i]->list, &target->free_tx);
if (ret == 0)
ret = srp_connect_target(target);
- if (ret == 0)
- shost_printk(KERN_INFO, target->scsi_host,
- PFX "reconnect succeeded\n");
+ scsi_target_unblock(&shost->shost_gendev, ret == 0 ? SDEV_RUNNING :
+ SDEV_TRANSPORT_OFFLINE);
+ target->transport_offline = !!ret;
+
+ if (ret)
+ goto err;
+
+ shost_printk(KERN_INFO, target->scsi_host, PFX "reconnect succeeded\n");
+
+ return ret;
+
+err:
+ shost_printk(KERN_ERR, target->scsi_host,
+ PFX "reconnect failed (%d), removing target port.\n", ret);
+
+ /*
+ * We couldn't reconnect, so kill our target port off.
+ * However, we have to defer the real removal because we
+ * are in the context of the SCSI error handler now, which
+ * will deadlock if we call scsi_remove_host().
+ */
+ srp_queue_remove_work(target);
return ret;
}
@@ -1434,30 +1302,15 @@ static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
PFX "Recv failed with error code %d\n", res);
}
-/**
- * srp_tl_err_work() - handle a transport layer error
- *
- * Note: This function may get invoked before the rport has been created,
- * hence the target->rport test.
- */
-static void srp_tl_err_work(struct work_struct *work)
-{
- struct srp_target_port *target;
-
- target = container_of(work, struct srp_target_port, tl_err_work);
- if (target->rport)
- srp_start_tl_fail_timers(target->rport);
-}
-
-static void srp_handle_qp_err(enum ib_wc_status wc_status, bool send_err,
+static void srp_handle_qp_err(enum ib_wc_status wc_status,
+ enum ib_wc_opcode wc_opcode,
struct srp_target_port *target)
{
if (target->connected && !target->qp_in_error) {
shost_printk(KERN_ERR, target->scsi_host,
PFX "failed %s status %d\n",
- send_err ? "send" : "receive",
+ wc_opcode & IB_WC_RECV ? "receive" : "send",
wc_status);
- queue_work(system_long_wq, &target->tl_err_work);
}
target->qp_in_error = true;
}
@@ -1472,7 +1325,7 @@ static void srp_recv_completion(struct ib_cq *cq, void *target_ptr)
if (likely(wc.status == IB_WC_SUCCESS)) {
srp_handle_recv(target, &wc);
} else {
- srp_handle_qp_err(wc.status, false, target);
+ srp_handle_qp_err(wc.status, wc.opcode, target);
}
}
}
@@ -1488,7 +1341,7 @@ static void srp_send_completion(struct ib_cq *cq, void *target_ptr)
iu = (struct srp_iu *) (uintptr_t) wc.wr_id;
list_add(&iu->list, &target->free_tx);
} else {
- srp_handle_qp_err(wc.status, true, target);
+ srp_handle_qp_err(wc.status, wc.opcode, target);
}
}
}
@@ -1496,29 +1349,17 @@ static void srp_send_completion(struct ib_cq *cq, void *target_ptr)
static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
{
struct srp_target_port *target = host_to_target(shost);
- struct srp_rport *rport = target->rport;
struct srp_request *req;
struct srp_iu *iu;
struct srp_cmd *cmd;
struct ib_device *dev;
unsigned long flags;
- int len, result;
- const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler;
-
- /*
- * The SCSI EH thread is the only context from which srp_queuecommand()
- * can get invoked for blocked devices (SDEV_BLOCK /
- * SDEV_CREATED_BLOCK). Avoid racing with srp_reconnect_rport() by
- * locking the rport mutex if invoked from inside the SCSI EH.
- */
- if (in_scsi_eh)
- mutex_lock(&rport->mutex);
+ int len;
- result = srp_chkready(target->rport);
- if (unlikely(result)) {
- scmnd->result = result;
+ if (unlikely(target->transport_offline)) {
+ scmnd->result = DID_NO_CONNECT << 16;
scmnd->scsi_done(scmnd);
- goto unlock_rport;
+ return 0;
}
spin_lock_irqsave(&target->lock, flags);
@@ -1563,10 +1404,6 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
goto err_unmap;
}
-unlock_rport:
- if (in_scsi_eh)
- mutex_unlock(&rport->mutex);
-
return 0;
err_unmap:
@@ -1581,30 +1418,14 @@ err_iu:
err_unlock:
spin_unlock_irqrestore(&target->lock, flags);
- if (in_scsi_eh)
- mutex_unlock(&rport->mutex);
-
return SCSI_MLQUEUE_HOST_BUSY;
}
-/*
- * Note: the resources allocated in this function are freed in
- * srp_free_target_ib().
- */
static int srp_alloc_iu_bufs(struct srp_target_port *target)
{
int i;
- target->rx_ring = kzalloc(target->queue_size * sizeof(*target->rx_ring),
- GFP_KERNEL);
- if (!target->rx_ring)
- goto err_no_ring;
- target->tx_ring = kzalloc(target->queue_size * sizeof(*target->tx_ring),
- GFP_KERNEL);
- if (!target->tx_ring)
- goto err_no_ring;
-
- for (i = 0; i < target->queue_size; ++i) {
+ for (i = 0; i < SRP_RQ_SIZE; ++i) {
target->rx_ring[i] = srp_alloc_iu(target->srp_host,
target->max_ti_iu_len,
GFP_KERNEL, DMA_FROM_DEVICE);
@@ -1612,7 +1433,7 @@ static int srp_alloc_iu_bufs(struct srp_target_port *target)
goto err;
}
- for (i = 0; i < target->queue_size; ++i) {
+ for (i = 0; i < SRP_SQ_SIZE; ++i) {
target->tx_ring[i] = srp_alloc_iu(target->srp_host,
target->max_iu_len,
GFP_KERNEL, DMA_TO_DEVICE);
@@ -1625,17 +1446,15 @@ static int srp_alloc_iu_bufs(struct srp_target_port *target)
return 0;
err:
- for (i = 0; i < target->queue_size; ++i) {
+ for (i = 0; i < SRP_RQ_SIZE; ++i) {
srp_free_iu(target->srp_host, target->rx_ring[i]);
- srp_free_iu(target->srp_host, target->tx_ring[i]);
+ target->rx_ring[i] = NULL;
}
-
-err_no_ring:
- kfree(target->tx_ring);
- target->tx_ring = NULL;
- kfree(target->rx_ring);
- target->rx_ring = NULL;
+ for (i = 0; i < SRP_SQ_SIZE; ++i) {
+ srp_free_iu(target->srp_host, target->tx_ring[i]);
+ target->tx_ring[i] = NULL;
+ }
return -ENOMEM;
}
@@ -1687,9 +1506,6 @@ static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
target->scsi_host->can_queue
= min(target->req_lim - SRP_TSK_MGMT_SQ_SIZE,
target->scsi_host->can_queue);
- target->scsi_host->cmd_per_lun
- = min_t(int, target->scsi_host->can_queue,
- target->scsi_host->cmd_per_lun);
} else {
shost_printk(KERN_WARNING, target->scsi_host,
PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
@@ -1697,7 +1513,7 @@ static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
goto error;
}
- if (!target->rx_ring) {
+ if (!target->rx_ring[0]) {
ret = srp_alloc_iu_bufs(target);
if (ret)
goto error;
@@ -1717,7 +1533,7 @@ static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
if (ret)
goto error_free;
- for (i = 0; i < target->queue_size; i++) {
+ for (i = 0; i < SRP_RQ_SIZE; i++) {
struct srp_iu *iu = target->rx_ring[i];
ret = srp_post_recv(target, iu);
if (ret)
@@ -1856,7 +1672,6 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
if (ib_send_cm_drep(cm_id, NULL, 0))
shost_printk(KERN_ERR, target->scsi_host,
PFX "Sending CM DREP failed\n");
- queue_work(system_long_wq, &target->tl_err_work);
break;
case IB_CM_TIMEWAIT_EXIT:
@@ -1883,61 +1698,9 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
return 0;
}
-/**
- * srp_change_queue_type - changing device queue tag type
- * @sdev: scsi device struct
- * @tag_type: requested tag type
- *
- * Returns queue tag type.
- */
-static int
-srp_change_queue_type(struct scsi_device *sdev, int tag_type)
-{
- if (sdev->tagged_supported) {
- scsi_set_tag_type(sdev, tag_type);
- if (tag_type)
- scsi_activate_tcq(sdev, sdev->queue_depth);
- else
- scsi_deactivate_tcq(sdev, sdev->queue_depth);
- } else
- tag_type = 0;
-
- return tag_type;
-}
-
-/**
- * srp_change_queue_depth - setting device queue depth
- * @sdev: scsi device struct
- * @qdepth: requested queue depth
- * @reason: SCSI_QDEPTH_DEFAULT/SCSI_QDEPTH_QFULL/SCSI_QDEPTH_RAMP_UP
- * (see include/scsi/scsi_host.h for definition)
- *
- * Returns queue depth.
- */
-static int
-srp_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
-{
- struct Scsi_Host *shost = sdev->host;
- int max_depth;
- if (reason == SCSI_QDEPTH_DEFAULT || reason == SCSI_QDEPTH_RAMP_UP) {
- max_depth = shost->can_queue;
- if (!sdev->tagged_supported)
- max_depth = 1;
- if (qdepth > max_depth)
- qdepth = max_depth;
- scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
- } else if (reason == SCSI_QDEPTH_QFULL)
- scsi_track_queue_full(sdev, qdepth);
- else
- return -EOPNOTSUPP;
-
- return sdev->queue_depth;
-}
-
static int srp_send_tsk_mgmt(struct srp_target_port *target,
u64 req_tag, unsigned int lun, u8 func)
{
- struct srp_rport *rport = target->rport;
struct ib_device *dev = target->srp_host->srp_dev->dev;
struct srp_iu *iu;
struct srp_tsk_mgmt *tsk_mgmt;
@@ -1947,20 +1710,12 @@ static int srp_send_tsk_mgmt(struct srp_target_port *target,
init_completion(&target->tsk_mgmt_done);
- /*
- * Lock the rport mutex to avoid that srp_create_target_ib() is
- * invoked while a task management function is being sent.
- */
- mutex_lock(&rport->mutex);
spin_lock_irq(&target->lock);
iu = __srp_get_tx_iu(target, SRP_IU_TSK_MGMT);
spin_unlock_irq(&target->lock);
- if (!iu) {
- mutex_unlock(&rport->mutex);
-
+ if (!iu)
return -1;
- }
ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
DMA_TO_DEVICE);
@@ -1977,11 +1732,8 @@ static int srp_send_tsk_mgmt(struct srp_target_port *target,
DMA_TO_DEVICE);
if (srp_post_send(target, iu, sizeof *tsk_mgmt)) {
srp_put_tx_iu(target, iu, SRP_IU_TSK_MGMT);
- mutex_unlock(&rport->mutex);
-
return -1;
}
- mutex_unlock(&rport->mutex);
if (!wait_for_completion_timeout(&target->tsk_mgmt_done,
msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
@@ -1999,11 +1751,11 @@ static int srp_abort(struct scsi_cmnd *scmnd)
shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
if (!req || !srp_claim_req(target, req, scmnd))
- return SUCCESS;
+ return FAILED;
if (srp_send_tsk_mgmt(target, req->index, scmnd->device->lun,
SRP_TSK_ABORT_TASK) == 0)
ret = SUCCESS;
- else if (target->rport->state == SRP_RPORT_LOST)
+ else if (target->transport_offline)
ret = FAST_IO_FAIL;
else
ret = FAILED;
@@ -2027,10 +1779,10 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
if (target->tsk_mgmt_status)
return FAILED;
- for (i = 0; i < target->req_ring_size; ++i) {
+ for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
struct srp_request *req = &target->req_ring[i];
if (req->scmnd && req->scmnd->device == scmnd->device)
- srp_finish_req(target, req, DID_RESET << 16);
+ srp_reset_req(target, req);
}
return SUCCESS;
@@ -2039,10 +1791,14 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
static int srp_reset_host(struct scsi_cmnd *scmnd)
{
struct srp_target_port *target = host_to_target(scmnd->device->host);
+ int ret = FAILED;
shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");
- return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
+ if (!srp_reconnect_target(target))
+ ret = SUCCESS;
+
+ return ret;
}
static int srp_slave_configure(struct scsi_device *sdev)
@@ -2095,14 +1851,6 @@ static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "0x%04x\n", be16_to_cpu(target->path.pkey));
}
-static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct srp_target_port *target = host_to_target(class_to_shost(dev));
-
- return sprintf(buf, "%pI6\n", target->path.sgid.raw);
-}
-
static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -2159,14 +1907,6 @@ static ssize_t show_comp_vector(struct device *dev,
return sprintf(buf, "%d\n", target->comp_vector);
}
-static ssize_t show_tl_retry_count(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct srp_target_port *target = host_to_target(class_to_shost(dev));
-
- return sprintf(buf, "%d\n", target->tl_retry_count);
-}
-
static ssize_t show_cmd_sg_entries(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -2187,7 +1927,6 @@ static DEVICE_ATTR(id_ext, S_IRUGO, show_id_ext, NULL);
static DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL);
static DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL);
static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
-static DEVICE_ATTR(sgid, S_IRUGO, show_sgid, NULL);
static DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL);
static DEVICE_ATTR(orig_dgid, S_IRUGO, show_orig_dgid, NULL);
static DEVICE_ATTR(req_lim, S_IRUGO, show_req_lim, NULL);
@@ -2195,7 +1934,6 @@ static DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL);
static DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL);
static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
static DEVICE_ATTR(comp_vector, S_IRUGO, show_comp_vector, NULL);
-static DEVICE_ATTR(tl_retry_count, S_IRUGO, show_tl_retry_count, NULL);
static DEVICE_ATTR(cmd_sg_entries, S_IRUGO, show_cmd_sg_entries, NULL);
static DEVICE_ATTR(allow_ext_sg, S_IRUGO, show_allow_ext_sg, NULL);
@@ -2204,7 +1942,6 @@ static struct device_attribute *srp_host_attrs[] = {
&dev_attr_ioc_guid,
&dev_attr_service_id,
&dev_attr_pkey,
- &dev_attr_sgid,
&dev_attr_dgid,
&dev_attr_orig_dgid,
&dev_attr_req_lim,
@@ -2212,7 +1949,6 @@ static struct device_attribute *srp_host_attrs[] = {
&dev_attr_local_ib_port,
&dev_attr_local_ib_device,
&dev_attr_comp_vector,
- &dev_attr_tl_retry_count,
&dev_attr_cmd_sg_entries,
&dev_attr_allow_ext_sg,
NULL
@@ -2225,16 +1961,14 @@ static struct scsi_host_template srp_template = {
.slave_configure = srp_slave_configure,
.info = srp_target_info,
.queuecommand = srp_queuecommand,
- .change_queue_depth = srp_change_queue_depth,
- .change_queue_type = srp_change_queue_type,
.eh_abort_handler = srp_abort,
.eh_device_reset_handler = srp_reset_device,
.eh_host_reset_handler = srp_reset_host,
.skip_settle_delay = true,
.sg_tablesize = SRP_DEF_SG_TABLESIZE,
- .can_queue = SRP_DEFAULT_CMD_SQ_SIZE,
+ .can_queue = SRP_CMD_SQ_SIZE,
.this_id = -1,
- .cmd_per_lun = SRP_DEFAULT_CMD_SQ_SIZE,
+ .cmd_per_lun = SRP_CMD_SQ_SIZE,
.use_clustering = ENABLE_CLUSTERING,
.shost_attrs = srp_host_attrs
};
@@ -2260,7 +1994,6 @@ static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
}
rport->lld_data = target;
- target->rport = rport;
spin_lock(&host->target_lock);
list_add_tail(&target->list, &host->target_list);
@@ -2340,8 +2073,6 @@ enum {
SRP_OPT_ALLOW_EXT_SG = 1 << 10,
SRP_OPT_SG_TABLESIZE = 1 << 11,
SRP_OPT_COMP_VECTOR = 1 << 12,
- SRP_OPT_TL_RETRY_COUNT = 1 << 13,
- SRP_OPT_QUEUE_SIZE = 1 << 14,
SRP_OPT_ALL = (SRP_OPT_ID_EXT |
SRP_OPT_IOC_GUID |
SRP_OPT_DGID |
@@ -2363,8 +2094,6 @@ static const match_table_t srp_opt_tokens = {
{ SRP_OPT_ALLOW_EXT_SG, "allow_ext_sg=%u" },
{ SRP_OPT_SG_TABLESIZE, "sg_tablesize=%u" },
{ SRP_OPT_COMP_VECTOR, "comp_vector=%u" },
- { SRP_OPT_TL_RETRY_COUNT, "tl_retry_count=%u" },
- { SRP_OPT_QUEUE_SIZE, "queue_size=%d" },
{ SRP_OPT_ERR, NULL }
};
@@ -2459,25 +2188,13 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target)
target->scsi_host->max_sectors = token;
break;
- case SRP_OPT_QUEUE_SIZE:
- if (match_int(args, &token) || token < 1) {
- pr_warn("bad queue_size parameter '%s'\n", p);
- goto out;
- }
- target->scsi_host->can_queue = token;
- target->queue_size = token + SRP_RSP_SQ_SIZE +
- SRP_TSK_MGMT_SQ_SIZE;
- if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
- target->scsi_host->cmd_per_lun = token;
- break;
-
case SRP_OPT_MAX_CMD_PER_LUN:
- if (match_int(args, &token) || token < 1) {
+ if (match_int(args, &token)) {
pr_warn("bad max cmd_per_lun parameter '%s'\n",
p);
goto out;
}
- target->scsi_host->cmd_per_lun = token;
+ target->scsi_host->cmd_per_lun = min(token, SRP_CMD_SQ_SIZE);
break;
case SRP_OPT_IO_CLASS:
@@ -2540,15 +2257,6 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target)
target->comp_vector = token;
break;
- case SRP_OPT_TL_RETRY_COUNT:
- if (match_int(args, &token) || token < 2 || token > 7) {
- pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
- p);
- goto out;
- }
- target->tl_retry_count = token;
- break;
-
default:
pr_warn("unknown parameter or missing value '%s' in target creation request\n",
p);
@@ -2565,12 +2273,6 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target)
pr_warn("target creation request is missing parameter '%s'\n",
srp_opt_tokens[i].pattern);
- if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
- && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
- pr_warn("cmd_per_lun = %d > queue_size = %d\n",
- target->scsi_host->cmd_per_lun,
- target->scsi_host->can_queue);
-
out:
kfree(options);
return ret;
@@ -2585,7 +2287,8 @@ static ssize_t srp_create_target(struct device *dev,
struct Scsi_Host *target_host;
struct srp_target_port *target;
struct ib_device *ibdev = host->srp_dev->dev;
- int ret;
+ dma_addr_t dma_addr;
+ int i, ret;
target_host = scsi_host_alloc(&srp_template,
sizeof (struct srp_target_port));
@@ -2608,15 +2311,11 @@ static ssize_t srp_create_target(struct device *dev,
target->cmd_sg_cnt = cmd_sg_entries;
target->sg_tablesize = indirect_sg_entries ? : cmd_sg_entries;
target->allow_ext_sg = allow_ext_sg;
- target->tl_retry_count = 7;
- target->queue_size = SRP_DEFAULT_QUEUE_SIZE;
ret = srp_parse_options(buf, target);
if (ret)
goto err;
- target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;
-
if (!srp_conn_unique(target->srp_host, target)) {
shost_printk(KERN_INFO, target->scsi_host,
PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
@@ -2640,13 +2339,31 @@ static ssize_t srp_create_target(struct device *dev,
sizeof (struct srp_indirect_buf) +
target->cmd_sg_cnt * sizeof (struct srp_direct_buf);
- INIT_WORK(&target->tl_err_work, srp_tl_err_work);
INIT_WORK(&target->remove_work, srp_remove_work);
spin_lock_init(&target->lock);
INIT_LIST_HEAD(&target->free_tx);
- ret = srp_alloc_req_data(target);
- if (ret)
- goto err_free_mem;
+ INIT_LIST_HEAD(&target->free_reqs);
+ for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
+ struct srp_request *req = &target->req_ring[i];
+
+ req->fmr_list = kmalloc(target->cmd_sg_cnt * sizeof (void *),
+ GFP_KERNEL);
+ req->map_page = kmalloc(SRP_FMR_SIZE * sizeof (void *),
+ GFP_KERNEL);
+ req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
+ if (!req->fmr_list || !req->map_page || !req->indirect_desc)
+ goto err_free_mem;
+
+ dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
+ target->indirect_size,
+ DMA_TO_DEVICE);
+ if (ib_dma_mapping_error(ibdev, dma_addr))
+ goto err_free_mem;
+
+ req->indirect_dma_addr = dma_addr;
+ req->index = i;
+ list_add_tail(&req->list, &target->free_reqs);
+ }
ib_query_gid(ibdev, host->port, 0, &target->path.sgid);
@@ -2895,14 +2612,7 @@ static void srp_remove_one(struct ib_device *device)
}
static struct srp_function_template ib_srp_transport_functions = {
- .has_rport_state = true,
- .reset_timer_if_blocked = true,
- .reconnect_delay = &srp_reconnect_delay,
- .fast_io_fail_tmo = &srp_fast_io_fail_tmo,
- .dev_loss_tmo = &srp_dev_loss_tmo,
- .reconnect = srp_rport_reconnect,
.rport_delete = srp_rport_delete,
- .terminate_rport_io = srp_terminate_io,
};
static int __init srp_init_module(void)
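For reference, among the many ib_srp.c reverts above, the srp_tmo_ops block removed a validated, runtime-writable set of module parameters (reconnect_delay, fast_io_fail_tmo, dev_loss_tmo) that accepted "off" as well as integers. A minimal sketch of that module_param_cb() pattern for a single hypothetical demo_tmo parameter:

#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/stat.h>
#include <linux/string.h>

static int demo_tmo = 15;

static int demo_tmo_get(char *buffer, const struct kernel_param *kp)
{
	int tmo = *(int *)kp->arg;

	return tmo >= 0 ? sprintf(buffer, "%d", tmo) : sprintf(buffer, "off");
}

static int demo_tmo_set(const char *val, const struct kernel_param *kp)
{
	int tmo = -1, res = 0;

	if (strncmp(val, "off", 3) != 0)	/* "off" stores -1 */
		res = kstrtoint(val, 0, &tmo);
	if (!res)				/* range checks would go here */
		*(int *)kp->arg = tmo;
	return res;
}

static const struct kernel_param_ops demo_tmo_ops = {
	.get = demo_tmo_get,
	.set = demo_tmo_set,
};
module_param_cb(demo_tmo, &demo_tmo_ops, &demo_tmo, S_IRUGO | S_IWUSR);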
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index 5756810..e641088 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -57,11 +57,14 @@ enum {
SRP_MAX_LUN = 512,
SRP_DEF_SG_TABLESIZE = 12,
- SRP_DEFAULT_QUEUE_SIZE = 1 << 6,
+ SRP_RQ_SHIFT = 6,
+ SRP_RQ_SIZE = 1 << SRP_RQ_SHIFT,
+
+ SRP_SQ_SIZE = SRP_RQ_SIZE,
SRP_RSP_SQ_SIZE = 1,
+ SRP_REQ_SQ_SIZE = SRP_SQ_SIZE - SRP_RSP_SQ_SIZE,
SRP_TSK_MGMT_SQ_SIZE = 1,
- SRP_DEFAULT_CMD_SQ_SIZE = SRP_DEFAULT_QUEUE_SIZE - SRP_RSP_SQ_SIZE -
- SRP_TSK_MGMT_SQ_SIZE,
+ SRP_CMD_SQ_SIZE = SRP_REQ_SQ_SIZE - SRP_TSK_MGMT_SQ_SIZE,
SRP_TAG_NO_REQ = ~0U,
SRP_TAG_TSK_MGMT = 1U << 31,
@@ -137,6 +140,7 @@ struct srp_target_port {
unsigned int cmd_sg_cnt;
unsigned int indirect_size;
bool allow_ext_sg;
+ bool transport_offline;
/* Everything above this point is used in the hot path of
* command processing. Try to keep them packed into cachelines.
@@ -149,14 +153,10 @@ struct srp_target_port {
u16 io_class;
struct srp_host *srp_host;
struct Scsi_Host *scsi_host;
- struct srp_rport *rport;
char target_name[32];
unsigned int scsi_id;
unsigned int sg_tablesize;
- int queue_size;
- int req_ring_size;
int comp_vector;
- int tl_retry_count;
struct ib_sa_path_rec path;
__be16 orig_dgid[8];
@@ -172,11 +172,10 @@ struct srp_target_port {
int zero_req_lim;
- struct srp_iu **tx_ring;
- struct srp_iu **rx_ring;
- struct srp_request *req_ring;
+ struct srp_iu *tx_ring[SRP_SQ_SIZE];
+ struct srp_iu *rx_ring[SRP_RQ_SIZE];
+ struct srp_request req_ring[SRP_CMD_SQ_SIZE];
- struct work_struct tl_err_work;
struct work_struct remove_work;
struct list_head list;
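For reference, the ib_srp.h hunk above trades dynamically sized rings (a queue_size chosen per login, with pointer arrays allocated in srp_alloc_iu_bufs()) for the fixed SRP_RQ_SIZE/SRP_SQ_SIZE arrays embedded in the target struct. A minimal sketch of the two layouts, with hypothetical demo_* names:

#include <linux/errno.h>
#include <linux/slab.h>

#define DEMO_RQ_SIZE 64

struct demo_iu;				/* opaque information unit */

struct demo_target_fixed {
	struct demo_iu *rx_ring[DEMO_RQ_SIZE];	/* capacity baked in */
};

struct demo_target_dynamic {
	int queue_size;				/* chosen at login time */
	struct demo_iu **rx_ring;
};

static int demo_alloc_ring(struct demo_target_dynamic *t)
{
	t->rx_ring = kzalloc(t->queue_size * sizeof(*t->rx_ring), GFP_KERNEL);
	return t->rx_ring ? 0 : -ENOMEM;
}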
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 520a7e5..6c923c7 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -1352,8 +1352,11 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
/* XXX(hch): this is a horrible layering violation.. */
spin_lock_irqsave(&ioctx->cmd.t_state_lock, flags);
+ ioctx->cmd.transport_state |= CMD_T_LUN_STOP;
ioctx->cmd.transport_state &= ~CMD_T_ACTIVE;
spin_unlock_irqrestore(&ioctx->cmd.t_state_lock, flags);
+
+ complete(&ioctx->cmd.transport_lun_stop_comp);
break;
case SRPT_STATE_CMD_RSP_SENT:
/*
@@ -1361,6 +1364,9 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
* not been received in time.
*/
srpt_unmap_sg_to_ib_sge(ioctx->ch, ioctx);
+ spin_lock_irqsave(&ioctx->cmd.t_state_lock, flags);
+ ioctx->cmd.transport_state |= CMD_T_LUN_STOP;
+ spin_unlock_irqrestore(&ioctx->cmd.t_state_lock, flags);
target_put_sess_cmd(ioctx->ch->sess, &ioctx->cmd);
break;
case SRPT_STATE_MGMT_RSP_SENT:
@@ -1470,6 +1476,7 @@ static void srpt_handle_rdma_err_comp(struct srpt_rdma_ch *ch,
{
struct se_cmd *cmd;
enum srpt_command_state state;
+ unsigned long flags;
cmd = &ioctx->cmd;
state = srpt_get_cmd_state(ioctx);
@@ -1489,6 +1496,9 @@ static void srpt_handle_rdma_err_comp(struct srpt_rdma_ch *ch,
__func__, __LINE__, state);
break;
case SRPT_RDMA_WRITE_LAST:
+ spin_lock_irqsave(&ioctx->cmd.t_state_lock, flags);
+ ioctx->cmd.transport_state |= CMD_T_LUN_STOP;
+ spin_unlock_irqrestore(&ioctx->cmd.t_state_lock, flags);
break;
default:
printk(KERN_ERR "%s[%d]: opcode = %u\n", __func__,