author    Liang Zhen <liang.zhen@intel.com>    2016-03-02 23:53:29 (GMT)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>    2016-03-03 00:01:38 (GMT)
commit    4d99b2581effe115376402e710fbcb1c3c073769 (patch)
tree      8be9c880b0d789e7fffd6f2d903fee9e6f2b3c26 /drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
parent    82fffff4afe93f90af4d023e3ee943b5c3be039c (diff)
download  linux-4d99b2581effe115376402e710fbcb1c3c073769.tar.xz
staging: lustre: avoid intensive reconnecting for ko2iblnd
When there is a connection race between two nodes and one side's
connection attempt is rejected by the other side, o2iblnd will
reconnect immediately. This generates a lot of thrashing if:

- the race winner is slow and can't send out its connecting request
  in a short time, or
- the remote side leaves a cmid in TIMEWAIT state, which will reject
  future connection requests.

To resolve this problem, this patch changes the reconnection
behaviour: a reconnection is submitted by connd only when a zombie
connection is being destroyed and there is a pending reconnection
request for the corresponding peer. Also, after a few rejections,
reconnection attempts are spaced out by a time interval.

Signed-off-by: Liang Zhen <liang.zhen@intel.com>
Intel-bug-id: https://jira.hpdd.intel.com/browse/LU-7569
Reviewed-on: http://review.whamcloud.com/17892
Reviewed-by: Doug Oucharek <doug.s.oucharek@intel.com>
Reviewed-by: James Simmons <uja.ornl@yahoo.com>
Tested-by: James Simmons <uja.ornl@yahoo.com>
Reviewed-by: Oleg Drokin <oleg.drokin@intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
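For orientation, the throttling scheme this patch introduces can be
sketched in isolation. The sketch below is illustrative only: the names
(struct reconn_peer, reconn_queue, reconn_run) are hypothetical stand-ins
for the real kib_peer_t and kiblnd_data fields, and the locking the driver
performs under kib_connd_lock is omitted. Connd queues a reconnect when it
reaps a zombie connection; peers that have raced too often are parked on a
wait list that is drained back at most once per second; and each connd
pass services a bounded number of reconnects before returning to its
other work.

/*
 * Illustrative sketch only -- hypothetical types and names; the
 * kib_connd_lock serialization around these lists is omitted.
 */
#include <linux/list.h>
#include <linux/timekeeping.h>

#define RECONN_HIGH_RACE  10	/* delay retries past this many attempts */
#define RECONN_BREAK      100	/* max reconnects handled per connd pass */

struct reconn_peer {
	struct list_head	rp_list;
	int			rp_reconnected;	/* consecutive attempts */
};

static LIST_HEAD(reconn_list);	/* retry on the next connd pass */
static LIST_HEAD(reconn_wait);	/* throttled: retry in a later second */
static time64_t reconn_sec;

/* Called when a zombie connection with a pending reconnect is reaped. */
static void reconn_queue(struct reconn_peer *peer)
{
	if (peer->rp_reconnected < RECONN_HIGH_RACE)
		list_add_tail(&peer->rp_list, &reconn_list);
	else	/* raced too often: wait for the next one-second window */
		list_add_tail(&peer->rp_list, &reconn_wait);
}

/* One pass of the connd loop. */
static void reconn_run(void)
{
	int done = 0;

	/* at most once per second, promote throttled peers back */
	if (reconn_sec != ktime_get_real_seconds()) {
		reconn_sec = ktime_get_real_seconds();
		list_splice_init(&reconn_wait, &reconn_list);
	}

	while (done < RECONN_BREAK && !list_empty(&reconn_list)) {
		struct reconn_peer *peer;

		peer = list_first_entry(&reconn_list, struct reconn_peer,
					rp_list);
		list_del(&peer->rp_list);
		peer->rp_reconnected++;
		/* the real code calls kiblnd_reconnect_peer(peer) here */
		done++;
	}
}

The design point is that retries are driven off zombie destruction rather
than the rejection callback, so a rejected peer can never hammer the
remote side faster than its old connection can be torn down.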
Diffstat (limited to 'drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c')
-rw-r--r--  drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c  |  280
1 file changed, 200 insertions(+), 80 deletions(-)
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
index 199c105..79051c0 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
@@ -1257,6 +1257,7 @@ kiblnd_connect_peer(kib_peer_t *peer)
LASSERT(net);
LASSERT(peer->ibp_connecting > 0);
+ LASSERT(!peer->ibp_reconnecting);
cmid = kiblnd_rdma_create_id(kiblnd_cm_callback, peer, RDMA_PS_TCP,
IB_QPT_RC);
@@ -1312,6 +1313,56 @@ kiblnd_connect_peer(kib_peer_t *peer)
kiblnd_peer_connect_failed(peer, 1, rc);
}
+bool
+kiblnd_reconnect_peer(kib_peer_t *peer)
+{
+ rwlock_t *glock = &kiblnd_data.kib_global_lock;
+ char *reason = NULL;
+ struct list_head txs;
+ unsigned long flags;
+
+ INIT_LIST_HEAD(&txs);
+
+ write_lock_irqsave(glock, flags);
+ if (!peer->ibp_reconnecting) {
+ if (peer->ibp_accepting)
+ reason = "accepting";
+ else if (peer->ibp_connecting)
+ reason = "connecting";
+ else if (!list_empty(&peer->ibp_conns))
+ reason = "connected";
+ else /* connected then closed */
+ reason = "closed";
+
+ goto no_reconnect;
+ }
+
+ LASSERT(!peer->ibp_accepting && !peer->ibp_connecting &&
+ list_empty(&peer->ibp_conns));
+ peer->ibp_reconnecting = 0;
+
+ if (!kiblnd_peer_active(peer)) {
+ list_splice_init(&peer->ibp_tx_queue, &txs);
+ reason = "unlinked";
+ goto no_reconnect;
+ }
+
+ peer->ibp_connecting++;
+ peer->ibp_reconnected++;
+ write_unlock_irqrestore(glock, flags);
+
+ kiblnd_connect_peer(peer);
+ return true;
+
+no_reconnect:
+ write_unlock_irqrestore(glock, flags);
+
+ CWARN("Abort reconnection of %s: %s\n",
+ libcfs_nid2str(peer->ibp_nid), reason);
+ kiblnd_txlist_done(peer->ibp_ni, &txs, -ECONNABORTED);
+ return false;
+}
+
void
kiblnd_launch_tx(lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid)
{
@@ -1357,8 +1408,7 @@ kiblnd_launch_tx(lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid)
if (peer) {
if (list_empty(&peer->ibp_conns)) {
/* found a peer, but it's still connecting... */
- LASSERT(peer->ibp_connecting ||
- peer->ibp_accepting);
+ LASSERT(kiblnd_peer_connecting(peer));
if (tx)
list_add_tail(&tx->tx_list,
&peer->ibp_tx_queue);
@@ -1396,8 +1446,7 @@ kiblnd_launch_tx(lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid)
if (peer2) {
if (list_empty(&peer2->ibp_conns)) {
/* found a peer, but it's still connecting... */
- LASSERT(peer2->ibp_connecting ||
- peer2->ibp_accepting);
+ LASSERT(kiblnd_peer_connecting(peer2));
if (tx)
list_add_tail(&tx->tx_list,
&peer2->ibp_tx_queue);
@@ -1817,10 +1866,7 @@ kiblnd_peer_notify(kib_peer_t *peer)
read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
- if (list_empty(&peer->ibp_conns) &&
- !peer->ibp_accepting &&
- !peer->ibp_connecting &&
- peer->ibp_error) {
+ if (kiblnd_peer_idle(peer) && peer->ibp_error) {
error = peer->ibp_error;
peer->ibp_error = 0;
@@ -2020,14 +2066,14 @@ kiblnd_peer_connect_failed(kib_peer_t *peer, int active, int error)
peer->ibp_accepting--;
}
- if (peer->ibp_connecting ||
- peer->ibp_accepting) {
+ if (kiblnd_peer_connecting(peer)) {
/* another connection attempt under way... */
write_unlock_irqrestore(&kiblnd_data.kib_global_lock,
flags);
return;
}
+ peer->ibp_reconnected = 0;
if (list_empty(&peer->ibp_conns)) {
/* Take peer's blocked transmits to complete with error */
list_add(&zombies, &peer->ibp_tx_queue);
@@ -2100,6 +2146,7 @@ kiblnd_connreq_done(kib_conn_t *conn, int status)
*/
kiblnd_conn_addref(conn); /* +1 ref for ibc_list */
list_add(&conn->ibc_list, &peer->ibp_conns);
+ peer->ibp_reconnected = 0;
if (active)
peer->ibp_connecting--;
else
@@ -2355,10 +2402,16 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
if (peer2->ibp_incarnation != reqmsg->ibm_srcstamp ||
peer2->ibp_version != version) {
kiblnd_close_peer_conns_locked(peer2, -ESTALE);
+
+ if (kiblnd_peer_active(peer2)) {
+ peer2->ibp_incarnation = reqmsg->ibm_srcstamp;
+ peer2->ibp_version = version;
+ }
write_unlock_irqrestore(g_lock, flags);
- CWARN("Conn stale %s [old ver: %x, new ver: %x]\n",
- libcfs_nid2str(nid), peer2->ibp_version, version);
+ CWARN("Conn stale %s version %x/%x incarnation %llu/%llu\n",
+ libcfs_nid2str(nid), peer2->ibp_version, version,
+ peer2->ibp_incarnation, reqmsg->ibm_srcstamp);
kiblnd_peer_decref(peer);
rej.ibr_why = IBLND_REJECT_CONN_STALE;
@@ -2377,6 +2430,11 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
goto failed;
}
+ /*
+ * A passive connection is allowed even when this peer is
+ * waiting for reconnection.
+ */
+ peer2->ibp_reconnecting = 0;
peer2->ibp_accepting++;
kiblnd_peer_addref(peer2);
@@ -2478,75 +2536,79 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
}
static void
-kiblnd_reconnect(kib_conn_t *conn, int version,
- __u64 incarnation, int why, kib_connparams_t *cp)
+kiblnd_check_reconnect(kib_conn_t *conn, int version,
+ __u64 incarnation, int why, kib_connparams_t *cp)
{
+ rwlock_t *glock = &kiblnd_data.kib_global_lock;
kib_peer_t *peer = conn->ibc_peer;
char *reason;
- int retry = 0;
+ int msg_size = IBLND_MSG_SIZE;
+ int frag_num = -1;
+ int queue_dep = -1;
+ bool reconnect;
unsigned long flags;
LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);
LASSERT(peer->ibp_connecting > 0); /* 'conn' at least */
+ LASSERT(!peer->ibp_reconnecting);
- write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+ if (cp) {
+ msg_size = cp->ibcp_max_msg_size;
+ frag_num = cp->ibcp_max_frags;
+ queue_dep = cp->ibcp_queue_depth;
+ }
- /*
+ write_lock_irqsave(glock, flags);
+ /*
* retry connection if it's still needed and no other connection
* attempts (active or passive) are in progress
* NB: reconnect is still needed even when ibp_tx_queue is
* empty if ibp_version != version because reconnect may be
* initiated by kiblnd_query()
*/
- if ((!list_empty(&peer->ibp_tx_queue) ||
- peer->ibp_version != version) &&
- peer->ibp_connecting == 1 &&
- !peer->ibp_accepting) {
- retry = 1;
- peer->ibp_connecting++;
-
- peer->ibp_version = version;
- peer->ibp_incarnation = incarnation;
+ reconnect = (!list_empty(&peer->ibp_tx_queue) ||
+ peer->ibp_version != version) &&
+ peer->ibp_connecting == 1 &&
+ !peer->ibp_accepting;
+ if (!reconnect) {
+ reason = "no need";
+ goto out;
}
- write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
-
- if (!retry)
- return;
-
switch (why) {
default:
reason = "Unknown";
break;
case IBLND_REJECT_RDMA_FRAGS:
- if (!cp)
- goto failed;
-
- if (conn->ibc_max_frags <= cp->ibcp_max_frags) {
- CNETERR("Unsupported max frags, peer supports %d\n",
- cp->ibcp_max_frags);
- goto failed;
- } else if (!*kiblnd_tunables.kib_map_on_demand) {
- CNETERR("map_on_demand must be enabled to support map_on_demand peers\n");
- goto failed;
+ if (!cp) {
+ reason = "can't negotiate max frags";
+ goto out;
+ }
+ if (!*kiblnd_tunables.kib_map_on_demand) {
+ reason = "map_on_demand must be enabled";
+ goto out;
+ }
+ if (conn->ibc_max_frags <= frag_num) {
+ reason = "unsupported max frags";
+ goto out;
}
- peer->ibp_max_frags = cp->ibcp_max_frags;
+ peer->ibp_max_frags = frag_num;
reason = "rdma fragments";
break;
case IBLND_REJECT_MSG_QUEUE_SIZE:
- if (!cp)
- goto failed;
-
- if (conn->ibc_queue_depth <= cp->ibcp_queue_depth) {
- CNETERR("Unsupported queue depth, peer supports %d\n",
- cp->ibcp_queue_depth);
- goto failed;
+ if (!cp) {
+ reason = "can't negotiate queue depth";
+ goto out;
+ }
+ if (conn->ibc_queue_depth <= queue_dep) {
+ reason = "unsupported queue depth";
+ goto out;
}
- peer->ibp_queue_depth = cp->ibcp_queue_depth;
+ peer->ibp_queue_depth = queue_dep;
reason = "queue depth";
break;
@@ -2563,20 +2625,24 @@ kiblnd_reconnect(kib_conn_t *conn, int version,
break;
}
- CNETERR("%s: retrying (%s), %x, %x, queue_dep: %d, max_frag: %d, msg_size: %d\n",
- libcfs_nid2str(peer->ibp_nid),
- reason, IBLND_MSG_VERSION, version,
- conn->ibc_queue_depth, conn->ibc_max_frags,
- cp ? cp->ibcp_max_msg_size : IBLND_MSG_SIZE);
-
- kiblnd_connect_peer(peer);
- return;
-failed:
- write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
- peer->ibp_connecting--;
- write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+ conn->ibc_reconnect = 1;
+ peer->ibp_reconnecting = 1;
+ peer->ibp_version = version;
+ if (incarnation)
+ peer->ibp_incarnation = incarnation;
+out:
+ write_unlock_irqrestore(glock, flags);
- return;
+ CNETERR("%s: %s (%s), %x, %x, msg_size: %d, queue_depth: %d/%d, max_frags: %d/%d\n",
+ libcfs_nid2str(peer->ibp_nid),
+ reconnect ? "reconnect" : "don't reconnect",
+ reason, IBLND_MSG_VERSION, version, msg_size,
+ conn->ibc_queue_depth, queue_dep,
+ conn->ibc_max_frags, frag_num);
+ /*
+ * If conn::ibc_reconnect is set, connd will reconnect to the peer
+ * while destroying this zombie.
+ */
}
static void
@@ -2589,8 +2655,8 @@ kiblnd_rejected(kib_conn_t *conn, int reason, void *priv, int priv_nob)
switch (reason) {
case IB_CM_REJ_STALE_CONN:
- kiblnd_reconnect(conn, IBLND_MSG_VERSION, 0,
- IBLND_REJECT_CONN_STALE, NULL);
+ kiblnd_check_reconnect(conn, IBLND_MSG_VERSION, 0,
+ IBLND_REJECT_CONN_STALE, NULL);
break;
case IB_CM_REJ_INVALID_SERVICE_ID:
@@ -2674,8 +2740,9 @@ kiblnd_rejected(kib_conn_t *conn, int reason, void *priv, int priv_nob)
case IBLND_REJECT_CONN_UNCOMPAT:
case IBLND_REJECT_MSG_QUEUE_SIZE:
case IBLND_REJECT_RDMA_FRAGS:
- kiblnd_reconnect(conn, rej->ibr_version,
- incarnation, rej->ibr_why, cp);
+ kiblnd_check_reconnect(conn, rej->ibr_version,
+ incarnation,
+ rej->ibr_why, cp);
break;
case IBLND_REJECT_NO_RESOURCES:
@@ -3179,9 +3246,21 @@ kiblnd_disconnect_conn(kib_conn_t *conn)
kiblnd_peer_notify(conn->ibc_peer);
}
+/*
+ * High-water mark for reconnections to the same peer; a reconnection
+ * attempt is delayed once a peer has raced more than
+ * KIB_RECONN_HIGH_RACE times.
+ */
+#define KIB_RECONN_HIGH_RACE 10
+/*
+ * Let connd take a break and handle other work after this many
+ * consecutive reconnection attempts.
+ */
+#define KIB_RECONN_BREAK 100
+
int
kiblnd_connd(void *arg)
{
+ spinlock_t *lock = &kiblnd_data.kib_connd_lock;
wait_queue_t wait;
unsigned long flags;
kib_conn_t *conn;
@@ -3196,23 +3275,40 @@ kiblnd_connd(void *arg)
init_waitqueue_entry(&wait, current);
kiblnd_data.kib_connd = current;
- spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
+ spin_lock_irqsave(lock, flags);
while (!kiblnd_data.kib_shutdown) {
+ int reconn = 0;
+
dropped_lock = 0;
if (!list_empty(&kiblnd_data.kib_connd_zombies)) {
+ kib_peer_t *peer = NULL;
+
conn = list_entry(kiblnd_data.kib_connd_zombies.next,
kib_conn_t, ibc_list);
list_del(&conn->ibc_list);
+ if (conn->ibc_reconnect) {
+ peer = conn->ibc_peer;
+ kiblnd_peer_addref(peer);
+ }
- spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock,
- flags);
+ spin_unlock_irqrestore(lock, flags);
dropped_lock = 1;
- kiblnd_destroy_conn(conn);
+ kiblnd_destroy_conn(conn, !peer);
- spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
+ spin_lock_irqsave(lock, flags);
+ if (!peer)
+ continue;
+
+ conn->ibc_peer = peer;
+ if (peer->ibp_reconnected < KIB_RECONN_HIGH_RACE)
+ list_add_tail(&conn->ibc_list,
+ &kiblnd_data.kib_reconn_list);
+ else
+ list_add_tail(&conn->ibc_list,
+ &kiblnd_data.kib_reconn_wait);
}
if (!list_empty(&kiblnd_data.kib_connd_conns)) {
@@ -3220,14 +3316,38 @@ kiblnd_connd(void *arg)
kib_conn_t, ibc_list);
list_del(&conn->ibc_list);
- spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock,
- flags);
+ spin_unlock_irqrestore(lock, flags);
dropped_lock = 1;
kiblnd_disconnect_conn(conn);
kiblnd_conn_decref(conn);
- spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
+ spin_lock_irqsave(lock, flags);
+ }
+
+ while (reconn < KIB_RECONN_BREAK) {
+ if (kiblnd_data.kib_reconn_sec !=
+ ktime_get_real_seconds()) {
+ kiblnd_data.kib_reconn_sec = ktime_get_real_seconds();
+ list_splice_init(&kiblnd_data.kib_reconn_wait,
+ &kiblnd_data.kib_reconn_list);
+ }
+
+ if (list_empty(&kiblnd_data.kib_reconn_list))
+ break;
+
+ conn = list_entry(kiblnd_data.kib_reconn_list.next,
+ kib_conn_t, ibc_list);
+ list_del(&conn->ibc_list);
+
+ spin_unlock_irqrestore(lock, flags);
+ dropped_lock = 1;
+
+ reconn += kiblnd_reconnect_peer(conn->ibc_peer);
+ kiblnd_peer_decref(conn->ibc_peer);
+ LIBCFS_FREE(conn, sizeof(*conn));
+
+ spin_lock_irqsave(lock, flags);
}
/* careful with the jiffy wrap... */
@@ -3237,7 +3357,7 @@ kiblnd_connd(void *arg)
const int p = 1;
int chunk = kiblnd_data.kib_peer_hash_size;
- spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);
+ spin_unlock_irqrestore(lock, flags);
dropped_lock = 1;
/*
@@ -3262,7 +3382,7 @@ kiblnd_connd(void *arg)
}
deadline += msecs_to_jiffies(p * MSEC_PER_SEC);
- spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
+ spin_lock_irqsave(lock, flags);
}
if (dropped_lock)
@@ -3271,15 +3391,15 @@ kiblnd_connd(void *arg)
/* Nothing to do for 'timeout' */
set_current_state(TASK_INTERRUPTIBLE);
add_wait_queue(&kiblnd_data.kib_connd_waitq, &wait);
- spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);
+ spin_unlock_irqrestore(lock, flags);
schedule_timeout(timeout);
remove_wait_queue(&kiblnd_data.kib_connd_waitq, &wait);
- spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
+ spin_lock_irqsave(lock, flags);
}
- spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);
+ spin_unlock_irqrestore(lock, flags);
kiblnd_thread_fini();
return 0;