From d0bea455dd48da1ecbd04fedf00eb89437455fdc Mon Sep 17 00:00:00 2001
From: Trond Myklebust
Date: Tue, 23 Oct 2012 11:35:47 -0400
Subject: SUNRPC: Clear the connect flag when socket state is TCP_CLOSE_WAIT

This is needed to ensure that we call xprt_connect() upon the next call to
call_connect().

Signed-off-by: Trond Myklebust
Cc: stable@vger.kernel.org
Tested-by: Chris Perl

diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index aaaadfb..6e6967d 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -1516,6 +1516,7 @@ static void xs_tcp_state_change(struct sock *sk)
 	case TCP_CLOSE_WAIT:
 		/* The server initiated a shutdown of the socket */
 		xprt->connect_cookie++;
+		clear_bit(XPRT_CONNECTED, &xprt->state);
 		xs_tcp_force_close(xprt);
 	case TCP_CLOSING:
 		/*
--
cgit v0.10.2

From b9d2bb2ee537424a7f855e1f93eed44eb9ee0854 Mon Sep 17 00:00:00 2001
From: Trond Myklebust
Date: Tue, 23 Oct 2012 11:40:02 -0400
Subject: Revert "SUNRPC: Ensure we close the socket on EPIPE errors too..."

This reverts commit 55420c24a0d4d1fce70ca713f84aa00b6b74a70e.
Now that we clear the connected flag when entering TCP_CLOSE_WAIT, the
deadlock described in this commit is no longer possible. Instead, the
resulting call to xs_tcp_shutdown() can interfere with pending reconnection
attempts.

Reported-by: Chris Perl
Signed-off-by: Trond Myklebust
Cc: stable@vger.kernel.org
Tested-by: Chris Perl

diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 6e6967d..7e2dd0d 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -737,10 +737,10 @@ static int xs_tcp_send_request(struct rpc_task *task)
 		dprintk("RPC: sendmsg returned unrecognized error %d\n",
 			-status);
 	case -ECONNRESET:
-	case -EPIPE:
 		xs_tcp_shutdown(xprt);
 	case -ECONNREFUSED:
 	case -ENOTCONN:
+	case -EPIPE:
 		clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
 	}

--
cgit v0.10.2
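
The first two patches hinge on the XPRT_CONNECTED bit: the connect path only
schedules a fresh connection attempt when the transport no longer looks
connected, so a server-initiated shutdown (TCP_CLOSE_WAIT) has to clear the
bit. The following is a minimal user-space sketch of that decision, not
kernel code; the names (struct xprt, call_connect, XPRT_CONNECTED) are
hypothetical stand-ins for the kernel structures.

#include <stdbool.h>
#include <stdio.h>

#define XPRT_CONNECTED	(1u << 0)	/* hypothetical stand-in for the kernel flag */

struct xprt {
	unsigned int state;
};

static bool xprt_is_connected(const struct xprt *xprt)
{
	return xprt->state & XPRT_CONNECTED;
}

/* Rough analogue of call_connect()/xprt_connect(): only start a new
 * connection attempt when the connected flag is clear. */
static void call_connect(struct xprt *xprt)
{
	if (xprt_is_connected(xprt)) {
		printf("transport looks connected, reusing socket\n");
		return;
	}
	printf("transport not connected, starting a new connect attempt\n");
}

int main(void)
{
	struct xprt xprt = { .state = XPRT_CONNECTED };

	/* Server initiates a shutdown (TCP_CLOSE_WAIT): without clearing the
	 * flag here, the next call_connect() would reuse a half-dead socket. */
	xprt.state &= ~XPRT_CONNECTED;
	call_connect(&xprt);
	return 0;
}
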
From 4bc1e68ed6a8b59be8a79eb719be515a55c7bc68 Mon Sep 17 00:00:00 2001
From: Trond Myklebust
Date: Tue, 23 Oct 2012 17:50:07 -0400
Subject: SUNRPC: Prevent races in xs_abort_connection()

The call to xprt_disconnect_done() that is triggered by a successful
connection reset will trigger another automatic wakeup of all tasks on the
xprt->pending rpc_wait_queue. In particular it will cause an early wake up
of the task that called xprt_connect().

All we really want to do here is clear all the socket-specific state flags,
so we split that functionality out of xs_sock_mark_closed() into a helper
that can be called by xs_abort_connection().

Reported-by: Chris Perl
Signed-off-by: Trond Myklebust
Cc: stable@vger.kernel.org
Tested-by: Chris Perl

diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 7e2dd0d..1f105c2 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -1453,7 +1453,7 @@ static void xs_tcp_cancel_linger_timeout(struct rpc_xprt *xprt)
 	xprt_clear_connecting(xprt);
 }

-static void xs_sock_mark_closed(struct rpc_xprt *xprt)
+static void xs_sock_reset_connection_flags(struct rpc_xprt *xprt)
 {
 	smp_mb__before_clear_bit();
 	clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
@@ -1461,6 +1461,11 @@ static void xs_sock_mark_closed(struct rpc_xprt *xprt)
 	clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
 	clear_bit(XPRT_CLOSING, &xprt->state);
 	smp_mb__after_clear_bit();
+}
+
+static void xs_sock_mark_closed(struct rpc_xprt *xprt)
+{
+	xs_sock_reset_connection_flags(xprt);
 	/* Mark transport as closed and wake up all pending tasks */
 	xprt_disconnect_done(xprt);
 }
@@ -2051,10 +2056,8 @@ static void xs_abort_connection(struct sock_xprt *transport)
 	any.sa_family = AF_UNSPEC;
 	result = kernel_connect(transport->sock, &any, sizeof(any), 0);
 	if (!result)
-		xs_sock_mark_closed(&transport->xprt);
-	else
-		dprintk("RPC: AF_UNSPEC connect return code %d\n",
-				result);
+		xs_sock_reset_connection_flags(&transport->xprt);
+	dprintk("RPC: AF_UNSPEC connect return code %d\n", result);
 }

 static void xs_tcp_reuse_connection(struct sock_xprt *transport)
--
cgit v0.10.2
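
The shape of the fix above is a two-layer helper: one function that only
clears the socket-specific state flags, and the existing one that
additionally marks the transport closed and wakes every pending task, so the
abort path can take the first half without the wakeup. A rough sketch of
that layering, with hypothetical stand-ins rather than the kernel helpers:

#include <stdio.h>

struct xprt {
	unsigned long state;
};

/* Clears the CONNECTION_ABORT / CLOSE_WAIT / CLOSING style flags only. */
static void sock_reset_connection_flags(struct xprt *xprt)
{
	xprt->state = 0;
	printf("connection flags cleared\n");
}

/* Full close: reset the flags and wake everything waiting on the transport. */
static void sock_mark_closed(struct xprt *xprt)
{
	sock_reset_connection_flags(xprt);
	printf("transport marked closed, pending tasks woken\n");
}

/* The abort path wants only the first half, so a reconnect that is already
 * in progress is not woken up prematurely. */
static void abort_connection(struct xprt *xprt)
{
	/* a connect(AF_UNSPEC) would reset the TCP connection here */
	sock_reset_connection_flags(xprt);
}

int main(void)
{
	struct xprt xprt = { .state = 0x7 };

	abort_connection(&xprt);	/* no early wakeup */
	sock_mark_closed(&xprt);	/* a normal close still wakes tasks */
	return 0;
}
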
From f878b657ce8e7d3673afe48110ec208a29e38c4a Mon Sep 17 00:00:00 2001
From: Trond Myklebust
Date: Mon, 22 Oct 2012 17:14:36 -0400
Subject: SUNRPC: Get rid of the xs_error_report socket callback

Chris Perl reports that we're seeing races between the wakeup call in
xs_error_report and the connect attempts. Basically, Chris has shown that in
certain circumstances, the call to xs_error_report causes the rpc_task that
is responsible for reconnecting to wake up early, thus triggering a
disconnect and retry.

Since the sk->sk_error_report() calls in the socket layer are always
followed by a tcp_done() in the cases where we care about waking up the
rpc_tasks, just let the state_change callbacks take responsibility for those
wake ups.

Reported-by: Chris Perl
Signed-off-by: Trond Myklebust
Cc: stable@vger.kernel.org
Tested-by: Chris Perl

diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 1f105c2..75853ca 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -254,7 +254,6 @@ struct sock_xprt {
 	void		(*old_data_ready)(struct sock *, int);
 	void		(*old_state_change)(struct sock *);
 	void		(*old_write_space)(struct sock *);
-	void		(*old_error_report)(struct sock *);
 };

 /*
@@ -781,7 +780,6 @@ static void xs_save_old_callbacks(struct sock_xprt *transport, struct sock *sk)
 	transport->old_data_ready = sk->sk_data_ready;
 	transport->old_state_change = sk->sk_state_change;
 	transport->old_write_space = sk->sk_write_space;
-	transport->old_error_report = sk->sk_error_report;
 }

 static void xs_restore_old_callbacks(struct sock_xprt *transport, struct sock *sk)
@@ -789,7 +787,6 @@ static void xs_restore_old_callbacks(struct sock_xprt *transport, struct sock *s
 	sk->sk_data_ready = transport->old_data_ready;
 	sk->sk_state_change = transport->old_state_change;
 	sk->sk_write_space = transport->old_write_space;
-	sk->sk_error_report = transport->old_error_report;
 }

 static void xs_reset_transport(struct sock_xprt *transport)
@@ -1546,25 +1543,6 @@ static void xs_tcp_state_change(struct sock *sk)
 	read_unlock_bh(&sk->sk_callback_lock);
 }

-/**
- * xs_error_report - callback mainly for catching socket errors
- * @sk: socket
- */
-static void xs_error_report(struct sock *sk)
-{
-	struct rpc_xprt *xprt;
-
-	read_lock_bh(&sk->sk_callback_lock);
-	if (!(xprt = xprt_from_sock(sk)))
-		goto out;
-	dprintk("RPC: %s client %p...\n"
-		"RPC: error %d\n",
-		__func__, xprt, sk->sk_err);
-	xprt_wake_pending_tasks(xprt, -EAGAIN);
-out:
-	read_unlock_bh(&sk->sk_callback_lock);
-}
-
 static void xs_write_space(struct sock *sk)
 {
 	struct socket *sock;
@@ -1864,7 +1842,6 @@ static int xs_local_finish_connecting(struct rpc_xprt *xprt,
 	sk->sk_user_data = xprt;
 	sk->sk_data_ready = xs_local_data_ready;
 	sk->sk_write_space = xs_udp_write_space;
-	sk->sk_error_report = xs_error_report;
 	sk->sk_allocation = GFP_ATOMIC;

 	xprt_clear_connected(xprt);
@@ -1989,7 +1966,6 @@ static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
 	sk->sk_user_data = xprt;
 	sk->sk_data_ready = xs_udp_data_ready;
 	sk->sk_write_space = xs_udp_write_space;
-	sk->sk_error_report = xs_error_report;
 	sk->sk_no_check = UDP_CSUM_NORCV;
 	sk->sk_allocation = GFP_ATOMIC;

@@ -2102,7 +2078,6 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
 	sk->sk_data_ready = xs_tcp_data_ready;
 	sk->sk_state_change = xs_tcp_state_change;
 	sk->sk_write_space = xs_tcp_write_space;
-	sk->sk_error_report = xs_error_report;
 	sk->sk_allocation = GFP_ATOMIC;

 	/* socket options */
--
cgit v0.10.2
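
The patch above relies on the guarantee stated in the commit message: in the
cases that matter, sk_error_report() is followed by tcp_done(), which fires
sk_state_change(), so only the state-change callback needs to be overridden
to wake the RPC tasks. A small user-space sketch of that
save/override/restore callback idiom follows; struct sock here is a
hypothetical miniature, not the kernel's.

#include <stdio.h>

struct sock {
	int sk_err;
	void (*sk_state_change)(struct sock *sk);
	void (*sk_error_report)(struct sock *sk);
};

static void default_state_change(struct sock *sk) { (void)sk; printf("default state change\n"); }
static void default_error_report(struct sock *sk) { printf("default error report: %d\n", sk->sk_err); }

/* Transport-level callback: this is where pending RPC tasks get woken. */
static void xprt_state_change(struct sock *sk)
{
	(void)sk;
	printf("state change: wake pending RPC tasks\n");
}

int main(void)
{
	struct sock sk = { 0, default_state_change, default_error_report };
	void (*saved_state_change)(struct sock *) = sk.sk_state_change;

	/* Only state_change is overridden; error_report is left alone, since
	 * an error is followed by a state change that already does the wakeup. */
	sk.sk_state_change = xprt_state_change;

	sk.sk_err = 104;		/* e.g. ECONNRESET */
	sk.sk_error_report(&sk);	/* no RPC wakeup from here any more */
	sk.sk_state_change(&sk);	/* the wakeup happens here instead */

	sk.sk_state_change = saved_state_change;	/* teardown restores it */
	return 0;
}
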
From a4ee8d978e47e79d536226dccb48991f70091168 Mon Sep 17 00:00:00 2001
From: Trond Myklebust
Date: Tue, 23 Oct 2012 13:51:58 -0400
Subject: LOCKD: fix races in nsm_client_get

Commit e9406db20fecbfcab646bad157b4cfdc7cadddfb (lockd: per-net NSM client
creation and destruction helpers introduced) contains a nasty race on
initialisation of the per-net NSM client because it doesn't check whether
or not the client is set after grabbing the nsm_create_mutex.

Reported-by: Nix
Signed-off-by: Trond Myklebust
Cc: stable@vger.kernel.org

diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c
index e4fb3ba..fe69560 100644
--- a/fs/lockd/mon.c
+++ b/fs/lockd/mon.c
@@ -85,29 +85,38 @@ static struct rpc_clnt *nsm_create(struct net *net)
 	return rpc_create(&args);
 }

+static struct rpc_clnt *nsm_client_set(struct lockd_net *ln,
+		struct rpc_clnt *clnt)
+{
+	spin_lock(&ln->nsm_clnt_lock);
+	if (ln->nsm_users == 0) {
+		if (clnt == NULL)
+			goto out;
+		ln->nsm_clnt = clnt;
+	}
+	clnt = ln->nsm_clnt;
+	ln->nsm_users++;
+out:
+	spin_unlock(&ln->nsm_clnt_lock);
+	return clnt;
+}
+
 static struct rpc_clnt *nsm_client_get(struct net *net)
 {
-	static DEFINE_MUTEX(nsm_create_mutex);
-	struct rpc_clnt *clnt;
+	struct rpc_clnt *clnt, *new;
 	struct lockd_net *ln = net_generic(net, lockd_net_id);

-	spin_lock(&ln->nsm_clnt_lock);
-	if (ln->nsm_users) {
-		ln->nsm_users++;
-		clnt = ln->nsm_clnt;
-		spin_unlock(&ln->nsm_clnt_lock);
+	clnt = nsm_client_set(ln, NULL);
+	if (clnt != NULL)
 		goto out;
-	}
-	spin_unlock(&ln->nsm_clnt_lock);

-	mutex_lock(&nsm_create_mutex);
-	clnt = nsm_create(net);
-	if (!IS_ERR(clnt)) {
-		ln->nsm_clnt = clnt;
-		smp_wmb();
-		ln->nsm_users = 1;
-	}
-	mutex_unlock(&nsm_create_mutex);
+	clnt = new = nsm_create(net);
+	if (IS_ERR(clnt))
+		goto out;
+
+	clnt = nsm_client_set(ln, new);
+	if (clnt != new)
+		rpc_shutdown_client(new);
 out:
 	return clnt;
 }
--
cgit v0.10.2

From e498daa81295d02f7359af313c2b7f87e1062207 Mon Sep 17 00:00:00 2001
From: Trond Myklebust
Date: Wed, 24 Oct 2012 08:53:35 -0400
Subject: LOCKD: Clear ln->nsm_clnt only when ln->nsm_users is zero

The current code is clearing it in all cases _except_ when zero.

Reported-by: Stanislav Kinsbursky
Signed-off-by: Trond Myklebust
Cc: stable@vger.kernel.org

diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c
index fe69560..3d7e09b 100644
--- a/fs/lockd/mon.c
+++ b/fs/lockd/mon.c
@@ -124,18 +124,16 @@ out:
 static void nsm_client_put(struct net *net)
 {
 	struct lockd_net *ln = net_generic(net, lockd_net_id);
-	struct rpc_clnt *clnt = ln->nsm_clnt;
-	int shutdown = 0;
+	struct rpc_clnt *clnt = NULL;

 	spin_lock(&ln->nsm_clnt_lock);
-	if (ln->nsm_users) {
-		if (--ln->nsm_users)
-			ln->nsm_clnt = NULL;
-		shutdown = !ln->nsm_users;
+	ln->nsm_users--;
+	if (ln->nsm_users == 0) {
+		clnt = ln->nsm_clnt;
+		ln->nsm_clnt = NULL;
 	}
 	spin_unlock(&ln->nsm_clnt_lock);
-
-	if (shutdown)
+	if (clnt != NULL)
 		rpc_shutdown_client(clnt);
 }

--
cgit v0.10.2
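
Taken together, the two LOCKD patches above converge on a common
refcounted-singleton pattern: try to grab the cached client under the lock,
create a new one outside the lock if there is none, then try to install it
and throw yours away if another thread won the race; the last put tears the
client down. The following is a user-space sketch of the same pattern using
pthreads; client_create()/client_destroy() are hypothetical stand-ins for
nsm_create() and rpc_shutdown_client(), and this is an illustration under
those assumptions, not the kernel code.

#include <pthread.h>
#include <stdlib.h>

struct client {
	int dummy;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct client *cached;
static unsigned int users;

static struct client *client_create(void) { return calloc(1, sizeof(struct client)); }
static void client_destroy(struct client *c) { free(c); }

/* Install clnt if we are the first user, otherwise return the cached one. */
static struct client *client_set(struct client *clnt)
{
	pthread_mutex_lock(&lock);
	if (users == 0) {
		if (clnt == NULL)
			goto out;
		cached = clnt;
	}
	clnt = cached;
	users++;
out:
	pthread_mutex_unlock(&lock);
	return clnt;
}

struct client *client_get(void)
{
	struct client *clnt, *new;

	clnt = client_set(NULL);		/* fast path: already set up */
	if (clnt != NULL)
		return clnt;

	new = client_create();			/* slow path: create unlocked */
	if (new == NULL)
		return NULL;

	clnt = client_set(new);			/* retry; someone may have won */
	if (clnt != new)
		client_destroy(new);		/* lost the race: drop ours */
	return clnt;
}

void client_put(void)
{
	struct client *clnt = NULL;

	pthread_mutex_lock(&lock);
	if (--users == 0) {			/* last user tears it down */
		clnt = cached;
		cached = NULL;
	}
	pthread_mutex_unlock(&lock);
	if (clnt != NULL)
		client_destroy(clnt);
}
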