author    Jeff Layton <jlayton@primarydata.com>    2014-11-21 19:19:29 (GMT)
committer J. Bruce Fields <bfields@redhat.com>     2014-12-09 16:22:22 (GMT)
commit    403c7b44441d60aba7f8a134c31279ffa60ea769 (patch)
tree      97005025d0e50119a720f8a29672d3b695ff7886 /net/sunrpc
parent    812443865c5fc255363d4a684a62c086af1addca (diff)
download  linux-403c7b44441d60aba7f8a134c31279ffa60ea769.tar.xz
sunrpc: fix potential races in pool_stats collection
In a later patch, we'll be removing some spinlocking around the socket and thread queueing code in order to fix some contention problems. At that point, the stats counters will no longer be protected by the sp_lock. Change the counters to atomic_long_t fields, except for the "sockets_queued" counter which will still be manipulated under a spinlock.

Signed-off-by: Jeff Layton <jlayton@primarydata.com>
Tested-by: Chris Worley <chris.worley@primarydata.com>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
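This diff is limited to net/sunrpc, so the companion change to the struct behind pool->sp_stats (in include/linux/sunrpc/svc.h) is not shown here. A minimal sketch of what the field types would look like after this patch, assuming the existing field names and keeping sockets_queued as a plain counter since it is still bumped under sp_lock:

struct svc_pool_stats {
	atomic_long_t	packets;		/* now updated with atomic_long_inc() */
	unsigned long	sockets_queued;		/* still manipulated under pool->sp_lock */
	atomic_long_t	threads_woken;
	atomic_long_t	threads_timedout;
};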
Diffstat (limited to 'net/sunrpc')
-rw-r--r--  net/sunrpc/svc_xprt.c  12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index b2676e5..579ff22 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -362,7 +362,7 @@ static void svc_xprt_do_enqueue(struct svc_xprt *xprt)
pool = svc_pool_for_cpu(xprt->xpt_server, cpu);
spin_lock_bh(&pool->sp_lock);
- pool->sp_stats.packets++;
+ atomic_long_inc(&pool->sp_stats.packets);
if (!list_empty(&pool->sp_threads)) {
rqstp = list_entry(pool->sp_threads.next,
@@ -383,7 +383,7 @@ static void svc_xprt_do_enqueue(struct svc_xprt *xprt)
svc_xprt_get(xprt);
wake_up_process(rqstp->rq_task);
rqstp->rq_xprt = xprt;
- pool->sp_stats.threads_woken++;
+ atomic_long_inc(&pool->sp_stats.threads_woken);
} else {
dprintk("svc: transport %p put into queue\n", xprt);
list_add_tail(&xprt->xpt_ready, &pool->sp_sockets);
@@ -669,7 +669,7 @@ static struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp, long timeout)
spin_lock_bh(&pool->sp_lock);
if (!time_left)
- pool->sp_stats.threads_timedout++;
+ atomic_long_inc(&pool->sp_stats.threads_timedout);
xprt = rqstp->rq_xprt;
if (!xprt) {
@@ -1306,10 +1306,10 @@ static int svc_pool_stats_show(struct seq_file *m, void *p)
seq_printf(m, "%u %lu %lu %lu %lu\n",
pool->sp_id,
- pool->sp_stats.packets,
+ (unsigned long)atomic_long_read(&pool->sp_stats.packets),
pool->sp_stats.sockets_queued,
- pool->sp_stats.threads_woken,
- pool->sp_stats.threads_timedout);
+ (unsigned long)atomic_long_read(&pool->sp_stats.threads_woken),
+ (unsigned long)atomic_long_read(&pool->sp_stats.threads_timedout));
return 0;
}
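For context, the seq_printf() above emits one line per pool in the per-pool stats seq_file (exposed by nfsd as /proc/fs/nfsd/pool_stats): the pool id followed by the four counters. The casts to unsigned long keep the existing "%lu" specifiers working, since atomic_long_read() returns a signed long. A hypothetical sample line, with made-up counter values purely to illustrate the format:

0 103217 103217 103209 8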