author	Jeff Layton <jlayton@primarydata.com>	2014-11-21 14:19:29 -0500
committer	J. Bruce Fields <bfields@redhat.com>	2014-12-09 11:22:22 -0500
commit	403c7b44441d60aba7f8a134c31279ffa60ea769 (patch)
tree	97005025d0e50119a720f8a29672d3b695ff7886 /net
parent	812443865c5fc255363d4a684a62c086af1addca (diff)
sunrpc: fix potential races in pool_stats collection
In a later patch, we'll be removing some spinlocking around the socket and
thread queueing code in order to fix some contention problems. At that
point, the stats counters will no longer be protected by the sp_lock.

Change the counters to atomic_long_t fields, except for the
"sockets_queued" counter which will still be manipulated under a
spinlock.

Signed-off-by: Jeff Layton <jlayton@primarydata.com>
Tested-by: Chris Worley <chris.worley@primarydata.com>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
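The idiom the patch relies on: once a counter is an atomic type, the update
path can increment it safely without holding the pool's spinlock, and the
reader gets a coherent value via an atomic read. Below is a minimal
userspace sketch of that pattern using C11 <stdatomic.h> rather than the
kernel's atomic_long_t API; struct pool_stats and its fields here are
hypothetical stand-ins echoing the sunrpc names, not the kernel code itself.

	#include <stdatomic.h>
	#include <stdio.h>

	/* Hypothetical stand-in for struct svc_pool_stats: counters that
	 * are updated outside the lock become atomic. */
	struct pool_stats {
		atomic_long packets;
		atomic_long threads_woken;
		unsigned long sockets_queued;	/* still updated under a lock */
	};

	int main(void)
	{
		struct pool_stats s = { 0 };

		/* Lock-free update path, analogous to atomic_long_inc()
		 * in the patch. Relaxed ordering suffices for pure stats. */
		atomic_fetch_add_explicit(&s.packets, 1, memory_order_relaxed);
		atomic_fetch_add_explicit(&s.threads_woken, 1, memory_order_relaxed);

		/* Reader side, analogous to atomic_long_read() in
		 * svc_pool_stats_show(); the (unsigned long) casts mirror
		 * the ones the patch adds for the "%lu" specifiers. */
		printf("%lu %lu\n",
		       (unsigned long)atomic_load_explicit(&s.packets,
						memory_order_relaxed),
		       (unsigned long)atomic_load_explicit(&s.threads_woken,
						memory_order_relaxed));
		return 0;
	}

Note the design choice visible in the patch: individual increments stay
race-free without any lock, but "sockets_queued" is left a plain field
because its updates remain serialized by a spinlock anyway.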
Diffstat (limited to 'net')
-rw-r--r--	net/sunrpc/svc_xprt.c	| 12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index b2676e597fc4..579ff2249562 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -362,7 +362,7 @@ static void svc_xprt_do_enqueue(struct svc_xprt *xprt)
 	pool = svc_pool_for_cpu(xprt->xpt_server, cpu);
 	spin_lock_bh(&pool->sp_lock);
 
-	pool->sp_stats.packets++;
+	atomic_long_inc(&pool->sp_stats.packets);
 
 	if (!list_empty(&pool->sp_threads)) {
 		rqstp = list_entry(pool->sp_threads.next,
@@ -383,7 +383,7 @@ static void svc_xprt_do_enqueue(struct svc_xprt *xprt)
 		svc_xprt_get(xprt);
 		wake_up_process(rqstp->rq_task);
 		rqstp->rq_xprt = xprt;
-		pool->sp_stats.threads_woken++;
+		atomic_long_inc(&pool->sp_stats.threads_woken);
 	} else {
 		dprintk("svc: transport %p put into queue\n", xprt);
 		list_add_tail(&xprt->xpt_ready, &pool->sp_sockets);
@@ -669,7 +669,7 @@ static struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp, long timeout)
 
 	spin_lock_bh(&pool->sp_lock);
 	if (!time_left)
-		pool->sp_stats.threads_timedout++;
+		atomic_long_inc(&pool->sp_stats.threads_timedout);
 
 	xprt = rqstp->rq_xprt;
 	if (!xprt) {
@@ -1306,10 +1306,10 @@ static int svc_pool_stats_show(struct seq_file *m, void *p)
 
 	seq_printf(m, "%u %lu %lu %lu %lu\n",
 		pool->sp_id,
-		pool->sp_stats.packets,
+		(unsigned long)atomic_long_read(&pool->sp_stats.packets),
 		pool->sp_stats.sockets_queued,
-		pool->sp_stats.threads_woken,
-		pool->sp_stats.threads_timedout);
+		(unsigned long)atomic_long_read(&pool->sp_stats.threads_woken),
+		(unsigned long)atomic_long_read(&pool->sp_stats.threads_timedout));
 
 	return 0;
 }