author    J. Bruce Fields <bfields@citi.umich.edu>  2009-08-06 15:41:34 -0400
committer J. Bruce Fields <bfields@citi.umich.edu>  2009-11-23 12:34:05 -0500
commit    78c210efdefe07131f91ed512a3308b15bb14e2f (patch)
tree      8b101c92584e27b0dbe238f6a1208308e54dac81 /net/sunrpc/svc_xprt.c
parent    0a3adadee42f2865bb867b8c5f4955b7def9baad (diff)
Revert "knfsd: avoid overloading the CPU scheduler with enormous load averages"
This reverts commit 59a252ff8c0f2fa32c896f69d56ae33e641ce7ad. The
reverted patch helps in an entirely cached workload, but not
necessarily in workloads that require waiting on disk.

Conflicts:

	include/linux/sunrpc/svc.h
	net/sunrpc/svc_xprt.c

Reported-by: Simon Kirby <sim@hostway.ca>
Tested-by: Jesper Krogh <jesper@krogh.cc>
Signed-off-by: J. Bruce Fields <bfields@citi.umich.edu>
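For context, the throttle being reverted capped how many server threads could be woken concurrently. Below is a minimal sketch of the removed logic, distilled from the hunks that follow. The helper name thread_available() is illustrative and does not exist in the tree; in the real code the check is inlined in svc_xprt_enqueue(), and the svc_pool fields (sp_threads, sp_nwaking, sp_stats) are as shown in the diff:

	#define SVC_MAX_WAKING 5	/* cap on concurrently-waking threads */

	/* Illustrative helper only; the actual check was inlined. */
	static int thread_available(struct svc_pool *pool)
	{
		/* A thread is available if any are asleep on sp_threads... */
		int thread_avail = !list_empty(&pool->sp_threads);

		if (pool->sp_nwaking >= SVC_MAX_WAKING) {
			/* ...unless too many are already runnable and waking up. */
			thread_avail = 0;
			pool->sp_stats.overloads_avoided++;
		}
		return thread_avail;
	}

After the revert, svc_xprt_enqueue() wakes a thread whenever !list_empty(&pool->sp_threads) holds, so wake-ups are no longer capped and the overloads-avoided counter is dropped from the pool statistics.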
Diffstat (limited to 'net/sunrpc/svc_xprt.c')
-rw-r--r--  net/sunrpc/svc_xprt.c  31
1 file changed, 9 insertions(+), 22 deletions(-)
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index df124f78ee48..2c58b75a236f 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -16,8 +16,6 @@
 
 #define RPCDBG_FACILITY	RPCDBG_SVCXPRT
 
-#define SVC_MAX_WAKING 5
-
 static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt);
 static int svc_deferred_recv(struct svc_rqst *rqstp);
 static struct cache_deferred_req *svc_defer(struct cache_req *req);
@@ -306,7 +304,6 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
 	struct svc_pool *pool;
 	struct svc_rqst *rqstp;
 	int cpu;
-	int thread_avail;
 
 	if (!(xprt->xpt_flags &
 	      ((1<<XPT_CONN)|(1<<XPT_DATA)|(1<<XPT_CLOSE)|(1<<XPT_DEFERRED))))
@@ -318,6 +315,12 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
 
 	spin_lock_bh(&pool->sp_lock);
 
+	if (!list_empty(&pool->sp_threads) &&
+	    !list_empty(&pool->sp_sockets))
+		printk(KERN_ERR
+		       "svc_xprt_enqueue: "
+		       "threads and transports both waiting??\n");
+
 	if (test_bit(XPT_DEAD, &xprt->xpt_flags)) {
 		/* Don't enqueue dead transports */
 		dprintk("svc: transport %p is dead, not enqueued\n", xprt);
@@ -358,15 +361,7 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
 	}
 
  process:
-	/* Work out whether threads are available */
-	thread_avail = !list_empty(&pool->sp_threads);	/* threads are asleep */
-	if (pool->sp_nwaking >= SVC_MAX_WAKING) {
-		/* too many threads are runnable and trying to wake up */
-		thread_avail = 0;
-		pool->sp_stats.overloads_avoided++;
-	}
-
-	if (thread_avail) {
+	if (!list_empty(&pool->sp_threads)) {
 		rqstp = list_entry(pool->sp_threads.next,
 				   struct svc_rqst,
 				   rq_list);
@@ -381,8 +376,6 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
 		svc_xprt_get(xprt);
 		rqstp->rq_reserved = serv->sv_max_mesg;
 		atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
-		rqstp->rq_waking = 1;
-		pool->sp_nwaking++;
 		pool->sp_stats.threads_woken++;
 		BUG_ON(xprt->xpt_pool != pool);
 		wake_up(&rqstp->rq_wait);
@@ -651,11 +644,6 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
 		return -EINTR;
 
 	spin_lock_bh(&pool->sp_lock);
-	if (rqstp->rq_waking) {
-		rqstp->rq_waking = 0;
-		pool->sp_nwaking--;
-		BUG_ON(pool->sp_nwaking < 0);
-	}
 	xprt = svc_xprt_dequeue(pool);
 	if (xprt) {
 		rqstp->rq_xprt = xprt;
@@ -1204,16 +1192,15 @@ static int svc_pool_stats_show(struct seq_file *m, void *p)
 	struct svc_pool *pool = p;
 
 	if (p == SEQ_START_TOKEN) {
-		seq_puts(m, "# pool packets-arrived sockets-enqueued threads-woken overloads-avoided threads-timedout\n");
+		seq_puts(m, "# pool packets-arrived sockets-enqueued threads-woken threads-timedout\n");
 		return 0;
 	}
 
-	seq_printf(m, "%u %lu %lu %lu %lu %lu\n",
+	seq_printf(m, "%u %lu %lu %lu %lu\n",
 		pool->sp_id,
 		pool->sp_stats.packets,
 		pool->sp_stats.sockets_queued,
 		pool->sp_stats.threads_woken,
-		pool->sp_stats.overloads_avoided,
 		pool->sp_stats.threads_timedout);
 
 	return 0;
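With overloads-avoided removed, each pool line emitted by svc_pool_stats_show() (exposed through nfsd's pool_stats file, normally /proc/fs/nfsd/pool_stats) carries five fields matching the seq_printf() format above. Illustrative output with made-up counts; packets-arrived is roughly sockets-enqueued plus threads-woken, since each enqueued transport either wakes an idle thread or is queued for a busy one:

	# pool packets-arrived sockets-enqueued threads-woken threads-timedout
	0 13460744 937120 12523624 334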