about summary refs log tree commit diff stats
path: root/net/sunrpc/svc.c
diff options
context:
space:
mode:
Diffstat (limited to 'net/sunrpc/svc.c')
-rw-r--r--  net/sunrpc/svc.c  111
1 files changed, 41 insertions, 70 deletions
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 01c7e311b904..835d27413083 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -18,6 +18,7 @@
18#include <linux/mm.h> 18#include <linux/mm.h>
19#include <linux/interrupt.h> 19#include <linux/interrupt.h>
20#include <linux/module.h> 20#include <linux/module.h>
21#include <linux/kthread.h>
21 22
22#include <linux/sunrpc/types.h> 23#include <linux/sunrpc/types.h>
23#include <linux/sunrpc/xdr.h> 24#include <linux/sunrpc/xdr.h>
@@ -291,15 +292,14 @@ svc_pool_map_put(void)
291 292
292 293
293/* 294/*
294 * Set the current thread's cpus_allowed mask so that it 295 * Set the given thread's cpus_allowed mask so that it
295 * will only run on cpus in the given pool. 296 * will only run on cpus in the given pool.
296 *
297 * Returns 1 and fills in oldmask iff a cpumask was applied.
298 */ 297 */
299static inline int 298static inline void
300svc_pool_map_set_cpumask(unsigned int pidx, cpumask_t *oldmask) 299svc_pool_map_set_cpumask(struct task_struct *task, unsigned int pidx)
301{ 300{
302 struct svc_pool_map *m = &svc_pool_map; 301 struct svc_pool_map *m = &svc_pool_map;
302 unsigned int node = m->pool_to[pidx];
303 303
304 /* 304 /*
305 * The caller checks for sv_nrpools > 1, which 305 * The caller checks for sv_nrpools > 1, which
@@ -307,26 +307,18 @@ svc_pool_map_set_cpumask(unsigned int pidx, cpumask_t *oldmask)
307 */ 307 */
308 BUG_ON(m->count == 0); 308 BUG_ON(m->count == 0);
309 309
310 switch (m->mode) 310 switch (m->mode) {
311 {
312 default:
313 return 0;
314 case SVC_POOL_PERCPU: 311 case SVC_POOL_PERCPU:
315 { 312 {
316 unsigned int cpu = m->pool_to[pidx]; 313 cpumask_of_cpu_ptr(cpumask, node);
317 314 set_cpus_allowed_ptr(task, cpumask);
318 *oldmask = current->cpus_allowed; 315 break;
319 set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
320 return 1;
321 } 316 }
322 case SVC_POOL_PERNODE: 317 case SVC_POOL_PERNODE:
323 { 318 {
324 unsigned int node = m->pool_to[pidx];
325 node_to_cpumask_ptr(nodecpumask, node); 319 node_to_cpumask_ptr(nodecpumask, node);
326 320 set_cpus_allowed_ptr(task, nodecpumask);
327 *oldmask = current->cpus_allowed; 321 break;
328 set_cpus_allowed_ptr(current, nodecpumask);
329 return 1;
330 } 322 }
331 } 323 }
332} 324}
@@ -443,7 +435,7 @@ EXPORT_SYMBOL(svc_create);
443struct svc_serv * 435struct svc_serv *
444svc_create_pooled(struct svc_program *prog, unsigned int bufsize, 436svc_create_pooled(struct svc_program *prog, unsigned int bufsize,
445 void (*shutdown)(struct svc_serv *serv), 437 void (*shutdown)(struct svc_serv *serv),
446 svc_thread_fn func, int sig, struct module *mod) 438 svc_thread_fn func, struct module *mod)
447{ 439{
448 struct svc_serv *serv; 440 struct svc_serv *serv;
449 unsigned int npools = svc_pool_map_get(); 441 unsigned int npools = svc_pool_map_get();
@@ -452,7 +444,6 @@ svc_create_pooled(struct svc_program *prog, unsigned int bufsize,
452 444
453 if (serv != NULL) { 445 if (serv != NULL) {
454 serv->sv_function = func; 446 serv->sv_function = func;
455 serv->sv_kill_signal = sig;
456 serv->sv_module = mod; 447 serv->sv_module = mod;
457 } 448 }
458 449
@@ -461,7 +452,8 @@ svc_create_pooled(struct svc_program *prog, unsigned int bufsize,
461EXPORT_SYMBOL(svc_create_pooled); 452EXPORT_SYMBOL(svc_create_pooled);
462 453
463/* 454/*
464 * Destroy an RPC service. Should be called with the BKL held 455 * Destroy an RPC service. Should be called with appropriate locking to
456 * protect the sv_nrthreads, sv_permsocks and sv_tempsocks.
465 */ 457 */
466void 458void
467svc_destroy(struct svc_serv *serv) 459svc_destroy(struct svc_serv *serv)
@@ -578,46 +570,6 @@ out_enomem:
578EXPORT_SYMBOL(svc_prepare_thread); 570EXPORT_SYMBOL(svc_prepare_thread);
579 571
580/* 572/*
581 * Create a thread in the given pool. Caller must hold BKL.
582 * On a NUMA or SMP machine, with a multi-pool serv, the thread
583 * will be restricted to run on the cpus belonging to the pool.
584 */
585static int
586__svc_create_thread(svc_thread_fn func, struct svc_serv *serv,
587 struct svc_pool *pool)
588{
589 struct svc_rqst *rqstp;
590 int error = -ENOMEM;
591 int have_oldmask = 0;
592 cpumask_t uninitialized_var(oldmask);
593
594 rqstp = svc_prepare_thread(serv, pool);
595 if (IS_ERR(rqstp)) {
596 error = PTR_ERR(rqstp);
597 goto out;
598 }
599
600 if (serv->sv_nrpools > 1)
601 have_oldmask = svc_pool_map_set_cpumask(pool->sp_id, &oldmask);
602
603 error = kernel_thread((int (*)(void *)) func, rqstp, 0);
604
605 if (have_oldmask)
606 set_cpus_allowed(current, oldmask);
607
608 if (error < 0)
609 goto out_thread;
610 svc_sock_update_bufs(serv);
611 error = 0;
612out:
613 return error;
614
615out_thread:
616 svc_exit_thread(rqstp);
617 goto out;
618}
619
620/*
621 * Choose a pool in which to create a new thread, for svc_set_num_threads 573 * Choose a pool in which to create a new thread, for svc_set_num_threads
622 */ 574 */
623static inline struct svc_pool * 575static inline struct svc_pool *
@@ -674,7 +626,7 @@ found_pool:
674 * of threads the given number. If `pool' is non-NULL, applies 626 * of threads the given number. If `pool' is non-NULL, applies
675 * only to threads in that pool, otherwise round-robins between 627 * only to threads in that pool, otherwise round-robins between
676 * all pools. Must be called with a svc_get() reference and 628 * all pools. Must be called with a svc_get() reference and
677 * the BKL held. 629 * the BKL or another lock to protect access to svc_serv fields.
678 * 630 *
679 * Destroying threads relies on the service threads filling in 631 * Destroying threads relies on the service threads filling in
680 * rqstp->rq_task, which only the nfs ones do. Assumes the serv 632 * rqstp->rq_task, which only the nfs ones do. Assumes the serv
@@ -686,7 +638,9 @@ found_pool:
686int 638int
687svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs) 639svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
688{ 640{
689 struct task_struct *victim; 641 struct svc_rqst *rqstp;
642 struct task_struct *task;
643 struct svc_pool *chosen_pool;
690 int error = 0; 644 int error = 0;
691 unsigned int state = serv->sv_nrthreads-1; 645 unsigned int state = serv->sv_nrthreads-1;
692 646
@@ -702,18 +656,34 @@ svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
702 /* create new threads */ 656 /* create new threads */
703 while (nrservs > 0) { 657 while (nrservs > 0) {
704 nrservs--; 658 nrservs--;
659 chosen_pool = choose_pool(serv, pool, &state);
660
661 rqstp = svc_prepare_thread(serv, chosen_pool);
662 if (IS_ERR(rqstp)) {
663 error = PTR_ERR(rqstp);
664 break;
665 }
666
705 __module_get(serv->sv_module); 667 __module_get(serv->sv_module);
706 error = __svc_create_thread(serv->sv_function, serv, 668 task = kthread_create(serv->sv_function, rqstp, serv->sv_name);
707 choose_pool(serv, pool, &state)); 669 if (IS_ERR(task)) {
708 if (error < 0) { 670 error = PTR_ERR(task);
709 module_put(serv->sv_module); 671 module_put(serv->sv_module);
672 svc_exit_thread(rqstp);
710 break; 673 break;
711 } 674 }
675
676 rqstp->rq_task = task;
677 if (serv->sv_nrpools > 1)
678 svc_pool_map_set_cpumask(task, chosen_pool->sp_id);
679
680 svc_sock_update_bufs(serv);
681 wake_up_process(task);
712 } 682 }
713 /* destroy old threads */ 683 /* destroy old threads */
714 while (nrservs < 0 && 684 while (nrservs < 0 &&
715 (victim = choose_victim(serv, pool, &state)) != NULL) { 685 (task = choose_victim(serv, pool, &state)) != NULL) {
716 send_sig(serv->sv_kill_signal, victim, 1); 686 send_sig(SIGINT, task, 1);
717 nrservs++; 687 nrservs++;
718 } 688 }
719 689
@@ -722,7 +692,8 @@ svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
722EXPORT_SYMBOL(svc_set_num_threads); 692EXPORT_SYMBOL(svc_set_num_threads);
723 693
724/* 694/*
725 * Called from a server thread as it's exiting. Caller must hold BKL. 695 * Called from a server thread as it's exiting. Caller must hold the BKL or
696 * the "service mutex", whichever is appropriate for the service.
726 */ 697 */
727void 698void
728svc_exit_thread(struct svc_rqst *rqstp) 699svc_exit_thread(struct svc_rqst *rqstp)