Diffstat (limited to 'net/sunrpc/svc.c')
 -rw-r--r--  net/sunrpc/svc.c | 110
 1 file changed, 40 insertions(+), 70 deletions(-)
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 01c7e311b904..5a32cb7c4bb4 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -18,6 +18,7 @@
 #include <linux/mm.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
+#include <linux/kthread.h>
 
 #include <linux/sunrpc/types.h>
 #include <linux/sunrpc/xdr.h>
@@ -291,15 +292,14 @@ svc_pool_map_put(void)
 
 
 /*
- * Set the current thread's cpus_allowed mask so that it
+ * Set the given thread's cpus_allowed mask so that it
  * will only run on cpus in the given pool.
- *
- * Returns 1 and fills in oldmask iff a cpumask was applied.
  */
-static inline int
-svc_pool_map_set_cpumask(unsigned int pidx, cpumask_t *oldmask)
+static inline void
+svc_pool_map_set_cpumask(struct task_struct *task, unsigned int pidx)
 {
 	struct svc_pool_map *m = &svc_pool_map;
+	unsigned int node = m->pool_to[pidx];
 
 	/*
 	 * The caller checks for sv_nrpools > 1, which
@@ -307,26 +307,17 @@ svc_pool_map_set_cpumask(unsigned int pidx, cpumask_t *oldmask)
 	 */
 	BUG_ON(m->count == 0);
 
-	switch (m->mode)
-	{
-	default:
-		return 0;
+	switch (m->mode) {
 	case SVC_POOL_PERCPU:
 	{
-		unsigned int cpu = m->pool_to[pidx];
-
-		*oldmask = current->cpus_allowed;
-		set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
-		return 1;
+		set_cpus_allowed_ptr(task, &cpumask_of_cpu(node));
+		break;
 	}
 	case SVC_POOL_PERNODE:
 	{
-		unsigned int node = m->pool_to[pidx];
 		node_to_cpumask_ptr(nodecpumask, node);
-
-		*oldmask = current->cpus_allowed;
-		set_cpus_allowed_ptr(current, nodecpumask);
-		return 1;
+		set_cpus_allowed_ptr(task, nodecpumask);
+		break;
 	}
 	}
 }
@@ -443,7 +434,7 @@ EXPORT_SYMBOL(svc_create);
 struct svc_serv *
 svc_create_pooled(struct svc_program *prog, unsigned int bufsize,
 		void (*shutdown)(struct svc_serv *serv),
-		svc_thread_fn func, int sig, struct module *mod)
+		svc_thread_fn func, struct module *mod)
 {
 	struct svc_serv *serv;
 	unsigned int npools = svc_pool_map_get();
@@ -452,7 +443,6 @@ svc_create_pooled(struct svc_program *prog, unsigned int bufsize,
 
 	if (serv != NULL) {
 		serv->sv_function = func;
-		serv->sv_kill_signal = sig;
 		serv->sv_module = mod;
 	}
 
@@ -461,7 +451,8 @@ svc_create_pooled(struct svc_program *prog, unsigned int bufsize,
 EXPORT_SYMBOL(svc_create_pooled);
 
 /*
- * Destroy an RPC service. Should be called with the BKL held
+ * Destroy an RPC service. Should be called with appropriate locking to
+ * protect the sv_nrthreads, sv_permsocks and sv_tempsocks.
  */
 void
 svc_destroy(struct svc_serv *serv)
@@ -578,46 +569,6 @@ out_enomem:
 EXPORT_SYMBOL(svc_prepare_thread);
 
 /*
- * Create a thread in the given pool. Caller must hold BKL.
- * On a NUMA or SMP machine, with a multi-pool serv, the thread
- * will be restricted to run on the cpus belonging to the pool.
- */
-static int
-__svc_create_thread(svc_thread_fn func, struct svc_serv *serv,
-		struct svc_pool *pool)
-{
-	struct svc_rqst *rqstp;
-	int error = -ENOMEM;
-	int have_oldmask = 0;
-	cpumask_t uninitialized_var(oldmask);
-
-	rqstp = svc_prepare_thread(serv, pool);
-	if (IS_ERR(rqstp)) {
-		error = PTR_ERR(rqstp);
-		goto out;
-	}
-
-	if (serv->sv_nrpools > 1)
-		have_oldmask = svc_pool_map_set_cpumask(pool->sp_id, &oldmask);
-
-	error = kernel_thread((int (*)(void *)) func, rqstp, 0);
-
-	if (have_oldmask)
-		set_cpus_allowed(current, oldmask);
-
-	if (error < 0)
-		goto out_thread;
-	svc_sock_update_bufs(serv);
-	error = 0;
-out:
-	return error;
-
-out_thread:
-	svc_exit_thread(rqstp);
-	goto out;
-}
-
-/*
  * Choose a pool in which to create a new thread, for svc_set_num_threads
  */
 static inline struct svc_pool *
@@ -674,7 +625,7 @@ found_pool:
  * of threads the given number. If `pool' is non-NULL, applies
  * only to threads in that pool, otherwise round-robins between
  * all pools. Must be called with a svc_get() reference and
- * the BKL held.
+ * the BKL or another lock to protect access to svc_serv fields.
  *
  * Destroying threads relies on the service threads filling in
  * rqstp->rq_task, which only the nfs ones do. Assumes the serv
@@ -686,7 +637,9 @@ found_pool:
 int
 svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
 {
-	struct task_struct *victim;
+	struct svc_rqst *rqstp;
+	struct task_struct *task;
+	struct svc_pool *chosen_pool;
 	int error = 0;
 	unsigned int state = serv->sv_nrthreads-1;
 
@@ -702,18 +655,34 @@ svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
 	/* create new threads */
 	while (nrservs > 0) {
 		nrservs--;
+		chosen_pool = choose_pool(serv, pool, &state);
+
+		rqstp = svc_prepare_thread(serv, chosen_pool);
+		if (IS_ERR(rqstp)) {
+			error = PTR_ERR(rqstp);
+			break;
+		}
+
 		__module_get(serv->sv_module);
-		error = __svc_create_thread(serv->sv_function, serv,
-				choose_pool(serv, pool, &state));
-		if (error < 0) {
+		task = kthread_create(serv->sv_function, rqstp, serv->sv_name);
+		if (IS_ERR(task)) {
+			error = PTR_ERR(task);
 			module_put(serv->sv_module);
+			svc_exit_thread(rqstp);
 			break;
 		}
+
+		rqstp->rq_task = task;
+		if (serv->sv_nrpools > 1)
+			svc_pool_map_set_cpumask(task, chosen_pool->sp_id);
+
+		svc_sock_update_bufs(serv);
+		wake_up_process(task);
 	}
 	/* destroy old threads */
 	while (nrservs < 0 &&
-	       (victim = choose_victim(serv, pool, &state)) != NULL) {
-		send_sig(serv->sv_kill_signal, victim, 1);
+	       (task = choose_victim(serv, pool, &state)) != NULL) {
+		send_sig(SIGINT, task, 1);
 		nrservs++;
 	}
 
@@ -722,7 +691,8 @@ svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
 EXPORT_SYMBOL(svc_set_num_threads);
 
 /*
- * Called from a server thread as it's exiting. Caller must hold BKL.
+ * Called from a server thread as it's exiting. Caller must hold the BKL or
+ * the "service mutex", whichever is appropriate for the service.
  */
 void
 svc_exit_thread(struct svc_rqst *rqstp)
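
As a reading aid, the new-side (+) lines above can be reassembled into the code as it stands with this patch applied. The two sketches below do that for the parts that are split across hunks; the inline comments are added here for explanation and are not part of the patch, and the one comment line the diff elides between hunks is marked rather than guessed at.

svc_pool_map_set_cpumask() now takes the new task and applies the pool's cpumask to it directly:

static inline void
svc_pool_map_set_cpumask(struct task_struct *task, unsigned int pidx)
{
	struct svc_pool_map *m = &svc_pool_map;
	unsigned int node = m->pool_to[pidx];

	/*
	 * The caller checks for sv_nrpools > 1, which
	 * ... (rest of this comment is not shown in the diff)
	 */
	BUG_ON(m->count == 0);

	switch (m->mode) {
	case SVC_POOL_PERCPU:
	{
		/* pin the thread to the pool's one cpu */
		set_cpus_allowed_ptr(task, &cpumask_of_cpu(node));
		break;
	}
	case SVC_POOL_PERNODE:
	{
		/* pin the thread to the cpus of the pool's NUMA node */
		node_to_cpumask_ptr(nodecpumask, node);
		set_cpus_allowed_ptr(task, nodecpumask);
		break;
	}
	}
}

The thread-creation loop in svc_set_num_threads(), which replaces the removed __svc_create_thread() helper:

	/* create new threads */
	while (nrservs > 0) {
		nrservs--;
		chosen_pool = choose_pool(serv, pool, &state);

		/* set up the svc_rqst before the thread exists */
		rqstp = svc_prepare_thread(serv, chosen_pool);
		if (IS_ERR(rqstp)) {
			error = PTR_ERR(rqstp);
			break;
		}

		__module_get(serv->sv_module);
		/* kthread_create() leaves the new task stopped ... */
		task = kthread_create(serv->sv_function, rqstp, serv->sv_name);
		if (IS_ERR(task)) {
			error = PTR_ERR(task);
			module_put(serv->sv_module);
			svc_exit_thread(rqstp);
			break;
		}

		rqstp->rq_task = task;
		/* ... so the pool's cpumask can be applied before it runs */
		if (serv->sv_nrpools > 1)
			svc_pool_map_set_cpumask(task, chosen_pool->sp_id);

		svc_sock_update_bufs(serv);
		wake_up_process(task);
	}

Because kthread_create() leaves the new task stopped, the pool cpumask can be applied to the child before wake_up_process() starts it, so the parent no longer needs to save and restore its own cpus_allowed around kernel_thread() the way the removed helper did.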