author		Ingo Molnar <mingo@elte.hu>	2008-07-21 11:19:50 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-07-21 11:19:50 -0400
commit		eb6a12c2428d21a9f3e0f1a50e927d5fd80fc3d0 (patch)
tree		5ac6f43899648abeab1d43aad3107f664e7f13d5 /net/sunrpc/svc.c
parent		c4762aba0b1f72659aae9ce37b772ca8bd8f06f4 (diff)
parent		14b395e35d1afdd8019d11b92e28041fad591b71 (diff)
Merge branch 'linus' into cpus4096-for-linus
Conflicts:
net/sunrpc/svc.c
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'net/sunrpc/svc.c')
-rw-r--r--	net/sunrpc/svc.c	112
1 file changed, 41 insertions, 71 deletions
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 083d12688134..835d27413083 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -18,6 +18,7 @@
 #include <linux/mm.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
+#include <linux/kthread.h>
 
 #include <linux/sunrpc/types.h>
 #include <linux/sunrpc/xdr.h>
@@ -291,15 +292,14 @@ svc_pool_map_put(void)
 
 
 /*
- * Set the current thread's cpus_allowed mask so that it
+ * Set the given thread's cpus_allowed mask so that it
  * will only run on cpus in the given pool.
- *
- * Returns 1 and fills in oldmask iff a cpumask was applied.
  */
-static inline int
-svc_pool_map_set_cpumask(unsigned int pidx, cpumask_t *oldmask)
+static inline void
+svc_pool_map_set_cpumask(struct task_struct *task, unsigned int pidx)
 {
 	struct svc_pool_map *m = &svc_pool_map;
+	unsigned int node = m->pool_to[pidx];
 
 	/*
 	 * The caller checks for sv_nrpools > 1, which
@@ -307,27 +307,18 @@ svc_pool_map_set_cpumask(unsigned int pidx, cpumask_t *oldmask)
 	 */
 	BUG_ON(m->count == 0);
 
-	switch (m->mode)
-	{
-	default:
-		return 0;
+	switch (m->mode) {
 	case SVC_POOL_PERCPU:
 	{
-		unsigned int cpu = m->pool_to[pidx];
-		cpumask_of_cpu_ptr(cpumask, cpu);
-
-		*oldmask = current->cpus_allowed;
-		set_cpus_allowed_ptr(current, cpumask);
-		return 1;
+		cpumask_of_cpu_ptr(cpumask, node);
+		set_cpus_allowed_ptr(task, cpumask);
+		break;
 	}
 	case SVC_POOL_PERNODE:
 	{
-		unsigned int node = m->pool_to[pidx];
 		node_to_cpumask_ptr(nodecpumask, node);
-
-		*oldmask = current->cpus_allowed;
-		set_cpus_allowed_ptr(current, nodecpumask);
-		return 1;
+		set_cpus_allowed_ptr(task, nodecpumask);
+		break;
 	}
 	}
 }
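Worth noting about the new svc_pool_map_set_cpumask(): it pins a caller-supplied task (the freshly created, not-yet-running kthread) rather than current, so the old save-and-restore of an oldmask disappears. Below is a minimal sketch of the same pattern using the generic kthread and cpumask APIs; my_thread_fn, start_pinned_worker() and the use of cpumask_of()/cpumask_of_node() (mainline stand-ins for the temporary cpumask_of_cpu_ptr/node_to_cpumask_ptr helpers in this diff) are illustrative assumptions, not code from the commit.

#include <linux/kthread.h>
#include <linux/cpumask.h>
#include <linux/topology.h>
#include <linux/sched.h>
#include <linux/err.h>

/* Hypothetical thread body; stands in for serv->sv_function. */
static int my_thread_fn(void *data);

/*
 * Create a worker and restrict it to one CPU or one NUMA node before it
 * ever runs: kthread_create() returns a stopped task, so the affinity is
 * already in place by the time wake_up_process() lets it execute.
 */
static struct task_struct *start_pinned_worker(void *data, unsigned int cpu,
					       int node, bool per_node)
{
	struct task_struct *task;

	task = kthread_create(my_thread_fn, data, "my-worker/%u", cpu);
	if (IS_ERR(task))
		return task;

	if (per_node)
		set_cpus_allowed_ptr(task, cpumask_of_node(node));
	else
		set_cpus_allowed_ptr(task, cpumask_of(cpu));

	wake_up_process(task);
	return task;
}

Because the task is created stopped, its cpumask is set before it runs a single instruction, which the old kernel_thread()-based code could only approximate by temporarily changing the parent's own cpumask.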
@@ -444,7 +435,7 @@ EXPORT_SYMBOL(svc_create);
 struct svc_serv *
 svc_create_pooled(struct svc_program *prog, unsigned int bufsize,
 		  void (*shutdown)(struct svc_serv *serv),
-		  svc_thread_fn func, int sig, struct module *mod)
+		  svc_thread_fn func, struct module *mod)
 {
 	struct svc_serv *serv;
 	unsigned int npools = svc_pool_map_get();
@@ -453,7 +444,6 @@ svc_create_pooled(struct svc_program *prog, unsigned int bufsize,
 
 	if (serv != NULL) {
 		serv->sv_function = func;
-		serv->sv_kill_signal = sig;
 		serv->sv_module = mod;
 	}
 
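With sv_kill_signal gone, every svc_create_pooled() caller drops one argument. Roughly, an nfsd-style call site changes as sketched below; the surrounding identifiers are recalled from callers of that era rather than taken from this hunk, so treat them as illustrative.

/* Before (sketch): the caller chose the signal later used to kill threads. */
nfsd_serv = svc_create_pooled(&nfsd_program, nfsd_max_blksize,
			      nfsd_last_thread, nfsd,
			      SIG_NOCLEAN, THIS_MODULE);

/* After (sketch): no signal argument; svc_set_num_threads() now sends SIGINT. */
nfsd_serv = svc_create_pooled(&nfsd_program, nfsd_max_blksize,
			      nfsd_last_thread, nfsd, THIS_MODULE);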
@@ -462,7 +452,8 @@ svc_create_pooled(struct svc_program *prog, unsigned int bufsize,
 EXPORT_SYMBOL(svc_create_pooled);
 
 /*
- * Destroy an RPC service. Should be called with the BKL held
+ * Destroy an RPC service. Should be called with appropriate locking to
+ * protect the sv_nrthreads, sv_permsocks and sv_tempsocks.
  */
 void
 svc_destroy(struct svc_serv *serv)
@@ -579,46 +570,6 @@ out_enomem:
 EXPORT_SYMBOL(svc_prepare_thread);
 
 /*
- * Create a thread in the given pool. Caller must hold BKL.
- * On a NUMA or SMP machine, with a multi-pool serv, the thread
- * will be restricted to run on the cpus belonging to the pool.
- */
-static int
-__svc_create_thread(svc_thread_fn func, struct svc_serv *serv,
-		    struct svc_pool *pool)
-{
-	struct svc_rqst	*rqstp;
-	int		error = -ENOMEM;
-	int		have_oldmask = 0;
-	cpumask_t	uninitialized_var(oldmask);
-
-	rqstp = svc_prepare_thread(serv, pool);
-	if (IS_ERR(rqstp)) {
-		error = PTR_ERR(rqstp);
-		goto out;
-	}
-
-	if (serv->sv_nrpools > 1)
-		have_oldmask = svc_pool_map_set_cpumask(pool->sp_id, &oldmask);
-
-	error = kernel_thread((int (*)(void *)) func, rqstp, 0);
-
-	if (have_oldmask)
-		set_cpus_allowed_ptr(current, &oldmask);
-
-	if (error < 0)
-		goto out_thread;
-	svc_sock_update_bufs(serv);
-	error = 0;
-out:
-	return error;
-
-out_thread:
-	svc_exit_thread(rqstp);
-	goto out;
-}
-
-/*
  * Choose a pool in which to create a new thread, for svc_set_num_threads
  */
 static inline struct svc_pool *
@@ -675,7 +626,7 @@ found_pool:
  * of threads the given number. If `pool' is non-NULL, applies
  * only to threads in that pool, otherwise round-robins between
  * all pools. Must be called with a svc_get() reference and
- * the BKL held.
+ * the BKL or another lock to protect access to svc_serv fields.
  *
  * Destroying threads relies on the service threads filling in
  * rqstp->rq_task, which only the nfs ones do. Assumes the serv
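For reference, a hedged sketch of driving this interface under the relaxed locking rule: the caller holds its own lock, takes the svc_get() reference the comment asks for, and requests an absolute thread count (a NULL pool spreads the threads across all pools). The helper and mutex names are invented for the sketch.

#include <linux/mutex.h>
#include <linux/sunrpc/svc.h>

/* Hypothetical lock standing in for whatever the service uses instead of the BKL. */
static DEFINE_MUTEX(my_svc_mutex);

/* Hypothetical helper: resize a service to "n" threads across all pools. */
static int my_resize_service(struct svc_serv *serv, int n)
{
	int error;

	mutex_lock(&my_svc_mutex);
	svc_get(serv);				/* the svc_get() reference required above */
	error = svc_set_num_threads(serv, NULL, n);	/* NULL pool: round-robin over all pools */
	svc_destroy(serv);			/* drop the reference; only frees at zero */
	mutex_unlock(&my_svc_mutex);

	return error;
}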
@@ -687,7 +638,9 @@ found_pool:
 int
 svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
 {
-	struct task_struct *victim;
+	struct svc_rqst	*rqstp;
+	struct task_struct *task;
+	struct svc_pool *chosen_pool;
 	int error = 0;
 	unsigned int state = serv->sv_nrthreads-1;
 
@@ -703,18 +656,34 @@ svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
 	/* create new threads */
 	while (nrservs > 0) {
 		nrservs--;
+		chosen_pool = choose_pool(serv, pool, &state);
+
+		rqstp = svc_prepare_thread(serv, chosen_pool);
+		if (IS_ERR(rqstp)) {
+			error = PTR_ERR(rqstp);
+			break;
+		}
+
 		__module_get(serv->sv_module);
-		error = __svc_create_thread(serv->sv_function, serv,
-					    choose_pool(serv, pool, &state));
-		if (error < 0) {
+		task = kthread_create(serv->sv_function, rqstp, serv->sv_name);
+		if (IS_ERR(task)) {
+			error = PTR_ERR(task);
 			module_put(serv->sv_module);
+			svc_exit_thread(rqstp);
 			break;
 		}
+
+		rqstp->rq_task = task;
+		if (serv->sv_nrpools > 1)
+			svc_pool_map_set_cpumask(task, chosen_pool->sp_id);
+
+		svc_sock_update_bufs(serv);
+		wake_up_process(task);
 	}
 	/* destroy old threads */
 	while (nrservs < 0 &&
-	       (victim = choose_victim(serv, pool, &state)) != NULL) {
-		send_sig(serv->sv_kill_signal, victim, 1);
+	       (task = choose_victim(serv, pool, &state)) != NULL) {
+		send_sig(SIGINT, task, 1);
 		nrservs++;
 	}
 
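Because serv->sv_function is now handed straight to kthread_create(), a service's thread function has to match kthread's int (*)(void *) prototype, and it still has to exit on the SIGINT sent by the destroy loop above, since kthreads ignore signals unless they explicitly allow them. A hypothetical thread body in that shape, loosely modelled on the nfsd/lockd loops of the period (names, timeout and header choices are assumptions for the sketch):

#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/sunrpc/types.h>
#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/svc_xprt.h>

/* Hypothetical svc_thread_fn: started via kthread_create(), stopped by SIGINT. */
static int my_svc_thread(void *data)
{
	struct svc_rqst *rqstp = data;
	int err;

	/* kthreads ignore signals by default, but shutdown is still signal-driven. */
	allow_signal(SIGINT);

	while (!signal_pending(current)) {
		/* Wait up to an hour for a request, then go around again. */
		err = svc_recv(rqstp, 60 * 60 * HZ);
		if (err == -EINTR)
			break;
		if (err < 0)
			continue;
		svc_process(rqstp);
	}

	/* Real code serializes this with the BKL or a service mutex; see below. */
	svc_exit_thread(rqstp);
	return 0;
}

Note that kthread_stop() is deliberately not used here; shutdown stays signal-driven, which is why the allow_signal(SIGINT) call matters.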
@@ -723,7 +692,8 @@ svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
 EXPORT_SYMBOL(svc_set_num_threads);
 
 /*
- * Called from a server thread as it's exiting. Caller must hold BKL.
+ * Called from a server thread as it's exiting. Caller must hold the BKL or
+ * the "service mutex", whichever is appropriate for the service.
  */
 void
 svc_exit_thread(struct svc_rqst *rqstp)
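The reworded comment anticipates services replacing the BKL with their own serialization, nfsd's nfsd_mutex being the obvious example. A minimal sketch of holding such a "service mutex" around the exit path; the mutex and wrapper names are invented for illustration.

#include <linux/mutex.h>
#include <linux/sunrpc/svc.h>

/* Hypothetical per-service lock, in the spirit of nfsd's nfsd_mutex. */
static DEFINE_MUTEX(my_svc_mutex);

/*
 * Thread exit path: take the same mutex that svc_set_num_threads() callers
 * hold, instead of relying on the BKL.
 */
static void my_svc_thread_exit(struct svc_rqst *rqstp)
{
	mutex_lock(&my_svc_mutex);
	svc_exit_thread(rqstp);
	mutex_unlock(&my_svc_mutex);
}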