author		Jeff Layton <jlayton@redhat.com>	2008-06-10 08:40:38 -0400
committer	J. Bruce Fields <bfields@citi.umich.edu>	2008-06-23 13:02:49 -0400
commit		9867d76ca16b3f455f9ca83861f4ce5c94a25928 (patch)
tree		1715f69545bf6e766e6e18b33c39270ea7bd15ce /net/sunrpc
parent		e096bbc6488d3e49d476bf986d33752709361277 (diff)
knfsd: convert knfsd to kthread API
This patch is rather large, but I couldn't figure out a way to break it
up that would remain bisectable. It does several things:
- change svc_thread_fn typedef to better match what kthread_create expects
  (see the sketch after the sign-offs)
- change svc_pool_map_set_cpumask to be more kthread friendly. Make it
  take a task arg and get rid of the "oldmask"
- have svc_set_num_threads call kthread_create directly
- eliminate __svc_create_thread
Signed-off-by: Jeff Layton <jlayton@redhat.com>
Signed-off-by: J. Bruce Fields <bfields@citi.umich.edu>
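
kthread_create() takes a thread function of type int (*threadfn)(void *data). The cast in the removed kernel_thread() call further down shows that the old svc_thread_fn did not have that shape, which is why the typedef itself is changed (presumably in the sunrpc headers, which sit outside this net/sunrpc-limited diffstat). A minimal sketch of a function matching what kthread_create() expects, using a made-up name (my_svc_fn):

#include <linux/kthread.h>
#include <linux/sunrpc/svc.h>

/*
 * Hypothetical sketch (my_svc_fn is a made-up name): a thread function
 * with the signature kthread_create() expects, int (*threadfn)(void *data).
 * The kthread data pointer carries the svc_rqst, as in the patch below.
 */
static int my_svc_fn(void *data)
{
	struct svc_rqst *rqstp = data;

	/* ... main service loop using rqstp goes here ... */
	(void)rqstp;
	return 0;
}

With the typedef aligned like this, serv->sv_function can be handed to kthread_create() directly, with no cast.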
Diffstat (limited to 'net/sunrpc')
-rw-r--r--	net/sunrpc/svc.c	100
1 file changed, 34 insertions, 66 deletions
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 7bffaff2a3ab..03a9f1a9e75c 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -18,6 +18,7 @@
 #include <linux/mm.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
+#include <linux/kthread.h>
 
 #include <linux/sunrpc/types.h>
 #include <linux/sunrpc/xdr.h>
@@ -291,15 +292,14 @@ svc_pool_map_put(void)
 
 
 /*
- * Set the current thread's cpus_allowed mask so that it
+ * Set the given thread's cpus_allowed mask so that it
  * will only run on cpus in the given pool.
- *
- * Returns 1 and fills in oldmask iff a cpumask was applied.
  */
-static inline int
-svc_pool_map_set_cpumask(unsigned int pidx, cpumask_t *oldmask)
+static inline void
+svc_pool_map_set_cpumask(struct task_struct *task, unsigned int pidx)
 {
 	struct svc_pool_map *m = &svc_pool_map;
+	unsigned int node = m->pool_to[pidx];
 
 	/*
 	 * The caller checks for sv_nrpools > 1, which
@@ -307,26 +307,17 @@ svc_pool_map_set_cpumask(unsigned int pidx, cpumask_t *oldmask)
 	 */
 	BUG_ON(m->count == 0);
 
-	switch (m->mode)
-	{
-	default:
-		return 0;
+	switch (m->mode) {
 	case SVC_POOL_PERCPU:
 	{
-		unsigned int cpu = m->pool_to[pidx];
-
-		*oldmask = current->cpus_allowed;
-		set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
-		return 1;
+		set_cpus_allowed_ptr(task, &cpumask_of_cpu(node));
+		break;
 	}
 	case SVC_POOL_PERNODE:
 	{
-		unsigned int node = m->pool_to[pidx];
 		node_to_cpumask_ptr(nodecpumask, node);
-
-		*oldmask = current->cpus_allowed;
-		set_cpus_allowed_ptr(current, nodecpumask);
-		return 1;
+		set_cpus_allowed_ptr(task, nodecpumask);
+		break;
 	}
 	}
 }
@@ -579,47 +570,6 @@ out_enomem:
 EXPORT_SYMBOL(svc_prepare_thread);
 
 /*
- * Create a thread in the given pool. Caller must hold BKL or another lock to
- * serialize access to the svc_serv struct. On a NUMA or SMP machine, with a
- * multi-pool serv, the thread will be restricted to run on the cpus belonging
- * to the pool.
- */
-static int
-__svc_create_thread(svc_thread_fn func, struct svc_serv *serv,
-		    struct svc_pool *pool)
-{
-	struct svc_rqst	*rqstp;
-	int		error = -ENOMEM;
-	int		have_oldmask = 0;
-	cpumask_t	uninitialized_var(oldmask);
-
-	rqstp = svc_prepare_thread(serv, pool);
-	if (IS_ERR(rqstp)) {
-		error = PTR_ERR(rqstp);
-		goto out;
-	}
-
-	if (serv->sv_nrpools > 1)
-		have_oldmask = svc_pool_map_set_cpumask(pool->sp_id, &oldmask);
-
-	error = kernel_thread((int (*)(void *)) func, rqstp, 0);
-
-	if (have_oldmask)
-		set_cpus_allowed(current, oldmask);
-
-	if (error < 0)
-		goto out_thread;
-	svc_sock_update_bufs(serv);
-	error = 0;
-out:
-	return error;
-
-out_thread:
-	svc_exit_thread(rqstp);
-	goto out;
-}
-
-/*
  * Choose a pool in which to create a new thread, for svc_set_num_threads
  */
 static inline struct svc_pool *
@@ -688,7 +638,9 @@ found_pool:
 int
 svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
 {
-	struct task_struct *victim;
+	struct svc_rqst *rqstp;
+	struct task_struct *task;
+	struct svc_pool *chosen_pool;
 	int error = 0;
 	unsigned int state = serv->sv_nrthreads-1;
 
@@ -704,18 +656,34 @@ svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
 	/* create new threads */
 	while (nrservs > 0) {
 		nrservs--;
+		chosen_pool = choose_pool(serv, pool, &state);
+
+		rqstp = svc_prepare_thread(serv, chosen_pool);
+		if (IS_ERR(rqstp)) {
+			error = PTR_ERR(rqstp);
+			break;
+		}
+
 		__module_get(serv->sv_module);
-		error = __svc_create_thread(serv->sv_function, serv,
-					    choose_pool(serv, pool, &state));
-		if (error < 0) {
+		task = kthread_create(serv->sv_function, rqstp, serv->sv_name);
+		if (IS_ERR(task)) {
+			error = PTR_ERR(task);
 			module_put(serv->sv_module);
+			svc_exit_thread(rqstp);
 			break;
 		}
+
+		rqstp->rq_task = task;
+		if (serv->sv_nrpools > 1)
+			svc_pool_map_set_cpumask(task, chosen_pool->sp_id);
+
+		svc_sock_update_bufs(serv);
+		wake_up_process(task);
 	}
 	/* destroy old threads */
 	while (nrservs < 0 &&
-	       (victim = choose_victim(serv, pool, &state)) != NULL) {
-		send_sig(serv->sv_kill_signal, victim, 1);
+	       (task = choose_victim(serv, pool, &state)) != NULL) {
+		send_sig(serv->sv_kill_signal, task, 1);
 		nrservs++;
 	}
 
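
A property of the kthread API that the new code relies on: kthread_create() returns a task that has not yet been scheduled, so svc_set_num_threads() can record rq_task and pin the thread to its pool's CPUs before it ever runs, and only then call wake_up_process(). A stripped-down sketch of that create/configure/wake sequence (the helper name start_pinned_thread and the "svc-example" thread name are hypothetical; error handling beyond IS_ERR() and the svc-specific bookkeeping are omitted):

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/cpumask.h>
#include <linux/sched.h>

/*
 * Hypothetical helper illustrating the sequence the patch follows:
 * create the kthread (it starts out not running), restrict its CPU
 * affinity while it is still stopped, then wake it up.
 */
static struct task_struct *start_pinned_thread(int (*fn)(void *), void *data,
						const cpumask_t *cpus)
{
	struct task_struct *task;

	task = kthread_create(fn, data, "svc-example");
	if (IS_ERR(task))
		return task;			/* caller checks with IS_ERR() */

	set_cpus_allowed_ptr(task, cpus);	/* safe: thread has not run yet */
	wake_up_process(task);
	return task;
}

This ordering is also why the oldmask save-and-restore dance in the deleted __svc_create_thread() is no longer needed: the affinity is applied to the new task directly, rather than being set on current and inherited across kernel_thread().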