commit 9867d76ca16b3f455f9ca83861f4ce5c94a25928
tree 1715f69545bf6e766e6e18b33c39270ea7bd15ce
parent e096bbc6488d3e49d476bf986d33752709361277
author Jeff Layton <jlayton@redhat.com> 2008-06-10 08:40:38 -0400
committer J. Bruce Fields <bfields@citi.umich.edu> 2008-06-23 13:02:49 -0400
knfsd: convert knfsd to kthread API
This patch is rather large, but I couldn't figure out a way to break it
up that would remain bisectable. It does several things:
- change svc_thread_fn typedef to better match what kthread_create expects
- change svc_pool_map_set_cpumask to be more kthread friendly. Make it
take a task arg and get rid of the "oldmask"
- have svc_set_num_threads call kthread_create directly (a condensed
sketch of the resulting create/wake sequence follows the sign-offs)
- eliminate __svc_create_thread
Signed-off-by: Jeff Layton <jlayton@redhat.com>
Signed-off-by: J. Bruce Fields <bfields@citi.umich.edu>
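
For reference, the creation path that svc_set_num_threads switches to looks
roughly like this. This is a condensed sketch of the new code in
net/sunrpc/svc.c below, with the surrounding while loop and the module
refcounting trimmed, not a drop-in replacement:

	/* Allocate the per-thread svc_rqst before creating the thread. */
	rqstp = svc_prepare_thread(serv, chosen_pool);

	/* kthread_create() returns the new task_struct without running it. */
	task = kthread_create(serv->sv_function, rqstp, serv->sv_name);
	if (IS_ERR(task)) {
		error = PTR_ERR(task);
		svc_exit_thread(rqstp);		/* undo svc_prepare_thread() */
	} else {
		rqstp->rq_task = task;		/* state the thread will use */
		if (serv->sv_nrpools > 1)	/* pin to the pool's cpu or node */
			svc_pool_map_set_cpumask(task, chosen_pool->sp_id);
		svc_sock_update_bufs(serv);
		wake_up_process(task);		/* only now does sv_function (nfsd) run */
	}

Because the creator gets the task_struct back before the thread runs, the
cpumask and rqstp->rq_task setup move out of the thread function, which is
why daemonize() and the "rqstp->rq_task = current" assignment disappear
from nfsd().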
 fs/nfsd/nfssvc.c           |  45
 include/linux/sunrpc/svc.h |   2
 net/sunrpc/svc.c           | 100
 3 files changed, 64 insertions, 83 deletions
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index 6339cb70a08d..9e2156813710 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -21,6 +21,7 @@
 #include <linux/smp_lock.h>
 #include <linux/freezer.h>
 #include <linux/fs_struct.h>
+#include <linux/kthread.h>
 
 #include <linux/sunrpc/types.h>
 #include <linux/sunrpc/stats.h>
@@ -46,7 +47,7 @@
 #define SHUTDOWN_SIGS	(sigmask(SIGKILL) | sigmask(SIGHUP) | sigmask(SIGINT) | sigmask(SIGQUIT))
 
 extern struct svc_program	nfsd_program;
-static void			nfsd(struct svc_rqst *rqstp);
+static int			nfsd(void *vrqstp);
 struct timeval			nfssvc_boot;
 static atomic_t			nfsd_busy;
 static unsigned long		nfsd_last_call;
@@ -407,18 +408,19 @@ update_thread_usage(int busy_threads)
 /*
  * This is the NFS server kernel thread
  */
-static void
-nfsd(struct svc_rqst *rqstp)
+static int
+nfsd(void *vrqstp)
 {
+	struct svc_rqst *rqstp = (struct svc_rqst *) vrqstp;
 	struct fs_struct *fsp;
-	int		err;
 	sigset_t shutdown_mask, allowed_mask;
+	int err, preverr = 0;
+	unsigned int signo;
 
 	/* Lock module and set up kernel thread */
 	mutex_lock(&nfsd_mutex);
-	daemonize("nfsd");
 
-	/* After daemonize() this kernel thread shares current->fs
+	/* At this point, the thread shares current->fs
 	 * with the init process. We need to create files with a
 	 * umask of 0 instead of init's umask. */
 	fsp = copy_fs_struct(current->fs);
@@ -433,14 +435,18 @@ nfsd(struct svc_rqst *rqstp)
 	siginitsetinv(&shutdown_mask, SHUTDOWN_SIGS);
 	siginitsetinv(&allowed_mask, ALLOWED_SIGS);
 
+	/*
+	 * thread is spawned with all signals set to SIG_IGN, re-enable
+	 * the ones that matter
+	 */
+	for (signo = 1; signo <= _NSIG; signo++) {
+		if (!sigismember(&shutdown_mask, signo))
+			allow_signal(signo);
+	}
 
 	nfsdstats.th_cnt++;
-
-	rqstp->rq_task = current;
-
 	mutex_unlock(&nfsd_mutex);
 
-
 	/*
 	 * We want less throttling in balance_dirty_pages() so that nfs to
 	 * localhost doesn't cause nfsd to lock up due to all the client's
@@ -462,15 +468,25 @@ nfsd(struct svc_rqst *rqstp)
 		 */
 		while ((err = svc_recv(rqstp, 60*60*HZ)) == -EAGAIN)
 			;
-		if (err < 0)
+		if (err == -EINTR)
 			break;
+		else if (err < 0) {
+			if (err != preverr) {
+				printk(KERN_WARNING "%s: unexpected error "
+					"from svc_recv (%d)\n", __func__, -err);
+				preverr = err;
+			}
+			schedule_timeout_uninterruptible(HZ);
+			continue;
+		}
+
 		update_thread_usage(atomic_read(&nfsd_busy));
 		atomic_inc(&nfsd_busy);
 
 		/* Lock the export hash tables for reading. */
 		exp_readlock();
 
 		/* Process request with signals blocked. */
 		sigprocmask(SIG_SETMASK, &allowed_mask, NULL);
 
 		svc_process(rqstp);
@@ -481,14 +497,10 @@ nfsd(struct svc_rqst *rqstp)
 		atomic_dec(&nfsd_busy);
 	}
 
-	if (err != -EINTR)
-		printk(KERN_WARNING "nfsd: terminating on error %d\n", -err);
-
 	/* Clear signals before calling svc_exit_thread() */
 	flush_signals(current);
 
 	mutex_lock(&nfsd_mutex);
-
 	nfsdstats.th_cnt --;
 
 out:
@@ -498,6 +510,7 @@ out:
 	/* Release module */
 	mutex_unlock(&nfsd_mutex);
 	module_put_and_exit(0);
+	return 0;
 }
 
 static __be32 map_new_errors(u32 vers, __be32 nfserr)
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index 4b54c5fdcfd9..011d6d8100d8 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -22,7 +22,7 @@
 /*
  * This is the RPC server thread function prototype
  */
-typedef void		(*svc_thread_fn)(struct svc_rqst *);
+typedef int		(*svc_thread_fn)(void *);
 
 /*
  *
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 7bffaff2a3ab..03a9f1a9e75c 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -18,6 +18,7 @@
 #include <linux/mm.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
+#include <linux/kthread.h>
 
 #include <linux/sunrpc/types.h>
 #include <linux/sunrpc/xdr.h>
@@ -291,15 +292,14 @@ svc_pool_map_put(void)
 
 
 /*
- * Set the current thread's cpus_allowed mask so that it
+ * Set the given thread's cpus_allowed mask so that it
  * will only run on cpus in the given pool.
- *
- * Returns 1 and fills in oldmask iff a cpumask was applied.
  */
-static inline int
-svc_pool_map_set_cpumask(unsigned int pidx, cpumask_t *oldmask)
+static inline void
+svc_pool_map_set_cpumask(struct task_struct *task, unsigned int pidx)
 {
 	struct svc_pool_map *m = &svc_pool_map;
+	unsigned int node = m->pool_to[pidx];
 
 	/*
 	 * The caller checks for sv_nrpools > 1, which
@@ -307,26 +307,17 @@ svc_pool_map_set_cpumask(unsigned int pidx, cpumask_t *oldmask)
 	 */
 	BUG_ON(m->count == 0);
 
-	switch (m->mode)
-	{
-	default:
-		return 0;
+	switch (m->mode) {
 	case SVC_POOL_PERCPU:
 	{
-		unsigned int cpu = m->pool_to[pidx];
-
-		*oldmask = current->cpus_allowed;
-		set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
-		return 1;
+		set_cpus_allowed_ptr(task, &cpumask_of_cpu(node));
+		break;
 	}
 	case SVC_POOL_PERNODE:
 	{
-		unsigned int node = m->pool_to[pidx];
 		node_to_cpumask_ptr(nodecpumask, node);
-
-		*oldmask = current->cpus_allowed;
-		set_cpus_allowed_ptr(current, nodecpumask);
-		return 1;
+		set_cpus_allowed_ptr(task, nodecpumask);
+		break;
 	}
 	}
 }
@@ -579,47 +570,6 @@ out_enomem:
 EXPORT_SYMBOL(svc_prepare_thread);
 
 /*
- * Create a thread in the given pool.  Caller must hold BKL or another lock to
- * serialize access to the svc_serv struct.  On a NUMA or SMP machine, with a
- * multi-pool serv, the thread will be restricted to run on the cpus belonging
- * to the pool.
- */
-static int
-__svc_create_thread(svc_thread_fn func, struct svc_serv *serv,
-		    struct svc_pool *pool)
-{
-	struct svc_rqst	*rqstp;
-	int		error = -ENOMEM;
-	int		have_oldmask = 0;
-	cpumask_t	uninitialized_var(oldmask);
-
-	rqstp = svc_prepare_thread(serv, pool);
-	if (IS_ERR(rqstp)) {
-		error = PTR_ERR(rqstp);
-		goto out;
-	}
-
-	if (serv->sv_nrpools > 1)
-		have_oldmask = svc_pool_map_set_cpumask(pool->sp_id, &oldmask);
-
-	error = kernel_thread((int (*)(void *)) func, rqstp, 0);
-
-	if (have_oldmask)
-		set_cpus_allowed(current, oldmask);
-
-	if (error < 0)
-		goto out_thread;
-	svc_sock_update_bufs(serv);
-	error = 0;
-out:
-	return error;
-
-out_thread:
-	svc_exit_thread(rqstp);
-	goto out;
-}
-
-/*
  * Choose a pool in which to create a new thread, for svc_set_num_threads
  */
 static inline struct svc_pool *
@@ -688,7 +638,9 @@ found_pool:
 int
 svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
 {
-	struct task_struct *victim;
+	struct svc_rqst	*rqstp;
+	struct task_struct *task;
+	struct svc_pool *chosen_pool;
 	int error = 0;
 	unsigned int state = serv->sv_nrthreads-1;
 
@@ -704,18 +656,34 @@ svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
 	/* create new threads */
 	while (nrservs > 0) {
 		nrservs--;
+		chosen_pool = choose_pool(serv, pool, &state);
+
+		rqstp = svc_prepare_thread(serv, chosen_pool);
+		if (IS_ERR(rqstp)) {
+			error = PTR_ERR(rqstp);
+			break;
+		}
+
 		__module_get(serv->sv_module);
-		error = __svc_create_thread(serv->sv_function, serv,
-					    choose_pool(serv, pool, &state));
-		if (error < 0) {
+		task = kthread_create(serv->sv_function, rqstp, serv->sv_name);
+		if (IS_ERR(task)) {
+			error = PTR_ERR(task);
 			module_put(serv->sv_module);
+			svc_exit_thread(rqstp);
 			break;
 		}
+
+		rqstp->rq_task = task;
+		if (serv->sv_nrpools > 1)
+			svc_pool_map_set_cpumask(task, chosen_pool->sp_id);
+
+		svc_sock_update_bufs(serv);
+		wake_up_process(task);
 	}
 	/* destroy old threads */
 	while (nrservs < 0 &&
-	       (victim = choose_victim(serv, pool, &state)) != NULL) {
-		send_sig(serv->sv_kill_signal, victim, 1);
+	       (task = choose_victim(serv, pool, &state)) != NULL) {
+		send_sig(serv->sv_kill_signal, task, 1);
 		nrservs++;
 	}
 