author	Oleg Nesterov <oleg@tv-sign.ru>	2007-05-09 05:34:12 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-05-09 15:30:52 -0400
commit	b1f4ec172f75bc2f5cc4f4be69b5587660a955d2
tree	41a96f3996246d1f8667b9b84705a800f03d7c49
parent	dfb4b82e1c631b1a6057e77212996a890aa515b7
workqueue: introduce cpu_singlethread_map
The code like

	if (is_single_threaded(wq))
		do_something(singlethread_cpu);
	else {
		for_each_cpu_mask(cpu, cpu_populated_map)
			do_something(cpu);
	}

looks very annoying. We can add "static cpumask_t cpu_singlethread_map" and
simplify the code. Lessens .text a bit, and imho makes the code more readable.
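
Purely for illustration, a minimal sketch of the resulting pattern. The wq_cpu_map()
helper and both cpumasks are the ones introduced in the diff below; do_something()
is just the placeholder from the example above, not real kernel code:

	static const cpumask_t *wq_cpu_map(struct workqueue_struct *wq)
	{
		/* single-threaded workqueues only ever run on one CPU */
		return is_single_threaded(wq)
			? &cpu_singlethread_map : &cpu_populated_map;
	}

	/* every caller then collapses to a single loop: */
	const cpumask_t *cpu_map = wq_cpu_map(wq);
	int cpu;

	for_each_cpu_mask(cpu, *cpu_map)
		do_something(cpu);

Since cpu_singlethread_map contains only singlethread_cpu, the single-threaded
case simply iterates once.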
Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Cc: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--	kernel/workqueue.c	55
1 file changed, 25 insertions(+), 30 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index ce72d45c7fd8..6308a4bc6a82 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -69,6 +69,7 @@ static DEFINE_MUTEX(workqueue_mutex);
 static LIST_HEAD(workqueues);
 
 static int singlethread_cpu __read_mostly;
+static cpumask_t cpu_singlethread_map __read_mostly;
 /* optimization, we could use cpu_possible_map */
 static cpumask_t cpu_populated_map __read_mostly;
 
@@ -78,6 +79,12 @@ static inline int is_single_threaded(struct workqueue_struct *wq)
 	return list_empty(&wq->list);
 }
 
+static const cpumask_t *wq_cpu_map(struct workqueue_struct *wq)
+{
+	return is_single_threaded(wq)
+		? &cpu_singlethread_map : &cpu_populated_map;
+}
+
 /*
  * Set the workqueue on which a work item is to be run
  * - Must *only* be called if the pending flag is set
@@ -393,16 +400,12 @@ static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
  */
 void fastcall flush_workqueue(struct workqueue_struct *wq)
 {
-	might_sleep();
-
-	if (is_single_threaded(wq))
-		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, singlethread_cpu));
-	else {
-		int cpu;
+	const cpumask_t *cpu_map = wq_cpu_map(wq);
+	int cpu;
 
-		for_each_cpu_mask(cpu, cpu_populated_map)
-			flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
-	}
+	might_sleep();
+	for_each_cpu_mask(cpu, *cpu_map)
+		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
 }
 EXPORT_SYMBOL_GPL(flush_workqueue);
 
@@ -439,7 +442,9 @@ static void wait_on_work(struct cpu_workqueue_struct *cwq,
  */
 void flush_work(struct workqueue_struct *wq, struct work_struct *work)
 {
+	const cpumask_t *cpu_map = wq_cpu_map(wq);
 	struct cpu_workqueue_struct *cwq;
+	int cpu;
 
 	might_sleep();
 
@@ -457,14 +462,8 @@ void flush_work(struct workqueue_struct *wq, struct work_struct *work)
 	work_release(work);
 	spin_unlock_irq(&cwq->lock);
 
-	if (is_single_threaded(wq))
-		wait_on_work(per_cpu_ptr(wq->cpu_wq, singlethread_cpu), work);
-	else {
-		int cpu;
-
-		for_each_cpu_mask(cpu, cpu_populated_map)
-			wait_on_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
-	}
+	for_each_cpu_mask(cpu, *cpu_map)
+		wait_on_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
 }
 EXPORT_SYMBOL_GPL(flush_work);
 
@@ -757,22 +756,17 @@ static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
  */
 void destroy_workqueue(struct workqueue_struct *wq)
 {
+	const cpumask_t *cpu_map = wq_cpu_map(wq);
 	struct cpu_workqueue_struct *cwq;
+	int cpu;
 
-	if (is_single_threaded(wq)) {
-		cwq = per_cpu_ptr(wq->cpu_wq, singlethread_cpu);
-		cleanup_workqueue_thread(cwq, singlethread_cpu);
-	} else {
-		int cpu;
+	mutex_lock(&workqueue_mutex);
+	list_del(&wq->list);
+	mutex_unlock(&workqueue_mutex);
 
-		mutex_lock(&workqueue_mutex);
-		list_del(&wq->list);
-		mutex_unlock(&workqueue_mutex);
-
-		for_each_cpu_mask(cpu, cpu_populated_map) {
-			cwq = per_cpu_ptr(wq->cpu_wq, cpu);
-			cleanup_workqueue_thread(cwq, cpu);
-		}
+	for_each_cpu_mask(cpu, *cpu_map) {
+		cwq = per_cpu_ptr(wq->cpu_wq, cpu);
+		cleanup_workqueue_thread(cwq, cpu);
 	}
 
 	free_percpu(wq->cpu_wq);
@@ -831,6 +825,7 @@ void init_workqueues(void)
 {
 	cpu_populated_map = cpu_online_map;
 	singlethread_cpu = first_cpu(cpu_possible_map);
+	cpu_singlethread_map = cpumask_of_cpu(singlethread_cpu);
 	hotcpu_notifier(workqueue_cpu_callback, 0);
 	keventd_wq = create_workqueue("events");
 	BUG_ON(!keventd_wq);