| author | Paul Mackerras <paulus@samba.org> | 2008-05-09 06:12:06 -0400 |
|---|---|---|
| committer | Paul Mackerras <paulus@samba.org> | 2008-05-09 06:12:06 -0400 |
| commit | 2a5f2e3e6cd1ce9fb3f8b186b6bc9aa1f1497a92 (patch) | |
| tree | b2306840f227972a7c9d4a2b75e516fe81358ce8 /kernel/workqueue.c | |
| parent | 02539d71fa98d5737bb668b02286c76241e4bac9 (diff) | |
| parent | 78be76476a34a77f0ea9db2f78ba46a2b0fd5ab5 (diff) | |
Merge branch 'for-2.6.26' of master.kernel.org:/pub/scm/linux/kernel/git/jwboyer/powerpc-4xx into merge
Diffstat (limited to 'kernel/workqueue.c')
| -rw-r--r-- | kernel/workqueue.c | 32 |
1 file changed, 16 insertions(+), 16 deletions(-)
```diff
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 00ff4d08e370..29fc39f1029c 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -158,8 +158,8 @@ static void __queue_work(struct cpu_workqueue_struct *cwq,
  *
  * Returns 0 if @work was already on a queue, non-zero otherwise.
  *
- * We queue the work to the CPU it was submitted, but there is no
- * guarantee that it will be processed by that CPU.
+ * We queue the work to the CPU on which it was submitted, but if the CPU dies
+ * it can be processed by another CPU.
  */
 int queue_work(struct workqueue_struct *wq, struct work_struct *work)
 {
```
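The reworded comment states the queue_work() contract: work is queued on the submitting CPU but may run elsewhere if that CPU goes offline. As illustration only, a minimal caller sketch against the 2.6.26-era API (the queue name, handler, and module hooks below are hypothetical, not part of this patch):

```c
#include <linux/init.h>
#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;	/* hypothetical private queue */

static void my_handler(struct work_struct *work)
{
	/* Runs in process context, normally on the CPU that queued it;
	 * after a CPU-hotplug event it may run on another CPU. */
	printk(KERN_INFO "my_handler: work executed\n");
}

static DECLARE_WORK(my_work, my_handler);

static int __init my_init(void)
{
	my_wq = create_workqueue("my_wq");
	if (!my_wq)
		return -ENOMEM;
	queue_work(my_wq, &my_work);	/* non-zero: newly queued */
	return 0;
}
module_init(my_init);
```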
```diff
@@ -195,7 +195,6 @@ static void delayed_work_timer_fn(unsigned long __data)
 int queue_delayed_work(struct workqueue_struct *wq,
 			struct delayed_work *dwork, unsigned long delay)
 {
-	timer_stats_timer_set_start_info(&dwork->timer);
 	if (delay == 0)
 		return queue_work(wq, &dwork->work);
 
@@ -219,11 +218,12 @@ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
 	struct timer_list *timer = &dwork->timer;
 	struct work_struct *work = &dwork->work;
 
-	timer_stats_timer_set_start_info(&dwork->timer);
 	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
 		BUG_ON(timer_pending(timer));
 		BUG_ON(!list_empty(&work->entry));
 
+		timer_stats_timer_set_start_info(&dwork->timer);
+
 		/* This stores cwq for the moment, for the timer_fn */
 		set_wq_data(work, wq_per_cpu(wq, raw_smp_processor_id()));
 		timer->expires = jiffies + delay;
```
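With this reordering, timer_stats_timer_set_start_info() runs only on the branch that actually claims the pending bit and arms the timer, rather than unconditionally in every caller. A hedged sketch of the delayed-work API being patched (handler and function names hypothetical):

```c
#include <linux/jiffies.h>
#include <linux/workqueue.h>

static void my_delayed_handler(struct work_struct *work)
{
	printk(KERN_INFO "delayed work ran\n");
}

static DECLARE_DELAYED_WORK(my_dwork, my_delayed_handler);

static void arm_on_cpu0(struct workqueue_struct *wq)
{
	/* Arm the work to run on CPU 0 roughly one second from now.
	 * Returns 0 if my_dwork was already pending, non-zero otherwise. */
	queue_delayed_work_on(0, wq, &my_dwork, HZ);
}
```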
```diff
@@ -247,7 +247,7 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
 	if (cwq->run_depth > 3) {
 		/* morton gets to eat his hat */
 		printk("%s: recursion depth exceeded: %d\n",
-			__FUNCTION__, cwq->run_depth);
+			__func__, cwq->run_depth);
 		dump_stack();
 	}
 	while (!list_empty(&cwq->worklist)) {
```
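__FUNCTION__ is a GCC-specific spelling; __func__ is the equivalent predefined identifier standardized in C99, which is why the kernel tree migrated to it. A trivial illustration of the identifier itself:

```c
#include <stdio.h>

static void report(void)
{
	/* __func__ expands to the enclosing function's name ("report"). */
	printf("%s: called\n", __func__);
}
```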
```diff
@@ -564,7 +564,6 @@ EXPORT_SYMBOL(schedule_work);
 int schedule_delayed_work(struct delayed_work *dwork,
 			unsigned long delay)
 {
-	timer_stats_timer_set_start_info(&dwork->timer);
 	return queue_delayed_work(keventd_wq, dwork, delay);
 }
 EXPORT_SYMBOL(schedule_delayed_work);
@@ -581,7 +580,6 @@ EXPORT_SYMBOL(schedule_delayed_work);
 int schedule_delayed_work_on(int cpu,
 			struct delayed_work *dwork, unsigned long delay)
 {
-	timer_stats_timer_set_start_info(&dwork->timer);
 	return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
 }
 EXPORT_SYMBOL(schedule_delayed_work_on);
```
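Both schedule_delayed_work() variants simply forward to the queue_delayed_work*() functions on the shared keventd_wq, so once queue_delayed_work_on() records the timer-stats start info itself (see the hunk above), the duplicate calls here became redundant. A minimal hedged usage sketch (handler name hypothetical):

```c
#include <linux/jiffies.h>
#include <linux/workqueue.h>

static void tick(struct work_struct *work);
static DECLARE_DELAYED_WORK(tick_work, tick);

static void tick(struct work_struct *work)
{
	/* Periodic self-rearming work on the shared kernel-wide queue. */
	schedule_delayed_work(&tick_work, 5 * HZ);
}
```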
```diff
@@ -772,7 +770,7 @@ struct workqueue_struct *__create_workqueue_key(const char *name,
 }
 EXPORT_SYMBOL_GPL(__create_workqueue_key);
 
-static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
+static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
 {
 	/*
 	 * Our caller is either destroy_workqueue() or CPU_DEAD,
@@ -808,19 +806,16 @@ static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
 void destroy_workqueue(struct workqueue_struct *wq)
 {
 	const cpumask_t *cpu_map = wq_cpu_map(wq);
-	struct cpu_workqueue_struct *cwq;
 	int cpu;
 
 	get_online_cpus();
 	spin_lock(&workqueue_lock);
 	list_del(&wq->list);
 	spin_unlock(&workqueue_lock);
-	put_online_cpus();
 
-	for_each_cpu_mask(cpu, *cpu_map) {
-		cwq = per_cpu_ptr(wq->cpu_wq, cpu);
-		cleanup_workqueue_thread(cwq, cpu);
-	}
+	for_each_cpu_mask(cpu, *cpu_map)
+		cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu));
+	put_online_cpus();
 
 	free_percpu(wq->cpu_wq);
 	kfree(wq);
```
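Moving put_online_cpus() below the loop keeps CPU hotplug blocked while each per-CPU worker thread is torn down, so no CPU can disappear mid-cleanup. From a caller's side the lifecycle is unchanged; a hedged teardown sketch (names hypothetical):

```c
#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;	/* hypothetical */

static void my_teardown(void)
{
	/* destroy_workqueue() flushes each per-CPU queue and stops its
	 * worker thread before freeing the workqueue itself. */
	if (my_wq)
		destroy_workqueue(my_wq);
}
```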
```diff
@@ -838,7 +833,6 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
 	action &= ~CPU_TASKS_FROZEN;
 
 	switch (action) {
-
 	case CPU_UP_PREPARE:
 		cpu_set(cpu, cpu_populated_map);
 	}
@@ -861,11 +855,17 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
 		case CPU_UP_CANCELED:
 			start_workqueue_thread(cwq, -1);
 		case CPU_DEAD:
-			cleanup_workqueue_thread(cwq, cpu);
+			cleanup_workqueue_thread(cwq);
 			break;
 		}
 	}
 
+	switch (action) {
+	case CPU_UP_CANCELED:
+	case CPU_DEAD:
+		cpu_clear(cpu, cpu_populated_map);
+	}
+
 	return NOTIFY_OK;
 }
```
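The added second switch clears the CPU from cpu_populated_map once its worker threads are gone, mirroring the cpu_set() done in CPU_UP_PREPARE. For context only, a hypothetical driver-side notifier of the same era and shape (not from this patch):

```c
#include <linux/cpu.h>
#include <linux/notifier.h>

static int my_cpu_callback(struct notifier_block *nfb,
			   unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	/* Mask off the _FROZEN variants, as the workqueue callback does. */
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		printk(KERN_INFO "cpu %u coming up\n", cpu);
		break;
	case CPU_UP_CANCELED:
	case CPU_DEAD:
		printk(KERN_INFO "cpu %u gone\n", cpu);
		break;
	}
	return NOTIFY_OK;
}
```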
