Diffstat (limited to 'kernel/workqueue.c')
 -rw-r--r--  kernel/workqueue.c | 71
 1 file changed, 33 insertions(+), 38 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 4026d1871407..faf7622246da 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -841,43 +841,32 @@ static void wake_up_worker(struct worker_pool *pool)
 }
 
 /**
- * wq_worker_waking_up - a worker is waking up
+ * wq_worker_running - a worker is running again
  * @task: task waking up
- * @cpu: CPU @task is waking up to
  *
- * This function is called during try_to_wake_up() when a worker is
- * being awoken.
- *
- * CONTEXT:
- * spin_lock_irq(rq->lock)
+ * This function is called when a worker returns from schedule()
  */
-void wq_worker_waking_up(struct task_struct *task, int cpu)
+void wq_worker_running(struct task_struct *task)
 {
 	struct worker *worker = kthread_data(task);
 
-	if (!(worker->flags & WORKER_NOT_RUNNING)) {
-		WARN_ON_ONCE(worker->pool->cpu != cpu);
+	if (!worker->sleeping)
+		return;
+	if (!(worker->flags & WORKER_NOT_RUNNING))
 		atomic_inc(&worker->pool->nr_running);
-	}
+	worker->sleeping = 0;
 }
 
 /**
  * wq_worker_sleeping - a worker is going to sleep
  * @task: task going to sleep
  *
- * This function is called during schedule() when a busy worker is
- * going to sleep. Worker on the same cpu can be woken up by
- * returning pointer to its task.
- *
- * CONTEXT:
- * spin_lock_irq(rq->lock)
- *
- * Return:
- * Worker task on @cpu to wake up, %NULL if none.
+ * This function is called from schedule() when a busy worker is
+ * going to sleep.
  */
-struct task_struct *wq_worker_sleeping(struct task_struct *task)
+void wq_worker_sleeping(struct task_struct *task)
 {
-	struct worker *worker = kthread_data(task), *to_wakeup = NULL;
+	struct worker *next, *worker = kthread_data(task);
 	struct worker_pool *pool;
 
 	/*
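
With wq_worker_waking_up() removed, nothing workqueue-related runs under the
runqueue lock in try_to_wake_up() any more; the worker notifies the workqueue
code itself on either side of schedule(). A minimal sketch of the assumed
caller pairing in kernel/sched/core.c (hook names as introduced by this
series; treat the bodies as illustrative, not the exact upstream code):

	/* Called from schedule() before the worker task blocks. */
	static inline void sched_submit_work(struct task_struct *tsk)
	{
		/* PF_WQ_WORKER marks kworker tasks; let the workqueue
		 * code adjust concurrency accounting before sleeping. */
		if (tsk->flags & PF_WQ_WORKER)
			wq_worker_sleeping(tsk);
	}

	/* Called after schedule() returns control to the task. */
	static inline void sched_update_worker(struct task_struct *tsk)
	{
		if (tsk->flags & PF_WQ_WORKER)
			wq_worker_running(tsk);
	}
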
@@ -886,13 +875,15 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task)
 	 * checking NOT_RUNNING.
 	 */
 	if (worker->flags & WORKER_NOT_RUNNING)
-		return NULL;
+		return;
 
 	pool = worker->pool;
 
-	/* this can only happen on the local cpu */
-	if (WARN_ON_ONCE(pool->cpu != raw_smp_processor_id()))
-		return NULL;
+	if (WARN_ON_ONCE(worker->sleeping))
+		return;
+
+	worker->sleeping = 1;
+	spin_lock_irq(&pool->lock);
 
 	/*
 	 * The counterpart of the following dec_and_test, implied mb,
@@ -906,9 +897,12 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task)
 	 * lock is safe.
 	 */
 	if (atomic_dec_and_test(&pool->nr_running) &&
-	    !list_empty(&pool->worklist))
-		to_wakeup = first_idle_worker(pool);
-	return to_wakeup ? to_wakeup->task : NULL;
+	    !list_empty(&pool->worklist)) {
+		next = first_idle_worker(pool);
+		if (next)
+			wake_up_process(next->task);
+	}
+	spin_unlock_irq(&pool->lock);
 }
 
 /**
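
The worker->sleeping handshake only works if the flag lives on struct worker;
a sketch of the companion change this diff assumes in
kernel/workqueue_internal.h (field placement illustrative):

	struct worker {
		/* ... existing fields ... */
		struct task_struct	*task;		/* I: worker task */
		struct worker_pool	*pool;		/* A: the associated pool */

		/* Set in wq_worker_sleeping(), cleared in wq_worker_running().
		 * Only ever touched by the worker task itself, so it needs no
		 * locking; it ensures wq_worker_running() re-increments
		 * nr_running only when wq_worker_sleeping() actually
		 * decremented it (preemption enters __schedule() without
		 * going through the sleeping hook). */
		int			sleeping;
	};
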
@@ -2277,7 +2271,7 @@ __acquires(&pool->lock)
 
 	if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
 		pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d\n"
-		       "     last function: %pf\n",
+		       "     last function: %ps\n",
 		       current->comm, preempt_count(), task_pid_nr(current),
 		       worker->current_func);
 		debug_show_held_locks(current);
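
The printf-format change here and below is mechanical: %pf/%pF were needed on
architectures where a function pointer is a descriptor rather than a code
address (ia64, ppc64, parisc64); once vsprintf learned to dereference such
descriptors itself, %ps/%pS do the right thing everywhere and %pf/%pF were
deprecated. A small illustrative helper (hypothetical, not part of this diff):

	#include <linux/printk.h>
	#include <linux/workqueue.h>

	static void report_current_fn(work_func_t fn)
	{
		/* %ps prints just the symbol name; %pS would also append
		 * offset/size, e.g. "vmstat_update+0x0/0x90". */
		pr_info("last function: %ps\n", fn);
	}
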
@@ -2596,11 +2590,11 @@ static void check_flush_dependency(struct workqueue_struct *target_wq,
 	worker = current_wq_worker();
 
 	WARN_ONCE(current->flags & PF_MEMALLOC,
-		  "workqueue: PF_MEMALLOC task %d(%s) is flushing !WQ_MEM_RECLAIM %s:%pf",
+		  "workqueue: PF_MEMALLOC task %d(%s) is flushing !WQ_MEM_RECLAIM %s:%ps",
 		  current->pid, current->comm, target_wq->name, target_func);
 	WARN_ONCE(worker && ((worker->current_pwq->wq->flags &
 			      (WQ_MEM_RECLAIM | __WQ_LEGACY)) == WQ_MEM_RECLAIM),
-		  "workqueue: WQ_MEM_RECLAIM %s:%pf is flushing !WQ_MEM_RECLAIM %s:%pf",
+		  "workqueue: WQ_MEM_RECLAIM %s:%ps is flushing !WQ_MEM_RECLAIM %s:%ps",
 		  worker->current_pwq->wq->name, worker->current_func,
 		  target_wq->name, target_func);
 }
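
For context, check_flush_dependency() enforces the rule that work running on a
WQ_MEM_RECLAIM workqueue (which is guaranteed forward progress via a rescuer
thread) must never wait on a workqueue without that guarantee. A sketch of the
dependency the second WARN_ONCE catches (queue and function names hypothetical):

	#include <linux/workqueue.h>

	static struct workqueue_struct *reclaim_wq;	/* alloc_workqueue("reclaim", WQ_MEM_RECLAIM, 0) */
	static struct work_struct plain_work;		/* queued on a plain, rescuer-less workqueue */

	static void reclaim_work_fn(struct work_struct *work)
	{
		/* Under memory pressure plain_work may never get a worker,
		 * so this wait can deadlock reclaim; the WARN_ONCE above
		 * reports exactly this pattern. */
		flush_work(&plain_work);
	}
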
@@ -4266,7 +4260,7 @@ struct workqueue_struct *alloc_workqueue(const char *fmt,
 	INIT_LIST_HEAD(&wq->list);
 
 	if (alloc_and_link_pwqs(wq) < 0)
-		goto err_free_wq;
+		goto err_unreg_lockdep;
 
 	if (wq_online && init_rescuer(wq) < 0)
 		goto err_destroy;
@@ -4292,9 +4286,10 @@ struct workqueue_struct *alloc_workqueue(const char *fmt,
 
 	return wq;
 
-err_free_wq:
+err_unreg_lockdep:
 	wq_unregister_lockdep(wq);
 	wq_free_lockdep(wq);
+err_free_wq:
 	free_workqueue_attrs(wq->unbound_attrs);
 	kfree(wq);
 	return NULL;
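
The label split restores the usual kernel unwind idiom: each failure point
jumps to the label that releases exactly what has been set up so far, so the
early failure paths (taken before wq_init_lockdep()) no longer tear down
lockdep state that was never registered. The generic shape of the pattern, as
a sketch with hypothetical types and helpers:

	#include <linux/slab.h>

	static struct foo *foo_create(void)
	{
		struct foo *f;

		f = kzalloc(sizeof(*f), GFP_KERNEL);
		if (!f)
			return NULL;
		if (setup_attrs(f))		/* nothing else to undo yet */
			goto err_free;
		register_lockdep(f);
		if (link_resources(f))		/* must also undo lockdep */
			goto err_unreg;
		return f;

	err_unreg:
		unregister_lockdep(f);
	err_free:
		free_attrs(f);
		kfree(f);
		return NULL;
	}
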
@@ -4586,7 +4581,7 @@ void print_worker_info(const char *log_lvl, struct task_struct *task)
 	probe_kernel_read(desc, worker->desc, sizeof(desc) - 1);
 
 	if (fn || name[0] || desc[0]) {
-		printk("%sWorkqueue: %s %pf", log_lvl, name, fn);
+		printk("%sWorkqueue: %s %ps", log_lvl, name, fn);
 		if (strcmp(name, desc))
 			pr_cont(" (%s)", desc);
 		pr_cont("\n");
@@ -4611,7 +4606,7 @@ static void pr_cont_work(bool comma, struct work_struct *work)
 		pr_cont("%s BAR(%d)", comma ? "," : "",
 			task_pid_nr(barr->task));
 	} else {
-		pr_cont("%s %pf", comma ? "," : "", work->func);
+		pr_cont("%s %ps", comma ? "," : "", work->func);
 	}
 }
 
@@ -4643,7 +4638,7 @@ static void show_pwq(struct pool_workqueue *pwq)
 		if (worker->current_pwq != pwq)
 			continue;
 
-		pr_cont("%s %d%s:%pf", comma ? "," : "",
+		pr_cont("%s %d%s:%ps", comma ? "," : "",
 			task_pid_nr(worker->task),
 			worker == pwq->wq->rescuer ? "(RESCUER)" : "",
 			worker->current_func);
@@ -4928,7 +4923,7 @@ static void rebind_workers(struct worker_pool *pool)
 	 *
 	 * WRITE_ONCE() is necessary because @worker->flags may be
 	 * tested without holding any lock in
-	 * wq_worker_waking_up(). Without it, NOT_RUNNING test may
+	 * wq_worker_running(). Without it, NOT_RUNNING test may
 	 * fail incorrectly leading to premature concurrency
 	 * management operations.
 	 */
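
The comment rename also documents the lockless pairing that makes the
WRITE_ONCE() necessary: rebind_workers() updates worker->flags under
pool->lock, while wq_worker_running() tests it with no lock held at all. An
illustrative pairing (the diff above relies on a plain load on the reader
side; READ_ONCE() appears here only to make the contract explicit):

	/* Writer, rebind_workers(), under pool->lock: a single, untorn
	 * store of the recomputed flags word. */
	WRITE_ONCE(worker->flags, worker_flags & ~WORKER_UNBOUND);

	/* Lockless reader, wq_worker_running(): observes either the old
	 * or the new flags value, never a partial update. */
	if (!(READ_ONCE(worker->flags) & WORKER_NOT_RUNNING))
		atomic_inc(&worker->pool->nr_running);
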
