author    Linus Torvalds <torvalds@linux-foundation.org>  2018-06-05 20:31:33 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2018-06-05 20:31:33 -0400
commit    af6c5d5e01ad9f2c9ca38cccaae6b5d67ddd241f
tree      5bfdb1ee5c0b904732929eae55fe6da13bd273d5
parent    9f25a8da423226d7797e35a132535186c531228b
parent    66448bc274cadedb71fda7d914e7c29d8dead217
Merge branch 'for-4.18' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq
Pull workqueue updates from Tejun Heo:
 - make kworkers report, in /proc/PID/comm, the workqueue they are
   executing or most recently executed, so the workqueue shows up in
   ps/top (see the example after the shortlog)

 - CONFIG_SMP shuffle: move code that isn't needed on UP builds inside
   CONFIG_SMP
* 'for-4.18' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq:
workqueue: move function definitions within CONFIG_SMP block
workqueue: Make sure struct worker is accessible for wq_worker_comm()
workqueue: Show the latest workqueue name in /proc/PID/{comm,stat,status}
proc: Consolidate task->comm formatting into proc_task_name()
workqueue: Set worker->desc to workqueue name by default
workqueue: Make worker_attach/detach_pool() update worker->pool
workqueue: Replace pool->attach_mutex with global wq_pool_attach_mutex
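
[ Example: what the first item looks like in practice.  A minimal,
  hypothetical sketch of a work item labelling itself with
  set_worker_desc() -- set_worker_desc() is the real API (declared in
  include/linux/workqueue.h below); the "frobnicate" work function and
  label are invented for illustration.  While frob_work_fn() runs, the
  executing kworker's /proc/PID/comm reads e.g. "kworker/3:1+frobnicate:7";
  afterwards the most recent desc is shown with a '-' prefix instead. ]

#include <linux/workqueue.h>

/* hypothetical work item, used only to illustrate the new reporting */
static void frob_work_fn(struct work_struct *work)
{
        /*
         * Override the default desc (the workqueue's name, recorded in
         * process_one_work() below) with a per-work label.
         */
        set_worker_desc("frobnicate:%d", 7);

        /* ... the actual work would go here ... */
}

static DECLARE_WORK(frob_work, frob_work_fn);
/* queued with schedule_work(&frob_work); runs on the system workqueue */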
 fs/proc/array.c             |  33
 fs/proc/base.c              |   5
 fs/proc/internal.h          |   2
 include/linux/workqueue.h   |   1
 kernel/workqueue.c          | 139
 kernel/workqueue_internal.h |   3
 6 files changed, 119 insertions(+), 64 deletions(-)
diff --git a/fs/proc/array.c b/fs/proc/array.c
index e6d7f41b6684..004077f1a7bf 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -96,22 +96,29 @@
 #include <asm/processor.h>
 #include "internal.h"
 
-static inline void task_name(struct seq_file *m, struct task_struct *p)
+void proc_task_name(struct seq_file *m, struct task_struct *p, bool escape)
 {
 	char *buf;
 	size_t size;
-	char tcomm[sizeof(p->comm)];
+	char tcomm[64];
 	int ret;
 
-	get_task_comm(tcomm, p);
-
-	seq_puts(m, "Name:\t");
+	if (p->flags & PF_WQ_WORKER)
+		wq_worker_comm(tcomm, sizeof(tcomm), p);
+	else
+		__get_task_comm(tcomm, sizeof(tcomm), p);
 
 	size = seq_get_buf(m, &buf);
-	ret = string_escape_str(tcomm, buf, size, ESCAPE_SPACE | ESCAPE_SPECIAL, "\n\\");
-	seq_commit(m, ret < size ? ret : -1);
+	if (escape) {
+		ret = string_escape_str(tcomm, buf, size,
+					ESCAPE_SPACE | ESCAPE_SPECIAL, "\n\\");
+		if (ret >= size)
+			ret = -1;
+	} else {
+		ret = strscpy(buf, tcomm, size);
+	}
 
-	seq_putc(m, '\n');
+	seq_commit(m, ret);
 }
 
 /*
@@ -390,7 +397,10 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
 {
 	struct mm_struct *mm = get_task_mm(task);
 
-	task_name(m, task);
+	seq_puts(m, "Name:\t");
+	proc_task_name(m, task, true);
+	seq_putc(m, '\n');
+
 	task_state(m, ns, pid, task);
 
 	if (mm) {
@@ -425,7 +435,6 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
 	u64 cutime, cstime, utime, stime;
 	u64 cgtime, gtime;
 	unsigned long rsslim = 0;
-	char tcomm[sizeof(task->comm)];
 	unsigned long flags;
 
 	state = *get_task_state(task);
@@ -452,8 +461,6 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
 		}
 	}
 
-	get_task_comm(tcomm, task);
-
 	sigemptyset(&sigign);
 	sigemptyset(&sigcatch);
 	cutime = cstime = utime = stime = 0;
@@ -520,7 +527,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
 
 	seq_put_decimal_ull(m, "", pid_nr_ns(pid, ns));
 	seq_puts(m, " (");
-	seq_puts(m, tcomm);
+	proc_task_name(m, task, false);
 	seq_puts(m, ") ");
 	seq_putc(m, state);
 	seq_put_decimal_ll(m, " ", ppid);
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 33ed1746927a..af128b374143 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -1563,9 +1563,8 @@ static int comm_show(struct seq_file *m, void *v)
 	if (!p)
 		return -ESRCH;
 
-	task_lock(p);
-	seq_printf(m, "%s\n", p->comm);
-	task_unlock(p);
+	proc_task_name(m, p, false);
+	seq_putc(m, '\n');
 
 	put_task_struct(p);
 
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index 43c70c9e6b62..93eb1906c28d 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -136,6 +136,8 @@ unsigned name_to_int(const struct qstr *qstr);
  */
 extern const struct file_operations proc_tid_children_operations;
 
+extern void proc_task_name(struct seq_file *m, struct task_struct *p,
+			   bool escape);
 extern int proc_tid_stat(struct seq_file *, struct pid_namespace *,
 			 struct pid *, struct task_struct *);
 extern int proc_tgid_stat(struct seq_file *, struct pid_namespace *,
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 39a0e215022a..60d673e15632 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -494,6 +494,7 @@ extern unsigned int work_busy(struct work_struct *work);
 extern __printf(1, 2) void set_worker_desc(const char *fmt, ...);
 extern void print_worker_info(const char *log_lvl, struct task_struct *task);
 extern void show_workqueue_state(void);
+extern void wq_worker_comm(char *buf, size_t size, struct task_struct *task);
 
 /**
  * queue_work - queue work on a workqueue
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index ca7959be8aaa..7ea75529eabb 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -66,7 +66,7 @@ enum {
 	 * be executing on any CPU.  The pool behaves as an unbound one.
 	 *
 	 * Note that DISASSOCIATED should be flipped only while holding
-	 * attach_mutex to avoid changing binding state while
+	 * wq_pool_attach_mutex to avoid changing binding state while
 	 * worker_attach_to_pool() is in progress.
 	 */
 	POOL_MANAGER_ACTIVE	= 1 << 0,	/* being managed */
@@ -123,7 +123,7 @@ enum {
 *     cpu or grabbing pool->lock is enough for read access.  If
 *     POOL_DISASSOCIATED is set, it's identical to L.
 *
- * A: pool->attach_mutex protected.
+ * A: wq_pool_attach_mutex protected.
 *
 * PL: wq_pool_mutex protected.
 *
@@ -166,7 +166,6 @@ struct worker_pool {
 						/* L: hash of busy workers */
 
 	struct worker		*manager;	/* L: purely informational */
-	struct mutex		attach_mutex;	/* attach/detach exclusion */
 	struct list_head	workers;	/* A: attached workers */
 	struct completion	*detach_completion; /* all workers detached */
 
@@ -297,6 +296,7 @@ static bool wq_numa_enabled;	/* unbound NUMA affinity enabled */
 static struct workqueue_attrs *wq_update_unbound_numa_attrs_buf;
 
 static DEFINE_MUTEX(wq_pool_mutex);	/* protects pools and workqueues list */
+static DEFINE_MUTEX(wq_pool_attach_mutex); /* protects worker attach/detach */
 static DEFINE_SPINLOCK(wq_mayday_lock);	/* protects wq->maydays list */
 static DECLARE_WAIT_QUEUE_HEAD(wq_manager_wait); /* wait for manager to go away */
 
@@ -399,14 +399,14 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
  * @worker: iteration cursor
  * @pool: worker_pool to iterate workers of
  *
- * This must be called with @pool->attach_mutex.
+ * This must be called with wq_pool_attach_mutex.
  *
  * The if/else clause exists only for the lockdep assertion and can be
  * ignored.
  */
 #define for_each_pool_worker(worker, pool)				\
 	list_for_each_entry((worker), &(pool)->workers, node)		\
-		if (({ lockdep_assert_held(&pool->attach_mutex); false; })) { } \
+		if (({ lockdep_assert_held(&wq_pool_attach_mutex); false; })) { } \
 		else
 
 /**
@@ -1724,7 +1724,7 @@ static struct worker *alloc_worker(int node)
 static void worker_attach_to_pool(struct worker *worker,
 				   struct worker_pool *pool)
 {
-	mutex_lock(&pool->attach_mutex);
+	mutex_lock(&wq_pool_attach_mutex);
 
 	/*
 	 * set_cpus_allowed_ptr() will fail if the cpumask doesn't have any
@@ -1733,37 +1733,40 @@ static void worker_attach_to_pool(struct worker *worker,
 	set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);
 
 	/*
-	 * The pool->attach_mutex ensures %POOL_DISASSOCIATED remains
-	 * stable across this function.  See the comments above the
-	 * flag definition for details.
+	 * The wq_pool_attach_mutex ensures %POOL_DISASSOCIATED remains
+	 * stable across this function.  See the comments above the flag
+	 * definition for details.
 	 */
 	if (pool->flags & POOL_DISASSOCIATED)
 		worker->flags |= WORKER_UNBOUND;
 
 	list_add_tail(&worker->node, &pool->workers);
+	worker->pool = pool;
 
-	mutex_unlock(&pool->attach_mutex);
+	mutex_unlock(&wq_pool_attach_mutex);
 }
 
 /**
  * worker_detach_from_pool() - detach a worker from its pool
  * @worker: worker which is attached to its pool
- * @pool: the pool @worker is attached to
  *
  * Undo the attaching which had been done in worker_attach_to_pool().  The
  * caller worker shouldn't access to the pool after detached except it has
  * other reference to the pool.
  */
-static void worker_detach_from_pool(struct worker *worker,
-				     struct worker_pool *pool)
+static void worker_detach_from_pool(struct worker *worker)
 {
+	struct worker_pool *pool = worker->pool;
 	struct completion *detach_completion = NULL;
 
-	mutex_lock(&pool->attach_mutex);
+	mutex_lock(&wq_pool_attach_mutex);
+
 	list_del(&worker->node);
+	worker->pool = NULL;
+
 	if (list_empty(&pool->workers))
 		detach_completion = pool->detach_completion;
-	mutex_unlock(&pool->attach_mutex);
+	mutex_unlock(&wq_pool_attach_mutex);
 
 	/* clear leftover flags without pool->lock after it is detached */
 	worker->flags &= ~(WORKER_UNBOUND | WORKER_REBOUND);
@@ -1799,7 +1802,6 @@ static struct worker *create_worker(struct worker_pool *pool)
 	if (!worker)
 		goto fail;
 
-	worker->pool = pool;
 	worker->id = id;
 
 	if (pool->cpu >= 0)
@@ -2086,6 +2088,12 @@ __acquires(&pool->lock)
 	worker->current_pwq = pwq;
 	work_color = get_work_color(work);
 
+	/*
+	 * Record wq name for cmdline and debug reporting, may get
+	 * overridden through set_worker_desc().
+	 */
+	strscpy(worker->desc, pwq->wq->name, WORKER_DESC_LEN);
+
 	list_del_init(&work->entry);
 
 	/*
@@ -2181,7 +2189,6 @@ __acquires(&pool->lock)
 	worker->current_work = NULL;
 	worker->current_func = NULL;
 	worker->current_pwq = NULL;
-	worker->desc_valid = false;
 	pwq_dec_nr_in_flight(pwq, work_color);
 }
 
@@ -2206,6 +2213,16 @@ static void process_scheduled_works(struct worker *worker)
 	}
 }
 
+static void set_pf_worker(bool val)
+{
+	mutex_lock(&wq_pool_attach_mutex);
+	if (val)
+		current->flags |= PF_WQ_WORKER;
+	else
+		current->flags &= ~PF_WQ_WORKER;
+	mutex_unlock(&wq_pool_attach_mutex);
+}
+
 /**
  * worker_thread - the worker thread function
  * @__worker: self
@@ -2224,7 +2241,7 @@ static int worker_thread(void *__worker)
 	struct worker_pool *pool = worker->pool;
 
 	/* tell the scheduler that this is a workqueue worker */
-	worker->task->flags |= PF_WQ_WORKER;
+	set_pf_worker(true);
 woke_up:
 	spin_lock_irq(&pool->lock);
 
@@ -2232,11 +2249,11 @@ woke_up:
 	if (unlikely(worker->flags & WORKER_DIE)) {
 		spin_unlock_irq(&pool->lock);
 		WARN_ON_ONCE(!list_empty(&worker->entry));
-		worker->task->flags &= ~PF_WQ_WORKER;
+		set_pf_worker(false);
 
 		set_task_comm(worker->task, "kworker/dying");
 		ida_simple_remove(&pool->worker_ida, worker->id);
-		worker_detach_from_pool(worker, pool);
+		worker_detach_from_pool(worker);
 		kfree(worker);
 		return 0;
 	}
@@ -2335,7 +2352,7 @@ static int rescuer_thread(void *__rescuer)
 	 * Mark rescuer as worker too.  As WORKER_PREP is never cleared, it
 	 * doesn't participate in concurrency management.
 	 */
-	rescuer->task->flags |= PF_WQ_WORKER;
+	set_pf_worker(true);
 repeat:
 	set_current_state(TASK_IDLE);
 
@@ -2367,7 +2384,6 @@ repeat:
 		worker_attach_to_pool(rescuer, pool);
 
 		spin_lock_irq(&pool->lock);
-		rescuer->pool = pool;
 
 		/*
 		 * Slurp in all works issued via this workqueue and
@@ -2417,10 +2433,9 @@ repeat:
 		if (need_more_worker(pool))
 			wake_up_worker(pool);
 
-		rescuer->pool = NULL;
 		spin_unlock_irq(&pool->lock);
 
-		worker_detach_from_pool(rescuer, pool);
+		worker_detach_from_pool(rescuer);
 
 		spin_lock_irq(&wq_mayday_lock);
 	}
@@ -2429,7 +2444,7 @@ repeat:
 
 	if (should_stop) {
 		__set_current_state(TASK_RUNNING);
-		rescuer->task->flags &= ~PF_WQ_WORKER;
+		set_pf_worker(false);
 		return 0;
 	}
 
@@ -3271,7 +3286,6 @@ static int init_worker_pool(struct worker_pool *pool)
 
 	timer_setup(&pool->mayday_timer, pool_mayday_timeout, 0);
 
-	mutex_init(&pool->attach_mutex);
 	INIT_LIST_HEAD(&pool->workers);
 
 	ida_init(&pool->worker_ida);
@@ -3354,10 +3368,10 @@ static void put_unbound_pool(struct worker_pool *pool)
 	WARN_ON(pool->nr_workers || pool->nr_idle);
 	spin_unlock_irq(&pool->lock);
 
-	mutex_lock(&pool->attach_mutex);
+	mutex_lock(&wq_pool_attach_mutex);
 	if (!list_empty(&pool->workers))
 		pool->detach_completion = &detach_completion;
-	mutex_unlock(&pool->attach_mutex);
+	mutex_unlock(&wq_pool_attach_mutex);
 
 	if (pool->detach_completion)
 		wait_for_completion(pool->detach_completion);
@@ -4347,7 +4361,6 @@ void set_worker_desc(const char *fmt, ...)
 		va_start(args, fmt);
 		vsnprintf(worker->desc, sizeof(worker->desc), fmt, args);
 		va_end(args);
-		worker->desc_valid = true;
 	}
 }
 
@@ -4371,7 +4384,6 @@ void print_worker_info(const char *log_lvl, struct task_struct *task)
 	char desc[WORKER_DESC_LEN] = { };
 	struct pool_workqueue *pwq = NULL;
 	struct workqueue_struct *wq = NULL;
-	bool desc_valid = false;
 	struct worker *worker;
 
 	if (!(task->flags & PF_WQ_WORKER))
@@ -4384,22 +4396,18 @@ void print_worker_info(const char *log_lvl, struct task_struct *task)
 	worker = kthread_probe_data(task);
 
 	/*
-	 * Carefully copy the associated workqueue's workfn and name.  Keep
-	 * the original last '\0' in case the original contains garbage.
+	 * Carefully copy the associated workqueue's workfn, name and desc.
+	 * Keep the original last '\0' in case the original is garbage.
 	 */
 	probe_kernel_read(&fn, &worker->current_func, sizeof(fn));
 	probe_kernel_read(&pwq, &worker->current_pwq, sizeof(pwq));
 	probe_kernel_read(&wq, &pwq->wq, sizeof(wq));
 	probe_kernel_read(name, wq->name, sizeof(name) - 1);
-
-	/* copy worker description */
-	probe_kernel_read(&desc_valid, &worker->desc_valid, sizeof(desc_valid));
-	if (desc_valid)
-		probe_kernel_read(desc, worker->desc, sizeof(desc) - 1);
+	probe_kernel_read(desc, worker->desc, sizeof(desc) - 1);
 
 	if (fn || name[0] || desc[0]) {
 		printk("%sWorkqueue: %s %pf", log_lvl, name, fn);
-		if (desc[0])
+		if (strcmp(name, desc))
 			pr_cont(" (%s)", desc);
 		pr_cont("\n");
 	}
@@ -4579,6 +4587,47 @@ void show_workqueue_state(void)
 	rcu_read_unlock_sched();
 }
 
+/* used to show worker information through /proc/PID/{comm,stat,status} */
+void wq_worker_comm(char *buf, size_t size, struct task_struct *task)
+{
+	int off;
+
+	/* always show the actual comm */
+	off = strscpy(buf, task->comm, size);
+	if (off < 0)
+		return;
+
+	/* stabilize PF_WQ_WORKER and worker pool association */
+	mutex_lock(&wq_pool_attach_mutex);
+
+	if (task->flags & PF_WQ_WORKER) {
+		struct worker *worker = kthread_data(task);
+		struct worker_pool *pool = worker->pool;
+
+		if (pool) {
+			spin_lock_irq(&pool->lock);
+			/*
+			 * ->desc tracks information (wq name or
+			 * set_worker_desc()) for the latest execution.  If
+			 * current, prepend '+', otherwise '-'.
+			 */
+			if (worker->desc[0] != '\0') {
+				if (worker->current_work)
+					scnprintf(buf + off, size - off, "+%s",
+						  worker->desc);
+				else
+					scnprintf(buf + off, size - off, "-%s",
+						  worker->desc);
+			}
+			spin_unlock_irq(&pool->lock);
+		}
+	}
+
+	mutex_unlock(&wq_pool_attach_mutex);
+}
+
+#ifdef CONFIG_SMP
+
 /*
  * CPU hotplug.
  *
@@ -4600,7 +4649,7 @@ static void unbind_workers(int cpu)
 	struct worker *worker;
 
 	for_each_cpu_worker_pool(pool, cpu) {
-		mutex_lock(&pool->attach_mutex);
+		mutex_lock(&wq_pool_attach_mutex);
 		spin_lock_irq(&pool->lock);
 
 		/*
@@ -4616,7 +4665,7 @@ static void unbind_workers(int cpu)
 		pool->flags |= POOL_DISASSOCIATED;
 
 		spin_unlock_irq(&pool->lock);
-		mutex_unlock(&pool->attach_mutex);
+		mutex_unlock(&wq_pool_attach_mutex);
 
 		/*
 		 * Call schedule() so that we cross rq->lock and thus can
@@ -4657,7 +4706,7 @@ static void rebind_workers(struct worker_pool *pool)
 {
 	struct worker *worker;
 
-	lockdep_assert_held(&pool->attach_mutex);
+	lockdep_assert_held(&wq_pool_attach_mutex);
 
 	/*
 	 * Restore CPU affinity of all workers.  As all idle workers should
@@ -4727,7 +4776,7 @@ static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu)
 	static cpumask_t cpumask;
 	struct worker *worker;
 
-	lockdep_assert_held(&pool->attach_mutex);
+	lockdep_assert_held(&wq_pool_attach_mutex);
 
 	/* is @cpu allowed for @pool? */
 	if (!cpumask_test_cpu(cpu, pool->attrs->cpumask))
@@ -4762,14 +4811,14 @@ int workqueue_online_cpu(unsigned int cpu)
 	mutex_lock(&wq_pool_mutex);
 
 	for_each_pool(pool, pi) {
-		mutex_lock(&pool->attach_mutex);
+		mutex_lock(&wq_pool_attach_mutex);
 
 		if (pool->cpu == cpu)
 			rebind_workers(pool);
 		else if (pool->cpu < 0)
 			restore_unbound_workers_cpumask(pool, cpu);
 
-		mutex_unlock(&pool->attach_mutex);
+		mutex_unlock(&wq_pool_attach_mutex);
 	}
 
 	/* update NUMA affinity of unbound workqueues */
@@ -4799,8 +4848,6 @@ int workqueue_offline_cpu(unsigned int cpu)
 	return 0;
 }
 
-#ifdef CONFIG_SMP
-
 struct work_for_cpu {
 	struct work_struct work;
 	long (*fn)(void *);
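
[ Userspace view: the series adds no new interface; the extra detail
  simply appears in /proc/PID/comm (and in the comm field of stat and
  status).  A small, hypothetical reader sketch -- it assumes a kernel
  with this series and takes a kworker PID as argv[1]: ]

#include <stdio.h>

int main(int argc, char **argv)
{
        char path[64];
        char comm[64];
        FILE *f;

        /* e.g. prints "kworker/0:1-events", or "kworker/0:1+events"
         * while a work item from the "events" workqueue is running */
        snprintf(path, sizeof(path), "/proc/%s/comm",
                 argc > 1 ? argv[1] : "self");
        f = fopen(path, "r");
        if (!f) {
                perror(path);
                return 1;
        }
        if (fgets(comm, sizeof(comm), f))
                fputs(comm, stdout);
        fclose(f);
        return 0;
}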
diff --git a/kernel/workqueue_internal.h b/kernel/workqueue_internal.h
index d390d1be3748..66fbb5a9e633 100644
--- a/kernel/workqueue_internal.h
+++ b/kernel/workqueue_internal.h
@@ -31,13 +31,12 @@ struct worker {
 	struct work_struct	*current_work;	/* L: work being processed */
 	work_func_t		current_func;	/* L: current_work's fn */
 	struct pool_workqueue	*current_pwq;	/* L: current_work's pwq */
-	bool			desc_valid;	/* ->desc is valid */
 	struct list_head	scheduled;	/* L: scheduled works */
 
 	/* 64 bytes boundary on 64bit, 32 on 32bit */
 
 	struct task_struct	*task;		/* I: worker task */
-	struct worker_pool	*pool;		/* I: the associated pool */
+	struct worker_pool	*pool;		/* A: the associated pool */
 						/* L: for rescuers */
 	struct list_head	node;		/* A: anchored at pool->workers */
 						/* A: runs through worker->node */
