author	Tejun Heo <tj@kernel.org>	2010-08-24 08:22:47 -0400
committer	Tejun Heo <tj@kernel.org>	2010-08-24 12:01:32 -0400
commit	e41e704bc4f49057fc68b643108366e6e6781aa3 (patch)
tree	8cc85208970ba0c9adf533903243e28c506f23ae
parent	972fa1c5316d18c8297123e08e9b6930ca34f888 (diff)
workqueue: improve destroy_workqueue() debuggability
Now that the worklist is global, having works pending after wq destruction can easily lead to an oops, and destroy_workqueue() has several BUG_ON()s to catch these cases.  Unfortunately, BUG_ON() doesn't tell much about how the work became pending after the final flush_workqueue().

This patch adds WQ_DYING, which is set before the final flush begins.  If a work is requested to be queued on a dying workqueue, WARN_ON_ONCE() is triggered and the request is ignored.  This clearly indicates which caller is trying to queue a work on a dying workqueue and keeps the system working in most cases.

The locking rule comment is updated such that the 'I' rule includes modifying the field from the destruction path.

Signed-off-by: Tejun Heo <tj@kernel.org>
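As an illustration (a hypothetical driver sketch, not part of this patch), the kind of bug the new check catches is a work item that unconditionally requeues itself and races with destroy_workqueue(): with WQ_DYING set before the final flush, the late queueing attempt now hits WARN_ON_ONCE() in __queue_work() and is dropped, instead of leaving a pending work behind to oops on later.  All example_* names below are made up for the sketch.

#include <linux/errno.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;	/* hypothetical workqueue */

static void example_fn(struct work_struct *work)
{
	/* ... do some work ... */

	/*
	 * Buggy: unconditional self-requeue.  Once destroy_workqueue()
	 * has set WQ_DYING, this queueing attempt triggers the new
	 * WARN_ON_ONCE() in __queue_work() and is ignored, pointing at
	 * the offending caller instead of oopsing after the workqueue
	 * is gone.
	 */
	queue_work(example_wq, work);
}

static DECLARE_WORK(example_work, example_fn);

static int example_init(void)
{
	example_wq = alloc_workqueue("example", 0, 0);
	if (!example_wq)
		return -ENOMEM;
	queue_work(example_wq, &example_work);
	return 0;
}

static void example_exit(void)
{
	/* Sets WQ_DYING, performs the final flush, then frees the wq. */
	destroy_workqueue(example_wq);
}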
-rw-r--r--	include/linux/workqueue.h	2
-rw-r--r--	kernel/workqueue.c	7
2 files changed, 8 insertions(+), 1 deletion(-)
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 4f9d277bcd9a..c959666eafca 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -241,6 +241,8 @@ enum {
 	WQ_HIGHPRI		= 1 << 4, /* high priority */
 	WQ_CPU_INTENSIVE	= 1 << 5, /* cpu instensive workqueue */
 
+	WQ_DYING		= 1 << 6, /* internal: workqueue is dying */
+
 	WQ_MAX_ACTIVE		= 512,	  /* I like 512, better ideas? */
 	WQ_MAX_UNBOUND_PER_CPU	= 4,	  /* 4 * #cpus for unbound wq */
 	WQ_DFL_ACTIVE		= WQ_MAX_ACTIVE / 2,
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index cc3456f96c56..362b50d092e2 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -87,7 +87,8 @@ enum {
 /*
  * Structure fields follow one of the following exclusion rules.
  *
- * I: Set during initialization and read-only afterwards.
+ * I: Modifiable by initialization/destruction paths and read-only for
+ *    everyone else.
  *
  * P: Preemption protected.  Disabling preemption is enough and should
  *    only be modified and accessed from the local cpu.
@@ -944,6 +945,9 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
 
 	debug_work_activate(work);
 
+	if (WARN_ON_ONCE(wq->flags & WQ_DYING))
+		return;
+
 	/* determine gcwq to use */
 	if (!(wq->flags & WQ_UNBOUND)) {
 		struct global_cwq *last_gcwq;
@@ -2828,6 +2832,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
 {
 	unsigned int cpu;
 
+	wq->flags |= WQ_DYING;
 	flush_workqueue(wq);
 
 	/*