author     Tejun Heo <tj@kernel.org>    2014-03-07 10:24:50 -0500
committer  Tejun Heo <tj@kernel.org>    2014-03-07 10:24:50 -0500
commit     f073f9229ff1137d3be20558bec3bfb77e3af2a4 (patch)
tree       f3a757aeaef233f894e2cc373061dcf5b3919d6c
parent     6c256cb6467e60b54f41170076c7f625e231c282 (diff)
workqueue: remove PREPARE_[DELAYED_]WORK()
Peter Hurley noticed that since a2c1c57be8d9 ("workqueue: consider work function when searching for busy work items"), a work item which gets assigned a different work function would break out of the non-reentrancy guarantee, as workqueue would consider it a different work item.

This is fragile and extremely subtle. PREPARE_[DELAYED_]WORK() have never been widely used and their semantics have always been somewhat iffy. If the work item is known not to be on a queue when PREPARE_WORK() is called, there's no difference from using INIT_WORK(). If the work item may be queued at the time of PREPARE_WORK(), we can't really tell whether the old or the new function will be executed the next time.

We really don't want this level of subtlety in the workqueue interface for such marginal use cases. The previous patches converted all existing users away from PREPARE_[DELAYED_]WORK(). Let's remove them.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Peter Hurley <peter@hurleysoftware.com>
Link: http://lkml.kernel.org/g/1392493119-9277-1-git-send-email-peter@hurleysoftware.com
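To illustrate the ambiguity the message describes, here is a minimal sketch of the "may already be queued" case; the work functions and call ordering are hypothetical, not taken from any in-tree user:

    /* Hypothetical illustration of the PREPARE_WORK() subtlety described above. */
    static void old_fn(struct work_struct *work) { /* ... */ }
    static void new_fn(struct work_struct *work) { /* ... */ }

    static DECLARE_WORK(my_work, old_fn);

    static void example(void)
    {
            queue_work(system_wq, &my_work);

            /*
             * If my_work has not started executing yet, the worker runs
             * new_fn(); if old_fn() is already running, the item now looks
             * like a different work item to the busy-item search, so the
             * non-reentrancy guarantee is lost.
             */
            PREPARE_WORK(&my_work, new_fn);
            queue_work(system_wq, &my_work);
    }

When the item is known not to be pending at all, the PREPARE_WORK() call above is no different from simply using INIT_WORK(), which is why the interface adds subtlety without adding capability.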
-rw-r--r--  include/linux/workqueue.h  15
1 file changed, 2 insertions(+), 13 deletions(-)
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 8059334a6b02..29da9e77c3bb 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -177,17 +177,6 @@ struct execute_work {
 #define DECLARE_DEFERRABLE_WORK(n, f)					\
 	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, TIMER_DEFERRABLE)
 
-/*
- * initialize a work item's function pointer
- */
-#define PREPARE_WORK(_work, _func)					\
-	do {								\
-		(_work)->func = (_func);				\
-	} while (0)
-
-#define PREPARE_DELAYED_WORK(_work, _func)				\
-	PREPARE_WORK(&(_work)->work, (_func))
-
 #ifdef CONFIG_DEBUG_OBJECTS_WORK
 extern void __init_work(struct work_struct *work, int onstack);
 extern void destroy_work_on_stack(struct work_struct *work);
@@ -217,7 +206,7 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
 		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
 		lockdep_init_map(&(_work)->lockdep_map, #_work, &__key, 0); \
 		INIT_LIST_HEAD(&(_work)->entry);			\
-		PREPARE_WORK((_work), (_func));				\
+		(_work)->func = (_func);				\
 	} while (0)
 #else
 #define __INIT_WORK(_work, _func, _onstack)				\
@@ -225,7 +214,7 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
 		__init_work((_work), _onstack);				\
 		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
 		INIT_LIST_HEAD(&(_work)->entry);			\
-		PREPARE_WORK((_work), (_func));				\
+		(_work)->func = (_func);				\
 	} while (0)
 #endif
 
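The commit message notes that earlier patches in the series converted all existing users away from PREPARE_[DELAYED_]WORK(). A minimal sketch of what such a conversion looks like; the driver-private struct, its dwork member, and resume_fn are hypothetical and not taken from any specific patch:

    /* Before the series: swap the handler on an existing delayed work item. */
    PREPARE_DELAYED_WORK(&dev->dwork, resume_fn);
    queue_delayed_work(system_wq, &dev->dwork, HZ);

    /*
     * After the series: reinitialize the item instead. This is only
     * equivalent while the item is known not to be queued or running;
     * otherwise, use separate work items, one per handler.
     */
    INIT_DELAYED_WORK(&dev->dwork, resume_fn);
    queue_delayed_work(system_wq, &dev->dwork, HZ);

With the macros removed, __INIT_WORK() simply assigns (_work)->func directly, as the two hunks above show.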