Diffstat (limited to 'kernel')
-rw-r--r--  kernel/workqueue.c  73
1 file changed, 73 insertions(+), 0 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index c5257316f4b9..6b186750e9be 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -108,6 +108,79 @@ static inline void *get_wq_data(struct work_struct *work)
 	return (void *) (work->management & WORK_STRUCT_WQ_DATA_MASK);
 }
 
+static int __run_work(struct cpu_workqueue_struct *cwq, struct work_struct *work)
+{
+	int ret = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&cwq->lock, flags);
+	/*
+	 * We need to re-validate the work info after we've gotten
+	 * the cpu_workqueue lock. We can run the work now iff:
+	 *
+	 *  - the wq_data still matches the cpu_workqueue_struct
+	 *  - AND the work is still marked pending
+	 *  - AND the work is still on a list (which will be this
+	 *    workqueue_struct list)
+	 *
+	 * All these conditions are important, because we
+	 * need to protect against the work being run right
+	 * now on another CPU (all but the last one might be
+	 * true if it's currently running and has not been
+	 * released yet, for example).
+	 */
+	if (get_wq_data(work) == cwq
+	    && work_pending(work)
+	    && !list_empty(&work->entry)) {
+		work_func_t f = work->func;
+		list_del_init(&work->entry);
+		spin_unlock_irqrestore(&cwq->lock, flags);
+
+		if (!test_bit(WORK_STRUCT_NOAUTOREL, &work->management))
+			work_release(work);
+		f(work);
+
+		spin_lock_irqsave(&cwq->lock, flags);
+		cwq->remove_sequence++;
+		wake_up(&cwq->work_done);
+		ret = 1;
+	}
+	spin_unlock_irqrestore(&cwq->lock, flags);
+	return ret;
+}
+
+/**
+ * run_scheduled_work - run scheduled work synchronously
+ * @work: work to run
+ *
+ * This checks if the work was pending, and runs it
+ * synchronously if so. It returns a boolean to indicate
+ * whether it had any scheduled work to run or not.
+ *
+ * NOTE! This _only_ works for normal work_structs. You
+ * CANNOT use this for delayed work, because the wq data
+ * for delayed work will not point properly to the per-
+ * CPU workqueue struct, but will change!
+ */
+int fastcall run_scheduled_work(struct work_struct *work)
+{
+	for (;;) {
+		struct cpu_workqueue_struct *cwq;
+
+		if (!work_pending(work))
+			return 0;
+		if (list_empty(&work->entry))
+			return 0;
+		/* NOTE! This depends intimately on __queue_work! */
+		cwq = get_wq_data(work);
+		if (!cwq)
+			return 0;
+		if (__run_work(cwq, work))
+			return 1;
+	}
+}
+EXPORT_SYMBOL(run_scheduled_work);
+
 /* Preempt must be disabled. */
 static void __queue_work(struct cpu_workqueue_struct *cwq,
 			 struct work_struct *work)
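
For context, a minimal sketch (not part of the commit) of how a caller might use the new run_scheduled_work() to flush one specific pending work item synchronously instead of waiting for the workqueue thread. The item and handler names (my_work, my_work_fn, example_flush_one_item) are hypothetical; the interface shown is the 2.6.20-era work_struct API this patch is written against.

#include <linux/workqueue.h>

static void my_work_fn(struct work_struct *work)
{
	/* ... the deferred processing would go here ... */
}

/* hypothetical example item; DECLARE_WORK takes (name, handler) here */
static DECLARE_WORK(my_work, my_work_fn);

static void example_flush_one_item(void)
{
	schedule_work(&my_work);	/* queue it as usual */

	/*
	 * Run it right now in this context if it is still pending;
	 * returns 1 if the work was run, 0 if there was nothing to do.
	 * Per the NOTE in the kerneldoc above, this must not be used
	 * for delayed work.
	 */
	if (!run_scheduled_work(&my_work))
		pr_debug("my_work had already run or was never queued\n");
}

Note how this relies on the revalidation in __run_work(): the caller may race with the workqueue thread, so the pending/list checks are repeated under cwq->lock before the handler is invoked.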