author     Oleg Nesterov <oleg@tv-sign.ru>                       2007-05-09 05:34:10 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-05-09 15:30:52 -0400
commit     7097a87afe937a5879528d52880c2d95f089e96c (patch)
tree       f06090c0f6ed327ee2894deb8ac7c588ab55bf4e
parent     3af24433efac62f451bfdb1cf1edb7181fb73645 (diff)
workqueue: kill run_scheduled_work()
Because it has no callers.

Actually, I think the whole idea of run_scheduled_work() was not right: it is
not good to mix "unqueue this work and execute its ->func()" in one function.

Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Cc: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
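For illustration only (this is not part of the commit): a caller that previously used run_scheduled_work() to force a queued work item to completion can get the same effect by queueing the work and then flushing it. The sketch below assumes the flush_work_keventd() helper that appears in the context lines of the header hunk further down; my_func, my_work and run_my_work_synchronously are hypothetical names.

/*
 * Minimal sketch, not from this commit: queue the work on keventd as
 * before, then wait for its ->func() to finish instead of running it
 * in place.  Assumes the flush_work_keventd() helper visible in the
 * header hunk below.
 */
#include <linux/workqueue.h>

static void my_func(struct work_struct *work)
{
	/* ... do the actual work ... */
}

static DECLARE_WORK(my_work, my_func);

static void run_my_work_synchronously(void)
{
	schedule_work(&my_work);	/* queue on keventd */
	flush_work_keventd(&my_work);	/* wait until my_func() has completed */
}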
-rw-r--r--  include/linux/workqueue.h |  1
-rw-r--r--  kernel/workqueue.c        | 73
2 files changed, 0 insertions, 74 deletions
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 26a70992dec8..2a58f16e1961 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -182,7 +182,6 @@ extern void flush_work(struct workqueue_struct *wq, struct work_struct *work);
 extern void flush_work_keventd(struct work_struct *work);
 
 extern int FASTCALL(schedule_work(struct work_struct *work));
-extern int FASTCALL(run_scheduled_work(struct work_struct *work));
 extern int FASTCALL(schedule_delayed_work(struct delayed_work *work, unsigned long delay));
 
 extern int schedule_delayed_work_on(int cpu, struct delayed_work *work, unsigned long delay);
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index a981add58fb9..ea422254f8bf 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -98,79 +98,6 @@ static inline void *get_wq_data(struct work_struct *work)
 	return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
 }
 
-static int __run_work(struct cpu_workqueue_struct *cwq, struct work_struct *work)
-{
-	int ret = 0;
-	unsigned long flags;
-
-	spin_lock_irqsave(&cwq->lock, flags);
-	/*
-	 * We need to re-validate the work info after we've gotten
-	 * the cpu_workqueue lock. We can run the work now iff:
-	 *
-	 *  - the wq_data still matches the cpu_workqueue_struct
-	 *  - AND the work is still marked pending
-	 *  - AND the work is still on a list (which will be this
-	 *    workqueue_struct list)
-	 *
-	 * All these conditions are important, because we
-	 * need to protect against the work being run right
-	 * now on another CPU (all but the last one might be
-	 * true if it's currently running and has not been
-	 * released yet, for example).
-	 */
-	if (get_wq_data(work) == cwq
-	    && work_pending(work)
-	    && !list_empty(&work->entry)) {
-		work_func_t f = work->func;
-		cwq->current_work = work;
-		list_del_init(&work->entry);
-		spin_unlock_irqrestore(&cwq->lock, flags);
-
-		if (!test_bit(WORK_STRUCT_NOAUTOREL, work_data_bits(work)))
-			work_release(work);
-		f(work);
-
-		spin_lock_irqsave(&cwq->lock, flags);
-		cwq->current_work = NULL;
-		ret = 1;
-	}
-	spin_unlock_irqrestore(&cwq->lock, flags);
-	return ret;
-}
-
-/**
- * run_scheduled_work - run scheduled work synchronously
- * @work: work to run
- *
- * This checks if the work was pending, and runs it
- * synchronously if so. It returns a boolean to indicate
- * whether it had any scheduled work to run or not.
- *
- * NOTE! This _only_ works for normal work_structs. You
- * CANNOT use this for delayed work, because the wq data
- * for delayed work will not point properly to the per-
- * CPU workqueue struct, but will change!
- */
-int fastcall run_scheduled_work(struct work_struct *work)
-{
-	for (;;) {
-		struct cpu_workqueue_struct *cwq;
-
-		if (!work_pending(work))
-			return 0;
-		if (list_empty(&work->entry))
-			return 0;
-		/* NOTE! This depends intimately on __queue_work! */
-		cwq = get_wq_data(work);
-		if (!cwq)
-			return 0;
-		if (__run_work(cwq, work))
-			return 1;
-	}
-}
-EXPORT_SYMBOL(run_scheduled_work);
-
 static void insert_work(struct cpu_workqueue_struct *cwq,
 			struct work_struct *work, int tail)
 {