author    Linus Torvalds <torvalds@woody.osdl.org>  2006-12-07 12:28:19 -0500
committer Linus Torvalds <torvalds@woody.osdl.org>  2006-12-07 12:28:19 -0500
commit    68380b581383c028830f79ec2670f4a193854aa6 (patch)
tree      49ea33a67213702ff56ca5843435c75749ac0ab3
parent    2fd8507d14ef7af3ae05316b3277044cf6daa381 (diff)
Add "run_scheduled_work()" workqueue function
This allows workqueue users to run just their own pending work, rather
than wait for the whole workqueue to finish running.  This solves the
deadlock with networking libphy that was due to other workqueue entries
possibly needing a lock that was held by the routine that wanted to
flush its own work.

It's not wonderful: if you absolutely need to synchronize with the work
function having been executed, any user strictly speaking should have
its own completion tracking logic, since when we run things explicitly
by hand, the generic workqueue layer can no longer help us synchronize.

Also, this is strictly only usable for work that has been scheduled
without any delayed timers.  You can not mix the new interface with
schedule_delayed_work().

But it's better than what we had currently.

Acked-by: Maciej W. Rozycki <macro@linux-mips.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
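For illustration only: a driver that queues its own work with schedule_work() could use the new call during teardown roughly as in the sketch below. The struct my_dev, my_work, and my_dev_stop() names are hypothetical and not part of this patch; the only interface assumed from the patch itself is run_scheduled_work().

	#include <linux/workqueue.h>
	#include <linux/interrupt.h>

	/* Hypothetical driver state; "my_dev" and "my_work" are illustrative only. */
	struct my_dev {
		struct work_struct	my_work;
		int			irq;
	};

	static void my_dev_stop(struct my_dev *dev)
	{
		/*
		 * Run only our own pending work instead of flushing the whole
		 * shared workqueue: run_scheduled_work() returns 1 if the work
		 * was pending and has now been run, 0 if there was nothing to
		 * do.  This is only valid for work queued with schedule_work(),
		 * never with schedule_delayed_work().
		 */
		run_scheduled_work(&dev->my_work);

		free_irq(dev->irq, dev);
	}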
-rw-r--r--	drivers/net/phy/phy.c		3
-rw-r--r--	include/linux/workqueue.h	1
-rw-r--r--	kernel/workqueue.c		73
3 files changed, 75 insertions, 2 deletions
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 4044bb1ada86..e175f3910b18 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -587,8 +587,7 @@ int phy_stop_interrupts(struct phy_device *phydev)
 	 * Finish any pending work; we might have been scheduled
 	 * to be called from keventd ourselves, though.
 	 */
-	if (!current_is_keventd())
-		flush_scheduled_work();
+	run_scheduled_work(&phydev->phy_queue);
 
 	free_irq(phydev->irq, phydev);
 
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index f0cb1df7b475..edef8d50b26b 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -162,6 +162,7 @@ extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
 extern void FASTCALL(flush_workqueue(struct workqueue_struct *wq));
 
 extern int FASTCALL(schedule_work(struct work_struct *work));
+extern int FASTCALL(run_scheduled_work(struct work_struct *work));
 extern int FASTCALL(schedule_delayed_work(struct delayed_work *work, unsigned long delay));
 
 extern int schedule_delayed_work_on(int cpu, struct delayed_work *work, unsigned long delay);
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index c5257316f4b9..6b186750e9be 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -108,6 +108,79 @@ static inline void *get_wq_data(struct work_struct *work)
 	return (void *) (work->management & WORK_STRUCT_WQ_DATA_MASK);
 }
 
+static int __run_work(struct cpu_workqueue_struct *cwq, struct work_struct *work)
+{
+	int ret = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&cwq->lock, flags);
+	/*
+	 * We need to re-validate the work info after we've gotten
+	 * the cpu_workqueue lock. We can run the work now iff:
+	 *
+	 *  - the wq_data still matches the cpu_workqueue_struct
+	 *  - AND the work is still marked pending
+	 *  - AND the work is still on a list (which will be this
+	 *    workqueue_struct list)
+	 *
+	 * All these conditions are important, because we
+	 * need to protect against the work being run right
+	 * now on another CPU (all but the last one might be
+	 * true if it's currently running and has not been
+	 * released yet, for example).
+	 */
+	if (get_wq_data(work) == cwq
+	    && work_pending(work)
+	    && !list_empty(&work->entry)) {
+		work_func_t f = work->func;
+		list_del_init(&work->entry);
+		spin_unlock_irqrestore(&cwq->lock, flags);
+
+		if (!test_bit(WORK_STRUCT_NOAUTOREL, &work->management))
+			work_release(work);
+		f(work);
+
+		spin_lock_irqsave(&cwq->lock, flags);
+		cwq->remove_sequence++;
+		wake_up(&cwq->work_done);
+		ret = 1;
+	}
+	spin_unlock_irqrestore(&cwq->lock, flags);
+	return ret;
+}
+
+/**
+ * run_scheduled_work - run scheduled work synchronously
+ * @work: work to run
+ *
+ * This checks if the work was pending, and runs it
+ * synchronously if so. It returns a boolean to indicate
+ * whether it had any scheduled work to run or not.
+ *
+ * NOTE! This _only_ works for normal work_structs. You
+ * CANNOT use this for delayed work, because the wq data
+ * for delayed work will not point properly to the per-
+ * CPU workqueue struct, but will change!
+ */
+int fastcall run_scheduled_work(struct work_struct *work)
+{
+	for (;;) {
+		struct cpu_workqueue_struct *cwq;
+
+		if (!work_pending(work))
+			return 0;
+		if (list_empty(&work->entry))
+			return 0;
+		/* NOTE! This depends intimately on __queue_work! */
+		cwq = get_wq_data(work);
+		if (!cwq)
+			return 0;
+		if (__run_work(cwq, work))
+			return 1;
+	}
+}
+EXPORT_SYMBOL(run_scheduled_work);
+
 /* Preempt must be disabled. */
 static void __queue_work(struct cpu_workqueue_struct *cwq,
 			 struct work_struct *work)