author		Jeff Garzik <jeff@garzik.org>	2006-12-07 04:57:19 -0500
committer	Jeff Garzik <jeff@garzik.org>	2006-12-07 04:57:19 -0500
commit		8d1413b28033c49c7f1a4d320e815d7a5531acee (patch)
tree		b37281abef014cd60803b81c100388d7a475d49e /kernel/workqueue.c
parent		ed25ffa16434724f5ed825aa48734c7f3aefa203 (diff)
parent		620034c84d1d939717bdfbe02c51a3fee43541c3 (diff)
Merge branch 'master' into upstream
Conflicts:
	drivers/net/netxen/netxen_nic.h
	drivers/net/netxen/netxen_nic_main.c
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--	kernel/workqueue.c	109
1 file changed, 67 insertions(+), 42 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 17c2f03d2c27..8d1e7cb8a51a 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -80,6 +80,29 @@ static inline int is_single_threaded(struct workqueue_struct *wq)
 	return list_empty(&wq->list);
 }
 
+static inline void set_wq_data(struct work_struct *work, void *wq)
+{
+	unsigned long new, old, res;
+
+	/* assume the pending flag is already set and that the task has already
+	 * been queued on this workqueue */
+	new = (unsigned long) wq | (1UL << WORK_STRUCT_PENDING);
+	res = work->management;
+	if (res != new) {
+		do {
+			old = res;
+			new = (unsigned long) wq;
+			new |= (old & WORK_STRUCT_FLAG_MASK);
+			res = cmpxchg(&work->management, old, new);
+		} while (res != old);
+	}
+}
+
+static inline void *get_wq_data(struct work_struct *work)
+{
+	return (void *) (work->management & WORK_STRUCT_WQ_DATA_MASK);
+}
+
 /* Preempt must be disabled. */
 static void __queue_work(struct cpu_workqueue_struct *cwq,
 			 struct work_struct *work)
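The two helpers above fold the workqueue pointer and the flag bits into the single work->management word: the pointer is word-aligned, so its low bits are free to carry WORK_STRUCT_PENDING and friends, and the cmpxchg() loop retries until the pointer is swapped in without clobbering whatever flags were set concurrently. A minimal userspace sketch of the same packing trick (illustrative only; FLAG_MASK, set_data and get_data are made-up names, and GCC's __sync_val_compare_and_swap stands in for the kernel's cmpxchg):

	#include <stdint.h>
	#include <assert.h>

	#define FLAG_MASK	3UL	/* low two bits carry flags */

	static void set_data(uintptr_t *word, void *ptr)
	{
		uintptr_t old, new, res;

		assert(((uintptr_t)ptr & FLAG_MASK) == 0);	/* alignment frees the low bits */
		res = *word;
		do {
			old = res;
			new = (uintptr_t)ptr | (old & FLAG_MASK);	/* preserve current flags */
			res = __sync_val_compare_and_swap(word, old, new);
		} while (res != old);	/* retry if flags changed under us */
	}

	static void *get_data(uintptr_t *word)
	{
		return (void *)(*word & ~FLAG_MASK);
	}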
@@ -87,7 +110,7 @@ static void __queue_work(struct cpu_workqueue_struct *cwq,
 	unsigned long flags;
 
 	spin_lock_irqsave(&cwq->lock, flags);
-	work->wq_data = cwq;
+	set_wq_data(work, cwq);
 	list_add_tail(&work->entry, &cwq->worklist);
 	cwq->insert_sequence++;
 	wake_up(&cwq->more_work);
@@ -108,7 +131,7 @@ int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
 {
 	int ret = 0, cpu = get_cpu();
 
-	if (!test_and_set_bit(0, &work->pending)) {
+	if (!test_and_set_bit(WORK_STRUCT_PENDING, &work->management)) {
 		if (unlikely(is_single_threaded(wq)))
 			cpu = singlethread_cpu;
 		BUG_ON(!list_empty(&work->entry));
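With this hunk the pending marker stops being a separate work->pending word and becomes bit WORK_STRUCT_PENDING of work->management, so test_and_set_bit() atomically claims the work item in the same word that holds the queue pointer. The layout this file relies on is roughly the following reconstruction from the masks used above; the authoritative definitions live in include/linux/workqueue.h:

	enum {
		WORK_STRUCT_PENDING   = 0,	/* work item is pending execution */
		WORK_STRUCT_NOAUTOREL = 1,	/* pending bit must be released manually */
	};
	#define WORK_STRUCT_FLAG_MASK		(3UL)	/* low bits: flags */
	#define WORK_STRUCT_WQ_DATA_MASK	(~WORK_STRUCT_FLAG_MASK)	/* rest: wq pointer */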
@@ -122,38 +145,42 @@ EXPORT_SYMBOL_GPL(queue_work);
 
 static void delayed_work_timer_fn(unsigned long __data)
 {
-	struct work_struct *work = (struct work_struct *)__data;
-	struct workqueue_struct *wq = work->wq_data;
+	struct delayed_work *dwork = (struct delayed_work *)__data;
+	struct workqueue_struct *wq = get_wq_data(&dwork->work);
 	int cpu = smp_processor_id();
 
 	if (unlikely(is_single_threaded(wq)))
 		cpu = singlethread_cpu;
 
-	__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
+	__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), &dwork->work);
 }
 
 /**
  * queue_delayed_work - queue work on a workqueue after delay
  * @wq: workqueue to use
- * @work: work to queue
+ * @work: delayable work to queue
  * @delay: number of jiffies to wait before queueing
  *
  * Returns 0 if @work was already on a queue, non-zero otherwise.
  */
 int fastcall queue_delayed_work(struct workqueue_struct *wq,
-			struct work_struct *work, unsigned long delay)
+			struct delayed_work *dwork, unsigned long delay)
 {
 	int ret = 0;
-	struct timer_list *timer = &work->timer;
+	struct timer_list *timer = &dwork->timer;
+	struct work_struct *work = &dwork->work;
+
+	if (delay == 0)
+		return queue_work(wq, work);
 
-	if (!test_and_set_bit(0, &work->pending)) {
+	if (!test_and_set_bit(WORK_STRUCT_PENDING, &work->management)) {
 		BUG_ON(timer_pending(timer));
 		BUG_ON(!list_empty(&work->entry));
 
 		/* This stores wq for the moment, for the timer_fn */
-		work->wq_data = wq;
+		set_wq_data(work, wq);
 		timer->expires = jiffies + delay;
-		timer->data = (unsigned long)work;
+		timer->data = (unsigned long)dwork;
 		timer->function = delayed_work_timer_fn;
 		add_timer(timer);
 		ret = 1;
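This hunk depends on the struct delayed_work type introduced by the same series: the timer that used to sit in every work_struct now lives only in work items that are actually delayed, and the timer callback receives the delayed_work rather than the bare work_struct. The definition is roughly as follows (see include/linux/workqueue.h for the real one):

	struct delayed_work {
		struct work_struct work;	/* what ultimately gets queued */
		struct timer_list timer;	/* fires delayed_work_timer_fn */
	};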
@@ -172,19 +199,20 @@ EXPORT_SYMBOL_GPL(queue_delayed_work);
  * Returns 0 if @work was already on a queue, non-zero otherwise.
  */
 int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
-			struct work_struct *work, unsigned long delay)
+			struct delayed_work *dwork, unsigned long delay)
 {
 	int ret = 0;
-	struct timer_list *timer = &work->timer;
+	struct timer_list *timer = &dwork->timer;
+	struct work_struct *work = &dwork->work;
 
-	if (!test_and_set_bit(0, &work->pending)) {
+	if (!test_and_set_bit(WORK_STRUCT_PENDING, &work->management)) {
 		BUG_ON(timer_pending(timer));
 		BUG_ON(!list_empty(&work->entry));
 
 		/* This stores wq for the moment, for the timer_fn */
-		work->wq_data = wq;
+		set_wq_data(work, wq);
 		timer->expires = jiffies + delay;
-		timer->data = (unsigned long)work;
+		timer->data = (unsigned long)dwork;
 		timer->function = delayed_work_timer_fn;
 		add_timer_on(timer, cpu);
 		ret = 1;
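Callers of the delayed-work API now pass a struct delayed_work instead of a work_struct. A caller-side sketch under the new API (poll_wq and poll_fn are hypothetical; DECLARE_DELAYED_WORK comes from the same series):

	static void poll_fn(struct work_struct *work);	/* hypothetical handler */
	static DECLARE_DELAYED_WORK(poll_work, poll_fn);

	/* the queue functions now take the delayed_work, not its work_struct */
	queue_delayed_work(poll_wq, &poll_work, HZ);		/* run in ~1 second */
	queue_delayed_work_on(0, poll_wq, &poll_work, HZ);	/* same, pinned to CPU 0 */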
@@ -212,15 +240,15 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
 	while (!list_empty(&cwq->worklist)) {
 		struct work_struct *work = list_entry(cwq->worklist.next,
 						struct work_struct, entry);
-		void (*f) (void *) = work->func;
-		void *data = work->data;
+		work_func_t f = work->func;
 
 		list_del_init(cwq->worklist.next);
 		spin_unlock_irqrestore(&cwq->lock, flags);
 
-		BUG_ON(work->wq_data != cwq);
-		clear_bit(0, &work->pending);
-		f(data);
+		BUG_ON(get_wq_data(work) != cwq);
+		if (!test_bit(WORK_STRUCT_NOAUTOREL, &work->management))
+			work_release(work);
+		f(work);
 
 		spin_lock_irqsave(&cwq->lock, flags);
 		cwq->remove_sequence++;
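work_func_t is the new handler type, void (*)(struct work_struct *): instead of receiving an opaque data pointer, a handler gets the work item itself and recovers its context with container_of(). The work_release() call replaces the old clear_bit(0, &work->pending) and is skipped for items flagged WORK_STRUCT_NOAUTOREL, which must release the pending bit themselves. A handler sketch (struct mydev is hypothetical):

	struct mydev {
		int value;
		struct work_struct work;	/* embedded; no separate data pointer */
	};

	static void mydev_work_fn(struct work_struct *work)
	{
		/* recover the enclosing object from the embedded work_struct */
		struct mydev *dev = container_of(work, struct mydev, work);

		dev->value++;
	}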
@@ -468,38 +496,37 @@ EXPORT_SYMBOL(schedule_work);
 
 /**
  * schedule_delayed_work - put work task in global workqueue after delay
- * @work: job to be done
- * @delay: number of jiffies to wait
+ * @dwork: job to be done
+ * @delay: number of jiffies to wait or 0 for immediate execution
  *
  * After waiting for a given time this puts a job in the kernel-global
  * workqueue.
  */
-int fastcall schedule_delayed_work(struct work_struct *work, unsigned long delay)
+int fastcall schedule_delayed_work(struct delayed_work *dwork, unsigned long delay)
 {
-	return queue_delayed_work(keventd_wq, work, delay);
+	return queue_delayed_work(keventd_wq, dwork, delay);
 }
 EXPORT_SYMBOL(schedule_delayed_work);
 
 /**
  * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
  * @cpu: cpu to use
- * @work: job to be done
+ * @dwork: job to be done
  * @delay: number of jiffies to wait
  *
  * After waiting for a given time this puts a job in the kernel-global
  * workqueue on the specified CPU.
  */
 int schedule_delayed_work_on(int cpu,
-			struct work_struct *work, unsigned long delay)
+			struct delayed_work *dwork, unsigned long delay)
 {
-	return queue_delayed_work_on(cpu, keventd_wq, work, delay);
+	return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
 }
 EXPORT_SYMBOL(schedule_delayed_work_on);
 
 /**
  * schedule_on_each_cpu - call a function on each online CPU from keventd
  * @func: the function to call
- * @info: a pointer to pass to func()
  *
  * Returns zero on success.
  * Returns -ve errno on failure.
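Together with the delay == 0 short-circuit added to queue_delayed_work() above, a zero delay now degenerates to an immediate queue_work() instead of arming a timer for the next tick, which is what the reworded @delay kernel-doc refers to. For example (my_dwork is hypothetical):

	schedule_delayed_work(&my_dwork, 0);	/* queued immediately, no timer */
	schedule_delayed_work(&my_dwork, HZ);	/* queued after ~1 second */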
@@ -508,7 +535,7 @@ EXPORT_SYMBOL(schedule_delayed_work_on);
  *
  * schedule_on_each_cpu() is very slow.
  */
-int schedule_on_each_cpu(void (*func)(void *info), void *info)
+int schedule_on_each_cpu(work_func_t func)
 {
 	int cpu;
 	struct work_struct *works;
@@ -519,7 +546,7 @@ int schedule_on_each_cpu(void (*func)(void *info), void *info)
 
 	mutex_lock(&workqueue_mutex);
 	for_each_online_cpu(cpu) {
-		INIT_WORK(per_cpu_ptr(works, cpu), func, info);
+		INIT_WORK(per_cpu_ptr(works, cpu), func);
 		__queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu),
 				per_cpu_ptr(works, cpu));
 	}
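INIT_WORK loses its data argument in this series, since the handler now receives the work_struct itself; dynamically initialized users change as sketched below (dev and my_fn are hypothetical, and INIT_DELAYED_WORK is the delayed counterpart from the same series):

	/* old: INIT_WORK(&dev->work, my_fn, dev);
	 * new: my_fn() receives &dev->work and uses container_of() */
	INIT_WORK(&dev->work, my_fn);
	INIT_DELAYED_WORK(&dev->dwork, my_fn);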
@@ -539,12 +566,12 @@ EXPORT_SYMBOL(flush_scheduled_work);
  * cancel_rearming_delayed_workqueue - reliably kill off a delayed
  *			work whose handler rearms the delayed work.
  * @wq:   the controlling workqueue structure
- * @work: the delayed work struct
+ * @dwork: the delayed work struct
  */
 void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
-				       struct work_struct *work)
+				       struct delayed_work *dwork)
 {
-	while (!cancel_delayed_work(work))
+	while (!cancel_delayed_work(dwork))
 		flush_workqueue(wq);
 }
 EXPORT_SYMBOL(cancel_rearming_delayed_workqueue);
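The cancel/flush loop is needed because a rearming handler may requeue its own delayed work after cancel_delayed_work() has deactivated the timer: flushing the workqueue lets any in-flight handler finish (and rearm), so the next cancel attempt can catch it. The kind of self-rearming handler this guards against looks roughly like:

	static void poll_status(struct work_struct *work)
	{
		struct delayed_work *dwork =
			container_of(work, struct delayed_work, work);

		/* ... do the periodic work ... */
		schedule_delayed_work(dwork, HZ);	/* rearm for ~1 second later */
	}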
@@ -552,18 +579,17 @@ EXPORT_SYMBOL(cancel_rearming_delayed_workqueue);
 /**
  * cancel_rearming_delayed_work - reliably kill off a delayed keventd
  *			work whose handler rearms the delayed work.
- * @work: the delayed work struct
+ * @dwork: the delayed work struct
  */
-void cancel_rearming_delayed_work(struct work_struct *work)
+void cancel_rearming_delayed_work(struct delayed_work *dwork)
 {
-	cancel_rearming_delayed_workqueue(keventd_wq, work);
+	cancel_rearming_delayed_workqueue(keventd_wq, dwork);
 }
 EXPORT_SYMBOL(cancel_rearming_delayed_work);
 
 /**
  * execute_in_process_context - reliably execute the routine with user context
  * @fn:		the function to execute
- * @data:	data to pass to the function
  * @ew:		guaranteed storage for the execute work structure (must
  *		be available when the work executes)
  *
@@ -573,15 +599,14 @@ EXPORT_SYMBOL(cancel_rearming_delayed_work);
  * Returns: 0 - function was executed
  *	    1 - function was scheduled for execution
  */
-int execute_in_process_context(void (*fn)(void *data), void *data,
-		struct execute_work *ew)
+int execute_in_process_context(work_func_t fn, struct execute_work *ew)
 {
 	if (!in_interrupt()) {
-		fn(data);
+		fn(&ew->work);
 		return 0;
 	}
 
-	INIT_WORK(&ew->work, fn, data);
+	INIT_WORK(&ew->work, fn);
 	schedule_work(&ew->work);
 
 	return 1;
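execute_in_process_context() likewise switches to work_func_t, so callers that used to pass a context pointer now embed the struct execute_work and recover it with container_of(). A caller-side sketch (struct my_obj and my_release are hypothetical):

	struct my_obj {
		struct execute_work ew;	/* must stay valid until the work runs */
		/* ... */
	};

	static void my_release(struct work_struct *work)
	{
		struct my_obj *obj = container_of(work, struct my_obj, ew.work);

		kfree(obj);
	}

	/* runs my_release() immediately in process context, else via keventd */
	execute_in_process_context(my_release, &obj->ew);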