about summary refs log tree commit diff stats
path: root/kernel
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@g5.osdl.org>2006-07-04 17:00:26 -0400
committerLinus Torvalds <torvalds@g5.osdl.org>2006-07-04 17:00:26 -0400
commitca78f6baca863afe2e6a244a0fe94b3a70211d46 (patch)
treef5a3a169b6cfafa36f9c35cc86e782596c820915 /kernel
parent7ad7153b051d9628ecd6a336b543ea6ef099bd2c (diff)
parentae90dd5dbee461652b90d9f7d292ba47dc3dc4b8 (diff)
Merge master.kernel.org:/pub/scm/linux/kernel/git/davej/cpufreq
* master.kernel.org:/pub/scm/linux/kernel/git/davej/cpufreq:
  Move workqueue exports to where the functions are defined.
  [CPUFREQ] Misc cleanups in ondemand.
  [CPUFREQ] Make ondemand sampling per CPU and remove the mutex usage in sampling path.
  [CPUFREQ] Add queue_delayed_work_on() interface for workqueues.
  [CPUFREQ] Remove slowdown from ondemand sampling path.
Diffstat (limited to 'kernel')
-rw-r--r--kernel/workqueue.c57
1 file changed, 32 insertions(+), 25 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 90d2c6001659..eebb1d839235 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -114,6 +114,7 @@ int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
114 put_cpu(); 114 put_cpu();
115 return ret; 115 return ret;
116} 116}
117EXPORT_SYMBOL_GPL(queue_work);
117 118
118static void delayed_work_timer_fn(unsigned long __data) 119static void delayed_work_timer_fn(unsigned long __data)
119{ 120{
@@ -147,6 +148,29 @@ int fastcall queue_delayed_work(struct workqueue_struct *wq,
147 } 148 }
148 return ret; 149 return ret;
149} 150}
151EXPORT_SYMBOL_GPL(queue_delayed_work);
152
153int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
154 struct work_struct *work, unsigned long delay)
155{
156 int ret = 0;
157 struct timer_list *timer = &work->timer;
158
159 if (!test_and_set_bit(0, &work->pending)) {
160 BUG_ON(timer_pending(timer));
161 BUG_ON(!list_empty(&work->entry));
162
163 /* This stores wq for the moment, for the timer_fn */
164 work->wq_data = wq;
165 timer->expires = jiffies + delay;
166 timer->data = (unsigned long)work;
167 timer->function = delayed_work_timer_fn;
168 add_timer_on(timer, cpu);
169 ret = 1;
170 }
171 return ret;
172}
173EXPORT_SYMBOL_GPL(queue_delayed_work_on);
150 174
151static void run_workqueue(struct cpu_workqueue_struct *cwq) 175static void run_workqueue(struct cpu_workqueue_struct *cwq)
152{ 176{
@@ -281,6 +305,7 @@ void fastcall flush_workqueue(struct workqueue_struct *wq)
281 unlock_cpu_hotplug(); 305 unlock_cpu_hotplug();
282 } 306 }
283} 307}
308EXPORT_SYMBOL_GPL(flush_workqueue);
284 309
285static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq, 310static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq,
286 int cpu) 311 int cpu)
@@ -358,6 +383,7 @@ struct workqueue_struct *__create_workqueue(const char *name,
358 } 383 }
359 return wq; 384 return wq;
360} 385}
386EXPORT_SYMBOL_GPL(__create_workqueue);
361 387
362static void cleanup_workqueue_thread(struct workqueue_struct *wq, int cpu) 388static void cleanup_workqueue_thread(struct workqueue_struct *wq, int cpu)
363{ 389{
@@ -395,6 +421,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
395 free_percpu(wq->cpu_wq); 421 free_percpu(wq->cpu_wq);
396 kfree(wq); 422 kfree(wq);
397} 423}
424EXPORT_SYMBOL_GPL(destroy_workqueue);
398 425
399static struct workqueue_struct *keventd_wq; 426static struct workqueue_struct *keventd_wq;
400 427
@@ -402,31 +429,20 @@ int fastcall schedule_work(struct work_struct *work)
402{ 429{
403 return queue_work(keventd_wq, work); 430 return queue_work(keventd_wq, work);
404} 431}
432EXPORT_SYMBOL(schedule_work);
405 433
406int fastcall schedule_delayed_work(struct work_struct *work, unsigned long delay) 434int fastcall schedule_delayed_work(struct work_struct *work, unsigned long delay)
407{ 435{
408 return queue_delayed_work(keventd_wq, work, delay); 436 return queue_delayed_work(keventd_wq, work, delay);
409} 437}
438EXPORT_SYMBOL(schedule_delayed_work);
410 439
411int schedule_delayed_work_on(int cpu, 440int schedule_delayed_work_on(int cpu,
412 struct work_struct *work, unsigned long delay) 441 struct work_struct *work, unsigned long delay)
413{ 442{
414 int ret = 0; 443 return queue_delayed_work_on(cpu, keventd_wq, work, delay);
415 struct timer_list *timer = &work->timer;
416
417 if (!test_and_set_bit(0, &work->pending)) {
418 BUG_ON(timer_pending(timer));
419 BUG_ON(!list_empty(&work->entry));
420 /* This stores keventd_wq for the moment, for the timer_fn */
421 work->wq_data = keventd_wq;
422 timer->expires = jiffies + delay;
423 timer->data = (unsigned long)work;
424 timer->function = delayed_work_timer_fn;
425 add_timer_on(timer, cpu);
426 ret = 1;
427 }
428 return ret;
429} 444}
445EXPORT_SYMBOL(schedule_delayed_work_on);
430 446
431/** 447/**
432 * schedule_on_each_cpu - call a function on each online CPU from keventd 448 * schedule_on_each_cpu - call a function on each online CPU from keventd
@@ -463,6 +479,7 @@ void flush_scheduled_work(void)
463{ 479{
464 flush_workqueue(keventd_wq); 480 flush_workqueue(keventd_wq);
465} 481}
482EXPORT_SYMBOL(flush_scheduled_work);
466 483
467/** 484/**
468 * cancel_rearming_delayed_workqueue - reliably kill off a delayed 485 * cancel_rearming_delayed_workqueue - reliably kill off a delayed
@@ -619,13 +636,3 @@ void init_workqueues(void)
619 BUG_ON(!keventd_wq); 636 BUG_ON(!keventd_wq);
620} 637}
621 638
622EXPORT_SYMBOL_GPL(__create_workqueue);
623EXPORT_SYMBOL_GPL(queue_work);
624EXPORT_SYMBOL_GPL(queue_delayed_work);
625EXPORT_SYMBOL_GPL(flush_workqueue);
626EXPORT_SYMBOL_GPL(destroy_workqueue);
627
628EXPORT_SYMBOL(schedule_work);
629EXPORT_SYMBOL(schedule_delayed_work);
630EXPORT_SYMBOL(schedule_delayed_work_on);
631EXPORT_SYMBOL(flush_scheduled_work);