diff options
author | Linus Torvalds <torvalds@g5.osdl.org> | 2006-07-04 17:00:26 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@g5.osdl.org> | 2006-07-04 17:00:26 -0400 |
commit | ca78f6baca863afe2e6a244a0fe94b3a70211d46 (patch) | |
tree | f5a3a169b6cfafa36f9c35cc86e782596c820915 /kernel | |
parent | 7ad7153b051d9628ecd6a336b543ea6ef099bd2c (diff) | |
parent | ae90dd5dbee461652b90d9f7d292ba47dc3dc4b8 (diff) |
Merge master.kernel.org:/pub/scm/linux/kernel/git/davej/cpufreq
* master.kernel.org:/pub/scm/linux/kernel/git/davej/cpufreq:
Move workqueue exports to where the functions are defined.
[CPUFREQ] Misc cleanups in ondemand.
[CPUFREQ] Make ondemand sampling per CPU and remove the mutex usage in sampling path.
[CPUFREQ] Add queue_delayed_work_on() interface for workqueues.
[CPUFREQ] Remove slowdown from ondemand sampling path.
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/workqueue.c | 57 |
1 file changed, 32 insertions(+), 25 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 90d2c6001659..eebb1d839235 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
@@ -114,6 +114,7 @@ int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work) | |||
114 | put_cpu(); | 114 | put_cpu(); |
115 | return ret; | 115 | return ret; |
116 | } | 116 | } |
117 | EXPORT_SYMBOL_GPL(queue_work); | ||
117 | 118 | ||
118 | static void delayed_work_timer_fn(unsigned long __data) | 119 | static void delayed_work_timer_fn(unsigned long __data) |
119 | { | 120 | { |
@@ -147,6 +148,29 @@ int fastcall queue_delayed_work(struct workqueue_struct *wq, | |||
147 | } | 148 | } |
148 | return ret; | 149 | return ret; |
149 | } | 150 | } |
151 | EXPORT_SYMBOL_GPL(queue_delayed_work); | ||
152 | |||
153 | int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, | ||
154 | struct work_struct *work, unsigned long delay) | ||
155 | { | ||
156 | int ret = 0; | ||
157 | struct timer_list *timer = &work->timer; | ||
158 | |||
159 | if (!test_and_set_bit(0, &work->pending)) { | ||
160 | BUG_ON(timer_pending(timer)); | ||
161 | BUG_ON(!list_empty(&work->entry)); | ||
162 | |||
163 | /* This stores wq for the moment, for the timer_fn */ | ||
164 | work->wq_data = wq; | ||
165 | timer->expires = jiffies + delay; | ||
166 | timer->data = (unsigned long)work; | ||
167 | timer->function = delayed_work_timer_fn; | ||
168 | add_timer_on(timer, cpu); | ||
169 | ret = 1; | ||
170 | } | ||
171 | return ret; | ||
172 | } | ||
173 | EXPORT_SYMBOL_GPL(queue_delayed_work_on); | ||
150 | 174 | ||
151 | static void run_workqueue(struct cpu_workqueue_struct *cwq) | 175 | static void run_workqueue(struct cpu_workqueue_struct *cwq) |
152 | { | 176 | { |
@@ -281,6 +305,7 @@ void fastcall flush_workqueue(struct workqueue_struct *wq) | |||
281 | unlock_cpu_hotplug(); | 305 | unlock_cpu_hotplug(); |
282 | } | 306 | } |
283 | } | 307 | } |
308 | EXPORT_SYMBOL_GPL(flush_workqueue); | ||
284 | 309 | ||
285 | static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq, | 310 | static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq, |
286 | int cpu) | 311 | int cpu) |
@@ -358,6 +383,7 @@ struct workqueue_struct *__create_workqueue(const char *name, | |||
358 | } | 383 | } |
359 | return wq; | 384 | return wq; |
360 | } | 385 | } |
386 | EXPORT_SYMBOL_GPL(__create_workqueue); | ||
361 | 387 | ||
362 | static void cleanup_workqueue_thread(struct workqueue_struct *wq, int cpu) | 388 | static void cleanup_workqueue_thread(struct workqueue_struct *wq, int cpu) |
363 | { | 389 | { |
@@ -395,6 +421,7 @@ void destroy_workqueue(struct workqueue_struct *wq) | |||
395 | free_percpu(wq->cpu_wq); | 421 | free_percpu(wq->cpu_wq); |
396 | kfree(wq); | 422 | kfree(wq); |
397 | } | 423 | } |
424 | EXPORT_SYMBOL_GPL(destroy_workqueue); | ||
398 | 425 | ||
399 | static struct workqueue_struct *keventd_wq; | 426 | static struct workqueue_struct *keventd_wq; |
400 | 427 | ||
@@ -402,31 +429,20 @@ int fastcall schedule_work(struct work_struct *work) | |||
402 | { | 429 | { |
403 | return queue_work(keventd_wq, work); | 430 | return queue_work(keventd_wq, work); |
404 | } | 431 | } |
432 | EXPORT_SYMBOL(schedule_work); | ||
405 | 433 | ||
406 | int fastcall schedule_delayed_work(struct work_struct *work, unsigned long delay) | 434 | int fastcall schedule_delayed_work(struct work_struct *work, unsigned long delay) |
407 | { | 435 | { |
408 | return queue_delayed_work(keventd_wq, work, delay); | 436 | return queue_delayed_work(keventd_wq, work, delay); |
409 | } | 437 | } |
438 | EXPORT_SYMBOL(schedule_delayed_work); | ||
410 | 439 | ||
411 | int schedule_delayed_work_on(int cpu, | 440 | int schedule_delayed_work_on(int cpu, |
412 | struct work_struct *work, unsigned long delay) | 441 | struct work_struct *work, unsigned long delay) |
413 | { | 442 | { |
414 | int ret = 0; | 443 | return queue_delayed_work_on(cpu, keventd_wq, work, delay); |
415 | struct timer_list *timer = &work->timer; | ||
416 | |||
417 | if (!test_and_set_bit(0, &work->pending)) { | ||
418 | BUG_ON(timer_pending(timer)); | ||
419 | BUG_ON(!list_empty(&work->entry)); | ||
420 | /* This stores keventd_wq for the moment, for the timer_fn */ | ||
421 | work->wq_data = keventd_wq; | ||
422 | timer->expires = jiffies + delay; | ||
423 | timer->data = (unsigned long)work; | ||
424 | timer->function = delayed_work_timer_fn; | ||
425 | add_timer_on(timer, cpu); | ||
426 | ret = 1; | ||
427 | } | ||
428 | return ret; | ||
429 | } | 444 | } |
445 | EXPORT_SYMBOL(schedule_delayed_work_on); | ||
430 | 446 | ||
431 | /** | 447 | /** |
432 | * schedule_on_each_cpu - call a function on each online CPU from keventd | 448 | * schedule_on_each_cpu - call a function on each online CPU from keventd |
@@ -463,6 +479,7 @@ void flush_scheduled_work(void) | |||
463 | { | 479 | { |
464 | flush_workqueue(keventd_wq); | 480 | flush_workqueue(keventd_wq); |
465 | } | 481 | } |
482 | EXPORT_SYMBOL(flush_scheduled_work); | ||
466 | 483 | ||
467 | /** | 484 | /** |
468 | * cancel_rearming_delayed_workqueue - reliably kill off a delayed | 485 | * cancel_rearming_delayed_workqueue - reliably kill off a delayed |
@@ -619,13 +636,3 @@ void init_workqueues(void) | |||
619 | BUG_ON(!keventd_wq); | 636 | BUG_ON(!keventd_wq); |
620 | } | 637 | } |
621 | 638 | ||
622 | EXPORT_SYMBOL_GPL(__create_workqueue); | ||
623 | EXPORT_SYMBOL_GPL(queue_work); | ||
624 | EXPORT_SYMBOL_GPL(queue_delayed_work); | ||
625 | EXPORT_SYMBOL_GPL(flush_workqueue); | ||
626 | EXPORT_SYMBOL_GPL(destroy_workqueue); | ||
627 | |||
628 | EXPORT_SYMBOL(schedule_work); | ||
629 | EXPORT_SYMBOL(schedule_delayed_work); | ||
630 | EXPORT_SYMBOL(schedule_delayed_work_on); | ||
631 | EXPORT_SYMBOL(flush_scheduled_work); | ||