diff options
| author | Dave Jones <davej@redhat.com> | 2006-06-30 01:40:45 -0400 |
|---|---|---|
| committer | Dave Jones <davej@redhat.com> | 2006-06-30 01:40:45 -0400 |
| commit | ae90dd5dbee461652b90d9f7d292ba47dc3dc4b8 (patch) | |
| tree | 694e12850d9686b1989d63ca1f92b8214359b47e | |
| parent | ffac80e925e54d84f6ea580231aa46d0ef051756 (diff) | |
Move workqueue exports to where the functions are defined.
Signed-off-by: Dave Jones <davej@redhat.com>
| -rw-r--r-- | kernel/workqueue.c | 21 |
1 file changed, 10 insertions, 11 deletions
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 8fbef7008a7e..7f1c30c7273b 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
| @@ -114,6 +114,7 @@ int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work) | |||
| 114 | put_cpu(); | 114 | put_cpu(); |
| 115 | return ret; | 115 | return ret; |
| 116 | } | 116 | } |
| 117 | EXPORT_SYMBOL_GPL(queue_work); | ||
| 117 | 118 | ||
| 118 | static void delayed_work_timer_fn(unsigned long __data) | 119 | static void delayed_work_timer_fn(unsigned long __data) |
| 119 | { | 120 | { |
| @@ -147,6 +148,7 @@ int fastcall queue_delayed_work(struct workqueue_struct *wq, | |||
| 147 | } | 148 | } |
| 148 | return ret; | 149 | return ret; |
| 149 | } | 150 | } |
| 151 | EXPORT_SYMBOL_GPL(queue_delayed_work); | ||
| 150 | 152 | ||
| 151 | int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, | 153 | int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, |
| 152 | struct work_struct *work, unsigned long delay) | 154 | struct work_struct *work, unsigned long delay) |
| @@ -168,6 +170,7 @@ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, | |||
| 168 | } | 170 | } |
| 169 | return ret; | 171 | return ret; |
| 170 | } | 172 | } |
| 173 | EXPORT_SYMBOL_GPL(queue_delayed_work_on); | ||
| 171 | 174 | ||
| 172 | static void run_workqueue(struct cpu_workqueue_struct *cwq) | 175 | static void run_workqueue(struct cpu_workqueue_struct *cwq) |
| 173 | { | 176 | { |
| @@ -302,6 +305,7 @@ void fastcall flush_workqueue(struct workqueue_struct *wq) | |||
| 302 | unlock_cpu_hotplug(); | 305 | unlock_cpu_hotplug(); |
| 303 | } | 306 | } |
| 304 | } | 307 | } |
| 308 | EXPORT_SYMBOL_GPL(flush_workqueue); | ||
| 305 | 309 | ||
| 306 | static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq, | 310 | static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq, |
| 307 | int cpu) | 311 | int cpu) |
| @@ -379,6 +383,7 @@ struct workqueue_struct *__create_workqueue(const char *name, | |||
| 379 | } | 383 | } |
| 380 | return wq; | 384 | return wq; |
| 381 | } | 385 | } |
| 386 | EXPORT_SYMBOL_GPL(__create_workqueue); | ||
| 382 | 387 | ||
| 383 | static void cleanup_workqueue_thread(struct workqueue_struct *wq, int cpu) | 388 | static void cleanup_workqueue_thread(struct workqueue_struct *wq, int cpu) |
| 384 | { | 389 | { |
| @@ -416,6 +421,7 @@ void destroy_workqueue(struct workqueue_struct *wq) | |||
| 416 | free_percpu(wq->cpu_wq); | 421 | free_percpu(wq->cpu_wq); |
| 417 | kfree(wq); | 422 | kfree(wq); |
| 418 | } | 423 | } |
| 424 | EXPORT_SYMBOL_GPL(destroy_workqueue); | ||
| 419 | 425 | ||
| 420 | static struct workqueue_struct *keventd_wq; | 426 | static struct workqueue_struct *keventd_wq; |
| 421 | 427 | ||
| @@ -423,17 +429,20 @@ int fastcall schedule_work(struct work_struct *work) | |||
| 423 | { | 429 | { |
| 424 | return queue_work(keventd_wq, work); | 430 | return queue_work(keventd_wq, work); |
| 425 | } | 431 | } |
| 432 | EXPORT_SYMBOL(schedule_work); | ||
| 426 | 433 | ||
| 427 | int fastcall schedule_delayed_work(struct work_struct *work, unsigned long delay) | 434 | int fastcall schedule_delayed_work(struct work_struct *work, unsigned long delay) |
| 428 | { | 435 | { |
| 429 | return queue_delayed_work(keventd_wq, work, delay); | 436 | return queue_delayed_work(keventd_wq, work, delay); |
| 430 | } | 437 | } |
| 438 | EXPORT_SYMBOL(schedule_delayed_work); | ||
| 431 | 439 | ||
| 432 | int schedule_delayed_work_on(int cpu, | 440 | int schedule_delayed_work_on(int cpu, |
| 433 | struct work_struct *work, unsigned long delay) | 441 | struct work_struct *work, unsigned long delay) |
| 434 | { | 442 | { |
| 435 | return queue_delayed_work_on(cpu, keventd_wq, work, delay); | 443 | return queue_delayed_work_on(cpu, keventd_wq, work, delay); |
| 436 | } | 444 | } |
| 445 | EXPORT_SYMBOL(schedule_delayed_work_on); | ||
| 437 | 446 | ||
| 438 | /** | 447 | /** |
| 439 | * schedule_on_each_cpu - call a function on each online CPU from keventd | 448 | * schedule_on_each_cpu - call a function on each online CPU from keventd |
| @@ -470,6 +479,7 @@ void flush_scheduled_work(void) | |||
| 470 | { | 479 | { |
| 471 | flush_workqueue(keventd_wq); | 480 | flush_workqueue(keventd_wq); |
| 472 | } | 481 | } |
| 482 | EXPORT_SYMBOL(flush_scheduled_work); | ||
| 473 | 483 | ||
| 474 | /** | 484 | /** |
| 475 | * cancel_rearming_delayed_workqueue - reliably kill off a delayed | 485 | * cancel_rearming_delayed_workqueue - reliably kill off a delayed |
| @@ -626,14 +636,3 @@ void init_workqueues(void) | |||
| 626 | BUG_ON(!keventd_wq); | 636 | BUG_ON(!keventd_wq); |
| 627 | } | 637 | } |
| 628 | 638 | ||
| 629 | EXPORT_SYMBOL_GPL(__create_workqueue); | ||
| 630 | EXPORT_SYMBOL_GPL(queue_work); | ||
| 631 | EXPORT_SYMBOL_GPL(queue_delayed_work); | ||
| 632 | EXPORT_SYMBOL_GPL(queue_delayed_work_on); | ||
| 633 | EXPORT_SYMBOL_GPL(flush_workqueue); | ||
| 634 | EXPORT_SYMBOL_GPL(destroy_workqueue); | ||
| 635 | |||
| 636 | EXPORT_SYMBOL(schedule_work); | ||
| 637 | EXPORT_SYMBOL(schedule_delayed_work); | ||
| 638 | EXPORT_SYMBOL(schedule_delayed_work_on); | ||
| 639 | EXPORT_SYMBOL(flush_scheduled_work); | ||
