diff options
| author | Tejun Heo <tj@kernel.org> | 2013-03-13 19:51:36 -0400 |
|---|---|---|
| committer | Tejun Heo <tj@kernel.org> | 2013-03-13 19:51:36 -0400 |
| commit | 8425e3d5bdbe8e741d2c73cf3189ed59b4038b84 (patch) | |
| tree | 5880573b3804d2b313b0b6b640836e57df63a5e9 /include/linux/workqueue.h | |
| parent | 611c92a0203091bb022edec7e2d8b765fe148622 (diff) | |
workqueue: inline trivial wrappers
There's no reason to make these trivial wrappers full (exported)
functions. Inline the following:
queue_work()
queue_delayed_work()
mod_delayed_work()
schedule_work_on()
schedule_work()
schedule_delayed_work_on()
schedule_delayed_work()
keventd_up()
Signed-off-by: Tejun Heo <tj@kernel.org>
Diffstat (limited to 'include/linux/workqueue.h')
| -rw-r--r-- | include/linux/workqueue.h | 123 |
1 file changed, 111 insertions(+), 12 deletions(-)
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index df30763c8682..835d12b76960 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h | |||
| @@ -417,28 +417,16 @@ int apply_workqueue_attrs(struct workqueue_struct *wq, | |||
| 417 | 417 | ||
| 418 | extern bool queue_work_on(int cpu, struct workqueue_struct *wq, | 418 | extern bool queue_work_on(int cpu, struct workqueue_struct *wq, |
| 419 | struct work_struct *work); | 419 | struct work_struct *work); |
| 420 | extern bool queue_work(struct workqueue_struct *wq, struct work_struct *work); | ||
| 421 | extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq, | 420 | extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq, |
| 422 | struct delayed_work *work, unsigned long delay); | 421 | struct delayed_work *work, unsigned long delay); |
| 423 | extern bool queue_delayed_work(struct workqueue_struct *wq, | ||
| 424 | struct delayed_work *work, unsigned long delay); | ||
| 425 | extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq, | 422 | extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq, |
| 426 | struct delayed_work *dwork, unsigned long delay); | 423 | struct delayed_work *dwork, unsigned long delay); |
| 427 | extern bool mod_delayed_work(struct workqueue_struct *wq, | ||
| 428 | struct delayed_work *dwork, unsigned long delay); | ||
| 429 | 424 | ||
| 430 | extern void flush_workqueue(struct workqueue_struct *wq); | 425 | extern void flush_workqueue(struct workqueue_struct *wq); |
| 431 | extern void drain_workqueue(struct workqueue_struct *wq); | 426 | extern void drain_workqueue(struct workqueue_struct *wq); |
| 432 | extern void flush_scheduled_work(void); | 427 | extern void flush_scheduled_work(void); |
| 433 | 428 | ||
| 434 | extern bool schedule_work_on(int cpu, struct work_struct *work); | ||
| 435 | extern bool schedule_work(struct work_struct *work); | ||
| 436 | extern bool schedule_delayed_work_on(int cpu, struct delayed_work *work, | ||
| 437 | unsigned long delay); | ||
| 438 | extern bool schedule_delayed_work(struct delayed_work *work, | ||
| 439 | unsigned long delay); | ||
| 440 | extern int schedule_on_each_cpu(work_func_t func); | 429 | extern int schedule_on_each_cpu(work_func_t func); |
| 441 | extern int keventd_up(void); | ||
| 442 | 430 | ||
| 443 | int execute_in_process_context(work_func_t fn, struct execute_work *); | 431 | int execute_in_process_context(work_func_t fn, struct execute_work *); |
| 444 | 432 | ||
| @@ -455,6 +443,117 @@ extern bool current_is_workqueue_rescuer(void); | |||
| 455 | extern bool workqueue_congested(int cpu, struct workqueue_struct *wq); | 443 | extern bool workqueue_congested(int cpu, struct workqueue_struct *wq); |
| 456 | extern unsigned int work_busy(struct work_struct *work); | 444 | extern unsigned int work_busy(struct work_struct *work); |
| 457 | 445 | ||
| 446 | /** | ||
| 447 | * queue_work - queue work on a workqueue | ||
| 448 | * @wq: workqueue to use | ||
| 449 | * @work: work to queue | ||
| 450 | * | ||
| 451 | * Returns %false if @work was already on a queue, %true otherwise. | ||
| 452 | * | ||
| 453 | * We queue the work to the CPU on which it was submitted, but if the CPU dies | ||
| 454 | * it can be processed by another CPU. | ||
| 455 | */ | ||
| 456 | static inline bool queue_work(struct workqueue_struct *wq, | ||
| 457 | struct work_struct *work) | ||
| 458 | { | ||
| 459 | return queue_work_on(WORK_CPU_UNBOUND, wq, work); | ||
| 460 | } | ||
| 461 | |||
| 462 | /** | ||
| 463 | * queue_delayed_work - queue work on a workqueue after delay | ||
| 464 | * @wq: workqueue to use | ||
| 465 | * @dwork: delayable work to queue | ||
| 466 | * @delay: number of jiffies to wait before queueing | ||
| 467 | * | ||
| 468 | * Equivalent to queue_delayed_work_on() but tries to use the local CPU. | ||
| 469 | */ | ||
| 470 | static inline bool queue_delayed_work(struct workqueue_struct *wq, | ||
| 471 | struct delayed_work *dwork, | ||
| 472 | unsigned long delay) | ||
| 473 | { | ||
| 474 | return queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay); | ||
| 475 | } | ||
| 476 | |||
| 477 | /** | ||
| 478 | * mod_delayed_work - modify delay of or queue a delayed work | ||
| 479 | * @wq: workqueue to use | ||
| 480 | * @dwork: work to queue | ||
| 481 | * @delay: number of jiffies to wait before queueing | ||
| 482 | * | ||
| 483 | * mod_delayed_work_on() on local CPU. | ||
| 484 | */ | ||
| 485 | static inline bool mod_delayed_work(struct workqueue_struct *wq, | ||
| 486 | struct delayed_work *dwork, | ||
| 487 | unsigned long delay) | ||
| 488 | { | ||
| 489 | return mod_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay); | ||
| 490 | } | ||
| 491 | |||
| 492 | /** | ||
| 493 | * schedule_work_on - put work task on a specific cpu | ||
| 494 | * @cpu: cpu to put the work task on | ||
| 495 | * @work: job to be done | ||
| 496 | * | ||
| 497 | * This puts a job on a specific cpu | ||
| 498 | */ | ||
| 499 | static inline bool schedule_work_on(int cpu, struct work_struct *work) | ||
| 500 | { | ||
| 501 | return queue_work_on(cpu, system_wq, work); | ||
| 502 | } | ||
| 503 | |||
| 504 | /** | ||
| 505 | * schedule_work - put work task in global workqueue | ||
| 506 | * @work: job to be done | ||
| 507 | * | ||
| 508 | * Returns %false if @work was already on the kernel-global workqueue and | ||
| 509 | * %true otherwise. | ||
| 510 | * | ||
| 511 | * This puts a job in the kernel-global workqueue if it was not already | ||
| 512 | * queued and leaves it in the same position on the kernel-global | ||
| 513 | * workqueue otherwise. | ||
| 514 | */ | ||
| 515 | static inline bool schedule_work(struct work_struct *work) | ||
| 516 | { | ||
| 517 | return queue_work(system_wq, work); | ||
| 518 | } | ||
| 519 | |||
| 520 | /** | ||
| 521 | * schedule_delayed_work_on - queue work in global workqueue on CPU after delay | ||
| 522 | * @cpu: cpu to use | ||
| 523 | * @dwork: job to be done | ||
| 524 | * @delay: number of jiffies to wait | ||
| 525 | * | ||
| 526 | * After waiting for a given time this puts a job in the kernel-global | ||
| 527 | * workqueue on the specified CPU. | ||
| 528 | */ | ||
| 529 | static inline bool schedule_delayed_work_on(int cpu, struct delayed_work *dwork, | ||
| 530 | unsigned long delay) | ||
| 531 | { | ||
| 532 | return queue_delayed_work_on(cpu, system_wq, dwork, delay); | ||
| 533 | } | ||
| 534 | |||
| 535 | /** | ||
| 536 | * schedule_delayed_work - put work task in global workqueue after delay | ||
| 537 | * @dwork: job to be done | ||
| 538 | * @delay: number of jiffies to wait or 0 for immediate execution | ||
| 539 | * | ||
| 540 | * After waiting for a given time this puts a job in the kernel-global | ||
| 541 | * workqueue. | ||
| 542 | */ | ||
| 543 | static inline bool schedule_delayed_work(struct delayed_work *dwork, | ||
| 544 | unsigned long delay) | ||
| 545 | { | ||
| 546 | return queue_delayed_work(system_wq, dwork, delay); | ||
| 547 | } | ||
| 548 | |||
| 549 | /** | ||
| 550 | * keventd_up - is workqueue initialized yet? | ||
| 551 | */ | ||
| 552 | static inline bool keventd_up(void) | ||
| 553 | { | ||
| 554 | return system_wq != NULL; | ||
| 555 | } | ||
| 556 | |||
| 458 | /* | 557 | /* |
| 459 | * Like above, but uses del_timer() instead of del_timer_sync(). This means, | 558 | * Like above, but uses del_timer() instead of del_timer_sync(). This means, |
| 460 | * if it returns 0 the timer function may be running and the queueing is in | 559 | * if it returns 0 the timer function may be running and the queueing is in |
