about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorTejun Heo <tj@kernel.org>2013-03-13 19:51:36 -0400
committerTejun Heo <tj@kernel.org>2013-03-13 19:51:36 -0400
commit8425e3d5bdbe8e741d2c73cf3189ed59b4038b84 (patch)
tree5880573b3804d2b313b0b6b640836e57df63a5e9
parent611c92a0203091bb022edec7e2d8b765fe148622 (diff)
workqueue: inline trivial wrappers
There's no reason to make these trivial wrappers full (exported) functions. Inline the followings.

 queue_work()
 queue_delayed_work()
 mod_delayed_work()
 schedule_work_on()
 schedule_work()
 schedule_delayed_work_on()
 schedule_delayed_work()
 keventd_up()

Signed-off-by: Tejun Heo <tj@kernel.org>
-rw-r--r--include/linux/workqueue.h123
-rw-r--r--kernel/workqueue.c111
2 files changed, 111 insertions, 123 deletions
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index df30763c8682..835d12b76960 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -417,28 +417,16 @@ int apply_workqueue_attrs(struct workqueue_struct *wq,
417 417
418extern bool queue_work_on(int cpu, struct workqueue_struct *wq, 418extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
419 struct work_struct *work); 419 struct work_struct *work);
420extern bool queue_work(struct workqueue_struct *wq, struct work_struct *work);
421extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq, 420extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
422 struct delayed_work *work, unsigned long delay); 421 struct delayed_work *work, unsigned long delay);
423extern bool queue_delayed_work(struct workqueue_struct *wq,
424 struct delayed_work *work, unsigned long delay);
425extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq, 422extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
426 struct delayed_work *dwork, unsigned long delay); 423 struct delayed_work *dwork, unsigned long delay);
427extern bool mod_delayed_work(struct workqueue_struct *wq,
428 struct delayed_work *dwork, unsigned long delay);
429 424
430extern void flush_workqueue(struct workqueue_struct *wq); 425extern void flush_workqueue(struct workqueue_struct *wq);
431extern void drain_workqueue(struct workqueue_struct *wq); 426extern void drain_workqueue(struct workqueue_struct *wq);
432extern void flush_scheduled_work(void); 427extern void flush_scheduled_work(void);
433 428
434extern bool schedule_work_on(int cpu, struct work_struct *work);
435extern bool schedule_work(struct work_struct *work);
436extern bool schedule_delayed_work_on(int cpu, struct delayed_work *work,
437 unsigned long delay);
438extern bool schedule_delayed_work(struct delayed_work *work,
439 unsigned long delay);
440extern int schedule_on_each_cpu(work_func_t func); 429extern int schedule_on_each_cpu(work_func_t func);
441extern int keventd_up(void);
442 430
443int execute_in_process_context(work_func_t fn, struct execute_work *); 431int execute_in_process_context(work_func_t fn, struct execute_work *);
444 432
@@ -455,6 +443,117 @@ extern bool current_is_workqueue_rescuer(void);
455extern bool workqueue_congested(int cpu, struct workqueue_struct *wq); 443extern bool workqueue_congested(int cpu, struct workqueue_struct *wq);
456extern unsigned int work_busy(struct work_struct *work); 444extern unsigned int work_busy(struct work_struct *work);
457 445
446/**
447 * queue_work - queue work on a workqueue
448 * @wq: workqueue to use
449 * @work: work to queue
450 *
451 * Returns %false if @work was already on a queue, %true otherwise.
452 *
453 * We queue the work to the CPU on which it was submitted, but if the CPU dies
454 * it can be processed by another CPU.
455 */
456static inline bool queue_work(struct workqueue_struct *wq,
457 struct work_struct *work)
458{
459 return queue_work_on(WORK_CPU_UNBOUND, wq, work);
460}
461
462/**
463 * queue_delayed_work - queue work on a workqueue after delay
464 * @wq: workqueue to use
465 * @dwork: delayable work to queue
466 * @delay: number of jiffies to wait before queueing
467 *
468 * Equivalent to queue_delayed_work_on() but tries to use the local CPU.
469 */
470static inline bool queue_delayed_work(struct workqueue_struct *wq,
471 struct delayed_work *dwork,
472 unsigned long delay)
473{
474 return queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
475}
476
477/**
478 * mod_delayed_work - modify delay of or queue a delayed work
479 * @wq: workqueue to use
480 * @dwork: work to queue
481 * @delay: number of jiffies to wait before queueing
482 *
483 * mod_delayed_work_on() on local CPU.
484 */
485static inline bool mod_delayed_work(struct workqueue_struct *wq,
486 struct delayed_work *dwork,
487 unsigned long delay)
488{
489 return mod_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
490}
491
492/**
493 * schedule_work_on - put work task on a specific cpu
494 * @cpu: cpu to put the work task on
495 * @work: job to be done
496 *
497 * This puts a job on a specific cpu
498 */
499static inline bool schedule_work_on(int cpu, struct work_struct *work)
500{
501 return queue_work_on(cpu, system_wq, work);
502}
503
504/**
505 * schedule_work - put work task in global workqueue
506 * @work: job to be done
507 *
508 * Returns %false if @work was already on the kernel-global workqueue and
509 * %true otherwise.
510 *
511 * This puts a job in the kernel-global workqueue if it was not already
512 * queued and leaves it in the same position on the kernel-global
513 * workqueue otherwise.
514 */
515static inline bool schedule_work(struct work_struct *work)
516{
517 return queue_work(system_wq, work);
518}
519
520/**
521 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
522 * @cpu: cpu to use
523 * @dwork: job to be done
524 * @delay: number of jiffies to wait
525 *
526 * After waiting for a given time this puts a job in the kernel-global
527 * workqueue on the specified CPU.
528 */
529static inline bool schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
530 unsigned long delay)
531{
532 return queue_delayed_work_on(cpu, system_wq, dwork, delay);
533}
534
535/**
536 * schedule_delayed_work - put work task in global workqueue after delay
537 * @dwork: job to be done
538 * @delay: number of jiffies to wait or 0 for immediate execution
539 *
540 * After waiting for a given time this puts a job in the kernel-global
541 * workqueue.
542 */
543static inline bool schedule_delayed_work(struct delayed_work *dwork,
544 unsigned long delay)
545{
546 return queue_delayed_work(system_wq, dwork, delay);
547}
548
549/**
550 * keventd_up - is workqueue initialized yet?
551 */
552static inline bool keventd_up(void)
553{
554 return system_wq != NULL;
555}
556
458/* 557/*
459 * Like above, but uses del_timer() instead of del_timer_sync(). This means, 558 * Like above, but uses del_timer() instead of del_timer_sync(). This means,
460 * if it returns 0 the timer function may be running and the queueing is in 559 * if it returns 0 the timer function may be running and the queueing is in
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 147fc5a784f0..f37421fb4f35 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1340,22 +1340,6 @@ bool queue_work_on(int cpu, struct workqueue_struct *wq,
1340} 1340}
1341EXPORT_SYMBOL_GPL(queue_work_on); 1341EXPORT_SYMBOL_GPL(queue_work_on);
1342 1342
1343/**
1344 * queue_work - queue work on a workqueue
1345 * @wq: workqueue to use
1346 * @work: work to queue
1347 *
1348 * Returns %false if @work was already on a queue, %true otherwise.
1349 *
1350 * We queue the work to the CPU on which it was submitted, but if the CPU dies
1351 * it can be processed by another CPU.
1352 */
1353bool queue_work(struct workqueue_struct *wq, struct work_struct *work)
1354{
1355 return queue_work_on(WORK_CPU_UNBOUND, wq, work);
1356}
1357EXPORT_SYMBOL_GPL(queue_work);
1358
1359void delayed_work_timer_fn(unsigned long __data) 1343void delayed_work_timer_fn(unsigned long __data)
1360{ 1344{
1361 struct delayed_work *dwork = (struct delayed_work *)__data; 1345 struct delayed_work *dwork = (struct delayed_work *)__data;
@@ -1431,21 +1415,6 @@ bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
1431EXPORT_SYMBOL_GPL(queue_delayed_work_on); 1415EXPORT_SYMBOL_GPL(queue_delayed_work_on);
1432 1416
1433/** 1417/**
1434 * queue_delayed_work - queue work on a workqueue after delay
1435 * @wq: workqueue to use
1436 * @dwork: delayable work to queue
1437 * @delay: number of jiffies to wait before queueing
1438 *
1439 * Equivalent to queue_delayed_work_on() but tries to use the local CPU.
1440 */
1441bool queue_delayed_work(struct workqueue_struct *wq,
1442 struct delayed_work *dwork, unsigned long delay)
1443{
1444 return queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
1445}
1446EXPORT_SYMBOL_GPL(queue_delayed_work);
1447
1448/**
1449 * mod_delayed_work_on - modify delay of or queue a delayed work on specific CPU 1418 * mod_delayed_work_on - modify delay of or queue a delayed work on specific CPU
1450 * @cpu: CPU number to execute work on 1419 * @cpu: CPU number to execute work on
1451 * @wq: workqueue to use 1420 * @wq: workqueue to use
@@ -1484,21 +1453,6 @@ bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
1484EXPORT_SYMBOL_GPL(mod_delayed_work_on); 1453EXPORT_SYMBOL_GPL(mod_delayed_work_on);
1485 1454
1486/** 1455/**
1487 * mod_delayed_work - modify delay of or queue a delayed work
1488 * @wq: workqueue to use
1489 * @dwork: work to queue
1490 * @delay: number of jiffies to wait before queueing
1491 *
1492 * mod_delayed_work_on() on local CPU.
1493 */
1494bool mod_delayed_work(struct workqueue_struct *wq, struct delayed_work *dwork,
1495 unsigned long delay)
1496{
1497 return mod_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
1498}
1499EXPORT_SYMBOL_GPL(mod_delayed_work);
1500
1501/**
1502 * worker_enter_idle - enter idle state 1456 * worker_enter_idle - enter idle state
1503 * @worker: worker which is entering idle state 1457 * @worker: worker which is entering idle state
1504 * 1458 *
@@ -3002,66 +2956,6 @@ bool cancel_delayed_work_sync(struct delayed_work *dwork)
3002EXPORT_SYMBOL(cancel_delayed_work_sync); 2956EXPORT_SYMBOL(cancel_delayed_work_sync);
3003 2957
3004/** 2958/**
3005 * schedule_work_on - put work task on a specific cpu
3006 * @cpu: cpu to put the work task on
3007 * @work: job to be done
3008 *
3009 * This puts a job on a specific cpu
3010 */
3011bool schedule_work_on(int cpu, struct work_struct *work)
3012{
3013 return queue_work_on(cpu, system_wq, work);
3014}
3015EXPORT_SYMBOL(schedule_work_on);
3016
3017/**
3018 * schedule_work - put work task in global workqueue
3019 * @work: job to be done
3020 *
3021 * Returns %false if @work was already on the kernel-global workqueue and
3022 * %true otherwise.
3023 *
3024 * This puts a job in the kernel-global workqueue if it was not already
3025 * queued and leaves it in the same position on the kernel-global
3026 * workqueue otherwise.
3027 */
3028bool schedule_work(struct work_struct *work)
3029{
3030 return queue_work(system_wq, work);
3031}
3032EXPORT_SYMBOL(schedule_work);
3033
3034/**
3035 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
3036 * @cpu: cpu to use
3037 * @dwork: job to be done
3038 * @delay: number of jiffies to wait
3039 *
3040 * After waiting for a given time this puts a job in the kernel-global
3041 * workqueue on the specified CPU.
3042 */
3043bool schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
3044 unsigned long delay)
3045{
3046 return queue_delayed_work_on(cpu, system_wq, dwork, delay);
3047}
3048EXPORT_SYMBOL(schedule_delayed_work_on);
3049
3050/**
3051 * schedule_delayed_work - put work task in global workqueue after delay
3052 * @dwork: job to be done
3053 * @delay: number of jiffies to wait or 0 for immediate execution
3054 *
3055 * After waiting for a given time this puts a job in the kernel-global
3056 * workqueue.
3057 */
3058bool schedule_delayed_work(struct delayed_work *dwork, unsigned long delay)
3059{
3060 return queue_delayed_work(system_wq, dwork, delay);
3061}
3062EXPORT_SYMBOL(schedule_delayed_work);
3063
3064/**
3065 * schedule_on_each_cpu - execute a function synchronously on each online CPU 2959 * schedule_on_each_cpu - execute a function synchronously on each online CPU
3066 * @func: the function to call 2960 * @func: the function to call
3067 * 2961 *
@@ -3154,11 +3048,6 @@ int execute_in_process_context(work_func_t fn, struct execute_work *ew)
3154} 3048}
3155EXPORT_SYMBOL_GPL(execute_in_process_context); 3049EXPORT_SYMBOL_GPL(execute_in_process_context);
3156 3050
3157int keventd_up(void)
3158{
3159 return system_wq != NULL;
3160}
3161
3162#ifdef CONFIG_SYSFS 3051#ifdef CONFIG_SYSFS
3163/* 3052/*
3164 * Workqueues with WQ_SYSFS flag set is visible to userland via 3053 * Workqueues with WQ_SYSFS flag set is visible to userland via