author		Tejun Heo <tj@kernel.org>	2010-06-29 04:07:14 -0400
committer	Tejun Heo <tj@kernel.org>	2010-06-29 04:07:14 -0400
commit		dcd989cb73ab0f7b722d64ab6516f101d9f43f88
tree		8c2f14e708367cb67dd9d29f2da0f7e5f454cf31	/kernel/workqueue.c
parent		d320c03830b17af64e4547075003b1eeb274bc6c
workqueue: implement several utility APIs
Implement the following utility APIs; a brief usage sketch follows the sign-offs below.

* workqueue_set_max_active()	: adjust max_active of a wq
* workqueue_congested()		: test whether a wq is congested
* work_cpu()			: determine the last / current cpu of a work
* work_busy()			: query whether a work is busy
* Anton Blanchard fixed missing ret initialization in work_busy().
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Anton Blanchard <anton@samba.org>
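For illustration only (not part of the commit): a minimal sketch of how the first two helpers might be used together. The workqueue my_wq, the helper my_wq_throttle(), and the throttling policy are all hypothetical, and the max_active values are arbitrary.

/* Hypothetical usage sketch -- not from this commit. */
#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;	/* assumed: created elsewhere */

static void my_wq_throttle(unsigned int cpu)
{
	/*
	 * workqueue_congested() takes no locks, so the answer is
	 * advisory only: it may already be stale when we act on it.
	 */
	if (workqueue_congested(cpu, my_wq))
		workqueue_set_max_active(my_wq, 1);	/* back off */
	else
		workqueue_set_max_active(my_wq, 16);	/* open up */
}

Per the CONTEXT note in the kernel-doc below, my_wq_throttle() must not be called from IRQ context, since workqueue_set_max_active() takes workqueue_lock without disabling interrupts.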
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--	kernel/workqueue.c	108
1 file changed, 107 insertions(+), 1 deletion(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 16ce617974d2..c1aa65c2ff38 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -203,7 +203,7 @@ struct workqueue_struct {
 	cpumask_var_t mayday_mask;	/* cpus requesting rescue */
 	struct worker *rescuer;		/* I: rescue worker */
 
-	int saved_max_active;		/* I: saved cwq max_active */
+	int saved_max_active;		/* W: saved cwq max_active */
 	const char *name;		/* I: workqueue name */
 #ifdef CONFIG_LOCKDEP
 	struct lockdep_map lockdep_map;
@@ -2675,6 +2675,112 @@ void destroy_workqueue(struct workqueue_struct *wq)
 }
 EXPORT_SYMBOL_GPL(destroy_workqueue);
 
+/**
+ * workqueue_set_max_active - adjust max_active of a workqueue
+ * @wq: target workqueue
+ * @max_active: new max_active value.
+ *
+ * Set max_active of @wq to @max_active.
+ *
+ * CONTEXT:
+ * Don't call from IRQ context.
+ */
+void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
+{
+	unsigned int cpu;
+
+	max_active = wq_clamp_max_active(max_active, wq->name);
+
+	spin_lock(&workqueue_lock);
+
+	wq->saved_max_active = max_active;
+
+	for_each_possible_cpu(cpu) {
+		struct global_cwq *gcwq = get_gcwq(cpu);
+
+		spin_lock_irq(&gcwq->lock);
+
+		if (!(wq->flags & WQ_FREEZEABLE) ||
+		    !(gcwq->flags & GCWQ_FREEZING))
+			get_cwq(gcwq->cpu, wq)->max_active = max_active;
+
+		spin_unlock_irq(&gcwq->lock);
+	}
+
+	spin_unlock(&workqueue_lock);
+}
+EXPORT_SYMBOL_GPL(workqueue_set_max_active);
+
+/**
+ * workqueue_congested - test whether a workqueue is congested
+ * @cpu: CPU in question
+ * @wq: target workqueue
+ *
+ * Test whether @wq's cpu workqueue for @cpu is congested.  There is
+ * no synchronization around this function and the test result is
+ * unreliable and only useful as advisory hints or for debugging.
+ *
+ * RETURNS:
+ * %true if congested, %false otherwise.
+ */
+bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq)
+{
+	struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
+
+	return !list_empty(&cwq->delayed_works);
+}
+EXPORT_SYMBOL_GPL(workqueue_congested);
+
+/**
+ * work_cpu - return the last known associated cpu for @work
+ * @work: the work of interest
+ *
+ * RETURNS:
+ * CPU number if @work was ever queued.  NR_CPUS otherwise.
+ */
+unsigned int work_cpu(struct work_struct *work)
+{
+	struct global_cwq *gcwq = get_work_gcwq(work);
+
+	return gcwq ? gcwq->cpu : NR_CPUS;
+}
+EXPORT_SYMBOL_GPL(work_cpu);
+
+/**
+ * work_busy - test whether a work is currently pending or running
+ * @work: the work to be tested
+ *
+ * Test whether @work is currently pending or running.  There is no
+ * synchronization around this function and the test result is
+ * unreliable and only useful as advisory hints or for debugging.
+ * Especially for reentrant wqs, the pending state might hide the
+ * running state.
+ *
+ * RETURNS:
+ * OR'd bitmask of WORK_BUSY_* bits.
+ */
+unsigned int work_busy(struct work_struct *work)
+{
+	struct global_cwq *gcwq = get_work_gcwq(work);
+	unsigned long flags;
+	unsigned int ret = 0;
+
+	if (!gcwq)
+		return false;
+
+	spin_lock_irqsave(&gcwq->lock, flags);
+
+	if (work_pending(work))
+		ret |= WORK_BUSY_PENDING;
+	if (find_worker_executing_work(gcwq, work))
+		ret |= WORK_BUSY_RUNNING;
+
+	spin_unlock_irqrestore(&gcwq->lock, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(work_busy);
+
 /*
  * CPU hotplug.
  *
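For illustration only (not part of the commit): a sketch of a debugging helper built on the remaining two APIs, work_cpu() and work_busy(). The work item my_work and the helper my_work_report() are hypothetical; both results are advisory snapshots, as the kernel-doc above stresses.

/* Hypothetical debugging sketch -- not from this commit. */
#include <linux/kernel.h>
#include <linux/workqueue.h>

static struct work_struct my_work;	/* assumed: INIT_WORK() done elsewhere */

static void my_work_report(void)
{
	unsigned int busy = work_busy(&my_work);
	unsigned int cpu = work_cpu(&my_work);

	if (cpu == NR_CPUS)			/* never queued */
		pr_info("my_work: never queued\n");
	else
		pr_info("my_work: last/current cpu %u\n", cpu);

	/* Unreliable snapshot; either state may change under us. */
	if (busy & WORK_BUSY_PENDING)
		pr_info("my_work: pending\n");
	if (busy & WORK_BUSY_RUNNING)
		pr_info("my_work: running\n");
}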