diffstat:
 include/linux/workqueue.h |  11 ++
 kernel/workqueue.c        | 108 ++++++++++
 2 files changed, 117 insertions(+), 2 deletions(-)

diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 48b7422f25ae..0a7f79729380 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -61,6 +61,10 @@ enum { | |||
61 | WORK_STRUCT_FLAG_MASK = (1UL << WORK_STRUCT_FLAG_BITS) - 1, | 61 | WORK_STRUCT_FLAG_MASK = (1UL << WORK_STRUCT_FLAG_BITS) - 1, |
62 | WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK, | 62 | WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK, |
63 | WORK_STRUCT_NO_CPU = NR_CPUS << WORK_STRUCT_FLAG_BITS, | 63 | WORK_STRUCT_NO_CPU = NR_CPUS << WORK_STRUCT_FLAG_BITS, |
64 | |||
65 | /* bit mask for work_busy() return values */ | ||
66 | WORK_BUSY_PENDING = 1 << 0, | ||
67 | WORK_BUSY_RUNNING = 1 << 1, | ||
64 | }; | 68 | }; |
65 | 69 | ||
66 | struct work_struct { | 70 | struct work_struct { |
@@ -307,9 +311,14 @@ extern void init_workqueues(void); | |||
307 | int execute_in_process_context(work_func_t fn, struct execute_work *); | 311 | int execute_in_process_context(work_func_t fn, struct execute_work *); |
308 | 312 | ||
309 | extern int flush_work(struct work_struct *work); | 313 | extern int flush_work(struct work_struct *work); |
310 | |||
311 | extern int cancel_work_sync(struct work_struct *work); | 314 | extern int cancel_work_sync(struct work_struct *work); |
312 | 315 | ||
316 | extern void workqueue_set_max_active(struct workqueue_struct *wq, | ||
317 | int max_active); | ||
318 | extern bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq); | ||
319 | extern unsigned int work_cpu(struct work_struct *work); | ||
320 | extern unsigned int work_busy(struct work_struct *work); | ||
321 | |||
313 | /* | 322 | /* |
314 | * Kill off a pending schedule_delayed_work(). Note that the work callback | 323 | * Kill off a pending schedule_delayed_work(). Note that the work callback |
315 | * function may still be running on return from cancel_delayed_work(), unless | 324 | * function may still be running on return from cancel_delayed_work(), unless |
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 16ce617974d2..c1aa65c2ff38 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -203,7 +203,7 @@ struct workqueue_struct { | |||
203 | cpumask_var_t mayday_mask; /* cpus requesting rescue */ | 203 | cpumask_var_t mayday_mask; /* cpus requesting rescue */ |
204 | struct worker *rescuer; /* I: rescue worker */ | 204 | struct worker *rescuer; /* I: rescue worker */ |
205 | 205 | ||
206 | int saved_max_active; /* I: saved cwq max_active */ | 206 | int saved_max_active; /* W: saved cwq max_active */ |
207 | const char *name; /* I: workqueue name */ | 207 | const char *name; /* I: workqueue name */ |
208 | #ifdef CONFIG_LOCKDEP | 208 | #ifdef CONFIG_LOCKDEP |
209 | struct lockdep_map lockdep_map; | 209 | struct lockdep_map lockdep_map; |
@@ -2675,6 +2675,112 @@ void destroy_workqueue(struct workqueue_struct *wq) | |||
2675 | } | 2675 | } |
2676 | EXPORT_SYMBOL_GPL(destroy_workqueue); | 2676 | EXPORT_SYMBOL_GPL(destroy_workqueue); |
2677 | 2677 | ||
2678 | /** | ||
2679 | * workqueue_set_max_active - adjust max_active of a workqueue | ||
2680 | * @wq: target workqueue | ||
2681 | * @max_active: new max_active value. | ||
2682 | * | ||
2683 | * Set max_active of @wq to @max_active. | ||
2684 | * | ||
2685 | * CONTEXT: | ||
2686 | * Don't call from IRQ context. | ||
2687 | */ | ||
2688 | void workqueue_set_max_active(struct workqueue_struct *wq, int max_active) | ||
2689 | { | ||
2690 | unsigned int cpu; | ||
2691 | |||
2692 | max_active = wq_clamp_max_active(max_active, wq->name); | ||
2693 | |||
2694 | spin_lock(&workqueue_lock); | ||
2695 | |||
2696 | wq->saved_max_active = max_active; | ||
2697 | |||
2698 | for_each_possible_cpu(cpu) { | ||
2699 | struct global_cwq *gcwq = get_gcwq(cpu); | ||
2700 | |||
2701 | spin_lock_irq(&gcwq->lock); | ||
2702 | |||
2703 | if (!(wq->flags & WQ_FREEZEABLE) || | ||
2704 | !(gcwq->flags & GCWQ_FREEZING)) | ||
2705 | get_cwq(gcwq->cpu, wq)->max_active = max_active; | ||
2706 | |||
2707 | spin_unlock_irq(&gcwq->lock); | ||
2708 | } | ||
2709 | |||
2710 | spin_unlock(&workqueue_lock); | ||
2711 | } | ||
2712 | EXPORT_SYMBOL_GPL(workqueue_set_max_active); | ||
2713 | |||
2714 | /** | ||
2715 | * workqueue_congested - test whether a workqueue is congested | ||
2716 | * @cpu: CPU in question | ||
2717 | * @wq: target workqueue | ||
2718 | * | ||
2719 | * Test whether @wq's cpu workqueue for @cpu is congested. There is | ||
2720 | * no synchronization around this function and the test result is | ||
2721 | * unreliable and only useful as advisory hints or for debugging. | ||
2722 | * | ||
2723 | * RETURNS: | ||
2724 | * %true if congested, %false otherwise. | ||
2725 | */ | ||
2726 | bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq) | ||
2727 | { | ||
2728 | struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); | ||
2729 | |||
2730 | return !list_empty(&cwq->delayed_works); | ||
2731 | } | ||
2732 | EXPORT_SYMBOL_GPL(workqueue_congested); | ||
2733 | |||
2734 | /** | ||
2735 | * work_cpu - return the last known associated cpu for @work | ||
2736 | * @work: the work of interest | ||
2737 | * | ||
2738 | * RETURNS: | ||
2739 | * CPU number if @work was ever queued. NR_CPUS otherwise. | ||
2740 | */ | ||
2741 | unsigned int work_cpu(struct work_struct *work) | ||
2742 | { | ||
2743 | struct global_cwq *gcwq = get_work_gcwq(work); | ||
2744 | |||
2745 | return gcwq ? gcwq->cpu : NR_CPUS; | ||
2746 | } | ||
2747 | EXPORT_SYMBOL_GPL(work_cpu); | ||
2748 | |||
2749 | /** | ||
2750 | * work_busy - test whether a work is currently pending or running | ||
2751 | * @work: the work to be tested | ||
2752 | * | ||
2753 | * Test whether @work is currently pending or running. There is no | ||
2754 | * synchronization around this function and the test result is | ||
2755 | * unreliable and only useful as advisory hints or for debugging. | ||
2756 | * Especially for reentrant wqs, the pending state might hide the | ||
2757 | * running state. | ||
2758 | * | ||
2759 | * RETURNS: | ||
2760 | * OR'd bitmask of WORK_BUSY_* bits. | ||
2761 | */ | ||
2762 | unsigned int work_busy(struct work_struct *work) | ||
2763 | { | ||
2764 | struct global_cwq *gcwq = get_work_gcwq(work); | ||
2765 | unsigned long flags; | ||
2766 | unsigned int ret = 0; | ||
2767 | |||
2768 | if (!gcwq) | ||
2769 | return false; | ||
2770 | |||
2771 | spin_lock_irqsave(&gcwq->lock, flags); | ||
2772 | |||
2773 | if (work_pending(work)) | ||
2774 | ret |= WORK_BUSY_PENDING; | ||
2775 | if (find_worker_executing_work(gcwq, work)) | ||
2776 | ret |= WORK_BUSY_RUNNING; | ||
2777 | |||
2778 | spin_unlock_irqrestore(&gcwq->lock, flags); | ||
2779 | |||
2780 | return ret; | ||
2781 | } | ||
2782 | EXPORT_SYMBOL_GPL(work_busy); | ||
2783 | |||
2678 | /* | 2784 | /* |
2679 | * CPU hotplug. | 2785 | * CPU hotplug. |
2680 | * | 2786 | * |