diff options
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r-- | kernel/workqueue.c | 19 |
1 files changed, 15 insertions, 4 deletions
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 4aa9f5bc6b2d..ee8e29a2320c 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
@@ -296,7 +296,7 @@ static DEFINE_HASHTABLE(unbound_pool_hash, UNBOUND_POOL_HASH_ORDER); | |||
296 | static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS]; | 296 | static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS]; |
297 | 297 | ||
298 | struct workqueue_struct *system_wq __read_mostly; | 298 | struct workqueue_struct *system_wq __read_mostly; |
299 | EXPORT_SYMBOL_GPL(system_wq); | 299 | EXPORT_SYMBOL(system_wq); |
300 | struct workqueue_struct *system_highpri_wq __read_mostly; | 300 | struct workqueue_struct *system_highpri_wq __read_mostly; |
301 | EXPORT_SYMBOL_GPL(system_highpri_wq); | 301 | EXPORT_SYMBOL_GPL(system_highpri_wq); |
302 | struct workqueue_struct *system_long_wq __read_mostly; | 302 | struct workqueue_struct *system_long_wq __read_mostly; |
@@ -1411,7 +1411,7 @@ bool queue_work_on(int cpu, struct workqueue_struct *wq, | |||
1411 | local_irq_restore(flags); | 1411 | local_irq_restore(flags); |
1412 | return ret; | 1412 | return ret; |
1413 | } | 1413 | } |
1414 | EXPORT_SYMBOL_GPL(queue_work_on); | 1414 | EXPORT_SYMBOL(queue_work_on); |
1415 | 1415 | ||
1416 | void delayed_work_timer_fn(unsigned long __data) | 1416 | void delayed_work_timer_fn(unsigned long __data) |
1417 | { | 1417 | { |
@@ -1485,7 +1485,7 @@ bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq, | |||
1485 | local_irq_restore(flags); | 1485 | local_irq_restore(flags); |
1486 | return ret; | 1486 | return ret; |
1487 | } | 1487 | } |
1488 | EXPORT_SYMBOL_GPL(queue_delayed_work_on); | 1488 | EXPORT_SYMBOL(queue_delayed_work_on); |
1489 | 1489 | ||
1490 | /** | 1490 | /** |
1491 | * mod_delayed_work_on - modify delay of or queue a delayed work on specific CPU | 1491 | * mod_delayed_work_on - modify delay of or queue a delayed work on specific CPU |
@@ -2059,6 +2059,7 @@ static bool manage_workers(struct worker *worker) | |||
2059 | if (unlikely(!mutex_trylock(&pool->manager_mutex))) { | 2059 | if (unlikely(!mutex_trylock(&pool->manager_mutex))) { |
2060 | spin_unlock_irq(&pool->lock); | 2060 | spin_unlock_irq(&pool->lock); |
2061 | mutex_lock(&pool->manager_mutex); | 2061 | mutex_lock(&pool->manager_mutex); |
2062 | spin_lock_irq(&pool->lock); | ||
2062 | ret = true; | 2063 | ret = true; |
2063 | } | 2064 | } |
2064 | 2065 | ||
@@ -4311,6 +4312,12 @@ bool current_is_workqueue_rescuer(void) | |||
4311 | * no synchronization around this function and the test result is | 4312 | * no synchronization around this function and the test result is |
4312 | * unreliable and only useful as advisory hints or for debugging. | 4313 | * unreliable and only useful as advisory hints or for debugging. |
4313 | * | 4314 | * |
4315 | * If @cpu is WORK_CPU_UNBOUND, the test is performed on the local CPU. | ||
4316 | * Note that both per-cpu and unbound workqueues may be associated with | ||
4317 | * multiple pool_workqueues which have separate congested states. A | ||
4318 | * workqueue being congested on one CPU doesn't mean the workqueue is also | ||
4319 | congested on other CPUs / NUMA nodes. | ||
4320 | * | ||
4314 | * RETURNS: | 4321 | * RETURNS: |
4315 | * %true if congested, %false otherwise. | 4322 | * %true if congested, %false otherwise. |
4316 | */ | 4323 | */ |
@@ -4321,6 +4328,9 @@ bool workqueue_congested(int cpu, struct workqueue_struct *wq) | |||
4321 | 4328 | ||
4322 | rcu_read_lock_sched(); | 4329 | rcu_read_lock_sched(); |
4323 | 4330 | ||
4331 | if (cpu == WORK_CPU_UNBOUND) | ||
4332 | cpu = smp_processor_id(); | ||
4333 | |||
4324 | if (!(wq->flags & WQ_UNBOUND)) | 4334 | if (!(wq->flags & WQ_UNBOUND)) |
4325 | pwq = per_cpu_ptr(wq->cpu_pwqs, cpu); | 4335 | pwq = per_cpu_ptr(wq->cpu_pwqs, cpu); |
4326 | else | 4336 | else |
@@ -4895,7 +4905,8 @@ static void __init wq_numa_init(void) | |||
4895 | BUG_ON(!tbl); | 4905 | BUG_ON(!tbl); |
4896 | 4906 | ||
4897 | for_each_node(node) | 4907 | for_each_node(node) |
4898 | BUG_ON(!alloc_cpumask_var_node(&tbl[node], GFP_KERNEL, node)); | 4908 | BUG_ON(!alloc_cpumask_var_node(&tbl[node], GFP_KERNEL, |
4909 | node_online(node) ? node : NUMA_NO_NODE)); | ||
4899 | 4910 | ||
4900 | for_each_possible_cpu(cpu) { | 4911 | for_each_possible_cpu(cpu) { |
4901 | node = cpu_to_node(cpu); | 4912 | node = cpu_to_node(cpu); |