author     Linus Torvalds <torvalds@linux-foundation.org>  2013-05-16 15:03:28 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-05-16 15:03:28 -0400
commit     4a007ed926fd4cbb4afe4566dbfd252cc49f22fe (patch)
tree       0e71b161d387881fb8c197125a3bff6e444aa300 /kernel
parent     ff89acc563a0bd49965674f56552ad6620415fe2 (diff)
parent     1be0c25da56e860992af972a60321563ca2cfcd1 (diff)
Merge branch 'for-3.10-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq
Pull workqueue fixes from Tejun Heo:
"Three more workqueue regression fixes.
- Fix an unbalanced unlock in the trylock failure path of
  manage_workers(). This shouldn't happen often in the wild but is
  possible (the corrected locking pattern is sketched right after
  this quoted message).
- When schedule_work() and friends were made inline, they became
  unavailable to !GPL modules. Allow !GPL modules to access basic
  stuff - system_wq and queue_*work_on() - so that schedule_work()
  and friends can be used (a usage sketch follows the commit list
  below).
- During boot, the unbound NUMA support code allocates a cpumask for
  each possible node using alloc_cpumask_var_node(), which ends up
  trying to allocate node-specific memory even for offline nodes,
  triggering a BUG in the memory allocator. Use NUMA_NO_NODE for
  offline nodes (see the allocation sketch after the diff)."
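The first fix is easiest to see as a lock-ordering pattern: a function entered with a spinlock held must return with it held, even on a slow path that has to sleep on a mutex. Below is a condensed sketch of the corrected shape; struct my_pool and my_manage() are illustrative stand-ins, not the kernel's actual types.

```c
/*
 * Sketch of the pattern fixed in manage_workers().  The caller holds
 * pool->lock on entry and expects it to still be held on return.
 * my_pool/my_manage are illustrative names, not kernel symbols.
 */
#include <linux/mutex.h>
#include <linux/spinlock.h>

struct my_pool {
	spinlock_t lock;		/* protects pool state; IRQ-safe */
	struct mutex manager_mutex;	/* serializes manager operations */
};

static bool my_manage(struct my_pool *pool)
{
	bool ret = false;

	if (unlikely(!mutex_trylock(&pool->manager_mutex))) {
		/*
		 * Slow path: sleeping on the mutex requires dropping
		 * the spinlock first ...
		 */
		spin_unlock_irq(&pool->lock);
		mutex_lock(&pool->manager_mutex);
		/*
		 * ... and re-acquiring it before continuing.  Omitting
		 * this re-lock was the bug: the caller would later
		 * unlock a spinlock it no longer held.
		 */
		spin_lock_irq(&pool->lock);
		ret = true;
	}

	/* ... manager work happens here, under both locks ... */

	mutex_unlock(&pool->manager_mutex);
	return ret;
}
```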
* 'for-3.10-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq:
workqueue: don't perform NUMA-aware allocations on offline nodes in wq_numa_init()
workqueue: Make schedule_work() available again to non GPL modules
workqueue: correct handling of the pool spin_lock
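To show what the export change enables, here is a minimal sketch of a non-GPL module using schedule_work(). schedule_work() is a static inline that expands to queue_work(system_wq, ...), which in turn inlines to queue_work_on(), so both system_wq and queue_work_on() need plain EXPORT_SYMBOL. The module name and handler are hypothetical, and teardown of still-pending work at unload is elided for brevity.

```c
/*
 * Hypothetical non-GPL module exercising the re-exported symbols.
 * schedule_work() inlines to queue_work(system_wq, ...), which in
 * turn inlines to queue_work_on(WORK_CPU_UNBOUND, system_wq, ...).
 */
#include <linux/module.h>
#include <linux/workqueue.h>

static void hello_fn(struct work_struct *work)
{
	pr_info("hello from the system workqueue\n");
}

static DECLARE_WORK(hello_work, hello_fn);

static int __init hello_init(void)
{
	schedule_work(&hello_work);
	return 0;
}

static void __exit hello_exit(void)
{
	/*
	 * A real module must ensure hello_work has finished before
	 * unloading; that cleanup is elided in this sketch.
	 */
}

module_init(hello_init);
module_exit(hello_exit);
MODULE_LICENSE("Proprietary");	/* i.e. a !GPL module */
```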
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/workqueue.c  10
1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 1ae602809efb..ee8e29a2320c 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -296,7 +296,7 @@ static DEFINE_HASHTABLE(unbound_pool_hash, UNBOUND_POOL_HASH_ORDER);
 static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS];
 
 struct workqueue_struct *system_wq __read_mostly;
-EXPORT_SYMBOL_GPL(system_wq);
+EXPORT_SYMBOL(system_wq);
 struct workqueue_struct *system_highpri_wq __read_mostly;
 EXPORT_SYMBOL_GPL(system_highpri_wq);
 struct workqueue_struct *system_long_wq __read_mostly;
@@ -1411,7 +1411,7 @@ bool queue_work_on(int cpu, struct workqueue_struct *wq,
 	local_irq_restore(flags);
 	return ret;
 }
-EXPORT_SYMBOL_GPL(queue_work_on);
+EXPORT_SYMBOL(queue_work_on);
 
 void delayed_work_timer_fn(unsigned long __data)
 {
@@ -1485,7 +1485,7 @@ bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
 	local_irq_restore(flags);
 	return ret;
 }
-EXPORT_SYMBOL_GPL(queue_delayed_work_on);
+EXPORT_SYMBOL(queue_delayed_work_on);
 
 /**
  * mod_delayed_work_on - modify delay of or queue a delayed work on specific CPU
@@ -2059,6 +2059,7 @@ static bool manage_workers(struct worker *worker)
 	if (unlikely(!mutex_trylock(&pool->manager_mutex))) {
 		spin_unlock_irq(&pool->lock);
 		mutex_lock(&pool->manager_mutex);
+		spin_lock_irq(&pool->lock);
 		ret = true;
 	}
 
@@ -4904,7 +4905,8 @@ static void __init wq_numa_init(void)
 	BUG_ON(!tbl);
 
 	for_each_node(node)
-		BUG_ON(!alloc_cpumask_var_node(&tbl[node], GFP_KERNEL, node));
+		BUG_ON(!alloc_cpumask_var_node(&tbl[node], GFP_KERNEL,
+					       node_online(node) ? node : NUMA_NO_NODE));
 
 	for_each_possible_cpu(cpu) {
 		node = cpu_to_node(cpu);
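The last hunk's pattern generalizes to any early-boot per-node allocation: a node can be possible yet offline, and an offline node has no memory to satisfy a node-local request. A minimal sketch, assuming a hypothetical helper alloc_per_node_masks() (not a kernel API):

```c
/*
 * Sketch of the offline-node allocation pattern from wq_numa_init().
 * alloc_per_node_masks() is a hypothetical helper, not a kernel API.
 */
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/numa.h>
#include <linux/slab.h>

static cpumask_var_t *alloc_per_node_masks(void)
{
	cpumask_var_t *tbl;
	int node;

	tbl = kcalloc(nr_node_ids, sizeof(tbl[0]), GFP_KERNEL);
	if (!tbl)
		return NULL;

	for_each_node(node) {
		/*
		 * An offline node has no local memory; asking for it
		 * can BUG in the page allocator.  NUMA_NO_NODE means
		 * "no placement preference, any node will do".
		 */
		int mem_node = node_online(node) ? node : NUMA_NO_NODE;

		if (!zalloc_cpumask_var_node(&tbl[node], GFP_KERNEL,
					     mem_node))
			return NULL;	/* error unwinding elided */
	}
	return tbl;
}
```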