author		Li Bin <huawei.libin@huawei.com>	2013-09-09 21:52:35 -0400
committer	Tejun Heo <tj@kernel.org>	2013-11-22 18:14:47 -0500
commit		4e8b22bd1a37447712f1b1d96352fc53b463c6b3 (patch)
tree		acc56d30147d1276c85e1ff602ba36c96fda20a4 /kernel
parent		9ef28a73ff6a1598d6f915973c282fe28291f800 (diff)
workqueue: fix pool ID allocation leakage and remove BUILD_BUG_ON() in init_workqueues
When a work item starts execution, the high bits of the work's data contain the pool ID, which can represent a maximum of WORK_OFFQ_POOL_NONE. The pool ID is set to WORK_OFFQ_POOL_NONE when a work item is initialized, indicating that no pool is associated, and get_work_pool() uses it to look up the associated pool. So if worker_pool_assign_id() assigns an ID greater than or equal to WORK_OFFQ_POOL_NONE to a pool, it triggers leakage and may break the non-reentrance guarantee.

This patch fixes the issue by having worker_pool_assign_id() call idr_alloc() with the @end parameter set to WORK_OFFQ_POOL_NONE, so that allocated IDs stay in [0, WORK_OFFQ_POOL_NONE).

Furthermore, the BUILD_BUG_ON() in init_workqueues() no longer makes sense: the number of worker pools needed cannot be determined at compile time, because the number of backing pools for UNBOUND workqueues is dynamic and depends on the assigned custom attributes. So remove it.

tj: Minor comment and indentation updates.

Signed-off-by: Li Bin <huawei.libin@huawei.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
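[Editor's note] For readers less familiar with the encoding, the sketch below is a minimal userspace illustration of why the allocator must stay below the sentinel; it is not the kernel implementation. idr_alloc() allocates from the half-open range [start, end), so passing WORK_OFFQ_POOL_NONE as @end keeps every allocated ID below the sentinel. The WORK_OFFQ_POOL_SHIFT value, the pack_pool_id()/unpack_pool_id() helper names, and the omission of the flag bits are simplifying assumptions; the real definitions live in include/linux/workqueue.h.

/* Userspace sketch only: simplified stand-ins for the workqueue encoding. */
#include <assert.h>
#include <stdio.h>

#define WORK_OFFQ_POOL_SHIFT	5	/* illustrative value, not the real shift */
#define BITS_PER_LONG		((int)(8 * sizeof(unsigned long)))
#define WORK_OFFQ_POOL_BITS	(BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT)
#define WORK_OFFQ_POOL_NONE	((1UL << WORK_OFFQ_POOL_BITS) - 1)

/* pack a pool ID into the high bits of a work data word (flag bits omitted) */
static unsigned long pack_pool_id(unsigned long pool_id)
{
	return pool_id << WORK_OFFQ_POOL_SHIFT;
}

/* recover the pool ID; WORK_OFFQ_POOL_NONE is the "no pool" sentinel */
static unsigned long unpack_pool_id(unsigned long data)
{
	return data >> WORK_OFFQ_POOL_SHIFT;
}

int main(void)
{
	/* an ID below the sentinel round-trips and stays distinguishable */
	unsigned long ok = pack_pool_id(WORK_OFFQ_POOL_NONE - 1);
	assert(unpack_pool_id(ok) != WORK_OFFQ_POOL_NONE);

	/*
	 * An ID equal to (or above) WORK_OFFQ_POOL_NONE reads back as the
	 * "no pool" sentinel.  This is why the patch passes
	 * @end = WORK_OFFQ_POOL_NONE to idr_alloc(), keeping allocated IDs
	 * in [0, WORK_OFFQ_POOL_NONE).
	 */
	unsigned long bad = pack_pool_id(WORK_OFFQ_POOL_NONE);
	printf("ID %lu aliases the sentinel: %s\n",
	       (unsigned long)WORK_OFFQ_POOL_NONE,
	       unpack_pool_id(bad) == WORK_OFFQ_POOL_NONE ? "yes" : "no");
	return 0;
}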
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/workqueue.c | 15
1 file changed, 9 insertions(+), 6 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 73bdf3c1f9b7..c66912be990f 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -521,14 +521,21 @@ static inline void debug_work_activate(struct work_struct *work) { }
 static inline void debug_work_deactivate(struct work_struct *work) { }
 #endif
 
-/* allocate ID and assign it to @pool */
+/**
+ * worker_pool_assign_id - allocate ID and assing it to @pool
+ * @pool: the pool pointer of interest
+ *
+ * Returns 0 if ID in [0, WORK_OFFQ_POOL_NONE) is allocated and assigned
+ * successfully, -errno on failure.
+ */
 static int worker_pool_assign_id(struct worker_pool *pool)
 {
 	int ret;
 
 	lockdep_assert_held(&wq_pool_mutex);
 
-	ret = idr_alloc(&worker_pool_idr, pool, 0, 0, GFP_KERNEL);
+	ret = idr_alloc(&worker_pool_idr, pool, 0, WORK_OFFQ_POOL_NONE,
+			GFP_KERNEL);
 	if (ret >= 0) {
 		pool->id = ret;
 		return 0;
@@ -5020,10 +5027,6 @@ static int __init init_workqueues(void)
 	int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL };
 	int i, cpu;
 
-	/* make sure we have enough bits for OFFQ pool ID */
-	BUILD_BUG_ON((1LU << (BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT)) <
-		     WORK_CPU_END * NR_STD_WORKER_POOLS);
-
 	WARN_ON(__alignof__(struct pool_workqueue) < __alignof__(long long));
 
 	pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);