aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorThomas Gleixner <tglx@linutronix.de>2019-06-26 10:52:38 -0400
committerTejun Heo <tj@kernel.org>2019-06-27 17:12:19 -0400
commitbe69d00d9769575e35d83367f465a58dbf82748c (patch)
tree4e0ee0ab44461f63b502c93ea14ca6efce117f4d
parent2c9858ecbeb1e68224290043445990e29337d4c0 (diff)
workqueue: Remove GPF argument from alloc_workqueue_attrs()
All callers use GFP_KERNEL. No point in having that argument. Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> Signed-off-by: Tejun Heo <tj@kernel.org>
-rw-r--r--kernel/workqueue.c23
1 files changed, 11 insertions, 12 deletions
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index b8fa7afe6e7d..601d61150b65 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -3339,21 +3339,20 @@ static void free_workqueue_attrs(struct workqueue_attrs *attrs)
 
 /**
  * alloc_workqueue_attrs - allocate a workqueue_attrs
- * @gfp_mask: allocation mask to use
  *
  * Allocate a new workqueue_attrs, initialize with default settings and
  * return it.
  *
  * Return: The allocated new workqueue_attr on success. %NULL on failure.
  */
-static struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask)
+static struct workqueue_attrs *alloc_workqueue_attrs(void)
 {
 	struct workqueue_attrs *attrs;
 
-	attrs = kzalloc(sizeof(*attrs), gfp_mask);
+	attrs = kzalloc(sizeof(*attrs), GFP_KERNEL);
 	if (!attrs)
 		goto fail;
-	if (!alloc_cpumask_var(&attrs->cpumask, gfp_mask))
+	if (!alloc_cpumask_var(&attrs->cpumask, GFP_KERNEL))
 		goto fail;
 
 	cpumask_copy(attrs->cpumask, cpu_possible_mask);
@@ -3431,7 +3430,7 @@ static int init_worker_pool(struct worker_pool *pool)
 	pool->refcnt = 1;
 
 	/* shouldn't fail above this point */
-	pool->attrs = alloc_workqueue_attrs(GFP_KERNEL);
+	pool->attrs = alloc_workqueue_attrs();
 	if (!pool->attrs)
 		return -ENOMEM;
 	return 0;
@@ -3896,8 +3895,8 @@ apply_wqattrs_prepare(struct workqueue_struct *wq,
 
 	ctx = kzalloc(struct_size(ctx, pwq_tbl, nr_node_ids), GFP_KERNEL);
 
-	new_attrs = alloc_workqueue_attrs(GFP_KERNEL);
-	tmp_attrs = alloc_workqueue_attrs(GFP_KERNEL);
+	new_attrs = alloc_workqueue_attrs();
+	tmp_attrs = alloc_workqueue_attrs();
 	if (!ctx || !new_attrs || !tmp_attrs)
 		goto out_free;
 
@@ -4241,7 +4240,7 @@ struct workqueue_struct *alloc_workqueue(const char *fmt,
 		return NULL;
 
 	if (flags & WQ_UNBOUND) {
-		wq->unbound_attrs = alloc_workqueue_attrs(GFP_KERNEL);
+		wq->unbound_attrs = alloc_workqueue_attrs();
 		if (!wq->unbound_attrs)
 			goto err_free_wq;
 	}
@@ -5394,7 +5393,7 @@ static struct workqueue_attrs *wq_sysfs_prep_attrs(struct workqueue_struct *wq)
 
 	lockdep_assert_held(&wq_pool_mutex);
 
-	attrs = alloc_workqueue_attrs(GFP_KERNEL);
+	attrs = alloc_workqueue_attrs();
 	if (!attrs)
 		return NULL;
 
@@ -5816,7 +5815,7 @@ static void __init wq_numa_init(void)
 		return;
 	}
 
-	wq_update_unbound_numa_attrs_buf = alloc_workqueue_attrs(GFP_KERNEL);
+	wq_update_unbound_numa_attrs_buf = alloc_workqueue_attrs();
 	BUG_ON(!wq_update_unbound_numa_attrs_buf);
 
 	/*
@@ -5891,7 +5890,7 @@ int __init workqueue_init_early(void)
 	for (i = 0; i < NR_STD_WORKER_POOLS; i++) {
 		struct workqueue_attrs *attrs;
 
-		BUG_ON(!(attrs = alloc_workqueue_attrs(GFP_KERNEL)));
+		BUG_ON(!(attrs = alloc_workqueue_attrs()));
 		attrs->nice = std_nice[i];
 		unbound_std_wq_attrs[i] = attrs;
 
@@ -5900,7 +5899,7 @@ int __init workqueue_init_early(void)
 		 * guaranteed by max_active which is enforced by pwqs.
 		 * Turn off NUMA so that dfl_pwq is used for all nodes.
 		 */
-		BUG_ON(!(attrs = alloc_workqueue_attrs(GFP_KERNEL)));
+		BUG_ON(!(attrs = alloc_workqueue_attrs()));
 		attrs->nice = std_nice[i];
 		attrs->no_numa = true;
 		ordered_wq_attrs[i] = attrs;