author		Tejun Heo <tj@kernel.org>	2013-03-12 14:30:00 -0400
committer	Tejun Heo <tj@kernel.org>	2013-03-12 14:30:00 -0400
commit		7a4e344c5675eefbde93ed9a98ef45e0e4957bc2 (patch)
tree		7a4383063512328184db9d20e27164824c44dc2c
parent		4e1a1f9a051b4c9a2821a2a0f7f4a27c701fba51 (diff)
workqueue: introduce workqueue_attrs
Introduce struct workqueue_attrs which carries worker attributes -
currently the nice level and allowed cpumask along with helper routines
alloc_workqueue_attrs() and free_workqueue_attrs().

Each worker_pool now carries ->attrs describing the attributes of its
workers.  All functions dealing with cpumask and nice level of workers
are updated to follow worker_pool->attrs instead of determining them
from other characteristics of the worker_pool, and init_workqueues()
is updated to set worker_pool->attrs appropriately for all standard
pools.

Note that create_worker() is updated to always perform set_user_nice()
and use set_cpus_allowed_ptr() combined with manual assertion of
PF_THREAD_BOUND instead of kthread_bind().  This simplifies handling
random attributes without affecting the outcome.

This patch doesn't introduce any behavior changes.

v2: Missing cpumask_var_t definition caused build failure on some
    archs.  linux/cpumask.h included.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reported-by: kbuild test robot <fengguang.wu@intel.com>
Reviewed-by: Lai Jiangshan <laijs@cn.fujitsu.com>
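[Editorial note: a minimal usage sketch, illustrative and not part of the
patch, showing how the two new helpers compose.  It relies only on what
this patch adds plus standard kernel cpumask primitives; the nice value
and CPU number are arbitrary examples.]

	struct workqueue_attrs *attrs;

	attrs = alloc_workqueue_attrs(GFP_KERNEL);
	if (!attrs)
		return -ENOMEM;	/* struct or cpumask allocation failed */

	/* defaults from the allocator: nice 0, cpumask allowing all CPUs */
	attrs->nice = -20;				/* example: high priority workers */
	cpumask_copy(attrs->cpumask, cpumask_of(0));	/* example: restrict to CPU 0 */

	/* ... use attrs to describe a worker_pool ... */

	free_workqueue_attrs(attrs);	/* NULL-safe; frees the cpumask, then the struct */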
-rw-r--r--	include/linux/workqueue.h	13
-rw-r--r--	kernel/workqueue.c		103
2 files changed, 94 insertions(+), 22 deletions(-)
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 899be6636d20..00c1b9ba8252 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -11,6 +11,7 @@
 #include <linux/lockdep.h>
 #include <linux/threads.h>
 #include <linux/atomic.h>
+#include <linux/cpumask.h>
 
 struct workqueue_struct;
 
@@ -115,6 +116,15 @@ struct delayed_work {
 	int cpu;
 };
 
+/*
+ * A struct for workqueue attributes.  This can be used to change
+ * attributes of an unbound workqueue.
+ */
+struct workqueue_attrs {
+	int			nice;		/* nice level */
+	cpumask_var_t		cpumask;	/* allowed CPUs */
+};
+
 static inline struct delayed_work *to_delayed_work(struct work_struct *work)
 {
 	return container_of(work, struct delayed_work, work);
@@ -399,6 +409,9 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
 
 extern void destroy_workqueue(struct workqueue_struct *wq);
 
+struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask);
+void free_workqueue_attrs(struct workqueue_attrs *attrs);
+
 extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
 			  struct work_struct *work);
 extern bool queue_work(struct workqueue_struct *wq, struct work_struct *work);
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 094f16668e1b..b0d3cbb83f63 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -148,6 +148,8 @@ struct worker_pool {
 	struct mutex		assoc_mutex;	/* protect POOL_DISASSOCIATED */
 	struct ida		worker_ida;	/* L: for worker IDs */
 
+	struct workqueue_attrs	*attrs;		/* I: worker attributes */
+
 	/*
 	 * The current concurrency level.  As it's likely to be accessed
 	 * from other CPUs during try_to_wake_up(), put it in a separate
@@ -1566,14 +1568,13 @@ __acquires(&pool->lock)
 	 * against POOL_DISASSOCIATED.
 	 */
 	if (!(pool->flags & POOL_DISASSOCIATED))
-		set_cpus_allowed_ptr(current, get_cpu_mask(pool->cpu));
+		set_cpus_allowed_ptr(current, pool->attrs->cpumask);
 
 	spin_lock_irq(&pool->lock);
 	if (pool->flags & POOL_DISASSOCIATED)
 		return false;
 	if (task_cpu(current) == pool->cpu &&
-	    cpumask_equal(&current->cpus_allowed,
-			  get_cpu_mask(pool->cpu)))
+	    cpumask_equal(&current->cpus_allowed, pool->attrs->cpumask))
 		return true;
 	spin_unlock_irq(&pool->lock);
 
@@ -1679,7 +1680,7 @@ static void rebind_workers(struct worker_pool *pool)
 		 * wq doesn't really matter but let's keep @worker->pool
 		 * and @pwq->pool consistent for sanity.
 		 */
-		if (std_worker_pool_pri(worker->pool))
+		if (worker->pool->attrs->nice < 0)
 			wq = system_highpri_wq;
 		else
 			wq = system_wq;
@@ -1721,7 +1722,7 @@ static struct worker *alloc_worker(void)
  */
 static struct worker *create_worker(struct worker_pool *pool)
 {
-	const char *pri = std_worker_pool_pri(pool) ? "H" : "";
+	const char *pri = pool->attrs->nice < 0 ? "H" : "";
 	struct worker *worker = NULL;
 	int id = -1;
 
@@ -1751,24 +1752,23 @@ static struct worker *create_worker(struct worker_pool *pool)
 	if (IS_ERR(worker->task))
 		goto fail;
 
-	if (std_worker_pool_pri(pool))
-		set_user_nice(worker->task, HIGHPRI_NICE_LEVEL);
+	set_user_nice(worker->task, pool->attrs->nice);
+	set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);
 
 	/*
-	 * Determine CPU binding of the new worker depending on
-	 * %POOL_DISASSOCIATED.  The caller is responsible for ensuring the
-	 * flag remains stable across this function.  See the comments
-	 * above the flag definition for details.
-	 *
-	 * As an unbound worker may later become a regular one if CPU comes
-	 * online, make sure every worker has %PF_THREAD_BOUND set.
+	 * %PF_THREAD_BOUND is used to prevent userland from meddling with
+	 * cpumask of workqueue workers.  This is an abuse.  We need
+	 * %PF_NO_SETAFFINITY.
 	 */
-	if (!(pool->flags & POOL_DISASSOCIATED)) {
-		kthread_bind(worker->task, pool->cpu);
-	} else {
-		worker->task->flags |= PF_THREAD_BOUND;
+	worker->task->flags |= PF_THREAD_BOUND;
+
+	/*
+	 * The caller is responsible for ensuring %POOL_DISASSOCIATED
+	 * remains stable across this function.  See the comments above the
+	 * flag definition for details.
+	 */
+	if (pool->flags & POOL_DISASSOCIATED)
 		worker->flags |= WORKER_UNBOUND;
-	}
 
 	return worker;
 fail:
@@ -3123,7 +3123,52 @@ int keventd_up(void)
 	return system_wq != NULL;
 }
 
-static void init_worker_pool(struct worker_pool *pool)
+/**
+ * free_workqueue_attrs - free a workqueue_attrs
+ * @attrs: workqueue_attrs to free
+ *
+ * Undo alloc_workqueue_attrs().
+ */
+void free_workqueue_attrs(struct workqueue_attrs *attrs)
+{
+	if (attrs) {
+		free_cpumask_var(attrs->cpumask);
+		kfree(attrs);
+	}
+}
+
+/**
+ * alloc_workqueue_attrs - allocate a workqueue_attrs
+ * @gfp_mask: allocation mask to use
+ *
+ * Allocate a new workqueue_attrs, initialize with default settings and
+ * return it.  Returns NULL on failure.
+ */
+struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask)
+{
+	struct workqueue_attrs *attrs;
+
+	attrs = kzalloc(sizeof(*attrs), gfp_mask);
+	if (!attrs)
+		goto fail;
+	if (!alloc_cpumask_var(&attrs->cpumask, gfp_mask))
+		goto fail;
+
+	cpumask_setall(attrs->cpumask);
+	return attrs;
+fail:
+	free_workqueue_attrs(attrs);
+	return NULL;
+}
+
+/**
+ * init_worker_pool - initialize a newly zalloc'd worker_pool
+ * @pool: worker_pool to initialize
+ *
+ * Initialize a newly zalloc'd @pool.  It also allocates @pool->attrs.
+ * Returns 0 on success, -errno on failure.
+ */
+static int init_worker_pool(struct worker_pool *pool)
 {
 	spin_lock_init(&pool->lock);
 	pool->flags |= POOL_DISASSOCIATED;
@@ -3141,6 +3186,11 @@ static void init_worker_pool(struct worker_pool *pool)
 	mutex_init(&pool->manager_arb);
 	mutex_init(&pool->assoc_mutex);
 	ida_init(&pool->worker_ida);
+
+	pool->attrs = alloc_workqueue_attrs(GFP_KERNEL);
+	if (!pool->attrs)
+		return -ENOMEM;
+	return 0;
 }
 
 static int alloc_and_link_pwqs(struct workqueue_struct *wq)
@@ -3792,7 +3842,8 @@ out_unlock:
 
 static int __init init_workqueues(void)
 {
-	int cpu;
+	int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL };
+	int i, cpu;
 
 	/* make sure we have enough bits for OFFQ pool ID */
 	BUILD_BUG_ON((1LU << (BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT)) <
@@ -3809,10 +3860,18 @@ static int __init init_workqueues(void)
 	for_each_wq_cpu(cpu) {
 		struct worker_pool *pool;
 
+		i = 0;
 		for_each_std_worker_pool(pool, cpu) {
-			init_worker_pool(pool);
+			BUG_ON(init_worker_pool(pool));
 			pool->cpu = cpu;
 
+			if (cpu != WORK_CPU_UNBOUND)
+				cpumask_copy(pool->attrs->cpumask, cpumask_of(cpu));
+			else
+				cpumask_setall(pool->attrs->cpumask);
+
+			pool->attrs->nice = std_nice[i++];
+
 			/* alloc pool ID */
 			BUG_ON(worker_pool_assign_id(pool));
 		}
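
[Editorial note: an illustrative summary, not part of the patch, of the
attributes the loop above establishes, given the two standard pools per
CPU implied by std_nice[].]

	/*
	 * per-CPU normal pool:   attrs->nice == 0,                  cpumask == cpumask_of(cpu)
	 * per-CPU highpri pool:  attrs->nice == HIGHPRI_NICE_LEVEL, cpumask == cpumask_of(cpu)
	 * unbound std pools:     same two nice levels,              cpumask set to all CPUs
	 */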