Diffstat (limited to 'kernel/workqueue.c')

 -rw-r--r--   kernel/workqueue.c   50
 1 file changed, 37 insertions(+), 13 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 987293d03ebc..c66912be990f 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -305,6 +305,9 @@ static DEFINE_HASHTABLE(unbound_pool_hash, UNBOUND_POOL_HASH_ORDER);
 /* I: attributes used when instantiating standard unbound pools on demand */
 static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS];
 
+/* I: attributes used when instantiating ordered pools on demand */
+static struct workqueue_attrs *ordered_wq_attrs[NR_STD_WORKER_POOLS];
+
 struct workqueue_struct *system_wq __read_mostly;
 EXPORT_SYMBOL(system_wq);
 struct workqueue_struct *system_highpri_wq __read_mostly;
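
The new array mirrors unbound_std_wq_attrs: one entry per standard pool niceness (normal and highpri). A minimal sketch of how both arrays get indexed later in alloc_and_link_pwqs(), assuming only that NR_STD_WORKER_POOLS is 2 as in this tree:

    /* WQ_HIGHPRI selects the second entry; a bool is a valid index
     * because NR_STD_WORKER_POOLS == 2 */
    bool highpri = wq->flags & WQ_HIGHPRI;
    struct workqueue_attrs *attrs = ordered_wq_attrs[highpri];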
@@ -518,14 +521,21 @@ static inline void debug_work_activate(struct work_struct *work) { }
 static inline void debug_work_deactivate(struct work_struct *work) { }
 #endif
 
-/* allocate ID and assign it to @pool */
+/**
+ * worker_pool_assign_id - allocate ID and assign it to @pool
+ * @pool: the pool pointer of interest
+ *
+ * Returns 0 if ID in [0, WORK_OFFQ_POOL_NONE) is allocated and assigned
+ * successfully, -errno on failure.
+ */
 static int worker_pool_assign_id(struct worker_pool *pool)
 {
 	int ret;
 
 	lockdep_assert_held(&wq_pool_mutex);
 
-	ret = idr_alloc(&worker_pool_idr, pool, 0, 0, GFP_KERNEL);
+	ret = idr_alloc(&worker_pool_idr, pool, 0, WORK_OFFQ_POOL_NONE,
+			GFP_KERNEL);
 	if (ret >= 0) {
 		pool->id = ret;
 		return 0;
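
The new upper bound is what makes the kerneldoc's [0, WORK_OFFQ_POOL_NONE) range hold. It matters because a pool ID outlives the pool pointer: while a work item is off-queue, its last pool ID is packed into work->data, where the all-ones value WORK_OFFQ_POOL_NONE means "no pool". A hedged sketch of that encoding (encode_offq_pool is a hypothetical helper; WORK_OFFQ_POOL_SHIFT and WORK_OFFQ_POOL_NONE are the real constants):

    /* hypothetical helper: a pool ID at or above WORK_OFFQ_POOL_NONE
     * would alias the "no pool" sentinel after the shift, which the
     * idr_alloc() bound above now rules out */
    static unsigned long encode_offq_pool(int pool_id, unsigned long flags)
    {
            return ((unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT) | flags;
    }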
@@ -1320,7 +1330,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
 
 	debug_work_activate(work);
 
-	/* if dying, only works from the same workqueue are allowed */
+	/* if draining, only works from the same workqueue are allowed */
 	if (unlikely(wq->flags & __WQ_DRAINING) &&
 	    WARN_ON_ONCE(!is_chained_work(wq)))
 		return;
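
The reworded comment now names the state it guards: __WQ_DRAINING is set by drain_workqueue(), and while it is set only "chained" work, i.e. work queued from an item already executing on that same workqueue, may be added. A sketch of the pattern that stays legal while draining (my_wq and more_to_do() are illustrative, not from this file):

    static struct workqueue_struct *my_wq;          /* illustrative */

    static void my_work_fn(struct work_struct *work)
    {
            /* requeueing onto the same wq counts as chained work and
             * passes the is_chained_work() check even while draining */
            if (more_to_do())                       /* hypothetical predicate */
                    queue_work(my_wq, work);
    }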
@@ -1736,16 +1746,17 @@ static struct worker *create_worker(struct worker_pool *pool)
 	if (IS_ERR(worker->task))
 		goto fail;
 
+	set_user_nice(worker->task, pool->attrs->nice);
+
+	/* prevent userland from meddling with cpumask of workqueue workers */
+	worker->task->flags |= PF_NO_SETAFFINITY;
+
 	/*
 	 * set_cpus_allowed_ptr() will fail if the cpumask doesn't have any
 	 * online CPUs.  It'll be re-applied when any of the CPUs come up.
 	 */
-	set_user_nice(worker->task, pool->attrs->nice);
 	set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);
 
-	/* prevent userland from meddling with cpumask of workqueue workers */
-	worker->task->flags |= PF_NO_SETAFFINITY;
-
 	/*
 	 * The caller is responsible for ensuring %POOL_DISASSOCIATED
 	 * remains stable across this function.  See the comments above the
@@ -4106,7 +4117,7 @@ out_unlock:
 static int alloc_and_link_pwqs(struct workqueue_struct *wq)
 {
 	bool highpri = wq->flags & WQ_HIGHPRI;
-	int cpu;
+	int cpu, ret;
 
 	if (!(wq->flags & WQ_UNBOUND)) {
 		wq->cpu_pwqs = alloc_percpu(struct pool_workqueue);
@@ -4126,6 +4137,13 @@ static int alloc_and_link_pwqs(struct workqueue_struct *wq)
 			mutex_unlock(&wq->mutex);
 		}
 		return 0;
+	} else if (wq->flags & __WQ_ORDERED) {
+		ret = apply_workqueue_attrs(wq, ordered_wq_attrs[highpri]);
+		/* there should only be a single pwq for ordering guarantee */
+		WARN(!ret && (wq->pwqs.next != &wq->dfl_pwq->pwqs_node ||
+			      wq->pwqs.prev != &wq->dfl_pwq->pwqs_node),
+		     "ordering guarantee broken for workqueue %s\n", wq->name);
+		return ret;
 	} else {
 		return apply_workqueue_attrs(wq, unbound_std_wq_attrs[highpri]);
 	}
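
The two pointer comparisons assert that wq->pwqs holds exactly one node and that it belongs to dfl_pwq; an ordered wq depends on that single pwq, since ordering comes from its max_active limit of one. The same invariant spelled out with the generic list helpers (wq_has_single_pwq is a hypothetical name):

    /* hypothetical helper: true iff dfl_pwq is the sole entry, which is
     * exactly what the WARN() above checks via head->next and head->prev */
    static bool wq_has_single_pwq(struct workqueue_struct *wq)
    {
            return list_is_singular(&wq->pwqs) &&
                   list_first_entry(&wq->pwqs, struct pool_workqueue,
                                    pwqs_node) == wq->dfl_pwq;
    }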
@@ -5009,10 +5027,6 @@ static int __init init_workqueues(void)
 	int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL };
 	int i, cpu;
 
-	/* make sure we have enough bits for OFFQ pool ID */
-	BUILD_BUG_ON((1LU << (BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT)) <
-		     WORK_CPU_END * NR_STD_WORKER_POOLS);
-
 	WARN_ON(__alignof__(struct pool_workqueue) < __alignof__(long long));
 
 	pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
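
The compile-time capacity check can go because worker_pool_assign_id() now enforces the bound at run time: once the usable ID space is exhausted, allocation fails instead of ever colliding with the OFFQ sentinel. Roughly, per the idr_alloc() call shown earlier:

    ret = idr_alloc(&worker_pool_idr, pool, 0, WORK_OFFQ_POOL_NONE, GFP_KERNEL);
    /* ret == -ENOSPC once IDs 0..WORK_OFFQ_POOL_NONE-1 are all in use,
     * so no pool ID can alias the WORK_OFFQ_POOL_NONE sentinel */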
@@ -5051,13 +5065,23 @@ static int __init init_workqueues(void)
 		}
 	}
 
-	/* create default unbound wq attrs */
+	/* create default unbound and ordered wq attrs */
 	for (i = 0; i < NR_STD_WORKER_POOLS; i++) {
 		struct workqueue_attrs *attrs;
 
 		BUG_ON(!(attrs = alloc_workqueue_attrs(GFP_KERNEL)));
 		attrs->nice = std_nice[i];
 		unbound_std_wq_attrs[i] = attrs;
+
+		/*
+		 * An ordered wq should have only one pwq as ordering is
+		 * guaranteed by max_active which is enforced by pwqs.
+		 * Turn off NUMA so that dfl_pwq is used for all nodes.
+		 */
+		BUG_ON(!(attrs = alloc_workqueue_attrs(GFP_KERNEL)));
+		attrs->nice = std_nice[i];
+		attrs->no_numa = true;
+		ordered_wq_attrs[i] = attrs;
 	}
 
 	system_wq = alloc_workqueue("events", 0, 0);
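
With ordered_wq_attrs populated, every ordered workqueue ends up with one nice-matched, NUMA-agnostic pwq, restoring the strict one-at-a-time guarantee. A usage sketch (the name and flag are illustrative):

    struct workqueue_struct *my_wq;

    /* expands to WQ_UNBOUND | __WQ_ORDERED with max_active = 1, so work
     * items execute one at a time, in queueing order, system-wide */
    my_wq = alloc_ordered_workqueue("my_ordered_wq", WQ_MEM_RECLAIM);
    if (!my_wq)
            return -ENOMEM;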
