Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--  kernel/workqueue.c | 53 ++++++++++++++++++++++-------------------------------
1 file changed, 22 insertions(+), 31 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 8634fc9d52d2..2db1532b09dc 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -273,12 +273,6 @@ static inline int __next_wq_cpu(int cpu, const struct cpumask *mask,
 	return WORK_CPU_END;
 }
 
-static inline int __next_pwq_cpu(int cpu, const struct cpumask *mask,
-				 struct workqueue_struct *wq)
-{
-	return __next_wq_cpu(cpu, mask, !(wq->flags & WQ_UNBOUND) ? 1 : 2);
-}
-
 /*
  * CPU iterators
  *
@@ -289,8 +283,6 @@ static inline int __next_pwq_cpu(int cpu, const struct cpumask *mask,
  *
  * for_each_wq_cpu()		: possible CPUs + WORK_CPU_UNBOUND
  * for_each_online_wq_cpu()	: online CPUs + WORK_CPU_UNBOUND
- * for_each_pwq_cpu()		: possible CPUs for bound workqueues,
- *				  WORK_CPU_UNBOUND for unbound workqueues
  */
 #define for_each_wq_cpu(cpu)						\
 	for ((cpu) = __next_wq_cpu(-1, cpu_possible_mask, 3);		\
@@ -302,10 +294,13 @@ static inline int __next_pwq_cpu(int cpu, const struct cpumask *mask,
 	     (cpu) < WORK_CPU_END;					\
 	     (cpu) = __next_wq_cpu((cpu), cpu_online_mask, 3))
 
-#define for_each_pwq_cpu(cpu, wq)					\
-	for ((cpu) = __next_pwq_cpu(-1, cpu_possible_mask, (wq));	\
-	     (cpu) < WORK_CPU_END;					\
-	     (cpu) = __next_pwq_cpu((cpu), cpu_possible_mask, (wq)))
+/**
+ * for_each_pwq - iterate through all pool_workqueues of the specified workqueue
+ * @pwq: iteration cursor
+ * @wq: the target workqueue
+ */
+#define for_each_pwq(pwq, wq)						\
+	list_for_each_entry((pwq), &(wq)->pwqs, pwqs_node)
 
 #ifdef CONFIG_DEBUG_OBJECTS_WORK
 
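The new for_each_pwq() is plain list_for_each_entry() over the workqueue's pwqs list: instead of probing every possible CPU and special-casing WORK_CPU_UNBOUND, callers visit exactly the pool_workqueues the workqueue owns. Below is a minimal userspace sketch of the intrusive-list pattern behind it, using toy container_of/list_for_each_entry stand-ins rather than the kernel headers:

/* toy re-implementation for illustration; kernel names reused loosely */
#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

#define list_for_each_entry(pos, head, member)				      \
	for ((pos) = container_of((head)->next, __typeof__(*(pos)), member); \
	     &(pos)->member != (head);					      \
	     (pos) = container_of((pos)->member.next, __typeof__(*(pos)), member))

struct pool_workqueue {
	int max_active;
	struct list_head pwqs_node;	/* link on wq->pwqs */
};

struct workqueue_struct {
	struct list_head pwqs;		/* all pwqs of this wq */
};

#define for_each_pwq(pwq, wq) \
	list_for_each_entry((pwq), &(wq)->pwqs, pwqs_node)

static void list_add_tail(struct list_head *node, struct list_head *head)
{
	node->prev = head->prev;
	node->next = head;
	head->prev->next = node;
	head->prev = node;
}

int main(void)
{
	struct workqueue_struct wq = { .pwqs = { &wq.pwqs, &wq.pwqs } };
	struct pool_workqueue a = { .max_active = 1 }, b = { .max_active = 4 };
	struct pool_workqueue *pwq;

	list_add_tail(&a.pwqs_node, &wq.pwqs);
	list_add_tail(&b.pwqs_node, &wq.pwqs);

	for_each_pwq(pwq, &wq)		/* visits a, then b */
		printf("max_active=%d\n", pwq->max_active);
	return 0;
}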
@@ -2505,15 +2500,14 @@ static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq,
 				      int flush_color, int work_color)
 {
 	bool wait = false;
-	unsigned int cpu;
+	struct pool_workqueue *pwq;
 
 	if (flush_color >= 0) {
 		WARN_ON_ONCE(atomic_read(&wq->nr_pwqs_to_flush));
 		atomic_set(&wq->nr_pwqs_to_flush, 1);
 	}
 
-	for_each_pwq_cpu(cpu, wq) {
-		struct pool_workqueue *pwq = get_pwq(cpu, wq);
+	for_each_pwq(pwq, wq) {
 		struct worker_pool *pool = pwq->pool;
 
 		spin_lock_irq(&pool->lock);
@@ -2712,7 +2706,7 @@ EXPORT_SYMBOL_GPL(flush_workqueue);
 void drain_workqueue(struct workqueue_struct *wq)
 {
 	unsigned int flush_cnt = 0;
-	unsigned int cpu;
+	struct pool_workqueue *pwq;
 
 	/*
 	 * __queue_work() needs to test whether there are drainers, is much
@@ -2726,8 +2720,7 @@ void drain_workqueue(struct workqueue_struct *wq)
 reflush:
 	flush_workqueue(wq);
 
-	for_each_pwq_cpu(cpu, wq) {
-		struct pool_workqueue *pwq = get_pwq(cpu, wq);
+	for_each_pwq(pwq, wq) {
 		bool drained;
 
 		spin_lock_irq(&pwq->pool->lock);
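The drain loop's shape is unchanged by the conversion: flush, re-scan every pwq, and go around again if any still has work pending, since running work items may queue more work. A self-contained toy model of that retry shape (illustrative types and counters, not the kernel API):

#include <stdbool.h>
#include <stdio.h>

/* hypothetical stand-in: pending work, plus work that queues more work */
struct toy_pwq { int pending, chained; };

/* "flush": run everything pending; each chained item queues one more */
static void toy_flush(struct toy_pwq *p)
{
	p->pending = p->chained;
	if (p->chained)
		p->chained--;
}

static void toy_drain(struct toy_pwq *pwqs, int n)
{
	unsigned int flush_cnt = 0;
	bool drained;

	do {
		drained = true;
		for (int i = 0; i < n; i++) {
			toy_flush(&pwqs[i]);
			if (pwqs[i].pending)
				drained = false;
		}
		/* mirrors drain_workqueue()'s periodic warning */
		if (!drained && ++flush_cnt % 10 == 0)
			fprintf(stderr, "drain: %u reflushes\n", flush_cnt);
	} while (!drained);
}

int main(void)
{
	struct toy_pwq pwqs[2] = { { 3, 2 }, { 1, 0 } };

	toy_drain(pwqs, 2);
	puts("drained");
	return 0;
}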
@@ -3100,6 +3093,7 @@ int keventd_up(void)
 
 static int alloc_and_link_pwqs(struct workqueue_struct *wq)
 {
+	bool highpri = wq->flags & WQ_HIGHPRI;
 	int cpu;
 
 	if (!(wq->flags & WQ_UNBOUND)) {
@@ -3110,6 +3104,7 @@ static int alloc_and_link_pwqs(struct workqueue_struct *wq)
 		for_each_possible_cpu(cpu) {
 			struct pool_workqueue *pwq = get_pwq(cpu, wq);
 
+			pwq->pool = get_std_worker_pool(cpu, highpri);
 			list_add_tail(&pwq->pwqs_node, &wq->pwqs);
 		}
 	} else {
@@ -3120,6 +3115,7 @@ static int alloc_and_link_pwqs(struct workqueue_struct *wq)
 			return -ENOMEM;
 
 		wq->pool_wq.single = pwq;
+		pwq->pool = get_std_worker_pool(WORK_CPU_UNBOUND, highpri);
 		list_add_tail(&pwq->pwqs_node, &wq->pwqs);
 	}
 
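With the pwq->pool assignments moved into alloc_and_link_pwqs(), the backing pool (per-CPU vs. unbound, normal vs. highpri) is chosen at the moment each pwq is linked, rather than in a later fixup loop in __alloc_workqueue_key(). A rough userspace sketch of that init-at-link idea, with stand-in names and a trivial pool table:

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS		2
#define CPU_UNBOUND	NR_CPUS		/* stand-in for WORK_CPU_UNBOUND */

struct worker_pool { int cpu; bool highpri; };

/* one normal and one highpri pool per CPU, plus an unbound pair */
static struct worker_pool pools[NR_CPUS + 1][2];

static struct worker_pool *std_pool(int cpu, bool highpri)
{
	struct worker_pool *pool = &pools[cpu][highpri];

	pool->cpu = cpu;
	pool->highpri = highpri;
	return pool;
}

struct toy_pwq { struct worker_pool *pool; };

/* bind every pwq to its pool while "linking", as the patch now does */
static void link_pwqs(struct toy_pwq *pwqs, bool unbound, bool highpri)
{
	if (!unbound) {
		for (int cpu = 0; cpu < NR_CPUS; cpu++)
			pwqs[cpu].pool = std_pool(cpu, highpri);
	} else {
		pwqs[0].pool = std_pool(CPU_UNBOUND, highpri);
	}
}

int main(void)
{
	struct toy_pwq bound[NR_CPUS], unbound[1];

	link_pwqs(bound, false, true);		/* WQ_HIGHPRI analogue */
	link_pwqs(unbound, true, false);
	printf("cpu0 pool highpri=%d\n", bound[0].pool->highpri);
	printf("unbound pool cpu=%d\n", unbound[0].pool->cpu);
	return 0;
}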
@@ -3154,7 +3150,7 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
 {
 	va_list args, args1;
 	struct workqueue_struct *wq;
-	unsigned int cpu;
+	struct pool_workqueue *pwq;
 	size_t namelen;
 
 	/* determine namelen, allocate wq and format name */
@@ -3195,11 +3191,8 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
 	if (alloc_and_link_pwqs(wq) < 0)
 		goto err;
 
-	for_each_pwq_cpu(cpu, wq) {
-		struct pool_workqueue *pwq = get_pwq(cpu, wq);
-
+	for_each_pwq(pwq, wq) {
 		BUG_ON((unsigned long)pwq & WORK_STRUCT_FLAG_MASK);
-		pwq->pool = get_std_worker_pool(cpu, flags & WQ_HIGHPRI);
 		pwq->wq = wq;
 		pwq->flush_color = -1;
 		pwq->max_active = max_active;
@@ -3234,8 +3227,8 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
 	spin_lock_irq(&workqueue_lock);
 
 	if (workqueue_freezing && wq->flags & WQ_FREEZABLE)
-		for_each_pwq_cpu(cpu, wq)
-			get_pwq(cpu, wq)->max_active = 0;
+		for_each_pwq(pwq, wq)
+			pwq->max_active = 0;
 
 	list_add(&wq->list, &workqueues);
 
@@ -3261,14 +3254,13 @@ EXPORT_SYMBOL_GPL(__alloc_workqueue_key);
  */
 void destroy_workqueue(struct workqueue_struct *wq)
 {
-	unsigned int cpu;
+	struct pool_workqueue *pwq;
 
 	/* drain it before proceeding with destruction */
 	drain_workqueue(wq);
 
 	/* sanity checks */
-	for_each_pwq_cpu(cpu, wq) {
-		struct pool_workqueue *pwq = get_pwq(cpu, wq);
+	for_each_pwq(pwq, wq) {
 		int i;
 
 		for (i = 0; i < WORK_NR_COLORS; i++)
@@ -3330,7 +3322,7 @@ static void pwq_set_max_active(struct pool_workqueue *pwq, int max_active)
  */
 void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
 {
-	unsigned int cpu;
+	struct pool_workqueue *pwq;
 
 	max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);
 
@@ -3338,8 +3330,7 @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
 
 	wq->saved_max_active = max_active;
 
-	for_each_pwq_cpu(cpu, wq) {
-		struct pool_workqueue *pwq = get_pwq(cpu, wq);
+	for_each_pwq(pwq, wq) {
 		struct worker_pool *pool = pwq->pool;
 
 		spin_lock(&pool->lock);
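After the conversion, workqueue_set_max_active() walks the pwqs list and updates each entry under its pool's lock instead of indexing pools by CPU. A hedged userspace sketch of that walk-and-update-under-lock shape, using pthread mutexes as stand-ins for the pool spinlocks (all names illustrative):

#include <pthread.h>
#include <stdio.h>

struct toy_pool { pthread_mutex_t lock; };

struct toy_pwq {
	struct toy_pool *pool;
	int max_active;
	struct toy_pwq *next;	/* simplified singly-linked pwqs list */
};

static void toy_set_max_active(struct toy_pwq *head, int max_active)
{
	for (struct toy_pwq *pwq = head; pwq; pwq = pwq->next) {
		pthread_mutex_lock(&pwq->pool->lock);
		/* the kernel also re-activates delayed work at this point */
		pwq->max_active = max_active;
		pthread_mutex_unlock(&pwq->pool->lock);
	}
}

int main(void)
{
	struct toy_pool p0 = { PTHREAD_MUTEX_INITIALIZER };
	struct toy_pool p1 = { PTHREAD_MUTEX_INITIALIZER };
	struct toy_pwq b = { &p1, 0, NULL };
	struct toy_pwq a = { &p0, 0, &b };

	toy_set_max_active(&a, 16);
	printf("%d %d\n", a.max_active, b.max_active);	/* 16 16 */
	return 0;
}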