author		Tejun Heo <tj@kernel.org>	2013-03-12 14:29:59 -0400
committer	Tejun Heo <tj@kernel.org>	2013-03-12 14:29:59 -0400
commit		d84ff0512f1bfc0d8c864efadb4523fce68919cc (patch)
tree		b91fe48e9bd59e0709b00869cd200c79f882afff /kernel/workqueue.c
parent		493a1724fef9a3e931d9199f1a19e358e526a6e7 (diff)
workqueue: consistently use int for @cpu variables
Workqueue is mixing unsigned int and int for @cpu variables. There's
no point in using unsigned int for cpus - many of the cpu-related APIs
take int anyway. Consistently use int for @cpu variables so that we
can use negative values to mark special ones.
This patch doesn't introduce any visible behavior changes.
Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Lai Jiangshan <laijs@cn.fujitsu.com>
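
As background for the "negative values to mark special ones" point above, here is a minimal userspace sketch (not from the patch; DEMO_CPU_NONE and DEMO_NR_CPUS are hypothetical stand-ins, not workqueue constants) of why an unsigned @cpu cannot carry a negative sentinel:

	/*
	 * Illustrative sketch only, not part of the patch.  DEMO_CPU_NONE and
	 * DEMO_NR_CPUS are made-up values, not workqueue constants.
	 */
	#include <stdio.h>

	#define DEMO_CPU_NONE	(-1)	/* hypothetical "no cpu" sentinel */
	#define DEMO_NR_CPUS	8

	static const char *classify_signed(int cpu)
	{
		if (cpu < 0)			/* sentinel is detectable */
			return "special (no cpu)";
		return cpu < DEMO_NR_CPUS ? "valid" : "invalid";
	}

	static const char *classify_unsigned(unsigned int cpu)
	{
		/* "cpu < 0" would always be false here (compilers warn about it),
		 * so -1 can only show up as the huge value 4294967295. */
		return cpu < DEMO_NR_CPUS ? "valid" : "invalid";
	}

	int main(void)
	{
		printf("signed:   %s\n", classify_signed(DEMO_CPU_NONE));	/* special (no cpu) */
		printf("unsigned: %s\n", classify_unsigned(DEMO_CPU_NONE));	/* invalid */
		return 0;
	}

With signed storage the sentinel check is a plain "cpu < 0", which is the option this conversion opens up.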
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--	kernel/workqueue.c	24
1 file changed, 11 insertions, 13 deletions
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 26c67c76b6c5..73c5f68065b5 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -124,7 +124,7 @@ enum {
 
 struct worker_pool {
 	spinlock_t		lock;		/* the pool lock */
-	unsigned int		cpu;		/* I: the associated cpu */
+	int			cpu;		/* I: the associated cpu */
 	int			id;		/* I: pool ID */
 	unsigned int		flags;		/* X: flags */
 
@@ -467,8 +467,7 @@ static struct worker_pool *get_std_worker_pool(int cpu, bool highpri)
 	return &pools[highpri];
 }
 
-static struct pool_workqueue *get_pwq(unsigned int cpu,
-				      struct workqueue_struct *wq)
+static struct pool_workqueue *get_pwq(int cpu, struct workqueue_struct *wq)
 {
 	if (!(wq->flags & WQ_UNBOUND)) {
 		if (likely(cpu < nr_cpu_ids))
@@ -730,7 +729,7 @@ static void wake_up_worker(struct worker_pool *pool)
  * CONTEXT:
  * spin_lock_irq(rq->lock)
  */
-void wq_worker_waking_up(struct task_struct *task, unsigned int cpu)
+void wq_worker_waking_up(struct task_struct *task, int cpu)
 {
 	struct worker *worker = kthread_data(task);
 
@@ -755,8 +754,7 @@ void wq_worker_waking_up(struct task_struct *task, unsigned int cpu)
  * RETURNS:
  * Worker task on @cpu to wake up, %NULL if none.
  */
-struct task_struct *wq_worker_sleeping(struct task_struct *task,
-				       unsigned int cpu)
+struct task_struct *wq_worker_sleeping(struct task_struct *task, int cpu)
 {
 	struct worker *worker = kthread_data(task), *to_wakeup = NULL;
 	struct worker_pool *pool;
@@ -1159,7 +1157,7 @@ static bool is_chained_work(struct workqueue_struct *wq)
 	return worker && worker->current_pwq->wq == wq;
 }
 
-static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
+static void __queue_work(int cpu, struct workqueue_struct *wq,
 			 struct work_struct *work)
 {
 	struct pool_workqueue *pwq;
@@ -1714,7 +1712,7 @@ static struct worker *create_worker(struct worker_pool *pool)
 	if (pool->cpu != WORK_CPU_UNBOUND)
 		worker->task = kthread_create_on_node(worker_thread,
 					worker, cpu_to_node(pool->cpu),
-					"kworker/%u:%d%s", pool->cpu, id, pri);
+					"kworker/%d:%d%s", pool->cpu, id, pri);
 	else
 		worker->task = kthread_create(worker_thread, worker,
 					      "kworker/u:%d%s", id, pri);
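
The hunk above also switches the thread-name format from "kworker/%u:..." to "kworker/%d:..." so the conversion specifier keeps matching the now-signed pool->cpu. A small standalone sketch (not kernel code) of what a mismatched specifier would print for a negative value:

	/* Standalone sketch: %d vs %u for a cpu field that may go negative. */
	#include <stdio.h>

	int main(void)
	{
		int cpu = -1;	/* hypothetical special value */

		printf("kworker/%d:0\n", cpu);			/* kworker/-1:0 */
		printf("kworker/%u:0\n", (unsigned int)cpu);	/* kworker/4294967295:0 */
		return 0;
	}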
@@ -3345,7 +3343,7 @@ EXPORT_SYMBOL_GPL(workqueue_set_max_active);
  * RETURNS:
  * %true if congested, %false otherwise.
  */
-bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq)
+bool workqueue_congested(int cpu, struct workqueue_struct *wq)
 {
 	struct pool_workqueue *pwq = get_pwq(cpu, wq);
 
@@ -3461,7 +3459,7 @@ static int __cpuinit workqueue_cpu_up_callback(struct notifier_block *nfb,
 					       unsigned long action,
 					       void *hcpu)
 {
-	unsigned int cpu = (unsigned long)hcpu;
+	int cpu = (unsigned long)hcpu;
 	struct worker_pool *pool;
 
 	switch (action & ~CPU_TASKS_FROZEN) {
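
For readers unfamiliar with the notifier convention visible in the two hotplug callbacks: the cpu number arrives packed into a void *hcpu and is recovered by casting through unsigned long, exactly as the changed lines do. A hedged, out-of-kernel sketch of that round trip:

	/* Standalone sketch of the hcpu pack/unpack idiom used above. */
	#include <stdio.h>

	static int demo_cpu_callback(void *hcpu)
	{
		int cpu = (unsigned long)hcpu;	/* same cast as in the hunks above */

		printf("callback for cpu %d\n", cpu);
		return 0;
	}

	int main(void)
	{
		unsigned long cpu = 3;

		demo_cpu_callback((void *)cpu);	/* cpu number travels as a pointer */
		return 0;
	}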
@@ -3507,7 +3505,7 @@ static int __cpuinit workqueue_cpu_down_callback(struct notifier_block *nfb,
 						 unsigned long action,
 						 void *hcpu)
 {
-	unsigned int cpu = (unsigned long)hcpu;
+	int cpu = (unsigned long)hcpu;
 	struct work_struct unbind_work;
 
 	switch (action & ~CPU_TASKS_FROZEN) {
@@ -3547,7 +3545,7 @@ static void work_for_cpu_fn(struct work_struct *work)
  * It is up to the caller to ensure that the cpu doesn't go offline.
  * The caller must not hold any locks which would prevent @fn from completing.
  */
-long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
+long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
 {
 	struct work_for_cpu wfc = { .fn = fn, .arg = arg };
 
@@ -3705,7 +3703,7 @@ out_unlock:
 
 static int __init init_workqueues(void)
 {
-	unsigned int cpu;
+	int cpu;
 
 	/* make sure we have enough bits for OFFQ pool ID */
 	BUILD_BUG_ON((1LU << (BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT)) <