author	Tejun Heo <tj@kernel.org>	2009-06-24 02:13:48 -0400
committer	Tejun Heo <tj@kernel.org>	2009-06-24 02:13:48 -0400
commit	245b2e70eabd797932adb263a65da0bab3711753 (patch)
tree	30f0b790dadd2b70bf06e534abcf66a76e97b05a /block
parent	b9bf3121af348d9255f1c917830fe8c2df52efcb (diff)
percpu: clean up percpu variable definitions
Percpu variable definition is about to be updated such that all percpu
symbols, including the static ones, must be unique.  Update percpu
variable definitions accordingly.

* as,cfq: rename ioc_count uniquely
* cpufreq: rename cpu_dbs_info uniquely
* xen: move nesting_count out of xen_evtchn_do_upcall() and rename it
* mm: move ratelimits out of balance_dirty_pages_ratelimited_nr() and
  rename it
* ipv4,6: rename cookie_scratch uniquely
* x86 perf_counter: rename prev_left to pmc_prev_left, irq_entry to
  pmc_irq_entry and nmi_entry to pmc_nmi_entry
* perf_counter: rename disable_count to perf_disable_count
* ftrace: rename test_event_disable to ftrace_test_event_disable
* kmemleak: rename test_pointer to kmemleak_test_pointer
* mce: rename next_interval to mce_next_interval

[ Impact: percpu usage cleanups, no duplicate static percpu var names ]

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Christoph Lameter <cl@linux-foundation.org>
Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Cc: Jens Axboe <jens.axboe@oracle.com>
Cc: Dave Jones <davej@redhat.com>
Cc: Jeremy Fitzhardinge <jeremy@xensource.com>
Cc: linux-mm <linux-mm@kvack.org>
Cc: David S. Miller <davem@davemloft.net>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Steven Rostedt <srostedt@redhat.com>
Cc: Li Zefan <lizf@cn.fujitsu.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Andi Kleen <andi@firstfloor.org>
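[Editor's note] A minimal sketch of the constraint this patch prepares for, not part of the patch itself: DEFINE_PER_CPU() is about to emit symbols that must be unique kernel-wide even when declared static, so as-iosched.c and cfq-iosched.c can no longer both define a percpu variable named ioc_count. The helper below is hypothetical and only illustrates a typical access pattern; the schedulers actually use the elv_ioc_count_*() macros.

	#include <linux/percpu.h>

	/*
	 * Before: both I/O schedulers defined
	 *	static DEFINE_PER_CPU(unsigned long, ioc_count);
	 * After: each definition carries a subsystem prefix.
	 */
	static DEFINE_PER_CPU(unsigned long, as_ioc_count);

	/* hypothetical helper, for illustration only */
	static void as_ioc_count_inc(void)
	{
		get_cpu_var(as_ioc_count)++;	/* disables preemption around the access */
		put_cpu_var(as_ioc_count);
	}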
Diffstat (limited to 'block')
-rw-r--r--	block/as-iosched.c	10
-rw-r--r--	block/cfq-iosched.c	10
2 files changed, 10 insertions(+), 10 deletions(-)
diff --git a/block/as-iosched.c b/block/as-iosched.c
index 7a12cf6ee1d3..ce8ba57c6557 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -146,7 +146,7 @@ enum arq_state {
 #define RQ_STATE(rq)	((enum arq_state)(rq)->elevator_private2)
 #define RQ_SET_STATE(rq, state)	((rq)->elevator_private2 = (void *) state)
 
-static DEFINE_PER_CPU(unsigned long, ioc_count);
+static DEFINE_PER_CPU(unsigned long, as_ioc_count);
 static struct completion *ioc_gone;
 static DEFINE_SPINLOCK(ioc_gone_lock);
 
@@ -161,7 +161,7 @@ static void as_antic_stop(struct as_data *ad);
 static void free_as_io_context(struct as_io_context *aic)
 {
 	kfree(aic);
-	elv_ioc_count_dec(ioc_count);
+	elv_ioc_count_dec(as_ioc_count);
 	if (ioc_gone) {
 		/*
 		 * AS scheduler is exiting, grab exit lock and check
@@ -169,7 +169,7 @@ static void free_as_io_context(struct as_io_context *aic)
 		 * complete ioc_gone and set it back to NULL.
 		 */
 		spin_lock(&ioc_gone_lock);
-		if (ioc_gone && !elv_ioc_count_read(ioc_count)) {
+		if (ioc_gone && !elv_ioc_count_read(as_ioc_count)) {
 			complete(ioc_gone);
 			ioc_gone = NULL;
 		}
@@ -211,7 +211,7 @@ static struct as_io_context *alloc_as_io_context(void)
 		ret->seek_total = 0;
 		ret->seek_samples = 0;
 		ret->seek_mean = 0;
-		elv_ioc_count_inc(ioc_count);
+		elv_ioc_count_inc(as_ioc_count);
 	}
 
 	return ret;
@@ -1507,7 +1507,7 @@ static void __exit as_exit(void)
 	ioc_gone = &all_gone;
 	/* ioc_gone's update must be visible before reading ioc_count */
 	smp_wmb();
-	if (elv_ioc_count_read(ioc_count))
+	if (elv_ioc_count_read(as_ioc_count))
 		wait_for_completion(&all_gone);
 	synchronize_rcu();
 }
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 833ec18eaa63..0f1cc7d3855e 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -48,7 +48,7 @@ static int cfq_slice_idle = HZ / 125;
 static struct kmem_cache *cfq_pool;
 static struct kmem_cache *cfq_ioc_pool;
 
-static DEFINE_PER_CPU(unsigned long, ioc_count);
+static DEFINE_PER_CPU(unsigned long, cfq_ioc_count);
 static struct completion *ioc_gone;
 static DEFINE_SPINLOCK(ioc_gone_lock);
 
@@ -1422,7 +1422,7 @@ static void cfq_cic_free_rcu(struct rcu_head *head)
 	cic = container_of(head, struct cfq_io_context, rcu_head);
 
 	kmem_cache_free(cfq_ioc_pool, cic);
-	elv_ioc_count_dec(ioc_count);
+	elv_ioc_count_dec(cfq_ioc_count);
 
 	if (ioc_gone) {
 		/*
@@ -1431,7 +1431,7 @@ static void cfq_cic_free_rcu(struct rcu_head *head)
 		 * complete ioc_gone and set it back to NULL
 		 */
 		spin_lock(&ioc_gone_lock);
-		if (ioc_gone && !elv_ioc_count_read(ioc_count)) {
+		if (ioc_gone && !elv_ioc_count_read(cfq_ioc_count)) {
 			complete(ioc_gone);
 			ioc_gone = NULL;
 		}
@@ -1557,7 +1557,7 @@ cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
 		INIT_HLIST_NODE(&cic->cic_list);
 		cic->dtor = cfq_free_io_context;
 		cic->exit = cfq_exit_io_context;
-		elv_ioc_count_inc(ioc_count);
+		elv_ioc_count_inc(cfq_ioc_count);
 	}
 
 	return cic;
@@ -2658,7 +2658,7 @@ static void __exit cfq_exit(void)
 	 * this also protects us from entering cfq_slab_kill() with
 	 * pending RCU callbacks
 	 */
-	if (elv_ioc_count_read(ioc_count))
+	if (elv_ioc_count_read(cfq_ioc_count))
 		wait_for_completion(&all_gone);
 	cfq_slab_kill();
 }
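[Editor's note] The renamed counters feed the module-exit handshake visible in the as_exit() and cfq_exit() hunks above: the exit path publishes a completion pointer, issues a write barrier, and waits only if outstanding io contexts remain, while the release path decrements and completes when the count hits zero. A hedged sketch of that pattern with made-up names (my_count, my_gone, my_exit); the summing loop approximates what elv_ioc_count_read() does rather than quoting its definition.

	#include <linux/completion.h>
	#include <linux/percpu.h>

	/* illustrative names, not the identifiers used in the tree */
	static DEFINE_PER_CPU(unsigned long, my_count);
	static struct completion *my_gone;

	/* sum every CPU's copy of the counter */
	static unsigned long my_count_read(void)
	{
		unsigned long total = 0;
		int cpu;

		for_each_possible_cpu(cpu)
			total += per_cpu(my_count, cpu);
		return total;
	}

	static void my_exit(void)
	{
		DECLARE_COMPLETION_ONSTACK(all_gone);

		my_gone = &all_gone;
		/* make my_gone visible before sampling the per-CPU counters */
		smp_wmb();
		if (my_count_read())
			wait_for_completion(&all_gone);
	}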