aboutsummaryrefslogtreecommitdiffstats
path: root/block
diff options
context:
space:
mode:
authorTejun Heo <tj@kernel.org>2009-09-14 20:57:19 -0400
committerTejun Heo <tj@kernel.org>2009-09-14 20:57:19 -0400
commit5579fd7e6aed8860ea0c8e3f11897493153b10ad (patch)
tree8f797ccd0f1a2c88f1605ae9e90b3ac17485de27 /block
parent04a13c7c632e1fe04a5f6e6c83565d2559e37598 (diff)
parentc2a7e818019f20a5cf7fb26a6eb59e212e6c0cd8 (diff)
Merge branch 'for-next' into for-linus
* pcpu_chunk_page_occupied() doesn't exist in for-next. * pcpu_chunk_addr_search() updated to use raw_smp_processor_id(). Conflicts: mm/percpu.c
Diffstat (limited to 'block')
-rw-r--r--block/as-iosched.c10
-rw-r--r--block/cfq-iosched.c10
2 files changed, 10 insertions, 10 deletions
diff --git a/block/as-iosched.c b/block/as-iosched.c
index 7a12cf6ee1d3..ce8ba57c6557 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -146,7 +146,7 @@ enum arq_state {
 #define RQ_STATE(rq)	((enum arq_state)(rq)->elevator_private2)
 #define RQ_SET_STATE(rq, state)	((rq)->elevator_private2 = (void *) state)
 
-static DEFINE_PER_CPU(unsigned long, ioc_count);
+static DEFINE_PER_CPU(unsigned long, as_ioc_count);
 static struct completion *ioc_gone;
 static DEFINE_SPINLOCK(ioc_gone_lock);
 
@@ -161,7 +161,7 @@ static void as_antic_stop(struct as_data *ad);
 static void free_as_io_context(struct as_io_context *aic)
 {
 	kfree(aic);
-	elv_ioc_count_dec(ioc_count);
+	elv_ioc_count_dec(as_ioc_count);
 	if (ioc_gone) {
 		/*
 		 * AS scheduler is exiting, grab exit lock and check
@@ -169,7 +169,7 @@ static void free_as_io_context(struct as_io_context *aic)
 		 * complete ioc_gone and set it back to NULL.
 		 */
 		spin_lock(&ioc_gone_lock);
-		if (ioc_gone && !elv_ioc_count_read(ioc_count)) {
+		if (ioc_gone && !elv_ioc_count_read(as_ioc_count)) {
 			complete(ioc_gone);
 			ioc_gone = NULL;
 		}
@@ -211,7 +211,7 @@ static struct as_io_context *alloc_as_io_context(void)
 		ret->seek_total = 0;
 		ret->seek_samples = 0;
 		ret->seek_mean = 0;
-		elv_ioc_count_inc(ioc_count);
+		elv_ioc_count_inc(as_ioc_count);
 	}
 
 	return ret;
@@ -1507,7 +1507,7 @@ static void __exit as_exit(void)
 	ioc_gone = &all_gone;
 	/* ioc_gone's update must be visible before reading ioc_count */
 	smp_wmb();
-	if (elv_ioc_count_read(ioc_count))
+	if (elv_ioc_count_read(as_ioc_count))
 		wait_for_completion(&all_gone);
 	synchronize_rcu();
 }
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index fd7080ed7935..1b2d12cda43e 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -48,7 +48,7 @@ static int cfq_slice_idle = HZ / 125;
 static struct kmem_cache *cfq_pool;
 static struct kmem_cache *cfq_ioc_pool;
 
-static DEFINE_PER_CPU(unsigned long, ioc_count);
+static DEFINE_PER_CPU(unsigned long, cfq_ioc_count);
 static struct completion *ioc_gone;
 static DEFINE_SPINLOCK(ioc_gone_lock);
 
@@ -1427,7 +1427,7 @@ static void cfq_cic_free_rcu(struct rcu_head *head)
 	cic = container_of(head, struct cfq_io_context, rcu_head);
 
 	kmem_cache_free(cfq_ioc_pool, cic);
-	elv_ioc_count_dec(ioc_count);
+	elv_ioc_count_dec(cfq_ioc_count);
 
 	if (ioc_gone) {
 		/*
@@ -1436,7 +1436,7 @@ static void cfq_cic_free_rcu(struct rcu_head *head)
 		 * complete ioc_gone and set it back to NULL
 		 */
 		spin_lock(&ioc_gone_lock);
-		if (ioc_gone && !elv_ioc_count_read(ioc_count)) {
+		if (ioc_gone && !elv_ioc_count_read(cfq_ioc_count)) {
 			complete(ioc_gone);
 			ioc_gone = NULL;
 		}
@@ -1562,7 +1562,7 @@ cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
 		INIT_HLIST_NODE(&cic->cic_list);
 		cic->dtor = cfq_free_io_context;
 		cic->exit = cfq_exit_io_context;
-		elv_ioc_count_inc(ioc_count);
+		elv_ioc_count_inc(cfq_ioc_count);
 	}
 
 	return cic;
@@ -2668,7 +2668,7 @@ static void __exit cfq_exit(void)
 	 * this also protects us from entering cfq_slab_kill() with
 	 * pending RCU callbacks
 	 */
-	if (elv_ioc_count_read(ioc_count))
+	if (elv_ioc_count_read(cfq_ioc_count))
 		wait_for_completion(&all_gone);
 	cfq_slab_kill();
 }