author    Linus Torvalds <torvalds@linux-foundation.org>  2009-09-15 12:39:44 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2009-09-15 12:39:44 -0400
commit    ada3fa15057205b7d3f727bba5cd26b5912e350f (patch)
tree      60962fc9e4021b92f484d1a58e72cd3906d4f3db /block
parent    2f82af08fcc7dc01a7e98a49a5995a77e32a2925 (diff)
parent    5579fd7e6aed8860ea0c8e3f11897493153b10ad (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu: (46 commits)
  powerpc64: convert to dynamic percpu allocator
  sparc64: use embedding percpu first chunk allocator
  percpu: kill lpage first chunk allocator
  x86,percpu: use embedding for 64bit NUMA and page for 32bit NUMA
  percpu: update embedding first chunk allocator to handle sparse units
  percpu: use group information to allocate vmap areas sparsely
  vmalloc: implement pcpu_get_vm_areas()
  vmalloc: separate out insert_vmalloc_vm()
  percpu: add chunk->base_addr
  percpu: add pcpu_unit_offsets[]
  percpu: introduce pcpu_alloc_info and pcpu_group_info
  percpu: move pcpu_lpage_build_unit_map() and pcpul_lpage_dump_cfg() upward
  percpu: add @align to pcpu_fc_alloc_fn_t
  percpu: make @dyn_size mandatory for pcpu_setup_first_chunk()
  percpu: drop @static_size from first chunk allocators
  percpu: generalize first chunk allocator selection
  percpu: build first chunk allocators selectively
  percpu: rename 4k first chunk allocator to page
  percpu: improve boot messages
  percpu: fix pcpu_reclaim() locking
  ...

Fix trivial conflict as per Tejun Heo in kernel/sched.c
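The block-layer fallout below appears to come from the percpu symbol-namespace cleanup in this series: with the conversion to the dynamic percpu allocator, all DEFINE_PER_CPU symbols, including static ones, must be unique kernel-wide, so the two identically named ioc_count counters in as-iosched.c and cfq-iosched.c get scheduler-specific names. A minimal illustrative sketch of the clash (not code from this merge):

/* block/as-iosched.c */
static DEFINE_PER_CPU(unsigned long, ioc_count);	/* renamed to as_ioc_count */

/* block/cfq-iosched.c */
static DEFINE_PER_CPU(unsigned long, ioc_count);	/* renamed to cfq_ioc_count */

/*
 * Even though both definitions are static, per-cpu symbols now live in
 * one global namespace, so the duplicate name breaks the build once the
 * new allocator is in place; hence the renames in the hunks below.
 */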
Diffstat (limited to 'block')
-rw-r--r--  block/as-iosched.c   10
-rw-r--r--  block/cfq-iosched.c  10
2 files changed, 10 insertions(+), 10 deletions(-)
diff --git a/block/as-iosched.c b/block/as-iosched.c
index 7a12cf6ee1d3..ce8ba57c6557 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -146,7 +146,7 @@ enum arq_state {
 #define RQ_STATE(rq)	((enum arq_state)(rq)->elevator_private2)
 #define RQ_SET_STATE(rq, state)	((rq)->elevator_private2 = (void *) state)
 
-static DEFINE_PER_CPU(unsigned long, ioc_count);
+static DEFINE_PER_CPU(unsigned long, as_ioc_count);
 static struct completion *ioc_gone;
 static DEFINE_SPINLOCK(ioc_gone_lock);
 
@@ -161,7 +161,7 @@ static void as_antic_stop(struct as_data *ad);
 static void free_as_io_context(struct as_io_context *aic)
 {
 	kfree(aic);
-	elv_ioc_count_dec(ioc_count);
+	elv_ioc_count_dec(as_ioc_count);
 	if (ioc_gone) {
 		/*
 		 * AS scheduler is exiting, grab exit lock and check
@@ -169,7 +169,7 @@ static void free_as_io_context(struct as_io_context *aic)
 		 * complete ioc_gone and set it back to NULL.
 		 */
 		spin_lock(&ioc_gone_lock);
-		if (ioc_gone && !elv_ioc_count_read(ioc_count)) {
+		if (ioc_gone && !elv_ioc_count_read(as_ioc_count)) {
 			complete(ioc_gone);
 			ioc_gone = NULL;
 		}
@@ -211,7 +211,7 @@ static struct as_io_context *alloc_as_io_context(void)
 		ret->seek_total = 0;
 		ret->seek_samples = 0;
 		ret->seek_mean = 0;
-		elv_ioc_count_inc(ioc_count);
+		elv_ioc_count_inc(as_ioc_count);
 	}
 
 	return ret;
@@ -1507,7 +1507,7 @@ static void __exit as_exit(void)
 	ioc_gone = &all_gone;
 	/* ioc_gone's update must be visible before reading ioc_count */
 	smp_wmb();
-	if (elv_ioc_count_read(ioc_count))
+	if (elv_ioc_count_read(as_ioc_count))
 		wait_for_completion(&all_gone);
 	synchronize_rcu();
 }
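For readers skimming the hunks: both schedulers use the same shutdown handshake around these per-cpu counters, which the renames must thread through consistently. A condensed, simplified view using the as-iosched.c names (the cfq-iosched.c hunks below are structurally identical; the spin_unlock() lives in the surrounding code, just outside the diff context):

/* module exit: publish the completion, then wait out live contexts */
ioc_gone = &all_gone;
smp_wmb();	/* ioc_gone must be visible before the count is re-read */
if (elv_ioc_count_read(as_ioc_count))
	wait_for_completion(&all_gone);

/* context teardown: the last freer completes the waiter exactly once */
elv_ioc_count_dec(as_ioc_count);
if (ioc_gone) {
	spin_lock(&ioc_gone_lock);
	if (ioc_gone && !elv_ioc_count_read(as_ioc_count)) {
		complete(ioc_gone);
		ioc_gone = NULL;
	}
	spin_unlock(&ioc_gone_lock);
}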
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 0e3814b662af..1ca813b16e78 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -48,7 +48,7 @@ static int cfq_slice_idle = HZ / 125;
 static struct kmem_cache *cfq_pool;
 static struct kmem_cache *cfq_ioc_pool;
 
-static DEFINE_PER_CPU(unsigned long, ioc_count);
+static DEFINE_PER_CPU(unsigned long, cfq_ioc_count);
 static struct completion *ioc_gone;
 static DEFINE_SPINLOCK(ioc_gone_lock);
 
@@ -1415,7 +1415,7 @@ static void cfq_cic_free_rcu(struct rcu_head *head)
 	cic = container_of(head, struct cfq_io_context, rcu_head);
 
 	kmem_cache_free(cfq_ioc_pool, cic);
-	elv_ioc_count_dec(ioc_count);
+	elv_ioc_count_dec(cfq_ioc_count);
 
 	if (ioc_gone) {
 		/*
@@ -1424,7 +1424,7 @@ static void cfq_cic_free_rcu(struct rcu_head *head)
 		 * complete ioc_gone and set it back to NULL
 		 */
 		spin_lock(&ioc_gone_lock);
-		if (ioc_gone && !elv_ioc_count_read(ioc_count)) {
+		if (ioc_gone && !elv_ioc_count_read(cfq_ioc_count)) {
 			complete(ioc_gone);
 			ioc_gone = NULL;
 		}
@@ -1550,7 +1550,7 @@ cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
 		INIT_HLIST_NODE(&cic->cic_list);
 		cic->dtor = cfq_free_io_context;
 		cic->exit = cfq_exit_io_context;
-		elv_ioc_count_inc(ioc_count);
+		elv_ioc_count_inc(cfq_ioc_count);
 	}
 
 	return cic;
@@ -2654,7 +2654,7 @@ static void __exit cfq_exit(void)
 	 * this also protects us from entering cfq_slab_kill() with
 	 * pending RCU callbacks
 	 */
-	if (elv_ioc_count_read(ioc_count))
+	if (elv_ioc_count_read(cfq_ioc_count))
 		wait_for_completion(&all_gone);
 	cfq_slab_kill();
 }
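The elv_ioc_count_* helpers threaded through both files are per-cpu counter macros from include/linux/elevator.h. Roughly, as an approximation of the 2.6.31-era definitions rather than verbatim kernel source:

#define elv_ioc_count_mod(name, __val)		\
	do {					\
		get_cpu_var(name) += (__val);	\
		put_cpu_var(name);		\
	} while (0)

#define elv_ioc_count_inc(name)	elv_ioc_count_mod(name, 1)
#define elv_ioc_count_dec(name)	elv_ioc_count_mod(name, -1)

/* sum the counter over all possible CPUs; cheap updates, expensive reads */
#define elv_ioc_count_read(name)		\
({						\
	unsigned long __val = 0;		\
	int __cpu;				\
	smp_wmb();				\
	for_each_possible_cpu(__cpu)		\
		__val += per_cpu(name, __cpu);	\
	__val;					\
})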