diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2009-09-15 12:39:44 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-09-15 12:39:44 -0400 |
commit | ada3fa15057205b7d3f727bba5cd26b5912e350f (patch) | |
tree | 60962fc9e4021b92f484d1a58e72cd3906d4f3db /block/cfq-iosched.c | |
parent | 2f82af08fcc7dc01a7e98a49a5995a77e32a2925 (diff) | |
parent | 5579fd7e6aed8860ea0c8e3f11897493153b10ad (diff) |
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu: (46 commits)
powerpc64: convert to dynamic percpu allocator
sparc64: use embedding percpu first chunk allocator
percpu: kill lpage first chunk allocator
x86,percpu: use embedding for 64bit NUMA and page for 32bit NUMA
percpu: update embedding first chunk allocator to handle sparse units
percpu: use group information to allocate vmap areas sparsely
vmalloc: implement pcpu_get_vm_areas()
vmalloc: separate out insert_vmalloc_vm()
percpu: add chunk->base_addr
percpu: add pcpu_unit_offsets[]
percpu: introduce pcpu_alloc_info and pcpu_group_info
percpu: move pcpu_lpage_build_unit_map() and pcpul_lpage_dump_cfg() upward
percpu: add @align to pcpu_fc_alloc_fn_t
percpu: make @dyn_size mandatory for pcpu_setup_first_chunk()
percpu: drop @static_size from first chunk allocators
percpu: generalize first chunk allocator selection
percpu: build first chunk allocators selectively
percpu: rename 4k first chunk allocator to page
percpu: improve boot messages
percpu: fix pcpu_reclaim() locking
...
Fix trivial conflict as by Tejun Heo in kernel/sched.c
Diffstat (limited to 'block/cfq-iosched.c')
-rw-r--r-- | block/cfq-iosched.c | 10 |
1 file changed, 5 insertions, 5 deletions
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index 0e3814b662af..1ca813b16e78 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c | |||
@@ -48,7 +48,7 @@ static int cfq_slice_idle = HZ / 125; | |||
48 | static struct kmem_cache *cfq_pool; | 48 | static struct kmem_cache *cfq_pool; |
49 | static struct kmem_cache *cfq_ioc_pool; | 49 | static struct kmem_cache *cfq_ioc_pool; |
50 | 50 | ||
51 | static DEFINE_PER_CPU(unsigned long, ioc_count); | 51 | static DEFINE_PER_CPU(unsigned long, cfq_ioc_count); |
52 | static struct completion *ioc_gone; | 52 | static struct completion *ioc_gone; |
53 | static DEFINE_SPINLOCK(ioc_gone_lock); | 53 | static DEFINE_SPINLOCK(ioc_gone_lock); |
54 | 54 | ||
@@ -1415,7 +1415,7 @@ static void cfq_cic_free_rcu(struct rcu_head *head) | |||
1415 | cic = container_of(head, struct cfq_io_context, rcu_head); | 1415 | cic = container_of(head, struct cfq_io_context, rcu_head); |
1416 | 1416 | ||
1417 | kmem_cache_free(cfq_ioc_pool, cic); | 1417 | kmem_cache_free(cfq_ioc_pool, cic); |
1418 | elv_ioc_count_dec(ioc_count); | 1418 | elv_ioc_count_dec(cfq_ioc_count); |
1419 | 1419 | ||
1420 | if (ioc_gone) { | 1420 | if (ioc_gone) { |
1421 | /* | 1421 | /* |
@@ -1424,7 +1424,7 @@ static void cfq_cic_free_rcu(struct rcu_head *head) | |||
1424 | * complete ioc_gone and set it back to NULL | 1424 | * complete ioc_gone and set it back to NULL |
1425 | */ | 1425 | */ |
1426 | spin_lock(&ioc_gone_lock); | 1426 | spin_lock(&ioc_gone_lock); |
1427 | if (ioc_gone && !elv_ioc_count_read(ioc_count)) { | 1427 | if (ioc_gone && !elv_ioc_count_read(cfq_ioc_count)) { |
1428 | complete(ioc_gone); | 1428 | complete(ioc_gone); |
1429 | ioc_gone = NULL; | 1429 | ioc_gone = NULL; |
1430 | } | 1430 | } |
@@ -1550,7 +1550,7 @@ cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask) | |||
1550 | INIT_HLIST_NODE(&cic->cic_list); | 1550 | INIT_HLIST_NODE(&cic->cic_list); |
1551 | cic->dtor = cfq_free_io_context; | 1551 | cic->dtor = cfq_free_io_context; |
1552 | cic->exit = cfq_exit_io_context; | 1552 | cic->exit = cfq_exit_io_context; |
1553 | elv_ioc_count_inc(ioc_count); | 1553 | elv_ioc_count_inc(cfq_ioc_count); |
1554 | } | 1554 | } |
1555 | 1555 | ||
1556 | return cic; | 1556 | return cic; |
@@ -2654,7 +2654,7 @@ static void __exit cfq_exit(void) | |||
2654 | * this also protects us from entering cfq_slab_kill() with | 2654 | * this also protects us from entering cfq_slab_kill() with |
2655 | * pending RCU callbacks | 2655 | * pending RCU callbacks |
2656 | */ | 2656 | */ |
2657 | if (elv_ioc_count_read(ioc_count)) | 2657 | if (elv_ioc_count_read(cfq_ioc_count)) |
2658 | wait_for_completion(&all_gone); | 2658 | wait_for_completion(&all_gone); |
2659 | cfq_slab_kill(); | 2659 | cfq_slab_kill(); |
2660 | } | 2660 | } |