aboutsummaryrefslogtreecommitdiffstats
path: root/fs/ext4
diff options
context:
space:
mode:
authorEric Sandeen <sandeen@redhat.com>2008-09-13 15:23:29 -0400
committerTheodore Ts'o <tytso@mit.edu>2008-09-13 15:23:29 -0400
commit730c213c79a638137b47a90624e4bac252f07ae7 (patch)
treedd4e5ebd64045721d5834cea52e6edb4de2fe676 /fs/ext4
parent8eea80d52b9d87cfd771055534bd2c24f73704d7 (diff)
ext4: use percpu data structures for lg_prealloc_list
lg_prealloc_list seems to cry out for a per-cpu data structure; on a
large smp system I think this should be better.  I've lightly tested
this change on a 4-cpu system.

Signed-off-by: Eric Sandeen <sandeen@redhat.com>
Acked-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
Diffstat (limited to 'fs/ext4')
-rw-r--r--fs/ext4/mballoc.c13
1 file changed, 5 insertions(+), 8 deletions(-)
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 9122271e3d65..14ebd572bea8 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -2541,17 +2541,16 @@ int ext4_mb_init(struct super_block *sb, int needs_recovery)
 	sbi->s_mb_history_filter = EXT4_MB_HISTORY_DEFAULT;
 	sbi->s_mb_group_prealloc = MB_DEFAULT_GROUP_PREALLOC;
 
-	i = sizeof(struct ext4_locality_group) * nr_cpu_ids;
-	sbi->s_locality_groups = kmalloc(i, GFP_KERNEL);
+	sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group);
 	if (sbi->s_locality_groups == NULL) {
 		clear_opt(sbi->s_mount_opt, MBALLOC);
 		kfree(sbi->s_mb_offsets);
 		kfree(sbi->s_mb_maxs);
 		return -ENOMEM;
 	}
-	for (i = 0; i < nr_cpu_ids; i++) {
+	for_each_possible_cpu(i) {
 		struct ext4_locality_group *lg;
-		lg = &sbi->s_locality_groups[i];
+		lg = per_cpu_ptr(sbi->s_locality_groups, i);
 		mutex_init(&lg->lg_mutex);
 		for (j = 0; j < PREALLOC_TB_SIZE; j++)
 			INIT_LIST_HEAD(&lg->lg_prealloc_list[j]);
@@ -2648,8 +2647,7 @@ int ext4_mb_release(struct super_block *sb)
 			atomic_read(&sbi->s_mb_discarded));
 	}
 
-	kfree(sbi->s_locality_groups);
-
+	free_percpu(sbi->s_locality_groups);
 	ext4_mb_history_release(sb);
 	ext4_mb_destroy_per_dev_proc(sb);
 
@@ -4106,8 +4104,7 @@ static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
 	 * per cpu locality group is to reduce the contention between block
 	 * request from multiple CPUs.
 	 */
-	ac->ac_lg = &sbi->s_locality_groups[get_cpu()];
-	put_cpu();
+	ac->ac_lg = per_cpu_ptr(sbi->s_locality_groups, raw_smp_processor_id());
 
 	/* we're going to use group allocation */
 	ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC;