Diffstat (limited to 'fs/ext4/mballoc.c'):
 fs/ext4/mballoc.c | 78
 1 file changed, 27 insertions(+), 51 deletions(-)
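
For orientation, the sketch below is not part of this commit; it is a minimal user-space model (hypothetical names, pthread mutexes standing in for the per-group lock taken by ext4_lock_group()) of the pattern the diff applies: callers take the group lock around in-core bitmap updates, so the plain mb_set_bit()/mb_clear_bit() helpers suffice and the *_atomic variants plus the sb_bgl_lock() call sites can go away.

/*
 * Illustrative sketch only -- user-space C, not code from this diff.
 * group_lock[] stands in for the per-group lock behind ext4_lock_group();
 * mb_set_bits() mirrors the now lock-free helper in mballoc.c.
 */
#include <pthread.h>
#include <stdint.h>

#define GROUPS          4
#define BITS_PER_GROUP  1024

static pthread_mutex_t group_lock[GROUPS] = {
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
};
static uint32_t bitmap[GROUPS][BITS_PER_GROUP / 32];

/* Non-atomic bit helper: safe only while the caller holds the group lock. */
static void mb_set_bits(uint32_t *bm, int cur, int len)
{
        while (len-- > 0) {
                bm[cur >> 5] |= 1u << (cur & 31);
                cur++;
        }
}

/* Caller-side pattern after the conversion: lock the group, update, unlock. */
void mark_blocks_used(int group, int start, int len)
{
        pthread_mutex_lock(&group_lock[group]);   /* was spin_lock(sb_bgl_lock(...)) */
        mb_set_bits(bitmap[group], start, len);   /* plain helper, no lock argument  */
        pthread_mutex_unlock(&group_lock[group]); /* was spin_unlock(...)            */
}

The diff below follows the same idea: since every caller now holds the group lock across the bitmap update, the atomic bit helpers become redundant and mb_set_bits()/mb_clear_bits() lose their spinlock_t argument.
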
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index df75855ae6f7..e76459cedcdb 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -372,24 +372,12 @@ static inline void mb_set_bit(int bit, void *addr)
         ext4_set_bit(bit, addr);
 }
 
-static inline void mb_set_bit_atomic(spinlock_t *lock, int bit, void *addr)
-{
-        addr = mb_correct_addr_and_bit(&bit, addr);
-        ext4_set_bit_atomic(lock, bit, addr);
-}
-
 static inline void mb_clear_bit(int bit, void *addr)
 {
         addr = mb_correct_addr_and_bit(&bit, addr);
         ext4_clear_bit(bit, addr);
 }
 
-static inline void mb_clear_bit_atomic(spinlock_t *lock, int bit, void *addr)
-{
-        addr = mb_correct_addr_and_bit(&bit, addr);
-        ext4_clear_bit_atomic(lock, bit, addr);
-}
-
 static inline int mb_find_next_zero_bit(void *addr, int max, int start)
 {
         int fix = 0, ret, tmpmax;
@@ -803,17 +791,17 @@ static int ext4_mb_init_cache(struct page *page, char *incore)
                         unlock_buffer(bh[i]);
                         continue;
                 }
-                spin_lock(sb_bgl_lock(EXT4_SB(sb), first_group + i));
+                ext4_lock_group(sb, first_group + i);
                 if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
                         ext4_init_block_bitmap(sb, bh[i],
                                                 first_group + i, desc);
                         set_bitmap_uptodate(bh[i]);
                         set_buffer_uptodate(bh[i]);
-                        spin_unlock(sb_bgl_lock(EXT4_SB(sb), first_group + i));
+                        ext4_unlock_group(sb, first_group + i);
                         unlock_buffer(bh[i]);
                         continue;
                 }
-                spin_unlock(sb_bgl_lock(EXT4_SB(sb), first_group + i));
+                ext4_unlock_group(sb, first_group + i);
                 if (buffer_uptodate(bh[i])) {
                         /*
                          * if not uninit if bh is uptodate,
@@ -1080,7 +1068,7 @@ static int mb_find_order_for_block(struct ext4_buddy *e4b, int block)
         return 0;
 }
 
-static void mb_clear_bits(spinlock_t *lock, void *bm, int cur, int len)
+static void mb_clear_bits(void *bm, int cur, int len)
 {
         __u32 *addr;
 
@@ -1093,15 +1081,12 @@ static void mb_clear_bits(spinlock_t *lock, void *bm, int cur, int len)
                         cur += 32;
                         continue;
                 }
-                if (lock)
-                        mb_clear_bit_atomic(lock, cur, bm);
-                else
-                        mb_clear_bit(cur, bm);
+                mb_clear_bit(cur, bm);
                 cur++;
         }
 }
 
-static void mb_set_bits(spinlock_t *lock, void *bm, int cur, int len)
+static void mb_set_bits(void *bm, int cur, int len)
 {
         __u32 *addr;
 
@@ -1114,10 +1099,7 @@ static void mb_set_bits(spinlock_t *lock, void *bm, int cur, int len)
                         cur += 32;
                         continue;
                 }
-                if (lock)
-                        mb_set_bit_atomic(lock, cur, bm);
-                else
-                        mb_set_bit(cur, bm);
+                mb_set_bit(cur, bm);
                 cur++;
         }
 }
@@ -1332,8 +1314,7 @@ static int mb_mark_used(struct ext4_buddy *e4b, struct ext4_free_extent *ex)
                 e4b->bd_info->bb_counters[ord]++;
         }
 
-        mb_set_bits(sb_bgl_lock(EXT4_SB(e4b->bd_sb), ex->fe_group),
-                        EXT4_MB_BITMAP(e4b), ex->fe_start, len0);
+        mb_set_bits(EXT4_MB_BITMAP(e4b), ex->fe_start, len0);
         mb_check_buddy(e4b);
 
         return ret;
@@ -2756,7 +2737,7 @@ int ext4_mb_init(struct super_block *sb, int needs_recovery)
         return 0;
 }
 
-/* need to called with ext4 group lock (ext4_lock_group) */
+/* need to called with the ext4 group lock held */
 static void ext4_mb_cleanup_pa(struct ext4_group_info *grp)
 {
         struct ext4_prealloc_space *pa;
@@ -2993,14 +2974,17 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
                  * Fix the bitmap and repeat the block allocation
                  * We leak some of the blocks here.
                  */
-                mb_set_bits(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group),
-                                bitmap_bh->b_data, ac->ac_b_ex.fe_start,
-                                ac->ac_b_ex.fe_len);
+                ext4_lock_group(sb, ac->ac_b_ex.fe_group);
+                mb_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
+                            ac->ac_b_ex.fe_len);
+                ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
                 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
                 if (!err)
                         err = -EAGAIN;
                 goto out_err;
         }
+
+        ext4_lock_group(sb, ac->ac_b_ex.fe_group);
 #ifdef AGGRESSIVE_CHECK
         {
                 int i;
@@ -3010,9 +2994,7 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
                 }
         }
 #endif
-        spin_lock(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group));
-        mb_set_bits(NULL, bitmap_bh->b_data,
-                        ac->ac_b_ex.fe_start, ac->ac_b_ex.fe_len);
+        mb_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,ac->ac_b_ex.fe_len);
         if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
                 gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
                 ext4_free_blks_set(sb, gdp,
@@ -3022,7 +3004,8 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
         len = ext4_free_blks_count(sb, gdp) - ac->ac_b_ex.fe_len;
         ext4_free_blks_set(sb, gdp, len);
         gdp->bg_checksum = ext4_group_desc_csum(sbi, ac->ac_b_ex.fe_group, gdp);
-        spin_unlock(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group));
+
+        ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
         percpu_counter_sub(&sbi->s_freeblocks_counter, ac->ac_b_ex.fe_len);
         /*
          * Now reduce the dirty block count also. Should not go negative
@@ -3455,7 +3438,7 @@ ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
  * the function goes through all block freed in the group
  * but not yet committed and marks them used in in-core bitmap.
  * buddy must be generated from this bitmap
- * Need to be called with ext4 group lock (ext4_lock_group)
+ * Need to be called with the ext4 group lock held
  */
 static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
                                                 ext4_group_t group)
@@ -3469,9 +3452,7 @@ static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
 
         while (n) {
                 entry = rb_entry(n, struct ext4_free_data, node);
-                mb_set_bits(sb_bgl_lock(EXT4_SB(sb), group),
-                                bitmap, entry->start_blk,
-                                entry->count);
+                mb_set_bits(bitmap, entry->start_blk, entry->count);
                 n = rb_next(n);
         }
         return;
@@ -3480,7 +3461,7 @@ static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
 /*
  * the function goes through all preallocation in this group and marks them
  * used in in-core bitmap. buddy must be generated from this bitmap
- * Need to be called with ext4 group lock (ext4_lock_group)
+ * Need to be called with ext4 group lock held
  */
 static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
                                         ext4_group_t group)
@@ -3512,8 +3493,7 @@ static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
                 if (unlikely(len == 0))
                         continue;
                 BUG_ON(groupnr != group);
-                mb_set_bits(sb_bgl_lock(EXT4_SB(sb), group),
-                                bitmap, start, len);
+                mb_set_bits(bitmap, start, len);
                 preallocated += len;
                 count++;
         }
@@ -4856,29 +4836,25 @@ do_more:
                 new_entry->group = block_group;
                 new_entry->count = count;
                 new_entry->t_tid = handle->h_transaction->t_tid;
+
                 ext4_lock_group(sb, block_group);
-                mb_clear_bits(sb_bgl_lock(sbi, block_group), bitmap_bh->b_data,
-                                bit, count);
+                mb_clear_bits(bitmap_bh->b_data, bit, count);
                 ext4_mb_free_metadata(handle, &e4b, new_entry);
-                ext4_unlock_group(sb, block_group);
         } else {
-                ext4_lock_group(sb, block_group);
                 /* need to update group_info->bb_free and bitmap
                  * with group lock held. generate_buddy look at
                  * them with group lock_held
                  */
-                mb_clear_bits(sb_bgl_lock(sbi, block_group), bitmap_bh->b_data,
-                                bit, count);
+                ext4_lock_group(sb, block_group);
+                mb_clear_bits(bitmap_bh->b_data, bit, count);
                 mb_free_blocks(inode, &e4b, bit, count);
                 ext4_mb_return_to_preallocation(inode, &e4b, block, count);
-                ext4_unlock_group(sb, block_group);
         }
 
-        spin_lock(sb_bgl_lock(sbi, block_group));
         ret = ext4_free_blks_count(sb, gdp) + count;
         ext4_free_blks_set(sb, gdp, ret);
         gdp->bg_checksum = ext4_group_desc_csum(sbi, block_group, gdp);
-        spin_unlock(sb_bgl_lock(sbi, block_group));
+        ext4_unlock_group(sb, block_group);
         percpu_counter_add(&sbi->s_freeblocks_counter, count);
 
         if (sbi->s_log_groups_per_flex) {