| author | Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com> | 2009-05-02 20:35:09 -0400 |
|---|---|---|
| committer | Theodore Ts'o <tytso@mit.edu> | 2009-05-02 20:35:09 -0400 |
| commit | 955ce5f5be67dfe0d1d096b543af33fe8a1ce3dd | |
| tree | 68b0bdbef1594a3e43c1ef28ae8e096b40a06ae4 /fs | |
| parent | eefd7f03b86b8a319890e7fac5a6fcc7f8694b76 | |
ext4: Convert ext4_lock_group to use sb_bgl_lock
We have sb_bgl_lock() and the ext4_group_info.bb_state
bit spinlock to protect group information. The latter is only
used within the mballoc code. Consolidate them to use sb_bgl_lock().
This makes the mballoc.c code much simpler and also avoids
confusion over two locks protecting the same information.
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
Diffstat (limited to 'fs')

-rw-r--r-- | fs/ext4/balloc.c  | 12 |
-rw-r--r-- | fs/ext4/ext4.h    | 26 |
-rw-r--r-- | fs/ext4/ialloc.c  | 29 |
-rw-r--r-- | fs/ext4/mballoc.c | 78 |
-rw-r--r-- | fs/ext4/super.c   |  6 |

5 files changed, 59 insertions, 92 deletions
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index 92f557d957d9..e2126d70dff5 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -326,16 +326,16 @@ ext4_read_block_bitmap(struct super_block *sb, ext4_group_t block_group)
                 unlock_buffer(bh);
                 return bh;
         }
-        spin_lock(sb_bgl_lock(EXT4_SB(sb), block_group));
+        ext4_lock_group(sb, block_group);
         if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
                 ext4_init_block_bitmap(sb, bh, block_group, desc);
                 set_bitmap_uptodate(bh);
                 set_buffer_uptodate(bh);
-                spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group));
+                ext4_unlock_group(sb, block_group);
                 unlock_buffer(bh);
                 return bh;
         }
-        spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group));
+        ext4_unlock_group(sb, block_group);
         if (buffer_uptodate(bh)) {
                 /*
                  * if not uninit if bh is uptodate,
@@ -451,7 +451,7 @@ void ext4_add_groupblocks(handle_t *handle, struct super_block *sb,
         down_write(&grp->alloc_sem);
         for (i = 0, blocks_freed = 0; i < count; i++) {
                 BUFFER_TRACE(bitmap_bh, "clear bit");
-                if (!ext4_clear_bit_atomic(sb_bgl_lock(sbi, block_group),
+                if (!ext4_clear_bit_atomic(ext4_group_lock_ptr(sb, block_group),
                                                 bit + i, bitmap_bh->b_data)) {
                         ext4_error(sb, __func__,
                                    "bit already cleared for block %llu",
@@ -461,11 +461,11 @@ void ext4_add_groupblocks(handle_t *handle, struct super_block *sb,
                         blocks_freed++;
                 }
         }
-        spin_lock(sb_bgl_lock(sbi, block_group));
+        ext4_lock_group(sb, block_group);
         blk_free_count = blocks_freed + ext4_free_blks_count(sb, desc);
         ext4_free_blks_set(sb, desc, blk_free_count);
         desc->bg_checksum = ext4_group_desc_csum(sbi, block_group, desc);
-        spin_unlock(sb_bgl_lock(sbi, block_group));
+        ext4_unlock_group(sb, block_group);
         percpu_counter_add(&sbi->s_freeblocks_counter, blocks_freed);

         if (sbi->s_log_groups_per_flex) {
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 5973f3261b0c..149e02dc3606 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -963,12 +963,6 @@ static inline int ext4_valid_inum(struct super_block *sb, unsigned long ino)
                (ino >= EXT4_FIRST_INO(sb) &&
                 ino <= le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count));
 }
-
-static inline spinlock_t *
-sb_bgl_lock(struct ext4_sb_info *sbi, unsigned int block_group)
-{
-        return bgl_lock_ptr(sbi->s_blockgroup_lock, block_group);
-}
 #else
 /* Assume that user mode programs are passing in an ext4fs superblock, not
  * a kernel struct super_block. This will allow us to call the feature-test
@@ -1568,33 +1562,31 @@ struct ext4_group_info {
 };

 #define EXT4_GROUP_INFO_NEED_INIT_BIT 0
-#define EXT4_GROUP_INFO_LOCKED_BIT 1

 #define EXT4_MB_GRP_NEED_INIT(grp) \
         (test_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &((grp)->bb_state)))

-static inline void ext4_lock_group(struct super_block *sb, ext4_group_t group)
+static inline spinlock_t *ext4_group_lock_ptr(struct super_block *sb,
+                                              ext4_group_t group)
 {
-        struct ext4_group_info *grinfo = ext4_get_group_info(sb, group);
+        return bgl_lock_ptr(EXT4_SB(sb)->s_blockgroup_lock, group);
+}

-        bit_spin_lock(EXT4_GROUP_INFO_LOCKED_BIT, &(grinfo->bb_state));
+static inline void ext4_lock_group(struct super_block *sb, ext4_group_t group)
+{
+        spin_lock(ext4_group_lock_ptr(sb, group));
 }

 static inline void ext4_unlock_group(struct super_block *sb,
                                         ext4_group_t group)
 {
-        struct ext4_group_info *grinfo = ext4_get_group_info(sb, group);
-
-        bit_spin_unlock(EXT4_GROUP_INFO_LOCKED_BIT, &(grinfo->bb_state));
+        spin_unlock(ext4_group_lock_ptr(sb, group));
 }

 static inline int ext4_is_group_locked(struct super_block *sb,
                                         ext4_group_t group)
 {
-        struct ext4_group_info *grinfo = ext4_get_group_info(sb, group);
-
-        return bit_spin_is_locked(EXT4_GROUP_INFO_LOCKED_BIT,
-                                                &(grinfo->bb_state));
+        return spin_is_locked(ext4_group_lock_ptr(sb, group));
 }

 /*
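
For reference, the new ext4_group_lock_ptr() simply forwards to the generic block-group lock helpers. In kernels of this era, include/linux/blockgroup_lock.h hashes the group number onto a small, cacheline-padded array of spinlocks, roughly as follows (an approximate sketch, not quoted from this patch; NR_BG_LOCKS scales with NR_CPUS and is 1 on uniprocessor builds):

```c
/* Approximate shape of include/linux/blockgroup_lock.h (2.6.30-era). */
struct bgl_lock {
	spinlock_t lock;
} ____cacheline_aligned_in_smp;

struct blockgroup_lock {
	struct bgl_lock locks[NR_BG_LOCKS];
};

static inline spinlock_t *
bgl_lock_ptr(struct blockgroup_lock *bgl, unsigned int block_group)
{
	/* Many groups share one lock; hashing avoids a single global
	 * spinlock while keeping the footprint small. */
	return &bgl->locks[(block_group) & (NR_BG_LOCKS - 1)].lock;
}
```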
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 916d05c881ca..82f7d1d7eae0 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -122,16 +122,16 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
                 unlock_buffer(bh);
                 return bh;
         }
-        spin_lock(sb_bgl_lock(EXT4_SB(sb), block_group));
+        ext4_lock_group(sb, block_group);
         if (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
                 ext4_init_inode_bitmap(sb, bh, block_group, desc);
                 set_bitmap_uptodate(bh);
                 set_buffer_uptodate(bh);
-                spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group));
+                ext4_unlock_group(sb, block_group);
                 unlock_buffer(bh);
                 return bh;
         }
-        spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group));
+        ext4_unlock_group(sb, block_group);
         if (buffer_uptodate(bh)) {
                 /*
                  * if not uninit if bh is uptodate,
@@ -246,9 +246,8 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
                 goto error_return;

         /* Ok, now we can actually update the inode bitmaps.. */
-        spin_lock(sb_bgl_lock(sbi, block_group));
-        cleared = ext4_clear_bit(bit, bitmap_bh->b_data);
-        spin_unlock(sb_bgl_lock(sbi, block_group));
+        cleared = ext4_clear_bit_atomic(ext4_group_lock_ptr(sb, block_group),
+                                        bit, bitmap_bh->b_data);
         if (!cleared)
                 ext4_error(sb, "ext4_free_inode",
                            "bit already cleared for inode %lu", ino);
@@ -260,7 +259,7 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
         if (fatal) goto error_return;

         if (gdp) {
-                spin_lock(sb_bgl_lock(sbi, block_group));
+                ext4_lock_group(sb, block_group);
                 count = ext4_free_inodes_count(sb, gdp) + 1;
                 ext4_free_inodes_set(sb, gdp, count);
                 if (is_directory) {
@@ -276,7 +275,7 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
                 }
                 gdp->bg_checksum = ext4_group_desc_csum(sbi,
                                                         block_group, gdp);
-                spin_unlock(sb_bgl_lock(sbi, block_group));
+                ext4_unlock_group(sb, block_group);
                 percpu_counter_inc(&sbi->s_freeinodes_counter);
                 if (is_directory)
                         percpu_counter_dec(&sbi->s_dirs_counter);
@@ -707,10 +706,10 @@ static int find_group_other(struct super_block *sb, struct inode *parent,

 /*
  * claim the inode from the inode bitmap. If the group
- * is uninit we need to take the groups's sb_bgl_lock
+ * is uninit we need to take the groups's ext4_group_lock
  * and clear the uninit flag. The inode bitmap update
  * and group desc uninit flag clear should be done
- * after holding sb_bgl_lock so that ext4_read_inode_bitmap
+ * after holding ext4_group_lock so that ext4_read_inode_bitmap
  * doesn't race with the ext4_claim_inode
  */
 static int ext4_claim_inode(struct super_block *sb,
@@ -721,7 +720,7 @@ static int ext4_claim_inode(struct super_block *sb,
         struct ext4_sb_info *sbi = EXT4_SB(sb);
         struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group, NULL);

-        spin_lock(sb_bgl_lock(sbi, group));
+        ext4_lock_group(sb, group);
         if (ext4_set_bit(ino, inode_bitmap_bh->b_data)) {
                 /* not a free inode */
                 retval = 1;
@@ -730,7 +729,7 @@ static int ext4_claim_inode(struct super_block *sb,
         ino++;
         if ((group == 0 && ino < EXT4_FIRST_INO(sb)) ||
                         ino > EXT4_INODES_PER_GROUP(sb)) {
-                spin_unlock(sb_bgl_lock(sbi, group));
+                ext4_unlock_group(sb, group);
                 ext4_error(sb, __func__,
                            "reserved inode or inode > inodes count - "
                            "block_group = %u, inode=%lu", group,
@@ -779,7 +778,7 @@ static int ext4_claim_inode(struct super_block *sb,
         }
         gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp);
 err_ret:
-        spin_unlock(sb_bgl_lock(sbi, group));
+        ext4_unlock_group(sb, group);
         return retval;
 }

@@ -935,7 +934,7 @@ got:
         }

         free = 0;
-        spin_lock(sb_bgl_lock(sbi, group));
+        ext4_lock_group(sb, group);
         /* recheck and clear flag under lock if we still need to */
         if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
                 free = ext4_free_blocks_after_init(sb, group, gdp);
@@ -944,7 +943,7 @@ got:
                 gdp->bg_checksum = ext4_group_desc_csum(sbi, group,
                                                         gdp);
         }
-        spin_unlock(sb_bgl_lock(sbi, group));
+        ext4_unlock_group(sb, group);

         /* Don't need to dirty bitmap block if we didn't change it */
         if (free) {
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index df75855ae6f7..e76459cedcdb 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -372,24 +372,12 @@ static inline void mb_set_bit(int bit, void *addr)
         ext4_set_bit(bit, addr);
 }

-static inline void mb_set_bit_atomic(spinlock_t *lock, int bit, void *addr)
-{
-        addr = mb_correct_addr_and_bit(&bit, addr);
-        ext4_set_bit_atomic(lock, bit, addr);
-}
-
 static inline void mb_clear_bit(int bit, void *addr)
 {
         addr = mb_correct_addr_and_bit(&bit, addr);
         ext4_clear_bit(bit, addr);
 }

-static inline void mb_clear_bit_atomic(spinlock_t *lock, int bit, void *addr)
-{
-        addr = mb_correct_addr_and_bit(&bit, addr);
-        ext4_clear_bit_atomic(lock, bit, addr);
-}
-
 static inline int mb_find_next_zero_bit(void *addr, int max, int start)
 {
         int fix = 0, ret, tmpmax;
@@ -803,17 +791,17 @@ static int ext4_mb_init_cache(struct page *page, char *incore)
                         unlock_buffer(bh[i]);
                         continue;
                 }
-                spin_lock(sb_bgl_lock(EXT4_SB(sb), first_group + i));
+                ext4_lock_group(sb, first_group + i);
                 if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
                         ext4_init_block_bitmap(sb, bh[i],
                                                 first_group + i, desc);
                         set_bitmap_uptodate(bh[i]);
                         set_buffer_uptodate(bh[i]);
-                        spin_unlock(sb_bgl_lock(EXT4_SB(sb), first_group + i));
+                        ext4_unlock_group(sb, first_group + i);
                         unlock_buffer(bh[i]);
                         continue;
                 }
-                spin_unlock(sb_bgl_lock(EXT4_SB(sb), first_group + i));
+                ext4_unlock_group(sb, first_group + i);
                 if (buffer_uptodate(bh[i])) {
                         /*
                          * if not uninit if bh is uptodate,
@@ -1080,7 +1068,7 @@ static int mb_find_order_for_block(struct ext4_buddy *e4b, int block)
         return 0;
 }

-static void mb_clear_bits(spinlock_t *lock, void *bm, int cur, int len)
+static void mb_clear_bits(void *bm, int cur, int len)
 {
         __u32 *addr;

@@ -1093,15 +1081,12 @@ static void mb_clear_bits(spinlock_t *lock, void *bm, int cur, int len)
                         cur += 32;
                         continue;
                 }
-                if (lock)
-                        mb_clear_bit_atomic(lock, cur, bm);
-                else
-                        mb_clear_bit(cur, bm);
+                mb_clear_bit(cur, bm);
                 cur++;
         }
 }

-static void mb_set_bits(spinlock_t *lock, void *bm, int cur, int len)
+static void mb_set_bits(void *bm, int cur, int len)
 {
         __u32 *addr;

@@ -1114,10 +1099,7 @@ static void mb_set_bits(spinlock_t *lock, void *bm, int cur, int len)
                         cur += 32;
                         continue;
                 }
-                if (lock)
-                        mb_set_bit_atomic(lock, cur, bm);
-                else
-                        mb_set_bit(cur, bm);
+                mb_set_bit(cur, bm);
                 cur++;
         }
 }
@@ -1332,8 +1314,7 @@ static int mb_mark_used(struct ext4_buddy *e4b, struct ext4_free_extent *ex)
                 e4b->bd_info->bb_counters[ord]++;
         }

-        mb_set_bits(sb_bgl_lock(EXT4_SB(e4b->bd_sb), ex->fe_group),
-                        EXT4_MB_BITMAP(e4b), ex->fe_start, len0);
+        mb_set_bits(EXT4_MB_BITMAP(e4b), ex->fe_start, len0);
         mb_check_buddy(e4b);

         return ret;
@@ -2756,7 +2737,7 @@ int ext4_mb_init(struct super_block *sb, int needs_recovery)
         return 0;
 }

-/* need to called with ext4 group lock (ext4_lock_group) */
+/* need to called with the ext4 group lock held */
 static void ext4_mb_cleanup_pa(struct ext4_group_info *grp)
 {
         struct ext4_prealloc_space *pa;
@@ -2993,14 +2974,17 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
                  * Fix the bitmap and repeat the block allocation
                  * We leak some of the blocks here.
                  */
-                mb_set_bits(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group),
-                                bitmap_bh->b_data, ac->ac_b_ex.fe_start,
-                                ac->ac_b_ex.fe_len);
+                ext4_lock_group(sb, ac->ac_b_ex.fe_group);
+                mb_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
+                                ac->ac_b_ex.fe_len);
+                ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
                 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
                 if (!err)
                         err = -EAGAIN;
                 goto out_err;
         }
+
+        ext4_lock_group(sb, ac->ac_b_ex.fe_group);
 #ifdef AGGRESSIVE_CHECK
         {
                 int i;
@@ -3010,9 +2994,7 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
                 }
         }
 #endif
-        spin_lock(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group));
-        mb_set_bits(NULL, bitmap_bh->b_data,
-                                ac->ac_b_ex.fe_start, ac->ac_b_ex.fe_len);
+        mb_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,ac->ac_b_ex.fe_len);
         if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
                 gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
                 ext4_free_blks_set(sb, gdp,
@@ -3022,7 +3004,8 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
                 len = ext4_free_blks_count(sb, gdp) - ac->ac_b_ex.fe_len;
                 ext4_free_blks_set(sb, gdp, len);
         gdp->bg_checksum = ext4_group_desc_csum(sbi, ac->ac_b_ex.fe_group, gdp);
-        spin_unlock(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group));
+
+        ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
         percpu_counter_sub(&sbi->s_freeblocks_counter, ac->ac_b_ex.fe_len);
         /*
          * Now reduce the dirty block count also. Should not go negative
@@ -3455,7 +3438,7 @@ ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
  * the function goes through all block freed in the group
  * but not yet committed and marks them used in in-core bitmap.
  * buddy must be generated from this bitmap
- * Need to be called with ext4 group lock (ext4_lock_group)
+ * Need to be called with the ext4 group lock held
  */
 static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
                                         ext4_group_t group)
@@ -3469,9 +3452,7 @@ static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,

         while (n) {
                 entry = rb_entry(n, struct ext4_free_data, node);
-                mb_set_bits(sb_bgl_lock(EXT4_SB(sb), group),
-                                bitmap, entry->start_blk,
-                                entry->count);
+                mb_set_bits(bitmap, entry->start_blk, entry->count);
                 n = rb_next(n);
         }
         return;
@@ -3480,7 +3461,7 @@ static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
 /*
  * the function goes through all preallocation in this group and marks them
  * used in in-core bitmap. buddy must be generated from this bitmap
- * Need to be called with ext4 group lock (ext4_lock_group)
+ * Need to be called with ext4 group lock held
  */
 static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
                                         ext4_group_t group)
@@ -3512,8 +3493,7 @@ static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
                 if (unlikely(len == 0))
                         continue;
                 BUG_ON(groupnr != group);
-                mb_set_bits(sb_bgl_lock(EXT4_SB(sb), group),
-                                bitmap, start, len);
+                mb_set_bits(bitmap, start, len);
                 preallocated += len;
                 count++;
         }
@@ -4856,29 +4836,25 @@ do_more:
                 new_entry->group = block_group;
                 new_entry->count = count;
                 new_entry->t_tid = handle->h_transaction->t_tid;
+
                 ext4_lock_group(sb, block_group);
-                mb_clear_bits(sb_bgl_lock(sbi, block_group), bitmap_bh->b_data,
-                                bit, count);
+                mb_clear_bits(bitmap_bh->b_data, bit, count);
                 ext4_mb_free_metadata(handle, &e4b, new_entry);
-                ext4_unlock_group(sb, block_group);
         } else {
-                ext4_lock_group(sb, block_group);
                 /* need to update group_info->bb_free and bitmap
                  * with group lock held. generate_buddy look at
                  * them with group lock_held
                  */
-                mb_clear_bits(sb_bgl_lock(sbi, block_group), bitmap_bh->b_data,
-                                bit, count);
+                ext4_lock_group(sb, block_group);
+                mb_clear_bits(bitmap_bh->b_data, bit, count);
                 mb_free_blocks(inode, &e4b, bit, count);
                 ext4_mb_return_to_preallocation(inode, &e4b, block, count);
-                ext4_unlock_group(sb, block_group);
         }

-        spin_lock(sb_bgl_lock(sbi, block_group));
         ret = ext4_free_blks_count(sb, gdp) + count;
         ext4_free_blks_set(sb, gdp, ret);
         gdp->bg_checksum = ext4_group_desc_csum(sbi, block_group, gdp);
-        spin_unlock(sb_bgl_lock(sbi, block_group));
+        ext4_unlock_group(sb, block_group);
         percpu_counter_add(&sbi->s_freeblocks_counter, count);

         if (sbi->s_log_groups_per_flex) {
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 39223a52bc71..dc34ed3d1327 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1784,18 +1784,18 @@ static int ext4_check_descriptors(struct super_block *sb)
                                "(block %llu)!\n", i, inode_table);
                         return 0;
                 }
-                spin_lock(sb_bgl_lock(sbi, i));
+                ext4_lock_group(sb, i);
                 if (!ext4_group_desc_csum_verify(sbi, i, gdp)) {
                         printk(KERN_ERR "EXT4-fs: ext4_check_descriptors: "
                                "Checksum for group %u failed (%u!=%u)\n",
                                i, le16_to_cpu(ext4_group_desc_csum(sbi, i,
                                gdp)), le16_to_cpu(gdp->bg_checksum));
                         if (!(sb->s_flags & MS_RDONLY)) {
-                                spin_unlock(sb_bgl_lock(sbi, i));
+                                ext4_unlock_group(sb, i);
                                 return 0;
                         }
                 }
-                spin_unlock(sb_bgl_lock(sbi, i));
+                ext4_unlock_group(sb, i);
                 if (!flexbg_flag)
                         first_block += EXT4_BLOCKS_PER_GROUP(sb);
         }
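
After the conversion, every group-descriptor update across balloc, ialloc, mballoc and super follows the same pattern: take the group lock, adjust the free counts and checksum, then drop the lock. Condensed from the fs/ext4/balloc.c hunk above (a sketch using the names from that hunk, not a standalone function):

```c
	/* Serialize descriptor updates on the per-group spinlock. */
	ext4_lock_group(sb, block_group);
	blk_free_count = blocks_freed + ext4_free_blks_count(sb, desc);
	ext4_free_blks_set(sb, desc, blk_free_count);
	desc->bg_checksum = ext4_group_desc_csum(sbi, block_group, desc);
	ext4_unlock_group(sb, block_group);
	/* The per-filesystem counter is updated outside the group lock. */
	percpu_counter_add(&sbi->s_freeblocks_counter, blocks_freed);
```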