author		Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>	2009-05-02 20:35:09 -0400
committer	Theodore Ts'o <tytso@mit.edu>	2009-05-02 20:35:09 -0400
commit		955ce5f5be67dfe0d1d096b543af33fe8a1ce3dd (patch)
tree		68b0bdbef1594a3e43c1ef28ae8e096b40a06ae4 /fs/ext4/ialloc.c
parent		eefd7f03b86b8a319890e7fac5a6fcc7f8694b76 (diff)
ext4: Convert ext4_lock_group to use sb_bgl_lock
We have sb_bgl_lock() and the ext4_group_info.bb_state bit spinlock to protect group information. The latter is only used within the mballoc code. Consolidate them to use sb_bgl_lock(). This makes the mballoc.c code much simpler and also avoids confusion with two locks protecting the same info.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
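For context, the consolidation works because ext4_lock_group()/ext4_unlock_group() are thin wrappers around the same per-group spinlock that sb_bgl_lock() returned. A rough sketch of the helpers involved (they live in ext4.h, not in this file's diff) looks like:

/* Sketch of the group-lock helpers the converted callers rely on. */
static inline spinlock_t *ext4_group_lock_ptr(struct super_block *sb,
					      ext4_group_t group)
{
	/* per-group spinlock taken from the superblock's block-group lock array */
	return bgl_lock_ptr(EXT4_SB(sb)->s_blockgroup_lock, group);
}

static inline void ext4_lock_group(struct super_block *sb, ext4_group_t group)
{
	spin_lock(ext4_group_lock_ptr(sb, group));
}

static inline void ext4_unlock_group(struct super_block *sb, ext4_group_t group)
{
	spin_unlock(ext4_group_lock_ptr(sb, group));
}

This is also why the ext4_free_inode() hunk below can hand ext4_group_lock_ptr(sb, block_group) straight to ext4_clear_bit_atomic() instead of wrapping the bit clear in an explicit lock/unlock pair.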
Diffstat (limited to 'fs/ext4/ialloc.c')
-rw-r--r--	fs/ext4/ialloc.c	29
1 file changed, 14 insertions(+), 15 deletions(-)
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 916d05c881ca..82f7d1d7eae0 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -122,16 +122,16 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
 		unlock_buffer(bh);
 		return bh;
 	}
-	spin_lock(sb_bgl_lock(EXT4_SB(sb), block_group));
+	ext4_lock_group(sb, block_group);
 	if (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
 		ext4_init_inode_bitmap(sb, bh, block_group, desc);
 		set_bitmap_uptodate(bh);
 		set_buffer_uptodate(bh);
-		spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group));
+		ext4_unlock_group(sb, block_group);
 		unlock_buffer(bh);
 		return bh;
 	}
-	spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group));
+	ext4_unlock_group(sb, block_group);
 	if (buffer_uptodate(bh)) {
 		/*
 		 * if not uninit if bh is uptodate,
@@ -246,9 +246,8 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
 		goto error_return;
 
 	/* Ok, now we can actually update the inode bitmaps.. */
-	spin_lock(sb_bgl_lock(sbi, block_group));
-	cleared = ext4_clear_bit(bit, bitmap_bh->b_data);
-	spin_unlock(sb_bgl_lock(sbi, block_group));
+	cleared = ext4_clear_bit_atomic(ext4_group_lock_ptr(sb, block_group),
+					bit, bitmap_bh->b_data);
 	if (!cleared)
 		ext4_error(sb, "ext4_free_inode",
 			   "bit already cleared for inode %lu", ino);
@@ -260,7 +259,7 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
 	if (fatal) goto error_return;
 
 	if (gdp) {
-		spin_lock(sb_bgl_lock(sbi, block_group));
+		ext4_lock_group(sb, block_group);
 		count = ext4_free_inodes_count(sb, gdp) + 1;
 		ext4_free_inodes_set(sb, gdp, count);
 		if (is_directory) {
@@ -276,7 +275,7 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
 		}
 		gdp->bg_checksum = ext4_group_desc_csum(sbi,
 							block_group, gdp);
-		spin_unlock(sb_bgl_lock(sbi, block_group));
+		ext4_unlock_group(sb, block_group);
 		percpu_counter_inc(&sbi->s_freeinodes_counter);
 		if (is_directory)
 			percpu_counter_dec(&sbi->s_dirs_counter);
@@ -707,10 +706,10 @@ static int find_group_other(struct super_block *sb, struct inode *parent,
 
 /*
  * claim the inode from the inode bitmap. If the group
- * is uninit we need to take the groups's sb_bgl_lock
+ * is uninit we need to take the groups's ext4_group_lock
  * and clear the uninit flag. The inode bitmap update
  * and group desc uninit flag clear should be done
- * after holding sb_bgl_lock so that ext4_read_inode_bitmap
+ * after holding ext4_group_lock so that ext4_read_inode_bitmap
  * doesn't race with the ext4_claim_inode
  */
 static int ext4_claim_inode(struct super_block *sb,
@@ -721,7 +720,7 @@ static int ext4_claim_inode(struct super_block *sb,
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
 	struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group, NULL);
 
-	spin_lock(sb_bgl_lock(sbi, group));
+	ext4_lock_group(sb, group);
 	if (ext4_set_bit(ino, inode_bitmap_bh->b_data)) {
 		/* not a free inode */
 		retval = 1;
@@ -730,7 +729,7 @@ static int ext4_claim_inode(struct super_block *sb,
 	ino++;
 	if ((group == 0 && ino < EXT4_FIRST_INO(sb)) ||
 			ino > EXT4_INODES_PER_GROUP(sb)) {
-		spin_unlock(sb_bgl_lock(sbi, group));
+		ext4_unlock_group(sb, group);
 		ext4_error(sb, __func__,
 			   "reserved inode or inode > inodes count - "
 			   "block_group = %u, inode=%lu", group,
@@ -779,7 +778,7 @@ static int ext4_claim_inode(struct super_block *sb,
 	}
 	gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp);
 err_ret:
-	spin_unlock(sb_bgl_lock(sbi, group));
+	ext4_unlock_group(sb, group);
 	return retval;
 }
 
@@ -935,7 +934,7 @@ got:
 	}
 
 	free = 0;
-	spin_lock(sb_bgl_lock(sbi, group));
+	ext4_lock_group(sb, group);
 	/* recheck and clear flag under lock if we still need to */
 	if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
 		free = ext4_free_blocks_after_init(sb, group, gdp);
@@ -944,7 +943,7 @@ got:
 		gdp->bg_checksum = ext4_group_desc_csum(sbi, group,
 							gdp);
 	}
-	spin_unlock(sb_bgl_lock(sbi, group));
+	ext4_unlock_group(sb, group);
 
 	/* Don't need to dirty bitmap block if we didn't change it */
 	if (free) {