Diffstat (limited to 'fs')
-rw-r--r-- | fs/ext3/inode.c | 8
-rw-r--r-- | fs/ext3/namei.c | 28
-rw-r--r-- | fs/ext3/resize.c | 35
-rw-r--r-- | fs/ext3/super.c | 19
-rw-r--r-- | fs/ext4/ext4.h | 6
-rw-r--r-- | fs/ext4/inode.c | 36
-rw-r--r-- | fs/ext4/mballoc.c | 6
-rw-r--r-- | fs/ext4/super.c | 5
-rw-r--r-- | fs/jbd/journal.c | 2
-rw-r--r-- | fs/jbd2/journal.c | 3
-rw-r--r-- | fs/ocfs2/alloc.c | 10
-rw-r--r-- | fs/ocfs2/alloc.h | 5
-rw-r--r-- | fs/ocfs2/namei.c | 5
-rw-r--r-- | fs/ocfs2/refcounttree.c | 150
-rw-r--r-- | fs/quota/dquot.c | 288
-rw-r--r-- | fs/quota/quota_v2.c | 9
-rw-r--r-- | fs/stat.c | 10
17 files changed, 373 insertions, 252 deletions
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index ad14227f509e..455e6e6e5cb9 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -970,7 +970,7 @@ static int ext3_get_block(struct inode *inode, sector_t iblock, | |||
970 | if (max_blocks > DIO_MAX_BLOCKS) | 970 | if (max_blocks > DIO_MAX_BLOCKS) |
971 | max_blocks = DIO_MAX_BLOCKS; | 971 | max_blocks = DIO_MAX_BLOCKS; |
972 | handle = ext3_journal_start(inode, DIO_CREDITS + | 972 | handle = ext3_journal_start(inode, DIO_CREDITS + |
973 | 2 * EXT3_QUOTA_TRANS_BLOCKS(inode->i_sb)); | 973 | EXT3_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb)); |
974 | if (IS_ERR(handle)) { | 974 | if (IS_ERR(handle)) { |
975 | ret = PTR_ERR(handle); | 975 | ret = PTR_ERR(handle); |
976 | goto out; | 976 | goto out; |
@@ -3146,8 +3146,8 @@ int ext3_setattr(struct dentry *dentry, struct iattr *attr) | |||
3146 | 3146 | ||
3147 | /* (user+group)*(old+new) structure, inode write (sb, | 3147 | /* (user+group)*(old+new) structure, inode write (sb, |
3148 | * inode block, ? - but truncate inode update has it) */ | 3148 | * inode block, ? - but truncate inode update has it) */ |
3149 | handle = ext3_journal_start(inode, 2*(EXT3_QUOTA_INIT_BLOCKS(inode->i_sb)+ | 3149 | handle = ext3_journal_start(inode, EXT3_MAXQUOTAS_INIT_BLOCKS(inode->i_sb)+ |
3150 | EXT3_QUOTA_DEL_BLOCKS(inode->i_sb))+3); | 3150 | EXT3_MAXQUOTAS_DEL_BLOCKS(inode->i_sb)+3); |
3151 | if (IS_ERR(handle)) { | 3151 | if (IS_ERR(handle)) { |
3152 | error = PTR_ERR(handle); | 3152 | error = PTR_ERR(handle); |
3153 | goto err_out; | 3153 | goto err_out; |
@@ -3239,7 +3239,7 @@ static int ext3_writepage_trans_blocks(struct inode *inode) | |||
3239 | #ifdef CONFIG_QUOTA | 3239 | #ifdef CONFIG_QUOTA |
3240 | /* We know that structure was already allocated during vfs_dq_init so | 3240 | /* We know that structure was already allocated during vfs_dq_init so |
3241 | * we will be updating only the data blocks + inodes */ | 3241 | * we will be updating only the data blocks + inodes */ |
3242 | ret += 2*EXT3_QUOTA_TRANS_BLOCKS(inode->i_sb); | 3242 | ret += EXT3_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb); |
3243 | #endif | 3243 | #endif |
3244 | 3244 | ||
3245 | return ret; | 3245 | return ret; |
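Note on the ext3 hunks above: the open-coded worst-case quota credits (2 * EXT3_QUOTA_TRANS_BLOCKS() and friends, i.e. user plus group quota) are replaced by EXT3_MAXQUOTAS_* helpers. The helper definitions live in a header outside this 'fs' diffstat view; presumably they are simple MAXQUOTAS multiples, roughly as in this sketch (the underlying macro names come from the old call sites, the definitions themselves are an assumption):

	/* Hedged sketch - worst-case journal credits when every quota type
	 * needs its quota file touched; not part of this diff, reconstructed
	 * from the call sites above. */
	#define EXT3_MAXQUOTAS_TRANS_BLOCKS(sb) (MAXQUOTAS * EXT3_QUOTA_TRANS_BLOCKS(sb))
	#define EXT3_MAXQUOTAS_INIT_BLOCKS(sb)  (MAXQUOTAS * EXT3_QUOTA_INIT_BLOCKS(sb))
	#define EXT3_MAXQUOTAS_DEL_BLOCKS(sb)   (MAXQUOTAS * EXT3_QUOTA_DEL_BLOCKS(sb))

This keeps the credit reservation correct if more quota types than user and group are ever supported, instead of hard-coding the factor 2 at each call site.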
diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c
index aad6400c9b77..7b0e44f7d66f 100644
--- a/fs/ext3/namei.c
+++ b/fs/ext3/namei.c
@@ -1699,7 +1699,7 @@ static int ext3_create (struct inode * dir, struct dentry * dentry, int mode, | |||
1699 | retry: | 1699 | retry: |
1700 | handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS(dir->i_sb) + | 1700 | handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS(dir->i_sb) + |
1701 | EXT3_INDEX_EXTRA_TRANS_BLOCKS + 3 + | 1701 | EXT3_INDEX_EXTRA_TRANS_BLOCKS + 3 + |
1702 | 2*EXT3_QUOTA_INIT_BLOCKS(dir->i_sb)); | 1702 | EXT3_MAXQUOTAS_INIT_BLOCKS(dir->i_sb)); |
1703 | if (IS_ERR(handle)) | 1703 | if (IS_ERR(handle)) |
1704 | return PTR_ERR(handle); | 1704 | return PTR_ERR(handle); |
1705 | 1705 | ||
@@ -1733,7 +1733,7 @@ static int ext3_mknod (struct inode * dir, struct dentry *dentry, | |||
1733 | retry: | 1733 | retry: |
1734 | handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS(dir->i_sb) + | 1734 | handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS(dir->i_sb) + |
1735 | EXT3_INDEX_EXTRA_TRANS_BLOCKS + 3 + | 1735 | EXT3_INDEX_EXTRA_TRANS_BLOCKS + 3 + |
1736 | 2*EXT3_QUOTA_INIT_BLOCKS(dir->i_sb)); | 1736 | EXT3_MAXQUOTAS_INIT_BLOCKS(dir->i_sb)); |
1737 | if (IS_ERR(handle)) | 1737 | if (IS_ERR(handle)) |
1738 | return PTR_ERR(handle); | 1738 | return PTR_ERR(handle); |
1739 | 1739 | ||
@@ -1769,7 +1769,7 @@ static int ext3_mkdir(struct inode * dir, struct dentry * dentry, int mode) | |||
1769 | retry: | 1769 | retry: |
1770 | handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS(dir->i_sb) + | 1770 | handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS(dir->i_sb) + |
1771 | EXT3_INDEX_EXTRA_TRANS_BLOCKS + 3 + | 1771 | EXT3_INDEX_EXTRA_TRANS_BLOCKS + 3 + |
1772 | 2*EXT3_QUOTA_INIT_BLOCKS(dir->i_sb)); | 1772 | EXT3_MAXQUOTAS_INIT_BLOCKS(dir->i_sb)); |
1773 | if (IS_ERR(handle)) | 1773 | if (IS_ERR(handle)) |
1774 | return PTR_ERR(handle); | 1774 | return PTR_ERR(handle); |
1775 | 1775 | ||
@@ -1920,7 +1920,7 @@ int ext3_orphan_add(handle_t *handle, struct inode *inode) | |||
1920 | struct ext3_iloc iloc; | 1920 | struct ext3_iloc iloc; |
1921 | int err = 0, rc; | 1921 | int err = 0, rc; |
1922 | 1922 | ||
1923 | lock_super(sb); | 1923 | mutex_lock(&EXT3_SB(sb)->s_orphan_lock); |
1924 | if (!list_empty(&EXT3_I(inode)->i_orphan)) | 1924 | if (!list_empty(&EXT3_I(inode)->i_orphan)) |
1925 | goto out_unlock; | 1925 | goto out_unlock; |
1926 | 1926 | ||
@@ -1929,9 +1929,13 @@ int ext3_orphan_add(handle_t *handle, struct inode *inode) | |||
1929 | 1929 | ||
1930 | /* @@@ FIXME: Observation from aviro: | 1930 | /* @@@ FIXME: Observation from aviro: |
1931 | * I think I can trigger J_ASSERT in ext3_orphan_add(). We block | 1931 | * I think I can trigger J_ASSERT in ext3_orphan_add(). We block |
1932 | * here (on lock_super()), so race with ext3_link() which might bump | 1932 | * here (on s_orphan_lock), so race with ext3_link() which might bump |
1933 | * ->i_nlink. For, say it, character device. Not a regular file, | 1933 | * ->i_nlink. For, say it, character device. Not a regular file, |
1934 | * not a directory, not a symlink and ->i_nlink > 0. | 1934 | * not a directory, not a symlink and ->i_nlink > 0. |
1935 | * | ||
1936 | * tytso, 4/25/2009: I'm not sure how that could happen; | ||
1937 | * shouldn't the fs core protect us from these sort of | ||
1938 | * unlink()/link() races? | ||
1935 | */ | 1939 | */ |
1936 | J_ASSERT ((S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || | 1940 | J_ASSERT ((S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || |
1937 | S_ISLNK(inode->i_mode)) || inode->i_nlink == 0); | 1941 | S_ISLNK(inode->i_mode)) || inode->i_nlink == 0); |
@@ -1968,7 +1972,7 @@ int ext3_orphan_add(handle_t *handle, struct inode *inode) | |||
1968 | jbd_debug(4, "orphan inode %lu will point to %d\n", | 1972 | jbd_debug(4, "orphan inode %lu will point to %d\n", |
1969 | inode->i_ino, NEXT_ORPHAN(inode)); | 1973 | inode->i_ino, NEXT_ORPHAN(inode)); |
1970 | out_unlock: | 1974 | out_unlock: |
1971 | unlock_super(sb); | 1975 | mutex_unlock(&EXT3_SB(sb)->s_orphan_lock); |
1972 | ext3_std_error(inode->i_sb, err); | 1976 | ext3_std_error(inode->i_sb, err); |
1973 | return err; | 1977 | return err; |
1974 | } | 1978 | } |
@@ -1986,11 +1990,9 @@ int ext3_orphan_del(handle_t *handle, struct inode *inode) | |||
1986 | struct ext3_iloc iloc; | 1990 | struct ext3_iloc iloc; |
1987 | int err = 0; | 1991 | int err = 0; |
1988 | 1992 | ||
1989 | lock_super(inode->i_sb); | 1993 | mutex_lock(&EXT3_SB(inode->i_sb)->s_orphan_lock); |
1990 | if (list_empty(&ei->i_orphan)) { | 1994 | if (list_empty(&ei->i_orphan)) |
1991 | unlock_super(inode->i_sb); | 1995 | goto out; |
1992 | return 0; | ||
1993 | } | ||
1994 | 1996 | ||
1995 | ino_next = NEXT_ORPHAN(inode); | 1997 | ino_next = NEXT_ORPHAN(inode); |
1996 | prev = ei->i_orphan.prev; | 1998 | prev = ei->i_orphan.prev; |
@@ -2040,7 +2042,7 @@ int ext3_orphan_del(handle_t *handle, struct inode *inode) | |||
2040 | out_err: | 2042 | out_err: |
2041 | ext3_std_error(inode->i_sb, err); | 2043 | ext3_std_error(inode->i_sb, err); |
2042 | out: | 2044 | out: |
2043 | unlock_super(inode->i_sb); | 2045 | mutex_unlock(&EXT3_SB(inode->i_sb)->s_orphan_lock); |
2044 | return err; | 2046 | return err; |
2045 | 2047 | ||
2046 | out_brelse: | 2048 | out_brelse: |
@@ -2175,7 +2177,7 @@ static int ext3_symlink (struct inode * dir, | |||
2175 | retry: | 2177 | retry: |
2176 | handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS(dir->i_sb) + | 2178 | handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS(dir->i_sb) + |
2177 | EXT3_INDEX_EXTRA_TRANS_BLOCKS + 5 + | 2179 | EXT3_INDEX_EXTRA_TRANS_BLOCKS + 5 + |
2178 | 2*EXT3_QUOTA_INIT_BLOCKS(dir->i_sb)); | 2180 | EXT3_MAXQUOTAS_INIT_BLOCKS(dir->i_sb)); |
2179 | if (IS_ERR(handle)) | 2181 | if (IS_ERR(handle)) |
2180 | return PTR_ERR(handle); | 2182 | return PTR_ERR(handle); |
2181 | 2183 | ||
diff --git a/fs/ext3/resize.c b/fs/ext3/resize.c
index 5f83b6179178..54351ac7cef9 100644
--- a/fs/ext3/resize.c
+++ b/fs/ext3/resize.c
@@ -209,7 +209,7 @@ static int setup_new_group_blocks(struct super_block *sb, | |||
209 | if (IS_ERR(handle)) | 209 | if (IS_ERR(handle)) |
210 | return PTR_ERR(handle); | 210 | return PTR_ERR(handle); |
211 | 211 | ||
212 | lock_super(sb); | 212 | mutex_lock(&sbi->s_resize_lock); |
213 | if (input->group != sbi->s_groups_count) { | 213 | if (input->group != sbi->s_groups_count) { |
214 | err = -EBUSY; | 214 | err = -EBUSY; |
215 | goto exit_journal; | 215 | goto exit_journal; |
@@ -324,7 +324,7 @@ exit_bh: | |||
324 | brelse(bh); | 324 | brelse(bh); |
325 | 325 | ||
326 | exit_journal: | 326 | exit_journal: |
327 | unlock_super(sb); | 327 | mutex_unlock(&sbi->s_resize_lock); |
328 | if ((err2 = ext3_journal_stop(handle)) && !err) | 328 | if ((err2 = ext3_journal_stop(handle)) && !err) |
329 | err = err2; | 329 | err = err2; |
330 | 330 | ||
@@ -662,11 +662,12 @@ exit_free: | |||
662 | * important part is that the new block and inode counts are in the backup | 662 | * important part is that the new block and inode counts are in the backup |
663 | * superblocks, and the location of the new group metadata in the GDT backups. | 663 | * superblocks, and the location of the new group metadata in the GDT backups. |
664 | * | 664 | * |
665 | * We do not need lock_super() for this, because these blocks are not | 665 | * We do not need take the s_resize_lock for this, because these |
666 | * otherwise touched by the filesystem code when it is mounted. We don't | 666 | * blocks are not otherwise touched by the filesystem code when it is |
667 | * need to worry about last changing from sbi->s_groups_count, because the | 667 | * mounted. We don't need to worry about last changing from |
668 | * worst that can happen is that we do not copy the full number of backups | 668 | * sbi->s_groups_count, because the worst that can happen is that we |
669 | * at this time. The resize which changed s_groups_count will backup again. | 669 | * do not copy the full number of backups at this time. The resize |
670 | * which changed s_groups_count will backup again. | ||
670 | */ | 671 | */ |
671 | static void update_backups(struct super_block *sb, | 672 | static void update_backups(struct super_block *sb, |
672 | int blk_off, char *data, int size) | 673 | int blk_off, char *data, int size) |
@@ -825,7 +826,7 @@ int ext3_group_add(struct super_block *sb, struct ext3_new_group_data *input) | |||
825 | goto exit_put; | 826 | goto exit_put; |
826 | } | 827 | } |
827 | 828 | ||
828 | lock_super(sb); | 829 | mutex_lock(&sbi->s_resize_lock); |
829 | if (input->group != sbi->s_groups_count) { | 830 | if (input->group != sbi->s_groups_count) { |
830 | ext3_warning(sb, __func__, | 831 | ext3_warning(sb, __func__, |
831 | "multiple resizers run on filesystem!"); | 832 | "multiple resizers run on filesystem!"); |
@@ -856,7 +857,7 @@ int ext3_group_add(struct super_block *sb, struct ext3_new_group_data *input) | |||
856 | /* | 857 | /* |
857 | * OK, now we've set up the new group. Time to make it active. | 858 | * OK, now we've set up the new group. Time to make it active. |
858 | * | 859 | * |
859 | * Current kernels don't lock all allocations via lock_super(), | 860 | * We do not lock all allocations via s_resize_lock |
860 | * so we have to be safe wrt. concurrent accesses the group | 861 | * so we have to be safe wrt. concurrent accesses the group |
861 | * data. So we need to be careful to set all of the relevant | 862 | * data. So we need to be careful to set all of the relevant |
862 | * group descriptor data etc. *before* we enable the group. | 863 | * group descriptor data etc. *before* we enable the group. |
@@ -900,12 +901,12 @@ int ext3_group_add(struct super_block *sb, struct ext3_new_group_data *input) | |||
900 | * | 901 | * |
901 | * The precise rules we use are: | 902 | * The precise rules we use are: |
902 | * | 903 | * |
903 | * * Writers of s_groups_count *must* hold lock_super | 904 | * * Writers of s_groups_count *must* hold s_resize_lock |
904 | * AND | 905 | * AND |
905 | * * Writers must perform a smp_wmb() after updating all dependent | 906 | * * Writers must perform a smp_wmb() after updating all dependent |
906 | * data and before modifying the groups count | 907 | * data and before modifying the groups count |
907 | * | 908 | * |
908 | * * Readers must hold lock_super() over the access | 909 | * * Readers must hold s_resize_lock over the access |
909 | * OR | 910 | * OR |
910 | * * Readers must perform an smp_rmb() after reading the groups count | 911 | * * Readers must perform an smp_rmb() after reading the groups count |
911 | * and before reading any dependent data. | 912 | * and before reading any dependent data. |
@@ -936,7 +937,7 @@ int ext3_group_add(struct super_block *sb, struct ext3_new_group_data *input) | |||
936 | ext3_journal_dirty_metadata(handle, sbi->s_sbh); | 937 | ext3_journal_dirty_metadata(handle, sbi->s_sbh); |
937 | 938 | ||
938 | exit_journal: | 939 | exit_journal: |
939 | unlock_super(sb); | 940 | mutex_unlock(&sbi->s_resize_lock); |
940 | if ((err2 = ext3_journal_stop(handle)) && !err) | 941 | if ((err2 = ext3_journal_stop(handle)) && !err) |
941 | err = err2; | 942 | err = err2; |
942 | if (!err) { | 943 | if (!err) { |
@@ -973,7 +974,7 @@ int ext3_group_extend(struct super_block *sb, struct ext3_super_block *es, | |||
973 | 974 | ||
974 | /* We don't need to worry about locking wrt other resizers just | 975 | /* We don't need to worry about locking wrt other resizers just |
975 | * yet: we're going to revalidate es->s_blocks_count after | 976 | * yet: we're going to revalidate es->s_blocks_count after |
976 | * taking lock_super() below. */ | 977 | * taking the s_resize_lock below. */ |
977 | o_blocks_count = le32_to_cpu(es->s_blocks_count); | 978 | o_blocks_count = le32_to_cpu(es->s_blocks_count); |
978 | o_groups_count = EXT3_SB(sb)->s_groups_count; | 979 | o_groups_count = EXT3_SB(sb)->s_groups_count; |
979 | 980 | ||
@@ -1045,11 +1046,11 @@ int ext3_group_extend(struct super_block *sb, struct ext3_super_block *es, | |||
1045 | goto exit_put; | 1046 | goto exit_put; |
1046 | } | 1047 | } |
1047 | 1048 | ||
1048 | lock_super(sb); | 1049 | mutex_lock(&EXT3_SB(sb)->s_resize_lock); |
1049 | if (o_blocks_count != le32_to_cpu(es->s_blocks_count)) { | 1050 | if (o_blocks_count != le32_to_cpu(es->s_blocks_count)) { |
1050 | ext3_warning(sb, __func__, | 1051 | ext3_warning(sb, __func__, |
1051 | "multiple resizers run on filesystem!"); | 1052 | "multiple resizers run on filesystem!"); |
1052 | unlock_super(sb); | 1053 | mutex_unlock(&EXT3_SB(sb)->s_resize_lock); |
1053 | ext3_journal_stop(handle); | 1054 | ext3_journal_stop(handle); |
1054 | err = -EBUSY; | 1055 | err = -EBUSY; |
1055 | goto exit_put; | 1056 | goto exit_put; |
@@ -1059,13 +1060,13 @@ int ext3_group_extend(struct super_block *sb, struct ext3_super_block *es, | |||
1059 | EXT3_SB(sb)->s_sbh))) { | 1060 | EXT3_SB(sb)->s_sbh))) { |
1060 | ext3_warning(sb, __func__, | 1061 | ext3_warning(sb, __func__, |
1061 | "error %d on journal write access", err); | 1062 | "error %d on journal write access", err); |
1062 | unlock_super(sb); | 1063 | mutex_unlock(&EXT3_SB(sb)->s_resize_lock); |
1063 | ext3_journal_stop(handle); | 1064 | ext3_journal_stop(handle); |
1064 | goto exit_put; | 1065 | goto exit_put; |
1065 | } | 1066 | } |
1066 | es->s_blocks_count = cpu_to_le32(o_blocks_count + add); | 1067 | es->s_blocks_count = cpu_to_le32(o_blocks_count + add); |
1067 | ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh); | 1068 | ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh); |
1068 | unlock_super(sb); | 1069 | mutex_unlock(&EXT3_SB(sb)->s_resize_lock); |
1069 | ext3_debug("freeing blocks %lu through "E3FSBLK"\n", o_blocks_count, | 1070 | ext3_debug("freeing blocks %lu through "E3FSBLK"\n", o_blocks_count, |
1070 | o_blocks_count + add); | 1071 | o_blocks_count + add); |
1071 | ext3_free_blocks_sb(handle, sb, o_blocks_count, add, &freed_blocks); | 1072 | ext3_free_blocks_sb(handle, sb, o_blocks_count, add, &freed_blocks); |
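The s_groups_count access rules quoted in the ext3_group_add() hunk above (writers hold s_resize_lock and issue smp_wmb() before bumping the count; lock-free readers pair that with smp_rmb()) correspond to a reader pattern roughly like the following sketch (illustrative only, not code from this patch):

	struct ext3_sb_info *sbi = EXT3_SB(sb);
	unsigned long ngroups = sbi->s_groups_count;

	smp_rmb();	/* pairs with the writer's smp_wmb() in ext3_group_add() */
	/* only now read group descriptors etc. that depend on ngroups */

Readers that take s_resize_lock instead do not need the barrier, since the mutex already orders them against the writer.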
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index 7ad1e8c30bd0..afa2b569da10 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -1928,6 +1928,8 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent) | |||
1928 | sb->dq_op = &ext3_quota_operations; | 1928 | sb->dq_op = &ext3_quota_operations; |
1929 | #endif | 1929 | #endif |
1930 | INIT_LIST_HEAD(&sbi->s_orphan); /* unlinked but open files */ | 1930 | INIT_LIST_HEAD(&sbi->s_orphan); /* unlinked but open files */ |
1931 | mutex_init(&sbi->s_orphan_lock); | ||
1932 | mutex_init(&sbi->s_resize_lock); | ||
1931 | 1933 | ||
1932 | sb->s_root = NULL; | 1934 | sb->s_root = NULL; |
1933 | 1935 | ||
@@ -2014,14 +2016,7 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent) | |||
2014 | } | 2016 | } |
2015 | 2017 | ||
2016 | ext3_setup_super (sb, es, sb->s_flags & MS_RDONLY); | 2018 | ext3_setup_super (sb, es, sb->s_flags & MS_RDONLY); |
2017 | /* | 2019 | |
2018 | * akpm: core read_super() calls in here with the superblock locked. | ||
2019 | * That deadlocks, because orphan cleanup needs to lock the superblock | ||
2020 | * in numerous places. Here we just pop the lock - it's relatively | ||
2021 | * harmless, because we are now ready to accept write_super() requests, | ||
2022 | * and aviro says that's the only reason for hanging onto the | ||
2023 | * superblock lock. | ||
2024 | */ | ||
2025 | EXT3_SB(sb)->s_mount_state |= EXT3_ORPHAN_FS; | 2020 | EXT3_SB(sb)->s_mount_state |= EXT3_ORPHAN_FS; |
2026 | ext3_orphan_cleanup(sb, es); | 2021 | ext3_orphan_cleanup(sb, es); |
2027 | EXT3_SB(sb)->s_mount_state &= ~EXT3_ORPHAN_FS; | 2022 | EXT3_SB(sb)->s_mount_state &= ~EXT3_ORPHAN_FS; |
@@ -2403,13 +2398,11 @@ static void ext3_mark_recovery_complete(struct super_block * sb, | |||
2403 | if (journal_flush(journal) < 0) | 2398 | if (journal_flush(journal) < 0) |
2404 | goto out; | 2399 | goto out; |
2405 | 2400 | ||
2406 | lock_super(sb); | ||
2407 | if (EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER) && | 2401 | if (EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER) && |
2408 | sb->s_flags & MS_RDONLY) { | 2402 | sb->s_flags & MS_RDONLY) { |
2409 | EXT3_CLEAR_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER); | 2403 | EXT3_CLEAR_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER); |
2410 | ext3_commit_super(sb, es, 1); | 2404 | ext3_commit_super(sb, es, 1); |
2411 | } | 2405 | } |
2412 | unlock_super(sb); | ||
2413 | 2406 | ||
2414 | out: | 2407 | out: |
2415 | journal_unlock_updates(journal); | 2408 | journal_unlock_updates(journal); |
@@ -2601,13 +2594,7 @@ static int ext3_remount (struct super_block * sb, int * flags, char * data) | |||
2601 | (sbi->s_mount_state & EXT3_VALID_FS)) | 2594 | (sbi->s_mount_state & EXT3_VALID_FS)) |
2602 | es->s_state = cpu_to_le16(sbi->s_mount_state); | 2595 | es->s_state = cpu_to_le16(sbi->s_mount_state); |
2603 | 2596 | ||
2604 | /* | ||
2605 | * We have to unlock super so that we can wait for | ||
2606 | * transactions. | ||
2607 | */ | ||
2608 | unlock_super(sb); | ||
2609 | ext3_mark_recovery_complete(sb, es); | 2597 | ext3_mark_recovery_complete(sb, es); |
2610 | lock_super(sb); | ||
2611 | } else { | 2598 | } else { |
2612 | __le32 ret; | 2599 | __le32 ret; |
2613 | if ((ret = EXT3_HAS_RO_COMPAT_FEATURE(sb, | 2600 | if ((ret = EXT3_HAS_RO_COMPAT_FEATURE(sb, |
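The two mutex_init() calls added to ext3_fill_super() initialize fields that must also be declared in struct ext3_sb_info; that header change falls outside this 'fs' diffstat view, but the declarations are presumably along these lines (a sketch, not part of the hunks shown):

	struct ext3_sb_info {
		/* ... */
		struct mutex s_orphan_lock;	/* on-disk orphan chain and sbi->s_orphan list */
		struct mutex s_resize_lock;	/* serializes resizers; held by s_groups_count writers */
		/* ... */
	};

With per-filesystem mutexes in place, lock_super()/unlock_super() can be dropped from the orphan, resize, recovery and remount paths above, including the old "unlock super so that we can wait for transactions" workaround removed from ext3_remount().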
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index ab31e65d46d0..56f9271ee8cc 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -704,6 +704,10 @@ struct ext4_inode_info { | |||
704 | __u16 i_extra_isize; | 704 | __u16 i_extra_isize; |
705 | 705 | ||
706 | spinlock_t i_block_reservation_lock; | 706 | spinlock_t i_block_reservation_lock; |
707 | #ifdef CONFIG_QUOTA | ||
708 | /* quota space reservation, managed internally by quota code */ | ||
709 | qsize_t i_reserved_quota; | ||
710 | #endif | ||
707 | 711 | ||
708 | /* completed async DIOs that might need unwritten extents handling */ | 712 | /* completed async DIOs that might need unwritten extents handling */ |
709 | struct list_head i_aio_dio_complete_list; | 713 | struct list_head i_aio_dio_complete_list; |
@@ -1435,7 +1439,7 @@ extern int ext4_chunk_trans_blocks(struct inode *, int nrblocks); | |||
1435 | extern int ext4_block_truncate_page(handle_t *handle, | 1439 | extern int ext4_block_truncate_page(handle_t *handle, |
1436 | struct address_space *mapping, loff_t from); | 1440 | struct address_space *mapping, loff_t from); |
1437 | extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf); | 1441 | extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf); |
1438 | extern qsize_t ext4_get_reserved_space(struct inode *inode); | 1442 | extern qsize_t *ext4_get_reserved_space(struct inode *inode); |
1439 | extern int flush_aio_dio_completed_IO(struct inode *inode); | 1443 | extern int flush_aio_dio_completed_IO(struct inode *inode); |
1440 | /* ioctl.c */ | 1444 | /* ioctl.c */ |
1441 | extern long ext4_ioctl(struct file *, unsigned int, unsigned long); | 1445 | extern long ext4_ioctl(struct file *, unsigned int, unsigned long); |
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 5352db1a3086..ab807963a614 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -1003,17 +1003,12 @@ out: | |||
1003 | return err; | 1003 | return err; |
1004 | } | 1004 | } |
1005 | 1005 | ||
1006 | qsize_t ext4_get_reserved_space(struct inode *inode) | 1006 | #ifdef CONFIG_QUOTA |
1007 | qsize_t *ext4_get_reserved_space(struct inode *inode) | ||
1007 | { | 1008 | { |
1008 | unsigned long long total; | 1009 | return &EXT4_I(inode)->i_reserved_quota; |
1009 | |||
1010 | spin_lock(&EXT4_I(inode)->i_block_reservation_lock); | ||
1011 | total = EXT4_I(inode)->i_reserved_data_blocks + | ||
1012 | EXT4_I(inode)->i_reserved_meta_blocks; | ||
1013 | spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); | ||
1014 | |||
1015 | return (total << inode->i_blkbits); | ||
1016 | } | 1010 | } |
1011 | #endif | ||
1017 | /* | 1012 | /* |
1018 | * Calculate the number of metadata blocks need to reserve | 1013 | * Calculate the number of metadata blocks need to reserve |
1019 | * to allocate @blocks for non extent file based file | 1014 | * to allocate @blocks for non extent file based file |
@@ -1051,7 +1046,7 @@ static int ext4_calc_metadata_amount(struct inode *inode, int blocks) | |||
1051 | static void ext4_da_update_reserve_space(struct inode *inode, int used) | 1046 | static void ext4_da_update_reserve_space(struct inode *inode, int used) |
1052 | { | 1047 | { |
1053 | struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); | 1048 | struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); |
1054 | int total, mdb, mdb_free; | 1049 | int total, mdb, mdb_free, mdb_claim = 0; |
1055 | 1050 | ||
1056 | spin_lock(&EXT4_I(inode)->i_block_reservation_lock); | 1051 | spin_lock(&EXT4_I(inode)->i_block_reservation_lock); |
1057 | /* recalculate the number of metablocks still need to be reserved */ | 1052 | /* recalculate the number of metablocks still need to be reserved */ |
@@ -1064,7 +1059,9 @@ static void ext4_da_update_reserve_space(struct inode *inode, int used) | |||
1064 | 1059 | ||
1065 | if (mdb_free) { | 1060 | if (mdb_free) { |
1066 | /* Account for allocated meta_blocks */ | 1061 | /* Account for allocated meta_blocks */ |
1067 | mdb_free -= EXT4_I(inode)->i_allocated_meta_blocks; | 1062 | mdb_claim = EXT4_I(inode)->i_allocated_meta_blocks; |
1063 | BUG_ON(mdb_free < mdb_claim); | ||
1064 | mdb_free -= mdb_claim; | ||
1068 | 1065 | ||
1069 | /* update fs dirty blocks counter */ | 1066 | /* update fs dirty blocks counter */ |
1070 | percpu_counter_sub(&sbi->s_dirtyblocks_counter, mdb_free); | 1067 | percpu_counter_sub(&sbi->s_dirtyblocks_counter, mdb_free); |
@@ -1075,8 +1072,11 @@ static void ext4_da_update_reserve_space(struct inode *inode, int used) | |||
1075 | /* update per-inode reservations */ | 1072 | /* update per-inode reservations */ |
1076 | BUG_ON(used > EXT4_I(inode)->i_reserved_data_blocks); | 1073 | BUG_ON(used > EXT4_I(inode)->i_reserved_data_blocks); |
1077 | EXT4_I(inode)->i_reserved_data_blocks -= used; | 1074 | EXT4_I(inode)->i_reserved_data_blocks -= used; |
1075 | percpu_counter_sub(&sbi->s_dirtyblocks_counter, used + mdb_claim); | ||
1078 | spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); | 1076 | spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); |
1079 | 1077 | ||
1078 | vfs_dq_claim_block(inode, used + mdb_claim); | ||
1079 | |||
1080 | /* | 1080 | /* |
1081 | * free those over-booking quota for metadata blocks | 1081 | * free those over-booking quota for metadata blocks |
1082 | */ | 1082 | */ |
@@ -1816,19 +1816,17 @@ repeat: | |||
1816 | 1816 | ||
1817 | md_needed = mdblocks - EXT4_I(inode)->i_reserved_meta_blocks; | 1817 | md_needed = mdblocks - EXT4_I(inode)->i_reserved_meta_blocks; |
1818 | total = md_needed + nrblocks; | 1818 | total = md_needed + nrblocks; |
1819 | spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); | ||
1819 | 1820 | ||
1820 | /* | 1821 | /* |
1821 | * Make quota reservation here to prevent quota overflow | 1822 | * Make quota reservation here to prevent quota overflow |
1822 | * later. Real quota accounting is done at pages writeout | 1823 | * later. Real quota accounting is done at pages writeout |
1823 | * time. | 1824 | * time. |
1824 | */ | 1825 | */ |
1825 | if (vfs_dq_reserve_block(inode, total)) { | 1826 | if (vfs_dq_reserve_block(inode, total)) |
1826 | spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); | ||
1827 | return -EDQUOT; | 1827 | return -EDQUOT; |
1828 | } | ||
1829 | 1828 | ||
1830 | if (ext4_claim_free_blocks(sbi, total)) { | 1829 | if (ext4_claim_free_blocks(sbi, total)) { |
1831 | spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); | ||
1832 | vfs_dq_release_reservation_block(inode, total); | 1830 | vfs_dq_release_reservation_block(inode, total); |
1833 | if (ext4_should_retry_alloc(inode->i_sb, &retries)) { | 1831 | if (ext4_should_retry_alloc(inode->i_sb, &retries)) { |
1834 | yield(); | 1832 | yield(); |
@@ -1836,10 +1834,11 @@ repeat: | |||
1836 | } | 1834 | } |
1837 | return -ENOSPC; | 1835 | return -ENOSPC; |
1838 | } | 1836 | } |
1837 | spin_lock(&EXT4_I(inode)->i_block_reservation_lock); | ||
1839 | EXT4_I(inode)->i_reserved_data_blocks += nrblocks; | 1838 | EXT4_I(inode)->i_reserved_data_blocks += nrblocks; |
1840 | EXT4_I(inode)->i_reserved_meta_blocks = mdblocks; | 1839 | EXT4_I(inode)->i_reserved_meta_blocks += md_needed; |
1841 | |||
1842 | spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); | 1840 | spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); |
1841 | |||
1843 | return 0; /* success */ | 1842 | return 0; /* success */ |
1844 | } | 1843 | } |
1845 | 1844 | ||
@@ -4794,6 +4793,9 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino) | |||
4794 | ((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32; | 4793 | ((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32; |
4795 | inode->i_size = ext4_isize(raw_inode); | 4794 | inode->i_size = ext4_isize(raw_inode); |
4796 | ei->i_disksize = inode->i_size; | 4795 | ei->i_disksize = inode->i_size; |
4796 | #ifdef CONFIG_QUOTA | ||
4797 | ei->i_reserved_quota = 0; | ||
4798 | #endif | ||
4797 | inode->i_generation = le32_to_cpu(raw_inode->i_generation); | 4799 | inode->i_generation = le32_to_cpu(raw_inode->i_generation); |
4798 | ei->i_block_group = iloc.block_group; | 4800 | ei->i_block_group = iloc.block_group; |
4799 | ei->i_last_alloc_group = ~0; | 4801 | ei->i_last_alloc_group = ~0; |
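Taken together with the mballoc.c hunk below, the ext4/inode.c changes move the quota side of delayed allocation out of the block-reservation spinlock and into two well-defined points. A rough summary of the resulting flow, paraphrased from the hunks (not an authoritative description):

	/* reservation time - ext4_da_reserve_space(): */
	vfs_dq_reserve_block(inode, data + metadata);	/* may sleep; now called with
							 * i_block_reservation_lock dropped */
	ext4_claim_free_blocks(sbi, total);		/* filesystem free-space check */

	/* allocation time - ext4_da_update_reserve_space(): */
	vfs_dq_claim_block(inode, used + mdb_claim);	/* reservation becomes real quota
							 * usage; previously done per-extent
							 * in ext4_mb_mark_diskspace_used() */

The new i_reserved_quota field, handed out by the pointer-returning ext4_get_reserved_space(), is the storage the generic quota code uses to track the still-outstanding reservation; see the fs/quota/dquot.c hunks below.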
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index b1fd3daadc9c..d34afad3e137 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -2755,12 +2755,6 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac, | |||
2755 | if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED)) | 2755 | if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED)) |
2756 | /* release all the reserved blocks if non delalloc */ | 2756 | /* release all the reserved blocks if non delalloc */ |
2757 | percpu_counter_sub(&sbi->s_dirtyblocks_counter, reserv_blks); | 2757 | percpu_counter_sub(&sbi->s_dirtyblocks_counter, reserv_blks); |
2758 | else { | ||
2759 | percpu_counter_sub(&sbi->s_dirtyblocks_counter, | ||
2760 | ac->ac_b_ex.fe_len); | ||
2761 | /* convert reserved quota blocks to real quota blocks */ | ||
2762 | vfs_dq_claim_block(ac->ac_inode, ac->ac_b_ex.fe_len); | ||
2763 | } | ||
2764 | 2758 | ||
2765 | if (sbi->s_log_groups_per_flex) { | 2759 | if (sbi->s_log_groups_per_flex) { |
2766 | ext4_group_t flex_group = ext4_flex_group(sbi, | 2760 | ext4_group_t flex_group = ext4_flex_group(sbi, |
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 827bde1f2594..6ed9aa91f27d 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -704,6 +704,9 @@ static struct inode *ext4_alloc_inode(struct super_block *sb) | |||
704 | ei->i_allocated_meta_blocks = 0; | 704 | ei->i_allocated_meta_blocks = 0; |
705 | ei->i_delalloc_reserved_flag = 0; | 705 | ei->i_delalloc_reserved_flag = 0; |
706 | spin_lock_init(&(ei->i_block_reservation_lock)); | 706 | spin_lock_init(&(ei->i_block_reservation_lock)); |
707 | #ifdef CONFIG_QUOTA | ||
708 | ei->i_reserved_quota = 0; | ||
709 | #endif | ||
707 | INIT_LIST_HEAD(&ei->i_aio_dio_complete_list); | 710 | INIT_LIST_HEAD(&ei->i_aio_dio_complete_list); |
708 | ei->cur_aio_dio = NULL; | 711 | ei->cur_aio_dio = NULL; |
709 | ei->i_sync_tid = 0; | 712 | ei->i_sync_tid = 0; |
@@ -1014,7 +1017,9 @@ static const struct dquot_operations ext4_quota_operations = { | |||
1014 | .reserve_space = dquot_reserve_space, | 1017 | .reserve_space = dquot_reserve_space, |
1015 | .claim_space = dquot_claim_space, | 1018 | .claim_space = dquot_claim_space, |
1016 | .release_rsv = dquot_release_reserved_space, | 1019 | .release_rsv = dquot_release_reserved_space, |
1020 | #ifdef CONFIG_QUOTA | ||
1017 | .get_reserved_space = ext4_get_reserved_space, | 1021 | .get_reserved_space = ext4_get_reserved_space, |
1022 | #endif | ||
1018 | .alloc_inode = dquot_alloc_inode, | 1023 | .alloc_inode = dquot_alloc_inode, |
1019 | .free_space = dquot_free_space, | 1024 | .free_space = dquot_free_space, |
1020 | .free_inode = dquot_free_inode, | 1025 | .free_inode = dquot_free_inode, |
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c
index 4160afad6d00..bd224eec9b07 100644
--- a/fs/jbd/journal.c
+++ b/fs/jbd/journal.c
@@ -1913,7 +1913,7 @@ static void __init jbd_create_debugfs_entry(void) | |||
1913 | { | 1913 | { |
1914 | jbd_debugfs_dir = debugfs_create_dir("jbd", NULL); | 1914 | jbd_debugfs_dir = debugfs_create_dir("jbd", NULL); |
1915 | if (jbd_debugfs_dir) | 1915 | if (jbd_debugfs_dir) |
1916 | jbd_debug = debugfs_create_u8("jbd-debug", S_IRUGO, | 1916 | jbd_debug = debugfs_create_u8("jbd-debug", S_IRUGO | S_IWUSR, |
1917 | jbd_debugfs_dir, | 1917 | jbd_debugfs_dir, |
1918 | &journal_enable_debug); | 1918 | &journal_enable_debug); |
1919 | } | 1919 | } |
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index b7ca3a92a4db..17af879e6e9e 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -2115,7 +2115,8 @@ static void __init jbd2_create_debugfs_entry(void) | |||
2115 | { | 2115 | { |
2116 | jbd2_debugfs_dir = debugfs_create_dir("jbd2", NULL); | 2116 | jbd2_debugfs_dir = debugfs_create_dir("jbd2", NULL); |
2117 | if (jbd2_debugfs_dir) | 2117 | if (jbd2_debugfs_dir) |
2118 | jbd2_debug = debugfs_create_u8(JBD2_DEBUG_NAME, S_IRUGO, | 2118 | jbd2_debug = debugfs_create_u8(JBD2_DEBUG_NAME, |
2119 | S_IRUGO | S_IWUSR, | ||
2119 | jbd2_debugfs_dir, | 2120 | jbd2_debugfs_dir, |
2120 | &jbd2_journal_enable_debug); | 2121 | &jbd2_journal_enable_debug); |
2121 | } | 2122 | } |
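In both the jbd and jbd2 hunks the debugfs u8 was created read-only; adding S_IWUSR makes it writable by root, so the journal debugging level can be raised or lowered at runtime instead of staying at its compiled-in default. Assuming debugfs is mounted at the conventional /sys/kernel/debug, and that JBD2_DEBUG_NAME is the usual "jbd2-debug", usage looks like:

	echo 5 > /sys/kernel/debug/jbd/jbd-debug
	echo 5 > /sys/kernel/debug/jbd2/jbd2-debug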
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index fb4e672579b8..d17bdc718f74 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -1765,9 +1765,9 @@ set_and_inc: | |||
1765 | * | 1765 | * |
1766 | * The array index of the subtree root is passed back. | 1766 | * The array index of the subtree root is passed back. |
1767 | */ | 1767 | */ |
1768 | static int ocfs2_find_subtree_root(struct ocfs2_extent_tree *et, | 1768 | int ocfs2_find_subtree_root(struct ocfs2_extent_tree *et, |
1769 | struct ocfs2_path *left, | 1769 | struct ocfs2_path *left, |
1770 | struct ocfs2_path *right) | 1770 | struct ocfs2_path *right) |
1771 | { | 1771 | { |
1772 | int i = 0; | 1772 | int i = 0; |
1773 | 1773 | ||
@@ -2872,8 +2872,8 @@ out: | |||
2872 | * This looks similar, but is subtly different to | 2872 | * This looks similar, but is subtly different to |
2873 | * ocfs2_find_cpos_for_left_leaf(). | 2873 | * ocfs2_find_cpos_for_left_leaf(). |
2874 | */ | 2874 | */ |
2875 | static int ocfs2_find_cpos_for_right_leaf(struct super_block *sb, | 2875 | int ocfs2_find_cpos_for_right_leaf(struct super_block *sb, |
2876 | struct ocfs2_path *path, u32 *cpos) | 2876 | struct ocfs2_path *path, u32 *cpos) |
2877 | { | 2877 | { |
2878 | int i, j, ret = 0; | 2878 | int i, j, ret = 0; |
2879 | u64 blkno; | 2879 | u64 blkno; |
diff --git a/fs/ocfs2/alloc.h b/fs/ocfs2/alloc.h
index 9c122d574464..1db4359ccb90 100644
--- a/fs/ocfs2/alloc.h
+++ b/fs/ocfs2/alloc.h
@@ -317,4 +317,9 @@ int ocfs2_path_bh_journal_access(handle_t *handle, | |||
317 | int ocfs2_journal_access_path(struct ocfs2_caching_info *ci, | 317 | int ocfs2_journal_access_path(struct ocfs2_caching_info *ci, |
318 | handle_t *handle, | 318 | handle_t *handle, |
319 | struct ocfs2_path *path); | 319 | struct ocfs2_path *path); |
320 | int ocfs2_find_cpos_for_right_leaf(struct super_block *sb, | ||
321 | struct ocfs2_path *path, u32 *cpos); | ||
322 | int ocfs2_find_subtree_root(struct ocfs2_extent_tree *et, | ||
323 | struct ocfs2_path *left, | ||
324 | struct ocfs2_path *right); | ||
320 | #endif /* OCFS2_ALLOC_H */ | 325 | #endif /* OCFS2_ALLOC_H */ |
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index f010b22b1c44..3e9b46002f22 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -2108,6 +2108,7 @@ int ocfs2_create_inode_in_orphan(struct inode *dir, | |||
2108 | } | 2108 | } |
2109 | did_quota_inode = 1; | 2109 | did_quota_inode = 1; |
2110 | 2110 | ||
2111 | inode->i_nlink = 0; | ||
2111 | /* do the real work now. */ | 2112 | /* do the real work now. */ |
2112 | status = ocfs2_mknod_locked(osb, dir, inode, | 2113 | status = ocfs2_mknod_locked(osb, dir, inode, |
2113 | 0, &new_di_bh, parent_di_bh, handle, | 2114 | 0, &new_di_bh, parent_di_bh, handle, |
@@ -2136,6 +2137,7 @@ int ocfs2_create_inode_in_orphan(struct inode *dir, | |||
2136 | if (status < 0) | 2137 | if (status < 0) |
2137 | mlog_errno(status); | 2138 | mlog_errno(status); |
2138 | 2139 | ||
2140 | insert_inode_hash(inode); | ||
2139 | leave: | 2141 | leave: |
2140 | if (status < 0 && did_quota_inode) | 2142 | if (status < 0 && did_quota_inode) |
2141 | vfs_dq_free_inode(inode); | 2143 | vfs_dq_free_inode(inode); |
@@ -2267,6 +2269,8 @@ int ocfs2_mv_orphaned_inode_to_new(struct inode *dir, | |||
2267 | di = (struct ocfs2_dinode *)di_bh->b_data; | 2269 | di = (struct ocfs2_dinode *)di_bh->b_data; |
2268 | le32_add_cpu(&di->i_flags, -OCFS2_ORPHANED_FL); | 2270 | le32_add_cpu(&di->i_flags, -OCFS2_ORPHANED_FL); |
2269 | di->i_orphaned_slot = 0; | 2271 | di->i_orphaned_slot = 0; |
2272 | inode->i_nlink = 1; | ||
2273 | ocfs2_set_links_count(di, inode->i_nlink); | ||
2270 | ocfs2_journal_dirty(handle, di_bh); | 2274 | ocfs2_journal_dirty(handle, di_bh); |
2271 | 2275 | ||
2272 | status = ocfs2_add_entry(handle, dentry, inode, | 2276 | status = ocfs2_add_entry(handle, dentry, inode, |
@@ -2284,7 +2288,6 @@ int ocfs2_mv_orphaned_inode_to_new(struct inode *dir, | |||
2284 | goto out_commit; | 2288 | goto out_commit; |
2285 | } | 2289 | } |
2286 | 2290 | ||
2287 | insert_inode_hash(inode); | ||
2288 | dentry->d_op = &ocfs2_dentry_ops; | 2291 | dentry->d_op = &ocfs2_dentry_ops; |
2289 | d_instantiate(dentry, inode); | 2292 | d_instantiate(dentry, inode); |
2290 | status = 0; | 2293 | status = 0; |
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index 30967e3f5e43..74db2be75dd6 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -276,7 +276,7 @@ static void ocfs2_erase_refcount_tree_from_list(struct ocfs2_super *osb, | |||
276 | spin_unlock(&osb->osb_lock); | 276 | spin_unlock(&osb->osb_lock); |
277 | } | 277 | } |
278 | 278 | ||
279 | void ocfs2_kref_remove_refcount_tree(struct kref *kref) | 279 | static void ocfs2_kref_remove_refcount_tree(struct kref *kref) |
280 | { | 280 | { |
281 | struct ocfs2_refcount_tree *tree = | 281 | struct ocfs2_refcount_tree *tree = |
282 | container_of(kref, struct ocfs2_refcount_tree, rf_getcnt); | 282 | container_of(kref, struct ocfs2_refcount_tree, rf_getcnt); |
@@ -524,23 +524,6 @@ out: | |||
524 | return ret; | 524 | return ret; |
525 | } | 525 | } |
526 | 526 | ||
527 | int ocfs2_lock_refcount_tree_by_inode(struct inode *inode, int rw, | ||
528 | struct ocfs2_refcount_tree **ret_tree, | ||
529 | struct buffer_head **ref_bh) | ||
530 | { | ||
531 | int ret; | ||
532 | u64 ref_blkno; | ||
533 | |||
534 | ret = ocfs2_get_refcount_block(inode, &ref_blkno); | ||
535 | if (ret) { | ||
536 | mlog_errno(ret); | ||
537 | return ret; | ||
538 | } | ||
539 | |||
540 | return ocfs2_lock_refcount_tree(OCFS2_SB(inode->i_sb), ref_blkno, | ||
541 | rw, ret_tree, ref_bh); | ||
542 | } | ||
543 | |||
544 | void ocfs2_unlock_refcount_tree(struct ocfs2_super *osb, | 527 | void ocfs2_unlock_refcount_tree(struct ocfs2_super *osb, |
545 | struct ocfs2_refcount_tree *tree, int rw) | 528 | struct ocfs2_refcount_tree *tree, int rw) |
546 | { | 529 | { |
@@ -969,6 +952,103 @@ out: | |||
969 | } | 952 | } |
970 | 953 | ||
971 | /* | 954 | /* |
955 | * Find the end range for a leaf refcount block indicated by | ||
956 | * el->l_recs[index].e_blkno. | ||
957 | */ | ||
958 | static int ocfs2_get_refcount_cpos_end(struct ocfs2_caching_info *ci, | ||
959 | struct buffer_head *ref_root_bh, | ||
960 | struct ocfs2_extent_block *eb, | ||
961 | struct ocfs2_extent_list *el, | ||
962 | int index, u32 *cpos_end) | ||
963 | { | ||
964 | int ret, i, subtree_root; | ||
965 | u32 cpos; | ||
966 | u64 blkno; | ||
967 | struct super_block *sb = ocfs2_metadata_cache_get_super(ci); | ||
968 | struct ocfs2_path *left_path = NULL, *right_path = NULL; | ||
969 | struct ocfs2_extent_tree et; | ||
970 | struct ocfs2_extent_list *tmp_el; | ||
971 | |||
972 | if (index < le16_to_cpu(el->l_next_free_rec) - 1) { | ||
973 | /* | ||
974 | * We have a extent rec after index, so just use the e_cpos | ||
975 | * of the next extent rec. | ||
976 | */ | ||
977 | *cpos_end = le32_to_cpu(el->l_recs[index+1].e_cpos); | ||
978 | return 0; | ||
979 | } | ||
980 | |||
981 | if (!eb || (eb && !eb->h_next_leaf_blk)) { | ||
982 | /* | ||
983 | * We are the last extent rec, so any high cpos should | ||
984 | * be stored in this leaf refcount block. | ||
985 | */ | ||
986 | *cpos_end = UINT_MAX; | ||
987 | return 0; | ||
988 | } | ||
989 | |||
990 | /* | ||
991 | * If the extent block isn't the last one, we have to find | ||
992 | * the subtree root between this extent block and the next | ||
993 | * leaf extent block and get the corresponding e_cpos from | ||
994 | * the subroot. Otherwise we may corrupt the b-tree. | ||
995 | */ | ||
996 | ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh); | ||
997 | |||
998 | left_path = ocfs2_new_path_from_et(&et); | ||
999 | if (!left_path) { | ||
1000 | ret = -ENOMEM; | ||
1001 | mlog_errno(ret); | ||
1002 | goto out; | ||
1003 | } | ||
1004 | |||
1005 | cpos = le32_to_cpu(eb->h_list.l_recs[index].e_cpos); | ||
1006 | ret = ocfs2_find_path(ci, left_path, cpos); | ||
1007 | if (ret) { | ||
1008 | mlog_errno(ret); | ||
1009 | goto out; | ||
1010 | } | ||
1011 | |||
1012 | right_path = ocfs2_new_path_from_path(left_path); | ||
1013 | if (!right_path) { | ||
1014 | ret = -ENOMEM; | ||
1015 | mlog_errno(ret); | ||
1016 | goto out; | ||
1017 | } | ||
1018 | |||
1019 | ret = ocfs2_find_cpos_for_right_leaf(sb, left_path, &cpos); | ||
1020 | if (ret) { | ||
1021 | mlog_errno(ret); | ||
1022 | goto out; | ||
1023 | } | ||
1024 | |||
1025 | ret = ocfs2_find_path(ci, right_path, cpos); | ||
1026 | if (ret) { | ||
1027 | mlog_errno(ret); | ||
1028 | goto out; | ||
1029 | } | ||
1030 | |||
1031 | subtree_root = ocfs2_find_subtree_root(&et, left_path, | ||
1032 | right_path); | ||
1033 | |||
1034 | tmp_el = left_path->p_node[subtree_root].el; | ||
1035 | blkno = left_path->p_node[subtree_root+1].bh->b_blocknr; | ||
1036 | for (i = 0; i < le32_to_cpu(tmp_el->l_next_free_rec); i++) { | ||
1037 | if (le64_to_cpu(tmp_el->l_recs[i].e_blkno) == blkno) { | ||
1038 | *cpos_end = le32_to_cpu(tmp_el->l_recs[i+1].e_cpos); | ||
1039 | break; | ||
1040 | } | ||
1041 | } | ||
1042 | |||
1043 | BUG_ON(i == le32_to_cpu(tmp_el->l_next_free_rec)); | ||
1044 | |||
1045 | out: | ||
1046 | ocfs2_free_path(left_path); | ||
1047 | ocfs2_free_path(right_path); | ||
1048 | return ret; | ||
1049 | } | ||
1050 | |||
1051 | /* | ||
972 | * Given a cpos and len, try to find the refcount record which contains cpos. | 1052 | * Given a cpos and len, try to find the refcount record which contains cpos. |
973 | * 1. If cpos can be found in one refcount record, return the record. | 1053 | * 1. If cpos can be found in one refcount record, return the record. |
974 | * 2. If cpos can't be found, return a fake record which start from cpos | 1054 | * 2. If cpos can't be found, return a fake record which start from cpos |
@@ -983,10 +1063,10 @@ static int ocfs2_get_refcount_rec(struct ocfs2_caching_info *ci, | |||
983 | struct buffer_head **ret_bh) | 1063 | struct buffer_head **ret_bh) |
984 | { | 1064 | { |
985 | int ret = 0, i, found; | 1065 | int ret = 0, i, found; |
986 | u32 low_cpos; | 1066 | u32 low_cpos, uninitialized_var(cpos_end); |
987 | struct ocfs2_extent_list *el; | 1067 | struct ocfs2_extent_list *el; |
988 | struct ocfs2_extent_rec *tmp, *rec = NULL; | 1068 | struct ocfs2_extent_rec *rec = NULL; |
989 | struct ocfs2_extent_block *eb; | 1069 | struct ocfs2_extent_block *eb = NULL; |
990 | struct buffer_head *eb_bh = NULL, *ref_leaf_bh = NULL; | 1070 | struct buffer_head *eb_bh = NULL, *ref_leaf_bh = NULL; |
991 | struct super_block *sb = ocfs2_metadata_cache_get_super(ci); | 1071 | struct super_block *sb = ocfs2_metadata_cache_get_super(ci); |
992 | struct ocfs2_refcount_block *rb = | 1072 | struct ocfs2_refcount_block *rb = |
@@ -1034,12 +1114,16 @@ static int ocfs2_get_refcount_rec(struct ocfs2_caching_info *ci, | |||
1034 | } | 1114 | } |
1035 | } | 1115 | } |
1036 | 1116 | ||
1037 | /* adjust len when we have ocfs2_extent_rec after it. */ | 1117 | if (found) { |
1038 | if (found && i < le16_to_cpu(el->l_next_free_rec) - 1) { | 1118 | ret = ocfs2_get_refcount_cpos_end(ci, ref_root_bh, |
1039 | tmp = &el->l_recs[i+1]; | 1119 | eb, el, i, &cpos_end); |
1120 | if (ret) { | ||
1121 | mlog_errno(ret); | ||
1122 | goto out; | ||
1123 | } | ||
1040 | 1124 | ||
1041 | if (le32_to_cpu(tmp->e_cpos) < cpos + len) | 1125 | if (cpos_end < low_cpos + len) |
1042 | len = le32_to_cpu(tmp->e_cpos) - cpos; | 1126 | len = cpos_end - low_cpos; |
1043 | } | 1127 | } |
1044 | 1128 | ||
1045 | ret = ocfs2_read_refcount_block(ci, le64_to_cpu(rec->e_blkno), | 1129 | ret = ocfs2_read_refcount_block(ci, le64_to_cpu(rec->e_blkno), |
@@ -1418,7 +1502,7 @@ static int ocfs2_divide_leaf_refcount_block(struct buffer_head *ref_leaf_bh, | |||
1418 | 1502 | ||
1419 | /* change old and new rl_used accordingly. */ | 1503 | /* change old and new rl_used accordingly. */ |
1420 | le16_add_cpu(&rl->rl_used, -num_moved); | 1504 | le16_add_cpu(&rl->rl_used, -num_moved); |
1421 | new_rl->rl_used = cpu_to_le32(num_moved); | 1505 | new_rl->rl_used = cpu_to_le16(num_moved); |
1422 | 1506 | ||
1423 | sort(&rl->rl_recs, le16_to_cpu(rl->rl_used), | 1507 | sort(&rl->rl_recs, le16_to_cpu(rl->rl_used), |
1424 | sizeof(struct ocfs2_refcount_rec), | 1508 | sizeof(struct ocfs2_refcount_rec), |
@@ -1797,7 +1881,8 @@ static int ocfs2_split_refcount_rec(handle_t *handle, | |||
1797 | recs_need++; | 1881 | recs_need++; |
1798 | 1882 | ||
1799 | /* If the leaf block don't have enough record, expand it. */ | 1883 | /* If the leaf block don't have enough record, expand it. */ |
1800 | if (le16_to_cpu(rf_list->rl_used) + recs_need > rf_list->rl_count) { | 1884 | if (le16_to_cpu(rf_list->rl_used) + recs_need > |
1885 | le16_to_cpu(rf_list->rl_count)) { | ||
1801 | struct ocfs2_refcount_rec tmp_rec; | 1886 | struct ocfs2_refcount_rec tmp_rec; |
1802 | u64 cpos = le64_to_cpu(orig_rec->r_cpos); | 1887 | u64 cpos = le64_to_cpu(orig_rec->r_cpos); |
1803 | len = le32_to_cpu(orig_rec->r_clusters); | 1888 | len = le32_to_cpu(orig_rec->r_clusters); |
@@ -1859,7 +1944,7 @@ static int ocfs2_split_refcount_rec(handle_t *handle, | |||
1859 | memcpy(tail_rec, orig_rec, sizeof(struct ocfs2_refcount_rec)); | 1944 | memcpy(tail_rec, orig_rec, sizeof(struct ocfs2_refcount_rec)); |
1860 | le64_add_cpu(&tail_rec->r_cpos, | 1945 | le64_add_cpu(&tail_rec->r_cpos, |
1861 | le32_to_cpu(tail_rec->r_clusters) - len); | 1946 | le32_to_cpu(tail_rec->r_clusters) - len); |
1862 | tail_rec->r_clusters = le32_to_cpu(len); | 1947 | tail_rec->r_clusters = cpu_to_le32(len); |
1863 | } | 1948 | } |
1864 | 1949 | ||
1865 | /* | 1950 | /* |
@@ -3840,8 +3925,7 @@ static int ocfs2_add_refcounted_extent(struct inode *inode, | |||
3840 | } | 3925 | } |
3841 | 3926 | ||
3842 | ret = ocfs2_insert_extent(handle, et, cpos, | 3927 | ret = ocfs2_insert_extent(handle, et, cpos, |
3843 | cpu_to_le64(ocfs2_clusters_to_blocks(inode->i_sb, | 3928 | ocfs2_clusters_to_blocks(inode->i_sb, p_cluster), |
3844 | p_cluster)), | ||
3845 | num_clusters, ext_flags, meta_ac); | 3929 | num_clusters, ext_flags, meta_ac); |
3846 | if (ret) { | 3930 | if (ret) { |
3847 | mlog_errno(ret); | 3931 | mlog_errno(ret); |
@@ -4253,8 +4337,8 @@ static int ocfs2_user_path_parent(const char __user *path, | |||
4253 | * @new_dentry: target dentry | 4337 | * @new_dentry: target dentry |
4254 | * @preserve: if true, preserve all file attributes | 4338 | * @preserve: if true, preserve all file attributes |
4255 | */ | 4339 | */ |
4256 | int ocfs2_vfs_reflink(struct dentry *old_dentry, struct inode *dir, | 4340 | static int ocfs2_vfs_reflink(struct dentry *old_dentry, struct inode *dir, |
4257 | struct dentry *new_dentry, bool preserve) | 4341 | struct dentry *new_dentry, bool preserve) |
4258 | { | 4342 | { |
4259 | struct inode *inode = old_dentry->d_inode; | 4343 | struct inode *inode = old_dentry->d_inode; |
4260 | int error; | 4344 | int error; |
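Several of the refcounttree.c changes above are endianness-annotation fixes of the kind sparse flags: new_rl->rl_used (a __le16) was written with cpu_to_le32(), rf_list->rl_count was compared without le16_to_cpu(), tail_rec->r_clusters got le32_to_cpu(len) where cpu_to_le32(len) was meant, and ocfs2_insert_extent() was being handed a cpu_to_le64() block number although it expects CPU byte order. A minimal illustration of the first class of bug (not code from this patch):

	__le16 rl_used;
	u16 n = 3;

	rl_used = cpu_to_le32(n);	/* wrong width: happens to work on little-endian,
					 * stores a mangled value on big-endian */
	rl_used = cpu_to_le16(n);	/* correct */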
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index cd6bb9a33c13..dea86abdf2e7 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -323,6 +323,30 @@ int dquot_mark_dquot_dirty(struct dquot *dquot) | |||
323 | } | 323 | } |
324 | EXPORT_SYMBOL(dquot_mark_dquot_dirty); | 324 | EXPORT_SYMBOL(dquot_mark_dquot_dirty); |
325 | 325 | ||
326 | /* Dirtify all the dquots - this can block when journalling */ | ||
327 | static inline int mark_all_dquot_dirty(struct dquot * const *dquot) | ||
328 | { | ||
329 | int ret, err, cnt; | ||
330 | |||
331 | ret = err = 0; | ||
332 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | ||
333 | if (dquot[cnt]) | ||
334 | /* Even in case of error we have to continue */ | ||
335 | ret = mark_dquot_dirty(dquot[cnt]); | ||
336 | if (!err) | ||
337 | err = ret; | ||
338 | } | ||
339 | return err; | ||
340 | } | ||
341 | |||
342 | static inline void dqput_all(struct dquot **dquot) | ||
343 | { | ||
344 | unsigned int cnt; | ||
345 | |||
346 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) | ||
347 | dqput(dquot[cnt]); | ||
348 | } | ||
349 | |||
326 | /* This function needs dq_list_lock */ | 350 | /* This function needs dq_list_lock */ |
327 | static inline int clear_dquot_dirty(struct dquot *dquot) | 351 | static inline int clear_dquot_dirty(struct dquot *dquot) |
328 | { | 352 | { |
@@ -1268,8 +1292,7 @@ int dquot_initialize(struct inode *inode, int type) | |||
1268 | out_err: | 1292 | out_err: |
1269 | up_write(&sb_dqopt(sb)->dqptr_sem); | 1293 | up_write(&sb_dqopt(sb)->dqptr_sem); |
1270 | /* Drop unused references */ | 1294 | /* Drop unused references */ |
1271 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) | 1295 | dqput_all(got); |
1272 | dqput(got[cnt]); | ||
1273 | return ret; | 1296 | return ret; |
1274 | } | 1297 | } |
1275 | EXPORT_SYMBOL(dquot_initialize); | 1298 | EXPORT_SYMBOL(dquot_initialize); |
@@ -1288,9 +1311,7 @@ int dquot_drop(struct inode *inode) | |||
1288 | inode->i_dquot[cnt] = NULL; | 1311 | inode->i_dquot[cnt] = NULL; |
1289 | } | 1312 | } |
1290 | up_write(&sb_dqopt(inode->i_sb)->dqptr_sem); | 1313 | up_write(&sb_dqopt(inode->i_sb)->dqptr_sem); |
1291 | 1314 | dqput_all(put); | |
1292 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) | ||
1293 | dqput(put[cnt]); | ||
1294 | return 0; | 1315 | return 0; |
1295 | } | 1316 | } |
1296 | EXPORT_SYMBOL(dquot_drop); | 1317 | EXPORT_SYMBOL(dquot_drop); |
@@ -1319,6 +1340,67 @@ void vfs_dq_drop(struct inode *inode) | |||
1319 | EXPORT_SYMBOL(vfs_dq_drop); | 1340 | EXPORT_SYMBOL(vfs_dq_drop); |
1320 | 1341 | ||
1321 | /* | 1342 | /* |
1343 | * inode_reserved_space is managed internally by quota, and protected by | ||
1344 | * i_lock similar to i_blocks+i_bytes. | ||
1345 | */ | ||
1346 | static qsize_t *inode_reserved_space(struct inode * inode) | ||
1347 | { | ||
1348 | /* Filesystem must explicitly define it's own method in order to use | ||
1349 | * quota reservation interface */ | ||
1350 | BUG_ON(!inode->i_sb->dq_op->get_reserved_space); | ||
1351 | return inode->i_sb->dq_op->get_reserved_space(inode); | ||
1352 | } | ||
1353 | |||
1354 | static void inode_add_rsv_space(struct inode *inode, qsize_t number) | ||
1355 | { | ||
1356 | spin_lock(&inode->i_lock); | ||
1357 | *inode_reserved_space(inode) += number; | ||
1358 | spin_unlock(&inode->i_lock); | ||
1359 | } | ||
1360 | |||
1361 | |||
1362 | static void inode_claim_rsv_space(struct inode *inode, qsize_t number) | ||
1363 | { | ||
1364 | spin_lock(&inode->i_lock); | ||
1365 | *inode_reserved_space(inode) -= number; | ||
1366 | __inode_add_bytes(inode, number); | ||
1367 | spin_unlock(&inode->i_lock); | ||
1368 | } | ||
1369 | |||
1370 | static void inode_sub_rsv_space(struct inode *inode, qsize_t number) | ||
1371 | { | ||
1372 | spin_lock(&inode->i_lock); | ||
1373 | *inode_reserved_space(inode) -= number; | ||
1374 | spin_unlock(&inode->i_lock); | ||
1375 | } | ||
1376 | |||
1377 | static qsize_t inode_get_rsv_space(struct inode *inode) | ||
1378 | { | ||
1379 | qsize_t ret; | ||
1380 | spin_lock(&inode->i_lock); | ||
1381 | ret = *inode_reserved_space(inode); | ||
1382 | spin_unlock(&inode->i_lock); | ||
1383 | return ret; | ||
1384 | } | ||
1385 | |||
1386 | static void inode_incr_space(struct inode *inode, qsize_t number, | ||
1387 | int reserve) | ||
1388 | { | ||
1389 | if (reserve) | ||
1390 | inode_add_rsv_space(inode, number); | ||
1391 | else | ||
1392 | inode_add_bytes(inode, number); | ||
1393 | } | ||
1394 | |||
1395 | static void inode_decr_space(struct inode *inode, qsize_t number, int reserve) | ||
1396 | { | ||
1397 | if (reserve) | ||
1398 | inode_sub_rsv_space(inode, number); | ||
1399 | else | ||
1400 | inode_sub_bytes(inode, number); | ||
1401 | } | ||
1402 | |||
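These helpers give the quota core its own per-inode counter of reserved-but-not-yet-allocated space, stored in filesystem-provided memory (the pointer returned by ->get_reserved_space(), e.g. ext4's new i_reserved_quota field earlier in this diff) and protected by inode->i_lock just like i_blocks/i_bytes. The intended lifecycle, summarized from the code above (a paraphrase, not an authoritative spec):

	/* dquot_reserve_space():  space is only promised            */
	inode_add_rsv_space(inode, n);		/* *i_reserved_quota += n */

	/* dquot_claim_space():    blocks were really allocated      */
	inode_claim_rsv_space(inode, n);	/* *i_reserved_quota -= n;
						 * __inode_add_bytes(inode, n) */

	/* dquot_release_reserved_space(): reservation is dropped    */
	inode_sub_rsv_space(inode, n);		/* *i_reserved_quota -= n */

inode_incr_space()/inode_decr_space() pick between the reserved counter and the real i_bytes depending on the reserve flag, which is what lets __dquot_alloc_space() below serve both dquot_alloc_space() and dquot_reserve_space().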
1403 | /* | ||
1322 | * Following four functions update i_blocks+i_bytes fields and | 1404 | * Following four functions update i_blocks+i_bytes fields and |
1323 | * quota information (together with appropriate checks) | 1405 | * quota information (together with appropriate checks) |
1324 | * NOTE: We absolutely rely on the fact that caller dirties | 1406 | * NOTE: We absolutely rely on the fact that caller dirties |
@@ -1336,6 +1418,21 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number, | |||
1336 | int cnt, ret = QUOTA_OK; | 1418 | int cnt, ret = QUOTA_OK; |
1337 | char warntype[MAXQUOTAS]; | 1419 | char warntype[MAXQUOTAS]; |
1338 | 1420 | ||
1421 | /* | ||
1422 | * First test before acquiring mutex - solves deadlocks when we | ||
1423 | * re-enter the quota code and are already holding the mutex | ||
1424 | */ | ||
1425 | if (IS_NOQUOTA(inode)) { | ||
1426 | inode_incr_space(inode, number, reserve); | ||
1427 | goto out; | ||
1428 | } | ||
1429 | |||
1430 | down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
1431 | if (IS_NOQUOTA(inode)) { | ||
1432 | inode_incr_space(inode, number, reserve); | ||
1433 | goto out_unlock; | ||
1434 | } | ||
1435 | |||
1339 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) | 1436 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) |
1340 | warntype[cnt] = QUOTA_NL_NOWARN; | 1437 | warntype[cnt] = QUOTA_NL_NOWARN; |
1341 | 1438 | ||
@@ -1346,7 +1443,8 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number, | |||
1346 | if (check_bdq(inode->i_dquot[cnt], number, warn, warntype+cnt) | 1443 | if (check_bdq(inode->i_dquot[cnt], number, warn, warntype+cnt) |
1347 | == NO_QUOTA) { | 1444 | == NO_QUOTA) { |
1348 | ret = NO_QUOTA; | 1445 | ret = NO_QUOTA; |
1349 | goto out_unlock; | 1446 | spin_unlock(&dq_data_lock); |
1447 | goto out_flush_warn; | ||
1350 | } | 1448 | } |
1351 | } | 1449 | } |
1352 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | 1450 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { |
@@ -1357,64 +1455,29 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number, | |||
1357 | else | 1455 | else |
1358 | dquot_incr_space(inode->i_dquot[cnt], number); | 1456 | dquot_incr_space(inode->i_dquot[cnt], number); |
1359 | } | 1457 | } |
1360 | if (!reserve) | 1458 | inode_incr_space(inode, number, reserve); |
1361 | inode_add_bytes(inode, number); | ||
1362 | out_unlock: | ||
1363 | spin_unlock(&dq_data_lock); | 1459 | spin_unlock(&dq_data_lock); |
1460 | |||
1461 | if (reserve) | ||
1462 | goto out_flush_warn; | ||
1463 | mark_all_dquot_dirty(inode->i_dquot); | ||
1464 | out_flush_warn: | ||
1364 | flush_warnings(inode->i_dquot, warntype); | 1465 | flush_warnings(inode->i_dquot, warntype); |
1466 | out_unlock: | ||
1467 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
1468 | out: | ||
1365 | return ret; | 1469 | return ret; |
1366 | } | 1470 | } |
1367 | 1471 | ||
1368 | int dquot_alloc_space(struct inode *inode, qsize_t number, int warn) | 1472 | int dquot_alloc_space(struct inode *inode, qsize_t number, int warn) |
1369 | { | 1473 | { |
1370 | int cnt, ret = QUOTA_OK; | 1474 | return __dquot_alloc_space(inode, number, warn, 0); |
1371 | |||
1372 | /* | ||
1373 | * First test before acquiring mutex - solves deadlocks when we | ||
1374 | * re-enter the quota code and are already holding the mutex | ||
1375 | */ | ||
1376 | if (IS_NOQUOTA(inode)) { | ||
1377 | inode_add_bytes(inode, number); | ||
1378 | goto out; | ||
1379 | } | ||
1380 | |||
1381 | down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
1382 | if (IS_NOQUOTA(inode)) { | ||
1383 | inode_add_bytes(inode, number); | ||
1384 | goto out_unlock; | ||
1385 | } | ||
1386 | |||
1387 | ret = __dquot_alloc_space(inode, number, warn, 0); | ||
1388 | if (ret == NO_QUOTA) | ||
1389 | goto out_unlock; | ||
1390 | |||
1391 | /* Dirtify all the dquots - this can block when journalling */ | ||
1392 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) | ||
1393 | if (inode->i_dquot[cnt]) | ||
1394 | mark_dquot_dirty(inode->i_dquot[cnt]); | ||
1395 | out_unlock: | ||
1396 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
1397 | out: | ||
1398 | return ret; | ||
1399 | } | 1475 | } |
1400 | EXPORT_SYMBOL(dquot_alloc_space); | 1476 | EXPORT_SYMBOL(dquot_alloc_space); |
1401 | 1477 | ||
1402 | int dquot_reserve_space(struct inode *inode, qsize_t number, int warn) | 1478 | int dquot_reserve_space(struct inode *inode, qsize_t number, int warn) |
1403 | { | 1479 | { |
1404 | int ret = QUOTA_OK; | 1480 | return __dquot_alloc_space(inode, number, warn, 1); |
1405 | |||
1406 | if (IS_NOQUOTA(inode)) | ||
1407 | goto out; | ||
1408 | |||
1409 | down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
1410 | if (IS_NOQUOTA(inode)) | ||
1411 | goto out_unlock; | ||
1412 | |||
1413 | ret = __dquot_alloc_space(inode, number, warn, 1); | ||
1414 | out_unlock: | ||
1415 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
1416 | out: | ||
1417 | return ret; | ||
1418 | } | 1481 | } |
1419 | EXPORT_SYMBOL(dquot_reserve_space); | 1482 | EXPORT_SYMBOL(dquot_reserve_space); |
1420 | 1483 | ||
@@ -1455,10 +1518,7 @@ int dquot_alloc_inode(const struct inode *inode, qsize_t number) | |||
1455 | warn_put_all: | 1518 | warn_put_all: |
1456 | spin_unlock(&dq_data_lock); | 1519 | spin_unlock(&dq_data_lock); |
1457 | if (ret == QUOTA_OK) | 1520 | if (ret == QUOTA_OK) |
1458 | /* Dirtify all the dquots - this can block when journalling */ | 1521 | mark_all_dquot_dirty(inode->i_dquot); |
1459 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) | ||
1460 | if (inode->i_dquot[cnt]) | ||
1461 | mark_dquot_dirty(inode->i_dquot[cnt]); | ||
1462 | flush_warnings(inode->i_dquot, warntype); | 1522 | flush_warnings(inode->i_dquot, warntype); |
1463 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | 1523 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); |
1464 | return ret; | 1524 | return ret; |
@@ -1471,14 +1531,14 @@ int dquot_claim_space(struct inode *inode, qsize_t number) | |||
1471 | int ret = QUOTA_OK; | 1531 | int ret = QUOTA_OK; |
1472 | 1532 | ||
1473 | if (IS_NOQUOTA(inode)) { | 1533 | if (IS_NOQUOTA(inode)) { |
1474 | inode_add_bytes(inode, number); | 1534 | inode_claim_rsv_space(inode, number); |
1475 | goto out; | 1535 | goto out; |
1476 | } | 1536 | } |
1477 | 1537 | ||
1478 | down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | 1538 | down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); |
1479 | if (IS_NOQUOTA(inode)) { | 1539 | if (IS_NOQUOTA(inode)) { |
1480 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | 1540 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); |
1481 | inode_add_bytes(inode, number); | 1541 | inode_claim_rsv_space(inode, number); |
1482 | goto out; | 1542 | goto out; |
1483 | } | 1543 | } |
1484 | 1544 | ||
@@ -1490,12 +1550,9 @@ int dquot_claim_space(struct inode *inode, qsize_t number) | |||
1490 | number); | 1550 | number); |
1491 | } | 1551 | } |
1492 | /* Update inode bytes */ | 1552 | /* Update inode bytes */ |
1493 | inode_add_bytes(inode, number); | 1553 | inode_claim_rsv_space(inode, number); |
1494 | spin_unlock(&dq_data_lock); | 1554 | spin_unlock(&dq_data_lock); |
1495 | /* Dirtify all the dquots - this can block when journalling */ | 1555 | mark_all_dquot_dirty(inode->i_dquot); |
1496 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) | ||
1497 | if (inode->i_dquot[cnt]) | ||
1498 | mark_dquot_dirty(inode->i_dquot[cnt]); | ||
1499 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | 1556 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); |
1500 | out: | 1557 | out: |
1501 | return ret; | 1558 | return ret; |
@@ -1503,38 +1560,9 @@ out: | |||
1503 | EXPORT_SYMBOL(dquot_claim_space); | 1560 | EXPORT_SYMBOL(dquot_claim_space); |
1504 | 1561 | ||
1505 | /* | 1562 | /* |
1506 | * Release reserved quota space | ||
1507 | */ | ||
1508 | void dquot_release_reserved_space(struct inode *inode, qsize_t number) | ||
1509 | { | ||
1510 | int cnt; | ||
1511 | |||
1512 | if (IS_NOQUOTA(inode)) | ||
1513 | goto out; | ||
1514 | |||
1515 | down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
1516 | if (IS_NOQUOTA(inode)) | ||
1517 | goto out_unlock; | ||
1518 | |||
1519 | spin_lock(&dq_data_lock); | ||
1520 | /* Release reserved dquots */ | ||
1521 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | ||
1522 | if (inode->i_dquot[cnt]) | ||
1523 | dquot_free_reserved_space(inode->i_dquot[cnt], number); | ||
1524 | } | ||
1525 | spin_unlock(&dq_data_lock); | ||
1526 | |||
1527 | out_unlock: | ||
1528 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
1529 | out: | ||
1530 | return; | ||
1531 | } | ||
1532 | EXPORT_SYMBOL(dquot_release_reserved_space); | ||
1533 | |||
1534 | /* | ||
1535 | * This operation can block, but only after everything is updated | 1563 | * This operation can block, but only after everything is updated |
1536 | */ | 1564 | */ |
1537 | int dquot_free_space(struct inode *inode, qsize_t number) | 1565 | int __dquot_free_space(struct inode *inode, qsize_t number, int reserve) |
1538 | { | 1566 | { |
1539 | unsigned int cnt; | 1567 | unsigned int cnt; |
1540 | char warntype[MAXQUOTAS]; | 1568 | char warntype[MAXQUOTAS]; |
@@ -1543,7 +1571,7 @@ int dquot_free_space(struct inode *inode, qsize_t number) | |||
1543 | * re-enter the quota code and are already holding the mutex */ | 1571 | * re-enter the quota code and are already holding the mutex */ |
1544 | if (IS_NOQUOTA(inode)) { | 1572 | if (IS_NOQUOTA(inode)) { |
1545 | out_sub: | 1573 | out_sub: |
1546 | inode_sub_bytes(inode, number); | 1574 | inode_decr_space(inode, number, reserve); |
1547 | return QUOTA_OK; | 1575 | return QUOTA_OK; |
1548 | } | 1576 | } |
1549 | 1577 | ||
@@ -1558,21 +1586,40 @@ out_sub: | |||
1558 | if (!inode->i_dquot[cnt]) | 1586 | if (!inode->i_dquot[cnt]) |
1559 | continue; | 1587 | continue; |
1560 | warntype[cnt] = info_bdq_free(inode->i_dquot[cnt], number); | 1588 | warntype[cnt] = info_bdq_free(inode->i_dquot[cnt], number); |
1561 | dquot_decr_space(inode->i_dquot[cnt], number); | 1589 | if (reserve) |
1590 | dquot_free_reserved_space(inode->i_dquot[cnt], number); | ||
1591 | else | ||
1592 | dquot_decr_space(inode->i_dquot[cnt], number); | ||
1562 | } | 1593 | } |
1563 | inode_sub_bytes(inode, number); | 1594 | inode_decr_space(inode, number, reserve); |
1564 | spin_unlock(&dq_data_lock); | 1595 | spin_unlock(&dq_data_lock); |
1565 | /* Dirtify all the dquots - this can block when journalling */ | 1596 | |
1566 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) | 1597 | if (reserve) |
1567 | if (inode->i_dquot[cnt]) | 1598 | goto out_unlock; |
1568 | mark_dquot_dirty(inode->i_dquot[cnt]); | 1599 | mark_all_dquot_dirty(inode->i_dquot); |
1600 | out_unlock: | ||
1569 | flush_warnings(inode->i_dquot, warntype); | 1601 | flush_warnings(inode->i_dquot, warntype); |
1570 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | 1602 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); |
1571 | return QUOTA_OK; | 1603 | return QUOTA_OK; |
1572 | } | 1604 | } |
1605 | |||
1606 | int dquot_free_space(struct inode *inode, qsize_t number) | ||
1607 | { | ||
1608 | return __dquot_free_space(inode, number, 0); | ||
1609 | } | ||
1573 | EXPORT_SYMBOL(dquot_free_space); | 1610 | EXPORT_SYMBOL(dquot_free_space); |
1574 | 1611 | ||
1575 | /* | 1612 | /* |
1613 | * Release reserved quota space | ||
1614 | */ | ||
1615 | void dquot_release_reserved_space(struct inode *inode, qsize_t number) | ||
1616 | { | ||
1617 | __dquot_free_space(inode, number, 1); | ||
1618 | |||
1619 | } | ||
1620 | EXPORT_SYMBOL(dquot_release_reserved_space); | ||
1621 | |||
1622 | /* | ||
1576 | * This operation can block, but only after everything is updated | 1623 | * This operation can block, but only after everything is updated |
1577 | */ | 1624 | */ |
1578 | int dquot_free_inode(const struct inode *inode, qsize_t number) | 1625 | int dquot_free_inode(const struct inode *inode, qsize_t number) |
@@ -1599,10 +1646,7 @@ int dquot_free_inode(const struct inode *inode, qsize_t number) | |||
1599 | dquot_decr_inodes(inode->i_dquot[cnt], number); | 1646 | dquot_decr_inodes(inode->i_dquot[cnt], number); |
1600 | } | 1647 | } |
1601 | spin_unlock(&dq_data_lock); | 1648 | spin_unlock(&dq_data_lock); |
1602 | /* Dirtify all the dquots - this can block when journalling */ | 1649 | mark_all_dquot_dirty(inode->i_dquot); |
1603 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) | ||
1604 | if (inode->i_dquot[cnt]) | ||
1605 | mark_dquot_dirty(inode->i_dquot[cnt]); | ||
1606 | flush_warnings(inode->i_dquot, warntype); | 1650 | flush_warnings(inode->i_dquot, warntype); |
1607 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | 1651 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); |
1608 | return QUOTA_OK; | 1652 | return QUOTA_OK; |
@@ -1610,19 +1654,6 @@ int dquot_free_inode(const struct inode *inode, qsize_t number) | |||
1610 | EXPORT_SYMBOL(dquot_free_inode); | 1654 | EXPORT_SYMBOL(dquot_free_inode); |
1611 | 1655 | ||
1612 | /* | 1656 | /* |
1613 | * call back function, get reserved quota space from underlying fs | ||
1614 | */ | ||
1615 | qsize_t dquot_get_reserved_space(struct inode *inode) | ||
1616 | { | ||
1617 | qsize_t reserved_space = 0; | ||
1618 | |||
1619 | if (sb_any_quota_active(inode->i_sb) && | ||
1620 | inode->i_sb->dq_op->get_reserved_space) | ||
1621 | reserved_space = inode->i_sb->dq_op->get_reserved_space(inode); | ||
1622 | return reserved_space; | ||
1623 | } | ||
1624 | |||
1625 | /* | ||
1626 | * Transfer the number of inode and blocks from one diskquota to an other. | 1657 | * Transfer the number of inode and blocks from one diskquota to an other. |
1627 | * | 1658 | * |
1628 | * This operation can block, but only after everything is updated | 1659 | * This operation can block, but only after everything is updated |
@@ -1665,7 +1696,7 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr) | |||
1665 | } | 1696 | } |
1666 | spin_lock(&dq_data_lock); | 1697 | spin_lock(&dq_data_lock); |
1667 | cur_space = inode_get_bytes(inode); | 1698 | cur_space = inode_get_bytes(inode); |
1668 | rsv_space = dquot_get_reserved_space(inode); | 1699 | rsv_space = inode_get_rsv_space(inode); |
1669 | space = cur_space + rsv_space; | 1700 | space = cur_space + rsv_space; |
1670 | /* Build the transfer_from list and check the limits */ | 1701 | /* Build the transfer_from list and check the limits */ |
1671 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | 1702 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { |
@@ -1709,25 +1740,18 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr) | |||
1709 | spin_unlock(&dq_data_lock); | 1740 | spin_unlock(&dq_data_lock); |
1710 | up_write(&sb_dqopt(inode->i_sb)->dqptr_sem); | 1741 | up_write(&sb_dqopt(inode->i_sb)->dqptr_sem); |
1711 | 1742 | ||
1712 | /* Dirtify all the dquots - this can block when journalling */ | 1743 | mark_all_dquot_dirty(transfer_from); |
1713 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | 1744 | mark_all_dquot_dirty(transfer_to); |
1714 | if (transfer_from[cnt]) | 1745 | /* The reference we got is transferred to the inode */ |
1715 | mark_dquot_dirty(transfer_from[cnt]); | 1746 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) |
1716 | if (transfer_to[cnt]) { | 1747 | transfer_to[cnt] = NULL; |
1717 | mark_dquot_dirty(transfer_to[cnt]); | ||
1718 | /* The reference we got is transferred to the inode */ | ||
1719 | transfer_to[cnt] = NULL; | ||
1720 | } | ||
1721 | } | ||
1722 | warn_put_all: | 1748 | warn_put_all: |
1723 | flush_warnings(transfer_to, warntype_to); | 1749 | flush_warnings(transfer_to, warntype_to); |
1724 | flush_warnings(transfer_from, warntype_from_inodes); | 1750 | flush_warnings(transfer_from, warntype_from_inodes); |
1725 | flush_warnings(transfer_from, warntype_from_space); | 1751 | flush_warnings(transfer_from, warntype_from_space); |
1726 | put_all: | 1752 | put_all: |
1727 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | 1753 | dqput_all(transfer_from); |
1728 | dqput(transfer_from[cnt]); | 1754 | dqput_all(transfer_to); |
1729 | dqput(transfer_to[cnt]); | ||
1730 | } | ||
1731 | return ret; | 1755 | return ret; |
1732 | over_quota: | 1756 | over_quota: |
1733 | spin_unlock(&dq_data_lock); | 1757 | spin_unlock(&dq_data_lock); |
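The dquot.c hunks above fold the old dquot_alloc_space/dquot_reserve_space and dquot_free_space/dquot_release_reserved_space pairs into shared __dquot_alloc_space()/__dquot_free_space() helpers that take a reserve flag and keep the inode's used and reserved byte counts via inode_incr_space(), inode_claim_rsv_space() and inode_decr_space(). The snippet below is only a minimal userspace model of that reserve -> claim-or-release accounting flow; the struct and function names and the sample sizes are invented for illustration and are not the kernel API.

/*
 * Hypothetical userspace model of the reserve/claim/release accounting
 * consolidated by the patch; illustrative only, not kernel code.
 */
#include <stdio.h>
#include <stdint.h>

struct toy_inode {
	uint64_t used_bytes;     /* space really allocated on disk        */
	uint64_t reserved_bytes; /* delayed-allocation reservation        */
};

/* reserve == 0: charge space immediately; reserve == 1: only reserve it */
static void toy_incr_space(struct toy_inode *inode, uint64_t number, int reserve)
{
	if (reserve)
		inode->reserved_bytes += number;
	else
		inode->used_bytes += number;
}

/* Convert a prior reservation into really used space (claim) */
static void toy_claim_rsv_space(struct toy_inode *inode, uint64_t number)
{
	inode->reserved_bytes -= number;
	inode->used_bytes += number;
}

/* reserve == 1 drops a reservation, reserve == 0 frees used space */
static void toy_decr_space(struct toy_inode *inode, uint64_t number, int reserve)
{
	if (reserve)
		inode->reserved_bytes -= number;
	else
		inode->used_bytes -= number;
}

int main(void)
{
	struct toy_inode inode = { 0, 0 };

	toy_incr_space(&inode, 4096, 1);   /* reserve for delayed allocation  */
	toy_claim_rsv_space(&inode, 4096); /* blocks were really allocated    */
	toy_incr_space(&inode, 512, 1);    /* another reservation ...         */
	toy_decr_space(&inode, 512, 1);    /* ... released before writeout    */

	printf("used=%llu reserved=%llu\n",
	       (unsigned long long)inode.used_bytes,
	       (unsigned long long)inode.reserved_bytes);
	return 0;
}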
diff --git a/fs/quota/quota_v2.c b/fs/quota/quota_v2.c index 3dfc23e02135..e3da02f4986f 100644 --- a/fs/quota/quota_v2.c +++ b/fs/quota/quota_v2.c | |||
@@ -97,8 +97,11 @@ static int v2_read_file_info(struct super_block *sb, int type) | |||
97 | unsigned int version; | 97 | unsigned int version; |
98 | 98 | ||
99 | if (!v2_read_header(sb, type, &dqhead)) | 99 | if (!v2_read_header(sb, type, &dqhead)) |
100 | return 0; | 100 | return -1; |
101 | version = le32_to_cpu(dqhead.dqh_version); | 101 | version = le32_to_cpu(dqhead.dqh_version); |
102 | if ((info->dqi_fmt_id == QFMT_VFS_V0 && version != 0) || | ||
103 | (info->dqi_fmt_id == QFMT_VFS_V1 && version != 1)) | ||
104 | return -1; | ||
102 | 105 | ||
103 | size = sb->s_op->quota_read(sb, type, (char *)&dinfo, | 106 | size = sb->s_op->quota_read(sb, type, (char *)&dinfo, |
104 | sizeof(struct v2_disk_dqinfo), V2_DQINFOOFF); | 107 | sizeof(struct v2_disk_dqinfo), V2_DQINFOOFF); |
@@ -120,8 +123,8 @@ static int v2_read_file_info(struct super_block *sb, int type) | |||
120 | info->dqi_maxilimit = 0xffffffff; | 123 | info->dqi_maxilimit = 0xffffffff; |
121 | } else { | 124 | } else { |
122 | /* used space is stored as unsigned 64-bit value */ | 125 | /* used space is stored as unsigned 64-bit value */ |
123 | info->dqi_maxblimit = 0xffffffffffffffff; /* 2^64-1 */ | 126 | info->dqi_maxblimit = 0xffffffffffffffffULL; /* 2^64-1 */ |
124 | info->dqi_maxilimit = 0xffffffffffffffff; | 127 | info->dqi_maxilimit = 0xffffffffffffffffULL; |
125 | } | 128 | } |
126 | info->dqi_bgrace = le32_to_cpu(dinfo.dqi_bgrace); | 129 | info->dqi_bgrace = le32_to_cpu(dinfo.dqi_bgrace); |
127 | info->dqi_igrace = le32_to_cpu(dinfo.dqi_igrace); | 130 | info->dqi_igrace = le32_to_cpu(dinfo.dqi_igrace); |
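The quota_v2.c hunk makes v2_read_file_info() return an error (-1) when the header cannot be read or when the on-disk version does not match the requested format id, rather than proceeding with a mismatched file. Below is a minimal sketch of that format-id/version cross-check; the QFMT_* values are assumed for illustration and are not copied from the quota headers.

/* Sketch of the header sanity check; constants are illustrative. */
#include <stdio.h>

#define QFMT_VFS_V0 2	/* assumed value for illustration */
#define QFMT_VFS_V1 4	/* assumed value for illustration */

static int version_matches_format(int fmt_id, unsigned int version)
{
	if (fmt_id == QFMT_VFS_V0 && version != 0)
		return 0;
	if (fmt_id == QFMT_VFS_V1 && version != 1)
		return 0;
	return 1;
}

int main(void)
{
	printf("%d\n", version_matches_format(QFMT_VFS_V1, 0)); /* 0: reject */
	printf("%d\n", version_matches_format(QFMT_VFS_V1, 1)); /* 1: accept */
	return 0;
}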
diff --git a/fs/stat.c b/fs/stat.c --- a/fs/stat.c +++ b/fs/stat.c | |||
@@ -401,9 +401,9 @@ SYSCALL_DEFINE4(fstatat64, int, dfd, char __user *, filename, | |||
401 | } | 401 | } |
402 | #endif /* __ARCH_WANT_STAT64 */ | 402 | #endif /* __ARCH_WANT_STAT64 */ |
403 | 403 | ||
404 | void inode_add_bytes(struct inode *inode, loff_t bytes) | 404 | /* Caller is here responsible for sufficient locking (ie. inode->i_lock) */ |
405 | void __inode_add_bytes(struct inode *inode, loff_t bytes) | ||
405 | { | 406 | { |
406 | spin_lock(&inode->i_lock); | ||
407 | inode->i_blocks += bytes >> 9; | 407 | inode->i_blocks += bytes >> 9; |
408 | bytes &= 511; | 408 | bytes &= 511; |
409 | inode->i_bytes += bytes; | 409 | inode->i_bytes += bytes; |
@@ -411,6 +411,12 @@ void inode_add_bytes(struct inode *inode, loff_t bytes) | |||
411 | inode->i_blocks++; | 411 | inode->i_blocks++; |
412 | inode->i_bytes -= 512; | 412 | inode->i_bytes -= 512; |
413 | } | 413 | } |
414 | } | ||
415 | |||
416 | void inode_add_bytes(struct inode *inode, loff_t bytes) | ||
417 | { | ||
418 | spin_lock(&inode->i_lock); | ||
419 | __inode_add_bytes(inode, bytes); | ||
414 | spin_unlock(&inode->i_lock); | 420 | spin_unlock(&inode->i_lock); |
415 | } | 421 | } |
416 | 422 | ||
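The fs/stat.c change splits the locked body of inode_add_bytes() into __inode_add_bytes(), so callers that already hold inode->i_lock can update i_blocks/i_bytes without taking the lock again. The accounting keeps i_blocks in 512-byte units and carries the sub-sector remainder in i_bytes; the standalone model below, with invented names, illustrates that carry logic.

/*
 * Standalone model of the byte -> 512-byte-block carry performed by
 * __inode_add_bytes(); plain C for illustration, not kernel code.
 */
#include <stdio.h>
#include <stdint.h>

struct toy_counts {
	uint64_t blocks; /* i_blocks: number of 512-byte sectors */
	uint32_t bytes;  /* i_bytes: remainder, always < 512     */
};

static void toy_add_bytes(struct toy_counts *c, uint64_t bytes)
{
	c->blocks += bytes >> 9;  /* whole 512-byte units */
	c->bytes  += bytes & 511; /* leftover part        */
	if (c->bytes >= 512) {    /* carry at most once   */
		c->blocks++;
		c->bytes -= 512;
	}
}

int main(void)
{
	struct toy_counts c = { 0, 0 };

	toy_add_bytes(&c, 1000); /* -> 1 block + 488 bytes          */
	toy_add_bytes(&c, 100);  /* carry -> 2 blocks + 76 bytes    */
	printf("blocks=%llu bytes=%u\n",
	       (unsigned long long)c.blocks, c.bytes);
	return 0;
}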