Diffstat (limited to 'fs')
149 files changed, 3354 insertions, 2394 deletions
diff --git a/fs/adfs/adfs.h b/fs/adfs/adfs.h
index 9cc18775b832..2ff622f6f547 100644
--- a/fs/adfs/adfs.h
+++ b/fs/adfs/adfs.h
@@ -121,7 +121,7 @@ struct adfs_discmap {
 
 /* Inode stuff */
 struct inode *adfs_iget(struct super_block *sb, struct object_info *obj);
-int adfs_write_inode(struct inode *inode,int unused);
+int adfs_write_inode(struct inode *inode, struct writeback_control *wbc);
 int adfs_notify_change(struct dentry *dentry, struct iattr *attr);
 
 /* map.c */
diff --git a/fs/adfs/inode.c b/fs/adfs/inode.c
index 3f57ce4bee5d..0f5e30978135 100644
--- a/fs/adfs/inode.c
+++ b/fs/adfs/inode.c
@@ -9,6 +9,7 @@
  */
 #include <linux/smp_lock.h>
 #include <linux/buffer_head.h>
+#include <linux/writeback.h>
 #include "adfs.h"
 
 /*
@@ -360,7 +361,7 @@ out:
  * The adfs-specific inode data has already been updated by
  * adfs_notify_change()
  */
-int adfs_write_inode(struct inode *inode, int wait)
+int adfs_write_inode(struct inode *inode, struct writeback_control *wbc)
 {
 	struct super_block *sb = inode->i_sb;
 	struct object_info obj;
@@ -375,7 +376,7 @@ int adfs_write_inode(struct inode *inode, int wait)
 	obj.attr = ADFS_I(inode)->attr;
 	obj.size = inode->i_size;
 
-	ret = adfs_dir_update(sb, &obj, wait);
+	ret = adfs_dir_update(sb, &obj, wbc->sync_mode == WB_SYNC_ALL);
 	unlock_kernel();
 	return ret;
 }
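The adfs hunks above show the ->write_inode conversion that repeats throughout this series: the old "int wait" argument is replaced by a struct writeback_control, and the synchronous case is derived from wbc->sync_mode. A minimal sketch of the new shape, using a hypothetical examplefs_write_inode that is not part of this diff:

#include <linux/fs.h>
#include <linux/writeback.h>

static int examplefs_write_inode(struct inode *inode,
				 struct writeback_control *wbc)
{
	/* The old "wait" flag is now derived from the writeback control. */
	int wait = (wbc->sync_mode == WB_SYNC_ALL);

	/* ... write the on-disk inode here, flushing it when wait is set ... */
	return 0;
}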
diff --git a/fs/affs/affs.h b/fs/affs/affs.h
index 0e40caaba456..861dae68ac12 100644
--- a/fs/affs/affs.h
+++ b/fs/affs/affs.h
@@ -175,7 +175,8 @@ extern void affs_delete_inode(struct inode *inode);
 extern void affs_clear_inode(struct inode *inode);
 extern struct inode *affs_iget(struct super_block *sb,
 			unsigned long ino);
-extern int affs_write_inode(struct inode *inode, int);
+extern int affs_write_inode(struct inode *inode,
+			struct writeback_control *wbc);
 extern int affs_add_entry(struct inode *dir, struct inode *inode, struct dentry *dentry, s32 type);
 
 /* file.c */
diff --git a/fs/affs/inode.c b/fs/affs/inode.c
index 3c4ec7d864c4..c9744d771d98 100644
--- a/fs/affs/inode.c
+++ b/fs/affs/inode.c
@@ -166,7 +166,7 @@ bad_inode:
 }
 
 int
-affs_write_inode(struct inode *inode, int unused)
+affs_write_inode(struct inode *inode, struct writeback_control *wbc)
 {
 	struct super_block *sb = inode->i_sb;
 	struct buffer_head *bh;
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index 6ece2a13bf71..c54dad4e6063 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -733,7 +733,6 @@ extern int afs_write_end(struct file *file, struct address_space *mapping,
 			struct page *page, void *fsdata);
 extern int afs_writepage(struct page *, struct writeback_control *);
 extern int afs_writepages(struct address_space *, struct writeback_control *);
-extern int afs_write_inode(struct inode *, int);
 extern void afs_pages_written_back(struct afs_vnode *, struct afs_call *);
 extern ssize_t afs_file_write(struct kiocb *, const struct iovec *,
 			unsigned long, loff_t);
diff --git a/fs/afs/super.c b/fs/afs/super.c
index e1ea1c240b6a..14f6431598ad 100644
--- a/fs/afs/super.c
+++ b/fs/afs/super.c
@@ -48,7 +48,6 @@ struct file_system_type afs_fs_type = {
 static const struct super_operations afs_super_ops = {
 	.statfs		= afs_statfs,
 	.alloc_inode	= afs_alloc_inode,
-	.write_inode	= afs_write_inode,
 	.destroy_inode	= afs_destroy_inode,
 	.clear_inode	= afs_clear_inode,
 	.put_super	= afs_put_super,
diff --git a/fs/afs/write.c b/fs/afs/write.c
index 5e15a21dbf9f..3bed54a294d4 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -585,27 +585,6 @@ int afs_writepages(struct address_space *mapping,
 }
 
 /*
- * write an inode back
- */
-int afs_write_inode(struct inode *inode, int sync)
-{
-	struct afs_vnode *vnode = AFS_FS_I(inode);
-	int ret;
-
-	_enter("{%x:%u},", vnode->fid.vid, vnode->fid.vnode);
-
-	ret = 0;
-	if (sync) {
-		ret = filemap_fdatawait(inode->i_mapping);
-		if (ret < 0)
-			__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
-	}
-
-	_leave(" = %d", ret);
-	return ret;
-}
-
-/*
  * completion of write to server
  */
 void afs_pages_written_back(struct afs_vnode *vnode, struct afs_call *call)
diff --git a/fs/attr.c b/fs/attr.c
--- a/fs/attr.c
+++ b/fs/attr.c
@@ -12,7 +12,6 @@
 #include <linux/capability.h>
 #include <linux/fsnotify.h>
 #include <linux/fcntl.h>
-#include <linux/quotaops.h>
 #include <linux/security.h>
 
 /* Taken over from the old code... */
@@ -212,14 +211,8 @@ int notify_change(struct dentry * dentry, struct iattr * attr)
 		error = inode->i_op->setattr(dentry, attr);
 	} else {
 		error = inode_change_ok(inode, attr);
-		if (!error) {
-			if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
-			    (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid))
-				error = vfs_dq_transfer(inode, attr) ?
-					-EDQUOT : 0;
-			if (!error)
-				error = inode_setattr(inode, attr);
-		}
+		if (!error)
+			error = inode_setattr(inode, attr);
 	}
 
 	if (ia_valid & ATTR_SIZE)
diff --git a/fs/bfs/inode.c b/fs/bfs/inode.c
index 8f3d9fd89604..f22a7d3dc362 100644
--- a/fs/bfs/inode.c
+++ b/fs/bfs/inode.c
@@ -15,6 +15,7 @@
 #include <linux/smp_lock.h>
 #include <linux/buffer_head.h>
 #include <linux/vfs.h>
+#include <linux/writeback.h>
 #include <asm/uaccess.h>
 #include "bfs.h"
 
@@ -98,7 +99,7 @@ error:
 	return ERR_PTR(-EIO);
 }
 
-static int bfs_write_inode(struct inode *inode, int wait)
+static int bfs_write_inode(struct inode *inode, struct writeback_control *wbc)
 {
 	struct bfs_sb_info *info = BFS_SB(inode->i_sb);
 	unsigned int ino = (u16)inode->i_ino;
@@ -147,7 +148,7 @@ static int bfs_write_inode(struct inode *inode, int wait)
 	di->i_eoffset = cpu_to_le32(i_sblock * BFS_BSIZE + inode->i_size - 1);
 
 	mark_buffer_dirty(bh);
-	if (wait) {
+	if (wbc->sync_mode == WB_SYNC_ALL) {
 		sync_dirty_buffer(bh);
 		if (buffer_req(bh) && !buffer_uptodate(bh))
 			err = -EIO;
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 2aa8ec6a0981..8b5cfdd4bfc1 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -2326,7 +2326,7 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
 int btrfs_readpage(struct file *file, struct page *page);
 void btrfs_delete_inode(struct inode *inode);
 void btrfs_put_inode(struct inode *inode);
-int btrfs_write_inode(struct inode *inode, int wait);
+int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc);
 void btrfs_dirty_inode(struct inode *inode);
 struct inode *btrfs_alloc_inode(struct super_block *sb);
 void btrfs_destroy_inode(struct inode *inode);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 4deb280f8969..c41db6d45ab6 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -3968,7 +3968,7 @@ err:
 	return ret;
 }
 
-int btrfs_write_inode(struct inode *inode, int wait)
+int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc)
 {
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	struct btrfs_trans_handle *trans;
@@ -3977,7 +3977,7 @@ int btrfs_write_inode(struct inode *inode, int wait)
 	if (root->fs_info->btree_inode == inode)
 		return 0;
 
-	if (wait) {
+	if (wbc->sync_mode == WB_SYNC_ALL) {
 		trans = btrfs_join_transaction(root, 1);
 		btrfs_set_trans_block_group(trans, inode);
 		ret = btrfs_commit_transaction(trans, root);
diff --git a/fs/exofs/exofs.h b/fs/exofs/exofs.h
index 59b8bf2825c7..8442e353309f 100644
--- a/fs/exofs/exofs.h
+++ b/fs/exofs/exofs.h
@@ -261,7 +261,7 @@ int exofs_write_begin(struct file *file, struct address_space *mapping,
 		struct page **pagep, void **fsdata);
 extern struct inode *exofs_iget(struct super_block *, unsigned long);
 struct inode *exofs_new_inode(struct inode *, int);
-extern int exofs_write_inode(struct inode *, int);
+extern int exofs_write_inode(struct inode *, struct writeback_control *wbc);
 extern void exofs_delete_inode(struct inode *);
 
 /* dir.c: */
diff --git a/fs/exofs/inode.c b/fs/exofs/inode.c
index 5514f3c2c2f4..a17e4b733e35 100644
--- a/fs/exofs/inode.c
+++ b/fs/exofs/inode.c
@@ -1280,9 +1280,9 @@ out:
 	return ret;
 }
 
-int exofs_write_inode(struct inode *inode, int wait)
+int exofs_write_inode(struct inode *inode, struct writeback_control *wbc)
 {
-	return exofs_update_inode(inode, wait);
+	return exofs_update_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
 }
 
 /*
diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
index 7f8d2e5a7ea6..1d081f0cfec2 100644
--- a/fs/ext2/balloc.c
+++ b/fs/ext2/balloc.c
@@ -570,7 +570,7 @@ do_more:
 error_return:
 	brelse(bitmap_bh);
 	release_blocks(sb, freed);
-	vfs_dq_free_block(inode, freed);
+	dquot_free_block(inode, freed);
 }
 
 /**
@@ -1236,6 +1236,7 @@ ext2_fsblk_t ext2_new_blocks(struct inode *inode, ext2_fsblk_t goal,
 	unsigned short windowsz = 0;
 	unsigned long ngroups;
 	unsigned long num = *count;
+	int ret;
 
 	*errp = -ENOSPC;
 	sb = inode->i_sb;
@@ -1247,8 +1248,9 @@ ext2_fsblk_t ext2_new_blocks(struct inode *inode, ext2_fsblk_t goal,
 	/*
 	 * Check quota for allocation of this block.
	 */
-	if (vfs_dq_alloc_block(inode, num)) {
-		*errp = -EDQUOT;
+	ret = dquot_alloc_block(inode, num);
+	if (ret) {
+		*errp = ret;
 		return 0;
 	}
 
@@ -1409,7 +1411,7 @@ allocated:
 
 	*errp = 0;
 	brelse(bitmap_bh);
-	vfs_dq_free_block(inode, *count-num);
+	dquot_free_block(inode, *count-num);
 	*count = num;
 	return ret_block;
 
@@ -1420,7 +1422,7 @@ out:
 	 * Undo the block allocation
 	 */
 	if (!performed_allocation)
-		vfs_dq_free_block(inode, *count);
+		dquot_free_block(inode, *count);
 	brelse(bitmap_bh);
 	return 0;
 }
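The ext2 block-allocator hunks above illustrate the other conversion running through this series: the vfs_dq_* wrappers are replaced by dquot_* calls that return an error code instead of a boolean, so callers propagate the returned value rather than hard-coding -EDQUOT. A rough caller-side sketch, with examplefs_charge_blocks as a hypothetical stand-in:

#include <linux/quotaops.h>

/* Hypothetical helper showing the caller-side pattern used in these hunks. */
static int examplefs_charge_blocks(struct inode *inode, unsigned long nr)
{
	int ret = dquot_alloc_block(inode, nr);	/* 0 on success, else an errno */

	if (ret)
		return ret;	/* propagate e.g. -EDQUOT instead of hard-coding it */

	/* ... perform the on-disk allocation; call dquot_free_block(inode, nr)
	 * here if that allocation fails, much as ext2_new_blocks() does ... */
	return 0;
}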
diff --git a/fs/ext2/ext2.h b/fs/ext2/ext2.h
index 061914add3cf..0b038e47ad2f 100644
--- a/fs/ext2/ext2.h
+++ b/fs/ext2/ext2.h
@@ -118,7 +118,7 @@ extern unsigned long ext2_count_free (struct buffer_head *, unsigned);
 
 /* inode.c */
 extern struct inode *ext2_iget (struct super_block *, unsigned long);
-extern int ext2_write_inode (struct inode *, int);
+extern int ext2_write_inode (struct inode *, struct writeback_control *);
 extern void ext2_delete_inode (struct inode *);
 extern int ext2_sync_inode (struct inode *);
 extern int ext2_get_block(struct inode *, sector_t, struct buffer_head *, int);
diff --git a/fs/ext2/file.c b/fs/ext2/file.c
index 586e3589d4c2..5d198d0697fb 100644
--- a/fs/ext2/file.c
+++ b/fs/ext2/file.c
@@ -20,6 +20,7 @@
 
 #include <linux/time.h>
 #include <linux/pagemap.h>
+#include <linux/quotaops.h>
 #include "ext2.h"
 #include "xattr.h"
 #include "acl.h"
@@ -70,7 +71,7 @@ const struct file_operations ext2_file_operations = {
 	.compat_ioctl	= ext2_compat_ioctl,
 #endif
 	.mmap		= generic_file_mmap,
-	.open		= generic_file_open,
+	.open		= dquot_file_open,
 	.release	= ext2_release_file,
 	.fsync		= ext2_fsync,
 	.splice_read	= generic_file_splice_read,
@@ -87,7 +88,7 @@ const struct file_operations ext2_xip_file_operations = {
 	.compat_ioctl	= ext2_compat_ioctl,
 #endif
 	.mmap		= xip_file_mmap,
-	.open		= generic_file_open,
+	.open		= dquot_file_open,
 	.release	= ext2_release_file,
 	.fsync		= ext2_fsync,
 };
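The .open conversion above swaps generic_file_open for dquot_file_open so quota structures get set up when a file is opened for writing. A sketch of what the generic helper does, assuming the implementation in fs/quota/dquot.c of this kernel (verify against the actual source):

int dquot_file_open(struct inode *inode, struct file *file)
{
	int error = generic_file_open(inode, file);

	/* Initialize per-inode dquots only for writable opens. */
	if (!error && (file->f_mode & FMODE_WRITE))
		dquot_initialize(inode);
	return error;
}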
diff --git a/fs/ext2/ialloc.c b/fs/ext2/ialloc.c
index 15387c9c17d8..ad7d572ee8dc 100644
--- a/fs/ext2/ialloc.c
+++ b/fs/ext2/ialloc.c
@@ -121,8 +121,8 @@ void ext2_free_inode (struct inode * inode)
 	if (!is_bad_inode(inode)) {
 		/* Quota is already initialized in iput() */
 		ext2_xattr_delete_inode(inode);
-		vfs_dq_free_inode(inode);
-		vfs_dq_drop(inode);
+		dquot_free_inode(inode);
+		dquot_drop(inode);
 	}
 
 	es = EXT2_SB(sb)->s_es;
@@ -586,10 +586,10 @@ got:
 		goto fail_drop;
 	}
 
-	if (vfs_dq_alloc_inode(inode)) {
-		err = -EDQUOT;
+	dquot_initialize(inode);
+	err = dquot_alloc_inode(inode);
+	if (err)
 		goto fail_drop;
-	}
 
 	err = ext2_init_acl(inode, dir);
 	if (err)
@@ -605,10 +605,10 @@ got:
 	return inode;
 
 fail_free_drop:
-	vfs_dq_free_inode(inode);
+	dquot_free_inode(inode);
 
 fail_drop:
-	vfs_dq_drop(inode);
+	dquot_drop(inode);
 	inode->i_flags |= S_NOQUOTA;
 	inode->i_nlink = 0;
 	unlock_new_inode(inode);
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index 71b032c65a02..fc13cc119aad 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -41,6 +41,8 @@ MODULE_AUTHOR("Remy Card and others");
 MODULE_DESCRIPTION("Second Extended Filesystem");
 MODULE_LICENSE("GPL");
 
+static int __ext2_write_inode(struct inode *inode, int do_sync);
+
 /*
  * Test whether an inode is a fast symlink.
 */
@@ -58,13 +60,15 @@ static inline int ext2_inode_is_fast_symlink(struct inode *inode)
 */
 void ext2_delete_inode (struct inode * inode)
 {
+	if (!is_bad_inode(inode))
+		dquot_initialize(inode);
 	truncate_inode_pages(&inode->i_data, 0);
 
 	if (is_bad_inode(inode))
 		goto no_delete;
 	EXT2_I(inode)->i_dtime = get_seconds();
 	mark_inode_dirty(inode);
-	ext2_write_inode(inode, inode_needs_sync(inode));
+	__ext2_write_inode(inode, inode_needs_sync(inode));
 
 	inode->i_size = 0;
 	if (inode->i_blocks)
@@ -1335,7 +1339,7 @@ bad_inode:
 	return ERR_PTR(ret);
 }
 
-int ext2_write_inode(struct inode *inode, int do_sync)
+static int __ext2_write_inode(struct inode *inode, int do_sync)
 {
 	struct ext2_inode_info *ei = EXT2_I(inode);
 	struct super_block *sb = inode->i_sb;
@@ -1440,6 +1444,11 @@ int ext2_write_inode(struct inode *inode, int do_sync)
 	return err;
 }
 
+int ext2_write_inode(struct inode *inode, struct writeback_control *wbc)
+{
+	return __ext2_write_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
+}
+
 int ext2_sync_inode(struct inode *inode)
 {
 	struct writeback_control wbc = {
@@ -1457,9 +1466,12 @@ int ext2_setattr(struct dentry *dentry, struct iattr *iattr)
 	error = inode_change_ok(inode, iattr);
 	if (error)
 		return error;
+
+	if (iattr->ia_valid & ATTR_SIZE)
+		dquot_initialize(inode);
 	if ((iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid) ||
 	    (iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid)) {
-		error = vfs_dq_transfer(inode, iattr) ? -EDQUOT : 0;
+		error = dquot_transfer(inode, iattr);
 		if (error)
 			return error;
 	}
diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c
index dd7175ce5606..71efb0e9a3f2 100644
--- a/fs/ext2/namei.c
+++ b/fs/ext2/namei.c
@@ -31,6 +31,7 @@
 */
 
 #include <linux/pagemap.h>
+#include <linux/quotaops.h>
 #include "ext2.h"
 #include "xattr.h"
 #include "acl.h"
@@ -99,24 +100,27 @@ struct dentry *ext2_get_parent(struct dentry *child)
 */
 static int ext2_create (struct inode * dir, struct dentry * dentry, int mode, struct nameidata *nd)
 {
-	struct inode * inode = ext2_new_inode (dir, mode);
-	int err = PTR_ERR(inode);
-	if (!IS_ERR(inode)) {
-		inode->i_op = &ext2_file_inode_operations;
-		if (ext2_use_xip(inode->i_sb)) {
-			inode->i_mapping->a_ops = &ext2_aops_xip;
-			inode->i_fop = &ext2_xip_file_operations;
-		} else if (test_opt(inode->i_sb, NOBH)) {
-			inode->i_mapping->a_ops = &ext2_nobh_aops;
-			inode->i_fop = &ext2_file_operations;
-		} else {
-			inode->i_mapping->a_ops = &ext2_aops;
-			inode->i_fop = &ext2_file_operations;
-		}
-		mark_inode_dirty(inode);
-		err = ext2_add_nondir(dentry, inode);
+	struct inode *inode;
+
+	dquot_initialize(dir);
+
+	inode = ext2_new_inode(dir, mode);
+	if (IS_ERR(inode))
+		return PTR_ERR(inode);
+
+	inode->i_op = &ext2_file_inode_operations;
+	if (ext2_use_xip(inode->i_sb)) {
+		inode->i_mapping->a_ops = &ext2_aops_xip;
+		inode->i_fop = &ext2_xip_file_operations;
+	} else if (test_opt(inode->i_sb, NOBH)) {
+		inode->i_mapping->a_ops = &ext2_nobh_aops;
+		inode->i_fop = &ext2_file_operations;
+	} else {
+		inode->i_mapping->a_ops = &ext2_aops;
+		inode->i_fop = &ext2_file_operations;
 	}
-	return err;
+	mark_inode_dirty(inode);
+	return ext2_add_nondir(dentry, inode);
 }
 
 static int ext2_mknod (struct inode * dir, struct dentry *dentry, int mode, dev_t rdev)
@@ -127,6 +131,8 @@ static int ext2_mknod (struct inode * dir, struct dentry *dentry, int mode, dev_
 	if (!new_valid_dev(rdev))
 		return -EINVAL;
 
+	dquot_initialize(dir);
+
 	inode = ext2_new_inode (dir, mode);
 	err = PTR_ERR(inode);
 	if (!IS_ERR(inode)) {
@@ -151,6 +157,8 @@ static int ext2_symlink (struct inode * dir, struct dentry * dentry,
 	if (l > sb->s_blocksize)
 		goto out;
 
+	dquot_initialize(dir);
+
 	inode = ext2_new_inode (dir, S_IFLNK | S_IRWXUGO);
 	err = PTR_ERR(inode);
 	if (IS_ERR(inode))
@@ -194,6 +202,8 @@ static int ext2_link (struct dentry * old_dentry, struct inode * dir,
 	if (inode->i_nlink >= EXT2_LINK_MAX)
 		return -EMLINK;
 
+	dquot_initialize(dir);
+
 	inode->i_ctime = CURRENT_TIME_SEC;
 	inode_inc_link_count(inode);
 	atomic_inc(&inode->i_count);
@@ -216,6 +226,8 @@ static int ext2_mkdir(struct inode * dir, struct dentry * dentry, int mode)
 	if (dir->i_nlink >= EXT2_LINK_MAX)
 		goto out;
 
+	dquot_initialize(dir);
+
 	inode_inc_link_count(dir);
 
 	inode = ext2_new_inode (dir, S_IFDIR | mode);
@@ -262,6 +274,8 @@ static int ext2_unlink(struct inode * dir, struct dentry *dentry)
 	struct page * page;
 	int err = -ENOENT;
 
+	dquot_initialize(dir);
+
 	de = ext2_find_entry (dir, &dentry->d_name, &page);
 	if (!de)
 		goto out;
@@ -304,6 +318,9 @@ static int ext2_rename (struct inode * old_dir, struct dentry * old_dentry,
 	struct ext2_dir_entry_2 * old_de;
 	int err = -ENOENT;
 
+	dquot_initialize(old_dir);
+	dquot_initialize(new_dir);
+
 	old_de = ext2_find_entry (old_dir, &old_dentry->d_name, &old_page);
 	if (!old_de)
 		goto out;
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index f9cb54a585ce..42e4a303b675 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -194,6 +194,8 @@ static void destroy_inodecache(void)
 static void ext2_clear_inode(struct inode *inode)
 {
 	struct ext2_block_alloc_info *rsv = EXT2_I(inode)->i_block_alloc_info;
+
+	dquot_drop(inode);
 	ext2_discard_reservation(inode);
 	EXT2_I(inode)->i_block_alloc_info = NULL;
 	if (unlikely(rsv))
diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
index 904f00642f84..e44dc92609be 100644
--- a/fs/ext2/xattr.c
+++ b/fs/ext2/xattr.c
@@ -644,8 +644,8 @@ ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh,
 			   the inode. */
 			ea_bdebug(new_bh, "reusing block");
 
-			error = -EDQUOT;
-			if (vfs_dq_alloc_block(inode, 1)) {
+			error = dquot_alloc_block(inode, 1);
+			if (error) {
 				unlock_buffer(new_bh);
 				goto cleanup;
 			}
@@ -702,7 +702,7 @@ ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh,
 		 * as if nothing happened and cleanup the unused block */
 		if (error && error != -ENOSPC) {
 			if (new_bh && new_bh != old_bh)
-				vfs_dq_free_block(inode, 1);
+				dquot_free_block(inode, 1);
 			goto cleanup;
 		}
 	} else
@@ -734,7 +734,7 @@ ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh,
 		le32_add_cpu(&HDR(old_bh)->h_refcount, -1);
 		if (ce)
 			mb_cache_entry_release(ce);
-		vfs_dq_free_block(inode, 1);
+		dquot_free_block(inode, 1);
 		mark_buffer_dirty(old_bh);
 		ea_bdebug(old_bh, "refcount now=%d",
 			  le32_to_cpu(HDR(old_bh)->h_refcount));
@@ -797,7 +797,7 @@ ext2_xattr_delete_inode(struct inode *inode)
 		mark_buffer_dirty(bh);
 		if (IS_SYNC(inode))
 			sync_dirty_buffer(bh);
-		vfs_dq_free_block(inode, 1);
+		dquot_free_block(inode, 1);
 	}
 	EXT2_I(inode)->i_file_acl = 0;
 
diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
index 27967f92e820..161da2d3f890 100644
--- a/fs/ext3/balloc.c
+++ b/fs/ext3/balloc.c
@@ -676,7 +676,7 @@ void ext3_free_blocks(handle_t *handle, struct inode *inode,
 	}
 	ext3_free_blocks_sb(handle, sb, block, count, &dquot_freed_blocks);
 	if (dquot_freed_blocks)
-		vfs_dq_free_block(inode, dquot_freed_blocks);
+		dquot_free_block(inode, dquot_freed_blocks);
 	return;
 }
 
@@ -1502,8 +1502,9 @@ ext3_fsblk_t ext3_new_blocks(handle_t *handle, struct inode *inode,
 	/*
 	 * Check quota for allocation of this block.
	 */
-	if (vfs_dq_alloc_block(inode, num)) {
-		*errp = -EDQUOT;
+	err = dquot_alloc_block(inode, num);
+	if (err) {
+		*errp = err;
 		return 0;
 	}
 
@@ -1713,7 +1714,7 @@ allocated:
 
 	*errp = 0;
 	brelse(bitmap_bh);
-	vfs_dq_free_block(inode, *count-num);
+	dquot_free_block(inode, *count-num);
 	*count = num;
 	return ret_block;
 
@@ -1728,7 +1729,7 @@ out:
 	 * Undo the block allocation
	 */
 	if (!performed_allocation)
-		vfs_dq_free_block(inode, *count);
+		dquot_free_block(inode, *count);
 	brelse(bitmap_bh);
 	return 0;
 }
diff --git a/fs/ext3/file.c b/fs/ext3/file.c
index 388bbdfa0b4e..f55df0e61cbd 100644
--- a/fs/ext3/file.c
+++ b/fs/ext3/file.c
@@ -21,6 +21,7 @@
 #include <linux/time.h>
 #include <linux/fs.h>
 #include <linux/jbd.h>
+#include <linux/quotaops.h>
 #include <linux/ext3_fs.h>
 #include <linux/ext3_jbd.h>
 #include "xattr.h"
@@ -33,9 +34,9 @@
 */
 static int ext3_release_file (struct inode * inode, struct file * filp)
 {
-	if (EXT3_I(inode)->i_state & EXT3_STATE_FLUSH_ON_CLOSE) {
+	if (ext3_test_inode_state(inode, EXT3_STATE_FLUSH_ON_CLOSE)) {
 		filemap_flush(inode->i_mapping);
-		EXT3_I(inode)->i_state &= ~EXT3_STATE_FLUSH_ON_CLOSE;
+		ext3_clear_inode_state(inode, EXT3_STATE_FLUSH_ON_CLOSE);
 	}
 	/* if we are the last writer on the inode, drop the block reservation */
 	if ((filp->f_mode & FMODE_WRITE) &&
@@ -62,7 +63,7 @@ const struct file_operations ext3_file_operations = {
 	.compat_ioctl	= ext3_compat_ioctl,
 #endif
 	.mmap		= generic_file_mmap,
-	.open		= generic_file_open,
+	.open		= dquot_file_open,
 	.release	= ext3_release_file,
 	.fsync		= ext3_sync_file,
 	.splice_read	= generic_file_splice_read,
diff --git a/fs/ext3/ialloc.c b/fs/ext3/ialloc.c
index b39991285136..ef9008b885b5 100644
--- a/fs/ext3/ialloc.c
+++ b/fs/ext3/ialloc.c
@@ -123,10 +123,10 @@ void ext3_free_inode (handle_t *handle, struct inode * inode)
 	 * Note: we must free any quota before locking the superblock,
 	 * as writing the quota to disk may need the lock as well.
	 */
-	vfs_dq_init(inode);
+	dquot_initialize(inode);
 	ext3_xattr_delete_inode(handle, inode);
-	vfs_dq_free_inode(inode);
-	vfs_dq_drop(inode);
+	dquot_free_inode(inode);
+	dquot_drop(inode);
 
 	is_directory = S_ISDIR(inode->i_mode);
 
@@ -588,10 +588,10 @@ got:
 		sizeof(struct ext3_inode) - EXT3_GOOD_OLD_INODE_SIZE : 0;
 
 	ret = inode;
-	if (vfs_dq_alloc_inode(inode)) {
-		err = -EDQUOT;
+	dquot_initialize(inode);
+	err = dquot_alloc_inode(inode);
+	if (err)
 		goto fail_drop;
-	}
 
 	err = ext3_init_acl(handle, inode, dir);
 	if (err)
@@ -619,10 +619,10 @@ really_out:
 	return ret;
 
 fail_free_drop:
-	vfs_dq_free_inode(inode);
+	dquot_free_inode(inode);
 
 fail_drop:
-	vfs_dq_drop(inode);
+	dquot_drop(inode);
 	inode->i_flags |= S_NOQUOTA;
 	inode->i_nlink = 0;
 	unlock_new_inode(inode);
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index 455e6e6e5cb9..7f920b7263a4 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -196,6 +196,9 @@ void ext3_delete_inode (struct inode * inode)
 {
 	handle_t *handle;
 
+	if (!is_bad_inode(inode))
+		dquot_initialize(inode);
+
 	truncate_inode_pages(&inode->i_data, 0);
 
 	if (is_bad_inode(inode))
@@ -1378,7 +1381,7 @@ static int ext3_journalled_write_end(struct file *file,
	 */
 	if (pos + len > inode->i_size && ext3_can_truncate(inode))
 		ext3_orphan_add(handle, inode);
-	EXT3_I(inode)->i_state |= EXT3_STATE_JDATA;
+	ext3_set_inode_state(inode, EXT3_STATE_JDATA);
 	if (inode->i_size > EXT3_I(inode)->i_disksize) {
 		EXT3_I(inode)->i_disksize = inode->i_size;
 		ret2 = ext3_mark_inode_dirty(handle, inode);
@@ -1417,7 +1420,7 @@ static sector_t ext3_bmap(struct address_space *mapping, sector_t block)
 	journal_t *journal;
 	int err;
 
-	if (EXT3_I(inode)->i_state & EXT3_STATE_JDATA) {
+	if (ext3_test_inode_state(inode, EXT3_STATE_JDATA)) {
 		/*
 		 * This is a REALLY heavyweight approach, but the use of
 		 * bmap on dirty files is expected to be extremely rare:
@@ -1436,7 +1439,7 @@ static sector_t ext3_bmap(struct address_space *mapping, sector_t block)
 		 * everything they get.
		 */
 
-		EXT3_I(inode)->i_state &= ~EXT3_STATE_JDATA;
+		ext3_clear_inode_state(inode, EXT3_STATE_JDATA);
 		journal = EXT3_JOURNAL(inode);
 		journal_lock_updates(journal);
 		err = journal_flush(journal);
@@ -1528,6 +1531,7 @@ static int ext3_ordered_writepage(struct page *page,
 	int err;
 
 	J_ASSERT(PageLocked(page));
+	WARN_ON_ONCE(IS_RDONLY(inode));
 
 	/*
 	 * We give up here if we're reentered, because it might be for a
@@ -1600,6 +1604,9 @@ static int ext3_writeback_writepage(struct page *page,
 	int ret = 0;
 	int err;
 
+	J_ASSERT(PageLocked(page));
+	WARN_ON_ONCE(IS_RDONLY(inode));
+
 	if (ext3_journal_current_handle())
 		goto out_fail;
 
@@ -1642,6 +1649,9 @@ static int ext3_journalled_writepage(struct page *page,
 	int ret = 0;
 	int err;
 
+	J_ASSERT(PageLocked(page));
+	WARN_ON_ONCE(IS_RDONLY(inode));
+
 	if (ext3_journal_current_handle())
 		goto no_write;
 
@@ -1670,7 +1680,7 @@ static int ext3_journalled_writepage(struct page *page,
 				PAGE_CACHE_SIZE, NULL, write_end_fn);
 		if (ret == 0)
 			ret = err;
-		EXT3_I(inode)->i_state |= EXT3_STATE_JDATA;
+		ext3_set_inode_state(inode, EXT3_STATE_JDATA);
 		unlock_page(page);
 	} else {
 		/*
@@ -1785,8 +1795,9 @@ retry:
 	handle = ext3_journal_start(inode, 2);
 	if (IS_ERR(handle)) {
 		/* This is really bad luck. We've written the data
-		 * but cannot extend i_size. Bail out and pretend
-		 * the write failed... */
+		 * but cannot extend i_size. Truncate allocated blocks
+		 * and pretend the write failed... */
+		ext3_truncate(inode);
 		ret = PTR_ERR(handle);
 		goto out;
 	}
@@ -2402,7 +2413,7 @@ void ext3_truncate(struct inode *inode)
 		goto out_notrans;
 
 	if (inode->i_size == 0 && ext3_should_writeback_data(inode))
-		ei->i_state |= EXT3_STATE_FLUSH_ON_CLOSE;
+		ext3_set_inode_state(inode, EXT3_STATE_FLUSH_ON_CLOSE);
 
 	/*
 	 * We have to lock the EOF page here, because lock_page() nests
@@ -2721,7 +2732,7 @@ int ext3_get_inode_loc(struct inode *inode, struct ext3_iloc *iloc)
 {
 	/* We have all inode data except xattrs in memory here. */
 	return __ext3_get_inode_loc(inode, iloc,
-		!(EXT3_I(inode)->i_state & EXT3_STATE_XATTR));
+		!ext3_test_inode_state(inode, EXT3_STATE_XATTR));
 }
 
 void ext3_set_inode_flags(struct inode *inode)
@@ -2893,7 +2904,7 @@ struct inode *ext3_iget(struct super_block *sb, unsigned long ino)
 			EXT3_GOOD_OLD_INODE_SIZE +
 			ei->i_extra_isize;
 		if (*magic == cpu_to_le32(EXT3_XATTR_MAGIC))
-			ei->i_state |= EXT3_STATE_XATTR;
+			ext3_set_inode_state(inode, EXT3_STATE_XATTR);
 		}
 	} else
 		ei->i_extra_isize = 0;
@@ -2955,7 +2966,7 @@ again:
 
 	/* For fields not not tracking in the in-memory inode,
 	 * initialise them to zero for new inodes. */
-	if (ei->i_state & EXT3_STATE_NEW)
+	if (ext3_test_inode_state(inode, EXT3_STATE_NEW))
 		memset(raw_inode, 0, EXT3_SB(inode->i_sb)->s_inode_size);
 
 	ext3_get_inode_flags(ei);
@@ -3052,7 +3063,7 @@ again:
 	rc = ext3_journal_dirty_metadata(handle, bh);
 	if (!err)
 		err = rc;
-	ei->i_state &= ~EXT3_STATE_NEW;
+	ext3_clear_inode_state(inode, EXT3_STATE_NEW);
 
 	atomic_set(&ei->i_sync_tid, handle->h_transaction->t_tid);
 out_brelse:
@@ -3096,7 +3107,7 @@ out_brelse:
 * `stuff()' is running, and the new i_size will be lost. Plus the inode
 * will no longer be on the superblock's dirty inode list.
 */
-int ext3_write_inode(struct inode *inode, int wait)
+int ext3_write_inode(struct inode *inode, struct writeback_control *wbc)
 {
 	if (current->flags & PF_MEMALLOC)
 		return 0;
@@ -3107,7 +3118,7 @@ int ext3_write_inode(struct inode *inode, int wait)
 		return -EIO;
 	}
 
-	if (!wait)
+	if (wbc->sync_mode != WB_SYNC_ALL)
 		return 0;
 
 	return ext3_force_commit(inode->i_sb);
@@ -3140,6 +3151,8 @@ int ext3_setattr(struct dentry *dentry, struct iattr *attr)
 	if (error)
 		return error;
 
+	if (ia_valid & ATTR_SIZE)
+		dquot_initialize(inode);
 	if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
 	    (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
 		handle_t *handle;
@@ -3152,7 +3165,7 @@ int ext3_setattr(struct dentry *dentry, struct iattr *attr)
 			error = PTR_ERR(handle);
 			goto err_out;
 		}
-		error = vfs_dq_transfer(inode, attr) ? -EDQUOT : 0;
+		error = dquot_transfer(inode, attr);
 		if (error) {
 			ext3_journal_stop(handle);
 			return error;
@@ -3237,7 +3250,7 @@ static int ext3_writepage_trans_blocks(struct inode *inode)
 	ret = 2 * (bpp + indirects) + 2;
 
 #ifdef CONFIG_QUOTA
-	/* We know that structure was already allocated during vfs_dq_init so
+	/* We know that structure was already allocated during dquot_initialize so
 	 * we will be updating only the data blocks + inodes */
 	ret += EXT3_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb);
 #endif
@@ -3328,7 +3341,7 @@ int ext3_mark_inode_dirty(handle_t *handle, struct inode *inode)
 * i_size has been changed by generic_commit_write() and we thus need
 * to include the updated inode in the current transaction.
 *
- * Also, vfs_dq_alloc_space() will always dirty the inode when blocks
+ * Also, dquot_alloc_space() will always dirty the inode when blocks
 * are allocated to the file.
 *
 * If the inode is marked synchronous, we don't honour that here - doing
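Several of the ext3 hunks above replace open-coded manipulation of EXT3_I(inode)->i_state with ext3_set_inode_state(), ext3_clear_inode_state() and ext3_test_inode_state(). Their definitions are not shown in this diff; assuming they are thin wrappers around the kernel bitops on a per-inode flags word (the field name i_state_flags below is a guess), they would look roughly like:

/* Sketch only; the real helpers live in the ext3 headers and may differ. */
static inline int ext3_test_inode_state(struct inode *inode, int bit)
{
	return test_bit(bit, &EXT3_I(inode)->i_state_flags);
}

static inline void ext3_set_inode_state(struct inode *inode, int bit)
{
	set_bit(bit, &EXT3_I(inode)->i_state_flags);
}

static inline void ext3_clear_inode_state(struct inode *inode, int bit)
{
	clear_bit(bit, &EXT3_I(inode)->i_state_flags);
}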
diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c
index 7b0e44f7d66f..ee184084ca42 100644
--- a/fs/ext3/namei.c
+++ b/fs/ext3/namei.c
@@ -1696,6 +1696,8 @@ static int ext3_create (struct inode * dir, struct dentry * dentry, int mode,
 	struct inode * inode;
 	int err, retries = 0;
 
+	dquot_initialize(dir);
+
 retry:
 	handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS(dir->i_sb) +
 					EXT3_INDEX_EXTRA_TRANS_BLOCKS + 3 +
@@ -1730,6 +1732,8 @@ static int ext3_mknod (struct inode * dir, struct dentry *dentry,
 	if (!new_valid_dev(rdev))
 		return -EINVAL;
 
+	dquot_initialize(dir);
+
 retry:
 	handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS(dir->i_sb) +
 					EXT3_INDEX_EXTRA_TRANS_BLOCKS + 3 +
@@ -1766,6 +1770,8 @@ static int ext3_mkdir(struct inode * dir, struct dentry * dentry, int mode)
 	if (dir->i_nlink >= EXT3_LINK_MAX)
 		return -EMLINK;
 
+	dquot_initialize(dir);
+
 retry:
 	handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS(dir->i_sb) +
 					EXT3_INDEX_EXTRA_TRANS_BLOCKS + 3 +
@@ -2060,7 +2066,9 @@ static int ext3_rmdir (struct inode * dir, struct dentry *dentry)
 
 	/* Initialize quotas before so that eventual writes go in
 	 * separate transaction */
-	vfs_dq_init(dentry->d_inode);
+	dquot_initialize(dir);
+	dquot_initialize(dentry->d_inode);
+
 	handle = ext3_journal_start(dir, EXT3_DELETE_TRANS_BLOCKS(dir->i_sb));
 	if (IS_ERR(handle))
 		return PTR_ERR(handle);
@@ -2119,7 +2127,9 @@ static int ext3_unlink(struct inode * dir, struct dentry *dentry)
 
 	/* Initialize quotas before so that eventual writes go
 	 * in separate transaction */
-	vfs_dq_init(dentry->d_inode);
+	dquot_initialize(dir);
+	dquot_initialize(dentry->d_inode);
+
 	handle = ext3_journal_start(dir, EXT3_DELETE_TRANS_BLOCKS(dir->i_sb));
 	if (IS_ERR(handle))
 		return PTR_ERR(handle);
@@ -2174,6 +2184,8 @@ static int ext3_symlink (struct inode * dir,
 	if (l > dir->i_sb->s_blocksize)
 		return -ENAMETOOLONG;
 
+	dquot_initialize(dir);
+
 retry:
 	handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS(dir->i_sb) +
 					EXT3_INDEX_EXTRA_TRANS_BLOCKS + 5 +
@@ -2228,6 +2240,9 @@ static int ext3_link (struct dentry * old_dentry,
 
 	if (inode->i_nlink >= EXT3_LINK_MAX)
 		return -EMLINK;
+
+	dquot_initialize(dir);
+
 	/*
 	 * Return -ENOENT if we've raced with unlink and i_nlink is 0. Doing
 	 * otherwise has the potential to corrupt the orphan inode list.
@@ -2278,12 +2293,15 @@ static int ext3_rename (struct inode * old_dir, struct dentry *old_dentry,
 	struct ext3_dir_entry_2 * old_de, * new_de;
 	int retval, flush_file = 0;
 
+	dquot_initialize(old_dir);
+	dquot_initialize(new_dir);
+
 	old_bh = new_bh = dir_bh = NULL;
 
 	/* Initialize quotas before so that eventual writes go
 	 * in separate transaction */
 	if (new_dentry->d_inode)
-		vfs_dq_init(new_dentry->d_inode);
+		dquot_initialize(new_dentry->d_inode);
 	handle = ext3_journal_start(old_dir, 2 *
 			EXT3_DATA_TRANS_BLOCKS(old_dir->i_sb) +
 			EXT3_INDEX_EXTRA_TRANS_BLOCKS + 2);
diff --git a/fs/ext3/super.c b/fs/ext3/super.c index afa2b569da10..e844accbf55d 100644 --- a/fs/ext3/super.c +++ b/fs/ext3/super.c | |||
| @@ -181,7 +181,7 @@ static void ext3_handle_error(struct super_block *sb) | |||
| 181 | if (!test_opt (sb, ERRORS_CONT)) { | 181 | if (!test_opt (sb, ERRORS_CONT)) { |
| 182 | journal_t *journal = EXT3_SB(sb)->s_journal; | 182 | journal_t *journal = EXT3_SB(sb)->s_journal; |
| 183 | 183 | ||
| 184 | EXT3_SB(sb)->s_mount_opt |= EXT3_MOUNT_ABORT; | 184 | set_opt(EXT3_SB(sb)->s_mount_opt, ABORT); |
| 185 | if (journal) | 185 | if (journal) |
| 186 | journal_abort(journal, -EIO); | 186 | journal_abort(journal, -EIO); |
| 187 | } | 187 | } |
| @@ -296,7 +296,7 @@ void ext3_abort (struct super_block * sb, const char * function, | |||
| 296 | "error: remounting filesystem read-only"); | 296 | "error: remounting filesystem read-only"); |
| 297 | EXT3_SB(sb)->s_mount_state |= EXT3_ERROR_FS; | 297 | EXT3_SB(sb)->s_mount_state |= EXT3_ERROR_FS; |
| 298 | sb->s_flags |= MS_RDONLY; | 298 | sb->s_flags |= MS_RDONLY; |
| 299 | EXT3_SB(sb)->s_mount_opt |= EXT3_MOUNT_ABORT; | 299 | set_opt(EXT3_SB(sb)->s_mount_opt, ABORT); |
| 300 | if (EXT3_SB(sb)->s_journal) | 300 | if (EXT3_SB(sb)->s_journal) |
| 301 | journal_abort(EXT3_SB(sb)->s_journal, -EIO); | 301 | journal_abort(EXT3_SB(sb)->s_journal, -EIO); |
| 302 | } | 302 | } |
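Both hunks above replace open-coded bitmask twiddling of s_mount_opt with the set_opt()/test_opt()/clear_opt() helpers used throughout the rest of this patch. A rough approximation of how those token-pasting macros are defined in the ext3 headers (the real definitions may differ slightly, but the shape is the same):

#define set_opt(o, opt)         ((o) |= EXT3_MOUNT_##opt)
#define clear_opt(o, opt)       ((o) &= ~EXT3_MOUNT_##opt)
#define test_opt(sb, opt)       (EXT3_SB(sb)->s_mount_opt & EXT3_MOUNT_##opt)

With these, set_opt(EXT3_SB(sb)->s_mount_opt, ABORT) simply ORs in EXT3_MOUNT_ABORT, and test_opt(sb, DATA_FLAGS) masks out the journaling-mode bits, which is why the data_mode_string() call sites above shrink to one line.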
| @@ -528,6 +528,8 @@ static void destroy_inodecache(void) | |||
| 528 | static void ext3_clear_inode(struct inode *inode) | 528 | static void ext3_clear_inode(struct inode *inode) |
| 529 | { | 529 | { |
| 530 | struct ext3_block_alloc_info *rsv = EXT3_I(inode)->i_block_alloc_info; | 530 | struct ext3_block_alloc_info *rsv = EXT3_I(inode)->i_block_alloc_info; |
| 531 | |||
| 532 | dquot_drop(inode); | ||
| 531 | ext3_discard_reservation(inode); | 533 | ext3_discard_reservation(inode); |
| 532 | EXT3_I(inode)->i_block_alloc_info = NULL; | 534 | EXT3_I(inode)->i_block_alloc_info = NULL; |
| 533 | if (unlikely(rsv)) | 535 | if (unlikely(rsv)) |
| @@ -562,10 +564,10 @@ static inline void ext3_show_quota_options(struct seq_file *seq, struct super_bl | |||
| 562 | if (sbi->s_qf_names[GRPQUOTA]) | 564 | if (sbi->s_qf_names[GRPQUOTA]) |
| 563 | seq_printf(seq, ",grpjquota=%s", sbi->s_qf_names[GRPQUOTA]); | 565 | seq_printf(seq, ",grpjquota=%s", sbi->s_qf_names[GRPQUOTA]); |
| 564 | 566 | ||
| 565 | if (sbi->s_mount_opt & EXT3_MOUNT_USRQUOTA) | 567 | if (test_opt(sb, USRQUOTA)) |
| 566 | seq_puts(seq, ",usrquota"); | 568 | seq_puts(seq, ",usrquota"); |
| 567 | 569 | ||
| 568 | if (sbi->s_mount_opt & EXT3_MOUNT_GRPQUOTA) | 570 | if (test_opt(sb, GRPQUOTA)) |
| 569 | seq_puts(seq, ",grpquota"); | 571 | seq_puts(seq, ",grpquota"); |
| 570 | #endif | 572 | #endif |
| 571 | } | 573 | } |
| @@ -656,8 +658,7 @@ static int ext3_show_options(struct seq_file *seq, struct vfsmount *vfs) | |||
| 656 | if (test_opt(sb, NOBH)) | 658 | if (test_opt(sb, NOBH)) |
| 657 | seq_puts(seq, ",nobh"); | 659 | seq_puts(seq, ",nobh"); |
| 658 | 660 | ||
| 659 | seq_printf(seq, ",data=%s", data_mode_string(sbi->s_mount_opt & | 661 | seq_printf(seq, ",data=%s", data_mode_string(test_opt(sb, DATA_FLAGS))); |
| 660 | EXT3_MOUNT_DATA_FLAGS)); | ||
| 661 | if (test_opt(sb, DATA_ERR_ABORT)) | 662 | if (test_opt(sb, DATA_ERR_ABORT)) |
| 662 | seq_puts(seq, ",data_err=abort"); | 663 | seq_puts(seq, ",data_err=abort"); |
| 663 | 664 | ||
| @@ -751,13 +752,6 @@ static ssize_t ext3_quota_write(struct super_block *sb, int type, | |||
| 751 | const char *data, size_t len, loff_t off); | 752 | const char *data, size_t len, loff_t off); |
| 752 | 753 | ||
| 753 | static const struct dquot_operations ext3_quota_operations = { | 754 | static const struct dquot_operations ext3_quota_operations = { |
| 754 | .initialize = dquot_initialize, | ||
| 755 | .drop = dquot_drop, | ||
| 756 | .alloc_space = dquot_alloc_space, | ||
| 757 | .alloc_inode = dquot_alloc_inode, | ||
| 758 | .free_space = dquot_free_space, | ||
| 759 | .free_inode = dquot_free_inode, | ||
| 760 | .transfer = dquot_transfer, | ||
| 761 | .write_dquot = ext3_write_dquot, | 755 | .write_dquot = ext3_write_dquot, |
| 762 | .acquire_dquot = ext3_acquire_dquot, | 756 | .acquire_dquot = ext3_acquire_dquot, |
| 763 | .release_dquot = ext3_release_dquot, | 757 | .release_dquot = ext3_release_dquot, |
| @@ -896,6 +890,63 @@ static ext3_fsblk_t get_sb_block(void **data, struct super_block *sb) | |||
| 896 | return sb_block; | 890 | return sb_block; |
| 897 | } | 891 | } |
| 898 | 892 | ||
| 893 | #ifdef CONFIG_QUOTA | ||
| 894 | static int set_qf_name(struct super_block *sb, int qtype, substring_t *args) | ||
| 895 | { | ||
| 896 | struct ext3_sb_info *sbi = EXT3_SB(sb); | ||
| 897 | char *qname; | ||
| 898 | |||
| 899 | if (sb_any_quota_loaded(sb) && | ||
| 900 | !sbi->s_qf_names[qtype]) { | ||
| 901 | ext3_msg(sb, KERN_ERR, | ||
| 902 | "Cannot change journaled " | ||
| 903 | "quota options when quota turned on"); | ||
| 904 | return 0; | ||
| 905 | } | ||
| 906 | qname = match_strdup(args); | ||
| 907 | if (!qname) { | ||
| 908 | ext3_msg(sb, KERN_ERR, | ||
| 909 | "Not enough memory for storing quotafile name"); | ||
| 910 | return 0; | ||
| 911 | } | ||
| 912 | if (sbi->s_qf_names[qtype] && | ||
| 913 | strcmp(sbi->s_qf_names[qtype], qname)) { | ||
| 914 | ext3_msg(sb, KERN_ERR, | ||
| 915 | "%s quota file already specified", QTYPE2NAME(qtype)); | ||
| 916 | kfree(qname); | ||
| 917 | return 0; | ||
| 918 | } | ||
| 919 | sbi->s_qf_names[qtype] = qname; | ||
| 920 | if (strchr(sbi->s_qf_names[qtype], '/')) { | ||
| 921 | ext3_msg(sb, KERN_ERR, | ||
| 922 | "quotafile must be on filesystem root"); | ||
| 923 | kfree(sbi->s_qf_names[qtype]); | ||
| 924 | sbi->s_qf_names[qtype] = NULL; | ||
| 925 | return 0; | ||
| 926 | } | ||
| 927 | set_opt(sbi->s_mount_opt, QUOTA); | ||
| 928 | return 1; | ||
| 929 | } | ||
| 930 | |||
| 931 | static int clear_qf_name(struct super_block *sb, int qtype) { | ||
| 932 | |||
| 933 | struct ext3_sb_info *sbi = EXT3_SB(sb); | ||
| 934 | |||
| 935 | if (sb_any_quota_loaded(sb) && | ||
| 936 | sbi->s_qf_names[qtype]) { | ||
| 937 | ext3_msg(sb, KERN_ERR, "Cannot change journaled quota options" | ||
| 938 | " when quota turned on"); | ||
| 939 | return 0; | ||
| 940 | } | ||
| 941 | /* | ||
| 942 | * The space will be released later when all options are confirmed | ||
| 943 | * to be correct | ||
| 944 | */ | ||
| 945 | sbi->s_qf_names[qtype] = NULL; | ||
| 946 | return 1; | ||
| 947 | } | ||
| 948 | #endif | ||
| 949 | |||
| 899 | static int parse_options (char *options, struct super_block *sb, | 950 | static int parse_options (char *options, struct super_block *sb, |
| 900 | unsigned int *inum, unsigned long *journal_devnum, | 951 | unsigned int *inum, unsigned long *journal_devnum, |
| 901 | ext3_fsblk_t *n_blocks_count, int is_remount) | 952 | ext3_fsblk_t *n_blocks_count, int is_remount) |
| @@ -906,8 +957,7 @@ static int parse_options (char *options, struct super_block *sb, | |||
| 906 | int data_opt = 0; | 957 | int data_opt = 0; |
| 907 | int option; | 958 | int option; |
| 908 | #ifdef CONFIG_QUOTA | 959 | #ifdef CONFIG_QUOTA |
| 909 | int qtype, qfmt; | 960 | int qfmt; |
| 910 | char *qname; | ||
| 911 | #endif | 961 | #endif |
| 912 | 962 | ||
| 913 | if (!options) | 963 | if (!options) |
| @@ -1065,20 +1115,19 @@ static int parse_options (char *options, struct super_block *sb, | |||
| 1065 | data_opt = EXT3_MOUNT_WRITEBACK_DATA; | 1115 | data_opt = EXT3_MOUNT_WRITEBACK_DATA; |
| 1066 | datacheck: | 1116 | datacheck: |
| 1067 | if (is_remount) { | 1117 | if (is_remount) { |
| 1068 | if ((sbi->s_mount_opt & EXT3_MOUNT_DATA_FLAGS) | 1118 | if (test_opt(sb, DATA_FLAGS) == data_opt) |
| 1069 | == data_opt) | ||
| 1070 | break; | 1119 | break; |
| 1071 | ext3_msg(sb, KERN_ERR, | 1120 | ext3_msg(sb, KERN_ERR, |
| 1072 | "error: cannot change " | 1121 | "error: cannot change " |
| 1073 | "data mode on remount. The filesystem " | 1122 | "data mode on remount. The filesystem " |
| 1074 | "is mounted in data=%s mode and you " | 1123 | "is mounted in data=%s mode and you " |
| 1075 | "try to remount it in data=%s mode.", | 1124 | "try to remount it in data=%s mode.", |
| 1076 | data_mode_string(sbi->s_mount_opt & | 1125 | data_mode_string(test_opt(sb, |
| 1077 | EXT3_MOUNT_DATA_FLAGS), | 1126 | DATA_FLAGS)), |
| 1078 | data_mode_string(data_opt)); | 1127 | data_mode_string(data_opt)); |
| 1079 | return 0; | 1128 | return 0; |
| 1080 | } else { | 1129 | } else { |
| 1081 | sbi->s_mount_opt &= ~EXT3_MOUNT_DATA_FLAGS; | 1130 | clear_opt(sbi->s_mount_opt, DATA_FLAGS); |
| 1082 | sbi->s_mount_opt |= data_opt; | 1131 | sbi->s_mount_opt |= data_opt; |
| 1083 | } | 1132 | } |
| 1084 | break; | 1133 | break; |
| @@ -1090,62 +1139,20 @@ static int parse_options (char *options, struct super_block *sb, | |||
| 1090 | break; | 1139 | break; |
| 1091 | #ifdef CONFIG_QUOTA | 1140 | #ifdef CONFIG_QUOTA |
| 1092 | case Opt_usrjquota: | 1141 | case Opt_usrjquota: |
| 1093 | qtype = USRQUOTA; | 1142 | if (!set_qf_name(sb, USRQUOTA, &args[0])) |
| 1094 | goto set_qf_name; | ||
| 1095 | case Opt_grpjquota: | ||
| 1096 | qtype = GRPQUOTA; | ||
| 1097 | set_qf_name: | ||
| 1098 | if (sb_any_quota_loaded(sb) && | ||
| 1099 | !sbi->s_qf_names[qtype]) { | ||
| 1100 | ext3_msg(sb, KERN_ERR, | ||
| 1101 | "error: cannot change journaled " | ||
| 1102 | "quota options when quota turned on."); | ||
| 1103 | return 0; | ||
| 1104 | } | ||
| 1105 | qname = match_strdup(&args[0]); | ||
| 1106 | if (!qname) { | ||
| 1107 | ext3_msg(sb, KERN_ERR, | ||
| 1108 | "error: not enough memory for " | ||
| 1109 | "storing quotafile name."); | ||
| 1110 | return 0; | 1143 | return 0; |
| 1111 | } | 1144 | break; |
| 1112 | if (sbi->s_qf_names[qtype] && | 1145 | case Opt_grpjquota: |
| 1113 | strcmp(sbi->s_qf_names[qtype], qname)) { | 1146 | if (!set_qf_name(sb, GRPQUOTA, &args[0])) |
| 1114 | ext3_msg(sb, KERN_ERR, | ||
| 1115 | "error: %s quota file already " | ||
| 1116 | "specified.", QTYPE2NAME(qtype)); | ||
| 1117 | kfree(qname); | ||
| 1118 | return 0; | ||
| 1119 | } | ||
| 1120 | sbi->s_qf_names[qtype] = qname; | ||
| 1121 | if (strchr(sbi->s_qf_names[qtype], '/')) { | ||
| 1122 | ext3_msg(sb, KERN_ERR, | ||
| 1123 | "error: quotafile must be on " | ||
| 1124 | "filesystem root."); | ||
| 1125 | kfree(sbi->s_qf_names[qtype]); | ||
| 1126 | sbi->s_qf_names[qtype] = NULL; | ||
| 1127 | return 0; | 1147 | return 0; |
| 1128 | } | ||
| 1129 | set_opt(sbi->s_mount_opt, QUOTA); | ||
| 1130 | break; | 1148 | break; |
| 1131 | case Opt_offusrjquota: | 1149 | case Opt_offusrjquota: |
| 1132 | qtype = USRQUOTA; | 1150 | if (!clear_qf_name(sb, USRQUOTA)) |
| 1133 | goto clear_qf_name; | 1151 | return 0; |
| 1152 | break; | ||
| 1134 | case Opt_offgrpjquota: | 1153 | case Opt_offgrpjquota: |
| 1135 | qtype = GRPQUOTA; | 1154 | if (!clear_qf_name(sb, GRPQUOTA)) |
| 1136 | clear_qf_name: | ||
| 1137 | if (sb_any_quota_loaded(sb) && | ||
| 1138 | sbi->s_qf_names[qtype]) { | ||
| 1139 | ext3_msg(sb, KERN_ERR, "error: cannot change " | ||
| 1140 | "journaled quota options when " | ||
| 1141 | "quota turned on."); | ||
| 1142 | return 0; | 1155 | return 0; |
| 1143 | } | ||
| 1144 | /* | ||
| 1145 | * The space will be released later when all options | ||
| 1146 | * are confirmed to be correct | ||
| 1147 | */ | ||
| 1148 | sbi->s_qf_names[qtype] = NULL; | ||
| 1149 | break; | 1156 | break; |
| 1150 | case Opt_jqfmt_vfsold: | 1157 | case Opt_jqfmt_vfsold: |
| 1151 | qfmt = QFMT_VFS_OLD; | 1158 | qfmt = QFMT_VFS_OLD; |
| @@ -1244,18 +1251,12 @@ set_qf_format: | |||
| 1244 | } | 1251 | } |
| 1245 | #ifdef CONFIG_QUOTA | 1252 | #ifdef CONFIG_QUOTA |
| 1246 | if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) { | 1253 | if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) { |
| 1247 | if ((sbi->s_mount_opt & EXT3_MOUNT_USRQUOTA) && | 1254 | if (test_opt(sb, USRQUOTA) && sbi->s_qf_names[USRQUOTA]) |
| 1248 | sbi->s_qf_names[USRQUOTA]) | ||
| 1249 | clear_opt(sbi->s_mount_opt, USRQUOTA); | 1255 | clear_opt(sbi->s_mount_opt, USRQUOTA); |
| 1250 | 1256 | if (test_opt(sb, GRPQUOTA) && sbi->s_qf_names[GRPQUOTA]) | |
| 1251 | if ((sbi->s_mount_opt & EXT3_MOUNT_GRPQUOTA) && | ||
| 1252 | sbi->s_qf_names[GRPQUOTA]) | ||
| 1253 | clear_opt(sbi->s_mount_opt, GRPQUOTA); | 1257 | clear_opt(sbi->s_mount_opt, GRPQUOTA); |
| 1254 | 1258 | ||
| 1255 | if ((sbi->s_qf_names[USRQUOTA] && | 1259 | if (test_opt(sb, GRPQUOTA) || test_opt(sb, USRQUOTA)) { |
| 1256 | (sbi->s_mount_opt & EXT3_MOUNT_GRPQUOTA)) || | ||
| 1257 | (sbi->s_qf_names[GRPQUOTA] && | ||
| 1258 | (sbi->s_mount_opt & EXT3_MOUNT_USRQUOTA))) { | ||
| 1259 | ext3_msg(sb, KERN_ERR, "error: old and new quota " | 1260 | ext3_msg(sb, KERN_ERR, "error: old and new quota " |
| 1260 | "format mixing."); | 1261 | "format mixing."); |
| 1261 | return 0; | 1262 | return 0; |
| @@ -1478,7 +1479,7 @@ static void ext3_orphan_cleanup (struct super_block * sb, | |||
| 1478 | } | 1479 | } |
| 1479 | 1480 | ||
| 1480 | list_add(&EXT3_I(inode)->i_orphan, &EXT3_SB(sb)->s_orphan); | 1481 | list_add(&EXT3_I(inode)->i_orphan, &EXT3_SB(sb)->s_orphan); |
| 1481 | vfs_dq_init(inode); | 1482 | dquot_initialize(inode); |
| 1482 | if (inode->i_nlink) { | 1483 | if (inode->i_nlink) { |
| 1483 | printk(KERN_DEBUG | 1484 | printk(KERN_DEBUG |
| 1484 | "%s: truncating inode %lu to %Ld bytes\n", | 1485 | "%s: truncating inode %lu to %Ld bytes\n", |
| @@ -1671,11 +1672,11 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent) | |||
| 1671 | set_opt(sbi->s_mount_opt, POSIX_ACL); | 1672 | set_opt(sbi->s_mount_opt, POSIX_ACL); |
| 1672 | #endif | 1673 | #endif |
| 1673 | if ((def_mount_opts & EXT3_DEFM_JMODE) == EXT3_DEFM_JMODE_DATA) | 1674 | if ((def_mount_opts & EXT3_DEFM_JMODE) == EXT3_DEFM_JMODE_DATA) |
| 1674 | sbi->s_mount_opt |= EXT3_MOUNT_JOURNAL_DATA; | 1675 | set_opt(sbi->s_mount_opt, JOURNAL_DATA); |
| 1675 | else if ((def_mount_opts & EXT3_DEFM_JMODE) == EXT3_DEFM_JMODE_ORDERED) | 1676 | else if ((def_mount_opts & EXT3_DEFM_JMODE) == EXT3_DEFM_JMODE_ORDERED) |
| 1676 | sbi->s_mount_opt |= EXT3_MOUNT_ORDERED_DATA; | 1677 | set_opt(sbi->s_mount_opt, ORDERED_DATA); |
| 1677 | else if ((def_mount_opts & EXT3_DEFM_JMODE) == EXT3_DEFM_JMODE_WBACK) | 1678 | else if ((def_mount_opts & EXT3_DEFM_JMODE) == EXT3_DEFM_JMODE_WBACK) |
| 1678 | sbi->s_mount_opt |= EXT3_MOUNT_WRITEBACK_DATA; | 1679 | set_opt(sbi->s_mount_opt, WRITEBACK_DATA); |
| 1679 | 1680 | ||
| 1680 | if (le16_to_cpu(sbi->s_es->s_errors) == EXT3_ERRORS_PANIC) | 1681 | if (le16_to_cpu(sbi->s_es->s_errors) == EXT3_ERRORS_PANIC) |
| 1681 | set_opt(sbi->s_mount_opt, ERRORS_PANIC); | 1682 | set_opt(sbi->s_mount_opt, ERRORS_PANIC); |
| @@ -1694,7 +1695,7 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent) | |||
| 1694 | goto failed_mount; | 1695 | goto failed_mount; |
| 1695 | 1696 | ||
| 1696 | sb->s_flags = (sb->s_flags & ~MS_POSIXACL) | | 1697 | sb->s_flags = (sb->s_flags & ~MS_POSIXACL) | |
| 1697 | ((sbi->s_mount_opt & EXT3_MOUNT_POSIX_ACL) ? MS_POSIXACL : 0); | 1698 | (test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0); |
| 1698 | 1699 | ||
| 1699 | if (le32_to_cpu(es->s_rev_level) == EXT3_GOOD_OLD_REV && | 1700 | if (le32_to_cpu(es->s_rev_level) == EXT3_GOOD_OLD_REV && |
| 1700 | (EXT3_HAS_COMPAT_FEATURE(sb, ~0U) || | 1701 | (EXT3_HAS_COMPAT_FEATURE(sb, ~0U) || |
| @@ -2561,11 +2562,11 @@ static int ext3_remount (struct super_block * sb, int * flags, char * data) | |||
| 2561 | goto restore_opts; | 2562 | goto restore_opts; |
| 2562 | } | 2563 | } |
| 2563 | 2564 | ||
| 2564 | if (sbi->s_mount_opt & EXT3_MOUNT_ABORT) | 2565 | if (test_opt(sb, ABORT)) |
| 2565 | ext3_abort(sb, __func__, "Abort forced by user"); | 2566 | ext3_abort(sb, __func__, "Abort forced by user"); |
| 2566 | 2567 | ||
| 2567 | sb->s_flags = (sb->s_flags & ~MS_POSIXACL) | | 2568 | sb->s_flags = (sb->s_flags & ~MS_POSIXACL) | |
| 2568 | ((sbi->s_mount_opt & EXT3_MOUNT_POSIX_ACL) ? MS_POSIXACL : 0); | 2569 | (test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0); |
| 2569 | 2570 | ||
| 2570 | es = sbi->s_es; | 2571 | es = sbi->s_es; |
| 2571 | 2572 | ||
| @@ -2573,7 +2574,7 @@ static int ext3_remount (struct super_block * sb, int * flags, char * data) | |||
| 2573 | 2574 | ||
| 2574 | if ((*flags & MS_RDONLY) != (sb->s_flags & MS_RDONLY) || | 2575 | if ((*flags & MS_RDONLY) != (sb->s_flags & MS_RDONLY) || |
| 2575 | n_blocks_count > le32_to_cpu(es->s_blocks_count)) { | 2576 | n_blocks_count > le32_to_cpu(es->s_blocks_count)) { |
| 2576 | if (sbi->s_mount_opt & EXT3_MOUNT_ABORT) { | 2577 | if (test_opt(sb, ABORT)) { |
| 2577 | err = -EROFS; | 2578 | err = -EROFS; |
| 2578 | goto restore_opts; | 2579 | goto restore_opts; |
| 2579 | } | 2580 | } |
| @@ -2734,7 +2735,7 @@ static int ext3_statfs (struct dentry * dentry, struct kstatfs * buf) | |||
| 2734 | * Process 1 Process 2 | 2735 | * Process 1 Process 2 |
| 2735 | * ext3_create() quota_sync() | 2736 | * ext3_create() quota_sync() |
| 2736 | * journal_start() write_dquot() | 2737 | * journal_start() write_dquot() |
| 2737 | * vfs_dq_init() down(dqio_mutex) | 2738 | * dquot_initialize() down(dqio_mutex) |
| 2738 | * down(dqio_mutex) journal_start() | 2739 | * down(dqio_mutex) journal_start() |
| 2739 | * | 2740 | * |
| 2740 | */ | 2741 | */ |
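The comment above documents the lock ordering quota writes must follow: a journal handle is always taken before dqio_mutex, never the other way around. A hedged sketch of how a quota write callback honours that ordering, loosely modelled on ext3_write_dquot(); the credit macro and helpers come from the ext3/quota headers, and this is an illustration rather than a copy of the real function.

#include <linux/ext3_jbd.h>
#include <linux/quotaops.h>

static int example_write_dquot(struct dquot *dquot)
{
        int ret, err;
        handle_t *handle;
        struct inode *inode = dquot_to_inode(dquot);

        /* Start the transaction first ... */
        handle = ext3_journal_start(inode,
                                    EXT3_QUOTA_TRANS_BLOCKS(dquot->dq_sb));
        if (IS_ERR(handle))
                return PTR_ERR(handle);
        /* ... so dquot_commit() takes dqio_mutex with the handle held,
         * matching the "journal, then dqio_mutex" order described above. */
        ret = dquot_commit(dquot);
        err = ext3_journal_stop(handle);
        if (!ret)
                ret = err;
        return ret;
}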
| @@ -2942,9 +2943,7 @@ static ssize_t ext3_quota_write(struct super_block *sb, int type, | |||
| 2942 | sector_t blk = off >> EXT3_BLOCK_SIZE_BITS(sb); | 2943 | sector_t blk = off >> EXT3_BLOCK_SIZE_BITS(sb); |
| 2943 | int err = 0; | 2944 | int err = 0; |
| 2944 | int offset = off & (sb->s_blocksize - 1); | 2945 | int offset = off & (sb->s_blocksize - 1); |
| 2945 | int tocopy; | ||
| 2946 | int journal_quota = EXT3_SB(sb)->s_qf_names[type] != NULL; | 2946 | int journal_quota = EXT3_SB(sb)->s_qf_names[type] != NULL; |
| 2947 | size_t towrite = len; | ||
| 2948 | struct buffer_head *bh; | 2947 | struct buffer_head *bh; |
| 2949 | handle_t *handle = journal_current_handle(); | 2948 | handle_t *handle = journal_current_handle(); |
| 2950 | 2949 | ||
| @@ -2955,53 +2954,54 @@ static ssize_t ext3_quota_write(struct super_block *sb, int type, | |||
| 2955 | (unsigned long long)off, (unsigned long long)len); | 2954 | (unsigned long long)off, (unsigned long long)len); |
| 2956 | return -EIO; | 2955 | return -EIO; |
| 2957 | } | 2956 | } |
| 2957 | |||
| 2958 | /* | ||
| 2959 | * Since we account only one data block in transaction credits, | ||
| 2960 | * then it is impossible to cross a block boundary. | ||
| 2961 | */ | ||
| 2962 | if (sb->s_blocksize - offset < len) { | ||
| 2963 | ext3_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)" | ||
| 2964 | " cancelled because not block aligned", | ||
| 2965 | (unsigned long long)off, (unsigned long long)len); | ||
| 2966 | return -EIO; | ||
| 2967 | } | ||
| 2958 | mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA); | 2968 | mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA); |
| 2959 | while (towrite > 0) { | 2969 | bh = ext3_bread(handle, inode, blk, 1, &err); |
| 2960 | tocopy = sb->s_blocksize - offset < towrite ? | 2970 | if (!bh) |
| 2961 | sb->s_blocksize - offset : towrite; | 2971 | goto out; |
| 2962 | bh = ext3_bread(handle, inode, blk, 1, &err); | 2972 | if (journal_quota) { |
| 2963 | if (!bh) | 2973 | err = ext3_journal_get_write_access(handle, bh); |
| 2974 | if (err) { | ||
| 2975 | brelse(bh); | ||
| 2964 | goto out; | 2976 | goto out; |
| 2965 | if (journal_quota) { | ||
| 2966 | err = ext3_journal_get_write_access(handle, bh); | ||
| 2967 | if (err) { | ||
| 2968 | brelse(bh); | ||
| 2969 | goto out; | ||
| 2970 | } | ||
| 2971 | } | ||
| 2972 | lock_buffer(bh); | ||
| 2973 | memcpy(bh->b_data+offset, data, tocopy); | ||
| 2974 | flush_dcache_page(bh->b_page); | ||
| 2975 | unlock_buffer(bh); | ||
| 2976 | if (journal_quota) | ||
| 2977 | err = ext3_journal_dirty_metadata(handle, bh); | ||
| 2978 | else { | ||
| 2979 | /* Always do at least ordered writes for quotas */ | ||
| 2980 | err = ext3_journal_dirty_data(handle, bh); | ||
| 2981 | mark_buffer_dirty(bh); | ||
| 2982 | } | 2977 | } |
| 2983 | brelse(bh); | ||
| 2984 | if (err) | ||
| 2985 | goto out; | ||
| 2986 | offset = 0; | ||
| 2987 | towrite -= tocopy; | ||
| 2988 | data += tocopy; | ||
| 2989 | blk++; | ||
| 2990 | } | 2978 | } |
| 2979 | lock_buffer(bh); | ||
| 2980 | memcpy(bh->b_data+offset, data, len); | ||
| 2981 | flush_dcache_page(bh->b_page); | ||
| 2982 | unlock_buffer(bh); | ||
| 2983 | if (journal_quota) | ||
| 2984 | err = ext3_journal_dirty_metadata(handle, bh); | ||
| 2985 | else { | ||
| 2986 | /* Always do at least ordered writes for quotas */ | ||
| 2987 | err = ext3_journal_dirty_data(handle, bh); | ||
| 2988 | mark_buffer_dirty(bh); | ||
| 2989 | } | ||
| 2990 | brelse(bh); | ||
| 2991 | out: | 2991 | out: |
| 2992 | if (len == towrite) { | 2992 | if (err) { |
| 2993 | mutex_unlock(&inode->i_mutex); | 2993 | mutex_unlock(&inode->i_mutex); |
| 2994 | return err; | 2994 | return err; |
| 2995 | } | 2995 | } |
| 2996 | if (inode->i_size < off+len-towrite) { | 2996 | if (inode->i_size < off + len) { |
| 2997 | i_size_write(inode, off+len-towrite); | 2997 | i_size_write(inode, off + len); |
| 2998 | EXT3_I(inode)->i_disksize = inode->i_size; | 2998 | EXT3_I(inode)->i_disksize = inode->i_size; |
| 2999 | } | 2999 | } |
| 3000 | inode->i_version++; | 3000 | inode->i_version++; |
| 3001 | inode->i_mtime = inode->i_ctime = CURRENT_TIME; | 3001 | inode->i_mtime = inode->i_ctime = CURRENT_TIME; |
| 3002 | ext3_mark_inode_dirty(handle, inode); | 3002 | ext3_mark_inode_dirty(handle, inode); |
| 3003 | mutex_unlock(&inode->i_mutex); | 3003 | mutex_unlock(&inode->i_mutex); |
| 3004 | return len - towrite; | 3004 | return len; |
| 3005 | } | 3005 | } |
| 3006 | 3006 | ||
| 3007 | #endif | 3007 | #endif |
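The rewritten ext3_quota_write() above drops the old copy loop: the transaction only reserves credits for a single data block, so a quota write is legal only if it stays inside one block, and the new alignment check rejects anything else with -EIO. A small standalone illustration of that arithmetic (plain C; the block size is a parameter here, whereas ext3 takes it from the superblock):

#include <stdio.h>

static int quota_write_fits(unsigned long long off, unsigned long long len,
                            unsigned int blocksize, unsigned int blocksize_bits)
{
        unsigned long long blk = off >> blocksize_bits;   /* target block */
        unsigned int offset = off & (blocksize - 1);      /* offset in block */

        if (blocksize - offset < len) {
                printf("off=%llu len=%llu: crosses a block boundary, -EIO\n",
                       off, len);
                return 0;
        }
        printf("off=%llu len=%llu: block %llu, in-block offset %u\n",
               off, len, blk, offset);
        return 1;
}

int main(void)
{
        quota_write_fits(4096 + 100, 48, 4096, 12);   /* fits in block 1 */
        quota_write_fits(4096 - 8, 48, 4096, 12);     /* crosses, rejected */
        return 0;
}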
diff --git a/fs/ext3/xattr.c b/fs/ext3/xattr.c index 66895ccf76c7..534a94c3a933 100644 --- a/fs/ext3/xattr.c +++ b/fs/ext3/xattr.c | |||
| @@ -274,7 +274,7 @@ ext3_xattr_ibody_get(struct inode *inode, int name_index, const char *name, | |||
| 274 | void *end; | 274 | void *end; |
| 275 | int error; | 275 | int error; |
| 276 | 276 | ||
| 277 | if (!(EXT3_I(inode)->i_state & EXT3_STATE_XATTR)) | 277 | if (!ext3_test_inode_state(inode, EXT3_STATE_XATTR)) |
| 278 | return -ENODATA; | 278 | return -ENODATA; |
| 279 | error = ext3_get_inode_loc(inode, &iloc); | 279 | error = ext3_get_inode_loc(inode, &iloc); |
| 280 | if (error) | 280 | if (error) |
| @@ -403,7 +403,7 @@ ext3_xattr_ibody_list(struct dentry *dentry, char *buffer, size_t buffer_size) | |||
| 403 | void *end; | 403 | void *end; |
| 404 | int error; | 404 | int error; |
| 405 | 405 | ||
| 406 | if (!(EXT3_I(inode)->i_state & EXT3_STATE_XATTR)) | 406 | if (!ext3_test_inode_state(inode, EXT3_STATE_XATTR)) |
| 407 | return 0; | 407 | return 0; |
| 408 | error = ext3_get_inode_loc(inode, &iloc); | 408 | error = ext3_get_inode_loc(inode, &iloc); |
| 409 | if (error) | 409 | if (error) |
| @@ -500,7 +500,7 @@ ext3_xattr_release_block(handle_t *handle, struct inode *inode, | |||
| 500 | error = ext3_journal_dirty_metadata(handle, bh); | 500 | error = ext3_journal_dirty_metadata(handle, bh); |
| 501 | if (IS_SYNC(inode)) | 501 | if (IS_SYNC(inode)) |
| 502 | handle->h_sync = 1; | 502 | handle->h_sync = 1; |
| 503 | vfs_dq_free_block(inode, 1); | 503 | dquot_free_block(inode, 1); |
| 504 | ea_bdebug(bh, "refcount now=%d; releasing", | 504 | ea_bdebug(bh, "refcount now=%d; releasing", |
| 505 | le32_to_cpu(BHDR(bh)->h_refcount)); | 505 | le32_to_cpu(BHDR(bh)->h_refcount)); |
| 506 | if (ce) | 506 | if (ce) |
| @@ -775,8 +775,8 @@ inserted: | |||
| 775 | else { | 775 | else { |
| 776 | /* The old block is released after updating | 776 | /* The old block is released after updating |
| 777 | the inode. */ | 777 | the inode. */ |
| 778 | error = -EDQUOT; | 778 | error = dquot_alloc_block(inode, 1); |
| 779 | if (vfs_dq_alloc_block(inode, 1)) | 779 | if (error) |
| 780 | goto cleanup; | 780 | goto cleanup; |
| 781 | error = ext3_journal_get_write_access(handle, | 781 | error = ext3_journal_get_write_access(handle, |
| 782 | new_bh); | 782 | new_bh); |
| @@ -850,7 +850,7 @@ cleanup: | |||
| 850 | return error; | 850 | return error; |
| 851 | 851 | ||
| 852 | cleanup_dquot: | 852 | cleanup_dquot: |
| 853 | vfs_dq_free_block(inode, 1); | 853 | dquot_free_block(inode, 1); |
| 854 | goto cleanup; | 854 | goto cleanup; |
| 855 | 855 | ||
| 856 | bad_block: | 856 | bad_block: |
| @@ -882,7 +882,7 @@ ext3_xattr_ibody_find(struct inode *inode, struct ext3_xattr_info *i, | |||
| 882 | is->s.base = is->s.first = IFIRST(header); | 882 | is->s.base = is->s.first = IFIRST(header); |
| 883 | is->s.here = is->s.first; | 883 | is->s.here = is->s.first; |
| 884 | is->s.end = (void *)raw_inode + EXT3_SB(inode->i_sb)->s_inode_size; | 884 | is->s.end = (void *)raw_inode + EXT3_SB(inode->i_sb)->s_inode_size; |
| 885 | if (EXT3_I(inode)->i_state & EXT3_STATE_XATTR) { | 885 | if (ext3_test_inode_state(inode, EXT3_STATE_XATTR)) { |
| 886 | error = ext3_xattr_check_names(IFIRST(header), is->s.end); | 886 | error = ext3_xattr_check_names(IFIRST(header), is->s.end); |
| 887 | if (error) | 887 | if (error) |
| 888 | return error; | 888 | return error; |
| @@ -914,10 +914,10 @@ ext3_xattr_ibody_set(handle_t *handle, struct inode *inode, | |||
| 914 | header = IHDR(inode, ext3_raw_inode(&is->iloc)); | 914 | header = IHDR(inode, ext3_raw_inode(&is->iloc)); |
| 915 | if (!IS_LAST_ENTRY(s->first)) { | 915 | if (!IS_LAST_ENTRY(s->first)) { |
| 916 | header->h_magic = cpu_to_le32(EXT3_XATTR_MAGIC); | 916 | header->h_magic = cpu_to_le32(EXT3_XATTR_MAGIC); |
| 917 | EXT3_I(inode)->i_state |= EXT3_STATE_XATTR; | 917 | ext3_set_inode_state(inode, EXT3_STATE_XATTR); |
| 918 | } else { | 918 | } else { |
| 919 | header->h_magic = cpu_to_le32(0); | 919 | header->h_magic = cpu_to_le32(0); |
| 920 | EXT3_I(inode)->i_state &= ~EXT3_STATE_XATTR; | 920 | ext3_clear_inode_state(inode, EXT3_STATE_XATTR); |
| 921 | } | 921 | } |
| 922 | return 0; | 922 | return 0; |
| 923 | } | 923 | } |
| @@ -967,10 +967,10 @@ ext3_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index, | |||
| 967 | if (error) | 967 | if (error) |
| 968 | goto cleanup; | 968 | goto cleanup; |
| 969 | 969 | ||
| 970 | if (EXT3_I(inode)->i_state & EXT3_STATE_NEW) { | 970 | if (ext3_test_inode_state(inode, EXT3_STATE_NEW)) { |
| 971 | struct ext3_inode *raw_inode = ext3_raw_inode(&is.iloc); | 971 | struct ext3_inode *raw_inode = ext3_raw_inode(&is.iloc); |
| 972 | memset(raw_inode, 0, EXT3_SB(inode->i_sb)->s_inode_size); | 972 | memset(raw_inode, 0, EXT3_SB(inode->i_sb)->s_inode_size); |
| 973 | EXT3_I(inode)->i_state &= ~EXT3_STATE_NEW; | 973 | ext3_clear_inode_state(inode, EXT3_STATE_NEW); |
| 974 | } | 974 | } |
| 975 | 975 | ||
| 976 | error = ext3_xattr_ibody_find(inode, &i, &is); | 976 | error = ext3_xattr_ibody_find(inode, &i, &is); |
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c index 22bc7435d913..d2f37a5516c7 100644 --- a/fs/ext4/balloc.c +++ b/fs/ext4/balloc.c | |||
| @@ -97,8 +97,8 @@ unsigned ext4_init_block_bitmap(struct super_block *sb, struct buffer_head *bh, | |||
| 97 | /* If checksum is bad mark all blocks used to prevent allocation | 97 | /* If checksum is bad mark all blocks used to prevent allocation |
| 98 | * essentially implementing a per-group read-only flag. */ | 98 | * essentially implementing a per-group read-only flag. */ |
| 99 | if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) { | 99 | if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) { |
| 100 | ext4_error(sb, __func__, | 100 | ext4_error(sb, "Checksum bad for group %u", |
| 101 | "Checksum bad for group %u", block_group); | 101 | block_group); |
| 102 | ext4_free_blks_set(sb, gdp, 0); | 102 | ext4_free_blks_set(sb, gdp, 0); |
| 103 | ext4_free_inodes_set(sb, gdp, 0); | 103 | ext4_free_inodes_set(sb, gdp, 0); |
| 104 | ext4_itable_unused_set(sb, gdp, 0); | 104 | ext4_itable_unused_set(sb, gdp, 0); |
| @@ -130,8 +130,7 @@ unsigned ext4_init_block_bitmap(struct super_block *sb, struct buffer_head *bh, | |||
| 130 | * to make sure we calculate the right free blocks | 130 | * to make sure we calculate the right free blocks |
| 131 | */ | 131 | */ |
| 132 | group_blocks = ext4_blocks_count(sbi->s_es) - | 132 | group_blocks = ext4_blocks_count(sbi->s_es) - |
| 133 | le32_to_cpu(sbi->s_es->s_first_data_block) - | 133 | ext4_group_first_block_no(sb, ngroups - 1); |
| 134 | (EXT4_BLOCKS_PER_GROUP(sb) * (ngroups - 1)); | ||
| 135 | } else { | 134 | } else { |
| 136 | group_blocks = EXT4_BLOCKS_PER_GROUP(sb); | 135 | group_blocks = EXT4_BLOCKS_PER_GROUP(sb); |
| 137 | } | 136 | } |
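The hunk above folds the open-coded "first data block plus N groups' worth of blocks" expression into ext4_group_first_block_no(). A standalone sketch of the arithmetic that helper hides; the struct below is a simplified stand-in for the ext4 superblock fields involved, not the real on-disk layout.

#include <stdio.h>

struct toy_sb {
        unsigned long long first_data_block;    /* s_first_data_block */
        unsigned long blocks_per_group;         /* EXT4_BLOCKS_PER_GROUP() */
};

static unsigned long long group_first_block_no(const struct toy_sb *sb,
                                               unsigned long group)
{
        return sb->first_data_block +
               (unsigned long long)group * sb->blocks_per_group;
}

int main(void)
{
        struct toy_sb sb = { .first_data_block = 1, .blocks_per_group = 32768 };

        /* e.g. the first block of the last group, as used in the hunk above */
        printf("group 0 starts at %llu\n", group_first_block_no(&sb, 0));
        printf("group 99 starts at %llu\n", group_first_block_no(&sb, 99));
        return 0;
}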
| @@ -189,9 +188,6 @@ unsigned ext4_init_block_bitmap(struct super_block *sb, struct buffer_head *bh, | |||
| 189 | * when a file system is mounted (see ext4_fill_super). | 188 | * when a file system is mounted (see ext4_fill_super). |
| 190 | */ | 189 | */ |
| 191 | 190 | ||
| 192 | |||
| 193 | #define in_range(b, first, len) ((b) >= (first) && (b) <= (first) + (len) - 1) | ||
| 194 | |||
| 195 | /** | 191 | /** |
| 196 | * ext4_get_group_desc() -- load group descriptor from disk | 192 | * ext4_get_group_desc() -- load group descriptor from disk |
| 197 | * @sb: super block | 193 | * @sb: super block |
| @@ -210,10 +206,8 @@ struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb, | |||
| 210 | struct ext4_sb_info *sbi = EXT4_SB(sb); | 206 | struct ext4_sb_info *sbi = EXT4_SB(sb); |
| 211 | 207 | ||
| 212 | if (block_group >= ngroups) { | 208 | if (block_group >= ngroups) { |
| 213 | ext4_error(sb, "ext4_get_group_desc", | 209 | ext4_error(sb, "block_group >= groups_count - block_group = %u," |
| 214 | "block_group >= groups_count - " | 210 | " groups_count = %u", block_group, ngroups); |
| 215 | "block_group = %u, groups_count = %u", | ||
| 216 | block_group, ngroups); | ||
| 217 | 211 | ||
| 218 | return NULL; | 212 | return NULL; |
| 219 | } | 213 | } |
| @@ -221,8 +215,7 @@ struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb, | |||
| 221 | group_desc = block_group >> EXT4_DESC_PER_BLOCK_BITS(sb); | 215 | group_desc = block_group >> EXT4_DESC_PER_BLOCK_BITS(sb); |
| 222 | offset = block_group & (EXT4_DESC_PER_BLOCK(sb) - 1); | 216 | offset = block_group & (EXT4_DESC_PER_BLOCK(sb) - 1); |
| 223 | if (!sbi->s_group_desc[group_desc]) { | 217 | if (!sbi->s_group_desc[group_desc]) { |
| 224 | ext4_error(sb, "ext4_get_group_desc", | 218 | ext4_error(sb, "Group descriptor not loaded - " |
| 225 | "Group descriptor not loaded - " | ||
| 226 | "block_group = %u, group_desc = %u, desc = %u", | 219 | "block_group = %u, group_desc = %u, desc = %u", |
| 227 | block_group, group_desc, offset); | 220 | block_group, group_desc, offset); |
| 228 | return NULL; | 221 | return NULL; |
| @@ -282,9 +275,7 @@ static int ext4_valid_block_bitmap(struct super_block *sb, | |||
| 282 | return 1; | 275 | return 1; |
| 283 | 276 | ||
| 284 | err_out: | 277 | err_out: |
| 285 | ext4_error(sb, __func__, | 278 | ext4_error(sb, "Invalid block bitmap - block_group = %d, block = %llu", |
| 286 | "Invalid block bitmap - " | ||
| 287 | "block_group = %d, block = %llu", | ||
| 288 | block_group, bitmap_blk); | 279 | block_group, bitmap_blk); |
| 289 | return 0; | 280 | return 0; |
| 290 | } | 281 | } |
| @@ -311,8 +302,7 @@ ext4_read_block_bitmap(struct super_block *sb, ext4_group_t block_group) | |||
| 311 | bitmap_blk = ext4_block_bitmap(sb, desc); | 302 | bitmap_blk = ext4_block_bitmap(sb, desc); |
| 312 | bh = sb_getblk(sb, bitmap_blk); | 303 | bh = sb_getblk(sb, bitmap_blk); |
| 313 | if (unlikely(!bh)) { | 304 | if (unlikely(!bh)) { |
| 314 | ext4_error(sb, __func__, | 305 | ext4_error(sb, "Cannot read block bitmap - " |
| 315 | "Cannot read block bitmap - " | ||
| 316 | "block_group = %u, block_bitmap = %llu", | 306 | "block_group = %u, block_bitmap = %llu", |
| 317 | block_group, bitmap_blk); | 307 | block_group, bitmap_blk); |
| 318 | return NULL; | 308 | return NULL; |
| @@ -354,8 +344,7 @@ ext4_read_block_bitmap(struct super_block *sb, ext4_group_t block_group) | |||
| 354 | set_bitmap_uptodate(bh); | 344 | set_bitmap_uptodate(bh); |
| 355 | if (bh_submit_read(bh) < 0) { | 345 | if (bh_submit_read(bh) < 0) { |
| 356 | put_bh(bh); | 346 | put_bh(bh); |
| 357 | ext4_error(sb, __func__, | 347 | ext4_error(sb, "Cannot read block bitmap - " |
| 358 | "Cannot read block bitmap - " | ||
| 359 | "block_group = %u, block_bitmap = %llu", | 348 | "block_group = %u, block_bitmap = %llu", |
| 360 | block_group, bitmap_blk); | 349 | block_group, bitmap_blk); |
| 361 | return NULL; | 350 | return NULL; |
| @@ -419,8 +408,7 @@ void ext4_add_groupblocks(handle_t *handle, struct super_block *sb, | |||
| 419 | in_range(block, ext4_inode_table(sb, desc), sbi->s_itb_per_group) || | 408 | in_range(block, ext4_inode_table(sb, desc), sbi->s_itb_per_group) || |
| 420 | in_range(block + count - 1, ext4_inode_table(sb, desc), | 409 | in_range(block + count - 1, ext4_inode_table(sb, desc), |
| 421 | sbi->s_itb_per_group)) { | 410 | sbi->s_itb_per_group)) { |
| 422 | ext4_error(sb, __func__, | 411 | ext4_error(sb, "Adding blocks in system zones - " |
| 423 | "Adding blocks in system zones - " | ||
| 424 | "Block = %llu, count = %lu", | 412 | "Block = %llu, count = %lu", |
| 425 | block, count); | 413 | block, count); |
| 426 | goto error_return; | 414 | goto error_return; |
| @@ -453,8 +441,7 @@ void ext4_add_groupblocks(handle_t *handle, struct super_block *sb, | |||
| 453 | BUFFER_TRACE(bitmap_bh, "clear bit"); | 441 | BUFFER_TRACE(bitmap_bh, "clear bit"); |
| 454 | if (!ext4_clear_bit_atomic(ext4_group_lock_ptr(sb, block_group), | 442 | if (!ext4_clear_bit_atomic(ext4_group_lock_ptr(sb, block_group), |
| 455 | bit + i, bitmap_bh->b_data)) { | 443 | bit + i, bitmap_bh->b_data)) { |
| 456 | ext4_error(sb, __func__, | 444 | ext4_error(sb, "bit already cleared for block %llu", |
| 457 | "bit already cleared for block %llu", | ||
| 458 | (ext4_fsblk_t)(block + i)); | 445 | (ext4_fsblk_t)(block + i)); |
| 459 | BUFFER_TRACE(bitmap_bh, "bit already cleared"); | 446 | BUFFER_TRACE(bitmap_bh, "bit already cleared"); |
| 460 | } else { | 447 | } else { |
diff --git a/fs/ext4/block_validity.c b/fs/ext4/block_validity.c index a60ab9aad57d..983f0e127493 100644 --- a/fs/ext4/block_validity.c +++ b/fs/ext4/block_validity.c | |||
| @@ -205,14 +205,14 @@ void ext4_release_system_zone(struct super_block *sb) | |||
| 205 | entry = rb_entry(n, struct ext4_system_zone, node); | 205 | entry = rb_entry(n, struct ext4_system_zone, node); |
| 206 | kmem_cache_free(ext4_system_zone_cachep, entry); | 206 | kmem_cache_free(ext4_system_zone_cachep, entry); |
| 207 | if (!parent) | 207 | if (!parent) |
| 208 | EXT4_SB(sb)->system_blks.rb_node = NULL; | 208 | EXT4_SB(sb)->system_blks = RB_ROOT; |
| 209 | else if (parent->rb_left == n) | 209 | else if (parent->rb_left == n) |
| 210 | parent->rb_left = NULL; | 210 | parent->rb_left = NULL; |
| 211 | else if (parent->rb_right == n) | 211 | else if (parent->rb_right == n) |
| 212 | parent->rb_right = NULL; | 212 | parent->rb_right = NULL; |
| 213 | n = parent; | 213 | n = parent; |
| 214 | } | 214 | } |
| 215 | EXT4_SB(sb)->system_blks.rb_node = NULL; | 215 | EXT4_SB(sb)->system_blks = RB_ROOT; |
| 216 | } | 216 | } |
| 217 | 217 | ||
| 218 | /* | 218 | /* |
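Here, and again in free_rb_tree_fname() in the dir.c hunk below, resetting an emptied tree is done by assigning RB_ROOT, the canonical empty-tree value, instead of poking ->rb_node to NULL by hand. A tiny sketch of the pattern; the payload type is hypothetical.

#include <linux/rbtree.h>

struct toy_node {
        struct rb_node node;
        int key;
};

static void toy_tree_reset(struct rb_root *root)
{
        /* ... free all toy_node entries reachable from *root ... */
        *root = RB_ROOT;        /* leave behind an empty, well-formed tree */
}

/* The same initializer works statically: struct rb_root tree = RB_ROOT; */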
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c index 9dc93168e262..86cb6d86a048 100644 --- a/fs/ext4/dir.c +++ b/fs/ext4/dir.c | |||
| @@ -83,10 +83,12 @@ int ext4_check_dir_entry(const char *function, struct inode *dir, | |||
| 83 | error_msg = "inode out of bounds"; | 83 | error_msg = "inode out of bounds"; |
| 84 | 84 | ||
| 85 | if (error_msg != NULL) | 85 | if (error_msg != NULL) |
| 86 | ext4_error(dir->i_sb, function, | 86 | __ext4_error(dir->i_sb, function, |
| 87 | "bad entry in directory #%lu: %s - " | 87 | "bad entry in directory #%lu: %s - block=%llu" |
| 88 | "offset=%u, inode=%u, rec_len=%d, name_len=%d", | 88 | "offset=%u(%u), inode=%u, rec_len=%d, name_len=%d", |
| 89 | dir->i_ino, error_msg, offset, | 89 | dir->i_ino, error_msg, |
| 90 | (unsigned long long) bh->b_blocknr, | ||
| 91 | (unsigned) (offset%bh->b_size), offset, | ||
| 90 | le32_to_cpu(de->inode), | 92 | le32_to_cpu(de->inode), |
| 91 | rlen, de->name_len); | 93 | rlen, de->name_len); |
| 92 | return error_msg == NULL ? 1 : 0; | 94 | return error_msg == NULL ? 1 : 0; |
| @@ -150,7 +152,7 @@ static int ext4_readdir(struct file *filp, | |||
| 150 | */ | 152 | */ |
| 151 | if (!bh) { | 153 | if (!bh) { |
| 152 | if (!dir_has_error) { | 154 | if (!dir_has_error) { |
| 153 | ext4_error(sb, __func__, "directory #%lu " | 155 | ext4_error(sb, "directory #%lu " |
| 154 | "contains a hole at offset %Lu", | 156 | "contains a hole at offset %Lu", |
| 155 | inode->i_ino, | 157 | inode->i_ino, |
| 156 | (unsigned long long) filp->f_pos); | 158 | (unsigned long long) filp->f_pos); |
| @@ -303,7 +305,7 @@ static void free_rb_tree_fname(struct rb_root *root) | |||
| 303 | kfree(old); | 305 | kfree(old); |
| 304 | } | 306 | } |
| 305 | if (!parent) | 307 | if (!parent) |
| 306 | root->rb_node = NULL; | 308 | *root = RB_ROOT; |
| 307 | else if (parent->rb_left == n) | 309 | else if (parent->rb_left == n) |
| 308 | parent->rb_left = NULL; | 310 | parent->rb_left = NULL; |
| 309 | else if (parent->rb_right == n) | 311 | else if (parent->rb_right == n) |
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 4cedc91ec59d..bf938cf7c5f0 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h | |||
| @@ -53,6 +53,12 @@ | |||
| 53 | #define ext4_debug(f, a...) do {} while (0) | 53 | #define ext4_debug(f, a...) do {} while (0) |
| 54 | #endif | 54 | #endif |
| 55 | 55 | ||
| 56 | #define EXT4_ERROR_INODE(inode, fmt, a...) \ | ||
| 57 | ext4_error_inode(__func__, (inode), (fmt), ## a); | ||
| 58 | |||
| 59 | #define EXT4_ERROR_FILE(file, fmt, a...) \ | ||
| 60 | ext4_error_file(__func__, (file), (fmt), ## a); | ||
| 61 | |||
| 56 | /* data type for block offset of block group */ | 62 | /* data type for block offset of block group */ |
| 57 | typedef int ext4_grpblk_t; | 63 | typedef int ext4_grpblk_t; |
| 58 | 64 | ||
| @@ -133,14 +139,14 @@ struct mpage_da_data { | |||
| 133 | int pages_written; | 139 | int pages_written; |
| 134 | int retval; | 140 | int retval; |
| 135 | }; | 141 | }; |
| 136 | #define DIO_AIO_UNWRITTEN 0x1 | 142 | #define EXT4_IO_UNWRITTEN 0x1 |
| 137 | typedef struct ext4_io_end { | 143 | typedef struct ext4_io_end { |
| 138 | struct list_head list; /* per-file finished AIO list */ | 144 | struct list_head list; /* per-file finished AIO list */ |
| 139 | struct inode *inode; /* file being written to */ | 145 | struct inode *inode; /* file being written to */ |
| 140 | unsigned int flag; /* unwritten or not */ | 146 | unsigned int flag; /* unwritten or not */ |
| 141 | int error; /* I/O error code */ | 147 | struct page *page; /* page struct for buffer write */ |
| 142 | ext4_lblk_t offset; /* offset in the file */ | 148 | loff_t offset; /* offset in the file */ |
| 143 | size_t size; /* size of the extent */ | 149 | ssize_t size; /* size of the extent */ |
| 144 | struct work_struct work; /* data work queue */ | 150 | struct work_struct work; /* data work queue */ |
| 145 | } ext4_io_end_t; | 151 | } ext4_io_end_t; |
| 146 | 152 | ||
| @@ -284,10 +290,12 @@ struct flex_groups { | |||
| 284 | #define EXT4_TOPDIR_FL 0x00020000 /* Top of directory hierarchies*/ | 290 | #define EXT4_TOPDIR_FL 0x00020000 /* Top of directory hierarchies*/ |
| 285 | #define EXT4_HUGE_FILE_FL 0x00040000 /* Set to each huge file */ | 291 | #define EXT4_HUGE_FILE_FL 0x00040000 /* Set to each huge file */ |
| 286 | #define EXT4_EXTENTS_FL 0x00080000 /* Inode uses extents */ | 292 | #define EXT4_EXTENTS_FL 0x00080000 /* Inode uses extents */ |
| 293 | #define EXT4_EA_INODE_FL 0x00200000 /* Inode used for large EA */ | ||
| 294 | #define EXT4_EOFBLOCKS_FL 0x00400000 /* Blocks allocated beyond EOF */ | ||
| 287 | #define EXT4_RESERVED_FL 0x80000000 /* reserved for ext4 lib */ | 295 | #define EXT4_RESERVED_FL 0x80000000 /* reserved for ext4 lib */ |
| 288 | 296 | ||
| 289 | #define EXT4_FL_USER_VISIBLE 0x000BDFFF /* User visible flags */ | 297 | #define EXT4_FL_USER_VISIBLE 0x004BDFFF /* User visible flags */ |
| 290 | #define EXT4_FL_USER_MODIFIABLE 0x000B80FF /* User modifiable flags */ | 298 | #define EXT4_FL_USER_MODIFIABLE 0x004B80FF /* User modifiable flags */ |
| 291 | 299 | ||
| 292 | /* Flags that should be inherited by new inodes from their parent. */ | 300 | /* Flags that should be inherited by new inodes from their parent. */ |
| 293 | #define EXT4_FL_INHERITED (EXT4_SECRM_FL | EXT4_UNRM_FL | EXT4_COMPR_FL |\ | 301 | #define EXT4_FL_INHERITED (EXT4_SECRM_FL | EXT4_UNRM_FL | EXT4_COMPR_FL |\ |
| @@ -313,17 +321,6 @@ static inline __u32 ext4_mask_flags(umode_t mode, __u32 flags) | |||
| 313 | return flags & EXT4_OTHER_FLMASK; | 321 | return flags & EXT4_OTHER_FLMASK; |
| 314 | } | 322 | } |
| 315 | 323 | ||
| 316 | /* | ||
| 317 | * Inode dynamic state flags | ||
| 318 | */ | ||
| 319 | #define EXT4_STATE_JDATA 0x00000001 /* journaled data exists */ | ||
| 320 | #define EXT4_STATE_NEW 0x00000002 /* inode is newly created */ | ||
| 321 | #define EXT4_STATE_XATTR 0x00000004 /* has in-inode xattrs */ | ||
| 322 | #define EXT4_STATE_NO_EXPAND 0x00000008 /* No space for expansion */ | ||
| 323 | #define EXT4_STATE_DA_ALLOC_CLOSE 0x00000010 /* Alloc DA blks on close */ | ||
| 324 | #define EXT4_STATE_EXT_MIGRATE 0x00000020 /* Inode is migrating */ | ||
| 325 | #define EXT4_STATE_DIO_UNWRITTEN 0x00000040 /* need convert on dio done*/ | ||
| 326 | |||
| 327 | /* Used to pass group descriptor data when online resize is done */ | 324 | /* Used to pass group descriptor data when online resize is done */ |
| 328 | struct ext4_new_group_input { | 325 | struct ext4_new_group_input { |
| 329 | __u32 group; /* Group number for this data */ | 326 | __u32 group; /* Group number for this data */ |
| @@ -364,19 +361,20 @@ struct ext4_new_group_data { | |||
| 364 | /* caller is from the direct IO path, request to creation of an | 361 | /* caller is from the direct IO path, request to creation of an |
| 365 | uninitialized extents if not allocated, split the uninitialized | 362 | uninitialized extents if not allocated, split the uninitialized |
| 366 | extent if blocks has been preallocated already*/ | 363 | extent if blocks has been preallocated already*/ |
| 367 | #define EXT4_GET_BLOCKS_DIO 0x0008 | 364 | #define EXT4_GET_BLOCKS_PRE_IO 0x0008 |
| 368 | #define EXT4_GET_BLOCKS_CONVERT 0x0010 | 365 | #define EXT4_GET_BLOCKS_CONVERT 0x0010 |
| 369 | #define EXT4_GET_BLOCKS_DIO_CREATE_EXT (EXT4_GET_BLOCKS_DIO|\ | 366 | #define EXT4_GET_BLOCKS_IO_CREATE_EXT (EXT4_GET_BLOCKS_PRE_IO|\ |
| 367 | EXT4_GET_BLOCKS_CREATE_UNINIT_EXT) | ||
| 368 | /* Convert extent to initialized after IO complete */ | ||
| 369 | #define EXT4_GET_BLOCKS_IO_CONVERT_EXT (EXT4_GET_BLOCKS_CONVERT|\ | ||
| 370 | EXT4_GET_BLOCKS_CREATE_UNINIT_EXT) | 370 | EXT4_GET_BLOCKS_CREATE_UNINIT_EXT) |
| 371 | /* Convert extent to initialized after direct IO complete */ | ||
| 372 | #define EXT4_GET_BLOCKS_DIO_CONVERT_EXT (EXT4_GET_BLOCKS_CONVERT|\ | ||
| 373 | EXT4_GET_BLOCKS_DIO_CREATE_EXT) | ||
| 374 | 371 | ||
| 375 | /* | 372 | /* |
| 376 | * Flags used by ext4_free_blocks | 373 | * Flags used by ext4_free_blocks |
| 377 | */ | 374 | */ |
| 378 | #define EXT4_FREE_BLOCKS_METADATA 0x0001 | 375 | #define EXT4_FREE_BLOCKS_METADATA 0x0001 |
| 379 | #define EXT4_FREE_BLOCKS_FORGET 0x0002 | 376 | #define EXT4_FREE_BLOCKS_FORGET 0x0002 |
| 377 | #define EXT4_FREE_BLOCKS_VALIDATED 0x0004 | ||
| 380 | 378 | ||
| 381 | /* | 379 | /* |
| 382 | * ioctl commands | 380 | * ioctl commands |
| @@ -630,7 +628,7 @@ struct ext4_inode_info { | |||
| 630 | * near to their parent directory's inode. | 628 | * near to their parent directory's inode. |
| 631 | */ | 629 | */ |
| 632 | ext4_group_t i_block_group; | 630 | ext4_group_t i_block_group; |
| 633 | __u32 i_state; /* Dynamic state flags for ext4 */ | 631 | unsigned long i_state_flags; /* Dynamic state flags */ |
| 634 | 632 | ||
| 635 | ext4_lblk_t i_dir_start_lookup; | 633 | ext4_lblk_t i_dir_start_lookup; |
| 636 | #ifdef CONFIG_EXT4_FS_XATTR | 634 | #ifdef CONFIG_EXT4_FS_XATTR |
| @@ -708,8 +706,9 @@ struct ext4_inode_info { | |||
| 708 | qsize_t i_reserved_quota; | 706 | qsize_t i_reserved_quota; |
| 709 | #endif | 707 | #endif |
| 710 | 708 | ||
| 711 | /* completed async DIOs that might need unwritten extents handling */ | 709 | /* completed IOs that might need unwritten extents handling */ |
| 712 | struct list_head i_aio_dio_complete_list; | 710 | struct list_head i_completed_io_list; |
| 711 | spinlock_t i_completed_io_lock; | ||
| 713 | /* current io_end structure for async DIO write*/ | 712 | /* current io_end structure for async DIO write*/ |
| 714 | ext4_io_end_t *cur_aio_dio; | 713 | ext4_io_end_t *cur_aio_dio; |
| 715 | 714 | ||
| @@ -760,6 +759,7 @@ struct ext4_inode_info { | |||
| 760 | #define EXT4_MOUNT_QUOTA 0x80000 /* Some quota option set */ | 759 | #define EXT4_MOUNT_QUOTA 0x80000 /* Some quota option set */ |
| 761 | #define EXT4_MOUNT_USRQUOTA 0x100000 /* "old" user quota */ | 760 | #define EXT4_MOUNT_USRQUOTA 0x100000 /* "old" user quota */ |
| 762 | #define EXT4_MOUNT_GRPQUOTA 0x200000 /* "old" group quota */ | 761 | #define EXT4_MOUNT_GRPQUOTA 0x200000 /* "old" group quota */ |
| 762 | #define EXT4_MOUNT_DIOREAD_NOLOCK 0x400000 /* Enable support for dio read nolocking */ | ||
| 763 | #define EXT4_MOUNT_JOURNAL_CHECKSUM 0x800000 /* Journal checksums */ | 763 | #define EXT4_MOUNT_JOURNAL_CHECKSUM 0x800000 /* Journal checksums */ |
| 764 | #define EXT4_MOUNT_JOURNAL_ASYNC_COMMIT 0x1000000 /* Journal Async Commit */ | 764 | #define EXT4_MOUNT_JOURNAL_ASYNC_COMMIT 0x1000000 /* Journal Async Commit */ |
| 765 | #define EXT4_MOUNT_I_VERSION 0x2000000 /* i_version support */ | 765 | #define EXT4_MOUNT_I_VERSION 0x2000000 /* i_version support */ |
| @@ -1050,6 +1050,34 @@ static inline int ext4_valid_inum(struct super_block *sb, unsigned long ino) | |||
| 1050 | (ino >= EXT4_FIRST_INO(sb) && | 1050 | (ino >= EXT4_FIRST_INO(sb) && |
| 1051 | ino <= le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count)); | 1051 | ino <= le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count)); |
| 1052 | } | 1052 | } |
| 1053 | |||
| 1054 | /* | ||
| 1055 | * Inode dynamic state flags | ||
| 1056 | */ | ||
| 1057 | enum { | ||
| 1058 | EXT4_STATE_JDATA, /* journaled data exists */ | ||
| 1059 | EXT4_STATE_NEW, /* inode is newly created */ | ||
| 1060 | EXT4_STATE_XATTR, /* has in-inode xattrs */ | ||
| 1061 | EXT4_STATE_NO_EXPAND, /* No space for expansion */ | ||
| 1062 | EXT4_STATE_DA_ALLOC_CLOSE, /* Alloc DA blks on close */ | ||
| 1063 | EXT4_STATE_EXT_MIGRATE, /* Inode is migrating */ | ||
| 1064 | EXT4_STATE_DIO_UNWRITTEN, /* need convert on dio done*/ | ||
| 1065 | }; | ||
| 1066 | |||
| 1067 | static inline int ext4_test_inode_state(struct inode *inode, int bit) | ||
| 1068 | { | ||
| 1069 | return test_bit(bit, &EXT4_I(inode)->i_state_flags); | ||
| 1070 | } | ||
| 1071 | |||
| 1072 | static inline void ext4_set_inode_state(struct inode *inode, int bit) | ||
| 1073 | { | ||
| 1074 | set_bit(bit, &EXT4_I(inode)->i_state_flags); | ||
| 1075 | } | ||
| 1076 | |||
| 1077 | static inline void ext4_clear_inode_state(struct inode *inode, int bit) | ||
| 1078 | { | ||
| 1079 | clear_bit(bit, &EXT4_I(inode)->i_state_flags); | ||
| 1080 | } | ||
| 1053 | #else | 1081 | #else |
| 1054 | /* Assume that user mode programs are passing in an ext4fs superblock, not | 1082 | /* Assume that user mode programs are passing in an ext4fs superblock, not |
| 1055 | * a kernel struct super_block. This will allow us to call the feature-test | 1083 | * a kernel struct super_block. This will allow us to call the feature-test |
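The block above turns the EXT4_STATE_* bitmask that used to live in a __u32 i_state into bit numbers stored in an unsigned long and manipulated through test_bit()/set_bit()/clear_bit(), so two CPUs setting different state bits cannot lose each other's update the way a plain read-modify-write could. A hedged sketch of the same pattern on a hypothetical structure:

#include <linux/bitops.h>

enum {
        TOY_STATE_NEW,          /* object is newly created */
        TOY_STATE_DIRTY,        /* object has pending writes */
};

struct toy_object {
        unsigned long state_flags;
};

static inline void toy_set_state(struct toy_object *o, int bit)
{
        set_bit(bit, &o->state_flags);          /* atomic bit set */
}

static inline int toy_test_state(struct toy_object *o, int bit)
{
        return test_bit(bit, &o->state_flags);
}

static inline void toy_clear_state(struct toy_object *o, int bit)
{
        clear_bit(bit, &o->state_flags);        /* atomic bit clear */
}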
| @@ -1126,6 +1154,8 @@ static inline int ext4_valid_inum(struct super_block *sb, unsigned long ino) | |||
| 1126 | #define EXT4_FEATURE_INCOMPAT_64BIT 0x0080 | 1154 | #define EXT4_FEATURE_INCOMPAT_64BIT 0x0080 |
| 1127 | #define EXT4_FEATURE_INCOMPAT_MMP 0x0100 | 1155 | #define EXT4_FEATURE_INCOMPAT_MMP 0x0100 |
| 1128 | #define EXT4_FEATURE_INCOMPAT_FLEX_BG 0x0200 | 1156 | #define EXT4_FEATURE_INCOMPAT_FLEX_BG 0x0200 |
| 1157 | #define EXT4_FEATURE_INCOMPAT_EA_INODE 0x0400 /* EA in inode */ | ||
| 1158 | #define EXT4_FEATURE_INCOMPAT_DIRDATA 0x1000 /* data in dirent */ | ||
| 1129 | 1159 | ||
| 1130 | #define EXT4_FEATURE_COMPAT_SUPP EXT2_FEATURE_COMPAT_EXT_ATTR | 1160 | #define EXT4_FEATURE_COMPAT_SUPP EXT2_FEATURE_COMPAT_EXT_ATTR |
| 1131 | #define EXT4_FEATURE_INCOMPAT_SUPP (EXT4_FEATURE_INCOMPAT_FILETYPE| \ | 1161 | #define EXT4_FEATURE_INCOMPAT_SUPP (EXT4_FEATURE_INCOMPAT_FILETYPE| \ |
| @@ -1416,7 +1446,7 @@ int ext4_get_block(struct inode *inode, sector_t iblock, | |||
| 1416 | struct buffer_head *bh_result, int create); | 1446 | struct buffer_head *bh_result, int create); |
| 1417 | 1447 | ||
| 1418 | extern struct inode *ext4_iget(struct super_block *, unsigned long); | 1448 | extern struct inode *ext4_iget(struct super_block *, unsigned long); |
| 1419 | extern int ext4_write_inode(struct inode *, int); | 1449 | extern int ext4_write_inode(struct inode *, struct writeback_control *); |
| 1420 | extern int ext4_setattr(struct dentry *, struct iattr *); | 1450 | extern int ext4_setattr(struct dentry *, struct iattr *); |
| 1421 | extern int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry, | 1451 | extern int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry, |
| 1422 | struct kstat *stat); | 1452 | struct kstat *stat); |
| @@ -1439,7 +1469,7 @@ extern int ext4_block_truncate_page(handle_t *handle, | |||
| 1439 | struct address_space *mapping, loff_t from); | 1469 | struct address_space *mapping, loff_t from); |
| 1440 | extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf); | 1470 | extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf); |
| 1441 | extern qsize_t *ext4_get_reserved_space(struct inode *inode); | 1471 | extern qsize_t *ext4_get_reserved_space(struct inode *inode); |
| 1442 | extern int flush_aio_dio_completed_IO(struct inode *inode); | 1472 | extern int flush_completed_IO(struct inode *inode); |
| 1443 | extern void ext4_da_update_reserve_space(struct inode *inode, | 1473 | extern void ext4_da_update_reserve_space(struct inode *inode, |
| 1444 | int used, int quota_claim); | 1474 | int used, int quota_claim); |
| 1445 | /* ioctl.c */ | 1475 | /* ioctl.c */ |
| @@ -1465,13 +1495,20 @@ extern int ext4_group_extend(struct super_block *sb, | |||
| 1465 | ext4_fsblk_t n_blocks_count); | 1495 | ext4_fsblk_t n_blocks_count); |
| 1466 | 1496 | ||
| 1467 | /* super.c */ | 1497 | /* super.c */ |
| 1468 | extern void ext4_error(struct super_block *, const char *, const char *, ...) | 1498 | extern void __ext4_error(struct super_block *, const char *, const char *, ...) |
| 1499 | __attribute__ ((format (printf, 3, 4))); | ||
| 1500 | #define ext4_error(sb, message...) __ext4_error(sb, __func__, ## message) | ||
| 1501 | extern void ext4_error_inode(const char *, struct inode *, const char *, ...) | ||
| 1502 | __attribute__ ((format (printf, 3, 4))); | ||
| 1503 | extern void ext4_error_file(const char *, struct file *, const char *, ...) | ||
| 1469 | __attribute__ ((format (printf, 3, 4))); | 1504 | __attribute__ ((format (printf, 3, 4))); |
| 1470 | extern void __ext4_std_error(struct super_block *, const char *, int); | 1505 | extern void __ext4_std_error(struct super_block *, const char *, int); |
| 1471 | extern void ext4_abort(struct super_block *, const char *, const char *, ...) | 1506 | extern void ext4_abort(struct super_block *, const char *, const char *, ...) |
| 1472 | __attribute__ ((format (printf, 3, 4))); | 1507 | __attribute__ ((format (printf, 3, 4))); |
| 1473 | extern void ext4_warning(struct super_block *, const char *, const char *, ...) | 1508 | extern void __ext4_warning(struct super_block *, const char *, |
| 1509 | const char *, ...) | ||
| 1474 | __attribute__ ((format (printf, 3, 4))); | 1510 | __attribute__ ((format (printf, 3, 4))); |
| 1511 | #define ext4_warning(sb, message...) __ext4_warning(sb, __func__, ## message) | ||
| 1475 | extern void ext4_msg(struct super_block *, const char *, const char *, ...) | 1512 | extern void ext4_msg(struct super_block *, const char *, const char *, ...) |
| 1476 | __attribute__ ((format (printf, 3, 4))); | 1513 | __attribute__ ((format (printf, 3, 4))); |
| 1477 | extern void ext4_grp_locked_error(struct super_block *, ext4_group_t, | 1514 | extern void ext4_grp_locked_error(struct super_block *, ext4_group_t, |
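ext4_error() and ext4_warning() become variadic macros that splice __func__ into a double-underscore helper, so call sites no longer pass the function name by hand (visible throughout the balloc.c and dir.c hunks above). A rough standalone sketch of the wrapper pattern; the body here only prints, whereas the real __ext4_error() also updates the superblock's error state.

#include <stdarg.h>
#include <stdio.h>

static void __toy_error(const char *function, const char *fmt, ...)
{
        va_list args;

        fprintf(stderr, "TOYFS-fs error (%s): ", function);
        va_start(args, fmt);
        vfprintf(stderr, fmt, args);    /* caller-supplied message */
        va_end(args);
        fputc('\n', stderr);
}

/* Same shape as the ext4_error() macro above: inject __func__ for the caller. */
#define toy_error(message...) __toy_error(__func__, ## message)

static void read_block_bitmap(unsigned group)
{
        toy_error("Cannot read block bitmap - block_group = %u", group);
}

int main(void)
{
        read_block_bitmap(7);
        return 0;
}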
| @@ -1744,7 +1781,7 @@ extern void ext4_ext_release(struct super_block *); | |||
| 1744 | extern long ext4_fallocate(struct inode *inode, int mode, loff_t offset, | 1781 | extern long ext4_fallocate(struct inode *inode, int mode, loff_t offset, |
| 1745 | loff_t len); | 1782 | loff_t len); |
| 1746 | extern int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset, | 1783 | extern int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset, |
| 1747 | loff_t len); | 1784 | ssize_t len); |
| 1748 | extern int ext4_get_blocks(handle_t *handle, struct inode *inode, | 1785 | extern int ext4_get_blocks(handle_t *handle, struct inode *inode, |
| 1749 | sector_t block, unsigned int max_blocks, | 1786 | sector_t block, unsigned int max_blocks, |
| 1750 | struct buffer_head *bh, int flags); | 1787 | struct buffer_head *bh, int flags); |
| @@ -1756,6 +1793,15 @@ extern int ext4_move_extents(struct file *o_filp, struct file *d_filp, | |||
| 1756 | __u64 len, __u64 *moved_len); | 1793 | __u64 len, __u64 *moved_len); |
| 1757 | 1794 | ||
| 1758 | 1795 | ||
| 1796 | /* BH_Uninit flag: blocks are allocated but uninitialized on disk */ | ||
| 1797 | enum ext4_state_bits { | ||
| 1798 | BH_Uninit /* blocks are allocated but uninitialized on disk */ | ||
| 1799 | = BH_JBDPrivateStart, | ||
| 1800 | }; | ||
| 1801 | |||
| 1802 | BUFFER_FNS(Uninit, uninit) | ||
| 1803 | TAS_BUFFER_FNS(Uninit, uninit) | ||
| 1804 | |||
| 1759 | /* | 1805 | /* |
| 1760 | * Add new method to test whether block and inode bitmaps are properly | 1806 |
| 1761 | * initialized. With uninit_bg reading the block from disk is not enough | 1807 | * initialized. With uninit_bg reading the block from disk is not enough |
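BH_Uninit above is allocated from BH_JBDPrivateStart, and BUFFER_FNS(Uninit, uninit) plus TAS_BUFFER_FNS(Uninit, uninit) generate buffer_uninit(), set_buffer_uninit(), clear_buffer_uninit() and test_and_set_buffer_uninit() accessors on bh->b_state. A rough sketch of what such a generator macro expands to, simplified relative to the real <linux/buffer_head.h> definition:

#include <linux/buffer_head.h>

#define TOY_BUFFER_FNS(bit, name)                                       \
static inline void set_buffer_##name(struct buffer_head *bh)           \
{                                                                       \
        set_bit(BH_##bit, &(bh)->b_state);                              \
}                                                                       \
static inline void clear_buffer_##name(struct buffer_head *bh)         \
{                                                                       \
        clear_bit(BH_##bit, &(bh)->b_state);                            \
}                                                                       \
static inline int buffer_##name(const struct buffer_head *bh)          \
{                                                                       \
        return test_bit(BH_##bit, &(bh)->b_state);                      \
}

/* Instantiating TOY_BUFFER_FNS(Uninit, uninit) would yield helpers of the
 * same shape as those the patch generates for BH_Uninit. */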
| @@ -1773,6 +1819,8 @@ static inline void set_bitmap_uptodate(struct buffer_head *bh) | |||
| 1773 | set_bit(BH_BITMAP_UPTODATE, &(bh)->b_state); | 1819 | set_bit(BH_BITMAP_UPTODATE, &(bh)->b_state); |
| 1774 | } | 1820 | } |
| 1775 | 1821 | ||
| 1822 | #define in_range(b, first, len) ((b) >= (first) && (b) <= (first) + (len) - 1) | ||
| 1823 | |||
| 1776 | #endif /* __KERNEL__ */ | 1824 | #endif /* __KERNEL__ */ |
| 1777 | 1825 | ||
| 1778 | #endif /* _EXT4_H */ | 1826 | #endif /* _EXT4_H */ |
diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c index b57e5c711b6d..53d2764d71ca 100644 --- a/fs/ext4/ext4_jbd2.c +++ b/fs/ext4/ext4_jbd2.c | |||
| @@ -125,14 +125,14 @@ int __ext4_handle_dirty_metadata(const char *where, handle_t *handle, | |||
| 125 | ext4_journal_abort_handle(where, __func__, bh, | 125 | ext4_journal_abort_handle(where, __func__, bh, |
| 126 | handle, err); | 126 | handle, err); |
| 127 | } else { | 127 | } else { |
| 128 | if (inode && bh) | 128 | if (inode) |
| 129 | mark_buffer_dirty_inode(bh, inode); | 129 | mark_buffer_dirty_inode(bh, inode); |
| 130 | else | 130 | else |
| 131 | mark_buffer_dirty(bh); | 131 | mark_buffer_dirty(bh); |
| 132 | if (inode && inode_needs_sync(inode)) { | 132 | if (inode && inode_needs_sync(inode)) { |
| 133 | sync_dirty_buffer(bh); | 133 | sync_dirty_buffer(bh); |
| 134 | if (buffer_req(bh) && !buffer_uptodate(bh)) { | 134 | if (buffer_req(bh) && !buffer_uptodate(bh)) { |
| 135 | ext4_error(inode->i_sb, __func__, | 135 | ext4_error(inode->i_sb, |
| 136 | "IO error syncing inode, " | 136 | "IO error syncing inode, " |
| 137 | "inode=%lu, block=%llu", | 137 | "inode=%lu, block=%llu", |
| 138 | inode->i_ino, | 138 | inode->i_ino, |
diff --git a/fs/ext4/ext4_jbd2.h b/fs/ext4/ext4_jbd2.h index 05eca817d704..b79ad5126468 100644 --- a/fs/ext4/ext4_jbd2.h +++ b/fs/ext4/ext4_jbd2.h | |||
| @@ -304,4 +304,28 @@ static inline int ext4_should_writeback_data(struct inode *inode) | |||
| 304 | return 0; | 304 | return 0; |
| 305 | } | 305 | } |
| 306 | 306 | ||
| 307 | /* | ||
| 308 | * This function controls whether or not we should try to go down the | ||
| 309 | * dioread_nolock code paths, which makes it safe to avoid taking | ||
| 310 | * i_mutex for direct I/O reads. This only works for extent-based | ||
| 311 | * files, and it doesn't work for nobh or if data journaling is | ||
| 312 | * enabled, since the dioread_nolock code uses b_private to pass | ||
| 313 | * information back to the I/O completion handler, and this conflicts | ||
| 314 | * with the jbd's use of b_private. | ||
| 315 | */ | ||
| 316 | static inline int ext4_should_dioread_nolock(struct inode *inode) | ||
| 317 | { | ||
| 318 | if (!test_opt(inode->i_sb, DIOREAD_NOLOCK)) | ||
| 319 | return 0; | ||
| 320 | if (test_opt(inode->i_sb, NOBH)) | ||
| 321 | return 0; | ||
| 322 | if (!S_ISREG(inode->i_mode)) | ||
| 323 | return 0; | ||
| 324 | if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)) | ||
| 325 | return 0; | ||
| 326 | if (ext4_should_journal_data(inode)) | ||
| 327 | return 0; | ||
| 328 | return 1; | ||
| 329 | } | ||
| 330 | |||
| 307 | #endif /* _EXT4_JBD2_H */ | 331 | #endif /* _EXT4_JBD2_H */ |
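ext4_should_dioread_nolock() is a pure predicate; the inode.c and extents.c hunks below consume it to decide whether newly allocated blocks go through the uninitialized-extent write path. A sketch of the caller pattern, mirroring the ext4_write_begin() change later in this patch (surrounding locals are assumed):

    /* Pick the get_block callback based on the dioread_nolock decision: */
    if (ext4_should_dioread_nolock(inode))
            ret = block_write_begin(file, mapping, pos, len, flags, pagep,
                                    fsdata, ext4_get_block_write);
    else
            ret = block_write_begin(file, mapping, pos, len, flags, pagep,
                                    fsdata, ext4_get_block);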
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index 765a4826b118..94c8ee81f5e1 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c | |||
| @@ -195,8 +195,7 @@ static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode, | |||
| 195 | if (S_ISREG(inode->i_mode)) | 195 | if (S_ISREG(inode->i_mode)) |
| 196 | block_group++; | 196 | block_group++; |
| 197 | } | 197 | } |
| 198 | bg_start = (block_group * EXT4_BLOCKS_PER_GROUP(inode->i_sb)) + | 198 | bg_start = ext4_group_first_block_no(inode->i_sb, block_group); |
| 199 | le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_first_data_block); | ||
| 200 | last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1; | 199 | last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1; |
| 201 | 200 | ||
| 202 | /* | 201 | /* |
| @@ -440,7 +439,7 @@ static int __ext4_ext_check(const char *function, struct inode *inode, | |||
| 440 | return 0; | 439 | return 0; |
| 441 | 440 | ||
| 442 | corrupted: | 441 | corrupted: |
| 443 | ext4_error(inode->i_sb, function, | 442 | __ext4_error(inode->i_sb, function, |
| 444 | "bad header/extent in inode #%lu: %s - magic %x, " | 443 | "bad header/extent in inode #%lu: %s - magic %x, " |
| 445 | "entries %u, max %u(%u), depth %u(%u)", | 444 | "entries %u, max %u(%u), depth %u(%u)", |
| 446 | inode->i_ino, error_msg, le16_to_cpu(eh->eh_magic), | 445 | inode->i_ino, error_msg, le16_to_cpu(eh->eh_magic), |
| @@ -703,7 +702,12 @@ ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block, | |||
| 703 | } | 702 | } |
| 704 | eh = ext_block_hdr(bh); | 703 | eh = ext_block_hdr(bh); |
| 705 | ppos++; | 704 | ppos++; |
| 706 | BUG_ON(ppos > depth); | 705 | if (unlikely(ppos > depth)) { |
| 706 | put_bh(bh); | ||
| 707 | EXT4_ERROR_INODE(inode, | ||
| 708 | "ppos %d > depth %d", ppos, depth); | ||
| 709 | goto err; | ||
| 710 | } | ||
| 707 | path[ppos].p_bh = bh; | 711 | path[ppos].p_bh = bh; |
| 708 | path[ppos].p_hdr = eh; | 712 | path[ppos].p_hdr = eh; |
| 709 | i--; | 713 | i--; |
| @@ -749,7 +753,12 @@ int ext4_ext_insert_index(handle_t *handle, struct inode *inode, | |||
| 749 | if (err) | 753 | if (err) |
| 750 | return err; | 754 | return err; |
| 751 | 755 | ||
| 752 | BUG_ON(logical == le32_to_cpu(curp->p_idx->ei_block)); | 756 | if (unlikely(logical == le32_to_cpu(curp->p_idx->ei_block))) { |
| 757 | EXT4_ERROR_INODE(inode, | ||
| 758 | "logical %d == ei_block %d!", | ||
| 759 | logical, le32_to_cpu(curp->p_idx->ei_block)); | ||
| 760 | return -EIO; | ||
| 761 | } | ||
| 753 | len = EXT_MAX_INDEX(curp->p_hdr) - curp->p_idx; | 762 | len = EXT_MAX_INDEX(curp->p_hdr) - curp->p_idx; |
| 754 | if (logical > le32_to_cpu(curp->p_idx->ei_block)) { | 763 | if (logical > le32_to_cpu(curp->p_idx->ei_block)) { |
| 755 | /* insert after */ | 764 | /* insert after */ |
| @@ -779,9 +788,17 @@ int ext4_ext_insert_index(handle_t *handle, struct inode *inode, | |||
| 779 | ext4_idx_store_pblock(ix, ptr); | 788 | ext4_idx_store_pblock(ix, ptr); |
| 780 | le16_add_cpu(&curp->p_hdr->eh_entries, 1); | 789 | le16_add_cpu(&curp->p_hdr->eh_entries, 1); |
| 781 | 790 | ||
| 782 | BUG_ON(le16_to_cpu(curp->p_hdr->eh_entries) | 791 | if (unlikely(le16_to_cpu(curp->p_hdr->eh_entries) |
| 783 | > le16_to_cpu(curp->p_hdr->eh_max)); | 792 | > le16_to_cpu(curp->p_hdr->eh_max))) { |
| 784 | BUG_ON(ix > EXT_LAST_INDEX(curp->p_hdr)); | 793 | EXT4_ERROR_INODE(inode, |
| 794 | "logical %d == ei_block %d!", | ||
| 795 | logical, le32_to_cpu(curp->p_idx->ei_block)); | ||
| 796 | return -EIO; | ||
| 797 | } | ||
| 798 | if (unlikely(ix > EXT_LAST_INDEX(curp->p_hdr))) { | ||
| 799 | EXT4_ERROR_INODE(inode, "ix > EXT_LAST_INDEX!"); | ||
| 800 | return -EIO; | ||
| 801 | } | ||
| 785 | 802 | ||
| 786 | err = ext4_ext_dirty(handle, inode, curp); | 803 | err = ext4_ext_dirty(handle, inode, curp); |
| 787 | ext4_std_error(inode->i_sb, err); | 804 | ext4_std_error(inode->i_sb, err); |
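The extents.c changes in this patch repeatedly convert on-disk-consistency BUG_ON() assertions into recoverable errors, and the transformation has the same shape every time. A distilled sketch of the pattern (condition and message are illustrative):

    /* Before: an inconsistent extent header crashes the kernel. */
    BUG_ON(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max));

    /* After: report the corruption against the inode and fail with -EIO,
     * releasing whatever the surrounding function holds before returning. */
    if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) {
            EXT4_ERROR_INODE(inode, "eh_entries %d > eh_max %d",
                             le16_to_cpu(eh->eh_entries),
                             le16_to_cpu(eh->eh_max));
            return -EIO;
    }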
| @@ -819,7 +836,10 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode, | |||
| 819 | 836 | ||
| 820 | /* if current leaf will be split, then we should use | 837 | /* if current leaf will be split, then we should use |
| 821 | * border from split point */ | 838 | * border from split point */ |
| 822 | BUG_ON(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr)); | 839 | if (unlikely(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr))) { |
| 840 | EXT4_ERROR_INODE(inode, "p_ext > EXT_MAX_EXTENT!"); | ||
| 841 | return -EIO; | ||
| 842 | } | ||
| 823 | if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) { | 843 | if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) { |
| 824 | border = path[depth].p_ext[1].ee_block; | 844 | border = path[depth].p_ext[1].ee_block; |
| 825 | ext_debug("leaf will be split." | 845 | ext_debug("leaf will be split." |
| @@ -860,7 +880,11 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode, | |||
| 860 | 880 | ||
| 861 | /* initialize new leaf */ | 881 | /* initialize new leaf */ |
| 862 | newblock = ablocks[--a]; | 882 | newblock = ablocks[--a]; |
| 863 | BUG_ON(newblock == 0); | 883 | if (unlikely(newblock == 0)) { |
| 884 | EXT4_ERROR_INODE(inode, "newblock == 0!"); | ||
| 885 | err = -EIO; | ||
| 886 | goto cleanup; | ||
| 887 | } | ||
| 864 | bh = sb_getblk(inode->i_sb, newblock); | 888 | bh = sb_getblk(inode->i_sb, newblock); |
| 865 | if (!bh) { | 889 | if (!bh) { |
| 866 | err = -EIO; | 890 | err = -EIO; |
| @@ -880,7 +904,14 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode, | |||
| 880 | ex = EXT_FIRST_EXTENT(neh); | 904 | ex = EXT_FIRST_EXTENT(neh); |
| 881 | 905 | ||
| 882 | /* move remainder of path[depth] to the new leaf */ | 906 | /* move remainder of path[depth] to the new leaf */ |
| 883 | BUG_ON(path[depth].p_hdr->eh_entries != path[depth].p_hdr->eh_max); | 907 | if (unlikely(path[depth].p_hdr->eh_entries != |
| 908 | path[depth].p_hdr->eh_max)) { | ||
| 909 | EXT4_ERROR_INODE(inode, "eh_entries %d != eh_max %d!", | ||
| 910 | path[depth].p_hdr->eh_entries, | ||
| 911 | path[depth].p_hdr->eh_max); | ||
| 912 | err = -EIO; | ||
| 913 | goto cleanup; | ||
| 914 | } | ||
| 884 | /* start copy from next extent */ | 915 | /* start copy from next extent */ |
| 885 | /* TODO: we could do it by single memmove */ | 916 | /* TODO: we could do it by single memmove */ |
| 886 | m = 0; | 917 | m = 0; |
| @@ -927,7 +958,11 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode, | |||
| 927 | 958 | ||
| 928 | /* create intermediate indexes */ | 959 | /* create intermediate indexes */ |
| 929 | k = depth - at - 1; | 960 | k = depth - at - 1; |
| 930 | BUG_ON(k < 0); | 961 | if (unlikely(k < 0)) { |
| 962 | EXT4_ERROR_INODE(inode, "k %d < 0!", k); | ||
| 963 | err = -EIO; | ||
| 964 | goto cleanup; | ||
| 965 | } | ||
| 931 | if (k) | 966 | if (k) |
| 932 | ext_debug("create %d intermediate indices\n", k); | 967 | ext_debug("create %d intermediate indices\n", k); |
| 933 | /* insert new index into current index block */ | 968 | /* insert new index into current index block */ |
| @@ -964,8 +999,14 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode, | |||
| 964 | 999 | ||
| 965 | ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx, | 1000 | ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx, |
| 966 | EXT_MAX_INDEX(path[i].p_hdr)); | 1001 | EXT_MAX_INDEX(path[i].p_hdr)); |
| 967 | BUG_ON(EXT_MAX_INDEX(path[i].p_hdr) != | 1002 | if (unlikely(EXT_MAX_INDEX(path[i].p_hdr) != |
| 968 | EXT_LAST_INDEX(path[i].p_hdr)); | 1003 | EXT_LAST_INDEX(path[i].p_hdr))) { |
| 1004 | EXT4_ERROR_INODE(inode, | ||
| 1005 | "EXT_MAX_INDEX != EXT_LAST_INDEX ee_block %d!", | ||
| 1006 | le32_to_cpu(path[i].p_ext->ee_block)); | ||
| 1007 | err = -EIO; | ||
| 1008 | goto cleanup; | ||
| 1009 | } | ||
| 969 | while (path[i].p_idx <= EXT_MAX_INDEX(path[i].p_hdr)) { | 1010 | while (path[i].p_idx <= EXT_MAX_INDEX(path[i].p_hdr)) { |
| 970 | ext_debug("%d: move %d:%llu in new index %llu\n", i, | 1011 | ext_debug("%d: move %d:%llu in new index %llu\n", i, |
| 971 | le32_to_cpu(path[i].p_idx->ei_block), | 1012 | le32_to_cpu(path[i].p_idx->ei_block), |
| @@ -1203,7 +1244,10 @@ ext4_ext_search_left(struct inode *inode, struct ext4_ext_path *path, | |||
| 1203 | struct ext4_extent *ex; | 1244 | struct ext4_extent *ex; |
| 1204 | int depth, ee_len; | 1245 | int depth, ee_len; |
| 1205 | 1246 | ||
| 1206 | BUG_ON(path == NULL); | 1247 | if (unlikely(path == NULL)) { |
| 1248 | EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical); | ||
| 1249 | return -EIO; | ||
| 1250 | } | ||
| 1207 | depth = path->p_depth; | 1251 | depth = path->p_depth; |
| 1208 | *phys = 0; | 1252 | *phys = 0; |
| 1209 | 1253 | ||
| @@ -1217,15 +1261,33 @@ ext4_ext_search_left(struct inode *inode, struct ext4_ext_path *path, | |||
| 1217 | ex = path[depth].p_ext; | 1261 | ex = path[depth].p_ext; |
| 1218 | ee_len = ext4_ext_get_actual_len(ex); | 1262 | ee_len = ext4_ext_get_actual_len(ex); |
| 1219 | if (*logical < le32_to_cpu(ex->ee_block)) { | 1263 | if (*logical < le32_to_cpu(ex->ee_block)) { |
| 1220 | BUG_ON(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex); | 1264 | if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) { |
| 1265 | EXT4_ERROR_INODE(inode, | ||
| 1266 | "EXT_FIRST_EXTENT != ex *logical %d ee_block %d!", | ||
| 1267 | *logical, le32_to_cpu(ex->ee_block)); | ||
| 1268 | return -EIO; | ||
| 1269 | } | ||
| 1221 | while (--depth >= 0) { | 1270 | while (--depth >= 0) { |
| 1222 | ix = path[depth].p_idx; | 1271 | ix = path[depth].p_idx; |
| 1223 | BUG_ON(ix != EXT_FIRST_INDEX(path[depth].p_hdr)); | 1272 | if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) { |
| 1273 | EXT4_ERROR_INODE(inode, | ||
| 1274 | "ix (%d) != EXT_FIRST_INDEX (%d) (depth %d)!", | ||
| 1275 | ix != NULL ? ix->ei_block : 0, | ||
| 1276 | EXT_FIRST_INDEX(path[depth].p_hdr) != NULL ? | ||
| 1277 | EXT_FIRST_INDEX(path[depth].p_hdr)->ei_block : 0, | ||
| 1278 | depth); | ||
| 1279 | return -EIO; | ||
| 1280 | } | ||
| 1224 | } | 1281 | } |
| 1225 | return 0; | 1282 | return 0; |
| 1226 | } | 1283 | } |
| 1227 | 1284 | ||
| 1228 | BUG_ON(*logical < (le32_to_cpu(ex->ee_block) + ee_len)); | 1285 | if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) { |
| 1286 | EXT4_ERROR_INODE(inode, | ||
| 1287 | "logical %d < ee_block %d + ee_len %d!", | ||
| 1288 | *logical, le32_to_cpu(ex->ee_block), ee_len); | ||
| 1289 | return -EIO; | ||
| 1290 | } | ||
| 1229 | 1291 | ||
| 1230 | *logical = le32_to_cpu(ex->ee_block) + ee_len - 1; | 1292 | *logical = le32_to_cpu(ex->ee_block) + ee_len - 1; |
| 1231 | *phys = ext_pblock(ex) + ee_len - 1; | 1293 | *phys = ext_pblock(ex) + ee_len - 1; |
| @@ -1251,7 +1313,10 @@ ext4_ext_search_right(struct inode *inode, struct ext4_ext_path *path, | |||
| 1251 | int depth; /* Note, NOT eh_depth; depth from top of tree */ | 1313 | int depth; /* Note, NOT eh_depth; depth from top of tree */ |
| 1252 | int ee_len; | 1314 | int ee_len; |
| 1253 | 1315 | ||
| 1254 | BUG_ON(path == NULL); | 1316 | if (unlikely(path == NULL)) { |
| 1317 | EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical); | ||
| 1318 | return -EIO; | ||
| 1319 | } | ||
| 1255 | depth = path->p_depth; | 1320 | depth = path->p_depth; |
| 1256 | *phys = 0; | 1321 | *phys = 0; |
| 1257 | 1322 | ||
| @@ -1265,17 +1330,32 @@ ext4_ext_search_right(struct inode *inode, struct ext4_ext_path *path, | |||
| 1265 | ex = path[depth].p_ext; | 1330 | ex = path[depth].p_ext; |
| 1266 | ee_len = ext4_ext_get_actual_len(ex); | 1331 | ee_len = ext4_ext_get_actual_len(ex); |
| 1267 | if (*logical < le32_to_cpu(ex->ee_block)) { | 1332 | if (*logical < le32_to_cpu(ex->ee_block)) { |
| 1268 | BUG_ON(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex); | 1333 | if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) { |
| 1334 | EXT4_ERROR_INODE(inode, | ||
| 1335 | "first_extent(path[%d].p_hdr) != ex", | ||
| 1336 | depth); | ||
| 1337 | return -EIO; | ||
| 1338 | } | ||
| 1269 | while (--depth >= 0) { | 1339 | while (--depth >= 0) { |
| 1270 | ix = path[depth].p_idx; | 1340 | ix = path[depth].p_idx; |
| 1271 | BUG_ON(ix != EXT_FIRST_INDEX(path[depth].p_hdr)); | 1341 | if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) { |
| 1342 | EXT4_ERROR_INODE(inode, | ||
| 1343 | "ix != EXT_FIRST_INDEX *logical %d!", | ||
| 1344 | *logical); | ||
| 1345 | return -EIO; | ||
| 1346 | } | ||
| 1272 | } | 1347 | } |
| 1273 | *logical = le32_to_cpu(ex->ee_block); | 1348 | *logical = le32_to_cpu(ex->ee_block); |
| 1274 | *phys = ext_pblock(ex); | 1349 | *phys = ext_pblock(ex); |
| 1275 | return 0; | 1350 | return 0; |
| 1276 | } | 1351 | } |
| 1277 | 1352 | ||
| 1278 | BUG_ON(*logical < (le32_to_cpu(ex->ee_block) + ee_len)); | 1353 | if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) { |
| 1354 | EXT4_ERROR_INODE(inode, | ||
| 1355 | "logical %d < ee_block %d + ee_len %d!", | ||
| 1356 | *logical, le32_to_cpu(ex->ee_block), ee_len); | ||
| 1357 | return -EIO; | ||
| 1358 | } | ||
| 1279 | 1359 | ||
| 1280 | if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) { | 1360 | if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) { |
| 1281 | /* next allocated block in this leaf */ | 1361 | /* next allocated block in this leaf */ |
| @@ -1414,8 +1494,12 @@ static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode, | |||
| 1414 | 1494 | ||
| 1415 | eh = path[depth].p_hdr; | 1495 | eh = path[depth].p_hdr; |
| 1416 | ex = path[depth].p_ext; | 1496 | ex = path[depth].p_ext; |
| 1417 | BUG_ON(ex == NULL); | 1497 | |
| 1418 | BUG_ON(eh == NULL); | 1498 | if (unlikely(ex == NULL || eh == NULL)) { |
| 1499 | EXT4_ERROR_INODE(inode, | ||
| 1500 | "ex %p == NULL or eh %p == NULL", ex, eh); | ||
| 1501 | return -EIO; | ||
| 1502 | } | ||
| 1419 | 1503 | ||
| 1420 | if (depth == 0) { | 1504 | if (depth == 0) { |
| 1421 | /* there is no tree at all */ | 1505 | /* there is no tree at all */ |
| @@ -1538,8 +1622,9 @@ int ext4_ext_try_to_merge(struct inode *inode, | |||
| 1538 | merge_done = 1; | 1622 | merge_done = 1; |
| 1539 | WARN_ON(eh->eh_entries == 0); | 1623 | WARN_ON(eh->eh_entries == 0); |
| 1540 | if (!eh->eh_entries) | 1624 | if (!eh->eh_entries) |
| 1541 | ext4_error(inode->i_sb, "ext4_ext_try_to_merge", | 1625 | ext4_error(inode->i_sb, |
| 1542 | "inode#%lu, eh->eh_entries = 0!", inode->i_ino); | 1626 | "inode#%lu, eh->eh_entries = 0!", |
| 1627 | inode->i_ino); | ||
| 1543 | } | 1628 | } |
| 1544 | 1629 | ||
| 1545 | return merge_done; | 1630 | return merge_done; |
| @@ -1612,13 +1697,19 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode, | |||
| 1612 | ext4_lblk_t next; | 1697 | ext4_lblk_t next; |
| 1613 | unsigned uninitialized = 0; | 1698 | unsigned uninitialized = 0; |
| 1614 | 1699 | ||
| 1615 | BUG_ON(ext4_ext_get_actual_len(newext) == 0); | 1700 | if (unlikely(ext4_ext_get_actual_len(newext) == 0)) { |
| 1701 | EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0"); | ||
| 1702 | return -EIO; | ||
| 1703 | } | ||
| 1616 | depth = ext_depth(inode); | 1704 | depth = ext_depth(inode); |
| 1617 | ex = path[depth].p_ext; | 1705 | ex = path[depth].p_ext; |
| 1618 | BUG_ON(path[depth].p_hdr == NULL); | 1706 | if (unlikely(path[depth].p_hdr == NULL)) { |
| 1707 | EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth); | ||
| 1708 | return -EIO; | ||
| 1709 | } | ||
| 1619 | 1710 | ||
| 1620 | /* try to insert block into found extent and return */ | 1711 | /* try to insert block into found extent and return */ |
| 1621 | if (ex && (flag != EXT4_GET_BLOCKS_DIO_CREATE_EXT) | 1712 | if (ex && !(flag & EXT4_GET_BLOCKS_PRE_IO) |
| 1622 | && ext4_can_extents_be_merged(inode, ex, newext)) { | 1713 | && ext4_can_extents_be_merged(inode, ex, newext)) { |
| 1623 | ext_debug("append [%d]%d block to %d:[%d]%d (from %llu)\n", | 1714 | ext_debug("append [%d]%d block to %d:[%d]%d (from %llu)\n", |
| 1624 | ext4_ext_is_uninitialized(newext), | 1715 | ext4_ext_is_uninitialized(newext), |
| @@ -1739,7 +1830,7 @@ has_space: | |||
| 1739 | 1830 | ||
| 1740 | merge: | 1831 | merge: |
| 1741 | /* try to merge extents to the right */ | 1832 | /* try to merge extents to the right */ |
| 1742 | if (flag != EXT4_GET_BLOCKS_DIO_CREATE_EXT) | 1833 | if (!(flag & EXT4_GET_BLOCKS_PRE_IO)) |
| 1743 | ext4_ext_try_to_merge(inode, path, nearex); | 1834 | ext4_ext_try_to_merge(inode, path, nearex); |
| 1744 | 1835 | ||
| 1745 | /* try to merge extents to the left */ | 1836 | /* try to merge extents to the left */ |
| @@ -1787,7 +1878,11 @@ int ext4_ext_walk_space(struct inode *inode, ext4_lblk_t block, | |||
| 1787 | } | 1878 | } |
| 1788 | 1879 | ||
| 1789 | depth = ext_depth(inode); | 1880 | depth = ext_depth(inode); |
| 1790 | BUG_ON(path[depth].p_hdr == NULL); | 1881 | if (unlikely(path[depth].p_hdr == NULL)) { |
| 1882 | EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth); | ||
| 1883 | err = -EIO; | ||
| 1884 | break; | ||
| 1885 | } | ||
| 1791 | ex = path[depth].p_ext; | 1886 | ex = path[depth].p_ext; |
| 1792 | next = ext4_ext_next_allocated_block(path); | 1887 | next = ext4_ext_next_allocated_block(path); |
| 1793 | 1888 | ||
| @@ -1838,7 +1933,11 @@ int ext4_ext_walk_space(struct inode *inode, ext4_lblk_t block, | |||
| 1838 | cbex.ec_type = EXT4_EXT_CACHE_EXTENT; | 1933 | cbex.ec_type = EXT4_EXT_CACHE_EXTENT; |
| 1839 | } | 1934 | } |
| 1840 | 1935 | ||
| 1841 | BUG_ON(cbex.ec_len == 0); | 1936 | if (unlikely(cbex.ec_len == 0)) { |
| 1937 | EXT4_ERROR_INODE(inode, "cbex.ec_len == 0"); | ||
| 1938 | err = -EIO; | ||
| 1939 | break; | ||
| 1940 | } | ||
| 1842 | err = func(inode, path, &cbex, ex, cbdata); | 1941 | err = func(inode, path, &cbex, ex, cbdata); |
| 1843 | ext4_ext_drop_refs(path); | 1942 | ext4_ext_drop_refs(path); |
| 1844 | 1943 | ||
| @@ -1952,7 +2051,7 @@ ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block, | |||
| 1952 | 2051 | ||
| 1953 | BUG_ON(cex->ec_type != EXT4_EXT_CACHE_GAP && | 2052 | BUG_ON(cex->ec_type != EXT4_EXT_CACHE_GAP && |
| 1954 | cex->ec_type != EXT4_EXT_CACHE_EXTENT); | 2053 | cex->ec_type != EXT4_EXT_CACHE_EXTENT); |
| 1955 | if (block >= cex->ec_block && block < cex->ec_block + cex->ec_len) { | 2054 | if (in_range(block, cex->ec_block, cex->ec_len)) { |
| 1956 | ex->ee_block = cpu_to_le32(cex->ec_block); | 2055 | ex->ee_block = cpu_to_le32(cex->ec_block); |
| 1957 | ext4_ext_store_pblock(ex, cex->ec_start); | 2056 | ext4_ext_store_pblock(ex, cex->ec_start); |
| 1958 | ex->ee_len = cpu_to_le16(cex->ec_len); | 2057 | ex->ee_len = cpu_to_le16(cex->ec_len); |
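The open-coded range test above is replaced by the in_range() macro added to ext4.h earlier in this patch; the two forms are equivalent as long as first + len does not wrap. A sketch of the equivalence (the helper name is illustrative, not from the patch):

    /* in_range(b, first, len) <=> first <= b <= first + len - 1 */
    static inline int extent_covers_block(ext4_lblk_t b, ext4_lblk_t first,
                                          unsigned int len)
    {
            return b >= first && b < first + len;   /* same set of blocks */
    }
    /* e.g. a cached extent at block 100, length 8, covers 100..107:
     * in_range(107, 100, 8) is true, in_range(108, 100, 8) is false. */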
| @@ -1981,7 +2080,10 @@ static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode, | |||
| 1981 | /* free index block */ | 2080 | /* free index block */ |
| 1982 | path--; | 2081 | path--; |
| 1983 | leaf = idx_pblock(path->p_idx); | 2082 | leaf = idx_pblock(path->p_idx); |
| 1984 | BUG_ON(path->p_hdr->eh_entries == 0); | 2083 | if (unlikely(path->p_hdr->eh_entries == 0)) { |
| 2084 | EXT4_ERROR_INODE(inode, "path->p_hdr->eh_entries == 0"); | ||
| 2085 | return -EIO; | ||
| 2086 | } | ||
| 1985 | err = ext4_ext_get_access(handle, inode, path); | 2087 | err = ext4_ext_get_access(handle, inode, path); |
| 1986 | if (err) | 2088 | if (err) |
| 1987 | return err; | 2089 | return err; |
| @@ -2119,8 +2221,10 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode, | |||
| 2119 | if (!path[depth].p_hdr) | 2221 | if (!path[depth].p_hdr) |
| 2120 | path[depth].p_hdr = ext_block_hdr(path[depth].p_bh); | 2222 | path[depth].p_hdr = ext_block_hdr(path[depth].p_bh); |
| 2121 | eh = path[depth].p_hdr; | 2223 | eh = path[depth].p_hdr; |
| 2122 | BUG_ON(eh == NULL); | 2224 | if (unlikely(path[depth].p_hdr == NULL)) { |
| 2123 | 2225 | EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth); | |
| 2226 | return -EIO; | ||
| 2227 | } | ||
| 2124 | /* find where to start removing */ | 2228 | /* find where to start removing */ |
| 2125 | ex = EXT_LAST_EXTENT(eh); | 2229 | ex = EXT_LAST_EXTENT(eh); |
| 2126 | 2230 | ||
| @@ -2983,7 +3087,7 @@ fix_extent_len: | |||
| 2983 | ext4_ext_dirty(handle, inode, path + depth); | 3087 | ext4_ext_dirty(handle, inode, path + depth); |
| 2984 | return err; | 3088 | return err; |
| 2985 | } | 3089 | } |
| 2986 | static int ext4_convert_unwritten_extents_dio(handle_t *handle, | 3090 | static int ext4_convert_unwritten_extents_endio(handle_t *handle, |
| 2987 | struct inode *inode, | 3091 | struct inode *inode, |
| 2988 | struct ext4_ext_path *path) | 3092 | struct ext4_ext_path *path) |
| 2989 | { | 3093 | { |
| @@ -3063,8 +3167,8 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode, | |||
| 3063 | flags, allocated); | 3167 | flags, allocated); |
| 3064 | ext4_ext_show_leaf(inode, path); | 3168 | ext4_ext_show_leaf(inode, path); |
| 3065 | 3169 | ||
| 3066 | /* DIO get_block() before submit the IO, split the extent */ | 3170 | /* get_block() before submit the IO, split the extent */ |
| 3067 | if (flags == EXT4_GET_BLOCKS_DIO_CREATE_EXT) { | 3171 | if ((flags & EXT4_GET_BLOCKS_PRE_IO)) { |
| 3068 | ret = ext4_split_unwritten_extents(handle, | 3172 | ret = ext4_split_unwritten_extents(handle, |
| 3069 | inode, path, iblock, | 3173 | inode, path, iblock, |
| 3070 | max_blocks, flags); | 3174 | max_blocks, flags); |
| @@ -3074,14 +3178,16 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode, | |||
| 3074 | * completed | 3178 | * completed |
| 3075 | */ | 3179 | */ |
| 3076 | if (io) | 3180 | if (io) |
| 3077 | io->flag = DIO_AIO_UNWRITTEN; | 3181 | io->flag = EXT4_IO_UNWRITTEN; |
| 3078 | else | 3182 | else |
| 3079 | EXT4_I(inode)->i_state |= EXT4_STATE_DIO_UNWRITTEN; | 3183 | ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN); |
| 3184 | if (ext4_should_dioread_nolock(inode)) | ||
| 3185 | set_buffer_uninit(bh_result); | ||
| 3080 | goto out; | 3186 | goto out; |
| 3081 | } | 3187 | } |
| 3082 | /* async DIO end_io complete, convert the filled extent to written */ | 3188 | /* IO end_io complete, convert the filled extent to written */ |
| 3083 | if (flags == EXT4_GET_BLOCKS_DIO_CONVERT_EXT) { | 3189 | if ((flags & EXT4_GET_BLOCKS_CONVERT)) { |
| 3084 | ret = ext4_convert_unwritten_extents_dio(handle, inode, | 3190 | ret = ext4_convert_unwritten_extents_endio(handle, inode, |
| 3085 | path); | 3191 | path); |
| 3086 | if (ret >= 0) | 3192 | if (ret >= 0) |
| 3087 | ext4_update_inode_fsync_trans(handle, inode, 1); | 3193 | ext4_update_inode_fsync_trans(handle, inode, 1); |
| @@ -3185,7 +3291,7 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode, | |||
| 3185 | { | 3291 | { |
| 3186 | struct ext4_ext_path *path = NULL; | 3292 | struct ext4_ext_path *path = NULL; |
| 3187 | struct ext4_extent_header *eh; | 3293 | struct ext4_extent_header *eh; |
| 3188 | struct ext4_extent newex, *ex; | 3294 | struct ext4_extent newex, *ex, *last_ex; |
| 3189 | ext4_fsblk_t newblock; | 3295 | ext4_fsblk_t newblock; |
| 3190 | int err = 0, depth, ret, cache_type; | 3296 | int err = 0, depth, ret, cache_type; |
| 3191 | unsigned int allocated = 0; | 3297 | unsigned int allocated = 0; |
| @@ -3237,10 +3343,10 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode, | |||
| 3237 | * this situation is possible, though, _during_ tree modification; | 3343 | * this situation is possible, though, _during_ tree modification; |
| 3238 | * this is why assert can't be put in ext4_ext_find_extent() | 3344 | * this is why assert can't be put in ext4_ext_find_extent() |
| 3239 | */ | 3345 | */ |
| 3240 | if (path[depth].p_ext == NULL && depth != 0) { | 3346 | if (unlikely(path[depth].p_ext == NULL && depth != 0)) { |
| 3241 | ext4_error(inode->i_sb, __func__, "bad extent address " | 3347 | EXT4_ERROR_INODE(inode, "bad extent address " |
| 3242 | "inode: %lu, iblock: %d, depth: %d", | 3348 | "iblock: %d, depth: %d pblock %lld", |
| 3243 | inode->i_ino, iblock, depth); | 3349 | iblock, depth, path[depth].p_block); |
| 3244 | err = -EIO; | 3350 | err = -EIO; |
| 3245 | goto out2; | 3351 | goto out2; |
| 3246 | } | 3352 | } |
| @@ -3258,7 +3364,7 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode, | |||
| 3258 | */ | 3364 | */ |
| 3259 | ee_len = ext4_ext_get_actual_len(ex); | 3365 | ee_len = ext4_ext_get_actual_len(ex); |
| 3260 | /* if found extent covers block, simply return it */ | 3366 | /* if found extent covers block, simply return it */ |
| 3261 | if (iblock >= ee_block && iblock < ee_block + ee_len) { | 3367 | if (in_range(iblock, ee_block, ee_len)) { |
| 3262 | newblock = iblock - ee_block + ee_start; | 3368 | newblock = iblock - ee_block + ee_start; |
| 3263 | /* number of remaining blocks in the extent */ | 3369 | /* number of remaining blocks in the extent */ |
| 3264 | allocated = ee_len - (iblock - ee_block); | 3370 | allocated = ee_len - (iblock - ee_block); |
| @@ -3350,21 +3456,35 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode, | |||
| 3350 | if (flags & EXT4_GET_BLOCKS_UNINIT_EXT){ | 3456 | if (flags & EXT4_GET_BLOCKS_UNINIT_EXT){ |
| 3351 | ext4_ext_mark_uninitialized(&newex); | 3457 | ext4_ext_mark_uninitialized(&newex); |
| 3352 | /* | 3458 | /* |
| 3353 | * io_end structure was created for every async | 3459 | * io_end structure was created for every IO write to an |
| 3354 | * direct IO write to the middle of the file. | 3460 | * uninitialized extent. To avoid unnecessary conversion, |
| 3355 | * To avoid unecessary convertion for every aio dio rewrite | 3461 | * here we flag the IO that really needs the conversion. |
| 3356 | * to the mid of file, here we flag the IO that is really | ||
| 3357 | * need the convertion. | ||
| 3358 | * For the non-async direct IO case, flag the inode state | 3462 | * For the non-async direct IO case, flag the inode state |
| 3359 | * that we need to perform conversion when IO is done. | 3463 | * that we need to perform conversion when IO is done. |
| 3360 | */ | 3464 | */ |
| 3361 | if (flags == EXT4_GET_BLOCKS_DIO_CREATE_EXT) { | 3465 | if ((flags & EXT4_GET_BLOCKS_PRE_IO)) { |
| 3362 | if (io) | 3466 | if (io) |
| 3363 | io->flag = DIO_AIO_UNWRITTEN; | 3467 | io->flag = EXT4_IO_UNWRITTEN; |
| 3364 | else | 3468 | else |
| 3365 | EXT4_I(inode)->i_state |= | 3469 | ext4_set_inode_state(inode, |
| 3366 | EXT4_STATE_DIO_UNWRITTEN;; | 3470 | EXT4_STATE_DIO_UNWRITTEN); |
| 3471 | } | ||
| 3472 | if (ext4_should_dioread_nolock(inode)) | ||
| 3473 | set_buffer_uninit(bh_result); | ||
| 3474 | } | ||
| 3475 | |||
| 3476 | if (unlikely(EXT4_I(inode)->i_flags & EXT4_EOFBLOCKS_FL)) { | ||
| 3477 | if (unlikely(!eh->eh_entries)) { | ||
| 3478 | EXT4_ERROR_INODE(inode, | ||
| 3479 | "eh->eh_entries == 0 ee_block %d", | ||
| 3480 | ex->ee_block); | ||
| 3481 | err = -EIO; | ||
| 3482 | goto out2; | ||
| 3367 | } | 3483 | } |
| 3484 | last_ex = EXT_LAST_EXTENT(eh); | ||
| 3485 | if (iblock + ar.len > le32_to_cpu(last_ex->ee_block) | ||
| 3486 | + ext4_ext_get_actual_len(last_ex)) | ||
| 3487 | EXT4_I(inode)->i_flags &= ~EXT4_EOFBLOCKS_FL; | ||
| 3368 | } | 3488 | } |
| 3369 | err = ext4_ext_insert_extent(handle, inode, path, &newex, flags); | 3489 | err = ext4_ext_insert_extent(handle, inode, path, &newex, flags); |
| 3370 | if (err) { | 3490 | if (err) { |
| @@ -3499,6 +3619,13 @@ static void ext4_falloc_update_inode(struct inode *inode, | |||
| 3499 | i_size_write(inode, new_size); | 3619 | i_size_write(inode, new_size); |
| 3500 | if (new_size > EXT4_I(inode)->i_disksize) | 3620 | if (new_size > EXT4_I(inode)->i_disksize) |
| 3501 | ext4_update_i_disksize(inode, new_size); | 3621 | ext4_update_i_disksize(inode, new_size); |
| 3622 | } else { | ||
| 3623 | /* | ||
| 3624 | * Mark that we allocate beyond EOF so the subsequent truncate | ||
| 3625 | * can proceed even if the new size is the same as i_size. | ||
| 3626 | */ | ||
| 3627 | if (new_size > i_size_read(inode)) | ||
| 3628 | EXT4_I(inode)->i_flags |= EXT4_EOFBLOCKS_FL; | ||
| 3502 | } | 3629 | } |
| 3503 | 3630 | ||
| 3504 | } | 3631 | } |
| @@ -3603,7 +3730,7 @@ retry: | |||
| 3603 | * Returns 0 on success. | 3730 | * Returns 0 on success. |
| 3604 | */ | 3731 | */ |
| 3605 | int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset, | 3732 | int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset, |
| 3606 | loff_t len) | 3733 | ssize_t len) |
| 3607 | { | 3734 | { |
| 3608 | handle_t *handle; | 3735 | handle_t *handle; |
| 3609 | ext4_lblk_t block; | 3736 | ext4_lblk_t block; |
| @@ -3635,7 +3762,7 @@ int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset, | |||
| 3635 | map_bh.b_state = 0; | 3762 | map_bh.b_state = 0; |
| 3636 | ret = ext4_get_blocks(handle, inode, block, | 3763 | ret = ext4_get_blocks(handle, inode, block, |
| 3637 | max_blocks, &map_bh, | 3764 | max_blocks, &map_bh, |
| 3638 | EXT4_GET_BLOCKS_DIO_CONVERT_EXT); | 3765 | EXT4_GET_BLOCKS_IO_CONVERT_EXT); |
| 3639 | if (ret <= 0) { | 3766 | if (ret <= 0) { |
| 3640 | WARN_ON(ret <= 0); | 3767 | WARN_ON(ret <= 0); |
| 3641 | printk(KERN_ERR "%s: ext4_ext_get_blocks " | 3768 | printk(KERN_ERR "%s: ext4_ext_get_blocks " |
| @@ -3739,7 +3866,7 @@ static int ext4_xattr_fiemap(struct inode *inode, | |||
| 3739 | int error = 0; | 3866 | int error = 0; |
| 3740 | 3867 | ||
| 3741 | /* in-inode? */ | 3868 | /* in-inode? */ |
| 3742 | if (EXT4_I(inode)->i_state & EXT4_STATE_XATTR) { | 3869 | if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) { |
| 3743 | struct ext4_iloc iloc; | 3870 | struct ext4_iloc iloc; |
| 3744 | int offset; /* offset of xattr in inode */ | 3871 | int offset; /* offset of xattr in inode */ |
| 3745 | 3872 | ||
| @@ -3767,7 +3894,6 @@ int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, | |||
| 3767 | __u64 start, __u64 len) | 3894 | __u64 start, __u64 len) |
| 3768 | { | 3895 | { |
| 3769 | ext4_lblk_t start_blk; | 3896 | ext4_lblk_t start_blk; |
| 3770 | ext4_lblk_t len_blks; | ||
| 3771 | int error = 0; | 3897 | int error = 0; |
| 3772 | 3898 | ||
| 3773 | /* fallback to generic here if not in extents fmt */ | 3899 | /* fallback to generic here if not in extents fmt */ |
| @@ -3781,8 +3907,14 @@ int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, | |||
| 3781 | if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) { | 3907 | if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) { |
| 3782 | error = ext4_xattr_fiemap(inode, fieinfo); | 3908 | error = ext4_xattr_fiemap(inode, fieinfo); |
| 3783 | } else { | 3909 | } else { |
| 3910 | ext4_lblk_t len_blks; | ||
| 3911 | __u64 last_blk; | ||
| 3912 | |||
| 3784 | start_blk = start >> inode->i_sb->s_blocksize_bits; | 3913 | start_blk = start >> inode->i_sb->s_blocksize_bits; |
| 3785 | len_blks = len >> inode->i_sb->s_blocksize_bits; | 3914 | last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits; |
| 3915 | if (last_blk >= EXT_MAX_BLOCK) | ||
| 3916 | last_blk = EXT_MAX_BLOCK-1; | ||
| 3917 | len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1; | ||
| 3786 | 3918 | ||
| 3787 | /* | 3919 | /* |
| 3788 | * Walk the extent tree gathering extent information. | 3920 | * Walk the extent tree gathering extent information. |
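The new fiemap range computation works in inclusive block numbers: it derives the last block touched by the byte range, clamps it below EXT_MAX_BLOCK so len_blks cannot overflow an ext4_lblk_t, and only then converts back to a count. A worked sketch with illustrative values (4K blocks):

    __u64 start = 4096, len = 8192;                 /* byte range from userspace     */
    unsigned int blkbits = 12;                      /* 4K block size                 */
    ext4_lblk_t start_blk = start >> blkbits;       /* first block = 1               */
    __u64 last_blk = (start + len - 1) >> blkbits;  /* last block touched = 2        */
    if (last_blk >= EXT_MAX_BLOCK)
            last_blk = EXT_MAX_BLOCK - 1;           /* clamp huge len requests       */
    ext4_lblk_t len_blks = (ext4_lblk_t)last_blk - start_blk + 1;   /* = 2 blocks    */

The old code shifted len directly, which ignored the offset within the starting block and could truncate for very large requests.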
diff --git a/fs/ext4/file.c b/fs/ext4/file.c index 56eee3d796c2..d0776e410f34 100644 --- a/fs/ext4/file.c +++ b/fs/ext4/file.c | |||
| @@ -23,6 +23,7 @@ | |||
| 23 | #include <linux/jbd2.h> | 23 | #include <linux/jbd2.h> |
| 24 | #include <linux/mount.h> | 24 | #include <linux/mount.h> |
| 25 | #include <linux/path.h> | 25 | #include <linux/path.h> |
| 26 | #include <linux/quotaops.h> | ||
| 26 | #include "ext4.h" | 27 | #include "ext4.h" |
| 27 | #include "ext4_jbd2.h" | 28 | #include "ext4_jbd2.h" |
| 28 | #include "xattr.h" | 29 | #include "xattr.h" |
| @@ -35,9 +36,9 @@ | |||
| 35 | */ | 36 | */ |
| 36 | static int ext4_release_file(struct inode *inode, struct file *filp) | 37 | static int ext4_release_file(struct inode *inode, struct file *filp) |
| 37 | { | 38 | { |
| 38 | if (EXT4_I(inode)->i_state & EXT4_STATE_DA_ALLOC_CLOSE) { | 39 | if (ext4_test_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE)) { |
| 39 | ext4_alloc_da_blocks(inode); | 40 | ext4_alloc_da_blocks(inode); |
| 40 | EXT4_I(inode)->i_state &= ~EXT4_STATE_DA_ALLOC_CLOSE; | 41 | ext4_clear_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE); |
| 41 | } | 42 | } |
| 42 | /* if we are the last writer on the inode, drop the block reservation */ | 43 | /* if we are the last writer on the inode, drop the block reservation */ |
| 43 | if ((filp->f_mode & FMODE_WRITE) && | 44 | if ((filp->f_mode & FMODE_WRITE) && |
| @@ -125,7 +126,7 @@ static int ext4_file_open(struct inode * inode, struct file * filp) | |||
| 125 | sb->s_dirt = 1; | 126 | sb->s_dirt = 1; |
| 126 | } | 127 | } |
| 127 | } | 128 | } |
| 128 | return generic_file_open(inode, filp); | 129 | return dquot_file_open(inode, filp); |
| 129 | } | 130 | } |
| 130 | 131 | ||
| 131 | const struct file_operations ext4_file_operations = { | 132 | const struct file_operations ext4_file_operations = { |
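The i_state manipulations in file.c (and throughout this patch) move from open-coded bit arithmetic on EXT4_I(inode)->i_state to ext4_test_inode_state()/ext4_set_inode_state()/ext4_clear_inode_state(), which operate on the new i_state_flags word initialized in ialloc.c below. The helper definitions are not quoted in this section; a plausible shape, offered as an assumption:

    static inline int ext4_test_inode_state(struct inode *inode, int bit)
    {
            return test_bit(bit, &EXT4_I(inode)->i_state_flags);
    }
    static inline void ext4_set_inode_state(struct inode *inode, int bit)
    {
            set_bit(bit, &EXT4_I(inode)->i_state_flags);
    }
    static inline void ext4_clear_inode_state(struct inode *inode, int bit)
    {
            clear_bit(bit, &EXT4_I(inode)->i_state_flags);
    }

Going through the bitops helpers makes the per-inode state updates atomic with respect to each other, which the old read-modify-write of i_state was not.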
diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c index 98bd140aad01..0d0c3239c1cd 100644 --- a/fs/ext4/fsync.c +++ b/fs/ext4/fsync.c | |||
| @@ -63,7 +63,7 @@ int ext4_sync_file(struct file *file, struct dentry *dentry, int datasync) | |||
| 63 | if (inode->i_sb->s_flags & MS_RDONLY) | 63 | if (inode->i_sb->s_flags & MS_RDONLY) |
| 64 | return 0; | 64 | return 0; |
| 65 | 65 | ||
| 66 | ret = flush_aio_dio_completed_IO(inode); | 66 | ret = flush_completed_IO(inode); |
| 67 | if (ret < 0) | 67 | if (ret < 0) |
| 68 | return ret; | 68 | return ret; |
| 69 | 69 | ||
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c index f3624ead4f6c..361c0b9962a8 100644 --- a/fs/ext4/ialloc.c +++ b/fs/ext4/ialloc.c | |||
| @@ -76,8 +76,7 @@ unsigned ext4_init_inode_bitmap(struct super_block *sb, struct buffer_head *bh, | |||
| 76 | /* If checksum is bad mark all blocks and inodes use to prevent | 76 | /* If checksum is bad mark all blocks and inodes use to prevent |
| 77 | * allocation, essentially implementing a per-group read-only flag. */ | 77 | * allocation, essentially implementing a per-group read-only flag. */ |
| 78 | if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) { | 78 | if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) { |
| 79 | ext4_error(sb, __func__, "Checksum bad for group %u", | 79 | ext4_error(sb, "Checksum bad for group %u", block_group); |
| 80 | block_group); | ||
| 81 | ext4_free_blks_set(sb, gdp, 0); | 80 | ext4_free_blks_set(sb, gdp, 0); |
| 82 | ext4_free_inodes_set(sb, gdp, 0); | 81 | ext4_free_inodes_set(sb, gdp, 0); |
| 83 | ext4_itable_unused_set(sb, gdp, 0); | 82 | ext4_itable_unused_set(sb, gdp, 0); |
| @@ -111,8 +110,7 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group) | |||
| 111 | bitmap_blk = ext4_inode_bitmap(sb, desc); | 110 | bitmap_blk = ext4_inode_bitmap(sb, desc); |
| 112 | bh = sb_getblk(sb, bitmap_blk); | 111 | bh = sb_getblk(sb, bitmap_blk); |
| 113 | if (unlikely(!bh)) { | 112 | if (unlikely(!bh)) { |
| 114 | ext4_error(sb, __func__, | 113 | ext4_error(sb, "Cannot read inode bitmap - " |
| 115 | "Cannot read inode bitmap - " | ||
| 116 | "block_group = %u, inode_bitmap = %llu", | 114 | "block_group = %u, inode_bitmap = %llu", |
| 117 | block_group, bitmap_blk); | 115 | block_group, bitmap_blk); |
| 118 | return NULL; | 116 | return NULL; |
| @@ -153,8 +151,7 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group) | |||
| 153 | set_bitmap_uptodate(bh); | 151 | set_bitmap_uptodate(bh); |
| 154 | if (bh_submit_read(bh) < 0) { | 152 | if (bh_submit_read(bh) < 0) { |
| 155 | put_bh(bh); | 153 | put_bh(bh); |
| 156 | ext4_error(sb, __func__, | 154 | ext4_error(sb, "Cannot read inode bitmap - " |
| 157 | "Cannot read inode bitmap - " | ||
| 158 | "block_group = %u, inode_bitmap = %llu", | 155 | "block_group = %u, inode_bitmap = %llu", |
| 159 | block_group, bitmap_blk); | 156 | block_group, bitmap_blk); |
| 160 | return NULL; | 157 | return NULL; |
| @@ -217,10 +214,10 @@ void ext4_free_inode(handle_t *handle, struct inode *inode) | |||
| 217 | * Note: we must free any quota before locking the superblock, | 214 | * Note: we must free any quota before locking the superblock, |
| 218 | * as writing the quota to disk may need the lock as well. | 215 | * as writing the quota to disk may need the lock as well. |
| 219 | */ | 216 | */ |
| 220 | vfs_dq_init(inode); | 217 | dquot_initialize(inode); |
| 221 | ext4_xattr_delete_inode(handle, inode); | 218 | ext4_xattr_delete_inode(handle, inode); |
| 222 | vfs_dq_free_inode(inode); | 219 | dquot_free_inode(inode); |
| 223 | vfs_dq_drop(inode); | 220 | dquot_drop(inode); |
| 224 | 221 | ||
| 225 | is_directory = S_ISDIR(inode->i_mode); | 222 | is_directory = S_ISDIR(inode->i_mode); |
| 226 | 223 | ||
| @@ -229,8 +226,7 @@ void ext4_free_inode(handle_t *handle, struct inode *inode) | |||
| 229 | 226 | ||
| 230 | es = EXT4_SB(sb)->s_es; | 227 | es = EXT4_SB(sb)->s_es; |
| 231 | if (ino < EXT4_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) { | 228 | if (ino < EXT4_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) { |
| 232 | ext4_error(sb, "ext4_free_inode", | 229 | ext4_error(sb, "reserved or nonexistent inode %lu", ino); |
| 233 | "reserved or nonexistent inode %lu", ino); | ||
| 234 | goto error_return; | 230 | goto error_return; |
| 235 | } | 231 | } |
| 236 | block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb); | 232 | block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb); |
| @@ -248,8 +244,7 @@ void ext4_free_inode(handle_t *handle, struct inode *inode) | |||
| 248 | cleared = ext4_clear_bit_atomic(ext4_group_lock_ptr(sb, block_group), | 244 | cleared = ext4_clear_bit_atomic(ext4_group_lock_ptr(sb, block_group), |
| 249 | bit, bitmap_bh->b_data); | 245 | bit, bitmap_bh->b_data); |
| 250 | if (!cleared) | 246 | if (!cleared) |
| 251 | ext4_error(sb, "ext4_free_inode", | 247 | ext4_error(sb, "bit already cleared for inode %lu", ino); |
| 252 | "bit already cleared for inode %lu", ino); | ||
| 253 | else { | 248 | else { |
| 254 | gdp = ext4_get_group_desc(sb, block_group, &bh2); | 249 | gdp = ext4_get_group_desc(sb, block_group, &bh2); |
| 255 | 250 | ||
| @@ -736,8 +731,7 @@ static int ext4_claim_inode(struct super_block *sb, | |||
| 736 | if ((group == 0 && ino < EXT4_FIRST_INO(sb)) || | 731 | if ((group == 0 && ino < EXT4_FIRST_INO(sb)) || |
| 737 | ino > EXT4_INODES_PER_GROUP(sb)) { | 732 | ino > EXT4_INODES_PER_GROUP(sb)) { |
| 738 | ext4_unlock_group(sb, group); | 733 | ext4_unlock_group(sb, group); |
| 739 | ext4_error(sb, __func__, | 734 | ext4_error(sb, "reserved inode or inode > inodes count - " |
| 740 | "reserved inode or inode > inodes count - " | ||
| 741 | "block_group = %u, inode=%lu", group, | 735 | "block_group = %u, inode=%lu", group, |
| 742 | ino + group * EXT4_INODES_PER_GROUP(sb)); | 736 | ino + group * EXT4_INODES_PER_GROUP(sb)); |
| 743 | return 1; | 737 | return 1; |
| @@ -904,7 +898,7 @@ repeat_in_this_group: | |||
| 904 | BUFFER_TRACE(inode_bitmap_bh, | 898 | BUFFER_TRACE(inode_bitmap_bh, |
| 905 | "call ext4_handle_dirty_metadata"); | 899 | "call ext4_handle_dirty_metadata"); |
| 906 | err = ext4_handle_dirty_metadata(handle, | 900 | err = ext4_handle_dirty_metadata(handle, |
| 907 | inode, | 901 | NULL, |
| 908 | inode_bitmap_bh); | 902 | inode_bitmap_bh); |
| 909 | if (err) | 903 | if (err) |
| 910 | goto fail; | 904 | goto fail; |
| @@ -1029,15 +1023,16 @@ got: | |||
| 1029 | inode->i_generation = sbi->s_next_generation++; | 1023 | inode->i_generation = sbi->s_next_generation++; |
| 1030 | spin_unlock(&sbi->s_next_gen_lock); | 1024 | spin_unlock(&sbi->s_next_gen_lock); |
| 1031 | 1025 | ||
| 1032 | ei->i_state = EXT4_STATE_NEW; | 1026 | ei->i_state_flags = 0; |
| 1027 | ext4_set_inode_state(inode, EXT4_STATE_NEW); | ||
| 1033 | 1028 | ||
| 1034 | ei->i_extra_isize = EXT4_SB(sb)->s_want_extra_isize; | 1029 | ei->i_extra_isize = EXT4_SB(sb)->s_want_extra_isize; |
| 1035 | 1030 | ||
| 1036 | ret = inode; | 1031 | ret = inode; |
| 1037 | if (vfs_dq_alloc_inode(inode)) { | 1032 | dquot_initialize(inode); |
| 1038 | err = -EDQUOT; | 1033 | err = dquot_alloc_inode(inode); |
| 1034 | if (err) | ||
| 1039 | goto fail_drop; | 1035 | goto fail_drop; |
| 1040 | } | ||
| 1041 | 1036 | ||
| 1042 | err = ext4_init_acl(handle, inode, dir); | 1037 | err = ext4_init_acl(handle, inode, dir); |
| 1043 | if (err) | 1038 | if (err) |
| @@ -1074,10 +1069,10 @@ really_out: | |||
| 1074 | return ret; | 1069 | return ret; |
| 1075 | 1070 | ||
| 1076 | fail_free_drop: | 1071 | fail_free_drop: |
| 1077 | vfs_dq_free_inode(inode); | 1072 | dquot_free_inode(inode); |
| 1078 | 1073 | ||
| 1079 | fail_drop: | 1074 | fail_drop: |
| 1080 | vfs_dq_drop(inode); | 1075 | dquot_drop(inode); |
| 1081 | inode->i_flags |= S_NOQUOTA; | 1076 | inode->i_flags |= S_NOQUOTA; |
| 1082 | inode->i_nlink = 0; | 1077 | inode->i_nlink = 0; |
| 1083 | unlock_new_inode(inode); | 1078 | unlock_new_inode(inode); |
| @@ -1098,8 +1093,7 @@ struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino) | |||
| 1098 | 1093 | ||
| 1099 | /* Error cases - e2fsck has already cleaned up for us */ | 1094 | /* Error cases - e2fsck has already cleaned up for us */ |
| 1100 | if (ino > max_ino) { | 1095 | if (ino > max_ino) { |
| 1101 | ext4_warning(sb, __func__, | 1096 | ext4_warning(sb, "bad orphan ino %lu! e2fsck was run?", ino); |
| 1102 | "bad orphan ino %lu! e2fsck was run?", ino); | ||
| 1103 | goto error; | 1097 | goto error; |
| 1104 | } | 1098 | } |
| 1105 | 1099 | ||
| @@ -1107,8 +1101,7 @@ struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino) | |||
| 1107 | bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb); | 1101 | bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb); |
| 1108 | bitmap_bh = ext4_read_inode_bitmap(sb, block_group); | 1102 | bitmap_bh = ext4_read_inode_bitmap(sb, block_group); |
| 1109 | if (!bitmap_bh) { | 1103 | if (!bitmap_bh) { |
| 1110 | ext4_warning(sb, __func__, | 1104 | ext4_warning(sb, "inode bitmap error for orphan %lu", ino); |
| 1111 | "inode bitmap error for orphan %lu", ino); | ||
| 1112 | goto error; | 1105 | goto error; |
| 1113 | } | 1106 | } |
| 1114 | 1107 | ||
| @@ -1140,8 +1133,7 @@ iget_failed: | |||
| 1140 | err = PTR_ERR(inode); | 1133 | err = PTR_ERR(inode); |
| 1141 | inode = NULL; | 1134 | inode = NULL; |
| 1142 | bad_orphan: | 1135 | bad_orphan: |
| 1143 | ext4_warning(sb, __func__, | 1136 | ext4_warning(sb, "bad orphan inode %lu! e2fsck was run?", ino); |
| 1144 | "bad orphan inode %lu! e2fsck was run?", ino); | ||
| 1145 | printk(KERN_NOTICE "ext4_test_bit(bit=%d, block=%llu) = %d\n", | 1137 | printk(KERN_NOTICE "ext4_test_bit(bit=%d, block=%llu) = %d\n", |
| 1146 | bit, (unsigned long long)bitmap_bh->b_blocknr, | 1138 | bit, (unsigned long long)bitmap_bh->b_blocknr, |
| 1147 | ext4_test_bit(bit, bitmap_bh->b_data)); | 1139 | ext4_test_bit(bit, bitmap_bh->b_data)); |
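ialloc.c also switches from the vfs_dq_* wrappers to the dquot_* API. The practical difference visible in the hunks above is that dquot_alloc_inode() returns an errno directly instead of a boolean, so the caller no longer hand-crafts -EDQUOT. A condensed sketch of the new allocation/teardown pairing (labels are illustrative):

    dquot_initialize(inode);                /* was vfs_dq_init(inode)             */
    err = dquot_alloc_inode(inode);         /* was: if (vfs_dq_alloc_inode(...))  */
    if (err)                                /*           err = -EDQUOT;           */
            goto fail_drop;
    /* ... */
    fail_free_drop:
            dquot_free_inode(inode);        /* was vfs_dq_free_inode(inode)       */
    fail_drop:
            dquot_drop(inode);              /* was vfs_dq_drop(inode)             */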
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index e11952404e02..986120f30066 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c | |||
| @@ -38,6 +38,7 @@ | |||
| 38 | #include <linux/uio.h> | 38 | #include <linux/uio.h> |
| 39 | #include <linux/bio.h> | 39 | #include <linux/bio.h> |
| 40 | #include <linux/workqueue.h> | 40 | #include <linux/workqueue.h> |
| 41 | #include <linux/kernel.h> | ||
| 41 | 42 | ||
| 42 | #include "ext4_jbd2.h" | 43 | #include "ext4_jbd2.h" |
| 43 | #include "xattr.h" | 44 | #include "xattr.h" |
| @@ -170,6 +171,9 @@ void ext4_delete_inode(struct inode *inode) | |||
| 170 | handle_t *handle; | 171 | handle_t *handle; |
| 171 | int err; | 172 | int err; |
| 172 | 173 | ||
| 174 | if (!is_bad_inode(inode)) | ||
| 175 | dquot_initialize(inode); | ||
| 176 | |||
| 173 | if (ext4_should_order_data(inode)) | 177 | if (ext4_should_order_data(inode)) |
| 174 | ext4_begin_ordered_truncate(inode, 0); | 178 | ext4_begin_ordered_truncate(inode, 0); |
| 175 | truncate_inode_pages(&inode->i_data, 0); | 179 | truncate_inode_pages(&inode->i_data, 0); |
| @@ -194,7 +198,7 @@ void ext4_delete_inode(struct inode *inode) | |||
| 194 | inode->i_size = 0; | 198 | inode->i_size = 0; |
| 195 | err = ext4_mark_inode_dirty(handle, inode); | 199 | err = ext4_mark_inode_dirty(handle, inode); |
| 196 | if (err) { | 200 | if (err) { |
| 197 | ext4_warning(inode->i_sb, __func__, | 201 | ext4_warning(inode->i_sb, |
| 198 | "couldn't mark inode dirty (err %d)", err); | 202 | "couldn't mark inode dirty (err %d)", err); |
| 199 | goto stop_handle; | 203 | goto stop_handle; |
| 200 | } | 204 | } |
| @@ -212,7 +216,7 @@ void ext4_delete_inode(struct inode *inode) | |||
| 212 | if (err > 0) | 216 | if (err > 0) |
| 213 | err = ext4_journal_restart(handle, 3); | 217 | err = ext4_journal_restart(handle, 3); |
| 214 | if (err != 0) { | 218 | if (err != 0) { |
| 215 | ext4_warning(inode->i_sb, __func__, | 219 | ext4_warning(inode->i_sb, |
| 216 | "couldn't extend journal (err %d)", err); | 220 | "couldn't extend journal (err %d)", err); |
| 217 | stop_handle: | 221 | stop_handle: |
| 218 | ext4_journal_stop(handle); | 222 | ext4_journal_stop(handle); |
| @@ -323,8 +327,7 @@ static int ext4_block_to_path(struct inode *inode, | |||
| 323 | offsets[n++] = i_block & (ptrs - 1); | 327 | offsets[n++] = i_block & (ptrs - 1); |
| 324 | final = ptrs; | 328 | final = ptrs; |
| 325 | } else { | 329 | } else { |
| 326 | ext4_warning(inode->i_sb, "ext4_block_to_path", | 330 | ext4_warning(inode->i_sb, "block %lu > max in inode %lu", |
| 327 | "block %lu > max in inode %lu", | ||
| 328 | i_block + direct_blocks + | 331 | i_block + direct_blocks + |
| 329 | indirect_blocks + double_blocks, inode->i_ino); | 332 | indirect_blocks + double_blocks, inode->i_ino); |
| 330 | } | 333 | } |
| @@ -344,7 +347,7 @@ static int __ext4_check_blockref(const char *function, struct inode *inode, | |||
| 344 | if (blk && | 347 | if (blk && |
| 345 | unlikely(!ext4_data_block_valid(EXT4_SB(inode->i_sb), | 348 | unlikely(!ext4_data_block_valid(EXT4_SB(inode->i_sb), |
| 346 | blk, 1))) { | 349 | blk, 1))) { |
| 347 | ext4_error(inode->i_sb, function, | 350 | __ext4_error(inode->i_sb, function, |
| 348 | "invalid block reference %u " | 351 | "invalid block reference %u " |
| 349 | "in inode #%lu", blk, inode->i_ino); | 352 | "in inode #%lu", blk, inode->i_ino); |
| 350 | return -EIO; | 353 | return -EIO; |
| @@ -607,7 +610,14 @@ static int ext4_alloc_blocks(handle_t *handle, struct inode *inode, | |||
| 607 | if (*err) | 610 | if (*err) |
| 608 | goto failed_out; | 611 | goto failed_out; |
| 609 | 612 | ||
| 610 | BUG_ON(current_block + count > EXT4_MAX_BLOCK_FILE_PHYS); | 613 | if (unlikely(current_block + count > EXT4_MAX_BLOCK_FILE_PHYS)) { |
| 614 | EXT4_ERROR_INODE(inode, | ||
| 615 | "current_block %llu + count %lu > %d!", | ||
| 616 | current_block, count, | ||
| 617 | EXT4_MAX_BLOCK_FILE_PHYS); | ||
| 618 | *err = -EIO; | ||
| 619 | goto failed_out; | ||
| 620 | } | ||
| 611 | 621 | ||
| 612 | target -= count; | 622 | target -= count; |
| 613 | /* allocate blocks for indirect blocks */ | 623 | /* allocate blocks for indirect blocks */ |
| @@ -643,7 +653,14 @@ static int ext4_alloc_blocks(handle_t *handle, struct inode *inode, | |||
| 643 | ar.flags = EXT4_MB_HINT_DATA; | 653 | ar.flags = EXT4_MB_HINT_DATA; |
| 644 | 654 | ||
| 645 | current_block = ext4_mb_new_blocks(handle, &ar, err); | 655 | current_block = ext4_mb_new_blocks(handle, &ar, err); |
| 646 | BUG_ON(current_block + ar.len > EXT4_MAX_BLOCK_FILE_PHYS); | 656 | if (unlikely(current_block + ar.len > EXT4_MAX_BLOCK_FILE_PHYS)) { |
| 657 | EXT4_ERROR_INODE(inode, | ||
| 658 | "current_block %llu + ar.len %d > %d!", | ||
| 659 | current_block, ar.len, | ||
| 660 | EXT4_MAX_BLOCK_FILE_PHYS); | ||
| 661 | *err = -EIO; | ||
| 662 | goto failed_out; | ||
| 663 | } | ||
| 647 | 664 | ||
| 648 | if (*err && (target == blks)) { | 665 | if (*err && (target == blks)) { |
| 649 | /* | 666 | /* |
| @@ -1061,6 +1078,7 @@ void ext4_da_update_reserve_space(struct inode *inode, | |||
| 1061 | int mdb_free = 0, allocated_meta_blocks = 0; | 1078 | int mdb_free = 0, allocated_meta_blocks = 0; |
| 1062 | 1079 | ||
| 1063 | spin_lock(&ei->i_block_reservation_lock); | 1080 | spin_lock(&ei->i_block_reservation_lock); |
| 1081 | trace_ext4_da_update_reserve_space(inode, used); | ||
| 1064 | if (unlikely(used > ei->i_reserved_data_blocks)) { | 1082 | if (unlikely(used > ei->i_reserved_data_blocks)) { |
| 1065 | ext4_msg(inode->i_sb, KERN_NOTICE, "%s: ino %lu, used %d " | 1083 | ext4_msg(inode->i_sb, KERN_NOTICE, "%s: ino %lu, used %d " |
| 1066 | "with only %d reserved data blocks\n", | 1084 | "with only %d reserved data blocks\n", |
| @@ -1093,9 +1111,9 @@ void ext4_da_update_reserve_space(struct inode *inode, | |||
| 1093 | 1111 | ||
| 1094 | /* Update quota subsystem */ | 1112 | /* Update quota subsystem */ |
| 1095 | if (quota_claim) { | 1113 | if (quota_claim) { |
| 1096 | vfs_dq_claim_block(inode, used); | 1114 | dquot_claim_block(inode, used); |
| 1097 | if (mdb_free) | 1115 | if (mdb_free) |
| 1098 | vfs_dq_release_reservation_block(inode, mdb_free); | 1116 | dquot_release_reservation_block(inode, mdb_free); |
| 1099 | } else { | 1117 | } else { |
| 1100 | /* | 1118 | /* |
| 1101 | * We did fallocate with an offset that is already delayed | 1119 | * We did fallocate with an offset that is already delayed |
| @@ -1106,8 +1124,8 @@ void ext4_da_update_reserve_space(struct inode *inode, | |||
| 1106 | * that | 1124 | * that |
| 1107 | */ | 1125 | */ |
| 1108 | if (allocated_meta_blocks) | 1126 | if (allocated_meta_blocks) |
| 1109 | vfs_dq_claim_block(inode, allocated_meta_blocks); | 1127 | dquot_claim_block(inode, allocated_meta_blocks); |
| 1110 | vfs_dq_release_reservation_block(inode, mdb_free + used); | 1128 | dquot_release_reservation_block(inode, mdb_free + used); |
| 1111 | } | 1129 | } |
| 1112 | 1130 | ||
| 1113 | /* | 1131 | /* |
| @@ -1124,7 +1142,7 @@ static int check_block_validity(struct inode *inode, const char *msg, | |||
| 1124 | sector_t logical, sector_t phys, int len) | 1142 | sector_t logical, sector_t phys, int len) |
| 1125 | { | 1143 | { |
| 1126 | if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), phys, len)) { | 1144 | if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), phys, len)) { |
| 1127 | ext4_error(inode->i_sb, msg, | 1145 | __ext4_error(inode->i_sb, msg, |
| 1128 | "inode #%lu logical block %llu mapped to %llu " | 1146 | "inode #%lu logical block %llu mapped to %llu " |
| 1129 | "(size %d)", inode->i_ino, | 1147 | "(size %d)", inode->i_ino, |
| 1130 | (unsigned long long) logical, | 1148 | (unsigned long long) logical, |
| @@ -1306,7 +1324,7 @@ int ext4_get_blocks(handle_t *handle, struct inode *inode, sector_t block, | |||
| 1306 | * i_data's format changing. Force the migrate | 1324 | * i_data's format changing. Force the migrate |
| 1307 | * to fail by clearing migrate flags | 1325 | * to fail by clearing migrate flags |
| 1308 | */ | 1326 | */ |
| 1309 | EXT4_I(inode)->i_state &= ~EXT4_STATE_EXT_MIGRATE; | 1327 | ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE); |
| 1310 | } | 1328 | } |
| 1311 | 1329 | ||
| 1312 | /* | 1330 | /* |
| @@ -1534,6 +1552,8 @@ static void ext4_truncate_failed_write(struct inode *inode) | |||
| 1534 | ext4_truncate(inode); | 1552 | ext4_truncate(inode); |
| 1535 | } | 1553 | } |
| 1536 | 1554 | ||
| 1555 | static int ext4_get_block_write(struct inode *inode, sector_t iblock, | ||
| 1556 | struct buffer_head *bh_result, int create); | ||
| 1537 | static int ext4_write_begin(struct file *file, struct address_space *mapping, | 1557 | static int ext4_write_begin(struct file *file, struct address_space *mapping, |
| 1538 | loff_t pos, unsigned len, unsigned flags, | 1558 | loff_t pos, unsigned len, unsigned flags, |
| 1539 | struct page **pagep, void **fsdata) | 1559 | struct page **pagep, void **fsdata) |
| @@ -1575,8 +1595,12 @@ retry: | |||
| 1575 | } | 1595 | } |
| 1576 | *pagep = page; | 1596 | *pagep = page; |
| 1577 | 1597 | ||
| 1578 | ret = block_write_begin(file, mapping, pos, len, flags, pagep, fsdata, | 1598 | if (ext4_should_dioread_nolock(inode)) |
| 1579 | ext4_get_block); | 1599 | ret = block_write_begin(file, mapping, pos, len, flags, pagep, |
| 1600 | fsdata, ext4_get_block_write); | ||
| 1601 | else | ||
| 1602 | ret = block_write_begin(file, mapping, pos, len, flags, pagep, | ||
| 1603 | fsdata, ext4_get_block); | ||
| 1580 | 1604 | ||
| 1581 | if (!ret && ext4_should_journal_data(inode)) { | 1605 | if (!ret && ext4_should_journal_data(inode)) { |
| 1582 | ret = walk_page_buffers(handle, page_buffers(page), | 1606 | ret = walk_page_buffers(handle, page_buffers(page), |
| @@ -1793,7 +1817,7 @@ static int ext4_journalled_write_end(struct file *file, | |||
| 1793 | new_i_size = pos + copied; | 1817 | new_i_size = pos + copied; |
| 1794 | if (new_i_size > inode->i_size) | 1818 | if (new_i_size > inode->i_size) |
| 1795 | i_size_write(inode, pos+copied); | 1819 | i_size_write(inode, pos+copied); |
| 1796 | EXT4_I(inode)->i_state |= EXT4_STATE_JDATA; | 1820 | ext4_set_inode_state(inode, EXT4_STATE_JDATA); |
| 1797 | if (new_i_size > EXT4_I(inode)->i_disksize) { | 1821 | if (new_i_size > EXT4_I(inode)->i_disksize) { |
| 1798 | ext4_update_i_disksize(inode, new_i_size); | 1822 | ext4_update_i_disksize(inode, new_i_size); |
| 1799 | ret2 = ext4_mark_inode_dirty(handle, inode); | 1823 | ret2 = ext4_mark_inode_dirty(handle, inode); |
| @@ -1836,6 +1860,7 @@ static int ext4_da_reserve_space(struct inode *inode, sector_t lblock) | |||
| 1836 | struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); | 1860 | struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); |
| 1837 | struct ext4_inode_info *ei = EXT4_I(inode); | 1861 | struct ext4_inode_info *ei = EXT4_I(inode); |
| 1838 | unsigned long md_needed, md_reserved; | 1862 | unsigned long md_needed, md_reserved; |
| 1863 | int ret; | ||
| 1839 | 1864 | ||
| 1840 | /* | 1865 | /* |
| 1841 | * recalculate the amount of metadata blocks to reserve | 1866 | * recalculate the amount of metadata blocks to reserve |
| @@ -1846,6 +1871,7 @@ repeat: | |||
| 1846 | spin_lock(&ei->i_block_reservation_lock); | 1871 | spin_lock(&ei->i_block_reservation_lock); |
| 1847 | md_reserved = ei->i_reserved_meta_blocks; | 1872 | md_reserved = ei->i_reserved_meta_blocks; |
| 1848 | md_needed = ext4_calc_metadata_amount(inode, lblock); | 1873 | md_needed = ext4_calc_metadata_amount(inode, lblock); |
| 1874 | trace_ext4_da_reserve_space(inode, md_needed); | ||
| 1849 | spin_unlock(&ei->i_block_reservation_lock); | 1875 | spin_unlock(&ei->i_block_reservation_lock); |
| 1850 | 1876 | ||
| 1851 | /* | 1877 | /* |
| @@ -1853,11 +1879,12 @@ repeat: | |||
| 1853 | * later. Real quota accounting is done at pages writeout | 1879 | * later. Real quota accounting is done at pages writeout |
| 1854 | * time. | 1880 | * time. |
| 1855 | */ | 1881 | */ |
| 1856 | if (vfs_dq_reserve_block(inode, md_needed + 1)) | 1882 | ret = dquot_reserve_block(inode, md_needed + 1); |
| 1857 | return -EDQUOT; | 1883 | if (ret) |
| 1884 | return ret; | ||
| 1858 | 1885 | ||
| 1859 | if (ext4_claim_free_blocks(sbi, md_needed + 1)) { | 1886 | if (ext4_claim_free_blocks(sbi, md_needed + 1)) { |
| 1860 | vfs_dq_release_reservation_block(inode, md_needed + 1); | 1887 | dquot_release_reservation_block(inode, md_needed + 1); |
| 1861 | if (ext4_should_retry_alloc(inode->i_sb, &retries)) { | 1888 | if (ext4_should_retry_alloc(inode->i_sb, &retries)) { |
| 1862 | yield(); | 1889 | yield(); |
| 1863 | goto repeat; | 1890 | goto repeat; |
| @@ -1914,7 +1941,7 @@ static void ext4_da_release_space(struct inode *inode, int to_free) | |||
| 1914 | 1941 | ||
| 1915 | spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); | 1942 | spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); |
| 1916 | 1943 | ||
| 1917 | vfs_dq_release_reservation_block(inode, to_free); | 1944 | dquot_release_reservation_block(inode, to_free); |
| 1918 | } | 1945 | } |
| 1919 | 1946 | ||
| 1920 | static void ext4_da_page_release_reservation(struct page *page, | 1947 | static void ext4_da_page_release_reservation(struct page *page, |
| @@ -2091,6 +2118,8 @@ static void mpage_put_bnr_to_bhs(struct mpage_da_data *mpd, sector_t logical, | |||
| 2091 | } else if (buffer_mapped(bh)) | 2118 | } else if (buffer_mapped(bh)) |
| 2092 | BUG_ON(bh->b_blocknr != pblock); | 2119 | BUG_ON(bh->b_blocknr != pblock); |
| 2093 | 2120 | ||
| 2121 | if (buffer_uninit(exbh)) | ||
| 2122 | set_buffer_uninit(bh); | ||
| 2094 | cur_logical++; | 2123 | cur_logical++; |
| 2095 | pblock++; | 2124 | pblock++; |
| 2096 | } while ((bh = bh->b_this_page) != head); | 2125 | } while ((bh = bh->b_this_page) != head); |
| @@ -2133,17 +2162,16 @@ static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd, | |||
| 2133 | break; | 2162 | break; |
| 2134 | for (i = 0; i < nr_pages; i++) { | 2163 | for (i = 0; i < nr_pages; i++) { |
| 2135 | struct page *page = pvec.pages[i]; | 2164 | struct page *page = pvec.pages[i]; |
| 2136 | index = page->index; | 2165 | if (page->index > end) |
| 2137 | if (index > end) | ||
| 2138 | break; | 2166 | break; |
| 2139 | index++; | ||
| 2140 | |||
| 2141 | BUG_ON(!PageLocked(page)); | 2167 | BUG_ON(!PageLocked(page)); |
| 2142 | BUG_ON(PageWriteback(page)); | 2168 | BUG_ON(PageWriteback(page)); |
| 2143 | block_invalidatepage(page, 0); | 2169 | block_invalidatepage(page, 0); |
| 2144 | ClearPageUptodate(page); | 2170 | ClearPageUptodate(page); |
| 2145 | unlock_page(page); | 2171 | unlock_page(page); |
| 2146 | } | 2172 | } |
| 2173 | index = pvec.pages[nr_pages - 1]->index + 1; | ||
| 2174 | pagevec_release(&pvec); | ||
| 2147 | } | 2175 | } |
| 2148 | return; | 2176 | return; |
| 2149 | } | 2177 | } |
| @@ -2220,6 +2248,8 @@ static int mpage_da_map_blocks(struct mpage_da_data *mpd) | |||
| 2220 | */ | 2248 | */ |
| 2221 | new.b_state = 0; | 2249 | new.b_state = 0; |
| 2222 | get_blocks_flags = EXT4_GET_BLOCKS_CREATE; | 2250 | get_blocks_flags = EXT4_GET_BLOCKS_CREATE; |
| 2251 | if (ext4_should_dioread_nolock(mpd->inode)) | ||
| 2252 | get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT; | ||
| 2223 | if (mpd->b_state & (1 << BH_Delay)) | 2253 | if (mpd->b_state & (1 << BH_Delay)) |
| 2224 | get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE; | 2254 | get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE; |
| 2225 | 2255 | ||
| @@ -2630,11 +2660,14 @@ static int __ext4_journalled_writepage(struct page *page, | |||
| 2630 | ret = err; | 2660 | ret = err; |
| 2631 | 2661 | ||
| 2632 | walk_page_buffers(handle, page_bufs, 0, len, NULL, bput_one); | 2662 | walk_page_buffers(handle, page_bufs, 0, len, NULL, bput_one); |
| 2633 | EXT4_I(inode)->i_state |= EXT4_STATE_JDATA; | 2663 | ext4_set_inode_state(inode, EXT4_STATE_JDATA); |
| 2634 | out: | 2664 | out: |
| 2635 | return ret; | 2665 | return ret; |
| 2636 | } | 2666 | } |
| 2637 | 2667 | ||
| 2668 | static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode); | ||
| 2669 | static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate); | ||
| 2670 | |||
| 2638 | /* | 2671 | /* |
| 2639 | * Note that we don't need to start a transaction unless we're journaling data | 2672 | * Note that we don't need to start a transaction unless we're journaling data |
| 2640 | * because we should have holes filled from ext4_page_mkwrite(). We even don't | 2673 | * because we should have holes filled from ext4_page_mkwrite(). We even don't |
| @@ -2682,7 +2715,7 @@ static int ext4_writepage(struct page *page, | |||
| 2682 | int ret = 0; | 2715 | int ret = 0; |
| 2683 | loff_t size; | 2716 | loff_t size; |
| 2684 | unsigned int len; | 2717 | unsigned int len; |
| 2685 | struct buffer_head *page_bufs; | 2718 | struct buffer_head *page_bufs = NULL; |
| 2686 | struct inode *inode = page->mapping->host; | 2719 | struct inode *inode = page->mapping->host; |
| 2687 | 2720 | ||
| 2688 | trace_ext4_writepage(inode, page); | 2721 | trace_ext4_writepage(inode, page); |
| @@ -2758,7 +2791,11 @@ static int ext4_writepage(struct page *page, | |||
| 2758 | 2791 | ||
| 2759 | if (test_opt(inode->i_sb, NOBH) && ext4_should_writeback_data(inode)) | 2792 | if (test_opt(inode->i_sb, NOBH) && ext4_should_writeback_data(inode)) |
| 2760 | ret = nobh_writepage(page, noalloc_get_block_write, wbc); | 2793 | ret = nobh_writepage(page, noalloc_get_block_write, wbc); |
| 2761 | else | 2794 | else if (page_bufs && buffer_uninit(page_bufs)) { |
| 2795 | ext4_set_bh_endio(page_bufs, inode); | ||
| 2796 | ret = block_write_full_page_endio(page, noalloc_get_block_write, | ||
| 2797 | wbc, ext4_end_io_buffer_write); | ||
| 2798 | } else | ||
| 2762 | ret = block_write_full_page(page, noalloc_get_block_write, | 2799 | ret = block_write_full_page(page, noalloc_get_block_write, |
| 2763 | wbc); | 2800 | wbc); |
| 2764 | 2801 | ||
| @@ -3301,7 +3338,8 @@ static sector_t ext4_bmap(struct address_space *mapping, sector_t block) | |||
| 3301 | filemap_write_and_wait(mapping); | 3338 | filemap_write_and_wait(mapping); |
| 3302 | } | 3339 | } |
| 3303 | 3340 | ||
| 3304 | if (EXT4_JOURNAL(inode) && EXT4_I(inode)->i_state & EXT4_STATE_JDATA) { | 3341 | if (EXT4_JOURNAL(inode) && |
| 3342 | ext4_test_inode_state(inode, EXT4_STATE_JDATA)) { | ||
| 3305 | /* | 3343 | /* |
| 3306 | * This is a REALLY heavyweight approach, but the use of | 3344 | * This is a REALLY heavyweight approach, but the use of |
| 3307 | * bmap on dirty files is expected to be extremely rare: | 3345 | * bmap on dirty files is expected to be extremely rare: |
| @@ -3320,7 +3358,7 @@ static sector_t ext4_bmap(struct address_space *mapping, sector_t block) | |||
| 3320 | * everything they get. | 3358 | * everything they get. |
| 3321 | */ | 3359 | */ |
| 3322 | 3360 | ||
| 3323 | EXT4_I(inode)->i_state &= ~EXT4_STATE_JDATA; | 3361 | ext4_clear_inode_state(inode, EXT4_STATE_JDATA); |
| 3324 | journal = EXT4_JOURNAL(inode); | 3362 | journal = EXT4_JOURNAL(inode); |
| 3325 | jbd2_journal_lock_updates(journal); | 3363 | jbd2_journal_lock_updates(journal); |
| 3326 | err = jbd2_journal_flush(journal); | 3364 | err = jbd2_journal_flush(journal); |
| @@ -3345,11 +3383,45 @@ ext4_readpages(struct file *file, struct address_space *mapping, | |||
| 3345 | return mpage_readpages(mapping, pages, nr_pages, ext4_get_block); | 3383 | return mpage_readpages(mapping, pages, nr_pages, ext4_get_block); |
| 3346 | } | 3384 | } |
| 3347 | 3385 | ||
| 3386 | static void ext4_free_io_end(ext4_io_end_t *io) | ||
| 3387 | { | ||
| 3388 | BUG_ON(!io); | ||
| 3389 | if (io->page) | ||
| 3390 | put_page(io->page); | ||
| 3391 | iput(io->inode); | ||
| 3392 | kfree(io); | ||
| 3393 | } | ||
| 3394 | |||
| 3395 | static void ext4_invalidatepage_free_endio(struct page *page, unsigned long offset) | ||
| 3396 | { | ||
| 3397 | struct buffer_head *head, *bh; | ||
| 3398 | unsigned int curr_off = 0; | ||
| 3399 | |||
| 3400 | if (!page_has_buffers(page)) | ||
| 3401 | return; | ||
| 3402 | head = bh = page_buffers(page); | ||
| 3403 | do { | ||
| 3404 | if (offset <= curr_off && test_clear_buffer_uninit(bh) | ||
| 3405 | && bh->b_private) { | ||
| 3406 | ext4_free_io_end(bh->b_private); | ||
| 3407 | bh->b_private = NULL; | ||
| 3408 | bh->b_end_io = NULL; | ||
| 3409 | } | ||
| 3410 | curr_off = curr_off + bh->b_size; | ||
| 3411 | bh = bh->b_this_page; | ||
| 3412 | } while (bh != head); | ||
| 3413 | } | ||
| 3414 | |||
| 3348 | static void ext4_invalidatepage(struct page *page, unsigned long offset) | 3415 | static void ext4_invalidatepage(struct page *page, unsigned long offset) |
| 3349 | { | 3416 | { |
| 3350 | journal_t *journal = EXT4_JOURNAL(page->mapping->host); | 3417 | journal_t *journal = EXT4_JOURNAL(page->mapping->host); |
| 3351 | 3418 | ||
| 3352 | /* | 3419 | /* |
| 3420 | * free any io_end structure allocated for buffers to be discarded | ||
| 3421 | */ | ||
| 3422 | if (ext4_should_dioread_nolock(page->mapping->host)) | ||
| 3423 | ext4_invalidatepage_free_endio(page, offset); | ||
| 3424 | /* | ||
| 3353 | * If it's a full truncate we just forget about the pending dirtying | 3425 | * If it's a full truncate we just forget about the pending dirtying |
| 3354 | */ | 3426 | */ |
| 3355 | if (offset == 0) | 3427 | if (offset == 0) |
| @@ -3420,7 +3492,14 @@ static ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb, | |||
| 3420 | } | 3492 | } |
| 3421 | 3493 | ||
| 3422 | retry: | 3494 | retry: |
| 3423 | ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, | 3495 | if (rw == READ && ext4_should_dioread_nolock(inode)) |
| 3496 | ret = blockdev_direct_IO_no_locking(rw, iocb, inode, | ||
| 3497 | inode->i_sb->s_bdev, iov, | ||
| 3498 | offset, nr_segs, | ||
| 3499 | ext4_get_block, NULL); | ||
| 3500 | else | ||
| 3501 | ret = blockdev_direct_IO(rw, iocb, inode, | ||
| 3502 | inode->i_sb->s_bdev, iov, | ||
| 3424 | offset, nr_segs, | 3503 | offset, nr_segs, |
| 3425 | ext4_get_block, NULL); | 3504 | ext4_get_block, NULL); |
| 3426 | if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) | 3505 | if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) |
| @@ -3436,6 +3515,9 @@ retry: | |||
| 3436 | * but cannot extend i_size. Bail out and pretend | 3515 | * but cannot extend i_size. Bail out and pretend |
| 3437 | * the write failed... */ | 3516 | * the write failed... */ |
| 3438 | ret = PTR_ERR(handle); | 3517 | ret = PTR_ERR(handle); |
| 3518 | if (inode->i_nlink) | ||
| 3519 | ext4_orphan_del(NULL, inode); | ||
| 3520 | |||
| 3439 | goto out; | 3521 | goto out; |
| 3440 | } | 3522 | } |
| 3441 | if (inode->i_nlink) | 3523 | if (inode->i_nlink) |
| @@ -3463,75 +3545,63 @@ out: | |||
| 3463 | return ret; | 3545 | return ret; |
| 3464 | } | 3546 | } |
| 3465 | 3547 | ||
| 3466 | static int ext4_get_block_dio_write(struct inode *inode, sector_t iblock, | 3548 | static int ext4_get_block_write(struct inode *inode, sector_t iblock, |
| 3467 | struct buffer_head *bh_result, int create) | 3549 | struct buffer_head *bh_result, int create) |
| 3468 | { | 3550 | { |
| 3469 | handle_t *handle = NULL; | 3551 | handle_t *handle = ext4_journal_current_handle(); |
| 3470 | int ret = 0; | 3552 | int ret = 0; |
| 3471 | unsigned max_blocks = bh_result->b_size >> inode->i_blkbits; | 3553 | unsigned max_blocks = bh_result->b_size >> inode->i_blkbits; |
| 3472 | int dio_credits; | 3554 | int dio_credits; |
| 3555 | int started = 0; | ||
| 3473 | 3556 | ||
| 3474 | ext4_debug("ext4_get_block_dio_write: inode %lu, create flag %d\n", | 3557 | ext4_debug("ext4_get_block_write: inode %lu, create flag %d\n", |
| 3475 | inode->i_ino, create); | 3558 | inode->i_ino, create); |
| 3476 | /* | 3559 | /* |
| 3477 | * DIO VFS code passes create = 0 flag for write to | 3560 | * ext4_get_block in preparation for a DIO write or buffer write. |
| 3478 | * the middle of file. It does this to avoid block | 3561 | * We allocate an uninitialized extent if blocks haven't been allocated. |
| 3479 | * allocation for holes, to prevent expose stale data | 3562 | * The extent will be converted to initialized after IO completes. |
| 3480 | * out when there is parallel buffered read (which does | ||
| 3481 | * not hold the i_mutex lock) while direct IO write has | ||
| 3482 | * not completed. DIO request on holes finally falls back | ||
| 3483 | * to buffered IO for this reason. | ||
| 3484 | * | ||
| 3485 | * For ext4 extent based file, since we support fallocate, | ||
| 3486 | * new allocated extent as uninitialized, for holes, we | ||
| 3487 | * could fallocate blocks for holes, thus parallel | ||
| 3488 | * buffered IO read will zero out the page when read on | ||
| 3489 | * a hole while parallel DIO write to the hole has not completed. | ||
| 3490 | * | ||
| 3491 | * when we come here, we know it's a direct IO write to | ||
| 3492 | * to the middle of file (<i_size) | ||
| 3493 | * so it's safe to override the create flag from VFS. | ||
| 3494 | */ | 3563 | */ |
| 3495 | create = EXT4_GET_BLOCKS_DIO_CREATE_EXT; | 3564 | create = EXT4_GET_BLOCKS_IO_CREATE_EXT; |
| 3496 | 3565 | ||
| 3497 | if (max_blocks > DIO_MAX_BLOCKS) | 3566 | if (!handle) { |
| 3498 | max_blocks = DIO_MAX_BLOCKS; | 3567 | if (max_blocks > DIO_MAX_BLOCKS) |
| 3499 | dio_credits = ext4_chunk_trans_blocks(inode, max_blocks); | 3568 | max_blocks = DIO_MAX_BLOCKS; |
| 3500 | handle = ext4_journal_start(inode, dio_credits); | 3569 | dio_credits = ext4_chunk_trans_blocks(inode, max_blocks); |
| 3501 | if (IS_ERR(handle)) { | 3570 | handle = ext4_journal_start(inode, dio_credits); |
| 3502 | ret = PTR_ERR(handle); | 3571 | if (IS_ERR(handle)) { |
| 3503 | goto out; | 3572 | ret = PTR_ERR(handle); |
| 3573 | goto out; | ||
| 3574 | } | ||
| 3575 | started = 1; | ||
| 3504 | } | 3576 | } |
| 3577 | |||
| 3505 | ret = ext4_get_blocks(handle, inode, iblock, max_blocks, bh_result, | 3578 | ret = ext4_get_blocks(handle, inode, iblock, max_blocks, bh_result, |
| 3506 | create); | 3579 | create); |
| 3507 | if (ret > 0) { | 3580 | if (ret > 0) { |
| 3508 | bh_result->b_size = (ret << inode->i_blkbits); | 3581 | bh_result->b_size = (ret << inode->i_blkbits); |
| 3509 | ret = 0; | 3582 | ret = 0; |
| 3510 | } | 3583 | } |
| 3511 | ext4_journal_stop(handle); | 3584 | if (started) |
| 3585 | ext4_journal_stop(handle); | ||
| 3512 | out: | 3586 | out: |
| 3513 | return ret; | 3587 | return ret; |
| 3514 | } | 3588 | } |
| 3515 | 3589 | ||
| 3516 | static void ext4_free_io_end(ext4_io_end_t *io) | 3590 | static void dump_completed_IO(struct inode * inode) |
| 3517 | { | ||
| 3518 | BUG_ON(!io); | ||
| 3519 | iput(io->inode); | ||
| 3520 | kfree(io); | ||
| 3521 | } | ||
| 3522 | static void dump_aio_dio_list(struct inode * inode) | ||
| 3523 | { | 3591 | { |
| 3524 | #ifdef EXT4_DEBUG | 3592 | #ifdef EXT4_DEBUG |
| 3525 | struct list_head *cur, *before, *after; | 3593 | struct list_head *cur, *before, *after; |
| 3526 | ext4_io_end_t *io, *io0, *io1; | 3594 | ext4_io_end_t *io, *io0, *io1; |
| 3595 | unsigned long flags; | ||
| 3527 | 3596 | ||
| 3528 | if (list_empty(&EXT4_I(inode)->i_aio_dio_complete_list)){ | 3597 | if (list_empty(&EXT4_I(inode)->i_completed_io_list)){ |
| 3529 | ext4_debug("inode %lu aio dio list is empty\n", inode->i_ino); | 3598 | ext4_debug("inode %lu completed_io list is empty\n", inode->i_ino); |
| 3530 | return; | 3599 | return; |
| 3531 | } | 3600 | } |
| 3532 | 3601 | ||
| 3533 | ext4_debug("Dump inode %lu aio_dio_completed_IO list \n", inode->i_ino); | 3602 | ext4_debug("Dump inode %lu completed_io list \n", inode->i_ino); |
| 3534 | list_for_each_entry(io, &EXT4_I(inode)->i_aio_dio_complete_list, list){ | 3603 | spin_lock_irqsave(&EXT4_I(inode)->i_completed_io_lock, flags); |
| 3604 | list_for_each_entry(io, &EXT4_I(inode)->i_completed_io_list, list){ | ||
| 3535 | cur = &io->list; | 3605 | cur = &io->list; |
| 3536 | before = cur->prev; | 3606 | before = cur->prev; |
| 3537 | io0 = container_of(before, ext4_io_end_t, list); | 3607 | io0 = container_of(before, ext4_io_end_t, list); |
| @@ -3541,32 +3611,31 @@ static void dump_aio_dio_list(struct inode * inode) | |||
| 3541 | ext4_debug("io 0x%p from inode %lu,prev 0x%p,next 0x%p\n", | 3611 | ext4_debug("io 0x%p from inode %lu,prev 0x%p,next 0x%p\n", |
| 3542 | io, inode->i_ino, io0, io1); | 3612 | io, inode->i_ino, io0, io1); |
| 3543 | } | 3613 | } |
| 3614 | spin_unlock_irqrestore(&EXT4_I(inode)->i_completed_io_lock, flags); | ||
| 3544 | #endif | 3615 | #endif |
| 3545 | } | 3616 | } |
| 3546 | 3617 | ||
| 3547 | /* | 3618 | /* |
| 3548 | * check a range of space and convert unwritten extents to written. | 3619 | * check a range of space and convert unwritten extents to written. |
| 3549 | */ | 3620 | */ |
| 3550 | static int ext4_end_aio_dio_nolock(ext4_io_end_t *io) | 3621 | static int ext4_end_io_nolock(ext4_io_end_t *io) |
| 3551 | { | 3622 | { |
| 3552 | struct inode *inode = io->inode; | 3623 | struct inode *inode = io->inode; |
| 3553 | loff_t offset = io->offset; | 3624 | loff_t offset = io->offset; |
| 3554 | size_t size = io->size; | 3625 | ssize_t size = io->size; |
| 3555 | int ret = 0; | 3626 | int ret = 0; |
| 3556 | 3627 | ||
| 3557 | ext4_debug("end_aio_dio_onlock: io 0x%p from inode %lu,list->next 0x%p," | 3628 | ext4_debug("ext4_end_io_nolock: io 0x%p from inode %lu,list->next 0x%p," |
| 3558 | "list->prev 0x%p\n", | 3629 | "list->prev 0x%p\n", |
| 3559 | io, inode->i_ino, io->list.next, io->list.prev); | 3630 | io, inode->i_ino, io->list.next, io->list.prev); |
| 3560 | 3631 | ||
| 3561 | if (list_empty(&io->list)) | 3632 | if (list_empty(&io->list)) |
| 3562 | return ret; | 3633 | return ret; |
| 3563 | 3634 | ||
| 3564 | if (io->flag != DIO_AIO_UNWRITTEN) | 3635 | if (io->flag != EXT4_IO_UNWRITTEN) |
| 3565 | return ret; | 3636 | return ret; |
| 3566 | 3637 | ||
| 3567 | if (offset + size <= i_size_read(inode)) | 3638 | ret = ext4_convert_unwritten_extents(inode, offset, size); |
| 3568 | ret = ext4_convert_unwritten_extents(inode, offset, size); | ||
| 3569 | |||
| 3570 | if (ret < 0) { | 3639 | if (ret < 0) { |
| 3571 | printk(KERN_EMERG "%s: failed to convert unwritten" | 3640 | printk(KERN_EMERG "%s: failed to convert unwritten" |
| 3572 | "extents to written extents, error is %d" | 3641 | "extents to written extents, error is %d" |
| @@ -3579,50 +3648,64 @@ static int ext4_end_aio_dio_nolock(ext4_io_end_t *io) | |||
| 3579 | io->flag = 0; | 3648 | io->flag = 0; |
| 3580 | return ret; | 3649 | return ret; |
| 3581 | } | 3650 | } |
| 3651 | |||
| 3582 | /* | 3652 | /* |
| 3583 | * work on completed aio dio IO, to convert unwritten extents to written extents | 3653 | * work on completed aio dio IO, to convert unwritten extents to written extents |
| 3584 | */ | 3654 | */ |
| 3585 | static void ext4_end_aio_dio_work(struct work_struct *work) | 3655 | static void ext4_end_io_work(struct work_struct *work) |
| 3586 | { | 3656 | { |
| 3587 | ext4_io_end_t *io = container_of(work, ext4_io_end_t, work); | 3657 | ext4_io_end_t *io = container_of(work, ext4_io_end_t, work); |
| 3588 | struct inode *inode = io->inode; | 3658 | struct inode *inode = io->inode; |
| 3589 | int ret = 0; | 3659 | struct ext4_inode_info *ei = EXT4_I(inode); |
| 3660 | unsigned long flags; | ||
| 3661 | int ret; | ||
| 3590 | 3662 | ||
| 3591 | mutex_lock(&inode->i_mutex); | 3663 | mutex_lock(&inode->i_mutex); |
| 3592 | ret = ext4_end_aio_dio_nolock(io); | 3664 | ret = ext4_end_io_nolock(io); |
| 3593 | if (ret >= 0) { | 3665 | if (ret < 0) { |
| 3594 | if (!list_empty(&io->list)) | 3666 | mutex_unlock(&inode->i_mutex); |
| 3595 | list_del_init(&io->list); | 3667 | return; |
| 3596 | ext4_free_io_end(io); | ||
| 3597 | } | 3668 | } |
| 3669 | |||
| 3670 | spin_lock_irqsave(&ei->i_completed_io_lock, flags); | ||
| 3671 | if (!list_empty(&io->list)) | ||
| 3672 | list_del_init(&io->list); | ||
| 3673 | spin_unlock_irqrestore(&ei->i_completed_io_lock, flags); | ||
| 3598 | mutex_unlock(&inode->i_mutex); | 3674 | mutex_unlock(&inode->i_mutex); |
| 3675 | ext4_free_io_end(io); | ||
| 3599 | } | 3676 | } |
| 3677 | |||
| 3600 | /* | 3678 | /* |
| 3601 | * This function is called from ext4_sync_file(). | 3679 | * This function is called from ext4_sync_file(). |
| 3602 | * | 3680 | * |
| 3603 | * When AIO DIO IO is completed, the work to convert unwritten | 3681 | * When IO is completed, the work to convert unwritten extents to |
| 3604 | * extents to written is queued on workqueue but may not get immediately | 3682 | * written is queued on workqueue but may not get immediately |
| 3605 | * scheduled. When fsync is called, we need to ensure the | 3683 | * scheduled. When fsync is called, we need to ensure the |
| 3606 | * conversion is complete before fsync returns. | 3684 | * conversion is complete before fsync returns. |
| 3607 | * The inode keeps track of a list of completed AIO from DIO path | 3685 | * The inode keeps track of a list of pending/completed IO that |
| 3608 | * that might needs to do the conversion. This function walks through | 3686 | * may need to do the conversion. This function walks through |
| 3609 | * the list and convert the related unwritten extents to written. | 3687 | * the list and converts the related unwritten extents for completed IO |
| 3688 | * to written. | ||
| 3689 | * The function returns the number of pending IOs on success. | ||
| 3610 | */ | 3690 | */ |
| 3611 | int flush_aio_dio_completed_IO(struct inode *inode) | 3691 | int flush_completed_IO(struct inode *inode) |
| 3612 | { | 3692 | { |
| 3613 | ext4_io_end_t *io; | 3693 | ext4_io_end_t *io; |
| 3694 | struct ext4_inode_info *ei = EXT4_I(inode); | ||
| 3695 | unsigned long flags; | ||
| 3614 | int ret = 0; | 3696 | int ret = 0; |
| 3615 | int ret2 = 0; | 3697 | int ret2 = 0; |
| 3616 | 3698 | ||
| 3617 | if (list_empty(&EXT4_I(inode)->i_aio_dio_complete_list)) | 3699 | if (list_empty(&ei->i_completed_io_list)) |
| 3618 | return ret; | 3700 | return ret; |
| 3619 | 3701 | ||
| 3620 | dump_aio_dio_list(inode); | 3702 | dump_completed_IO(inode); |
| 3621 | while (!list_empty(&EXT4_I(inode)->i_aio_dio_complete_list)){ | 3703 | spin_lock_irqsave(&ei->i_completed_io_lock, flags); |
| 3622 | io = list_entry(EXT4_I(inode)->i_aio_dio_complete_list.next, | 3704 | while (!list_empty(&ei->i_completed_io_list)){ |
| 3705 | io = list_entry(ei->i_completed_io_list.next, | ||
| 3623 | ext4_io_end_t, list); | 3706 | ext4_io_end_t, list); |
| 3624 | /* | 3707 | /* |
| 3625 | * Calling ext4_end_aio_dio_nolock() to convert completed | 3708 | * Calling ext4_end_io_nolock() to convert completed |
| 3626 | * IO to written. | 3709 | * IO to written. |
| 3627 | * | 3710 | * |
| 3628 | * When ext4_sync_file() is called, run_queue() may already | 3711 | * When ext4_sync_file() is called, run_queue() may already |
| @@ -3635,20 +3718,23 @@ int flush_aio_dio_completed_IO(struct inode *inode) | |||
| 3635 | * avoid double converting from both fsync and background work | 3718 | * avoid double converting from both fsync and background work |
| 3636 | * queue work. | 3719 | * queue work. |
| 3637 | */ | 3720 | */ |
| 3638 | ret = ext4_end_aio_dio_nolock(io); | 3721 | spin_unlock_irqrestore(&ei->i_completed_io_lock, flags); |
| 3722 | ret = ext4_end_io_nolock(io); | ||
| 3723 | spin_lock_irqsave(&ei->i_completed_io_lock, flags); | ||
| 3639 | if (ret < 0) | 3724 | if (ret < 0) |
| 3640 | ret2 = ret; | 3725 | ret2 = ret; |
| 3641 | else | 3726 | else |
| 3642 | list_del_init(&io->list); | 3727 | list_del_init(&io->list); |
| 3643 | } | 3728 | } |
| 3729 | spin_unlock_irqrestore(&ei->i_completed_io_lock, flags); | ||
| 3644 | return (ret2 < 0) ? ret2 : 0; | 3730 | return (ret2 < 0) ? ret2 : 0; |
| 3645 | } | 3731 | } |
| 3646 | 3732 | ||
| 3647 | static ext4_io_end_t *ext4_init_io_end (struct inode *inode) | 3733 | static ext4_io_end_t *ext4_init_io_end (struct inode *inode, gfp_t flags) |
| 3648 | { | 3734 | { |
| 3649 | ext4_io_end_t *io = NULL; | 3735 | ext4_io_end_t *io = NULL; |
| 3650 | 3736 | ||
| 3651 | io = kmalloc(sizeof(*io), GFP_NOFS); | 3737 | io = kmalloc(sizeof(*io), flags); |
| 3652 | 3738 | ||
| 3653 | if (io) { | 3739 | if (io) { |
| 3654 | igrab(inode); | 3740 | igrab(inode); |
| @@ -3656,8 +3742,8 @@ static ext4_io_end_t *ext4_init_io_end (struct inode *inode) | |||
| 3656 | io->flag = 0; | 3742 | io->flag = 0; |
| 3657 | io->offset = 0; | 3743 | io->offset = 0; |
| 3658 | io->size = 0; | 3744 | io->size = 0; |
| 3659 | io->error = 0; | 3745 | io->page = NULL; |
| 3660 | INIT_WORK(&io->work, ext4_end_aio_dio_work); | 3746 | INIT_WORK(&io->work, ext4_end_io_work); |
| 3661 | INIT_LIST_HEAD(&io->list); | 3747 | INIT_LIST_HEAD(&io->list); |
| 3662 | } | 3748 | } |
| 3663 | 3749 | ||
| @@ -3669,6 +3755,8 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset, | |||
| 3669 | { | 3755 | { |
| 3670 | ext4_io_end_t *io_end = iocb->private; | 3756 | ext4_io_end_t *io_end = iocb->private; |
| 3671 | struct workqueue_struct *wq; | 3757 | struct workqueue_struct *wq; |
| 3758 | unsigned long flags; | ||
| 3759 | struct ext4_inode_info *ei; | ||
| 3672 | 3760 | ||
| 3673 | /* if not async direct IO or dio with 0 bytes write, just return */ | 3761 | /* if not async direct IO or dio with 0 bytes write, just return */ |
| 3674 | if (!io_end || !size) | 3762 | if (!io_end || !size) |
| @@ -3680,7 +3768,7 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset, | |||
| 3680 | size); | 3768 | size); |
| 3681 | 3769 | ||
| 3682 | /* if not aio dio with unwritten extents, just free io and return */ | 3770 | /* if not aio dio with unwritten extents, just free io and return */ |
| 3683 | if (io_end->flag != DIO_AIO_UNWRITTEN){ | 3771 | if (io_end->flag != EXT4_IO_UNWRITTEN){ |
| 3684 | ext4_free_io_end(io_end); | 3772 | ext4_free_io_end(io_end); |
| 3685 | iocb->private = NULL; | 3773 | iocb->private = NULL; |
| 3686 | return; | 3774 | return; |
| @@ -3688,16 +3776,85 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset, | |||
| 3688 | 3776 | ||
| 3689 | io_end->offset = offset; | 3777 | io_end->offset = offset; |
| 3690 | io_end->size = size; | 3778 | io_end->size = size; |
| 3779 | io_end->flag = EXT4_IO_UNWRITTEN; | ||
| 3691 | wq = EXT4_SB(io_end->inode->i_sb)->dio_unwritten_wq; | 3780 | wq = EXT4_SB(io_end->inode->i_sb)->dio_unwritten_wq; |
| 3692 | 3781 | ||
| 3693 | /* queue the work to convert unwritten extents to written */ | 3782 | /* queue the work to convert unwritten extents to written */ |
| 3694 | queue_work(wq, &io_end->work); | 3783 | queue_work(wq, &io_end->work); |
| 3695 | 3784 | ||
| 3696 | /* Add the io_end to per-inode completed aio dio list*/ | 3785 | /* Add the io_end to per-inode completed aio dio list*/ |
| 3697 | list_add_tail(&io_end->list, | 3786 | ei = EXT4_I(io_end->inode); |
| 3698 | &EXT4_I(io_end->inode)->i_aio_dio_complete_list); | 3787 | spin_lock_irqsave(&ei->i_completed_io_lock, flags); |
| 3788 | list_add_tail(&io_end->list, &ei->i_completed_io_list); | ||
| 3789 | spin_unlock_irqrestore(&ei->i_completed_io_lock, flags); | ||
| 3699 | iocb->private = NULL; | 3790 | iocb->private = NULL; |
| 3700 | } | 3791 | } |
| 3792 | |||
| 3793 | static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate) | ||
| 3794 | { | ||
| 3795 | ext4_io_end_t *io_end = bh->b_private; | ||
| 3796 | struct workqueue_struct *wq; | ||
| 3797 | struct inode *inode; | ||
| 3798 | unsigned long flags; | ||
| 3799 | |||
| 3800 | if (!test_clear_buffer_uninit(bh) || !io_end) | ||
| 3801 | goto out; | ||
| 3802 | |||
| 3803 | if (!(io_end->inode->i_sb->s_flags & MS_ACTIVE)) { | ||
| 3804 | printk("sb umounted, discard end_io request for inode %lu\n", | ||
| 3805 | io_end->inode->i_ino); | ||
| 3806 | ext4_free_io_end(io_end); | ||
| 3807 | goto out; | ||
| 3808 | } | ||
| 3809 | |||
| 3810 | io_end->flag = EXT4_IO_UNWRITTEN; | ||
| 3811 | inode = io_end->inode; | ||
| 3812 | |||
| 3813 | /* Add the io_end to per-inode completed io list*/ | ||
| 3814 | spin_lock_irqsave(&EXT4_I(inode)->i_completed_io_lock, flags); | ||
| 3815 | list_add_tail(&io_end->list, &EXT4_I(inode)->i_completed_io_list); | ||
| 3816 | spin_unlock_irqrestore(&EXT4_I(inode)->i_completed_io_lock, flags); | ||
| 3817 | |||
| 3818 | wq = EXT4_SB(inode->i_sb)->dio_unwritten_wq; | ||
| 3819 | /* queue the work to convert unwritten extents to written */ | ||
| 3820 | queue_work(wq, &io_end->work); | ||
| 3821 | out: | ||
| 3822 | bh->b_private = NULL; | ||
| 3823 | bh->b_end_io = NULL; | ||
| 3824 | clear_buffer_uninit(bh); | ||
| 3825 | end_buffer_async_write(bh, uptodate); | ||
| 3826 | } | ||
| 3827 | |||
| 3828 | static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode) | ||
| 3829 | { | ||
| 3830 | ext4_io_end_t *io_end; | ||
| 3831 | struct page *page = bh->b_page; | ||
| 3832 | loff_t offset = (sector_t)page->index << PAGE_CACHE_SHIFT; | ||
| 3833 | size_t size = bh->b_size; | ||
| 3834 | |||
| 3835 | retry: | ||
| 3836 | io_end = ext4_init_io_end(inode, GFP_ATOMIC); | ||
| 3837 | if (!io_end) { | ||
| 3838 | if (printk_ratelimit()) | ||
| 3839 | printk(KERN_WARNING "%s: allocation fail\n", __func__); | ||
| 3840 | schedule(); | ||
| 3841 | goto retry; | ||
| 3842 | } | ||
| 3843 | io_end->offset = offset; | ||
| 3844 | io_end->size = size; | ||
| 3845 | /* | ||
| 3846 | * We need to hold a reference to the page to make sure it | ||
| 3847 | * doesn't get evicted before ext4_end_io_work() has a chance | ||
| 3848 | * to convert the extent from unwritten to written. | ||
| 3849 | */ | ||
| 3850 | io_end->page = page; | ||
| 3851 | get_page(io_end->page); | ||
| 3852 | |||
| 3853 | bh->b_private = io_end; | ||
| 3854 | bh->b_end_io = ext4_end_io_buffer_write; | ||
| 3855 | return 0; | ||
| 3856 | } | ||
| 3857 | |||
| 3701 | /* | 3858 | /* |
| 3702 | * For ext4 extent files, ext4 will do direct-io write to holes, | 3859 | * For ext4 extent files, ext4 will do direct-io write to holes, |
| 3703 | * preallocated extents, and those writes extend the file, no need to | 3860 | * preallocated extents, and those writes extend the file, no need to |
| @@ -3751,7 +3908,7 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb, | |||
| 3751 | iocb->private = NULL; | 3908 | iocb->private = NULL; |
| 3752 | EXT4_I(inode)->cur_aio_dio = NULL; | 3909 | EXT4_I(inode)->cur_aio_dio = NULL; |
| 3753 | if (!is_sync_kiocb(iocb)) { | 3910 | if (!is_sync_kiocb(iocb)) { |
| 3754 | iocb->private = ext4_init_io_end(inode); | 3911 | iocb->private = ext4_init_io_end(inode, GFP_NOFS); |
| 3755 | if (!iocb->private) | 3912 | if (!iocb->private) |
| 3756 | return -ENOMEM; | 3913 | return -ENOMEM; |
| 3757 | /* | 3914 | /* |
| @@ -3767,7 +3924,7 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb, | |||
| 3767 | ret = blockdev_direct_IO(rw, iocb, inode, | 3924 | ret = blockdev_direct_IO(rw, iocb, inode, |
| 3768 | inode->i_sb->s_bdev, iov, | 3925 | inode->i_sb->s_bdev, iov, |
| 3769 | offset, nr_segs, | 3926 | offset, nr_segs, |
| 3770 | ext4_get_block_dio_write, | 3927 | ext4_get_block_write, |
| 3771 | ext4_end_io_dio); | 3928 | ext4_end_io_dio); |
| 3772 | if (iocb->private) | 3929 | if (iocb->private) |
| 3773 | EXT4_I(inode)->cur_aio_dio = NULL; | 3930 | EXT4_I(inode)->cur_aio_dio = NULL; |
| @@ -3788,8 +3945,8 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb, | |||
| 3788 | if (ret != -EIOCBQUEUED && ret <= 0 && iocb->private) { | 3945 | if (ret != -EIOCBQUEUED && ret <= 0 && iocb->private) { |
| 3789 | ext4_free_io_end(iocb->private); | 3946 | ext4_free_io_end(iocb->private); |
| 3790 | iocb->private = NULL; | 3947 | iocb->private = NULL; |
| 3791 | } else if (ret > 0 && (EXT4_I(inode)->i_state & | 3948 | } else if (ret > 0 && ext4_test_inode_state(inode, |
| 3792 | EXT4_STATE_DIO_UNWRITTEN)) { | 3949 | EXT4_STATE_DIO_UNWRITTEN)) { |
| 3793 | int err; | 3950 | int err; |
| 3794 | /* | 3951 | /* |
| 3795 | * for non AIO case, since the IO is already | 3952 | * for non AIO case, since the IO is already |
| @@ -3799,7 +3956,7 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb, | |||
| 3799 | offset, ret); | 3956 | offset, ret); |
| 3800 | if (err < 0) | 3957 | if (err < 0) |
| 3801 | ret = err; | 3958 | ret = err; |
| 3802 | EXT4_I(inode)->i_state &= ~EXT4_STATE_DIO_UNWRITTEN; | 3959 | ext4_clear_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN); |
| 3803 | } | 3960 | } |
| 3804 | return ret; | 3961 | return ret; |
| 3805 | } | 3962 | } |
| @@ -4130,18 +4287,27 @@ no_top: | |||
| 4130 | * We release `count' blocks on disk, but (last - first) may be greater | 4287 | * We release `count' blocks on disk, but (last - first) may be greater |
| 4131 | * than `count' because there can be holes in there. | 4288 | * than `count' because there can be holes in there. |
| 4132 | */ | 4289 | */ |
| 4133 | static void ext4_clear_blocks(handle_t *handle, struct inode *inode, | 4290 | static int ext4_clear_blocks(handle_t *handle, struct inode *inode, |
| 4134 | struct buffer_head *bh, | 4291 | struct buffer_head *bh, |
| 4135 | ext4_fsblk_t block_to_free, | 4292 | ext4_fsblk_t block_to_free, |
| 4136 | unsigned long count, __le32 *first, | 4293 | unsigned long count, __le32 *first, |
| 4137 | __le32 *last) | 4294 | __le32 *last) |
| 4138 | { | 4295 | { |
| 4139 | __le32 *p; | 4296 | __le32 *p; |
| 4140 | int flags = EXT4_FREE_BLOCKS_FORGET; | 4297 | int flags = EXT4_FREE_BLOCKS_FORGET | EXT4_FREE_BLOCKS_VALIDATED; |
| 4141 | 4298 | ||
| 4142 | if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) | 4299 | if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) |
| 4143 | flags |= EXT4_FREE_BLOCKS_METADATA; | 4300 | flags |= EXT4_FREE_BLOCKS_METADATA; |
| 4144 | 4301 | ||
| 4302 | if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), block_to_free, | ||
| 4303 | count)) { | ||
| 4304 | ext4_error(inode->i_sb, "inode #%lu: " | ||
| 4305 | "attempt to clear blocks %llu len %lu, invalid", | ||
| 4306 | inode->i_ino, (unsigned long long) block_to_free, | ||
| 4307 | count); | ||
| 4308 | return 1; | ||
| 4309 | } | ||
| 4310 | |||
| 4145 | if (try_to_extend_transaction(handle, inode)) { | 4311 | if (try_to_extend_transaction(handle, inode)) { |
| 4146 | if (bh) { | 4312 | if (bh) { |
| 4147 | BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata"); | 4313 | BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata"); |
| @@ -4160,6 +4326,7 @@ static void ext4_clear_blocks(handle_t *handle, struct inode *inode, | |||
| 4160 | *p = 0; | 4326 | *p = 0; |
| 4161 | 4327 | ||
| 4162 | ext4_free_blocks(handle, inode, 0, block_to_free, count, flags); | 4328 | ext4_free_blocks(handle, inode, 0, block_to_free, count, flags); |
| 4329 | return 0; | ||
| 4163 | } | 4330 | } |
| 4164 | 4331 | ||
| 4165 | /** | 4332 | /** |
| @@ -4215,9 +4382,10 @@ static void ext4_free_data(handle_t *handle, struct inode *inode, | |||
| 4215 | } else if (nr == block_to_free + count) { | 4382 | } else if (nr == block_to_free + count) { |
| 4216 | count++; | 4383 | count++; |
| 4217 | } else { | 4384 | } else { |
| 4218 | ext4_clear_blocks(handle, inode, this_bh, | 4385 | if (ext4_clear_blocks(handle, inode, this_bh, |
| 4219 | block_to_free, | 4386 | block_to_free, count, |
| 4220 | count, block_to_free_p, p); | 4387 | block_to_free_p, p)) |
| 4388 | break; | ||
| 4221 | block_to_free = nr; | 4389 | block_to_free = nr; |
| 4222 | block_to_free_p = p; | 4390 | block_to_free_p = p; |
| 4223 | count = 1; | 4391 | count = 1; |
| @@ -4241,7 +4409,7 @@ static void ext4_free_data(handle_t *handle, struct inode *inode, | |||
| 4241 | if ((EXT4_JOURNAL(inode) == NULL) || bh2jh(this_bh)) | 4409 | if ((EXT4_JOURNAL(inode) == NULL) || bh2jh(this_bh)) |
| 4242 | ext4_handle_dirty_metadata(handle, inode, this_bh); | 4410 | ext4_handle_dirty_metadata(handle, inode, this_bh); |
| 4243 | else | 4411 | else |
| 4244 | ext4_error(inode->i_sb, __func__, | 4412 | ext4_error(inode->i_sb, |
| 4245 | "circular indirect block detected, " | 4413 | "circular indirect block detected, " |
| 4246 | "inode=%lu, block=%llu", | 4414 | "inode=%lu, block=%llu", |
| 4247 | inode->i_ino, | 4415 | inode->i_ino, |
| @@ -4281,6 +4449,16 @@ static void ext4_free_branches(handle_t *handle, struct inode *inode, | |||
| 4281 | if (!nr) | 4449 | if (!nr) |
| 4282 | continue; /* A hole */ | 4450 | continue; /* A hole */ |
| 4283 | 4451 | ||
| 4452 | if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), | ||
| 4453 | nr, 1)) { | ||
| 4454 | ext4_error(inode->i_sb, | ||
| 4455 | "indirect mapped block in inode " | ||
| 4456 | "#%lu invalid (level %d, blk #%lu)", | ||
| 4457 | inode->i_ino, depth, | ||
| 4458 | (unsigned long) nr); | ||
| 4459 | break; | ||
| 4460 | } | ||
| 4461 | |||
| 4284 | /* Go read the buffer for the next level down */ | 4462 | /* Go read the buffer for the next level down */ |
| 4285 | bh = sb_bread(inode->i_sb, nr); | 4463 | bh = sb_bread(inode->i_sb, nr); |
| 4286 | 4464 | ||
| @@ -4289,7 +4467,7 @@ static void ext4_free_branches(handle_t *handle, struct inode *inode, | |||
| 4289 | * (should be rare). | 4467 | * (should be rare). |
| 4290 | */ | 4468 | */ |
| 4291 | if (!bh) { | 4469 | if (!bh) { |
| 4292 | ext4_error(inode->i_sb, "ext4_free_branches", | 4470 | ext4_error(inode->i_sb, |
| 4293 | "Read failure, inode=%lu, block=%llu", | 4471 | "Read failure, inode=%lu, block=%llu", |
| 4294 | inode->i_ino, nr); | 4472 | inode->i_ino, nr); |
| 4295 | continue; | 4473 | continue; |
| @@ -4433,8 +4611,10 @@ void ext4_truncate(struct inode *inode) | |||
| 4433 | if (!ext4_can_truncate(inode)) | 4611 | if (!ext4_can_truncate(inode)) |
| 4434 | return; | 4612 | return; |
| 4435 | 4613 | ||
| 4614 | EXT4_I(inode)->i_flags &= ~EXT4_EOFBLOCKS_FL; | ||
| 4615 | |||
| 4436 | if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC)) | 4616 | if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC)) |
| 4437 | ei->i_state |= EXT4_STATE_DA_ALLOC_CLOSE; | 4617 | ext4_set_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE); |
| 4438 | 4618 | ||
| 4439 | if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) { | 4619 | if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) { |
| 4440 | ext4_ext_truncate(inode); | 4620 | ext4_ext_truncate(inode); |
| @@ -4604,9 +4784,8 @@ static int __ext4_get_inode_loc(struct inode *inode, | |||
| 4604 | 4784 | ||
| 4605 | bh = sb_getblk(sb, block); | 4785 | bh = sb_getblk(sb, block); |
| 4606 | if (!bh) { | 4786 | if (!bh) { |
| 4607 | ext4_error(sb, "ext4_get_inode_loc", "unable to read " | 4787 | ext4_error(sb, "unable to read inode block - " |
| 4608 | "inode block - inode=%lu, block=%llu", | 4788 | "inode=%lu, block=%llu", inode->i_ino, block); |
| 4609 | inode->i_ino, block); | ||
| 4610 | return -EIO; | 4789 | return -EIO; |
| 4611 | } | 4790 | } |
| 4612 | if (!buffer_uptodate(bh)) { | 4791 | if (!buffer_uptodate(bh)) { |
| @@ -4704,9 +4883,8 @@ make_io: | |||
| 4704 | submit_bh(READ_META, bh); | 4883 | submit_bh(READ_META, bh); |
| 4705 | wait_on_buffer(bh); | 4884 | wait_on_buffer(bh); |
| 4706 | if (!buffer_uptodate(bh)) { | 4885 | if (!buffer_uptodate(bh)) { |
| 4707 | ext4_error(sb, __func__, | 4886 | ext4_error(sb, "unable to read inode block - inode=%lu," |
| 4708 | "unable to read inode block - inode=%lu, " | 4887 | " block=%llu", inode->i_ino, block); |
| 4709 | "block=%llu", inode->i_ino, block); | ||
| 4710 | brelse(bh); | 4888 | brelse(bh); |
| 4711 | return -EIO; | 4889 | return -EIO; |
| 4712 | } | 4890 | } |
| @@ -4720,7 +4898,7 @@ int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc) | |||
| 4720 | { | 4898 | { |
| 4721 | /* We have all inode data except xattrs in memory here. */ | 4899 | /* We have all inode data except xattrs in memory here. */ |
| 4722 | return __ext4_get_inode_loc(inode, iloc, | 4900 | return __ext4_get_inode_loc(inode, iloc, |
| 4723 | !(EXT4_I(inode)->i_state & EXT4_STATE_XATTR)); | 4901 | !ext4_test_inode_state(inode, EXT4_STATE_XATTR)); |
| 4724 | } | 4902 | } |
| 4725 | 4903 | ||
| 4726 | void ext4_set_inode_flags(struct inode *inode) | 4904 | void ext4_set_inode_flags(struct inode *inode) |
| @@ -4814,7 +4992,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino) | |||
| 4814 | } | 4992 | } |
| 4815 | inode->i_nlink = le16_to_cpu(raw_inode->i_links_count); | 4993 | inode->i_nlink = le16_to_cpu(raw_inode->i_links_count); |
| 4816 | 4994 | ||
| 4817 | ei->i_state = 0; | 4995 | ei->i_state_flags = 0; |
| 4818 | ei->i_dir_start_lookup = 0; | 4996 | ei->i_dir_start_lookup = 0; |
| 4819 | ei->i_dtime = le32_to_cpu(raw_inode->i_dtime); | 4997 | ei->i_dtime = le32_to_cpu(raw_inode->i_dtime); |
| 4820 | /* We now have enough fields to check if the inode was active or not. | 4998 | /* We now have enough fields to check if the inode was active or not. |
| @@ -4897,7 +5075,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino) | |||
| 4897 | EXT4_GOOD_OLD_INODE_SIZE + | 5075 | EXT4_GOOD_OLD_INODE_SIZE + |
| 4898 | ei->i_extra_isize; | 5076 | ei->i_extra_isize; |
| 4899 | if (*magic == cpu_to_le32(EXT4_XATTR_MAGIC)) | 5077 | if (*magic == cpu_to_le32(EXT4_XATTR_MAGIC)) |
| 4900 | ei->i_state |= EXT4_STATE_XATTR; | 5078 | ext4_set_inode_state(inode, EXT4_STATE_XATTR); |
| 4901 | } | 5079 | } |
| 4902 | } else | 5080 | } else |
| 4903 | ei->i_extra_isize = 0; | 5081 | ei->i_extra_isize = 0; |
| @@ -4917,8 +5095,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino) | |||
| 4917 | ret = 0; | 5095 | ret = 0; |
| 4918 | if (ei->i_file_acl && | 5096 | if (ei->i_file_acl && |
| 4919 | !ext4_data_block_valid(EXT4_SB(sb), ei->i_file_acl, 1)) { | 5097 | !ext4_data_block_valid(EXT4_SB(sb), ei->i_file_acl, 1)) { |
| 4920 | ext4_error(sb, __func__, | 5098 | ext4_error(sb, "bad extended attribute block %llu inode #%lu", |
| 4921 | "bad extended attribute block %llu in inode #%lu", | ||
| 4922 | ei->i_file_acl, inode->i_ino); | 5099 | ei->i_file_acl, inode->i_ino); |
| 4923 | ret = -EIO; | 5100 | ret = -EIO; |
| 4924 | goto bad_inode; | 5101 | goto bad_inode; |
| @@ -4964,8 +5141,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino) | |||
| 4964 | new_decode_dev(le32_to_cpu(raw_inode->i_block[1]))); | 5141 | new_decode_dev(le32_to_cpu(raw_inode->i_block[1]))); |
| 4965 | } else { | 5142 | } else { |
| 4966 | ret = -EIO; | 5143 | ret = -EIO; |
| 4967 | ext4_error(inode->i_sb, __func__, | 5144 | ext4_error(inode->i_sb, "bogus i_mode (%o) for inode=%lu", |
| 4968 | "bogus i_mode (%o) for inode=%lu", | ||
| 4969 | inode->i_mode, inode->i_ino); | 5145 | inode->i_mode, inode->i_ino); |
| 4970 | goto bad_inode; | 5146 | goto bad_inode; |
| 4971 | } | 5147 | } |
| @@ -5037,7 +5213,7 @@ static int ext4_do_update_inode(handle_t *handle, | |||
| 5037 | 5213 | ||
| 5038 | /* For fields not tracked in the in-memory inode, | 5214 | /* For fields not tracked in the in-memory inode, |
| 5039 | * initialise them to zero for new inodes. */ | 5215 | * initialise them to zero for new inodes. */ |
| 5040 | if (ei->i_state & EXT4_STATE_NEW) | 5216 | if (ext4_test_inode_state(inode, EXT4_STATE_NEW)) |
| 5041 | memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size); | 5217 | memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size); |
| 5042 | 5218 | ||
| 5043 | ext4_get_inode_flags(ei); | 5219 | ext4_get_inode_flags(ei); |
| @@ -5101,7 +5277,7 @@ static int ext4_do_update_inode(handle_t *handle, | |||
| 5101 | EXT4_FEATURE_RO_COMPAT_LARGE_FILE); | 5277 | EXT4_FEATURE_RO_COMPAT_LARGE_FILE); |
| 5102 | sb->s_dirt = 1; | 5278 | sb->s_dirt = 1; |
| 5103 | ext4_handle_sync(handle); | 5279 | ext4_handle_sync(handle); |
| 5104 | err = ext4_handle_dirty_metadata(handle, inode, | 5280 | err = ext4_handle_dirty_metadata(handle, NULL, |
| 5105 | EXT4_SB(sb)->s_sbh); | 5281 | EXT4_SB(sb)->s_sbh); |
| 5106 | } | 5282 | } |
| 5107 | } | 5283 | } |
| @@ -5130,10 +5306,10 @@ static int ext4_do_update_inode(handle_t *handle, | |||
| 5130 | } | 5306 | } |
| 5131 | 5307 | ||
| 5132 | BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata"); | 5308 | BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata"); |
| 5133 | rc = ext4_handle_dirty_metadata(handle, inode, bh); | 5309 | rc = ext4_handle_dirty_metadata(handle, NULL, bh); |
| 5134 | if (!err) | 5310 | if (!err) |
| 5135 | err = rc; | 5311 | err = rc; |
| 5136 | ei->i_state &= ~EXT4_STATE_NEW; | 5312 | ext4_clear_inode_state(inode, EXT4_STATE_NEW); |
| 5137 | 5313 | ||
| 5138 | ext4_update_inode_fsync_trans(handle, inode, 0); | 5314 | ext4_update_inode_fsync_trans(handle, inode, 0); |
| 5139 | out_brelse: | 5315 | out_brelse: |
| @@ -5177,7 +5353,7 @@ out_brelse: | |||
| 5177 | * `stuff()' is running, and the new i_size will be lost. Plus the inode | 5353 | * `stuff()' is running, and the new i_size will be lost. Plus the inode |
| 5178 | * will no longer be on the superblock's dirty inode list. | 5354 | * will no longer be on the superblock's dirty inode list. |
| 5179 | */ | 5355 | */ |
| 5180 | int ext4_write_inode(struct inode *inode, int wait) | 5356 | int ext4_write_inode(struct inode *inode, struct writeback_control *wbc) |
| 5181 | { | 5357 | { |
| 5182 | int err; | 5358 | int err; |
| 5183 | 5359 | ||
| @@ -5191,7 +5367,7 @@ int ext4_write_inode(struct inode *inode, int wait) | |||
| 5191 | return -EIO; | 5367 | return -EIO; |
| 5192 | } | 5368 | } |
| 5193 | 5369 | ||
| 5194 | if (!wait) | 5370 | if (wbc->sync_mode != WB_SYNC_ALL) |
| 5195 | return 0; | 5371 | return 0; |
| 5196 | 5372 | ||
| 5197 | err = ext4_force_commit(inode->i_sb); | 5373 | err = ext4_force_commit(inode->i_sb); |
| @@ -5201,13 +5377,11 @@ int ext4_write_inode(struct inode *inode, int wait) | |||
| 5201 | err = ext4_get_inode_loc(inode, &iloc); | 5377 | err = ext4_get_inode_loc(inode, &iloc); |
| 5202 | if (err) | 5378 | if (err) |
| 5203 | return err; | 5379 | return err; |
| 5204 | if (wait) | 5380 | if (wbc->sync_mode == WB_SYNC_ALL) |
| 5205 | sync_dirty_buffer(iloc.bh); | 5381 | sync_dirty_buffer(iloc.bh); |
| 5206 | if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) { | 5382 | if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) { |
| 5207 | ext4_error(inode->i_sb, __func__, | 5383 | ext4_error(inode->i_sb, "IO error syncing inode, " |
| 5208 | "IO error syncing inode, " | 5384 | "inode=%lu, block=%llu", inode->i_ino, |
| 5209 | "inode=%lu, block=%llu", | ||
| 5210 | inode->i_ino, | ||
| 5211 | (unsigned long long)iloc.bh->b_blocknr); | 5385 | (unsigned long long)iloc.bh->b_blocknr); |
| 5212 | err = -EIO; | 5386 | err = -EIO; |
| 5213 | } | 5387 | } |
| @@ -5249,6 +5423,8 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr) | |||
| 5249 | if (error) | 5423 | if (error) |
| 5250 | return error; | 5424 | return error; |
| 5251 | 5425 | ||
| 5426 | if (ia_valid & ATTR_SIZE) | ||
| 5427 | dquot_initialize(inode); | ||
| 5252 | if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) || | 5428 | if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) || |
| 5253 | (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) { | 5429 | (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) { |
| 5254 | handle_t *handle; | 5430 | handle_t *handle; |
| @@ -5261,7 +5437,7 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr) | |||
| 5261 | error = PTR_ERR(handle); | 5437 | error = PTR_ERR(handle); |
| 5262 | goto err_out; | 5438 | goto err_out; |
| 5263 | } | 5439 | } |
| 5264 | error = vfs_dq_transfer(inode, attr) ? -EDQUOT : 0; | 5440 | error = dquot_transfer(inode, attr); |
| 5265 | if (error) { | 5441 | if (error) { |
| 5266 | ext4_journal_stop(handle); | 5442 | ext4_journal_stop(handle); |
| 5267 | return error; | 5443 | return error; |
| @@ -5288,7 +5464,9 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr) | |||
| 5288 | } | 5464 | } |
| 5289 | 5465 | ||
| 5290 | if (S_ISREG(inode->i_mode) && | 5466 | if (S_ISREG(inode->i_mode) && |
| 5291 | attr->ia_valid & ATTR_SIZE && attr->ia_size < inode->i_size) { | 5467 | attr->ia_valid & ATTR_SIZE && |
| 5468 | (attr->ia_size < inode->i_size || | ||
| 5469 | (EXT4_I(inode)->i_flags & EXT4_EOFBLOCKS_FL))) { | ||
| 5292 | handle_t *handle; | 5470 | handle_t *handle; |
| 5293 | 5471 | ||
| 5294 | handle = ext4_journal_start(inode, 3); | 5472 | handle = ext4_journal_start(inode, 3); |
| @@ -5319,6 +5497,9 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr) | |||
| 5319 | goto err_out; | 5497 | goto err_out; |
| 5320 | } | 5498 | } |
| 5321 | } | 5499 | } |
| 5500 | /* ext4_truncate will clear the flag */ | ||
| 5501 | if ((EXT4_I(inode)->i_flags & EXT4_EOFBLOCKS_FL)) | ||
| 5502 | ext4_truncate(inode); | ||
| 5322 | } | 5503 | } |
| 5323 | 5504 | ||
| 5324 | rc = inode_setattr(inode, attr); | 5505 | rc = inode_setattr(inode, attr); |
| @@ -5557,8 +5738,8 @@ static int ext4_expand_extra_isize(struct inode *inode, | |||
| 5557 | entry = IFIRST(header); | 5738 | entry = IFIRST(header); |
| 5558 | 5739 | ||
| 5559 | /* No extended attributes present */ | 5740 | /* No extended attributes present */ |
| 5560 | if (!(EXT4_I(inode)->i_state & EXT4_STATE_XATTR) || | 5741 | if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) || |
| 5561 | header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) { | 5742 | header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) { |
| 5562 | memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0, | 5743 | memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0, |
| 5563 | new_extra_isize); | 5744 | new_extra_isize); |
| 5564 | EXT4_I(inode)->i_extra_isize = new_extra_isize; | 5745 | EXT4_I(inode)->i_extra_isize = new_extra_isize; |
| @@ -5602,7 +5783,7 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode) | |||
| 5602 | err = ext4_reserve_inode_write(handle, inode, &iloc); | 5783 | err = ext4_reserve_inode_write(handle, inode, &iloc); |
| 5603 | if (ext4_handle_valid(handle) && | 5784 | if (ext4_handle_valid(handle) && |
| 5604 | EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize && | 5785 | EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize && |
| 5605 | !(EXT4_I(inode)->i_state & EXT4_STATE_NO_EXPAND)) { | 5786 | !ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND)) { |
| 5606 | /* | 5787 | /* |
| 5607 | * We need extra buffer credits since we may write into EA block | 5788 | * We need extra buffer credits since we may write into EA block |
| 5608 | * with this same handle. If journal_extend fails, then it will | 5789 | * with this same handle. If journal_extend fails, then it will |
| @@ -5616,10 +5797,11 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode) | |||
| 5616 | sbi->s_want_extra_isize, | 5797 | sbi->s_want_extra_isize, |
| 5617 | iloc, handle); | 5798 | iloc, handle); |
| 5618 | if (ret) { | 5799 | if (ret) { |
| 5619 | EXT4_I(inode)->i_state |= EXT4_STATE_NO_EXPAND; | 5800 | ext4_set_inode_state(inode, |
| 5801 | EXT4_STATE_NO_EXPAND); | ||
| 5620 | if (mnt_count != | 5802 | if (mnt_count != |
| 5621 | le16_to_cpu(sbi->s_es->s_mnt_count)) { | 5803 | le16_to_cpu(sbi->s_es->s_mnt_count)) { |
| 5622 | ext4_warning(inode->i_sb, __func__, | 5804 | ext4_warning(inode->i_sb, |
| 5623 | "Unable to expand inode %lu. Delete" | 5805 | "Unable to expand inode %lu. Delete" |
| 5624 | " some EAs or run e2fsck.", | 5806 | " some EAs or run e2fsck.", |
| 5625 | inode->i_ino); | 5807 | inode->i_ino); |
| @@ -5641,7 +5823,7 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode) | |||
| 5641 | * i_size has been changed by generic_commit_write() and we thus need | 5823 | * i_size has been changed by generic_commit_write() and we thus need |
| 5642 | * to include the updated inode in the current transaction. | 5824 | * to include the updated inode in the current transaction. |
| 5643 | * | 5825 | * |
| 5644 | * Also, vfs_dq_alloc_block() will always dirty the inode when blocks | 5826 | * Also, dquot_alloc_block() will always dirty the inode when blocks |
| 5645 | * are allocated to the file. | 5827 | * are allocated to the file. |
| 5646 | * | 5828 | * |
| 5647 | * If the inode is marked synchronous, we don't honour that here - doing | 5829 | * If the inode is marked synchronous, we don't honour that here - doing |
| @@ -5683,7 +5865,7 @@ static int ext4_pin_inode(handle_t *handle, struct inode *inode) | |||
| 5683 | err = jbd2_journal_get_write_access(handle, iloc.bh); | 5865 | err = jbd2_journal_get_write_access(handle, iloc.bh); |
| 5684 | if (!err) | 5866 | if (!err) |
| 5685 | err = ext4_handle_dirty_metadata(handle, | 5867 | err = ext4_handle_dirty_metadata(handle, |
| 5686 | inode, | 5868 | NULL, |
| 5687 | iloc.bh); | 5869 | iloc.bh); |
| 5688 | brelse(iloc.bh); | 5870 | brelse(iloc.bh); |
| 5689 | } | 5871 | } |
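The fs/ext4/inode.c hunks above repeatedly use one pattern for dioread_nolock and async direct I/O: the end_io path records a completed request on a per-inode list under an irq-safe spinlock, then queues a work item so the unwritten-to-written extent conversion happens later in process context. The sketch below shows only that generic shape; the demo_* types and field names are invented stand-ins for illustration, not the ext4_io_end_t definitions in the patch.

/*
 * Minimal sketch (assumed demo_* names) of "record the completion now,
 * convert extents later from a workqueue".  The irq-safe lock variants
 * are used because an end_io callback may run in interrupt context.
 */
#include <linux/types.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct demo_io_end {			/* hypothetical, not ext4_io_end_t */
	struct list_head	list;
	loff_t			offset;
	ssize_t			size;
	struct work_struct	work;
};

struct demo_inode_info {		/* hypothetical per-inode bookkeeping */
	struct list_head	completed_io_list;
	spinlock_t		completed_io_lock;
};

static void demo_queue_completed_io(struct demo_inode_info *ei,
				    struct demo_io_end *io,
				    struct workqueue_struct *wq)
{
	unsigned long flags;

	/* Publish the completed request on the per-inode list. */
	spin_lock_irqsave(&ei->completed_io_lock, flags);
	list_add_tail(&io->list, &ei->completed_io_list);
	spin_unlock_irqrestore(&ei->completed_io_lock, flags);

	/* Defer the extent conversion to process context. */
	queue_work(wq, &io->work);
}

A sync path can then drain the same list under the same lock, dropping the lock around each conversion, which is the shape flush_completed_IO() takes in the hunks above.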
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c index b63d193126db..016d0249294f 100644 --- a/fs/ext4/ioctl.c +++ b/fs/ext4/ioctl.c | |||
| @@ -92,6 +92,15 @@ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) | |||
| 92 | flags &= ~EXT4_EXTENTS_FL; | 92 | flags &= ~EXT4_EXTENTS_FL; |
| 93 | } | 93 | } |
| 94 | 94 | ||
| 95 | if (flags & EXT4_EOFBLOCKS_FL) { | ||
| 96 | /* we don't support adding EOFBLOCKS flag */ | ||
| 97 | if (!(oldflags & EXT4_EOFBLOCKS_FL)) { | ||
| 98 | err = -EOPNOTSUPP; | ||
| 99 | goto flags_out; | ||
| 100 | } | ||
| 101 | } else if (oldflags & EXT4_EOFBLOCKS_FL) | ||
| 102 | ext4_truncate(inode); | ||
| 103 | |||
| 95 | handle = ext4_journal_start(inode, 1); | 104 | handle = ext4_journal_start(inode, 1); |
| 96 | if (IS_ERR(handle)) { | 105 | if (IS_ERR(handle)) { |
| 97 | err = PTR_ERR(handle); | 106 | err = PTR_ERR(handle); |
| @@ -249,7 +258,8 @@ setversion_out: | |||
| 249 | if (me.moved_len > 0) | 258 | if (me.moved_len > 0) |
| 250 | file_remove_suid(donor_filp); | 259 | file_remove_suid(donor_filp); |
| 251 | 260 | ||
| 252 | if (copy_to_user((struct move_extent *)arg, &me, sizeof(me))) | 261 | if (copy_to_user((struct move_extent __user *)arg, |
| 262 | &me, sizeof(me))) | ||
| 253 | err = -EFAULT; | 263 | err = -EFAULT; |
| 254 | mext_out: | 264 | mext_out: |
| 255 | fput(donor_filp); | 265 | fput(donor_filp); |
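The fs/ext4/ioctl.c hunk above makes two small points worth keeping in mind in similar ioctl code: a flag that only the kernel is allowed to set is rejected when userspace tries to add it (keeping or clearing it remains legal), and the structure copied back to the caller goes through a __user-annotated pointer so sparse can check the address-space crossing. Below is a hedged sketch of that shape; DEMO_KERNEL_ONLY_FL and struct demo_result are invented names, not the ext4 flag or structure.

/*
 * Illustrative only: reject a user-added kernel-only flag, then copy a
 * result struct back through a __user pointer.
 */
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/uaccess.h>

#define DEMO_KERNEL_ONLY_FL	0x00400000	/* hypothetical flag bit */

struct demo_result {
	__u64 moved_len;
};

static int demo_check_and_copy(unsigned int oldflags, unsigned int flags,
			       const struct demo_result *res,
			       unsigned long arg)
{
	/* Userspace may preserve or clear the flag, but never add it. */
	if ((flags & DEMO_KERNEL_ONLY_FL) && !(oldflags & DEMO_KERNEL_ONLY_FL))
		return -EOPNOTSUPP;

	/* The __user cast documents, and lets sparse verify, that arg
	 * refers to the caller's address space. */
	if (copy_to_user((struct demo_result __user *)arg, res, sizeof(*res)))
		return -EFAULT;

	return 0;
}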
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index d34afad3e137..506713a2ebd8 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c | |||
| @@ -441,10 +441,9 @@ static void mb_free_blocks_double(struct inode *inode, struct ext4_buddy *e4b, | |||
| 441 | for (i = 0; i < count; i++) { | 441 | for (i = 0; i < count; i++) { |
| 442 | if (!mb_test_bit(first + i, e4b->bd_info->bb_bitmap)) { | 442 | if (!mb_test_bit(first + i, e4b->bd_info->bb_bitmap)) { |
| 443 | ext4_fsblk_t blocknr; | 443 | ext4_fsblk_t blocknr; |
| 444 | blocknr = e4b->bd_group * EXT4_BLOCKS_PER_GROUP(sb); | 444 | |
| 445 | blocknr = ext4_group_first_block_no(sb, e4b->bd_group); | ||
| 445 | blocknr += first + i; | 446 | blocknr += first + i; |
| 446 | blocknr += | ||
| 447 | le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block); | ||
| 448 | ext4_grp_locked_error(sb, e4b->bd_group, | 447 | ext4_grp_locked_error(sb, e4b->bd_group, |
| 449 | __func__, "double-free of inode" | 448 | __func__, "double-free of inode" |
| 450 | " %lu's block %llu(bit %u in group %u)", | 449 | " %lu's block %llu(bit %u in group %u)", |
| @@ -1255,10 +1254,9 @@ static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b, | |||
| 1255 | 1254 | ||
| 1256 | if (!mb_test_bit(block, EXT4_MB_BITMAP(e4b))) { | 1255 | if (!mb_test_bit(block, EXT4_MB_BITMAP(e4b))) { |
| 1257 | ext4_fsblk_t blocknr; | 1256 | ext4_fsblk_t blocknr; |
| 1258 | blocknr = e4b->bd_group * EXT4_BLOCKS_PER_GROUP(sb); | 1257 | |
| 1258 | blocknr = ext4_group_first_block_no(sb, e4b->bd_group); | ||
| 1259 | blocknr += block; | 1259 | blocknr += block; |
| 1260 | blocknr += | ||
| 1261 | le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block); | ||
| 1262 | ext4_grp_locked_error(sb, e4b->bd_group, | 1260 | ext4_grp_locked_error(sb, e4b->bd_group, |
| 1263 | __func__, "double-free of inode" | 1261 | __func__, "double-free of inode" |
| 1264 | " %lu's block %llu(bit %u in group %u)", | 1262 | " %lu's block %llu(bit %u in group %u)", |
| @@ -1631,7 +1629,6 @@ int ext4_mb_find_by_goal(struct ext4_allocation_context *ac, | |||
| 1631 | int max; | 1629 | int max; |
| 1632 | int err; | 1630 | int err; |
| 1633 | struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); | 1631 | struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); |
| 1634 | struct ext4_super_block *es = sbi->s_es; | ||
| 1635 | struct ext4_free_extent ex; | 1632 | struct ext4_free_extent ex; |
| 1636 | 1633 | ||
| 1637 | if (!(ac->ac_flags & EXT4_MB_HINT_TRY_GOAL)) | 1634 | if (!(ac->ac_flags & EXT4_MB_HINT_TRY_GOAL)) |
| @@ -1648,8 +1645,8 @@ int ext4_mb_find_by_goal(struct ext4_allocation_context *ac, | |||
| 1648 | if (max >= ac->ac_g_ex.fe_len && ac->ac_g_ex.fe_len == sbi->s_stripe) { | 1645 | if (max >= ac->ac_g_ex.fe_len && ac->ac_g_ex.fe_len == sbi->s_stripe) { |
| 1649 | ext4_fsblk_t start; | 1646 | ext4_fsblk_t start; |
| 1650 | 1647 | ||
| 1651 | start = (e4b->bd_group * EXT4_BLOCKS_PER_GROUP(ac->ac_sb)) + | 1648 | start = ext4_group_first_block_no(ac->ac_sb, e4b->bd_group) + |
| 1652 | ex.fe_start + le32_to_cpu(es->s_first_data_block); | 1649 | ex.fe_start; |
| 1653 | /* use do_div to get remainder (would be 64-bit modulo) */ | 1650 | /* use do_div to get remainder (would be 64-bit modulo) */ |
| 1654 | if (do_div(start, sbi->s_stripe) == 0) { | 1651 | if (do_div(start, sbi->s_stripe) == 0) { |
| 1655 | ac->ac_found++; | 1652 | ac->ac_found++; |
| @@ -1803,8 +1800,8 @@ void ext4_mb_scan_aligned(struct ext4_allocation_context *ac, | |||
| 1803 | BUG_ON(sbi->s_stripe == 0); | 1800 | BUG_ON(sbi->s_stripe == 0); |
| 1804 | 1801 | ||
| 1805 | /* find first stripe-aligned block in group */ | 1802 | /* find first stripe-aligned block in group */ |
| 1806 | first_group_block = e4b->bd_group * EXT4_BLOCKS_PER_GROUP(sb) | 1803 | first_group_block = ext4_group_first_block_no(sb, e4b->bd_group); |
| 1807 | + le32_to_cpu(sbi->s_es->s_first_data_block); | 1804 | |
| 1808 | a = first_group_block + sbi->s_stripe - 1; | 1805 | a = first_group_block + sbi->s_stripe - 1; |
| 1809 | do_div(a, sbi->s_stripe); | 1806 | do_div(a, sbi->s_stripe); |
| 1810 | i = (a * sbi->s_stripe) - first_group_block; | 1807 | i = (a * sbi->s_stripe) - first_group_block; |
| @@ -2256,7 +2253,7 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group, | |||
| 2256 | 2253 | ||
| 2257 | INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list); | 2254 | INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list); |
| 2258 | init_rwsem(&meta_group_info[i]->alloc_sem); | 2255 | init_rwsem(&meta_group_info[i]->alloc_sem); |
| 2259 | meta_group_info[i]->bb_free_root.rb_node = NULL; | 2256 | meta_group_info[i]->bb_free_root = RB_ROOT; |
| 2260 | 2257 | ||
| 2261 | #ifdef DOUBLE_CHECK | 2258 | #ifdef DOUBLE_CHECK |
| 2262 | { | 2259 | { |
| @@ -2560,12 +2557,9 @@ static void release_blocks_on_commit(journal_t *journal, transaction_t *txn) | |||
| 2560 | ext4_unlock_group(sb, entry->group); | 2557 | ext4_unlock_group(sb, entry->group); |
| 2561 | if (test_opt(sb, DISCARD)) { | 2558 | if (test_opt(sb, DISCARD)) { |
| 2562 | ext4_fsblk_t discard_block; | 2559 | ext4_fsblk_t discard_block; |
| 2563 | struct ext4_super_block *es = EXT4_SB(sb)->s_es; | ||
| 2564 | 2560 | ||
| 2565 | discard_block = (ext4_fsblk_t)entry->group * | 2561 | discard_block = entry->start_blk + |
| 2566 | EXT4_BLOCKS_PER_GROUP(sb) | 2562 | ext4_group_first_block_no(sb, entry->group); |
| 2567 | + entry->start_blk | ||
| 2568 | + le32_to_cpu(es->s_first_data_block); | ||
| 2569 | trace_ext4_discard_blocks(sb, | 2563 | trace_ext4_discard_blocks(sb, |
| 2570 | (unsigned long long)discard_block, | 2564 | (unsigned long long)discard_block, |
| 2571 | entry->count); | 2565 | entry->count); |
| @@ -2703,14 +2697,11 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac, | |||
| 2703 | if (err) | 2697 | if (err) |
| 2704 | goto out_err; | 2698 | goto out_err; |
| 2705 | 2699 | ||
| 2706 | block = ac->ac_b_ex.fe_group * EXT4_BLOCKS_PER_GROUP(sb) | 2700 | block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); |
| 2707 | + ac->ac_b_ex.fe_start | ||
| 2708 | + le32_to_cpu(es->s_first_data_block); | ||
| 2709 | 2701 | ||
| 2710 | len = ac->ac_b_ex.fe_len; | 2702 | len = ac->ac_b_ex.fe_len; |
| 2711 | if (!ext4_data_block_valid(sbi, block, len)) { | 2703 | if (!ext4_data_block_valid(sbi, block, len)) { |
| 2712 | ext4_error(sb, __func__, | 2704 | ext4_error(sb, "Allocating blocks %llu-%llu which overlap " |
| 2713 | "Allocating blocks %llu-%llu which overlap " | ||
| 2714 | "fs metadata\n", block, block+len); | 2705 | "fs metadata\n", block, block+len); |
| 2715 | /* File system mounted not to panic on error | 2706 | /* File system mounted not to panic on error |
| 2716 | * Fix the bitmap and repeat the block allocation | 2707 | * Fix the bitmap and repeat the block allocation |
| @@ -3161,9 +3152,7 @@ ext4_mb_use_preallocated(struct ext4_allocation_context *ac) | |||
| 3161 | /* The max size of hash table is PREALLOC_TB_SIZE */ | 3152 | /* The max size of hash table is PREALLOC_TB_SIZE */ |
| 3162 | order = PREALLOC_TB_SIZE - 1; | 3153 | order = PREALLOC_TB_SIZE - 1; |
| 3163 | 3154 | ||
| 3164 | goal_block = ac->ac_g_ex.fe_group * EXT4_BLOCKS_PER_GROUP(ac->ac_sb) + | 3155 | goal_block = ext4_grp_offs_to_block(ac->ac_sb, &ac->ac_g_ex); |
| 3165 | ac->ac_g_ex.fe_start + | ||
| 3166 | le32_to_cpu(EXT4_SB(ac->ac_sb)->s_es->s_first_data_block); | ||
| 3167 | /* | 3156 | /* |
| 3168 | * search for the prealloc space that is having | 3157 | * search for the prealloc space that is having |
| 3169 | * minimal distance from the goal block. | 3158 | * minimal distance from the goal block. |
| @@ -3526,8 +3515,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh, | |||
| 3526 | if (bit >= end) | 3515 | if (bit >= end) |
| 3527 | break; | 3516 | break; |
| 3528 | next = mb_find_next_bit(bitmap_bh->b_data, end, bit); | 3517 | next = mb_find_next_bit(bitmap_bh->b_data, end, bit); |
| 3529 | start = group * EXT4_BLOCKS_PER_GROUP(sb) + bit + | 3518 | start = ext4_group_first_block_no(sb, group) + bit; |
| 3530 | le32_to_cpu(sbi->s_es->s_first_data_block); | ||
| 3531 | mb_debug(1, " free preallocated %u/%u in group %u\n", | 3519 | mb_debug(1, " free preallocated %u/%u in group %u\n", |
| 3532 | (unsigned) start, (unsigned) next - bit, | 3520 | (unsigned) start, (unsigned) next - bit, |
| 3533 | (unsigned) group); | 3521 | (unsigned) group); |
| @@ -3623,15 +3611,13 @@ ext4_mb_discard_group_preallocations(struct super_block *sb, | |||
| 3623 | 3611 | ||
| 3624 | bitmap_bh = ext4_read_block_bitmap(sb, group); | 3612 | bitmap_bh = ext4_read_block_bitmap(sb, group); |
| 3625 | if (bitmap_bh == NULL) { | 3613 | if (bitmap_bh == NULL) { |
| 3626 | ext4_error(sb, __func__, "Error in reading block " | 3614 | ext4_error(sb, "Error reading block bitmap for %u", group); |
| 3627 | "bitmap for %u", group); | ||
| 3628 | return 0; | 3615 | return 0; |
| 3629 | } | 3616 | } |
| 3630 | 3617 | ||
| 3631 | err = ext4_mb_load_buddy(sb, group, &e4b); | 3618 | err = ext4_mb_load_buddy(sb, group, &e4b); |
| 3632 | if (err) { | 3619 | if (err) { |
| 3633 | ext4_error(sb, __func__, "Error in loading buddy " | 3620 | ext4_error(sb, "Error loading buddy information for %u", group); |
| 3634 | "information for %u", group); | ||
| 3635 | put_bh(bitmap_bh); | 3621 | put_bh(bitmap_bh); |
| 3636 | return 0; | 3622 | return 0; |
| 3637 | } | 3623 | } |
| @@ -3804,15 +3790,15 @@ repeat: | |||
| 3804 | 3790 | ||
| 3805 | err = ext4_mb_load_buddy(sb, group, &e4b); | 3791 | err = ext4_mb_load_buddy(sb, group, &e4b); |
| 3806 | if (err) { | 3792 | if (err) { |
| 3807 | ext4_error(sb, __func__, "Error in loading buddy " | 3793 | ext4_error(sb, "Error loading buddy information for %u", |
| 3808 | "information for %u", group); | 3794 | group); |
| 3809 | continue; | 3795 | continue; |
| 3810 | } | 3796 | } |
| 3811 | 3797 | ||
| 3812 | bitmap_bh = ext4_read_block_bitmap(sb, group); | 3798 | bitmap_bh = ext4_read_block_bitmap(sb, group); |
| 3813 | if (bitmap_bh == NULL) { | 3799 | if (bitmap_bh == NULL) { |
| 3814 | ext4_error(sb, __func__, "Error in reading block " | 3800 | ext4_error(sb, "Error reading block bitmap for %u", |
| 3815 | "bitmap for %u", group); | 3801 | group); |
| 3816 | ext4_mb_release_desc(&e4b); | 3802 | ext4_mb_release_desc(&e4b); |
| 3817 | continue; | 3803 | continue; |
| 3818 | } | 3804 | } |
| @@ -3938,7 +3924,7 @@ static void ext4_mb_group_or_file(struct ext4_allocation_context *ac) | |||
| 3938 | 3924 | ||
| 3939 | /* don't use group allocation for large files */ | 3925 | /* don't use group allocation for large files */ |
| 3940 | size = max(size, isize); | 3926 | size = max(size, isize); |
| 3941 | if (size >= sbi->s_mb_stream_request) { | 3927 | if (size > sbi->s_mb_stream_request) { |
| 3942 | ac->ac_flags |= EXT4_MB_STREAM_ALLOC; | 3928 | ac->ac_flags |= EXT4_MB_STREAM_ALLOC; |
| 3943 | return; | 3929 | return; |
| 3944 | } | 3930 | } |
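A small behavioural shift hides in this hunk: the comparison against s_mb_stream_request changes from >= to >. As a worked example with an illustrative threshold, if s_mb_stream_request is 16 and the computed size is exactly 16, the allocation previously got EXT4_MB_STREAM_ALLOC; after this change it stays on the group-allocation path that the in-code comment ("don't use group allocation for large files") reserves for the smaller requests.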
| @@ -4077,8 +4063,8 @@ ext4_mb_discard_lg_preallocations(struct super_block *sb, | |||
| 4077 | 4063 | ||
| 4078 | ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, NULL); | 4064 | ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, NULL); |
| 4079 | if (ext4_mb_load_buddy(sb, group, &e4b)) { | 4065 | if (ext4_mb_load_buddy(sb, group, &e4b)) { |
| 4080 | ext4_error(sb, __func__, "Error in loading buddy " | 4066 | ext4_error(sb, "Error loading buddy information for %u", |
| 4081 | "information for %u", group); | 4067 | group); |
| 4082 | continue; | 4068 | continue; |
| 4083 | } | 4069 | } |
| 4084 | ext4_lock_group(sb, group); | 4070 | ext4_lock_group(sb, group); |
| @@ -4254,7 +4240,7 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle, | |||
| 4254 | return 0; | 4240 | return 0; |
| 4255 | } | 4241 | } |
| 4256 | reserv_blks = ar->len; | 4242 | reserv_blks = ar->len; |
| 4257 | while (ar->len && vfs_dq_alloc_block(ar->inode, ar->len)) { | 4243 | while (ar->len && dquot_alloc_block(ar->inode, ar->len)) { |
| 4258 | ar->flags |= EXT4_MB_HINT_NOPREALLOC; | 4244 | ar->flags |= EXT4_MB_HINT_NOPREALLOC; |
| 4259 | ar->len--; | 4245 | ar->len--; |
| 4260 | } | 4246 | } |
| @@ -4331,7 +4317,7 @@ out2: | |||
| 4331 | kmem_cache_free(ext4_ac_cachep, ac); | 4317 | kmem_cache_free(ext4_ac_cachep, ac); |
| 4332 | out1: | 4318 | out1: |
| 4333 | if (inquota && ar->len < inquota) | 4319 | if (inquota && ar->len < inquota) |
| 4334 | vfs_dq_free_block(ar->inode, inquota - ar->len); | 4320 | dquot_free_block(ar->inode, inquota - ar->len); |
| 4335 | out3: | 4321 | out3: |
| 4336 | if (!ar->len) { | 4322 | if (!ar->len) { |
| 4337 | if (!EXT4_I(ar->inode)->i_delalloc_reserved_flag) | 4323 | if (!EXT4_I(ar->inode)->i_delalloc_reserved_flag) |
| @@ -4476,10 +4462,10 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode, | |||
| 4476 | 4462 | ||
| 4477 | sbi = EXT4_SB(sb); | 4463 | sbi = EXT4_SB(sb); |
| 4478 | es = EXT4_SB(sb)->s_es; | 4464 | es = EXT4_SB(sb)->s_es; |
| 4479 | if (!ext4_data_block_valid(sbi, block, count)) { | 4465 | if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) && |
| 4480 | ext4_error(sb, __func__, | 4466 | !ext4_data_block_valid(sbi, block, count)) { |
| 4481 | "Freeing blocks not in datazone - " | 4467 | ext4_error(sb, "Freeing blocks not in datazone - " |
| 4482 | "block = %llu, count = %lu", block, count); | 4468 | "block = %llu, count = %lu", block, count); |
| 4483 | goto error_return; | 4469 | goto error_return; |
| 4484 | } | 4470 | } |
| 4485 | 4471 | ||
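ext4_free_blocks() now skips its data-zone check when the caller passes EXT4_FREE_BLOCKS_VALIDATED, meaning the range was already validated upstream. A hypothetical call-site sketch; the flag name comes from the hunk above, but the exact ext4_free_blocks() argument list is not shown in this diff and is therefore an assumption:

    /*
     * Hypothetical caller.  A path that has already run
     * ext4_data_block_valid() tells ext4_free_blocks() not to repeat the
     * check; the signature used here is assumed, not taken from the diff.
     */
    if (ext4_data_block_valid(sbi, block, count))
            ext4_free_blocks(handle, inode, NULL, block, count,
                             EXT4_FREE_BLOCKS_VALIDATED);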
| @@ -4547,8 +4533,7 @@ do_more: | |||
| 4547 | in_range(block + count - 1, ext4_inode_table(sb, gdp), | 4533 | in_range(block + count - 1, ext4_inode_table(sb, gdp), |
| 4548 | EXT4_SB(sb)->s_itb_per_group)) { | 4534 | EXT4_SB(sb)->s_itb_per_group)) { |
| 4549 | 4535 | ||
| 4550 | ext4_error(sb, __func__, | 4536 | ext4_error(sb, "Freeing blocks in system zone - " |
| 4551 | "Freeing blocks in system zone - " | ||
| 4552 | "Block = %llu, count = %lu", block, count); | 4537 | "Block = %llu, count = %lu", block, count); |
| 4553 | /* err = 0. ext4_std_error should be a no op */ | 4538 | /* err = 0. ext4_std_error should be a no op */ |
| 4554 | goto error_return; | 4539 | goto error_return; |
| @@ -4646,7 +4631,7 @@ do_more: | |||
| 4646 | sb->s_dirt = 1; | 4631 | sb->s_dirt = 1; |
| 4647 | error_return: | 4632 | error_return: |
| 4648 | if (freed) | 4633 | if (freed) |
| 4649 | vfs_dq_free_block(inode, freed); | 4634 | dquot_free_block(inode, freed); |
| 4650 | brelse(bitmap_bh); | 4635 | brelse(bitmap_bh); |
| 4651 | ext4_std_error(sb, err); | 4636 | ext4_std_error(sb, err); |
| 4652 | if (ac) | 4637 | if (ac) |
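Besides the helper conversions and the shorter ext4_error() calls, this file also moves from the vfs_dq_* quota API to dquot_alloc_block()/dquot_free_block(). A condensed sketch of the pairing the converted call sites rely on; the zero-on-success convention is inferred from the while-loop in ext4_mb_new_blocks() above, so treat it as an assumption rather than a documented contract:

    /*
     * Sketch: charge quota before allocating, release the charge when the
     * request is trimmed or rolled back (compare the inquota handling and
     * the error_return path in the hunks above).
     */
    static int sketch_quota_charged_alloc(struct inode *inode, unsigned int count)
    {
            int err;

            if (dquot_alloc_block(inode, count))
                    return -EDQUOT;         /* over quota, nothing allocated */

            err = 0;                        /* stand-in for the real block allocation */
            if (err)
                    dquot_free_block(inode, count); /* undo the charge */
            return err;
    }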
diff --git a/fs/ext4/mballoc.h b/fs/ext4/mballoc.h index 436521cae456..b619322c76f0 100644 --- a/fs/ext4/mballoc.h +++ b/fs/ext4/mballoc.h | |||
| @@ -220,16 +220,9 @@ struct ext4_buddy { | |||
| 220 | #define EXT4_MB_BITMAP(e4b) ((e4b)->bd_bitmap) | 220 | #define EXT4_MB_BITMAP(e4b) ((e4b)->bd_bitmap) |
| 221 | #define EXT4_MB_BUDDY(e4b) ((e4b)->bd_buddy) | 221 | #define EXT4_MB_BUDDY(e4b) ((e4b)->bd_buddy) |
| 222 | 222 | ||
| 223 | #define in_range(b, first, len) ((b) >= (first) && (b) <= (first) + (len) - 1) | ||
| 224 | |||
| 225 | static inline ext4_fsblk_t ext4_grp_offs_to_block(struct super_block *sb, | 223 | static inline ext4_fsblk_t ext4_grp_offs_to_block(struct super_block *sb, |
| 226 | struct ext4_free_extent *fex) | 224 | struct ext4_free_extent *fex) |
| 227 | { | 225 | { |
| 228 | ext4_fsblk_t block; | 226 | return ext4_group_first_block_no(sb, fex->fe_group) + fex->fe_start; |
| 229 | |||
| 230 | block = (ext4_fsblk_t) fex->fe_group * EXT4_BLOCKS_PER_GROUP(sb) | ||
| 231 | + fex->fe_start | ||
| 232 | + le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block); | ||
| 233 | return block; | ||
| 234 | } | 227 | } |
| 235 | #endif | 228 | #endif |
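With the local in_range() macro dropped and the open-coded sum gone, ext4_grp_offs_to_block() reduces to the group's first block plus the in-group offset. A worked example under assumed geometry (not taken from this diff): on a 1 KB-block filesystem, where s_first_data_block is 1 and EXT4_BLOCKS_PER_GROUP() is 8192, an extent with fe_group == 2 and fe_start == 100 maps to block 2 * 8192 + 100 + 1 = 16485.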
diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c index 81415814b00b..8b87bd0eac95 100644 --- a/fs/ext4/migrate.c +++ b/fs/ext4/migrate.c | |||
| @@ -365,12 +365,12 @@ static int ext4_ext_swap_inode_data(handle_t *handle, struct inode *inode, | |||
| 365 | * happened after we started the migrate. We need to | 365 | * happened after we started the migrate. We need to |
| 366 | * fail the migrate | 366 | * fail the migrate |
| 367 | */ | 367 | */ |
| 368 | if (!(EXT4_I(inode)->i_state & EXT4_STATE_EXT_MIGRATE)) { | 368 | if (!ext4_test_inode_state(inode, EXT4_STATE_EXT_MIGRATE)) { |
| 369 | retval = -EAGAIN; | 369 | retval = -EAGAIN; |
| 370 | up_write(&EXT4_I(inode)->i_data_sem); | 370 | up_write(&EXT4_I(inode)->i_data_sem); |
| 371 | goto err_out; | 371 | goto err_out; |
| 372 | } else | 372 | } else |
| 373 | EXT4_I(inode)->i_state &= ~EXT4_STATE_EXT_MIGRATE; | 373 | ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE); |
| 374 | /* | 374 | /* |
| 375 | * We have the extent map build with the tmp inode. | 375 | * We have the extent map build with the tmp inode. |
| 376 | * Now copy the i_data across | 376 | * Now copy the i_data across |
| @@ -503,14 +503,10 @@ int ext4_ext_migrate(struct inode *inode) | |||
| 503 | } | 503 | } |
| 504 | i_size_write(tmp_inode, i_size_read(inode)); | 504 | i_size_write(tmp_inode, i_size_read(inode)); |
| 505 | /* | 505 | /* |
| 506 | * We don't want the inode to be reclaimed | 506 | * Set the i_nlink to zero so it will be deleted later |
| 507 | * if we got interrupted in between. We have | 507 | * when we drop inode reference. |
| 508 | * this tmp inode carrying reference to the | ||
| 509 | * data blocks of the original file. We set | ||
| 510 | * the i_nlink to zero at the last stage after | ||
| 511 | * switching the original file to extent format | ||
| 512 | */ | 508 | */ |
| 513 | tmp_inode->i_nlink = 1; | 509 | tmp_inode->i_nlink = 0; |
| 514 | 510 | ||
| 515 | ext4_ext_tree_init(handle, tmp_inode); | 511 | ext4_ext_tree_init(handle, tmp_inode); |
| 516 | ext4_orphan_add(handle, tmp_inode); | 512 | ext4_orphan_add(handle, tmp_inode); |
| @@ -533,10 +529,20 @@ int ext4_ext_migrate(struct inode *inode) | |||
| 533 | * allocation. | 529 | * allocation. |
| 534 | */ | 530 | */ |
| 535 | down_read((&EXT4_I(inode)->i_data_sem)); | 531 | down_read((&EXT4_I(inode)->i_data_sem)); |
| 536 | EXT4_I(inode)->i_state |= EXT4_STATE_EXT_MIGRATE; | 532 | ext4_set_inode_state(inode, EXT4_STATE_EXT_MIGRATE); |
| 537 | up_read((&EXT4_I(inode)->i_data_sem)); | 533 | up_read((&EXT4_I(inode)->i_data_sem)); |
| 538 | 534 | ||
| 539 | handle = ext4_journal_start(inode, 1); | 535 | handle = ext4_journal_start(inode, 1); |
| 536 | if (IS_ERR(handle)) { | ||
| 537 | /* | ||
| 538 | * It is impossible to update on-disk structures without | ||
| 539 | * a handle, so just rollback in-core changes and live other | ||
| 540 | * work to orphan_list_cleanup() | ||
| 541 | */ | ||
| 542 | ext4_orphan_del(NULL, tmp_inode); | ||
| 543 | retval = PTR_ERR(handle); | ||
| 544 | goto out; | ||
| 545 | } | ||
| 540 | 546 | ||
| 541 | ei = EXT4_I(inode); | 547 | ei = EXT4_I(inode); |
| 542 | i_data = ei->i_data; | 548 | i_data = ei->i_data; |
| @@ -618,15 +624,8 @@ err_out: | |||
| 618 | 624 | ||
| 619 | /* Reset the extent details */ | 625 | /* Reset the extent details */ |
| 620 | ext4_ext_tree_init(handle, tmp_inode); | 626 | ext4_ext_tree_init(handle, tmp_inode); |
| 621 | |||
| 622 | /* | ||
| 623 | * Set the i_nlink to zero so that | ||
| 624 | * generic_drop_inode really deletes the | ||
| 625 | * inode | ||
| 626 | */ | ||
| 627 | tmp_inode->i_nlink = 0; | ||
| 628 | |||
| 629 | ext4_journal_stop(handle); | 627 | ext4_journal_stop(handle); |
| 628 | out: | ||
| 630 | unlock_new_inode(tmp_inode); | 629 | unlock_new_inode(tmp_inode); |
| 631 | iput(tmp_inode); | 630 | iput(tmp_inode); |
| 632 | 631 | ||
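Two coupled changes in this file: the temporary inode now starts out with i_nlink set to 0, so the final iput(), or orphan cleanup after a crash, deletes it without the late-stage bookkeeping the old comment described; and a failed ext4_journal_start() is no longer left unchecked, the new branch dropping the in-core orphan entry, recording PTR_ERR(handle) and leaving through the new out: label placed in front of the existing unlock_new_inode()/iput() cleanup.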
diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c index 82c415be87a4..aa5fe28d180f 100644 --- a/fs/ext4/move_extent.c +++ b/fs/ext4/move_extent.c | |||
| @@ -152,12 +152,12 @@ mext_check_null_inode(struct inode *inode1, struct inode *inode2, | |||
| 152 | int ret = 0; | 152 | int ret = 0; |
| 153 | 153 | ||
| 154 | if (inode1 == NULL) { | 154 | if (inode1 == NULL) { |
| 155 | ext4_error(inode2->i_sb, function, | 155 | __ext4_error(inode2->i_sb, function, |
| 156 | "Both inodes should not be NULL: " | 156 | "Both inodes should not be NULL: " |
| 157 | "inode1 NULL inode2 %lu", inode2->i_ino); | 157 | "inode1 NULL inode2 %lu", inode2->i_ino); |
| 158 | ret = -EIO; | 158 | ret = -EIO; |
| 159 | } else if (inode2 == NULL) { | 159 | } else if (inode2 == NULL) { |
| 160 | ext4_error(inode1->i_sb, function, | 160 | __ext4_error(inode1->i_sb, function, |
| 161 | "Both inodes should not be NULL: " | 161 | "Both inodes should not be NULL: " |
| 162 | "inode1 %lu inode2 NULL", inode1->i_ino); | 162 | "inode1 %lu inode2 NULL", inode1->i_ino); |
| 163 | ret = -EIO; | 163 | ret = -EIO; |
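These two hunks are the best hint at what the shortened ext4_error()/ext4_warning() calls throughout this series rely on: a lower-level __ext4_error(sb, function, fmt, ...) that still takes an explicit function name (used here because mext_check_null_inode() reports on behalf of the function passed in), while the common entry points presumably supply __func__ themselves. An assumed shape of those macros, not shown anywhere in this diff (the __ext4_warning name is inferred by symmetry with __ext4_error):

    /* Assumption: the convenience macros forward the call site's __func__,
     * which is why ordinary callers drop the explicit function-name
     * argument in the other hunks. */
    #define ext4_error(sb, fmt, ...) \
            __ext4_error(sb, __func__, fmt, ##__VA_ARGS__)
    #define ext4_warning(sb, fmt, ...) \
            __ext4_warning(sb, __func__, fmt, ##__VA_ARGS__)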
| @@ -252,6 +252,7 @@ mext_insert_across_blocks(handle_t *handle, struct inode *orig_inode, | |||
| 252 | } | 252 | } |
| 253 | 253 | ||
| 254 | o_start->ee_len = start_ext->ee_len; | 254 | o_start->ee_len = start_ext->ee_len; |
| 255 | eblock = le32_to_cpu(start_ext->ee_block); | ||
| 255 | new_flag = 1; | 256 | new_flag = 1; |
| 256 | 257 | ||
| 257 | } else if (start_ext->ee_len && new_ext->ee_len && | 258 | } else if (start_ext->ee_len && new_ext->ee_len && |
| @@ -262,6 +263,7 @@ mext_insert_across_blocks(handle_t *handle, struct inode *orig_inode, | |||
| 262 | * orig |------------------------------| | 263 | * orig |------------------------------| |
| 263 | */ | 264 | */ |
| 264 | o_start->ee_len = start_ext->ee_len; | 265 | o_start->ee_len = start_ext->ee_len; |
| 266 | eblock = le32_to_cpu(start_ext->ee_block); | ||
| 265 | new_flag = 1; | 267 | new_flag = 1; |
| 266 | 268 | ||
| 267 | } else if (!start_ext->ee_len && new_ext->ee_len && | 269 | } else if (!start_ext->ee_len && new_ext->ee_len && |
| @@ -475,7 +477,6 @@ mext_leaf_block(handle_t *handle, struct inode *orig_inode, | |||
| 475 | struct ext4_extent *oext, *o_start, *o_end, *prev_ext; | 477 | struct ext4_extent *oext, *o_start, *o_end, *prev_ext; |
| 476 | struct ext4_extent new_ext, start_ext, end_ext; | 478 | struct ext4_extent new_ext, start_ext, end_ext; |
| 477 | ext4_lblk_t new_ext_end; | 479 | ext4_lblk_t new_ext_end; |
| 478 | ext4_fsblk_t new_phys_end; | ||
| 479 | int oext_alen, new_ext_alen, end_ext_alen; | 480 | int oext_alen, new_ext_alen, end_ext_alen; |
| 480 | int depth = ext_depth(orig_inode); | 481 | int depth = ext_depth(orig_inode); |
| 481 | int ret; | 482 | int ret; |
| @@ -489,7 +490,6 @@ mext_leaf_block(handle_t *handle, struct inode *orig_inode, | |||
| 489 | new_ext.ee_len = dext->ee_len; | 490 | new_ext.ee_len = dext->ee_len; |
| 490 | new_ext_alen = ext4_ext_get_actual_len(&new_ext); | 491 | new_ext_alen = ext4_ext_get_actual_len(&new_ext); |
| 491 | new_ext_end = le32_to_cpu(new_ext.ee_block) + new_ext_alen - 1; | 492 | new_ext_end = le32_to_cpu(new_ext.ee_block) + new_ext_alen - 1; |
| 492 | new_phys_end = ext_pblock(&new_ext) + new_ext_alen - 1; | ||
| 493 | 493 | ||
| 494 | /* | 494 | /* |
| 495 | * Case: original extent is first | 495 | * Case: original extent is first |
| @@ -502,6 +502,7 @@ mext_leaf_block(handle_t *handle, struct inode *orig_inode, | |||
| 502 | le32_to_cpu(oext->ee_block) + oext_alen) { | 502 | le32_to_cpu(oext->ee_block) + oext_alen) { |
| 503 | start_ext.ee_len = cpu_to_le16(le32_to_cpu(new_ext.ee_block) - | 503 | start_ext.ee_len = cpu_to_le16(le32_to_cpu(new_ext.ee_block) - |
| 504 | le32_to_cpu(oext->ee_block)); | 504 | le32_to_cpu(oext->ee_block)); |
| 505 | start_ext.ee_block = oext->ee_block; | ||
| 505 | copy_extent_status(oext, &start_ext); | 506 | copy_extent_status(oext, &start_ext); |
| 506 | } else if (oext > EXT_FIRST_EXTENT(orig_path[depth].p_hdr)) { | 507 | } else if (oext > EXT_FIRST_EXTENT(orig_path[depth].p_hdr)) { |
| 507 | prev_ext = oext - 1; | 508 | prev_ext = oext - 1; |
| @@ -515,6 +516,7 @@ mext_leaf_block(handle_t *handle, struct inode *orig_inode, | |||
| 515 | start_ext.ee_len = cpu_to_le16( | 516 | start_ext.ee_len = cpu_to_le16( |
| 516 | ext4_ext_get_actual_len(prev_ext) + | 517 | ext4_ext_get_actual_len(prev_ext) + |
| 517 | new_ext_alen); | 518 | new_ext_alen); |
| 519 | start_ext.ee_block = oext->ee_block; | ||
| 518 | copy_extent_status(prev_ext, &start_ext); | 520 | copy_extent_status(prev_ext, &start_ext); |
| 519 | new_ext.ee_len = 0; | 521 | new_ext.ee_len = 0; |
| 520 | } | 522 | } |
| @@ -526,7 +528,7 @@ mext_leaf_block(handle_t *handle, struct inode *orig_inode, | |||
| 526 | * new_ext |-------| | 528 | * new_ext |-------| |
| 527 | */ | 529 | */ |
| 528 | if (le32_to_cpu(oext->ee_block) + oext_alen - 1 < new_ext_end) { | 530 | if (le32_to_cpu(oext->ee_block) + oext_alen - 1 < new_ext_end) { |
| 529 | ext4_error(orig_inode->i_sb, __func__, | 531 | ext4_error(orig_inode->i_sb, |
| 530 | "new_ext_end(%u) should be less than or equal to " | 532 | "new_ext_end(%u) should be less than or equal to " |
| 531 | "oext->ee_block(%u) + oext_alen(%d) - 1", | 533 | "oext->ee_block(%u) + oext_alen(%d) - 1", |
| 532 | new_ext_end, le32_to_cpu(oext->ee_block), | 534 | new_ext_end, le32_to_cpu(oext->ee_block), |
| @@ -689,12 +691,12 @@ mext_replace_branches(handle_t *handle, struct inode *orig_inode, | |||
| 689 | while (1) { | 691 | while (1) { |
| 690 | /* The extent for donor must be found. */ | 692 | /* The extent for donor must be found. */ |
| 691 | if (!dext) { | 693 | if (!dext) { |
| 692 | ext4_error(donor_inode->i_sb, __func__, | 694 | ext4_error(donor_inode->i_sb, |
| 693 | "The extent for donor must be found"); | 695 | "The extent for donor must be found"); |
| 694 | *err = -EIO; | 696 | *err = -EIO; |
| 695 | goto out; | 697 | goto out; |
| 696 | } else if (donor_off != le32_to_cpu(tmp_dext.ee_block)) { | 698 | } else if (donor_off != le32_to_cpu(tmp_dext.ee_block)) { |
| 697 | ext4_error(donor_inode->i_sb, __func__, | 699 | ext4_error(donor_inode->i_sb, |
| 698 | "Donor offset(%u) and the first block of donor " | 700 | "Donor offset(%u) and the first block of donor " |
| 699 | "extent(%u) should be equal", | 701 | "extent(%u) should be equal", |
| 700 | donor_off, | 702 | donor_off, |
| @@ -928,7 +930,7 @@ out2: | |||
| 928 | } | 930 | } |
| 929 | 931 | ||
| 930 | /** | 932 | /** |
| 931 | * mext_check_argumants - Check whether move extent can be done | 933 | * mext_check_arguments - Check whether move extent can be done |
| 932 | * | 934 | * |
| 933 | * @orig_inode: original inode | 935 | * @orig_inode: original inode |
| 934 | * @donor_inode: donor inode | 936 | * @donor_inode: donor inode |
| @@ -949,14 +951,6 @@ mext_check_arguments(struct inode *orig_inode, | |||
| 949 | unsigned int blkbits = orig_inode->i_blkbits; | 951 | unsigned int blkbits = orig_inode->i_blkbits; |
| 950 | unsigned int blocksize = 1 << blkbits; | 952 | unsigned int blocksize = 1 << blkbits; |
| 951 | 953 | ||
| 952 | /* Regular file check */ | ||
| 953 | if (!S_ISREG(orig_inode->i_mode) || !S_ISREG(donor_inode->i_mode)) { | ||
| 954 | ext4_debug("ext4 move extent: The argument files should be " | ||
| 955 | "regular file [ino:orig %lu, donor %lu]\n", | ||
| 956 | orig_inode->i_ino, donor_inode->i_ino); | ||
| 957 | return -EINVAL; | ||
| 958 | } | ||
| 959 | |||
| 960 | if (donor_inode->i_mode & (S_ISUID|S_ISGID)) { | 954 | if (donor_inode->i_mode & (S_ISUID|S_ISGID)) { |
| 961 | ext4_debug("ext4 move extent: suid or sgid is set" | 955 | ext4_debug("ext4 move extent: suid or sgid is set" |
| 962 | " to donor file [ino:orig %lu, donor %lu]\n", | 956 | " to donor file [ino:orig %lu, donor %lu]\n", |
| @@ -1204,6 +1198,14 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp, | |||
| 1204 | return -EINVAL; | 1198 | return -EINVAL; |
| 1205 | } | 1199 | } |
| 1206 | 1200 | ||
| 1201 | /* Regular file check */ | ||
| 1202 | if (!S_ISREG(orig_inode->i_mode) || !S_ISREG(donor_inode->i_mode)) { | ||
| 1203 | ext4_debug("ext4 move extent: The argument files should be " | ||
| 1204 | "regular file [ino:orig %lu, donor %lu]\n", | ||
| 1205 | orig_inode->i_ino, donor_inode->i_ino); | ||
| 1206 | return -EINVAL; | ||
| 1207 | } | ||
| 1208 | |||
| 1207 | /* Protect orig and donor inodes against a truncate */ | 1209 | /* Protect orig and donor inodes against a truncate */ |
| 1208 | ret1 = mext_inode_double_lock(orig_inode, donor_inode); | 1210 | ret1 = mext_inode_double_lock(orig_inode, donor_inode); |
| 1209 | if (ret1 < 0) | 1211 | if (ret1 < 0) |
| @@ -1351,7 +1353,7 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp, | |||
| 1351 | if (ret1 < 0) | 1353 | if (ret1 < 0) |
| 1352 | break; | 1354 | break; |
| 1353 | if (*moved_len > len) { | 1355 | if (*moved_len > len) { |
| 1354 | ext4_error(orig_inode->i_sb, __func__, | 1356 | ext4_error(orig_inode->i_sb, |
| 1355 | "We replaced blocks too much! " | 1357 | "We replaced blocks too much! " |
| 1356 | "sum of replaced: %llu requested: %llu", | 1358 | "sum of replaced: %llu requested: %llu", |
| 1357 | *moved_len, len); | 1359 | *moved_len, len); |
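Worth noting in the rest of this file: the spliced start extent now carries an explicit logical block number (start_ext.ee_block = oext->ee_block, with the matching eblock updates in mext_insert_across_blocks()) where previously only its length was filled in; the unused new_phys_end computation in mext_leaf_block() is dropped; and the regular-file check moves out of mext_check_arguments() to the top of ext4_move_extents(), so non-regular files are rejected before the two inodes are locked.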
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index 17a17e10dd60..0c070fabd108 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c | |||
| @@ -383,8 +383,7 @@ dx_probe(const struct qstr *d_name, struct inode *dir, | |||
| 383 | if (root->info.hash_version != DX_HASH_TEA && | 383 | if (root->info.hash_version != DX_HASH_TEA && |
| 384 | root->info.hash_version != DX_HASH_HALF_MD4 && | 384 | root->info.hash_version != DX_HASH_HALF_MD4 && |
| 385 | root->info.hash_version != DX_HASH_LEGACY) { | 385 | root->info.hash_version != DX_HASH_LEGACY) { |
| 386 | ext4_warning(dir->i_sb, __func__, | 386 | ext4_warning(dir->i_sb, "Unrecognised inode hash code %d", |
| 387 | "Unrecognised inode hash code %d", | ||
| 388 | root->info.hash_version); | 387 | root->info.hash_version); |
| 389 | brelse(bh); | 388 | brelse(bh); |
| 390 | *err = ERR_BAD_DX_DIR; | 389 | *err = ERR_BAD_DX_DIR; |
| @@ -399,8 +398,7 @@ dx_probe(const struct qstr *d_name, struct inode *dir, | |||
| 399 | hash = hinfo->hash; | 398 | hash = hinfo->hash; |
| 400 | 399 | ||
| 401 | if (root->info.unused_flags & 1) { | 400 | if (root->info.unused_flags & 1) { |
| 402 | ext4_warning(dir->i_sb, __func__, | 401 | ext4_warning(dir->i_sb, "Unimplemented inode hash flags: %#06x", |
| 403 | "Unimplemented inode hash flags: %#06x", | ||
| 404 | root->info.unused_flags); | 402 | root->info.unused_flags); |
| 405 | brelse(bh); | 403 | brelse(bh); |
| 406 | *err = ERR_BAD_DX_DIR; | 404 | *err = ERR_BAD_DX_DIR; |
| @@ -408,8 +406,7 @@ dx_probe(const struct qstr *d_name, struct inode *dir, | |||
| 408 | } | 406 | } |
| 409 | 407 | ||
| 410 | if ((indirect = root->info.indirect_levels) > 1) { | 408 | if ((indirect = root->info.indirect_levels) > 1) { |
| 411 | ext4_warning(dir->i_sb, __func__, | 409 | ext4_warning(dir->i_sb, "Unimplemented inode hash depth: %#06x", |
| 412 | "Unimplemented inode hash depth: %#06x", | ||
| 413 | root->info.indirect_levels); | 410 | root->info.indirect_levels); |
| 414 | brelse(bh); | 411 | brelse(bh); |
| 415 | *err = ERR_BAD_DX_DIR; | 412 | *err = ERR_BAD_DX_DIR; |
| @@ -421,8 +418,7 @@ dx_probe(const struct qstr *d_name, struct inode *dir, | |||
| 421 | 418 | ||
| 422 | if (dx_get_limit(entries) != dx_root_limit(dir, | 419 | if (dx_get_limit(entries) != dx_root_limit(dir, |
| 423 | root->info.info_length)) { | 420 | root->info.info_length)) { |
| 424 | ext4_warning(dir->i_sb, __func__, | 421 | ext4_warning(dir->i_sb, "dx entry: limit != root limit"); |
| 425 | "dx entry: limit != root limit"); | ||
| 426 | brelse(bh); | 422 | brelse(bh); |
| 427 | *err = ERR_BAD_DX_DIR; | 423 | *err = ERR_BAD_DX_DIR; |
| 428 | goto fail; | 424 | goto fail; |
| @@ -433,7 +429,7 @@ dx_probe(const struct qstr *d_name, struct inode *dir, | |||
| 433 | { | 429 | { |
| 434 | count = dx_get_count(entries); | 430 | count = dx_get_count(entries); |
| 435 | if (!count || count > dx_get_limit(entries)) { | 431 | if (!count || count > dx_get_limit(entries)) { |
| 436 | ext4_warning(dir->i_sb, __func__, | 432 | ext4_warning(dir->i_sb, |
| 437 | "dx entry: no count or count > limit"); | 433 | "dx entry: no count or count > limit"); |
| 438 | brelse(bh); | 434 | brelse(bh); |
| 439 | *err = ERR_BAD_DX_DIR; | 435 | *err = ERR_BAD_DX_DIR; |
| @@ -478,7 +474,7 @@ dx_probe(const struct qstr *d_name, struct inode *dir, | |||
| 478 | goto fail2; | 474 | goto fail2; |
| 479 | at = entries = ((struct dx_node *) bh->b_data)->entries; | 475 | at = entries = ((struct dx_node *) bh->b_data)->entries; |
| 480 | if (dx_get_limit(entries) != dx_node_limit (dir)) { | 476 | if (dx_get_limit(entries) != dx_node_limit (dir)) { |
| 481 | ext4_warning(dir->i_sb, __func__, | 477 | ext4_warning(dir->i_sb, |
| 482 | "dx entry: limit != node limit"); | 478 | "dx entry: limit != node limit"); |
| 483 | brelse(bh); | 479 | brelse(bh); |
| 484 | *err = ERR_BAD_DX_DIR; | 480 | *err = ERR_BAD_DX_DIR; |
| @@ -494,7 +490,7 @@ fail2: | |||
| 494 | } | 490 | } |
| 495 | fail: | 491 | fail: |
| 496 | if (*err == ERR_BAD_DX_DIR) | 492 | if (*err == ERR_BAD_DX_DIR) |
| 497 | ext4_warning(dir->i_sb, __func__, | 493 | ext4_warning(dir->i_sb, |
| 498 | "Corrupt dir inode %ld, running e2fsck is " | 494 | "Corrupt dir inode %ld, running e2fsck is " |
| 499 | "recommended.", dir->i_ino); | 495 | "recommended.", dir->i_ino); |
| 500 | return NULL; | 496 | return NULL; |
| @@ -947,9 +943,8 @@ restart: | |||
| 947 | wait_on_buffer(bh); | 943 | wait_on_buffer(bh); |
| 948 | if (!buffer_uptodate(bh)) { | 944 | if (!buffer_uptodate(bh)) { |
| 949 | /* read error, skip block & hope for the best */ | 945 | /* read error, skip block & hope for the best */ |
| 950 | ext4_error(sb, __func__, "reading directory #%lu " | 946 | ext4_error(sb, "reading directory #%lu offset %lu", |
| 951 | "offset %lu", dir->i_ino, | 947 | dir->i_ino, (unsigned long)block); |
| 952 | (unsigned long)block); | ||
| 953 | brelse(bh); | 948 | brelse(bh); |
| 954 | goto next; | 949 | goto next; |
| 955 | } | 950 | } |
| @@ -1041,7 +1036,7 @@ static struct buffer_head * ext4_dx_find_entry(struct inode *dir, const struct q | |||
| 1041 | retval = ext4_htree_next_block(dir, hash, frame, | 1036 | retval = ext4_htree_next_block(dir, hash, frame, |
| 1042 | frames, NULL); | 1037 | frames, NULL); |
| 1043 | if (retval < 0) { | 1038 | if (retval < 0) { |
| 1044 | ext4_warning(sb, __func__, | 1039 | ext4_warning(sb, |
| 1045 | "error reading index page in directory #%lu", | 1040 | "error reading index page in directory #%lu", |
| 1046 | dir->i_ino); | 1041 | dir->i_ino); |
| 1047 | *err = retval; | 1042 | *err = retval; |
| @@ -1071,14 +1066,13 @@ static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, stru | |||
| 1071 | __u32 ino = le32_to_cpu(de->inode); | 1066 | __u32 ino = le32_to_cpu(de->inode); |
| 1072 | brelse(bh); | 1067 | brelse(bh); |
| 1073 | if (!ext4_valid_inum(dir->i_sb, ino)) { | 1068 | if (!ext4_valid_inum(dir->i_sb, ino)) { |
| 1074 | ext4_error(dir->i_sb, "ext4_lookup", | 1069 | ext4_error(dir->i_sb, "bad inode number: %u", ino); |
| 1075 | "bad inode number: %u", ino); | ||
| 1076 | return ERR_PTR(-EIO); | 1070 | return ERR_PTR(-EIO); |
| 1077 | } | 1071 | } |
| 1078 | inode = ext4_iget(dir->i_sb, ino); | 1072 | inode = ext4_iget(dir->i_sb, ino); |
| 1079 | if (unlikely(IS_ERR(inode))) { | 1073 | if (unlikely(IS_ERR(inode))) { |
| 1080 | if (PTR_ERR(inode) == -ESTALE) { | 1074 | if (PTR_ERR(inode) == -ESTALE) { |
| 1081 | ext4_error(dir->i_sb, __func__, | 1075 | ext4_error(dir->i_sb, |
| 1082 | "deleted inode referenced: %u", | 1076 | "deleted inode referenced: %u", |
| 1083 | ino); | 1077 | ino); |
| 1084 | return ERR_PTR(-EIO); | 1078 | return ERR_PTR(-EIO); |
| @@ -1110,7 +1104,7 @@ struct dentry *ext4_get_parent(struct dentry *child) | |||
| 1110 | brelse(bh); | 1104 | brelse(bh); |
| 1111 | 1105 | ||
| 1112 | if (!ext4_valid_inum(child->d_inode->i_sb, ino)) { | 1106 | if (!ext4_valid_inum(child->d_inode->i_sb, ino)) { |
| 1113 | ext4_error(child->d_inode->i_sb, "ext4_get_parent", | 1107 | ext4_error(child->d_inode->i_sb, |
| 1114 | "bad inode number: %u", ino); | 1108 | "bad inode number: %u", ino); |
| 1115 | return ERR_PTR(-EIO); | 1109 | return ERR_PTR(-EIO); |
| 1116 | } | 1110 | } |
| @@ -1410,7 +1404,7 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry, | |||
| 1410 | de = (struct ext4_dir_entry_2 *)((char *)fde + | 1404 | de = (struct ext4_dir_entry_2 *)((char *)fde + |
| 1411 | ext4_rec_len_from_disk(fde->rec_len, blocksize)); | 1405 | ext4_rec_len_from_disk(fde->rec_len, blocksize)); |
| 1412 | if ((char *) de >= (((char *) root) + blocksize)) { | 1406 | if ((char *) de >= (((char *) root) + blocksize)) { |
| 1413 | ext4_error(dir->i_sb, __func__, | 1407 | ext4_error(dir->i_sb, |
| 1414 | "invalid rec_len for '..' in inode %lu", | 1408 | "invalid rec_len for '..' in inode %lu", |
| 1415 | dir->i_ino); | 1409 | dir->i_ino); |
| 1416 | brelse(bh); | 1410 | brelse(bh); |
| @@ -1575,8 +1569,7 @@ static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry, | |||
| 1575 | 1569 | ||
| 1576 | if (levels && (dx_get_count(frames->entries) == | 1570 | if (levels && (dx_get_count(frames->entries) == |
| 1577 | dx_get_limit(frames->entries))) { | 1571 | dx_get_limit(frames->entries))) { |
| 1578 | ext4_warning(sb, __func__, | 1572 | ext4_warning(sb, "Directory index full!"); |
| 1579 | "Directory index full!"); | ||
| 1580 | err = -ENOSPC; | 1573 | err = -ENOSPC; |
| 1581 | goto cleanup; | 1574 | goto cleanup; |
| 1582 | } | 1575 | } |
| @@ -1766,6 +1759,8 @@ static int ext4_create(struct inode *dir, struct dentry *dentry, int mode, | |||
| 1766 | struct inode *inode; | 1759 | struct inode *inode; |
| 1767 | int err, retries = 0; | 1760 | int err, retries = 0; |
| 1768 | 1761 | ||
| 1762 | dquot_initialize(dir); | ||
| 1763 | |||
| 1769 | retry: | 1764 | retry: |
| 1770 | handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) + | 1765 | handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) + |
| 1771 | EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 + | 1766 | EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 + |
| @@ -1800,6 +1795,8 @@ static int ext4_mknod(struct inode *dir, struct dentry *dentry, | |||
| 1800 | if (!new_valid_dev(rdev)) | 1795 | if (!new_valid_dev(rdev)) |
| 1801 | return -EINVAL; | 1796 | return -EINVAL; |
| 1802 | 1797 | ||
| 1798 | dquot_initialize(dir); | ||
| 1799 | |||
| 1803 | retry: | 1800 | retry: |
| 1804 | handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) + | 1801 | handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) + |
| 1805 | EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 + | 1802 | EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 + |
| @@ -1837,6 +1834,8 @@ static int ext4_mkdir(struct inode *dir, struct dentry *dentry, int mode) | |||
| 1837 | if (EXT4_DIR_LINK_MAX(dir)) | 1834 | if (EXT4_DIR_LINK_MAX(dir)) |
| 1838 | return -EMLINK; | 1835 | return -EMLINK; |
| 1839 | 1836 | ||
| 1837 | dquot_initialize(dir); | ||
| 1838 | |||
| 1840 | retry: | 1839 | retry: |
| 1841 | handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) + | 1840 | handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) + |
| 1842 | EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 + | 1841 | EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 + |
| @@ -1916,11 +1915,11 @@ static int empty_dir(struct inode *inode) | |||
| 1916 | if (inode->i_size < EXT4_DIR_REC_LEN(1) + EXT4_DIR_REC_LEN(2) || | 1915 | if (inode->i_size < EXT4_DIR_REC_LEN(1) + EXT4_DIR_REC_LEN(2) || |
| 1917 | !(bh = ext4_bread(NULL, inode, 0, 0, &err))) { | 1916 | !(bh = ext4_bread(NULL, inode, 0, 0, &err))) { |
| 1918 | if (err) | 1917 | if (err) |
| 1919 | ext4_error(inode->i_sb, __func__, | 1918 | ext4_error(inode->i_sb, |
| 1920 | "error %d reading directory #%lu offset 0", | 1919 | "error %d reading directory #%lu offset 0", |
| 1921 | err, inode->i_ino); | 1920 | err, inode->i_ino); |
| 1922 | else | 1921 | else |
| 1923 | ext4_warning(inode->i_sb, __func__, | 1922 | ext4_warning(inode->i_sb, |
| 1924 | "bad directory (dir #%lu) - no data block", | 1923 | "bad directory (dir #%lu) - no data block", |
| 1925 | inode->i_ino); | 1924 | inode->i_ino); |
| 1926 | return 1; | 1925 | return 1; |
| @@ -1931,7 +1930,7 @@ static int empty_dir(struct inode *inode) | |||
| 1931 | !le32_to_cpu(de1->inode) || | 1930 | !le32_to_cpu(de1->inode) || |
| 1932 | strcmp(".", de->name) || | 1931 | strcmp(".", de->name) || |
| 1933 | strcmp("..", de1->name)) { | 1932 | strcmp("..", de1->name)) { |
| 1934 | ext4_warning(inode->i_sb, "empty_dir", | 1933 | ext4_warning(inode->i_sb, |
| 1935 | "bad directory (dir #%lu) - no `.' or `..'", | 1934 | "bad directory (dir #%lu) - no `.' or `..'", |
| 1936 | inode->i_ino); | 1935 | inode->i_ino); |
| 1937 | brelse(bh); | 1936 | brelse(bh); |
| @@ -1949,7 +1948,7 @@ static int empty_dir(struct inode *inode) | |||
| 1949 | offset >> EXT4_BLOCK_SIZE_BITS(sb), 0, &err); | 1948 | offset >> EXT4_BLOCK_SIZE_BITS(sb), 0, &err); |
| 1950 | if (!bh) { | 1949 | if (!bh) { |
| 1951 | if (err) | 1950 | if (err) |
| 1952 | ext4_error(sb, __func__, | 1951 | ext4_error(sb, |
| 1953 | "error %d reading directory" | 1952 | "error %d reading directory" |
| 1954 | " #%lu offset %u", | 1953 | " #%lu offset %u", |
| 1955 | err, inode->i_ino, offset); | 1954 | err, inode->i_ino, offset); |
| @@ -2020,11 +2019,18 @@ int ext4_orphan_add(handle_t *handle, struct inode *inode) | |||
| 2020 | err = ext4_reserve_inode_write(handle, inode, &iloc); | 2019 | err = ext4_reserve_inode_write(handle, inode, &iloc); |
| 2021 | if (err) | 2020 | if (err) |
| 2022 | goto out_unlock; | 2021 | goto out_unlock; |
| 2022 | /* | ||
| 2023 | * Due to previous errors inode may be already a part of on-disk | ||
| 2024 | * orphan list. If so skip on-disk list modification. | ||
| 2025 | */ | ||
| 2026 | if (NEXT_ORPHAN(inode) && NEXT_ORPHAN(inode) <= | ||
| 2027 | (le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count))) | ||
| 2028 | goto mem_insert; | ||
| 2023 | 2029 | ||
| 2024 | /* Insert this inode at the head of the on-disk orphan list... */ | 2030 | /* Insert this inode at the head of the on-disk orphan list... */ |
| 2025 | NEXT_ORPHAN(inode) = le32_to_cpu(EXT4_SB(sb)->s_es->s_last_orphan); | 2031 | NEXT_ORPHAN(inode) = le32_to_cpu(EXT4_SB(sb)->s_es->s_last_orphan); |
| 2026 | EXT4_SB(sb)->s_es->s_last_orphan = cpu_to_le32(inode->i_ino); | 2032 | EXT4_SB(sb)->s_es->s_last_orphan = cpu_to_le32(inode->i_ino); |
| 2027 | err = ext4_handle_dirty_metadata(handle, inode, EXT4_SB(sb)->s_sbh); | 2033 | err = ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh); |
| 2028 | rc = ext4_mark_iloc_dirty(handle, inode, &iloc); | 2034 | rc = ext4_mark_iloc_dirty(handle, inode, &iloc); |
| 2029 | if (!err) | 2035 | if (!err) |
| 2030 | err = rc; | 2036 | err = rc; |
| @@ -2037,6 +2043,7 @@ int ext4_orphan_add(handle_t *handle, struct inode *inode) | |||
| 2037 | * | 2043 | * |
| 2038 | * This is safe: on error we're going to ignore the orphan list | 2044 | * This is safe: on error we're going to ignore the orphan list |
| 2039 | * anyway on the next recovery. */ | 2045 | * anyway on the next recovery. */ |
| 2046 | mem_insert: | ||
| 2040 | if (!err) | 2047 | if (!err) |
| 2041 | list_add(&EXT4_I(inode)->i_orphan, &EXT4_SB(sb)->s_orphan); | 2048 | list_add(&EXT4_I(inode)->i_orphan, &EXT4_SB(sb)->s_orphan); |
| 2042 | 2049 | ||
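ext4_orphan_add() gains a guard for inodes that are already linked on the on-disk orphan chain: when NEXT_ORPHAN(inode) already holds a plausible inode number (non-zero and no larger than s_inodes_count), the on-disk list is left alone and only the in-memory insertion at the new mem_insert label runs. The quieter change in these hunks, mirrored in ext4_orphan_del(), is that ext4_handle_dirty_metadata() is now called with a NULL inode when dirtying the superblock buffer, presumably because that buffer does not belong to the inode being orphaned.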
| @@ -2096,7 +2103,7 @@ int ext4_orphan_del(handle_t *handle, struct inode *inode) | |||
| 2096 | if (err) | 2103 | if (err) |
| 2097 | goto out_brelse; | 2104 | goto out_brelse; |
| 2098 | sbi->s_es->s_last_orphan = cpu_to_le32(ino_next); | 2105 | sbi->s_es->s_last_orphan = cpu_to_le32(ino_next); |
| 2099 | err = ext4_handle_dirty_metadata(handle, inode, sbi->s_sbh); | 2106 | err = ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh); |
| 2100 | } else { | 2107 | } else { |
| 2101 | struct ext4_iloc iloc2; | 2108 | struct ext4_iloc iloc2; |
| 2102 | struct inode *i_prev = | 2109 | struct inode *i_prev = |
| @@ -2136,7 +2143,9 @@ static int ext4_rmdir(struct inode *dir, struct dentry *dentry) | |||
| 2136 | 2143 | ||
| 2137 | /* Initialize quotas before so that eventual writes go in | 2144 | /* Initialize quotas before so that eventual writes go in |
| 2138 | * separate transaction */ | 2145 | * separate transaction */ |
| 2139 | vfs_dq_init(dentry->d_inode); | 2146 | dquot_initialize(dir); |
| 2147 | dquot_initialize(dentry->d_inode); | ||
| 2148 | |||
| 2140 | handle = ext4_journal_start(dir, EXT4_DELETE_TRANS_BLOCKS(dir->i_sb)); | 2149 | handle = ext4_journal_start(dir, EXT4_DELETE_TRANS_BLOCKS(dir->i_sb)); |
| 2141 | if (IS_ERR(handle)) | 2150 | if (IS_ERR(handle)) |
| 2142 | return PTR_ERR(handle); | 2151 | return PTR_ERR(handle); |
| @@ -2163,7 +2172,7 @@ static int ext4_rmdir(struct inode *dir, struct dentry *dentry) | |||
| 2163 | if (retval) | 2172 | if (retval) |
| 2164 | goto end_rmdir; | 2173 | goto end_rmdir; |
| 2165 | if (!EXT4_DIR_LINK_EMPTY(inode)) | 2174 | if (!EXT4_DIR_LINK_EMPTY(inode)) |
| 2166 | ext4_warning(inode->i_sb, "ext4_rmdir", | 2175 | ext4_warning(inode->i_sb, |
| 2167 | "empty directory has too many links (%d)", | 2176 | "empty directory has too many links (%d)", |
| 2168 | inode->i_nlink); | 2177 | inode->i_nlink); |
| 2169 | inode->i_version++; | 2178 | inode->i_version++; |
| @@ -2195,7 +2204,9 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry) | |||
| 2195 | 2204 | ||
| 2196 | /* Initialize quotas before so that eventual writes go | 2205 | /* Initialize quotas before so that eventual writes go |
| 2197 | * in separate transaction */ | 2206 | * in separate transaction */ |
| 2198 | vfs_dq_init(dentry->d_inode); | 2207 | dquot_initialize(dir); |
| 2208 | dquot_initialize(dentry->d_inode); | ||
| 2209 | |||
| 2199 | handle = ext4_journal_start(dir, EXT4_DELETE_TRANS_BLOCKS(dir->i_sb)); | 2210 | handle = ext4_journal_start(dir, EXT4_DELETE_TRANS_BLOCKS(dir->i_sb)); |
| 2200 | if (IS_ERR(handle)) | 2211 | if (IS_ERR(handle)) |
| 2201 | return PTR_ERR(handle); | 2212 | return PTR_ERR(handle); |
| @@ -2215,7 +2226,7 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry) | |||
| 2215 | goto end_unlink; | 2226 | goto end_unlink; |
| 2216 | 2227 | ||
| 2217 | if (!inode->i_nlink) { | 2228 | if (!inode->i_nlink) { |
| 2218 | ext4_warning(inode->i_sb, "ext4_unlink", | 2229 | ext4_warning(inode->i_sb, |
| 2219 | "Deleting nonexistent file (%lu), %d", | 2230 | "Deleting nonexistent file (%lu), %d", |
| 2220 | inode->i_ino, inode->i_nlink); | 2231 | inode->i_ino, inode->i_nlink); |
| 2221 | inode->i_nlink = 1; | 2232 | inode->i_nlink = 1; |
| @@ -2250,6 +2261,8 @@ static int ext4_symlink(struct inode *dir, | |||
| 2250 | if (l > dir->i_sb->s_blocksize) | 2261 | if (l > dir->i_sb->s_blocksize) |
| 2251 | return -ENAMETOOLONG; | 2262 | return -ENAMETOOLONG; |
| 2252 | 2263 | ||
| 2264 | dquot_initialize(dir); | ||
| 2265 | |||
| 2253 | retry: | 2266 | retry: |
| 2254 | handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) + | 2267 | handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) + |
| 2255 | EXT4_INDEX_EXTRA_TRANS_BLOCKS + 5 + | 2268 | EXT4_INDEX_EXTRA_TRANS_BLOCKS + 5 + |
| @@ -2308,6 +2321,8 @@ static int ext4_link(struct dentry *old_dentry, | |||
| 2308 | if (inode->i_nlink >= EXT4_LINK_MAX) | 2321 | if (inode->i_nlink >= EXT4_LINK_MAX) |
| 2309 | return -EMLINK; | 2322 | return -EMLINK; |
| 2310 | 2323 | ||
| 2324 | dquot_initialize(dir); | ||
| 2325 | |||
| 2311 | /* | 2326 | /* |
| 2312 | * Return -ENOENT if we've raced with unlink and i_nlink is 0. Doing | 2327 | * Return -ENOENT if we've raced with unlink and i_nlink is 0. Doing |
| 2313 | * otherwise has the potential to corrupt the orphan inode list. | 2328 | * otherwise has the potential to corrupt the orphan inode list. |
| @@ -2358,12 +2373,15 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
| 2358 | struct ext4_dir_entry_2 *old_de, *new_de; | 2373 | struct ext4_dir_entry_2 *old_de, *new_de; |
| 2359 | int retval, force_da_alloc = 0; | 2374 | int retval, force_da_alloc = 0; |
| 2360 | 2375 | ||
| 2376 | dquot_initialize(old_dir); | ||
| 2377 | dquot_initialize(new_dir); | ||
| 2378 | |||
| 2361 | old_bh = new_bh = dir_bh = NULL; | 2379 | old_bh = new_bh = dir_bh = NULL; |
| 2362 | 2380 | ||
| 2363 | /* Initialize quotas before so that eventual writes go | 2381 | /* Initialize quotas before so that eventual writes go |
| 2364 | * in separate transaction */ | 2382 | * in separate transaction */ |
| 2365 | if (new_dentry->d_inode) | 2383 | if (new_dentry->d_inode) |
| 2366 | vfs_dq_init(new_dentry->d_inode); | 2384 | dquot_initialize(new_dentry->d_inode); |
| 2367 | handle = ext4_journal_start(old_dir, 2 * | 2385 | handle = ext4_journal_start(old_dir, 2 * |
| 2368 | EXT4_DATA_TRANS_BLOCKS(old_dir->i_sb) + | 2386 | EXT4_DATA_TRANS_BLOCKS(old_dir->i_sb) + |
| 2369 | EXT4_INDEX_EXTRA_TRANS_BLOCKS + 2); | 2387 | EXT4_INDEX_EXTRA_TRANS_BLOCKS + 2); |
| @@ -2462,7 +2480,7 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
| 2462 | } | 2480 | } |
| 2463 | } | 2481 | } |
| 2464 | if (retval) { | 2482 | if (retval) { |
| 2465 | ext4_warning(old_dir->i_sb, "ext4_rename", | 2483 | ext4_warning(old_dir->i_sb, |
| 2466 | "Deleting old file (%lu), %d, error=%d", | 2484 | "Deleting old file (%lu), %d, error=%d", |
| 2467 | old_dir->i_ino, old_dir->i_nlink, retval); | 2485 | old_dir->i_ino, old_dir->i_nlink, retval); |
| 2468 | } | 2486 | } |
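Across ext4_create(), ext4_mknod(), ext4_mkdir(), ext4_rmdir(), ext4_unlink(), ext4_symlink(), ext4_link() and ext4_rename(), the quota setup switches from vfs_dq_init() on the victim inode to dquot_initialize() on every existing inode involved, and it always runs before the journal handle is started, in keeping with the existing comment about letting those writes go in a separate transaction. A condensed sketch of the ordering for the unlink/rmdir style paths, using only calls that appear in these hunks (this is the sequence, not a drop-in function):

    dquot_initialize(dir);
    dquot_initialize(dentry->d_inode);

    handle = ext4_journal_start(dir, EXT4_DELETE_TRANS_BLOCKS(dir->i_sb));
    if (IS_ERR(handle))
            return PTR_ERR(handle);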
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c index 3b2c5541d8a6..5692c48754a0 100644 --- a/fs/ext4/resize.c +++ b/fs/ext4/resize.c | |||
| @@ -48,65 +48,54 @@ static int verify_group_input(struct super_block *sb, | |||
| 48 | 48 | ||
| 49 | ext4_get_group_no_and_offset(sb, start, NULL, &offset); | 49 | ext4_get_group_no_and_offset(sb, start, NULL, &offset); |
| 50 | if (group != sbi->s_groups_count) | 50 | if (group != sbi->s_groups_count) |
| 51 | ext4_warning(sb, __func__, | 51 | ext4_warning(sb, "Cannot add at group %u (only %u groups)", |
| 52 | "Cannot add at group %u (only %u groups)", | ||
| 53 | input->group, sbi->s_groups_count); | 52 | input->group, sbi->s_groups_count); |
| 54 | else if (offset != 0) | 53 | else if (offset != 0) |
| 55 | ext4_warning(sb, __func__, "Last group not full"); | 54 | ext4_warning(sb, "Last group not full"); |
| 56 | else if (input->reserved_blocks > input->blocks_count / 5) | 55 | else if (input->reserved_blocks > input->blocks_count / 5) |
| 57 | ext4_warning(sb, __func__, "Reserved blocks too high (%u)", | 56 | ext4_warning(sb, "Reserved blocks too high (%u)", |
| 58 | input->reserved_blocks); | 57 | input->reserved_blocks); |
| 59 | else if (free_blocks_count < 0) | 58 | else if (free_blocks_count < 0) |
| 60 | ext4_warning(sb, __func__, "Bad blocks count %u", | 59 | ext4_warning(sb, "Bad blocks count %u", |
| 61 | input->blocks_count); | 60 | input->blocks_count); |
| 62 | else if (!(bh = sb_bread(sb, end - 1))) | 61 | else if (!(bh = sb_bread(sb, end - 1))) |
| 63 | ext4_warning(sb, __func__, | 62 | ext4_warning(sb, "Cannot read last block (%llu)", |
| 64 | "Cannot read last block (%llu)", | ||
| 65 | end - 1); | 63 | end - 1); |
| 66 | else if (outside(input->block_bitmap, start, end)) | 64 | else if (outside(input->block_bitmap, start, end)) |
| 67 | ext4_warning(sb, __func__, | 65 | ext4_warning(sb, "Block bitmap not in group (block %llu)", |
| 68 | "Block bitmap not in group (block %llu)", | ||
| 69 | (unsigned long long)input->block_bitmap); | 66 | (unsigned long long)input->block_bitmap); |
| 70 | else if (outside(input->inode_bitmap, start, end)) | 67 | else if (outside(input->inode_bitmap, start, end)) |
| 71 | ext4_warning(sb, __func__, | 68 | ext4_warning(sb, "Inode bitmap not in group (block %llu)", |
| 72 | "Inode bitmap not in group (block %llu)", | ||
| 73 | (unsigned long long)input->inode_bitmap); | 69 | (unsigned long long)input->inode_bitmap); |
| 74 | else if (outside(input->inode_table, start, end) || | 70 | else if (outside(input->inode_table, start, end) || |
| 75 | outside(itend - 1, start, end)) | 71 | outside(itend - 1, start, end)) |
| 76 | ext4_warning(sb, __func__, | 72 | ext4_warning(sb, "Inode table not in group (blocks %llu-%llu)", |
| 77 | "Inode table not in group (blocks %llu-%llu)", | ||
| 78 | (unsigned long long)input->inode_table, itend - 1); | 73 | (unsigned long long)input->inode_table, itend - 1); |
| 79 | else if (input->inode_bitmap == input->block_bitmap) | 74 | else if (input->inode_bitmap == input->block_bitmap) |
| 80 | ext4_warning(sb, __func__, | 75 | ext4_warning(sb, "Block bitmap same as inode bitmap (%llu)", |
| 81 | "Block bitmap same as inode bitmap (%llu)", | ||
| 82 | (unsigned long long)input->block_bitmap); | 76 | (unsigned long long)input->block_bitmap); |
| 83 | else if (inside(input->block_bitmap, input->inode_table, itend)) | 77 | else if (inside(input->block_bitmap, input->inode_table, itend)) |
| 84 | ext4_warning(sb, __func__, | 78 | ext4_warning(sb, "Block bitmap (%llu) in inode table " |
| 85 | "Block bitmap (%llu) in inode table (%llu-%llu)", | 79 | "(%llu-%llu)", |
| 86 | (unsigned long long)input->block_bitmap, | 80 | (unsigned long long)input->block_bitmap, |
| 87 | (unsigned long long)input->inode_table, itend - 1); | 81 | (unsigned long long)input->inode_table, itend - 1); |
| 88 | else if (inside(input->inode_bitmap, input->inode_table, itend)) | 82 | else if (inside(input->inode_bitmap, input->inode_table, itend)) |
| 89 | ext4_warning(sb, __func__, | 83 | ext4_warning(sb, "Inode bitmap (%llu) in inode table " |
| 90 | "Inode bitmap (%llu) in inode table (%llu-%llu)", | 84 | "(%llu-%llu)", |
| 91 | (unsigned long long)input->inode_bitmap, | 85 | (unsigned long long)input->inode_bitmap, |
| 92 | (unsigned long long)input->inode_table, itend - 1); | 86 | (unsigned long long)input->inode_table, itend - 1); |
| 93 | else if (inside(input->block_bitmap, start, metaend)) | 87 | else if (inside(input->block_bitmap, start, metaend)) |
| 94 | ext4_warning(sb, __func__, | 88 | ext4_warning(sb, "Block bitmap (%llu) in GDT table (%llu-%llu)", |
| 95 | "Block bitmap (%llu) in GDT table" | ||
| 96 | " (%llu-%llu)", | ||
| 97 | (unsigned long long)input->block_bitmap, | 89 | (unsigned long long)input->block_bitmap, |
| 98 | start, metaend - 1); | 90 | start, metaend - 1); |
| 99 | else if (inside(input->inode_bitmap, start, metaend)) | 91 | else if (inside(input->inode_bitmap, start, metaend)) |
| 100 | ext4_warning(sb, __func__, | 92 | ext4_warning(sb, "Inode bitmap (%llu) in GDT table (%llu-%llu)", |
| 101 | "Inode bitmap (%llu) in GDT table" | ||
| 102 | " (%llu-%llu)", | ||
| 103 | (unsigned long long)input->inode_bitmap, | 93 | (unsigned long long)input->inode_bitmap, |
| 104 | start, metaend - 1); | 94 | start, metaend - 1); |
| 105 | else if (inside(input->inode_table, start, metaend) || | 95 | else if (inside(input->inode_table, start, metaend) || |
| 106 | inside(itend - 1, start, metaend)) | 96 | inside(itend - 1, start, metaend)) |
| 107 | ext4_warning(sb, __func__, | 97 | ext4_warning(sb, "Inode table (%llu-%llu) overlaps GDT table " |
| 108 | "Inode table (%llu-%llu) overlaps" | 98 | "(%llu-%llu)", |
| 109 | "GDT table (%llu-%llu)", | ||
| 110 | (unsigned long long)input->inode_table, | 99 | (unsigned long long)input->inode_table, |
| 111 | itend - 1, start, metaend - 1); | 100 | itend - 1, start, metaend - 1); |
| 112 | else | 101 | else |
| @@ -364,8 +353,7 @@ static int verify_reserved_gdb(struct super_block *sb, | |||
| 364 | while ((grp = ext4_list_backups(sb, &three, &five, &seven)) < end) { | 353 | while ((grp = ext4_list_backups(sb, &three, &five, &seven)) < end) { |
| 365 | if (le32_to_cpu(*p++) != | 354 | if (le32_to_cpu(*p++) != |
| 366 | grp * EXT4_BLOCKS_PER_GROUP(sb) + blk){ | 355 | grp * EXT4_BLOCKS_PER_GROUP(sb) + blk){ |
| 367 | ext4_warning(sb, __func__, | 356 | ext4_warning(sb, "reserved GDT %llu" |
| 368 | "reserved GDT %llu" | ||
| 369 | " missing grp %d (%llu)", | 357 | " missing grp %d (%llu)", |
| 370 | blk, grp, | 358 | blk, grp, |
| 371 | grp * | 359 | grp * |
| @@ -420,8 +408,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode, | |||
| 420 | */ | 408 | */ |
| 421 | if (EXT4_SB(sb)->s_sbh->b_blocknr != | 409 | if (EXT4_SB(sb)->s_sbh->b_blocknr != |
| 422 | le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) { | 410 | le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) { |
| 423 | ext4_warning(sb, __func__, | 411 | ext4_warning(sb, "won't resize using backup superblock at %llu", |
| 424 | "won't resize using backup superblock at %llu", | ||
| 425 | (unsigned long long)EXT4_SB(sb)->s_sbh->b_blocknr); | 412 | (unsigned long long)EXT4_SB(sb)->s_sbh->b_blocknr); |
| 426 | return -EPERM; | 413 | return -EPERM; |
| 427 | } | 414 | } |
| @@ -444,8 +431,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode, | |||
| 444 | 431 | ||
| 445 | data = (__le32 *)dind->b_data; | 432 | data = (__le32 *)dind->b_data; |
| 446 | if (le32_to_cpu(data[gdb_num % EXT4_ADDR_PER_BLOCK(sb)]) != gdblock) { | 433 | if (le32_to_cpu(data[gdb_num % EXT4_ADDR_PER_BLOCK(sb)]) != gdblock) { |
| 447 | ext4_warning(sb, __func__, | 434 | ext4_warning(sb, "new group %u GDT block %llu not reserved", |
| 448 | "new group %u GDT block %llu not reserved", | ||
| 449 | input->group, gdblock); | 435 | input->group, gdblock); |
| 450 | err = -EINVAL; | 436 | err = -EINVAL; |
| 451 | goto exit_dind; | 437 | goto exit_dind; |
| @@ -468,7 +454,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode, | |||
| 468 | GFP_NOFS); | 454 | GFP_NOFS); |
| 469 | if (!n_group_desc) { | 455 | if (!n_group_desc) { |
| 470 | err = -ENOMEM; | 456 | err = -ENOMEM; |
| 471 | ext4_warning(sb, __func__, | 457 | ext4_warning(sb, |
| 472 | "not enough memory for %lu groups", gdb_num + 1); | 458 | "not enough memory for %lu groups", gdb_num + 1); |
| 473 | goto exit_inode; | 459 | goto exit_inode; |
| 474 | } | 460 | } |
| @@ -567,8 +553,7 @@ static int reserve_backup_gdb(handle_t *handle, struct inode *inode, | |||
| 567 | /* Get each reserved primary GDT block and verify it holds backups */ | 553 | /* Get each reserved primary GDT block and verify it holds backups */ |
| 568 | for (res = 0; res < reserved_gdb; res++, blk++) { | 554 | for (res = 0; res < reserved_gdb; res++, blk++) { |
| 569 | if (le32_to_cpu(*data) != blk) { | 555 | if (le32_to_cpu(*data) != blk) { |
| 570 | ext4_warning(sb, __func__, | 556 | ext4_warning(sb, "reserved block %llu" |
| 571 | "reserved block %llu" | ||
| 572 | " not at offset %ld", | 557 | " not at offset %ld", |
| 573 | blk, | 558 | blk, |
| 574 | (long)(data - (__le32 *)dind->b_data)); | 559 | (long)(data - (__le32 *)dind->b_data)); |
| @@ -713,8 +698,7 @@ static void update_backups(struct super_block *sb, | |||
| 713 | */ | 698 | */ |
| 714 | exit_err: | 699 | exit_err: |
| 715 | if (err) { | 700 | if (err) { |
| 716 | ext4_warning(sb, __func__, | 701 | ext4_warning(sb, "can't update backup for group %u (err %d), " |
| 717 | "can't update backup for group %u (err %d), " | ||
| 718 | "forcing fsck on next reboot", group, err); | 702 | "forcing fsck on next reboot", group, err); |
| 719 | sbi->s_mount_state &= ~EXT4_VALID_FS; | 703 | sbi->s_mount_state &= ~EXT4_VALID_FS; |
| 720 | sbi->s_es->s_state &= cpu_to_le16(~EXT4_VALID_FS); | 704 | sbi->s_es->s_state &= cpu_to_le16(~EXT4_VALID_FS); |
| @@ -753,20 +737,19 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input) | |||
| 753 | 737 | ||
| 754 | if (gdb_off == 0 && !EXT4_HAS_RO_COMPAT_FEATURE(sb, | 738 | if (gdb_off == 0 && !EXT4_HAS_RO_COMPAT_FEATURE(sb, |
| 755 | EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER)) { | 739 | EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER)) { |
| 756 | ext4_warning(sb, __func__, | 740 | ext4_warning(sb, "Can't resize non-sparse filesystem further"); |
| 757 | "Can't resize non-sparse filesystem further"); | ||
| 758 | return -EPERM; | 741 | return -EPERM; |
| 759 | } | 742 | } |
| 760 | 743 | ||
| 761 | if (ext4_blocks_count(es) + input->blocks_count < | 744 | if (ext4_blocks_count(es) + input->blocks_count < |
| 762 | ext4_blocks_count(es)) { | 745 | ext4_blocks_count(es)) { |
| 763 | ext4_warning(sb, __func__, "blocks_count overflow"); | 746 | ext4_warning(sb, "blocks_count overflow"); |
| 764 | return -EINVAL; | 747 | return -EINVAL; |
| 765 | } | 748 | } |
| 766 | 749 | ||
| 767 | if (le32_to_cpu(es->s_inodes_count) + EXT4_INODES_PER_GROUP(sb) < | 750 | if (le32_to_cpu(es->s_inodes_count) + EXT4_INODES_PER_GROUP(sb) < |
| 768 | le32_to_cpu(es->s_inodes_count)) { | 751 | le32_to_cpu(es->s_inodes_count)) { |
| 769 | ext4_warning(sb, __func__, "inodes_count overflow"); | 752 | ext4_warning(sb, "inodes_count overflow"); |
| 770 | return -EINVAL; | 753 | return -EINVAL; |
| 771 | } | 754 | } |
| 772 | 755 | ||
| @@ -774,14 +757,13 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input) | |||
| 774 | if (!EXT4_HAS_COMPAT_FEATURE(sb, | 757 | if (!EXT4_HAS_COMPAT_FEATURE(sb, |
| 775 | EXT4_FEATURE_COMPAT_RESIZE_INODE) | 758 | EXT4_FEATURE_COMPAT_RESIZE_INODE) |
| 776 | || !le16_to_cpu(es->s_reserved_gdt_blocks)) { | 759 | || !le16_to_cpu(es->s_reserved_gdt_blocks)) { |
| 777 | ext4_warning(sb, __func__, | 760 | ext4_warning(sb, |
| 778 | "No reserved GDT blocks, can't resize"); | 761 | "No reserved GDT blocks, can't resize"); |
| 779 | return -EPERM; | 762 | return -EPERM; |
| 780 | } | 763 | } |
| 781 | inode = ext4_iget(sb, EXT4_RESIZE_INO); | 764 | inode = ext4_iget(sb, EXT4_RESIZE_INO); |
| 782 | if (IS_ERR(inode)) { | 765 | if (IS_ERR(inode)) { |
| 783 | ext4_warning(sb, __func__, | 766 | ext4_warning(sb, "Error opening resize inode"); |
| 784 | "Error opening resize inode"); | ||
| 785 | return PTR_ERR(inode); | 767 | return PTR_ERR(inode); |
| 786 | } | 768 | } |
| 787 | } | 769 | } |
| @@ -810,8 +792,7 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input) | |||
| 810 | 792 | ||
| 811 | mutex_lock(&sbi->s_resize_lock); | 793 | mutex_lock(&sbi->s_resize_lock); |
| 812 | if (input->group != sbi->s_groups_count) { | 794 | if (input->group != sbi->s_groups_count) { |
| 813 | ext4_warning(sb, __func__, | 795 | ext4_warning(sb, "multiple resizers run on filesystem!"); |
| 814 | "multiple resizers run on filesystem!"); | ||
| 815 | err = -EBUSY; | 796 | err = -EBUSY; |
| 816 | goto exit_journal; | 797 | goto exit_journal; |
| 817 | } | 798 | } |
| @@ -997,13 +978,12 @@ int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es, | |||
| 997 | " too large to resize to %llu blocks safely\n", | 978 | " too large to resize to %llu blocks safely\n", |
| 998 | sb->s_id, n_blocks_count); | 979 | sb->s_id, n_blocks_count); |
| 999 | if (sizeof(sector_t) < 8) | 980 | if (sizeof(sector_t) < 8) |
| 1000 | ext4_warning(sb, __func__, "CONFIG_LBDAF not enabled"); | 981 | ext4_warning(sb, "CONFIG_LBDAF not enabled"); |
| 1001 | return -EINVAL; | 982 | return -EINVAL; |
| 1002 | } | 983 | } |
| 1003 | 984 | ||
| 1004 | if (n_blocks_count < o_blocks_count) { | 985 | if (n_blocks_count < o_blocks_count) { |
| 1005 | ext4_warning(sb, __func__, | 986 | ext4_warning(sb, "can't shrink FS - resize aborted"); |
| 1006 | "can't shrink FS - resize aborted"); | ||
| 1007 | return -EBUSY; | 987 | return -EBUSY; |
| 1008 | } | 988 | } |
| 1009 | 989 | ||
| @@ -1011,15 +991,14 @@ int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es, | |||
| 1011 | ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last); | 991 | ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last); |
| 1012 | 992 | ||
| 1013 | if (last == 0) { | 993 | if (last == 0) { |
| 1014 | ext4_warning(sb, __func__, | 994 | ext4_warning(sb, "need to use ext2online to resize further"); |
| 1015 | "need to use ext2online to resize further"); | ||
| 1016 | return -EPERM; | 995 | return -EPERM; |
| 1017 | } | 996 | } |
| 1018 | 997 | ||
| 1019 | add = EXT4_BLOCKS_PER_GROUP(sb) - last; | 998 | add = EXT4_BLOCKS_PER_GROUP(sb) - last; |
| 1020 | 999 | ||
| 1021 | if (o_blocks_count + add < o_blocks_count) { | 1000 | if (o_blocks_count + add < o_blocks_count) { |
| 1022 | ext4_warning(sb, __func__, "blocks_count overflow"); | 1001 | ext4_warning(sb, "blocks_count overflow"); |
| 1023 | return -EINVAL; | 1002 | return -EINVAL; |
| 1024 | } | 1003 | } |
| 1025 | 1004 | ||
| @@ -1027,16 +1006,13 @@ int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es, | |||
| 1027 | add = n_blocks_count - o_blocks_count; | 1006 | add = n_blocks_count - o_blocks_count; |
| 1028 | 1007 | ||
| 1029 | if (o_blocks_count + add < n_blocks_count) | 1008 | if (o_blocks_count + add < n_blocks_count) |
| 1030 | ext4_warning(sb, __func__, | 1009 | ext4_warning(sb, "will only finish group (%llu blocks, %u new)", |
| 1031 | "will only finish group (%llu" | ||
| 1032 | " blocks, %u new)", | ||
| 1033 | o_blocks_count + add, add); | 1010 | o_blocks_count + add, add); |
| 1034 | 1011 | ||
| 1035 | /* See if the device is actually as big as what was requested */ | 1012 | /* See if the device is actually as big as what was requested */ |
| 1036 | bh = sb_bread(sb, o_blocks_count + add - 1); | 1013 | bh = sb_bread(sb, o_blocks_count + add - 1); |
| 1037 | if (!bh) { | 1014 | if (!bh) { |
| 1038 | ext4_warning(sb, __func__, | 1015 | ext4_warning(sb, "can't read last block, resize aborted"); |
| 1039 | "can't read last block, resize aborted"); | ||
| 1040 | return -ENOSPC; | 1016 | return -ENOSPC; |
| 1041 | } | 1017 | } |
| 1042 | brelse(bh); | 1018 | brelse(bh); |
| @@ -1047,14 +1023,13 @@ int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es, | |||
| 1047 | handle = ext4_journal_start_sb(sb, 3); | 1023 | handle = ext4_journal_start_sb(sb, 3); |
| 1048 | if (IS_ERR(handle)) { | 1024 | if (IS_ERR(handle)) { |
| 1049 | err = PTR_ERR(handle); | 1025 | err = PTR_ERR(handle); |
| 1050 | ext4_warning(sb, __func__, "error %d on journal start", err); | 1026 | ext4_warning(sb, "error %d on journal start", err); |
| 1051 | goto exit_put; | 1027 | goto exit_put; |
| 1052 | } | 1028 | } |
| 1053 | 1029 | ||
| 1054 | mutex_lock(&EXT4_SB(sb)->s_resize_lock); | 1030 | mutex_lock(&EXT4_SB(sb)->s_resize_lock); |
| 1055 | if (o_blocks_count != ext4_blocks_count(es)) { | 1031 | if (o_blocks_count != ext4_blocks_count(es)) { |
| 1056 | ext4_warning(sb, __func__, | 1032 | ext4_warning(sb, "multiple resizers run on filesystem!"); |
| 1057 | "multiple resizers run on filesystem!"); | ||
| 1058 | mutex_unlock(&EXT4_SB(sb)->s_resize_lock); | 1033 | mutex_unlock(&EXT4_SB(sb)->s_resize_lock); |
| 1059 | ext4_journal_stop(handle); | 1034 | ext4_journal_stop(handle); |
| 1060 | err = -EBUSY; | 1035 | err = -EBUSY; |
| @@ -1063,8 +1038,7 @@ int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es, | |||
| 1063 | 1038 | ||
| 1064 | if ((err = ext4_journal_get_write_access(handle, | 1039 | if ((err = ext4_journal_get_write_access(handle, |
| 1065 | EXT4_SB(sb)->s_sbh))) { | 1040 | EXT4_SB(sb)->s_sbh))) { |
| 1066 | ext4_warning(sb, __func__, | 1041 | ext4_warning(sb, "error %d on journal write access", err); |
| 1067 | "error %d on journal write access", err); | ||
| 1068 | mutex_unlock(&EXT4_SB(sb)->s_resize_lock); | 1042 | mutex_unlock(&EXT4_SB(sb)->s_resize_lock); |
| 1069 | ext4_journal_stop(handle); | 1043 | ext4_journal_stop(handle); |
| 1070 | goto exit_put; | 1044 | goto exit_put; |
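
The resize.c hunks above drop the explicit __func__ argument from ext4_warning(), and the super.c hunks below rename the implementations to __ext4_warning()/__ext4_error(). The piece that connects the two -- the wrapper macros in ext4.h -- is not part of this section, but presumably looks roughly like the following sketch (names inferred from the renamed functions):

    /* Assumed ext4.h glue (not shown in this section): variadic macros
     * splice __func__ back in, so call sites no longer pass it by hand. */
    #define ext4_error(sb, message...)   __ext4_error(sb, __func__, ## message)
    #define ext4_warning(sb, message...) __ext4_warning(sb, __func__, ## message)
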
diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 735c20d5fd56..2b83b96cb2eb 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c | |||
| @@ -333,7 +333,7 @@ static void ext4_handle_error(struct super_block *sb) | |||
| 333 | sb->s_id); | 333 | sb->s_id); |
| 334 | } | 334 | } |
| 335 | 335 | ||
| 336 | void ext4_error(struct super_block *sb, const char *function, | 336 | void __ext4_error(struct super_block *sb, const char *function, |
| 337 | const char *fmt, ...) | 337 | const char *fmt, ...) |
| 338 | { | 338 | { |
| 339 | va_list args; | 339 | va_list args; |
| @@ -347,6 +347,42 @@ void ext4_error(struct super_block *sb, const char *function, | |||
| 347 | ext4_handle_error(sb); | 347 | ext4_handle_error(sb); |
| 348 | } | 348 | } |
| 349 | 349 | ||
| 350 | void ext4_error_inode(const char *function, struct inode *inode, | ||
| 351 | const char *fmt, ...) | ||
| 352 | { | ||
| 353 | va_list args; | ||
| 354 | |||
| 355 | va_start(args, fmt); | ||
| 356 | printk(KERN_CRIT "EXT4-fs error (device %s): %s: inode #%lu: (comm %s) ", | ||
| 357 | inode->i_sb->s_id, function, inode->i_ino, current->comm); | ||
| 358 | vprintk(fmt, args); | ||
| 359 | printk("\n"); | ||
| 360 | va_end(args); | ||
| 361 | |||
| 362 | ext4_handle_error(inode->i_sb); | ||
| 363 | } | ||
| 364 | |||
| 365 | void ext4_error_file(const char *function, struct file *file, | ||
| 366 | const char *fmt, ...) | ||
| 367 | { | ||
| 368 | va_list args; | ||
| 369 | struct inode *inode = file->f_dentry->d_inode; | ||
| 370 | char pathname[80], *path; | ||
| 371 | |||
| 372 | va_start(args, fmt); | ||
| 373 | path = d_path(&(file->f_path), pathname, sizeof(pathname)); | ||
| 374 | if (!path) | ||
| 375 | path = "(unknown)"; | ||
| 376 | printk(KERN_CRIT | ||
| 377 | "EXT4-fs error (device %s): %s: inode #%lu (comm %s path %s): ", | ||
| 378 | inode->i_sb->s_id, function, inode->i_ino, current->comm, path); | ||
| 379 | vprintk(fmt, args); | ||
| 380 | printk("\n"); | ||
| 381 | va_end(args); | ||
| 382 | |||
| 383 | ext4_handle_error(inode->i_sb); | ||
| 384 | } | ||
| 385 | |||
| 350 | static const char *ext4_decode_error(struct super_block *sb, int errno, | 386 | static const char *ext4_decode_error(struct super_block *sb, int errno, |
| 351 | char nbuf[16]) | 387 | char nbuf[16]) |
| 352 | { | 388 | { |
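
The hunk above only introduces ext4_error_inode() and ext4_error_file(); no callers are converted in this section. A hypothetical call site, purely for illustration (the block and length checks are made up, not taken from the patch):

    /* Hypothetical usage -- illustrative only. */
    if (block >= max_blocks)
            ext4_error_inode(__func__, inode, "bad block %llu",
                             (unsigned long long) block);

    if (len < 0)
            ext4_error_file(__func__, filp, "bad request length %d", len);
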
| @@ -450,7 +486,7 @@ void ext4_msg (struct super_block * sb, const char *prefix, | |||
| 450 | va_end(args); | 486 | va_end(args); |
| 451 | } | 487 | } |
| 452 | 488 | ||
| 453 | void ext4_warning(struct super_block *sb, const char *function, | 489 | void __ext4_warning(struct super_block *sb, const char *function, |
| 454 | const char *fmt, ...) | 490 | const char *fmt, ...) |
| 455 | { | 491 | { |
| 456 | va_list args; | 492 | va_list args; |
| @@ -507,7 +543,7 @@ void ext4_update_dynamic_rev(struct super_block *sb) | |||
| 507 | if (le32_to_cpu(es->s_rev_level) > EXT4_GOOD_OLD_REV) | 543 | if (le32_to_cpu(es->s_rev_level) > EXT4_GOOD_OLD_REV) |
| 508 | return; | 544 | return; |
| 509 | 545 | ||
| 510 | ext4_warning(sb, __func__, | 546 | ext4_warning(sb, |
| 511 | "updating to rev %d because of new feature flag, " | 547 | "updating to rev %d because of new feature flag, " |
| 512 | "running e2fsck is recommended", | 548 | "running e2fsck is recommended", |
| 513 | EXT4_DYNAMIC_REV); | 549 | EXT4_DYNAMIC_REV); |
| @@ -708,7 +744,8 @@ static struct inode *ext4_alloc_inode(struct super_block *sb) | |||
| 708 | #ifdef CONFIG_QUOTA | 744 | #ifdef CONFIG_QUOTA |
| 709 | ei->i_reserved_quota = 0; | 745 | ei->i_reserved_quota = 0; |
| 710 | #endif | 746 | #endif |
| 711 | INIT_LIST_HEAD(&ei->i_aio_dio_complete_list); | 747 | INIT_LIST_HEAD(&ei->i_completed_io_list); |
| 748 | spin_lock_init(&ei->i_completed_io_lock); | ||
| 712 | ei->cur_aio_dio = NULL; | 749 | ei->cur_aio_dio = NULL; |
| 713 | ei->i_sync_tid = 0; | 750 | ei->i_sync_tid = 0; |
| 714 | ei->i_datasync_tid = 0; | 751 | ei->i_datasync_tid = 0; |
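
The rename from i_aio_dio_complete_list to i_completed_io_list comes with a new per-inode spinlock, i_completed_io_lock. The producer/consumer code lives elsewhere (fs/ext4/inode.c and the ext4-dio-unwritten workqueue), so the following is only a sketch of the intended pattern, with illustrative variable names:

    /* Sketch only: queue a finished unwritten-extent I/O for the
     * workqueue to convert later, under the new per-inode lock. */
    unsigned long flags;

    spin_lock_irqsave(&ei->i_completed_io_lock, flags);
    list_add_tail(&io_end->list, &ei->i_completed_io_list);
    spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
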
| @@ -761,6 +798,7 @@ static void destroy_inodecache(void) | |||
| 761 | 798 | ||
| 762 | static void ext4_clear_inode(struct inode *inode) | 799 | static void ext4_clear_inode(struct inode *inode) |
| 763 | { | 800 | { |
| 801 | dquot_drop(inode); | ||
| 764 | ext4_discard_preallocations(inode); | 802 | ext4_discard_preallocations(inode); |
| 765 | if (EXT4_JOURNAL(inode)) | 803 | if (EXT4_JOURNAL(inode)) |
| 766 | jbd2_journal_release_jbd_inode(EXT4_SB(inode->i_sb)->s_journal, | 804 | jbd2_journal_release_jbd_inode(EXT4_SB(inode->i_sb)->s_journal, |
| @@ -796,10 +834,10 @@ static inline void ext4_show_quota_options(struct seq_file *seq, | |||
| 796 | if (sbi->s_qf_names[GRPQUOTA]) | 834 | if (sbi->s_qf_names[GRPQUOTA]) |
| 797 | seq_printf(seq, ",grpjquota=%s", sbi->s_qf_names[GRPQUOTA]); | 835 | seq_printf(seq, ",grpjquota=%s", sbi->s_qf_names[GRPQUOTA]); |
| 798 | 836 | ||
| 799 | if (sbi->s_mount_opt & EXT4_MOUNT_USRQUOTA) | 837 | if (test_opt(sb, USRQUOTA)) |
| 800 | seq_puts(seq, ",usrquota"); | 838 | seq_puts(seq, ",usrquota"); |
| 801 | 839 | ||
| 802 | if (sbi->s_mount_opt & EXT4_MOUNT_GRPQUOTA) | 840 | if (test_opt(sb, GRPQUOTA)) |
| 803 | seq_puts(seq, ",grpquota"); | 841 | seq_puts(seq, ",grpquota"); |
| 804 | #endif | 842 | #endif |
| 805 | } | 843 | } |
| @@ -926,6 +964,9 @@ static int ext4_show_options(struct seq_file *seq, struct vfsmount *vfs) | |||
| 926 | if (test_opt(sb, NOLOAD)) | 964 | if (test_opt(sb, NOLOAD)) |
| 927 | seq_puts(seq, ",norecovery"); | 965 | seq_puts(seq, ",norecovery"); |
| 928 | 966 | ||
| 967 | if (test_opt(sb, DIOREAD_NOLOCK)) | ||
| 968 | seq_puts(seq, ",dioread_nolock"); | ||
| 969 | |||
| 929 | ext4_show_quota_options(seq, sb); | 970 | ext4_show_quota_options(seq, sb); |
| 930 | 971 | ||
| 931 | return 0; | 972 | return 0; |
| @@ -1012,19 +1053,9 @@ static ssize_t ext4_quota_write(struct super_block *sb, int type, | |||
| 1012 | const char *data, size_t len, loff_t off); | 1053 | const char *data, size_t len, loff_t off); |
| 1013 | 1054 | ||
| 1014 | static const struct dquot_operations ext4_quota_operations = { | 1055 | static const struct dquot_operations ext4_quota_operations = { |
| 1015 | .initialize = dquot_initialize, | ||
| 1016 | .drop = dquot_drop, | ||
| 1017 | .alloc_space = dquot_alloc_space, | ||
| 1018 | .reserve_space = dquot_reserve_space, | ||
| 1019 | .claim_space = dquot_claim_space, | ||
| 1020 | .release_rsv = dquot_release_reserved_space, | ||
| 1021 | #ifdef CONFIG_QUOTA | 1056 | #ifdef CONFIG_QUOTA |
| 1022 | .get_reserved_space = ext4_get_reserved_space, | 1057 | .get_reserved_space = ext4_get_reserved_space, |
| 1023 | #endif | 1058 | #endif |
| 1024 | .alloc_inode = dquot_alloc_inode, | ||
| 1025 | .free_space = dquot_free_space, | ||
| 1026 | .free_inode = dquot_free_inode, | ||
| 1027 | .transfer = dquot_transfer, | ||
| 1028 | .write_dquot = ext4_write_dquot, | 1059 | .write_dquot = ext4_write_dquot, |
| 1029 | .acquire_dquot = ext4_acquire_dquot, | 1060 | .acquire_dquot = ext4_acquire_dquot, |
| 1030 | .release_dquot = ext4_release_dquot, | 1061 | .release_dquot = ext4_release_dquot, |
| @@ -1109,6 +1140,7 @@ enum { | |||
| 1109 | Opt_stripe, Opt_delalloc, Opt_nodelalloc, | 1140 | Opt_stripe, Opt_delalloc, Opt_nodelalloc, |
| 1110 | Opt_block_validity, Opt_noblock_validity, | 1141 | Opt_block_validity, Opt_noblock_validity, |
| 1111 | Opt_inode_readahead_blks, Opt_journal_ioprio, | 1142 | Opt_inode_readahead_blks, Opt_journal_ioprio, |
| 1143 | Opt_dioread_nolock, Opt_dioread_lock, | ||
| 1112 | Opt_discard, Opt_nodiscard, | 1144 | Opt_discard, Opt_nodiscard, |
| 1113 | }; | 1145 | }; |
| 1114 | 1146 | ||
| @@ -1176,6 +1208,8 @@ static const match_table_t tokens = { | |||
| 1176 | {Opt_auto_da_alloc, "auto_da_alloc=%u"}, | 1208 | {Opt_auto_da_alloc, "auto_da_alloc=%u"}, |
| 1177 | {Opt_auto_da_alloc, "auto_da_alloc"}, | 1209 | {Opt_auto_da_alloc, "auto_da_alloc"}, |
| 1178 | {Opt_noauto_da_alloc, "noauto_da_alloc"}, | 1210 | {Opt_noauto_da_alloc, "noauto_da_alloc"}, |
| 1211 | {Opt_dioread_nolock, "dioread_nolock"}, | ||
| 1212 | {Opt_dioread_lock, "dioread_lock"}, | ||
| 1179 | {Opt_discard, "discard"}, | 1213 | {Opt_discard, "discard"}, |
| 1180 | {Opt_nodiscard, "nodiscard"}, | 1214 | {Opt_nodiscard, "nodiscard"}, |
| 1181 | {Opt_err, NULL}, | 1215 | {Opt_err, NULL}, |
| @@ -1205,6 +1239,66 @@ static ext4_fsblk_t get_sb_block(void **data) | |||
| 1205 | } | 1239 | } |
| 1206 | 1240 | ||
| 1207 | #define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3)) | 1241 | #define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3)) |
| 1242 | static char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n" | ||
| 1243 | "Contact linux-ext4@vger.kernel.org if you think we should keep it.\n"; | ||
| 1244 | |||
| 1245 | #ifdef CONFIG_QUOTA | ||
| 1246 | static int set_qf_name(struct super_block *sb, int qtype, substring_t *args) | ||
| 1247 | { | ||
| 1248 | struct ext4_sb_info *sbi = EXT4_SB(sb); | ||
| 1249 | char *qname; | ||
| 1250 | |||
| 1251 | if (sb_any_quota_loaded(sb) && | ||
| 1252 | !sbi->s_qf_names[qtype]) { | ||
| 1253 | ext4_msg(sb, KERN_ERR, | ||
| 1254 | "Cannot change journaled " | ||
| 1255 | "quota options when quota turned on"); | ||
| 1256 | return 0; | ||
| 1257 | } | ||
| 1258 | qname = match_strdup(args); | ||
| 1259 | if (!qname) { | ||
| 1260 | ext4_msg(sb, KERN_ERR, | ||
| 1261 | "Not enough memory for storing quotafile name"); | ||
| 1262 | return 0; | ||
| 1263 | } | ||
| 1264 | if (sbi->s_qf_names[qtype] && | ||
| 1265 | strcmp(sbi->s_qf_names[qtype], qname)) { | ||
| 1266 | ext4_msg(sb, KERN_ERR, | ||
| 1267 | "%s quota file already specified", QTYPE2NAME(qtype)); | ||
| 1268 | kfree(qname); | ||
| 1269 | return 0; | ||
| 1270 | } | ||
| 1271 | sbi->s_qf_names[qtype] = qname; | ||
| 1272 | if (strchr(sbi->s_qf_names[qtype], '/')) { | ||
| 1273 | ext4_msg(sb, KERN_ERR, | ||
| 1274 | "quotafile must be on filesystem root"); | ||
| 1275 | kfree(sbi->s_qf_names[qtype]); | ||
| 1276 | sbi->s_qf_names[qtype] = NULL; | ||
| 1277 | return 0; | ||
| 1278 | } | ||
| 1279 | set_opt(sbi->s_mount_opt, QUOTA); | ||
| 1280 | return 1; | ||
| 1281 | } | ||
| 1282 | |||
| 1283 | static int clear_qf_name(struct super_block *sb, int qtype) | ||
| 1284 | { | ||
| 1285 | |||
| 1286 | struct ext4_sb_info *sbi = EXT4_SB(sb); | ||
| 1287 | |||
| 1288 | if (sb_any_quota_loaded(sb) && | ||
| 1289 | sbi->s_qf_names[qtype]) { | ||
| 1290 | ext4_msg(sb, KERN_ERR, "Cannot change journaled quota options" | ||
| 1291 | " when quota turned on"); | ||
| 1292 | return 0; | ||
| 1293 | } | ||
| 1294 | /* | ||
| 1295 | * The space will be released later when all options are confirmed | ||
| 1296 | * to be correct | ||
| 1297 | */ | ||
| 1298 | sbi->s_qf_names[qtype] = NULL; | ||
| 1299 | return 1; | ||
| 1300 | } | ||
| 1301 | #endif | ||
| 1208 | 1302 | ||
| 1209 | static int parse_options(char *options, struct super_block *sb, | 1303 | static int parse_options(char *options, struct super_block *sb, |
| 1210 | unsigned long *journal_devnum, | 1304 | unsigned long *journal_devnum, |
| @@ -1217,8 +1311,7 @@ static int parse_options(char *options, struct super_block *sb, | |||
| 1217 | int data_opt = 0; | 1311 | int data_opt = 0; |
| 1218 | int option; | 1312 | int option; |
| 1219 | #ifdef CONFIG_QUOTA | 1313 | #ifdef CONFIG_QUOTA |
| 1220 | int qtype, qfmt; | 1314 | int qfmt; |
| 1221 | char *qname; | ||
| 1222 | #endif | 1315 | #endif |
| 1223 | 1316 | ||
| 1224 | if (!options) | 1317 | if (!options) |
| @@ -1229,19 +1322,31 @@ static int parse_options(char *options, struct super_block *sb, | |||
| 1229 | if (!*p) | 1322 | if (!*p) |
| 1230 | continue; | 1323 | continue; |
| 1231 | 1324 | ||
| 1325 | /* | ||
| 1326 | * Initialize args struct so we know whether arg was | ||
| 1327 | * found; some options take optional arguments. | ||
| 1328 | */ | ||
| 1329 | args[0].to = args[0].from = 0; | ||
| 1232 | token = match_token(p, tokens, args); | 1330 | token = match_token(p, tokens, args); |
| 1233 | switch (token) { | 1331 | switch (token) { |
| 1234 | case Opt_bsd_df: | 1332 | case Opt_bsd_df: |
| 1333 | ext4_msg(sb, KERN_WARNING, deprecated_msg, p, "2.6.38"); | ||
| 1235 | clear_opt(sbi->s_mount_opt, MINIX_DF); | 1334 | clear_opt(sbi->s_mount_opt, MINIX_DF); |
| 1236 | break; | 1335 | break; |
| 1237 | case Opt_minix_df: | 1336 | case Opt_minix_df: |
| 1337 | ext4_msg(sb, KERN_WARNING, deprecated_msg, p, "2.6.38"); | ||
| 1238 | set_opt(sbi->s_mount_opt, MINIX_DF); | 1338 | set_opt(sbi->s_mount_opt, MINIX_DF); |
| 1339 | |||
| 1239 | break; | 1340 | break; |
| 1240 | case Opt_grpid: | 1341 | case Opt_grpid: |
| 1342 | ext4_msg(sb, KERN_WARNING, deprecated_msg, p, "2.6.38"); | ||
| 1241 | set_opt(sbi->s_mount_opt, GRPID); | 1343 | set_opt(sbi->s_mount_opt, GRPID); |
| 1344 | |||
| 1242 | break; | 1345 | break; |
| 1243 | case Opt_nogrpid: | 1346 | case Opt_nogrpid: |
| 1347 | ext4_msg(sb, KERN_WARNING, deprecated_msg, p, "2.6.38"); | ||
| 1244 | clear_opt(sbi->s_mount_opt, GRPID); | 1348 | clear_opt(sbi->s_mount_opt, GRPID); |
| 1349 | |||
| 1245 | break; | 1350 | break; |
| 1246 | case Opt_resuid: | 1351 | case Opt_resuid: |
| 1247 | if (match_int(&args[0], &option)) | 1352 | if (match_int(&args[0], &option)) |
| @@ -1378,14 +1483,13 @@ static int parse_options(char *options, struct super_block *sb, | |||
| 1378 | data_opt = EXT4_MOUNT_WRITEBACK_DATA; | 1483 | data_opt = EXT4_MOUNT_WRITEBACK_DATA; |
| 1379 | datacheck: | 1484 | datacheck: |
| 1380 | if (is_remount) { | 1485 | if (is_remount) { |
| 1381 | if ((sbi->s_mount_opt & EXT4_MOUNT_DATA_FLAGS) | 1486 | if (test_opt(sb, DATA_FLAGS) != data_opt) { |
| 1382 | != data_opt) { | ||
| 1383 | ext4_msg(sb, KERN_ERR, | 1487 | ext4_msg(sb, KERN_ERR, |
| 1384 | "Cannot change data mode on remount"); | 1488 | "Cannot change data mode on remount"); |
| 1385 | return 0; | 1489 | return 0; |
| 1386 | } | 1490 | } |
| 1387 | } else { | 1491 | } else { |
| 1388 | sbi->s_mount_opt &= ~EXT4_MOUNT_DATA_FLAGS; | 1492 | clear_opt(sbi->s_mount_opt, DATA_FLAGS); |
| 1389 | sbi->s_mount_opt |= data_opt; | 1493 | sbi->s_mount_opt |= data_opt; |
| 1390 | } | 1494 | } |
| 1391 | break; | 1495 | break; |
| @@ -1397,63 +1501,22 @@ static int parse_options(char *options, struct super_block *sb, | |||
| 1397 | break; | 1501 | break; |
| 1398 | #ifdef CONFIG_QUOTA | 1502 | #ifdef CONFIG_QUOTA |
| 1399 | case Opt_usrjquota: | 1503 | case Opt_usrjquota: |
| 1400 | qtype = USRQUOTA; | 1504 | if (!set_qf_name(sb, USRQUOTA, &args[0])) |
| 1401 | goto set_qf_name; | ||
| 1402 | case Opt_grpjquota: | ||
| 1403 | qtype = GRPQUOTA; | ||
| 1404 | set_qf_name: | ||
| 1405 | if (sb_any_quota_loaded(sb) && | ||
| 1406 | !sbi->s_qf_names[qtype]) { | ||
| 1407 | ext4_msg(sb, KERN_ERR, | ||
| 1408 | "Cannot change journaled " | ||
| 1409 | "quota options when quota turned on"); | ||
| 1410 | return 0; | 1505 | return 0; |
| 1411 | } | 1506 | break; |
| 1412 | qname = match_strdup(&args[0]); | 1507 | case Opt_grpjquota: |
| 1413 | if (!qname) { | 1508 | if (!set_qf_name(sb, GRPQUOTA, &args[0])) |
| 1414 | ext4_msg(sb, KERN_ERR, | ||
| 1415 | "Not enough memory for " | ||
| 1416 | "storing quotafile name"); | ||
| 1417 | return 0; | ||
| 1418 | } | ||
| 1419 | if (sbi->s_qf_names[qtype] && | ||
| 1420 | strcmp(sbi->s_qf_names[qtype], qname)) { | ||
| 1421 | ext4_msg(sb, KERN_ERR, | ||
| 1422 | "%s quota file already " | ||
| 1423 | "specified", QTYPE2NAME(qtype)); | ||
| 1424 | kfree(qname); | ||
| 1425 | return 0; | ||
| 1426 | } | ||
| 1427 | sbi->s_qf_names[qtype] = qname; | ||
| 1428 | if (strchr(sbi->s_qf_names[qtype], '/')) { | ||
| 1429 | ext4_msg(sb, KERN_ERR, | ||
| 1430 | "quotafile must be on " | ||
| 1431 | "filesystem root"); | ||
| 1432 | kfree(sbi->s_qf_names[qtype]); | ||
| 1433 | sbi->s_qf_names[qtype] = NULL; | ||
| 1434 | return 0; | 1509 | return 0; |
| 1435 | } | ||
| 1436 | set_opt(sbi->s_mount_opt, QUOTA); | ||
| 1437 | break; | 1510 | break; |
| 1438 | case Opt_offusrjquota: | 1511 | case Opt_offusrjquota: |
| 1439 | qtype = USRQUOTA; | 1512 | if (!clear_qf_name(sb, USRQUOTA)) |
| 1440 | goto clear_qf_name; | 1513 | return 0; |
| 1514 | break; | ||
| 1441 | case Opt_offgrpjquota: | 1515 | case Opt_offgrpjquota: |
| 1442 | qtype = GRPQUOTA; | 1516 | if (!clear_qf_name(sb, GRPQUOTA)) |
| 1443 | clear_qf_name: | ||
| 1444 | if (sb_any_quota_loaded(sb) && | ||
| 1445 | sbi->s_qf_names[qtype]) { | ||
| 1446 | ext4_msg(sb, KERN_ERR, "Cannot change " | ||
| 1447 | "journaled quota options when " | ||
| 1448 | "quota turned on"); | ||
| 1449 | return 0; | 1517 | return 0; |
| 1450 | } | ||
| 1451 | /* | ||
| 1452 | * The space will be released later when all options | ||
| 1453 | * are confirmed to be correct | ||
| 1454 | */ | ||
| 1455 | sbi->s_qf_names[qtype] = NULL; | ||
| 1456 | break; | 1518 | break; |
| 1519 | |||
| 1457 | case Opt_jqfmt_vfsold: | 1520 | case Opt_jqfmt_vfsold: |
| 1458 | qfmt = QFMT_VFS_OLD; | 1521 | qfmt = QFMT_VFS_OLD; |
| 1459 | goto set_qf_format; | 1522 | goto set_qf_format; |
| @@ -1518,10 +1581,11 @@ set_qf_format: | |||
| 1518 | clear_opt(sbi->s_mount_opt, BARRIER); | 1581 | clear_opt(sbi->s_mount_opt, BARRIER); |
| 1519 | break; | 1582 | break; |
| 1520 | case Opt_barrier: | 1583 | case Opt_barrier: |
| 1521 | if (match_int(&args[0], &option)) { | 1584 | if (args[0].from) { |
| 1522 | set_opt(sbi->s_mount_opt, BARRIER); | 1585 | if (match_int(&args[0], &option)) |
| 1523 | break; | 1586 | return 0; |
| 1524 | } | 1587 | } else |
| 1588 | option = 1; /* No argument, default to 1 */ | ||
| 1525 | if (option) | 1589 | if (option) |
| 1526 | set_opt(sbi->s_mount_opt, BARRIER); | 1590 | set_opt(sbi->s_mount_opt, BARRIER); |
| 1527 | else | 1591 | else |
| @@ -1594,10 +1658,11 @@ set_qf_format: | |||
| 1594 | set_opt(sbi->s_mount_opt,NO_AUTO_DA_ALLOC); | 1658 | set_opt(sbi->s_mount_opt,NO_AUTO_DA_ALLOC); |
| 1595 | break; | 1659 | break; |
| 1596 | case Opt_auto_da_alloc: | 1660 | case Opt_auto_da_alloc: |
| 1597 | if (match_int(&args[0], &option)) { | 1661 | if (args[0].from) { |
| 1598 | clear_opt(sbi->s_mount_opt, NO_AUTO_DA_ALLOC); | 1662 | if (match_int(&args[0], &option)) |
| 1599 | break; | 1663 | return 0; |
| 1600 | } | 1664 | } else |
| 1665 | option = 1; /* No argument, default to 1 */ | ||
| 1601 | if (option) | 1666 | if (option) |
| 1602 | clear_opt(sbi->s_mount_opt, NO_AUTO_DA_ALLOC); | 1667 | clear_opt(sbi->s_mount_opt, NO_AUTO_DA_ALLOC); |
| 1603 | else | 1668 | else |
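
The barrier and auto_da_alloc hunks depend on the args[0] initialization added earlier in parse_options(): args[0].from stays 0 when the option is given without "=N", which now lets the parser tell "no value" (default to 1) apart from "malformed value" (fail the mount). Pieced together from the diff, the pattern reads:

    case Opt_barrier:
            if (args[0].from) {             /* "barrier=N" was given */
                    if (match_int(&args[0], &option))
                            return 0;       /* bad value: reject the options */
            } else
                    option = 1;             /* bare "barrier": default on */
            if (option)
                    set_opt(sbi->s_mount_opt, BARRIER);
            else
                    clear_opt(sbi->s_mount_opt, BARRIER);
            break;
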
| @@ -1609,6 +1674,12 @@ set_qf_format: | |||
| 1609 | case Opt_nodiscard: | 1674 | case Opt_nodiscard: |
| 1610 | clear_opt(sbi->s_mount_opt, DISCARD); | 1675 | clear_opt(sbi->s_mount_opt, DISCARD); |
| 1611 | break; | 1676 | break; |
| 1677 | case Opt_dioread_nolock: | ||
| 1678 | set_opt(sbi->s_mount_opt, DIOREAD_NOLOCK); | ||
| 1679 | break; | ||
| 1680 | case Opt_dioread_lock: | ||
| 1681 | clear_opt(sbi->s_mount_opt, DIOREAD_NOLOCK); | ||
| 1682 | break; | ||
| 1612 | default: | 1683 | default: |
| 1613 | ext4_msg(sb, KERN_ERR, | 1684 | ext4_msg(sb, KERN_ERR, |
| 1614 | "Unrecognized mount option \"%s\" " | 1685 | "Unrecognized mount option \"%s\" " |
| @@ -1618,18 +1689,13 @@ set_qf_format: | |||
| 1618 | } | 1689 | } |
| 1619 | #ifdef CONFIG_QUOTA | 1690 | #ifdef CONFIG_QUOTA |
| 1620 | if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) { | 1691 | if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) { |
| 1621 | if ((sbi->s_mount_opt & EXT4_MOUNT_USRQUOTA) && | 1692 | if (test_opt(sb, USRQUOTA) && sbi->s_qf_names[USRQUOTA]) |
| 1622 | sbi->s_qf_names[USRQUOTA]) | ||
| 1623 | clear_opt(sbi->s_mount_opt, USRQUOTA); | 1693 | clear_opt(sbi->s_mount_opt, USRQUOTA); |
| 1624 | 1694 | ||
| 1625 | if ((sbi->s_mount_opt & EXT4_MOUNT_GRPQUOTA) && | 1695 | if (test_opt(sb, GRPQUOTA) && sbi->s_qf_names[GRPQUOTA]) |
| 1626 | sbi->s_qf_names[GRPQUOTA]) | ||
| 1627 | clear_opt(sbi->s_mount_opt, GRPQUOTA); | 1696 | clear_opt(sbi->s_mount_opt, GRPQUOTA); |
| 1628 | 1697 | ||
| 1629 | if ((sbi->s_qf_names[USRQUOTA] && | 1698 | if (test_opt(sb, GRPQUOTA) || test_opt(sb, USRQUOTA)) { |
| 1630 | (sbi->s_mount_opt & EXT4_MOUNT_GRPQUOTA)) || | ||
| 1631 | (sbi->s_qf_names[GRPQUOTA] && | ||
| 1632 | (sbi->s_mount_opt & EXT4_MOUNT_USRQUOTA))) { | ||
| 1633 | ext4_msg(sb, KERN_ERR, "old and new quota " | 1699 | ext4_msg(sb, KERN_ERR, "old and new quota " |
| 1634 | "format mixing"); | 1700 | "format mixing"); |
| 1635 | return 0; | 1701 | return 0; |
| @@ -1939,7 +2005,7 @@ static void ext4_orphan_cleanup(struct super_block *sb, | |||
| 1939 | } | 2005 | } |
| 1940 | 2006 | ||
| 1941 | list_add(&EXT4_I(inode)->i_orphan, &EXT4_SB(sb)->s_orphan); | 2007 | list_add(&EXT4_I(inode)->i_orphan, &EXT4_SB(sb)->s_orphan); |
| 1942 | vfs_dq_init(inode); | 2008 | dquot_initialize(inode); |
| 1943 | if (inode->i_nlink) { | 2009 | if (inode->i_nlink) { |
| 1944 | ext4_msg(sb, KERN_DEBUG, | 2010 | ext4_msg(sb, KERN_DEBUG, |
| 1945 | "%s: truncating inode %lu to %lld bytes", | 2011 | "%s: truncating inode %lu to %lld bytes", |
| @@ -2432,8 +2498,11 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) | |||
| 2432 | def_mount_opts = le32_to_cpu(es->s_default_mount_opts); | 2498 | def_mount_opts = le32_to_cpu(es->s_default_mount_opts); |
| 2433 | if (def_mount_opts & EXT4_DEFM_DEBUG) | 2499 | if (def_mount_opts & EXT4_DEFM_DEBUG) |
| 2434 | set_opt(sbi->s_mount_opt, DEBUG); | 2500 | set_opt(sbi->s_mount_opt, DEBUG); |
| 2435 | if (def_mount_opts & EXT4_DEFM_BSDGROUPS) | 2501 | if (def_mount_opts & EXT4_DEFM_BSDGROUPS) { |
| 2502 | ext4_msg(sb, KERN_WARNING, deprecated_msg, "bsdgroups", | ||
| 2503 | "2.6.38"); | ||
| 2436 | set_opt(sbi->s_mount_opt, GRPID); | 2504 | set_opt(sbi->s_mount_opt, GRPID); |
| 2505 | } | ||
| 2437 | if (def_mount_opts & EXT4_DEFM_UID16) | 2506 | if (def_mount_opts & EXT4_DEFM_UID16) |
| 2438 | set_opt(sbi->s_mount_opt, NO_UID32); | 2507 | set_opt(sbi->s_mount_opt, NO_UID32); |
| 2439 | #ifdef CONFIG_EXT4_FS_XATTR | 2508 | #ifdef CONFIG_EXT4_FS_XATTR |
| @@ -2445,11 +2514,11 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) | |||
| 2445 | set_opt(sbi->s_mount_opt, POSIX_ACL); | 2514 | set_opt(sbi->s_mount_opt, POSIX_ACL); |
| 2446 | #endif | 2515 | #endif |
| 2447 | if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_DATA) | 2516 | if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_DATA) |
| 2448 | sbi->s_mount_opt |= EXT4_MOUNT_JOURNAL_DATA; | 2517 | set_opt(sbi->s_mount_opt, JOURNAL_DATA); |
| 2449 | else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_ORDERED) | 2518 | else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_ORDERED) |
| 2450 | sbi->s_mount_opt |= EXT4_MOUNT_ORDERED_DATA; | 2519 | set_opt(sbi->s_mount_opt, ORDERED_DATA); |
| 2451 | else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_WBACK) | 2520 | else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_WBACK) |
| 2452 | sbi->s_mount_opt |= EXT4_MOUNT_WRITEBACK_DATA; | 2521 | set_opt(sbi->s_mount_opt, WRITEBACK_DATA); |
| 2453 | 2522 | ||
| 2454 | if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_PANIC) | 2523 | if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_PANIC) |
| 2455 | set_opt(sbi->s_mount_opt, ERRORS_PANIC); | 2524 | set_opt(sbi->s_mount_opt, ERRORS_PANIC); |
| @@ -2477,7 +2546,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) | |||
| 2477 | goto failed_mount; | 2546 | goto failed_mount; |
| 2478 | 2547 | ||
| 2479 | sb->s_flags = (sb->s_flags & ~MS_POSIXACL) | | 2548 | sb->s_flags = (sb->s_flags & ~MS_POSIXACL) | |
| 2480 | ((sbi->s_mount_opt & EXT4_MOUNT_POSIX_ACL) ? MS_POSIXACL : 0); | 2549 | (test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0); |
| 2481 | 2550 | ||
| 2482 | if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV && | 2551 | if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV && |
| 2483 | (EXT4_HAS_COMPAT_FEATURE(sb, ~0U) || | 2552 | (EXT4_HAS_COMPAT_FEATURE(sb, ~0U) || |
| @@ -2766,7 +2835,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) | |||
| 2766 | EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER)) { | 2835 | EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER)) { |
| 2767 | ext4_msg(sb, KERN_ERR, "required journal recovery " | 2836 | ext4_msg(sb, KERN_ERR, "required journal recovery " |
| 2768 | "suppressed and not mounted read-only"); | 2837 | "suppressed and not mounted read-only"); |
| 2769 | goto failed_mount4; | 2838 | goto failed_mount_wq; |
| 2770 | } else { | 2839 | } else { |
| 2771 | clear_opt(sbi->s_mount_opt, DATA_FLAGS); | 2840 | clear_opt(sbi->s_mount_opt, DATA_FLAGS); |
| 2772 | set_opt(sbi->s_mount_opt, WRITEBACK_DATA); | 2841 | set_opt(sbi->s_mount_opt, WRITEBACK_DATA); |
| @@ -2779,7 +2848,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) | |||
| 2779 | !jbd2_journal_set_features(EXT4_SB(sb)->s_journal, 0, 0, | 2848 | !jbd2_journal_set_features(EXT4_SB(sb)->s_journal, 0, 0, |
| 2780 | JBD2_FEATURE_INCOMPAT_64BIT)) { | 2849 | JBD2_FEATURE_INCOMPAT_64BIT)) { |
| 2781 | ext4_msg(sb, KERN_ERR, "Failed to set 64-bit journal feature"); | 2850 | ext4_msg(sb, KERN_ERR, "Failed to set 64-bit journal feature"); |
| 2782 | goto failed_mount4; | 2851 | goto failed_mount_wq; |
| 2783 | } | 2852 | } |
| 2784 | 2853 | ||
| 2785 | if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) { | 2854 | if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) { |
| @@ -2818,7 +2887,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) | |||
| 2818 | (sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE)) { | 2887 | (sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE)) { |
| 2819 | ext4_msg(sb, KERN_ERR, "Journal does not support " | 2888 | ext4_msg(sb, KERN_ERR, "Journal does not support " |
| 2820 | "requested data journaling mode"); | 2889 | "requested data journaling mode"); |
| 2821 | goto failed_mount4; | 2890 | goto failed_mount_wq; |
| 2822 | } | 2891 | } |
| 2823 | default: | 2892 | default: |
| 2824 | break; | 2893 | break; |
| @@ -2826,13 +2895,17 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) | |||
| 2826 | set_task_ioprio(sbi->s_journal->j_task, journal_ioprio); | 2895 | set_task_ioprio(sbi->s_journal->j_task, journal_ioprio); |
| 2827 | 2896 | ||
| 2828 | no_journal: | 2897 | no_journal: |
| 2829 | |||
| 2830 | if (test_opt(sb, NOBH)) { | 2898 | if (test_opt(sb, NOBH)) { |
| 2831 | if (!(test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA)) { | 2899 | if (!(test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA)) { |
| 2832 | ext4_msg(sb, KERN_WARNING, "Ignoring nobh option - " | 2900 | ext4_msg(sb, KERN_WARNING, "Ignoring nobh option - " |
| 2833 | "its supported only with writeback mode"); | 2901 | "its supported only with writeback mode"); |
| 2834 | clear_opt(sbi->s_mount_opt, NOBH); | 2902 | clear_opt(sbi->s_mount_opt, NOBH); |
| 2835 | } | 2903 | } |
| 2904 | if (test_opt(sb, DIOREAD_NOLOCK)) { | ||
| 2905 | ext4_msg(sb, KERN_WARNING, "dioread_nolock option is " | ||
| 2906 | "not supported with nobh mode"); | ||
| 2907 | goto failed_mount_wq; | ||
| 2908 | } | ||
| 2836 | } | 2909 | } |
| 2837 | EXT4_SB(sb)->dio_unwritten_wq = create_workqueue("ext4-dio-unwritten"); | 2910 | EXT4_SB(sb)->dio_unwritten_wq = create_workqueue("ext4-dio-unwritten"); |
| 2838 | if (!EXT4_SB(sb)->dio_unwritten_wq) { | 2911 | if (!EXT4_SB(sb)->dio_unwritten_wq) { |
| @@ -2897,6 +2970,18 @@ no_journal: | |||
| 2897 | "requested data journaling mode"); | 2970 | "requested data journaling mode"); |
| 2898 | clear_opt(sbi->s_mount_opt, DELALLOC); | 2971 | clear_opt(sbi->s_mount_opt, DELALLOC); |
| 2899 | } | 2972 | } |
| 2973 | if (test_opt(sb, DIOREAD_NOLOCK)) { | ||
| 2974 | if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) { | ||
| 2975 | ext4_msg(sb, KERN_WARNING, "Ignoring dioread_nolock " | ||
| 2976 | "option - requested data journaling mode"); | ||
| 2977 | clear_opt(sbi->s_mount_opt, DIOREAD_NOLOCK); | ||
| 2978 | } | ||
| 2979 | if (sb->s_blocksize < PAGE_SIZE) { | ||
| 2980 | ext4_msg(sb, KERN_WARNING, "Ignoring dioread_nolock " | ||
| 2981 | "option - block size is too small"); | ||
| 2982 | clear_opt(sbi->s_mount_opt, DIOREAD_NOLOCK); | ||
| 2983 | } | ||
| 2984 | } | ||
| 2900 | 2985 | ||
| 2901 | err = ext4_setup_system_zone(sb); | 2986 | err = ext4_setup_system_zone(sb); |
| 2902 | if (err) { | 2987 | if (err) { |
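
Taken together, the dioread_nolock hunks in ext4_fill_super() amount to three constraints. The helper below is hypothetical -- in the patch the checks stay open-coded -- and only summarizes them:

    /* Hypothetical summary helper, not present in the patch itself. */
    static int ext4_dioread_nolock_allowed(struct super_block *sb)
    {
            if (test_opt(sb, NOBH))
                    return 0;       /* incompatible: the mount is failed */
            if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
                    return 0;       /* option is silently cleared */
            if (sb->s_blocksize < PAGE_SIZE)
                    return 0;       /* option is silently cleared */
            return 1;
    }
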
| @@ -3360,10 +3445,9 @@ static void ext4_clear_journal_err(struct super_block *sb, | |||
| 3360 | char nbuf[16]; | 3445 | char nbuf[16]; |
| 3361 | 3446 | ||
| 3362 | errstr = ext4_decode_error(sb, j_errno, nbuf); | 3447 | errstr = ext4_decode_error(sb, j_errno, nbuf); |
| 3363 | ext4_warning(sb, __func__, "Filesystem error recorded " | 3448 | ext4_warning(sb, "Filesystem error recorded " |
| 3364 | "from previous mount: %s", errstr); | 3449 | "from previous mount: %s", errstr); |
| 3365 | ext4_warning(sb, __func__, "Marking fs in need of " | 3450 | ext4_warning(sb, "Marking fs in need of filesystem check."); |
| 3366 | "filesystem check."); | ||
| 3367 | 3451 | ||
| 3368 | EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS; | 3452 | EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS; |
| 3369 | es->s_state |= cpu_to_le16(EXT4_ERROR_FS); | 3453 | es->s_state |= cpu_to_le16(EXT4_ERROR_FS); |
| @@ -3514,7 +3598,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) | |||
| 3514 | ext4_abort(sb, __func__, "Abort forced by user"); | 3598 | ext4_abort(sb, __func__, "Abort forced by user"); |
| 3515 | 3599 | ||
| 3516 | sb->s_flags = (sb->s_flags & ~MS_POSIXACL) | | 3600 | sb->s_flags = (sb->s_flags & ~MS_POSIXACL) | |
| 3517 | ((sbi->s_mount_opt & EXT4_MOUNT_POSIX_ACL) ? MS_POSIXACL : 0); | 3601 | (test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0); |
| 3518 | 3602 | ||
| 3519 | es = sbi->s_es; | 3603 | es = sbi->s_es; |
| 3520 | 3604 | ||
| @@ -3708,7 +3792,7 @@ static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf) | |||
| 3708 | * Process 1 Process 2 | 3792 | * Process 1 Process 2 |
| 3709 | * ext4_create() quota_sync() | 3793 | * ext4_create() quota_sync() |
| 3710 | * jbd2_journal_start() write_dquot() | 3794 | * jbd2_journal_start() write_dquot() |
| 3711 | * vfs_dq_init() down(dqio_mutex) | 3795 | * dquot_initialize() down(dqio_mutex) |
| 3712 | * down(dqio_mutex) jbd2_journal_start() | 3796 | * down(dqio_mutex) jbd2_journal_start() |
| 3713 | * | 3797 | * |
| 3714 | */ | 3798 | */ |
| @@ -3917,9 +4001,7 @@ static ssize_t ext4_quota_write(struct super_block *sb, int type, | |||
| 3917 | ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb); | 4001 | ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb); |
| 3918 | int err = 0; | 4002 | int err = 0; |
| 3919 | int offset = off & (sb->s_blocksize - 1); | 4003 | int offset = off & (sb->s_blocksize - 1); |
| 3920 | int tocopy; | ||
| 3921 | int journal_quota = EXT4_SB(sb)->s_qf_names[type] != NULL; | 4004 | int journal_quota = EXT4_SB(sb)->s_qf_names[type] != NULL; |
| 3922 | size_t towrite = len; | ||
| 3923 | struct buffer_head *bh; | 4005 | struct buffer_head *bh; |
| 3924 | handle_t *handle = journal_current_handle(); | 4006 | handle_t *handle = journal_current_handle(); |
| 3925 | 4007 | ||
| @@ -3929,52 +4011,53 @@ static ssize_t ext4_quota_write(struct super_block *sb, int type, | |||
| 3929 | (unsigned long long)off, (unsigned long long)len); | 4011 | (unsigned long long)off, (unsigned long long)len); |
| 3930 | return -EIO; | 4012 | return -EIO; |
| 3931 | } | 4013 | } |
| 4014 | /* | ||
| 4015 | * Since we account only one data block in transaction credits, | ||
| 4016 | * then it is impossible to cross a block boundary. | ||
| 4017 | */ | ||
| 4018 | if (sb->s_blocksize - offset < len) { | ||
| 4019 | ext4_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)" | ||
| 4020 | " cancelled because not block aligned", | ||
| 4021 | (unsigned long long)off, (unsigned long long)len); | ||
| 4022 | return -EIO; | ||
| 4023 | } | ||
| 4024 | |||
| 3932 | mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA); | 4025 | mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA); |
| 3933 | while (towrite > 0) { | 4026 | bh = ext4_bread(handle, inode, blk, 1, &err); |
| 3934 | tocopy = sb->s_blocksize - offset < towrite ? | 4027 | if (!bh) |
| 3935 | sb->s_blocksize - offset : towrite; | 4028 | goto out; |
| 3936 | bh = ext4_bread(handle, inode, blk, 1, &err); | 4029 | if (journal_quota) { |
| 3937 | if (!bh) | 4030 | err = ext4_journal_get_write_access(handle, bh); |
| 4031 | if (err) { | ||
| 4032 | brelse(bh); | ||
| 3938 | goto out; | 4033 | goto out; |
| 3939 | if (journal_quota) { | ||
| 3940 | err = ext4_journal_get_write_access(handle, bh); | ||
| 3941 | if (err) { | ||
| 3942 | brelse(bh); | ||
| 3943 | goto out; | ||
| 3944 | } | ||
| 3945 | } | 4034 | } |
| 3946 | lock_buffer(bh); | ||
| 3947 | memcpy(bh->b_data+offset, data, tocopy); | ||
| 3948 | flush_dcache_page(bh->b_page); | ||
| 3949 | unlock_buffer(bh); | ||
| 3950 | if (journal_quota) | ||
| 3951 | err = ext4_handle_dirty_metadata(handle, NULL, bh); | ||
| 3952 | else { | ||
| 3953 | /* Always do at least ordered writes for quotas */ | ||
| 3954 | err = ext4_jbd2_file_inode(handle, inode); | ||
| 3955 | mark_buffer_dirty(bh); | ||
| 3956 | } | ||
| 3957 | brelse(bh); | ||
| 3958 | if (err) | ||
| 3959 | goto out; | ||
| 3960 | offset = 0; | ||
| 3961 | towrite -= tocopy; | ||
| 3962 | data += tocopy; | ||
| 3963 | blk++; | ||
| 3964 | } | 4035 | } |
| 4036 | lock_buffer(bh); | ||
| 4037 | memcpy(bh->b_data+offset, data, len); | ||
| 4038 | flush_dcache_page(bh->b_page); | ||
| 4039 | unlock_buffer(bh); | ||
| 4040 | if (journal_quota) | ||
| 4041 | err = ext4_handle_dirty_metadata(handle, NULL, bh); | ||
| 4042 | else { | ||
| 4043 | /* Always do at least ordered writes for quotas */ | ||
| 4044 | err = ext4_jbd2_file_inode(handle, inode); | ||
| 4045 | mark_buffer_dirty(bh); | ||
| 4046 | } | ||
| 4047 | brelse(bh); | ||
| 3965 | out: | 4048 | out: |
| 3966 | if (len == towrite) { | 4049 | if (err) { |
| 3967 | mutex_unlock(&inode->i_mutex); | 4050 | mutex_unlock(&inode->i_mutex); |
| 3968 | return err; | 4051 | return err; |
| 3969 | } | 4052 | } |
| 3970 | if (inode->i_size < off+len-towrite) { | 4053 | if (inode->i_size < off + len) { |
| 3971 | i_size_write(inode, off+len-towrite); | 4054 | i_size_write(inode, off + len); |
| 3972 | EXT4_I(inode)->i_disksize = inode->i_size; | 4055 | EXT4_I(inode)->i_disksize = inode->i_size; |
| 3973 | } | 4056 | } |
| 3974 | inode->i_mtime = inode->i_ctime = CURRENT_TIME; | 4057 | inode->i_mtime = inode->i_ctime = CURRENT_TIME; |
| 3975 | ext4_mark_inode_dirty(handle, inode); | 4058 | ext4_mark_inode_dirty(handle, inode); |
| 3976 | mutex_unlock(&inode->i_mutex); | 4059 | mutex_unlock(&inode->i_mutex); |
| 3977 | return len - towrite; | 4060 | return len; |
| 3978 | } | 4061 | } |
| 3979 | 4062 | ||
| 3980 | #endif | 4063 | #endif |
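
The ext4_quota_write() rewrite drops the old copy loop: as the new comment says, the transaction only reserves credits for a single data block, so a quota record that would straddle a block boundary is rejected up front instead of being split across blocks. Note the success return value also changes from len - towrite to simply len.

    /* Worked example of the new precondition, 4 KiB block size
     * (illustrative values):
     *   off = 8180, len = 12  ->  offset = 4084, 4096 - 4084 >= 12 : accepted
     *   off = 8190, len = 12  ->  offset = 4094, 4096 - 4094 <  12 : -EIO
     */
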
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c index f3a2f7ed45aa..b4c5aa8489d8 100644 --- a/fs/ext4/xattr.c +++ b/fs/ext4/xattr.c | |||
| @@ -227,7 +227,8 @@ ext4_xattr_block_get(struct inode *inode, int name_index, const char *name, | |||
| 227 | ea_bdebug(bh, "b_count=%d, refcount=%d", | 227 | ea_bdebug(bh, "b_count=%d, refcount=%d", |
| 228 | atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount)); | 228 | atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount)); |
| 229 | if (ext4_xattr_check_block(bh)) { | 229 | if (ext4_xattr_check_block(bh)) { |
| 230 | bad_block: ext4_error(inode->i_sb, __func__, | 230 | bad_block: |
| 231 | ext4_error(inode->i_sb, | ||
| 231 | "inode %lu: bad block %llu", inode->i_ino, | 232 | "inode %lu: bad block %llu", inode->i_ino, |
| 232 | EXT4_I(inode)->i_file_acl); | 233 | EXT4_I(inode)->i_file_acl); |
| 233 | error = -EIO; | 234 | error = -EIO; |
| @@ -267,7 +268,7 @@ ext4_xattr_ibody_get(struct inode *inode, int name_index, const char *name, | |||
| 267 | void *end; | 268 | void *end; |
| 268 | int error; | 269 | int error; |
| 269 | 270 | ||
| 270 | if (!(EXT4_I(inode)->i_state & EXT4_STATE_XATTR)) | 271 | if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR)) |
| 271 | return -ENODATA; | 272 | return -ENODATA; |
| 272 | error = ext4_get_inode_loc(inode, &iloc); | 273 | error = ext4_get_inode_loc(inode, &iloc); |
| 273 | if (error) | 274 | if (error) |
| @@ -371,7 +372,7 @@ ext4_xattr_block_list(struct dentry *dentry, char *buffer, size_t buffer_size) | |||
| 371 | ea_bdebug(bh, "b_count=%d, refcount=%d", | 372 | ea_bdebug(bh, "b_count=%d, refcount=%d", |
| 372 | atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount)); | 373 | atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount)); |
| 373 | if (ext4_xattr_check_block(bh)) { | 374 | if (ext4_xattr_check_block(bh)) { |
| 374 | ext4_error(inode->i_sb, __func__, | 375 | ext4_error(inode->i_sb, |
| 375 | "inode %lu: bad block %llu", inode->i_ino, | 376 | "inode %lu: bad block %llu", inode->i_ino, |
| 376 | EXT4_I(inode)->i_file_acl); | 377 | EXT4_I(inode)->i_file_acl); |
| 377 | error = -EIO; | 378 | error = -EIO; |
| @@ -396,7 +397,7 @@ ext4_xattr_ibody_list(struct dentry *dentry, char *buffer, size_t buffer_size) | |||
| 396 | void *end; | 397 | void *end; |
| 397 | int error; | 398 | int error; |
| 398 | 399 | ||
| 399 | if (!(EXT4_I(inode)->i_state & EXT4_STATE_XATTR)) | 400 | if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR)) |
| 400 | return 0; | 401 | return 0; |
| 401 | error = ext4_get_inode_loc(inode, &iloc); | 402 | error = ext4_get_inode_loc(inode, &iloc); |
| 402 | if (error) | 403 | if (error) |
| @@ -494,7 +495,7 @@ ext4_xattr_release_block(handle_t *handle, struct inode *inode, | |||
| 494 | error = ext4_handle_dirty_metadata(handle, inode, bh); | 495 | error = ext4_handle_dirty_metadata(handle, inode, bh); |
| 495 | if (IS_SYNC(inode)) | 496 | if (IS_SYNC(inode)) |
| 496 | ext4_handle_sync(handle); | 497 | ext4_handle_sync(handle); |
| 497 | vfs_dq_free_block(inode, 1); | 498 | dquot_free_block(inode, 1); |
| 498 | ea_bdebug(bh, "refcount now=%d; releasing", | 499 | ea_bdebug(bh, "refcount now=%d; releasing", |
| 499 | le32_to_cpu(BHDR(bh)->h_refcount)); | 500 | le32_to_cpu(BHDR(bh)->h_refcount)); |
| 500 | if (ce) | 501 | if (ce) |
| @@ -665,9 +666,8 @@ ext4_xattr_block_find(struct inode *inode, struct ext4_xattr_info *i, | |||
| 665 | atomic_read(&(bs->bh->b_count)), | 666 | atomic_read(&(bs->bh->b_count)), |
| 666 | le32_to_cpu(BHDR(bs->bh)->h_refcount)); | 667 | le32_to_cpu(BHDR(bs->bh)->h_refcount)); |
| 667 | if (ext4_xattr_check_block(bs->bh)) { | 668 | if (ext4_xattr_check_block(bs->bh)) { |
| 668 | ext4_error(sb, __func__, | 669 | ext4_error(sb, "inode %lu: bad block %llu", |
| 669 | "inode %lu: bad block %llu", inode->i_ino, | 670 | inode->i_ino, EXT4_I(inode)->i_file_acl); |
| 670 | EXT4_I(inode)->i_file_acl); | ||
| 671 | error = -EIO; | 671 | error = -EIO; |
| 672 | goto cleanup; | 672 | goto cleanup; |
| 673 | } | 673 | } |
| @@ -787,8 +787,8 @@ inserted: | |||
| 787 | else { | 787 | else { |
| 788 | /* The old block is released after updating | 788 | /* The old block is released after updating |
| 789 | the inode. */ | 789 | the inode. */ |
| 790 | error = -EDQUOT; | 790 | error = dquot_alloc_block(inode, 1); |
| 791 | if (vfs_dq_alloc_block(inode, 1)) | 791 | if (error) |
| 792 | goto cleanup; | 792 | goto cleanup; |
| 793 | error = ext4_journal_get_write_access(handle, | 793 | error = ext4_journal_get_write_access(handle, |
| 794 | new_bh); | 794 | new_bh); |
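
The quota-charging change in the hunk above is an API shift as well as a rename: vfs_dq_alloc_block() returned non-zero on failure and the caller supplied its own -EDQUOT, while dquot_alloc_block() returns an errno directly. Side by side:

    /* before */
    error = -EDQUOT;
    if (vfs_dq_alloc_block(inode, 1))
            goto cleanup;

    /* after: the quota core picks the error code */
    error = dquot_alloc_block(inode, 1);
    if (error)
            goto cleanup;
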
| @@ -876,13 +876,12 @@ cleanup: | |||
| 876 | return error; | 876 | return error; |
| 877 | 877 | ||
| 878 | cleanup_dquot: | 878 | cleanup_dquot: |
| 879 | vfs_dq_free_block(inode, 1); | 879 | dquot_free_block(inode, 1); |
| 880 | goto cleanup; | 880 | goto cleanup; |
| 881 | 881 | ||
| 882 | bad_block: | 882 | bad_block: |
| 883 | ext4_error(inode->i_sb, __func__, | 883 | ext4_error(inode->i_sb, "inode %lu: bad block %llu", |
| 884 | "inode %lu: bad block %llu", inode->i_ino, | 884 | inode->i_ino, EXT4_I(inode)->i_file_acl); |
| 885 | EXT4_I(inode)->i_file_acl); | ||
| 886 | goto cleanup; | 885 | goto cleanup; |
| 887 | 886 | ||
| 888 | #undef header | 887 | #undef header |
| @@ -908,7 +907,7 @@ ext4_xattr_ibody_find(struct inode *inode, struct ext4_xattr_info *i, | |||
| 908 | is->s.base = is->s.first = IFIRST(header); | 907 | is->s.base = is->s.first = IFIRST(header); |
| 909 | is->s.here = is->s.first; | 908 | is->s.here = is->s.first; |
| 910 | is->s.end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size; | 909 | is->s.end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size; |
| 911 | if (EXT4_I(inode)->i_state & EXT4_STATE_XATTR) { | 910 | if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) { |
| 912 | error = ext4_xattr_check_names(IFIRST(header), is->s.end); | 911 | error = ext4_xattr_check_names(IFIRST(header), is->s.end); |
| 913 | if (error) | 912 | if (error) |
| 914 | return error; | 913 | return error; |
| @@ -940,10 +939,10 @@ ext4_xattr_ibody_set(handle_t *handle, struct inode *inode, | |||
| 940 | header = IHDR(inode, ext4_raw_inode(&is->iloc)); | 939 | header = IHDR(inode, ext4_raw_inode(&is->iloc)); |
| 941 | if (!IS_LAST_ENTRY(s->first)) { | 940 | if (!IS_LAST_ENTRY(s->first)) { |
| 942 | header->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC); | 941 | header->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC); |
| 943 | EXT4_I(inode)->i_state |= EXT4_STATE_XATTR; | 942 | ext4_set_inode_state(inode, EXT4_STATE_XATTR); |
| 944 | } else { | 943 | } else { |
| 945 | header->h_magic = cpu_to_le32(0); | 944 | header->h_magic = cpu_to_le32(0); |
| 946 | EXT4_I(inode)->i_state &= ~EXT4_STATE_XATTR; | 945 | ext4_clear_inode_state(inode, EXT4_STATE_XATTR); |
| 947 | } | 946 | } |
| 948 | return 0; | 947 | return 0; |
| 949 | } | 948 | } |
| @@ -986,8 +985,8 @@ ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index, | |||
| 986 | if (strlen(name) > 255) | 985 | if (strlen(name) > 255) |
| 987 | return -ERANGE; | 986 | return -ERANGE; |
| 988 | down_write(&EXT4_I(inode)->xattr_sem); | 987 | down_write(&EXT4_I(inode)->xattr_sem); |
| 989 | no_expand = EXT4_I(inode)->i_state & EXT4_STATE_NO_EXPAND; | 988 | no_expand = ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND); |
| 990 | EXT4_I(inode)->i_state |= EXT4_STATE_NO_EXPAND; | 989 | ext4_set_inode_state(inode, EXT4_STATE_NO_EXPAND); |
| 991 | 990 | ||
| 992 | error = ext4_get_inode_loc(inode, &is.iloc); | 991 | error = ext4_get_inode_loc(inode, &is.iloc); |
| 993 | if (error) | 992 | if (error) |
| @@ -997,10 +996,10 @@ ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index, | |||
| 997 | if (error) | 996 | if (error) |
| 998 | goto cleanup; | 997 | goto cleanup; |
| 999 | 998 | ||
| 1000 | if (EXT4_I(inode)->i_state & EXT4_STATE_NEW) { | 999 | if (ext4_test_inode_state(inode, EXT4_STATE_NEW)) { |
| 1001 | struct ext4_inode *raw_inode = ext4_raw_inode(&is.iloc); | 1000 | struct ext4_inode *raw_inode = ext4_raw_inode(&is.iloc); |
| 1002 | memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size); | 1001 | memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size); |
| 1003 | EXT4_I(inode)->i_state &= ~EXT4_STATE_NEW; | 1002 | ext4_clear_inode_state(inode, EXT4_STATE_NEW); |
| 1004 | } | 1003 | } |
| 1005 | 1004 | ||
| 1006 | error = ext4_xattr_ibody_find(inode, &i, &is); | 1005 | error = ext4_xattr_ibody_find(inode, &i, &is); |
| @@ -1052,7 +1051,7 @@ ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index, | |||
| 1052 | ext4_xattr_update_super_block(handle, inode->i_sb); | 1051 | ext4_xattr_update_super_block(handle, inode->i_sb); |
| 1053 | inode->i_ctime = ext4_current_time(inode); | 1052 | inode->i_ctime = ext4_current_time(inode); |
| 1054 | if (!value) | 1053 | if (!value) |
| 1055 | EXT4_I(inode)->i_state &= ~EXT4_STATE_NO_EXPAND; | 1054 | ext4_clear_inode_state(inode, EXT4_STATE_NO_EXPAND); |
| 1056 | error = ext4_mark_iloc_dirty(handle, inode, &is.iloc); | 1055 | error = ext4_mark_iloc_dirty(handle, inode, &is.iloc); |
| 1057 | /* | 1056 | /* |
| 1058 | * The bh is consumed by ext4_mark_iloc_dirty, even with | 1057 | * The bh is consumed by ext4_mark_iloc_dirty, even with |
| @@ -1067,7 +1066,7 @@ cleanup: | |||
| 1067 | brelse(is.iloc.bh); | 1066 | brelse(is.iloc.bh); |
| 1068 | brelse(bs.bh); | 1067 | brelse(bs.bh); |
| 1069 | if (no_expand == 0) | 1068 | if (no_expand == 0) |
| 1070 | EXT4_I(inode)->i_state &= ~EXT4_STATE_NO_EXPAND; | 1069 | ext4_clear_inode_state(inode, EXT4_STATE_NO_EXPAND); |
| 1071 | up_write(&EXT4_I(inode)->xattr_sem); | 1070 | up_write(&EXT4_I(inode)->xattr_sem); |
| 1072 | return error; | 1071 | return error; |
| 1073 | } | 1072 | } |
| @@ -1195,9 +1194,8 @@ retry: | |||
| 1195 | if (!bh) | 1194 | if (!bh) |
| 1196 | goto cleanup; | 1195 | goto cleanup; |
| 1197 | if (ext4_xattr_check_block(bh)) { | 1196 | if (ext4_xattr_check_block(bh)) { |
| 1198 | ext4_error(inode->i_sb, __func__, | 1197 | ext4_error(inode->i_sb, "inode %lu: bad block %llu", |
| 1199 | "inode %lu: bad block %llu", inode->i_ino, | 1198 | inode->i_ino, EXT4_I(inode)->i_file_acl); |
| 1200 | EXT4_I(inode)->i_file_acl); | ||
| 1201 | error = -EIO; | 1199 | error = -EIO; |
| 1202 | goto cleanup; | 1200 | goto cleanup; |
| 1203 | } | 1201 | } |
| @@ -1302,6 +1300,8 @@ retry: | |||
| 1302 | 1300 | ||
| 1303 | /* Remove the chosen entry from the inode */ | 1301 | /* Remove the chosen entry from the inode */ |
| 1304 | error = ext4_xattr_ibody_set(handle, inode, &i, is); | 1302 | error = ext4_xattr_ibody_set(handle, inode, &i, is); |
| 1303 | if (error) | ||
| 1304 | goto cleanup; | ||
| 1305 | 1305 | ||
| 1306 | entry = IFIRST(header); | 1306 | entry = IFIRST(header); |
| 1307 | if (entry_size + EXT4_XATTR_SIZE(size) >= new_extra_isize) | 1307 | if (entry_size + EXT4_XATTR_SIZE(size) >= new_extra_isize) |
| @@ -1372,16 +1372,14 @@ ext4_xattr_delete_inode(handle_t *handle, struct inode *inode) | |||
| 1372 | goto cleanup; | 1372 | goto cleanup; |
| 1373 | bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl); | 1373 | bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl); |
| 1374 | if (!bh) { | 1374 | if (!bh) { |
| 1375 | ext4_error(inode->i_sb, __func__, | 1375 | ext4_error(inode->i_sb, "inode %lu: block %llu read error", |
| 1376 | "inode %lu: block %llu read error", inode->i_ino, | 1376 | inode->i_ino, EXT4_I(inode)->i_file_acl); |
| 1377 | EXT4_I(inode)->i_file_acl); | ||
| 1378 | goto cleanup; | 1377 | goto cleanup; |
| 1379 | } | 1378 | } |
| 1380 | if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) || | 1379 | if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) || |
| 1381 | BHDR(bh)->h_blocks != cpu_to_le32(1)) { | 1380 | BHDR(bh)->h_blocks != cpu_to_le32(1)) { |
| 1382 | ext4_error(inode->i_sb, __func__, | 1381 | ext4_error(inode->i_sb, "inode %lu: bad block %llu", |
| 1383 | "inode %lu: bad block %llu", inode->i_ino, | 1382 | inode->i_ino, EXT4_I(inode)->i_file_acl); |
| 1384 | EXT4_I(inode)->i_file_acl); | ||
| 1385 | goto cleanup; | 1383 | goto cleanup; |
| 1386 | } | 1384 | } |
| 1387 | ext4_xattr_release_block(handle, inode, bh); | 1385 | ext4_xattr_release_block(handle, inode, bh); |
| @@ -1506,7 +1504,7 @@ again: | |||
| 1506 | } | 1504 | } |
| 1507 | bh = sb_bread(inode->i_sb, ce->e_block); | 1505 | bh = sb_bread(inode->i_sb, ce->e_block); |
| 1508 | if (!bh) { | 1506 | if (!bh) { |
| 1509 | ext4_error(inode->i_sb, __func__, | 1507 | ext4_error(inode->i_sb, |
| 1510 | "inode %lu: block %lu read error", | 1508 | "inode %lu: block %lu read error", |
| 1511 | inode->i_ino, (unsigned long) ce->e_block); | 1509 | inode->i_ino, (unsigned long) ce->e_block); |
| 1512 | } else if (le32_to_cpu(BHDR(bh)->h_refcount) >= | 1510 | } else if (le32_to_cpu(BHDR(bh)->h_refcount) >= |
diff --git a/fs/fat/inode.c b/fs/fat/inode.c index 14da530b05ca..fbeecdc194dc 100644 --- a/fs/fat/inode.c +++ b/fs/fat/inode.c | |||
| @@ -577,7 +577,7 @@ static inline loff_t fat_i_pos_read(struct msdos_sb_info *sbi, | |||
| 577 | return i_pos; | 577 | return i_pos; |
| 578 | } | 578 | } |
| 579 | 579 | ||
| 580 | static int fat_write_inode(struct inode *inode, int wait) | 580 | static int __fat_write_inode(struct inode *inode, int wait) |
| 581 | { | 581 | { |
| 582 | struct super_block *sb = inode->i_sb; | 582 | struct super_block *sb = inode->i_sb; |
| 583 | struct msdos_sb_info *sbi = MSDOS_SB(sb); | 583 | struct msdos_sb_info *sbi = MSDOS_SB(sb); |
| @@ -634,9 +634,14 @@ retry: | |||
| 634 | return err; | 634 | return err; |
| 635 | } | 635 | } |
| 636 | 636 | ||
| 637 | static int fat_write_inode(struct inode *inode, struct writeback_control *wbc) | ||
| 638 | { | ||
| 639 | return __fat_write_inode(inode, wbc->sync_mode == WB_SYNC_ALL); | ||
| 640 | } | ||
| 641 | |||
| 637 | int fat_sync_inode(struct inode *inode) | 642 | int fat_sync_inode(struct inode *inode) |
| 638 | { | 643 | { |
| 639 | return fat_write_inode(inode, 1); | 644 | return __fat_write_inode(inode, 1); |
| 640 | } | 645 | } |
| 641 | 646 | ||
| 642 | EXPORT_SYMBOL_GPL(fat_sync_inode); | 647 | EXPORT_SYMBOL_GPL(fat_sync_inode); |
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 1a7c42c64ff4..76fc4d594acb 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c | |||
| @@ -381,10 +381,10 @@ static void queue_io(struct bdi_writeback *wb, unsigned long *older_than_this) | |||
| 381 | move_expired_inodes(&wb->b_dirty, &wb->b_io, older_than_this); | 381 | move_expired_inodes(&wb->b_dirty, &wb->b_io, older_than_this); |
| 382 | } | 382 | } |
| 383 | 383 | ||
| 384 | static int write_inode(struct inode *inode, int sync) | 384 | static int write_inode(struct inode *inode, struct writeback_control *wbc) |
| 385 | { | 385 | { |
| 386 | if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode)) | 386 | if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode)) |
| 387 | return inode->i_sb->s_op->write_inode(inode, sync); | 387 | return inode->i_sb->s_op->write_inode(inode, wbc); |
| 388 | return 0; | 388 | return 0; |
| 389 | } | 389 | } |
| 390 | 390 | ||
| @@ -421,7 +421,6 @@ static int | |||
| 421 | writeback_single_inode(struct inode *inode, struct writeback_control *wbc) | 421 | writeback_single_inode(struct inode *inode, struct writeback_control *wbc) |
| 422 | { | 422 | { |
| 423 | struct address_space *mapping = inode->i_mapping; | 423 | struct address_space *mapping = inode->i_mapping; |
| 424 | int wait = wbc->sync_mode == WB_SYNC_ALL; | ||
| 425 | unsigned dirty; | 424 | unsigned dirty; |
| 426 | int ret; | 425 | int ret; |
| 427 | 426 | ||
| @@ -439,7 +438,7 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc) | |||
| 439 | * We'll have another go at writing back this inode when we | 438 | * We'll have another go at writing back this inode when we |
| 440 | * completed a full scan of b_io. | 439 | * completed a full scan of b_io. |
| 441 | */ | 440 | */ |
| 442 | if (!wait) { | 441 | if (wbc->sync_mode != WB_SYNC_ALL) { |
| 443 | requeue_io(inode); | 442 | requeue_io(inode); |
| 444 | return 0; | 443 | return 0; |
| 445 | } | 444 | } |
| @@ -461,15 +460,20 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc) | |||
| 461 | 460 | ||
| 462 | ret = do_writepages(mapping, wbc); | 461 | ret = do_writepages(mapping, wbc); |
| 463 | 462 | ||
| 464 | /* Don't write the inode if only I_DIRTY_PAGES was set */ | 463 | /* |
| 465 | if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) { | 464 | * Make sure to wait on the data before writing out the metadata. |
| 466 | int err = write_inode(inode, wait); | 465 | * This is important for filesystems that modify metadata on data |
| 466 | * I/O completion. | ||
| 467 | */ | ||
| 468 | if (wbc->sync_mode == WB_SYNC_ALL) { | ||
| 469 | int err = filemap_fdatawait(mapping); | ||
| 467 | if (ret == 0) | 470 | if (ret == 0) |
| 468 | ret = err; | 471 | ret = err; |
| 469 | } | 472 | } |
| 470 | 473 | ||
| 471 | if (wait) { | 474 | /* Don't write the inode if only I_DIRTY_PAGES was set */ |
| 472 | int err = filemap_fdatawait(mapping); | 475 | if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) { |
| 476 | int err = write_inode(inode, wbc); | ||
| 473 | if (ret == 0) | 477 | if (ret == 0) |
| 474 | ret = err; | 478 | ret = err; |
| 475 | } | 479 | } |
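The functional change in writeback_single_inode() is the ordering: under WB_SYNC_ALL the data is written and waited for before ->write_inode() is called, so a filesystem that updates metadata from its data-I/O completion path (i_size updates, unwritten-extent conversion and the like) has that metadata in place by the time it is asked to write the inode. Annotated outline of the new WB_SYNC_ALL path, using exactly the kernel calls shown in the hunk above (an outline, not a standalone function):

ret = do_writepages(mapping, wbc);		/* start data writeback */

if (wbc->sync_mode == WB_SYNC_ALL) {
	int err = filemap_fdatawait(mapping);	/* data I/O completes here, including */
	if (ret == 0)				/* any completion work that dirties   */
		ret = err;			/* the inode's metadata               */
}

/* Don't write the inode if only I_DIRTY_PAGES was set */
if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
	int err = write_inode(inode, wbc);	/* metadata goes out last, and the fs */
	if (ret == 0)				/* now sees the writeback_control too */
		ret = err;
}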
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c index e3bf6eab8750..6dbcbad6ab17 100644 --- a/fs/gfs2/quota.c +++ b/fs/gfs2/quota.c | |||
| @@ -1083,7 +1083,7 @@ void gfs2_quota_change(struct gfs2_inode *ip, s64 change, | |||
| 1083 | } | 1083 | } |
| 1084 | } | 1084 | } |
| 1085 | 1085 | ||
| 1086 | int gfs2_quota_sync(struct super_block *sb, int type) | 1086 | int gfs2_quota_sync(struct super_block *sb, int type, int wait) |
| 1087 | { | 1087 | { |
| 1088 | struct gfs2_sbd *sdp = sb->s_fs_info; | 1088 | struct gfs2_sbd *sdp = sb->s_fs_info; |
| 1089 | struct gfs2_quota_data **qda; | 1089 | struct gfs2_quota_data **qda; |
| @@ -1127,6 +1127,11 @@ int gfs2_quota_sync(struct super_block *sb, int type) | |||
| 1127 | return error; | 1127 | return error; |
| 1128 | } | 1128 | } |
| 1129 | 1129 | ||
| 1130 | static int gfs2_quota_sync_timeo(struct super_block *sb, int type) | ||
| 1131 | { | ||
| 1132 | return gfs2_quota_sync(sb, type, 0); | ||
| 1133 | } | ||
| 1134 | |||
| 1130 | int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, u32 id) | 1135 | int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, u32 id) |
| 1131 | { | 1136 | { |
| 1132 | struct gfs2_quota_data *qd; | 1137 | struct gfs2_quota_data *qd; |
| @@ -1382,7 +1387,7 @@ int gfs2_quotad(void *data) | |||
| 1382 | &tune->gt_statfs_quantum); | 1387 | &tune->gt_statfs_quantum); |
| 1383 | 1388 | ||
| 1384 | /* Update quota file */ | 1389 | /* Update quota file */ |
| 1385 | quotad_check_timeo(sdp, "sync", gfs2_quota_sync, t, | 1390 | quotad_check_timeo(sdp, "sync", gfs2_quota_sync_timeo, t, |
| 1386 | "ad_timeo, &tune->gt_quota_quantum); | 1391 | "ad_timeo, &tune->gt_quota_quantum); |
| 1387 | 1392 | ||
| 1388 | /* Check for & recover partially truncated inodes */ | 1393 | /* Check for & recover partially truncated inodes */ |
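gfs2_quota_sync() gains an explicit wait argument so that callers on sync paths can request synchronous behaviour, while the periodic flush from the quotad thread keeps its old fire-and-forget semantics. The gfs2_quota_sync_timeo() wrapper exists only to preserve the two-argument callback type that quotad_check_timeo() expects. The two kinds of caller then look like this (both appear verbatim in the gfs2/super.c and gfs2/sys.c hunks below):

/* Synchronous: when the filesystem goes read-only, or on an explicit
 * sysfs-triggered quota sync. */
gfs2_quota_sync(sdp->sd_vfs, 0, 1);

/* Asynchronous: the periodic quotad flush goes through the wrapper. */
static int gfs2_quota_sync_timeo(struct super_block *sb, int type)
{
	return gfs2_quota_sync(sb, type, 0);
}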
diff --git a/fs/gfs2/quota.h b/fs/gfs2/quota.h index e271fa07ad02..195f60c8bd14 100644 --- a/fs/gfs2/quota.h +++ b/fs/gfs2/quota.h | |||
| @@ -25,7 +25,7 @@ extern int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid); | |||
| 25 | extern void gfs2_quota_change(struct gfs2_inode *ip, s64 change, | 25 | extern void gfs2_quota_change(struct gfs2_inode *ip, s64 change, |
| 26 | u32 uid, u32 gid); | 26 | u32 uid, u32 gid); |
| 27 | 27 | ||
| 28 | extern int gfs2_quota_sync(struct super_block *sb, int type); | 28 | extern int gfs2_quota_sync(struct super_block *sb, int type, int wait); |
| 29 | extern int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, u32 id); | 29 | extern int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, u32 id); |
| 30 | 30 | ||
| 31 | extern int gfs2_quota_init(struct gfs2_sbd *sdp); | 31 | extern int gfs2_quota_init(struct gfs2_sbd *sdp); |
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c index e5e22629da67..50aac606b990 100644 --- a/fs/gfs2/super.c +++ b/fs/gfs2/super.c | |||
| @@ -22,6 +22,7 @@ | |||
| 22 | #include <linux/crc32.h> | 22 | #include <linux/crc32.h> |
| 23 | #include <linux/time.h> | 23 | #include <linux/time.h> |
| 24 | #include <linux/wait.h> | 24 | #include <linux/wait.h> |
| 25 | #include <linux/writeback.h> | ||
| 25 | 26 | ||
| 26 | #include "gfs2.h" | 27 | #include "gfs2.h" |
| 27 | #include "incore.h" | 28 | #include "incore.h" |
| @@ -711,7 +712,7 @@ void gfs2_unfreeze_fs(struct gfs2_sbd *sdp) | |||
| 711 | * Returns: errno | 712 | * Returns: errno |
| 712 | */ | 713 | */ |
| 713 | 714 | ||
| 714 | static int gfs2_write_inode(struct inode *inode, int sync) | 715 | static int gfs2_write_inode(struct inode *inode, struct writeback_control *wbc) |
| 715 | { | 716 | { |
| 716 | struct gfs2_inode *ip = GFS2_I(inode); | 717 | struct gfs2_inode *ip = GFS2_I(inode); |
| 717 | struct gfs2_sbd *sdp = GFS2_SB(inode); | 718 | struct gfs2_sbd *sdp = GFS2_SB(inode); |
| @@ -745,7 +746,7 @@ static int gfs2_write_inode(struct inode *inode, int sync) | |||
| 745 | do_unlock: | 746 | do_unlock: |
| 746 | gfs2_glock_dq_uninit(&gh); | 747 | gfs2_glock_dq_uninit(&gh); |
| 747 | do_flush: | 748 | do_flush: |
| 748 | if (sync != 0) | 749 | if (wbc->sync_mode == WB_SYNC_ALL) |
| 749 | gfs2_log_flush(GFS2_SB(inode), ip->i_gl); | 750 | gfs2_log_flush(GFS2_SB(inode), ip->i_gl); |
| 750 | return ret; | 751 | return ret; |
| 751 | } | 752 | } |
| @@ -763,7 +764,7 @@ static int gfs2_make_fs_ro(struct gfs2_sbd *sdp) | |||
| 763 | int error; | 764 | int error; |
| 764 | 765 | ||
| 765 | flush_workqueue(gfs2_delete_workqueue); | 766 | flush_workqueue(gfs2_delete_workqueue); |
| 766 | gfs2_quota_sync(sdp->sd_vfs, 0); | 767 | gfs2_quota_sync(sdp->sd_vfs, 0, 1); |
| 767 | gfs2_statfs_sync(sdp->sd_vfs, 0); | 768 | gfs2_statfs_sync(sdp->sd_vfs, 0); |
| 768 | 769 | ||
| 769 | error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_SHARED, GL_NOCACHE, | 770 | error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_SHARED, GL_NOCACHE, |
diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c index a0db1c94317d..b5f1a46133c8 100644 --- a/fs/gfs2/sys.c +++ b/fs/gfs2/sys.c | |||
| @@ -167,7 +167,7 @@ static ssize_t quota_sync_store(struct gfs2_sbd *sdp, const char *buf, | |||
| 167 | if (simple_strtol(buf, NULL, 0) != 1) | 167 | if (simple_strtol(buf, NULL, 0) != 1) |
| 168 | return -EINVAL; | 168 | return -EINVAL; |
| 169 | 169 | ||
| 170 | gfs2_quota_sync(sdp->sd_vfs, 0); | 170 | gfs2_quota_sync(sdp->sd_vfs, 0, 1); |
| 171 | return len; | 171 | return len; |
| 172 | } | 172 | } |
| 173 | 173 | ||
diff --git a/fs/hfs/hfs_fs.h b/fs/hfs/hfs_fs.h index 052387e11671..fe35e3b626c4 100644 --- a/fs/hfs/hfs_fs.h +++ b/fs/hfs/hfs_fs.h | |||
| @@ -188,7 +188,7 @@ extern const struct address_space_operations hfs_btree_aops; | |||
| 188 | 188 | ||
| 189 | extern struct inode *hfs_new_inode(struct inode *, struct qstr *, int); | 189 | extern struct inode *hfs_new_inode(struct inode *, struct qstr *, int); |
| 190 | extern void hfs_inode_write_fork(struct inode *, struct hfs_extent *, __be32 *, __be32 *); | 190 | extern void hfs_inode_write_fork(struct inode *, struct hfs_extent *, __be32 *, __be32 *); |
| 191 | extern int hfs_write_inode(struct inode *, int); | 191 | extern int hfs_write_inode(struct inode *, struct writeback_control *); |
| 192 | extern int hfs_inode_setattr(struct dentry *, struct iattr *); | 192 | extern int hfs_inode_setattr(struct dentry *, struct iattr *); |
| 193 | extern void hfs_inode_read_fork(struct inode *inode, struct hfs_extent *ext, | 193 | extern void hfs_inode_read_fork(struct inode *inode, struct hfs_extent *ext, |
| 194 | __be32 log_size, __be32 phys_size, u32 clump_size); | 194 | __be32 log_size, __be32 phys_size, u32 clump_size); |
diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c index a1cbff2b4d99..14f5cb1b9fdc 100644 --- a/fs/hfs/inode.c +++ b/fs/hfs/inode.c | |||
| @@ -381,7 +381,7 @@ void hfs_inode_write_fork(struct inode *inode, struct hfs_extent *ext, | |||
| 381 | HFS_SB(inode->i_sb)->alloc_blksz); | 381 | HFS_SB(inode->i_sb)->alloc_blksz); |
| 382 | } | 382 | } |
| 383 | 383 | ||
| 384 | int hfs_write_inode(struct inode *inode, int unused) | 384 | int hfs_write_inode(struct inode *inode, struct writeback_control *wbc) |
| 385 | { | 385 | { |
| 386 | struct inode *main_inode = inode; | 386 | struct inode *main_inode = inode; |
| 387 | struct hfs_find_data fd; | 387 | struct hfs_find_data fd; |
diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c index 43022f3d5148..74b473a8ef92 100644 --- a/fs/hfsplus/super.c +++ b/fs/hfsplus/super.c | |||
| @@ -87,7 +87,8 @@ bad_inode: | |||
| 87 | return ERR_PTR(err); | 87 | return ERR_PTR(err); |
| 88 | } | 88 | } |
| 89 | 89 | ||
| 90 | static int hfsplus_write_inode(struct inode *inode, int unused) | 90 | static int hfsplus_write_inode(struct inode *inode, |
| 91 | struct writeback_control *wbc) | ||
| 91 | { | 92 | { |
| 92 | struct hfsplus_vh *vhdr; | 93 | struct hfsplus_vh *vhdr; |
| 93 | int ret = 0; | 94 | int ret = 0; |
diff --git a/fs/inode.c b/fs/inode.c index 03dfeb2e3928..407bf392e20a 100644 --- a/fs/inode.c +++ b/fs/inode.c | |||
| @@ -8,7 +8,6 @@ | |||
| 8 | #include <linux/mm.h> | 8 | #include <linux/mm.h> |
| 9 | #include <linux/dcache.h> | 9 | #include <linux/dcache.h> |
| 10 | #include <linux/init.h> | 10 | #include <linux/init.h> |
| 11 | #include <linux/quotaops.h> | ||
| 12 | #include <linux/slab.h> | 11 | #include <linux/slab.h> |
| 13 | #include <linux/writeback.h> | 12 | #include <linux/writeback.h> |
| 14 | #include <linux/module.h> | 13 | #include <linux/module.h> |
| @@ -314,7 +313,6 @@ void clear_inode(struct inode *inode) | |||
| 314 | BUG_ON(!(inode->i_state & I_FREEING)); | 313 | BUG_ON(!(inode->i_state & I_FREEING)); |
| 315 | BUG_ON(inode->i_state & I_CLEAR); | 314 | BUG_ON(inode->i_state & I_CLEAR); |
| 316 | inode_sync_wait(inode); | 315 | inode_sync_wait(inode); |
| 317 | vfs_dq_drop(inode); | ||
| 318 | if (inode->i_sb->s_op->clear_inode) | 316 | if (inode->i_sb->s_op->clear_inode) |
| 319 | inode->i_sb->s_op->clear_inode(inode); | 317 | inode->i_sb->s_op->clear_inode(inode); |
| 320 | if (S_ISBLK(inode->i_mode) && inode->i_bdev) | 318 | if (S_ISBLK(inode->i_mode) && inode->i_bdev) |
| @@ -1211,8 +1209,6 @@ void generic_delete_inode(struct inode *inode) | |||
| 1211 | 1209 | ||
| 1212 | if (op->delete_inode) { | 1210 | if (op->delete_inode) { |
| 1213 | void (*delete)(struct inode *) = op->delete_inode; | 1211 | void (*delete)(struct inode *) = op->delete_inode; |
| 1214 | if (!is_bad_inode(inode)) | ||
| 1215 | vfs_dq_init(inode); | ||
| 1216 | /* Filesystems implementing their own | 1212 | /* Filesystems implementing their own |
| 1217 | * s_op->delete_inode are required to call | 1213 | * s_op->delete_inode are required to call |
| 1218 | * truncate_inode_pages and clear_inode() | 1214 | * truncate_inode_pages and clear_inode() |
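With vfs_dq_drop() and vfs_dq_init() removed from the generic clear_inode() and generic_delete_inode() paths above, each quota-aware filesystem now makes the equivalent dquot_* calls itself. A sketch of what that looks like at the filesystem level — foo_* is a placeholder; the jfs hunks later in this diff (jfs_clear_inode(), jfs_delete_inode()) follow exactly this shape:

static void foo_clear_inode(struct inode *inode)
{
	dquot_drop(inode);		/* previously done by the VFS in clear_inode() */
}

static void foo_delete_inode(struct inode *inode)
{
	if (!is_bad_inode(inode))
		dquot_initialize(inode);	/* was vfs_dq_init() in generic_delete_inode() */

	/* ... truncate pages, free blocks, then clear_inode() ... */
}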
diff --git a/fs/jbd/commit.c b/fs/jbd/commit.c index 4bd882548c45..2c90e3ef625f 100644 --- a/fs/jbd/commit.c +++ b/fs/jbd/commit.c | |||
| @@ -862,12 +862,12 @@ restart_loop: | |||
| 862 | /* A buffer which has been freed while still being | 862 | /* A buffer which has been freed while still being |
| 863 | * journaled by a previous transaction may end up still | 863 | * journaled by a previous transaction may end up still |
| 864 | * being dirty here, but we want to avoid writing back | 864 | * being dirty here, but we want to avoid writing back |
| 865 | * that buffer in the future now that the last use has | 865 | * that buffer in the future after the "add to orphan" |
| 866 | * been committed. That's not only a performance gain, | 866 | * operation has been committed. That's not only a performance |
| 867 | * it also stops aliasing problems if the buffer is left | 867 | * gain, it also stops aliasing problems if the buffer is |
| 868 | * behind for writeback and gets reallocated for another | 868 | * left behind for writeback and gets reallocated for another |
| 869 | * use in a different page. */ | 869 | * use in a different page. */ |
| 870 | if (buffer_freed(bh)) { | 870 | if (buffer_freed(bh) && !jh->b_next_transaction) { |
| 871 | clear_buffer_freed(bh); | 871 | clear_buffer_freed(bh); |
| 872 | clear_buffer_jbddirty(bh); | 872 | clear_buffer_jbddirty(bh); |
| 873 | } | 873 | } |
diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c index 006f9ad838a2..99e9fea11077 100644 --- a/fs/jbd/transaction.c +++ b/fs/jbd/transaction.c | |||
| @@ -1864,6 +1864,21 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh) | |||
| 1864 | if (!jh) | 1864 | if (!jh) |
| 1865 | goto zap_buffer_no_jh; | 1865 | goto zap_buffer_no_jh; |
| 1866 | 1866 | ||
| 1867 | /* | ||
| 1868 | * We cannot remove the buffer from checkpoint lists until the | ||
| 1869 | * transaction adding inode to orphan list (let's call it T) | ||
| 1870 | * is committed. Otherwise if the transaction changing the | ||
| 1871 | * buffer would be cleaned from the journal before T is | ||
| 1872 | * committed, a crash will cause that the correct contents of | ||
| 1873 | * the buffer will be lost. On the other hand we have to | ||
| 1874 | * clear the buffer dirty bit at latest at the moment when the | ||
| 1875 | * transaction marking the buffer as freed in the filesystem | ||
| 1876 | * structures is committed because from that moment on the | ||
| 1877 | * buffer can be reallocated and used by a different page. | ||
| 1878 | * Since the block hasn't been freed yet but the inode has | ||
| 1879 | * already been added to orphan list, it is safe for us to add | ||
| 1880 | * the buffer to BJ_Forget list of the newest transaction. | ||
| 1881 | */ | ||
| 1867 | transaction = jh->b_transaction; | 1882 | transaction = jh->b_transaction; |
| 1868 | if (transaction == NULL) { | 1883 | if (transaction == NULL) { |
| 1869 | /* First case: not on any transaction. If it | 1884 | /* First case: not on any transaction. If it |
| @@ -1929,16 +1944,15 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh) | |||
| 1929 | goto zap_buffer; | 1944 | goto zap_buffer; |
| 1930 | } | 1945 | } |
| 1931 | /* | 1946 | /* |
| 1932 | * If it is committing, we simply cannot touch it. We | 1947 | * The buffer is committing, we simply cannot touch |
| 1933 | * can remove it's next_transaction pointer from the | 1948 | * it. So we just set j_next_transaction to the |
| 1934 | * running transaction if that is set, but nothing | 1949 | * running transaction (if there is one) and mark |
| 1935 | * else. */ | 1950 | * buffer as freed so that commit code knows it should |
| 1951 | * clear dirty bits when it is done with the buffer. | ||
| 1952 | */ | ||
| 1936 | set_buffer_freed(bh); | 1953 | set_buffer_freed(bh); |
| 1937 | if (jh->b_next_transaction) { | 1954 | if (journal->j_running_transaction && buffer_jbddirty(bh)) |
| 1938 | J_ASSERT(jh->b_next_transaction == | 1955 | jh->b_next_transaction = journal->j_running_transaction; |
| 1939 | journal->j_running_transaction); | ||
| 1940 | jh->b_next_transaction = NULL; | ||
| 1941 | } | ||
| 1942 | journal_put_journal_head(jh); | 1956 | journal_put_journal_head(jh); |
| 1943 | spin_unlock(&journal->j_list_lock); | 1957 | spin_unlock(&journal->j_list_lock); |
| 1944 | jbd_unlock_bh_state(bh); | 1958 | jbd_unlock_bh_state(bh); |
| @@ -2120,7 +2134,7 @@ void journal_file_buffer(struct journal_head *jh, | |||
| 2120 | */ | 2134 | */ |
| 2121 | void __journal_refile_buffer(struct journal_head *jh) | 2135 | void __journal_refile_buffer(struct journal_head *jh) |
| 2122 | { | 2136 | { |
| 2123 | int was_dirty; | 2137 | int was_dirty, jlist; |
| 2124 | struct buffer_head *bh = jh2bh(jh); | 2138 | struct buffer_head *bh = jh2bh(jh); |
| 2125 | 2139 | ||
| 2126 | J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh)); | 2140 | J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh)); |
| @@ -2142,8 +2156,13 @@ void __journal_refile_buffer(struct journal_head *jh) | |||
| 2142 | __journal_temp_unlink_buffer(jh); | 2156 | __journal_temp_unlink_buffer(jh); |
| 2143 | jh->b_transaction = jh->b_next_transaction; | 2157 | jh->b_transaction = jh->b_next_transaction; |
| 2144 | jh->b_next_transaction = NULL; | 2158 | jh->b_next_transaction = NULL; |
| 2145 | __journal_file_buffer(jh, jh->b_transaction, | 2159 | if (buffer_freed(bh)) |
| 2146 | jh->b_modified ? BJ_Metadata : BJ_Reserved); | 2160 | jlist = BJ_Forget; |
| 2161 | else if (jh->b_modified) | ||
| 2162 | jlist = BJ_Metadata; | ||
| 2163 | else | ||
| 2164 | jlist = BJ_Reserved; | ||
| 2165 | __journal_file_buffer(jh, jh->b_transaction, jlist); | ||
| 2147 | J_ASSERT_JH(jh, jh->b_transaction->t_state == T_RUNNING); | 2166 | J_ASSERT_JH(jh, jh->b_transaction->t_state == T_RUNNING); |
| 2148 | 2167 | ||
| 2149 | if (was_dirty) | 2168 | if (was_dirty) |
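Taken together with the jbd/commit.c hunk earlier, the handling of a buffer that is truncated while its last change is still committing now works as sketched below; this only restates the decision points from the hunks above in one place (jbd shown, the jbd2 hunks that follow are structurally identical):

/* journal_unmap_buffer(), buffer owned by the committing transaction: */
set_buffer_freed(bh);
if (journal->j_running_transaction && buffer_jbddirty(bh))
	jh->b_next_transaction = journal->j_running_transaction;

/* Commit of the older transaction clears the dirty state only when no
 * newer transaction still references the buffer: */
if (buffer_freed(bh) && !jh->b_next_transaction) {
	clear_buffer_freed(bh);
	clear_buffer_jbddirty(bh);
}

/* __journal_refile_buffer(): a freed buffer is refiled to BJ_Forget of the
 * next transaction rather than BJ_Metadata/BJ_Reserved, so its dirty bits
 * are dropped once that transaction commits and the block can be reused. */
jlist = buffer_freed(bh) ? BJ_Forget :
	(jh->b_modified ? BJ_Metadata : BJ_Reserved);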
diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c index 886849370950..30beb11ef928 100644 --- a/fs/jbd2/checkpoint.c +++ b/fs/jbd2/checkpoint.c | |||
| @@ -507,6 +507,7 @@ int jbd2_cleanup_journal_tail(journal_t *journal) | |||
| 507 | if (blocknr < journal->j_tail) | 507 | if (blocknr < journal->j_tail) |
| 508 | freed = freed + journal->j_last - journal->j_first; | 508 | freed = freed + journal->j_last - journal->j_first; |
| 509 | 509 | ||
| 510 | trace_jbd2_cleanup_journal_tail(journal, first_tid, blocknr, freed); | ||
| 510 | jbd_debug(1, | 511 | jbd_debug(1, |
| 511 | "Cleaning journal tail from %d to %d (offset %lu), " | 512 | "Cleaning journal tail from %d to %d (offset %lu), " |
| 512 | "freeing %lu\n", | 513 | "freeing %lu\n", |
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c index 1bc74b6f26d2..671da7fb7ffd 100644 --- a/fs/jbd2/commit.c +++ b/fs/jbd2/commit.c | |||
| @@ -883,8 +883,7 @@ restart_loop: | |||
| 883 | spin_unlock(&journal->j_list_lock); | 883 | spin_unlock(&journal->j_list_lock); |
| 884 | bh = jh2bh(jh); | 884 | bh = jh2bh(jh); |
| 885 | jbd_lock_bh_state(bh); | 885 | jbd_lock_bh_state(bh); |
| 886 | J_ASSERT_JH(jh, jh->b_transaction == commit_transaction || | 886 | J_ASSERT_JH(jh, jh->b_transaction == commit_transaction); |
| 887 | jh->b_transaction == journal->j_running_transaction); | ||
| 888 | 887 | ||
| 889 | /* | 888 | /* |
| 890 | * If there is undo-protected committed data against | 889 | * If there is undo-protected committed data against |
| @@ -930,12 +929,12 @@ restart_loop: | |||
| 930 | /* A buffer which has been freed while still being | 929 | /* A buffer which has been freed while still being |
| 931 | * journaled by a previous transaction may end up still | 930 | * journaled by a previous transaction may end up still |
| 932 | * being dirty here, but we want to avoid writing back | 931 | * being dirty here, but we want to avoid writing back |
| 933 | * that buffer in the future now that the last use has | 932 | * that buffer in the future after the "add to orphan" |
| 934 | * been committed. That's not only a performance gain, | 933 | * operation been committed, That's not only a performance |
| 935 | * it also stops aliasing problems if the buffer is left | 934 | * gain, it also stops aliasing problems if the buffer is |
| 936 | * behind for writeback and gets reallocated for another | 935 | * left behind for writeback and gets reallocated for another |
| 937 | * use in a different page. */ | 936 | * use in a different page. */ |
| 938 | if (buffer_freed(bh)) { | 937 | if (buffer_freed(bh) && !jh->b_next_transaction) { |
| 939 | clear_buffer_freed(bh); | 938 | clear_buffer_freed(bh); |
| 940 | clear_buffer_jbddirty(bh); | 939 | clear_buffer_jbddirty(bh); |
| 941 | } | 940 | } |
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index ac0d027595d0..c03d4dce4d76 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c | |||
| @@ -39,6 +39,8 @@ | |||
| 39 | #include <linux/seq_file.h> | 39 | #include <linux/seq_file.h> |
| 40 | #include <linux/math64.h> | 40 | #include <linux/math64.h> |
| 41 | #include <linux/hash.h> | 41 | #include <linux/hash.h> |
| 42 | #include <linux/log2.h> | ||
| 43 | #include <linux/vmalloc.h> | ||
| 42 | 44 | ||
| 43 | #define CREATE_TRACE_POINTS | 45 | #define CREATE_TRACE_POINTS |
| 44 | #include <trace/events/jbd2.h> | 46 | #include <trace/events/jbd2.h> |
| @@ -93,6 +95,7 @@ EXPORT_SYMBOL(jbd2_journal_begin_ordered_truncate); | |||
| 93 | 95 | ||
| 94 | static int journal_convert_superblock_v1(journal_t *, journal_superblock_t *); | 96 | static int journal_convert_superblock_v1(journal_t *, journal_superblock_t *); |
| 95 | static void __journal_abort_soft (journal_t *journal, int errno); | 97 | static void __journal_abort_soft (journal_t *journal, int errno); |
| 98 | static int jbd2_journal_create_slab(size_t slab_size); | ||
| 96 | 99 | ||
| 97 | /* | 100 | /* |
| 98 | * Helper function used to manage commit timeouts | 101 | * Helper function used to manage commit timeouts |
| @@ -1248,6 +1251,13 @@ int jbd2_journal_load(journal_t *journal) | |||
| 1248 | } | 1251 | } |
| 1249 | } | 1252 | } |
| 1250 | 1253 | ||
| 1254 | /* | ||
| 1255 | * Create a slab for this blocksize | ||
| 1256 | */ | ||
| 1257 | err = jbd2_journal_create_slab(be32_to_cpu(sb->s_blocksize)); | ||
| 1258 | if (err) | ||
| 1259 | return err; | ||
| 1260 | |||
| 1251 | /* Let the recovery code check whether it needs to recover any | 1261 | /* Let the recovery code check whether it needs to recover any |
| 1252 | * data from the journal. */ | 1262 | * data from the journal. */ |
| 1253 | if (jbd2_journal_recover(journal)) | 1263 | if (jbd2_journal_recover(journal)) |
| @@ -1807,6 +1817,127 @@ size_t journal_tag_bytes(journal_t *journal) | |||
| 1807 | } | 1817 | } |
| 1808 | 1818 | ||
| 1809 | /* | 1819 | /* |
| 1820 | * JBD memory management | ||
| 1821 | * | ||
| 1822 | * These functions are used to allocate block-sized chunks of memory | ||
| 1823 | * used for making copies of buffer_head data. Very often it will be | ||
| 1824 | * page-sized chunks of data, but sometimes it will be in | ||
| 1825 | * sub-page-size chunks. (For example, 16k pages on Power systems | ||
| 1826 | * with a 4k block file system.) For blocks smaller than a page, we | ||
| 1827 | * use a SLAB allocator. There are slab caches for each block size, | ||
| 1828 | * which are allocated at mount time, if necessary, and we only free | ||
| 1829 | * (all of) the slab caches when/if the jbd2 module is unloaded. For | ||
| 1830 | * this reason we don't need to a mutex to protect access to | ||
| 1831 | * jbd2_slab[] allocating or releasing memory; only in | ||
| 1832 | * jbd2_journal_create_slab(). | ||
| 1833 | */ | ||
| 1834 | #define JBD2_MAX_SLABS 8 | ||
| 1835 | static struct kmem_cache *jbd2_slab[JBD2_MAX_SLABS]; | ||
| 1836 | static DECLARE_MUTEX(jbd2_slab_create_sem); | ||
| 1837 | |||
| 1838 | static const char *jbd2_slab_names[JBD2_MAX_SLABS] = { | ||
| 1839 | "jbd2_1k", "jbd2_2k", "jbd2_4k", "jbd2_8k", | ||
| 1840 | "jbd2_16k", "jbd2_32k", "jbd2_64k", "jbd2_128k" | ||
| 1841 | }; | ||
| 1842 | |||
| 1843 | |||
| 1844 | static void jbd2_journal_destroy_slabs(void) | ||
| 1845 | { | ||
| 1846 | int i; | ||
| 1847 | |||
| 1848 | for (i = 0; i < JBD2_MAX_SLABS; i++) { | ||
| 1849 | if (jbd2_slab[i]) | ||
| 1850 | kmem_cache_destroy(jbd2_slab[i]); | ||
| 1851 | jbd2_slab[i] = NULL; | ||
| 1852 | } | ||
| 1853 | } | ||
| 1854 | |||
| 1855 | static int jbd2_journal_create_slab(size_t size) | ||
| 1856 | { | ||
| 1857 | int i = order_base_2(size) - 10; | ||
| 1858 | size_t slab_size; | ||
| 1859 | |||
| 1860 | if (size == PAGE_SIZE) | ||
| 1861 | return 0; | ||
| 1862 | |||
| 1863 | if (i >= JBD2_MAX_SLABS) | ||
| 1864 | return -EINVAL; | ||
| 1865 | |||
| 1866 | if (unlikely(i < 0)) | ||
| 1867 | i = 0; | ||
| 1868 | down(&jbd2_slab_create_sem); | ||
| 1869 | if (jbd2_slab[i]) { | ||
| 1870 | up(&jbd2_slab_create_sem); | ||
| 1871 | return 0; /* Already created */ | ||
| 1872 | } | ||
| 1873 | |||
| 1874 | slab_size = 1 << (i+10); | ||
| 1875 | jbd2_slab[i] = kmem_cache_create(jbd2_slab_names[i], slab_size, | ||
| 1876 | slab_size, 0, NULL); | ||
| 1877 | up(&jbd2_slab_create_sem); | ||
| 1878 | if (!jbd2_slab[i]) { | ||
| 1879 | printk(KERN_EMERG "JBD2: no memory for jbd2_slab cache\n"); | ||
| 1880 | return -ENOMEM; | ||
| 1881 | } | ||
| 1882 | return 0; | ||
| 1883 | } | ||
| 1884 | |||
| 1885 | static struct kmem_cache *get_slab(size_t size) | ||
| 1886 | { | ||
| 1887 | int i = order_base_2(size) - 10; | ||
| 1888 | |||
| 1889 | BUG_ON(i >= JBD2_MAX_SLABS); | ||
| 1890 | if (unlikely(i < 0)) | ||
| 1891 | i = 0; | ||
| 1892 | BUG_ON(jbd2_slab[i] == 0); | ||
| 1893 | return jbd2_slab[i]; | ||
| 1894 | } | ||
| 1895 | |||
| 1896 | void *jbd2_alloc(size_t size, gfp_t flags) | ||
| 1897 | { | ||
| 1898 | void *ptr; | ||
| 1899 | |||
| 1900 | BUG_ON(size & (size-1)); /* Must be a power of 2 */ | ||
| 1901 | |||
| 1902 | flags |= __GFP_REPEAT; | ||
| 1903 | if (size == PAGE_SIZE) | ||
| 1904 | ptr = (void *)__get_free_pages(flags, 0); | ||
| 1905 | else if (size > PAGE_SIZE) { | ||
| 1906 | int order = get_order(size); | ||
| 1907 | |||
| 1908 | if (order < 3) | ||
| 1909 | ptr = (void *)__get_free_pages(flags, order); | ||
| 1910 | else | ||
| 1911 | ptr = vmalloc(size); | ||
| 1912 | } else | ||
| 1913 | ptr = kmem_cache_alloc(get_slab(size), flags); | ||
| 1914 | |||
| 1915 | /* Check alignment; SLUB has gotten this wrong in the past, | ||
| 1916 | * and this can lead to user data corruption! */ | ||
| 1917 | BUG_ON(((unsigned long) ptr) & (size-1)); | ||
| 1918 | |||
| 1919 | return ptr; | ||
| 1920 | } | ||
| 1921 | |||
| 1922 | void jbd2_free(void *ptr, size_t size) | ||
| 1923 | { | ||
| 1924 | if (size == PAGE_SIZE) { | ||
| 1925 | free_pages((unsigned long)ptr, 0); | ||
| 1926 | return; | ||
| 1927 | } | ||
| 1928 | if (size > PAGE_SIZE) { | ||
| 1929 | int order = get_order(size); | ||
| 1930 | |||
| 1931 | if (order < 3) | ||
| 1932 | free_pages((unsigned long)ptr, order); | ||
| 1933 | else | ||
| 1934 | vfree(ptr); | ||
| 1935 | return; | ||
| 1936 | } | ||
| 1937 | kmem_cache_free(get_slab(size), ptr); | ||
| 1938 | }; | ||
| 1939 | |||
| 1940 | /* | ||
| 1810 | * Journal_head storage management | 1941 | * Journal_head storage management |
| 1811 | */ | 1942 | */ |
| 1812 | static struct kmem_cache *jbd2_journal_head_cache; | 1943 | static struct kmem_cache *jbd2_journal_head_cache; |
| @@ -2204,6 +2335,7 @@ static void jbd2_journal_destroy_caches(void) | |||
| 2204 | jbd2_journal_destroy_revoke_caches(); | 2335 | jbd2_journal_destroy_revoke_caches(); |
| 2205 | jbd2_journal_destroy_jbd2_journal_head_cache(); | 2336 | jbd2_journal_destroy_jbd2_journal_head_cache(); |
| 2206 | jbd2_journal_destroy_handle_cache(); | 2337 | jbd2_journal_destroy_handle_cache(); |
| 2338 | jbd2_journal_destroy_slabs(); | ||
| 2207 | } | 2339 | } |
| 2208 | 2340 | ||
| 2209 | static int __init journal_init(void) | 2341 | static int __init journal_init(void) |
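The new jbd2_slab[] array indexes one cache per block size, with index = order_base_2(size) - 10, so a 1k block maps to slot 0 ("jbd2_1k") and a 128k block to slot 7 ("jbd2_128k"); PAGE_SIZE allocations bypass the slabs entirely. A small user-space check of that arithmetic — this is not kernel code, and order_base_2() is reimplemented here only to demonstrate the mapping:

#include <stdio.h>

static const char *jbd2_slab_names[8] = {
	"jbd2_1k", "jbd2_2k", "jbd2_4k", "jbd2_8k",
	"jbd2_16k", "jbd2_32k", "jbd2_64k", "jbd2_128k"
};

/* Equivalent of the kernel helper: smallest o such that 2^o >= n. */
static int order_base_2(unsigned long n)
{
	int order = 0;

	while ((1UL << order) < n)
		order++;
	return order;
}

int main(void)
{
	unsigned long size;

	for (size = 1024; size <= 128 * 1024; size <<= 1)
		printf("blocksize %6lu -> jbd2_slab[%d] = %s\n",
		       size, order_base_2(size) - 10,
		       jbd2_slab_names[order_base_2(size) - 10]);
	return 0;
}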
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c index a0512700542f..bfc70f57900f 100644 --- a/fs/jbd2/transaction.c +++ b/fs/jbd2/transaction.c | |||
| @@ -1727,6 +1727,21 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh) | |||
| 1727 | if (!jh) | 1727 | if (!jh) |
| 1728 | goto zap_buffer_no_jh; | 1728 | goto zap_buffer_no_jh; |
| 1729 | 1729 | ||
| 1730 | /* | ||
| 1731 | * We cannot remove the buffer from checkpoint lists until the | ||
| 1732 | * transaction adding inode to orphan list (let's call it T) | ||
| 1733 | * is committed. Otherwise if the transaction changing the | ||
| 1734 | * buffer would be cleaned from the journal before T is | ||
| 1735 | * committed, a crash will cause that the correct contents of | ||
| 1736 | * the buffer will be lost. On the other hand we have to | ||
| 1737 | * clear the buffer dirty bit at latest at the moment when the | ||
| 1738 | * transaction marking the buffer as freed in the filesystem | ||
| 1739 | * structures is committed because from that moment on the | ||
| 1740 | * buffer can be reallocated and used by a different page. | ||
| 1741 | * Since the block hasn't been freed yet but the inode has | ||
| 1742 | * already been added to orphan list, it is safe for us to add | ||
| 1743 | * the buffer to BJ_Forget list of the newest transaction. | ||
| 1744 | */ | ||
| 1730 | transaction = jh->b_transaction; | 1745 | transaction = jh->b_transaction; |
| 1731 | if (transaction == NULL) { | 1746 | if (transaction == NULL) { |
| 1732 | /* First case: not on any transaction. If it | 1747 | /* First case: not on any transaction. If it |
| @@ -1783,16 +1798,15 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh) | |||
| 1783 | } else if (transaction == journal->j_committing_transaction) { | 1798 | } else if (transaction == journal->j_committing_transaction) { |
| 1784 | JBUFFER_TRACE(jh, "on committing transaction"); | 1799 | JBUFFER_TRACE(jh, "on committing transaction"); |
| 1785 | /* | 1800 | /* |
| 1786 | * If it is committing, we simply cannot touch it. We | 1801 | * The buffer is committing, we simply cannot touch |
| 1787 | * can remove it's next_transaction pointer from the | 1802 | * it. So we just set j_next_transaction to the |
| 1788 | * running transaction if that is set, but nothing | 1803 | * running transaction (if there is one) and mark |
| 1789 | * else. */ | 1804 | * buffer as freed so that commit code knows it should |
| 1805 | * clear dirty bits when it is done with the buffer. | ||
| 1806 | */ | ||
| 1790 | set_buffer_freed(bh); | 1807 | set_buffer_freed(bh); |
| 1791 | if (jh->b_next_transaction) { | 1808 | if (journal->j_running_transaction && buffer_jbddirty(bh)) |
| 1792 | J_ASSERT(jh->b_next_transaction == | 1809 | jh->b_next_transaction = journal->j_running_transaction; |
| 1793 | journal->j_running_transaction); | ||
| 1794 | jh->b_next_transaction = NULL; | ||
| 1795 | } | ||
| 1796 | jbd2_journal_put_journal_head(jh); | 1810 | jbd2_journal_put_journal_head(jh); |
| 1797 | spin_unlock(&journal->j_list_lock); | 1811 | spin_unlock(&journal->j_list_lock); |
| 1798 | jbd_unlock_bh_state(bh); | 1812 | jbd_unlock_bh_state(bh); |
| @@ -1969,7 +1983,7 @@ void jbd2_journal_file_buffer(struct journal_head *jh, | |||
| 1969 | */ | 1983 | */ |
| 1970 | void __jbd2_journal_refile_buffer(struct journal_head *jh) | 1984 | void __jbd2_journal_refile_buffer(struct journal_head *jh) |
| 1971 | { | 1985 | { |
| 1972 | int was_dirty; | 1986 | int was_dirty, jlist; |
| 1973 | struct buffer_head *bh = jh2bh(jh); | 1987 | struct buffer_head *bh = jh2bh(jh); |
| 1974 | 1988 | ||
| 1975 | J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh)); | 1989 | J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh)); |
| @@ -1991,8 +2005,13 @@ void __jbd2_journal_refile_buffer(struct journal_head *jh) | |||
| 1991 | __jbd2_journal_temp_unlink_buffer(jh); | 2005 | __jbd2_journal_temp_unlink_buffer(jh); |
| 1992 | jh->b_transaction = jh->b_next_transaction; | 2006 | jh->b_transaction = jh->b_next_transaction; |
| 1993 | jh->b_next_transaction = NULL; | 2007 | jh->b_next_transaction = NULL; |
| 1994 | __jbd2_journal_file_buffer(jh, jh->b_transaction, | 2008 | if (buffer_freed(bh)) |
| 1995 | jh->b_modified ? BJ_Metadata : BJ_Reserved); | 2009 | jlist = BJ_Forget; |
| 2010 | else if (jh->b_modified) | ||
| 2011 | jlist = BJ_Metadata; | ||
| 2012 | else | ||
| 2013 | jlist = BJ_Reserved; | ||
| 2014 | __jbd2_journal_file_buffer(jh, jh->b_transaction, jlist); | ||
| 1996 | J_ASSERT_JH(jh, jh->b_transaction->t_state == T_RUNNING); | 2015 | J_ASSERT_JH(jh, jh->b_transaction->t_state == T_RUNNING); |
| 1997 | 2016 | ||
| 1998 | if (was_dirty) | 2017 | if (was_dirty) |
diff --git a/fs/jfs/acl.c b/fs/jfs/acl.c index d66477c34306..213169780b6c 100644 --- a/fs/jfs/acl.c +++ b/fs/jfs/acl.c | |||
| @@ -20,7 +20,6 @@ | |||
| 20 | 20 | ||
| 21 | #include <linux/sched.h> | 21 | #include <linux/sched.h> |
| 22 | #include <linux/fs.h> | 22 | #include <linux/fs.h> |
| 23 | #include <linux/quotaops.h> | ||
| 24 | #include <linux/posix_acl_xattr.h> | 23 | #include <linux/posix_acl_xattr.h> |
| 25 | #include "jfs_incore.h" | 24 | #include "jfs_incore.h" |
| 26 | #include "jfs_txnmgr.h" | 25 | #include "jfs_txnmgr.h" |
| @@ -174,7 +173,7 @@ cleanup: | |||
| 174 | return rc; | 173 | return rc; |
| 175 | } | 174 | } |
| 176 | 175 | ||
| 177 | static int jfs_acl_chmod(struct inode *inode) | 176 | int jfs_acl_chmod(struct inode *inode) |
| 178 | { | 177 | { |
| 179 | struct posix_acl *acl, *clone; | 178 | struct posix_acl *acl, *clone; |
| 180 | int rc; | 179 | int rc; |
| @@ -205,26 +204,3 @@ static int jfs_acl_chmod(struct inode *inode) | |||
| 205 | posix_acl_release(clone); | 204 | posix_acl_release(clone); |
| 206 | return rc; | 205 | return rc; |
| 207 | } | 206 | } |
| 208 | |||
| 209 | int jfs_setattr(struct dentry *dentry, struct iattr *iattr) | ||
| 210 | { | ||
| 211 | struct inode *inode = dentry->d_inode; | ||
| 212 | int rc; | ||
| 213 | |||
| 214 | rc = inode_change_ok(inode, iattr); | ||
| 215 | if (rc) | ||
| 216 | return rc; | ||
| 217 | |||
| 218 | if ((iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid) || | ||
| 219 | (iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid)) { | ||
| 220 | if (vfs_dq_transfer(inode, iattr)) | ||
| 221 | return -EDQUOT; | ||
| 222 | } | ||
| 223 | |||
| 224 | rc = inode_setattr(inode, iattr); | ||
| 225 | |||
| 226 | if (!rc && (iattr->ia_valid & ATTR_MODE)) | ||
| 227 | rc = jfs_acl_chmod(inode); | ||
| 228 | |||
| 229 | return rc; | ||
| 230 | } | ||
diff --git a/fs/jfs/file.c b/fs/jfs/file.c index 2b70fa78e4a7..14ba982b3f24 100644 --- a/fs/jfs/file.c +++ b/fs/jfs/file.c | |||
| @@ -18,6 +18,7 @@ | |||
| 18 | */ | 18 | */ |
| 19 | 19 | ||
| 20 | #include <linux/fs.h> | 20 | #include <linux/fs.h> |
| 21 | #include <linux/quotaops.h> | ||
| 21 | #include "jfs_incore.h" | 22 | #include "jfs_incore.h" |
| 22 | #include "jfs_inode.h" | 23 | #include "jfs_inode.h" |
| 23 | #include "jfs_dmap.h" | 24 | #include "jfs_dmap.h" |
| @@ -47,7 +48,7 @@ static int jfs_open(struct inode *inode, struct file *file) | |||
| 47 | { | 48 | { |
| 48 | int rc; | 49 | int rc; |
| 49 | 50 | ||
| 50 | if ((rc = generic_file_open(inode, file))) | 51 | if ((rc = dquot_file_open(inode, file))) |
| 51 | return rc; | 52 | return rc; |
| 52 | 53 | ||
| 53 | /* | 54 | /* |
| @@ -88,14 +89,40 @@ static int jfs_release(struct inode *inode, struct file *file) | |||
| 88 | return 0; | 89 | return 0; |
| 89 | } | 90 | } |
| 90 | 91 | ||
| 92 | int jfs_setattr(struct dentry *dentry, struct iattr *iattr) | ||
| 93 | { | ||
| 94 | struct inode *inode = dentry->d_inode; | ||
| 95 | int rc; | ||
| 96 | |||
| 97 | rc = inode_change_ok(inode, iattr); | ||
| 98 | if (rc) | ||
| 99 | return rc; | ||
| 100 | |||
| 101 | if (iattr->ia_valid & ATTR_SIZE) | ||
| 102 | dquot_initialize(inode); | ||
| 103 | if ((iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid) || | ||
| 104 | (iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid)) { | ||
| 105 | rc = dquot_transfer(inode, iattr); | ||
| 106 | if (rc) | ||
| 107 | return rc; | ||
| 108 | } | ||
| 109 | |||
| 110 | rc = inode_setattr(inode, iattr); | ||
| 111 | |||
| 112 | if (!rc && (iattr->ia_valid & ATTR_MODE)) | ||
| 113 | rc = jfs_acl_chmod(inode); | ||
| 114 | |||
| 115 | return rc; | ||
| 116 | } | ||
| 117 | |||
| 91 | const struct inode_operations jfs_file_inode_operations = { | 118 | const struct inode_operations jfs_file_inode_operations = { |
| 92 | .truncate = jfs_truncate, | 119 | .truncate = jfs_truncate, |
| 93 | .setxattr = jfs_setxattr, | 120 | .setxattr = jfs_setxattr, |
| 94 | .getxattr = jfs_getxattr, | 121 | .getxattr = jfs_getxattr, |
| 95 | .listxattr = jfs_listxattr, | 122 | .listxattr = jfs_listxattr, |
| 96 | .removexattr = jfs_removexattr, | 123 | .removexattr = jfs_removexattr, |
| 97 | #ifdef CONFIG_JFS_POSIX_ACL | ||
| 98 | .setattr = jfs_setattr, | 124 | .setattr = jfs_setattr, |
| 125 | #ifdef CONFIG_JFS_POSIX_ACL | ||
| 99 | .check_acl = jfs_check_acl, | 126 | .check_acl = jfs_check_acl, |
| 100 | #endif | 127 | #endif |
| 101 | }; | 128 | }; |
diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c index b2ae190a77ba..9dd126276c9f 100644 --- a/fs/jfs/inode.c +++ b/fs/jfs/inode.c | |||
| @@ -22,6 +22,7 @@ | |||
| 22 | #include <linux/buffer_head.h> | 22 | #include <linux/buffer_head.h> |
| 23 | #include <linux/pagemap.h> | 23 | #include <linux/pagemap.h> |
| 24 | #include <linux/quotaops.h> | 24 | #include <linux/quotaops.h> |
| 25 | #include <linux/writeback.h> | ||
| 25 | #include "jfs_incore.h" | 26 | #include "jfs_incore.h" |
| 26 | #include "jfs_inode.h" | 27 | #include "jfs_inode.h" |
| 27 | #include "jfs_filsys.h" | 28 | #include "jfs_filsys.h" |
| @@ -120,8 +121,10 @@ int jfs_commit_inode(struct inode *inode, int wait) | |||
| 120 | return rc; | 121 | return rc; |
| 121 | } | 122 | } |
| 122 | 123 | ||
| 123 | int jfs_write_inode(struct inode *inode, int wait) | 124 | int jfs_write_inode(struct inode *inode, struct writeback_control *wbc) |
| 124 | { | 125 | { |
| 126 | int wait = wbc->sync_mode == WB_SYNC_ALL; | ||
| 127 | |||
| 125 | if (test_cflag(COMMIT_Nolink, inode)) | 128 | if (test_cflag(COMMIT_Nolink, inode)) |
| 126 | return 0; | 129 | return 0; |
| 127 | /* | 130 | /* |
| @@ -146,6 +149,9 @@ void jfs_delete_inode(struct inode *inode) | |||
| 146 | { | 149 | { |
| 147 | jfs_info("In jfs_delete_inode, inode = 0x%p", inode); | 150 | jfs_info("In jfs_delete_inode, inode = 0x%p", inode); |
| 148 | 151 | ||
| 152 | if (!is_bad_inode(inode)) | ||
| 153 | dquot_initialize(inode); | ||
| 154 | |||
| 149 | if (!is_bad_inode(inode) && | 155 | if (!is_bad_inode(inode) && |
| 150 | (JFS_IP(inode)->fileset == FILESYSTEM_I)) { | 156 | (JFS_IP(inode)->fileset == FILESYSTEM_I)) { |
| 151 | truncate_inode_pages(&inode->i_data, 0); | 157 | truncate_inode_pages(&inode->i_data, 0); |
| @@ -158,9 +164,9 @@ void jfs_delete_inode(struct inode *inode) | |||
| 158 | /* | 164 | /* |
| 159 | * Free the inode from the quota allocation. | 165 | * Free the inode from the quota allocation. |
| 160 | */ | 166 | */ |
| 161 | vfs_dq_init(inode); | 167 | dquot_initialize(inode); |
| 162 | vfs_dq_free_inode(inode); | 168 | dquot_free_inode(inode); |
| 163 | vfs_dq_drop(inode); | 169 | dquot_drop(inode); |
| 164 | } | 170 | } |
| 165 | 171 | ||
| 166 | clear_inode(inode); | 172 | clear_inode(inode); |
diff --git a/fs/jfs/jfs_acl.h b/fs/jfs/jfs_acl.h index b07bd417ef85..54e07559878d 100644 --- a/fs/jfs/jfs_acl.h +++ b/fs/jfs/jfs_acl.h | |||
| @@ -22,7 +22,7 @@ | |||
| 22 | 22 | ||
| 23 | int jfs_check_acl(struct inode *, int); | 23 | int jfs_check_acl(struct inode *, int); |
| 24 | int jfs_init_acl(tid_t, struct inode *, struct inode *); | 24 | int jfs_init_acl(tid_t, struct inode *, struct inode *); |
| 25 | int jfs_setattr(struct dentry *, struct iattr *); | 25 | int jfs_acl_chmod(struct inode *inode); |
| 26 | 26 | ||
| 27 | #else | 27 | #else |
| 28 | 28 | ||
| @@ -32,5 +32,10 @@ static inline int jfs_init_acl(tid_t tid, struct inode *inode, | |||
| 32 | return 0; | 32 | return 0; |
| 33 | } | 33 | } |
| 34 | 34 | ||
| 35 | static inline int jfs_acl_chmod(struct inode *inode) | ||
| 36 | { | ||
| 37 | return 0; | ||
| 38 | } | ||
| 39 | |||
| 35 | #endif | 40 | #endif |
| 36 | #endif /* _H_JFS_ACL */ | 41 | #endif /* _H_JFS_ACL */ |
diff --git a/fs/jfs/jfs_dtree.c b/fs/jfs/jfs_dtree.c index 925871e9887b..0e4623be70ce 100644 --- a/fs/jfs/jfs_dtree.c +++ b/fs/jfs/jfs_dtree.c | |||
| @@ -381,10 +381,10 @@ static u32 add_index(tid_t tid, struct inode *ip, s64 bn, int slot) | |||
| 381 | * It's time to move the inline table to an external | 381 | * It's time to move the inline table to an external |
| 382 | * page and begin to build the xtree | 382 | * page and begin to build the xtree |
| 383 | */ | 383 | */ |
| 384 | if (vfs_dq_alloc_block(ip, sbi->nbperpage)) | 384 | if (dquot_alloc_block(ip, sbi->nbperpage)) |
| 385 | goto clean_up; | 385 | goto clean_up; |
| 386 | if (dbAlloc(ip, 0, sbi->nbperpage, &xaddr)) { | 386 | if (dbAlloc(ip, 0, sbi->nbperpage, &xaddr)) { |
| 387 | vfs_dq_free_block(ip, sbi->nbperpage); | 387 | dquot_free_block(ip, sbi->nbperpage); |
| 388 | goto clean_up; | 388 | goto clean_up; |
| 389 | } | 389 | } |
| 390 | 390 | ||
| @@ -408,7 +408,7 @@ static u32 add_index(tid_t tid, struct inode *ip, s64 bn, int slot) | |||
| 408 | memcpy(&jfs_ip->i_dirtable, temp_table, | 408 | memcpy(&jfs_ip->i_dirtable, temp_table, |
| 409 | sizeof (temp_table)); | 409 | sizeof (temp_table)); |
| 410 | dbFree(ip, xaddr, sbi->nbperpage); | 410 | dbFree(ip, xaddr, sbi->nbperpage); |
| 411 | vfs_dq_free_block(ip, sbi->nbperpage); | 411 | dquot_free_block(ip, sbi->nbperpage); |
| 412 | goto clean_up; | 412 | goto clean_up; |
| 413 | } | 413 | } |
| 414 | ip->i_size = PSIZE; | 414 | ip->i_size = PSIZE; |
| @@ -1027,10 +1027,9 @@ static int dtSplitUp(tid_t tid, | |||
| 1027 | n = xlen; | 1027 | n = xlen; |
| 1028 | 1028 | ||
| 1029 | /* Allocate blocks to quota. */ | 1029 | /* Allocate blocks to quota. */ |
| 1030 | if (vfs_dq_alloc_block(ip, n)) { | 1030 | rc = dquot_alloc_block(ip, n); |
| 1031 | rc = -EDQUOT; | 1031 | if (rc) |
| 1032 | goto extendOut; | 1032 | goto extendOut; |
| 1033 | } | ||
| 1034 | quota_allocation += n; | 1033 | quota_allocation += n; |
| 1035 | 1034 | ||
| 1036 | if ((rc = dbReAlloc(sbi->ipbmap, xaddr, (s64) xlen, | 1035 | if ((rc = dbReAlloc(sbi->ipbmap, xaddr, (s64) xlen, |
| @@ -1308,7 +1307,7 @@ static int dtSplitUp(tid_t tid, | |||
| 1308 | 1307 | ||
| 1309 | /* Rollback quota allocation */ | 1308 | /* Rollback quota allocation */ |
| 1310 | if (rc && quota_allocation) | 1309 | if (rc && quota_allocation) |
| 1311 | vfs_dq_free_block(ip, quota_allocation); | 1310 | dquot_free_block(ip, quota_allocation); |
| 1312 | 1311 | ||
| 1313 | dtSplitUp_Exit: | 1312 | dtSplitUp_Exit: |
| 1314 | 1313 | ||
| @@ -1369,9 +1368,10 @@ static int dtSplitPage(tid_t tid, struct inode *ip, struct dtsplit * split, | |||
| 1369 | return -EIO; | 1368 | return -EIO; |
| 1370 | 1369 | ||
| 1371 | /* Allocate blocks to quota. */ | 1370 | /* Allocate blocks to quota. */ |
| 1372 | if (vfs_dq_alloc_block(ip, lengthPXD(pxd))) { | 1371 | rc = dquot_alloc_block(ip, lengthPXD(pxd)); |
| 1372 | if (rc) { | ||
| 1373 | release_metapage(rmp); | 1373 | release_metapage(rmp); |
| 1374 | return -EDQUOT; | 1374 | return rc; |
| 1375 | } | 1375 | } |
| 1376 | 1376 | ||
| 1377 | jfs_info("dtSplitPage: ip:0x%p smp:0x%p rmp:0x%p", ip, smp, rmp); | 1377 | jfs_info("dtSplitPage: ip:0x%p smp:0x%p rmp:0x%p", ip, smp, rmp); |
| @@ -1892,6 +1892,7 @@ static int dtSplitRoot(tid_t tid, | |||
| 1892 | struct dt_lock *dtlck; | 1892 | struct dt_lock *dtlck; |
| 1893 | struct tlock *tlck; | 1893 | struct tlock *tlck; |
| 1894 | struct lv *lv; | 1894 | struct lv *lv; |
| 1895 | int rc; | ||
| 1895 | 1896 | ||
| 1896 | /* get split root page */ | 1897 | /* get split root page */ |
| 1897 | smp = split->mp; | 1898 | smp = split->mp; |
| @@ -1916,9 +1917,10 @@ static int dtSplitRoot(tid_t tid, | |||
| 1916 | rp = rmp->data; | 1917 | rp = rmp->data; |
| 1917 | 1918 | ||
| 1918 | /* Allocate blocks to quota. */ | 1919 | /* Allocate blocks to quota. */ |
| 1919 | if (vfs_dq_alloc_block(ip, lengthPXD(pxd))) { | 1920 | rc = dquot_alloc_block(ip, lengthPXD(pxd)); |
| 1921 | if (rc) { | ||
| 1920 | release_metapage(rmp); | 1922 | release_metapage(rmp); |
| 1921 | return -EDQUOT; | 1923 | return rc; |
| 1922 | } | 1924 | } |
| 1923 | 1925 | ||
| 1924 | BT_MARK_DIRTY(rmp, ip); | 1926 | BT_MARK_DIRTY(rmp, ip); |
| @@ -2287,7 +2289,7 @@ static int dtDeleteUp(tid_t tid, struct inode *ip, | |||
| 2287 | xlen = lengthPXD(&fp->header.self); | 2289 | xlen = lengthPXD(&fp->header.self); |
| 2288 | 2290 | ||
| 2289 | /* Free quota allocation. */ | 2291 | /* Free quota allocation. */ |
| 2290 | vfs_dq_free_block(ip, xlen); | 2292 | dquot_free_block(ip, xlen); |
| 2291 | 2293 | ||
| 2292 | /* free/invalidate its buffer page */ | 2294 | /* free/invalidate its buffer page */ |
| 2293 | discard_metapage(fmp); | 2295 | discard_metapage(fmp); |
| @@ -2363,7 +2365,7 @@ static int dtDeleteUp(tid_t tid, struct inode *ip, | |||
| 2363 | xlen = lengthPXD(&p->header.self); | 2365 | xlen = lengthPXD(&p->header.self); |
| 2364 | 2366 | ||
| 2365 | /* Free quota allocation */ | 2367 | /* Free quota allocation */ |
| 2366 | vfs_dq_free_block(ip, xlen); | 2368 | dquot_free_block(ip, xlen); |
| 2367 | 2369 | ||
| 2368 | /* free/invalidate its buffer page */ | 2370 | /* free/invalidate its buffer page */ |
| 2369 | discard_metapage(mp); | 2371 | discard_metapage(mp); |
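Besides the rename, the quota block helpers change their return convention: vfs_dq_alloc_block() signalled failure with a non-zero result and callers hard-coded -EDQUOT, whereas dquot_alloc_block() returns a real errno that is propagated unchanged. The same mechanical transformation repeats in the jfs_extent.c and jfs_xtree.c hunks below; in general form (a sketch, nblocks is a placeholder):

/* Before: boolean-style failure, error code chosen by the caller. */
if (vfs_dq_alloc_block(ip, nblocks)) {
	rc = -EDQUOT;
	goto out;
}

/* After: the helper itself returns the errno. */
rc = dquot_alloc_block(ip, nblocks);
if (rc)
	goto out;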
diff --git a/fs/jfs/jfs_extent.c b/fs/jfs/jfs_extent.c index 41d6045dbeb0..5d3bbd10f8db 100644 --- a/fs/jfs/jfs_extent.c +++ b/fs/jfs/jfs_extent.c | |||
| @@ -141,10 +141,11 @@ extAlloc(struct inode *ip, s64 xlen, s64 pno, xad_t * xp, bool abnr) | |||
| 141 | } | 141 | } |
| 142 | 142 | ||
| 143 | /* Allocate blocks to quota. */ | 143 | /* Allocate blocks to quota. */ |
| 144 | if (vfs_dq_alloc_block(ip, nxlen)) { | 144 | rc = dquot_alloc_block(ip, nxlen); |
| 145 | if (rc) { | ||
| 145 | dbFree(ip, nxaddr, (s64) nxlen); | 146 | dbFree(ip, nxaddr, (s64) nxlen); |
| 146 | mutex_unlock(&JFS_IP(ip)->commit_mutex); | 147 | mutex_unlock(&JFS_IP(ip)->commit_mutex); |
| 147 | return -EDQUOT; | 148 | return rc; |
| 148 | } | 149 | } |
| 149 | 150 | ||
| 150 | /* determine the value of the extent flag */ | 151 | /* determine the value of the extent flag */ |
| @@ -164,7 +165,7 @@ extAlloc(struct inode *ip, s64 xlen, s64 pno, xad_t * xp, bool abnr) | |||
| 164 | */ | 165 | */ |
| 165 | if (rc) { | 166 | if (rc) { |
| 166 | dbFree(ip, nxaddr, nxlen); | 167 | dbFree(ip, nxaddr, nxlen); |
| 167 | vfs_dq_free_block(ip, nxlen); | 168 | dquot_free_block(ip, nxlen); |
| 168 | mutex_unlock(&JFS_IP(ip)->commit_mutex); | 169 | mutex_unlock(&JFS_IP(ip)->commit_mutex); |
| 169 | return (rc); | 170 | return (rc); |
| 170 | } | 171 | } |
| @@ -256,10 +257,11 @@ int extRealloc(struct inode *ip, s64 nxlen, xad_t * xp, bool abnr) | |||
| 256 | goto exit; | 257 | goto exit; |
| 257 | 258 | ||
| 258 | /* Allocat blocks to quota. */ | 259 | /* Allocat blocks to quota. */ |
| 259 | if (vfs_dq_alloc_block(ip, nxlen)) { | 260 | rc = dquot_alloc_block(ip, nxlen); |
| 261 | if (rc) { | ||
| 260 | dbFree(ip, nxaddr, (s64) nxlen); | 262 | dbFree(ip, nxaddr, (s64) nxlen); |
| 261 | mutex_unlock(&JFS_IP(ip)->commit_mutex); | 263 | mutex_unlock(&JFS_IP(ip)->commit_mutex); |
| 262 | return -EDQUOT; | 264 | return rc; |
| 263 | } | 265 | } |
| 264 | 266 | ||
| 265 | delta = nxlen - xlen; | 267 | delta = nxlen - xlen; |
| @@ -297,7 +299,7 @@ int extRealloc(struct inode *ip, s64 nxlen, xad_t * xp, bool abnr) | |||
| 297 | /* extend the extent */ | 299 | /* extend the extent */ |
| 298 | if ((rc = xtExtend(0, ip, xoff + xlen, (int) nextend, 0))) { | 300 | if ((rc = xtExtend(0, ip, xoff + xlen, (int) nextend, 0))) { |
| 299 | dbFree(ip, xaddr + xlen, delta); | 301 | dbFree(ip, xaddr + xlen, delta); |
| 300 | vfs_dq_free_block(ip, nxlen); | 302 | dquot_free_block(ip, nxlen); |
| 301 | goto exit; | 303 | goto exit; |
| 302 | } | 304 | } |
| 303 | } else { | 305 | } else { |
| @@ -308,7 +310,7 @@ int extRealloc(struct inode *ip, s64 nxlen, xad_t * xp, bool abnr) | |||
| 308 | */ | 310 | */ |
| 309 | if ((rc = xtTailgate(0, ip, xoff, (int) ntail, nxaddr, 0))) { | 311 | if ((rc = xtTailgate(0, ip, xoff, (int) ntail, nxaddr, 0))) { |
| 310 | dbFree(ip, nxaddr, nxlen); | 312 | dbFree(ip, nxaddr, nxlen); |
| 311 | vfs_dq_free_block(ip, nxlen); | 313 | dquot_free_block(ip, nxlen); |
| 312 | goto exit; | 314 | goto exit; |
| 313 | } | 315 | } |
| 314 | } | 316 | } |
diff --git a/fs/jfs/jfs_inode.c b/fs/jfs/jfs_inode.c index dc0e02159ac9..829921b67765 100644 --- a/fs/jfs/jfs_inode.c +++ b/fs/jfs/jfs_inode.c | |||
| @@ -116,10 +116,10 @@ struct inode *ialloc(struct inode *parent, umode_t mode) | |||
| 116 | /* | 116 | /* |
| 117 | * Allocate inode to quota. | 117 | * Allocate inode to quota. |
| 118 | */ | 118 | */ |
| 119 | if (vfs_dq_alloc_inode(inode)) { | 119 | dquot_initialize(inode); |
| 120 | rc = -EDQUOT; | 120 | rc = dquot_alloc_inode(inode); |
| 121 | if (rc) | ||
| 121 | goto fail_drop; | 122 | goto fail_drop; |
| 122 | } | ||
| 123 | 123 | ||
| 124 | inode->i_mode = mode; | 124 | inode->i_mode = mode; |
| 125 | /* inherit flags from parent */ | 125 | /* inherit flags from parent */ |
| @@ -162,7 +162,7 @@ struct inode *ialloc(struct inode *parent, umode_t mode) | |||
| 162 | return inode; | 162 | return inode; |
| 163 | 163 | ||
| 164 | fail_drop: | 164 | fail_drop: |
| 165 | vfs_dq_drop(inode); | 165 | dquot_drop(inode); |
| 166 | inode->i_flags |= S_NOQUOTA; | 166 | inode->i_flags |= S_NOQUOTA; |
| 167 | fail_unlock: | 167 | fail_unlock: |
| 168 | inode->i_nlink = 0; | 168 | inode->i_nlink = 0; |
diff --git a/fs/jfs/jfs_inode.h b/fs/jfs/jfs_inode.h index 1eff7db34d63..79e2c79661df 100644 --- a/fs/jfs/jfs_inode.h +++ b/fs/jfs/jfs_inode.h | |||
| @@ -26,7 +26,7 @@ extern long jfs_ioctl(struct file *, unsigned int, unsigned long); | |||
| 26 | extern long jfs_compat_ioctl(struct file *, unsigned int, unsigned long); | 26 | extern long jfs_compat_ioctl(struct file *, unsigned int, unsigned long); |
| 27 | extern struct inode *jfs_iget(struct super_block *, unsigned long); | 27 | extern struct inode *jfs_iget(struct super_block *, unsigned long); |
| 28 | extern int jfs_commit_inode(struct inode *, int); | 28 | extern int jfs_commit_inode(struct inode *, int); |
| 29 | extern int jfs_write_inode(struct inode*, int); | 29 | extern int jfs_write_inode(struct inode *, struct writeback_control *); |
| 30 | extern void jfs_delete_inode(struct inode *); | 30 | extern void jfs_delete_inode(struct inode *); |
| 31 | extern void jfs_dirty_inode(struct inode *); | 31 | extern void jfs_dirty_inode(struct inode *); |
| 32 | extern void jfs_truncate(struct inode *); | 32 | extern void jfs_truncate(struct inode *); |
| @@ -40,6 +40,7 @@ extern struct dentry *jfs_fh_to_parent(struct super_block *sb, struct fid *fid, | |||
| 40 | int fh_len, int fh_type); | 40 | int fh_len, int fh_type); |
| 41 | extern void jfs_set_inode_flags(struct inode *); | 41 | extern void jfs_set_inode_flags(struct inode *); |
| 42 | extern int jfs_get_block(struct inode *, sector_t, struct buffer_head *, int); | 42 | extern int jfs_get_block(struct inode *, sector_t, struct buffer_head *, int); |
| 43 | extern int jfs_setattr(struct dentry *, struct iattr *); | ||
| 43 | 44 | ||
| 44 | extern const struct address_space_operations jfs_aops; | 45 | extern const struct address_space_operations jfs_aops; |
| 45 | extern const struct inode_operations jfs_dir_inode_operations; | 46 | extern const struct inode_operations jfs_dir_inode_operations; |
diff --git a/fs/jfs/jfs_xtree.c b/fs/jfs/jfs_xtree.c index d654a6458648..6c50871e6220 100644 --- a/fs/jfs/jfs_xtree.c +++ b/fs/jfs/jfs_xtree.c | |||
| @@ -585,10 +585,10 @@ int xtInsert(tid_t tid, /* transaction id */ | |||
| 585 | hint = addressXAD(xad) + lengthXAD(xad) - 1; | 585 | hint = addressXAD(xad) + lengthXAD(xad) - 1; |
| 586 | } else | 586 | } else |
| 587 | hint = 0; | 587 | hint = 0; |
| 588 | if ((rc = vfs_dq_alloc_block(ip, xlen))) | 588 | if ((rc = dquot_alloc_block(ip, xlen))) |
| 589 | goto out; | 589 | goto out; |
| 590 | if ((rc = dbAlloc(ip, hint, (s64) xlen, &xaddr))) { | 590 | if ((rc = dbAlloc(ip, hint, (s64) xlen, &xaddr))) { |
| 591 | vfs_dq_free_block(ip, xlen); | 591 | dquot_free_block(ip, xlen); |
| 592 | goto out; | 592 | goto out; |
| 593 | } | 593 | } |
| 594 | } | 594 | } |
| @@ -617,7 +617,7 @@ int xtInsert(tid_t tid, /* transaction id */ | |||
| 617 | /* undo data extent allocation */ | 617 | /* undo data extent allocation */ |
| 618 | if (*xaddrp == 0) { | 618 | if (*xaddrp == 0) { |
| 619 | dbFree(ip, xaddr, (s64) xlen); | 619 | dbFree(ip, xaddr, (s64) xlen); |
| 620 | vfs_dq_free_block(ip, xlen); | 620 | dquot_free_block(ip, xlen); |
| 621 | } | 621 | } |
| 622 | return rc; | 622 | return rc; |
| 623 | } | 623 | } |
| @@ -985,10 +985,9 @@ xtSplitPage(tid_t tid, struct inode *ip, | |||
| 985 | rbn = addressPXD(pxd); | 985 | rbn = addressPXD(pxd); |
| 986 | 986 | ||
| 987 | /* Allocate blocks to quota. */ | 987 | /* Allocate blocks to quota. */ |
| 988 | if (vfs_dq_alloc_block(ip, lengthPXD(pxd))) { | 988 | rc = dquot_alloc_block(ip, lengthPXD(pxd)); |
| 989 | rc = -EDQUOT; | 989 | if (rc) |
| 990 | goto clean_up; | 990 | goto clean_up; |
| 991 | } | ||
| 992 | 991 | ||
| 993 | quota_allocation += lengthPXD(pxd); | 992 | quota_allocation += lengthPXD(pxd); |
| 994 | 993 | ||
| @@ -1195,7 +1194,7 @@ xtSplitPage(tid_t tid, struct inode *ip, | |||
| 1195 | 1194 | ||
| 1196 | /* Rollback quota allocation. */ | 1195 | /* Rollback quota allocation. */ |
| 1197 | if (quota_allocation) | 1196 | if (quota_allocation) |
| 1198 | vfs_dq_free_block(ip, quota_allocation); | 1197 | dquot_free_block(ip, quota_allocation); |
| 1199 | 1198 | ||
| 1200 | return (rc); | 1199 | return (rc); |
| 1201 | } | 1200 | } |
| @@ -1235,6 +1234,7 @@ xtSplitRoot(tid_t tid, | |||
| 1235 | struct pxdlist *pxdlist; | 1234 | struct pxdlist *pxdlist; |
| 1236 | struct tlock *tlck; | 1235 | struct tlock *tlck; |
| 1237 | struct xtlock *xtlck; | 1236 | struct xtlock *xtlck; |
| 1237 | int rc; | ||
| 1238 | 1238 | ||
| 1239 | sp = &JFS_IP(ip)->i_xtroot; | 1239 | sp = &JFS_IP(ip)->i_xtroot; |
| 1240 | 1240 | ||
| @@ -1252,9 +1252,10 @@ xtSplitRoot(tid_t tid, | |||
| 1252 | return -EIO; | 1252 | return -EIO; |
| 1253 | 1253 | ||
| 1254 | /* Allocate blocks to quota. */ | 1254 | /* Allocate blocks to quota. */ |
| 1255 | if (vfs_dq_alloc_block(ip, lengthPXD(pxd))) { | 1255 | rc = dquot_alloc_block(ip, lengthPXD(pxd)); |
| 1256 | if (rc) { | ||
| 1256 | release_metapage(rmp); | 1257 | release_metapage(rmp); |
| 1257 | return -EDQUOT; | 1258 | return rc; |
| 1258 | } | 1259 | } |
| 1259 | 1260 | ||
| 1260 | jfs_info("xtSplitRoot: ip:0x%p rmp:0x%p", ip, rmp); | 1261 | jfs_info("xtSplitRoot: ip:0x%p rmp:0x%p", ip, rmp); |
| @@ -3680,7 +3681,7 @@ s64 xtTruncate(tid_t tid, struct inode *ip, s64 newsize, int flag) | |||
| 3680 | ip->i_size = newsize; | 3681 | ip->i_size = newsize; |
| 3681 | 3682 | ||
| 3682 | /* update quota allocation to reflect freed blocks */ | 3683 | /* update quota allocation to reflect freed blocks */ |
| 3683 | vfs_dq_free_block(ip, nfreed); | 3684 | dquot_free_block(ip, nfreed); |
| 3684 | 3685 | ||
| 3685 | /* | 3686 | /* |
| 3686 | * free tlock of invalidated pages | 3687 | * free tlock of invalidated pages |
diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c index c79a4270f083..4a3e9f39c21d 100644 --- a/fs/jfs/namei.c +++ b/fs/jfs/namei.c | |||
| @@ -85,6 +85,8 @@ static int jfs_create(struct inode *dip, struct dentry *dentry, int mode, | |||
| 85 | 85 | ||
| 86 | jfs_info("jfs_create: dip:0x%p name:%s", dip, dentry->d_name.name); | 86 | jfs_info("jfs_create: dip:0x%p name:%s", dip, dentry->d_name.name); |
| 87 | 87 | ||
| 88 | dquot_initialize(dip); | ||
| 89 | |||
| 88 | /* | 90 | /* |
| 89 | * search parent directory for entry/freespace | 91 | * search parent directory for entry/freespace |
| 90 | * (dtSearch() returns parent directory page pinned) | 92 | * (dtSearch() returns parent directory page pinned) |
| @@ -215,6 +217,8 @@ static int jfs_mkdir(struct inode *dip, struct dentry *dentry, int mode) | |||
| 215 | 217 | ||
| 216 | jfs_info("jfs_mkdir: dip:0x%p name:%s", dip, dentry->d_name.name); | 218 | jfs_info("jfs_mkdir: dip:0x%p name:%s", dip, dentry->d_name.name); |
| 217 | 219 | ||
| 220 | dquot_initialize(dip); | ||
| 221 | |||
| 218 | /* link count overflow on parent directory ? */ | 222 | /* link count overflow on parent directory ? */ |
| 219 | if (dip->i_nlink == JFS_LINK_MAX) { | 223 | if (dip->i_nlink == JFS_LINK_MAX) { |
| 220 | rc = -EMLINK; | 224 | rc = -EMLINK; |
| @@ -356,7 +360,8 @@ static int jfs_rmdir(struct inode *dip, struct dentry *dentry) | |||
| 356 | jfs_info("jfs_rmdir: dip:0x%p name:%s", dip, dentry->d_name.name); | 360 | jfs_info("jfs_rmdir: dip:0x%p name:%s", dip, dentry->d_name.name); |
| 357 | 361 | ||
| 358 | /* Init inode for quota operations. */ | 362 | /* Init inode for quota operations. */ |
| 359 | vfs_dq_init(ip); | 363 | dquot_initialize(dip); |
| 364 | dquot_initialize(ip); | ||
| 360 | 365 | ||
| 361 | /* directory must be empty to be removed */ | 366 | /* directory must be empty to be removed */ |
| 362 | if (!dtEmpty(ip)) { | 367 | if (!dtEmpty(ip)) { |
| @@ -483,7 +488,8 @@ static int jfs_unlink(struct inode *dip, struct dentry *dentry) | |||
| 483 | jfs_info("jfs_unlink: dip:0x%p name:%s", dip, dentry->d_name.name); | 488 | jfs_info("jfs_unlink: dip:0x%p name:%s", dip, dentry->d_name.name); |
| 484 | 489 | ||
| 485 | /* Init inode for quota operations. */ | 490 | /* Init inode for quota operations. */ |
| 486 | vfs_dq_init(ip); | 491 | dquot_initialize(dip); |
| 492 | dquot_initialize(ip); | ||
| 487 | 493 | ||
| 488 | if ((rc = get_UCSname(&dname, dentry))) | 494 | if ((rc = get_UCSname(&dname, dentry))) |
| 489 | goto out; | 495 | goto out; |
| @@ -805,6 +811,8 @@ static int jfs_link(struct dentry *old_dentry, | |||
| 805 | if (ip->i_nlink == 0) | 811 | if (ip->i_nlink == 0) |
| 806 | return -ENOENT; | 812 | return -ENOENT; |
| 807 | 813 | ||
| 814 | dquot_initialize(dir); | ||
| 815 | |||
| 808 | tid = txBegin(ip->i_sb, 0); | 816 | tid = txBegin(ip->i_sb, 0); |
| 809 | 817 | ||
| 810 | mutex_lock_nested(&JFS_IP(dir)->commit_mutex, COMMIT_MUTEX_PARENT); | 818 | mutex_lock_nested(&JFS_IP(dir)->commit_mutex, COMMIT_MUTEX_PARENT); |
| @@ -896,6 +904,8 @@ static int jfs_symlink(struct inode *dip, struct dentry *dentry, | |||
| 896 | 904 | ||
| 897 | jfs_info("jfs_symlink: dip:0x%p name:%s", dip, name); | 905 | jfs_info("jfs_symlink: dip:0x%p name:%s", dip, name); |
| 898 | 906 | ||
| 907 | dquot_initialize(dip); | ||
| 908 | |||
| 899 | ssize = strlen(name) + 1; | 909 | ssize = strlen(name) + 1; |
| 900 | 910 | ||
| 901 | /* | 911 | /* |
| @@ -1087,6 +1097,9 @@ static int jfs_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
| 1087 | jfs_info("jfs_rename: %s %s", old_dentry->d_name.name, | 1097 | jfs_info("jfs_rename: %s %s", old_dentry->d_name.name, |
| 1088 | new_dentry->d_name.name); | 1098 | new_dentry->d_name.name); |
| 1089 | 1099 | ||
| 1100 | dquot_initialize(old_dir); | ||
| 1101 | dquot_initialize(new_dir); | ||
| 1102 | |||
| 1090 | old_ip = old_dentry->d_inode; | 1103 | old_ip = old_dentry->d_inode; |
| 1091 | new_ip = new_dentry->d_inode; | 1104 | new_ip = new_dentry->d_inode; |
| 1092 | 1105 | ||
| @@ -1136,7 +1149,7 @@ static int jfs_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
| 1136 | } else if (new_ip) { | 1149 | } else if (new_ip) { |
| 1137 | IWRITE_LOCK(new_ip, RDWRLOCK_NORMAL); | 1150 | IWRITE_LOCK(new_ip, RDWRLOCK_NORMAL); |
| 1138 | /* Init inode for quota operations. */ | 1151 | /* Init inode for quota operations. */ |
| 1139 | vfs_dq_init(new_ip); | 1152 | dquot_initialize(new_ip); |
| 1140 | } | 1153 | } |
| 1141 | 1154 | ||
| 1142 | /* | 1155 | /* |
| @@ -1360,6 +1373,8 @@ static int jfs_mknod(struct inode *dir, struct dentry *dentry, | |||
| 1360 | 1373 | ||
| 1361 | jfs_info("jfs_mknod: %s", dentry->d_name.name); | 1374 | jfs_info("jfs_mknod: %s", dentry->d_name.name); |
| 1362 | 1375 | ||
| 1376 | dquot_initialize(dir); | ||
| 1377 | |||
| 1363 | if ((rc = get_UCSname(&dname, dentry))) | 1378 | if ((rc = get_UCSname(&dname, dentry))) |
| 1364 | goto out; | 1379 | goto out; |
| 1365 | 1380 | ||
| @@ -1541,8 +1556,8 @@ const struct inode_operations jfs_dir_inode_operations = { | |||
| 1541 | .getxattr = jfs_getxattr, | 1556 | .getxattr = jfs_getxattr, |
| 1542 | .listxattr = jfs_listxattr, | 1557 | .listxattr = jfs_listxattr, |
| 1543 | .removexattr = jfs_removexattr, | 1558 | .removexattr = jfs_removexattr, |
| 1544 | #ifdef CONFIG_JFS_POSIX_ACL | ||
| 1545 | .setattr = jfs_setattr, | 1559 | .setattr = jfs_setattr, |
| 1560 | #ifdef CONFIG_JFS_POSIX_ACL | ||
| 1546 | .check_acl = jfs_check_acl, | 1561 | .check_acl = jfs_check_acl, |
| 1547 | #endif | 1562 | #endif |
| 1548 | }; | 1563 | }; |
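Note: these namei.c hunks make jfs initialize quotas in its own directory operations, matching the vfs_dq_init() removals from the VFS helpers in the fs/namei.c hunks further down. A hedged sketch of the resulting shape of a create-style method; the example_* names are invented and the jfs locking, dtSearch() and transaction details are omitted:

    #include <linux/fs.h>
    #include <linux/quotaops.h>
    #include <linux/namei.h>

    /* Illustrative only: the converted operations call dquot_initialize()
     * on the inode(s) they are about to charge before doing any work. */
    static int example_create(struct inode *dir, struct dentry *dentry, int mode,
                              struct nameidata *nd)
    {
            dquot_initialize(dir);

            /* ... allocate the new inode, add the directory entry, commit ... */
            return 0;
    }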
diff --git a/fs/jfs/super.c b/fs/jfs/super.c index d929a822a74e..266699deb1c6 100644 --- a/fs/jfs/super.c +++ b/fs/jfs/super.c | |||
| @@ -131,6 +131,11 @@ static void jfs_destroy_inode(struct inode *inode) | |||
| 131 | kmem_cache_free(jfs_inode_cachep, ji); | 131 | kmem_cache_free(jfs_inode_cachep, ji); |
| 132 | } | 132 | } |
| 133 | 133 | ||
| 134 | static void jfs_clear_inode(struct inode *inode) | ||
| 135 | { | ||
| 136 | dquot_drop(inode); | ||
| 137 | } | ||
| 138 | |||
| 134 | static int jfs_statfs(struct dentry *dentry, struct kstatfs *buf) | 139 | static int jfs_statfs(struct dentry *dentry, struct kstatfs *buf) |
| 135 | { | 140 | { |
| 136 | struct jfs_sb_info *sbi = JFS_SBI(dentry->d_sb); | 141 | struct jfs_sb_info *sbi = JFS_SBI(dentry->d_sb); |
| @@ -745,6 +750,7 @@ static const struct super_operations jfs_super_operations = { | |||
| 745 | .dirty_inode = jfs_dirty_inode, | 750 | .dirty_inode = jfs_dirty_inode, |
| 746 | .write_inode = jfs_write_inode, | 751 | .write_inode = jfs_write_inode, |
| 747 | .delete_inode = jfs_delete_inode, | 752 | .delete_inode = jfs_delete_inode, |
| 753 | .clear_inode = jfs_clear_inode, | ||
| 748 | .put_super = jfs_put_super, | 754 | .put_super = jfs_put_super, |
| 749 | .sync_fs = jfs_sync_fs, | 755 | .sync_fs = jfs_sync_fs, |
| 750 | .freeze_fs = jfs_freeze, | 756 | .freeze_fs = jfs_freeze, |
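Note: the super.c hunk above has jfs drop its dquot references from its own ->clear_inode rather than relying on generic code. A minimal sketch of the same wiring for a hypothetical filesystem (only the .clear_inode line is the point; the other methods are elided):

    #include <linux/fs.h>
    #include <linux/quotaops.h>

    static void example_clear_inode(struct inode *inode)
    {
            /* Release any dquot references the inode still holds. */
            dquot_drop(inode);
    }

    static const struct super_operations example_super_ops = {
            .clear_inode    = example_clear_inode,
            /* ... write_inode, delete_inode, put_super, etc. ... */
    };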
diff --git a/fs/jfs/xattr.c b/fs/jfs/xattr.c index fad364548bc9..1f594ab21895 100644 --- a/fs/jfs/xattr.c +++ b/fs/jfs/xattr.c | |||
| @@ -260,14 +260,14 @@ static int ea_write(struct inode *ip, struct jfs_ea_list *ealist, int size, | |||
| 260 | nblocks = (size + (sb->s_blocksize - 1)) >> sb->s_blocksize_bits; | 260 | nblocks = (size + (sb->s_blocksize - 1)) >> sb->s_blocksize_bits; |
| 261 | 261 | ||
| 262 | /* Allocate new blocks to quota. */ | 262 | /* Allocate new blocks to quota. */ |
| 263 | if (vfs_dq_alloc_block(ip, nblocks)) { | 263 | rc = dquot_alloc_block(ip, nblocks); |
| 264 | return -EDQUOT; | 264 | if (rc) |
| 265 | } | 265 | return rc; |
| 266 | 266 | ||
| 267 | rc = dbAlloc(ip, INOHINT(ip), nblocks, &blkno); | 267 | rc = dbAlloc(ip, INOHINT(ip), nblocks, &blkno); |
| 268 | if (rc) { | 268 | if (rc) { |
| 269 | /*Rollback quota allocation. */ | 269 | /*Rollback quota allocation. */ |
| 270 | vfs_dq_free_block(ip, nblocks); | 270 | dquot_free_block(ip, nblocks); |
| 271 | return rc; | 271 | return rc; |
| 272 | } | 272 | } |
| 273 | 273 | ||
| @@ -332,7 +332,7 @@ static int ea_write(struct inode *ip, struct jfs_ea_list *ealist, int size, | |||
| 332 | 332 | ||
| 333 | failed: | 333 | failed: |
| 334 | /* Rollback quota allocation. */ | 334 | /* Rollback quota allocation. */ |
| 335 | vfs_dq_free_block(ip, nblocks); | 335 | dquot_free_block(ip, nblocks); |
| 336 | 336 | ||
| 337 | dbFree(ip, blkno, nblocks); | 337 | dbFree(ip, blkno, nblocks); |
| 338 | return rc; | 338 | return rc; |
| @@ -538,7 +538,8 @@ static int ea_get(struct inode *inode, struct ea_buffer *ea_buf, int min_size) | |||
| 538 | 538 | ||
| 539 | if (blocks_needed > current_blocks) { | 539 | if (blocks_needed > current_blocks) { |
| 540 | /* Allocate new blocks to quota. */ | 540 | /* Allocate new blocks to quota. */ |
| 541 | if (vfs_dq_alloc_block(inode, blocks_needed)) | 541 | rc = dquot_alloc_block(inode, blocks_needed); |
| 542 | if (rc) | ||
| 542 | return -EDQUOT; | 543 | return -EDQUOT; |
| 543 | 544 | ||
| 544 | quota_allocation = blocks_needed; | 545 | quota_allocation = blocks_needed; |
| @@ -602,7 +603,7 @@ static int ea_get(struct inode *inode, struct ea_buffer *ea_buf, int min_size) | |||
| 602 | clean_up: | 603 | clean_up: |
| 603 | /* Rollback quota allocation */ | 604 | /* Rollback quota allocation */ |
| 604 | if (quota_allocation) | 605 | if (quota_allocation) |
| 605 | vfs_dq_free_block(inode, quota_allocation); | 606 | dquot_free_block(inode, quota_allocation); |
| 606 | 607 | ||
| 607 | return (rc); | 608 | return (rc); |
| 608 | } | 609 | } |
| @@ -677,7 +678,7 @@ static int ea_put(tid_t tid, struct inode *inode, struct ea_buffer *ea_buf, | |||
| 677 | 678 | ||
| 678 | /* If old blocks exist, they must be removed from quota allocation. */ | 679 | /* If old blocks exist, they must be removed from quota allocation. */ |
| 679 | if (old_blocks) | 680 | if (old_blocks) |
| 680 | vfs_dq_free_block(inode, old_blocks); | 681 | dquot_free_block(inode, old_blocks); |
| 681 | 682 | ||
| 682 | inode->i_ctime = CURRENT_TIME; | 683 | inode->i_ctime = CURRENT_TIME; |
| 683 | 684 | ||
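Note: the ea_get() hunk above keeps a running quota_allocation count so that its clean_up path can release exactly what was charged. A simplified, hypothetical sketch of that bookkeeping; example_do_ea_work() stands in for the elided EA read/grow step and is not a real jfs function:

    #include <linux/fs.h>
    #include <linux/quotaops.h>

    static int example_do_ea_work(struct inode *inode)
    {
            return 0;       /* placeholder for reading or growing the EA data */
    }

    static int example_ea_get(struct inode *inode, qsize_t blocks_needed,
                              qsize_t current_blocks)
    {
            qsize_t quota_allocation = 0;
            int rc;

            if (blocks_needed > current_blocks) {
                    rc = dquot_alloc_block(inode, blocks_needed);
                    if (rc)
                            return rc;
                    quota_allocation = blocks_needed;
            }

            rc = example_do_ea_work(inode);
            if (rc && quota_allocation)
                    dquot_free_block(inode, quota_allocation);
            return rc;
    }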
diff --git a/fs/minix/inode.c b/fs/minix/inode.c index 74ea82d72164..756f8c93780c 100644 --- a/fs/minix/inode.c +++ b/fs/minix/inode.c | |||
| @@ -17,8 +17,10 @@ | |||
| 17 | #include <linux/init.h> | 17 | #include <linux/init.h> |
| 18 | #include <linux/highuid.h> | 18 | #include <linux/highuid.h> |
| 19 | #include <linux/vfs.h> | 19 | #include <linux/vfs.h> |
| 20 | #include <linux/writeback.h> | ||
| 20 | 21 | ||
| 21 | static int minix_write_inode(struct inode * inode, int wait); | 22 | static int minix_write_inode(struct inode *inode, |
| 23 | struct writeback_control *wbc); | ||
| 22 | static int minix_statfs(struct dentry *dentry, struct kstatfs *buf); | 24 | static int minix_statfs(struct dentry *dentry, struct kstatfs *buf); |
| 23 | static int minix_remount (struct super_block * sb, int * flags, char * data); | 25 | static int minix_remount (struct super_block * sb, int * flags, char * data); |
| 24 | 26 | ||
| @@ -552,7 +554,7 @@ static struct buffer_head * V2_minix_update_inode(struct inode * inode) | |||
| 552 | return bh; | 554 | return bh; |
| 553 | } | 555 | } |
| 554 | 556 | ||
| 555 | static int minix_write_inode(struct inode *inode, int wait) | 557 | static int minix_write_inode(struct inode *inode, struct writeback_control *wbc) |
| 556 | { | 558 | { |
| 557 | int err = 0; | 559 | int err = 0; |
| 558 | struct buffer_head *bh; | 560 | struct buffer_head *bh; |
| @@ -563,7 +565,7 @@ static int minix_write_inode(struct inode *inode, int wait) | |||
| 563 | bh = V2_minix_update_inode(inode); | 565 | bh = V2_minix_update_inode(inode); |
| 564 | if (!bh) | 566 | if (!bh) |
| 565 | return -EIO; | 567 | return -EIO; |
| 566 | if (wait && buffer_dirty(bh)) { | 568 | if (wbc->sync_mode == WB_SYNC_ALL && buffer_dirty(bh)) { |
| 567 | sync_dirty_buffer(bh); | 569 | sync_dirty_buffer(bh); |
| 568 | if (buffer_req(bh) && !buffer_uptodate(bh)) { | 570 | if (buffer_req(bh) && !buffer_uptodate(bh)) { |
| 569 | printk("IO error syncing minix inode [%s:%08lx]\n", | 571 | printk("IO error syncing minix inode [%s:%08lx]\n", |
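Note: these minix hunks convert ->write_inode from a bare int "wait" flag to a struct writeback_control pointer, with the sync decision derived from wbc->sync_mode. A minimal sketch of the converted signature; example_update_inode() is an assumed helper standing in for V1/V2_minix_update_inode():

    #include <linux/fs.h>
    #include <linux/writeback.h>
    #include <linux/buffer_head.h>

    /* Assumed helper: returns the buffer holding the on-disk inode, or NULL. */
    struct buffer_head *example_update_inode(struct inode *inode);

    static int example_write_inode(struct inode *inode,
                                   struct writeback_control *wbc)
    {
            struct buffer_head *bh = example_update_inode(inode);
            int err = 0;

            if (!bh)
                    return -EIO;
            /* WB_SYNC_ALL replaces the old "wait" argument. */
            if (wbc->sync_mode == WB_SYNC_ALL && buffer_dirty(bh)) {
                    sync_dirty_buffer(bh);
                    if (buffer_req(bh) && !buffer_uptodate(bh))
                            err = -EIO;
            }
            brelse(bh);
            return err;
    }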
diff --git a/fs/namei.c b/fs/namei.c index 0741c69b3319..3d9d2f965f84 100644 --- a/fs/namei.c +++ b/fs/namei.c | |||
| @@ -19,7 +19,6 @@ | |||
| 19 | #include <linux/slab.h> | 19 | #include <linux/slab.h> |
| 20 | #include <linux/fs.h> | 20 | #include <linux/fs.h> |
| 21 | #include <linux/namei.h> | 21 | #include <linux/namei.h> |
| 22 | #include <linux/quotaops.h> | ||
| 23 | #include <linux/pagemap.h> | 22 | #include <linux/pagemap.h> |
| 24 | #include <linux/fsnotify.h> | 23 | #include <linux/fsnotify.h> |
| 25 | #include <linux/personality.h> | 24 | #include <linux/personality.h> |
| @@ -498,8 +497,6 @@ static int link_path_walk(const char *, struct nameidata *); | |||
| 498 | 497 | ||
| 499 | static __always_inline int __vfs_follow_link(struct nameidata *nd, const char *link) | 498 | static __always_inline int __vfs_follow_link(struct nameidata *nd, const char *link) |
| 500 | { | 499 | { |
| 501 | int res = 0; | ||
| 502 | char *name; | ||
| 503 | if (IS_ERR(link)) | 500 | if (IS_ERR(link)) |
| 504 | goto fail; | 501 | goto fail; |
| 505 | 502 | ||
| @@ -510,22 +507,7 @@ static __always_inline int __vfs_follow_link(struct nameidata *nd, const char *l | |||
| 510 | path_get(&nd->root); | 507 | path_get(&nd->root); |
| 511 | } | 508 | } |
| 512 | 509 | ||
| 513 | res = link_path_walk(link, nd); | 510 | return link_path_walk(link, nd); |
| 514 | if (nd->depth || res || nd->last_type!=LAST_NORM) | ||
| 515 | return res; | ||
| 516 | /* | ||
| 517 | * If it is an iterative symlinks resolution in open_namei() we | ||
| 518 | * have to copy the last component. And all that crap because of | ||
| 519 | * bloody create() on broken symlinks. Furrfu... | ||
| 520 | */ | ||
| 521 | name = __getname(); | ||
| 522 | if (unlikely(!name)) { | ||
| 523 | path_put(&nd->path); | ||
| 524 | return -ENOMEM; | ||
| 525 | } | ||
| 526 | strcpy(name, nd->last.name); | ||
| 527 | nd->last.name = name; | ||
| 528 | return 0; | ||
| 529 | fail: | 511 | fail: |
| 530 | path_put(&nd->path); | 512 | path_put(&nd->path); |
| 531 | return PTR_ERR(link); | 513 | return PTR_ERR(link); |
| @@ -547,10 +529,10 @@ static inline void path_to_nameidata(struct path *path, struct nameidata *nd) | |||
| 547 | nd->path.dentry = path->dentry; | 529 | nd->path.dentry = path->dentry; |
| 548 | } | 530 | } |
| 549 | 531 | ||
| 550 | static __always_inline int __do_follow_link(struct path *path, struct nameidata *nd) | 532 | static __always_inline int |
| 533 | __do_follow_link(struct path *path, struct nameidata *nd, void **p) | ||
| 551 | { | 534 | { |
| 552 | int error; | 535 | int error; |
| 553 | void *cookie; | ||
| 554 | struct dentry *dentry = path->dentry; | 536 | struct dentry *dentry = path->dentry; |
| 555 | 537 | ||
| 556 | touch_atime(path->mnt, dentry); | 538 | touch_atime(path->mnt, dentry); |
| @@ -562,9 +544,9 @@ static __always_inline int __do_follow_link(struct path *path, struct nameidata | |||
| 562 | } | 544 | } |
| 563 | mntget(path->mnt); | 545 | mntget(path->mnt); |
| 564 | nd->last_type = LAST_BIND; | 546 | nd->last_type = LAST_BIND; |
| 565 | cookie = dentry->d_inode->i_op->follow_link(dentry, nd); | 547 | *p = dentry->d_inode->i_op->follow_link(dentry, nd); |
| 566 | error = PTR_ERR(cookie); | 548 | error = PTR_ERR(*p); |
| 567 | if (!IS_ERR(cookie)) { | 549 | if (!IS_ERR(*p)) { |
| 568 | char *s = nd_get_link(nd); | 550 | char *s = nd_get_link(nd); |
| 569 | error = 0; | 551 | error = 0; |
| 570 | if (s) | 552 | if (s) |
| @@ -574,8 +556,6 @@ static __always_inline int __do_follow_link(struct path *path, struct nameidata | |||
| 574 | if (error) | 556 | if (error) |
| 575 | path_put(&nd->path); | 557 | path_put(&nd->path); |
| 576 | } | 558 | } |
| 577 | if (dentry->d_inode->i_op->put_link) | ||
| 578 | dentry->d_inode->i_op->put_link(dentry, nd, cookie); | ||
| 579 | } | 559 | } |
| 580 | return error; | 560 | return error; |
| 581 | } | 561 | } |
| @@ -589,6 +569,7 @@ static __always_inline int __do_follow_link(struct path *path, struct nameidata | |||
| 589 | */ | 569 | */ |
| 590 | static inline int do_follow_link(struct path *path, struct nameidata *nd) | 570 | static inline int do_follow_link(struct path *path, struct nameidata *nd) |
| 591 | { | 571 | { |
| 572 | void *cookie; | ||
| 592 | int err = -ELOOP; | 573 | int err = -ELOOP; |
| 593 | if (current->link_count >= MAX_NESTED_LINKS) | 574 | if (current->link_count >= MAX_NESTED_LINKS) |
| 594 | goto loop; | 575 | goto loop; |
| @@ -602,7 +583,9 @@ static inline int do_follow_link(struct path *path, struct nameidata *nd) | |||
| 602 | current->link_count++; | 583 | current->link_count++; |
| 603 | current->total_link_count++; | 584 | current->total_link_count++; |
| 604 | nd->depth++; | 585 | nd->depth++; |
| 605 | err = __do_follow_link(path, nd); | 586 | err = __do_follow_link(path, nd, &cookie); |
| 587 | if (!IS_ERR(cookie) && path->dentry->d_inode->i_op->put_link) | ||
| 588 | path->dentry->d_inode->i_op->put_link(path->dentry, nd, cookie); | ||
| 606 | path_put(path); | 589 | path_put(path); |
| 607 | current->link_count--; | 590 | current->link_count--; |
| 608 | nd->depth--; | 591 | nd->depth--; |
| @@ -1375,22 +1358,6 @@ static inline int may_create(struct inode *dir, struct dentry *child) | |||
| 1375 | return inode_permission(dir, MAY_WRITE | MAY_EXEC); | 1358 | return inode_permission(dir, MAY_WRITE | MAY_EXEC); |
| 1376 | } | 1359 | } |
| 1377 | 1360 | ||
| 1378 | /* | ||
| 1379 | * O_DIRECTORY translates into forcing a directory lookup. | ||
| 1380 | */ | ||
| 1381 | static inline int lookup_flags(unsigned int f) | ||
| 1382 | { | ||
| 1383 | unsigned long retval = LOOKUP_FOLLOW; | ||
| 1384 | |||
| 1385 | if (f & O_NOFOLLOW) | ||
| 1386 | retval &= ~LOOKUP_FOLLOW; | ||
| 1387 | |||
| 1388 | if (f & O_DIRECTORY) | ||
| 1389 | retval |= LOOKUP_DIRECTORY; | ||
| 1390 | |||
| 1391 | return retval; | ||
| 1392 | } | ||
| 1393 | |||
| 1394 | /* | 1361 | /* |
| 1395 | * p1 and p2 should be directories on the same fs. | 1362 | * p1 and p2 should be directories on the same fs. |
| 1396 | */ | 1363 | */ |
| @@ -1448,7 +1415,6 @@ int vfs_create(struct inode *dir, struct dentry *dentry, int mode, | |||
| 1448 | error = security_inode_create(dir, dentry, mode); | 1415 | error = security_inode_create(dir, dentry, mode); |
| 1449 | if (error) | 1416 | if (error) |
| 1450 | return error; | 1417 | return error; |
| 1451 | vfs_dq_init(dir); | ||
| 1452 | error = dir->i_op->create(dir, dentry, mode, nd); | 1418 | error = dir->i_op->create(dir, dentry, mode, nd); |
| 1453 | if (!error) | 1419 | if (!error) |
| 1454 | fsnotify_create(dir, dentry); | 1420 | fsnotify_create(dir, dentry); |
| @@ -1590,129 +1556,132 @@ static int open_will_truncate(int flag, struct inode *inode) | |||
| 1590 | return (flag & O_TRUNC); | 1556 | return (flag & O_TRUNC); |
| 1591 | } | 1557 | } |
| 1592 | 1558 | ||
| 1593 | /* | 1559 | static struct file *finish_open(struct nameidata *nd, |
| 1594 | * Note that the low bits of the passed in "open_flag" | 1560 | int open_flag, int acc_mode) |
| 1595 | * are not the same as in the local variable "flag". See | ||
| 1596 | * open_to_namei_flags() for more details. | ||
| 1597 | */ | ||
| 1598 | struct file *do_filp_open(int dfd, const char *pathname, | ||
| 1599 | int open_flag, int mode, int acc_mode) | ||
| 1600 | { | 1561 | { |
| 1601 | struct file *filp; | 1562 | struct file *filp; |
| 1602 | struct nameidata nd; | ||
| 1603 | int error; | ||
| 1604 | struct path path; | ||
| 1605 | struct dentry *dir; | ||
| 1606 | int count = 0; | ||
| 1607 | int will_truncate; | 1563 | int will_truncate; |
| 1608 | int flag = open_to_namei_flags(open_flag); | 1564 | int error; |
| 1609 | int force_reval = 0; | ||
| 1610 | 1565 | ||
| 1566 | will_truncate = open_will_truncate(open_flag, nd->path.dentry->d_inode); | ||
| 1567 | if (will_truncate) { | ||
| 1568 | error = mnt_want_write(nd->path.mnt); | ||
| 1569 | if (error) | ||
| 1570 | goto exit; | ||
| 1571 | } | ||
| 1572 | error = may_open(&nd->path, acc_mode, open_flag); | ||
| 1573 | if (error) { | ||
| 1574 | if (will_truncate) | ||
| 1575 | mnt_drop_write(nd->path.mnt); | ||
| 1576 | goto exit; | ||
| 1577 | } | ||
| 1578 | filp = nameidata_to_filp(nd); | ||
| 1579 | if (!IS_ERR(filp)) { | ||
| 1580 | error = ima_file_check(filp, acc_mode); | ||
| 1581 | if (error) { | ||
| 1582 | fput(filp); | ||
| 1583 | filp = ERR_PTR(error); | ||
| 1584 | } | ||
| 1585 | } | ||
| 1586 | if (!IS_ERR(filp)) { | ||
| 1587 | if (will_truncate) { | ||
| 1588 | error = handle_truncate(&nd->path); | ||
| 1589 | if (error) { | ||
| 1590 | fput(filp); | ||
| 1591 | filp = ERR_PTR(error); | ||
| 1592 | } | ||
| 1593 | } | ||
| 1594 | } | ||
| 1611 | /* | 1595 | /* |
| 1612 | * O_SYNC is implemented as __O_SYNC|O_DSYNC. As many places only | 1596 | * It is now safe to drop the mnt write |
| 1613 | * check for O_DSYNC if the need any syncing at all we enforce it's | 1597 | * because the filp has had a write taken |
| 1614 | * always set instead of having to deal with possibly weird behaviour | 1598 | * on its behalf. |
| 1615 | * for malicious applications setting only __O_SYNC. | ||
| 1616 | */ | 1599 | */ |
| 1617 | if (open_flag & __O_SYNC) | 1600 | if (will_truncate) |
| 1618 | open_flag |= O_DSYNC; | 1601 | mnt_drop_write(nd->path.mnt); |
| 1619 | 1602 | return filp; | |
| 1620 | if (!acc_mode) | ||
| 1621 | acc_mode = MAY_OPEN | ACC_MODE(open_flag); | ||
| 1622 | 1603 | ||
| 1623 | /* O_TRUNC implies we need access checks for write permissions */ | 1604 | exit: |
| 1624 | if (flag & O_TRUNC) | 1605 | if (!IS_ERR(nd->intent.open.file)) |
| 1625 | acc_mode |= MAY_WRITE; | 1606 | release_open_intent(nd); |
| 1607 | path_put(&nd->path); | ||
| 1608 | return ERR_PTR(error); | ||
| 1609 | } | ||
| 1626 | 1610 | ||
| 1627 | /* Allow the LSM permission hook to distinguish append | 1611 | static struct file *do_last(struct nameidata *nd, struct path *path, |
| 1628 | access from general write access. */ | 1612 | int open_flag, int acc_mode, |
| 1629 | if (flag & O_APPEND) | 1613 | int mode, const char *pathname, |
| 1630 | acc_mode |= MAY_APPEND; | 1614 | int *want_dir) |
| 1615 | { | ||
| 1616 | struct dentry *dir = nd->path.dentry; | ||
| 1617 | struct file *filp; | ||
| 1618 | int error = -EISDIR; | ||
| 1631 | 1619 | ||
| 1632 | /* | 1620 | switch (nd->last_type) { |
| 1633 | * The simplest case - just a plain lookup. | 1621 | case LAST_DOTDOT: |
| 1634 | */ | 1622 | follow_dotdot(nd); |
| 1635 | if (!(flag & O_CREAT)) { | 1623 | dir = nd->path.dentry; |
| 1636 | filp = get_empty_filp(); | 1624 | if (nd->path.mnt->mnt_sb->s_type->fs_flags & FS_REVAL_DOT) { |
| 1637 | 1625 | if (!dir->d_op->d_revalidate(dir, nd)) { | |
| 1638 | if (filp == NULL) | 1626 | error = -ESTALE; |
| 1639 | return ERR_PTR(-ENFILE); | 1627 | goto exit; |
| 1640 | nd.intent.open.file = filp; | ||
| 1641 | filp->f_flags = open_flag; | ||
| 1642 | nd.intent.open.flags = flag; | ||
| 1643 | nd.intent.open.create_mode = 0; | ||
| 1644 | error = do_path_lookup(dfd, pathname, | ||
| 1645 | lookup_flags(flag)|LOOKUP_OPEN, &nd); | ||
| 1646 | if (IS_ERR(nd.intent.open.file)) { | ||
| 1647 | if (error == 0) { | ||
| 1648 | error = PTR_ERR(nd.intent.open.file); | ||
| 1649 | path_put(&nd.path); | ||
| 1650 | } | 1628 | } |
| 1651 | } else if (error) | 1629 | } |
| 1652 | release_open_intent(&nd); | 1630 | /* fallthrough */ |
| 1653 | if (error) | 1631 | case LAST_DOT: |
| 1654 | return ERR_PTR(error); | 1632 | case LAST_ROOT: |
| 1633 | if (open_flag & O_CREAT) | ||
| 1634 | goto exit; | ||
| 1635 | /* fallthrough */ | ||
| 1636 | case LAST_BIND: | ||
| 1637 | audit_inode(pathname, dir); | ||
| 1655 | goto ok; | 1638 | goto ok; |
| 1656 | } | 1639 | } |
| 1657 | 1640 | ||
| 1658 | /* | 1641 | /* trailing slashes? */ |
| 1659 | * Create - we need to know the parent. | 1642 | if (nd->last.name[nd->last.len]) { |
| 1660 | */ | 1643 | if (open_flag & O_CREAT) |
| 1661 | reval: | 1644 | goto exit; |
| 1662 | error = path_init(dfd, pathname, LOOKUP_PARENT, &nd); | 1645 | *want_dir = 1; |
| 1663 | if (error) | ||
| 1664 | return ERR_PTR(error); | ||
| 1665 | if (force_reval) | ||
| 1666 | nd.flags |= LOOKUP_REVAL; | ||
| 1667 | error = path_walk(pathname, &nd); | ||
| 1668 | if (error) { | ||
| 1669 | if (nd.root.mnt) | ||
| 1670 | path_put(&nd.root); | ||
| 1671 | return ERR_PTR(error); | ||
| 1672 | } | 1646 | } |
| 1673 | if (unlikely(!audit_dummy_context())) | ||
| 1674 | audit_inode(pathname, nd.path.dentry); | ||
| 1675 | 1647 | ||
| 1676 | /* | 1648 | /* just plain open? */ |
| 1677 | * We have the parent and last component. First of all, check | 1649 | if (!(open_flag & O_CREAT)) { |
| 1678 | * that we are not asked to creat(2) an obvious directory - that | 1650 | error = do_lookup(nd, &nd->last, path); |
| 1679 | * will not do. | 1651 | if (error) |
| 1680 | */ | 1652 | goto exit; |
| 1681 | error = -EISDIR; | 1653 | error = -ENOENT; |
| 1682 | if (nd.last_type != LAST_NORM || nd.last.name[nd.last.len]) | 1654 | if (!path->dentry->d_inode) |
| 1683 | goto exit_parent; | 1655 | goto exit_dput; |
| 1656 | if (path->dentry->d_inode->i_op->follow_link) | ||
| 1657 | return NULL; | ||
| 1658 | error = -ENOTDIR; | ||
| 1659 | if (*want_dir & !path->dentry->d_inode->i_op->lookup) | ||
| 1660 | goto exit_dput; | ||
| 1661 | path_to_nameidata(path, nd); | ||
| 1662 | audit_inode(pathname, nd->path.dentry); | ||
| 1663 | goto ok; | ||
| 1664 | } | ||
| 1684 | 1665 | ||
| 1685 | error = -ENFILE; | 1666 | /* OK, it's O_CREAT */ |
| 1686 | filp = get_empty_filp(); | ||
| 1687 | if (filp == NULL) | ||
| 1688 | goto exit_parent; | ||
| 1689 | nd.intent.open.file = filp; | ||
| 1690 | filp->f_flags = open_flag; | ||
| 1691 | nd.intent.open.flags = flag; | ||
| 1692 | nd.intent.open.create_mode = mode; | ||
| 1693 | dir = nd.path.dentry; | ||
| 1694 | nd.flags &= ~LOOKUP_PARENT; | ||
| 1695 | nd.flags |= LOOKUP_CREATE | LOOKUP_OPEN; | ||
| 1696 | if (flag & O_EXCL) | ||
| 1697 | nd.flags |= LOOKUP_EXCL; | ||
| 1698 | mutex_lock(&dir->d_inode->i_mutex); | 1667 | mutex_lock(&dir->d_inode->i_mutex); |
| 1699 | path.dentry = lookup_hash(&nd); | ||
| 1700 | path.mnt = nd.path.mnt; | ||
| 1701 | 1668 | ||
| 1702 | do_last: | 1669 | path->dentry = lookup_hash(nd); |
| 1703 | error = PTR_ERR(path.dentry); | 1670 | path->mnt = nd->path.mnt; |
| 1704 | if (IS_ERR(path.dentry)) { | 1671 | |
| 1672 | error = PTR_ERR(path->dentry); | ||
| 1673 | if (IS_ERR(path->dentry)) { | ||
| 1705 | mutex_unlock(&dir->d_inode->i_mutex); | 1674 | mutex_unlock(&dir->d_inode->i_mutex); |
| 1706 | goto exit; | 1675 | goto exit; |
| 1707 | } | 1676 | } |
| 1708 | 1677 | ||
| 1709 | if (IS_ERR(nd.intent.open.file)) { | 1678 | if (IS_ERR(nd->intent.open.file)) { |
| 1710 | error = PTR_ERR(nd.intent.open.file); | 1679 | error = PTR_ERR(nd->intent.open.file); |
| 1711 | goto exit_mutex_unlock; | 1680 | goto exit_mutex_unlock; |
| 1712 | } | 1681 | } |
| 1713 | 1682 | ||
| 1714 | /* Negative dentry, just create the file */ | 1683 | /* Negative dentry, just create the file */ |
| 1715 | if (!path.dentry->d_inode) { | 1684 | if (!path->dentry->d_inode) { |
| 1716 | /* | 1685 | /* |
| 1717 | * This write is needed to ensure that a | 1686 | * This write is needed to ensure that a |
| 1718 | * ro->rw transition does not occur between | 1687 | * ro->rw transition does not occur between |
| @@ -1720,18 +1689,16 @@ do_last: | |||
| 1720 | * a permanent write count is taken through | 1689 | * a permanent write count is taken through |
| 1721 | * the 'struct file' in nameidata_to_filp(). | 1690 | * the 'struct file' in nameidata_to_filp(). |
| 1722 | */ | 1691 | */ |
| 1723 | error = mnt_want_write(nd.path.mnt); | 1692 | error = mnt_want_write(nd->path.mnt); |
| 1724 | if (error) | 1693 | if (error) |
| 1725 | goto exit_mutex_unlock; | 1694 | goto exit_mutex_unlock; |
| 1726 | error = __open_namei_create(&nd, &path, open_flag, mode); | 1695 | error = __open_namei_create(nd, path, open_flag, mode); |
| 1727 | if (error) { | 1696 | if (error) { |
| 1728 | mnt_drop_write(nd.path.mnt); | 1697 | mnt_drop_write(nd->path.mnt); |
| 1729 | goto exit; | 1698 | goto exit; |
| 1730 | } | 1699 | } |
| 1731 | filp = nameidata_to_filp(&nd); | 1700 | filp = nameidata_to_filp(nd); |
| 1732 | mnt_drop_write(nd.path.mnt); | 1701 | mnt_drop_write(nd->path.mnt); |
| 1733 | if (nd.root.mnt) | ||
| 1734 | path_put(&nd.root); | ||
| 1735 | if (!IS_ERR(filp)) { | 1702 | if (!IS_ERR(filp)) { |
| 1736 | error = ima_file_check(filp, acc_mode); | 1703 | error = ima_file_check(filp, acc_mode); |
| 1737 | if (error) { | 1704 | if (error) { |
| @@ -1746,150 +1713,181 @@ do_last: | |||
| 1746 | * It already exists. | 1713 | * It already exists. |
| 1747 | */ | 1714 | */ |
| 1748 | mutex_unlock(&dir->d_inode->i_mutex); | 1715 | mutex_unlock(&dir->d_inode->i_mutex); |
| 1749 | audit_inode(pathname, path.dentry); | 1716 | audit_inode(pathname, path->dentry); |
| 1750 | 1717 | ||
| 1751 | error = -EEXIST; | 1718 | error = -EEXIST; |
| 1752 | if (flag & O_EXCL) | 1719 | if (open_flag & O_EXCL) |
| 1753 | goto exit_dput; | 1720 | goto exit_dput; |
| 1754 | 1721 | ||
| 1755 | if (__follow_mount(&path)) { | 1722 | if (__follow_mount(path)) { |
| 1756 | error = -ELOOP; | 1723 | error = -ELOOP; |
| 1757 | if (flag & O_NOFOLLOW) | 1724 | if (open_flag & O_NOFOLLOW) |
| 1758 | goto exit_dput; | 1725 | goto exit_dput; |
| 1759 | } | 1726 | } |
| 1760 | 1727 | ||
| 1761 | error = -ENOENT; | 1728 | error = -ENOENT; |
| 1762 | if (!path.dentry->d_inode) | 1729 | if (!path->dentry->d_inode) |
| 1763 | goto exit_dput; | 1730 | goto exit_dput; |
| 1764 | if (path.dentry->d_inode->i_op->follow_link) | ||
| 1765 | goto do_link; | ||
| 1766 | 1731 | ||
| 1767 | path_to_nameidata(&path, &nd); | 1732 | if (path->dentry->d_inode->i_op->follow_link) |
| 1733 | return NULL; | ||
| 1734 | |||
| 1735 | path_to_nameidata(path, nd); | ||
| 1768 | error = -EISDIR; | 1736 | error = -EISDIR; |
| 1769 | if (S_ISDIR(path.dentry->d_inode->i_mode)) | 1737 | if (S_ISDIR(path->dentry->d_inode->i_mode)) |
| 1770 | goto exit; | 1738 | goto exit; |
| 1771 | ok: | 1739 | ok: |
| 1740 | filp = finish_open(nd, open_flag, acc_mode); | ||
| 1741 | return filp; | ||
| 1742 | |||
| 1743 | exit_mutex_unlock: | ||
| 1744 | mutex_unlock(&dir->d_inode->i_mutex); | ||
| 1745 | exit_dput: | ||
| 1746 | path_put_conditional(path, nd); | ||
| 1747 | exit: | ||
| 1748 | if (!IS_ERR(nd->intent.open.file)) | ||
| 1749 | release_open_intent(nd); | ||
| 1750 | path_put(&nd->path); | ||
| 1751 | return ERR_PTR(error); | ||
| 1752 | } | ||
| 1753 | |||
| 1754 | /* | ||
| 1755 | * Note that the low bits of the passed in "open_flag" | ||
| 1756 | * are not the same as in the local variable "flag". See | ||
| 1757 | * open_to_namei_flags() for more details. | ||
| 1758 | */ | ||
| 1759 | struct file *do_filp_open(int dfd, const char *pathname, | ||
| 1760 | int open_flag, int mode, int acc_mode) | ||
| 1761 | { | ||
| 1762 | struct file *filp; | ||
| 1763 | struct nameidata nd; | ||
| 1764 | int error; | ||
| 1765 | struct path path; | ||
| 1766 | int count = 0; | ||
| 1767 | int flag = open_to_namei_flags(open_flag); | ||
| 1768 | int force_reval = 0; | ||
| 1769 | int want_dir = open_flag & O_DIRECTORY; | ||
| 1770 | |||
| 1771 | if (!(open_flag & O_CREAT)) | ||
| 1772 | mode = 0; | ||
| 1773 | |||
| 1772 | /* | 1774 | /* |
| 1773 | * Consider: | 1775 | * O_SYNC is implemented as __O_SYNC|O_DSYNC. As many places only |
| 1774 | * 1. may_open() truncates a file | 1776 | * check for O_DSYNC if the need any syncing at all we enforce it's |
| 1775 | * 2. a rw->ro mount transition occurs | 1777 | * always set instead of having to deal with possibly weird behaviour |
| 1776 | * 3. nameidata_to_filp() fails due to | 1778 | * for malicious applications setting only __O_SYNC. |
| 1777 | * the ro mount. | ||
| 1778 | * That would be inconsistent, and should | ||
| 1779 | * be avoided. Taking this mnt write here | ||
| 1780 | * ensures that (2) can not occur. | ||
| 1781 | */ | 1779 | */ |
| 1782 | will_truncate = open_will_truncate(flag, nd.path.dentry->d_inode); | 1780 | if (open_flag & __O_SYNC) |
| 1783 | if (will_truncate) { | 1781 | open_flag |= O_DSYNC; |
| 1784 | error = mnt_want_write(nd.path.mnt); | 1782 | |
| 1785 | if (error) | 1783 | if (!acc_mode) |
| 1786 | goto exit; | 1784 | acc_mode = MAY_OPEN | ACC_MODE(open_flag); |
| 1787 | } | 1785 | |
| 1788 | error = may_open(&nd.path, acc_mode, open_flag); | 1786 | /* O_TRUNC implies we need access checks for write permissions */ |
| 1787 | if (open_flag & O_TRUNC) | ||
| 1788 | acc_mode |= MAY_WRITE; | ||
| 1789 | |||
| 1790 | /* Allow the LSM permission hook to distinguish append | ||
| 1791 | access from general write access. */ | ||
| 1792 | if (open_flag & O_APPEND) | ||
| 1793 | acc_mode |= MAY_APPEND; | ||
| 1794 | |||
| 1795 | /* find the parent */ | ||
| 1796 | reval: | ||
| 1797 | error = path_init(dfd, pathname, LOOKUP_PARENT, &nd); | ||
| 1798 | if (error) | ||
| 1799 | return ERR_PTR(error); | ||
| 1800 | if (force_reval) | ||
| 1801 | nd.flags |= LOOKUP_REVAL; | ||
| 1802 | |||
| 1803 | current->total_link_count = 0; | ||
| 1804 | error = link_path_walk(pathname, &nd); | ||
| 1789 | if (error) { | 1805 | if (error) { |
| 1790 | if (will_truncate) | 1806 | filp = ERR_PTR(error); |
| 1791 | mnt_drop_write(nd.path.mnt); | 1807 | goto out; |
| 1792 | goto exit; | ||
| 1793 | } | ||
| 1794 | filp = nameidata_to_filp(&nd); | ||
| 1795 | if (!IS_ERR(filp)) { | ||
| 1796 | error = ima_file_check(filp, acc_mode); | ||
| 1797 | if (error) { | ||
| 1798 | fput(filp); | ||
| 1799 | filp = ERR_PTR(error); | ||
| 1800 | } | ||
| 1801 | } | 1808 | } |
| 1802 | if (!IS_ERR(filp)) { | 1809 | if (unlikely(!audit_dummy_context()) && (open_flag & O_CREAT)) |
| 1803 | if (acc_mode & MAY_WRITE) | 1810 | audit_inode(pathname, nd.path.dentry); |
| 1804 | vfs_dq_init(nd.path.dentry->d_inode); | ||
| 1805 | 1811 | ||
| 1806 | if (will_truncate) { | ||
| 1807 | error = handle_truncate(&nd.path); | ||
| 1808 | if (error) { | ||
| 1809 | fput(filp); | ||
| 1810 | filp = ERR_PTR(error); | ||
| 1811 | } | ||
| 1812 | } | ||
| 1813 | } | ||
| 1814 | /* | 1812 | /* |
| 1815 | * It is now safe to drop the mnt write | 1813 | * We have the parent and last component. |
| 1816 | * because the filp has had a write taken | ||
| 1817 | * on its behalf. | ||
| 1818 | */ | 1814 | */ |
| 1819 | if (will_truncate) | 1815 | |
| 1820 | mnt_drop_write(nd.path.mnt); | 1816 | error = -ENFILE; |
| 1817 | filp = get_empty_filp(); | ||
| 1818 | if (filp == NULL) | ||
| 1819 | goto exit_parent; | ||
| 1820 | nd.intent.open.file = filp; | ||
| 1821 | filp->f_flags = open_flag; | ||
| 1822 | nd.intent.open.flags = flag; | ||
| 1823 | nd.intent.open.create_mode = mode; | ||
| 1824 | nd.flags &= ~LOOKUP_PARENT; | ||
| 1825 | nd.flags |= LOOKUP_OPEN; | ||
| 1826 | if (open_flag & O_CREAT) { | ||
| 1827 | nd.flags |= LOOKUP_CREATE; | ||
| 1828 | if (open_flag & O_EXCL) | ||
| 1829 | nd.flags |= LOOKUP_EXCL; | ||
| 1830 | } | ||
| 1831 | filp = do_last(&nd, &path, open_flag, acc_mode, mode, pathname, &want_dir); | ||
| 1832 | while (unlikely(!filp)) { /* trailing symlink */ | ||
| 1833 | struct path holder; | ||
| 1834 | struct inode *inode = path.dentry->d_inode; | ||
| 1835 | void *cookie; | ||
| 1836 | error = -ELOOP; | ||
| 1837 | /* S_ISDIR part is a temporary automount kludge */ | ||
| 1838 | if ((open_flag & O_NOFOLLOW) && !S_ISDIR(inode->i_mode)) | ||
| 1839 | goto exit_dput; | ||
| 1840 | if (count++ == 32) | ||
| 1841 | goto exit_dput; | ||
| 1842 | /* | ||
| 1843 | * This is subtle. Instead of calling do_follow_link() we do | ||
| 1844 | * the thing by hands. The reason is that this way we have zero | ||
| 1845 | * link_count and path_walk() (called from ->follow_link) | ||
| 1846 | * honoring LOOKUP_PARENT. After that we have the parent and | ||
| 1847 | * last component, i.e. we are in the same situation as after | ||
| 1848 | * the first path_walk(). Well, almost - if the last component | ||
| 1849 | * is normal we get its copy stored in nd->last.name and we will | ||
| 1850 | * have to putname() it when we are done. Procfs-like symlinks | ||
| 1851 | * just set LAST_BIND. | ||
| 1852 | */ | ||
| 1853 | nd.flags |= LOOKUP_PARENT; | ||
| 1854 | error = security_inode_follow_link(path.dentry, &nd); | ||
| 1855 | if (error) | ||
| 1856 | goto exit_dput; | ||
| 1857 | error = __do_follow_link(&path, &nd, &cookie); | ||
| 1858 | if (unlikely(error)) { | ||
| 1859 | /* nd.path had been dropped */ | ||
| 1860 | if (!IS_ERR(cookie) && inode->i_op->put_link) | ||
| 1861 | inode->i_op->put_link(path.dentry, &nd, cookie); | ||
| 1862 | path_put(&path); | ||
| 1863 | release_open_intent(&nd); | ||
| 1864 | filp = ERR_PTR(error); | ||
| 1865 | goto out; | ||
| 1866 | } | ||
| 1867 | holder = path; | ||
| 1868 | nd.flags &= ~LOOKUP_PARENT; | ||
| 1869 | filp = do_last(&nd, &path, open_flag, acc_mode, mode, pathname, &want_dir); | ||
| 1870 | if (inode->i_op->put_link) | ||
| 1871 | inode->i_op->put_link(holder.dentry, &nd, cookie); | ||
| 1872 | path_put(&holder); | ||
| 1873 | } | ||
| 1874 | out: | ||
| 1821 | if (nd.root.mnt) | 1875 | if (nd.root.mnt) |
| 1822 | path_put(&nd.root); | 1876 | path_put(&nd.root); |
| 1877 | if (filp == ERR_PTR(-ESTALE) && !force_reval) { | ||
| 1878 | force_reval = 1; | ||
| 1879 | goto reval; | ||
| 1880 | } | ||
| 1823 | return filp; | 1881 | return filp; |
| 1824 | 1882 | ||
| 1825 | exit_mutex_unlock: | ||
| 1826 | mutex_unlock(&dir->d_inode->i_mutex); | ||
| 1827 | exit_dput: | 1883 | exit_dput: |
| 1828 | path_put_conditional(&path, &nd); | 1884 | path_put_conditional(&path, &nd); |
| 1829 | exit: | ||
| 1830 | if (!IS_ERR(nd.intent.open.file)) | 1885 | if (!IS_ERR(nd.intent.open.file)) |
| 1831 | release_open_intent(&nd); | 1886 | release_open_intent(&nd); |
| 1832 | exit_parent: | 1887 | exit_parent: |
| 1833 | if (nd.root.mnt) | ||
| 1834 | path_put(&nd.root); | ||
| 1835 | path_put(&nd.path); | 1888 | path_put(&nd.path); |
| 1836 | return ERR_PTR(error); | 1889 | filp = ERR_PTR(error); |
| 1837 | 1890 | goto out; | |
| 1838 | do_link: | ||
| 1839 | error = -ELOOP; | ||
| 1840 | if (flag & O_NOFOLLOW) | ||
| 1841 | goto exit_dput; | ||
| 1842 | /* | ||
| 1843 | * This is subtle. Instead of calling do_follow_link() we do the | ||
| 1844 | * thing by hands. The reason is that this way we have zero link_count | ||
| 1845 | * and path_walk() (called from ->follow_link) honoring LOOKUP_PARENT. | ||
| 1846 | * After that we have the parent and last component, i.e. | ||
| 1847 | * we are in the same situation as after the first path_walk(). | ||
| 1848 | * Well, almost - if the last component is normal we get its copy | ||
| 1849 | * stored in nd->last.name and we will have to putname() it when we | ||
| 1850 | * are done. Procfs-like symlinks just set LAST_BIND. | ||
| 1851 | */ | ||
| 1852 | nd.flags |= LOOKUP_PARENT; | ||
| 1853 | error = security_inode_follow_link(path.dentry, &nd); | ||
| 1854 | if (error) | ||
| 1855 | goto exit_dput; | ||
| 1856 | error = __do_follow_link(&path, &nd); | ||
| 1857 | path_put(&path); | ||
| 1858 | if (error) { | ||
| 1859 | /* Does someone understand code flow here? Or it is only | ||
| 1860 | * me so stupid? Anathema to whoever designed this non-sense | ||
| 1861 | * with "intent.open". | ||
| 1862 | */ | ||
| 1863 | release_open_intent(&nd); | ||
| 1864 | if (nd.root.mnt) | ||
| 1865 | path_put(&nd.root); | ||
| 1866 | if (error == -ESTALE && !force_reval) { | ||
| 1867 | force_reval = 1; | ||
| 1868 | goto reval; | ||
| 1869 | } | ||
| 1870 | return ERR_PTR(error); | ||
| 1871 | } | ||
| 1872 | nd.flags &= ~LOOKUP_PARENT; | ||
| 1873 | if (nd.last_type == LAST_BIND) | ||
| 1874 | goto ok; | ||
| 1875 | error = -EISDIR; | ||
| 1876 | if (nd.last_type != LAST_NORM) | ||
| 1877 | goto exit; | ||
| 1878 | if (nd.last.name[nd.last.len]) { | ||
| 1879 | __putname(nd.last.name); | ||
| 1880 | goto exit; | ||
| 1881 | } | ||
| 1882 | error = -ELOOP; | ||
| 1883 | if (count++==32) { | ||
| 1884 | __putname(nd.last.name); | ||
| 1885 | goto exit; | ||
| 1886 | } | ||
| 1887 | dir = nd.path.dentry; | ||
| 1888 | mutex_lock(&dir->d_inode->i_mutex); | ||
| 1889 | path.dentry = lookup_hash(&nd); | ||
| 1890 | path.mnt = nd.path.mnt; | ||
| 1891 | __putname(nd.last.name); | ||
| 1892 | goto do_last; | ||
| 1893 | } | 1891 | } |
| 1894 | 1892 | ||
| 1895 | /** | 1893 | /** |
| @@ -1983,7 +1981,6 @@ int vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev) | |||
| 1983 | if (error) | 1981 | if (error) |
| 1984 | return error; | 1982 | return error; |
| 1985 | 1983 | ||
| 1986 | vfs_dq_init(dir); | ||
| 1987 | error = dir->i_op->mknod(dir, dentry, mode, dev); | 1984 | error = dir->i_op->mknod(dir, dentry, mode, dev); |
| 1988 | if (!error) | 1985 | if (!error) |
| 1989 | fsnotify_create(dir, dentry); | 1986 | fsnotify_create(dir, dentry); |
| @@ -2082,7 +2079,6 @@ int vfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) | |||
| 2082 | if (error) | 2079 | if (error) |
| 2083 | return error; | 2080 | return error; |
| 2084 | 2081 | ||
| 2085 | vfs_dq_init(dir); | ||
| 2086 | error = dir->i_op->mkdir(dir, dentry, mode); | 2082 | error = dir->i_op->mkdir(dir, dentry, mode); |
| 2087 | if (!error) | 2083 | if (!error) |
| 2088 | fsnotify_mkdir(dir, dentry); | 2084 | fsnotify_mkdir(dir, dentry); |
| @@ -2168,8 +2164,6 @@ int vfs_rmdir(struct inode *dir, struct dentry *dentry) | |||
| 2168 | if (!dir->i_op->rmdir) | 2164 | if (!dir->i_op->rmdir) |
| 2169 | return -EPERM; | 2165 | return -EPERM; |
| 2170 | 2166 | ||
| 2171 | vfs_dq_init(dir); | ||
| 2172 | |||
| 2173 | mutex_lock(&dentry->d_inode->i_mutex); | 2167 | mutex_lock(&dentry->d_inode->i_mutex); |
| 2174 | dentry_unhash(dentry); | 2168 | dentry_unhash(dentry); |
| 2175 | if (d_mountpoint(dentry)) | 2169 | if (d_mountpoint(dentry)) |
| @@ -2255,8 +2249,6 @@ int vfs_unlink(struct inode *dir, struct dentry *dentry) | |||
| 2255 | if (!dir->i_op->unlink) | 2249 | if (!dir->i_op->unlink) |
| 2256 | return -EPERM; | 2250 | return -EPERM; |
| 2257 | 2251 | ||
| 2258 | vfs_dq_init(dir); | ||
| 2259 | |||
| 2260 | mutex_lock(&dentry->d_inode->i_mutex); | 2252 | mutex_lock(&dentry->d_inode->i_mutex); |
| 2261 | if (d_mountpoint(dentry)) | 2253 | if (d_mountpoint(dentry)) |
| 2262 | error = -EBUSY; | 2254 | error = -EBUSY; |
| @@ -2369,7 +2361,6 @@ int vfs_symlink(struct inode *dir, struct dentry *dentry, const char *oldname) | |||
| 2369 | if (error) | 2361 | if (error) |
| 2370 | return error; | 2362 | return error; |
| 2371 | 2363 | ||
| 2372 | vfs_dq_init(dir); | ||
| 2373 | error = dir->i_op->symlink(dir, dentry, oldname); | 2364 | error = dir->i_op->symlink(dir, dentry, oldname); |
| 2374 | if (!error) | 2365 | if (!error) |
| 2375 | fsnotify_create(dir, dentry); | 2366 | fsnotify_create(dir, dentry); |
| @@ -2453,7 +2444,6 @@ int vfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_de | |||
| 2453 | return error; | 2444 | return error; |
| 2454 | 2445 | ||
| 2455 | mutex_lock(&inode->i_mutex); | 2446 | mutex_lock(&inode->i_mutex); |
| 2456 | vfs_dq_init(dir); | ||
| 2457 | error = dir->i_op->link(old_dentry, dir, new_dentry); | 2447 | error = dir->i_op->link(old_dentry, dir, new_dentry); |
| 2458 | mutex_unlock(&inode->i_mutex); | 2448 | mutex_unlock(&inode->i_mutex); |
| 2459 | if (!error) | 2449 | if (!error) |
| @@ -2654,9 +2644,6 @@ int vfs_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
| 2654 | if (!old_dir->i_op->rename) | 2644 | if (!old_dir->i_op->rename) |
| 2655 | return -EPERM; | 2645 | return -EPERM; |
| 2656 | 2646 | ||
| 2657 | vfs_dq_init(old_dir); | ||
| 2658 | vfs_dq_init(new_dir); | ||
| 2659 | |||
| 2660 | old_name = fsnotify_oldname_init(old_dentry->d_name.name); | 2647 | old_name = fsnotify_oldname_init(old_dentry->d_name.name); |
| 2661 | 2648 | ||
| 2662 | if (is_dir) | 2649 | if (is_dir) |
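Note: in the namei.c hunks above, __do_follow_link() now returns the ->follow_link() cookie through an out parameter, and the caller pairs it with ->put_link() once it is done with the nameidata. A simplified sketch of that pairing (not from the patch; the function name is invented):

    #include <linux/err.h>
    #include <linux/fs.h>
    #include <linux/namei.h>

    static void example_release_link(struct path *link, struct nameidata *nd,
                                     void *cookie)
    {
            struct inode *inode = link->dentry->d_inode;

            /* The caller, not __do_follow_link(), owns the cookie now. */
            if (!IS_ERR(cookie) && inode->i_op->put_link)
                    inode->i_op->put_link(link->dentry, nd, cookie);
    }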
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 7570573bdb30..7f9ecc46f3fb 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c | |||
| @@ -97,16 +97,12 @@ u64 nfs_compat_user_ino64(u64 fileid) | |||
| 97 | return ino; | 97 | return ino; |
| 98 | } | 98 | } |
| 99 | 99 | ||
| 100 | int nfs_write_inode(struct inode *inode, int sync) | 100 | int nfs_write_inode(struct inode *inode, struct writeback_control *wbc) |
| 101 | { | 101 | { |
| 102 | int ret; | 102 | int ret; |
| 103 | 103 | ||
| 104 | if (sync) { | 104 | ret = nfs_commit_inode(inode, |
| 105 | ret = filemap_fdatawait(inode->i_mapping); | 105 | wbc->sync_mode == WB_SYNC_ALL ? FLUSH_SYNC : 0); |
| 106 | if (ret == 0) | ||
| 107 | ret = nfs_commit_inode(inode, FLUSH_SYNC); | ||
| 108 | } else | ||
| 109 | ret = nfs_commit_inode(inode, 0); | ||
| 110 | if (ret >= 0) | 106 | if (ret >= 0) |
| 111 | return 0; | 107 | return 0; |
| 112 | __mark_inode_dirty(inode, I_DIRTY_DATASYNC); | 108 | __mark_inode_dirty(inode, I_DIRTY_DATASYNC); |
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index 29e464d23b32..11f82f03c5de 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h | |||
| @@ -211,7 +211,7 @@ extern int nfs_access_cache_shrinker(int nr_to_scan, gfp_t gfp_mask); | |||
| 211 | extern struct workqueue_struct *nfsiod_workqueue; | 211 | extern struct workqueue_struct *nfsiod_workqueue; |
| 212 | extern struct inode *nfs_alloc_inode(struct super_block *sb); | 212 | extern struct inode *nfs_alloc_inode(struct super_block *sb); |
| 213 | extern void nfs_destroy_inode(struct inode *); | 213 | extern void nfs_destroy_inode(struct inode *); |
| 214 | extern int nfs_write_inode(struct inode *,int); | 214 | extern int nfs_write_inode(struct inode *, struct writeback_control *); |
| 215 | extern void nfs_clear_inode(struct inode *); | 215 | extern void nfs_clear_inode(struct inode *); |
| 216 | #ifdef CONFIG_NFS_V4 | 216 | #ifdef CONFIG_NFS_V4 |
| 217 | extern void nfs4_clear_inode(struct inode *); | 217 | extern void nfs4_clear_inode(struct inode *); |
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index 15dc2deaac5f..8eca17df4f63 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c | |||
| @@ -20,7 +20,6 @@ | |||
| 20 | #include <linux/fcntl.h> | 20 | #include <linux/fcntl.h> |
| 21 | #include <linux/namei.h> | 21 | #include <linux/namei.h> |
| 22 | #include <linux/delay.h> | 22 | #include <linux/delay.h> |
| 23 | #include <linux/quotaops.h> | ||
| 24 | #include <linux/fsnotify.h> | 23 | #include <linux/fsnotify.h> |
| 25 | #include <linux/posix_acl_xattr.h> | 24 | #include <linux/posix_acl_xattr.h> |
| 26 | #include <linux/xattr.h> | 25 | #include <linux/xattr.h> |
| @@ -377,7 +376,6 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap, | |||
| 377 | put_write_access(inode); | 376 | put_write_access(inode); |
| 378 | goto out_nfserr; | 377 | goto out_nfserr; |
| 379 | } | 378 | } |
| 380 | vfs_dq_init(inode); | ||
| 381 | } | 379 | } |
| 382 | 380 | ||
| 383 | /* sanitize the mode change */ | 381 | /* sanitize the mode change */ |
| @@ -745,8 +743,6 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type, | |||
| 745 | flags = O_RDWR|O_LARGEFILE; | 743 | flags = O_RDWR|O_LARGEFILE; |
| 746 | else | 744 | else |
| 747 | flags = O_WRONLY|O_LARGEFILE; | 745 | flags = O_WRONLY|O_LARGEFILE; |
| 748 | |||
| 749 | vfs_dq_init(inode); | ||
| 750 | } | 746 | } |
| 751 | *filp = dentry_open(dget(dentry), mntget(fhp->fh_export->ex_path.mnt), | 747 | *filp = dentry_open(dget(dentry), mntget(fhp->fh_export->ex_path.mnt), |
| 752 | flags, current_cred()); | 748 | flags, current_cred()); |
diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c index 5a9e34475e37..9173e82a45d1 100644 --- a/fs/ntfs/dir.c +++ b/fs/ntfs/dir.c | |||
| @@ -1545,7 +1545,7 @@ static int ntfs_dir_fsync(struct file *filp, struct dentry *dentry, | |||
| 1545 | write_inode_now(bmp_vi, !datasync); | 1545 | write_inode_now(bmp_vi, !datasync); |
| 1546 | iput(bmp_vi); | 1546 | iput(bmp_vi); |
| 1547 | } | 1547 | } |
| 1548 | ret = ntfs_write_inode(vi, 1); | 1548 | ret = __ntfs_write_inode(vi, 1); |
| 1549 | write_inode_now(vi, !datasync); | 1549 | write_inode_now(vi, !datasync); |
| 1550 | err = sync_blockdev(vi->i_sb->s_bdev); | 1550 | err = sync_blockdev(vi->i_sb->s_bdev); |
| 1551 | if (unlikely(err && !ret)) | 1551 | if (unlikely(err && !ret)) |
diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c index 43179ddd336f..b681c71d7069 100644 --- a/fs/ntfs/file.c +++ b/fs/ntfs/file.c | |||
| @@ -2182,7 +2182,7 @@ static int ntfs_file_fsync(struct file *filp, struct dentry *dentry, | |||
| 2182 | ntfs_debug("Entering for inode 0x%lx.", vi->i_ino); | 2182 | ntfs_debug("Entering for inode 0x%lx.", vi->i_ino); |
| 2183 | BUG_ON(S_ISDIR(vi->i_mode)); | 2183 | BUG_ON(S_ISDIR(vi->i_mode)); |
| 2184 | if (!datasync || !NInoNonResident(NTFS_I(vi))) | 2184 | if (!datasync || !NInoNonResident(NTFS_I(vi))) |
| 2185 | ret = ntfs_write_inode(vi, 1); | 2185 | ret = __ntfs_write_inode(vi, 1); |
| 2186 | write_inode_now(vi, !datasync); | 2186 | write_inode_now(vi, !datasync); |
| 2187 | /* | 2187 | /* |
| 2188 | * NOTE: If we were to use mapping->private_list (see ext2 and | 2188 | * NOTE: If we were to use mapping->private_list (see ext2 and |
diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c index dc2505abb6d7..4b57fb1eac2a 100644 --- a/fs/ntfs/inode.c +++ b/fs/ntfs/inode.c | |||
| @@ -2957,7 +2957,7 @@ out: | |||
| 2957 | * | 2957 | * |
| 2958 | * Return 0 on success and -errno on error. | 2958 | * Return 0 on success and -errno on error. |
| 2959 | */ | 2959 | */ |
| 2960 | int ntfs_write_inode(struct inode *vi, int sync) | 2960 | int __ntfs_write_inode(struct inode *vi, int sync) |
| 2961 | { | 2961 | { |
| 2962 | sle64 nt; | 2962 | sle64 nt; |
| 2963 | ntfs_inode *ni = NTFS_I(vi); | 2963 | ntfs_inode *ni = NTFS_I(vi); |
diff --git a/fs/ntfs/inode.h b/fs/ntfs/inode.h index 117eaf8032a3..9a113544605d 100644 --- a/fs/ntfs/inode.h +++ b/fs/ntfs/inode.h | |||
| @@ -307,12 +307,12 @@ extern void ntfs_truncate_vfs(struct inode *vi); | |||
| 307 | 307 | ||
| 308 | extern int ntfs_setattr(struct dentry *dentry, struct iattr *attr); | 308 | extern int ntfs_setattr(struct dentry *dentry, struct iattr *attr); |
| 309 | 309 | ||
| 310 | extern int ntfs_write_inode(struct inode *vi, int sync); | 310 | extern int __ntfs_write_inode(struct inode *vi, int sync); |
| 311 | 311 | ||
| 312 | static inline void ntfs_commit_inode(struct inode *vi) | 312 | static inline void ntfs_commit_inode(struct inode *vi) |
| 313 | { | 313 | { |
| 314 | if (!is_bad_inode(vi)) | 314 | if (!is_bad_inode(vi)) |
| 315 | ntfs_write_inode(vi, 1); | 315 | __ntfs_write_inode(vi, 1); |
| 316 | return; | 316 | return; |
| 317 | } | 317 | } |
| 318 | 318 | ||
diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c index 80b04770e8e9..1cf39dfaee7a 100644 --- a/fs/ntfs/super.c +++ b/fs/ntfs/super.c | |||
| @@ -39,6 +39,7 @@ | |||
| 39 | #include "dir.h" | 39 | #include "dir.h" |
| 40 | #include "debug.h" | 40 | #include "debug.h" |
| 41 | #include "index.h" | 41 | #include "index.h" |
| 42 | #include "inode.h" | ||
| 42 | #include "aops.h" | 43 | #include "aops.h" |
| 43 | #include "layout.h" | 44 | #include "layout.h" |
| 44 | #include "malloc.h" | 45 | #include "malloc.h" |
| @@ -2662,6 +2663,13 @@ static int ntfs_statfs(struct dentry *dentry, struct kstatfs *sfs) | |||
| 2662 | return 0; | 2663 | return 0; |
| 2663 | } | 2664 | } |
| 2664 | 2665 | ||
| 2666 | #ifdef NTFS_RW | ||
| 2667 | static int ntfs_write_inode(struct inode *vi, struct writeback_control *wbc) | ||
| 2668 | { | ||
| 2669 | return __ntfs_write_inode(vi, wbc->sync_mode == WB_SYNC_ALL); | ||
| 2670 | } | ||
| 2671 | #endif | ||
| 2672 | |||
| 2665 | /** | 2673 | /** |
| 2666 | * The complete super operations. | 2674 | * The complete super operations. |
| 2667 | */ | 2675 | */ |
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c index 2bbe1ecc08c0..9f8bd913c51e 100644 --- a/fs/ocfs2/alloc.c +++ b/fs/ocfs2/alloc.c | |||
| @@ -5713,7 +5713,7 @@ int ocfs2_remove_btree_range(struct inode *inode, | |||
| 5713 | goto out; | 5713 | goto out; |
| 5714 | } | 5714 | } |
| 5715 | 5715 | ||
| 5716 | vfs_dq_free_space_nodirty(inode, | 5716 | dquot_free_space_nodirty(inode, |
| 5717 | ocfs2_clusters_to_bytes(inode->i_sb, len)); | 5717 | ocfs2_clusters_to_bytes(inode->i_sb, len)); |
| 5718 | 5718 | ||
| 5719 | ret = ocfs2_remove_extent(handle, et, cpos, len, meta_ac, dealloc); | 5719 | ret = ocfs2_remove_extent(handle, et, cpos, len, meta_ac, dealloc); |
| @@ -6936,7 +6936,7 @@ static int ocfs2_do_truncate(struct ocfs2_super *osb, | |||
| 6936 | goto bail; | 6936 | goto bail; |
| 6937 | } | 6937 | } |
| 6938 | 6938 | ||
| 6939 | vfs_dq_free_space_nodirty(inode, | 6939 | dquot_free_space_nodirty(inode, |
| 6940 | ocfs2_clusters_to_bytes(osb->sb, clusters_to_del)); | 6940 | ocfs2_clusters_to_bytes(osb->sb, clusters_to_del)); |
| 6941 | spin_lock(&OCFS2_I(inode)->ip_lock); | 6941 | spin_lock(&OCFS2_I(inode)->ip_lock); |
| 6942 | OCFS2_I(inode)->ip_clusters = le32_to_cpu(fe->i_clusters) - | 6942 | OCFS2_I(inode)->ip_clusters = le32_to_cpu(fe->i_clusters) - |
| @@ -7301,11 +7301,10 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode, | |||
| 7301 | unsigned int page_end; | 7301 | unsigned int page_end; |
| 7302 | u64 phys; | 7302 | u64 phys; |
| 7303 | 7303 | ||
| 7304 | if (vfs_dq_alloc_space_nodirty(inode, | 7304 | ret = dquot_alloc_space_nodirty(inode, |
| 7305 | ocfs2_clusters_to_bytes(osb->sb, 1))) { | 7305 | ocfs2_clusters_to_bytes(osb->sb, 1)); |
| 7306 | ret = -EDQUOT; | 7306 | if (ret) |
| 7307 | goto out_commit; | 7307 | goto out_commit; |
| 7308 | } | ||
| 7309 | did_quota = 1; | 7308 | did_quota = 1; |
| 7310 | 7309 | ||
| 7311 | ret = ocfs2_claim_clusters(osb, handle, data_ac, 1, &bit_off, | 7310 | ret = ocfs2_claim_clusters(osb, handle, data_ac, 1, &bit_off, |
| @@ -7381,7 +7380,7 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode, | |||
| 7381 | 7380 | ||
| 7382 | out_commit: | 7381 | out_commit: |
| 7383 | if (ret < 0 && did_quota) | 7382 | if (ret < 0 && did_quota) |
| 7384 | vfs_dq_free_space_nodirty(inode, | 7383 | dquot_free_space_nodirty(inode, |
| 7385 | ocfs2_clusters_to_bytes(osb->sb, 1)); | 7384 | ocfs2_clusters_to_bytes(osb->sb, 1)); |
| 7386 | 7385 | ||
| 7387 | ocfs2_commit_trans(osb, handle); | 7386 | ocfs2_commit_trans(osb, handle); |
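Note: the ocfs2 hunks here and in the following files apply the same conversion to the byte-based helpers: dquot_alloc_space_nodirty() now returns an errno that is propagated instead of a hard-coded -EDQUOT, and failures after the charge are unwound with dquot_free_space_nodirty(). A minimal, hypothetical sketch of the pattern; the claim callback stands in for the elided cluster allocation:

    #include <linux/fs.h>
    #include <linux/quotaops.h>

    static int example_charge_clusters(struct inode *inode, qsize_t bytes,
                                       int (*claim)(struct inode *))
    {
            int ret;

            ret = dquot_alloc_space_nodirty(inode, bytes);
            if (ret)
                    return ret;

            ret = claim(inode);             /* hypothetical cluster claim */
            if (ret)
                    dquot_free_space_nodirty(inode, bytes);
            return ret;
    }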
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index 4c2a6d282c4d..21441ddb5506 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c | |||
| @@ -1764,10 +1764,11 @@ int ocfs2_write_begin_nolock(struct address_space *mapping, | |||
| 1764 | 1764 | ||
| 1765 | wc->w_handle = handle; | 1765 | wc->w_handle = handle; |
| 1766 | 1766 | ||
| 1767 | if (clusters_to_alloc && vfs_dq_alloc_space_nodirty(inode, | 1767 | if (clusters_to_alloc) { |
| 1768 | ocfs2_clusters_to_bytes(osb->sb, clusters_to_alloc))) { | 1768 | ret = dquot_alloc_space_nodirty(inode, |
| 1769 | ret = -EDQUOT; | 1769 | ocfs2_clusters_to_bytes(osb->sb, clusters_to_alloc)); |
| 1770 | goto out_commit; | 1770 | if (ret) |
| 1771 | goto out_commit; | ||
| 1771 | } | 1772 | } |
| 1772 | /* | 1773 | /* |
| 1773 | * We don't want this to fail in ocfs2_write_end(), so do it | 1774 | * We don't want this to fail in ocfs2_write_end(), so do it |
| @@ -1810,7 +1811,7 @@ success: | |||
| 1810 | return 0; | 1811 | return 0; |
| 1811 | out_quota: | 1812 | out_quota: |
| 1812 | if (clusters_to_alloc) | 1813 | if (clusters_to_alloc) |
| 1813 | vfs_dq_free_space(inode, | 1814 | dquot_free_space(inode, |
| 1814 | ocfs2_clusters_to_bytes(osb->sb, clusters_to_alloc)); | 1815 | ocfs2_clusters_to_bytes(osb->sb, clusters_to_alloc)); |
| 1815 | out_commit: | 1816 | out_commit: |
| 1816 | ocfs2_commit_trans(osb, handle); | 1817 | ocfs2_commit_trans(osb, handle); |
diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c index 765d66c70989..efd77d071c80 100644 --- a/fs/ocfs2/dir.c +++ b/fs/ocfs2/dir.c | |||
| @@ -2964,12 +2964,10 @@ static int ocfs2_expand_inline_dir(struct inode *dir, struct buffer_head *di_bh, | |||
| 2964 | goto out; | 2964 | goto out; |
| 2965 | } | 2965 | } |
| 2966 | 2966 | ||
| 2967 | if (vfs_dq_alloc_space_nodirty(dir, | 2967 | ret = dquot_alloc_space_nodirty(dir, |
| 2968 | ocfs2_clusters_to_bytes(osb->sb, | 2968 | ocfs2_clusters_to_bytes(osb->sb, alloc + dx_alloc)); |
| 2969 | alloc + dx_alloc))) { | 2969 | if (ret) |
| 2970 | ret = -EDQUOT; | ||
| 2971 | goto out_commit; | 2970 | goto out_commit; |
| 2972 | } | ||
| 2973 | did_quota = 1; | 2971 | did_quota = 1; |
| 2974 | 2972 | ||
| 2975 | if (ocfs2_supports_indexed_dirs(osb) && !dx_inline) { | 2973 | if (ocfs2_supports_indexed_dirs(osb) && !dx_inline) { |
| @@ -3178,7 +3176,7 @@ static int ocfs2_expand_inline_dir(struct inode *dir, struct buffer_head *di_bh, | |||
| 3178 | 3176 | ||
| 3179 | out_commit: | 3177 | out_commit: |
| 3180 | if (ret < 0 && did_quota) | 3178 | if (ret < 0 && did_quota) |
| 3181 | vfs_dq_free_space_nodirty(dir, bytes_allocated); | 3179 | dquot_free_space_nodirty(dir, bytes_allocated); |
| 3182 | 3180 | ||
| 3183 | ocfs2_commit_trans(osb, handle); | 3181 | ocfs2_commit_trans(osb, handle); |
| 3184 | 3182 | ||
| @@ -3221,11 +3219,10 @@ static int ocfs2_do_extend_dir(struct super_block *sb, | |||
| 3221 | if (extend) { | 3219 | if (extend) { |
| 3222 | u32 offset = OCFS2_I(dir)->ip_clusters; | 3220 | u32 offset = OCFS2_I(dir)->ip_clusters; |
| 3223 | 3221 | ||
| 3224 | if (vfs_dq_alloc_space_nodirty(dir, | 3222 | status = dquot_alloc_space_nodirty(dir, |
| 3225 | ocfs2_clusters_to_bytes(sb, 1))) { | 3223 | ocfs2_clusters_to_bytes(sb, 1)); |
| 3226 | status = -EDQUOT; | 3224 | if (status) |
| 3227 | goto bail; | 3225 | goto bail; |
| 3228 | } | ||
| 3229 | did_quota = 1; | 3226 | did_quota = 1; |
| 3230 | 3227 | ||
| 3231 | status = ocfs2_add_inode_data(OCFS2_SB(sb), dir, &offset, | 3228 | status = ocfs2_add_inode_data(OCFS2_SB(sb), dir, &offset, |
| @@ -3254,7 +3251,7 @@ static int ocfs2_do_extend_dir(struct super_block *sb, | |||
| 3254 | status = 0; | 3251 | status = 0; |
| 3255 | bail: | 3252 | bail: |
| 3256 | if (did_quota && status < 0) | 3253 | if (did_quota && status < 0) |
| 3257 | vfs_dq_free_space_nodirty(dir, ocfs2_clusters_to_bytes(sb, 1)); | 3254 | dquot_free_space_nodirty(dir, ocfs2_clusters_to_bytes(sb, 1)); |
| 3258 | mlog_exit(status); | 3255 | mlog_exit(status); |
| 3259 | return status; | 3256 | return status; |
| 3260 | } | 3257 | } |
| @@ -3889,11 +3886,10 @@ static int ocfs2_dx_dir_rebalance(struct ocfs2_super *osb, struct inode *dir, | |||
| 3889 | goto out; | 3886 | goto out; |
| 3890 | } | 3887 | } |
| 3891 | 3888 | ||
| 3892 | if (vfs_dq_alloc_space_nodirty(dir, | 3889 | ret = dquot_alloc_space_nodirty(dir, |
| 3893 | ocfs2_clusters_to_bytes(dir->i_sb, 1))) { | 3890 | ocfs2_clusters_to_bytes(dir->i_sb, 1)); |
| 3894 | ret = -EDQUOT; | 3891 | if (ret) |
| 3895 | goto out_commit; | 3892 | goto out_commit; |
| 3896 | } | ||
| 3897 | did_quota = 1; | 3893 | did_quota = 1; |
| 3898 | 3894 | ||
| 3899 | ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir), dx_leaf_bh, | 3895 | ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir), dx_leaf_bh, |
| @@ -3983,7 +3979,7 @@ static int ocfs2_dx_dir_rebalance(struct ocfs2_super *osb, struct inode *dir, | |||
| 3983 | 3979 | ||
| 3984 | out_commit: | 3980 | out_commit: |
| 3985 | if (ret < 0 && did_quota) | 3981 | if (ret < 0 && did_quota) |
| 3986 | vfs_dq_free_space_nodirty(dir, | 3982 | dquot_free_space_nodirty(dir, |
| 3987 | ocfs2_clusters_to_bytes(dir->i_sb, 1)); | 3983 | ocfs2_clusters_to_bytes(dir->i_sb, 1)); |
| 3988 | 3984 | ||
| 3989 | ocfs2_commit_trans(osb, handle); | 3985 | ocfs2_commit_trans(osb, handle); |
| @@ -4165,11 +4161,10 @@ static int ocfs2_expand_inline_dx_root(struct inode *dir, | |||
| 4165 | goto out; | 4161 | goto out; |
| 4166 | } | 4162 | } |
| 4167 | 4163 | ||
| 4168 | if (vfs_dq_alloc_space_nodirty(dir, | 4164 | ret = dquot_alloc_space_nodirty(dir, |
| 4169 | ocfs2_clusters_to_bytes(osb->sb, 1))) { | 4165 | ocfs2_clusters_to_bytes(osb->sb, 1)); |
| 4170 | ret = -EDQUOT; | 4166 | if (ret) |
| 4171 | goto out_commit; | 4167 | goto out_commit; |
| 4172 | } | ||
| 4173 | did_quota = 1; | 4168 | did_quota = 1; |
| 4174 | 4169 | ||
| 4175 | /* | 4170 | /* |
| @@ -4229,7 +4224,7 @@ static int ocfs2_expand_inline_dx_root(struct inode *dir, | |||
| 4229 | 4224 | ||
| 4230 | out_commit: | 4225 | out_commit: |
| 4231 | if (ret < 0 && did_quota) | 4226 | if (ret < 0 && did_quota) |
| 4232 | vfs_dq_free_space_nodirty(dir, | 4227 | dquot_free_space_nodirty(dir, |
| 4233 | ocfs2_clusters_to_bytes(dir->i_sb, 1)); | 4228 | ocfs2_clusters_to_bytes(dir->i_sb, 1)); |
| 4234 | 4229 | ||
| 4235 | ocfs2_commit_trans(osb, handle); | 4230 | ocfs2_commit_trans(osb, handle); |
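The dir.c hunks above all make the same conversion: dquot_alloc_space_nodirty() now returns 0 or a negative errno (typically -EDQUOT), so callers propagate its return value instead of translating a nonzero result into -EDQUOT by hand. A minimal sketch of the new calling convention follows; do_on_disk_alloc() is a hypothetical stand-in for the filesystem-specific allocation step, not ocfs2 code.

#include <linux/fs.h>
#include <linux/quotaops.h>

/* Stand-in for the real on-disk allocation work. */
static int do_on_disk_alloc(struct inode *inode, qsize_t bytes)
{
	return 0;
}

static int example_extend(struct inode *inode, qsize_t bytes)
{
	int ret;

	ret = dquot_alloc_space_nodirty(inode, bytes);
	if (ret)			/* 0 on success, -EDQUOT etc. on failure */
		return ret;

	ret = do_on_disk_alloc(inode, bytes);
	if (ret)			/* roll back the quota charge on failure */
		dquot_free_space_nodirty(inode, bytes);
	return ret;
}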
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index 5b52547d6299..17947dc8341e 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c | |||
| @@ -107,6 +107,9 @@ static int ocfs2_file_open(struct inode *inode, struct file *file) | |||
| 107 | mlog_entry("(0x%p, 0x%p, '%.*s')\n", inode, file, | 107 | mlog_entry("(0x%p, 0x%p, '%.*s')\n", inode, file, |
| 108 | file->f_path.dentry->d_name.len, file->f_path.dentry->d_name.name); | 108 | file->f_path.dentry->d_name.len, file->f_path.dentry->d_name.name); |
| 109 | 109 | ||
| 110 | if (file->f_mode & FMODE_WRITE) | ||
| 111 | dquot_initialize(inode); | ||
| 112 | |||
| 110 | spin_lock(&oi->ip_lock); | 113 | spin_lock(&oi->ip_lock); |
| 111 | 114 | ||
| 112 | /* Check that the inode hasn't been wiped from disk by another | 115 | /* Check that the inode hasn't been wiped from disk by another |
| @@ -629,11 +632,10 @@ restart_all: | |||
| 629 | } | 632 | } |
| 630 | 633 | ||
| 631 | restarted_transaction: | 634 | restarted_transaction: |
| 632 | if (vfs_dq_alloc_space_nodirty(inode, ocfs2_clusters_to_bytes(osb->sb, | 635 | status = dquot_alloc_space_nodirty(inode, |
| 633 | clusters_to_add))) { | 636 | ocfs2_clusters_to_bytes(osb->sb, clusters_to_add)); |
| 634 | status = -EDQUOT; | 637 | if (status) |
| 635 | goto leave; | 638 | goto leave; |
| 636 | } | ||
| 637 | did_quota = 1; | 639 | did_quota = 1; |
| 638 | 640 | ||
| 639 | /* reserve a write to the file entry early on - that we if we | 641 | /* reserve a write to the file entry early on - that we if we |
| @@ -674,7 +676,7 @@ restarted_transaction: | |||
| 674 | clusters_to_add -= (OCFS2_I(inode)->ip_clusters - prev_clusters); | 676 | clusters_to_add -= (OCFS2_I(inode)->ip_clusters - prev_clusters); |
| 675 | spin_unlock(&OCFS2_I(inode)->ip_lock); | 677 | spin_unlock(&OCFS2_I(inode)->ip_lock); |
| 676 | /* Release unused quota reservation */ | 678 | /* Release unused quota reservation */ |
| 677 | vfs_dq_free_space(inode, | 679 | dquot_free_space(inode, |
| 678 | ocfs2_clusters_to_bytes(osb->sb, clusters_to_add)); | 680 | ocfs2_clusters_to_bytes(osb->sb, clusters_to_add)); |
| 679 | did_quota = 0; | 681 | did_quota = 0; |
| 680 | 682 | ||
| @@ -710,7 +712,7 @@ restarted_transaction: | |||
| 710 | 712 | ||
| 711 | leave: | 713 | leave: |
| 712 | if (status < 0 && did_quota) | 714 | if (status < 0 && did_quota) |
| 713 | vfs_dq_free_space(inode, | 715 | dquot_free_space(inode, |
| 714 | ocfs2_clusters_to_bytes(osb->sb, clusters_to_add)); | 716 | ocfs2_clusters_to_bytes(osb->sb, clusters_to_add)); |
| 715 | if (handle) { | 717 | if (handle) { |
| 716 | ocfs2_commit_trans(osb, handle); | 718 | ocfs2_commit_trans(osb, handle); |
| @@ -978,6 +980,8 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr) | |||
| 978 | 980 | ||
| 979 | size_change = S_ISREG(inode->i_mode) && attr->ia_valid & ATTR_SIZE; | 981 | size_change = S_ISREG(inode->i_mode) && attr->ia_valid & ATTR_SIZE; |
| 980 | if (size_change) { | 982 | if (size_change) { |
| 983 | dquot_initialize(inode); | ||
| 984 | |||
| 981 | status = ocfs2_rw_lock(inode, 1); | 985 | status = ocfs2_rw_lock(inode, 1); |
| 982 | if (status < 0) { | 986 | if (status < 0) { |
| 983 | mlog_errno(status); | 987 | mlog_errno(status); |
| @@ -1020,7 +1024,7 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr) | |||
| 1020 | /* | 1024 | /* |
| 1021 | * Gather pointers to quota structures so that allocation / | 1025 | * Gather pointers to quota structures so that allocation / |
| 1022 | * freeing of quota structures happens here and not inside | 1026 | * freeing of quota structures happens here and not inside |
| 1023 | * vfs_dq_transfer() where we have problems with lock ordering | 1027 | * dquot_transfer() where we have problems with lock ordering |
| 1024 | */ | 1028 | */ |
| 1025 | if (attr->ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid | 1029 | if (attr->ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid |
| 1026 | && OCFS2_HAS_RO_COMPAT_FEATURE(sb, | 1030 | && OCFS2_HAS_RO_COMPAT_FEATURE(sb, |
| @@ -1053,7 +1057,7 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr) | |||
| 1053 | mlog_errno(status); | 1057 | mlog_errno(status); |
| 1054 | goto bail_unlock; | 1058 | goto bail_unlock; |
| 1055 | } | 1059 | } |
| 1056 | status = vfs_dq_transfer(inode, attr) ? -EDQUOT : 0; | 1060 | status = dquot_transfer(inode, attr); |
| 1057 | if (status < 0) | 1061 | if (status < 0) |
| 1058 | goto bail_commit; | 1062 | goto bail_commit; |
| 1059 | } else { | 1063 | } else { |
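The file.c changes show the other half of the conversion: quota initialization is now the filesystem's responsibility (hence dquot_initialize() on write-opens and before size changes), and dquot_transfer() reports -EDQUOT itself rather than the old NO_QUOTA convention. A rough sketch of both call sites for a generic filesystem; these are illustrative functions, not the ocfs2 ones.

#include <linux/fs.h>
#include <linux/quotaops.h>

static int example_open(struct inode *inode, struct file *file)
{
	/* attach and charge dquots early when the file may be written */
	if (file->f_mode & FMODE_WRITE)
		dquot_initialize(inode);
	return 0;
}

static int example_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int err = 0;

	/* ownership changes move usage between dquots; returns 0 or -EDQUOT */
	if (attr->ia_valid & (ATTR_UID | ATTR_GID))
		err = dquot_transfer(inode, attr);
	return err;
}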
diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c index 88459bdd1ff3..278a223aae14 100644 --- a/fs/ocfs2/inode.c +++ b/fs/ocfs2/inode.c | |||
| @@ -665,7 +665,7 @@ static int ocfs2_remove_inode(struct inode *inode, | |||
| 665 | } | 665 | } |
| 666 | 666 | ||
| 667 | ocfs2_remove_from_cache(INODE_CACHE(inode), di_bh); | 667 | ocfs2_remove_from_cache(INODE_CACHE(inode), di_bh); |
| 668 | vfs_dq_free_inode(inode); | 668 | dquot_free_inode(inode); |
| 669 | 669 | ||
| 670 | status = ocfs2_free_dinode(handle, inode_alloc_inode, | 670 | status = ocfs2_free_dinode(handle, inode_alloc_inode, |
| 671 | inode_alloc_bh, di); | 671 | inode_alloc_bh, di); |
| @@ -971,6 +971,8 @@ void ocfs2_delete_inode(struct inode *inode) | |||
| 971 | goto bail; | 971 | goto bail; |
| 972 | } | 972 | } |
| 973 | 973 | ||
| 974 | dquot_initialize(inode); | ||
| 975 | |||
| 974 | if (!ocfs2_inode_is_valid_to_delete(inode)) { | 976 | if (!ocfs2_inode_is_valid_to_delete(inode)) { |
| 975 | /* It's probably not necessary to truncate_inode_pages | 977 | /* It's probably not necessary to truncate_inode_pages |
| 976 | * here but we do it for safety anyway (it will most | 978 | * here but we do it for safety anyway (it will most |
| @@ -1087,6 +1089,8 @@ void ocfs2_clear_inode(struct inode *inode) | |||
| 1087 | mlog_bug_on_msg(OCFS2_SB(inode->i_sb) == NULL, | 1089 | mlog_bug_on_msg(OCFS2_SB(inode->i_sb) == NULL, |
| 1088 | "Inode=%lu\n", inode->i_ino); | 1090 | "Inode=%lu\n", inode->i_ino); |
| 1089 | 1091 | ||
| 1092 | dquot_drop(inode); | ||
| 1093 | |||
| 1090 | /* To preven remote deletes we hold open lock before, now it | 1094 | /* To preven remote deletes we hold open lock before, now it |
| 1091 | * is time to unlock PR and EX open locks. */ | 1095 | * is time to unlock PR and EX open locks. */ |
| 1092 | ocfs2_open_unlock(inode); | 1096 | ocfs2_open_unlock(inode); |
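inode.c wires up the new lifecycle hooks: the filesystem initializes quota pointers before deleting an inode and drops them in ->clear_inode(), work the VFS used to trigger through dq_op. A compact sketch of where those two calls sit, with the filesystem-specific steps elided as comments.

#include <linux/fs.h>
#include <linux/quotaops.h>

static void example_delete_inode(struct inode *inode)
{
	/* may allocate dquot structures, so call it before any transaction */
	dquot_initialize(inode);

	/* ... free data blocks, dquot_free_inode(inode), wipe the on-disk inode ... */

	clear_inode(inode);
}

static void example_clear_inode(struct inode *inode)
{
	/* release the i_dquot[] references taken by dquot_initialize() */
	dquot_drop(inode);
}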
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c index 50fb26a6a5f5..d9cd4e373a53 100644 --- a/fs/ocfs2/namei.c +++ b/fs/ocfs2/namei.c | |||
| @@ -212,7 +212,7 @@ static struct inode *ocfs2_get_init_inode(struct inode *dir, int mode) | |||
| 212 | } else | 212 | } else |
| 213 | inode->i_gid = current_fsgid(); | 213 | inode->i_gid = current_fsgid(); |
| 214 | inode->i_mode = mode; | 214 | inode->i_mode = mode; |
| 215 | vfs_dq_init(inode); | 215 | dquot_initialize(inode); |
| 216 | return inode; | 216 | return inode; |
| 217 | } | 217 | } |
| 218 | 218 | ||
| @@ -244,6 +244,8 @@ static int ocfs2_mknod(struct inode *dir, | |||
| 244 | (unsigned long)dev, dentry->d_name.len, | 244 | (unsigned long)dev, dentry->d_name.len, |
| 245 | dentry->d_name.name); | 245 | dentry->d_name.name); |
| 246 | 246 | ||
| 247 | dquot_initialize(dir); | ||
| 248 | |||
| 247 | /* get our super block */ | 249 | /* get our super block */ |
| 248 | osb = OCFS2_SB(dir->i_sb); | 250 | osb = OCFS2_SB(dir->i_sb); |
| 249 | 251 | ||
| @@ -348,13 +350,9 @@ static int ocfs2_mknod(struct inode *dir, | |||
| 348 | goto leave; | 350 | goto leave; |
| 349 | } | 351 | } |
| 350 | 352 | ||
| 351 | /* We don't use standard VFS wrapper because we don't want vfs_dq_init | 353 | status = dquot_alloc_inode(inode); |
| 352 | * to be called. */ | 354 | if (status) |
| 353 | if (sb_any_quota_active(osb->sb) && | ||
| 354 | osb->sb->dq_op->alloc_inode(inode, 1) == NO_QUOTA) { | ||
| 355 | status = -EDQUOT; | ||
| 356 | goto leave; | 355 | goto leave; |
| 357 | } | ||
| 358 | did_quota_inode = 1; | 356 | did_quota_inode = 1; |
| 359 | 357 | ||
| 360 | mlog_entry("(0x%p, 0x%p, %d, %lu, '%.*s')\n", dir, dentry, | 358 | mlog_entry("(0x%p, 0x%p, %d, %lu, '%.*s')\n", dir, dentry, |
| @@ -431,7 +429,7 @@ static int ocfs2_mknod(struct inode *dir, | |||
| 431 | status = 0; | 429 | status = 0; |
| 432 | leave: | 430 | leave: |
| 433 | if (status < 0 && did_quota_inode) | 431 | if (status < 0 && did_quota_inode) |
| 434 | vfs_dq_free_inode(inode); | 432 | dquot_free_inode(inode); |
| 435 | if (handle) | 433 | if (handle) |
| 436 | ocfs2_commit_trans(osb, handle); | 434 | ocfs2_commit_trans(osb, handle); |
| 437 | 435 | ||
| @@ -636,6 +634,8 @@ static int ocfs2_link(struct dentry *old_dentry, | |||
| 636 | if (S_ISDIR(inode->i_mode)) | 634 | if (S_ISDIR(inode->i_mode)) |
| 637 | return -EPERM; | 635 | return -EPERM; |
| 638 | 636 | ||
| 637 | dquot_initialize(dir); | ||
| 638 | |||
| 639 | err = ocfs2_inode_lock_nested(dir, &parent_fe_bh, 1, OI_LS_PARENT); | 639 | err = ocfs2_inode_lock_nested(dir, &parent_fe_bh, 1, OI_LS_PARENT); |
| 640 | if (err < 0) { | 640 | if (err < 0) { |
| 641 | if (err != -ENOENT) | 641 | if (err != -ENOENT) |
| @@ -791,6 +791,8 @@ static int ocfs2_unlink(struct inode *dir, | |||
| 791 | mlog_entry("(0x%p, 0x%p, '%.*s')\n", dir, dentry, | 791 | mlog_entry("(0x%p, 0x%p, '%.*s')\n", dir, dentry, |
| 792 | dentry->d_name.len, dentry->d_name.name); | 792 | dentry->d_name.len, dentry->d_name.name); |
| 793 | 793 | ||
| 794 | dquot_initialize(dir); | ||
| 795 | |||
| 794 | BUG_ON(dentry->d_parent->d_inode != dir); | 796 | BUG_ON(dentry->d_parent->d_inode != dir); |
| 795 | 797 | ||
| 796 | mlog(0, "ino = %llu\n", (unsigned long long)OCFS2_I(inode)->ip_blkno); | 798 | mlog(0, "ino = %llu\n", (unsigned long long)OCFS2_I(inode)->ip_blkno); |
| @@ -1051,6 +1053,9 @@ static int ocfs2_rename(struct inode *old_dir, | |||
| 1051 | old_dentry->d_name.len, old_dentry->d_name.name, | 1053 | old_dentry->d_name.len, old_dentry->d_name.name, |
| 1052 | new_dentry->d_name.len, new_dentry->d_name.name); | 1054 | new_dentry->d_name.len, new_dentry->d_name.name); |
| 1053 | 1055 | ||
| 1056 | dquot_initialize(old_dir); | ||
| 1057 | dquot_initialize(new_dir); | ||
| 1058 | |||
| 1054 | osb = OCFS2_SB(old_dir->i_sb); | 1059 | osb = OCFS2_SB(old_dir->i_sb); |
| 1055 | 1060 | ||
| 1056 | if (new_inode) { | 1061 | if (new_inode) { |
| @@ -1599,6 +1604,8 @@ static int ocfs2_symlink(struct inode *dir, | |||
| 1599 | mlog_entry("(0x%p, 0x%p, symname='%s' actual='%.*s')\n", dir, | 1604 | mlog_entry("(0x%p, 0x%p, symname='%s' actual='%.*s')\n", dir, |
| 1600 | dentry, symname, dentry->d_name.len, dentry->d_name.name); | 1605 | dentry, symname, dentry->d_name.len, dentry->d_name.name); |
| 1601 | 1606 | ||
| 1607 | dquot_initialize(dir); | ||
| 1608 | |||
| 1602 | sb = dir->i_sb; | 1609 | sb = dir->i_sb; |
| 1603 | osb = OCFS2_SB(sb); | 1610 | osb = OCFS2_SB(sb); |
| 1604 | 1611 | ||
| @@ -1688,13 +1695,9 @@ static int ocfs2_symlink(struct inode *dir, | |||
| 1688 | goto bail; | 1695 | goto bail; |
| 1689 | } | 1696 | } |
| 1690 | 1697 | ||
| 1691 | /* We don't use standard VFS wrapper because we don't want vfs_dq_init | 1698 | status = dquot_alloc_inode(inode); |
| 1692 | * to be called. */ | 1699 | if (status) |
| 1693 | if (sb_any_quota_active(osb->sb) && | ||
| 1694 | osb->sb->dq_op->alloc_inode(inode, 1) == NO_QUOTA) { | ||
| 1695 | status = -EDQUOT; | ||
| 1696 | goto bail; | 1700 | goto bail; |
| 1697 | } | ||
| 1698 | did_quota_inode = 1; | 1701 | did_quota_inode = 1; |
| 1699 | 1702 | ||
| 1700 | mlog_entry("(0x%p, 0x%p, %d, '%.*s')\n", dir, dentry, | 1703 | mlog_entry("(0x%p, 0x%p, %d, '%.*s')\n", dir, dentry, |
| @@ -1716,11 +1719,10 @@ static int ocfs2_symlink(struct inode *dir, | |||
| 1716 | u32 offset = 0; | 1719 | u32 offset = 0; |
| 1717 | 1720 | ||
| 1718 | inode->i_op = &ocfs2_symlink_inode_operations; | 1721 | inode->i_op = &ocfs2_symlink_inode_operations; |
| 1719 | if (vfs_dq_alloc_space_nodirty(inode, | 1722 | status = dquot_alloc_space_nodirty(inode, |
| 1720 | ocfs2_clusters_to_bytes(osb->sb, 1))) { | 1723 | ocfs2_clusters_to_bytes(osb->sb, 1)); |
| 1721 | status = -EDQUOT; | 1724 | if (status) |
| 1722 | goto bail; | 1725 | goto bail; |
| 1723 | } | ||
| 1724 | did_quota = 1; | 1726 | did_quota = 1; |
| 1725 | status = ocfs2_add_inode_data(osb, inode, &offset, 1, 0, | 1727 | status = ocfs2_add_inode_data(osb, inode, &offset, 1, 0, |
| 1726 | new_fe_bh, | 1728 | new_fe_bh, |
| @@ -1788,10 +1790,10 @@ static int ocfs2_symlink(struct inode *dir, | |||
| 1788 | d_instantiate(dentry, inode); | 1790 | d_instantiate(dentry, inode); |
| 1789 | bail: | 1791 | bail: |
| 1790 | if (status < 0 && did_quota) | 1792 | if (status < 0 && did_quota) |
| 1791 | vfs_dq_free_space_nodirty(inode, | 1793 | dquot_free_space_nodirty(inode, |
| 1792 | ocfs2_clusters_to_bytes(osb->sb, 1)); | 1794 | ocfs2_clusters_to_bytes(osb->sb, 1)); |
| 1793 | if (status < 0 && did_quota_inode) | 1795 | if (status < 0 && did_quota_inode) |
| 1794 | vfs_dq_free_inode(inode); | 1796 | dquot_free_inode(inode); |
| 1795 | if (handle) | 1797 | if (handle) |
| 1796 | ocfs2_commit_trans(osb, handle); | 1798 | ocfs2_commit_trans(osb, handle); |
| 1797 | 1799 | ||
| @@ -2099,13 +2101,9 @@ int ocfs2_create_inode_in_orphan(struct inode *dir, | |||
| 2099 | goto leave; | 2101 | goto leave; |
| 2100 | } | 2102 | } |
| 2101 | 2103 | ||
| 2102 | /* We don't use standard VFS wrapper because we don't want vfs_dq_init | 2104 | status = dquot_alloc_inode(inode); |
| 2103 | * to be called. */ | 2105 | if (status) |
| 2104 | if (sb_any_quota_active(osb->sb) && | ||
| 2105 | osb->sb->dq_op->alloc_inode(inode, 1) == NO_QUOTA) { | ||
| 2106 | status = -EDQUOT; | ||
| 2107 | goto leave; | 2106 | goto leave; |
| 2108 | } | ||
| 2109 | did_quota_inode = 1; | 2107 | did_quota_inode = 1; |
| 2110 | 2108 | ||
| 2111 | inode->i_nlink = 0; | 2109 | inode->i_nlink = 0; |
| @@ -2140,7 +2138,7 @@ int ocfs2_create_inode_in_orphan(struct inode *dir, | |||
| 2140 | insert_inode_hash(inode); | 2138 | insert_inode_hash(inode); |
| 2141 | leave: | 2139 | leave: |
| 2142 | if (status < 0 && did_quota_inode) | 2140 | if (status < 0 && did_quota_inode) |
| 2143 | vfs_dq_free_inode(inode); | 2141 | dquot_free_inode(inode); |
| 2144 | if (handle) | 2142 | if (handle) |
| 2145 | ocfs2_commit_trans(osb, handle); | 2143 | ocfs2_commit_trans(osb, handle); |
| 2146 | 2144 | ||
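All three create paths in namei.c (mknod, symlink, orphan inode) lose the open-coded sb_any_quota_active()/dq_op->alloc_inode() test: dquot_alloc_inode() performs that check internally and returns 0 or -EDQUOT. A sketch of the resulting create-path shape; add_on_disk_inode() is a hypothetical placeholder for the directory-entry and inode-allocation work.

#include <linux/fs.h>
#include <linux/quotaops.h>

/* Stand-in for the filesystem-specific inode/dirent allocation. */
static int add_on_disk_inode(struct inode *dir, struct inode *inode)
{
	return 0;
}

static int example_create(struct inode *dir, struct inode *inode)
{
	int status;

	dquot_initialize(dir);		/* attach the parent's dquots up front */

	status = dquot_alloc_inode(inode);
	if (status)			/* 0 or -EDQUOT */
		return status;

	status = add_on_disk_inode(dir, inode);
	if (status)
		dquot_free_inode(inode);	/* undo the inode charge */
	return status;
}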
diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c index b437dc0c4cad..355f41d1d520 100644 --- a/fs/ocfs2/quota_global.c +++ b/fs/ocfs2/quota_global.c | |||
| @@ -851,13 +851,6 @@ static void ocfs2_destroy_dquot(struct dquot *dquot) | |||
| 851 | } | 851 | } |
| 852 | 852 | ||
| 853 | const struct dquot_operations ocfs2_quota_operations = { | 853 | const struct dquot_operations ocfs2_quota_operations = { |
| 854 | .initialize = dquot_initialize, | ||
| 855 | .drop = dquot_drop, | ||
| 856 | .alloc_space = dquot_alloc_space, | ||
| 857 | .alloc_inode = dquot_alloc_inode, | ||
| 858 | .free_space = dquot_free_space, | ||
| 859 | .free_inode = dquot_free_inode, | ||
| 860 | .transfer = dquot_transfer, | ||
| 861 | .write_dquot = ocfs2_write_dquot, | 854 | .write_dquot = ocfs2_write_dquot, |
| 862 | .acquire_dquot = ocfs2_acquire_dquot, | 855 | .acquire_dquot = ocfs2_acquire_dquot, |
| 863 | .release_dquot = ocfs2_release_dquot, | 856 | .release_dquot = ocfs2_release_dquot, |
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c index fb6aa7acf54b..9e96921dffda 100644 --- a/fs/ocfs2/refcounttree.c +++ b/fs/ocfs2/refcounttree.c | |||
| @@ -4390,7 +4390,7 @@ static int ocfs2_vfs_reflink(struct dentry *old_dentry, struct inode *dir, | |||
| 4390 | } | 4390 | } |
| 4391 | 4391 | ||
| 4392 | mutex_lock(&inode->i_mutex); | 4392 | mutex_lock(&inode->i_mutex); |
| 4393 | vfs_dq_init(dir); | 4393 | dquot_initialize(dir); |
| 4394 | error = ocfs2_reflink(old_dentry, dir, new_dentry, preserve); | 4394 | error = ocfs2_reflink(old_dentry, dir, new_dentry, preserve); |
| 4395 | mutex_unlock(&inode->i_mutex); | 4395 | mutex_unlock(&inode->i_mutex); |
| 4396 | if (!error) | 4396 | if (!error) |
diff --git a/fs/omfs/inode.c b/fs/omfs/inode.c index f3b7c1541f3a..75d9b5ba1d45 100644 --- a/fs/omfs/inode.c +++ b/fs/omfs/inode.c | |||
| @@ -11,6 +11,7 @@ | |||
| 11 | #include <linux/parser.h> | 11 | #include <linux/parser.h> |
| 12 | #include <linux/buffer_head.h> | 12 | #include <linux/buffer_head.h> |
| 13 | #include <linux/vmalloc.h> | 13 | #include <linux/vmalloc.h> |
| 14 | #include <linux/writeback.h> | ||
| 14 | #include <linux/crc-itu-t.h> | 15 | #include <linux/crc-itu-t.h> |
| 15 | #include "omfs.h" | 16 | #include "omfs.h" |
| 16 | 17 | ||
| @@ -89,7 +90,7 @@ static void omfs_update_checksums(struct omfs_inode *oi) | |||
| 89 | oi->i_head.h_check_xor = xor; | 90 | oi->i_head.h_check_xor = xor; |
| 90 | } | 91 | } |
| 91 | 92 | ||
| 92 | static int omfs_write_inode(struct inode *inode, int wait) | 93 | static int __omfs_write_inode(struct inode *inode, int wait) |
| 93 | { | 94 | { |
| 94 | struct omfs_inode *oi; | 95 | struct omfs_inode *oi; |
| 95 | struct omfs_sb_info *sbi = OMFS_SB(inode->i_sb); | 96 | struct omfs_sb_info *sbi = OMFS_SB(inode->i_sb); |
| @@ -162,9 +163,14 @@ out: | |||
| 162 | return ret; | 163 | return ret; |
| 163 | } | 164 | } |
| 164 | 165 | ||
| 166 | static int omfs_write_inode(struct inode *inode, struct writeback_control *wbc) | ||
| 167 | { | ||
| 168 | return __omfs_write_inode(inode, wbc->sync_mode == WB_SYNC_ALL); | ||
| 169 | } | ||
| 170 | |||
| 165 | int omfs_sync_inode(struct inode *inode) | 171 | int omfs_sync_inode(struct inode *inode) |
| 166 | { | 172 | { |
| 167 | return omfs_write_inode(inode, 1); | 173 | return __omfs_write_inode(inode, 1); |
| 168 | } | 174 | } |
| 169 | 175 | ||
| 170 | /* | 176 | /* |
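omfs splits the old helper so that ->write_inode() takes a struct writeback_control and derives synchronous behaviour from wbc->sync_mode, while omfs_sync_inode() keeps an explicit wait. The same wrapper shape, sketched generically with a stub for the real on-disk update.

#include <linux/fs.h>
#include <linux/writeback.h>

/* Stand-in for the real on-disk inode update. */
static int __example_write_inode(struct inode *inode, int wait)
{
	/* ... build the on-disk inode and submit it, waiting if asked ... */
	return 0;
}

static int example_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	/* synchronous only for data-integrity (WB_SYNC_ALL) writeback */
	return __example_write_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
}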
diff --git a/fs/open.c b/fs/open.c --- a/fs/open.c +++ b/fs/open.c | |||
| @@ -8,7 +8,6 @@ | |||
| 8 | #include <linux/mm.h> | 8 | #include <linux/mm.h> |
| 9 | #include <linux/file.h> | 9 | #include <linux/file.h> |
| 10 | #include <linux/fdtable.h> | 10 | #include <linux/fdtable.h> |
| 11 | #include <linux/quotaops.h> | ||
| 12 | #include <linux/fsnotify.h> | 11 | #include <linux/fsnotify.h> |
| 13 | #include <linux/module.h> | 12 | #include <linux/module.h> |
| 14 | #include <linux/slab.h> | 13 | #include <linux/slab.h> |
| @@ -278,10 +277,8 @@ static long do_sys_truncate(const char __user *pathname, loff_t length) | |||
| 278 | error = locks_verify_truncate(inode, NULL, length); | 277 | error = locks_verify_truncate(inode, NULL, length); |
| 279 | if (!error) | 278 | if (!error) |
| 280 | error = security_path_truncate(&path, length, 0); | 279 | error = security_path_truncate(&path, length, 0); |
| 281 | if (!error) { | 280 | if (!error) |
| 282 | vfs_dq_init(inode); | ||
| 283 | error = do_truncate(path.dentry, length, 0, NULL); | 281 | error = do_truncate(path.dentry, length, 0, NULL); |
| 284 | } | ||
| 285 | 282 | ||
| 286 | put_write_and_out: | 283 | put_write_and_out: |
| 287 | put_write_access(inode); | 284 | put_write_access(inode); |
diff --git a/fs/quota/Kconfig b/fs/quota/Kconfig index efc02ebb8c70..dad7fb247ddc 100644 --- a/fs/quota/Kconfig +++ b/fs/quota/Kconfig | |||
| @@ -59,3 +59,8 @@ config QUOTACTL | |||
| 59 | bool | 59 | bool |
| 60 | depends on XFS_QUOTA || QUOTA | 60 | depends on XFS_QUOTA || QUOTA |
| 61 | default y | 61 | default y |
| 62 | |||
| 63 | config QUOTACTL_COMPAT | ||
| 64 | bool | ||
| 65 | depends on QUOTACTL && COMPAT_FOR_U64_ALIGNMENT | ||
| 66 | default y | ||
diff --git a/fs/quota/Makefile b/fs/quota/Makefile index 68d4f6dc0578..5f9e9e276af0 100644 --- a/fs/quota/Makefile +++ b/fs/quota/Makefile | |||
| @@ -3,3 +3,5 @@ obj-$(CONFIG_QFMT_V1) += quota_v1.o | |||
| 3 | obj-$(CONFIG_QFMT_V2) += quota_v2.o | 3 | obj-$(CONFIG_QFMT_V2) += quota_v2.o |
| 4 | obj-$(CONFIG_QUOTA_TREE) += quota_tree.o | 4 | obj-$(CONFIG_QUOTA_TREE) += quota_tree.o |
| 5 | obj-$(CONFIG_QUOTACTL) += quota.o | 5 | obj-$(CONFIG_QUOTACTL) += quota.o |
| 6 | obj-$(CONFIG_QUOTACTL_COMPAT) += compat.o | ||
| 7 | obj-$(CONFIG_QUOTA_NETLINK_INTERFACE) += netlink.o | ||
diff --git a/fs/quota/compat.c b/fs/quota/compat.c new file mode 100644 index 000000000000..fb1892fe3e56 --- /dev/null +++ b/fs/quota/compat.c | |||
| @@ -0,0 +1,118 @@ | |||
| 1 | |||
| 2 | #include <linux/syscalls.h> | ||
| 3 | #include <linux/compat.h> | ||
| 4 | #include <linux/quotaops.h> | ||
| 5 | |||
| 6 | /* | ||
| 7 | * This code works only for 32 bit quota tools over 64 bit OS (x86_64, ia64) | ||
| 8 | * and is necessary due to alignment problems. | ||
| 9 | */ | ||
| 10 | struct compat_if_dqblk { | ||
| 11 | compat_u64 dqb_bhardlimit; | ||
| 12 | compat_u64 dqb_bsoftlimit; | ||
| 13 | compat_u64 dqb_curspace; | ||
| 14 | compat_u64 dqb_ihardlimit; | ||
| 15 | compat_u64 dqb_isoftlimit; | ||
| 16 | compat_u64 dqb_curinodes; | ||
| 17 | compat_u64 dqb_btime; | ||
| 18 | compat_u64 dqb_itime; | ||
| 19 | compat_uint_t dqb_valid; | ||
| 20 | }; | ||
| 21 | |||
| 22 | /* XFS structures */ | ||
| 23 | struct compat_fs_qfilestat { | ||
| 24 | compat_u64 dqb_bhardlimit; | ||
| 25 | compat_u64 qfs_nblks; | ||
| 26 | compat_uint_t qfs_nextents; | ||
| 27 | }; | ||
| 28 | |||
| 29 | struct compat_fs_quota_stat { | ||
| 30 | __s8 qs_version; | ||
| 31 | __u16 qs_flags; | ||
| 32 | __s8 qs_pad; | ||
| 33 | struct compat_fs_qfilestat qs_uquota; | ||
| 34 | struct compat_fs_qfilestat qs_gquota; | ||
| 35 | compat_uint_t qs_incoredqs; | ||
| 36 | compat_int_t qs_btimelimit; | ||
| 37 | compat_int_t qs_itimelimit; | ||
| 38 | compat_int_t qs_rtbtimelimit; | ||
| 39 | __u16 qs_bwarnlimit; | ||
| 40 | __u16 qs_iwarnlimit; | ||
| 41 | }; | ||
| 42 | |||
| 43 | asmlinkage long sys32_quotactl(unsigned int cmd, const char __user *special, | ||
| 44 | qid_t id, void __user *addr) | ||
| 45 | { | ||
| 46 | unsigned int cmds; | ||
| 47 | struct if_dqblk __user *dqblk; | ||
| 48 | struct compat_if_dqblk __user *compat_dqblk; | ||
| 49 | struct fs_quota_stat __user *fsqstat; | ||
| 50 | struct compat_fs_quota_stat __user *compat_fsqstat; | ||
| 51 | compat_uint_t data; | ||
| 52 | u16 xdata; | ||
| 53 | long ret; | ||
| 54 | |||
| 55 | cmds = cmd >> SUBCMDSHIFT; | ||
| 56 | |||
| 57 | switch (cmds) { | ||
| 58 | case Q_GETQUOTA: | ||
| 59 | dqblk = compat_alloc_user_space(sizeof(struct if_dqblk)); | ||
| 60 | compat_dqblk = addr; | ||
| 61 | ret = sys_quotactl(cmd, special, id, dqblk); | ||
| 62 | if (ret) | ||
| 63 | break; | ||
| 64 | if (copy_in_user(compat_dqblk, dqblk, sizeof(*compat_dqblk)) || | ||
| 65 | get_user(data, &dqblk->dqb_valid) || | ||
| 66 | put_user(data, &compat_dqblk->dqb_valid)) | ||
| 67 | ret = -EFAULT; | ||
| 68 | break; | ||
| 69 | case Q_SETQUOTA: | ||
| 70 | dqblk = compat_alloc_user_space(sizeof(struct if_dqblk)); | ||
| 71 | compat_dqblk = addr; | ||
| 72 | ret = -EFAULT; | ||
| 73 | if (copy_in_user(dqblk, compat_dqblk, sizeof(*compat_dqblk)) || | ||
| 74 | get_user(data, &compat_dqblk->dqb_valid) || | ||
| 75 | put_user(data, &dqblk->dqb_valid)) | ||
| 76 | break; | ||
| 77 | ret = sys_quotactl(cmd, special, id, dqblk); | ||
| 78 | break; | ||
| 79 | case Q_XGETQSTAT: | ||
| 80 | fsqstat = compat_alloc_user_space(sizeof(struct fs_quota_stat)); | ||
| 81 | compat_fsqstat = addr; | ||
| 82 | ret = sys_quotactl(cmd, special, id, fsqstat); | ||
| 83 | if (ret) | ||
| 84 | break; | ||
| 85 | ret = -EFAULT; | ||
| 86 | /* Copying qs_version, qs_flags, qs_pad */ | ||
| 87 | if (copy_in_user(compat_fsqstat, fsqstat, | ||
| 88 | offsetof(struct compat_fs_quota_stat, qs_uquota))) | ||
| 89 | break; | ||
| 90 | /* Copying qs_uquota */ | ||
| 91 | if (copy_in_user(&compat_fsqstat->qs_uquota, | ||
| 92 | &fsqstat->qs_uquota, | ||
| 93 | sizeof(compat_fsqstat->qs_uquota)) || | ||
| 94 | get_user(data, &fsqstat->qs_uquota.qfs_nextents) || | ||
| 95 | put_user(data, &compat_fsqstat->qs_uquota.qfs_nextents)) | ||
| 96 | break; | ||
| 97 | /* Copying qs_gquota */ | ||
| 98 | if (copy_in_user(&compat_fsqstat->qs_gquota, | ||
| 99 | &fsqstat->qs_gquota, | ||
| 100 | sizeof(compat_fsqstat->qs_gquota)) || | ||
| 101 | get_user(data, &fsqstat->qs_gquota.qfs_nextents) || | ||
| 102 | put_user(data, &compat_fsqstat->qs_gquota.qfs_nextents)) | ||
| 103 | break; | ||
| 104 | /* Copying the rest */ | ||
| 105 | if (copy_in_user(&compat_fsqstat->qs_incoredqs, | ||
| 106 | &fsqstat->qs_incoredqs, | ||
| 107 | sizeof(struct compat_fs_quota_stat) - | ||
| 108 | offsetof(struct compat_fs_quota_stat, qs_incoredqs)) || | ||
| 109 | get_user(xdata, &fsqstat->qs_iwarnlimit) || | ||
| 110 | put_user(xdata, &compat_fsqstat->qs_iwarnlimit)) | ||
| 111 | break; | ||
| 112 | ret = 0; | ||
| 113 | break; | ||
| 114 | default: | ||
| 115 | ret = sys_quotactl(cmd, special, id, addr); | ||
| 116 | } | ||
| 117 | return ret; | ||
| 118 | } | ||
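The new compat translation exists because 32-bit userspace and a 64-bit kernel disagree on u64 alignment: on i386 the eight __u64 members of struct if_dqblk are 4-byte aligned so the trailing dqb_valid packs tightly, while on x86_64 the same layout is 8-byte aligned and tail-padded to a larger size. A stand-alone illustration of that difference (illustrative layouts only, not the kernel structures themselves); the aligned(4) typedef mirrors what compat_u64 does.

#include <linux/types.h>

struct dqblk_layout_native {
	__u64 q[8];		/* eight limit/usage fields, as in if_dqblk */
	__u32 valid;
};				/* x86_64: sizeof == 72 (8-byte align, tail pad) */

typedef __u64 __attribute__((aligned(4))) u64_aligned_4;	/* like compat_u64 */

struct dqblk_layout_compat {
	u64_aligned_4 q[8];
	__u32 valid;
};				/* sizeof == 68, matching the i386 layout */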
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index 3fc62b097bed..e0b870f4749f 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c | |||
| @@ -100,9 +100,13 @@ | |||
| 100 | * | 100 | * |
| 101 | * Any operation working on dquots via inode pointers must hold dqptr_sem. If | 101 | * Any operation working on dquots via inode pointers must hold dqptr_sem. If |
| 102 | * operation is just reading pointers from inode (or not using them at all) the | 102 | * operation is just reading pointers from inode (or not using them at all) the |
| 103 | * read lock is enough. If pointers are altered function must hold write lock | 103 | * read lock is enough. If pointers are altered function must hold write lock. |
| 104 | * (these locking rules also apply for S_NOQUOTA flag in the inode - note that | 104 | * Special care needs to be taken about S_NOQUOTA inode flag (marking that |
| 105 | * for altering the flag i_mutex is also needed). | 105 | * inode is a quota file). Functions adding pointers from inode to dquots have |
| 106 | * to check this flag under dqptr_sem and then (if S_NOQUOTA is not set) they | ||
| 107 | * have to do all pointer modifications before dropping dqptr_sem. This makes | ||
| 108 | * sure they cannot race with quotaon which first sets S_NOQUOTA flag and | ||
| 109 | * then drops all pointers to dquots from an inode. | ||
| 106 | * | 110 | * |
| 107 | * Each dquot has its dq_lock mutex. Locked dquots might not be referenced | 111 | * Each dquot has its dq_lock mutex. Locked dquots might not be referenced |
| 108 | * from inodes (dquot_alloc_space() and such don't check the dq_lock). | 112 | * from inodes (dquot_alloc_space() and such don't check the dq_lock). |
| @@ -225,6 +229,9 @@ static struct hlist_head *dquot_hash; | |||
| 225 | struct dqstats dqstats; | 229 | struct dqstats dqstats; |
| 226 | EXPORT_SYMBOL(dqstats); | 230 | EXPORT_SYMBOL(dqstats); |
| 227 | 231 | ||
| 232 | static qsize_t inode_get_rsv_space(struct inode *inode); | ||
| 233 | static void __dquot_initialize(struct inode *inode, int type); | ||
| 234 | |||
| 228 | static inline unsigned int | 235 | static inline unsigned int |
| 229 | hashfn(const struct super_block *sb, unsigned int id, int type) | 236 | hashfn(const struct super_block *sb, unsigned int id, int type) |
| 230 | { | 237 | { |
| @@ -564,7 +571,7 @@ out: | |||
| 564 | } | 571 | } |
| 565 | EXPORT_SYMBOL(dquot_scan_active); | 572 | EXPORT_SYMBOL(dquot_scan_active); |
| 566 | 573 | ||
| 567 | int vfs_quota_sync(struct super_block *sb, int type) | 574 | int vfs_quota_sync(struct super_block *sb, int type, int wait) |
| 568 | { | 575 | { |
| 569 | struct list_head *dirty; | 576 | struct list_head *dirty; |
| 570 | struct dquot *dquot; | 577 | struct dquot *dquot; |
| @@ -609,6 +616,33 @@ int vfs_quota_sync(struct super_block *sb, int type) | |||
| 609 | spin_unlock(&dq_list_lock); | 616 | spin_unlock(&dq_list_lock); |
| 610 | mutex_unlock(&dqopt->dqonoff_mutex); | 617 | mutex_unlock(&dqopt->dqonoff_mutex); |
| 611 | 618 | ||
| 619 | if (!wait || (sb_dqopt(sb)->flags & DQUOT_QUOTA_SYS_FILE)) | ||
| 620 | return 0; | ||
| 621 | |||
| 622 | /* This is not very clever (and fast) but currently I don't know about | ||
| 623 | * any other simple way of getting quota data to disk and we must get | ||
| 624 | * them there for userspace to be visible... */ | ||
| 625 | if (sb->s_op->sync_fs) | ||
| 626 | sb->s_op->sync_fs(sb, 1); | ||
| 627 | sync_blockdev(sb->s_bdev); | ||
| 628 | |||
| 629 | /* | ||
| 630 | * Now when everything is written we can discard the pagecache so | ||
| 631 | * that userspace sees the changes. | ||
| 632 | */ | ||
| 633 | mutex_lock(&sb_dqopt(sb)->dqonoff_mutex); | ||
| 634 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | ||
| 635 | if (type != -1 && cnt != type) | ||
| 636 | continue; | ||
| 637 | if (!sb_has_quota_active(sb, cnt)) | ||
| 638 | continue; | ||
| 639 | mutex_lock_nested(&sb_dqopt(sb)->files[cnt]->i_mutex, | ||
| 640 | I_MUTEX_QUOTA); | ||
| 641 | truncate_inode_pages(&sb_dqopt(sb)->files[cnt]->i_data, 0); | ||
| 642 | mutex_unlock(&sb_dqopt(sb)->files[cnt]->i_mutex); | ||
| 643 | } | ||
| 644 | mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); | ||
| 645 | |||
| 612 | return 0; | 646 | return 0; |
| 613 | } | 647 | } |
| 614 | EXPORT_SYMBOL(vfs_quota_sync); | 648 | EXPORT_SYMBOL(vfs_quota_sync); |
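vfs_quota_sync() grows a wait argument: with wait == 0 it behaves as before, while wait == 1 additionally syncs the filesystem and drops the quota files' page cache so userspace utilities read up-to-date data. A bare sketch of a caller that wants the on-disk copy; the wrapper name is illustrative.

#include <linux/fs.h>
#include <linux/quotaops.h>

static int example_flush_quota(struct super_block *sb)
{
	/* -1 = all quota types, 1 = wait until the data is visible on disk */
	return vfs_quota_sync(sb, -1, 1);
}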
| @@ -840,11 +874,14 @@ static int dqinit_needed(struct inode *inode, int type) | |||
| 840 | static void add_dquot_ref(struct super_block *sb, int type) | 874 | static void add_dquot_ref(struct super_block *sb, int type) |
| 841 | { | 875 | { |
| 842 | struct inode *inode, *old_inode = NULL; | 876 | struct inode *inode, *old_inode = NULL; |
| 877 | int reserved = 0; | ||
| 843 | 878 | ||
| 844 | spin_lock(&inode_lock); | 879 | spin_lock(&inode_lock); |
| 845 | list_for_each_entry(inode, &sb->s_inodes, i_sb_list) { | 880 | list_for_each_entry(inode, &sb->s_inodes, i_sb_list) { |
| 846 | if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE|I_NEW)) | 881 | if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE|I_NEW)) |
| 847 | continue; | 882 | continue; |
| 883 | if (unlikely(inode_get_rsv_space(inode) > 0)) | ||
| 884 | reserved = 1; | ||
| 848 | if (!atomic_read(&inode->i_writecount)) | 885 | if (!atomic_read(&inode->i_writecount)) |
| 849 | continue; | 886 | continue; |
| 850 | if (!dqinit_needed(inode, type)) | 887 | if (!dqinit_needed(inode, type)) |
| @@ -854,7 +891,7 @@ static void add_dquot_ref(struct super_block *sb, int type) | |||
| 854 | spin_unlock(&inode_lock); | 891 | spin_unlock(&inode_lock); |
| 855 | 892 | ||
| 856 | iput(old_inode); | 893 | iput(old_inode); |
| 857 | sb->dq_op->initialize(inode, type); | 894 | __dquot_initialize(inode, type); |
| 858 | /* We hold a reference to 'inode' so it couldn't have been | 895 | /* We hold a reference to 'inode' so it couldn't have been |
| 859 | * removed from s_inodes list while we dropped the inode_lock. | 896 | * removed from s_inodes list while we dropped the inode_lock. |
| 860 | * We cannot iput the inode now as we can be holding the last | 897 | * We cannot iput the inode now as we can be holding the last |
| @@ -865,6 +902,12 @@ static void add_dquot_ref(struct super_block *sb, int type) | |||
| 865 | } | 902 | } |
| 866 | spin_unlock(&inode_lock); | 903 | spin_unlock(&inode_lock); |
| 867 | iput(old_inode); | 904 | iput(old_inode); |
| 905 | |||
| 906 | if (reserved) { | ||
| 907 | printk(KERN_WARNING "VFS (%s): Writes happened before quota" | ||
| 908 | " was turned on thus quota information is probably " | ||
| 909 | "inconsistent. Please run quotacheck(8).\n", sb->s_id); | ||
| 910 | } | ||
| 868 | } | 911 | } |
| 869 | 912 | ||
| 870 | /* | 913 | /* |
| @@ -978,10 +1021,12 @@ static inline void dquot_resv_space(struct dquot *dquot, qsize_t number) | |||
| 978 | /* | 1021 | /* |
| 979 | * Claim reserved quota space | 1022 | * Claim reserved quota space |
| 980 | */ | 1023 | */ |
| 981 | static void dquot_claim_reserved_space(struct dquot *dquot, | 1024 | static void dquot_claim_reserved_space(struct dquot *dquot, qsize_t number) |
| 982 | qsize_t number) | ||
| 983 | { | 1025 | { |
| 984 | WARN_ON(dquot->dq_dqb.dqb_rsvspace < number); | 1026 | if (dquot->dq_dqb.dqb_rsvspace < number) { |
| 1027 | WARN_ON_ONCE(1); | ||
| 1028 | number = dquot->dq_dqb.dqb_rsvspace; | ||
| 1029 | } | ||
| 985 | dquot->dq_dqb.dqb_curspace += number; | 1030 | dquot->dq_dqb.dqb_curspace += number; |
| 986 | dquot->dq_dqb.dqb_rsvspace -= number; | 1031 | dquot->dq_dqb.dqb_rsvspace -= number; |
| 987 | } | 1032 | } |
| @@ -989,7 +1034,12 @@ static void dquot_claim_reserved_space(struct dquot *dquot, | |||
| 989 | static inline | 1034 | static inline |
| 990 | void dquot_free_reserved_space(struct dquot *dquot, qsize_t number) | 1035 | void dquot_free_reserved_space(struct dquot *dquot, qsize_t number) |
| 991 | { | 1036 | { |
| 992 | dquot->dq_dqb.dqb_rsvspace -= number; | 1037 | if (dquot->dq_dqb.dqb_rsvspace >= number) |
| 1038 | dquot->dq_dqb.dqb_rsvspace -= number; | ||
| 1039 | else { | ||
| 1040 | WARN_ON_ONCE(1); | ||
| 1041 | dquot->dq_dqb.dqb_rsvspace = 0; | ||
| 1042 | } | ||
| 993 | } | 1043 | } |
| 994 | 1044 | ||
| 995 | static void dquot_decr_inodes(struct dquot *dquot, qsize_t number) | 1045 | static void dquot_decr_inodes(struct dquot *dquot, qsize_t number) |
| @@ -1131,13 +1181,13 @@ static int check_idq(struct dquot *dquot, qsize_t inodes, char *warntype) | |||
| 1131 | *warntype = QUOTA_NL_NOWARN; | 1181 | *warntype = QUOTA_NL_NOWARN; |
| 1132 | if (!sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_type) || | 1182 | if (!sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_type) || |
| 1133 | test_bit(DQ_FAKE_B, &dquot->dq_flags)) | 1183 | test_bit(DQ_FAKE_B, &dquot->dq_flags)) |
| 1134 | return QUOTA_OK; | 1184 | return 0; |
| 1135 | 1185 | ||
| 1136 | if (dquot->dq_dqb.dqb_ihardlimit && | 1186 | if (dquot->dq_dqb.dqb_ihardlimit && |
| 1137 | newinodes > dquot->dq_dqb.dqb_ihardlimit && | 1187 | newinodes > dquot->dq_dqb.dqb_ihardlimit && |
| 1138 | !ignore_hardlimit(dquot)) { | 1188 | !ignore_hardlimit(dquot)) { |
| 1139 | *warntype = QUOTA_NL_IHARDWARN; | 1189 | *warntype = QUOTA_NL_IHARDWARN; |
| 1140 | return NO_QUOTA; | 1190 | return -EDQUOT; |
| 1141 | } | 1191 | } |
| 1142 | 1192 | ||
| 1143 | if (dquot->dq_dqb.dqb_isoftlimit && | 1193 | if (dquot->dq_dqb.dqb_isoftlimit && |
| @@ -1146,7 +1196,7 @@ static int check_idq(struct dquot *dquot, qsize_t inodes, char *warntype) | |||
| 1146 | get_seconds() >= dquot->dq_dqb.dqb_itime && | 1196 | get_seconds() >= dquot->dq_dqb.dqb_itime && |
| 1147 | !ignore_hardlimit(dquot)) { | 1197 | !ignore_hardlimit(dquot)) { |
| 1148 | *warntype = QUOTA_NL_ISOFTLONGWARN; | 1198 | *warntype = QUOTA_NL_ISOFTLONGWARN; |
| 1149 | return NO_QUOTA; | 1199 | return -EDQUOT; |
| 1150 | } | 1200 | } |
| 1151 | 1201 | ||
| 1152 | if (dquot->dq_dqb.dqb_isoftlimit && | 1202 | if (dquot->dq_dqb.dqb_isoftlimit && |
| @@ -1157,7 +1207,7 @@ static int check_idq(struct dquot *dquot, qsize_t inodes, char *warntype) | |||
| 1157 | sb_dqopt(dquot->dq_sb)->info[dquot->dq_type].dqi_igrace; | 1207 | sb_dqopt(dquot->dq_sb)->info[dquot->dq_type].dqi_igrace; |
| 1158 | } | 1208 | } |
| 1159 | 1209 | ||
| 1160 | return QUOTA_OK; | 1210 | return 0; |
| 1161 | } | 1211 | } |
| 1162 | 1212 | ||
| 1163 | /* needs dq_data_lock */ | 1213 | /* needs dq_data_lock */ |
| @@ -1169,7 +1219,7 @@ static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc, char *war | |||
| 1169 | *warntype = QUOTA_NL_NOWARN; | 1219 | *warntype = QUOTA_NL_NOWARN; |
| 1170 | if (!sb_has_quota_limits_enabled(sb, dquot->dq_type) || | 1220 | if (!sb_has_quota_limits_enabled(sb, dquot->dq_type) || |
| 1171 | test_bit(DQ_FAKE_B, &dquot->dq_flags)) | 1221 | test_bit(DQ_FAKE_B, &dquot->dq_flags)) |
| 1172 | return QUOTA_OK; | 1222 | return 0; |
| 1173 | 1223 | ||
| 1174 | tspace = dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace | 1224 | tspace = dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace |
| 1175 | + space; | 1225 | + space; |
| @@ -1179,7 +1229,7 @@ static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc, char *war | |||
| 1179 | !ignore_hardlimit(dquot)) { | 1229 | !ignore_hardlimit(dquot)) { |
| 1180 | if (!prealloc) | 1230 | if (!prealloc) |
| 1181 | *warntype = QUOTA_NL_BHARDWARN; | 1231 | *warntype = QUOTA_NL_BHARDWARN; |
| 1182 | return NO_QUOTA; | 1232 | return -EDQUOT; |
| 1183 | } | 1233 | } |
| 1184 | 1234 | ||
| 1185 | if (dquot->dq_dqb.dqb_bsoftlimit && | 1235 | if (dquot->dq_dqb.dqb_bsoftlimit && |
| @@ -1189,7 +1239,7 @@ static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc, char *war | |||
| 1189 | !ignore_hardlimit(dquot)) { | 1239 | !ignore_hardlimit(dquot)) { |
| 1190 | if (!prealloc) | 1240 | if (!prealloc) |
| 1191 | *warntype = QUOTA_NL_BSOFTLONGWARN; | 1241 | *warntype = QUOTA_NL_BSOFTLONGWARN; |
| 1192 | return NO_QUOTA; | 1242 | return -EDQUOT; |
| 1193 | } | 1243 | } |
| 1194 | 1244 | ||
| 1195 | if (dquot->dq_dqb.dqb_bsoftlimit && | 1245 | if (dquot->dq_dqb.dqb_bsoftlimit && |
| @@ -1205,10 +1255,10 @@ static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc, char *war | |||
| 1205 | * We don't allow preallocation to exceed softlimit so exceeding will | 1255 | * We don't allow preallocation to exceed softlimit so exceeding will |
| 1206 | * be always printed | 1256 | * be always printed |
| 1207 | */ | 1257 | */ |
| 1208 | return NO_QUOTA; | 1258 | return -EDQUOT; |
| 1209 | } | 1259 | } |
| 1210 | 1260 | ||
| 1211 | return QUOTA_OK; | 1261 | return 0; |
| 1212 | } | 1262 | } |
| 1213 | 1263 | ||
| 1214 | static int info_idq_free(struct dquot *dquot, qsize_t inodes) | 1264 | static int info_idq_free(struct dquot *dquot, qsize_t inodes) |
| @@ -1242,25 +1292,32 @@ static int info_bdq_free(struct dquot *dquot, qsize_t space) | |||
| 1242 | return QUOTA_NL_BHARDBELOW; | 1292 | return QUOTA_NL_BHARDBELOW; |
| 1243 | return QUOTA_NL_NOWARN; | 1293 | return QUOTA_NL_NOWARN; |
| 1244 | } | 1294 | } |
| 1295 | |||
| 1245 | /* | 1296 | /* |
| 1246 | * Initialize quota pointers in inode | 1297 | * Initialize quota pointers in inode |
| 1247 | * We do things in a bit complicated way but by that we avoid calling | 1298 | * |
| 1248 | * dqget() and thus filesystem callbacks under dqptr_sem. | 1299 | * We do things in a bit complicated way but by that we avoid calling |
| 1300 | * dqget() and thus filesystem callbacks under dqptr_sem. | ||
| 1301 | * | ||
| 1302 | * It is better to call this function outside of any transaction as it | ||
| 1303 | * might need a lot of space in journal for dquot structure allocation. | ||
| 1249 | */ | 1304 | */ |
| 1250 | int dquot_initialize(struct inode *inode, int type) | 1305 | static void __dquot_initialize(struct inode *inode, int type) |
| 1251 | { | 1306 | { |
| 1252 | unsigned int id = 0; | 1307 | unsigned int id = 0; |
| 1253 | int cnt, ret = 0; | 1308 | int cnt; |
| 1254 | struct dquot *got[MAXQUOTAS] = { NULL, NULL }; | 1309 | struct dquot *got[MAXQUOTAS]; |
| 1255 | struct super_block *sb = inode->i_sb; | 1310 | struct super_block *sb = inode->i_sb; |
| 1311 | qsize_t rsv; | ||
| 1256 | 1312 | ||
| 1257 | /* First test before acquiring mutex - solves deadlocks when we | 1313 | /* First test before acquiring mutex - solves deadlocks when we |
| 1258 | * re-enter the quota code and are already holding the mutex */ | 1314 | * re-enter the quota code and are already holding the mutex */ |
| 1259 | if (IS_NOQUOTA(inode)) | 1315 | if (!sb_any_quota_active(inode->i_sb) || IS_NOQUOTA(inode)) |
| 1260 | return 0; | 1316 | return; |
| 1261 | 1317 | ||
| 1262 | /* First get references to structures we might need. */ | 1318 | /* First get references to structures we might need. */ |
| 1263 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | 1319 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { |
| 1320 | got[cnt] = NULL; | ||
| 1264 | if (type != -1 && cnt != type) | 1321 | if (type != -1 && cnt != type) |
| 1265 | continue; | 1322 | continue; |
| 1266 | switch (cnt) { | 1323 | switch (cnt) { |
| @@ -1275,7 +1332,6 @@ int dquot_initialize(struct inode *inode, int type) | |||
| 1275 | } | 1332 | } |
| 1276 | 1333 | ||
| 1277 | down_write(&sb_dqopt(sb)->dqptr_sem); | 1334 | down_write(&sb_dqopt(sb)->dqptr_sem); |
| 1278 | /* Having dqptr_sem we know NOQUOTA flags can't be altered... */ | ||
| 1279 | if (IS_NOQUOTA(inode)) | 1335 | if (IS_NOQUOTA(inode)) |
| 1280 | goto out_err; | 1336 | goto out_err; |
| 1281 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | 1337 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { |
| @@ -1287,20 +1343,31 @@ int dquot_initialize(struct inode *inode, int type) | |||
| 1287 | if (!inode->i_dquot[cnt]) { | 1343 | if (!inode->i_dquot[cnt]) { |
| 1288 | inode->i_dquot[cnt] = got[cnt]; | 1344 | inode->i_dquot[cnt] = got[cnt]; |
| 1289 | got[cnt] = NULL; | 1345 | got[cnt] = NULL; |
| 1346 | /* | ||
| 1347 | * Make quota reservation system happy if someone | ||
| 1348 | * did a write before quota was turned on | ||
| 1349 | */ | ||
| 1350 | rsv = inode_get_rsv_space(inode); | ||
| 1351 | if (unlikely(rsv)) | ||
| 1352 | dquot_resv_space(inode->i_dquot[cnt], rsv); | ||
| 1290 | } | 1353 | } |
| 1291 | } | 1354 | } |
| 1292 | out_err: | 1355 | out_err: |
| 1293 | up_write(&sb_dqopt(sb)->dqptr_sem); | 1356 | up_write(&sb_dqopt(sb)->dqptr_sem); |
| 1294 | /* Drop unused references */ | 1357 | /* Drop unused references */ |
| 1295 | dqput_all(got); | 1358 | dqput_all(got); |
| 1296 | return ret; | 1359 | } |
| 1360 | |||
| 1361 | void dquot_initialize(struct inode *inode) | ||
| 1362 | { | ||
| 1363 | __dquot_initialize(inode, -1); | ||
| 1297 | } | 1364 | } |
| 1298 | EXPORT_SYMBOL(dquot_initialize); | 1365 | EXPORT_SYMBOL(dquot_initialize); |
| 1299 | 1366 | ||
| 1300 | /* | 1367 | /* |
| 1301 | * Release all quotas referenced by inode | 1368 | * Release all quotas referenced by inode |
| 1302 | */ | 1369 | */ |
| 1303 | int dquot_drop(struct inode *inode) | 1370 | static void __dquot_drop(struct inode *inode) |
| 1304 | { | 1371 | { |
| 1305 | int cnt; | 1372 | int cnt; |
| 1306 | struct dquot *put[MAXQUOTAS]; | 1373 | struct dquot *put[MAXQUOTAS]; |
| @@ -1312,32 +1379,31 @@ int dquot_drop(struct inode *inode) | |||
| 1312 | } | 1379 | } |
| 1313 | up_write(&sb_dqopt(inode->i_sb)->dqptr_sem); | 1380 | up_write(&sb_dqopt(inode->i_sb)->dqptr_sem); |
| 1314 | dqput_all(put); | 1381 | dqput_all(put); |
| 1315 | return 0; | ||
| 1316 | } | 1382 | } |
| 1317 | EXPORT_SYMBOL(dquot_drop); | ||
| 1318 | 1383 | ||
| 1319 | /* Wrapper to remove references to quota structures from inode */ | 1384 | void dquot_drop(struct inode *inode) |
| 1320 | void vfs_dq_drop(struct inode *inode) | 1385 | { |
| 1321 | { | 1386 | int cnt; |
| 1322 | /* Here we can get arbitrary inode from clear_inode() so we have | 1387 | |
| 1323 | * to be careful. OTOH we don't need locking as quota operations | 1388 | if (IS_NOQUOTA(inode)) |
| 1324 | * are allowed to change only at mount time */ | 1389 | return; |
| 1325 | if (!IS_NOQUOTA(inode) && inode->i_sb && inode->i_sb->dq_op | 1390 | |
| 1326 | && inode->i_sb->dq_op->drop) { | 1391 | /* |
| 1327 | int cnt; | 1392 | * Test before calling to rule out calls from proc and such |
| 1328 | /* Test before calling to rule out calls from proc and such | 1393 | * where we are not allowed to block. Note that this is |
| 1329 | * where we are not allowed to block. Note that this is | 1394 | * actually reliable test even without the lock - the caller |
| 1330 | * actually reliable test even without the lock - the caller | 1395 | * must assure that nobody can come after the DQUOT_DROP and |
| 1331 | * must assure that nobody can come after the DQUOT_DROP and | 1396 | * add quota pointers back anyway. |
| 1332 | * add quota pointers back anyway */ | 1397 | */ |
| 1333 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) | 1398 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { |
| 1334 | if (inode->i_dquot[cnt]) | 1399 | if (inode->i_dquot[cnt]) |
| 1335 | break; | 1400 | break; |
| 1336 | if (cnt < MAXQUOTAS) | 1401 | } |
| 1337 | inode->i_sb->dq_op->drop(inode); | 1402 | |
| 1338 | } | 1403 | if (cnt < MAXQUOTAS) |
| 1339 | } | 1404 | __dquot_drop(inode); |
| 1340 | EXPORT_SYMBOL(vfs_dq_drop); | 1405 | } |
| 1406 | EXPORT_SYMBOL(dquot_drop); | ||
| 1341 | 1407 | ||
| 1342 | /* | 1408 | /* |
| 1343 | * inode_reserved_space is managed internally by quota, and protected by | 1409 | * inode_reserved_space is managed internally by quota, and protected by |
| @@ -1351,28 +1417,30 @@ static qsize_t *inode_reserved_space(struct inode * inode) | |||
| 1351 | return inode->i_sb->dq_op->get_reserved_space(inode); | 1417 | return inode->i_sb->dq_op->get_reserved_space(inode); |
| 1352 | } | 1418 | } |
| 1353 | 1419 | ||
| 1354 | static void inode_add_rsv_space(struct inode *inode, qsize_t number) | 1420 | void inode_add_rsv_space(struct inode *inode, qsize_t number) |
| 1355 | { | 1421 | { |
| 1356 | spin_lock(&inode->i_lock); | 1422 | spin_lock(&inode->i_lock); |
| 1357 | *inode_reserved_space(inode) += number; | 1423 | *inode_reserved_space(inode) += number; |
| 1358 | spin_unlock(&inode->i_lock); | 1424 | spin_unlock(&inode->i_lock); |
| 1359 | } | 1425 | } |
| 1426 | EXPORT_SYMBOL(inode_add_rsv_space); | ||
| 1360 | 1427 | ||
| 1361 | 1428 | void inode_claim_rsv_space(struct inode *inode, qsize_t number) | |
| 1362 | static void inode_claim_rsv_space(struct inode *inode, qsize_t number) | ||
| 1363 | { | 1429 | { |
| 1364 | spin_lock(&inode->i_lock); | 1430 | spin_lock(&inode->i_lock); |
| 1365 | *inode_reserved_space(inode) -= number; | 1431 | *inode_reserved_space(inode) -= number; |
| 1366 | __inode_add_bytes(inode, number); | 1432 | __inode_add_bytes(inode, number); |
| 1367 | spin_unlock(&inode->i_lock); | 1433 | spin_unlock(&inode->i_lock); |
| 1368 | } | 1434 | } |
| 1435 | EXPORT_SYMBOL(inode_claim_rsv_space); | ||
| 1369 | 1436 | ||
| 1370 | static void inode_sub_rsv_space(struct inode *inode, qsize_t number) | 1437 | void inode_sub_rsv_space(struct inode *inode, qsize_t number) |
| 1371 | { | 1438 | { |
| 1372 | spin_lock(&inode->i_lock); | 1439 | spin_lock(&inode->i_lock); |
| 1373 | *inode_reserved_space(inode) -= number; | 1440 | *inode_reserved_space(inode) -= number; |
| 1374 | spin_unlock(&inode->i_lock); | 1441 | spin_unlock(&inode->i_lock); |
| 1375 | } | 1442 | } |
| 1443 | EXPORT_SYMBOL(inode_sub_rsv_space); | ||
| 1376 | 1444 | ||
| 1377 | static qsize_t inode_get_rsv_space(struct inode *inode) | 1445 | static qsize_t inode_get_rsv_space(struct inode *inode) |
| 1378 | { | 1446 | { |
| @@ -1404,38 +1472,34 @@ static void inode_decr_space(struct inode *inode, qsize_t number, int reserve) | |||
| 1404 | } | 1472 | } |
| 1405 | 1473 | ||
| 1406 | /* | 1474 | /* |
| 1407 | * Following four functions update i_blocks+i_bytes fields and | 1475 | * This functions updates i_blocks+i_bytes fields and quota information |
| 1408 | * quota information (together with appropriate checks) | 1476 | * (together with appropriate checks). |
| 1409 | * NOTE: We absolutely rely on the fact that caller dirties | 1477 | * |
| 1410 | * the inode (usually macros in quotaops.h care about this) and | 1478 | * NOTE: We absolutely rely on the fact that caller dirties the inode |
| 1411 | * holds a handle for the current transaction so that dquot write and | 1479 | * (usually helpers in quotaops.h care about this) and holds a handle for |
| 1412 | * inode write go into the same transaction. | 1480 | * the current transaction so that dquot write and inode write go into the |
| 1481 | * same transaction. | ||
| 1413 | */ | 1482 | */ |
| 1414 | 1483 | ||
| 1415 | /* | 1484 | /* |
| 1416 | * This operation can block, but only after everything is updated | 1485 | * This operation can block, but only after everything is updated |
| 1417 | */ | 1486 | */ |
| 1418 | int __dquot_alloc_space(struct inode *inode, qsize_t number, | 1487 | int __dquot_alloc_space(struct inode *inode, qsize_t number, |
| 1419 | int warn, int reserve) | 1488 | int warn, int reserve) |
| 1420 | { | 1489 | { |
| 1421 | int cnt, ret = QUOTA_OK; | 1490 | int cnt, ret = 0; |
| 1422 | char warntype[MAXQUOTAS]; | 1491 | char warntype[MAXQUOTAS]; |
| 1423 | 1492 | ||
| 1424 | /* | 1493 | /* |
| 1425 | * First test before acquiring mutex - solves deadlocks when we | 1494 | * First test before acquiring mutex - solves deadlocks when we |
| 1426 | * re-enter the quota code and are already holding the mutex | 1495 | * re-enter the quota code and are already holding the mutex |
| 1427 | */ | 1496 | */ |
| 1428 | if (IS_NOQUOTA(inode)) { | 1497 | if (!sb_any_quota_active(inode->i_sb) || IS_NOQUOTA(inode)) { |
| 1429 | inode_incr_space(inode, number, reserve); | 1498 | inode_incr_space(inode, number, reserve); |
| 1430 | goto out; | 1499 | goto out; |
| 1431 | } | 1500 | } |
| 1432 | 1501 | ||
| 1433 | down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | 1502 | down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); |
| 1434 | if (IS_NOQUOTA(inode)) { | ||
| 1435 | inode_incr_space(inode, number, reserve); | ||
| 1436 | goto out_unlock; | ||
| 1437 | } | ||
| 1438 | |||
| 1439 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) | 1503 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) |
| 1440 | warntype[cnt] = QUOTA_NL_NOWARN; | 1504 | warntype[cnt] = QUOTA_NL_NOWARN; |
| 1441 | 1505 | ||
| @@ -1443,9 +1507,9 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number, | |||
| 1443 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | 1507 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { |
| 1444 | if (!inode->i_dquot[cnt]) | 1508 | if (!inode->i_dquot[cnt]) |
| 1445 | continue; | 1509 | continue; |
| 1446 | if (check_bdq(inode->i_dquot[cnt], number, warn, warntype+cnt) | 1510 | ret = check_bdq(inode->i_dquot[cnt], number, !warn, |
| 1447 | == NO_QUOTA) { | 1511 | warntype+cnt); |
| 1448 | ret = NO_QUOTA; | 1512 | if (ret) { |
| 1449 | spin_unlock(&dq_data_lock); | 1513 | spin_unlock(&dq_data_lock); |
| 1450 | goto out_flush_warn; | 1514 | goto out_flush_warn; |
| 1451 | } | 1515 | } |
| @@ -1466,61 +1530,45 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number, | |||
| 1466 | mark_all_dquot_dirty(inode->i_dquot); | 1530 | mark_all_dquot_dirty(inode->i_dquot); |
| 1467 | out_flush_warn: | 1531 | out_flush_warn: |
| 1468 | flush_warnings(inode->i_dquot, warntype); | 1532 | flush_warnings(inode->i_dquot, warntype); |
| 1469 | out_unlock: | ||
| 1470 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | 1533 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); |
| 1471 | out: | 1534 | out: |
| 1472 | return ret; | 1535 | return ret; |
| 1473 | } | 1536 | } |
| 1474 | 1537 | EXPORT_SYMBOL(__dquot_alloc_space); | |
| 1475 | int dquot_alloc_space(struct inode *inode, qsize_t number, int warn) | ||
| 1476 | { | ||
| 1477 | return __dquot_alloc_space(inode, number, warn, 0); | ||
| 1478 | } | ||
| 1479 | EXPORT_SYMBOL(dquot_alloc_space); | ||
| 1480 | |||
| 1481 | int dquot_reserve_space(struct inode *inode, qsize_t number, int warn) | ||
| 1482 | { | ||
| 1483 | return __dquot_alloc_space(inode, number, warn, 1); | ||
| 1484 | } | ||
| 1485 | EXPORT_SYMBOL(dquot_reserve_space); | ||
| 1486 | 1538 | ||
| 1487 | /* | 1539 | /* |
| 1488 | * This operation can block, but only after everything is updated | 1540 | * This operation can block, but only after everything is updated |
| 1489 | */ | 1541 | */ |
| 1490 | int dquot_alloc_inode(const struct inode *inode, qsize_t number) | 1542 | int dquot_alloc_inode(const struct inode *inode) |
| 1491 | { | 1543 | { |
| 1492 | int cnt, ret = NO_QUOTA; | 1544 | int cnt, ret = 0; |
| 1493 | char warntype[MAXQUOTAS]; | 1545 | char warntype[MAXQUOTAS]; |
| 1494 | 1546 | ||
| 1495 | /* First test before acquiring mutex - solves deadlocks when we | 1547 | /* First test before acquiring mutex - solves deadlocks when we |
| 1496 | * re-enter the quota code and are already holding the mutex */ | 1548 | * re-enter the quota code and are already holding the mutex */ |
| 1497 | if (IS_NOQUOTA(inode)) | 1549 | if (!sb_any_quota_active(inode->i_sb) || IS_NOQUOTA(inode)) |
| 1498 | return QUOTA_OK; | 1550 | return 0; |
| 1499 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) | 1551 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) |
| 1500 | warntype[cnt] = QUOTA_NL_NOWARN; | 1552 | warntype[cnt] = QUOTA_NL_NOWARN; |
| 1501 | down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | 1553 | down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); |
| 1502 | if (IS_NOQUOTA(inode)) { | ||
| 1503 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
| 1504 | return QUOTA_OK; | ||
| 1505 | } | ||
| 1506 | spin_lock(&dq_data_lock); | 1554 | spin_lock(&dq_data_lock); |
| 1507 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | 1555 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { |
| 1508 | if (!inode->i_dquot[cnt]) | 1556 | if (!inode->i_dquot[cnt]) |
| 1509 | continue; | 1557 | continue; |
| 1510 | if (check_idq(inode->i_dquot[cnt], number, warntype+cnt) | 1558 | ret = check_idq(inode->i_dquot[cnt], 1, warntype + cnt); |
| 1511 | == NO_QUOTA) | 1559 | if (ret) |
| 1512 | goto warn_put_all; | 1560 | goto warn_put_all; |
| 1513 | } | 1561 | } |
| 1514 | 1562 | ||
| 1515 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | 1563 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { |
| 1516 | if (!inode->i_dquot[cnt]) | 1564 | if (!inode->i_dquot[cnt]) |
| 1517 | continue; | 1565 | continue; |
| 1518 | dquot_incr_inodes(inode->i_dquot[cnt], number); | 1566 | dquot_incr_inodes(inode->i_dquot[cnt], 1); |
| 1519 | } | 1567 | } |
| 1520 | ret = QUOTA_OK; | 1568 | |
| 1521 | warn_put_all: | 1569 | warn_put_all: |
| 1522 | spin_unlock(&dq_data_lock); | 1570 | spin_unlock(&dq_data_lock); |
| 1523 | if (ret == QUOTA_OK) | 1571 | if (ret == 0) |
| 1524 | mark_all_dquot_dirty(inode->i_dquot); | 1572 | mark_all_dquot_dirty(inode->i_dquot); |
| 1525 | flush_warnings(inode->i_dquot, warntype); | 1573 | flush_warnings(inode->i_dquot, warntype); |
| 1526 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | 1574 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); |
| @@ -1528,23 +1576,19 @@ warn_put_all: | |||
| 1528 | } | 1576 | } |
| 1529 | EXPORT_SYMBOL(dquot_alloc_inode); | 1577 | EXPORT_SYMBOL(dquot_alloc_inode); |
| 1530 | 1578 | ||
| 1531 | int dquot_claim_space(struct inode *inode, qsize_t number) | 1579 | /* |
| 1580 | * Convert in-memory reserved quotas to real consumed quotas | ||
| 1581 | */ | ||
| 1582 | int dquot_claim_space_nodirty(struct inode *inode, qsize_t number) | ||
| 1532 | { | 1583 | { |
| 1533 | int cnt; | 1584 | int cnt; |
| 1534 | int ret = QUOTA_OK; | ||
| 1535 | 1585 | ||
| 1536 | if (IS_NOQUOTA(inode)) { | 1586 | if (!sb_any_quota_active(inode->i_sb) || IS_NOQUOTA(inode)) { |
| 1537 | inode_claim_rsv_space(inode, number); | 1587 | inode_claim_rsv_space(inode, number); |
| 1538 | goto out; | 1588 | return 0; |
| 1539 | } | 1589 | } |
| 1540 | 1590 | ||
| 1541 | down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | 1591 | down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); |
| 1542 | if (IS_NOQUOTA(inode)) { | ||
| 1543 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
| 1544 | inode_claim_rsv_space(inode, number); | ||
| 1545 | goto out; | ||
| 1546 | } | ||
| 1547 | |||
| 1548 | spin_lock(&dq_data_lock); | 1592 | spin_lock(&dq_data_lock); |
| 1549 | /* Claim reserved quotas to allocated quotas */ | 1593 | /* Claim reserved quotas to allocated quotas */ |
| 1550 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | 1594 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { |
| @@ -1557,33 +1601,26 @@ int dquot_claim_space(struct inode *inode, qsize_t number) | |||
| 1557 | spin_unlock(&dq_data_lock); | 1601 | spin_unlock(&dq_data_lock); |
| 1558 | mark_all_dquot_dirty(inode->i_dquot); | 1602 | mark_all_dquot_dirty(inode->i_dquot); |
| 1559 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | 1603 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); |
| 1560 | out: | 1604 | return 0; |
| 1561 | return ret; | ||
| 1562 | } | 1605 | } |
| 1563 | EXPORT_SYMBOL(dquot_claim_space); | 1606 | EXPORT_SYMBOL(dquot_claim_space_nodirty); |
| 1564 | 1607 | ||
| 1565 | /* | 1608 | /* |
| 1566 | * This operation can block, but only after everything is updated | 1609 | * This operation can block, but only after everything is updated |
| 1567 | */ | 1610 | */ |
| 1568 | int __dquot_free_space(struct inode *inode, qsize_t number, int reserve) | 1611 | void __dquot_free_space(struct inode *inode, qsize_t number, int reserve) |
| 1569 | { | 1612 | { |
| 1570 | unsigned int cnt; | 1613 | unsigned int cnt; |
| 1571 | char warntype[MAXQUOTAS]; | 1614 | char warntype[MAXQUOTAS]; |
| 1572 | 1615 | ||
| 1573 | /* First test before acquiring mutex - solves deadlocks when we | 1616 | /* First test before acquiring mutex - solves deadlocks when we |
| 1574 | * re-enter the quota code and are already holding the mutex */ | 1617 | * re-enter the quota code and are already holding the mutex */ |
| 1575 | if (IS_NOQUOTA(inode)) { | 1618 | if (!sb_any_quota_active(inode->i_sb) || IS_NOQUOTA(inode)) { |
| 1576 | out_sub: | ||
| 1577 | inode_decr_space(inode, number, reserve); | 1619 | inode_decr_space(inode, number, reserve); |
| 1578 | return QUOTA_OK; | 1620 | return; |
| 1579 | } | 1621 | } |
| 1580 | 1622 | ||
| 1581 | down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | 1623 | down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); |
| 1582 | /* Now recheck reliably when holding dqptr_sem */ | ||
| 1583 | if (IS_NOQUOTA(inode)) { | ||
| 1584 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
| 1585 | goto out_sub; | ||
| 1586 | } | ||
| 1587 | spin_lock(&dq_data_lock); | 1624 | spin_lock(&dq_data_lock); |
| 1588 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | 1625 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { |
| 1589 | if (!inode->i_dquot[cnt]) | 1626 | if (!inode->i_dquot[cnt]) |
| @@ -1603,56 +1640,34 @@ out_sub: | |||
| 1603 | out_unlock: | 1640 | out_unlock: |
| 1604 | flush_warnings(inode->i_dquot, warntype); | 1641 | flush_warnings(inode->i_dquot, warntype); |
| 1605 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | 1642 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); |
| 1606 | return QUOTA_OK; | ||
| 1607 | } | ||
| 1608 | |||
| 1609 | int dquot_free_space(struct inode *inode, qsize_t number) | ||
| 1610 | { | ||
| 1611 | return __dquot_free_space(inode, number, 0); | ||
| 1612 | } | 1643 | } |
| 1613 | EXPORT_SYMBOL(dquot_free_space); | 1644 | EXPORT_SYMBOL(__dquot_free_space); |
| 1614 | |||
| 1615 | /* | ||
| 1616 | * Release reserved quota space | ||
| 1617 | */ | ||
| 1618 | void dquot_release_reserved_space(struct inode *inode, qsize_t number) | ||
| 1619 | { | ||
| 1620 | __dquot_free_space(inode, number, 1); | ||
| 1621 | |||
| 1622 | } | ||
| 1623 | EXPORT_SYMBOL(dquot_release_reserved_space); | ||
| 1624 | 1645 | ||
| 1625 | /* | 1646 | /* |
| 1626 | * This operation can block, but only after everything is updated | 1647 | * This operation can block, but only after everything is updated |
| 1627 | */ | 1648 | */ |
| 1628 | int dquot_free_inode(const struct inode *inode, qsize_t number) | 1649 | void dquot_free_inode(const struct inode *inode) |
| 1629 | { | 1650 | { |
| 1630 | unsigned int cnt; | 1651 | unsigned int cnt; |
| 1631 | char warntype[MAXQUOTAS]; | 1652 | char warntype[MAXQUOTAS]; |
| 1632 | 1653 | ||
| 1633 | /* First test before acquiring mutex - solves deadlocks when we | 1654 | /* First test before acquiring mutex - solves deadlocks when we |
| 1634 | * re-enter the quota code and are already holding the mutex */ | 1655 | * re-enter the quota code and are already holding the mutex */ |
| 1635 | if (IS_NOQUOTA(inode)) | 1656 | if (!sb_any_quota_active(inode->i_sb) || IS_NOQUOTA(inode)) |
| 1636 | return QUOTA_OK; | 1657 | return; |
| 1637 | 1658 | ||
| 1638 | down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | 1659 | down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); |
| 1639 | /* Now recheck reliably when holding dqptr_sem */ | ||
| 1640 | if (IS_NOQUOTA(inode)) { | ||
| 1641 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
| 1642 | return QUOTA_OK; | ||
| 1643 | } | ||
| 1644 | spin_lock(&dq_data_lock); | 1660 | spin_lock(&dq_data_lock); |
| 1645 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | 1661 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { |
| 1646 | if (!inode->i_dquot[cnt]) | 1662 | if (!inode->i_dquot[cnt]) |
| 1647 | continue; | 1663 | continue; |
| 1648 | warntype[cnt] = info_idq_free(inode->i_dquot[cnt], number); | 1664 | warntype[cnt] = info_idq_free(inode->i_dquot[cnt], 1); |
| 1649 | dquot_decr_inodes(inode->i_dquot[cnt], number); | 1665 | dquot_decr_inodes(inode->i_dquot[cnt], 1); |
| 1650 | } | 1666 | } |
| 1651 | spin_unlock(&dq_data_lock); | 1667 | spin_unlock(&dq_data_lock); |
| 1652 | mark_all_dquot_dirty(inode->i_dquot); | 1668 | mark_all_dquot_dirty(inode->i_dquot); |
| 1653 | flush_warnings(inode->i_dquot, warntype); | 1669 | flush_warnings(inode->i_dquot, warntype); |
| 1654 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | 1670 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); |
| 1655 | return QUOTA_OK; | ||
| 1656 | } | 1671 | } |
| 1657 | EXPORT_SYMBOL(dquot_free_inode); | 1672 | EXPORT_SYMBOL(dquot_free_inode); |
| 1658 | 1673 | ||
| @@ -1662,37 +1677,31 @@ EXPORT_SYMBOL(dquot_free_inode); | |||
| 1662 | * This operation can block, but only after everything is updated | 1677 | * This operation can block, but only after everything is updated |
| 1663 | * A transaction must be started when entering this function. | 1678 | * A transaction must be started when entering this function. |
| 1664 | */ | 1679 | */ |
| 1665 | int dquot_transfer(struct inode *inode, struct iattr *iattr) | 1680 | static int __dquot_transfer(struct inode *inode, qid_t *chid, unsigned long mask) |
| 1666 | { | 1681 | { |
| 1667 | qsize_t space, cur_space; | 1682 | qsize_t space, cur_space; |
| 1668 | qsize_t rsv_space = 0; | 1683 | qsize_t rsv_space = 0; |
| 1669 | struct dquot *transfer_from[MAXQUOTAS]; | 1684 | struct dquot *transfer_from[MAXQUOTAS]; |
| 1670 | struct dquot *transfer_to[MAXQUOTAS]; | 1685 | struct dquot *transfer_to[MAXQUOTAS]; |
| 1671 | int cnt, ret = QUOTA_OK; | 1686 | int cnt, ret = 0; |
| 1672 | int chuid = iattr->ia_valid & ATTR_UID && inode->i_uid != iattr->ia_uid, | ||
| 1673 | chgid = iattr->ia_valid & ATTR_GID && inode->i_gid != iattr->ia_gid; | ||
| 1674 | char warntype_to[MAXQUOTAS]; | 1687 | char warntype_to[MAXQUOTAS]; |
| 1675 | char warntype_from_inodes[MAXQUOTAS], warntype_from_space[MAXQUOTAS]; | 1688 | char warntype_from_inodes[MAXQUOTAS], warntype_from_space[MAXQUOTAS]; |
| 1676 | 1689 | ||
| 1677 | /* First test before acquiring mutex - solves deadlocks when we | 1690 | /* First test before acquiring mutex - solves deadlocks when we |
| 1678 | * re-enter the quota code and are already holding the mutex */ | 1691 | * re-enter the quota code and are already holding the mutex */ |
| 1679 | if (IS_NOQUOTA(inode)) | 1692 | if (IS_NOQUOTA(inode)) |
| 1680 | return QUOTA_OK; | 1693 | return 0; |
| 1681 | /* Initialize the arrays */ | 1694 | /* Initialize the arrays */ |
| 1682 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | 1695 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { |
| 1683 | transfer_from[cnt] = NULL; | 1696 | transfer_from[cnt] = NULL; |
| 1684 | transfer_to[cnt] = NULL; | 1697 | transfer_to[cnt] = NULL; |
| 1685 | warntype_to[cnt] = QUOTA_NL_NOWARN; | 1698 | warntype_to[cnt] = QUOTA_NL_NOWARN; |
| 1686 | } | 1699 | } |
| 1687 | if (chuid) | 1700 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { |
| 1688 | transfer_to[USRQUOTA] = dqget(inode->i_sb, iattr->ia_uid, | 1701 | if (mask & (1 << cnt)) |
| 1689 | USRQUOTA); | 1702 | transfer_to[cnt] = dqget(inode->i_sb, chid[cnt], cnt); |
| 1690 | if (chgid) | 1703 | } |
| 1691 | transfer_to[GRPQUOTA] = dqget(inode->i_sb, iattr->ia_gid, | ||
| 1692 | GRPQUOTA); | ||
| 1693 | |||
| 1694 | down_write(&sb_dqopt(inode->i_sb)->dqptr_sem); | 1704 | down_write(&sb_dqopt(inode->i_sb)->dqptr_sem); |
| 1695 | /* Now recheck reliably when holding dqptr_sem */ | ||
| 1696 | if (IS_NOQUOTA(inode)) { /* File without quota accounting? */ | 1705 | if (IS_NOQUOTA(inode)) { /* File without quota accounting? */ |
| 1697 | up_write(&sb_dqopt(inode->i_sb)->dqptr_sem); | 1706 | up_write(&sb_dqopt(inode->i_sb)->dqptr_sem); |
| 1698 | goto put_all; | 1707 | goto put_all; |
| @@ -1706,9 +1715,11 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr) | |||
| 1706 | if (!transfer_to[cnt]) | 1715 | if (!transfer_to[cnt]) |
| 1707 | continue; | 1716 | continue; |
| 1708 | transfer_from[cnt] = inode->i_dquot[cnt]; | 1717 | transfer_from[cnt] = inode->i_dquot[cnt]; |
| 1709 | if (check_idq(transfer_to[cnt], 1, warntype_to + cnt) == | 1718 | ret = check_idq(transfer_to[cnt], 1, warntype_to + cnt); |
| 1710 | NO_QUOTA || check_bdq(transfer_to[cnt], space, 0, | 1719 | if (ret) |
| 1711 | warntype_to + cnt) == NO_QUOTA) | 1720 | goto over_quota; |
| 1721 | ret = check_bdq(transfer_to[cnt], space, 0, warntype_to + cnt); | ||
| 1722 | if (ret) | ||
| 1712 | goto over_quota; | 1723 | goto over_quota; |
| 1713 | } | 1724 | } |
| 1714 | 1725 | ||
| @@ -1762,22 +1773,32 @@ over_quota: | |||
| 1762 | /* Clear dquot pointers we don't want to dqput() */ | 1773 | /* Clear dquot pointers we don't want to dqput() */ |
| 1763 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) | 1774 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) |
| 1764 | transfer_from[cnt] = NULL; | 1775 | transfer_from[cnt] = NULL; |
| 1765 | ret = NO_QUOTA; | ||
| 1766 | goto warn_put_all; | 1776 | goto warn_put_all; |
| 1767 | } | 1777 | } |
| 1768 | EXPORT_SYMBOL(dquot_transfer); | ||
| 1769 | 1778 | ||
| 1770 | /* Wrapper for transferring ownership of an inode */ | 1779 | /* Wrapper for transferring ownership of an inode for uid/gid only |
| 1771 | int vfs_dq_transfer(struct inode *inode, struct iattr *iattr) | 1780 | * Called from FSXXX_setattr() |
| 1781 | */ | ||
| 1782 | int dquot_transfer(struct inode *inode, struct iattr *iattr) | ||
| 1772 | { | 1783 | { |
| 1784 | qid_t chid[MAXQUOTAS]; | ||
| 1785 | unsigned long mask = 0; | ||
| 1786 | |||
| 1787 | if (iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid) { | ||
| 1788 | mask |= 1 << USRQUOTA; | ||
| 1789 | chid[USRQUOTA] = iattr->ia_uid; | ||
| 1790 | } | ||
| 1791 | if (iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid) { | ||
| 1792 | mask |= 1 << GRPQUOTA; | ||
| 1793 | chid[GRPQUOTA] = iattr->ia_gid; | ||
| 1794 | } | ||
| 1773 | if (sb_any_quota_active(inode->i_sb) && !IS_NOQUOTA(inode)) { | 1795 | if (sb_any_quota_active(inode->i_sb) && !IS_NOQUOTA(inode)) { |
| 1774 | vfs_dq_init(inode); | 1796 | dquot_initialize(inode); |
| 1775 | if (inode->i_sb->dq_op->transfer(inode, iattr) == NO_QUOTA) | 1797 | return __dquot_transfer(inode, chid, mask); |
| 1776 | return 1; | ||
| 1777 | } | 1798 | } |
| 1778 | return 0; | 1799 | return 0; |
| 1779 | } | 1800 | } |
| 1780 | EXPORT_SYMBOL(vfs_dq_transfer); | 1801 | EXPORT_SYMBOL(dquot_transfer); |
| 1781 | 1802 | ||
| 1782 | /* | 1803 | /* |
| 1783 | * Write info of quota file to disk | 1804 | * Write info of quota file to disk |
| @@ -1798,13 +1819,6 @@ EXPORT_SYMBOL(dquot_commit_info); | |||
| 1798 | * Definitions of diskquota operations. | 1819 | * Definitions of diskquota operations. |
| 1799 | */ | 1820 | */ |
| 1800 | const struct dquot_operations dquot_operations = { | 1821 | const struct dquot_operations dquot_operations = { |
| 1801 | .initialize = dquot_initialize, | ||
| 1802 | .drop = dquot_drop, | ||
| 1803 | .alloc_space = dquot_alloc_space, | ||
| 1804 | .alloc_inode = dquot_alloc_inode, | ||
| 1805 | .free_space = dquot_free_space, | ||
| 1806 | .free_inode = dquot_free_inode, | ||
| 1807 | .transfer = dquot_transfer, | ||
| 1808 | .write_dquot = dquot_commit, | 1822 | .write_dquot = dquot_commit, |
| 1809 | .acquire_dquot = dquot_acquire, | 1823 | .acquire_dquot = dquot_acquire, |
| 1810 | .release_dquot = dquot_release, | 1824 | .release_dquot = dquot_release, |
| @@ -1815,6 +1829,20 @@ const struct dquot_operations dquot_operations = { | |||
| 1815 | }; | 1829 | }; |
| 1816 | 1830 | ||
| 1817 | /* | 1831 | /* |
| 1832 | * Generic helper for ->open on filesystems supporting disk quotas. | ||
| 1833 | */ | ||
| 1834 | int dquot_file_open(struct inode *inode, struct file *file) | ||
| 1835 | { | ||
| 1836 | int error; | ||
| 1837 | |||
| 1838 | error = generic_file_open(inode, file); | ||
| 1839 | if (!error && (file->f_mode & FMODE_WRITE)) | ||
| 1840 | dquot_initialize(inode); | ||
| 1841 | return error; | ||
| 1842 | } | ||
| 1843 | EXPORT_SYMBOL(dquot_file_open); | ||
| 1844 | |||
| 1845 | /* | ||
| 1818 | * Turn quota off on a device. type == -1 ==> quotaoff for all types (umount) | 1846 | * Turn quota off on a device. type == -1 ==> quotaoff for all types (umount) |
| 1819 | */ | 1847 | */ |
| 1820 | int vfs_quota_disable(struct super_block *sb, int type, unsigned int flags) | 1848 | int vfs_quota_disable(struct super_block *sb, int type, unsigned int flags) |
| @@ -1993,11 +2021,13 @@ static int vfs_load_quota_inode(struct inode *inode, int type, int format_id, | |||
| 1993 | } | 2021 | } |
| 1994 | 2022 | ||
| 1995 | if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) { | 2023 | if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) { |
| 1996 | /* As we bypass the pagecache we must now flush the inode so | 2024 | /* As we bypass the pagecache we must now flush all the |
| 1997 | * that we see all the changes from userspace... */ | 2025 | * dirty data and invalidate caches so that kernel sees |
| 1998 | write_inode_now(inode, 1); | 2026 | * changes from userspace. It is not enough to just flush |
| 1999 | /* And now flush the block cache so that kernel sees the | 2027 | * the quota file since if blocksize < pagesize, invalidation |
| 2000 | * changes */ | 2028 | * of the cache could fail because of other unrelated dirty |
| 2029 | * data */ | ||
| 2030 | sync_filesystem(sb); | ||
| 2001 | invalidate_bdev(sb->s_bdev); | 2031 | invalidate_bdev(sb->s_bdev); |
| 2002 | } | 2032 | } |
| 2003 | mutex_lock(&dqopt->dqonoff_mutex); | 2033 | mutex_lock(&dqopt->dqonoff_mutex); |
| @@ -2010,14 +2040,16 @@ static int vfs_load_quota_inode(struct inode *inode, int type, int format_id, | |||
| 2010 | /* We don't want quota and atime on quota files (deadlocks | 2040 | /* We don't want quota and atime on quota files (deadlocks |
| 2011 | * possible) Also nobody should write to the file - we use | 2041 | * possible) Also nobody should write to the file - we use |
| 2012 | * special IO operations which ignore the immutable bit. */ | 2042 | * special IO operations which ignore the immutable bit. */ |
| 2013 | down_write(&dqopt->dqptr_sem); | ||
| 2014 | mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA); | 2043 | mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA); |
| 2015 | oldflags = inode->i_flags & (S_NOATIME | S_IMMUTABLE | | 2044 | oldflags = inode->i_flags & (S_NOATIME | S_IMMUTABLE | |
| 2016 | S_NOQUOTA); | 2045 | S_NOQUOTA); |
| 2017 | inode->i_flags |= S_NOQUOTA | S_NOATIME | S_IMMUTABLE; | 2046 | inode->i_flags |= S_NOQUOTA | S_NOATIME | S_IMMUTABLE; |
| 2018 | mutex_unlock(&inode->i_mutex); | 2047 | mutex_unlock(&inode->i_mutex); |
| 2019 | up_write(&dqopt->dqptr_sem); | 2048 | /* |
| 2020 | sb->dq_op->drop(inode); | 2049 | * When S_NOQUOTA is set, remove dquot references as no more |
| 2050 | * references can be added | ||
| 2051 | */ | ||
| 2052 | __dquot_drop(inode); | ||
| 2021 | } | 2053 | } |
| 2022 | 2054 | ||
| 2023 | error = -EIO; | 2055 | error = -EIO; |
| @@ -2053,14 +2085,12 @@ out_file_init: | |||
| 2053 | iput(inode); | 2085 | iput(inode); |
| 2054 | out_lock: | 2086 | out_lock: |
| 2055 | if (oldflags != -1) { | 2087 | if (oldflags != -1) { |
| 2056 | down_write(&dqopt->dqptr_sem); | ||
| 2057 | mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA); | 2088 | mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA); |
| 2058 | /* Set the flags back (in the case of accidental quotaon() | 2089 | /* Set the flags back (in the case of accidental quotaon() |
| 2059 | * on a wrong file we don't want to mess up the flags) */ | 2090 | * on a wrong file we don't want to mess up the flags) */ |
| 2060 | inode->i_flags &= ~(S_NOATIME | S_NOQUOTA | S_IMMUTABLE); | 2091 | inode->i_flags &= ~(S_NOATIME | S_NOQUOTA | S_IMMUTABLE); |
| 2061 | inode->i_flags |= oldflags; | 2092 | inode->i_flags |= oldflags; |
| 2062 | mutex_unlock(&inode->i_mutex); | 2093 | mutex_unlock(&inode->i_mutex); |
| 2063 | up_write(&dqopt->dqptr_sem); | ||
| 2064 | } | 2094 | } |
| 2065 | mutex_unlock(&dqopt->dqonoff_mutex); | 2095 | mutex_unlock(&dqopt->dqonoff_mutex); |
| 2066 | out_fmt: | 2096 | out_fmt: |
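Editor's sketch (not part of the patch): after the conversion above, the dquot helpers report failure as 0/-EDQUOT instead of QUOTA_OK/NO_QUOTA, and dquot_file_open() becomes the generic ->open for quota-aware filesystems while dquot_transfer() is meant to be called from a filesystem's setattr. A minimal illustration of the wiring follows; the myfs_* names are hypothetical placeholders, while dquot_file_open(), dquot_transfer() and inode_setattr() are the in-kernel interfaces of this era.

#include <linux/fs.h>
#include <linux/quotaops.h>

/* Hypothetical filesystem glue; only the dquot_* helpers are real. */
static const struct file_operations myfs_file_operations = {
	.open	= dquot_file_open,	/* runs dquot_initialize() on writable opens */
	/* ... other methods elided ... */
};

static int myfs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int error;

	/* dquot_transfer() now returns 0 or -EDQUOT directly and ignores
	 * attributes other than a changed uid/gid */
	error = dquot_transfer(inode, attr);
	if (error)
		return error;
	return inode_setattr(inode, attr);
}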
diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c new file mode 100644 index 000000000000..2663ed90fb03 --- /dev/null +++ b/fs/quota/netlink.c | |||
| @@ -0,0 +1,95 @@ | |||
| 1 | |||
| 2 | #include <linux/cred.h> | ||
| 3 | #include <linux/init.h> | ||
| 4 | #include <linux/module.h> | ||
| 5 | #include <linux/kernel.h> | ||
| 6 | #include <linux/quotaops.h> | ||
| 7 | #include <linux/sched.h> | ||
| 8 | #include <net/netlink.h> | ||
| 9 | #include <net/genetlink.h> | ||
| 10 | |||
| 11 | /* Netlink family structure for quota */ | ||
| 12 | static struct genl_family quota_genl_family = { | ||
| 13 | .id = GENL_ID_GENERATE, | ||
| 14 | .hdrsize = 0, | ||
| 15 | .name = "VFS_DQUOT", | ||
| 16 | .version = 1, | ||
| 17 | .maxattr = QUOTA_NL_A_MAX, | ||
| 18 | }; | ||
| 19 | |||
| 20 | /** | ||
| 21 | * quota_send_warning - Send warning to userspace about exceeded quota | ||
| 22 | * @type: The quota type: USRQUOTA, GRPQUOTA,... | ||
| 23 | * @id: The user or group id of the quota that was exceeded | ||
| 24 | * @dev: The device on which the fs is mounted (sb->s_dev) | ||
| 25 | * @warntype: The type of the warning: QUOTA_NL_... | ||
| 26 | * | ||
| 27 | * This can be used by filesystems (including those which don't use | ||
| 28 | * dquot) to send a message to userspace relating to quota limits. | ||
| 29 | * | ||
| 30 | */ | ||
| 31 | |||
| 32 | void quota_send_warning(short type, unsigned int id, dev_t dev, | ||
| 33 | const char warntype) | ||
| 34 | { | ||
| 35 | static atomic_t seq; | ||
| 36 | struct sk_buff *skb; | ||
| 37 | void *msg_head; | ||
| 38 | int ret; | ||
| 39 | int msg_size = 4 * nla_total_size(sizeof(u32)) + | ||
| 40 | 2 * nla_total_size(sizeof(u64)); | ||
| 41 | |||
| 42 | /* We have to allocate using GFP_NOFS as we are called from a | ||
| 43 | * filesystem performing write and thus further recursion into | ||
| 44 | * the fs to free some data could cause deadlocks. */ | ||
| 45 | skb = genlmsg_new(msg_size, GFP_NOFS); | ||
| 46 | if (!skb) { | ||
| 47 | printk(KERN_ERR | ||
| 48 | "VFS: Not enough memory to send quota warning.\n"); | ||
| 49 | return; | ||
| 50 | } | ||
| 51 | msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq), | ||
| 52 | "a_genl_family, 0, QUOTA_NL_C_WARNING); | ||
| 53 | if (!msg_head) { | ||
| 54 | printk(KERN_ERR | ||
| 55 | "VFS: Cannot store netlink header in quota warning.\n"); | ||
| 56 | goto err_out; | ||
| 57 | } | ||
| 58 | ret = nla_put_u32(skb, QUOTA_NL_A_QTYPE, type); | ||
| 59 | if (ret) | ||
| 60 | goto attr_err_out; | ||
| 61 | ret = nla_put_u64(skb, QUOTA_NL_A_EXCESS_ID, id); | ||
| 62 | if (ret) | ||
| 63 | goto attr_err_out; | ||
| 64 | ret = nla_put_u32(skb, QUOTA_NL_A_WARNING, warntype); | ||
| 65 | if (ret) | ||
| 66 | goto attr_err_out; | ||
| 67 | ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MAJOR, MAJOR(dev)); | ||
| 68 | if (ret) | ||
| 69 | goto attr_err_out; | ||
| 70 | ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MINOR, MINOR(dev)); | ||
| 71 | if (ret) | ||
| 72 | goto attr_err_out; | ||
| 73 | ret = nla_put_u64(skb, QUOTA_NL_A_CAUSED_ID, current_uid()); | ||
| 74 | if (ret) | ||
| 75 | goto attr_err_out; | ||
| 76 | genlmsg_end(skb, msg_head); | ||
| 77 | |||
| 78 | genlmsg_multicast(skb, 0, quota_genl_family.id, GFP_NOFS); | ||
| 79 | return; | ||
| 80 | attr_err_out: | ||
| 81 | printk(KERN_ERR "VFS: Not enough space to compose quota message!\n"); | ||
| 82 | err_out: | ||
| 83 | kfree_skb(skb); | ||
| 84 | } | ||
| 85 | EXPORT_SYMBOL(quota_send_warning); | ||
| 86 | |||
| 87 | static int __init quota_init(void) | ||
| 88 | { | ||
| 89 | if (genl_register_family(&quota_genl_family) != 0) | ||
| 90 | printk(KERN_ERR | ||
| 91 | "VFS: Failed to create quota netlink interface.\n"); | ||
| 92 | return 0; | ||
| 93 | }; | ||
| 94 | |||
| 95 | module_init(quota_init); | ||
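As the kernel-doc above notes, quota_send_warning() can be used even by filesystems that do not use the dquot core. A minimal hedged example; the enclosing helper is invented for illustration, while the call, USRQUOTA and QUOTA_NL_BHARDWARN are the real interface:

#include <linux/fs.h>
#include <linux/quota.h>
#include <linux/quotaops.h>

/* Hypothetical caller: report that a user just hit its block hard limit. */
static void myfs_report_block_hardlimit(struct super_block *sb, uid_t uid)
{
	quota_send_warning(USRQUOTA, uid, sb->s_dev, QUOTA_NL_BHARDWARN);
}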
diff --git a/fs/quota/quota.c b/fs/quota/quota.c index ee91e2756950..95388f9b7356 100644 --- a/fs/quota/quota.c +++ b/fs/quota/quota.c | |||
| @@ -10,7 +10,6 @@ | |||
| 10 | #include <linux/slab.h> | 10 | #include <linux/slab.h> |
| 11 | #include <asm/current.h> | 11 | #include <asm/current.h> |
| 12 | #include <asm/uaccess.h> | 12 | #include <asm/uaccess.h> |
| 13 | #include <linux/compat.h> | ||
| 14 | #include <linux/kernel.h> | 13 | #include <linux/kernel.h> |
| 15 | #include <linux/security.h> | 14 | #include <linux/security.h> |
| 16 | #include <linux/syscalls.h> | 15 | #include <linux/syscalls.h> |
| @@ -18,220 +17,205 @@ | |||
| 18 | #include <linux/capability.h> | 17 | #include <linux/capability.h> |
| 19 | #include <linux/quotaops.h> | 18 | #include <linux/quotaops.h> |
| 20 | #include <linux/types.h> | 19 | #include <linux/types.h> |
| 21 | #include <net/netlink.h> | 20 | #include <linux/writeback.h> |
| 22 | #include <net/genetlink.h> | ||
| 23 | 21 | ||
| 24 | /* Check validity of generic quotactl commands */ | 22 | static int check_quotactl_permission(struct super_block *sb, int type, int cmd, |
| 25 | static int generic_quotactl_valid(struct super_block *sb, int type, int cmd, | 23 | qid_t id) |
| 26 | qid_t id) | ||
| 27 | { | 24 | { |
| 28 | if (type >= MAXQUOTAS) | ||
| 29 | return -EINVAL; | ||
| 30 | if (!sb && cmd != Q_SYNC) | ||
| 31 | return -ENODEV; | ||
| 32 | /* Is operation supported? */ | ||
| 33 | if (sb && !sb->s_qcop) | ||
| 34 | return -ENOSYS; | ||
| 35 | |||
| 36 | switch (cmd) { | 25 | switch (cmd) { |
| 37 | case Q_GETFMT: | 26 | /* these commands do not require any special privileges */ |
| 38 | break; | 27 | case Q_GETFMT: |
| 39 | case Q_QUOTAON: | 28 | case Q_SYNC: |
| 40 | if (!sb->s_qcop->quota_on) | 29 | case Q_GETINFO: |
| 41 | return -ENOSYS; | 30 | case Q_XGETQSTAT: |
| 42 | break; | 31 | case Q_XQUOTASYNC: |
| 43 | case Q_QUOTAOFF: | 32 | break; |
| 44 | if (!sb->s_qcop->quota_off) | 33 | /* allow to query information for dquots we "own" */ |
| 45 | return -ENOSYS; | 34 | case Q_GETQUOTA: |
| 46 | break; | 35 | case Q_XGETQUOTA: |
| 47 | case Q_SETINFO: | 36 | if ((type == USRQUOTA && current_euid() == id) || |
| 48 | if (!sb->s_qcop->set_info) | 37 | (type == GRPQUOTA && in_egroup_p(id))) |
| 49 | return -ENOSYS; | ||
| 50 | break; | ||
| 51 | case Q_GETINFO: | ||
| 52 | if (!sb->s_qcop->get_info) | ||
| 53 | return -ENOSYS; | ||
| 54 | break; | ||
| 55 | case Q_SETQUOTA: | ||
| 56 | if (!sb->s_qcop->set_dqblk) | ||
| 57 | return -ENOSYS; | ||
| 58 | break; | ||
| 59 | case Q_GETQUOTA: | ||
| 60 | if (!sb->s_qcop->get_dqblk) | ||
| 61 | return -ENOSYS; | ||
| 62 | break; | ||
| 63 | case Q_SYNC: | ||
| 64 | if (sb && !sb->s_qcop->quota_sync) | ||
| 65 | return -ENOSYS; | ||
| 66 | break; | 38 | break; |
| 67 | default: | 39 | /*FALLTHROUGH*/ |
| 68 | return -EINVAL; | 40 | default: |
| 41 | if (!capable(CAP_SYS_ADMIN)) | ||
| 42 | return -EPERM; | ||
| 69 | } | 43 | } |
| 70 | 44 | ||
| 71 | /* Is quota turned on for commands which need it? */ | 45 | return security_quotactl(cmd, type, id, sb); |
| 72 | switch (cmd) { | 46 | } |
| 73 | case Q_GETFMT: | ||
| 74 | case Q_GETINFO: | ||
| 75 | case Q_SETINFO: | ||
| 76 | case Q_SETQUOTA: | ||
| 77 | case Q_GETQUOTA: | ||
| 78 | /* This is just an informative test so we are satisfied | ||
| 79 | * without the lock */ | ||
| 80 | if (!sb_has_quota_active(sb, type)) | ||
| 81 | return -ESRCH; | ||
| 82 | } | ||
| 83 | 47 | ||
| 84 | /* Check privileges */ | 48 | static int quota_sync_all(int type) |
| 85 | if (cmd == Q_GETQUOTA) { | 49 | { |
| 86 | if (((type == USRQUOTA && current_euid() != id) || | 50 | struct super_block *sb; |
| 87 | (type == GRPQUOTA && !in_egroup_p(id))) && | 51 | int ret; |
| 88 | !capable(CAP_SYS_ADMIN)) | 52 | |
| 89 | return -EPERM; | 53 | if (type >= MAXQUOTAS) |
| 54 | return -EINVAL; | ||
| 55 | ret = security_quotactl(Q_SYNC, type, 0, NULL); | ||
| 56 | if (ret) | ||
| 57 | return ret; | ||
| 58 | |||
| 59 | spin_lock(&sb_lock); | ||
| 60 | restart: | ||
| 61 | list_for_each_entry(sb, &super_blocks, s_list) { | ||
| 62 | if (!sb->s_qcop || !sb->s_qcop->quota_sync) | ||
| 63 | continue; | ||
| 64 | |||
| 65 | sb->s_count++; | ||
| 66 | spin_unlock(&sb_lock); | ||
| 67 | down_read(&sb->s_umount); | ||
| 68 | if (sb->s_root) | ||
| 69 | sb->s_qcop->quota_sync(sb, type, 1); | ||
| 70 | up_read(&sb->s_umount); | ||
| 71 | spin_lock(&sb_lock); | ||
| 72 | if (__put_super_and_need_restart(sb)) | ||
| 73 | goto restart; | ||
| 90 | } | 74 | } |
| 91 | else if (cmd != Q_GETFMT && cmd != Q_SYNC && cmd != Q_GETINFO) | 75 | spin_unlock(&sb_lock); |
| 92 | if (!capable(CAP_SYS_ADMIN)) | ||
| 93 | return -EPERM; | ||
| 94 | 76 | ||
| 95 | return 0; | 77 | return 0; |
| 96 | } | 78 | } |
| 97 | 79 | ||
| 98 | /* Check validity of XFS Quota Manager commands */ | 80 | static int quota_quotaon(struct super_block *sb, int type, int cmd, qid_t id, |
| 99 | static int xqm_quotactl_valid(struct super_block *sb, int type, int cmd, | 81 | void __user *addr) |
| 100 | qid_t id) | ||
| 101 | { | 82 | { |
| 102 | if (type >= XQM_MAXQUOTAS) | 83 | char *pathname; |
| 103 | return -EINVAL; | 84 | int ret = -ENOSYS; |
| 104 | if (!sb) | 85 | |
| 105 | return -ENODEV; | 86 | pathname = getname(addr); |
| 106 | if (!sb->s_qcop) | 87 | if (IS_ERR(pathname)) |
| 107 | return -ENOSYS; | 88 | return PTR_ERR(pathname); |
| 89 | if (sb->s_qcop->quota_on) | ||
| 90 | ret = sb->s_qcop->quota_on(sb, type, id, pathname, 0); | ||
| 91 | putname(pathname); | ||
| 92 | return ret; | ||
| 93 | } | ||
| 108 | 94 | ||
| 109 | switch (cmd) { | 95 | static int quota_getfmt(struct super_block *sb, int type, void __user *addr) |
| 110 | case Q_XQUOTAON: | 96 | { |
| 111 | case Q_XQUOTAOFF: | 97 | __u32 fmt; |
| 112 | case Q_XQUOTARM: | ||
| 113 | if (!sb->s_qcop->set_xstate) | ||
| 114 | return -ENOSYS; | ||
| 115 | break; | ||
| 116 | case Q_XGETQSTAT: | ||
| 117 | if (!sb->s_qcop->get_xstate) | ||
| 118 | return -ENOSYS; | ||
| 119 | break; | ||
| 120 | case Q_XSETQLIM: | ||
| 121 | if (!sb->s_qcop->set_xquota) | ||
| 122 | return -ENOSYS; | ||
| 123 | break; | ||
| 124 | case Q_XGETQUOTA: | ||
| 125 | if (!sb->s_qcop->get_xquota) | ||
| 126 | return -ENOSYS; | ||
| 127 | break; | ||
| 128 | case Q_XQUOTASYNC: | ||
| 129 | if (!sb->s_qcop->quota_sync) | ||
| 130 | return -ENOSYS; | ||
| 131 | break; | ||
| 132 | default: | ||
| 133 | return -EINVAL; | ||
| 134 | } | ||
| 135 | 98 | ||
| 136 | /* Check privileges */ | 99 | down_read(&sb_dqopt(sb)->dqptr_sem); |
| 137 | if (cmd == Q_XGETQUOTA) { | 100 | if (!sb_has_quota_active(sb, type)) { |
| 138 | if (((type == XQM_USRQUOTA && current_euid() != id) || | 101 | up_read(&sb_dqopt(sb)->dqptr_sem); |
| 139 | (type == XQM_GRPQUOTA && !in_egroup_p(id))) && | 102 | return -ESRCH; |
| 140 | !capable(CAP_SYS_ADMIN)) | ||
| 141 | return -EPERM; | ||
| 142 | } else if (cmd != Q_XGETQSTAT && cmd != Q_XQUOTASYNC) { | ||
| 143 | if (!capable(CAP_SYS_ADMIN)) | ||
| 144 | return -EPERM; | ||
| 145 | } | 103 | } |
| 104 | fmt = sb_dqopt(sb)->info[type].dqi_format->qf_fmt_id; | ||
| 105 | up_read(&sb_dqopt(sb)->dqptr_sem); | ||
| 106 | if (copy_to_user(addr, &fmt, sizeof(fmt))) | ||
| 107 | return -EFAULT; | ||
| 108 | return 0; | ||
| 109 | } | ||
| 146 | 110 | ||
| 111 | static int quota_getinfo(struct super_block *sb, int type, void __user *addr) | ||
| 112 | { | ||
| 113 | struct if_dqinfo info; | ||
| 114 | int ret; | ||
| 115 | |||
| 116 | if (!sb_has_quota_active(sb, type)) | ||
| 117 | return -ESRCH; | ||
| 118 | if (!sb->s_qcop->get_info) | ||
| 119 | return -ENOSYS; | ||
| 120 | ret = sb->s_qcop->get_info(sb, type, &info); | ||
| 121 | if (!ret && copy_to_user(addr, &info, sizeof(info))) | ||
| 122 | return -EFAULT; | ||
| 123 | return ret; | ||
| 124 | } | ||
| 125 | |||
| 126 | static int quota_setinfo(struct super_block *sb, int type, void __user *addr) | ||
| 127 | { | ||
| 128 | struct if_dqinfo info; | ||
| 129 | |||
| 130 | if (copy_from_user(&info, addr, sizeof(info))) | ||
| 131 | return -EFAULT; | ||
| 132 | if (!sb_has_quota_active(sb, type)) | ||
| 133 | return -ESRCH; | ||
| 134 | if (!sb->s_qcop->set_info) | ||
| 135 | return -ENOSYS; | ||
| 136 | return sb->s_qcop->set_info(sb, type, &info); | ||
| 137 | } | ||
| 138 | |||
| 139 | static int quota_getquota(struct super_block *sb, int type, qid_t id, | ||
| 140 | void __user *addr) | ||
| 141 | { | ||
| 142 | struct if_dqblk idq; | ||
| 143 | int ret; | ||
| 144 | |||
| 145 | if (!sb_has_quota_active(sb, type)) | ||
| 146 | return -ESRCH; | ||
| 147 | if (!sb->s_qcop->get_dqblk) | ||
| 148 | return -ENOSYS; | ||
| 149 | ret = sb->s_qcop->get_dqblk(sb, type, id, &idq); | ||
| 150 | if (ret) | ||
| 151 | return ret; | ||
| 152 | if (copy_to_user(addr, &idq, sizeof(idq))) | ||
| 153 | return -EFAULT; | ||
| 147 | return 0; | 154 | return 0; |
| 148 | } | 155 | } |
| 149 | 156 | ||
| 150 | static int check_quotactl_valid(struct super_block *sb, int type, int cmd, | 157 | static int quota_setquota(struct super_block *sb, int type, qid_t id, |
| 151 | qid_t id) | 158 | void __user *addr) |
| 152 | { | 159 | { |
| 153 | int error; | 160 | struct if_dqblk idq; |
| 154 | 161 | ||
| 155 | if (XQM_COMMAND(cmd)) | 162 | if (copy_from_user(&idq, addr, sizeof(idq))) |
| 156 | error = xqm_quotactl_valid(sb, type, cmd, id); | 163 | return -EFAULT; |
| 157 | else | 164 | if (!sb_has_quota_active(sb, type)) |
| 158 | error = generic_quotactl_valid(sb, type, cmd, id); | 165 | return -ESRCH; |
| 159 | if (!error) | 166 | if (!sb->s_qcop->set_dqblk) |
| 160 | error = security_quotactl(cmd, type, id, sb); | 167 | return -ENOSYS; |
| 161 | return error; | 168 | return sb->s_qcop->set_dqblk(sb, type, id, &idq); |
| 162 | } | 169 | } |
| 163 | 170 | ||
| 164 | #ifdef CONFIG_QUOTA | 171 | static int quota_setxstate(struct super_block *sb, int cmd, void __user *addr) |
| 165 | void sync_quota_sb(struct super_block *sb, int type) | ||
| 166 | { | 172 | { |
| 167 | int cnt; | 173 | __u32 flags; |
| 168 | 174 | ||
| 169 | if (!sb->s_qcop->quota_sync) | 175 | if (copy_from_user(&flags, addr, sizeof(flags))) |
| 170 | return; | 176 | return -EFAULT; |
| 177 | if (!sb->s_qcop->set_xstate) | ||
| 178 | return -ENOSYS; | ||
| 179 | return sb->s_qcop->set_xstate(sb, flags, cmd); | ||
| 180 | } | ||
| 171 | 181 | ||
| 172 | sb->s_qcop->quota_sync(sb, type); | 182 | static int quota_getxstate(struct super_block *sb, void __user *addr) |
| 183 | { | ||
| 184 | struct fs_quota_stat fqs; | ||
| 185 | int ret; | ||
| 173 | 186 | ||
| 174 | if (sb_dqopt(sb)->flags & DQUOT_QUOTA_SYS_FILE) | 187 | if (!sb->s_qcop->get_xstate) |
| 175 | return; | 188 | return -ENOSYS; |
| 176 | /* This is not very clever (and fast) but currently I don't know about | 189 | ret = sb->s_qcop->get_xstate(sb, &fqs); |
| 177 | * any other simple way of getting quota data to disk and we must get | 190 | if (!ret && copy_to_user(addr, &fqs, sizeof(fqs))) |
| 178 | * them there for userspace to be visible... */ | 191 | return -EFAULT; |
| 179 | if (sb->s_op->sync_fs) | 192 | return ret; |
| 180 | sb->s_op->sync_fs(sb, 1); | 193 | } |
| 181 | sync_blockdev(sb->s_bdev); | ||
| 182 | 194 | ||
| 183 | /* | 195 | static int quota_setxquota(struct super_block *sb, int type, qid_t id, |
| 184 | * Now when everything is written we can discard the pagecache so | 196 | void __user *addr) |
| 185 | * that userspace sees the changes. | 197 | { |
| 186 | */ | 198 | struct fs_disk_quota fdq; |
| 187 | mutex_lock(&sb_dqopt(sb)->dqonoff_mutex); | 199 | |
| 188 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | 200 | if (copy_from_user(&fdq, addr, sizeof(fdq))) |
| 189 | if (type != -1 && cnt != type) | 201 | return -EFAULT; |
| 190 | continue; | 202 | if (!sb->s_qcop->set_xquota) |
| 191 | if (!sb_has_quota_active(sb, cnt)) | 203 | return -ENOSYS; |
| 192 | continue; | 204 | return sb->s_qcop->set_xquota(sb, type, id, &fdq); |
| 193 | mutex_lock_nested(&sb_dqopt(sb)->files[cnt]->i_mutex, | ||
| 194 | I_MUTEX_QUOTA); | ||
| 195 | truncate_inode_pages(&sb_dqopt(sb)->files[cnt]->i_data, 0); | ||
| 196 | mutex_unlock(&sb_dqopt(sb)->files[cnt]->i_mutex); | ||
| 197 | } | ||
| 198 | mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); | ||
| 199 | } | 205 | } |
| 200 | #endif | ||
| 201 | 206 | ||
| 202 | static void sync_dquots(int type) | 207 | static int quota_getxquota(struct super_block *sb, int type, qid_t id, |
| 208 | void __user *addr) | ||
| 203 | { | 209 | { |
| 204 | struct super_block *sb; | 210 | struct fs_disk_quota fdq; |
| 205 | int cnt; | 211 | int ret; |
| 206 | 212 | ||
| 207 | spin_lock(&sb_lock); | 213 | if (!sb->s_qcop->get_xquota) |
| 208 | restart: | 214 | return -ENOSYS; |
| 209 | list_for_each_entry(sb, &super_blocks, s_list) { | 215 | ret = sb->s_qcop->get_xquota(sb, type, id, &fdq); |
| 210 | /* This test just improves performance so it needn't be | 216 | if (!ret && copy_to_user(addr, &fdq, sizeof(fdq))) |
| 211 | * reliable... */ | 217 | return -EFAULT; |
| 212 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | 218 | return ret; |
| 213 | if (type != -1 && type != cnt) | ||
| 214 | continue; | ||
| 215 | if (!sb_has_quota_active(sb, cnt)) | ||
| 216 | continue; | ||
| 217 | if (!info_dirty(&sb_dqopt(sb)->info[cnt]) && | ||
| 218 | list_empty(&sb_dqopt(sb)->info[cnt].dqi_dirty_list)) | ||
| 219 | continue; | ||
| 220 | break; | ||
| 221 | } | ||
| 222 | if (cnt == MAXQUOTAS) | ||
| 223 | continue; | ||
| 224 | sb->s_count++; | ||
| 225 | spin_unlock(&sb_lock); | ||
| 226 | down_read(&sb->s_umount); | ||
| 227 | if (sb->s_root) | ||
| 228 | sync_quota_sb(sb, type); | ||
| 229 | up_read(&sb->s_umount); | ||
| 230 | spin_lock(&sb_lock); | ||
| 231 | if (__put_super_and_need_restart(sb)) | ||
| 232 | goto restart; | ||
| 233 | } | ||
| 234 | spin_unlock(&sb_lock); | ||
| 235 | } | 219 | } |
| 236 | 220 | ||
| 237 | /* Copy parameters and call proper function */ | 221 | /* Copy parameters and call proper function */ |
| @@ -240,117 +224,55 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id, | |||
| 240 | { | 224 | { |
| 241 | int ret; | 225 | int ret; |
| 242 | 226 | ||
| 227 | if (type >= (XQM_COMMAND(cmd) ? XQM_MAXQUOTAS : MAXQUOTAS)) | ||
| 228 | return -EINVAL; | ||
| 229 | if (!sb->s_qcop) | ||
| 230 | return -ENOSYS; | ||
| 231 | |||
| 232 | ret = check_quotactl_permission(sb, type, cmd, id); | ||
| 233 | if (ret < 0) | ||
| 234 | return ret; | ||
| 235 | |||
| 243 | switch (cmd) { | 236 | switch (cmd) { |
| 244 | case Q_QUOTAON: { | 237 | case Q_QUOTAON: |
| 245 | char *pathname; | 238 | return quota_quotaon(sb, type, cmd, id, addr); |
| 246 | 239 | case Q_QUOTAOFF: | |
| 247 | pathname = getname(addr); | 240 | if (!sb->s_qcop->quota_off) |
| 248 | if (IS_ERR(pathname)) | 241 | return -ENOSYS; |
| 249 | return PTR_ERR(pathname); | 242 | return sb->s_qcop->quota_off(sb, type, 0); |
| 250 | ret = sb->s_qcop->quota_on(sb, type, id, pathname, 0); | 243 | case Q_GETFMT: |
| 251 | putname(pathname); | 244 | return quota_getfmt(sb, type, addr); |
| 252 | return ret; | 245 | case Q_GETINFO: |
| 253 | } | 246 | return quota_getinfo(sb, type, addr); |
| 254 | case Q_QUOTAOFF: | 247 | case Q_SETINFO: |
| 255 | return sb->s_qcop->quota_off(sb, type, 0); | 248 | return quota_setinfo(sb, type, addr); |
| 256 | 249 | case Q_GETQUOTA: | |
| 257 | case Q_GETFMT: { | 250 | return quota_getquota(sb, type, id, addr); |
| 258 | __u32 fmt; | 251 | case Q_SETQUOTA: |
| 259 | 252 | return quota_setquota(sb, type, id, addr); | |
| 260 | down_read(&sb_dqopt(sb)->dqptr_sem); | 253 | case Q_SYNC: |
| 261 | if (!sb_has_quota_active(sb, type)) { | 254 | if (!sb->s_qcop->quota_sync) |
| 262 | up_read(&sb_dqopt(sb)->dqptr_sem); | 255 | return -ENOSYS; |
| 263 | return -ESRCH; | 256 | return sb->s_qcop->quota_sync(sb, type, 1); |
| 264 | } | 257 | case Q_XQUOTAON: |
| 265 | fmt = sb_dqopt(sb)->info[type].dqi_format->qf_fmt_id; | 258 | case Q_XQUOTAOFF: |
| 266 | up_read(&sb_dqopt(sb)->dqptr_sem); | 259 | case Q_XQUOTARM: |
| 267 | if (copy_to_user(addr, &fmt, sizeof(fmt))) | 260 | return quota_setxstate(sb, cmd, addr); |
| 268 | return -EFAULT; | 261 | case Q_XGETQSTAT: |
| 269 | return 0; | 262 | return quota_getxstate(sb, addr); |
| 270 | } | 263 | case Q_XSETQLIM: |
| 271 | case Q_GETINFO: { | 264 | return quota_setxquota(sb, type, id, addr); |
| 272 | struct if_dqinfo info; | 265 | case Q_XGETQUOTA: |
| 273 | 266 | return quota_getxquota(sb, type, id, addr); | |
| 274 | ret = sb->s_qcop->get_info(sb, type, &info); | 267 | case Q_XQUOTASYNC: |
| 275 | if (ret) | 268 | /* caller already holds s_umount */ |
| 276 | return ret; | 269 | if (sb->s_flags & MS_RDONLY) |
| 277 | if (copy_to_user(addr, &info, sizeof(info))) | 270 | return -EROFS; |
| 278 | return -EFAULT; | 271 | writeback_inodes_sb(sb); |
| 279 | return 0; | 272 | return 0; |
| 280 | } | 273 | default: |
| 281 | case Q_SETINFO: { | 274 | return -EINVAL; |
| 282 | struct if_dqinfo info; | ||
| 283 | |||
| 284 | if (copy_from_user(&info, addr, sizeof(info))) | ||
| 285 | return -EFAULT; | ||
| 286 | return sb->s_qcop->set_info(sb, type, &info); | ||
| 287 | } | ||
| 288 | case Q_GETQUOTA: { | ||
| 289 | struct if_dqblk idq; | ||
| 290 | |||
| 291 | ret = sb->s_qcop->get_dqblk(sb, type, id, &idq); | ||
| 292 | if (ret) | ||
| 293 | return ret; | ||
| 294 | if (copy_to_user(addr, &idq, sizeof(idq))) | ||
| 295 | return -EFAULT; | ||
| 296 | return 0; | ||
| 297 | } | ||
| 298 | case Q_SETQUOTA: { | ||
| 299 | struct if_dqblk idq; | ||
| 300 | |||
| 301 | if (copy_from_user(&idq, addr, sizeof(idq))) | ||
| 302 | return -EFAULT; | ||
| 303 | return sb->s_qcop->set_dqblk(sb, type, id, &idq); | ||
| 304 | } | ||
| 305 | case Q_SYNC: | ||
| 306 | if (sb) | ||
| 307 | sync_quota_sb(sb, type); | ||
| 308 | else | ||
| 309 | sync_dquots(type); | ||
| 310 | return 0; | ||
| 311 | |||
| 312 | case Q_XQUOTAON: | ||
| 313 | case Q_XQUOTAOFF: | ||
| 314 | case Q_XQUOTARM: { | ||
| 315 | __u32 flags; | ||
| 316 | |||
| 317 | if (copy_from_user(&flags, addr, sizeof(flags))) | ||
| 318 | return -EFAULT; | ||
| 319 | return sb->s_qcop->set_xstate(sb, flags, cmd); | ||
| 320 | } | ||
| 321 | case Q_XGETQSTAT: { | ||
| 322 | struct fs_quota_stat fqs; | ||
| 323 | |||
| 324 | if ((ret = sb->s_qcop->get_xstate(sb, &fqs))) | ||
| 325 | return ret; | ||
| 326 | if (copy_to_user(addr, &fqs, sizeof(fqs))) | ||
| 327 | return -EFAULT; | ||
| 328 | return 0; | ||
| 329 | } | ||
| 330 | case Q_XSETQLIM: { | ||
| 331 | struct fs_disk_quota fdq; | ||
| 332 | |||
| 333 | if (copy_from_user(&fdq, addr, sizeof(fdq))) | ||
| 334 | return -EFAULT; | ||
| 335 | return sb->s_qcop->set_xquota(sb, type, id, &fdq); | ||
| 336 | } | ||
| 337 | case Q_XGETQUOTA: { | ||
| 338 | struct fs_disk_quota fdq; | ||
| 339 | |||
| 340 | ret = sb->s_qcop->get_xquota(sb, type, id, &fdq); | ||
| 341 | if (ret) | ||
| 342 | return ret; | ||
| 343 | if (copy_to_user(addr, &fdq, sizeof(fdq))) | ||
| 344 | return -EFAULT; | ||
| 345 | return 0; | ||
| 346 | } | ||
| 347 | case Q_XQUOTASYNC: | ||
| 348 | return sb->s_qcop->quota_sync(sb, type); | ||
| 349 | /* We never reach here unless validity check is broken */ | ||
| 350 | default: | ||
| 351 | BUG(); | ||
| 352 | } | 275 | } |
| 353 | return 0; | ||
| 354 | } | 276 | } |
| 355 | 277 | ||
| 356 | /* | 278 | /* |
| @@ -397,224 +319,23 @@ SYSCALL_DEFINE4(quotactl, unsigned int, cmd, const char __user *, special, | |||
| 397 | cmds = cmd >> SUBCMDSHIFT; | 319 | cmds = cmd >> SUBCMDSHIFT; |
| 398 | type = cmd & SUBCMDMASK; | 320 | type = cmd & SUBCMDMASK; |
| 399 | 321 | ||
| 400 | if (cmds != Q_SYNC || special) { | 322 | /* |
| 401 | sb = quotactl_block(special); | 323 | * As a special case Q_SYNC can be called without a specific device. |
| 402 | if (IS_ERR(sb)) | 324 | * It will iterate all superblocks that have quota enabled and call |
| 403 | return PTR_ERR(sb); | 325 | * the sync action on each of them. |
| 326 | */ | ||
| 327 | if (!special) { | ||
| 328 | if (cmds == Q_SYNC) | ||
| 329 | return quota_sync_all(type); | ||
| 330 | return -ENODEV; | ||
| 404 | } | 331 | } |
| 405 | 332 | ||
| 406 | ret = check_quotactl_valid(sb, type, cmds, id); | 333 | sb = quotactl_block(special); |
| 407 | if (ret >= 0) | 334 | if (IS_ERR(sb)) |
| 408 | ret = do_quotactl(sb, type, cmds, id, addr); | 335 | return PTR_ERR(sb); |
| 409 | if (sb) | ||
| 410 | drop_super(sb); | ||
| 411 | 336 | ||
| 412 | return ret; | 337 | ret = do_quotactl(sb, type, cmds, id, addr); |
| 413 | } | ||
| 414 | |||
| 415 | #if defined(CONFIG_COMPAT_FOR_U64_ALIGNMENT) | ||
| 416 | /* | ||
| 417 | * This code works only for 32 bit quota tools over 64 bit OS (x86_64, ia64) | ||
| 418 | * and is necessary due to alignment problems. | ||
| 419 | */ | ||
| 420 | struct compat_if_dqblk { | ||
| 421 | compat_u64 dqb_bhardlimit; | ||
| 422 | compat_u64 dqb_bsoftlimit; | ||
| 423 | compat_u64 dqb_curspace; | ||
| 424 | compat_u64 dqb_ihardlimit; | ||
| 425 | compat_u64 dqb_isoftlimit; | ||
| 426 | compat_u64 dqb_curinodes; | ||
| 427 | compat_u64 dqb_btime; | ||
| 428 | compat_u64 dqb_itime; | ||
| 429 | compat_uint_t dqb_valid; | ||
| 430 | }; | ||
| 431 | |||
| 432 | /* XFS structures */ | ||
| 433 | struct compat_fs_qfilestat { | ||
| 434 | compat_u64 dqb_bhardlimit; | ||
| 435 | compat_u64 qfs_nblks; | ||
| 436 | compat_uint_t qfs_nextents; | ||
| 437 | }; | ||
| 438 | |||
| 439 | struct compat_fs_quota_stat { | ||
| 440 | __s8 qs_version; | ||
| 441 | __u16 qs_flags; | ||
| 442 | __s8 qs_pad; | ||
| 443 | struct compat_fs_qfilestat qs_uquota; | ||
| 444 | struct compat_fs_qfilestat qs_gquota; | ||
| 445 | compat_uint_t qs_incoredqs; | ||
| 446 | compat_int_t qs_btimelimit; | ||
| 447 | compat_int_t qs_itimelimit; | ||
| 448 | compat_int_t qs_rtbtimelimit; | ||
| 449 | __u16 qs_bwarnlimit; | ||
| 450 | __u16 qs_iwarnlimit; | ||
| 451 | }; | ||
| 452 | |||
| 453 | asmlinkage long sys32_quotactl(unsigned int cmd, const char __user *special, | ||
| 454 | qid_t id, void __user *addr) | ||
| 455 | { | ||
| 456 | unsigned int cmds; | ||
| 457 | struct if_dqblk __user *dqblk; | ||
| 458 | struct compat_if_dqblk __user *compat_dqblk; | ||
| 459 | struct fs_quota_stat __user *fsqstat; | ||
| 460 | struct compat_fs_quota_stat __user *compat_fsqstat; | ||
| 461 | compat_uint_t data; | ||
| 462 | u16 xdata; | ||
| 463 | long ret; | ||
| 464 | 338 | ||
| 465 | cmds = cmd >> SUBCMDSHIFT; | 339 | drop_super(sb); |
| 466 | |||
| 467 | switch (cmds) { | ||
| 468 | case Q_GETQUOTA: | ||
| 469 | dqblk = compat_alloc_user_space(sizeof(struct if_dqblk)); | ||
| 470 | compat_dqblk = addr; | ||
| 471 | ret = sys_quotactl(cmd, special, id, dqblk); | ||
| 472 | if (ret) | ||
| 473 | break; | ||
| 474 | if (copy_in_user(compat_dqblk, dqblk, sizeof(*compat_dqblk)) || | ||
| 475 | get_user(data, &dqblk->dqb_valid) || | ||
| 476 | put_user(data, &compat_dqblk->dqb_valid)) | ||
| 477 | ret = -EFAULT; | ||
| 478 | break; | ||
| 479 | case Q_SETQUOTA: | ||
| 480 | dqblk = compat_alloc_user_space(sizeof(struct if_dqblk)); | ||
| 481 | compat_dqblk = addr; | ||
| 482 | ret = -EFAULT; | ||
| 483 | if (copy_in_user(dqblk, compat_dqblk, sizeof(*compat_dqblk)) || | ||
| 484 | get_user(data, &compat_dqblk->dqb_valid) || | ||
| 485 | put_user(data, &dqblk->dqb_valid)) | ||
| 486 | break; | ||
| 487 | ret = sys_quotactl(cmd, special, id, dqblk); | ||
| 488 | break; | ||
| 489 | case Q_XGETQSTAT: | ||
| 490 | fsqstat = compat_alloc_user_space(sizeof(struct fs_quota_stat)); | ||
| 491 | compat_fsqstat = addr; | ||
| 492 | ret = sys_quotactl(cmd, special, id, fsqstat); | ||
| 493 | if (ret) | ||
| 494 | break; | ||
| 495 | ret = -EFAULT; | ||
| 496 | /* Copying qs_version, qs_flags, qs_pad */ | ||
| 497 | if (copy_in_user(compat_fsqstat, fsqstat, | ||
| 498 | offsetof(struct compat_fs_quota_stat, qs_uquota))) | ||
| 499 | break; | ||
| 500 | /* Copying qs_uquota */ | ||
| 501 | if (copy_in_user(&compat_fsqstat->qs_uquota, | ||
| 502 | &fsqstat->qs_uquota, | ||
| 503 | sizeof(compat_fsqstat->qs_uquota)) || | ||
| 504 | get_user(data, &fsqstat->qs_uquota.qfs_nextents) || | ||
| 505 | put_user(data, &compat_fsqstat->qs_uquota.qfs_nextents)) | ||
| 506 | break; | ||
| 507 | /* Copying qs_gquota */ | ||
| 508 | if (copy_in_user(&compat_fsqstat->qs_gquota, | ||
| 509 | &fsqstat->qs_gquota, | ||
| 510 | sizeof(compat_fsqstat->qs_gquota)) || | ||
| 511 | get_user(data, &fsqstat->qs_gquota.qfs_nextents) || | ||
| 512 | put_user(data, &compat_fsqstat->qs_gquota.qfs_nextents)) | ||
| 513 | break; | ||
| 514 | /* Copying the rest */ | ||
| 515 | if (copy_in_user(&compat_fsqstat->qs_incoredqs, | ||
| 516 | &fsqstat->qs_incoredqs, | ||
| 517 | sizeof(struct compat_fs_quota_stat) - | ||
| 518 | offsetof(struct compat_fs_quota_stat, qs_incoredqs)) || | ||
| 519 | get_user(xdata, &fsqstat->qs_iwarnlimit) || | ||
| 520 | put_user(xdata, &compat_fsqstat->qs_iwarnlimit)) | ||
| 521 | break; | ||
| 522 | ret = 0; | ||
| 523 | break; | ||
| 524 | default: | ||
| 525 | ret = sys_quotactl(cmd, special, id, addr); | ||
| 526 | } | ||
| 527 | return ret; | 340 | return ret; |
| 528 | } | 341 | } |
| 529 | #endif | ||
| 530 | |||
| 531 | |||
| 532 | #ifdef CONFIG_QUOTA_NETLINK_INTERFACE | ||
| 533 | |||
| 534 | /* Netlink family structure for quota */ | ||
| 535 | static struct genl_family quota_genl_family = { | ||
| 536 | .id = GENL_ID_GENERATE, | ||
| 537 | .hdrsize = 0, | ||
| 538 | .name = "VFS_DQUOT", | ||
| 539 | .version = 1, | ||
| 540 | .maxattr = QUOTA_NL_A_MAX, | ||
| 541 | }; | ||
| 542 | |||
| 543 | /** | ||
| 544 | * quota_send_warning - Send warning to userspace about exceeded quota | ||
| 545 | * @type: The quota type: USRQQUOTA, GRPQUOTA,... | ||
| 546 | * @id: The user or group id of the quota that was exceeded | ||
| 547 | * @dev: The device on which the fs is mounted (sb->s_dev) | ||
| 548 | * @warntype: The type of the warning: QUOTA_NL_... | ||
| 549 | * | ||
| 550 | * This can be used by filesystems (including those which don't use | ||
| 551 | * dquot) to send a message to userspace relating to quota limits. | ||
| 552 | * | ||
| 553 | */ | ||
| 554 | |||
| 555 | void quota_send_warning(short type, unsigned int id, dev_t dev, | ||
| 556 | const char warntype) | ||
| 557 | { | ||
| 558 | static atomic_t seq; | ||
| 559 | struct sk_buff *skb; | ||
| 560 | void *msg_head; | ||
| 561 | int ret; | ||
| 562 | int msg_size = 4 * nla_total_size(sizeof(u32)) + | ||
| 563 | 2 * nla_total_size(sizeof(u64)); | ||
| 564 | |||
| 565 | /* We have to allocate using GFP_NOFS as we are called from a | ||
| 566 | * filesystem performing write and thus further recursion into | ||
| 567 | * the fs to free some data could cause deadlocks. */ | ||
| 568 | skb = genlmsg_new(msg_size, GFP_NOFS); | ||
| 569 | if (!skb) { | ||
| 570 | printk(KERN_ERR | ||
| 571 | "VFS: Not enough memory to send quota warning.\n"); | ||
| 572 | return; | ||
| 573 | } | ||
| 574 | msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq), | ||
| 575 | "a_genl_family, 0, QUOTA_NL_C_WARNING); | ||
| 576 | if (!msg_head) { | ||
| 577 | printk(KERN_ERR | ||
| 578 | "VFS: Cannot store netlink header in quota warning.\n"); | ||
| 579 | goto err_out; | ||
| 580 | } | ||
| 581 | ret = nla_put_u32(skb, QUOTA_NL_A_QTYPE, type); | ||
| 582 | if (ret) | ||
| 583 | goto attr_err_out; | ||
| 584 | ret = nla_put_u64(skb, QUOTA_NL_A_EXCESS_ID, id); | ||
| 585 | if (ret) | ||
| 586 | goto attr_err_out; | ||
| 587 | ret = nla_put_u32(skb, QUOTA_NL_A_WARNING, warntype); | ||
| 588 | if (ret) | ||
| 589 | goto attr_err_out; | ||
| 590 | ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MAJOR, MAJOR(dev)); | ||
| 591 | if (ret) | ||
| 592 | goto attr_err_out; | ||
| 593 | ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MINOR, MINOR(dev)); | ||
| 594 | if (ret) | ||
| 595 | goto attr_err_out; | ||
| 596 | ret = nla_put_u64(skb, QUOTA_NL_A_CAUSED_ID, current_uid()); | ||
| 597 | if (ret) | ||
| 598 | goto attr_err_out; | ||
| 599 | genlmsg_end(skb, msg_head); | ||
| 600 | |||
| 601 | genlmsg_multicast(skb, 0, quota_genl_family.id, GFP_NOFS); | ||
| 602 | return; | ||
| 603 | attr_err_out: | ||
| 604 | printk(KERN_ERR "VFS: Not enough space to compose quota message!\n"); | ||
| 605 | err_out: | ||
| 606 | kfree_skb(skb); | ||
| 607 | } | ||
| 608 | EXPORT_SYMBOL(quota_send_warning); | ||
| 609 | |||
| 610 | static int __init quota_init(void) | ||
| 611 | { | ||
| 612 | if (genl_register_family(&quota_genl_family) != 0) | ||
| 613 | printk(KERN_ERR | ||
| 614 | "VFS: Failed to create quota netlink interface.\n"); | ||
| 615 | return 0; | ||
| 616 | }; | ||
| 617 | |||
| 618 | module_init(quota_init); | ||
| 619 | #endif | ||
| 620 | |||
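The rewritten do_quotactl() above validates the quota type, performs a single permission check in check_quotactl_permission(), and then dispatches to one small helper per command. The userspace entry point is unchanged; a hedged userspace sketch of the Q_GETQUOTA path (not part of the patch, function name and output format are illustrative):

#include <stdio.h>
#include <sys/types.h>
#include <sys/quota.h>

/* Query one user's usage and soft block limit on the given block device. */
int print_user_quota(const char *blockdev, int uid)
{
	struct dqblk dq;

	if (quotactl(QCMD(Q_GETQUOTA, USRQUOTA), blockdev, uid, (caddr_t)&dq))
		return -1;
	printf("uid %d: %llu bytes in use, block soft limit %llu\n",
	       uid, (unsigned long long)dq.dqb_curspace,
	       (unsigned long long)dq.dqb_bsoftlimit);
	return 0;
}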
diff --git a/fs/reiserfs/bitmap.c b/fs/reiserfs/bitmap.c index 65c872761177..dc014f7def05 100644 --- a/fs/reiserfs/bitmap.c +++ b/fs/reiserfs/bitmap.c | |||
| @@ -425,7 +425,7 @@ static void _reiserfs_free_block(struct reiserfs_transaction_handle *th, | |||
| 425 | 425 | ||
| 426 | journal_mark_dirty(th, s, sbh); | 426 | journal_mark_dirty(th, s, sbh); |
| 427 | if (for_unformatted) | 427 | if (for_unformatted) |
| 428 | vfs_dq_free_block_nodirty(inode, 1); | 428 | dquot_free_block_nodirty(inode, 1); |
| 429 | } | 429 | } |
| 430 | 430 | ||
| 431 | void reiserfs_free_block(struct reiserfs_transaction_handle *th, | 431 | void reiserfs_free_block(struct reiserfs_transaction_handle *th, |
| @@ -1049,7 +1049,7 @@ static inline int blocknrs_and_prealloc_arrays_from_search_start | |||
| 1049 | amount_needed, hint->inode->i_uid); | 1049 | amount_needed, hint->inode->i_uid); |
| 1050 | #endif | 1050 | #endif |
| 1051 | quota_ret = | 1051 | quota_ret = |
| 1052 | vfs_dq_alloc_block_nodirty(hint->inode, amount_needed); | 1052 | dquot_alloc_block_nodirty(hint->inode, amount_needed); |
| 1053 | if (quota_ret) /* Quota exceeded? */ | 1053 | if (quota_ret) /* Quota exceeded? */ |
| 1054 | return QUOTA_EXCEEDED; | 1054 | return QUOTA_EXCEEDED; |
| 1055 | if (hint->preallocate && hint->prealloc_size) { | 1055 | if (hint->preallocate && hint->prealloc_size) { |
| @@ -1058,7 +1058,7 @@ static inline int blocknrs_and_prealloc_arrays_from_search_start | |||
| 1058 | "reiserquota: allocating (prealloc) %d blocks id=%u", | 1058 | "reiserquota: allocating (prealloc) %d blocks id=%u", |
| 1059 | hint->prealloc_size, hint->inode->i_uid); | 1059 | hint->prealloc_size, hint->inode->i_uid); |
| 1060 | #endif | 1060 | #endif |
| 1061 | quota_ret = vfs_dq_prealloc_block_nodirty(hint->inode, | 1061 | quota_ret = dquot_prealloc_block_nodirty(hint->inode, |
| 1062 | hint->prealloc_size); | 1062 | hint->prealloc_size); |
| 1063 | if (quota_ret) | 1063 | if (quota_ret) |
| 1064 | hint->preallocate = hint->prealloc_size = 0; | 1064 | hint->preallocate = hint->prealloc_size = 0; |
| @@ -1092,7 +1092,7 @@ static inline int blocknrs_and_prealloc_arrays_from_search_start | |||
| 1092 | hint->inode->i_uid); | 1092 | hint->inode->i_uid); |
| 1093 | #endif | 1093 | #endif |
| 1094 | /* Free not allocated blocks */ | 1094 | /* Free not allocated blocks */ |
| 1095 | vfs_dq_free_block_nodirty(hint->inode, | 1095 | dquot_free_block_nodirty(hint->inode, |
| 1096 | amount_needed + hint->prealloc_size - | 1096 | amount_needed + hint->prealloc_size - |
| 1097 | nr_allocated); | 1097 | nr_allocated); |
| 1098 | } | 1098 | } |
| @@ -1125,7 +1125,7 @@ static inline int blocknrs_and_prealloc_arrays_from_search_start | |||
| 1125 | REISERFS_I(hint->inode)->i_prealloc_count, | 1125 | REISERFS_I(hint->inode)->i_prealloc_count, |
| 1126 | hint->inode->i_uid); | 1126 | hint->inode->i_uid); |
| 1127 | #endif | 1127 | #endif |
| 1128 | vfs_dq_free_block_nodirty(hint->inode, amount_needed + | 1128 | dquot_free_block_nodirty(hint->inode, amount_needed + |
| 1129 | hint->prealloc_size - nr_allocated - | 1129 | hint->prealloc_size - nr_allocated - |
| 1130 | REISERFS_I(hint->inode)-> | 1130 | REISERFS_I(hint->inode)-> |
| 1131 | i_prealloc_count); | 1131 | i_prealloc_count); |
diff --git a/fs/reiserfs/file.c b/fs/reiserfs/file.c index da2dba082e2d..1d9c12714c5c 100644 --- a/fs/reiserfs/file.c +++ b/fs/reiserfs/file.c | |||
| @@ -289,7 +289,7 @@ const struct file_operations reiserfs_file_operations = { | |||
| 289 | .compat_ioctl = reiserfs_compat_ioctl, | 289 | .compat_ioctl = reiserfs_compat_ioctl, |
| 290 | #endif | 290 | #endif |
| 291 | .mmap = reiserfs_file_mmap, | 291 | .mmap = reiserfs_file_mmap, |
| 292 | .open = generic_file_open, | 292 | .open = dquot_file_open, |
| 293 | .release = reiserfs_file_release, | 293 | .release = reiserfs_file_release, |
| 294 | .fsync = reiserfs_sync_file, | 294 | .fsync = reiserfs_sync_file, |
| 295 | .aio_read = generic_file_aio_read, | 295 | .aio_read = generic_file_aio_read, |
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c index 2df0f5c7c60b..d1da94b82d8f 100644 --- a/fs/reiserfs/inode.c +++ b/fs/reiserfs/inode.c | |||
| @@ -34,6 +34,9 @@ void reiserfs_delete_inode(struct inode *inode) | |||
| 34 | int depth; | 34 | int depth; |
| 35 | int err; | 35 | int err; |
| 36 | 36 | ||
| 37 | if (!is_bad_inode(inode)) | ||
| 38 | dquot_initialize(inode); | ||
| 39 | |||
| 37 | truncate_inode_pages(&inode->i_data, 0); | 40 | truncate_inode_pages(&inode->i_data, 0); |
| 38 | 41 | ||
| 39 | depth = reiserfs_write_lock_once(inode->i_sb); | 42 | depth = reiserfs_write_lock_once(inode->i_sb); |
| @@ -54,7 +57,7 @@ void reiserfs_delete_inode(struct inode *inode) | |||
| 54 | * after delete_object so that quota updates go into the same transaction as | 57 | * after delete_object so that quota updates go into the same transaction as |
| 55 | * stat data deletion */ | 58 | * stat data deletion */ |
| 56 | if (!err) | 59 | if (!err) |
| 57 | vfs_dq_free_inode(inode); | 60 | dquot_free_inode(inode); |
| 58 | 61 | ||
| 59 | if (journal_end(&th, inode->i_sb, jbegin_count)) | 62 | if (journal_end(&th, inode->i_sb, jbegin_count)) |
| 60 | goto out; | 63 | goto out; |
| @@ -1615,7 +1618,7 @@ int reiserfs_encode_fh(struct dentry *dentry, __u32 * data, int *lenp, | |||
| 1615 | ** to properly mark inodes for datasync and such, but only actually | 1618 | ** to properly mark inodes for datasync and such, but only actually |
| 1616 | ** does something when called for a synchronous update. | 1619 | ** does something when called for a synchronous update. |
| 1617 | */ | 1620 | */ |
| 1618 | int reiserfs_write_inode(struct inode *inode, int do_sync) | 1621 | int reiserfs_write_inode(struct inode *inode, struct writeback_control *wbc) |
| 1619 | { | 1622 | { |
| 1620 | struct reiserfs_transaction_handle th; | 1623 | struct reiserfs_transaction_handle th; |
| 1621 | int jbegin_count = 1; | 1624 | int jbegin_count = 1; |
| @@ -1627,7 +1630,7 @@ int reiserfs_write_inode(struct inode *inode, int do_sync) | |||
| 1627 | ** inode needs to reach disk for safety, and they can safely be | 1630 | ** inode needs to reach disk for safety, and they can safely be |
| 1628 | ** ignored because the altered inode has already been logged. | 1631 | ** ignored because the altered inode has already been logged. |
| 1629 | */ | 1632 | */ |
| 1630 | if (do_sync && !(current->flags & PF_MEMALLOC)) { | 1633 | if (wbc->sync_mode == WB_SYNC_ALL && !(current->flags & PF_MEMALLOC)) { |
| 1631 | reiserfs_write_lock(inode->i_sb); | 1634 | reiserfs_write_lock(inode->i_sb); |
| 1632 | if (!journal_begin(&th, inode->i_sb, jbegin_count)) { | 1635 | if (!journal_begin(&th, inode->i_sb, jbegin_count)) { |
| 1633 | reiserfs_update_sd(&th, inode); | 1636 | reiserfs_update_sd(&th, inode); |
| @@ -1765,10 +1768,10 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th, | |||
| 1765 | 1768 | ||
| 1766 | BUG_ON(!th->t_trans_id); | 1769 | BUG_ON(!th->t_trans_id); |
| 1767 | 1770 | ||
| 1768 | if (vfs_dq_alloc_inode(inode)) { | 1771 | dquot_initialize(inode); |
| 1769 | err = -EDQUOT; | 1772 | err = dquot_alloc_inode(inode); |
| 1773 | if (err) | ||
| 1770 | goto out_end_trans; | 1774 | goto out_end_trans; |
| 1771 | } | ||
| 1772 | if (!dir->i_nlink) { | 1775 | if (!dir->i_nlink) { |
| 1773 | err = -EPERM; | 1776 | err = -EPERM; |
| 1774 | goto out_bad_inode; | 1777 | goto out_bad_inode; |
| @@ -1959,12 +1962,12 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th, | |||
| 1959 | INODE_PKEY(inode)->k_objectid = 0; | 1962 | INODE_PKEY(inode)->k_objectid = 0; |
| 1960 | 1963 | ||
| 1961 | /* Quota change must be inside a transaction for journaling */ | 1964 | /* Quota change must be inside a transaction for journaling */ |
| 1962 | vfs_dq_free_inode(inode); | 1965 | dquot_free_inode(inode); |
| 1963 | 1966 | ||
| 1964 | out_end_trans: | 1967 | out_end_trans: |
| 1965 | journal_end(th, th->t_super, th->t_blocks_allocated); | 1968 | journal_end(th, th->t_super, th->t_blocks_allocated); |
| 1966 | /* Drop can be outside and it needs more credits so it's better to have it outside */ | 1969 | /* Drop can be outside and it needs more credits so it's better to have it outside */ |
| 1967 | vfs_dq_drop(inode); | 1970 | dquot_drop(inode); |
| 1968 | inode->i_flags |= S_NOQUOTA; | 1971 | inode->i_flags |= S_NOQUOTA; |
| 1969 | make_bad_inode(inode); | 1972 | make_bad_inode(inode); |
| 1970 | 1973 | ||
| @@ -3073,6 +3076,8 @@ int reiserfs_setattr(struct dentry *dentry, struct iattr *attr) | |||
| 3073 | 3076 | ||
| 3074 | depth = reiserfs_write_lock_once(inode->i_sb); | 3077 | depth = reiserfs_write_lock_once(inode->i_sb); |
| 3075 | if (attr->ia_valid & ATTR_SIZE) { | 3078 | if (attr->ia_valid & ATTR_SIZE) { |
| 3079 | dquot_initialize(inode); | ||
| 3080 | |||
| 3076 | /* version 2 items will be caught by the s_maxbytes check | 3081 | /* version 2 items will be caught by the s_maxbytes check |
| 3077 | ** done for us in vmtruncate | 3082 | ** done for us in vmtruncate |
| 3078 | */ | 3083 | */ |
| @@ -3134,8 +3139,7 @@ int reiserfs_setattr(struct dentry *dentry, struct iattr *attr) | |||
| 3134 | jbegin_count); | 3139 | jbegin_count); |
| 3135 | if (error) | 3140 | if (error) |
| 3136 | goto out; | 3141 | goto out; |
| 3137 | error = | 3142 | error = dquot_transfer(inode, attr); |
| 3138 | vfs_dq_transfer(inode, attr) ? -EDQUOT : 0; | ||
| 3139 | if (error) { | 3143 | if (error) { |
| 3140 | journal_end(&th, inode->i_sb, | 3144 | journal_end(&th, inode->i_sb, |
| 3141 | jbegin_count); | 3145 | jbegin_count); |
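
Note on the reiserfs/inode.c hunks above: ->write_inode() now takes a struct writeback_control pointer instead of an int, and the synchronous case is recognised through wbc->sync_mode. A minimal caller-side sketch of what such an implementation now receives (example_sync_one_inode is hypothetical, not a kernel function):

#include <linux/fs.h>
#include <linux/writeback.h>

/* Hypothetical helper: push one inode's metadata through ->write_inode(),
 * either synchronously (WB_SYNC_ALL, the old wait=1 case) or
 * opportunistically (WB_SYNC_NONE, the old wait=0 case). */
static int example_sync_one_inode(struct inode *inode, int must_wait)
{
	struct writeback_control wbc = {
		.sync_mode = must_wait ? WB_SYNC_ALL : WB_SYNC_NONE,
	};

	if (inode->i_sb->s_op->write_inode)
		return inode->i_sb->s_op->write_inode(inode, &wbc);
	return 0;
}
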
diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c index 9d4dcf0b07cb..96e4cbbfaa18 100644 --- a/fs/reiserfs/namei.c +++ b/fs/reiserfs/namei.c | |||
| @@ -546,7 +546,7 @@ static int reiserfs_add_entry(struct reiserfs_transaction_handle *th, | |||
| 546 | */ | 546 | */ |
| 547 | static int drop_new_inode(struct inode *inode) | 547 | static int drop_new_inode(struct inode *inode) |
| 548 | { | 548 | { |
| 549 | vfs_dq_drop(inode); | 549 | dquot_drop(inode); |
| 550 | make_bad_inode(inode); | 550 | make_bad_inode(inode); |
| 551 | inode->i_flags |= S_NOQUOTA; | 551 | inode->i_flags |= S_NOQUOTA; |
| 552 | iput(inode); | 552 | iput(inode); |
| @@ -554,7 +554,7 @@ static int drop_new_inode(struct inode *inode) | |||
| 554 | } | 554 | } |
| 555 | 555 | ||
| 556 | /* utility function that does setup for reiserfs_new_inode. | 556 | /* utility function that does setup for reiserfs_new_inode. |
| 557 | ** vfs_dq_init needs lots of credits so it's better to have it | 557 | ** dquot_initialize needs lots of credits so it's better to have it |
| 558 | ** outside of a transaction, so we had to pull some bits of | 558 | ** outside of a transaction, so we had to pull some bits of |
| 559 | ** reiserfs_new_inode out into this func. | 559 | ** reiserfs_new_inode out into this func. |
| 560 | */ | 560 | */ |
| @@ -577,7 +577,7 @@ static int new_inode_init(struct inode *inode, struct inode *dir, int mode) | |||
| 577 | } else { | 577 | } else { |
| 578 | inode->i_gid = current_fsgid(); | 578 | inode->i_gid = current_fsgid(); |
| 579 | } | 579 | } |
| 580 | vfs_dq_init(inode); | 580 | dquot_initialize(inode); |
| 581 | return 0; | 581 | return 0; |
| 582 | } | 582 | } |
| 583 | 583 | ||
| @@ -594,6 +594,8 @@ static int reiserfs_create(struct inode *dir, struct dentry *dentry, int mode, | |||
| 594 | struct reiserfs_transaction_handle th; | 594 | struct reiserfs_transaction_handle th; |
| 595 | struct reiserfs_security_handle security; | 595 | struct reiserfs_security_handle security; |
| 596 | 596 | ||
| 597 | dquot_initialize(dir); | ||
| 598 | |||
| 597 | if (!(inode = new_inode(dir->i_sb))) { | 599 | if (!(inode = new_inode(dir->i_sb))) { |
| 598 | return -ENOMEM; | 600 | return -ENOMEM; |
| 599 | } | 601 | } |
| @@ -666,6 +668,8 @@ static int reiserfs_mknod(struct inode *dir, struct dentry *dentry, int mode, | |||
| 666 | if (!new_valid_dev(rdev)) | 668 | if (!new_valid_dev(rdev)) |
| 667 | return -EINVAL; | 669 | return -EINVAL; |
| 668 | 670 | ||
| 671 | dquot_initialize(dir); | ||
| 672 | |||
| 669 | if (!(inode = new_inode(dir->i_sb))) { | 673 | if (!(inode = new_inode(dir->i_sb))) { |
| 670 | return -ENOMEM; | 674 | return -ENOMEM; |
| 671 | } | 675 | } |
| @@ -739,6 +743,8 @@ static int reiserfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) | |||
| 739 | 2 * (REISERFS_QUOTA_INIT_BLOCKS(dir->i_sb) + | 743 | 2 * (REISERFS_QUOTA_INIT_BLOCKS(dir->i_sb) + |
| 740 | REISERFS_QUOTA_TRANS_BLOCKS(dir->i_sb)); | 744 | REISERFS_QUOTA_TRANS_BLOCKS(dir->i_sb)); |
| 741 | 745 | ||
| 746 | dquot_initialize(dir); | ||
| 747 | |||
| 742 | #ifdef DISPLACE_NEW_PACKING_LOCALITIES | 748 | #ifdef DISPLACE_NEW_PACKING_LOCALITIES |
| 743 | /* set flag that new packing locality created and new blocks for the content * of that directory are not displaced yet */ | 749 | /* set flag that new packing locality created and new blocks for the content * of that directory are not displaced yet */ |
| 744 | REISERFS_I(dir)->new_packing_locality = 1; | 750 | REISERFS_I(dir)->new_packing_locality = 1; |
| @@ -842,6 +848,8 @@ static int reiserfs_rmdir(struct inode *dir, struct dentry *dentry) | |||
| 842 | JOURNAL_PER_BALANCE_CNT * 2 + 2 + | 848 | JOURNAL_PER_BALANCE_CNT * 2 + 2 + |
| 843 | 4 * REISERFS_QUOTA_TRANS_BLOCKS(dir->i_sb); | 849 | 4 * REISERFS_QUOTA_TRANS_BLOCKS(dir->i_sb); |
| 844 | 850 | ||
| 851 | dquot_initialize(dir); | ||
| 852 | |||
| 845 | reiserfs_write_lock(dir->i_sb); | 853 | reiserfs_write_lock(dir->i_sb); |
| 846 | retval = journal_begin(&th, dir->i_sb, jbegin_count); | 854 | retval = journal_begin(&th, dir->i_sb, jbegin_count); |
| 847 | if (retval) | 855 | if (retval) |
| @@ -923,6 +931,8 @@ static int reiserfs_unlink(struct inode *dir, struct dentry *dentry) | |||
| 923 | unsigned long savelink; | 931 | unsigned long savelink; |
| 924 | int depth; | 932 | int depth; |
| 925 | 933 | ||
| 934 | dquot_initialize(dir); | ||
| 935 | |||
| 926 | inode = dentry->d_inode; | 936 | inode = dentry->d_inode; |
| 927 | 937 | ||
| 928 | /* in this transaction we can be doing at max two balancings and update | 938 | /* in this transaction we can be doing at max two balancings and update |
| @@ -1024,6 +1034,8 @@ static int reiserfs_symlink(struct inode *parent_dir, | |||
| 1024 | 2 * (REISERFS_QUOTA_INIT_BLOCKS(parent_dir->i_sb) + | 1034 | 2 * (REISERFS_QUOTA_INIT_BLOCKS(parent_dir->i_sb) + |
| 1025 | REISERFS_QUOTA_TRANS_BLOCKS(parent_dir->i_sb)); | 1035 | REISERFS_QUOTA_TRANS_BLOCKS(parent_dir->i_sb)); |
| 1026 | 1036 | ||
| 1037 | dquot_initialize(parent_dir); | ||
| 1038 | |||
| 1027 | if (!(inode = new_inode(parent_dir->i_sb))) { | 1039 | if (!(inode = new_inode(parent_dir->i_sb))) { |
| 1028 | return -ENOMEM; | 1040 | return -ENOMEM; |
| 1029 | } | 1041 | } |
| @@ -1111,6 +1123,8 @@ static int reiserfs_link(struct dentry *old_dentry, struct inode *dir, | |||
| 1111 | JOURNAL_PER_BALANCE_CNT * 3 + | 1123 | JOURNAL_PER_BALANCE_CNT * 3 + |
| 1112 | 2 * REISERFS_QUOTA_TRANS_BLOCKS(dir->i_sb); | 1124 | 2 * REISERFS_QUOTA_TRANS_BLOCKS(dir->i_sb); |
| 1113 | 1125 | ||
| 1126 | dquot_initialize(dir); | ||
| 1127 | |||
| 1114 | reiserfs_write_lock(dir->i_sb); | 1128 | reiserfs_write_lock(dir->i_sb); |
| 1115 | if (inode->i_nlink >= REISERFS_LINK_MAX) { | 1129 | if (inode->i_nlink >= REISERFS_LINK_MAX) { |
| 1116 | //FIXME: sd_nlink is 32 bit for new files | 1130 | //FIXME: sd_nlink is 32 bit for new files |
| @@ -1235,6 +1249,9 @@ static int reiserfs_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
| 1235 | JOURNAL_PER_BALANCE_CNT * 3 + 5 + | 1249 | JOURNAL_PER_BALANCE_CNT * 3 + 5 + |
| 1236 | 4 * REISERFS_QUOTA_TRANS_BLOCKS(old_dir->i_sb); | 1250 | 4 * REISERFS_QUOTA_TRANS_BLOCKS(old_dir->i_sb); |
| 1237 | 1251 | ||
| 1252 | dquot_initialize(old_dir); | ||
| 1253 | dquot_initialize(new_dir); | ||
| 1254 | |||
| 1238 | old_inode = old_dentry->d_inode; | 1255 | old_inode = old_dentry->d_inode; |
| 1239 | new_dentry_inode = new_dentry->d_inode; | 1256 | new_dentry_inode = new_dentry->d_inode; |
| 1240 | 1257 | ||
diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c index 5fa7118f04e1..313d39d639eb 100644 --- a/fs/reiserfs/stree.c +++ b/fs/reiserfs/stree.c | |||
| @@ -1299,7 +1299,7 @@ int reiserfs_delete_item(struct reiserfs_transaction_handle *th, | |||
| 1299 | "reiserquota delete_item(): freeing %u, id=%u type=%c", | 1299 | "reiserquota delete_item(): freeing %u, id=%u type=%c", |
| 1300 | quota_cut_bytes, inode->i_uid, head2type(&s_ih)); | 1300 | quota_cut_bytes, inode->i_uid, head2type(&s_ih)); |
| 1301 | #endif | 1301 | #endif |
| 1302 | vfs_dq_free_space_nodirty(inode, quota_cut_bytes); | 1302 | dquot_free_space_nodirty(inode, quota_cut_bytes); |
| 1303 | 1303 | ||
| 1304 | /* Return deleted body length */ | 1304 | /* Return deleted body length */ |
| 1305 | return ret_value; | 1305 | return ret_value; |
| @@ -1383,7 +1383,7 @@ void reiserfs_delete_solid_item(struct reiserfs_transaction_handle *th, | |||
| 1383 | quota_cut_bytes, inode->i_uid, | 1383 | quota_cut_bytes, inode->i_uid, |
| 1384 | key2type(key)); | 1384 | key2type(key)); |
| 1385 | #endif | 1385 | #endif |
| 1386 | vfs_dq_free_space_nodirty(inode, | 1386 | dquot_free_space_nodirty(inode, |
| 1387 | quota_cut_bytes); | 1387 | quota_cut_bytes); |
| 1388 | } | 1388 | } |
| 1389 | break; | 1389 | break; |
| @@ -1733,7 +1733,7 @@ int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th, | |||
| 1733 | "reiserquota cut_from_item(): freeing %u id=%u type=%c", | 1733 | "reiserquota cut_from_item(): freeing %u id=%u type=%c", |
| 1734 | quota_cut_bytes, inode->i_uid, '?'); | 1734 | quota_cut_bytes, inode->i_uid, '?'); |
| 1735 | #endif | 1735 | #endif |
| 1736 | vfs_dq_free_space_nodirty(inode, quota_cut_bytes); | 1736 | dquot_free_space_nodirty(inode, quota_cut_bytes); |
| 1737 | return ret_value; | 1737 | return ret_value; |
| 1738 | } | 1738 | } |
| 1739 | 1739 | ||
| @@ -1968,9 +1968,10 @@ int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th, struct tree | |||
| 1968 | key2type(&(key->on_disk_key))); | 1968 | key2type(&(key->on_disk_key))); |
| 1969 | #endif | 1969 | #endif |
| 1970 | 1970 | ||
| 1971 | if (vfs_dq_alloc_space_nodirty(inode, pasted_size)) { | 1971 | retval = dquot_alloc_space_nodirty(inode, pasted_size); |
| 1972 | if (retval) { | ||
| 1972 | pathrelse(search_path); | 1973 | pathrelse(search_path); |
| 1973 | return -EDQUOT; | 1974 | return retval; |
| 1974 | } | 1975 | } |
| 1975 | init_tb_struct(th, &s_paste_balance, th->t_super, search_path, | 1976 | init_tb_struct(th, &s_paste_balance, th->t_super, search_path, |
| 1976 | pasted_size); | 1977 | pasted_size); |
| @@ -2024,7 +2025,7 @@ int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th, struct tree | |||
| 2024 | pasted_size, inode->i_uid, | 2025 | pasted_size, inode->i_uid, |
| 2025 | key2type(&(key->on_disk_key))); | 2026 | key2type(&(key->on_disk_key))); |
| 2026 | #endif | 2027 | #endif |
| 2027 | vfs_dq_free_space_nodirty(inode, pasted_size); | 2028 | dquot_free_space_nodirty(inode, pasted_size); |
| 2028 | return retval; | 2029 | return retval; |
| 2029 | } | 2030 | } |
| 2030 | 2031 | ||
| @@ -2062,9 +2063,10 @@ int reiserfs_insert_item(struct reiserfs_transaction_handle *th, | |||
| 2062 | #endif | 2063 | #endif |
| 2063 | /* We can't dirty inode here. It would be immediately written but | 2064 | /* We can't dirty inode here. It would be immediately written but |
| 2064 | * appropriate stat item isn't inserted yet... */ | 2065 | * appropriate stat item isn't inserted yet... */ |
| 2065 | if (vfs_dq_alloc_space_nodirty(inode, quota_bytes)) { | 2066 | retval = dquot_alloc_space_nodirty(inode, quota_bytes); |
| 2067 | if (retval) { | ||
| 2066 | pathrelse(path); | 2068 | pathrelse(path); |
| 2067 | return -EDQUOT; | 2069 | return retval; |
| 2068 | } | 2070 | } |
| 2069 | } | 2071 | } |
| 2070 | init_tb_struct(th, &s_ins_balance, th->t_super, path, | 2072 | init_tb_struct(th, &s_ins_balance, th->t_super, path, |
| @@ -2113,6 +2115,6 @@ int reiserfs_insert_item(struct reiserfs_transaction_handle *th, | |||
| 2113 | quota_bytes, inode->i_uid, head2type(ih)); | 2115 | quota_bytes, inode->i_uid, head2type(ih)); |
| 2114 | #endif | 2116 | #endif |
| 2115 | if (inode) | 2117 | if (inode) |
| 2116 | vfs_dq_free_space_nodirty(inode, quota_bytes); | 2118 | dquot_free_space_nodirty(inode, quota_bytes); |
| 2117 | return retval; | 2119 | return retval; |
| 2118 | } | 2120 | } |
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c index b4a7dd03bdb9..04bf5d791bda 100644 --- a/fs/reiserfs/super.c +++ b/fs/reiserfs/super.c | |||
| @@ -246,7 +246,7 @@ static int finish_unfinished(struct super_block *s) | |||
| 246 | retval = remove_save_link_only(s, &save_link_key, 0); | 246 | retval = remove_save_link_only(s, &save_link_key, 0); |
| 247 | continue; | 247 | continue; |
| 248 | } | 248 | } |
| 249 | vfs_dq_init(inode); | 249 | dquot_initialize(inode); |
| 250 | 250 | ||
| 251 | if (truncate && S_ISDIR(inode->i_mode)) { | 251 | if (truncate && S_ISDIR(inode->i_mode)) { |
| 252 | /* We got a truncate request for a dir which is impossible. | 252 | /* We got a truncate request for a dir which is impossible. |
| @@ -578,6 +578,11 @@ out: | |||
| 578 | reiserfs_write_unlock_once(inode->i_sb, lock_depth); | 578 | reiserfs_write_unlock_once(inode->i_sb, lock_depth); |
| 579 | } | 579 | } |
| 580 | 580 | ||
| 581 | static void reiserfs_clear_inode(struct inode *inode) | ||
| 582 | { | ||
| 583 | dquot_drop(inode); | ||
| 584 | } | ||
| 585 | |||
| 581 | #ifdef CONFIG_QUOTA | 586 | #ifdef CONFIG_QUOTA |
| 582 | static ssize_t reiserfs_quota_write(struct super_block *, int, const char *, | 587 | static ssize_t reiserfs_quota_write(struct super_block *, int, const char *, |
| 583 | size_t, loff_t); | 588 | size_t, loff_t); |
| @@ -590,6 +595,7 @@ static const struct super_operations reiserfs_sops = { | |||
| 590 | .destroy_inode = reiserfs_destroy_inode, | 595 | .destroy_inode = reiserfs_destroy_inode, |
| 591 | .write_inode = reiserfs_write_inode, | 596 | .write_inode = reiserfs_write_inode, |
| 592 | .dirty_inode = reiserfs_dirty_inode, | 597 | .dirty_inode = reiserfs_dirty_inode, |
| 598 | .clear_inode = reiserfs_clear_inode, | ||
| 593 | .delete_inode = reiserfs_delete_inode, | 599 | .delete_inode = reiserfs_delete_inode, |
| 594 | .put_super = reiserfs_put_super, | 600 | .put_super = reiserfs_put_super, |
| 595 | .write_super = reiserfs_write_super, | 601 | .write_super = reiserfs_write_super, |
| @@ -616,13 +622,6 @@ static int reiserfs_write_info(struct super_block *, int); | |||
| 616 | static int reiserfs_quota_on(struct super_block *, int, int, char *, int); | 622 | static int reiserfs_quota_on(struct super_block *, int, int, char *, int); |
| 617 | 623 | ||
| 618 | static const struct dquot_operations reiserfs_quota_operations = { | 624 | static const struct dquot_operations reiserfs_quota_operations = { |
| 619 | .initialize = dquot_initialize, | ||
| 620 | .drop = dquot_drop, | ||
| 621 | .alloc_space = dquot_alloc_space, | ||
| 622 | .alloc_inode = dquot_alloc_inode, | ||
| 623 | .free_space = dquot_free_space, | ||
| 624 | .free_inode = dquot_free_inode, | ||
| 625 | .transfer = dquot_transfer, | ||
| 626 | .write_dquot = reiserfs_write_dquot, | 625 | .write_dquot = reiserfs_write_dquot, |
| 627 | .acquire_dquot = reiserfs_acquire_dquot, | 626 | .acquire_dquot = reiserfs_acquire_dquot, |
| 628 | .release_dquot = reiserfs_release_dquot, | 627 | .release_dquot = reiserfs_release_dquot, |
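
The super.c hunk above has two sides. reiserfs_quota_operations loses its generic entries because, after this series, the filesystem calls the dquot_* helpers directly (as the inode.c, namei.c and stree.c hunks do) rather than routing them through the operations vector; only the journal-aware dquot hooks remain. The new ->clear_inode ensures quota references are dropped when an inode is evicted. A hedged sketch of that eviction hook for a hypothetical filesystem:

static void examplefs_clear_inode(struct inode *inode)
{
	/* Release the dquot references the inode holds; allocation,
	 * freeing and transfer are handled elsewhere by direct
	 * dquot_*() calls rather than via dquot_operations. */
	dquot_drop(inode);
}
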
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c index 81f09fab8ae4..37d034ca7d99 100644 --- a/fs/reiserfs/xattr.c +++ b/fs/reiserfs/xattr.c | |||
| @@ -61,7 +61,6 @@ | |||
| 61 | static int xattr_create(struct inode *dir, struct dentry *dentry, int mode) | 61 | static int xattr_create(struct inode *dir, struct dentry *dentry, int mode) |
| 62 | { | 62 | { |
| 63 | BUG_ON(!mutex_is_locked(&dir->i_mutex)); | 63 | BUG_ON(!mutex_is_locked(&dir->i_mutex)); |
| 64 | vfs_dq_init(dir); | ||
| 65 | return dir->i_op->create(dir, dentry, mode, NULL); | 64 | return dir->i_op->create(dir, dentry, mode, NULL); |
| 66 | } | 65 | } |
| 67 | #endif | 66 | #endif |
| @@ -69,7 +68,6 @@ static int xattr_create(struct inode *dir, struct dentry *dentry, int mode) | |||
| 69 | static int xattr_mkdir(struct inode *dir, struct dentry *dentry, int mode) | 68 | static int xattr_mkdir(struct inode *dir, struct dentry *dentry, int mode) |
| 70 | { | 69 | { |
| 71 | BUG_ON(!mutex_is_locked(&dir->i_mutex)); | 70 | BUG_ON(!mutex_is_locked(&dir->i_mutex)); |
| 72 | vfs_dq_init(dir); | ||
| 73 | return dir->i_op->mkdir(dir, dentry, mode); | 71 | return dir->i_op->mkdir(dir, dentry, mode); |
| 74 | } | 72 | } |
| 75 | 73 | ||
| @@ -81,7 +79,6 @@ static int xattr_unlink(struct inode *dir, struct dentry *dentry) | |||
| 81 | { | 79 | { |
| 82 | int error; | 80 | int error; |
| 83 | BUG_ON(!mutex_is_locked(&dir->i_mutex)); | 81 | BUG_ON(!mutex_is_locked(&dir->i_mutex)); |
| 84 | vfs_dq_init(dir); | ||
| 85 | 82 | ||
| 86 | reiserfs_mutex_lock_nested_safe(&dentry->d_inode->i_mutex, | 83 | reiserfs_mutex_lock_nested_safe(&dentry->d_inode->i_mutex, |
| 87 | I_MUTEX_CHILD, dir->i_sb); | 84 | I_MUTEX_CHILD, dir->i_sb); |
| @@ -97,7 +94,6 @@ static int xattr_rmdir(struct inode *dir, struct dentry *dentry) | |||
| 97 | { | 94 | { |
| 98 | int error; | 95 | int error; |
| 99 | BUG_ON(!mutex_is_locked(&dir->i_mutex)); | 96 | BUG_ON(!mutex_is_locked(&dir->i_mutex)); |
| 100 | vfs_dq_init(dir); | ||
| 101 | 97 | ||
| 102 | reiserfs_mutex_lock_nested_safe(&dentry->d_inode->i_mutex, | 98 | reiserfs_mutex_lock_nested_safe(&dentry->d_inode->i_mutex, |
| 103 | I_MUTEX_CHILD, dir->i_sb); | 99 | I_MUTEX_CHILD, dir->i_sb); |
diff --git a/fs/squashfs/Makefile b/fs/squashfs/Makefile index 70e3244fa30f..df8a19ef870d 100644 --- a/fs/squashfs/Makefile +++ b/fs/squashfs/Makefile | |||
| @@ -4,4 +4,4 @@ | |||
| 4 | 4 | ||
| 5 | obj-$(CONFIG_SQUASHFS) += squashfs.o | 5 | obj-$(CONFIG_SQUASHFS) += squashfs.o |
| 6 | squashfs-y += block.o cache.o dir.o export.o file.o fragment.o id.o inode.o | 6 | squashfs-y += block.o cache.o dir.o export.o file.o fragment.o id.o inode.o |
| 7 | squashfs-y += namei.o super.o symlink.o | 7 | squashfs-y += namei.o super.o symlink.o zlib_wrapper.o decompressor.o |
diff --git a/fs/squashfs/block.c b/fs/squashfs/block.c index 2a7960310349..1cb0d81b164b 100644 --- a/fs/squashfs/block.c +++ b/fs/squashfs/block.c | |||
| @@ -29,15 +29,14 @@ | |||
| 29 | #include <linux/fs.h> | 29 | #include <linux/fs.h> |
| 30 | #include <linux/vfs.h> | 30 | #include <linux/vfs.h> |
| 31 | #include <linux/slab.h> | 31 | #include <linux/slab.h> |
| 32 | #include <linux/mutex.h> | ||
| 33 | #include <linux/string.h> | 32 | #include <linux/string.h> |
| 34 | #include <linux/buffer_head.h> | 33 | #include <linux/buffer_head.h> |
| 35 | #include <linux/zlib.h> | ||
| 36 | 34 | ||
| 37 | #include "squashfs_fs.h" | 35 | #include "squashfs_fs.h" |
| 38 | #include "squashfs_fs_sb.h" | 36 | #include "squashfs_fs_sb.h" |
| 39 | #include "squashfs_fs_i.h" | 37 | #include "squashfs_fs_i.h" |
| 40 | #include "squashfs.h" | 38 | #include "squashfs.h" |
| 39 | #include "decompressor.h" | ||
| 41 | 40 | ||
| 42 | /* | 41 | /* |
| 43 | * Read the metadata block length, this is stored in the first two | 42 | * Read the metadata block length, this is stored in the first two |
| @@ -153,72 +152,10 @@ int squashfs_read_data(struct super_block *sb, void **buffer, u64 index, | |||
| 153 | } | 152 | } |
| 154 | 153 | ||
| 155 | if (compressed) { | 154 | if (compressed) { |
| 156 | int zlib_err = 0, zlib_init = 0; | 155 | length = squashfs_decompress(msblk, buffer, bh, b, offset, |
| 157 | 156 | length, srclength, pages); | |
| 158 | /* | 157 | if (length < 0) |
| 159 | * Uncompress block. | 158 | goto read_failure; |
| 160 | */ | ||
| 161 | |||
| 162 | mutex_lock(&msblk->read_data_mutex); | ||
| 163 | |||
| 164 | msblk->stream.avail_out = 0; | ||
| 165 | msblk->stream.avail_in = 0; | ||
| 166 | |||
| 167 | bytes = length; | ||
| 168 | do { | ||
| 169 | if (msblk->stream.avail_in == 0 && k < b) { | ||
| 170 | avail = min(bytes, msblk->devblksize - offset); | ||
| 171 | bytes -= avail; | ||
| 172 | wait_on_buffer(bh[k]); | ||
| 173 | if (!buffer_uptodate(bh[k])) | ||
| 174 | goto release_mutex; | ||
| 175 | |||
| 176 | if (avail == 0) { | ||
| 177 | offset = 0; | ||
| 178 | put_bh(bh[k++]); | ||
| 179 | continue; | ||
| 180 | } | ||
| 181 | |||
| 182 | msblk->stream.next_in = bh[k]->b_data + offset; | ||
| 183 | msblk->stream.avail_in = avail; | ||
| 184 | offset = 0; | ||
| 185 | } | ||
| 186 | |||
| 187 | if (msblk->stream.avail_out == 0 && page < pages) { | ||
| 188 | msblk->stream.next_out = buffer[page++]; | ||
| 189 | msblk->stream.avail_out = PAGE_CACHE_SIZE; | ||
| 190 | } | ||
| 191 | |||
| 192 | if (!zlib_init) { | ||
| 193 | zlib_err = zlib_inflateInit(&msblk->stream); | ||
| 194 | if (zlib_err != Z_OK) { | ||
| 195 | ERROR("zlib_inflateInit returned" | ||
| 196 | " unexpected result 0x%x," | ||
| 197 | " srclength %d\n", zlib_err, | ||
| 198 | srclength); | ||
| 199 | goto release_mutex; | ||
| 200 | } | ||
| 201 | zlib_init = 1; | ||
| 202 | } | ||
| 203 | |||
| 204 | zlib_err = zlib_inflate(&msblk->stream, Z_SYNC_FLUSH); | ||
| 205 | |||
| 206 | if (msblk->stream.avail_in == 0 && k < b) | ||
| 207 | put_bh(bh[k++]); | ||
| 208 | } while (zlib_err == Z_OK); | ||
| 209 | |||
| 210 | if (zlib_err != Z_STREAM_END) { | ||
| 211 | ERROR("zlib_inflate error, data probably corrupt\n"); | ||
| 212 | goto release_mutex; | ||
| 213 | } | ||
| 214 | |||
| 215 | zlib_err = zlib_inflateEnd(&msblk->stream); | ||
| 216 | if (zlib_err != Z_OK) { | ||
| 217 | ERROR("zlib_inflate error, data probably corrupt\n"); | ||
| 218 | goto release_mutex; | ||
| 219 | } | ||
| 220 | length = msblk->stream.total_out; | ||
| 221 | mutex_unlock(&msblk->read_data_mutex); | ||
| 222 | } else { | 159 | } else { |
| 223 | /* | 160 | /* |
| 224 | * Block is uncompressed. | 161 | * Block is uncompressed. |
| @@ -255,9 +192,6 @@ int squashfs_read_data(struct super_block *sb, void **buffer, u64 index, | |||
| 255 | kfree(bh); | 192 | kfree(bh); |
| 256 | return length; | 193 | return length; |
| 257 | 194 | ||
| 258 | release_mutex: | ||
| 259 | mutex_unlock(&msblk->read_data_mutex); | ||
| 260 | |||
| 261 | block_release: | 195 | block_release: |
| 262 | for (; k < b; k++) | 196 | for (; k < b; k++) |
| 263 | put_bh(bh[k]); | 197 | put_bh(bh[k]); |
diff --git a/fs/squashfs/cache.c b/fs/squashfs/cache.c index 40c98fa6b5d6..57314bee9059 100644 --- a/fs/squashfs/cache.c +++ b/fs/squashfs/cache.c | |||
| @@ -51,7 +51,6 @@ | |||
| 51 | #include <linux/sched.h> | 51 | #include <linux/sched.h> |
| 52 | #include <linux/spinlock.h> | 52 | #include <linux/spinlock.h> |
| 53 | #include <linux/wait.h> | 53 | #include <linux/wait.h> |
| 54 | #include <linux/zlib.h> | ||
| 55 | #include <linux/pagemap.h> | 54 | #include <linux/pagemap.h> |
| 56 | 55 | ||
| 57 | #include "squashfs_fs.h" | 56 | #include "squashfs_fs.h" |
diff --git a/fs/squashfs/decompressor.c b/fs/squashfs/decompressor.c new file mode 100644 index 000000000000..157478da6ac9 --- /dev/null +++ b/fs/squashfs/decompressor.c | |||
| @@ -0,0 +1,68 @@ | |||
| 1 | /* | ||
| 2 | * Squashfs - a compressed read only filesystem for Linux | ||
| 3 | * | ||
| 4 | * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 | ||
| 5 | * Phillip Lougher <phillip@lougher.demon.co.uk> | ||
| 6 | * | ||
| 7 | * This program is free software; you can redistribute it and/or | ||
| 8 | * modify it under the terms of the GNU General Public License | ||
| 9 | * as published by the Free Software Foundation; either version 2, | ||
| 10 | * or (at your option) any later version. | ||
| 11 | * | ||
| 12 | * This program is distributed in the hope that it will be useful, | ||
| 13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 15 | * GNU General Public License for more details. | ||
| 16 | * | ||
| 17 | * You should have received a copy of the GNU General Public License | ||
| 18 | * along with this program; if not, write to the Free Software | ||
| 19 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
| 20 | * | ||
| 21 | * decompressor.c | ||
| 22 | */ | ||
| 23 | |||
| 24 | #include <linux/types.h> | ||
| 25 | #include <linux/mutex.h> | ||
| 26 | #include <linux/buffer_head.h> | ||
| 27 | |||
| 28 | #include "squashfs_fs.h" | ||
| 29 | #include "squashfs_fs_sb.h" | ||
| 30 | #include "squashfs_fs_i.h" | ||
| 31 | #include "decompressor.h" | ||
| 32 | #include "squashfs.h" | ||
| 33 | |||
| 34 | /* | ||
| 35 | * This file (and decompressor.h) implements a decompressor framework for | ||
| 36 | * Squashfs, allowing multiple decompressors to be easily supported | ||
| 37 | */ | ||
| 38 | |||
| 39 | static const struct squashfs_decompressor squashfs_lzma_unsupported_comp_ops = { | ||
| 40 | NULL, NULL, NULL, LZMA_COMPRESSION, "lzma", 0 | ||
| 41 | }; | ||
| 42 | |||
| 43 | static const struct squashfs_decompressor squashfs_lzo_unsupported_comp_ops = { | ||
| 44 | NULL, NULL, NULL, LZO_COMPRESSION, "lzo", 0 | ||
| 45 | }; | ||
| 46 | |||
| 47 | static const struct squashfs_decompressor squashfs_unknown_comp_ops = { | ||
| 48 | NULL, NULL, NULL, 0, "unknown", 0 | ||
| 49 | }; | ||
| 50 | |||
| 51 | static const struct squashfs_decompressor *decompressor[] = { | ||
| 52 | &squashfs_zlib_comp_ops, | ||
| 53 | &squashfs_lzma_unsupported_comp_ops, | ||
| 54 | &squashfs_lzo_unsupported_comp_ops, | ||
| 55 | &squashfs_unknown_comp_ops | ||
| 56 | }; | ||
| 57 | |||
| 58 | |||
| 59 | const struct squashfs_decompressor *squashfs_lookup_decompressor(int id) | ||
| 60 | { | ||
| 61 | int i; | ||
| 62 | |||
| 63 | for (i = 0; decompressor[i]->id; i++) | ||
| 64 | if (id == decompressor[i]->id) | ||
| 65 | break; | ||
| 66 | |||
| 67 | return decompressor[i]; | ||
| 68 | } | ||
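
The lookup table above ends with squashfs_unknown_comp_ops, whose id of 0 terminates the loop, so squashfs_lookup_decompressor() always returns a usable pointer and unsupported formats are reported through ->supported rather than a NULL check. Purely illustrative sketch (no such compressor exists in this patch) of how a future decompressor would be registered:

static const struct squashfs_decompressor *decompressor[] = {
	&squashfs_zlib_comp_ops,
	&squashfs_lzma_unsupported_comp_ops,
	&squashfs_lzo_unsupported_comp_ops,
	/* &squashfs_xz_comp_ops,  hypothetical future entry, added here */
	&squashfs_unknown_comp_ops	/* id == 0: sentinel, must stay last */
};
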
diff --git a/fs/squashfs/decompressor.h b/fs/squashfs/decompressor.h new file mode 100644 index 000000000000..7425f80783f6 --- /dev/null +++ b/fs/squashfs/decompressor.h | |||
| @@ -0,0 +1,55 @@ | |||
| 1 | #ifndef DECOMPRESSOR_H | ||
| 2 | #define DECOMPRESSOR_H | ||
| 3 | /* | ||
| 4 | * Squashfs - a compressed read only filesystem for Linux | ||
| 5 | * | ||
| 6 | * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 | ||
| 7 | * Phillip Lougher <phillip@lougher.demon.co.uk> | ||
| 8 | * | ||
| 9 | * This program is free software; you can redistribute it and/or | ||
| 10 | * modify it under the terms of the GNU General Public License | ||
| 11 | * as published by the Free Software Foundation; either version 2, | ||
| 12 | * or (at your option) any later version. | ||
| 13 | * | ||
| 14 | * This program is distributed in the hope that it will be useful, | ||
| 15 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 17 | * GNU General Public License for more details. | ||
| 18 | * | ||
| 19 | * You should have received a copy of the GNU General Public License | ||
| 20 | * along with this program; if not, write to the Free Software | ||
| 21 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
| 22 | * | ||
| 23 | * decompressor.h | ||
| 24 | */ | ||
| 25 | |||
| 26 | struct squashfs_decompressor { | ||
| 27 | void *(*init)(struct squashfs_sb_info *); | ||
| 28 | void (*free)(void *); | ||
| 29 | int (*decompress)(struct squashfs_sb_info *, void **, | ||
| 30 | struct buffer_head **, int, int, int, int, int); | ||
| 31 | int id; | ||
| 32 | char *name; | ||
| 33 | int supported; | ||
| 34 | }; | ||
| 35 | |||
| 36 | static inline void *squashfs_decompressor_init(struct squashfs_sb_info *msblk) | ||
| 37 | { | ||
| 38 | return msblk->decompressor->init(msblk); | ||
| 39 | } | ||
| 40 | |||
| 41 | static inline void squashfs_decompressor_free(struct squashfs_sb_info *msblk, | ||
| 42 | void *s) | ||
| 43 | { | ||
| 44 | if (msblk->decompressor) | ||
| 45 | msblk->decompressor->free(s); | ||
| 46 | } | ||
| 47 | |||
| 48 | static inline int squashfs_decompress(struct squashfs_sb_info *msblk, | ||
| 49 | void **buffer, struct buffer_head **bh, int b, int offset, int length, | ||
| 50 | int srclength, int pages) | ||
| 51 | { | ||
| 52 | return msblk->decompressor->decompress(msblk, buffer, bh, b, offset, | ||
| 53 | length, srclength, pages); | ||
| 54 | } | ||
| 55 | #endif | ||
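
For reference, a hedged skeleton of what an implementation of this interface has to provide (hypothetical code, not part of the patch; the real example is the zlib wrapper further down): init() returns an opaque per-mount state that ends up in msblk->stream, free() releases it, and decompress() consumes buffer_heads bh[0..b-1] starting at offset, fills the destination pages in buffer[], and returns the number of bytes produced or a negative errno.

#include <linux/slab.h>
#include <linux/buffer_head.h>

#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
#include "decompressor.h"

struct example_state { int scratch; };	/* hypothetical per-mount state */

static void *example_comp_init(struct squashfs_sb_info *msblk)
{
	return kmalloc(sizeof(struct example_state), GFP_KERNEL);
}

static void example_comp_free(void *strm)
{
	kfree(strm);
}

static int example_comp_decompress(struct squashfs_sb_info *msblk,
	void **buffer, struct buffer_head **bh, int b, int offset,
	int length, int srclength, int pages)
{
	/* take msblk->read_data_mutex, walk bh[], write into buffer[],
	 * then return the total output length (or a negative errno) */
	return -EIO;	/* placeholder body */
}

static const struct squashfs_decompressor example_comp_ops = {
	.init = example_comp_init,
	.free = example_comp_free,
	.decompress = example_comp_decompress,
	.id = 4,	/* would need its own *_COMPRESSION value */
	.name = "example",
	.supported = 1
};
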
diff --git a/fs/squashfs/dir.c b/fs/squashfs/dir.c index 566b0eaed868..12b933ac6585 100644 --- a/fs/squashfs/dir.c +++ b/fs/squashfs/dir.c | |||
| @@ -30,7 +30,6 @@ | |||
| 30 | #include <linux/fs.h> | 30 | #include <linux/fs.h> |
| 31 | #include <linux/vfs.h> | 31 | #include <linux/vfs.h> |
| 32 | #include <linux/slab.h> | 32 | #include <linux/slab.h> |
| 33 | #include <linux/zlib.h> | ||
| 34 | 33 | ||
| 35 | #include "squashfs_fs.h" | 34 | #include "squashfs_fs.h" |
| 36 | #include "squashfs_fs_sb.h" | 35 | #include "squashfs_fs_sb.h" |
diff --git a/fs/squashfs/export.c b/fs/squashfs/export.c index 2b1b8fe5e037..7f93d5a9ee05 100644 --- a/fs/squashfs/export.c +++ b/fs/squashfs/export.c | |||
| @@ -39,7 +39,6 @@ | |||
| 39 | #include <linux/vfs.h> | 39 | #include <linux/vfs.h> |
| 40 | #include <linux/dcache.h> | 40 | #include <linux/dcache.h> |
| 41 | #include <linux/exportfs.h> | 41 | #include <linux/exportfs.h> |
| 42 | #include <linux/zlib.h> | ||
| 43 | #include <linux/slab.h> | 42 | #include <linux/slab.h> |
| 44 | 43 | ||
| 45 | #include "squashfs_fs.h" | 44 | #include "squashfs_fs.h" |
diff --git a/fs/squashfs/file.c b/fs/squashfs/file.c index 717767d831df..a25c5060bdcb 100644 --- a/fs/squashfs/file.c +++ b/fs/squashfs/file.c | |||
| @@ -47,7 +47,6 @@ | |||
| 47 | #include <linux/string.h> | 47 | #include <linux/string.h> |
| 48 | #include <linux/pagemap.h> | 48 | #include <linux/pagemap.h> |
| 49 | #include <linux/mutex.h> | 49 | #include <linux/mutex.h> |
| 50 | #include <linux/zlib.h> | ||
| 51 | 50 | ||
| 52 | #include "squashfs_fs.h" | 51 | #include "squashfs_fs.h" |
| 53 | #include "squashfs_fs_sb.h" | 52 | #include "squashfs_fs_sb.h" |
diff --git a/fs/squashfs/fragment.c b/fs/squashfs/fragment.c index b5a2c15bbbc7..7c90bbd6879d 100644 --- a/fs/squashfs/fragment.c +++ b/fs/squashfs/fragment.c | |||
| @@ -36,7 +36,6 @@ | |||
| 36 | #include <linux/fs.h> | 36 | #include <linux/fs.h> |
| 37 | #include <linux/vfs.h> | 37 | #include <linux/vfs.h> |
| 38 | #include <linux/slab.h> | 38 | #include <linux/slab.h> |
| 39 | #include <linux/zlib.h> | ||
| 40 | 39 | ||
| 41 | #include "squashfs_fs.h" | 40 | #include "squashfs_fs.h" |
| 42 | #include "squashfs_fs_sb.h" | 41 | #include "squashfs_fs_sb.h" |
diff --git a/fs/squashfs/id.c b/fs/squashfs/id.c index 3795b837ba28..b7f64bcd2b70 100644 --- a/fs/squashfs/id.c +++ b/fs/squashfs/id.c | |||
| @@ -34,7 +34,6 @@ | |||
| 34 | #include <linux/fs.h> | 34 | #include <linux/fs.h> |
| 35 | #include <linux/vfs.h> | 35 | #include <linux/vfs.h> |
| 36 | #include <linux/slab.h> | 36 | #include <linux/slab.h> |
| 37 | #include <linux/zlib.h> | ||
| 38 | 37 | ||
| 39 | #include "squashfs_fs.h" | 38 | #include "squashfs_fs.h" |
| 40 | #include "squashfs_fs_sb.h" | 39 | #include "squashfs_fs_sb.h" |
diff --git a/fs/squashfs/inode.c b/fs/squashfs/inode.c index 9101dbde39ec..49daaf669e41 100644 --- a/fs/squashfs/inode.c +++ b/fs/squashfs/inode.c | |||
| @@ -40,7 +40,6 @@ | |||
| 40 | 40 | ||
| 41 | #include <linux/fs.h> | 41 | #include <linux/fs.h> |
| 42 | #include <linux/vfs.h> | 42 | #include <linux/vfs.h> |
| 43 | #include <linux/zlib.h> | ||
| 44 | 43 | ||
| 45 | #include "squashfs_fs.h" | 44 | #include "squashfs_fs.h" |
| 46 | #include "squashfs_fs_sb.h" | 45 | #include "squashfs_fs_sb.h" |
diff --git a/fs/squashfs/namei.c b/fs/squashfs/namei.c index 9e398653b22b..5266bd8ad932 100644 --- a/fs/squashfs/namei.c +++ b/fs/squashfs/namei.c | |||
| @@ -57,7 +57,6 @@ | |||
| 57 | #include <linux/slab.h> | 57 | #include <linux/slab.h> |
| 58 | #include <linux/string.h> | 58 | #include <linux/string.h> |
| 59 | #include <linux/dcache.h> | 59 | #include <linux/dcache.h> |
| 60 | #include <linux/zlib.h> | ||
| 61 | 60 | ||
| 62 | #include "squashfs_fs.h" | 61 | #include "squashfs_fs.h" |
| 63 | #include "squashfs_fs_sb.h" | 62 | #include "squashfs_fs_sb.h" |
diff --git a/fs/squashfs/squashfs.h b/fs/squashfs/squashfs.h index 0e9feb6adf7e..fe2587af5512 100644 --- a/fs/squashfs/squashfs.h +++ b/fs/squashfs/squashfs.h | |||
| @@ -51,6 +51,9 @@ extern struct squashfs_cache_entry *squashfs_get_datablock(struct super_block *, | |||
| 51 | u64, int); | 51 | u64, int); |
| 52 | extern int squashfs_read_table(struct super_block *, void *, u64, int); | 52 | extern int squashfs_read_table(struct super_block *, void *, u64, int); |
| 53 | 53 | ||
| 54 | /* decompressor.c */ | ||
| 55 | extern const struct squashfs_decompressor *squashfs_lookup_decompressor(int); | ||
| 56 | |||
| 54 | /* export.c */ | 57 | /* export.c */ |
| 55 | extern __le64 *squashfs_read_inode_lookup_table(struct super_block *, u64, | 58 | extern __le64 *squashfs_read_inode_lookup_table(struct super_block *, u64, |
| 56 | unsigned int); | 59 | unsigned int); |
| @@ -71,7 +74,7 @@ extern struct inode *squashfs_iget(struct super_block *, long long, | |||
| 71 | extern int squashfs_read_inode(struct inode *, long long); | 74 | extern int squashfs_read_inode(struct inode *, long long); |
| 72 | 75 | ||
| 73 | /* | 76 | /* |
| 74 | * Inodes and files operations | 77 | * Inodes, files and decompressor operations |
| 75 | */ | 78 | */ |
| 76 | 79 | ||
| 77 | /* dir.c */ | 80 | /* dir.c */ |
| @@ -88,3 +91,6 @@ extern const struct inode_operations squashfs_dir_inode_ops; | |||
| 88 | 91 | ||
| 89 | /* symlink.c */ | 92 | /* symlink.c */ |
| 90 | extern const struct address_space_operations squashfs_symlink_aops; | 93 | extern const struct address_space_operations squashfs_symlink_aops; |
| 94 | |||
| 95 | /* zlib_wrapper.c */ | ||
| 96 | extern const struct squashfs_decompressor squashfs_zlib_comp_ops; | ||
diff --git a/fs/squashfs/squashfs_fs.h b/fs/squashfs/squashfs_fs.h index 283daafc568e..79024245ea00 100644 --- a/fs/squashfs/squashfs_fs.h +++ b/fs/squashfs/squashfs_fs.h | |||
| @@ -183,8 +183,6 @@ | |||
| 183 | #define SQUASHFS_MAX_FILE_SIZE (1LL << \ | 183 | #define SQUASHFS_MAX_FILE_SIZE (1LL << \ |
| 184 | (SQUASHFS_MAX_FILE_SIZE_LOG - 2)) | 184 | (SQUASHFS_MAX_FILE_SIZE_LOG - 2)) |
| 185 | 185 | ||
| 186 | #define SQUASHFS_MARKER_BYTE 0xff | ||
| 187 | |||
| 188 | /* meta index cache */ | 186 | /* meta index cache */ |
| 189 | #define SQUASHFS_META_INDEXES (SQUASHFS_METADATA_SIZE / sizeof(unsigned int)) | 187 | #define SQUASHFS_META_INDEXES (SQUASHFS_METADATA_SIZE / sizeof(unsigned int)) |
| 190 | #define SQUASHFS_META_ENTRIES 127 | 188 | #define SQUASHFS_META_ENTRIES 127 |
| @@ -211,7 +209,9 @@ struct meta_index { | |||
| 211 | /* | 209 | /* |
| 212 | * definitions for structures on disk | 210 | * definitions for structures on disk |
| 213 | */ | 211 | */ |
| 214 | #define ZLIB_COMPRESSION 1 | 212 | #define ZLIB_COMPRESSION 1 |
| 213 | #define LZMA_COMPRESSION 2 | ||
| 214 | #define LZO_COMPRESSION 3 | ||
| 215 | 215 | ||
| 216 | struct squashfs_super_block { | 216 | struct squashfs_super_block { |
| 217 | __le32 s_magic; | 217 | __le32 s_magic; |
diff --git a/fs/squashfs/squashfs_fs_sb.h b/fs/squashfs/squashfs_fs_sb.h index c8c65614dd1c..2e77dc547e25 100644 --- a/fs/squashfs/squashfs_fs_sb.h +++ b/fs/squashfs/squashfs_fs_sb.h | |||
| @@ -52,25 +52,25 @@ struct squashfs_cache_entry { | |||
| 52 | }; | 52 | }; |
| 53 | 53 | ||
| 54 | struct squashfs_sb_info { | 54 | struct squashfs_sb_info { |
| 55 | int devblksize; | 55 | const struct squashfs_decompressor *decompressor; |
| 56 | int devblksize_log2; | 56 | int devblksize; |
| 57 | struct squashfs_cache *block_cache; | 57 | int devblksize_log2; |
| 58 | struct squashfs_cache *fragment_cache; | 58 | struct squashfs_cache *block_cache; |
| 59 | struct squashfs_cache *read_page; | 59 | struct squashfs_cache *fragment_cache; |
| 60 | int next_meta_index; | 60 | struct squashfs_cache *read_page; |
| 61 | __le64 *id_table; | 61 | int next_meta_index; |
| 62 | __le64 *fragment_index; | 62 | __le64 *id_table; |
| 63 | unsigned int *fragment_index_2; | 63 | __le64 *fragment_index; |
| 64 | struct mutex read_data_mutex; | 64 | struct mutex read_data_mutex; |
| 65 | struct mutex meta_index_mutex; | 65 | struct mutex meta_index_mutex; |
| 66 | struct meta_index *meta_index; | 66 | struct meta_index *meta_index; |
| 67 | z_stream stream; | 67 | void *stream; |
| 68 | __le64 *inode_lookup_table; | 68 | __le64 *inode_lookup_table; |
| 69 | u64 inode_table; | 69 | u64 inode_table; |
| 70 | u64 directory_table; | 70 | u64 directory_table; |
| 71 | unsigned int block_size; | 71 | unsigned int block_size; |
| 72 | unsigned short block_log; | 72 | unsigned short block_log; |
| 73 | long long bytes_used; | 73 | long long bytes_used; |
| 74 | unsigned int inodes; | 74 | unsigned int inodes; |
| 75 | }; | 75 | }; |
| 76 | #endif | 76 | #endif |
diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c index 6c197ef53add..3550aec2f655 100644 --- a/fs/squashfs/super.c +++ b/fs/squashfs/super.c | |||
| @@ -35,34 +35,41 @@ | |||
| 35 | #include <linux/pagemap.h> | 35 | #include <linux/pagemap.h> |
| 36 | #include <linux/init.h> | 36 | #include <linux/init.h> |
| 37 | #include <linux/module.h> | 37 | #include <linux/module.h> |
| 38 | #include <linux/zlib.h> | ||
| 39 | #include <linux/magic.h> | 38 | #include <linux/magic.h> |
| 40 | 39 | ||
| 41 | #include "squashfs_fs.h" | 40 | #include "squashfs_fs.h" |
| 42 | #include "squashfs_fs_sb.h" | 41 | #include "squashfs_fs_sb.h" |
| 43 | #include "squashfs_fs_i.h" | 42 | #include "squashfs_fs_i.h" |
| 44 | #include "squashfs.h" | 43 | #include "squashfs.h" |
| 44 | #include "decompressor.h" | ||
| 45 | 45 | ||
| 46 | static struct file_system_type squashfs_fs_type; | 46 | static struct file_system_type squashfs_fs_type; |
| 47 | static const struct super_operations squashfs_super_ops; | 47 | static const struct super_operations squashfs_super_ops; |
| 48 | 48 | ||
| 49 | static int supported_squashfs_filesystem(short major, short minor, short comp) | 49 | static const struct squashfs_decompressor *supported_squashfs_filesystem(short |
| 50 | major, short minor, short id) | ||
| 50 | { | 51 | { |
| 52 | const struct squashfs_decompressor *decompressor; | ||
| 53 | |||
| 51 | if (major < SQUASHFS_MAJOR) { | 54 | if (major < SQUASHFS_MAJOR) { |
| 52 | ERROR("Major/Minor mismatch, older Squashfs %d.%d " | 55 | ERROR("Major/Minor mismatch, older Squashfs %d.%d " |
| 53 | "filesystems are unsupported\n", major, minor); | 56 | "filesystems are unsupported\n", major, minor); |
| 54 | return -EINVAL; | 57 | return NULL; |
| 55 | } else if (major > SQUASHFS_MAJOR || minor > SQUASHFS_MINOR) { | 58 | } else if (major > SQUASHFS_MAJOR || minor > SQUASHFS_MINOR) { |
| 56 | ERROR("Major/Minor mismatch, trying to mount newer " | 59 | ERROR("Major/Minor mismatch, trying to mount newer " |
| 57 | "%d.%d filesystem\n", major, minor); | 60 | "%d.%d filesystem\n", major, minor); |
| 58 | ERROR("Please update your kernel\n"); | 61 | ERROR("Please update your kernel\n"); |
| 59 | return -EINVAL; | 62 | return NULL; |
| 60 | } | 63 | } |
| 61 | 64 | ||
| 62 | if (comp != ZLIB_COMPRESSION) | 65 | decompressor = squashfs_lookup_decompressor(id); |
| 63 | return -EINVAL; | 66 | if (!decompressor->supported) { |
| 67 | ERROR("Filesystem uses \"%s\" compression. This is not " | ||
| 68 | "supported\n", decompressor->name); | ||
| 69 | return NULL; | ||
| 70 | } | ||
| 64 | 71 | ||
| 65 | return 0; | 72 | return decompressor; |
| 66 | } | 73 | } |
| 67 | 74 | ||
| 68 | 75 | ||
| @@ -87,13 +94,6 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent) | |||
| 87 | } | 94 | } |
| 88 | msblk = sb->s_fs_info; | 95 | msblk = sb->s_fs_info; |
| 89 | 96 | ||
| 90 | msblk->stream.workspace = kmalloc(zlib_inflate_workspacesize(), | ||
| 91 | GFP_KERNEL); | ||
| 92 | if (msblk->stream.workspace == NULL) { | ||
| 93 | ERROR("Failed to allocate zlib workspace\n"); | ||
| 94 | goto failure; | ||
| 95 | } | ||
| 96 | |||
| 97 | sblk = kzalloc(sizeof(*sblk), GFP_KERNEL); | 97 | sblk = kzalloc(sizeof(*sblk), GFP_KERNEL); |
| 98 | if (sblk == NULL) { | 98 | if (sblk == NULL) { |
| 99 | ERROR("Failed to allocate squashfs_super_block\n"); | 99 | ERROR("Failed to allocate squashfs_super_block\n"); |
| @@ -120,25 +120,25 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent) | |||
| 120 | goto failed_mount; | 120 | goto failed_mount; |
| 121 | } | 121 | } |
| 122 | 122 | ||
| 123 | err = -EINVAL; | ||
| 124 | |||
| 123 | /* Check it is a SQUASHFS superblock */ | 125 | /* Check it is a SQUASHFS superblock */ |
| 124 | sb->s_magic = le32_to_cpu(sblk->s_magic); | 126 | sb->s_magic = le32_to_cpu(sblk->s_magic); |
| 125 | if (sb->s_magic != SQUASHFS_MAGIC) { | 127 | if (sb->s_magic != SQUASHFS_MAGIC) { |
| 126 | if (!silent) | 128 | if (!silent) |
| 127 | ERROR("Can't find a SQUASHFS superblock on %s\n", | 129 | ERROR("Can't find a SQUASHFS superblock on %s\n", |
| 128 | bdevname(sb->s_bdev, b)); | 130 | bdevname(sb->s_bdev, b)); |
| 129 | err = -EINVAL; | ||
| 130 | goto failed_mount; | 131 | goto failed_mount; |
| 131 | } | 132 | } |
| 132 | 133 | ||
| 133 | /* Check the MAJOR & MINOR versions and compression type */ | 134 | /* Check the MAJOR & MINOR versions and lookup compression type */ |
| 134 | err = supported_squashfs_filesystem(le16_to_cpu(sblk->s_major), | 135 | msblk->decompressor = supported_squashfs_filesystem( |
| 136 | le16_to_cpu(sblk->s_major), | ||
| 135 | le16_to_cpu(sblk->s_minor), | 137 | le16_to_cpu(sblk->s_minor), |
| 136 | le16_to_cpu(sblk->compression)); | 138 | le16_to_cpu(sblk->compression)); |
| 137 | if (err < 0) | 139 | if (msblk->decompressor == NULL) |
| 138 | goto failed_mount; | 140 | goto failed_mount; |
| 139 | 141 | ||
| 140 | err = -EINVAL; | ||
| 141 | |||
| 142 | /* | 142 | /* |
| 143 | * Check if there's xattrs in the filesystem. These are not | 143 | * Check if there's xattrs in the filesystem. These are not |
| 144 | * supported in this version, so warn that they will be ignored. | 144 | * supported in this version, so warn that they will be ignored. |
| @@ -205,6 +205,10 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent) | |||
| 205 | 205 | ||
| 206 | err = -ENOMEM; | 206 | err = -ENOMEM; |
| 207 | 207 | ||
| 208 | msblk->stream = squashfs_decompressor_init(msblk); | ||
| 209 | if (msblk->stream == NULL) | ||
| 210 | goto failed_mount; | ||
| 211 | |||
| 208 | msblk->block_cache = squashfs_cache_init("metadata", | 212 | msblk->block_cache = squashfs_cache_init("metadata", |
| 209 | SQUASHFS_CACHED_BLKS, SQUASHFS_METADATA_SIZE); | 213 | SQUASHFS_CACHED_BLKS, SQUASHFS_METADATA_SIZE); |
| 210 | if (msblk->block_cache == NULL) | 214 | if (msblk->block_cache == NULL) |
| @@ -292,17 +296,16 @@ failed_mount: | |||
| 292 | squashfs_cache_delete(msblk->block_cache); | 296 | squashfs_cache_delete(msblk->block_cache); |
| 293 | squashfs_cache_delete(msblk->fragment_cache); | 297 | squashfs_cache_delete(msblk->fragment_cache); |
| 294 | squashfs_cache_delete(msblk->read_page); | 298 | squashfs_cache_delete(msblk->read_page); |
| 299 | squashfs_decompressor_free(msblk, msblk->stream); | ||
| 295 | kfree(msblk->inode_lookup_table); | 300 | kfree(msblk->inode_lookup_table); |
| 296 | kfree(msblk->fragment_index); | 301 | kfree(msblk->fragment_index); |
| 297 | kfree(msblk->id_table); | 302 | kfree(msblk->id_table); |
| 298 | kfree(msblk->stream.workspace); | ||
| 299 | kfree(sb->s_fs_info); | 303 | kfree(sb->s_fs_info); |
| 300 | sb->s_fs_info = NULL; | 304 | sb->s_fs_info = NULL; |
| 301 | kfree(sblk); | 305 | kfree(sblk); |
| 302 | return err; | 306 | return err; |
| 303 | 307 | ||
| 304 | failure: | 308 | failure: |
| 305 | kfree(msblk->stream.workspace); | ||
| 306 | kfree(sb->s_fs_info); | 309 | kfree(sb->s_fs_info); |
| 307 | sb->s_fs_info = NULL; | 310 | sb->s_fs_info = NULL; |
| 308 | return -ENOMEM; | 311 | return -ENOMEM; |
| @@ -346,10 +349,10 @@ static void squashfs_put_super(struct super_block *sb) | |||
| 346 | squashfs_cache_delete(sbi->block_cache); | 349 | squashfs_cache_delete(sbi->block_cache); |
| 347 | squashfs_cache_delete(sbi->fragment_cache); | 350 | squashfs_cache_delete(sbi->fragment_cache); |
| 348 | squashfs_cache_delete(sbi->read_page); | 351 | squashfs_cache_delete(sbi->read_page); |
| 352 | squashfs_decompressor_free(sbi, sbi->stream); | ||
| 349 | kfree(sbi->id_table); | 353 | kfree(sbi->id_table); |
| 350 | kfree(sbi->fragment_index); | 354 | kfree(sbi->fragment_index); |
| 351 | kfree(sbi->meta_index); | 355 | kfree(sbi->meta_index); |
| 352 | kfree(sbi->stream.workspace); | ||
| 353 | kfree(sb->s_fs_info); | 356 | kfree(sb->s_fs_info); |
| 354 | sb->s_fs_info = NULL; | 357 | sb->s_fs_info = NULL; |
| 355 | } | 358 | } |
diff --git a/fs/squashfs/symlink.c b/fs/squashfs/symlink.c index 83d87880aac8..e80be2022a7f 100644 --- a/fs/squashfs/symlink.c +++ b/fs/squashfs/symlink.c | |||
| @@ -36,7 +36,6 @@ | |||
| 36 | #include <linux/slab.h> | 36 | #include <linux/slab.h> |
| 37 | #include <linux/string.h> | 37 | #include <linux/string.h> |
| 38 | #include <linux/pagemap.h> | 38 | #include <linux/pagemap.h> |
| 39 | #include <linux/zlib.h> | ||
| 40 | 39 | ||
| 41 | #include "squashfs_fs.h" | 40 | #include "squashfs_fs.h" |
| 42 | #include "squashfs_fs_sb.h" | 41 | #include "squashfs_fs_sb.h" |
diff --git a/fs/squashfs/zlib_wrapper.c b/fs/squashfs/zlib_wrapper.c new file mode 100644 index 000000000000..4dd70e04333b --- /dev/null +++ b/fs/squashfs/zlib_wrapper.c | |||
| @@ -0,0 +1,150 @@ | |||
| 1 | /* | ||
| 2 | * Squashfs - a compressed read only filesystem for Linux | ||
| 3 | * | ||
| 4 | * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 | ||
| 5 | * Phillip Lougher <phillip@lougher.demon.co.uk> | ||
| 6 | * | ||
| 7 | * This program is free software; you can redistribute it and/or | ||
| 8 | * modify it under the terms of the GNU General Public License | ||
| 9 | * as published by the Free Software Foundation; either version 2, | ||
| 10 | * or (at your option) any later version. | ||
| 11 | * | ||
| 12 | * This program is distributed in the hope that it will be useful, | ||
| 13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 15 | * GNU General Public License for more details. | ||
| 16 | * | ||
| 17 | * You should have received a copy of the GNU General Public License | ||
| 18 | * along with this program; if not, write to the Free Software | ||
| 19 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
| 20 | * | ||
| 21 | * zlib_wrapper.c | ||
| 22 | */ | ||
| 23 | |||
| 24 | |||
| 25 | #include <linux/mutex.h> | ||
| 26 | #include <linux/buffer_head.h> | ||
| 27 | #include <linux/zlib.h> | ||
| 28 | |||
| 29 | #include "squashfs_fs.h" | ||
| 30 | #include "squashfs_fs_sb.h" | ||
| 31 | #include "squashfs_fs_i.h" | ||
| 32 | #include "squashfs.h" | ||
| 33 | #include "decompressor.h" | ||
| 34 | |||
| 35 | static void *zlib_init(struct squashfs_sb_info *dummy) | ||
| 36 | { | ||
| 37 | z_stream *stream = kmalloc(sizeof(z_stream), GFP_KERNEL); | ||
| 38 | if (stream == NULL) | ||
| 39 | goto failed; | ||
| 40 | stream->workspace = kmalloc(zlib_inflate_workspacesize(), | ||
| 41 | GFP_KERNEL); | ||
| 42 | if (stream->workspace == NULL) | ||
| 43 | goto failed; | ||
| 44 | |||
| 45 | return stream; | ||
| 46 | |||
| 47 | failed: | ||
| 48 | ERROR("Failed to allocate zlib workspace\n"); | ||
| 49 | kfree(stream); | ||
| 50 | return NULL; | ||
| 51 | } | ||
| 52 | |||
| 53 | |||
| 54 | static void zlib_free(void *strm) | ||
| 55 | { | ||
| 56 | z_stream *stream = strm; | ||
| 57 | |||
| 58 | if (stream) | ||
| 59 | kfree(stream->workspace); | ||
| 60 | kfree(stream); | ||
| 61 | } | ||
| 62 | |||
| 63 | |||
| 64 | static int zlib_uncompress(struct squashfs_sb_info *msblk, void **buffer, | ||
| 65 | struct buffer_head **bh, int b, int offset, int length, int srclength, | ||
| 66 | int pages) | ||
| 67 | { | ||
| 68 | int zlib_err = 0, zlib_init = 0; | ||
| 69 | int avail, bytes, k = 0, page = 0; | ||
| 70 | z_stream *stream = msblk->stream; | ||
| 71 | |||
| 72 | mutex_lock(&msblk->read_data_mutex); | ||
| 73 | |||
| 74 | stream->avail_out = 0; | ||
| 75 | stream->avail_in = 0; | ||
| 76 | |||
| 77 | bytes = length; | ||
| 78 | do { | ||
| 79 | if (stream->avail_in == 0 && k < b) { | ||
| 80 | avail = min(bytes, msblk->devblksize - offset); | ||
| 81 | bytes -= avail; | ||
| 82 | wait_on_buffer(bh[k]); | ||
| 83 | if (!buffer_uptodate(bh[k])) | ||
| 84 | goto release_mutex; | ||
| 85 | |||
| 86 | if (avail == 0) { | ||
| 87 | offset = 0; | ||
| 88 | put_bh(bh[k++]); | ||
| 89 | continue; | ||
| 90 | } | ||
| 91 | |||
| 92 | stream->next_in = bh[k]->b_data + offset; | ||
| 93 | stream->avail_in = avail; | ||
| 94 | offset = 0; | ||
| 95 | } | ||
| 96 | |||
| 97 | if (stream->avail_out == 0 && page < pages) { | ||
| 98 | stream->next_out = buffer[page++]; | ||
| 99 | stream->avail_out = PAGE_CACHE_SIZE; | ||
| 100 | } | ||
| 101 | |||
| 102 | if (!zlib_init) { | ||
| 103 | zlib_err = zlib_inflateInit(stream); | ||
| 104 | if (zlib_err != Z_OK) { | ||
| 105 | ERROR("zlib_inflateInit returned unexpected " | ||
| 106 | "result 0x%x, srclength %d\n", | ||
| 107 | zlib_err, srclength); | ||
| 108 | goto release_mutex; | ||
| 109 | } | ||
| 110 | zlib_init = 1; | ||
| 111 | } | ||
| 112 | |||
| 113 | zlib_err = zlib_inflate(stream, Z_SYNC_FLUSH); | ||
| 114 | |||
| 115 | if (stream->avail_in == 0 && k < b) | ||
| 116 | put_bh(bh[k++]); | ||
| 117 | } while (zlib_err == Z_OK); | ||
| 118 | |||
| 119 | if (zlib_err != Z_STREAM_END) { | ||
| 120 | ERROR("zlib_inflate error, data probably corrupt\n"); | ||
| 121 | goto release_mutex; | ||
| 122 | } | ||
| 123 | |||
| 124 | zlib_err = zlib_inflateEnd(stream); | ||
| 125 | if (zlib_err != Z_OK) { | ||
| 126 | ERROR("zlib_inflate error, data probably corrupt\n"); | ||
| 127 | goto release_mutex; | ||
| 128 | } | ||
| 129 | |||
| 130 | mutex_unlock(&msblk->read_data_mutex); | ||
| 131 | return stream->total_out; | ||
| 132 | |||
| 133 | release_mutex: | ||
| 134 | mutex_unlock(&msblk->read_data_mutex); | ||
| 135 | |||
| 136 | for (; k < b; k++) | ||
| 137 | put_bh(bh[k]); | ||
| 138 | |||
| 139 | return -EIO; | ||
| 140 | } | ||
| 141 | |||
| 142 | const struct squashfs_decompressor squashfs_zlib_comp_ops = { | ||
| 143 | .init = zlib_init, | ||
| 144 | .free = zlib_free, | ||
| 145 | .decompress = zlib_uncompress, | ||
| 146 | .id = ZLIB_COMPRESSION, | ||
| 147 | .name = "zlib", | ||
| 148 | .supported = 1 | ||
| 149 | }; | ||
| 150 | |||
diff --git a/fs/sync.c b/fs/sync.c --- a/fs/sync.c +++ b/fs/sync.c | |||
| @@ -34,14 +34,14 @@ static int __sync_filesystem(struct super_block *sb, int wait) | |||
| 34 | if (!sb->s_bdi) | 34 | if (!sb->s_bdi) |
| 35 | return 0; | 35 | return 0; |
| 36 | 36 | ||
| 37 | /* Avoid doing twice syncing and cache pruning for quota sync */ | 37 | if (sb->s_qcop && sb->s_qcop->quota_sync) |
| 38 | if (!wait) { | 38 | sb->s_qcop->quota_sync(sb, -1, wait); |
| 39 | writeout_quota_sb(sb, -1); | 39 | |
| 40 | writeback_inodes_sb(sb); | 40 | if (wait) |
| 41 | } else { | ||
| 42 | sync_quota_sb(sb, -1); | ||
| 43 | sync_inodes_sb(sb); | 41 | sync_inodes_sb(sb); |
| 44 | } | 42 | else |
| 43 | writeback_inodes_sb(sb); | ||
| 44 | |||
| 45 | if (sb->s_op->sync_fs) | 45 | if (sb->s_op->sync_fs) |
| 46 | sb->s_op->sync_fs(sb, wait); | 46 | sb->s_op->sync_fs(sb, wait); |
| 47 | return __sync_blockdev(sb->s_bdev, wait); | 47 | return __sync_blockdev(sb->s_bdev, wait); |
diff --git a/fs/sysv/inode.c b/fs/sysv/inode.c index 9824743832a7..4573734d723d 100644 --- a/fs/sysv/inode.c +++ b/fs/sysv/inode.c | |||
| @@ -26,6 +26,7 @@ | |||
| 26 | #include <linux/init.h> | 26 | #include <linux/init.h> |
| 27 | #include <linux/buffer_head.h> | 27 | #include <linux/buffer_head.h> |
| 28 | #include <linux/vfs.h> | 28 | #include <linux/vfs.h> |
| 29 | #include <linux/writeback.h> | ||
| 29 | #include <linux/namei.h> | 30 | #include <linux/namei.h> |
| 30 | #include <asm/byteorder.h> | 31 | #include <asm/byteorder.h> |
| 31 | #include "sysv.h" | 32 | #include "sysv.h" |
| @@ -246,7 +247,7 @@ bad_inode: | |||
| 246 | return ERR_PTR(-EIO); | 247 | return ERR_PTR(-EIO); |
| 247 | } | 248 | } |
| 248 | 249 | ||
| 249 | int sysv_write_inode(struct inode *inode, int wait) | 250 | static int __sysv_write_inode(struct inode *inode, int wait) |
| 250 | { | 251 | { |
| 251 | struct super_block * sb = inode->i_sb; | 252 | struct super_block * sb = inode->i_sb; |
| 252 | struct sysv_sb_info * sbi = SYSV_SB(sb); | 253 | struct sysv_sb_info * sbi = SYSV_SB(sb); |
| @@ -296,9 +297,14 @@ int sysv_write_inode(struct inode *inode, int wait) | |||
| 296 | return 0; | 297 | return 0; |
| 297 | } | 298 | } |
| 298 | 299 | ||
| 300 | int sysv_write_inode(struct inode *inode, struct writeback_control *wbc) | ||
| 301 | { | ||
| 302 | return __sysv_write_inode(inode, wbc->sync_mode == WB_SYNC_ALL); | ||
| 303 | } | ||
| 304 | |||
| 299 | int sysv_sync_inode(struct inode *inode) | 305 | int sysv_sync_inode(struct inode *inode) |
| 300 | { | 306 | { |
| 301 | return sysv_write_inode(inode, 1); | 307 | return __sysv_write_inode(inode, 1); |
| 302 | } | 308 | } |
| 303 | 309 | ||
| 304 | static void sysv_delete_inode(struct inode *inode) | 310 | static void sysv_delete_inode(struct inode *inode) |
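
sysv handles the prototype change by keeping the old int-taking body as a private helper so sysv_sync_inode() can still force a synchronous write. An equivalent alternative, shown only as a hypothetical sketch, is to build a WB_SYNC_ALL writeback_control for the internal path and keep a single wbc-based implementation:

int examplefs_sync_inode(struct inode *inode)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,	/* same effect as the old wait=1 */
	};

	return examplefs_write_inode(inode, &wbc);	/* hypothetical ->write_inode */
}
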
diff --git a/fs/sysv/sysv.h b/fs/sysv/sysv.h index 53786eb5cf60..94cb9b4d76c2 100644 --- a/fs/sysv/sysv.h +++ b/fs/sysv/sysv.h | |||
| @@ -142,7 +142,7 @@ extern int __sysv_write_begin(struct file *file, struct address_space *mapping, | |||
| 142 | 142 | ||
| 143 | /* inode.c */ | 143 | /* inode.c */ |
| 144 | extern struct inode *sysv_iget(struct super_block *, unsigned int); | 144 | extern struct inode *sysv_iget(struct super_block *, unsigned int); |
| 145 | extern int sysv_write_inode(struct inode *, int); | 145 | extern int sysv_write_inode(struct inode *, struct writeback_control *wbc); |
| 146 | extern int sysv_sync_inode(struct inode *); | 146 | extern int sysv_sync_inode(struct inode *); |
| 147 | extern void sysv_set_inode(struct inode *, dev_t); | 147 | extern void sysv_set_inode(struct inode *, dev_t); |
| 148 | extern int sysv_getattr(struct vfsmount *, struct dentry *, struct kstat *); | 148 | extern int sysv_getattr(struct vfsmount *, struct dentry *, struct kstat *); |
diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c index 552fb0111fff..401e503d44a1 100644 --- a/fs/ubifs/dir.c +++ b/fs/ubifs/dir.c | |||
| @@ -1120,7 +1120,7 @@ static int ubifs_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
| 1120 | if (release) | 1120 | if (release) |
| 1121 | ubifs_release_budget(c, &ino_req); | 1121 | ubifs_release_budget(c, &ino_req); |
| 1122 | if (IS_SYNC(old_inode)) | 1122 | if (IS_SYNC(old_inode)) |
| 1123 | err = old_inode->i_sb->s_op->write_inode(old_inode, 1); | 1123 | err = old_inode->i_sb->s_op->write_inode(old_inode, NULL); |
| 1124 | return err; | 1124 | return err; |
| 1125 | 1125 | ||
| 1126 | out_cancel: | 1126 | out_cancel: |
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c index 16a6444330ec..e26c02ab6cd5 100644 --- a/fs/ubifs/file.c +++ b/fs/ubifs/file.c | |||
| @@ -1011,7 +1011,7 @@ static int ubifs_writepage(struct page *page, struct writeback_control *wbc) | |||
| 1011 | /* Is the page fully inside @i_size? */ | 1011 | /* Is the page fully inside @i_size? */ |
| 1012 | if (page->index < end_index) { | 1012 | if (page->index < end_index) { |
| 1013 | if (page->index >= synced_i_size >> PAGE_CACHE_SHIFT) { | 1013 | if (page->index >= synced_i_size >> PAGE_CACHE_SHIFT) { |
| 1014 | err = inode->i_sb->s_op->write_inode(inode, 1); | 1014 | err = inode->i_sb->s_op->write_inode(inode, NULL); |
| 1015 | if (err) | 1015 | if (err) |
| 1016 | goto out_unlock; | 1016 | goto out_unlock; |
| 1017 | /* | 1017 | /* |
| @@ -1039,7 +1039,7 @@ static int ubifs_writepage(struct page *page, struct writeback_control *wbc) | |||
| 1039 | kunmap_atomic(kaddr, KM_USER0); | 1039 | kunmap_atomic(kaddr, KM_USER0); |
| 1040 | 1040 | ||
| 1041 | if (i_size > synced_i_size) { | 1041 | if (i_size > synced_i_size) { |
| 1042 | err = inode->i_sb->s_op->write_inode(inode, 1); | 1042 | err = inode->i_sb->s_op->write_inode(inode, NULL); |
| 1043 | if (err) | 1043 | if (err) |
| 1044 | goto out_unlock; | 1044 | goto out_unlock; |
| 1045 | } | 1045 | } |
| @@ -1242,7 +1242,7 @@ static int do_setattr(struct ubifs_info *c, struct inode *inode, | |||
| 1242 | if (release) | 1242 | if (release) |
| 1243 | ubifs_release_budget(c, &req); | 1243 | ubifs_release_budget(c, &req); |
| 1244 | if (IS_SYNC(inode)) | 1244 | if (IS_SYNC(inode)) |
| 1245 | err = inode->i_sb->s_op->write_inode(inode, 1); | 1245 | err = inode->i_sb->s_op->write_inode(inode, NULL); |
| 1246 | return err; | 1246 | return err; |
| 1247 | 1247 | ||
| 1248 | out: | 1248 | out: |
| @@ -1316,7 +1316,7 @@ int ubifs_fsync(struct file *file, struct dentry *dentry, int datasync) | |||
| 1316 | * the inode unless this is a 'datasync()' call. | 1316 | * the inode unless this is a 'datasync()' call. |
| 1317 | */ | 1317 | */ |
| 1318 | if (!datasync || (inode->i_state & I_DIRTY_DATASYNC)) { | 1318 | if (!datasync || (inode->i_state & I_DIRTY_DATASYNC)) { |
| 1319 | err = inode->i_sb->s_op->write_inode(inode, 1); | 1319 | err = inode->i_sb->s_op->write_inode(inode, NULL); |
| 1320 | if (err) | 1320 | if (err) |
| 1321 | return err; | 1321 | return err; |
| 1322 | } | 1322 | } |
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index 43f9d19a6f33..4d2f2157dd3f 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c | |||
| @@ -283,7 +283,7 @@ static void ubifs_destroy_inode(struct inode *inode) | |||
| 283 | /* | 283 | /* |
| 284 | * Note, Linux write-back code calls this without 'i_mutex'. | 284 | * Note, Linux write-back code calls this without 'i_mutex'. |
| 285 | */ | 285 | */ |
| 286 | static int ubifs_write_inode(struct inode *inode, int wait) | 286 | static int ubifs_write_inode(struct inode *inode, struct writeback_control *wbc) |
| 287 | { | 287 | { |
| 288 | int err = 0; | 288 | int err = 0; |
| 289 | struct ubifs_info *c = inode->i_sb->s_fs_info; | 289 | struct ubifs_info *c = inode->i_sb->s_fs_info; |
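ubifs takes the other route: its ->write_inode() does not consult the writeback_control and always writes the inode out itself, which is why the fsync, setattr and rename paths above can pass a NULL wbc. A hedged sketch of that shape, with foofs_* placeholders rather than the real ubifs helpers:

#include <linux/fs.h>
#include <linux/writeback.h>

int foofs_flush_inode(struct inode *inode);

int foofs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
        /* @wbc is not consulted and may be NULL when called internally;
         * the inode is flushed unconditionally. */
        return foofs_flush_inode(inode);
}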
diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c index b2d96f45c12b..ccc3ad7242d4 100644 --- a/fs/udf/balloc.c +++ b/fs/udf/balloc.c | |||
| @@ -208,7 +208,7 @@ static void udf_bitmap_free_blocks(struct super_block *sb, | |||
| 208 | ((char *)bh->b_data)[(bit + i) >> 3]); | 208 | ((char *)bh->b_data)[(bit + i) >> 3]); |
| 209 | } else { | 209 | } else { |
| 210 | if (inode) | 210 | if (inode) |
| 211 | vfs_dq_free_block(inode, 1); | 211 | dquot_free_block(inode, 1); |
| 212 | udf_add_free_space(sb, sbi->s_partition, 1); | 212 | udf_add_free_space(sb, sbi->s_partition, 1); |
| 213 | } | 213 | } |
| 214 | } | 214 | } |
| @@ -260,11 +260,11 @@ static int udf_bitmap_prealloc_blocks(struct super_block *sb, | |||
| 260 | while (bit < (sb->s_blocksize << 3) && block_count > 0) { | 260 | while (bit < (sb->s_blocksize << 3) && block_count > 0) { |
| 261 | if (!udf_test_bit(bit, bh->b_data)) | 261 | if (!udf_test_bit(bit, bh->b_data)) |
| 262 | goto out; | 262 | goto out; |
| 263 | else if (vfs_dq_prealloc_block(inode, 1)) | 263 | else if (dquot_prealloc_block(inode, 1)) |
| 264 | goto out; | 264 | goto out; |
| 265 | else if (!udf_clear_bit(bit, bh->b_data)) { | 265 | else if (!udf_clear_bit(bit, bh->b_data)) { |
| 266 | udf_debug("bit already cleared for block %d\n", bit); | 266 | udf_debug("bit already cleared for block %d\n", bit); |
| 267 | vfs_dq_free_block(inode, 1); | 267 | dquot_free_block(inode, 1); |
| 268 | goto out; | 268 | goto out; |
| 269 | } | 269 | } |
| 270 | block_count--; | 270 | block_count--; |
| @@ -390,10 +390,14 @@ got_block: | |||
| 390 | /* | 390 | /* |
| 391 | * Check quota for allocation of this block. | 391 | * Check quota for allocation of this block. |
| 392 | */ | 392 | */ |
| 393 | if (inode && vfs_dq_alloc_block(inode, 1)) { | 393 | if (inode) { |
| 394 | mutex_unlock(&sbi->s_alloc_mutex); | 394 | int ret = dquot_alloc_block(inode, 1); |
| 395 | *err = -EDQUOT; | 395 | |
| 396 | return 0; | 396 | if (ret) { |
| 397 | mutex_unlock(&sbi->s_alloc_mutex); | ||
| 398 | *err = ret; | ||
| 399 | return 0; | ||
| 400 | } | ||
| 397 | } | 401 | } |
| 398 | 402 | ||
| 399 | newblock = bit + (block_group << (sb->s_blocksize_bits + 3)) - | 403 | newblock = bit + (block_group << (sb->s_blocksize_bits + 3)) - |
| @@ -449,7 +453,7 @@ static void udf_table_free_blocks(struct super_block *sb, | |||
| 449 | /* We do this up front - There are some error conditions that | 453 | /* We do this up front - There are some error conditions that |
| 450 | could occure, but.. oh well */ | 454 | could occure, but.. oh well */ |
| 451 | if (inode) | 455 | if (inode) |
| 452 | vfs_dq_free_block(inode, count); | 456 | dquot_free_block(inode, count); |
| 453 | udf_add_free_space(sb, sbi->s_partition, count); | 457 | udf_add_free_space(sb, sbi->s_partition, count); |
| 454 | 458 | ||
| 455 | start = bloc->logicalBlockNum + offset; | 459 | start = bloc->logicalBlockNum + offset; |
| @@ -694,7 +698,7 @@ static int udf_table_prealloc_blocks(struct super_block *sb, | |||
| 694 | epos.offset -= adsize; | 698 | epos.offset -= adsize; |
| 695 | 699 | ||
| 696 | alloc_count = (elen >> sb->s_blocksize_bits); | 700 | alloc_count = (elen >> sb->s_blocksize_bits); |
| 697 | if (inode && vfs_dq_prealloc_block(inode, | 701 | if (inode && dquot_prealloc_block(inode, |
| 698 | alloc_count > block_count ? block_count : alloc_count)) | 702 | alloc_count > block_count ? block_count : alloc_count)) |
| 699 | alloc_count = 0; | 703 | alloc_count = 0; |
| 700 | else if (alloc_count > block_count) { | 704 | else if (alloc_count > block_count) { |
| @@ -797,12 +801,13 @@ static int udf_table_new_block(struct super_block *sb, | |||
| 797 | newblock = goal_eloc.logicalBlockNum; | 801 | newblock = goal_eloc.logicalBlockNum; |
| 798 | goal_eloc.logicalBlockNum++; | 802 | goal_eloc.logicalBlockNum++; |
| 799 | goal_elen -= sb->s_blocksize; | 803 | goal_elen -= sb->s_blocksize; |
| 800 | 804 | if (inode) { | |
| 801 | if (inode && vfs_dq_alloc_block(inode, 1)) { | 805 | *err = dquot_alloc_block(inode, 1); |
| 802 | brelse(goal_epos.bh); | 806 | if (*err) { |
| 803 | mutex_unlock(&sbi->s_alloc_mutex); | 807 | brelse(goal_epos.bh); |
| 804 | *err = -EDQUOT; | 808 | mutex_unlock(&sbi->s_alloc_mutex); |
| 805 | return 0; | 809 | return 0; |
| 810 | } | ||
| 806 | } | 811 | } |
| 807 | 812 | ||
| 808 | if (goal_elen) | 813 | if (goal_elen) |
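The balloc.c hunks all make the same substitution: dquot_alloc_block() and dquot_prealloc_block() return an errno themselves, so the caller propagates that value instead of hard-coding -EDQUOT as the old boolean vfs_dq_* interface required. A minimal sketch of the error-propagation pattern, with foofs_pick_free_block() standing in for the filesystem's own bitmap search:

#include <linux/fs.h>
#include <linux/quotaops.h>

sector_t foofs_pick_free_block(struct inode *inode);

sector_t foofs_new_block(struct inode *inode, int *err)
{
        int ret = dquot_alloc_block(inode, 1);

        if (ret) {
                *err = ret;     /* -EDQUOT or whatever quota reported */
                return 0;       /* no block allocated */
        }
        *err = 0;
        return foofs_pick_free_block(inode);
}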
diff --git a/fs/udf/file.c b/fs/udf/file.c index f311d509b6a3..1eb06774ed90 100644 --- a/fs/udf/file.c +++ b/fs/udf/file.c | |||
| @@ -34,6 +34,7 @@ | |||
| 34 | #include <linux/errno.h> | 34 | #include <linux/errno.h> |
| 35 | #include <linux/smp_lock.h> | 35 | #include <linux/smp_lock.h> |
| 36 | #include <linux/pagemap.h> | 36 | #include <linux/pagemap.h> |
| 37 | #include <linux/quotaops.h> | ||
| 37 | #include <linux/buffer_head.h> | 38 | #include <linux/buffer_head.h> |
| 38 | #include <linux/aio.h> | 39 | #include <linux/aio.h> |
| 39 | 40 | ||
| @@ -207,7 +208,7 @@ const struct file_operations udf_file_operations = { | |||
| 207 | .read = do_sync_read, | 208 | .read = do_sync_read, |
| 208 | .aio_read = generic_file_aio_read, | 209 | .aio_read = generic_file_aio_read, |
| 209 | .ioctl = udf_ioctl, | 210 | .ioctl = udf_ioctl, |
| 210 | .open = generic_file_open, | 211 | .open = dquot_file_open, |
| 211 | .mmap = generic_file_mmap, | 212 | .mmap = generic_file_mmap, |
| 212 | .write = do_sync_write, | 213 | .write = do_sync_write, |
| 213 | .aio_write = udf_file_aio_write, | 214 | .aio_write = udf_file_aio_write, |
| @@ -217,6 +218,29 @@ const struct file_operations udf_file_operations = { | |||
| 217 | .llseek = generic_file_llseek, | 218 | .llseek = generic_file_llseek, |
| 218 | }; | 219 | }; |
| 219 | 220 | ||
| 221 | static int udf_setattr(struct dentry *dentry, struct iattr *iattr) | ||
| 222 | { | ||
| 223 | struct inode *inode = dentry->d_inode; | ||
| 224 | int error; | ||
| 225 | |||
| 226 | error = inode_change_ok(inode, iattr); | ||
| 227 | if (error) | ||
| 228 | return error; | ||
| 229 | |||
| 230 | if (iattr->ia_valid & ATTR_SIZE) | ||
| 231 | dquot_initialize(inode); | ||
| 232 | |||
| 233 | if ((iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid) || | ||
| 234 | (iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid)) { | ||
| 235 | error = dquot_transfer(inode, iattr); | ||
| 236 | if (error) | ||
| 237 | return error; | ||
| 238 | } | ||
| 239 | |||
| 240 | return inode_setattr(inode, iattr); | ||
| 241 | } | ||
| 242 | |||
| 220 | const struct inode_operations udf_file_inode_operations = { | 243 | const struct inode_operations udf_file_inode_operations = { |
| 221 | .truncate = udf_truncate, | 244 | .truncate = udf_truncate, |
| 245 | .setattr = udf_setattr, | ||
| 222 | }; | 246 | }; |
diff --git a/fs/udf/ialloc.c b/fs/udf/ialloc.c index c10fa39f97e2..fb68c9cd0c3e 100644 --- a/fs/udf/ialloc.c +++ b/fs/udf/ialloc.c | |||
| @@ -36,8 +36,8 @@ void udf_free_inode(struct inode *inode) | |||
| 36 | * Note: we must free any quota before locking the superblock, | 36 | * Note: we must free any quota before locking the superblock, |
| 37 | * as writing the quota to disk may need the lock as well. | 37 | * as writing the quota to disk may need the lock as well. |
| 38 | */ | 38 | */ |
| 39 | vfs_dq_free_inode(inode); | 39 | dquot_free_inode(inode); |
| 40 | vfs_dq_drop(inode); | 40 | dquot_drop(inode); |
| 41 | 41 | ||
| 42 | clear_inode(inode); | 42 | clear_inode(inode); |
| 43 | 43 | ||
| @@ -61,7 +61,7 @@ struct inode *udf_new_inode(struct inode *dir, int mode, int *err) | |||
| 61 | struct super_block *sb = dir->i_sb; | 61 | struct super_block *sb = dir->i_sb; |
| 62 | struct udf_sb_info *sbi = UDF_SB(sb); | 62 | struct udf_sb_info *sbi = UDF_SB(sb); |
| 63 | struct inode *inode; | 63 | struct inode *inode; |
| 64 | int block; | 64 | int block, ret; |
| 65 | uint32_t start = UDF_I(dir)->i_location.logicalBlockNum; | 65 | uint32_t start = UDF_I(dir)->i_location.logicalBlockNum; |
| 66 | struct udf_inode_info *iinfo; | 66 | struct udf_inode_info *iinfo; |
| 67 | struct udf_inode_info *dinfo = UDF_I(dir); | 67 | struct udf_inode_info *dinfo = UDF_I(dir); |
| @@ -153,12 +153,14 @@ struct inode *udf_new_inode(struct inode *dir, int mode, int *err) | |||
| 153 | insert_inode_hash(inode); | 153 | insert_inode_hash(inode); |
| 154 | mark_inode_dirty(inode); | 154 | mark_inode_dirty(inode); |
| 155 | 155 | ||
| 156 | if (vfs_dq_alloc_inode(inode)) { | 156 | dquot_initialize(inode); |
| 157 | vfs_dq_drop(inode); | 157 | ret = dquot_alloc_inode(inode); |
| 158 | if (ret) { | ||
| 159 | dquot_drop(inode); | ||
| 158 | inode->i_flags |= S_NOQUOTA; | 160 | inode->i_flags |= S_NOQUOTA; |
| 159 | inode->i_nlink = 0; | 161 | inode->i_nlink = 0; |
| 160 | iput(inode); | 162 | iput(inode); |
| 161 | *err = -EDQUOT; | 163 | *err = ret; |
| 162 | return NULL; | 164 | return NULL; |
| 163 | } | 165 | } |
| 164 | 166 | ||
diff --git a/fs/udf/inode.c b/fs/udf/inode.c index 378a7592257c..b57ab0402d89 100644 --- a/fs/udf/inode.c +++ b/fs/udf/inode.c | |||
| @@ -36,6 +36,7 @@ | |||
| 36 | #include <linux/pagemap.h> | 36 | #include <linux/pagemap.h> |
| 37 | #include <linux/buffer_head.h> | 37 | #include <linux/buffer_head.h> |
| 38 | #include <linux/writeback.h> | 38 | #include <linux/writeback.h> |
| 39 | #include <linux/quotaops.h> | ||
| 39 | #include <linux/slab.h> | 40 | #include <linux/slab.h> |
| 40 | #include <linux/crc-itu-t.h> | 41 | #include <linux/crc-itu-t.h> |
| 41 | 42 | ||
| @@ -70,6 +71,9 @@ static int udf_get_block(struct inode *, sector_t, struct buffer_head *, int); | |||
| 70 | 71 | ||
| 71 | void udf_delete_inode(struct inode *inode) | 72 | void udf_delete_inode(struct inode *inode) |
| 72 | { | 73 | { |
| 74 | if (!is_bad_inode(inode)) | ||
| 75 | dquot_initialize(inode); | ||
| 76 | |||
| 73 | truncate_inode_pages(&inode->i_data, 0); | 77 | truncate_inode_pages(&inode->i_data, 0); |
| 74 | 78 | ||
| 75 | if (is_bad_inode(inode)) | 79 | if (is_bad_inode(inode)) |
| @@ -108,6 +112,8 @@ void udf_clear_inode(struct inode *inode) | |||
| 108 | (unsigned long long)inode->i_size, | 112 | (unsigned long long)inode->i_size, |
| 109 | (unsigned long long)iinfo->i_lenExtents); | 113 | (unsigned long long)iinfo->i_lenExtents); |
| 110 | } | 114 | } |
| 115 | |||
| 116 | dquot_drop(inode); | ||
| 111 | kfree(iinfo->i_ext.i_data); | 117 | kfree(iinfo->i_ext.i_data); |
| 112 | iinfo->i_ext.i_data = NULL; | 118 | iinfo->i_ext.i_data = NULL; |
| 113 | } | 119 | } |
| @@ -1373,12 +1379,12 @@ static mode_t udf_convert_permissions(struct fileEntry *fe) | |||
| 1373 | return mode; | 1379 | return mode; |
| 1374 | } | 1380 | } |
| 1375 | 1381 | ||
| 1376 | int udf_write_inode(struct inode *inode, int sync) | 1382 | int udf_write_inode(struct inode *inode, struct writeback_control *wbc) |
| 1377 | { | 1383 | { |
| 1378 | int ret; | 1384 | int ret; |
| 1379 | 1385 | ||
| 1380 | lock_kernel(); | 1386 | lock_kernel(); |
| 1381 | ret = udf_update_inode(inode, sync); | 1387 | ret = udf_update_inode(inode, wbc->sync_mode == WB_SYNC_ALL); |
| 1382 | unlock_kernel(); | 1388 | unlock_kernel(); |
| 1383 | 1389 | ||
| 1384 | return ret; | 1390 | return ret; |
diff --git a/fs/udf/namei.c b/fs/udf/namei.c index 7c56ff00cd53..db423ab078b1 100644 --- a/fs/udf/namei.c +++ b/fs/udf/namei.c | |||
| @@ -563,6 +563,8 @@ static int udf_create(struct inode *dir, struct dentry *dentry, int mode, | |||
| 563 | int err; | 563 | int err; |
| 564 | struct udf_inode_info *iinfo; | 564 | struct udf_inode_info *iinfo; |
| 565 | 565 | ||
| 566 | dquot_initialize(dir); | ||
| 567 | |||
| 566 | lock_kernel(); | 568 | lock_kernel(); |
| 567 | inode = udf_new_inode(dir, mode, &err); | 569 | inode = udf_new_inode(dir, mode, &err); |
| 568 | if (!inode) { | 570 | if (!inode) { |
| @@ -616,6 +618,8 @@ static int udf_mknod(struct inode *dir, struct dentry *dentry, int mode, | |||
| 616 | if (!old_valid_dev(rdev)) | 618 | if (!old_valid_dev(rdev)) |
| 617 | return -EINVAL; | 619 | return -EINVAL; |
| 618 | 620 | ||
| 621 | dquot_initialize(dir); | ||
| 622 | |||
| 619 | lock_kernel(); | 623 | lock_kernel(); |
| 620 | err = -EIO; | 624 | err = -EIO; |
| 621 | inode = udf_new_inode(dir, mode, &err); | 625 | inode = udf_new_inode(dir, mode, &err); |
| @@ -662,6 +666,8 @@ static int udf_mkdir(struct inode *dir, struct dentry *dentry, int mode) | |||
| 662 | struct udf_inode_info *dinfo = UDF_I(dir); | 666 | struct udf_inode_info *dinfo = UDF_I(dir); |
| 663 | struct udf_inode_info *iinfo; | 667 | struct udf_inode_info *iinfo; |
| 664 | 668 | ||
| 669 | dquot_initialize(dir); | ||
| 670 | |||
| 665 | lock_kernel(); | 671 | lock_kernel(); |
| 666 | err = -EMLINK; | 672 | err = -EMLINK; |
| 667 | if (dir->i_nlink >= (256 << sizeof(dir->i_nlink)) - 1) | 673 | if (dir->i_nlink >= (256 << sizeof(dir->i_nlink)) - 1) |
| @@ -799,6 +805,8 @@ static int udf_rmdir(struct inode *dir, struct dentry *dentry) | |||
| 799 | struct fileIdentDesc *fi, cfi; | 805 | struct fileIdentDesc *fi, cfi; |
| 800 | struct kernel_lb_addr tloc; | 806 | struct kernel_lb_addr tloc; |
| 801 | 807 | ||
| 808 | dquot_initialize(dir); | ||
| 809 | |||
| 802 | retval = -ENOENT; | 810 | retval = -ENOENT; |
| 803 | lock_kernel(); | 811 | lock_kernel(); |
| 804 | fi = udf_find_entry(dir, &dentry->d_name, &fibh, &cfi); | 812 | fi = udf_find_entry(dir, &dentry->d_name, &fibh, &cfi); |
| @@ -845,6 +853,8 @@ static int udf_unlink(struct inode *dir, struct dentry *dentry) | |||
| 845 | struct fileIdentDesc cfi; | 853 | struct fileIdentDesc cfi; |
| 846 | struct kernel_lb_addr tloc; | 854 | struct kernel_lb_addr tloc; |
| 847 | 855 | ||
| 856 | dquot_initialize(dir); | ||
| 857 | |||
| 848 | retval = -ENOENT; | 858 | retval = -ENOENT; |
| 849 | lock_kernel(); | 859 | lock_kernel(); |
| 850 | fi = udf_find_entry(dir, &dentry->d_name, &fibh, &cfi); | 860 | fi = udf_find_entry(dir, &dentry->d_name, &fibh, &cfi); |
| @@ -899,6 +909,8 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry, | |||
| 899 | struct buffer_head *bh; | 909 | struct buffer_head *bh; |
| 900 | struct udf_inode_info *iinfo; | 910 | struct udf_inode_info *iinfo; |
| 901 | 911 | ||
| 912 | dquot_initialize(dir); | ||
| 913 | |||
| 902 | lock_kernel(); | 914 | lock_kernel(); |
| 903 | inode = udf_new_inode(dir, S_IFLNK, &err); | 915 | inode = udf_new_inode(dir, S_IFLNK, &err); |
| 904 | if (!inode) | 916 | if (!inode) |
| @@ -1069,6 +1081,8 @@ static int udf_link(struct dentry *old_dentry, struct inode *dir, | |||
| 1069 | int err; | 1081 | int err; |
| 1070 | struct buffer_head *bh; | 1082 | struct buffer_head *bh; |
| 1071 | 1083 | ||
| 1084 | dquot_initialize(dir); | ||
| 1085 | |||
| 1072 | lock_kernel(); | 1086 | lock_kernel(); |
| 1073 | if (inode->i_nlink >= (256 << sizeof(inode->i_nlink)) - 1) { | 1087 | if (inode->i_nlink >= (256 << sizeof(inode->i_nlink)) - 1) { |
| 1074 | unlock_kernel(); | 1088 | unlock_kernel(); |
| @@ -1131,6 +1145,9 @@ static int udf_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
| 1131 | struct kernel_lb_addr tloc; | 1145 | struct kernel_lb_addr tloc; |
| 1132 | struct udf_inode_info *old_iinfo = UDF_I(old_inode); | 1146 | struct udf_inode_info *old_iinfo = UDF_I(old_inode); |
| 1133 | 1147 | ||
| 1148 | dquot_initialize(old_dir); | ||
| 1149 | dquot_initialize(new_dir); | ||
| 1150 | |||
| 1134 | lock_kernel(); | 1151 | lock_kernel(); |
| 1135 | ofi = udf_find_entry(old_dir, &old_dentry->d_name, &ofibh, &ocfi); | 1152 | ofi = udf_find_entry(old_dir, &old_dentry->d_name, &ofibh, &ocfi); |
| 1136 | if (ofi) { | 1153 | if (ofi) { |
diff --git a/fs/udf/udfdecl.h b/fs/udf/udfdecl.h index 8d46f4294ee7..4223ac855da9 100644 --- a/fs/udf/udfdecl.h +++ b/fs/udf/udfdecl.h | |||
| @@ -142,7 +142,7 @@ extern void udf_truncate(struct inode *); | |||
| 142 | extern void udf_read_inode(struct inode *); | 142 | extern void udf_read_inode(struct inode *); |
| 143 | extern void udf_delete_inode(struct inode *); | 143 | extern void udf_delete_inode(struct inode *); |
| 144 | extern void udf_clear_inode(struct inode *); | 144 | extern void udf_clear_inode(struct inode *); |
| 145 | extern int udf_write_inode(struct inode *, int); | 145 | extern int udf_write_inode(struct inode *, struct writeback_control *wbc); |
| 146 | extern long udf_block_map(struct inode *, sector_t); | 146 | extern long udf_block_map(struct inode *, sector_t); |
| 147 | extern int udf_extend_file(struct inode *, struct extent_position *, | 147 | extern int udf_extend_file(struct inode *, struct extent_position *, |
| 148 | struct kernel_long_ad *, sector_t); | 148 | struct kernel_long_ad *, sector_t); |
diff --git a/fs/ufs/balloc.c b/fs/ufs/balloc.c index 54c16ec95dff..5cfa4d85ccf2 100644 --- a/fs/ufs/balloc.c +++ b/fs/ufs/balloc.c | |||
| @@ -85,7 +85,7 @@ void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count) | |||
| 85 | "bit already cleared for fragment %u", i); | 85 | "bit already cleared for fragment %u", i); |
| 86 | } | 86 | } |
| 87 | 87 | ||
| 88 | vfs_dq_free_block(inode, count); | 88 | dquot_free_block(inode, count); |
| 89 | 89 | ||
| 90 | 90 | ||
| 91 | fs32_add(sb, &ucg->cg_cs.cs_nffree, count); | 91 | fs32_add(sb, &ucg->cg_cs.cs_nffree, count); |
| @@ -195,7 +195,7 @@ do_more: | |||
| 195 | ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno); | 195 | ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno); |
| 196 | if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD) | 196 | if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD) |
| 197 | ufs_clusteracct (sb, ucpi, blkno, 1); | 197 | ufs_clusteracct (sb, ucpi, blkno, 1); |
| 198 | vfs_dq_free_block(inode, uspi->s_fpb); | 198 | dquot_free_block(inode, uspi->s_fpb); |
| 199 | 199 | ||
| 200 | fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1); | 200 | fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1); |
| 201 | uspi->cs_total.cs_nbfree++; | 201 | uspi->cs_total.cs_nbfree++; |
| @@ -511,6 +511,7 @@ static u64 ufs_add_fragments(struct inode *inode, u64 fragment, | |||
| 511 | struct ufs_cg_private_info * ucpi; | 511 | struct ufs_cg_private_info * ucpi; |
| 512 | struct ufs_cylinder_group * ucg; | 512 | struct ufs_cylinder_group * ucg; |
| 513 | unsigned cgno, fragno, fragoff, count, fragsize, i; | 513 | unsigned cgno, fragno, fragoff, count, fragsize, i; |
| 514 | int ret; | ||
| 514 | 515 | ||
| 515 | UFSD("ENTER, fragment %llu, oldcount %u, newcount %u\n", | 516 | UFSD("ENTER, fragment %llu, oldcount %u, newcount %u\n", |
| 516 | (unsigned long long)fragment, oldcount, newcount); | 517 | (unsigned long long)fragment, oldcount, newcount); |
| @@ -556,8 +557,9 @@ static u64 ufs_add_fragments(struct inode *inode, u64 fragment, | |||
| 556 | fs32_add(sb, &ucg->cg_frsum[fragsize - count], 1); | 557 | fs32_add(sb, &ucg->cg_frsum[fragsize - count], 1); |
| 557 | for (i = oldcount; i < newcount; i++) | 558 | for (i = oldcount; i < newcount; i++) |
| 558 | ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i); | 559 | ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i); |
| 559 | if (vfs_dq_alloc_block(inode, count)) { | 560 | ret = dquot_alloc_block(inode, count); |
| 560 | *err = -EDQUOT; | 561 | if (ret) { |
| 562 | *err = ret; | ||
| 561 | return 0; | 563 | return 0; |
| 562 | } | 564 | } |
| 563 | 565 | ||
| @@ -596,6 +598,7 @@ static u64 ufs_alloc_fragments(struct inode *inode, unsigned cgno, | |||
| 596 | struct ufs_cylinder_group * ucg; | 598 | struct ufs_cylinder_group * ucg; |
| 597 | unsigned oldcg, i, j, k, allocsize; | 599 | unsigned oldcg, i, j, k, allocsize; |
| 598 | u64 result; | 600 | u64 result; |
| 601 | int ret; | ||
| 599 | 602 | ||
| 600 | UFSD("ENTER, ino %lu, cgno %u, goal %llu, count %u\n", | 603 | UFSD("ENTER, ino %lu, cgno %u, goal %llu, count %u\n", |
| 601 | inode->i_ino, cgno, (unsigned long long)goal, count); | 604 | inode->i_ino, cgno, (unsigned long long)goal, count); |
| @@ -664,7 +667,7 @@ cg_found: | |||
| 664 | for (i = count; i < uspi->s_fpb; i++) | 667 | for (i = count; i < uspi->s_fpb; i++) |
| 665 | ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i); | 668 | ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i); |
| 666 | i = uspi->s_fpb - count; | 669 | i = uspi->s_fpb - count; |
| 667 | vfs_dq_free_block(inode, i); | 670 | dquot_free_block(inode, i); |
| 668 | 671 | ||
| 669 | fs32_add(sb, &ucg->cg_cs.cs_nffree, i); | 672 | fs32_add(sb, &ucg->cg_cs.cs_nffree, i); |
| 670 | uspi->cs_total.cs_nffree += i; | 673 | uspi->cs_total.cs_nffree += i; |
| @@ -676,8 +679,9 @@ cg_found: | |||
| 676 | result = ufs_bitmap_search (sb, ucpi, goal, allocsize); | 679 | result = ufs_bitmap_search (sb, ucpi, goal, allocsize); |
| 677 | if (result == INVBLOCK) | 680 | if (result == INVBLOCK) |
| 678 | return 0; | 681 | return 0; |
| 679 | if (vfs_dq_alloc_block(inode, count)) { | 682 | ret = dquot_alloc_block(inode, count); |
| 680 | *err = -EDQUOT; | 683 | if (ret) { |
| 684 | *err = ret; | ||
| 681 | return 0; | 685 | return 0; |
| 682 | } | 686 | } |
| 683 | for (i = 0; i < count; i++) | 687 | for (i = 0; i < count; i++) |
| @@ -714,6 +718,7 @@ static u64 ufs_alloccg_block(struct inode *inode, | |||
| 714 | struct ufs_super_block_first * usb1; | 718 | struct ufs_super_block_first * usb1; |
| 715 | struct ufs_cylinder_group * ucg; | 719 | struct ufs_cylinder_group * ucg; |
| 716 | u64 result, blkno; | 720 | u64 result, blkno; |
| 721 | int ret; | ||
| 717 | 722 | ||
| 718 | UFSD("ENTER, goal %llu\n", (unsigned long long)goal); | 723 | UFSD("ENTER, goal %llu\n", (unsigned long long)goal); |
| 719 | 724 | ||
| @@ -747,8 +752,9 @@ gotit: | |||
| 747 | ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno); | 752 | ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno); |
| 748 | if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD) | 753 | if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD) |
| 749 | ufs_clusteracct (sb, ucpi, blkno, -1); | 754 | ufs_clusteracct (sb, ucpi, blkno, -1); |
| 750 | if (vfs_dq_alloc_block(inode, uspi->s_fpb)) { | 755 | ret = dquot_alloc_block(inode, uspi->s_fpb); |
| 751 | *err = -EDQUOT; | 756 | if (ret) { |
| 757 | *err = ret; | ||
| 752 | return INVBLOCK; | 758 | return INVBLOCK; |
| 753 | } | 759 | } |
| 754 | 760 | ||
diff --git a/fs/ufs/file.c b/fs/ufs/file.c index 73655c61240a..a8962cecde5b 100644 --- a/fs/ufs/file.c +++ b/fs/ufs/file.c | |||
| @@ -24,6 +24,7 @@ | |||
| 24 | */ | 24 | */ |
| 25 | 25 | ||
| 26 | #include <linux/fs.h> | 26 | #include <linux/fs.h> |
| 27 | #include <linux/quotaops.h> | ||
| 27 | 28 | ||
| 28 | #include "ufs_fs.h" | 29 | #include "ufs_fs.h" |
| 29 | #include "ufs.h" | 30 | #include "ufs.h" |
| @@ -40,7 +41,7 @@ const struct file_operations ufs_file_operations = { | |||
| 40 | .write = do_sync_write, | 41 | .write = do_sync_write, |
| 41 | .aio_write = generic_file_aio_write, | 42 | .aio_write = generic_file_aio_write, |
| 42 | .mmap = generic_file_mmap, | 43 | .mmap = generic_file_mmap, |
| 43 | .open = generic_file_open, | 44 | .open = dquot_file_open, |
| 44 | .fsync = simple_fsync, | 45 | .fsync = simple_fsync, |
| 45 | .splice_read = generic_file_splice_read, | 46 | .splice_read = generic_file_splice_read, |
| 46 | }; | 47 | }; |
diff --git a/fs/ufs/ialloc.c b/fs/ufs/ialloc.c index 3527c00fef0d..230ecf608026 100644 --- a/fs/ufs/ialloc.c +++ b/fs/ufs/ialloc.c | |||
| @@ -95,8 +95,8 @@ void ufs_free_inode (struct inode * inode) | |||
| 95 | 95 | ||
| 96 | is_directory = S_ISDIR(inode->i_mode); | 96 | is_directory = S_ISDIR(inode->i_mode); |
| 97 | 97 | ||
| 98 | vfs_dq_free_inode(inode); | 98 | dquot_free_inode(inode); |
| 99 | vfs_dq_drop(inode); | 99 | dquot_drop(inode); |
| 100 | 100 | ||
| 101 | clear_inode (inode); | 101 | clear_inode (inode); |
| 102 | 102 | ||
| @@ -355,9 +355,10 @@ cg_found: | |||
| 355 | 355 | ||
| 356 | unlock_super (sb); | 356 | unlock_super (sb); |
| 357 | 357 | ||
| 358 | if (vfs_dq_alloc_inode(inode)) { | 358 | dquot_initialize(inode); |
| 359 | vfs_dq_drop(inode); | 359 | err = dquot_alloc_inode(inode); |
| 360 | err = -EDQUOT; | 360 | if (err) { |
| 361 | dquot_drop(inode); | ||
| 361 | goto fail_without_unlock; | 362 | goto fail_without_unlock; |
| 362 | } | 363 | } |
| 363 | 364 | ||
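Inode allocation gets the matching treatment: dquots are attached with dquot_initialize(), the new inode is charged with dquot_alloc_inode(), and the references are dropped again if the charge fails. Sketched below with a placeholder foofs_ prefix; this is the shape of the change, not the real ufs code:

#include <linux/fs.h>
#include <linux/quotaops.h>

int foofs_charge_new_inode(struct inode *inode)
{
        int err;

        dquot_initialize(inode);
        err = dquot_alloc_inode(inode);
        if (err)
                dquot_drop(inode);      /* give the dquot references back */
        return err;
}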
diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c index 7cf33379fd46..80b68c3702d1 100644 --- a/fs/ufs/inode.c +++ b/fs/ufs/inode.c | |||
| @@ -36,6 +36,8 @@ | |||
| 36 | #include <linux/mm.h> | 36 | #include <linux/mm.h> |
| 37 | #include <linux/smp_lock.h> | 37 | #include <linux/smp_lock.h> |
| 38 | #include <linux/buffer_head.h> | 38 | #include <linux/buffer_head.h> |
| 39 | #include <linux/writeback.h> | ||
| 40 | #include <linux/quotaops.h> | ||
| 39 | 41 | ||
| 40 | #include "ufs_fs.h" | 42 | #include "ufs_fs.h" |
| 41 | #include "ufs.h" | 43 | #include "ufs.h" |
| @@ -890,11 +892,11 @@ static int ufs_update_inode(struct inode * inode, int do_sync) | |||
| 890 | return 0; | 892 | return 0; |
| 891 | } | 893 | } |
| 892 | 894 | ||
| 893 | int ufs_write_inode (struct inode * inode, int wait) | 895 | int ufs_write_inode(struct inode *inode, struct writeback_control *wbc) |
| 894 | { | 896 | { |
| 895 | int ret; | 897 | int ret; |
| 896 | lock_kernel(); | 898 | lock_kernel(); |
| 897 | ret = ufs_update_inode (inode, wait); | 899 | ret = ufs_update_inode(inode, wbc->sync_mode == WB_SYNC_ALL); |
| 898 | unlock_kernel(); | 900 | unlock_kernel(); |
| 899 | return ret; | 901 | return ret; |
| 900 | } | 902 | } |
| @@ -908,6 +910,9 @@ void ufs_delete_inode (struct inode * inode) | |||
| 908 | { | 910 | { |
| 909 | loff_t old_i_size; | 911 | loff_t old_i_size; |
| 910 | 912 | ||
| 913 | if (!is_bad_inode(inode)) | ||
| 914 | dquot_initialize(inode); | ||
| 915 | |||
| 911 | truncate_inode_pages(&inode->i_data, 0); | 916 | truncate_inode_pages(&inode->i_data, 0); |
| 912 | if (is_bad_inode(inode)) | 917 | if (is_bad_inode(inode)) |
| 913 | goto no_delete; | 918 | goto no_delete; |
diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c index 4c26d9e8bc94..118556243e7a 100644 --- a/fs/ufs/namei.c +++ b/fs/ufs/namei.c | |||
| @@ -30,6 +30,7 @@ | |||
| 30 | #include <linux/time.h> | 30 | #include <linux/time.h> |
| 31 | #include <linux/fs.h> | 31 | #include <linux/fs.h> |
| 32 | #include <linux/smp_lock.h> | 32 | #include <linux/smp_lock.h> |
| 33 | #include <linux/quotaops.h> | ||
| 33 | 34 | ||
| 34 | #include "ufs_fs.h" | 35 | #include "ufs_fs.h" |
| 35 | #include "ufs.h" | 36 | #include "ufs.h" |
| @@ -84,6 +85,9 @@ static int ufs_create (struct inode * dir, struct dentry * dentry, int mode, | |||
| 84 | int err; | 85 | int err; |
| 85 | 86 | ||
| 86 | UFSD("BEGIN\n"); | 87 | UFSD("BEGIN\n"); |
| 88 | |||
| 89 | dquot_initialize(dir); | ||
| 90 | |||
| 87 | inode = ufs_new_inode(dir, mode); | 91 | inode = ufs_new_inode(dir, mode); |
| 88 | err = PTR_ERR(inode); | 92 | err = PTR_ERR(inode); |
| 89 | 93 | ||
| @@ -107,6 +111,9 @@ static int ufs_mknod (struct inode * dir, struct dentry *dentry, int mode, dev_t | |||
| 107 | 111 | ||
| 108 | if (!old_valid_dev(rdev)) | 112 | if (!old_valid_dev(rdev)) |
| 109 | return -EINVAL; | 113 | return -EINVAL; |
| 114 | |||
| 115 | dquot_initialize(dir); | ||
| 116 | |||
| 110 | inode = ufs_new_inode(dir, mode); | 117 | inode = ufs_new_inode(dir, mode); |
| 111 | err = PTR_ERR(inode); | 118 | err = PTR_ERR(inode); |
| 112 | if (!IS_ERR(inode)) { | 119 | if (!IS_ERR(inode)) { |
| @@ -131,6 +138,8 @@ static int ufs_symlink (struct inode * dir, struct dentry * dentry, | |||
| 131 | if (l > sb->s_blocksize) | 138 | if (l > sb->s_blocksize) |
| 132 | goto out_notlocked; | 139 | goto out_notlocked; |
| 133 | 140 | ||
| 141 | dquot_initialize(dir); | ||
| 142 | |||
| 134 | lock_kernel(); | 143 | lock_kernel(); |
| 135 | inode = ufs_new_inode(dir, S_IFLNK | S_IRWXUGO); | 144 | inode = ufs_new_inode(dir, S_IFLNK | S_IRWXUGO); |
| 136 | err = PTR_ERR(inode); | 145 | err = PTR_ERR(inode); |
| @@ -176,6 +185,8 @@ static int ufs_link (struct dentry * old_dentry, struct inode * dir, | |||
| 176 | return -EMLINK; | 185 | return -EMLINK; |
| 177 | } | 186 | } |
| 178 | 187 | ||
| 188 | dquot_initialize(dir); | ||
| 189 | |||
| 179 | inode->i_ctime = CURRENT_TIME_SEC; | 190 | inode->i_ctime = CURRENT_TIME_SEC; |
| 180 | inode_inc_link_count(inode); | 191 | inode_inc_link_count(inode); |
| 181 | atomic_inc(&inode->i_count); | 192 | atomic_inc(&inode->i_count); |
| @@ -193,6 +204,8 @@ static int ufs_mkdir(struct inode * dir, struct dentry * dentry, int mode) | |||
| 193 | if (dir->i_nlink >= UFS_LINK_MAX) | 204 | if (dir->i_nlink >= UFS_LINK_MAX) |
| 194 | goto out; | 205 | goto out; |
| 195 | 206 | ||
| 207 | dquot_initialize(dir); | ||
| 208 | |||
| 196 | lock_kernel(); | 209 | lock_kernel(); |
| 197 | inode_inc_link_count(dir); | 210 | inode_inc_link_count(dir); |
| 198 | 211 | ||
| @@ -237,6 +250,8 @@ static int ufs_unlink(struct inode *dir, struct dentry *dentry) | |||
| 237 | struct page *page; | 250 | struct page *page; |
| 238 | int err = -ENOENT; | 251 | int err = -ENOENT; |
| 239 | 252 | ||
| 253 | dquot_initialize(dir); | ||
| 254 | |||
| 240 | de = ufs_find_entry(dir, &dentry->d_name, &page); | 255 | de = ufs_find_entry(dir, &dentry->d_name, &page); |
| 241 | if (!de) | 256 | if (!de) |
| 242 | goto out; | 257 | goto out; |
| @@ -281,6 +296,9 @@ static int ufs_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
| 281 | struct ufs_dir_entry *old_de; | 296 | struct ufs_dir_entry *old_de; |
| 282 | int err = -ENOENT; | 297 | int err = -ENOENT; |
| 283 | 298 | ||
| 299 | dquot_initialize(old_dir); | ||
| 300 | dquot_initialize(new_dir); | ||
| 301 | |||
| 284 | old_de = ufs_find_entry(old_dir, &old_dentry->d_name, &old_page); | 302 | old_de = ufs_find_entry(old_dir, &old_dentry->d_name, &old_page); |
| 285 | if (!old_de) | 303 | if (!old_de) |
| 286 | goto out; | 304 | goto out; |
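Every directory-modifying entry point in namei.c now begins by making sure the parent directory's dquots are attached, since the operation will charge or release blocks and inodes against it; rename does the same for both directories. The generic shape, using hypothetical foofs_* helpers:

#include <linux/fs.h>
#include <linux/quotaops.h>

int foofs_do_unlink(struct inode *dir, struct dentry *dentry);

int foofs_unlink(struct inode *dir, struct dentry *dentry)
{
        /* Attach dquots before the entry is removed and space is freed. */
        dquot_initialize(dir);

        return foofs_do_unlink(dir, dentry);
}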
diff --git a/fs/ufs/super.c b/fs/ufs/super.c index 143c20bfb04b..66b63a751615 100644 --- a/fs/ufs/super.c +++ b/fs/ufs/super.c | |||
| @@ -1432,6 +1432,11 @@ static void destroy_inodecache(void) | |||
| 1432 | kmem_cache_destroy(ufs_inode_cachep); | 1432 | kmem_cache_destroy(ufs_inode_cachep); |
| 1433 | } | 1433 | } |
| 1434 | 1434 | ||
| 1435 | static void ufs_clear_inode(struct inode *inode) | ||
| 1436 | { | ||
| 1437 | dquot_drop(inode); | ||
| 1438 | } | ||
| 1439 | |||
| 1435 | #ifdef CONFIG_QUOTA | 1440 | #ifdef CONFIG_QUOTA |
| 1436 | static ssize_t ufs_quota_read(struct super_block *, int, char *,size_t, loff_t); | 1441 | static ssize_t ufs_quota_read(struct super_block *, int, char *,size_t, loff_t); |
| 1437 | static ssize_t ufs_quota_write(struct super_block *, int, const char *, size_t, loff_t); | 1442 | static ssize_t ufs_quota_write(struct super_block *, int, const char *, size_t, loff_t); |
| @@ -1442,6 +1447,7 @@ static const struct super_operations ufs_super_ops = { | |||
| 1442 | .destroy_inode = ufs_destroy_inode, | 1447 | .destroy_inode = ufs_destroy_inode, |
| 1443 | .write_inode = ufs_write_inode, | 1448 | .write_inode = ufs_write_inode, |
| 1444 | .delete_inode = ufs_delete_inode, | 1449 | .delete_inode = ufs_delete_inode, |
| 1450 | .clear_inode = ufs_clear_inode, | ||
| 1445 | .put_super = ufs_put_super, | 1451 | .put_super = ufs_put_super, |
| 1446 | .write_super = ufs_write_super, | 1452 | .write_super = ufs_write_super, |
| 1447 | .sync_fs = ufs_sync_fs, | 1453 | .sync_fs = ufs_sync_fs, |
diff --git a/fs/ufs/truncate.c b/fs/ufs/truncate.c index 41dd431ce228..d3b6270cb377 100644 --- a/fs/ufs/truncate.c +++ b/fs/ufs/truncate.c | |||
| @@ -44,6 +44,7 @@ | |||
| 44 | #include <linux/buffer_head.h> | 44 | #include <linux/buffer_head.h> |
| 45 | #include <linux/blkdev.h> | 45 | #include <linux/blkdev.h> |
| 46 | #include <linux/sched.h> | 46 | #include <linux/sched.h> |
| 47 | #include <linux/quotaops.h> | ||
| 47 | 48 | ||
| 48 | #include "ufs_fs.h" | 49 | #include "ufs_fs.h" |
| 49 | #include "ufs.h" | 50 | #include "ufs.h" |
| @@ -517,9 +518,18 @@ static int ufs_setattr(struct dentry *dentry, struct iattr *attr) | |||
| 517 | if (error) | 518 | if (error) |
| 518 | return error; | 519 | return error; |
| 519 | 520 | ||
| 521 | if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) || | ||
| 522 | (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) { | ||
| 523 | error = dquot_transfer(inode, attr); | ||
| 524 | if (error) | ||
| 525 | return error; | ||
| 526 | } | ||
| 520 | if (ia_valid & ATTR_SIZE && | 527 | if (ia_valid & ATTR_SIZE && |
| 521 | attr->ia_size != i_size_read(inode)) { | 528 | attr->ia_size != i_size_read(inode)) { |
| 522 | loff_t old_i_size = inode->i_size; | 529 | loff_t old_i_size = inode->i_size; |
| 530 | |||
| 531 | dquot_initialize(inode); | ||
| 532 | |||
| 523 | error = vmtruncate(inode, attr->ia_size); | 533 | error = vmtruncate(inode, attr->ia_size); |
| 524 | if (error) | 534 | if (error) |
| 525 | return error; | 535 | return error; |
diff --git a/fs/ufs/ufs.h b/fs/ufs/ufs.h index 01d0e2a3b230..43f9f5d5670e 100644 --- a/fs/ufs/ufs.h +++ b/fs/ufs/ufs.h | |||
| @@ -106,7 +106,7 @@ extern struct inode * ufs_new_inode (struct inode *, int); | |||
| 106 | 106 | ||
| 107 | /* inode.c */ | 107 | /* inode.c */ |
| 108 | extern struct inode *ufs_iget(struct super_block *, unsigned long); | 108 | extern struct inode *ufs_iget(struct super_block *, unsigned long); |
| 109 | extern int ufs_write_inode (struct inode *, int); | 109 | extern int ufs_write_inode (struct inode *, struct writeback_control *); |
| 110 | extern int ufs_sync_inode (struct inode *); | 110 | extern int ufs_sync_inode (struct inode *); |
| 111 | extern void ufs_delete_inode (struct inode *); | 111 | extern void ufs_delete_inode (struct inode *); |
| 112 | extern struct buffer_head * ufs_bread (struct inode *, unsigned, int, int *); | 112 | extern struct buffer_head * ufs_bread (struct inode *, unsigned, int, int *); |
diff --git a/fs/xfs/linux-2.6/xfs_quotaops.c b/fs/xfs/linux-2.6/xfs_quotaops.c index 3d4a0c84d634..1947514ce1ad 100644 --- a/fs/xfs/linux-2.6/xfs_quotaops.c +++ b/fs/xfs/linux-2.6/xfs_quotaops.c | |||
| @@ -44,20 +44,6 @@ xfs_quota_type(int type) | |||
| 44 | } | 44 | } |
| 45 | 45 | ||
| 46 | STATIC int | 46 | STATIC int |
| 47 | xfs_fs_quota_sync( | ||
| 48 | struct super_block *sb, | ||
| 49 | int type) | ||
| 50 | { | ||
| 51 | struct xfs_mount *mp = XFS_M(sb); | ||
| 52 | |||
| 53 | if (sb->s_flags & MS_RDONLY) | ||
| 54 | return -EROFS; | ||
| 55 | if (!XFS_IS_QUOTA_RUNNING(mp)) | ||
| 56 | return -ENOSYS; | ||
| 57 | return -xfs_sync_data(mp, 0); | ||
| 58 | } | ||
| 59 | |||
| 60 | STATIC int | ||
| 61 | xfs_fs_get_xstate( | 47 | xfs_fs_get_xstate( |
| 62 | struct super_block *sb, | 48 | struct super_block *sb, |
| 63 | struct fs_quota_stat *fqs) | 49 | struct fs_quota_stat *fqs) |
| @@ -82,8 +68,6 @@ xfs_fs_set_xstate( | |||
| 82 | return -EROFS; | 68 | return -EROFS; |
| 83 | if (op != Q_XQUOTARM && !XFS_IS_QUOTA_RUNNING(mp)) | 69 | if (op != Q_XQUOTARM && !XFS_IS_QUOTA_RUNNING(mp)) |
| 84 | return -ENOSYS; | 70 | return -ENOSYS; |
| 85 | if (!capable(CAP_SYS_ADMIN)) | ||
| 86 | return -EPERM; | ||
| 87 | 71 | ||
| 88 | if (uflags & XFS_QUOTA_UDQ_ACCT) | 72 | if (uflags & XFS_QUOTA_UDQ_ACCT) |
| 89 | flags |= XFS_UQUOTA_ACCT; | 73 | flags |= XFS_UQUOTA_ACCT; |
| @@ -144,14 +128,11 @@ xfs_fs_set_xquota( | |||
| 144 | return -ENOSYS; | 128 | return -ENOSYS; |
| 145 | if (!XFS_IS_QUOTA_ON(mp)) | 129 | if (!XFS_IS_QUOTA_ON(mp)) |
| 146 | return -ESRCH; | 130 | return -ESRCH; |
| 147 | if (!capable(CAP_SYS_ADMIN)) | ||
| 148 | return -EPERM; | ||
| 149 | 131 | ||
| 150 | return -xfs_qm_scall_setqlim(mp, id, xfs_quota_type(type), fdq); | 132 | return -xfs_qm_scall_setqlim(mp, id, xfs_quota_type(type), fdq); |
| 151 | } | 133 | } |
| 152 | 134 | ||
| 153 | const struct quotactl_ops xfs_quotactl_operations = { | 135 | const struct quotactl_ops xfs_quotactl_operations = { |
| 154 | .quota_sync = xfs_fs_quota_sync, | ||
| 155 | .get_xstate = xfs_fs_get_xstate, | 136 | .get_xstate = xfs_fs_get_xstate, |
| 156 | .set_xstate = xfs_fs_set_xstate, | 137 | .set_xstate = xfs_fs_set_xstate, |
| 157 | .get_xquota = xfs_fs_get_xquota, | 138 | .get_xquota = xfs_fs_get_xquota, |
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c index 25ea2408118f..71345a370d9f 100644 --- a/fs/xfs/linux-2.6/xfs_super.c +++ b/fs/xfs/linux-2.6/xfs_super.c | |||
| @@ -1063,7 +1063,7 @@ xfs_log_inode( | |||
| 1063 | STATIC int | 1063 | STATIC int |
| 1064 | xfs_fs_write_inode( | 1064 | xfs_fs_write_inode( |
| 1065 | struct inode *inode, | 1065 | struct inode *inode, |
| 1066 | int sync) | 1066 | struct writeback_control *wbc) |
| 1067 | { | 1067 | { |
| 1068 | struct xfs_inode *ip = XFS_I(inode); | 1068 | struct xfs_inode *ip = XFS_I(inode); |
| 1069 | struct xfs_mount *mp = ip->i_mount; | 1069 | struct xfs_mount *mp = ip->i_mount; |
| @@ -1074,11 +1074,7 @@ xfs_fs_write_inode( | |||
| 1074 | if (XFS_FORCED_SHUTDOWN(mp)) | 1074 | if (XFS_FORCED_SHUTDOWN(mp)) |
| 1075 | return XFS_ERROR(EIO); | 1075 | return XFS_ERROR(EIO); |
| 1076 | 1076 | ||
| 1077 | if (sync) { | 1077 | if (wbc->sync_mode == WB_SYNC_ALL) { |
| 1078 | error = xfs_wait_on_pages(ip, 0, -1); | ||
| 1079 | if (error) | ||
| 1080 | goto out; | ||
| 1081 | |||
| 1082 | /* | 1078 | /* |
| 1083 | * Make sure the inode has hit stable storage. By using the | 1079 | * Make sure the inode has hit stable storage. By using the |
| 1084 | * log and the fsync transactions we reduce the IOs we have | 1080 | * log and the fsync transactions we reduce the IOs we have |
