Diffstat (limited to 'fs')
-rw-r--r-- | fs/Makefile | 2 | ||||
-rw-r--r-- | fs/aio.c | 86 | ||||
-rw-r--r-- | fs/bad_inode.c | 7 | ||||
-rw-r--r-- | fs/btrfs/inode.c | 12 | ||||
-rw-r--r-- | fs/btrfs/super.c | 9 | ||||
-rw-r--r-- | fs/ceph/acl.c | 14 | ||||
-rw-r--r-- | fs/ceph/caps.c | 2 | ||||
-rw-r--r-- | fs/ceph/file.c | 24 | ||||
-rw-r--r-- | fs/ceph/mds_client.c | 16 | ||||
-rw-r--r-- | fs/ceph/super.c | 2 | ||||
-rw-r--r-- | fs/ceph/xattr.c | 4 | ||||
-rw-r--r-- | fs/cifs/cifsfs.c | 2 | ||||
-rw-r--r-- | fs/cifs/cifsfs.h | 4 | ||||
-rw-r--r-- | fs/cifs/inode.c | 14 | ||||
-rw-r--r-- | fs/dcache.c | 196 | ||||
-rw-r--r-- | fs/direct-io.c | 2 | ||||
-rw-r--r-- | fs/ext2/super.c | 2 | ||||
-rw-r--r-- | fs/ext4/namei.c | 1 | ||||
-rw-r--r-- | fs/fs_pin.c | 78 | ||||
-rw-r--r-- | fs/fuse/dir.c | 7 | ||||
-rw-r--r-- | fs/fuse/file.c | 4 | ||||
-rw-r--r-- | fs/hostfs/hostfs.h | 1 | ||||
-rw-r--r-- | fs/hostfs/hostfs_kern.c | 30 | ||||
-rw-r--r-- | fs/hostfs/hostfs_user.c | 28 | ||||
-rw-r--r-- | fs/internal.h | 7 | ||||
-rw-r--r-- | fs/mount.h | 2 | ||||
-rw-r--r-- | fs/namei.c | 34 | ||||
-rw-r--r-- | fs/namespace.c | 67 | ||||
-rw-r--r-- | fs/nfs/blocklayout/blocklayout.c | 101 | ||||
-rw-r--r-- | fs/nfs/callback.c | 12 | ||||
-rw-r--r-- | fs/nfs/client.c | 18 | ||||
-rw-r--r-- | fs/nfs/delegation.c | 34 | ||||
-rw-r--r-- | fs/nfs/delegation.h | 1 | ||||
-rw-r--r-- | fs/nfs/dir.c | 208 | ||||
-rw-r--r-- | fs/nfs/direct.c | 33 | ||||
-rw-r--r-- | fs/nfs/filelayout/filelayout.c | 298 | ||||
-rw-r--r-- | fs/nfs/filelayout/filelayoutdev.c | 2 | ||||
-rw-r--r-- | fs/nfs/getroot.c | 2 | ||||
-rw-r--r-- | fs/nfs/inode.c | 9 | ||||
-rw-r--r-- | fs/nfs/internal.h | 11 | ||||
-rw-r--r-- | fs/nfs/nfs3acl.c | 2 | ||||
-rw-r--r-- | fs/nfs/nfs3proc.c | 21 | ||||
-rw-r--r-- | fs/nfs/nfs4_fs.h | 32 | ||||
-rw-r--r-- | fs/nfs/nfs4client.c | 5 | ||||
-rw-r--r-- | fs/nfs/nfs4proc.c | 248 | ||||
-rw-r--r-- | fs/nfs/nfs4state.c | 69 | ||||
-rw-r--r-- | fs/nfs/nfs4trace.h | 28 | ||||
-rw-r--r-- | fs/nfs/nfs4xdr.c | 2 | ||||
-rw-r--r-- | fs/nfs/objlayout/objio_osd.c | 24 | ||||
-rw-r--r-- | fs/nfs/objlayout/objlayout.c | 81 | ||||
-rw-r--r-- | fs/nfs/objlayout/objlayout.h | 8 | ||||
-rw-r--r-- | fs/nfs/pagelist.c | 276 | ||||
-rw-r--r-- | fs/nfs/pnfs.c | 178 | ||||
-rw-r--r-- | fs/nfs/pnfs.h | 45 | ||||
-rw-r--r-- | fs/nfs/proc.c | 27 | ||||
-rw-r--r-- | fs/nfs/read.c | 54 | ||||
-rw-r--r-- | fs/nfs/super.c | 12 | ||||
-rw-r--r-- | fs/nfs/write.c | 150 | ||||
-rw-r--r-- | fs/nfs_common/nfsacl.c | 5 | ||||
-rw-r--r-- | fs/nilfs2/super.c | 2 | ||||
-rw-r--r-- | fs/quota/dquot.c | 180 | ||||
-rw-r--r-- | fs/quota/kqid.c | 2 | ||||
-rw-r--r-- | fs/quota/netlink.c | 3 | ||||
-rw-r--r-- | fs/quota/quota.c | 6 | ||||
-rw-r--r-- | fs/reiserfs/do_balan.c | 111 | ||||
-rw-r--r-- | fs/reiserfs/journal.c | 22 | ||||
-rw-r--r-- | fs/reiserfs/lbalance.c | 5 | ||||
-rw-r--r-- | fs/reiserfs/reiserfs.h | 9 | ||||
-rw-r--r-- | fs/reiserfs/super.c | 6 | ||||
-rw-r--r-- | fs/super.c | 20 | ||||
-rw-r--r-- | fs/ubifs/commit.c | 2 | ||||
-rw-r--r-- | fs/ubifs/io.c | 2 | ||||
-rw-r--r-- | fs/ubifs/log.c | 12 | ||||
-rw-r--r-- | fs/ubifs/lpt.c | 5 | ||||
-rw-r--r-- | fs/ubifs/lpt_commit.c | 7 | ||||
-rw-r--r-- | fs/ubifs/master.c | 7 | ||||
-rw-r--r-- | fs/ubifs/orphan.c | 1 | ||||
-rw-r--r-- | fs/ubifs/recovery.c | 5 | ||||
-rw-r--r-- | fs/ubifs/sb.c | 4 | ||||
-rw-r--r-- | fs/ubifs/scan.c | 14 | ||||
-rw-r--r-- | fs/ubifs/super.c | 19 | ||||
-rw-r--r-- | fs/ubifs/tnc.c | 1 | ||||
-rw-r--r-- | fs/ubifs/tnc_commit.c | 1 | ||||
-rw-r--r-- | fs/ubifs/ubifs.h | 4 | ||||
-rw-r--r-- | fs/udf/file.c | 22 | ||||
-rw-r--r-- | fs/udf/lowlevel.c | 2 | ||||
-rw-r--r-- | fs/udf/super.c | 2 | ||||
-rw-r--r-- | fs/udf/symlink.c | 2 | ||||
-rw-r--r-- | fs/udf/unicode.c | 9 | ||||
-rw-r--r-- | fs/xfs/Kconfig | 1 | ||||
-rw-r--r-- | fs/xfs/Makefile | 71 | ||||
-rw-r--r-- | fs/xfs/libxfs/xfs_ag.h (renamed from fs/xfs/xfs_ag.h) | 0 | ||||
-rw-r--r-- | fs/xfs/libxfs/xfs_alloc.c (renamed from fs/xfs/xfs_alloc.c) | 20 | ||||
-rw-r--r-- | fs/xfs/libxfs/xfs_alloc.h (renamed from fs/xfs/xfs_alloc.h) | 0 | ||||
-rw-r--r-- | fs/xfs/libxfs/xfs_alloc_btree.c (renamed from fs/xfs/xfs_alloc_btree.c) | 6 | ||||
-rw-r--r-- | fs/xfs/libxfs/xfs_alloc_btree.h (renamed from fs/xfs/xfs_alloc_btree.h) | 0 | ||||
-rw-r--r-- | fs/xfs/libxfs/xfs_attr.c (renamed from fs/xfs/xfs_attr.c) | 92 | ||||
-rw-r--r-- | fs/xfs/libxfs/xfs_attr_leaf.c (renamed from fs/xfs/xfs_attr_leaf.c) | 78 | ||||
-rw-r--r-- | fs/xfs/libxfs/xfs_attr_leaf.h (renamed from fs/xfs/xfs_attr_leaf.h) | 0 | ||||
-rw-r--r-- | fs/xfs/libxfs/xfs_attr_remote.c (renamed from fs/xfs/xfs_attr_remote.c) | 22 | ||||
-rw-r--r-- | fs/xfs/libxfs/xfs_attr_remote.h (renamed from fs/xfs/xfs_attr_remote.h) | 0 | ||||
-rw-r--r-- | fs/xfs/libxfs/xfs_attr_sf.h (renamed from fs/xfs/xfs_attr_sf.h) | 0 | ||||
-rw-r--r-- | fs/xfs/libxfs/xfs_bit.h (renamed from fs/xfs/xfs_bit.h) | 0 | ||||
-rw-r--r-- | fs/xfs/libxfs/xfs_bmap.c (renamed from fs/xfs/xfs_bmap.c) | 60 | ||||
-rw-r--r-- | fs/xfs/libxfs/xfs_bmap.h (renamed from fs/xfs/xfs_bmap.h) | 0 | ||||
-rw-r--r-- | fs/xfs/libxfs/xfs_bmap_btree.c (renamed from fs/xfs/xfs_bmap_btree.c) | 99 | ||||
-rw-r--r-- | fs/xfs/libxfs/xfs_bmap_btree.h (renamed from fs/xfs/xfs_bmap_btree.h) | 0 | ||||
-rw-r--r-- | fs/xfs/libxfs/xfs_btree.c (renamed from fs/xfs/xfs_btree.c) | 46 | ||||
-rw-r--r-- | fs/xfs/libxfs/xfs_btree.h (renamed from fs/xfs/xfs_btree.h) | 2 | ||||
-rw-r--r-- | fs/xfs/libxfs/xfs_cksum.h (renamed from fs/xfs/xfs_cksum.h) | 0 | ||||
-rw-r--r-- | fs/xfs/libxfs/xfs_da_btree.c (renamed from fs/xfs/xfs_da_btree.c) | 112 | ||||
-rw-r--r-- | fs/xfs/libxfs/xfs_da_btree.h (renamed from fs/xfs/xfs_da_btree.h) | 0 | ||||
-rw-r--r-- | fs/xfs/libxfs/xfs_da_format.c (renamed from fs/xfs/xfs_da_format.c) | 0 | ||||
-rw-r--r-- | fs/xfs/libxfs/xfs_da_format.h (renamed from fs/xfs/xfs_da_format.h) | 0 | ||||
-rw-r--r-- | fs/xfs/libxfs/xfs_dinode.h (renamed from fs/xfs/xfs_dinode.h) | 0 | ||||
-rw-r--r-- | fs/xfs/libxfs/xfs_dir2.c (renamed from fs/xfs/xfs_dir2.c) | 24 | ||||
-rw-r--r-- | fs/xfs/libxfs/xfs_dir2.h (renamed from fs/xfs/xfs_dir2.h) | 0 | ||||
-rw-r--r-- | fs/xfs/libxfs/xfs_dir2_block.c (renamed from fs/xfs/xfs_dir2_block.c) | 18 | ||||
-rw-r--r-- | fs/xfs/libxfs/xfs_dir2_data.c (renamed from fs/xfs/xfs_dir2_data.c) | 10 | ||||
-rw-r--r-- | fs/xfs/libxfs/xfs_dir2_leaf.c (renamed from fs/xfs/xfs_dir2_leaf.c) | 24 | ||||
-rw-r--r-- | fs/xfs/libxfs/xfs_dir2_node.c (renamed from fs/xfs/xfs_dir2_node.c) | 40 | ||||
-rw-r--r-- | fs/xfs/libxfs/xfs_dir2_priv.h (renamed from fs/xfs/xfs_dir2_priv.h) | 0 | ||||
-rw-r--r-- | fs/xfs/libxfs/xfs_dir2_sf.c (renamed from fs/xfs/xfs_dir2_sf.c) | 75 | ||||
-rw-r--r-- | fs/xfs/libxfs/xfs_dquot_buf.c (renamed from fs/xfs/xfs_dquot_buf.c) | 6 | ||||
-rw-r--r-- | fs/xfs/libxfs/xfs_format.h (renamed from fs/xfs/xfs_format.h) | 14 | ||||
-rw-r--r-- | fs/xfs/libxfs/xfs_ialloc.c (renamed from fs/xfs/xfs_ialloc.c) | 34 | ||||
-rw-r--r-- | fs/xfs/libxfs/xfs_ialloc.h (renamed from fs/xfs/xfs_ialloc.h) | 0 | ||||
-rw-r--r-- | fs/xfs/libxfs/xfs_ialloc_btree.c (renamed from fs/xfs/xfs_ialloc_btree.c) | 6 | ||||
-rw-r--r-- | fs/xfs/libxfs/xfs_ialloc_btree.h (renamed from fs/xfs/xfs_ialloc_btree.h) | 0 | ||||
-rw-r--r-- | fs/xfs/libxfs/xfs_inode_buf.c (renamed from fs/xfs/xfs_inode_buf.c) | 10 | ||||
-rw-r--r-- | fs/xfs/libxfs/xfs_inode_buf.h (renamed from fs/xfs/xfs_inode_buf.h) | 0 | ||||
-rw-r--r-- | fs/xfs/libxfs/xfs_inode_fork.c (renamed from fs/xfs/xfs_inode_fork.c) | 36 | ||||
-rw-r--r-- | fs/xfs/libxfs/xfs_inode_fork.h (renamed from fs/xfs/xfs_inode_fork.h) | 0 | ||||
-rw-r--r-- | fs/xfs/libxfs/xfs_inum.h (renamed from fs/xfs/xfs_inum.h) | 4 | ||||
-rw-r--r-- | fs/xfs/libxfs/xfs_log_format.h (renamed from fs/xfs/xfs_log_format.h) | 4 | ||||
-rw-r--r-- | fs/xfs/libxfs/xfs_log_recover.h (renamed from fs/xfs/xfs_log_recover.h) | 0 | ||||
-rw-r--r-- | fs/xfs/libxfs/xfs_log_rlimit.c (renamed from fs/xfs/xfs_log_rlimit.c) | 0 | ||||
-rw-r--r-- | fs/xfs/libxfs/xfs_quota_defs.h (renamed from fs/xfs/xfs_quota_defs.h) | 2 | ||||
-rw-r--r-- | fs/xfs/libxfs/xfs_rtbitmap.c (renamed from fs/xfs/xfs_rtbitmap.c) | 0 | ||||
-rw-r--r-- | fs/xfs/libxfs/xfs_sb.c (renamed from fs/xfs/xfs_sb.c) | 56 | ||||
-rw-r--r-- | fs/xfs/libxfs/xfs_sb.h (renamed from fs/xfs/xfs_sb.h) | 8 | ||||
-rw-r--r-- | fs/xfs/libxfs/xfs_shared.h (renamed from fs/xfs/xfs_shared.h) | 0 | ||||
-rw-r--r-- | fs/xfs/libxfs/xfs_symlink_remote.c (renamed from fs/xfs/xfs_symlink_remote.c) | 6 | ||||
-rw-r--r-- | fs/xfs/libxfs/xfs_trans_resv.c (renamed from fs/xfs/xfs_trans_resv.c) | 0 | ||||
-rw-r--r-- | fs/xfs/libxfs/xfs_trans_resv.h (renamed from fs/xfs/xfs_trans_resv.h) | 0 | ||||
-rw-r--r-- | fs/xfs/libxfs/xfs_trans_space.h (renamed from fs/xfs/xfs_trans_space.h) | 0 | ||||
-rw-r--r-- | fs/xfs/xfs_acl.c | 8 | ||||
-rw-r--r-- | fs/xfs/xfs_aops.c | 18 | ||||
-rw-r--r-- | fs/xfs/xfs_attr_inactive.c | 22 | ||||
-rw-r--r-- | fs/xfs/xfs_attr_list.c | 38 | ||||
-rw-r--r-- | fs/xfs/xfs_bmap_util.c | 174 | ||||
-rw-r--r-- | fs/xfs/xfs_buf.c | 40 | ||||
-rw-r--r-- | fs/xfs/xfs_buf.h | 2 | ||||
-rw-r--r-- | fs/xfs/xfs_buf_item.c | 4 | ||||
-rw-r--r-- | fs/xfs/xfs_dir2_readdir.c | 4 | ||||
-rw-r--r-- | fs/xfs/xfs_discard.c | 18 | ||||
-rw-r--r-- | fs/xfs/xfs_dquot.c | 41 | ||||
-rw-r--r-- | fs/xfs/xfs_dquot.h | 15 | ||||
-rw-r--r-- | fs/xfs/xfs_error.c | 25 | ||||
-rw-r--r-- | fs/xfs/xfs_error.h | 13 | ||||
-rw-r--r-- | fs/xfs/xfs_export.c | 10 | ||||
-rw-r--r-- | fs/xfs/xfs_extfree_item.c | 2 | ||||
-rw-r--r-- | fs/xfs/xfs_file.c | 75 | ||||
-rw-r--r-- | fs/xfs/xfs_filestream.c | 4 | ||||
-rw-r--r-- | fs/xfs/xfs_fs.h | 7 | ||||
-rw-r--r-- | fs/xfs/xfs_fsops.c | 42 | ||||
-rw-r--r-- | fs/xfs/xfs_icache.c | 148 | ||||
-rw-r--r-- | fs/xfs/xfs_icache.h | 13 | ||||
-rw-r--r-- | fs/xfs/xfs_inode.c | 68 | ||||
-rw-r--r-- | fs/xfs/xfs_inode.h | 10 | ||||
-rw-r--r-- | fs/xfs/xfs_inode_item.c | 2 | ||||
-rw-r--r-- | fs/xfs/xfs_ioctl.c | 266 | ||||
-rw-r--r-- | fs/xfs/xfs_ioctl32.c | 111 | ||||
-rw-r--r-- | fs/xfs/xfs_iomap.c | 54 | ||||
-rw-r--r-- | fs/xfs/xfs_iops.c | 72 | ||||
-rw-r--r-- | fs/xfs/xfs_itable.c | 579 | ||||
-rw-r--r-- | fs/xfs/xfs_itable.h | 23 | ||||
-rw-r--r-- | fs/xfs/xfs_linux.h | 27 | ||||
-rw-r--r-- | fs/xfs/xfs_log.c | 69 | ||||
-rw-r--r-- | fs/xfs/xfs_log_cil.c | 8 | ||||
-rw-r--r-- | fs/xfs/xfs_log_priv.h | 2 | ||||
-rw-r--r-- | fs/xfs/xfs_log_recover.c | 284 | ||||
-rw-r--r-- | fs/xfs/xfs_mount.c | 97 | ||||
-rw-r--r-- | fs/xfs/xfs_mount.h | 1 | ||||
-rw-r--r-- | fs/xfs/xfs_mru_cache.c | 14 | ||||
-rw-r--r-- | fs/xfs/xfs_qm.c | 229 | ||||
-rw-r--r-- | fs/xfs/xfs_qm.h | 1 | ||||
-rw-r--r-- | fs/xfs/xfs_qm_bhv.c | 2 | ||||
-rw-r--r-- | fs/xfs/xfs_qm_syscalls.c | 46 | ||||
-rw-r--r-- | fs/xfs/xfs_quotaops.c | 20 | ||||
-rw-r--r-- | fs/xfs/xfs_rtalloc.c | 24 | ||||
-rw-r--r-- | fs/xfs/xfs_rtalloc.h | 2 | ||||
-rw-r--r-- | fs/xfs/xfs_super.c | 132 | ||||
-rw-r--r-- | fs/xfs/xfs_super.h | 15 | ||||
-rw-r--r-- | fs/xfs/xfs_symlink.c | 30 | ||||
-rw-r--r-- | fs/xfs/xfs_sysfs.c | 165 | ||||
-rw-r--r-- | fs/xfs/xfs_sysfs.h | 59 | ||||
-rw-r--r-- | fs/xfs/xfs_trans.c | 10 | ||||
-rw-r--r-- | fs/xfs/xfs_trans_ail.c | 4 | ||||
-rw-r--r-- | fs/xfs/xfs_trans_buf.c | 37 | ||||
-rw-r--r-- | fs/xfs/xfs_trans_dquot.c | 4 | ||||
-rw-r--r-- | fs/xfs/xfs_types.h | 29 | ||||
-rw-r--r-- | fs/xfs/xfs_vnode.h | 46 | ||||
-rw-r--r-- | fs/xfs/xfs_xattr.c | 6 |
204 files changed, 3994 insertions, 3403 deletions
diff --git a/fs/Makefile b/fs/Makefile
index 4030cbfbc9af..90c88529892b 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -11,7 +11,7 @@ obj-y := open.o read_write.o file_table.o super.o \ | |||
11 | attr.o bad_inode.o file.o filesystems.o namespace.o \ | 11 | attr.o bad_inode.o file.o filesystems.o namespace.o \ |
12 | seq_file.o xattr.o libfs.o fs-writeback.o \ | 12 | seq_file.o xattr.o libfs.o fs-writeback.o \ |
13 | pnode.o splice.o sync.o utimes.o \ | 13 | pnode.o splice.o sync.o utimes.o \ |
14 | stack.o fs_struct.o statfs.o | 14 | stack.o fs_struct.o statfs.o fs_pin.o |
15 | 15 | ||
16 | ifeq ($(CONFIG_BLOCK),y) | 16 | ifeq ($(CONFIG_BLOCK),y) |
17 | obj-y += buffer.o block_dev.o direct-io.o mpage.o | 17 | obj-y += buffer.o block_dev.o direct-io.o mpage.o |
diff --git a/fs/aio.c b/fs/aio.c
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -192,7 +192,6 @@ static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages)
192 | } | 192 | } |
193 | 193 | ||
194 | file->f_flags = O_RDWR; | 194 | file->f_flags = O_RDWR; |
195 | file->private_data = ctx; | ||
196 | return file; | 195 | return file; |
197 | } | 196 | } |
198 | 197 | ||
@@ -202,7 +201,7 @@ static struct dentry *aio_mount(struct file_system_type *fs_type, | |||
202 | static const struct dentry_operations ops = { | 201 | static const struct dentry_operations ops = { |
203 | .d_dname = simple_dname, | 202 | .d_dname = simple_dname, |
204 | }; | 203 | }; |
205 | return mount_pseudo(fs_type, "aio:", NULL, &ops, 0xa10a10a1); | 204 | return mount_pseudo(fs_type, "aio:", NULL, &ops, AIO_RING_MAGIC); |
206 | } | 205 | } |
207 | 206 | ||
208 | /* aio_setup | 207 | /* aio_setup |
@@ -556,8 +555,7 @@ static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm) | |||
556 | struct aio_ring *ring; | 555 | struct aio_ring *ring; |
557 | 556 | ||
558 | spin_lock(&mm->ioctx_lock); | 557 | spin_lock(&mm->ioctx_lock); |
559 | rcu_read_lock(); | 558 | table = rcu_dereference_raw(mm->ioctx_table); |
560 | table = rcu_dereference(mm->ioctx_table); | ||
561 | 559 | ||
562 | while (1) { | 560 | while (1) { |
563 | if (table) | 561 | if (table) |
@@ -565,7 +563,6 @@ static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm) | |||
565 | if (!table->table[i]) { | 563 | if (!table->table[i]) { |
566 | ctx->id = i; | 564 | ctx->id = i; |
567 | table->table[i] = ctx; | 565 | table->table[i] = ctx; |
568 | rcu_read_unlock(); | ||
569 | spin_unlock(&mm->ioctx_lock); | 566 | spin_unlock(&mm->ioctx_lock); |
570 | 567 | ||
571 | /* While kioctx setup is in progress, | 568 | /* While kioctx setup is in progress, |
@@ -579,8 +576,6 @@ static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm) | |||
579 | } | 576 | } |
580 | 577 | ||
581 | new_nr = (table ? table->nr : 1) * 4; | 578 | new_nr = (table ? table->nr : 1) * 4; |
582 | |||
583 | rcu_read_unlock(); | ||
584 | spin_unlock(&mm->ioctx_lock); | 579 | spin_unlock(&mm->ioctx_lock); |
585 | 580 | ||
586 | table = kzalloc(sizeof(*table) + sizeof(struct kioctx *) * | 581 | table = kzalloc(sizeof(*table) + sizeof(struct kioctx *) * |
@@ -591,8 +586,7 @@ static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm) | |||
591 | table->nr = new_nr; | 586 | table->nr = new_nr; |
592 | 587 | ||
593 | spin_lock(&mm->ioctx_lock); | 588 | spin_lock(&mm->ioctx_lock); |
594 | rcu_read_lock(); | 589 | old = rcu_dereference_raw(mm->ioctx_table); |
595 | old = rcu_dereference(mm->ioctx_table); | ||
596 | 590 | ||
597 | if (!old) { | 591 | if (!old) { |
598 | rcu_assign_pointer(mm->ioctx_table, table); | 592 | rcu_assign_pointer(mm->ioctx_table, table); |
@@ -739,12 +733,9 @@ static int kill_ioctx(struct mm_struct *mm, struct kioctx *ctx, | |||
739 | 733 | ||
740 | 734 | ||
741 | spin_lock(&mm->ioctx_lock); | 735 | spin_lock(&mm->ioctx_lock); |
742 | rcu_read_lock(); | 736 | table = rcu_dereference_raw(mm->ioctx_table); |
743 | table = rcu_dereference(mm->ioctx_table); | ||
744 | |||
745 | WARN_ON(ctx != table->table[ctx->id]); | 737 | WARN_ON(ctx != table->table[ctx->id]); |
746 | table->table[ctx->id] = NULL; | 738 | table->table[ctx->id] = NULL; |
747 | rcu_read_unlock(); | ||
748 | spin_unlock(&mm->ioctx_lock); | 739 | spin_unlock(&mm->ioctx_lock); |
749 | 740 | ||
750 | /* percpu_ref_kill() will do the necessary call_rcu() */ | 741 | /* percpu_ref_kill() will do the necessary call_rcu() */ |
@@ -793,40 +784,30 @@ EXPORT_SYMBOL(wait_on_sync_kiocb); | |||
793 | */ | 784 | */ |
794 | void exit_aio(struct mm_struct *mm) | 785 | void exit_aio(struct mm_struct *mm) |
795 | { | 786 | { |
796 | struct kioctx_table *table; | 787 | struct kioctx_table *table = rcu_dereference_raw(mm->ioctx_table); |
797 | struct kioctx *ctx; | 788 | int i; |
798 | unsigned i = 0; | ||
799 | |||
800 | while (1) { | ||
801 | rcu_read_lock(); | ||
802 | table = rcu_dereference(mm->ioctx_table); | ||
803 | |||
804 | do { | ||
805 | if (!table || i >= table->nr) { | ||
806 | rcu_read_unlock(); | ||
807 | rcu_assign_pointer(mm->ioctx_table, NULL); | ||
808 | if (table) | ||
809 | kfree(table); | ||
810 | return; | ||
811 | } | ||
812 | 789 | ||
813 | ctx = table->table[i++]; | 790 | if (!table) |
814 | } while (!ctx); | 791 | return; |
815 | 792 | ||
816 | rcu_read_unlock(); | 793 | for (i = 0; i < table->nr; ++i) { |
794 | struct kioctx *ctx = table->table[i]; | ||
817 | 795 | ||
796 | if (!ctx) | ||
797 | continue; | ||
818 | /* | 798 | /* |
819 | * We don't need to bother with munmap() here - | 799 | * We don't need to bother with munmap() here - exit_mmap(mm) |
820 | * exit_mmap(mm) is coming and it'll unmap everything. | 800 | * is coming and it'll unmap everything. And we simply can't, |
821 | * Since aio_free_ring() uses non-zero ->mmap_size | 801 | * this is not necessarily our ->mm. |
822 | * as indicator that it needs to unmap the area, | 802 | * Since kill_ioctx() uses non-zero ->mmap_size as indicator |
823 | * just set it to 0; aio_free_ring() is the only | 803 | * that it needs to unmap the area, just set it to 0. |
824 | * place that uses ->mmap_size, so it's safe. | ||
825 | */ | 804 | */ |
826 | ctx->mmap_size = 0; | 805 | ctx->mmap_size = 0; |
827 | |||
828 | kill_ioctx(mm, ctx, NULL); | 806 | kill_ioctx(mm, ctx, NULL); |
829 | } | 807 | } |
808 | |||
809 | RCU_INIT_POINTER(mm->ioctx_table, NULL); | ||
810 | kfree(table); | ||
830 | } | 811 | } |
831 | 812 | ||
832 | static void put_reqs_available(struct kioctx *ctx, unsigned nr) | 813 | static void put_reqs_available(struct kioctx *ctx, unsigned nr) |
@@ -834,10 +815,8 @@ static void put_reqs_available(struct kioctx *ctx, unsigned nr) | |||
834 | struct kioctx_cpu *kcpu; | 815 | struct kioctx_cpu *kcpu; |
835 | unsigned long flags; | 816 | unsigned long flags; |
836 | 817 | ||
837 | preempt_disable(); | ||
838 | kcpu = this_cpu_ptr(ctx->cpu); | ||
839 | |||
840 | local_irq_save(flags); | 818 | local_irq_save(flags); |
819 | kcpu = this_cpu_ptr(ctx->cpu); | ||
841 | kcpu->reqs_available += nr; | 820 | kcpu->reqs_available += nr; |
842 | 821 | ||
843 | while (kcpu->reqs_available >= ctx->req_batch * 2) { | 822 | while (kcpu->reqs_available >= ctx->req_batch * 2) { |
@@ -846,7 +825,6 @@ static void put_reqs_available(struct kioctx *ctx, unsigned nr) | |||
846 | } | 825 | } |
847 | 826 | ||
848 | local_irq_restore(flags); | 827 | local_irq_restore(flags); |
849 | preempt_enable(); | ||
850 | } | 828 | } |
851 | 829 | ||
852 | static bool get_reqs_available(struct kioctx *ctx) | 830 | static bool get_reqs_available(struct kioctx *ctx) |
@@ -855,10 +833,8 @@ static bool get_reqs_available(struct kioctx *ctx) | |||
855 | bool ret = false; | 833 | bool ret = false; |
856 | unsigned long flags; | 834 | unsigned long flags; |
857 | 835 | ||
858 | preempt_disable(); | ||
859 | kcpu = this_cpu_ptr(ctx->cpu); | ||
860 | |||
861 | local_irq_save(flags); | 836 | local_irq_save(flags); |
837 | kcpu = this_cpu_ptr(ctx->cpu); | ||
862 | if (!kcpu->reqs_available) { | 838 | if (!kcpu->reqs_available) { |
863 | int old, avail = atomic_read(&ctx->reqs_available); | 839 | int old, avail = atomic_read(&ctx->reqs_available); |
864 | 840 | ||
@@ -878,7 +854,6 @@ static bool get_reqs_available(struct kioctx *ctx) | |||
878 | kcpu->reqs_available--; | 854 | kcpu->reqs_available--; |
879 | out: | 855 | out: |
880 | local_irq_restore(flags); | 856 | local_irq_restore(flags); |
881 | preempt_enable(); | ||
882 | return ret; | 857 | return ret; |
883 | } | 858 | } |
884 | 859 | ||
@@ -1047,7 +1022,7 @@ void aio_complete(struct kiocb *iocb, long res, long res2) | |||
1047 | } | 1022 | } |
1048 | EXPORT_SYMBOL(aio_complete); | 1023 | EXPORT_SYMBOL(aio_complete); |
1049 | 1024 | ||
1050 | /* aio_read_events | 1025 | /* aio_read_events_ring |
1051 | * Pull an event off of the ioctx's event ring. Returns the number of | 1026 | * Pull an event off of the ioctx's event ring. Returns the number of |
1052 | * events fetched | 1027 | * events fetched |
1053 | */ | 1028 | */ |
@@ -1270,12 +1245,12 @@ static ssize_t aio_setup_vectored_rw(struct kiocb *kiocb, | |||
1270 | if (compat) | 1245 | if (compat) |
1271 | ret = compat_rw_copy_check_uvector(rw, | 1246 | ret = compat_rw_copy_check_uvector(rw, |
1272 | (struct compat_iovec __user *)buf, | 1247 | (struct compat_iovec __user *)buf, |
1273 | *nr_segs, 1, *iovec, iovec); | 1248 | *nr_segs, UIO_FASTIOV, *iovec, iovec); |
1274 | else | 1249 | else |
1275 | #endif | 1250 | #endif |
1276 | ret = rw_copy_check_uvector(rw, | 1251 | ret = rw_copy_check_uvector(rw, |
1277 | (struct iovec __user *)buf, | 1252 | (struct iovec __user *)buf, |
1278 | *nr_segs, 1, *iovec, iovec); | 1253 | *nr_segs, UIO_FASTIOV, *iovec, iovec); |
1279 | if (ret < 0) | 1254 | if (ret < 0) |
1280 | return ret; | 1255 | return ret; |
1281 | 1256 | ||
@@ -1299,9 +1274,8 @@ static ssize_t aio_setup_single_vector(struct kiocb *kiocb, | |||
1299 | } | 1274 | } |
1300 | 1275 | ||
1301 | /* | 1276 | /* |
1302 | * aio_setup_iocb: | 1277 | * aio_run_iocb: |
1303 | * Performs the initial checks and aio retry method | 1278 | * Performs the initial checks and io submission. |
1304 | * setup for the kiocb at the time of io submission. | ||
1305 | */ | 1279 | */ |
1306 | static ssize_t aio_run_iocb(struct kiocb *req, unsigned opcode, | 1280 | static ssize_t aio_run_iocb(struct kiocb *req, unsigned opcode, |
1307 | char __user *buf, bool compat) | 1281 | char __user *buf, bool compat) |
@@ -1313,7 +1287,7 @@ static ssize_t aio_run_iocb(struct kiocb *req, unsigned opcode, | |||
1313 | fmode_t mode; | 1287 | fmode_t mode; |
1314 | aio_rw_op *rw_op; | 1288 | aio_rw_op *rw_op; |
1315 | rw_iter_op *iter_op; | 1289 | rw_iter_op *iter_op; |
1316 | struct iovec inline_vec, *iovec = &inline_vec; | 1290 | struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs; |
1317 | struct iov_iter iter; | 1291 | struct iov_iter iter; |
1318 | 1292 | ||
1319 | switch (opcode) { | 1293 | switch (opcode) { |
@@ -1348,7 +1322,7 @@ rw_common: | |||
1348 | if (!ret) | 1322 | if (!ret) |
1349 | ret = rw_verify_area(rw, file, &req->ki_pos, req->ki_nbytes); | 1323 | ret = rw_verify_area(rw, file, &req->ki_pos, req->ki_nbytes); |
1350 | if (ret < 0) { | 1324 | if (ret < 0) { |
1351 | if (iovec != &inline_vec) | 1325 | if (iovec != inline_vecs) |
1352 | kfree(iovec); | 1326 | kfree(iovec); |
1353 | return ret; | 1327 | return ret; |
1354 | } | 1328 | } |
@@ -1395,7 +1369,7 @@ rw_common: | |||
1395 | return -EINVAL; | 1369 | return -EINVAL; |
1396 | } | 1370 | } |
1397 | 1371 | ||
1398 | if (iovec != &inline_vec) | 1372 | if (iovec != inline_vecs) |
1399 | kfree(iovec); | 1373 | kfree(iovec); |
1400 | 1374 | ||
1401 | if (ret != -EIOCBQUEUED) { | 1375 | if (ret != -EIOCBQUEUED) { |
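
The fs/aio.c hunks above drop the nested rcu_read_lock()/rcu_dereference() pairs because mm->ioctx_table is only ever written under mm->ioctx_lock; a path that already holds that spinlock may load the pointer with rcu_dereference_raw(), and exit_aio(), called when the mm is being torn down and can no longer race with table updates, can walk the table with a plain indexed loop. Below is a minimal userspace model of the same update-side pattern; the names are hypothetical, a pthread mutex stands in for the spinlock, and an immediate free() stands in for the call_rcu() deferral.

/*
 * Userspace sketch of the resize path in ioctx_add_table(): the published
 * table pointer is only written with the lock held, so a writer holding the
 * lock can read it directly; only lock-free readers need RCU accessors.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct ctx_table {
	size_t nr;
	void *slot[];		/* flexible array of context slots */
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct ctx_table *table;	/* published pointer */

static int table_grow(size_t new_nr)
{
	struct ctx_table *new, *old;

	new = calloc(1, sizeof(*new) + new_nr * sizeof(void *));
	if (!new)
		return -1;
	new->nr = new_nr;

	pthread_mutex_lock(&table_lock);
	old = table;		/* lock held: no rcu_read_lock() needed */
	if (old)
		memcpy(new->slot, old->slot, old->nr * sizeof(void *));
	table = new;		/* the kernel uses rcu_assign_pointer() here */
	pthread_mutex_unlock(&table_lock);

	free(old);		/* the kernel defers this through RCU */
	return 0;
}

int main(void)
{
	if (table_grow(4) || table_grow(16))
		return 1;
	printf("table resized to %zu slots\n", table->nr);
	free(table);
	return 0;
}
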
diff --git a/fs/bad_inode.c b/fs/bad_inode.c
index 7c93953030fb..afd2b4408adf 100644
--- a/fs/bad_inode.c
+++ b/fs/bad_inode.c
@@ -218,8 +218,9 @@ static int bad_inode_mknod (struct inode *dir, struct dentry *dentry, | |||
218 | return -EIO; | 218 | return -EIO; |
219 | } | 219 | } |
220 | 220 | ||
221 | static int bad_inode_rename (struct inode *old_dir, struct dentry *old_dentry, | 221 | static int bad_inode_rename2(struct inode *old_dir, struct dentry *old_dentry, |
222 | struct inode *new_dir, struct dentry *new_dentry) | 222 | struct inode *new_dir, struct dentry *new_dentry, |
223 | unsigned int flags) | ||
223 | { | 224 | { |
224 | return -EIO; | 225 | return -EIO; |
225 | } | 226 | } |
@@ -279,7 +280,7 @@ static const struct inode_operations bad_inode_ops = | |||
279 | .mkdir = bad_inode_mkdir, | 280 | .mkdir = bad_inode_mkdir, |
280 | .rmdir = bad_inode_rmdir, | 281 | .rmdir = bad_inode_rmdir, |
281 | .mknod = bad_inode_mknod, | 282 | .mknod = bad_inode_mknod, |
282 | .rename = bad_inode_rename, | 283 | .rename2 = bad_inode_rename2, |
283 | .readlink = bad_inode_readlink, | 284 | .readlink = bad_inode_readlink, |
284 | /* follow_link must be no-op, otherwise unmounting this inode | 285 | /* follow_link must be no-op, otherwise unmounting this inode |
285 | won't work */ | 286 | won't work */ |
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 3668048e16f8..3183742d6f0d 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -8476,6 +8476,16 @@ out_notrans: | |||
8476 | return ret; | 8476 | return ret; |
8477 | } | 8477 | } |
8478 | 8478 | ||
8479 | static int btrfs_rename2(struct inode *old_dir, struct dentry *old_dentry, | ||
8480 | struct inode *new_dir, struct dentry *new_dentry, | ||
8481 | unsigned int flags) | ||
8482 | { | ||
8483 | if (flags & ~RENAME_NOREPLACE) | ||
8484 | return -EINVAL; | ||
8485 | |||
8486 | return btrfs_rename(old_dir, old_dentry, new_dir, new_dentry); | ||
8487 | } | ||
8488 | |||
8479 | static void btrfs_run_delalloc_work(struct btrfs_work *work) | 8489 | static void btrfs_run_delalloc_work(struct btrfs_work *work) |
8480 | { | 8490 | { |
8481 | struct btrfs_delalloc_work *delalloc_work; | 8491 | struct btrfs_delalloc_work *delalloc_work; |
@@ -9019,7 +9029,7 @@ static const struct inode_operations btrfs_dir_inode_operations = { | |||
9019 | .link = btrfs_link, | 9029 | .link = btrfs_link, |
9020 | .mkdir = btrfs_mkdir, | 9030 | .mkdir = btrfs_mkdir, |
9021 | .rmdir = btrfs_rmdir, | 9031 | .rmdir = btrfs_rmdir, |
9022 | .rename = btrfs_rename, | 9032 | .rename2 = btrfs_rename2, |
9023 | .symlink = btrfs_symlink, | 9033 | .symlink = btrfs_symlink, |
9024 | .setattr = btrfs_setattr, | 9034 | .setattr = btrfs_setattr, |
9025 | .mknod = btrfs_mknod, | 9035 | .mknod = btrfs_mknod, |
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 8e16bca69c56..67b48b9a03e0 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -851,7 +851,6 @@ static struct dentry *get_default_root(struct super_block *sb, | |||
851 | struct btrfs_path *path; | 851 | struct btrfs_path *path; |
852 | struct btrfs_key location; | 852 | struct btrfs_key location; |
853 | struct inode *inode; | 853 | struct inode *inode; |
854 | struct dentry *dentry; | ||
855 | u64 dir_id; | 854 | u64 dir_id; |
856 | int new = 0; | 855 | int new = 0; |
857 | 856 | ||
@@ -922,13 +921,7 @@ setup_root: | |||
922 | return dget(sb->s_root); | 921 | return dget(sb->s_root); |
923 | } | 922 | } |
924 | 923 | ||
925 | dentry = d_obtain_alias(inode); | 924 | return d_obtain_root(inode); |
926 | if (!IS_ERR(dentry)) { | ||
927 | spin_lock(&dentry->d_lock); | ||
928 | dentry->d_flags &= ~DCACHE_DISCONNECTED; | ||
929 | spin_unlock(&dentry->d_lock); | ||
930 | } | ||
931 | return dentry; | ||
932 | } | 925 | } |
933 | 926 | ||
934 | static int btrfs_fill_super(struct super_block *sb, | 927 | static int btrfs_fill_super(struct super_block *sb, |
diff --git a/fs/ceph/acl.c b/fs/ceph/acl.c
index 469f2e8657e8..cebf2ebefb55 100644
--- a/fs/ceph/acl.c
+++ b/fs/ceph/acl.c
@@ -172,14 +172,24 @@ out: | |||
172 | int ceph_init_acl(struct dentry *dentry, struct inode *inode, struct inode *dir) | 172 | int ceph_init_acl(struct dentry *dentry, struct inode *inode, struct inode *dir) |
173 | { | 173 | { |
174 | struct posix_acl *default_acl, *acl; | 174 | struct posix_acl *default_acl, *acl; |
175 | umode_t new_mode = inode->i_mode; | ||
175 | int error; | 176 | int error; |
176 | 177 | ||
177 | error = posix_acl_create(dir, &inode->i_mode, &default_acl, &acl); | 178 | error = posix_acl_create(dir, &new_mode, &default_acl, &acl); |
178 | if (error) | 179 | if (error) |
179 | return error; | 180 | return error; |
180 | 181 | ||
181 | if (!default_acl && !acl) | 182 | if (!default_acl && !acl) { |
182 | cache_no_acl(inode); | 183 | cache_no_acl(inode); |
184 | if (new_mode != inode->i_mode) { | ||
185 | struct iattr newattrs = { | ||
186 | .ia_mode = new_mode, | ||
187 | .ia_valid = ATTR_MODE, | ||
188 | }; | ||
189 | error = ceph_setattr(dentry, &newattrs); | ||
190 | } | ||
191 | return error; | ||
192 | } | ||
183 | 193 | ||
184 | if (default_acl) { | 194 | if (default_acl) { |
185 | error = ceph_set_acl(inode, default_acl, ACL_TYPE_DEFAULT); | 195 | error = ceph_set_acl(inode, default_acl, ACL_TYPE_DEFAULT); |
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 1fde164b74b5..6d1cd45dca89 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -3277,7 +3277,7 @@ int ceph_encode_inode_release(void **p, struct inode *inode, | |||
3277 | rel->ino = cpu_to_le64(ceph_ino(inode)); | 3277 | rel->ino = cpu_to_le64(ceph_ino(inode)); |
3278 | rel->cap_id = cpu_to_le64(cap->cap_id); | 3278 | rel->cap_id = cpu_to_le64(cap->cap_id); |
3279 | rel->seq = cpu_to_le32(cap->seq); | 3279 | rel->seq = cpu_to_le32(cap->seq); |
3280 | rel->issue_seq = cpu_to_le32(cap->issue_seq), | 3280 | rel->issue_seq = cpu_to_le32(cap->issue_seq); |
3281 | rel->mseq = cpu_to_le32(cap->mseq); | 3281 | rel->mseq = cpu_to_le32(cap->mseq); |
3282 | rel->caps = cpu_to_le32(cap->implemented); | 3282 | rel->caps = cpu_to_le32(cap->implemented); |
3283 | rel->wanted = cpu_to_le32(cap->mds_wanted); | 3283 | rel->wanted = cpu_to_le32(cap->mds_wanted); |
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 302085100c28..2eb02f80a0ab 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -423,6 +423,9 @@ static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *i, | |||
423 | dout("sync_read on file %p %llu~%u %s\n", file, off, | 423 | dout("sync_read on file %p %llu~%u %s\n", file, off, |
424 | (unsigned)len, | 424 | (unsigned)len, |
425 | (file->f_flags & O_DIRECT) ? "O_DIRECT" : ""); | 425 | (file->f_flags & O_DIRECT) ? "O_DIRECT" : ""); |
426 | |||
427 | if (!len) | ||
428 | return 0; | ||
426 | /* | 429 | /* |
427 | * flush any page cache pages in this range. this | 430 | * flush any page cache pages in this range. this |
428 | * will make concurrent normal and sync io slow, | 431 | * will make concurrent normal and sync io slow, |
@@ -470,8 +473,11 @@ static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *i, | |||
470 | size_t left = ret; | 473 | size_t left = ret; |
471 | 474 | ||
472 | while (left) { | 475 | while (left) { |
473 | int copy = min_t(size_t, PAGE_SIZE, left); | 476 | size_t page_off = off & ~PAGE_MASK; |
474 | l = copy_page_to_iter(pages[k++], 0, copy, i); | 477 | size_t copy = min_t(size_t, |
478 | PAGE_SIZE - page_off, left); | ||
479 | l = copy_page_to_iter(pages[k++], page_off, | ||
480 | copy, i); | ||
475 | off += l; | 481 | off += l; |
476 | left -= l; | 482 | left -= l; |
477 | if (l < copy) | 483 | if (l < copy) |
@@ -531,7 +537,7 @@ static void ceph_sync_write_unsafe(struct ceph_osd_request *req, bool unsafe) | |||
531 | * objects, rollback on failure, etc.) | 537 | * objects, rollback on failure, etc.) |
532 | */ | 538 | */ |
533 | static ssize_t | 539 | static ssize_t |
534 | ceph_sync_direct_write(struct kiocb *iocb, struct iov_iter *from) | 540 | ceph_sync_direct_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos) |
535 | { | 541 | { |
536 | struct file *file = iocb->ki_filp; | 542 | struct file *file = iocb->ki_filp; |
537 | struct inode *inode = file_inode(file); | 543 | struct inode *inode = file_inode(file); |
@@ -547,7 +553,6 @@ ceph_sync_direct_write(struct kiocb *iocb, struct iov_iter *from) | |||
547 | int check_caps = 0; | 553 | int check_caps = 0; |
548 | int ret; | 554 | int ret; |
549 | struct timespec mtime = CURRENT_TIME; | 555 | struct timespec mtime = CURRENT_TIME; |
550 | loff_t pos = iocb->ki_pos; | ||
551 | size_t count = iov_iter_count(from); | 556 | size_t count = iov_iter_count(from); |
552 | 557 | ||
553 | if (ceph_snap(file_inode(file)) != CEPH_NOSNAP) | 558 | if (ceph_snap(file_inode(file)) != CEPH_NOSNAP) |
@@ -646,7 +651,8 @@ ceph_sync_direct_write(struct kiocb *iocb, struct iov_iter *from) | |||
646 | * correct atomic write, we should e.g. take write locks on all | 651 | * correct atomic write, we should e.g. take write locks on all |
647 | * objects, rollback on failure, etc.) | 652 | * objects, rollback on failure, etc.) |
648 | */ | 653 | */ |
649 | static ssize_t ceph_sync_write(struct kiocb *iocb, struct iov_iter *from) | 654 | static ssize_t |
655 | ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos) | ||
650 | { | 656 | { |
651 | struct file *file = iocb->ki_filp; | 657 | struct file *file = iocb->ki_filp; |
652 | struct inode *inode = file_inode(file); | 658 | struct inode *inode = file_inode(file); |
@@ -663,7 +669,6 @@ static ssize_t ceph_sync_write(struct kiocb *iocb, struct iov_iter *from) | |||
663 | int check_caps = 0; | 669 | int check_caps = 0; |
664 | int ret; | 670 | int ret; |
665 | struct timespec mtime = CURRENT_TIME; | 671 | struct timespec mtime = CURRENT_TIME; |
666 | loff_t pos = iocb->ki_pos; | ||
667 | size_t count = iov_iter_count(from); | 672 | size_t count = iov_iter_count(from); |
668 | 673 | ||
669 | if (ceph_snap(file_inode(file)) != CEPH_NOSNAP) | 674 | if (ceph_snap(file_inode(file)) != CEPH_NOSNAP) |
@@ -918,9 +923,9 @@ retry_snap: | |||
918 | /* we might need to revert back to that point */ | 923 | /* we might need to revert back to that point */ |
919 | data = *from; | 924 | data = *from; |
920 | if (file->f_flags & O_DIRECT) | 925 | if (file->f_flags & O_DIRECT) |
921 | written = ceph_sync_direct_write(iocb, &data); | 926 | written = ceph_sync_direct_write(iocb, &data, pos); |
922 | else | 927 | else |
923 | written = ceph_sync_write(iocb, &data); | 928 | written = ceph_sync_write(iocb, &data, pos); |
924 | if (written == -EOLDSNAPC) { | 929 | if (written == -EOLDSNAPC) { |
925 | dout("aio_write %p %llx.%llx %llu~%u" | 930 | dout("aio_write %p %llx.%llx %llu~%u" |
926 | "got EOLDSNAPC, retrying\n", | 931 | "got EOLDSNAPC, retrying\n", |
@@ -1177,6 +1182,9 @@ static long ceph_fallocate(struct file *file, int mode, | |||
1177 | loff_t endoff = 0; | 1182 | loff_t endoff = 0; |
1178 | loff_t size; | 1183 | loff_t size; |
1179 | 1184 | ||
1185 | if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE)) | ||
1186 | return -EOPNOTSUPP; | ||
1187 | |||
1180 | if (!S_ISREG(inode->i_mode)) | 1188 | if (!S_ISREG(inode->i_mode)) |
1181 | return -EOPNOTSUPP; | 1189 | return -EOPNOTSUPP; |
1182 | 1190 | ||
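
The ceph_sync_read() change above fixes copying into the iov_iter when the read offset is not page aligned: each copy_page_to_iter() call now starts at the in-page offset (off & ~PAGE_MASK) and is capped at PAGE_SIZE minus that offset, so only the first page is copied from mid-page and later pages start at 0. A small self-contained model of the corrected loop, with memcpy() standing in for copy_page_to_iter() and hypothetical helper names:

#include <stddef.h>
#include <string.h>

#define MODEL_PAGE_SIZE 4096UL

/*
 * 'pages' covers the page-aligned span containing [off, off + left); copy
 * 'left' bytes starting at byte offset 'off' into 'dst'.  After the first
 * iteration 'off' becomes page aligned, so page_off stays zero.
 */
static size_t copy_pages_to_buf(char * const pages[], unsigned long off,
				size_t left, char *dst)
{
	size_t copied = 0, k = 0;

	while (left) {
		size_t page_off = off % MODEL_PAGE_SIZE;
		size_t copy = MODEL_PAGE_SIZE - page_off;

		if (copy > left)
			copy = left;
		memcpy(dst + copied, pages[k++] + page_off, copy);
		off += copy;
		left -= copy;
		copied += copy;
	}
	return copied;
}
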
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 92a2548278fc..bad07c09f91e 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -1904,6 +1904,7 @@ static int __prepare_send_request(struct ceph_mds_client *mdsc, | |||
1904 | req->r_tid, ceph_mds_op_name(req->r_op), req->r_attempts); | 1904 | req->r_tid, ceph_mds_op_name(req->r_op), req->r_attempts); |
1905 | 1905 | ||
1906 | if (req->r_got_unsafe) { | 1906 | if (req->r_got_unsafe) { |
1907 | void *p; | ||
1907 | /* | 1908 | /* |
1908 | * Replay. Do not regenerate message (and rebuild | 1909 | * Replay. Do not regenerate message (and rebuild |
1909 | * paths, etc.); just use the original message. | 1910 | * paths, etc.); just use the original message. |
@@ -1924,8 +1925,13 @@ static int __prepare_send_request(struct ceph_mds_client *mdsc, | |||
1924 | 1925 | ||
1925 | /* remove cap/dentry releases from message */ | 1926 | /* remove cap/dentry releases from message */ |
1926 | rhead->num_releases = 0; | 1927 | rhead->num_releases = 0; |
1927 | msg->hdr.front_len = cpu_to_le32(req->r_request_release_offset); | 1928 | |
1928 | msg->front.iov_len = req->r_request_release_offset; | 1929 | /* time stamp */ |
1930 | p = msg->front.iov_base + req->r_request_release_offset; | ||
1931 | ceph_encode_copy(&p, &req->r_stamp, sizeof(req->r_stamp)); | ||
1932 | |||
1933 | msg->front.iov_len = p - msg->front.iov_base; | ||
1934 | msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); | ||
1929 | return 0; | 1935 | return 0; |
1930 | } | 1936 | } |
1931 | 1937 | ||
@@ -2061,11 +2067,12 @@ static void __wake_requests(struct ceph_mds_client *mdsc, | |||
2061 | static void kick_requests(struct ceph_mds_client *mdsc, int mds) | 2067 | static void kick_requests(struct ceph_mds_client *mdsc, int mds) |
2062 | { | 2068 | { |
2063 | struct ceph_mds_request *req; | 2069 | struct ceph_mds_request *req; |
2064 | struct rb_node *p; | 2070 | struct rb_node *p = rb_first(&mdsc->request_tree); |
2065 | 2071 | ||
2066 | dout("kick_requests mds%d\n", mds); | 2072 | dout("kick_requests mds%d\n", mds); |
2067 | for (p = rb_first(&mdsc->request_tree); p; p = rb_next(p)) { | 2073 | while (p) { |
2068 | req = rb_entry(p, struct ceph_mds_request, r_node); | 2074 | req = rb_entry(p, struct ceph_mds_request, r_node); |
2075 | p = rb_next(p); | ||
2069 | if (req->r_got_unsafe) | 2076 | if (req->r_got_unsafe) |
2070 | continue; | 2077 | continue; |
2071 | if (req->r_session && | 2078 | if (req->r_session && |
@@ -2248,6 +2255,7 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg) | |||
2248 | */ | 2255 | */ |
2249 | if (result == -ESTALE) { | 2256 | if (result == -ESTALE) { |
2250 | dout("got ESTALE on request %llu", req->r_tid); | 2257 | dout("got ESTALE on request %llu", req->r_tid); |
2258 | req->r_resend_mds = -1; | ||
2251 | if (req->r_direct_mode != USE_AUTH_MDS) { | 2259 | if (req->r_direct_mode != USE_AUTH_MDS) { |
2252 | dout("not using auth, setting for that now"); | 2260 | dout("not using auth, setting for that now"); |
2253 | req->r_direct_mode = USE_AUTH_MDS; | 2261 | req->r_direct_mode = USE_AUTH_MDS; |
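
The kick_requests() hunk above advances the rbtree iterator before the current request is examined, because kicking a request can end up unregistering it from mdsc->request_tree, after which calling rb_next() on the stale node would be unsafe. The same removal-safe traversal pattern, modelled here on a plain singly linked list with hypothetical types:

#include <stdlib.h>

struct req {
	struct req *next;
	int done;
};

/* Walk the list and drop completed entries; fetch the successor before
 * acting on the current node so the walk survives the node being freed. */
static void kick_all(struct req **head)
{
	struct req **link = head;
	struct req *r = *head;

	while (r) {
		struct req *next = r->next;	/* grab successor first */

		if (r->done) {			/* may unlink and free r */
			*link = next;
			free(r);
		} else {
			link = &r->next;
		}
		r = next;			/* safe: read before the free */
	}
}
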
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index 06150fd745ac..f6e12377335c 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -755,7 +755,7 @@ static struct dentry *open_root_dentry(struct ceph_fs_client *fsc, | |||
755 | goto out; | 755 | goto out; |
756 | } | 756 | } |
757 | } else { | 757 | } else { |
758 | root = d_obtain_alias(inode); | 758 | root = d_obtain_root(inode); |
759 | } | 759 | } |
760 | ceph_init_dentry(root); | 760 | ceph_init_dentry(root); |
761 | dout("open_root_inode success, root dentry is %p\n", root); | 761 | dout("open_root_inode success, root dentry is %p\n", root); |
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
index c9c2b887381e..12f58d22e017 100644
--- a/fs/ceph/xattr.c
+++ b/fs/ceph/xattr.c
@@ -592,12 +592,12 @@ start: | |||
592 | xattr_version = ci->i_xattrs.version; | 592 | xattr_version = ci->i_xattrs.version; |
593 | spin_unlock(&ci->i_ceph_lock); | 593 | spin_unlock(&ci->i_ceph_lock); |
594 | 594 | ||
595 | xattrs = kcalloc(numattr, sizeof(struct ceph_xattr *), | 595 | xattrs = kcalloc(numattr, sizeof(struct ceph_inode_xattr *), |
596 | GFP_NOFS); | 596 | GFP_NOFS); |
597 | err = -ENOMEM; | 597 | err = -ENOMEM; |
598 | if (!xattrs) | 598 | if (!xattrs) |
599 | goto bad_lock; | 599 | goto bad_lock; |
600 | memset(xattrs, 0, numattr*sizeof(struct ceph_xattr *)); | 600 | |
601 | for (i = 0; i < numattr; i++) { | 601 | for (i = 0; i < numattr; i++) { |
602 | xattrs[i] = kmalloc(sizeof(struct ceph_inode_xattr), | 602 | xattrs[i] = kmalloc(sizeof(struct ceph_inode_xattr), |
603 | GFP_NOFS); | 603 | GFP_NOFS); |
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 888398067420..ac4f260155c8 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -848,7 +848,7 @@ const struct inode_operations cifs_dir_inode_ops = { | |||
848 | .link = cifs_hardlink, | 848 | .link = cifs_hardlink, |
849 | .mkdir = cifs_mkdir, | 849 | .mkdir = cifs_mkdir, |
850 | .rmdir = cifs_rmdir, | 850 | .rmdir = cifs_rmdir, |
851 | .rename = cifs_rename, | 851 | .rename2 = cifs_rename2, |
852 | .permission = cifs_permission, | 852 | .permission = cifs_permission, |
853 | /* revalidate:cifs_revalidate, */ | 853 | /* revalidate:cifs_revalidate, */ |
854 | .setattr = cifs_setattr, | 854 | .setattr = cifs_setattr, |
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index 560480263336..b0fafa499505 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -68,8 +68,8 @@ extern int cifs_hardlink(struct dentry *, struct inode *, struct dentry *); | |||
68 | extern int cifs_mknod(struct inode *, struct dentry *, umode_t, dev_t); | 68 | extern int cifs_mknod(struct inode *, struct dentry *, umode_t, dev_t); |
69 | extern int cifs_mkdir(struct inode *, struct dentry *, umode_t); | 69 | extern int cifs_mkdir(struct inode *, struct dentry *, umode_t); |
70 | extern int cifs_rmdir(struct inode *, struct dentry *); | 70 | extern int cifs_rmdir(struct inode *, struct dentry *); |
71 | extern int cifs_rename(struct inode *, struct dentry *, struct inode *, | 71 | extern int cifs_rename2(struct inode *, struct dentry *, struct inode *, |
72 | struct dentry *); | 72 | struct dentry *, unsigned int); |
73 | extern int cifs_revalidate_file_attr(struct file *filp); | 73 | extern int cifs_revalidate_file_attr(struct file *filp); |
74 | extern int cifs_revalidate_dentry_attr(struct dentry *); | 74 | extern int cifs_revalidate_dentry_attr(struct dentry *); |
75 | extern int cifs_revalidate_file(struct file *filp); | 75 | extern int cifs_revalidate_file(struct file *filp); |
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 41de3935caa0..426d6c6ad8bf 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -1627,8 +1627,9 @@ do_rename_exit: | |||
1627 | } | 1627 | } |
1628 | 1628 | ||
1629 | int | 1629 | int |
1630 | cifs_rename(struct inode *source_dir, struct dentry *source_dentry, | 1630 | cifs_rename2(struct inode *source_dir, struct dentry *source_dentry, |
1631 | struct inode *target_dir, struct dentry *target_dentry) | 1631 | struct inode *target_dir, struct dentry *target_dentry, |
1632 | unsigned int flags) | ||
1632 | { | 1633 | { |
1633 | char *from_name = NULL; | 1634 | char *from_name = NULL; |
1634 | char *to_name = NULL; | 1635 | char *to_name = NULL; |
@@ -1640,6 +1641,9 @@ cifs_rename(struct inode *source_dir, struct dentry *source_dentry, | |||
1640 | unsigned int xid; | 1641 | unsigned int xid; |
1641 | int rc, tmprc; | 1642 | int rc, tmprc; |
1642 | 1643 | ||
1644 | if (flags & ~RENAME_NOREPLACE) | ||
1645 | return -EINVAL; | ||
1646 | |||
1643 | cifs_sb = CIFS_SB(source_dir->i_sb); | 1647 | cifs_sb = CIFS_SB(source_dir->i_sb); |
1644 | tlink = cifs_sb_tlink(cifs_sb); | 1648 | tlink = cifs_sb_tlink(cifs_sb); |
1645 | if (IS_ERR(tlink)) | 1649 | if (IS_ERR(tlink)) |
@@ -1667,6 +1671,12 @@ cifs_rename(struct inode *source_dir, struct dentry *source_dentry, | |||
1667 | rc = cifs_do_rename(xid, source_dentry, from_name, target_dentry, | 1671 | rc = cifs_do_rename(xid, source_dentry, from_name, target_dentry, |
1668 | to_name); | 1672 | to_name); |
1669 | 1673 | ||
1674 | /* | ||
1675 | * No-replace is the natural behavior for CIFS, so skip unlink hacks. | ||
1676 | */ | ||
1677 | if (flags & RENAME_NOREPLACE) | ||
1678 | goto cifs_rename_exit; | ||
1679 | |||
1670 | if (rc == -EEXIST && tcon->unix_ext) { | 1680 | if (rc == -EEXIST && tcon->unix_ext) { |
1671 | /* | 1681 | /* |
1672 | * Are src and dst hardlinks of same inode? We can only tell | 1682 | * Are src and dst hardlinks of same inode? We can only tell |
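
The bad_inode, btrfs and cifs hunks above switch those filesystems to the ->rename2 signature; btrfs and cifs reject any flag they do not implement with -EINVAL and then reuse the existing rename logic. RENAME_NOREPLACE needs no extra filesystem work in the common case because the VFS already fails the call with -EEXIST when the target dentry is positive. A hedged sketch of the pattern for a hypothetical "foofs" (not taken from any of the filesystems above):

#include <errno.h>

#ifndef RENAME_NOREPLACE
#define RENAME_NOREPLACE	(1 << 0)	/* value from include/uapi/linux/fs.h */
#endif

struct inode;
struct dentry;

/* the filesystem's pre-existing ->rename implementation, unchanged */
extern int foofs_rename(struct inode *old_dir, struct dentry *old_dentry,
			struct inode *new_dir, struct dentry *new_dentry);

static int foofs_rename2(struct inode *old_dir, struct dentry *old_dentry,
			 struct inode *new_dir, struct dentry *new_dentry,
			 unsigned int flags)
{
	/* reject flags this filesystem does not know how to honour */
	if (flags & ~RENAME_NOREPLACE)
		return -EINVAL;

	/*
	 * RENAME_NOREPLACE: the VFS has already returned -EEXIST if the
	 * target dentry was positive, so nothing extra is needed here.
	 */
	return foofs_rename(old_dir, old_dentry, new_dir, new_dentry);
}
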
diff --git a/fs/dcache.c b/fs/dcache.c
index 06f65857a855..d30ce699ae4b 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -731,8 +731,6 @@ EXPORT_SYMBOL(dget_parent); | |||
731 | /** | 731 | /** |
732 | * d_find_alias - grab a hashed alias of inode | 732 | * d_find_alias - grab a hashed alias of inode |
733 | * @inode: inode in question | 733 | * @inode: inode in question |
734 | * @want_discon: flag, used by d_splice_alias, to request | ||
735 | * that only a DISCONNECTED alias be returned. | ||
736 | * | 734 | * |
737 | * If inode has a hashed alias, or is a directory and has any alias, | 735 | * If inode has a hashed alias, or is a directory and has any alias, |
738 | * acquire the reference to alias and return it. Otherwise return NULL. | 736 | * acquire the reference to alias and return it. Otherwise return NULL. |
@@ -741,10 +739,9 @@ EXPORT_SYMBOL(dget_parent); | |||
741 | * of a filesystem. | 739 | * of a filesystem. |
742 | * | 740 | * |
743 | * If the inode has an IS_ROOT, DCACHE_DISCONNECTED alias, then prefer | 741 | * If the inode has an IS_ROOT, DCACHE_DISCONNECTED alias, then prefer |
744 | * any other hashed alias over that one unless @want_discon is set, | 742 | * any other hashed alias over that one. |
745 | * in which case only return an IS_ROOT, DCACHE_DISCONNECTED alias. | ||
746 | */ | 743 | */ |
747 | static struct dentry *__d_find_alias(struct inode *inode, int want_discon) | 744 | static struct dentry *__d_find_alias(struct inode *inode) |
748 | { | 745 | { |
749 | struct dentry *alias, *discon_alias; | 746 | struct dentry *alias, *discon_alias; |
750 | 747 | ||
@@ -756,7 +753,7 @@ again: | |||
756 | if (IS_ROOT(alias) && | 753 | if (IS_ROOT(alias) && |
757 | (alias->d_flags & DCACHE_DISCONNECTED)) { | 754 | (alias->d_flags & DCACHE_DISCONNECTED)) { |
758 | discon_alias = alias; | 755 | discon_alias = alias; |
759 | } else if (!want_discon) { | 756 | } else { |
760 | __dget_dlock(alias); | 757 | __dget_dlock(alias); |
761 | spin_unlock(&alias->d_lock); | 758 | spin_unlock(&alias->d_lock); |
762 | return alias; | 759 | return alias; |
@@ -768,12 +765,9 @@ again: | |||
768 | alias = discon_alias; | 765 | alias = discon_alias; |
769 | spin_lock(&alias->d_lock); | 766 | spin_lock(&alias->d_lock); |
770 | if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) { | 767 | if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) { |
771 | if (IS_ROOT(alias) && | 768 | __dget_dlock(alias); |
772 | (alias->d_flags & DCACHE_DISCONNECTED)) { | 769 | spin_unlock(&alias->d_lock); |
773 | __dget_dlock(alias); | 770 | return alias; |
774 | spin_unlock(&alias->d_lock); | ||
775 | return alias; | ||
776 | } | ||
777 | } | 771 | } |
778 | spin_unlock(&alias->d_lock); | 772 | spin_unlock(&alias->d_lock); |
779 | goto again; | 773 | goto again; |
@@ -787,7 +781,7 @@ struct dentry *d_find_alias(struct inode *inode) | |||
787 | 781 | ||
788 | if (!hlist_empty(&inode->i_dentry)) { | 782 | if (!hlist_empty(&inode->i_dentry)) { |
789 | spin_lock(&inode->i_lock); | 783 | spin_lock(&inode->i_lock); |
790 | de = __d_find_alias(inode, 0); | 784 | de = __d_find_alias(inode); |
791 | spin_unlock(&inode->i_lock); | 785 | spin_unlock(&inode->i_lock); |
792 | } | 786 | } |
793 | return de; | 787 | return de; |
@@ -1781,25 +1775,7 @@ struct dentry *d_find_any_alias(struct inode *inode) | |||
1781 | } | 1775 | } |
1782 | EXPORT_SYMBOL(d_find_any_alias); | 1776 | EXPORT_SYMBOL(d_find_any_alias); |
1783 | 1777 | ||
1784 | /** | 1778 | static struct dentry *__d_obtain_alias(struct inode *inode, int disconnected) |
1785 | * d_obtain_alias - find or allocate a dentry for a given inode | ||
1786 | * @inode: inode to allocate the dentry for | ||
1787 | * | ||
1788 | * Obtain a dentry for an inode resulting from NFS filehandle conversion or | ||
1789 | * similar open by handle operations. The returned dentry may be anonymous, | ||
1790 | * or may have a full name (if the inode was already in the cache). | ||
1791 | * | ||
1792 | * When called on a directory inode, we must ensure that the inode only ever | ||
1793 | * has one dentry. If a dentry is found, that is returned instead of | ||
1794 | * allocating a new one. | ||
1795 | * | ||
1796 | * On successful return, the reference to the inode has been transferred | ||
1797 | * to the dentry. In case of an error the reference on the inode is released. | ||
1798 | * To make it easier to use in export operations a %NULL or IS_ERR inode may | ||
1799 | * be passed in and will be the error will be propagate to the return value, | ||
1800 | * with a %NULL @inode replaced by ERR_PTR(-ESTALE). | ||
1801 | */ | ||
1802 | struct dentry *d_obtain_alias(struct inode *inode) | ||
1803 | { | 1779 | { |
1804 | static const struct qstr anonstring = QSTR_INIT("/", 1); | 1780 | static const struct qstr anonstring = QSTR_INIT("/", 1); |
1805 | struct dentry *tmp; | 1781 | struct dentry *tmp; |
@@ -1830,7 +1806,10 @@ struct dentry *d_obtain_alias(struct inode *inode) | |||
1830 | } | 1806 | } |
1831 | 1807 | ||
1832 | /* attach a disconnected dentry */ | 1808 | /* attach a disconnected dentry */ |
1833 | add_flags = d_flags_for_inode(inode) | DCACHE_DISCONNECTED; | 1809 | add_flags = d_flags_for_inode(inode); |
1810 | |||
1811 | if (disconnected) | ||
1812 | add_flags |= DCACHE_DISCONNECTED; | ||
1834 | 1813 | ||
1835 | spin_lock(&tmp->d_lock); | 1814 | spin_lock(&tmp->d_lock); |
1836 | tmp->d_inode = inode; | 1815 | tmp->d_inode = inode; |
@@ -1851,59 +1830,51 @@ struct dentry *d_obtain_alias(struct inode *inode) | |||
1851 | iput(inode); | 1830 | iput(inode); |
1852 | return res; | 1831 | return res; |
1853 | } | 1832 | } |
1854 | EXPORT_SYMBOL(d_obtain_alias); | ||
1855 | 1833 | ||
1856 | /** | 1834 | /** |
1857 | * d_splice_alias - splice a disconnected dentry into the tree if one exists | 1835 | * d_obtain_alias - find or allocate a DISCONNECTED dentry for a given inode |
1858 | * @inode: the inode which may have a disconnected dentry | 1836 | * @inode: inode to allocate the dentry for |
1859 | * @dentry: a negative dentry which we want to point to the inode. | ||
1860 | * | ||
1861 | * If inode is a directory and has a 'disconnected' dentry (i.e. IS_ROOT and | ||
1862 | * DCACHE_DISCONNECTED), then d_move that in place of the given dentry | ||
1863 | * and return it, else simply d_add the inode to the dentry and return NULL. | ||
1864 | * | 1837 | * |
1865 | * This is needed in the lookup routine of any filesystem that is exportable | 1838 | * Obtain a dentry for an inode resulting from NFS filehandle conversion or |
1866 | * (via knfsd) so that we can build dcache paths to directories effectively. | 1839 | * similar open by handle operations. The returned dentry may be anonymous, |
1840 | * or may have a full name (if the inode was already in the cache). | ||
1867 | * | 1841 | * |
1868 | * If a dentry was found and moved, then it is returned. Otherwise NULL | 1842 | * When called on a directory inode, we must ensure that the inode only ever |
1869 | * is returned. This matches the expected return value of ->lookup. | 1843 | * has one dentry. If a dentry is found, that is returned instead of |
1844 | * allocating a new one. | ||
1870 | * | 1845 | * |
1871 | * Cluster filesystems may call this function with a negative, hashed dentry. | 1846 | * On successful return, the reference to the inode has been transferred |
1872 | * In that case, we know that the inode will be a regular file, and also this | 1847 | * to the dentry. In case of an error the reference on the inode is released. |
1873 | * will only occur during atomic_open. So we need to check for the dentry | 1848 | * To make it easier to use in export operations a %NULL or IS_ERR inode may |
1874 | * being already hashed only in the final case. | 1849 | * be passed in and the error will be propagated to the return value, |
1850 | * with a %NULL @inode replaced by ERR_PTR(-ESTALE). | ||
1875 | */ | 1851 | */ |
1876 | struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry) | 1852 | struct dentry *d_obtain_alias(struct inode *inode) |
1877 | { | 1853 | { |
1878 | struct dentry *new = NULL; | 1854 | return __d_obtain_alias(inode, 1); |
1879 | 1855 | } | |
1880 | if (IS_ERR(inode)) | 1856 | EXPORT_SYMBOL(d_obtain_alias); |
1881 | return ERR_CAST(inode); | ||
1882 | 1857 | ||
1883 | if (inode && S_ISDIR(inode->i_mode)) { | 1858 | /** |
1884 | spin_lock(&inode->i_lock); | 1859 | * d_obtain_root - find or allocate a dentry for a given inode |
1885 | new = __d_find_alias(inode, 1); | 1860 | * @inode: inode to allocate the dentry for |
1886 | if (new) { | 1861 | * |
1887 | BUG_ON(!(new->d_flags & DCACHE_DISCONNECTED)); | 1862 | * Obtain an IS_ROOT dentry for the root of a filesystem. |
1888 | spin_unlock(&inode->i_lock); | 1863 | * |
1889 | security_d_instantiate(new, inode); | 1864 | * We must ensure that directory inodes only ever have one dentry. If a |
1890 | d_move(new, dentry); | 1865 | * dentry is found, that is returned instead of allocating a new one. |
1891 | iput(inode); | 1866 | * |
1892 | } else { | 1867 | * On successful return, the reference to the inode has been transferred |
1893 | /* already taking inode->i_lock, so d_add() by hand */ | 1868 | * to the dentry. In case of an error the reference on the inode is |
1894 | __d_instantiate(dentry, inode); | 1869 | * released. A %NULL or IS_ERR inode may be passed in and will be the |
1895 | spin_unlock(&inode->i_lock); | 1870 | * error will be propagate to the return value, with a %NULL @inode |
1896 | security_d_instantiate(dentry, inode); | 1871 | * replaced by ERR_PTR(-ESTALE). |
1897 | d_rehash(dentry); | 1872 | */ |
1898 | } | 1873 | struct dentry *d_obtain_root(struct inode *inode) |
1899 | } else { | 1874 | { |
1900 | d_instantiate(dentry, inode); | 1875 | return __d_obtain_alias(inode, 0); |
1901 | if (d_unhashed(dentry)) | ||
1902 | d_rehash(dentry); | ||
1903 | } | ||
1904 | return new; | ||
1905 | } | 1876 | } |
1906 | EXPORT_SYMBOL(d_splice_alias); | 1877 | EXPORT_SYMBOL(d_obtain_root); |
1907 | 1878 | ||
1908 | /** | 1879 | /** |
1909 | * d_add_ci - lookup or allocate new dentry with case-exact name | 1880 | * d_add_ci - lookup or allocate new dentry with case-exact name |
@@ -2697,6 +2668,75 @@ static void __d_materialise_dentry(struct dentry *dentry, struct dentry *anon) | |||
2697 | } | 2668 | } |
2698 | 2669 | ||
2699 | /** | 2670 | /** |
2671 | * d_splice_alias - splice a disconnected dentry into the tree if one exists | ||
2672 | * @inode: the inode which may have a disconnected dentry | ||
2673 | * @dentry: a negative dentry which we want to point to the inode. | ||
2674 | * | ||
2675 | * If inode is a directory and has an IS_ROOT alias, then d_move that in | ||
2676 | * place of the given dentry and return it, else simply d_add the inode | ||
2677 | * to the dentry and return NULL. | ||
2678 | * | ||
2679 | * If a non-IS_ROOT directory is found, the filesystem is corrupt, and | ||
2680 | * we should error out: directories can't have multiple aliases. | ||
2681 | * | ||
2682 | * This is needed in the lookup routine of any filesystem that is exportable | ||
2683 | * (via knfsd) so that we can build dcache paths to directories effectively. | ||
2684 | * | ||
2685 | * If a dentry was found and moved, then it is returned. Otherwise NULL | ||
2686 | * is returned. This matches the expected return value of ->lookup. | ||
2687 | * | ||
2688 | * Cluster filesystems may call this function with a negative, hashed dentry. | ||
2689 | * In that case, we know that the inode will be a regular file, and also this | ||
2690 | * will only occur during atomic_open. So we need to check for the dentry | ||
2691 | * being already hashed only in the final case. | ||
2692 | */ | ||
2693 | struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry) | ||
2694 | { | ||
2695 | struct dentry *new = NULL; | ||
2696 | |||
2697 | if (IS_ERR(inode)) | ||
2698 | return ERR_CAST(inode); | ||
2699 | |||
2700 | if (inode && S_ISDIR(inode->i_mode)) { | ||
2701 | spin_lock(&inode->i_lock); | ||
2702 | new = __d_find_any_alias(inode); | ||
2703 | if (new) { | ||
2704 | if (!IS_ROOT(new)) { | ||
2705 | spin_unlock(&inode->i_lock); | ||
2706 | dput(new); | ||
2707 | return ERR_PTR(-EIO); | ||
2708 | } | ||
2709 | if (d_ancestor(new, dentry)) { | ||
2710 | spin_unlock(&inode->i_lock); | ||
2711 | dput(new); | ||
2712 | return ERR_PTR(-EIO); | ||
2713 | } | ||
2714 | write_seqlock(&rename_lock); | ||
2715 | __d_materialise_dentry(dentry, new); | ||
2716 | write_sequnlock(&rename_lock); | ||
2717 | __d_drop(new); | ||
2718 | _d_rehash(new); | ||
2719 | spin_unlock(&new->d_lock); | ||
2720 | spin_unlock(&inode->i_lock); | ||
2721 | security_d_instantiate(new, inode); | ||
2722 | iput(inode); | ||
2723 | } else { | ||
2724 | /* already taking inode->i_lock, so d_add() by hand */ | ||
2725 | __d_instantiate(dentry, inode); | ||
2726 | spin_unlock(&inode->i_lock); | ||
2727 | security_d_instantiate(dentry, inode); | ||
2728 | d_rehash(dentry); | ||
2729 | } | ||
2730 | } else { | ||
2731 | d_instantiate(dentry, inode); | ||
2732 | if (d_unhashed(dentry)) | ||
2733 | d_rehash(dentry); | ||
2734 | } | ||
2735 | return new; | ||
2736 | } | ||
2737 | EXPORT_SYMBOL(d_splice_alias); | ||
2738 | |||
2739 | /** | ||
2700 | * d_materialise_unique - introduce an inode into the tree | 2740 | * d_materialise_unique - introduce an inode into the tree |
2701 | * @dentry: candidate dentry | 2741 | * @dentry: candidate dentry |
2702 | * @inode: inode to bind to the dentry, to which aliases may be attached | 2742 | * @inode: inode to bind to the dentry, to which aliases may be attached |
@@ -2724,7 +2764,7 @@ struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode) | |||
2724 | struct dentry *alias; | 2764 | struct dentry *alias; |
2725 | 2765 | ||
2726 | /* Does an aliased dentry already exist? */ | 2766 | /* Does an aliased dentry already exist? */ |
2727 | alias = __d_find_alias(inode, 0); | 2767 | alias = __d_find_alias(inode); |
2728 | if (alias) { | 2768 | if (alias) { |
2729 | actual = alias; | 2769 | actual = alias; |
2730 | write_seqlock(&rename_lock); | 2770 | write_seqlock(&rename_lock); |
diff --git a/fs/direct-io.c b/fs/direct-io.c index 17e39b047de5..c3116404ab49 100644 --- a/fs/direct-io.c +++ b/fs/direct-io.c | |||
@@ -158,7 +158,7 @@ static inline int dio_refill_pages(struct dio *dio, struct dio_submit *sdio) | |||
158 | { | 158 | { |
159 | ssize_t ret; | 159 | ssize_t ret; |
160 | 160 | ||
161 | ret = iov_iter_get_pages(sdio->iter, dio->pages, DIO_PAGES * PAGE_SIZE, | 161 | ret = iov_iter_get_pages(sdio->iter, dio->pages, DIO_PAGES, |
162 | &sdio->from); | 162 | &sdio->from); |
163 | 163 | ||
164 | if (ret < 0 && sdio->blocks_available && (dio->rw & WRITE)) { | 164 | if (ret < 0 && sdio->blocks_available && (dio->rw & WRITE)) { |
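The prototype change behind this hunk (and the fuse_get_user_pages hunk further down) is that iov_iter_get_pages() now takes a maximal number of pages rather than a byte count. A hedged sketch of a caller under that prototype, iov_iter_get_pages(iter, pages, maxpages, &start), where iter, pages, slots and npages are assumed locals:

	size_t start;
	ssize_t got;

	/* fill at most 'slots' entries of pages[]; the return value is the
	 * number of bytes covered, 'start' the offset into the first page */
	got = iov_iter_get_pages(iter, pages, slots, &start);
	if (got < 0)
		return got;		/* e.g. -EFAULT */
	npages = DIV_ROUND_UP(got + start, PAGE_SIZE);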
diff --git a/fs/ext2/super.c b/fs/ext2/super.c index 3750031cfa2f..b88edc05c230 100644 --- a/fs/ext2/super.c +++ b/fs/ext2/super.c | |||
@@ -161,7 +161,7 @@ static struct kmem_cache * ext2_inode_cachep; | |||
161 | static struct inode *ext2_alloc_inode(struct super_block *sb) | 161 | static struct inode *ext2_alloc_inode(struct super_block *sb) |
162 | { | 162 | { |
163 | struct ext2_inode_info *ei; | 163 | struct ext2_inode_info *ei; |
164 | ei = (struct ext2_inode_info *)kmem_cache_alloc(ext2_inode_cachep, GFP_KERNEL); | 164 | ei = kmem_cache_alloc(ext2_inode_cachep, GFP_KERNEL); |
165 | if (!ei) | 165 | if (!ei) |
166 | return NULL; | 166 | return NULL; |
167 | ei->i_block_alloc_info = NULL; | 167 | ei->i_block_alloc_info = NULL; |
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index 3520ab8a6639..b147a67baa0d 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c | |||
@@ -3455,7 +3455,6 @@ const struct inode_operations ext4_dir_inode_operations = { | |||
3455 | .rmdir = ext4_rmdir, | 3455 | .rmdir = ext4_rmdir, |
3456 | .mknod = ext4_mknod, | 3456 | .mknod = ext4_mknod, |
3457 | .tmpfile = ext4_tmpfile, | 3457 | .tmpfile = ext4_tmpfile, |
3458 | .rename = ext4_rename, | ||
3459 | .rename2 = ext4_rename2, | 3458 | .rename2 = ext4_rename2, |
3460 | .setattr = ext4_setattr, | 3459 | .setattr = ext4_setattr, |
3461 | .setxattr = generic_setxattr, | 3460 | .setxattr = generic_setxattr, |
diff --git a/fs/fs_pin.c b/fs/fs_pin.c new file mode 100644 index 000000000000..9368236ca100 --- /dev/null +++ b/fs/fs_pin.c | |||
@@ -0,0 +1,78 @@ | |||
1 | #include <linux/fs.h> | ||
2 | #include <linux/slab.h> | ||
3 | #include <linux/fs_pin.h> | ||
4 | #include "internal.h" | ||
5 | #include "mount.h" | ||
6 | |||
7 | static void pin_free_rcu(struct rcu_head *head) | ||
8 | { | ||
9 | kfree(container_of(head, struct fs_pin, rcu)); | ||
10 | } | ||
11 | |||
12 | static DEFINE_SPINLOCK(pin_lock); | ||
13 | |||
14 | void pin_put(struct fs_pin *p) | ||
15 | { | ||
16 | if (atomic_long_dec_and_test(&p->count)) | ||
17 | call_rcu(&p->rcu, pin_free_rcu); | ||
18 | } | ||
19 | |||
20 | void pin_remove(struct fs_pin *pin) | ||
21 | { | ||
22 | spin_lock(&pin_lock); | ||
23 | hlist_del(&pin->m_list); | ||
24 | hlist_del(&pin->s_list); | ||
25 | spin_unlock(&pin_lock); | ||
26 | } | ||
27 | |||
28 | void pin_insert(struct fs_pin *pin, struct vfsmount *m) | ||
29 | { | ||
30 | spin_lock(&pin_lock); | ||
31 | hlist_add_head(&pin->s_list, &m->mnt_sb->s_pins); | ||
32 | hlist_add_head(&pin->m_list, &real_mount(m)->mnt_pins); | ||
33 | spin_unlock(&pin_lock); | ||
34 | } | ||
35 | |||
36 | void mnt_pin_kill(struct mount *m) | ||
37 | { | ||
38 | while (1) { | ||
39 | struct hlist_node *p; | ||
40 | struct fs_pin *pin; | ||
41 | rcu_read_lock(); | ||
42 | p = ACCESS_ONCE(m->mnt_pins.first); | ||
43 | if (!p) { | ||
44 | rcu_read_unlock(); | ||
45 | break; | ||
46 | } | ||
47 | pin = hlist_entry(p, struct fs_pin, m_list); | ||
48 | if (!atomic_long_inc_not_zero(&pin->count)) { | ||
49 | rcu_read_unlock(); | ||
50 | cpu_relax(); | ||
51 | continue; | ||
52 | } | ||
53 | rcu_read_unlock(); | ||
54 | pin->kill(pin); | ||
55 | } | ||
56 | } | ||
57 | |||
58 | void sb_pin_kill(struct super_block *sb) | ||
59 | { | ||
60 | while (1) { | ||
61 | struct hlist_node *p; | ||
62 | struct fs_pin *pin; | ||
63 | rcu_read_lock(); | ||
64 | p = ACCESS_ONCE(sb->s_pins.first); | ||
65 | if (!p) { | ||
66 | rcu_read_unlock(); | ||
67 | break; | ||
68 | } | ||
69 | pin = hlist_entry(p, struct fs_pin, s_list); | ||
70 | if (!atomic_long_inc_not_zero(&pin->count)) { | ||
71 | rcu_read_unlock(); | ||
72 | cpu_relax(); | ||
73 | continue; | ||
74 | } | ||
75 | rcu_read_unlock(); | ||
76 | pin->kill(pin); | ||
77 | } | ||
78 | } | ||
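The new file only provides the mechanism; a rough sketch of how a consumer would be wired up follows. The my_watch type, my_watch_release() and the exact reference discipline are illustrative assumptions, not part of this patch:

struct my_watch {
	struct fs_pin	pin;	/* kept first: pin_free_rcu() kfree()s the fs_pin pointer */
	struct vfsmount	*mnt;
};

static void my_watch_kill(struct fs_pin *p)
{
	struct my_watch *w = container_of(p, struct my_watch, pin);

	pin_remove(p);			/* unhash from the mount's and superblock's lists */
	my_watch_release(w);		/* hypothetical: drop whatever w holds on w->mnt */
	pin_put(p);			/* reference taken by mnt_pin_kill()/sb_pin_kill() */
	pin_put(p);			/* the watch's own reference; freeing happens via RCU */
}

static void my_watch_attach(struct my_watch *w, struct vfsmount *mnt)
{
	atomic_long_set(&w->pin.count, 1);
	w->pin.kill = my_watch_kill;
	w->mnt = mnt;
	pin_insert(&w->pin, mnt);
}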
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index 0c6048247a34..de1d84af9f7c 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c | |||
@@ -845,12 +845,6 @@ static int fuse_rename2(struct inode *olddir, struct dentry *oldent, | |||
845 | return err; | 845 | return err; |
846 | } | 846 | } |
847 | 847 | ||
848 | static int fuse_rename(struct inode *olddir, struct dentry *oldent, | ||
849 | struct inode *newdir, struct dentry *newent) | ||
850 | { | ||
851 | return fuse_rename2(olddir, oldent, newdir, newent, 0); | ||
852 | } | ||
853 | |||
854 | static int fuse_link(struct dentry *entry, struct inode *newdir, | 848 | static int fuse_link(struct dentry *entry, struct inode *newdir, |
855 | struct dentry *newent) | 849 | struct dentry *newent) |
856 | { | 850 | { |
@@ -2024,7 +2018,6 @@ static const struct inode_operations fuse_dir_inode_operations = { | |||
2024 | .symlink = fuse_symlink, | 2018 | .symlink = fuse_symlink, |
2025 | .unlink = fuse_unlink, | 2019 | .unlink = fuse_unlink, |
2026 | .rmdir = fuse_rmdir, | 2020 | .rmdir = fuse_rmdir, |
2027 | .rename = fuse_rename, | ||
2028 | .rename2 = fuse_rename2, | 2021 | .rename2 = fuse_rename2, |
2029 | .link = fuse_link, | 2022 | .link = fuse_link, |
2030 | .setattr = fuse_setattr, | 2023 | .setattr = fuse_setattr, |
diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 40ac2628ddcf..912061ac4baf 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c | |||
@@ -1303,10 +1303,10 @@ static int fuse_get_user_pages(struct fuse_req *req, struct iov_iter *ii, | |||
1303 | while (nbytes < *nbytesp && req->num_pages < req->max_pages) { | 1303 | while (nbytes < *nbytesp && req->num_pages < req->max_pages) { |
1304 | unsigned npages; | 1304 | unsigned npages; |
1305 | size_t start; | 1305 | size_t start; |
1306 | unsigned n = req->max_pages - req->num_pages; | ||
1307 | ssize_t ret = iov_iter_get_pages(ii, | 1306 | ssize_t ret = iov_iter_get_pages(ii, |
1308 | &req->pages[req->num_pages], | 1307 | &req->pages[req->num_pages], |
1309 | n * PAGE_SIZE, &start); | 1308 | req->max_pages - req->num_pages, |
1309 | &start); | ||
1310 | if (ret < 0) | 1310 | if (ret < 0) |
1311 | return ret; | 1311 | return ret; |
1312 | 1312 | ||
diff --git a/fs/hostfs/hostfs.h b/fs/hostfs/hostfs.h index 9c88da0e855a..4fcd40d6f308 100644 --- a/fs/hostfs/hostfs.h +++ b/fs/hostfs/hostfs.h | |||
@@ -89,6 +89,7 @@ extern int do_mknod(const char *file, int mode, unsigned int major, | |||
89 | extern int link_file(const char *from, const char *to); | 89 | extern int link_file(const char *from, const char *to); |
90 | extern int hostfs_do_readlink(char *file, char *buf, int size); | 90 | extern int hostfs_do_readlink(char *file, char *buf, int size); |
91 | extern int rename_file(char *from, char *to); | 91 | extern int rename_file(char *from, char *to); |
92 | extern int rename2_file(char *from, char *to, unsigned int flags); | ||
92 | extern int do_statfs(char *root, long *bsize_out, long long *blocks_out, | 93 | extern int do_statfs(char *root, long *bsize_out, long long *blocks_out, |
93 | long long *bfree_out, long long *bavail_out, | 94 | long long *bfree_out, long long *bavail_out, |
94 | long long *files_out, long long *ffree_out, | 95 | long long *files_out, long long *ffree_out, |
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c index bb529f3b7f2b..fd62cae0fdcb 100644 --- a/fs/hostfs/hostfs_kern.c +++ b/fs/hostfs/hostfs_kern.c | |||
@@ -741,21 +741,31 @@ static int hostfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, | |||
741 | return err; | 741 | return err; |
742 | } | 742 | } |
743 | 743 | ||
744 | static int hostfs_rename(struct inode *from_ino, struct dentry *from, | 744 | static int hostfs_rename2(struct inode *old_dir, struct dentry *old_dentry, |
745 | struct inode *to_ino, struct dentry *to) | 745 | struct inode *new_dir, struct dentry *new_dentry, |
746 | unsigned int flags) | ||
746 | { | 747 | { |
747 | char *from_name, *to_name; | 748 | char *old_name, *new_name; |
748 | int err; | 749 | int err; |
749 | 750 | ||
750 | if ((from_name = dentry_name(from)) == NULL) | 751 | if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE)) |
752 | return -EINVAL; | ||
753 | |||
754 | old_name = dentry_name(old_dentry); | ||
755 | if (old_name == NULL) | ||
751 | return -ENOMEM; | 756 | return -ENOMEM; |
752 | if ((to_name = dentry_name(to)) == NULL) { | 757 | new_name = dentry_name(new_dentry); |
753 | __putname(from_name); | 758 | if (new_name == NULL) { |
759 | __putname(old_name); | ||
754 | return -ENOMEM; | 760 | return -ENOMEM; |
755 | } | 761 | } |
756 | err = rename_file(from_name, to_name); | 762 | if (!flags) |
757 | __putname(from_name); | 763 | err = rename_file(old_name, new_name); |
758 | __putname(to_name); | 764 | else |
765 | err = rename2_file(old_name, new_name, flags); | ||
766 | |||
767 | __putname(old_name); | ||
768 | __putname(new_name); | ||
759 | return err; | 769 | return err; |
760 | } | 770 | } |
761 | 771 | ||
@@ -867,7 +877,7 @@ static const struct inode_operations hostfs_dir_iops = { | |||
867 | .mkdir = hostfs_mkdir, | 877 | .mkdir = hostfs_mkdir, |
868 | .rmdir = hostfs_rmdir, | 878 | .rmdir = hostfs_rmdir, |
869 | .mknod = hostfs_mknod, | 879 | .mknod = hostfs_mknod, |
870 | .rename = hostfs_rename, | 880 | .rename2 = hostfs_rename2, |
871 | .permission = hostfs_permission, | 881 | .permission = hostfs_permission, |
872 | .setattr = hostfs_setattr, | 882 | .setattr = hostfs_setattr, |
873 | }; | 883 | }; |
diff --git a/fs/hostfs/hostfs_user.c b/fs/hostfs/hostfs_user.c index 67838f3aa20a..9765dab95cbd 100644 --- a/fs/hostfs/hostfs_user.c +++ b/fs/hostfs/hostfs_user.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <sys/time.h> | 14 | #include <sys/time.h> |
15 | #include <sys/types.h> | 15 | #include <sys/types.h> |
16 | #include <sys/vfs.h> | 16 | #include <sys/vfs.h> |
17 | #include <sys/syscall.h> | ||
17 | #include "hostfs.h" | 18 | #include "hostfs.h" |
18 | #include <utime.h> | 19 | #include <utime.h> |
19 | 20 | ||
@@ -360,6 +361,33 @@ int rename_file(char *from, char *to) | |||
360 | return 0; | 361 | return 0; |
361 | } | 362 | } |
362 | 363 | ||
364 | int rename2_file(char *from, char *to, unsigned int flags) | ||
365 | { | ||
366 | int err; | ||
367 | |||
368 | #ifndef SYS_renameat2 | ||
369 | # ifdef __x86_64__ | ||
370 | # define SYS_renameat2 316 | ||
371 | # endif | ||
372 | # ifdef __i386__ | ||
373 | # define SYS_renameat2 353 | ||
374 | # endif | ||
375 | #endif | ||
376 | |||
377 | #ifdef SYS_renameat2 | ||
378 | err = syscall(SYS_renameat2, AT_FDCWD, from, AT_FDCWD, to, flags); | ||
379 | if (err < 0) { | ||
380 | if (errno != ENOSYS) | ||
381 | return -errno; | ||
382 | else | ||
383 | return -EINVAL; | ||
384 | } | ||
385 | return 0; | ||
386 | #else | ||
387 | return -EINVAL; | ||
388 | #endif | ||
389 | } | ||
390 | |||
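Outside the hostfs glue, the same fallback can be exercised directly from userspace to confirm the host kernel supports it. A small self-contained test, assuming a libc that exposes SYS_renameat2 (otherwise substitute the numeric syscall numbers used above):

#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef RENAME_NOREPLACE
#define RENAME_NOREPLACE (1 << 0)	/* from linux/fs.h */
#endif

int main(int argc, char **argv)
{
	if (argc != 3) {
		fprintf(stderr, "usage: %s <from> <to>\n", argv[0]);
		return 2;
	}
	if (syscall(SYS_renameat2, AT_FDCWD, argv[1], AT_FDCWD, argv[2],
		    RENAME_NOREPLACE) < 0) {
		/* EEXIST if <to> already exists, ENOSYS on kernels before 3.15 */
		fprintf(stderr, "renameat2: %s\n", strerror(errno));
		return 1;
	}
	return 0;
}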
363 | int do_statfs(char *root, long *bsize_out, long long *blocks_out, | 391 | int do_statfs(char *root, long *bsize_out, long long *blocks_out, |
364 | long long *bfree_out, long long *bavail_out, | 392 | long long *bfree_out, long long *bavail_out, |
365 | long long *files_out, long long *ffree_out, | 393 | long long *files_out, long long *ffree_out, |
diff --git a/fs/internal.h b/fs/internal.h index 465742407466..e325b4f9c799 100644 --- a/fs/internal.h +++ b/fs/internal.h | |||
@@ -131,7 +131,6 @@ extern long prune_dcache_sb(struct super_block *sb, unsigned long nr_to_scan, | |||
131 | /* | 131 | /* |
132 | * read_write.c | 132 | * read_write.c |
133 | */ | 133 | */ |
134 | extern ssize_t __kernel_write(struct file *, const char *, size_t, loff_t *); | ||
135 | extern int rw_verify_area(int, struct file *, const loff_t *, size_t); | 134 | extern int rw_verify_area(int, struct file *, const loff_t *, size_t); |
136 | 135 | ||
137 | /* | 136 | /* |
@@ -144,3 +143,9 @@ extern long do_splice_direct(struct file *in, loff_t *ppos, struct file *out, | |||
144 | * pipe.c | 143 | * pipe.c |
145 | */ | 144 | */ |
146 | extern const struct file_operations pipefifo_fops; | 145 | extern const struct file_operations pipefifo_fops; |
146 | |||
147 | /* | ||
148 | * fs_pin.c | ||
149 | */ | ||
150 | extern void sb_pin_kill(struct super_block *sb); | ||
151 | extern void mnt_pin_kill(struct mount *m); | ||
diff --git a/fs/mount.h b/fs/mount.h index d55297f2fa05..6740a6215529 100644 --- a/fs/mount.h +++ b/fs/mount.h | |||
@@ -55,7 +55,7 @@ struct mount { | |||
55 | int mnt_id; /* mount identifier */ | 55 | int mnt_id; /* mount identifier */ |
56 | int mnt_group_id; /* peer group identifier */ | 56 | int mnt_group_id; /* peer group identifier */ |
57 | int mnt_expiry_mark; /* true if marked for expiry */ | 57 | int mnt_expiry_mark; /* true if marked for expiry */ |
58 | int mnt_pinned; | 58 | struct hlist_head mnt_pins; |
59 | struct path mnt_ex_mountpoint; | 59 | struct path mnt_ex_mountpoint; |
60 | }; | 60 | }; |
61 | 61 | ||
diff --git a/fs/namei.c b/fs/namei.c index 9eb787e5c167..a996bb48dfab 100644 --- a/fs/namei.c +++ b/fs/namei.c | |||
@@ -1091,10 +1091,10 @@ int follow_down_one(struct path *path) | |||
1091 | } | 1091 | } |
1092 | EXPORT_SYMBOL(follow_down_one); | 1092 | EXPORT_SYMBOL(follow_down_one); |
1093 | 1093 | ||
1094 | static inline bool managed_dentry_might_block(struct dentry *dentry) | 1094 | static inline int managed_dentry_rcu(struct dentry *dentry) |
1095 | { | 1095 | { |
1096 | return (dentry->d_flags & DCACHE_MANAGE_TRANSIT && | 1096 | return (dentry->d_flags & DCACHE_MANAGE_TRANSIT) ? |
1097 | dentry->d_op->d_manage(dentry, true) < 0); | 1097 | dentry->d_op->d_manage(dentry, true) : 0; |
1098 | } | 1098 | } |
1099 | 1099 | ||
1100 | /* | 1100 | /* |
@@ -1110,11 +1110,18 @@ static bool __follow_mount_rcu(struct nameidata *nd, struct path *path, | |||
1110 | * Don't forget we might have a non-mountpoint managed dentry | 1110 | * Don't forget we might have a non-mountpoint managed dentry |
1111 | * that wants to block transit. | 1111 | * that wants to block transit. |
1112 | */ | 1112 | */ |
1113 | if (unlikely(managed_dentry_might_block(path->dentry))) | 1113 | switch (managed_dentry_rcu(path->dentry)) { |
1114 | case -ECHILD: | ||
1115 | default: | ||
1114 | return false; | 1116 | return false; |
1117 | case -EISDIR: | ||
1118 | return true; | ||
1119 | case 0: | ||
1120 | break; | ||
1121 | } | ||
1115 | 1122 | ||
1116 | if (!d_mountpoint(path->dentry)) | 1123 | if (!d_mountpoint(path->dentry)) |
1117 | return true; | 1124 | return !(path->dentry->d_flags & DCACHE_NEED_AUTOMOUNT); |
1118 | 1125 | ||
1119 | mounted = __lookup_mnt(path->mnt, path->dentry); | 1126 | mounted = __lookup_mnt(path->mnt, path->dentry); |
1120 | if (!mounted) | 1127 | if (!mounted) |
@@ -1130,7 +1137,8 @@ static bool __follow_mount_rcu(struct nameidata *nd, struct path *path, | |||
1130 | */ | 1137 | */ |
1131 | *inode = path->dentry->d_inode; | 1138 | *inode = path->dentry->d_inode; |
1132 | } | 1139 | } |
1133 | return read_seqretry(&mount_lock, nd->m_seq); | 1140 | return read_seqretry(&mount_lock, nd->m_seq) && |
1141 | !(path->dentry->d_flags & DCACHE_NEED_AUTOMOUNT); | ||
1134 | } | 1142 | } |
1135 | 1143 | ||
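The switch gives ->d_manage() a three-way contract during RCU walk: any error other than -EISDIR (typically -ECHILD) makes the caller leave RCU mode, -EISDIR means "treat this as an ordinary directory and do not look for mounts or automounts here", and 0 means carry on. A sketch of how a managed-dentry filesystem might honour that contract; the myfs_* helpers are hypothetical:

static int myfs_d_manage(struct dentry *dentry, bool rcu_walk)
{
	if (rcu_walk) {
		if (!myfs_transit_in_progress(dentry))
			return -EISDIR;	/* nothing pending: keep walking lazily */
		return -ECHILD;		/* would have to sleep: caller must unlazy */
	}
	/* ref-walk: allowed to block until the transit completes */
	return myfs_wait_for_transit(dentry);	/* 0, or -ERESTARTSYS etc. */
}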
1136 | static int follow_dotdot_rcu(struct nameidata *nd) | 1144 | static int follow_dotdot_rcu(struct nameidata *nd) |
@@ -1402,11 +1410,8 @@ static int lookup_fast(struct nameidata *nd, | |||
1402 | } | 1410 | } |
1403 | path->mnt = mnt; | 1411 | path->mnt = mnt; |
1404 | path->dentry = dentry; | 1412 | path->dentry = dentry; |
1405 | if (unlikely(!__follow_mount_rcu(nd, path, inode))) | 1413 | if (likely(__follow_mount_rcu(nd, path, inode))) |
1406 | goto unlazy; | 1414 | return 0; |
1407 | if (unlikely(path->dentry->d_flags & DCACHE_NEED_AUTOMOUNT)) | ||
1408 | goto unlazy; | ||
1409 | return 0; | ||
1410 | unlazy: | 1415 | unlazy: |
1411 | if (unlazy_walk(nd, dentry)) | 1416 | if (unlazy_walk(nd, dentry)) |
1412 | return -ECHILD; | 1417 | return -ECHILD; |
@@ -4019,7 +4024,7 @@ SYSCALL_DEFINE2(link, const char __user *, oldname, const char __user *, newname | |||
4019 | * The worst of all namespace operations - renaming directory. "Perverted" | 4024 | * The worst of all namespace operations - renaming directory. "Perverted" |
4020 | * doesn't even start to describe it. Somebody in UCB had a heck of a trip... | 4025 | * doesn't even start to describe it. Somebody in UCB had a heck of a trip... |
4021 | * Problems: | 4026 | * Problems: |
4022 | * a) we can get into loop creation. Check is done in is_subdir(). | 4027 | * a) we can get into loop creation. |
4023 | * b) race potential - two innocent renames can create a loop together. | 4028 | * b) race potential - two innocent renames can create a loop together. |
4024 | * That's where 4.4 screws up. Current fix: serialization on | 4029 | * That's where 4.4 screws up. Current fix: serialization on |
4025 | * sb->s_vfs_rename_mutex. We might be more accurate, but that's another | 4030 | * sb->s_vfs_rename_mutex. We might be more accurate, but that's another |
@@ -4075,7 +4080,7 @@ int vfs_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
4075 | if (error) | 4080 | if (error) |
4076 | return error; | 4081 | return error; |
4077 | 4082 | ||
4078 | if (!old_dir->i_op->rename) | 4083 | if (!old_dir->i_op->rename && !old_dir->i_op->rename2) |
4079 | return -EPERM; | 4084 | return -EPERM; |
4080 | 4085 | ||
4081 | if (flags && !old_dir->i_op->rename2) | 4086 | if (flags && !old_dir->i_op->rename2) |
@@ -4134,10 +4139,11 @@ int vfs_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
4134 | if (error) | 4139 | if (error) |
4135 | goto out; | 4140 | goto out; |
4136 | } | 4141 | } |
4137 | if (!flags) { | 4142 | if (!old_dir->i_op->rename2) { |
4138 | error = old_dir->i_op->rename(old_dir, old_dentry, | 4143 | error = old_dir->i_op->rename(old_dir, old_dentry, |
4139 | new_dir, new_dentry); | 4144 | new_dir, new_dentry); |
4140 | } else { | 4145 | } else { |
4146 | WARN_ON(old_dir->i_op->rename != NULL); | ||
4141 | error = old_dir->i_op->rename2(old_dir, old_dentry, | 4147 | error = old_dir->i_op->rename2(old_dir, old_dentry, |
4142 | new_dir, new_dentry, flags); | 4148 | new_dir, new_dentry, flags); |
4143 | } | 4149 | } |
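With ->rename being phased out in favour of ->rename2 (ext4, fuse and hostfs are converted elsewhere in this diff), the minimal conversion for a filesystem with no special flag support looks roughly like the sketch below; myfs_rename stands in for the old ->rename body:

static int myfs_rename2(struct inode *old_dir, struct dentry *old_dentry,
			struct inode *new_dir, struct dentry *new_dentry,
			unsigned int flags)
{
	if (flags & ~RENAME_NOREPLACE)
		return -EINVAL;
	/*
	 * RENAME_NOREPLACE needs no help from the filesystem here: the
	 * rename syscall already fails with -EEXIST when the target is
	 * positive, so the old rename body can be reused unchanged.
	 */
	return myfs_rename(old_dir, old_dentry, new_dir, new_dentry);
}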
diff --git a/fs/namespace.c b/fs/namespace.c index 0acabea58319..a01c7730e9af 100644 --- a/fs/namespace.c +++ b/fs/namespace.c | |||
@@ -16,7 +16,6 @@ | |||
16 | #include <linux/namei.h> | 16 | #include <linux/namei.h> |
17 | #include <linux/security.h> | 17 | #include <linux/security.h> |
18 | #include <linux/idr.h> | 18 | #include <linux/idr.h> |
19 | #include <linux/acct.h> /* acct_auto_close_mnt */ | ||
20 | #include <linux/init.h> /* init_rootfs */ | 19 | #include <linux/init.h> /* init_rootfs */ |
21 | #include <linux/fs_struct.h> /* get_fs_root et.al. */ | 20 | #include <linux/fs_struct.h> /* get_fs_root et.al. */ |
22 | #include <linux/fsnotify.h> /* fsnotify_vfsmount_delete */ | 21 | #include <linux/fsnotify.h> /* fsnotify_vfsmount_delete */ |
@@ -779,6 +778,20 @@ static void attach_mnt(struct mount *mnt, | |||
779 | list_add_tail(&mnt->mnt_child, &parent->mnt_mounts); | 778 | list_add_tail(&mnt->mnt_child, &parent->mnt_mounts); |
780 | } | 779 | } |
781 | 780 | ||
781 | static void attach_shadowed(struct mount *mnt, | ||
782 | struct mount *parent, | ||
783 | struct mount *shadows) | ||
784 | { | ||
785 | if (shadows) { | ||
786 | hlist_add_behind_rcu(&mnt->mnt_hash, &shadows->mnt_hash); | ||
787 | list_add(&mnt->mnt_child, &shadows->mnt_child); | ||
788 | } else { | ||
789 | hlist_add_head_rcu(&mnt->mnt_hash, | ||
790 | m_hash(&parent->mnt, mnt->mnt_mountpoint)); | ||
791 | list_add_tail(&mnt->mnt_child, &parent->mnt_mounts); | ||
792 | } | ||
793 | } | ||
794 | |||
782 | /* | 795 | /* |
783 | * vfsmount lock must be held for write | 796 | * vfsmount lock must be held for write |
784 | */ | 797 | */ |
@@ -797,12 +810,7 @@ static void commit_tree(struct mount *mnt, struct mount *shadows) | |||
797 | 810 | ||
798 | list_splice(&head, n->list.prev); | 811 | list_splice(&head, n->list.prev); |
799 | 812 | ||
800 | if (shadows) | 813 | attach_shadowed(mnt, parent, shadows); |
801 | hlist_add_behind_rcu(&mnt->mnt_hash, &shadows->mnt_hash); | ||
802 | else | ||
803 | hlist_add_head_rcu(&mnt->mnt_hash, | ||
804 | m_hash(&parent->mnt, mnt->mnt_mountpoint)); | ||
805 | list_add_tail(&mnt->mnt_child, &parent->mnt_mounts); | ||
806 | touch_mnt_namespace(n); | 814 | touch_mnt_namespace(n); |
807 | } | 815 | } |
808 | 816 | ||
@@ -951,7 +959,6 @@ static struct mount *clone_mnt(struct mount *old, struct dentry *root, | |||
951 | 959 | ||
952 | static void mntput_no_expire(struct mount *mnt) | 960 | static void mntput_no_expire(struct mount *mnt) |
953 | { | 961 | { |
954 | put_again: | ||
955 | rcu_read_lock(); | 962 | rcu_read_lock(); |
956 | mnt_add_count(mnt, -1); | 963 | mnt_add_count(mnt, -1); |
957 | if (likely(mnt->mnt_ns)) { /* shouldn't be the last one */ | 964 | if (likely(mnt->mnt_ns)) { /* shouldn't be the last one */ |
@@ -964,14 +971,6 @@ put_again: | |||
964 | unlock_mount_hash(); | 971 | unlock_mount_hash(); |
965 | return; | 972 | return; |
966 | } | 973 | } |
967 | if (unlikely(mnt->mnt_pinned)) { | ||
968 | mnt_add_count(mnt, mnt->mnt_pinned + 1); | ||
969 | mnt->mnt_pinned = 0; | ||
970 | rcu_read_unlock(); | ||
971 | unlock_mount_hash(); | ||
972 | acct_auto_close_mnt(&mnt->mnt); | ||
973 | goto put_again; | ||
974 | } | ||
975 | if (unlikely(mnt->mnt.mnt_flags & MNT_DOOMED)) { | 974 | if (unlikely(mnt->mnt.mnt_flags & MNT_DOOMED)) { |
976 | rcu_read_unlock(); | 975 | rcu_read_unlock(); |
977 | unlock_mount_hash(); | 976 | unlock_mount_hash(); |
@@ -994,6 +993,8 @@ put_again: | |||
994 | * so mnt_get_writers() below is safe. | 993 | * so mnt_get_writers() below is safe. |
995 | */ | 994 | */ |
996 | WARN_ON(mnt_get_writers(mnt)); | 995 | WARN_ON(mnt_get_writers(mnt)); |
996 | if (unlikely(mnt->mnt_pins.first)) | ||
997 | mnt_pin_kill(mnt); | ||
997 | fsnotify_vfsmount_delete(&mnt->mnt); | 998 | fsnotify_vfsmount_delete(&mnt->mnt); |
998 | dput(mnt->mnt.mnt_root); | 999 | dput(mnt->mnt.mnt_root); |
999 | deactivate_super(mnt->mnt.mnt_sb); | 1000 | deactivate_super(mnt->mnt.mnt_sb); |
@@ -1021,25 +1022,15 @@ struct vfsmount *mntget(struct vfsmount *mnt) | |||
1021 | } | 1022 | } |
1022 | EXPORT_SYMBOL(mntget); | 1023 | EXPORT_SYMBOL(mntget); |
1023 | 1024 | ||
1024 | void mnt_pin(struct vfsmount *mnt) | 1025 | struct vfsmount *mnt_clone_internal(struct path *path) |
1025 | { | ||
1026 | lock_mount_hash(); | ||
1027 | real_mount(mnt)->mnt_pinned++; | ||
1028 | unlock_mount_hash(); | ||
1029 | } | ||
1030 | EXPORT_SYMBOL(mnt_pin); | ||
1031 | |||
1032 | void mnt_unpin(struct vfsmount *m) | ||
1033 | { | 1026 | { |
1034 | struct mount *mnt = real_mount(m); | 1027 | struct mount *p; |
1035 | lock_mount_hash(); | 1028 | p = clone_mnt(real_mount(path->mnt), path->dentry, CL_PRIVATE); |
1036 | if (mnt->mnt_pinned) { | 1029 | if (IS_ERR(p)) |
1037 | mnt_add_count(mnt, 1); | 1030 | return ERR_CAST(p); |
1038 | mnt->mnt_pinned--; | 1031 | p->mnt.mnt_flags |= MNT_INTERNAL; |
1039 | } | 1032 | return &p->mnt; |
1040 | unlock_mount_hash(); | ||
1041 | } | 1033 | } |
1042 | EXPORT_SYMBOL(mnt_unpin); | ||
1043 | 1034 | ||
1044 | static inline void mangle(struct seq_file *m, const char *s) | 1035 | static inline void mangle(struct seq_file *m, const char *s) |
1045 | { | 1036 | { |
@@ -1505,6 +1496,7 @@ struct mount *copy_tree(struct mount *mnt, struct dentry *dentry, | |||
1505 | continue; | 1496 | continue; |
1506 | 1497 | ||
1507 | for (s = r; s; s = next_mnt(s, r)) { | 1498 | for (s = r; s; s = next_mnt(s, r)) { |
1499 | struct mount *t = NULL; | ||
1508 | if (!(flag & CL_COPY_UNBINDABLE) && | 1500 | if (!(flag & CL_COPY_UNBINDABLE) && |
1509 | IS_MNT_UNBINDABLE(s)) { | 1501 | IS_MNT_UNBINDABLE(s)) { |
1510 | s = skip_mnt_tree(s); | 1502 | s = skip_mnt_tree(s); |
@@ -1526,7 +1518,14 @@ struct mount *copy_tree(struct mount *mnt, struct dentry *dentry, | |||
1526 | goto out; | 1518 | goto out; |
1527 | lock_mount_hash(); | 1519 | lock_mount_hash(); |
1528 | list_add_tail(&q->mnt_list, &res->mnt_list); | 1520 | list_add_tail(&q->mnt_list, &res->mnt_list); |
1529 | attach_mnt(q, parent, p->mnt_mp); | 1521 | mnt_set_mountpoint(parent, p->mnt_mp, q); |
1522 | if (!list_empty(&parent->mnt_mounts)) { | ||
1523 | t = list_last_entry(&parent->mnt_mounts, | ||
1524 | struct mount, mnt_child); | ||
1525 | if (t->mnt_mp != p->mnt_mp) | ||
1526 | t = NULL; | ||
1527 | } | ||
1528 | attach_shadowed(q, parent, t); | ||
1530 | unlock_mount_hash(); | 1529 | unlock_mount_hash(); |
1531 | } | 1530 | } |
1532 | } | 1531 | } |
diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c index 9b431f44fad9..cbb1797149d5 100644 --- a/fs/nfs/blocklayout/blocklayout.c +++ b/fs/nfs/blocklayout/blocklayout.c | |||
@@ -210,8 +210,7 @@ static void bl_end_io_read(struct bio *bio, int err) | |||
210 | SetPageUptodate(bvec->bv_page); | 210 | SetPageUptodate(bvec->bv_page); |
211 | 211 | ||
212 | if (err) { | 212 | if (err) { |
213 | struct nfs_pgio_data *rdata = par->data; | 213 | struct nfs_pgio_header *header = par->data; |
214 | struct nfs_pgio_header *header = rdata->header; | ||
215 | 214 | ||
216 | if (!header->pnfs_error) | 215 | if (!header->pnfs_error) |
217 | header->pnfs_error = -EIO; | 216 | header->pnfs_error = -EIO; |
@@ -224,43 +223,44 @@ static void bl_end_io_read(struct bio *bio, int err) | |||
224 | static void bl_read_cleanup(struct work_struct *work) | 223 | static void bl_read_cleanup(struct work_struct *work) |
225 | { | 224 | { |
226 | struct rpc_task *task; | 225 | struct rpc_task *task; |
227 | struct nfs_pgio_data *rdata; | 226 | struct nfs_pgio_header *hdr; |
228 | dprintk("%s enter\n", __func__); | 227 | dprintk("%s enter\n", __func__); |
229 | task = container_of(work, struct rpc_task, u.tk_work); | 228 | task = container_of(work, struct rpc_task, u.tk_work); |
230 | rdata = container_of(task, struct nfs_pgio_data, task); | 229 | hdr = container_of(task, struct nfs_pgio_header, task); |
231 | pnfs_ld_read_done(rdata); | 230 | pnfs_ld_read_done(hdr); |
232 | } | 231 | } |
233 | 232 | ||
234 | static void | 233 | static void |
235 | bl_end_par_io_read(void *data, int unused) | 234 | bl_end_par_io_read(void *data, int unused) |
236 | { | 235 | { |
237 | struct nfs_pgio_data *rdata = data; | 236 | struct nfs_pgio_header *hdr = data; |
238 | 237 | ||
239 | rdata->task.tk_status = rdata->header->pnfs_error; | 238 | hdr->task.tk_status = hdr->pnfs_error; |
240 | INIT_WORK(&rdata->task.u.tk_work, bl_read_cleanup); | 239 | INIT_WORK(&hdr->task.u.tk_work, bl_read_cleanup); |
241 | schedule_work(&rdata->task.u.tk_work); | 240 | schedule_work(&hdr->task.u.tk_work); |
242 | } | 241 | } |
243 | 242 | ||
244 | static enum pnfs_try_status | 243 | static enum pnfs_try_status |
245 | bl_read_pagelist(struct nfs_pgio_data *rdata) | 244 | bl_read_pagelist(struct nfs_pgio_header *hdr) |
246 | { | 245 | { |
247 | struct nfs_pgio_header *header = rdata->header; | 246 | struct nfs_pgio_header *header = hdr; |
248 | int i, hole; | 247 | int i, hole; |
249 | struct bio *bio = NULL; | 248 | struct bio *bio = NULL; |
250 | struct pnfs_block_extent *be = NULL, *cow_read = NULL; | 249 | struct pnfs_block_extent *be = NULL, *cow_read = NULL; |
251 | sector_t isect, extent_length = 0; | 250 | sector_t isect, extent_length = 0; |
252 | struct parallel_io *par; | 251 | struct parallel_io *par; |
253 | loff_t f_offset = rdata->args.offset; | 252 | loff_t f_offset = hdr->args.offset; |
254 | size_t bytes_left = rdata->args.count; | 253 | size_t bytes_left = hdr->args.count; |
255 | unsigned int pg_offset, pg_len; | 254 | unsigned int pg_offset, pg_len; |
256 | struct page **pages = rdata->args.pages; | 255 | struct page **pages = hdr->args.pages; |
257 | int pg_index = rdata->args.pgbase >> PAGE_CACHE_SHIFT; | 256 | int pg_index = hdr->args.pgbase >> PAGE_CACHE_SHIFT; |
258 | const bool is_dio = (header->dreq != NULL); | 257 | const bool is_dio = (header->dreq != NULL); |
259 | 258 | ||
260 | dprintk("%s enter nr_pages %u offset %lld count %u\n", __func__, | 259 | dprintk("%s enter nr_pages %u offset %lld count %u\n", __func__, |
261 | rdata->pages.npages, f_offset, (unsigned int)rdata->args.count); | 260 | hdr->page_array.npages, f_offset, |
261 | (unsigned int)hdr->args.count); | ||
262 | 262 | ||
263 | par = alloc_parallel(rdata); | 263 | par = alloc_parallel(hdr); |
264 | if (!par) | 264 | if (!par) |
265 | goto use_mds; | 265 | goto use_mds; |
266 | par->pnfs_callback = bl_end_par_io_read; | 266 | par->pnfs_callback = bl_end_par_io_read; |
@@ -268,7 +268,7 @@ bl_read_pagelist(struct nfs_pgio_data *rdata) | |||
268 | 268 | ||
269 | isect = (sector_t) (f_offset >> SECTOR_SHIFT); | 269 | isect = (sector_t) (f_offset >> SECTOR_SHIFT); |
270 | /* Code assumes extents are page-aligned */ | 270 | /* Code assumes extents are page-aligned */ |
271 | for (i = pg_index; i < rdata->pages.npages; i++) { | 271 | for (i = pg_index; i < hdr->page_array.npages; i++) { |
272 | if (!extent_length) { | 272 | if (!extent_length) { |
273 | /* We've used up the previous extent */ | 273 | /* We've used up the previous extent */ |
274 | bl_put_extent(be); | 274 | bl_put_extent(be); |
@@ -317,7 +317,8 @@ bl_read_pagelist(struct nfs_pgio_data *rdata) | |||
317 | struct pnfs_block_extent *be_read; | 317 | struct pnfs_block_extent *be_read; |
318 | 318 | ||
319 | be_read = (hole && cow_read) ? cow_read : be; | 319 | be_read = (hole && cow_read) ? cow_read : be; |
320 | bio = do_add_page_to_bio(bio, rdata->pages.npages - i, | 320 | bio = do_add_page_to_bio(bio, |
321 | hdr->page_array.npages - i, | ||
321 | READ, | 322 | READ, |
322 | isect, pages[i], be_read, | 323 | isect, pages[i], be_read, |
323 | bl_end_io_read, par, | 324 | bl_end_io_read, par, |
@@ -332,10 +333,10 @@ bl_read_pagelist(struct nfs_pgio_data *rdata) | |||
332 | extent_length -= PAGE_CACHE_SECTORS; | 333 | extent_length -= PAGE_CACHE_SECTORS; |
333 | } | 334 | } |
334 | if ((isect << SECTOR_SHIFT) >= header->inode->i_size) { | 335 | if ((isect << SECTOR_SHIFT) >= header->inode->i_size) { |
335 | rdata->res.eof = 1; | 336 | hdr->res.eof = 1; |
336 | rdata->res.count = header->inode->i_size - rdata->args.offset; | 337 | hdr->res.count = header->inode->i_size - hdr->args.offset; |
337 | } else { | 338 | } else { |
338 | rdata->res.count = (isect << SECTOR_SHIFT) - rdata->args.offset; | 339 | hdr->res.count = (isect << SECTOR_SHIFT) - hdr->args.offset; |
339 | } | 340 | } |
340 | out: | 341 | out: |
341 | bl_put_extent(be); | 342 | bl_put_extent(be); |
@@ -390,8 +391,7 @@ static void bl_end_io_write_zero(struct bio *bio, int err) | |||
390 | } | 391 | } |
391 | 392 | ||
392 | if (unlikely(err)) { | 393 | if (unlikely(err)) { |
393 | struct nfs_pgio_data *data = par->data; | 394 | struct nfs_pgio_header *header = par->data; |
394 | struct nfs_pgio_header *header = data->header; | ||
395 | 395 | ||
396 | if (!header->pnfs_error) | 396 | if (!header->pnfs_error) |
397 | header->pnfs_error = -EIO; | 397 | header->pnfs_error = -EIO; |
@@ -405,8 +405,7 @@ static void bl_end_io_write(struct bio *bio, int err) | |||
405 | { | 405 | { |
406 | struct parallel_io *par = bio->bi_private; | 406 | struct parallel_io *par = bio->bi_private; |
407 | const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); | 407 | const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); |
408 | struct nfs_pgio_data *data = par->data; | 408 | struct nfs_pgio_header *header = par->data; |
409 | struct nfs_pgio_header *header = data->header; | ||
410 | 409 | ||
411 | if (!uptodate) { | 410 | if (!uptodate) { |
412 | if (!header->pnfs_error) | 411 | if (!header->pnfs_error) |
@@ -423,32 +422,32 @@ static void bl_end_io_write(struct bio *bio, int err) | |||
423 | static void bl_write_cleanup(struct work_struct *work) | 422 | static void bl_write_cleanup(struct work_struct *work) |
424 | { | 423 | { |
425 | struct rpc_task *task; | 424 | struct rpc_task *task; |
426 | struct nfs_pgio_data *wdata; | 425 | struct nfs_pgio_header *hdr; |
427 | dprintk("%s enter\n", __func__); | 426 | dprintk("%s enter\n", __func__); |
428 | task = container_of(work, struct rpc_task, u.tk_work); | 427 | task = container_of(work, struct rpc_task, u.tk_work); |
429 | wdata = container_of(task, struct nfs_pgio_data, task); | 428 | hdr = container_of(task, struct nfs_pgio_header, task); |
430 | if (likely(!wdata->header->pnfs_error)) { | 429 | if (likely(!hdr->pnfs_error)) { |
431 | /* Marks for LAYOUTCOMMIT */ | 430 | /* Marks for LAYOUTCOMMIT */ |
432 | mark_extents_written(BLK_LSEG2EXT(wdata->header->lseg), | 431 | mark_extents_written(BLK_LSEG2EXT(hdr->lseg), |
433 | wdata->args.offset, wdata->args.count); | 432 | hdr->args.offset, hdr->args.count); |
434 | } | 433 | } |
435 | pnfs_ld_write_done(wdata); | 434 | pnfs_ld_write_done(hdr); |
436 | } | 435 | } |
437 | 436 | ||
438 | /* Called when last of bios associated with a bl_write_pagelist call finishes */ | 437 | /* Called when last of bios associated with a bl_write_pagelist call finishes */ |
439 | static void bl_end_par_io_write(void *data, int num_se) | 438 | static void bl_end_par_io_write(void *data, int num_se) |
440 | { | 439 | { |
441 | struct nfs_pgio_data *wdata = data; | 440 | struct nfs_pgio_header *hdr = data; |
442 | 441 | ||
443 | if (unlikely(wdata->header->pnfs_error)) { | 442 | if (unlikely(hdr->pnfs_error)) { |
444 | bl_free_short_extents(&BLK_LSEG2EXT(wdata->header->lseg)->bl_inval, | 443 | bl_free_short_extents(&BLK_LSEG2EXT(hdr->lseg)->bl_inval, |
445 | num_se); | 444 | num_se); |
446 | } | 445 | } |
447 | 446 | ||
448 | wdata->task.tk_status = wdata->header->pnfs_error; | 447 | hdr->task.tk_status = hdr->pnfs_error; |
449 | wdata->verf.committed = NFS_FILE_SYNC; | 448 | hdr->verf.committed = NFS_FILE_SYNC; |
450 | INIT_WORK(&wdata->task.u.tk_work, bl_write_cleanup); | 449 | INIT_WORK(&hdr->task.u.tk_work, bl_write_cleanup); |
451 | schedule_work(&wdata->task.u.tk_work); | 450 | schedule_work(&hdr->task.u.tk_work); |
452 | } | 451 | } |
453 | 452 | ||
454 | /* FIXME STUB - mark intersection of layout and page as bad, so is not | 453 | /* FIXME STUB - mark intersection of layout and page as bad, so is not |
@@ -673,18 +672,17 @@ check_page: | |||
673 | } | 672 | } |
674 | 673 | ||
675 | static enum pnfs_try_status | 674 | static enum pnfs_try_status |
676 | bl_write_pagelist(struct nfs_pgio_data *wdata, int sync) | 675 | bl_write_pagelist(struct nfs_pgio_header *header, int sync) |
677 | { | 676 | { |
678 | struct nfs_pgio_header *header = wdata->header; | ||
679 | int i, ret, npg_zero, pg_index, last = 0; | 677 | int i, ret, npg_zero, pg_index, last = 0; |
680 | struct bio *bio = NULL; | 678 | struct bio *bio = NULL; |
681 | struct pnfs_block_extent *be = NULL, *cow_read = NULL; | 679 | struct pnfs_block_extent *be = NULL, *cow_read = NULL; |
682 | sector_t isect, last_isect = 0, extent_length = 0; | 680 | sector_t isect, last_isect = 0, extent_length = 0; |
683 | struct parallel_io *par = NULL; | 681 | struct parallel_io *par = NULL; |
684 | loff_t offset = wdata->args.offset; | 682 | loff_t offset = header->args.offset; |
685 | size_t count = wdata->args.count; | 683 | size_t count = header->args.count; |
686 | unsigned int pg_offset, pg_len, saved_len; | 684 | unsigned int pg_offset, pg_len, saved_len; |
687 | struct page **pages = wdata->args.pages; | 685 | struct page **pages = header->args.pages; |
688 | struct page *page; | 686 | struct page *page; |
689 | pgoff_t index; | 687 | pgoff_t index; |
690 | u64 temp; | 688 | u64 temp; |
@@ -699,11 +697,11 @@ bl_write_pagelist(struct nfs_pgio_data *wdata, int sync) | |||
699 | dprintk("pnfsblock nonblock aligned DIO writes. Resend MDS\n"); | 697 | dprintk("pnfsblock nonblock aligned DIO writes. Resend MDS\n"); |
700 | goto out_mds; | 698 | goto out_mds; |
701 | } | 699 | } |
702 | /* At this point, wdata->pages is a (sequential) list of nfs_pages. | 700 | /* At this point, header->page_array is a (sequential) list of nfs_pages. |
703 | * We want to write each, and if there is an error set pnfs_error | 701 | * We want to write each, and if there is an error set pnfs_error |
704 | * to have it redone using nfs. | 702 | * to have it redone using nfs. |
705 | */ | 703 | */ |
706 | par = alloc_parallel(wdata); | 704 | par = alloc_parallel(header); |
707 | if (!par) | 705 | if (!par) |
708 | goto out_mds; | 706 | goto out_mds; |
709 | par->pnfs_callback = bl_end_par_io_write; | 707 | par->pnfs_callback = bl_end_par_io_write; |
@@ -790,8 +788,8 @@ next_page: | |||
790 | bio = bl_submit_bio(WRITE, bio); | 788 | bio = bl_submit_bio(WRITE, bio); |
791 | 789 | ||
792 | /* Middle pages */ | 790 | /* Middle pages */ |
793 | pg_index = wdata->args.pgbase >> PAGE_CACHE_SHIFT; | 791 | pg_index = header->args.pgbase >> PAGE_CACHE_SHIFT; |
794 | for (i = pg_index; i < wdata->pages.npages; i++) { | 792 | for (i = pg_index; i < header->page_array.npages; i++) { |
795 | if (!extent_length) { | 793 | if (!extent_length) { |
796 | /* We've used up the previous extent */ | 794 | /* We've used up the previous extent */ |
797 | bl_put_extent(be); | 795 | bl_put_extent(be); |
@@ -862,7 +860,8 @@ next_page: | |||
862 | } | 860 | } |
863 | 861 | ||
864 | 862 | ||
865 | bio = do_add_page_to_bio(bio, wdata->pages.npages - i, WRITE, | 863 | bio = do_add_page_to_bio(bio, header->page_array.npages - i, |
864 | WRITE, | ||
866 | isect, pages[i], be, | 865 | isect, pages[i], be, |
867 | bl_end_io_write, par, | 866 | bl_end_io_write, par, |
868 | pg_offset, pg_len); | 867 | pg_offset, pg_len); |
@@ -890,7 +889,7 @@ next_page: | |||
890 | } | 889 | } |
891 | 890 | ||
892 | write_done: | 891 | write_done: |
893 | wdata->res.count = wdata->args.count; | 892 | header->res.count = header->args.count; |
894 | out: | 893 | out: |
895 | bl_put_extent(be); | 894 | bl_put_extent(be); |
896 | bl_put_extent(cow_read); | 895 | bl_put_extent(cow_read); |
@@ -1063,7 +1062,7 @@ nfs4_blk_get_deviceinfo(struct nfs_server *server, const struct nfs_fh *fh, | |||
1063 | return ERR_PTR(-ENOMEM); | 1062 | return ERR_PTR(-ENOMEM); |
1064 | } | 1063 | } |
1065 | 1064 | ||
1066 | pages = kzalloc(max_pages * sizeof(struct page *), GFP_NOFS); | 1065 | pages = kcalloc(max_pages, sizeof(struct page *), GFP_NOFS); |
1067 | if (pages == NULL) { | 1066 | if (pages == NULL) { |
1068 | kfree(dev); | 1067 | kfree(dev); |
1069 | return ERR_PTR(-ENOMEM); | 1068 | return ERR_PTR(-ENOMEM); |
diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c index 073b4cf67ed9..54de482143cc 100644 --- a/fs/nfs/callback.c +++ b/fs/nfs/callback.c | |||
@@ -428,6 +428,18 @@ check_gss_callback_principal(struct nfs_client *clp, struct svc_rqst *rqstp) | |||
428 | if (p == NULL) | 428 | if (p == NULL) |
429 | return 0; | 429 | return 0; |
430 | 430 | ||
431 | /* | ||
432 | * Did we get the acceptor from userland during the SETCLIENTID | ||
433 | * negotiation? | ||
434 | */ | ||
435 | if (clp->cl_acceptor) | ||
436 | return !strcmp(p, clp->cl_acceptor); | ||
437 | |||
438 | /* | ||
439 | * Otherwise try to verify it using the cl_hostname. Note that this | ||
440 | * doesn't work if a non-canonical hostname was used in the devname. | ||
441 | */ | ||
442 | |||
431 | /* Expect a GSS_C_NT_HOSTBASED_NAME like "nfs@serverhostname" */ | 443 | /* Expect a GSS_C_NT_HOSTBASED_NAME like "nfs@serverhostname" */ |
432 | 444 | ||
433 | if (memcmp(p, "nfs@", 4) != 0) | 445 | if (memcmp(p, "nfs@", 4) != 0) |
diff --git a/fs/nfs/client.c b/fs/nfs/client.c index 180d1ec9c32e..1c5ff6d58385 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c | |||
@@ -110,8 +110,8 @@ struct nfs_subversion *get_nfs_version(unsigned int version) | |||
110 | mutex_unlock(&nfs_version_mutex); | 110 | mutex_unlock(&nfs_version_mutex); |
111 | } | 111 | } |
112 | 112 | ||
113 | if (!IS_ERR(nfs)) | 113 | if (!IS_ERR(nfs) && !try_module_get(nfs->owner)) |
114 | try_module_get(nfs->owner); | 114 | return ERR_PTR(-EAGAIN); |
115 | return nfs; | 115 | return nfs; |
116 | } | 116 | } |
117 | 117 | ||
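The practical effect is that a lost race with module unload now surfaces as an error value instead of a subversion held without a reference. A sketch of the resulting calling pattern, with version standing in for whatever the caller negotiated:

	struct nfs_subversion *nfs = get_nfs_version(version);

	if (IS_ERR(nfs))
		return PTR_ERR(nfs);	/* unknown version, or now -EAGAIN if the module is going away */
	/* ... use nfs->rpc_ops ... */
	put_nfs_version(nfs);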
@@ -158,7 +158,8 @@ struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *cl_init) | |||
158 | goto error_0; | 158 | goto error_0; |
159 | 159 | ||
160 | clp->cl_nfs_mod = cl_init->nfs_mod; | 160 | clp->cl_nfs_mod = cl_init->nfs_mod; |
161 | try_module_get(clp->cl_nfs_mod->owner); | 161 | if (!try_module_get(clp->cl_nfs_mod->owner)) |
162 | goto error_dealloc; | ||
162 | 163 | ||
163 | clp->rpc_ops = clp->cl_nfs_mod->rpc_ops; | 164 | clp->rpc_ops = clp->cl_nfs_mod->rpc_ops; |
164 | 165 | ||
@@ -190,6 +191,7 @@ struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *cl_init) | |||
190 | 191 | ||
191 | error_cleanup: | 192 | error_cleanup: |
192 | put_nfs_version(clp->cl_nfs_mod); | 193 | put_nfs_version(clp->cl_nfs_mod); |
194 | error_dealloc: | ||
193 | kfree(clp); | 195 | kfree(clp); |
194 | error_0: | 196 | error_0: |
195 | return ERR_PTR(err); | 197 | return ERR_PTR(err); |
@@ -252,6 +254,7 @@ void nfs_free_client(struct nfs_client *clp) | |||
252 | put_net(clp->cl_net); | 254 | put_net(clp->cl_net); |
253 | put_nfs_version(clp->cl_nfs_mod); | 255 | put_nfs_version(clp->cl_nfs_mod); |
254 | kfree(clp->cl_hostname); | 256 | kfree(clp->cl_hostname); |
257 | kfree(clp->cl_acceptor); | ||
255 | kfree(clp); | 258 | kfree(clp); |
256 | 259 | ||
257 | dprintk("<-- nfs_free_client()\n"); | 260 | dprintk("<-- nfs_free_client()\n"); |
@@ -482,8 +485,13 @@ nfs_get_client(const struct nfs_client_initdata *cl_init, | |||
482 | struct nfs_net *nn = net_generic(cl_init->net, nfs_net_id); | 485 | struct nfs_net *nn = net_generic(cl_init->net, nfs_net_id); |
483 | const struct nfs_rpc_ops *rpc_ops = cl_init->nfs_mod->rpc_ops; | 486 | const struct nfs_rpc_ops *rpc_ops = cl_init->nfs_mod->rpc_ops; |
484 | 487 | ||
488 | if (cl_init->hostname == NULL) { | ||
489 | WARN_ON(1); | ||
490 | return NULL; | ||
491 | } | ||
492 | |||
485 | dprintk("--> nfs_get_client(%s,v%u)\n", | 493 | dprintk("--> nfs_get_client(%s,v%u)\n", |
486 | cl_init->hostname ?: "", rpc_ops->version); | 494 | cl_init->hostname, rpc_ops->version); |
487 | 495 | ||
488 | /* see if the client already exists */ | 496 | /* see if the client already exists */ |
489 | do { | 497 | do { |
@@ -510,7 +518,7 @@ nfs_get_client(const struct nfs_client_initdata *cl_init, | |||
510 | } while (!IS_ERR(new)); | 518 | } while (!IS_ERR(new)); |
511 | 519 | ||
512 | dprintk("<-- nfs_get_client() Failed to find %s (%ld)\n", | 520 | dprintk("<-- nfs_get_client() Failed to find %s (%ld)\n", |
513 | cl_init->hostname ?: "", PTR_ERR(new)); | 521 | cl_init->hostname, PTR_ERR(new)); |
514 | return new; | 522 | return new; |
515 | } | 523 | } |
516 | EXPORT_SYMBOL_GPL(nfs_get_client); | 524 | EXPORT_SYMBOL_GPL(nfs_get_client); |
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c index 5d8ccecf5f5c..5853f53db732 100644 --- a/fs/nfs/delegation.c +++ b/fs/nfs/delegation.c | |||
@@ -41,14 +41,8 @@ void nfs_mark_delegation_referenced(struct nfs_delegation *delegation) | |||
41 | set_bit(NFS_DELEGATION_REFERENCED, &delegation->flags); | 41 | set_bit(NFS_DELEGATION_REFERENCED, &delegation->flags); |
42 | } | 42 | } |
43 | 43 | ||
44 | /** | 44 | static int |
45 | * nfs_have_delegation - check if inode has a delegation | 45 | nfs4_do_check_delegation(struct inode *inode, fmode_t flags, bool mark) |
46 | * @inode: inode to check | ||
47 | * @flags: delegation types to check for | ||
48 | * | ||
49 | * Returns one if inode has the indicated delegation, otherwise zero. | ||
50 | */ | ||
51 | int nfs4_have_delegation(struct inode *inode, fmode_t flags) | ||
52 | { | 46 | { |
53 | struct nfs_delegation *delegation; | 47 | struct nfs_delegation *delegation; |
54 | int ret = 0; | 48 | int ret = 0; |
@@ -58,12 +52,34 @@ int nfs4_have_delegation(struct inode *inode, fmode_t flags) | |||
58 | delegation = rcu_dereference(NFS_I(inode)->delegation); | 52 | delegation = rcu_dereference(NFS_I(inode)->delegation); |
59 | if (delegation != NULL && (delegation->type & flags) == flags && | 53 | if (delegation != NULL && (delegation->type & flags) == flags && |
60 | !test_bit(NFS_DELEGATION_RETURNING, &delegation->flags)) { | 54 | !test_bit(NFS_DELEGATION_RETURNING, &delegation->flags)) { |
61 | nfs_mark_delegation_referenced(delegation); | 55 | if (mark) |
56 | nfs_mark_delegation_referenced(delegation); | ||
62 | ret = 1; | 57 | ret = 1; |
63 | } | 58 | } |
64 | rcu_read_unlock(); | 59 | rcu_read_unlock(); |
65 | return ret; | 60 | return ret; |
66 | } | 61 | } |
62 | /** | ||
63 | * nfs4_have_delegation - check if inode has a delegation, mark it | ||
64 | * NFS_DELEGATION_REFERENCED if there is one. | ||
65 | * @inode: inode to check | ||
66 | * @flags: delegation types to check for | ||
67 | * | ||
68 | * Returns one if inode has the indicated delegation, otherwise zero. | ||
69 | */ | ||
70 | int nfs4_have_delegation(struct inode *inode, fmode_t flags) | ||
71 | { | ||
72 | return nfs4_do_check_delegation(inode, flags, true); | ||
73 | } | ||
74 | |||
75 | /* | ||
76 | * nfs4_check_delegation - check if inode has a delegation, do not mark | ||
77 | * NFS_DELEGATION_REFERENCED if it has one. | ||
78 | */ | ||
79 | int nfs4_check_delegation(struct inode *inode, fmode_t flags) | ||
80 | { | ||
81 | return nfs4_do_check_delegation(inode, flags, false); | ||
82 | } | ||
67 | 83 | ||
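The distinction matters to callers that only want a cheap answer: the non-marking variant lets a test run without making the delegation look recently used and therefore harder to return. An illustrative use, not a hunk from this series:

	if (nfs4_check_delegation(inode, FMODE_READ)) {
		/* a read delegation covers this inode: trust the cached data
		 * without promoting the delegation in the reclaim ordering */
		return 0;
	}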
68 | static int nfs_delegation_claim_locks(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid) | 84 | static int nfs_delegation_claim_locks(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid) |
69 | { | 85 | { |
diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h index 9a79c7a99d6d..5c1cce39297f 100644 --- a/fs/nfs/delegation.h +++ b/fs/nfs/delegation.h | |||
@@ -59,6 +59,7 @@ bool nfs4_copy_delegation_stateid(nfs4_stateid *dst, struct inode *inode, fmode_ | |||
59 | 59 | ||
60 | void nfs_mark_delegation_referenced(struct nfs_delegation *delegation); | 60 | void nfs_mark_delegation_referenced(struct nfs_delegation *delegation); |
61 | int nfs4_have_delegation(struct inode *inode, fmode_t flags); | 61 | int nfs4_have_delegation(struct inode *inode, fmode_t flags); |
62 | int nfs4_check_delegation(struct inode *inode, fmode_t flags); | ||
62 | 63 | ||
63 | #endif | 64 | #endif |
64 | 65 | ||
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 4a3d4ef76127..36d921f0c602 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c | |||
@@ -988,9 +988,13 @@ EXPORT_SYMBOL_GPL(nfs_force_lookup_revalidate); | |||
988 | * A check for whether or not the parent directory has changed. | 988 | * A check for whether or not the parent directory has changed. |
989 | * In the case it has, we assume that the dentries are untrustworthy | 989 | * In the case it has, we assume that the dentries are untrustworthy |
990 | * and may need to be looked up again. | 990 | * and may need to be looked up again. |
991 | * If rcu_walk prevents us from performing a full check, return 0. | ||
991 | */ | 992 | */ |
992 | static int nfs_check_verifier(struct inode *dir, struct dentry *dentry) | 993 | static int nfs_check_verifier(struct inode *dir, struct dentry *dentry, |
994 | int rcu_walk) | ||
993 | { | 995 | { |
996 | int ret; | ||
997 | |||
994 | if (IS_ROOT(dentry)) | 998 | if (IS_ROOT(dentry)) |
995 | return 1; | 999 | return 1; |
996 | if (NFS_SERVER(dir)->flags & NFS_MOUNT_LOOKUP_CACHE_NONE) | 1000 | if (NFS_SERVER(dir)->flags & NFS_MOUNT_LOOKUP_CACHE_NONE) |
@@ -998,7 +1002,11 @@ static int nfs_check_verifier(struct inode *dir, struct dentry *dentry) | |||
998 | if (!nfs_verify_change_attribute(dir, dentry->d_time)) | 1002 | if (!nfs_verify_change_attribute(dir, dentry->d_time)) |
999 | return 0; | 1003 | return 0; |
1000 | /* Revalidate nfsi->cache_change_attribute before we declare a match */ | 1004 | /* Revalidate nfsi->cache_change_attribute before we declare a match */ |
1001 | if (nfs_revalidate_inode(NFS_SERVER(dir), dir) < 0) | 1005 | if (rcu_walk) |
1006 | ret = nfs_revalidate_inode_rcu(NFS_SERVER(dir), dir); | ||
1007 | else | ||
1008 | ret = nfs_revalidate_inode(NFS_SERVER(dir), dir); | ||
1009 | if (ret < 0) | ||
1002 | return 0; | 1010 | return 0; |
1003 | if (!nfs_verify_change_attribute(dir, dentry->d_time)) | 1011 | if (!nfs_verify_change_attribute(dir, dentry->d_time)) |
1004 | return 0; | 1012 | return 0; |
@@ -1042,6 +1050,8 @@ int nfs_lookup_verify_inode(struct inode *inode, unsigned int flags) | |||
1042 | out: | 1050 | out: |
1043 | return (inode->i_nlink == 0) ? -ENOENT : 0; | 1051 | return (inode->i_nlink == 0) ? -ENOENT : 0; |
1044 | out_force: | 1052 | out_force: |
1053 | if (flags & LOOKUP_RCU) | ||
1054 | return -ECHILD; | ||
1045 | ret = __nfs_revalidate_inode(server, inode); | 1055 | ret = __nfs_revalidate_inode(server, inode); |
1046 | if (ret != 0) | 1056 | if (ret != 0) |
1047 | return ret; | 1057 | return ret; |
@@ -1054,6 +1064,9 @@ out_force: | |||
1054 | * | 1064 | * |
1055 | * If parent mtime has changed, we revalidate, else we wait for a | 1065 | * If parent mtime has changed, we revalidate, else we wait for a |
1056 | * period corresponding to the parent's attribute cache timeout value. | 1066 | * period corresponding to the parent's attribute cache timeout value. |
1067 | * | ||
1068 | * If LOOKUP_RCU prevents us from performing a full check, return 1 | ||
1069 | * suggesting a reval is needed. | ||
1057 | */ | 1070 | */ |
1058 | static inline | 1071 | static inline |
1059 | int nfs_neg_need_reval(struct inode *dir, struct dentry *dentry, | 1072 | int nfs_neg_need_reval(struct inode *dir, struct dentry *dentry, |
@@ -1064,7 +1077,7 @@ int nfs_neg_need_reval(struct inode *dir, struct dentry *dentry, | |||
1064 | return 0; | 1077 | return 0; |
1065 | if (NFS_SERVER(dir)->flags & NFS_MOUNT_LOOKUP_CACHE_NONEG) | 1078 | if (NFS_SERVER(dir)->flags & NFS_MOUNT_LOOKUP_CACHE_NONEG) |
1066 | return 1; | 1079 | return 1; |
1067 | return !nfs_check_verifier(dir, dentry); | 1080 | return !nfs_check_verifier(dir, dentry, flags & LOOKUP_RCU); |
1068 | } | 1081 | } |
1069 | 1082 | ||
1070 | /* | 1083 | /* |
@@ -1088,21 +1101,30 @@ static int nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags) | |||
1088 | struct nfs4_label *label = NULL; | 1101 | struct nfs4_label *label = NULL; |
1089 | int error; | 1102 | int error; |
1090 | 1103 | ||
1091 | if (flags & LOOKUP_RCU) | 1104 | if (flags & LOOKUP_RCU) { |
1092 | return -ECHILD; | 1105 | parent = ACCESS_ONCE(dentry->d_parent); |
1093 | 1106 | dir = ACCESS_ONCE(parent->d_inode); | |
1094 | parent = dget_parent(dentry); | 1107 | if (!dir) |
1095 | dir = parent->d_inode; | 1108 | return -ECHILD; |
1109 | } else { | ||
1110 | parent = dget_parent(dentry); | ||
1111 | dir = parent->d_inode; | ||
1112 | } | ||
1096 | nfs_inc_stats(dir, NFSIOS_DENTRYREVALIDATE); | 1113 | nfs_inc_stats(dir, NFSIOS_DENTRYREVALIDATE); |
1097 | inode = dentry->d_inode; | 1114 | inode = dentry->d_inode; |
1098 | 1115 | ||
1099 | if (!inode) { | 1116 | if (!inode) { |
1100 | if (nfs_neg_need_reval(dir, dentry, flags)) | 1117 | if (nfs_neg_need_reval(dir, dentry, flags)) { |
1118 | if (flags & LOOKUP_RCU) | ||
1119 | return -ECHILD; | ||
1101 | goto out_bad; | 1120 | goto out_bad; |
1121 | } | ||
1102 | goto out_valid_noent; | 1122 | goto out_valid_noent; |
1103 | } | 1123 | } |
1104 | 1124 | ||
1105 | if (is_bad_inode(inode)) { | 1125 | if (is_bad_inode(inode)) { |
1126 | if (flags & LOOKUP_RCU) | ||
1127 | return -ECHILD; | ||
1106 | dfprintk(LOOKUPCACHE, "%s: %pd2 has dud inode\n", | 1128 | dfprintk(LOOKUPCACHE, "%s: %pd2 has dud inode\n", |
1107 | __func__, dentry); | 1129 | __func__, dentry); |
1108 | goto out_bad; | 1130 | goto out_bad; |
@@ -1112,12 +1134,20 @@ static int nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags) | |||
1112 | goto out_set_verifier; | 1134 | goto out_set_verifier; |
1113 | 1135 | ||
1114 | /* Force a full look up iff the parent directory has changed */ | 1136 | /* Force a full look up iff the parent directory has changed */ |
1115 | if (!nfs_is_exclusive_create(dir, flags) && nfs_check_verifier(dir, dentry)) { | 1137 | if (!nfs_is_exclusive_create(dir, flags) && |
1116 | if (nfs_lookup_verify_inode(inode, flags)) | 1138 | nfs_check_verifier(dir, dentry, flags & LOOKUP_RCU)) { |
1139 | |||
1140 | if (nfs_lookup_verify_inode(inode, flags)) { | ||
1141 | if (flags & LOOKUP_RCU) | ||
1142 | return -ECHILD; | ||
1117 | goto out_zap_parent; | 1143 | goto out_zap_parent; |
1144 | } | ||
1118 | goto out_valid; | 1145 | goto out_valid; |
1119 | } | 1146 | } |
1120 | 1147 | ||
1148 | if (flags & LOOKUP_RCU) | ||
1149 | return -ECHILD; | ||
1150 | |||
1121 | if (NFS_STALE(inode)) | 1151 | if (NFS_STALE(inode)) |
1122 | goto out_bad; | 1152 | goto out_bad; |
1123 | 1153 | ||
@@ -1153,13 +1183,18 @@ out_set_verifier: | |||
1153 | /* Success: notify readdir to use READDIRPLUS */ | 1183 | /* Success: notify readdir to use READDIRPLUS */ |
1154 | nfs_advise_use_readdirplus(dir); | 1184 | nfs_advise_use_readdirplus(dir); |
1155 | out_valid_noent: | 1185 | out_valid_noent: |
1156 | dput(parent); | 1186 | if (flags & LOOKUP_RCU) { |
1187 | if (parent != ACCESS_ONCE(dentry->d_parent)) | ||
1188 | return -ECHILD; | ||
1189 | } else | ||
1190 | dput(parent); | ||
1157 | dfprintk(LOOKUPCACHE, "NFS: %s(%pd2) is valid\n", | 1191 | dfprintk(LOOKUPCACHE, "NFS: %s(%pd2) is valid\n", |
1158 | __func__, dentry); | 1192 | __func__, dentry); |
1159 | return 1; | 1193 | return 1; |
1160 | out_zap_parent: | 1194 | out_zap_parent: |
1161 | nfs_zap_caches(dir); | 1195 | nfs_zap_caches(dir); |
1162 | out_bad: | 1196 | out_bad: |
1197 | WARN_ON(flags & LOOKUP_RCU); | ||
1163 | nfs_free_fattr(fattr); | 1198 | nfs_free_fattr(fattr); |
1164 | nfs_free_fhandle(fhandle); | 1199 | nfs_free_fhandle(fhandle); |
1165 | nfs4_label_free(label); | 1200 | nfs4_label_free(label); |
@@ -1185,6 +1220,7 @@ out_zap_parent: | |||
1185 | __func__, dentry); | 1220 | __func__, dentry); |
1186 | return 0; | 1221 | return 0; |
1187 | out_error: | 1222 | out_error: |
1223 | WARN_ON(flags & LOOKUP_RCU); | ||
1188 | nfs_free_fattr(fattr); | 1224 | nfs_free_fattr(fattr); |
1189 | nfs_free_fhandle(fhandle); | 1225 | nfs_free_fhandle(fhandle); |
1190 | nfs4_label_free(label); | 1226 | nfs4_label_free(label); |
@@ -1529,14 +1565,9 @@ EXPORT_SYMBOL_GPL(nfs_atomic_open); | |||
1529 | 1565 | ||
1530 | static int nfs4_lookup_revalidate(struct dentry *dentry, unsigned int flags) | 1566 | static int nfs4_lookup_revalidate(struct dentry *dentry, unsigned int flags) |
1531 | { | 1567 | { |
1532 | struct dentry *parent = NULL; | ||
1533 | struct inode *inode; | 1568 | struct inode *inode; |
1534 | struct inode *dir; | ||
1535 | int ret = 0; | 1569 | int ret = 0; |
1536 | 1570 | ||
1537 | if (flags & LOOKUP_RCU) | ||
1538 | return -ECHILD; | ||
1539 | |||
1540 | if (!(flags & LOOKUP_OPEN) || (flags & LOOKUP_DIRECTORY)) | 1571 | if (!(flags & LOOKUP_OPEN) || (flags & LOOKUP_DIRECTORY)) |
1541 | goto no_open; | 1572 | goto no_open; |
1542 | if (d_mountpoint(dentry)) | 1573 | if (d_mountpoint(dentry)) |
@@ -1545,34 +1576,47 @@ static int nfs4_lookup_revalidate(struct dentry *dentry, unsigned int flags) | |||
1545 | goto no_open; | 1576 | goto no_open; |
1546 | 1577 | ||
1547 | inode = dentry->d_inode; | 1578 | inode = dentry->d_inode; |
1548 | parent = dget_parent(dentry); | ||
1549 | dir = parent->d_inode; | ||
1550 | 1579 | ||
1551 | /* We can't create new files in nfs_open_revalidate(), so we | 1580 | /* We can't create new files in nfs_open_revalidate(), so we |
1552 | * optimize away revalidation of negative dentries. | 1581 | * optimize away revalidation of negative dentries. |
1553 | */ | 1582 | */ |
1554 | if (inode == NULL) { | 1583 | if (inode == NULL) { |
1584 | struct dentry *parent; | ||
1585 | struct inode *dir; | ||
1586 | |||
1587 | if (flags & LOOKUP_RCU) { | ||
1588 | parent = ACCESS_ONCE(dentry->d_parent); | ||
1589 | dir = ACCESS_ONCE(parent->d_inode); | ||
1590 | if (!dir) | ||
1591 | return -ECHILD; | ||
1592 | } else { | ||
1593 | parent = dget_parent(dentry); | ||
1594 | dir = parent->d_inode; | ||
1595 | } | ||
1555 | if (!nfs_neg_need_reval(dir, dentry, flags)) | 1596 | if (!nfs_neg_need_reval(dir, dentry, flags)) |
1556 | ret = 1; | 1597 | ret = 1; |
1598 | else if (flags & LOOKUP_RCU) | ||
1599 | ret = -ECHILD; | ||
1600 | if (!(flags & LOOKUP_RCU)) | ||
1601 | dput(parent); | ||
1602 | else if (parent != ACCESS_ONCE(dentry->d_parent)) | ||
1603 | return -ECHILD; | ||
1557 | goto out; | 1604 | goto out; |
1558 | } | 1605 | } |
1559 | 1606 | ||
1560 | /* NFS only supports OPEN on regular files */ | 1607 | /* NFS only supports OPEN on regular files */ |
1561 | if (!S_ISREG(inode->i_mode)) | 1608 | if (!S_ISREG(inode->i_mode)) |
1562 | goto no_open_dput; | 1609 | goto no_open; |
1563 | /* We cannot do exclusive creation on a positive dentry */ | 1610 | /* We cannot do exclusive creation on a positive dentry */ |
1564 | if (flags & LOOKUP_EXCL) | 1611 | if (flags & LOOKUP_EXCL) |
1565 | goto no_open_dput; | 1612 | goto no_open; |
1566 | 1613 | ||
1567 | /* Let f_op->open() actually open (and revalidate) the file */ | 1614 | /* Let f_op->open() actually open (and revalidate) the file */ |
1568 | ret = 1; | 1615 | ret = 1; |
1569 | 1616 | ||
1570 | out: | 1617 | out: |
1571 | dput(parent); | ||
1572 | return ret; | 1618 | return ret; |
1573 | 1619 | ||
1574 | no_open_dput: | ||
1575 | dput(parent); | ||
1576 | no_open: | 1620 | no_open: |
1577 | return nfs_lookup_revalidate(dentry, flags); | 1621 | return nfs_lookup_revalidate(dentry, flags); |
1578 | } | 1622 | } |
@@ -2028,10 +2072,14 @@ static DEFINE_SPINLOCK(nfs_access_lru_lock); | |||
2028 | static LIST_HEAD(nfs_access_lru_list); | 2072 | static LIST_HEAD(nfs_access_lru_list); |
2029 | static atomic_long_t nfs_access_nr_entries; | 2073 | static atomic_long_t nfs_access_nr_entries; |
2030 | 2074 | ||
2075 | static unsigned long nfs_access_max_cachesize = ULONG_MAX; | ||
2076 | module_param(nfs_access_max_cachesize, ulong, 0644); | ||
2077 | MODULE_PARM_DESC(nfs_access_max_cachesize, "NFS access maximum total cache length"); | ||
2078 | |||
2031 | static void nfs_access_free_entry(struct nfs_access_entry *entry) | 2079 | static void nfs_access_free_entry(struct nfs_access_entry *entry) |
2032 | { | 2080 | { |
2033 | put_rpccred(entry->cred); | 2081 | put_rpccred(entry->cred); |
2034 | kfree(entry); | 2082 | kfree_rcu(entry, rcu_head); |
2035 | smp_mb__before_atomic(); | 2083 | smp_mb__before_atomic(); |
2036 | atomic_long_dec(&nfs_access_nr_entries); | 2084 | atomic_long_dec(&nfs_access_nr_entries); |
2037 | smp_mb__after_atomic(); | 2085 | smp_mb__after_atomic(); |
@@ -2048,19 +2096,14 @@ static void nfs_access_free_list(struct list_head *head) | |||
2048 | } | 2096 | } |
2049 | } | 2097 | } |
2050 | 2098 | ||
2051 | unsigned long | 2099 | static unsigned long |
2052 | nfs_access_cache_scan(struct shrinker *shrink, struct shrink_control *sc) | 2100 | nfs_do_access_cache_scan(unsigned int nr_to_scan) |
2053 | { | 2101 | { |
2054 | LIST_HEAD(head); | 2102 | LIST_HEAD(head); |
2055 | struct nfs_inode *nfsi, *next; | 2103 | struct nfs_inode *nfsi, *next; |
2056 | struct nfs_access_entry *cache; | 2104 | struct nfs_access_entry *cache; |
2057 | int nr_to_scan = sc->nr_to_scan; | ||
2058 | gfp_t gfp_mask = sc->gfp_mask; | ||
2059 | long freed = 0; | 2105 | long freed = 0; |
2060 | 2106 | ||
2061 | if ((gfp_mask & GFP_KERNEL) != GFP_KERNEL) | ||
2062 | return SHRINK_STOP; | ||
2063 | |||
2064 | spin_lock(&nfs_access_lru_lock); | 2107 | spin_lock(&nfs_access_lru_lock); |
2065 | list_for_each_entry_safe(nfsi, next, &nfs_access_lru_list, access_cache_inode_lru) { | 2108 | list_for_each_entry_safe(nfsi, next, &nfs_access_lru_list, access_cache_inode_lru) { |
2066 | struct inode *inode; | 2109 | struct inode *inode; |
@@ -2094,11 +2137,39 @@ remove_lru_entry: | |||
2094 | } | 2137 | } |
2095 | 2138 | ||
2096 | unsigned long | 2139 | unsigned long |
2140 | nfs_access_cache_scan(struct shrinker *shrink, struct shrink_control *sc) | ||
2141 | { | ||
2142 | int nr_to_scan = sc->nr_to_scan; | ||
2143 | gfp_t gfp_mask = sc->gfp_mask; | ||
2144 | |||
2145 | if ((gfp_mask & GFP_KERNEL) != GFP_KERNEL) | ||
2146 | return SHRINK_STOP; | ||
2147 | return nfs_do_access_cache_scan(nr_to_scan); | ||
2148 | } | ||
2149 | |||
2150 | |||
2151 | unsigned long | ||
2097 | nfs_access_cache_count(struct shrinker *shrink, struct shrink_control *sc) | 2152 | nfs_access_cache_count(struct shrinker *shrink, struct shrink_control *sc) |
2098 | { | 2153 | { |
2099 | return vfs_pressure_ratio(atomic_long_read(&nfs_access_nr_entries)); | 2154 | return vfs_pressure_ratio(atomic_long_read(&nfs_access_nr_entries)); |
2100 | } | 2155 | } |
2101 | 2156 | ||
2157 | static void | ||
2158 | nfs_access_cache_enforce_limit(void) | ||
2159 | { | ||
2160 | long nr_entries = atomic_long_read(&nfs_access_nr_entries); | ||
2161 | unsigned long diff; | ||
2162 | unsigned int nr_to_scan; | ||
2163 | |||
2164 | if (nr_entries < 0 || nr_entries <= nfs_access_max_cachesize) | ||
2165 | return; | ||
2166 | nr_to_scan = 100; | ||
2167 | diff = nr_entries - nfs_access_max_cachesize; | ||
2168 | if (diff < nr_to_scan) | ||
2169 | nr_to_scan = diff; | ||
2170 | nfs_do_access_cache_scan(nr_to_scan); | ||
2171 | } | ||
2172 | |||
2102 | static void __nfs_access_zap_cache(struct nfs_inode *nfsi, struct list_head *head) | 2173 | static void __nfs_access_zap_cache(struct nfs_inode *nfsi, struct list_head *head) |
2103 | { | 2174 | { |
2104 | struct rb_root *root_node = &nfsi->access_cache; | 2175 | struct rb_root *root_node = &nfsi->access_cache; |
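The hunk above splits the shrinker entry point so the same scan loop can also be driven by the new nfs_access_max_cachesize module parameter: nfs_access_cache_enforce_limit() trims at most 100 entries per call once the cache grows past the limit. A small compilable model of just that capping arithmetic; the names here are illustrative stand-ins, not exported kernel symbols:

	#include <limits.h>

	static unsigned long access_max_cachesize = ULONG_MAX;

	/* How many entries one enforcement pass would ask the scanner to free. */
	static unsigned int entries_to_scan(long nr_entries)
	{
		unsigned int nr_to_scan = 100;	/* per-call cap, as in the patch */
		unsigned long diff;

		if (nr_entries < 0 ||
		    (unsigned long)nr_entries <= access_max_cachesize)
			return 0;	/* at or under the limit: nothing to trim */
		diff = (unsigned long)nr_entries - access_max_cachesize;
		if (diff < nr_to_scan)
			nr_to_scan = diff;
		return nr_to_scan;
	}

With the default of ULONG_MAX the pass is a no-op, matching the patch's default behaviour.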
@@ -2186,6 +2257,38 @@ out_zap: | |||
2186 | return -ENOENT; | 2257 | return -ENOENT; |
2187 | } | 2258 | } |
2188 | 2259 | ||
2260 | static int nfs_access_get_cached_rcu(struct inode *inode, struct rpc_cred *cred, struct nfs_access_entry *res) | ||
2261 | { | ||
2262 | /* Only check the most recently returned cache entry, | ||
2263 | * but do it without locking. | ||
2264 | */ | ||
2265 | struct nfs_inode *nfsi = NFS_I(inode); | ||
2266 | struct nfs_access_entry *cache; | ||
2267 | int err = -ECHILD; | ||
2268 | struct list_head *lh; | ||
2269 | |||
2270 | rcu_read_lock(); | ||
2271 | if (nfsi->cache_validity & NFS_INO_INVALID_ACCESS) | ||
2272 | goto out; | ||
2273 | lh = rcu_dereference(nfsi->access_cache_entry_lru.prev); | ||
2274 | cache = list_entry(lh, struct nfs_access_entry, lru); | ||
2275 | if (lh == &nfsi->access_cache_entry_lru || | ||
2276 | cred != cache->cred) | ||
2277 | cache = NULL; | ||
2278 | if (cache == NULL) | ||
2279 | goto out; | ||
2280 | if (!nfs_have_delegated_attributes(inode) && | ||
2281 | !time_in_range_open(jiffies, cache->jiffies, cache->jiffies + nfsi->attrtimeo)) | ||
2282 | goto out; | ||
2283 | res->jiffies = cache->jiffies; | ||
2284 | res->cred = cache->cred; | ||
2285 | res->mask = cache->mask; | ||
2286 | err = 0; | ||
2287 | out: | ||
2288 | rcu_read_unlock(); | ||
2289 | return err; | ||
2290 | } | ||
2291 | |||
2189 | static void nfs_access_add_rbtree(struct inode *inode, struct nfs_access_entry *set) | 2292 | static void nfs_access_add_rbtree(struct inode *inode, struct nfs_access_entry *set) |
2190 | { | 2293 | { |
2191 | struct nfs_inode *nfsi = NFS_I(inode); | 2294 | struct nfs_inode *nfsi = NFS_I(inode); |
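nfs_access_get_cached_rcu(), added above, only looks at the most recently used entry and does so under rcu_read_lock() instead of the inode spinlock; on any miss the caller simply falls back to the existing locked lookup. A reduced userspace sketch of the "check only the LRU tail" idea (the list type and credential comparison are stand-ins, and the delegation/attrtimeo freshness checks are omitted):

	#include <stddef.h>
	#include <errno.h>

	struct list_head { struct list_head *next, *prev; };

	struct access_entry {
		struct list_head lru;
		const void *cred;	/* stand-in for struct rpc_cred * */
		int mask;
	};

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	/*
	 * Look only at the last entry on the LRU (the most recently used one).
	 * Returns 0 and copies the mask on a hit, -ECHILD otherwise so the
	 * caller can retry with the lock held.
	 */
	static int get_cached_fast(struct list_head *lru_head, const void *cred,
				   int *mask_out)
	{
		struct list_head *lh = lru_head->prev;
		struct access_entry *cache;

		if (lh == lru_head)
			return -ECHILD;		/* LRU is empty */
		cache = container_of(lh, struct access_entry, lru);
		if (cache->cred != cred)
			return -ECHILD;		/* different credential: miss */
		*mask_out = cache->mask;
		return 0;
	}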
@@ -2229,6 +2332,11 @@ void nfs_access_add_cache(struct inode *inode, struct nfs_access_entry *set) | |||
2229 | cache->cred = get_rpccred(set->cred); | 2332 | cache->cred = get_rpccred(set->cred); |
2230 | cache->mask = set->mask; | 2333 | cache->mask = set->mask; |
2231 | 2334 | ||
2335 | /* The above field assignments must be visible | ||
2336 | * before this item appears on the lru. We cannot easily | ||
2337 | * use rcu_assign_pointer, so just force the memory barrier. | ||
2338 | */ | ||
2339 | smp_wmb(); | ||
2232 | nfs_access_add_rbtree(inode, cache); | 2340 | nfs_access_add_rbtree(inode, cache); |
2233 | 2341 | ||
2234 | /* Update accounting */ | 2342 | /* Update accounting */ |
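The smp_wmb() added above ensures the entry's fields are published before the entry becomes reachable from the LRU, pairing with the lockless reader introduced earlier. Roughly the same ordering can be modelled in userspace with C11 release/acquire atomics; this is only an analogue, since the kernel relies on smp_wmb() plus RCU rather than C11 atomics:

	#include <stdatomic.h>

	struct entry { int mask; };

	static _Atomic(struct entry *) latest;	/* what the lockless reader follows */

	/* writer: initialise all fields, then publish with release ordering */
	static void publish(struct entry *e, int mask)
	{
		e->mask = mask;
		atomic_store_explicit(&latest, e, memory_order_release);
	}

	/* reader: the acquire load guarantees it sees the initialised fields */
	static int read_latest(void)
	{
		struct entry *e = atomic_load_explicit(&latest, memory_order_acquire);

		return e ? e->mask : -1;
	}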
@@ -2244,6 +2352,7 @@ void nfs_access_add_cache(struct inode *inode, struct nfs_access_entry *set) | |||
2244 | &nfs_access_lru_list); | 2352 | &nfs_access_lru_list); |
2245 | spin_unlock(&nfs_access_lru_lock); | 2353 | spin_unlock(&nfs_access_lru_lock); |
2246 | } | 2354 | } |
2355 | nfs_access_cache_enforce_limit(); | ||
2247 | } | 2356 | } |
2248 | EXPORT_SYMBOL_GPL(nfs_access_add_cache); | 2357 | EXPORT_SYMBOL_GPL(nfs_access_add_cache); |
2249 | 2358 | ||
@@ -2267,10 +2376,16 @@ static int nfs_do_access(struct inode *inode, struct rpc_cred *cred, int mask) | |||
2267 | 2376 | ||
2268 | trace_nfs_access_enter(inode); | 2377 | trace_nfs_access_enter(inode); |
2269 | 2378 | ||
2270 | status = nfs_access_get_cached(inode, cred, &cache); | 2379 | status = nfs_access_get_cached_rcu(inode, cred, &cache); |
2380 | if (status != 0) | ||
2381 | status = nfs_access_get_cached(inode, cred, &cache); | ||
2271 | if (status == 0) | 2382 | if (status == 0) |
2272 | goto out_cached; | 2383 | goto out_cached; |
2273 | 2384 | ||
2385 | status = -ECHILD; | ||
2386 | if (mask & MAY_NOT_BLOCK) | ||
2387 | goto out; | ||
2388 | |||
2274 | /* Be clever: ask server to check for all possible rights */ | 2389 | /* Be clever: ask server to check for all possible rights */ |
2275 | cache.mask = MAY_EXEC | MAY_WRITE | MAY_READ; | 2390 | cache.mask = MAY_EXEC | MAY_WRITE | MAY_READ; |
2276 | cache.cred = cred; | 2391 | cache.cred = cred; |
@@ -2321,9 +2436,6 @@ int nfs_permission(struct inode *inode, int mask) | |||
2321 | struct rpc_cred *cred; | 2436 | struct rpc_cred *cred; |
2322 | int res = 0; | 2437 | int res = 0; |
2323 | 2438 | ||
2324 | if (mask & MAY_NOT_BLOCK) | ||
2325 | return -ECHILD; | ||
2326 | |||
2327 | nfs_inc_stats(inode, NFSIOS_VFSACCESS); | 2439 | nfs_inc_stats(inode, NFSIOS_VFSACCESS); |
2328 | 2440 | ||
2329 | if ((mask & (MAY_READ | MAY_WRITE | MAY_EXEC)) == 0) | 2441 | if ((mask & (MAY_READ | MAY_WRITE | MAY_EXEC)) == 0) |
@@ -2350,12 +2462,23 @@ force_lookup: | |||
2350 | if (!NFS_PROTO(inode)->access) | 2462 | if (!NFS_PROTO(inode)->access) |
2351 | goto out_notsup; | 2463 | goto out_notsup; |
2352 | 2464 | ||
2353 | cred = rpc_lookup_cred(); | 2465 | /* Always try fast lookups first */ |
2354 | if (!IS_ERR(cred)) { | 2466 | rcu_read_lock(); |
2355 | res = nfs_do_access(inode, cred, mask); | 2467 | cred = rpc_lookup_cred_nonblock(); |
2356 | put_rpccred(cred); | 2468 | if (!IS_ERR(cred)) |
2357 | } else | 2469 | res = nfs_do_access(inode, cred, mask|MAY_NOT_BLOCK); |
2470 | else | ||
2358 | res = PTR_ERR(cred); | 2471 | res = PTR_ERR(cred); |
2472 | rcu_read_unlock(); | ||
2473 | if (res == -ECHILD && !(mask & MAY_NOT_BLOCK)) { | ||
2474 | /* Fast lookup failed, try the slow way */ | ||
2475 | cred = rpc_lookup_cred(); | ||
2476 | if (!IS_ERR(cred)) { | ||
2477 | res = nfs_do_access(inode, cred, mask); | ||
2478 | put_rpccred(cred); | ||
2479 | } else | ||
2480 | res = PTR_ERR(cred); | ||
2481 | } | ||
2359 | out: | 2482 | out: |
2360 | if (!res && (mask & MAY_EXEC) && !execute_ok(inode)) | 2483 | if (!res && (mask & MAY_EXEC) && !execute_ok(inode)) |
2361 | res = -EACCES; | 2484 | res = -EACCES; |
@@ -2364,6 +2487,9 @@ out: | |||
2364 | inode->i_sb->s_id, inode->i_ino, mask, res); | 2487 | inode->i_sb->s_id, inode->i_ino, mask, res); |
2365 | return res; | 2488 | return res; |
2366 | out_notsup: | 2489 | out_notsup: |
2490 | if (mask & MAY_NOT_BLOCK) | ||
2491 | return -ECHILD; | ||
2492 | |||
2367 | res = nfs_revalidate_inode(NFS_SERVER(inode), inode); | 2493 | res = nfs_revalidate_inode(NFS_SERVER(inode), inode); |
2368 | if (res == 0) | 2494 | if (res == 0) |
2369 | res = generic_permission(inode, mask); | 2495 | res = generic_permission(inode, mask); |
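With these hunks nfs_permission() always tries a non-blocking path first: rpc_lookup_cred_nonblock() under rcu_read_lock() with MAY_NOT_BLOCK forced into the mask, falling back to the sleeping rpc_lookup_cred() only when the fast path answers -ECHILD and blocking is allowed. A compact model of that fast-then-slow decision; the two check functions are placeholders, not RPC client API:

	#include <errno.h>

	#define MAY_NOT_BLOCK	0x80	/* illustrative; see MAY_NOT_BLOCK in include/linux/fs.h */

	/* placeholder checks: the fast one never sleeps and may give up with -ECHILD */
	static int check_access_fast(int mask) { (void)mask; return -ECHILD; }
	static int check_access_slow(int mask) { (void)mask; return 0; }

	static int permission(int mask)
	{
		/* always try the non-blocking path first */
		int res = check_access_fast(mask | MAY_NOT_BLOCK);

		if (res == -ECHILD && !(mask & MAY_NOT_BLOCK)) {
			/* fast path could not answer and we are allowed to sleep */
			res = check_access_slow(mask);
		}
		return res;
	}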
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c index f11b9eed0de1..65ef6e00deee 100644 --- a/fs/nfs/direct.c +++ b/fs/nfs/direct.c | |||
@@ -148,8 +148,8 @@ static void nfs_direct_set_hdr_verf(struct nfs_direct_req *dreq, | |||
148 | { | 148 | { |
149 | struct nfs_writeverf *verfp; | 149 | struct nfs_writeverf *verfp; |
150 | 150 | ||
151 | verfp = nfs_direct_select_verf(dreq, hdr->data->ds_clp, | 151 | verfp = nfs_direct_select_verf(dreq, hdr->ds_clp, |
152 | hdr->data->ds_idx); | 152 | hdr->ds_idx); |
153 | WARN_ON_ONCE(verfp->committed >= 0); | 153 | WARN_ON_ONCE(verfp->committed >= 0); |
154 | memcpy(verfp, &hdr->verf, sizeof(struct nfs_writeverf)); | 154 | memcpy(verfp, &hdr->verf, sizeof(struct nfs_writeverf)); |
155 | WARN_ON_ONCE(verfp->committed < 0); | 155 | WARN_ON_ONCE(verfp->committed < 0); |
@@ -169,8 +169,8 @@ static int nfs_direct_set_or_cmp_hdr_verf(struct nfs_direct_req *dreq, | |||
169 | { | 169 | { |
170 | struct nfs_writeverf *verfp; | 170 | struct nfs_writeverf *verfp; |
171 | 171 | ||
172 | verfp = nfs_direct_select_verf(dreq, hdr->data->ds_clp, | 172 | verfp = nfs_direct_select_verf(dreq, hdr->ds_clp, |
173 | hdr->data->ds_idx); | 173 | hdr->ds_idx); |
174 | if (verfp->committed < 0) { | 174 | if (verfp->committed < 0) { |
175 | nfs_direct_set_hdr_verf(dreq, hdr); | 175 | nfs_direct_set_hdr_verf(dreq, hdr); |
176 | return 0; | 176 | return 0; |
@@ -715,7 +715,7 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr) | |||
715 | { | 715 | { |
716 | struct nfs_direct_req *dreq = hdr->dreq; | 716 | struct nfs_direct_req *dreq = hdr->dreq; |
717 | struct nfs_commit_info cinfo; | 717 | struct nfs_commit_info cinfo; |
718 | int bit = -1; | 718 | bool request_commit = false; |
719 | struct nfs_page *req = nfs_list_entry(hdr->pages.next); | 719 | struct nfs_page *req = nfs_list_entry(hdr->pages.next); |
720 | 720 | ||
721 | if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) | 721 | if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) |
@@ -729,27 +729,20 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr) | |||
729 | dreq->flags = 0; | 729 | dreq->flags = 0; |
730 | dreq->error = hdr->error; | 730 | dreq->error = hdr->error; |
731 | } | 731 | } |
732 | if (dreq->error != 0) | 732 | if (dreq->error == 0) { |
733 | bit = NFS_IOHDR_ERROR; | ||
734 | else { | ||
735 | dreq->count += hdr->good_bytes; | 733 | dreq->count += hdr->good_bytes; |
736 | if (test_bit(NFS_IOHDR_NEED_RESCHED, &hdr->flags)) { | 734 | if (nfs_write_need_commit(hdr)) { |
737 | dreq->flags = NFS_ODIRECT_RESCHED_WRITES; | ||
738 | bit = NFS_IOHDR_NEED_RESCHED; | ||
739 | } else if (test_bit(NFS_IOHDR_NEED_COMMIT, &hdr->flags)) { | ||
740 | if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES) | 735 | if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES) |
741 | bit = NFS_IOHDR_NEED_RESCHED; | 736 | request_commit = true; |
742 | else if (dreq->flags == 0) { | 737 | else if (dreq->flags == 0) { |
743 | nfs_direct_set_hdr_verf(dreq, hdr); | 738 | nfs_direct_set_hdr_verf(dreq, hdr); |
744 | bit = NFS_IOHDR_NEED_COMMIT; | 739 | request_commit = true; |
745 | dreq->flags = NFS_ODIRECT_DO_COMMIT; | 740 | dreq->flags = NFS_ODIRECT_DO_COMMIT; |
746 | } else if (dreq->flags == NFS_ODIRECT_DO_COMMIT) { | 741 | } else if (dreq->flags == NFS_ODIRECT_DO_COMMIT) { |
747 | if (nfs_direct_set_or_cmp_hdr_verf(dreq, hdr)) { | 742 | request_commit = true; |
743 | if (nfs_direct_set_or_cmp_hdr_verf(dreq, hdr)) | ||
748 | dreq->flags = | 744 | dreq->flags = |
749 | NFS_ODIRECT_RESCHED_WRITES; | 745 | NFS_ODIRECT_RESCHED_WRITES; |
750 | bit = NFS_IOHDR_NEED_RESCHED; | ||
751 | } else | ||
752 | bit = NFS_IOHDR_NEED_COMMIT; | ||
753 | } | 746 | } |
754 | } | 747 | } |
755 | } | 748 | } |
@@ -759,9 +752,7 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr) | |||
759 | 752 | ||
760 | req = nfs_list_entry(hdr->pages.next); | 753 | req = nfs_list_entry(hdr->pages.next); |
761 | nfs_list_remove_request(req); | 754 | nfs_list_remove_request(req); |
762 | switch (bit) { | 755 | if (request_commit) { |
763 | case NFS_IOHDR_NEED_RESCHED: | ||
764 | case NFS_IOHDR_NEED_COMMIT: | ||
765 | kref_get(&req->wb_kref); | 756 | kref_get(&req->wb_kref); |
766 | nfs_mark_request_commit(req, hdr->lseg, &cinfo); | 757 | nfs_mark_request_commit(req, hdr->lseg, &cinfo); |
767 | } | 758 | } |
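The direct-write completion above stops juggling the NFS_IOHDR_NEED_COMMIT/NEED_RESCHED bits and simply records whether the request still needs a COMMIT. A boiled-down model of the new decision; the enum values stand in for the dreq->flags states, and verf_mismatch models nfs_direct_set_or_cmp_hdr_verf() noticing a changed write verifier:

	#include <stdbool.h>

	enum odirect_flags {
		ODIRECT_NONE = 0,
		ODIRECT_DO_COMMIT,
		ODIRECT_RESCHED_WRITES,
	};

	/* Decide whether the completed write must be queued for COMMIT. */
	static bool needs_commit(enum odirect_flags *flags, bool write_needs_commit,
				 bool verf_mismatch)
	{
		if (!write_needs_commit)
			return false;
		if (*flags == ODIRECT_RESCHED_WRITES)
			return true;
		if (*flags == ODIRECT_NONE) {
			*flags = ODIRECT_DO_COMMIT;
			return true;
		}
		if (*flags == ODIRECT_DO_COMMIT) {
			if (verf_mismatch)
				*flags = ODIRECT_RESCHED_WRITES;
			return true;
		}
		return false;
	}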
diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c index d2eba1c13b7e..1359c4a27393 100644 --- a/fs/nfs/filelayout/filelayout.c +++ b/fs/nfs/filelayout/filelayout.c | |||
@@ -84,45 +84,37 @@ filelayout_get_dserver_offset(struct pnfs_layout_segment *lseg, loff_t offset) | |||
84 | BUG(); | 84 | BUG(); |
85 | } | 85 | } |
86 | 86 | ||
87 | static void filelayout_reset_write(struct nfs_pgio_data *data) | 87 | static void filelayout_reset_write(struct nfs_pgio_header *hdr) |
88 | { | 88 | { |
89 | struct nfs_pgio_header *hdr = data->header; | 89 | struct rpc_task *task = &hdr->task; |
90 | struct rpc_task *task = &data->task; | ||
91 | 90 | ||
92 | if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) { | 91 | if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) { |
93 | dprintk("%s Reset task %5u for i/o through MDS " | 92 | dprintk("%s Reset task %5u for i/o through MDS " |
94 | "(req %s/%llu, %u bytes @ offset %llu)\n", __func__, | 93 | "(req %s/%llu, %u bytes @ offset %llu)\n", __func__, |
95 | data->task.tk_pid, | 94 | hdr->task.tk_pid, |
96 | hdr->inode->i_sb->s_id, | 95 | hdr->inode->i_sb->s_id, |
97 | (unsigned long long)NFS_FILEID(hdr->inode), | 96 | (unsigned long long)NFS_FILEID(hdr->inode), |
98 | data->args.count, | 97 | hdr->args.count, |
99 | (unsigned long long)data->args.offset); | 98 | (unsigned long long)hdr->args.offset); |
100 | 99 | ||
101 | task->tk_status = pnfs_write_done_resend_to_mds(hdr->inode, | 100 | task->tk_status = pnfs_write_done_resend_to_mds(hdr); |
102 | &hdr->pages, | ||
103 | hdr->completion_ops, | ||
104 | hdr->dreq); | ||
105 | } | 101 | } |
106 | } | 102 | } |
107 | 103 | ||
108 | static void filelayout_reset_read(struct nfs_pgio_data *data) | 104 | static void filelayout_reset_read(struct nfs_pgio_header *hdr) |
109 | { | 105 | { |
110 | struct nfs_pgio_header *hdr = data->header; | 106 | struct rpc_task *task = &hdr->task; |
111 | struct rpc_task *task = &data->task; | ||
112 | 107 | ||
113 | if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) { | 108 | if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) { |
114 | dprintk("%s Reset task %5u for i/o through MDS " | 109 | dprintk("%s Reset task %5u for i/o through MDS " |
115 | "(req %s/%llu, %u bytes @ offset %llu)\n", __func__, | 110 | "(req %s/%llu, %u bytes @ offset %llu)\n", __func__, |
116 | data->task.tk_pid, | 111 | hdr->task.tk_pid, |
117 | hdr->inode->i_sb->s_id, | 112 | hdr->inode->i_sb->s_id, |
118 | (unsigned long long)NFS_FILEID(hdr->inode), | 113 | (unsigned long long)NFS_FILEID(hdr->inode), |
119 | data->args.count, | 114 | hdr->args.count, |
120 | (unsigned long long)data->args.offset); | 115 | (unsigned long long)hdr->args.offset); |
121 | 116 | ||
122 | task->tk_status = pnfs_read_done_resend_to_mds(hdr->inode, | 117 | task->tk_status = pnfs_read_done_resend_to_mds(hdr); |
123 | &hdr->pages, | ||
124 | hdr->completion_ops, | ||
125 | hdr->dreq); | ||
126 | } | 118 | } |
127 | } | 119 | } |
128 | 120 | ||
@@ -243,18 +235,17 @@ wait_on_recovery: | |||
243 | /* NFS_PROTO call done callback routines */ | 235 | /* NFS_PROTO call done callback routines */ |
244 | 236 | ||
245 | static int filelayout_read_done_cb(struct rpc_task *task, | 237 | static int filelayout_read_done_cb(struct rpc_task *task, |
246 | struct nfs_pgio_data *data) | 238 | struct nfs_pgio_header *hdr) |
247 | { | 239 | { |
248 | struct nfs_pgio_header *hdr = data->header; | ||
249 | int err; | 240 | int err; |
250 | 241 | ||
251 | trace_nfs4_pnfs_read(data, task->tk_status); | 242 | trace_nfs4_pnfs_read(hdr, task->tk_status); |
252 | err = filelayout_async_handle_error(task, data->args.context->state, | 243 | err = filelayout_async_handle_error(task, hdr->args.context->state, |
253 | data->ds_clp, hdr->lseg); | 244 | hdr->ds_clp, hdr->lseg); |
254 | 245 | ||
255 | switch (err) { | 246 | switch (err) { |
256 | case -NFS4ERR_RESET_TO_MDS: | 247 | case -NFS4ERR_RESET_TO_MDS: |
257 | filelayout_reset_read(data); | 248 | filelayout_reset_read(hdr); |
258 | return task->tk_status; | 249 | return task->tk_status; |
259 | case -EAGAIN: | 250 | case -EAGAIN: |
260 | rpc_restart_call_prepare(task); | 251 | rpc_restart_call_prepare(task); |
@@ -270,15 +261,14 @@ static int filelayout_read_done_cb(struct rpc_task *task, | |||
270 | * rfc5661 is not clear about which credential should be used. | 261 | * rfc5661 is not clear about which credential should be used. |
271 | */ | 262 | */ |
272 | static void | 263 | static void |
273 | filelayout_set_layoutcommit(struct nfs_pgio_data *wdata) | 264 | filelayout_set_layoutcommit(struct nfs_pgio_header *hdr) |
274 | { | 265 | { |
275 | struct nfs_pgio_header *hdr = wdata->header; | ||
276 | 266 | ||
277 | if (FILELAYOUT_LSEG(hdr->lseg)->commit_through_mds || | 267 | if (FILELAYOUT_LSEG(hdr->lseg)->commit_through_mds || |
278 | wdata->res.verf->committed == NFS_FILE_SYNC) | 268 | hdr->res.verf->committed == NFS_FILE_SYNC) |
279 | return; | 269 | return; |
280 | 270 | ||
281 | pnfs_set_layoutcommit(wdata); | 271 | pnfs_set_layoutcommit(hdr); |
282 | dprintk("%s inode %lu pls_end_pos %lu\n", __func__, hdr->inode->i_ino, | 272 | dprintk("%s inode %lu pls_end_pos %lu\n", __func__, hdr->inode->i_ino, |
283 | (unsigned long) NFS_I(hdr->inode)->layout->plh_lwb); | 273 | (unsigned long) NFS_I(hdr->inode)->layout->plh_lwb); |
284 | } | 274 | } |
@@ -305,83 +295,82 @@ filelayout_reset_to_mds(struct pnfs_layout_segment *lseg) | |||
305 | */ | 295 | */ |
306 | static void filelayout_read_prepare(struct rpc_task *task, void *data) | 296 | static void filelayout_read_prepare(struct rpc_task *task, void *data) |
307 | { | 297 | { |
308 | struct nfs_pgio_data *rdata = data; | 298 | struct nfs_pgio_header *hdr = data; |
309 | 299 | ||
310 | if (unlikely(test_bit(NFS_CONTEXT_BAD, &rdata->args.context->flags))) { | 300 | if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) { |
311 | rpc_exit(task, -EIO); | 301 | rpc_exit(task, -EIO); |
312 | return; | 302 | return; |
313 | } | 303 | } |
314 | if (filelayout_reset_to_mds(rdata->header->lseg)) { | 304 | if (filelayout_reset_to_mds(hdr->lseg)) { |
315 | dprintk("%s task %u reset io to MDS\n", __func__, task->tk_pid); | 305 | dprintk("%s task %u reset io to MDS\n", __func__, task->tk_pid); |
316 | filelayout_reset_read(rdata); | 306 | filelayout_reset_read(hdr); |
317 | rpc_exit(task, 0); | 307 | rpc_exit(task, 0); |
318 | return; | 308 | return; |
319 | } | 309 | } |
320 | rdata->pgio_done_cb = filelayout_read_done_cb; | 310 | hdr->pgio_done_cb = filelayout_read_done_cb; |
321 | 311 | ||
322 | if (nfs41_setup_sequence(rdata->ds_clp->cl_session, | 312 | if (nfs41_setup_sequence(hdr->ds_clp->cl_session, |
323 | &rdata->args.seq_args, | 313 | &hdr->args.seq_args, |
324 | &rdata->res.seq_res, | 314 | &hdr->res.seq_res, |
325 | task)) | 315 | task)) |
326 | return; | 316 | return; |
327 | if (nfs4_set_rw_stateid(&rdata->args.stateid, rdata->args.context, | 317 | if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context, |
328 | rdata->args.lock_context, FMODE_READ) == -EIO) | 318 | hdr->args.lock_context, FMODE_READ) == -EIO) |
329 | rpc_exit(task, -EIO); /* lost lock, terminate I/O */ | 319 | rpc_exit(task, -EIO); /* lost lock, terminate I/O */ |
330 | } | 320 | } |
331 | 321 | ||
332 | static void filelayout_read_call_done(struct rpc_task *task, void *data) | 322 | static void filelayout_read_call_done(struct rpc_task *task, void *data) |
333 | { | 323 | { |
334 | struct nfs_pgio_data *rdata = data; | 324 | struct nfs_pgio_header *hdr = data; |
335 | 325 | ||
336 | dprintk("--> %s task->tk_status %d\n", __func__, task->tk_status); | 326 | dprintk("--> %s task->tk_status %d\n", __func__, task->tk_status); |
337 | 327 | ||
338 | if (test_bit(NFS_IOHDR_REDO, &rdata->header->flags) && | 328 | if (test_bit(NFS_IOHDR_REDO, &hdr->flags) && |
339 | task->tk_status == 0) { | 329 | task->tk_status == 0) { |
340 | nfs41_sequence_done(task, &rdata->res.seq_res); | 330 | nfs41_sequence_done(task, &hdr->res.seq_res); |
341 | return; | 331 | return; |
342 | } | 332 | } |
343 | 333 | ||
344 | /* Note this may cause RPC to be resent */ | 334 | /* Note this may cause RPC to be resent */ |
345 | rdata->header->mds_ops->rpc_call_done(task, data); | 335 | hdr->mds_ops->rpc_call_done(task, data); |
346 | } | 336 | } |
347 | 337 | ||
348 | static void filelayout_read_count_stats(struct rpc_task *task, void *data) | 338 | static void filelayout_read_count_stats(struct rpc_task *task, void *data) |
349 | { | 339 | { |
350 | struct nfs_pgio_data *rdata = data; | 340 | struct nfs_pgio_header *hdr = data; |
351 | 341 | ||
352 | rpc_count_iostats(task, NFS_SERVER(rdata->header->inode)->client->cl_metrics); | 342 | rpc_count_iostats(task, NFS_SERVER(hdr->inode)->client->cl_metrics); |
353 | } | 343 | } |
354 | 344 | ||
355 | static void filelayout_read_release(void *data) | 345 | static void filelayout_read_release(void *data) |
356 | { | 346 | { |
357 | struct nfs_pgio_data *rdata = data; | 347 | struct nfs_pgio_header *hdr = data; |
358 | struct pnfs_layout_hdr *lo = rdata->header->lseg->pls_layout; | 348 | struct pnfs_layout_hdr *lo = hdr->lseg->pls_layout; |
359 | 349 | ||
360 | filelayout_fenceme(lo->plh_inode, lo); | 350 | filelayout_fenceme(lo->plh_inode, lo); |
361 | nfs_put_client(rdata->ds_clp); | 351 | nfs_put_client(hdr->ds_clp); |
362 | rdata->header->mds_ops->rpc_release(data); | 352 | hdr->mds_ops->rpc_release(data); |
363 | } | 353 | } |
364 | 354 | ||
365 | static int filelayout_write_done_cb(struct rpc_task *task, | 355 | static int filelayout_write_done_cb(struct rpc_task *task, |
366 | struct nfs_pgio_data *data) | 356 | struct nfs_pgio_header *hdr) |
367 | { | 357 | { |
368 | struct nfs_pgio_header *hdr = data->header; | ||
369 | int err; | 358 | int err; |
370 | 359 | ||
371 | trace_nfs4_pnfs_write(data, task->tk_status); | 360 | trace_nfs4_pnfs_write(hdr, task->tk_status); |
372 | err = filelayout_async_handle_error(task, data->args.context->state, | 361 | err = filelayout_async_handle_error(task, hdr->args.context->state, |
373 | data->ds_clp, hdr->lseg); | 362 | hdr->ds_clp, hdr->lseg); |
374 | 363 | ||
375 | switch (err) { | 364 | switch (err) { |
376 | case -NFS4ERR_RESET_TO_MDS: | 365 | case -NFS4ERR_RESET_TO_MDS: |
377 | filelayout_reset_write(data); | 366 | filelayout_reset_write(hdr); |
378 | return task->tk_status; | 367 | return task->tk_status; |
379 | case -EAGAIN: | 368 | case -EAGAIN: |
380 | rpc_restart_call_prepare(task); | 369 | rpc_restart_call_prepare(task); |
381 | return -EAGAIN; | 370 | return -EAGAIN; |
382 | } | 371 | } |
383 | 372 | ||
384 | filelayout_set_layoutcommit(data); | 373 | filelayout_set_layoutcommit(hdr); |
385 | return 0; | 374 | return 0; |
386 | } | 375 | } |
387 | 376 | ||
@@ -419,57 +408,57 @@ static int filelayout_commit_done_cb(struct rpc_task *task, | |||
419 | 408 | ||
420 | static void filelayout_write_prepare(struct rpc_task *task, void *data) | 409 | static void filelayout_write_prepare(struct rpc_task *task, void *data) |
421 | { | 410 | { |
422 | struct nfs_pgio_data *wdata = data; | 411 | struct nfs_pgio_header *hdr = data; |
423 | 412 | ||
424 | if (unlikely(test_bit(NFS_CONTEXT_BAD, &wdata->args.context->flags))) { | 413 | if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) { |
425 | rpc_exit(task, -EIO); | 414 | rpc_exit(task, -EIO); |
426 | return; | 415 | return; |
427 | } | 416 | } |
428 | if (filelayout_reset_to_mds(wdata->header->lseg)) { | 417 | if (filelayout_reset_to_mds(hdr->lseg)) { |
429 | dprintk("%s task %u reset io to MDS\n", __func__, task->tk_pid); | 418 | dprintk("%s task %u reset io to MDS\n", __func__, task->tk_pid); |
430 | filelayout_reset_write(wdata); | 419 | filelayout_reset_write(hdr); |
431 | rpc_exit(task, 0); | 420 | rpc_exit(task, 0); |
432 | return; | 421 | return; |
433 | } | 422 | } |
434 | if (nfs41_setup_sequence(wdata->ds_clp->cl_session, | 423 | if (nfs41_setup_sequence(hdr->ds_clp->cl_session, |
435 | &wdata->args.seq_args, | 424 | &hdr->args.seq_args, |
436 | &wdata->res.seq_res, | 425 | &hdr->res.seq_res, |
437 | task)) | 426 | task)) |
438 | return; | 427 | return; |
439 | if (nfs4_set_rw_stateid(&wdata->args.stateid, wdata->args.context, | 428 | if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context, |
440 | wdata->args.lock_context, FMODE_WRITE) == -EIO) | 429 | hdr->args.lock_context, FMODE_WRITE) == -EIO) |
441 | rpc_exit(task, -EIO); /* lost lock, terminate I/O */ | 430 | rpc_exit(task, -EIO); /* lost lock, terminate I/O */ |
442 | } | 431 | } |
443 | 432 | ||
444 | static void filelayout_write_call_done(struct rpc_task *task, void *data) | 433 | static void filelayout_write_call_done(struct rpc_task *task, void *data) |
445 | { | 434 | { |
446 | struct nfs_pgio_data *wdata = data; | 435 | struct nfs_pgio_header *hdr = data; |
447 | 436 | ||
448 | if (test_bit(NFS_IOHDR_REDO, &wdata->header->flags) && | 437 | if (test_bit(NFS_IOHDR_REDO, &hdr->flags) && |
449 | task->tk_status == 0) { | 438 | task->tk_status == 0) { |
450 | nfs41_sequence_done(task, &wdata->res.seq_res); | 439 | nfs41_sequence_done(task, &hdr->res.seq_res); |
451 | return; | 440 | return; |
452 | } | 441 | } |
453 | 442 | ||
454 | /* Note this may cause RPC to be resent */ | 443 | /* Note this may cause RPC to be resent */ |
455 | wdata->header->mds_ops->rpc_call_done(task, data); | 444 | hdr->mds_ops->rpc_call_done(task, data); |
456 | } | 445 | } |
457 | 446 | ||
458 | static void filelayout_write_count_stats(struct rpc_task *task, void *data) | 447 | static void filelayout_write_count_stats(struct rpc_task *task, void *data) |
459 | { | 448 | { |
460 | struct nfs_pgio_data *wdata = data; | 449 | struct nfs_pgio_header *hdr = data; |
461 | 450 | ||
462 | rpc_count_iostats(task, NFS_SERVER(wdata->header->inode)->client->cl_metrics); | 451 | rpc_count_iostats(task, NFS_SERVER(hdr->inode)->client->cl_metrics); |
463 | } | 452 | } |
464 | 453 | ||
465 | static void filelayout_write_release(void *data) | 454 | static void filelayout_write_release(void *data) |
466 | { | 455 | { |
467 | struct nfs_pgio_data *wdata = data; | 456 | struct nfs_pgio_header *hdr = data; |
468 | struct pnfs_layout_hdr *lo = wdata->header->lseg->pls_layout; | 457 | struct pnfs_layout_hdr *lo = hdr->lseg->pls_layout; |
469 | 458 | ||
470 | filelayout_fenceme(lo->plh_inode, lo); | 459 | filelayout_fenceme(lo->plh_inode, lo); |
471 | nfs_put_client(wdata->ds_clp); | 460 | nfs_put_client(hdr->ds_clp); |
472 | wdata->header->mds_ops->rpc_release(data); | 461 | hdr->mds_ops->rpc_release(data); |
473 | } | 462 | } |
474 | 463 | ||
475 | static void filelayout_commit_prepare(struct rpc_task *task, void *data) | 464 | static void filelayout_commit_prepare(struct rpc_task *task, void *data) |
@@ -529,19 +518,18 @@ static const struct rpc_call_ops filelayout_commit_call_ops = { | |||
529 | }; | 518 | }; |
530 | 519 | ||
531 | static enum pnfs_try_status | 520 | static enum pnfs_try_status |
532 | filelayout_read_pagelist(struct nfs_pgio_data *data) | 521 | filelayout_read_pagelist(struct nfs_pgio_header *hdr) |
533 | { | 522 | { |
534 | struct nfs_pgio_header *hdr = data->header; | ||
535 | struct pnfs_layout_segment *lseg = hdr->lseg; | 523 | struct pnfs_layout_segment *lseg = hdr->lseg; |
536 | struct nfs4_pnfs_ds *ds; | 524 | struct nfs4_pnfs_ds *ds; |
537 | struct rpc_clnt *ds_clnt; | 525 | struct rpc_clnt *ds_clnt; |
538 | loff_t offset = data->args.offset; | 526 | loff_t offset = hdr->args.offset; |
539 | u32 j, idx; | 527 | u32 j, idx; |
540 | struct nfs_fh *fh; | 528 | struct nfs_fh *fh; |
541 | 529 | ||
542 | dprintk("--> %s ino %lu pgbase %u req %Zu@%llu\n", | 530 | dprintk("--> %s ino %lu pgbase %u req %Zu@%llu\n", |
543 | __func__, hdr->inode->i_ino, | 531 | __func__, hdr->inode->i_ino, |
544 | data->args.pgbase, (size_t)data->args.count, offset); | 532 | hdr->args.pgbase, (size_t)hdr->args.count, offset); |
545 | 533 | ||
546 | /* Retrieve the correct rpc_client for the byte range */ | 534 | /* Retrieve the correct rpc_client for the byte range */ |
547 | j = nfs4_fl_calc_j_index(lseg, offset); | 535 | j = nfs4_fl_calc_j_index(lseg, offset); |
@@ -559,30 +547,29 @@ filelayout_read_pagelist(struct nfs_pgio_data *data) | |||
559 | 547 | ||
560 | /* No multipath support. Use first DS */ | 548 | /* No multipath support. Use first DS */ |
561 | atomic_inc(&ds->ds_clp->cl_count); | 549 | atomic_inc(&ds->ds_clp->cl_count); |
562 | data->ds_clp = ds->ds_clp; | 550 | hdr->ds_clp = ds->ds_clp; |
563 | data->ds_idx = idx; | 551 | hdr->ds_idx = idx; |
564 | fh = nfs4_fl_select_ds_fh(lseg, j); | 552 | fh = nfs4_fl_select_ds_fh(lseg, j); |
565 | if (fh) | 553 | if (fh) |
566 | data->args.fh = fh; | 554 | hdr->args.fh = fh; |
567 | 555 | ||
568 | data->args.offset = filelayout_get_dserver_offset(lseg, offset); | 556 | hdr->args.offset = filelayout_get_dserver_offset(lseg, offset); |
569 | data->mds_offset = offset; | 557 | hdr->mds_offset = offset; |
570 | 558 | ||
571 | /* Perform an asynchronous read to ds */ | 559 | /* Perform an asynchronous read to ds */ |
572 | nfs_initiate_pgio(ds_clnt, data, | 560 | nfs_initiate_pgio(ds_clnt, hdr, |
573 | &filelayout_read_call_ops, 0, RPC_TASK_SOFTCONN); | 561 | &filelayout_read_call_ops, 0, RPC_TASK_SOFTCONN); |
574 | return PNFS_ATTEMPTED; | 562 | return PNFS_ATTEMPTED; |
575 | } | 563 | } |
576 | 564 | ||
577 | /* Perform async writes. */ | 565 | /* Perform async writes. */ |
578 | static enum pnfs_try_status | 566 | static enum pnfs_try_status |
579 | filelayout_write_pagelist(struct nfs_pgio_data *data, int sync) | 567 | filelayout_write_pagelist(struct nfs_pgio_header *hdr, int sync) |
580 | { | 568 | { |
581 | struct nfs_pgio_header *hdr = data->header; | ||
582 | struct pnfs_layout_segment *lseg = hdr->lseg; | 569 | struct pnfs_layout_segment *lseg = hdr->lseg; |
583 | struct nfs4_pnfs_ds *ds; | 570 | struct nfs4_pnfs_ds *ds; |
584 | struct rpc_clnt *ds_clnt; | 571 | struct rpc_clnt *ds_clnt; |
585 | loff_t offset = data->args.offset; | 572 | loff_t offset = hdr->args.offset; |
586 | u32 j, idx; | 573 | u32 j, idx; |
587 | struct nfs_fh *fh; | 574 | struct nfs_fh *fh; |
588 | 575 | ||
@@ -598,21 +585,20 @@ filelayout_write_pagelist(struct nfs_pgio_data *data, int sync) | |||
598 | return PNFS_NOT_ATTEMPTED; | 585 | return PNFS_NOT_ATTEMPTED; |
599 | 586 | ||
600 | dprintk("%s ino %lu sync %d req %Zu@%llu DS: %s cl_count %d\n", | 587 | dprintk("%s ino %lu sync %d req %Zu@%llu DS: %s cl_count %d\n", |
601 | __func__, hdr->inode->i_ino, sync, (size_t) data->args.count, | 588 | __func__, hdr->inode->i_ino, sync, (size_t) hdr->args.count, |
602 | offset, ds->ds_remotestr, atomic_read(&ds->ds_clp->cl_count)); | 589 | offset, ds->ds_remotestr, atomic_read(&ds->ds_clp->cl_count)); |
603 | 590 | ||
604 | data->pgio_done_cb = filelayout_write_done_cb; | 591 | hdr->pgio_done_cb = filelayout_write_done_cb; |
605 | atomic_inc(&ds->ds_clp->cl_count); | 592 | atomic_inc(&ds->ds_clp->cl_count); |
606 | data->ds_clp = ds->ds_clp; | 593 | hdr->ds_clp = ds->ds_clp; |
607 | data->ds_idx = idx; | 594 | hdr->ds_idx = idx; |
608 | fh = nfs4_fl_select_ds_fh(lseg, j); | 595 | fh = nfs4_fl_select_ds_fh(lseg, j); |
609 | if (fh) | 596 | if (fh) |
610 | data->args.fh = fh; | 597 | hdr->args.fh = fh; |
611 | 598 | hdr->args.offset = filelayout_get_dserver_offset(lseg, offset); | |
612 | data->args.offset = filelayout_get_dserver_offset(lseg, offset); | ||
613 | 599 | ||
614 | /* Perform an asynchronous write */ | 600 | /* Perform an asynchronous write */ |
615 | nfs_initiate_pgio(ds_clnt, data, | 601 | nfs_initiate_pgio(ds_clnt, hdr, |
616 | &filelayout_write_call_ops, sync, | 602 | &filelayout_write_call_ops, sync, |
617 | RPC_TASK_SOFTCONN); | 603 | RPC_TASK_SOFTCONN); |
618 | return PNFS_ATTEMPTED; | 604 | return PNFS_ATTEMPTED; |
@@ -1023,6 +1009,7 @@ static u32 select_bucket_index(struct nfs4_filelayout_segment *fl, u32 j) | |||
1023 | 1009 | ||
1024 | /* The generic layer is about to remove the req from the commit list. | 1010 | /* The generic layer is about to remove the req from the commit list. |
1025 | * If this will make the bucket empty, it will need to put the lseg reference. | 1011 | * If this will make the bucket empty, it will need to put the lseg reference. |
1012 | * Note this must be called holding the inode (/cinfo) lock | ||
1026 | */ | 1013 | */ |
1027 | static void | 1014 | static void |
1028 | filelayout_clear_request_commit(struct nfs_page *req, | 1015 | filelayout_clear_request_commit(struct nfs_page *req, |
@@ -1030,7 +1017,6 @@ filelayout_clear_request_commit(struct nfs_page *req, | |||
1030 | { | 1017 | { |
1031 | struct pnfs_layout_segment *freeme = NULL; | 1018 | struct pnfs_layout_segment *freeme = NULL; |
1032 | 1019 | ||
1033 | spin_lock(cinfo->lock); | ||
1034 | if (!test_and_clear_bit(PG_COMMIT_TO_DS, &req->wb_flags)) | 1020 | if (!test_and_clear_bit(PG_COMMIT_TO_DS, &req->wb_flags)) |
1035 | goto out; | 1021 | goto out; |
1036 | cinfo->ds->nwritten--; | 1022 | cinfo->ds->nwritten--; |
@@ -1045,22 +1031,25 @@ filelayout_clear_request_commit(struct nfs_page *req, | |||
1045 | } | 1031 | } |
1046 | out: | 1032 | out: |
1047 | nfs_request_remove_commit_list(req, cinfo); | 1033 | nfs_request_remove_commit_list(req, cinfo); |
1048 | spin_unlock(cinfo->lock); | 1034 | pnfs_put_lseg_async(freeme); |
1049 | pnfs_put_lseg(freeme); | ||
1050 | } | 1035 | } |
1051 | 1036 | ||
1052 | static struct list_head * | 1037 | static void |
1053 | filelayout_choose_commit_list(struct nfs_page *req, | 1038 | filelayout_mark_request_commit(struct nfs_page *req, |
1054 | struct pnfs_layout_segment *lseg, | 1039 | struct pnfs_layout_segment *lseg, |
1055 | struct nfs_commit_info *cinfo) | 1040 | struct nfs_commit_info *cinfo) |
1041 | |||
1056 | { | 1042 | { |
1057 | struct nfs4_filelayout_segment *fl = FILELAYOUT_LSEG(lseg); | 1043 | struct nfs4_filelayout_segment *fl = FILELAYOUT_LSEG(lseg); |
1058 | u32 i, j; | 1044 | u32 i, j; |
1059 | struct list_head *list; | 1045 | struct list_head *list; |
1060 | struct pnfs_commit_bucket *buckets; | 1046 | struct pnfs_commit_bucket *buckets; |
1061 | 1047 | ||
1062 | if (fl->commit_through_mds) | 1048 | if (fl->commit_through_mds) { |
1063 | return &cinfo->mds->list; | 1049 | list = &cinfo->mds->list; |
1050 | spin_lock(cinfo->lock); | ||
1051 | goto mds_commit; | ||
1052 | } | ||
1064 | 1053 | ||
1065 | /* Note that we are calling nfs4_fl_calc_j_index on each page | 1054 | /* Note that we are calling nfs4_fl_calc_j_index on each page |
1066 | * that ends up being committed to a data server. An attractive | 1055 | * that ends up being committed to a data server. An attractive |
@@ -1084,19 +1073,22 @@ filelayout_choose_commit_list(struct nfs_page *req, | |||
1084 | } | 1073 | } |
1085 | set_bit(PG_COMMIT_TO_DS, &req->wb_flags); | 1074 | set_bit(PG_COMMIT_TO_DS, &req->wb_flags); |
1086 | cinfo->ds->nwritten++; | 1075 | cinfo->ds->nwritten++; |
1087 | spin_unlock(cinfo->lock); | ||
1088 | return list; | ||
1089 | } | ||
1090 | 1076 | ||
1091 | static void | 1077 | mds_commit: |
1092 | filelayout_mark_request_commit(struct nfs_page *req, | 1078 | /* nfs_request_add_commit_list(). We need to add req to list without |
1093 | struct pnfs_layout_segment *lseg, | 1079 | * dropping cinfo lock. |
1094 | struct nfs_commit_info *cinfo) | 1080 | */ |
1095 | { | 1081 | set_bit(PG_CLEAN, &(req)->wb_flags); |
1096 | struct list_head *list; | 1082 | nfs_list_add_request(req, list); |
1097 | 1083 | cinfo->mds->ncommit++; | |
1098 | list = filelayout_choose_commit_list(req, lseg, cinfo); | 1084 | spin_unlock(cinfo->lock); |
1099 | nfs_request_add_commit_list(req, list, cinfo); | 1085 | if (!cinfo->dreq) { |
1086 | inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS); | ||
1087 | inc_bdi_stat(page_file_mapping(req->wb_page)->backing_dev_info, | ||
1088 | BDI_RECLAIMABLE); | ||
1089 | __mark_inode_dirty(req->wb_context->dentry->d_inode, | ||
1090 | I_DIRTY_DATASYNC); | ||
1091 | } | ||
1100 | } | 1092 | } |
1101 | 1093 | ||
1102 | static u32 calc_ds_index_from_commit(struct pnfs_layout_segment *lseg, u32 i) | 1094 | static u32 calc_ds_index_from_commit(struct pnfs_layout_segment *lseg, u32 i) |
@@ -1244,15 +1236,63 @@ restart: | |||
1244 | spin_unlock(cinfo->lock); | 1236 | spin_unlock(cinfo->lock); |
1245 | } | 1237 | } |
1246 | 1238 | ||
1239 | /* filelayout_search_commit_reqs - Search lists in @cinfo for the head request | ||
1240 | * for @page | ||
1241 | * @cinfo - commit info for current inode | ||
1242 | * @page - page to search for matching head request | ||
1243 | * | ||
1244 | * Returns the head request if one is found, otherwise returns NULL. | ||
1245 | */ | ||
1246 | static struct nfs_page * | ||
1247 | filelayout_search_commit_reqs(struct nfs_commit_info *cinfo, struct page *page) | ||
1248 | { | ||
1249 | struct nfs_page *freq, *t; | ||
1250 | struct pnfs_commit_bucket *b; | ||
1251 | int i; | ||
1252 | |||
1253 | /* Linearly search the commit lists for each bucket until a matching | ||
1254 | * request is found */ | ||
1255 | for (i = 0, b = cinfo->ds->buckets; i < cinfo->ds->nbuckets; i++, b++) { | ||
1256 | list_for_each_entry_safe(freq, t, &b->written, wb_list) { | ||
1257 | if (freq->wb_page == page) | ||
1258 | return freq->wb_head; | ||
1259 | } | ||
1260 | list_for_each_entry_safe(freq, t, &b->committing, wb_list) { | ||
1261 | if (freq->wb_page == page) | ||
1262 | return freq->wb_head; | ||
1263 | } | ||
1264 | } | ||
1265 | |||
1266 | return NULL; | ||
1267 | } | ||
1268 | |||
1269 | static void filelayout_retry_commit(struct nfs_commit_info *cinfo, int idx) | ||
1270 | { | ||
1271 | struct pnfs_ds_commit_info *fl_cinfo = cinfo->ds; | ||
1272 | struct pnfs_commit_bucket *bucket = fl_cinfo->buckets; | ||
1273 | struct pnfs_layout_segment *freeme; | ||
1274 | int i; | ||
1275 | |||
1276 | for (i = idx; i < fl_cinfo->nbuckets; i++, bucket++) { | ||
1277 | if (list_empty(&bucket->committing)) | ||
1278 | continue; | ||
1279 | nfs_retry_commit(&bucket->committing, bucket->clseg, cinfo); | ||
1280 | spin_lock(cinfo->lock); | ||
1281 | freeme = bucket->clseg; | ||
1282 | bucket->clseg = NULL; | ||
1283 | spin_unlock(cinfo->lock); | ||
1284 | pnfs_put_lseg(freeme); | ||
1285 | } | ||
1286 | } | ||
1287 | |||
1247 | static unsigned int | 1288 | static unsigned int |
1248 | alloc_ds_commits(struct nfs_commit_info *cinfo, struct list_head *list) | 1289 | alloc_ds_commits(struct nfs_commit_info *cinfo, struct list_head *list) |
1249 | { | 1290 | { |
1250 | struct pnfs_ds_commit_info *fl_cinfo; | 1291 | struct pnfs_ds_commit_info *fl_cinfo; |
1251 | struct pnfs_commit_bucket *bucket; | 1292 | struct pnfs_commit_bucket *bucket; |
1252 | struct nfs_commit_data *data; | 1293 | struct nfs_commit_data *data; |
1253 | int i, j; | 1294 | int i; |
1254 | unsigned int nreq = 0; | 1295 | unsigned int nreq = 0; |
1255 | struct pnfs_layout_segment *freeme; | ||
1256 | 1296 | ||
1257 | fl_cinfo = cinfo->ds; | 1297 | fl_cinfo = cinfo->ds; |
1258 | bucket = fl_cinfo->buckets; | 1298 | bucket = fl_cinfo->buckets; |
@@ -1272,16 +1312,7 @@ alloc_ds_commits(struct nfs_commit_info *cinfo, struct list_head *list) | |||
1272 | } | 1312 | } |
1273 | 1313 | ||
1274 | /* Clean up on error */ | 1314 | /* Clean up on error */ |
1275 | for (j = i; j < fl_cinfo->nbuckets; j++, bucket++) { | 1315 | filelayout_retry_commit(cinfo, i); |
1276 | if (list_empty(&bucket->committing)) | ||
1277 | continue; | ||
1278 | nfs_retry_commit(&bucket->committing, bucket->clseg, cinfo); | ||
1279 | spin_lock(cinfo->lock); | ||
1280 | freeme = bucket->clseg; | ||
1281 | bucket->clseg = NULL; | ||
1282 | spin_unlock(cinfo->lock); | ||
1283 | pnfs_put_lseg(freeme); | ||
1284 | } | ||
1285 | /* Caller will clean up entries put on list */ | 1316 | /* Caller will clean up entries put on list */ |
1286 | return nreq; | 1317 | return nreq; |
1287 | } | 1318 | } |
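filelayout_search_commit_reqs() backs the new .search_commit_reqs hook by walking every bucket's written and committing lists until it finds a request covering the page, and filelayout_retry_commit() factors out the error-path requeueing so the -ENOMEM path in filelayout_commit_pagelist() can reuse it. A stand-alone sketch of the bucket walk, with the structures reduced to the fields the search needs:

	#include <stddef.h>

	struct list_head { struct list_head *next, *prev; };

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct page;			/* opaque, as in the kernel */

	struct nfs_page_model {
		struct list_head wb_list;
		struct page *wb_page;
		struct nfs_page_model *wb_head;
	};

	struct commit_bucket { struct list_head written, committing; };

	static struct nfs_page_model *
	search_list(struct list_head *head, struct page *page)
	{
		struct list_head *pos;

		for (pos = head->next; pos != head; pos = pos->next) {
			struct nfs_page_model *req =
				container_of(pos, struct nfs_page_model, wb_list);

			if (req->wb_page == page)
				return req->wb_head;
		}
		return NULL;
	}

	/* Linear scan over every bucket until a request covering @page is found. */
	static struct nfs_page_model *
	search_commit_reqs(struct commit_bucket *buckets, int nbuckets,
			   struct page *page)
	{
		int i;

		for (i = 0; i < nbuckets; i++) {
			struct nfs_page_model *req;

			req = search_list(&buckets[i].written, page);
			if (!req)
				req = search_list(&buckets[i].committing, page);
			if (req)
				return req;
		}
		return NULL;
	}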
@@ -1301,8 +1332,12 @@ filelayout_commit_pagelist(struct inode *inode, struct list_head *mds_pages, | |||
1301 | data->lseg = NULL; | 1332 | data->lseg = NULL; |
1302 | list_add(&data->pages, &list); | 1333 | list_add(&data->pages, &list); |
1303 | nreq++; | 1334 | nreq++; |
1304 | } else | 1335 | } else { |
1305 | nfs_retry_commit(mds_pages, NULL, cinfo); | 1336 | nfs_retry_commit(mds_pages, NULL, cinfo); |
1337 | filelayout_retry_commit(cinfo, 0); | ||
1338 | cinfo->completion_ops->error_cleanup(NFS_I(inode)); | ||
1339 | return -ENOMEM; | ||
1340 | } | ||
1306 | } | 1341 | } |
1307 | 1342 | ||
1308 | nreq += alloc_ds_commits(cinfo, &list); | 1343 | nreq += alloc_ds_commits(cinfo, &list); |
@@ -1380,6 +1415,7 @@ static struct pnfs_layoutdriver_type filelayout_type = { | |||
1380 | .clear_request_commit = filelayout_clear_request_commit, | 1415 | .clear_request_commit = filelayout_clear_request_commit, |
1381 | .scan_commit_lists = filelayout_scan_commit_lists, | 1416 | .scan_commit_lists = filelayout_scan_commit_lists, |
1382 | .recover_commit_reqs = filelayout_recover_commit_reqs, | 1417 | .recover_commit_reqs = filelayout_recover_commit_reqs, |
1418 | .search_commit_reqs = filelayout_search_commit_reqs, | ||
1383 | .commit_pagelist = filelayout_commit_pagelist, | 1419 | .commit_pagelist = filelayout_commit_pagelist, |
1384 | .read_pagelist = filelayout_read_pagelist, | 1420 | .read_pagelist = filelayout_read_pagelist, |
1385 | .write_pagelist = filelayout_write_pagelist, | 1421 | .write_pagelist = filelayout_write_pagelist, |
diff --git a/fs/nfs/filelayout/filelayoutdev.c b/fs/nfs/filelayout/filelayoutdev.c index e2a0361e24c6..8540516f4d71 100644 --- a/fs/nfs/filelayout/filelayoutdev.c +++ b/fs/nfs/filelayout/filelayoutdev.c | |||
@@ -695,7 +695,7 @@ filelayout_get_device_info(struct inode *inode, | |||
695 | if (pdev == NULL) | 695 | if (pdev == NULL) |
696 | return NULL; | 696 | return NULL; |
697 | 697 | ||
698 | pages = kzalloc(max_pages * sizeof(struct page *), gfp_flags); | 698 | pages = kcalloc(max_pages, sizeof(struct page *), gfp_flags); |
699 | if (pages == NULL) { | 699 | if (pages == NULL) { |
700 | kfree(pdev); | 700 | kfree(pdev); |
701 | return NULL; | 701 | return NULL; |
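Switching from kzalloc(max_pages * sizeof(struct page *)) to kcalloc() lets the allocator reject a multiplication overflow instead of quietly handing back a short buffer. The same guard expressed in plain libc terms (userspace calloc(), not kernel API):

	#include <stdint.h>
	#include <stdlib.h>

	/* Allocate an array of n pointers, refusing on multiplication overflow. */
	static void **alloc_ptr_array(size_t n)
	{
		if (n > SIZE_MAX / sizeof(void *))
			return NULL;			/* n * sizeof(void *) would wrap */
		return calloc(n, sizeof(void *));	/* calloc performs the same check */
	}

	int main(void)
	{
		void **pages = alloc_ptr_array(16);

		if (!pages)
			return 1;
		free(pages);
		return 0;
	}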
diff --git a/fs/nfs/getroot.c b/fs/nfs/getroot.c index b94f80420a58..880618a8b048 100644 --- a/fs/nfs/getroot.c +++ b/fs/nfs/getroot.c | |||
@@ -112,7 +112,7 @@ struct dentry *nfs_get_root(struct super_block *sb, struct nfs_fh *mntfh, | |||
112 | * if the dentry tree reaches them; however if the dentry already | 112 | * if the dentry tree reaches them; however if the dentry already |
113 | * exists, we'll pick it up at this point and use it as the root | 113 | * exists, we'll pick it up at this point and use it as the root |
114 | */ | 114 | */ |
115 | ret = d_obtain_alias(inode); | 115 | ret = d_obtain_root(inode); |
116 | if (IS_ERR(ret)) { | 116 | if (IS_ERR(ret)) { |
117 | dprintk("nfs_get_root: get root dentry failed\n"); | 117 | dprintk("nfs_get_root: get root dentry failed\n"); |
118 | goto out; | 118 | goto out; |
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 68921b01b792..577a36f0a510 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c | |||
@@ -1002,6 +1002,15 @@ int nfs_revalidate_inode(struct nfs_server *server, struct inode *inode) | |||
1002 | } | 1002 | } |
1003 | EXPORT_SYMBOL_GPL(nfs_revalidate_inode); | 1003 | EXPORT_SYMBOL_GPL(nfs_revalidate_inode); |
1004 | 1004 | ||
1005 | int nfs_revalidate_inode_rcu(struct nfs_server *server, struct inode *inode) | ||
1006 | { | ||
1007 | if (!(NFS_I(inode)->cache_validity & | ||
1008 | (NFS_INO_INVALID_ATTR|NFS_INO_INVALID_LABEL)) | ||
1009 | && !nfs_attribute_cache_expired(inode)) | ||
1010 | return NFS_STALE(inode) ? -ESTALE : 0; | ||
1011 | return -ECHILD; | ||
1012 | } | ||
1013 | |||
1005 | static int nfs_invalidate_mapping(struct inode *inode, struct address_space *mapping) | 1014 | static int nfs_invalidate_mapping(struct inode *inode, struct address_space *mapping) |
1006 | { | 1015 | { |
1007 | struct nfs_inode *nfsi = NFS_I(inode); | 1016 | struct nfs_inode *nfsi = NFS_I(inode); |
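nfs_revalidate_inode_rcu(), added above, is the non-blocking counterpart of nfs_revalidate_inode(): when nothing marks the cached attributes invalid or expired it can answer immediately, otherwise it returns -ECHILD so the caller drops out of RCU mode and retries. A tiny model of that three-way answer; the flag values are illustrative only:

	#include <errno.h>
	#include <stdbool.h>

	#define INO_INVALID_ATTR	0x01	/* illustrative flag values */
	#define INO_INVALID_LABEL	0x02

	static int revalidate_rcu(unsigned long cache_validity, bool cache_expired,
				  bool stale)
	{
		if (!(cache_validity & (INO_INVALID_ATTR | INO_INVALID_LABEL)) &&
		    !cache_expired)
			return stale ? -ESTALE : 0;	/* can answer without blocking */
		return -ECHILD;				/* caller must leave RCU walk */
	}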
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index e2a45ae5014e..9056622d2230 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h | |||
@@ -247,11 +247,11 @@ void nfs_set_pgio_error(struct nfs_pgio_header *hdr, int error, loff_t pos); | |||
247 | int nfs_iocounter_wait(struct nfs_io_counter *c); | 247 | int nfs_iocounter_wait(struct nfs_io_counter *c); |
248 | 248 | ||
249 | extern const struct nfs_pageio_ops nfs_pgio_rw_ops; | 249 | extern const struct nfs_pageio_ops nfs_pgio_rw_ops; |
250 | struct nfs_rw_header *nfs_rw_header_alloc(const struct nfs_rw_ops *); | 250 | struct nfs_pgio_header *nfs_pgio_header_alloc(const struct nfs_rw_ops *); |
251 | void nfs_rw_header_free(struct nfs_pgio_header *); | 251 | void nfs_pgio_header_free(struct nfs_pgio_header *); |
252 | void nfs_pgio_data_release(struct nfs_pgio_data *); | 252 | void nfs_pgio_data_destroy(struct nfs_pgio_header *); |
253 | int nfs_generic_pgio(struct nfs_pageio_descriptor *, struct nfs_pgio_header *); | 253 | int nfs_generic_pgio(struct nfs_pageio_descriptor *, struct nfs_pgio_header *); |
254 | int nfs_initiate_pgio(struct rpc_clnt *, struct nfs_pgio_data *, | 254 | int nfs_initiate_pgio(struct rpc_clnt *, struct nfs_pgio_header *, |
255 | const struct rpc_call_ops *, int, int); | 255 | const struct rpc_call_ops *, int, int); |
256 | void nfs_free_request(struct nfs_page *req); | 256 | void nfs_free_request(struct nfs_page *req); |
257 | 257 | ||
@@ -451,6 +451,7 @@ int nfs_scan_commit(struct inode *inode, struct list_head *dst, | |||
451 | void nfs_mark_request_commit(struct nfs_page *req, | 451 | void nfs_mark_request_commit(struct nfs_page *req, |
452 | struct pnfs_layout_segment *lseg, | 452 | struct pnfs_layout_segment *lseg, |
453 | struct nfs_commit_info *cinfo); | 453 | struct nfs_commit_info *cinfo); |
454 | int nfs_write_need_commit(struct nfs_pgio_header *); | ||
454 | int nfs_generic_commit_list(struct inode *inode, struct list_head *head, | 455 | int nfs_generic_commit_list(struct inode *inode, struct list_head *head, |
455 | int how, struct nfs_commit_info *cinfo); | 456 | int how, struct nfs_commit_info *cinfo); |
456 | void nfs_retry_commit(struct list_head *page_list, | 457 | void nfs_retry_commit(struct list_head *page_list, |
@@ -491,7 +492,7 @@ static inline void nfs_inode_dio_wait(struct inode *inode) | |||
491 | extern ssize_t nfs_dreq_bytes_left(struct nfs_direct_req *dreq); | 492 | extern ssize_t nfs_dreq_bytes_left(struct nfs_direct_req *dreq); |
492 | 493 | ||
493 | /* nfs4proc.c */ | 494 | /* nfs4proc.c */ |
494 | extern void __nfs4_read_done_cb(struct nfs_pgio_data *); | 495 | extern void __nfs4_read_done_cb(struct nfs_pgio_header *); |
495 | extern struct nfs_client *nfs4_init_client(struct nfs_client *clp, | 496 | extern struct nfs_client *nfs4_init_client(struct nfs_client *clp, |
496 | const struct rpc_timeout *timeparms, | 497 | const struct rpc_timeout *timeparms, |
497 | const char *ip_addr); | 498 | const char *ip_addr); |
diff --git a/fs/nfs/nfs3acl.c b/fs/nfs/nfs3acl.c index 8f854dde4150..d0fec260132a 100644 --- a/fs/nfs/nfs3acl.c +++ b/fs/nfs/nfs3acl.c | |||
@@ -256,7 +256,7 @@ nfs3_list_one_acl(struct inode *inode, int type, const char *name, void *data, | |||
256 | char *p = data + *result; | 256 | char *p = data + *result; |
257 | 257 | ||
258 | acl = get_acl(inode, type); | 258 | acl = get_acl(inode, type); |
259 | if (!acl) | 259 | if (IS_ERR_OR_NULL(acl)) |
260 | return 0; | 260 | return 0; |
261 | 261 | ||
262 | posix_acl_release(acl); | 262 | posix_acl_release(acl); |
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c index f0afa291fd58..809670eba52a 100644 --- a/fs/nfs/nfs3proc.c +++ b/fs/nfs/nfs3proc.c | |||
@@ -795,41 +795,44 @@ nfs3_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle, | |||
795 | return status; | 795 | return status; |
796 | } | 796 | } |
797 | 797 | ||
798 | static int nfs3_read_done(struct rpc_task *task, struct nfs_pgio_data *data) | 798 | static int nfs3_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr) |
799 | { | 799 | { |
800 | struct inode *inode = data->header->inode; | 800 | struct inode *inode = hdr->inode; |
801 | 801 | ||
802 | if (nfs3_async_handle_jukebox(task, inode)) | 802 | if (nfs3_async_handle_jukebox(task, inode)) |
803 | return -EAGAIN; | 803 | return -EAGAIN; |
804 | 804 | ||
805 | nfs_invalidate_atime(inode); | 805 | nfs_invalidate_atime(inode); |
806 | nfs_refresh_inode(inode, &data->fattr); | 806 | nfs_refresh_inode(inode, &hdr->fattr); |
807 | return 0; | 807 | return 0; |
808 | } | 808 | } |
809 | 809 | ||
810 | static void nfs3_proc_read_setup(struct nfs_pgio_data *data, struct rpc_message *msg) | 810 | static void nfs3_proc_read_setup(struct nfs_pgio_header *hdr, |
811 | struct rpc_message *msg) | ||
811 | { | 812 | { |
812 | msg->rpc_proc = &nfs3_procedures[NFS3PROC_READ]; | 813 | msg->rpc_proc = &nfs3_procedures[NFS3PROC_READ]; |
813 | } | 814 | } |
814 | 815 | ||
815 | static int nfs3_proc_pgio_rpc_prepare(struct rpc_task *task, struct nfs_pgio_data *data) | 816 | static int nfs3_proc_pgio_rpc_prepare(struct rpc_task *task, |
817 | struct nfs_pgio_header *hdr) | ||
816 | { | 818 | { |
817 | rpc_call_start(task); | 819 | rpc_call_start(task); |
818 | return 0; | 820 | return 0; |
819 | } | 821 | } |
820 | 822 | ||
821 | static int nfs3_write_done(struct rpc_task *task, struct nfs_pgio_data *data) | 823 | static int nfs3_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr) |
822 | { | 824 | { |
823 | struct inode *inode = data->header->inode; | 825 | struct inode *inode = hdr->inode; |
824 | 826 | ||
825 | if (nfs3_async_handle_jukebox(task, inode)) | 827 | if (nfs3_async_handle_jukebox(task, inode)) |
826 | return -EAGAIN; | 828 | return -EAGAIN; |
827 | if (task->tk_status >= 0) | 829 | if (task->tk_status >= 0) |
828 | nfs_post_op_update_inode_force_wcc(inode, data->res.fattr); | 830 | nfs_post_op_update_inode_force_wcc(inode, hdr->res.fattr); |
829 | return 0; | 831 | return 0; |
830 | } | 832 | } |
831 | 833 | ||
832 | static void nfs3_proc_write_setup(struct nfs_pgio_data *data, struct rpc_message *msg) | 834 | static void nfs3_proc_write_setup(struct nfs_pgio_header *hdr, |
835 | struct rpc_message *msg) | ||
833 | { | 836 | { |
834 | msg->rpc_proc = &nfs3_procedures[NFS3PROC_WRITE]; | 837 | msg->rpc_proc = &nfs3_procedures[NFS3PROC_WRITE]; |
835 | } | 838 | } |
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index ba2affa51941..92193eddb41d 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h | |||
@@ -54,7 +54,7 @@ struct nfs4_minor_version_ops { | |||
54 | const nfs4_stateid *); | 54 | const nfs4_stateid *); |
55 | int (*find_root_sec)(struct nfs_server *, struct nfs_fh *, | 55 | int (*find_root_sec)(struct nfs_server *, struct nfs_fh *, |
56 | struct nfs_fsinfo *); | 56 | struct nfs_fsinfo *); |
57 | int (*free_lock_state)(struct nfs_server *, | 57 | void (*free_lock_state)(struct nfs_server *, |
58 | struct nfs4_lock_state *); | 58 | struct nfs4_lock_state *); |
59 | const struct rpc_call_ops *call_sync_ops; | 59 | const struct rpc_call_ops *call_sync_ops; |
60 | const struct nfs4_state_recovery_ops *reboot_recovery_ops; | 60 | const struct nfs4_state_recovery_ops *reboot_recovery_ops; |
@@ -129,27 +129,17 @@ enum { | |||
129 | * LOCK: one nfs4_state (LOCK) to hold the lock stateid nfs4_state(OPEN) | 129 | * LOCK: one nfs4_state (LOCK) to hold the lock stateid nfs4_state(OPEN) |
130 | */ | 130 | */ |
131 | 131 | ||
132 | struct nfs4_lock_owner { | ||
133 | unsigned int lo_type; | ||
134 | #define NFS4_ANY_LOCK_TYPE (0U) | ||
135 | #define NFS4_FLOCK_LOCK_TYPE (1U << 0) | ||
136 | #define NFS4_POSIX_LOCK_TYPE (1U << 1) | ||
137 | union { | ||
138 | fl_owner_t posix_owner; | ||
139 | pid_t flock_owner; | ||
140 | } lo_u; | ||
141 | }; | ||
142 | |||
143 | struct nfs4_lock_state { | 132 | struct nfs4_lock_state { |
144 | struct list_head ls_locks; /* Other lock stateids */ | 133 | struct list_head ls_locks; /* Other lock stateids */ |
145 | struct nfs4_state * ls_state; /* Pointer to open state */ | 134 | struct nfs4_state * ls_state; /* Pointer to open state */ |
146 | #define NFS_LOCK_INITIALIZED 0 | 135 | #define NFS_LOCK_INITIALIZED 0 |
147 | #define NFS_LOCK_LOST 1 | 136 | #define NFS_LOCK_LOST 1 |
148 | unsigned long ls_flags; | 137 | unsigned long ls_flags; |
149 | struct nfs_seqid_counter ls_seqid; | 138 | struct nfs_seqid_counter ls_seqid; |
150 | nfs4_stateid ls_stateid; | 139 | nfs4_stateid ls_stateid; |
151 | atomic_t ls_count; | 140 | atomic_t ls_count; |
152 | struct nfs4_lock_owner ls_owner; | 141 | fl_owner_t ls_owner; |
142 | struct work_struct ls_release; | ||
153 | }; | 143 | }; |
154 | 144 | ||
155 | /* bits for nfs4_state->flags */ | 145 | /* bits for nfs4_state->flags */ |
@@ -337,11 +327,11 @@ nfs4_state_protect(struct nfs_client *clp, unsigned long sp4_mode, | |||
337 | */ | 327 | */ |
338 | static inline void | 328 | static inline void |
339 | nfs4_state_protect_write(struct nfs_client *clp, struct rpc_clnt **clntp, | 329 | nfs4_state_protect_write(struct nfs_client *clp, struct rpc_clnt **clntp, |
340 | struct rpc_message *msg, struct nfs_pgio_data *wdata) | 330 | struct rpc_message *msg, struct nfs_pgio_header *hdr) |
341 | { | 331 | { |
342 | if (_nfs4_state_protect(clp, NFS_SP4_MACH_CRED_WRITE, clntp, msg) && | 332 | if (_nfs4_state_protect(clp, NFS_SP4_MACH_CRED_WRITE, clntp, msg) && |
343 | !test_bit(NFS_SP4_MACH_CRED_COMMIT, &clp->cl_sp4_flags)) | 333 | !test_bit(NFS_SP4_MACH_CRED_COMMIT, &clp->cl_sp4_flags)) |
344 | wdata->args.stable = NFS_FILE_SYNC; | 334 | hdr->args.stable = NFS_FILE_SYNC; |
345 | } | 335 | } |
346 | #else /* CONFIG_NFS_v4_1 */ | 336 | #else /* CONFIG_NFS_v4_1 */ |
347 | static inline struct nfs4_session *nfs4_get_session(const struct nfs_server *server) | 337 | static inline struct nfs4_session *nfs4_get_session(const struct nfs_server *server) |
@@ -369,7 +359,7 @@ nfs4_state_protect(struct nfs_client *clp, unsigned long sp4_flags, | |||
369 | 359 | ||
370 | static inline void | 360 | static inline void |
371 | nfs4_state_protect_write(struct nfs_client *clp, struct rpc_clnt **clntp, | 361 | nfs4_state_protect_write(struct nfs_client *clp, struct rpc_clnt **clntp, |
372 | struct rpc_message *msg, struct nfs_pgio_data *wdata) | 362 | struct rpc_message *msg, struct nfs_pgio_header *hdr) |
373 | { | 363 | { |
374 | } | 364 | } |
375 | #endif /* CONFIG_NFS_V4_1 */ | 365 | #endif /* CONFIG_NFS_V4_1 */ |
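The nfs4_fs.h hunk above flattens lock ownership: the old nfs4_lock_owner union (fl_owner_t for POSIX locks, pid_t for flock) becomes a single fl_owner_t, and a work_struct is added so the lock state can be released from a workqueue. A minimal standalone sketch of how the owner match simplifies, using stand-in types rather than the kernel definitions:

    #include <stdbool.h>

    typedef void *fl_owner_t;

    struct lock_state {                 /* stand-in for nfs4_lock_state */
            fl_owner_t ls_owner;        /* replaces the old type/union pair */
    };

    static bool owner_matches(const struct lock_state *ls, fl_owner_t owner)
    {
            /* Previously: switch on lo_type, then compare posix_owner or
             * flock_owner; now a single pointer comparison suffices. */
            return ls->ls_owner == owner;
    }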
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c index aa9ef4876046..53e435a95260 100644 --- a/fs/nfs/nfs4client.c +++ b/fs/nfs/nfs4client.c | |||
@@ -855,6 +855,11 @@ struct nfs_client *nfs4_set_ds_client(struct nfs_client* mds_clp, | |||
855 | }; | 855 | }; |
856 | struct rpc_timeout ds_timeout; | 856 | struct rpc_timeout ds_timeout; |
857 | struct nfs_client *clp; | 857 | struct nfs_client *clp; |
858 | char buf[INET6_ADDRSTRLEN + 1]; | ||
859 | |||
860 | if (rpc_ntop(ds_addr, buf, sizeof(buf)) <= 0) | ||
861 | return ERR_PTR(-EINVAL); | ||
862 | cl_init.hostname = buf; | ||
858 | 863 | ||
859 | /* | 864 | /* |
860 | * Set an authflavor equal to the MDS value. Use the MDS nfs_client | 865 | * Set an authflavor equal to the MDS value. Use the MDS nfs_client |
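The nfs4client.c hunk records a printable hostname for the data-server client, formatted from its transport address into an INET6_ADDRSTRLEN-sized stack buffer, and refuses to set up the client if rpc_ntop() cannot format the address. A rough userspace analogue of the same sizing and error check, using inet_ntop() instead of the sunrpc helper:

    #include <arpa/inet.h>
    #include <netinet/in.h>

    /* Returns 0 and fills buf on success, -1 if the address cannot be
     * formatted (mirrors the ERR_PTR(-EINVAL) bail-out above). */
    static int format_ds_hostname(const struct in6_addr *addr,
                                  char buf[INET6_ADDRSTRLEN + 1])
    {
            if (inet_ntop(AF_INET6, addr, buf, INET6_ADDRSTRLEN + 1) == NULL)
                    return -1;
            return 0;
    }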
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 4bf3d97cc5a0..75ae8d22f067 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c | |||
@@ -1952,6 +1952,14 @@ static int _nfs4_recover_proc_open(struct nfs4_opendata *data) | |||
1952 | return status; | 1952 | return status; |
1953 | } | 1953 | } |
1954 | 1954 | ||
1955 | /* | ||
1956 | * Additional permission checks in order to distinguish between an | ||
1957 | * open for read, and an open for execute. This works around the | ||
1958 | * fact that NFSv4 OPEN treats read and execute permissions as being | ||
1959 | * the same. | ||
1960 | * Note that in the non-execute case, we want to turn off permission | ||
1961 | * checking if we just created a new file (POSIX open() semantics). | ||
1962 | */ | ||
1955 | static int nfs4_opendata_access(struct rpc_cred *cred, | 1963 | static int nfs4_opendata_access(struct rpc_cred *cred, |
1956 | struct nfs4_opendata *opendata, | 1964 | struct nfs4_opendata *opendata, |
1957 | struct nfs4_state *state, fmode_t fmode, | 1965 | struct nfs4_state *state, fmode_t fmode, |
@@ -1966,14 +1974,14 @@ static int nfs4_opendata_access(struct rpc_cred *cred, | |||
1966 | return 0; | 1974 | return 0; |
1967 | 1975 | ||
1968 | mask = 0; | 1976 | mask = 0; |
1969 | /* don't check MAY_WRITE - a newly created file may not have | 1977 | /* |
1970 | * write mode bits, but POSIX allows the creating process to write. | 1978 | * Use openflags to check for exec, because fmode won't |
1971 | * use openflags to check for exec, because fmode won't | 1979 | * always have FMODE_EXEC set when file open for exec. |
1972 | * always have FMODE_EXEC set when file open for exec. */ | 1980 | */ |
1973 | if (openflags & __FMODE_EXEC) { | 1981 | if (openflags & __FMODE_EXEC) { |
1974 | /* ONLY check for exec rights */ | 1982 | /* ONLY check for exec rights */ |
1975 | mask = MAY_EXEC; | 1983 | mask = MAY_EXEC; |
1976 | } else if (fmode & FMODE_READ) | 1984 | } else if ((fmode & FMODE_READ) && !opendata->file_created) |
1977 | mask = MAY_READ; | 1985 | mask = MAY_READ; |
1978 | 1986 | ||
1979 | cache.cred = cred; | 1987 | cache.cred = cred; |
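The access-check hunk now also skips the MAY_READ probe when the OPEN just created the file: POSIX guarantees the creating process access regardless of the new mode bits, so only exec opens and reads of pre-existing files need the extra ACCESS round trip. A small standalone sketch of the resulting mask selection; the flag values below are placeholders, not the kernel's:

    #define MAY_EXEC     0x01
    #define MAY_READ     0x04
    #define FMODE_READ   0x01
    #define __FMODE_EXEC 0x20

    static int opendata_access_mask(int openflags, int fmode, int file_created)
    {
            if (openflags & __FMODE_EXEC)
                    return MAY_EXEC;            /* exec opens check exec rights only */
            if ((fmode & FMODE_READ) && !file_created)
                    return MAY_READ;            /* reads of existing files still probe */
            return 0;                           /* fresh create: nothing to check */
    }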
@@ -2216,8 +2224,15 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata, | |||
2216 | seq = raw_seqcount_begin(&sp->so_reclaim_seqcount); | 2224 | seq = raw_seqcount_begin(&sp->so_reclaim_seqcount); |
2217 | 2225 | ||
2218 | ret = _nfs4_proc_open(opendata); | 2226 | ret = _nfs4_proc_open(opendata); |
2219 | if (ret != 0) | 2227 | if (ret != 0) { |
2228 | if (ret == -ENOENT) { | ||
2229 | d_drop(opendata->dentry); | ||
2230 | d_add(opendata->dentry, NULL); | ||
2231 | nfs_set_verifier(opendata->dentry, | ||
2232 | nfs_save_change_attribute(opendata->dir->d_inode)); | ||
2233 | } | ||
2220 | goto out; | 2234 | goto out; |
2235 | } | ||
2221 | 2236 | ||
2222 | state = nfs4_opendata_to_nfs4_state(opendata); | 2237 | state = nfs4_opendata_to_nfs4_state(opendata); |
2223 | ret = PTR_ERR(state); | 2238 | ret = PTR_ERR(state); |
@@ -2647,6 +2662,48 @@ static const struct rpc_call_ops nfs4_close_ops = { | |||
2647 | .rpc_release = nfs4_free_closedata, | 2662 | .rpc_release = nfs4_free_closedata, |
2648 | }; | 2663 | }; |
2649 | 2664 | ||
2665 | static bool nfs4_state_has_opener(struct nfs4_state *state) | ||
2666 | { | ||
2667 | /* first check existing openers */ | ||
2668 | if (test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0 && | ||
2669 | state->n_rdonly != 0) | ||
2670 | return true; | ||
2671 | |||
2672 | if (test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0 && | ||
2673 | state->n_wronly != 0) | ||
2674 | return true; | ||
2675 | |||
2676 | if (test_bit(NFS_O_RDWR_STATE, &state->flags) != 0 && | ||
2677 | state->n_rdwr != 0) | ||
2678 | return true; | ||
2679 | |||
2680 | return false; | ||
2681 | } | ||
2682 | |||
2683 | static bool nfs4_roc(struct inode *inode) | ||
2684 | { | ||
2685 | struct nfs_inode *nfsi = NFS_I(inode); | ||
2686 | struct nfs_open_context *ctx; | ||
2687 | struct nfs4_state *state; | ||
2688 | |||
2689 | spin_lock(&inode->i_lock); | ||
2690 | list_for_each_entry(ctx, &nfsi->open_files, list) { | ||
2691 | state = ctx->state; | ||
2692 | if (state == NULL) | ||
2693 | continue; | ||
2694 | if (nfs4_state_has_opener(state)) { | ||
2695 | spin_unlock(&inode->i_lock); | ||
2696 | return false; | ||
2697 | } | ||
2698 | } | ||
2699 | spin_unlock(&inode->i_lock); | ||
2700 | |||
2701 | if (nfs4_check_delegation(inode, FMODE_READ)) | ||
2702 | return false; | ||
2703 | |||
2704 | return pnfs_roc(inode); | ||
2705 | } | ||
2706 | |||
2650 | /* | 2707 | /* |
2651 | * It is possible for data to be read/written from a mem-mapped file | 2708 | * It is possible for data to be read/written from a mem-mapped file |
2652 | * after the sys_close call (which hits the vfs layer as a flush). | 2709 | * after the sys_close call (which hits the vfs layer as a flush). |
@@ -2697,7 +2754,7 @@ int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait) | |||
2697 | calldata->res.fattr = &calldata->fattr; | 2754 | calldata->res.fattr = &calldata->fattr; |
2698 | calldata->res.seqid = calldata->arg.seqid; | 2755 | calldata->res.seqid = calldata->arg.seqid; |
2699 | calldata->res.server = server; | 2756 | calldata->res.server = server; |
2700 | calldata->roc = pnfs_roc(state->inode); | 2757 | calldata->roc = nfs4_roc(state->inode); |
2701 | nfs_sb_active(calldata->inode->i_sb); | 2758 | nfs_sb_active(calldata->inode->i_sb); |
2702 | 2759 | ||
2703 | msg.rpc_argp = &calldata->arg; | 2760 | msg.rpc_argp = &calldata->arg; |
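nfs4_roc() above gates pNFS return-on-close: the layout is returned at CLOSE time only if no open context still counts as an opener and no read delegation is outstanding; otherwise the layout has to stay usable. A standalone model of the opener test, with a stub struct and illustrative flag bits in place of nfs4_state:

    #include <stdbool.h>

    #define RDONLY_STATE (1UL << 0)
    #define WRONLY_STATE (1UL << 1)
    #define RDWR_STATE   (1UL << 2)

    struct open_state {                 /* stand-in for nfs4_state */
            unsigned long flags;        /* open modes granted by the server */
            int n_rdonly, n_wronly, n_rdwr;
    };

    static bool state_has_opener(const struct open_state *s)
    {
            if ((s->flags & RDONLY_STATE) && s->n_rdonly)
                    return true;
            if ((s->flags & WRONLY_STATE) && s->n_wronly)
                    return true;
            if ((s->flags & RDWR_STATE) && s->n_rdwr)
                    return true;
            return false;
    }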
@@ -4033,24 +4090,25 @@ static bool nfs4_error_stateid_expired(int err) | |||
4033 | return false; | 4090 | return false; |
4034 | } | 4091 | } |
4035 | 4092 | ||
4036 | void __nfs4_read_done_cb(struct nfs_pgio_data *data) | 4093 | void __nfs4_read_done_cb(struct nfs_pgio_header *hdr) |
4037 | { | 4094 | { |
4038 | nfs_invalidate_atime(data->header->inode); | 4095 | nfs_invalidate_atime(hdr->inode); |
4039 | } | 4096 | } |
4040 | 4097 | ||
4041 | static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_pgio_data *data) | 4098 | static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_pgio_header *hdr) |
4042 | { | 4099 | { |
4043 | struct nfs_server *server = NFS_SERVER(data->header->inode); | 4100 | struct nfs_server *server = NFS_SERVER(hdr->inode); |
4044 | 4101 | ||
4045 | trace_nfs4_read(data, task->tk_status); | 4102 | trace_nfs4_read(hdr, task->tk_status); |
4046 | if (nfs4_async_handle_error(task, server, data->args.context->state) == -EAGAIN) { | 4103 | if (nfs4_async_handle_error(task, server, |
4104 | hdr->args.context->state) == -EAGAIN) { | ||
4047 | rpc_restart_call_prepare(task); | 4105 | rpc_restart_call_prepare(task); |
4048 | return -EAGAIN; | 4106 | return -EAGAIN; |
4049 | } | 4107 | } |
4050 | 4108 | ||
4051 | __nfs4_read_done_cb(data); | 4109 | __nfs4_read_done_cb(hdr); |
4052 | if (task->tk_status > 0) | 4110 | if (task->tk_status > 0) |
4053 | renew_lease(server, data->timestamp); | 4111 | renew_lease(server, hdr->timestamp); |
4054 | return 0; | 4112 | return 0; |
4055 | } | 4113 | } |
4056 | 4114 | ||
@@ -4068,54 +4126,59 @@ static bool nfs4_read_stateid_changed(struct rpc_task *task, | |||
4068 | return true; | 4126 | return true; |
4069 | } | 4127 | } |
4070 | 4128 | ||
4071 | static int nfs4_read_done(struct rpc_task *task, struct nfs_pgio_data *data) | 4129 | static int nfs4_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr) |
4072 | { | 4130 | { |
4073 | 4131 | ||
4074 | dprintk("--> %s\n", __func__); | 4132 | dprintk("--> %s\n", __func__); |
4075 | 4133 | ||
4076 | if (!nfs4_sequence_done(task, &data->res.seq_res)) | 4134 | if (!nfs4_sequence_done(task, &hdr->res.seq_res)) |
4077 | return -EAGAIN; | 4135 | return -EAGAIN; |
4078 | if (nfs4_read_stateid_changed(task, &data->args)) | 4136 | if (nfs4_read_stateid_changed(task, &hdr->args)) |
4079 | return -EAGAIN; | 4137 | return -EAGAIN; |
4080 | return data->pgio_done_cb ? data->pgio_done_cb(task, data) : | 4138 | return hdr->pgio_done_cb ? hdr->pgio_done_cb(task, hdr) : |
4081 | nfs4_read_done_cb(task, data); | 4139 | nfs4_read_done_cb(task, hdr); |
4082 | } | 4140 | } |
4083 | 4141 | ||
4084 | static void nfs4_proc_read_setup(struct nfs_pgio_data *data, struct rpc_message *msg) | 4142 | static void nfs4_proc_read_setup(struct nfs_pgio_header *hdr, |
4143 | struct rpc_message *msg) | ||
4085 | { | 4144 | { |
4086 | data->timestamp = jiffies; | 4145 | hdr->timestamp = jiffies; |
4087 | data->pgio_done_cb = nfs4_read_done_cb; | 4146 | hdr->pgio_done_cb = nfs4_read_done_cb; |
4088 | msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ]; | 4147 | msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ]; |
4089 | nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0); | 4148 | nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 0); |
4090 | } | 4149 | } |
4091 | 4150 | ||
4092 | static int nfs4_proc_pgio_rpc_prepare(struct rpc_task *task, struct nfs_pgio_data *data) | 4151 | static int nfs4_proc_pgio_rpc_prepare(struct rpc_task *task, |
4152 | struct nfs_pgio_header *hdr) | ||
4093 | { | 4153 | { |
4094 | if (nfs4_setup_sequence(NFS_SERVER(data->header->inode), | 4154 | if (nfs4_setup_sequence(NFS_SERVER(hdr->inode), |
4095 | &data->args.seq_args, | 4155 | &hdr->args.seq_args, |
4096 | &data->res.seq_res, | 4156 | &hdr->res.seq_res, |
4097 | task)) | 4157 | task)) |
4098 | return 0; | 4158 | return 0; |
4099 | if (nfs4_set_rw_stateid(&data->args.stateid, data->args.context, | 4159 | if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context, |
4100 | data->args.lock_context, data->header->rw_ops->rw_mode) == -EIO) | 4160 | hdr->args.lock_context, |
4161 | hdr->rw_ops->rw_mode) == -EIO) | ||
4101 | return -EIO; | 4162 | return -EIO; |
4102 | if (unlikely(test_bit(NFS_CONTEXT_BAD, &data->args.context->flags))) | 4163 | if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) |
4103 | return -EIO; | 4164 | return -EIO; |
4104 | return 0; | 4165 | return 0; |
4105 | } | 4166 | } |
4106 | 4167 | ||
4107 | static int nfs4_write_done_cb(struct rpc_task *task, struct nfs_pgio_data *data) | 4168 | static int nfs4_write_done_cb(struct rpc_task *task, |
4169 | struct nfs_pgio_header *hdr) | ||
4108 | { | 4170 | { |
4109 | struct inode *inode = data->header->inode; | 4171 | struct inode *inode = hdr->inode; |
4110 | 4172 | ||
4111 | trace_nfs4_write(data, task->tk_status); | 4173 | trace_nfs4_write(hdr, task->tk_status); |
4112 | if (nfs4_async_handle_error(task, NFS_SERVER(inode), data->args.context->state) == -EAGAIN) { | 4174 | if (nfs4_async_handle_error(task, NFS_SERVER(inode), |
4175 | hdr->args.context->state) == -EAGAIN) { | ||
4113 | rpc_restart_call_prepare(task); | 4176 | rpc_restart_call_prepare(task); |
4114 | return -EAGAIN; | 4177 | return -EAGAIN; |
4115 | } | 4178 | } |
4116 | if (task->tk_status >= 0) { | 4179 | if (task->tk_status >= 0) { |
4117 | renew_lease(NFS_SERVER(inode), data->timestamp); | 4180 | renew_lease(NFS_SERVER(inode), hdr->timestamp); |
4118 | nfs_post_op_update_inode_force_wcc(inode, &data->fattr); | 4181 | nfs_post_op_update_inode_force_wcc(inode, &hdr->fattr); |
4119 | } | 4182 | } |
4120 | return 0; | 4183 | return 0; |
4121 | } | 4184 | } |
@@ -4134,23 +4197,21 @@ static bool nfs4_write_stateid_changed(struct rpc_task *task, | |||
4134 | return true; | 4197 | return true; |
4135 | } | 4198 | } |
4136 | 4199 | ||
4137 | static int nfs4_write_done(struct rpc_task *task, struct nfs_pgio_data *data) | 4200 | static int nfs4_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr) |
4138 | { | 4201 | { |
4139 | if (!nfs4_sequence_done(task, &data->res.seq_res)) | 4202 | if (!nfs4_sequence_done(task, &hdr->res.seq_res)) |
4140 | return -EAGAIN; | 4203 | return -EAGAIN; |
4141 | if (nfs4_write_stateid_changed(task, &data->args)) | 4204 | if (nfs4_write_stateid_changed(task, &hdr->args)) |
4142 | return -EAGAIN; | 4205 | return -EAGAIN; |
4143 | return data->pgio_done_cb ? data->pgio_done_cb(task, data) : | 4206 | return hdr->pgio_done_cb ? hdr->pgio_done_cb(task, hdr) : |
4144 | nfs4_write_done_cb(task, data); | 4207 | nfs4_write_done_cb(task, hdr); |
4145 | } | 4208 | } |
4146 | 4209 | ||
4147 | static | 4210 | static |
4148 | bool nfs4_write_need_cache_consistency_data(const struct nfs_pgio_data *data) | 4211 | bool nfs4_write_need_cache_consistency_data(struct nfs_pgio_header *hdr) |
4149 | { | 4212 | { |
4150 | const struct nfs_pgio_header *hdr = data->header; | ||
4151 | |||
4152 | /* Don't request attributes for pNFS or O_DIRECT writes */ | 4213 | /* Don't request attributes for pNFS or O_DIRECT writes */ |
4153 | if (data->ds_clp != NULL || hdr->dreq != NULL) | 4214 | if (hdr->ds_clp != NULL || hdr->dreq != NULL) |
4154 | return false; | 4215 | return false; |
4155 | /* Otherwise, request attributes if and only if we don't hold | 4216 | /* Otherwise, request attributes if and only if we don't hold |
4156 | * a delegation | 4217 | * a delegation |
@@ -4158,23 +4219,24 @@ bool nfs4_write_need_cache_consistency_data(const struct nfs_pgio_data *data) | |||
4158 | return nfs4_have_delegation(hdr->inode, FMODE_READ) == 0; | 4219 | return nfs4_have_delegation(hdr->inode, FMODE_READ) == 0; |
4159 | } | 4220 | } |
4160 | 4221 | ||
4161 | static void nfs4_proc_write_setup(struct nfs_pgio_data *data, struct rpc_message *msg) | 4222 | static void nfs4_proc_write_setup(struct nfs_pgio_header *hdr, |
4223 | struct rpc_message *msg) | ||
4162 | { | 4224 | { |
4163 | struct nfs_server *server = NFS_SERVER(data->header->inode); | 4225 | struct nfs_server *server = NFS_SERVER(hdr->inode); |
4164 | 4226 | ||
4165 | if (!nfs4_write_need_cache_consistency_data(data)) { | 4227 | if (!nfs4_write_need_cache_consistency_data(hdr)) { |
4166 | data->args.bitmask = NULL; | 4228 | hdr->args.bitmask = NULL; |
4167 | data->res.fattr = NULL; | 4229 | hdr->res.fattr = NULL; |
4168 | } else | 4230 | } else |
4169 | data->args.bitmask = server->cache_consistency_bitmask; | 4231 | hdr->args.bitmask = server->cache_consistency_bitmask; |
4170 | 4232 | ||
4171 | if (!data->pgio_done_cb) | 4233 | if (!hdr->pgio_done_cb) |
4172 | data->pgio_done_cb = nfs4_write_done_cb; | 4234 | hdr->pgio_done_cb = nfs4_write_done_cb; |
4173 | data->res.server = server; | 4235 | hdr->res.server = server; |
4174 | data->timestamp = jiffies; | 4236 | hdr->timestamp = jiffies; |
4175 | 4237 | ||
4176 | msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE]; | 4238 | msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE]; |
4177 | nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1); | 4239 | nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 1); |
4178 | } | 4240 | } |
4179 | 4241 | ||
4180 | static void nfs4_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data) | 4242 | static void nfs4_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data) |
@@ -4881,6 +4943,18 @@ nfs4_init_callback_netid(const struct nfs_client *clp, char *buf, size_t len) | |||
4881 | return scnprintf(buf, len, "tcp"); | 4943 | return scnprintf(buf, len, "tcp"); |
4882 | } | 4944 | } |
4883 | 4945 | ||
4946 | static void nfs4_setclientid_done(struct rpc_task *task, void *calldata) | ||
4947 | { | ||
4948 | struct nfs4_setclientid *sc = calldata; | ||
4949 | |||
4950 | if (task->tk_status == 0) | ||
4951 | sc->sc_cred = get_rpccred(task->tk_rqstp->rq_cred); | ||
4952 | } | ||
4953 | |||
4954 | static const struct rpc_call_ops nfs4_setclientid_ops = { | ||
4955 | .rpc_call_done = nfs4_setclientid_done, | ||
4956 | }; | ||
4957 | |||
4884 | /** | 4958 | /** |
4885 | * nfs4_proc_setclientid - Negotiate client ID | 4959 | * nfs4_proc_setclientid - Negotiate client ID |
4886 | * @clp: state data structure | 4960 | * @clp: state data structure |
@@ -4907,6 +4981,14 @@ int nfs4_proc_setclientid(struct nfs_client *clp, u32 program, | |||
4907 | .rpc_resp = res, | 4981 | .rpc_resp = res, |
4908 | .rpc_cred = cred, | 4982 | .rpc_cred = cred, |
4909 | }; | 4983 | }; |
4984 | struct rpc_task *task; | ||
4985 | struct rpc_task_setup task_setup_data = { | ||
4986 | .rpc_client = clp->cl_rpcclient, | ||
4987 | .rpc_message = &msg, | ||
4988 | .callback_ops = &nfs4_setclientid_ops, | ||
4989 | .callback_data = &setclientid, | ||
4990 | .flags = RPC_TASK_TIMEOUT, | ||
4991 | }; | ||
4910 | int status; | 4992 | int status; |
4911 | 4993 | ||
4912 | /* nfs_client_id4 */ | 4994 | /* nfs_client_id4 */ |
@@ -4933,7 +5015,18 @@ int nfs4_proc_setclientid(struct nfs_client *clp, u32 program, | |||
4933 | dprintk("NFS call setclientid auth=%s, '%.*s'\n", | 5015 | dprintk("NFS call setclientid auth=%s, '%.*s'\n", |
4934 | clp->cl_rpcclient->cl_auth->au_ops->au_name, | 5016 | clp->cl_rpcclient->cl_auth->au_ops->au_name, |
4935 | setclientid.sc_name_len, setclientid.sc_name); | 5017 | setclientid.sc_name_len, setclientid.sc_name); |
4936 | status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); | 5018 | task = rpc_run_task(&task_setup_data); |
5019 | if (IS_ERR(task)) { | ||
5020 | status = PTR_ERR(task); | ||
5021 | goto out; | ||
5022 | } | ||
5023 | status = task->tk_status; | ||
5024 | if (setclientid.sc_cred) { | ||
5025 | clp->cl_acceptor = rpcauth_stringify_acceptor(setclientid.sc_cred); | ||
5026 | put_rpccred(setclientid.sc_cred); | ||
5027 | } | ||
5028 | rpc_put_task(task); | ||
5029 | out: | ||
4937 | trace_nfs4_setclientid(clp, status); | 5030 | trace_nfs4_setclientid(clp, status); |
4938 | dprintk("NFS reply setclientid: %d\n", status); | 5031 | dprintk("NFS reply setclientid: %d\n", status); |
4939 | return status; | 5032 | return status; |
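SETCLIENTID switches from rpc_call_sync() to rpc_run_task() so that an rpc_call_done callback can grab a reference to the request's GSS credential before the transport releases it; the acceptor name is then stringified into clp->cl_acceptor once the task completes. A minimal sketch of that capture-in-callback, consume-after-call pattern, with hypothetical types standing in for the rpc and cred structures:

    #include <string.h>

    struct cred { int refs; char acceptor[64]; };
    struct setclientid_ctx { struct cred *sc_cred; };   /* like nfs4_setclientid */

    /* Completion callback: take a reference while the request still owns
     * the credential, and only on success. */
    static void setclientid_done(int status, struct cred *req_cred,
                                 struct setclientid_ctx *ctx)
    {
            if (status == 0) {
                    req_cred->refs++;
                    ctx->sc_cred = req_cred;
            }
    }

    /* Caller, after the task has run: copy out the acceptor, drop the ref. */
    static void setclientid_consume(struct setclientid_ctx *ctx,
                                    char *out, size_t outlen)
    {
            if (!ctx->sc_cred)
                    return;
            strncpy(out, ctx->sc_cred->acceptor, outlen - 1);
            out[outlen - 1] = '\0';
            ctx->sc_cred->refs--;
    }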
@@ -4975,6 +5068,9 @@ struct nfs4_delegreturndata { | |||
4975 | unsigned long timestamp; | 5068 | unsigned long timestamp; |
4976 | struct nfs_fattr fattr; | 5069 | struct nfs_fattr fattr; |
4977 | int rpc_status; | 5070 | int rpc_status; |
5071 | struct inode *inode; | ||
5072 | bool roc; | ||
5073 | u32 roc_barrier; | ||
4978 | }; | 5074 | }; |
4979 | 5075 | ||
4980 | static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata) | 5076 | static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata) |
@@ -4988,7 +5084,6 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata) | |||
4988 | switch (task->tk_status) { | 5084 | switch (task->tk_status) { |
4989 | case 0: | 5085 | case 0: |
4990 | renew_lease(data->res.server, data->timestamp); | 5086 | renew_lease(data->res.server, data->timestamp); |
4991 | break; | ||
4992 | case -NFS4ERR_ADMIN_REVOKED: | 5087 | case -NFS4ERR_ADMIN_REVOKED: |
4993 | case -NFS4ERR_DELEG_REVOKED: | 5088 | case -NFS4ERR_DELEG_REVOKED: |
4994 | case -NFS4ERR_BAD_STATEID: | 5089 | case -NFS4ERR_BAD_STATEID: |
@@ -4996,6 +5091,8 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata) | |||
4996 | case -NFS4ERR_STALE_STATEID: | 5091 | case -NFS4ERR_STALE_STATEID: |
4997 | case -NFS4ERR_EXPIRED: | 5092 | case -NFS4ERR_EXPIRED: |
4998 | task->tk_status = 0; | 5093 | task->tk_status = 0; |
5094 | if (data->roc) | ||
5095 | pnfs_roc_set_barrier(data->inode, data->roc_barrier); | ||
4999 | break; | 5096 | break; |
5000 | default: | 5097 | default: |
5001 | if (nfs4_async_handle_error(task, data->res.server, NULL) == | 5098 | if (nfs4_async_handle_error(task, data->res.server, NULL) == |
@@ -5009,6 +5106,10 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata) | |||
5009 | 5106 | ||
5010 | static void nfs4_delegreturn_release(void *calldata) | 5107 | static void nfs4_delegreturn_release(void *calldata) |
5011 | { | 5108 | { |
5109 | struct nfs4_delegreturndata *data = calldata; | ||
5110 | |||
5111 | if (data->roc) | ||
5112 | pnfs_roc_release(data->inode); | ||
5012 | kfree(calldata); | 5113 | kfree(calldata); |
5013 | } | 5114 | } |
5014 | 5115 | ||
@@ -5018,6 +5119,10 @@ static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data) | |||
5018 | 5119 | ||
5019 | d_data = (struct nfs4_delegreturndata *)data; | 5120 | d_data = (struct nfs4_delegreturndata *)data; |
5020 | 5121 | ||
5122 | if (d_data->roc && | ||
5123 | pnfs_roc_drain(d_data->inode, &d_data->roc_barrier, task)) | ||
5124 | return; | ||
5125 | |||
5021 | nfs4_setup_sequence(d_data->res.server, | 5126 | nfs4_setup_sequence(d_data->res.server, |
5022 | &d_data->args.seq_args, | 5127 | &d_data->args.seq_args, |
5023 | &d_data->res.seq_res, | 5128 | &d_data->res.seq_res, |
@@ -5061,6 +5166,9 @@ static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, co | |||
5061 | nfs_fattr_init(data->res.fattr); | 5166 | nfs_fattr_init(data->res.fattr); |
5062 | data->timestamp = jiffies; | 5167 | data->timestamp = jiffies; |
5063 | data->rpc_status = 0; | 5168 | data->rpc_status = 0; |
5169 | data->inode = inode; | ||
5170 | data->roc = list_empty(&NFS_I(inode)->open_files) ? | ||
5171 | pnfs_roc(inode) : false; | ||
5064 | 5172 | ||
5065 | task_setup_data.callback_data = data; | 5173 | task_setup_data.callback_data = data; |
5066 | msg.rpc_argp = &data->args; | 5174 | msg.rpc_argp = &data->args; |
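The delegreturn changes thread return-on-close through the whole RPC lifecycle: decided at setup time (only when the inode has no open files left), drained in ->rpc_call_prepare before the sequence is set up, the barrier recorded when the call completes, and the layout reference dropped in ->rpc_release. A compressed sketch of that ordering, with plain booleans standing in for the pnfs_roc_* helpers:

    #include <stdbool.h>

    struct delegreturn { bool roc; unsigned int roc_barrier; };

    /* setup: attempt return-on-close only when nothing is open any more */
    static void dr_setup(struct delegreturn *d, bool open_files, bool can_return)
    {
            d->roc = !open_files && can_return;      /* pnfs_roc() stand-in */
    }

    /* prepare: true means "requeue and wait", like pnfs_roc_drain() */
    static bool dr_must_wait(const struct delegreturn *d, bool layout_io_pending)
    {
            return d->roc && layout_io_pending;
    }

    /* done: remember the barrier so stale LAYOUTGETs are fenced off */
    static void dr_done(struct delegreturn *d, unsigned int barrier)
    {
            if (d->roc)
                    d->roc_barrier = barrier;        /* pnfs_roc_set_barrier() stand-in */
    }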
@@ -5834,8 +5942,10 @@ struct nfs_release_lockowner_data { | |||
5834 | static void nfs4_release_lockowner_prepare(struct rpc_task *task, void *calldata) | 5942 | static void nfs4_release_lockowner_prepare(struct rpc_task *task, void *calldata) |
5835 | { | 5943 | { |
5836 | struct nfs_release_lockowner_data *data = calldata; | 5944 | struct nfs_release_lockowner_data *data = calldata; |
5837 | nfs40_setup_sequence(data->server, | 5945 | struct nfs_server *server = data->server; |
5838 | &data->args.seq_args, &data->res.seq_res, task); | 5946 | nfs40_setup_sequence(server, &data->args.seq_args, |
5947 | &data->res.seq_res, task); | ||
5948 | data->args.lock_owner.clientid = server->nfs_client->cl_clientid; | ||
5839 | data->timestamp = jiffies; | 5949 | data->timestamp = jiffies; |
5840 | } | 5950 | } |
5841 | 5951 | ||
@@ -5852,6 +5962,8 @@ static void nfs4_release_lockowner_done(struct rpc_task *task, void *calldata) | |||
5852 | break; | 5962 | break; |
5853 | case -NFS4ERR_STALE_CLIENTID: | 5963 | case -NFS4ERR_STALE_CLIENTID: |
5854 | case -NFS4ERR_EXPIRED: | 5964 | case -NFS4ERR_EXPIRED: |
5965 | nfs4_schedule_lease_recovery(server->nfs_client); | ||
5966 | break; | ||
5855 | case -NFS4ERR_LEASE_MOVED: | 5967 | case -NFS4ERR_LEASE_MOVED: |
5856 | case -NFS4ERR_DELAY: | 5968 | case -NFS4ERR_DELAY: |
5857 | if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) | 5969 | if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) |
@@ -5872,7 +5984,8 @@ static const struct rpc_call_ops nfs4_release_lockowner_ops = { | |||
5872 | .rpc_release = nfs4_release_lockowner_release, | 5984 | .rpc_release = nfs4_release_lockowner_release, |
5873 | }; | 5985 | }; |
5874 | 5986 | ||
5875 | static int nfs4_release_lockowner(struct nfs_server *server, struct nfs4_lock_state *lsp) | 5987 | static void |
5988 | nfs4_release_lockowner(struct nfs_server *server, struct nfs4_lock_state *lsp) | ||
5876 | { | 5989 | { |
5877 | struct nfs_release_lockowner_data *data; | 5990 | struct nfs_release_lockowner_data *data; |
5878 | struct rpc_message msg = { | 5991 | struct rpc_message msg = { |
@@ -5880,11 +5993,11 @@ static int nfs4_release_lockowner(struct nfs_server *server, struct nfs4_lock_st | |||
5880 | }; | 5993 | }; |
5881 | 5994 | ||
5882 | if (server->nfs_client->cl_mvops->minor_version != 0) | 5995 | if (server->nfs_client->cl_mvops->minor_version != 0) |
5883 | return -EINVAL; | 5996 | return; |
5884 | 5997 | ||
5885 | data = kmalloc(sizeof(*data), GFP_NOFS); | 5998 | data = kmalloc(sizeof(*data), GFP_NOFS); |
5886 | if (!data) | 5999 | if (!data) |
5887 | return -ENOMEM; | 6000 | return; |
5888 | data->lsp = lsp; | 6001 | data->lsp = lsp; |
5889 | data->server = server; | 6002 | data->server = server; |
5890 | data->args.lock_owner.clientid = server->nfs_client->cl_clientid; | 6003 | data->args.lock_owner.clientid = server->nfs_client->cl_clientid; |
@@ -5895,7 +6008,6 @@ static int nfs4_release_lockowner(struct nfs_server *server, struct nfs4_lock_st | |||
5895 | msg.rpc_resp = &data->res; | 6008 | msg.rpc_resp = &data->res; |
5896 | nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0); | 6009 | nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0); |
5897 | rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, data); | 6010 | rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, data); |
5898 | return 0; | ||
5899 | } | 6011 | } |
5900 | 6012 | ||
5901 | #define XATTR_NAME_NFSV4_ACL "system.nfs4_acl" | 6013 | #define XATTR_NAME_NFSV4_ACL "system.nfs4_acl" |
@@ -8182,7 +8294,8 @@ static int nfs41_free_stateid(struct nfs_server *server, | |||
8182 | return ret; | 8294 | return ret; |
8183 | } | 8295 | } |
8184 | 8296 | ||
8185 | static int nfs41_free_lock_state(struct nfs_server *server, struct nfs4_lock_state *lsp) | 8297 | static void |
8298 | nfs41_free_lock_state(struct nfs_server *server, struct nfs4_lock_state *lsp) | ||
8186 | { | 8299 | { |
8187 | struct rpc_task *task; | 8300 | struct rpc_task *task; |
8188 | struct rpc_cred *cred = lsp->ls_state->owner->so_cred; | 8301 | struct rpc_cred *cred = lsp->ls_state->owner->so_cred; |
@@ -8190,9 +8303,8 @@ static int nfs41_free_lock_state(struct nfs_server *server, struct nfs4_lock_sta | |||
8190 | task = _nfs41_free_stateid(server, &lsp->ls_stateid, cred, false); | 8303 | task = _nfs41_free_stateid(server, &lsp->ls_stateid, cred, false); |
8191 | nfs4_free_lock_state(server, lsp); | 8304 | nfs4_free_lock_state(server, lsp); |
8192 | if (IS_ERR(task)) | 8305 | if (IS_ERR(task)) |
8193 | return PTR_ERR(task); | 8306 | return; |
8194 | rpc_put_task(task); | 8307 | rpc_put_task(task); |
8195 | return 0; | ||
8196 | } | 8308 | } |
8197 | 8309 | ||
8198 | static bool nfs41_match_stateid(const nfs4_stateid *s1, | 8310 | static bool nfs41_match_stateid(const nfs4_stateid *s1, |
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 42f121182167..a043f618cd5a 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c | |||
@@ -787,33 +787,36 @@ void nfs4_close_sync(struct nfs4_state *state, fmode_t fmode) | |||
787 | * that is compatible with current->files | 787 | * that is compatible with current->files |
788 | */ | 788 | */ |
789 | static struct nfs4_lock_state * | 789 | static struct nfs4_lock_state * |
790 | __nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t fl_owner, pid_t fl_pid, unsigned int type) | 790 | __nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t fl_owner) |
791 | { | 791 | { |
792 | struct nfs4_lock_state *pos; | 792 | struct nfs4_lock_state *pos; |
793 | list_for_each_entry(pos, &state->lock_states, ls_locks) { | 793 | list_for_each_entry(pos, &state->lock_states, ls_locks) { |
794 | if (type != NFS4_ANY_LOCK_TYPE && pos->ls_owner.lo_type != type) | 794 | if (pos->ls_owner != fl_owner) |
795 | continue; | 795 | continue; |
796 | switch (pos->ls_owner.lo_type) { | ||
797 | case NFS4_POSIX_LOCK_TYPE: | ||
798 | if (pos->ls_owner.lo_u.posix_owner != fl_owner) | ||
799 | continue; | ||
800 | break; | ||
801 | case NFS4_FLOCK_LOCK_TYPE: | ||
802 | if (pos->ls_owner.lo_u.flock_owner != fl_pid) | ||
803 | continue; | ||
804 | } | ||
805 | atomic_inc(&pos->ls_count); | 796 | atomic_inc(&pos->ls_count); |
806 | return pos; | 797 | return pos; |
807 | } | 798 | } |
808 | return NULL; | 799 | return NULL; |
809 | } | 800 | } |
810 | 801 | ||
802 | static void | ||
803 | free_lock_state_work(struct work_struct *work) | ||
804 | { | ||
805 | struct nfs4_lock_state *lsp = container_of(work, | ||
806 | struct nfs4_lock_state, ls_release); | ||
807 | struct nfs4_state *state = lsp->ls_state; | ||
808 | struct nfs_server *server = state->owner->so_server; | ||
809 | struct nfs_client *clp = server->nfs_client; | ||
810 | |||
811 | clp->cl_mvops->free_lock_state(server, lsp); | ||
812 | } | ||
813 | |||
811 | /* | 814 | /* |
812 | * Return a compatible lock_state. If no initialized lock_state structure | 815 | * Return a compatible lock_state. If no initialized lock_state structure |
813 | * exists, return an uninitialized one. | 816 | * exists, return an uninitialized one. |
814 | * | 817 | * |
815 | */ | 818 | */ |
816 | static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, fl_owner_t fl_owner, pid_t fl_pid, unsigned int type) | 819 | static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, fl_owner_t fl_owner) |
817 | { | 820 | { |
818 | struct nfs4_lock_state *lsp; | 821 | struct nfs4_lock_state *lsp; |
819 | struct nfs_server *server = state->owner->so_server; | 822 | struct nfs_server *server = state->owner->so_server; |
@@ -824,21 +827,12 @@ static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, f | |||
824 | nfs4_init_seqid_counter(&lsp->ls_seqid); | 827 | nfs4_init_seqid_counter(&lsp->ls_seqid); |
825 | atomic_set(&lsp->ls_count, 1); | 828 | atomic_set(&lsp->ls_count, 1); |
826 | lsp->ls_state = state; | 829 | lsp->ls_state = state; |
827 | lsp->ls_owner.lo_type = type; | 830 | lsp->ls_owner = fl_owner; |
828 | switch (lsp->ls_owner.lo_type) { | ||
829 | case NFS4_FLOCK_LOCK_TYPE: | ||
830 | lsp->ls_owner.lo_u.flock_owner = fl_pid; | ||
831 | break; | ||
832 | case NFS4_POSIX_LOCK_TYPE: | ||
833 | lsp->ls_owner.lo_u.posix_owner = fl_owner; | ||
834 | break; | ||
835 | default: | ||
836 | goto out_free; | ||
837 | } | ||
838 | lsp->ls_seqid.owner_id = ida_simple_get(&server->lockowner_id, 0, 0, GFP_NOFS); | 831 | lsp->ls_seqid.owner_id = ida_simple_get(&server->lockowner_id, 0, 0, GFP_NOFS); |
839 | if (lsp->ls_seqid.owner_id < 0) | 832 | if (lsp->ls_seqid.owner_id < 0) |
840 | goto out_free; | 833 | goto out_free; |
841 | INIT_LIST_HEAD(&lsp->ls_locks); | 834 | INIT_LIST_HEAD(&lsp->ls_locks); |
835 | INIT_WORK(&lsp->ls_release, free_lock_state_work); | ||
842 | return lsp; | 836 | return lsp; |
843 | out_free: | 837 | out_free: |
844 | kfree(lsp); | 838 | kfree(lsp); |
@@ -857,13 +851,13 @@ void nfs4_free_lock_state(struct nfs_server *server, struct nfs4_lock_state *lsp | |||
857 | * exists, return an uninitialized one. | 851 | * exists, return an uninitialized one. |
858 | * | 852 | * |
859 | */ | 853 | */ |
860 | static struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_owner_t owner, pid_t pid, unsigned int type) | 854 | static struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_owner_t owner) |
861 | { | 855 | { |
862 | struct nfs4_lock_state *lsp, *new = NULL; | 856 | struct nfs4_lock_state *lsp, *new = NULL; |
863 | 857 | ||
864 | for(;;) { | 858 | for(;;) { |
865 | spin_lock(&state->state_lock); | 859 | spin_lock(&state->state_lock); |
866 | lsp = __nfs4_find_lock_state(state, owner, pid, type); | 860 | lsp = __nfs4_find_lock_state(state, owner); |
867 | if (lsp != NULL) | 861 | if (lsp != NULL) |
868 | break; | 862 | break; |
869 | if (new != NULL) { | 863 | if (new != NULL) { |
@@ -874,7 +868,7 @@ static struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_ | |||
874 | break; | 868 | break; |
875 | } | 869 | } |
876 | spin_unlock(&state->state_lock); | 870 | spin_unlock(&state->state_lock); |
877 | new = nfs4_alloc_lock_state(state, owner, pid, type); | 871 | new = nfs4_alloc_lock_state(state, owner); |
878 | if (new == NULL) | 872 | if (new == NULL) |
879 | return NULL; | 873 | return NULL; |
880 | } | 874 | } |
@@ -902,13 +896,12 @@ void nfs4_put_lock_state(struct nfs4_lock_state *lsp) | |||
902 | if (list_empty(&state->lock_states)) | 896 | if (list_empty(&state->lock_states)) |
903 | clear_bit(LK_STATE_IN_USE, &state->flags); | 897 | clear_bit(LK_STATE_IN_USE, &state->flags); |
904 | spin_unlock(&state->state_lock); | 898 | spin_unlock(&state->state_lock); |
905 | server = state->owner->so_server; | 899 | if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags)) |
906 | if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags)) { | 900 | queue_work(nfsiod_workqueue, &lsp->ls_release); |
907 | struct nfs_client *clp = server->nfs_client; | 901 | else { |
908 | 902 | server = state->owner->so_server; | |
909 | clp->cl_mvops->free_lock_state(server, lsp); | ||
910 | } else | ||
911 | nfs4_free_lock_state(server, lsp); | 903 | nfs4_free_lock_state(server, lsp); |
904 | } | ||
912 | } | 905 | } |
913 | 906 | ||
914 | static void nfs4_fl_copy_lock(struct file_lock *dst, struct file_lock *src) | 907 | static void nfs4_fl_copy_lock(struct file_lock *dst, struct file_lock *src) |
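free_lock_state_work() runs from the nfsiod workqueue, so it has to recover the nfs4_lock_state from the embedded ls_release member; that is what the container_of() in the hunk above does. A self-contained illustration of the same pointer arithmetic, with a local macro and struct rather than the kernel headers:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct work { int pending; };

    struct lock_state {
            int owner_id;
            struct work release;        /* embedded, like ls_release */
    };

    static void release_handler(struct work *w)
    {
            /* Recover the enclosing object from the member pointer. */
            struct lock_state *ls = container_of(w, struct lock_state, release);
            printf("freeing lock state for owner %d\n", ls->owner_id);
    }

    int main(void)
    {
            struct lock_state ls = { .owner_id = 42, .release = { 0 } };
            release_handler(&ls.release);
            return 0;
    }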
@@ -935,13 +928,7 @@ int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl) | |||
935 | 928 | ||
936 | if (fl->fl_ops != NULL) | 929 | if (fl->fl_ops != NULL) |
937 | return 0; | 930 | return 0; |
938 | if (fl->fl_flags & FL_POSIX) | 931 | lsp = nfs4_get_lock_state(state, fl->fl_owner); |
939 | lsp = nfs4_get_lock_state(state, fl->fl_owner, 0, NFS4_POSIX_LOCK_TYPE); | ||
940 | else if (fl->fl_flags & FL_FLOCK) | ||
941 | lsp = nfs4_get_lock_state(state, NULL, fl->fl_pid, | ||
942 | NFS4_FLOCK_LOCK_TYPE); | ||
943 | else | ||
944 | return -EINVAL; | ||
945 | if (lsp == NULL) | 932 | if (lsp == NULL) |
946 | return -ENOMEM; | 933 | return -ENOMEM; |
947 | fl->fl_u.nfs4_fl.owner = lsp; | 934 | fl->fl_u.nfs4_fl.owner = lsp; |
@@ -955,7 +942,6 @@ static int nfs4_copy_lock_stateid(nfs4_stateid *dst, | |||
955 | { | 942 | { |
956 | struct nfs4_lock_state *lsp; | 943 | struct nfs4_lock_state *lsp; |
957 | fl_owner_t fl_owner; | 944 | fl_owner_t fl_owner; |
958 | pid_t fl_pid; | ||
959 | int ret = -ENOENT; | 945 | int ret = -ENOENT; |
960 | 946 | ||
961 | 947 | ||
@@ -966,9 +952,8 @@ static int nfs4_copy_lock_stateid(nfs4_stateid *dst, | |||
966 | goto out; | 952 | goto out; |
967 | 953 | ||
968 | fl_owner = lockowner->l_owner; | 954 | fl_owner = lockowner->l_owner; |
969 | fl_pid = lockowner->l_pid; | ||
970 | spin_lock(&state->state_lock); | 955 | spin_lock(&state->state_lock); |
971 | lsp = __nfs4_find_lock_state(state, fl_owner, fl_pid, NFS4_ANY_LOCK_TYPE); | 956 | lsp = __nfs4_find_lock_state(state, fl_owner); |
972 | if (lsp && test_bit(NFS_LOCK_LOST, &lsp->ls_flags)) | 957 | if (lsp && test_bit(NFS_LOCK_LOST, &lsp->ls_flags)) |
973 | ret = -EIO; | 958 | ret = -EIO; |
974 | else if (lsp != NULL && test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) != 0) { | 959 | else if (lsp != NULL && test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) != 0) { |
diff --git a/fs/nfs/nfs4trace.h b/fs/nfs/nfs4trace.h index 0a744f3a86f6..1c32adbe728d 100644 --- a/fs/nfs/nfs4trace.h +++ b/fs/nfs/nfs4trace.h | |||
@@ -932,11 +932,11 @@ DEFINE_NFS4_IDMAP_EVENT(nfs4_map_gid_to_group); | |||
932 | 932 | ||
933 | DECLARE_EVENT_CLASS(nfs4_read_event, | 933 | DECLARE_EVENT_CLASS(nfs4_read_event, |
934 | TP_PROTO( | 934 | TP_PROTO( |
935 | const struct nfs_pgio_data *data, | 935 | const struct nfs_pgio_header *hdr, |
936 | int error | 936 | int error |
937 | ), | 937 | ), |
938 | 938 | ||
939 | TP_ARGS(data, error), | 939 | TP_ARGS(hdr, error), |
940 | 940 | ||
941 | TP_STRUCT__entry( | 941 | TP_STRUCT__entry( |
942 | __field(dev_t, dev) | 942 | __field(dev_t, dev) |
@@ -948,12 +948,12 @@ DECLARE_EVENT_CLASS(nfs4_read_event, | |||
948 | ), | 948 | ), |
949 | 949 | ||
950 | TP_fast_assign( | 950 | TP_fast_assign( |
951 | const struct inode *inode = data->header->inode; | 951 | const struct inode *inode = hdr->inode; |
952 | __entry->dev = inode->i_sb->s_dev; | 952 | __entry->dev = inode->i_sb->s_dev; |
953 | __entry->fileid = NFS_FILEID(inode); | 953 | __entry->fileid = NFS_FILEID(inode); |
954 | __entry->fhandle = nfs_fhandle_hash(NFS_FH(inode)); | 954 | __entry->fhandle = nfs_fhandle_hash(NFS_FH(inode)); |
955 | __entry->offset = data->args.offset; | 955 | __entry->offset = hdr->args.offset; |
956 | __entry->count = data->args.count; | 956 | __entry->count = hdr->args.count; |
957 | __entry->error = error; | 957 | __entry->error = error; |
958 | ), | 958 | ), |
959 | 959 | ||
@@ -972,10 +972,10 @@ DECLARE_EVENT_CLASS(nfs4_read_event, | |||
972 | #define DEFINE_NFS4_READ_EVENT(name) \ | 972 | #define DEFINE_NFS4_READ_EVENT(name) \ |
973 | DEFINE_EVENT(nfs4_read_event, name, \ | 973 | DEFINE_EVENT(nfs4_read_event, name, \ |
974 | TP_PROTO( \ | 974 | TP_PROTO( \ |
975 | const struct nfs_pgio_data *data, \ | 975 | const struct nfs_pgio_header *hdr, \ |
976 | int error \ | 976 | int error \ |
977 | ), \ | 977 | ), \ |
978 | TP_ARGS(data, error)) | 978 | TP_ARGS(hdr, error)) |
979 | DEFINE_NFS4_READ_EVENT(nfs4_read); | 979 | DEFINE_NFS4_READ_EVENT(nfs4_read); |
980 | #ifdef CONFIG_NFS_V4_1 | 980 | #ifdef CONFIG_NFS_V4_1 |
981 | DEFINE_NFS4_READ_EVENT(nfs4_pnfs_read); | 981 | DEFINE_NFS4_READ_EVENT(nfs4_pnfs_read); |
@@ -983,11 +983,11 @@ DEFINE_NFS4_READ_EVENT(nfs4_pnfs_read); | |||
983 | 983 | ||
984 | DECLARE_EVENT_CLASS(nfs4_write_event, | 984 | DECLARE_EVENT_CLASS(nfs4_write_event, |
985 | TP_PROTO( | 985 | TP_PROTO( |
986 | const struct nfs_pgio_data *data, | 986 | const struct nfs_pgio_header *hdr, |
987 | int error | 987 | int error |
988 | ), | 988 | ), |
989 | 989 | ||
990 | TP_ARGS(data, error), | 990 | TP_ARGS(hdr, error), |
991 | 991 | ||
992 | TP_STRUCT__entry( | 992 | TP_STRUCT__entry( |
993 | __field(dev_t, dev) | 993 | __field(dev_t, dev) |
@@ -999,12 +999,12 @@ DECLARE_EVENT_CLASS(nfs4_write_event, | |||
999 | ), | 999 | ), |
1000 | 1000 | ||
1001 | TP_fast_assign( | 1001 | TP_fast_assign( |
1002 | const struct inode *inode = data->header->inode; | 1002 | const struct inode *inode = hdr->inode; |
1003 | __entry->dev = inode->i_sb->s_dev; | 1003 | __entry->dev = inode->i_sb->s_dev; |
1004 | __entry->fileid = NFS_FILEID(inode); | 1004 | __entry->fileid = NFS_FILEID(inode); |
1005 | __entry->fhandle = nfs_fhandle_hash(NFS_FH(inode)); | 1005 | __entry->fhandle = nfs_fhandle_hash(NFS_FH(inode)); |
1006 | __entry->offset = data->args.offset; | 1006 | __entry->offset = hdr->args.offset; |
1007 | __entry->count = data->args.count; | 1007 | __entry->count = hdr->args.count; |
1008 | __entry->error = error; | 1008 | __entry->error = error; |
1009 | ), | 1009 | ), |
1010 | 1010 | ||
@@ -1024,10 +1024,10 @@ DECLARE_EVENT_CLASS(nfs4_write_event, | |||
1024 | #define DEFINE_NFS4_WRITE_EVENT(name) \ | 1024 | #define DEFINE_NFS4_WRITE_EVENT(name) \ |
1025 | DEFINE_EVENT(nfs4_write_event, name, \ | 1025 | DEFINE_EVENT(nfs4_write_event, name, \ |
1026 | TP_PROTO( \ | 1026 | TP_PROTO( \ |
1027 | const struct nfs_pgio_data *data, \ | 1027 | const struct nfs_pgio_header *hdr, \ |
1028 | int error \ | 1028 | int error \ |
1029 | ), \ | 1029 | ), \ |
1030 | TP_ARGS(data, error)) | 1030 | TP_ARGS(hdr, error)) |
1031 | DEFINE_NFS4_WRITE_EVENT(nfs4_write); | 1031 | DEFINE_NFS4_WRITE_EVENT(nfs4_write); |
1032 | #ifdef CONFIG_NFS_V4_1 | 1032 | #ifdef CONFIG_NFS_V4_1 |
1033 | DEFINE_NFS4_WRITE_EVENT(nfs4_pnfs_write); | 1033 | DEFINE_NFS4_WRITE_EVENT(nfs4_pnfs_write); |
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index 939ae606cfa4..e13b59d8d9aa 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c | |||
@@ -7092,7 +7092,7 @@ static int nfs4_xdr_dec_reclaim_complete(struct rpc_rqst *rqstp, | |||
7092 | if (!status) | 7092 | if (!status) |
7093 | status = decode_sequence(xdr, &res->seq_res, rqstp); | 7093 | status = decode_sequence(xdr, &res->seq_res, rqstp); |
7094 | if (!status) | 7094 | if (!status) |
7095 | status = decode_reclaim_complete(xdr, (void *)NULL); | 7095 | status = decode_reclaim_complete(xdr, NULL); |
7096 | return status; | 7096 | return status; |
7097 | } | 7097 | } |
7098 | 7098 | ||
diff --git a/fs/nfs/objlayout/objio_osd.c b/fs/nfs/objlayout/objio_osd.c index 611320753db2..ae05278b3761 100644 --- a/fs/nfs/objlayout/objio_osd.c +++ b/fs/nfs/objlayout/objio_osd.c | |||
@@ -439,22 +439,21 @@ static void _read_done(struct ore_io_state *ios, void *private) | |||
439 | objlayout_read_done(&objios->oir, status, objios->sync); | 439 | objlayout_read_done(&objios->oir, status, objios->sync); |
440 | } | 440 | } |
441 | 441 | ||
442 | int objio_read_pagelist(struct nfs_pgio_data *rdata) | 442 | int objio_read_pagelist(struct nfs_pgio_header *hdr) |
443 | { | 443 | { |
444 | struct nfs_pgio_header *hdr = rdata->header; | ||
445 | struct objio_state *objios; | 444 | struct objio_state *objios; |
446 | int ret; | 445 | int ret; |
447 | 446 | ||
448 | ret = objio_alloc_io_state(NFS_I(hdr->inode)->layout, true, | 447 | ret = objio_alloc_io_state(NFS_I(hdr->inode)->layout, true, |
449 | hdr->lseg, rdata->args.pages, rdata->args.pgbase, | 448 | hdr->lseg, hdr->args.pages, hdr->args.pgbase, |
450 | rdata->args.offset, rdata->args.count, rdata, | 449 | hdr->args.offset, hdr->args.count, hdr, |
451 | GFP_KERNEL, &objios); | 450 | GFP_KERNEL, &objios); |
452 | if (unlikely(ret)) | 451 | if (unlikely(ret)) |
453 | return ret; | 452 | return ret; |
454 | 453 | ||
455 | objios->ios->done = _read_done; | 454 | objios->ios->done = _read_done; |
456 | dprintk("%s: offset=0x%llx length=0x%x\n", __func__, | 455 | dprintk("%s: offset=0x%llx length=0x%x\n", __func__, |
457 | rdata->args.offset, rdata->args.count); | 456 | hdr->args.offset, hdr->args.count); |
458 | ret = ore_read(objios->ios); | 457 | ret = ore_read(objios->ios); |
459 | if (unlikely(ret)) | 458 | if (unlikely(ret)) |
460 | objio_free_result(&objios->oir); | 459 | objio_free_result(&objios->oir); |
@@ -487,11 +486,11 @@ static void _write_done(struct ore_io_state *ios, void *private) | |||
487 | static struct page *__r4w_get_page(void *priv, u64 offset, bool *uptodate) | 486 | static struct page *__r4w_get_page(void *priv, u64 offset, bool *uptodate) |
488 | { | 487 | { |
489 | struct objio_state *objios = priv; | 488 | struct objio_state *objios = priv; |
490 | struct nfs_pgio_data *wdata = objios->oir.rpcdata; | 489 | struct nfs_pgio_header *hdr = objios->oir.rpcdata; |
491 | struct address_space *mapping = wdata->header->inode->i_mapping; | 490 | struct address_space *mapping = hdr->inode->i_mapping; |
492 | pgoff_t index = offset / PAGE_SIZE; | 491 | pgoff_t index = offset / PAGE_SIZE; |
493 | struct page *page; | 492 | struct page *page; |
494 | loff_t i_size = i_size_read(wdata->header->inode); | 493 | loff_t i_size = i_size_read(hdr->inode); |
495 | 494 | ||
496 | if (offset >= i_size) { | 495 | if (offset >= i_size) { |
497 | *uptodate = true; | 496 | *uptodate = true; |
@@ -531,15 +530,14 @@ static const struct _ore_r4w_op _r4w_op = { | |||
531 | .put_page = &__r4w_put_page, | 530 | .put_page = &__r4w_put_page, |
532 | }; | 531 | }; |
533 | 532 | ||
534 | int objio_write_pagelist(struct nfs_pgio_data *wdata, int how) | 533 | int objio_write_pagelist(struct nfs_pgio_header *hdr, int how) |
535 | { | 534 | { |
536 | struct nfs_pgio_header *hdr = wdata->header; | ||
537 | struct objio_state *objios; | 535 | struct objio_state *objios; |
538 | int ret; | 536 | int ret; |
539 | 537 | ||
540 | ret = objio_alloc_io_state(NFS_I(hdr->inode)->layout, false, | 538 | ret = objio_alloc_io_state(NFS_I(hdr->inode)->layout, false, |
541 | hdr->lseg, wdata->args.pages, wdata->args.pgbase, | 539 | hdr->lseg, hdr->args.pages, hdr->args.pgbase, |
542 | wdata->args.offset, wdata->args.count, wdata, GFP_NOFS, | 540 | hdr->args.offset, hdr->args.count, hdr, GFP_NOFS, |
543 | &objios); | 541 | &objios); |
544 | if (unlikely(ret)) | 542 | if (unlikely(ret)) |
545 | return ret; | 543 | return ret; |
@@ -551,7 +549,7 @@ int objio_write_pagelist(struct nfs_pgio_data *wdata, int how) | |||
551 | objios->ios->done = _write_done; | 549 | objios->ios->done = _write_done; |
552 | 550 | ||
553 | dprintk("%s: offset=0x%llx length=0x%x\n", __func__, | 551 | dprintk("%s: offset=0x%llx length=0x%x\n", __func__, |
554 | wdata->args.offset, wdata->args.count); | 552 | hdr->args.offset, hdr->args.count); |
555 | ret = ore_write(objios->ios); | 553 | ret = ore_write(objios->ios); |
556 | if (unlikely(ret)) { | 554 | if (unlikely(ret)) { |
557 | objio_free_result(&objios->oir); | 555 | objio_free_result(&objios->oir); |
diff --git a/fs/nfs/objlayout/objlayout.c b/fs/nfs/objlayout/objlayout.c index 765d3f54e986..697a16d11fac 100644 --- a/fs/nfs/objlayout/objlayout.c +++ b/fs/nfs/objlayout/objlayout.c | |||
@@ -229,36 +229,36 @@ objlayout_io_set_result(struct objlayout_io_res *oir, unsigned index, | |||
229 | static void _rpc_read_complete(struct work_struct *work) | 229 | static void _rpc_read_complete(struct work_struct *work) |
230 | { | 230 | { |
231 | struct rpc_task *task; | 231 | struct rpc_task *task; |
232 | struct nfs_pgio_data *rdata; | 232 | struct nfs_pgio_header *hdr; |
233 | 233 | ||
234 | dprintk("%s enter\n", __func__); | 234 | dprintk("%s enter\n", __func__); |
235 | task = container_of(work, struct rpc_task, u.tk_work); | 235 | task = container_of(work, struct rpc_task, u.tk_work); |
236 | rdata = container_of(task, struct nfs_pgio_data, task); | 236 | hdr = container_of(task, struct nfs_pgio_header, task); |
237 | 237 | ||
238 | pnfs_ld_read_done(rdata); | 238 | pnfs_ld_read_done(hdr); |
239 | } | 239 | } |
240 | 240 | ||
241 | void | 241 | void |
242 | objlayout_read_done(struct objlayout_io_res *oir, ssize_t status, bool sync) | 242 | objlayout_read_done(struct objlayout_io_res *oir, ssize_t status, bool sync) |
243 | { | 243 | { |
244 | struct nfs_pgio_data *rdata = oir->rpcdata; | 244 | struct nfs_pgio_header *hdr = oir->rpcdata; |
245 | 245 | ||
246 | oir->status = rdata->task.tk_status = status; | 246 | oir->status = hdr->task.tk_status = status; |
247 | if (status >= 0) | 247 | if (status >= 0) |
248 | rdata->res.count = status; | 248 | hdr->res.count = status; |
249 | else | 249 | else |
250 | rdata->header->pnfs_error = status; | 250 | hdr->pnfs_error = status; |
251 | objlayout_iodone(oir); | 251 | objlayout_iodone(oir); |
252 | /* must not use oir after this point */ | 252 | /* must not use oir after this point */ |
253 | 253 | ||
254 | dprintk("%s: Return status=%zd eof=%d sync=%d\n", __func__, | 254 | dprintk("%s: Return status=%zd eof=%d sync=%d\n", __func__, |
255 | status, rdata->res.eof, sync); | 255 | status, hdr->res.eof, sync); |
256 | 256 | ||
257 | if (sync) | 257 | if (sync) |
258 | pnfs_ld_read_done(rdata); | 258 | pnfs_ld_read_done(hdr); |
259 | else { | 259 | else { |
260 | INIT_WORK(&rdata->task.u.tk_work, _rpc_read_complete); | 260 | INIT_WORK(&hdr->task.u.tk_work, _rpc_read_complete); |
261 | schedule_work(&rdata->task.u.tk_work); | 261 | schedule_work(&hdr->task.u.tk_work); |
262 | } | 262 | } |
263 | } | 263 | } |
264 | 264 | ||
@@ -266,12 +266,11 @@ objlayout_read_done(struct objlayout_io_res *oir, ssize_t status, bool sync) | |||
266 | * Perform sync or async reads. | 266 | * Perform sync or async reads. |
267 | */ | 267 | */ |
268 | enum pnfs_try_status | 268 | enum pnfs_try_status |
269 | objlayout_read_pagelist(struct nfs_pgio_data *rdata) | 269 | objlayout_read_pagelist(struct nfs_pgio_header *hdr) |
270 | { | 270 | { |
271 | struct nfs_pgio_header *hdr = rdata->header; | ||
272 | struct inode *inode = hdr->inode; | 271 | struct inode *inode = hdr->inode; |
273 | loff_t offset = rdata->args.offset; | 272 | loff_t offset = hdr->args.offset; |
274 | size_t count = rdata->args.count; | 273 | size_t count = hdr->args.count; |
275 | int err; | 274 | int err; |
276 | loff_t eof; | 275 | loff_t eof; |
277 | 276 | ||
@@ -279,23 +278,23 @@ objlayout_read_pagelist(struct nfs_pgio_data *rdata) | |||
279 | if (unlikely(offset + count > eof)) { | 278 | if (unlikely(offset + count > eof)) { |
280 | if (offset >= eof) { | 279 | if (offset >= eof) { |
281 | err = 0; | 280 | err = 0; |
282 | rdata->res.count = 0; | 281 | hdr->res.count = 0; |
283 | rdata->res.eof = 1; | 282 | hdr->res.eof = 1; |
284 | /*FIXME: do we need to call pnfs_ld_read_done() */ | 283 | /*FIXME: do we need to call pnfs_ld_read_done() */ |
285 | goto out; | 284 | goto out; |
286 | } | 285 | } |
287 | count = eof - offset; | 286 | count = eof - offset; |
288 | } | 287 | } |
289 | 288 | ||
290 | rdata->res.eof = (offset + count) >= eof; | 289 | hdr->res.eof = (offset + count) >= eof; |
291 | _fix_verify_io_params(hdr->lseg, &rdata->args.pages, | 290 | _fix_verify_io_params(hdr->lseg, &hdr->args.pages, |
292 | &rdata->args.pgbase, | 291 | &hdr->args.pgbase, |
293 | rdata->args.offset, rdata->args.count); | 292 | hdr->args.offset, hdr->args.count); |
294 | 293 | ||
295 | dprintk("%s: inode(%lx) offset 0x%llx count 0x%Zx eof=%d\n", | 294 | dprintk("%s: inode(%lx) offset 0x%llx count 0x%Zx eof=%d\n", |
296 | __func__, inode->i_ino, offset, count, rdata->res.eof); | 295 | __func__, inode->i_ino, offset, count, hdr->res.eof); |
297 | 296 | ||
298 | err = objio_read_pagelist(rdata); | 297 | err = objio_read_pagelist(hdr); |
299 | out: | 298 | out: |
300 | if (unlikely(err)) { | 299 | if (unlikely(err)) { |
301 | hdr->pnfs_error = err; | 300 | hdr->pnfs_error = err; |
@@ -312,38 +311,38 @@ objlayout_read_pagelist(struct nfs_pgio_data *rdata) | |||
312 | static void _rpc_write_complete(struct work_struct *work) | 311 | static void _rpc_write_complete(struct work_struct *work) |
313 | { | 312 | { |
314 | struct rpc_task *task; | 313 | struct rpc_task *task; |
315 | struct nfs_pgio_data *wdata; | 314 | struct nfs_pgio_header *hdr; |
316 | 315 | ||
317 | dprintk("%s enter\n", __func__); | 316 | dprintk("%s enter\n", __func__); |
318 | task = container_of(work, struct rpc_task, u.tk_work); | 317 | task = container_of(work, struct rpc_task, u.tk_work); |
319 | wdata = container_of(task, struct nfs_pgio_data, task); | 318 | hdr = container_of(task, struct nfs_pgio_header, task); |
320 | 319 | ||
321 | pnfs_ld_write_done(wdata); | 320 | pnfs_ld_write_done(hdr); |
322 | } | 321 | } |
323 | 322 | ||
324 | void | 323 | void |
325 | objlayout_write_done(struct objlayout_io_res *oir, ssize_t status, bool sync) | 324 | objlayout_write_done(struct objlayout_io_res *oir, ssize_t status, bool sync) |
326 | { | 325 | { |
327 | struct nfs_pgio_data *wdata = oir->rpcdata; | 326 | struct nfs_pgio_header *hdr = oir->rpcdata; |
328 | 327 | ||
329 | oir->status = wdata->task.tk_status = status; | 328 | oir->status = hdr->task.tk_status = status; |
330 | if (status >= 0) { | 329 | if (status >= 0) { |
331 | wdata->res.count = status; | 330 | hdr->res.count = status; |
332 | wdata->verf.committed = oir->committed; | 331 | hdr->verf.committed = oir->committed; |
333 | } else { | 332 | } else { |
334 | wdata->header->pnfs_error = status; | 333 | hdr->pnfs_error = status; |
335 | } | 334 | } |
336 | objlayout_iodone(oir); | 335 | objlayout_iodone(oir); |
337 | /* must not use oir after this point */ | 336 | /* must not use oir after this point */ |
338 | 337 | ||
339 | dprintk("%s: Return status %zd committed %d sync=%d\n", __func__, | 338 | dprintk("%s: Return status %zd committed %d sync=%d\n", __func__, |
340 | status, wdata->verf.committed, sync); | 339 | status, hdr->verf.committed, sync); |
341 | 340 | ||
342 | if (sync) | 341 | if (sync) |
343 | pnfs_ld_write_done(wdata); | 342 | pnfs_ld_write_done(hdr); |
344 | else { | 343 | else { |
345 | INIT_WORK(&wdata->task.u.tk_work, _rpc_write_complete); | 344 | INIT_WORK(&hdr->task.u.tk_work, _rpc_write_complete); |
346 | schedule_work(&wdata->task.u.tk_work); | 345 | schedule_work(&hdr->task.u.tk_work); |
347 | } | 346 | } |
348 | } | 347 | } |
349 | 348 | ||
@@ -351,17 +350,15 @@ objlayout_write_done(struct objlayout_io_res *oir, ssize_t status, bool sync) | |||
351 | * Perform sync or async writes. | 350 | * Perform sync or async writes. |
352 | */ | 351 | */ |
353 | enum pnfs_try_status | 352 | enum pnfs_try_status |
354 | objlayout_write_pagelist(struct nfs_pgio_data *wdata, | 353 | objlayout_write_pagelist(struct nfs_pgio_header *hdr, int how) |
355 | int how) | ||
356 | { | 354 | { |
357 | struct nfs_pgio_header *hdr = wdata->header; | ||
358 | int err; | 355 | int err; |
359 | 356 | ||
360 | _fix_verify_io_params(hdr->lseg, &wdata->args.pages, | 357 | _fix_verify_io_params(hdr->lseg, &hdr->args.pages, |
361 | &wdata->args.pgbase, | 358 | &hdr->args.pgbase, |
362 | wdata->args.offset, wdata->args.count); | 359 | hdr->args.offset, hdr->args.count); |
363 | 360 | ||
364 | err = objio_write_pagelist(wdata, how); | 361 | err = objio_write_pagelist(hdr, how); |
365 | if (unlikely(err)) { | 362 | if (unlikely(err)) { |
366 | hdr->pnfs_error = err; | 363 | hdr->pnfs_error = err; |
367 | dprintk("%s: Returned Error %d\n", __func__, err); | 364 | dprintk("%s: Returned Error %d\n", __func__, err); |
diff --git a/fs/nfs/objlayout/objlayout.h b/fs/nfs/objlayout/objlayout.h index 01e041029a6c..fd13f1d2f136 100644 --- a/fs/nfs/objlayout/objlayout.h +++ b/fs/nfs/objlayout/objlayout.h | |||
@@ -119,8 +119,8 @@ extern void objio_free_lseg(struct pnfs_layout_segment *lseg); | |||
119 | */ | 119 | */ |
120 | extern void objio_free_result(struct objlayout_io_res *oir); | 120 | extern void objio_free_result(struct objlayout_io_res *oir); |
121 | 121 | ||
122 | extern int objio_read_pagelist(struct nfs_pgio_data *rdata); | 122 | extern int objio_read_pagelist(struct nfs_pgio_header *rdata); |
123 | extern int objio_write_pagelist(struct nfs_pgio_data *wdata, int how); | 123 | extern int objio_write_pagelist(struct nfs_pgio_header *wdata, int how); |
124 | 124 | ||
125 | /* | 125 | /* |
126 | * callback API | 126 | * callback API |
@@ -168,10 +168,10 @@ extern struct pnfs_layout_segment *objlayout_alloc_lseg( | |||
168 | extern void objlayout_free_lseg(struct pnfs_layout_segment *); | 168 | extern void objlayout_free_lseg(struct pnfs_layout_segment *); |
169 | 169 | ||
170 | extern enum pnfs_try_status objlayout_read_pagelist( | 170 | extern enum pnfs_try_status objlayout_read_pagelist( |
171 | struct nfs_pgio_data *); | 171 | struct nfs_pgio_header *); |
172 | 172 | ||
173 | extern enum pnfs_try_status objlayout_write_pagelist( | 173 | extern enum pnfs_try_status objlayout_write_pagelist( |
174 | struct nfs_pgio_data *, | 174 | struct nfs_pgio_header *, |
175 | int how); | 175 | int how); |
176 | 176 | ||
177 | extern void objlayout_encode_layoutcommit( | 177 | extern void objlayout_encode_layoutcommit( |
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c index 0be5050638f7..ba491926df5f 100644 --- a/fs/nfs/pagelist.c +++ b/fs/nfs/pagelist.c | |||
@@ -141,16 +141,24 @@ nfs_iocounter_wait(struct nfs_io_counter *c) | |||
141 | * @req - request in group that is to be locked | 141 | * @req - request in group that is to be locked |
142 | * | 142 | * |
143 | * this lock must be held if modifying the page group list | 143 | * this lock must be held if modifying the page group list |
144 | * | ||
145 | * returns result from wait_on_bit_lock: 0 on success, < 0 on error | ||
144 | */ | 146 | */ |
145 | void | 147 | int |
146 | nfs_page_group_lock(struct nfs_page *req) | 148 | nfs_page_group_lock(struct nfs_page *req, bool wait) |
147 | { | 149 | { |
148 | struct nfs_page *head = req->wb_head; | 150 | struct nfs_page *head = req->wb_head; |
151 | int ret; | ||
149 | 152 | ||
150 | WARN_ON_ONCE(head != head->wb_head); | 153 | WARN_ON_ONCE(head != head->wb_head); |
151 | 154 | ||
152 | wait_on_bit_lock(&head->wb_flags, PG_HEADLOCK, | 155 | do { |
156 | ret = wait_on_bit_lock(&head->wb_flags, PG_HEADLOCK, | ||
153 | TASK_UNINTERRUPTIBLE); | 157 | TASK_UNINTERRUPTIBLE); |
158 | } while (wait && ret != 0); | ||
159 | |||
160 | WARN_ON_ONCE(ret > 0); | ||
161 | return ret; | ||
154 | } | 162 | } |
155 | 163 | ||
156 | /* | 164 | /* |
@@ -211,7 +219,7 @@ bool nfs_page_group_sync_on_bit(struct nfs_page *req, unsigned int bit) | |||
211 | { | 219 | { |
212 | bool ret; | 220 | bool ret; |
213 | 221 | ||
214 | nfs_page_group_lock(req); | 222 | nfs_page_group_lock(req, true); |
215 | ret = nfs_page_group_sync_on_bit_locked(req, bit); | 223 | ret = nfs_page_group_sync_on_bit_locked(req, bit); |
216 | nfs_page_group_unlock(req); | 224 | nfs_page_group_unlock(req); |
217 | 225 | ||
@@ -454,123 +462,72 @@ size_t nfs_generic_pg_test(struct nfs_pageio_descriptor *desc, | |||
454 | } | 462 | } |
455 | EXPORT_SYMBOL_GPL(nfs_generic_pg_test); | 463 | EXPORT_SYMBOL_GPL(nfs_generic_pg_test); |
456 | 464 | ||
457 | static inline struct nfs_rw_header *NFS_RW_HEADER(struct nfs_pgio_header *hdr) | 465 | struct nfs_pgio_header *nfs_pgio_header_alloc(const struct nfs_rw_ops *ops) |
458 | { | ||
459 | return container_of(hdr, struct nfs_rw_header, header); | ||
460 | } | ||
461 | |||
462 | /** | ||
463 | * nfs_rw_header_alloc - Allocate a header for a read or write | ||
464 | * @ops: Read or write function vector | ||
465 | */ | ||
466 | struct nfs_rw_header *nfs_rw_header_alloc(const struct nfs_rw_ops *ops) | ||
467 | { | 466 | { |
468 | struct nfs_rw_header *header = ops->rw_alloc_header(); | 467 | struct nfs_pgio_header *hdr = ops->rw_alloc_header(); |
469 | |||
470 | if (header) { | ||
471 | struct nfs_pgio_header *hdr = &header->header; | ||
472 | 468 | ||
469 | if (hdr) { | ||
473 | INIT_LIST_HEAD(&hdr->pages); | 470 | INIT_LIST_HEAD(&hdr->pages); |
474 | spin_lock_init(&hdr->lock); | 471 | spin_lock_init(&hdr->lock); |
475 | atomic_set(&hdr->refcnt, 0); | ||
476 | hdr->rw_ops = ops; | 472 | hdr->rw_ops = ops; |
477 | } | 473 | } |
478 | return header; | 474 | return hdr; |
479 | } | 475 | } |
480 | EXPORT_SYMBOL_GPL(nfs_rw_header_alloc); | 476 | EXPORT_SYMBOL_GPL(nfs_pgio_header_alloc); |
481 | 477 | ||
482 | /* | 478 | /* |
483 | * nfs_rw_header_free - Free a read or write header | 479 | * nfs_pgio_header_free - Free a read or write header |
484 | * @hdr: The header to free | 480 | * @hdr: The header to free |
485 | */ | 481 | */ |
486 | void nfs_rw_header_free(struct nfs_pgio_header *hdr) | 482 | void nfs_pgio_header_free(struct nfs_pgio_header *hdr) |
487 | { | 483 | { |
488 | hdr->rw_ops->rw_free_header(NFS_RW_HEADER(hdr)); | 484 | hdr->rw_ops->rw_free_header(hdr); |
489 | } | 485 | } |
490 | EXPORT_SYMBOL_GPL(nfs_rw_header_free); | 486 | EXPORT_SYMBOL_GPL(nfs_pgio_header_free); |
491 | 487 | ||
492 | /** | 488 | /** |
493 | * nfs_pgio_data_alloc - Allocate pageio data | 489 | * nfs_pgio_data_destroy - make @hdr suitable for reuse |
494 | * @hdr: The header making a request | 490 | * |
495 | * @pagecount: Number of pages to create | 491 | * Frees memory and releases refs from nfs_generic_pgio, so that it may |
496 | */ | 492 | * be called again. |
497 | static struct nfs_pgio_data *nfs_pgio_data_alloc(struct nfs_pgio_header *hdr, | 493 | * |
498 | unsigned int pagecount) | 494 | * @hdr: A header that has had nfs_generic_pgio called |
499 | { | ||
500 | struct nfs_pgio_data *data, *prealloc; | ||
501 | |||
502 | prealloc = &NFS_RW_HEADER(hdr)->rpc_data; | ||
503 | if (prealloc->header == NULL) | ||
504 | data = prealloc; | ||
505 | else | ||
506 | data = kzalloc(sizeof(*data), GFP_KERNEL); | ||
507 | if (!data) | ||
508 | goto out; | ||
509 | |||
510 | if (nfs_pgarray_set(&data->pages, pagecount)) { | ||
511 | data->header = hdr; | ||
512 | atomic_inc(&hdr->refcnt); | ||
513 | } else { | ||
514 | if (data != prealloc) | ||
515 | kfree(data); | ||
516 | data = NULL; | ||
517 | } | ||
518 | out: | ||
519 | return data; | ||
520 | } | ||
521 | |||
522 | /** | ||
523 | * nfs_pgio_data_release - Properly free pageio data | ||
524 | * @data: The data to release | ||
525 | */ | 495 | */ |
526 | void nfs_pgio_data_release(struct nfs_pgio_data *data) | 496 | void nfs_pgio_data_destroy(struct nfs_pgio_header *hdr) |
527 | { | 497 | { |
528 | struct nfs_pgio_header *hdr = data->header; | 498 | put_nfs_open_context(hdr->args.context); |
529 | struct nfs_rw_header *pageio_header = NFS_RW_HEADER(hdr); | 499 | if (hdr->page_array.pagevec != hdr->page_array.page_array) |
530 | 500 | kfree(hdr->page_array.pagevec); | |
531 | put_nfs_open_context(data->args.context); | ||
532 | if (data->pages.pagevec != data->pages.page_array) | ||
533 | kfree(data->pages.pagevec); | ||
534 | if (data == &pageio_header->rpc_data) { | ||
535 | data->header = NULL; | ||
536 | data = NULL; | ||
537 | } | ||
538 | if (atomic_dec_and_test(&hdr->refcnt)) | ||
539 | hdr->completion_ops->completion(hdr); | ||
540 | /* Note: we only free the rpc_task after callbacks are done. | ||
541 | * See the comment in rpc_free_task() for why | ||
542 | */ | ||
543 | kfree(data); | ||
544 | } | 501 | } |
545 | EXPORT_SYMBOL_GPL(nfs_pgio_data_release); | 502 | EXPORT_SYMBOL_GPL(nfs_pgio_data_destroy); |
546 | 503 | ||
547 | /** | 504 | /** |
548 | * nfs_pgio_rpcsetup - Set up arguments for a pageio call | 505 | * nfs_pgio_rpcsetup - Set up arguments for a pageio call |
549 | * @data: The pageio data | 506 | * @hdr: The pageio hdr |
550 | * @count: Number of bytes to read | 507 | * @count: Number of bytes to read |
551 | * @offset: Initial offset | 508 | * @offset: Initial offset |
552 | * @how: How to commit data (writes only) | 509 | * @how: How to commit data (writes only) |
553 | * @cinfo: Commit information for the call (writes only) | 510 | * @cinfo: Commit information for the call (writes only) |
554 | */ | 511 | */ |
555 | static void nfs_pgio_rpcsetup(struct nfs_pgio_data *data, | 512 | static void nfs_pgio_rpcsetup(struct nfs_pgio_header *hdr, |
556 | unsigned int count, unsigned int offset, | 513 | unsigned int count, unsigned int offset, |
557 | int how, struct nfs_commit_info *cinfo) | 514 | int how, struct nfs_commit_info *cinfo) |
558 | { | 515 | { |
559 | struct nfs_page *req = data->header->req; | 516 | struct nfs_page *req = hdr->req; |
560 | 517 | ||
561 | /* Set up the RPC argument and reply structs | 518 | /* Set up the RPC argument and reply structs |
562 | * NB: take care not to mess about with data->commit et al. */ | 519 | * NB: take care not to mess about with hdr->commit et al. */ |
563 | 520 | ||
564 | data->args.fh = NFS_FH(data->header->inode); | 521 | hdr->args.fh = NFS_FH(hdr->inode); |
565 | data->args.offset = req_offset(req) + offset; | 522 | hdr->args.offset = req_offset(req) + offset; |
566 | /* pnfs_set_layoutcommit needs this */ | 523 | /* pnfs_set_layoutcommit needs this */ |
567 | data->mds_offset = data->args.offset; | 524 | hdr->mds_offset = hdr->args.offset; |
568 | data->args.pgbase = req->wb_pgbase + offset; | 525 | hdr->args.pgbase = req->wb_pgbase + offset; |
569 | data->args.pages = data->pages.pagevec; | 526 | hdr->args.pages = hdr->page_array.pagevec; |
570 | data->args.count = count; | 527 | hdr->args.count = count; |
571 | data->args.context = get_nfs_open_context(req->wb_context); | 528 | hdr->args.context = get_nfs_open_context(req->wb_context); |
572 | data->args.lock_context = req->wb_lock_context; | 529 | hdr->args.lock_context = req->wb_lock_context; |
573 | data->args.stable = NFS_UNSTABLE; | 530 | hdr->args.stable = NFS_UNSTABLE; |
574 | switch (how & (FLUSH_STABLE | FLUSH_COND_STABLE)) { | 531 | switch (how & (FLUSH_STABLE | FLUSH_COND_STABLE)) { |
575 | case 0: | 532 | case 0: |
576 | break; | 533 | break; |
@@ -578,59 +535,59 @@ static void nfs_pgio_rpcsetup(struct nfs_pgio_data *data, | |||
578 | if (nfs_reqs_to_commit(cinfo)) | 535 | if (nfs_reqs_to_commit(cinfo)) |
579 | break; | 536 | break; |
580 | default: | 537 | default: |
581 | data->args.stable = NFS_FILE_SYNC; | 538 | hdr->args.stable = NFS_FILE_SYNC; |
582 | } | 539 | } |
583 | 540 | ||
584 | data->res.fattr = &data->fattr; | 541 | hdr->res.fattr = &hdr->fattr; |
585 | data->res.count = count; | 542 | hdr->res.count = count; |
586 | data->res.eof = 0; | 543 | hdr->res.eof = 0; |
587 | data->res.verf = &data->verf; | 544 | hdr->res.verf = &hdr->verf; |
588 | nfs_fattr_init(&data->fattr); | 545 | nfs_fattr_init(&hdr->fattr); |
589 | } | 546 | } |
590 | 547 | ||
591 | /** | 548 | /** |
592 | * nfs_pgio_prepare - Prepare pageio data to go over the wire | 549 | * nfs_pgio_prepare - Prepare pageio hdr to go over the wire |
593 | * @task: The current task | 550 | * @task: The current task |
594 | * @calldata: pageio data to prepare | 551 | * @calldata: pageio header to prepare |
595 | */ | 552 | */ |
596 | static void nfs_pgio_prepare(struct rpc_task *task, void *calldata) | 553 | static void nfs_pgio_prepare(struct rpc_task *task, void *calldata) |
597 | { | 554 | { |
598 | struct nfs_pgio_data *data = calldata; | 555 | struct nfs_pgio_header *hdr = calldata; |
599 | int err; | 556 | int err; |
600 | err = NFS_PROTO(data->header->inode)->pgio_rpc_prepare(task, data); | 557 | err = NFS_PROTO(hdr->inode)->pgio_rpc_prepare(task, hdr); |
601 | if (err) | 558 | if (err) |
602 | rpc_exit(task, err); | 559 | rpc_exit(task, err); |
603 | } | 560 | } |
604 | 561 | ||
605 | int nfs_initiate_pgio(struct rpc_clnt *clnt, struct nfs_pgio_data *data, | 562 | int nfs_initiate_pgio(struct rpc_clnt *clnt, struct nfs_pgio_header *hdr, |
606 | const struct rpc_call_ops *call_ops, int how, int flags) | 563 | const struct rpc_call_ops *call_ops, int how, int flags) |
607 | { | 564 | { |
608 | struct rpc_task *task; | 565 | struct rpc_task *task; |
609 | struct rpc_message msg = { | 566 | struct rpc_message msg = { |
610 | .rpc_argp = &data->args, | 567 | .rpc_argp = &hdr->args, |
611 | .rpc_resp = &data->res, | 568 | .rpc_resp = &hdr->res, |
612 | .rpc_cred = data->header->cred, | 569 | .rpc_cred = hdr->cred, |
613 | }; | 570 | }; |
614 | struct rpc_task_setup task_setup_data = { | 571 | struct rpc_task_setup task_setup_data = { |
615 | .rpc_client = clnt, | 572 | .rpc_client = clnt, |
616 | .task = &data->task, | 573 | .task = &hdr->task, |
617 | .rpc_message = &msg, | 574 | .rpc_message = &msg, |
618 | .callback_ops = call_ops, | 575 | .callback_ops = call_ops, |
619 | .callback_data = data, | 576 | .callback_data = hdr, |
620 | .workqueue = nfsiod_workqueue, | 577 | .workqueue = nfsiod_workqueue, |
621 | .flags = RPC_TASK_ASYNC | flags, | 578 | .flags = RPC_TASK_ASYNC | flags, |
622 | }; | 579 | }; |
623 | int ret = 0; | 580 | int ret = 0; |
624 | 581 | ||
625 | data->header->rw_ops->rw_initiate(data, &msg, &task_setup_data, how); | 582 | hdr->rw_ops->rw_initiate(hdr, &msg, &task_setup_data, how); |
626 | 583 | ||
627 | dprintk("NFS: %5u initiated pgio call " | 584 | dprintk("NFS: %5u initiated pgio call " |
628 | "(req %s/%llu, %u bytes @ offset %llu)\n", | 585 | "(req %s/%llu, %u bytes @ offset %llu)\n", |
629 | data->task.tk_pid, | 586 | hdr->task.tk_pid, |
630 | data->header->inode->i_sb->s_id, | 587 | hdr->inode->i_sb->s_id, |
631 | (unsigned long long)NFS_FILEID(data->header->inode), | 588 | (unsigned long long)NFS_FILEID(hdr->inode), |
632 | data->args.count, | 589 | hdr->args.count, |
633 | (unsigned long long)data->args.offset); | 590 | (unsigned long long)hdr->args.offset); |
634 | 591 | ||
635 | task = rpc_run_task(&task_setup_data); | 592 | task = rpc_run_task(&task_setup_data); |
636 | if (IS_ERR(task)) { | 593 | if (IS_ERR(task)) { |
@@ -657,22 +614,23 @@ static int nfs_pgio_error(struct nfs_pageio_descriptor *desc, | |||
657 | struct nfs_pgio_header *hdr) | 614 | struct nfs_pgio_header *hdr) |
658 | { | 615 | { |
659 | set_bit(NFS_IOHDR_REDO, &hdr->flags); | 616 | set_bit(NFS_IOHDR_REDO, &hdr->flags); |
660 | nfs_pgio_data_release(hdr->data); | 617 | nfs_pgio_data_destroy(hdr); |
661 | hdr->data = NULL; | 618 | hdr->completion_ops->completion(hdr); |
662 | desc->pg_completion_ops->error_cleanup(&desc->pg_list); | 619 | desc->pg_completion_ops->error_cleanup(&desc->pg_list); |
663 | return -ENOMEM; | 620 | return -ENOMEM; |
664 | } | 621 | } |
665 | 622 | ||
666 | /** | 623 | /** |
667 | * nfs_pgio_release - Release pageio data | 624 | * nfs_pgio_release - Release pageio data |
668 | * @calldata: The pageio data to release | 625 | * @calldata: The pageio header to release |
669 | */ | 626 | */ |
670 | static void nfs_pgio_release(void *calldata) | 627 | static void nfs_pgio_release(void *calldata) |
671 | { | 628 | { |
672 | struct nfs_pgio_data *data = calldata; | 629 | struct nfs_pgio_header *hdr = calldata; |
673 | if (data->header->rw_ops->rw_release) | 630 | if (hdr->rw_ops->rw_release) |
674 | data->header->rw_ops->rw_release(data); | 631 | hdr->rw_ops->rw_release(hdr); |
675 | nfs_pgio_data_release(data); | 632 | nfs_pgio_data_destroy(hdr); |
633 | hdr->completion_ops->completion(hdr); | ||
676 | } | 634 | } |
677 | 635 | ||
678 | /** | 636 | /** |
@@ -713,22 +671,22 @@ EXPORT_SYMBOL_GPL(nfs_pageio_init); | |||
713 | /** | 671 | /** |
714 | * nfs_pgio_result - Basic pageio error handling | 672 | * nfs_pgio_result - Basic pageio error handling |
715 | * @task: The task that ran | 673 | * @task: The task that ran |
716 | * @calldata: Pageio data to check | 674 | * @calldata: Pageio header to check |
717 | */ | 675 | */ |
718 | static void nfs_pgio_result(struct rpc_task *task, void *calldata) | 676 | static void nfs_pgio_result(struct rpc_task *task, void *calldata) |
719 | { | 677 | { |
720 | struct nfs_pgio_data *data = calldata; | 678 | struct nfs_pgio_header *hdr = calldata; |
721 | struct inode *inode = data->header->inode; | 679 | struct inode *inode = hdr->inode; |
722 | 680 | ||
723 | dprintk("NFS: %s: %5u, (status %d)\n", __func__, | 681 | dprintk("NFS: %s: %5u, (status %d)\n", __func__, |
724 | task->tk_pid, task->tk_status); | 682 | task->tk_pid, task->tk_status); |
725 | 683 | ||
726 | if (data->header->rw_ops->rw_done(task, data, inode) != 0) | 684 | if (hdr->rw_ops->rw_done(task, hdr, inode) != 0) |
727 | return; | 685 | return; |
728 | if (task->tk_status < 0) | 686 | if (task->tk_status < 0) |
729 | nfs_set_pgio_error(data->header, task->tk_status, data->args.offset); | 687 | nfs_set_pgio_error(hdr, task->tk_status, hdr->args.offset); |
730 | else | 688 | else |
731 | data->header->rw_ops->rw_result(task, data); | 689 | hdr->rw_ops->rw_result(task, hdr); |
732 | } | 690 | } |
733 | 691 | ||
734 | /* | 692 | /* |
@@ -744,17 +702,16 @@ int nfs_generic_pgio(struct nfs_pageio_descriptor *desc, | |||
744 | { | 702 | { |
745 | struct nfs_page *req; | 703 | struct nfs_page *req; |
746 | struct page **pages; | 704 | struct page **pages; |
747 | struct nfs_pgio_data *data; | ||
748 | struct list_head *head = &desc->pg_list; | 705 | struct list_head *head = &desc->pg_list; |
749 | struct nfs_commit_info cinfo; | 706 | struct nfs_commit_info cinfo; |
707 | unsigned int pagecount; | ||
750 | 708 | ||
751 | data = nfs_pgio_data_alloc(hdr, nfs_page_array_len(desc->pg_base, | 709 | pagecount = nfs_page_array_len(desc->pg_base, desc->pg_count); |
752 | desc->pg_count)); | 710 | if (!nfs_pgarray_set(&hdr->page_array, pagecount)) |
753 | if (!data) | ||
754 | return nfs_pgio_error(desc, hdr); | 711 | return nfs_pgio_error(desc, hdr); |
755 | 712 | ||
756 | nfs_init_cinfo(&cinfo, desc->pg_inode, desc->pg_dreq); | 713 | nfs_init_cinfo(&cinfo, desc->pg_inode, desc->pg_dreq); |
757 | pages = data->pages.pagevec; | 714 | pages = hdr->page_array.pagevec; |
758 | while (!list_empty(head)) { | 715 | while (!list_empty(head)) { |
759 | req = nfs_list_entry(head->next); | 716 | req = nfs_list_entry(head->next); |
760 | nfs_list_remove_request(req); | 717 | nfs_list_remove_request(req); |
@@ -767,8 +724,7 @@ int nfs_generic_pgio(struct nfs_pageio_descriptor *desc, | |||
767 | desc->pg_ioflags &= ~FLUSH_COND_STABLE; | 724 | desc->pg_ioflags &= ~FLUSH_COND_STABLE; |
768 | 725 | ||
769 | /* Set up the argument struct */ | 726 | /* Set up the argument struct */ |
770 | nfs_pgio_rpcsetup(data, desc->pg_count, 0, desc->pg_ioflags, &cinfo); | 727 | nfs_pgio_rpcsetup(hdr, desc->pg_count, 0, desc->pg_ioflags, &cinfo); |
771 | hdr->data = data; | ||
772 | desc->pg_rpc_callops = &nfs_pgio_common_ops; | 728 | desc->pg_rpc_callops = &nfs_pgio_common_ops; |
773 | return 0; | 729 | return 0; |
774 | } | 730 | } |
@@ -776,25 +732,20 @@ EXPORT_SYMBOL_GPL(nfs_generic_pgio); | |||
776 | 732 | ||
777 | static int nfs_generic_pg_pgios(struct nfs_pageio_descriptor *desc) | 733 | static int nfs_generic_pg_pgios(struct nfs_pageio_descriptor *desc) |
778 | { | 734 | { |
779 | struct nfs_rw_header *rw_hdr; | ||
780 | struct nfs_pgio_header *hdr; | 735 | struct nfs_pgio_header *hdr; |
781 | int ret; | 736 | int ret; |
782 | 737 | ||
783 | rw_hdr = nfs_rw_header_alloc(desc->pg_rw_ops); | 738 | hdr = nfs_pgio_header_alloc(desc->pg_rw_ops); |
784 | if (!rw_hdr) { | 739 | if (!hdr) { |
785 | desc->pg_completion_ops->error_cleanup(&desc->pg_list); | 740 | desc->pg_completion_ops->error_cleanup(&desc->pg_list); |
786 | return -ENOMEM; | 741 | return -ENOMEM; |
787 | } | 742 | } |
788 | hdr = &rw_hdr->header; | 743 | nfs_pgheader_init(desc, hdr, nfs_pgio_header_free); |
789 | nfs_pgheader_init(desc, hdr, nfs_rw_header_free); | ||
790 | atomic_inc(&hdr->refcnt); | ||
791 | ret = nfs_generic_pgio(desc, hdr); | 744 | ret = nfs_generic_pgio(desc, hdr); |
792 | if (ret == 0) | 745 | if (ret == 0) |
793 | ret = nfs_initiate_pgio(NFS_CLIENT(hdr->inode), | 746 | ret = nfs_initiate_pgio(NFS_CLIENT(hdr->inode), |
794 | hdr->data, desc->pg_rpc_callops, | 747 | hdr, desc->pg_rpc_callops, |
795 | desc->pg_ioflags, 0); | 748 | desc->pg_ioflags, 0); |
796 | if (atomic_dec_and_test(&hdr->refcnt)) | ||
797 | hdr->completion_ops->completion(hdr); | ||
798 | return ret; | 749 | return ret; |
799 | } | 750 | } |
800 | 751 | ||
@@ -907,8 +858,13 @@ static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc, | |||
907 | struct nfs_page *subreq; | 858 | struct nfs_page *subreq; |
908 | unsigned int bytes_left = 0; | 859 | unsigned int bytes_left = 0; |
909 | unsigned int offset, pgbase; | 860 | unsigned int offset, pgbase; |
861 | int ret; | ||
910 | 862 | ||
911 | nfs_page_group_lock(req); | 863 | ret = nfs_page_group_lock(req, false); |
864 | if (ret < 0) { | ||
865 | desc->pg_error = ret; | ||
866 | return 0; | ||
867 | } | ||
912 | 868 | ||
913 | subreq = req; | 869 | subreq = req; |
914 | bytes_left = subreq->wb_bytes; | 870 | bytes_left = subreq->wb_bytes; |
@@ -930,7 +886,11 @@ static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc, | |||
930 | if (desc->pg_recoalesce) | 886 | if (desc->pg_recoalesce) |
931 | return 0; | 887 | return 0; |
932 | /* retry add_request for this subreq */ | 888 | /* retry add_request for this subreq */ |
933 | nfs_page_group_lock(req); | 889 | ret = nfs_page_group_lock(req, false); |
890 | if (ret < 0) { | ||
891 | desc->pg_error = ret; | ||
892 | return 0; | ||
893 | } | ||
934 | continue; | 894 | continue; |
935 | } | 895 | } |
936 | 896 | ||
@@ -1005,7 +965,38 @@ int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc, | |||
1005 | } while (ret); | 965 | } while (ret); |
1006 | return ret; | 966 | return ret; |
1007 | } | 967 | } |
1008 | EXPORT_SYMBOL_GPL(nfs_pageio_add_request); | 968 | |
969 | /* | ||
970 | * nfs_pageio_resend - Transfer requests to new descriptor and resend | ||
971 | * @hdr - the pgio header to move request from | ||
972 | * @desc - the pageio descriptor to add requests to | ||
973 | * | ||
974 | * Try to move each request (nfs_page) from @hdr to @desc then attempt | ||
975 | * to send them. | ||
976 | * | ||
977 | * Returns 0 on success and < 0 on error. | ||
978 | */ | ||
979 | int nfs_pageio_resend(struct nfs_pageio_descriptor *desc, | ||
980 | struct nfs_pgio_header *hdr) | ||
981 | { | ||
982 | LIST_HEAD(failed); | ||
983 | |||
984 | desc->pg_dreq = hdr->dreq; | ||
985 | while (!list_empty(&hdr->pages)) { | ||
986 | struct nfs_page *req = nfs_list_entry(hdr->pages.next); | ||
987 | |||
988 | nfs_list_remove_request(req); | ||
989 | if (!nfs_pageio_add_request(desc, req)) | ||
990 | nfs_list_add_request(req, &failed); | ||
991 | } | ||
992 | nfs_pageio_complete(desc); | ||
993 | if (!list_empty(&failed)) { | ||
994 | list_move(&failed, &hdr->pages); | ||
995 | return -EIO; | ||
996 | } | ||
997 | return 0; | ||
998 | } | ||
999 | EXPORT_SYMBOL_GPL(nfs_pageio_resend); | ||
1009 | 1000 | ||
1010 | /** | 1001 | /** |
1011 | * nfs_pageio_complete - Complete I/O on an nfs_pageio_descriptor | 1002 | * nfs_pageio_complete - Complete I/O on an nfs_pageio_descriptor |
@@ -1021,7 +1012,6 @@ void nfs_pageio_complete(struct nfs_pageio_descriptor *desc) | |||
1021 | break; | 1012 | break; |
1022 | } | 1013 | } |
1023 | } | 1014 | } |
1024 | EXPORT_SYMBOL_GPL(nfs_pageio_complete); | ||
1025 | 1015 | ||
1026 | /** | 1016 | /** |
1027 | * nfs_pageio_cond_complete - Conditional I/O completion | 1017 | * nfs_pageio_cond_complete - Conditional I/O completion |
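With struct nfs_rw_header gone, the generic path reduces to allocate, init, coalesce, initiate. The sketch below restates the flow that nfs_generic_pg_pgios follows after this change, with the error-cleanup branch trimmed for brevity; only the function name is invented, every call comes from the hunks above.

	/* Sketch of the consolidated generic I/O path (error cleanup trimmed);
	 * it mirrors nfs_generic_pg_pgios in the hunk above. */
	static int example_pg_pgios(struct nfs_pageio_descriptor *desc)
	{
		struct nfs_pgio_header *hdr;
		int ret;

		hdr = nfs_pgio_header_alloc(desc->pg_rw_ops);	/* was nfs_rw_header_alloc() */
		if (!hdr)
			return -ENOMEM;
		nfs_pgheader_init(desc, hdr, nfs_pgio_header_free);
		ret = nfs_generic_pgio(desc, hdr);	/* fills hdr->page_array and hdr->args */
		if (ret == 0)
			ret = nfs_initiate_pgio(NFS_CLIENT(hdr->inode), hdr,
						desc->pg_rpc_callops,
						desc->pg_ioflags, 0);
		/* no hdr->refcnt juggling: completion now runs from nfs_pgio_release() */
		return ret;
	}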
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index a8914b335617..a3851debf8a2 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -361,6 +361,23 @@ pnfs_put_lseg(struct pnfs_layout_segment *lseg) | |||
361 | } | 361 | } |
362 | EXPORT_SYMBOL_GPL(pnfs_put_lseg); | 362 | EXPORT_SYMBOL_GPL(pnfs_put_lseg); |
363 | 363 | ||
364 | static void pnfs_put_lseg_async_work(struct work_struct *work) | ||
365 | { | ||
366 | struct pnfs_layout_segment *lseg; | ||
367 | |||
368 | lseg = container_of(work, struct pnfs_layout_segment, pls_work); | ||
369 | |||
370 | pnfs_put_lseg(lseg); | ||
371 | } | ||
372 | |||
373 | void | ||
374 | pnfs_put_lseg_async(struct pnfs_layout_segment *lseg) | ||
375 | { | ||
376 | INIT_WORK(&lseg->pls_work, pnfs_put_lseg_async_work); | ||
377 | schedule_work(&lseg->pls_work); | ||
378 | } | ||
379 | EXPORT_SYMBOL_GPL(pnfs_put_lseg_async); | ||
380 | |||
364 | static u64 | 381 | static u64 |
365 | end_offset(u64 start, u64 len) | 382 | end_offset(u64 start, u64 len) |
366 | { | 383 | { |
@@ -1470,41 +1487,19 @@ pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev, | |||
1470 | } | 1487 | } |
1471 | EXPORT_SYMBOL_GPL(pnfs_generic_pg_test); | 1488 | EXPORT_SYMBOL_GPL(pnfs_generic_pg_test); |
1472 | 1489 | ||
1473 | int pnfs_write_done_resend_to_mds(struct inode *inode, | 1490 | int pnfs_write_done_resend_to_mds(struct nfs_pgio_header *hdr) |
1474 | struct list_head *head, | ||
1475 | const struct nfs_pgio_completion_ops *compl_ops, | ||
1476 | struct nfs_direct_req *dreq) | ||
1477 | { | 1491 | { |
1478 | struct nfs_pageio_descriptor pgio; | 1492 | struct nfs_pageio_descriptor pgio; |
1479 | LIST_HEAD(failed); | ||
1480 | 1493 | ||
1481 | /* Resend all requests through the MDS */ | 1494 | /* Resend all requests through the MDS */ |
1482 | nfs_pageio_init_write(&pgio, inode, FLUSH_STABLE, true, compl_ops); | 1495 | nfs_pageio_init_write(&pgio, hdr->inode, FLUSH_STABLE, true, |
1483 | pgio.pg_dreq = dreq; | 1496 | hdr->completion_ops); |
1484 | while (!list_empty(head)) { | 1497 | return nfs_pageio_resend(&pgio, hdr); |
1485 | struct nfs_page *req = nfs_list_entry(head->next); | ||
1486 | |||
1487 | nfs_list_remove_request(req); | ||
1488 | if (!nfs_pageio_add_request(&pgio, req)) | ||
1489 | nfs_list_add_request(req, &failed); | ||
1490 | } | ||
1491 | nfs_pageio_complete(&pgio); | ||
1492 | |||
1493 | if (!list_empty(&failed)) { | ||
1494 | /* For some reason our attempt to resend pages. Mark the | ||
1495 | * overall send request as having failed, and let | ||
1496 | * nfs_writeback_release_full deal with the error. | ||
1497 | */ | ||
1498 | list_move(&failed, head); | ||
1499 | return -EIO; | ||
1500 | } | ||
1501 | return 0; | ||
1502 | } | 1498 | } |
1503 | EXPORT_SYMBOL_GPL(pnfs_write_done_resend_to_mds); | 1499 | EXPORT_SYMBOL_GPL(pnfs_write_done_resend_to_mds); |
1504 | 1500 | ||
1505 | static void pnfs_ld_handle_write_error(struct nfs_pgio_data *data) | 1501 | static void pnfs_ld_handle_write_error(struct nfs_pgio_header *hdr) |
1506 | { | 1502 | { |
1507 | struct nfs_pgio_header *hdr = data->header; | ||
1508 | 1503 | ||
1509 | dprintk("pnfs write error = %d\n", hdr->pnfs_error); | 1504 | dprintk("pnfs write error = %d\n", hdr->pnfs_error); |
1510 | if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags & | 1505 | if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags & |
@@ -1512,50 +1507,42 @@ static void pnfs_ld_handle_write_error(struct nfs_pgio_data *data) | |||
1512 | pnfs_return_layout(hdr->inode); | 1507 | pnfs_return_layout(hdr->inode); |
1513 | } | 1508 | } |
1514 | if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) | 1509 | if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) |
1515 | data->task.tk_status = pnfs_write_done_resend_to_mds(hdr->inode, | 1510 | hdr->task.tk_status = pnfs_write_done_resend_to_mds(hdr); |
1516 | &hdr->pages, | ||
1517 | hdr->completion_ops, | ||
1518 | hdr->dreq); | ||
1519 | } | 1511 | } |
1520 | 1512 | ||
1521 | /* | 1513 | /* |
1522 | * Called by non rpc-based layout drivers | 1514 | * Called by non rpc-based layout drivers |
1523 | */ | 1515 | */ |
1524 | void pnfs_ld_write_done(struct nfs_pgio_data *data) | 1516 | void pnfs_ld_write_done(struct nfs_pgio_header *hdr) |
1525 | { | 1517 | { |
1526 | struct nfs_pgio_header *hdr = data->header; | 1518 | trace_nfs4_pnfs_write(hdr, hdr->pnfs_error); |
1527 | |||
1528 | trace_nfs4_pnfs_write(data, hdr->pnfs_error); | ||
1529 | if (!hdr->pnfs_error) { | 1519 | if (!hdr->pnfs_error) { |
1530 | pnfs_set_layoutcommit(data); | 1520 | pnfs_set_layoutcommit(hdr); |
1531 | hdr->mds_ops->rpc_call_done(&data->task, data); | 1521 | hdr->mds_ops->rpc_call_done(&hdr->task, hdr); |
1532 | } else | 1522 | } else |
1533 | pnfs_ld_handle_write_error(data); | 1523 | pnfs_ld_handle_write_error(hdr); |
1534 | hdr->mds_ops->rpc_release(data); | 1524 | hdr->mds_ops->rpc_release(hdr); |
1535 | } | 1525 | } |
1536 | EXPORT_SYMBOL_GPL(pnfs_ld_write_done); | 1526 | EXPORT_SYMBOL_GPL(pnfs_ld_write_done); |
1537 | 1527 | ||
1538 | static void | 1528 | static void |
1539 | pnfs_write_through_mds(struct nfs_pageio_descriptor *desc, | 1529 | pnfs_write_through_mds(struct nfs_pageio_descriptor *desc, |
1540 | struct nfs_pgio_data *data) | 1530 | struct nfs_pgio_header *hdr) |
1541 | { | 1531 | { |
1542 | struct nfs_pgio_header *hdr = data->header; | ||
1543 | |||
1544 | if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) { | 1532 | if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) { |
1545 | list_splice_tail_init(&hdr->pages, &desc->pg_list); | 1533 | list_splice_tail_init(&hdr->pages, &desc->pg_list); |
1546 | nfs_pageio_reset_write_mds(desc); | 1534 | nfs_pageio_reset_write_mds(desc); |
1547 | desc->pg_recoalesce = 1; | 1535 | desc->pg_recoalesce = 1; |
1548 | } | 1536 | } |
1549 | nfs_pgio_data_release(data); | 1537 | nfs_pgio_data_destroy(hdr); |
1550 | } | 1538 | } |
1551 | 1539 | ||
1552 | static enum pnfs_try_status | 1540 | static enum pnfs_try_status |
1553 | pnfs_try_to_write_data(struct nfs_pgio_data *wdata, | 1541 | pnfs_try_to_write_data(struct nfs_pgio_header *hdr, |
1554 | const struct rpc_call_ops *call_ops, | 1542 | const struct rpc_call_ops *call_ops, |
1555 | struct pnfs_layout_segment *lseg, | 1543 | struct pnfs_layout_segment *lseg, |
1556 | int how) | 1544 | int how) |
1557 | { | 1545 | { |
1558 | struct nfs_pgio_header *hdr = wdata->header; | ||
1559 | struct inode *inode = hdr->inode; | 1546 | struct inode *inode = hdr->inode; |
1560 | enum pnfs_try_status trypnfs; | 1547 | enum pnfs_try_status trypnfs; |
1561 | struct nfs_server *nfss = NFS_SERVER(inode); | 1548 | struct nfs_server *nfss = NFS_SERVER(inode); |
@@ -1563,8 +1550,8 @@ pnfs_try_to_write_data(struct nfs_pgio_data *wdata, | |||
1563 | hdr->mds_ops = call_ops; | 1550 | hdr->mds_ops = call_ops; |
1564 | 1551 | ||
1565 | dprintk("%s: Writing ino:%lu %u@%llu (how %d)\n", __func__, | 1552 | dprintk("%s: Writing ino:%lu %u@%llu (how %d)\n", __func__, |
1566 | inode->i_ino, wdata->args.count, wdata->args.offset, how); | 1553 | inode->i_ino, hdr->args.count, hdr->args.offset, how); |
1567 | trypnfs = nfss->pnfs_curr_ld->write_pagelist(wdata, how); | 1554 | trypnfs = nfss->pnfs_curr_ld->write_pagelist(hdr, how); |
1568 | if (trypnfs != PNFS_NOT_ATTEMPTED) | 1555 | if (trypnfs != PNFS_NOT_ATTEMPTED) |
1569 | nfs_inc_stats(inode, NFSIOS_PNFS_WRITE); | 1556 | nfs_inc_stats(inode, NFSIOS_PNFS_WRITE); |
1570 | dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs); | 1557 | dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs); |
@@ -1575,139 +1562,105 @@ static void | |||
1575 | pnfs_do_write(struct nfs_pageio_descriptor *desc, | 1562 | pnfs_do_write(struct nfs_pageio_descriptor *desc, |
1576 | struct nfs_pgio_header *hdr, int how) | 1563 | struct nfs_pgio_header *hdr, int how) |
1577 | { | 1564 | { |
1578 | struct nfs_pgio_data *data = hdr->data; | ||
1579 | const struct rpc_call_ops *call_ops = desc->pg_rpc_callops; | 1565 | const struct rpc_call_ops *call_ops = desc->pg_rpc_callops; |
1580 | struct pnfs_layout_segment *lseg = desc->pg_lseg; | 1566 | struct pnfs_layout_segment *lseg = desc->pg_lseg; |
1581 | enum pnfs_try_status trypnfs; | 1567 | enum pnfs_try_status trypnfs; |
1582 | 1568 | ||
1583 | desc->pg_lseg = NULL; | 1569 | desc->pg_lseg = NULL; |
1584 | trypnfs = pnfs_try_to_write_data(data, call_ops, lseg, how); | 1570 | trypnfs = pnfs_try_to_write_data(hdr, call_ops, lseg, how); |
1585 | if (trypnfs == PNFS_NOT_ATTEMPTED) | 1571 | if (trypnfs == PNFS_NOT_ATTEMPTED) |
1586 | pnfs_write_through_mds(desc, data); | 1572 | pnfs_write_through_mds(desc, hdr); |
1587 | pnfs_put_lseg(lseg); | 1573 | pnfs_put_lseg(lseg); |
1588 | } | 1574 | } |
1589 | 1575 | ||
1590 | static void pnfs_writehdr_free(struct nfs_pgio_header *hdr) | 1576 | static void pnfs_writehdr_free(struct nfs_pgio_header *hdr) |
1591 | { | 1577 | { |
1592 | pnfs_put_lseg(hdr->lseg); | 1578 | pnfs_put_lseg(hdr->lseg); |
1593 | nfs_rw_header_free(hdr); | 1579 | nfs_pgio_header_free(hdr); |
1594 | } | 1580 | } |
1595 | EXPORT_SYMBOL_GPL(pnfs_writehdr_free); | 1581 | EXPORT_SYMBOL_GPL(pnfs_writehdr_free); |
1596 | 1582 | ||
1597 | int | 1583 | int |
1598 | pnfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc) | 1584 | pnfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc) |
1599 | { | 1585 | { |
1600 | struct nfs_rw_header *whdr; | ||
1601 | struct nfs_pgio_header *hdr; | 1586 | struct nfs_pgio_header *hdr; |
1602 | int ret; | 1587 | int ret; |
1603 | 1588 | ||
1604 | whdr = nfs_rw_header_alloc(desc->pg_rw_ops); | 1589 | hdr = nfs_pgio_header_alloc(desc->pg_rw_ops); |
1605 | if (!whdr) { | 1590 | if (!hdr) { |
1606 | desc->pg_completion_ops->error_cleanup(&desc->pg_list); | 1591 | desc->pg_completion_ops->error_cleanup(&desc->pg_list); |
1607 | pnfs_put_lseg(desc->pg_lseg); | 1592 | pnfs_put_lseg(desc->pg_lseg); |
1608 | desc->pg_lseg = NULL; | 1593 | desc->pg_lseg = NULL; |
1609 | return -ENOMEM; | 1594 | return -ENOMEM; |
1610 | } | 1595 | } |
1611 | hdr = &whdr->header; | ||
1612 | nfs_pgheader_init(desc, hdr, pnfs_writehdr_free); | 1596 | nfs_pgheader_init(desc, hdr, pnfs_writehdr_free); |
1613 | hdr->lseg = pnfs_get_lseg(desc->pg_lseg); | 1597 | hdr->lseg = pnfs_get_lseg(desc->pg_lseg); |
1614 | atomic_inc(&hdr->refcnt); | ||
1615 | ret = nfs_generic_pgio(desc, hdr); | 1598 | ret = nfs_generic_pgio(desc, hdr); |
1616 | if (ret != 0) { | 1599 | if (ret != 0) { |
1617 | pnfs_put_lseg(desc->pg_lseg); | 1600 | pnfs_put_lseg(desc->pg_lseg); |
1618 | desc->pg_lseg = NULL; | 1601 | desc->pg_lseg = NULL; |
1619 | } else | 1602 | } else |
1620 | pnfs_do_write(desc, hdr, desc->pg_ioflags); | 1603 | pnfs_do_write(desc, hdr, desc->pg_ioflags); |
1621 | if (atomic_dec_and_test(&hdr->refcnt)) | ||
1622 | hdr->completion_ops->completion(hdr); | ||
1623 | return ret; | 1604 | return ret; |
1624 | } | 1605 | } |
1625 | EXPORT_SYMBOL_GPL(pnfs_generic_pg_writepages); | 1606 | EXPORT_SYMBOL_GPL(pnfs_generic_pg_writepages); |
1626 | 1607 | ||
1627 | int pnfs_read_done_resend_to_mds(struct inode *inode, | 1608 | int pnfs_read_done_resend_to_mds(struct nfs_pgio_header *hdr) |
1628 | struct list_head *head, | ||
1629 | const struct nfs_pgio_completion_ops *compl_ops, | ||
1630 | struct nfs_direct_req *dreq) | ||
1631 | { | 1609 | { |
1632 | struct nfs_pageio_descriptor pgio; | 1610 | struct nfs_pageio_descriptor pgio; |
1633 | LIST_HEAD(failed); | ||
1634 | 1611 | ||
1635 | /* Resend all requests through the MDS */ | 1612 | /* Resend all requests through the MDS */ |
1636 | nfs_pageio_init_read(&pgio, inode, true, compl_ops); | 1613 | nfs_pageio_init_read(&pgio, hdr->inode, true, hdr->completion_ops); |
1637 | pgio.pg_dreq = dreq; | 1614 | return nfs_pageio_resend(&pgio, hdr); |
1638 | while (!list_empty(head)) { | ||
1639 | struct nfs_page *req = nfs_list_entry(head->next); | ||
1640 | |||
1641 | nfs_list_remove_request(req); | ||
1642 | if (!nfs_pageio_add_request(&pgio, req)) | ||
1643 | nfs_list_add_request(req, &failed); | ||
1644 | } | ||
1645 | nfs_pageio_complete(&pgio); | ||
1646 | |||
1647 | if (!list_empty(&failed)) { | ||
1648 | list_move(&failed, head); | ||
1649 | return -EIO; | ||
1650 | } | ||
1651 | return 0; | ||
1652 | } | 1615 | } |
1653 | EXPORT_SYMBOL_GPL(pnfs_read_done_resend_to_mds); | 1616 | EXPORT_SYMBOL_GPL(pnfs_read_done_resend_to_mds); |
1654 | 1617 | ||
1655 | static void pnfs_ld_handle_read_error(struct nfs_pgio_data *data) | 1618 | static void pnfs_ld_handle_read_error(struct nfs_pgio_header *hdr) |
1656 | { | 1619 | { |
1657 | struct nfs_pgio_header *hdr = data->header; | ||
1658 | |||
1659 | dprintk("pnfs read error = %d\n", hdr->pnfs_error); | 1620 | dprintk("pnfs read error = %d\n", hdr->pnfs_error); |
1660 | if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags & | 1621 | if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags & |
1661 | PNFS_LAYOUTRET_ON_ERROR) { | 1622 | PNFS_LAYOUTRET_ON_ERROR) { |
1662 | pnfs_return_layout(hdr->inode); | 1623 | pnfs_return_layout(hdr->inode); |
1663 | } | 1624 | } |
1664 | if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) | 1625 | if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) |
1665 | data->task.tk_status = pnfs_read_done_resend_to_mds(hdr->inode, | 1626 | hdr->task.tk_status = pnfs_read_done_resend_to_mds(hdr); |
1666 | &hdr->pages, | ||
1667 | hdr->completion_ops, | ||
1668 | hdr->dreq); | ||
1669 | } | 1627 | } |
1670 | 1628 | ||
1671 | /* | 1629 | /* |
1672 | * Called by non rpc-based layout drivers | 1630 | * Called by non rpc-based layout drivers |
1673 | */ | 1631 | */ |
1674 | void pnfs_ld_read_done(struct nfs_pgio_data *data) | 1632 | void pnfs_ld_read_done(struct nfs_pgio_header *hdr) |
1675 | { | 1633 | { |
1676 | struct nfs_pgio_header *hdr = data->header; | 1634 | trace_nfs4_pnfs_read(hdr, hdr->pnfs_error); |
1677 | |||
1678 | trace_nfs4_pnfs_read(data, hdr->pnfs_error); | ||
1679 | if (likely(!hdr->pnfs_error)) { | 1635 | if (likely(!hdr->pnfs_error)) { |
1680 | __nfs4_read_done_cb(data); | 1636 | __nfs4_read_done_cb(hdr); |
1681 | hdr->mds_ops->rpc_call_done(&data->task, data); | 1637 | hdr->mds_ops->rpc_call_done(&hdr->task, hdr); |
1682 | } else | 1638 | } else |
1683 | pnfs_ld_handle_read_error(data); | 1639 | pnfs_ld_handle_read_error(hdr); |
1684 | hdr->mds_ops->rpc_release(data); | 1640 | hdr->mds_ops->rpc_release(hdr); |
1685 | } | 1641 | } |
1686 | EXPORT_SYMBOL_GPL(pnfs_ld_read_done); | 1642 | EXPORT_SYMBOL_GPL(pnfs_ld_read_done); |
1687 | 1643 | ||
1688 | static void | 1644 | static void |
1689 | pnfs_read_through_mds(struct nfs_pageio_descriptor *desc, | 1645 | pnfs_read_through_mds(struct nfs_pageio_descriptor *desc, |
1690 | struct nfs_pgio_data *data) | 1646 | struct nfs_pgio_header *hdr) |
1691 | { | 1647 | { |
1692 | struct nfs_pgio_header *hdr = data->header; | ||
1693 | |||
1694 | if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) { | 1648 | if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) { |
1695 | list_splice_tail_init(&hdr->pages, &desc->pg_list); | 1649 | list_splice_tail_init(&hdr->pages, &desc->pg_list); |
1696 | nfs_pageio_reset_read_mds(desc); | 1650 | nfs_pageio_reset_read_mds(desc); |
1697 | desc->pg_recoalesce = 1; | 1651 | desc->pg_recoalesce = 1; |
1698 | } | 1652 | } |
1699 | nfs_pgio_data_release(data); | 1653 | nfs_pgio_data_destroy(hdr); |
1700 | } | 1654 | } |
1701 | 1655 | ||
1702 | /* | 1656 | /* |
1703 | * Call the appropriate parallel I/O subsystem read function. | 1657 | * Call the appropriate parallel I/O subsystem read function. |
1704 | */ | 1658 | */ |
1705 | static enum pnfs_try_status | 1659 | static enum pnfs_try_status |
1706 | pnfs_try_to_read_data(struct nfs_pgio_data *rdata, | 1660 | pnfs_try_to_read_data(struct nfs_pgio_header *hdr, |
1707 | const struct rpc_call_ops *call_ops, | 1661 | const struct rpc_call_ops *call_ops, |
1708 | struct pnfs_layout_segment *lseg) | 1662 | struct pnfs_layout_segment *lseg) |
1709 | { | 1663 | { |
1710 | struct nfs_pgio_header *hdr = rdata->header; | ||
1711 | struct inode *inode = hdr->inode; | 1664 | struct inode *inode = hdr->inode; |
1712 | struct nfs_server *nfss = NFS_SERVER(inode); | 1665 | struct nfs_server *nfss = NFS_SERVER(inode); |
1713 | enum pnfs_try_status trypnfs; | 1666 | enum pnfs_try_status trypnfs; |
@@ -1715,9 +1668,9 @@ pnfs_try_to_read_data(struct nfs_pgio_data *rdata, | |||
1715 | hdr->mds_ops = call_ops; | 1668 | hdr->mds_ops = call_ops; |
1716 | 1669 | ||
1717 | dprintk("%s: Reading ino:%lu %u@%llu\n", | 1670 | dprintk("%s: Reading ino:%lu %u@%llu\n", |
1718 | __func__, inode->i_ino, rdata->args.count, rdata->args.offset); | 1671 | __func__, inode->i_ino, hdr->args.count, hdr->args.offset); |
1719 | 1672 | ||
1720 | trypnfs = nfss->pnfs_curr_ld->read_pagelist(rdata); | 1673 | trypnfs = nfss->pnfs_curr_ld->read_pagelist(hdr); |
1721 | if (trypnfs != PNFS_NOT_ATTEMPTED) | 1674 | if (trypnfs != PNFS_NOT_ATTEMPTED) |
1722 | nfs_inc_stats(inode, NFSIOS_PNFS_READ); | 1675 | nfs_inc_stats(inode, NFSIOS_PNFS_READ); |
1723 | dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs); | 1676 | dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs); |
@@ -1727,52 +1680,46 @@ pnfs_try_to_read_data(struct nfs_pgio_data *rdata, | |||
1727 | static void | 1680 | static void |
1728 | pnfs_do_read(struct nfs_pageio_descriptor *desc, struct nfs_pgio_header *hdr) | 1681 | pnfs_do_read(struct nfs_pageio_descriptor *desc, struct nfs_pgio_header *hdr) |
1729 | { | 1682 | { |
1730 | struct nfs_pgio_data *data = hdr->data; | ||
1731 | const struct rpc_call_ops *call_ops = desc->pg_rpc_callops; | 1683 | const struct rpc_call_ops *call_ops = desc->pg_rpc_callops; |
1732 | struct pnfs_layout_segment *lseg = desc->pg_lseg; | 1684 | struct pnfs_layout_segment *lseg = desc->pg_lseg; |
1733 | enum pnfs_try_status trypnfs; | 1685 | enum pnfs_try_status trypnfs; |
1734 | 1686 | ||
1735 | desc->pg_lseg = NULL; | 1687 | desc->pg_lseg = NULL; |
1736 | trypnfs = pnfs_try_to_read_data(data, call_ops, lseg); | 1688 | trypnfs = pnfs_try_to_read_data(hdr, call_ops, lseg); |
1737 | if (trypnfs == PNFS_NOT_ATTEMPTED) | 1689 | if (trypnfs == PNFS_NOT_ATTEMPTED) |
1738 | pnfs_read_through_mds(desc, data); | 1690 | pnfs_read_through_mds(desc, hdr); |
1739 | pnfs_put_lseg(lseg); | 1691 | pnfs_put_lseg(lseg); |
1740 | } | 1692 | } |
1741 | 1693 | ||
1742 | static void pnfs_readhdr_free(struct nfs_pgio_header *hdr) | 1694 | static void pnfs_readhdr_free(struct nfs_pgio_header *hdr) |
1743 | { | 1695 | { |
1744 | pnfs_put_lseg(hdr->lseg); | 1696 | pnfs_put_lseg(hdr->lseg); |
1745 | nfs_rw_header_free(hdr); | 1697 | nfs_pgio_header_free(hdr); |
1746 | } | 1698 | } |
1747 | EXPORT_SYMBOL_GPL(pnfs_readhdr_free); | 1699 | EXPORT_SYMBOL_GPL(pnfs_readhdr_free); |
1748 | 1700 | ||
1749 | int | 1701 | int |
1750 | pnfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc) | 1702 | pnfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc) |
1751 | { | 1703 | { |
1752 | struct nfs_rw_header *rhdr; | ||
1753 | struct nfs_pgio_header *hdr; | 1704 | struct nfs_pgio_header *hdr; |
1754 | int ret; | 1705 | int ret; |
1755 | 1706 | ||
1756 | rhdr = nfs_rw_header_alloc(desc->pg_rw_ops); | 1707 | hdr = nfs_pgio_header_alloc(desc->pg_rw_ops); |
1757 | if (!rhdr) { | 1708 | if (!hdr) { |
1758 | desc->pg_completion_ops->error_cleanup(&desc->pg_list); | 1709 | desc->pg_completion_ops->error_cleanup(&desc->pg_list); |
1759 | ret = -ENOMEM; | 1710 | ret = -ENOMEM; |
1760 | pnfs_put_lseg(desc->pg_lseg); | 1711 | pnfs_put_lseg(desc->pg_lseg); |
1761 | desc->pg_lseg = NULL; | 1712 | desc->pg_lseg = NULL; |
1762 | return ret; | 1713 | return ret; |
1763 | } | 1714 | } |
1764 | hdr = &rhdr->header; | ||
1765 | nfs_pgheader_init(desc, hdr, pnfs_readhdr_free); | 1715 | nfs_pgheader_init(desc, hdr, pnfs_readhdr_free); |
1766 | hdr->lseg = pnfs_get_lseg(desc->pg_lseg); | 1716 | hdr->lseg = pnfs_get_lseg(desc->pg_lseg); |
1767 | atomic_inc(&hdr->refcnt); | ||
1768 | ret = nfs_generic_pgio(desc, hdr); | 1717 | ret = nfs_generic_pgio(desc, hdr); |
1769 | if (ret != 0) { | 1718 | if (ret != 0) { |
1770 | pnfs_put_lseg(desc->pg_lseg); | 1719 | pnfs_put_lseg(desc->pg_lseg); |
1771 | desc->pg_lseg = NULL; | 1720 | desc->pg_lseg = NULL; |
1772 | } else | 1721 | } else |
1773 | pnfs_do_read(desc, hdr); | 1722 | pnfs_do_read(desc, hdr); |
1774 | if (atomic_dec_and_test(&hdr->refcnt)) | ||
1775 | hdr->completion_ops->completion(hdr); | ||
1776 | return ret; | 1723 | return ret; |
1777 | } | 1724 | } |
1778 | EXPORT_SYMBOL_GPL(pnfs_generic_pg_readpages); | 1725 | EXPORT_SYMBOL_GPL(pnfs_generic_pg_readpages); |
@@ -1820,12 +1767,11 @@ void pnfs_set_lo_fail(struct pnfs_layout_segment *lseg) | |||
1820 | EXPORT_SYMBOL_GPL(pnfs_set_lo_fail); | 1767 | EXPORT_SYMBOL_GPL(pnfs_set_lo_fail); |
1821 | 1768 | ||
1822 | void | 1769 | void |
1823 | pnfs_set_layoutcommit(struct nfs_pgio_data *wdata) | 1770 | pnfs_set_layoutcommit(struct nfs_pgio_header *hdr) |
1824 | { | 1771 | { |
1825 | struct nfs_pgio_header *hdr = wdata->header; | ||
1826 | struct inode *inode = hdr->inode; | 1772 | struct inode *inode = hdr->inode; |
1827 | struct nfs_inode *nfsi = NFS_I(inode); | 1773 | struct nfs_inode *nfsi = NFS_I(inode); |
1828 | loff_t end_pos = wdata->mds_offset + wdata->res.count; | 1774 | loff_t end_pos = hdr->mds_offset + hdr->res.count; |
1829 | bool mark_as_dirty = false; | 1775 | bool mark_as_dirty = false; |
1830 | 1776 | ||
1831 | spin_lock(&inode->i_lock); | 1777 | spin_lock(&inode->i_lock); |
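The new pnfs_put_lseg_async above defers the final reference drop to a work item, so a segment can be released from a context that should not block on inode or layout locks. A hedged usage sketch follows; the helper shown is hypothetical, only the two put functions come from this file.

	/* Sketch: hypothetical release helper; prefer the async put when the
	 * caller cannot afford to sleep on the final reference drop. */
	static void example_put_lseg(struct pnfs_layout_segment *lseg, bool may_sleep)
	{
		if (may_sleep)
			pnfs_put_lseg(lseg);		/* immediate put */
		else
			pnfs_put_lseg_async(lseg);	/* queued via lseg->pls_work */
	}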
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
index 4fb309a2b4c4..aca3dff5dae6 100644
--- a/fs/nfs/pnfs.h
+++ b/fs/nfs/pnfs.h
@@ -32,6 +32,7 @@ | |||
32 | 32 | ||
33 | #include <linux/nfs_fs.h> | 33 | #include <linux/nfs_fs.h> |
34 | #include <linux/nfs_page.h> | 34 | #include <linux/nfs_page.h> |
35 | #include <linux/workqueue.h> | ||
35 | 36 | ||
36 | enum { | 37 | enum { |
37 | NFS_LSEG_VALID = 0, /* cleared when lseg is recalled/returned */ | 38 | NFS_LSEG_VALID = 0, /* cleared when lseg is recalled/returned */ |
@@ -46,6 +47,7 @@ struct pnfs_layout_segment { | |||
46 | atomic_t pls_refcount; | 47 | atomic_t pls_refcount; |
47 | unsigned long pls_flags; | 48 | unsigned long pls_flags; |
48 | struct pnfs_layout_hdr *pls_layout; | 49 | struct pnfs_layout_hdr *pls_layout; |
50 | struct work_struct pls_work; | ||
49 | }; | 51 | }; |
50 | 52 | ||
51 | enum pnfs_try_status { | 53 | enum pnfs_try_status { |
@@ -104,6 +106,8 @@ struct pnfs_layoutdriver_type { | |||
104 | int max); | 106 | int max); |
105 | void (*recover_commit_reqs) (struct list_head *list, | 107 | void (*recover_commit_reqs) (struct list_head *list, |
106 | struct nfs_commit_info *cinfo); | 108 | struct nfs_commit_info *cinfo); |
109 | struct nfs_page * (*search_commit_reqs)(struct nfs_commit_info *cinfo, | ||
110 | struct page *page); | ||
107 | int (*commit_pagelist)(struct inode *inode, | 111 | int (*commit_pagelist)(struct inode *inode, |
108 | struct list_head *mds_pages, | 112 | struct list_head *mds_pages, |
109 | int how, | 113 | int how, |
@@ -113,8 +117,8 @@ struct pnfs_layoutdriver_type { | |||
113 | * Return PNFS_ATTEMPTED to indicate the layout code has attempted | 117 | * Return PNFS_ATTEMPTED to indicate the layout code has attempted |
114 | * I/O, else return PNFS_NOT_ATTEMPTED to fall back to normal NFS | 118 | * I/O, else return PNFS_NOT_ATTEMPTED to fall back to normal NFS |
115 | */ | 119 | */ |
116 | enum pnfs_try_status (*read_pagelist) (struct nfs_pgio_data *nfs_data); | 120 | enum pnfs_try_status (*read_pagelist)(struct nfs_pgio_header *); |
117 | enum pnfs_try_status (*write_pagelist) (struct nfs_pgio_data *nfs_data, int how); | 121 | enum pnfs_try_status (*write_pagelist)(struct nfs_pgio_header *, int); |
118 | 122 | ||
119 | void (*free_deviceid_node) (struct nfs4_deviceid_node *); | 123 | void (*free_deviceid_node) (struct nfs4_deviceid_node *); |
120 | 124 | ||
@@ -179,6 +183,7 @@ extern int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp); | |||
179 | /* pnfs.c */ | 183 | /* pnfs.c */ |
180 | void pnfs_get_layout_hdr(struct pnfs_layout_hdr *lo); | 184 | void pnfs_get_layout_hdr(struct pnfs_layout_hdr *lo); |
181 | void pnfs_put_lseg(struct pnfs_layout_segment *lseg); | 185 | void pnfs_put_lseg(struct pnfs_layout_segment *lseg); |
186 | void pnfs_put_lseg_async(struct pnfs_layout_segment *lseg); | ||
182 | 187 | ||
183 | void set_pnfs_layoutdriver(struct nfs_server *, const struct nfs_fh *, u32); | 188 | void set_pnfs_layoutdriver(struct nfs_server *, const struct nfs_fh *, u32); |
184 | void unset_pnfs_layoutdriver(struct nfs_server *); | 189 | void unset_pnfs_layoutdriver(struct nfs_server *); |
@@ -213,13 +218,13 @@ bool pnfs_roc(struct inode *ino); | |||
213 | void pnfs_roc_release(struct inode *ino); | 218 | void pnfs_roc_release(struct inode *ino); |
214 | void pnfs_roc_set_barrier(struct inode *ino, u32 barrier); | 219 | void pnfs_roc_set_barrier(struct inode *ino, u32 barrier); |
215 | bool pnfs_roc_drain(struct inode *ino, u32 *barrier, struct rpc_task *task); | 220 | bool pnfs_roc_drain(struct inode *ino, u32 *barrier, struct rpc_task *task); |
216 | void pnfs_set_layoutcommit(struct nfs_pgio_data *wdata); | 221 | void pnfs_set_layoutcommit(struct nfs_pgio_header *); |
217 | void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data); | 222 | void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data); |
218 | int pnfs_layoutcommit_inode(struct inode *inode, bool sync); | 223 | int pnfs_layoutcommit_inode(struct inode *inode, bool sync); |
219 | int _pnfs_return_layout(struct inode *); | 224 | int _pnfs_return_layout(struct inode *); |
220 | int pnfs_commit_and_return_layout(struct inode *); | 225 | int pnfs_commit_and_return_layout(struct inode *); |
221 | void pnfs_ld_write_done(struct nfs_pgio_data *); | 226 | void pnfs_ld_write_done(struct nfs_pgio_header *); |
222 | void pnfs_ld_read_done(struct nfs_pgio_data *); | 227 | void pnfs_ld_read_done(struct nfs_pgio_header *); |
223 | struct pnfs_layout_segment *pnfs_update_layout(struct inode *ino, | 228 | struct pnfs_layout_segment *pnfs_update_layout(struct inode *ino, |
224 | struct nfs_open_context *ctx, | 229 | struct nfs_open_context *ctx, |
225 | loff_t pos, | 230 | loff_t pos, |
@@ -228,12 +233,8 @@ struct pnfs_layout_segment *pnfs_update_layout(struct inode *ino, | |||
228 | gfp_t gfp_flags); | 233 | gfp_t gfp_flags); |
229 | 234 | ||
230 | void nfs4_deviceid_mark_client_invalid(struct nfs_client *clp); | 235 | void nfs4_deviceid_mark_client_invalid(struct nfs_client *clp); |
231 | int pnfs_read_done_resend_to_mds(struct inode *inode, struct list_head *head, | 236 | int pnfs_read_done_resend_to_mds(struct nfs_pgio_header *); |
232 | const struct nfs_pgio_completion_ops *compl_ops, | 237 | int pnfs_write_done_resend_to_mds(struct nfs_pgio_header *); |
233 | struct nfs_direct_req *dreq); | ||
234 | int pnfs_write_done_resend_to_mds(struct inode *inode, struct list_head *head, | ||
235 | const struct nfs_pgio_completion_ops *compl_ops, | ||
236 | struct nfs_direct_req *dreq); | ||
237 | struct nfs4_threshold *pnfs_mdsthreshold_alloc(void); | 238 | struct nfs4_threshold *pnfs_mdsthreshold_alloc(void); |
238 | 239 | ||
239 | /* nfs4_deviceid_flags */ | 240 | /* nfs4_deviceid_flags */ |
@@ -345,6 +346,17 @@ pnfs_recover_commit_reqs(struct inode *inode, struct list_head *list, | |||
345 | NFS_SERVER(inode)->pnfs_curr_ld->recover_commit_reqs(list, cinfo); | 346 | NFS_SERVER(inode)->pnfs_curr_ld->recover_commit_reqs(list, cinfo); |
346 | } | 347 | } |
347 | 348 | ||
349 | static inline struct nfs_page * | ||
350 | pnfs_search_commit_reqs(struct inode *inode, struct nfs_commit_info *cinfo, | ||
351 | struct page *page) | ||
352 | { | ||
353 | struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld; | ||
354 | |||
355 | if (ld == NULL || ld->search_commit_reqs == NULL) | ||
356 | return NULL; | ||
357 | return ld->search_commit_reqs(cinfo, page); | ||
358 | } | ||
359 | |||
348 | /* Should the pNFS client commit and return the layout upon a setattr */ | 360 | /* Should the pNFS client commit and return the layout upon a setattr */ |
349 | static inline bool | 361 | static inline bool |
350 | pnfs_ld_layoutret_on_setattr(struct inode *inode) | 362 | pnfs_ld_layoutret_on_setattr(struct inode *inode) |
@@ -410,6 +422,10 @@ static inline void pnfs_put_lseg(struct pnfs_layout_segment *lseg) | |||
410 | { | 422 | { |
411 | } | 423 | } |
412 | 424 | ||
425 | static inline void pnfs_put_lseg_async(struct pnfs_layout_segment *lseg) | ||
426 | { | ||
427 | } | ||
428 | |||
413 | static inline int pnfs_return_layout(struct inode *ino) | 429 | static inline int pnfs_return_layout(struct inode *ino) |
414 | { | 430 | { |
415 | return 0; | 431 | return 0; |
@@ -496,6 +512,13 @@ pnfs_recover_commit_reqs(struct inode *inode, struct list_head *list, | |||
496 | { | 512 | { |
497 | } | 513 | } |
498 | 514 | ||
515 | static inline struct nfs_page * | ||
516 | pnfs_search_commit_reqs(struct inode *inode, struct nfs_commit_info *cinfo, | ||
517 | struct page *page) | ||
518 | { | ||
519 | return NULL; | ||
520 | } | ||
521 | |||
499 | static inline int pnfs_layoutcommit_inode(struct inode *inode, bool sync) | 522 | static inline int pnfs_layoutcommit_inode(struct inode *inode, bool sync) |
500 | { | 523 | { |
501 | return 0; | 524 | return 0; |
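Because both pnfs_search_commit_reqs() and its stub in the !pNFS branch return NULL when no layout driver hook is present, callers can consult the layout driver first and fall back to their own commit lists without any ifdefs. A hedged caller sketch (the surrounding lookup is hypothetical):

	/* Sketch: hypothetical commit-request lookup; the pnfs helper returns NULL
	 * when there is no layout driver or no search_commit_reqs hook. */
	static struct nfs_page *
	example_search_commit(struct inode *inode, struct nfs_commit_info *cinfo,
			      struct page *page)
	{
		struct nfs_page *req = pnfs_search_commit_reqs(inode, cinfo, page);

		if (req != NULL)
			return req;	/* owned by the layout driver */
		/* ... otherwise the caller would search its generic commit list ... */
		return NULL;
	}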
diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c
index c171ce1a8a30..b09cc23d6f43 100644
--- a/fs/nfs/proc.c
+++ b/fs/nfs/proc.c
@@ -578,46 +578,49 @@ nfs_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle, | |||
578 | return 0; | 578 | return 0; |
579 | } | 579 | } |
580 | 580 | ||
581 | static int nfs_read_done(struct rpc_task *task, struct nfs_pgio_data *data) | 581 | static int nfs_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr) |
582 | { | 582 | { |
583 | struct inode *inode = data->header->inode; | 583 | struct inode *inode = hdr->inode; |
584 | 584 | ||
585 | nfs_invalidate_atime(inode); | 585 | nfs_invalidate_atime(inode); |
586 | if (task->tk_status >= 0) { | 586 | if (task->tk_status >= 0) { |
587 | nfs_refresh_inode(inode, data->res.fattr); | 587 | nfs_refresh_inode(inode, hdr->res.fattr); |
588 | /* Emulate the eof flag, which isn't normally needed in NFSv2 | 588 | /* Emulate the eof flag, which isn't normally needed in NFSv2 |
589 | * as it is guaranteed to always return the file attributes | 589 | * as it is guaranteed to always return the file attributes |
590 | */ | 590 | */ |
591 | if (data->args.offset + data->res.count >= data->res.fattr->size) | 591 | if (hdr->args.offset + hdr->res.count >= hdr->res.fattr->size) |
592 | data->res.eof = 1; | 592 | hdr->res.eof = 1; |
593 | } | 593 | } |
594 | return 0; | 594 | return 0; |
595 | } | 595 | } |
596 | 596 | ||
597 | static void nfs_proc_read_setup(struct nfs_pgio_data *data, struct rpc_message *msg) | 597 | static void nfs_proc_read_setup(struct nfs_pgio_header *hdr, |
598 | struct rpc_message *msg) | ||
598 | { | 599 | { |
599 | msg->rpc_proc = &nfs_procedures[NFSPROC_READ]; | 600 | msg->rpc_proc = &nfs_procedures[NFSPROC_READ]; |
600 | } | 601 | } |
601 | 602 | ||
602 | static int nfs_proc_pgio_rpc_prepare(struct rpc_task *task, struct nfs_pgio_data *data) | 603 | static int nfs_proc_pgio_rpc_prepare(struct rpc_task *task, |
604 | struct nfs_pgio_header *hdr) | ||
603 | { | 605 | { |
604 | rpc_call_start(task); | 606 | rpc_call_start(task); |
605 | return 0; | 607 | return 0; |
606 | } | 608 | } |
607 | 609 | ||
608 | static int nfs_write_done(struct rpc_task *task, struct nfs_pgio_data *data) | 610 | static int nfs_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr) |
609 | { | 611 | { |
610 | struct inode *inode = data->header->inode; | 612 | struct inode *inode = hdr->inode; |
611 | 613 | ||
612 | if (task->tk_status >= 0) | 614 | if (task->tk_status >= 0) |
613 | nfs_post_op_update_inode_force_wcc(inode, data->res.fattr); | 615 | nfs_post_op_update_inode_force_wcc(inode, hdr->res.fattr); |
614 | return 0; | 616 | return 0; |
615 | } | 617 | } |
616 | 618 | ||
617 | static void nfs_proc_write_setup(struct nfs_pgio_data *data, struct rpc_message *msg) | 619 | static void nfs_proc_write_setup(struct nfs_pgio_header *hdr, |
620 | struct rpc_message *msg) | ||
618 | { | 621 | { |
619 | /* Note: NFSv2 ignores @stable and always uses NFS_FILE_SYNC */ | 622 | /* Note: NFSv2 ignores @stable and always uses NFS_FILE_SYNC */ |
620 | data->args.stable = NFS_FILE_SYNC; | 623 | hdr->args.stable = NFS_FILE_SYNC; |
621 | msg->rpc_proc = &nfs_procedures[NFSPROC_WRITE]; | 624 | msg->rpc_proc = &nfs_procedures[NFSPROC_WRITE]; |
622 | } | 625 | } |
623 | 626 | ||
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index e818a475ca64..beff2769c5c5 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -33,12 +33,12 @@ static const struct nfs_rw_ops nfs_rw_read_ops; | |||
33 | 33 | ||
34 | static struct kmem_cache *nfs_rdata_cachep; | 34 | static struct kmem_cache *nfs_rdata_cachep; |
35 | 35 | ||
36 | static struct nfs_rw_header *nfs_readhdr_alloc(void) | 36 | static struct nfs_pgio_header *nfs_readhdr_alloc(void) |
37 | { | 37 | { |
38 | return kmem_cache_zalloc(nfs_rdata_cachep, GFP_KERNEL); | 38 | return kmem_cache_zalloc(nfs_rdata_cachep, GFP_KERNEL); |
39 | } | 39 | } |
40 | 40 | ||
41 | static void nfs_readhdr_free(struct nfs_rw_header *rhdr) | 41 | static void nfs_readhdr_free(struct nfs_pgio_header *rhdr) |
42 | { | 42 | { |
43 | kmem_cache_free(nfs_rdata_cachep, rhdr); | 43 | kmem_cache_free(nfs_rdata_cachep, rhdr); |
44 | } | 44 | } |
@@ -115,12 +115,6 @@ static void nfs_readpage_release(struct nfs_page *req) | |||
115 | 115 | ||
116 | unlock_page(req->wb_page); | 116 | unlock_page(req->wb_page); |
117 | } | 117 | } |
118 | |||
119 | dprintk("NFS: read done (%s/%Lu %d@%Ld)\n", | ||
120 | req->wb_context->dentry->d_inode->i_sb->s_id, | ||
121 | (unsigned long long)NFS_FILEID(req->wb_context->dentry->d_inode), | ||
122 | req->wb_bytes, | ||
123 | (long long)req_offset(req)); | ||
124 | nfs_release_request(req); | 118 | nfs_release_request(req); |
125 | } | 119 | } |
126 | 120 | ||
@@ -172,14 +166,15 @@ out: | |||
172 | hdr->release(hdr); | 166 | hdr->release(hdr); |
173 | } | 167 | } |
174 | 168 | ||
175 | static void nfs_initiate_read(struct nfs_pgio_data *data, struct rpc_message *msg, | 169 | static void nfs_initiate_read(struct nfs_pgio_header *hdr, |
170 | struct rpc_message *msg, | ||
176 | struct rpc_task_setup *task_setup_data, int how) | 171 | struct rpc_task_setup *task_setup_data, int how) |
177 | { | 172 | { |
178 | struct inode *inode = data->header->inode; | 173 | struct inode *inode = hdr->inode; |
179 | int swap_flags = IS_SWAPFILE(inode) ? NFS_RPC_SWAPFLAGS : 0; | 174 | int swap_flags = IS_SWAPFILE(inode) ? NFS_RPC_SWAPFLAGS : 0; |
180 | 175 | ||
181 | task_setup_data->flags |= swap_flags; | 176 | task_setup_data->flags |= swap_flags; |
182 | NFS_PROTO(inode)->read_setup(data, msg); | 177 | NFS_PROTO(inode)->read_setup(hdr, msg); |
183 | } | 178 | } |
184 | 179 | ||
185 | static void | 180 | static void |
@@ -203,14 +198,15 @@ static const struct nfs_pgio_completion_ops nfs_async_read_completion_ops = { | |||
203 | * This is the callback from RPC telling us whether a reply was | 198 | * This is the callback from RPC telling us whether a reply was |
204 | * received or some error occurred (timeout or socket shutdown). | 199 | * received or some error occurred (timeout or socket shutdown). |
205 | */ | 200 | */ |
206 | static int nfs_readpage_done(struct rpc_task *task, struct nfs_pgio_data *data, | 201 | static int nfs_readpage_done(struct rpc_task *task, |
202 | struct nfs_pgio_header *hdr, | ||
207 | struct inode *inode) | 203 | struct inode *inode) |
208 | { | 204 | { |
209 | int status = NFS_PROTO(inode)->read_done(task, data); | 205 | int status = NFS_PROTO(inode)->read_done(task, hdr); |
210 | if (status != 0) | 206 | if (status != 0) |
211 | return status; | 207 | return status; |
212 | 208 | ||
213 | nfs_add_stats(inode, NFSIOS_SERVERREADBYTES, data->res.count); | 209 | nfs_add_stats(inode, NFSIOS_SERVERREADBYTES, hdr->res.count); |
214 | 210 | ||
215 | if (task->tk_status == -ESTALE) { | 211 | if (task->tk_status == -ESTALE) { |
216 | set_bit(NFS_INO_STALE, &NFS_I(inode)->flags); | 212 | set_bit(NFS_INO_STALE, &NFS_I(inode)->flags); |
@@ -219,34 +215,34 @@ static int nfs_readpage_done(struct rpc_task *task, struct nfs_pgio_data *data, | |||
219 | return 0; | 215 | return 0; |
220 | } | 216 | } |
221 | 217 | ||
222 | static void nfs_readpage_retry(struct rpc_task *task, struct nfs_pgio_data *data) | 218 | static void nfs_readpage_retry(struct rpc_task *task, |
219 | struct nfs_pgio_header *hdr) | ||
223 | { | 220 | { |
224 | struct nfs_pgio_args *argp = &data->args; | 221 | struct nfs_pgio_args *argp = &hdr->args; |
225 | struct nfs_pgio_res *resp = &data->res; | 222 | struct nfs_pgio_res *resp = &hdr->res; |
226 | 223 | ||
227 | /* This is a short read! */ | 224 | /* This is a short read! */ |
228 | nfs_inc_stats(data->header->inode, NFSIOS_SHORTREAD); | 225 | nfs_inc_stats(hdr->inode, NFSIOS_SHORTREAD); |
229 | /* Has the server at least made some progress? */ | 226 | /* Has the server at least made some progress? */ |
230 | if (resp->count == 0) { | 227 | if (resp->count == 0) { |
231 | nfs_set_pgio_error(data->header, -EIO, argp->offset); | 228 | nfs_set_pgio_error(hdr, -EIO, argp->offset); |
232 | return; | 229 | return; |
233 | } | 230 | } |
234 | /* Yes, so retry the read at the end of the data */ | 231 | /* Yes, so retry the read at the end of the hdr */ |
235 | data->mds_offset += resp->count; | 232 | hdr->mds_offset += resp->count; |
236 | argp->offset += resp->count; | 233 | argp->offset += resp->count; |
237 | argp->pgbase += resp->count; | 234 | argp->pgbase += resp->count; |
238 | argp->count -= resp->count; | 235 | argp->count -= resp->count; |
239 | rpc_restart_call_prepare(task); | 236 | rpc_restart_call_prepare(task); |
240 | } | 237 | } |
241 | 238 | ||
242 | static void nfs_readpage_result(struct rpc_task *task, struct nfs_pgio_data *data) | 239 | static void nfs_readpage_result(struct rpc_task *task, |
240 | struct nfs_pgio_header *hdr) | ||
243 | { | 241 | { |
244 | struct nfs_pgio_header *hdr = data->header; | 242 | if (hdr->res.eof) { |
245 | |||
246 | if (data->res.eof) { | ||
247 | loff_t bound; | 243 | loff_t bound; |
248 | 244 | ||
249 | bound = data->args.offset + data->res.count; | 245 | bound = hdr->args.offset + hdr->res.count; |
250 | spin_lock(&hdr->lock); | 246 | spin_lock(&hdr->lock); |
251 | if (bound < hdr->io_start + hdr->good_bytes) { | 247 | if (bound < hdr->io_start + hdr->good_bytes) { |
252 | set_bit(NFS_IOHDR_EOF, &hdr->flags); | 248 | set_bit(NFS_IOHDR_EOF, &hdr->flags); |
@@ -254,8 +250,8 @@ static void nfs_readpage_result(struct rpc_task *task, struct nfs_pgio_data *dat | |||
254 | hdr->good_bytes = bound - hdr->io_start; | 250 | hdr->good_bytes = bound - hdr->io_start; |
255 | } | 251 | } |
256 | spin_unlock(&hdr->lock); | 252 | spin_unlock(&hdr->lock); |
257 | } else if (data->res.count != data->args.count) | 253 | } else if (hdr->res.count != hdr->args.count) |
258 | nfs_readpage_retry(task, data); | 254 | nfs_readpage_retry(task, hdr); |
259 | } | 255 | } |
260 | 256 | ||
261 | /* | 257 | /* |
@@ -404,7 +400,7 @@ out: | |||
404 | int __init nfs_init_readpagecache(void) | 400 | int __init nfs_init_readpagecache(void) |
405 | { | 401 | { |
406 | nfs_rdata_cachep = kmem_cache_create("nfs_read_data", | 402 | nfs_rdata_cachep = kmem_cache_create("nfs_read_data", |
407 | sizeof(struct nfs_rw_header), | 403 | sizeof(struct nfs_pgio_header), |
408 | 0, SLAB_HWCACHE_ALIGN, | 404 | 0, SLAB_HWCACHE_ALIGN, |
409 | NULL); | 405 | NULL); |
410 | if (nfs_rdata_cachep == NULL) | 406 | if (nfs_rdata_cachep == NULL) |
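With nfs_rw_header gone, the read path above sizes its slab cache for struct nfs_pgio_header and hands the header out directly. A minimal sketch of that allocation lifecycle, with hypothetical names (rdata_cachep, nfs_read_cache_init, read_hdr_alloc/free) standing in for the real ones:

```c
#include <linux/slab.h>
#include <linux/nfs_xdr.h>	/* struct nfs_pgio_header */

static struct kmem_cache *rdata_cachep;	/* hypothetical stand-in for nfs_rdata_cachep */

static int __init nfs_read_cache_init(void)
{
	/* cache objects are now sized for the pgio header itself */
	rdata_cachep = kmem_cache_create("nfs_read_data",
					 sizeof(struct nfs_pgio_header),
					 0, SLAB_HWCACHE_ALIGN, NULL);
	return rdata_cachep ? 0 : -ENOMEM;
}

static struct nfs_pgio_header *read_hdr_alloc(void)
{
	/* zeroed allocation, as in nfs_readhdr_alloc() above */
	return kmem_cache_zalloc(rdata_cachep, GFP_KERNEL);
}

static void read_hdr_free(struct nfs_pgio_header *hdr)
{
	kmem_cache_free(rdata_cachep, hdr);
}
```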
diff --git a/fs/nfs/super.c b/fs/nfs/super.c index 084af1060d79..e4499d5b51e8 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c | |||
@@ -1027,8 +1027,7 @@ static bool nfs_auth_info_add(struct nfs_auth_info *auth_info, | |||
1027 | rpc_authflavor_t flavor) | 1027 | rpc_authflavor_t flavor) |
1028 | { | 1028 | { |
1029 | unsigned int i; | 1029 | unsigned int i; |
1030 | unsigned int max_flavor_len = (sizeof(auth_info->flavors) / | 1030 | unsigned int max_flavor_len = ARRAY_SIZE(auth_info->flavors); |
1031 | sizeof(auth_info->flavors[0])); | ||
1032 | 1031 | ||
1033 | /* make sure this flavor isn't already in the list */ | 1032 | /* make sure this flavor isn't already in the list */ |
1034 | for (i = 0; i < auth_info->flavor_len; i++) { | 1033 | for (i = 0; i < auth_info->flavor_len; i++) { |
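The ARRAY_SIZE() substitution above is purely cosmetic: the kernel macro expands to the same sizeof division the old code spelled out, plus a compile-time check that its argument really is an array. A small userspace illustration with a made-up flavors array:

```c
#include <stdio.h>

/* Simplified form of the kernel macro; the real one also rejects pointers. */
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

int main(void)
{
	unsigned int flavors[12];	/* hypothetical, mirrors auth_info->flavors */

	/* prints 12: the element count, independent of the element type */
	printf("%zu\n", ARRAY_SIZE(flavors));
	return 0;
}
```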
@@ -2180,7 +2179,7 @@ out_no_address: | |||
2180 | return -EINVAL; | 2179 | return -EINVAL; |
2181 | } | 2180 | } |
2182 | 2181 | ||
2183 | #define NFS_MOUNT_CMP_FLAGMASK ~(NFS_MOUNT_INTR \ | 2182 | #define NFS_REMOUNT_CMP_FLAGMASK ~(NFS_MOUNT_INTR \ |
2184 | | NFS_MOUNT_SECURE \ | 2183 | | NFS_MOUNT_SECURE \ |
2185 | | NFS_MOUNT_TCP \ | 2184 | | NFS_MOUNT_TCP \ |
2186 | | NFS_MOUNT_VER3 \ | 2185 | | NFS_MOUNT_VER3 \ |
@@ -2188,15 +2187,16 @@ out_no_address: | |||
2188 | | NFS_MOUNT_NONLM \ | 2187 | | NFS_MOUNT_NONLM \ |
2189 | | NFS_MOUNT_BROKEN_SUID \ | 2188 | | NFS_MOUNT_BROKEN_SUID \ |
2190 | | NFS_MOUNT_STRICTLOCK \ | 2189 | | NFS_MOUNT_STRICTLOCK \ |
2191 | | NFS_MOUNT_UNSHARED \ | ||
2192 | | NFS_MOUNT_NORESVPORT \ | ||
2193 | | NFS_MOUNT_LEGACY_INTERFACE) | 2190 | | NFS_MOUNT_LEGACY_INTERFACE) |
2194 | 2191 | ||
2192 | #define NFS_MOUNT_CMP_FLAGMASK (NFS_REMOUNT_CMP_FLAGMASK & \ | ||
2193 | ~(NFS_MOUNT_UNSHARED | NFS_MOUNT_NORESVPORT)) | ||
2194 | |||
2195 | static int | 2195 | static int |
2196 | nfs_compare_remount_data(struct nfs_server *nfss, | 2196 | nfs_compare_remount_data(struct nfs_server *nfss, |
2197 | struct nfs_parsed_mount_data *data) | 2197 | struct nfs_parsed_mount_data *data) |
2198 | { | 2198 | { |
2199 | if ((data->flags ^ nfss->flags) & NFS_MOUNT_CMP_FLAGMASK || | 2199 | if ((data->flags ^ nfss->flags) & NFS_REMOUNT_CMP_FLAGMASK || |
2200 | data->rsize != nfss->rsize || | 2200 | data->rsize != nfss->rsize || |
2201 | data->wsize != nfss->wsize || | 2201 | data->wsize != nfss->wsize || |
2202 | data->version != nfss->nfs_client->rpc_ops->version || | 2202 | data->version != nfss->nfs_client->rpc_ops->version || |
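Reading the two definitions above: NFS_REMOUNT_CMP_FLAGMASK now keeps NFS_MOUNT_UNSHARED and NFS_MOUNT_NORESVPORT in the comparison, while NFS_MOUNT_CMP_FLAGMASK additionally masks them out. Both comparisons rely on the usual XOR-and-mask test, sketched below with placeholder flag bits (F_*), not the real NFS_MOUNT_* values:

```c
#include <stdbool.h>
#include <stdio.h>

/* Placeholder flag bits; the real code uses the NFS_MOUNT_* constants. */
#define F_SOFT      0x1
#define F_UNSHARED  0x2	/* ignored by the sharing comparison, not by remount */

/* Nonzero iff @a and @b differ in a bit the mask still cares about. */
static bool flags_differ(unsigned int a, unsigned int b, unsigned int cmp_mask)
{
	return ((a ^ b) & cmp_mask) != 0;
}

int main(void)
{
	unsigned int remount_mask = ~0u;			/* compare everything */
	unsigned int mount_mask = remount_mask & ~F_UNSHARED;	/* tolerate F_UNSHARED */
	unsigned int cur = F_SOFT, new = F_SOFT | F_UNSHARED;

	printf("remount sees a difference: %d\n", flags_differ(cur, new, remount_mask)); /* 1 */
	printf("sharing check does not:    %d\n", flags_differ(cur, new, mount_mask));   /* 0 */
	return 0;
}
```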
diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 962c9ee758be..e3b5cf28bdc5 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c | |||
@@ -47,6 +47,8 @@ static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops; | |||
47 | static const struct nfs_commit_completion_ops nfs_commit_completion_ops; | 47 | static const struct nfs_commit_completion_ops nfs_commit_completion_ops; |
48 | static const struct nfs_rw_ops nfs_rw_write_ops; | 48 | static const struct nfs_rw_ops nfs_rw_write_ops; |
49 | static void nfs_clear_request_commit(struct nfs_page *req); | 49 | static void nfs_clear_request_commit(struct nfs_page *req); |
50 | static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo, | ||
51 | struct inode *inode); | ||
50 | 52 | ||
51 | static struct kmem_cache *nfs_wdata_cachep; | 53 | static struct kmem_cache *nfs_wdata_cachep; |
52 | static mempool_t *nfs_wdata_mempool; | 54 | static mempool_t *nfs_wdata_mempool; |
@@ -71,18 +73,18 @@ void nfs_commit_free(struct nfs_commit_data *p) | |||
71 | } | 73 | } |
72 | EXPORT_SYMBOL_GPL(nfs_commit_free); | 74 | EXPORT_SYMBOL_GPL(nfs_commit_free); |
73 | 75 | ||
74 | static struct nfs_rw_header *nfs_writehdr_alloc(void) | 76 | static struct nfs_pgio_header *nfs_writehdr_alloc(void) |
75 | { | 77 | { |
76 | struct nfs_rw_header *p = mempool_alloc(nfs_wdata_mempool, GFP_NOIO); | 78 | struct nfs_pgio_header *p = mempool_alloc(nfs_wdata_mempool, GFP_NOIO); |
77 | 79 | ||
78 | if (p) | 80 | if (p) |
79 | memset(p, 0, sizeof(*p)); | 81 | memset(p, 0, sizeof(*p)); |
80 | return p; | 82 | return p; |
81 | } | 83 | } |
82 | 84 | ||
83 | static void nfs_writehdr_free(struct nfs_rw_header *whdr) | 85 | static void nfs_writehdr_free(struct nfs_pgio_header *hdr) |
84 | { | 86 | { |
85 | mempool_free(whdr, nfs_wdata_mempool); | 87 | mempool_free(hdr, nfs_wdata_mempool); |
86 | } | 88 | } |
87 | 89 | ||
88 | static void nfs_context_set_write_error(struct nfs_open_context *ctx, int error) | 90 | static void nfs_context_set_write_error(struct nfs_open_context *ctx, int error) |
@@ -93,6 +95,38 @@ static void nfs_context_set_write_error(struct nfs_open_context *ctx, int error) | |||
93 | } | 95 | } |
94 | 96 | ||
95 | /* | 97 | /* |
98 | * nfs_page_search_commits_for_head_request_locked | ||
99 | * | ||
100 | * Search through commit lists on @inode for the head request for @page. | ||
101 | * Must be called while holding the inode lock (which is also the cinfo lock). | ||
102 | * | ||
103 | * Returns the head request if found, or NULL if not found. | ||
104 | */ | ||
105 | static struct nfs_page * | ||
106 | nfs_page_search_commits_for_head_request_locked(struct nfs_inode *nfsi, | ||
107 | struct page *page) | ||
108 | { | ||
109 | struct nfs_page *freq, *t; | ||
110 | struct nfs_commit_info cinfo; | ||
111 | struct inode *inode = &nfsi->vfs_inode; | ||
112 | |||
113 | nfs_init_cinfo_from_inode(&cinfo, inode); | ||
114 | |||
115 | /* search through pnfs commit lists */ | ||
116 | freq = pnfs_search_commit_reqs(inode, &cinfo, page); | ||
117 | if (freq) | ||
118 | return freq->wb_head; | ||
119 | |||
120 | /* Linearly search the commit list for the correct request */ | ||
121 | list_for_each_entry_safe(freq, t, &cinfo.mds->list, wb_list) { | ||
122 | if (freq->wb_page == page) | ||
123 | return freq->wb_head; | ||
124 | } | ||
125 | |||
126 | return NULL; | ||
127 | } | ||
128 | |||
129 | /* | ||
96 | * nfs_page_find_head_request_locked - find head request associated with @page | 130 | * nfs_page_find_head_request_locked - find head request associated with @page |
97 | * | 131 | * |
98 | * must be called while holding the inode lock. | 132 | * must be called while holding the inode lock. |
@@ -106,21 +140,12 @@ nfs_page_find_head_request_locked(struct nfs_inode *nfsi, struct page *page) | |||
106 | 140 | ||
107 | if (PagePrivate(page)) | 141 | if (PagePrivate(page)) |
108 | req = (struct nfs_page *)page_private(page); | 142 | req = (struct nfs_page *)page_private(page); |
109 | else if (unlikely(PageSwapCache(page))) { | 143 | else if (unlikely(PageSwapCache(page))) |
110 | struct nfs_page *freq, *t; | 144 | req = nfs_page_search_commits_for_head_request_locked(nfsi, |
111 | 145 | page); | |
112 | /* Linearly search the commit list for the correct req */ | ||
113 | list_for_each_entry_safe(freq, t, &nfsi->commit_info.list, wb_list) { | ||
114 | if (freq->wb_page == page) { | ||
115 | req = freq->wb_head; | ||
116 | break; | ||
117 | } | ||
118 | } | ||
119 | } | ||
120 | 146 | ||
121 | if (req) { | 147 | if (req) { |
122 | WARN_ON_ONCE(req->wb_head != req); | 148 | WARN_ON_ONCE(req->wb_head != req); |
123 | |||
124 | kref_get(&req->wb_kref); | 149 | kref_get(&req->wb_kref); |
125 | } | 150 | } |
126 | 151 | ||
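The new helper is only meaningful under the inode lock, which is what nfs_page_find_head_request_locked() above takes before pinning the result. A condensed caller sketch (hypothetical wrapper name, mirroring the code in the hunk):

```c
#include <linux/fs.h>
#include <linux/kref.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>

/* Hypothetical wrapper showing the calling convention of
 * nfs_page_search_commits_for_head_request_locked(): hold the inode
 * lock across the search and pin the returned head before dropping it. */
static struct nfs_page *
find_swapcache_head(struct nfs_inode *nfsi, struct page *page)
{
	struct inode *inode = &nfsi->vfs_inode;
	struct nfs_page *head;

	spin_lock(&inode->i_lock);
	head = nfs_page_search_commits_for_head_request_locked(nfsi, page);
	if (head)
		kref_get(&head->wb_kref);	/* caller now owns a reference */
	spin_unlock(&inode->i_lock);
	return head;
}
```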
@@ -216,7 +241,7 @@ static bool nfs_page_group_covers_page(struct nfs_page *req) | |||
216 | unsigned int pos = 0; | 241 | unsigned int pos = 0; |
217 | unsigned int len = nfs_page_length(req->wb_page); | 242 | unsigned int len = nfs_page_length(req->wb_page); |
218 | 243 | ||
219 | nfs_page_group_lock(req); | 244 | nfs_page_group_lock(req, true); |
220 | 245 | ||
221 | do { | 246 | do { |
222 | tmp = nfs_page_group_search_locked(req->wb_head, pos); | 247 | tmp = nfs_page_group_search_locked(req->wb_head, pos); |
@@ -379,8 +404,6 @@ nfs_destroy_unlinked_subrequests(struct nfs_page *destroy_list, | |||
379 | subreq->wb_head = subreq; | 404 | subreq->wb_head = subreq; |
380 | subreq->wb_this_page = subreq; | 405 | subreq->wb_this_page = subreq; |
381 | 406 | ||
382 | nfs_clear_request_commit(subreq); | ||
383 | |||
384 | /* subreq is now totally disconnected from page group or any | 407 | /* subreq is now totally disconnected from page group or any |
385 | * write / commit lists. last chance to wake any waiters */ | 408 | * write / commit lists. last chance to wake any waiters */ |
386 | nfs_unlock_request(subreq); | 409 | nfs_unlock_request(subreq); |
@@ -456,7 +479,9 @@ try_again: | |||
456 | } | 479 | } |
457 | 480 | ||
458 | /* lock each request in the page group */ | 481 | /* lock each request in the page group */ |
459 | nfs_page_group_lock(head); | 482 | ret = nfs_page_group_lock(head, false); |
483 | if (ret < 0) | ||
484 | return ERR_PTR(ret); | ||
460 | subreq = head; | 485 | subreq = head; |
461 | do { | 486 | do { |
462 | /* | 487 | /* |
@@ -488,7 +513,7 @@ try_again: | |||
488 | * Commit list removal accounting is done after locks are dropped */ | 513 | * Commit list removal accounting is done after locks are dropped */ |
489 | subreq = head; | 514 | subreq = head; |
490 | do { | 515 | do { |
491 | nfs_list_remove_request(subreq); | 516 | nfs_clear_request_commit(subreq); |
492 | subreq = subreq->wb_this_page; | 517 | subreq = subreq->wb_this_page; |
493 | } while (subreq != head); | 518 | } while (subreq != head); |
494 | 519 | ||
@@ -518,15 +543,11 @@ try_again: | |||
518 | 543 | ||
519 | nfs_page_group_unlock(head); | 544 | nfs_page_group_unlock(head); |
520 | 545 | ||
521 | /* drop lock to clear_request_commit the head req and clean up | 546 | /* drop lock to clean up requests on destroy list */ |
522 | * requests on destroy list */ | ||
523 | spin_unlock(&inode->i_lock); | 547 | spin_unlock(&inode->i_lock); |
524 | 548 | ||
525 | nfs_destroy_unlinked_subrequests(destroy_list, head); | 549 | nfs_destroy_unlinked_subrequests(destroy_list, head); |
526 | 550 | ||
527 | /* clean up commit list state */ | ||
528 | nfs_clear_request_commit(head); | ||
529 | |||
530 | /* still holds ref on head from nfs_page_find_head_request_locked | 551 | /* still holds ref on head from nfs_page_find_head_request_locked |
531 | * and still has lock on head from lock loop */ | 552 | * and still has lock on head from lock loop */ |
532 | return head; | 553 | return head; |
@@ -705,6 +726,8 @@ static void nfs_inode_remove_request(struct nfs_page *req) | |||
705 | 726 | ||
706 | if (test_and_clear_bit(PG_INODE_REF, &req->wb_flags)) | 727 | if (test_and_clear_bit(PG_INODE_REF, &req->wb_flags)) |
707 | nfs_release_request(req); | 728 | nfs_release_request(req); |
729 | else | ||
730 | WARN_ON_ONCE(1); | ||
708 | } | 731 | } |
709 | 732 | ||
710 | static void | 733 | static void |
@@ -808,6 +831,7 @@ nfs_clear_page_commit(struct page *page) | |||
808 | dec_bdi_stat(page_file_mapping(page)->backing_dev_info, BDI_RECLAIMABLE); | 831 | dec_bdi_stat(page_file_mapping(page)->backing_dev_info, BDI_RECLAIMABLE); |
809 | } | 832 | } |
810 | 833 | ||
834 | /* Called holding inode (/cinfo) lock */ | ||
811 | static void | 835 | static void |
812 | nfs_clear_request_commit(struct nfs_page *req) | 836 | nfs_clear_request_commit(struct nfs_page *req) |
813 | { | 837 | { |
@@ -817,20 +841,17 @@ nfs_clear_request_commit(struct nfs_page *req) | |||
817 | 841 | ||
818 | nfs_init_cinfo_from_inode(&cinfo, inode); | 842 | nfs_init_cinfo_from_inode(&cinfo, inode); |
819 | if (!pnfs_clear_request_commit(req, &cinfo)) { | 843 | if (!pnfs_clear_request_commit(req, &cinfo)) { |
820 | spin_lock(cinfo.lock); | ||
821 | nfs_request_remove_commit_list(req, &cinfo); | 844 | nfs_request_remove_commit_list(req, &cinfo); |
822 | spin_unlock(cinfo.lock); | ||
823 | } | 845 | } |
824 | nfs_clear_page_commit(req->wb_page); | 846 | nfs_clear_page_commit(req->wb_page); |
825 | } | 847 | } |
826 | } | 848 | } |
827 | 849 | ||
828 | static inline | 850 | int nfs_write_need_commit(struct nfs_pgio_header *hdr) |
829 | int nfs_write_need_commit(struct nfs_pgio_data *data) | ||
830 | { | 851 | { |
831 | if (data->verf.committed == NFS_DATA_SYNC) | 852 | if (hdr->verf.committed == NFS_DATA_SYNC) |
832 | return data->header->lseg == NULL; | 853 | return hdr->lseg == NULL; |
833 | return data->verf.committed != NFS_FILE_SYNC; | 854 | return hdr->verf.committed != NFS_FILE_SYNC; |
834 | } | 855 | } |
835 | 856 | ||
836 | #else | 857 | #else |
@@ -856,8 +877,7 @@ nfs_clear_request_commit(struct nfs_page *req) | |||
856 | { | 877 | { |
857 | } | 878 | } |
858 | 879 | ||
859 | static inline | 880 | int nfs_write_need_commit(struct nfs_pgio_header *hdr) |
860 | int nfs_write_need_commit(struct nfs_pgio_data *data) | ||
861 | { | 881 | { |
862 | return 0; | 882 | return 0; |
863 | } | 883 | } |
@@ -883,11 +903,7 @@ static void nfs_write_completion(struct nfs_pgio_header *hdr) | |||
883 | nfs_context_set_write_error(req->wb_context, hdr->error); | 903 | nfs_context_set_write_error(req->wb_context, hdr->error); |
884 | goto remove_req; | 904 | goto remove_req; |
885 | } | 905 | } |
886 | if (test_bit(NFS_IOHDR_NEED_RESCHED, &hdr->flags)) { | 906 | if (nfs_write_need_commit(hdr)) { |
887 | nfs_mark_request_dirty(req); | ||
888 | goto next; | ||
889 | } | ||
890 | if (test_bit(NFS_IOHDR_NEED_COMMIT, &hdr->flags)) { | ||
891 | memcpy(&req->wb_verf, &hdr->verf.verifier, sizeof(req->wb_verf)); | 907 | memcpy(&req->wb_verf, &hdr->verf.verifier, sizeof(req->wb_verf)); |
892 | nfs_mark_request_commit(req, hdr->lseg, &cinfo); | 908 | nfs_mark_request_commit(req, hdr->lseg, &cinfo); |
893 | goto next; | 909 | goto next; |
@@ -1038,9 +1054,9 @@ static struct nfs_page *nfs_try_to_update_request(struct inode *inode, | |||
1038 | else | 1054 | else |
1039 | req->wb_bytes = rqend - req->wb_offset; | 1055 | req->wb_bytes = rqend - req->wb_offset; |
1040 | out_unlock: | 1056 | out_unlock: |
1041 | spin_unlock(&inode->i_lock); | ||
1042 | if (req) | 1057 | if (req) |
1043 | nfs_clear_request_commit(req); | 1058 | nfs_clear_request_commit(req); |
1059 | spin_unlock(&inode->i_lock); | ||
1044 | return req; | 1060 | return req; |
1045 | out_flushme: | 1061 | out_flushme: |
1046 | spin_unlock(&inode->i_lock); | 1062 | spin_unlock(&inode->i_lock); |
@@ -1241,17 +1257,18 @@ static int flush_task_priority(int how) | |||
1241 | return RPC_PRIORITY_NORMAL; | 1257 | return RPC_PRIORITY_NORMAL; |
1242 | } | 1258 | } |
1243 | 1259 | ||
1244 | static void nfs_initiate_write(struct nfs_pgio_data *data, struct rpc_message *msg, | 1260 | static void nfs_initiate_write(struct nfs_pgio_header *hdr, |
1261 | struct rpc_message *msg, | ||
1245 | struct rpc_task_setup *task_setup_data, int how) | 1262 | struct rpc_task_setup *task_setup_data, int how) |
1246 | { | 1263 | { |
1247 | struct inode *inode = data->header->inode; | 1264 | struct inode *inode = hdr->inode; |
1248 | int priority = flush_task_priority(how); | 1265 | int priority = flush_task_priority(how); |
1249 | 1266 | ||
1250 | task_setup_data->priority = priority; | 1267 | task_setup_data->priority = priority; |
1251 | NFS_PROTO(inode)->write_setup(data, msg); | 1268 | NFS_PROTO(inode)->write_setup(hdr, msg); |
1252 | 1269 | ||
1253 | nfs4_state_protect_write(NFS_SERVER(inode)->nfs_client, | 1270 | nfs4_state_protect_write(NFS_SERVER(inode)->nfs_client, |
1254 | &task_setup_data->rpc_client, msg, data); | 1271 | &task_setup_data->rpc_client, msg, hdr); |
1255 | } | 1272 | } |
1256 | 1273 | ||
1257 | /* If a nfs_flush_* function fails, it should remove reqs from @head and | 1274 | /* If a nfs_flush_* function fails, it should remove reqs from @head and |
@@ -1313,21 +1330,9 @@ void nfs_commit_prepare(struct rpc_task *task, void *calldata) | |||
1313 | NFS_PROTO(data->inode)->commit_rpc_prepare(task, data); | 1330 | NFS_PROTO(data->inode)->commit_rpc_prepare(task, data); |
1314 | } | 1331 | } |
1315 | 1332 | ||
1316 | static void nfs_writeback_release_common(struct nfs_pgio_data *data) | 1333 | static void nfs_writeback_release_common(struct nfs_pgio_header *hdr) |
1317 | { | 1334 | { |
1318 | struct nfs_pgio_header *hdr = data->header; | 1335 | /* do nothing! */ |
1319 | int status = data->task.tk_status; | ||
1320 | |||
1321 | if ((status >= 0) && nfs_write_need_commit(data)) { | ||
1322 | spin_lock(&hdr->lock); | ||
1323 | if (test_bit(NFS_IOHDR_NEED_RESCHED, &hdr->flags)) | ||
1324 | ; /* Do nothing */ | ||
1325 | else if (!test_and_set_bit(NFS_IOHDR_NEED_COMMIT, &hdr->flags)) | ||
1326 | memcpy(&hdr->verf, &data->verf, sizeof(hdr->verf)); | ||
1327 | else if (memcmp(&hdr->verf, &data->verf, sizeof(hdr->verf))) | ||
1328 | set_bit(NFS_IOHDR_NEED_RESCHED, &hdr->flags); | ||
1329 | spin_unlock(&hdr->lock); | ||
1330 | } | ||
1331 | } | 1336 | } |
1332 | 1337 | ||
1333 | /* | 1338 | /* |
@@ -1358,7 +1363,8 @@ static int nfs_should_remove_suid(const struct inode *inode) | |||
1358 | /* | 1363 | /* |
1359 | * This function is called when the WRITE call is complete. | 1364 | * This function is called when the WRITE call is complete. |
1360 | */ | 1365 | */ |
1361 | static int nfs_writeback_done(struct rpc_task *task, struct nfs_pgio_data *data, | 1366 | static int nfs_writeback_done(struct rpc_task *task, |
1367 | struct nfs_pgio_header *hdr, | ||
1362 | struct inode *inode) | 1368 | struct inode *inode) |
1363 | { | 1369 | { |
1364 | int status; | 1370 | int status; |
@@ -1370,13 +1376,14 @@ static int nfs_writeback_done(struct rpc_task *task, struct nfs_pgio_data *data, | |||
1370 | * another writer had changed the file, but some applications | 1376 | * another writer had changed the file, but some applications |
1371 | * depend on tighter cache coherency when writing. | 1377 | * depend on tighter cache coherency when writing. |
1372 | */ | 1378 | */ |
1373 | status = NFS_PROTO(inode)->write_done(task, data); | 1379 | status = NFS_PROTO(inode)->write_done(task, hdr); |
1374 | if (status != 0) | 1380 | if (status != 0) |
1375 | return status; | 1381 | return status; |
1376 | nfs_add_stats(inode, NFSIOS_SERVERWRITTENBYTES, data->res.count); | 1382 | nfs_add_stats(inode, NFSIOS_SERVERWRITTENBYTES, hdr->res.count); |
1377 | 1383 | ||
1378 | #if IS_ENABLED(CONFIG_NFS_V3) || IS_ENABLED(CONFIG_NFS_V4) | 1384 | #if IS_ENABLED(CONFIG_NFS_V3) || IS_ENABLED(CONFIG_NFS_V4) |
1379 | if (data->res.verf->committed < data->args.stable && task->tk_status >= 0) { | 1385 | if (hdr->res.verf->committed < hdr->args.stable && |
1386 | task->tk_status >= 0) { | ||
1380 | /* We tried a write call, but the server did not | 1387 | /* We tried a write call, but the server did not |
1381 | * commit data to stable storage even though we | 1388 | * commit data to stable storage even though we |
1382 | * requested it. | 1389 | * requested it. |
@@ -1392,7 +1399,7 @@ static int nfs_writeback_done(struct rpc_task *task, struct nfs_pgio_data *data, | |||
1392 | dprintk("NFS: faulty NFS server %s:" | 1399 | dprintk("NFS: faulty NFS server %s:" |
1393 | " (committed = %d) != (stable = %d)\n", | 1400 | " (committed = %d) != (stable = %d)\n", |
1394 | NFS_SERVER(inode)->nfs_client->cl_hostname, | 1401 | NFS_SERVER(inode)->nfs_client->cl_hostname, |
1395 | data->res.verf->committed, data->args.stable); | 1402 | hdr->res.verf->committed, hdr->args.stable); |
1396 | complain = jiffies + 300 * HZ; | 1403 | complain = jiffies + 300 * HZ; |
1397 | } | 1404 | } |
1398 | } | 1405 | } |
@@ -1407,16 +1414,17 @@ static int nfs_writeback_done(struct rpc_task *task, struct nfs_pgio_data *data, | |||
1407 | /* | 1414 | /* |
1408 | * This function is called when the WRITE call is complete. | 1415 | * This function is called when the WRITE call is complete. |
1409 | */ | 1416 | */ |
1410 | static void nfs_writeback_result(struct rpc_task *task, struct nfs_pgio_data *data) | 1417 | static void nfs_writeback_result(struct rpc_task *task, |
1418 | struct nfs_pgio_header *hdr) | ||
1411 | { | 1419 | { |
1412 | struct nfs_pgio_args *argp = &data->args; | 1420 | struct nfs_pgio_args *argp = &hdr->args; |
1413 | struct nfs_pgio_res *resp = &data->res; | 1421 | struct nfs_pgio_res *resp = &hdr->res; |
1414 | 1422 | ||
1415 | if (resp->count < argp->count) { | 1423 | if (resp->count < argp->count) { |
1416 | static unsigned long complain; | 1424 | static unsigned long complain; |
1417 | 1425 | ||
1418 | /* This a short write! */ | 1426 | /* This a short write! */ |
1419 | nfs_inc_stats(data->header->inode, NFSIOS_SHORTWRITE); | 1427 | nfs_inc_stats(hdr->inode, NFSIOS_SHORTWRITE); |
1420 | 1428 | ||
1421 | /* Has the server at least made some progress? */ | 1429 | /* Has the server at least made some progress? */ |
1422 | if (resp->count == 0) { | 1430 | if (resp->count == 0) { |
@@ -1426,14 +1434,14 @@ static void nfs_writeback_result(struct rpc_task *task, struct nfs_pgio_data *da | |||
1426 | argp->count); | 1434 | argp->count); |
1427 | complain = jiffies + 300 * HZ; | 1435 | complain = jiffies + 300 * HZ; |
1428 | } | 1436 | } |
1429 | nfs_set_pgio_error(data->header, -EIO, argp->offset); | 1437 | nfs_set_pgio_error(hdr, -EIO, argp->offset); |
1430 | task->tk_status = -EIO; | 1438 | task->tk_status = -EIO; |
1431 | return; | 1439 | return; |
1432 | } | 1440 | } |
1433 | /* Was this an NFSv2 write or an NFSv3 stable write? */ | 1441 | /* Was this an NFSv2 write or an NFSv3 stable write? */ |
1434 | if (resp->verf->committed != NFS_UNSTABLE) { | 1442 | if (resp->verf->committed != NFS_UNSTABLE) { |
1435 | /* Resend from where the server left off */ | 1443 | /* Resend from where the server left off */ |
1436 | data->mds_offset += resp->count; | 1444 | hdr->mds_offset += resp->count; |
1437 | argp->offset += resp->count; | 1445 | argp->offset += resp->count; |
1438 | argp->pgbase += resp->count; | 1446 | argp->pgbase += resp->count; |
1439 | argp->count -= resp->count; | 1447 | argp->count -= resp->count; |
@@ -1884,7 +1892,7 @@ int nfs_migrate_page(struct address_space *mapping, struct page *newpage, | |||
1884 | int __init nfs_init_writepagecache(void) | 1892 | int __init nfs_init_writepagecache(void) |
1885 | { | 1893 | { |
1886 | nfs_wdata_cachep = kmem_cache_create("nfs_write_data", | 1894 | nfs_wdata_cachep = kmem_cache_create("nfs_write_data", |
1887 | sizeof(struct nfs_rw_header), | 1895 | sizeof(struct nfs_pgio_header), |
1888 | 0, SLAB_HWCACHE_ALIGN, | 1896 | 0, SLAB_HWCACHE_ALIGN, |
1889 | NULL); | 1897 | NULL); |
1890 | if (nfs_wdata_cachep == NULL) | 1898 | if (nfs_wdata_cachep == NULL) |
diff --git a/fs/nfs_common/nfsacl.c b/fs/nfs_common/nfsacl.c index ed628f71274c..538f142935ea 100644 --- a/fs/nfs_common/nfsacl.c +++ b/fs/nfs_common/nfsacl.c | |||
@@ -30,9 +30,6 @@ | |||
30 | 30 | ||
31 | MODULE_LICENSE("GPL"); | 31 | MODULE_LICENSE("GPL"); |
32 | 32 | ||
33 | EXPORT_SYMBOL_GPL(nfsacl_encode); | ||
34 | EXPORT_SYMBOL_GPL(nfsacl_decode); | ||
35 | |||
36 | struct nfsacl_encode_desc { | 33 | struct nfsacl_encode_desc { |
37 | struct xdr_array2_desc desc; | 34 | struct xdr_array2_desc desc; |
38 | unsigned int count; | 35 | unsigned int count; |
@@ -136,6 +133,7 @@ int nfsacl_encode(struct xdr_buf *buf, unsigned int base, struct inode *inode, | |||
136 | nfsacl_desc.desc.array_len; | 133 | nfsacl_desc.desc.array_len; |
137 | return err; | 134 | return err; |
138 | } | 135 | } |
136 | EXPORT_SYMBOL_GPL(nfsacl_encode); | ||
139 | 137 | ||
140 | struct nfsacl_decode_desc { | 138 | struct nfsacl_decode_desc { |
141 | struct xdr_array2_desc desc; | 139 | struct xdr_array2_desc desc; |
@@ -295,3 +293,4 @@ int nfsacl_decode(struct xdr_buf *buf, unsigned int base, unsigned int *aclcnt, | |||
295 | return 8 + nfsacl_desc.desc.elem_size * | 293 | return 8 + nfsacl_desc.desc.elem_size * |
296 | nfsacl_desc.desc.array_len; | 294 | nfsacl_desc.desc.array_len; |
297 | } | 295 | } |
296 | EXPORT_SYMBOL_GPL(nfsacl_decode); | ||
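Moving the EXPORT_SYMBOL_GPL() lines next to the functions they export follows the usual kernel convention: the export marker sits directly under the definition rather than in a block near the top of the file. In miniature (hypothetical function name):

```c
#include <linux/module.h>

int example_encode(void)
{
	return 0;
}
EXPORT_SYMBOL_GPL(example_encode);	/* export declared right at the definition */

MODULE_LICENSE("GPL");
```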
diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c index c519927b7b5e..228f5bdf0772 100644 --- a/fs/nilfs2/super.c +++ b/fs/nilfs2/super.c | |||
@@ -942,7 +942,7 @@ static int nilfs_get_root_dentry(struct super_block *sb, | |||
942 | iput(inode); | 942 | iput(inode); |
943 | } | 943 | } |
944 | } else { | 944 | } else { |
945 | dentry = d_obtain_alias(inode); | 945 | dentry = d_obtain_root(inode); |
946 | if (IS_ERR(dentry)) { | 946 | if (IS_ERR(dentry)) { |
947 | ret = PTR_ERR(dentry); | 947 | ret = PTR_ERR(dentry); |
948 | goto failed_dentry; | 948 | goto failed_dentry; |
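nilfs2 switches from d_obtain_alias() to d_obtain_root() for its root inodes; the calling convention is the same ERR_PTR-based one the hunk already checks with IS_ERR(). A hedged sketch of that pattern, assuming a fill_super-style caller that already holds a reference on the root inode:

```c
#include <linux/dcache.h>
#include <linux/err.h>
#include <linux/fs.h>

/* Hypothetical helper: attach a root dentry for an already-read root inode.
 * Like d_obtain_alias(), d_obtain_root() takes over the inode reference and
 * returns an ERR_PTR() on failure. */
static int attach_root_dentry(struct super_block *sb, struct inode *root_inode)
{
	struct dentry *root = d_obtain_root(root_inode);

	if (IS_ERR(root))
		return PTR_ERR(root);	/* inode reference already released for us */
	sb->s_root = root;
	return 0;
}
```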
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index 7f30bdc57d13..f2d0eee9d1f1 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c | |||
@@ -96,13 +96,16 @@ | |||
96 | * Note that some things (eg. sb pointer, type, id) doesn't change during | 96 | * Note that some things (eg. sb pointer, type, id) doesn't change during |
97 | * the life of the dquot structure and so needn't to be protected by a lock | 97 | * the life of the dquot structure and so needn't to be protected by a lock |
98 | * | 98 | * |
99 | * Any operation working on dquots via inode pointers must hold dqptr_sem. If | 99 | * Operations accessing dquots via inode pointers are protected by dquot_srcu. |
100 | * operation is just reading pointers from inode (or not using them at all) the | 100 | * Reading the pointers needs srcu_read_lock(&dquot_srcu), and |
101 | * read lock is enough. If pointers are altered function must hold write lock. | 101 | * synchronize_srcu(&dquot_srcu) is called after clearing pointers from |
102 | * inode and before dropping dquot references to avoid use of dquots after | ||
103 | * they are freed. dq_data_lock is used to serialize the pointer setting and | ||
104 | * clearing operations. | ||
102 | * Special care needs to be taken about S_NOQUOTA inode flag (marking that | 105 | * Special care needs to be taken about S_NOQUOTA inode flag (marking that |
103 | * inode is a quota file). Functions adding pointers from inode to dquots have | 106 | * inode is a quota file). Functions adding pointers from inode to dquots have |
104 | * to check this flag under dqptr_sem and then (if S_NOQUOTA is not set) they | 107 | * to check this flag under dq_data_lock and then (if S_NOQUOTA is not set) they |
105 | * have to do all pointer modifications before dropping dqptr_sem. This makes | 108 | * have to do all pointer modifications before dropping dq_data_lock. This makes |
106 | * sure they cannot race with quotaon which first sets S_NOQUOTA flag and | 109 | * sure they cannot race with quotaon which first sets S_NOQUOTA flag and |
107 | * then drops all pointers to dquots from an inode. | 110 | * then drops all pointers to dquots from an inode. |
108 | * | 111 | * |
@@ -116,21 +119,15 @@ | |||
116 | * spinlock to internal buffers before writing. | 119 | * spinlock to internal buffers before writing. |
117 | * | 120 | * |
118 | * Lock ordering (including related VFS locks) is the following: | 121 | * Lock ordering (including related VFS locks) is the following: |
119 | * dqonoff_mutex > i_mutex > journal_lock > dqptr_sem > dquot->dq_lock > | 122 | * dqonoff_mutex > i_mutex > journal_lock > dquot->dq_lock > dqio_mutex |
120 | * dqio_mutex | ||
121 | * dqonoff_mutex > i_mutex comes from dquot_quota_sync, dquot_enable, etc. | 123 | * dqonoff_mutex > i_mutex comes from dquot_quota_sync, dquot_enable, etc. |
122 | * The lock ordering of dqptr_sem imposed by quota code is only dqonoff_sem > | ||
123 | * dqptr_sem. But filesystem has to count with the fact that functions such as | ||
124 | * dquot_alloc_space() acquire dqptr_sem and they usually have to be called | ||
125 | * from inside a transaction to keep filesystem consistency after a crash. Also | ||
126 | * filesystems usually want to do some IO on dquot from ->mark_dirty which is | ||
127 | * called with dqptr_sem held. | ||
128 | */ | 124 | */ |
129 | 125 | ||
130 | static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_list_lock); | 126 | static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_list_lock); |
131 | static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_state_lock); | 127 | static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_state_lock); |
132 | __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_data_lock); | 128 | __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_data_lock); |
133 | EXPORT_SYMBOL(dq_data_lock); | 129 | EXPORT_SYMBOL(dq_data_lock); |
130 | DEFINE_STATIC_SRCU(dquot_srcu); | ||
134 | 131 | ||
135 | void __quota_error(struct super_block *sb, const char *func, | 132 | void __quota_error(struct super_block *sb, const char *func, |
136 | const char *fmt, ...) | 133 | const char *fmt, ...) |
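The rewritten locking comment above condenses to a pattern: readers dereference inode->i_dquot[] inside an SRCU read-side section, while updaters clear the pointers under dq_data_lock and only dqput() after synchronize_srcu(). A simplified sketch of both sides, not the actual dquot.c functions (quota-off-style teardown shown; warnings, statistics and error handling omitted):

```c
#include <linux/fs.h>
#include <linux/quota.h>
#include <linux/spinlock.h>
#include <linux/srcu.h>

DEFINE_STATIC_SRCU(my_dquot_srcu);		/* stand-in for dquot_srcu */
static DEFINE_SPINLOCK(my_dq_data_lock);	/* stand-in for dq_data_lock */

/* Reader side: SRCU only guarantees the dquots are not freed under us;
 * dq_data_lock still serializes the actual usage updates. */
static void charge_space(struct inode *inode, qsize_t number)
{
	int cnt, idx;

	idx = srcu_read_lock(&my_dquot_srcu);
	spin_lock(&my_dq_data_lock);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (!inode->i_dquot[cnt])
			continue;
		/* ... charge @number against inode->i_dquot[cnt] ... */
	}
	spin_unlock(&my_dq_data_lock);
	srcu_read_unlock(&my_dquot_srcu, idx);
}

/* Teardown side (quota-off style): clear the pointers, wait out all SRCU
 * readers that may still see them, then drop the references. */
static void detach_dquots(struct inode *inode, struct dquot **put)
{
	int cnt;

	spin_lock(&my_dq_data_lock);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		put[cnt] = inode->i_dquot[cnt];
		inode->i_dquot[cnt] = NULL;
	}
	spin_unlock(&my_dq_data_lock);

	synchronize_srcu(&my_dquot_srcu);	/* no reader still holds the old pointers */
	/* ... dqput() every non-NULL entry in put[] ... */
}
```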
@@ -733,7 +730,6 @@ static struct shrinker dqcache_shrinker = { | |||
733 | 730 | ||
734 | /* | 731 | /* |
735 | * Put reference to dquot | 732 | * Put reference to dquot |
736 | * NOTE: If you change this function please check whether dqput_blocks() works right... | ||
737 | */ | 733 | */ |
738 | void dqput(struct dquot *dquot) | 734 | void dqput(struct dquot *dquot) |
739 | { | 735 | { |
@@ -963,46 +959,33 @@ static void add_dquot_ref(struct super_block *sb, int type) | |||
963 | } | 959 | } |
964 | 960 | ||
965 | /* | 961 | /* |
966 | * Return 0 if dqput() won't block. | ||
967 | * (note that 1 doesn't necessarily mean blocking) | ||
968 | */ | ||
969 | static inline int dqput_blocks(struct dquot *dquot) | ||
970 | { | ||
971 | if (atomic_read(&dquot->dq_count) <= 1) | ||
972 | return 1; | ||
973 | return 0; | ||
974 | } | ||
975 | |||
976 | /* | ||
977 | * Remove references to dquots from inode and add dquot to list for freeing | 962 | * Remove references to dquots from inode and add dquot to list for freeing |
978 | * if we have the last reference to dquot | 963 | * if we have the last reference to dquot |
979 | * We can't race with anybody because we hold dqptr_sem for writing... | ||
980 | */ | 964 | */ |
981 | static int remove_inode_dquot_ref(struct inode *inode, int type, | 965 | static void remove_inode_dquot_ref(struct inode *inode, int type, |
982 | struct list_head *tofree_head) | 966 | struct list_head *tofree_head) |
983 | { | 967 | { |
984 | struct dquot *dquot = inode->i_dquot[type]; | 968 | struct dquot *dquot = inode->i_dquot[type]; |
985 | 969 | ||
986 | inode->i_dquot[type] = NULL; | 970 | inode->i_dquot[type] = NULL; |
987 | if (dquot) { | 971 | if (!dquot) |
988 | if (dqput_blocks(dquot)) { | 972 | return; |
989 | #ifdef CONFIG_QUOTA_DEBUG | 973 | |
990 | if (atomic_read(&dquot->dq_count) != 1) | 974 | if (list_empty(&dquot->dq_free)) { |
991 | quota_error(inode->i_sb, "Adding dquot with " | 975 | /* |
992 | "dq_count %d to dispose list", | 976 | * The inode still has reference to dquot so it can't be in the |
993 | atomic_read(&dquot->dq_count)); | 977 | * free list |
994 | #endif | 978 | */ |
995 | spin_lock(&dq_list_lock); | 979 | spin_lock(&dq_list_lock); |
996 | /* As dquot must have currently users it can't be on | 980 | list_add(&dquot->dq_free, tofree_head); |
997 | * the free list... */ | 981 | spin_unlock(&dq_list_lock); |
998 | list_add(&dquot->dq_free, tofree_head); | 982 | } else { |
999 | spin_unlock(&dq_list_lock); | 983 | /* |
1000 | return 1; | 984 | * Dquot is already in a list to put so we won't drop the last |
1001 | } | 985 | * reference here. |
1002 | else | 986 | */ |
1003 | dqput(dquot); /* We have guaranteed we won't block */ | 987 | dqput(dquot); |
1004 | } | 988 | } |
1005 | return 0; | ||
1006 | } | 989 | } |
1007 | 990 | ||
1008 | /* | 991 | /* |
@@ -1037,13 +1020,15 @@ static void remove_dquot_ref(struct super_block *sb, int type, | |||
1037 | * We have to scan also I_NEW inodes because they can already | 1020 | * We have to scan also I_NEW inodes because they can already |
1038 | * have quota pointer initialized. Luckily, we need to touch | 1021 | * have quota pointer initialized. Luckily, we need to touch |
1039 | * only quota pointers and these have separate locking | 1022 | * only quota pointers and these have separate locking |
1040 | * (dqptr_sem). | 1023 | * (dq_data_lock). |
1041 | */ | 1024 | */ |
1025 | spin_lock(&dq_data_lock); | ||
1042 | if (!IS_NOQUOTA(inode)) { | 1026 | if (!IS_NOQUOTA(inode)) { |
1043 | if (unlikely(inode_get_rsv_space(inode) > 0)) | 1027 | if (unlikely(inode_get_rsv_space(inode) > 0)) |
1044 | reserved = 1; | 1028 | reserved = 1; |
1045 | remove_inode_dquot_ref(inode, type, tofree_head); | 1029 | remove_inode_dquot_ref(inode, type, tofree_head); |
1046 | } | 1030 | } |
1031 | spin_unlock(&dq_data_lock); | ||
1047 | } | 1032 | } |
1048 | spin_unlock(&inode_sb_list_lock); | 1033 | spin_unlock(&inode_sb_list_lock); |
1049 | #ifdef CONFIG_QUOTA_DEBUG | 1034 | #ifdef CONFIG_QUOTA_DEBUG |
@@ -1061,9 +1046,8 @@ static void drop_dquot_ref(struct super_block *sb, int type) | |||
1061 | LIST_HEAD(tofree_head); | 1046 | LIST_HEAD(tofree_head); |
1062 | 1047 | ||
1063 | if (sb->dq_op) { | 1048 | if (sb->dq_op) { |
1064 | down_write(&sb_dqopt(sb)->dqptr_sem); | ||
1065 | remove_dquot_ref(sb, type, &tofree_head); | 1049 | remove_dquot_ref(sb, type, &tofree_head); |
1066 | up_write(&sb_dqopt(sb)->dqptr_sem); | 1050 | synchronize_srcu(&dquot_srcu); |
1067 | put_dquot_list(&tofree_head); | 1051 | put_dquot_list(&tofree_head); |
1068 | } | 1052 | } |
1069 | } | 1053 | } |
@@ -1394,21 +1378,16 @@ static int dquot_active(const struct inode *inode) | |||
1394 | /* | 1378 | /* |
1395 | * Initialize quota pointers in inode | 1379 | * Initialize quota pointers in inode |
1396 | * | 1380 | * |
1397 | * We do things in a bit complicated way but by that we avoid calling | ||
1398 | * dqget() and thus filesystem callbacks under dqptr_sem. | ||
1399 | * | ||
1400 | * It is better to call this function outside of any transaction as it | 1381 | * It is better to call this function outside of any transaction as it |
1401 | * might need a lot of space in journal for dquot structure allocation. | 1382 | * might need a lot of space in journal for dquot structure allocation. |
1402 | */ | 1383 | */ |
1403 | static void __dquot_initialize(struct inode *inode, int type) | 1384 | static void __dquot_initialize(struct inode *inode, int type) |
1404 | { | 1385 | { |
1405 | int cnt; | 1386 | int cnt, init_needed = 0; |
1406 | struct dquot *got[MAXQUOTAS]; | 1387 | struct dquot *got[MAXQUOTAS]; |
1407 | struct super_block *sb = inode->i_sb; | 1388 | struct super_block *sb = inode->i_sb; |
1408 | qsize_t rsv; | 1389 | qsize_t rsv; |
1409 | 1390 | ||
1410 | /* First test before acquiring mutex - solves deadlocks when we | ||
1411 | * re-enter the quota code and are already holding the mutex */ | ||
1412 | if (!dquot_active(inode)) | 1391 | if (!dquot_active(inode)) |
1413 | return; | 1392 | return; |
1414 | 1393 | ||
@@ -1418,6 +1397,15 @@ static void __dquot_initialize(struct inode *inode, int type) | |||
1418 | got[cnt] = NULL; | 1397 | got[cnt] = NULL; |
1419 | if (type != -1 && cnt != type) | 1398 | if (type != -1 && cnt != type) |
1420 | continue; | 1399 | continue; |
1400 | /* | ||
1401 | * The i_dquot should have been initialized in most cases, | ||
1402 | * we check it without locking here to avoid unnecessary | ||
1403 | * dqget()/dqput() calls. | ||
1404 | */ | ||
1405 | if (inode->i_dquot[cnt]) | ||
1406 | continue; | ||
1407 | init_needed = 1; | ||
1408 | |||
1421 | switch (cnt) { | 1409 | switch (cnt) { |
1422 | case USRQUOTA: | 1410 | case USRQUOTA: |
1423 | qid = make_kqid_uid(inode->i_uid); | 1411 | qid = make_kqid_uid(inode->i_uid); |
@@ -1429,7 +1417,11 @@ static void __dquot_initialize(struct inode *inode, int type) | |||
1429 | got[cnt] = dqget(sb, qid); | 1417 | got[cnt] = dqget(sb, qid); |
1430 | } | 1418 | } |
1431 | 1419 | ||
1432 | down_write(&sb_dqopt(sb)->dqptr_sem); | 1420 | /* All required i_dquot has been initialized */ |
1421 | if (!init_needed) | ||
1422 | return; | ||
1423 | |||
1424 | spin_lock(&dq_data_lock); | ||
1433 | if (IS_NOQUOTA(inode)) | 1425 | if (IS_NOQUOTA(inode)) |
1434 | goto out_err; | 1426 | goto out_err; |
1435 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | 1427 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { |
@@ -1449,15 +1441,12 @@ static void __dquot_initialize(struct inode *inode, int type) | |||
1449 | * did a write before quota was turned on | 1441 | * did a write before quota was turned on |
1450 | */ | 1442 | */ |
1451 | rsv = inode_get_rsv_space(inode); | 1443 | rsv = inode_get_rsv_space(inode); |
1452 | if (unlikely(rsv)) { | 1444 | if (unlikely(rsv)) |
1453 | spin_lock(&dq_data_lock); | ||
1454 | dquot_resv_space(inode->i_dquot[cnt], rsv); | 1445 | dquot_resv_space(inode->i_dquot[cnt], rsv); |
1455 | spin_unlock(&dq_data_lock); | ||
1456 | } | ||
1457 | } | 1446 | } |
1458 | } | 1447 | } |
1459 | out_err: | 1448 | out_err: |
1460 | up_write(&sb_dqopt(sb)->dqptr_sem); | 1449 | spin_unlock(&dq_data_lock); |
1461 | /* Drop unused references */ | 1450 | /* Drop unused references */ |
1462 | dqput_all(got); | 1451 | dqput_all(got); |
1463 | } | 1452 | } |
@@ -1469,19 +1458,24 @@ void dquot_initialize(struct inode *inode) | |||
1469 | EXPORT_SYMBOL(dquot_initialize); | 1458 | EXPORT_SYMBOL(dquot_initialize); |
1470 | 1459 | ||
1471 | /* | 1460 | /* |
1472 | * Release all quotas referenced by inode | 1461 | * Release all quotas referenced by inode. |
1462 | * | ||
1463 | * This function is only called on inode free or when converting a | ||
1464 | * file to a quota file. In both cases there are no other users of | ||
1465 | * i_dquot, so we needn't call synchronize_srcu() after clearing | ||
1466 | * i_dquot. | ||
1473 | */ | 1467 | */ |
1474 | static void __dquot_drop(struct inode *inode) | 1468 | static void __dquot_drop(struct inode *inode) |
1475 | { | 1469 | { |
1476 | int cnt; | 1470 | int cnt; |
1477 | struct dquot *put[MAXQUOTAS]; | 1471 | struct dquot *put[MAXQUOTAS]; |
1478 | 1472 | ||
1479 | down_write(&sb_dqopt(inode->i_sb)->dqptr_sem); | 1473 | spin_lock(&dq_data_lock); |
1480 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | 1474 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { |
1481 | put[cnt] = inode->i_dquot[cnt]; | 1475 | put[cnt] = inode->i_dquot[cnt]; |
1482 | inode->i_dquot[cnt] = NULL; | 1476 | inode->i_dquot[cnt] = NULL; |
1483 | } | 1477 | } |
1484 | up_write(&sb_dqopt(inode->i_sb)->dqptr_sem); | 1478 | spin_unlock(&dq_data_lock); |
1485 | dqput_all(put); | 1479 | dqput_all(put); |
1486 | } | 1480 | } |
1487 | 1481 | ||
@@ -1599,15 +1593,11 @@ static void inode_decr_space(struct inode *inode, qsize_t number, int reserve) | |||
1599 | */ | 1593 | */ |
1600 | int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags) | 1594 | int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags) |
1601 | { | 1595 | { |
1602 | int cnt, ret = 0; | 1596 | int cnt, ret = 0, index; |
1603 | struct dquot_warn warn[MAXQUOTAS]; | 1597 | struct dquot_warn warn[MAXQUOTAS]; |
1604 | struct dquot **dquots = inode->i_dquot; | 1598 | struct dquot **dquots = inode->i_dquot; |
1605 | int reserve = flags & DQUOT_SPACE_RESERVE; | 1599 | int reserve = flags & DQUOT_SPACE_RESERVE; |
1606 | 1600 | ||
1607 | /* | ||
1608 | * First test before acquiring mutex - solves deadlocks when we | ||
1609 | * re-enter the quota code and are already holding the mutex | ||
1610 | */ | ||
1611 | if (!dquot_active(inode)) { | 1601 | if (!dquot_active(inode)) { |
1612 | inode_incr_space(inode, number, reserve); | 1602 | inode_incr_space(inode, number, reserve); |
1613 | goto out; | 1603 | goto out; |
@@ -1616,7 +1606,7 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags) | |||
1616 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) | 1606 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) |
1617 | warn[cnt].w_type = QUOTA_NL_NOWARN; | 1607 | warn[cnt].w_type = QUOTA_NL_NOWARN; |
1618 | 1608 | ||
1619 | down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | 1609 | index = srcu_read_lock(&dquot_srcu); |
1620 | spin_lock(&dq_data_lock); | 1610 | spin_lock(&dq_data_lock); |
1621 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | 1611 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { |
1622 | if (!dquots[cnt]) | 1612 | if (!dquots[cnt]) |
@@ -1643,7 +1633,7 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags) | |||
1643 | goto out_flush_warn; | 1633 | goto out_flush_warn; |
1644 | mark_all_dquot_dirty(dquots); | 1634 | mark_all_dquot_dirty(dquots); |
1645 | out_flush_warn: | 1635 | out_flush_warn: |
1646 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | 1636 | srcu_read_unlock(&dquot_srcu, index); |
1647 | flush_warnings(warn); | 1637 | flush_warnings(warn); |
1648 | out: | 1638 | out: |
1649 | return ret; | 1639 | return ret; |
@@ -1655,17 +1645,16 @@ EXPORT_SYMBOL(__dquot_alloc_space); | |||
1655 | */ | 1645 | */ |
1656 | int dquot_alloc_inode(const struct inode *inode) | 1646 | int dquot_alloc_inode(const struct inode *inode) |
1657 | { | 1647 | { |
1658 | int cnt, ret = 0; | 1648 | int cnt, ret = 0, index; |
1659 | struct dquot_warn warn[MAXQUOTAS]; | 1649 | struct dquot_warn warn[MAXQUOTAS]; |
1660 | struct dquot * const *dquots = inode->i_dquot; | 1650 | struct dquot * const *dquots = inode->i_dquot; |
1661 | 1651 | ||
1662 | /* First test before acquiring mutex - solves deadlocks when we | ||
1663 | * re-enter the quota code and are already holding the mutex */ | ||
1664 | if (!dquot_active(inode)) | 1652 | if (!dquot_active(inode)) |
1665 | return 0; | 1653 | return 0; |
1666 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) | 1654 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) |
1667 | warn[cnt].w_type = QUOTA_NL_NOWARN; | 1655 | warn[cnt].w_type = QUOTA_NL_NOWARN; |
1668 | down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | 1656 | |
1657 | index = srcu_read_lock(&dquot_srcu); | ||
1669 | spin_lock(&dq_data_lock); | 1658 | spin_lock(&dq_data_lock); |
1670 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | 1659 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { |
1671 | if (!dquots[cnt]) | 1660 | if (!dquots[cnt]) |
@@ -1685,7 +1674,7 @@ warn_put_all: | |||
1685 | spin_unlock(&dq_data_lock); | 1674 | spin_unlock(&dq_data_lock); |
1686 | if (ret == 0) | 1675 | if (ret == 0) |
1687 | mark_all_dquot_dirty(dquots); | 1676 | mark_all_dquot_dirty(dquots); |
1688 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | 1677 | srcu_read_unlock(&dquot_srcu, index); |
1689 | flush_warnings(warn); | 1678 | flush_warnings(warn); |
1690 | return ret; | 1679 | return ret; |
1691 | } | 1680 | } |
@@ -1696,14 +1685,14 @@ EXPORT_SYMBOL(dquot_alloc_inode); | |||
1696 | */ | 1685 | */ |
1697 | int dquot_claim_space_nodirty(struct inode *inode, qsize_t number) | 1686 | int dquot_claim_space_nodirty(struct inode *inode, qsize_t number) |
1698 | { | 1687 | { |
1699 | int cnt; | 1688 | int cnt, index; |
1700 | 1689 | ||
1701 | if (!dquot_active(inode)) { | 1690 | if (!dquot_active(inode)) { |
1702 | inode_claim_rsv_space(inode, number); | 1691 | inode_claim_rsv_space(inode, number); |
1703 | return 0; | 1692 | return 0; |
1704 | } | 1693 | } |
1705 | 1694 | ||
1706 | down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | 1695 | index = srcu_read_lock(&dquot_srcu); |
1707 | spin_lock(&dq_data_lock); | 1696 | spin_lock(&dq_data_lock); |
1708 | /* Claim reserved quotas to allocated quotas */ | 1697 | /* Claim reserved quotas to allocated quotas */ |
1709 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | 1698 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { |
@@ -1715,7 +1704,7 @@ int dquot_claim_space_nodirty(struct inode *inode, qsize_t number) | |||
1715 | inode_claim_rsv_space(inode, number); | 1704 | inode_claim_rsv_space(inode, number); |
1716 | spin_unlock(&dq_data_lock); | 1705 | spin_unlock(&dq_data_lock); |
1717 | mark_all_dquot_dirty(inode->i_dquot); | 1706 | mark_all_dquot_dirty(inode->i_dquot); |
1718 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | 1707 | srcu_read_unlock(&dquot_srcu, index); |
1719 | return 0; | 1708 | return 0; |
1720 | } | 1709 | } |
1721 | EXPORT_SYMBOL(dquot_claim_space_nodirty); | 1710 | EXPORT_SYMBOL(dquot_claim_space_nodirty); |
@@ -1725,14 +1714,14 @@ EXPORT_SYMBOL(dquot_claim_space_nodirty); | |||
1725 | */ | 1714 | */ |
1726 | void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number) | 1715 | void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number) |
1727 | { | 1716 | { |
1728 | int cnt; | 1717 | int cnt, index; |
1729 | 1718 | ||
1730 | if (!dquot_active(inode)) { | 1719 | if (!dquot_active(inode)) { |
1731 | inode_reclaim_rsv_space(inode, number); | 1720 | inode_reclaim_rsv_space(inode, number); |
1732 | return; | 1721 | return; |
1733 | } | 1722 | } |
1734 | 1723 | ||
1735 | down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | 1724 | index = srcu_read_lock(&dquot_srcu); |
1736 | spin_lock(&dq_data_lock); | 1725 | spin_lock(&dq_data_lock); |
1737 | /* Claim reserved quotas to allocated quotas */ | 1726 | /* Claim reserved quotas to allocated quotas */ |
1738 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | 1727 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { |
@@ -1744,7 +1733,7 @@ void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number) | |||
1744 | inode_reclaim_rsv_space(inode, number); | 1733 | inode_reclaim_rsv_space(inode, number); |
1745 | spin_unlock(&dq_data_lock); | 1734 | spin_unlock(&dq_data_lock); |
1746 | mark_all_dquot_dirty(inode->i_dquot); | 1735 | mark_all_dquot_dirty(inode->i_dquot); |
1747 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | 1736 | srcu_read_unlock(&dquot_srcu, index); |
1748 | return; | 1737 | return; |
1749 | } | 1738 | } |
1750 | EXPORT_SYMBOL(dquot_reclaim_space_nodirty); | 1739 | EXPORT_SYMBOL(dquot_reclaim_space_nodirty); |
@@ -1757,16 +1746,14 @@ void __dquot_free_space(struct inode *inode, qsize_t number, int flags) | |||
1757 | unsigned int cnt; | 1746 | unsigned int cnt; |
1758 | struct dquot_warn warn[MAXQUOTAS]; | 1747 | struct dquot_warn warn[MAXQUOTAS]; |
1759 | struct dquot **dquots = inode->i_dquot; | 1748 | struct dquot **dquots = inode->i_dquot; |
1760 | int reserve = flags & DQUOT_SPACE_RESERVE; | 1749 | int reserve = flags & DQUOT_SPACE_RESERVE, index; |
1761 | 1750 | ||
1762 | /* First test before acquiring mutex - solves deadlocks when we | ||
1763 | * re-enter the quota code and are already holding the mutex */ | ||
1764 | if (!dquot_active(inode)) { | 1751 | if (!dquot_active(inode)) { |
1765 | inode_decr_space(inode, number, reserve); | 1752 | inode_decr_space(inode, number, reserve); |
1766 | return; | 1753 | return; |
1767 | } | 1754 | } |
1768 | 1755 | ||
1769 | down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | 1756 | index = srcu_read_lock(&dquot_srcu); |
1770 | spin_lock(&dq_data_lock); | 1757 | spin_lock(&dq_data_lock); |
1771 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | 1758 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { |
1772 | int wtype; | 1759 | int wtype; |
@@ -1789,7 +1776,7 @@ void __dquot_free_space(struct inode *inode, qsize_t number, int flags) | |||
1789 | goto out_unlock; | 1776 | goto out_unlock; |
1790 | mark_all_dquot_dirty(dquots); | 1777 | mark_all_dquot_dirty(dquots); |
1791 | out_unlock: | 1778 | out_unlock: |
1792 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | 1779 | srcu_read_unlock(&dquot_srcu, index); |
1793 | flush_warnings(warn); | 1780 | flush_warnings(warn); |
1794 | } | 1781 | } |
1795 | EXPORT_SYMBOL(__dquot_free_space); | 1782 | EXPORT_SYMBOL(__dquot_free_space); |
@@ -1802,13 +1789,12 @@ void dquot_free_inode(const struct inode *inode) | |||
1802 | unsigned int cnt; | 1789 | unsigned int cnt; |
1803 | struct dquot_warn warn[MAXQUOTAS]; | 1790 | struct dquot_warn warn[MAXQUOTAS]; |
1804 | struct dquot * const *dquots = inode->i_dquot; | 1791 | struct dquot * const *dquots = inode->i_dquot; |
1792 | int index; | ||
1805 | 1793 | ||
1806 | /* First test before acquiring mutex - solves deadlocks when we | ||
1807 | * re-enter the quota code and are already holding the mutex */ | ||
1808 | if (!dquot_active(inode)) | 1794 | if (!dquot_active(inode)) |
1809 | return; | 1795 | return; |
1810 | 1796 | ||
1811 | down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | 1797 | index = srcu_read_lock(&dquot_srcu); |
1812 | spin_lock(&dq_data_lock); | 1798 | spin_lock(&dq_data_lock); |
1813 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | 1799 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { |
1814 | int wtype; | 1800 | int wtype; |
@@ -1823,7 +1809,7 @@ void dquot_free_inode(const struct inode *inode) | |||
1823 | } | 1809 | } |
1824 | spin_unlock(&dq_data_lock); | 1810 | spin_unlock(&dq_data_lock); |
1825 | mark_all_dquot_dirty(dquots); | 1811 | mark_all_dquot_dirty(dquots); |
1826 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | 1812 | srcu_read_unlock(&dquot_srcu, index); |
1827 | flush_warnings(warn); | 1813 | flush_warnings(warn); |
1828 | } | 1814 | } |
1829 | EXPORT_SYMBOL(dquot_free_inode); | 1815 | EXPORT_SYMBOL(dquot_free_inode); |
@@ -1837,6 +1823,8 @@ EXPORT_SYMBOL(dquot_free_inode); | |||
1837 | * This operation can block, but only after everything is updated | 1823 | * This operation can block, but only after everything is updated |
1838 | * A transaction must be started when entering this function. | 1824 | * A transaction must be started when entering this function. |
1839 | * | 1825 | * |
1826 | * We are holding references on transfer_from & transfer_to, so there is | ||
1827 | * no need to protect them with srcu_read_lock(). | ||
1840 | */ | 1828 | */ |
1841 | int __dquot_transfer(struct inode *inode, struct dquot **transfer_to) | 1829 | int __dquot_transfer(struct inode *inode, struct dquot **transfer_to) |
1842 | { | 1830 | { |
@@ -1849,8 +1837,6 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to) | |||
1849 | struct dquot_warn warn_from_inodes[MAXQUOTAS]; | 1837 | struct dquot_warn warn_from_inodes[MAXQUOTAS]; |
1850 | struct dquot_warn warn_from_space[MAXQUOTAS]; | 1838 | struct dquot_warn warn_from_space[MAXQUOTAS]; |
1851 | 1839 | ||
1852 | /* First test before acquiring mutex - solves deadlocks when we | ||
1853 | * re-enter the quota code and are already holding the mutex */ | ||
1854 | if (IS_NOQUOTA(inode)) | 1840 | if (IS_NOQUOTA(inode)) |
1855 | return 0; | 1841 | return 0; |
1856 | /* Initialize the arrays */ | 1842 | /* Initialize the arrays */ |
@@ -1859,12 +1845,12 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to) | |||
1859 | warn_from_inodes[cnt].w_type = QUOTA_NL_NOWARN; | 1845 | warn_from_inodes[cnt].w_type = QUOTA_NL_NOWARN; |
1860 | warn_from_space[cnt].w_type = QUOTA_NL_NOWARN; | 1846 | warn_from_space[cnt].w_type = QUOTA_NL_NOWARN; |
1861 | } | 1847 | } |
1862 | down_write(&sb_dqopt(inode->i_sb)->dqptr_sem); | 1848 | |
1849 | spin_lock(&dq_data_lock); | ||
1863 | if (IS_NOQUOTA(inode)) { /* File without quota accounting? */ | 1850 | if (IS_NOQUOTA(inode)) { /* File without quota accounting? */ |
1864 | up_write(&sb_dqopt(inode->i_sb)->dqptr_sem); | 1851 | spin_unlock(&dq_data_lock); |
1865 | return 0; | 1852 | return 0; |
1866 | } | 1853 | } |
1867 | spin_lock(&dq_data_lock); | ||
1868 | cur_space = inode_get_bytes(inode); | 1854 | cur_space = inode_get_bytes(inode); |
1869 | rsv_space = inode_get_rsv_space(inode); | 1855 | rsv_space = inode_get_rsv_space(inode); |
1870 | space = cur_space + rsv_space; | 1856 | space = cur_space + rsv_space; |
@@ -1918,7 +1904,6 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to) | |||
1918 | inode->i_dquot[cnt] = transfer_to[cnt]; | 1904 | inode->i_dquot[cnt] = transfer_to[cnt]; |
1919 | } | 1905 | } |
1920 | spin_unlock(&dq_data_lock); | 1906 | spin_unlock(&dq_data_lock); |
1921 | up_write(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
1922 | 1907 | ||
1923 | mark_all_dquot_dirty(transfer_from); | 1908 | mark_all_dquot_dirty(transfer_from); |
1924 | mark_all_dquot_dirty(transfer_to); | 1909 | mark_all_dquot_dirty(transfer_to); |
@@ -1932,7 +1917,6 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to) | |||
1932 | return 0; | 1917 | return 0; |
1933 | over_quota: | 1918 | over_quota: |
1934 | spin_unlock(&dq_data_lock); | 1919 | spin_unlock(&dq_data_lock); |
1935 | up_write(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
1936 | flush_warnings(warn_to); | 1920 | flush_warnings(warn_to); |
1937 | return ret; | 1921 | return ret; |
1938 | } | 1922 | } |
diff --git a/fs/quota/kqid.c b/fs/quota/kqid.c index 2f97b0e2c501..ebc5e6285800 100644 --- a/fs/quota/kqid.c +++ b/fs/quota/kqid.c | |||
@@ -55,7 +55,7 @@ EXPORT_SYMBOL(qid_lt); | |||
55 | /** | 55 | /** |
56 | * from_kqid - Create a qid from a kqid user-namespace pair. | 56 | * from_kqid - Create a qid from a kqid user-namespace pair. |
57 | * @targ: The user namespace we want a qid in. | 57 | * @targ: The user namespace we want a qid in. |
58 | * @kuid: The kernel internal quota identifier to start with. | 58 | * @kqid: The kernel internal quota identifier to start with. |
59 | * | 59 | * |
60 | * Map @kqid into the user-namespace specified by @targ and | 60 | * Map @kqid into the user-namespace specified by @targ and |
61 | * return the resulting qid. | 61 | * return the resulting qid. |
diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c index 72d29177998e..bb2869f5dfd8 100644 --- a/fs/quota/netlink.c +++ b/fs/quota/netlink.c | |||
@@ -32,8 +32,7 @@ static struct genl_family quota_genl_family = { | |||
32 | 32 | ||
33 | /** | 33 | /** |
34 | * quota_send_warning - Send warning to userspace about exceeded quota | 34 | * quota_send_warning - Send warning to userspace about exceeded quota |
35 | * @type: The quota type: USRQQUOTA, GRPQUOTA,... | 35 | * @qid: The kernel internal quota identifier. |
36 | * @id: The user or group id of the quota that was exceeded | ||
37 | * @dev: The device on which the fs is mounted (sb->s_dev) | 36 | * @dev: The device on which the fs is mounted (sb->s_dev) |
38 | * @warntype: The type of the warning: QUOTA_NL_... | 37 | * @warntype: The type of the warning: QUOTA_NL_... |
39 | * | 38 | * |
diff --git a/fs/quota/quota.c b/fs/quota/quota.c index ff3f0b3cfdb3..75621649dbd7 100644 --- a/fs/quota/quota.c +++ b/fs/quota/quota.c | |||
@@ -79,13 +79,13 @@ static int quota_getfmt(struct super_block *sb, int type, void __user *addr) | |||
79 | { | 79 | { |
80 | __u32 fmt; | 80 | __u32 fmt; |
81 | 81 | ||
82 | down_read(&sb_dqopt(sb)->dqptr_sem); | 82 | mutex_lock(&sb_dqopt(sb)->dqonoff_mutex); |
83 | if (!sb_has_quota_active(sb, type)) { | 83 | if (!sb_has_quota_active(sb, type)) { |
84 | up_read(&sb_dqopt(sb)->dqptr_sem); | 84 | mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); |
85 | return -ESRCH; | 85 | return -ESRCH; |
86 | } | 86 | } |
87 | fmt = sb_dqopt(sb)->info[type].dqi_format->qf_fmt_id; | 87 | fmt = sb_dqopt(sb)->info[type].dqi_format->qf_fmt_id; |
88 | up_read(&sb_dqopt(sb)->dqptr_sem); | 88 | mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); |
89 | if (copy_to_user(addr, &fmt, sizeof(fmt))) | 89 | if (copy_to_user(addr, &fmt, sizeof(fmt))) |
90 | return -EFAULT; | 90 | return -EFAULT; |
91 | return 0; | 91 | return 0; |
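quota_getfmt() now serializes against quota on/off with dqonoff_mutex instead of the removed dqptr_sem. The structure it keeps is worth noting: the format id is snapshotted into a local variable under the lock, and copy_to_user(), which may fault and sleep, runs only after the unlock. A hedged userspace analogue of that ordering follows; onoff_lock, quota_active and get_format are invented names.

#include <pthread.h>
#include <stdio.h>
#include <string.h>

static pthread_mutex_t onoff_lock = PTHREAD_MUTEX_INITIALIZER;
static int quota_active = 1;
static unsigned int quota_fmt = 2;      /* stands in for dqi_format->qf_fmt_id */

/* Copy the current format id out; never touch caller memory under the lock. */
static int get_format(unsigned int *out)
{
	unsigned int fmt;

	pthread_mutex_lock(&onoff_lock);
	if (!quota_active) {
		pthread_mutex_unlock(&onoff_lock);
		return -1;                      /* -ESRCH in the kernel code */
	}
	fmt = quota_fmt;                        /* snapshot under the lock */
	pthread_mutex_unlock(&onoff_lock);

	memcpy(out, &fmt, sizeof(fmt));         /* the "copy_to_user" step, after unlock */
	return 0;
}

int main(void)
{
	unsigned int fmt;

	if (get_format(&fmt) == 0)
		printf("format id: %u\n", fmt);
	return 0;
}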
diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c index 5739cb99de7b..9c02d96d3a42 100644 --- a/fs/reiserfs/do_balan.c +++ b/fs/reiserfs/do_balan.c | |||
@@ -286,12 +286,14 @@ static int balance_leaf_when_delete(struct tree_balance *tb, int flag) | |||
286 | return 0; | 286 | return 0; |
287 | } | 287 | } |
288 | 288 | ||
289 | static void balance_leaf_insert_left(struct tree_balance *tb, | 289 | static unsigned int balance_leaf_insert_left(struct tree_balance *tb, |
290 | struct item_head *ih, const char *body) | 290 | struct item_head *const ih, |
291 | const char * const body) | ||
291 | { | 292 | { |
292 | int ret; | 293 | int ret; |
293 | struct buffer_info bi; | 294 | struct buffer_info bi; |
294 | int n = B_NR_ITEMS(tb->L[0]); | 295 | int n = B_NR_ITEMS(tb->L[0]); |
296 | unsigned body_shift_bytes = 0; | ||
295 | 297 | ||
296 | if (tb->item_pos == tb->lnum[0] - 1 && tb->lbytes != -1) { | 298 | if (tb->item_pos == tb->lnum[0] - 1 && tb->lbytes != -1) { |
297 | /* part of new item falls into L[0] */ | 299 | /* part of new item falls into L[0] */ |
@@ -329,7 +331,7 @@ static void balance_leaf_insert_left(struct tree_balance *tb, | |||
329 | 331 | ||
330 | put_ih_item_len(ih, new_item_len); | 332 | put_ih_item_len(ih, new_item_len); |
331 | if (tb->lbytes > tb->zeroes_num) { | 333 | if (tb->lbytes > tb->zeroes_num) { |
332 | body += (tb->lbytes - tb->zeroes_num); | 334 | body_shift_bytes = tb->lbytes - tb->zeroes_num; |
333 | tb->zeroes_num = 0; | 335 | tb->zeroes_num = 0; |
334 | } else | 336 | } else |
335 | tb->zeroes_num -= tb->lbytes; | 337 | tb->zeroes_num -= tb->lbytes; |
@@ -349,11 +351,12 @@ static void balance_leaf_insert_left(struct tree_balance *tb, | |||
349 | tb->insert_size[0] = 0; | 351 | tb->insert_size[0] = 0; |
350 | tb->zeroes_num = 0; | 352 | tb->zeroes_num = 0; |
351 | } | 353 | } |
354 | return body_shift_bytes; | ||
352 | } | 355 | } |
353 | 356 | ||
354 | static void balance_leaf_paste_left_shift_dirent(struct tree_balance *tb, | 357 | static void balance_leaf_paste_left_shift_dirent(struct tree_balance *tb, |
355 | struct item_head *ih, | 358 | struct item_head * const ih, |
356 | const char *body) | 359 | const char * const body) |
357 | { | 360 | { |
358 | int n = B_NR_ITEMS(tb->L[0]); | 361 | int n = B_NR_ITEMS(tb->L[0]); |
359 | struct buffer_info bi; | 362 | struct buffer_info bi; |
@@ -413,17 +416,18 @@ static void balance_leaf_paste_left_shift_dirent(struct tree_balance *tb, | |||
413 | tb->pos_in_item -= tb->lbytes; | 416 | tb->pos_in_item -= tb->lbytes; |
414 | } | 417 | } |
415 | 418 | ||
416 | static void balance_leaf_paste_left_shift(struct tree_balance *tb, | 419 | static unsigned int balance_leaf_paste_left_shift(struct tree_balance *tb, |
417 | struct item_head *ih, | 420 | struct item_head * const ih, |
418 | const char *body) | 421 | const char * const body) |
419 | { | 422 | { |
420 | struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path); | 423 | struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path); |
421 | int n = B_NR_ITEMS(tb->L[0]); | 424 | int n = B_NR_ITEMS(tb->L[0]); |
422 | struct buffer_info bi; | 425 | struct buffer_info bi; |
426 | int body_shift_bytes = 0; | ||
423 | 427 | ||
424 | if (is_direntry_le_ih(item_head(tbS0, tb->item_pos))) { | 428 | if (is_direntry_le_ih(item_head(tbS0, tb->item_pos))) { |
425 | balance_leaf_paste_left_shift_dirent(tb, ih, body); | 429 | balance_leaf_paste_left_shift_dirent(tb, ih, body); |
426 | return; | 430 | return 0; |
427 | } | 431 | } |
428 | 432 | ||
429 | RFALSE(tb->lbytes <= 0, | 433 | RFALSE(tb->lbytes <= 0, |
@@ -497,7 +501,7 @@ static void balance_leaf_paste_left_shift(struct tree_balance *tb, | |||
497 | * insert_size[0] | 501 | * insert_size[0] |
498 | */ | 502 | */ |
499 | if (l_n > tb->zeroes_num) { | 503 | if (l_n > tb->zeroes_num) { |
500 | body += (l_n - tb->zeroes_num); | 504 | body_shift_bytes = l_n - tb->zeroes_num; |
501 | tb->zeroes_num = 0; | 505 | tb->zeroes_num = 0; |
502 | } else | 506 | } else |
503 | tb->zeroes_num -= l_n; | 507 | tb->zeroes_num -= l_n; |
@@ -526,13 +530,14 @@ static void balance_leaf_paste_left_shift(struct tree_balance *tb, | |||
526 | */ | 530 | */ |
527 | leaf_shift_left(tb, tb->lnum[0], tb->lbytes); | 531 | leaf_shift_left(tb, tb->lnum[0], tb->lbytes); |
528 | } | 532 | } |
533 | return body_shift_bytes; | ||
529 | } | 534 | } |
530 | 535 | ||
531 | 536 | ||
532 | /* appended item will be in L[0] in whole */ | 537 | /* appended item will be in L[0] in whole */ |
533 | static void balance_leaf_paste_left_whole(struct tree_balance *tb, | 538 | static void balance_leaf_paste_left_whole(struct tree_balance *tb, |
534 | struct item_head *ih, | 539 | struct item_head * const ih, |
535 | const char *body) | 540 | const char * const body) |
536 | { | 541 | { |
537 | struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path); | 542 | struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path); |
538 | int n = B_NR_ITEMS(tb->L[0]); | 543 | int n = B_NR_ITEMS(tb->L[0]); |
@@ -584,39 +589,44 @@ static void balance_leaf_paste_left_whole(struct tree_balance *tb, | |||
584 | tb->zeroes_num = 0; | 589 | tb->zeroes_num = 0; |
585 | } | 590 | } |
586 | 591 | ||
587 | static void balance_leaf_paste_left(struct tree_balance *tb, | 592 | static unsigned int balance_leaf_paste_left(struct tree_balance *tb, |
588 | struct item_head *ih, const char *body) | 593 | struct item_head * const ih, |
594 | const char * const body) | ||
589 | { | 595 | { |
590 | /* we must shift the part of the appended item */ | 596 | /* we must shift the part of the appended item */ |
591 | if (tb->item_pos == tb->lnum[0] - 1 && tb->lbytes != -1) | 597 | if (tb->item_pos == tb->lnum[0] - 1 && tb->lbytes != -1) |
592 | balance_leaf_paste_left_shift(tb, ih, body); | 598 | return balance_leaf_paste_left_shift(tb, ih, body); |
593 | else | 599 | else |
594 | balance_leaf_paste_left_whole(tb, ih, body); | 600 | balance_leaf_paste_left_whole(tb, ih, body); |
601 | return 0; | ||
595 | } | 602 | } |
596 | 603 | ||
597 | /* Shift lnum[0] items from S[0] to the left neighbor L[0] */ | 604 | /* Shift lnum[0] items from S[0] to the left neighbor L[0] */ |
598 | static void balance_leaf_left(struct tree_balance *tb, struct item_head *ih, | 605 | static unsigned int balance_leaf_left(struct tree_balance *tb, |
599 | const char *body, int flag) | 606 | struct item_head * const ih, |
607 | const char * const body, int flag) | ||
600 | { | 608 | { |
601 | if (tb->lnum[0] <= 0) | 609 | if (tb->lnum[0] <= 0) |
602 | return; | 610 | return 0; |
603 | 611 | ||
604 | /* new item or it part falls to L[0], shift it too */ | 612 | /* new item or it part falls to L[0], shift it too */ |
605 | if (tb->item_pos < tb->lnum[0]) { | 613 | if (tb->item_pos < tb->lnum[0]) { |
606 | BUG_ON(flag != M_INSERT && flag != M_PASTE); | 614 | BUG_ON(flag != M_INSERT && flag != M_PASTE); |
607 | 615 | ||
608 | if (flag == M_INSERT) | 616 | if (flag == M_INSERT) |
609 | balance_leaf_insert_left(tb, ih, body); | 617 | return balance_leaf_insert_left(tb, ih, body); |
610 | else /* M_PASTE */ | 618 | else /* M_PASTE */ |
611 | balance_leaf_paste_left(tb, ih, body); | 619 | return balance_leaf_paste_left(tb, ih, body); |
612 | } else | 620 | } else |
613 | /* new item doesn't fall into L[0] */ | 621 | /* new item doesn't fall into L[0] */ |
614 | leaf_shift_left(tb, tb->lnum[0], tb->lbytes); | 622 | leaf_shift_left(tb, tb->lnum[0], tb->lbytes); |
623 | return 0; | ||
615 | } | 624 | } |
616 | 625 | ||
617 | 626 | ||
618 | static void balance_leaf_insert_right(struct tree_balance *tb, | 627 | static void balance_leaf_insert_right(struct tree_balance *tb, |
619 | struct item_head *ih, const char *body) | 628 | struct item_head * const ih, |
629 | const char * const body) | ||
620 | { | 630 | { |
621 | 631 | ||
622 | struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path); | 632 | struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path); |
@@ -704,7 +714,8 @@ static void balance_leaf_insert_right(struct tree_balance *tb, | |||
704 | 714 | ||
705 | 715 | ||
706 | static void balance_leaf_paste_right_shift_dirent(struct tree_balance *tb, | 716 | static void balance_leaf_paste_right_shift_dirent(struct tree_balance *tb, |
707 | struct item_head *ih, const char *body) | 717 | struct item_head * const ih, |
718 | const char * const body) | ||
708 | { | 719 | { |
709 | struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path); | 720 | struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path); |
710 | struct buffer_info bi; | 721 | struct buffer_info bi; |
@@ -754,7 +765,8 @@ static void balance_leaf_paste_right_shift_dirent(struct tree_balance *tb, | |||
754 | } | 765 | } |
755 | 766 | ||
756 | static void balance_leaf_paste_right_shift(struct tree_balance *tb, | 767 | static void balance_leaf_paste_right_shift(struct tree_balance *tb, |
757 | struct item_head *ih, const char *body) | 768 | struct item_head * const ih, |
769 | const char * const body) | ||
758 | { | 770 | { |
759 | struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path); | 771 | struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path); |
760 | int n_shift, n_rem, r_zeroes_number, version; | 772 | int n_shift, n_rem, r_zeroes_number, version; |
@@ -831,7 +843,8 @@ static void balance_leaf_paste_right_shift(struct tree_balance *tb, | |||
831 | } | 843 | } |
832 | 844 | ||
833 | static void balance_leaf_paste_right_whole(struct tree_balance *tb, | 845 | static void balance_leaf_paste_right_whole(struct tree_balance *tb, |
834 | struct item_head *ih, const char *body) | 846 | struct item_head * const ih, |
847 | const char * const body) | ||
835 | { | 848 | { |
836 | struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path); | 849 | struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path); |
837 | int n = B_NR_ITEMS(tbS0); | 850 | int n = B_NR_ITEMS(tbS0); |
@@ -874,7 +887,8 @@ static void balance_leaf_paste_right_whole(struct tree_balance *tb, | |||
874 | } | 887 | } |
875 | 888 | ||
876 | static void balance_leaf_paste_right(struct tree_balance *tb, | 889 | static void balance_leaf_paste_right(struct tree_balance *tb, |
877 | struct item_head *ih, const char *body) | 890 | struct item_head * const ih, |
891 | const char * const body) | ||
878 | { | 892 | { |
879 | struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path); | 893 | struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path); |
880 | int n = B_NR_ITEMS(tbS0); | 894 | int n = B_NR_ITEMS(tbS0); |
@@ -896,8 +910,9 @@ static void balance_leaf_paste_right(struct tree_balance *tb, | |||
896 | } | 910 | } |
897 | 911 | ||
898 | /* shift rnum[0] items from S[0] to the right neighbor R[0] */ | 912 | /* shift rnum[0] items from S[0] to the right neighbor R[0] */ |
899 | static void balance_leaf_right(struct tree_balance *tb, struct item_head *ih, | 913 | static void balance_leaf_right(struct tree_balance *tb, |
900 | const char *body, int flag) | 914 | struct item_head * const ih, |
915 | const char * const body, int flag) | ||
901 | { | 916 | { |
902 | if (tb->rnum[0] <= 0) | 917 | if (tb->rnum[0] <= 0) |
903 | return; | 918 | return; |
@@ -911,8 +926,8 @@ static void balance_leaf_right(struct tree_balance *tb, struct item_head *ih, | |||
911 | } | 926 | } |
912 | 927 | ||
913 | static void balance_leaf_new_nodes_insert(struct tree_balance *tb, | 928 | static void balance_leaf_new_nodes_insert(struct tree_balance *tb, |
914 | struct item_head *ih, | 929 | struct item_head * const ih, |
915 | const char *body, | 930 | const char * const body, |
916 | struct item_head *insert_key, | 931 | struct item_head *insert_key, |
917 | struct buffer_head **insert_ptr, | 932 | struct buffer_head **insert_ptr, |
918 | int i) | 933 | int i) |
@@ -1003,8 +1018,8 @@ static void balance_leaf_new_nodes_insert(struct tree_balance *tb, | |||
1003 | 1018 | ||
1004 | /* we append to directory item */ | 1019 | /* we append to directory item */ |
1005 | static void balance_leaf_new_nodes_paste_dirent(struct tree_balance *tb, | 1020 | static void balance_leaf_new_nodes_paste_dirent(struct tree_balance *tb, |
1006 | struct item_head *ih, | 1021 | struct item_head * const ih, |
1007 | const char *body, | 1022 | const char * const body, |
1008 | struct item_head *insert_key, | 1023 | struct item_head *insert_key, |
1009 | struct buffer_head **insert_ptr, | 1024 | struct buffer_head **insert_ptr, |
1010 | int i) | 1025 | int i) |
@@ -1058,8 +1073,8 @@ static void balance_leaf_new_nodes_paste_dirent(struct tree_balance *tb, | |||
1058 | } | 1073 | } |
1059 | 1074 | ||
1060 | static void balance_leaf_new_nodes_paste_shift(struct tree_balance *tb, | 1075 | static void balance_leaf_new_nodes_paste_shift(struct tree_balance *tb, |
1061 | struct item_head *ih, | 1076 | struct item_head * const ih, |
1062 | const char *body, | 1077 | const char * const body, |
1063 | struct item_head *insert_key, | 1078 | struct item_head *insert_key, |
1064 | struct buffer_head **insert_ptr, | 1079 | struct buffer_head **insert_ptr, |
1065 | int i) | 1080 | int i) |
@@ -1131,8 +1146,8 @@ static void balance_leaf_new_nodes_paste_shift(struct tree_balance *tb, | |||
1131 | } | 1146 | } |
1132 | 1147 | ||
1133 | static void balance_leaf_new_nodes_paste_whole(struct tree_balance *tb, | 1148 | static void balance_leaf_new_nodes_paste_whole(struct tree_balance *tb, |
1134 | struct item_head *ih, | 1149 | struct item_head * const ih, |
1135 | const char *body, | 1150 | const char * const body, |
1136 | struct item_head *insert_key, | 1151 | struct item_head *insert_key, |
1137 | struct buffer_head **insert_ptr, | 1152 | struct buffer_head **insert_ptr, |
1138 | int i) | 1153 | int i) |
@@ -1184,8 +1199,8 @@ static void balance_leaf_new_nodes_paste_whole(struct tree_balance *tb, | |||
1184 | 1199 | ||
1185 | } | 1200 | } |
1186 | static void balance_leaf_new_nodes_paste(struct tree_balance *tb, | 1201 | static void balance_leaf_new_nodes_paste(struct tree_balance *tb, |
1187 | struct item_head *ih, | 1202 | struct item_head * const ih, |
1188 | const char *body, | 1203 | const char * const body, |
1189 | struct item_head *insert_key, | 1204 | struct item_head *insert_key, |
1190 | struct buffer_head **insert_ptr, | 1205 | struct buffer_head **insert_ptr, |
1191 | int i) | 1206 | int i) |
@@ -1214,8 +1229,8 @@ static void balance_leaf_new_nodes_paste(struct tree_balance *tb, | |||
1214 | 1229 | ||
1215 | /* Fill new nodes that appear in place of S[0] */ | 1230 | /* Fill new nodes that appear in place of S[0] */ |
1216 | static void balance_leaf_new_nodes(struct tree_balance *tb, | 1231 | static void balance_leaf_new_nodes(struct tree_balance *tb, |
1217 | struct item_head *ih, | 1232 | struct item_head * const ih, |
1218 | const char *body, | 1233 | const char * const body, |
1219 | struct item_head *insert_key, | 1234 | struct item_head *insert_key, |
1220 | struct buffer_head **insert_ptr, | 1235 | struct buffer_head **insert_ptr, |
1221 | int flag) | 1236 | int flag) |
@@ -1254,8 +1269,8 @@ static void balance_leaf_new_nodes(struct tree_balance *tb, | |||
1254 | } | 1269 | } |
1255 | 1270 | ||
1256 | static void balance_leaf_finish_node_insert(struct tree_balance *tb, | 1271 | static void balance_leaf_finish_node_insert(struct tree_balance *tb, |
1257 | struct item_head *ih, | 1272 | struct item_head * const ih, |
1258 | const char *body) | 1273 | const char * const body) |
1259 | { | 1274 | { |
1260 | struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path); | 1275 | struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path); |
1261 | struct buffer_info bi; | 1276 | struct buffer_info bi; |
@@ -1271,8 +1286,8 @@ static void balance_leaf_finish_node_insert(struct tree_balance *tb, | |||
1271 | } | 1286 | } |
1272 | 1287 | ||
1273 | static void balance_leaf_finish_node_paste_dirent(struct tree_balance *tb, | 1288 | static void balance_leaf_finish_node_paste_dirent(struct tree_balance *tb, |
1274 | struct item_head *ih, | 1289 | struct item_head * const ih, |
1275 | const char *body) | 1290 | const char * const body) |
1276 | { | 1291 | { |
1277 | struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path); | 1292 | struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path); |
1278 | struct item_head *pasted = item_head(tbS0, tb->item_pos); | 1293 | struct item_head *pasted = item_head(tbS0, tb->item_pos); |
@@ -1305,8 +1320,8 @@ static void balance_leaf_finish_node_paste_dirent(struct tree_balance *tb, | |||
1305 | } | 1320 | } |
1306 | 1321 | ||
1307 | static void balance_leaf_finish_node_paste(struct tree_balance *tb, | 1322 | static void balance_leaf_finish_node_paste(struct tree_balance *tb, |
1308 | struct item_head *ih, | 1323 | struct item_head * const ih, |
1309 | const char *body) | 1324 | const char * const body) |
1310 | { | 1325 | { |
1311 | struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path); | 1326 | struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path); |
1312 | struct buffer_info bi; | 1327 | struct buffer_info bi; |
@@ -1349,8 +1364,8 @@ static void balance_leaf_finish_node_paste(struct tree_balance *tb, | |||
1349 | * of the affected item which remains in S | 1364 | * of the affected item which remains in S |
1350 | */ | 1365 | */ |
1351 | static void balance_leaf_finish_node(struct tree_balance *tb, | 1366 | static void balance_leaf_finish_node(struct tree_balance *tb, |
1352 | struct item_head *ih, | 1367 | struct item_head * const ih, |
1353 | const char *body, int flag) | 1368 | const char * const body, int flag) |
1354 | { | 1369 | { |
1355 | /* if we must insert or append into buffer S[0] */ | 1370 | /* if we must insert or append into buffer S[0] */ |
1356 | if (0 <= tb->item_pos && tb->item_pos < tb->s0num) { | 1371 | if (0 <= tb->item_pos && tb->item_pos < tb->s0num) { |
@@ -1402,7 +1417,7 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, | |||
1402 | && is_indirect_le_ih(item_head(tbS0, tb->item_pos))) | 1417 | && is_indirect_le_ih(item_head(tbS0, tb->item_pos))) |
1403 | tb->pos_in_item *= UNFM_P_SIZE; | 1418 | tb->pos_in_item *= UNFM_P_SIZE; |
1404 | 1419 | ||
1405 | balance_leaf_left(tb, ih, body, flag); | 1420 | body += balance_leaf_left(tb, ih, body, flag); |
1406 | 1421 | ||
1407 | /* tb->lnum[0] > 0 */ | 1422 | /* tb->lnum[0] > 0 */ |
1408 | /* Calculate new item position */ | 1423 | /* Calculate new item position */ |
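The do_balan.c changes make ih and body const and stop advancing body inside the left-shift helpers; each helper instead returns how many bytes the caller should skip, and balance_leaf() applies it once with body += balance_leaf_left(...). A small standalone sketch of that "return the shift, let the caller move the pointer" pattern is below; consume_prefix is an invented name, not reiserfs code.

#include <stdio.h>
#include <string.h>

/*
 * Copy up to 'want' bytes of 'body' into 'dst' and report how far the
 * caller's body pointer should advance, instead of moving it ourselves.
 */
static unsigned int consume_prefix(char *dst, size_t dst_len,
				   const char *const body, unsigned int want)
{
	unsigned int n = want < dst_len ? want : (unsigned int)dst_len;

	memcpy(dst, body, n);
	return n;               /* plays the role of body_shift_bytes */
}

int main(void)
{
	const char *body = "directory-entry-payload";
	char left[8];

	body += consume_prefix(left, sizeof(left), body, 8);
	printf("shifted into left node: %.8s\n", left);
	printf("remaining body: %s\n", body);
	return 0;
}

Keeping the parameter const makes it explicit that only the top-level caller owns the position in the item body, which is what the returned byte count expresses.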
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c index e8870de4627e..a88b1b3e7db3 100644 --- a/fs/reiserfs/journal.c +++ b/fs/reiserfs/journal.c | |||
@@ -1947,8 +1947,6 @@ static int do_journal_release(struct reiserfs_transaction_handle *th, | |||
1947 | } | 1947 | } |
1948 | } | 1948 | } |
1949 | 1949 | ||
1950 | /* wait for all commits to finish */ | ||
1951 | cancel_delayed_work(&SB_JOURNAL(sb)->j_work); | ||
1952 | 1950 | ||
1953 | /* | 1951 | /* |
1954 | * We must release the write lock here because | 1952 | * We must release the write lock here because |
@@ -1956,8 +1954,14 @@ static int do_journal_release(struct reiserfs_transaction_handle *th, | |||
1956 | */ | 1954 | */ |
1957 | reiserfs_write_unlock(sb); | 1955 | reiserfs_write_unlock(sb); |
1958 | 1956 | ||
1957 | /* | ||
1958 | * Cancel flushing of old commits. Note that neither of these work items | ||
1959 | * will be requeued because the superblock is being shut down and doesn't | ||
1960 | * have MS_ACTIVE set. | ||
1961 | */ | ||
1959 | cancel_delayed_work_sync(&REISERFS_SB(sb)->old_work); | 1962 | cancel_delayed_work_sync(&REISERFS_SB(sb)->old_work); |
1960 | flush_workqueue(REISERFS_SB(sb)->commit_wq); | 1963 | /* wait for all commits to finish */ |
1964 | cancel_delayed_work_sync(&SB_JOURNAL(sb)->j_work); | ||
1961 | 1965 | ||
1962 | free_journal_ram(sb); | 1966 | free_journal_ram(sb); |
1963 | 1967 | ||
@@ -4292,9 +4296,15 @@ static int do_journal_end(struct reiserfs_transaction_handle *th, int flags) | |||
4292 | if (flush) { | 4296 | if (flush) { |
4293 | flush_commit_list(sb, jl, 1); | 4297 | flush_commit_list(sb, jl, 1); |
4294 | flush_journal_list(sb, jl, 1); | 4298 | flush_journal_list(sb, jl, 1); |
4295 | } else if (!(jl->j_state & LIST_COMMIT_PENDING)) | 4299 | } else if (!(jl->j_state & LIST_COMMIT_PENDING)) { |
4296 | queue_delayed_work(REISERFS_SB(sb)->commit_wq, | 4300 | /* |
4297 | &journal->j_work, HZ / 10); | 4301 | * Avoid queueing work when sb is being shut down. Transaction |
4302 | * will be flushed on journal shutdown. | ||
4303 | */ | ||
4304 | if (sb->s_flags & MS_ACTIVE) | ||
4305 | queue_delayed_work(REISERFS_SB(sb)->commit_wq, | ||
4306 | &journal->j_work, HZ / 10); | ||
4307 | } | ||
4298 | 4308 | ||
4299 | /* | 4309 | /* |
4300 | * if the next transaction has any chance of wrapping, flush | 4310 | * if the next transaction has any chance of wrapping, flush |
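The journal.c hunks reorder teardown so that both delayed works are cancelled with cancel_delayed_work_sync() after the write lock is dropped, and do_journal_end() only requeues j_work while the superblock still has MS_ACTIVE set, so nothing can be queued behind the synchronous cancel. A toy single-threaded sketch of that "requeue only while active, clear the flag before the final cancel" idea; schedule_commit and cancel_commit_sync are invented stand-ins, not kernel API.

#include <stdbool.h>
#include <stdio.h>

static bool sb_active = true;   /* stands in for MS_ACTIVE */
static bool work_queued;

static void schedule_commit(void)
{
	/* Mirror of the new check: never queue once shutdown has begun. */
	if (!sb_active) {
		puts("shutdown in progress, not queueing commit work");
		return;
	}
	work_queued = true;
	puts("commit work queued");
}

static void cancel_commit_sync(void)
{
	/* With sb_active already false, nothing can be requeued behind us. */
	work_queued = false;
	puts("commit work cancelled and drained");
}

int main(void)
{
	schedule_commit();      /* normal operation: queued */

	sb_active = false;      /* journal shutdown clears MS_ACTIVE first */
	cancel_commit_sync();
	schedule_commit();      /* late caller: refused, cannot race */
	return 0;
}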
diff --git a/fs/reiserfs/lbalance.c b/fs/reiserfs/lbalance.c index 814dda3ec998..249594a821e0 100644 --- a/fs/reiserfs/lbalance.c +++ b/fs/reiserfs/lbalance.c | |||
@@ -899,8 +899,9 @@ void leaf_delete_items(struct buffer_info *cur_bi, int last_first, | |||
899 | 899 | ||
900 | /* insert item into the leaf node in position before */ | 900 | /* insert item into the leaf node in position before */ |
901 | void leaf_insert_into_buf(struct buffer_info *bi, int before, | 901 | void leaf_insert_into_buf(struct buffer_info *bi, int before, |
902 | struct item_head *inserted_item_ih, | 902 | struct item_head * const inserted_item_ih, |
903 | const char *inserted_item_body, int zeros_number) | 903 | const char * const inserted_item_body, |
904 | int zeros_number) | ||
904 | { | 905 | { |
905 | struct buffer_head *bh = bi->bi_bh; | 906 | struct buffer_head *bh = bi->bi_bh; |
906 | int nr, free_space; | 907 | int nr, free_space; |
diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h index bf53888c7f59..735c2c2b4536 100644 --- a/fs/reiserfs/reiserfs.h +++ b/fs/reiserfs/reiserfs.h | |||
@@ -3216,11 +3216,12 @@ int leaf_shift_right(struct tree_balance *tb, int shift_num, int shift_bytes); | |||
3216 | void leaf_delete_items(struct buffer_info *cur_bi, int last_first, int first, | 3216 | void leaf_delete_items(struct buffer_info *cur_bi, int last_first, int first, |
3217 | int del_num, int del_bytes); | 3217 | int del_num, int del_bytes); |
3218 | void leaf_insert_into_buf(struct buffer_info *bi, int before, | 3218 | void leaf_insert_into_buf(struct buffer_info *bi, int before, |
3219 | struct item_head *inserted_item_ih, | 3219 | struct item_head * const inserted_item_ih, |
3220 | const char *inserted_item_body, int zeros_number); | 3220 | const char * const inserted_item_body, |
3221 | void leaf_paste_in_buffer(struct buffer_info *bi, int pasted_item_num, | ||
3222 | int pos_in_item, int paste_size, const char *body, | ||
3223 | int zeros_number); | 3221 | int zeros_number); |
3222 | void leaf_paste_in_buffer(struct buffer_info *bi, int pasted_item_num, | ||
3223 | int pos_in_item, int paste_size, | ||
3224 | const char * const body, int zeros_number); | ||
3224 | void leaf_cut_from_buffer(struct buffer_info *bi, int cut_item_num, | 3225 | void leaf_cut_from_buffer(struct buffer_info *bi, int cut_item_num, |
3225 | int pos_in_item, int cut_size); | 3226 | int pos_in_item, int cut_size); |
3226 | void leaf_paste_entries(struct buffer_info *bi, int item_num, int before, | 3227 | void leaf_paste_entries(struct buffer_info *bi, int item_num, int before, |
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c index 709ea92d716f..d46e88a33b02 100644 --- a/fs/reiserfs/super.c +++ b/fs/reiserfs/super.c | |||
@@ -100,7 +100,11 @@ void reiserfs_schedule_old_flush(struct super_block *s) | |||
100 | struct reiserfs_sb_info *sbi = REISERFS_SB(s); | 100 | struct reiserfs_sb_info *sbi = REISERFS_SB(s); |
101 | unsigned long delay; | 101 | unsigned long delay; |
102 | 102 | ||
103 | if (s->s_flags & MS_RDONLY) | 103 | /* |
104 | * Avoid scheduling flush when sb is being shut down. It can race | ||
105 | * with journal shutdown and free still queued delayed work. | ||
106 | */ | ||
107 | if (s->s_flags & MS_RDONLY || !(s->s_flags & MS_ACTIVE)) | ||
104 | return; | 108 | return; |
105 | 109 | ||
106 | spin_lock(&sbi->old_work_lock); | 110 | spin_lock(&sbi->old_work_lock); |
diff --git a/fs/super.c b/fs/super.c index d20d5b11dedf..b9a214d2fe98 100644 --- a/fs/super.c +++ b/fs/super.c | |||
@@ -22,7 +22,6 @@ | |||
22 | 22 | ||
23 | #include <linux/export.h> | 23 | #include <linux/export.h> |
24 | #include <linux/slab.h> | 24 | #include <linux/slab.h> |
25 | #include <linux/acct.h> | ||
26 | #include <linux/blkdev.h> | 25 | #include <linux/blkdev.h> |
27 | #include <linux/mount.h> | 26 | #include <linux/mount.h> |
28 | #include <linux/security.h> | 27 | #include <linux/security.h> |
@@ -218,7 +217,6 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags) | |||
218 | lockdep_set_class(&s->s_vfs_rename_mutex, &type->s_vfs_rename_key); | 217 | lockdep_set_class(&s->s_vfs_rename_mutex, &type->s_vfs_rename_key); |
219 | mutex_init(&s->s_dquot.dqio_mutex); | 218 | mutex_init(&s->s_dquot.dqio_mutex); |
220 | mutex_init(&s->s_dquot.dqonoff_mutex); | 219 | mutex_init(&s->s_dquot.dqonoff_mutex); |
221 | init_rwsem(&s->s_dquot.dqptr_sem); | ||
222 | s->s_maxbytes = MAX_NON_LFS; | 220 | s->s_maxbytes = MAX_NON_LFS; |
223 | s->s_op = &default_op; | 221 | s->s_op = &default_op; |
224 | s->s_time_gran = 1000000000; | 222 | s->s_time_gran = 1000000000; |
@@ -702,12 +700,22 @@ int do_remount_sb(struct super_block *sb, int flags, void *data, int force) | |||
702 | return -EACCES; | 700 | return -EACCES; |
703 | #endif | 701 | #endif |
704 | 702 | ||
705 | if (flags & MS_RDONLY) | ||
706 | acct_auto_close(sb); | ||
707 | shrink_dcache_sb(sb); | ||
708 | |||
709 | remount_ro = (flags & MS_RDONLY) && !(sb->s_flags & MS_RDONLY); | 703 | remount_ro = (flags & MS_RDONLY) && !(sb->s_flags & MS_RDONLY); |
710 | 704 | ||
705 | if (remount_ro) { | ||
706 | if (sb->s_pins.first) { | ||
707 | up_write(&sb->s_umount); | ||
708 | sb_pin_kill(sb); | ||
709 | down_write(&sb->s_umount); | ||
710 | if (!sb->s_root) | ||
711 | return 0; | ||
712 | if (sb->s_writers.frozen != SB_UNFROZEN) | ||
713 | return -EBUSY; | ||
714 | remount_ro = (flags & MS_RDONLY) && !(sb->s_flags & MS_RDONLY); | ||
715 | } | ||
716 | } | ||
717 | shrink_dcache_sb(sb); | ||
718 | |||
711 | /* If we are remounting RDONLY and current sb is read/write, | 719 | /* If we are remounting RDONLY and current sb is read/write, |
712 | make sure there are no rw files opened */ | 720 | make sure there are no rw files opened */ |
713 | if (remount_ro) { | 721 | if (remount_ro) { |
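The do_remount_sb() hunk drops the acct_auto_close() call here and instead kills superblock pins via sb_pin_kill() on a read-only remount, which can block and therefore requires dropping s_umount around it; after re-taking the rwsem the code re-validates that the superblock still has a root and is not frozen, and recomputes remount_ro, because the state may have changed while the lock was released. A generic userspace sketch of that "unlock, block, relock, re-check" discipline; the struct fields and names here are illustrative only.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct sb {
	pthread_rwlock_t umount;        /* plays the role of s_umount */
	bool has_root;
	bool frozen;
	bool read_only;
};

static void kill_pins(struct sb *sb)
{
	/* May block and may change sb state; must run without the lock held. */
	(void)sb;
}

static int remount_ro(struct sb *sb)
{
	pthread_rwlock_wrlock(&sb->umount);

	pthread_rwlock_unlock(&sb->umount);     /* drop before the blocking call */
	kill_pins(sb);
	pthread_rwlock_wrlock(&sb->umount);     /* re-take ... */

	if (!sb->has_root) {                    /* ... and re-validate everything */
		pthread_rwlock_unlock(&sb->umount);
		return 0;
	}
	if (sb->frozen) {
		pthread_rwlock_unlock(&sb->umount);
		return -1;                      /* -EBUSY in the kernel code */
	}
	sb->read_only = true;
	pthread_rwlock_unlock(&sb->umount);
	return 0;
}

int main(void)
{
	struct sb sb = { .has_root = true };

	pthread_rwlock_init(&sb.umount, NULL);
	printf("remount -> %d, read_only=%d\n", remount_ro(&sb), sb.read_only);
	pthread_rwlock_destroy(&sb.umount);
	return 0;
}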
diff --git a/fs/ubifs/commit.c b/fs/ubifs/commit.c index ff8229340cd5..aa13ad053b14 100644 --- a/fs/ubifs/commit.c +++ b/fs/ubifs/commit.c | |||
@@ -174,7 +174,6 @@ static int do_commit(struct ubifs_info *c) | |||
174 | if (err) | 174 | if (err) |
175 | goto out; | 175 | goto out; |
176 | 176 | ||
177 | mutex_lock(&c->mst_mutex); | ||
178 | c->mst_node->cmt_no = cpu_to_le64(c->cmt_no); | 177 | c->mst_node->cmt_no = cpu_to_le64(c->cmt_no); |
179 | c->mst_node->log_lnum = cpu_to_le32(new_ltail_lnum); | 178 | c->mst_node->log_lnum = cpu_to_le32(new_ltail_lnum); |
180 | c->mst_node->root_lnum = cpu_to_le32(zroot.lnum); | 179 | c->mst_node->root_lnum = cpu_to_le32(zroot.lnum); |
@@ -204,7 +203,6 @@ static int do_commit(struct ubifs_info *c) | |||
204 | else | 203 | else |
205 | c->mst_node->flags &= ~cpu_to_le32(UBIFS_MST_NO_ORPHS); | 204 | c->mst_node->flags &= ~cpu_to_le32(UBIFS_MST_NO_ORPHS); |
206 | err = ubifs_write_master(c); | 205 | err = ubifs_write_master(c); |
207 | mutex_unlock(&c->mst_mutex); | ||
208 | if (err) | 206 | if (err) |
209 | goto out; | 207 | goto out; |
210 | 208 | ||
diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c index 2290d5866725..fb08b0c514b6 100644 --- a/fs/ubifs/io.c +++ b/fs/ubifs/io.c | |||
@@ -431,7 +431,7 @@ void ubifs_prep_grp_node(struct ubifs_info *c, void *node, int len, int last) | |||
431 | 431 | ||
432 | /** | 432 | /** |
433 | * wbuf_timer_callback - write-buffer timer callback function. | 433 | * wbuf_timer_callback - write-buffer timer callback function. |
434 | * @data: timer data (write-buffer descriptor) | 434 | * @timer: timer data (write-buffer descriptor) |
435 | * | 435 | * |
436 | * This function is called when the write-buffer timer expires. | 436 | * This function is called when the write-buffer timer expires. |
437 | */ | 437 | */ |
diff --git a/fs/ubifs/log.c b/fs/ubifs/log.c index a902c5919e42..a47ddfc9be6b 100644 --- a/fs/ubifs/log.c +++ b/fs/ubifs/log.c | |||
@@ -240,6 +240,7 @@ int ubifs_add_bud_to_log(struct ubifs_info *c, int jhead, int lnum, int offs) | |||
240 | 240 | ||
241 | if (c->lhead_offs > c->leb_size - c->ref_node_alsz) { | 241 | if (c->lhead_offs > c->leb_size - c->ref_node_alsz) { |
242 | c->lhead_lnum = ubifs_next_log_lnum(c, c->lhead_lnum); | 242 | c->lhead_lnum = ubifs_next_log_lnum(c, c->lhead_lnum); |
243 | ubifs_assert(c->lhead_lnum != c->ltail_lnum); | ||
243 | c->lhead_offs = 0; | 244 | c->lhead_offs = 0; |
244 | } | 245 | } |
245 | 246 | ||
@@ -404,15 +405,14 @@ int ubifs_log_start_commit(struct ubifs_info *c, int *ltail_lnum) | |||
404 | /* Switch to the next log LEB */ | 405 | /* Switch to the next log LEB */ |
405 | if (c->lhead_offs) { | 406 | if (c->lhead_offs) { |
406 | c->lhead_lnum = ubifs_next_log_lnum(c, c->lhead_lnum); | 407 | c->lhead_lnum = ubifs_next_log_lnum(c, c->lhead_lnum); |
408 | ubifs_assert(c->lhead_lnum != c->ltail_lnum); | ||
407 | c->lhead_offs = 0; | 409 | c->lhead_offs = 0; |
408 | } | 410 | } |
409 | 411 | ||
410 | if (c->lhead_offs == 0) { | 412 | /* Must ensure next LEB has been unmapped */ |
411 | /* Must ensure next LEB has been unmapped */ | 413 | err = ubifs_leb_unmap(c, c->lhead_lnum); |
412 | err = ubifs_leb_unmap(c, c->lhead_lnum); | 414 | if (err) |
413 | if (err) | 415 | goto out; |
414 | goto out; | ||
415 | } | ||
416 | 416 | ||
417 | len = ALIGN(len, c->min_io_size); | 417 | len = ALIGN(len, c->min_io_size); |
418 | dbg_log("writing commit start at LEB %d:0, len %d", c->lhead_lnum, len); | 418 | dbg_log("writing commit start at LEB %d:0, len %d", c->lhead_lnum, len); |
diff --git a/fs/ubifs/lpt.c b/fs/ubifs/lpt.c index d46b19ec1815..421bd0a80424 100644 --- a/fs/ubifs/lpt.c +++ b/fs/ubifs/lpt.c | |||
@@ -1464,7 +1464,6 @@ struct ubifs_lprops *ubifs_lpt_lookup(struct ubifs_info *c, int lnum) | |||
1464 | return ERR_CAST(nnode); | 1464 | return ERR_CAST(nnode); |
1465 | } | 1465 | } |
1466 | iip = ((i >> shft) & (UBIFS_LPT_FANOUT - 1)); | 1466 | iip = ((i >> shft) & (UBIFS_LPT_FANOUT - 1)); |
1467 | shft -= UBIFS_LPT_FANOUT_SHIFT; | ||
1468 | pnode = ubifs_get_pnode(c, nnode, iip); | 1467 | pnode = ubifs_get_pnode(c, nnode, iip); |
1469 | if (IS_ERR(pnode)) | 1468 | if (IS_ERR(pnode)) |
1470 | return ERR_CAST(pnode); | 1469 | return ERR_CAST(pnode); |
@@ -1604,7 +1603,6 @@ struct ubifs_lprops *ubifs_lpt_lookup_dirty(struct ubifs_info *c, int lnum) | |||
1604 | return ERR_CAST(nnode); | 1603 | return ERR_CAST(nnode); |
1605 | } | 1604 | } |
1606 | iip = ((i >> shft) & (UBIFS_LPT_FANOUT - 1)); | 1605 | iip = ((i >> shft) & (UBIFS_LPT_FANOUT - 1)); |
1607 | shft -= UBIFS_LPT_FANOUT_SHIFT; | ||
1608 | pnode = ubifs_get_pnode(c, nnode, iip); | 1606 | pnode = ubifs_get_pnode(c, nnode, iip); |
1609 | if (IS_ERR(pnode)) | 1607 | if (IS_ERR(pnode)) |
1610 | return ERR_CAST(pnode); | 1608 | return ERR_CAST(pnode); |
@@ -1964,7 +1962,6 @@ again: | |||
1964 | } | 1962 | } |
1965 | } | 1963 | } |
1966 | iip = ((i >> shft) & (UBIFS_LPT_FANOUT - 1)); | 1964 | iip = ((i >> shft) & (UBIFS_LPT_FANOUT - 1)); |
1967 | shft -= UBIFS_LPT_FANOUT_SHIFT; | ||
1968 | pnode = scan_get_pnode(c, path + h, nnode, iip); | 1965 | pnode = scan_get_pnode(c, path + h, nnode, iip); |
1969 | if (IS_ERR(pnode)) { | 1966 | if (IS_ERR(pnode)) { |
1970 | err = PTR_ERR(pnode); | 1967 | err = PTR_ERR(pnode); |
@@ -2198,6 +2195,7 @@ static int dbg_chk_pnode(struct ubifs_info *c, struct ubifs_pnode *pnode, | |||
2198 | lprops->dirty); | 2195 | lprops->dirty); |
2199 | return -EINVAL; | 2196 | return -EINVAL; |
2200 | } | 2197 | } |
2198 | break; | ||
2201 | case LPROPS_FREEABLE: | 2199 | case LPROPS_FREEABLE: |
2202 | case LPROPS_FRDI_IDX: | 2200 | case LPROPS_FRDI_IDX: |
2203 | if (lprops->free + lprops->dirty != c->leb_size) { | 2201 | if (lprops->free + lprops->dirty != c->leb_size) { |
@@ -2206,6 +2204,7 @@ static int dbg_chk_pnode(struct ubifs_info *c, struct ubifs_pnode *pnode, | |||
2206 | lprops->dirty); | 2204 | lprops->dirty); |
2207 | return -EINVAL; | 2205 | return -EINVAL; |
2208 | } | 2206 | } |
2207 | break; | ||
2209 | } | 2208 | } |
2210 | } | 2209 | } |
2211 | return 0; | 2210 | return 0; |
diff --git a/fs/ubifs/lpt_commit.c b/fs/ubifs/lpt_commit.c index 45d4e96a6bac..d9c02928e992 100644 --- a/fs/ubifs/lpt_commit.c +++ b/fs/ubifs/lpt_commit.c | |||
@@ -304,7 +304,6 @@ static int layout_cnodes(struct ubifs_info *c) | |||
304 | ubifs_assert(lnum >= c->lpt_first && | 304 | ubifs_assert(lnum >= c->lpt_first && |
305 | lnum <= c->lpt_last); | 305 | lnum <= c->lpt_last); |
306 | } | 306 | } |
307 | done_ltab = 1; | ||
308 | c->ltab_lnum = lnum; | 307 | c->ltab_lnum = lnum; |
309 | c->ltab_offs = offs; | 308 | c->ltab_offs = offs; |
310 | offs += c->ltab_sz; | 309 | offs += c->ltab_sz; |
@@ -514,7 +513,6 @@ static int write_cnodes(struct ubifs_info *c) | |||
514 | if (err) | 513 | if (err) |
515 | return err; | 514 | return err; |
516 | } | 515 | } |
517 | done_ltab = 1; | ||
518 | ubifs_pack_ltab(c, buf + offs, c->ltab_cmt); | 516 | ubifs_pack_ltab(c, buf + offs, c->ltab_cmt); |
519 | offs += c->ltab_sz; | 517 | offs += c->ltab_sz; |
520 | dbg_chk_lpt_sz(c, 1, c->ltab_sz); | 518 | dbg_chk_lpt_sz(c, 1, c->ltab_sz); |
@@ -1941,6 +1939,11 @@ static void dump_lpt_leb(const struct ubifs_info *c, int lnum) | |||
1941 | pr_err("LEB %d:%d, nnode, ", | 1939 | pr_err("LEB %d:%d, nnode, ", |
1942 | lnum, offs); | 1940 | lnum, offs); |
1943 | err = ubifs_unpack_nnode(c, p, &nnode); | 1941 | err = ubifs_unpack_nnode(c, p, &nnode); |
1942 | if (err) { | ||
1943 | pr_err("failed to unpack_node, error %d\n", | ||
1944 | err); | ||
1945 | break; | ||
1946 | } | ||
1944 | for (i = 0; i < UBIFS_LPT_FANOUT; i++) { | 1947 | for (i = 0; i < UBIFS_LPT_FANOUT; i++) { |
1945 | pr_cont("%d:%d", nnode.nbranch[i].lnum, | 1948 | pr_cont("%d:%d", nnode.nbranch[i].lnum, |
1946 | nnode.nbranch[i].offs); | 1949 | nnode.nbranch[i].offs); |
diff --git a/fs/ubifs/master.c b/fs/ubifs/master.c index ab83ace9910a..1a4bb9e8b3b8 100644 --- a/fs/ubifs/master.c +++ b/fs/ubifs/master.c | |||
@@ -352,10 +352,9 @@ int ubifs_read_master(struct ubifs_info *c) | |||
352 | * ubifs_write_master - write master node. | 352 | * ubifs_write_master - write master node. |
353 | * @c: UBIFS file-system description object | 353 | * @c: UBIFS file-system description object |
354 | * | 354 | * |
355 | * This function writes the master node. The caller has to take the | 355 | * This function writes the master node. Returns zero in case of success and a |
356 | * @c->mst_mutex lock before calling this function. Returns zero in case of | 356 | * negative error code in case of failure. The master node is written twice to |
357 | * success and a negative error code in case of failure. The master node is | 357 | * enable recovery. |
358 | * written twice to enable recovery. | ||
359 | */ | 358 | */ |
360 | int ubifs_write_master(struct ubifs_info *c) | 359 | int ubifs_write_master(struct ubifs_info *c) |
361 | { | 360 | { |
diff --git a/fs/ubifs/orphan.c b/fs/ubifs/orphan.c index f1c3e5a1b315..4409f486ecef 100644 --- a/fs/ubifs/orphan.c +++ b/fs/ubifs/orphan.c | |||
@@ -346,7 +346,6 @@ static int write_orph_nodes(struct ubifs_info *c, int atomic) | |||
346 | int lnum; | 346 | int lnum; |
347 | 347 | ||
348 | /* Unmap any unused LEBs after consolidation */ | 348 | /* Unmap any unused LEBs after consolidation */ |
349 | lnum = c->ohead_lnum + 1; | ||
350 | for (lnum = c->ohead_lnum + 1; lnum <= c->orph_last; lnum++) { | 349 | for (lnum = c->ohead_lnum + 1; lnum <= c->orph_last; lnum++) { |
351 | err = ubifs_leb_unmap(c, lnum); | 350 | err = ubifs_leb_unmap(c, lnum); |
352 | if (err) | 351 | if (err) |
diff --git a/fs/ubifs/recovery.c b/fs/ubifs/recovery.c index c14adb2f420c..c640938f62f0 100644 --- a/fs/ubifs/recovery.c +++ b/fs/ubifs/recovery.c | |||
@@ -596,7 +596,6 @@ static void drop_last_group(struct ubifs_scan_leb *sleb, int *offs) | |||
596 | * drop_last_node - drop the last node. | 596 | * drop_last_node - drop the last node. |
597 | * @sleb: scanned LEB information | 597 | * @sleb: scanned LEB information |
598 | * @offs: offset of dropped nodes is returned here | 598 | * @offs: offset of dropped nodes is returned here |
599 | * @grouped: non-zero if whole group of nodes have to be dropped | ||
600 | * | 599 | * |
601 | * This is a helper function for 'ubifs_recover_leb()' which drops the last | 600 | * This is a helper function for 'ubifs_recover_leb()' which drops the last |
602 | * node of the scanned LEB. | 601 | * node of the scanned LEB. |
@@ -629,8 +628,8 @@ static void drop_last_node(struct ubifs_scan_leb *sleb, int *offs) | |||
629 | * | 628 | * |
630 | * This function does a scan of a LEB, but caters for errors that might have | 629 | * This function does a scan of a LEB, but caters for errors that might have |
631 | * been caused by the unclean unmount from which we are attempting to recover. | 630 | * been caused by the unclean unmount from which we are attempting to recover. |
632 | * Returns %0 in case of success, %-EUCLEAN if an unrecoverable corruption is | 631 | * Returns the scanned information on success and a negative error code on |
633 | * found, and a negative error code in case of failure. | 632 | * failure. |
634 | */ | 633 | */ |
635 | struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum, | 634 | struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum, |
636 | int offs, void *sbuf, int jhead) | 635 | int offs, void *sbuf, int jhead) |
diff --git a/fs/ubifs/sb.c b/fs/ubifs/sb.c index 4c37607a958e..79c6dbbc0e04 100644 --- a/fs/ubifs/sb.c +++ b/fs/ubifs/sb.c | |||
@@ -332,6 +332,8 @@ static int create_default_filesystem(struct ubifs_info *c) | |||
332 | cs->ch.node_type = UBIFS_CS_NODE; | 332 | cs->ch.node_type = UBIFS_CS_NODE; |
333 | err = ubifs_write_node(c, cs, UBIFS_CS_NODE_SZ, UBIFS_LOG_LNUM, 0); | 333 | err = ubifs_write_node(c, cs, UBIFS_CS_NODE_SZ, UBIFS_LOG_LNUM, 0); |
334 | kfree(cs); | 334 | kfree(cs); |
335 | if (err) | ||
336 | return err; | ||
335 | 337 | ||
336 | ubifs_msg("default file-system created"); | 338 | ubifs_msg("default file-system created"); |
337 | return 0; | 339 | return 0; |
@@ -447,7 +449,7 @@ static int validate_sb(struct ubifs_info *c, struct ubifs_sb_node *sup) | |||
447 | goto failed; | 449 | goto failed; |
448 | } | 450 | } |
449 | 451 | ||
450 | if (c->default_compr < 0 || c->default_compr >= UBIFS_COMPR_TYPES_CNT) { | 452 | if (c->default_compr >= UBIFS_COMPR_TYPES_CNT) { |
451 | err = 13; | 453 | err = 13; |
452 | goto failed; | 454 | goto failed; |
453 | } | 455 | } |
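The validate_sb() change drops the "c->default_compr < 0" half of the range check (and the matching validate_inode() hunk later does the same for ui->compr_type): these are unsigned fields in the UBIFS structures, so the comparison can never be true, and the remaining ">= UBIFS_COMPR_TYPES_CNT" test already rejects every out-of-range value. A tiny standalone demonstration of the pitfall:

#include <stdio.h>

#define COMPR_TYPES_CNT 3u

int main(void)
{
	unsigned int compr = (unsigned int)-1;  /* bogus on-disk value */

	/* For an unsigned type this first test can never be true
	 * (compilers typically warn that it is always false). */
	if (compr < 0)
		puts("never printed");

	/* The single upper-bound check is sufficient on its own. */
	if (compr >= COMPR_TYPES_CNT)
		puts("rejected: unknown compression type");
	return 0;
}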
diff --git a/fs/ubifs/scan.c b/fs/ubifs/scan.c index 58aa05df2bb6..89adbc4d08ac 100644 --- a/fs/ubifs/scan.c +++ b/fs/ubifs/scan.c | |||
@@ -131,7 +131,8 @@ int ubifs_scan_a_node(const struct ubifs_info *c, void *buf, int len, int lnum, | |||
131 | * @offs: offset to start at (usually zero) | 131 | * @offs: offset to start at (usually zero) |
132 | * @sbuf: scan buffer (must be c->leb_size) | 132 | * @sbuf: scan buffer (must be c->leb_size) |
133 | * | 133 | * |
134 | * This function returns %0 on success and a negative error code on failure. | 134 | * This function returns the scanned information on success and a negative error |
135 | * code on failure. | ||
135 | */ | 136 | */ |
136 | struct ubifs_scan_leb *ubifs_start_scan(const struct ubifs_info *c, int lnum, | 137 | struct ubifs_scan_leb *ubifs_start_scan(const struct ubifs_info *c, int lnum, |
137 | int offs, void *sbuf) | 138 | int offs, void *sbuf) |
@@ -157,9 +158,10 @@ struct ubifs_scan_leb *ubifs_start_scan(const struct ubifs_info *c, int lnum, | |||
157 | return ERR_PTR(err); | 158 | return ERR_PTR(err); |
158 | } | 159 | } |
159 | 160 | ||
160 | if (err == -EBADMSG) | 161 | /* |
161 | sleb->ecc = 1; | 162 | * Note, we ignore integrity errors (EBADMSG) because all the nodes are |
162 | 163 | * protected by CRC checksums. | |
164 | */ | ||
163 | return sleb; | 165 | return sleb; |
164 | } | 166 | } |
165 | 167 | ||
@@ -169,8 +171,6 @@ struct ubifs_scan_leb *ubifs_start_scan(const struct ubifs_info *c, int lnum, | |||
169 | * @sleb: scanning information | 171 | * @sleb: scanning information |
170 | * @lnum: logical eraseblock number | 172 | * @lnum: logical eraseblock number |
171 | * @offs: offset to start at (usually zero) | 173 | * @offs: offset to start at (usually zero) |
172 | * | ||
173 | * This function returns %0 on success and a negative error code on failure. | ||
174 | */ | 174 | */ |
175 | void ubifs_end_scan(const struct ubifs_info *c, struct ubifs_scan_leb *sleb, | 175 | void ubifs_end_scan(const struct ubifs_info *c, struct ubifs_scan_leb *sleb, |
176 | int lnum, int offs) | 176 | int lnum, int offs) |
@@ -257,7 +257,7 @@ void ubifs_scanned_corruption(const struct ubifs_info *c, int lnum, int offs, | |||
257 | * @quiet: print no messages | 257 | * @quiet: print no messages |
258 | * | 258 | * |
259 | * This function scans LEB number @lnum and returns complete information about | 259 | * This function scans LEB number @lnum and returns complete information about |
260 | * its contents. Returns the scaned information in case of success and, | 260 | * its contents. Returns the scanned information in case of success and, |
261 | %-EUCLEAN if the LEB needs recovery, and other negative error codes in case | 261 | %-EUCLEAN if the LEB needs recovery, and other negative error codes in case |
262 | * of failure. | 262 | * of failure. |
263 | * | 263 | * |
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index 3904c8574ef9..106bf20629ce 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c | |||
@@ -75,7 +75,7 @@ static int validate_inode(struct ubifs_info *c, const struct inode *inode) | |||
75 | return 1; | 75 | return 1; |
76 | } | 76 | } |
77 | 77 | ||
78 | if (ui->compr_type < 0 || ui->compr_type >= UBIFS_COMPR_TYPES_CNT) { | 78 | if (ui->compr_type >= UBIFS_COMPR_TYPES_CNT) { |
79 | ubifs_err("unknown compression type %d", ui->compr_type); | 79 | ubifs_err("unknown compression type %d", ui->compr_type); |
80 | return 2; | 80 | return 2; |
81 | } | 81 | } |
@@ -424,19 +424,19 @@ static int ubifs_show_options(struct seq_file *s, struct dentry *root) | |||
424 | struct ubifs_info *c = root->d_sb->s_fs_info; | 424 | struct ubifs_info *c = root->d_sb->s_fs_info; |
425 | 425 | ||
426 | if (c->mount_opts.unmount_mode == 2) | 426 | if (c->mount_opts.unmount_mode == 2) |
427 | seq_printf(s, ",fast_unmount"); | 427 | seq_puts(s, ",fast_unmount"); |
428 | else if (c->mount_opts.unmount_mode == 1) | 428 | else if (c->mount_opts.unmount_mode == 1) |
429 | seq_printf(s, ",norm_unmount"); | 429 | seq_puts(s, ",norm_unmount"); |
430 | 430 | ||
431 | if (c->mount_opts.bulk_read == 2) | 431 | if (c->mount_opts.bulk_read == 2) |
432 | seq_printf(s, ",bulk_read"); | 432 | seq_puts(s, ",bulk_read"); |
433 | else if (c->mount_opts.bulk_read == 1) | 433 | else if (c->mount_opts.bulk_read == 1) |
434 | seq_printf(s, ",no_bulk_read"); | 434 | seq_puts(s, ",no_bulk_read"); |
435 | 435 | ||
436 | if (c->mount_opts.chk_data_crc == 2) | 436 | if (c->mount_opts.chk_data_crc == 2) |
437 | seq_printf(s, ",chk_data_crc"); | 437 | seq_puts(s, ",chk_data_crc"); |
438 | else if (c->mount_opts.chk_data_crc == 1) | 438 | else if (c->mount_opts.chk_data_crc == 1) |
439 | seq_printf(s, ",no_chk_data_crc"); | 439 | seq_puts(s, ",no_chk_data_crc"); |
440 | 440 | ||
441 | if (c->mount_opts.override_compr) { | 441 | if (c->mount_opts.override_compr) { |
442 | seq_printf(s, ",compr=%s", | 442 | seq_printf(s, ",compr=%s", |
@@ -796,8 +796,8 @@ static int alloc_wbufs(struct ubifs_info *c) | |||
796 | { | 796 | { |
797 | int i, err; | 797 | int i, err; |
798 | 798 | ||
799 | c->jheads = kzalloc(c->jhead_cnt * sizeof(struct ubifs_jhead), | 799 | c->jheads = kcalloc(c->jhead_cnt, sizeof(struct ubifs_jhead), |
800 | GFP_KERNEL); | 800 | GFP_KERNEL); |
801 | if (!c->jheads) | 801 | if (!c->jheads) |
802 | return -ENOMEM; | 802 | return -ENOMEM; |
803 | 803 | ||
@@ -1963,7 +1963,6 @@ static struct ubifs_info *alloc_ubifs_info(struct ubi_volume_desc *ubi) | |||
1963 | mutex_init(&c->lp_mutex); | 1963 | mutex_init(&c->lp_mutex); |
1964 | mutex_init(&c->tnc_mutex); | 1964 | mutex_init(&c->tnc_mutex); |
1965 | mutex_init(&c->log_mutex); | 1965 | mutex_init(&c->log_mutex); |
1966 | mutex_init(&c->mst_mutex); | ||
1967 | mutex_init(&c->umount_mutex); | 1966 | mutex_init(&c->umount_mutex); |
1968 | mutex_init(&c->bu_mutex); | 1967 | mutex_init(&c->bu_mutex); |
1969 | mutex_init(&c->write_reserve_mutex); | 1968 | mutex_init(&c->write_reserve_mutex); |
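In the ubifs/super.c hunks above, alloc_wbufs() switches from kzalloc(c->jhead_cnt * sizeof(...)) to kcalloc(c->jhead_cnt, sizeof(...)): the two are equivalent except that kcalloc checks the count-times-size multiplication for overflow before allocating. A small userspace analogue showing the kind of guard calloc-style allocators provide; checked_alloc_array and the jhead struct are invented for the example.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Zeroed array allocation that refuses to let n * size wrap around. */
static void *checked_alloc_array(size_t n, size_t size)
{
	if (size != 0 && n > SIZE_MAX / size)
		return NULL;            /* overflow: fail instead of under-allocating */
	return calloc(n, size);         /* calloc performs the same check itself */
}

int main(void)
{
	struct jhead { int wbuf[16]; } *heads;

	heads = checked_alloc_array(12, sizeof(*heads));
	printf("12 journal heads: %s\n", heads ? "allocated" : "failed");
	free(heads);

	heads = checked_alloc_array(SIZE_MAX / 2, sizeof(*heads));
	printf("absurd count: %s\n", heads ? "allocated" : "rejected");
	return 0;
}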
diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c index 8a40cf9c02d7..6793db0754f6 100644 --- a/fs/ubifs/tnc.c +++ b/fs/ubifs/tnc.c | |||
@@ -3294,7 +3294,6 @@ int dbg_check_inode_size(struct ubifs_info *c, const struct inode *inode, | |||
3294 | goto out_unlock; | 3294 | goto out_unlock; |
3295 | 3295 | ||
3296 | if (err) { | 3296 | if (err) { |
3297 | err = -EINVAL; | ||
3298 | key = &from_key; | 3297 | key = &from_key; |
3299 | goto out_dump; | 3298 | goto out_dump; |
3300 | } | 3299 | } |
diff --git a/fs/ubifs/tnc_commit.c b/fs/ubifs/tnc_commit.c index 3600994f8411..7a205e046776 100644 --- a/fs/ubifs/tnc_commit.c +++ b/fs/ubifs/tnc_commit.c | |||
@@ -389,7 +389,6 @@ static int layout_in_gaps(struct ubifs_info *c, int cnt) | |||
389 | ubifs_dump_lprops(c); | 389 | ubifs_dump_lprops(c); |
390 | } | 390 | } |
391 | /* Try to commit anyway */ | 391 | /* Try to commit anyway */ |
392 | err = 0; | ||
393 | break; | 392 | break; |
394 | } | 393 | } |
395 | p++; | 394 | p++; |
diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h index c1f71fe17cc0..c4fe900c67ab 100644 --- a/fs/ubifs/ubifs.h +++ b/fs/ubifs/ubifs.h | |||
@@ -314,7 +314,6 @@ struct ubifs_scan_node { | |||
314 | * @nodes_cnt: number of nodes scanned | 314 | * @nodes_cnt: number of nodes scanned |
315 | * @nodes: list of struct ubifs_scan_node | 315 | * @nodes: list of struct ubifs_scan_node |
316 | * @endpt: end point (and therefore the start of empty space) | 316 | * @endpt: end point (and therefore the start of empty space) |
317 | * @ecc: read returned -EBADMSG | ||
318 | * @buf: buffer containing entire LEB scanned | 317 | * @buf: buffer containing entire LEB scanned |
319 | */ | 318 | */ |
320 | struct ubifs_scan_leb { | 319 | struct ubifs_scan_leb { |
@@ -322,7 +321,6 @@ struct ubifs_scan_leb { | |||
322 | int nodes_cnt; | 321 | int nodes_cnt; |
323 | struct list_head nodes; | 322 | struct list_head nodes; |
324 | int endpt; | 323 | int endpt; |
325 | int ecc; | ||
326 | void *buf; | 324 | void *buf; |
327 | }; | 325 | }; |
328 | 326 | ||
@@ -1051,7 +1049,6 @@ struct ubifs_debug_info; | |||
1051 | * | 1049 | * |
1052 | * @mst_node: master node | 1050 | * @mst_node: master node |
1053 | * @mst_offs: offset of valid master node | 1051 | * @mst_offs: offset of valid master node |
1054 | * @mst_mutex: protects the master node area, @mst_node, and @mst_offs | ||
1055 | * | 1052 | * |
1056 | * @max_bu_buf_len: maximum bulk-read buffer length | 1053 | * @max_bu_buf_len: maximum bulk-read buffer length |
1057 | * @bu_mutex: protects the pre-allocated bulk-read buffer and @c->bu | 1054 | * @bu_mutex: protects the pre-allocated bulk-read buffer and @c->bu |
@@ -1292,7 +1289,6 @@ struct ubifs_info { | |||
1292 | 1289 | ||
1293 | struct ubifs_mst_node *mst_node; | 1290 | struct ubifs_mst_node *mst_node; |
1294 | int mst_offs; | 1291 | int mst_offs; |
1295 | struct mutex mst_mutex; | ||
1296 | 1292 | ||
1297 | int max_bu_buf_len; | 1293 | int max_bu_buf_len; |
1298 | struct mutex bu_mutex; | 1294 | struct mutex bu_mutex; |
diff --git a/fs/udf/file.c b/fs/udf/file.c index d80738fdf424..86c6743ec1fe 100644 --- a/fs/udf/file.c +++ b/fs/udf/file.c | |||
@@ -27,7 +27,7 @@ | |||
27 | 27 | ||
28 | #include "udfdecl.h" | 28 | #include "udfdecl.h" |
29 | #include <linux/fs.h> | 29 | #include <linux/fs.h> |
30 | #include <asm/uaccess.h> | 30 | #include <linux/uaccess.h> |
31 | #include <linux/kernel.h> | 31 | #include <linux/kernel.h> |
32 | #include <linux/string.h> /* memset */ | 32 | #include <linux/string.h> /* memset */ |
33 | #include <linux/capability.h> | 33 | #include <linux/capability.h> |
@@ -100,24 +100,6 @@ static int udf_adinicb_write_begin(struct file *file, | |||
100 | return 0; | 100 | return 0; |
101 | } | 101 | } |
102 | 102 | ||
103 | static int udf_adinicb_write_end(struct file *file, | ||
104 | struct address_space *mapping, | ||
105 | loff_t pos, unsigned len, unsigned copied, | ||
106 | struct page *page, void *fsdata) | ||
107 | { | ||
108 | struct inode *inode = mapping->host; | ||
109 | unsigned offset = pos & (PAGE_CACHE_SIZE - 1); | ||
110 | char *kaddr; | ||
111 | struct udf_inode_info *iinfo = UDF_I(inode); | ||
112 | |||
113 | kaddr = kmap_atomic(page); | ||
114 | memcpy(iinfo->i_ext.i_data + iinfo->i_lenEAttr + offset, | ||
115 | kaddr + offset, copied); | ||
116 | kunmap_atomic(kaddr); | ||
117 | |||
118 | return simple_write_end(file, mapping, pos, len, copied, page, fsdata); | ||
119 | } | ||
120 | |||
121 | static ssize_t udf_adinicb_direct_IO(int rw, struct kiocb *iocb, | 103 | static ssize_t udf_adinicb_direct_IO(int rw, struct kiocb *iocb, |
122 | struct iov_iter *iter, | 104 | struct iov_iter *iter, |
123 | loff_t offset) | 105 | loff_t offset) |
@@ -130,7 +112,7 @@ const struct address_space_operations udf_adinicb_aops = { | |||
130 | .readpage = udf_adinicb_readpage, | 112 | .readpage = udf_adinicb_readpage, |
131 | .writepage = udf_adinicb_writepage, | 113 | .writepage = udf_adinicb_writepage, |
132 | .write_begin = udf_adinicb_write_begin, | 114 | .write_begin = udf_adinicb_write_begin, |
133 | .write_end = udf_adinicb_write_end, | 115 | .write_end = simple_write_end, |
134 | .direct_IO = udf_adinicb_direct_IO, | 116 | .direct_IO = udf_adinicb_direct_IO, |
135 | }; | 117 | }; |
136 | 118 | ||
diff --git a/fs/udf/lowlevel.c b/fs/udf/lowlevel.c index 6583fe9b0645..6ad5a453af97 100644 --- a/fs/udf/lowlevel.c +++ b/fs/udf/lowlevel.c | |||
@@ -21,7 +21,7 @@ | |||
21 | 21 | ||
22 | #include <linux/blkdev.h> | 22 | #include <linux/blkdev.h> |
23 | #include <linux/cdrom.h> | 23 | #include <linux/cdrom.h> |
24 | #include <asm/uaccess.h> | 24 | #include <linux/uaccess.h> |
25 | 25 | ||
26 | #include "udf_sb.h" | 26 | #include "udf_sb.h" |
27 | 27 | ||
diff --git a/fs/udf/super.c b/fs/udf/super.c index 3286db047a40..813da94d447b 100644 --- a/fs/udf/super.c +++ b/fs/udf/super.c | |||
@@ -63,7 +63,7 @@ | |||
63 | #include "udf_i.h" | 63 | #include "udf_i.h" |
64 | 64 | ||
65 | #include <linux/init.h> | 65 | #include <linux/init.h> |
66 | #include <asm/uaccess.h> | 66 | #include <linux/uaccess.h> |
67 | 67 | ||
68 | #define VDS_POS_PRIMARY_VOL_DESC 0 | 68 | #define VDS_POS_PRIMARY_VOL_DESC 0 |
69 | #define VDS_POS_UNALLOC_SPACE_DESC 1 | 69 | #define VDS_POS_UNALLOC_SPACE_DESC 1 |
diff --git a/fs/udf/symlink.c b/fs/udf/symlink.c index d7c6dbe4194b..6fb7945c1e6e 100644 --- a/fs/udf/symlink.c +++ b/fs/udf/symlink.c | |||
@@ -20,7 +20,7 @@ | |||
20 | */ | 20 | */ |
21 | 21 | ||
22 | #include "udfdecl.h" | 22 | #include "udfdecl.h" |
23 | #include <asm/uaccess.h> | 23 | #include <linux/uaccess.h> |
24 | #include <linux/errno.h> | 24 | #include <linux/errno.h> |
25 | #include <linux/fs.h> | 25 | #include <linux/fs.h> |
26 | #include <linux/time.h> | 26 | #include <linux/time.h> |
diff --git a/fs/udf/unicode.c b/fs/udf/unicode.c index 44b815e57f94..afd470e588ff 100644 --- a/fs/udf/unicode.c +++ b/fs/udf/unicode.c | |||
@@ -412,7 +412,6 @@ static int udf_translate_to_linux(uint8_t *newName, uint8_t *udfName, | |||
412 | int extIndex = 0, newExtIndex = 0, hasExt = 0; | 412 | int extIndex = 0, newExtIndex = 0, hasExt = 0; |
413 | unsigned short valueCRC; | 413 | unsigned short valueCRC; |
414 | uint8_t curr; | 414 | uint8_t curr; |
415 | const uint8_t hexChar[] = "0123456789ABCDEF"; | ||
416 | 415 | ||
417 | if (udfName[0] == '.' && | 416 | if (udfName[0] == '.' && |
418 | (udfLen == 1 || (udfLen == 2 && udfName[1] == '.'))) { | 417 | (udfLen == 1 || (udfLen == 2 && udfName[1] == '.'))) { |
@@ -477,10 +476,10 @@ static int udf_translate_to_linux(uint8_t *newName, uint8_t *udfName, | |||
477 | newIndex = 250; | 476 | newIndex = 250; |
478 | newName[newIndex++] = CRC_MARK; | 477 | newName[newIndex++] = CRC_MARK; |
479 | valueCRC = crc_itu_t(0, fidName, fidNameLen); | 478 | valueCRC = crc_itu_t(0, fidName, fidNameLen); |
480 | newName[newIndex++] = hexChar[(valueCRC & 0xf000) >> 12]; | 479 | newName[newIndex++] = hex_asc_upper_hi(valueCRC >> 8); |
481 | newName[newIndex++] = hexChar[(valueCRC & 0x0f00) >> 8]; | 480 | newName[newIndex++] = hex_asc_upper_lo(valueCRC >> 8); |
482 | newName[newIndex++] = hexChar[(valueCRC & 0x00f0) >> 4]; | 481 | newName[newIndex++] = hex_asc_upper_hi(valueCRC); |
483 | newName[newIndex++] = hexChar[(valueCRC & 0x000f)]; | 482 | newName[newIndex++] = hex_asc_upper_lo(valueCRC); |
484 | 483 | ||
485 | if (hasExt) { | 484 | if (hasExt) { |
486 | newName[newIndex++] = EXT_MARK; | 485 | newName[newIndex++] = EXT_MARK; |
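The unicode.c hunk replaces the local hexChar[] lookup table with the kernel's hex_asc_upper_hi()/hex_asc_upper_lo() helpers, which pick out the high and low nibble of a byte as uppercase hex digits, so the 16-bit CRC is formatted by passing valueCRC >> 8 and then valueCRC. A standalone sketch with helpers of the same effect, checking that the result matches what the old table-based code produced:

#include <stdio.h>

static const char hex_asc_upper[] = "0123456789ABCDEF";

/* Same contract as the kernel helpers: format one byte's nibbles. */
static char hex_hi(unsigned char b) { return hex_asc_upper[(b >> 4) & 0x0f]; }
static char hex_lo(unsigned char b) { return hex_asc_upper[b & 0x0f]; }

int main(void)
{
	unsigned short crc = 0xBEEF;    /* example CRC of a long file name */
	char name[5] = {
		hex_hi(crc >> 8), hex_lo(crc >> 8),  /* high byte: 'B', 'E' */
		hex_hi(crc),      hex_lo(crc),       /* low byte:  'E', 'F' */
		'\0',
	};

	printf("CRC suffix: %s\n", name);       /* prints "BEEF" */
	return 0;
}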
diff --git a/fs/xfs/Kconfig b/fs/xfs/Kconfig index 399e8cec6e60..5d47b4df61ea 100644 --- a/fs/xfs/Kconfig +++ b/fs/xfs/Kconfig | |||
@@ -1,6 +1,7 @@ | |||
1 | config XFS_FS | 1 | config XFS_FS |
2 | tristate "XFS filesystem support" | 2 | tristate "XFS filesystem support" |
3 | depends on BLOCK | 3 | depends on BLOCK |
4 | depends on (64BIT || LBDAF) | ||
4 | select EXPORTFS | 5 | select EXPORTFS |
5 | select LIBCRC32C | 6 | select LIBCRC32C |
6 | help | 7 | help |
diff --git a/fs/xfs/Makefile b/fs/xfs/Makefile index c21f43506661..d61799949580 100644 --- a/fs/xfs/Makefile +++ b/fs/xfs/Makefile | |||
@@ -17,6 +17,7 @@ | |||
17 | # | 17 | # |
18 | 18 | ||
19 | ccflags-y += -I$(src) # needed for trace events | 19 | ccflags-y += -I$(src) # needed for trace events |
20 | ccflags-y += -I$(src)/libxfs | ||
20 | 21 | ||
21 | ccflags-$(CONFIG_XFS_DEBUG) += -g | 22 | ccflags-$(CONFIG_XFS_DEBUG) += -g |
22 | 23 | ||
@@ -25,6 +26,39 @@ obj-$(CONFIG_XFS_FS) += xfs.o | |||
25 | # this one should be compiled first, as the tracing macros can easily blow up | 26 | # this one should be compiled first, as the tracing macros can easily blow up |
26 | xfs-y += xfs_trace.o | 27 | xfs-y += xfs_trace.o |
27 | 28 | ||
29 | # build the libxfs code first | ||
30 | xfs-y += $(addprefix libxfs/, \ | ||
31 | xfs_alloc.o \ | ||
32 | xfs_alloc_btree.o \ | ||
33 | xfs_attr.o \ | ||
34 | xfs_attr_leaf.o \ | ||
35 | xfs_attr_remote.o \ | ||
36 | xfs_bmap.o \ | ||
37 | xfs_bmap_btree.o \ | ||
38 | xfs_btree.o \ | ||
39 | xfs_da_btree.o \ | ||
40 | xfs_da_format.o \ | ||
41 | xfs_dir2.o \ | ||
42 | xfs_dir2_block.o \ | ||
43 | xfs_dir2_data.o \ | ||
44 | xfs_dir2_leaf.o \ | ||
45 | xfs_dir2_node.o \ | ||
46 | xfs_dir2_sf.o \ | ||
47 | xfs_dquot_buf.o \ | ||
48 | xfs_ialloc.o \ | ||
49 | xfs_ialloc_btree.o \ | ||
50 | xfs_inode_fork.o \ | ||
51 | xfs_inode_buf.o \ | ||
52 | xfs_log_rlimit.o \ | ||
53 | xfs_sb.o \ | ||
54 | xfs_symlink_remote.o \ | ||
55 | xfs_trans_resv.o \ | ||
56 | ) | ||
57 | # xfs_rtbitmap is shared with libxfs | ||
58 | xfs-$(CONFIG_XFS_RT) += $(addprefix libxfs/, \ | ||
59 | xfs_rtbitmap.o \ | ||
60 | ) | ||
61 | |||
28 | # highlevel code | 62 | # highlevel code |
29 | xfs-y += xfs_aops.o \ | 63 | xfs-y += xfs_aops.o \ |
30 | xfs_attr_inactive.o \ | 64 | xfs_attr_inactive.o \ |
@@ -45,53 +79,27 @@ xfs-y += xfs_aops.o \ | |||
45 | xfs_ioctl.o \ | 79 | xfs_ioctl.o \ |
46 | xfs_iomap.o \ | 80 | xfs_iomap.o \ |
47 | xfs_iops.o \ | 81 | xfs_iops.o \ |
82 | xfs_inode.o \ | ||
48 | xfs_itable.o \ | 83 | xfs_itable.o \ |
49 | xfs_message.o \ | 84 | xfs_message.o \ |
50 | xfs_mount.o \ | 85 | xfs_mount.o \ |
51 | xfs_mru_cache.o \ | 86 | xfs_mru_cache.o \ |
52 | xfs_super.o \ | 87 | xfs_super.o \ |
53 | xfs_symlink.o \ | 88 | xfs_symlink.o \ |
89 | xfs_sysfs.o \ | ||
54 | xfs_trans.o \ | 90 | xfs_trans.o \ |
55 | xfs_xattr.o \ | 91 | xfs_xattr.o \ |
56 | kmem.o \ | 92 | kmem.o \ |
57 | uuid.o | 93 | uuid.o |
58 | 94 | ||
59 | # code shared with libxfs | ||
60 | xfs-y += xfs_alloc.o \ | ||
61 | xfs_alloc_btree.o \ | ||
62 | xfs_attr.o \ | ||
63 | xfs_attr_leaf.o \ | ||
64 | xfs_attr_remote.o \ | ||
65 | xfs_bmap.o \ | ||
66 | xfs_bmap_btree.o \ | ||
67 | xfs_btree.o \ | ||
68 | xfs_da_btree.o \ | ||
69 | xfs_da_format.o \ | ||
70 | xfs_dir2.o \ | ||
71 | xfs_dir2_block.o \ | ||
72 | xfs_dir2_data.o \ | ||
73 | xfs_dir2_leaf.o \ | ||
74 | xfs_dir2_node.o \ | ||
75 | xfs_dir2_sf.o \ | ||
76 | xfs_dquot_buf.o \ | ||
77 | xfs_ialloc.o \ | ||
78 | xfs_ialloc_btree.o \ | ||
79 | xfs_icreate_item.o \ | ||
80 | xfs_inode.o \ | ||
81 | xfs_inode_fork.o \ | ||
82 | xfs_inode_buf.o \ | ||
83 | xfs_log_recover.o \ | ||
84 | xfs_log_rlimit.o \ | ||
85 | xfs_sb.o \ | ||
86 | xfs_symlink_remote.o \ | ||
87 | xfs_trans_resv.o | ||
88 | |||
89 | # low-level transaction/log code | 95 | # low-level transaction/log code |
90 | xfs-y += xfs_log.o \ | 96 | xfs-y += xfs_log.o \ |
91 | xfs_log_cil.o \ | 97 | xfs_log_cil.o \ |
92 | xfs_buf_item.o \ | 98 | xfs_buf_item.o \ |
93 | xfs_extfree_item.o \ | 99 | xfs_extfree_item.o \ |
100 | xfs_icreate_item.o \ | ||
94 | xfs_inode_item.o \ | 101 | xfs_inode_item.o \ |
102 | xfs_log_recover.o \ | ||
95 | xfs_trans_ail.o \ | 103 | xfs_trans_ail.o \ |
96 | xfs_trans_buf.o \ | 104 | xfs_trans_buf.o \ |
97 | xfs_trans_extfree.o \ | 105 | xfs_trans_extfree.o \ |
@@ -107,8 +115,7 @@ xfs-$(CONFIG_XFS_QUOTA) += xfs_dquot.o \ | |||
107 | xfs_quotaops.o | 115 | xfs_quotaops.o |
108 | 116 | ||
109 | # xfs_rtbitmap is shared with libxfs | 117 | # xfs_rtbitmap is shared with libxfs |
110 | xfs-$(CONFIG_XFS_RT) += xfs_rtalloc.o \ | 118 | xfs-$(CONFIG_XFS_RT) += xfs_rtalloc.o |
111 | xfs_rtbitmap.o | ||
112 | 119 | ||
113 | xfs-$(CONFIG_XFS_POSIX_ACL) += xfs_acl.o | 120 | xfs-$(CONFIG_XFS_POSIX_ACL) += xfs_acl.o |
114 | xfs-$(CONFIG_PROC_FS) += xfs_stats.o | 121 | xfs-$(CONFIG_PROC_FS) += xfs_stats.o |
diff --git a/fs/xfs/xfs_ag.h b/fs/xfs/libxfs/xfs_ag.h
index 6e247a99f5db..6e247a99f5db 100644
--- a/fs/xfs/xfs_ag.h
+++ b/fs/xfs/libxfs/xfs_ag.h
diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c
index d43813267a80..4bffffe038a1 100644
--- a/fs/xfs/xfs_alloc.c
+++ b/fs/xfs/libxfs/xfs_alloc.c
@@ -483,9 +483,9 @@ xfs_agfl_read_verify( | |||
483 | return; | 483 | return; |
484 | 484 | ||
485 | if (!xfs_buf_verify_cksum(bp, XFS_AGFL_CRC_OFF)) | 485 | if (!xfs_buf_verify_cksum(bp, XFS_AGFL_CRC_OFF)) |
486 | xfs_buf_ioerror(bp, EFSBADCRC); | 486 | xfs_buf_ioerror(bp, -EFSBADCRC); |
487 | else if (!xfs_agfl_verify(bp)) | 487 | else if (!xfs_agfl_verify(bp)) |
488 | xfs_buf_ioerror(bp, EFSCORRUPTED); | 488 | xfs_buf_ioerror(bp, -EFSCORRUPTED); |
489 | 489 | ||
490 | if (bp->b_error) | 490 | if (bp->b_error) |
491 | xfs_verifier_error(bp); | 491 | xfs_verifier_error(bp); |
@@ -503,7 +503,7 @@ xfs_agfl_write_verify( | |||
503 | return; | 503 | return; |
504 | 504 | ||
505 | if (!xfs_agfl_verify(bp)) { | 505 | if (!xfs_agfl_verify(bp)) { |
506 | xfs_buf_ioerror(bp, EFSCORRUPTED); | 506 | xfs_buf_ioerror(bp, -EFSCORRUPTED); |
507 | xfs_verifier_error(bp); | 507 | xfs_verifier_error(bp); |
508 | return; | 508 | return; |
509 | } | 509 | } |
@@ -559,7 +559,7 @@ xfs_alloc_update_counters( | |||
559 | xfs_trans_agblocks_delta(tp, len); | 559 | xfs_trans_agblocks_delta(tp, len); |
560 | if (unlikely(be32_to_cpu(agf->agf_freeblks) > | 560 | if (unlikely(be32_to_cpu(agf->agf_freeblks) > |
561 | be32_to_cpu(agf->agf_length))) | 561 | be32_to_cpu(agf->agf_length))) |
562 | return EFSCORRUPTED; | 562 | return -EFSCORRUPTED; |
563 | 563 | ||
564 | xfs_alloc_log_agf(tp, agbp, XFS_AGF_FREEBLKS); | 564 | xfs_alloc_log_agf(tp, agbp, XFS_AGF_FREEBLKS); |
565 | return 0; | 565 | return 0; |
@@ -2234,11 +2234,11 @@ xfs_agf_read_verify( | |||
2234 | 2234 | ||
2235 | if (xfs_sb_version_hascrc(&mp->m_sb) && | 2235 | if (xfs_sb_version_hascrc(&mp->m_sb) && |
2236 | !xfs_buf_verify_cksum(bp, XFS_AGF_CRC_OFF)) | 2236 | !xfs_buf_verify_cksum(bp, XFS_AGF_CRC_OFF)) |
2237 | xfs_buf_ioerror(bp, EFSBADCRC); | 2237 | xfs_buf_ioerror(bp, -EFSBADCRC); |
2238 | else if (XFS_TEST_ERROR(!xfs_agf_verify(mp, bp), mp, | 2238 | else if (XFS_TEST_ERROR(!xfs_agf_verify(mp, bp), mp, |
2239 | XFS_ERRTAG_ALLOC_READ_AGF, | 2239 | XFS_ERRTAG_ALLOC_READ_AGF, |
2240 | XFS_RANDOM_ALLOC_READ_AGF)) | 2240 | XFS_RANDOM_ALLOC_READ_AGF)) |
2241 | xfs_buf_ioerror(bp, EFSCORRUPTED); | 2241 | xfs_buf_ioerror(bp, -EFSCORRUPTED); |
2242 | 2242 | ||
2243 | if (bp->b_error) | 2243 | if (bp->b_error) |
2244 | xfs_verifier_error(bp); | 2244 | xfs_verifier_error(bp); |
@@ -2252,7 +2252,7 @@ xfs_agf_write_verify( | |||
2252 | struct xfs_buf_log_item *bip = bp->b_fspriv; | 2252 | struct xfs_buf_log_item *bip = bp->b_fspriv; |
2253 | 2253 | ||
2254 | if (!xfs_agf_verify(mp, bp)) { | 2254 | if (!xfs_agf_verify(mp, bp)) { |
2255 | xfs_buf_ioerror(bp, EFSCORRUPTED); | 2255 | xfs_buf_ioerror(bp, -EFSCORRUPTED); |
2256 | xfs_verifier_error(bp); | 2256 | xfs_verifier_error(bp); |
2257 | return; | 2257 | return; |
2258 | } | 2258 | } |
@@ -2601,11 +2601,11 @@ xfs_free_extent( | |||
2601 | */ | 2601 | */ |
2602 | args.agno = XFS_FSB_TO_AGNO(args.mp, bno); | 2602 | args.agno = XFS_FSB_TO_AGNO(args.mp, bno); |
2603 | if (args.agno >= args.mp->m_sb.sb_agcount) | 2603 | if (args.agno >= args.mp->m_sb.sb_agcount) |
2604 | return EFSCORRUPTED; | 2604 | return -EFSCORRUPTED; |
2605 | 2605 | ||
2606 | args.agbno = XFS_FSB_TO_AGBNO(args.mp, bno); | 2606 | args.agbno = XFS_FSB_TO_AGBNO(args.mp, bno); |
2607 | if (args.agbno >= args.mp->m_sb.sb_agblocks) | 2607 | if (args.agbno >= args.mp->m_sb.sb_agblocks) |
2608 | return EFSCORRUPTED; | 2608 | return -EFSCORRUPTED; |
2609 | 2609 | ||
2610 | args.pag = xfs_perag_get(args.mp, args.agno); | 2610 | args.pag = xfs_perag_get(args.mp, args.agno); |
2611 | ASSERT(args.pag); | 2611 | ASSERT(args.pag); |
@@ -2617,7 +2617,7 @@ xfs_free_extent( | |||
2617 | /* validate the extent size is legal now we have the agf locked */ | 2617 | /* validate the extent size is legal now we have the agf locked */ |
2618 | if (args.agbno + len > | 2618 | if (args.agbno + len > |
2619 | be32_to_cpu(XFS_BUF_TO_AGF(args.agbp)->agf_length)) { | 2619 | be32_to_cpu(XFS_BUF_TO_AGF(args.agbp)->agf_length)) { |
2620 | error = EFSCORRUPTED; | 2620 | error = -EFSCORRUPTED; |
2621 | goto error0; | 2621 | goto error0; |
2622 | } | 2622 | } |
2623 | 2623 | ||
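The read/write verifier hunks above all follow the same two-stage pattern while switching to negative error codes: a checksum failure is recorded as -EFSBADCRC, a failed structural check as -EFSCORRUPTED, and only the first error sticks. A standalone sketch of that pattern, with demo_buf and demo_ioerror() as made-up stand-ins for the XFS buffer API rather than the real thing:

/*
 * Illustrative userspace sketch of the two-stage verifier pattern shown
 * in the hunks above. The struct, helpers, and error values are demo
 * stand-ins; XFS maps its verifier errors onto existing errno codes.
 */
#include <stdbool.h>
#include <stdio.h>

#define EFSBADCRC	74	/* stand-in value for this demo only */
#define EFSCORRUPTED	117	/* stand-in value for this demo only */

struct demo_buf {
	int b_error;
};

static void demo_ioerror(struct demo_buf *bp, int error)
{
	bp->b_error = error;		/* plays the role of xfs_buf_ioerror() */
}

static void demo_read_verify(struct demo_buf *bp, bool crc_ok, bool hdr_ok)
{
	if (!crc_ok)
		demo_ioerror(bp, -EFSBADCRC);	/* bad CRC takes precedence */
	else if (!hdr_ok)
		demo_ioerror(bp, -EFSCORRUPTED);

	if (bp->b_error)
		printf("verifier failed: %d\n", bp->b_error);
}

int main(void)
{
	struct demo_buf bp = { 0 };

	demo_read_verify(&bp, false, true);	/* prints "verifier failed: -74" */
	return 0;
}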
diff --git a/fs/xfs/xfs_alloc.h b/fs/xfs/libxfs/xfs_alloc.h
index feacb061bab7..feacb061bab7 100644
--- a/fs/xfs/xfs_alloc.h
+++ b/fs/xfs/libxfs/xfs_alloc.h
diff --git a/fs/xfs/xfs_alloc_btree.c b/fs/xfs/libxfs/xfs_alloc_btree.c
index 8358f1ded94d..e0e83e24d3ef 100644
--- a/fs/xfs/xfs_alloc_btree.c
+++ b/fs/xfs/libxfs/xfs_alloc_btree.c
@@ -355,9 +355,9 @@ xfs_allocbt_read_verify( | |||
355 | struct xfs_buf *bp) | 355 | struct xfs_buf *bp) |
356 | { | 356 | { |
357 | if (!xfs_btree_sblock_verify_crc(bp)) | 357 | if (!xfs_btree_sblock_verify_crc(bp)) |
358 | xfs_buf_ioerror(bp, EFSBADCRC); | 358 | xfs_buf_ioerror(bp, -EFSBADCRC); |
359 | else if (!xfs_allocbt_verify(bp)) | 359 | else if (!xfs_allocbt_verify(bp)) |
360 | xfs_buf_ioerror(bp, EFSCORRUPTED); | 360 | xfs_buf_ioerror(bp, -EFSCORRUPTED); |
361 | 361 | ||
362 | if (bp->b_error) { | 362 | if (bp->b_error) { |
363 | trace_xfs_btree_corrupt(bp, _RET_IP_); | 363 | trace_xfs_btree_corrupt(bp, _RET_IP_); |
@@ -371,7 +371,7 @@ xfs_allocbt_write_verify( | |||
371 | { | 371 | { |
372 | if (!xfs_allocbt_verify(bp)) { | 372 | if (!xfs_allocbt_verify(bp)) { |
373 | trace_xfs_btree_corrupt(bp, _RET_IP_); | 373 | trace_xfs_btree_corrupt(bp, _RET_IP_); |
374 | xfs_buf_ioerror(bp, EFSCORRUPTED); | 374 | xfs_buf_ioerror(bp, -EFSCORRUPTED); |
375 | xfs_verifier_error(bp); | 375 | xfs_verifier_error(bp); |
376 | return; | 376 | return; |
377 | } | 377 | } |
diff --git a/fs/xfs/xfs_alloc_btree.h b/fs/xfs/libxfs/xfs_alloc_btree.h
index 45e189e7e81c..45e189e7e81c 100644
--- a/fs/xfs/xfs_alloc_btree.h
+++ b/fs/xfs/libxfs/xfs_alloc_btree.h
diff --git a/fs/xfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c
index bfe36fc2cdc2..353fb425faef 100644
--- a/fs/xfs/xfs_attr.c
+++ b/fs/xfs/libxfs/xfs_attr.c
@@ -85,7 +85,7 @@ xfs_attr_args_init( | |||
85 | { | 85 | { |
86 | 86 | ||
87 | if (!name) | 87 | if (!name) |
88 | return EINVAL; | 88 | return -EINVAL; |
89 | 89 | ||
90 | memset(args, 0, sizeof(*args)); | 90 | memset(args, 0, sizeof(*args)); |
91 | args->geo = dp->i_mount->m_attr_geo; | 91 | args->geo = dp->i_mount->m_attr_geo; |
@@ -95,7 +95,7 @@ xfs_attr_args_init( | |||
95 | args->name = name; | 95 | args->name = name; |
96 | args->namelen = strlen((const char *)name); | 96 | args->namelen = strlen((const char *)name); |
97 | if (args->namelen >= MAXNAMELEN) | 97 | if (args->namelen >= MAXNAMELEN) |
98 | return EFAULT; /* match IRIX behaviour */ | 98 | return -EFAULT; /* match IRIX behaviour */ |
99 | 99 | ||
100 | args->hashval = xfs_da_hashname(args->name, args->namelen); | 100 | args->hashval = xfs_da_hashname(args->name, args->namelen); |
101 | return 0; | 101 | return 0; |
@@ -131,10 +131,10 @@ xfs_attr_get( | |||
131 | XFS_STATS_INC(xs_attr_get); | 131 | XFS_STATS_INC(xs_attr_get); |
132 | 132 | ||
133 | if (XFS_FORCED_SHUTDOWN(ip->i_mount)) | 133 | if (XFS_FORCED_SHUTDOWN(ip->i_mount)) |
134 | return EIO; | 134 | return -EIO; |
135 | 135 | ||
136 | if (!xfs_inode_hasattr(ip)) | 136 | if (!xfs_inode_hasattr(ip)) |
137 | return ENOATTR; | 137 | return -ENOATTR; |
138 | 138 | ||
139 | error = xfs_attr_args_init(&args, ip, name, flags); | 139 | error = xfs_attr_args_init(&args, ip, name, flags); |
140 | if (error) | 140 | if (error) |
@@ -145,7 +145,7 @@ xfs_attr_get( | |||
145 | 145 | ||
146 | lock_mode = xfs_ilock_attr_map_shared(ip); | 146 | lock_mode = xfs_ilock_attr_map_shared(ip); |
147 | if (!xfs_inode_hasattr(ip)) | 147 | if (!xfs_inode_hasattr(ip)) |
148 | error = ENOATTR; | 148 | error = -ENOATTR; |
149 | else if (ip->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) | 149 | else if (ip->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) |
150 | error = xfs_attr_shortform_getvalue(&args); | 150 | error = xfs_attr_shortform_getvalue(&args); |
151 | else if (xfs_bmap_one_block(ip, XFS_ATTR_FORK)) | 151 | else if (xfs_bmap_one_block(ip, XFS_ATTR_FORK)) |
@@ -155,7 +155,7 @@ xfs_attr_get( | |||
155 | xfs_iunlock(ip, lock_mode); | 155 | xfs_iunlock(ip, lock_mode); |
156 | 156 | ||
157 | *valuelenp = args.valuelen; | 157 | *valuelenp = args.valuelen; |
158 | return error == EEXIST ? 0 : error; | 158 | return error == -EEXIST ? 0 : error; |
159 | } | 159 | } |
160 | 160 | ||
161 | /* | 161 | /* |
@@ -213,7 +213,7 @@ xfs_attr_set( | |||
213 | XFS_STATS_INC(xs_attr_set); | 213 | XFS_STATS_INC(xs_attr_set); |
214 | 214 | ||
215 | if (XFS_FORCED_SHUTDOWN(dp->i_mount)) | 215 | if (XFS_FORCED_SHUTDOWN(dp->i_mount)) |
216 | return EIO; | 216 | return -EIO; |
217 | 217 | ||
218 | error = xfs_attr_args_init(&args, dp, name, flags); | 218 | error = xfs_attr_args_init(&args, dp, name, flags); |
219 | if (error) | 219 | if (error) |
@@ -304,7 +304,7 @@ xfs_attr_set( | |||
304 | * the inode. | 304 | * the inode. |
305 | */ | 305 | */ |
306 | error = xfs_attr_shortform_addname(&args); | 306 | error = xfs_attr_shortform_addname(&args); |
307 | if (error != ENOSPC) { | 307 | if (error != -ENOSPC) { |
308 | /* | 308 | /* |
309 | * Commit the shortform mods, and we're done. | 309 | * Commit the shortform mods, and we're done. |
310 | * NOTE: this is also the error path (EEXIST, etc). | 310 | * NOTE: this is also the error path (EEXIST, etc). |
@@ -419,10 +419,10 @@ xfs_attr_remove( | |||
419 | XFS_STATS_INC(xs_attr_remove); | 419 | XFS_STATS_INC(xs_attr_remove); |
420 | 420 | ||
421 | if (XFS_FORCED_SHUTDOWN(dp->i_mount)) | 421 | if (XFS_FORCED_SHUTDOWN(dp->i_mount)) |
422 | return EIO; | 422 | return -EIO; |
423 | 423 | ||
424 | if (!xfs_inode_hasattr(dp)) | 424 | if (!xfs_inode_hasattr(dp)) |
425 | return ENOATTR; | 425 | return -ENOATTR; |
426 | 426 | ||
427 | error = xfs_attr_args_init(&args, dp, name, flags); | 427 | error = xfs_attr_args_init(&args, dp, name, flags); |
428 | if (error) | 428 | if (error) |
@@ -477,7 +477,7 @@ xfs_attr_remove( | |||
477 | xfs_trans_ijoin(args.trans, dp, 0); | 477 | xfs_trans_ijoin(args.trans, dp, 0); |
478 | 478 | ||
479 | if (!xfs_inode_hasattr(dp)) { | 479 | if (!xfs_inode_hasattr(dp)) { |
480 | error = XFS_ERROR(ENOATTR); | 480 | error = -ENOATTR; |
481 | } else if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) { | 481 | } else if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) { |
482 | ASSERT(dp->i_afp->if_flags & XFS_IFINLINE); | 482 | ASSERT(dp->i_afp->if_flags & XFS_IFINLINE); |
483 | error = xfs_attr_shortform_remove(&args); | 483 | error = xfs_attr_shortform_remove(&args); |
@@ -534,28 +534,28 @@ xfs_attr_shortform_addname(xfs_da_args_t *args) | |||
534 | trace_xfs_attr_sf_addname(args); | 534 | trace_xfs_attr_sf_addname(args); |
535 | 535 | ||
536 | retval = xfs_attr_shortform_lookup(args); | 536 | retval = xfs_attr_shortform_lookup(args); |
537 | if ((args->flags & ATTR_REPLACE) && (retval == ENOATTR)) { | 537 | if ((args->flags & ATTR_REPLACE) && (retval == -ENOATTR)) { |
538 | return(retval); | 538 | return retval; |
539 | } else if (retval == EEXIST) { | 539 | } else if (retval == -EEXIST) { |
540 | if (args->flags & ATTR_CREATE) | 540 | if (args->flags & ATTR_CREATE) |
541 | return(retval); | 541 | return retval; |
542 | retval = xfs_attr_shortform_remove(args); | 542 | retval = xfs_attr_shortform_remove(args); |
543 | ASSERT(retval == 0); | 543 | ASSERT(retval == 0); |
544 | } | 544 | } |
545 | 545 | ||
546 | if (args->namelen >= XFS_ATTR_SF_ENTSIZE_MAX || | 546 | if (args->namelen >= XFS_ATTR_SF_ENTSIZE_MAX || |
547 | args->valuelen >= XFS_ATTR_SF_ENTSIZE_MAX) | 547 | args->valuelen >= XFS_ATTR_SF_ENTSIZE_MAX) |
548 | return(XFS_ERROR(ENOSPC)); | 548 | return -ENOSPC; |
549 | 549 | ||
550 | newsize = XFS_ATTR_SF_TOTSIZE(args->dp); | 550 | newsize = XFS_ATTR_SF_TOTSIZE(args->dp); |
551 | newsize += XFS_ATTR_SF_ENTSIZE_BYNAME(args->namelen, args->valuelen); | 551 | newsize += XFS_ATTR_SF_ENTSIZE_BYNAME(args->namelen, args->valuelen); |
552 | 552 | ||
553 | forkoff = xfs_attr_shortform_bytesfit(args->dp, newsize); | 553 | forkoff = xfs_attr_shortform_bytesfit(args->dp, newsize); |
554 | if (!forkoff) | 554 | if (!forkoff) |
555 | return(XFS_ERROR(ENOSPC)); | 555 | return -ENOSPC; |
556 | 556 | ||
557 | xfs_attr_shortform_add(args, forkoff); | 557 | xfs_attr_shortform_add(args, forkoff); |
558 | return(0); | 558 | return 0; |
559 | } | 559 | } |
560 | 560 | ||
561 | 561 | ||
@@ -592,10 +592,10 @@ xfs_attr_leaf_addname(xfs_da_args_t *args) | |||
592 | * the given flags produce an error or call for an atomic rename. | 592 | * the given flags produce an error or call for an atomic rename. |
593 | */ | 593 | */ |
594 | retval = xfs_attr3_leaf_lookup_int(bp, args); | 594 | retval = xfs_attr3_leaf_lookup_int(bp, args); |
595 | if ((args->flags & ATTR_REPLACE) && (retval == ENOATTR)) { | 595 | if ((args->flags & ATTR_REPLACE) && (retval == -ENOATTR)) { |
596 | xfs_trans_brelse(args->trans, bp); | 596 | xfs_trans_brelse(args->trans, bp); |
597 | return retval; | 597 | return retval; |
598 | } else if (retval == EEXIST) { | 598 | } else if (retval == -EEXIST) { |
599 | if (args->flags & ATTR_CREATE) { /* pure create op */ | 599 | if (args->flags & ATTR_CREATE) { /* pure create op */ |
600 | xfs_trans_brelse(args->trans, bp); | 600 | xfs_trans_brelse(args->trans, bp); |
601 | return retval; | 601 | return retval; |
@@ -626,7 +626,7 @@ xfs_attr_leaf_addname(xfs_da_args_t *args) | |||
626 | * if required. | 626 | * if required. |
627 | */ | 627 | */ |
628 | retval = xfs_attr3_leaf_add(bp, args); | 628 | retval = xfs_attr3_leaf_add(bp, args); |
629 | if (retval == ENOSPC) { | 629 | if (retval == -ENOSPC) { |
630 | /* | 630 | /* |
631 | * Promote the attribute list to the Btree format, then | 631 | * Promote the attribute list to the Btree format, then |
632 | * Commit that transaction so that the node_addname() call | 632 | * Commit that transaction so that the node_addname() call |
@@ -642,7 +642,7 @@ xfs_attr_leaf_addname(xfs_da_args_t *args) | |||
642 | ASSERT(committed); | 642 | ASSERT(committed); |
643 | args->trans = NULL; | 643 | args->trans = NULL; |
644 | xfs_bmap_cancel(args->flist); | 644 | xfs_bmap_cancel(args->flist); |
645 | return(error); | 645 | return error; |
646 | } | 646 | } |
647 | 647 | ||
648 | /* | 648 | /* |
@@ -658,13 +658,13 @@ xfs_attr_leaf_addname(xfs_da_args_t *args) | |||
658 | */ | 658 | */ |
659 | error = xfs_trans_roll(&args->trans, dp); | 659 | error = xfs_trans_roll(&args->trans, dp); |
660 | if (error) | 660 | if (error) |
661 | return (error); | 661 | return error; |
662 | 662 | ||
663 | /* | 663 | /* |
664 | * Fob the whole rest of the problem off on the Btree code. | 664 | * Fob the whole rest of the problem off on the Btree code. |
665 | */ | 665 | */ |
666 | error = xfs_attr_node_addname(args); | 666 | error = xfs_attr_node_addname(args); |
667 | return(error); | 667 | return error; |
668 | } | 668 | } |
669 | 669 | ||
670 | /* | 670 | /* |
@@ -673,7 +673,7 @@ xfs_attr_leaf_addname(xfs_da_args_t *args) | |||
673 | */ | 673 | */ |
674 | error = xfs_trans_roll(&args->trans, dp); | 674 | error = xfs_trans_roll(&args->trans, dp); |
675 | if (error) | 675 | if (error) |
676 | return (error); | 676 | return error; |
677 | 677 | ||
678 | /* | 678 | /* |
679 | * If there was an out-of-line value, allocate the blocks we | 679 | * If there was an out-of-line value, allocate the blocks we |
@@ -684,7 +684,7 @@ xfs_attr_leaf_addname(xfs_da_args_t *args) | |||
684 | if (args->rmtblkno > 0) { | 684 | if (args->rmtblkno > 0) { |
685 | error = xfs_attr_rmtval_set(args); | 685 | error = xfs_attr_rmtval_set(args); |
686 | if (error) | 686 | if (error) |
687 | return(error); | 687 | return error; |
688 | } | 688 | } |
689 | 689 | ||
690 | /* | 690 | /* |
@@ -700,7 +700,7 @@ xfs_attr_leaf_addname(xfs_da_args_t *args) | |||
700 | */ | 700 | */ |
701 | error = xfs_attr3_leaf_flipflags(args); | 701 | error = xfs_attr3_leaf_flipflags(args); |
702 | if (error) | 702 | if (error) |
703 | return(error); | 703 | return error; |
704 | 704 | ||
705 | /* | 705 | /* |
706 | * Dismantle the "old" attribute/value pair by removing | 706 | * Dismantle the "old" attribute/value pair by removing |
@@ -714,7 +714,7 @@ xfs_attr_leaf_addname(xfs_da_args_t *args) | |||
714 | if (args->rmtblkno) { | 714 | if (args->rmtblkno) { |
715 | error = xfs_attr_rmtval_remove(args); | 715 | error = xfs_attr_rmtval_remove(args); |
716 | if (error) | 716 | if (error) |
717 | return(error); | 717 | return error; |
718 | } | 718 | } |
719 | 719 | ||
720 | /* | 720 | /* |
@@ -744,7 +744,7 @@ xfs_attr_leaf_addname(xfs_da_args_t *args) | |||
744 | ASSERT(committed); | 744 | ASSERT(committed); |
745 | args->trans = NULL; | 745 | args->trans = NULL; |
746 | xfs_bmap_cancel(args->flist); | 746 | xfs_bmap_cancel(args->flist); |
747 | return(error); | 747 | return error; |
748 | } | 748 | } |
749 | 749 | ||
750 | /* | 750 | /* |
@@ -795,7 +795,7 @@ xfs_attr_leaf_removename(xfs_da_args_t *args) | |||
795 | return error; | 795 | return error; |
796 | 796 | ||
797 | error = xfs_attr3_leaf_lookup_int(bp, args); | 797 | error = xfs_attr3_leaf_lookup_int(bp, args); |
798 | if (error == ENOATTR) { | 798 | if (error == -ENOATTR) { |
799 | xfs_trans_brelse(args->trans, bp); | 799 | xfs_trans_brelse(args->trans, bp); |
800 | return error; | 800 | return error; |
801 | } | 801 | } |
@@ -850,7 +850,7 @@ xfs_attr_leaf_get(xfs_da_args_t *args) | |||
850 | return error; | 850 | return error; |
851 | 851 | ||
852 | error = xfs_attr3_leaf_lookup_int(bp, args); | 852 | error = xfs_attr3_leaf_lookup_int(bp, args); |
853 | if (error != EEXIST) { | 853 | if (error != -EEXIST) { |
854 | xfs_trans_brelse(args->trans, bp); | 854 | xfs_trans_brelse(args->trans, bp); |
855 | return error; | 855 | return error; |
856 | } | 856 | } |
@@ -906,9 +906,9 @@ restart: | |||
906 | goto out; | 906 | goto out; |
907 | blk = &state->path.blk[ state->path.active-1 ]; | 907 | blk = &state->path.blk[ state->path.active-1 ]; |
908 | ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC); | 908 | ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC); |
909 | if ((args->flags & ATTR_REPLACE) && (retval == ENOATTR)) { | 909 | if ((args->flags & ATTR_REPLACE) && (retval == -ENOATTR)) { |
910 | goto out; | 910 | goto out; |
911 | } else if (retval == EEXIST) { | 911 | } else if (retval == -EEXIST) { |
912 | if (args->flags & ATTR_CREATE) | 912 | if (args->flags & ATTR_CREATE) |
913 | goto out; | 913 | goto out; |
914 | 914 | ||
@@ -933,7 +933,7 @@ restart: | |||
933 | } | 933 | } |
934 | 934 | ||
935 | retval = xfs_attr3_leaf_add(blk->bp, state->args); | 935 | retval = xfs_attr3_leaf_add(blk->bp, state->args); |
936 | if (retval == ENOSPC) { | 936 | if (retval == -ENOSPC) { |
937 | if (state->path.active == 1) { | 937 | if (state->path.active == 1) { |
938 | /* | 938 | /* |
939 | * Its really a single leaf node, but it had | 939 | * Its really a single leaf node, but it had |
@@ -1031,7 +1031,7 @@ restart: | |||
1031 | if (args->rmtblkno > 0) { | 1031 | if (args->rmtblkno > 0) { |
1032 | error = xfs_attr_rmtval_set(args); | 1032 | error = xfs_attr_rmtval_set(args); |
1033 | if (error) | 1033 | if (error) |
1034 | return(error); | 1034 | return error; |
1035 | } | 1035 | } |
1036 | 1036 | ||
1037 | /* | 1037 | /* |
@@ -1061,7 +1061,7 @@ restart: | |||
1061 | if (args->rmtblkno) { | 1061 | if (args->rmtblkno) { |
1062 | error = xfs_attr_rmtval_remove(args); | 1062 | error = xfs_attr_rmtval_remove(args); |
1063 | if (error) | 1063 | if (error) |
1064 | return(error); | 1064 | return error; |
1065 | } | 1065 | } |
1066 | 1066 | ||
1067 | /* | 1067 | /* |
@@ -1134,8 +1134,8 @@ out: | |||
1134 | if (state) | 1134 | if (state) |
1135 | xfs_da_state_free(state); | 1135 | xfs_da_state_free(state); |
1136 | if (error) | 1136 | if (error) |
1137 | return(error); | 1137 | return error; |
1138 | return(retval); | 1138 | return retval; |
1139 | } | 1139 | } |
1140 | 1140 | ||
1141 | /* | 1141 | /* |
@@ -1168,7 +1168,7 @@ xfs_attr_node_removename(xfs_da_args_t *args) | |||
1168 | * Search to see if name exists, and get back a pointer to it. | 1168 | * Search to see if name exists, and get back a pointer to it. |
1169 | */ | 1169 | */ |
1170 | error = xfs_da3_node_lookup_int(state, &retval); | 1170 | error = xfs_da3_node_lookup_int(state, &retval); |
1171 | if (error || (retval != EEXIST)) { | 1171 | if (error || (retval != -EEXIST)) { |
1172 | if (error == 0) | 1172 | if (error == 0) |
1173 | error = retval; | 1173 | error = retval; |
1174 | goto out; | 1174 | goto out; |
@@ -1297,7 +1297,7 @@ xfs_attr_node_removename(xfs_da_args_t *args) | |||
1297 | 1297 | ||
1298 | out: | 1298 | out: |
1299 | xfs_da_state_free(state); | 1299 | xfs_da_state_free(state); |
1300 | return(error); | 1300 | return error; |
1301 | } | 1301 | } |
1302 | 1302 | ||
1303 | /* | 1303 | /* |
@@ -1345,7 +1345,7 @@ xfs_attr_fillstate(xfs_da_state_t *state) | |||
1345 | } | 1345 | } |
1346 | } | 1346 | } |
1347 | 1347 | ||
1348 | return(0); | 1348 | return 0; |
1349 | } | 1349 | } |
1350 | 1350 | ||
1351 | /* | 1351 | /* |
@@ -1376,7 +1376,7 @@ xfs_attr_refillstate(xfs_da_state_t *state) | |||
1376 | blk->blkno, blk->disk_blkno, | 1376 | blk->blkno, blk->disk_blkno, |
1377 | &blk->bp, XFS_ATTR_FORK); | 1377 | &blk->bp, XFS_ATTR_FORK); |
1378 | if (error) | 1378 | if (error) |
1379 | return(error); | 1379 | return error; |
1380 | } else { | 1380 | } else { |
1381 | blk->bp = NULL; | 1381 | blk->bp = NULL; |
1382 | } | 1382 | } |
@@ -1395,13 +1395,13 @@ xfs_attr_refillstate(xfs_da_state_t *state) | |||
1395 | blk->blkno, blk->disk_blkno, | 1395 | blk->blkno, blk->disk_blkno, |
1396 | &blk->bp, XFS_ATTR_FORK); | 1396 | &blk->bp, XFS_ATTR_FORK); |
1397 | if (error) | 1397 | if (error) |
1398 | return(error); | 1398 | return error; |
1399 | } else { | 1399 | } else { |
1400 | blk->bp = NULL; | 1400 | blk->bp = NULL; |
1401 | } | 1401 | } |
1402 | } | 1402 | } |
1403 | 1403 | ||
1404 | return(0); | 1404 | return 0; |
1405 | } | 1405 | } |
1406 | 1406 | ||
1407 | /* | 1407 | /* |
@@ -1431,7 +1431,7 @@ xfs_attr_node_get(xfs_da_args_t *args) | |||
1431 | error = xfs_da3_node_lookup_int(state, &retval); | 1431 | error = xfs_da3_node_lookup_int(state, &retval); |
1432 | if (error) { | 1432 | if (error) { |
1433 | retval = error; | 1433 | retval = error; |
1434 | } else if (retval == EEXIST) { | 1434 | } else if (retval == -EEXIST) { |
1435 | blk = &state->path.blk[ state->path.active-1 ]; | 1435 | blk = &state->path.blk[ state->path.active-1 ]; |
1436 | ASSERT(blk->bp != NULL); | 1436 | ASSERT(blk->bp != NULL); |
1437 | ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC); | 1437 | ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC); |
@@ -1455,5 +1455,5 @@ xfs_attr_node_get(xfs_da_args_t *args) | |||
1455 | } | 1455 | } |
1456 | 1456 | ||
1457 | xfs_da_state_free(state); | 1457 | xfs_da_state_free(state); |
1458 | return(retval); | 1458 | return retval; |
1459 | } | 1459 | } |
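The xfs_attr.c hunks above are part of the wider conversion of XFS from positive, XFS_ERROR()-wrapped error codes to the kernel-wide convention of returning negative errnos, which is why every comparison flips sign (ENOATTR becomes -ENOATTR, EEXIST becomes -EEXIST) and the wrapper disappears. A standalone sketch of the resulting calling convention; fake_lookup() is a made-up stand-in, and -ENOENT stands in for XFS's -ENOATTR, which is not a portable errno:

/*
 * Demo of the post-conversion convention: helpers return 0 or a bare
 * negative errno, and callers compare against the negative value.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>

static int fake_lookup(const char *name)
{
	if (strcmp(name, "user.known") != 0)
		return -ENOENT;		/* new style: negative errno, no wrapper */
	return -EEXIST;			/* "found" reported the same way the attr
					 * lookup paths above do, via -EEXIST */
}

int main(void)
{
	int retval = fake_lookup("user.other");

	if (retval == -ENOENT)		/* comparisons flip sign, as in the hunks */
		printf("attribute not found (%d)\n", retval);
	else if (retval == -EEXIST)
		printf("attribute exists\n");
	return 0;
}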
diff --git a/fs/xfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c
index 28712d29e43c..b1f73dbbf3d8 100644
--- a/fs/xfs/xfs_attr_leaf.c
+++ b/fs/xfs/libxfs/xfs_attr_leaf.c
@@ -214,7 +214,7 @@ xfs_attr3_leaf_write_verify( | |||
214 | struct xfs_attr3_leaf_hdr *hdr3 = bp->b_addr; | 214 | struct xfs_attr3_leaf_hdr *hdr3 = bp->b_addr; |
215 | 215 | ||
216 | if (!xfs_attr3_leaf_verify(bp)) { | 216 | if (!xfs_attr3_leaf_verify(bp)) { |
217 | xfs_buf_ioerror(bp, EFSCORRUPTED); | 217 | xfs_buf_ioerror(bp, -EFSCORRUPTED); |
218 | xfs_verifier_error(bp); | 218 | xfs_verifier_error(bp); |
219 | return; | 219 | return; |
220 | } | 220 | } |
@@ -242,9 +242,9 @@ xfs_attr3_leaf_read_verify( | |||
242 | 242 | ||
243 | if (xfs_sb_version_hascrc(&mp->m_sb) && | 243 | if (xfs_sb_version_hascrc(&mp->m_sb) && |
244 | !xfs_buf_verify_cksum(bp, XFS_ATTR3_LEAF_CRC_OFF)) | 244 | !xfs_buf_verify_cksum(bp, XFS_ATTR3_LEAF_CRC_OFF)) |
245 | xfs_buf_ioerror(bp, EFSBADCRC); | 245 | xfs_buf_ioerror(bp, -EFSBADCRC); |
246 | else if (!xfs_attr3_leaf_verify(bp)) | 246 | else if (!xfs_attr3_leaf_verify(bp)) |
247 | xfs_buf_ioerror(bp, EFSCORRUPTED); | 247 | xfs_buf_ioerror(bp, -EFSCORRUPTED); |
248 | 248 | ||
249 | if (bp->b_error) | 249 | if (bp->b_error) |
250 | xfs_verifier_error(bp); | 250 | xfs_verifier_error(bp); |
@@ -547,7 +547,7 @@ xfs_attr_shortform_remove(xfs_da_args_t *args) | |||
547 | break; | 547 | break; |
548 | } | 548 | } |
549 | if (i == end) | 549 | if (i == end) |
550 | return(XFS_ERROR(ENOATTR)); | 550 | return -ENOATTR; |
551 | 551 | ||
552 | /* | 552 | /* |
553 | * Fix up the attribute fork data, covering the hole | 553 | * Fix up the attribute fork data, covering the hole |
@@ -582,7 +582,7 @@ xfs_attr_shortform_remove(xfs_da_args_t *args) | |||
582 | 582 | ||
583 | xfs_sbversion_add_attr2(mp, args->trans); | 583 | xfs_sbversion_add_attr2(mp, args->trans); |
584 | 584 | ||
585 | return(0); | 585 | return 0; |
586 | } | 586 | } |
587 | 587 | ||
588 | /* | 588 | /* |
@@ -611,9 +611,9 @@ xfs_attr_shortform_lookup(xfs_da_args_t *args) | |||
611 | continue; | 611 | continue; |
612 | if (!xfs_attr_namesp_match(args->flags, sfe->flags)) | 612 | if (!xfs_attr_namesp_match(args->flags, sfe->flags)) |
613 | continue; | 613 | continue; |
614 | return(XFS_ERROR(EEXIST)); | 614 | return -EEXIST; |
615 | } | 615 | } |
616 | return(XFS_ERROR(ENOATTR)); | 616 | return -ENOATTR; |
617 | } | 617 | } |
618 | 618 | ||
619 | /* | 619 | /* |
@@ -640,18 +640,18 @@ xfs_attr_shortform_getvalue(xfs_da_args_t *args) | |||
640 | continue; | 640 | continue; |
641 | if (args->flags & ATTR_KERNOVAL) { | 641 | if (args->flags & ATTR_KERNOVAL) { |
642 | args->valuelen = sfe->valuelen; | 642 | args->valuelen = sfe->valuelen; |
643 | return(XFS_ERROR(EEXIST)); | 643 | return -EEXIST; |
644 | } | 644 | } |
645 | if (args->valuelen < sfe->valuelen) { | 645 | if (args->valuelen < sfe->valuelen) { |
646 | args->valuelen = sfe->valuelen; | 646 | args->valuelen = sfe->valuelen; |
647 | return(XFS_ERROR(ERANGE)); | 647 | return -ERANGE; |
648 | } | 648 | } |
649 | args->valuelen = sfe->valuelen; | 649 | args->valuelen = sfe->valuelen; |
650 | memcpy(args->value, &sfe->nameval[args->namelen], | 650 | memcpy(args->value, &sfe->nameval[args->namelen], |
651 | args->valuelen); | 651 | args->valuelen); |
652 | return(XFS_ERROR(EEXIST)); | 652 | return -EEXIST; |
653 | } | 653 | } |
654 | return(XFS_ERROR(ENOATTR)); | 654 | return -ENOATTR; |
655 | } | 655 | } |
656 | 656 | ||
657 | /* | 657 | /* |
@@ -691,7 +691,7 @@ xfs_attr_shortform_to_leaf(xfs_da_args_t *args) | |||
691 | * If we hit an IO error middle of the transaction inside | 691 | * If we hit an IO error middle of the transaction inside |
692 | * grow_inode(), we may have inconsistent data. Bail out. | 692 | * grow_inode(), we may have inconsistent data. Bail out. |
693 | */ | 693 | */ |
694 | if (error == EIO) | 694 | if (error == -EIO) |
695 | goto out; | 695 | goto out; |
696 | xfs_idata_realloc(dp, size, XFS_ATTR_FORK); /* try to put */ | 696 | xfs_idata_realloc(dp, size, XFS_ATTR_FORK); /* try to put */ |
697 | memcpy(ifp->if_u1.if_data, tmpbuffer, size); /* it back */ | 697 | memcpy(ifp->if_u1.if_data, tmpbuffer, size); /* it back */ |
@@ -730,9 +730,9 @@ xfs_attr_shortform_to_leaf(xfs_da_args_t *args) | |||
730 | sfe->namelen); | 730 | sfe->namelen); |
731 | nargs.flags = XFS_ATTR_NSP_ONDISK_TO_ARGS(sfe->flags); | 731 | nargs.flags = XFS_ATTR_NSP_ONDISK_TO_ARGS(sfe->flags); |
732 | error = xfs_attr3_leaf_lookup_int(bp, &nargs); /* set a->index */ | 732 | error = xfs_attr3_leaf_lookup_int(bp, &nargs); /* set a->index */ |
733 | ASSERT(error == ENOATTR); | 733 | ASSERT(error == -ENOATTR); |
734 | error = xfs_attr3_leaf_add(bp, &nargs); | 734 | error = xfs_attr3_leaf_add(bp, &nargs); |
735 | ASSERT(error != ENOSPC); | 735 | ASSERT(error != -ENOSPC); |
736 | if (error) | 736 | if (error) |
737 | goto out; | 737 | goto out; |
738 | sfe = XFS_ATTR_SF_NEXTENTRY(sfe); | 738 | sfe = XFS_ATTR_SF_NEXTENTRY(sfe); |
@@ -741,7 +741,7 @@ xfs_attr_shortform_to_leaf(xfs_da_args_t *args) | |||
741 | 741 | ||
742 | out: | 742 | out: |
743 | kmem_free(tmpbuffer); | 743 | kmem_free(tmpbuffer); |
744 | return(error); | 744 | return error; |
745 | } | 745 | } |
746 | 746 | ||
747 | /* | 747 | /* |
@@ -769,12 +769,12 @@ xfs_attr_shortform_allfit( | |||
769 | if (entry->flags & XFS_ATTR_INCOMPLETE) | 769 | if (entry->flags & XFS_ATTR_INCOMPLETE) |
770 | continue; /* don't copy partial entries */ | 770 | continue; /* don't copy partial entries */ |
771 | if (!(entry->flags & XFS_ATTR_LOCAL)) | 771 | if (!(entry->flags & XFS_ATTR_LOCAL)) |
772 | return(0); | 772 | return 0; |
773 | name_loc = xfs_attr3_leaf_name_local(leaf, i); | 773 | name_loc = xfs_attr3_leaf_name_local(leaf, i); |
774 | if (name_loc->namelen >= XFS_ATTR_SF_ENTSIZE_MAX) | 774 | if (name_loc->namelen >= XFS_ATTR_SF_ENTSIZE_MAX) |
775 | return(0); | 775 | return 0; |
776 | if (be16_to_cpu(name_loc->valuelen) >= XFS_ATTR_SF_ENTSIZE_MAX) | 776 | if (be16_to_cpu(name_loc->valuelen) >= XFS_ATTR_SF_ENTSIZE_MAX) |
777 | return(0); | 777 | return 0; |
778 | bytes += sizeof(struct xfs_attr_sf_entry) - 1 | 778 | bytes += sizeof(struct xfs_attr_sf_entry) - 1 |
779 | + name_loc->namelen | 779 | + name_loc->namelen |
780 | + be16_to_cpu(name_loc->valuelen); | 780 | + be16_to_cpu(name_loc->valuelen); |
@@ -809,7 +809,7 @@ xfs_attr3_leaf_to_shortform( | |||
809 | 809 | ||
810 | tmpbuffer = kmem_alloc(args->geo->blksize, KM_SLEEP); | 810 | tmpbuffer = kmem_alloc(args->geo->blksize, KM_SLEEP); |
811 | if (!tmpbuffer) | 811 | if (!tmpbuffer) |
812 | return ENOMEM; | 812 | return -ENOMEM; |
813 | 813 | ||
814 | memcpy(tmpbuffer, bp->b_addr, args->geo->blksize); | 814 | memcpy(tmpbuffer, bp->b_addr, args->geo->blksize); |
815 | 815 | ||
@@ -1017,10 +1017,10 @@ xfs_attr3_leaf_split( | |||
1017 | ASSERT(oldblk->magic == XFS_ATTR_LEAF_MAGIC); | 1017 | ASSERT(oldblk->magic == XFS_ATTR_LEAF_MAGIC); |
1018 | error = xfs_da_grow_inode(state->args, &blkno); | 1018 | error = xfs_da_grow_inode(state->args, &blkno); |
1019 | if (error) | 1019 | if (error) |
1020 | return(error); | 1020 | return error; |
1021 | error = xfs_attr3_leaf_create(state->args, blkno, &newblk->bp); | 1021 | error = xfs_attr3_leaf_create(state->args, blkno, &newblk->bp); |
1022 | if (error) | 1022 | if (error) |
1023 | return(error); | 1023 | return error; |
1024 | newblk->blkno = blkno; | 1024 | newblk->blkno = blkno; |
1025 | newblk->magic = XFS_ATTR_LEAF_MAGIC; | 1025 | newblk->magic = XFS_ATTR_LEAF_MAGIC; |
1026 | 1026 | ||
@@ -1031,7 +1031,7 @@ xfs_attr3_leaf_split( | |||
1031 | xfs_attr3_leaf_rebalance(state, oldblk, newblk); | 1031 | xfs_attr3_leaf_rebalance(state, oldblk, newblk); |
1032 | error = xfs_da3_blk_link(state, oldblk, newblk); | 1032 | error = xfs_da3_blk_link(state, oldblk, newblk); |
1033 | if (error) | 1033 | if (error) |
1034 | return(error); | 1034 | return error; |
1035 | 1035 | ||
1036 | /* | 1036 | /* |
1037 | * Save info on "old" attribute for "atomic rename" ops, leaf_add() | 1037 | * Save info on "old" attribute for "atomic rename" ops, leaf_add() |
@@ -1053,7 +1053,7 @@ xfs_attr3_leaf_split( | |||
1053 | */ | 1053 | */ |
1054 | oldblk->hashval = xfs_attr_leaf_lasthash(oldblk->bp, NULL); | 1054 | oldblk->hashval = xfs_attr_leaf_lasthash(oldblk->bp, NULL); |
1055 | newblk->hashval = xfs_attr_leaf_lasthash(newblk->bp, NULL); | 1055 | newblk->hashval = xfs_attr_leaf_lasthash(newblk->bp, NULL); |
1056 | return(error); | 1056 | return error; |
1057 | } | 1057 | } |
1058 | 1058 | ||
1059 | /* | 1059 | /* |
@@ -1108,7 +1108,7 @@ xfs_attr3_leaf_add( | |||
1108 | * no good and we should just give up. | 1108 | * no good and we should just give up. |
1109 | */ | 1109 | */ |
1110 | if (!ichdr.holes && sum < entsize) | 1110 | if (!ichdr.holes && sum < entsize) |
1111 | return XFS_ERROR(ENOSPC); | 1111 | return -ENOSPC; |
1112 | 1112 | ||
1113 | /* | 1113 | /* |
1114 | * Compact the entries to coalesce free space. | 1114 | * Compact the entries to coalesce free space. |
@@ -1121,7 +1121,7 @@ xfs_attr3_leaf_add( | |||
1121 | * free region, in freemap[0]. If it is not big enough, give up. | 1121 | * free region, in freemap[0]. If it is not big enough, give up. |
1122 | */ | 1122 | */ |
1123 | if (ichdr.freemap[0].size < (entsize + sizeof(xfs_attr_leaf_entry_t))) { | 1123 | if (ichdr.freemap[0].size < (entsize + sizeof(xfs_attr_leaf_entry_t))) { |
1124 | tmp = ENOSPC; | 1124 | tmp = -ENOSPC; |
1125 | goto out_log_hdr; | 1125 | goto out_log_hdr; |
1126 | } | 1126 | } |
1127 | 1127 | ||
@@ -1692,7 +1692,7 @@ xfs_attr3_leaf_toosmall( | |||
1692 | ichdr.usedbytes; | 1692 | ichdr.usedbytes; |
1693 | if (bytes > (state->args->geo->blksize >> 1)) { | 1693 | if (bytes > (state->args->geo->blksize >> 1)) { |
1694 | *action = 0; /* blk over 50%, don't try to join */ | 1694 | *action = 0; /* blk over 50%, don't try to join */ |
1695 | return(0); | 1695 | return 0; |
1696 | } | 1696 | } |
1697 | 1697 | ||
1698 | /* | 1698 | /* |
@@ -1711,7 +1711,7 @@ xfs_attr3_leaf_toosmall( | |||
1711 | error = xfs_da3_path_shift(state, &state->altpath, forward, | 1711 | error = xfs_da3_path_shift(state, &state->altpath, forward, |
1712 | 0, &retval); | 1712 | 0, &retval); |
1713 | if (error) | 1713 | if (error) |
1714 | return(error); | 1714 | return error; |
1715 | if (retval) { | 1715 | if (retval) { |
1716 | *action = 0; | 1716 | *action = 0; |
1717 | } else { | 1717 | } else { |
@@ -1740,7 +1740,7 @@ xfs_attr3_leaf_toosmall( | |||
1740 | error = xfs_attr3_leaf_read(state->args->trans, state->args->dp, | 1740 | error = xfs_attr3_leaf_read(state->args->trans, state->args->dp, |
1741 | blkno, -1, &bp); | 1741 | blkno, -1, &bp); |
1742 | if (error) | 1742 | if (error) |
1743 | return(error); | 1743 | return error; |
1744 | 1744 | ||
1745 | xfs_attr3_leaf_hdr_from_disk(&ichdr2, bp->b_addr); | 1745 | xfs_attr3_leaf_hdr_from_disk(&ichdr2, bp->b_addr); |
1746 | 1746 | ||
@@ -1757,7 +1757,7 @@ xfs_attr3_leaf_toosmall( | |||
1757 | } | 1757 | } |
1758 | if (i >= 2) { | 1758 | if (i >= 2) { |
1759 | *action = 0; | 1759 | *action = 0; |
1760 | return(0); | 1760 | return 0; |
1761 | } | 1761 | } |
1762 | 1762 | ||
1763 | /* | 1763 | /* |
@@ -1773,13 +1773,13 @@ xfs_attr3_leaf_toosmall( | |||
1773 | 0, &retval); | 1773 | 0, &retval); |
1774 | } | 1774 | } |
1775 | if (error) | 1775 | if (error) |
1776 | return(error); | 1776 | return error; |
1777 | if (retval) { | 1777 | if (retval) { |
1778 | *action = 0; | 1778 | *action = 0; |
1779 | } else { | 1779 | } else { |
1780 | *action = 1; | 1780 | *action = 1; |
1781 | } | 1781 | } |
1782 | return(0); | 1782 | return 0; |
1783 | } | 1783 | } |
1784 | 1784 | ||
1785 | /* | 1785 | /* |
@@ -2123,7 +2123,7 @@ xfs_attr3_leaf_lookup_int( | |||
2123 | } | 2123 | } |
2124 | if (probe == ichdr.count || be32_to_cpu(entry->hashval) != hashval) { | 2124 | if (probe == ichdr.count || be32_to_cpu(entry->hashval) != hashval) { |
2125 | args->index = probe; | 2125 | args->index = probe; |
2126 | return XFS_ERROR(ENOATTR); | 2126 | return -ENOATTR; |
2127 | } | 2127 | } |
2128 | 2128 | ||
2129 | /* | 2129 | /* |
@@ -2152,7 +2152,7 @@ xfs_attr3_leaf_lookup_int( | |||
2152 | if (!xfs_attr_namesp_match(args->flags, entry->flags)) | 2152 | if (!xfs_attr_namesp_match(args->flags, entry->flags)) |
2153 | continue; | 2153 | continue; |
2154 | args->index = probe; | 2154 | args->index = probe; |
2155 | return XFS_ERROR(EEXIST); | 2155 | return -EEXIST; |
2156 | } else { | 2156 | } else { |
2157 | name_rmt = xfs_attr3_leaf_name_remote(leaf, probe); | 2157 | name_rmt = xfs_attr3_leaf_name_remote(leaf, probe); |
2158 | if (name_rmt->namelen != args->namelen) | 2158 | if (name_rmt->namelen != args->namelen) |
@@ -2168,11 +2168,11 @@ xfs_attr3_leaf_lookup_int( | |||
2168 | args->rmtblkcnt = xfs_attr3_rmt_blocks( | 2168 | args->rmtblkcnt = xfs_attr3_rmt_blocks( |
2169 | args->dp->i_mount, | 2169 | args->dp->i_mount, |
2170 | args->rmtvaluelen); | 2170 | args->rmtvaluelen); |
2171 | return XFS_ERROR(EEXIST); | 2171 | return -EEXIST; |
2172 | } | 2172 | } |
2173 | } | 2173 | } |
2174 | args->index = probe; | 2174 | args->index = probe; |
2175 | return XFS_ERROR(ENOATTR); | 2175 | return -ENOATTR; |
2176 | } | 2176 | } |
2177 | 2177 | ||
2178 | /* | 2178 | /* |
@@ -2208,7 +2208,7 @@ xfs_attr3_leaf_getvalue( | |||
2208 | } | 2208 | } |
2209 | if (args->valuelen < valuelen) { | 2209 | if (args->valuelen < valuelen) { |
2210 | args->valuelen = valuelen; | 2210 | args->valuelen = valuelen; |
2211 | return XFS_ERROR(ERANGE); | 2211 | return -ERANGE; |
2212 | } | 2212 | } |
2213 | args->valuelen = valuelen; | 2213 | args->valuelen = valuelen; |
2214 | memcpy(args->value, &name_loc->nameval[args->namelen], valuelen); | 2214 | memcpy(args->value, &name_loc->nameval[args->namelen], valuelen); |
@@ -2226,7 +2226,7 @@ xfs_attr3_leaf_getvalue( | |||
2226 | } | 2226 | } |
2227 | if (args->valuelen < args->rmtvaluelen) { | 2227 | if (args->valuelen < args->rmtvaluelen) { |
2228 | args->valuelen = args->rmtvaluelen; | 2228 | args->valuelen = args->rmtvaluelen; |
2229 | return XFS_ERROR(ERANGE); | 2229 | return -ERANGE; |
2230 | } | 2230 | } |
2231 | args->valuelen = args->rmtvaluelen; | 2231 | args->valuelen = args->rmtvaluelen; |
2232 | } | 2232 | } |
@@ -2481,7 +2481,7 @@ xfs_attr3_leaf_clearflag( | |||
2481 | */ | 2481 | */ |
2482 | error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno, -1, &bp); | 2482 | error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno, -1, &bp); |
2483 | if (error) | 2483 | if (error) |
2484 | return(error); | 2484 | return error; |
2485 | 2485 | ||
2486 | leaf = bp->b_addr; | 2486 | leaf = bp->b_addr; |
2487 | entry = &xfs_attr3_leaf_entryp(leaf)[args->index]; | 2487 | entry = &xfs_attr3_leaf_entryp(leaf)[args->index]; |
@@ -2548,7 +2548,7 @@ xfs_attr3_leaf_setflag( | |||
2548 | */ | 2548 | */ |
2549 | error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno, -1, &bp); | 2549 | error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno, -1, &bp); |
2550 | if (error) | 2550 | if (error) |
2551 | return(error); | 2551 | return error; |
2552 | 2552 | ||
2553 | leaf = bp->b_addr; | 2553 | leaf = bp->b_addr; |
2554 | #ifdef DEBUG | 2554 | #ifdef DEBUG |
diff --git a/fs/xfs/xfs_attr_leaf.h b/fs/xfs/libxfs/xfs_attr_leaf.h
index e2929da7c3ba..e2929da7c3ba 100644
--- a/fs/xfs/xfs_attr_leaf.h
+++ b/fs/xfs/libxfs/xfs_attr_leaf.h
diff --git a/fs/xfs/xfs_attr_remote.c b/fs/xfs/libxfs/xfs_attr_remote.c
index b5adfecbb8ee..7510ab8058a4 100644
--- a/fs/xfs/xfs_attr_remote.c
+++ b/fs/xfs/libxfs/xfs_attr_remote.c
@@ -138,11 +138,11 @@ xfs_attr3_rmt_read_verify( | |||
138 | 138 | ||
139 | while (len > 0) { | 139 | while (len > 0) { |
140 | if (!xfs_verify_cksum(ptr, blksize, XFS_ATTR3_RMT_CRC_OFF)) { | 140 | if (!xfs_verify_cksum(ptr, blksize, XFS_ATTR3_RMT_CRC_OFF)) { |
141 | xfs_buf_ioerror(bp, EFSBADCRC); | 141 | xfs_buf_ioerror(bp, -EFSBADCRC); |
142 | break; | 142 | break; |
143 | } | 143 | } |
144 | if (!xfs_attr3_rmt_verify(mp, ptr, blksize, bno)) { | 144 | if (!xfs_attr3_rmt_verify(mp, ptr, blksize, bno)) { |
145 | xfs_buf_ioerror(bp, EFSCORRUPTED); | 145 | xfs_buf_ioerror(bp, -EFSCORRUPTED); |
146 | break; | 146 | break; |
147 | } | 147 | } |
148 | len -= blksize; | 148 | len -= blksize; |
@@ -178,7 +178,7 @@ xfs_attr3_rmt_write_verify( | |||
178 | 178 | ||
179 | while (len > 0) { | 179 | while (len > 0) { |
180 | if (!xfs_attr3_rmt_verify(mp, ptr, blksize, bno)) { | 180 | if (!xfs_attr3_rmt_verify(mp, ptr, blksize, bno)) { |
181 | xfs_buf_ioerror(bp, EFSCORRUPTED); | 181 | xfs_buf_ioerror(bp, -EFSCORRUPTED); |
182 | xfs_verifier_error(bp); | 182 | xfs_verifier_error(bp); |
183 | return; | 183 | return; |
184 | } | 184 | } |
@@ -257,7 +257,7 @@ xfs_attr_rmtval_copyout( | |||
257 | xfs_alert(mp, | 257 | xfs_alert(mp, |
258 | "remote attribute header mismatch bno/off/len/owner (0x%llx/0x%x/Ox%x/0x%llx)", | 258 | "remote attribute header mismatch bno/off/len/owner (0x%llx/0x%x/Ox%x/0x%llx)", |
259 | bno, *offset, byte_cnt, ino); | 259 | bno, *offset, byte_cnt, ino); |
260 | return EFSCORRUPTED; | 260 | return -EFSCORRUPTED; |
261 | } | 261 | } |
262 | hdr_size = sizeof(struct xfs_attr3_rmt_hdr); | 262 | hdr_size = sizeof(struct xfs_attr3_rmt_hdr); |
263 | } | 263 | } |
@@ -452,7 +452,7 @@ xfs_attr_rmtval_set( | |||
452 | ASSERT(committed); | 452 | ASSERT(committed); |
453 | args->trans = NULL; | 453 | args->trans = NULL; |
454 | xfs_bmap_cancel(args->flist); | 454 | xfs_bmap_cancel(args->flist); |
455 | return(error); | 455 | return error; |
456 | } | 456 | } |
457 | 457 | ||
458 | /* | 458 | /* |
@@ -473,7 +473,7 @@ xfs_attr_rmtval_set( | |||
473 | */ | 473 | */ |
474 | error = xfs_trans_roll(&args->trans, dp); | 474 | error = xfs_trans_roll(&args->trans, dp); |
475 | if (error) | 475 | if (error) |
476 | return (error); | 476 | return error; |
477 | } | 477 | } |
478 | 478 | ||
479 | /* | 479 | /* |
@@ -498,7 +498,7 @@ xfs_attr_rmtval_set( | |||
498 | blkcnt, &map, &nmap, | 498 | blkcnt, &map, &nmap, |
499 | XFS_BMAPI_ATTRFORK); | 499 | XFS_BMAPI_ATTRFORK); |
500 | if (error) | 500 | if (error) |
501 | return(error); | 501 | return error; |
502 | ASSERT(nmap == 1); | 502 | ASSERT(nmap == 1); |
503 | ASSERT((map.br_startblock != DELAYSTARTBLOCK) && | 503 | ASSERT((map.br_startblock != DELAYSTARTBLOCK) && |
504 | (map.br_startblock != HOLESTARTBLOCK)); | 504 | (map.br_startblock != HOLESTARTBLOCK)); |
@@ -508,7 +508,7 @@ xfs_attr_rmtval_set( | |||
508 | 508 | ||
509 | bp = xfs_buf_get(mp->m_ddev_targp, dblkno, dblkcnt, 0); | 509 | bp = xfs_buf_get(mp->m_ddev_targp, dblkno, dblkcnt, 0); |
510 | if (!bp) | 510 | if (!bp) |
511 | return ENOMEM; | 511 | return -ENOMEM; |
512 | bp->b_ops = &xfs_attr3_rmt_buf_ops; | 512 | bp->b_ops = &xfs_attr3_rmt_buf_ops; |
513 | 513 | ||
514 | xfs_attr_rmtval_copyin(mp, bp, args->dp->i_ino, &offset, | 514 | xfs_attr_rmtval_copyin(mp, bp, args->dp->i_ino, &offset, |
@@ -563,7 +563,7 @@ xfs_attr_rmtval_remove( | |||
563 | error = xfs_bmapi_read(args->dp, (xfs_fileoff_t)lblkno, | 563 | error = xfs_bmapi_read(args->dp, (xfs_fileoff_t)lblkno, |
564 | blkcnt, &map, &nmap, XFS_BMAPI_ATTRFORK); | 564 | blkcnt, &map, &nmap, XFS_BMAPI_ATTRFORK); |
565 | if (error) | 565 | if (error) |
566 | return(error); | 566 | return error; |
567 | ASSERT(nmap == 1); | 567 | ASSERT(nmap == 1); |
568 | ASSERT((map.br_startblock != DELAYSTARTBLOCK) && | 568 | ASSERT((map.br_startblock != DELAYSTARTBLOCK) && |
569 | (map.br_startblock != HOLESTARTBLOCK)); | 569 | (map.br_startblock != HOLESTARTBLOCK)); |
@@ -622,7 +622,7 @@ xfs_attr_rmtval_remove( | |||
622 | */ | 622 | */ |
623 | error = xfs_trans_roll(&args->trans, args->dp); | 623 | error = xfs_trans_roll(&args->trans, args->dp); |
624 | if (error) | 624 | if (error) |
625 | return (error); | 625 | return error; |
626 | } | 626 | } |
627 | return(0); | 627 | return 0; |
628 | } | 628 | } |
diff --git a/fs/xfs/xfs_attr_remote.h b/fs/xfs/libxfs/xfs_attr_remote.h
index 5a9acfa156d7..5a9acfa156d7 100644
--- a/fs/xfs/xfs_attr_remote.h
+++ b/fs/xfs/libxfs/xfs_attr_remote.h
diff --git a/fs/xfs/xfs_attr_sf.h b/fs/xfs/libxfs/xfs_attr_sf.h
index 919756e3ba53..919756e3ba53 100644
--- a/fs/xfs/xfs_attr_sf.h
+++ b/fs/xfs/libxfs/xfs_attr_sf.h
diff --git a/fs/xfs/xfs_bit.h b/fs/xfs/libxfs/xfs_bit.h
index e1649c0d3e02..e1649c0d3e02 100644
--- a/fs/xfs/xfs_bit.h
+++ b/fs/xfs/libxfs/xfs_bit.h
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 75c3fe5f3d9d..de2d26d32844 100644
--- a/fs/xfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -392,7 +392,7 @@ xfs_bmap_check_leaf_extents( | |||
392 | pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes); | 392 | pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes); |
393 | bno = be64_to_cpu(*pp); | 393 | bno = be64_to_cpu(*pp); |
394 | 394 | ||
395 | ASSERT(bno != NULLDFSBNO); | 395 | ASSERT(bno != NULLFSBLOCK); |
396 | ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount); | 396 | ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount); |
397 | ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks); | 397 | ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks); |
398 | 398 | ||
@@ -1033,7 +1033,7 @@ xfs_bmap_add_attrfork_btree( | |||
1033 | goto error0; | 1033 | goto error0; |
1034 | if (stat == 0) { | 1034 | if (stat == 0) { |
1035 | xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); | 1035 | xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); |
1036 | return XFS_ERROR(ENOSPC); | 1036 | return -ENOSPC; |
1037 | } | 1037 | } |
1038 | *firstblock = cur->bc_private.b.firstblock; | 1038 | *firstblock = cur->bc_private.b.firstblock; |
1039 | cur->bc_private.b.allocated = 0; | 1039 | cur->bc_private.b.allocated = 0; |
@@ -1115,7 +1115,7 @@ xfs_bmap_add_attrfork_local( | |||
1115 | 1115 | ||
1116 | /* should only be called for types that support local format data */ | 1116 | /* should only be called for types that support local format data */ |
1117 | ASSERT(0); | 1117 | ASSERT(0); |
1118 | return EFSCORRUPTED; | 1118 | return -EFSCORRUPTED; |
1119 | } | 1119 | } |
1120 | 1120 | ||
1121 | /* | 1121 | /* |
@@ -1192,7 +1192,7 @@ xfs_bmap_add_attrfork( | |||
1192 | break; | 1192 | break; |
1193 | default: | 1193 | default: |
1194 | ASSERT(0); | 1194 | ASSERT(0); |
1195 | error = XFS_ERROR(EINVAL); | 1195 | error = -EINVAL; |
1196 | goto trans_cancel; | 1196 | goto trans_cancel; |
1197 | } | 1197 | } |
1198 | 1198 | ||
@@ -1299,7 +1299,7 @@ xfs_bmap_read_extents( | |||
1299 | ASSERT(level > 0); | 1299 | ASSERT(level > 0); |
1300 | pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes); | 1300 | pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes); |
1301 | bno = be64_to_cpu(*pp); | 1301 | bno = be64_to_cpu(*pp); |
1302 | ASSERT(bno != NULLDFSBNO); | 1302 | ASSERT(bno != NULLFSBLOCK); |
1303 | ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount); | 1303 | ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount); |
1304 | ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks); | 1304 | ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks); |
1305 | /* | 1305 | /* |
@@ -1399,7 +1399,7 @@ xfs_bmap_read_extents( | |||
1399 | return 0; | 1399 | return 0; |
1400 | error0: | 1400 | error0: |
1401 | xfs_trans_brelse(tp, bp); | 1401 | xfs_trans_brelse(tp, bp); |
1402 | return XFS_ERROR(EFSCORRUPTED); | 1402 | return -EFSCORRUPTED; |
1403 | } | 1403 | } |
1404 | 1404 | ||
1405 | 1405 | ||
@@ -1429,11 +1429,7 @@ xfs_bmap_search_multi_extents( | |||
1429 | gotp->br_startoff = 0xffa5a5a5a5a5a5a5LL; | 1429 | gotp->br_startoff = 0xffa5a5a5a5a5a5a5LL; |
1430 | gotp->br_blockcount = 0xa55a5a5a5a5a5a5aLL; | 1430 | gotp->br_blockcount = 0xa55a5a5a5a5a5a5aLL; |
1431 | gotp->br_state = XFS_EXT_INVALID; | 1431 | gotp->br_state = XFS_EXT_INVALID; |
1432 | #if XFS_BIG_BLKNOS | ||
1433 | gotp->br_startblock = 0xffffa5a5a5a5a5a5LL; | 1432 | gotp->br_startblock = 0xffffa5a5a5a5a5a5LL; |
1434 | #else | ||
1435 | gotp->br_startblock = 0xffffa5a5; | ||
1436 | #endif | ||
1437 | prevp->br_startoff = NULLFILEOFF; | 1433 | prevp->br_startoff = NULLFILEOFF; |
1438 | 1434 | ||
1439 | ep = xfs_iext_bno_to_ext(ifp, bno, &lastx); | 1435 | ep = xfs_iext_bno_to_ext(ifp, bno, &lastx); |
@@ -1576,7 +1572,7 @@ xfs_bmap_last_before( | |||
1576 | if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE && | 1572 | if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE && |
1577 | XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS && | 1573 | XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS && |
1578 | XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL) | 1574 | XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL) |
1579 | return XFS_ERROR(EIO); | 1575 | return -EIO; |
1580 | if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) { | 1576 | if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) { |
1581 | *last_block = 0; | 1577 | *last_block = 0; |
1582 | return 0; | 1578 | return 0; |
@@ -1690,7 +1686,7 @@ xfs_bmap_last_offset( | |||
1690 | 1686 | ||
1691 | if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE && | 1687 | if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE && |
1692 | XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS) | 1688 | XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS) |
1693 | return XFS_ERROR(EIO); | 1689 | return -EIO; |
1694 | 1690 | ||
1695 | error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, &is_empty); | 1691 | error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, &is_empty); |
1696 | if (error || is_empty) | 1692 | if (error || is_empty) |
@@ -3323,7 +3319,7 @@ xfs_bmap_extsize_align( | |||
3323 | if (orig_off < align_off || | 3319 | if (orig_off < align_off || |
3324 | orig_end > align_off + align_alen || | 3320 | orig_end > align_off + align_alen || |
3325 | align_alen - temp < orig_alen) | 3321 | align_alen - temp < orig_alen) |
3326 | return XFS_ERROR(EINVAL); | 3322 | return -EINVAL; |
3327 | /* | 3323 | /* |
3328 | * Try to fix it by moving the start up. | 3324 | * Try to fix it by moving the start up. |
3329 | */ | 3325 | */ |
@@ -3348,7 +3344,7 @@ xfs_bmap_extsize_align( | |||
3348 | * Result doesn't cover the request, fail it. | 3344 | * Result doesn't cover the request, fail it. |
3349 | */ | 3345 | */ |
3350 | if (orig_off < align_off || orig_end > align_off + align_alen) | 3346 | if (orig_off < align_off || orig_end > align_off + align_alen) |
3351 | return XFS_ERROR(EINVAL); | 3347 | return -EINVAL; |
3352 | } else { | 3348 | } else { |
3353 | ASSERT(orig_off >= align_off); | 3349 | ASSERT(orig_off >= align_off); |
3354 | ASSERT(orig_end <= align_off + align_alen); | 3350 | ASSERT(orig_end <= align_off + align_alen); |
@@ -4051,11 +4047,11 @@ xfs_bmapi_read( | |||
4051 | XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE), | 4047 | XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE), |
4052 | mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) { | 4048 | mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) { |
4053 | XFS_ERROR_REPORT("xfs_bmapi_read", XFS_ERRLEVEL_LOW, mp); | 4049 | XFS_ERROR_REPORT("xfs_bmapi_read", XFS_ERRLEVEL_LOW, mp); |
4054 | return XFS_ERROR(EFSCORRUPTED); | 4050 | return -EFSCORRUPTED; |
4055 | } | 4051 | } |
4056 | 4052 | ||
4057 | if (XFS_FORCED_SHUTDOWN(mp)) | 4053 | if (XFS_FORCED_SHUTDOWN(mp)) |
4058 | return XFS_ERROR(EIO); | 4054 | return -EIO; |
4059 | 4055 | ||
4060 | XFS_STATS_INC(xs_blk_mapr); | 4056 | XFS_STATS_INC(xs_blk_mapr); |
4061 | 4057 | ||
@@ -4246,11 +4242,11 @@ xfs_bmapi_delay( | |||
4246 | XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_BTREE), | 4242 | XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_BTREE), |
4247 | mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) { | 4243 | mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) { |
4248 | XFS_ERROR_REPORT("xfs_bmapi_delay", XFS_ERRLEVEL_LOW, mp); | 4244 | XFS_ERROR_REPORT("xfs_bmapi_delay", XFS_ERRLEVEL_LOW, mp); |
4249 | return XFS_ERROR(EFSCORRUPTED); | 4245 | return -EFSCORRUPTED; |
4250 | } | 4246 | } |
4251 | 4247 | ||
4252 | if (XFS_FORCED_SHUTDOWN(mp)) | 4248 | if (XFS_FORCED_SHUTDOWN(mp)) |
4253 | return XFS_ERROR(EIO); | 4249 | return -EIO; |
4254 | 4250 | ||
4255 | XFS_STATS_INC(xs_blk_mapw); | 4251 | XFS_STATS_INC(xs_blk_mapw); |
4256 | 4252 | ||
@@ -4469,7 +4465,7 @@ xfs_bmapi_convert_unwritten( | |||
4469 | * so generate another request. | 4465 | * so generate another request. |
4470 | */ | 4466 | */ |
4471 | if (mval->br_blockcount < len) | 4467 | if (mval->br_blockcount < len) |
4472 | return EAGAIN; | 4468 | return -EAGAIN; |
4473 | return 0; | 4469 | return 0; |
4474 | } | 4470 | } |
4475 | 4471 | ||
@@ -4540,11 +4536,11 @@ xfs_bmapi_write( | |||
4540 | XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE), | 4536 | XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE), |
4541 | mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) { | 4537 | mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) { |
4542 | XFS_ERROR_REPORT("xfs_bmapi_write", XFS_ERRLEVEL_LOW, mp); | 4538 | XFS_ERROR_REPORT("xfs_bmapi_write", XFS_ERRLEVEL_LOW, mp); |
4543 | return XFS_ERROR(EFSCORRUPTED); | 4539 | return -EFSCORRUPTED; |
4544 | } | 4540 | } |
4545 | 4541 | ||
4546 | if (XFS_FORCED_SHUTDOWN(mp)) | 4542 | if (XFS_FORCED_SHUTDOWN(mp)) |
4547 | return XFS_ERROR(EIO); | 4543 | return -EIO; |
4548 | 4544 | ||
4549 | ifp = XFS_IFORK_PTR(ip, whichfork); | 4545 | ifp = XFS_IFORK_PTR(ip, whichfork); |
4550 | 4546 | ||
@@ -4620,7 +4616,7 @@ xfs_bmapi_write( | |||
4620 | 4616 | ||
4621 | /* Execute unwritten extent conversion if necessary */ | 4617 | /* Execute unwritten extent conversion if necessary */ |
4622 | error = xfs_bmapi_convert_unwritten(&bma, mval, len, flags); | 4618 | error = xfs_bmapi_convert_unwritten(&bma, mval, len, flags); |
4623 | if (error == EAGAIN) | 4619 | if (error == -EAGAIN) |
4624 | continue; | 4620 | continue; |
4625 | if (error) | 4621 | if (error) |
4626 | goto error0; | 4622 | goto error0; |
@@ -4922,7 +4918,7 @@ xfs_bmap_del_extent( | |||
4922 | goto done; | 4918 | goto done; |
4923 | cur->bc_rec.b = new; | 4919 | cur->bc_rec.b = new; |
4924 | error = xfs_btree_insert(cur, &i); | 4920 | error = xfs_btree_insert(cur, &i); |
4925 | if (error && error != ENOSPC) | 4921 | if (error && error != -ENOSPC) |
4926 | goto done; | 4922 | goto done; |
4927 | /* | 4923 | /* |
4928 | * If get no-space back from btree insert, | 4924 | * If get no-space back from btree insert, |
@@ -4930,7 +4926,7 @@ xfs_bmap_del_extent( | |||
4930 | * block reservation. | 4926 | * block reservation. |
4931 | * Fix up our state and return the error. | 4927 | * Fix up our state and return the error. |
4932 | */ | 4928 | */ |
4933 | if (error == ENOSPC) { | 4929 | if (error == -ENOSPC) { |
4934 | /* | 4930 | /* |
4935 | * Reset the cursor, don't trust | 4931 | * Reset the cursor, don't trust |
4936 | * it after any insert operation. | 4932 | * it after any insert operation. |
@@ -4958,7 +4954,7 @@ xfs_bmap_del_extent( | |||
4958 | xfs_bmbt_set_blockcount(ep, | 4954 | xfs_bmbt_set_blockcount(ep, |
4959 | got.br_blockcount); | 4955 | got.br_blockcount); |
4960 | flags = 0; | 4956 | flags = 0; |
4961 | error = XFS_ERROR(ENOSPC); | 4957 | error = -ENOSPC; |
4962 | goto done; | 4958 | goto done; |
4963 | } | 4959 | } |
4964 | XFS_WANT_CORRUPTED_GOTO(i == 1, done); | 4960 | XFS_WANT_CORRUPTED_GOTO(i == 1, done); |
@@ -5076,11 +5072,11 @@ xfs_bunmapi( | |||
5076 | XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) { | 5072 | XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) { |
5077 | XFS_ERROR_REPORT("xfs_bunmapi", XFS_ERRLEVEL_LOW, | 5073 | XFS_ERROR_REPORT("xfs_bunmapi", XFS_ERRLEVEL_LOW, |
5078 | ip->i_mount); | 5074 | ip->i_mount); |
5079 | return XFS_ERROR(EFSCORRUPTED); | 5075 | return -EFSCORRUPTED; |
5080 | } | 5076 | } |
5081 | mp = ip->i_mount; | 5077 | mp = ip->i_mount; |
5082 | if (XFS_FORCED_SHUTDOWN(mp)) | 5078 | if (XFS_FORCED_SHUTDOWN(mp)) |
5083 | return XFS_ERROR(EIO); | 5079 | return -EIO; |
5084 | 5080 | ||
5085 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); | 5081 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); |
5086 | ASSERT(len > 0); | 5082 | ASSERT(len > 0); |
@@ -5325,7 +5321,7 @@ xfs_bunmapi( | |||
5325 | del.br_startoff > got.br_startoff && | 5321 | del.br_startoff > got.br_startoff && |
5326 | del.br_startoff + del.br_blockcount < | 5322 | del.br_startoff + del.br_blockcount < |
5327 | got.br_startoff + got.br_blockcount) { | 5323 | got.br_startoff + got.br_blockcount) { |
5328 | error = XFS_ERROR(ENOSPC); | 5324 | error = -ENOSPC; |
5329 | goto error0; | 5325 | goto error0; |
5330 | } | 5326 | } |
5331 | error = xfs_bmap_del_extent(ip, tp, &lastx, flist, cur, &del, | 5327 | error = xfs_bmap_del_extent(ip, tp, &lastx, flist, cur, &del, |
@@ -5449,11 +5445,11 @@ xfs_bmap_shift_extents( | |||
5449 | mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) { | 5445 | mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) { |
5450 | XFS_ERROR_REPORT("xfs_bmap_shift_extents", | 5446 | XFS_ERROR_REPORT("xfs_bmap_shift_extents", |
5451 | XFS_ERRLEVEL_LOW, mp); | 5447 | XFS_ERRLEVEL_LOW, mp); |
5452 | return XFS_ERROR(EFSCORRUPTED); | 5448 | return -EFSCORRUPTED; |
5453 | } | 5449 | } |
5454 | 5450 | ||
5455 | if (XFS_FORCED_SHUTDOWN(mp)) | 5451 | if (XFS_FORCED_SHUTDOWN(mp)) |
5456 | return XFS_ERROR(EIO); | 5452 | return -EIO; |
5457 | 5453 | ||
5458 | ASSERT(current_ext != NULL); | 5454 | ASSERT(current_ext != NULL); |
5459 | 5455 | ||
@@ -5516,14 +5512,14 @@ xfs_bmap_shift_extents( | |||
5516 | *current_ext - 1), &left); | 5512 | *current_ext - 1), &left); |
5517 | 5513 | ||
5518 | if (startoff < left.br_startoff + left.br_blockcount) | 5514 | if (startoff < left.br_startoff + left.br_blockcount) |
5519 | error = XFS_ERROR(EINVAL); | 5515 | error = -EINVAL; |
5520 | } else if (offset_shift_fsb > got.br_startoff) { | 5516 | } else if (offset_shift_fsb > got.br_startoff) { |
5521 | /* | 5517 | /* |
5522 | * When first extent is shifted, offset_shift_fsb | 5518 | * When first extent is shifted, offset_shift_fsb |
5523 | * should be less than the starting offset of | 5519 | * should be less than the starting offset of |
5524 | * the first extent. | 5520 | * the first extent. |
5525 | */ | 5521 | */ |
5526 | error = XFS_ERROR(EINVAL); | 5522 | error = -EINVAL; |
5527 | } | 5523 | } |
5528 | 5524 | ||
5529 | if (error) | 5525 | if (error) |
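A minimal, userspace-compilable sketch of the errno convention the xfs_bmap.c hunks above converge on (the demo_* names are invented for illustration only): routines return negative errno values directly instead of wrapping positive ones in XFS_ERROR(), and callers compare against the negative constants.

#include <errno.h>
#include <stdio.h>

static int demo_btree_insert(int have_space)
{
	if (!have_space)
		return -ENOSPC;		/* was: return XFS_ERROR(ENOSPC); */
	return 0;
}

static int demo_del_extent(int have_space)
{
	int error = demo_btree_insert(have_space);

	if (error && error != -ENOSPC)	/* was: error != ENOSPC */
		return error;		/* unexpected failure: propagate */
	if (error == -ENOSPC)		/* was: error == ENOSPC */
		return -ENOSPC;		/* expected: report no-space as-is */
	return 0;
}

int main(void)
{
	printf("with space: %d, without space: %d\n",
	       demo_del_extent(1), demo_del_extent(0));
	return 0;
}

XFS historically used positive errnos internally and translated at the VFS boundary; returning negative values everywhere matches the rest of the kernel and lets the XFS_ERROR() wrapper be retired.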
diff --git a/fs/xfs/xfs_bmap.h b/fs/xfs/libxfs/xfs_bmap.h index b879ca56a64c..b879ca56a64c 100644 --- a/fs/xfs/xfs_bmap.h +++ b/fs/xfs/libxfs/xfs_bmap.h | |||
diff --git a/fs/xfs/xfs_bmap_btree.c b/fs/xfs/libxfs/xfs_bmap_btree.c index 948836c4fd90..fba753308f31 100644 --- a/fs/xfs/xfs_bmap_btree.c +++ b/fs/xfs/libxfs/xfs_bmap_btree.c | |||
@@ -111,23 +111,8 @@ __xfs_bmbt_get_all( | |||
111 | ext_flag = (int)(l0 >> (64 - BMBT_EXNTFLAG_BITLEN)); | 111 | ext_flag = (int)(l0 >> (64 - BMBT_EXNTFLAG_BITLEN)); |
112 | s->br_startoff = ((xfs_fileoff_t)l0 & | 112 | s->br_startoff = ((xfs_fileoff_t)l0 & |
113 | xfs_mask64lo(64 - BMBT_EXNTFLAG_BITLEN)) >> 9; | 113 | xfs_mask64lo(64 - BMBT_EXNTFLAG_BITLEN)) >> 9; |
114 | #if XFS_BIG_BLKNOS | ||
115 | s->br_startblock = (((xfs_fsblock_t)l0 & xfs_mask64lo(9)) << 43) | | 114 | s->br_startblock = (((xfs_fsblock_t)l0 & xfs_mask64lo(9)) << 43) | |
116 | (((xfs_fsblock_t)l1) >> 21); | 115 | (((xfs_fsblock_t)l1) >> 21); |
117 | #else | ||
118 | #ifdef DEBUG | ||
119 | { | ||
120 | xfs_dfsbno_t b; | ||
121 | |||
122 | b = (((xfs_dfsbno_t)l0 & xfs_mask64lo(9)) << 43) | | ||
123 | (((xfs_dfsbno_t)l1) >> 21); | ||
124 | ASSERT((b >> 32) == 0 || isnulldstartblock(b)); | ||
125 | s->br_startblock = (xfs_fsblock_t)b; | ||
126 | } | ||
127 | #else /* !DEBUG */ | ||
128 | s->br_startblock = (xfs_fsblock_t)(((xfs_dfsbno_t)l1) >> 21); | ||
129 | #endif /* DEBUG */ | ||
130 | #endif /* XFS_BIG_BLKNOS */ | ||
131 | s->br_blockcount = (xfs_filblks_t)(l1 & xfs_mask64lo(21)); | 116 | s->br_blockcount = (xfs_filblks_t)(l1 & xfs_mask64lo(21)); |
132 | /* This is xfs_extent_state() in-line */ | 117 | /* This is xfs_extent_state() in-line */ |
133 | if (ext_flag) { | 118 | if (ext_flag) { |
@@ -163,21 +148,8 @@ xfs_fsblock_t | |||
163 | xfs_bmbt_get_startblock( | 148 | xfs_bmbt_get_startblock( |
164 | xfs_bmbt_rec_host_t *r) | 149 | xfs_bmbt_rec_host_t *r) |
165 | { | 150 | { |
166 | #if XFS_BIG_BLKNOS | ||
167 | return (((xfs_fsblock_t)r->l0 & xfs_mask64lo(9)) << 43) | | 151 | return (((xfs_fsblock_t)r->l0 & xfs_mask64lo(9)) << 43) | |
168 | (((xfs_fsblock_t)r->l1) >> 21); | 152 | (((xfs_fsblock_t)r->l1) >> 21); |
169 | #else | ||
170 | #ifdef DEBUG | ||
171 | xfs_dfsbno_t b; | ||
172 | |||
173 | b = (((xfs_dfsbno_t)r->l0 & xfs_mask64lo(9)) << 43) | | ||
174 | (((xfs_dfsbno_t)r->l1) >> 21); | ||
175 | ASSERT((b >> 32) == 0 || isnulldstartblock(b)); | ||
176 | return (xfs_fsblock_t)b; | ||
177 | #else /* !DEBUG */ | ||
178 | return (xfs_fsblock_t)(((xfs_dfsbno_t)r->l1) >> 21); | ||
179 | #endif /* DEBUG */ | ||
180 | #endif /* XFS_BIG_BLKNOS */ | ||
181 | } | 153 | } |
182 | 154 | ||
183 | /* | 155 | /* |
@@ -241,7 +213,6 @@ xfs_bmbt_set_allf( | |||
241 | ASSERT((startoff & xfs_mask64hi(64-BMBT_STARTOFF_BITLEN)) == 0); | 213 | ASSERT((startoff & xfs_mask64hi(64-BMBT_STARTOFF_BITLEN)) == 0); |
242 | ASSERT((blockcount & xfs_mask64hi(64-BMBT_BLOCKCOUNT_BITLEN)) == 0); | 214 | ASSERT((blockcount & xfs_mask64hi(64-BMBT_BLOCKCOUNT_BITLEN)) == 0); |
243 | 215 | ||
244 | #if XFS_BIG_BLKNOS | ||
245 | ASSERT((startblock & xfs_mask64hi(64-BMBT_STARTBLOCK_BITLEN)) == 0); | 216 | ASSERT((startblock & xfs_mask64hi(64-BMBT_STARTBLOCK_BITLEN)) == 0); |
246 | 217 | ||
247 | r->l0 = ((xfs_bmbt_rec_base_t)extent_flag << 63) | | 218 | r->l0 = ((xfs_bmbt_rec_base_t)extent_flag << 63) | |
@@ -250,23 +221,6 @@ xfs_bmbt_set_allf( | |||
250 | r->l1 = ((xfs_bmbt_rec_base_t)startblock << 21) | | 221 | r->l1 = ((xfs_bmbt_rec_base_t)startblock << 21) | |
251 | ((xfs_bmbt_rec_base_t)blockcount & | 222 | ((xfs_bmbt_rec_base_t)blockcount & |
252 | (xfs_bmbt_rec_base_t)xfs_mask64lo(21)); | 223 | (xfs_bmbt_rec_base_t)xfs_mask64lo(21)); |
253 | #else /* !XFS_BIG_BLKNOS */ | ||
254 | if (isnullstartblock(startblock)) { | ||
255 | r->l0 = ((xfs_bmbt_rec_base_t)extent_flag << 63) | | ||
256 | ((xfs_bmbt_rec_base_t)startoff << 9) | | ||
257 | (xfs_bmbt_rec_base_t)xfs_mask64lo(9); | ||
258 | r->l1 = xfs_mask64hi(11) | | ||
259 | ((xfs_bmbt_rec_base_t)startblock << 21) | | ||
260 | ((xfs_bmbt_rec_base_t)blockcount & | ||
261 | (xfs_bmbt_rec_base_t)xfs_mask64lo(21)); | ||
262 | } else { | ||
263 | r->l0 = ((xfs_bmbt_rec_base_t)extent_flag << 63) | | ||
264 | ((xfs_bmbt_rec_base_t)startoff << 9); | ||
265 | r->l1 = ((xfs_bmbt_rec_base_t)startblock << 21) | | ||
266 | ((xfs_bmbt_rec_base_t)blockcount & | ||
267 | (xfs_bmbt_rec_base_t)xfs_mask64lo(21)); | ||
268 | } | ||
269 | #endif /* XFS_BIG_BLKNOS */ | ||
270 | } | 224 | } |
271 | 225 | ||
272 | /* | 226 | /* |
@@ -298,8 +252,6 @@ xfs_bmbt_disk_set_allf( | |||
298 | ASSERT(state == XFS_EXT_NORM || state == XFS_EXT_UNWRITTEN); | 252 | ASSERT(state == XFS_EXT_NORM || state == XFS_EXT_UNWRITTEN); |
299 | ASSERT((startoff & xfs_mask64hi(64-BMBT_STARTOFF_BITLEN)) == 0); | 253 | ASSERT((startoff & xfs_mask64hi(64-BMBT_STARTOFF_BITLEN)) == 0); |
300 | ASSERT((blockcount & xfs_mask64hi(64-BMBT_BLOCKCOUNT_BITLEN)) == 0); | 254 | ASSERT((blockcount & xfs_mask64hi(64-BMBT_BLOCKCOUNT_BITLEN)) == 0); |
301 | |||
302 | #if XFS_BIG_BLKNOS | ||
303 | ASSERT((startblock & xfs_mask64hi(64-BMBT_STARTBLOCK_BITLEN)) == 0); | 255 | ASSERT((startblock & xfs_mask64hi(64-BMBT_STARTBLOCK_BITLEN)) == 0); |
304 | 256 | ||
305 | r->l0 = cpu_to_be64( | 257 | r->l0 = cpu_to_be64( |
@@ -310,26 +262,6 @@ xfs_bmbt_disk_set_allf( | |||
310 | ((xfs_bmbt_rec_base_t)startblock << 21) | | 262 | ((xfs_bmbt_rec_base_t)startblock << 21) | |
311 | ((xfs_bmbt_rec_base_t)blockcount & | 263 | ((xfs_bmbt_rec_base_t)blockcount & |
312 | (xfs_bmbt_rec_base_t)xfs_mask64lo(21))); | 264 | (xfs_bmbt_rec_base_t)xfs_mask64lo(21))); |
313 | #else /* !XFS_BIG_BLKNOS */ | ||
314 | if (isnullstartblock(startblock)) { | ||
315 | r->l0 = cpu_to_be64( | ||
316 | ((xfs_bmbt_rec_base_t)extent_flag << 63) | | ||
317 | ((xfs_bmbt_rec_base_t)startoff << 9) | | ||
318 | (xfs_bmbt_rec_base_t)xfs_mask64lo(9)); | ||
319 | r->l1 = cpu_to_be64(xfs_mask64hi(11) | | ||
320 | ((xfs_bmbt_rec_base_t)startblock << 21) | | ||
321 | ((xfs_bmbt_rec_base_t)blockcount & | ||
322 | (xfs_bmbt_rec_base_t)xfs_mask64lo(21))); | ||
323 | } else { | ||
324 | r->l0 = cpu_to_be64( | ||
325 | ((xfs_bmbt_rec_base_t)extent_flag << 63) | | ||
326 | ((xfs_bmbt_rec_base_t)startoff << 9)); | ||
327 | r->l1 = cpu_to_be64( | ||
328 | ((xfs_bmbt_rec_base_t)startblock << 21) | | ||
329 | ((xfs_bmbt_rec_base_t)blockcount & | ||
330 | (xfs_bmbt_rec_base_t)xfs_mask64lo(21))); | ||
331 | } | ||
332 | #endif /* XFS_BIG_BLKNOS */ | ||
333 | } | 265 | } |
334 | 266 | ||
335 | /* | 267 | /* |
@@ -365,24 +297,11 @@ xfs_bmbt_set_startblock( | |||
365 | xfs_bmbt_rec_host_t *r, | 297 | xfs_bmbt_rec_host_t *r, |
366 | xfs_fsblock_t v) | 298 | xfs_fsblock_t v) |
367 | { | 299 | { |
368 | #if XFS_BIG_BLKNOS | ||
369 | ASSERT((v & xfs_mask64hi(12)) == 0); | 300 | ASSERT((v & xfs_mask64hi(12)) == 0); |
370 | r->l0 = (r->l0 & (xfs_bmbt_rec_base_t)xfs_mask64hi(55)) | | 301 | r->l0 = (r->l0 & (xfs_bmbt_rec_base_t)xfs_mask64hi(55)) | |
371 | (xfs_bmbt_rec_base_t)(v >> 43); | 302 | (xfs_bmbt_rec_base_t)(v >> 43); |
372 | r->l1 = (r->l1 & (xfs_bmbt_rec_base_t)xfs_mask64lo(21)) | | 303 | r->l1 = (r->l1 & (xfs_bmbt_rec_base_t)xfs_mask64lo(21)) | |
373 | (xfs_bmbt_rec_base_t)(v << 21); | 304 | (xfs_bmbt_rec_base_t)(v << 21); |
374 | #else /* !XFS_BIG_BLKNOS */ | ||
375 | if (isnullstartblock(v)) { | ||
376 | r->l0 |= (xfs_bmbt_rec_base_t)xfs_mask64lo(9); | ||
377 | r->l1 = (xfs_bmbt_rec_base_t)xfs_mask64hi(11) | | ||
378 | ((xfs_bmbt_rec_base_t)v << 21) | | ||
379 | (r->l1 & (xfs_bmbt_rec_base_t)xfs_mask64lo(21)); | ||
380 | } else { | ||
381 | r->l0 &= ~(xfs_bmbt_rec_base_t)xfs_mask64lo(9); | ||
382 | r->l1 = ((xfs_bmbt_rec_base_t)v << 21) | | ||
383 | (r->l1 & (xfs_bmbt_rec_base_t)xfs_mask64lo(21)); | ||
384 | } | ||
385 | #endif /* XFS_BIG_BLKNOS */ | ||
386 | } | 305 | } |
387 | 306 | ||
388 | /* | 307 | /* |
@@ -438,8 +357,8 @@ xfs_bmbt_to_bmdr( | |||
438 | cpu_to_be64(XFS_BUF_DADDR_NULL)); | 357 | cpu_to_be64(XFS_BUF_DADDR_NULL)); |
439 | } else | 358 | } else |
440 | ASSERT(rblock->bb_magic == cpu_to_be32(XFS_BMAP_MAGIC)); | 359 | ASSERT(rblock->bb_magic == cpu_to_be32(XFS_BMAP_MAGIC)); |
441 | ASSERT(rblock->bb_u.l.bb_leftsib == cpu_to_be64(NULLDFSBNO)); | 360 | ASSERT(rblock->bb_u.l.bb_leftsib == cpu_to_be64(NULLFSBLOCK)); |
442 | ASSERT(rblock->bb_u.l.bb_rightsib == cpu_to_be64(NULLDFSBNO)); | 361 | ASSERT(rblock->bb_u.l.bb_rightsib == cpu_to_be64(NULLFSBLOCK)); |
443 | ASSERT(rblock->bb_level != 0); | 362 | ASSERT(rblock->bb_level != 0); |
444 | dblock->bb_level = rblock->bb_level; | 363 | dblock->bb_level = rblock->bb_level; |
445 | dblock->bb_numrecs = rblock->bb_numrecs; | 364 | dblock->bb_numrecs = rblock->bb_numrecs; |
@@ -554,7 +473,7 @@ xfs_bmbt_alloc_block( | |||
554 | args.minlen = args.maxlen = args.prod = 1; | 473 | args.minlen = args.maxlen = args.prod = 1; |
555 | args.wasdel = cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL; | 474 | args.wasdel = cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL; |
556 | if (!args.wasdel && xfs_trans_get_block_res(args.tp) == 0) { | 475 | if (!args.wasdel && xfs_trans_get_block_res(args.tp) == 0) { |
557 | error = XFS_ERROR(ENOSPC); | 476 | error = -ENOSPC; |
558 | goto error0; | 477 | goto error0; |
559 | } | 478 | } |
560 | error = xfs_alloc_vextent(&args); | 479 | error = xfs_alloc_vextent(&args); |
@@ -763,11 +682,11 @@ xfs_bmbt_verify( | |||
763 | 682 | ||
764 | /* sibling pointer verification */ | 683 | /* sibling pointer verification */ |
765 | if (!block->bb_u.l.bb_leftsib || | 684 | if (!block->bb_u.l.bb_leftsib || |
766 | (block->bb_u.l.bb_leftsib != cpu_to_be64(NULLDFSBNO) && | 685 | (block->bb_u.l.bb_leftsib != cpu_to_be64(NULLFSBLOCK) && |
767 | !XFS_FSB_SANITY_CHECK(mp, be64_to_cpu(block->bb_u.l.bb_leftsib)))) | 686 | !XFS_FSB_SANITY_CHECK(mp, be64_to_cpu(block->bb_u.l.bb_leftsib)))) |
768 | return false; | 687 | return false; |
769 | if (!block->bb_u.l.bb_rightsib || | 688 | if (!block->bb_u.l.bb_rightsib || |
770 | (block->bb_u.l.bb_rightsib != cpu_to_be64(NULLDFSBNO) && | 689 | (block->bb_u.l.bb_rightsib != cpu_to_be64(NULLFSBLOCK) && |
771 | !XFS_FSB_SANITY_CHECK(mp, be64_to_cpu(block->bb_u.l.bb_rightsib)))) | 690 | !XFS_FSB_SANITY_CHECK(mp, be64_to_cpu(block->bb_u.l.bb_rightsib)))) |
772 | return false; | 691 | return false; |
773 | 692 | ||
@@ -779,9 +698,9 @@ xfs_bmbt_read_verify( | |||
779 | struct xfs_buf *bp) | 698 | struct xfs_buf *bp) |
780 | { | 699 | { |
781 | if (!xfs_btree_lblock_verify_crc(bp)) | 700 | if (!xfs_btree_lblock_verify_crc(bp)) |
782 | xfs_buf_ioerror(bp, EFSBADCRC); | 701 | xfs_buf_ioerror(bp, -EFSBADCRC); |
783 | else if (!xfs_bmbt_verify(bp)) | 702 | else if (!xfs_bmbt_verify(bp)) |
784 | xfs_buf_ioerror(bp, EFSCORRUPTED); | 703 | xfs_buf_ioerror(bp, -EFSCORRUPTED); |
785 | 704 | ||
786 | if (bp->b_error) { | 705 | if (bp->b_error) { |
787 | trace_xfs_btree_corrupt(bp, _RET_IP_); | 706 | trace_xfs_btree_corrupt(bp, _RET_IP_); |
@@ -795,7 +714,7 @@ xfs_bmbt_write_verify( | |||
795 | { | 714 | { |
796 | if (!xfs_bmbt_verify(bp)) { | 715 | if (!xfs_bmbt_verify(bp)) { |
797 | trace_xfs_btree_corrupt(bp, _RET_IP_); | 716 | trace_xfs_btree_corrupt(bp, _RET_IP_); |
798 | xfs_buf_ioerror(bp, EFSCORRUPTED); | 717 | xfs_buf_ioerror(bp, -EFSCORRUPTED); |
799 | xfs_verifier_error(bp); | 718 | xfs_verifier_error(bp); |
800 | return; | 719 | return; |
801 | } | 720 | } |
@@ -959,7 +878,7 @@ xfs_bmbt_change_owner( | |||
959 | 878 | ||
960 | cur = xfs_bmbt_init_cursor(ip->i_mount, tp, ip, whichfork); | 879 | cur = xfs_bmbt_init_cursor(ip->i_mount, tp, ip, whichfork); |
961 | if (!cur) | 880 | if (!cur) |
962 | return ENOMEM; | 881 | return -ENOMEM; |
963 | 882 | ||
964 | error = xfs_btree_change_owner(cur, new_owner, buffer_list); | 883 | error = xfs_btree_change_owner(cur, new_owner, buffer_list); |
965 | xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR); | 884 | xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR); |
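A stand-alone sketch of the 128-bit bmbt extent record layout that the surviving code path above, formerly the XFS_BIG_BLKNOS side of the #ifdef, packs and unpacks; mask64lo() and demo_unpack() are invented stand-ins for the kernel's xfs_mask64lo() and __xfs_bmbt_get_all().

#include <stdint.h>
#include <stdio.h>

static uint64_t mask64lo(int n)
{
	return (1ULL << n) - 1;		/* low n bits set, n < 64 */
}

/*
 * l0: bit 63 = unwritten flag, bits 62..9 = startoff,
 *     bits 8..0 = high 9 bits of startblock
 * l1: bits 63..21 = low 43 bits of startblock, bits 20..0 = blockcount
 */
static void demo_unpack(uint64_t l0, uint64_t l1, uint64_t *startoff,
			uint64_t *startblock, uint64_t *blockcount,
			int *unwritten)
{
	*unwritten  = (int)(l0 >> 63);
	*startoff   = (l0 & mask64lo(63)) >> 9;
	*startblock = ((l0 & mask64lo(9)) << 43) | (l1 >> 21);
	*blockcount = l1 & mask64lo(21);
}

int main(void)
{
	uint64_t startblock = 0x0008000000000001ULL;	/* wider than 43 bits */
	uint64_t l0 = (100ULL << 9) | (startblock >> 43);
	uint64_t l1 = (startblock << 21) | 8;	/* top 9 bits live in l0 */
	uint64_t off, blk, cnt;
	int unwritten;

	demo_unpack(l0, l1, &off, &blk, &cnt, &unwritten);
	printf("startoff=%llu startblock=0x%llx blockcount=%llu unwritten=%d\n",
	       (unsigned long long)off, (unsigned long long)blk,
	       (unsigned long long)cnt, unwritten);
	return 0;
}

With only the 64-bit (XFS_BIG_BLKNOS) encoding left, the 32-bit fallback paths and their DEBUG range checks removed above are no longer needed.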
diff --git a/fs/xfs/xfs_bmap_btree.h b/fs/xfs/libxfs/xfs_bmap_btree.h index 819a8a4dee95..819a8a4dee95 100644 --- a/fs/xfs/xfs_bmap_btree.h +++ b/fs/xfs/libxfs/xfs_bmap_btree.h | |||
diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/libxfs/xfs_btree.c index cf893bc1e373..8fe6a93ff473 100644 --- a/fs/xfs/xfs_btree.c +++ b/fs/xfs/libxfs/xfs_btree.c | |||
@@ -78,11 +78,11 @@ xfs_btree_check_lblock( | |||
78 | be16_to_cpu(block->bb_numrecs) <= | 78 | be16_to_cpu(block->bb_numrecs) <= |
79 | cur->bc_ops->get_maxrecs(cur, level) && | 79 | cur->bc_ops->get_maxrecs(cur, level) && |
80 | block->bb_u.l.bb_leftsib && | 80 | block->bb_u.l.bb_leftsib && |
81 | (block->bb_u.l.bb_leftsib == cpu_to_be64(NULLDFSBNO) || | 81 | (block->bb_u.l.bb_leftsib == cpu_to_be64(NULLFSBLOCK) || |
82 | XFS_FSB_SANITY_CHECK(mp, | 82 | XFS_FSB_SANITY_CHECK(mp, |
83 | be64_to_cpu(block->bb_u.l.bb_leftsib))) && | 83 | be64_to_cpu(block->bb_u.l.bb_leftsib))) && |
84 | block->bb_u.l.bb_rightsib && | 84 | block->bb_u.l.bb_rightsib && |
85 | (block->bb_u.l.bb_rightsib == cpu_to_be64(NULLDFSBNO) || | 85 | (block->bb_u.l.bb_rightsib == cpu_to_be64(NULLFSBLOCK) || |
86 | XFS_FSB_SANITY_CHECK(mp, | 86 | XFS_FSB_SANITY_CHECK(mp, |
87 | be64_to_cpu(block->bb_u.l.bb_rightsib))); | 87 | be64_to_cpu(block->bb_u.l.bb_rightsib))); |
88 | 88 | ||
@@ -92,7 +92,7 @@ xfs_btree_check_lblock( | |||
92 | if (bp) | 92 | if (bp) |
93 | trace_xfs_btree_corrupt(bp, _RET_IP_); | 93 | trace_xfs_btree_corrupt(bp, _RET_IP_); |
94 | XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp); | 94 | XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp); |
95 | return XFS_ERROR(EFSCORRUPTED); | 95 | return -EFSCORRUPTED; |
96 | } | 96 | } |
97 | return 0; | 97 | return 0; |
98 | } | 98 | } |
@@ -140,7 +140,7 @@ xfs_btree_check_sblock( | |||
140 | if (bp) | 140 | if (bp) |
141 | trace_xfs_btree_corrupt(bp, _RET_IP_); | 141 | trace_xfs_btree_corrupt(bp, _RET_IP_); |
142 | XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp); | 142 | XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp); |
143 | return XFS_ERROR(EFSCORRUPTED); | 143 | return -EFSCORRUPTED; |
144 | } | 144 | } |
145 | return 0; | 145 | return 0; |
146 | } | 146 | } |
@@ -167,12 +167,12 @@ xfs_btree_check_block( | |||
167 | int /* error (0 or EFSCORRUPTED) */ | 167 | int /* error (0 or EFSCORRUPTED) */ |
168 | xfs_btree_check_lptr( | 168 | xfs_btree_check_lptr( |
169 | struct xfs_btree_cur *cur, /* btree cursor */ | 169 | struct xfs_btree_cur *cur, /* btree cursor */ |
170 | xfs_dfsbno_t bno, /* btree block disk address */ | 170 | xfs_fsblock_t bno, /* btree block disk address */ |
171 | int level) /* btree block level */ | 171 | int level) /* btree block level */ |
172 | { | 172 | { |
173 | XFS_WANT_CORRUPTED_RETURN( | 173 | XFS_WANT_CORRUPTED_RETURN( |
174 | level > 0 && | 174 | level > 0 && |
175 | bno != NULLDFSBNO && | 175 | bno != NULLFSBLOCK && |
176 | XFS_FSB_SANITY_CHECK(cur->bc_mp, bno)); | 176 | XFS_FSB_SANITY_CHECK(cur->bc_mp, bno)); |
177 | return 0; | 177 | return 0; |
178 | } | 178 | } |
@@ -595,7 +595,7 @@ xfs_btree_islastblock( | |||
595 | block = xfs_btree_get_block(cur, level, &bp); | 595 | block = xfs_btree_get_block(cur, level, &bp); |
596 | xfs_btree_check_block(cur, block, level, bp); | 596 | xfs_btree_check_block(cur, block, level, bp); |
597 | if (cur->bc_flags & XFS_BTREE_LONG_PTRS) | 597 | if (cur->bc_flags & XFS_BTREE_LONG_PTRS) |
598 | return block->bb_u.l.bb_rightsib == cpu_to_be64(NULLDFSBNO); | 598 | return block->bb_u.l.bb_rightsib == cpu_to_be64(NULLFSBLOCK); |
599 | else | 599 | else |
600 | return block->bb_u.s.bb_rightsib == cpu_to_be32(NULLAGBLOCK); | 600 | return block->bb_u.s.bb_rightsib == cpu_to_be32(NULLAGBLOCK); |
601 | } | 601 | } |
@@ -771,16 +771,16 @@ xfs_btree_readahead_lblock( | |||
771 | struct xfs_btree_block *block) | 771 | struct xfs_btree_block *block) |
772 | { | 772 | { |
773 | int rval = 0; | 773 | int rval = 0; |
774 | xfs_dfsbno_t left = be64_to_cpu(block->bb_u.l.bb_leftsib); | 774 | xfs_fsblock_t left = be64_to_cpu(block->bb_u.l.bb_leftsib); |
775 | xfs_dfsbno_t right = be64_to_cpu(block->bb_u.l.bb_rightsib); | 775 | xfs_fsblock_t right = be64_to_cpu(block->bb_u.l.bb_rightsib); |
776 | 776 | ||
777 | if ((lr & XFS_BTCUR_LEFTRA) && left != NULLDFSBNO) { | 777 | if ((lr & XFS_BTCUR_LEFTRA) && left != NULLFSBLOCK) { |
778 | xfs_btree_reada_bufl(cur->bc_mp, left, 1, | 778 | xfs_btree_reada_bufl(cur->bc_mp, left, 1, |
779 | cur->bc_ops->buf_ops); | 779 | cur->bc_ops->buf_ops); |
780 | rval++; | 780 | rval++; |
781 | } | 781 | } |
782 | 782 | ||
783 | if ((lr & XFS_BTCUR_RIGHTRA) && right != NULLDFSBNO) { | 783 | if ((lr & XFS_BTCUR_RIGHTRA) && right != NULLFSBLOCK) { |
784 | xfs_btree_reada_bufl(cur->bc_mp, right, 1, | 784 | xfs_btree_reada_bufl(cur->bc_mp, right, 1, |
785 | cur->bc_ops->buf_ops); | 785 | cur->bc_ops->buf_ops); |
786 | rval++; | 786 | rval++; |
@@ -852,7 +852,7 @@ xfs_btree_ptr_to_daddr( | |||
852 | union xfs_btree_ptr *ptr) | 852 | union xfs_btree_ptr *ptr) |
853 | { | 853 | { |
854 | if (cur->bc_flags & XFS_BTREE_LONG_PTRS) { | 854 | if (cur->bc_flags & XFS_BTREE_LONG_PTRS) { |
855 | ASSERT(ptr->l != cpu_to_be64(NULLDFSBNO)); | 855 | ASSERT(ptr->l != cpu_to_be64(NULLFSBLOCK)); |
856 | 856 | ||
857 | return XFS_FSB_TO_DADDR(cur->bc_mp, be64_to_cpu(ptr->l)); | 857 | return XFS_FSB_TO_DADDR(cur->bc_mp, be64_to_cpu(ptr->l)); |
858 | } else { | 858 | } else { |
@@ -900,9 +900,9 @@ xfs_btree_setbuf( | |||
900 | 900 | ||
901 | b = XFS_BUF_TO_BLOCK(bp); | 901 | b = XFS_BUF_TO_BLOCK(bp); |
902 | if (cur->bc_flags & XFS_BTREE_LONG_PTRS) { | 902 | if (cur->bc_flags & XFS_BTREE_LONG_PTRS) { |
903 | if (b->bb_u.l.bb_leftsib == cpu_to_be64(NULLDFSBNO)) | 903 | if (b->bb_u.l.bb_leftsib == cpu_to_be64(NULLFSBLOCK)) |
904 | cur->bc_ra[lev] |= XFS_BTCUR_LEFTRA; | 904 | cur->bc_ra[lev] |= XFS_BTCUR_LEFTRA; |
905 | if (b->bb_u.l.bb_rightsib == cpu_to_be64(NULLDFSBNO)) | 905 | if (b->bb_u.l.bb_rightsib == cpu_to_be64(NULLFSBLOCK)) |
906 | cur->bc_ra[lev] |= XFS_BTCUR_RIGHTRA; | 906 | cur->bc_ra[lev] |= XFS_BTCUR_RIGHTRA; |
907 | } else { | 907 | } else { |
908 | if (b->bb_u.s.bb_leftsib == cpu_to_be32(NULLAGBLOCK)) | 908 | if (b->bb_u.s.bb_leftsib == cpu_to_be32(NULLAGBLOCK)) |
@@ -918,7 +918,7 @@ xfs_btree_ptr_is_null( | |||
918 | union xfs_btree_ptr *ptr) | 918 | union xfs_btree_ptr *ptr) |
919 | { | 919 | { |
920 | if (cur->bc_flags & XFS_BTREE_LONG_PTRS) | 920 | if (cur->bc_flags & XFS_BTREE_LONG_PTRS) |
921 | return ptr->l == cpu_to_be64(NULLDFSBNO); | 921 | return ptr->l == cpu_to_be64(NULLFSBLOCK); |
922 | else | 922 | else |
923 | return ptr->s == cpu_to_be32(NULLAGBLOCK); | 923 | return ptr->s == cpu_to_be32(NULLAGBLOCK); |
924 | } | 924 | } |
@@ -929,7 +929,7 @@ xfs_btree_set_ptr_null( | |||
929 | union xfs_btree_ptr *ptr) | 929 | union xfs_btree_ptr *ptr) |
930 | { | 930 | { |
931 | if (cur->bc_flags & XFS_BTREE_LONG_PTRS) | 931 | if (cur->bc_flags & XFS_BTREE_LONG_PTRS) |
932 | ptr->l = cpu_to_be64(NULLDFSBNO); | 932 | ptr->l = cpu_to_be64(NULLFSBLOCK); |
933 | else | 933 | else |
934 | ptr->s = cpu_to_be32(NULLAGBLOCK); | 934 | ptr->s = cpu_to_be32(NULLAGBLOCK); |
935 | } | 935 | } |
@@ -997,8 +997,8 @@ xfs_btree_init_block_int( | |||
997 | buf->bb_numrecs = cpu_to_be16(numrecs); | 997 | buf->bb_numrecs = cpu_to_be16(numrecs); |
998 | 998 | ||
999 | if (flags & XFS_BTREE_LONG_PTRS) { | 999 | if (flags & XFS_BTREE_LONG_PTRS) { |
1000 | buf->bb_u.l.bb_leftsib = cpu_to_be64(NULLDFSBNO); | 1000 | buf->bb_u.l.bb_leftsib = cpu_to_be64(NULLFSBLOCK); |
1001 | buf->bb_u.l.bb_rightsib = cpu_to_be64(NULLDFSBNO); | 1001 | buf->bb_u.l.bb_rightsib = cpu_to_be64(NULLFSBLOCK); |
1002 | if (flags & XFS_BTREE_CRC_BLOCKS) { | 1002 | if (flags & XFS_BTREE_CRC_BLOCKS) { |
1003 | buf->bb_u.l.bb_blkno = cpu_to_be64(blkno); | 1003 | buf->bb_u.l.bb_blkno = cpu_to_be64(blkno); |
1004 | buf->bb_u.l.bb_owner = cpu_to_be64(owner); | 1004 | buf->bb_u.l.bb_owner = cpu_to_be64(owner); |
@@ -1140,7 +1140,7 @@ xfs_btree_get_buf_block( | |||
1140 | mp->m_bsize, flags); | 1140 | mp->m_bsize, flags); |
1141 | 1141 | ||
1142 | if (!*bpp) | 1142 | if (!*bpp) |
1143 | return ENOMEM; | 1143 | return -ENOMEM; |
1144 | 1144 | ||
1145 | (*bpp)->b_ops = cur->bc_ops->buf_ops; | 1145 | (*bpp)->b_ops = cur->bc_ops->buf_ops; |
1146 | *block = XFS_BUF_TO_BLOCK(*bpp); | 1146 | *block = XFS_BUF_TO_BLOCK(*bpp); |
@@ -1498,7 +1498,7 @@ xfs_btree_increment( | |||
1498 | if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) | 1498 | if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) |
1499 | goto out0; | 1499 | goto out0; |
1500 | ASSERT(0); | 1500 | ASSERT(0); |
1501 | error = EFSCORRUPTED; | 1501 | error = -EFSCORRUPTED; |
1502 | goto error0; | 1502 | goto error0; |
1503 | } | 1503 | } |
1504 | ASSERT(lev < cur->bc_nlevels); | 1504 | ASSERT(lev < cur->bc_nlevels); |
@@ -1597,7 +1597,7 @@ xfs_btree_decrement( | |||
1597 | if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) | 1597 | if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) |
1598 | goto out0; | 1598 | goto out0; |
1599 | ASSERT(0); | 1599 | ASSERT(0); |
1600 | error = EFSCORRUPTED; | 1600 | error = -EFSCORRUPTED; |
1601 | goto error0; | 1601 | goto error0; |
1602 | } | 1602 | } |
1603 | ASSERT(lev < cur->bc_nlevels); | 1603 | ASSERT(lev < cur->bc_nlevels); |
@@ -4018,7 +4018,7 @@ xfs_btree_block_change_owner( | |||
4018 | /* now read rh sibling block for next iteration */ | 4018 | /* now read rh sibling block for next iteration */ |
4019 | xfs_btree_get_sibling(cur, block, &rptr, XFS_BB_RIGHTSIB); | 4019 | xfs_btree_get_sibling(cur, block, &rptr, XFS_BB_RIGHTSIB); |
4020 | if (xfs_btree_ptr_is_null(cur, &rptr)) | 4020 | if (xfs_btree_ptr_is_null(cur, &rptr)) |
4021 | return ENOENT; | 4021 | return -ENOENT; |
4022 | 4022 | ||
4023 | return xfs_btree_lookup_get_block(cur, level, &rptr, &block); | 4023 | return xfs_btree_lookup_get_block(cur, level, &rptr, &block); |
4024 | } | 4024 | } |
@@ -4061,7 +4061,7 @@ xfs_btree_change_owner( | |||
4061 | buffer_list); | 4061 | buffer_list); |
4062 | } while (!error); | 4062 | } while (!error); |
4063 | 4063 | ||
4064 | if (error != ENOENT) | 4064 | if (error != -ENOENT) |
4065 | return error; | 4065 | return error; |
4066 | } | 4066 | } |
4067 | 4067 | ||
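A small sketch of the sentinel handling the xfs_btree.c hunks settle on (struct and function names here are hypothetical): long-format, 64-bit sibling pointers now use NULLFSBLOCK, the same all-ones value already used for in-memory block numbers, instead of the separate NULLDFSBNO on-disk constant.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_NULLFSBLOCK	((uint64_t)-1)	/* all-ones "no block" value */

struct demo_lblock {
	uint64_t bb_leftsib;	/* host-endian here; on disk these are be64 */
	uint64_t bb_rightsib;
};

static bool demo_is_last_block(const struct demo_lblock *b)
{
	/* no right sibling means we are at the right edge of this level */
	return b->bb_rightsib == DEMO_NULLFSBLOCK;
}

int main(void)
{
	struct demo_lblock b = { DEMO_NULLFSBLOCK, DEMO_NULLFSBLOCK };

	printf("last block? %d\n", demo_is_last_block(&b));
	return 0;
}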
diff --git a/fs/xfs/xfs_btree.h b/fs/xfs/libxfs/xfs_btree.h index a04b69422f67..8f18bab73ea5 100644 --- a/fs/xfs/xfs_btree.h +++ b/fs/xfs/libxfs/xfs_btree.h | |||
@@ -258,7 +258,7 @@ xfs_btree_check_block( | |||
258 | int /* error (0 or EFSCORRUPTED) */ | 258 | int /* error (0 or EFSCORRUPTED) */ |
259 | xfs_btree_check_lptr( | 259 | xfs_btree_check_lptr( |
260 | struct xfs_btree_cur *cur, /* btree cursor */ | 260 | struct xfs_btree_cur *cur, /* btree cursor */ |
261 | xfs_dfsbno_t ptr, /* btree block disk address */ | 261 | xfs_fsblock_t ptr, /* btree block disk address */ |
262 | int level); /* btree block level */ | 262 | int level); /* btree block level */ |
263 | 263 | ||
264 | /* | 264 | /* |
diff --git a/fs/xfs/xfs_cksum.h b/fs/xfs/libxfs/xfs_cksum.h index fad1676ad8cd..fad1676ad8cd 100644 --- a/fs/xfs/xfs_cksum.h +++ b/fs/xfs/libxfs/xfs_cksum.h | |||
diff --git a/fs/xfs/xfs_da_btree.c b/fs/xfs/libxfs/xfs_da_btree.c index a514ab616650..2c42ae28d027 100644 --- a/fs/xfs/xfs_da_btree.c +++ b/fs/xfs/libxfs/xfs_da_btree.c | |||
@@ -185,7 +185,7 @@ xfs_da3_node_write_verify( | |||
185 | struct xfs_da3_node_hdr *hdr3 = bp->b_addr; | 185 | struct xfs_da3_node_hdr *hdr3 = bp->b_addr; |
186 | 186 | ||
187 | if (!xfs_da3_node_verify(bp)) { | 187 | if (!xfs_da3_node_verify(bp)) { |
188 | xfs_buf_ioerror(bp, EFSCORRUPTED); | 188 | xfs_buf_ioerror(bp, -EFSCORRUPTED); |
189 | xfs_verifier_error(bp); | 189 | xfs_verifier_error(bp); |
190 | return; | 190 | return; |
191 | } | 191 | } |
@@ -214,13 +214,13 @@ xfs_da3_node_read_verify( | |||
214 | switch (be16_to_cpu(info->magic)) { | 214 | switch (be16_to_cpu(info->magic)) { |
215 | case XFS_DA3_NODE_MAGIC: | 215 | case XFS_DA3_NODE_MAGIC: |
216 | if (!xfs_buf_verify_cksum(bp, XFS_DA3_NODE_CRC_OFF)) { | 216 | if (!xfs_buf_verify_cksum(bp, XFS_DA3_NODE_CRC_OFF)) { |
217 | xfs_buf_ioerror(bp, EFSBADCRC); | 217 | xfs_buf_ioerror(bp, -EFSBADCRC); |
218 | break; | 218 | break; |
219 | } | 219 | } |
220 | /* fall through */ | 220 | /* fall through */ |
221 | case XFS_DA_NODE_MAGIC: | 221 | case XFS_DA_NODE_MAGIC: |
222 | if (!xfs_da3_node_verify(bp)) { | 222 | if (!xfs_da3_node_verify(bp)) { |
223 | xfs_buf_ioerror(bp, EFSCORRUPTED); | 223 | xfs_buf_ioerror(bp, -EFSCORRUPTED); |
224 | break; | 224 | break; |
225 | } | 225 | } |
226 | return; | 226 | return; |
@@ -315,7 +315,7 @@ xfs_da3_node_create( | |||
315 | 315 | ||
316 | error = xfs_da_get_buf(tp, dp, blkno, -1, &bp, whichfork); | 316 | error = xfs_da_get_buf(tp, dp, blkno, -1, &bp, whichfork); |
317 | if (error) | 317 | if (error) |
318 | return(error); | 318 | return error; |
319 | bp->b_ops = &xfs_da3_node_buf_ops; | 319 | bp->b_ops = &xfs_da3_node_buf_ops; |
320 | xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DA_NODE_BUF); | 320 | xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DA_NODE_BUF); |
321 | node = bp->b_addr; | 321 | node = bp->b_addr; |
@@ -337,7 +337,7 @@ xfs_da3_node_create( | |||
337 | XFS_DA_LOGRANGE(node, &node->hdr, dp->d_ops->node_hdr_size)); | 337 | XFS_DA_LOGRANGE(node, &node->hdr, dp->d_ops->node_hdr_size)); |
338 | 338 | ||
339 | *bpp = bp; | 339 | *bpp = bp; |
340 | return(0); | 340 | return 0; |
341 | } | 341 | } |
342 | 342 | ||
343 | /* | 343 | /* |
@@ -385,8 +385,8 @@ xfs_da3_split( | |||
385 | switch (oldblk->magic) { | 385 | switch (oldblk->magic) { |
386 | case XFS_ATTR_LEAF_MAGIC: | 386 | case XFS_ATTR_LEAF_MAGIC: |
387 | error = xfs_attr3_leaf_split(state, oldblk, newblk); | 387 | error = xfs_attr3_leaf_split(state, oldblk, newblk); |
388 | if ((error != 0) && (error != ENOSPC)) { | 388 | if ((error != 0) && (error != -ENOSPC)) { |
389 | return(error); /* GROT: attr is inconsistent */ | 389 | return error; /* GROT: attr is inconsistent */ |
390 | } | 390 | } |
391 | if (!error) { | 391 | if (!error) { |
392 | addblk = newblk; | 392 | addblk = newblk; |
@@ -408,7 +408,7 @@ xfs_da3_split( | |||
408 | &state->extrablk); | 408 | &state->extrablk); |
409 | } | 409 | } |
410 | if (error) | 410 | if (error) |
411 | return(error); /* GROT: attr inconsistent */ | 411 | return error; /* GROT: attr inconsistent */ |
412 | addblk = newblk; | 412 | addblk = newblk; |
413 | break; | 413 | break; |
414 | case XFS_DIR2_LEAFN_MAGIC: | 414 | case XFS_DIR2_LEAFN_MAGIC: |
@@ -422,7 +422,7 @@ xfs_da3_split( | |||
422 | max - i, &action); | 422 | max - i, &action); |
423 | addblk->bp = NULL; | 423 | addblk->bp = NULL; |
424 | if (error) | 424 | if (error) |
425 | return(error); /* GROT: dir is inconsistent */ | 425 | return error; /* GROT: dir is inconsistent */ |
426 | /* | 426 | /* |
427 | * Record the newly split block for the next time thru? | 427 | * Record the newly split block for the next time thru? |
428 | */ | 428 | */ |
@@ -439,7 +439,7 @@ xfs_da3_split( | |||
439 | xfs_da3_fixhashpath(state, &state->path); | 439 | xfs_da3_fixhashpath(state, &state->path); |
440 | } | 440 | } |
441 | if (!addblk) | 441 | if (!addblk) |
442 | return(0); | 442 | return 0; |
443 | 443 | ||
444 | /* | 444 | /* |
445 | * Split the root node. | 445 | * Split the root node. |
@@ -449,7 +449,7 @@ xfs_da3_split( | |||
449 | error = xfs_da3_root_split(state, oldblk, addblk); | 449 | error = xfs_da3_root_split(state, oldblk, addblk); |
450 | if (error) { | 450 | if (error) { |
451 | addblk->bp = NULL; | 451 | addblk->bp = NULL; |
452 | return(error); /* GROT: dir is inconsistent */ | 452 | return error; /* GROT: dir is inconsistent */ |
453 | } | 453 | } |
454 | 454 | ||
455 | /* | 455 | /* |
@@ -492,7 +492,7 @@ xfs_da3_split( | |||
492 | sizeof(node->hdr.info))); | 492 | sizeof(node->hdr.info))); |
493 | } | 493 | } |
494 | addblk->bp = NULL; | 494 | addblk->bp = NULL; |
495 | return(0); | 495 | return 0; |
496 | } | 496 | } |
497 | 497 | ||
498 | /* | 498 | /* |
@@ -670,18 +670,18 @@ xfs_da3_node_split( | |||
670 | */ | 670 | */ |
671 | error = xfs_da_grow_inode(state->args, &blkno); | 671 | error = xfs_da_grow_inode(state->args, &blkno); |
672 | if (error) | 672 | if (error) |
673 | return(error); /* GROT: dir is inconsistent */ | 673 | return error; /* GROT: dir is inconsistent */ |
674 | 674 | ||
675 | error = xfs_da3_node_create(state->args, blkno, treelevel, | 675 | error = xfs_da3_node_create(state->args, blkno, treelevel, |
676 | &newblk->bp, state->args->whichfork); | 676 | &newblk->bp, state->args->whichfork); |
677 | if (error) | 677 | if (error) |
678 | return(error); /* GROT: dir is inconsistent */ | 678 | return error; /* GROT: dir is inconsistent */ |
679 | newblk->blkno = blkno; | 679 | newblk->blkno = blkno; |
680 | newblk->magic = XFS_DA_NODE_MAGIC; | 680 | newblk->magic = XFS_DA_NODE_MAGIC; |
681 | xfs_da3_node_rebalance(state, oldblk, newblk); | 681 | xfs_da3_node_rebalance(state, oldblk, newblk); |
682 | error = xfs_da3_blk_link(state, oldblk, newblk); | 682 | error = xfs_da3_blk_link(state, oldblk, newblk); |
683 | if (error) | 683 | if (error) |
684 | return(error); | 684 | return error; |
685 | *result = 1; | 685 | *result = 1; |
686 | } else { | 686 | } else { |
687 | *result = 0; | 687 | *result = 0; |
@@ -721,7 +721,7 @@ xfs_da3_node_split( | |||
721 | } | 721 | } |
722 | } | 722 | } |
723 | 723 | ||
724 | return(0); | 724 | return 0; |
725 | } | 725 | } |
726 | 726 | ||
727 | /* | 727 | /* |
@@ -963,9 +963,9 @@ xfs_da3_join( | |||
963 | case XFS_ATTR_LEAF_MAGIC: | 963 | case XFS_ATTR_LEAF_MAGIC: |
964 | error = xfs_attr3_leaf_toosmall(state, &action); | 964 | error = xfs_attr3_leaf_toosmall(state, &action); |
965 | if (error) | 965 | if (error) |
966 | return(error); | 966 | return error; |
967 | if (action == 0) | 967 | if (action == 0) |
968 | return(0); | 968 | return 0; |
969 | xfs_attr3_leaf_unbalance(state, drop_blk, save_blk); | 969 | xfs_attr3_leaf_unbalance(state, drop_blk, save_blk); |
970 | break; | 970 | break; |
971 | case XFS_DIR2_LEAFN_MAGIC: | 971 | case XFS_DIR2_LEAFN_MAGIC: |
@@ -985,7 +985,7 @@ xfs_da3_join( | |||
985 | xfs_da3_fixhashpath(state, &state->path); | 985 | xfs_da3_fixhashpath(state, &state->path); |
986 | error = xfs_da3_node_toosmall(state, &action); | 986 | error = xfs_da3_node_toosmall(state, &action); |
987 | if (error) | 987 | if (error) |
988 | return(error); | 988 | return error; |
989 | if (action == 0) | 989 | if (action == 0) |
990 | return 0; | 990 | return 0; |
991 | xfs_da3_node_unbalance(state, drop_blk, save_blk); | 991 | xfs_da3_node_unbalance(state, drop_blk, save_blk); |
@@ -995,12 +995,12 @@ xfs_da3_join( | |||
995 | error = xfs_da3_blk_unlink(state, drop_blk, save_blk); | 995 | error = xfs_da3_blk_unlink(state, drop_blk, save_blk); |
996 | xfs_da_state_kill_altpath(state); | 996 | xfs_da_state_kill_altpath(state); |
997 | if (error) | 997 | if (error) |
998 | return(error); | 998 | return error; |
999 | error = xfs_da_shrink_inode(state->args, drop_blk->blkno, | 999 | error = xfs_da_shrink_inode(state->args, drop_blk->blkno, |
1000 | drop_blk->bp); | 1000 | drop_blk->bp); |
1001 | drop_blk->bp = NULL; | 1001 | drop_blk->bp = NULL; |
1002 | if (error) | 1002 | if (error) |
1003 | return(error); | 1003 | return error; |
1004 | } | 1004 | } |
1005 | /* | 1005 | /* |
1006 | * We joined all the way to the top. If it turns out that | 1006 | * We joined all the way to the top. If it turns out that |
@@ -1010,7 +1010,7 @@ xfs_da3_join( | |||
1010 | xfs_da3_node_remove(state, drop_blk); | 1010 | xfs_da3_node_remove(state, drop_blk); |
1011 | xfs_da3_fixhashpath(state, &state->path); | 1011 | xfs_da3_fixhashpath(state, &state->path); |
1012 | error = xfs_da3_root_join(state, &state->path.blk[0]); | 1012 | error = xfs_da3_root_join(state, &state->path.blk[0]); |
1013 | return(error); | 1013 | return error; |
1014 | } | 1014 | } |
1015 | 1015 | ||
1016 | #ifdef DEBUG | 1016 | #ifdef DEBUG |
@@ -1099,7 +1099,7 @@ xfs_da3_root_join( | |||
1099 | xfs_trans_log_buf(args->trans, root_blk->bp, 0, | 1099 | xfs_trans_log_buf(args->trans, root_blk->bp, 0, |
1100 | args->geo->blksize - 1); | 1100 | args->geo->blksize - 1); |
1101 | error = xfs_da_shrink_inode(args, child, bp); | 1101 | error = xfs_da_shrink_inode(args, child, bp); |
1102 | return(error); | 1102 | return error; |
1103 | } | 1103 | } |
1104 | 1104 | ||
1105 | /* | 1105 | /* |
@@ -1142,7 +1142,7 @@ xfs_da3_node_toosmall( | |||
1142 | dp->d_ops->node_hdr_from_disk(&nodehdr, node); | 1142 | dp->d_ops->node_hdr_from_disk(&nodehdr, node); |
1143 | if (nodehdr.count > (state->args->geo->node_ents >> 1)) { | 1143 | if (nodehdr.count > (state->args->geo->node_ents >> 1)) { |
1144 | *action = 0; /* blk over 50%, don't try to join */ | 1144 | *action = 0; /* blk over 50%, don't try to join */ |
1145 | return(0); /* blk over 50%, don't try to join */ | 1145 | return 0; /* blk over 50%, don't try to join */ |
1146 | } | 1146 | } |
1147 | 1147 | ||
1148 | /* | 1148 | /* |
@@ -1161,13 +1161,13 @@ xfs_da3_node_toosmall( | |||
1161 | error = xfs_da3_path_shift(state, &state->altpath, forward, | 1161 | error = xfs_da3_path_shift(state, &state->altpath, forward, |
1162 | 0, &retval); | 1162 | 0, &retval); |
1163 | if (error) | 1163 | if (error) |
1164 | return(error); | 1164 | return error; |
1165 | if (retval) { | 1165 | if (retval) { |
1166 | *action = 0; | 1166 | *action = 0; |
1167 | } else { | 1167 | } else { |
1168 | *action = 2; | 1168 | *action = 2; |
1169 | } | 1169 | } |
1170 | return(0); | 1170 | return 0; |
1171 | } | 1171 | } |
1172 | 1172 | ||
1173 | /* | 1173 | /* |
@@ -1194,7 +1194,7 @@ xfs_da3_node_toosmall( | |||
1194 | error = xfs_da3_node_read(state->args->trans, dp, | 1194 | error = xfs_da3_node_read(state->args->trans, dp, |
1195 | blkno, -1, &bp, state->args->whichfork); | 1195 | blkno, -1, &bp, state->args->whichfork); |
1196 | if (error) | 1196 | if (error) |
1197 | return(error); | 1197 | return error; |
1198 | 1198 | ||
1199 | node = bp->b_addr; | 1199 | node = bp->b_addr; |
1200 | dp->d_ops->node_hdr_from_disk(&thdr, node); | 1200 | dp->d_ops->node_hdr_from_disk(&thdr, node); |
@@ -1486,7 +1486,7 @@ xfs_da3_node_lookup_int( | |||
1486 | if (error) { | 1486 | if (error) { |
1487 | blk->blkno = 0; | 1487 | blk->blkno = 0; |
1488 | state->path.active--; | 1488 | state->path.active--; |
1489 | return(error); | 1489 | return error; |
1490 | } | 1490 | } |
1491 | curr = blk->bp->b_addr; | 1491 | curr = blk->bp->b_addr; |
1492 | blk->magic = be16_to_cpu(curr->magic); | 1492 | blk->magic = be16_to_cpu(curr->magic); |
@@ -1579,25 +1579,25 @@ xfs_da3_node_lookup_int( | |||
1579 | args->blkno = blk->blkno; | 1579 | args->blkno = blk->blkno; |
1580 | } else { | 1580 | } else { |
1581 | ASSERT(0); | 1581 | ASSERT(0); |
1582 | return XFS_ERROR(EFSCORRUPTED); | 1582 | return -EFSCORRUPTED; |
1583 | } | 1583 | } |
1584 | if (((retval == ENOENT) || (retval == ENOATTR)) && | 1584 | if (((retval == -ENOENT) || (retval == -ENOATTR)) && |
1585 | (blk->hashval == args->hashval)) { | 1585 | (blk->hashval == args->hashval)) { |
1586 | error = xfs_da3_path_shift(state, &state->path, 1, 1, | 1586 | error = xfs_da3_path_shift(state, &state->path, 1, 1, |
1587 | &retval); | 1587 | &retval); |
1588 | if (error) | 1588 | if (error) |
1589 | return(error); | 1589 | return error; |
1590 | if (retval == 0) { | 1590 | if (retval == 0) { |
1591 | continue; | 1591 | continue; |
1592 | } else if (blk->magic == XFS_ATTR_LEAF_MAGIC) { | 1592 | } else if (blk->magic == XFS_ATTR_LEAF_MAGIC) { |
1593 | /* path_shift() gives ENOENT */ | 1593 | /* path_shift() gives ENOENT */ |
1594 | retval = XFS_ERROR(ENOATTR); | 1594 | retval = -ENOATTR; |
1595 | } | 1595 | } |
1596 | } | 1596 | } |
1597 | break; | 1597 | break; |
1598 | } | 1598 | } |
1599 | *result = retval; | 1599 | *result = retval; |
1600 | return(0); | 1600 | return 0; |
1601 | } | 1601 | } |
1602 | 1602 | ||
1603 | /*======================================================================== | 1603 | /*======================================================================== |
@@ -1692,7 +1692,7 @@ xfs_da3_blk_link( | |||
1692 | be32_to_cpu(old_info->back), | 1692 | be32_to_cpu(old_info->back), |
1693 | -1, &bp, args->whichfork); | 1693 | -1, &bp, args->whichfork); |
1694 | if (error) | 1694 | if (error) |
1695 | return(error); | 1695 | return error; |
1696 | ASSERT(bp != NULL); | 1696 | ASSERT(bp != NULL); |
1697 | tmp_info = bp->b_addr; | 1697 | tmp_info = bp->b_addr; |
1698 | ASSERT(tmp_info->magic == old_info->magic); | 1698 | ASSERT(tmp_info->magic == old_info->magic); |
@@ -1713,7 +1713,7 @@ xfs_da3_blk_link( | |||
1713 | be32_to_cpu(old_info->forw), | 1713 | be32_to_cpu(old_info->forw), |
1714 | -1, &bp, args->whichfork); | 1714 | -1, &bp, args->whichfork); |
1715 | if (error) | 1715 | if (error) |
1716 | return(error); | 1716 | return error; |
1717 | ASSERT(bp != NULL); | 1717 | ASSERT(bp != NULL); |
1718 | tmp_info = bp->b_addr; | 1718 | tmp_info = bp->b_addr; |
1719 | ASSERT(tmp_info->magic == old_info->magic); | 1719 | ASSERT(tmp_info->magic == old_info->magic); |
@@ -1726,7 +1726,7 @@ xfs_da3_blk_link( | |||
1726 | 1726 | ||
1727 | xfs_trans_log_buf(args->trans, old_blk->bp, 0, sizeof(*tmp_info) - 1); | 1727 | xfs_trans_log_buf(args->trans, old_blk->bp, 0, sizeof(*tmp_info) - 1); |
1728 | xfs_trans_log_buf(args->trans, new_blk->bp, 0, sizeof(*tmp_info) - 1); | 1728 | xfs_trans_log_buf(args->trans, new_blk->bp, 0, sizeof(*tmp_info) - 1); |
1729 | return(0); | 1729 | return 0; |
1730 | } | 1730 | } |
1731 | 1731 | ||
1732 | /* | 1732 | /* |
@@ -1772,7 +1772,7 @@ xfs_da3_blk_unlink( | |||
1772 | be32_to_cpu(drop_info->back), | 1772 | be32_to_cpu(drop_info->back), |
1773 | -1, &bp, args->whichfork); | 1773 | -1, &bp, args->whichfork); |
1774 | if (error) | 1774 | if (error) |
1775 | return(error); | 1775 | return error; |
1776 | ASSERT(bp != NULL); | 1776 | ASSERT(bp != NULL); |
1777 | tmp_info = bp->b_addr; | 1777 | tmp_info = bp->b_addr; |
1778 | ASSERT(tmp_info->magic == save_info->magic); | 1778 | ASSERT(tmp_info->magic == save_info->magic); |
@@ -1789,7 +1789,7 @@ xfs_da3_blk_unlink( | |||
1789 | be32_to_cpu(drop_info->forw), | 1789 | be32_to_cpu(drop_info->forw), |
1790 | -1, &bp, args->whichfork); | 1790 | -1, &bp, args->whichfork); |
1791 | if (error) | 1791 | if (error) |
1792 | return(error); | 1792 | return error; |
1793 | ASSERT(bp != NULL); | 1793 | ASSERT(bp != NULL); |
1794 | tmp_info = bp->b_addr; | 1794 | tmp_info = bp->b_addr; |
1795 | ASSERT(tmp_info->magic == save_info->magic); | 1795 | ASSERT(tmp_info->magic == save_info->magic); |
@@ -1801,7 +1801,7 @@ xfs_da3_blk_unlink( | |||
1801 | } | 1801 | } |
1802 | 1802 | ||
1803 | xfs_trans_log_buf(args->trans, save_blk->bp, 0, sizeof(*save_info) - 1); | 1803 | xfs_trans_log_buf(args->trans, save_blk->bp, 0, sizeof(*save_info) - 1); |
1804 | return(0); | 1804 | return 0; |
1805 | } | 1805 | } |
1806 | 1806 | ||
1807 | /* | 1807 | /* |
@@ -1859,9 +1859,9 @@ xfs_da3_path_shift( | |||
1859 | } | 1859 | } |
1860 | } | 1860 | } |
1861 | if (level < 0) { | 1861 | if (level < 0) { |
1862 | *result = XFS_ERROR(ENOENT); /* we're out of our tree */ | 1862 | *result = -ENOENT; /* we're out of our tree */ |
1863 | ASSERT(args->op_flags & XFS_DA_OP_OKNOENT); | 1863 | ASSERT(args->op_flags & XFS_DA_OP_OKNOENT); |
1864 | return(0); | 1864 | return 0; |
1865 | } | 1865 | } |
1866 | 1866 | ||
1867 | /* | 1867 | /* |
@@ -1883,7 +1883,7 @@ xfs_da3_path_shift( | |||
1883 | error = xfs_da3_node_read(args->trans, dp, blkno, -1, | 1883 | error = xfs_da3_node_read(args->trans, dp, blkno, -1, |
1884 | &blk->bp, args->whichfork); | 1884 | &blk->bp, args->whichfork); |
1885 | if (error) | 1885 | if (error) |
1886 | return(error); | 1886 | return error; |
1887 | info = blk->bp->b_addr; | 1887 | info = blk->bp->b_addr; |
1888 | ASSERT(info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC) || | 1888 | ASSERT(info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC) || |
1889 | info->magic == cpu_to_be16(XFS_DA3_NODE_MAGIC) || | 1889 | info->magic == cpu_to_be16(XFS_DA3_NODE_MAGIC) || |
@@ -2004,7 +2004,7 @@ xfs_da_grow_inode_int( | |||
2004 | struct xfs_trans *tp = args->trans; | 2004 | struct xfs_trans *tp = args->trans; |
2005 | struct xfs_inode *dp = args->dp; | 2005 | struct xfs_inode *dp = args->dp; |
2006 | int w = args->whichfork; | 2006 | int w = args->whichfork; |
2007 | xfs_drfsbno_t nblks = dp->i_d.di_nblocks; | 2007 | xfs_rfsblock_t nblks = dp->i_d.di_nblocks; |
2008 | struct xfs_bmbt_irec map, *mapp; | 2008 | struct xfs_bmbt_irec map, *mapp; |
2009 | int nmap, error, got, i, mapi; | 2009 | int nmap, error, got, i, mapi; |
2010 | 2010 | ||
@@ -2068,7 +2068,7 @@ xfs_da_grow_inode_int( | |||
2068 | if (got != count || mapp[0].br_startoff != *bno || | 2068 | if (got != count || mapp[0].br_startoff != *bno || |
2069 | mapp[mapi - 1].br_startoff + mapp[mapi - 1].br_blockcount != | 2069 | mapp[mapi - 1].br_startoff + mapp[mapi - 1].br_blockcount != |
2070 | *bno + count) { | 2070 | *bno + count) { |
2071 | error = XFS_ERROR(ENOSPC); | 2071 | error = -ENOSPC; |
2072 | goto out_free_map; | 2072 | goto out_free_map; |
2073 | } | 2073 | } |
2074 | 2074 | ||
@@ -2158,7 +2158,7 @@ xfs_da3_swap_lastblock( | |||
2158 | if (unlikely(lastoff == 0)) { | 2158 | if (unlikely(lastoff == 0)) { |
2159 | XFS_ERROR_REPORT("xfs_da_swap_lastblock(1)", XFS_ERRLEVEL_LOW, | 2159 | XFS_ERROR_REPORT("xfs_da_swap_lastblock(1)", XFS_ERRLEVEL_LOW, |
2160 | mp); | 2160 | mp); |
2161 | return XFS_ERROR(EFSCORRUPTED); | 2161 | return -EFSCORRUPTED; |
2162 | } | 2162 | } |
2163 | /* | 2163 | /* |
2164 | * Read the last block in the btree space. | 2164 | * Read the last block in the btree space. |
@@ -2209,7 +2209,7 @@ xfs_da3_swap_lastblock( | |||
2209 | sib_info->magic != dead_info->magic)) { | 2209 | sib_info->magic != dead_info->magic)) { |
2210 | XFS_ERROR_REPORT("xfs_da_swap_lastblock(2)", | 2210 | XFS_ERROR_REPORT("xfs_da_swap_lastblock(2)", |
2211 | XFS_ERRLEVEL_LOW, mp); | 2211 | XFS_ERRLEVEL_LOW, mp); |
2212 | error = XFS_ERROR(EFSCORRUPTED); | 2212 | error = -EFSCORRUPTED; |
2213 | goto done; | 2213 | goto done; |
2214 | } | 2214 | } |
2215 | sib_info->forw = cpu_to_be32(dead_blkno); | 2215 | sib_info->forw = cpu_to_be32(dead_blkno); |
@@ -2231,7 +2231,7 @@ xfs_da3_swap_lastblock( | |||
2231 | sib_info->magic != dead_info->magic)) { | 2231 | sib_info->magic != dead_info->magic)) { |
2232 | XFS_ERROR_REPORT("xfs_da_swap_lastblock(3)", | 2232 | XFS_ERROR_REPORT("xfs_da_swap_lastblock(3)", |
2233 | XFS_ERRLEVEL_LOW, mp); | 2233 | XFS_ERRLEVEL_LOW, mp); |
2234 | error = XFS_ERROR(EFSCORRUPTED); | 2234 | error = -EFSCORRUPTED; |
2235 | goto done; | 2235 | goto done; |
2236 | } | 2236 | } |
2237 | sib_info->back = cpu_to_be32(dead_blkno); | 2237 | sib_info->back = cpu_to_be32(dead_blkno); |
@@ -2254,7 +2254,7 @@ xfs_da3_swap_lastblock( | |||
2254 | if (level >= 0 && level != par_hdr.level + 1) { | 2254 | if (level >= 0 && level != par_hdr.level + 1) { |
2255 | XFS_ERROR_REPORT("xfs_da_swap_lastblock(4)", | 2255 | XFS_ERROR_REPORT("xfs_da_swap_lastblock(4)", |
2256 | XFS_ERRLEVEL_LOW, mp); | 2256 | XFS_ERRLEVEL_LOW, mp); |
2257 | error = XFS_ERROR(EFSCORRUPTED); | 2257 | error = -EFSCORRUPTED; |
2258 | goto done; | 2258 | goto done; |
2259 | } | 2259 | } |
2260 | level = par_hdr.level; | 2260 | level = par_hdr.level; |
@@ -2267,7 +2267,7 @@ xfs_da3_swap_lastblock( | |||
2267 | if (entno == par_hdr.count) { | 2267 | if (entno == par_hdr.count) { |
2268 | XFS_ERROR_REPORT("xfs_da_swap_lastblock(5)", | 2268 | XFS_ERROR_REPORT("xfs_da_swap_lastblock(5)", |
2269 | XFS_ERRLEVEL_LOW, mp); | 2269 | XFS_ERRLEVEL_LOW, mp); |
2270 | error = XFS_ERROR(EFSCORRUPTED); | 2270 | error = -EFSCORRUPTED; |
2271 | goto done; | 2271 | goto done; |
2272 | } | 2272 | } |
2273 | par_blkno = be32_to_cpu(btree[entno].before); | 2273 | par_blkno = be32_to_cpu(btree[entno].before); |
@@ -2294,7 +2294,7 @@ xfs_da3_swap_lastblock( | |||
2294 | if (unlikely(par_blkno == 0)) { | 2294 | if (unlikely(par_blkno == 0)) { |
2295 | XFS_ERROR_REPORT("xfs_da_swap_lastblock(6)", | 2295 | XFS_ERROR_REPORT("xfs_da_swap_lastblock(6)", |
2296 | XFS_ERRLEVEL_LOW, mp); | 2296 | XFS_ERRLEVEL_LOW, mp); |
2297 | error = XFS_ERROR(EFSCORRUPTED); | 2297 | error = -EFSCORRUPTED; |
2298 | goto done; | 2298 | goto done; |
2299 | } | 2299 | } |
2300 | error = xfs_da3_node_read(tp, dp, par_blkno, -1, &par_buf, w); | 2300 | error = xfs_da3_node_read(tp, dp, par_blkno, -1, &par_buf, w); |
@@ -2305,7 +2305,7 @@ xfs_da3_swap_lastblock( | |||
2305 | if (par_hdr.level != level) { | 2305 | if (par_hdr.level != level) { |
2306 | XFS_ERROR_REPORT("xfs_da_swap_lastblock(7)", | 2306 | XFS_ERROR_REPORT("xfs_da_swap_lastblock(7)", |
2307 | XFS_ERRLEVEL_LOW, mp); | 2307 | XFS_ERRLEVEL_LOW, mp); |
2308 | error = XFS_ERROR(EFSCORRUPTED); | 2308 | error = -EFSCORRUPTED; |
2309 | goto done; | 2309 | goto done; |
2310 | } | 2310 | } |
2311 | btree = dp->d_ops->node_tree_p(par_node); | 2311 | btree = dp->d_ops->node_tree_p(par_node); |
@@ -2359,7 +2359,7 @@ xfs_da_shrink_inode( | |||
2359 | error = xfs_bunmapi(tp, dp, dead_blkno, count, | 2359 | error = xfs_bunmapi(tp, dp, dead_blkno, count, |
2360 | xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA, | 2360 | xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA, |
2361 | 0, args->firstblock, args->flist, &done); | 2361 | 0, args->firstblock, args->flist, &done); |
2362 | if (error == ENOSPC) { | 2362 | if (error == -ENOSPC) { |
2363 | if (w != XFS_DATA_FORK) | 2363 | if (w != XFS_DATA_FORK) |
2364 | break; | 2364 | break; |
2365 | error = xfs_da3_swap_lastblock(args, &dead_blkno, | 2365 | error = xfs_da3_swap_lastblock(args, &dead_blkno, |
@@ -2427,7 +2427,7 @@ xfs_buf_map_from_irec( | |||
2427 | map = kmem_zalloc(nirecs * sizeof(struct xfs_buf_map), | 2427 | map = kmem_zalloc(nirecs * sizeof(struct xfs_buf_map), |
2428 | KM_SLEEP | KM_NOFS); | 2428 | KM_SLEEP | KM_NOFS); |
2429 | if (!map) | 2429 | if (!map) |
2430 | return ENOMEM; | 2430 | return -ENOMEM; |
2431 | *mapp = map; | 2431 | *mapp = map; |
2432 | } | 2432 | } |
2433 | 2433 | ||
@@ -2500,8 +2500,8 @@ xfs_dabuf_map( | |||
2500 | } | 2500 | } |
2501 | 2501 | ||
2502 | if (!xfs_da_map_covers_blocks(nirecs, irecs, bno, nfsb)) { | 2502 | if (!xfs_da_map_covers_blocks(nirecs, irecs, bno, nfsb)) { |
2503 | error = mappedbno == -2 ? -1 : XFS_ERROR(EFSCORRUPTED); | 2503 | error = mappedbno == -2 ? -1 : -EFSCORRUPTED; |
2504 | if (unlikely(error == EFSCORRUPTED)) { | 2504 | if (unlikely(error == -EFSCORRUPTED)) { |
2505 | if (xfs_error_level >= XFS_ERRLEVEL_LOW) { | 2505 | if (xfs_error_level >= XFS_ERRLEVEL_LOW) { |
2506 | int i; | 2506 | int i; |
2507 | xfs_alert(mp, "%s: bno %lld dir: inode %lld", | 2507 | xfs_alert(mp, "%s: bno %lld dir: inode %lld", |
@@ -2561,7 +2561,7 @@ xfs_da_get_buf( | |||
2561 | 2561 | ||
2562 | bp = xfs_trans_get_buf_map(trans, dp->i_mount->m_ddev_targp, | 2562 | bp = xfs_trans_get_buf_map(trans, dp->i_mount->m_ddev_targp, |
2563 | mapp, nmap, 0); | 2563 | mapp, nmap, 0); |
2564 | error = bp ? bp->b_error : XFS_ERROR(EIO); | 2564 | error = bp ? bp->b_error : -EIO; |
2565 | if (error) { | 2565 | if (error) { |
2566 | xfs_trans_brelse(trans, bp); | 2566 | xfs_trans_brelse(trans, bp); |
2567 | goto out_free; | 2567 | goto out_free; |
diff --git a/fs/xfs/xfs_da_btree.h b/fs/xfs/libxfs/xfs_da_btree.h index 6e153e399a77..6e153e399a77 100644 --- a/fs/xfs/xfs_da_btree.h +++ b/fs/xfs/libxfs/xfs_da_btree.h | |||
diff --git a/fs/xfs/xfs_da_format.c b/fs/xfs/libxfs/xfs_da_format.c index c9aee52a37e2..c9aee52a37e2 100644 --- a/fs/xfs/xfs_da_format.c +++ b/fs/xfs/libxfs/xfs_da_format.c | |||
diff --git a/fs/xfs/xfs_da_format.h b/fs/xfs/libxfs/xfs_da_format.h index 0a49b0286372..0a49b0286372 100644 --- a/fs/xfs/xfs_da_format.h +++ b/fs/xfs/libxfs/xfs_da_format.h | |||
diff --git a/fs/xfs/xfs_dinode.h b/fs/xfs/libxfs/xfs_dinode.h index 623bbe8fd921..623bbe8fd921 100644 --- a/fs/xfs/xfs_dinode.h +++ b/fs/xfs/libxfs/xfs_dinode.h | |||
diff --git a/fs/xfs/xfs_dir2.c b/fs/xfs/libxfs/xfs_dir2.c index 79670cda48ae..6cef22152fd6 100644 --- a/fs/xfs/xfs_dir2.c +++ b/fs/xfs/libxfs/xfs_dir2.c | |||
@@ -108,7 +108,7 @@ xfs_da_mount( | |||
108 | if (!mp->m_dir_geo || !mp->m_attr_geo) { | 108 | if (!mp->m_dir_geo || !mp->m_attr_geo) { |
109 | kmem_free(mp->m_dir_geo); | 109 | kmem_free(mp->m_dir_geo); |
110 | kmem_free(mp->m_attr_geo); | 110 | kmem_free(mp->m_attr_geo); |
111 | return ENOMEM; | 111 | return -ENOMEM; |
112 | } | 112 | } |
113 | 113 | ||
114 | /* set up directory geometry */ | 114 | /* set up directory geometry */ |
@@ -202,7 +202,7 @@ xfs_dir_ino_validate( | |||
202 | xfs_warn(mp, "Invalid inode number 0x%Lx", | 202 | xfs_warn(mp, "Invalid inode number 0x%Lx", |
203 | (unsigned long long) ino); | 203 | (unsigned long long) ino); |
204 | XFS_ERROR_REPORT("xfs_dir_ino_validate", XFS_ERRLEVEL_LOW, mp); | 204 | XFS_ERROR_REPORT("xfs_dir_ino_validate", XFS_ERRLEVEL_LOW, mp); |
205 | return XFS_ERROR(EFSCORRUPTED); | 205 | return -EFSCORRUPTED; |
206 | } | 206 | } |
207 | return 0; | 207 | return 0; |
208 | } | 208 | } |
@@ -226,7 +226,7 @@ xfs_dir_init( | |||
226 | 226 | ||
227 | args = kmem_zalloc(sizeof(*args), KM_SLEEP | KM_NOFS); | 227 | args = kmem_zalloc(sizeof(*args), KM_SLEEP | KM_NOFS); |
228 | if (!args) | 228 | if (!args) |
229 | return ENOMEM; | 229 | return -ENOMEM; |
230 | 230 | ||
231 | args->geo = dp->i_mount->m_dir_geo; | 231 | args->geo = dp->i_mount->m_dir_geo; |
232 | args->dp = dp; | 232 | args->dp = dp; |
@@ -261,7 +261,7 @@ xfs_dir_createname( | |||
261 | 261 | ||
262 | args = kmem_zalloc(sizeof(*args), KM_SLEEP | KM_NOFS); | 262 | args = kmem_zalloc(sizeof(*args), KM_SLEEP | KM_NOFS); |
263 | if (!args) | 263 | if (!args) |
264 | return ENOMEM; | 264 | return -ENOMEM; |
265 | 265 | ||
266 | args->geo = dp->i_mount->m_dir_geo; | 266 | args->geo = dp->i_mount->m_dir_geo; |
267 | args->name = name->name; | 267 | args->name = name->name; |
@@ -314,18 +314,18 @@ xfs_dir_cilookup_result( | |||
314 | int len) | 314 | int len) |
315 | { | 315 | { |
316 | if (args->cmpresult == XFS_CMP_DIFFERENT) | 316 | if (args->cmpresult == XFS_CMP_DIFFERENT) |
317 | return ENOENT; | 317 | return -ENOENT; |
318 | if (args->cmpresult != XFS_CMP_CASE || | 318 | if (args->cmpresult != XFS_CMP_CASE || |
319 | !(args->op_flags & XFS_DA_OP_CILOOKUP)) | 319 | !(args->op_flags & XFS_DA_OP_CILOOKUP)) |
320 | return EEXIST; | 320 | return -EEXIST; |
321 | 321 | ||
322 | args->value = kmem_alloc(len, KM_NOFS | KM_MAYFAIL); | 322 | args->value = kmem_alloc(len, KM_NOFS | KM_MAYFAIL); |
323 | if (!args->value) | 323 | if (!args->value) |
324 | return ENOMEM; | 324 | return -ENOMEM; |
325 | 325 | ||
326 | memcpy(args->value, name, len); | 326 | memcpy(args->value, name, len); |
327 | args->valuelen = len; | 327 | args->valuelen = len; |
328 | return EEXIST; | 328 | return -EEXIST; |
329 | } | 329 | } |
330 | 330 | ||
331 | /* | 331 | /* |
@@ -392,7 +392,7 @@ xfs_dir_lookup( | |||
392 | rval = xfs_dir2_node_lookup(args); | 392 | rval = xfs_dir2_node_lookup(args); |
393 | 393 | ||
394 | out_check_rval: | 394 | out_check_rval: |
395 | if (rval == EEXIST) | 395 | if (rval == -EEXIST) |
396 | rval = 0; | 396 | rval = 0; |
397 | if (!rval) { | 397 | if (!rval) { |
398 | *inum = args->inumber; | 398 | *inum = args->inumber; |
@@ -428,7 +428,7 @@ xfs_dir_removename( | |||
428 | 428 | ||
429 | args = kmem_zalloc(sizeof(*args), KM_SLEEP | KM_NOFS); | 429 | args = kmem_zalloc(sizeof(*args), KM_SLEEP | KM_NOFS); |
430 | if (!args) | 430 | if (!args) |
431 | return ENOMEM; | 431 | return -ENOMEM; |
432 | 432 | ||
433 | args->geo = dp->i_mount->m_dir_geo; | 433 | args->geo = dp->i_mount->m_dir_geo; |
434 | args->name = name->name; | 434 | args->name = name->name; |
@@ -493,7 +493,7 @@ xfs_dir_replace( | |||
493 | 493 | ||
494 | args = kmem_zalloc(sizeof(*args), KM_SLEEP | KM_NOFS); | 494 | args = kmem_zalloc(sizeof(*args), KM_SLEEP | KM_NOFS); |
495 | if (!args) | 495 | if (!args) |
496 | return ENOMEM; | 496 | return -ENOMEM; |
497 | 497 | ||
498 | args->geo = dp->i_mount->m_dir_geo; | 498 | args->geo = dp->i_mount->m_dir_geo; |
499 | args->name = name->name; | 499 | args->name = name->name; |
@@ -555,7 +555,7 @@ xfs_dir_canenter( | |||
555 | 555 | ||
556 | args = kmem_zalloc(sizeof(*args), KM_SLEEP | KM_NOFS); | 556 | args = kmem_zalloc(sizeof(*args), KM_SLEEP | KM_NOFS); |
557 | if (!args) | 557 | if (!args) |
558 | return ENOMEM; | 558 | return -ENOMEM; |
559 | 559 | ||
560 | args->geo = dp->i_mount->m_dir_geo; | 560 | args->geo = dp->i_mount->m_dir_geo; |
561 | args->name = name->name; | 561 | args->name = name->name; |
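A minimal userspace sketch of the directory lookup convention visible in the xfs_dir_lookup()/xfs_dir_cilookup_result() hunks above (the demo_* helpers are invented): per-format lookup routines report a hit as -EEXIST and a miss as -ENOENT, and the top-level lookup translates -EEXIST back to 0 for its callers.

#include <errno.h>
#include <stdio.h>

static int demo_format_lookup(const char *name, unsigned long *inum)
{
	if (name[0] == '.') {		/* pretend only "." exists */
		*inum = 42;
		return -EEXIST;		/* found: reported as -EEXIST */
	}
	return -ENOENT;			/* not found */
}

static int demo_dir_lookup(const char *name, unsigned long *inum)
{
	int rval = demo_format_lookup(name, inum);

	if (rval == -EEXIST)		/* a hit is success to the caller */
		rval = 0;
	return rval;
}

int main(void)
{
	unsigned long inum = 0;

	printf("lookup '.': %d (inum %lu)\n", demo_dir_lookup(".", &inum), inum);
	printf("lookup 'x': %d\n", demo_dir_lookup("x", &inum));
	return 0;
}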
diff --git a/fs/xfs/xfs_dir2.h b/fs/xfs/libxfs/xfs_dir2.h index c8e86b0b5e99..c8e86b0b5e99 100644 --- a/fs/xfs/xfs_dir2.h +++ b/fs/xfs/libxfs/xfs_dir2.h | |||
diff --git a/fs/xfs/xfs_dir2_block.c b/fs/xfs/libxfs/xfs_dir2_block.c index c7cd3154026a..9628ceccfa02 100644 --- a/fs/xfs/xfs_dir2_block.c +++ b/fs/xfs/libxfs/xfs_dir2_block.c | |||
@@ -91,9 +91,9 @@ xfs_dir3_block_read_verify( | |||
91 | 91 | ||
92 | if (xfs_sb_version_hascrc(&mp->m_sb) && | 92 | if (xfs_sb_version_hascrc(&mp->m_sb) && |
93 | !xfs_buf_verify_cksum(bp, XFS_DIR3_DATA_CRC_OFF)) | 93 | !xfs_buf_verify_cksum(bp, XFS_DIR3_DATA_CRC_OFF)) |
94 | xfs_buf_ioerror(bp, EFSBADCRC); | 94 | xfs_buf_ioerror(bp, -EFSBADCRC); |
95 | else if (!xfs_dir3_block_verify(bp)) | 95 | else if (!xfs_dir3_block_verify(bp)) |
96 | xfs_buf_ioerror(bp, EFSCORRUPTED); | 96 | xfs_buf_ioerror(bp, -EFSCORRUPTED); |
97 | 97 | ||
98 | if (bp->b_error) | 98 | if (bp->b_error) |
99 | xfs_verifier_error(bp); | 99 | xfs_verifier_error(bp); |
@@ -108,7 +108,7 @@ xfs_dir3_block_write_verify( | |||
108 | struct xfs_dir3_blk_hdr *hdr3 = bp->b_addr; | 108 | struct xfs_dir3_blk_hdr *hdr3 = bp->b_addr; |
109 | 109 | ||
110 | if (!xfs_dir3_block_verify(bp)) { | 110 | if (!xfs_dir3_block_verify(bp)) { |
111 | xfs_buf_ioerror(bp, EFSCORRUPTED); | 111 | xfs_buf_ioerror(bp, -EFSCORRUPTED); |
112 | xfs_verifier_error(bp); | 112 | xfs_verifier_error(bp); |
113 | return; | 113 | return; |
114 | } | 114 | } |
@@ -392,7 +392,7 @@ xfs_dir2_block_addname( | |||
392 | if (args->op_flags & XFS_DA_OP_JUSTCHECK) { | 392 | if (args->op_flags & XFS_DA_OP_JUSTCHECK) { |
393 | xfs_trans_brelse(tp, bp); | 393 | xfs_trans_brelse(tp, bp); |
394 | if (!dup) | 394 | if (!dup) |
395 | return XFS_ERROR(ENOSPC); | 395 | return -ENOSPC; |
396 | return 0; | 396 | return 0; |
397 | } | 397 | } |
398 | 398 | ||
@@ -402,7 +402,7 @@ xfs_dir2_block_addname( | |||
402 | if (!dup) { | 402 | if (!dup) { |
403 | /* Don't have a space reservation: return no-space. */ | 403 | /* Don't have a space reservation: return no-space. */ |
404 | if (args->total == 0) | 404 | if (args->total == 0) |
405 | return XFS_ERROR(ENOSPC); | 405 | return -ENOSPC; |
406 | /* | 406 | /* |
407 | * Convert to the next larger format. | 407 | * Convert to the next larger format. |
408 | * Then add the new entry in that format. | 408 | * Then add the new entry in that format. |
@@ -647,7 +647,7 @@ xfs_dir2_block_lookup( | |||
647 | args->filetype = dp->d_ops->data_get_ftype(dep); | 647 | args->filetype = dp->d_ops->data_get_ftype(dep); |
648 | error = xfs_dir_cilookup_result(args, dep->name, dep->namelen); | 648 | error = xfs_dir_cilookup_result(args, dep->name, dep->namelen); |
649 | xfs_trans_brelse(args->trans, bp); | 649 | xfs_trans_brelse(args->trans, bp); |
650 | return XFS_ERROR(error); | 650 | return error; |
651 | } | 651 | } |
652 | 652 | ||
653 | /* | 653 | /* |
@@ -703,7 +703,7 @@ xfs_dir2_block_lookup_int( | |||
703 | if (low > high) { | 703 | if (low > high) { |
704 | ASSERT(args->op_flags & XFS_DA_OP_OKNOENT); | 704 | ASSERT(args->op_flags & XFS_DA_OP_OKNOENT); |
705 | xfs_trans_brelse(tp, bp); | 705 | xfs_trans_brelse(tp, bp); |
706 | return XFS_ERROR(ENOENT); | 706 | return -ENOENT; |
707 | } | 707 | } |
708 | } | 708 | } |
709 | /* | 709 | /* |
@@ -751,7 +751,7 @@ xfs_dir2_block_lookup_int( | |||
751 | * No match, release the buffer and return ENOENT. | 751 | * No match, release the buffer and return ENOENT. |
752 | */ | 752 | */ |
753 | xfs_trans_brelse(tp, bp); | 753 | xfs_trans_brelse(tp, bp); |
754 | return XFS_ERROR(ENOENT); | 754 | return -ENOENT; |
755 | } | 755 | } |
756 | 756 | ||
757 | /* | 757 | /* |
@@ -1091,7 +1091,7 @@ xfs_dir2_sf_to_block( | |||
1091 | */ | 1091 | */ |
1092 | if (dp->i_d.di_size < offsetof(xfs_dir2_sf_hdr_t, parent)) { | 1092 | if (dp->i_d.di_size < offsetof(xfs_dir2_sf_hdr_t, parent)) { |
1093 | ASSERT(XFS_FORCED_SHUTDOWN(mp)); | 1093 | ASSERT(XFS_FORCED_SHUTDOWN(mp)); |
1094 | return XFS_ERROR(EIO); | 1094 | return -EIO; |
1095 | } | 1095 | } |
1096 | 1096 | ||
1097 | oldsfp = (xfs_dir2_sf_hdr_t *)ifp->if_u1.if_data; | 1097 | oldsfp = (xfs_dir2_sf_hdr_t *)ifp->if_u1.if_data; |
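
The read/write verifier hunks in xfs_dir2_block.c (and in the data, leaf, node, dquot and inode-allocation files that follow) all change the same pattern to the new sign convention: a failed checksum maps to -EFSBADCRC, a failed structure check to -EFSCORRUPTED, and the error is stashed in the buffer before xfs_verifier_error() reports it. EFSBADCRC and EFSCORRUPTED are defined privately by XFS, so this standalone sketch uses stand-in values and invented helpers:

	#include <stdbool.h>
	#include <stdio.h>

	/* Stand-ins for the XFS-private error codes; values are illustrative only. */
	#define DEMO_EFSBADCRC		74	/* checksum mismatch on a disk block */
	#define DEMO_EFSCORRUPTED	117	/* structure failed verification     */

	struct demo_buf {
		bool	crc_ok;
		bool	layout_ok;
		int	b_error;	/* 0 or a negative errno, like bp->b_error */
	};

	/* Mirrors the read-verify ordering: CRC first, then structural checks. */
	static void demo_read_verify(struct demo_buf *bp)
	{
		if (!bp->crc_ok)
			bp->b_error = -DEMO_EFSBADCRC;	/* was: EFSBADCRC (positive) */
		else if (!bp->layout_ok)
			bp->b_error = -DEMO_EFSCORRUPTED;

		if (bp->b_error)
			fprintf(stderr, "verifier error %d\n", bp->b_error);
	}

	int main(void)
	{
		struct demo_buf bp = { .crc_ok = true, .layout_ok = false };

		demo_read_verify(&bp);
		return 0;
	}
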
diff --git a/fs/xfs/xfs_dir2_data.c b/fs/xfs/libxfs/xfs_dir2_data.c index 8c2f6422648e..fdd803fecb8e 100644 --- a/fs/xfs/xfs_dir2_data.c +++ b/fs/xfs/libxfs/xfs_dir2_data.c | |||
@@ -100,7 +100,7 @@ __xfs_dir3_data_check( | |||
100 | break; | 100 | break; |
101 | default: | 101 | default: |
102 | XFS_ERROR_REPORT("Bad Magic", XFS_ERRLEVEL_LOW, mp); | 102 | XFS_ERROR_REPORT("Bad Magic", XFS_ERRLEVEL_LOW, mp); |
103 | return EFSCORRUPTED; | 103 | return -EFSCORRUPTED; |
104 | } | 104 | } |
105 | 105 | ||
106 | /* | 106 | /* |
@@ -256,7 +256,7 @@ xfs_dir3_data_reada_verify( | |||
256 | xfs_dir3_data_verify(bp); | 256 | xfs_dir3_data_verify(bp); |
257 | return; | 257 | return; |
258 | default: | 258 | default: |
259 | xfs_buf_ioerror(bp, EFSCORRUPTED); | 259 | xfs_buf_ioerror(bp, -EFSCORRUPTED); |
260 | xfs_verifier_error(bp); | 260 | xfs_verifier_error(bp); |
261 | break; | 261 | break; |
262 | } | 262 | } |
@@ -270,9 +270,9 @@ xfs_dir3_data_read_verify( | |||
270 | 270 | ||
271 | if (xfs_sb_version_hascrc(&mp->m_sb) && | 271 | if (xfs_sb_version_hascrc(&mp->m_sb) && |
272 | !xfs_buf_verify_cksum(bp, XFS_DIR3_DATA_CRC_OFF)) | 272 | !xfs_buf_verify_cksum(bp, XFS_DIR3_DATA_CRC_OFF)) |
273 | xfs_buf_ioerror(bp, EFSBADCRC); | 273 | xfs_buf_ioerror(bp, -EFSBADCRC); |
274 | else if (!xfs_dir3_data_verify(bp)) | 274 | else if (!xfs_dir3_data_verify(bp)) |
275 | xfs_buf_ioerror(bp, EFSCORRUPTED); | 275 | xfs_buf_ioerror(bp, -EFSCORRUPTED); |
276 | 276 | ||
277 | if (bp->b_error) | 277 | if (bp->b_error) |
278 | xfs_verifier_error(bp); | 278 | xfs_verifier_error(bp); |
@@ -287,7 +287,7 @@ xfs_dir3_data_write_verify( | |||
287 | struct xfs_dir3_blk_hdr *hdr3 = bp->b_addr; | 287 | struct xfs_dir3_blk_hdr *hdr3 = bp->b_addr; |
288 | 288 | ||
289 | if (!xfs_dir3_data_verify(bp)) { | 289 | if (!xfs_dir3_data_verify(bp)) { |
290 | xfs_buf_ioerror(bp, EFSCORRUPTED); | 290 | xfs_buf_ioerror(bp, -EFSCORRUPTED); |
291 | xfs_verifier_error(bp); | 291 | xfs_verifier_error(bp); |
292 | return; | 292 | return; |
293 | } | 293 | } |
diff --git a/fs/xfs/xfs_dir2_leaf.c b/fs/xfs/libxfs/xfs_dir2_leaf.c index fb0aad4440c1..a19174eb3cb2 100644 --- a/fs/xfs/xfs_dir2_leaf.c +++ b/fs/xfs/libxfs/xfs_dir2_leaf.c | |||
@@ -183,9 +183,9 @@ __read_verify( | |||
183 | 183 | ||
184 | if (xfs_sb_version_hascrc(&mp->m_sb) && | 184 | if (xfs_sb_version_hascrc(&mp->m_sb) && |
185 | !xfs_buf_verify_cksum(bp, XFS_DIR3_LEAF_CRC_OFF)) | 185 | !xfs_buf_verify_cksum(bp, XFS_DIR3_LEAF_CRC_OFF)) |
186 | xfs_buf_ioerror(bp, EFSBADCRC); | 186 | xfs_buf_ioerror(bp, -EFSBADCRC); |
187 | else if (!xfs_dir3_leaf_verify(bp, magic)) | 187 | else if (!xfs_dir3_leaf_verify(bp, magic)) |
188 | xfs_buf_ioerror(bp, EFSCORRUPTED); | 188 | xfs_buf_ioerror(bp, -EFSCORRUPTED); |
189 | 189 | ||
190 | if (bp->b_error) | 190 | if (bp->b_error) |
191 | xfs_verifier_error(bp); | 191 | xfs_verifier_error(bp); |
@@ -201,7 +201,7 @@ __write_verify( | |||
201 | struct xfs_dir3_leaf_hdr *hdr3 = bp->b_addr; | 201 | struct xfs_dir3_leaf_hdr *hdr3 = bp->b_addr; |
202 | 202 | ||
203 | if (!xfs_dir3_leaf_verify(bp, magic)) { | 203 | if (!xfs_dir3_leaf_verify(bp, magic)) { |
204 | xfs_buf_ioerror(bp, EFSCORRUPTED); | 204 | xfs_buf_ioerror(bp, -EFSCORRUPTED); |
205 | xfs_verifier_error(bp); | 205 | xfs_verifier_error(bp); |
206 | return; | 206 | return; |
207 | } | 207 | } |
@@ -731,7 +731,7 @@ xfs_dir2_leaf_addname( | |||
731 | if ((args->op_flags & XFS_DA_OP_JUSTCHECK) || | 731 | if ((args->op_flags & XFS_DA_OP_JUSTCHECK) || |
732 | args->total == 0) { | 732 | args->total == 0) { |
733 | xfs_trans_brelse(tp, lbp); | 733 | xfs_trans_brelse(tp, lbp); |
734 | return XFS_ERROR(ENOSPC); | 734 | return -ENOSPC; |
735 | } | 735 | } |
736 | /* | 736 | /* |
737 | * Convert to node form. | 737 | * Convert to node form. |
@@ -755,7 +755,7 @@ xfs_dir2_leaf_addname( | |||
755 | */ | 755 | */ |
756 | if (args->op_flags & XFS_DA_OP_JUSTCHECK) { | 756 | if (args->op_flags & XFS_DA_OP_JUSTCHECK) { |
757 | xfs_trans_brelse(tp, lbp); | 757 | xfs_trans_brelse(tp, lbp); |
758 | return use_block == -1 ? XFS_ERROR(ENOSPC) : 0; | 758 | return use_block == -1 ? -ENOSPC : 0; |
759 | } | 759 | } |
760 | /* | 760 | /* |
761 | * If no allocations are allowed, return now before we've | 761 | * If no allocations are allowed, return now before we've |
@@ -763,7 +763,7 @@ xfs_dir2_leaf_addname( | |||
763 | */ | 763 | */ |
764 | if (args->total == 0 && use_block == -1) { | 764 | if (args->total == 0 && use_block == -1) { |
765 | xfs_trans_brelse(tp, lbp); | 765 | xfs_trans_brelse(tp, lbp); |
766 | return XFS_ERROR(ENOSPC); | 766 | return -ENOSPC; |
767 | } | 767 | } |
768 | /* | 768 | /* |
769 | * Need to compact the leaf entries, removing stale ones. | 769 | * Need to compact the leaf entries, removing stale ones. |
@@ -1198,7 +1198,7 @@ xfs_dir2_leaf_lookup( | |||
1198 | error = xfs_dir_cilookup_result(args, dep->name, dep->namelen); | 1198 | error = xfs_dir_cilookup_result(args, dep->name, dep->namelen); |
1199 | xfs_trans_brelse(tp, dbp); | 1199 | xfs_trans_brelse(tp, dbp); |
1200 | xfs_trans_brelse(tp, lbp); | 1200 | xfs_trans_brelse(tp, lbp); |
1201 | return XFS_ERROR(error); | 1201 | return error; |
1202 | } | 1202 | } |
1203 | 1203 | ||
1204 | /* | 1204 | /* |
@@ -1327,13 +1327,13 @@ xfs_dir2_leaf_lookup_int( | |||
1327 | return 0; | 1327 | return 0; |
1328 | } | 1328 | } |
1329 | /* | 1329 | /* |
1330 | * No match found, return ENOENT. | 1330 | * No match found, return -ENOENT. |
1331 | */ | 1331 | */ |
1332 | ASSERT(cidb == -1); | 1332 | ASSERT(cidb == -1); |
1333 | if (dbp) | 1333 | if (dbp) |
1334 | xfs_trans_brelse(tp, dbp); | 1334 | xfs_trans_brelse(tp, dbp); |
1335 | xfs_trans_brelse(tp, lbp); | 1335 | xfs_trans_brelse(tp, lbp); |
1336 | return XFS_ERROR(ENOENT); | 1336 | return -ENOENT; |
1337 | } | 1337 | } |
1338 | 1338 | ||
1339 | /* | 1339 | /* |
@@ -1440,7 +1440,7 @@ xfs_dir2_leaf_removename( | |||
1440 | * Just go on, returning success, leaving the | 1440 | * Just go on, returning success, leaving the |
1441 | * empty block in place. | 1441 | * empty block in place. |
1442 | */ | 1442 | */ |
1443 | if (error == ENOSPC && args->total == 0) | 1443 | if (error == -ENOSPC && args->total == 0) |
1444 | error = 0; | 1444 | error = 0; |
1445 | xfs_dir3_leaf_check(dp, lbp); | 1445 | xfs_dir3_leaf_check(dp, lbp); |
1446 | return error; | 1446 | return error; |
@@ -1641,7 +1641,7 @@ xfs_dir2_leaf_trim_data( | |||
1641 | * Get rid of the data block. | 1641 | * Get rid of the data block. |
1642 | */ | 1642 | */ |
1643 | if ((error = xfs_dir2_shrink_inode(args, db, dbp))) { | 1643 | if ((error = xfs_dir2_shrink_inode(args, db, dbp))) { |
1644 | ASSERT(error != ENOSPC); | 1644 | ASSERT(error != -ENOSPC); |
1645 | xfs_trans_brelse(tp, dbp); | 1645 | xfs_trans_brelse(tp, dbp); |
1646 | return error; | 1646 | return error; |
1647 | } | 1647 | } |
@@ -1815,7 +1815,7 @@ xfs_dir2_node_to_leaf( | |||
1815 | * punching out the middle of an extent, and this is an | 1815 | * punching out the middle of an extent, and this is an |
1816 | * isolated block. | 1816 | * isolated block. |
1817 | */ | 1817 | */ |
1818 | ASSERT(error != ENOSPC); | 1818 | ASSERT(error != -ENOSPC); |
1819 | return error; | 1819 | return error; |
1820 | } | 1820 | } |
1821 | fbp = NULL; | 1821 | fbp = NULL; |
diff --git a/fs/xfs/xfs_dir2_node.c b/fs/xfs/libxfs/xfs_dir2_node.c index da43d304fca2..2ae6ac2c11ae 100644 --- a/fs/xfs/xfs_dir2_node.c +++ b/fs/xfs/libxfs/xfs_dir2_node.c | |||
@@ -117,9 +117,9 @@ xfs_dir3_free_read_verify( | |||
117 | 117 | ||
118 | if (xfs_sb_version_hascrc(&mp->m_sb) && | 118 | if (xfs_sb_version_hascrc(&mp->m_sb) && |
119 | !xfs_buf_verify_cksum(bp, XFS_DIR3_FREE_CRC_OFF)) | 119 | !xfs_buf_verify_cksum(bp, XFS_DIR3_FREE_CRC_OFF)) |
120 | xfs_buf_ioerror(bp, EFSBADCRC); | 120 | xfs_buf_ioerror(bp, -EFSBADCRC); |
121 | else if (!xfs_dir3_free_verify(bp)) | 121 | else if (!xfs_dir3_free_verify(bp)) |
122 | xfs_buf_ioerror(bp, EFSCORRUPTED); | 122 | xfs_buf_ioerror(bp, -EFSCORRUPTED); |
123 | 123 | ||
124 | if (bp->b_error) | 124 | if (bp->b_error) |
125 | xfs_verifier_error(bp); | 125 | xfs_verifier_error(bp); |
@@ -134,7 +134,7 @@ xfs_dir3_free_write_verify( | |||
134 | struct xfs_dir3_blk_hdr *hdr3 = bp->b_addr; | 134 | struct xfs_dir3_blk_hdr *hdr3 = bp->b_addr; |
135 | 135 | ||
136 | if (!xfs_dir3_free_verify(bp)) { | 136 | if (!xfs_dir3_free_verify(bp)) { |
137 | xfs_buf_ioerror(bp, EFSCORRUPTED); | 137 | xfs_buf_ioerror(bp, -EFSCORRUPTED); |
138 | xfs_verifier_error(bp); | 138 | xfs_verifier_error(bp); |
139 | return; | 139 | return; |
140 | } | 140 | } |
@@ -406,7 +406,7 @@ xfs_dir2_leafn_add( | |||
406 | * into other peoples memory | 406 | * into other peoples memory |
407 | */ | 407 | */ |
408 | if (index < 0) | 408 | if (index < 0) |
409 | return XFS_ERROR(EFSCORRUPTED); | 409 | return -EFSCORRUPTED; |
410 | 410 | ||
411 | /* | 411 | /* |
412 | * If there are already the maximum number of leaf entries in | 412 | * If there are already the maximum number of leaf entries in |
@@ -417,7 +417,7 @@ xfs_dir2_leafn_add( | |||
417 | 417 | ||
418 | if (leafhdr.count == dp->d_ops->leaf_max_ents(args->geo)) { | 418 | if (leafhdr.count == dp->d_ops->leaf_max_ents(args->geo)) { |
419 | if (!leafhdr.stale) | 419 | if (!leafhdr.stale) |
420 | return XFS_ERROR(ENOSPC); | 420 | return -ENOSPC; |
421 | compact = leafhdr.stale > 1; | 421 | compact = leafhdr.stale > 1; |
422 | } else | 422 | } else |
423 | compact = 0; | 423 | compact = 0; |
@@ -629,7 +629,7 @@ xfs_dir2_leafn_lookup_for_addname( | |||
629 | XFS_ERRLEVEL_LOW, mp); | 629 | XFS_ERRLEVEL_LOW, mp); |
630 | if (curfdb != newfdb) | 630 | if (curfdb != newfdb) |
631 | xfs_trans_brelse(tp, curbp); | 631 | xfs_trans_brelse(tp, curbp); |
632 | return XFS_ERROR(EFSCORRUPTED); | 632 | return -EFSCORRUPTED; |
633 | } | 633 | } |
634 | curfdb = newfdb; | 634 | curfdb = newfdb; |
635 | if (be16_to_cpu(bests[fi]) >= length) | 635 | if (be16_to_cpu(bests[fi]) >= length) |
@@ -660,7 +660,7 @@ out: | |||
660 | * Return the index, that will be the insertion point. | 660 | * Return the index, that will be the insertion point. |
661 | */ | 661 | */ |
662 | *indexp = index; | 662 | *indexp = index; |
663 | return XFS_ERROR(ENOENT); | 663 | return -ENOENT; |
664 | } | 664 | } |
665 | 665 | ||
666 | /* | 666 | /* |
@@ -789,7 +789,7 @@ xfs_dir2_leafn_lookup_for_entry( | |||
789 | curbp->b_ops = &xfs_dir3_data_buf_ops; | 789 | curbp->b_ops = &xfs_dir3_data_buf_ops; |
790 | xfs_trans_buf_set_type(tp, curbp, XFS_BLFT_DIR_DATA_BUF); | 790 | xfs_trans_buf_set_type(tp, curbp, XFS_BLFT_DIR_DATA_BUF); |
791 | if (cmp == XFS_CMP_EXACT) | 791 | if (cmp == XFS_CMP_EXACT) |
792 | return XFS_ERROR(EEXIST); | 792 | return -EEXIST; |
793 | } | 793 | } |
794 | } | 794 | } |
795 | ASSERT(index == leafhdr.count || (args->op_flags & XFS_DA_OP_OKNOENT)); | 795 | ASSERT(index == leafhdr.count || (args->op_flags & XFS_DA_OP_OKNOENT)); |
@@ -812,7 +812,7 @@ xfs_dir2_leafn_lookup_for_entry( | |||
812 | state->extravalid = 0; | 812 | state->extravalid = 0; |
813 | } | 813 | } |
814 | *indexp = index; | 814 | *indexp = index; |
815 | return XFS_ERROR(ENOENT); | 815 | return -ENOENT; |
816 | } | 816 | } |
817 | 817 | ||
818 | /* | 818 | /* |
@@ -1133,7 +1133,7 @@ xfs_dir3_data_block_free( | |||
1133 | if (error == 0) { | 1133 | if (error == 0) { |
1134 | fbp = NULL; | 1134 | fbp = NULL; |
1135 | logfree = 0; | 1135 | logfree = 0; |
1136 | } else if (error != ENOSPC || args->total != 0) | 1136 | } else if (error != -ENOSPC || args->total != 0) |
1137 | return error; | 1137 | return error; |
1138 | /* | 1138 | /* |
1139 | * It's possible to get ENOSPC if there is no | 1139 | * It's possible to get ENOSPC if there is no |
@@ -1287,7 +1287,7 @@ xfs_dir2_leafn_remove( | |||
1287 | * In this case just drop the buffer and some one else | 1287 | * In this case just drop the buffer and some one else |
1288 | * will eventually get rid of the empty block. | 1288 | * will eventually get rid of the empty block. |
1289 | */ | 1289 | */ |
1290 | else if (!(error == ENOSPC && args->total == 0)) | 1290 | else if (!(error == -ENOSPC && args->total == 0)) |
1291 | return error; | 1291 | return error; |
1292 | } | 1292 | } |
1293 | /* | 1293 | /* |
@@ -1599,7 +1599,7 @@ xfs_dir2_node_addname( | |||
1599 | error = xfs_da3_node_lookup_int(state, &rval); | 1599 | error = xfs_da3_node_lookup_int(state, &rval); |
1600 | if (error) | 1600 | if (error) |
1601 | rval = error; | 1601 | rval = error; |
1602 | if (rval != ENOENT) { | 1602 | if (rval != -ENOENT) { |
1603 | goto done; | 1603 | goto done; |
1604 | } | 1604 | } |
1605 | /* | 1605 | /* |
@@ -1628,7 +1628,7 @@ xfs_dir2_node_addname( | |||
1628 | * It didn't work, we need to split the leaf block. | 1628 | * It didn't work, we need to split the leaf block. |
1629 | */ | 1629 | */ |
1630 | if (args->total == 0) { | 1630 | if (args->total == 0) { |
1631 | ASSERT(rval == ENOSPC); | 1631 | ASSERT(rval == -ENOSPC); |
1632 | goto done; | 1632 | goto done; |
1633 | } | 1633 | } |
1634 | /* | 1634 | /* |
@@ -1815,7 +1815,7 @@ xfs_dir2_node_addname_int( | |||
1815 | * Not allowed to allocate, return failure. | 1815 | * Not allowed to allocate, return failure. |
1816 | */ | 1816 | */ |
1817 | if ((args->op_flags & XFS_DA_OP_JUSTCHECK) || args->total == 0) | 1817 | if ((args->op_flags & XFS_DA_OP_JUSTCHECK) || args->total == 0) |
1818 | return XFS_ERROR(ENOSPC); | 1818 | return -ENOSPC; |
1819 | 1819 | ||
1820 | /* | 1820 | /* |
1821 | * Allocate and initialize the new data block. | 1821 | * Allocate and initialize the new data block. |
@@ -1876,7 +1876,7 @@ xfs_dir2_node_addname_int( | |||
1876 | } | 1876 | } |
1877 | XFS_ERROR_REPORT("xfs_dir2_node_addname_int", | 1877 | XFS_ERROR_REPORT("xfs_dir2_node_addname_int", |
1878 | XFS_ERRLEVEL_LOW, mp); | 1878 | XFS_ERRLEVEL_LOW, mp); |
1879 | return XFS_ERROR(EFSCORRUPTED); | 1879 | return -EFSCORRUPTED; |
1880 | } | 1880 | } |
1881 | 1881 | ||
1882 | /* | 1882 | /* |
@@ -2042,8 +2042,8 @@ xfs_dir2_node_lookup( | |||
2042 | error = xfs_da3_node_lookup_int(state, &rval); | 2042 | error = xfs_da3_node_lookup_int(state, &rval); |
2043 | if (error) | 2043 | if (error) |
2044 | rval = error; | 2044 | rval = error; |
2045 | else if (rval == ENOENT && args->cmpresult == XFS_CMP_CASE) { | 2045 | else if (rval == -ENOENT && args->cmpresult == XFS_CMP_CASE) { |
2046 | /* If a CI match, dup the actual name and return EEXIST */ | 2046 | /* If a CI match, dup the actual name and return -EEXIST */ |
2047 | xfs_dir2_data_entry_t *dep; | 2047 | xfs_dir2_data_entry_t *dep; |
2048 | 2048 | ||
2049 | dep = (xfs_dir2_data_entry_t *) | 2049 | dep = (xfs_dir2_data_entry_t *) |
@@ -2096,7 +2096,7 @@ xfs_dir2_node_removename( | |||
2096 | goto out_free; | 2096 | goto out_free; |
2097 | 2097 | ||
2098 | /* Didn't find it, upper layer screwed up. */ | 2098 | /* Didn't find it, upper layer screwed up. */ |
2099 | if (rval != EEXIST) { | 2099 | if (rval != -EEXIST) { |
2100 | error = rval; | 2100 | error = rval; |
2101 | goto out_free; | 2101 | goto out_free; |
2102 | } | 2102 | } |
@@ -2169,7 +2169,7 @@ xfs_dir2_node_replace( | |||
2169 | * It should be found, since the vnodeops layer has looked it up | 2169 | * It should be found, since the vnodeops layer has looked it up |
2170 | * and locked it. But paranoia is good. | 2170 | * and locked it. But paranoia is good. |
2171 | */ | 2171 | */ |
2172 | if (rval == EEXIST) { | 2172 | if (rval == -EEXIST) { |
2173 | struct xfs_dir2_leaf_entry *ents; | 2173 | struct xfs_dir2_leaf_entry *ents; |
2174 | /* | 2174 | /* |
2175 | * Find the leaf entry. | 2175 | * Find the leaf entry. |
@@ -2272,7 +2272,7 @@ xfs_dir2_node_trim_free( | |||
2272 | * space reservation, when breaking up an extent into two | 2272 | * space reservation, when breaking up an extent into two |
2273 | * pieces. This is the last block of an extent. | 2273 | * pieces. This is the last block of an extent. |
2274 | */ | 2274 | */ |
2275 | ASSERT(error != ENOSPC); | 2275 | ASSERT(error != -ENOSPC); |
2276 | xfs_trans_brelse(tp, bp); | 2276 | xfs_trans_brelse(tp, bp); |
2277 | return error; | 2277 | return error; |
2278 | } | 2278 | } |
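
Several hunks above (xfs_dir2_leaf_removename, xfs_dir3_data_block_free, xfs_dir2_leafn_remove) keep an existing special case under the new sign convention: if freeing a now-empty block fails with -ENOSPC and the caller holds no space reservation (args->total == 0), the error is swallowed and the empty block is simply left in place for later reclaim. A compressed sketch of that decision, with hypothetical helpers:

	#include <errno.h>
	#include <stdio.h>

	/* Pretend block-shrink helper: returns 0 or a negative errno. */
	static int demo_shrink_inode(int simulated_error)
	{
		return simulated_error;
	}

	/*
	 * The "no reservation" case: -ENOSPC with total == 0 is not fatal;
	 * the empty block stays around for someone else to get rid of.
	 */
	static int demo_remove_block(int total, int simulated_error)
	{
		int error = demo_shrink_inode(simulated_error);

		if (error == -ENOSPC && total == 0)	/* was: error == ENOSPC */
			error = 0;
		return error;
	}

	int main(void)
	{
		printf("%d\n", demo_remove_block(0, -ENOSPC));	/* prints 0 */
		return 0;
	}
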
diff --git a/fs/xfs/xfs_dir2_priv.h b/fs/xfs/libxfs/xfs_dir2_priv.h index 27ce0794d196..27ce0794d196 100644 --- a/fs/xfs/xfs_dir2_priv.h +++ b/fs/xfs/libxfs/xfs_dir2_priv.h | |||
diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/libxfs/xfs_dir2_sf.c index 53c3be619db5..5079e051ef08 100644 --- a/fs/xfs/xfs_dir2_sf.c +++ b/fs/xfs/libxfs/xfs_dir2_sf.c | |||
@@ -51,10 +51,9 @@ static void xfs_dir2_sf_check(xfs_da_args_t *args); | |||
51 | #else | 51 | #else |
52 | #define xfs_dir2_sf_check(args) | 52 | #define xfs_dir2_sf_check(args) |
53 | #endif /* DEBUG */ | 53 | #endif /* DEBUG */ |
54 | #if XFS_BIG_INUMS | 54 | |
55 | static void xfs_dir2_sf_toino4(xfs_da_args_t *args); | 55 | static void xfs_dir2_sf_toino4(xfs_da_args_t *args); |
56 | static void xfs_dir2_sf_toino8(xfs_da_args_t *args); | 56 | static void xfs_dir2_sf_toino8(xfs_da_args_t *args); |
57 | #endif /* XFS_BIG_INUMS */ | ||
58 | 57 | ||
59 | /* | 58 | /* |
60 | * Given a block directory (dp/block), calculate its size as a shortform (sf) | 59 | * Given a block directory (dp/block), calculate its size as a shortform (sf) |
@@ -117,10 +116,10 @@ xfs_dir2_block_sfsize( | |||
117 | isdotdot = | 116 | isdotdot = |
118 | dep->namelen == 2 && | 117 | dep->namelen == 2 && |
119 | dep->name[0] == '.' && dep->name[1] == '.'; | 118 | dep->name[0] == '.' && dep->name[1] == '.'; |
120 | #if XFS_BIG_INUMS | 119 | |
121 | if (!isdot) | 120 | if (!isdot) |
122 | i8count += be64_to_cpu(dep->inumber) > XFS_DIR2_MAX_SHORT_INUM; | 121 | i8count += be64_to_cpu(dep->inumber) > XFS_DIR2_MAX_SHORT_INUM; |
123 | #endif | 122 | |
124 | /* take into account the file type field */ | 123 | /* take into account the file type field */ |
125 | if (!isdot && !isdotdot) { | 124 | if (!isdot && !isdotdot) { |
126 | count++; | 125 | count++; |
@@ -251,7 +250,7 @@ xfs_dir2_block_to_sf( | |||
251 | logflags = XFS_ILOG_CORE; | 250 | logflags = XFS_ILOG_CORE; |
252 | error = xfs_dir2_shrink_inode(args, args->geo->datablk, bp); | 251 | error = xfs_dir2_shrink_inode(args, args->geo->datablk, bp); |
253 | if (error) { | 252 | if (error) { |
254 | ASSERT(error != ENOSPC); | 253 | ASSERT(error != -ENOSPC); |
255 | goto out; | 254 | goto out; |
256 | } | 255 | } |
257 | 256 | ||
@@ -299,7 +298,7 @@ xfs_dir2_sf_addname( | |||
299 | 298 | ||
300 | trace_xfs_dir2_sf_addname(args); | 299 | trace_xfs_dir2_sf_addname(args); |
301 | 300 | ||
302 | ASSERT(xfs_dir2_sf_lookup(args) == ENOENT); | 301 | ASSERT(xfs_dir2_sf_lookup(args) == -ENOENT); |
303 | dp = args->dp; | 302 | dp = args->dp; |
304 | ASSERT(dp->i_df.if_flags & XFS_IFINLINE); | 303 | ASSERT(dp->i_df.if_flags & XFS_IFINLINE); |
305 | /* | 304 | /* |
@@ -307,7 +306,7 @@ xfs_dir2_sf_addname( | |||
307 | */ | 306 | */ |
308 | if (dp->i_d.di_size < offsetof(xfs_dir2_sf_hdr_t, parent)) { | 307 | if (dp->i_d.di_size < offsetof(xfs_dir2_sf_hdr_t, parent)) { |
309 | ASSERT(XFS_FORCED_SHUTDOWN(dp->i_mount)); | 308 | ASSERT(XFS_FORCED_SHUTDOWN(dp->i_mount)); |
310 | return XFS_ERROR(EIO); | 309 | return -EIO; |
311 | } | 310 | } |
312 | ASSERT(dp->i_df.if_bytes == dp->i_d.di_size); | 311 | ASSERT(dp->i_df.if_bytes == dp->i_d.di_size); |
313 | ASSERT(dp->i_df.if_u1.if_data != NULL); | 312 | ASSERT(dp->i_df.if_u1.if_data != NULL); |
@@ -318,7 +317,7 @@ xfs_dir2_sf_addname( | |||
318 | */ | 317 | */ |
319 | incr_isize = dp->d_ops->sf_entsize(sfp, args->namelen); | 318 | incr_isize = dp->d_ops->sf_entsize(sfp, args->namelen); |
320 | objchange = 0; | 319 | objchange = 0; |
321 | #if XFS_BIG_INUMS | 320 | |
322 | /* | 321 | /* |
323 | * Do we have to change to 8 byte inodes? | 322 | * Do we have to change to 8 byte inodes? |
324 | */ | 323 | */ |
@@ -332,7 +331,7 @@ xfs_dir2_sf_addname( | |||
332 | (uint)sizeof(xfs_dir2_ino4_t)); | 331 | (uint)sizeof(xfs_dir2_ino4_t)); |
333 | objchange = 1; | 332 | objchange = 1; |
334 | } | 333 | } |
335 | #endif | 334 | |
336 | new_isize = (int)dp->i_d.di_size + incr_isize; | 335 | new_isize = (int)dp->i_d.di_size + incr_isize; |
337 | /* | 336 | /* |
338 | * Won't fit as shortform any more (due to size), | 337 | * Won't fit as shortform any more (due to size), |
@@ -345,7 +344,7 @@ xfs_dir2_sf_addname( | |||
345 | * Just checking or no space reservation, it doesn't fit. | 344 | * Just checking or no space reservation, it doesn't fit. |
346 | */ | 345 | */ |
347 | if ((args->op_flags & XFS_DA_OP_JUSTCHECK) || args->total == 0) | 346 | if ((args->op_flags & XFS_DA_OP_JUSTCHECK) || args->total == 0) |
348 | return XFS_ERROR(ENOSPC); | 347 | return -ENOSPC; |
349 | /* | 348 | /* |
350 | * Convert to block form then add the name. | 349 | * Convert to block form then add the name. |
351 | */ | 350 | */ |
@@ -370,10 +369,8 @@ xfs_dir2_sf_addname( | |||
370 | */ | 369 | */ |
371 | else { | 370 | else { |
372 | ASSERT(pick == 2); | 371 | ASSERT(pick == 2); |
373 | #if XFS_BIG_INUMS | ||
374 | if (objchange) | 372 | if (objchange) |
375 | xfs_dir2_sf_toino8(args); | 373 | xfs_dir2_sf_toino8(args); |
376 | #endif | ||
377 | xfs_dir2_sf_addname_hard(args, objchange, new_isize); | 374 | xfs_dir2_sf_addname_hard(args, objchange, new_isize); |
378 | } | 375 | } |
379 | xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_DDATA); | 376 | xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_DDATA); |
@@ -425,10 +422,8 @@ xfs_dir2_sf_addname_easy( | |||
425 | * Update the header and inode. | 422 | * Update the header and inode. |
426 | */ | 423 | */ |
427 | sfp->count++; | 424 | sfp->count++; |
428 | #if XFS_BIG_INUMS | ||
429 | if (args->inumber > XFS_DIR2_MAX_SHORT_INUM) | 425 | if (args->inumber > XFS_DIR2_MAX_SHORT_INUM) |
430 | sfp->i8count++; | 426 | sfp->i8count++; |
431 | #endif | ||
432 | dp->i_d.di_size = new_isize; | 427 | dp->i_d.di_size = new_isize; |
433 | xfs_dir2_sf_check(args); | 428 | xfs_dir2_sf_check(args); |
434 | } | 429 | } |
@@ -516,10 +511,8 @@ xfs_dir2_sf_addname_hard( | |||
516 | dp->d_ops->sf_put_ino(sfp, sfep, args->inumber); | 511 | dp->d_ops->sf_put_ino(sfp, sfep, args->inumber); |
517 | dp->d_ops->sf_put_ftype(sfep, args->filetype); | 512 | dp->d_ops->sf_put_ftype(sfep, args->filetype); |
518 | sfp->count++; | 513 | sfp->count++; |
519 | #if XFS_BIG_INUMS | ||
520 | if (args->inumber > XFS_DIR2_MAX_SHORT_INUM && !objchange) | 514 | if (args->inumber > XFS_DIR2_MAX_SHORT_INUM && !objchange) |
521 | sfp->i8count++; | 515 | sfp->i8count++; |
522 | #endif | ||
523 | /* | 516 | /* |
524 | * If there's more left to copy, do that. | 517 | * If there's more left to copy, do that. |
525 | */ | 518 | */ |
@@ -593,13 +586,8 @@ xfs_dir2_sf_addname_pick( | |||
593 | /* | 586 | /* |
594 | * If changing the inode number size, do it the hard way. | 587 | * If changing the inode number size, do it the hard way. |
595 | */ | 588 | */ |
596 | #if XFS_BIG_INUMS | 589 | if (objchange) |
597 | if (objchange) { | ||
598 | return 2; | 590 | return 2; |
599 | } | ||
600 | #else | ||
601 | ASSERT(objchange == 0); | ||
602 | #endif | ||
603 | /* | 591 | /* |
604 | * If it won't fit at the end then do it the hard way (use the hole). | 592 | * If it won't fit at the end then do it the hard way (use the hole). |
605 | */ | 593 | */ |
@@ -650,7 +638,6 @@ xfs_dir2_sf_check( | |||
650 | ASSERT(dp->d_ops->sf_get_ftype(sfep) < XFS_DIR3_FT_MAX); | 638 | ASSERT(dp->d_ops->sf_get_ftype(sfep) < XFS_DIR3_FT_MAX); |
651 | } | 639 | } |
652 | ASSERT(i8count == sfp->i8count); | 640 | ASSERT(i8count == sfp->i8count); |
653 | ASSERT(XFS_BIG_INUMS || i8count == 0); | ||
654 | ASSERT((char *)sfep - (char *)sfp == dp->i_d.di_size); | 641 | ASSERT((char *)sfep - (char *)sfp == dp->i_d.di_size); |
655 | ASSERT(offset + | 642 | ASSERT(offset + |
656 | (sfp->count + 2) * (uint)sizeof(xfs_dir2_leaf_entry_t) + | 643 | (sfp->count + 2) * (uint)sizeof(xfs_dir2_leaf_entry_t) + |
@@ -738,7 +725,7 @@ xfs_dir2_sf_lookup( | |||
738 | */ | 725 | */ |
739 | if (dp->i_d.di_size < offsetof(xfs_dir2_sf_hdr_t, parent)) { | 726 | if (dp->i_d.di_size < offsetof(xfs_dir2_sf_hdr_t, parent)) { |
740 | ASSERT(XFS_FORCED_SHUTDOWN(dp->i_mount)); | 727 | ASSERT(XFS_FORCED_SHUTDOWN(dp->i_mount)); |
741 | return XFS_ERROR(EIO); | 728 | return -EIO; |
742 | } | 729 | } |
743 | ASSERT(dp->i_df.if_bytes == dp->i_d.di_size); | 730 | ASSERT(dp->i_df.if_bytes == dp->i_d.di_size); |
744 | ASSERT(dp->i_df.if_u1.if_data != NULL); | 731 | ASSERT(dp->i_df.if_u1.if_data != NULL); |
@@ -751,7 +738,7 @@ xfs_dir2_sf_lookup( | |||
751 | args->inumber = dp->i_ino; | 738 | args->inumber = dp->i_ino; |
752 | args->cmpresult = XFS_CMP_EXACT; | 739 | args->cmpresult = XFS_CMP_EXACT; |
753 | args->filetype = XFS_DIR3_FT_DIR; | 740 | args->filetype = XFS_DIR3_FT_DIR; |
754 | return XFS_ERROR(EEXIST); | 741 | return -EEXIST; |
755 | } | 742 | } |
756 | /* | 743 | /* |
757 | * Special case for .. | 744 | * Special case for .. |
@@ -761,7 +748,7 @@ xfs_dir2_sf_lookup( | |||
761 | args->inumber = dp->d_ops->sf_get_parent_ino(sfp); | 748 | args->inumber = dp->d_ops->sf_get_parent_ino(sfp); |
762 | args->cmpresult = XFS_CMP_EXACT; | 749 | args->cmpresult = XFS_CMP_EXACT; |
763 | args->filetype = XFS_DIR3_FT_DIR; | 750 | args->filetype = XFS_DIR3_FT_DIR; |
764 | return XFS_ERROR(EEXIST); | 751 | return -EEXIST; |
765 | } | 752 | } |
766 | /* | 753 | /* |
767 | * Loop over all the entries trying to match ours. | 754 | * Loop over all the entries trying to match ours. |
@@ -781,20 +768,20 @@ xfs_dir2_sf_lookup( | |||
781 | args->inumber = dp->d_ops->sf_get_ino(sfp, sfep); | 768 | args->inumber = dp->d_ops->sf_get_ino(sfp, sfep); |
782 | args->filetype = dp->d_ops->sf_get_ftype(sfep); | 769 | args->filetype = dp->d_ops->sf_get_ftype(sfep); |
783 | if (cmp == XFS_CMP_EXACT) | 770 | if (cmp == XFS_CMP_EXACT) |
784 | return XFS_ERROR(EEXIST); | 771 | return -EEXIST; |
785 | ci_sfep = sfep; | 772 | ci_sfep = sfep; |
786 | } | 773 | } |
787 | } | 774 | } |
788 | ASSERT(args->op_flags & XFS_DA_OP_OKNOENT); | 775 | ASSERT(args->op_flags & XFS_DA_OP_OKNOENT); |
789 | /* | 776 | /* |
790 | * Here, we can only be doing a lookup (not a rename or replace). | 777 | * Here, we can only be doing a lookup (not a rename or replace). |
791 | * If a case-insensitive match was not found, return ENOENT. | 778 | * If a case-insensitive match was not found, return -ENOENT. |
792 | */ | 779 | */ |
793 | if (!ci_sfep) | 780 | if (!ci_sfep) |
794 | return XFS_ERROR(ENOENT); | 781 | return -ENOENT; |
795 | /* otherwise process the CI match as required by the caller */ | 782 | /* otherwise process the CI match as required by the caller */ |
796 | error = xfs_dir_cilookup_result(args, ci_sfep->name, ci_sfep->namelen); | 783 | error = xfs_dir_cilookup_result(args, ci_sfep->name, ci_sfep->namelen); |
797 | return XFS_ERROR(error); | 784 | return error; |
798 | } | 785 | } |
799 | 786 | ||
800 | /* | 787 | /* |
@@ -824,7 +811,7 @@ xfs_dir2_sf_removename( | |||
824 | */ | 811 | */ |
825 | if (oldsize < offsetof(xfs_dir2_sf_hdr_t, parent)) { | 812 | if (oldsize < offsetof(xfs_dir2_sf_hdr_t, parent)) { |
826 | ASSERT(XFS_FORCED_SHUTDOWN(dp->i_mount)); | 813 | ASSERT(XFS_FORCED_SHUTDOWN(dp->i_mount)); |
827 | return XFS_ERROR(EIO); | 814 | return -EIO; |
828 | } | 815 | } |
829 | ASSERT(dp->i_df.if_bytes == oldsize); | 816 | ASSERT(dp->i_df.if_bytes == oldsize); |
830 | ASSERT(dp->i_df.if_u1.if_data != NULL); | 817 | ASSERT(dp->i_df.if_u1.if_data != NULL); |
@@ -847,7 +834,7 @@ xfs_dir2_sf_removename( | |||
847 | * Didn't find it. | 834 | * Didn't find it. |
848 | */ | 835 | */ |
849 | if (i == sfp->count) | 836 | if (i == sfp->count) |
850 | return XFS_ERROR(ENOENT); | 837 | return -ENOENT; |
851 | /* | 838 | /* |
852 | * Calculate sizes. | 839 | * Calculate sizes. |
853 | */ | 840 | */ |
@@ -870,7 +857,6 @@ xfs_dir2_sf_removename( | |||
870 | */ | 857 | */ |
871 | xfs_idata_realloc(dp, newsize - oldsize, XFS_DATA_FORK); | 858 | xfs_idata_realloc(dp, newsize - oldsize, XFS_DATA_FORK); |
872 | sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data; | 859 | sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data; |
873 | #if XFS_BIG_INUMS | ||
874 | /* | 860 | /* |
875 | * Are we changing inode number size? | 861 | * Are we changing inode number size? |
876 | */ | 862 | */ |
@@ -880,7 +866,6 @@ xfs_dir2_sf_removename( | |||
880 | else | 866 | else |
881 | sfp->i8count--; | 867 | sfp->i8count--; |
882 | } | 868 | } |
883 | #endif | ||
884 | xfs_dir2_sf_check(args); | 869 | xfs_dir2_sf_check(args); |
885 | xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_DDATA); | 870 | xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_DDATA); |
886 | return 0; | 871 | return 0; |
@@ -895,12 +880,8 @@ xfs_dir2_sf_replace( | |||
895 | { | 880 | { |
896 | xfs_inode_t *dp; /* incore directory inode */ | 881 | xfs_inode_t *dp; /* incore directory inode */ |
897 | int i; /* entry index */ | 882 | int i; /* entry index */ |
898 | #if XFS_BIG_INUMS || defined(DEBUG) | ||
899 | xfs_ino_t ino=0; /* entry old inode number */ | 883 | xfs_ino_t ino=0; /* entry old inode number */ |
900 | #endif | ||
901 | #if XFS_BIG_INUMS | ||
902 | int i8elevated; /* sf_toino8 set i8count=1 */ | 884 | int i8elevated; /* sf_toino8 set i8count=1 */ |
903 | #endif | ||
904 | xfs_dir2_sf_entry_t *sfep; /* shortform directory entry */ | 885 | xfs_dir2_sf_entry_t *sfep; /* shortform directory entry */ |
905 | xfs_dir2_sf_hdr_t *sfp; /* shortform structure */ | 886 | xfs_dir2_sf_hdr_t *sfp; /* shortform structure */ |
906 | 887 | ||
@@ -914,13 +895,13 @@ xfs_dir2_sf_replace( | |||
914 | */ | 895 | */ |
915 | if (dp->i_d.di_size < offsetof(xfs_dir2_sf_hdr_t, parent)) { | 896 | if (dp->i_d.di_size < offsetof(xfs_dir2_sf_hdr_t, parent)) { |
916 | ASSERT(XFS_FORCED_SHUTDOWN(dp->i_mount)); | 897 | ASSERT(XFS_FORCED_SHUTDOWN(dp->i_mount)); |
917 | return XFS_ERROR(EIO); | 898 | return -EIO; |
918 | } | 899 | } |
919 | ASSERT(dp->i_df.if_bytes == dp->i_d.di_size); | 900 | ASSERT(dp->i_df.if_bytes == dp->i_d.di_size); |
920 | ASSERT(dp->i_df.if_u1.if_data != NULL); | 901 | ASSERT(dp->i_df.if_u1.if_data != NULL); |
921 | sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data; | 902 | sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data; |
922 | ASSERT(dp->i_d.di_size >= xfs_dir2_sf_hdr_size(sfp->i8count)); | 903 | ASSERT(dp->i_d.di_size >= xfs_dir2_sf_hdr_size(sfp->i8count)); |
923 | #if XFS_BIG_INUMS | 904 | |
924 | /* | 905 | /* |
925 | * New inode number is large, and need to convert to 8-byte inodes. | 906 | * New inode number is large, and need to convert to 8-byte inodes. |
926 | */ | 907 | */ |
@@ -951,17 +932,15 @@ xfs_dir2_sf_replace( | |||
951 | sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data; | 932 | sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data; |
952 | } else | 933 | } else |
953 | i8elevated = 0; | 934 | i8elevated = 0; |
954 | #endif | 935 | |
955 | ASSERT(args->namelen != 1 || args->name[0] != '.'); | 936 | ASSERT(args->namelen != 1 || args->name[0] != '.'); |
956 | /* | 937 | /* |
957 | * Replace ..'s entry. | 938 | * Replace ..'s entry. |
958 | */ | 939 | */ |
959 | if (args->namelen == 2 && | 940 | if (args->namelen == 2 && |
960 | args->name[0] == '.' && args->name[1] == '.') { | 941 | args->name[0] == '.' && args->name[1] == '.') { |
961 | #if XFS_BIG_INUMS || defined(DEBUG) | ||
962 | ino = dp->d_ops->sf_get_parent_ino(sfp); | 942 | ino = dp->d_ops->sf_get_parent_ino(sfp); |
963 | ASSERT(args->inumber != ino); | 943 | ASSERT(args->inumber != ino); |
964 | #endif | ||
965 | dp->d_ops->sf_put_parent_ino(sfp, args->inumber); | 944 | dp->d_ops->sf_put_parent_ino(sfp, args->inumber); |
966 | } | 945 | } |
967 | /* | 946 | /* |
@@ -972,10 +951,8 @@ xfs_dir2_sf_replace( | |||
972 | i++, sfep = dp->d_ops->sf_nextentry(sfp, sfep)) { | 951 | i++, sfep = dp->d_ops->sf_nextentry(sfp, sfep)) { |
973 | if (xfs_da_compname(args, sfep->name, sfep->namelen) == | 952 | if (xfs_da_compname(args, sfep->name, sfep->namelen) == |
974 | XFS_CMP_EXACT) { | 953 | XFS_CMP_EXACT) { |
975 | #if XFS_BIG_INUMS || defined(DEBUG) | ||
976 | ino = dp->d_ops->sf_get_ino(sfp, sfep); | 954 | ino = dp->d_ops->sf_get_ino(sfp, sfep); |
977 | ASSERT(args->inumber != ino); | 955 | ASSERT(args->inumber != ino); |
978 | #endif | ||
979 | dp->d_ops->sf_put_ino(sfp, sfep, args->inumber); | 956 | dp->d_ops->sf_put_ino(sfp, sfep, args->inumber); |
980 | dp->d_ops->sf_put_ftype(sfep, args->filetype); | 957 | dp->d_ops->sf_put_ftype(sfep, args->filetype); |
981 | break; | 958 | break; |
@@ -986,14 +963,11 @@ xfs_dir2_sf_replace( | |||
986 | */ | 963 | */ |
987 | if (i == sfp->count) { | 964 | if (i == sfp->count) { |
988 | ASSERT(args->op_flags & XFS_DA_OP_OKNOENT); | 965 | ASSERT(args->op_flags & XFS_DA_OP_OKNOENT); |
989 | #if XFS_BIG_INUMS | ||
990 | if (i8elevated) | 966 | if (i8elevated) |
991 | xfs_dir2_sf_toino4(args); | 967 | xfs_dir2_sf_toino4(args); |
992 | #endif | 968 | return -ENOENT; |
993 | return XFS_ERROR(ENOENT); | ||
994 | } | 969 | } |
995 | } | 970 | } |
996 | #if XFS_BIG_INUMS | ||
997 | /* | 971 | /* |
998 | * See if the old number was large, the new number is small. | 972 | * See if the old number was large, the new number is small. |
999 | */ | 973 | */ |
@@ -1020,13 +994,11 @@ xfs_dir2_sf_replace( | |||
1020 | if (!i8elevated) | 994 | if (!i8elevated) |
1021 | sfp->i8count++; | 995 | sfp->i8count++; |
1022 | } | 996 | } |
1023 | #endif | ||
1024 | xfs_dir2_sf_check(args); | 997 | xfs_dir2_sf_check(args); |
1025 | xfs_trans_log_inode(args->trans, dp, XFS_ILOG_DDATA); | 998 | xfs_trans_log_inode(args->trans, dp, XFS_ILOG_DDATA); |
1026 | return 0; | 999 | return 0; |
1027 | } | 1000 | } |
1028 | 1001 | ||
1029 | #if XFS_BIG_INUMS | ||
1030 | /* | 1002 | /* |
1031 | * Convert from 8-byte inode numbers to 4-byte inode numbers. | 1003 | * Convert from 8-byte inode numbers to 4-byte inode numbers. |
1032 | * The last 8-byte inode number is gone, but the count is still 1. | 1004 | * The last 8-byte inode number is gone, but the count is still 1. |
@@ -1181,4 +1153,3 @@ xfs_dir2_sf_toino8( | |||
1181 | dp->i_d.di_size = newsize; | 1153 | dp->i_d.di_size = newsize; |
1182 | xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_DDATA); | 1154 | xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_DDATA); |
1183 | } | 1155 | } |
1184 | #endif /* XFS_BIG_INUMS */ | ||
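
With the XFS_BIG_INUMS conditional removed, the shortform directory code always tracks how many entries need 8-byte inode numbers (sfp->i8count) and always compiles the 4-byte/8-byte conversion helpers (xfs_dir2_sf_toino4/xfs_dir2_sf_toino8). A rough sketch of that bookkeeping, using hypothetical demo_* types rather than the on-disk structures:

	#include <stdint.h>
	#include <stdio.h>

	/* Stand-in for XFS_DIR2_MAX_SHORT_INUM: largest inode number that fits in 4 bytes. */
	#define DEMO_MAX_SHORT_INUM	((uint64_t)0xffffffffULL)

	struct demo_sf_hdr {
		uint8_t	count;		/* number of entries */
		uint8_t	i8count;	/* entries whose inode number needs 8 bytes */
	};

	/* Adding an entry: bump i8count whenever the inode number is "large". */
	static void demo_sf_add(struct demo_sf_hdr *sfp, uint64_t inumber)
	{
		sfp->count++;
		if (inumber > DEMO_MAX_SHORT_INUM)
			sfp->i8count++;		/* previously under #if XFS_BIG_INUMS */
	}

	/* Removing an entry: once the last large inode number is gone, the real
	 * code converts the directory back to 4-byte numbers (xfs_dir2_sf_toino4). */
	static void demo_sf_remove(struct demo_sf_hdr *sfp, uint64_t inumber)
	{
		sfp->count--;
		if (inumber > DEMO_MAX_SHORT_INUM)
			sfp->i8count--;
	}

	int main(void)
	{
		struct demo_sf_hdr sfp = { 0, 0 };

		demo_sf_add(&sfp, (uint64_t)1 << 40);
		demo_sf_remove(&sfp, (uint64_t)1 << 40);
		printf("count=%d i8count=%d\n", sfp.count, sfp.i8count);
		return 0;
	}
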
diff --git a/fs/xfs/xfs_dquot_buf.c b/fs/xfs/libxfs/xfs_dquot_buf.c index c2ac0c611ad8..bb969337efc8 100644 --- a/fs/xfs/xfs_dquot_buf.c +++ b/fs/xfs/libxfs/xfs_dquot_buf.c | |||
@@ -257,9 +257,9 @@ xfs_dquot_buf_read_verify( | |||
257 | struct xfs_mount *mp = bp->b_target->bt_mount; | 257 | struct xfs_mount *mp = bp->b_target->bt_mount; |
258 | 258 | ||
259 | if (!xfs_dquot_buf_verify_crc(mp, bp)) | 259 | if (!xfs_dquot_buf_verify_crc(mp, bp)) |
260 | xfs_buf_ioerror(bp, EFSBADCRC); | 260 | xfs_buf_ioerror(bp, -EFSBADCRC); |
261 | else if (!xfs_dquot_buf_verify(mp, bp)) | 261 | else if (!xfs_dquot_buf_verify(mp, bp)) |
262 | xfs_buf_ioerror(bp, EFSCORRUPTED); | 262 | xfs_buf_ioerror(bp, -EFSCORRUPTED); |
263 | 263 | ||
264 | if (bp->b_error) | 264 | if (bp->b_error) |
265 | xfs_verifier_error(bp); | 265 | xfs_verifier_error(bp); |
@@ -277,7 +277,7 @@ xfs_dquot_buf_write_verify( | |||
277 | struct xfs_mount *mp = bp->b_target->bt_mount; | 277 | struct xfs_mount *mp = bp->b_target->bt_mount; |
278 | 278 | ||
279 | if (!xfs_dquot_buf_verify(mp, bp)) { | 279 | if (!xfs_dquot_buf_verify(mp, bp)) { |
280 | xfs_buf_ioerror(bp, EFSCORRUPTED); | 280 | xfs_buf_ioerror(bp, -EFSCORRUPTED); |
281 | xfs_verifier_error(bp); | 281 | xfs_verifier_error(bp); |
282 | return; | 282 | return; |
283 | } | 283 | } |
diff --git a/fs/xfs/xfs_format.h b/fs/xfs/libxfs/xfs_format.h index 34d85aca3058..7e42bba9a420 100644 --- a/fs/xfs/xfs_format.h +++ b/fs/xfs/libxfs/xfs_format.h | |||
@@ -68,11 +68,7 @@ struct xfs_ifork; | |||
68 | #define XFS_RTLOBIT(w) xfs_lowbit32(w) | 68 | #define XFS_RTLOBIT(w) xfs_lowbit32(w) |
69 | #define XFS_RTHIBIT(w) xfs_highbit32(w) | 69 | #define XFS_RTHIBIT(w) xfs_highbit32(w) |
70 | 70 | ||
71 | #if XFS_BIG_BLKNOS | ||
72 | #define XFS_RTBLOCKLOG(b) xfs_highbit64(b) | 71 | #define XFS_RTBLOCKLOG(b) xfs_highbit64(b) |
73 | #else | ||
74 | #define XFS_RTBLOCKLOG(b) xfs_highbit32(b) | ||
75 | #endif | ||
76 | 72 | ||
77 | /* | 73 | /* |
78 | * Dquot and dquot block format definitions | 74 | * Dquot and dquot block format definitions |
@@ -304,23 +300,15 @@ typedef struct xfs_bmbt_rec_host { | |||
304 | * Values and macros for delayed-allocation startblock fields. | 300 | * Values and macros for delayed-allocation startblock fields. |
305 | */ | 301 | */ |
306 | #define STARTBLOCKVALBITS 17 | 302 | #define STARTBLOCKVALBITS 17 |
307 | #define STARTBLOCKMASKBITS (15 + XFS_BIG_BLKNOS * 20) | 303 | #define STARTBLOCKMASKBITS (15 + 20) |
308 | #define DSTARTBLOCKMASKBITS (15 + 20) | ||
309 | #define STARTBLOCKMASK \ | 304 | #define STARTBLOCKMASK \ |
310 | (((((xfs_fsblock_t)1) << STARTBLOCKMASKBITS) - 1) << STARTBLOCKVALBITS) | 305 | (((((xfs_fsblock_t)1) << STARTBLOCKMASKBITS) - 1) << STARTBLOCKVALBITS) |
311 | #define DSTARTBLOCKMASK \ | ||
312 | (((((xfs_dfsbno_t)1) << DSTARTBLOCKMASKBITS) - 1) << STARTBLOCKVALBITS) | ||
313 | 306 | ||
314 | static inline int isnullstartblock(xfs_fsblock_t x) | 307 | static inline int isnullstartblock(xfs_fsblock_t x) |
315 | { | 308 | { |
316 | return ((x) & STARTBLOCKMASK) == STARTBLOCKMASK; | 309 | return ((x) & STARTBLOCKMASK) == STARTBLOCKMASK; |
317 | } | 310 | } |
318 | 311 | ||
319 | static inline int isnulldstartblock(xfs_dfsbno_t x) | ||
320 | { | ||
321 | return ((x) & DSTARTBLOCKMASK) == DSTARTBLOCKMASK; | ||
322 | } | ||
323 | |||
324 | static inline xfs_fsblock_t nullstartblock(int k) | 312 | static inline xfs_fsblock_t nullstartblock(int k) |
325 | { | 313 | { |
326 | ASSERT(k < (1 << STARTBLOCKVALBITS)); | 314 | ASSERT(k < (1 << STARTBLOCKVALBITS)); |
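
Now that the XFS_BIG_BLKNOS conditional is gone from xfs_format.h, STARTBLOCKMASKBITS is always the same 15 + 20 = 35 bits that the on-disk DSTARTBLOCK variants used, so those duplicates are dropped. A small standalone sketch of the resulting mask arithmetic (the macros are copied from the hunk above; demo_nullstartblock mirrors the in-memory helper and the value extraction at the end is illustrative only):

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	#define STARTBLOCKVALBITS	17
	#define STARTBLOCKMASKBITS	(15 + 20)
	#define STARTBLOCKMASK \
		(((((uint64_t)1) << STARTBLOCKMASKBITS) - 1) << STARTBLOCKVALBITS)

	/* A "null" startblock has every mask bit set; the low 17 bits carry a value. */
	static int demo_isnullstartblock(uint64_t x)
	{
		return (x & STARTBLOCKMASK) == STARTBLOCKMASK;
	}

	static uint64_t demo_nullstartblock(int k)
	{
		assert(k < (1 << STARTBLOCKVALBITS));
		return STARTBLOCKMASK | (uint64_t)k;
	}

	int main(void)
	{
		uint64_t nb = demo_nullstartblock(5);

		printf("null=%d val=%llu\n", demo_isnullstartblock(nb),
		       (unsigned long long)(nb & ~STARTBLOCKMASK));
		return 0;
	}
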
diff --git a/fs/xfs/xfs_ialloc.c b/fs/xfs/libxfs/xfs_ialloc.c index 5960e5593fe0..b62771f1f4b5 100644 --- a/fs/xfs/xfs_ialloc.c +++ b/fs/xfs/libxfs/xfs_ialloc.c | |||
@@ -292,7 +292,7 @@ xfs_ialloc_inode_init( | |||
292 | mp->m_bsize * blks_per_cluster, | 292 | mp->m_bsize * blks_per_cluster, |
293 | XBF_UNMAPPED); | 293 | XBF_UNMAPPED); |
294 | if (!fbuf) | 294 | if (!fbuf) |
295 | return ENOMEM; | 295 | return -ENOMEM; |
296 | 296 | ||
297 | /* Initialize the inode buffers and log them appropriately. */ | 297 | /* Initialize the inode buffers and log them appropriately. */ |
298 | fbuf->b_ops = &xfs_inode_buf_ops; | 298 | fbuf->b_ops = &xfs_inode_buf_ops; |
@@ -380,7 +380,7 @@ xfs_ialloc_ag_alloc( | |||
380 | newlen = args.mp->m_ialloc_inos; | 380 | newlen = args.mp->m_ialloc_inos; |
381 | if (args.mp->m_maxicount && | 381 | if (args.mp->m_maxicount && |
382 | args.mp->m_sb.sb_icount + newlen > args.mp->m_maxicount) | 382 | args.mp->m_sb.sb_icount + newlen > args.mp->m_maxicount) |
383 | return XFS_ERROR(ENOSPC); | 383 | return -ENOSPC; |
384 | args.minlen = args.maxlen = args.mp->m_ialloc_blks; | 384 | args.minlen = args.maxlen = args.mp->m_ialloc_blks; |
385 | /* | 385 | /* |
386 | * First try to allocate inodes contiguous with the last-allocated | 386 | * First try to allocate inodes contiguous with the last-allocated |
@@ -1385,7 +1385,7 @@ xfs_dialloc( | |||
1385 | if (error) { | 1385 | if (error) { |
1386 | xfs_trans_brelse(tp, agbp); | 1386 | xfs_trans_brelse(tp, agbp); |
1387 | 1387 | ||
1388 | if (error != ENOSPC) | 1388 | if (error != -ENOSPC) |
1389 | goto out_error; | 1389 | goto out_error; |
1390 | 1390 | ||
1391 | xfs_perag_put(pag); | 1391 | xfs_perag_put(pag); |
@@ -1416,7 +1416,7 @@ nextag: | |||
1416 | agno = 0; | 1416 | agno = 0; |
1417 | if (agno == start_agno) { | 1417 | if (agno == start_agno) { |
1418 | *inop = NULLFSINO; | 1418 | *inop = NULLFSINO; |
1419 | return noroom ? ENOSPC : 0; | 1419 | return noroom ? -ENOSPC : 0; |
1420 | } | 1420 | } |
1421 | } | 1421 | } |
1422 | 1422 | ||
@@ -1425,7 +1425,7 @@ out_alloc: | |||
1425 | return xfs_dialloc_ag(tp, agbp, parent, inop); | 1425 | return xfs_dialloc_ag(tp, agbp, parent, inop); |
1426 | out_error: | 1426 | out_error: |
1427 | xfs_perag_put(pag); | 1427 | xfs_perag_put(pag); |
1428 | return XFS_ERROR(error); | 1428 | return error; |
1429 | } | 1429 | } |
1430 | 1430 | ||
1431 | STATIC int | 1431 | STATIC int |
@@ -1682,7 +1682,7 @@ xfs_difree( | |||
1682 | xfs_warn(mp, "%s: agno >= mp->m_sb.sb_agcount (%d >= %d).", | 1682 | xfs_warn(mp, "%s: agno >= mp->m_sb.sb_agcount (%d >= %d).", |
1683 | __func__, agno, mp->m_sb.sb_agcount); | 1683 | __func__, agno, mp->m_sb.sb_agcount); |
1684 | ASSERT(0); | 1684 | ASSERT(0); |
1685 | return XFS_ERROR(EINVAL); | 1685 | return -EINVAL; |
1686 | } | 1686 | } |
1687 | agino = XFS_INO_TO_AGINO(mp, inode); | 1687 | agino = XFS_INO_TO_AGINO(mp, inode); |
1688 | if (inode != XFS_AGINO_TO_INO(mp, agno, agino)) { | 1688 | if (inode != XFS_AGINO_TO_INO(mp, agno, agino)) { |
@@ -1690,14 +1690,14 @@ xfs_difree( | |||
1690 | __func__, (unsigned long long)inode, | 1690 | __func__, (unsigned long long)inode, |
1691 | (unsigned long long)XFS_AGINO_TO_INO(mp, agno, agino)); | 1691 | (unsigned long long)XFS_AGINO_TO_INO(mp, agno, agino)); |
1692 | ASSERT(0); | 1692 | ASSERT(0); |
1693 | return XFS_ERROR(EINVAL); | 1693 | return -EINVAL; |
1694 | } | 1694 | } |
1695 | agbno = XFS_AGINO_TO_AGBNO(mp, agino); | 1695 | agbno = XFS_AGINO_TO_AGBNO(mp, agino); |
1696 | if (agbno >= mp->m_sb.sb_agblocks) { | 1696 | if (agbno >= mp->m_sb.sb_agblocks) { |
1697 | xfs_warn(mp, "%s: agbno >= mp->m_sb.sb_agblocks (%d >= %d).", | 1697 | xfs_warn(mp, "%s: agbno >= mp->m_sb.sb_agblocks (%d >= %d).", |
1698 | __func__, agbno, mp->m_sb.sb_agblocks); | 1698 | __func__, agbno, mp->m_sb.sb_agblocks); |
1699 | ASSERT(0); | 1699 | ASSERT(0); |
1700 | return XFS_ERROR(EINVAL); | 1700 | return -EINVAL; |
1701 | } | 1701 | } |
1702 | /* | 1702 | /* |
1703 | * Get the allocation group header. | 1703 | * Get the allocation group header. |
@@ -1769,7 +1769,7 @@ xfs_imap_lookup( | |||
1769 | if (i) | 1769 | if (i) |
1770 | error = xfs_inobt_get_rec(cur, &rec, &i); | 1770 | error = xfs_inobt_get_rec(cur, &rec, &i); |
1771 | if (!error && i == 0) | 1771 | if (!error && i == 0) |
1772 | error = EINVAL; | 1772 | error = -EINVAL; |
1773 | } | 1773 | } |
1774 | 1774 | ||
1775 | xfs_trans_brelse(tp, agbp); | 1775 | xfs_trans_brelse(tp, agbp); |
@@ -1780,12 +1780,12 @@ xfs_imap_lookup( | |||
1780 | /* check that the returned record contains the required inode */ | 1780 | /* check that the returned record contains the required inode */ |
1781 | if (rec.ir_startino > agino || | 1781 | if (rec.ir_startino > agino || |
1782 | rec.ir_startino + mp->m_ialloc_inos <= agino) | 1782 | rec.ir_startino + mp->m_ialloc_inos <= agino) |
1783 | return EINVAL; | 1783 | return -EINVAL; |
1784 | 1784 | ||
1785 | /* for untrusted inodes check it is allocated first */ | 1785 | /* for untrusted inodes check it is allocated first */ |
1786 | if ((flags & XFS_IGET_UNTRUSTED) && | 1786 | if ((flags & XFS_IGET_UNTRUSTED) && |
1787 | (rec.ir_free & XFS_INOBT_MASK(agino - rec.ir_startino))) | 1787 | (rec.ir_free & XFS_INOBT_MASK(agino - rec.ir_startino))) |
1788 | return EINVAL; | 1788 | return -EINVAL; |
1789 | 1789 | ||
1790 | *chunk_agbno = XFS_AGINO_TO_AGBNO(mp, rec.ir_startino); | 1790 | *chunk_agbno = XFS_AGINO_TO_AGBNO(mp, rec.ir_startino); |
1791 | *offset_agbno = agbno - *chunk_agbno; | 1791 | *offset_agbno = agbno - *chunk_agbno; |
@@ -1829,7 +1829,7 @@ xfs_imap( | |||
1829 | * as they can be invalid without implying corruption. | 1829 | * as they can be invalid without implying corruption. |
1830 | */ | 1830 | */ |
1831 | if (flags & XFS_IGET_UNTRUSTED) | 1831 | if (flags & XFS_IGET_UNTRUSTED) |
1832 | return XFS_ERROR(EINVAL); | 1832 | return -EINVAL; |
1833 | if (agno >= mp->m_sb.sb_agcount) { | 1833 | if (agno >= mp->m_sb.sb_agcount) { |
1834 | xfs_alert(mp, | 1834 | xfs_alert(mp, |
1835 | "%s: agno (%d) >= mp->m_sb.sb_agcount (%d)", | 1835 | "%s: agno (%d) >= mp->m_sb.sb_agcount (%d)", |
@@ -1849,7 +1849,7 @@ xfs_imap( | |||
1849 | } | 1849 | } |
1850 | xfs_stack_trace(); | 1850 | xfs_stack_trace(); |
1851 | #endif /* DEBUG */ | 1851 | #endif /* DEBUG */ |
1852 | return XFS_ERROR(EINVAL); | 1852 | return -EINVAL; |
1853 | } | 1853 | } |
1854 | 1854 | ||
1855 | blks_per_cluster = xfs_icluster_size_fsb(mp); | 1855 | blks_per_cluster = xfs_icluster_size_fsb(mp); |
@@ -1922,7 +1922,7 @@ out_map: | |||
1922 | __func__, (unsigned long long) imap->im_blkno, | 1922 | __func__, (unsigned long long) imap->im_blkno, |
1923 | (unsigned long long) imap->im_len, | 1923 | (unsigned long long) imap->im_len, |
1924 | XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)); | 1924 | XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)); |
1925 | return XFS_ERROR(EINVAL); | 1925 | return -EINVAL; |
1926 | } | 1926 | } |
1927 | return 0; | 1927 | return 0; |
1928 | } | 1928 | } |
@@ -2072,11 +2072,11 @@ xfs_agi_read_verify( | |||
2072 | 2072 | ||
2073 | if (xfs_sb_version_hascrc(&mp->m_sb) && | 2073 | if (xfs_sb_version_hascrc(&mp->m_sb) && |
2074 | !xfs_buf_verify_cksum(bp, XFS_AGI_CRC_OFF)) | 2074 | !xfs_buf_verify_cksum(bp, XFS_AGI_CRC_OFF)) |
2075 | xfs_buf_ioerror(bp, EFSBADCRC); | 2075 | xfs_buf_ioerror(bp, -EFSBADCRC); |
2076 | else if (XFS_TEST_ERROR(!xfs_agi_verify(bp), mp, | 2076 | else if (XFS_TEST_ERROR(!xfs_agi_verify(bp), mp, |
2077 | XFS_ERRTAG_IALLOC_READ_AGI, | 2077 | XFS_ERRTAG_IALLOC_READ_AGI, |
2078 | XFS_RANDOM_IALLOC_READ_AGI)) | 2078 | XFS_RANDOM_IALLOC_READ_AGI)) |
2079 | xfs_buf_ioerror(bp, EFSCORRUPTED); | 2079 | xfs_buf_ioerror(bp, -EFSCORRUPTED); |
2080 | 2080 | ||
2081 | if (bp->b_error) | 2081 | if (bp->b_error) |
2082 | xfs_verifier_error(bp); | 2082 | xfs_verifier_error(bp); |
@@ -2090,7 +2090,7 @@ xfs_agi_write_verify( | |||
2090 | struct xfs_buf_log_item *bip = bp->b_fspriv; | 2090 | struct xfs_buf_log_item *bip = bp->b_fspriv; |
2091 | 2091 | ||
2092 | if (!xfs_agi_verify(bp)) { | 2092 | if (!xfs_agi_verify(bp)) { |
2093 | xfs_buf_ioerror(bp, EFSCORRUPTED); | 2093 | xfs_buf_ioerror(bp, -EFSCORRUPTED); |
2094 | xfs_verifier_error(bp); | 2094 | xfs_verifier_error(bp); |
2095 | return; | 2095 | return; |
2096 | } | 2096 | } |
diff --git a/fs/xfs/xfs_ialloc.h b/fs/xfs/libxfs/xfs_ialloc.h index 95ad1c002d60..95ad1c002d60 100644 --- a/fs/xfs/xfs_ialloc.h +++ b/fs/xfs/libxfs/xfs_ialloc.h | |||
diff --git a/fs/xfs/xfs_ialloc_btree.c b/fs/xfs/libxfs/xfs_ialloc_btree.c index 726f83a681a5..c9b06f30fe86 100644 --- a/fs/xfs/xfs_ialloc_btree.c +++ b/fs/xfs/libxfs/xfs_ialloc_btree.c | |||
@@ -272,9 +272,9 @@ xfs_inobt_read_verify( | |||
272 | struct xfs_buf *bp) | 272 | struct xfs_buf *bp) |
273 | { | 273 | { |
274 | if (!xfs_btree_sblock_verify_crc(bp)) | 274 | if (!xfs_btree_sblock_verify_crc(bp)) |
275 | xfs_buf_ioerror(bp, EFSBADCRC); | 275 | xfs_buf_ioerror(bp, -EFSBADCRC); |
276 | else if (!xfs_inobt_verify(bp)) | 276 | else if (!xfs_inobt_verify(bp)) |
277 | xfs_buf_ioerror(bp, EFSCORRUPTED); | 277 | xfs_buf_ioerror(bp, -EFSCORRUPTED); |
278 | 278 | ||
279 | if (bp->b_error) { | 279 | if (bp->b_error) { |
280 | trace_xfs_btree_corrupt(bp, _RET_IP_); | 280 | trace_xfs_btree_corrupt(bp, _RET_IP_); |
@@ -288,7 +288,7 @@ xfs_inobt_write_verify( | |||
288 | { | 288 | { |
289 | if (!xfs_inobt_verify(bp)) { | 289 | if (!xfs_inobt_verify(bp)) { |
290 | trace_xfs_btree_corrupt(bp, _RET_IP_); | 290 | trace_xfs_btree_corrupt(bp, _RET_IP_); |
291 | xfs_buf_ioerror(bp, EFSCORRUPTED); | 291 | xfs_buf_ioerror(bp, -EFSCORRUPTED); |
292 | xfs_verifier_error(bp); | 292 | xfs_verifier_error(bp); |
293 | return; | 293 | return; |
294 | } | 294 | } |
diff --git a/fs/xfs/xfs_ialloc_btree.h b/fs/xfs/libxfs/xfs_ialloc_btree.h index d7ebea72c2d0..d7ebea72c2d0 100644 --- a/fs/xfs/xfs_ialloc_btree.h +++ b/fs/xfs/libxfs/xfs_ialloc_btree.h | |||
diff --git a/fs/xfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c index cb35ae41d4a1..f18fd2da49f7 100644 --- a/fs/xfs/xfs_inode_buf.c +++ b/fs/xfs/libxfs/xfs_inode_buf.c | |||
@@ -101,7 +101,7 @@ xfs_inode_buf_verify( | |||
101 | return; | 101 | return; |
102 | } | 102 | } |
103 | 103 | ||
104 | xfs_buf_ioerror(bp, EFSCORRUPTED); | 104 | xfs_buf_ioerror(bp, -EFSCORRUPTED); |
105 | xfs_verifier_error(bp); | 105 | xfs_verifier_error(bp); |
106 | #ifdef DEBUG | 106 | #ifdef DEBUG |
107 | xfs_alert(mp, | 107 | xfs_alert(mp, |
@@ -174,14 +174,14 @@ xfs_imap_to_bp( | |||
174 | (int)imap->im_len, buf_flags, &bp, | 174 | (int)imap->im_len, buf_flags, &bp, |
175 | &xfs_inode_buf_ops); | 175 | &xfs_inode_buf_ops); |
176 | if (error) { | 176 | if (error) { |
177 | if (error == EAGAIN) { | 177 | if (error == -EAGAIN) { |
178 | ASSERT(buf_flags & XBF_TRYLOCK); | 178 | ASSERT(buf_flags & XBF_TRYLOCK); |
179 | return error; | 179 | return error; |
180 | } | 180 | } |
181 | 181 | ||
182 | if (error == EFSCORRUPTED && | 182 | if (error == -EFSCORRUPTED && |
183 | (iget_flags & XFS_IGET_UNTRUSTED)) | 183 | (iget_flags & XFS_IGET_UNTRUSTED)) |
184 | return XFS_ERROR(EINVAL); | 184 | return -EINVAL; |
185 | 185 | ||
186 | xfs_warn(mp, "%s: xfs_trans_read_buf() returned error %d.", | 186 | xfs_warn(mp, "%s: xfs_trans_read_buf() returned error %d.", |
187 | __func__, error); | 187 | __func__, error); |
@@ -390,7 +390,7 @@ xfs_iread( | |||
390 | __func__, ip->i_ino); | 390 | __func__, ip->i_ino); |
391 | 391 | ||
392 | XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, dip); | 392 | XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, dip); |
393 | error = XFS_ERROR(EFSCORRUPTED); | 393 | error = -EFSCORRUPTED; |
394 | goto out_brelse; | 394 | goto out_brelse; |
395 | } | 395 | } |
396 | 396 | ||
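
The xfs_imap_to_bp() hunk keeps two existing special cases, now in the negative-errno convention: -EAGAIN is passed straight back for trylock reads, and -EFSCORRUPTED is downgraded to -EINVAL when the inode number came from an untrusted source. A condensed sketch of that error-remapping flow, with invented helpers and a stand-in for the XFS-private EFSCORRUPTED value:

	#include <errno.h>
	#include <stdio.h>

	#define DEMO_EFSCORRUPTED	117	/* stand-in for the XFS-private value */
	#define DEMO_TRYLOCK		0x1
	#define DEMO_UNTRUSTED		0x2

	/* Pretend buffer read: returns 0 or a negative errno. */
	static int demo_read_buf(int simulated_error)
	{
		return simulated_error;
	}

	static int demo_imap_to_bp(int buf_flags, int iget_flags, int simulated_error)
	{
		int error = demo_read_buf(simulated_error);

		if (error) {
			if (error == -EAGAIN && (buf_flags & DEMO_TRYLOCK))
				return error;	/* caller retries with a blocking read */

			if (error == -DEMO_EFSCORRUPTED && (iget_flags & DEMO_UNTRUSTED))
				return -EINVAL;	/* untrusted inode numbers may simply be bogus */

			fprintf(stderr, "read_buf failed: %d\n", error);
		}
		return error;
	}

	int main(void)
	{
		printf("%d\n", demo_imap_to_bp(0, DEMO_UNTRUSTED, -DEMO_EFSCORRUPTED));
		return 0;
	}
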
diff --git a/fs/xfs/xfs_inode_buf.h b/fs/xfs/libxfs/xfs_inode_buf.h index 9308c47f2a52..9308c47f2a52 100644 --- a/fs/xfs/xfs_inode_buf.h +++ b/fs/xfs/libxfs/xfs_inode_buf.h | |||
diff --git a/fs/xfs/xfs_inode_fork.c b/fs/xfs/libxfs/xfs_inode_fork.c index b031e8d0d928..6a00f7fed69d 100644 --- a/fs/xfs/xfs_inode_fork.c +++ b/fs/xfs/libxfs/xfs_inode_fork.c | |||
@@ -102,7 +102,7 @@ xfs_iformat_fork( | |||
102 | be64_to_cpu(dip->di_nblocks)); | 102 | be64_to_cpu(dip->di_nblocks)); |
103 | XFS_CORRUPTION_ERROR("xfs_iformat(1)", XFS_ERRLEVEL_LOW, | 103 | XFS_CORRUPTION_ERROR("xfs_iformat(1)", XFS_ERRLEVEL_LOW, |
104 | ip->i_mount, dip); | 104 | ip->i_mount, dip); |
105 | return XFS_ERROR(EFSCORRUPTED); | 105 | return -EFSCORRUPTED; |
106 | } | 106 | } |
107 | 107 | ||
108 | if (unlikely(dip->di_forkoff > ip->i_mount->m_sb.sb_inodesize)) { | 108 | if (unlikely(dip->di_forkoff > ip->i_mount->m_sb.sb_inodesize)) { |
@@ -111,7 +111,7 @@ xfs_iformat_fork( | |||
111 | dip->di_forkoff); | 111 | dip->di_forkoff); |
112 | XFS_CORRUPTION_ERROR("xfs_iformat(2)", XFS_ERRLEVEL_LOW, | 112 | XFS_CORRUPTION_ERROR("xfs_iformat(2)", XFS_ERRLEVEL_LOW, |
113 | ip->i_mount, dip); | 113 | ip->i_mount, dip); |
114 | return XFS_ERROR(EFSCORRUPTED); | 114 | return -EFSCORRUPTED; |
115 | } | 115 | } |
116 | 116 | ||
117 | if (unlikely((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) && | 117 | if (unlikely((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) && |
@@ -121,7 +121,7 @@ xfs_iformat_fork( | |||
121 | ip->i_ino); | 121 | ip->i_ino); |
122 | XFS_CORRUPTION_ERROR("xfs_iformat(realtime)", | 122 | XFS_CORRUPTION_ERROR("xfs_iformat(realtime)", |
123 | XFS_ERRLEVEL_LOW, ip->i_mount, dip); | 123 | XFS_ERRLEVEL_LOW, ip->i_mount, dip); |
124 | return XFS_ERROR(EFSCORRUPTED); | 124 | return -EFSCORRUPTED; |
125 | } | 125 | } |
126 | 126 | ||
127 | switch (ip->i_d.di_mode & S_IFMT) { | 127 | switch (ip->i_d.di_mode & S_IFMT) { |
@@ -132,7 +132,7 @@ xfs_iformat_fork( | |||
132 | if (unlikely(dip->di_format != XFS_DINODE_FMT_DEV)) { | 132 | if (unlikely(dip->di_format != XFS_DINODE_FMT_DEV)) { |
133 | XFS_CORRUPTION_ERROR("xfs_iformat(3)", XFS_ERRLEVEL_LOW, | 133 | XFS_CORRUPTION_ERROR("xfs_iformat(3)", XFS_ERRLEVEL_LOW, |
134 | ip->i_mount, dip); | 134 | ip->i_mount, dip); |
135 | return XFS_ERROR(EFSCORRUPTED); | 135 | return -EFSCORRUPTED; |
136 | } | 136 | } |
137 | ip->i_d.di_size = 0; | 137 | ip->i_d.di_size = 0; |
138 | ip->i_df.if_u2.if_rdev = xfs_dinode_get_rdev(dip); | 138 | ip->i_df.if_u2.if_rdev = xfs_dinode_get_rdev(dip); |
@@ -153,7 +153,7 @@ xfs_iformat_fork( | |||
153 | XFS_CORRUPTION_ERROR("xfs_iformat(4)", | 153 | XFS_CORRUPTION_ERROR("xfs_iformat(4)", |
154 | XFS_ERRLEVEL_LOW, | 154 | XFS_ERRLEVEL_LOW, |
155 | ip->i_mount, dip); | 155 | ip->i_mount, dip); |
156 | return XFS_ERROR(EFSCORRUPTED); | 156 | return -EFSCORRUPTED; |
157 | } | 157 | } |
158 | 158 | ||
159 | di_size = be64_to_cpu(dip->di_size); | 159 | di_size = be64_to_cpu(dip->di_size); |
@@ -166,7 +166,7 @@ xfs_iformat_fork( | |||
166 | XFS_CORRUPTION_ERROR("xfs_iformat(5)", | 166 | XFS_CORRUPTION_ERROR("xfs_iformat(5)", |
167 | XFS_ERRLEVEL_LOW, | 167 | XFS_ERRLEVEL_LOW, |
168 | ip->i_mount, dip); | 168 | ip->i_mount, dip); |
169 | return XFS_ERROR(EFSCORRUPTED); | 169 | return -EFSCORRUPTED; |
170 | } | 170 | } |
171 | 171 | ||
172 | size = (int)di_size; | 172 | size = (int)di_size; |
@@ -181,13 +181,13 @@ xfs_iformat_fork( | |||
181 | default: | 181 | default: |
182 | XFS_ERROR_REPORT("xfs_iformat(6)", XFS_ERRLEVEL_LOW, | 182 | XFS_ERROR_REPORT("xfs_iformat(6)", XFS_ERRLEVEL_LOW, |
183 | ip->i_mount); | 183 | ip->i_mount); |
184 | return XFS_ERROR(EFSCORRUPTED); | 184 | return -EFSCORRUPTED; |
185 | } | 185 | } |
186 | break; | 186 | break; |
187 | 187 | ||
188 | default: | 188 | default: |
189 | XFS_ERROR_REPORT("xfs_iformat(7)", XFS_ERRLEVEL_LOW, ip->i_mount); | 189 | XFS_ERROR_REPORT("xfs_iformat(7)", XFS_ERRLEVEL_LOW, ip->i_mount); |
190 | return XFS_ERROR(EFSCORRUPTED); | 190 | return -EFSCORRUPTED; |
191 | } | 191 | } |
192 | if (error) { | 192 | if (error) { |
193 | return error; | 193 | return error; |
@@ -211,7 +211,7 @@ xfs_iformat_fork( | |||
211 | XFS_CORRUPTION_ERROR("xfs_iformat(8)", | 211 | XFS_CORRUPTION_ERROR("xfs_iformat(8)", |
212 | XFS_ERRLEVEL_LOW, | 212 | XFS_ERRLEVEL_LOW, |
213 | ip->i_mount, dip); | 213 | ip->i_mount, dip); |
214 | return XFS_ERROR(EFSCORRUPTED); | 214 | return -EFSCORRUPTED; |
215 | } | 215 | } |
216 | 216 | ||
217 | error = xfs_iformat_local(ip, dip, XFS_ATTR_FORK, size); | 217 | error = xfs_iformat_local(ip, dip, XFS_ATTR_FORK, size); |
@@ -223,7 +223,7 @@ xfs_iformat_fork( | |||
223 | error = xfs_iformat_btree(ip, dip, XFS_ATTR_FORK); | 223 | error = xfs_iformat_btree(ip, dip, XFS_ATTR_FORK); |
224 | break; | 224 | break; |
225 | default: | 225 | default: |
226 | error = XFS_ERROR(EFSCORRUPTED); | 226 | error = -EFSCORRUPTED; |
227 | break; | 227 | break; |
228 | } | 228 | } |
229 | if (error) { | 229 | if (error) { |
@@ -266,7 +266,7 @@ xfs_iformat_local( | |||
266 | XFS_DFORK_SIZE(dip, ip->i_mount, whichfork)); | 266 | XFS_DFORK_SIZE(dip, ip->i_mount, whichfork)); |
267 | XFS_CORRUPTION_ERROR("xfs_iformat_local", XFS_ERRLEVEL_LOW, | 267 | XFS_CORRUPTION_ERROR("xfs_iformat_local", XFS_ERRLEVEL_LOW, |
268 | ip->i_mount, dip); | 268 | ip->i_mount, dip); |
269 | return XFS_ERROR(EFSCORRUPTED); | 269 | return -EFSCORRUPTED; |
270 | } | 270 | } |
271 | ifp = XFS_IFORK_PTR(ip, whichfork); | 271 | ifp = XFS_IFORK_PTR(ip, whichfork); |
272 | real_size = 0; | 272 | real_size = 0; |
@@ -322,7 +322,7 @@ xfs_iformat_extents( | |||
322 | (unsigned long long) ip->i_ino, nex); | 322 | (unsigned long long) ip->i_ino, nex); |
323 | XFS_CORRUPTION_ERROR("xfs_iformat_extents(1)", XFS_ERRLEVEL_LOW, | 323 | XFS_CORRUPTION_ERROR("xfs_iformat_extents(1)", XFS_ERRLEVEL_LOW, |
324 | ip->i_mount, dip); | 324 | ip->i_mount, dip); |
325 | return XFS_ERROR(EFSCORRUPTED); | 325 | return -EFSCORRUPTED; |
326 | } | 326 | } |
327 | 327 | ||
328 | ifp->if_real_bytes = 0; | 328 | ifp->if_real_bytes = 0; |
@@ -350,7 +350,7 @@ xfs_iformat_extents( | |||
350 | XFS_ERROR_REPORT("xfs_iformat_extents(2)", | 350 | XFS_ERROR_REPORT("xfs_iformat_extents(2)", |
351 | XFS_ERRLEVEL_LOW, | 351 | XFS_ERRLEVEL_LOW, |
352 | ip->i_mount); | 352 | ip->i_mount); |
353 | return XFS_ERROR(EFSCORRUPTED); | 353 | return -EFSCORRUPTED; |
354 | } | 354 | } |
355 | } | 355 | } |
356 | ifp->if_flags |= XFS_IFEXTENTS; | 356 | ifp->if_flags |= XFS_IFEXTENTS; |
@@ -399,7 +399,7 @@ xfs_iformat_btree( | |||
399 | (unsigned long long) ip->i_ino); | 399 | (unsigned long long) ip->i_ino); |
400 | XFS_CORRUPTION_ERROR("xfs_iformat_btree", XFS_ERRLEVEL_LOW, | 400 | XFS_CORRUPTION_ERROR("xfs_iformat_btree", XFS_ERRLEVEL_LOW, |
401 | mp, dip); | 401 | mp, dip); |
402 | return XFS_ERROR(EFSCORRUPTED); | 402 | return -EFSCORRUPTED; |
403 | } | 403 | } |
404 | 404 | ||
405 | ifp->if_broot_bytes = size; | 405 | ifp->if_broot_bytes = size; |
@@ -436,7 +436,7 @@ xfs_iread_extents( | |||
436 | if (unlikely(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) { | 436 | if (unlikely(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) { |
437 | XFS_ERROR_REPORT("xfs_iread_extents", XFS_ERRLEVEL_LOW, | 437 | XFS_ERROR_REPORT("xfs_iread_extents", XFS_ERRLEVEL_LOW, |
438 | ip->i_mount); | 438 | ip->i_mount); |
439 | return XFS_ERROR(EFSCORRUPTED); | 439 | return -EFSCORRUPTED; |
440 | } | 440 | } |
441 | nextents = XFS_IFORK_NEXTENTS(ip, whichfork); | 441 | nextents = XFS_IFORK_NEXTENTS(ip, whichfork); |
442 | ifp = XFS_IFORK_PTR(ip, whichfork); | 442 | ifp = XFS_IFORK_PTR(ip, whichfork); |
@@ -528,7 +528,7 @@ xfs_iroot_realloc( | |||
528 | ifp->if_broot_bytes = (int)new_size; | 528 | ifp->if_broot_bytes = (int)new_size; |
529 | ASSERT(XFS_BMAP_BMDR_SPACE(ifp->if_broot) <= | 529 | ASSERT(XFS_BMAP_BMDR_SPACE(ifp->if_broot) <= |
530 | XFS_IFORK_SIZE(ip, whichfork)); | 530 | XFS_IFORK_SIZE(ip, whichfork)); |
531 | memmove(np, op, cur_max * (uint)sizeof(xfs_dfsbno_t)); | 531 | memmove(np, op, cur_max * (uint)sizeof(xfs_fsblock_t)); |
532 | return; | 532 | return; |
533 | } | 533 | } |
534 | 534 | ||
@@ -575,7 +575,7 @@ xfs_iroot_realloc( | |||
575 | ifp->if_broot_bytes); | 575 | ifp->if_broot_bytes); |
576 | np = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, new_broot, 1, | 576 | np = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, new_broot, 1, |
577 | (int)new_size); | 577 | (int)new_size); |
578 | memcpy(np, op, new_max * (uint)sizeof(xfs_dfsbno_t)); | 578 | memcpy(np, op, new_max * (uint)sizeof(xfs_fsblock_t)); |
579 | } | 579 | } |
580 | kmem_free(ifp->if_broot); | 580 | kmem_free(ifp->if_broot); |
581 | ifp->if_broot = new_broot; | 581 | ifp->if_broot = new_broot; |
@@ -1692,7 +1692,7 @@ xfs_iext_idx_to_irec( | |||
1692 | } | 1692 | } |
1693 | *idxp = page_idx; | 1693 | *idxp = page_idx; |
1694 | *erp_idxp = erp_idx; | 1694 | *erp_idxp = erp_idx; |
1695 | return(erp); | 1695 | return erp; |
1696 | } | 1696 | } |
1697 | 1697 | ||
1698 | /* | 1698 | /* |
diff --git a/fs/xfs/xfs_inode_fork.h b/fs/xfs/libxfs/xfs_inode_fork.h index 7d3b1ed6dcbe..7d3b1ed6dcbe 100644 --- a/fs/xfs/xfs_inode_fork.h +++ b/fs/xfs/libxfs/xfs_inode_fork.h | |||
diff --git a/fs/xfs/xfs_inum.h b/fs/xfs/libxfs/xfs_inum.h index 90efdaf1706f..4ff2278e147a 100644 --- a/fs/xfs/xfs_inum.h +++ b/fs/xfs/libxfs/xfs_inum.h | |||
@@ -54,11 +54,7 @@ struct xfs_mount; | |||
54 | #define XFS_OFFBNO_TO_AGINO(mp,b,o) \ | 54 | #define XFS_OFFBNO_TO_AGINO(mp,b,o) \ |
55 | ((xfs_agino_t)(((b) << XFS_INO_OFFSET_BITS(mp)) | (o))) | 55 | ((xfs_agino_t)(((b) << XFS_INO_OFFSET_BITS(mp)) | (o))) |
56 | 56 | ||
57 | #if XFS_BIG_INUMS | ||
58 | #define XFS_MAXINUMBER ((xfs_ino_t)((1ULL << 56) - 1ULL)) | 57 | #define XFS_MAXINUMBER ((xfs_ino_t)((1ULL << 56) - 1ULL)) |
59 | #else | ||
60 | #define XFS_MAXINUMBER ((xfs_ino_t)((1ULL << 32) - 1ULL)) | ||
61 | #endif | ||
62 | #define XFS_MAXINUMBER_32 ((xfs_ino_t)((1ULL << 32) - 1ULL)) | 58 | #define XFS_MAXINUMBER_32 ((xfs_ino_t)((1ULL << 32) - 1ULL)) |
63 | 59 | ||
64 | #endif /* __XFS_INUM_H__ */ | 60 | #endif /* __XFS_INUM_H__ */ |
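Editorial note: the xfs_inum.h hunk above removes the XFS_BIG_INUMS conditional, so XFS_MAXINUMBER is now unconditionally the 56-bit limit. A tiny standalone sketch of the resulting constants, with the values copied from the hunk and xfs_ino_t simplified to uint64_t for userspace:

#include <stdint.h>
#include <stdio.h>

/* Mirrors the now-unconditional definitions; xfs_ino_t simplified to uint64_t. */
#define XFS_MAXINUMBER		((uint64_t)((1ULL << 56) - 1ULL))
#define XFS_MAXINUMBER_32	((uint64_t)((1ULL << 32) - 1ULL))

int main(void)
{
	printf("XFS_MAXINUMBER    = %llu\n", (unsigned long long)XFS_MAXINUMBER);
	printf("XFS_MAXINUMBER_32 = %llu\n", (unsigned long long)XFS_MAXINUMBER_32);
	return 0;
}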
diff --git a/fs/xfs/xfs_log_format.h b/fs/xfs/libxfs/xfs_log_format.h index f0969c77bdbe..aff12f2d4428 100644 --- a/fs/xfs/xfs_log_format.h +++ b/fs/xfs/libxfs/xfs_log_format.h | |||
@@ -380,7 +380,7 @@ typedef struct xfs_icdinode { | |||
380 | xfs_ictimestamp_t di_mtime; /* time last modified */ | 380 | xfs_ictimestamp_t di_mtime; /* time last modified */ |
381 | xfs_ictimestamp_t di_ctime; /* time created/inode modified */ | 381 | xfs_ictimestamp_t di_ctime; /* time created/inode modified */ |
382 | xfs_fsize_t di_size; /* number of bytes in file */ | 382 | xfs_fsize_t di_size; /* number of bytes in file */ |
383 | xfs_drfsbno_t di_nblocks; /* # of direct & btree blocks used */ | 383 | xfs_rfsblock_t di_nblocks; /* # of direct & btree blocks used */ |
384 | xfs_extlen_t di_extsize; /* basic/minimum extent size for file */ | 384 | xfs_extlen_t di_extsize; /* basic/minimum extent size for file */ |
385 | xfs_extnum_t di_nextents; /* number of extents in data fork */ | 385 | xfs_extnum_t di_nextents; /* number of extents in data fork */ |
386 | xfs_aextnum_t di_anextents; /* number of extents in attribute fork*/ | 386 | xfs_aextnum_t di_anextents; /* number of extents in attribute fork*/ |
@@ -516,7 +516,7 @@ xfs_blft_from_flags(struct xfs_buf_log_format *blf) | |||
516 | * EFI/EFD log format definitions | 516 | * EFI/EFD log format definitions |
517 | */ | 517 | */ |
518 | typedef struct xfs_extent { | 518 | typedef struct xfs_extent { |
519 | xfs_dfsbno_t ext_start; | 519 | xfs_fsblock_t ext_start; |
520 | xfs_extlen_t ext_len; | 520 | xfs_extlen_t ext_len; |
521 | } xfs_extent_t; | 521 | } xfs_extent_t; |
522 | 522 | ||
diff --git a/fs/xfs/xfs_log_recover.h b/fs/xfs/libxfs/xfs_log_recover.h index 1c55ccbb379d..1c55ccbb379d 100644 --- a/fs/xfs/xfs_log_recover.h +++ b/fs/xfs/libxfs/xfs_log_recover.h | |||
diff --git a/fs/xfs/xfs_log_rlimit.c b/fs/xfs/libxfs/xfs_log_rlimit.c index ee7e0e80246b..ee7e0e80246b 100644 --- a/fs/xfs/xfs_log_rlimit.c +++ b/fs/xfs/libxfs/xfs_log_rlimit.c | |||
diff --git a/fs/xfs/xfs_quota_defs.h b/fs/xfs/libxfs/xfs_quota_defs.h index 137e20937077..1b0a08379759 100644 --- a/fs/xfs/xfs_quota_defs.h +++ b/fs/xfs/libxfs/xfs_quota_defs.h | |||
@@ -98,8 +98,6 @@ typedef __uint16_t xfs_qwarncnt_t; | |||
98 | #define XFS_IS_QUOTA_ON(mp) ((mp)->m_qflags & (XFS_UQUOTA_ACTIVE | \ | 98 | #define XFS_IS_QUOTA_ON(mp) ((mp)->m_qflags & (XFS_UQUOTA_ACTIVE | \ |
99 | XFS_GQUOTA_ACTIVE | \ | 99 | XFS_GQUOTA_ACTIVE | \ |
100 | XFS_PQUOTA_ACTIVE)) | 100 | XFS_PQUOTA_ACTIVE)) |
101 | #define XFS_IS_OQUOTA_ON(mp) ((mp)->m_qflags & (XFS_GQUOTA_ACTIVE | \ | ||
102 | XFS_PQUOTA_ACTIVE)) | ||
103 | #define XFS_IS_UQUOTA_ON(mp) ((mp)->m_qflags & XFS_UQUOTA_ACTIVE) | 101 | #define XFS_IS_UQUOTA_ON(mp) ((mp)->m_qflags & XFS_UQUOTA_ACTIVE) |
104 | #define XFS_IS_GQUOTA_ON(mp) ((mp)->m_qflags & XFS_GQUOTA_ACTIVE) | 102 | #define XFS_IS_GQUOTA_ON(mp) ((mp)->m_qflags & XFS_GQUOTA_ACTIVE) |
105 | #define XFS_IS_PQUOTA_ON(mp) ((mp)->m_qflags & XFS_PQUOTA_ACTIVE) | 103 | #define XFS_IS_PQUOTA_ON(mp) ((mp)->m_qflags & XFS_PQUOTA_ACTIVE) |
diff --git a/fs/xfs/xfs_rtbitmap.c b/fs/xfs/libxfs/xfs_rtbitmap.c index f4dd697cac08..f4dd697cac08 100644 --- a/fs/xfs/xfs_rtbitmap.c +++ b/fs/xfs/libxfs/xfs_rtbitmap.c | |||
diff --git a/fs/xfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c index 7703fa6770ff..ad525a5623a4 100644 --- a/fs/xfs/xfs_sb.c +++ b/fs/xfs/libxfs/xfs_sb.c | |||
@@ -186,13 +186,13 @@ xfs_mount_validate_sb( | |||
186 | */ | 186 | */ |
187 | if (sbp->sb_magicnum != XFS_SB_MAGIC) { | 187 | if (sbp->sb_magicnum != XFS_SB_MAGIC) { |
188 | xfs_warn(mp, "bad magic number"); | 188 | xfs_warn(mp, "bad magic number"); |
189 | return XFS_ERROR(EWRONGFS); | 189 | return -EWRONGFS; |
190 | } | 190 | } |
191 | 191 | ||
192 | 192 | ||
193 | if (!xfs_sb_good_version(sbp)) { | 193 | if (!xfs_sb_good_version(sbp)) { |
194 | xfs_warn(mp, "bad version"); | 194 | xfs_warn(mp, "bad version"); |
195 | return XFS_ERROR(EWRONGFS); | 195 | return -EWRONGFS; |
196 | } | 196 | } |
197 | 197 | ||
198 | /* | 198 | /* |
@@ -220,7 +220,7 @@ xfs_mount_validate_sb( | |||
220 | xfs_warn(mp, | 220 | xfs_warn(mp, |
221 | "Attempted to mount read-only compatible filesystem read-write.\n" | 221 | "Attempted to mount read-only compatible filesystem read-write.\n" |
222 | "Filesystem can only be safely mounted read only."); | 222 | "Filesystem can only be safely mounted read only."); |
223 | return XFS_ERROR(EINVAL); | 223 | return -EINVAL; |
224 | } | 224 | } |
225 | } | 225 | } |
226 | if (xfs_sb_has_incompat_feature(sbp, | 226 | if (xfs_sb_has_incompat_feature(sbp, |
@@ -230,7 +230,7 @@ xfs_mount_validate_sb( | |||
230 | "Filesystem can not be safely mounted by this kernel.", | 230 | "Filesystem can not be safely mounted by this kernel.", |
231 | (sbp->sb_features_incompat & | 231 | (sbp->sb_features_incompat & |
232 | XFS_SB_FEAT_INCOMPAT_UNKNOWN)); | 232 | XFS_SB_FEAT_INCOMPAT_UNKNOWN)); |
233 | return XFS_ERROR(EINVAL); | 233 | return -EINVAL; |
234 | } | 234 | } |
235 | } | 235 | } |
236 | 236 | ||
@@ -238,13 +238,13 @@ xfs_mount_validate_sb( | |||
238 | if (sbp->sb_qflags & (XFS_OQUOTA_ENFD | XFS_OQUOTA_CHKD)) { | 238 | if (sbp->sb_qflags & (XFS_OQUOTA_ENFD | XFS_OQUOTA_CHKD)) { |
239 | xfs_notice(mp, | 239 | xfs_notice(mp, |
240 | "Version 5 of Super block has XFS_OQUOTA bits."); | 240 | "Version 5 of Super block has XFS_OQUOTA bits."); |
241 | return XFS_ERROR(EFSCORRUPTED); | 241 | return -EFSCORRUPTED; |
242 | } | 242 | } |
243 | } else if (sbp->sb_qflags & (XFS_PQUOTA_ENFD | XFS_GQUOTA_ENFD | | 243 | } else if (sbp->sb_qflags & (XFS_PQUOTA_ENFD | XFS_GQUOTA_ENFD | |
244 | XFS_PQUOTA_CHKD | XFS_GQUOTA_CHKD)) { | 244 | XFS_PQUOTA_CHKD | XFS_GQUOTA_CHKD)) { |
245 | xfs_notice(mp, | 245 | xfs_notice(mp, |
246 | "Superblock earlier than Version 5 has XFS_[PQ]UOTA_{ENFD|CHKD} bits."); | 246 | "Superblock earlier than Version 5 has XFS_[PQ]UOTA_{ENFD|CHKD} bits."); |
247 | return XFS_ERROR(EFSCORRUPTED); | 247 | return -EFSCORRUPTED; |
248 | } | 248 | } |
249 | 249 | ||
250 | if (unlikely( | 250 | if (unlikely( |
@@ -252,7 +252,7 @@ xfs_mount_validate_sb( | |||
252 | xfs_warn(mp, | 252 | xfs_warn(mp, |
253 | "filesystem is marked as having an external log; " | 253 | "filesystem is marked as having an external log; " |
254 | "specify logdev on the mount command line."); | 254 | "specify logdev on the mount command line."); |
255 | return XFS_ERROR(EINVAL); | 255 | return -EINVAL; |
256 | } | 256 | } |
257 | 257 | ||
258 | if (unlikely( | 258 | if (unlikely( |
@@ -260,7 +260,7 @@ xfs_mount_validate_sb( | |||
260 | xfs_warn(mp, | 260 | xfs_warn(mp, |
261 | "filesystem is marked as having an internal log; " | 261 | "filesystem is marked as having an internal log; " |
262 | "do not specify logdev on the mount command line."); | 262 | "do not specify logdev on the mount command line."); |
263 | return XFS_ERROR(EINVAL); | 263 | return -EINVAL; |
264 | } | 264 | } |
265 | 265 | ||
266 | /* | 266 | /* |
@@ -294,7 +294,7 @@ xfs_mount_validate_sb( | |||
294 | sbp->sb_dblocks < XFS_MIN_DBLOCKS(sbp) || | 294 | sbp->sb_dblocks < XFS_MIN_DBLOCKS(sbp) || |
295 | sbp->sb_shared_vn != 0)) { | 295 | sbp->sb_shared_vn != 0)) { |
296 | xfs_notice(mp, "SB sanity check failed"); | 296 | xfs_notice(mp, "SB sanity check failed"); |
297 | return XFS_ERROR(EFSCORRUPTED); | 297 | return -EFSCORRUPTED; |
298 | } | 298 | } |
299 | 299 | ||
300 | /* | 300 | /* |
@@ -305,7 +305,7 @@ xfs_mount_validate_sb( | |||
305 | "File system with blocksize %d bytes. " | 305 | "File system with blocksize %d bytes. " |
306 | "Only pagesize (%ld) or less will currently work.", | 306 | "Only pagesize (%ld) or less will currently work.", |
307 | sbp->sb_blocksize, PAGE_SIZE); | 307 | sbp->sb_blocksize, PAGE_SIZE); |
308 | return XFS_ERROR(ENOSYS); | 308 | return -ENOSYS; |
309 | } | 309 | } |
310 | 310 | ||
311 | /* | 311 | /* |
@@ -320,19 +320,19 @@ xfs_mount_validate_sb( | |||
320 | default: | 320 | default: |
321 | xfs_warn(mp, "inode size of %d bytes not supported", | 321 | xfs_warn(mp, "inode size of %d bytes not supported", |
322 | sbp->sb_inodesize); | 322 | sbp->sb_inodesize); |
323 | return XFS_ERROR(ENOSYS); | 323 | return -ENOSYS; |
324 | } | 324 | } |
325 | 325 | ||
326 | if (xfs_sb_validate_fsb_count(sbp, sbp->sb_dblocks) || | 326 | if (xfs_sb_validate_fsb_count(sbp, sbp->sb_dblocks) || |
327 | xfs_sb_validate_fsb_count(sbp, sbp->sb_rblocks)) { | 327 | xfs_sb_validate_fsb_count(sbp, sbp->sb_rblocks)) { |
328 | xfs_warn(mp, | 328 | xfs_warn(mp, |
329 | "file system too large to be mounted on this system."); | 329 | "file system too large to be mounted on this system."); |
330 | return XFS_ERROR(EFBIG); | 330 | return -EFBIG; |
331 | } | 331 | } |
332 | 332 | ||
333 | if (check_inprogress && sbp->sb_inprogress) { | 333 | if (check_inprogress && sbp->sb_inprogress) { |
334 | xfs_warn(mp, "Offline file system operation in progress!"); | 334 | xfs_warn(mp, "Offline file system operation in progress!"); |
335 | return XFS_ERROR(EFSCORRUPTED); | 335 | return -EFSCORRUPTED; |
336 | } | 336 | } |
337 | return 0; | 337 | return 0; |
338 | } | 338 | } |
@@ -386,10 +386,11 @@ xfs_sb_quota_from_disk(struct xfs_sb *sbp) | |||
386 | } | 386 | } |
387 | } | 387 | } |
388 | 388 | ||
389 | void | 389 | static void |
390 | xfs_sb_from_disk( | 390 | __xfs_sb_from_disk( |
391 | struct xfs_sb *to, | 391 | struct xfs_sb *to, |
392 | xfs_dsb_t *from) | 392 | xfs_dsb_t *from, |
393 | bool convert_xquota) | ||
393 | { | 394 | { |
394 | to->sb_magicnum = be32_to_cpu(from->sb_magicnum); | 395 | to->sb_magicnum = be32_to_cpu(from->sb_magicnum); |
395 | to->sb_blocksize = be32_to_cpu(from->sb_blocksize); | 396 | to->sb_blocksize = be32_to_cpu(from->sb_blocksize); |
@@ -445,6 +446,17 @@ xfs_sb_from_disk( | |||
445 | to->sb_pad = 0; | 446 | to->sb_pad = 0; |
446 | to->sb_pquotino = be64_to_cpu(from->sb_pquotino); | 447 | to->sb_pquotino = be64_to_cpu(from->sb_pquotino); |
447 | to->sb_lsn = be64_to_cpu(from->sb_lsn); | 448 | to->sb_lsn = be64_to_cpu(from->sb_lsn); |
449 | /* Convert on-disk flags to in-memory flags? */ | ||
450 | if (convert_xquota) | ||
451 | xfs_sb_quota_from_disk(to); | ||
452 | } | ||
453 | |||
454 | void | ||
455 | xfs_sb_from_disk( | ||
456 | struct xfs_sb *to, | ||
457 | xfs_dsb_t *from) | ||
458 | { | ||
459 | __xfs_sb_from_disk(to, from, true); | ||
448 | } | 460 | } |
449 | 461 | ||
450 | static inline void | 462 | static inline void |
@@ -577,7 +589,11 @@ xfs_sb_verify( | |||
577 | struct xfs_mount *mp = bp->b_target->bt_mount; | 589 | struct xfs_mount *mp = bp->b_target->bt_mount; |
578 | struct xfs_sb sb; | 590 | struct xfs_sb sb; |
579 | 591 | ||
580 | xfs_sb_from_disk(&sb, XFS_BUF_TO_SBP(bp)); | 592 | /* |
593 | * Use call variant which doesn't convert quota flags from disk | ||
594 | * format, because xfs_mount_validate_sb checks the on-disk flags. | ||
595 | */ | ||
596 | __xfs_sb_from_disk(&sb, XFS_BUF_TO_SBP(bp), false); | ||
581 | 597 | ||
582 | /* | 598 | /* |
583 | * Only check the in progress field for the primary superblock as | 599 | * Only check the in progress field for the primary superblock as |
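Editorial note: the two hunks above split xfs_sb_from_disk() into an internal __xfs_sb_from_disk() that takes a convert_xquota flag, keep the old name as a thin wrapper, and have xfs_sb_verify() call the raw variant so xfs_mount_validate_sb() still sees the on-disk quota flags. A self-contained sketch of that wrapper pattern, using simplified stand-in names and types rather than the real superblock structures:

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in types; the real code fills a full in-core superblock. */
struct disk_sb { unsigned int raw_qflags; };
struct mem_sb  { unsigned int qflags; };

static void quota_flags_from_disk(struct mem_sb *to)
{
	to->qflags &= 0xff;		/* placeholder for the real flag remapping */
}

static void __sb_from_disk(struct mem_sb *to, const struct disk_sb *from,
			   bool convert_xquota)
{
	to->qflags = from->raw_qflags;
	if (convert_xquota)
		quota_flags_from_disk(to);
}

/* Public helper keeps the old two-argument signature for regular callers. */
static void sb_from_disk(struct mem_sb *to, const struct disk_sb *from)
{
	__sb_from_disk(to, from, true);
}

int main(void)
{
	struct disk_sb d = { .raw_qflags = 0x1ff };
	struct mem_sb m;

	sb_from_disk(&m, &d);		/* normal read path: flags converted */
	printf("converted qflags: %#x\n", m.qflags);

	__sb_from_disk(&m, &d, false);	/* verifier path: on-disk flags preserved */
	printf("on-disk qflags:   %#x\n", m.qflags);
	return 0;
}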
@@ -620,7 +636,7 @@ xfs_sb_read_verify( | |||
620 | /* Only fail bad secondaries on a known V5 filesystem */ | 636 | /* Only fail bad secondaries on a known V5 filesystem */ |
621 | if (bp->b_bn == XFS_SB_DADDR || | 637 | if (bp->b_bn == XFS_SB_DADDR || |
622 | xfs_sb_version_hascrc(&mp->m_sb)) { | 638 | xfs_sb_version_hascrc(&mp->m_sb)) { |
623 | error = EFSBADCRC; | 639 | error = -EFSBADCRC; |
624 | goto out_error; | 640 | goto out_error; |
625 | } | 641 | } |
626 | } | 642 | } |
@@ -630,7 +646,7 @@ xfs_sb_read_verify( | |||
630 | out_error: | 646 | out_error: |
631 | if (error) { | 647 | if (error) { |
632 | xfs_buf_ioerror(bp, error); | 648 | xfs_buf_ioerror(bp, error); |
633 | if (error == EFSCORRUPTED || error == EFSBADCRC) | 649 | if (error == -EFSCORRUPTED || error == -EFSBADCRC) |
634 | xfs_verifier_error(bp); | 650 | xfs_verifier_error(bp); |
635 | } | 651 | } |
636 | } | 652 | } |
@@ -653,7 +669,7 @@ xfs_sb_quiet_read_verify( | |||
653 | return; | 669 | return; |
654 | } | 670 | } |
655 | /* quietly fail */ | 671 | /* quietly fail */ |
656 | xfs_buf_ioerror(bp, EWRONGFS); | 672 | xfs_buf_ioerror(bp, -EWRONGFS); |
657 | } | 673 | } |
658 | 674 | ||
659 | static void | 675 | static void |
diff --git a/fs/xfs/xfs_sb.h b/fs/xfs/libxfs/xfs_sb.h index c43c2d609a24..2e739708afd3 100644 --- a/fs/xfs/xfs_sb.h +++ b/fs/xfs/libxfs/xfs_sb.h | |||
@@ -87,11 +87,11 @@ struct xfs_trans; | |||
87 | typedef struct xfs_sb { | 87 | typedef struct xfs_sb { |
88 | __uint32_t sb_magicnum; /* magic number == XFS_SB_MAGIC */ | 88 | __uint32_t sb_magicnum; /* magic number == XFS_SB_MAGIC */ |
89 | __uint32_t sb_blocksize; /* logical block size, bytes */ | 89 | __uint32_t sb_blocksize; /* logical block size, bytes */ |
90 | xfs_drfsbno_t sb_dblocks; /* number of data blocks */ | 90 | xfs_rfsblock_t sb_dblocks; /* number of data blocks */ |
91 | xfs_drfsbno_t sb_rblocks; /* number of realtime blocks */ | 91 | xfs_rfsblock_t sb_rblocks; /* number of realtime blocks */ |
92 | xfs_drtbno_t sb_rextents; /* number of realtime extents */ | 92 | xfs_rtblock_t sb_rextents; /* number of realtime extents */ |
93 | uuid_t sb_uuid; /* file system unique id */ | 93 | uuid_t sb_uuid; /* file system unique id */ |
94 | xfs_dfsbno_t sb_logstart; /* starting block of log if internal */ | 94 | xfs_fsblock_t sb_logstart; /* starting block of log if internal */ |
95 | xfs_ino_t sb_rootino; /* root inode number */ | 95 | xfs_ino_t sb_rootino; /* root inode number */ |
96 | xfs_ino_t sb_rbmino; /* bitmap inode for realtime extents */ | 96 | xfs_ino_t sb_rbmino; /* bitmap inode for realtime extents */ |
97 | xfs_ino_t sb_rsumino; /* summary inode for rt bitmap */ | 97 | xfs_ino_t sb_rsumino; /* summary inode for rt bitmap */ |
diff --git a/fs/xfs/xfs_shared.h b/fs/xfs/libxfs/xfs_shared.h index 82404da2ca67..82404da2ca67 100644 --- a/fs/xfs/xfs_shared.h +++ b/fs/xfs/libxfs/xfs_shared.h | |||
diff --git a/fs/xfs/xfs_symlink_remote.c b/fs/xfs/libxfs/xfs_symlink_remote.c index 23c2f2577c8d..5782f037eab4 100644 --- a/fs/xfs/xfs_symlink_remote.c +++ b/fs/xfs/libxfs/xfs_symlink_remote.c | |||
@@ -133,9 +133,9 @@ xfs_symlink_read_verify( | |||
133 | return; | 133 | return; |
134 | 134 | ||
135 | if (!xfs_buf_verify_cksum(bp, XFS_SYMLINK_CRC_OFF)) | 135 | if (!xfs_buf_verify_cksum(bp, XFS_SYMLINK_CRC_OFF)) |
136 | xfs_buf_ioerror(bp, EFSBADCRC); | 136 | xfs_buf_ioerror(bp, -EFSBADCRC); |
137 | else if (!xfs_symlink_verify(bp)) | 137 | else if (!xfs_symlink_verify(bp)) |
138 | xfs_buf_ioerror(bp, EFSCORRUPTED); | 138 | xfs_buf_ioerror(bp, -EFSCORRUPTED); |
139 | 139 | ||
140 | if (bp->b_error) | 140 | if (bp->b_error) |
141 | xfs_verifier_error(bp); | 141 | xfs_verifier_error(bp); |
@@ -153,7 +153,7 @@ xfs_symlink_write_verify( | |||
153 | return; | 153 | return; |
154 | 154 | ||
155 | if (!xfs_symlink_verify(bp)) { | 155 | if (!xfs_symlink_verify(bp)) { |
156 | xfs_buf_ioerror(bp, EFSCORRUPTED); | 156 | xfs_buf_ioerror(bp, -EFSCORRUPTED); |
157 | xfs_verifier_error(bp); | 157 | xfs_verifier_error(bp); |
158 | return; | 158 | return; |
159 | } | 159 | } |
diff --git a/fs/xfs/xfs_trans_resv.c b/fs/xfs/libxfs/xfs_trans_resv.c index f2bda7c76b8a..f2bda7c76b8a 100644 --- a/fs/xfs/xfs_trans_resv.c +++ b/fs/xfs/libxfs/xfs_trans_resv.c | |||
diff --git a/fs/xfs/xfs_trans_resv.h b/fs/xfs/libxfs/xfs_trans_resv.h index 1097d14cd583..1097d14cd583 100644 --- a/fs/xfs/xfs_trans_resv.h +++ b/fs/xfs/libxfs/xfs_trans_resv.h | |||
diff --git a/fs/xfs/xfs_trans_space.h b/fs/xfs/libxfs/xfs_trans_space.h index bf9c4579334d..bf9c4579334d 100644 --- a/fs/xfs/xfs_trans_space.h +++ b/fs/xfs/libxfs/xfs_trans_space.h | |||
diff --git a/fs/xfs/xfs_acl.c b/fs/xfs/xfs_acl.c index 6888ad886ff6..a65fa5dde6e9 100644 --- a/fs/xfs/xfs_acl.c +++ b/fs/xfs/xfs_acl.c | |||
@@ -152,7 +152,7 @@ xfs_get_acl(struct inode *inode, int type) | |||
152 | if (!xfs_acl) | 152 | if (!xfs_acl) |
153 | return ERR_PTR(-ENOMEM); | 153 | return ERR_PTR(-ENOMEM); |
154 | 154 | ||
155 | error = -xfs_attr_get(ip, ea_name, (unsigned char *)xfs_acl, | 155 | error = xfs_attr_get(ip, ea_name, (unsigned char *)xfs_acl, |
156 | &len, ATTR_ROOT); | 156 | &len, ATTR_ROOT); |
157 | if (error) { | 157 | if (error) { |
158 | /* | 158 | /* |
@@ -210,7 +210,7 @@ __xfs_set_acl(struct inode *inode, int type, struct posix_acl *acl) | |||
210 | len -= sizeof(struct xfs_acl_entry) * | 210 | len -= sizeof(struct xfs_acl_entry) * |
211 | (XFS_ACL_MAX_ENTRIES(ip->i_mount) - acl->a_count); | 211 | (XFS_ACL_MAX_ENTRIES(ip->i_mount) - acl->a_count); |
212 | 212 | ||
213 | error = -xfs_attr_set(ip, ea_name, (unsigned char *)xfs_acl, | 213 | error = xfs_attr_set(ip, ea_name, (unsigned char *)xfs_acl, |
214 | len, ATTR_ROOT); | 214 | len, ATTR_ROOT); |
215 | 215 | ||
216 | kmem_free(xfs_acl); | 216 | kmem_free(xfs_acl); |
@@ -218,7 +218,7 @@ __xfs_set_acl(struct inode *inode, int type, struct posix_acl *acl) | |||
218 | /* | 218 | /* |
219 | * A NULL ACL argument means we want to remove the ACL. | 219 | * A NULL ACL argument means we want to remove the ACL. |
220 | */ | 220 | */ |
221 | error = -xfs_attr_remove(ip, ea_name, ATTR_ROOT); | 221 | error = xfs_attr_remove(ip, ea_name, ATTR_ROOT); |
222 | 222 | ||
223 | /* | 223 | /* |
224 | * If the attribute didn't exist to start with that's fine. | 224 | * If the attribute didn't exist to start with that's fine. |
@@ -244,7 +244,7 @@ xfs_set_mode(struct inode *inode, umode_t mode) | |||
244 | iattr.ia_mode = mode; | 244 | iattr.ia_mode = mode; |
245 | iattr.ia_ctime = current_fs_time(inode->i_sb); | 245 | iattr.ia_ctime = current_fs_time(inode->i_sb); |
246 | 246 | ||
247 | error = -xfs_setattr_nonsize(XFS_I(inode), &iattr, XFS_ATTR_NOACL); | 247 | error = xfs_setattr_nonsize(XFS_I(inode), &iattr, XFS_ATTR_NOACL); |
248 | } | 248 | } |
249 | 249 | ||
250 | return error; | 250 | return error; |
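Editorial note: in xfs_acl.c the unary minus at each call site is dropped because xfs_attr_get/set/remove and xfs_setattr_nonsize now return negative errnos themselves; keeping the old negation would turn errors positive and defeat checks against specific error codes further down. An illustrative userspace sketch of that hazard, with a hypothetical helper standing in for the converted XFS functions:

#include <errno.h>
#include <stdio.h>

/* Hypothetical callee that, like the converted libxfs helpers, returns -errno. */
static int lookup_attr(int present)
{
	return present ? 0 : -ENOENT;	/* stands in for e.g. a missing-attribute error */
}

int main(void)
{
	/* Stale call-site negation: the error comes back positive... */
	int err = -lookup_attr(0);
	if (err == -ENOENT)
		printf("stale negation: matched -ENOENT\n");
	else
		printf("stale negation: got %d, specific-error check missed\n", err);

	/* Converted call site: the negative errno is compared directly. */
	err = lookup_attr(0);
	if (err == -ENOENT)
		printf("converted: matched -ENOENT (%d)\n", err);
	return 0;
}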
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c index faaf716e2080..11e9b4caa54f 100644 --- a/fs/xfs/xfs_aops.c +++ b/fs/xfs/xfs_aops.c | |||
@@ -240,7 +240,7 @@ xfs_end_io( | |||
240 | 240 | ||
241 | done: | 241 | done: |
242 | if (error) | 242 | if (error) |
243 | ioend->io_error = -error; | 243 | ioend->io_error = error; |
244 | xfs_destroy_ioend(ioend); | 244 | xfs_destroy_ioend(ioend); |
245 | } | 245 | } |
246 | 246 | ||
@@ -308,14 +308,14 @@ xfs_map_blocks( | |||
308 | int nimaps = 1; | 308 | int nimaps = 1; |
309 | 309 | ||
310 | if (XFS_FORCED_SHUTDOWN(mp)) | 310 | if (XFS_FORCED_SHUTDOWN(mp)) |
311 | return -XFS_ERROR(EIO); | 311 | return -EIO; |
312 | 312 | ||
313 | if (type == XFS_IO_UNWRITTEN) | 313 | if (type == XFS_IO_UNWRITTEN) |
314 | bmapi_flags |= XFS_BMAPI_IGSTATE; | 314 | bmapi_flags |= XFS_BMAPI_IGSTATE; |
315 | 315 | ||
316 | if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) { | 316 | if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) { |
317 | if (nonblocking) | 317 | if (nonblocking) |
318 | return -XFS_ERROR(EAGAIN); | 318 | return -EAGAIN; |
319 | xfs_ilock(ip, XFS_ILOCK_SHARED); | 319 | xfs_ilock(ip, XFS_ILOCK_SHARED); |
320 | } | 320 | } |
321 | 321 | ||
@@ -332,14 +332,14 @@ xfs_map_blocks( | |||
332 | xfs_iunlock(ip, XFS_ILOCK_SHARED); | 332 | xfs_iunlock(ip, XFS_ILOCK_SHARED); |
333 | 333 | ||
334 | if (error) | 334 | if (error) |
335 | return -XFS_ERROR(error); | 335 | return error; |
336 | 336 | ||
337 | if (type == XFS_IO_DELALLOC && | 337 | if (type == XFS_IO_DELALLOC && |
338 | (!nimaps || isnullstartblock(imap->br_startblock))) { | 338 | (!nimaps || isnullstartblock(imap->br_startblock))) { |
339 | error = xfs_iomap_write_allocate(ip, offset, imap); | 339 | error = xfs_iomap_write_allocate(ip, offset, imap); |
340 | if (!error) | 340 | if (!error) |
341 | trace_xfs_map_blocks_alloc(ip, offset, count, type, imap); | 341 | trace_xfs_map_blocks_alloc(ip, offset, count, type, imap); |
342 | return -XFS_ERROR(error); | 342 | return error; |
343 | } | 343 | } |
344 | 344 | ||
345 | #ifdef DEBUG | 345 | #ifdef DEBUG |
@@ -502,7 +502,7 @@ xfs_submit_ioend( | |||
502 | * time. | 502 | * time. |
503 | */ | 503 | */ |
504 | if (fail) { | 504 | if (fail) { |
505 | ioend->io_error = -fail; | 505 | ioend->io_error = fail; |
506 | xfs_finish_ioend(ioend); | 506 | xfs_finish_ioend(ioend); |
507 | continue; | 507 | continue; |
508 | } | 508 | } |
@@ -1253,7 +1253,7 @@ __xfs_get_blocks( | |||
1253 | int new = 0; | 1253 | int new = 0; |
1254 | 1254 | ||
1255 | if (XFS_FORCED_SHUTDOWN(mp)) | 1255 | if (XFS_FORCED_SHUTDOWN(mp)) |
1256 | return -XFS_ERROR(EIO); | 1256 | return -EIO; |
1257 | 1257 | ||
1258 | offset = (xfs_off_t)iblock << inode->i_blkbits; | 1258 | offset = (xfs_off_t)iblock << inode->i_blkbits; |
1259 | ASSERT(bh_result->b_size >= (1 << inode->i_blkbits)); | 1259 | ASSERT(bh_result->b_size >= (1 << inode->i_blkbits)); |
@@ -1302,7 +1302,7 @@ __xfs_get_blocks( | |||
1302 | error = xfs_iomap_write_direct(ip, offset, size, | 1302 | error = xfs_iomap_write_direct(ip, offset, size, |
1303 | &imap, nimaps); | 1303 | &imap, nimaps); |
1304 | if (error) | 1304 | if (error) |
1305 | return -error; | 1305 | return error; |
1306 | new = 1; | 1306 | new = 1; |
1307 | } else { | 1307 | } else { |
1308 | /* | 1308 | /* |
@@ -1415,7 +1415,7 @@ __xfs_get_blocks( | |||
1415 | 1415 | ||
1416 | out_unlock: | 1416 | out_unlock: |
1417 | xfs_iunlock(ip, lockmode); | 1417 | xfs_iunlock(ip, lockmode); |
1418 | return -error; | 1418 | return error; |
1419 | } | 1419 | } |
1420 | 1420 | ||
1421 | int | 1421 | int |
diff --git a/fs/xfs/xfs_attr_inactive.c b/fs/xfs/xfs_attr_inactive.c index 09480c57f069..aa2a8b1838a2 100644 --- a/fs/xfs/xfs_attr_inactive.c +++ b/fs/xfs/xfs_attr_inactive.c | |||
@@ -76,7 +76,7 @@ xfs_attr3_leaf_freextent( | |||
76 | error = xfs_bmapi_read(dp, (xfs_fileoff_t)tblkno, tblkcnt, | 76 | error = xfs_bmapi_read(dp, (xfs_fileoff_t)tblkno, tblkcnt, |
77 | &map, &nmap, XFS_BMAPI_ATTRFORK); | 77 | &map, &nmap, XFS_BMAPI_ATTRFORK); |
78 | if (error) { | 78 | if (error) { |
79 | return(error); | 79 | return error; |
80 | } | 80 | } |
81 | ASSERT(nmap == 1); | 81 | ASSERT(nmap == 1); |
82 | ASSERT(map.br_startblock != DELAYSTARTBLOCK); | 82 | ASSERT(map.br_startblock != DELAYSTARTBLOCK); |
@@ -95,21 +95,21 @@ xfs_attr3_leaf_freextent( | |||
95 | dp->i_mount->m_ddev_targp, | 95 | dp->i_mount->m_ddev_targp, |
96 | dblkno, dblkcnt, 0); | 96 | dblkno, dblkcnt, 0); |
97 | if (!bp) | 97 | if (!bp) |
98 | return ENOMEM; | 98 | return -ENOMEM; |
99 | xfs_trans_binval(*trans, bp); | 99 | xfs_trans_binval(*trans, bp); |
100 | /* | 100 | /* |
101 | * Roll to next transaction. | 101 | * Roll to next transaction. |
102 | */ | 102 | */ |
103 | error = xfs_trans_roll(trans, dp); | 103 | error = xfs_trans_roll(trans, dp); |
104 | if (error) | 104 | if (error) |
105 | return (error); | 105 | return error; |
106 | } | 106 | } |
107 | 107 | ||
108 | tblkno += map.br_blockcount; | 108 | tblkno += map.br_blockcount; |
109 | tblkcnt -= map.br_blockcount; | 109 | tblkcnt -= map.br_blockcount; |
110 | } | 110 | } |
111 | 111 | ||
112 | return(0); | 112 | return 0; |
113 | } | 113 | } |
114 | 114 | ||
115 | /* | 115 | /* |
@@ -227,7 +227,7 @@ xfs_attr3_node_inactive( | |||
227 | */ | 227 | */ |
228 | if (level > XFS_DA_NODE_MAXDEPTH) { | 228 | if (level > XFS_DA_NODE_MAXDEPTH) { |
229 | xfs_trans_brelse(*trans, bp); /* no locks for later trans */ | 229 | xfs_trans_brelse(*trans, bp); /* no locks for later trans */ |
230 | return XFS_ERROR(EIO); | 230 | return -EIO; |
231 | } | 231 | } |
232 | 232 | ||
233 | node = bp->b_addr; | 233 | node = bp->b_addr; |
@@ -256,7 +256,7 @@ xfs_attr3_node_inactive( | |||
256 | error = xfs_da3_node_read(*trans, dp, child_fsb, -2, &child_bp, | 256 | error = xfs_da3_node_read(*trans, dp, child_fsb, -2, &child_bp, |
257 | XFS_ATTR_FORK); | 257 | XFS_ATTR_FORK); |
258 | if (error) | 258 | if (error) |
259 | return(error); | 259 | return error; |
260 | if (child_bp) { | 260 | if (child_bp) { |
261 | /* save for re-read later */ | 261 | /* save for re-read later */ |
262 | child_blkno = XFS_BUF_ADDR(child_bp); | 262 | child_blkno = XFS_BUF_ADDR(child_bp); |
@@ -277,7 +277,7 @@ xfs_attr3_node_inactive( | |||
277 | child_bp); | 277 | child_bp); |
278 | break; | 278 | break; |
279 | default: | 279 | default: |
280 | error = XFS_ERROR(EIO); | 280 | error = -EIO; |
281 | xfs_trans_brelse(*trans, child_bp); | 281 | xfs_trans_brelse(*trans, child_bp); |
282 | break; | 282 | break; |
283 | } | 283 | } |
@@ -360,7 +360,7 @@ xfs_attr3_root_inactive( | |||
360 | error = xfs_attr3_leaf_inactive(trans, dp, bp); | 360 | error = xfs_attr3_leaf_inactive(trans, dp, bp); |
361 | break; | 361 | break; |
362 | default: | 362 | default: |
363 | error = XFS_ERROR(EIO); | 363 | error = -EIO; |
364 | xfs_trans_brelse(*trans, bp); | 364 | xfs_trans_brelse(*trans, bp); |
365 | break; | 365 | break; |
366 | } | 366 | } |
@@ -414,7 +414,7 @@ xfs_attr_inactive(xfs_inode_t *dp) | |||
414 | error = xfs_trans_reserve(trans, &M_RES(mp)->tr_attrinval, 0, 0); | 414 | error = xfs_trans_reserve(trans, &M_RES(mp)->tr_attrinval, 0, 0); |
415 | if (error) { | 415 | if (error) { |
416 | xfs_trans_cancel(trans, 0); | 416 | xfs_trans_cancel(trans, 0); |
417 | return(error); | 417 | return error; |
418 | } | 418 | } |
419 | xfs_ilock(dp, XFS_ILOCK_EXCL); | 419 | xfs_ilock(dp, XFS_ILOCK_EXCL); |
420 | 420 | ||
@@ -443,10 +443,10 @@ xfs_attr_inactive(xfs_inode_t *dp) | |||
443 | error = xfs_trans_commit(trans, XFS_TRANS_RELEASE_LOG_RES); | 443 | error = xfs_trans_commit(trans, XFS_TRANS_RELEASE_LOG_RES); |
444 | xfs_iunlock(dp, XFS_ILOCK_EXCL); | 444 | xfs_iunlock(dp, XFS_ILOCK_EXCL); |
445 | 445 | ||
446 | return(error); | 446 | return error; |
447 | 447 | ||
448 | out: | 448 | out: |
449 | xfs_trans_cancel(trans, XFS_TRANS_RELEASE_LOG_RES|XFS_TRANS_ABORT); | 449 | xfs_trans_cancel(trans, XFS_TRANS_RELEASE_LOG_RES|XFS_TRANS_ABORT); |
450 | xfs_iunlock(dp, XFS_ILOCK_EXCL); | 450 | xfs_iunlock(dp, XFS_ILOCK_EXCL); |
451 | return(error); | 451 | return error; |
452 | } | 452 | } |
diff --git a/fs/xfs/xfs_attr_list.c b/fs/xfs/xfs_attr_list.c index 90e2eeb21207..62db83ab6cbc 100644 --- a/fs/xfs/xfs_attr_list.c +++ b/fs/xfs/xfs_attr_list.c | |||
@@ -50,11 +50,11 @@ xfs_attr_shortform_compare(const void *a, const void *b) | |||
50 | sa = (xfs_attr_sf_sort_t *)a; | 50 | sa = (xfs_attr_sf_sort_t *)a; |
51 | sb = (xfs_attr_sf_sort_t *)b; | 51 | sb = (xfs_attr_sf_sort_t *)b; |
52 | if (sa->hash < sb->hash) { | 52 | if (sa->hash < sb->hash) { |
53 | return(-1); | 53 | return -1; |
54 | } else if (sa->hash > sb->hash) { | 54 | } else if (sa->hash > sb->hash) { |
55 | return(1); | 55 | return 1; |
56 | } else { | 56 | } else { |
57 | return(sa->entno - sb->entno); | 57 | return sa->entno - sb->entno; |
58 | } | 58 | } |
59 | } | 59 | } |
60 | 60 | ||
@@ -86,7 +86,7 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context) | |||
86 | sf = (xfs_attr_shortform_t *)dp->i_afp->if_u1.if_data; | 86 | sf = (xfs_attr_shortform_t *)dp->i_afp->if_u1.if_data; |
87 | ASSERT(sf != NULL); | 87 | ASSERT(sf != NULL); |
88 | if (!sf->hdr.count) | 88 | if (!sf->hdr.count) |
89 | return(0); | 89 | return 0; |
90 | cursor = context->cursor; | 90 | cursor = context->cursor; |
91 | ASSERT(cursor != NULL); | 91 | ASSERT(cursor != NULL); |
92 | 92 | ||
@@ -124,7 +124,7 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context) | |||
124 | sfe = XFS_ATTR_SF_NEXTENTRY(sfe); | 124 | sfe = XFS_ATTR_SF_NEXTENTRY(sfe); |
125 | } | 125 | } |
126 | trace_xfs_attr_list_sf_all(context); | 126 | trace_xfs_attr_list_sf_all(context); |
127 | return(0); | 127 | return 0; |
128 | } | 128 | } |
129 | 129 | ||
130 | /* do no more for a search callback */ | 130 | /* do no more for a search callback */ |
@@ -150,7 +150,7 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context) | |||
150 | XFS_ERRLEVEL_LOW, | 150 | XFS_ERRLEVEL_LOW, |
151 | context->dp->i_mount, sfe); | 151 | context->dp->i_mount, sfe); |
152 | kmem_free(sbuf); | 152 | kmem_free(sbuf); |
153 | return XFS_ERROR(EFSCORRUPTED); | 153 | return -EFSCORRUPTED; |
154 | } | 154 | } |
155 | 155 | ||
156 | sbp->entno = i; | 156 | sbp->entno = i; |
@@ -188,7 +188,7 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context) | |||
188 | } | 188 | } |
189 | if (i == nsbuf) { | 189 | if (i == nsbuf) { |
190 | kmem_free(sbuf); | 190 | kmem_free(sbuf); |
191 | return(0); | 191 | return 0; |
192 | } | 192 | } |
193 | 193 | ||
194 | /* | 194 | /* |
@@ -213,7 +213,7 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context) | |||
213 | } | 213 | } |
214 | 214 | ||
215 | kmem_free(sbuf); | 215 | kmem_free(sbuf); |
216 | return(0); | 216 | return 0; |
217 | } | 217 | } |
218 | 218 | ||
219 | STATIC int | 219 | STATIC int |
@@ -243,8 +243,8 @@ xfs_attr_node_list(xfs_attr_list_context_t *context) | |||
243 | if (cursor->blkno > 0) { | 243 | if (cursor->blkno > 0) { |
244 | error = xfs_da3_node_read(NULL, dp, cursor->blkno, -1, | 244 | error = xfs_da3_node_read(NULL, dp, cursor->blkno, -1, |
245 | &bp, XFS_ATTR_FORK); | 245 | &bp, XFS_ATTR_FORK); |
246 | if ((error != 0) && (error != EFSCORRUPTED)) | 246 | if ((error != 0) && (error != -EFSCORRUPTED)) |
247 | return(error); | 247 | return error; |
248 | if (bp) { | 248 | if (bp) { |
249 | struct xfs_attr_leaf_entry *entries; | 249 | struct xfs_attr_leaf_entry *entries; |
250 | 250 | ||
@@ -295,7 +295,7 @@ xfs_attr_node_list(xfs_attr_list_context_t *context) | |||
295 | cursor->blkno, -1, &bp, | 295 | cursor->blkno, -1, &bp, |
296 | XFS_ATTR_FORK); | 296 | XFS_ATTR_FORK); |
297 | if (error) | 297 | if (error) |
298 | return(error); | 298 | return error; |
299 | node = bp->b_addr; | 299 | node = bp->b_addr; |
300 | magic = be16_to_cpu(node->hdr.info.magic); | 300 | magic = be16_to_cpu(node->hdr.info.magic); |
301 | if (magic == XFS_ATTR_LEAF_MAGIC || | 301 | if (magic == XFS_ATTR_LEAF_MAGIC || |
@@ -308,7 +308,7 @@ xfs_attr_node_list(xfs_attr_list_context_t *context) | |||
308 | context->dp->i_mount, | 308 | context->dp->i_mount, |
309 | node); | 309 | node); |
310 | xfs_trans_brelse(NULL, bp); | 310 | xfs_trans_brelse(NULL, bp); |
311 | return XFS_ERROR(EFSCORRUPTED); | 311 | return -EFSCORRUPTED; |
312 | } | 312 | } |
313 | 313 | ||
314 | dp->d_ops->node_hdr_from_disk(&nodehdr, node); | 314 | dp->d_ops->node_hdr_from_disk(&nodehdr, node); |
@@ -496,11 +496,11 @@ xfs_attr_leaf_list(xfs_attr_list_context_t *context) | |||
496 | context->cursor->blkno = 0; | 496 | context->cursor->blkno = 0; |
497 | error = xfs_attr3_leaf_read(NULL, context->dp, 0, -1, &bp); | 497 | error = xfs_attr3_leaf_read(NULL, context->dp, 0, -1, &bp); |
498 | if (error) | 498 | if (error) |
499 | return XFS_ERROR(error); | 499 | return error; |
500 | 500 | ||
501 | error = xfs_attr3_leaf_list_int(bp, context); | 501 | error = xfs_attr3_leaf_list_int(bp, context); |
502 | xfs_trans_brelse(NULL, bp); | 502 | xfs_trans_brelse(NULL, bp); |
503 | return XFS_ERROR(error); | 503 | return error; |
504 | } | 504 | } |
505 | 505 | ||
506 | int | 506 | int |
@@ -514,7 +514,7 @@ xfs_attr_list_int( | |||
514 | XFS_STATS_INC(xs_attr_list); | 514 | XFS_STATS_INC(xs_attr_list); |
515 | 515 | ||
516 | if (XFS_FORCED_SHUTDOWN(dp->i_mount)) | 516 | if (XFS_FORCED_SHUTDOWN(dp->i_mount)) |
517 | return EIO; | 517 | return -EIO; |
518 | 518 | ||
519 | /* | 519 | /* |
520 | * Decide on what work routines to call based on the inode size. | 520 | * Decide on what work routines to call based on the inode size. |
@@ -616,16 +616,16 @@ xfs_attr_list( | |||
616 | * Validate the cursor. | 616 | * Validate the cursor. |
617 | */ | 617 | */ |
618 | if (cursor->pad1 || cursor->pad2) | 618 | if (cursor->pad1 || cursor->pad2) |
619 | return(XFS_ERROR(EINVAL)); | 619 | return -EINVAL; |
620 | if ((cursor->initted == 0) && | 620 | if ((cursor->initted == 0) && |
621 | (cursor->hashval || cursor->blkno || cursor->offset)) | 621 | (cursor->hashval || cursor->blkno || cursor->offset)) |
622 | return XFS_ERROR(EINVAL); | 622 | return -EINVAL; |
623 | 623 | ||
624 | /* | 624 | /* |
625 | * Check for a properly aligned buffer. | 625 | * Check for a properly aligned buffer. |
626 | */ | 626 | */ |
627 | if (((long)buffer) & (sizeof(int)-1)) | 627 | if (((long)buffer) & (sizeof(int)-1)) |
628 | return XFS_ERROR(EFAULT); | 628 | return -EFAULT; |
629 | if (flags & ATTR_KERNOVAL) | 629 | if (flags & ATTR_KERNOVAL) |
630 | bufsize = 0; | 630 | bufsize = 0; |
631 | 631 | ||
@@ -648,6 +648,6 @@ xfs_attr_list( | |||
648 | alist->al_offset[0] = context.bufsize; | 648 | alist->al_offset[0] = context.bufsize; |
649 | 649 | ||
650 | error = xfs_attr_list_int(&context); | 650 | error = xfs_attr_list_int(&context); |
651 | ASSERT(error >= 0); | 651 | ASSERT(error <= 0); |
652 | return error; | 652 | return error; |
653 | } | 653 | } |
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c index 64731ef3324d..2f1e30d39a35 100644 --- a/fs/xfs/xfs_bmap_util.c +++ b/fs/xfs/xfs_bmap_util.c | |||
@@ -133,7 +133,7 @@ xfs_bmap_finish( | |||
133 | mp = ntp->t_mountp; | 133 | mp = ntp->t_mountp; |
134 | if (!XFS_FORCED_SHUTDOWN(mp)) | 134 | if (!XFS_FORCED_SHUTDOWN(mp)) |
135 | xfs_force_shutdown(mp, | 135 | xfs_force_shutdown(mp, |
136 | (error == EFSCORRUPTED) ? | 136 | (error == -EFSCORRUPTED) ? |
137 | SHUTDOWN_CORRUPT_INCORE : | 137 | SHUTDOWN_CORRUPT_INCORE : |
138 | SHUTDOWN_META_IO_ERROR); | 138 | SHUTDOWN_META_IO_ERROR); |
139 | return error; | 139 | return error; |
@@ -365,7 +365,7 @@ xfs_bmap_count_tree( | |||
365 | xfs_trans_brelse(tp, bp); | 365 | xfs_trans_brelse(tp, bp); |
366 | XFS_ERROR_REPORT("xfs_bmap_count_tree(1)", | 366 | XFS_ERROR_REPORT("xfs_bmap_count_tree(1)", |
367 | XFS_ERRLEVEL_LOW, mp); | 367 | XFS_ERRLEVEL_LOW, mp); |
368 | return XFS_ERROR(EFSCORRUPTED); | 368 | return -EFSCORRUPTED; |
369 | } | 369 | } |
370 | xfs_trans_brelse(tp, bp); | 370 | xfs_trans_brelse(tp, bp); |
371 | } else { | 371 | } else { |
@@ -425,14 +425,14 @@ xfs_bmap_count_blocks( | |||
425 | ASSERT(level > 0); | 425 | ASSERT(level > 0); |
426 | pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes); | 426 | pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes); |
427 | bno = be64_to_cpu(*pp); | 427 | bno = be64_to_cpu(*pp); |
428 | ASSERT(bno != NULLDFSBNO); | 428 | ASSERT(bno != NULLFSBLOCK); |
429 | ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount); | 429 | ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount); |
430 | ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks); | 430 | ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks); |
431 | 431 | ||
432 | if (unlikely(xfs_bmap_count_tree(mp, tp, ifp, bno, level, count) < 0)) { | 432 | if (unlikely(xfs_bmap_count_tree(mp, tp, ifp, bno, level, count) < 0)) { |
433 | XFS_ERROR_REPORT("xfs_bmap_count_blocks(2)", XFS_ERRLEVEL_LOW, | 433 | XFS_ERROR_REPORT("xfs_bmap_count_blocks(2)", XFS_ERRLEVEL_LOW, |
434 | mp); | 434 | mp); |
435 | return XFS_ERROR(EFSCORRUPTED); | 435 | return -EFSCORRUPTED; |
436 | } | 436 | } |
437 | 437 | ||
438 | return 0; | 438 | return 0; |
@@ -524,13 +524,13 @@ xfs_getbmap( | |||
524 | if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS && | 524 | if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS && |
525 | ip->i_d.di_aformat != XFS_DINODE_FMT_BTREE && | 525 | ip->i_d.di_aformat != XFS_DINODE_FMT_BTREE && |
526 | ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL) | 526 | ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL) |
527 | return XFS_ERROR(EINVAL); | 527 | return -EINVAL; |
528 | } else if (unlikely( | 528 | } else if (unlikely( |
529 | ip->i_d.di_aformat != 0 && | 529 | ip->i_d.di_aformat != 0 && |
530 | ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS)) { | 530 | ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS)) { |
531 | XFS_ERROR_REPORT("xfs_getbmap", XFS_ERRLEVEL_LOW, | 531 | XFS_ERROR_REPORT("xfs_getbmap", XFS_ERRLEVEL_LOW, |
532 | ip->i_mount); | 532 | ip->i_mount); |
533 | return XFS_ERROR(EFSCORRUPTED); | 533 | return -EFSCORRUPTED; |
534 | } | 534 | } |
535 | 535 | ||
536 | prealloced = 0; | 536 | prealloced = 0; |
@@ -539,7 +539,7 @@ xfs_getbmap( | |||
539 | if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS && | 539 | if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS && |
540 | ip->i_d.di_format != XFS_DINODE_FMT_BTREE && | 540 | ip->i_d.di_format != XFS_DINODE_FMT_BTREE && |
541 | ip->i_d.di_format != XFS_DINODE_FMT_LOCAL) | 541 | ip->i_d.di_format != XFS_DINODE_FMT_LOCAL) |
542 | return XFS_ERROR(EINVAL); | 542 | return -EINVAL; |
543 | 543 | ||
544 | if (xfs_get_extsz_hint(ip) || | 544 | if (xfs_get_extsz_hint(ip) || |
545 | ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC|XFS_DIFLAG_APPEND)){ | 545 | ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC|XFS_DIFLAG_APPEND)){ |
@@ -559,26 +559,26 @@ xfs_getbmap( | |||
559 | bmv->bmv_entries = 0; | 559 | bmv->bmv_entries = 0; |
560 | return 0; | 560 | return 0; |
561 | } else if (bmv->bmv_length < 0) { | 561 | } else if (bmv->bmv_length < 0) { |
562 | return XFS_ERROR(EINVAL); | 562 | return -EINVAL; |
563 | } | 563 | } |
564 | 564 | ||
565 | nex = bmv->bmv_count - 1; | 565 | nex = bmv->bmv_count - 1; |
566 | if (nex <= 0) | 566 | if (nex <= 0) |
567 | return XFS_ERROR(EINVAL); | 567 | return -EINVAL; |
568 | bmvend = bmv->bmv_offset + bmv->bmv_length; | 568 | bmvend = bmv->bmv_offset + bmv->bmv_length; |
569 | 569 | ||
570 | 570 | ||
571 | if (bmv->bmv_count > ULONG_MAX / sizeof(struct getbmapx)) | 571 | if (bmv->bmv_count > ULONG_MAX / sizeof(struct getbmapx)) |
572 | return XFS_ERROR(ENOMEM); | 572 | return -ENOMEM; |
573 | out = kmem_zalloc_large(bmv->bmv_count * sizeof(struct getbmapx), 0); | 573 | out = kmem_zalloc_large(bmv->bmv_count * sizeof(struct getbmapx), 0); |
574 | if (!out) | 574 | if (!out) |
575 | return XFS_ERROR(ENOMEM); | 575 | return -ENOMEM; |
576 | 576 | ||
577 | xfs_ilock(ip, XFS_IOLOCK_SHARED); | 577 | xfs_ilock(ip, XFS_IOLOCK_SHARED); |
578 | if (whichfork == XFS_DATA_FORK) { | 578 | if (whichfork == XFS_DATA_FORK) { |
579 | if (!(iflags & BMV_IF_DELALLOC) && | 579 | if (!(iflags & BMV_IF_DELALLOC) && |
580 | (ip->i_delayed_blks || XFS_ISIZE(ip) > ip->i_d.di_size)) { | 580 | (ip->i_delayed_blks || XFS_ISIZE(ip) > ip->i_d.di_size)) { |
581 | error = -filemap_write_and_wait(VFS_I(ip)->i_mapping); | 581 | error = filemap_write_and_wait(VFS_I(ip)->i_mapping); |
582 | if (error) | 582 | if (error) |
583 | goto out_unlock_iolock; | 583 | goto out_unlock_iolock; |
584 | 584 | ||
@@ -611,7 +611,7 @@ xfs_getbmap( | |||
611 | /* | 611 | /* |
612 | * Allocate enough space to handle "subnex" maps at a time. | 612 | * Allocate enough space to handle "subnex" maps at a time. |
613 | */ | 613 | */ |
614 | error = ENOMEM; | 614 | error = -ENOMEM; |
615 | subnex = 16; | 615 | subnex = 16; |
616 | map = kmem_alloc(subnex * sizeof(*map), KM_MAYFAIL | KM_NOFS); | 616 | map = kmem_alloc(subnex * sizeof(*map), KM_MAYFAIL | KM_NOFS); |
617 | if (!map) | 617 | if (!map) |
@@ -809,7 +809,7 @@ xfs_can_free_eofblocks(struct xfs_inode *ip, bool force) | |||
809 | * have speculative prealloc/delalloc blocks to remove. | 809 | * have speculative prealloc/delalloc blocks to remove. |
810 | */ | 810 | */ |
811 | if (VFS_I(ip)->i_size == 0 && | 811 | if (VFS_I(ip)->i_size == 0 && |
812 | VN_CACHED(VFS_I(ip)) == 0 && | 812 | VFS_I(ip)->i_mapping->nrpages == 0 && |
813 | ip->i_delayed_blks == 0) | 813 | ip->i_delayed_blks == 0) |
814 | return false; | 814 | return false; |
815 | 815 | ||
@@ -882,7 +882,7 @@ xfs_free_eofblocks( | |||
882 | if (need_iolock) { | 882 | if (need_iolock) { |
883 | if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) { | 883 | if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) { |
884 | xfs_trans_cancel(tp, 0); | 884 | xfs_trans_cancel(tp, 0); |
885 | return EAGAIN; | 885 | return -EAGAIN; |
886 | } | 886 | } |
887 | } | 887 | } |
888 | 888 | ||
@@ -955,14 +955,14 @@ xfs_alloc_file_space( | |||
955 | trace_xfs_alloc_file_space(ip); | 955 | trace_xfs_alloc_file_space(ip); |
956 | 956 | ||
957 | if (XFS_FORCED_SHUTDOWN(mp)) | 957 | if (XFS_FORCED_SHUTDOWN(mp)) |
958 | return XFS_ERROR(EIO); | 958 | return -EIO; |
959 | 959 | ||
960 | error = xfs_qm_dqattach(ip, 0); | 960 | error = xfs_qm_dqattach(ip, 0); |
961 | if (error) | 961 | if (error) |
962 | return error; | 962 | return error; |
963 | 963 | ||
964 | if (len <= 0) | 964 | if (len <= 0) |
965 | return XFS_ERROR(EINVAL); | 965 | return -EINVAL; |
966 | 966 | ||
967 | rt = XFS_IS_REALTIME_INODE(ip); | 967 | rt = XFS_IS_REALTIME_INODE(ip); |
968 | extsz = xfs_get_extsz_hint(ip); | 968 | extsz = xfs_get_extsz_hint(ip); |
@@ -1028,7 +1028,7 @@ xfs_alloc_file_space( | |||
1028 | /* | 1028 | /* |
1029 | * Free the transaction structure. | 1029 | * Free the transaction structure. |
1030 | */ | 1030 | */ |
1031 | ASSERT(error == ENOSPC || XFS_FORCED_SHUTDOWN(mp)); | 1031 | ASSERT(error == -ENOSPC || XFS_FORCED_SHUTDOWN(mp)); |
1032 | xfs_trans_cancel(tp, 0); | 1032 | xfs_trans_cancel(tp, 0); |
1033 | break; | 1033 | break; |
1034 | } | 1034 | } |
@@ -1065,7 +1065,7 @@ xfs_alloc_file_space( | |||
1065 | allocated_fsb = imapp->br_blockcount; | 1065 | allocated_fsb = imapp->br_blockcount; |
1066 | 1066 | ||
1067 | if (nimaps == 0) { | 1067 | if (nimaps == 0) { |
1068 | error = XFS_ERROR(ENOSPC); | 1068 | error = -ENOSPC; |
1069 | break; | 1069 | break; |
1070 | } | 1070 | } |
1071 | 1071 | ||
@@ -1126,7 +1126,7 @@ xfs_zero_remaining_bytes( | |||
1126 | mp->m_rtdev_targp : mp->m_ddev_targp, | 1126 | mp->m_rtdev_targp : mp->m_ddev_targp, |
1127 | BTOBB(mp->m_sb.sb_blocksize), 0); | 1127 | BTOBB(mp->m_sb.sb_blocksize), 0); |
1128 | if (!bp) | 1128 | if (!bp) |
1129 | return XFS_ERROR(ENOMEM); | 1129 | return -ENOMEM; |
1130 | 1130 | ||
1131 | xfs_buf_unlock(bp); | 1131 | xfs_buf_unlock(bp); |
1132 | 1132 | ||
@@ -1158,7 +1158,7 @@ xfs_zero_remaining_bytes( | |||
1158 | XFS_BUF_SET_ADDR(bp, xfs_fsb_to_db(ip, imap.br_startblock)); | 1158 | XFS_BUF_SET_ADDR(bp, xfs_fsb_to_db(ip, imap.br_startblock)); |
1159 | 1159 | ||
1160 | if (XFS_FORCED_SHUTDOWN(mp)) { | 1160 | if (XFS_FORCED_SHUTDOWN(mp)) { |
1161 | error = XFS_ERROR(EIO); | 1161 | error = -EIO; |
1162 | break; | 1162 | break; |
1163 | } | 1163 | } |
1164 | xfs_buf_iorequest(bp); | 1164 | xfs_buf_iorequest(bp); |
@@ -1176,7 +1176,7 @@ xfs_zero_remaining_bytes( | |||
1176 | XFS_BUF_WRITE(bp); | 1176 | XFS_BUF_WRITE(bp); |
1177 | 1177 | ||
1178 | if (XFS_FORCED_SHUTDOWN(mp)) { | 1178 | if (XFS_FORCED_SHUTDOWN(mp)) { |
1179 | error = XFS_ERROR(EIO); | 1179 | error = -EIO; |
1180 | break; | 1180 | break; |
1181 | } | 1181 | } |
1182 | xfs_buf_iorequest(bp); | 1182 | xfs_buf_iorequest(bp); |
@@ -1234,7 +1234,7 @@ xfs_free_file_space( | |||
1234 | 1234 | ||
1235 | rounding = max_t(xfs_off_t, 1 << mp->m_sb.sb_blocklog, PAGE_CACHE_SIZE); | 1235 | rounding = max_t(xfs_off_t, 1 << mp->m_sb.sb_blocklog, PAGE_CACHE_SIZE); |
1236 | ioffset = offset & ~(rounding - 1); | 1236 | ioffset = offset & ~(rounding - 1); |
1237 | error = -filemap_write_and_wait_range(VFS_I(ip)->i_mapping, | 1237 | error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping, |
1238 | ioffset, -1); | 1238 | ioffset, -1); |
1239 | if (error) | 1239 | if (error) |
1240 | goto out; | 1240 | goto out; |
@@ -1315,7 +1315,7 @@ xfs_free_file_space( | |||
1315 | /* | 1315 | /* |
1316 | * Free the transaction structure. | 1316 | * Free the transaction structure. |
1317 | */ | 1317 | */ |
1318 | ASSERT(error == ENOSPC || XFS_FORCED_SHUTDOWN(mp)); | 1318 | ASSERT(error == -ENOSPC || XFS_FORCED_SHUTDOWN(mp)); |
1319 | xfs_trans_cancel(tp, 0); | 1319 | xfs_trans_cancel(tp, 0); |
1320 | break; | 1320 | break; |
1321 | } | 1321 | } |
@@ -1557,14 +1557,14 @@ xfs_swap_extents_check_format( | |||
1557 | /* Should never get a local format */ | 1557 | /* Should never get a local format */ |
1558 | if (ip->i_d.di_format == XFS_DINODE_FMT_LOCAL || | 1558 | if (ip->i_d.di_format == XFS_DINODE_FMT_LOCAL || |
1559 | tip->i_d.di_format == XFS_DINODE_FMT_LOCAL) | 1559 | tip->i_d.di_format == XFS_DINODE_FMT_LOCAL) |
1560 | return EINVAL; | 1560 | return -EINVAL; |
1561 | 1561 | ||
1562 | /* | 1562 | /* |
1563 | * if the target inode has less extents that then temporary inode then | 1563 | * if the target inode has less extents that then temporary inode then |
1564 | * why did userspace call us? | 1564 | * why did userspace call us? |
1565 | */ | 1565 | */ |
1566 | if (ip->i_d.di_nextents < tip->i_d.di_nextents) | 1566 | if (ip->i_d.di_nextents < tip->i_d.di_nextents) |
1567 | return EINVAL; | 1567 | return -EINVAL; |
1568 | 1568 | ||
1569 | /* | 1569 | /* |
1570 | * if the target inode is in extent form and the temp inode is in btree | 1570 | * if the target inode is in extent form and the temp inode is in btree |
@@ -1573,19 +1573,19 @@ xfs_swap_extents_check_format( | |||
1573 | */ | 1573 | */ |
1574 | if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS && | 1574 | if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS && |
1575 | tip->i_d.di_format == XFS_DINODE_FMT_BTREE) | 1575 | tip->i_d.di_format == XFS_DINODE_FMT_BTREE) |
1576 | return EINVAL; | 1576 | return -EINVAL; |
1577 | 1577 | ||
1578 | /* Check temp in extent form to max in target */ | 1578 | /* Check temp in extent form to max in target */ |
1579 | if (tip->i_d.di_format == XFS_DINODE_FMT_EXTENTS && | 1579 | if (tip->i_d.di_format == XFS_DINODE_FMT_EXTENTS && |
1580 | XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) > | 1580 | XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) > |
1581 | XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK)) | 1581 | XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK)) |
1582 | return EINVAL; | 1582 | return -EINVAL; |
1583 | 1583 | ||
1584 | /* Check target in extent form to max in temp */ | 1584 | /* Check target in extent form to max in temp */ |
1585 | if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS && | 1585 | if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS && |
1586 | XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) > | 1586 | XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) > |
1587 | XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK)) | 1587 | XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK)) |
1588 | return EINVAL; | 1588 | return -EINVAL; |
1589 | 1589 | ||
1590 | /* | 1590 | /* |
1591 | * If we are in a btree format, check that the temp root block will fit | 1591 | * If we are in a btree format, check that the temp root block will fit |
@@ -1599,26 +1599,50 @@ xfs_swap_extents_check_format( | |||
1599 | if (tip->i_d.di_format == XFS_DINODE_FMT_BTREE) { | 1599 | if (tip->i_d.di_format == XFS_DINODE_FMT_BTREE) { |
1600 | if (XFS_IFORK_BOFF(ip) && | 1600 | if (XFS_IFORK_BOFF(ip) && |
1601 | XFS_BMAP_BMDR_SPACE(tip->i_df.if_broot) > XFS_IFORK_BOFF(ip)) | 1601 | XFS_BMAP_BMDR_SPACE(tip->i_df.if_broot) > XFS_IFORK_BOFF(ip)) |
1602 | return EINVAL; | 1602 | return -EINVAL; |
1603 | if (XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) <= | 1603 | if (XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) <= |
1604 | XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK)) | 1604 | XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK)) |
1605 | return EINVAL; | 1605 | return -EINVAL; |
1606 | } | 1606 | } |
1607 | 1607 | ||
1608 | /* Reciprocal target->temp btree format checks */ | 1608 | /* Reciprocal target->temp btree format checks */ |
1609 | if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE) { | 1609 | if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE) { |
1610 | if (XFS_IFORK_BOFF(tip) && | 1610 | if (XFS_IFORK_BOFF(tip) && |
1611 | XFS_BMAP_BMDR_SPACE(ip->i_df.if_broot) > XFS_IFORK_BOFF(tip)) | 1611 | XFS_BMAP_BMDR_SPACE(ip->i_df.if_broot) > XFS_IFORK_BOFF(tip)) |
1612 | return EINVAL; | 1612 | return -EINVAL; |
1613 | if (XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) <= | 1613 | if (XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) <= |
1614 | XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK)) | 1614 | XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK)) |
1615 | return EINVAL; | 1615 | return -EINVAL; |
1616 | } | 1616 | } |
1617 | 1617 | ||
1618 | return 0; | 1618 | return 0; |
1619 | } | 1619 | } |
1620 | 1620 | ||
1621 | int | 1621 | int |
1622 | xfs_swap_extent_flush( | ||
1623 | struct xfs_inode *ip) | ||
1624 | { | ||
1625 | int error; | ||
1626 | |||
1627 | error = filemap_write_and_wait(VFS_I(ip)->i_mapping); | ||
1628 | if (error) | ||
1629 | return error; | ||
1630 | truncate_pagecache_range(VFS_I(ip), 0, -1); | ||
1631 | |||
1632 | /* Verify O_DIRECT for ftmp */ | ||
1633 | if (VFS_I(ip)->i_mapping->nrpages) | ||
1634 | return -EINVAL; | ||
1635 | |||
1636 | /* | ||
1637 | * Don't try to swap extents on mmap()d files because we can't lock | ||
1638 | * out races against page faults safely. | ||
1639 | */ | ||
1640 | if (mapping_mapped(VFS_I(ip)->i_mapping)) | ||
1641 | return -EBUSY; | ||
1642 | return 0; | ||
1643 | } | ||
1644 | |||
1645 | int | ||
1622 | xfs_swap_extents( | 1646 | xfs_swap_extents( |
1623 | xfs_inode_t *ip, /* target inode */ | 1647 | xfs_inode_t *ip, /* target inode */ |
1624 | xfs_inode_t *tip, /* tmp inode */ | 1648 | xfs_inode_t *tip, /* tmp inode */ |
@@ -1633,51 +1657,57 @@ xfs_swap_extents( | |||
1633 | int aforkblks = 0; | 1657 | int aforkblks = 0; |
1634 | int taforkblks = 0; | 1658 | int taforkblks = 0; |
1635 | __uint64_t tmp; | 1659 | __uint64_t tmp; |
1660 | int lock_flags; | ||
1636 | 1661 | ||
1637 | tempifp = kmem_alloc(sizeof(xfs_ifork_t), KM_MAYFAIL); | 1662 | tempifp = kmem_alloc(sizeof(xfs_ifork_t), KM_MAYFAIL); |
1638 | if (!tempifp) { | 1663 | if (!tempifp) { |
1639 | error = XFS_ERROR(ENOMEM); | 1664 | error = -ENOMEM; |
1640 | goto out; | 1665 | goto out; |
1641 | } | 1666 | } |
1642 | 1667 | ||
1643 | /* | 1668 | /* |
1644 | * we have to do two separate lock calls here to keep lockdep | 1669 | * Lock up the inodes against other IO and truncate to begin with. |
1645 | * happy. If we try to get all the locks in one call, lock will | 1670 | * Then we can ensure the inodes are flushed and have no page cache |
1646 | * report false positives when we drop the ILOCK and regain them | 1671 | * safely. Once we have done this we can take the ilocks and do the rest |
1647 | * below. | 1672 | * of the checks. |
1648 | */ | 1673 | */ |
1674 | lock_flags = XFS_IOLOCK_EXCL; | ||
1649 | xfs_lock_two_inodes(ip, tip, XFS_IOLOCK_EXCL); | 1675 | xfs_lock_two_inodes(ip, tip, XFS_IOLOCK_EXCL); |
1650 | xfs_lock_two_inodes(ip, tip, XFS_ILOCK_EXCL); | ||
1651 | 1676 | ||
1652 | /* Verify that both files have the same format */ | 1677 | /* Verify that both files have the same format */ |
1653 | if ((ip->i_d.di_mode & S_IFMT) != (tip->i_d.di_mode & S_IFMT)) { | 1678 | if ((ip->i_d.di_mode & S_IFMT) != (tip->i_d.di_mode & S_IFMT)) { |
1654 | error = XFS_ERROR(EINVAL); | 1679 | error = -EINVAL; |
1655 | goto out_unlock; | 1680 | goto out_unlock; |
1656 | } | 1681 | } |
1657 | 1682 | ||
1658 | /* Verify both files are either real-time or non-realtime */ | 1683 | /* Verify both files are either real-time or non-realtime */ |
1659 | if (XFS_IS_REALTIME_INODE(ip) != XFS_IS_REALTIME_INODE(tip)) { | 1684 | if (XFS_IS_REALTIME_INODE(ip) != XFS_IS_REALTIME_INODE(tip)) { |
1660 | error = XFS_ERROR(EINVAL); | 1685 | error = -EINVAL; |
1661 | goto out_unlock; | 1686 | goto out_unlock; |
1662 | } | 1687 | } |
1663 | 1688 | ||
1664 | error = -filemap_write_and_wait(VFS_I(tip)->i_mapping); | 1689 | error = xfs_swap_extent_flush(ip); |
1690 | if (error) | ||
1691 | goto out_unlock; | ||
1692 | error = xfs_swap_extent_flush(tip); | ||
1665 | if (error) | 1693 | if (error) |
1666 | goto out_unlock; | 1694 | goto out_unlock; |
1667 | truncate_pagecache_range(VFS_I(tip), 0, -1); | ||
1668 | 1695 | ||
1669 | /* Verify O_DIRECT for ftmp */ | 1696 | tp = xfs_trans_alloc(mp, XFS_TRANS_SWAPEXT); |
1670 | if (VN_CACHED(VFS_I(tip)) != 0) { | 1697 | error = xfs_trans_reserve(tp, &M_RES(mp)->tr_ichange, 0, 0); |
1671 | error = XFS_ERROR(EINVAL); | 1698 | if (error) { |
1699 | xfs_trans_cancel(tp, 0); | ||
1672 | goto out_unlock; | 1700 | goto out_unlock; |
1673 | } | 1701 | } |
1702 | xfs_lock_two_inodes(ip, tip, XFS_ILOCK_EXCL); | ||
1703 | lock_flags |= XFS_ILOCK_EXCL; | ||
1674 | 1704 | ||
1675 | /* Verify all data are being swapped */ | 1705 | /* Verify all data are being swapped */ |
1676 | if (sxp->sx_offset != 0 || | 1706 | if (sxp->sx_offset != 0 || |
1677 | sxp->sx_length != ip->i_d.di_size || | 1707 | sxp->sx_length != ip->i_d.di_size || |
1678 | sxp->sx_length != tip->i_d.di_size) { | 1708 | sxp->sx_length != tip->i_d.di_size) { |
1679 | error = XFS_ERROR(EFAULT); | 1709 | error = -EFAULT; |
1680 | goto out_unlock; | 1710 | goto out_trans_cancel; |
1681 | } | 1711 | } |
1682 | 1712 | ||
1683 | trace_xfs_swap_extent_before(ip, 0); | 1713 | trace_xfs_swap_extent_before(ip, 0); |
@@ -1689,7 +1719,7 @@ xfs_swap_extents( | |||
1689 | xfs_notice(mp, | 1719 | xfs_notice(mp, |
1690 | "%s: inode 0x%llx format is incompatible for exchanging.", | 1720 | "%s: inode 0x%llx format is incompatible for exchanging.", |
1691 | __func__, ip->i_ino); | 1721 | __func__, ip->i_ino); |
1692 | goto out_unlock; | 1722 | goto out_trans_cancel; |
1693 | } | 1723 | } |
1694 | 1724 | ||
1695 | /* | 1725 | /* |
@@ -1703,43 +1733,9 @@ xfs_swap_extents( | |||
1703 | (sbp->bs_ctime.tv_nsec != VFS_I(ip)->i_ctime.tv_nsec) || | 1733 | (sbp->bs_ctime.tv_nsec != VFS_I(ip)->i_ctime.tv_nsec) || |
1704 | (sbp->bs_mtime.tv_sec != VFS_I(ip)->i_mtime.tv_sec) || | 1734 | (sbp->bs_mtime.tv_sec != VFS_I(ip)->i_mtime.tv_sec) || |
1705 | (sbp->bs_mtime.tv_nsec != VFS_I(ip)->i_mtime.tv_nsec)) { | 1735 | (sbp->bs_mtime.tv_nsec != VFS_I(ip)->i_mtime.tv_nsec)) { |
1706 | error = XFS_ERROR(EBUSY); | 1736 | error = -EBUSY; |
1707 | goto out_unlock; | 1737 | goto out_trans_cancel; |
1708 | } | ||
1709 | |||
1710 | /* We need to fail if the file is memory mapped. Once we have tossed | ||
1711 | * all existing pages, the page fault will have no option | ||
1712 | * but to go to the filesystem for pages. By making the page fault call | ||
1713 | * vop_read (or write in the case of autogrow) they block on the iolock | ||
1714 | * until we have switched the extents. | ||
1715 | */ | ||
1716 | if (VN_MAPPED(VFS_I(ip))) { | ||
1717 | error = XFS_ERROR(EBUSY); | ||
1718 | goto out_unlock; | ||
1719 | } | ||
1720 | |||
1721 | xfs_iunlock(ip, XFS_ILOCK_EXCL); | ||
1722 | xfs_iunlock(tip, XFS_ILOCK_EXCL); | ||
1723 | |||
1724 | /* | ||
1725 | * There is a race condition here since we gave up the | ||
1726 | * ilock. However, the data fork will not change since | ||
1727 | * we have the iolock (locked for truncation too) so we | ||
1728 | * are safe. We don't really care if non-io related | ||
1729 | * fields change. | ||
1730 | */ | ||
1731 | truncate_pagecache_range(VFS_I(ip), 0, -1); | ||
1732 | |||
1733 | tp = xfs_trans_alloc(mp, XFS_TRANS_SWAPEXT); | ||
1734 | error = xfs_trans_reserve(tp, &M_RES(mp)->tr_ichange, 0, 0); | ||
1735 | if (error) { | ||
1736 | xfs_iunlock(ip, XFS_IOLOCK_EXCL); | ||
1737 | xfs_iunlock(tip, XFS_IOLOCK_EXCL); | ||
1738 | xfs_trans_cancel(tp, 0); | ||
1739 | goto out; | ||
1740 | } | 1738 | } |
1741 | xfs_lock_two_inodes(ip, tip, XFS_ILOCK_EXCL); | ||
1742 | |||
1743 | /* | 1739 | /* |
1744 | * Count the number of extended attribute blocks | 1740 | * Count the number of extended attribute blocks |
1745 | */ | 1741 | */ |
@@ -1757,8 +1753,8 @@ xfs_swap_extents( | |||
1757 | goto out_trans_cancel; | 1753 | goto out_trans_cancel; |
1758 | } | 1754 | } |
1759 | 1755 | ||
1760 | xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); | 1756 | xfs_trans_ijoin(tp, ip, lock_flags); |
1761 | xfs_trans_ijoin(tp, tip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); | 1757 | xfs_trans_ijoin(tp, tip, lock_flags); |
1762 | 1758 | ||
1763 | /* | 1759 | /* |
1764 | * Before we've swapped the forks, let's set the owners of the forks | 1760 | * Before we've swapped the forks, let's set the owners of the forks |
@@ -1887,8 +1883,8 @@ out: | |||
1887 | return error; | 1883 | return error; |
1888 | 1884 | ||
1889 | out_unlock: | 1885 | out_unlock: |
1890 | xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); | 1886 | xfs_iunlock(ip, lock_flags); |
1891 | xfs_iunlock(tip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); | 1887 | xfs_iunlock(tip, lock_flags); |
1892 | goto out; | 1888 | goto out; |
1893 | 1889 | ||
1894 | out_trans_cancel: | 1890 | out_trans_cancel: |
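
The reworked xfs_swap_extents() above takes the IOLOCK first, flushes and checks the files, reserves the transaction, and only then takes the ILOCK, recording what it holds in lock_flags so that every error path drops exactly the locks it owns. A minimal userspace sketch of that accumulate-and-unwind pattern, assuming stand-in lock/unlock helpers and flag names (none of these are kernel APIs):

    #include <errno.h>
    #include <stdio.h>

    #define IOLOCK_EXCL (1 << 0)
    #define ILOCK_EXCL  (1 << 1)

    /* Stand-ins for the real locking primitives. */
    static void take_locks(int flags) { printf("lock   0x%x\n", flags); }
    static void drop_locks(int flags) { printf("unlock 0x%x\n", flags); }

    /* Pretend preparation steps that may fail with a negative errno. */
    static int flush_file(int fail)    { return fail ? -EBUSY : 0; }
    static int reserve_trans(int fail) { return fail ? -ENOSPC : 0; }

    static int swap_extents(int fail_flush, int fail_reserve)
    {
            int lock_flags = IOLOCK_EXCL;   /* record what we hold */
            int error;

            take_locks(IOLOCK_EXCL);

            error = flush_file(fail_flush);
            if (error)
                    goto out_unlock;

            error = reserve_trans(fail_reserve);
            if (error)
                    goto out_unlock;

            /* Only now take the second lock, and remember that we did. */
            take_locks(ILOCK_EXCL);
            lock_flags |= ILOCK_EXCL;

            /* ... the actual extent swap would happen here ... */
            error = 0;

    out_unlock:
            /* One unwind path serves every stage: drop only what is held. */
            drop_locks(lock_flags);
            return error;
    }

    int main(void)
    {
            printf("ok path:       %d\n", swap_extents(0, 0));
            printf("flush fails:   %d\n", swap_extents(1, 0));
            printf("reserve fails: %d\n", swap_extents(0, 1));
            return 0;
    }
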
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c index 7a34a1ae6552..cd7b8ca9b064 100644 --- a/fs/xfs/xfs_buf.c +++ b/fs/xfs/xfs_buf.c | |||
@@ -130,7 +130,7 @@ xfs_buf_get_maps( | |||
130 | bp->b_maps = kmem_zalloc(map_count * sizeof(struct xfs_buf_map), | 130 | bp->b_maps = kmem_zalloc(map_count * sizeof(struct xfs_buf_map), |
131 | KM_NOFS); | 131 | KM_NOFS); |
132 | if (!bp->b_maps) | 132 | if (!bp->b_maps) |
133 | return ENOMEM; | 133 | return -ENOMEM; |
134 | return 0; | 134 | return 0; |
135 | } | 135 | } |
136 | 136 | ||
@@ -344,7 +344,7 @@ retry: | |||
344 | if (unlikely(page == NULL)) { | 344 | if (unlikely(page == NULL)) { |
345 | if (flags & XBF_READ_AHEAD) { | 345 | if (flags & XBF_READ_AHEAD) { |
346 | bp->b_page_count = i; | 346 | bp->b_page_count = i; |
347 | error = ENOMEM; | 347 | error = -ENOMEM; |
348 | goto out_free_pages; | 348 | goto out_free_pages; |
349 | } | 349 | } |
350 | 350 | ||
@@ -465,7 +465,7 @@ _xfs_buf_find( | |||
465 | eofs = XFS_FSB_TO_BB(btp->bt_mount, btp->bt_mount->m_sb.sb_dblocks); | 465 | eofs = XFS_FSB_TO_BB(btp->bt_mount, btp->bt_mount->m_sb.sb_dblocks); |
466 | if (blkno >= eofs) { | 466 | if (blkno >= eofs) { |
467 | /* | 467 | /* |
468 | * XXX (dgc): we should really be returning EFSCORRUPTED here, | 468 | * XXX (dgc): we should really be returning -EFSCORRUPTED here, |
469 | * but none of the higher level infrastructure supports | 469 | * but none of the higher level infrastructure supports |
470 | * returning a specific error on buffer lookup failures. | 470 | * returning a specific error on buffer lookup failures. |
471 | */ | 471 | */ |
@@ -1052,8 +1052,8 @@ xfs_buf_ioerror( | |||
1052 | xfs_buf_t *bp, | 1052 | xfs_buf_t *bp, |
1053 | int error) | 1053 | int error) |
1054 | { | 1054 | { |
1055 | ASSERT(error >= 0 && error <= 0xffff); | 1055 | ASSERT(error <= 0 && error >= -1000); |
1056 | bp->b_error = (unsigned short)error; | 1056 | bp->b_error = error; |
1057 | trace_xfs_buf_ioerror(bp, error, _RET_IP_); | 1057 | trace_xfs_buf_ioerror(bp, error, _RET_IP_); |
1058 | } | 1058 | } |
1059 | 1059 | ||
@@ -1064,7 +1064,7 @@ xfs_buf_ioerror_alert( | |||
1064 | { | 1064 | { |
1065 | xfs_alert(bp->b_target->bt_mount, | 1065 | xfs_alert(bp->b_target->bt_mount, |
1066 | "metadata I/O error: block 0x%llx (\"%s\") error %d numblks %d", | 1066 | "metadata I/O error: block 0x%llx (\"%s\") error %d numblks %d", |
1067 | (__uint64_t)XFS_BUF_ADDR(bp), func, bp->b_error, bp->b_length); | 1067 | (__uint64_t)XFS_BUF_ADDR(bp), func, -bp->b_error, bp->b_length); |
1068 | } | 1068 | } |
1069 | 1069 | ||
1070 | /* | 1070 | /* |
@@ -1083,7 +1083,7 @@ xfs_bioerror( | |||
1083 | /* | 1083 | /* |
1084 | * No need to wait until the buffer is unpinned, we aren't flushing it. | 1084 | * No need to wait until the buffer is unpinned, we aren't flushing it. |
1085 | */ | 1085 | */ |
1086 | xfs_buf_ioerror(bp, EIO); | 1086 | xfs_buf_ioerror(bp, -EIO); |
1087 | 1087 | ||
1088 | /* | 1088 | /* |
1089 | * We're calling xfs_buf_ioend, so delete XBF_DONE flag. | 1089 | * We're calling xfs_buf_ioend, so delete XBF_DONE flag. |
@@ -1094,7 +1094,7 @@ xfs_bioerror( | |||
1094 | 1094 | ||
1095 | xfs_buf_ioend(bp, 0); | 1095 | xfs_buf_ioend(bp, 0); |
1096 | 1096 | ||
1097 | return EIO; | 1097 | return -EIO; |
1098 | } | 1098 | } |
1099 | 1099 | ||
1100 | /* | 1100 | /* |
@@ -1127,13 +1127,13 @@ xfs_bioerror_relse( | |||
1127 | * There's no reason to mark error for | 1127 | * There's no reason to mark error for |
1128 | * ASYNC buffers. | 1128 | * ASYNC buffers. |
1129 | */ | 1129 | */ |
1130 | xfs_buf_ioerror(bp, EIO); | 1130 | xfs_buf_ioerror(bp, -EIO); |
1131 | complete(&bp->b_iowait); | 1131 | complete(&bp->b_iowait); |
1132 | } else { | 1132 | } else { |
1133 | xfs_buf_relse(bp); | 1133 | xfs_buf_relse(bp); |
1134 | } | 1134 | } |
1135 | 1135 | ||
1136 | return EIO; | 1136 | return -EIO; |
1137 | } | 1137 | } |
1138 | 1138 | ||
1139 | STATIC int | 1139 | STATIC int |
@@ -1199,7 +1199,7 @@ xfs_buf_bio_end_io( | |||
1199 | * buffers that require multiple bios to complete. | 1199 | * buffers that require multiple bios to complete. |
1200 | */ | 1200 | */ |
1201 | if (!bp->b_error) | 1201 | if (!bp->b_error) |
1202 | xfs_buf_ioerror(bp, -error); | 1202 | xfs_buf_ioerror(bp, error); |
1203 | 1203 | ||
1204 | if (!bp->b_error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ)) | 1204 | if (!bp->b_error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ)) |
1205 | invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp)); | 1205 | invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp)); |
@@ -1286,7 +1286,7 @@ next_chunk: | |||
1286 | * because the caller (xfs_buf_iorequest) holds a count itself. | 1286 | * because the caller (xfs_buf_iorequest) holds a count itself. |
1287 | */ | 1287 | */ |
1288 | atomic_dec(&bp->b_io_remaining); | 1288 | atomic_dec(&bp->b_io_remaining); |
1289 | xfs_buf_ioerror(bp, EIO); | 1289 | xfs_buf_ioerror(bp, -EIO); |
1290 | bio_put(bio); | 1290 | bio_put(bio); |
1291 | } | 1291 | } |
1292 | 1292 | ||
@@ -1330,6 +1330,20 @@ _xfs_buf_ioapply( | |||
1330 | SHUTDOWN_CORRUPT_INCORE); | 1330 | SHUTDOWN_CORRUPT_INCORE); |
1331 | return; | 1331 | return; |
1332 | } | 1332 | } |
1333 | } else if (bp->b_bn != XFS_BUF_DADDR_NULL) { | ||
1334 | struct xfs_mount *mp = bp->b_target->bt_mount; | ||
1335 | |||
1336 | /* | ||
1337 | * non-crc filesystems don't attach verifiers during | ||
1338 | * log recovery, so don't warn for such filesystems. | ||
1339 | */ | ||
1340 | if (xfs_sb_version_hascrc(&mp->m_sb)) { | ||
1341 | xfs_warn(mp, | ||
1342 | "%s: no ops on block 0x%llx/0x%x", | ||
1343 | __func__, bp->b_bn, bp->b_length); | ||
1344 | xfs_hex_dump(bp->b_addr, 64); | ||
1345 | dump_stack(); | ||
1346 | } | ||
1333 | } | 1347 | } |
1334 | } else if (bp->b_flags & XBF_READ_AHEAD) { | 1348 | } else if (bp->b_flags & XBF_READ_AHEAD) { |
1335 | rw = READA; | 1349 | rw = READA; |
@@ -1628,7 +1642,7 @@ xfs_setsize_buftarg( | |||
1628 | xfs_warn(btp->bt_mount, | 1642 | xfs_warn(btp->bt_mount, |
1629 | "Cannot set_blocksize to %u on device %s", | 1643 | "Cannot set_blocksize to %u on device %s", |
1630 | sectorsize, name); | 1644 | sectorsize, name); |
1631 | return EINVAL; | 1645 | return -EINVAL; |
1632 | } | 1646 | } |
1633 | 1647 | ||
1634 | /* Set up device logical sector size mask */ | 1648 | /* Set up device logical sector size mask */ |
diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h index 3a7a5523d3dc..c753183900b3 100644 --- a/fs/xfs/xfs_buf.h +++ b/fs/xfs/xfs_buf.h | |||
@@ -178,7 +178,7 @@ typedef struct xfs_buf { | |||
178 | atomic_t b_io_remaining; /* #outstanding I/O requests */ | 178 | atomic_t b_io_remaining; /* #outstanding I/O requests */ |
179 | unsigned int b_page_count; /* size of page array */ | 179 | unsigned int b_page_count; /* size of page array */ |
180 | unsigned int b_offset; /* page offset in first page */ | 180 | unsigned int b_offset; /* page offset in first page */ |
181 | unsigned short b_error; /* error code on I/O */ | 181 | int b_error; /* error code on I/O */ |
182 | const struct xfs_buf_ops *b_ops; | 182 | const struct xfs_buf_ops *b_ops; |
183 | 183 | ||
184 | #ifdef XFS_BUF_LOCK_TRACKING | 184 | #ifdef XFS_BUF_LOCK_TRACKING |
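
The b_error widening above (unsigned short to int) is what lets the buffer carry the new negative errnos: a negative value stored in an unsigned short loses its sign, so checks such as bp->b_error == -EFSBADCRC could never match. A standalone C check of that truncation, using plain integers rather than any XFS structure:

    #include <errno.h>
    #include <stdio.h>

    int main(void)
    {
            int error = -EIO;                                  /* new convention: negative errno */
            unsigned short old_field = (unsigned short)error;  /* old b_error type */
            int new_field = error;                             /* new b_error type */

            /* The unsigned short keeps only the low 16 bits, so the sign is
             * gone and equality tests against -EIO can never succeed. */
            printf("unsigned short field: %u\n", old_field);   /* typically 65531 on Linux */
            printf("int field:            %d\n", new_field);   /* -5 */
            printf("old field == -EIO ? %d\n", old_field == -EIO);  /* 0 */
            printf("new field == -EIO ? %d\n", new_field == -EIO);  /* 1 */
            return 0;
    }
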
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c index 4654338b03fc..76007deed31f 100644 --- a/fs/xfs/xfs_buf_item.c +++ b/fs/xfs/xfs_buf_item.c | |||
@@ -488,7 +488,7 @@ xfs_buf_item_unpin( | |||
488 | xfs_buf_lock(bp); | 488 | xfs_buf_lock(bp); |
489 | xfs_buf_hold(bp); | 489 | xfs_buf_hold(bp); |
490 | bp->b_flags |= XBF_ASYNC; | 490 | bp->b_flags |= XBF_ASYNC; |
491 | xfs_buf_ioerror(bp, EIO); | 491 | xfs_buf_ioerror(bp, -EIO); |
492 | XFS_BUF_UNDONE(bp); | 492 | XFS_BUF_UNDONE(bp); |
493 | xfs_buf_stale(bp); | 493 | xfs_buf_stale(bp); |
494 | xfs_buf_ioend(bp, 0); | 494 | xfs_buf_ioend(bp, 0); |
@@ -725,7 +725,7 @@ xfs_buf_item_get_format( | |||
725 | bip->bli_formats = kmem_zalloc(count * sizeof(struct xfs_buf_log_format), | 725 | bip->bli_formats = kmem_zalloc(count * sizeof(struct xfs_buf_log_format), |
726 | KM_SLEEP); | 726 | KM_SLEEP); |
727 | if (!bip->bli_formats) | 727 | if (!bip->bli_formats) |
728 | return ENOMEM; | 728 | return -ENOMEM; |
729 | return 0; | 729 | return 0; |
730 | } | 730 | } |
731 | 731 | ||
diff --git a/fs/xfs/xfs_dir2_readdir.c b/fs/xfs/xfs_dir2_readdir.c index 48e99afb9cb0..f1b69edcdf31 100644 --- a/fs/xfs/xfs_dir2_readdir.c +++ b/fs/xfs/xfs_dir2_readdir.c | |||
@@ -95,7 +95,7 @@ xfs_dir2_sf_getdents( | |||
95 | */ | 95 | */ |
96 | if (dp->i_d.di_size < offsetof(xfs_dir2_sf_hdr_t, parent)) { | 96 | if (dp->i_d.di_size < offsetof(xfs_dir2_sf_hdr_t, parent)) { |
97 | ASSERT(XFS_FORCED_SHUTDOWN(dp->i_mount)); | 97 | ASSERT(XFS_FORCED_SHUTDOWN(dp->i_mount)); |
98 | return XFS_ERROR(EIO); | 98 | return -EIO; |
99 | } | 99 | } |
100 | 100 | ||
101 | ASSERT(dp->i_df.if_bytes == dp->i_d.di_size); | 101 | ASSERT(dp->i_df.if_bytes == dp->i_d.di_size); |
@@ -677,7 +677,7 @@ xfs_readdir( | |||
677 | trace_xfs_readdir(dp); | 677 | trace_xfs_readdir(dp); |
678 | 678 | ||
679 | if (XFS_FORCED_SHUTDOWN(dp->i_mount)) | 679 | if (XFS_FORCED_SHUTDOWN(dp->i_mount)) |
680 | return XFS_ERROR(EIO); | 680 | return -EIO; |
681 | 681 | ||
682 | ASSERT(S_ISDIR(dp->i_d.di_mode)); | 682 | ASSERT(S_ISDIR(dp->i_d.di_mode)); |
683 | XFS_STATS_INC(xs_dir_getdents); | 683 | XFS_STATS_INC(xs_dir_getdents); |
diff --git a/fs/xfs/xfs_discard.c b/fs/xfs/xfs_discard.c index 4f11ef011139..13d08a1b390e 100644 --- a/fs/xfs/xfs_discard.c +++ b/fs/xfs/xfs_discard.c | |||
@@ -124,7 +124,7 @@ xfs_trim_extents( | |||
124 | } | 124 | } |
125 | 125 | ||
126 | trace_xfs_discard_extent(mp, agno, fbno, flen); | 126 | trace_xfs_discard_extent(mp, agno, fbno, flen); |
127 | error = -blkdev_issue_discard(bdev, dbno, dlen, GFP_NOFS, 0); | 127 | error = blkdev_issue_discard(bdev, dbno, dlen, GFP_NOFS, 0); |
128 | if (error) | 128 | if (error) |
129 | goto out_del_cursor; | 129 | goto out_del_cursor; |
130 | *blocks_trimmed += flen; | 130 | *blocks_trimmed += flen; |
@@ -166,11 +166,11 @@ xfs_ioc_trim( | |||
166 | int error, last_error = 0; | 166 | int error, last_error = 0; |
167 | 167 | ||
168 | if (!capable(CAP_SYS_ADMIN)) | 168 | if (!capable(CAP_SYS_ADMIN)) |
169 | return -XFS_ERROR(EPERM); | 169 | return -EPERM; |
170 | if (!blk_queue_discard(q)) | 170 | if (!blk_queue_discard(q)) |
171 | return -XFS_ERROR(EOPNOTSUPP); | 171 | return -EOPNOTSUPP; |
172 | if (copy_from_user(&range, urange, sizeof(range))) | 172 | if (copy_from_user(&range, urange, sizeof(range))) |
173 | return -XFS_ERROR(EFAULT); | 173 | return -EFAULT; |
174 | 174 | ||
175 | /* | 175 | /* |
176 | * Truncating down the len isn't actually quite correct, but using | 176 | * Truncating down the len isn't actually quite correct, but using |
@@ -182,7 +182,7 @@ xfs_ioc_trim( | |||
182 | if (range.start >= XFS_FSB_TO_B(mp, mp->m_sb.sb_dblocks) || | 182 | if (range.start >= XFS_FSB_TO_B(mp, mp->m_sb.sb_dblocks) || |
183 | range.minlen > XFS_FSB_TO_B(mp, XFS_ALLOC_AG_MAX_USABLE(mp)) || | 183 | range.minlen > XFS_FSB_TO_B(mp, XFS_ALLOC_AG_MAX_USABLE(mp)) || |
184 | range.len < mp->m_sb.sb_blocksize) | 184 | range.len < mp->m_sb.sb_blocksize) |
185 | return -XFS_ERROR(EINVAL); | 185 | return -EINVAL; |
186 | 186 | ||
187 | start = BTOBB(range.start); | 187 | start = BTOBB(range.start); |
188 | end = start + BTOBBT(range.len) - 1; | 188 | end = start + BTOBBT(range.len) - 1; |
@@ -195,7 +195,7 @@ xfs_ioc_trim( | |||
195 | end_agno = xfs_daddr_to_agno(mp, end); | 195 | end_agno = xfs_daddr_to_agno(mp, end); |
196 | 196 | ||
197 | for (agno = start_agno; agno <= end_agno; agno++) { | 197 | for (agno = start_agno; agno <= end_agno; agno++) { |
198 | error = -xfs_trim_extents(mp, agno, start, end, minlen, | 198 | error = xfs_trim_extents(mp, agno, start, end, minlen, |
199 | &blocks_trimmed); | 199 | &blocks_trimmed); |
200 | if (error) | 200 | if (error) |
201 | last_error = error; | 201 | last_error = error; |
@@ -206,7 +206,7 @@ xfs_ioc_trim( | |||
206 | 206 | ||
207 | range.len = XFS_FSB_TO_B(mp, blocks_trimmed); | 207 | range.len = XFS_FSB_TO_B(mp, blocks_trimmed); |
208 | if (copy_to_user(urange, &range, sizeof(range))) | 208 | if (copy_to_user(urange, &range, sizeof(range))) |
209 | return -XFS_ERROR(EFAULT); | 209 | return -EFAULT; |
210 | return 0; | 210 | return 0; |
211 | } | 211 | } |
212 | 212 | ||
@@ -222,11 +222,11 @@ xfs_discard_extents( | |||
222 | trace_xfs_discard_extent(mp, busyp->agno, busyp->bno, | 222 | trace_xfs_discard_extent(mp, busyp->agno, busyp->bno, |
223 | busyp->length); | 223 | busyp->length); |
224 | 224 | ||
225 | error = -blkdev_issue_discard(mp->m_ddev_targp->bt_bdev, | 225 | error = blkdev_issue_discard(mp->m_ddev_targp->bt_bdev, |
226 | XFS_AGB_TO_DADDR(mp, busyp->agno, busyp->bno), | 226 | XFS_AGB_TO_DADDR(mp, busyp->agno, busyp->bno), |
227 | XFS_FSB_TO_BB(mp, busyp->length), | 227 | XFS_FSB_TO_BB(mp, busyp->length), |
228 | GFP_NOFS, 0); | 228 | GFP_NOFS, 0); |
229 | if (error && error != EOPNOTSUPP) { | 229 | if (error && error != -EOPNOTSUPP) { |
230 | xfs_info(mp, | 230 | xfs_info(mp, |
231 | "discard failed for extent [0x%llu,%u], error %d", | 231 | "discard failed for extent [0x%llu,%u], error %d", |
232 | (unsigned long long)busyp->bno, | 232 | (unsigned long long)busyp->bno, |
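
The xfs_discard.c hunks above drop the leading minus on blkdev_issue_discard() because the block layer already returns negative errnos; keeping the negation would flip the sign and defeat checks like error != -EOPNOTSUPP. A self-contained illustration of that double-negation hazard, where issue_discard() is only a stub standing in for the block-layer call:

    #include <errno.h>
    #include <stdio.h>

    /* Stub standing in for blkdev_issue_discard(): already returns -Exxx. */
    static int issue_discard(void)
    {
            return -EOPNOTSUPP;
    }

    int main(void)
    {
            int negated = -issue_discard();  /* old style: flips -EOPNOTSUPP to +EOPNOTSUPP */
            int direct  = issue_discard();   /* new style: keep the sign as returned */

            /* Only the un-negated value matches the negative-errno check. */
            printf("negated == -EOPNOTSUPP ? %d\n", negated == -EOPNOTSUPP);  /* 0 */
            printf("direct  == -EOPNOTSUPP ? %d\n", direct  == -EOPNOTSUPP);  /* 1 */
            return 0;
    }
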
diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c index 3ee0cd43edc0..63c2de49f61d 100644 --- a/fs/xfs/xfs_dquot.c +++ b/fs/xfs/xfs_dquot.c | |||
@@ -327,7 +327,7 @@ xfs_qm_dqalloc( | |||
327 | */ | 327 | */ |
328 | if (!xfs_this_quota_on(dqp->q_mount, dqp->dq_flags)) { | 328 | if (!xfs_this_quota_on(dqp->q_mount, dqp->dq_flags)) { |
329 | xfs_iunlock(quotip, XFS_ILOCK_EXCL); | 329 | xfs_iunlock(quotip, XFS_ILOCK_EXCL); |
330 | return (ESRCH); | 330 | return -ESRCH; |
331 | } | 331 | } |
332 | 332 | ||
333 | xfs_trans_ijoin(tp, quotip, XFS_ILOCK_EXCL); | 333 | xfs_trans_ijoin(tp, quotip, XFS_ILOCK_EXCL); |
@@ -354,7 +354,7 @@ xfs_qm_dqalloc( | |||
354 | mp->m_quotainfo->qi_dqchunklen, | 354 | mp->m_quotainfo->qi_dqchunklen, |
355 | 0); | 355 | 0); |
356 | if (!bp) { | 356 | if (!bp) { |
357 | error = ENOMEM; | 357 | error = -ENOMEM; |
358 | goto error1; | 358 | goto error1; |
359 | } | 359 | } |
360 | bp->b_ops = &xfs_dquot_buf_ops; | 360 | bp->b_ops = &xfs_dquot_buf_ops; |
@@ -400,7 +400,7 @@ xfs_qm_dqalloc( | |||
400 | error0: | 400 | error0: |
401 | xfs_iunlock(quotip, XFS_ILOCK_EXCL); | 401 | xfs_iunlock(quotip, XFS_ILOCK_EXCL); |
402 | 402 | ||
403 | return (error); | 403 | return error; |
404 | } | 404 | } |
405 | 405 | ||
406 | STATIC int | 406 | STATIC int |
@@ -426,7 +426,7 @@ xfs_qm_dqrepair( | |||
426 | 426 | ||
427 | if (error) { | 427 | if (error) { |
428 | ASSERT(*bpp == NULL); | 428 | ASSERT(*bpp == NULL); |
429 | return XFS_ERROR(error); | 429 | return error; |
430 | } | 430 | } |
431 | (*bpp)->b_ops = &xfs_dquot_buf_ops; | 431 | (*bpp)->b_ops = &xfs_dquot_buf_ops; |
432 | 432 | ||
@@ -442,7 +442,7 @@ xfs_qm_dqrepair( | |||
442 | if (error) { | 442 | if (error) { |
443 | /* repair failed, we're screwed */ | 443 | /* repair failed, we're screwed */ |
444 | xfs_trans_brelse(tp, *bpp); | 444 | xfs_trans_brelse(tp, *bpp); |
445 | return XFS_ERROR(EIO); | 445 | return -EIO; |
446 | } | 446 | } |
447 | } | 447 | } |
448 | 448 | ||
@@ -480,7 +480,7 @@ xfs_qm_dqtobp( | |||
480 | * didn't have the quota inode lock. | 480 | * didn't have the quota inode lock. |
481 | */ | 481 | */ |
482 | xfs_iunlock(quotip, lock_mode); | 482 | xfs_iunlock(quotip, lock_mode); |
483 | return ESRCH; | 483 | return -ESRCH; |
484 | } | 484 | } |
485 | 485 | ||
486 | /* | 486 | /* |
@@ -508,7 +508,7 @@ xfs_qm_dqtobp( | |||
508 | * We don't allocate unless we're asked to | 508 | * We don't allocate unless we're asked to |
509 | */ | 509 | */ |
510 | if (!(flags & XFS_QMOPT_DQALLOC)) | 510 | if (!(flags & XFS_QMOPT_DQALLOC)) |
511 | return ENOENT; | 511 | return -ENOENT; |
512 | 512 | ||
513 | ASSERT(tp); | 513 | ASSERT(tp); |
514 | error = xfs_qm_dqalloc(tpp, mp, dqp, quotip, | 514 | error = xfs_qm_dqalloc(tpp, mp, dqp, quotip, |
@@ -530,7 +530,7 @@ xfs_qm_dqtobp( | |||
530 | mp->m_quotainfo->qi_dqchunklen, | 530 | mp->m_quotainfo->qi_dqchunklen, |
531 | 0, &bp, &xfs_dquot_buf_ops); | 531 | 0, &bp, &xfs_dquot_buf_ops); |
532 | 532 | ||
533 | if (error == EFSCORRUPTED && (flags & XFS_QMOPT_DQREPAIR)) { | 533 | if (error == -EFSCORRUPTED && (flags & XFS_QMOPT_DQREPAIR)) { |
534 | xfs_dqid_t firstid = (xfs_dqid_t)map.br_startoff * | 534 | xfs_dqid_t firstid = (xfs_dqid_t)map.br_startoff * |
535 | mp->m_quotainfo->qi_dqperchunk; | 535 | mp->m_quotainfo->qi_dqperchunk; |
536 | ASSERT(bp == NULL); | 536 | ASSERT(bp == NULL); |
@@ -539,7 +539,7 @@ xfs_qm_dqtobp( | |||
539 | 539 | ||
540 | if (error) { | 540 | if (error) { |
541 | ASSERT(bp == NULL); | 541 | ASSERT(bp == NULL); |
542 | return XFS_ERROR(error); | 542 | return error; |
543 | } | 543 | } |
544 | } | 544 | } |
545 | 545 | ||
@@ -547,7 +547,7 @@ xfs_qm_dqtobp( | |||
547 | *O_bpp = bp; | 547 | *O_bpp = bp; |
548 | *O_ddpp = bp->b_addr + dqp->q_bufoffset; | 548 | *O_ddpp = bp->b_addr + dqp->q_bufoffset; |
549 | 549 | ||
550 | return (0); | 550 | return 0; |
551 | } | 551 | } |
552 | 552 | ||
553 | 553 | ||
@@ -715,7 +715,7 @@ xfs_qm_dqget( | |||
715 | if ((! XFS_IS_UQUOTA_ON(mp) && type == XFS_DQ_USER) || | 715 | if ((! XFS_IS_UQUOTA_ON(mp) && type == XFS_DQ_USER) || |
716 | (! XFS_IS_PQUOTA_ON(mp) && type == XFS_DQ_PROJ) || | 716 | (! XFS_IS_PQUOTA_ON(mp) && type == XFS_DQ_PROJ) || |
717 | (! XFS_IS_GQUOTA_ON(mp) && type == XFS_DQ_GROUP)) { | 717 | (! XFS_IS_GQUOTA_ON(mp) && type == XFS_DQ_GROUP)) { |
718 | return (ESRCH); | 718 | return -ESRCH; |
719 | } | 719 | } |
720 | 720 | ||
721 | #ifdef DEBUG | 721 | #ifdef DEBUG |
@@ -723,7 +723,7 @@ xfs_qm_dqget( | |||
723 | if ((xfs_dqerror_target == mp->m_ddev_targp) && | 723 | if ((xfs_dqerror_target == mp->m_ddev_targp) && |
724 | (xfs_dqreq_num++ % xfs_dqerror_mod) == 0) { | 724 | (xfs_dqreq_num++ % xfs_dqerror_mod) == 0) { |
725 | xfs_debug(mp, "Returning error in dqget"); | 725 | xfs_debug(mp, "Returning error in dqget"); |
726 | return (EIO); | 726 | return -EIO; |
727 | } | 727 | } |
728 | } | 728 | } |
729 | 729 | ||
@@ -796,14 +796,14 @@ restart: | |||
796 | } else { | 796 | } else { |
797 | /* inode stays locked on return */ | 797 | /* inode stays locked on return */ |
798 | xfs_qm_dqdestroy(dqp); | 798 | xfs_qm_dqdestroy(dqp); |
799 | return XFS_ERROR(ESRCH); | 799 | return -ESRCH; |
800 | } | 800 | } |
801 | } | 801 | } |
802 | 802 | ||
803 | mutex_lock(&qi->qi_tree_lock); | 803 | mutex_lock(&qi->qi_tree_lock); |
804 | error = -radix_tree_insert(tree, id, dqp); | 804 | error = radix_tree_insert(tree, id, dqp); |
805 | if (unlikely(error)) { | 805 | if (unlikely(error)) { |
806 | WARN_ON(error != EEXIST); | 806 | WARN_ON(error != -EEXIST); |
807 | 807 | ||
808 | /* | 808 | /* |
809 | * Duplicate found. Just throw away the new dquot and start | 809 | * Duplicate found. Just throw away the new dquot and start |
@@ -829,7 +829,7 @@ restart: | |||
829 | ASSERT((ip == NULL) || xfs_isilocked(ip, XFS_ILOCK_EXCL)); | 829 | ASSERT((ip == NULL) || xfs_isilocked(ip, XFS_ILOCK_EXCL)); |
830 | trace_xfs_dqget_miss(dqp); | 830 | trace_xfs_dqget_miss(dqp); |
831 | *O_dqpp = dqp; | 831 | *O_dqpp = dqp; |
832 | return (0); | 832 | return 0; |
833 | } | 833 | } |
834 | 834 | ||
835 | /* | 835 | /* |
@@ -966,7 +966,7 @@ xfs_qm_dqflush( | |||
966 | SHUTDOWN_CORRUPT_INCORE); | 966 | SHUTDOWN_CORRUPT_INCORE); |
967 | else | 967 | else |
968 | spin_unlock(&mp->m_ail->xa_lock); | 968 | spin_unlock(&mp->m_ail->xa_lock); |
969 | error = XFS_ERROR(EIO); | 969 | error = -EIO; |
970 | goto out_unlock; | 970 | goto out_unlock; |
971 | } | 971 | } |
972 | 972 | ||
@@ -974,7 +974,8 @@ xfs_qm_dqflush( | |||
974 | * Get the buffer containing the on-disk dquot | 974 | * Get the buffer containing the on-disk dquot |
975 | */ | 975 | */ |
976 | error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dqp->q_blkno, | 976 | error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dqp->q_blkno, |
977 | mp->m_quotainfo->qi_dqchunklen, 0, &bp, NULL); | 977 | mp->m_quotainfo->qi_dqchunklen, 0, &bp, |
978 | &xfs_dquot_buf_ops); | ||
978 | if (error) | 979 | if (error) |
979 | goto out_unlock; | 980 | goto out_unlock; |
980 | 981 | ||
@@ -992,7 +993,7 @@ xfs_qm_dqflush( | |||
992 | xfs_buf_relse(bp); | 993 | xfs_buf_relse(bp); |
993 | xfs_dqfunlock(dqp); | 994 | xfs_dqfunlock(dqp); |
994 | xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); | 995 | xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); |
995 | return XFS_ERROR(EIO); | 996 | return -EIO; |
996 | } | 997 | } |
997 | 998 | ||
998 | /* This is the only portion of data that needs to persist */ | 999 | /* This is the only portion of data that needs to persist */ |
@@ -1045,7 +1046,7 @@ xfs_qm_dqflush( | |||
1045 | 1046 | ||
1046 | out_unlock: | 1047 | out_unlock: |
1047 | xfs_dqfunlock(dqp); | 1048 | xfs_dqfunlock(dqp); |
1048 | return XFS_ERROR(EIO); | 1049 | return -EIO; |
1049 | } | 1050 | } |
1050 | 1051 | ||
1051 | /* | 1052 | /* |
diff --git a/fs/xfs/xfs_dquot.h b/fs/xfs/xfs_dquot.h index 68a68f704837..c24c67e22a2a 100644 --- a/fs/xfs/xfs_dquot.h +++ b/fs/xfs/xfs_dquot.h | |||
@@ -139,6 +139,21 @@ static inline xfs_dquot_t *xfs_inode_dquot(struct xfs_inode *ip, int type) | |||
139 | } | 139 | } |
140 | } | 140 | } |
141 | 141 | ||
142 | /* | ||
143 | * Check whether a dquot is under low free space conditions. We assume the quota | ||
144 | * is enabled and enforced. | ||
145 | */ | ||
146 | static inline bool xfs_dquot_lowsp(struct xfs_dquot *dqp) | ||
147 | { | ||
148 | int64_t freesp; | ||
149 | |||
150 | freesp = be64_to_cpu(dqp->q_core.d_blk_hardlimit) - dqp->q_res_bcount; | ||
151 | if (freesp < dqp->q_low_space[XFS_QLOWSP_1_PCNT]) | ||
152 | return true; | ||
153 | |||
154 | return false; | ||
155 | } | ||
156 | |||
142 | #define XFS_DQ_IS_LOCKED(dqp) (mutex_is_locked(&((dqp)->q_qlock))) | 157 | #define XFS_DQ_IS_LOCKED(dqp) (mutex_is_locked(&((dqp)->q_qlock))) |
143 | #define XFS_DQ_IS_DIRTY(dqp) ((dqp)->dq_flags & XFS_DQ_DIRTY) | 158 | #define XFS_DQ_IS_DIRTY(dqp) ((dqp)->dq_flags & XFS_DQ_DIRTY) |
144 | #define XFS_QM_ISUDQ(dqp) ((dqp)->dq_flags & XFS_DQ_USER) | 159 | #define XFS_QM_ISUDQ(dqp) ((dqp)->dq_flags & XFS_DQ_USER) |
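
The new xfs_dquot_lowsp() helper above reports low space when the headroom left under the hard block limit (hard limit minus reserved blocks) drops below a precomputed threshold. A standalone sketch of the same arithmetic with made-up numbers; the parameter names and the 1% threshold are illustrative, and the kernel version additionally converts d_blk_hardlimit from its on-disk big-endian form:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Same comparison as xfs_dquot_lowsp(), on plain host-endian integers. */
    static bool dquot_lowsp(int64_t blk_hardlimit, int64_t res_bcount,
                            int64_t low_space_threshold)
    {
            int64_t freesp = blk_hardlimit - res_bcount;

            return freesp < low_space_threshold;
    }

    int main(void)
    {
            int64_t hardlimit = 1000000;            /* blocks allowed by the hard limit */
            int64_t low_1pct  = hardlimit / 100;    /* threshold akin to q_low_space[XFS_QLOWSP_1_PCNT] */

            /* Plenty of headroom: not low on space. */
            printf("500000 reserved -> lowsp=%d\n",
                   (int)dquot_lowsp(hardlimit, 500000, low_1pct));   /* 0 */

            /* Under 1% of the limit left: the low-space condition triggers. */
            printf("995000 reserved -> lowsp=%d\n",
                   (int)dquot_lowsp(hardlimit, 995000, low_1pct));   /* 1 */
            return 0;
    }
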
diff --git a/fs/xfs/xfs_error.c b/fs/xfs/xfs_error.c index edac5b057d28..b92fd7bc49e3 100644 --- a/fs/xfs/xfs_error.c +++ b/fs/xfs/xfs_error.c | |||
@@ -27,29 +27,6 @@ | |||
27 | 27 | ||
28 | #ifdef DEBUG | 28 | #ifdef DEBUG |
29 | 29 | ||
30 | int xfs_etrap[XFS_ERROR_NTRAP] = { | ||
31 | 0, | ||
32 | }; | ||
33 | |||
34 | int | ||
35 | xfs_error_trap(int e) | ||
36 | { | ||
37 | int i; | ||
38 | |||
39 | if (!e) | ||
40 | return 0; | ||
41 | for (i = 0; i < XFS_ERROR_NTRAP; i++) { | ||
42 | if (xfs_etrap[i] == 0) | ||
43 | break; | ||
44 | if (e != xfs_etrap[i]) | ||
45 | continue; | ||
46 | xfs_notice(NULL, "%s: error %d", __func__, e); | ||
47 | BUG(); | ||
48 | break; | ||
49 | } | ||
50 | return e; | ||
51 | } | ||
52 | |||
53 | int xfs_etest[XFS_NUM_INJECT_ERROR]; | 30 | int xfs_etest[XFS_NUM_INJECT_ERROR]; |
54 | int64_t xfs_etest_fsid[XFS_NUM_INJECT_ERROR]; | 31 | int64_t xfs_etest_fsid[XFS_NUM_INJECT_ERROR]; |
55 | char * xfs_etest_fsname[XFS_NUM_INJECT_ERROR]; | 32 | char * xfs_etest_fsname[XFS_NUM_INJECT_ERROR]; |
@@ -190,7 +167,7 @@ xfs_verifier_error( | |||
190 | struct xfs_mount *mp = bp->b_target->bt_mount; | 167 | struct xfs_mount *mp = bp->b_target->bt_mount; |
191 | 168 | ||
192 | xfs_alert(mp, "Metadata %s detected at %pF, block 0x%llx", | 169 | xfs_alert(mp, "Metadata %s detected at %pF, block 0x%llx", |
193 | bp->b_error == EFSBADCRC ? "CRC error" : "corruption", | 170 | bp->b_error == -EFSBADCRC ? "CRC error" : "corruption", |
194 | __return_address, bp->b_bn); | 171 | __return_address, bp->b_bn); |
195 | 172 | ||
196 | xfs_alert(mp, "Unmount and run xfs_repair"); | 173 | xfs_alert(mp, "Unmount and run xfs_repair"); |
diff --git a/fs/xfs/xfs_error.h b/fs/xfs/xfs_error.h index c1c57d4a4b5d..279a76e52791 100644 --- a/fs/xfs/xfs_error.h +++ b/fs/xfs/xfs_error.h | |||
@@ -18,15 +18,6 @@ | |||
18 | #ifndef __XFS_ERROR_H__ | 18 | #ifndef __XFS_ERROR_H__ |
19 | #define __XFS_ERROR_H__ | 19 | #define __XFS_ERROR_H__ |
20 | 20 | ||
21 | #ifdef DEBUG | ||
22 | #define XFS_ERROR_NTRAP 10 | ||
23 | extern int xfs_etrap[XFS_ERROR_NTRAP]; | ||
24 | extern int xfs_error_trap(int); | ||
25 | #define XFS_ERROR(e) xfs_error_trap(e) | ||
26 | #else | ||
27 | #define XFS_ERROR(e) (e) | ||
28 | #endif | ||
29 | |||
30 | struct xfs_mount; | 21 | struct xfs_mount; |
31 | 22 | ||
32 | extern void xfs_error_report(const char *tag, int level, struct xfs_mount *mp, | 23 | extern void xfs_error_report(const char *tag, int level, struct xfs_mount *mp, |
@@ -56,7 +47,7 @@ extern void xfs_verifier_error(struct xfs_buf *bp); | |||
56 | if (unlikely(!fs_is_ok)) { \ | 47 | if (unlikely(!fs_is_ok)) { \ |
57 | XFS_ERROR_REPORT("XFS_WANT_CORRUPTED_GOTO", \ | 48 | XFS_ERROR_REPORT("XFS_WANT_CORRUPTED_GOTO", \ |
58 | XFS_ERRLEVEL_LOW, NULL); \ | 49 | XFS_ERRLEVEL_LOW, NULL); \ |
59 | error = XFS_ERROR(EFSCORRUPTED); \ | 50 | error = -EFSCORRUPTED; \ |
60 | goto l; \ | 51 | goto l; \ |
61 | } \ | 52 | } \ |
62 | } | 53 | } |
@@ -68,7 +59,7 @@ extern void xfs_verifier_error(struct xfs_buf *bp); | |||
68 | if (unlikely(!fs_is_ok)) { \ | 59 | if (unlikely(!fs_is_ok)) { \ |
69 | XFS_ERROR_REPORT("XFS_WANT_CORRUPTED_RETURN", \ | 60 | XFS_ERROR_REPORT("XFS_WANT_CORRUPTED_RETURN", \ |
70 | XFS_ERRLEVEL_LOW, NULL); \ | 61 | XFS_ERRLEVEL_LOW, NULL); \ |
71 | return XFS_ERROR(EFSCORRUPTED); \ | 62 | return -EFSCORRUPTED; \ |
72 | } \ | 63 | } \ |
73 | } | 64 | } |
74 | 65 | ||
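
With the DEBUG-only XFS_ERROR()/xfs_error_trap() wrapper removed above, the corruption-check macros simply return the negative errno themselves. A hedged userspace analog of the XFS_WANT_CORRUPTED_RETURN shape; report() and the literal error value here are placeholders for the demo, not the XFS definitions:

    #include <stdio.h>

    #define EFSCORRUPTED 117   /* placeholder; the kernel maps EFSCORRUPTED to EUCLEAN */

    static void report(const char *tag)
    {
            fprintf(stderr, "corruption check failed: %s\n", tag);
    }

    /* Same shape as XFS_WANT_CORRUPTED_RETURN: report, then return -EFSCORRUPTED. */
    #define WANT_CORRUPTED_RETURN(fs_is_ok)                 \
            do {                                            \
                    if (!(fs_is_ok)) {                      \
                            report(#fs_is_ok);              \
                            return -EFSCORRUPTED;           \
                    }                                       \
            } while (0)

    static int check_record(unsigned int magic)
    {
            WANT_CORRUPTED_RETURN(magic == 0x58465342u);    /* "XFSB" superblock magic */
            return 0;
    }

    int main(void)
    {
            printf("good record: %d\n", check_record(0x58465342u));  /* 0 */
            printf("bad record:  %d\n", check_record(0));            /* -117 */
            return 0;
    }
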
diff --git a/fs/xfs/xfs_export.c b/fs/xfs/xfs_export.c index 753e467aa1a5..5a6bd5d8779a 100644 --- a/fs/xfs/xfs_export.c +++ b/fs/xfs/xfs_export.c | |||
@@ -147,9 +147,9 @@ xfs_nfs_get_inode( | |||
147 | * We don't use ESTALE directly down the chain to not | 147 | * We don't use ESTALE directly down the chain to not |
148 | * confuse applications using bulkstat that expect EINVAL. | 148 | * confuse applications using bulkstat that expect EINVAL. |
149 | */ | 149 | */ |
150 | if (error == EINVAL || error == ENOENT) | 150 | if (error == -EINVAL || error == -ENOENT) |
151 | error = ESTALE; | 151 | error = -ESTALE; |
152 | return ERR_PTR(-error); | 152 | return ERR_PTR(error); |
153 | } | 153 | } |
154 | 154 | ||
155 | if (ip->i_d.di_gen != generation) { | 155 | if (ip->i_d.di_gen != generation) { |
@@ -217,7 +217,7 @@ xfs_fs_get_parent( | |||
217 | 217 | ||
218 | error = xfs_lookup(XFS_I(child->d_inode), &xfs_name_dotdot, &cip, NULL); | 218 | error = xfs_lookup(XFS_I(child->d_inode), &xfs_name_dotdot, &cip, NULL); |
219 | if (unlikely(error)) | 219 | if (unlikely(error)) |
220 | return ERR_PTR(-error); | 220 | return ERR_PTR(error); |
221 | 221 | ||
222 | return d_obtain_alias(VFS_I(cip)); | 222 | return d_obtain_alias(VFS_I(cip)); |
223 | } | 223 | } |
@@ -237,7 +237,7 @@ xfs_fs_nfs_commit_metadata( | |||
237 | 237 | ||
238 | if (!lsn) | 238 | if (!lsn) |
239 | return 0; | 239 | return 0; |
240 | return -_xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, NULL); | 240 | return _xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, NULL); |
241 | } | 241 | } |
242 | 242 | ||
243 | const struct export_operations xfs_export_operations = { | 243 | const struct export_operations xfs_export_operations = { |
diff --git a/fs/xfs/xfs_extfree_item.c b/fs/xfs/xfs_extfree_item.c index fb7a4c1ce1c5..c4327419dc5c 100644 --- a/fs/xfs/xfs_extfree_item.c +++ b/fs/xfs/xfs_extfree_item.c | |||
@@ -298,7 +298,7 @@ xfs_efi_copy_format(xfs_log_iovec_t *buf, xfs_efi_log_format_t *dst_efi_fmt) | |||
298 | } | 298 | } |
299 | return 0; | 299 | return 0; |
300 | } | 300 | } |
301 | return EFSCORRUPTED; | 301 | return -EFSCORRUPTED; |
302 | } | 302 | } |
303 | 303 | ||
304 | /* | 304 | /* |
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c index 1f66779d7a46..076b1708d134 100644 --- a/fs/xfs/xfs_file.c +++ b/fs/xfs/xfs_file.c | |||
@@ -38,6 +38,7 @@ | |||
38 | #include "xfs_trace.h" | 38 | #include "xfs_trace.h" |
39 | #include "xfs_log.h" | 39 | #include "xfs_log.h" |
40 | #include "xfs_dinode.h" | 40 | #include "xfs_dinode.h" |
41 | #include "xfs_icache.h" | ||
41 | 42 | ||
42 | #include <linux/aio.h> | 43 | #include <linux/aio.h> |
43 | #include <linux/dcache.h> | 44 | #include <linux/dcache.h> |
@@ -155,7 +156,7 @@ xfs_dir_fsync( | |||
155 | 156 | ||
156 | if (!lsn) | 157 | if (!lsn) |
157 | return 0; | 158 | return 0; |
158 | return -_xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, NULL); | 159 | return _xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, NULL); |
159 | } | 160 | } |
160 | 161 | ||
161 | STATIC int | 162 | STATIC int |
@@ -179,7 +180,7 @@ xfs_file_fsync( | |||
179 | return error; | 180 | return error; |
180 | 181 | ||
181 | if (XFS_FORCED_SHUTDOWN(mp)) | 182 | if (XFS_FORCED_SHUTDOWN(mp)) |
182 | return -XFS_ERROR(EIO); | 183 | return -EIO; |
183 | 184 | ||
184 | xfs_iflags_clear(ip, XFS_ITRUNCATED); | 185 | xfs_iflags_clear(ip, XFS_ITRUNCATED); |
185 | 186 | ||
@@ -225,7 +226,7 @@ xfs_file_fsync( | |||
225 | !log_flushed) | 226 | !log_flushed) |
226 | xfs_blkdev_issue_flush(mp->m_ddev_targp); | 227 | xfs_blkdev_issue_flush(mp->m_ddev_targp); |
227 | 228 | ||
228 | return -error; | 229 | return error; |
229 | } | 230 | } |
230 | 231 | ||
231 | STATIC ssize_t | 232 | STATIC ssize_t |
@@ -246,11 +247,11 @@ xfs_file_read_iter( | |||
246 | XFS_STATS_INC(xs_read_calls); | 247 | XFS_STATS_INC(xs_read_calls); |
247 | 248 | ||
248 | if (unlikely(file->f_flags & O_DIRECT)) | 249 | if (unlikely(file->f_flags & O_DIRECT)) |
249 | ioflags |= IO_ISDIRECT; | 250 | ioflags |= XFS_IO_ISDIRECT; |
250 | if (file->f_mode & FMODE_NOCMTIME) | 251 | if (file->f_mode & FMODE_NOCMTIME) |
251 | ioflags |= IO_INVIS; | 252 | ioflags |= XFS_IO_INVIS; |
252 | 253 | ||
253 | if (unlikely(ioflags & IO_ISDIRECT)) { | 254 | if (unlikely(ioflags & XFS_IO_ISDIRECT)) { |
254 | xfs_buftarg_t *target = | 255 | xfs_buftarg_t *target = |
255 | XFS_IS_REALTIME_INODE(ip) ? | 256 | XFS_IS_REALTIME_INODE(ip) ? |
256 | mp->m_rtdev_targp : mp->m_ddev_targp; | 257 | mp->m_rtdev_targp : mp->m_ddev_targp; |
@@ -258,7 +259,7 @@ xfs_file_read_iter( | |||
258 | if ((pos | size) & target->bt_logical_sectormask) { | 259 | if ((pos | size) & target->bt_logical_sectormask) { |
259 | if (pos == i_size_read(inode)) | 260 | if (pos == i_size_read(inode)) |
260 | return 0; | 261 | return 0; |
261 | return -XFS_ERROR(EINVAL); | 262 | return -EINVAL; |
262 | } | 263 | } |
263 | } | 264 | } |
264 | 265 | ||
@@ -283,7 +284,7 @@ xfs_file_read_iter( | |||
283 | * proceed concurrently without serialisation. | 284 | * proceed concurrently without serialisation. |
284 | */ | 285 | */ |
285 | xfs_rw_ilock(ip, XFS_IOLOCK_SHARED); | 286 | xfs_rw_ilock(ip, XFS_IOLOCK_SHARED); |
286 | if ((ioflags & IO_ISDIRECT) && inode->i_mapping->nrpages) { | 287 | if ((ioflags & XFS_IO_ISDIRECT) && inode->i_mapping->nrpages) { |
287 | xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED); | 288 | xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED); |
288 | xfs_rw_ilock(ip, XFS_IOLOCK_EXCL); | 289 | xfs_rw_ilock(ip, XFS_IOLOCK_EXCL); |
289 | 290 | ||
@@ -325,7 +326,7 @@ xfs_file_splice_read( | |||
325 | XFS_STATS_INC(xs_read_calls); | 326 | XFS_STATS_INC(xs_read_calls); |
326 | 327 | ||
327 | if (infilp->f_mode & FMODE_NOCMTIME) | 328 | if (infilp->f_mode & FMODE_NOCMTIME) |
328 | ioflags |= IO_INVIS; | 329 | ioflags |= XFS_IO_INVIS; |
329 | 330 | ||
330 | if (XFS_FORCED_SHUTDOWN(ip->i_mount)) | 331 | if (XFS_FORCED_SHUTDOWN(ip->i_mount)) |
331 | return -EIO; | 332 | return -EIO; |
@@ -524,7 +525,7 @@ restart: | |||
524 | xfs_rw_ilock(ip, *iolock); | 525 | xfs_rw_ilock(ip, *iolock); |
525 | goto restart; | 526 | goto restart; |
526 | } | 527 | } |
527 | error = -xfs_zero_eof(ip, *pos, i_size_read(inode)); | 528 | error = xfs_zero_eof(ip, *pos, i_size_read(inode)); |
528 | if (error) | 529 | if (error) |
529 | return error; | 530 | return error; |
530 | } | 531 | } |
@@ -594,7 +595,7 @@ xfs_file_dio_aio_write( | |||
594 | 595 | ||
595 | /* DIO must be aligned to device logical sector size */ | 596 | /* DIO must be aligned to device logical sector size */ |
596 | if ((pos | count) & target->bt_logical_sectormask) | 597 | if ((pos | count) & target->bt_logical_sectormask) |
597 | return -XFS_ERROR(EINVAL); | 598 | return -EINVAL; |
598 | 599 | ||
599 | /* "unaligned" here means not aligned to a filesystem block */ | 600 | /* "unaligned" here means not aligned to a filesystem block */ |
600 | if ((pos & mp->m_blockmask) || ((pos + count) & mp->m_blockmask)) | 601 | if ((pos & mp->m_blockmask) || ((pos + count) & mp->m_blockmask)) |
@@ -689,14 +690,28 @@ write_retry: | |||
689 | ret = generic_perform_write(file, from, pos); | 690 | ret = generic_perform_write(file, from, pos); |
690 | if (likely(ret >= 0)) | 691 | if (likely(ret >= 0)) |
691 | iocb->ki_pos = pos + ret; | 692 | iocb->ki_pos = pos + ret; |
693 | |||
692 | /* | 694 | /* |
693 | * If we just got an ENOSPC, try to write back all dirty inodes to | 695 | * If we hit a space limit, try to free up some lingering preallocated |
694 | * convert delalloc space to free up some of the excess reserved | 696 | * space before returning an error. In the case of ENOSPC, first try to |
695 | * metadata space. | 697 | * write back all dirty inodes to free up some of the excess reserved |
698 | * metadata space. This reduces the chances that the eofblocks scan | ||
699 | * waits on dirty mappings. Since xfs_flush_inodes() is serialized, this | ||
700 | * also behaves as a filter to prevent too many eofblocks scans from | ||
701 | * running at the same time. | ||
696 | */ | 702 | */ |
697 | if (ret == -ENOSPC && !enospc) { | 703 | if (ret == -EDQUOT && !enospc) { |
704 | enospc = xfs_inode_free_quota_eofblocks(ip); | ||
705 | if (enospc) | ||
706 | goto write_retry; | ||
707 | } else if (ret == -ENOSPC && !enospc) { | ||
708 | struct xfs_eofblocks eofb = {0}; | ||
709 | |||
698 | enospc = 1; | 710 | enospc = 1; |
699 | xfs_flush_inodes(ip->i_mount); | 711 | xfs_flush_inodes(ip->i_mount); |
712 | eofb.eof_scan_owner = ip->i_ino; /* for locking */ | ||
713 | eofb.eof_flags = XFS_EOF_FLAGS_SYNC; | ||
714 | xfs_icache_free_eofblocks(ip->i_mount, &eofb); | ||
700 | goto write_retry; | 715 | goto write_retry; |
701 | } | 716 | } |
702 | 717 | ||
@@ -772,7 +787,7 @@ xfs_file_fallocate( | |||
772 | unsigned blksize_mask = (1 << inode->i_blkbits) - 1; | 787 | unsigned blksize_mask = (1 << inode->i_blkbits) - 1; |
773 | 788 | ||
774 | if (offset & blksize_mask || len & blksize_mask) { | 789 | if (offset & blksize_mask || len & blksize_mask) { |
775 | error = EINVAL; | 790 | error = -EINVAL; |
776 | goto out_unlock; | 791 | goto out_unlock; |
777 | } | 792 | } |
778 | 793 | ||
@@ -781,7 +796,7 @@ xfs_file_fallocate( | |||
781 | * in which case it is effectively a truncate operation | 796 | * in which case it is effectively a truncate operation |
782 | */ | 797 | */ |
783 | if (offset + len >= i_size_read(inode)) { | 798 | if (offset + len >= i_size_read(inode)) { |
784 | error = EINVAL; | 799 | error = -EINVAL; |
785 | goto out_unlock; | 800 | goto out_unlock; |
786 | } | 801 | } |
787 | 802 | ||
@@ -794,7 +809,7 @@ xfs_file_fallocate( | |||
794 | if (!(mode & FALLOC_FL_KEEP_SIZE) && | 809 | if (!(mode & FALLOC_FL_KEEP_SIZE) && |
795 | offset + len > i_size_read(inode)) { | 810 | offset + len > i_size_read(inode)) { |
796 | new_size = offset + len; | 811 | new_size = offset + len; |
797 | error = -inode_newsize_ok(inode, new_size); | 812 | error = inode_newsize_ok(inode, new_size); |
798 | if (error) | 813 | if (error) |
799 | goto out_unlock; | 814 | goto out_unlock; |
800 | } | 815 | } |
@@ -844,7 +859,7 @@ xfs_file_fallocate( | |||
844 | 859 | ||
845 | out_unlock: | 860 | out_unlock: |
846 | xfs_iunlock(ip, XFS_IOLOCK_EXCL); | 861 | xfs_iunlock(ip, XFS_IOLOCK_EXCL); |
847 | return -error; | 862 | return error; |
848 | } | 863 | } |
849 | 864 | ||
850 | 865 | ||
@@ -889,7 +904,7 @@ xfs_file_release( | |||
889 | struct inode *inode, | 904 | struct inode *inode, |
890 | struct file *filp) | 905 | struct file *filp) |
891 | { | 906 | { |
892 | return -xfs_release(XFS_I(inode)); | 907 | return xfs_release(XFS_I(inode)); |
893 | } | 908 | } |
894 | 909 | ||
895 | STATIC int | 910 | STATIC int |
@@ -918,7 +933,7 @@ xfs_file_readdir( | |||
918 | 933 | ||
919 | error = xfs_readdir(ip, ctx, bufsize); | 934 | error = xfs_readdir(ip, ctx, bufsize); |
920 | if (error) | 935 | if (error) |
921 | return -error; | 936 | return error; |
922 | return 0; | 937 | return 0; |
923 | } | 938 | } |
924 | 939 | ||
@@ -1184,7 +1199,7 @@ xfs_seek_data( | |||
1184 | 1199 | ||
1185 | isize = i_size_read(inode); | 1200 | isize = i_size_read(inode); |
1186 | if (start >= isize) { | 1201 | if (start >= isize) { |
1187 | error = ENXIO; | 1202 | error = -ENXIO; |
1188 | goto out_unlock; | 1203 | goto out_unlock; |
1189 | } | 1204 | } |
1190 | 1205 | ||
@@ -1206,7 +1221,7 @@ xfs_seek_data( | |||
1206 | 1221 | ||
1207 | /* No extents at given offset, must be beyond EOF */ | 1222 | /* No extents at given offset, must be beyond EOF */ |
1208 | if (nmap == 0) { | 1223 | if (nmap == 0) { |
1209 | error = ENXIO; | 1224 | error = -ENXIO; |
1210 | goto out_unlock; | 1225 | goto out_unlock; |
1211 | } | 1226 | } |
1212 | 1227 | ||
@@ -1237,7 +1252,7 @@ xfs_seek_data( | |||
1237 | * we are reading after EOF if nothing in map[1]. | 1252 | * we are reading after EOF if nothing in map[1]. |
1238 | */ | 1253 | */ |
1239 | if (nmap == 1) { | 1254 | if (nmap == 1) { |
1240 | error = ENXIO; | 1255 | error = -ENXIO; |
1241 | goto out_unlock; | 1256 | goto out_unlock; |
1242 | } | 1257 | } |
1243 | 1258 | ||
@@ -1250,7 +1265,7 @@ xfs_seek_data( | |||
1250 | fsbno = map[i - 1].br_startoff + map[i - 1].br_blockcount; | 1265 | fsbno = map[i - 1].br_startoff + map[i - 1].br_blockcount; |
1251 | start = XFS_FSB_TO_B(mp, fsbno); | 1266 | start = XFS_FSB_TO_B(mp, fsbno); |
1252 | if (start >= isize) { | 1267 | if (start >= isize) { |
1253 | error = ENXIO; | 1268 | error = -ENXIO; |
1254 | goto out_unlock; | 1269 | goto out_unlock; |
1255 | } | 1270 | } |
1256 | } | 1271 | } |
@@ -1262,7 +1277,7 @@ out_unlock: | |||
1262 | xfs_iunlock(ip, lock); | 1277 | xfs_iunlock(ip, lock); |
1263 | 1278 | ||
1264 | if (error) | 1279 | if (error) |
1265 | return -error; | 1280 | return error; |
1266 | return offset; | 1281 | return offset; |
1267 | } | 1282 | } |
1268 | 1283 | ||
@@ -1282,13 +1297,13 @@ xfs_seek_hole( | |||
1282 | int error; | 1297 | int error; |
1283 | 1298 | ||
1284 | if (XFS_FORCED_SHUTDOWN(mp)) | 1299 | if (XFS_FORCED_SHUTDOWN(mp)) |
1285 | return -XFS_ERROR(EIO); | 1300 | return -EIO; |
1286 | 1301 | ||
1287 | lock = xfs_ilock_data_map_shared(ip); | 1302 | lock = xfs_ilock_data_map_shared(ip); |
1288 | 1303 | ||
1289 | isize = i_size_read(inode); | 1304 | isize = i_size_read(inode); |
1290 | if (start >= isize) { | 1305 | if (start >= isize) { |
1291 | error = ENXIO; | 1306 | error = -ENXIO; |
1292 | goto out_unlock; | 1307 | goto out_unlock; |
1293 | } | 1308 | } |
1294 | 1309 | ||
@@ -1307,7 +1322,7 @@ xfs_seek_hole( | |||
1307 | 1322 | ||
1308 | /* No extents at given offset, must be beyond EOF */ | 1323 | /* No extents at given offset, must be beyond EOF */ |
1309 | if (nmap == 0) { | 1324 | if (nmap == 0) { |
1310 | error = ENXIO; | 1325 | error = -ENXIO; |
1311 | goto out_unlock; | 1326 | goto out_unlock; |
1312 | } | 1327 | } |
1313 | 1328 | ||
@@ -1370,7 +1385,7 @@ out_unlock: | |||
1370 | xfs_iunlock(ip, lock); | 1385 | xfs_iunlock(ip, lock); |
1371 | 1386 | ||
1372 | if (error) | 1387 | if (error) |
1373 | return -error; | 1388 | return error; |
1374 | return offset; | 1389 | return offset; |
1375 | } | 1390 | } |
1376 | 1391 | ||
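
The buffered-write hunk above now retries once after a space error: on -EDQUOT it first tries to trim quota-owned preallocations, on -ENOSPC it flushes dirty inodes and frees EOF blocks, and the enospc flag ensures the write is retried only a single time. A minimal userspace sketch of that one-shot retry loop; try_write(), free_quota_prealloc() and flush_and_trim() are stand-ins, not the XFS helpers:

    #include <errno.h>
    #include <stdio.h>

    static int attempts;

    /* Stand-in for the buffered write: fail with ENOSPC once, then succeed. */
    static int try_write(void)
    {
            return ++attempts == 1 ? -ENOSPC : 42;   /* 42 bytes "written" */
    }

    /* Stand-ins for the space-reclaim helpers. */
    static int free_quota_prealloc(void)
    {
            puts("freeing quota-owned preallocations");
            return 1;                                /* pretend something was freed */
    }

    static void flush_and_trim(void)
    {
            puts("flushing dirty inodes, trimming EOF blocks");
    }

    static int buffered_write(void)
    {
            int enospc = 0;
            int ret;

    write_retry:
            ret = try_write();

            if (ret == -EDQUOT && !enospc) {
                    /* Quota limit hit: retry only if preallocations were freed. */
                    enospc = free_quota_prealloc();
                    if (enospc)
                            goto write_retry;
            } else if (ret == -ENOSPC && !enospc) {
                    /* Filesystem full: reclaim space, then retry exactly once. */
                    enospc = 1;
                    flush_and_trim();
                    goto write_retry;
            }
            return ret;
    }

    int main(void)
    {
            int ret = buffered_write();

            printf("write returned %d after %d attempts\n", ret, attempts);
            return 0;
    }
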
diff --git a/fs/xfs/xfs_filestream.c b/fs/xfs/xfs_filestream.c index 8ec81bed7992..e92730c1d3ca 100644 --- a/fs/xfs/xfs_filestream.c +++ b/fs/xfs/xfs_filestream.c | |||
@@ -258,7 +258,7 @@ next_ag: | |||
258 | if (*agp == NULLAGNUMBER) | 258 | if (*agp == NULLAGNUMBER) |
259 | return 0; | 259 | return 0; |
260 | 260 | ||
261 | err = ENOMEM; | 261 | err = -ENOMEM; |
262 | item = kmem_alloc(sizeof(*item), KM_MAYFAIL); | 262 | item = kmem_alloc(sizeof(*item), KM_MAYFAIL); |
263 | if (!item) | 263 | if (!item) |
264 | goto out_put_ag; | 264 | goto out_put_ag; |
@@ -268,7 +268,7 @@ next_ag: | |||
268 | 268 | ||
269 | err = xfs_mru_cache_insert(mp->m_filestream, ip->i_ino, &item->mru); | 269 | err = xfs_mru_cache_insert(mp->m_filestream, ip->i_ino, &item->mru); |
270 | if (err) { | 270 | if (err) { |
271 | if (err == EEXIST) | 271 | if (err == -EEXIST) |
272 | err = 0; | 272 | err = 0; |
273 | goto out_free_item; | 273 | goto out_free_item; |
274 | } | 274 | } |
diff --git a/fs/xfs/xfs_fs.h b/fs/xfs/xfs_fs.h index d34703dbcb42..18dc721ca19f 100644 --- a/fs/xfs/xfs_fs.h +++ b/fs/xfs/xfs_fs.h | |||
@@ -255,8 +255,8 @@ typedef struct xfs_fsop_resblks { | |||
255 | ((2 * 1024 * 1024 * 1024ULL) - XFS_MIN_LOG_BYTES) | 255 | ((2 * 1024 * 1024 * 1024ULL) - XFS_MIN_LOG_BYTES) |
256 | 256 | ||
257 | /* Used for sanity checks on superblock */ | 257 | /* Used for sanity checks on superblock */ |
258 | #define XFS_MAX_DBLOCKS(s) ((xfs_drfsbno_t)(s)->sb_agcount * (s)->sb_agblocks) | 258 | #define XFS_MAX_DBLOCKS(s) ((xfs_rfsblock_t)(s)->sb_agcount * (s)->sb_agblocks) |
259 | #define XFS_MIN_DBLOCKS(s) ((xfs_drfsbno_t)((s)->sb_agcount - 1) * \ | 259 | #define XFS_MIN_DBLOCKS(s) ((xfs_rfsblock_t)((s)->sb_agcount - 1) * \ |
260 | (s)->sb_agblocks + XFS_MIN_AG_BLOCKS) | 260 | (s)->sb_agblocks + XFS_MIN_AG_BLOCKS) |
261 | 261 | ||
262 | /* | 262 | /* |
@@ -375,6 +375,9 @@ struct xfs_fs_eofblocks { | |||
375 | #define XFS_EOF_FLAGS_GID (1 << 2) /* filter by gid */ | 375 | #define XFS_EOF_FLAGS_GID (1 << 2) /* filter by gid */ |
376 | #define XFS_EOF_FLAGS_PRID (1 << 3) /* filter by project id */ | 376 | #define XFS_EOF_FLAGS_PRID (1 << 3) /* filter by project id */ |
377 | #define XFS_EOF_FLAGS_MINFILESIZE (1 << 4) /* filter by min file size */ | 377 | #define XFS_EOF_FLAGS_MINFILESIZE (1 << 4) /* filter by min file size */ |
378 | #define XFS_EOF_FLAGS_UNION (1 << 5) /* union filter algorithm; | ||
379 | * kernel only, not included in | ||
380 | * valid mask */ | ||
378 | #define XFS_EOF_FLAGS_VALID \ | 381 | #define XFS_EOF_FLAGS_VALID \ |
379 | (XFS_EOF_FLAGS_SYNC | \ | 382 | (XFS_EOF_FLAGS_SYNC | \ |
380 | XFS_EOF_FLAGS_UID | \ | 383 | XFS_EOF_FLAGS_UID | \ |
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c index d2295561570a..f91de1ef05e1 100644 --- a/fs/xfs/xfs_fsops.c +++ b/fs/xfs/xfs_fsops.c | |||
@@ -168,7 +168,7 @@ xfs_growfs_data_private( | |||
168 | nb = in->newblocks; | 168 | nb = in->newblocks; |
169 | pct = in->imaxpct; | 169 | pct = in->imaxpct; |
170 | if (nb < mp->m_sb.sb_dblocks || pct < 0 || pct > 100) | 170 | if (nb < mp->m_sb.sb_dblocks || pct < 0 || pct > 100) |
171 | return XFS_ERROR(EINVAL); | 171 | return -EINVAL; |
172 | if ((error = xfs_sb_validate_fsb_count(&mp->m_sb, nb))) | 172 | if ((error = xfs_sb_validate_fsb_count(&mp->m_sb, nb))) |
173 | return error; | 173 | return error; |
174 | dpct = pct - mp->m_sb.sb_imax_pct; | 174 | dpct = pct - mp->m_sb.sb_imax_pct; |
@@ -176,7 +176,7 @@ xfs_growfs_data_private( | |||
176 | XFS_FSB_TO_BB(mp, nb) - XFS_FSS_TO_BB(mp, 1), | 176 | XFS_FSB_TO_BB(mp, nb) - XFS_FSS_TO_BB(mp, 1), |
177 | XFS_FSS_TO_BB(mp, 1), 0, NULL); | 177 | XFS_FSS_TO_BB(mp, 1), 0, NULL); |
178 | if (!bp) | 178 | if (!bp) |
179 | return EIO; | 179 | return -EIO; |
180 | if (bp->b_error) { | 180 | if (bp->b_error) { |
181 | error = bp->b_error; | 181 | error = bp->b_error; |
182 | xfs_buf_relse(bp); | 182 | xfs_buf_relse(bp); |
@@ -191,7 +191,7 @@ xfs_growfs_data_private( | |||
191 | nagcount--; | 191 | nagcount--; |
192 | nb = (xfs_rfsblock_t)nagcount * mp->m_sb.sb_agblocks; | 192 | nb = (xfs_rfsblock_t)nagcount * mp->m_sb.sb_agblocks; |
193 | if (nb < mp->m_sb.sb_dblocks) | 193 | if (nb < mp->m_sb.sb_dblocks) |
194 | return XFS_ERROR(EINVAL); | 194 | return -EINVAL; |
195 | } | 195 | } |
196 | new = nb - mp->m_sb.sb_dblocks; | 196 | new = nb - mp->m_sb.sb_dblocks; |
197 | oagcount = mp->m_sb.sb_agcount; | 197 | oagcount = mp->m_sb.sb_agcount; |
@@ -229,7 +229,7 @@ xfs_growfs_data_private( | |||
229 | XFS_FSS_TO_BB(mp, 1), 0, | 229 | XFS_FSS_TO_BB(mp, 1), 0, |
230 | &xfs_agf_buf_ops); | 230 | &xfs_agf_buf_ops); |
231 | if (!bp) { | 231 | if (!bp) { |
232 | error = ENOMEM; | 232 | error = -ENOMEM; |
233 | goto error0; | 233 | goto error0; |
234 | } | 234 | } |
235 | 235 | ||
@@ -270,7 +270,7 @@ xfs_growfs_data_private( | |||
270 | XFS_FSS_TO_BB(mp, 1), 0, | 270 | XFS_FSS_TO_BB(mp, 1), 0, |
271 | &xfs_agfl_buf_ops); | 271 | &xfs_agfl_buf_ops); |
272 | if (!bp) { | 272 | if (!bp) { |
273 | error = ENOMEM; | 273 | error = -ENOMEM; |
274 | goto error0; | 274 | goto error0; |
275 | } | 275 | } |
276 | 276 | ||
@@ -298,7 +298,7 @@ xfs_growfs_data_private( | |||
298 | XFS_FSS_TO_BB(mp, 1), 0, | 298 | XFS_FSS_TO_BB(mp, 1), 0, |
299 | &xfs_agi_buf_ops); | 299 | &xfs_agi_buf_ops); |
300 | if (!bp) { | 300 | if (!bp) { |
301 | error = ENOMEM; | 301 | error = -ENOMEM; |
302 | goto error0; | 302 | goto error0; |
303 | } | 303 | } |
304 | 304 | ||
@@ -336,7 +336,7 @@ xfs_growfs_data_private( | |||
336 | &xfs_allocbt_buf_ops); | 336 | &xfs_allocbt_buf_ops); |
337 | 337 | ||
338 | if (!bp) { | 338 | if (!bp) { |
339 | error = ENOMEM; | 339 | error = -ENOMEM; |
340 | goto error0; | 340 | goto error0; |
341 | } | 341 | } |
342 | 342 | ||
@@ -365,7 +365,7 @@ xfs_growfs_data_private( | |||
365 | BTOBB(mp->m_sb.sb_blocksize), 0, | 365 | BTOBB(mp->m_sb.sb_blocksize), 0, |
366 | &xfs_allocbt_buf_ops); | 366 | &xfs_allocbt_buf_ops); |
367 | if (!bp) { | 367 | if (!bp) { |
368 | error = ENOMEM; | 368 | error = -ENOMEM; |
369 | goto error0; | 369 | goto error0; |
370 | } | 370 | } |
371 | 371 | ||
@@ -395,7 +395,7 @@ xfs_growfs_data_private( | |||
395 | BTOBB(mp->m_sb.sb_blocksize), 0, | 395 | BTOBB(mp->m_sb.sb_blocksize), 0, |
396 | &xfs_inobt_buf_ops); | 396 | &xfs_inobt_buf_ops); |
397 | if (!bp) { | 397 | if (!bp) { |
398 | error = ENOMEM; | 398 | error = -ENOMEM; |
399 | goto error0; | 399 | goto error0; |
400 | } | 400 | } |
401 | 401 | ||
@@ -420,7 +420,7 @@ xfs_growfs_data_private( | |||
420 | BTOBB(mp->m_sb.sb_blocksize), 0, | 420 | BTOBB(mp->m_sb.sb_blocksize), 0, |
421 | &xfs_inobt_buf_ops); | 421 | &xfs_inobt_buf_ops); |
422 | if (!bp) { | 422 | if (!bp) { |
423 | error = ENOMEM; | 423 | error = -ENOMEM; |
424 | goto error0; | 424 | goto error0; |
425 | } | 425 | } |
426 | 426 | ||
@@ -531,7 +531,7 @@ xfs_growfs_data_private( | |||
531 | bp->b_ops = &xfs_sb_buf_ops; | 531 | bp->b_ops = &xfs_sb_buf_ops; |
532 | xfs_buf_zero(bp, 0, BBTOB(bp->b_length)); | 532 | xfs_buf_zero(bp, 0, BBTOB(bp->b_length)); |
533 | } else | 533 | } else |
534 | error = ENOMEM; | 534 | error = -ENOMEM; |
535 | } | 535 | } |
536 | 536 | ||
537 | /* | 537 | /* |
@@ -576,17 +576,17 @@ xfs_growfs_log_private( | |||
576 | 576 | ||
577 | nb = in->newblocks; | 577 | nb = in->newblocks; |
578 | if (nb < XFS_MIN_LOG_BLOCKS || nb < XFS_B_TO_FSB(mp, XFS_MIN_LOG_BYTES)) | 578 | if (nb < XFS_MIN_LOG_BLOCKS || nb < XFS_B_TO_FSB(mp, XFS_MIN_LOG_BYTES)) |
579 | return XFS_ERROR(EINVAL); | 579 | return -EINVAL; |
580 | if (nb == mp->m_sb.sb_logblocks && | 580 | if (nb == mp->m_sb.sb_logblocks && |
581 | in->isint == (mp->m_sb.sb_logstart != 0)) | 581 | in->isint == (mp->m_sb.sb_logstart != 0)) |
582 | return XFS_ERROR(EINVAL); | 582 | return -EINVAL; |
583 | /* | 583 | /* |
584 | * Moving the log is hard, need new interfaces to sync | 584 | * Moving the log is hard, need new interfaces to sync |
585 | * the log first, hold off all activity while moving it. | 585 | * the log first, hold off all activity while moving it. |
586 | * Can have shorter or longer log in the same space, | 586 | * Can have shorter or longer log in the same space, |
587 | * or transform internal to external log or vice versa. | 587 | * or transform internal to external log or vice versa. |
588 | */ | 588 | */ |
589 | return XFS_ERROR(ENOSYS); | 589 | return -ENOSYS; |
590 | } | 590 | } |
591 | 591 | ||
592 | /* | 592 | /* |
@@ -604,9 +604,9 @@ xfs_growfs_data( | |||
604 | int error; | 604 | int error; |
605 | 605 | ||
606 | if (!capable(CAP_SYS_ADMIN)) | 606 | if (!capable(CAP_SYS_ADMIN)) |
607 | return XFS_ERROR(EPERM); | 607 | return -EPERM; |
608 | if (!mutex_trylock(&mp->m_growlock)) | 608 | if (!mutex_trylock(&mp->m_growlock)) |
609 | return XFS_ERROR(EWOULDBLOCK); | 609 | return -EWOULDBLOCK; |
610 | error = xfs_growfs_data_private(mp, in); | 610 | error = xfs_growfs_data_private(mp, in); |
611 | mutex_unlock(&mp->m_growlock); | 611 | mutex_unlock(&mp->m_growlock); |
612 | return error; | 612 | return error; |
@@ -620,9 +620,9 @@ xfs_growfs_log( | |||
620 | int error; | 620 | int error; |
621 | 621 | ||
622 | if (!capable(CAP_SYS_ADMIN)) | 622 | if (!capable(CAP_SYS_ADMIN)) |
623 | return XFS_ERROR(EPERM); | 623 | return -EPERM; |
624 | if (!mutex_trylock(&mp->m_growlock)) | 624 | if (!mutex_trylock(&mp->m_growlock)) |
625 | return XFS_ERROR(EWOULDBLOCK); | 625 | return -EWOULDBLOCK; |
626 | error = xfs_growfs_log_private(mp, in); | 626 | error = xfs_growfs_log_private(mp, in); |
627 | mutex_unlock(&mp->m_growlock); | 627 | mutex_unlock(&mp->m_growlock); |
628 | return error; | 628 | return error; |
@@ -674,7 +674,7 @@ xfs_reserve_blocks( | |||
674 | /* If inval is null, report current values and return */ | 674 | /* If inval is null, report current values and return */ |
675 | if (inval == (__uint64_t *)NULL) { | 675 | if (inval == (__uint64_t *)NULL) { |
676 | if (!outval) | 676 | if (!outval) |
677 | return EINVAL; | 677 | return -EINVAL; |
678 | outval->resblks = mp->m_resblks; | 678 | outval->resblks = mp->m_resblks; |
679 | outval->resblks_avail = mp->m_resblks_avail; | 679 | outval->resblks_avail = mp->m_resblks_avail; |
680 | return 0; | 680 | return 0; |
@@ -757,7 +757,7 @@ out: | |||
757 | int error; | 757 | int error; |
758 | error = xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS, | 758 | error = xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS, |
759 | fdblks_delta, 0); | 759 | fdblks_delta, 0); |
760 | if (error == ENOSPC) | 760 | if (error == -ENOSPC) |
761 | goto retry; | 761 | goto retry; |
762 | } | 762 | } |
763 | return 0; | 763 | return 0; |
@@ -818,7 +818,7 @@ xfs_fs_goingdown( | |||
818 | SHUTDOWN_FORCE_UMOUNT | SHUTDOWN_LOG_IO_ERROR); | 818 | SHUTDOWN_FORCE_UMOUNT | SHUTDOWN_LOG_IO_ERROR); |
819 | break; | 819 | break; |
820 | default: | 820 | default: |
821 | return XFS_ERROR(EINVAL); | 821 | return -EINVAL; |
822 | } | 822 | } |
823 | 823 | ||
824 | return 0; | 824 | return 0; |
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index c48df5f25b9f..981b2cf51985 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -33,6 +33,9 @@ | |||
33 | #include "xfs_trace.h" | 33 | #include "xfs_trace.h" |
34 | #include "xfs_icache.h" | 34 | #include "xfs_icache.h" |
35 | #include "xfs_bmap_util.h" | 35 | #include "xfs_bmap_util.h" |
36 | #include "xfs_quota.h" | ||
37 | #include "xfs_dquot_item.h" | ||
38 | #include "xfs_dquot.h" | ||
36 | 39 | ||
37 | #include <linux/kthread.h> | 40 | #include <linux/kthread.h> |
38 | #include <linux/freezer.h> | 41 | #include <linux/freezer.h> |
@@ -158,7 +161,7 @@ xfs_iget_cache_hit( | |||
158 | if (ip->i_ino != ino) { | 161 | if (ip->i_ino != ino) { |
159 | trace_xfs_iget_skip(ip); | 162 | trace_xfs_iget_skip(ip); |
160 | XFS_STATS_INC(xs_ig_frecycle); | 163 | XFS_STATS_INC(xs_ig_frecycle); |
161 | error = EAGAIN; | 164 | error = -EAGAIN; |
162 | goto out_error; | 165 | goto out_error; |
163 | } | 166 | } |
164 | 167 | ||
@@ -176,7 +179,7 @@ xfs_iget_cache_hit( | |||
176 | if (ip->i_flags & (XFS_INEW|XFS_IRECLAIM)) { | 179 | if (ip->i_flags & (XFS_INEW|XFS_IRECLAIM)) { |
177 | trace_xfs_iget_skip(ip); | 180 | trace_xfs_iget_skip(ip); |
178 | XFS_STATS_INC(xs_ig_frecycle); | 181 | XFS_STATS_INC(xs_ig_frecycle); |
179 | error = EAGAIN; | 182 | error = -EAGAIN; |
180 | goto out_error; | 183 | goto out_error; |
181 | } | 184 | } |
182 | 185 | ||
@@ -184,7 +187,7 @@ xfs_iget_cache_hit( | |||
184 | * If lookup is racing with unlink return an error immediately. | 187 | * If lookup is racing with unlink return an error immediately. |
185 | */ | 188 | */ |
186 | if (ip->i_d.di_mode == 0 && !(flags & XFS_IGET_CREATE)) { | 189 | if (ip->i_d.di_mode == 0 && !(flags & XFS_IGET_CREATE)) { |
187 | error = ENOENT; | 190 | error = -ENOENT; |
188 | goto out_error; | 191 | goto out_error; |
189 | } | 192 | } |
190 | 193 | ||
@@ -206,7 +209,7 @@ xfs_iget_cache_hit( | |||
206 | spin_unlock(&ip->i_flags_lock); | 209 | spin_unlock(&ip->i_flags_lock); |
207 | rcu_read_unlock(); | 210 | rcu_read_unlock(); |
208 | 211 | ||
209 | error = -inode_init_always(mp->m_super, inode); | 212 | error = inode_init_always(mp->m_super, inode); |
210 | if (error) { | 213 | if (error) { |
211 | /* | 214 | /* |
212 | * Re-initializing the inode failed, and we are in deep | 215 | * Re-initializing the inode failed, and we are in deep |
@@ -243,7 +246,7 @@ xfs_iget_cache_hit( | |||
243 | /* If the VFS inode is being torn down, pause and try again. */ | 246 | /* If the VFS inode is being torn down, pause and try again. */ |
244 | if (!igrab(inode)) { | 247 | if (!igrab(inode)) { |
245 | trace_xfs_iget_skip(ip); | 248 | trace_xfs_iget_skip(ip); |
246 | error = EAGAIN; | 249 | error = -EAGAIN; |
247 | goto out_error; | 250 | goto out_error; |
248 | } | 251 | } |
249 | 252 | ||
@@ -285,7 +288,7 @@ xfs_iget_cache_miss( | |||
285 | 288 | ||
286 | ip = xfs_inode_alloc(mp, ino); | 289 | ip = xfs_inode_alloc(mp, ino); |
287 | if (!ip) | 290 | if (!ip) |
288 | return ENOMEM; | 291 | return -ENOMEM; |
289 | 292 | ||
290 | error = xfs_iread(mp, tp, ip, flags); | 293 | error = xfs_iread(mp, tp, ip, flags); |
291 | if (error) | 294 | if (error) |
@@ -294,7 +297,7 @@ xfs_iget_cache_miss( | |||
294 | trace_xfs_iget_miss(ip); | 297 | trace_xfs_iget_miss(ip); |
295 | 298 | ||
296 | if ((ip->i_d.di_mode == 0) && !(flags & XFS_IGET_CREATE)) { | 299 | if ((ip->i_d.di_mode == 0) && !(flags & XFS_IGET_CREATE)) { |
297 | error = ENOENT; | 300 | error = -ENOENT; |
298 | goto out_destroy; | 301 | goto out_destroy; |
299 | } | 302 | } |
300 | 303 | ||
@@ -305,7 +308,7 @@ xfs_iget_cache_miss( | |||
305 | * recurse into the file system. | 308 | * recurse into the file system. |
306 | */ | 309 | */ |
307 | if (radix_tree_preload(GFP_NOFS)) { | 310 | if (radix_tree_preload(GFP_NOFS)) { |
308 | error = EAGAIN; | 311 | error = -EAGAIN; |
309 | goto out_destroy; | 312 | goto out_destroy; |
310 | } | 313 | } |
311 | 314 | ||
@@ -341,7 +344,7 @@ xfs_iget_cache_miss( | |||
341 | if (unlikely(error)) { | 344 | if (unlikely(error)) { |
342 | WARN_ON(error != -EEXIST); | 345 | WARN_ON(error != -EEXIST); |
343 | XFS_STATS_INC(xs_ig_dup); | 346 | XFS_STATS_INC(xs_ig_dup); |
344 | error = EAGAIN; | 347 | error = -EAGAIN; |
345 | goto out_preload_end; | 348 | goto out_preload_end; |
346 | } | 349 | } |
347 | spin_unlock(&pag->pag_ici_lock); | 350 | spin_unlock(&pag->pag_ici_lock); |
@@ -408,7 +411,7 @@ xfs_iget( | |||
408 | 411 | ||
409 | /* reject inode numbers outside existing AGs */ | 412 | /* reject inode numbers outside existing AGs */ |
410 | if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount) | 413 | if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount) |
411 | return EINVAL; | 414 | return -EINVAL; |
412 | 415 | ||
413 | /* get the perag structure and ensure that it's inode capable */ | 416 | /* get the perag structure and ensure that it's inode capable */ |
414 | pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino)); | 417 | pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino)); |
@@ -445,7 +448,7 @@ again: | |||
445 | return 0; | 448 | return 0; |
446 | 449 | ||
447 | out_error_or_again: | 450 | out_error_or_again: |
448 | if (error == EAGAIN) { | 451 | if (error == -EAGAIN) { |
449 | delay(1); | 452 | delay(1); |
450 | goto again; | 453 | goto again; |
451 | } | 454 | } |
@@ -489,18 +492,18 @@ xfs_inode_ag_walk_grab( | |||
489 | 492 | ||
490 | /* nothing to sync during shutdown */ | 493 | /* nothing to sync during shutdown */ |
491 | if (XFS_FORCED_SHUTDOWN(ip->i_mount)) | 494 | if (XFS_FORCED_SHUTDOWN(ip->i_mount)) |
492 | return EFSCORRUPTED; | 495 | return -EFSCORRUPTED; |
493 | 496 | ||
494 | /* If we can't grab the inode, it must be on its way to reclaim. */ | 497 | /* If we can't grab the inode, it must be on its way to reclaim. */ |
495 | if (!igrab(inode)) | 498 | if (!igrab(inode)) |
496 | return ENOENT; | 499 | return -ENOENT; |
497 | 500 | ||
498 | /* inode is valid */ | 501 | /* inode is valid */ |
499 | return 0; | 502 | return 0; |
500 | 503 | ||
501 | out_unlock_noent: | 504 | out_unlock_noent: |
502 | spin_unlock(&ip->i_flags_lock); | 505 | spin_unlock(&ip->i_flags_lock); |
503 | return ENOENT; | 506 | return -ENOENT; |
504 | } | 507 | } |
505 | 508 | ||
506 | STATIC int | 509 | STATIC int |
@@ -583,16 +586,16 @@ restart: | |||
583 | continue; | 586 | continue; |
584 | error = execute(batch[i], flags, args); | 587 | error = execute(batch[i], flags, args); |
585 | IRELE(batch[i]); | 588 | IRELE(batch[i]); |
586 | if (error == EAGAIN) { | 589 | if (error == -EAGAIN) { |
587 | skipped++; | 590 | skipped++; |
588 | continue; | 591 | continue; |
589 | } | 592 | } |
590 | if (error && last_error != EFSCORRUPTED) | 593 | if (error && last_error != -EFSCORRUPTED) |
591 | last_error = error; | 594 | last_error = error; |
592 | } | 595 | } |
593 | 596 | ||
594 | /* bail out if the filesystem is corrupted. */ | 597 | /* bail out if the filesystem is corrupted. */ |
595 | if (error == EFSCORRUPTED) | 598 | if (error == -EFSCORRUPTED) |
596 | break; | 599 | break; |
597 | 600 | ||
598 | cond_resched(); | 601 | cond_resched(); |
@@ -652,11 +655,11 @@ xfs_inode_ag_iterator( | |||
652 | xfs_perag_put(pag); | 655 | xfs_perag_put(pag); |
653 | if (error) { | 656 | if (error) { |
654 | last_error = error; | 657 | last_error = error; |
655 | if (error == EFSCORRUPTED) | 658 | if (error == -EFSCORRUPTED) |
656 | break; | 659 | break; |
657 | } | 660 | } |
658 | } | 661 | } |
659 | return XFS_ERROR(last_error); | 662 | return last_error; |
660 | } | 663 | } |
661 | 664 | ||
662 | int | 665 | int |
@@ -680,11 +683,11 @@ xfs_inode_ag_iterator_tag( | |||
680 | xfs_perag_put(pag); | 683 | xfs_perag_put(pag); |
681 | if (error) { | 684 | if (error) { |
682 | last_error = error; | 685 | last_error = error; |
683 | if (error == EFSCORRUPTED) | 686 | if (error == -EFSCORRUPTED) |
684 | break; | 687 | break; |
685 | } | 688 | } |
686 | } | 689 | } |
687 | return XFS_ERROR(last_error); | 690 | return last_error; |
688 | } | 691 | } |
689 | 692 | ||
690 | /* | 693 | /* |
@@ -944,7 +947,7 @@ restart: | |||
944 | * see the stale flag set on the inode. | 947 | * see the stale flag set on the inode. |
945 | */ | 948 | */ |
946 | error = xfs_iflush(ip, &bp); | 949 | error = xfs_iflush(ip, &bp); |
947 | if (error == EAGAIN) { | 950 | if (error == -EAGAIN) { |
948 | xfs_iunlock(ip, XFS_ILOCK_EXCL); | 951 | xfs_iunlock(ip, XFS_ILOCK_EXCL); |
949 | /* backoff longer than in xfs_ifree_cluster */ | 952 | /* backoff longer than in xfs_ifree_cluster */ |
950 | delay(2); | 953 | delay(2); |
@@ -997,7 +1000,7 @@ out: | |||
997 | xfs_iflags_clear(ip, XFS_IRECLAIM); | 1000 | xfs_iflags_clear(ip, XFS_IRECLAIM); |
998 | xfs_iunlock(ip, XFS_ILOCK_EXCL); | 1001 | xfs_iunlock(ip, XFS_ILOCK_EXCL); |
999 | /* | 1002 | /* |
1000 | * We could return EAGAIN here to make reclaim rescan the inode tree in | 1003 | * We could return -EAGAIN here to make reclaim rescan the inode tree in |
1001 | * a short while. However, this just burns CPU time scanning the tree | 1004 | * a short while. However, this just burns CPU time scanning the tree |
1002 | * waiting for IO to complete and the reclaim work never goes back to | 1005 | * waiting for IO to complete and the reclaim work never goes back to |
1003 | * the idle state. Instead, return 0 to let the next scheduled | 1006 | * the idle state. Instead, return 0 to let the next scheduled |
@@ -1100,7 +1103,7 @@ restart: | |||
1100 | if (!batch[i]) | 1103 | if (!batch[i]) |
1101 | continue; | 1104 | continue; |
1102 | error = xfs_reclaim_inode(batch[i], pag, flags); | 1105 | error = xfs_reclaim_inode(batch[i], pag, flags); |
1103 | if (error && last_error != EFSCORRUPTED) | 1106 | if (error && last_error != -EFSCORRUPTED) |
1104 | last_error = error; | 1107 | last_error = error; |
1105 | } | 1108 | } |
1106 | 1109 | ||
@@ -1129,7 +1132,7 @@ restart: | |||
1129 | trylock = 0; | 1132 | trylock = 0; |
1130 | goto restart; | 1133 | goto restart; |
1131 | } | 1134 | } |
1132 | return XFS_ERROR(last_error); | 1135 | return last_error; |
1133 | } | 1136 | } |
1134 | 1137 | ||
1135 | int | 1138 | int |
@@ -1203,6 +1206,30 @@ xfs_inode_match_id( | |||
1203 | return 1; | 1206 | return 1; |
1204 | } | 1207 | } |
1205 | 1208 | ||
1209 | /* | ||
1210 | * A union-based inode filtering algorithm. Process the inode if any of the | ||
1211 | * criteria match. This is for global/internal scans only. | ||
1212 | */ | ||
1213 | STATIC int | ||
1214 | xfs_inode_match_id_union( | ||
1215 | struct xfs_inode *ip, | ||
1216 | struct xfs_eofblocks *eofb) | ||
1217 | { | ||
1218 | if ((eofb->eof_flags & XFS_EOF_FLAGS_UID) && | ||
1219 | uid_eq(VFS_I(ip)->i_uid, eofb->eof_uid)) | ||
1220 | return 1; | ||
1221 | |||
1222 | if ((eofb->eof_flags & XFS_EOF_FLAGS_GID) && | ||
1223 | gid_eq(VFS_I(ip)->i_gid, eofb->eof_gid)) | ||
1224 | return 1; | ||
1225 | |||
1226 | if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) && | ||
1227 | xfs_get_projid(ip) == eofb->eof_prid) | ||
1228 | return 1; | ||
1229 | |||
1230 | return 0; | ||
1231 | } | ||
1232 | |||
1206 | STATIC int | 1233 | STATIC int |
1207 | xfs_inode_free_eofblocks( | 1234 | xfs_inode_free_eofblocks( |
1208 | struct xfs_inode *ip, | 1235 | struct xfs_inode *ip, |
@@ -1211,6 +1238,10 @@ xfs_inode_free_eofblocks( | |||
1211 | { | 1238 | { |
1212 | int ret; | 1239 | int ret; |
1213 | struct xfs_eofblocks *eofb = args; | 1240 | struct xfs_eofblocks *eofb = args; |
1241 | bool need_iolock = true; | ||
1242 | int match; | ||
1243 | |||
1244 | ASSERT(!eofb || (eofb && eofb->eof_scan_owner != 0)); | ||
1214 | 1245 | ||
1215 | if (!xfs_can_free_eofblocks(ip, false)) { | 1246 | if (!xfs_can_free_eofblocks(ip, false)) { |
1216 | /* inode could be preallocated or append-only */ | 1247 | /* inode could be preallocated or append-only */ |
@@ -1228,19 +1259,31 @@ xfs_inode_free_eofblocks( | |||
1228 | return 0; | 1259 | return 0; |
1229 | 1260 | ||
1230 | if (eofb) { | 1261 | if (eofb) { |
1231 | if (!xfs_inode_match_id(ip, eofb)) | 1262 | if (eofb->eof_flags & XFS_EOF_FLAGS_UNION) |
1263 | match = xfs_inode_match_id_union(ip, eofb); | ||
1264 | else | ||
1265 | match = xfs_inode_match_id(ip, eofb); | ||
1266 | if (!match) | ||
1232 | return 0; | 1267 | return 0; |
1233 | 1268 | ||
1234 | /* skip the inode if the file size is too small */ | 1269 | /* skip the inode if the file size is too small */ |
1235 | if (eofb->eof_flags & XFS_EOF_FLAGS_MINFILESIZE && | 1270 | if (eofb->eof_flags & XFS_EOF_FLAGS_MINFILESIZE && |
1236 | XFS_ISIZE(ip) < eofb->eof_min_file_size) | 1271 | XFS_ISIZE(ip) < eofb->eof_min_file_size) |
1237 | return 0; | 1272 | return 0; |
1273 | |||
1274 | /* | ||
1275 | * A scan owner implies we already hold the iolock. Skip it in | ||
1276 | * xfs_free_eofblocks() to avoid deadlock. This also eliminates | ||
1277 | * the possibility of EAGAIN being returned. | ||
1278 | */ | ||
1279 | if (eofb->eof_scan_owner == ip->i_ino) | ||
1280 | need_iolock = false; | ||
1238 | } | 1281 | } |
1239 | 1282 | ||
1240 | ret = xfs_free_eofblocks(ip->i_mount, ip, true); | 1283 | ret = xfs_free_eofblocks(ip->i_mount, ip, need_iolock); |
1241 | 1284 | ||
1242 | /* don't revisit the inode if we're not waiting */ | 1285 | /* don't revisit the inode if we're not waiting */ |
1243 | if (ret == EAGAIN && !(flags & SYNC_WAIT)) | 1286 | if (ret == -EAGAIN && !(flags & SYNC_WAIT)) |
1244 | ret = 0; | 1287 | ret = 0; |
1245 | 1288 | ||
1246 | return ret; | 1289 | return ret; |
@@ -1260,6 +1303,55 @@ xfs_icache_free_eofblocks( | |||
1260 | eofb, XFS_ICI_EOFBLOCKS_TAG); | 1303 | eofb, XFS_ICI_EOFBLOCKS_TAG); |
1261 | } | 1304 | } |
1262 | 1305 | ||
1306 | /* | ||
1307 | * Run eofblocks scans on the quotas applicable to the inode. For inodes with | ||
1308 | * multiple quotas, we don't know exactly which quota caused an allocation | ||
1309 | * failure. We make a best effort by including each quota under low free space | ||
1310 | * conditions (less than 1% free space) in the scan. | ||
1311 | */ | ||
1312 | int | ||
1313 | xfs_inode_free_quota_eofblocks( | ||
1314 | struct xfs_inode *ip) | ||
1315 | { | ||
1316 | int scan = 0; | ||
1317 | struct xfs_eofblocks eofb = {0}; | ||
1318 | struct xfs_dquot *dq; | ||
1319 | |||
1320 | ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL)); | ||
1321 | |||
1322 | /* | ||
1323 | * Set the scan owner to avoid a potential livelock. Otherwise, the scan | ||
1324 | * can repeatedly trylock on the inode we're currently processing. We | ||
1325 | * run a sync scan to increase effectiveness and use the union filter to | ||
1326 | * cover all applicable quotas in a single scan. | ||
1327 | */ | ||
1328 | eofb.eof_scan_owner = ip->i_ino; | ||
1329 | eofb.eof_flags = XFS_EOF_FLAGS_UNION|XFS_EOF_FLAGS_SYNC; | ||
1330 | |||
1331 | if (XFS_IS_UQUOTA_ENFORCED(ip->i_mount)) { | ||
1332 | dq = xfs_inode_dquot(ip, XFS_DQ_USER); | ||
1333 | if (dq && xfs_dquot_lowsp(dq)) { | ||
1334 | eofb.eof_uid = VFS_I(ip)->i_uid; | ||
1335 | eofb.eof_flags |= XFS_EOF_FLAGS_UID; | ||
1336 | scan = 1; | ||
1337 | } | ||
1338 | } | ||
1339 | |||
1340 | if (XFS_IS_GQUOTA_ENFORCED(ip->i_mount)) { | ||
1341 | dq = xfs_inode_dquot(ip, XFS_DQ_GROUP); | ||
1342 | if (dq && xfs_dquot_lowsp(dq)) { | ||
1343 | eofb.eof_gid = VFS_I(ip)->i_gid; | ||
1344 | eofb.eof_flags |= XFS_EOF_FLAGS_GID; | ||
1345 | scan = 1; | ||
1346 | } | ||
1347 | } | ||
1348 | |||
1349 | if (scan) | ||
1350 | xfs_icache_free_eofblocks(ip->i_mount, &eofb); | ||
1351 | |||
1352 | return scan; | ||
1353 | } | ||
1354 | |||
1263 | void | 1355 | void |
1264 | xfs_inode_set_eofblocks_tag( | 1356 | xfs_inode_set_eofblocks_tag( |
1265 | xfs_inode_t *ip) | 1357 | xfs_inode_t *ip) |
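The new xfs_inode_match_id_union() above reports a match when any one of the uid/gid/project-id criteria fires, whereas the existing xfs_inode_match_id() (not shown in full here) presumably requires every requested criterion to match. A standalone sketch of the intersection-versus-union idea, using simplified stand-in types rather than the XFS structures:

/*
 * Standalone sketch of intersection vs. union matching; the types and
 * field names are simplified stand-ins, not the XFS structures.
 */
#include <stdbool.h>
#include <stdio.h>

#define F_UID   0x1
#define F_GID   0x2

struct filter {
        unsigned int flags;
        unsigned int uid;
        unsigned int gid;
};

struct node {
        unsigned int uid;
        unsigned int gid;
};

/* "Intersection": every requested criterion must match. */
static bool match_all(const struct node *n, const struct filter *f)
{
        if ((f->flags & F_UID) && n->uid != f->uid)
                return false;
        if ((f->flags & F_GID) && n->gid != f->gid)
                return false;
        return true;
}

/* "Union": any single requested criterion is enough. */
static bool match_any(const struct node *n, const struct filter *f)
{
        if ((f->flags & F_UID) && n->uid == f->uid)
                return true;
        if ((f->flags & F_GID) && n->gid == f->gid)
                return true;
        return false;
}

int main(void)
{
        struct filter f = { F_UID | F_GID, 1000, 4242 };
        struct node n = { 1000, 100 };  /* uid matches, gid does not */

        printf("all=%d any=%d\n", match_all(&n, &f), match_any(&n, &f));
        return 0;
}

The union form suits the quota scan above, which cannot tell exactly which quota triggered the allocation failure and therefore wants to free post-EOF blocks charged to any of the low-space quotas.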
diff --git a/fs/xfs/xfs_icache.h b/fs/xfs/xfs_icache.h
index 9cf017b899be..46748b86b12f 100644
--- a/fs/xfs/xfs_icache.h
+++ b/fs/xfs/xfs_icache.h
@@ -27,6 +27,7 @@ struct xfs_eofblocks { | |||
27 | kgid_t eof_gid; | 27 | kgid_t eof_gid; |
28 | prid_t eof_prid; | 28 | prid_t eof_prid; |
29 | __u64 eof_min_file_size; | 29 | __u64 eof_min_file_size; |
30 | xfs_ino_t eof_scan_owner; | ||
30 | }; | 31 | }; |
31 | 32 | ||
32 | #define SYNC_WAIT 0x0001 /* wait for i/o to complete */ | 33 | #define SYNC_WAIT 0x0001 /* wait for i/o to complete */ |
@@ -57,6 +58,7 @@ void xfs_inode_set_reclaim_tag(struct xfs_inode *ip); | |||
57 | void xfs_inode_set_eofblocks_tag(struct xfs_inode *ip); | 58 | void xfs_inode_set_eofblocks_tag(struct xfs_inode *ip); |
58 | void xfs_inode_clear_eofblocks_tag(struct xfs_inode *ip); | 59 | void xfs_inode_clear_eofblocks_tag(struct xfs_inode *ip); |
59 | int xfs_icache_free_eofblocks(struct xfs_mount *, struct xfs_eofblocks *); | 60 | int xfs_icache_free_eofblocks(struct xfs_mount *, struct xfs_eofblocks *); |
61 | int xfs_inode_free_quota_eofblocks(struct xfs_inode *ip); | ||
60 | void xfs_eofblocks_worker(struct work_struct *); | 62 | void xfs_eofblocks_worker(struct work_struct *); |
61 | 63 | ||
62 | int xfs_inode_ag_iterator(struct xfs_mount *mp, | 64 | int xfs_inode_ag_iterator(struct xfs_mount *mp, |
@@ -72,31 +74,32 @@ xfs_fs_eofblocks_from_user( | |||
72 | struct xfs_eofblocks *dst) | 74 | struct xfs_eofblocks *dst) |
73 | { | 75 | { |
74 | if (src->eof_version != XFS_EOFBLOCKS_VERSION) | 76 | if (src->eof_version != XFS_EOFBLOCKS_VERSION) |
75 | return EINVAL; | 77 | return -EINVAL; |
76 | 78 | ||
77 | if (src->eof_flags & ~XFS_EOF_FLAGS_VALID) | 79 | if (src->eof_flags & ~XFS_EOF_FLAGS_VALID) |
78 | return EINVAL; | 80 | return -EINVAL; |
79 | 81 | ||
80 | if (memchr_inv(&src->pad32, 0, sizeof(src->pad32)) || | 82 | if (memchr_inv(&src->pad32, 0, sizeof(src->pad32)) || |
81 | memchr_inv(src->pad64, 0, sizeof(src->pad64))) | 83 | memchr_inv(src->pad64, 0, sizeof(src->pad64))) |
82 | return EINVAL; | 84 | return -EINVAL; |
83 | 85 | ||
84 | dst->eof_flags = src->eof_flags; | 86 | dst->eof_flags = src->eof_flags; |
85 | dst->eof_prid = src->eof_prid; | 87 | dst->eof_prid = src->eof_prid; |
86 | dst->eof_min_file_size = src->eof_min_file_size; | 88 | dst->eof_min_file_size = src->eof_min_file_size; |
89 | dst->eof_scan_owner = NULLFSINO; | ||
87 | 90 | ||
88 | dst->eof_uid = INVALID_UID; | 91 | dst->eof_uid = INVALID_UID; |
89 | if (src->eof_flags & XFS_EOF_FLAGS_UID) { | 92 | if (src->eof_flags & XFS_EOF_FLAGS_UID) { |
90 | dst->eof_uid = make_kuid(current_user_ns(), src->eof_uid); | 93 | dst->eof_uid = make_kuid(current_user_ns(), src->eof_uid); |
91 | if (!uid_valid(dst->eof_uid)) | 94 | if (!uid_valid(dst->eof_uid)) |
92 | return EINVAL; | 95 | return -EINVAL; |
93 | } | 96 | } |
94 | 97 | ||
95 | dst->eof_gid = INVALID_GID; | 98 | dst->eof_gid = INVALID_GID; |
96 | if (src->eof_flags & XFS_EOF_FLAGS_GID) { | 99 | if (src->eof_flags & XFS_EOF_FLAGS_GID) { |
97 | dst->eof_gid = make_kgid(current_user_ns(), src->eof_gid); | 100 | dst->eof_gid = make_kgid(current_user_ns(), src->eof_gid); |
98 | if (!gid_valid(dst->eof_gid)) | 101 | if (!gid_valid(dst->eof_gid)) |
99 | return EINVAL; | 102 | return -EINVAL; |
100 | } | 103 | } |
101 | return 0; | 104 | return 0; |
102 | } | 105 | } |
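The eof_scan_owner field added above lets a kernel-initiated scan record which inode's iolock the caller already holds, so xfs_inode_free_eofblocks() can skip re-taking that lock (the deadlock and livelock comments in the xfs_icache.c hunks); user-requested scans get NULLFSINO here, so they never match an inode and always take the iolock. A simplified standalone model of that skip-the-owner idea, using a plain pthread mutex rather than the XFS iolock:

/*
 * Simplified model of the "scan owner" skip, not the XFS locking code:
 * a scan kicked off while the caller already holds one object's lock
 * must not trylock that same object again.
 */
#include <pthread.h>
#include <stdio.h>

struct obj {
        int id;
        pthread_mutex_t lock;
};

static void scan(struct obj *objs, int n, int owner_id)
{
        for (int i = 0; i < n; i++) {
                if (objs[i].id == owner_id) {
                        /* Caller already holds this lock; work without it. */
                        printf("obj %d: owner, lock skipped\n", objs[i].id);
                        continue;
                }
                if (pthread_mutex_trylock(&objs[i].lock) == 0) {
                        printf("obj %d: locked and processed\n", objs[i].id);
                        pthread_mutex_unlock(&objs[i].lock);
                }
        }
}

int main(void)
{
        struct obj objs[2] = {
                { 1, PTHREAD_MUTEX_INITIALIZER },
                { 2, PTHREAD_MUTEX_INITIALIZER },
        };

        /* Pretend the caller holds obj 1's lock when the scan starts. */
        pthread_mutex_lock(&objs[0].lock);
        scan(objs, 2, 1);
        pthread_mutex_unlock(&objs[0].lock);
        return 0;
}

Build with -pthread; the only point is that the scan treats the owner's object as already locked instead of contending for it.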
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index a6115fe1ac94..fea3c92fb3f0 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -583,7 +583,7 @@ xfs_lookup( | |||
583 | trace_xfs_lookup(dp, name); | 583 | trace_xfs_lookup(dp, name); |
584 | 584 | ||
585 | if (XFS_FORCED_SHUTDOWN(dp->i_mount)) | 585 | if (XFS_FORCED_SHUTDOWN(dp->i_mount)) |
586 | return XFS_ERROR(EIO); | 586 | return -EIO; |
587 | 587 | ||
588 | lock_mode = xfs_ilock_data_map_shared(dp); | 588 | lock_mode = xfs_ilock_data_map_shared(dp); |
589 | error = xfs_dir_lookup(NULL, dp, name, &inum, ci_name); | 589 | error = xfs_dir_lookup(NULL, dp, name, &inum, ci_name); |
@@ -893,7 +893,7 @@ xfs_dir_ialloc( | |||
893 | } | 893 | } |
894 | if (!ialloc_context && !ip) { | 894 | if (!ialloc_context && !ip) { |
895 | *ipp = NULL; | 895 | *ipp = NULL; |
896 | return XFS_ERROR(ENOSPC); | 896 | return -ENOSPC; |
897 | } | 897 | } |
898 | 898 | ||
899 | /* | 899 | /* |
@@ -1088,7 +1088,7 @@ xfs_create( | |||
1088 | trace_xfs_create(dp, name); | 1088 | trace_xfs_create(dp, name); |
1089 | 1089 | ||
1090 | if (XFS_FORCED_SHUTDOWN(mp)) | 1090 | if (XFS_FORCED_SHUTDOWN(mp)) |
1091 | return XFS_ERROR(EIO); | 1091 | return -EIO; |
1092 | 1092 | ||
1093 | prid = xfs_get_initial_prid(dp); | 1093 | prid = xfs_get_initial_prid(dp); |
1094 | 1094 | ||
@@ -1125,12 +1125,12 @@ xfs_create( | |||
1125 | */ | 1125 | */ |
1126 | tres.tr_logflags = XFS_TRANS_PERM_LOG_RES; | 1126 | tres.tr_logflags = XFS_TRANS_PERM_LOG_RES; |
1127 | error = xfs_trans_reserve(tp, &tres, resblks, 0); | 1127 | error = xfs_trans_reserve(tp, &tres, resblks, 0); |
1128 | if (error == ENOSPC) { | 1128 | if (error == -ENOSPC) { |
1129 | /* flush outstanding delalloc blocks and retry */ | 1129 | /* flush outstanding delalloc blocks and retry */ |
1130 | xfs_flush_inodes(mp); | 1130 | xfs_flush_inodes(mp); |
1131 | error = xfs_trans_reserve(tp, &tres, resblks, 0); | 1131 | error = xfs_trans_reserve(tp, &tres, resblks, 0); |
1132 | } | 1132 | } |
1133 | if (error == ENOSPC) { | 1133 | if (error == -ENOSPC) { |
1134 | /* No space at all so try a "no-allocation" reservation */ | 1134 | /* No space at all so try a "no-allocation" reservation */ |
1135 | resblks = 0; | 1135 | resblks = 0; |
1136 | error = xfs_trans_reserve(tp, &tres, 0, 0); | 1136 | error = xfs_trans_reserve(tp, &tres, 0, 0); |
@@ -1165,7 +1165,7 @@ xfs_create( | |||
1165 | error = xfs_dir_ialloc(&tp, dp, mode, is_dir ? 2 : 1, rdev, | 1165 | error = xfs_dir_ialloc(&tp, dp, mode, is_dir ? 2 : 1, rdev, |
1166 | prid, resblks > 0, &ip, &committed); | 1166 | prid, resblks > 0, &ip, &committed); |
1167 | if (error) { | 1167 | if (error) { |
1168 | if (error == ENOSPC) | 1168 | if (error == -ENOSPC) |
1169 | goto out_trans_cancel; | 1169 | goto out_trans_cancel; |
1170 | goto out_trans_abort; | 1170 | goto out_trans_abort; |
1171 | } | 1171 | } |
@@ -1184,7 +1184,7 @@ xfs_create( | |||
1184 | &first_block, &free_list, resblks ? | 1184 | &first_block, &free_list, resblks ? |
1185 | resblks - XFS_IALLOC_SPACE_RES(mp) : 0); | 1185 | resblks - XFS_IALLOC_SPACE_RES(mp) : 0); |
1186 | if (error) { | 1186 | if (error) { |
1187 | ASSERT(error != ENOSPC); | 1187 | ASSERT(error != -ENOSPC); |
1188 | goto out_trans_abort; | 1188 | goto out_trans_abort; |
1189 | } | 1189 | } |
1190 | xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); | 1190 | xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); |
@@ -1274,7 +1274,7 @@ xfs_create_tmpfile( | |||
1274 | uint resblks; | 1274 | uint resblks; |
1275 | 1275 | ||
1276 | if (XFS_FORCED_SHUTDOWN(mp)) | 1276 | if (XFS_FORCED_SHUTDOWN(mp)) |
1277 | return XFS_ERROR(EIO); | 1277 | return -EIO; |
1278 | 1278 | ||
1279 | prid = xfs_get_initial_prid(dp); | 1279 | prid = xfs_get_initial_prid(dp); |
1280 | 1280 | ||
@@ -1293,7 +1293,7 @@ xfs_create_tmpfile( | |||
1293 | 1293 | ||
1294 | tres = &M_RES(mp)->tr_create_tmpfile; | 1294 | tres = &M_RES(mp)->tr_create_tmpfile; |
1295 | error = xfs_trans_reserve(tp, tres, resblks, 0); | 1295 | error = xfs_trans_reserve(tp, tres, resblks, 0); |
1296 | if (error == ENOSPC) { | 1296 | if (error == -ENOSPC) { |
1297 | /* No space at all so try a "no-allocation" reservation */ | 1297 | /* No space at all so try a "no-allocation" reservation */ |
1298 | resblks = 0; | 1298 | resblks = 0; |
1299 | error = xfs_trans_reserve(tp, tres, 0, 0); | 1299 | error = xfs_trans_reserve(tp, tres, 0, 0); |
@@ -1311,7 +1311,7 @@ xfs_create_tmpfile( | |||
1311 | error = xfs_dir_ialloc(&tp, dp, mode, 1, 0, | 1311 | error = xfs_dir_ialloc(&tp, dp, mode, 1, 0, |
1312 | prid, resblks > 0, &ip, NULL); | 1312 | prid, resblks > 0, &ip, NULL); |
1313 | if (error) { | 1313 | if (error) { |
1314 | if (error == ENOSPC) | 1314 | if (error == -ENOSPC) |
1315 | goto out_trans_cancel; | 1315 | goto out_trans_cancel; |
1316 | goto out_trans_abort; | 1316 | goto out_trans_abort; |
1317 | } | 1317 | } |
@@ -1382,7 +1382,7 @@ xfs_link( | |||
1382 | ASSERT(!S_ISDIR(sip->i_d.di_mode)); | 1382 | ASSERT(!S_ISDIR(sip->i_d.di_mode)); |
1383 | 1383 | ||
1384 | if (XFS_FORCED_SHUTDOWN(mp)) | 1384 | if (XFS_FORCED_SHUTDOWN(mp)) |
1385 | return XFS_ERROR(EIO); | 1385 | return -EIO; |
1386 | 1386 | ||
1387 | error = xfs_qm_dqattach(sip, 0); | 1387 | error = xfs_qm_dqattach(sip, 0); |
1388 | if (error) | 1388 | if (error) |
@@ -1396,7 +1396,7 @@ xfs_link( | |||
1396 | cancel_flags = XFS_TRANS_RELEASE_LOG_RES; | 1396 | cancel_flags = XFS_TRANS_RELEASE_LOG_RES; |
1397 | resblks = XFS_LINK_SPACE_RES(mp, target_name->len); | 1397 | resblks = XFS_LINK_SPACE_RES(mp, target_name->len); |
1398 | error = xfs_trans_reserve(tp, &M_RES(mp)->tr_link, resblks, 0); | 1398 | error = xfs_trans_reserve(tp, &M_RES(mp)->tr_link, resblks, 0); |
1399 | if (error == ENOSPC) { | 1399 | if (error == -ENOSPC) { |
1400 | resblks = 0; | 1400 | resblks = 0; |
1401 | error = xfs_trans_reserve(tp, &M_RES(mp)->tr_link, 0, 0); | 1401 | error = xfs_trans_reserve(tp, &M_RES(mp)->tr_link, 0, 0); |
1402 | } | 1402 | } |
@@ -1417,7 +1417,7 @@ xfs_link( | |||
1417 | */ | 1417 | */ |
1418 | if (unlikely((tdp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) && | 1418 | if (unlikely((tdp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) && |
1419 | (xfs_get_projid(tdp) != xfs_get_projid(sip)))) { | 1419 | (xfs_get_projid(tdp) != xfs_get_projid(sip)))) { |
1420 | error = XFS_ERROR(EXDEV); | 1420 | error = -EXDEV; |
1421 | goto error_return; | 1421 | goto error_return; |
1422 | } | 1422 | } |
1423 | 1423 | ||
@@ -1635,8 +1635,8 @@ xfs_release( | |||
1635 | truncated = xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED); | 1635 | truncated = xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED); |
1636 | if (truncated) { | 1636 | if (truncated) { |
1637 | xfs_iflags_clear(ip, XFS_IDIRTY_RELEASE); | 1637 | xfs_iflags_clear(ip, XFS_IDIRTY_RELEASE); |
1638 | if (VN_DIRTY(VFS_I(ip)) && ip->i_delayed_blks > 0) { | 1638 | if (ip->i_delayed_blks > 0) { |
1639 | error = -filemap_flush(VFS_I(ip)->i_mapping); | 1639 | error = filemap_flush(VFS_I(ip)->i_mapping); |
1640 | if (error) | 1640 | if (error) |
1641 | return error; | 1641 | return error; |
1642 | } | 1642 | } |
@@ -1673,7 +1673,7 @@ xfs_release( | |||
1673 | return 0; | 1673 | return 0; |
1674 | 1674 | ||
1675 | error = xfs_free_eofblocks(mp, ip, true); | 1675 | error = xfs_free_eofblocks(mp, ip, true); |
1676 | if (error && error != EAGAIN) | 1676 | if (error && error != -EAGAIN) |
1677 | return error; | 1677 | return error; |
1678 | 1678 | ||
1679 | /* delalloc blocks after truncation means it really is dirty */ | 1679 | /* delalloc blocks after truncation means it really is dirty */ |
@@ -1772,7 +1772,7 @@ xfs_inactive_ifree( | |||
1772 | error = xfs_trans_reserve(tp, &M_RES(mp)->tr_ifree, | 1772 | error = xfs_trans_reserve(tp, &M_RES(mp)->tr_ifree, |
1773 | XFS_IFREE_SPACE_RES(mp), 0); | 1773 | XFS_IFREE_SPACE_RES(mp), 0); |
1774 | if (error) { | 1774 | if (error) { |
1775 | if (error == ENOSPC) { | 1775 | if (error == -ENOSPC) { |
1776 | xfs_warn_ratelimited(mp, | 1776 | xfs_warn_ratelimited(mp, |
1777 | "Failed to remove inode(s) from unlinked list. " | 1777 | "Failed to remove inode(s) from unlinked list. " |
1778 | "Please free space, unmount and run xfs_repair."); | 1778 | "Please free space, unmount and run xfs_repair."); |
@@ -2219,7 +2219,7 @@ xfs_ifree_cluster( | |||
2219 | XBF_UNMAPPED); | 2219 | XBF_UNMAPPED); |
2220 | 2220 | ||
2221 | if (!bp) | 2221 | if (!bp) |
2222 | return ENOMEM; | 2222 | return -ENOMEM; |
2223 | 2223 | ||
2224 | /* | 2224 | /* |
2225 | * This buffer may not have been correctly initialised as we | 2225 | * This buffer may not have been correctly initialised as we |
@@ -2491,7 +2491,7 @@ xfs_remove( | |||
2491 | trace_xfs_remove(dp, name); | 2491 | trace_xfs_remove(dp, name); |
2492 | 2492 | ||
2493 | if (XFS_FORCED_SHUTDOWN(mp)) | 2493 | if (XFS_FORCED_SHUTDOWN(mp)) |
2494 | return XFS_ERROR(EIO); | 2494 | return -EIO; |
2495 | 2495 | ||
2496 | error = xfs_qm_dqattach(dp, 0); | 2496 | error = xfs_qm_dqattach(dp, 0); |
2497 | if (error) | 2497 | if (error) |
@@ -2521,12 +2521,12 @@ xfs_remove( | |||
2521 | */ | 2521 | */ |
2522 | resblks = XFS_REMOVE_SPACE_RES(mp); | 2522 | resblks = XFS_REMOVE_SPACE_RES(mp); |
2523 | error = xfs_trans_reserve(tp, &M_RES(mp)->tr_remove, resblks, 0); | 2523 | error = xfs_trans_reserve(tp, &M_RES(mp)->tr_remove, resblks, 0); |
2524 | if (error == ENOSPC) { | 2524 | if (error == -ENOSPC) { |
2525 | resblks = 0; | 2525 | resblks = 0; |
2526 | error = xfs_trans_reserve(tp, &M_RES(mp)->tr_remove, 0, 0); | 2526 | error = xfs_trans_reserve(tp, &M_RES(mp)->tr_remove, 0, 0); |
2527 | } | 2527 | } |
2528 | if (error) { | 2528 | if (error) { |
2529 | ASSERT(error != ENOSPC); | 2529 | ASSERT(error != -ENOSPC); |
2530 | cancel_flags = 0; | 2530 | cancel_flags = 0; |
2531 | goto out_trans_cancel; | 2531 | goto out_trans_cancel; |
2532 | } | 2532 | } |
@@ -2543,11 +2543,11 @@ xfs_remove( | |||
2543 | if (is_dir) { | 2543 | if (is_dir) { |
2544 | ASSERT(ip->i_d.di_nlink >= 2); | 2544 | ASSERT(ip->i_d.di_nlink >= 2); |
2545 | if (ip->i_d.di_nlink != 2) { | 2545 | if (ip->i_d.di_nlink != 2) { |
2546 | error = XFS_ERROR(ENOTEMPTY); | 2546 | error = -ENOTEMPTY; |
2547 | goto out_trans_cancel; | 2547 | goto out_trans_cancel; |
2548 | } | 2548 | } |
2549 | if (!xfs_dir_isempty(ip)) { | 2549 | if (!xfs_dir_isempty(ip)) { |
2550 | error = XFS_ERROR(ENOTEMPTY); | 2550 | error = -ENOTEMPTY; |
2551 | goto out_trans_cancel; | 2551 | goto out_trans_cancel; |
2552 | } | 2552 | } |
2553 | 2553 | ||
@@ -2582,7 +2582,7 @@ xfs_remove( | |||
2582 | error = xfs_dir_removename(tp, dp, name, ip->i_ino, | 2582 | error = xfs_dir_removename(tp, dp, name, ip->i_ino, |
2583 | &first_block, &free_list, resblks); | 2583 | &first_block, &free_list, resblks); |
2584 | if (error) { | 2584 | if (error) { |
2585 | ASSERT(error != ENOENT); | 2585 | ASSERT(error != -ENOENT); |
2586 | goto out_bmap_cancel; | 2586 | goto out_bmap_cancel; |
2587 | } | 2587 | } |
2588 | 2588 | ||
@@ -2702,7 +2702,7 @@ xfs_rename( | |||
2702 | cancel_flags = XFS_TRANS_RELEASE_LOG_RES; | 2702 | cancel_flags = XFS_TRANS_RELEASE_LOG_RES; |
2703 | spaceres = XFS_RENAME_SPACE_RES(mp, target_name->len); | 2703 | spaceres = XFS_RENAME_SPACE_RES(mp, target_name->len); |
2704 | error = xfs_trans_reserve(tp, &M_RES(mp)->tr_rename, spaceres, 0); | 2704 | error = xfs_trans_reserve(tp, &M_RES(mp)->tr_rename, spaceres, 0); |
2705 | if (error == ENOSPC) { | 2705 | if (error == -ENOSPC) { |
2706 | spaceres = 0; | 2706 | spaceres = 0; |
2707 | error = xfs_trans_reserve(tp, &M_RES(mp)->tr_rename, 0, 0); | 2707 | error = xfs_trans_reserve(tp, &M_RES(mp)->tr_rename, 0, 0); |
2708 | } | 2708 | } |
@@ -2747,7 +2747,7 @@ xfs_rename( | |||
2747 | */ | 2747 | */ |
2748 | if (unlikely((target_dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) && | 2748 | if (unlikely((target_dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) && |
2749 | (xfs_get_projid(target_dp) != xfs_get_projid(src_ip)))) { | 2749 | (xfs_get_projid(target_dp) != xfs_get_projid(src_ip)))) { |
2750 | error = XFS_ERROR(EXDEV); | 2750 | error = -EXDEV; |
2751 | goto error_return; | 2751 | goto error_return; |
2752 | } | 2752 | } |
2753 | 2753 | ||
@@ -2770,7 +2770,7 @@ xfs_rename( | |||
2770 | error = xfs_dir_createname(tp, target_dp, target_name, | 2770 | error = xfs_dir_createname(tp, target_dp, target_name, |
2771 | src_ip->i_ino, &first_block, | 2771 | src_ip->i_ino, &first_block, |
2772 | &free_list, spaceres); | 2772 | &free_list, spaceres); |
2773 | if (error == ENOSPC) | 2773 | if (error == -ENOSPC) |
2774 | goto error_return; | 2774 | goto error_return; |
2775 | if (error) | 2775 | if (error) |
2776 | goto abort_return; | 2776 | goto abort_return; |
@@ -2795,7 +2795,7 @@ xfs_rename( | |||
2795 | */ | 2795 | */ |
2796 | if (!(xfs_dir_isempty(target_ip)) || | 2796 | if (!(xfs_dir_isempty(target_ip)) || |
2797 | (target_ip->i_d.di_nlink > 2)) { | 2797 | (target_ip->i_d.di_nlink > 2)) { |
2798 | error = XFS_ERROR(EEXIST); | 2798 | error = -EEXIST; |
2799 | goto error_return; | 2799 | goto error_return; |
2800 | } | 2800 | } |
2801 | } | 2801 | } |
@@ -2847,7 +2847,7 @@ xfs_rename( | |||
2847 | error = xfs_dir_replace(tp, src_ip, &xfs_name_dotdot, | 2847 | error = xfs_dir_replace(tp, src_ip, &xfs_name_dotdot, |
2848 | target_dp->i_ino, | 2848 | target_dp->i_ino, |
2849 | &first_block, &free_list, spaceres); | 2849 | &first_block, &free_list, spaceres); |
2850 | ASSERT(error != EEXIST); | 2850 | ASSERT(error != -EEXIST); |
2851 | if (error) | 2851 | if (error) |
2852 | goto abort_return; | 2852 | goto abort_return; |
2853 | } | 2853 | } |
@@ -3055,7 +3055,7 @@ cluster_corrupt_out: | |||
3055 | if (bp->b_iodone) { | 3055 | if (bp->b_iodone) { |
3056 | XFS_BUF_UNDONE(bp); | 3056 | XFS_BUF_UNDONE(bp); |
3057 | xfs_buf_stale(bp); | 3057 | xfs_buf_stale(bp); |
3058 | xfs_buf_ioerror(bp, EIO); | 3058 | xfs_buf_ioerror(bp, -EIO); |
3059 | xfs_buf_ioend(bp, 0); | 3059 | xfs_buf_ioend(bp, 0); |
3060 | } else { | 3060 | } else { |
3061 | xfs_buf_stale(bp); | 3061 | xfs_buf_stale(bp); |
@@ -3069,7 +3069,7 @@ cluster_corrupt_out: | |||
3069 | xfs_iflush_abort(iq, false); | 3069 | xfs_iflush_abort(iq, false); |
3070 | kmem_free(ilist); | 3070 | kmem_free(ilist); |
3071 | xfs_perag_put(pag); | 3071 | xfs_perag_put(pag); |
3072 | return XFS_ERROR(EFSCORRUPTED); | 3072 | return -EFSCORRUPTED; |
3073 | } | 3073 | } |
3074 | 3074 | ||
3075 | /* | 3075 | /* |
@@ -3124,7 +3124,7 @@ xfs_iflush( | |||
3124 | * as we wait for an empty AIL as part of the unmount process. | 3124 | * as we wait for an empty AIL as part of the unmount process. |
3125 | */ | 3125 | */ |
3126 | if (XFS_FORCED_SHUTDOWN(mp)) { | 3126 | if (XFS_FORCED_SHUTDOWN(mp)) { |
3127 | error = XFS_ERROR(EIO); | 3127 | error = -EIO; |
3128 | goto abort_out; | 3128 | goto abort_out; |
3129 | } | 3129 | } |
3130 | 3130 | ||
@@ -3167,7 +3167,7 @@ corrupt_out: | |||
3167 | xfs_buf_relse(bp); | 3167 | xfs_buf_relse(bp); |
3168 | xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); | 3168 | xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); |
3169 | cluster_corrupt_out: | 3169 | cluster_corrupt_out: |
3170 | error = XFS_ERROR(EFSCORRUPTED); | 3170 | error = -EFSCORRUPTED; |
3171 | abort_out: | 3171 | abort_out: |
3172 | /* | 3172 | /* |
3173 | * Unlocks the flush lock | 3173 | * Unlocks the flush lock |
@@ -3331,5 +3331,5 @@ xfs_iflush_int( | |||
3331 | return 0; | 3331 | return 0; |
3332 | 3332 | ||
3333 | corrupt_out: | 3333 | corrupt_out: |
3334 | return XFS_ERROR(EFSCORRUPTED); | 3334 | return -EFSCORRUPTED; |
3335 | } | 3335 | } |
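The xfs_create() hunks above also show the ENOSPC fallback chain that several of these comparisons protect: try the full reservation, on -ENOSPC flush outstanding delayed allocations and retry, and finally fall back to a zero-block "no-allocation" reservation. A standalone sketch of that pattern — the reserve/flush helpers below are toys, not the XFS transaction code:

/*
 * Standalone sketch of the ENOSPC fallback chain seen in xfs_create():
 * full reservation, flush-and-retry, then a zero-block reservation.
 */
#include <errno.h>
#include <stdio.h>

static long free_blocks = 10;

static int reserve(long blocks)
{
        if (blocks > free_blocks)
                return -ENOSPC;
        free_blocks -= blocks;
        return 0;
}

static void flush_delalloc(void)
{
        /* Pretend flushing delayed allocations returns some space. */
        free_blocks += 20;
}

int main(void)
{
        long resblks = 25;
        int error = reserve(resblks);

        if (error == -ENOSPC) {         /* flush outstanding work, retry */
                flush_delalloc();
                error = reserve(resblks);
        }
        if (error == -ENOSPC) {         /* last resort: no allocation */
                resblks = 0;
                error = reserve(0);
        }
        printf("error=%d resblks=%ld\n", error, resblks);
        return 0;
}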
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index f72bffa67266..c10e3fadd9af 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -398,4 +398,14 @@ do { \ | |||
398 | 398 | ||
399 | extern struct kmem_zone *xfs_inode_zone; | 399 | extern struct kmem_zone *xfs_inode_zone; |
400 | 400 | ||
401 | /* | ||
402 | * Flags for read/write calls | ||
403 | */ | ||
404 | #define XFS_IO_ISDIRECT 0x00001 /* bypass page cache */ | ||
405 | #define XFS_IO_INVIS 0x00002 /* don't update inode timestamps */ | ||
406 | |||
407 | #define XFS_IO_FLAGS \ | ||
408 | { XFS_IO_ISDIRECT, "DIRECT" }, \ | ||
409 | { XFS_IO_INVIS, "INVIS"} | ||
410 | |||
401 | #endif /* __XFS_INODE_H__ */ | 411 | #endif /* __XFS_INODE_H__ */ |
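The XFS_IO_FLAGS table added to xfs_inode.h pairs each flag with a printable name. Tables in this shape are typically consumed by the kernel's tracing macros (for example __print_flags) to render flag words in trace output, although that use is not shown in this diff. A standalone sketch of the same flag-to-string expansion in plain C:

/*
 * Standalone sketch of expanding a { flag, "name" } table such as
 * XFS_IO_FLAGS into a readable string; not the kernel tracing code.
 */
#include <stdio.h>

#define IO_ISDIRECT     0x1
#define IO_INVIS        0x2

static const struct {
        unsigned int mask;
        const char *name;
} io_flags[] = {
        { IO_ISDIRECT, "DIRECT" },
        { IO_INVIS,    "INVIS" },
};

static void print_io_flags(unsigned int flags)
{
        const char *sep = "";

        for (size_t i = 0; i < sizeof(io_flags) / sizeof(io_flags[0]); i++) {
                if (flags & io_flags[i].mask) {
                        printf("%s%s", sep, io_flags[i].name);
                        sep = "|";
                }
        }
        printf("\n");
}

int main(void)
{
        print_io_flags(IO_ISDIRECT | IO_INVIS); /* prints DIRECT|INVIS */
        return 0;
}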
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index a640137b3573..de5a7be36e60 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -788,5 +788,5 @@ xfs_inode_item_format_convert( | |||
788 | in_f->ilf_boffset = in_f64->ilf_boffset; | 788 | in_f->ilf_boffset = in_f64->ilf_boffset; |
789 | return 0; | 789 | return 0; |
790 | } | 790 | } |
791 | return EFSCORRUPTED; | 791 | return -EFSCORRUPTED; |
792 | } | 792 | } |
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index 8bc1bbce7451..3799695b9249 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -207,7 +207,7 @@ xfs_open_by_handle( | |||
207 | struct path path; | 207 | struct path path; |
208 | 208 | ||
209 | if (!capable(CAP_SYS_ADMIN)) | 209 | if (!capable(CAP_SYS_ADMIN)) |
210 | return -XFS_ERROR(EPERM); | 210 | return -EPERM; |
211 | 211 | ||
212 | dentry = xfs_handlereq_to_dentry(parfilp, hreq); | 212 | dentry = xfs_handlereq_to_dentry(parfilp, hreq); |
213 | if (IS_ERR(dentry)) | 213 | if (IS_ERR(dentry)) |
@@ -216,7 +216,7 @@ xfs_open_by_handle( | |||
216 | 216 | ||
217 | /* Restrict xfs_open_by_handle to directories & regular files. */ | 217 | /* Restrict xfs_open_by_handle to directories & regular files. */ |
218 | if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode))) { | 218 | if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode))) { |
219 | error = -XFS_ERROR(EPERM); | 219 | error = -EPERM; |
220 | goto out_dput; | 220 | goto out_dput; |
221 | } | 221 | } |
222 | 222 | ||
@@ -228,18 +228,18 @@ xfs_open_by_handle( | |||
228 | fmode = OPEN_FMODE(permflag); | 228 | fmode = OPEN_FMODE(permflag); |
229 | if ((!(permflag & O_APPEND) || (permflag & O_TRUNC)) && | 229 | if ((!(permflag & O_APPEND) || (permflag & O_TRUNC)) && |
230 | (fmode & FMODE_WRITE) && IS_APPEND(inode)) { | 230 | (fmode & FMODE_WRITE) && IS_APPEND(inode)) { |
231 | error = -XFS_ERROR(EPERM); | 231 | error = -EPERM; |
232 | goto out_dput; | 232 | goto out_dput; |
233 | } | 233 | } |
234 | 234 | ||
235 | if ((fmode & FMODE_WRITE) && IS_IMMUTABLE(inode)) { | 235 | if ((fmode & FMODE_WRITE) && IS_IMMUTABLE(inode)) { |
236 | error = -XFS_ERROR(EACCES); | 236 | error = -EACCES; |
237 | goto out_dput; | 237 | goto out_dput; |
238 | } | 238 | } |
239 | 239 | ||
240 | /* Can't write directories. */ | 240 | /* Can't write directories. */ |
241 | if (S_ISDIR(inode->i_mode) && (fmode & FMODE_WRITE)) { | 241 | if (S_ISDIR(inode->i_mode) && (fmode & FMODE_WRITE)) { |
242 | error = -XFS_ERROR(EISDIR); | 242 | error = -EISDIR; |
243 | goto out_dput; | 243 | goto out_dput; |
244 | } | 244 | } |
245 | 245 | ||
@@ -282,7 +282,7 @@ xfs_readlink_by_handle( | |||
282 | int error; | 282 | int error; |
283 | 283 | ||
284 | if (!capable(CAP_SYS_ADMIN)) | 284 | if (!capable(CAP_SYS_ADMIN)) |
285 | return -XFS_ERROR(EPERM); | 285 | return -EPERM; |
286 | 286 | ||
287 | dentry = xfs_handlereq_to_dentry(parfilp, hreq); | 287 | dentry = xfs_handlereq_to_dentry(parfilp, hreq); |
288 | if (IS_ERR(dentry)) | 288 | if (IS_ERR(dentry)) |
@@ -290,22 +290,22 @@ xfs_readlink_by_handle( | |||
290 | 290 | ||
291 | /* Restrict this handle operation to symlinks only. */ | 291 | /* Restrict this handle operation to symlinks only. */ |
292 | if (!S_ISLNK(dentry->d_inode->i_mode)) { | 292 | if (!S_ISLNK(dentry->d_inode->i_mode)) { |
293 | error = -XFS_ERROR(EINVAL); | 293 | error = -EINVAL; |
294 | goto out_dput; | 294 | goto out_dput; |
295 | } | 295 | } |
296 | 296 | ||
297 | if (copy_from_user(&olen, hreq->ohandlen, sizeof(__u32))) { | 297 | if (copy_from_user(&olen, hreq->ohandlen, sizeof(__u32))) { |
298 | error = -XFS_ERROR(EFAULT); | 298 | error = -EFAULT; |
299 | goto out_dput; | 299 | goto out_dput; |
300 | } | 300 | } |
301 | 301 | ||
302 | link = kmalloc(MAXPATHLEN+1, GFP_KERNEL); | 302 | link = kmalloc(MAXPATHLEN+1, GFP_KERNEL); |
303 | if (!link) { | 303 | if (!link) { |
304 | error = -XFS_ERROR(ENOMEM); | 304 | error = -ENOMEM; |
305 | goto out_dput; | 305 | goto out_dput; |
306 | } | 306 | } |
307 | 307 | ||
308 | error = -xfs_readlink(XFS_I(dentry->d_inode), link); | 308 | error = xfs_readlink(XFS_I(dentry->d_inode), link); |
309 | if (error) | 309 | if (error) |
310 | goto out_kfree; | 310 | goto out_kfree; |
311 | error = readlink_copy(hreq->ohandle, olen, link); | 311 | error = readlink_copy(hreq->ohandle, olen, link); |
@@ -330,10 +330,10 @@ xfs_set_dmattrs( | |||
330 | int error; | 330 | int error; |
331 | 331 | ||
332 | if (!capable(CAP_SYS_ADMIN)) | 332 | if (!capable(CAP_SYS_ADMIN)) |
333 | return XFS_ERROR(EPERM); | 333 | return -EPERM; |
334 | 334 | ||
335 | if (XFS_FORCED_SHUTDOWN(mp)) | 335 | if (XFS_FORCED_SHUTDOWN(mp)) |
336 | return XFS_ERROR(EIO); | 336 | return -EIO; |
337 | 337 | ||
338 | tp = xfs_trans_alloc(mp, XFS_TRANS_SET_DMATTRS); | 338 | tp = xfs_trans_alloc(mp, XFS_TRANS_SET_DMATTRS); |
339 | error = xfs_trans_reserve(tp, &M_RES(mp)->tr_ichange, 0, 0); | 339 | error = xfs_trans_reserve(tp, &M_RES(mp)->tr_ichange, 0, 0); |
@@ -364,9 +364,9 @@ xfs_fssetdm_by_handle( | |||
364 | struct dentry *dentry; | 364 | struct dentry *dentry; |
365 | 365 | ||
366 | if (!capable(CAP_MKNOD)) | 366 | if (!capable(CAP_MKNOD)) |
367 | return -XFS_ERROR(EPERM); | 367 | return -EPERM; |
368 | if (copy_from_user(&dmhreq, arg, sizeof(xfs_fsop_setdm_handlereq_t))) | 368 | if (copy_from_user(&dmhreq, arg, sizeof(xfs_fsop_setdm_handlereq_t))) |
369 | return -XFS_ERROR(EFAULT); | 369 | return -EFAULT; |
370 | 370 | ||
371 | error = mnt_want_write_file(parfilp); | 371 | error = mnt_want_write_file(parfilp); |
372 | if (error) | 372 | if (error) |
@@ -379,16 +379,16 @@ xfs_fssetdm_by_handle( | |||
379 | } | 379 | } |
380 | 380 | ||
381 | if (IS_IMMUTABLE(dentry->d_inode) || IS_APPEND(dentry->d_inode)) { | 381 | if (IS_IMMUTABLE(dentry->d_inode) || IS_APPEND(dentry->d_inode)) { |
382 | error = -XFS_ERROR(EPERM); | 382 | error = -EPERM; |
383 | goto out; | 383 | goto out; |
384 | } | 384 | } |
385 | 385 | ||
386 | if (copy_from_user(&fsd, dmhreq.data, sizeof(fsd))) { | 386 | if (copy_from_user(&fsd, dmhreq.data, sizeof(fsd))) { |
387 | error = -XFS_ERROR(EFAULT); | 387 | error = -EFAULT; |
388 | goto out; | 388 | goto out; |
389 | } | 389 | } |
390 | 390 | ||
391 | error = -xfs_set_dmattrs(XFS_I(dentry->d_inode), fsd.fsd_dmevmask, | 391 | error = xfs_set_dmattrs(XFS_I(dentry->d_inode), fsd.fsd_dmevmask, |
392 | fsd.fsd_dmstate); | 392 | fsd.fsd_dmstate); |
393 | 393 | ||
394 | out: | 394 | out: |
@@ -409,18 +409,18 @@ xfs_attrlist_by_handle( | |||
409 | char *kbuf; | 409 | char *kbuf; |
410 | 410 | ||
411 | if (!capable(CAP_SYS_ADMIN)) | 411 | if (!capable(CAP_SYS_ADMIN)) |
412 | return -XFS_ERROR(EPERM); | 412 | return -EPERM; |
413 | if (copy_from_user(&al_hreq, arg, sizeof(xfs_fsop_attrlist_handlereq_t))) | 413 | if (copy_from_user(&al_hreq, arg, sizeof(xfs_fsop_attrlist_handlereq_t))) |
414 | return -XFS_ERROR(EFAULT); | 414 | return -EFAULT; |
415 | if (al_hreq.buflen < sizeof(struct attrlist) || | 415 | if (al_hreq.buflen < sizeof(struct attrlist) || |
416 | al_hreq.buflen > XATTR_LIST_MAX) | 416 | al_hreq.buflen > XATTR_LIST_MAX) |
417 | return -XFS_ERROR(EINVAL); | 417 | return -EINVAL; |
418 | 418 | ||
419 | /* | 419 | /* |
420 | * Reject flags, only allow namespaces. | 420 | * Reject flags, only allow namespaces. |
421 | */ | 421 | */ |
422 | if (al_hreq.flags & ~(ATTR_ROOT | ATTR_SECURE)) | 422 | if (al_hreq.flags & ~(ATTR_ROOT | ATTR_SECURE)) |
423 | return -XFS_ERROR(EINVAL); | 423 | return -EINVAL; |
424 | 424 | ||
425 | dentry = xfs_handlereq_to_dentry(parfilp, &al_hreq.hreq); | 425 | dentry = xfs_handlereq_to_dentry(parfilp, &al_hreq.hreq); |
426 | if (IS_ERR(dentry)) | 426 | if (IS_ERR(dentry)) |
@@ -431,7 +431,7 @@ xfs_attrlist_by_handle( | |||
431 | goto out_dput; | 431 | goto out_dput; |
432 | 432 | ||
433 | cursor = (attrlist_cursor_kern_t *)&al_hreq.pos; | 433 | cursor = (attrlist_cursor_kern_t *)&al_hreq.pos; |
434 | error = -xfs_attr_list(XFS_I(dentry->d_inode), kbuf, al_hreq.buflen, | 434 | error = xfs_attr_list(XFS_I(dentry->d_inode), kbuf, al_hreq.buflen, |
435 | al_hreq.flags, cursor); | 435 | al_hreq.flags, cursor); |
436 | if (error) | 436 | if (error) |
437 | goto out_kfree; | 437 | goto out_kfree; |
@@ -455,20 +455,20 @@ xfs_attrmulti_attr_get( | |||
455 | __uint32_t flags) | 455 | __uint32_t flags) |
456 | { | 456 | { |
457 | unsigned char *kbuf; | 457 | unsigned char *kbuf; |
458 | int error = EFAULT; | 458 | int error = -EFAULT; |
459 | 459 | ||
460 | if (*len > XATTR_SIZE_MAX) | 460 | if (*len > XATTR_SIZE_MAX) |
461 | return EINVAL; | 461 | return -EINVAL; |
462 | kbuf = kmem_zalloc_large(*len, KM_SLEEP); | 462 | kbuf = kmem_zalloc_large(*len, KM_SLEEP); |
463 | if (!kbuf) | 463 | if (!kbuf) |
464 | return ENOMEM; | 464 | return -ENOMEM; |
465 | 465 | ||
466 | error = xfs_attr_get(XFS_I(inode), name, kbuf, (int *)len, flags); | 466 | error = xfs_attr_get(XFS_I(inode), name, kbuf, (int *)len, flags); |
467 | if (error) | 467 | if (error) |
468 | goto out_kfree; | 468 | goto out_kfree; |
469 | 469 | ||
470 | if (copy_to_user(ubuf, kbuf, *len)) | 470 | if (copy_to_user(ubuf, kbuf, *len)) |
471 | error = EFAULT; | 471 | error = -EFAULT; |
472 | 472 | ||
473 | out_kfree: | 473 | out_kfree: |
474 | kmem_free(kbuf); | 474 | kmem_free(kbuf); |
@@ -484,20 +484,17 @@ xfs_attrmulti_attr_set( | |||
484 | __uint32_t flags) | 484 | __uint32_t flags) |
485 | { | 485 | { |
486 | unsigned char *kbuf; | 486 | unsigned char *kbuf; |
487 | int error = EFAULT; | ||
488 | 487 | ||
489 | if (IS_IMMUTABLE(inode) || IS_APPEND(inode)) | 488 | if (IS_IMMUTABLE(inode) || IS_APPEND(inode)) |
490 | return EPERM; | 489 | return -EPERM; |
491 | if (len > XATTR_SIZE_MAX) | 490 | if (len > XATTR_SIZE_MAX) |
492 | return EINVAL; | 491 | return -EINVAL; |
493 | 492 | ||
494 | kbuf = memdup_user(ubuf, len); | 493 | kbuf = memdup_user(ubuf, len); |
495 | if (IS_ERR(kbuf)) | 494 | if (IS_ERR(kbuf)) |
496 | return PTR_ERR(kbuf); | 495 | return PTR_ERR(kbuf); |
497 | 496 | ||
498 | error = xfs_attr_set(XFS_I(inode), name, kbuf, len, flags); | 497 | return xfs_attr_set(XFS_I(inode), name, kbuf, len, flags); |
499 | |||
500 | return error; | ||
501 | } | 498 | } |
502 | 499 | ||
503 | int | 500 | int |
@@ -507,7 +504,7 @@ xfs_attrmulti_attr_remove( | |||
507 | __uint32_t flags) | 504 | __uint32_t flags) |
508 | { | 505 | { |
509 | if (IS_IMMUTABLE(inode) || IS_APPEND(inode)) | 506 | if (IS_IMMUTABLE(inode) || IS_APPEND(inode)) |
510 | return EPERM; | 507 | return -EPERM; |
511 | return xfs_attr_remove(XFS_I(inode), name, flags); | 508 | return xfs_attr_remove(XFS_I(inode), name, flags); |
512 | } | 509 | } |
513 | 510 | ||
@@ -524,9 +521,9 @@ xfs_attrmulti_by_handle( | |||
524 | unsigned char *attr_name; | 521 | unsigned char *attr_name; |
525 | 522 | ||
526 | if (!capable(CAP_SYS_ADMIN)) | 523 | if (!capable(CAP_SYS_ADMIN)) |
527 | return -XFS_ERROR(EPERM); | 524 | return -EPERM; |
528 | if (copy_from_user(&am_hreq, arg, sizeof(xfs_fsop_attrmulti_handlereq_t))) | 525 | if (copy_from_user(&am_hreq, arg, sizeof(xfs_fsop_attrmulti_handlereq_t))) |
529 | return -XFS_ERROR(EFAULT); | 526 | return -EFAULT; |
530 | 527 | ||
531 | /* overflow check */ | 528 | /* overflow check */ |
532 | if (am_hreq.opcount >= INT_MAX / sizeof(xfs_attr_multiop_t)) | 529 | if (am_hreq.opcount >= INT_MAX / sizeof(xfs_attr_multiop_t)) |
@@ -536,18 +533,18 @@ xfs_attrmulti_by_handle( | |||
536 | if (IS_ERR(dentry)) | 533 | if (IS_ERR(dentry)) |
537 | return PTR_ERR(dentry); | 534 | return PTR_ERR(dentry); |
538 | 535 | ||
539 | error = E2BIG; | 536 | error = -E2BIG; |
540 | size = am_hreq.opcount * sizeof(xfs_attr_multiop_t); | 537 | size = am_hreq.opcount * sizeof(xfs_attr_multiop_t); |
541 | if (!size || size > 16 * PAGE_SIZE) | 538 | if (!size || size > 16 * PAGE_SIZE) |
542 | goto out_dput; | 539 | goto out_dput; |
543 | 540 | ||
544 | ops = memdup_user(am_hreq.ops, size); | 541 | ops = memdup_user(am_hreq.ops, size); |
545 | if (IS_ERR(ops)) { | 542 | if (IS_ERR(ops)) { |
546 | error = -PTR_ERR(ops); | 543 | error = PTR_ERR(ops); |
547 | goto out_dput; | 544 | goto out_dput; |
548 | } | 545 | } |
549 | 546 | ||
550 | error = ENOMEM; | 547 | error = -ENOMEM; |
551 | attr_name = kmalloc(MAXNAMELEN, GFP_KERNEL); | 548 | attr_name = kmalloc(MAXNAMELEN, GFP_KERNEL); |
552 | if (!attr_name) | 549 | if (!attr_name) |
553 | goto out_kfree_ops; | 550 | goto out_kfree_ops; |
@@ -557,7 +554,7 @@ xfs_attrmulti_by_handle( | |||
557 | ops[i].am_error = strncpy_from_user((char *)attr_name, | 554 | ops[i].am_error = strncpy_from_user((char *)attr_name, |
558 | ops[i].am_attrname, MAXNAMELEN); | 555 | ops[i].am_attrname, MAXNAMELEN); |
559 | if (ops[i].am_error == 0 || ops[i].am_error == MAXNAMELEN) | 556 | if (ops[i].am_error == 0 || ops[i].am_error == MAXNAMELEN) |
560 | error = ERANGE; | 557 | error = -ERANGE; |
561 | if (ops[i].am_error < 0) | 558 | if (ops[i].am_error < 0) |
562 | break; | 559 | break; |
563 | 560 | ||
@@ -588,19 +585,19 @@ xfs_attrmulti_by_handle( | |||
588 | mnt_drop_write_file(parfilp); | 585 | mnt_drop_write_file(parfilp); |
589 | break; | 586 | break; |
590 | default: | 587 | default: |
591 | ops[i].am_error = EINVAL; | 588 | ops[i].am_error = -EINVAL; |
592 | } | 589 | } |
593 | } | 590 | } |
594 | 591 | ||
595 | if (copy_to_user(am_hreq.ops, ops, size)) | 592 | if (copy_to_user(am_hreq.ops, ops, size)) |
596 | error = XFS_ERROR(EFAULT); | 593 | error = -EFAULT; |
597 | 594 | ||
598 | kfree(attr_name); | 595 | kfree(attr_name); |
599 | out_kfree_ops: | 596 | out_kfree_ops: |
600 | kfree(ops); | 597 | kfree(ops); |
601 | out_dput: | 598 | out_dput: |
602 | dput(dentry); | 599 | dput(dentry); |
603 | return -error; | 600 | return error; |
604 | } | 601 | } |
605 | 602 | ||
606 | int | 603 | int |
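With negative errnos used end to end, the hunk above can also drop the sign flips around memdup_user(): PTR_ERR() already yields a negative value, so error = -PTR_ERR(ops) becomes error = PTR_ERR(ops) and the final return -error becomes return error. Below is a standalone sketch of the ERR_PTR/PTR_ERR idiom behind that handling — a toy re-implementation for illustration only, not the kernel's definitions:

/*
 * Toy re-implementation of the ERR_PTR/PTR_ERR/IS_ERR idiom for
 * illustration; these are not the kernel's definitions.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_ERRNO 4095

static void *err_ptr(long error)
{
        return (void *)error;           /* fold the errno into the pointer */
}

static long ptr_err(const void *ptr)
{
        return (long)ptr;
}

static int is_err(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Stand-in for memdup_user(): duplicate a buffer or return an error pointer. */
static void *dup_buf(const void *src, size_t len)
{
        void *p = malloc(len);

        if (!p)
                return err_ptr(-ENOMEM);
        memcpy(p, src, len);
        return p;
}

int main(void)
{
        char msg[] = "ops";
        void *ops = dup_buf(msg, sizeof(msg));
        /* Negative errnos throughout: no sign flip when unpacking. */
        int error = is_err(ops) ? (int)ptr_err(ops) : 0;

        printf("error=%d\n", error);
        if (!error)
                free(ops);
        return 0;
}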
@@ -625,16 +622,16 @@ xfs_ioc_space( | |||
625 | */ | 622 | */ |
626 | if (!xfs_sb_version_hasextflgbit(&ip->i_mount->m_sb) && | 623 | if (!xfs_sb_version_hasextflgbit(&ip->i_mount->m_sb) && |
627 | !capable(CAP_SYS_ADMIN)) | 624 | !capable(CAP_SYS_ADMIN)) |
628 | return -XFS_ERROR(EPERM); | 625 | return -EPERM; |
629 | 626 | ||
630 | if (inode->i_flags & (S_IMMUTABLE|S_APPEND)) | 627 | if (inode->i_flags & (S_IMMUTABLE|S_APPEND)) |
631 | return -XFS_ERROR(EPERM); | 628 | return -EPERM; |
632 | 629 | ||
633 | if (!(filp->f_mode & FMODE_WRITE)) | 630 | if (!(filp->f_mode & FMODE_WRITE)) |
634 | return -XFS_ERROR(EBADF); | 631 | return -EBADF; |
635 | 632 | ||
636 | if (!S_ISREG(inode->i_mode)) | 633 | if (!S_ISREG(inode->i_mode)) |
637 | return -XFS_ERROR(EINVAL); | 634 | return -EINVAL; |
638 | 635 | ||
639 | error = mnt_want_write_file(filp); | 636 | error = mnt_want_write_file(filp); |
640 | if (error) | 637 | if (error) |
@@ -652,7 +649,7 @@ xfs_ioc_space( | |||
652 | bf->l_start += XFS_ISIZE(ip); | 649 | bf->l_start += XFS_ISIZE(ip); |
653 | break; | 650 | break; |
654 | default: | 651 | default: |
655 | error = XFS_ERROR(EINVAL); | 652 | error = -EINVAL; |
656 | goto out_unlock; | 653 | goto out_unlock; |
657 | } | 654 | } |
658 | 655 | ||
@@ -669,7 +666,7 @@ xfs_ioc_space( | |||
669 | case XFS_IOC_UNRESVSP: | 666 | case XFS_IOC_UNRESVSP: |
670 | case XFS_IOC_UNRESVSP64: | 667 | case XFS_IOC_UNRESVSP64: |
671 | if (bf->l_len <= 0) { | 668 | if (bf->l_len <= 0) { |
672 | error = XFS_ERROR(EINVAL); | 669 | error = -EINVAL; |
673 | goto out_unlock; | 670 | goto out_unlock; |
674 | } | 671 | } |
675 | break; | 672 | break; |
@@ -682,7 +679,7 @@ xfs_ioc_space( | |||
682 | bf->l_start > mp->m_super->s_maxbytes || | 679 | bf->l_start > mp->m_super->s_maxbytes || |
683 | bf->l_start + bf->l_len < 0 || | 680 | bf->l_start + bf->l_len < 0 || |
684 | bf->l_start + bf->l_len >= mp->m_super->s_maxbytes) { | 681 | bf->l_start + bf->l_len >= mp->m_super->s_maxbytes) { |
685 | error = XFS_ERROR(EINVAL); | 682 | error = -EINVAL; |
686 | goto out_unlock; | 683 | goto out_unlock; |
687 | } | 684 | } |
688 | 685 | ||
@@ -723,7 +720,7 @@ xfs_ioc_space( | |||
723 | break; | 720 | break; |
724 | default: | 721 | default: |
725 | ASSERT(0); | 722 | ASSERT(0); |
726 | error = XFS_ERROR(EINVAL); | 723 | error = -EINVAL; |
727 | } | 724 | } |
728 | 725 | ||
729 | if (error) | 726 | if (error) |
@@ -739,7 +736,7 @@ xfs_ioc_space( | |||
739 | xfs_ilock(ip, XFS_ILOCK_EXCL); | 736 | xfs_ilock(ip, XFS_ILOCK_EXCL); |
740 | xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); | 737 | xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); |
741 | 738 | ||
742 | if (!(ioflags & IO_INVIS)) { | 739 | if (!(ioflags & XFS_IO_INVIS)) { |
743 | ip->i_d.di_mode &= ~S_ISUID; | 740 | ip->i_d.di_mode &= ~S_ISUID; |
744 | if (ip->i_d.di_mode & S_IXGRP) | 741 | if (ip->i_d.di_mode & S_IXGRP) |
745 | ip->i_d.di_mode &= ~S_ISGID; | 742 | ip->i_d.di_mode &= ~S_ISGID; |
@@ -759,7 +756,7 @@ xfs_ioc_space( | |||
759 | out_unlock: | 756 | out_unlock: |
760 | xfs_iunlock(ip, XFS_IOLOCK_EXCL); | 757 | xfs_iunlock(ip, XFS_IOLOCK_EXCL); |
761 | mnt_drop_write_file(filp); | 758 | mnt_drop_write_file(filp); |
762 | return -error; | 759 | return error; |
763 | } | 760 | } |
764 | 761 | ||
765 | STATIC int | 762 | STATIC int |
@@ -781,41 +778,41 @@ xfs_ioc_bulkstat( | |||
781 | return -EPERM; | 778 | return -EPERM; |
782 | 779 | ||
783 | if (XFS_FORCED_SHUTDOWN(mp)) | 780 | if (XFS_FORCED_SHUTDOWN(mp)) |
784 | return -XFS_ERROR(EIO); | 781 | return -EIO; |
785 | 782 | ||
786 | if (copy_from_user(&bulkreq, arg, sizeof(xfs_fsop_bulkreq_t))) | 783 | if (copy_from_user(&bulkreq, arg, sizeof(xfs_fsop_bulkreq_t))) |
787 | return -XFS_ERROR(EFAULT); | 784 | return -EFAULT; |
788 | 785 | ||
789 | if (copy_from_user(&inlast, bulkreq.lastip, sizeof(__s64))) | 786 | if (copy_from_user(&inlast, bulkreq.lastip, sizeof(__s64))) |
790 | return -XFS_ERROR(EFAULT); | 787 | return -EFAULT; |
791 | 788 | ||
792 | if ((count = bulkreq.icount) <= 0) | 789 | if ((count = bulkreq.icount) <= 0) |
793 | return -XFS_ERROR(EINVAL); | 790 | return -EINVAL; |
794 | 791 | ||
795 | if (bulkreq.ubuffer == NULL) | 792 | if (bulkreq.ubuffer == NULL) |
796 | return -XFS_ERROR(EINVAL); | 793 | return -EINVAL; |
797 | 794 | ||
798 | if (cmd == XFS_IOC_FSINUMBERS) | 795 | if (cmd == XFS_IOC_FSINUMBERS) |
799 | error = xfs_inumbers(mp, &inlast, &count, | 796 | error = xfs_inumbers(mp, &inlast, &count, |
800 | bulkreq.ubuffer, xfs_inumbers_fmt); | 797 | bulkreq.ubuffer, xfs_inumbers_fmt); |
801 | else if (cmd == XFS_IOC_FSBULKSTAT_SINGLE) | 798 | else if (cmd == XFS_IOC_FSBULKSTAT_SINGLE) |
802 | error = xfs_bulkstat_single(mp, &inlast, | 799 | error = xfs_bulkstat_one(mp, inlast, bulkreq.ubuffer, |
803 | bulkreq.ubuffer, &done); | 800 | sizeof(xfs_bstat_t), NULL, &done); |
804 | else /* XFS_IOC_FSBULKSTAT */ | 801 | else /* XFS_IOC_FSBULKSTAT */ |
805 | error = xfs_bulkstat(mp, &inlast, &count, xfs_bulkstat_one, | 802 | error = xfs_bulkstat(mp, &inlast, &count, xfs_bulkstat_one, |
806 | sizeof(xfs_bstat_t), bulkreq.ubuffer, | 803 | sizeof(xfs_bstat_t), bulkreq.ubuffer, |
807 | &done); | 804 | &done); |
808 | 805 | ||
809 | if (error) | 806 | if (error) |
810 | return -error; | 807 | return error; |
811 | 808 | ||
812 | if (bulkreq.ocount != NULL) { | 809 | if (bulkreq.ocount != NULL) { |
813 | if (copy_to_user(bulkreq.lastip, &inlast, | 810 | if (copy_to_user(bulkreq.lastip, &inlast, |
814 | sizeof(xfs_ino_t))) | 811 | sizeof(xfs_ino_t))) |
815 | return -XFS_ERROR(EFAULT); | 812 | return -EFAULT; |
816 | 813 | ||
817 | if (copy_to_user(bulkreq.ocount, &count, sizeof(count))) | 814 | if (copy_to_user(bulkreq.ocount, &count, sizeof(count))) |
818 | return -XFS_ERROR(EFAULT); | 815 | return -EFAULT; |
819 | } | 816 | } |
820 | 817 | ||
821 | return 0; | 818 | return 0; |
@@ -831,7 +828,7 @@ xfs_ioc_fsgeometry_v1( | |||
831 | 828 | ||
832 | error = xfs_fs_geometry(mp, &fsgeo, 3); | 829 | error = xfs_fs_geometry(mp, &fsgeo, 3); |
833 | if (error) | 830 | if (error) |
834 | return -error; | 831 | return error; |
835 | 832 | ||
836 | /* | 833 | /* |
837 | * Caller should have passed an argument of type | 834 | * Caller should have passed an argument of type |
@@ -839,7 +836,7 @@ xfs_ioc_fsgeometry_v1( | |||
839 | * xfs_fsop_geom_t that xfs_fs_geometry() fills in. | 836 | * xfs_fsop_geom_t that xfs_fs_geometry() fills in. |
840 | */ | 837 | */ |
841 | if (copy_to_user(arg, &fsgeo, sizeof(xfs_fsop_geom_v1_t))) | 838 | if (copy_to_user(arg, &fsgeo, sizeof(xfs_fsop_geom_v1_t))) |
842 | return -XFS_ERROR(EFAULT); | 839 | return -EFAULT; |
843 | return 0; | 840 | return 0; |
844 | } | 841 | } |
845 | 842 | ||
@@ -853,10 +850,10 @@ xfs_ioc_fsgeometry( | |||
853 | 850 | ||
854 | error = xfs_fs_geometry(mp, &fsgeo, 4); | 851 | error = xfs_fs_geometry(mp, &fsgeo, 4); |
855 | if (error) | 852 | if (error) |
856 | return -error; | 853 | return error; |
857 | 854 | ||
858 | if (copy_to_user(arg, &fsgeo, sizeof(fsgeo))) | 855 | if (copy_to_user(arg, &fsgeo, sizeof(fsgeo))) |
859 | return -XFS_ERROR(EFAULT); | 856 | return -EFAULT; |
860 | return 0; | 857 | return 0; |
861 | } | 858 | } |
862 | 859 | ||
@@ -1041,16 +1038,16 @@ xfs_ioctl_setattr( | |||
1041 | trace_xfs_ioctl_setattr(ip); | 1038 | trace_xfs_ioctl_setattr(ip); |
1042 | 1039 | ||
1043 | if (mp->m_flags & XFS_MOUNT_RDONLY) | 1040 | if (mp->m_flags & XFS_MOUNT_RDONLY) |
1044 | return XFS_ERROR(EROFS); | 1041 | return -EROFS; |
1045 | if (XFS_FORCED_SHUTDOWN(mp)) | 1042 | if (XFS_FORCED_SHUTDOWN(mp)) |
1046 | return XFS_ERROR(EIO); | 1043 | return -EIO; |
1047 | 1044 | ||
1048 | /* | 1045 | /* |
1049 | * Disallow 32bit project ids when projid32bit feature is not enabled. | 1046 | * Disallow 32bit project ids when projid32bit feature is not enabled. |
1050 | */ | 1047 | */ |
1051 | if ((mask & FSX_PROJID) && (fa->fsx_projid > (__uint16_t)-1) && | 1048 | if ((mask & FSX_PROJID) && (fa->fsx_projid > (__uint16_t)-1) && |
1052 | !xfs_sb_version_hasprojid32bit(&ip->i_mount->m_sb)) | 1049 | !xfs_sb_version_hasprojid32bit(&ip->i_mount->m_sb)) |
1053 | return XFS_ERROR(EINVAL); | 1050 | return -EINVAL; |
1054 | 1051 | ||
1055 | /* | 1052 | /* |
1056 | * If disk quotas is on, we make sure that the dquots do exist on disk, | 1053 | * If disk quotas is on, we make sure that the dquots do exist on disk, |
@@ -1088,7 +1085,7 @@ xfs_ioctl_setattr( | |||
1088 | * CAP_FSETID capability is applicable. | 1085 | * CAP_FSETID capability is applicable. |
1089 | */ | 1086 | */ |
1090 | if (!inode_owner_or_capable(VFS_I(ip))) { | 1087 | if (!inode_owner_or_capable(VFS_I(ip))) { |
1091 | code = XFS_ERROR(EPERM); | 1088 | code = -EPERM; |
1092 | goto error_return; | 1089 | goto error_return; |
1093 | } | 1090 | } |
1094 | 1091 | ||
@@ -1099,7 +1096,7 @@ xfs_ioctl_setattr( | |||
1099 | */ | 1096 | */ |
1100 | if (mask & FSX_PROJID) { | 1097 | if (mask & FSX_PROJID) { |
1101 | if (current_user_ns() != &init_user_ns) { | 1098 | if (current_user_ns() != &init_user_ns) { |
1102 | code = XFS_ERROR(EINVAL); | 1099 | code = -EINVAL; |
1103 | goto error_return; | 1100 | goto error_return; |
1104 | } | 1101 | } |
1105 | 1102 | ||
@@ -1122,7 +1119,7 @@ xfs_ioctl_setattr( | |||
1122 | if (ip->i_d.di_nextents && | 1119 | if (ip->i_d.di_nextents && |
1123 | ((ip->i_d.di_extsize << mp->m_sb.sb_blocklog) != | 1120 | ((ip->i_d.di_extsize << mp->m_sb.sb_blocklog) != |
1124 | fa->fsx_extsize)) { | 1121 | fa->fsx_extsize)) { |
1125 | code = XFS_ERROR(EINVAL); /* EFBIG? */ | 1122 | code = -EINVAL; /* EFBIG? */ |
1126 | goto error_return; | 1123 | goto error_return; |
1127 | } | 1124 | } |
1128 | 1125 | ||
@@ -1141,7 +1138,7 @@ xfs_ioctl_setattr( | |||
1141 | 1138 | ||
1142 | extsize_fsb = XFS_B_TO_FSB(mp, fa->fsx_extsize); | 1139 | extsize_fsb = XFS_B_TO_FSB(mp, fa->fsx_extsize); |
1143 | if (extsize_fsb > MAXEXTLEN) { | 1140 | if (extsize_fsb > MAXEXTLEN) { |
1144 | code = XFS_ERROR(EINVAL); | 1141 | code = -EINVAL; |
1145 | goto error_return; | 1142 | goto error_return; |
1146 | } | 1143 | } |
1147 | 1144 | ||
@@ -1153,13 +1150,13 @@ xfs_ioctl_setattr( | |||
1153 | } else { | 1150 | } else { |
1154 | size = mp->m_sb.sb_blocksize; | 1151 | size = mp->m_sb.sb_blocksize; |
1155 | if (extsize_fsb > mp->m_sb.sb_agblocks / 2) { | 1152 | if (extsize_fsb > mp->m_sb.sb_agblocks / 2) { |
1156 | code = XFS_ERROR(EINVAL); | 1153 | code = -EINVAL; |
1157 | goto error_return; | 1154 | goto error_return; |
1158 | } | 1155 | } |
1159 | } | 1156 | } |
1160 | 1157 | ||
1161 | if (fa->fsx_extsize % size) { | 1158 | if (fa->fsx_extsize % size) { |
1162 | code = XFS_ERROR(EINVAL); | 1159 | code = -EINVAL; |
1163 | goto error_return; | 1160 | goto error_return; |
1164 | } | 1161 | } |
1165 | } | 1162 | } |
@@ -1173,7 +1170,7 @@ xfs_ioctl_setattr( | |||
1173 | if ((ip->i_d.di_nextents || ip->i_delayed_blks) && | 1170 | if ((ip->i_d.di_nextents || ip->i_delayed_blks) && |
1174 | (XFS_IS_REALTIME_INODE(ip)) != | 1171 | (XFS_IS_REALTIME_INODE(ip)) != |
1175 | (fa->fsx_xflags & XFS_XFLAG_REALTIME)) { | 1172 | (fa->fsx_xflags & XFS_XFLAG_REALTIME)) { |
1176 | code = XFS_ERROR(EINVAL); /* EFBIG? */ | 1173 | code = -EINVAL; /* EFBIG? */ |
1177 | goto error_return; | 1174 | goto error_return; |
1178 | } | 1175 | } |
1179 | 1176 | ||
@@ -1184,7 +1181,7 @@ xfs_ioctl_setattr( | |||
1184 | if ((mp->m_sb.sb_rblocks == 0) || | 1181 | if ((mp->m_sb.sb_rblocks == 0) || |
1185 | (mp->m_sb.sb_rextsize == 0) || | 1182 | (mp->m_sb.sb_rextsize == 0) || |
1186 | (ip->i_d.di_extsize % mp->m_sb.sb_rextsize)) { | 1183 | (ip->i_d.di_extsize % mp->m_sb.sb_rextsize)) { |
1187 | code = XFS_ERROR(EINVAL); | 1184 | code = -EINVAL; |
1188 | goto error_return; | 1185 | goto error_return; |
1189 | } | 1186 | } |
1190 | } | 1187 | } |
@@ -1198,7 +1195,7 @@ xfs_ioctl_setattr( | |||
1198 | (fa->fsx_xflags & | 1195 | (fa->fsx_xflags & |
1199 | (XFS_XFLAG_IMMUTABLE | XFS_XFLAG_APPEND))) && | 1196 | (XFS_XFLAG_IMMUTABLE | XFS_XFLAG_APPEND))) && |
1200 | !capable(CAP_LINUX_IMMUTABLE)) { | 1197 | !capable(CAP_LINUX_IMMUTABLE)) { |
1201 | code = XFS_ERROR(EPERM); | 1198 | code = -EPERM; |
1202 | goto error_return; | 1199 | goto error_return; |
1203 | } | 1200 | } |
1204 | } | 1201 | } |
@@ -1301,7 +1298,7 @@ xfs_ioc_fssetxattr( | |||
1301 | return error; | 1298 | return error; |
1302 | error = xfs_ioctl_setattr(ip, &fa, mask); | 1299 | error = xfs_ioctl_setattr(ip, &fa, mask); |
1303 | mnt_drop_write_file(filp); | 1300 | mnt_drop_write_file(filp); |
1304 | return -error; | 1301 | return error; |
1305 | } | 1302 | } |
1306 | 1303 | ||
1307 | STATIC int | 1304 | STATIC int |
@@ -1346,7 +1343,7 @@ xfs_ioc_setxflags( | |||
1346 | return error; | 1343 | return error; |
1347 | error = xfs_ioctl_setattr(ip, &fa, mask); | 1344 | error = xfs_ioctl_setattr(ip, &fa, mask); |
1348 | mnt_drop_write_file(filp); | 1345 | mnt_drop_write_file(filp); |
1349 | return -error; | 1346 | return error; |
1350 | } | 1347 | } |
1351 | 1348 | ||
1352 | STATIC int | 1349 | STATIC int |
@@ -1356,7 +1353,7 @@ xfs_getbmap_format(void **ap, struct getbmapx *bmv, int *full) | |||
1356 | 1353 | ||
1357 | /* copy only getbmap portion (not getbmapx) */ | 1354 | /* copy only getbmap portion (not getbmapx) */ |
1358 | if (copy_to_user(base, bmv, sizeof(struct getbmap))) | 1355 | if (copy_to_user(base, bmv, sizeof(struct getbmap))) |
1359 | return XFS_ERROR(EFAULT); | 1356 | return -EFAULT; |
1360 | 1357 | ||
1361 | *ap += sizeof(struct getbmap); | 1358 | *ap += sizeof(struct getbmap); |
1362 | return 0; | 1359 | return 0; |
@@ -1373,23 +1370,23 @@ xfs_ioc_getbmap( | |||
1373 | int error; | 1370 | int error; |
1374 | 1371 | ||
1375 | if (copy_from_user(&bmx, arg, sizeof(struct getbmapx))) | 1372 | if (copy_from_user(&bmx, arg, sizeof(struct getbmapx))) |
1376 | return -XFS_ERROR(EFAULT); | 1373 | return -EFAULT; |
1377 | 1374 | ||
1378 | if (bmx.bmv_count < 2) | 1375 | if (bmx.bmv_count < 2) |
1379 | return -XFS_ERROR(EINVAL); | 1376 | return -EINVAL; |
1380 | 1377 | ||
1381 | bmx.bmv_iflags = (cmd == XFS_IOC_GETBMAPA ? BMV_IF_ATTRFORK : 0); | 1378 | bmx.bmv_iflags = (cmd == XFS_IOC_GETBMAPA ? BMV_IF_ATTRFORK : 0); |
1382 | if (ioflags & IO_INVIS) | 1379 | if (ioflags & XFS_IO_INVIS) |
1383 | bmx.bmv_iflags |= BMV_IF_NO_DMAPI_READ; | 1380 | bmx.bmv_iflags |= BMV_IF_NO_DMAPI_READ; |
1384 | 1381 | ||
1385 | error = xfs_getbmap(ip, &bmx, xfs_getbmap_format, | 1382 | error = xfs_getbmap(ip, &bmx, xfs_getbmap_format, |
1386 | (struct getbmap *)arg+1); | 1383 | (struct getbmap *)arg+1); |
1387 | if (error) | 1384 | if (error) |
1388 | return -error; | 1385 | return error; |
1389 | 1386 | ||
1390 | /* copy back header - only size of getbmap */ | 1387 | /* copy back header - only size of getbmap */ |
1391 | if (copy_to_user(arg, &bmx, sizeof(struct getbmap))) | 1388 | if (copy_to_user(arg, &bmx, sizeof(struct getbmap))) |
1392 | return -XFS_ERROR(EFAULT); | 1389 | return -EFAULT; |
1393 | return 0; | 1390 | return 0; |
1394 | } | 1391 | } |
1395 | 1392 | ||
@@ -1399,7 +1396,7 @@ xfs_getbmapx_format(void **ap, struct getbmapx *bmv, int *full) | |||
1399 | struct getbmapx __user *base = *ap; | 1396 | struct getbmapx __user *base = *ap; |
1400 | 1397 | ||
1401 | if (copy_to_user(base, bmv, sizeof(struct getbmapx))) | 1398 | if (copy_to_user(base, bmv, sizeof(struct getbmapx))) |
1402 | return XFS_ERROR(EFAULT); | 1399 | return -EFAULT; |
1403 | 1400 | ||
1404 | *ap += sizeof(struct getbmapx); | 1401 | *ap += sizeof(struct getbmapx); |
1405 | return 0; | 1402 | return 0; |
@@ -1414,22 +1411,22 @@ xfs_ioc_getbmapx( | |||
1414 | int error; | 1411 | int error; |
1415 | 1412 | ||
1416 | if (copy_from_user(&bmx, arg, sizeof(bmx))) | 1413 | if (copy_from_user(&bmx, arg, sizeof(bmx))) |
1417 | return -XFS_ERROR(EFAULT); | 1414 | return -EFAULT; |
1418 | 1415 | ||
1419 | if (bmx.bmv_count < 2) | 1416 | if (bmx.bmv_count < 2) |
1420 | return -XFS_ERROR(EINVAL); | 1417 | return -EINVAL; |
1421 | 1418 | ||
1422 | if (bmx.bmv_iflags & (~BMV_IF_VALID)) | 1419 | if (bmx.bmv_iflags & (~BMV_IF_VALID)) |
1423 | return -XFS_ERROR(EINVAL); | 1420 | return -EINVAL; |
1424 | 1421 | ||
1425 | error = xfs_getbmap(ip, &bmx, xfs_getbmapx_format, | 1422 | error = xfs_getbmap(ip, &bmx, xfs_getbmapx_format, |
1426 | (struct getbmapx *)arg+1); | 1423 | (struct getbmapx *)arg+1); |
1427 | if (error) | 1424 | if (error) |
1428 | return -error; | 1425 | return error; |
1429 | 1426 | ||
1430 | /* copy back header */ | 1427 | /* copy back header */ |
1431 | if (copy_to_user(arg, &bmx, sizeof(struct getbmapx))) | 1428 | if (copy_to_user(arg, &bmx, sizeof(struct getbmapx))) |
1432 | return -XFS_ERROR(EFAULT); | 1429 | return -EFAULT; |
1433 | 1430 | ||
1434 | return 0; | 1431 | return 0; |
1435 | } | 1432 | } |
@@ -1445,33 +1442,33 @@ xfs_ioc_swapext( | |||
1445 | /* Pull information for the target fd */ | 1442 | /* Pull information for the target fd */ |
1446 | f = fdget((int)sxp->sx_fdtarget); | 1443 | f = fdget((int)sxp->sx_fdtarget); |
1447 | if (!f.file) { | 1444 | if (!f.file) { |
1448 | error = XFS_ERROR(EINVAL); | 1445 | error = -EINVAL; |
1449 | goto out; | 1446 | goto out; |
1450 | } | 1447 | } |
1451 | 1448 | ||
1452 | if (!(f.file->f_mode & FMODE_WRITE) || | 1449 | if (!(f.file->f_mode & FMODE_WRITE) || |
1453 | !(f.file->f_mode & FMODE_READ) || | 1450 | !(f.file->f_mode & FMODE_READ) || |
1454 | (f.file->f_flags & O_APPEND)) { | 1451 | (f.file->f_flags & O_APPEND)) { |
1455 | error = XFS_ERROR(EBADF); | 1452 | error = -EBADF; |
1456 | goto out_put_file; | 1453 | goto out_put_file; |
1457 | } | 1454 | } |
1458 | 1455 | ||
1459 | tmp = fdget((int)sxp->sx_fdtmp); | 1456 | tmp = fdget((int)sxp->sx_fdtmp); |
1460 | if (!tmp.file) { | 1457 | if (!tmp.file) { |
1461 | error = XFS_ERROR(EINVAL); | 1458 | error = -EINVAL; |
1462 | goto out_put_file; | 1459 | goto out_put_file; |
1463 | } | 1460 | } |
1464 | 1461 | ||
1465 | if (!(tmp.file->f_mode & FMODE_WRITE) || | 1462 | if (!(tmp.file->f_mode & FMODE_WRITE) || |
1466 | !(tmp.file->f_mode & FMODE_READ) || | 1463 | !(tmp.file->f_mode & FMODE_READ) || |
1467 | (tmp.file->f_flags & O_APPEND)) { | 1464 | (tmp.file->f_flags & O_APPEND)) { |
1468 | error = XFS_ERROR(EBADF); | 1465 | error = -EBADF; |
1469 | goto out_put_tmp_file; | 1466 | goto out_put_tmp_file; |
1470 | } | 1467 | } |
1471 | 1468 | ||
1472 | if (IS_SWAPFILE(file_inode(f.file)) || | 1469 | if (IS_SWAPFILE(file_inode(f.file)) || |
1473 | IS_SWAPFILE(file_inode(tmp.file))) { | 1470 | IS_SWAPFILE(file_inode(tmp.file))) { |
1474 | error = XFS_ERROR(EINVAL); | 1471 | error = -EINVAL; |
1475 | goto out_put_tmp_file; | 1472 | goto out_put_tmp_file; |
1476 | } | 1473 | } |
1477 | 1474 | ||
@@ -1479,17 +1476,17 @@ xfs_ioc_swapext( | |||
1479 | tip = XFS_I(file_inode(tmp.file)); | 1476 | tip = XFS_I(file_inode(tmp.file)); |
1480 | 1477 | ||
1481 | if (ip->i_mount != tip->i_mount) { | 1478 | if (ip->i_mount != tip->i_mount) { |
1482 | error = XFS_ERROR(EINVAL); | 1479 | error = -EINVAL; |
1483 | goto out_put_tmp_file; | 1480 | goto out_put_tmp_file; |
1484 | } | 1481 | } |
1485 | 1482 | ||
1486 | if (ip->i_ino == tip->i_ino) { | 1483 | if (ip->i_ino == tip->i_ino) { |
1487 | error = XFS_ERROR(EINVAL); | 1484 | error = -EINVAL; |
1488 | goto out_put_tmp_file; | 1485 | goto out_put_tmp_file; |
1489 | } | 1486 | } |
1490 | 1487 | ||
1491 | if (XFS_FORCED_SHUTDOWN(ip->i_mount)) { | 1488 | if (XFS_FORCED_SHUTDOWN(ip->i_mount)) { |
1492 | error = XFS_ERROR(EIO); | 1489 | error = -EIO; |
1493 | goto out_put_tmp_file; | 1490 | goto out_put_tmp_file; |
1494 | } | 1491 | } |
1495 | 1492 | ||
@@ -1523,7 +1520,7 @@ xfs_file_ioctl( | |||
1523 | int error; | 1520 | int error; |
1524 | 1521 | ||
1525 | if (filp->f_mode & FMODE_NOCMTIME) | 1522 | if (filp->f_mode & FMODE_NOCMTIME) |
1526 | ioflags |= IO_INVIS; | 1523 | ioflags |= XFS_IO_INVIS; |
1527 | 1524 | ||
1528 | trace_xfs_file_ioctl(ip); | 1525 | trace_xfs_file_ioctl(ip); |
1529 | 1526 | ||
@@ -1542,7 +1539,7 @@ xfs_file_ioctl( | |||
1542 | xfs_flock64_t bf; | 1539 | xfs_flock64_t bf; |
1543 | 1540 | ||
1544 | if (copy_from_user(&bf, arg, sizeof(bf))) | 1541 | if (copy_from_user(&bf, arg, sizeof(bf))) |
1545 | return -XFS_ERROR(EFAULT); | 1542 | return -EFAULT; |
1546 | return xfs_ioc_space(ip, inode, filp, ioflags, cmd, &bf); | 1543 | return xfs_ioc_space(ip, inode, filp, ioflags, cmd, &bf); |
1547 | } | 1544 | } |
1548 | case XFS_IOC_DIOINFO: { | 1545 | case XFS_IOC_DIOINFO: { |
@@ -1555,7 +1552,7 @@ xfs_file_ioctl( | |||
1555 | da.d_maxiosz = INT_MAX & ~(da.d_miniosz - 1); | 1552 | da.d_maxiosz = INT_MAX & ~(da.d_miniosz - 1); |
1556 | 1553 | ||
1557 | if (copy_to_user(arg, &da, sizeof(da))) | 1554 | if (copy_to_user(arg, &da, sizeof(da))) |
1558 | return -XFS_ERROR(EFAULT); | 1555 | return -EFAULT; |
1559 | return 0; | 1556 | return 0; |
1560 | } | 1557 | } |
1561 | 1558 | ||
@@ -1588,7 +1585,7 @@ xfs_file_ioctl( | |||
1588 | struct fsdmidata dmi; | 1585 | struct fsdmidata dmi; |
1589 | 1586 | ||
1590 | if (copy_from_user(&dmi, arg, sizeof(dmi))) | 1587 | if (copy_from_user(&dmi, arg, sizeof(dmi))) |
1591 | return -XFS_ERROR(EFAULT); | 1588 | return -EFAULT; |
1592 | 1589 | ||
1593 | error = mnt_want_write_file(filp); | 1590 | error = mnt_want_write_file(filp); |
1594 | if (error) | 1591 | if (error) |
@@ -1597,7 +1594,7 @@ xfs_file_ioctl( | |||
1597 | error = xfs_set_dmattrs(ip, dmi.fsd_dmevmask, | 1594 | error = xfs_set_dmattrs(ip, dmi.fsd_dmevmask, |
1598 | dmi.fsd_dmstate); | 1595 | dmi.fsd_dmstate); |
1599 | mnt_drop_write_file(filp); | 1596 | mnt_drop_write_file(filp); |
1600 | return -error; | 1597 | return error; |
1601 | } | 1598 | } |
1602 | 1599 | ||
1603 | case XFS_IOC_GETBMAP: | 1600 | case XFS_IOC_GETBMAP: |
@@ -1613,14 +1610,14 @@ xfs_file_ioctl( | |||
1613 | xfs_fsop_handlereq_t hreq; | 1610 | xfs_fsop_handlereq_t hreq; |
1614 | 1611 | ||
1615 | if (copy_from_user(&hreq, arg, sizeof(hreq))) | 1612 | if (copy_from_user(&hreq, arg, sizeof(hreq))) |
1616 | return -XFS_ERROR(EFAULT); | 1613 | return -EFAULT; |
1617 | return xfs_find_handle(cmd, &hreq); | 1614 | return xfs_find_handle(cmd, &hreq); |
1618 | } | 1615 | } |
1619 | case XFS_IOC_OPEN_BY_HANDLE: { | 1616 | case XFS_IOC_OPEN_BY_HANDLE: { |
1620 | xfs_fsop_handlereq_t hreq; | 1617 | xfs_fsop_handlereq_t hreq; |
1621 | 1618 | ||
1622 | if (copy_from_user(&hreq, arg, sizeof(xfs_fsop_handlereq_t))) | 1619 | if (copy_from_user(&hreq, arg, sizeof(xfs_fsop_handlereq_t))) |
1623 | return -XFS_ERROR(EFAULT); | 1620 | return -EFAULT; |
1624 | return xfs_open_by_handle(filp, &hreq); | 1621 | return xfs_open_by_handle(filp, &hreq); |
1625 | } | 1622 | } |
1626 | case XFS_IOC_FSSETDM_BY_HANDLE: | 1623 | case XFS_IOC_FSSETDM_BY_HANDLE: |
@@ -1630,7 +1627,7 @@ xfs_file_ioctl( | |||
1630 | xfs_fsop_handlereq_t hreq; | 1627 | xfs_fsop_handlereq_t hreq; |
1631 | 1628 | ||
1632 | if (copy_from_user(&hreq, arg, sizeof(xfs_fsop_handlereq_t))) | 1629 | if (copy_from_user(&hreq, arg, sizeof(xfs_fsop_handlereq_t))) |
1633 | return -XFS_ERROR(EFAULT); | 1630 | return -EFAULT; |
1634 | return xfs_readlink_by_handle(filp, &hreq); | 1631 | return xfs_readlink_by_handle(filp, &hreq); |
1635 | } | 1632 | } |
1636 | case XFS_IOC_ATTRLIST_BY_HANDLE: | 1633 | case XFS_IOC_ATTRLIST_BY_HANDLE: |
@@ -1643,13 +1640,13 @@ xfs_file_ioctl( | |||
1643 | struct xfs_swapext sxp; | 1640 | struct xfs_swapext sxp; |
1644 | 1641 | ||
1645 | if (copy_from_user(&sxp, arg, sizeof(xfs_swapext_t))) | 1642 | if (copy_from_user(&sxp, arg, sizeof(xfs_swapext_t))) |
1646 | return -XFS_ERROR(EFAULT); | 1643 | return -EFAULT; |
1647 | error = mnt_want_write_file(filp); | 1644 | error = mnt_want_write_file(filp); |
1648 | if (error) | 1645 | if (error) |
1649 | return error; | 1646 | return error; |
1650 | error = xfs_ioc_swapext(&sxp); | 1647 | error = xfs_ioc_swapext(&sxp); |
1651 | mnt_drop_write_file(filp); | 1648 | mnt_drop_write_file(filp); |
1652 | return -error; | 1649 | return error; |
1653 | } | 1650 | } |
1654 | 1651 | ||
1655 | case XFS_IOC_FSCOUNTS: { | 1652 | case XFS_IOC_FSCOUNTS: { |
@@ -1657,10 +1654,10 @@ xfs_file_ioctl( | |||
1657 | 1654 | ||
1658 | error = xfs_fs_counts(mp, &out); | 1655 | error = xfs_fs_counts(mp, &out); |
1659 | if (error) | 1656 | if (error) |
1660 | return -error; | 1657 | return error; |
1661 | 1658 | ||
1662 | if (copy_to_user(arg, &out, sizeof(out))) | 1659 | if (copy_to_user(arg, &out, sizeof(out))) |
1663 | return -XFS_ERROR(EFAULT); | 1660 | return -EFAULT; |
1664 | return 0; | 1661 | return 0; |
1665 | } | 1662 | } |
1666 | 1663 | ||
@@ -1672,10 +1669,10 @@ xfs_file_ioctl( | |||
1672 | return -EPERM; | 1669 | return -EPERM; |
1673 | 1670 | ||
1674 | if (mp->m_flags & XFS_MOUNT_RDONLY) | 1671 | if (mp->m_flags & XFS_MOUNT_RDONLY) |
1675 | return -XFS_ERROR(EROFS); | 1672 | return -EROFS; |
1676 | 1673 | ||
1677 | if (copy_from_user(&inout, arg, sizeof(inout))) | 1674 | if (copy_from_user(&inout, arg, sizeof(inout))) |
1678 | return -XFS_ERROR(EFAULT); | 1675 | return -EFAULT; |
1679 | 1676 | ||
1680 | error = mnt_want_write_file(filp); | 1677 | error = mnt_want_write_file(filp); |
1681 | if (error) | 1678 | if (error) |
@@ -1686,10 +1683,10 @@ xfs_file_ioctl( | |||
1686 | error = xfs_reserve_blocks(mp, &in, &inout); | 1683 | error = xfs_reserve_blocks(mp, &in, &inout); |
1687 | mnt_drop_write_file(filp); | 1684 | mnt_drop_write_file(filp); |
1688 | if (error) | 1685 | if (error) |
1689 | return -error; | 1686 | return error; |
1690 | 1687 | ||
1691 | if (copy_to_user(arg, &inout, sizeof(inout))) | 1688 | if (copy_to_user(arg, &inout, sizeof(inout))) |
1692 | return -XFS_ERROR(EFAULT); | 1689 | return -EFAULT; |
1693 | return 0; | 1690 | return 0; |
1694 | } | 1691 | } |
1695 | 1692 | ||
@@ -1701,10 +1698,10 @@ xfs_file_ioctl( | |||
1701 | 1698 | ||
1702 | error = xfs_reserve_blocks(mp, NULL, &out); | 1699 | error = xfs_reserve_blocks(mp, NULL, &out); |
1703 | if (error) | 1700 | if (error) |
1704 | return -error; | 1701 | return error; |
1705 | 1702 | ||
1706 | if (copy_to_user(arg, &out, sizeof(out))) | 1703 | if (copy_to_user(arg, &out, sizeof(out))) |
1707 | return -XFS_ERROR(EFAULT); | 1704 | return -EFAULT; |
1708 | 1705 | ||
1709 | return 0; | 1706 | return 0; |
1710 | } | 1707 | } |
@@ -1713,42 +1710,42 @@ xfs_file_ioctl( | |||
1713 | xfs_growfs_data_t in; | 1710 | xfs_growfs_data_t in; |
1714 | 1711 | ||
1715 | if (copy_from_user(&in, arg, sizeof(in))) | 1712 | if (copy_from_user(&in, arg, sizeof(in))) |
1716 | return -XFS_ERROR(EFAULT); | 1713 | return -EFAULT; |
1717 | 1714 | ||
1718 | error = mnt_want_write_file(filp); | 1715 | error = mnt_want_write_file(filp); |
1719 | if (error) | 1716 | if (error) |
1720 | return error; | 1717 | return error; |
1721 | error = xfs_growfs_data(mp, &in); | 1718 | error = xfs_growfs_data(mp, &in); |
1722 | mnt_drop_write_file(filp); | 1719 | mnt_drop_write_file(filp); |
1723 | return -error; | 1720 | return error; |
1724 | } | 1721 | } |
1725 | 1722 | ||
1726 | case XFS_IOC_FSGROWFSLOG: { | 1723 | case XFS_IOC_FSGROWFSLOG: { |
1727 | xfs_growfs_log_t in; | 1724 | xfs_growfs_log_t in; |
1728 | 1725 | ||
1729 | if (copy_from_user(&in, arg, sizeof(in))) | 1726 | if (copy_from_user(&in, arg, sizeof(in))) |
1730 | return -XFS_ERROR(EFAULT); | 1727 | return -EFAULT; |
1731 | 1728 | ||
1732 | error = mnt_want_write_file(filp); | 1729 | error = mnt_want_write_file(filp); |
1733 | if (error) | 1730 | if (error) |
1734 | return error; | 1731 | return error; |
1735 | error = xfs_growfs_log(mp, &in); | 1732 | error = xfs_growfs_log(mp, &in); |
1736 | mnt_drop_write_file(filp); | 1733 | mnt_drop_write_file(filp); |
1737 | return -error; | 1734 | return error; |
1738 | } | 1735 | } |
1739 | 1736 | ||
1740 | case XFS_IOC_FSGROWFSRT: { | 1737 | case XFS_IOC_FSGROWFSRT: { |
1741 | xfs_growfs_rt_t in; | 1738 | xfs_growfs_rt_t in; |
1742 | 1739 | ||
1743 | if (copy_from_user(&in, arg, sizeof(in))) | 1740 | if (copy_from_user(&in, arg, sizeof(in))) |
1744 | return -XFS_ERROR(EFAULT); | 1741 | return -EFAULT; |
1745 | 1742 | ||
1746 | error = mnt_want_write_file(filp); | 1743 | error = mnt_want_write_file(filp); |
1747 | if (error) | 1744 | if (error) |
1748 | return error; | 1745 | return error; |
1749 | error = xfs_growfs_rt(mp, &in); | 1746 | error = xfs_growfs_rt(mp, &in); |
1750 | mnt_drop_write_file(filp); | 1747 | mnt_drop_write_file(filp); |
1751 | return -error; | 1748 | return error; |
1752 | } | 1749 | } |
1753 | 1750 | ||
1754 | case XFS_IOC_GOINGDOWN: { | 1751 | case XFS_IOC_GOINGDOWN: { |
@@ -1758,10 +1755,9 @@ xfs_file_ioctl( | |||
1758 | return -EPERM; | 1755 | return -EPERM; |
1759 | 1756 | ||
1760 | if (get_user(in, (__uint32_t __user *)arg)) | 1757 | if (get_user(in, (__uint32_t __user *)arg)) |
1761 | return -XFS_ERROR(EFAULT); | 1758 | return -EFAULT; |
1762 | 1759 | ||
1763 | error = xfs_fs_goingdown(mp, in); | 1760 | return xfs_fs_goingdown(mp, in); |
1764 | return -error; | ||
1765 | } | 1761 | } |
1766 | 1762 | ||
1767 | case XFS_IOC_ERROR_INJECTION: { | 1763 | case XFS_IOC_ERROR_INJECTION: { |
@@ -1771,18 +1767,16 @@ xfs_file_ioctl( | |||
1771 | return -EPERM; | 1767 | return -EPERM; |
1772 | 1768 | ||
1773 | if (copy_from_user(&in, arg, sizeof(in))) | 1769 | if (copy_from_user(&in, arg, sizeof(in))) |
1774 | return -XFS_ERROR(EFAULT); | 1770 | return -EFAULT; |
1775 | 1771 | ||
1776 | error = xfs_errortag_add(in.errtag, mp); | 1772 | return xfs_errortag_add(in.errtag, mp); |
1777 | return -error; | ||
1778 | } | 1773 | } |
1779 | 1774 | ||
1780 | case XFS_IOC_ERROR_CLEARALL: | 1775 | case XFS_IOC_ERROR_CLEARALL: |
1781 | if (!capable(CAP_SYS_ADMIN)) | 1776 | if (!capable(CAP_SYS_ADMIN)) |
1782 | return -EPERM; | 1777 | return -EPERM; |
1783 | 1778 | ||
1784 | error = xfs_errortag_clearall(mp, 1); | 1779 | return xfs_errortag_clearall(mp, 1); |
1785 | return -error; | ||
1786 | 1780 | ||
1787 | case XFS_IOC_FREE_EOFBLOCKS: { | 1781 | case XFS_IOC_FREE_EOFBLOCKS: { |
1788 | struct xfs_fs_eofblocks eofb; | 1782 | struct xfs_fs_eofblocks eofb; |
@@ -1792,16 +1786,16 @@ xfs_file_ioctl( | |||
1792 | return -EPERM; | 1786 | return -EPERM; |
1793 | 1787 | ||
1794 | if (mp->m_flags & XFS_MOUNT_RDONLY) | 1788 | if (mp->m_flags & XFS_MOUNT_RDONLY) |
1795 | return -XFS_ERROR(EROFS); | 1789 | return -EROFS; |
1796 | 1790 | ||
1797 | if (copy_from_user(&eofb, arg, sizeof(eofb))) | 1791 | if (copy_from_user(&eofb, arg, sizeof(eofb))) |
1798 | return -XFS_ERROR(EFAULT); | 1792 | return -EFAULT; |
1799 | 1793 | ||
1800 | error = xfs_fs_eofblocks_from_user(&eofb, &keofb); | 1794 | error = xfs_fs_eofblocks_from_user(&eofb, &keofb); |
1801 | if (error) | 1795 | if (error) |
1802 | return -error; | 1796 | return error; |
1803 | 1797 | ||
1804 | return -xfs_icache_free_eofblocks(mp, &keofb); | 1798 | return xfs_icache_free_eofblocks(mp, &keofb); |
1805 | } | 1799 | } |
1806 | 1800 | ||
1807 | default: | 1801 | default: |
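The write-side cases above (XFS_IOC_FSSETDM, XFS_IOC_SWAPEXT and the three growfs ioctls) all follow the same bracket: copy the argument in, take a write reference on the mount, run the operation, drop the reference, and return the result, which is now already a negative errno. A hedged sketch of that shape, with mock stand-ins for the kernel primitives (only the control flow is meant to match):

    #include <errno.h>
    #include <string.h>

    struct file;                            /* opaque, as in the kernel */

    /* Mock stand-ins for copy_from_user(), mnt_want_write_file() and
     * mnt_drop_write_file(); real code operates on user pointers and
     * struct file, this sketch only models the return values. */
    static int mock_copy_in(void *dst, const void *src, size_t n)
    {
            memcpy(dst, src, n);
            return 0;                       /* nonzero would mean a fault */
    }
    static int mock_want_write(struct file *filp) { (void)filp; return 0; }
    static void mock_drop_write(struct file *filp) { (void)filp; }
    static int mock_growfs(const long *in) { return *in > 0 ? 0 : -EINVAL; }

    /* Shape of a write-side ioctl case after the conversion: every
     * helper already returns a negative errno, so it is passed through
     * where the old code ended with "return -error". */
    static int growfs_ioctl_case(struct file *filp, const long *uarg)
    {
            long in;
            int error;

            if (mock_copy_in(&in, uarg, sizeof(in)))
                    return -EFAULT;

            error = mock_want_write(filp);
            if (error)
                    return error;
            error = mock_growfs(&in);
            mock_drop_write(filp);
            return error;
    }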
diff --git a/fs/xfs/xfs_ioctl32.c b/fs/xfs/xfs_ioctl32.c
index 944d5baa710a..a554646ff141 100644
--- a/fs/xfs/xfs_ioctl32.c
+++ b/fs/xfs/xfs_ioctl32.c
@@ -28,7 +28,6 @@ | |||
28 | #include "xfs_sb.h" | 28 | #include "xfs_sb.h" |
29 | #include "xfs_ag.h" | 29 | #include "xfs_ag.h" |
30 | #include "xfs_mount.h" | 30 | #include "xfs_mount.h" |
31 | #include "xfs_vnode.h" | ||
32 | #include "xfs_inode.h" | 31 | #include "xfs_inode.h" |
33 | #include "xfs_itable.h" | 32 | #include "xfs_itable.h" |
34 | #include "xfs_error.h" | 33 | #include "xfs_error.h" |
@@ -56,7 +55,7 @@ xfs_compat_flock64_copyin( | |||
56 | get_user(bf->l_sysid, &arg32->l_sysid) || | 55 | get_user(bf->l_sysid, &arg32->l_sysid) || |
57 | get_user(bf->l_pid, &arg32->l_pid) || | 56 | get_user(bf->l_pid, &arg32->l_pid) || |
58 | copy_from_user(bf->l_pad, &arg32->l_pad, 4*sizeof(u32))) | 57 | copy_from_user(bf->l_pad, &arg32->l_pad, 4*sizeof(u32))) |
59 | return -XFS_ERROR(EFAULT); | 58 | return -EFAULT; |
60 | return 0; | 59 | return 0; |
61 | } | 60 | } |
62 | 61 | ||
@@ -70,10 +69,10 @@ xfs_compat_ioc_fsgeometry_v1( | |||
70 | 69 | ||
71 | error = xfs_fs_geometry(mp, &fsgeo, 3); | 70 | error = xfs_fs_geometry(mp, &fsgeo, 3); |
72 | if (error) | 71 | if (error) |
73 | return -error; | 72 | return error; |
74 | /* The 32-bit variant simply has some padding at the end */ | 73 | /* The 32-bit variant simply has some padding at the end */ |
75 | if (copy_to_user(arg32, &fsgeo, sizeof(struct compat_xfs_fsop_geom_v1))) | 74 | if (copy_to_user(arg32, &fsgeo, sizeof(struct compat_xfs_fsop_geom_v1))) |
76 | return -XFS_ERROR(EFAULT); | 75 | return -EFAULT; |
77 | return 0; | 76 | return 0; |
78 | } | 77 | } |
79 | 78 | ||
@@ -84,7 +83,7 @@ xfs_compat_growfs_data_copyin( | |||
84 | { | 83 | { |
85 | if (get_user(in->newblocks, &arg32->newblocks) || | 84 | if (get_user(in->newblocks, &arg32->newblocks) || |
86 | get_user(in->imaxpct, &arg32->imaxpct)) | 85 | get_user(in->imaxpct, &arg32->imaxpct)) |
87 | return -XFS_ERROR(EFAULT); | 86 | return -EFAULT; |
88 | return 0; | 87 | return 0; |
89 | } | 88 | } |
90 | 89 | ||
@@ -95,14 +94,14 @@ xfs_compat_growfs_rt_copyin( | |||
95 | { | 94 | { |
96 | if (get_user(in->newblocks, &arg32->newblocks) || | 95 | if (get_user(in->newblocks, &arg32->newblocks) || |
97 | get_user(in->extsize, &arg32->extsize)) | 96 | get_user(in->extsize, &arg32->extsize)) |
98 | return -XFS_ERROR(EFAULT); | 97 | return -EFAULT; |
99 | return 0; | 98 | return 0; |
100 | } | 99 | } |
101 | 100 | ||
102 | STATIC int | 101 | STATIC int |
103 | xfs_inumbers_fmt_compat( | 102 | xfs_inumbers_fmt_compat( |
104 | void __user *ubuffer, | 103 | void __user *ubuffer, |
105 | const xfs_inogrp_t *buffer, | 104 | const struct xfs_inogrp *buffer, |
106 | long count, | 105 | long count, |
107 | long *written) | 106 | long *written) |
108 | { | 107 | { |
@@ -113,7 +112,7 @@ xfs_inumbers_fmt_compat( | |||
113 | if (put_user(buffer[i].xi_startino, &p32[i].xi_startino) || | 112 | if (put_user(buffer[i].xi_startino, &p32[i].xi_startino) || |
114 | put_user(buffer[i].xi_alloccount, &p32[i].xi_alloccount) || | 113 | put_user(buffer[i].xi_alloccount, &p32[i].xi_alloccount) || |
115 | put_user(buffer[i].xi_allocmask, &p32[i].xi_allocmask)) | 114 | put_user(buffer[i].xi_allocmask, &p32[i].xi_allocmask)) |
116 | return -XFS_ERROR(EFAULT); | 115 | return -EFAULT; |
117 | } | 116 | } |
118 | *written = count * sizeof(*p32); | 117 | *written = count * sizeof(*p32); |
119 | return 0; | 118 | return 0; |
@@ -132,7 +131,7 @@ xfs_ioctl32_bstime_copyin( | |||
132 | 131 | ||
133 | if (get_user(sec32, &bstime32->tv_sec) || | 132 | if (get_user(sec32, &bstime32->tv_sec) || |
134 | get_user(bstime->tv_nsec, &bstime32->tv_nsec)) | 133 | get_user(bstime->tv_nsec, &bstime32->tv_nsec)) |
135 | return -XFS_ERROR(EFAULT); | 134 | return -EFAULT; |
136 | bstime->tv_sec = sec32; | 135 | bstime->tv_sec = sec32; |
137 | return 0; | 136 | return 0; |
138 | } | 137 | } |
@@ -164,7 +163,7 @@ xfs_ioctl32_bstat_copyin( | |||
164 | get_user(bstat->bs_dmevmask, &bstat32->bs_dmevmask) || | 163 | get_user(bstat->bs_dmevmask, &bstat32->bs_dmevmask) || |
165 | get_user(bstat->bs_dmstate, &bstat32->bs_dmstate) || | 164 | get_user(bstat->bs_dmstate, &bstat32->bs_dmstate) || |
166 | get_user(bstat->bs_aextents, &bstat32->bs_aextents)) | 165 | get_user(bstat->bs_aextents, &bstat32->bs_aextents)) |
167 | return -XFS_ERROR(EFAULT); | 166 | return -EFAULT; |
168 | return 0; | 167 | return 0; |
169 | } | 168 | } |
170 | 169 | ||
@@ -180,7 +179,7 @@ xfs_bstime_store_compat( | |||
180 | sec32 = p->tv_sec; | 179 | sec32 = p->tv_sec; |
181 | if (put_user(sec32, &p32->tv_sec) || | 180 | if (put_user(sec32, &p32->tv_sec) || |
182 | put_user(p->tv_nsec, &p32->tv_nsec)) | 181 | put_user(p->tv_nsec, &p32->tv_nsec)) |
183 | return -XFS_ERROR(EFAULT); | 182 | return -EFAULT; |
184 | return 0; | 183 | return 0; |
185 | } | 184 | } |
186 | 185 | ||
@@ -195,7 +194,7 @@ xfs_bulkstat_one_fmt_compat( | |||
195 | compat_xfs_bstat_t __user *p32 = ubuffer; | 194 | compat_xfs_bstat_t __user *p32 = ubuffer; |
196 | 195 | ||
197 | if (ubsize < sizeof(*p32)) | 196 | if (ubsize < sizeof(*p32)) |
198 | return XFS_ERROR(ENOMEM); | 197 | return -ENOMEM; |
199 | 198 | ||
200 | if (put_user(buffer->bs_ino, &p32->bs_ino) || | 199 | if (put_user(buffer->bs_ino, &p32->bs_ino) || |
201 | put_user(buffer->bs_mode, &p32->bs_mode) || | 200 | put_user(buffer->bs_mode, &p32->bs_mode) || |
@@ -218,7 +217,7 @@ xfs_bulkstat_one_fmt_compat( | |||
218 | put_user(buffer->bs_dmevmask, &p32->bs_dmevmask) || | 217 | put_user(buffer->bs_dmevmask, &p32->bs_dmevmask) || |
219 | put_user(buffer->bs_dmstate, &p32->bs_dmstate) || | 218 | put_user(buffer->bs_dmstate, &p32->bs_dmstate) || |
220 | put_user(buffer->bs_aextents, &p32->bs_aextents)) | 219 | put_user(buffer->bs_aextents, &p32->bs_aextents)) |
221 | return XFS_ERROR(EFAULT); | 220 | return -EFAULT; |
222 | if (ubused) | 221 | if (ubused) |
223 | *ubused = sizeof(*p32); | 222 | *ubused = sizeof(*p32); |
224 | return 0; | 223 | return 0; |
@@ -256,30 +255,30 @@ xfs_compat_ioc_bulkstat( | |||
256 | /* should be called again (unused here, but used in dmapi) */ | 255 | /* should be called again (unused here, but used in dmapi) */ |
257 | 256 | ||
258 | if (!capable(CAP_SYS_ADMIN)) | 257 | if (!capable(CAP_SYS_ADMIN)) |
259 | return -XFS_ERROR(EPERM); | 258 | return -EPERM; |
260 | 259 | ||
261 | if (XFS_FORCED_SHUTDOWN(mp)) | 260 | if (XFS_FORCED_SHUTDOWN(mp)) |
262 | return -XFS_ERROR(EIO); | 261 | return -EIO; |
263 | 262 | ||
264 | if (get_user(addr, &p32->lastip)) | 263 | if (get_user(addr, &p32->lastip)) |
265 | return -XFS_ERROR(EFAULT); | 264 | return -EFAULT; |
266 | bulkreq.lastip = compat_ptr(addr); | 265 | bulkreq.lastip = compat_ptr(addr); |
267 | if (get_user(bulkreq.icount, &p32->icount) || | 266 | if (get_user(bulkreq.icount, &p32->icount) || |
268 | get_user(addr, &p32->ubuffer)) | 267 | get_user(addr, &p32->ubuffer)) |
269 | return -XFS_ERROR(EFAULT); | 268 | return -EFAULT; |
270 | bulkreq.ubuffer = compat_ptr(addr); | 269 | bulkreq.ubuffer = compat_ptr(addr); |
271 | if (get_user(addr, &p32->ocount)) | 270 | if (get_user(addr, &p32->ocount)) |
272 | return -XFS_ERROR(EFAULT); | 271 | return -EFAULT; |
273 | bulkreq.ocount = compat_ptr(addr); | 272 | bulkreq.ocount = compat_ptr(addr); |
274 | 273 | ||
275 | if (copy_from_user(&inlast, bulkreq.lastip, sizeof(__s64))) | 274 | if (copy_from_user(&inlast, bulkreq.lastip, sizeof(__s64))) |
276 | return -XFS_ERROR(EFAULT); | 275 | return -EFAULT; |
277 | 276 | ||
278 | if ((count = bulkreq.icount) <= 0) | 277 | if ((count = bulkreq.icount) <= 0) |
279 | return -XFS_ERROR(EINVAL); | 278 | return -EINVAL; |
280 | 279 | ||
281 | if (bulkreq.ubuffer == NULL) | 280 | if (bulkreq.ubuffer == NULL) |
282 | return -XFS_ERROR(EINVAL); | 281 | return -EINVAL; |
283 | 282 | ||
284 | if (cmd == XFS_IOC_FSINUMBERS_32) { | 283 | if (cmd == XFS_IOC_FSINUMBERS_32) { |
285 | error = xfs_inumbers(mp, &inlast, &count, | 284 | error = xfs_inumbers(mp, &inlast, &count, |
@@ -294,17 +293,17 @@ xfs_compat_ioc_bulkstat( | |||
294 | xfs_bulkstat_one_compat, sizeof(compat_xfs_bstat_t), | 293 | xfs_bulkstat_one_compat, sizeof(compat_xfs_bstat_t), |
295 | bulkreq.ubuffer, &done); | 294 | bulkreq.ubuffer, &done); |
296 | } else | 295 | } else |
297 | error = XFS_ERROR(EINVAL); | 296 | error = -EINVAL; |
298 | if (error) | 297 | if (error) |
299 | return -error; | 298 | return error; |
300 | 299 | ||
301 | if (bulkreq.ocount != NULL) { | 300 | if (bulkreq.ocount != NULL) { |
302 | if (copy_to_user(bulkreq.lastip, &inlast, | 301 | if (copy_to_user(bulkreq.lastip, &inlast, |
303 | sizeof(xfs_ino_t))) | 302 | sizeof(xfs_ino_t))) |
304 | return -XFS_ERROR(EFAULT); | 303 | return -EFAULT; |
305 | 304 | ||
306 | if (copy_to_user(bulkreq.ocount, &count, sizeof(count))) | 305 | if (copy_to_user(bulkreq.ocount, &count, sizeof(count))) |
307 | return -XFS_ERROR(EFAULT); | 306 | return -EFAULT; |
308 | } | 307 | } |
309 | 308 | ||
310 | return 0; | 309 | return 0; |
@@ -318,7 +317,7 @@ xfs_compat_handlereq_copyin( | |||
318 | compat_xfs_fsop_handlereq_t hreq32; | 317 | compat_xfs_fsop_handlereq_t hreq32; |
319 | 318 | ||
320 | if (copy_from_user(&hreq32, arg32, sizeof(compat_xfs_fsop_handlereq_t))) | 319 | if (copy_from_user(&hreq32, arg32, sizeof(compat_xfs_fsop_handlereq_t))) |
321 | return -XFS_ERROR(EFAULT); | 320 | return -EFAULT; |
322 | 321 | ||
323 | hreq->fd = hreq32.fd; | 322 | hreq->fd = hreq32.fd; |
324 | hreq->path = compat_ptr(hreq32.path); | 323 | hreq->path = compat_ptr(hreq32.path); |
@@ -352,19 +351,19 @@ xfs_compat_attrlist_by_handle( | |||
352 | char *kbuf; | 351 | char *kbuf; |
353 | 352 | ||
354 | if (!capable(CAP_SYS_ADMIN)) | 353 | if (!capable(CAP_SYS_ADMIN)) |
355 | return -XFS_ERROR(EPERM); | 354 | return -EPERM; |
356 | if (copy_from_user(&al_hreq, arg, | 355 | if (copy_from_user(&al_hreq, arg, |
357 | sizeof(compat_xfs_fsop_attrlist_handlereq_t))) | 356 | sizeof(compat_xfs_fsop_attrlist_handlereq_t))) |
358 | return -XFS_ERROR(EFAULT); | 357 | return -EFAULT; |
359 | if (al_hreq.buflen < sizeof(struct attrlist) || | 358 | if (al_hreq.buflen < sizeof(struct attrlist) || |
360 | al_hreq.buflen > XATTR_LIST_MAX) | 359 | al_hreq.buflen > XATTR_LIST_MAX) |
361 | return -XFS_ERROR(EINVAL); | 360 | return -EINVAL; |
362 | 361 | ||
363 | /* | 362 | /* |
364 | * Reject flags, only allow namespaces. | 363 | * Reject flags, only allow namespaces. |
365 | */ | 364 | */ |
366 | if (al_hreq.flags & ~(ATTR_ROOT | ATTR_SECURE)) | 365 | if (al_hreq.flags & ~(ATTR_ROOT | ATTR_SECURE)) |
367 | return -XFS_ERROR(EINVAL); | 366 | return -EINVAL; |
368 | 367 | ||
369 | dentry = xfs_compat_handlereq_to_dentry(parfilp, &al_hreq.hreq); | 368 | dentry = xfs_compat_handlereq_to_dentry(parfilp, &al_hreq.hreq); |
370 | if (IS_ERR(dentry)) | 369 | if (IS_ERR(dentry)) |
@@ -376,7 +375,7 @@ xfs_compat_attrlist_by_handle( | |||
376 | goto out_dput; | 375 | goto out_dput; |
377 | 376 | ||
378 | cursor = (attrlist_cursor_kern_t *)&al_hreq.pos; | 377 | cursor = (attrlist_cursor_kern_t *)&al_hreq.pos; |
379 | error = -xfs_attr_list(XFS_I(dentry->d_inode), kbuf, al_hreq.buflen, | 378 | error = xfs_attr_list(XFS_I(dentry->d_inode), kbuf, al_hreq.buflen, |
380 | al_hreq.flags, cursor); | 379 | al_hreq.flags, cursor); |
381 | if (error) | 380 | if (error) |
382 | goto out_kfree; | 381 | goto out_kfree; |
@@ -404,10 +403,10 @@ xfs_compat_attrmulti_by_handle( | |||
404 | unsigned char *attr_name; | 403 | unsigned char *attr_name; |
405 | 404 | ||
406 | if (!capable(CAP_SYS_ADMIN)) | 405 | if (!capable(CAP_SYS_ADMIN)) |
407 | return -XFS_ERROR(EPERM); | 406 | return -EPERM; |
408 | if (copy_from_user(&am_hreq, arg, | 407 | if (copy_from_user(&am_hreq, arg, |
409 | sizeof(compat_xfs_fsop_attrmulti_handlereq_t))) | 408 | sizeof(compat_xfs_fsop_attrmulti_handlereq_t))) |
410 | return -XFS_ERROR(EFAULT); | 409 | return -EFAULT; |
411 | 410 | ||
412 | /* overflow check */ | 411 | /* overflow check */ |
413 | if (am_hreq.opcount >= INT_MAX / sizeof(compat_xfs_attr_multiop_t)) | 412 | if (am_hreq.opcount >= INT_MAX / sizeof(compat_xfs_attr_multiop_t)) |
@@ -417,7 +416,7 @@ xfs_compat_attrmulti_by_handle( | |||
417 | if (IS_ERR(dentry)) | 416 | if (IS_ERR(dentry)) |
418 | return PTR_ERR(dentry); | 417 | return PTR_ERR(dentry); |
419 | 418 | ||
420 | error = E2BIG; | 419 | error = -E2BIG; |
421 | size = am_hreq.opcount * sizeof(compat_xfs_attr_multiop_t); | 420 | size = am_hreq.opcount * sizeof(compat_xfs_attr_multiop_t); |
422 | if (!size || size > 16 * PAGE_SIZE) | 421 | if (!size || size > 16 * PAGE_SIZE) |
423 | goto out_dput; | 422 | goto out_dput; |
@@ -428,7 +427,7 @@ xfs_compat_attrmulti_by_handle( | |||
428 | goto out_dput; | 427 | goto out_dput; |
429 | } | 428 | } |
430 | 429 | ||
431 | error = ENOMEM; | 430 | error = -ENOMEM; |
432 | attr_name = kmalloc(MAXNAMELEN, GFP_KERNEL); | 431 | attr_name = kmalloc(MAXNAMELEN, GFP_KERNEL); |
433 | if (!attr_name) | 432 | if (!attr_name) |
434 | goto out_kfree_ops; | 433 | goto out_kfree_ops; |
@@ -439,7 +438,7 @@ xfs_compat_attrmulti_by_handle( | |||
439 | compat_ptr(ops[i].am_attrname), | 438 | compat_ptr(ops[i].am_attrname), |
440 | MAXNAMELEN); | 439 | MAXNAMELEN); |
441 | if (ops[i].am_error == 0 || ops[i].am_error == MAXNAMELEN) | 440 | if (ops[i].am_error == 0 || ops[i].am_error == MAXNAMELEN) |
442 | error = ERANGE; | 441 | error = -ERANGE; |
443 | if (ops[i].am_error < 0) | 442 | if (ops[i].am_error < 0) |
444 | break; | 443 | break; |
445 | 444 | ||
@@ -470,19 +469,19 @@ xfs_compat_attrmulti_by_handle( | |||
470 | mnt_drop_write_file(parfilp); | 469 | mnt_drop_write_file(parfilp); |
471 | break; | 470 | break; |
472 | default: | 471 | default: |
473 | ops[i].am_error = EINVAL; | 472 | ops[i].am_error = -EINVAL; |
474 | } | 473 | } |
475 | } | 474 | } |
476 | 475 | ||
477 | if (copy_to_user(compat_ptr(am_hreq.ops), ops, size)) | 476 | if (copy_to_user(compat_ptr(am_hreq.ops), ops, size)) |
478 | error = XFS_ERROR(EFAULT); | 477 | error = -EFAULT; |
479 | 478 | ||
480 | kfree(attr_name); | 479 | kfree(attr_name); |
481 | out_kfree_ops: | 480 | out_kfree_ops: |
482 | kfree(ops); | 481 | kfree(ops); |
483 | out_dput: | 482 | out_dput: |
484 | dput(dentry); | 483 | dput(dentry); |
485 | return -error; | 484 | return error; |
486 | } | 485 | } |
487 | 486 | ||
488 | STATIC int | 487 | STATIC int |
@@ -496,26 +495,26 @@ xfs_compat_fssetdm_by_handle( | |||
496 | struct dentry *dentry; | 495 | struct dentry *dentry; |
497 | 496 | ||
498 | if (!capable(CAP_MKNOD)) | 497 | if (!capable(CAP_MKNOD)) |
499 | return -XFS_ERROR(EPERM); | 498 | return -EPERM; |
500 | if (copy_from_user(&dmhreq, arg, | 499 | if (copy_from_user(&dmhreq, arg, |
501 | sizeof(compat_xfs_fsop_setdm_handlereq_t))) | 500 | sizeof(compat_xfs_fsop_setdm_handlereq_t))) |
502 | return -XFS_ERROR(EFAULT); | 501 | return -EFAULT; |
503 | 502 | ||
504 | dentry = xfs_compat_handlereq_to_dentry(parfilp, &dmhreq.hreq); | 503 | dentry = xfs_compat_handlereq_to_dentry(parfilp, &dmhreq.hreq); |
505 | if (IS_ERR(dentry)) | 504 | if (IS_ERR(dentry)) |
506 | return PTR_ERR(dentry); | 505 | return PTR_ERR(dentry); |
507 | 506 | ||
508 | if (IS_IMMUTABLE(dentry->d_inode) || IS_APPEND(dentry->d_inode)) { | 507 | if (IS_IMMUTABLE(dentry->d_inode) || IS_APPEND(dentry->d_inode)) { |
509 | error = -XFS_ERROR(EPERM); | 508 | error = -EPERM; |
510 | goto out; | 509 | goto out; |
511 | } | 510 | } |
512 | 511 | ||
513 | if (copy_from_user(&fsd, compat_ptr(dmhreq.data), sizeof(fsd))) { | 512 | if (copy_from_user(&fsd, compat_ptr(dmhreq.data), sizeof(fsd))) { |
514 | error = -XFS_ERROR(EFAULT); | 513 | error = -EFAULT; |
515 | goto out; | 514 | goto out; |
516 | } | 515 | } |
517 | 516 | ||
518 | error = -xfs_set_dmattrs(XFS_I(dentry->d_inode), fsd.fsd_dmevmask, | 517 | error = xfs_set_dmattrs(XFS_I(dentry->d_inode), fsd.fsd_dmevmask, |
519 | fsd.fsd_dmstate); | 518 | fsd.fsd_dmstate); |
520 | 519 | ||
521 | out: | 520 | out: |
@@ -537,7 +536,7 @@ xfs_file_compat_ioctl( | |||
537 | int error; | 536 | int error; |
538 | 537 | ||
539 | if (filp->f_mode & FMODE_NOCMTIME) | 538 | if (filp->f_mode & FMODE_NOCMTIME) |
540 | ioflags |= IO_INVIS; | 539 | ioflags |= XFS_IO_INVIS; |
541 | 540 | ||
542 | trace_xfs_file_compat_ioctl(ip); | 541 | trace_xfs_file_compat_ioctl(ip); |
543 | 542 | ||
@@ -588,7 +587,7 @@ xfs_file_compat_ioctl( | |||
588 | struct xfs_flock64 bf; | 587 | struct xfs_flock64 bf; |
589 | 588 | ||
590 | if (xfs_compat_flock64_copyin(&bf, arg)) | 589 | if (xfs_compat_flock64_copyin(&bf, arg)) |
591 | return -XFS_ERROR(EFAULT); | 590 | return -EFAULT; |
592 | cmd = _NATIVE_IOC(cmd, struct xfs_flock64); | 591 | cmd = _NATIVE_IOC(cmd, struct xfs_flock64); |
593 | return xfs_ioc_space(ip, inode, filp, ioflags, cmd, &bf); | 592 | return xfs_ioc_space(ip, inode, filp, ioflags, cmd, &bf); |
594 | } | 593 | } |
@@ -598,25 +597,25 @@ xfs_file_compat_ioctl( | |||
598 | struct xfs_growfs_data in; | 597 | struct xfs_growfs_data in; |
599 | 598 | ||
600 | if (xfs_compat_growfs_data_copyin(&in, arg)) | 599 | if (xfs_compat_growfs_data_copyin(&in, arg)) |
601 | return -XFS_ERROR(EFAULT); | 600 | return -EFAULT; |
602 | error = mnt_want_write_file(filp); | 601 | error = mnt_want_write_file(filp); |
603 | if (error) | 602 | if (error) |
604 | return error; | 603 | return error; |
605 | error = xfs_growfs_data(mp, &in); | 604 | error = xfs_growfs_data(mp, &in); |
606 | mnt_drop_write_file(filp); | 605 | mnt_drop_write_file(filp); |
607 | return -error; | 606 | return error; |
608 | } | 607 | } |
609 | case XFS_IOC_FSGROWFSRT_32: { | 608 | case XFS_IOC_FSGROWFSRT_32: { |
610 | struct xfs_growfs_rt in; | 609 | struct xfs_growfs_rt in; |
611 | 610 | ||
612 | if (xfs_compat_growfs_rt_copyin(&in, arg)) | 611 | if (xfs_compat_growfs_rt_copyin(&in, arg)) |
613 | return -XFS_ERROR(EFAULT); | 612 | return -EFAULT; |
614 | error = mnt_want_write_file(filp); | 613 | error = mnt_want_write_file(filp); |
615 | if (error) | 614 | if (error) |
616 | return error; | 615 | return error; |
617 | error = xfs_growfs_rt(mp, &in); | 616 | error = xfs_growfs_rt(mp, &in); |
618 | mnt_drop_write_file(filp); | 617 | mnt_drop_write_file(filp); |
619 | return -error; | 618 | return error; |
620 | } | 619 | } |
621 | #endif | 620 | #endif |
622 | /* long changes size, but xfs only copiese out 32 bits */ | 621 | /* long changes size, but xfs only copiese out 32 bits */ |
@@ -633,13 +632,13 @@ xfs_file_compat_ioctl( | |||
633 | if (copy_from_user(&sxp, sxu, | 632 | if (copy_from_user(&sxp, sxu, |
634 | offsetof(struct xfs_swapext, sx_stat)) || | 633 | offsetof(struct xfs_swapext, sx_stat)) || |
635 | xfs_ioctl32_bstat_copyin(&sxp.sx_stat, &sxu->sx_stat)) | 634 | xfs_ioctl32_bstat_copyin(&sxp.sx_stat, &sxu->sx_stat)) |
636 | return -XFS_ERROR(EFAULT); | 635 | return -EFAULT; |
637 | error = mnt_want_write_file(filp); | 636 | error = mnt_want_write_file(filp); |
638 | if (error) | 637 | if (error) |
639 | return error; | 638 | return error; |
640 | error = xfs_ioc_swapext(&sxp); | 639 | error = xfs_ioc_swapext(&sxp); |
641 | mnt_drop_write_file(filp); | 640 | mnt_drop_write_file(filp); |
642 | return -error; | 641 | return error; |
643 | } | 642 | } |
644 | case XFS_IOC_FSBULKSTAT_32: | 643 | case XFS_IOC_FSBULKSTAT_32: |
645 | case XFS_IOC_FSBULKSTAT_SINGLE_32: | 644 | case XFS_IOC_FSBULKSTAT_SINGLE_32: |
@@ -651,7 +650,7 @@ xfs_file_compat_ioctl( | |||
651 | struct xfs_fsop_handlereq hreq; | 650 | struct xfs_fsop_handlereq hreq; |
652 | 651 | ||
653 | if (xfs_compat_handlereq_copyin(&hreq, arg)) | 652 | if (xfs_compat_handlereq_copyin(&hreq, arg)) |
654 | return -XFS_ERROR(EFAULT); | 653 | return -EFAULT; |
655 | cmd = _NATIVE_IOC(cmd, struct xfs_fsop_handlereq); | 654 | cmd = _NATIVE_IOC(cmd, struct xfs_fsop_handlereq); |
656 | return xfs_find_handle(cmd, &hreq); | 655 | return xfs_find_handle(cmd, &hreq); |
657 | } | 656 | } |
@@ -659,14 +658,14 @@ xfs_file_compat_ioctl( | |||
659 | struct xfs_fsop_handlereq hreq; | 658 | struct xfs_fsop_handlereq hreq; |
660 | 659 | ||
661 | if (xfs_compat_handlereq_copyin(&hreq, arg)) | 660 | if (xfs_compat_handlereq_copyin(&hreq, arg)) |
662 | return -XFS_ERROR(EFAULT); | 661 | return -EFAULT; |
663 | return xfs_open_by_handle(filp, &hreq); | 662 | return xfs_open_by_handle(filp, &hreq); |
664 | } | 663 | } |
665 | case XFS_IOC_READLINK_BY_HANDLE_32: { | 664 | case XFS_IOC_READLINK_BY_HANDLE_32: { |
666 | struct xfs_fsop_handlereq hreq; | 665 | struct xfs_fsop_handlereq hreq; |
667 | 666 | ||
668 | if (xfs_compat_handlereq_copyin(&hreq, arg)) | 667 | if (xfs_compat_handlereq_copyin(&hreq, arg)) |
669 | return -XFS_ERROR(EFAULT); | 668 | return -EFAULT; |
670 | return xfs_readlink_by_handle(filp, &hreq); | 669 | return xfs_readlink_by_handle(filp, &hreq); |
671 | } | 670 | } |
672 | case XFS_IOC_ATTRLIST_BY_HANDLE_32: | 671 | case XFS_IOC_ATTRLIST_BY_HANDLE_32: |
@@ -676,6 +675,6 @@ xfs_file_compat_ioctl( | |||
676 | case XFS_IOC_FSSETDM_BY_HANDLE_32: | 675 | case XFS_IOC_FSSETDM_BY_HANDLE_32: |
677 | return xfs_compat_fssetdm_by_handle(filp, arg); | 676 | return xfs_compat_fssetdm_by_handle(filp, arg); |
678 | default: | 677 | default: |
679 | return -XFS_ERROR(ENOIOCTLCMD); | 678 | return -ENOIOCTLCMD; |
680 | } | 679 | } |
681 | } | 680 | } |
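In the compat attr-multi and fssetdm paths above the conversion reaches the intermediate values as well: E2BIG, ENOMEM, ERANGE and the per-op am_error codes are now stored negative, so the final "return error" at the bottom of xfs_compat_attrmulti_by_handle() needs no negation. A simplified userspace sketch of that accumulation pattern (the op structure and the name check are invented for illustration):

    #include <errno.h>
    #include <stddef.h>

    struct demo_op {
            const char      *name;
            int             am_error;       /* negative errno or 0 */
    };

    /* Invented validator: rejects missing or overlong names. */
    static int demo_check_name(const char *name)
    {
            size_t i;

            if (!name)
                    return -EINVAL;
            for (i = 0; name[i] != '\0'; i++)
                    if (i >= 255)
                            return -ERANGE; /* negative, like am_error above */
            return 0;
    }

    static int demo_run_ops(struct demo_op *ops, size_t nops)
    {
            int error = 0;
            size_t i;

            for (i = 0; i < nops; i++) {
                    ops[i].am_error = demo_check_name(ops[i].name);
                    if (ops[i].am_error < 0)
                            break;          /* stop on the first hard failure */
            }
            /* A failed copy-out of the results would set error = -EFAULT. */
            return error;                   /* no final sign flip */
    }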
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 6d3ec2b6ee29..e9c47b6f5e5a 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -110,7 +110,7 @@ xfs_alert_fsblock_zero( | |||
110 | (unsigned long long)imap->br_startoff, | 110 | (unsigned long long)imap->br_startoff, |
111 | (unsigned long long)imap->br_blockcount, | 111 | (unsigned long long)imap->br_blockcount, |
112 | imap->br_state); | 112 | imap->br_state); |
113 | return EFSCORRUPTED; | 113 | return -EFSCORRUPTED; |
114 | } | 114 | } |
115 | 115 | ||
116 | int | 116 | int |
@@ -138,7 +138,7 @@ xfs_iomap_write_direct( | |||
138 | 138 | ||
139 | error = xfs_qm_dqattach(ip, 0); | 139 | error = xfs_qm_dqattach(ip, 0); |
140 | if (error) | 140 | if (error) |
141 | return XFS_ERROR(error); | 141 | return error; |
142 | 142 | ||
143 | rt = XFS_IS_REALTIME_INODE(ip); | 143 | rt = XFS_IS_REALTIME_INODE(ip); |
144 | extsz = xfs_get_extsz_hint(ip); | 144 | extsz = xfs_get_extsz_hint(ip); |
@@ -148,7 +148,7 @@ xfs_iomap_write_direct( | |||
148 | if ((offset + count) > XFS_ISIZE(ip)) { | 148 | if ((offset + count) > XFS_ISIZE(ip)) { |
149 | error = xfs_iomap_eof_align_last_fsb(mp, ip, extsz, &last_fsb); | 149 | error = xfs_iomap_eof_align_last_fsb(mp, ip, extsz, &last_fsb); |
150 | if (error) | 150 | if (error) |
151 | return XFS_ERROR(error); | 151 | return error; |
152 | } else { | 152 | } else { |
153 | if (nmaps && (imap->br_startblock == HOLESTARTBLOCK)) | 153 | if (nmaps && (imap->br_startblock == HOLESTARTBLOCK)) |
154 | last_fsb = MIN(last_fsb, (xfs_fileoff_t) | 154 | last_fsb = MIN(last_fsb, (xfs_fileoff_t) |
@@ -188,7 +188,7 @@ xfs_iomap_write_direct( | |||
188 | */ | 188 | */ |
189 | if (error) { | 189 | if (error) { |
190 | xfs_trans_cancel(tp, 0); | 190 | xfs_trans_cancel(tp, 0); |
191 | return XFS_ERROR(error); | 191 | return error; |
192 | } | 192 | } |
193 | 193 | ||
194 | xfs_ilock(ip, XFS_ILOCK_EXCL); | 194 | xfs_ilock(ip, XFS_ILOCK_EXCL); |
@@ -225,7 +225,7 @@ xfs_iomap_write_direct( | |||
225 | * Copy any maps to caller's array and return any error. | 225 | * Copy any maps to caller's array and return any error. |
226 | */ | 226 | */ |
227 | if (nimaps == 0) { | 227 | if (nimaps == 0) { |
228 | error = XFS_ERROR(ENOSPC); | 228 | error = -ENOSPC; |
229 | goto out_unlock; | 229 | goto out_unlock; |
230 | } | 230 | } |
231 | 231 | ||
@@ -397,7 +397,8 @@ xfs_quota_calc_throttle( | |||
397 | struct xfs_inode *ip, | 397 | struct xfs_inode *ip, |
398 | int type, | 398 | int type, |
399 | xfs_fsblock_t *qblocks, | 399 | xfs_fsblock_t *qblocks, |
400 | int *qshift) | 400 | int *qshift, |
401 | int64_t *qfreesp) | ||
401 | { | 402 | { |
402 | int64_t freesp; | 403 | int64_t freesp; |
403 | int shift = 0; | 404 | int shift = 0; |
@@ -406,6 +407,7 @@ xfs_quota_calc_throttle( | |||
406 | /* over hi wmark, squash the prealloc completely */ | 407 | /* over hi wmark, squash the prealloc completely */ |
407 | if (dq->q_res_bcount >= dq->q_prealloc_hi_wmark) { | 408 | if (dq->q_res_bcount >= dq->q_prealloc_hi_wmark) { |
408 | *qblocks = 0; | 409 | *qblocks = 0; |
410 | *qfreesp = 0; | ||
409 | return; | 411 | return; |
410 | } | 412 | } |
411 | 413 | ||
@@ -418,6 +420,9 @@ xfs_quota_calc_throttle( | |||
418 | shift += 2; | 420 | shift += 2; |
419 | } | 421 | } |
420 | 422 | ||
423 | if (freesp < *qfreesp) | ||
424 | *qfreesp = freesp; | ||
425 | |||
421 | /* only overwrite the throttle values if we are more aggressive */ | 426 | /* only overwrite the throttle values if we are more aggressive */ |
422 | if ((freesp >> shift) < (*qblocks >> *qshift)) { | 427 | if ((freesp >> shift) < (*qblocks >> *qshift)) { |
423 | *qblocks = freesp; | 428 | *qblocks = freesp; |
@@ -476,15 +481,18 @@ xfs_iomap_prealloc_size( | |||
476 | } | 481 | } |
477 | 482 | ||
478 | /* | 483 | /* |
479 | * Check each quota to cap the prealloc size and provide a shift | 484 | * Check each quota to cap the prealloc size, provide a shift value to |
480 | * value to throttle with. | 485 | * throttle with and adjust amount of available space. |
481 | */ | 486 | */ |
482 | if (xfs_quota_need_throttle(ip, XFS_DQ_USER, alloc_blocks)) | 487 | if (xfs_quota_need_throttle(ip, XFS_DQ_USER, alloc_blocks)) |
483 | xfs_quota_calc_throttle(ip, XFS_DQ_USER, &qblocks, &qshift); | 488 | xfs_quota_calc_throttle(ip, XFS_DQ_USER, &qblocks, &qshift, |
489 | &freesp); | ||
484 | if (xfs_quota_need_throttle(ip, XFS_DQ_GROUP, alloc_blocks)) | 490 | if (xfs_quota_need_throttle(ip, XFS_DQ_GROUP, alloc_blocks)) |
485 | xfs_quota_calc_throttle(ip, XFS_DQ_GROUP, &qblocks, &qshift); | 491 | xfs_quota_calc_throttle(ip, XFS_DQ_GROUP, &qblocks, &qshift, |
492 | &freesp); | ||
486 | if (xfs_quota_need_throttle(ip, XFS_DQ_PROJ, alloc_blocks)) | 493 | if (xfs_quota_need_throttle(ip, XFS_DQ_PROJ, alloc_blocks)) |
487 | xfs_quota_calc_throttle(ip, XFS_DQ_PROJ, &qblocks, &qshift); | 494 | xfs_quota_calc_throttle(ip, XFS_DQ_PROJ, &qblocks, &qshift, |
495 | &freesp); | ||
488 | 496 | ||
489 | /* | 497 | /* |
490 | * The final prealloc size is set to the minimum of free space available | 498 | * The final prealloc size is set to the minimum of free space available |
@@ -552,7 +560,7 @@ xfs_iomap_write_delay( | |||
552 | */ | 560 | */ |
553 | error = xfs_qm_dqattach_locked(ip, 0); | 561 | error = xfs_qm_dqattach_locked(ip, 0); |
554 | if (error) | 562 | if (error) |
555 | return XFS_ERROR(error); | 563 | return error; |
556 | 564 | ||
557 | extsz = xfs_get_extsz_hint(ip); | 565 | extsz = xfs_get_extsz_hint(ip); |
558 | offset_fsb = XFS_B_TO_FSBT(mp, offset); | 566 | offset_fsb = XFS_B_TO_FSBT(mp, offset); |
@@ -596,11 +604,11 @@ retry: | |||
596 | imap, &nimaps, XFS_BMAPI_ENTIRE); | 604 | imap, &nimaps, XFS_BMAPI_ENTIRE); |
597 | switch (error) { | 605 | switch (error) { |
598 | case 0: | 606 | case 0: |
599 | case ENOSPC: | 607 | case -ENOSPC: |
600 | case EDQUOT: | 608 | case -EDQUOT: |
601 | break; | 609 | break; |
602 | default: | 610 | default: |
603 | return XFS_ERROR(error); | 611 | return error; |
604 | } | 612 | } |
605 | 613 | ||
606 | /* | 614 | /* |
@@ -614,7 +622,7 @@ retry: | |||
614 | error = 0; | 622 | error = 0; |
615 | goto retry; | 623 | goto retry; |
616 | } | 624 | } |
617 | return XFS_ERROR(error ? error : ENOSPC); | 625 | return error ? error : -ENOSPC; |
618 | } | 626 | } |
619 | 627 | ||
620 | if (!(imap[0].br_startblock || XFS_IS_REALTIME_INODE(ip))) | 628 | if (!(imap[0].br_startblock || XFS_IS_REALTIME_INODE(ip))) |
@@ -663,7 +671,7 @@ xfs_iomap_write_allocate( | |||
663 | */ | 671 | */ |
664 | error = xfs_qm_dqattach(ip, 0); | 672 | error = xfs_qm_dqattach(ip, 0); |
665 | if (error) | 673 | if (error) |
666 | return XFS_ERROR(error); | 674 | return error; |
667 | 675 | ||
668 | offset_fsb = XFS_B_TO_FSBT(mp, offset); | 676 | offset_fsb = XFS_B_TO_FSBT(mp, offset); |
669 | count_fsb = imap->br_blockcount; | 677 | count_fsb = imap->br_blockcount; |
@@ -690,7 +698,7 @@ xfs_iomap_write_allocate( | |||
690 | nres, 0); | 698 | nres, 0); |
691 | if (error) { | 699 | if (error) { |
692 | xfs_trans_cancel(tp, 0); | 700 | xfs_trans_cancel(tp, 0); |
693 | return XFS_ERROR(error); | 701 | return error; |
694 | } | 702 | } |
695 | xfs_ilock(ip, XFS_ILOCK_EXCL); | 703 | xfs_ilock(ip, XFS_ILOCK_EXCL); |
696 | xfs_trans_ijoin(tp, ip, 0); | 704 | xfs_trans_ijoin(tp, ip, 0); |
@@ -739,7 +747,7 @@ xfs_iomap_write_allocate( | |||
739 | if ((map_start_fsb + count_fsb) > last_block) { | 747 | if ((map_start_fsb + count_fsb) > last_block) { |
740 | count_fsb = last_block - map_start_fsb; | 748 | count_fsb = last_block - map_start_fsb; |
741 | if (count_fsb == 0) { | 749 | if (count_fsb == 0) { |
742 | error = EAGAIN; | 750 | error = -EAGAIN; |
743 | goto trans_cancel; | 751 | goto trans_cancel; |
744 | } | 752 | } |
745 | } | 753 | } |
@@ -793,7 +801,7 @@ trans_cancel: | |||
793 | xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT); | 801 | xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT); |
794 | error0: | 802 | error0: |
795 | xfs_iunlock(ip, XFS_ILOCK_EXCL); | 803 | xfs_iunlock(ip, XFS_ILOCK_EXCL); |
796 | return XFS_ERROR(error); | 804 | return error; |
797 | } | 805 | } |
798 | 806 | ||
799 | int | 807 | int |
@@ -853,7 +861,7 @@ xfs_iomap_write_unwritten( | |||
853 | resblks, 0); | 861 | resblks, 0); |
854 | if (error) { | 862 | if (error) { |
855 | xfs_trans_cancel(tp, 0); | 863 | xfs_trans_cancel(tp, 0); |
856 | return XFS_ERROR(error); | 864 | return error; |
857 | } | 865 | } |
858 | 866 | ||
859 | xfs_ilock(ip, XFS_ILOCK_EXCL); | 867 | xfs_ilock(ip, XFS_ILOCK_EXCL); |
@@ -892,7 +900,7 @@ xfs_iomap_write_unwritten( | |||
892 | error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES); | 900 | error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES); |
893 | xfs_iunlock(ip, XFS_ILOCK_EXCL); | 901 | xfs_iunlock(ip, XFS_ILOCK_EXCL); |
894 | if (error) | 902 | if (error) |
895 | return XFS_ERROR(error); | 903 | return error; |
896 | 904 | ||
897 | if (!(imap.br_startblock || XFS_IS_REALTIME_INODE(ip))) | 905 | if (!(imap.br_startblock || XFS_IS_REALTIME_INODE(ip))) |
898 | return xfs_alert_fsblock_zero(ip, &imap); | 906 | return xfs_alert_fsblock_zero(ip, &imap); |
@@ -915,5 +923,5 @@ error_on_bmapi_transaction: | |||
915 | xfs_bmap_cancel(&free_list); | 923 | xfs_bmap_cancel(&free_list); |
916 | xfs_trans_cancel(tp, (XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT)); | 924 | xfs_trans_cancel(tp, (XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT)); |
917 | xfs_iunlock(ip, XFS_ILOCK_EXCL); | 925 | xfs_iunlock(ip, XFS_ILOCK_EXCL); |
918 | return XFS_ERROR(error); | 926 | return error; |
919 | } | 927 | } |
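Besides the errno cleanup, the xfs_iomap.c hunks above extend xfs_quota_calc_throttle() with an int64_t *qfreesp out-parameter: a quota that is being throttled can now also lower the free-space figure used to size speculative preallocation, and xfs_iomap_prealloc_size() passes &freesp into each of the user, group and project checks. A standalone sketch of the clamping idea (the watermark thresholds and names are illustrative, not the real XFS quota fields):

    #include <stdint.h>

    /* Sketch: over the high watermark the preallocation is squashed
     * entirely; otherwise the quota's remaining headroom caps both the
     * throttle block count and the caller's free-space estimate. */
    static void demo_calc_throttle(int64_t used, int64_t hi_wmark,
                                   int64_t hard_limit,
                                   int64_t *qblocks, int *qshift,
                                   int64_t *qfreesp)
    {
            int64_t freesp;
            int shift = 0;

            if (used >= hi_wmark) {
                    *qblocks = 0;
                    *qfreesp = 0;           /* new: no usable headroom left */
                    return;
            }

            freesp = hard_limit - used;     /* headroom under this quota */
            if (used > hi_wmark / 2)
                    shift += 2;             /* illustrative throttle step */

            if (freesp < *qfreesp)          /* new: clamp available space */
                    *qfreesp = freesp;

            /* only overwrite the throttle values if we are more aggressive */
            if ((freesp >> shift) < (*qblocks >> *qshift)) {
                    *qblocks = freesp;
                    *qshift = shift;
            }
    }

The caller then takes the minimum of the filesystem free space and this clamped quota headroom when sizing the preallocation, which is what the updated comment in xfs_iomap_prealloc_size() describes.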
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index 205613a06068..72129493e9d3 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -72,7 +72,7 @@ xfs_initxattrs( | |||
72 | int error = 0; | 72 | int error = 0; |
73 | 73 | ||
74 | for (xattr = xattr_array; xattr->name != NULL; xattr++) { | 74 | for (xattr = xattr_array; xattr->name != NULL; xattr++) { |
75 | error = -xfs_attr_set(ip, xattr->name, xattr->value, | 75 | error = xfs_attr_set(ip, xattr->name, xattr->value, |
76 | xattr->value_len, ATTR_SECURE); | 76 | xattr->value_len, ATTR_SECURE); |
77 | if (error < 0) | 77 | if (error < 0) |
78 | break; | 78 | break; |
@@ -93,7 +93,7 @@ xfs_init_security( | |||
93 | struct inode *dir, | 93 | struct inode *dir, |
94 | const struct qstr *qstr) | 94 | const struct qstr *qstr) |
95 | { | 95 | { |
96 | return -security_inode_init_security(inode, dir, qstr, | 96 | return security_inode_init_security(inode, dir, qstr, |
97 | &xfs_initxattrs, NULL); | 97 | &xfs_initxattrs, NULL); |
98 | } | 98 | } |
99 | 99 | ||
@@ -173,12 +173,12 @@ xfs_generic_create( | |||
173 | 173 | ||
174 | #ifdef CONFIG_XFS_POSIX_ACL | 174 | #ifdef CONFIG_XFS_POSIX_ACL |
175 | if (default_acl) { | 175 | if (default_acl) { |
176 | error = -xfs_set_acl(inode, default_acl, ACL_TYPE_DEFAULT); | 176 | error = xfs_set_acl(inode, default_acl, ACL_TYPE_DEFAULT); |
177 | if (error) | 177 | if (error) |
178 | goto out_cleanup_inode; | 178 | goto out_cleanup_inode; |
179 | } | 179 | } |
180 | if (acl) { | 180 | if (acl) { |
181 | error = -xfs_set_acl(inode, acl, ACL_TYPE_ACCESS); | 181 | error = xfs_set_acl(inode, acl, ACL_TYPE_ACCESS); |
182 | if (error) | 182 | if (error) |
183 | goto out_cleanup_inode; | 183 | goto out_cleanup_inode; |
184 | } | 184 | } |
@@ -194,7 +194,7 @@ xfs_generic_create( | |||
194 | posix_acl_release(default_acl); | 194 | posix_acl_release(default_acl); |
195 | if (acl) | 195 | if (acl) |
196 | posix_acl_release(acl); | 196 | posix_acl_release(acl); |
197 | return -error; | 197 | return error; |
198 | 198 | ||
199 | out_cleanup_inode: | 199 | out_cleanup_inode: |
200 | if (!tmpfile) | 200 | if (!tmpfile) |
@@ -248,8 +248,8 @@ xfs_vn_lookup( | |||
248 | xfs_dentry_to_name(&name, dentry, 0); | 248 | xfs_dentry_to_name(&name, dentry, 0); |
249 | error = xfs_lookup(XFS_I(dir), &name, &cip, NULL); | 249 | error = xfs_lookup(XFS_I(dir), &name, &cip, NULL); |
250 | if (unlikely(error)) { | 250 | if (unlikely(error)) { |
251 | if (unlikely(error != ENOENT)) | 251 | if (unlikely(error != -ENOENT)) |
252 | return ERR_PTR(-error); | 252 | return ERR_PTR(error); |
253 | d_add(dentry, NULL); | 253 | d_add(dentry, NULL); |
254 | return NULL; | 254 | return NULL; |
255 | } | 255 | } |
@@ -275,8 +275,8 @@ xfs_vn_ci_lookup( | |||
275 | xfs_dentry_to_name(&xname, dentry, 0); | 275 | xfs_dentry_to_name(&xname, dentry, 0); |
276 | error = xfs_lookup(XFS_I(dir), &xname, &ip, &ci_name); | 276 | error = xfs_lookup(XFS_I(dir), &xname, &ip, &ci_name); |
277 | if (unlikely(error)) { | 277 | if (unlikely(error)) { |
278 | if (unlikely(error != ENOENT)) | 278 | if (unlikely(error != -ENOENT)) |
279 | return ERR_PTR(-error); | 279 | return ERR_PTR(error); |
280 | /* | 280 | /* |
281 | * call d_add(dentry, NULL) here when d_drop_negative_children | 281 | * call d_add(dentry, NULL) here when d_drop_negative_children |
282 | * is called in xfs_vn_mknod (ie. allow negative dentries | 282 | * is called in xfs_vn_mknod (ie. allow negative dentries |
@@ -311,7 +311,7 @@ xfs_vn_link( | |||
311 | 311 | ||
312 | error = xfs_link(XFS_I(dir), XFS_I(inode), &name); | 312 | error = xfs_link(XFS_I(dir), XFS_I(inode), &name); |
313 | if (unlikely(error)) | 313 | if (unlikely(error)) |
314 | return -error; | 314 | return error; |
315 | 315 | ||
316 | ihold(inode); | 316 | ihold(inode); |
317 | d_instantiate(dentry, inode); | 317 | d_instantiate(dentry, inode); |
@@ -328,7 +328,7 @@ xfs_vn_unlink( | |||
328 | 328 | ||
329 | xfs_dentry_to_name(&name, dentry, 0); | 329 | xfs_dentry_to_name(&name, dentry, 0); |
330 | 330 | ||
331 | error = -xfs_remove(XFS_I(dir), &name, XFS_I(dentry->d_inode)); | 331 | error = xfs_remove(XFS_I(dir), &name, XFS_I(dentry->d_inode)); |
332 | if (error) | 332 | if (error) |
333 | return error; | 333 | return error; |
334 | 334 | ||
@@ -375,7 +375,7 @@ xfs_vn_symlink( | |||
375 | xfs_cleanup_inode(dir, inode, dentry); | 375 | xfs_cleanup_inode(dir, inode, dentry); |
376 | iput(inode); | 376 | iput(inode); |
377 | out: | 377 | out: |
378 | return -error; | 378 | return error; |
379 | } | 379 | } |
380 | 380 | ||
381 | STATIC int | 381 | STATIC int |
@@ -392,8 +392,8 @@ xfs_vn_rename( | |||
392 | xfs_dentry_to_name(&oname, odentry, 0); | 392 | xfs_dentry_to_name(&oname, odentry, 0); |
393 | xfs_dentry_to_name(&nname, ndentry, odentry->d_inode->i_mode); | 393 | xfs_dentry_to_name(&nname, ndentry, odentry->d_inode->i_mode); |
394 | 394 | ||
395 | return -xfs_rename(XFS_I(odir), &oname, XFS_I(odentry->d_inode), | 395 | return xfs_rename(XFS_I(odir), &oname, XFS_I(odentry->d_inode), |
396 | XFS_I(ndir), &nname, new_inode ? | 396 | XFS_I(ndir), &nname, new_inode ? |
397 | XFS_I(new_inode) : NULL); | 397 | XFS_I(new_inode) : NULL); |
398 | } | 398 | } |
399 | 399 | ||
@@ -414,7 +414,7 @@ xfs_vn_follow_link( | |||
414 | if (!link) | 414 | if (!link) |
415 | goto out_err; | 415 | goto out_err; |
416 | 416 | ||
417 | error = -xfs_readlink(XFS_I(dentry->d_inode), link); | 417 | error = xfs_readlink(XFS_I(dentry->d_inode), link); |
418 | if (unlikely(error)) | 418 | if (unlikely(error)) |
419 | goto out_kfree; | 419 | goto out_kfree; |
420 | 420 | ||
@@ -441,7 +441,7 @@ xfs_vn_getattr( | |||
441 | trace_xfs_getattr(ip); | 441 | trace_xfs_getattr(ip); |
442 | 442 | ||
443 | if (XFS_FORCED_SHUTDOWN(mp)) | 443 | if (XFS_FORCED_SHUTDOWN(mp)) |
444 | return -XFS_ERROR(EIO); | 444 | return -EIO; |
445 | 445 | ||
446 | stat->size = XFS_ISIZE(ip); | 446 | stat->size = XFS_ISIZE(ip); |
447 | stat->dev = inode->i_sb->s_dev; | 447 | stat->dev = inode->i_sb->s_dev; |
@@ -546,14 +546,14 @@ xfs_setattr_nonsize( | |||
546 | /* If acls are being inherited, we already have this checked */ | 546 | /* If acls are being inherited, we already have this checked */ |
547 | if (!(flags & XFS_ATTR_NOACL)) { | 547 | if (!(flags & XFS_ATTR_NOACL)) { |
548 | if (mp->m_flags & XFS_MOUNT_RDONLY) | 548 | if (mp->m_flags & XFS_MOUNT_RDONLY) |
549 | return XFS_ERROR(EROFS); | 549 | return -EROFS; |
550 | 550 | ||
551 | if (XFS_FORCED_SHUTDOWN(mp)) | 551 | if (XFS_FORCED_SHUTDOWN(mp)) |
552 | return XFS_ERROR(EIO); | 552 | return -EIO; |
553 | 553 | ||
554 | error = -inode_change_ok(inode, iattr); | 554 | error = inode_change_ok(inode, iattr); |
555 | if (error) | 555 | if (error) |
556 | return XFS_ERROR(error); | 556 | return error; |
557 | } | 557 | } |
558 | 558 | ||
559 | ASSERT((mask & ATTR_SIZE) == 0); | 559 | ASSERT((mask & ATTR_SIZE) == 0); |
@@ -703,7 +703,7 @@ xfs_setattr_nonsize( | |||
703 | xfs_qm_dqrele(gdqp); | 703 | xfs_qm_dqrele(gdqp); |
704 | 704 | ||
705 | if (error) | 705 | if (error) |
706 | return XFS_ERROR(error); | 706 | return error; |
707 | 707 | ||
708 | /* | 708 | /* |
709 | * XXX(hch): Updating the ACL entries is not atomic vs the i_mode | 709 | * XXX(hch): Updating the ACL entries is not atomic vs the i_mode |
@@ -713,9 +713,9 @@ xfs_setattr_nonsize( | |||
713 | * Posix ACL code seems to care about this issue either. | 713 | * Posix ACL code seems to care about this issue either. |
714 | */ | 714 | */ |
715 | if ((mask & ATTR_MODE) && !(flags & XFS_ATTR_NOACL)) { | 715 | if ((mask & ATTR_MODE) && !(flags & XFS_ATTR_NOACL)) { |
716 | error = -posix_acl_chmod(inode, inode->i_mode); | 716 | error = posix_acl_chmod(inode, inode->i_mode); |
717 | if (error) | 717 | if (error) |
718 | return XFS_ERROR(error); | 718 | return error; |
719 | } | 719 | } |
720 | 720 | ||
721 | return 0; | 721 | return 0; |
@@ -748,14 +748,14 @@ xfs_setattr_size( | |||
748 | trace_xfs_setattr(ip); | 748 | trace_xfs_setattr(ip); |
749 | 749 | ||
750 | if (mp->m_flags & XFS_MOUNT_RDONLY) | 750 | if (mp->m_flags & XFS_MOUNT_RDONLY) |
751 | return XFS_ERROR(EROFS); | 751 | return -EROFS; |
752 | 752 | ||
753 | if (XFS_FORCED_SHUTDOWN(mp)) | 753 | if (XFS_FORCED_SHUTDOWN(mp)) |
754 | return XFS_ERROR(EIO); | 754 | return -EIO; |
755 | 755 | ||
756 | error = -inode_change_ok(inode, iattr); | 756 | error = inode_change_ok(inode, iattr); |
757 | if (error) | 757 | if (error) |
758 | return XFS_ERROR(error); | 758 | return error; |
759 | 759 | ||
760 | ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL)); | 760 | ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL)); |
761 | ASSERT(S_ISREG(ip->i_d.di_mode)); | 761 | ASSERT(S_ISREG(ip->i_d.di_mode)); |
@@ -818,7 +818,7 @@ xfs_setattr_size( | |||
818 | * care about here. | 818 | * care about here. |
819 | */ | 819 | */ |
820 | if (oldsize != ip->i_d.di_size && newsize > ip->i_d.di_size) { | 820 | if (oldsize != ip->i_d.di_size && newsize > ip->i_d.di_size) { |
821 | error = -filemap_write_and_wait_range(VFS_I(ip)->i_mapping, | 821 | error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping, |
822 | ip->i_d.di_size, newsize); | 822 | ip->i_d.di_size, newsize); |
823 | if (error) | 823 | if (error) |
824 | return error; | 824 | return error; |
@@ -844,7 +844,7 @@ xfs_setattr_size( | |||
844 | * much we can do about this, except to hope that the caller sees ENOMEM | 844 | * much we can do about this, except to hope that the caller sees ENOMEM |
845 | * and retries the truncate operation. | 845 | * and retries the truncate operation. |
846 | */ | 846 | */ |
847 | error = -block_truncate_page(inode->i_mapping, newsize, xfs_get_blocks); | 847 | error = block_truncate_page(inode->i_mapping, newsize, xfs_get_blocks); |
848 | if (error) | 848 | if (error) |
849 | return error; | 849 | return error; |
850 | truncate_setsize(inode, newsize); | 850 | truncate_setsize(inode, newsize); |
@@ -950,7 +950,7 @@ xfs_vn_setattr( | |||
950 | error = xfs_setattr_nonsize(ip, iattr, 0); | 950 | error = xfs_setattr_nonsize(ip, iattr, 0); |
951 | } | 951 | } |
952 | 952 | ||
953 | return -error; | 953 | return error; |
954 | } | 954 | } |
955 | 955 | ||
956 | STATIC int | 956 | STATIC int |
@@ -970,7 +970,7 @@ xfs_vn_update_time( | |||
970 | error = xfs_trans_reserve(tp, &M_RES(mp)->tr_fsyncts, 0, 0); | 970 | error = xfs_trans_reserve(tp, &M_RES(mp)->tr_fsyncts, 0, 0); |
971 | if (error) { | 971 | if (error) { |
972 | xfs_trans_cancel(tp, 0); | 972 | xfs_trans_cancel(tp, 0); |
973 | return -error; | 973 | return error; |
974 | } | 974 | } |
975 | 975 | ||
976 | xfs_ilock(ip, XFS_ILOCK_EXCL); | 976 | xfs_ilock(ip, XFS_ILOCK_EXCL); |
@@ -991,7 +991,7 @@ xfs_vn_update_time( | |||
991 | } | 991 | } |
992 | xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); | 992 | xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); |
993 | xfs_trans_log_inode(tp, ip, XFS_ILOG_TIMESTAMP); | 993 | xfs_trans_log_inode(tp, ip, XFS_ILOG_TIMESTAMP); |
994 | return -xfs_trans_commit(tp, 0); | 994 | return xfs_trans_commit(tp, 0); |
995 | } | 995 | } |
996 | 996 | ||
997 | #define XFS_FIEMAP_FLAGS (FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR) | 997 | #define XFS_FIEMAP_FLAGS (FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR) |
@@ -1036,7 +1036,7 @@ xfs_fiemap_format( | |||
1036 | *full = 1; /* user array now full */ | 1036 | *full = 1; /* user array now full */ |
1037 | } | 1037 | } |
1038 | 1038 | ||
1039 | return -error; | 1039 | return error; |
1040 | } | 1040 | } |
1041 | 1041 | ||
1042 | STATIC int | 1042 | STATIC int |
@@ -1055,12 +1055,12 @@ xfs_vn_fiemap( | |||
1055 | return error; | 1055 | return error; |
1056 | 1056 | ||
1057 | /* Set up bmap header for xfs internal routine */ | 1057 | /* Set up bmap header for xfs internal routine */ |
1058 | bm.bmv_offset = BTOBB(start); | 1058 | bm.bmv_offset = BTOBBT(start); |
1059 | /* Special case for whole file */ | 1059 | /* Special case for whole file */ |
1060 | if (length == FIEMAP_MAX_OFFSET) | 1060 | if (length == FIEMAP_MAX_OFFSET) |
1061 | bm.bmv_length = -1LL; | 1061 | bm.bmv_length = -1LL; |
1062 | else | 1062 | else |
1063 | bm.bmv_length = BTOBB(length); | 1063 | bm.bmv_length = BTOBB(start + length) - bm.bmv_offset; |
1064 | 1064 | ||
1065 | /* We add one because in getbmap world count includes the header */ | 1065 | /* We add one because in getbmap world count includes the header */ |
1066 | bm.bmv_count = !fieinfo->fi_extents_max ? MAXEXTNUM : | 1066 | bm.bmv_count = !fieinfo->fi_extents_max ? MAXEXTNUM : |
@@ -1075,7 +1075,7 @@ xfs_vn_fiemap( | |||
1075 | 1075 | ||
1076 | error = xfs_getbmap(ip, &bm, xfs_fiemap_format, fieinfo); | 1076 | error = xfs_getbmap(ip, &bm, xfs_fiemap_format, fieinfo); |
1077 | if (error) | 1077 | if (error) |
1078 | return -error; | 1078 | return error; |
1079 | 1079 | ||
1080 | return 0; | 1080 | return 0; |
1081 | } | 1081 | } |
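Besides the sign conversion, the xfs_vn_fiemap() hunk fixes how a byte range is translated into 512-byte basic blocks: the offset is now rounded down with BTOBBT() and the length is taken from the rounded-up end minus that offset, so a request that starts or ends inside a block is no longer shortened. A small userspace model of the arithmetic (the macros follow the usual XFS definitions, with BBSHIFT assumed to be 9):

#include <stdio.h>

#define BBSHIFT		9	/* 512-byte basic blocks */
#define BTOBB(b)	(((unsigned long long)(b) + (1 << BBSHIFT) - 1) >> BBSHIFT)	/* round up */
#define BTOBBT(b)	((unsigned long long)(b) >> BBSHIFT)				/* truncate */

int main(void)
{
	unsigned long long start = 700, length = 501;	/* request covers bytes 700..1200 */

	/* Old mapping: start rounded up, length rounded up independently. */
	unsigned long long old_off = BTOBB(start);			/* 2 -> byte 1024, misses 700..1023 */
	unsigned long long old_len = BTOBB(length);			/* 1 block */

	/* New mapping: start truncated, length measured to the rounded-up end. */
	unsigned long long new_off = BTOBBT(start);			/* 1 -> byte 512 */
	unsigned long long new_len = BTOBB(start + length) - new_off;	/* 2 blocks, bytes 512..1535 */

	printf("old: off=%llu len=%llu, new: off=%llu len=%llu\n",
	       old_off, old_len, new_off, new_len);
	return 0;
}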
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index cb64f222d607..f71be9c68017 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -67,19 +67,17 @@ xfs_bulkstat_one_int( | |||
67 | *stat = BULKSTAT_RV_NOTHING; | 67 | *stat = BULKSTAT_RV_NOTHING; |
68 | 68 | ||
69 | if (!buffer || xfs_internal_inum(mp, ino)) | 69 | if (!buffer || xfs_internal_inum(mp, ino)) |
70 | return XFS_ERROR(EINVAL); | 70 | return -EINVAL; |
71 | 71 | ||
72 | buf = kmem_alloc(sizeof(*buf), KM_SLEEP | KM_MAYFAIL); | 72 | buf = kmem_alloc(sizeof(*buf), KM_SLEEP | KM_MAYFAIL); |
73 | if (!buf) | 73 | if (!buf) |
74 | return XFS_ERROR(ENOMEM); | 74 | return -ENOMEM; |
75 | 75 | ||
76 | error = xfs_iget(mp, NULL, ino, | 76 | error = xfs_iget(mp, NULL, ino, |
77 | (XFS_IGET_DONTCACHE | XFS_IGET_UNTRUSTED), | 77 | (XFS_IGET_DONTCACHE | XFS_IGET_UNTRUSTED), |
78 | XFS_ILOCK_SHARED, &ip); | 78 | XFS_ILOCK_SHARED, &ip); |
79 | if (error) { | 79 | if (error) |
80 | *stat = BULKSTAT_RV_NOTHING; | ||
81 | goto out_free; | 80 | goto out_free; |
82 | } | ||
83 | 81 | ||
84 | ASSERT(ip != NULL); | 82 | ASSERT(ip != NULL); |
85 | ASSERT(ip->i_imap.im_blkno != 0); | 83 | ASSERT(ip->i_imap.im_blkno != 0); |
@@ -136,7 +134,6 @@ xfs_bulkstat_one_int( | |||
136 | IRELE(ip); | 134 | IRELE(ip); |
137 | 135 | ||
138 | error = formatter(buffer, ubsize, ubused, buf); | 136 | error = formatter(buffer, ubsize, ubused, buf); |
139 | |||
140 | if (!error) | 137 | if (!error) |
141 | *stat = BULKSTAT_RV_DIDONE; | 138 | *stat = BULKSTAT_RV_DIDONE; |
142 | 139 | ||
@@ -154,9 +151,9 @@ xfs_bulkstat_one_fmt( | |||
154 | const xfs_bstat_t *buffer) | 151 | const xfs_bstat_t *buffer) |
155 | { | 152 | { |
156 | if (ubsize < sizeof(*buffer)) | 153 | if (ubsize < sizeof(*buffer)) |
157 | return XFS_ERROR(ENOMEM); | 154 | return -ENOMEM; |
158 | if (copy_to_user(ubuffer, buffer, sizeof(*buffer))) | 155 | if (copy_to_user(ubuffer, buffer, sizeof(*buffer))) |
159 | return XFS_ERROR(EFAULT); | 156 | return -EFAULT; |
160 | if (ubused) | 157 | if (ubused) |
161 | *ubused = sizeof(*buffer); | 158 | *ubused = sizeof(*buffer); |
162 | return 0; | 159 | return 0; |
@@ -175,9 +172,170 @@ xfs_bulkstat_one( | |||
175 | xfs_bulkstat_one_fmt, ubused, stat); | 172 | xfs_bulkstat_one_fmt, ubused, stat); |
176 | } | 173 | } |
177 | 174 | ||
175 | /* | ||
176 | * Loop over all clusters in a chunk for a given incore inode allocation btree | ||
177 | * record. Do a readahead if there are any allocated inodes in that cluster. | ||
178 | */ | ||
179 | STATIC void | ||
180 | xfs_bulkstat_ichunk_ra( | ||
181 | struct xfs_mount *mp, | ||
182 | xfs_agnumber_t agno, | ||
183 | struct xfs_inobt_rec_incore *irec) | ||
184 | { | ||
185 | xfs_agblock_t agbno; | ||
186 | struct blk_plug plug; | ||
187 | int blks_per_cluster; | ||
188 | int inodes_per_cluster; | ||
189 | int i; /* inode chunk index */ | ||
190 | |||
191 | agbno = XFS_AGINO_TO_AGBNO(mp, irec->ir_startino); | ||
192 | blks_per_cluster = xfs_icluster_size_fsb(mp); | ||
193 | inodes_per_cluster = blks_per_cluster << mp->m_sb.sb_inopblog; | ||
194 | |||
195 | blk_start_plug(&plug); | ||
196 | for (i = 0; i < XFS_INODES_PER_CHUNK; | ||
197 | i += inodes_per_cluster, agbno += blks_per_cluster) { | ||
198 | if (xfs_inobt_maskn(i, inodes_per_cluster) & ~irec->ir_free) { | ||
199 | xfs_btree_reada_bufs(mp, agno, agbno, blks_per_cluster, | ||
200 | &xfs_inode_buf_ops); | ||
201 | } | ||
202 | } | ||
203 | blk_finish_plug(&plug); | ||
204 | } | ||
205 | |||
206 | /* | ||
207 | * Lookup the inode chunk that the given inode lives in and then get the record | ||
208 | * if we found the chunk. If the inode was not the last in the chunk and there | ||
209 | * are some left allocated, update the data for the pointed-to record as well as | ||
210 | * return the count of grabbed inodes. | ||
211 | */ | ||
212 | STATIC int | ||
213 | xfs_bulkstat_grab_ichunk( | ||
214 | struct xfs_btree_cur *cur, /* btree cursor */ | ||
215 | xfs_agino_t agino, /* starting inode of chunk */ | ||
216 | int *icount,/* return # of inodes grabbed */ | ||
217 | struct xfs_inobt_rec_incore *irec) /* btree record */ | ||
218 | { | ||
219 | int idx; /* index into inode chunk */ | ||
220 | int stat; | ||
221 | int error = 0; | ||
222 | |||
223 | /* Lookup the inode chunk that this inode lives in */ | ||
224 | error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &stat); | ||
225 | if (error) | ||
226 | return error; | ||
227 | if (!stat) { | ||
228 | *icount = 0; | ||
229 | return error; | ||
230 | } | ||
231 | |||
232 | /* Get the record, should always work */ | ||
233 | error = xfs_inobt_get_rec(cur, irec, &stat); | ||
234 | if (error) | ||
235 | return error; | ||
236 | XFS_WANT_CORRUPTED_RETURN(stat == 1); | ||
237 | |||
238 | /* Check if the record contains the inode in request */ | ||
239 | if (irec->ir_startino + XFS_INODES_PER_CHUNK <= agino) | ||
240 | return -EINVAL; | ||
241 | |||
242 | idx = agino - irec->ir_startino + 1; | ||
243 | if (idx < XFS_INODES_PER_CHUNK && | ||
244 | (xfs_inobt_maskn(idx, XFS_INODES_PER_CHUNK - idx) & ~irec->ir_free)) { | ||
245 | int i; | ||
246 | |||
247 | /* We got a right chunk with some left inodes allocated at it. | ||
248 | * Grab the chunk record. Mark all the uninteresting inodes | ||
249 | * free -- because they're before our start point. | ||
250 | */ | ||
251 | for (i = 0; i < idx; i++) { | ||
252 | if (XFS_INOBT_MASK(i) & ~irec->ir_free) | ||
253 | irec->ir_freecount++; | ||
254 | } | ||
255 | |||
256 | irec->ir_free |= xfs_inobt_maskn(0, idx); | ||
257 | *icount = XFS_INODES_PER_CHUNK - irec->ir_freecount; | ||
258 | } | ||
259 | |||
260 | return 0; | ||
261 | } | ||
262 | |||
178 | #define XFS_BULKSTAT_UBLEFT(ubleft) ((ubleft) >= statstruct_size) | 263 | #define XFS_BULKSTAT_UBLEFT(ubleft) ((ubleft) >= statstruct_size) |
179 | 264 | ||
180 | /* | 265 | /* |
266 | * Process inodes in chunk with a pointer to a formatter function | ||
267 | * that will iget the inode and fill in the appropriate structure. | ||
268 | */ | ||
269 | int | ||
270 | xfs_bulkstat_ag_ichunk( | ||
271 | struct xfs_mount *mp, | ||
272 | xfs_agnumber_t agno, | ||
273 | struct xfs_inobt_rec_incore *irbp, | ||
274 | bulkstat_one_pf formatter, | ||
275 | size_t statstruct_size, | ||
276 | struct xfs_bulkstat_agichunk *acp) | ||
277 | { | ||
278 | xfs_ino_t lastino = acp->ac_lastino; | ||
279 | char __user **ubufp = acp->ac_ubuffer; | ||
280 | int ubleft = acp->ac_ubleft; | ||
281 | int ubelem = acp->ac_ubelem; | ||
282 | int chunkidx, clustidx; | ||
283 | int error = 0; | ||
284 | xfs_agino_t agino; | ||
285 | |||
286 | for (agino = irbp->ir_startino, chunkidx = clustidx = 0; | ||
287 | XFS_BULKSTAT_UBLEFT(ubleft) && | ||
288 | irbp->ir_freecount < XFS_INODES_PER_CHUNK; | ||
289 | chunkidx++, clustidx++, agino++) { | ||
290 | int fmterror; /* bulkstat formatter result */ | ||
291 | int ubused; | ||
292 | xfs_ino_t ino = XFS_AGINO_TO_INO(mp, agno, agino); | ||
293 | |||
294 | ASSERT(chunkidx < XFS_INODES_PER_CHUNK); | ||
295 | |||
296 | /* Skip if this inode is free */ | ||
297 | if (XFS_INOBT_MASK(chunkidx) & irbp->ir_free) { | ||
298 | lastino = ino; | ||
299 | continue; | ||
300 | } | ||
301 | |||
302 | /* | ||
303 | * Count used inodes as free so we can tell when the | ||
304 | * chunk is used up. | ||
305 | */ | ||
306 | irbp->ir_freecount++; | ||
307 | |||
308 | /* Get the inode and fill in a single buffer */ | ||
309 | ubused = statstruct_size; | ||
310 | error = formatter(mp, ino, *ubufp, ubleft, &ubused, &fmterror); | ||
311 | if (fmterror == BULKSTAT_RV_NOTHING) { | ||
312 | if (error && error != -ENOENT && error != -EINVAL) { | ||
313 | ubleft = 0; | ||
314 | break; | ||
315 | } | ||
316 | lastino = ino; | ||
317 | continue; | ||
318 | } | ||
319 | if (fmterror == BULKSTAT_RV_GIVEUP) { | ||
320 | ubleft = 0; | ||
321 | ASSERT(error); | ||
322 | break; | ||
323 | } | ||
324 | if (*ubufp) | ||
325 | *ubufp += ubused; | ||
326 | ubleft -= ubused; | ||
327 | ubelem++; | ||
328 | lastino = ino; | ||
329 | } | ||
330 | |||
331 | acp->ac_lastino = lastino; | ||
332 | acp->ac_ubleft = ubleft; | ||
333 | acp->ac_ubelem = ubelem; | ||
334 | |||
335 | return error; | ||
336 | } | ||
337 | |||
338 | /* | ||
181 | * Return stat information in bulk (by-inode) for the filesystem. | 339 | * Return stat information in bulk (by-inode) for the filesystem. |
182 | */ | 340 | */ |
183 | int /* error status */ | 341 | int /* error status */ |
@@ -190,13 +348,10 @@ xfs_bulkstat( | |||
190 | char __user *ubuffer, /* buffer with inode stats */ | 348 | char __user *ubuffer, /* buffer with inode stats */ |
191 | int *done) /* 1 if there are more stats to get */ | 349 | int *done) /* 1 if there are more stats to get */ |
192 | { | 350 | { |
193 | xfs_agblock_t agbno=0;/* allocation group block number */ | ||
194 | xfs_buf_t *agbp; /* agi header buffer */ | 351 | xfs_buf_t *agbp; /* agi header buffer */ |
195 | xfs_agi_t *agi; /* agi header data */ | 352 | xfs_agi_t *agi; /* agi header data */ |
196 | xfs_agino_t agino; /* inode # in allocation group */ | 353 | xfs_agino_t agino; /* inode # in allocation group */ |
197 | xfs_agnumber_t agno; /* allocation group number */ | 354 | xfs_agnumber_t agno; /* allocation group number */ |
198 | int chunkidx; /* current index into inode chunk */ | ||
199 | int clustidx; /* current index into inode cluster */ | ||
200 | xfs_btree_cur_t *cur; /* btree cursor for ialloc btree */ | 355 | xfs_btree_cur_t *cur; /* btree cursor for ialloc btree */ |
201 | int end_of_ag; /* set if we've seen the ag end */ | 356 | int end_of_ag; /* set if we've seen the ag end */ |
202 | int error; /* error code */ | 357 | int error; /* error code */ |
@@ -209,8 +364,6 @@ xfs_bulkstat( | |||
209 | xfs_inobt_rec_incore_t *irbuf; /* start of irec buffer */ | 364 | xfs_inobt_rec_incore_t *irbuf; /* start of irec buffer */ |
210 | xfs_inobt_rec_incore_t *irbufend; /* end of good irec buffer entries */ | 365 | xfs_inobt_rec_incore_t *irbufend; /* end of good irec buffer entries */ |
211 | xfs_ino_t lastino; /* last inode number returned */ | 366 | xfs_ino_t lastino; /* last inode number returned */ |
212 | int blks_per_cluster; /* # of blocks per cluster */ | ||
213 | int inodes_per_cluster;/* # of inodes per cluster */ | ||
214 | int nirbuf; /* size of irbuf */ | 367 | int nirbuf; /* size of irbuf */ |
215 | int rval; /* return value error code */ | 368 | int rval; /* return value error code */ |
216 | int tmp; /* result value from btree calls */ | 369 | int tmp; /* result value from btree calls */ |
@@ -218,7 +371,6 @@ xfs_bulkstat( | |||
218 | int ubleft; /* bytes left in user's buffer */ | 371 | int ubleft; /* bytes left in user's buffer */ |
219 | char __user *ubufp; /* pointer into user's buffer */ | 372 | char __user *ubufp; /* pointer into user's buffer */ |
220 | int ubelem; /* spaces used in user's buffer */ | 373 | int ubelem; /* spaces used in user's buffer */ |
221 | int ubused; /* bytes used by formatter */ | ||
222 | 374 | ||
223 | /* | 375 | /* |
224 | * Get the last inode value, see if there's nothing to do. | 376 | * Get the last inode value, see if there's nothing to do. |
@@ -233,20 +385,16 @@ xfs_bulkstat( | |||
233 | *ubcountp = 0; | 385 | *ubcountp = 0; |
234 | return 0; | 386 | return 0; |
235 | } | 387 | } |
236 | if (!ubcountp || *ubcountp <= 0) { | 388 | |
237 | return EINVAL; | ||
238 | } | ||
239 | ubcount = *ubcountp; /* statstruct's */ | 389 | ubcount = *ubcountp; /* statstruct's */ |
240 | ubleft = ubcount * statstruct_size; /* bytes */ | 390 | ubleft = ubcount * statstruct_size; /* bytes */ |
241 | *ubcountp = ubelem = 0; | 391 | *ubcountp = ubelem = 0; |
242 | *done = 0; | 392 | *done = 0; |
243 | fmterror = 0; | 393 | fmterror = 0; |
244 | ubufp = ubuffer; | 394 | ubufp = ubuffer; |
245 | blks_per_cluster = xfs_icluster_size_fsb(mp); | ||
246 | inodes_per_cluster = blks_per_cluster << mp->m_sb.sb_inopblog; | ||
247 | irbuf = kmem_zalloc_greedy(&irbsize, PAGE_SIZE, PAGE_SIZE * 4); | 395 | irbuf = kmem_zalloc_greedy(&irbsize, PAGE_SIZE, PAGE_SIZE * 4); |
248 | if (!irbuf) | 396 | if (!irbuf) |
249 | return ENOMEM; | 397 | return -ENOMEM; |
250 | 398 | ||
251 | nirbuf = irbsize / sizeof(*irbuf); | 399 | nirbuf = irbsize / sizeof(*irbuf); |
252 | 400 | ||
@@ -258,14 +406,8 @@ xfs_bulkstat( | |||
258 | while (XFS_BULKSTAT_UBLEFT(ubleft) && agno < mp->m_sb.sb_agcount) { | 406 | while (XFS_BULKSTAT_UBLEFT(ubleft) && agno < mp->m_sb.sb_agcount) { |
259 | cond_resched(); | 407 | cond_resched(); |
260 | error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp); | 408 | error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp); |
261 | if (error) { | 409 | if (error) |
262 | /* | 410 | break; |
263 | * Skip this allocation group and go to the next one. | ||
264 | */ | ||
265 | agno++; | ||
266 | agino = 0; | ||
267 | continue; | ||
268 | } | ||
269 | agi = XFS_BUF_TO_AGI(agbp); | 411 | agi = XFS_BUF_TO_AGI(agbp); |
270 | /* | 412 | /* |
271 | * Allocate and initialize a btree cursor for ialloc btree. | 413 | * Allocate and initialize a btree cursor for ialloc btree. |
@@ -275,96 +417,39 @@ xfs_bulkstat( | |||
275 | irbp = irbuf; | 417 | irbp = irbuf; |
276 | irbufend = irbuf + nirbuf; | 418 | irbufend = irbuf + nirbuf; |
277 | end_of_ag = 0; | 419 | end_of_ag = 0; |
278 | /* | 420 | icount = 0; |
279 | * If we're returning in the middle of an allocation group, | ||
280 | * we need to get the remainder of the chunk we're in. | ||
281 | */ | ||
282 | if (agino > 0) { | 421 | if (agino > 0) { |
283 | xfs_inobt_rec_incore_t r; | ||
284 | |||
285 | /* | 422 | /* |
286 | * Lookup the inode chunk that this inode lives in. | 423 | * In the middle of an allocation group, we need to get |
424 | * the remainder of the chunk we're in. | ||
287 | */ | 425 | */ |
288 | error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, | 426 | struct xfs_inobt_rec_incore r; |
289 | &tmp); | 427 | |
290 | if (!error && /* no I/O error */ | 428 | error = xfs_bulkstat_grab_ichunk(cur, agino, &icount, &r); |
291 | tmp && /* lookup succeeded */ | 429 | if (error) |
292 | /* got the record, should always work */ | 430 | break; |
293 | !(error = xfs_inobt_get_rec(cur, &r, &i)) && | 431 | if (icount) { |
294 | i == 1 && | ||
295 | /* this is the right chunk */ | ||
296 | agino < r.ir_startino + XFS_INODES_PER_CHUNK && | ||
297 | /* lastino was not last in chunk */ | ||
298 | (chunkidx = agino - r.ir_startino + 1) < | ||
299 | XFS_INODES_PER_CHUNK && | ||
300 | /* there are some left allocated */ | ||
301 | xfs_inobt_maskn(chunkidx, | ||
302 | XFS_INODES_PER_CHUNK - chunkidx) & | ||
303 | ~r.ir_free) { | ||
304 | /* | ||
305 | * Grab the chunk record. Mark all the | ||
306 | * uninteresting inodes (because they're | ||
307 | * before our start point) free. | ||
308 | */ | ||
309 | for (i = 0; i < chunkidx; i++) { | ||
310 | if (XFS_INOBT_MASK(i) & ~r.ir_free) | ||
311 | r.ir_freecount++; | ||
312 | } | ||
313 | r.ir_free |= xfs_inobt_maskn(0, chunkidx); | ||
314 | irbp->ir_startino = r.ir_startino; | 432 | irbp->ir_startino = r.ir_startino; |
315 | irbp->ir_freecount = r.ir_freecount; | 433 | irbp->ir_freecount = r.ir_freecount; |
316 | irbp->ir_free = r.ir_free; | 434 | irbp->ir_free = r.ir_free; |
317 | irbp++; | 435 | irbp++; |
318 | agino = r.ir_startino + XFS_INODES_PER_CHUNK; | 436 | agino = r.ir_startino + XFS_INODES_PER_CHUNK; |
319 | icount = XFS_INODES_PER_CHUNK - r.ir_freecount; | ||
320 | } else { | ||
321 | /* | ||
322 | * If any of those tests failed, bump the | ||
323 | * inode number (just in case). | ||
324 | */ | ||
325 | agino++; | ||
326 | icount = 0; | ||
327 | } | 437 | } |
328 | /* | 438 | /* Increment to the next record */ |
329 | * In any case, increment to the next record. | 439 | error = xfs_btree_increment(cur, 0, &tmp); |
330 | */ | ||
331 | if (!error) | ||
332 | error = xfs_btree_increment(cur, 0, &tmp); | ||
333 | } else { | 440 | } else { |
334 | /* | 441 | /* Start of ag. Lookup the first inode chunk */ |
335 | * Start of ag. Lookup the first inode chunk. | ||
336 | */ | ||
337 | error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &tmp); | 442 | error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &tmp); |
338 | icount = 0; | ||
339 | } | 443 | } |
444 | if (error) | ||
445 | break; | ||
446 | |||
340 | /* | 447 | /* |
341 | * Loop through inode btree records in this ag, | 448 | * Loop through inode btree records in this ag, |
342 | * until we run out of inodes or space in the buffer. | 449 | * until we run out of inodes or space in the buffer. |
343 | */ | 450 | */ |
344 | while (irbp < irbufend && icount < ubcount) { | 451 | while (irbp < irbufend && icount < ubcount) { |
345 | xfs_inobt_rec_incore_t r; | 452 | struct xfs_inobt_rec_incore r; |
346 | |||
347 | /* | ||
348 | * Loop as long as we're unable to read the | ||
349 | * inode btree. | ||
350 | */ | ||
351 | while (error) { | ||
352 | agino += XFS_INODES_PER_CHUNK; | ||
353 | if (XFS_AGINO_TO_AGBNO(mp, agino) >= | ||
354 | be32_to_cpu(agi->agi_length)) | ||
355 | break; | ||
356 | error = xfs_inobt_lookup(cur, agino, | ||
357 | XFS_LOOKUP_GE, &tmp); | ||
358 | cond_resched(); | ||
359 | } | ||
360 | /* | ||
361 | * If ran off the end of the ag either with an error, | ||
362 | * or the normal way, set end and stop collecting. | ||
363 | */ | ||
364 | if (error) { | ||
365 | end_of_ag = 1; | ||
366 | break; | ||
367 | } | ||
368 | 453 | ||
369 | error = xfs_inobt_get_rec(cur, &r, &i); | 454 | error = xfs_inobt_get_rec(cur, &r, &i); |
370 | if (error || i == 0) { | 455 | if (error || i == 0) { |
@@ -377,25 +462,7 @@ xfs_bulkstat( | |||
377 | * Also start read-ahead now for this chunk. | 462 | * Also start read-ahead now for this chunk. |
378 | */ | 463 | */ |
379 | if (r.ir_freecount < XFS_INODES_PER_CHUNK) { | 464 | if (r.ir_freecount < XFS_INODES_PER_CHUNK) { |
380 | struct blk_plug plug; | 465 | xfs_bulkstat_ichunk_ra(mp, agno, &r); |
381 | /* | ||
382 | * Loop over all clusters in the next chunk. | ||
383 | * Do a readahead if there are any allocated | ||
384 | * inodes in that cluster. | ||
385 | */ | ||
386 | blk_start_plug(&plug); | ||
387 | agbno = XFS_AGINO_TO_AGBNO(mp, r.ir_startino); | ||
388 | for (chunkidx = 0; | ||
389 | chunkidx < XFS_INODES_PER_CHUNK; | ||
390 | chunkidx += inodes_per_cluster, | ||
391 | agbno += blks_per_cluster) { | ||
392 | if (xfs_inobt_maskn(chunkidx, | ||
393 | inodes_per_cluster) & ~r.ir_free) | ||
394 | xfs_btree_reada_bufs(mp, agno, | ||
395 | agbno, blks_per_cluster, | ||
396 | &xfs_inode_buf_ops); | ||
397 | } | ||
398 | blk_finish_plug(&plug); | ||
399 | irbp->ir_startino = r.ir_startino; | 466 | irbp->ir_startino = r.ir_startino; |
400 | irbp->ir_freecount = r.ir_freecount; | 467 | irbp->ir_freecount = r.ir_freecount; |
401 | irbp->ir_free = r.ir_free; | 468 | irbp->ir_free = r.ir_free; |
@@ -422,57 +489,20 @@ xfs_bulkstat( | |||
422 | irbufend = irbp; | 489 | irbufend = irbp; |
423 | for (irbp = irbuf; | 490 | for (irbp = irbuf; |
424 | irbp < irbufend && XFS_BULKSTAT_UBLEFT(ubleft); irbp++) { | 491 | irbp < irbufend && XFS_BULKSTAT_UBLEFT(ubleft); irbp++) { |
425 | /* | 492 | struct xfs_bulkstat_agichunk ac; |
426 | * Now process this chunk of inodes. | 493 | |
427 | */ | 494 | ac.ac_lastino = lastino; |
428 | for (agino = irbp->ir_startino, chunkidx = clustidx = 0; | 495 | ac.ac_ubuffer = &ubuffer; |
429 | XFS_BULKSTAT_UBLEFT(ubleft) && | 496 | ac.ac_ubleft = ubleft; |
430 | irbp->ir_freecount < XFS_INODES_PER_CHUNK; | 497 | ac.ac_ubelem = ubelem; |
431 | chunkidx++, clustidx++, agino++) { | 498 | error = xfs_bulkstat_ag_ichunk(mp, agno, irbp, |
432 | ASSERT(chunkidx < XFS_INODES_PER_CHUNK); | 499 | formatter, statstruct_size, &ac); |
433 | 500 | if (error) | |
434 | ino = XFS_AGINO_TO_INO(mp, agno, agino); | 501 | rval = error; |
435 | /* | 502 | |
436 | * Skip if this inode is free. | 503 | lastino = ac.ac_lastino; |
437 | */ | 504 | ubleft = ac.ac_ubleft; |
438 | if (XFS_INOBT_MASK(chunkidx) & irbp->ir_free) { | 505 | ubelem = ac.ac_ubelem; |
439 | lastino = ino; | ||
440 | continue; | ||
441 | } | ||
442 | /* | ||
443 | * Count used inodes as free so we can tell | ||
444 | * when the chunk is used up. | ||
445 | */ | ||
446 | irbp->ir_freecount++; | ||
447 | |||
448 | /* | ||
449 | * Get the inode and fill in a single buffer. | ||
450 | */ | ||
451 | ubused = statstruct_size; | ||
452 | error = formatter(mp, ino, ubufp, ubleft, | ||
453 | &ubused, &fmterror); | ||
454 | if (fmterror == BULKSTAT_RV_NOTHING) { | ||
455 | if (error && error != ENOENT && | ||
456 | error != EINVAL) { | ||
457 | ubleft = 0; | ||
458 | rval = error; | ||
459 | break; | ||
460 | } | ||
461 | lastino = ino; | ||
462 | continue; | ||
463 | } | ||
464 | if (fmterror == BULKSTAT_RV_GIVEUP) { | ||
465 | ubleft = 0; | ||
466 | ASSERT(error); | ||
467 | rval = error; | ||
468 | break; | ||
469 | } | ||
470 | if (ubufp) | ||
471 | ubufp += ubused; | ||
472 | ubleft -= ubused; | ||
473 | ubelem++; | ||
474 | lastino = ino; | ||
475 | } | ||
476 | 506 | ||
477 | cond_resched(); | 507 | cond_resched(); |
478 | } | 508 | } |
@@ -512,58 +542,10 @@ xfs_bulkstat( | |||
512 | return rval; | 542 | return rval; |
513 | } | 543 | } |
514 | 544 | ||
515 | /* | ||
516 | * Return stat information in bulk (by-inode) for the filesystem. | ||
517 | * Special case for non-sequential one inode bulkstat. | ||
518 | */ | ||
519 | int /* error status */ | ||
520 | xfs_bulkstat_single( | ||
521 | xfs_mount_t *mp, /* mount point for filesystem */ | ||
522 | xfs_ino_t *lastinop, /* inode to return */ | ||
523 | char __user *buffer, /* buffer with inode stats */ | ||
524 | int *done) /* 1 if there are more stats to get */ | ||
525 | { | ||
526 | int count; /* count value for bulkstat call */ | ||
527 | int error; /* return value */ | ||
528 | xfs_ino_t ino; /* filesystem inode number */ | ||
529 | int res; /* result from bs1 */ | ||
530 | |||
531 | /* | ||
532 | * note that requesting valid inode numbers which are not allocated | ||
533 | * to inodes will most likely cause xfs_imap_to_bp to generate warning | ||
534 | * messages about bad magic numbers. This is ok. The fact that | ||
535 | * the inode isn't actually an inode is handled by the | ||
536 | * error check below. Done this way to make the usual case faster | ||
537 | * at the expense of the error case. | ||
538 | */ | ||
539 | |||
540 | ino = *lastinop; | ||
541 | error = xfs_bulkstat_one(mp, ino, buffer, sizeof(xfs_bstat_t), | ||
542 | NULL, &res); | ||
543 | if (error) { | ||
544 | /* | ||
545 | * Special case way failed, do it the "long" way | ||
546 | * to see if that works. | ||
547 | */ | ||
548 | (*lastinop)--; | ||
549 | count = 1; | ||
550 | if (xfs_bulkstat(mp, lastinop, &count, xfs_bulkstat_one, | ||
551 | sizeof(xfs_bstat_t), buffer, done)) | ||
552 | return error; | ||
553 | if (count == 0 || (xfs_ino_t)*lastinop != ino) | ||
554 | return error == EFSCORRUPTED ? | ||
555 | XFS_ERROR(EINVAL) : error; | ||
556 | else | ||
557 | return 0; | ||
558 | } | ||
559 | *done = 0; | ||
560 | return 0; | ||
561 | } | ||
562 | |||
563 | int | 545 | int |
564 | xfs_inumbers_fmt( | 546 | xfs_inumbers_fmt( |
565 | void __user *ubuffer, /* buffer to write to */ | 547 | void __user *ubuffer, /* buffer to write to */ |
566 | const xfs_inogrp_t *buffer, /* buffer to read from */ | 548 | const struct xfs_inogrp *buffer, /* buffer to read from */ |
567 | long count, /* # of elements to read */ | 549 | long count, /* # of elements to read */ |
568 | long *written) /* # of bytes written */ | 550 | long *written) /* # of bytes written */ |
569 | { | 551 | { |
@@ -578,127 +560,104 @@ xfs_inumbers_fmt( | |||
578 | */ | 560 | */ |
579 | int /* error status */ | 561 | int /* error status */ |
580 | xfs_inumbers( | 562 | xfs_inumbers( |
581 | xfs_mount_t *mp, /* mount point for filesystem */ | 563 | struct xfs_mount *mp,/* mount point for filesystem */ |
582 | xfs_ino_t *lastino, /* last inode returned */ | 564 | xfs_ino_t *lastino,/* last inode returned */ |
583 | int *count, /* size of buffer/count returned */ | 565 | int *count,/* size of buffer/count returned */ |
584 | void __user *ubuffer,/* buffer with inode descriptions */ | 566 | void __user *ubuffer,/* buffer with inode descriptions */ |
585 | inumbers_fmt_pf formatter) | 567 | inumbers_fmt_pf formatter) |
586 | { | 568 | { |
587 | xfs_buf_t *agbp; | 569 | xfs_agnumber_t agno = XFS_INO_TO_AGNO(mp, *lastino); |
588 | xfs_agino_t agino; | 570 | xfs_agino_t agino = XFS_INO_TO_AGINO(mp, *lastino); |
589 | xfs_agnumber_t agno; | 571 | struct xfs_btree_cur *cur = NULL; |
590 | int bcount; | 572 | struct xfs_buf *agbp = NULL; |
591 | xfs_inogrp_t *buffer; | 573 | struct xfs_inogrp *buffer; |
592 | int bufidx; | 574 | int bcount; |
593 | xfs_btree_cur_t *cur; | 575 | int left = *count; |
594 | int error; | 576 | int bufidx = 0; |
595 | xfs_inobt_rec_incore_t r; | 577 | int error = 0; |
596 | int i; | 578 | |
597 | xfs_ino_t ino; | ||
598 | int left; | ||
599 | int tmp; | ||
600 | |||
601 | ino = (xfs_ino_t)*lastino; | ||
602 | agno = XFS_INO_TO_AGNO(mp, ino); | ||
603 | agino = XFS_INO_TO_AGINO(mp, ino); | ||
604 | left = *count; | ||
605 | *count = 0; | 579 | *count = 0; |
580 | if (agno >= mp->m_sb.sb_agcount || | ||
581 | *lastino != XFS_AGINO_TO_INO(mp, agno, agino)) | ||
582 | return error; | ||
583 | |||
606 | bcount = MIN(left, (int)(PAGE_SIZE / sizeof(*buffer))); | 584 | bcount = MIN(left, (int)(PAGE_SIZE / sizeof(*buffer))); |
607 | buffer = kmem_alloc(bcount * sizeof(*buffer), KM_SLEEP); | 585 | buffer = kmem_alloc(bcount * sizeof(*buffer), KM_SLEEP); |
608 | error = bufidx = 0; | 586 | do { |
609 | cur = NULL; | 587 | struct xfs_inobt_rec_incore r; |
610 | agbp = NULL; | 588 | int stat; |
611 | while (left > 0 && agno < mp->m_sb.sb_agcount) { | 589 | |
612 | if (agbp == NULL) { | 590 | if (!agbp) { |
613 | error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp); | 591 | error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp); |
614 | if (error) { | 592 | if (error) |
615 | /* | 593 | break; |
616 | * If we can't read the AGI of this ag, | 594 | |
617 | * then just skip to the next one. | ||
618 | */ | ||
619 | ASSERT(cur == NULL); | ||
620 | agbp = NULL; | ||
621 | agno++; | ||
622 | agino = 0; | ||
623 | continue; | ||
624 | } | ||
625 | cur = xfs_inobt_init_cursor(mp, NULL, agbp, agno, | 595 | cur = xfs_inobt_init_cursor(mp, NULL, agbp, agno, |
626 | XFS_BTNUM_INO); | 596 | XFS_BTNUM_INO); |
627 | error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_GE, | 597 | error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_GE, |
628 | &tmp); | 598 | &stat); |
629 | if (error) { | 599 | if (error) |
630 | xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); | 600 | break; |
631 | cur = NULL; | 601 | if (!stat) |
632 | xfs_buf_relse(agbp); | 602 | goto next_ag; |
633 | agbp = NULL; | ||
634 | /* | ||
635 | * Move up the last inode in the current | ||
636 | * chunk. The lookup_ge will always get | ||
637 | * us the first inode in the next chunk. | ||
638 | */ | ||
639 | agino += XFS_INODES_PER_CHUNK - 1; | ||
640 | continue; | ||
641 | } | ||
642 | } | ||
643 | error = xfs_inobt_get_rec(cur, &r, &i); | ||
644 | if (error || i == 0) { | ||
645 | xfs_buf_relse(agbp); | ||
646 | agbp = NULL; | ||
647 | xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); | ||
648 | cur = NULL; | ||
649 | agno++; | ||
650 | agino = 0; | ||
651 | continue; | ||
652 | } | 603 | } |
604 | |||
605 | error = xfs_inobt_get_rec(cur, &r, &stat); | ||
606 | if (error) | ||
607 | break; | ||
608 | if (!stat) | ||
609 | goto next_ag; | ||
610 | |||
653 | agino = r.ir_startino + XFS_INODES_PER_CHUNK - 1; | 611 | agino = r.ir_startino + XFS_INODES_PER_CHUNK - 1; |
654 | buffer[bufidx].xi_startino = | 612 | buffer[bufidx].xi_startino = |
655 | XFS_AGINO_TO_INO(mp, agno, r.ir_startino); | 613 | XFS_AGINO_TO_INO(mp, agno, r.ir_startino); |
656 | buffer[bufidx].xi_alloccount = | 614 | buffer[bufidx].xi_alloccount = |
657 | XFS_INODES_PER_CHUNK - r.ir_freecount; | 615 | XFS_INODES_PER_CHUNK - r.ir_freecount; |
658 | buffer[bufidx].xi_allocmask = ~r.ir_free; | 616 | buffer[bufidx].xi_allocmask = ~r.ir_free; |
659 | bufidx++; | 617 | if (++bufidx == bcount) { |
660 | left--; | 618 | long written; |
661 | if (bufidx == bcount) { | 619 | |
662 | long written; | 620 | error = formatter(ubuffer, buffer, bufidx, &written); |
663 | if (formatter(ubuffer, buffer, bufidx, &written)) { | 621 | if (error) |
664 | error = XFS_ERROR(EFAULT); | ||
665 | break; | 622 | break; |
666 | } | ||
667 | ubuffer += written; | 623 | ubuffer += written; |
668 | *count += bufidx; | 624 | *count += bufidx; |
669 | bufidx = 0; | 625 | bufidx = 0; |
670 | } | 626 | } |
671 | if (left) { | 627 | if (!--left) |
672 | error = xfs_btree_increment(cur, 0, &tmp); | 628 | break; |
673 | if (error) { | 629 | |
674 | xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); | 630 | error = xfs_btree_increment(cur, 0, &stat); |
675 | cur = NULL; | 631 | if (error) |
676 | xfs_buf_relse(agbp); | 632 | break; |
677 | agbp = NULL; | 633 | if (stat) |
678 | /* | 634 | continue; |
679 | * The agino value has already been bumped. | 635 | |
680 | * Just try to skip up to it. | 636 | next_ag: |
681 | */ | 637 | xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); |
682 | agino += XFS_INODES_PER_CHUNK; | 638 | cur = NULL; |
683 | continue; | 639 | xfs_buf_relse(agbp); |
684 | } | 640 | agbp = NULL; |
685 | } | 641 | agino = 0; |
686 | } | 642 | } while (++agno < mp->m_sb.sb_agcount); |
643 | |||
687 | if (!error) { | 644 | if (!error) { |
688 | if (bufidx) { | 645 | if (bufidx) { |
689 | long written; | 646 | long written; |
690 | if (formatter(ubuffer, buffer, bufidx, &written)) | 647 | |
691 | error = XFS_ERROR(EFAULT); | 648 | error = formatter(ubuffer, buffer, bufidx, &written); |
692 | else | 649 | if (!error) |
693 | *count += bufidx; | 650 | *count += bufidx; |
694 | } | 651 | } |
695 | *lastino = XFS_AGINO_TO_INO(mp, agno, agino); | 652 | *lastino = XFS_AGINO_TO_INO(mp, agno, agino); |
696 | } | 653 | } |
654 | |||
697 | kmem_free(buffer); | 655 | kmem_free(buffer); |
698 | if (cur) | 656 | if (cur) |
699 | xfs_btree_del_cursor(cur, (error ? XFS_BTREE_ERROR : | 657 | xfs_btree_del_cursor(cur, (error ? XFS_BTREE_ERROR : |
700 | XFS_BTREE_NOERROR)); | 658 | XFS_BTREE_NOERROR)); |
701 | if (agbp) | 659 | if (agbp) |
702 | xfs_buf_relse(agbp); | 660 | xfs_buf_relse(agbp); |
661 | |||
703 | return error; | 662 | return error; |
704 | } | 663 | } |
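Most of the churn in xfs_itable.c above is a factoring change: the readahead loop moves into xfs_bulkstat_ichunk_ra(), the partial-chunk lookup into xfs_bulkstat_grab_ichunk(), and the per-chunk formatter loop into xfs_bulkstat_ag_ichunk(), which carries its cursor state (last inode returned, user buffer pointer, bytes left, elements used) in the new struct xfs_bulkstat_agichunk so the caller can read the updated values back after each chunk. A compilable userspace sketch of that "thread a context struct through a per-chunk helper" pattern, with all names hypothetical:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Simplified stand-in for struct xfs_bulkstat_agichunk. */
struct chunk_ctx {
	unsigned long long	last;	/* last item emitted */
	char			*out;	/* output cursor */
	size_t			left;	/* bytes left in the output buffer */
	int			elems;	/* records emitted so far */
};

/* Emit up to @count records starting at @first, advancing the context. */
static int process_chunk(unsigned long long first, int count, size_t recsize,
			 struct chunk_ctx *ctx)
{
	for (int i = 0; i < count && ctx->left >= recsize; i++) {
		memset(ctx->out, 0, recsize);	/* "format" one record */
		ctx->out += recsize;
		ctx->left -= recsize;
		ctx->elems++;
		ctx->last = first + i;
	}
	return 0;
}

int main(void)
{
	char buf[64];
	struct chunk_ctx ctx = { .out = buf, .left = sizeof(buf) };

	/* Two chunks of 8 records each, 16 bytes per record: only 4 fit. */
	process_chunk(0, 8, 16, &ctx);
	process_chunk(8, 8, 16, &ctx);
	printf("emitted %d records, last=%llu, %zu bytes left\n",
	       ctx.elems, ctx.last, ctx.left);
	return 0;
}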
diff --git a/fs/xfs/xfs_itable.h b/fs/xfs/xfs_itable.h
index 97295d91d170..aaed08022eb9 100644
--- a/fs/xfs/xfs_itable.h
+++ b/fs/xfs/xfs_itable.h
@@ -30,6 +30,22 @@ typedef int (*bulkstat_one_pf)(struct xfs_mount *mp, | |||
30 | int *ubused, | 30 | int *ubused, |
31 | int *stat); | 31 | int *stat); |
32 | 32 | ||
33 | struct xfs_bulkstat_agichunk { | ||
34 | xfs_ino_t ac_lastino; /* last inode returned */ | ||
35 | char __user **ac_ubuffer;/* pointer into user's buffer */ | ||
36 | int ac_ubleft; /* bytes left in user's buffer */ | ||
37 | int ac_ubelem; /* spaces used in user's buffer */ | ||
38 | }; | ||
39 | |||
40 | int | ||
41 | xfs_bulkstat_ag_ichunk( | ||
42 | struct xfs_mount *mp, | ||
43 | xfs_agnumber_t agno, | ||
44 | struct xfs_inobt_rec_incore *irbp, | ||
45 | bulkstat_one_pf formatter, | ||
46 | size_t statstruct_size, | ||
47 | struct xfs_bulkstat_agichunk *acp); | ||
48 | |||
33 | /* | 49 | /* |
34 | * Values for stat return value. | 50 | * Values for stat return value. |
35 | */ | 51 | */ |
@@ -50,13 +66,6 @@ xfs_bulkstat( | |||
50 | char __user *ubuffer,/* buffer with inode stats */ | 66 | char __user *ubuffer,/* buffer with inode stats */ |
51 | int *done); /* 1 if there are more stats to get */ | 67 | int *done); /* 1 if there are more stats to get */ |
52 | 68 | ||
53 | int | ||
54 | xfs_bulkstat_single( | ||
55 | xfs_mount_t *mp, | ||
56 | xfs_ino_t *lastinop, | ||
57 | char __user *buffer, | ||
58 | int *done); | ||
59 | |||
60 | typedef int (*bulkstat_one_fmt_pf)( /* used size in bytes or negative error */ | 69 | typedef int (*bulkstat_one_fmt_pf)( /* used size in bytes or negative error */ |
61 | void __user *ubuffer, /* buffer to write to */ | 70 | void __user *ubuffer, /* buffer to write to */ |
62 | int ubsize, /* remaining user buffer sz */ | 71 | int ubsize, /* remaining user buffer sz */ |
diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h
index 825249d2dfc1..d10dc8f397c9 100644
--- a/fs/xfs/xfs_linux.h
+++ b/fs/xfs/xfs_linux.h
@@ -21,18 +21,6 @@ | |||
21 | #include <linux/types.h> | 21 | #include <linux/types.h> |
22 | 22 | ||
23 | /* | 23 | /* |
24 | * XFS_BIG_BLKNOS needs block layer disk addresses to be 64 bits. | ||
25 | * XFS_BIG_INUMS requires XFS_BIG_BLKNOS to be set. | ||
26 | */ | ||
27 | #if defined(CONFIG_LBDAF) || (BITS_PER_LONG == 64) | ||
28 | # define XFS_BIG_BLKNOS 1 | ||
29 | # define XFS_BIG_INUMS 1 | ||
30 | #else | ||
31 | # define XFS_BIG_BLKNOS 0 | ||
32 | # define XFS_BIG_INUMS 0 | ||
33 | #endif | ||
34 | |||
35 | /* | ||
36 | * Kernel specific type declarations for XFS | 24 | * Kernel specific type declarations for XFS |
37 | */ | 25 | */ |
38 | typedef signed char __int8_t; | 26 | typedef signed char __int8_t; |
@@ -113,7 +101,7 @@ typedef __uint64_t __psunsigned_t; | |||
113 | #include <asm/byteorder.h> | 101 | #include <asm/byteorder.h> |
114 | #include <asm/unaligned.h> | 102 | #include <asm/unaligned.h> |
115 | 103 | ||
116 | #include "xfs_vnode.h" | 104 | #include "xfs_fs.h" |
117 | #include "xfs_stats.h" | 105 | #include "xfs_stats.h" |
118 | #include "xfs_sysctl.h" | 106 | #include "xfs_sysctl.h" |
119 | #include "xfs_iops.h" | 107 | #include "xfs_iops.h" |
@@ -191,6 +179,17 @@ typedef __uint64_t __psunsigned_t; | |||
191 | #define MAX(a,b) (max(a,b)) | 179 | #define MAX(a,b) (max(a,b)) |
192 | #define howmany(x, y) (((x)+((y)-1))/(y)) | 180 | #define howmany(x, y) (((x)+((y)-1))/(y)) |
193 | 181 | ||
182 | /* | ||
183 | * XFS wrapper structure for sysfs support. It depends on external data | ||
184 | * structures and is embedded in various internal data structures to implement | ||
185 | * the XFS sysfs object hierarchy. Define it here for broad access throughout | ||
186 | * the codebase. | ||
187 | */ | ||
188 | struct xfs_kobj { | ||
189 | struct kobject kobject; | ||
190 | struct completion complete; | ||
191 | }; | ||
192 | |||
194 | /* Kernel uid/gid conversion. These are used to convert to/from the on disk | 193 | /* Kernel uid/gid conversion. These are used to convert to/from the on disk |
195 | * uid_t/gid_t types to the kuid_t/kgid_t types that the kernel uses internally. | 194 | * uid_t/gid_t types to the kuid_t/kgid_t types that the kernel uses internally. |
196 | * The conversion here is type only, the value will remain the same since we | 195 | * The conversion here is type only, the value will remain the same since we |
@@ -331,7 +330,7 @@ static inline __uint64_t roundup_64(__uint64_t x, __uint32_t y) | |||
331 | { | 330 | { |
332 | x += y - 1; | 331 | x += y - 1; |
333 | do_div(x, y); | 332 | do_div(x, y); |
334 | return(x * y); | 333 | return x * y; |
335 | } | 334 | } |
336 | 335 | ||
337 | static inline __uint64_t howmany_64(__uint64_t x, __uint32_t y) | 336 | static inline __uint64_t howmany_64(__uint64_t x, __uint32_t y) |
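The new struct xfs_kobj pairs a kobject with a completion so teardown can wait for sysfs to drop its last reference before the structure embedding the kobject is freed. The helpers that use it live in the new xfs_sysfs.h/.c, which are not shown in this section, so the sketch below is an assumption about how such a wrapper is typically wired up (a kobj_type whose ->release() fires the completion), not a copy of the XFS implementation:

/* Sketch only: common kobject + completion teardown pattern. */
#include <linux/kobject.h>
#include <linux/completion.h>

struct xfs_kobj {
	struct kobject		kobject;
	struct completion	complete;
};

/* ->release() of the kobj_type: runs once the last reference is dropped. */
static void xfs_kobj_release(struct kobject *kobject)
{
	struct xfs_kobj *kobj = container_of(kobject, struct xfs_kobj, kobject);

	complete(&kobj->complete);
}

/* The kobj_type passed here must set .release = xfs_kobj_release. */
static int xfs_kobj_init(struct xfs_kobj *kobj, struct kobj_type *ktype,
			 struct kobject *parent, const char *name)
{
	init_completion(&kobj->complete);
	return kobject_init_and_add(&kobj->kobject, ktype, parent, "%s", name);
}

static void xfs_kobj_del(struct xfs_kobj *kobj)
{
	kobject_del(&kobj->kobject);
	kobject_put(&kobj->kobject);
	/* Block until xfs_kobj_release() has run. */
	wait_for_completion(&kobj->complete);
}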
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 292308dede6d..ca4fd5bd8522 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -34,6 +34,7 @@ | |||
34 | #include "xfs_trace.h" | 34 | #include "xfs_trace.h" |
35 | #include "xfs_fsops.h" | 35 | #include "xfs_fsops.h" |
36 | #include "xfs_cksum.h" | 36 | #include "xfs_cksum.h" |
37 | #include "xfs_sysfs.h" | ||
37 | 38 | ||
38 | kmem_zone_t *xfs_log_ticket_zone; | 39 | kmem_zone_t *xfs_log_ticket_zone; |
39 | 40 | ||
@@ -283,7 +284,7 @@ xlog_grant_head_wait( | |||
283 | return 0; | 284 | return 0; |
284 | shutdown: | 285 | shutdown: |
285 | list_del_init(&tic->t_queue); | 286 | list_del_init(&tic->t_queue); |
286 | return XFS_ERROR(EIO); | 287 | return -EIO; |
287 | } | 288 | } |
288 | 289 | ||
289 | /* | 290 | /* |
@@ -377,7 +378,7 @@ xfs_log_regrant( | |||
377 | int error = 0; | 378 | int error = 0; |
378 | 379 | ||
379 | if (XLOG_FORCED_SHUTDOWN(log)) | 380 | if (XLOG_FORCED_SHUTDOWN(log)) |
380 | return XFS_ERROR(EIO); | 381 | return -EIO; |
381 | 382 | ||
382 | XFS_STATS_INC(xs_try_logspace); | 383 | XFS_STATS_INC(xs_try_logspace); |
383 | 384 | ||
@@ -446,7 +447,7 @@ xfs_log_reserve( | |||
446 | ASSERT(client == XFS_TRANSACTION || client == XFS_LOG); | 447 | ASSERT(client == XFS_TRANSACTION || client == XFS_LOG); |
447 | 448 | ||
448 | if (XLOG_FORCED_SHUTDOWN(log)) | 449 | if (XLOG_FORCED_SHUTDOWN(log)) |
449 | return XFS_ERROR(EIO); | 450 | return -EIO; |
450 | 451 | ||
451 | XFS_STATS_INC(xs_try_logspace); | 452 | XFS_STATS_INC(xs_try_logspace); |
452 | 453 | ||
@@ -454,7 +455,7 @@ xfs_log_reserve( | |||
454 | tic = xlog_ticket_alloc(log, unit_bytes, cnt, client, permanent, | 455 | tic = xlog_ticket_alloc(log, unit_bytes, cnt, client, permanent, |
455 | KM_SLEEP | KM_MAYFAIL); | 456 | KM_SLEEP | KM_MAYFAIL); |
456 | if (!tic) | 457 | if (!tic) |
457 | return XFS_ERROR(ENOMEM); | 458 | return -ENOMEM; |
458 | 459 | ||
459 | tic->t_trans_type = t_type; | 460 | tic->t_trans_type = t_type; |
460 | *ticp = tic; | 461 | *ticp = tic; |
@@ -590,7 +591,7 @@ xfs_log_release_iclog( | |||
590 | { | 591 | { |
591 | if (xlog_state_release_iclog(mp->m_log, iclog)) { | 592 | if (xlog_state_release_iclog(mp->m_log, iclog)) { |
592 | xfs_force_shutdown(mp, SHUTDOWN_LOG_IO_ERROR); | 593 | xfs_force_shutdown(mp, SHUTDOWN_LOG_IO_ERROR); |
593 | return EIO; | 594 | return -EIO; |
594 | } | 595 | } |
595 | 596 | ||
596 | return 0; | 597 | return 0; |
@@ -628,7 +629,7 @@ xfs_log_mount( | |||
628 | 629 | ||
629 | mp->m_log = xlog_alloc_log(mp, log_target, blk_offset, num_bblks); | 630 | mp->m_log = xlog_alloc_log(mp, log_target, blk_offset, num_bblks); |
630 | if (IS_ERR(mp->m_log)) { | 631 | if (IS_ERR(mp->m_log)) { |
631 | error = -PTR_ERR(mp->m_log); | 632 | error = PTR_ERR(mp->m_log); |
632 | goto out; | 633 | goto out; |
633 | } | 634 | } |
634 | 635 | ||
@@ -652,18 +653,18 @@ xfs_log_mount( | |||
652 | xfs_warn(mp, | 653 | xfs_warn(mp, |
653 | "Log size %d blocks too small, minimum size is %d blocks", | 654 | "Log size %d blocks too small, minimum size is %d blocks", |
654 | mp->m_sb.sb_logblocks, min_logfsbs); | 655 | mp->m_sb.sb_logblocks, min_logfsbs); |
655 | error = EINVAL; | 656 | error = -EINVAL; |
656 | } else if (mp->m_sb.sb_logblocks > XFS_MAX_LOG_BLOCKS) { | 657 | } else if (mp->m_sb.sb_logblocks > XFS_MAX_LOG_BLOCKS) { |
657 | xfs_warn(mp, | 658 | xfs_warn(mp, |
658 | "Log size %d blocks too large, maximum size is %lld blocks", | 659 | "Log size %d blocks too large, maximum size is %lld blocks", |
659 | mp->m_sb.sb_logblocks, XFS_MAX_LOG_BLOCKS); | 660 | mp->m_sb.sb_logblocks, XFS_MAX_LOG_BLOCKS); |
660 | error = EINVAL; | 661 | error = -EINVAL; |
661 | } else if (XFS_FSB_TO_B(mp, mp->m_sb.sb_logblocks) > XFS_MAX_LOG_BYTES) { | 662 | } else if (XFS_FSB_TO_B(mp, mp->m_sb.sb_logblocks) > XFS_MAX_LOG_BYTES) { |
662 | xfs_warn(mp, | 663 | xfs_warn(mp, |
663 | "log size %lld bytes too large, maximum size is %lld bytes", | 664 | "log size %lld bytes too large, maximum size is %lld bytes", |
664 | XFS_FSB_TO_B(mp, mp->m_sb.sb_logblocks), | 665 | XFS_FSB_TO_B(mp, mp->m_sb.sb_logblocks), |
665 | XFS_MAX_LOG_BYTES); | 666 | XFS_MAX_LOG_BYTES); |
666 | error = EINVAL; | 667 | error = -EINVAL; |
667 | } | 668 | } |
668 | if (error) { | 669 | if (error) { |
669 | if (xfs_sb_version_hascrc(&mp->m_sb)) { | 670 | if (xfs_sb_version_hascrc(&mp->m_sb)) { |
@@ -707,6 +708,11 @@ xfs_log_mount( | |||
707 | } | 708 | } |
708 | } | 709 | } |
709 | 710 | ||
711 | error = xfs_sysfs_init(&mp->m_log->l_kobj, &xfs_log_ktype, &mp->m_kobj, | ||
712 | "log"); | ||
713 | if (error) | ||
714 | goto out_destroy_ail; | ||
715 | |||
710 | /* Normal transactions can now occur */ | 716 | /* Normal transactions can now occur */ |
711 | mp->m_log->l_flags &= ~XLOG_ACTIVE_RECOVERY; | 717 | mp->m_log->l_flags &= ~XLOG_ACTIVE_RECOVERY; |
712 | 718 | ||
@@ -947,6 +953,9 @@ xfs_log_unmount( | |||
947 | xfs_log_quiesce(mp); | 953 | xfs_log_quiesce(mp); |
948 | 954 | ||
949 | xfs_trans_ail_destroy(mp); | 955 | xfs_trans_ail_destroy(mp); |
956 | |||
957 | xfs_sysfs_del(&mp->m_log->l_kobj); | ||
958 | |||
950 | xlog_dealloc_log(mp->m_log); | 959 | xlog_dealloc_log(mp->m_log); |
951 | } | 960 | } |
952 | 961 | ||
@@ -1313,7 +1322,7 @@ xlog_alloc_log( | |||
1313 | xlog_in_core_t *iclog, *prev_iclog=NULL; | 1322 | xlog_in_core_t *iclog, *prev_iclog=NULL; |
1314 | xfs_buf_t *bp; | 1323 | xfs_buf_t *bp; |
1315 | int i; | 1324 | int i; |
1316 | int error = ENOMEM; | 1325 | int error = -ENOMEM; |
1317 | uint log2_size = 0; | 1326 | uint log2_size = 0; |
1318 | 1327 | ||
1319 | log = kmem_zalloc(sizeof(struct xlog), KM_MAYFAIL); | 1328 | log = kmem_zalloc(sizeof(struct xlog), KM_MAYFAIL); |
@@ -1340,7 +1349,7 @@ xlog_alloc_log( | |||
1340 | xlog_grant_head_init(&log->l_reserve_head); | 1349 | xlog_grant_head_init(&log->l_reserve_head); |
1341 | xlog_grant_head_init(&log->l_write_head); | 1350 | xlog_grant_head_init(&log->l_write_head); |
1342 | 1351 | ||
1343 | error = EFSCORRUPTED; | 1352 | error = -EFSCORRUPTED; |
1344 | if (xfs_sb_version_hassector(&mp->m_sb)) { | 1353 | if (xfs_sb_version_hassector(&mp->m_sb)) { |
1345 | log2_size = mp->m_sb.sb_logsectlog; | 1354 | log2_size = mp->m_sb.sb_logsectlog; |
1346 | if (log2_size < BBSHIFT) { | 1355 | if (log2_size < BBSHIFT) { |
@@ -1369,8 +1378,14 @@ xlog_alloc_log( | |||
1369 | 1378 | ||
1370 | xlog_get_iclog_buffer_size(mp, log); | 1379 | xlog_get_iclog_buffer_size(mp, log); |
1371 | 1380 | ||
1372 | error = ENOMEM; | 1381 | /* |
1373 | bp = xfs_buf_alloc(mp->m_logdev_targp, 0, BTOBB(log->l_iclog_size), 0); | 1382 | * Use a NULL block for the extra log buffer used during splits so that |
1383 | * it will trigger errors if we ever try to do IO on it without first | ||
1384 | * having set it up properly. | ||
1385 | */ | ||
1386 | error = -ENOMEM; | ||
1387 | bp = xfs_buf_alloc(mp->m_logdev_targp, XFS_BUF_DADDR_NULL, | ||
1388 | BTOBB(log->l_iclog_size), 0); | ||
1374 | if (!bp) | 1389 | if (!bp) |
1375 | goto out_free_log; | 1390 | goto out_free_log; |
1376 | 1391 | ||
@@ -1463,7 +1478,7 @@ out_free_iclog: | |||
1463 | out_free_log: | 1478 | out_free_log: |
1464 | kmem_free(log); | 1479 | kmem_free(log); |
1465 | out: | 1480 | out: |
1466 | return ERR_PTR(-error); | 1481 | return ERR_PTR(error); |
1467 | } /* xlog_alloc_log */ | 1482 | } /* xlog_alloc_log */ |
1468 | 1483 | ||
1469 | 1484 | ||
@@ -1661,7 +1676,7 @@ xlog_bdstrat( | |||
1661 | 1676 | ||
1662 | xfs_buf_lock(bp); | 1677 | xfs_buf_lock(bp); |
1663 | if (iclog->ic_state & XLOG_STATE_IOERROR) { | 1678 | if (iclog->ic_state & XLOG_STATE_IOERROR) { |
1664 | xfs_buf_ioerror(bp, EIO); | 1679 | xfs_buf_ioerror(bp, -EIO); |
1665 | xfs_buf_stale(bp); | 1680 | xfs_buf_stale(bp); |
1666 | xfs_buf_ioend(bp, 0); | 1681 | xfs_buf_ioend(bp, 0); |
1667 | /* | 1682 | /* |
@@ -2360,7 +2375,7 @@ xlog_write( | |||
2360 | 2375 | ||
2361 | ophdr = xlog_write_setup_ophdr(log, ptr, ticket, flags); | 2376 | ophdr = xlog_write_setup_ophdr(log, ptr, ticket, flags); |
2362 | if (!ophdr) | 2377 | if (!ophdr) |
2363 | return XFS_ERROR(EIO); | 2378 | return -EIO; |
2364 | 2379 | ||
2365 | xlog_write_adv_cnt(&ptr, &len, &log_offset, | 2380 | xlog_write_adv_cnt(&ptr, &len, &log_offset, |
2366 | sizeof(struct xlog_op_header)); | 2381 | sizeof(struct xlog_op_header)); |
@@ -2859,7 +2874,7 @@ restart: | |||
2859 | spin_lock(&log->l_icloglock); | 2874 | spin_lock(&log->l_icloglock); |
2860 | if (XLOG_FORCED_SHUTDOWN(log)) { | 2875 | if (XLOG_FORCED_SHUTDOWN(log)) { |
2861 | spin_unlock(&log->l_icloglock); | 2876 | spin_unlock(&log->l_icloglock); |
2862 | return XFS_ERROR(EIO); | 2877 | return -EIO; |
2863 | } | 2878 | } |
2864 | 2879 | ||
2865 | iclog = log->l_iclog; | 2880 | iclog = log->l_iclog; |
@@ -3047,7 +3062,7 @@ xlog_state_release_iclog( | |||
3047 | int sync = 0; /* do we sync? */ | 3062 | int sync = 0; /* do we sync? */ |
3048 | 3063 | ||
3049 | if (iclog->ic_state & XLOG_STATE_IOERROR) | 3064 | if (iclog->ic_state & XLOG_STATE_IOERROR) |
3050 | return XFS_ERROR(EIO); | 3065 | return -EIO; |
3051 | 3066 | ||
3052 | ASSERT(atomic_read(&iclog->ic_refcnt) > 0); | 3067 | ASSERT(atomic_read(&iclog->ic_refcnt) > 0); |
3053 | if (!atomic_dec_and_lock(&iclog->ic_refcnt, &log->l_icloglock)) | 3068 | if (!atomic_dec_and_lock(&iclog->ic_refcnt, &log->l_icloglock)) |
@@ -3055,7 +3070,7 @@ xlog_state_release_iclog( | |||
3055 | 3070 | ||
3056 | if (iclog->ic_state & XLOG_STATE_IOERROR) { | 3071 | if (iclog->ic_state & XLOG_STATE_IOERROR) { |
3057 | spin_unlock(&log->l_icloglock); | 3072 | spin_unlock(&log->l_icloglock); |
3058 | return XFS_ERROR(EIO); | 3073 | return -EIO; |
3059 | } | 3074 | } |
3060 | ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE || | 3075 | ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE || |
3061 | iclog->ic_state == XLOG_STATE_WANT_SYNC); | 3076 | iclog->ic_state == XLOG_STATE_WANT_SYNC); |
@@ -3172,7 +3187,7 @@ _xfs_log_force( | |||
3172 | iclog = log->l_iclog; | 3187 | iclog = log->l_iclog; |
3173 | if (iclog->ic_state & XLOG_STATE_IOERROR) { | 3188 | if (iclog->ic_state & XLOG_STATE_IOERROR) { |
3174 | spin_unlock(&log->l_icloglock); | 3189 | spin_unlock(&log->l_icloglock); |
3175 | return XFS_ERROR(EIO); | 3190 | return -EIO; |
3176 | } | 3191 | } |
3177 | 3192 | ||
3178 | /* If the head iclog is not active nor dirty, we just attach | 3193 | /* If the head iclog is not active nor dirty, we just attach |
@@ -3210,7 +3225,7 @@ _xfs_log_force( | |||
3210 | spin_unlock(&log->l_icloglock); | 3225 | spin_unlock(&log->l_icloglock); |
3211 | 3226 | ||
3212 | if (xlog_state_release_iclog(log, iclog)) | 3227 | if (xlog_state_release_iclog(log, iclog)) |
3213 | return XFS_ERROR(EIO); | 3228 | return -EIO; |
3214 | 3229 | ||
3215 | if (log_flushed) | 3230 | if (log_flushed) |
3216 | *log_flushed = 1; | 3231 | *log_flushed = 1; |
@@ -3246,7 +3261,7 @@ maybe_sleep: | |||
3246 | */ | 3261 | */ |
3247 | if (iclog->ic_state & XLOG_STATE_IOERROR) { | 3262 | if (iclog->ic_state & XLOG_STATE_IOERROR) { |
3248 | spin_unlock(&log->l_icloglock); | 3263 | spin_unlock(&log->l_icloglock); |
3249 | return XFS_ERROR(EIO); | 3264 | return -EIO; |
3250 | } | 3265 | } |
3251 | XFS_STATS_INC(xs_log_force_sleep); | 3266 | XFS_STATS_INC(xs_log_force_sleep); |
3252 | xlog_wait(&iclog->ic_force_wait, &log->l_icloglock); | 3267 | xlog_wait(&iclog->ic_force_wait, &log->l_icloglock); |
@@ -3256,7 +3271,7 @@ maybe_sleep: | |||
3256 | * and the memory read should be atomic. | 3271 | * and the memory read should be atomic. |
3257 | */ | 3272 | */ |
3258 | if (iclog->ic_state & XLOG_STATE_IOERROR) | 3273 | if (iclog->ic_state & XLOG_STATE_IOERROR) |
3259 | return XFS_ERROR(EIO); | 3274 | return -EIO; |
3260 | if (log_flushed) | 3275 | if (log_flushed) |
3261 | *log_flushed = 1; | 3276 | *log_flushed = 1; |
3262 | } else { | 3277 | } else { |
@@ -3324,7 +3339,7 @@ try_again: | |||
3324 | iclog = log->l_iclog; | 3339 | iclog = log->l_iclog; |
3325 | if (iclog->ic_state & XLOG_STATE_IOERROR) { | 3340 | if (iclog->ic_state & XLOG_STATE_IOERROR) { |
3326 | spin_unlock(&log->l_icloglock); | 3341 | spin_unlock(&log->l_icloglock); |
3327 | return XFS_ERROR(EIO); | 3342 | return -EIO; |
3328 | } | 3343 | } |
3329 | 3344 | ||
3330 | do { | 3345 | do { |
@@ -3375,7 +3390,7 @@ try_again: | |||
3375 | xlog_state_switch_iclogs(log, iclog, 0); | 3390 | xlog_state_switch_iclogs(log, iclog, 0); |
3376 | spin_unlock(&log->l_icloglock); | 3391 | spin_unlock(&log->l_icloglock); |
3377 | if (xlog_state_release_iclog(log, iclog)) | 3392 | if (xlog_state_release_iclog(log, iclog)) |
3378 | return XFS_ERROR(EIO); | 3393 | return -EIO; |
3379 | if (log_flushed) | 3394 | if (log_flushed) |
3380 | *log_flushed = 1; | 3395 | *log_flushed = 1; |
3381 | spin_lock(&log->l_icloglock); | 3396 | spin_lock(&log->l_icloglock); |
@@ -3390,7 +3405,7 @@ try_again: | |||
3390 | */ | 3405 | */ |
3391 | if (iclog->ic_state & XLOG_STATE_IOERROR) { | 3406 | if (iclog->ic_state & XLOG_STATE_IOERROR) { |
3392 | spin_unlock(&log->l_icloglock); | 3407 | spin_unlock(&log->l_icloglock); |
3393 | return XFS_ERROR(EIO); | 3408 | return -EIO; |
3394 | } | 3409 | } |
3395 | XFS_STATS_INC(xs_log_force_sleep); | 3410 | XFS_STATS_INC(xs_log_force_sleep); |
3396 | xlog_wait(&iclog->ic_force_wait, &log->l_icloglock); | 3411 | xlog_wait(&iclog->ic_force_wait, &log->l_icloglock); |
@@ -3400,7 +3415,7 @@ try_again: | |||
3400 | * and the memory read should be atomic. | 3415 | * and the memory read should be atomic. |
3401 | */ | 3416 | */ |
3402 | if (iclog->ic_state & XLOG_STATE_IOERROR) | 3417 | if (iclog->ic_state & XLOG_STATE_IOERROR) |
3403 | return XFS_ERROR(EIO); | 3418 | return -EIO; |
3404 | 3419 | ||
3405 | if (log_flushed) | 3420 | if (log_flushed) |
3406 | *log_flushed = 1; | 3421 | *log_flushed = 1; |
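Every hunk above makes the same conversion: the XFS_ERROR() wrapper is dropped and the function returns a negative errno directly, matching the convention used elsewhere in the kernel. A minimal before/after sketch of the pattern, condensed from the hunks above rather than taken from any single one:

    /* old style: positive errno passed through the XFS_ERROR() macro */
    if (XLOG_FORCED_SHUTDOWN(log))
            return XFS_ERROR(EIO);

    /* new style: return the negative errno directly */
    if (XLOG_FORCED_SHUTDOWN(log))
            return -EIO;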
diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c index b3425b34e3d5..f6b79e5325dd 100644 --- a/fs/xfs/xfs_log_cil.c +++ b/fs/xfs/xfs_log_cil.c | |||
@@ -78,8 +78,6 @@ xlog_cil_init_post_recovery( | |||
78 | { | 78 | { |
79 | log->l_cilp->xc_ctx->ticket = xlog_cil_ticket_alloc(log); | 79 | log->l_cilp->xc_ctx->ticket = xlog_cil_ticket_alloc(log); |
80 | log->l_cilp->xc_ctx->sequence = 1; | 80 | log->l_cilp->xc_ctx->sequence = 1; |
81 | log->l_cilp->xc_ctx->commit_lsn = xlog_assign_lsn(log->l_curr_cycle, | ||
82 | log->l_curr_block); | ||
83 | } | 81 | } |
84 | 82 | ||
85 | /* | 83 | /* |
@@ -634,7 +632,7 @@ out_abort_free_ticket: | |||
634 | xfs_log_ticket_put(tic); | 632 | xfs_log_ticket_put(tic); |
635 | out_abort: | 633 | out_abort: |
636 | xlog_cil_committed(ctx, XFS_LI_ABORTED); | 634 | xlog_cil_committed(ctx, XFS_LI_ABORTED); |
637 | return XFS_ERROR(EIO); | 635 | return -EIO; |
638 | } | 636 | } |
639 | 637 | ||
640 | static void | 638 | static void |
@@ -928,12 +926,12 @@ xlog_cil_init( | |||
928 | 926 | ||
929 | cil = kmem_zalloc(sizeof(*cil), KM_SLEEP|KM_MAYFAIL); | 927 | cil = kmem_zalloc(sizeof(*cil), KM_SLEEP|KM_MAYFAIL); |
930 | if (!cil) | 928 | if (!cil) |
931 | return ENOMEM; | 929 | return -ENOMEM; |
932 | 930 | ||
933 | ctx = kmem_zalloc(sizeof(*ctx), KM_SLEEP|KM_MAYFAIL); | 931 | ctx = kmem_zalloc(sizeof(*ctx), KM_SLEEP|KM_MAYFAIL); |
934 | if (!ctx) { | 932 | if (!ctx) { |
935 | kmem_free(cil); | 933 | kmem_free(cil); |
936 | return ENOMEM; | 934 | return -ENOMEM; |
937 | } | 935 | } |
938 | 936 | ||
939 | INIT_WORK(&cil->xc_push_work, xlog_cil_push_work); | 937 | INIT_WORK(&cil->xc_push_work, xlog_cil_push_work); |
diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h index 9bc403a9e54f..db7cbdeb2b42 100644 --- a/fs/xfs/xfs_log_priv.h +++ b/fs/xfs/xfs_log_priv.h | |||
@@ -405,6 +405,8 @@ struct xlog { | |||
405 | struct xlog_grant_head l_reserve_head; | 405 | struct xlog_grant_head l_reserve_head; |
406 | struct xlog_grant_head l_write_head; | 406 | struct xlog_grant_head l_write_head; |
407 | 407 | ||
408 | struct xfs_kobj l_kobj; | ||
409 | |||
408 | /* The following field are used for debugging; need to hold icloglock */ | 410 | /* The following field are used for debugging; need to hold icloglock */ |
409 | #ifdef DEBUG | 411 | #ifdef DEBUG |
410 | char *l_iclog_bak[XLOG_MAX_ICLOGS]; | 412 | char *l_iclog_bak[XLOG_MAX_ICLOGS]; |
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index 981af0f6504b..1fd5787add99 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c | |||
@@ -179,7 +179,7 @@ xlog_bread_noalign( | |||
179 | xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer", | 179 | xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer", |
180 | nbblks); | 180 | nbblks); |
181 | XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp); | 181 | XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp); |
182 | return EFSCORRUPTED; | 182 | return -EFSCORRUPTED; |
183 | } | 183 | } |
184 | 184 | ||
185 | blk_no = round_down(blk_no, log->l_sectBBsize); | 185 | blk_no = round_down(blk_no, log->l_sectBBsize); |
@@ -194,7 +194,7 @@ xlog_bread_noalign( | |||
194 | bp->b_error = 0; | 194 | bp->b_error = 0; |
195 | 195 | ||
196 | if (XFS_FORCED_SHUTDOWN(log->l_mp)) | 196 | if (XFS_FORCED_SHUTDOWN(log->l_mp)) |
197 | return XFS_ERROR(EIO); | 197 | return -EIO; |
198 | 198 | ||
199 | xfs_buf_iorequest(bp); | 199 | xfs_buf_iorequest(bp); |
200 | error = xfs_buf_iowait(bp); | 200 | error = xfs_buf_iowait(bp); |
@@ -268,7 +268,7 @@ xlog_bwrite( | |||
268 | xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer", | 268 | xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer", |
269 | nbblks); | 269 | nbblks); |
270 | XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp); | 270 | XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp); |
271 | return EFSCORRUPTED; | 271 | return -EFSCORRUPTED; |
272 | } | 272 | } |
273 | 273 | ||
274 | blk_no = round_down(blk_no, log->l_sectBBsize); | 274 | blk_no = round_down(blk_no, log->l_sectBBsize); |
@@ -330,14 +330,14 @@ xlog_header_check_recover( | |||
330 | xlog_header_check_dump(mp, head); | 330 | xlog_header_check_dump(mp, head); |
331 | XFS_ERROR_REPORT("xlog_header_check_recover(1)", | 331 | XFS_ERROR_REPORT("xlog_header_check_recover(1)", |
332 | XFS_ERRLEVEL_HIGH, mp); | 332 | XFS_ERRLEVEL_HIGH, mp); |
333 | return XFS_ERROR(EFSCORRUPTED); | 333 | return -EFSCORRUPTED; |
334 | } else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) { | 334 | } else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) { |
335 | xfs_warn(mp, | 335 | xfs_warn(mp, |
336 | "dirty log entry has mismatched uuid - can't recover"); | 336 | "dirty log entry has mismatched uuid - can't recover"); |
337 | xlog_header_check_dump(mp, head); | 337 | xlog_header_check_dump(mp, head); |
338 | XFS_ERROR_REPORT("xlog_header_check_recover(2)", | 338 | XFS_ERROR_REPORT("xlog_header_check_recover(2)", |
339 | XFS_ERRLEVEL_HIGH, mp); | 339 | XFS_ERRLEVEL_HIGH, mp); |
340 | return XFS_ERROR(EFSCORRUPTED); | 340 | return -EFSCORRUPTED; |
341 | } | 341 | } |
342 | return 0; | 342 | return 0; |
343 | } | 343 | } |
@@ -364,7 +364,7 @@ xlog_header_check_mount( | |||
364 | xlog_header_check_dump(mp, head); | 364 | xlog_header_check_dump(mp, head); |
365 | XFS_ERROR_REPORT("xlog_header_check_mount", | 365 | XFS_ERROR_REPORT("xlog_header_check_mount", |
366 | XFS_ERRLEVEL_HIGH, mp); | 366 | XFS_ERRLEVEL_HIGH, mp); |
367 | return XFS_ERROR(EFSCORRUPTED); | 367 | return -EFSCORRUPTED; |
368 | } | 368 | } |
369 | return 0; | 369 | return 0; |
370 | } | 370 | } |
@@ -462,7 +462,7 @@ xlog_find_verify_cycle( | |||
462 | while (!(bp = xlog_get_bp(log, bufblks))) { | 462 | while (!(bp = xlog_get_bp(log, bufblks))) { |
463 | bufblks >>= 1; | 463 | bufblks >>= 1; |
464 | if (bufblks < log->l_sectBBsize) | 464 | if (bufblks < log->l_sectBBsize) |
465 | return ENOMEM; | 465 | return -ENOMEM; |
466 | } | 466 | } |
467 | 467 | ||
468 | for (i = start_blk; i < start_blk + nbblks; i += bufblks) { | 468 | for (i = start_blk; i < start_blk + nbblks; i += bufblks) { |
@@ -524,7 +524,7 @@ xlog_find_verify_log_record( | |||
524 | 524 | ||
525 | if (!(bp = xlog_get_bp(log, num_blks))) { | 525 | if (!(bp = xlog_get_bp(log, num_blks))) { |
526 | if (!(bp = xlog_get_bp(log, 1))) | 526 | if (!(bp = xlog_get_bp(log, 1))) |
527 | return ENOMEM; | 527 | return -ENOMEM; |
528 | smallmem = 1; | 528 | smallmem = 1; |
529 | } else { | 529 | } else { |
530 | error = xlog_bread(log, start_blk, num_blks, bp, &offset); | 530 | error = xlog_bread(log, start_blk, num_blks, bp, &offset); |
@@ -539,7 +539,7 @@ xlog_find_verify_log_record( | |||
539 | xfs_warn(log->l_mp, | 539 | xfs_warn(log->l_mp, |
540 | "Log inconsistent (didn't find previous header)"); | 540 | "Log inconsistent (didn't find previous header)"); |
541 | ASSERT(0); | 541 | ASSERT(0); |
542 | error = XFS_ERROR(EIO); | 542 | error = -EIO; |
543 | goto out; | 543 | goto out; |
544 | } | 544 | } |
545 | 545 | ||
@@ -564,7 +564,7 @@ xlog_find_verify_log_record( | |||
564 | * will be called again for the end of the physical log. | 564 | * will be called again for the end of the physical log. |
565 | */ | 565 | */ |
566 | if (i == -1) { | 566 | if (i == -1) { |
567 | error = -1; | 567 | error = 1; |
568 | goto out; | 568 | goto out; |
569 | } | 569 | } |
570 | 570 | ||
@@ -628,7 +628,12 @@ xlog_find_head( | |||
628 | int error, log_bbnum = log->l_logBBsize; | 628 | int error, log_bbnum = log->l_logBBsize; |
629 | 629 | ||
630 | /* Is the end of the log device zeroed? */ | 630 | /* Is the end of the log device zeroed? */ |
631 | if ((error = xlog_find_zeroed(log, &first_blk)) == -1) { | 631 | error = xlog_find_zeroed(log, &first_blk); |
632 | if (error < 0) { | ||
633 | xfs_warn(log->l_mp, "empty log check failed"); | ||
634 | return error; | ||
635 | } | ||
636 | if (error == 1) { | ||
632 | *return_head_blk = first_blk; | 637 | *return_head_blk = first_blk; |
633 | 638 | ||
634 | /* Is the whole lot zeroed? */ | 639 | /* Is the whole lot zeroed? */ |
@@ -641,15 +646,12 @@ xlog_find_head( | |||
641 | } | 646 | } |
642 | 647 | ||
643 | return 0; | 648 | return 0; |
644 | } else if (error) { | ||
645 | xfs_warn(log->l_mp, "empty log check failed"); | ||
646 | return error; | ||
647 | } | 649 | } |
648 | 650 | ||
649 | first_blk = 0; /* get cycle # of 1st block */ | 651 | first_blk = 0; /* get cycle # of 1st block */ |
650 | bp = xlog_get_bp(log, 1); | 652 | bp = xlog_get_bp(log, 1); |
651 | if (!bp) | 653 | if (!bp) |
652 | return ENOMEM; | 654 | return -ENOMEM; |
653 | 655 | ||
654 | error = xlog_bread(log, 0, 1, bp, &offset); | 656 | error = xlog_bread(log, 0, 1, bp, &offset); |
655 | if (error) | 657 | if (error) |
@@ -818,29 +820,29 @@ validate_head: | |||
818 | start_blk = head_blk - num_scan_bblks; /* don't read head_blk */ | 820 | start_blk = head_blk - num_scan_bblks; /* don't read head_blk */ |
819 | 821 | ||
820 | /* start ptr at last block ptr before head_blk */ | 822 | /* start ptr at last block ptr before head_blk */ |
821 | if ((error = xlog_find_verify_log_record(log, start_blk, | 823 | error = xlog_find_verify_log_record(log, start_blk, &head_blk, 0); |
822 | &head_blk, 0)) == -1) { | 824 | if (error == 1) |
823 | error = XFS_ERROR(EIO); | 825 | error = -EIO; |
824 | goto bp_err; | 826 | if (error) |
825 | } else if (error) | ||
826 | goto bp_err; | 827 | goto bp_err; |
827 | } else { | 828 | } else { |
828 | start_blk = 0; | 829 | start_blk = 0; |
829 | ASSERT(head_blk <= INT_MAX); | 830 | ASSERT(head_blk <= INT_MAX); |
830 | if ((error = xlog_find_verify_log_record(log, start_blk, | 831 | error = xlog_find_verify_log_record(log, start_blk, &head_blk, 0); |
831 | &head_blk, 0)) == -1) { | 832 | if (error < 0) |
833 | goto bp_err; | ||
834 | if (error == 1) { | ||
832 | /* We hit the beginning of the log during our search */ | 835 | /* We hit the beginning of the log during our search */ |
833 | start_blk = log_bbnum - (num_scan_bblks - head_blk); | 836 | start_blk = log_bbnum - (num_scan_bblks - head_blk); |
834 | new_blk = log_bbnum; | 837 | new_blk = log_bbnum; |
835 | ASSERT(start_blk <= INT_MAX && | 838 | ASSERT(start_blk <= INT_MAX && |
836 | (xfs_daddr_t) log_bbnum-start_blk >= 0); | 839 | (xfs_daddr_t) log_bbnum-start_blk >= 0); |
837 | ASSERT(head_blk <= INT_MAX); | 840 | ASSERT(head_blk <= INT_MAX); |
838 | if ((error = xlog_find_verify_log_record(log, | 841 | error = xlog_find_verify_log_record(log, start_blk, |
839 | start_blk, &new_blk, | 842 | &new_blk, (int)head_blk); |
840 | (int)head_blk)) == -1) { | 843 | if (error == 1) |
841 | error = XFS_ERROR(EIO); | 844 | error = -EIO; |
842 | goto bp_err; | 845 | if (error) |
843 | } else if (error) | ||
844 | goto bp_err; | 846 | goto bp_err; |
845 | if (new_blk != log_bbnum) | 847 | if (new_blk != log_bbnum) |
846 | head_blk = new_blk; | 848 | head_blk = new_blk; |
@@ -911,7 +913,7 @@ xlog_find_tail( | |||
911 | 913 | ||
912 | bp = xlog_get_bp(log, 1); | 914 | bp = xlog_get_bp(log, 1); |
913 | if (!bp) | 915 | if (!bp) |
914 | return ENOMEM; | 916 | return -ENOMEM; |
915 | if (*head_blk == 0) { /* special case */ | 917 | if (*head_blk == 0) { /* special case */ |
916 | error = xlog_bread(log, 0, 1, bp, &offset); | 918 | error = xlog_bread(log, 0, 1, bp, &offset); |
917 | if (error) | 919 | if (error) |
@@ -961,7 +963,7 @@ xlog_find_tail( | |||
961 | xfs_warn(log->l_mp, "%s: couldn't find sync record", __func__); | 963 | xfs_warn(log->l_mp, "%s: couldn't find sync record", __func__); |
962 | xlog_put_bp(bp); | 964 | xlog_put_bp(bp); |
963 | ASSERT(0); | 965 | ASSERT(0); |
964 | return XFS_ERROR(EIO); | 966 | return -EIO; |
965 | } | 967 | } |
966 | 968 | ||
967 | /* find blk_no of tail of log */ | 969 | /* find blk_no of tail of log */ |
@@ -1092,8 +1094,8 @@ done: | |||
1092 | * | 1094 | * |
1093 | * Return: | 1095 | * Return: |
1094 | * 0 => the log is completely written to | 1096 | * 0 => the log is completely written to |
1095 | * -1 => use *blk_no as the first block of the log | 1097 | * 1 => use *blk_no as the first block of the log |
1096 | * >0 => error has occurred | 1098 | * <0 => error has occurred |
1097 | */ | 1099 | */ |
1098 | STATIC int | 1100 | STATIC int |
1099 | xlog_find_zeroed( | 1101 | xlog_find_zeroed( |
@@ -1112,7 +1114,7 @@ xlog_find_zeroed( | |||
1112 | /* check totally zeroed log */ | 1114 | /* check totally zeroed log */ |
1113 | bp = xlog_get_bp(log, 1); | 1115 | bp = xlog_get_bp(log, 1); |
1114 | if (!bp) | 1116 | if (!bp) |
1115 | return ENOMEM; | 1117 | return -ENOMEM; |
1116 | error = xlog_bread(log, 0, 1, bp, &offset); | 1118 | error = xlog_bread(log, 0, 1, bp, &offset); |
1117 | if (error) | 1119 | if (error) |
1118 | goto bp_err; | 1120 | goto bp_err; |
@@ -1121,7 +1123,7 @@ xlog_find_zeroed( | |||
1121 | if (first_cycle == 0) { /* completely zeroed log */ | 1123 | if (first_cycle == 0) { /* completely zeroed log */ |
1122 | *blk_no = 0; | 1124 | *blk_no = 0; |
1123 | xlog_put_bp(bp); | 1125 | xlog_put_bp(bp); |
1124 | return -1; | 1126 | return 1; |
1125 | } | 1127 | } |
1126 | 1128 | ||
1127 | /* check partially zeroed log */ | 1129 | /* check partially zeroed log */ |
@@ -1141,7 +1143,7 @@ xlog_find_zeroed( | |||
1141 | */ | 1143 | */ |
1142 | xfs_warn(log->l_mp, | 1144 | xfs_warn(log->l_mp, |
1143 | "Log inconsistent or not a log (last==0, first!=1)"); | 1145 | "Log inconsistent or not a log (last==0, first!=1)"); |
1144 | error = XFS_ERROR(EINVAL); | 1146 | error = -EINVAL; |
1145 | goto bp_err; | 1147 | goto bp_err; |
1146 | } | 1148 | } |
1147 | 1149 | ||
@@ -1179,19 +1181,18 @@ xlog_find_zeroed( | |||
1179 | * Potentially backup over partial log record write. We don't need | 1181 | * Potentially backup over partial log record write. We don't need |
1180 | * to search the end of the log because we know it is zero. | 1182 | * to search the end of the log because we know it is zero. |
1181 | */ | 1183 | */ |
1182 | if ((error = xlog_find_verify_log_record(log, start_blk, | 1184 | error = xlog_find_verify_log_record(log, start_blk, &last_blk, 0); |
1183 | &last_blk, 0)) == -1) { | 1185 | if (error == 1) |
1184 | error = XFS_ERROR(EIO); | 1186 | error = -EIO; |
1185 | goto bp_err; | 1187 | if (error) |
1186 | } else if (error) | 1188 | goto bp_err; |
1187 | goto bp_err; | ||
1188 | 1189 | ||
1189 | *blk_no = last_blk; | 1190 | *blk_no = last_blk; |
1190 | bp_err: | 1191 | bp_err: |
1191 | xlog_put_bp(bp); | 1192 | xlog_put_bp(bp); |
1192 | if (error) | 1193 | if (error) |
1193 | return error; | 1194 | return error; |
1194 | return -1; | 1195 | return 1; |
1195 | } | 1196 | } |
1196 | 1197 | ||
1197 | /* | 1198 | /* |
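xlog_find_zeroed() used to overload a return value of -1 to mean "the log is zeroed", which clashes with returning negative errnos. After this change it returns 0, 1, or a negative errno, and callers test the three cases explicitly. A condensed sketch of the caller pattern, with names taken from the updated xlog_find_head() hunk near the top of this file's diff:

    error = xlog_find_zeroed(log, &first_blk);
    if (error < 0)
            return error;                   /* real failure */
    if (error == 1)
            *return_head_blk = first_blk;   /* log is zeroed from first_blk */
    /* error == 0: the log has been fully written; keep searching */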
@@ -1251,7 +1252,7 @@ xlog_write_log_records( | |||
1251 | while (!(bp = xlog_get_bp(log, bufblks))) { | 1252 | while (!(bp = xlog_get_bp(log, bufblks))) { |
1252 | bufblks >>= 1; | 1253 | bufblks >>= 1; |
1253 | if (bufblks < sectbb) | 1254 | if (bufblks < sectbb) |
1254 | return ENOMEM; | 1255 | return -ENOMEM; |
1255 | } | 1256 | } |
1256 | 1257 | ||
1257 | /* We may need to do a read at the start to fill in part of | 1258 | /* We may need to do a read at the start to fill in part of |
@@ -1354,7 +1355,7 @@ xlog_clear_stale_blocks( | |||
1354 | if (unlikely(head_block < tail_block || head_block >= log->l_logBBsize)) { | 1355 | if (unlikely(head_block < tail_block || head_block >= log->l_logBBsize)) { |
1355 | XFS_ERROR_REPORT("xlog_clear_stale_blocks(1)", | 1356 | XFS_ERROR_REPORT("xlog_clear_stale_blocks(1)", |
1356 | XFS_ERRLEVEL_LOW, log->l_mp); | 1357 | XFS_ERRLEVEL_LOW, log->l_mp); |
1357 | return XFS_ERROR(EFSCORRUPTED); | 1358 | return -EFSCORRUPTED; |
1358 | } | 1359 | } |
1359 | tail_distance = tail_block + (log->l_logBBsize - head_block); | 1360 | tail_distance = tail_block + (log->l_logBBsize - head_block); |
1360 | } else { | 1361 | } else { |
@@ -1366,7 +1367,7 @@ xlog_clear_stale_blocks( | |||
1366 | if (unlikely(head_block >= tail_block || head_cycle != (tail_cycle + 1))){ | 1367 | if (unlikely(head_block >= tail_block || head_cycle != (tail_cycle + 1))){ |
1367 | XFS_ERROR_REPORT("xlog_clear_stale_blocks(2)", | 1368 | XFS_ERROR_REPORT("xlog_clear_stale_blocks(2)", |
1368 | XFS_ERRLEVEL_LOW, log->l_mp); | 1369 | XFS_ERRLEVEL_LOW, log->l_mp); |
1369 | return XFS_ERROR(EFSCORRUPTED); | 1370 | return -EFSCORRUPTED; |
1370 | } | 1371 | } |
1371 | tail_distance = tail_block - head_block; | 1372 | tail_distance = tail_block - head_block; |
1372 | } | 1373 | } |
@@ -1551,7 +1552,7 @@ xlog_recover_add_to_trans( | |||
1551 | xfs_warn(log->l_mp, "%s: bad header magic number", | 1552 | xfs_warn(log->l_mp, "%s: bad header magic number", |
1552 | __func__); | 1553 | __func__); |
1553 | ASSERT(0); | 1554 | ASSERT(0); |
1554 | return XFS_ERROR(EIO); | 1555 | return -EIO; |
1555 | } | 1556 | } |
1556 | if (len == sizeof(xfs_trans_header_t)) | 1557 | if (len == sizeof(xfs_trans_header_t)) |
1557 | xlog_recover_add_item(&trans->r_itemq); | 1558 | xlog_recover_add_item(&trans->r_itemq); |
@@ -1581,7 +1582,7 @@ xlog_recover_add_to_trans( | |||
1581 | in_f->ilf_size); | 1582 | in_f->ilf_size); |
1582 | ASSERT(0); | 1583 | ASSERT(0); |
1583 | kmem_free(ptr); | 1584 | kmem_free(ptr); |
1584 | return XFS_ERROR(EIO); | 1585 | return -EIO; |
1585 | } | 1586 | } |
1586 | 1587 | ||
1587 | item->ri_total = in_f->ilf_size; | 1588 | item->ri_total = in_f->ilf_size; |
@@ -1702,7 +1703,7 @@ xlog_recover_reorder_trans( | |||
1702 | */ | 1703 | */ |
1703 | if (!list_empty(&sort_list)) | 1704 | if (!list_empty(&sort_list)) |
1704 | list_splice_init(&sort_list, &trans->r_itemq); | 1705 | list_splice_init(&sort_list, &trans->r_itemq); |
1705 | error = XFS_ERROR(EIO); | 1706 | error = -EIO; |
1706 | goto out; | 1707 | goto out; |
1707 | } | 1708 | } |
1708 | } | 1709 | } |
@@ -1943,7 +1944,7 @@ xlog_recover_do_inode_buffer( | |||
1943 | item, bp); | 1944 | item, bp); |
1944 | XFS_ERROR_REPORT("xlog_recover_do_inode_buf", | 1945 | XFS_ERROR_REPORT("xlog_recover_do_inode_buf", |
1945 | XFS_ERRLEVEL_LOW, mp); | 1946 | XFS_ERRLEVEL_LOW, mp); |
1946 | return XFS_ERROR(EFSCORRUPTED); | 1947 | return -EFSCORRUPTED; |
1947 | } | 1948 | } |
1948 | 1949 | ||
1949 | buffer_nextp = (xfs_agino_t *)xfs_buf_offset(bp, | 1950 | buffer_nextp = (xfs_agino_t *)xfs_buf_offset(bp, |
@@ -2125,6 +2126,17 @@ xlog_recover_validate_buf_type( | |||
2125 | __uint16_t magic16; | 2126 | __uint16_t magic16; |
2126 | __uint16_t magicda; | 2127 | __uint16_t magicda; |
2127 | 2128 | ||
2129 | /* | ||
2130 | * We can only do post recovery validation on items on CRC enabled | ||
2131 | * filesystems as we need to know when the buffer was written to be able | ||
2132 | * to determine if we should have replayed the item. If we replay old | ||
2133 | * metadata over a newer buffer, then it will enter a temporarily | ||
2134 | * inconsistent state resulting in verification failures. Hence for now | ||
2135 | * just avoid the verification stage for non-CRC filesystems. | ||
2136 | */ | ||
2137 | if (!xfs_sb_version_hascrc(&mp->m_sb)) | ||
2138 | return; | ||
2139 | |||
2128 | magic32 = be32_to_cpu(*(__be32 *)bp->b_addr); | 2140 | magic32 = be32_to_cpu(*(__be32 *)bp->b_addr); |
2129 | magic16 = be16_to_cpu(*(__be16*)bp->b_addr); | 2141 | magic16 = be16_to_cpu(*(__be16*)bp->b_addr); |
2130 | magicda = be16_to_cpu(info->magic); | 2142 | magicda = be16_to_cpu(info->magic); |
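With the xfs_sb_version_hascrc() test hoisted to the top of xlog_recover_validate_buf_type(), the per-case checks removed in the hunks below become redundant: everything after the guard can assume a v5 (CRC-enabled) superblock. The guard added above amounts to:

    /* skip post-recovery verification entirely on non-CRC filesystems */
    if (!xfs_sb_version_hascrc(&mp->m_sb))
            return;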
@@ -2162,8 +2174,6 @@ xlog_recover_validate_buf_type( | |||
2162 | bp->b_ops = &xfs_agf_buf_ops; | 2174 | bp->b_ops = &xfs_agf_buf_ops; |
2163 | break; | 2175 | break; |
2164 | case XFS_BLFT_AGFL_BUF: | 2176 | case XFS_BLFT_AGFL_BUF: |
2165 | if (!xfs_sb_version_hascrc(&mp->m_sb)) | ||
2166 | break; | ||
2167 | if (magic32 != XFS_AGFL_MAGIC) { | 2177 | if (magic32 != XFS_AGFL_MAGIC) { |
2168 | xfs_warn(mp, "Bad AGFL block magic!"); | 2178 | xfs_warn(mp, "Bad AGFL block magic!"); |
2169 | ASSERT(0); | 2179 | ASSERT(0); |
@@ -2196,10 +2206,6 @@ xlog_recover_validate_buf_type( | |||
2196 | #endif | 2206 | #endif |
2197 | break; | 2207 | break; |
2198 | case XFS_BLFT_DINO_BUF: | 2208 | case XFS_BLFT_DINO_BUF: |
2199 | /* | ||
2200 | * we get here with inode allocation buffers, not buffers that | ||
2201 | * track unlinked list changes. | ||
2202 | */ | ||
2203 | if (magic16 != XFS_DINODE_MAGIC) { | 2209 | if (magic16 != XFS_DINODE_MAGIC) { |
2204 | xfs_warn(mp, "Bad INODE block magic!"); | 2210 | xfs_warn(mp, "Bad INODE block magic!"); |
2205 | ASSERT(0); | 2211 | ASSERT(0); |
@@ -2279,8 +2285,6 @@ xlog_recover_validate_buf_type( | |||
2279 | bp->b_ops = &xfs_attr3_leaf_buf_ops; | 2285 | bp->b_ops = &xfs_attr3_leaf_buf_ops; |
2280 | break; | 2286 | break; |
2281 | case XFS_BLFT_ATTR_RMT_BUF: | 2287 | case XFS_BLFT_ATTR_RMT_BUF: |
2282 | if (!xfs_sb_version_hascrc(&mp->m_sb)) | ||
2283 | break; | ||
2284 | if (magic32 != XFS_ATTR3_RMT_MAGIC) { | 2288 | if (magic32 != XFS_ATTR3_RMT_MAGIC) { |
2285 | xfs_warn(mp, "Bad attr remote magic!"); | 2289 | xfs_warn(mp, "Bad attr remote magic!"); |
2286 | ASSERT(0); | 2290 | ASSERT(0); |
@@ -2387,16 +2391,7 @@ xlog_recover_do_reg_buffer( | |||
2387 | /* Shouldn't be any more regions */ | 2391 | /* Shouldn't be any more regions */ |
2388 | ASSERT(i == item->ri_total); | 2392 | ASSERT(i == item->ri_total); |
2389 | 2393 | ||
2390 | /* | 2394 | xlog_recover_validate_buf_type(mp, bp, buf_f); |
2391 | * We can only do post recovery validation on items on CRC enabled | ||
2392 | * fielsystems as we need to know when the buffer was written to be able | ||
2393 | * to determine if we should have replayed the item. If we replay old | ||
2394 | * metadata over a newer buffer, then it will enter a temporarily | ||
2395 | * inconsistent state resulting in verification failures. Hence for now | ||
2396 | * just avoid the verification stage for non-crc filesystems | ||
2397 | */ | ||
2398 | if (xfs_sb_version_hascrc(&mp->m_sb)) | ||
2399 | xlog_recover_validate_buf_type(mp, bp, buf_f); | ||
2400 | } | 2395 | } |
2401 | 2396 | ||
2402 | /* | 2397 | /* |
@@ -2404,8 +2399,11 @@ xlog_recover_do_reg_buffer( | |||
2404 | * Simple algorithm: if we have found a QUOTAOFF log item of the same type | 2399 | * Simple algorithm: if we have found a QUOTAOFF log item of the same type |
2405 | * (ie. USR or GRP), then just toss this buffer away; don't recover it. | 2400 | * (ie. USR or GRP), then just toss this buffer away; don't recover it. |
2406 | * Else, treat it as a regular buffer and do recovery. | 2401 | * Else, treat it as a regular buffer and do recovery. |
2402 | * | ||
2403 | * Return false if the buffer was tossed and true if we recovered the buffer to | ||
2404 | * indicate to the caller if the buffer needs writing. | ||
2407 | */ | 2405 | */ |
2408 | STATIC void | 2406 | STATIC bool |
2409 | xlog_recover_do_dquot_buffer( | 2407 | xlog_recover_do_dquot_buffer( |
2410 | struct xfs_mount *mp, | 2408 | struct xfs_mount *mp, |
2411 | struct xlog *log, | 2409 | struct xlog *log, |
@@ -2420,9 +2418,8 @@ xlog_recover_do_dquot_buffer( | |||
2420 | /* | 2418 | /* |
2421 | * Filesystems are required to send in quota flags at mount time. | 2419 | * Filesystems are required to send in quota flags at mount time. |
2422 | */ | 2420 | */ |
2423 | if (mp->m_qflags == 0) { | 2421 | if (!mp->m_qflags) |
2424 | return; | 2422 | return false; |
2425 | } | ||
2426 | 2423 | ||
2427 | type = 0; | 2424 | type = 0; |
2428 | if (buf_f->blf_flags & XFS_BLF_UDQUOT_BUF) | 2425 | if (buf_f->blf_flags & XFS_BLF_UDQUOT_BUF) |
@@ -2435,9 +2432,10 @@ xlog_recover_do_dquot_buffer( | |||
2435 | * This type of quotas was turned off, so ignore this buffer | 2432 | * This type of quotas was turned off, so ignore this buffer |
2436 | */ | 2433 | */ |
2437 | if (log->l_quotaoffs_flag & type) | 2434 | if (log->l_quotaoffs_flag & type) |
2438 | return; | 2435 | return false; |
2439 | 2436 | ||
2440 | xlog_recover_do_reg_buffer(mp, item, bp, buf_f); | 2437 | xlog_recover_do_reg_buffer(mp, item, bp, buf_f); |
2438 | return true; | ||
2441 | } | 2439 | } |
2442 | 2440 | ||
2443 | /* | 2441 | /* |
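Because xlog_recover_do_dquot_buffer() now reports whether it actually recovered the buffer, the caller can skip the delayed write when the buffer was tossed. A condensed sketch of the caller, as updated later in this diff in xlog_recover_buffer_pass2():

    bool dirty;

    dirty = xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f);
    if (!dirty)
            goto out_release;       /* nothing recovered; don't write it back */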
@@ -2496,7 +2494,7 @@ xlog_recover_buffer_pass2( | |||
2496 | bp = xfs_buf_read(mp->m_ddev_targp, buf_f->blf_blkno, buf_f->blf_len, | 2494 | bp = xfs_buf_read(mp->m_ddev_targp, buf_f->blf_blkno, buf_f->blf_len, |
2497 | buf_flags, NULL); | 2495 | buf_flags, NULL); |
2498 | if (!bp) | 2496 | if (!bp) |
2499 | return XFS_ERROR(ENOMEM); | 2497 | return -ENOMEM; |
2500 | error = bp->b_error; | 2498 | error = bp->b_error; |
2501 | if (error) { | 2499 | if (error) { |
2502 | xfs_buf_ioerror_alert(bp, "xlog_recover_do..(read#1)"); | 2500 | xfs_buf_ioerror_alert(bp, "xlog_recover_do..(read#1)"); |
@@ -2504,23 +2502,44 @@ xlog_recover_buffer_pass2( | |||
2504 | } | 2502 | } |
2505 | 2503 | ||
2506 | /* | 2504 | /* |
2507 | * recover the buffer only if we get an LSN from it and it's less than | 2505 | * Recover the buffer only if we get an LSN from it and it's less than |
2508 | * the lsn of the transaction we are replaying. | 2506 | * the lsn of the transaction we are replaying. |
2507 | * | ||
2508 | * Note that we have to be extremely careful of readahead here. | ||
2509 | * Readahead does not attach verifiers to the buffers, so if we do | ||
2510 | * not actually do any replay after readahead because the LSN found | ||
2511 | * in the buffer is more recent than the current transaction, then we | ||
2512 | * need to attach the verifier directly. Failure to do so means that | ||
2513 | * future recovery actions (e.g. EFI and unlinked list recovery) can | ||
2514 | * operate on the buffers without the verifier attached. This | ||
2515 | * can lead to blocks on disk having the correct content but a stale | ||
2516 | * CRC. | ||
2517 | * | ||
2518 | * It is safe to assume these clean buffers are currently up to date. | ||
2519 | * If the buffer is dirtied by a later transaction being replayed, then | ||
2520 | * the verifier will be reset to match whatever recovery turns that | ||
2521 | * buffer into. | ||
2509 | */ | 2522 | */ |
2510 | lsn = xlog_recover_get_buf_lsn(mp, bp); | 2523 | lsn = xlog_recover_get_buf_lsn(mp, bp); |
2511 | if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) | 2524 | if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) { |
2525 | xlog_recover_validate_buf_type(mp, bp, buf_f); | ||
2512 | goto out_release; | 2526 | goto out_release; |
2527 | } | ||
2513 | 2528 | ||
2514 | if (buf_f->blf_flags & XFS_BLF_INODE_BUF) { | 2529 | if (buf_f->blf_flags & XFS_BLF_INODE_BUF) { |
2515 | error = xlog_recover_do_inode_buffer(mp, item, bp, buf_f); | 2530 | error = xlog_recover_do_inode_buffer(mp, item, bp, buf_f); |
2531 | if (error) | ||
2532 | goto out_release; | ||
2516 | } else if (buf_f->blf_flags & | 2533 | } else if (buf_f->blf_flags & |
2517 | (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) { | 2534 | (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) { |
2518 | xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f); | 2535 | bool dirty; |
2536 | |||
2537 | dirty = xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f); | ||
2538 | if (!dirty) | ||
2539 | goto out_release; | ||
2519 | } else { | 2540 | } else { |
2520 | xlog_recover_do_reg_buffer(mp, item, bp, buf_f); | 2541 | xlog_recover_do_reg_buffer(mp, item, bp, buf_f); |
2521 | } | 2542 | } |
2522 | if (error) | ||
2523 | goto out_release; | ||
2524 | 2543 | ||
2525 | /* | 2544 | /* |
2526 | * Perform delayed write on the buffer. Asynchronous writes will be | 2545 | * Perform delayed write on the buffer. Asynchronous writes will be |
@@ -2598,7 +2617,7 @@ xfs_recover_inode_owner_change( | |||
2598 | 2617 | ||
2599 | ip = xfs_inode_alloc(mp, in_f->ilf_ino); | 2618 | ip = xfs_inode_alloc(mp, in_f->ilf_ino); |
2600 | if (!ip) | 2619 | if (!ip) |
2601 | return ENOMEM; | 2620 | return -ENOMEM; |
2602 | 2621 | ||
2603 | /* instantiate the inode */ | 2622 | /* instantiate the inode */ |
2604 | xfs_dinode_from_disk(&ip->i_d, dip); | 2623 | xfs_dinode_from_disk(&ip->i_d, dip); |
@@ -2676,7 +2695,7 @@ xlog_recover_inode_pass2( | |||
2676 | bp = xfs_buf_read(mp->m_ddev_targp, in_f->ilf_blkno, in_f->ilf_len, 0, | 2695 | bp = xfs_buf_read(mp->m_ddev_targp, in_f->ilf_blkno, in_f->ilf_len, 0, |
2677 | &xfs_inode_buf_ops); | 2696 | &xfs_inode_buf_ops); |
2678 | if (!bp) { | 2697 | if (!bp) { |
2679 | error = ENOMEM; | 2698 | error = -ENOMEM; |
2680 | goto error; | 2699 | goto error; |
2681 | } | 2700 | } |
2682 | error = bp->b_error; | 2701 | error = bp->b_error; |
@@ -2697,7 +2716,7 @@ xlog_recover_inode_pass2( | |||
2697 | __func__, dip, bp, in_f->ilf_ino); | 2716 | __func__, dip, bp, in_f->ilf_ino); |
2698 | XFS_ERROR_REPORT("xlog_recover_inode_pass2(1)", | 2717 | XFS_ERROR_REPORT("xlog_recover_inode_pass2(1)", |
2699 | XFS_ERRLEVEL_LOW, mp); | 2718 | XFS_ERRLEVEL_LOW, mp); |
2700 | error = EFSCORRUPTED; | 2719 | error = -EFSCORRUPTED; |
2701 | goto out_release; | 2720 | goto out_release; |
2702 | } | 2721 | } |
2703 | dicp = item->ri_buf[1].i_addr; | 2722 | dicp = item->ri_buf[1].i_addr; |
@@ -2707,7 +2726,7 @@ xlog_recover_inode_pass2( | |||
2707 | __func__, item, in_f->ilf_ino); | 2726 | __func__, item, in_f->ilf_ino); |
2708 | XFS_ERROR_REPORT("xlog_recover_inode_pass2(2)", | 2727 | XFS_ERROR_REPORT("xlog_recover_inode_pass2(2)", |
2709 | XFS_ERRLEVEL_LOW, mp); | 2728 | XFS_ERRLEVEL_LOW, mp); |
2710 | error = EFSCORRUPTED; | 2729 | error = -EFSCORRUPTED; |
2711 | goto out_release; | 2730 | goto out_release; |
2712 | } | 2731 | } |
2713 | 2732 | ||
@@ -2764,7 +2783,7 @@ xlog_recover_inode_pass2( | |||
2764 | "%s: Bad regular inode log record, rec ptr 0x%p, " | 2783 | "%s: Bad regular inode log record, rec ptr 0x%p, " |
2765 | "ino ptr = 0x%p, ino bp = 0x%p, ino %Ld", | 2784 | "ino ptr = 0x%p, ino bp = 0x%p, ino %Ld", |
2766 | __func__, item, dip, bp, in_f->ilf_ino); | 2785 | __func__, item, dip, bp, in_f->ilf_ino); |
2767 | error = EFSCORRUPTED; | 2786 | error = -EFSCORRUPTED; |
2768 | goto out_release; | 2787 | goto out_release; |
2769 | } | 2788 | } |
2770 | } else if (unlikely(S_ISDIR(dicp->di_mode))) { | 2789 | } else if (unlikely(S_ISDIR(dicp->di_mode))) { |
@@ -2777,7 +2796,7 @@ xlog_recover_inode_pass2( | |||
2777 | "%s: Bad dir inode log record, rec ptr 0x%p, " | 2796 | "%s: Bad dir inode log record, rec ptr 0x%p, " |
2778 | "ino ptr = 0x%p, ino bp = 0x%p, ino %Ld", | 2797 | "ino ptr = 0x%p, ino bp = 0x%p, ino %Ld", |
2779 | __func__, item, dip, bp, in_f->ilf_ino); | 2798 | __func__, item, dip, bp, in_f->ilf_ino); |
2780 | error = EFSCORRUPTED; | 2799 | error = -EFSCORRUPTED; |
2781 | goto out_release; | 2800 | goto out_release; |
2782 | } | 2801 | } |
2783 | } | 2802 | } |
@@ -2790,7 +2809,7 @@ xlog_recover_inode_pass2( | |||
2790 | __func__, item, dip, bp, in_f->ilf_ino, | 2809 | __func__, item, dip, bp, in_f->ilf_ino, |
2791 | dicp->di_nextents + dicp->di_anextents, | 2810 | dicp->di_nextents + dicp->di_anextents, |
2792 | dicp->di_nblocks); | 2811 | dicp->di_nblocks); |
2793 | error = EFSCORRUPTED; | 2812 | error = -EFSCORRUPTED; |
2794 | goto out_release; | 2813 | goto out_release; |
2795 | } | 2814 | } |
2796 | if (unlikely(dicp->di_forkoff > mp->m_sb.sb_inodesize)) { | 2815 | if (unlikely(dicp->di_forkoff > mp->m_sb.sb_inodesize)) { |
@@ -2800,7 +2819,7 @@ xlog_recover_inode_pass2( | |||
2800 | "%s: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, " | 2819 | "%s: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, " |
2801 | "dino bp 0x%p, ino %Ld, forkoff 0x%x", __func__, | 2820 | "dino bp 0x%p, ino %Ld, forkoff 0x%x", __func__, |
2802 | item, dip, bp, in_f->ilf_ino, dicp->di_forkoff); | 2821 | item, dip, bp, in_f->ilf_ino, dicp->di_forkoff); |
2803 | error = EFSCORRUPTED; | 2822 | error = -EFSCORRUPTED; |
2804 | goto out_release; | 2823 | goto out_release; |
2805 | } | 2824 | } |
2806 | isize = xfs_icdinode_size(dicp->di_version); | 2825 | isize = xfs_icdinode_size(dicp->di_version); |
@@ -2810,7 +2829,7 @@ xlog_recover_inode_pass2( | |||
2810 | xfs_alert(mp, | 2829 | xfs_alert(mp, |
2811 | "%s: Bad inode log record length %d, rec ptr 0x%p", | 2830 | "%s: Bad inode log record length %d, rec ptr 0x%p", |
2812 | __func__, item->ri_buf[1].i_len, item); | 2831 | __func__, item->ri_buf[1].i_len, item); |
2813 | error = EFSCORRUPTED; | 2832 | error = -EFSCORRUPTED; |
2814 | goto out_release; | 2833 | goto out_release; |
2815 | } | 2834 | } |
2816 | 2835 | ||
@@ -2898,7 +2917,7 @@ xlog_recover_inode_pass2( | |||
2898 | default: | 2917 | default: |
2899 | xfs_warn(log->l_mp, "%s: Invalid flag", __func__); | 2918 | xfs_warn(log->l_mp, "%s: Invalid flag", __func__); |
2900 | ASSERT(0); | 2919 | ASSERT(0); |
2901 | error = EIO; | 2920 | error = -EIO; |
2902 | goto out_release; | 2921 | goto out_release; |
2903 | } | 2922 | } |
2904 | } | 2923 | } |
@@ -2919,7 +2938,7 @@ out_release: | |||
2919 | error: | 2938 | error: |
2920 | if (need_free) | 2939 | if (need_free) |
2921 | kmem_free(in_f); | 2940 | kmem_free(in_f); |
2922 | return XFS_ERROR(error); | 2941 | return error; |
2923 | } | 2942 | } |
2924 | 2943 | ||
2925 | /* | 2944 | /* |
@@ -2946,7 +2965,7 @@ xlog_recover_quotaoff_pass1( | |||
2946 | if (qoff_f->qf_flags & XFS_GQUOTA_ACCT) | 2965 | if (qoff_f->qf_flags & XFS_GQUOTA_ACCT) |
2947 | log->l_quotaoffs_flag |= XFS_DQ_GROUP; | 2966 | log->l_quotaoffs_flag |= XFS_DQ_GROUP; |
2948 | 2967 | ||
2949 | return (0); | 2968 | return 0; |
2950 | } | 2969 | } |
2951 | 2970 | ||
2952 | /* | 2971 | /* |
@@ -2971,17 +2990,17 @@ xlog_recover_dquot_pass2( | |||
2971 | * Filesystems are required to send in quota flags at mount time. | 2990 | * Filesystems are required to send in quota flags at mount time. |
2972 | */ | 2991 | */ |
2973 | if (mp->m_qflags == 0) | 2992 | if (mp->m_qflags == 0) |
2974 | return (0); | 2993 | return 0; |
2975 | 2994 | ||
2976 | recddq = item->ri_buf[1].i_addr; | 2995 | recddq = item->ri_buf[1].i_addr; |
2977 | if (recddq == NULL) { | 2996 | if (recddq == NULL) { |
2978 | xfs_alert(log->l_mp, "NULL dquot in %s.", __func__); | 2997 | xfs_alert(log->l_mp, "NULL dquot in %s.", __func__); |
2979 | return XFS_ERROR(EIO); | 2998 | return -EIO; |
2980 | } | 2999 | } |
2981 | if (item->ri_buf[1].i_len < sizeof(xfs_disk_dquot_t)) { | 3000 | if (item->ri_buf[1].i_len < sizeof(xfs_disk_dquot_t)) { |
2982 | xfs_alert(log->l_mp, "dquot too small (%d) in %s.", | 3001 | xfs_alert(log->l_mp, "dquot too small (%d) in %s.", |
2983 | item->ri_buf[1].i_len, __func__); | 3002 | item->ri_buf[1].i_len, __func__); |
2984 | return XFS_ERROR(EIO); | 3003 | return -EIO; |
2985 | } | 3004 | } |
2986 | 3005 | ||
2987 | /* | 3006 | /* |
@@ -2990,7 +3009,7 @@ xlog_recover_dquot_pass2( | |||
2990 | type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP); | 3009 | type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP); |
2991 | ASSERT(type); | 3010 | ASSERT(type); |
2992 | if (log->l_quotaoffs_flag & type) | 3011 | if (log->l_quotaoffs_flag & type) |
2993 | return (0); | 3012 | return 0; |
2994 | 3013 | ||
2995 | /* | 3014 | /* |
2996 | * At this point we know that quota was _not_ turned off. | 3015 | * At this point we know that quota was _not_ turned off. |
@@ -3007,12 +3026,19 @@ xlog_recover_dquot_pass2( | |||
3007 | error = xfs_dqcheck(mp, recddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN, | 3026 | error = xfs_dqcheck(mp, recddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN, |
3008 | "xlog_recover_dquot_pass2 (log copy)"); | 3027 | "xlog_recover_dquot_pass2 (log copy)"); |
3009 | if (error) | 3028 | if (error) |
3010 | return XFS_ERROR(EIO); | 3029 | return -EIO; |
3011 | ASSERT(dq_f->qlf_len == 1); | 3030 | ASSERT(dq_f->qlf_len == 1); |
3012 | 3031 | ||
3032 | /* | ||
3033 | * At this point we are assuming that the dquots have been allocated | ||
3034 | * and hence the buffer has valid dquots stamped in it. It should, | ||
3035 | * therefore, pass verifier validation. If the dquot is bad, then | ||
3036 | * we'll return an error here, so we don't need to specifically check | ||
3037 | * the dquot in the buffer after the verifier has run. | ||
3038 | */ | ||
3013 | error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dq_f->qlf_blkno, | 3039 | error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dq_f->qlf_blkno, |
3014 | XFS_FSB_TO_BB(mp, dq_f->qlf_len), 0, &bp, | 3040 | XFS_FSB_TO_BB(mp, dq_f->qlf_len), 0, &bp, |
3015 | NULL); | 3041 | &xfs_dquot_buf_ops); |
3016 | if (error) | 3042 | if (error) |
3017 | return error; | 3043 | return error; |
3018 | 3044 | ||
@@ -3020,18 +3046,6 @@ xlog_recover_dquot_pass2( | |||
3020 | ddq = (xfs_disk_dquot_t *)xfs_buf_offset(bp, dq_f->qlf_boffset); | 3046 | ddq = (xfs_disk_dquot_t *)xfs_buf_offset(bp, dq_f->qlf_boffset); |
3021 | 3047 | ||
3022 | /* | 3048 | /* |
3023 | * At least the magic num portion should be on disk because this | ||
3024 | * was among a chunk of dquots created earlier, and we did some | ||
3025 | * minimal initialization then. | ||
3026 | */ | ||
3027 | error = xfs_dqcheck(mp, ddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN, | ||
3028 | "xlog_recover_dquot_pass2"); | ||
3029 | if (error) { | ||
3030 | xfs_buf_relse(bp); | ||
3031 | return XFS_ERROR(EIO); | ||
3032 | } | ||
3033 | |||
3034 | /* | ||
3035 | * If the dquot has an LSN in it, recover the dquot only if it's less | 3049 | * If the dquot has an LSN in it, recover the dquot only if it's less |
3036 | * than the lsn of the transaction we are replaying. | 3050 | * than the lsn of the transaction we are replaying. |
3037 | */ | 3051 | */ |
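Reading the dquot buffer with &xfs_dquot_buf_ops attached means the buffer verifier now performs the structural checks that the removed open-coded xfs_dqcheck() call used to do, so a bad dquot surfaces as the error return of the read itself. The new call, as it appears above:

    error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dq_f->qlf_blkno,
                               XFS_FSB_TO_BB(mp, dq_f->qlf_len), 0, &bp,
                               &xfs_dquot_buf_ops);
    if (error)
            return error;           /* includes verifier failures */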
@@ -3178,38 +3192,38 @@ xlog_recover_do_icreate_pass2( | |||
3178 | icl = (struct xfs_icreate_log *)item->ri_buf[0].i_addr; | 3192 | icl = (struct xfs_icreate_log *)item->ri_buf[0].i_addr; |
3179 | if (icl->icl_type != XFS_LI_ICREATE) { | 3193 | if (icl->icl_type != XFS_LI_ICREATE) { |
3180 | xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad type"); | 3194 | xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad type"); |
3181 | return EINVAL; | 3195 | return -EINVAL; |
3182 | } | 3196 | } |
3183 | 3197 | ||
3184 | if (icl->icl_size != 1) { | 3198 | if (icl->icl_size != 1) { |
3185 | xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad icl size"); | 3199 | xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad icl size"); |
3186 | return EINVAL; | 3200 | return -EINVAL; |
3187 | } | 3201 | } |
3188 | 3202 | ||
3189 | agno = be32_to_cpu(icl->icl_ag); | 3203 | agno = be32_to_cpu(icl->icl_ag); |
3190 | if (agno >= mp->m_sb.sb_agcount) { | 3204 | if (agno >= mp->m_sb.sb_agcount) { |
3191 | xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad agno"); | 3205 | xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad agno"); |
3192 | return EINVAL; | 3206 | return -EINVAL; |
3193 | } | 3207 | } |
3194 | agbno = be32_to_cpu(icl->icl_agbno); | 3208 | agbno = be32_to_cpu(icl->icl_agbno); |
3195 | if (!agbno || agbno == NULLAGBLOCK || agbno >= mp->m_sb.sb_agblocks) { | 3209 | if (!agbno || agbno == NULLAGBLOCK || agbno >= mp->m_sb.sb_agblocks) { |
3196 | xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad agbno"); | 3210 | xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad agbno"); |
3197 | return EINVAL; | 3211 | return -EINVAL; |
3198 | } | 3212 | } |
3199 | isize = be32_to_cpu(icl->icl_isize); | 3213 | isize = be32_to_cpu(icl->icl_isize); |
3200 | if (isize != mp->m_sb.sb_inodesize) { | 3214 | if (isize != mp->m_sb.sb_inodesize) { |
3201 | xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad isize"); | 3215 | xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad isize"); |
3202 | return EINVAL; | 3216 | return -EINVAL; |
3203 | } | 3217 | } |
3204 | count = be32_to_cpu(icl->icl_count); | 3218 | count = be32_to_cpu(icl->icl_count); |
3205 | if (!count) { | 3219 | if (!count) { |
3206 | xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad count"); | 3220 | xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad count"); |
3207 | return EINVAL; | 3221 | return -EINVAL; |
3208 | } | 3222 | } |
3209 | length = be32_to_cpu(icl->icl_length); | 3223 | length = be32_to_cpu(icl->icl_length); |
3210 | if (!length || length >= mp->m_sb.sb_agblocks) { | 3224 | if (!length || length >= mp->m_sb.sb_agblocks) { |
3211 | xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad length"); | 3225 | xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad length"); |
3212 | return EINVAL; | 3226 | return -EINVAL; |
3213 | } | 3227 | } |
3214 | 3228 | ||
3215 | /* existing allocation is fixed value */ | 3229 | /* existing allocation is fixed value */ |
@@ -3218,7 +3232,7 @@ xlog_recover_do_icreate_pass2( | |||
3218 | if (count != mp->m_ialloc_inos || | 3232 | if (count != mp->m_ialloc_inos || |
3219 | length != mp->m_ialloc_blks) { | 3233 | length != mp->m_ialloc_blks) { |
3220 | xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad count 2"); | 3234 | xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad count 2"); |
3221 | return EINVAL; | 3235 | return -EINVAL; |
3222 | } | 3236 | } |
3223 | 3237 | ||
3224 | /* | 3238 | /* |
@@ -3389,7 +3403,7 @@ xlog_recover_commit_pass1( | |||
3389 | xfs_warn(log->l_mp, "%s: invalid item type (%d)", | 3403 | xfs_warn(log->l_mp, "%s: invalid item type (%d)", |
3390 | __func__, ITEM_TYPE(item)); | 3404 | __func__, ITEM_TYPE(item)); |
3391 | ASSERT(0); | 3405 | ASSERT(0); |
3392 | return XFS_ERROR(EIO); | 3406 | return -EIO; |
3393 | } | 3407 | } |
3394 | } | 3408 | } |
3395 | 3409 | ||
@@ -3425,7 +3439,7 @@ xlog_recover_commit_pass2( | |||
3425 | xfs_warn(log->l_mp, "%s: invalid item type (%d)", | 3439 | xfs_warn(log->l_mp, "%s: invalid item type (%d)", |
3426 | __func__, ITEM_TYPE(item)); | 3440 | __func__, ITEM_TYPE(item)); |
3427 | ASSERT(0); | 3441 | ASSERT(0); |
3428 | return XFS_ERROR(EIO); | 3442 | return -EIO; |
3429 | } | 3443 | } |
3430 | } | 3444 | } |
3431 | 3445 | ||
@@ -3560,7 +3574,7 @@ xlog_recover_process_data( | |||
3560 | 3574 | ||
3561 | /* check the log format matches our own - else we can't recover */ | 3575 | /* check the log format matches our own - else we can't recover */ |
3562 | if (xlog_header_check_recover(log->l_mp, rhead)) | 3576 | if (xlog_header_check_recover(log->l_mp, rhead)) |
3563 | return (XFS_ERROR(EIO)); | 3577 | return -EIO; |
3564 | 3578 | ||
3565 | while ((dp < lp) && num_logops) { | 3579 | while ((dp < lp) && num_logops) { |
3566 | ASSERT(dp + sizeof(xlog_op_header_t) <= lp); | 3580 | ASSERT(dp + sizeof(xlog_op_header_t) <= lp); |
@@ -3571,7 +3585,7 @@ xlog_recover_process_data( | |||
3571 | xfs_warn(log->l_mp, "%s: bad clientid 0x%x", | 3585 | xfs_warn(log->l_mp, "%s: bad clientid 0x%x", |
3572 | __func__, ohead->oh_clientid); | 3586 | __func__, ohead->oh_clientid); |
3573 | ASSERT(0); | 3587 | ASSERT(0); |
3574 | return (XFS_ERROR(EIO)); | 3588 | return -EIO; |
3575 | } | 3589 | } |
3576 | tid = be32_to_cpu(ohead->oh_tid); | 3590 | tid = be32_to_cpu(ohead->oh_tid); |
3577 | hash = XLOG_RHASH(tid); | 3591 | hash = XLOG_RHASH(tid); |
@@ -3585,7 +3599,7 @@ xlog_recover_process_data( | |||
3585 | xfs_warn(log->l_mp, "%s: bad length 0x%x", | 3599 | xfs_warn(log->l_mp, "%s: bad length 0x%x", |
3586 | __func__, be32_to_cpu(ohead->oh_len)); | 3600 | __func__, be32_to_cpu(ohead->oh_len)); |
3587 | WARN_ON(1); | 3601 | WARN_ON(1); |
3588 | return (XFS_ERROR(EIO)); | 3602 | return -EIO; |
3589 | } | 3603 | } |
3590 | flags = ohead->oh_flags & ~XLOG_END_TRANS; | 3604 | flags = ohead->oh_flags & ~XLOG_END_TRANS; |
3591 | if (flags & XLOG_WAS_CONT_TRANS) | 3605 | if (flags & XLOG_WAS_CONT_TRANS) |
@@ -3607,7 +3621,7 @@ xlog_recover_process_data( | |||
3607 | xfs_warn(log->l_mp, "%s: bad transaction", | 3621 | xfs_warn(log->l_mp, "%s: bad transaction", |
3608 | __func__); | 3622 | __func__); |
3609 | ASSERT(0); | 3623 | ASSERT(0); |
3610 | error = XFS_ERROR(EIO); | 3624 | error = -EIO; |
3611 | break; | 3625 | break; |
3612 | case 0: | 3626 | case 0: |
3613 | case XLOG_CONTINUE_TRANS: | 3627 | case XLOG_CONTINUE_TRANS: |
@@ -3618,7 +3632,7 @@ xlog_recover_process_data( | |||
3618 | xfs_warn(log->l_mp, "%s: bad flag 0x%x", | 3632 | xfs_warn(log->l_mp, "%s: bad flag 0x%x", |
3619 | __func__, flags); | 3633 | __func__, flags); |
3620 | ASSERT(0); | 3634 | ASSERT(0); |
3621 | error = XFS_ERROR(EIO); | 3635 | error = -EIO; |
3622 | break; | 3636 | break; |
3623 | } | 3637 | } |
3624 | if (error) { | 3638 | if (error) { |
@@ -3669,7 +3683,7 @@ xlog_recover_process_efi( | |||
3669 | */ | 3683 | */ |
3670 | set_bit(XFS_EFI_RECOVERED, &efip->efi_flags); | 3684 | set_bit(XFS_EFI_RECOVERED, &efip->efi_flags); |
3671 | xfs_efi_release(efip, efip->efi_format.efi_nextents); | 3685 | xfs_efi_release(efip, efip->efi_format.efi_nextents); |
3672 | return XFS_ERROR(EIO); | 3686 | return -EIO; |
3673 | } | 3687 | } |
3674 | } | 3688 | } |
3675 | 3689 | ||
@@ -3969,7 +3983,7 @@ xlog_unpack_data_crc( | |||
3969 | * CRC protection by punting an error back up the stack. | 3983 | * CRC protection by punting an error back up the stack. |
3970 | */ | 3984 | */ |
3971 | if (xfs_sb_version_hascrc(&log->l_mp->m_sb)) | 3985 | if (xfs_sb_version_hascrc(&log->l_mp->m_sb)) |
3972 | return EFSCORRUPTED; | 3986 | return -EFSCORRUPTED; |
3973 | } | 3987 | } |
3974 | 3988 | ||
3975 | return 0; | 3989 | return 0; |
@@ -4018,14 +4032,14 @@ xlog_valid_rec_header( | |||
4018 | if (unlikely(rhead->h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM))) { | 4032 | if (unlikely(rhead->h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM))) { |
4019 | XFS_ERROR_REPORT("xlog_valid_rec_header(1)", | 4033 | XFS_ERROR_REPORT("xlog_valid_rec_header(1)", |
4020 | XFS_ERRLEVEL_LOW, log->l_mp); | 4034 | XFS_ERRLEVEL_LOW, log->l_mp); |
4021 | return XFS_ERROR(EFSCORRUPTED); | 4035 | return -EFSCORRUPTED; |
4022 | } | 4036 | } |
4023 | if (unlikely( | 4037 | if (unlikely( |
4024 | (!rhead->h_version || | 4038 | (!rhead->h_version || |
4025 | (be32_to_cpu(rhead->h_version) & (~XLOG_VERSION_OKBITS))))) { | 4039 | (be32_to_cpu(rhead->h_version) & (~XLOG_VERSION_OKBITS))))) { |
4026 | xfs_warn(log->l_mp, "%s: unrecognised log version (%d).", | 4040 | xfs_warn(log->l_mp, "%s: unrecognised log version (%d).", |
4027 | __func__, be32_to_cpu(rhead->h_version)); | 4041 | __func__, be32_to_cpu(rhead->h_version)); |
4028 | return XFS_ERROR(EIO); | 4042 | return -EIO; |
4029 | } | 4043 | } |
4030 | 4044 | ||
4031 | /* LR body must have data or it wouldn't have been written */ | 4045 | /* LR body must have data or it wouldn't have been written */ |
@@ -4033,12 +4047,12 @@ xlog_valid_rec_header( | |||
4033 | if (unlikely( hlen <= 0 || hlen > INT_MAX )) { | 4047 | if (unlikely( hlen <= 0 || hlen > INT_MAX )) { |
4034 | XFS_ERROR_REPORT("xlog_valid_rec_header(2)", | 4048 | XFS_ERROR_REPORT("xlog_valid_rec_header(2)", |
4035 | XFS_ERRLEVEL_LOW, log->l_mp); | 4049 | XFS_ERRLEVEL_LOW, log->l_mp); |
4036 | return XFS_ERROR(EFSCORRUPTED); | 4050 | return -EFSCORRUPTED; |
4037 | } | 4051 | } |
4038 | if (unlikely( blkno > log->l_logBBsize || blkno > INT_MAX )) { | 4052 | if (unlikely( blkno > log->l_logBBsize || blkno > INT_MAX )) { |
4039 | XFS_ERROR_REPORT("xlog_valid_rec_header(3)", | 4053 | XFS_ERROR_REPORT("xlog_valid_rec_header(3)", |
4040 | XFS_ERRLEVEL_LOW, log->l_mp); | 4054 | XFS_ERRLEVEL_LOW, log->l_mp); |
4041 | return XFS_ERROR(EFSCORRUPTED); | 4055 | return -EFSCORRUPTED; |
4042 | } | 4056 | } |
4043 | return 0; | 4057 | return 0; |
4044 | } | 4058 | } |
@@ -4081,7 +4095,7 @@ xlog_do_recovery_pass( | |||
4081 | */ | 4095 | */ |
4082 | hbp = xlog_get_bp(log, 1); | 4096 | hbp = xlog_get_bp(log, 1); |
4083 | if (!hbp) | 4097 | if (!hbp) |
4084 | return ENOMEM; | 4098 | return -ENOMEM; |
4085 | 4099 | ||
4086 | error = xlog_bread(log, tail_blk, 1, hbp, &offset); | 4100 | error = xlog_bread(log, tail_blk, 1, hbp, &offset); |
4087 | if (error) | 4101 | if (error) |
@@ -4110,11 +4124,11 @@ xlog_do_recovery_pass( | |||
4110 | } | 4124 | } |
4111 | 4125 | ||
4112 | if (!hbp) | 4126 | if (!hbp) |
4113 | return ENOMEM; | 4127 | return -ENOMEM; |
4114 | dbp = xlog_get_bp(log, BTOBB(h_size)); | 4128 | dbp = xlog_get_bp(log, BTOBB(h_size)); |
4115 | if (!dbp) { | 4129 | if (!dbp) { |
4116 | xlog_put_bp(hbp); | 4130 | xlog_put_bp(hbp); |
4117 | return ENOMEM; | 4131 | return -ENOMEM; |
4118 | } | 4132 | } |
4119 | 4133 | ||
4120 | memset(rhash, 0, sizeof(rhash)); | 4134 | memset(rhash, 0, sizeof(rhash)); |
@@ -4388,7 +4402,7 @@ xlog_do_recover( | |||
4388 | * If IO errors happened during recovery, bail out. | 4402 | * If IO errors happened during recovery, bail out. |
4389 | */ | 4403 | */ |
4390 | if (XFS_FORCED_SHUTDOWN(log->l_mp)) { | 4404 | if (XFS_FORCED_SHUTDOWN(log->l_mp)) { |
4391 | return (EIO); | 4405 | return -EIO; |
4392 | } | 4406 | } |
4393 | 4407 | ||
4394 | /* | 4408 | /* |
@@ -4415,7 +4429,7 @@ xlog_do_recover( | |||
4415 | 4429 | ||
4416 | if (XFS_FORCED_SHUTDOWN(log->l_mp)) { | 4430 | if (XFS_FORCED_SHUTDOWN(log->l_mp)) { |
4417 | xfs_buf_relse(bp); | 4431 | xfs_buf_relse(bp); |
4418 | return XFS_ERROR(EIO); | 4432 | return -EIO; |
4419 | } | 4433 | } |
4420 | 4434 | ||
4421 | xfs_buf_iorequest(bp); | 4435 | xfs_buf_iorequest(bp); |
@@ -4492,7 +4506,7 @@ xlog_recover( | |||
4492 | "Please recover the log on a kernel that supports the unknown features.", | 4506 | "Please recover the log on a kernel that supports the unknown features.", |
4493 | (log->l_mp->m_sb.sb_features_log_incompat & | 4507 | (log->l_mp->m_sb.sb_features_log_incompat & |
4494 | XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN)); | 4508 | XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN)); |
4495 | return EINVAL; | 4509 | return -EINVAL; |
4496 | } | 4510 | } |
4497 | 4511 | ||
4498 | xfs_notice(log->l_mp, "Starting recovery (logdev: %s)", | 4512 | xfs_notice(log->l_mp, "Starting recovery (logdev: %s)", |
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c index 3507cd0ec400..fbf0384a466f 100644 --- a/fs/xfs/xfs_mount.c +++ b/fs/xfs/xfs_mount.c | |||
@@ -42,6 +42,7 @@ | |||
42 | #include "xfs_trace.h" | 42 | #include "xfs_trace.h" |
43 | #include "xfs_icache.h" | 43 | #include "xfs_icache.h" |
44 | #include "xfs_dinode.h" | 44 | #include "xfs_dinode.h" |
45 | #include "xfs_sysfs.h" | ||
45 | 46 | ||
46 | 47 | ||
47 | #ifdef HAVE_PERCPU_SB | 48 | #ifdef HAVE_PERCPU_SB |
@@ -60,6 +61,8 @@ static DEFINE_MUTEX(xfs_uuid_table_mutex); | |||
60 | static int xfs_uuid_table_size; | 61 | static int xfs_uuid_table_size; |
61 | static uuid_t *xfs_uuid_table; | 62 | static uuid_t *xfs_uuid_table; |
62 | 63 | ||
64 | extern struct kset *xfs_kset; | ||
65 | |||
63 | /* | 66 | /* |
64 | * See if the UUID is unique among mounted XFS filesystems. | 67 | * See if the UUID is unique among mounted XFS filesystems. |
65 | * Mount fails if UUID is nil or a FS with the same UUID is already mounted. | 68 | * Mount fails if UUID is nil or a FS with the same UUID is already mounted. |
@@ -76,7 +79,7 @@ xfs_uuid_mount( | |||
76 | 79 | ||
77 | if (uuid_is_nil(uuid)) { | 80 | if (uuid_is_nil(uuid)) { |
78 | xfs_warn(mp, "Filesystem has nil UUID - can't mount"); | 81 | xfs_warn(mp, "Filesystem has nil UUID - can't mount"); |
79 | return XFS_ERROR(EINVAL); | 82 | return -EINVAL; |
80 | } | 83 | } |
81 | 84 | ||
82 | mutex_lock(&xfs_uuid_table_mutex); | 85 | mutex_lock(&xfs_uuid_table_mutex); |
@@ -104,7 +107,7 @@ xfs_uuid_mount( | |||
104 | out_duplicate: | 107 | out_duplicate: |
105 | mutex_unlock(&xfs_uuid_table_mutex); | 108 | mutex_unlock(&xfs_uuid_table_mutex); |
106 | xfs_warn(mp, "Filesystem has duplicate UUID %pU - can't mount", uuid); | 109 | xfs_warn(mp, "Filesystem has duplicate UUID %pU - can't mount", uuid); |
107 | return XFS_ERROR(EINVAL); | 110 | return -EINVAL; |
108 | } | 111 | } |
109 | 112 | ||
110 | STATIC void | 113 | STATIC void |
@@ -173,13 +176,9 @@ xfs_sb_validate_fsb_count( | |||
173 | ASSERT(PAGE_SHIFT >= sbp->sb_blocklog); | 176 | ASSERT(PAGE_SHIFT >= sbp->sb_blocklog); |
174 | ASSERT(sbp->sb_blocklog >= BBSHIFT); | 177 | ASSERT(sbp->sb_blocklog >= BBSHIFT); |
175 | 178 | ||
176 | #if XFS_BIG_BLKNOS /* Limited by ULONG_MAX of page cache index */ | 179 | /* Limited by ULONG_MAX of page cache index */ |
177 | if (nblocks >> (PAGE_CACHE_SHIFT - sbp->sb_blocklog) > ULONG_MAX) | 180 | if (nblocks >> (PAGE_CACHE_SHIFT - sbp->sb_blocklog) > ULONG_MAX) |
178 | return EFBIG; | 181 | return -EFBIG; |
179 | #else /* Limited by UINT_MAX of sectors */ | ||
180 | if (nblocks << (sbp->sb_blocklog - BBSHIFT) > UINT_MAX) | ||
181 | return EFBIG; | ||
182 | #endif | ||
183 | return 0; | 182 | return 0; |
184 | } | 183 | } |
185 | 184 | ||
@@ -250,9 +249,9 @@ xfs_initialize_perag( | |||
250 | mp->m_flags &= ~XFS_MOUNT_32BITINODES; | 249 | mp->m_flags &= ~XFS_MOUNT_32BITINODES; |
251 | 250 | ||
252 | if (mp->m_flags & XFS_MOUNT_32BITINODES) | 251 | if (mp->m_flags & XFS_MOUNT_32BITINODES) |
253 | index = xfs_set_inode32(mp); | 252 | index = xfs_set_inode32(mp, agcount); |
254 | else | 253 | else |
255 | index = xfs_set_inode64(mp); | 254 | index = xfs_set_inode64(mp, agcount); |
256 | 255 | ||
257 | if (maxagi) | 256 | if (maxagi) |
258 | *maxagi = index; | 257 | *maxagi = index; |
@@ -308,15 +307,15 @@ reread: | |||
308 | if (!bp) { | 307 | if (!bp) { |
309 | if (loud) | 308 | if (loud) |
310 | xfs_warn(mp, "SB buffer read failed"); | 309 | xfs_warn(mp, "SB buffer read failed"); |
311 | return EIO; | 310 | return -EIO; |
312 | } | 311 | } |
313 | if (bp->b_error) { | 312 | if (bp->b_error) { |
314 | error = bp->b_error; | 313 | error = bp->b_error; |
315 | if (loud) | 314 | if (loud) |
316 | xfs_warn(mp, "SB validate failed with error %d.", error); | 315 | xfs_warn(mp, "SB validate failed with error %d.", error); |
317 | /* bad CRC means corrupted metadata */ | 316 | /* bad CRC means corrupted metadata */ |
318 | if (error == EFSBADCRC) | 317 | if (error == -EFSBADCRC) |
319 | error = EFSCORRUPTED; | 318 | error = -EFSCORRUPTED; |
320 | goto release_buf; | 319 | goto release_buf; |
321 | } | 320 | } |
322 | 321 | ||
@@ -324,7 +323,6 @@ reread: | |||
324 | * Initialize the mount structure from the superblock. | 323 | * Initialize the mount structure from the superblock. |
325 | */ | 324 | */ |
326 | xfs_sb_from_disk(sbp, XFS_BUF_TO_SBP(bp)); | 325 | xfs_sb_from_disk(sbp, XFS_BUF_TO_SBP(bp)); |
327 | xfs_sb_quota_from_disk(sbp); | ||
328 | 326 | ||
329 | /* | 327 | /* |
330 | * If we haven't validated the superblock, do so now before we try | 328 | * If we haven't validated the superblock, do so now before we try |
@@ -333,7 +331,7 @@ reread: | |||
333 | if (sbp->sb_magicnum != XFS_SB_MAGIC) { | 331 | if (sbp->sb_magicnum != XFS_SB_MAGIC) { |
334 | if (loud) | 332 | if (loud) |
335 | xfs_warn(mp, "Invalid superblock magic number"); | 333 | xfs_warn(mp, "Invalid superblock magic number"); |
336 | error = EINVAL; | 334 | error = -EINVAL; |
337 | goto release_buf; | 335 | goto release_buf; |
338 | } | 336 | } |
339 | 337 | ||
@@ -344,7 +342,7 @@ reread: | |||
344 | if (loud) | 342 | if (loud) |
345 | xfs_warn(mp, "device supports %u byte sectors (not %u)", | 343 | xfs_warn(mp, "device supports %u byte sectors (not %u)", |
346 | sector_size, sbp->sb_sectsize); | 344 | sector_size, sbp->sb_sectsize); |
347 | error = ENOSYS; | 345 | error = -ENOSYS; |
348 | goto release_buf; | 346 | goto release_buf; |
349 | } | 347 | } |
350 | 348 | ||
@@ -392,7 +390,7 @@ xfs_update_alignment(xfs_mount_t *mp) | |||
392 | xfs_warn(mp, | 390 | xfs_warn(mp, |
393 | "alignment check failed: sunit/swidth vs. blocksize(%d)", | 391 | "alignment check failed: sunit/swidth vs. blocksize(%d)", |
394 | sbp->sb_blocksize); | 392 | sbp->sb_blocksize); |
395 | return XFS_ERROR(EINVAL); | 393 | return -EINVAL; |
396 | } else { | 394 | } else { |
397 | /* | 395 | /* |
398 | * Convert the stripe unit and width to FSBs. | 396 | * Convert the stripe unit and width to FSBs. |
@@ -402,14 +400,14 @@ xfs_update_alignment(xfs_mount_t *mp) | |||
402 | xfs_warn(mp, | 400 | xfs_warn(mp, |
403 | "alignment check failed: sunit/swidth vs. agsize(%d)", | 401 | "alignment check failed: sunit/swidth vs. agsize(%d)", |
404 | sbp->sb_agblocks); | 402 | sbp->sb_agblocks); |
405 | return XFS_ERROR(EINVAL); | 403 | return -EINVAL; |
406 | } else if (mp->m_dalign) { | 404 | } else if (mp->m_dalign) { |
407 | mp->m_swidth = XFS_BB_TO_FSBT(mp, mp->m_swidth); | 405 | mp->m_swidth = XFS_BB_TO_FSBT(mp, mp->m_swidth); |
408 | } else { | 406 | } else { |
409 | xfs_warn(mp, | 407 | xfs_warn(mp, |
410 | "alignment check failed: sunit(%d) less than bsize(%d)", | 408 | "alignment check failed: sunit(%d) less than bsize(%d)", |
411 | mp->m_dalign, sbp->sb_blocksize); | 409 | mp->m_dalign, sbp->sb_blocksize); |
412 | return XFS_ERROR(EINVAL); | 410 | return -EINVAL; |
413 | } | 411 | } |
414 | } | 412 | } |
415 | 413 | ||
@@ -429,7 +427,7 @@ xfs_update_alignment(xfs_mount_t *mp) | |||
429 | } else { | 427 | } else { |
430 | xfs_warn(mp, | 428 | xfs_warn(mp, |
431 | "cannot change alignment: superblock does not support data alignment"); | 429 | "cannot change alignment: superblock does not support data alignment"); |
432 | return XFS_ERROR(EINVAL); | 430 | return -EINVAL; |
433 | } | 431 | } |
434 | } else if ((mp->m_flags & XFS_MOUNT_NOALIGN) != XFS_MOUNT_NOALIGN && | 432 | } else if ((mp->m_flags & XFS_MOUNT_NOALIGN) != XFS_MOUNT_NOALIGN && |
435 | xfs_sb_version_hasdalign(&mp->m_sb)) { | 433 | xfs_sb_version_hasdalign(&mp->m_sb)) { |
@@ -556,14 +554,14 @@ xfs_check_sizes(xfs_mount_t *mp) | |||
556 | d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks); | 554 | d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks); |
557 | if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_dblocks) { | 555 | if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_dblocks) { |
558 | xfs_warn(mp, "filesystem size mismatch detected"); | 556 | xfs_warn(mp, "filesystem size mismatch detected"); |
559 | return XFS_ERROR(EFBIG); | 557 | return -EFBIG; |
560 | } | 558 | } |
561 | bp = xfs_buf_read_uncached(mp->m_ddev_targp, | 559 | bp = xfs_buf_read_uncached(mp->m_ddev_targp, |
562 | d - XFS_FSS_TO_BB(mp, 1), | 560 | d - XFS_FSS_TO_BB(mp, 1), |
563 | XFS_FSS_TO_BB(mp, 1), 0, NULL); | 561 | XFS_FSS_TO_BB(mp, 1), 0, NULL); |
564 | if (!bp) { | 562 | if (!bp) { |
565 | xfs_warn(mp, "last sector read failed"); | 563 | xfs_warn(mp, "last sector read failed"); |
566 | return EIO; | 564 | return -EIO; |
567 | } | 565 | } |
568 | xfs_buf_relse(bp); | 566 | xfs_buf_relse(bp); |
569 | 567 | ||
@@ -571,14 +569,14 @@ xfs_check_sizes(xfs_mount_t *mp) | |||
571 | d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_logblocks); | 569 | d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_logblocks); |
572 | if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_logblocks) { | 570 | if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_logblocks) { |
573 | xfs_warn(mp, "log size mismatch detected"); | 571 | xfs_warn(mp, "log size mismatch detected"); |
574 | return XFS_ERROR(EFBIG); | 572 | return -EFBIG; |
575 | } | 573 | } |
576 | bp = xfs_buf_read_uncached(mp->m_logdev_targp, | 574 | bp = xfs_buf_read_uncached(mp->m_logdev_targp, |
577 | d - XFS_FSB_TO_BB(mp, 1), | 575 | d - XFS_FSB_TO_BB(mp, 1), |
578 | XFS_FSB_TO_BB(mp, 1), 0, NULL); | 576 | XFS_FSB_TO_BB(mp, 1), 0, NULL); |
579 | if (!bp) { | 577 | if (!bp) { |
580 | xfs_warn(mp, "log device read failed"); | 578 | xfs_warn(mp, "log device read failed"); |
581 | return EIO; | 579 | return -EIO; |
582 | } | 580 | } |
583 | xfs_buf_relse(bp); | 581 | xfs_buf_relse(bp); |
584 | } | 582 | } |
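xfs_check_sizes() keeps rejecting a superblock whose block count does not round-trip through the basic-block conversion (-EFBIG) and probing the last sector of each device (-EIO if unreadable). A rough userspace analogue of the same rules, with pread() standing in for the kernel's uncached buffer read; it is not the XFS code:

#include <errno.h>
#include <stdint.h>
#include <sys/types.h>
#include <unistd.h>

/* Probe the last 512-byte sector the superblock claims the device has. */
static int check_device_size(int fd, uint64_t nblocks, uint32_t blocksize)
{
	char sector[512];
	uint64_t bytes = nblocks * (uint64_t)blocksize;
	off_t last;

	if (blocksize < sizeof(sector) || bytes / blocksize != nblocks ||
	    bytes > INT64_MAX)
		return -EFBIG;		/* size doesn't round-trip */
	last = (off_t)(bytes - sizeof(sector));
	if (pread(fd, sector, sizeof(sector), last) != (ssize_t)sizeof(sector))
		return -EIO;		/* device is smaller than claimed */
	return 0;
}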
@@ -731,10 +729,15 @@ xfs_mountfs( | |||
731 | 729 | ||
732 | xfs_set_maxicount(mp); | 730 | xfs_set_maxicount(mp); |
733 | 731 | ||
734 | error = xfs_uuid_mount(mp); | 732 | mp->m_kobj.kobject.kset = xfs_kset; |
733 | error = xfs_sysfs_init(&mp->m_kobj, &xfs_mp_ktype, NULL, mp->m_fsname); | ||
735 | if (error) | 734 | if (error) |
736 | goto out; | 735 | goto out; |
737 | 736 | ||
737 | error = xfs_uuid_mount(mp); | ||
738 | if (error) | ||
739 | goto out_remove_sysfs; | ||
740 | |||
738 | /* | 741 | /* |
739 | * Set the minimum read and write sizes | 742 | * Set the minimum read and write sizes |
740 | */ | 743 | */ |
@@ -816,7 +819,7 @@ xfs_mountfs( | |||
816 | if (!sbp->sb_logblocks) { | 819 | if (!sbp->sb_logblocks) { |
817 | xfs_warn(mp, "no log defined"); | 820 | xfs_warn(mp, "no log defined"); |
818 | XFS_ERROR_REPORT("xfs_mountfs", XFS_ERRLEVEL_LOW, mp); | 821 | XFS_ERROR_REPORT("xfs_mountfs", XFS_ERRLEVEL_LOW, mp); |
819 | error = XFS_ERROR(EFSCORRUPTED); | 822 | error = -EFSCORRUPTED; |
820 | goto out_free_perag; | 823 | goto out_free_perag; |
821 | } | 824 | } |
822 | 825 | ||
@@ -855,7 +858,7 @@ xfs_mountfs( | |||
855 | !mp->m_sb.sb_inprogress) { | 858 | !mp->m_sb.sb_inprogress) { |
856 | error = xfs_initialize_perag_data(mp, sbp->sb_agcount); | 859 | error = xfs_initialize_perag_data(mp, sbp->sb_agcount); |
857 | if (error) | 860 | if (error) |
858 | goto out_fail_wait; | 861 | goto out_log_dealloc; |
859 | } | 862 | } |
860 | 863 | ||
861 | /* | 864 | /* |
@@ -876,7 +879,7 @@ xfs_mountfs( | |||
876 | xfs_iunlock(rip, XFS_ILOCK_EXCL); | 879 | xfs_iunlock(rip, XFS_ILOCK_EXCL); |
877 | XFS_ERROR_REPORT("xfs_mountfs_int(2)", XFS_ERRLEVEL_LOW, | 880 | XFS_ERROR_REPORT("xfs_mountfs_int(2)", XFS_ERRLEVEL_LOW, |
878 | mp); | 881 | mp); |
879 | error = XFS_ERROR(EFSCORRUPTED); | 882 | error = -EFSCORRUPTED; |
880 | goto out_rele_rip; | 883 | goto out_rele_rip; |
881 | } | 884 | } |
882 | mp->m_rootip = rip; /* save it */ | 885 | mp->m_rootip = rip; /* save it */ |
@@ -927,7 +930,7 @@ xfs_mountfs( | |||
927 | xfs_notice(mp, "resetting quota flags"); | 930 | xfs_notice(mp, "resetting quota flags"); |
928 | error = xfs_mount_reset_sbqflags(mp); | 931 | error = xfs_mount_reset_sbqflags(mp); |
929 | if (error) | 932 | if (error) |
930 | return error; | 933 | goto out_rtunmount; |
931 | } | 934 | } |
932 | } | 935 | } |
933 | 936 | ||
@@ -989,6 +992,8 @@ xfs_mountfs( | |||
989 | xfs_da_unmount(mp); | 992 | xfs_da_unmount(mp); |
990 | out_remove_uuid: | 993 | out_remove_uuid: |
991 | xfs_uuid_unmount(mp); | 994 | xfs_uuid_unmount(mp); |
995 | out_remove_sysfs: | ||
996 | xfs_sysfs_del(&mp->m_kobj); | ||
992 | out: | 997 | out: |
993 | return error; | 998 | return error; |
994 | } | 999 | } |
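The mount path now creates the per-mount sysfs object before the UUID table entry, and the error labels unwind in reverse order: the new out_remove_sysfs label tears down only the step that had already succeeded, and xfs_unmountfs() gains the matching xfs_sysfs_del() in the next hunk. A condensed outline of that ordering, lifted from the hunks above rather than a buildable function:

	mp->m_kobj.kobject.kset = xfs_kset;
	error = xfs_sysfs_init(&mp->m_kobj, &xfs_mp_ktype, NULL, mp->m_fsname);
	if (error)
		goto out;

	error = xfs_uuid_mount(mp);
	if (error)
		goto out_remove_sysfs;

	/* ... the rest of xfs_mountfs() ... */
	return 0;

out_remove_sysfs:
	xfs_sysfs_del(&mp->m_kobj);	/* undo only what already succeeded */
out:
	return error;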
@@ -1071,6 +1076,8 @@ xfs_unmountfs( | |||
1071 | xfs_errortag_clearall(mp, 0); | 1076 | xfs_errortag_clearall(mp, 0); |
1072 | #endif | 1077 | #endif |
1073 | xfs_free_perag(mp); | 1078 | xfs_free_perag(mp); |
1079 | |||
1080 | xfs_sysfs_del(&mp->m_kobj); | ||
1074 | } | 1081 | } |
1075 | 1082 | ||
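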
1076 | int | 1083 | int |
@@ -1152,7 +1159,7 @@ xfs_mod_incore_sb_unlocked( | |||
1152 | lcounter += delta; | 1159 | lcounter += delta; |
1153 | if (lcounter < 0) { | 1160 | if (lcounter < 0) { |
1154 | ASSERT(0); | 1161 | ASSERT(0); |
1155 | return XFS_ERROR(EINVAL); | 1162 | return -EINVAL; |
1156 | } | 1163 | } |
1157 | mp->m_sb.sb_icount = lcounter; | 1164 | mp->m_sb.sb_icount = lcounter; |
1158 | return 0; | 1165 | return 0; |
@@ -1161,7 +1168,7 @@ xfs_mod_incore_sb_unlocked( | |||
1161 | lcounter += delta; | 1168 | lcounter += delta; |
1162 | if (lcounter < 0) { | 1169 | if (lcounter < 0) { |
1163 | ASSERT(0); | 1170 | ASSERT(0); |
1164 | return XFS_ERROR(EINVAL); | 1171 | return -EINVAL; |
1165 | } | 1172 | } |
1166 | mp->m_sb.sb_ifree = lcounter; | 1173 | mp->m_sb.sb_ifree = lcounter; |
1167 | return 0; | 1174 | return 0; |
@@ -1191,7 +1198,7 @@ xfs_mod_incore_sb_unlocked( | |||
1191 | * blocks if were allowed to. | 1198 | * blocks if were allowed to. |
1192 | */ | 1199 | */ |
1193 | if (!rsvd) | 1200 | if (!rsvd) |
1194 | return XFS_ERROR(ENOSPC); | 1201 | return -ENOSPC; |
1195 | 1202 | ||
1196 | lcounter = (long long)mp->m_resblks_avail + delta; | 1203 | lcounter = (long long)mp->m_resblks_avail + delta; |
1197 | if (lcounter >= 0) { | 1204 | if (lcounter >= 0) { |
@@ -1202,7 +1209,7 @@ xfs_mod_incore_sb_unlocked( | |||
1202 | "Filesystem \"%s\": reserve blocks depleted! " | 1209 | "Filesystem \"%s\": reserve blocks depleted! " |
1203 | "Consider increasing reserve pool size.", | 1210 | "Consider increasing reserve pool size.", |
1204 | mp->m_fsname); | 1211 | mp->m_fsname); |
1205 | return XFS_ERROR(ENOSPC); | 1212 | return -ENOSPC; |
1206 | } | 1213 | } |
1207 | 1214 | ||
1208 | mp->m_sb.sb_fdblocks = lcounter + XFS_ALLOC_SET_ASIDE(mp); | 1215 | mp->m_sb.sb_fdblocks = lcounter + XFS_ALLOC_SET_ASIDE(mp); |
@@ -1211,7 +1218,7 @@ xfs_mod_incore_sb_unlocked( | |||
1211 | lcounter = (long long)mp->m_sb.sb_frextents; | 1218 | lcounter = (long long)mp->m_sb.sb_frextents; |
1212 | lcounter += delta; | 1219 | lcounter += delta; |
1213 | if (lcounter < 0) { | 1220 | if (lcounter < 0) { |
1214 | return XFS_ERROR(ENOSPC); | 1221 | return -ENOSPC; |
1215 | } | 1222 | } |
1216 | mp->m_sb.sb_frextents = lcounter; | 1223 | mp->m_sb.sb_frextents = lcounter; |
1217 | return 0; | 1224 | return 0; |
@@ -1220,7 +1227,7 @@ xfs_mod_incore_sb_unlocked( | |||
1220 | lcounter += delta; | 1227 | lcounter += delta; |
1221 | if (lcounter < 0) { | 1228 | if (lcounter < 0) { |
1222 | ASSERT(0); | 1229 | ASSERT(0); |
1223 | return XFS_ERROR(EINVAL); | 1230 | return -EINVAL; |
1224 | } | 1231 | } |
1225 | mp->m_sb.sb_dblocks = lcounter; | 1232 | mp->m_sb.sb_dblocks = lcounter; |
1226 | return 0; | 1233 | return 0; |
@@ -1229,7 +1236,7 @@ xfs_mod_incore_sb_unlocked( | |||
1229 | scounter += delta; | 1236 | scounter += delta; |
1230 | if (scounter < 0) { | 1237 | if (scounter < 0) { |
1231 | ASSERT(0); | 1238 | ASSERT(0); |
1232 | return XFS_ERROR(EINVAL); | 1239 | return -EINVAL; |
1233 | } | 1240 | } |
1234 | mp->m_sb.sb_agcount = scounter; | 1241 | mp->m_sb.sb_agcount = scounter; |
1235 | return 0; | 1242 | return 0; |
@@ -1238,7 +1245,7 @@ xfs_mod_incore_sb_unlocked( | |||
1238 | scounter += delta; | 1245 | scounter += delta; |
1239 | if (scounter < 0) { | 1246 | if (scounter < 0) { |
1240 | ASSERT(0); | 1247 | ASSERT(0); |
1241 | return XFS_ERROR(EINVAL); | 1248 | return -EINVAL; |
1242 | } | 1249 | } |
1243 | mp->m_sb.sb_imax_pct = scounter; | 1250 | mp->m_sb.sb_imax_pct = scounter; |
1244 | return 0; | 1251 | return 0; |
@@ -1247,7 +1254,7 @@ xfs_mod_incore_sb_unlocked( | |||
1247 | scounter += delta; | 1254 | scounter += delta; |
1248 | if (scounter < 0) { | 1255 | if (scounter < 0) { |
1249 | ASSERT(0); | 1256 | ASSERT(0); |
1250 | return XFS_ERROR(EINVAL); | 1257 | return -EINVAL; |
1251 | } | 1258 | } |
1252 | mp->m_sb.sb_rextsize = scounter; | 1259 | mp->m_sb.sb_rextsize = scounter; |
1253 | return 0; | 1260 | return 0; |
@@ -1256,7 +1263,7 @@ xfs_mod_incore_sb_unlocked( | |||
1256 | scounter += delta; | 1263 | scounter += delta; |
1257 | if (scounter < 0) { | 1264 | if (scounter < 0) { |
1258 | ASSERT(0); | 1265 | ASSERT(0); |
1259 | return XFS_ERROR(EINVAL); | 1266 | return -EINVAL; |
1260 | } | 1267 | } |
1261 | mp->m_sb.sb_rbmblocks = scounter; | 1268 | mp->m_sb.sb_rbmblocks = scounter; |
1262 | return 0; | 1269 | return 0; |
@@ -1265,7 +1272,7 @@ xfs_mod_incore_sb_unlocked( | |||
1265 | lcounter += delta; | 1272 | lcounter += delta; |
1266 | if (lcounter < 0) { | 1273 | if (lcounter < 0) { |
1267 | ASSERT(0); | 1274 | ASSERT(0); |
1268 | return XFS_ERROR(EINVAL); | 1275 | return -EINVAL; |
1269 | } | 1276 | } |
1270 | mp->m_sb.sb_rblocks = lcounter; | 1277 | mp->m_sb.sb_rblocks = lcounter; |
1271 | return 0; | 1278 | return 0; |
@@ -1274,7 +1281,7 @@ xfs_mod_incore_sb_unlocked( | |||
1274 | lcounter += delta; | 1281 | lcounter += delta; |
1275 | if (lcounter < 0) { | 1282 | if (lcounter < 0) { |
1276 | ASSERT(0); | 1283 | ASSERT(0); |
1277 | return XFS_ERROR(EINVAL); | 1284 | return -EINVAL; |
1278 | } | 1285 | } |
1279 | mp->m_sb.sb_rextents = lcounter; | 1286 | mp->m_sb.sb_rextents = lcounter; |
1280 | return 0; | 1287 | return 0; |
@@ -1283,13 +1290,13 @@ xfs_mod_incore_sb_unlocked( | |||
1283 | scounter += delta; | 1290 | scounter += delta; |
1284 | if (scounter < 0) { | 1291 | if (scounter < 0) { |
1285 | ASSERT(0); | 1292 | ASSERT(0); |
1286 | return XFS_ERROR(EINVAL); | 1293 | return -EINVAL; |
1287 | } | 1294 | } |
1288 | mp->m_sb.sb_rextslog = scounter; | 1295 | mp->m_sb.sb_rextslog = scounter; |
1289 | return 0; | 1296 | return 0; |
1290 | default: | 1297 | default: |
1291 | ASSERT(0); | 1298 | ASSERT(0); |
1292 | return XFS_ERROR(EINVAL); | 1299 | return -EINVAL; |
1293 | } | 1300 | } |
1294 | } | 1301 | } |
1295 | 1302 | ||
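Every branch of xfs_mod_incore_sb_unlocked() follows the same shape: apply the signed delta to an in-core superblock counter, refuse to let it go negative, and now report failure as a negative errno (-EINVAL for impossible requests, -ENOSPC when free space or the reserve pool runs out). A minimal stand-alone sketch of one such branch ("counter" stands in for fields like sb_icount or sb_ifree):

#include <errno.h>
#include <stdint.h>

static int mod_counter(uint64_t *counter, int64_t delta)
{
	long long lcounter = (long long)*counter + delta;

	if (lcounter < 0)
		return -EINVAL;		/* would free more than the fs has */
	*counter = (uint64_t)lcounter;
	return 0;
}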
@@ -1452,7 +1459,7 @@ xfs_dev_is_read_only( | |||
1452 | (mp->m_rtdev_targp && xfs_readonly_buftarg(mp->m_rtdev_targp))) { | 1459 | (mp->m_rtdev_targp && xfs_readonly_buftarg(mp->m_rtdev_targp))) { |
1453 | xfs_notice(mp, "%s required on read-only device.", message); | 1460 | xfs_notice(mp, "%s required on read-only device.", message); |
1454 | xfs_notice(mp, "write access unavailable, cannot proceed."); | 1461 | xfs_notice(mp, "write access unavailable, cannot proceed."); |
1455 | return EROFS; | 1462 | return -EROFS; |
1456 | } | 1463 | } |
1457 | return 0; | 1464 | return 0; |
1458 | } | 1465 | } |
@@ -1995,7 +2002,7 @@ slow_path: | |||
1995 | * (e.g. lots of space just got freed). After that | 2002 | * (e.g. lots of space just got freed). After that |
1996 | * we are done. | 2003 | * we are done. |
1997 | */ | 2004 | */ |
1998 | if (ret != ENOSPC) | 2005 | if (ret != -ENOSPC) |
1999 | xfs_icsb_balance_counter(mp, field, 0); | 2006 | xfs_icsb_balance_counter(mp, field, 0); |
2000 | xfs_icsb_unlock(mp); | 2007 | xfs_icsb_unlock(mp); |
2001 | return ret; | 2008 | return ret; |
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index 7295a0b7c343..b0447c86e7e2 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -166,6 +166,7 @@ typedef struct xfs_mount { | |||
166 | on the next remount,rw */ | 166 | on the next remount,rw */ |
167 | int64_t m_low_space[XFS_LOWSP_MAX]; | 167 | int64_t m_low_space[XFS_LOWSP_MAX]; |
168 | /* low free space thresholds */ | 168 | /* low free space thresholds */ |
169 | struct xfs_kobj m_kobj; | ||
169 | 170 | ||
170 | struct workqueue_struct *m_data_workqueue; | 171 | struct workqueue_struct *m_data_workqueue; |
171 | struct workqueue_struct *m_unwritten_workqueue; | 172 | struct workqueue_struct *m_unwritten_workqueue; |
diff --git a/fs/xfs/xfs_mru_cache.c b/fs/xfs/xfs_mru_cache.c
index f99b4933dc22..1eb6f3df698c 100644
--- a/fs/xfs/xfs_mru_cache.c
+++ b/fs/xfs/xfs_mru_cache.c
@@ -337,20 +337,20 @@ xfs_mru_cache_create( | |||
337 | *mrup = NULL; | 337 | *mrup = NULL; |
338 | 338 | ||
339 | if (!mrup || !grp_count || !lifetime_ms || !free_func) | 339 | if (!mrup || !grp_count || !lifetime_ms || !free_func) |
340 | return EINVAL; | 340 | return -EINVAL; |
341 | 341 | ||
342 | if (!(grp_time = msecs_to_jiffies(lifetime_ms) / grp_count)) | 342 | if (!(grp_time = msecs_to_jiffies(lifetime_ms) / grp_count)) |
343 | return EINVAL; | 343 | return -EINVAL; |
344 | 344 | ||
345 | if (!(mru = kmem_zalloc(sizeof(*mru), KM_SLEEP))) | 345 | if (!(mru = kmem_zalloc(sizeof(*mru), KM_SLEEP))) |
346 | return ENOMEM; | 346 | return -ENOMEM; |
347 | 347 | ||
348 | /* An extra list is needed to avoid reaping up to a grp_time early. */ | 348 | /* An extra list is needed to avoid reaping up to a grp_time early. */ |
349 | mru->grp_count = grp_count + 1; | 349 | mru->grp_count = grp_count + 1; |
350 | mru->lists = kmem_zalloc(mru->grp_count * sizeof(*mru->lists), KM_SLEEP); | 350 | mru->lists = kmem_zalloc(mru->grp_count * sizeof(*mru->lists), KM_SLEEP); |
351 | 351 | ||
352 | if (!mru->lists) { | 352 | if (!mru->lists) { |
353 | err = ENOMEM; | 353 | err = -ENOMEM; |
354 | goto exit; | 354 | goto exit; |
355 | } | 355 | } |
356 | 356 | ||
@@ -434,16 +434,16 @@ xfs_mru_cache_insert( | |||
434 | 434 | ||
435 | ASSERT(mru && mru->lists); | 435 | ASSERT(mru && mru->lists); |
436 | if (!mru || !mru->lists) | 436 | if (!mru || !mru->lists) |
437 | return EINVAL; | 437 | return -EINVAL; |
438 | 438 | ||
439 | if (radix_tree_preload(GFP_KERNEL)) | 439 | if (radix_tree_preload(GFP_KERNEL)) |
440 | return ENOMEM; | 440 | return -ENOMEM; |
441 | 441 | ||
442 | INIT_LIST_HEAD(&elem->list_node); | 442 | INIT_LIST_HEAD(&elem->list_node); |
443 | elem->key = key; | 443 | elem->key = key; |
444 | 444 | ||
445 | spin_lock(&mru->lock); | 445 | spin_lock(&mru->lock); |
446 | error = -radix_tree_insert(&mru->store, key, elem); | 446 | error = radix_tree_insert(&mru->store, key, elem); |
447 | radix_tree_preload_end(); | 447 | radix_tree_preload_end(); |
448 | if (!error) | 448 | if (!error) |
449 | _xfs_mru_cache_list_insert(mru, elem); | 449 | _xfs_mru_cache_list_insert(mru, elem); |
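The one-character change in xfs_mru_cache_insert() is easy to miss: radix_tree_insert() already returns 0 or a negative errno (-EEXIST, -ENOMEM), and the old code negated it to fit XFS's former positive-error convention. With the file converted to negative errnos, the result is stored as-is. The relevant lines, condensed from the hunk above (in-kernel API, not a stand-alone program):

	if (radix_tree_preload(GFP_KERNEL))
		return -ENOMEM;
	spin_lock(&mru->lock);
	/* radix_tree_insert() returns 0/-EEXIST/-ENOMEM: store it as-is. */
	error = radix_tree_insert(&mru->store, key, elem);
	radix_tree_preload_end();
	if (!error)
		_xfs_mru_cache_list_insert(mru, elem);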
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index 6d26759c779a..10232102b4a6 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -98,18 +98,18 @@ restart: | |||
98 | next_index = be32_to_cpu(dqp->q_core.d_id) + 1; | 98 | next_index = be32_to_cpu(dqp->q_core.d_id) + 1; |
99 | 99 | ||
100 | error = execute(batch[i], data); | 100 | error = execute(batch[i], data); |
101 | if (error == EAGAIN) { | 101 | if (error == -EAGAIN) { |
102 | skipped++; | 102 | skipped++; |
103 | continue; | 103 | continue; |
104 | } | 104 | } |
105 | if (error && last_error != EFSCORRUPTED) | 105 | if (error && last_error != -EFSCORRUPTED) |
106 | last_error = error; | 106 | last_error = error; |
107 | } | 107 | } |
108 | 108 | ||
109 | mutex_unlock(&qi->qi_tree_lock); | 109 | mutex_unlock(&qi->qi_tree_lock); |
110 | 110 | ||
111 | /* bail out if the filesystem is corrupted. */ | 111 | /* bail out if the filesystem is corrupted. */ |
112 | if (last_error == EFSCORRUPTED) { | 112 | if (last_error == -EFSCORRUPTED) { |
113 | skipped = 0; | 113 | skipped = 0; |
114 | break; | 114 | break; |
115 | } | 115 | } |
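The dquot-walk hunk above keeps the usual XFS error-aggregation rules, just with negated comparisons: -EAGAIN marks an entry to retry on a later pass, any other error is remembered, and -EFSCORRUPTED aborts the whole walk. A simplified stand-alone version of that policy (the real walker's locking and batching are elided; EFSCORRUPTED is a stand-in define):

#include <errno.h>

#define EFSCORRUPTED	EUCLEAN		/* stand-in for the XFS definition */

static int walk_batch(int (*execute)(void *item, void *data),
		      void **batch, int nr, void *data, int *skipped)
{
	int last_error = 0, error, i;

	for (i = 0; i < nr; i++) {
		error = execute(batch[i], data);
		if (error == -EAGAIN) {
			(*skipped)++;		/* retry on a later pass */
			continue;
		}
		if (error && last_error != -EFSCORRUPTED)
			last_error = error;
		if (last_error == -EFSCORRUPTED) {
			*skipped = 0;		/* corrupt fs: don't retry */
			break;
		}
	}
	return last_error;
}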
@@ -138,7 +138,7 @@ xfs_qm_dqpurge( | |||
138 | xfs_dqlock(dqp); | 138 | xfs_dqlock(dqp); |
139 | if ((dqp->dq_flags & XFS_DQ_FREEING) || dqp->q_nrefs != 0) { | 139 | if ((dqp->dq_flags & XFS_DQ_FREEING) || dqp->q_nrefs != 0) { |
140 | xfs_dqunlock(dqp); | 140 | xfs_dqunlock(dqp); |
141 | return EAGAIN; | 141 | return -EAGAIN; |
142 | } | 142 | } |
143 | 143 | ||
144 | dqp->dq_flags |= XFS_DQ_FREEING; | 144 | dqp->dq_flags |= XFS_DQ_FREEING; |
@@ -221,100 +221,6 @@ xfs_qm_unmount( | |||
221 | } | 221 | } |
222 | } | 222 | } |
223 | 223 | ||
224 | |||
225 | /* | ||
226 | * This is called from xfs_mountfs to start quotas and initialize all | ||
227 | * necessary data structures like quotainfo. This is also responsible for | ||
228 | * running a quotacheck as necessary. We are guaranteed that the superblock | ||
229 | * is consistently read in at this point. | ||
230 | * | ||
231 | * If we fail here, the mount will continue with quota turned off. We don't | ||
232 | * need to inidicate success or failure at all. | ||
233 | */ | ||
234 | void | ||
235 | xfs_qm_mount_quotas( | ||
236 | xfs_mount_t *mp) | ||
237 | { | ||
238 | int error = 0; | ||
239 | uint sbf; | ||
240 | |||
241 | /* | ||
242 | * If quotas on realtime volumes is not supported, we disable | ||
243 | * quotas immediately. | ||
244 | */ | ||
245 | if (mp->m_sb.sb_rextents) { | ||
246 | xfs_notice(mp, "Cannot turn on quotas for realtime filesystem"); | ||
247 | mp->m_qflags = 0; | ||
248 | goto write_changes; | ||
249 | } | ||
250 | |||
251 | ASSERT(XFS_IS_QUOTA_RUNNING(mp)); | ||
252 | |||
253 | /* | ||
254 | * Allocate the quotainfo structure inside the mount struct, and | ||
255 | * create quotainode(s), and change/rev superblock if necessary. | ||
256 | */ | ||
257 | error = xfs_qm_init_quotainfo(mp); | ||
258 | if (error) { | ||
259 | /* | ||
260 | * We must turn off quotas. | ||
261 | */ | ||
262 | ASSERT(mp->m_quotainfo == NULL); | ||
263 | mp->m_qflags = 0; | ||
264 | goto write_changes; | ||
265 | } | ||
266 | /* | ||
267 | * If any of the quotas are not consistent, do a quotacheck. | ||
268 | */ | ||
269 | if (XFS_QM_NEED_QUOTACHECK(mp)) { | ||
270 | error = xfs_qm_quotacheck(mp); | ||
271 | if (error) { | ||
272 | /* Quotacheck failed and disabled quotas. */ | ||
273 | return; | ||
274 | } | ||
275 | } | ||
276 | /* | ||
277 | * If one type of quotas is off, then it will lose its | ||
278 | * quotachecked status, since we won't be doing accounting for | ||
279 | * that type anymore. | ||
280 | */ | ||
281 | if (!XFS_IS_UQUOTA_ON(mp)) | ||
282 | mp->m_qflags &= ~XFS_UQUOTA_CHKD; | ||
283 | if (!XFS_IS_GQUOTA_ON(mp)) | ||
284 | mp->m_qflags &= ~XFS_GQUOTA_CHKD; | ||
285 | if (!XFS_IS_PQUOTA_ON(mp)) | ||
286 | mp->m_qflags &= ~XFS_PQUOTA_CHKD; | ||
287 | |||
288 | write_changes: | ||
289 | /* | ||
290 | * We actually don't have to acquire the m_sb_lock at all. | ||
291 | * This can only be called from mount, and that's single threaded. XXX | ||
292 | */ | ||
293 | spin_lock(&mp->m_sb_lock); | ||
294 | sbf = mp->m_sb.sb_qflags; | ||
295 | mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL; | ||
296 | spin_unlock(&mp->m_sb_lock); | ||
297 | |||
298 | if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) { | ||
299 | if (xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS)) { | ||
300 | /* | ||
301 | * We could only have been turning quotas off. | ||
302 | * We aren't in very good shape actually because | ||
303 | * the incore structures are convinced that quotas are | ||
304 | * off, but the on disk superblock doesn't know that ! | ||
305 | */ | ||
306 | ASSERT(!(XFS_IS_QUOTA_RUNNING(mp))); | ||
307 | xfs_alert(mp, "%s: Superblock update failed!", | ||
308 | __func__); | ||
309 | } | ||
310 | } | ||
311 | |||
312 | if (error) { | ||
313 | xfs_warn(mp, "Failed to initialize disk quotas."); | ||
314 | return; | ||
315 | } | ||
316 | } | ||
317 | |||
318 | /* | 224 | /* |
319 | * Called from the vfsops layer. | 225 | * Called from the vfsops layer. |
320 | */ | 226 | */ |
@@ -671,7 +577,7 @@ xfs_qm_init_quotainfo( | |||
671 | 577 | ||
672 | qinf = mp->m_quotainfo = kmem_zalloc(sizeof(xfs_quotainfo_t), KM_SLEEP); | 578 | qinf = mp->m_quotainfo = kmem_zalloc(sizeof(xfs_quotainfo_t), KM_SLEEP); |
673 | 579 | ||
674 | error = -list_lru_init(&qinf->qi_lru); | 580 | error = list_lru_init(&qinf->qi_lru); |
675 | if (error) | 581 | if (error) |
676 | goto out_free_qinf; | 582 | goto out_free_qinf; |
677 | 583 | ||
@@ -995,7 +901,7 @@ xfs_qm_dqiter_bufs( | |||
995 | * will leave a trace in the log indicating corruption has | 901 | * will leave a trace in the log indicating corruption has |
996 | * been detected. | 902 | * been detected. |
997 | */ | 903 | */ |
998 | if (error == EFSCORRUPTED) { | 904 | if (error == -EFSCORRUPTED) { |
999 | error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, | 905 | error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, |
1000 | XFS_FSB_TO_DADDR(mp, bno), | 906 | XFS_FSB_TO_DADDR(mp, bno), |
1001 | mp->m_quotainfo->qi_dqchunklen, 0, &bp, | 907 | mp->m_quotainfo->qi_dqchunklen, 0, &bp, |
@@ -1005,6 +911,12 @@ xfs_qm_dqiter_bufs( | |||
1005 | if (error) | 911 | if (error) |
1006 | break; | 912 | break; |
1007 | 913 | ||
914 | /* | ||
915 | * A corrupt buffer might not have a verifier attached, so | ||
916 | * make sure we have the correct one attached before writeback | ||
917 | * occurs. | ||
918 | */ | ||
919 | bp->b_ops = &xfs_dquot_buf_ops; | ||
1008 | xfs_qm_reset_dqcounts(mp, bp, firstid, type); | 920 | xfs_qm_reset_dqcounts(mp, bp, firstid, type); |
1009 | xfs_buf_delwri_queue(bp, buffer_list); | 921 | xfs_buf_delwri_queue(bp, buffer_list); |
1010 | xfs_buf_relse(bp); | 922 | xfs_buf_relse(bp); |
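The new block in xfs_qm_dqiter_bufs() closes a writeback hole: when the first read fails with -EFSCORRUPTED, the buffer is re-read without a verifier, so nothing would recheck (or, on v5 superblocks, recompute) the dquot CRCs when the delwri queue is flushed. Assigning xfs_dquot_buf_ops before the buffer is queued ensures the write verifier runs; the readahead hunk just below makes the same choice, passing &xfs_dquot_buf_ops instead of NULL. The ordering that matters, condensed from the hunk (in-kernel API):

	/* attach the verifier before the buffer can be written back */
	bp->b_ops = &xfs_dquot_buf_ops;
	xfs_qm_reset_dqcounts(mp, bp, firstid, type);
	xfs_buf_delwri_queue(bp, buffer_list);
	xfs_buf_relse(bp);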
@@ -1090,7 +1002,7 @@ xfs_qm_dqiterate( | |||
1090 | xfs_buf_readahead(mp->m_ddev_targp, | 1002 | xfs_buf_readahead(mp->m_ddev_targp, |
1091 | XFS_FSB_TO_DADDR(mp, rablkno), | 1003 | XFS_FSB_TO_DADDR(mp, rablkno), |
1092 | mp->m_quotainfo->qi_dqchunklen, | 1004 | mp->m_quotainfo->qi_dqchunklen, |
1093 | NULL); | 1005 | &xfs_dquot_buf_ops); |
1094 | rablkno++; | 1006 | rablkno++; |
1095 | } | 1007 | } |
1096 | } | 1008 | } |
@@ -1138,8 +1050,8 @@ xfs_qm_quotacheck_dqadjust( | |||
1138 | /* | 1050 | /* |
1139 | * Shouldn't be able to turn off quotas here. | 1051 | * Shouldn't be able to turn off quotas here. |
1140 | */ | 1052 | */ |
1141 | ASSERT(error != ESRCH); | 1053 | ASSERT(error != -ESRCH); |
1142 | ASSERT(error != ENOENT); | 1054 | ASSERT(error != -ENOENT); |
1143 | return error; | 1055 | return error; |
1144 | } | 1056 | } |
1145 | 1057 | ||
@@ -1226,7 +1138,7 @@ xfs_qm_dqusage_adjust( | |||
1226 | */ | 1138 | */ |
1227 | if (xfs_is_quota_inode(&mp->m_sb, ino)) { | 1139 | if (xfs_is_quota_inode(&mp->m_sb, ino)) { |
1228 | *res = BULKSTAT_RV_NOTHING; | 1140 | *res = BULKSTAT_RV_NOTHING; |
1229 | return XFS_ERROR(EINVAL); | 1141 | return -EINVAL; |
1230 | } | 1142 | } |
1231 | 1143 | ||
1232 | /* | 1144 | /* |
@@ -1330,7 +1242,7 @@ out_unlock: | |||
1330 | * Walk thru all the filesystem inodes and construct a consistent view | 1242 | * Walk thru all the filesystem inodes and construct a consistent view |
1331 | * of the disk quota world. If the quotacheck fails, disable quotas. | 1243 | * of the disk quota world. If the quotacheck fails, disable quotas. |
1332 | */ | 1244 | */ |
1333 | int | 1245 | STATIC int |
1334 | xfs_qm_quotacheck( | 1246 | xfs_qm_quotacheck( |
1335 | xfs_mount_t *mp) | 1247 | xfs_mount_t *mp) |
1336 | { | 1248 | { |
@@ -1463,7 +1375,100 @@ xfs_qm_quotacheck( | |||
1463 | } | 1375 | } |
1464 | } else | 1376 | } else |
1465 | xfs_notice(mp, "Quotacheck: Done."); | 1377 | xfs_notice(mp, "Quotacheck: Done."); |
1466 | return (error); | 1378 | return error; |
1379 | } | ||
1380 | |||
1381 | /* | ||
1382 | * This is called from xfs_mountfs to start quotas and initialize all | ||
1383 | * necessary data structures like quotainfo. This is also responsible for | ||
1384 | * running a quotacheck as necessary. We are guaranteed that the superblock | ||
1385 | * is consistently read in at this point. | ||
1386 | * | ||
1387 | * If we fail here, the mount will continue with quota turned off. We don't | ||
1388 | * need to inidicate success or failure at all. | ||
1389 | */ | ||
1390 | void | ||
1391 | xfs_qm_mount_quotas( | ||
1392 | struct xfs_mount *mp) | ||
1393 | { | ||
1394 | int error = 0; | ||
1395 | uint sbf; | ||
1396 | |||
1397 | /* | ||
1398 | * If quotas on realtime volumes is not supported, we disable | ||
1399 | * quotas immediately. | ||
1400 | */ | ||
1401 | if (mp->m_sb.sb_rextents) { | ||
1402 | xfs_notice(mp, "Cannot turn on quotas for realtime filesystem"); | ||
1403 | mp->m_qflags = 0; | ||
1404 | goto write_changes; | ||
1405 | } | ||
1406 | |||
1407 | ASSERT(XFS_IS_QUOTA_RUNNING(mp)); | ||
1408 | |||
1409 | /* | ||
1410 | * Allocate the quotainfo structure inside the mount struct, and | ||
1411 | * create quotainode(s), and change/rev superblock if necessary. | ||
1412 | */ | ||
1413 | error = xfs_qm_init_quotainfo(mp); | ||
1414 | if (error) { | ||
1415 | /* | ||
1416 | * We must turn off quotas. | ||
1417 | */ | ||
1418 | ASSERT(mp->m_quotainfo == NULL); | ||
1419 | mp->m_qflags = 0; | ||
1420 | goto write_changes; | ||
1421 | } | ||
1422 | /* | ||
1423 | * If any of the quotas are not consistent, do a quotacheck. | ||
1424 | */ | ||
1425 | if (XFS_QM_NEED_QUOTACHECK(mp)) { | ||
1426 | error = xfs_qm_quotacheck(mp); | ||
1427 | if (error) { | ||
1428 | /* Quotacheck failed and disabled quotas. */ | ||
1429 | return; | ||
1430 | } | ||
1431 | } | ||
1432 | /* | ||
1433 | * If one type of quotas is off, then it will lose its | ||
1434 | * quotachecked status, since we won't be doing accounting for | ||
1435 | * that type anymore. | ||
1436 | */ | ||
1437 | if (!XFS_IS_UQUOTA_ON(mp)) | ||
1438 | mp->m_qflags &= ~XFS_UQUOTA_CHKD; | ||
1439 | if (!XFS_IS_GQUOTA_ON(mp)) | ||
1440 | mp->m_qflags &= ~XFS_GQUOTA_CHKD; | ||
1441 | if (!XFS_IS_PQUOTA_ON(mp)) | ||
1442 | mp->m_qflags &= ~XFS_PQUOTA_CHKD; | ||
1443 | |||
1444 | write_changes: | ||
1445 | /* | ||
1446 | * We actually don't have to acquire the m_sb_lock at all. | ||
1447 | * This can only be called from mount, and that's single threaded. XXX | ||
1448 | */ | ||
1449 | spin_lock(&mp->m_sb_lock); | ||
1450 | sbf = mp->m_sb.sb_qflags; | ||
1451 | mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL; | ||
1452 | spin_unlock(&mp->m_sb_lock); | ||
1453 | |||
1454 | if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) { | ||
1455 | if (xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS)) { | ||
1456 | /* | ||
1457 | * We could only have been turning quotas off. | ||
1458 | * We aren't in very good shape actually because | ||
1459 | * the incore structures are convinced that quotas are | ||
1460 | * off, but the on disk superblock doesn't know that ! | ||
1461 | */ | ||
1462 | ASSERT(!(XFS_IS_QUOTA_RUNNING(mp))); | ||
1463 | xfs_alert(mp, "%s: Superblock update failed!", | ||
1464 | __func__); | ||
1465 | } | ||
1466 | } | ||
1467 | |||
1468 | if (error) { | ||
1469 | xfs_warn(mp, "Failed to initialize disk quotas."); | ||
1470 | return; | ||
1471 | } | ||
1467 | } | 1472 | } |
1468 | 1473 | ||
1469 | /* | 1474 | /* |
@@ -1493,7 +1498,7 @@ xfs_qm_init_quotainos( | |||
1493 | error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino, | 1498 | error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino, |
1494 | 0, 0, &uip); | 1499 | 0, 0, &uip); |
1495 | if (error) | 1500 | if (error) |
1496 | return XFS_ERROR(error); | 1501 | return error; |
1497 | } | 1502 | } |
1498 | if (XFS_IS_GQUOTA_ON(mp) && | 1503 | if (XFS_IS_GQUOTA_ON(mp) && |
1499 | mp->m_sb.sb_gquotino != NULLFSINO) { | 1504 | mp->m_sb.sb_gquotino != NULLFSINO) { |
@@ -1563,7 +1568,7 @@ error_rele: | |||
1563 | IRELE(gip); | 1568 | IRELE(gip); |
1564 | if (pip) | 1569 | if (pip) |
1565 | IRELE(pip); | 1570 | IRELE(pip); |
1566 | return XFS_ERROR(error); | 1571 | return error; |
1567 | } | 1572 | } |
1568 | 1573 | ||
1569 | STATIC void | 1574 | STATIC void |
@@ -1679,7 +1684,7 @@ xfs_qm_vop_dqalloc( | |||
1679 | XFS_QMOPT_DOWARN, | 1684 | XFS_QMOPT_DOWARN, |
1680 | &uq); | 1685 | &uq); |
1681 | if (error) { | 1686 | if (error) { |
1682 | ASSERT(error != ENOENT); | 1687 | ASSERT(error != -ENOENT); |
1683 | return error; | 1688 | return error; |
1684 | } | 1689 | } |
1685 | /* | 1690 | /* |
@@ -1706,7 +1711,7 @@ xfs_qm_vop_dqalloc( | |||
1706 | XFS_QMOPT_DOWARN, | 1711 | XFS_QMOPT_DOWARN, |
1707 | &gq); | 1712 | &gq); |
1708 | if (error) { | 1713 | if (error) { |
1709 | ASSERT(error != ENOENT); | 1714 | ASSERT(error != -ENOENT); |
1710 | goto error_rele; | 1715 | goto error_rele; |
1711 | } | 1716 | } |
1712 | xfs_dqunlock(gq); | 1717 | xfs_dqunlock(gq); |
@@ -1726,7 +1731,7 @@ xfs_qm_vop_dqalloc( | |||
1726 | XFS_QMOPT_DOWARN, | 1731 | XFS_QMOPT_DOWARN, |
1727 | &pq); | 1732 | &pq); |
1728 | if (error) { | 1733 | if (error) { |
1729 | ASSERT(error != ENOENT); | 1734 | ASSERT(error != -ENOENT); |
1730 | goto error_rele; | 1735 | goto error_rele; |
1731 | } | 1736 | } |
1732 | xfs_dqunlock(pq); | 1737 | xfs_dqunlock(pq); |
@@ -1895,7 +1900,7 @@ xfs_qm_vop_chown_reserve( | |||
1895 | -((xfs_qcnt_t)delblks), 0, blkflags); | 1900 | -((xfs_qcnt_t)delblks), 0, blkflags); |
1896 | } | 1901 | } |
1897 | 1902 | ||
1898 | return (0); | 1903 | return 0; |
1899 | } | 1904 | } |
1900 | 1905 | ||
1901 | int | 1906 | int |
diff --git a/fs/xfs/xfs_qm.h b/fs/xfs/xfs_qm.h
index 797fd4636273..3a07a937e232 100644
--- a/fs/xfs/xfs_qm.h
+++ b/fs/xfs/xfs_qm.h
@@ -157,7 +157,6 @@ struct xfs_dquot_acct { | |||
157 | #define XFS_QM_RTBWARNLIMIT 5 | 157 | #define XFS_QM_RTBWARNLIMIT 5 |
158 | 158 | ||
159 | extern void xfs_qm_destroy_quotainfo(struct xfs_mount *); | 159 | extern void xfs_qm_destroy_quotainfo(struct xfs_mount *); |
160 | extern int xfs_qm_quotacheck(struct xfs_mount *); | ||
161 | extern int xfs_qm_write_sb_changes(struct xfs_mount *, __int64_t); | 160 | extern int xfs_qm_write_sb_changes(struct xfs_mount *, __int64_t); |
162 | 161 | ||
163 | /* dquot stuff */ | 162 | /* dquot stuff */ |
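xfs_qm_quotacheck() is now STATIC and its prototype is gone from xfs_qm.h; that is also why xfs_qm_mount_quotas() was moved below it in xfs_qm.c, so the now file-local definition precedes its only caller and no forward declaration is needed. A generic stand-alone illustration of that pattern (the names are placeholders, not the XFS functions):

#include <stdio.h>

/* Defined before its only caller, so no prototype is needed anywhere. */
static int quotacheck(int qflags)
{
	return qflags ? 0 : -1;		/* stand-in for the real work */
}

static int mount_quotas(int qflags)
{
	return quotacheck(qflags);	/* caller sees the definition above */
}

int main(void)
{
	printf("%d\n", mount_quotas(1));
	return 0;
}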
diff --git a/fs/xfs/xfs_qm_bhv.c b/fs/xfs/xfs_qm_bhv.c
index e9be63abd8d2..2c61e61b0205 100644
--- a/fs/xfs/xfs_qm_bhv.c
+++ b/fs/xfs/xfs_qm_bhv.c
@@ -117,7 +117,7 @@ xfs_qm_newmount( | |||
117 | (uquotaondisk ? " usrquota" : ""), | 117 | (uquotaondisk ? " usrquota" : ""), |
118 | (gquotaondisk ? " grpquota" : ""), | 118 | (gquotaondisk ? " grpquota" : ""), |
119 | (pquotaondisk ? " prjquota" : "")); | 119 | (pquotaondisk ? " prjquota" : "")); |
120 | return XFS_ERROR(EPERM); | 120 | return -EPERM; |
121 | } | 121 | } |
122 | 122 | ||
123 | if (XFS_IS_QUOTA_ON(mp) || quotaondisk) { | 123 | if (XFS_IS_QUOTA_ON(mp) || quotaondisk) { |
diff --git a/fs/xfs/xfs_qm_syscalls.c b/fs/xfs/xfs_qm_syscalls.c
index bbc813caba4c..80f2d77d929a 100644
--- a/fs/xfs/xfs_qm_syscalls.c
+++ b/fs/xfs/xfs_qm_syscalls.c
@@ -64,10 +64,10 @@ xfs_qm_scall_quotaoff( | |||
64 | /* | 64 | /* |
65 | * No file system can have quotas enabled on disk but not in core. | 65 | * No file system can have quotas enabled on disk but not in core. |
66 | * Note that quota utilities (like quotaoff) _expect_ | 66 | * Note that quota utilities (like quotaoff) _expect_ |
67 | * errno == EEXIST here. | 67 | * errno == -EEXIST here. |
68 | */ | 68 | */ |
69 | if ((mp->m_qflags & flags) == 0) | 69 | if ((mp->m_qflags & flags) == 0) |
70 | return XFS_ERROR(EEXIST); | 70 | return -EEXIST; |
71 | error = 0; | 71 | error = 0; |
72 | 72 | ||
73 | flags &= (XFS_ALL_QUOTA_ACCT | XFS_ALL_QUOTA_ENFD); | 73 | flags &= (XFS_ALL_QUOTA_ACCT | XFS_ALL_QUOTA_ENFD); |
@@ -94,7 +94,7 @@ xfs_qm_scall_quotaoff( | |||
94 | 94 | ||
95 | /* XXX what to do if error ? Revert back to old vals incore ? */ | 95 | /* XXX what to do if error ? Revert back to old vals incore ? */ |
96 | error = xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS); | 96 | error = xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS); |
97 | return (error); | 97 | return error; |
98 | } | 98 | } |
99 | 99 | ||
100 | dqtype = 0; | 100 | dqtype = 0; |
@@ -198,7 +198,7 @@ xfs_qm_scall_quotaoff( | |||
198 | if (mp->m_qflags == 0) { | 198 | if (mp->m_qflags == 0) { |
199 | mutex_unlock(&q->qi_quotaofflock); | 199 | mutex_unlock(&q->qi_quotaofflock); |
200 | xfs_qm_destroy_quotainfo(mp); | 200 | xfs_qm_destroy_quotainfo(mp); |
201 | return (0); | 201 | return 0; |
202 | } | 202 | } |
203 | 203 | ||
204 | /* | 204 | /* |
@@ -278,13 +278,13 @@ xfs_qm_scall_trunc_qfiles( | |||
278 | xfs_mount_t *mp, | 278 | xfs_mount_t *mp, |
279 | uint flags) | 279 | uint flags) |
280 | { | 280 | { |
281 | int error = EINVAL; | 281 | int error = -EINVAL; |
282 | 282 | ||
283 | if (!xfs_sb_version_hasquota(&mp->m_sb) || flags == 0 || | 283 | if (!xfs_sb_version_hasquota(&mp->m_sb) || flags == 0 || |
284 | (flags & ~XFS_DQ_ALLTYPES)) { | 284 | (flags & ~XFS_DQ_ALLTYPES)) { |
285 | xfs_debug(mp, "%s: flags=%x m_qflags=%x", | 285 | xfs_debug(mp, "%s: flags=%x m_qflags=%x", |
286 | __func__, flags, mp->m_qflags); | 286 | __func__, flags, mp->m_qflags); |
287 | return XFS_ERROR(EINVAL); | 287 | return -EINVAL; |
288 | } | 288 | } |
289 | 289 | ||
290 | if (flags & XFS_DQ_USER) { | 290 | if (flags & XFS_DQ_USER) { |
@@ -328,7 +328,7 @@ xfs_qm_scall_quotaon( | |||
328 | if (flags == 0) { | 328 | if (flags == 0) { |
329 | xfs_debug(mp, "%s: zero flags, m_qflags=%x", | 329 | xfs_debug(mp, "%s: zero flags, m_qflags=%x", |
330 | __func__, mp->m_qflags); | 330 | __func__, mp->m_qflags); |
331 | return XFS_ERROR(EINVAL); | 331 | return -EINVAL; |
332 | } | 332 | } |
333 | 333 | ||
334 | /* No fs can turn on quotas with a delayed effect */ | 334 | /* No fs can turn on quotas with a delayed effect */ |
@@ -351,13 +351,13 @@ xfs_qm_scall_quotaon( | |||
351 | xfs_debug(mp, | 351 | xfs_debug(mp, |
352 | "%s: Can't enforce without acct, flags=%x sbflags=%x", | 352 | "%s: Can't enforce without acct, flags=%x sbflags=%x", |
353 | __func__, flags, mp->m_sb.sb_qflags); | 353 | __func__, flags, mp->m_sb.sb_qflags); |
354 | return XFS_ERROR(EINVAL); | 354 | return -EINVAL; |
355 | } | 355 | } |
356 | /* | 356 | /* |
357 | * If everything's up to-date incore, then don't waste time. | 357 | * If everything's up to-date incore, then don't waste time. |
358 | */ | 358 | */ |
359 | if ((mp->m_qflags & flags) == flags) | 359 | if ((mp->m_qflags & flags) == flags) |
360 | return XFS_ERROR(EEXIST); | 360 | return -EEXIST; |
361 | 361 | ||
362 | /* | 362 | /* |
363 | * Change sb_qflags on disk but not incore mp->qflags | 363 | * Change sb_qflags on disk but not incore mp->qflags |
@@ -372,11 +372,11 @@ xfs_qm_scall_quotaon( | |||
372 | * There's nothing to change if it's the same. | 372 | * There's nothing to change if it's the same. |
373 | */ | 373 | */ |
374 | if ((qf & flags) == flags && sbflags == 0) | 374 | if ((qf & flags) == flags && sbflags == 0) |
375 | return XFS_ERROR(EEXIST); | 375 | return -EEXIST; |
376 | sbflags |= XFS_SB_QFLAGS; | 376 | sbflags |= XFS_SB_QFLAGS; |
377 | 377 | ||
378 | if ((error = xfs_qm_write_sb_changes(mp, sbflags))) | 378 | if ((error = xfs_qm_write_sb_changes(mp, sbflags))) |
379 | return (error); | 379 | return error; |
380 | /* | 380 | /* |
381 | * If we aren't trying to switch on quota enforcement, we are done. | 381 | * If we aren't trying to switch on quota enforcement, we are done. |
382 | */ | 382 | */ |
@@ -387,10 +387,10 @@ xfs_qm_scall_quotaon( | |||
387 | ((mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) != | 387 | ((mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) != |
388 | (mp->m_qflags & XFS_GQUOTA_ACCT)) || | 388 | (mp->m_qflags & XFS_GQUOTA_ACCT)) || |
389 | (flags & XFS_ALL_QUOTA_ENFD) == 0) | 389 | (flags & XFS_ALL_QUOTA_ENFD) == 0) |
390 | return (0); | 390 | return 0; |
391 | 391 | ||
392 | if (! XFS_IS_QUOTA_RUNNING(mp)) | 392 | if (! XFS_IS_QUOTA_RUNNING(mp)) |
393 | return XFS_ERROR(ESRCH); | 393 | return -ESRCH; |
394 | 394 | ||
395 | /* | 395 | /* |
396 | * Switch on quota enforcement in core. | 396 | * Switch on quota enforcement in core. |
@@ -399,7 +399,7 @@ xfs_qm_scall_quotaon( | |||
399 | mp->m_qflags |= (flags & XFS_ALL_QUOTA_ENFD); | 399 | mp->m_qflags |= (flags & XFS_ALL_QUOTA_ENFD); |
400 | mutex_unlock(&mp->m_quotainfo->qi_quotaofflock); | 400 | mutex_unlock(&mp->m_quotainfo->qi_quotaofflock); |
401 | 401 | ||
402 | return (0); | 402 | return 0; |
403 | } | 403 | } |
404 | 404 | ||
405 | 405 | ||
@@ -426,7 +426,7 @@ xfs_qm_scall_getqstat( | |||
426 | if (!xfs_sb_version_hasquota(&mp->m_sb)) { | 426 | if (!xfs_sb_version_hasquota(&mp->m_sb)) { |
427 | out->qs_uquota.qfs_ino = NULLFSINO; | 427 | out->qs_uquota.qfs_ino = NULLFSINO; |
428 | out->qs_gquota.qfs_ino = NULLFSINO; | 428 | out->qs_gquota.qfs_ino = NULLFSINO; |
429 | return (0); | 429 | return 0; |
430 | } | 430 | } |
431 | 431 | ||
432 | out->qs_flags = (__uint16_t) xfs_qm_export_flags(mp->m_qflags & | 432 | out->qs_flags = (__uint16_t) xfs_qm_export_flags(mp->m_qflags & |
@@ -514,7 +514,7 @@ xfs_qm_scall_getqstatv( | |||
514 | out->qs_uquota.qfs_ino = NULLFSINO; | 514 | out->qs_uquota.qfs_ino = NULLFSINO; |
515 | out->qs_gquota.qfs_ino = NULLFSINO; | 515 | out->qs_gquota.qfs_ino = NULLFSINO; |
516 | out->qs_pquota.qfs_ino = NULLFSINO; | 516 | out->qs_pquota.qfs_ino = NULLFSINO; |
517 | return (0); | 517 | return 0; |
518 | } | 518 | } |
519 | 519 | ||
520 | out->qs_flags = (__uint16_t) xfs_qm_export_flags(mp->m_qflags & | 520 | out->qs_flags = (__uint16_t) xfs_qm_export_flags(mp->m_qflags & |
@@ -595,7 +595,7 @@ xfs_qm_scall_setqlim( | |||
595 | xfs_qcnt_t hard, soft; | 595 | xfs_qcnt_t hard, soft; |
596 | 596 | ||
597 | if (newlim->d_fieldmask & ~XFS_DQ_MASK) | 597 | if (newlim->d_fieldmask & ~XFS_DQ_MASK) |
598 | return EINVAL; | 598 | return -EINVAL; |
599 | if ((newlim->d_fieldmask & XFS_DQ_MASK) == 0) | 599 | if ((newlim->d_fieldmask & XFS_DQ_MASK) == 0) |
600 | return 0; | 600 | return 0; |
601 | 601 | ||
@@ -615,7 +615,7 @@ xfs_qm_scall_setqlim( | |||
615 | */ | 615 | */ |
616 | error = xfs_qm_dqget(mp, NULL, id, type, XFS_QMOPT_DQALLOC, &dqp); | 616 | error = xfs_qm_dqget(mp, NULL, id, type, XFS_QMOPT_DQALLOC, &dqp); |
617 | if (error) { | 617 | if (error) { |
618 | ASSERT(error != ENOENT); | 618 | ASSERT(error != -ENOENT); |
619 | goto out_unlock; | 619 | goto out_unlock; |
620 | } | 620 | } |
621 | xfs_dqunlock(dqp); | 621 | xfs_dqunlock(dqp); |
@@ -758,7 +758,7 @@ xfs_qm_log_quotaoff_end( | |||
758 | error = xfs_trans_reserve(tp, &M_RES(mp)->tr_qm_equotaoff, 0, 0); | 758 | error = xfs_trans_reserve(tp, &M_RES(mp)->tr_qm_equotaoff, 0, 0); |
759 | if (error) { | 759 | if (error) { |
760 | xfs_trans_cancel(tp, 0); | 760 | xfs_trans_cancel(tp, 0); |
761 | return (error); | 761 | return error; |
762 | } | 762 | } |
763 | 763 | ||
764 | qoffi = xfs_trans_get_qoff_item(tp, startqoff, | 764 | qoffi = xfs_trans_get_qoff_item(tp, startqoff, |
@@ -772,7 +772,7 @@ xfs_qm_log_quotaoff_end( | |||
772 | */ | 772 | */ |
773 | xfs_trans_set_sync(tp); | 773 | xfs_trans_set_sync(tp); |
774 | error = xfs_trans_commit(tp, 0); | 774 | error = xfs_trans_commit(tp, 0); |
775 | return (error); | 775 | return error; |
776 | } | 776 | } |
777 | 777 | ||
778 | 778 | ||
@@ -822,7 +822,7 @@ error0: | |||
822 | spin_unlock(&mp->m_sb_lock); | 822 | spin_unlock(&mp->m_sb_lock); |
823 | } | 823 | } |
824 | *qoffstartp = qoffi; | 824 | *qoffstartp = qoffi; |
825 | return (error); | 825 | return error; |
826 | } | 826 | } |
827 | 827 | ||
828 | 828 | ||
@@ -850,7 +850,7 @@ xfs_qm_scall_getquota( | |||
850 | * our utility programs are concerned. | 850 | * our utility programs are concerned. |
851 | */ | 851 | */ |
852 | if (XFS_IS_DQUOT_UNINITIALIZED(dqp)) { | 852 | if (XFS_IS_DQUOT_UNINITIALIZED(dqp)) { |
853 | error = XFS_ERROR(ENOENT); | 853 | error = -ENOENT; |
854 | goto out_put; | 854 | goto out_put; |
855 | } | 855 | } |
856 | 856 | ||
@@ -953,7 +953,7 @@ xfs_qm_export_flags( | |||
953 | uflags |= FS_QUOTA_GDQ_ENFD; | 953 | uflags |= FS_QUOTA_GDQ_ENFD; |
954 | if (flags & XFS_PQUOTA_ENFD) | 954 | if (flags & XFS_PQUOTA_ENFD) |
955 | uflags |= FS_QUOTA_PDQ_ENFD; | 955 | uflags |= FS_QUOTA_PDQ_ENFD; |
956 | return (uflags); | 956 | return uflags; |
957 | } | 957 | } |
958 | 958 | ||
959 | 959 | ||
diff --git a/fs/xfs/xfs_quotaops.c b/fs/xfs/xfs_quotaops.c
index 2ad1b9822e92..b238027df987 100644
--- a/fs/xfs/xfs_quotaops.c
+++ b/fs/xfs/xfs_quotaops.c
@@ -51,7 +51,7 @@ xfs_fs_get_xstate( | |||
51 | 51 | ||
52 | if (!XFS_IS_QUOTA_RUNNING(mp)) | 52 | if (!XFS_IS_QUOTA_RUNNING(mp)) |
53 | return -ENOSYS; | 53 | return -ENOSYS; |
54 | return -xfs_qm_scall_getqstat(mp, fqs); | 54 | return xfs_qm_scall_getqstat(mp, fqs); |
55 | } | 55 | } |
56 | 56 | ||
57 | STATIC int | 57 | STATIC int |
@@ -63,7 +63,7 @@ xfs_fs_get_xstatev( | |||
63 | 63 | ||
64 | if (!XFS_IS_QUOTA_RUNNING(mp)) | 64 | if (!XFS_IS_QUOTA_RUNNING(mp)) |
65 | return -ENOSYS; | 65 | return -ENOSYS; |
66 | return -xfs_qm_scall_getqstatv(mp, fqs); | 66 | return xfs_qm_scall_getqstatv(mp, fqs); |
67 | } | 67 | } |
68 | 68 | ||
69 | STATIC int | 69 | STATIC int |
@@ -95,11 +95,11 @@ xfs_fs_set_xstate( | |||
95 | 95 | ||
96 | switch (op) { | 96 | switch (op) { |
97 | case Q_XQUOTAON: | 97 | case Q_XQUOTAON: |
98 | return -xfs_qm_scall_quotaon(mp, flags); | 98 | return xfs_qm_scall_quotaon(mp, flags); |
99 | case Q_XQUOTAOFF: | 99 | case Q_XQUOTAOFF: |
100 | if (!XFS_IS_QUOTA_ON(mp)) | 100 | if (!XFS_IS_QUOTA_ON(mp)) |
101 | return -EINVAL; | 101 | return -EINVAL; |
102 | return -xfs_qm_scall_quotaoff(mp, flags); | 102 | return xfs_qm_scall_quotaoff(mp, flags); |
103 | } | 103 | } |
104 | 104 | ||
105 | return -EINVAL; | 105 | return -EINVAL; |
@@ -112,7 +112,7 @@ xfs_fs_rm_xquota( | |||
112 | { | 112 | { |
113 | struct xfs_mount *mp = XFS_M(sb); | 113 | struct xfs_mount *mp = XFS_M(sb); |
114 | unsigned int flags = 0; | 114 | unsigned int flags = 0; |
115 | 115 | ||
116 | if (sb->s_flags & MS_RDONLY) | 116 | if (sb->s_flags & MS_RDONLY) |
117 | return -EROFS; | 117 | return -EROFS; |
118 | 118 | ||
@@ -123,11 +123,11 @@ xfs_fs_rm_xquota( | |||
123 | flags |= XFS_DQ_USER; | 123 | flags |= XFS_DQ_USER; |
124 | if (uflags & FS_GROUP_QUOTA) | 124 | if (uflags & FS_GROUP_QUOTA) |
125 | flags |= XFS_DQ_GROUP; | 125 | flags |= XFS_DQ_GROUP; |
126 | if (uflags & FS_USER_QUOTA) | 126 | if (uflags & FS_PROJ_QUOTA) |
127 | flags |= XFS_DQ_PROJ; | 127 | flags |= XFS_DQ_PROJ; |
128 | 128 | ||
129 | return -xfs_qm_scall_trunc_qfiles(mp, flags); | 129 | return xfs_qm_scall_trunc_qfiles(mp, flags); |
130 | } | 130 | } |
131 | 131 | ||
132 | STATIC int | 132 | STATIC int |
133 | xfs_fs_get_dqblk( | 133 | xfs_fs_get_dqblk( |
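Besides the sign conversion, the xfs_fs_rm_xquota() hunk fixes a real bug: the project-quota branch tested FS_USER_QUOTA, so a request to remove only the project quota never set XFS_DQ_PROJ. A stand-alone sketch of the corrected mapping (the flag values are stand-ins; only the shape of the mapping matters):

#define FS_USER_QUOTA	(1 << 0)	/* stand-in values */
#define FS_PROJ_QUOTA	(1 << 1)
#define FS_GROUP_QUOTA	(1 << 2)
#define XFS_DQ_USER	0x0001
#define XFS_DQ_PROJ	0x0002
#define XFS_DQ_GROUP	0x0004

static unsigned int rm_xquota_flags(unsigned int uflags)
{
	unsigned int flags = 0;

	if (uflags & FS_USER_QUOTA)
		flags |= XFS_DQ_USER;
	if (uflags & FS_GROUP_QUOTA)
		flags |= XFS_DQ_GROUP;
	if (uflags & FS_PROJ_QUOTA)	/* was mistakenly FS_USER_QUOTA */
		flags |= XFS_DQ_PROJ;
	return flags;
}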
@@ -142,7 +142,7 @@ xfs_fs_get_dqblk( | |||
142 | if (!XFS_IS_QUOTA_ON(mp)) | 142 | if (!XFS_IS_QUOTA_ON(mp)) |
143 | return -ESRCH; | 143 | return -ESRCH; |
144 | 144 | ||
145 | return -xfs_qm_scall_getquota(mp, from_kqid(&init_user_ns, qid), | 145 | return xfs_qm_scall_getquota(mp, from_kqid(&init_user_ns, qid), |
146 | xfs_quota_type(qid.type), fdq); | 146 | xfs_quota_type(qid.type), fdq); |
147 | } | 147 | } |
148 | 148 | ||
@@ -161,7 +161,7 @@ xfs_fs_set_dqblk( | |||
161 | if (!XFS_IS_QUOTA_ON(mp)) | 161 | if (!XFS_IS_QUOTA_ON(mp)) |
162 | return -ESRCH; | 162 | return -ESRCH; |
163 | 163 | ||
164 | return -xfs_qm_scall_setqlim(mp, from_kqid(&init_user_ns, qid), | 164 | return xfs_qm_scall_setqlim(mp, from_kqid(&init_user_ns, qid), |
165 | xfs_quota_type(qid.type), fdq); | 165 | xfs_quota_type(qid.type), fdq); |
166 | } | 166 | } |
167 | 167 | ||
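fs/xfs/xfs_quotaops.c is the VFS-facing boundary, and every wrapper here used to negate the positive error coming back from the xfs_qm_scall_*() helpers. With the internals converted to negative errnos, the negation at the boundary disappears and the value is passed through unchanged. A stand-alone sketch of the before/after shape (scall_getqstat() is a placeholder, not the real helper):

#include <errno.h>

static int scall_getqstat(void)
{
	return -ENOSYS;			/* internal code: negative errno */
}

static int fs_get_xstate(void)
{
	return scall_getqstat();	/* was: return -scall_getqstat(); */
}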
diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
index ec5ca65c6211..909e143b87ae 100644
--- a/fs/xfs/xfs_rtalloc.c
+++ b/fs/xfs/xfs_rtalloc.c
@@ -863,7 +863,7 @@ xfs_growfs_rt_alloc( | |||
863 | XFS_BMAPI_METADATA, &firstblock, | 863 | XFS_BMAPI_METADATA, &firstblock, |
864 | resblks, &map, &nmap, &flist); | 864 | resblks, &map, &nmap, &flist); |
865 | if (!error && nmap < 1) | 865 | if (!error && nmap < 1) |
866 | error = XFS_ERROR(ENOSPC); | 866 | error = -ENOSPC; |
867 | if (error) | 867 | if (error) |
868 | goto error_cancel; | 868 | goto error_cancel; |
869 | /* | 869 | /* |
@@ -903,7 +903,7 @@ xfs_growfs_rt_alloc( | |||
903 | bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, d, | 903 | bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, d, |
904 | mp->m_bsize, 0); | 904 | mp->m_bsize, 0); |
905 | if (bp == NULL) { | 905 | if (bp == NULL) { |
906 | error = XFS_ERROR(EIO); | 906 | error = -EIO; |
907 | error_cancel: | 907 | error_cancel: |
908 | xfs_trans_cancel(tp, cancelflags); | 908 | xfs_trans_cancel(tp, cancelflags); |
909 | goto error; | 909 | goto error; |
@@ -944,9 +944,9 @@ xfs_growfs_rt( | |||
944 | xfs_buf_t *bp; /* temporary buffer */ | 944 | xfs_buf_t *bp; /* temporary buffer */ |
945 | int error; /* error return value */ | 945 | int error; /* error return value */ |
946 | xfs_mount_t *nmp; /* new (fake) mount structure */ | 946 | xfs_mount_t *nmp; /* new (fake) mount structure */ |
947 | xfs_drfsbno_t nrblocks; /* new number of realtime blocks */ | 947 | xfs_rfsblock_t nrblocks; /* new number of realtime blocks */ |
948 | xfs_extlen_t nrbmblocks; /* new number of rt bitmap blocks */ | 948 | xfs_extlen_t nrbmblocks; /* new number of rt bitmap blocks */ |
949 | xfs_drtbno_t nrextents; /* new number of realtime extents */ | 949 | xfs_rtblock_t nrextents; /* new number of realtime extents */ |
950 | uint8_t nrextslog; /* new log2 of sb_rextents */ | 950 | uint8_t nrextslog; /* new log2 of sb_rextents */ |
951 | xfs_extlen_t nrsumblocks; /* new number of summary blocks */ | 951 | xfs_extlen_t nrsumblocks; /* new number of summary blocks */ |
952 | uint nrsumlevels; /* new rt summary levels */ | 952 | uint nrsumlevels; /* new rt summary levels */ |
@@ -962,11 +962,11 @@ xfs_growfs_rt( | |||
962 | * Initial error checking. | 962 | * Initial error checking. |
963 | */ | 963 | */ |
964 | if (!capable(CAP_SYS_ADMIN)) | 964 | if (!capable(CAP_SYS_ADMIN)) |
965 | return XFS_ERROR(EPERM); | 965 | return -EPERM; |
966 | if (mp->m_rtdev_targp == NULL || mp->m_rbmip == NULL || | 966 | if (mp->m_rtdev_targp == NULL || mp->m_rbmip == NULL || |
967 | (nrblocks = in->newblocks) <= sbp->sb_rblocks || | 967 | (nrblocks = in->newblocks) <= sbp->sb_rblocks || |
968 | (sbp->sb_rblocks && (in->extsize != sbp->sb_rextsize))) | 968 | (sbp->sb_rblocks && (in->extsize != sbp->sb_rextsize))) |
969 | return XFS_ERROR(EINVAL); | 969 | return -EINVAL; |
970 | if ((error = xfs_sb_validate_fsb_count(sbp, nrblocks))) | 970 | if ((error = xfs_sb_validate_fsb_count(sbp, nrblocks))) |
971 | return error; | 971 | return error; |
972 | /* | 972 | /* |
@@ -976,7 +976,7 @@ xfs_growfs_rt( | |||
976 | XFS_FSB_TO_BB(mp, nrblocks - 1), | 976 | XFS_FSB_TO_BB(mp, nrblocks - 1), |
977 | XFS_FSB_TO_BB(mp, 1), 0, NULL); | 977 | XFS_FSB_TO_BB(mp, 1), 0, NULL); |
978 | if (!bp) | 978 | if (!bp) |
979 | return EIO; | 979 | return -EIO; |
980 | if (bp->b_error) { | 980 | if (bp->b_error) { |
981 | error = bp->b_error; | 981 | error = bp->b_error; |
982 | xfs_buf_relse(bp); | 982 | xfs_buf_relse(bp); |
@@ -1001,7 +1001,7 @@ xfs_growfs_rt( | |||
1001 | * since we'll log basically the whole summary file at once. | 1001 | * since we'll log basically the whole summary file at once. |
1002 | */ | 1002 | */ |
1003 | if (nrsumblocks > (mp->m_sb.sb_logblocks >> 1)) | 1003 | if (nrsumblocks > (mp->m_sb.sb_logblocks >> 1)) |
1004 | return XFS_ERROR(EINVAL); | 1004 | return -EINVAL; |
1005 | /* | 1005 | /* |
1006 | * Get the old block counts for bitmap and summary inodes. | 1006 | * Get the old block counts for bitmap and summary inodes. |
1007 | * These can't change since other growfs callers are locked out. | 1007 | * These can't change since other growfs callers are locked out. |
@@ -1208,7 +1208,7 @@ xfs_rtallocate_extent( | |||
1208 | len, &sumbp, &sb, prod, &r); | 1208 | len, &sumbp, &sb, prod, &r); |
1209 | break; | 1209 | break; |
1210 | default: | 1210 | default: |
1211 | error = EIO; | 1211 | error = -EIO; |
1212 | ASSERT(0); | 1212 | ASSERT(0); |
1213 | } | 1213 | } |
1214 | if (error) | 1214 | if (error) |
@@ -1247,7 +1247,7 @@ xfs_rtmount_init( | |||
1247 | if (mp->m_rtdev_targp == NULL) { | 1247 | if (mp->m_rtdev_targp == NULL) { |
1248 | xfs_warn(mp, | 1248 | xfs_warn(mp, |
1249 | "Filesystem has a realtime volume, use rtdev=device option"); | 1249 | "Filesystem has a realtime volume, use rtdev=device option"); |
1250 | return XFS_ERROR(ENODEV); | 1250 | return -ENODEV; |
1251 | } | 1251 | } |
1252 | mp->m_rsumlevels = sbp->sb_rextslog + 1; | 1252 | mp->m_rsumlevels = sbp->sb_rextslog + 1; |
1253 | mp->m_rsumsize = | 1253 | mp->m_rsumsize = |
@@ -1263,7 +1263,7 @@ xfs_rtmount_init( | |||
1263 | xfs_warn(mp, "realtime mount -- %llu != %llu", | 1263 | xfs_warn(mp, "realtime mount -- %llu != %llu", |
1264 | (unsigned long long) XFS_BB_TO_FSB(mp, d), | 1264 | (unsigned long long) XFS_BB_TO_FSB(mp, d), |
1265 | (unsigned long long) mp->m_sb.sb_rblocks); | 1265 | (unsigned long long) mp->m_sb.sb_rblocks); |
1266 | return XFS_ERROR(EFBIG); | 1266 | return -EFBIG; |
1267 | } | 1267 | } |
1268 | bp = xfs_buf_read_uncached(mp->m_rtdev_targp, | 1268 | bp = xfs_buf_read_uncached(mp->m_rtdev_targp, |
1269 | d - XFS_FSB_TO_BB(mp, 1), | 1269 | d - XFS_FSB_TO_BB(mp, 1), |
@@ -1272,7 +1272,7 @@ xfs_rtmount_init( | |||
1272 | xfs_warn(mp, "realtime device size check failed"); | 1272 | xfs_warn(mp, "realtime device size check failed"); |
1273 | if (bp) | 1273 | if (bp) |
1274 | xfs_buf_relse(bp); | 1274 | xfs_buf_relse(bp); |
1275 | return EIO; | 1275 | return -EIO; |
1276 | } | 1276 | } |
1277 | xfs_buf_relse(bp); | 1277 | xfs_buf_relse(bp); |
1278 | return 0; | 1278 | return 0; |
diff --git a/fs/xfs/xfs_rtalloc.h b/fs/xfs/xfs_rtalloc.h
index 752b63d10300..c642795324af 100644
--- a/fs/xfs/xfs_rtalloc.h
+++ b/fs/xfs/xfs_rtalloc.h
@@ -132,7 +132,7 @@ xfs_rtmount_init( | |||
132 | return 0; | 132 | return 0; |
133 | 133 | ||
134 | xfs_warn(mp, "Not built with CONFIG_XFS_RT"); | 134 | xfs_warn(mp, "Not built with CONFIG_XFS_RT"); |
135 | return ENOSYS; | 135 | return -ENOSYS; |
136 | } | 136 | } |
137 | # define xfs_rtmount_inodes(m) (((mp)->m_sb.sb_rblocks == 0)? 0 : (ENOSYS)) | 137 | # define xfs_rtmount_inodes(m) (((mp)->m_sb.sb_rblocks == 0)? 0 : (ENOSYS)) |
138 | # define xfs_rtunmount_inodes(m) | 138 | # define xfs_rtunmount_inodes(m) |
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 8f0333b3f7a0..b194652033cd 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -61,6 +61,7 @@ | |||
61 | static const struct super_operations xfs_super_operations; | 61 | static const struct super_operations xfs_super_operations; |
62 | static kmem_zone_t *xfs_ioend_zone; | 62 | static kmem_zone_t *xfs_ioend_zone; |
63 | mempool_t *xfs_ioend_pool; | 63 | mempool_t *xfs_ioend_pool; |
64 | struct kset *xfs_kset; | ||
64 | 65 | ||
65 | #define MNTOPT_LOGBUFS "logbufs" /* number of XFS log buffers */ | 66 | #define MNTOPT_LOGBUFS "logbufs" /* number of XFS log buffers */ |
66 | #define MNTOPT_LOGBSIZE "logbsize" /* size of XFS log buffers */ | 67 | #define MNTOPT_LOGBSIZE "logbsize" /* size of XFS log buffers */ |
@@ -185,7 +186,7 @@ xfs_parseargs( | |||
185 | */ | 186 | */ |
186 | mp->m_fsname = kstrndup(sb->s_id, MAXNAMELEN, GFP_KERNEL); | 187 | mp->m_fsname = kstrndup(sb->s_id, MAXNAMELEN, GFP_KERNEL); |
187 | if (!mp->m_fsname) | 188 | if (!mp->m_fsname) |
188 | return ENOMEM; | 189 | return -ENOMEM; |
189 | mp->m_fsname_len = strlen(mp->m_fsname) + 1; | 190 | mp->m_fsname_len = strlen(mp->m_fsname) + 1; |
190 | 191 | ||
191 | /* | 192 | /* |
@@ -204,9 +205,6 @@ xfs_parseargs( | |||
204 | */ | 205 | */ |
205 | mp->m_flags |= XFS_MOUNT_BARRIER; | 206 | mp->m_flags |= XFS_MOUNT_BARRIER; |
206 | mp->m_flags |= XFS_MOUNT_COMPAT_IOSIZE; | 207 | mp->m_flags |= XFS_MOUNT_COMPAT_IOSIZE; |
207 | #if !XFS_BIG_INUMS | ||
208 | mp->m_flags |= XFS_MOUNT_SMALL_INUMS; | ||
209 | #endif | ||
210 | 208 | ||
211 | /* | 209 | /* |
212 | * These can be overridden by the mount option parsing. | 210 | * These can be overridden by the mount option parsing. |
@@ -227,57 +225,57 @@ xfs_parseargs( | |||
227 | if (!value || !*value) { | 225 | if (!value || !*value) { |
228 | xfs_warn(mp, "%s option requires an argument", | 226 | xfs_warn(mp, "%s option requires an argument", |
229 | this_char); | 227 | this_char); |
230 | return EINVAL; | 228 | return -EINVAL; |
231 | } | 229 | } |
232 | if (kstrtoint(value, 10, &mp->m_logbufs)) | 230 | if (kstrtoint(value, 10, &mp->m_logbufs)) |
233 | return EINVAL; | 231 | return -EINVAL; |
234 | } else if (!strcmp(this_char, MNTOPT_LOGBSIZE)) { | 232 | } else if (!strcmp(this_char, MNTOPT_LOGBSIZE)) { |
235 | if (!value || !*value) { | 233 | if (!value || !*value) { |
236 | xfs_warn(mp, "%s option requires an argument", | 234 | xfs_warn(mp, "%s option requires an argument", |
237 | this_char); | 235 | this_char); |
238 | return EINVAL; | 236 | return -EINVAL; |
239 | } | 237 | } |
240 | if (suffix_kstrtoint(value, 10, &mp->m_logbsize)) | 238 | if (suffix_kstrtoint(value, 10, &mp->m_logbsize)) |
241 | return EINVAL; | 239 | return -EINVAL; |
242 | } else if (!strcmp(this_char, MNTOPT_LOGDEV)) { | 240 | } else if (!strcmp(this_char, MNTOPT_LOGDEV)) { |
243 | if (!value || !*value) { | 241 | if (!value || !*value) { |
244 | xfs_warn(mp, "%s option requires an argument", | 242 | xfs_warn(mp, "%s option requires an argument", |
245 | this_char); | 243 | this_char); |
246 | return EINVAL; | 244 | return -EINVAL; |
247 | } | 245 | } |
248 | mp->m_logname = kstrndup(value, MAXNAMELEN, GFP_KERNEL); | 246 | mp->m_logname = kstrndup(value, MAXNAMELEN, GFP_KERNEL); |
249 | if (!mp->m_logname) | 247 | if (!mp->m_logname) |
250 | return ENOMEM; | 248 | return -ENOMEM; |
251 | } else if (!strcmp(this_char, MNTOPT_MTPT)) { | 249 | } else if (!strcmp(this_char, MNTOPT_MTPT)) { |
252 | xfs_warn(mp, "%s option not allowed on this system", | 250 | xfs_warn(mp, "%s option not allowed on this system", |
253 | this_char); | 251 | this_char); |
254 | return EINVAL; | 252 | return -EINVAL; |
255 | } else if (!strcmp(this_char, MNTOPT_RTDEV)) { | 253 | } else if (!strcmp(this_char, MNTOPT_RTDEV)) { |
256 | if (!value || !*value) { | 254 | if (!value || !*value) { |
257 | xfs_warn(mp, "%s option requires an argument", | 255 | xfs_warn(mp, "%s option requires an argument", |
258 | this_char); | 256 | this_char); |
259 | return EINVAL; | 257 | return -EINVAL; |
260 | } | 258 | } |
261 | mp->m_rtname = kstrndup(value, MAXNAMELEN, GFP_KERNEL); | 259 | mp->m_rtname = kstrndup(value, MAXNAMELEN, GFP_KERNEL); |
262 | if (!mp->m_rtname) | 260 | if (!mp->m_rtname) |
263 | return ENOMEM; | 261 | return -ENOMEM; |
264 | } else if (!strcmp(this_char, MNTOPT_BIOSIZE)) { | 262 | } else if (!strcmp(this_char, MNTOPT_BIOSIZE)) { |
265 | if (!value || !*value) { | 263 | if (!value || !*value) { |
266 | xfs_warn(mp, "%s option requires an argument", | 264 | xfs_warn(mp, "%s option requires an argument", |
267 | this_char); | 265 | this_char); |
268 | return EINVAL; | 266 | return -EINVAL; |
269 | } | 267 | } |
270 | if (kstrtoint(value, 10, &iosize)) | 268 | if (kstrtoint(value, 10, &iosize)) |
271 | return EINVAL; | 269 | return -EINVAL; |
272 | iosizelog = ffs(iosize) - 1; | 270 | iosizelog = ffs(iosize) - 1; |
273 | } else if (!strcmp(this_char, MNTOPT_ALLOCSIZE)) { | 271 | } else if (!strcmp(this_char, MNTOPT_ALLOCSIZE)) { |
274 | if (!value || !*value) { | 272 | if (!value || !*value) { |
275 | xfs_warn(mp, "%s option requires an argument", | 273 | xfs_warn(mp, "%s option requires an argument", |
276 | this_char); | 274 | this_char); |
277 | return EINVAL; | 275 | return -EINVAL; |
278 | } | 276 | } |
279 | if (suffix_kstrtoint(value, 10, &iosize)) | 277 | if (suffix_kstrtoint(value, 10, &iosize)) |
280 | return EINVAL; | 278 | return -EINVAL; |
281 | iosizelog = ffs(iosize) - 1; | 279 | iosizelog = ffs(iosize) - 1; |
282 | } else if (!strcmp(this_char, MNTOPT_GRPID) || | 280 | } else if (!strcmp(this_char, MNTOPT_GRPID) || |
283 | !strcmp(this_char, MNTOPT_BSDGROUPS)) { | 281 | !strcmp(this_char, MNTOPT_BSDGROUPS)) { |
@@ -297,27 +295,22 @@ xfs_parseargs( | |||
297 | if (!value || !*value) { | 295 | if (!value || !*value) { |
298 | xfs_warn(mp, "%s option requires an argument", | 296 | xfs_warn(mp, "%s option requires an argument", |
299 | this_char); | 297 | this_char); |
300 | return EINVAL; | 298 | return -EINVAL; |
301 | } | 299 | } |
302 | if (kstrtoint(value, 10, &dsunit)) | 300 | if (kstrtoint(value, 10, &dsunit)) |
303 | return EINVAL; | 301 | return -EINVAL; |
304 | } else if (!strcmp(this_char, MNTOPT_SWIDTH)) { | 302 | } else if (!strcmp(this_char, MNTOPT_SWIDTH)) { |
305 | if (!value || !*value) { | 303 | if (!value || !*value) { |
306 | xfs_warn(mp, "%s option requires an argument", | 304 | xfs_warn(mp, "%s option requires an argument", |
307 | this_char); | 305 | this_char); |
308 | return EINVAL; | 306 | return -EINVAL; |
309 | } | 307 | } |
310 | if (kstrtoint(value, 10, &dswidth)) | 308 | if (kstrtoint(value, 10, &dswidth)) |
311 | return EINVAL; | 309 | return -EINVAL; |
312 | } else if (!strcmp(this_char, MNTOPT_32BITINODE)) { | 310 | } else if (!strcmp(this_char, MNTOPT_32BITINODE)) { |
313 | mp->m_flags |= XFS_MOUNT_SMALL_INUMS; | 311 | mp->m_flags |= XFS_MOUNT_SMALL_INUMS; |
314 | } else if (!strcmp(this_char, MNTOPT_64BITINODE)) { | 312 | } else if (!strcmp(this_char, MNTOPT_64BITINODE)) { |
315 | mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS; | 313 | mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS; |
316 | #if !XFS_BIG_INUMS | ||
317 | xfs_warn(mp, "%s option not allowed on this system", | ||
318 | this_char); | ||
319 | return EINVAL; | ||
320 | #endif | ||
321 | } else if (!strcmp(this_char, MNTOPT_NOUUID)) { | 314 | } else if (!strcmp(this_char, MNTOPT_NOUUID)) { |
322 | mp->m_flags |= XFS_MOUNT_NOUUID; | 315 | mp->m_flags |= XFS_MOUNT_NOUUID; |
323 | } else if (!strcmp(this_char, MNTOPT_BARRIER)) { | 316 | } else if (!strcmp(this_char, MNTOPT_BARRIER)) { |
@@ -390,7 +383,7 @@ xfs_parseargs( | |||
390 | "irixsgid is now a sysctl(2) variable, option is deprecated."); | 383 | "irixsgid is now a sysctl(2) variable, option is deprecated."); |
391 | } else { | 384 | } else { |
392 | xfs_warn(mp, "unknown mount option [%s].", this_char); | 385 | xfs_warn(mp, "unknown mount option [%s].", this_char); |
393 | return EINVAL; | 386 | return -EINVAL; |
394 | } | 387 | } |
395 | } | 388 | } |
396 | 389 | ||
@@ -400,32 +393,32 @@ xfs_parseargs( | |||
400 | if ((mp->m_flags & XFS_MOUNT_NORECOVERY) && | 393 | if ((mp->m_flags & XFS_MOUNT_NORECOVERY) && |
401 | !(mp->m_flags & XFS_MOUNT_RDONLY)) { | 394 | !(mp->m_flags & XFS_MOUNT_RDONLY)) { |
402 | xfs_warn(mp, "no-recovery mounts must be read-only."); | 395 | xfs_warn(mp, "no-recovery mounts must be read-only."); |
403 | return EINVAL; | 396 | return -EINVAL; |
404 | } | 397 | } |
405 | 398 | ||
406 | if ((mp->m_flags & XFS_MOUNT_NOALIGN) && (dsunit || dswidth)) { | 399 | if ((mp->m_flags & XFS_MOUNT_NOALIGN) && (dsunit || dswidth)) { |
407 | xfs_warn(mp, | 400 | xfs_warn(mp, |
408 | "sunit and swidth options incompatible with the noalign option"); | 401 | "sunit and swidth options incompatible with the noalign option"); |
409 | return EINVAL; | 402 | return -EINVAL; |
410 | } | 403 | } |
411 | 404 | ||
412 | #ifndef CONFIG_XFS_QUOTA | 405 | #ifndef CONFIG_XFS_QUOTA |
413 | if (XFS_IS_QUOTA_RUNNING(mp)) { | 406 | if (XFS_IS_QUOTA_RUNNING(mp)) { |
414 | xfs_warn(mp, "quota support not available in this kernel."); | 407 | xfs_warn(mp, "quota support not available in this kernel."); |
415 | return EINVAL; | 408 | return -EINVAL; |
416 | } | 409 | } |
417 | #endif | 410 | #endif |
418 | 411 | ||
419 | if ((dsunit && !dswidth) || (!dsunit && dswidth)) { | 412 | if ((dsunit && !dswidth) || (!dsunit && dswidth)) { |
420 | xfs_warn(mp, "sunit and swidth must be specified together"); | 413 | xfs_warn(mp, "sunit and swidth must be specified together"); |
421 | return EINVAL; | 414 | return -EINVAL; |
422 | } | 415 | } |
423 | 416 | ||
424 | if (dsunit && (dswidth % dsunit != 0)) { | 417 | if (dsunit && (dswidth % dsunit != 0)) { |
425 | xfs_warn(mp, | 418 | xfs_warn(mp, |
426 | "stripe width (%d) must be a multiple of the stripe unit (%d)", | 419 | "stripe width (%d) must be a multiple of the stripe unit (%d)", |
427 | dswidth, dsunit); | 420 | dswidth, dsunit); |
428 | return EINVAL; | 421 | return -EINVAL; |
429 | } | 422 | } |
430 | 423 | ||
431 | done: | 424 | done: |
@@ -446,7 +439,7 @@ done: | |||
446 | mp->m_logbufs > XLOG_MAX_ICLOGS)) { | 439 | mp->m_logbufs > XLOG_MAX_ICLOGS)) { |
447 | xfs_warn(mp, "invalid logbufs value: %d [not %d-%d]", | 440 | xfs_warn(mp, "invalid logbufs value: %d [not %d-%d]", |
448 | mp->m_logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS); | 441 | mp->m_logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS); |
449 | return XFS_ERROR(EINVAL); | 442 | return -EINVAL; |
450 | } | 443 | } |
451 | if (mp->m_logbsize != -1 && | 444 | if (mp->m_logbsize != -1 && |
452 | mp->m_logbsize != 0 && | 445 | mp->m_logbsize != 0 && |
@@ -456,7 +449,7 @@ done: | |||
456 | xfs_warn(mp, | 449 | xfs_warn(mp, |
457 | "invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]", | 450 | "invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]", |
458 | mp->m_logbsize); | 451 | mp->m_logbsize); |
459 | return XFS_ERROR(EINVAL); | 452 | return -EINVAL; |
460 | } | 453 | } |
461 | 454 | ||
462 | if (iosizelog) { | 455 | if (iosizelog) { |
@@ -465,7 +458,7 @@ done: | |||
465 | xfs_warn(mp, "invalid log iosize: %d [not %d-%d]", | 458 | xfs_warn(mp, "invalid log iosize: %d [not %d-%d]", |
466 | iosizelog, XFS_MIN_IO_LOG, | 459 | iosizelog, XFS_MIN_IO_LOG, |
467 | XFS_MAX_IO_LOG); | 460 | XFS_MAX_IO_LOG); |
468 | return XFS_ERROR(EINVAL); | 461 | return -EINVAL; |
469 | } | 462 | } |
470 | 463 | ||
471 | mp->m_flags |= XFS_MOUNT_DFLT_IOSIZE; | 464 | mp->m_flags |= XFS_MOUNT_DFLT_IOSIZE; |
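The conversions above (and throughout this patch) drop XFS's historical positive errno returns and the XFS_ERROR() wrapper in favour of the kernel-wide convention of returning negative errnos directly. As an illustration only — the helper name below is invented and is not part of the patch — every converted return site now follows this shape:

	#include <errno.h>

	/* illustrative sketch, not from the patch: negative errno on failure */
	static int demo_check_arg(const char *value)
	{
		if (!value || !*value)
			return -EINVAL;	/* was: return EINVAL or XFS_ERROR(EINVAL) */
		return 0;		/* success is still 0 */
	}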
@@ -597,15 +590,20 @@ xfs_max_file_offset( | |||
597 | return (((__uint64_t)pagefactor) << bitshift) - 1; | 590 | return (((__uint64_t)pagefactor) << bitshift) - 1; |
598 | } | 591 | } |
599 | 592 | ||
593 | /* | ||
594 | * xfs_set_inode32() and xfs_set_inode64() are passed an agcount | ||
595 | * because in the growfs case, mp->m_sb.sb_agcount is not updated | ||
596 | * yet to the potentially higher ag count. | ||
597 | */ | ||
600 | xfs_agnumber_t | 598 | xfs_agnumber_t |
601 | xfs_set_inode32(struct xfs_mount *mp) | 599 | xfs_set_inode32(struct xfs_mount *mp, xfs_agnumber_t agcount) |
602 | { | 600 | { |
603 | xfs_agnumber_t index = 0; | 601 | xfs_agnumber_t index = 0; |
604 | xfs_agnumber_t maxagi = 0; | 602 | xfs_agnumber_t maxagi = 0; |
605 | xfs_sb_t *sbp = &mp->m_sb; | 603 | xfs_sb_t *sbp = &mp->m_sb; |
606 | xfs_agnumber_t max_metadata; | 604 | xfs_agnumber_t max_metadata; |
607 | xfs_agino_t agino = XFS_OFFBNO_TO_AGINO(mp, sbp->sb_agblocks -1, 0); | 605 | xfs_agino_t agino; |
608 | xfs_ino_t ino = XFS_AGINO_TO_INO(mp, sbp->sb_agcount -1, agino); | 606 | xfs_ino_t ino; |
609 | xfs_perag_t *pag; | 607 | xfs_perag_t *pag; |
610 | 608 | ||
611 | /* Calculate how much should be reserved for inodes to meet | 609 | /* Calculate how much should be reserved for inodes to meet |
@@ -620,10 +618,12 @@ xfs_set_inode32(struct xfs_mount *mp) | |||
620 | do_div(icount, sbp->sb_agblocks); | 618 | do_div(icount, sbp->sb_agblocks); |
621 | max_metadata = icount; | 619 | max_metadata = icount; |
622 | } else { | 620 | } else { |
623 | max_metadata = sbp->sb_agcount; | 621 | max_metadata = agcount; |
624 | } | 622 | } |
625 | 623 | ||
626 | for (index = 0; index < sbp->sb_agcount; index++) { | 624 | agino = XFS_OFFBNO_TO_AGINO(mp, sbp->sb_agblocks - 1, 0); |
625 | |||
626 | for (index = 0; index < agcount; index++) { | ||
627 | ino = XFS_AGINO_TO_INO(mp, index, agino); | 627 | ino = XFS_AGINO_TO_INO(mp, index, agino); |
628 | 628 | ||
629 | if (ino > XFS_MAXINUMBER_32) { | 629 | if (ino > XFS_MAXINUMBER_32) { |
@@ -648,11 +648,11 @@ xfs_set_inode32(struct xfs_mount *mp) | |||
648 | } | 648 | } |
649 | 649 | ||
650 | xfs_agnumber_t | 650 | xfs_agnumber_t |
651 | xfs_set_inode64(struct xfs_mount *mp) | 651 | xfs_set_inode64(struct xfs_mount *mp, xfs_agnumber_t agcount) |
652 | { | 652 | { |
653 | xfs_agnumber_t index = 0; | 653 | xfs_agnumber_t index = 0; |
654 | 654 | ||
655 | for (index = 0; index < mp->m_sb.sb_agcount; index++) { | 655 | for (index = 0; index < agcount; index++) { |
656 | struct xfs_perag *pag; | 656 | struct xfs_perag *pag; |
657 | 657 | ||
658 | pag = xfs_perag_get(mp, index); | 658 | pag = xfs_perag_get(mp, index); |
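Per the comment added above, xfs_set_inode32()/xfs_set_inode64() now take the AG count explicitly so that a grow operation can evaluate the inode32/inode64 policy against the new, not-yet-committed AG count. The growfs-side caller is not part of these hunks; the snippet below only sketches the expected call shape, and the variable name nagcount is assumed:

	/* sketch only: a grower passing the prospective AG count (nagcount is hypothetical) */
	if (mp->m_flags & XFS_MOUNT_SMALL_INUMS)
		mp->m_maxagi = xfs_set_inode32(mp, nagcount);
	else
		mp->m_maxagi = xfs_set_inode64(mp, nagcount);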
@@ -686,7 +686,7 @@ xfs_blkdev_get( | |||
686 | xfs_warn(mp, "Invalid device [%s], error=%d\n", name, error); | 686 | xfs_warn(mp, "Invalid device [%s], error=%d\n", name, error); |
687 | } | 687 | } |
688 | 688 | ||
689 | return -error; | 689 | return error; |
690 | } | 690 | } |
691 | 691 | ||
692 | STATIC void | 692 | STATIC void |
@@ -756,7 +756,7 @@ xfs_open_devices( | |||
756 | if (rtdev == ddev || rtdev == logdev) { | 756 | if (rtdev == ddev || rtdev == logdev) { |
757 | xfs_warn(mp, | 757 | xfs_warn(mp, |
758 | "Cannot mount filesystem with identical rtdev and ddev/logdev."); | 758 | "Cannot mount filesystem with identical rtdev and ddev/logdev."); |
759 | error = EINVAL; | 759 | error = -EINVAL; |
760 | goto out_close_rtdev; | 760 | goto out_close_rtdev; |
761 | } | 761 | } |
762 | } | 762 | } |
@@ -764,7 +764,7 @@ xfs_open_devices( | |||
764 | /* | 764 | /* |
765 | * Setup xfs_mount buffer target pointers | 765 | * Setup xfs_mount buffer target pointers |
766 | */ | 766 | */ |
767 | error = ENOMEM; | 767 | error = -ENOMEM; |
768 | mp->m_ddev_targp = xfs_alloc_buftarg(mp, ddev); | 768 | mp->m_ddev_targp = xfs_alloc_buftarg(mp, ddev); |
769 | if (!mp->m_ddev_targp) | 769 | if (!mp->m_ddev_targp) |
770 | goto out_close_rtdev; | 770 | goto out_close_rtdev; |
@@ -1188,6 +1188,7 @@ xfs_fs_remount( | |||
1188 | char *options) | 1188 | char *options) |
1189 | { | 1189 | { |
1190 | struct xfs_mount *mp = XFS_M(sb); | 1190 | struct xfs_mount *mp = XFS_M(sb); |
1191 | xfs_sb_t *sbp = &mp->m_sb; | ||
1191 | substring_t args[MAX_OPT_ARGS]; | 1192 | substring_t args[MAX_OPT_ARGS]; |
1192 | char *p; | 1193 | char *p; |
1193 | int error; | 1194 | int error; |
@@ -1208,10 +1209,10 @@ xfs_fs_remount( | |||
1208 | mp->m_flags &= ~XFS_MOUNT_BARRIER; | 1209 | mp->m_flags &= ~XFS_MOUNT_BARRIER; |
1209 | break; | 1210 | break; |
1210 | case Opt_inode64: | 1211 | case Opt_inode64: |
1211 | mp->m_maxagi = xfs_set_inode64(mp); | 1212 | mp->m_maxagi = xfs_set_inode64(mp, sbp->sb_agcount); |
1212 | break; | 1213 | break; |
1213 | case Opt_inode32: | 1214 | case Opt_inode32: |
1214 | mp->m_maxagi = xfs_set_inode32(mp); | 1215 | mp->m_maxagi = xfs_set_inode32(mp, sbp->sb_agcount); |
1215 | break; | 1216 | break; |
1216 | default: | 1217 | default: |
1217 | /* | 1218 | /* |
@@ -1295,7 +1296,7 @@ xfs_fs_freeze( | |||
1295 | 1296 | ||
1296 | xfs_save_resvblks(mp); | 1297 | xfs_save_resvblks(mp); |
1297 | xfs_quiesce_attr(mp); | 1298 | xfs_quiesce_attr(mp); |
1298 | return -xfs_fs_log_dummy(mp); | 1299 | return xfs_fs_log_dummy(mp); |
1299 | } | 1300 | } |
1300 | 1301 | ||
1301 | STATIC int | 1302 | STATIC int |
@@ -1314,7 +1315,7 @@ xfs_fs_show_options( | |||
1314 | struct seq_file *m, | 1315 | struct seq_file *m, |
1315 | struct dentry *root) | 1316 | struct dentry *root) |
1316 | { | 1317 | { |
1317 | return -xfs_showargs(XFS_M(root->d_sb), m); | 1318 | return xfs_showargs(XFS_M(root->d_sb), m); |
1318 | } | 1319 | } |
1319 | 1320 | ||
1320 | /* | 1321 | /* |
@@ -1336,14 +1337,14 @@ xfs_finish_flags( | |||
1336 | mp->m_logbsize < mp->m_sb.sb_logsunit) { | 1337 | mp->m_logbsize < mp->m_sb.sb_logsunit) { |
1337 | xfs_warn(mp, | 1338 | xfs_warn(mp, |
1338 | "logbuf size must be greater than or equal to log stripe size"); | 1339 | "logbuf size must be greater than or equal to log stripe size"); |
1339 | return XFS_ERROR(EINVAL); | 1340 | return -EINVAL; |
1340 | } | 1341 | } |
1341 | } else { | 1342 | } else { |
1342 | /* Fail a mount if the logbuf is larger than 32K */ | 1343 | /* Fail a mount if the logbuf is larger than 32K */ |
1343 | if (mp->m_logbsize > XLOG_BIG_RECORD_BSIZE) { | 1344 | if (mp->m_logbsize > XLOG_BIG_RECORD_BSIZE) { |
1344 | xfs_warn(mp, | 1345 | xfs_warn(mp, |
1345 | "logbuf size for version 1 logs must be 16K or 32K"); | 1346 | "logbuf size for version 1 logs must be 16K or 32K"); |
1346 | return XFS_ERROR(EINVAL); | 1347 | return -EINVAL; |
1347 | } | 1348 | } |
1348 | } | 1349 | } |
1349 | 1350 | ||
@@ -1355,7 +1356,7 @@ xfs_finish_flags( | |||
1355 | xfs_warn(mp, | 1356 | xfs_warn(mp, |
1356 | "Cannot mount a V5 filesystem as %s. %s is always enabled for V5 filesystems.", | 1357 | "Cannot mount a V5 filesystem as %s. %s is always enabled for V5 filesystems.", |
1357 | MNTOPT_NOATTR2, MNTOPT_ATTR2); | 1358 | MNTOPT_NOATTR2, MNTOPT_ATTR2); |
1358 | return XFS_ERROR(EINVAL); | 1359 | return -EINVAL; |
1359 | } | 1360 | } |
1360 | 1361 | ||
1361 | /* | 1362 | /* |
@@ -1372,7 +1373,7 @@ xfs_finish_flags( | |||
1372 | if ((mp->m_sb.sb_flags & XFS_SBF_READONLY) && !ronly) { | 1373 | if ((mp->m_sb.sb_flags & XFS_SBF_READONLY) && !ronly) { |
1373 | xfs_warn(mp, | 1374 | xfs_warn(mp, |
1374 | "cannot mount a read-only filesystem as read-write"); | 1375 | "cannot mount a read-only filesystem as read-write"); |
1375 | return XFS_ERROR(EROFS); | 1376 | return -EROFS; |
1376 | } | 1377 | } |
1377 | 1378 | ||
1378 | if ((mp->m_qflags & (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE)) && | 1379 | if ((mp->m_qflags & (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE)) && |
@@ -1380,7 +1381,7 @@ xfs_finish_flags( | |||
1380 | !xfs_sb_version_has_pquotino(&mp->m_sb)) { | 1381 | !xfs_sb_version_has_pquotino(&mp->m_sb)) { |
1381 | xfs_warn(mp, | 1382 | xfs_warn(mp, |
1382 | "Super block does not support project and group quota together"); | 1383 | "Super block does not support project and group quota together"); |
1383 | return XFS_ERROR(EINVAL); | 1384 | return -EINVAL; |
1384 | } | 1385 | } |
1385 | 1386 | ||
1386 | return 0; | 1387 | return 0; |
@@ -1394,7 +1395,7 @@ xfs_fs_fill_super( | |||
1394 | { | 1395 | { |
1395 | struct inode *root; | 1396 | struct inode *root; |
1396 | struct xfs_mount *mp = NULL; | 1397 | struct xfs_mount *mp = NULL; |
1397 | int flags = 0, error = ENOMEM; | 1398 | int flags = 0, error = -ENOMEM; |
1398 | 1399 | ||
1399 | mp = kzalloc(sizeof(struct xfs_mount), GFP_KERNEL); | 1400 | mp = kzalloc(sizeof(struct xfs_mount), GFP_KERNEL); |
1400 | if (!mp) | 1401 | if (!mp) |
@@ -1428,11 +1429,11 @@ xfs_fs_fill_super( | |||
1428 | if (error) | 1429 | if (error) |
1429 | goto out_free_fsname; | 1430 | goto out_free_fsname; |
1430 | 1431 | ||
1431 | error = -xfs_init_mount_workqueues(mp); | 1432 | error = xfs_init_mount_workqueues(mp); |
1432 | if (error) | 1433 | if (error) |
1433 | goto out_close_devices; | 1434 | goto out_close_devices; |
1434 | 1435 | ||
1435 | error = -xfs_icsb_init_counters(mp); | 1436 | error = xfs_icsb_init_counters(mp); |
1436 | if (error) | 1437 | if (error) |
1437 | goto out_destroy_workqueues; | 1438 | goto out_destroy_workqueues; |
1438 | 1439 | ||
@@ -1474,12 +1475,12 @@ xfs_fs_fill_super( | |||
1474 | 1475 | ||
1475 | root = igrab(VFS_I(mp->m_rootip)); | 1476 | root = igrab(VFS_I(mp->m_rootip)); |
1476 | if (!root) { | 1477 | if (!root) { |
1477 | error = ENOENT; | 1478 | error = -ENOENT; |
1478 | goto out_unmount; | 1479 | goto out_unmount; |
1479 | } | 1480 | } |
1480 | sb->s_root = d_make_root(root); | 1481 | sb->s_root = d_make_root(root); |
1481 | if (!sb->s_root) { | 1482 | if (!sb->s_root) { |
1482 | error = ENOMEM; | 1483 | error = -ENOMEM; |
1483 | goto out_unmount; | 1484 | goto out_unmount; |
1484 | } | 1485 | } |
1485 | 1486 | ||
@@ -1499,7 +1500,7 @@ out_destroy_workqueues: | |||
1499 | xfs_free_fsname(mp); | 1500 | xfs_free_fsname(mp); |
1500 | kfree(mp); | 1501 | kfree(mp); |
1501 | out: | 1502 | out: |
1502 | return -error; | 1503 | return error; |
1503 | 1504 | ||
1504 | out_unmount: | 1505 | out_unmount: |
1505 | xfs_filestream_unmount(mp); | 1506 | xfs_filestream_unmount(mp); |
@@ -1761,9 +1762,15 @@ init_xfs_fs(void) | |||
1761 | if (error) | 1762 | if (error) |
1762 | goto out_cleanup_procfs; | 1763 | goto out_cleanup_procfs; |
1763 | 1764 | ||
1765 | xfs_kset = kset_create_and_add("xfs", NULL, fs_kobj); | ||
1766 | if (!xfs_kset) { | ||
1767 | error = -ENOMEM; | ||
1768 | goto out_sysctl_unregister; | ||
1769 | } | ||
1770 | |||
1764 | error = xfs_qm_init(); | 1771 | error = xfs_qm_init(); |
1765 | if (error) | 1772 | if (error) |
1766 | goto out_sysctl_unregister; | 1773 | goto out_kset_unregister; |
1767 | 1774 | ||
1768 | error = register_filesystem(&xfs_fs_type); | 1775 | error = register_filesystem(&xfs_fs_type); |
1769 | if (error) | 1776 | if (error) |
@@ -1772,6 +1779,8 @@ init_xfs_fs(void) | |||
1772 | 1779 | ||
1773 | out_qm_exit: | 1780 | out_qm_exit: |
1774 | xfs_qm_exit(); | 1781 | xfs_qm_exit(); |
1782 | out_kset_unregister: | ||
1783 | kset_unregister(xfs_kset); | ||
1775 | out_sysctl_unregister: | 1784 | out_sysctl_unregister: |
1776 | xfs_sysctl_unregister(); | 1785 | xfs_sysctl_unregister(); |
1777 | out_cleanup_procfs: | 1786 | out_cleanup_procfs: |
@@ -1793,6 +1802,7 @@ exit_xfs_fs(void) | |||
1793 | { | 1802 | { |
1794 | xfs_qm_exit(); | 1803 | xfs_qm_exit(); |
1795 | unregister_filesystem(&xfs_fs_type); | 1804 | unregister_filesystem(&xfs_fs_type); |
1805 | kset_unregister(xfs_kset); | ||
1796 | xfs_sysctl_unregister(); | 1806 | xfs_sysctl_unregister(); |
1797 | xfs_cleanup_procfs(); | 1807 | xfs_cleanup_procfs(); |
1798 | xfs_buf_terminate(); | 1808 | xfs_buf_terminate(); |
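The xfs_kset created in init_xfs_fs() appears as /sys/fs/xfs and serves as the parent for the per-mount kobjects this series introduces; it is torn down in the init error path and again in exit_xfs_fs(), in reverse order of setup. A reduced sketch of that create/unregister pairing, using placeholder names rather than the real module hooks:

	/* sketch: kset lifetime reduced to the two relevant calls (demo_* names are made up) */
	static struct kset *demo_kset;

	static int __init demo_init(void)
	{
		demo_kset = kset_create_and_add("xfs", NULL, fs_kobj);
		if (!demo_kset)
			return -ENOMEM;
		return 0;
	}

	static void __exit demo_exit(void)
	{
		kset_unregister(demo_kset);
	}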
diff --git a/fs/xfs/xfs_super.h b/fs/xfs/xfs_super.h index bbe3d15a7904..2b830c2f322e 100644 --- a/fs/xfs/xfs_super.h +++ b/fs/xfs/xfs_super.h | |||
@@ -44,16 +44,6 @@ extern void xfs_qm_exit(void); | |||
44 | # define XFS_REALTIME_STRING | 44 | # define XFS_REALTIME_STRING |
45 | #endif | 45 | #endif |
46 | 46 | ||
47 | #if XFS_BIG_BLKNOS | ||
48 | # if XFS_BIG_INUMS | ||
49 | # define XFS_BIGFS_STRING "large block/inode numbers, " | ||
50 | # else | ||
51 | # define XFS_BIGFS_STRING "large block numbers, " | ||
52 | # endif | ||
53 | #else | ||
54 | # define XFS_BIGFS_STRING | ||
55 | #endif | ||
56 | |||
57 | #ifdef DEBUG | 47 | #ifdef DEBUG |
58 | # define XFS_DBG_STRING "debug" | 48 | # define XFS_DBG_STRING "debug" |
59 | #else | 49 | #else |
@@ -64,7 +54,6 @@ extern void xfs_qm_exit(void); | |||
64 | #define XFS_BUILD_OPTIONS XFS_ACL_STRING \ | 54 | #define XFS_BUILD_OPTIONS XFS_ACL_STRING \ |
65 | XFS_SECURITY_STRING \ | 55 | XFS_SECURITY_STRING \ |
66 | XFS_REALTIME_STRING \ | 56 | XFS_REALTIME_STRING \ |
67 | XFS_BIGFS_STRING \ | ||
68 | XFS_DBG_STRING /* DBG must be last */ | 57 | XFS_DBG_STRING /* DBG must be last */ |
69 | 58 | ||
70 | struct xfs_inode; | 59 | struct xfs_inode; |
@@ -76,8 +65,8 @@ extern __uint64_t xfs_max_file_offset(unsigned int); | |||
76 | 65 | ||
77 | extern void xfs_flush_inodes(struct xfs_mount *mp); | 66 | extern void xfs_flush_inodes(struct xfs_mount *mp); |
78 | extern void xfs_blkdev_issue_flush(struct xfs_buftarg *); | 67 | extern void xfs_blkdev_issue_flush(struct xfs_buftarg *); |
79 | extern xfs_agnumber_t xfs_set_inode32(struct xfs_mount *); | 68 | extern xfs_agnumber_t xfs_set_inode32(struct xfs_mount *, xfs_agnumber_t agcount); |
80 | extern xfs_agnumber_t xfs_set_inode64(struct xfs_mount *); | 69 | extern xfs_agnumber_t xfs_set_inode64(struct xfs_mount *, xfs_agnumber_t agcount); |
81 | 70 | ||
82 | extern const struct export_operations xfs_export_operations; | 71 | extern const struct export_operations xfs_export_operations; |
83 | extern const struct xattr_handler *xfs_xattr_handlers[]; | 72 | extern const struct xattr_handler *xfs_xattr_handlers[]; |
diff --git a/fs/xfs/xfs_symlink.c b/fs/xfs/xfs_symlink.c index d69363c833e1..6a944a2cd36f 100644 --- a/fs/xfs/xfs_symlink.c +++ b/fs/xfs/xfs_symlink.c | |||
@@ -76,15 +76,15 @@ xfs_readlink_bmap( | |||
76 | bp = xfs_buf_read(mp->m_ddev_targp, d, BTOBB(byte_cnt), 0, | 76 | bp = xfs_buf_read(mp->m_ddev_targp, d, BTOBB(byte_cnt), 0, |
77 | &xfs_symlink_buf_ops); | 77 | &xfs_symlink_buf_ops); |
78 | if (!bp) | 78 | if (!bp) |
79 | return XFS_ERROR(ENOMEM); | 79 | return -ENOMEM; |
80 | error = bp->b_error; | 80 | error = bp->b_error; |
81 | if (error) { | 81 | if (error) { |
82 | xfs_buf_ioerror_alert(bp, __func__); | 82 | xfs_buf_ioerror_alert(bp, __func__); |
83 | xfs_buf_relse(bp); | 83 | xfs_buf_relse(bp); |
84 | 84 | ||
85 | /* bad CRC means corrupted metadata */ | 85 | /* bad CRC means corrupted metadata */ |
86 | if (error == EFSBADCRC) | 86 | if (error == -EFSBADCRC) |
87 | error = EFSCORRUPTED; | 87 | error = -EFSCORRUPTED; |
88 | goto out; | 88 | goto out; |
89 | } | 89 | } |
90 | byte_cnt = XFS_SYMLINK_BUF_SPACE(mp, byte_cnt); | 90 | byte_cnt = XFS_SYMLINK_BUF_SPACE(mp, byte_cnt); |
@@ -95,7 +95,7 @@ xfs_readlink_bmap( | |||
95 | if (xfs_sb_version_hascrc(&mp->m_sb)) { | 95 | if (xfs_sb_version_hascrc(&mp->m_sb)) { |
96 | if (!xfs_symlink_hdr_ok(ip->i_ino, offset, | 96 | if (!xfs_symlink_hdr_ok(ip->i_ino, offset, |
97 | byte_cnt, bp)) { | 97 | byte_cnt, bp)) { |
98 | error = EFSCORRUPTED; | 98 | error = -EFSCORRUPTED; |
99 | xfs_alert(mp, | 99 | xfs_alert(mp, |
100 | "symlink header does not match required off/len/owner (0x%x/Ox%x,0x%llx)", | 100 | "symlink header does not match required off/len/owner (0x%x/Ox%x,0x%llx)", |
101 | offset, byte_cnt, ip->i_ino); | 101 | offset, byte_cnt, ip->i_ino); |
@@ -135,7 +135,7 @@ xfs_readlink( | |||
135 | trace_xfs_readlink(ip); | 135 | trace_xfs_readlink(ip); |
136 | 136 | ||
137 | if (XFS_FORCED_SHUTDOWN(mp)) | 137 | if (XFS_FORCED_SHUTDOWN(mp)) |
138 | return XFS_ERROR(EIO); | 138 | return -EIO; |
139 | 139 | ||
140 | xfs_ilock(ip, XFS_ILOCK_SHARED); | 140 | xfs_ilock(ip, XFS_ILOCK_SHARED); |
141 | 141 | ||
@@ -148,7 +148,7 @@ xfs_readlink( | |||
148 | __func__, (unsigned long long) ip->i_ino, | 148 | __func__, (unsigned long long) ip->i_ino, |
149 | (long long) pathlen); | 149 | (long long) pathlen); |
150 | ASSERT(0); | 150 | ASSERT(0); |
151 | error = XFS_ERROR(EFSCORRUPTED); | 151 | error = -EFSCORRUPTED; |
152 | goto out; | 152 | goto out; |
153 | } | 153 | } |
154 | 154 | ||
@@ -203,14 +203,14 @@ xfs_symlink( | |||
203 | trace_xfs_symlink(dp, link_name); | 203 | trace_xfs_symlink(dp, link_name); |
204 | 204 | ||
205 | if (XFS_FORCED_SHUTDOWN(mp)) | 205 | if (XFS_FORCED_SHUTDOWN(mp)) |
206 | return XFS_ERROR(EIO); | 206 | return -EIO; |
207 | 207 | ||
208 | /* | 208 | /* |
209 | * Check component lengths of the target path name. | 209 | * Check component lengths of the target path name. |
210 | */ | 210 | */ |
211 | pathlen = strlen(target_path); | 211 | pathlen = strlen(target_path); |
212 | if (pathlen >= MAXPATHLEN) /* total string too long */ | 212 | if (pathlen >= MAXPATHLEN) /* total string too long */ |
213 | return XFS_ERROR(ENAMETOOLONG); | 213 | return -ENAMETOOLONG; |
214 | 214 | ||
215 | udqp = gdqp = NULL; | 215 | udqp = gdqp = NULL; |
216 | prid = xfs_get_initial_prid(dp); | 216 | prid = xfs_get_initial_prid(dp); |
@@ -238,7 +238,7 @@ xfs_symlink( | |||
238 | fs_blocks = xfs_symlink_blocks(mp, pathlen); | 238 | fs_blocks = xfs_symlink_blocks(mp, pathlen); |
239 | resblks = XFS_SYMLINK_SPACE_RES(mp, link_name->len, fs_blocks); | 239 | resblks = XFS_SYMLINK_SPACE_RES(mp, link_name->len, fs_blocks); |
240 | error = xfs_trans_reserve(tp, &M_RES(mp)->tr_symlink, resblks, 0); | 240 | error = xfs_trans_reserve(tp, &M_RES(mp)->tr_symlink, resblks, 0); |
241 | if (error == ENOSPC && fs_blocks == 0) { | 241 | if (error == -ENOSPC && fs_blocks == 0) { |
242 | resblks = 0; | 242 | resblks = 0; |
243 | error = xfs_trans_reserve(tp, &M_RES(mp)->tr_symlink, 0, 0); | 243 | error = xfs_trans_reserve(tp, &M_RES(mp)->tr_symlink, 0, 0); |
244 | } | 244 | } |
@@ -254,7 +254,7 @@ xfs_symlink( | |||
254 | * Check whether the directory allows new symlinks or not. | 254 | * Check whether the directory allows new symlinks or not. |
255 | */ | 255 | */ |
256 | if (dp->i_d.di_flags & XFS_DIFLAG_NOSYMLINKS) { | 256 | if (dp->i_d.di_flags & XFS_DIFLAG_NOSYMLINKS) { |
257 | error = XFS_ERROR(EPERM); | 257 | error = -EPERM; |
258 | goto error_return; | 258 | goto error_return; |
259 | } | 259 | } |
260 | 260 | ||
@@ -284,7 +284,7 @@ xfs_symlink( | |||
284 | error = xfs_dir_ialloc(&tp, dp, S_IFLNK | (mode & ~S_IFMT), 1, 0, | 284 | error = xfs_dir_ialloc(&tp, dp, S_IFLNK | (mode & ~S_IFMT), 1, 0, |
285 | prid, resblks > 0, &ip, NULL); | 285 | prid, resblks > 0, &ip, NULL); |
286 | if (error) { | 286 | if (error) { |
287 | if (error == ENOSPC) | 287 | if (error == -ENOSPC) |
288 | goto error_return; | 288 | goto error_return; |
289 | goto error1; | 289 | goto error1; |
290 | } | 290 | } |
@@ -348,7 +348,7 @@ xfs_symlink( | |||
348 | bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, d, | 348 | bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, d, |
349 | BTOBB(byte_cnt), 0); | 349 | BTOBB(byte_cnt), 0); |
350 | if (!bp) { | 350 | if (!bp) { |
351 | error = ENOMEM; | 351 | error = -ENOMEM; |
352 | goto error2; | 352 | goto error2; |
353 | } | 353 | } |
354 | bp->b_ops = &xfs_symlink_buf_ops; | 354 | bp->b_ops = &xfs_symlink_buf_ops; |
@@ -489,7 +489,7 @@ xfs_inactive_symlink_rmt( | |||
489 | XFS_FSB_TO_DADDR(mp, mval[i].br_startblock), | 489 | XFS_FSB_TO_DADDR(mp, mval[i].br_startblock), |
490 | XFS_FSB_TO_BB(mp, mval[i].br_blockcount), 0); | 490 | XFS_FSB_TO_BB(mp, mval[i].br_blockcount), 0); |
491 | if (!bp) { | 491 | if (!bp) { |
492 | error = ENOMEM; | 492 | error = -ENOMEM; |
493 | goto error_bmap_cancel; | 493 | goto error_bmap_cancel; |
494 | } | 494 | } |
495 | xfs_trans_binval(tp, bp); | 495 | xfs_trans_binval(tp, bp); |
@@ -562,7 +562,7 @@ xfs_inactive_symlink( | |||
562 | trace_xfs_inactive_symlink(ip); | 562 | trace_xfs_inactive_symlink(ip); |
563 | 563 | ||
564 | if (XFS_FORCED_SHUTDOWN(mp)) | 564 | if (XFS_FORCED_SHUTDOWN(mp)) |
565 | return XFS_ERROR(EIO); | 565 | return -EIO; |
566 | 566 | ||
567 | xfs_ilock(ip, XFS_ILOCK_EXCL); | 567 | xfs_ilock(ip, XFS_ILOCK_EXCL); |
568 | 568 | ||
@@ -580,7 +580,7 @@ xfs_inactive_symlink( | |||
580 | __func__, (unsigned long long)ip->i_ino, pathlen); | 580 | __func__, (unsigned long long)ip->i_ino, pathlen); |
581 | xfs_iunlock(ip, XFS_ILOCK_EXCL); | 581 | xfs_iunlock(ip, XFS_ILOCK_EXCL); |
582 | ASSERT(0); | 582 | ASSERT(0); |
583 | return XFS_ERROR(EFSCORRUPTED); | 583 | return -EFSCORRUPTED; |
584 | } | 584 | } |
585 | 585 | ||
586 | if (ip->i_df.if_flags & XFS_IFINLINE) { | 586 | if (ip->i_df.if_flags & XFS_IFINLINE) { |
diff --git a/fs/xfs/xfs_sysfs.c b/fs/xfs/xfs_sysfs.c new file mode 100644 index 000000000000..9835139ce1ec --- /dev/null +++ b/fs/xfs/xfs_sysfs.c | |||
@@ -0,0 +1,165 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2014 Red Hat, Inc. | ||
3 | * All Rights Reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License as | ||
7 | * published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it would be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write the Free Software Foundation, | ||
16 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | ||
17 | */ | ||
18 | |||
19 | #include "xfs.h" | ||
20 | #include "xfs_sysfs.h" | ||
21 | #include "xfs_log_format.h" | ||
22 | #include "xfs_log.h" | ||
23 | #include "xfs_log_priv.h" | ||
24 | |||
25 | struct xfs_sysfs_attr { | ||
26 | struct attribute attr; | ||
27 | ssize_t (*show)(char *buf, void *data); | ||
28 | ssize_t (*store)(const char *buf, size_t count, void *data); | ||
29 | }; | ||
30 | |||
31 | static inline struct xfs_sysfs_attr * | ||
32 | to_attr(struct attribute *attr) | ||
33 | { | ||
34 | return container_of(attr, struct xfs_sysfs_attr, attr); | ||
35 | } | ||
36 | |||
37 | #define XFS_SYSFS_ATTR_RW(name) \ | ||
38 | static struct xfs_sysfs_attr xfs_sysfs_attr_##name = __ATTR_RW(name) | ||
39 | #define XFS_SYSFS_ATTR_RO(name) \ | ||
40 | static struct xfs_sysfs_attr xfs_sysfs_attr_##name = __ATTR_RO(name) | ||
41 | |||
42 | #define ATTR_LIST(name) &xfs_sysfs_attr_##name.attr | ||
43 | |||
44 | /* | ||
45 | * xfs_mount kobject. This currently has no attributes and thus no need for show | ||
46 | * and store helpers. The mp kobject serves as the per-mount parent object that | ||
47 | * is identified by the fsname under sysfs. | ||
48 | */ | ||
49 | |||
50 | struct kobj_type xfs_mp_ktype = { | ||
51 | .release = xfs_sysfs_release, | ||
52 | }; | ||
53 | |||
54 | /* xlog */ | ||
55 | |||
56 | STATIC ssize_t | ||
57 | log_head_lsn_show( | ||
58 | char *buf, | ||
59 | void *data) | ||
60 | { | ||
61 | struct xlog *log = data; | ||
62 | int cycle; | ||
63 | int block; | ||
64 | |||
65 | spin_lock(&log->l_icloglock); | ||
66 | cycle = log->l_curr_cycle; | ||
67 | block = log->l_curr_block; | ||
68 | spin_unlock(&log->l_icloglock); | ||
69 | |||
70 | return snprintf(buf, PAGE_SIZE, "%d:%d\n", cycle, block); | ||
71 | } | ||
72 | XFS_SYSFS_ATTR_RO(log_head_lsn); | ||
73 | |||
74 | STATIC ssize_t | ||
75 | log_tail_lsn_show( | ||
76 | char *buf, | ||
77 | void *data) | ||
78 | { | ||
79 | struct xlog *log = data; | ||
80 | int cycle; | ||
81 | int block; | ||
82 | |||
83 | xlog_crack_atomic_lsn(&log->l_tail_lsn, &cycle, &block); | ||
84 | return snprintf(buf, PAGE_SIZE, "%d:%d\n", cycle, block); | ||
85 | } | ||
86 | XFS_SYSFS_ATTR_RO(log_tail_lsn); | ||
87 | |||
88 | STATIC ssize_t | ||
89 | reserve_grant_head_show( | ||
90 | char *buf, | ||
91 | void *data) | ||
92 | { | ||
93 | struct xlog *log = data; | ||
94 | int cycle; | ||
95 | int bytes; | ||
96 | |||
97 | xlog_crack_grant_head(&log->l_reserve_head.grant, &cycle, &bytes); | ||
98 | return snprintf(buf, PAGE_SIZE, "%d:%d\n", cycle, bytes); | ||
99 | } | ||
100 | XFS_SYSFS_ATTR_RO(reserve_grant_head); | ||
101 | |||
102 | STATIC ssize_t | ||
103 | write_grant_head_show( | ||
104 | char *buf, | ||
105 | void *data) | ||
106 | { | ||
107 | struct xlog *log = data; | ||
108 | int cycle; | ||
109 | int bytes; | ||
110 | |||
111 | xlog_crack_grant_head(&log->l_write_head.grant, &cycle, &bytes); | ||
112 | return snprintf(buf, PAGE_SIZE, "%d:%d\n", cycle, bytes); | ||
113 | } | ||
114 | XFS_SYSFS_ATTR_RO(write_grant_head); | ||
115 | |||
116 | static struct attribute *xfs_log_attrs[] = { | ||
117 | ATTR_LIST(log_head_lsn), | ||
118 | ATTR_LIST(log_tail_lsn), | ||
119 | ATTR_LIST(reserve_grant_head), | ||
120 | ATTR_LIST(write_grant_head), | ||
121 | NULL, | ||
122 | }; | ||
123 | |||
124 | static inline struct xlog * | ||
125 | to_xlog(struct kobject *kobject) | ||
126 | { | ||
127 | struct xfs_kobj *kobj = to_kobj(kobject); | ||
128 | return container_of(kobj, struct xlog, l_kobj); | ||
129 | } | ||
130 | |||
131 | STATIC ssize_t | ||
132 | xfs_log_show( | ||
133 | struct kobject *kobject, | ||
134 | struct attribute *attr, | ||
135 | char *buf) | ||
136 | { | ||
137 | struct xlog *log = to_xlog(kobject); | ||
138 | struct xfs_sysfs_attr *xfs_attr = to_attr(attr); | ||
139 | |||
140 | return xfs_attr->show ? xfs_attr->show(buf, log) : 0; | ||
141 | } | ||
142 | |||
143 | STATIC ssize_t | ||
144 | xfs_log_store( | ||
145 | struct kobject *kobject, | ||
146 | struct attribute *attr, | ||
147 | const char *buf, | ||
148 | size_t count) | ||
149 | { | ||
150 | struct xlog *log = to_xlog(kobject); | ||
151 | struct xfs_sysfs_attr *xfs_attr = to_attr(attr); | ||
152 | |||
153 | return xfs_attr->store ? xfs_attr->store(buf, count, log) : 0; | ||
154 | } | ||
155 | |||
156 | static struct sysfs_ops xfs_log_ops = { | ||
157 | .show = xfs_log_show, | ||
158 | .store = xfs_log_store, | ||
159 | }; | ||
160 | |||
161 | struct kobj_type xfs_log_ktype = { | ||
162 | .release = xfs_sysfs_release, | ||
163 | .sysfs_ops = &xfs_log_ops, | ||
164 | .default_attrs = xfs_log_attrs, | ||
165 | }; | ||
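xfs_sysfs.c routes every attribute through the small show/store dispatchers at the bottom of the file, so adding another read-only log attribute only needs a show helper, an XFS_SYSFS_ATTR_RO() definition, and an ATTR_LIST() entry in xfs_log_attrs[]. A sketch of such an addition — the attribute name curr_cycle is invented for illustration and is not part of this patch:

	STATIC ssize_t
	curr_cycle_show(
		char	*buf,
		void	*data)
	{
		struct xlog	*log = data;	/* same convention as the helpers above */
		int		cycle;

		spin_lock(&log->l_icloglock);
		cycle = log->l_curr_cycle;
		spin_unlock(&log->l_icloglock);

		return snprintf(buf, PAGE_SIZE, "%d\n", cycle);
	}
	XFS_SYSFS_ATTR_RO(curr_cycle);

	/* then list it: add ATTR_LIST(curr_cycle) to xfs_log_attrs[] before the NULL terminator */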
diff --git a/fs/xfs/xfs_sysfs.h b/fs/xfs/xfs_sysfs.h new file mode 100644 index 000000000000..54a2091183c0 --- /dev/null +++ b/fs/xfs/xfs_sysfs.h | |||
@@ -0,0 +1,59 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2014 Red Hat, Inc. | ||
3 | * All Rights Reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License as | ||
7 | * published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it would be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write the Free Software Foundation, | ||
16 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | ||
17 | */ | ||
18 | |||
19 | #ifndef __XFS_SYSFS_H__ | ||
20 | #define __XFS_SYSFS_H__ | ||
21 | |||
22 | extern struct kobj_type xfs_mp_ktype; /* xfs_mount */ | ||
23 | extern struct kobj_type xfs_log_ktype; /* xlog */ | ||
24 | |||
25 | static inline struct xfs_kobj * | ||
26 | to_kobj(struct kobject *kobject) | ||
27 | { | ||
28 | return container_of(kobject, struct xfs_kobj, kobject); | ||
29 | } | ||
30 | |||
31 | static inline void | ||
32 | xfs_sysfs_release(struct kobject *kobject) | ||
33 | { | ||
34 | struct xfs_kobj *kobj = to_kobj(kobject); | ||
35 | complete(&kobj->complete); | ||
36 | } | ||
37 | |||
38 | static inline int | ||
39 | xfs_sysfs_init( | ||
40 | struct xfs_kobj *kobj, | ||
41 | struct kobj_type *ktype, | ||
42 | struct xfs_kobj *parent_kobj, | ||
43 | const char *name) | ||
44 | { | ||
45 | init_completion(&kobj->complete); | ||
46 | return kobject_init_and_add(&kobj->kobject, ktype, | ||
47 | &parent_kobj->kobject, "%s", name); | ||
48 | } | ||
49 | |||
50 | static inline void | ||
51 | xfs_sysfs_del( | ||
52 | struct xfs_kobj *kobj) | ||
53 | { | ||
54 | kobject_del(&kobj->kobject); | ||
55 | kobject_put(&kobj->kobject); | ||
56 | wait_for_completion(&kobj->complete); | ||
57 | } | ||
58 | |||
59 | #endif /* __XFS_SYSFS_H__ */ | ||
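The xfs_sysfs_init()/xfs_sysfs_del() helpers pair kobject_init_and_add() with a completion that the release callback signals, so teardown can wait for the final kobject reference to drop. The real call sites are added elsewhere in this series; the lines below only sketch the expected pairing, and the parent kobject name mp->m_kobj is assumed rather than shown in these hunks:

	/* sketch of intended usage; error handling trimmed, names assumed */
	error = xfs_sysfs_init(&log->l_kobj, &xfs_log_ktype, &mp->m_kobj, "log");
	if (error)
		return error;
	/* ... object lifetime ... */
	xfs_sysfs_del(&log->l_kobj);	/* blocks until the last reference is put */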
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c index d03932564ccb..30e8e3410955 100644 --- a/fs/xfs/xfs_trans.c +++ b/fs/xfs/xfs_trans.c | |||
@@ -190,7 +190,7 @@ xfs_trans_reserve( | |||
190 | -((int64_t)blocks), rsvd); | 190 | -((int64_t)blocks), rsvd); |
191 | if (error != 0) { | 191 | if (error != 0) { |
192 | current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS); | 192 | current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS); |
193 | return (XFS_ERROR(ENOSPC)); | 193 | return -ENOSPC; |
194 | } | 194 | } |
195 | tp->t_blk_res += blocks; | 195 | tp->t_blk_res += blocks; |
196 | } | 196 | } |
@@ -241,7 +241,7 @@ xfs_trans_reserve( | |||
241 | error = xfs_mod_incore_sb(tp->t_mountp, XFS_SBS_FREXTENTS, | 241 | error = xfs_mod_incore_sb(tp->t_mountp, XFS_SBS_FREXTENTS, |
242 | -((int64_t)rtextents), rsvd); | 242 | -((int64_t)rtextents), rsvd); |
243 | if (error) { | 243 | if (error) { |
244 | error = XFS_ERROR(ENOSPC); | 244 | error = -ENOSPC; |
245 | goto undo_log; | 245 | goto undo_log; |
246 | } | 246 | } |
247 | tp->t_rtx_res += rtextents; | 247 | tp->t_rtx_res += rtextents; |
@@ -874,7 +874,7 @@ xfs_trans_commit( | |||
874 | goto out_unreserve; | 874 | goto out_unreserve; |
875 | 875 | ||
876 | if (XFS_FORCED_SHUTDOWN(mp)) { | 876 | if (XFS_FORCED_SHUTDOWN(mp)) { |
877 | error = XFS_ERROR(EIO); | 877 | error = -EIO; |
878 | goto out_unreserve; | 878 | goto out_unreserve; |
879 | } | 879 | } |
880 | 880 | ||
@@ -917,7 +917,7 @@ out_unreserve: | |||
917 | if (tp->t_ticket) { | 917 | if (tp->t_ticket) { |
918 | commit_lsn = xfs_log_done(mp, tp->t_ticket, NULL, log_flags); | 918 | commit_lsn = xfs_log_done(mp, tp->t_ticket, NULL, log_flags); |
919 | if (commit_lsn == -1 && !error) | 919 | if (commit_lsn == -1 && !error) |
920 | error = XFS_ERROR(EIO); | 920 | error = -EIO; |
921 | } | 921 | } |
922 | current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS); | 922 | current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS); |
923 | xfs_trans_free_items(tp, NULLCOMMITLSN, error ? XFS_TRANS_ABORT : 0); | 923 | xfs_trans_free_items(tp, NULLCOMMITLSN, error ? XFS_TRANS_ABORT : 0); |
@@ -1024,7 +1024,7 @@ xfs_trans_roll( | |||
1024 | */ | 1024 | */ |
1025 | error = xfs_trans_commit(trans, 0); | 1025 | error = xfs_trans_commit(trans, 0); |
1026 | if (error) | 1026 | if (error) |
1027 | return (error); | 1027 | return error; |
1028 | 1028 | ||
1029 | trans = *tpp; | 1029 | trans = *tpp; |
1030 | 1030 | ||
diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c index cb0f3a84cc68..859482f53b5a 100644 --- a/fs/xfs/xfs_trans_ail.c +++ b/fs/xfs/xfs_trans_ail.c | |||
@@ -762,7 +762,7 @@ xfs_trans_ail_init( | |||
762 | 762 | ||
763 | ailp = kmem_zalloc(sizeof(struct xfs_ail), KM_MAYFAIL); | 763 | ailp = kmem_zalloc(sizeof(struct xfs_ail), KM_MAYFAIL); |
764 | if (!ailp) | 764 | if (!ailp) |
765 | return ENOMEM; | 765 | return -ENOMEM; |
766 | 766 | ||
767 | ailp->xa_mount = mp; | 767 | ailp->xa_mount = mp; |
768 | INIT_LIST_HEAD(&ailp->xa_ail); | 768 | INIT_LIST_HEAD(&ailp->xa_ail); |
@@ -781,7 +781,7 @@ xfs_trans_ail_init( | |||
781 | 781 | ||
782 | out_free_ailp: | 782 | out_free_ailp: |
783 | kmem_free(ailp); | 783 | kmem_free(ailp); |
784 | return ENOMEM; | 784 | return -ENOMEM; |
785 | } | 785 | } |
786 | 786 | ||
787 | void | 787 | void |
diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c index b8eef0549f3f..96c898e7ac9a 100644 --- a/fs/xfs/xfs_trans_buf.c +++ b/fs/xfs/xfs_trans_buf.c | |||
@@ -166,7 +166,7 @@ xfs_trans_get_buf_map( | |||
166 | ASSERT(atomic_read(&bip->bli_refcount) > 0); | 166 | ASSERT(atomic_read(&bip->bli_refcount) > 0); |
167 | bip->bli_recur++; | 167 | bip->bli_recur++; |
168 | trace_xfs_trans_get_buf_recur(bip); | 168 | trace_xfs_trans_get_buf_recur(bip); |
169 | return (bp); | 169 | return bp; |
170 | } | 170 | } |
171 | 171 | ||
172 | bp = xfs_buf_get_map(target, map, nmaps, flags); | 172 | bp = xfs_buf_get_map(target, map, nmaps, flags); |
@@ -178,7 +178,7 @@ xfs_trans_get_buf_map( | |||
178 | 178 | ||
179 | _xfs_trans_bjoin(tp, bp, 1); | 179 | _xfs_trans_bjoin(tp, bp, 1); |
180 | trace_xfs_trans_get_buf(bp->b_fspriv); | 180 | trace_xfs_trans_get_buf(bp->b_fspriv); |
181 | return (bp); | 181 | return bp; |
182 | } | 182 | } |
183 | 183 | ||
184 | /* | 184 | /* |
@@ -201,9 +201,8 @@ xfs_trans_getsb(xfs_trans_t *tp, | |||
201 | * Default to just trying to lock the superblock buffer | 201 | * Default to just trying to lock the superblock buffer |
202 | * if tp is NULL. | 202 | * if tp is NULL. |
203 | */ | 203 | */ |
204 | if (tp == NULL) { | 204 | if (tp == NULL) |
205 | return (xfs_getsb(mp, flags)); | 205 | return xfs_getsb(mp, flags); |
206 | } | ||
207 | 206 | ||
208 | /* | 207 | /* |
209 | * If the superblock buffer already has this transaction | 208 | * If the superblock buffer already has this transaction |
@@ -218,7 +217,7 @@ xfs_trans_getsb(xfs_trans_t *tp, | |||
218 | ASSERT(atomic_read(&bip->bli_refcount) > 0); | 217 | ASSERT(atomic_read(&bip->bli_refcount) > 0); |
219 | bip->bli_recur++; | 218 | bip->bli_recur++; |
220 | trace_xfs_trans_getsb_recur(bip); | 219 | trace_xfs_trans_getsb_recur(bip); |
221 | return (bp); | 220 | return bp; |
222 | } | 221 | } |
223 | 222 | ||
224 | bp = xfs_getsb(mp, flags); | 223 | bp = xfs_getsb(mp, flags); |
@@ -227,7 +226,7 @@ xfs_trans_getsb(xfs_trans_t *tp, | |||
227 | 226 | ||
228 | _xfs_trans_bjoin(tp, bp, 1); | 227 | _xfs_trans_bjoin(tp, bp, 1); |
229 | trace_xfs_trans_getsb(bp->b_fspriv); | 228 | trace_xfs_trans_getsb(bp->b_fspriv); |
230 | return (bp); | 229 | return bp; |
231 | } | 230 | } |
232 | 231 | ||
233 | #ifdef DEBUG | 232 | #ifdef DEBUG |
@@ -267,7 +266,7 @@ xfs_trans_read_buf_map( | |||
267 | bp = xfs_buf_read_map(target, map, nmaps, flags, ops); | 266 | bp = xfs_buf_read_map(target, map, nmaps, flags, ops); |
268 | if (!bp) | 267 | if (!bp) |
269 | return (flags & XBF_TRYLOCK) ? | 268 | return (flags & XBF_TRYLOCK) ? |
270 | EAGAIN : XFS_ERROR(ENOMEM); | 269 | -EAGAIN : -ENOMEM; |
271 | 270 | ||
272 | if (bp->b_error) { | 271 | if (bp->b_error) { |
273 | error = bp->b_error; | 272 | error = bp->b_error; |
@@ -277,8 +276,8 @@ xfs_trans_read_buf_map( | |||
277 | xfs_buf_relse(bp); | 276 | xfs_buf_relse(bp); |
278 | 277 | ||
279 | /* bad CRC means corrupted metadata */ | 278 | /* bad CRC means corrupted metadata */ |
280 | if (error == EFSBADCRC) | 279 | if (error == -EFSBADCRC) |
281 | error = EFSCORRUPTED; | 280 | error = -EFSCORRUPTED; |
282 | return error; | 281 | return error; |
283 | } | 282 | } |
284 | #ifdef DEBUG | 283 | #ifdef DEBUG |
@@ -287,7 +286,7 @@ xfs_trans_read_buf_map( | |||
287 | if (((xfs_req_num++) % xfs_error_mod) == 0) { | 286 | if (((xfs_req_num++) % xfs_error_mod) == 0) { |
288 | xfs_buf_relse(bp); | 287 | xfs_buf_relse(bp); |
289 | xfs_debug(mp, "Returning error!"); | 288 | xfs_debug(mp, "Returning error!"); |
290 | return XFS_ERROR(EIO); | 289 | return -EIO; |
291 | } | 290 | } |
292 | } | 291 | } |
293 | } | 292 | } |
@@ -343,8 +342,8 @@ xfs_trans_read_buf_map( | |||
343 | xfs_force_shutdown(tp->t_mountp, | 342 | xfs_force_shutdown(tp->t_mountp, |
344 | SHUTDOWN_META_IO_ERROR); | 343 | SHUTDOWN_META_IO_ERROR); |
345 | /* bad CRC means corrupted metadata */ | 344 | /* bad CRC means corrupted metadata */ |
346 | if (error == EFSBADCRC) | 345 | if (error == -EFSBADCRC) |
347 | error = EFSCORRUPTED; | 346 | error = -EFSCORRUPTED; |
348 | return error; | 347 | return error; |
349 | } | 348 | } |
350 | } | 349 | } |
@@ -355,7 +354,7 @@ xfs_trans_read_buf_map( | |||
355 | if (XFS_FORCED_SHUTDOWN(mp)) { | 354 | if (XFS_FORCED_SHUTDOWN(mp)) { |
356 | trace_xfs_trans_read_buf_shut(bp, _RET_IP_); | 355 | trace_xfs_trans_read_buf_shut(bp, _RET_IP_); |
357 | *bpp = NULL; | 356 | *bpp = NULL; |
358 | return XFS_ERROR(EIO); | 357 | return -EIO; |
359 | } | 358 | } |
360 | 359 | ||
361 | 360 | ||
@@ -372,7 +371,7 @@ xfs_trans_read_buf_map( | |||
372 | if (bp == NULL) { | 371 | if (bp == NULL) { |
373 | *bpp = NULL; | 372 | *bpp = NULL; |
374 | return (flags & XBF_TRYLOCK) ? | 373 | return (flags & XBF_TRYLOCK) ? |
375 | 0 : XFS_ERROR(ENOMEM); | 374 | 0 : -ENOMEM; |
376 | } | 375 | } |
377 | if (bp->b_error) { | 376 | if (bp->b_error) { |
378 | error = bp->b_error; | 377 | error = bp->b_error; |
@@ -384,8 +383,8 @@ xfs_trans_read_buf_map( | |||
384 | xfs_buf_relse(bp); | 383 | xfs_buf_relse(bp); |
385 | 384 | ||
386 | /* bad CRC means corrupted metadata */ | 385 | /* bad CRC means corrupted metadata */ |
387 | if (error == EFSBADCRC) | 386 | if (error == -EFSBADCRC) |
388 | error = EFSCORRUPTED; | 387 | error = -EFSCORRUPTED; |
389 | return error; | 388 | return error; |
390 | } | 389 | } |
391 | #ifdef DEBUG | 390 | #ifdef DEBUG |
@@ -396,7 +395,7 @@ xfs_trans_read_buf_map( | |||
396 | SHUTDOWN_META_IO_ERROR); | 395 | SHUTDOWN_META_IO_ERROR); |
397 | xfs_buf_relse(bp); | 396 | xfs_buf_relse(bp); |
398 | xfs_debug(mp, "Returning trans error!"); | 397 | xfs_debug(mp, "Returning trans error!"); |
399 | return XFS_ERROR(EIO); | 398 | return -EIO; |
400 | } | 399 | } |
401 | } | 400 | } |
402 | } | 401 | } |
@@ -414,7 +413,7 @@ shutdown_abort: | |||
414 | trace_xfs_trans_read_buf_shut(bp, _RET_IP_); | 413 | trace_xfs_trans_read_buf_shut(bp, _RET_IP_); |
415 | xfs_buf_relse(bp); | 414 | xfs_buf_relse(bp); |
416 | *bpp = NULL; | 415 | *bpp = NULL; |
417 | return XFS_ERROR(EIO); | 416 | return -EIO; |
418 | } | 417 | } |
419 | 418 | ||
420 | /* | 419 | /* |
diff --git a/fs/xfs/xfs_trans_dquot.c b/fs/xfs/xfs_trans_dquot.c index 41172861e857..846e061c2e98 100644 --- a/fs/xfs/xfs_trans_dquot.c +++ b/fs/xfs/xfs_trans_dquot.c | |||
@@ -722,8 +722,8 @@ xfs_trans_dqresv( | |||
722 | error_return: | 722 | error_return: |
723 | xfs_dqunlock(dqp); | 723 | xfs_dqunlock(dqp); |
724 | if (flags & XFS_QMOPT_ENOSPC) | 724 | if (flags & XFS_QMOPT_ENOSPC) |
725 | return ENOSPC; | 725 | return -ENOSPC; |
726 | return EDQUOT; | 726 | return -EDQUOT; |
727 | } | 727 | } |
728 | 728 | ||
729 | 729 | ||
diff --git a/fs/xfs/xfs_types.h b/fs/xfs/xfs_types.h index 65c6e6650b1a..b79dc66b2ecd 100644 --- a/fs/xfs/xfs_types.h +++ b/fs/xfs/xfs_types.h | |||
@@ -38,43 +38,18 @@ typedef __int32_t xfs_tid_t; /* transaction identifier */ | |||
38 | typedef __uint32_t xfs_dablk_t; /* dir/attr block number (in file) */ | 38 | typedef __uint32_t xfs_dablk_t; /* dir/attr block number (in file) */ |
39 | typedef __uint32_t xfs_dahash_t; /* dir/attr hash value */ | 39 | typedef __uint32_t xfs_dahash_t; /* dir/attr hash value */ |
40 | 40 | ||
41 | /* | ||
42 | * These types are 64 bits on disk but are either 32 or 64 bits in memory. | ||
43 | * Disk based types: | ||
44 | */ | ||
45 | typedef __uint64_t xfs_dfsbno_t; /* blockno in filesystem (agno|agbno) */ | ||
46 | typedef __uint64_t xfs_drfsbno_t; /* blockno in filesystem (raw) */ | ||
47 | typedef __uint64_t xfs_drtbno_t; /* extent (block) in realtime area */ | ||
48 | typedef __uint64_t xfs_dfiloff_t; /* block number in a file */ | ||
49 | typedef __uint64_t xfs_dfilblks_t; /* number of blocks in a file */ | ||
50 | |||
51 | /* | ||
52 | * Memory based types are conditional. | ||
53 | */ | ||
54 | #if XFS_BIG_BLKNOS | ||
55 | typedef __uint64_t xfs_fsblock_t; /* blockno in filesystem (agno|agbno) */ | 41 | typedef __uint64_t xfs_fsblock_t; /* blockno in filesystem (agno|agbno) */ |
56 | typedef __uint64_t xfs_rfsblock_t; /* blockno in filesystem (raw) */ | 42 | typedef __uint64_t xfs_rfsblock_t; /* blockno in filesystem (raw) */ |
57 | typedef __uint64_t xfs_rtblock_t; /* extent (block) in realtime area */ | 43 | typedef __uint64_t xfs_rtblock_t; /* extent (block) in realtime area */ |
58 | typedef __int64_t xfs_srtblock_t; /* signed version of xfs_rtblock_t */ | ||
59 | #else | ||
60 | typedef __uint32_t xfs_fsblock_t; /* blockno in filesystem (agno|agbno) */ | ||
61 | typedef __uint32_t xfs_rfsblock_t; /* blockno in filesystem (raw) */ | ||
62 | typedef __uint32_t xfs_rtblock_t; /* extent (block) in realtime area */ | ||
63 | typedef __int32_t xfs_srtblock_t; /* signed version of xfs_rtblock_t */ | ||
64 | #endif | ||
65 | typedef __uint64_t xfs_fileoff_t; /* block number in a file */ | 44 | typedef __uint64_t xfs_fileoff_t; /* block number in a file */ |
66 | typedef __int64_t xfs_sfiloff_t; /* signed block number in a file */ | ||
67 | typedef __uint64_t xfs_filblks_t; /* number of blocks in a file */ | 45 | typedef __uint64_t xfs_filblks_t; /* number of blocks in a file */ |
68 | 46 | ||
47 | typedef __int64_t xfs_srtblock_t; /* signed version of xfs_rtblock_t */ | ||
48 | typedef __int64_t xfs_sfiloff_t; /* signed block number in a file */ | ||
69 | 49 | ||
70 | /* | 50 | /* |
71 | * Null values for the types. | 51 | * Null values for the types. |
72 | */ | 52 | */ |
73 | #define NULLDFSBNO ((xfs_dfsbno_t)-1) | ||
74 | #define NULLDRFSBNO ((xfs_drfsbno_t)-1) | ||
75 | #define NULLDRTBNO ((xfs_drtbno_t)-1) | ||
76 | #define NULLDFILOFF ((xfs_dfiloff_t)-1) | ||
77 | |||
78 | #define NULLFSBLOCK ((xfs_fsblock_t)-1) | 53 | #define NULLFSBLOCK ((xfs_fsblock_t)-1) |
79 | #define NULLRFSBLOCK ((xfs_rfsblock_t)-1) | 54 | #define NULLRFSBLOCK ((xfs_rfsblock_t)-1) |
80 | #define NULLRTBLOCK ((xfs_rtblock_t)-1) | 55 | #define NULLRTBLOCK ((xfs_rtblock_t)-1) |
diff --git a/fs/xfs/xfs_vnode.h b/fs/xfs/xfs_vnode.h deleted file mode 100644 index e8a77383c0d5..000000000000 --- a/fs/xfs/xfs_vnode.h +++ /dev/null | |||
@@ -1,46 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2000-2005 Silicon Graphics, Inc. | ||
3 | * All Rights Reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License as | ||
7 | * published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it would be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write the Free Software Foundation, | ||
16 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | ||
17 | */ | ||
18 | #ifndef __XFS_VNODE_H__ | ||
19 | #define __XFS_VNODE_H__ | ||
20 | |||
21 | #include "xfs_fs.h" | ||
22 | |||
23 | struct file; | ||
24 | struct xfs_inode; | ||
25 | struct attrlist_cursor_kern; | ||
26 | |||
27 | /* | ||
28 | * Flags for read/write calls - same values as IRIX | ||
29 | */ | ||
30 | #define IO_ISDIRECT 0x00004 /* bypass page cache */ | ||
31 | #define IO_INVIS 0x00020 /* don't update inode timestamps */ | ||
32 | |||
33 | #define XFS_IO_FLAGS \ | ||
34 | { IO_ISDIRECT, "DIRECT" }, \ | ||
35 | { IO_INVIS, "INVIS"} | ||
36 | |||
37 | /* | ||
38 | * Some useful predicates. | ||
39 | */ | ||
40 | #define VN_MAPPED(vp) mapping_mapped(vp->i_mapping) | ||
41 | #define VN_CACHED(vp) (vp->i_mapping->nrpages) | ||
42 | #define VN_DIRTY(vp) mapping_tagged(vp->i_mapping, \ | ||
43 | PAGECACHE_TAG_DIRTY) | ||
44 | |||
45 | |||
46 | #endif /* __XFS_VNODE_H__ */ | ||
diff --git a/fs/xfs/xfs_xattr.c b/fs/xfs/xfs_xattr.c index 78ed92a46fdd..93455b998041 100644 --- a/fs/xfs/xfs_xattr.c +++ b/fs/xfs/xfs_xattr.c | |||
@@ -49,7 +49,7 @@ xfs_xattr_get(struct dentry *dentry, const char *name, | |||
49 | value = NULL; | 49 | value = NULL; |
50 | } | 50 | } |
51 | 51 | ||
52 | error = -xfs_attr_get(ip, (unsigned char *)name, value, &asize, xflags); | 52 | error = xfs_attr_get(ip, (unsigned char *)name, value, &asize, xflags); |
53 | if (error) | 53 | if (error) |
54 | return error; | 54 | return error; |
55 | return asize; | 55 | return asize; |
@@ -71,8 +71,8 @@ xfs_xattr_set(struct dentry *dentry, const char *name, const void *value, | |||
71 | xflags |= ATTR_REPLACE; | 71 | xflags |= ATTR_REPLACE; |
72 | 72 | ||
73 | if (!value) | 73 | if (!value) |
74 | return -xfs_attr_remove(ip, (unsigned char *)name, xflags); | 74 | return xfs_attr_remove(ip, (unsigned char *)name, xflags); |
75 | return -xfs_attr_set(ip, (unsigned char *)name, | 75 | return xfs_attr_set(ip, (unsigned char *)name, |
76 | (void *)value, size, xflags); | 76 | (void *)value, size, xflags); |
77 | } | 77 | } |
78 | 78 | ||