Diffstat (limited to 'fs')
-rw-r--r--  fs/9p/fid.c | 4
-rw-r--r--  fs/9p/v9fs.c | 51
-rw-r--r--  fs/9p/v9fs.h | 5
-rw-r--r--  fs/9p/vfs_file.c | 4
-rw-r--r--  fs/9p/vfs_inode.c | 7
-rw-r--r--  fs/Kconfig | 128
-rw-r--r--  fs/Kconfig.binfmt | 3
-rw-r--r--  fs/adfs/super.c | 26
-rw-r--r--  fs/affs/affs.h | 3
-rw-r--r--  fs/affs/amigaffs.c | 6
-rw-r--r--  fs/affs/inode.c | 20
-rw-r--r--  fs/affs/namei.c | 12
-rw-r--r--  fs/affs/super.c | 26
-rw-r--r--  fs/afs/dir.c | 4
-rw-r--r--  fs/afs/inode.c | 5
-rw-r--r--  fs/afs/mntpt.c | 23
-rw-r--r--  fs/afs/security.c | 4
-rw-r--r--  fs/afs/super.c | 5
-rw-r--r--  fs/aio.c | 28
-rw-r--r--  fs/autofs/autofs_i.h | 1
-rw-r--r--  fs/autofs/inode.c | 30
-rw-r--r--  fs/autofs/root.c | 22
-rw-r--r--  fs/autofs4/inode.c | 5
-rw-r--r--  fs/autofs4/root.c | 5
-rw-r--r--  fs/bad_inode.c | 14
-rw-r--r--  fs/befs/linuxvfs.c | 43
-rw-r--r--  fs/bfs/bfs.h | 2
-rw-r--r--  fs/bfs/dir.c | 6
-rw-r--r--  fs/bfs/inode.c | 32
-rw-r--r--  fs/binfmt_aout.c | 5
-rw-r--r--  fs/binfmt_elf.c | 157
-rw-r--r--  fs/binfmt_flat.c | 9
-rw-r--r--  fs/binfmt_som.c | 1
-rw-r--r--  fs/block_dev.c | 7
-rw-r--r--  fs/buffer.c | 92
-rw-r--r--  fs/char_dev.c | 2
-rw-r--r--  fs/cifs/cifs_dfs_ref.c | 25
-rw-r--r--  fs/cifs/cifsfs.c | 8
-rw-r--r--  fs/cifs/cifsfs.h | 1
-rw-r--r--  fs/cifs/inode.c | 24
-rw-r--r--  fs/coda/pioctl.c | 6
-rw-r--r--  fs/compat.c | 92
-rw-r--r--  fs/compat_ioctl.c | 38
-rw-r--r--  fs/configfs/symlink.c | 8
-rw-r--r--  fs/dcache.c | 108
-rw-r--r--  fs/dcookies.c | 34
-rw-r--r--  fs/debugfs/file.c | 32
-rw-r--r--  fs/devpts/inode.c | 21
-rw-r--r--  fs/direct-io.c | 4
-rw-r--r--  fs/dlm/ast.c | 9
-rw-r--r--  fs/dlm/config.c | 2
-rw-r--r--  fs/dlm/debug_fs.c | 8
-rw-r--r--  fs/dlm/dir.c | 28
-rw-r--r--  fs/dlm/dlm_internal.h | 53
-rw-r--r--  fs/dlm/lock.c | 139
-rw-r--r--  fs/dlm/lock.h | 2
-rw-r--r--  fs/dlm/lockspace.c | 2
-rw-r--r--  fs/dlm/memory.c | 4
-rw-r--r--  fs/dlm/midcomms.c | 33
-rw-r--r--  fs/dlm/netlink.c | 9
-rw-r--r--  fs/dlm/rcom.c | 63
-rw-r--r--  fs/dlm/recover.c | 4
-rw-r--r--  fs/dlm/requestqueue.c | 12
-rw-r--r--  fs/dlm/requestqueue.h | 2
-rw-r--r--  fs/dlm/user.c | 29
-rw-r--r--  fs/dlm/util.c | 61
-rw-r--r--  fs/dquot.c | 24
-rw-r--r--  fs/ecryptfs/crypto.c | 191
-rw-r--r--  fs/ecryptfs/dentry.c | 12
-rw-r--r--  fs/ecryptfs/ecryptfs_kernel.h | 17
-rw-r--r--  fs/ecryptfs/file.c | 5
-rw-r--r--  fs/ecryptfs/inode.c | 40
-rw-r--r--  fs/ecryptfs/keystore.c | 8
-rw-r--r--  fs/ecryptfs/main.c | 46
-rw-r--r--  fs/ecryptfs/mmap.c | 28
-rw-r--r--  fs/ecryptfs/read_write.c | 2
-rw-r--r--  fs/ecryptfs/super.c | 48
-rw-r--r--  fs/efs/inode.c | 25
-rw-r--r--  fs/efs/namei.c | 23
-rw-r--r--  fs/efs/super.c | 16
-rw-r--r--  fs/eventfd.c | 1
-rw-r--r--  fs/eventpoll.c | 2
-rw-r--r--  fs/exec.c | 53
-rw-r--r--  fs/ext2/balloc.c | 98
-rw-r--r--  fs/ext2/dir.c | 2
-rw-r--r--  fs/ext2/ext2.h | 6
-rw-r--r--  fs/ext2/file.c | 4
-rw-r--r--  fs/ext2/inode.c | 37
-rw-r--r--  fs/ext2/ioctl.c | 12
-rw-r--r--  fs/ext2/namei.c | 12
-rw-r--r--  fs/ext2/super.c | 66
-rw-r--r--  fs/ext3/balloc.c | 101
-rw-r--r--  fs/ext3/ialloc.c | 70
-rw-r--r--  fs/ext3/inode.c | 143
-rw-r--r--  fs/ext3/namei.c | 33
-rw-r--r--  fs/ext3/resize.c | 19
-rw-r--r--  fs/ext3/super.c | 79
-rw-r--r--  fs/ext3/xattr.c | 6
-rw-r--r--  fs/ext4/balloc.c | 8
-rw-r--r--  fs/ext4/ialloc.c | 58
-rw-r--r--  fs/ext4/inode.c | 153
-rw-r--r--  fs/ext4/mballoc.c | 164
-rw-r--r--  fs/ext4/migrate.c | 123
-rw-r--r--  fs/ext4/namei.c | 30
-rw-r--r--  fs/ext4/resize.c | 7
-rw-r--r--  fs/ext4/super.c | 69
-rw-r--r--  fs/fat/file.c | 47
-rw-r--r--  fs/fat/inode.c | 16
-rw-r--r--  fs/fat/misc.c | 5
-rw-r--r--  fs/fcntl.c | 4
-rw-r--r--  fs/file.c | 8
-rw-r--r--  fs/file_table.c | 8
-rw-r--r--  fs/freevxfs/vxfs_extern.h | 2
-rw-r--r--  fs/freevxfs/vxfs_inode.c | 45
-rw-r--r--  fs/freevxfs/vxfs_lookup.c | 6
-rw-r--r--  fs/freevxfs/vxfs_super.c | 10
-rw-r--r--  fs/fs-writeback.c | 40
-rw-r--r--  fs/fuse/dev.c | 113
-rw-r--r--  fs/fuse/dir.c | 7
-rw-r--r--  fs/fuse/file.c | 14
-rw-r--r--  fs/fuse/fuse_i.h | 18
-rw-r--r--  fs/fuse/inode.c | 14
-rw-r--r--  fs/gfs2/bmap.c | 2
-rw-r--r--  fs/gfs2/dir.c | 2
-rw-r--r--  fs/gfs2/glock.c | 37
-rw-r--r--  fs/gfs2/glock.h | 4
-rw-r--r--  fs/gfs2/incore.h | 4
-rw-r--r--  fs/gfs2/inode.c | 2
-rw-r--r--  fs/gfs2/ops_address.c | 2
-rw-r--r--  fs/gfs2/ops_export.c | 2
-rw-r--r--  fs/gfs2/ops_fstype.c | 7
-rw-r--r--  fs/gfs2/ops_inode.c | 2
-rw-r--r--  fs/hfs/bfind.c | 11
-rw-r--r--  fs/hfs/brec.c | 4
-rw-r--r--  fs/hfs/btree.c | 26
-rw-r--r--  fs/hfs/hfs.h | 2
-rw-r--r--  fs/hfs/super.c | 2
-rw-r--r--  fs/hfsplus/btree.c | 6
-rw-r--r--  fs/hfsplus/dir.c | 6
-rw-r--r--  fs/hfsplus/hfsplus_fs.h | 3
-rw-r--r--  fs/hfsplus/super.c | 47
-rw-r--r--  fs/hfsplus/unicode.c | 3
-rw-r--r--  fs/hostfs/hostfs_kern.c | 66
-rw-r--r--  fs/hostfs/hostfs_user.c | 8
-rw-r--r--  fs/hpfs/super.c | 17
-rw-r--r--  fs/hppfs/hppfs_kern.c | 27
-rw-r--r--  fs/hugetlbfs/inode.c | 5
-rw-r--r--  fs/inode.c | 4
-rw-r--r--  fs/inotify.c | 30
-rw-r--r--  fs/inotify_user.c | 43
-rw-r--r--  fs/ioctl.c | 223
-rw-r--r--  fs/isofs/export.c | 14
-rw-r--r--  fs/isofs/inode.c | 94
-rw-r--r--  fs/isofs/isofs.h | 3
-rw-r--r--  fs/isofs/namei.c | 4
-rw-r--r--  fs/isofs/rock.c | 4
-rw-r--r--  fs/jbd/commit.c | 17
-rw-r--r--  fs/jbd/journal.c | 2
-rw-r--r--  fs/jbd/recovery.c | 2
-rw-r--r--  fs/jbd2/commit.c | 10
-rw-r--r--  fs/jbd2/recovery.c | 4
-rw-r--r--  fs/jffs2/acl.c | 6
-rw-r--r--  fs/jffs2/acl.h | 2
-rw-r--r--  fs/jffs2/dir.c | 6
-rw-r--r--  fs/jffs2/fs.c | 62
-rw-r--r--  fs/jffs2/nodelist.c | 9
-rw-r--r--  fs/jffs2/os-linux.h | 2
-rw-r--r--  fs/jffs2/readinode.c | 31
-rw-r--r--  fs/jffs2/super.c | 1
-rw-r--r--  fs/jffs2/write.c | 32
-rw-r--r--  fs/jfs/file.c | 5
-rw-r--r--  fs/jfs/inode.c | 20
-rw-r--r--  fs/jfs/ioctl.c | 31
-rw-r--r--  fs/jfs/jfs_dinode.h | 2
-rw-r--r--  fs/jfs/jfs_inode.h | 6
-rw-r--r--  fs/jfs/namei.c | 39
-rw-r--r--  fs/jfs/super.c | 15
-rw-r--r--  fs/libfs.c | 46
-rw-r--r--  fs/lockd/host.c | 10
-rw-r--r--  fs/lockd/svclock.c | 28
-rw-r--r--  fs/locks.c | 5
-rw-r--r--  fs/minix/inode.c | 43
-rw-r--r--  fs/minix/minix.h | 1
-rw-r--r--  fs/minix/namei.c | 7
-rw-r--r--  fs/mpage.c | 7
-rw-r--r--  fs/namei.c | 330
-rw-r--r--  fs/namespace.c | 380
-rw-r--r--  fs/ncpfs/inode.c | 53
-rw-r--r--  fs/nfs/callback.c | 18
-rw-r--r--  fs/nfs/dir.c | 8
-rw-r--r--  fs/nfs/getroot.c | 4
-rw-r--r--  fs/nfs/namespace.c | 29
-rw-r--r--  fs/nfs/nfs4proc.c | 8
-rw-r--r--  fs/nfs/nfs4state.c | 4
-rw-r--r--  fs/nfs/read.c | 10
-rw-r--r--  fs/nfs/super.c | 4
-rw-r--r--  fs/nfs/write.c | 24
-rw-r--r--  fs/nfsctl.c | 4
-rw-r--r--  fs/nfsd/auth.c | 10
-rw-r--r--  fs/nfsd/export.c | 126
-rw-r--r--  fs/nfsd/nfs3proc.c | 2
-rw-r--r--  fs/nfsd/nfs3xdr.c | 4
-rw-r--r--  fs/nfsd/nfs4recover.c | 34
-rw-r--r--  fs/nfsd/nfs4state.c | 4
-rw-r--r--  fs/nfsd/nfs4xdr.c | 12
-rw-r--r--  fs/nfsd/nfsfh.c | 26
-rw-r--r--  fs/nfsd/nfsproc.c | 6
-rw-r--r--  fs/nfsd/nfsxdr.c | 2
-rw-r--r--  fs/nfsd/vfs.c | 13
-rw-r--r--  fs/ntfs/aops.c | 20
-rw-r--r--  fs/ntfs/compress.c | 2
-rw-r--r--  fs/ntfs/file.c | 32
-rw-r--r--  fs/ntfs/malloc.h | 3
-rw-r--r--  fs/ocfs2/alloc.c | 2
-rw-r--r--  fs/ocfs2/aops.c | 6
-rw-r--r--  fs/ocfs2/cluster/endian.h | 30
-rw-r--r--  fs/ocfs2/cluster/nodemanager.c | 1
-rw-r--r--  fs/ocfs2/cluster/tcp_internal.h | 11
-rw-r--r--  fs/ocfs2/dlm/dlmapi.h | 7
-rw-r--r--  fs/ocfs2/dlm/dlmast.c | 1
-rw-r--r--  fs/ocfs2/dlm/dlmcommon.h | 24
-rw-r--r--  fs/ocfs2/dlm/dlmdomain.c | 195
-rw-r--r--  fs/ocfs2/dlm/dlmfs.c | 15
-rw-r--r--  fs/ocfs2/dlm/userdlm.c | 5
-rw-r--r--  fs/ocfs2/dlm/userdlm.h | 3
-rw-r--r--  fs/ocfs2/dlmglue.c | 29
-rw-r--r--  fs/ocfs2/dlmglue.h | 1
-rw-r--r--  fs/ocfs2/endian.h | 45
-rw-r--r--  fs/ocfs2/ocfs2.h | 2
-rw-r--r--  fs/ocfs2/ocfs2_lockingver.h | 30
-rw-r--r--  fs/ocfs2/super.c | 1
-rw-r--r--  fs/open.c | 66
-rw-r--r--  fs/openpromfs/inode.c | 45
-rw-r--r--  fs/partitions/Kconfig | 2
-rw-r--r--  fs/partitions/check.c | 48
-rw-r--r--  fs/pipe.c | 26
-rw-r--r--  fs/pnode.c | 2
-rw-r--r--  fs/proc/array.c | 163
-rw-r--r--  fs/proc/base.c | 183
-rw-r--r--  fs/proc/generic.c | 74
-rw-r--r--  fs/proc/inode.c | 61
-rw-r--r--  fs/proc/internal.h | 25
-rw-r--r--  fs/proc/kcore.c | 3
-rw-r--r--  fs/proc/nommu.c | 4
-rw-r--r--  fs/proc/proc_misc.c | 172
-rw-r--r--  fs/proc/proc_net.c | 7
-rw-r--r--  fs/proc/proc_sysctl.c | 6
-rw-r--r--  fs/proc/proc_tty.c | 5
-rw-r--r--  fs/proc/root.c | 1
-rw-r--r--  fs/proc/task_mmu.c | 692
-rw-r--r--  fs/proc/task_nommu.c | 13
-rw-r--r--  fs/proc/vmcore.c | 1
-rw-r--r--  fs/qnx4/inode.c | 47
-rw-r--r--  fs/qnx4/namei.c | 8
-rw-r--r--  fs/quota.c | 4
-rw-r--r--  fs/read_write.c | 1
-rw-r--r--  fs/reiserfs/inode.c | 6
-rw-r--r--  fs/reiserfs/prints.c | 2
-rw-r--r--  fs/reiserfs/procfs.c | 2
-rw-r--r--  fs/reiserfs/super.c | 52
-rw-r--r--  fs/reiserfs/xattr.c | 6
-rw-r--r--  fs/romfs/inode.c | 46
-rw-r--r--  fs/select.c | 2
-rw-r--r--  fs/seq_file.c | 6
-rw-r--r--  fs/signalfd.c | 1
-rw-r--r--  fs/smbfs/inode.c | 7
-rw-r--r--  fs/smbfs/sock.c | 5
-rw-r--r--  fs/splice.c | 10
-rw-r--r--  fs/stat.c | 19
-rw-r--r--  fs/super.c | 2
-rw-r--r--  fs/sysfs/group.c | 7
-rw-r--r--  fs/sysv/inode.c | 25
-rw-r--r--  fs/sysv/namei.c | 6
-rw-r--r--  fs/sysv/super.c | 4
-rw-r--r--  fs/sysv/sysv.h | 1
-rw-r--r--  fs/timerfd.c | 207
-rw-r--r--  fs/udf/balloc.c | 491
-rw-r--r--  fs/udf/crc.c | 2
-rw-r--r--  fs/udf/dir.c | 141
-rw-r--r--  fs/udf/directory.c | 102
-rw-r--r--  fs/udf/file.c | 43
-rw-r--r--  fs/udf/ialloc.c | 94
-rw-r--r--  fs/udf/inode.c | 973
-rw-r--r--  fs/udf/misc.c | 146
-rw-r--r--  fs/udf/namei.c | 443
-rw-r--r--  fs/udf/partition.c | 197
-rw-r--r--  fs/udf/super.c | 1158
-rw-r--r--  fs/udf/symlink.c | 9
-rw-r--r--  fs/udf/truncate.c | 100
-rw-r--r--  fs/udf/udf_i.h | 16
-rw-r--r--  fs/udf/udf_sb.h | 90
-rw-r--r--  fs/udf/udfdecl.h | 15
-rw-r--r--  fs/udf/udftime.c | 59
-rw-r--r--  fs/udf/unicode.c | 85
-rw-r--r--  fs/ufs/balloc.c | 2
-rw-r--r--  fs/ufs/cylinder.c | 2
-rw-r--r--  fs/ufs/dir.c | 2
-rw-r--r--  fs/ufs/file.c | 2
-rw-r--r--  fs/ufs/ialloc.c | 2
-rw-r--r--  fs/ufs/inode.c | 36
-rw-r--r--  fs/ufs/namei.c | 9
-rw-r--r--  fs/ufs/super.c | 22
-rw-r--r--  fs/ufs/symlink.c | 3
-rw-r--r--  fs/ufs/truncate.c | 2
-rw-r--r--  fs/ufs/ufs.h | 2
-rw-r--r--  fs/ufs/ufs_fs.h | 947
-rw-r--r--  fs/ufs/util.c | 2
-rw-r--r--  fs/utimes.c | 5
-rw-r--r--  fs/vfat/namei.c | 2
-rw-r--r--  fs/xattr.c | 77
-rw-r--r--  fs/xfs/linux-2.6/kmem.c | 3
-rw-r--r--  fs/xfs/linux-2.6/xfs_buf.c | 3
-rw-r--r--  fs/xfs/linux-2.6/xfs_ioctl.c | 8
-rw-r--r--  fs/xfs/linux-2.6/xfs_lrw.c | 2
-rw-r--r--  fs/xfs/quota/xfs_qm.c | 6
-rw-r--r--  fs/xfs/quota/xfs_trans_dquot.c | 6
-rw-r--r--  fs/xfs/xfs_alloc.c | 16
-rw-r--r--  fs/xfs/xfs_alloc_btree.c | 16
-rw-r--r--  fs/xfs/xfs_arch.h | 15
-rw-r--r--  fs/xfs/xfs_attr_leaf.c | 46
-rw-r--r--  fs/xfs/xfs_bmap_btree.c | 16
-rw-r--r--  fs/xfs/xfs_da_btree.c | 14
-rw-r--r--  fs/xfs/xfs_dir2_block.c | 8
-rw-r--r--  fs/xfs/xfs_dir2_data.c | 4
-rw-r--r--  fs/xfs/xfs_dir2_leaf.c | 16
-rw-r--r--  fs/xfs/xfs_dir2_node.c | 18
-rw-r--r--  fs/xfs/xfs_fsops.c | 4
-rw-r--r--  fs/xfs/xfs_ialloc.c | 12
-rw-r--r--  fs/xfs/xfs_ialloc_btree.c | 16
-rw-r--r--  fs/xfs/xfs_log.c | 6
-rw-r--r--  fs/xfs/xfs_trans.c | 24
331 files changed, 8635 insertions, 5583 deletions
diff --git a/fs/9p/fid.c b/fs/9p/fid.c
index b364da70ff28..dfebdbe7440e 100644
--- a/fs/9p/fid.c
+++ b/fs/9p/fid.c
@@ -175,7 +175,7 @@ struct p9_fid *v9fs_fid_lookup(struct dentry *dentry)
 	if (!wnames)
 		return ERR_PTR(-ENOMEM);
 
-	for (d = dentry, i = n; i >= 0; i--, d = d->d_parent)
+	for (d = dentry, i = (n-1); i >= 0; i--, d = d->d_parent)
 		wnames[i] = (char *) d->d_name.name;
 
 	clone = 1;
@@ -183,7 +183,7 @@ struct p9_fid *v9fs_fid_lookup(struct dentry *dentry)
 	while (i < n) {
 		l = min(n - i, P9_MAXWELEM);
 		fid = p9_client_walk(fid, l, &wnames[i], clone);
-		if (!fid) {
+		if (IS_ERR(fid)) {
 			kfree(wnames);
 			return fid;
 		}
diff --git a/fs/9p/v9fs.c b/fs/9p/v9fs.c
index fbb12dadba83..9b0f0222e8bb 100644
--- a/fs/9p/v9fs.c
+++ b/fs/9p/v9fs.c
@@ -3,7 +3,7 @@
  *
  *  This file contains functions assisting in mapping VFS to 9P2000
  *
- *  Copyright (C) 2004 by Eric Van Hensbergen <ericvh@gmail.com>
+ *  Copyright (C) 2004-2008 by Eric Van Hensbergen <ericvh@gmail.com>
  *  Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov>
  *
  *  This program is free software; you can redistribute it and/or modify
@@ -31,7 +31,6 @@
 #include <linux/idr.h>
 #include <net/9p/9p.h>
 #include <net/9p/transport.h>
-#include <net/9p/conn.h>
 #include <net/9p/client.h>
 #include "v9fs.h"
 #include "v9fs_vfs.h"
@@ -43,11 +42,11 @@
 
 enum {
 	/* Options that take integer arguments */
-	Opt_debug, Opt_msize, Opt_dfltuid, Opt_dfltgid, Opt_afid,
+	Opt_debug, Opt_dfltuid, Opt_dfltgid, Opt_afid,
 	/* String options */
 	Opt_uname, Opt_remotename, Opt_trans,
 	/* Options that take no arguments */
-	Opt_legacy, Opt_nodevmap,
+	Opt_nodevmap,
 	/* Cache options */
 	Opt_cache_loose,
 	/* Access options */
@@ -58,14 +57,11 @@ enum {
 
 static match_table_t tokens = {
 	{Opt_debug, "debug=%x"},
-	{Opt_msize, "msize=%u"},
 	{Opt_dfltuid, "dfltuid=%u"},
 	{Opt_dfltgid, "dfltgid=%u"},
 	{Opt_afid, "afid=%u"},
 	{Opt_uname, "uname=%s"},
 	{Opt_remotename, "aname=%s"},
-	{Opt_trans, "trans=%s"},
-	{Opt_legacy, "noextend"},
 	{Opt_nodevmap, "nodevmap"},
 	{Opt_cache_loose, "cache=loose"},
 	{Opt_cache_loose, "loose"},
@@ -85,16 +81,14 @@ static void v9fs_parse_options(struct v9fs_session_info *v9ses)
 	char *options;
 	substring_t args[MAX_OPT_ARGS];
 	char *p;
-	int option;
-	int ret;
+	int option = 0;
 	char *s, *e;
+	int ret;
 
 	/* setup defaults */
-	v9ses->maxdata = 8192;
 	v9ses->afid = ~0;
 	v9ses->debug = 0;
 	v9ses->cache = 0;
-	v9ses->trans = v9fs_default_trans();
 
 	if (!v9ses->options)
 		return;
@@ -106,7 +100,8 @@ static void v9fs_parse_options(struct v9fs_session_info *v9ses)
 			continue;
 		token = match_token(p, tokens, args);
 		if (token < Opt_uname) {
-			if ((ret = match_int(&args[0], &option)) < 0) {
+			ret = match_int(&args[0], &option);
+			if (ret < 0) {
 				P9_DPRINTK(P9_DEBUG_ERROR,
 					"integer field, but no integer?\n");
 				continue;
@@ -119,9 +114,7 @@ static void v9fs_parse_options(struct v9fs_session_info *v9ses)
 			p9_debug_level = option;
 #endif
 			break;
-		case Opt_msize:
-			v9ses->maxdata = option;
-			break;
+
 		case Opt_dfltuid:
 			v9ses->dfltuid = option;
 			break;
@@ -131,18 +124,12 @@ static void v9fs_parse_options(struct v9fs_session_info *v9ses)
 		case Opt_afid:
 			v9ses->afid = option;
 			break;
-		case Opt_trans:
-			v9ses->trans = v9fs_match_trans(&args[0]);
-			break;
 		case Opt_uname:
 			match_strcpy(v9ses->uname, &args[0]);
 			break;
 		case Opt_remotename:
 			match_strcpy(v9ses->aname, &args[0]);
 			break;
-		case Opt_legacy:
-			v9ses->flags &= ~V9FS_EXTENDED;
-			break;
 		case Opt_nodevmap:
 			v9ses->nodev = 1;
 			break;
@@ -185,7 +172,6 @@ struct p9_fid *v9fs_session_init(struct v9fs_session_info *v9ses,
 		  const char *dev_name, char *data)
 {
 	int retval = -EINVAL;
-	struct p9_trans *trans = NULL;
 	struct p9_fid *fid;
 
 	v9ses->uname = __getname();
@@ -207,24 +193,7 @@ struct p9_fid *v9fs_session_init(struct v9fs_session_info *v9ses,
 	v9ses->options = kstrdup(data, GFP_KERNEL);
 	v9fs_parse_options(v9ses);
 
-	if (v9ses->trans == NULL) {
-		retval = -EPROTONOSUPPORT;
-		P9_DPRINTK(P9_DEBUG_ERROR,
-				"No transport defined or default transport\n");
-		goto error;
-	}
-
-	trans = v9ses->trans->create(dev_name, v9ses->options);
-	if (IS_ERR(trans)) {
-		retval = PTR_ERR(trans);
-		trans = NULL;
-		goto error;
-	}
-	if ((v9ses->maxdata+P9_IOHDRSZ) > v9ses->trans->maxsize)
-		v9ses->maxdata = v9ses->trans->maxsize-P9_IOHDRSZ;
-
-	v9ses->clnt = p9_client_create(trans, v9ses->maxdata+P9_IOHDRSZ,
-				v9fs_extended(v9ses));
+	v9ses->clnt = p9_client_create(dev_name, v9ses->options);
 
 	if (IS_ERR(v9ses->clnt)) {
 		retval = PTR_ERR(v9ses->clnt);
@@ -236,6 +205,8 @@ struct p9_fid *v9fs_session_init(struct v9fs_session_info *v9ses,
 	if (!v9ses->clnt->dotu)
 		v9ses->flags &= ~V9FS_EXTENDED;
 
+	v9ses->maxdata = v9ses->clnt->msize;
+
 	/* for legacy mode, fall back to V9FS_ACCESS_ANY */
 	if (!v9fs_extended(v9ses) &&
 	    ((v9ses->flags&V9FS_ACCESS_MASK) == V9FS_ACCESS_USER)) {
diff --git a/fs/9p/v9fs.h b/fs/9p/v9fs.h
index db4b4193f2e2..7d3a1018db52 100644
--- a/fs/9p/v9fs.h
+++ b/fs/9p/v9fs.h
@@ -1,7 +1,7 @@
 /*
  *  V9FS definitions.
  *
- *  Copyright (C) 2004 by Eric Van Hensbergen <ericvh@gmail.com>
+ *  Copyright (C) 2004-2008 by Eric Van Hensbergen <ericvh@gmail.com>
  *  Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov>
  *
  *  This program is free software; you can redistribute it and/or modify
@@ -28,7 +28,6 @@
 
 struct v9fs_session_info {
 	/* options */
-	unsigned int maxdata;
 	unsigned char flags;	/* session flags */
 	unsigned char nodev;	/* set to 1 if no disable device mapping */
 	unsigned short debug;	/* debug level */
@@ -38,10 +37,10 @@ struct v9fs_session_info {
 	char *options;	/* copy of mount options */
 	char *uname;	/* user name to mount as */
 	char *aname;	/* name of remote hierarchy being mounted */
+	unsigned int maxdata;	/* max data for client interface */
 	unsigned int dfltuid;	/* default uid/muid for legacy support */
 	unsigned int dfltgid;	/* default gid for legacy support */
 	u32 uid;	/* if ACCESS_SINGLE, the uid that has access */
-	struct p9_trans_module *trans;	/* 9p transport */
 	struct p9_client *clnt;	/* 9p client */
 	struct dentry *debugfs_dir;
 };
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index ba4b1caa9c43..a616fff8906d 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -184,7 +184,7 @@ static const struct file_operations v9fs_cached_file_operations = {
 	.open = v9fs_file_open,
 	.release = v9fs_dir_release,
 	.lock = v9fs_file_lock,
-	.mmap = generic_file_mmap,
+	.mmap = generic_file_readonly_mmap,
 };
 
 const struct file_operations v9fs_file_operations = {
@@ -194,5 +194,5 @@ const struct file_operations v9fs_file_operations = {
 	.open = v9fs_file_open,
 	.release = v9fs_dir_release,
 	.lock = v9fs_file_lock,
-	.mmap = generic_file_mmap,
+	.mmap = generic_file_readonly_mmap,
 };
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 23581bcb599b..6a28842052ea 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -77,6 +77,8 @@ static int unixmode2p9mode(struct v9fs_session_info *v9ses, int mode)
 			res |= P9_DMSETUID;
 		if ((mode & S_ISGID) == S_ISGID)
 			res |= P9_DMSETGID;
+		if ((mode & S_ISVTX) == S_ISVTX)
+			res |= P9_DMSETVTX;
 		if ((mode & P9_DMLINK))
 			res |= P9_DMLINK;
 	}
@@ -119,6 +121,9 @@ static int p9mode2unixmode(struct v9fs_session_info *v9ses, int mode)
 
 		if ((mode & P9_DMSETGID) == P9_DMSETGID)
 			res |= S_ISGID;
+
+		if ((mode & P9_DMSETVTX) == P9_DMSETVTX)
+			res |= S_ISVTX;
 	}
 
 	return res;
@@ -568,7 +573,7 @@ static struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry,
 	v9ses = v9fs_inode2v9ses(dir);
 	dfid = v9fs_fid_lookup(dentry->d_parent);
 	if (IS_ERR(dfid))
-		return ERR_PTR(PTR_ERR(dfid));
+		return ERR_CAST(dfid);
 
 	name = (char *) dentry->d_name.name;
 	fid = p9_client_walk(dfid, 1, &name, 1);
diff --git a/fs/Kconfig b/fs/Kconfig
index 987b5d7cb21a..d7312825592b 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -463,40 +463,18 @@ config OCFS2_DEBUG_FS
 	  this option for debugging only as it is likely to decrease
 	  performance of the filesystem.
 
-config MINIX_FS
-	tristate "Minix fs support"
-	help
-	  Minix is a simple operating system used in many classes about OS's.
-	  The minix file system (method to organize files on a hard disk
-	  partition or a floppy disk) was the original file system for Linux,
-	  but has been superseded by the second extended file system ext2fs.
-	  You don't want to use the minix file system on your hard disk
-	  because of certain built-in restrictions, but it is sometimes found
-	  on older Linux floppy disks. This option will enlarge your kernel
-	  by about 28 KB. If unsure, say N.
+endif # BLOCK
 
-	  To compile this file system support as a module, choose M here: the
-	  module will be called minix. Note that the file system of your root
-	  partition (the one containing the directory /) cannot be compiled as
-	  a module.
-
-config ROMFS_FS
-	tristate "ROM file system support"
-	---help---
-	  This is a very small read-only file system mainly intended for
-	  initial ram disks of installation disks, but it could be used for
-	  other read-only media as well. Read
-	  <file:Documentation/filesystems/romfs.txt> for details.
-
-	  To compile this file system support as a module, choose M here: the
-	  module will be called romfs. Note that the file system of your
-	  root partition (the one containing the directory /) cannot be a
-	  module.
-
-	  If you don't know whether you need it, then you don't need it:
-	  answer N.
+config DNOTIFY
+	bool "Dnotify support"
+	default y
+	help
+	  Dnotify is a directory-based per-fd file change notification system
+	  that uses signals to communicate events to user-space. There exist
+	  superior alternatives, but some applications may still rely on
+	  dnotify.
 
-endif
+	  If unsure, say Y.
 
 config INOTIFY
 	bool "Inotify file change notification support"
@@ -577,17 +555,6 @@ config QUOTACTL
 	depends on XFS_QUOTA || QUOTA
 	default y
 
-config DNOTIFY
-	bool "Dnotify support"
-	default y
-	help
-	  Dnotify is a directory-based per-fd file change notification system
-	  that uses signals to communicate events to user-space. There exist
-	  superior alternatives, but some applications may still rely on
-	  dnotify.
-
-	  If unsure, say Y.
-
 config AUTOFS_FS
 	tristate "Kernel automounter support"
 	help
@@ -713,7 +680,7 @@ config UDF_NLS
 	depends on (UDF_FS=m && NLS) || (UDF_FS=y && NLS=y)
 
 endmenu
-endif
+endif # BLOCK
 
 if BLOCK
 menu "DOS/FAT/NT Filesystems"
@@ -896,7 +863,7 @@ config NTFS_RW
 	  It is perfectly safe to say N here.
 
 endmenu
-endif
+endif # BLOCK
 
 menu "Pseudo filesystems"
 
@@ -1152,7 +1119,7 @@ config BEFS_DEBUG
 	depends on BEFS_FS
 	help
 	  If you say Y here, you can use the 'debug' mount option to enable
-	  debugging output from the driver. 
+	  debugging output from the driver.
 
 config BFS_FS
 	tristate "BFS file system support (EXPERIMENTAL)"
@@ -1263,7 +1230,7 @@ config JFFS2_FS_XATTR
 	  Extended attributes are name:value pairs associated with inodes by
 	  the kernel or by users (see the attr(5) manual page, or visit
 	  <http://acl.bestbits.at/> for details).
-	  
+
 	  If unsure, say N.
 
 config JFFS2_FS_POSIX_ACL
@@ -1274,10 +1241,10 @@ config JFFS2_FS_POSIX_ACL
 	help
 	  Posix Access Control Lists (ACLs) support permissions for users and
 	  groups beyond the owner/group/world scheme.
-	  
+
 	  To learn more about Access Control Lists, visit the Posix ACLs for
 	  Linux website <http://acl.bestbits.at/>.
-	  
+
 	  If you don't know what Access Control Lists are, say N
 
 config JFFS2_FS_SECURITY
@@ -1289,7 +1256,7 @@ config JFFS2_FS_SECURITY
 	  implemented by security modules like SELinux. This option
 	  enables an extended attribute handler for file security
 	  labels in the jffs2 filesystem.
-	  
+
 	  If you are not using a security module that requires using
 	  extended attributes for file security labels, say N.
 
@@ -1417,6 +1384,24 @@ config VXFS_FS
 	  To compile this as a module, choose M here: the module will be
 	  called freevxfs. If unsure, say N.
 
+config MINIX_FS
+	tristate "Minix file system support"
+	depends on BLOCK
+	help
+	  Minix is a simple operating system used in many classes about OS's.
+	  The minix file system (method to organize files on a hard disk
+	  partition or a floppy disk) was the original file system for Linux,
+	  but has been superseded by the second extended file system ext2fs.
+	  You don't want to use the minix file system on your hard disk
+	  because of certain built-in restrictions, but it is sometimes found
+	  on older Linux floppy disks. This option will enlarge your kernel
+	  by about 28 KB. If unsure, say N.
+
+	  To compile this file system support as a module, choose M here: the
+	  module will be called minix. Note that the file system of your root
+	  partition (the one containing the directory /) cannot be compiled as
+	  a module.
+
 
 config HPFS_FS
 	tristate "OS/2 HPFS file system support"
@@ -1434,7 +1419,6 @@ config HPFS_FS
 	  module will be called hpfs. If unsure, say N.
 
 
-
 config QNX4FS_FS
 	tristate "QNX4 file system support (read only)"
 	depends on BLOCK
@@ -1461,6 +1445,22 @@ config QNX4FS_RW
 	  It's currently broken, so for now:
 	  answer N.
 
+config ROMFS_FS
+	tristate "ROM file system support"
+	depends on BLOCK
+	---help---
+	  This is a very small read-only file system mainly intended for
+	  initial ram disks of installation disks, but it could be used for
+	  other read-only media as well. Read
+	  <file:Documentation/filesystems/romfs.txt> for details.
+
+	  To compile this file system support as a module, choose M here: the
+	  module will be called romfs. Note that the file system of your
+	  root partition (the one containing the directory /) cannot be a
+	  module.
+
+	  If you don't know whether you need it, then you don't need it:
+	  answer N.
 
 
 config SYSV_FS
@@ -1501,7 +1501,6 @@ config SYSV_FS
 	  If you haven't heard about all of this before, it's safe to say N.
 
 
-
 config UFS_FS
 	tristate "UFS file system support (read only)"
 	depends on BLOCK
@@ -1779,12 +1778,9 @@ config SUNRPC_GSS
 	tristate
 
 config SUNRPC_XPRT_RDMA
-	tristate "RDMA transport for sunrpc (EXPERIMENTAL)"
+	tristate
 	depends on SUNRPC && INFINIBAND && EXPERIMENTAL
-	default m
-	help
-	  Adds a client RPC transport for supporting kernel NFS over RDMA
-	  mounts, including Infiniband and iWARP. Experimental.
+	default SUNRPC && INFINIBAND
 
 config SUNRPC_BIND34
 	bool "Support for rpcbind versions 3 & 4 (EXPERIMENTAL)"
@@ -1835,7 +1831,7 @@ config RPCSEC_GSS_SPKM3
 	  If unsure, say N.
 
 config SMB_FS
-	tristate "SMB file system support (to mount Windows shares etc.)"
+	tristate "SMB file system support (OBSOLETE, please use CIFS)"
 	depends on INET
 	select NLS
 	help
@@ -1858,8 +1854,8 @@ config SMB_FS
 	  General information about how to connect Linux, Windows machines and
 	  Macs is on the WWW at <http://www.eats.com/linux_mac_win.html>.
 
-	  To compile the SMB support as a module, choose M here: the module will
-	  be called smbfs. Most people say N, however.
+	  To compile the SMB support as a module, choose M here:
+	  the module will be called smbfs. Most people say N, however.
 
 config SMB_NLS_DEFAULT
 	bool "Use a default NLS"
@@ -1891,7 +1887,7 @@ config SMB_NLS_REMOTE
 	  smbmount from samba 2.2.0 or later supports this.
 
 config CIFS
-	tristate "CIFS support (advanced network filesystem for Samba, Window and other CIFS compliant servers)"
+	tristate "CIFS support (advanced network filesystem, SMBFS successor)"
 	depends on INET
 	select NLS
 	help
@@ -1949,16 +1945,16 @@ config CIFS_WEAK_PW_HASH
 	  LANMAN based servers such as OS/2 and Windows 95, but such
 	  mounts may be less secure than mounts using NTLM or more recent
 	  security mechanisms if you are on a public network. Unless you
-	  have a need to access old SMB servers (and are on a private 
-	  network) you probably want to say N. Even if this support 
-	  is enabled in the kernel build, LANMAN authentication will not be 
-	  used automatically. At runtime LANMAN mounts are disabled but 
-	  can be set to required (or optional) either in 
-	  /proc/fs/cifs (see fs/cifs/README for more detail) or via an 
-	  option on the mount command. This support is disabled by 
-	  default in order to reduce the possibility of a downgrade 
-	  attack. 
+	  have a need to access old SMB servers (and are on a private
+	  network) you probably want to say N. Even if this support
+	  is enabled in the kernel build, LANMAN authentication will not be
+	  used automatically. At runtime LANMAN mounts are disabled but
+	  can be set to required (or optional) either in
+	  /proc/fs/cifs (see fs/cifs/README for more detail) or via an
+	  option on the mount command. This support is disabled by
+	  default in order to reduce the possibility of a downgrade
+	  attack.
 
 	  If unsure, say N.
 
 config CIFS_XATTR
@@ -1999,7 +1995,7 @@ config CIFS_DEBUG2
 	  messages in some error paths, slowing performance. This
 	  option can be turned off unless you are debugging
 	  cifs problems. If unsure, say N.
-	  
+
 config CIFS_EXPERIMENTAL
 	bool "CIFS Experimental Features (EXPERIMENTAL)"
 	depends on CIFS && EXPERIMENTAL
@@ -2090,7 +2086,7 @@ config CODA_FS_OLD_API
 	  However this new API is not backward compatible with older
 	  clients. If you really need to run the old Coda userspace
 	  cache manager then say Y.
-	  
+
 	  For most cases you probably want to say N.
 
 config AFS_FS
diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
index 7c3d5f923da1..b5c3b6114add 100644
--- a/fs/Kconfig.binfmt
+++ b/fs/Kconfig.binfmt
@@ -61,7 +61,8 @@ config BINFMT_SHARED_FLAT
 
 config BINFMT_AOUT
 	tristate "Kernel support for a.out and ECOFF binaries"
-	depends on X86_32 || ALPHA || ARM || M68K || SPARC32
+	depends on ARCH_SUPPORTS_AOUT && \
+		(X86_32 || ALPHA || ARM || M68K || SPARC32)
 	---help---
 	  A.out (Assembler.OUTput) is a set of formats for libraries and
 	  executables used in the earliest versions of UNIX. Linux used
diff --git a/fs/adfs/super.c b/fs/adfs/super.c
index b36695ae5c2e..9e421eeb672b 100644
--- a/fs/adfs/super.c
+++ b/fs/adfs/super.c
@@ -20,6 +20,8 @@
 #include <linux/vfs.h>
 #include <linux/parser.h>
 #include <linux/bitops.h>
+#include <linux/mount.h>
+#include <linux/seq_file.h>
 
 #include <asm/uaccess.h>
 #include <asm/system.h>
@@ -30,6 +32,9 @@
 #include "dir_f.h"
 #include "dir_fplus.h"
 
+#define ADFS_DEFAULT_OWNER_MASK S_IRWXU
+#define ADFS_DEFAULT_OTHER_MASK (S_IRWXG | S_IRWXO)
+
 void __adfs_error(struct super_block *sb, const char *function, const char *fmt, ...)
 {
 	char error_buf[128];
@@ -134,6 +139,22 @@ static void adfs_put_super(struct super_block *sb)
 	sb->s_fs_info = NULL;
 }
 
+static int adfs_show_options(struct seq_file *seq, struct vfsmount *mnt)
+{
+	struct adfs_sb_info *asb = ADFS_SB(mnt->mnt_sb);
+
+	if (asb->s_uid != 0)
+		seq_printf(seq, ",uid=%u", asb->s_uid);
+	if (asb->s_gid != 0)
+		seq_printf(seq, ",gid=%u", asb->s_gid);
+	if (asb->s_owner_mask != ADFS_DEFAULT_OWNER_MASK)
+		seq_printf(seq, ",ownmask=%o", asb->s_owner_mask);
+	if (asb->s_other_mask != ADFS_DEFAULT_OTHER_MASK)
+		seq_printf(seq, ",othmask=%o", asb->s_other_mask);
+
+	return 0;
+}
+
 enum {Opt_uid, Opt_gid, Opt_ownmask, Opt_othmask, Opt_err};
 
 static match_table_t tokens = {
@@ -259,6 +280,7 @@ static const struct super_operations adfs_sops = {
 	.put_super = adfs_put_super,
 	.statfs = adfs_statfs,
 	.remount_fs = adfs_remount,
+	.show_options = adfs_show_options,
 };
 
 static struct adfs_discmap *adfs_read_map(struct super_block *sb, struct adfs_discrecord *dr)
@@ -344,8 +366,8 @@ static int adfs_fill_super(struct super_block *sb, void *data, int silent)
 	/* set default options */
 	asb->s_uid = 0;
 	asb->s_gid = 0;
-	asb->s_owner_mask = S_IRWXU;
-	asb->s_other_mask = S_IRWXG | S_IRWXO;
+	asb->s_owner_mask = ADFS_DEFAULT_OWNER_MASK;
+	asb->s_other_mask = ADFS_DEFAULT_OTHER_MASK;
 
 	if (parse_options(sb, data))
 		goto error;
diff --git a/fs/affs/affs.h b/fs/affs/affs.h
index 232c69493683..d5bd497ab9cb 100644
--- a/fs/affs/affs.h
+++ b/fs/affs/affs.h
@@ -174,7 +174,8 @@ extern void affs_put_inode(struct inode *inode);
 extern void affs_drop_inode(struct inode *inode);
 extern void affs_delete_inode(struct inode *inode);
 extern void affs_clear_inode(struct inode *inode);
-extern void affs_read_inode(struct inode *inode);
+extern struct inode *affs_iget(struct super_block *sb,
+			unsigned long ino);
 extern int affs_write_inode(struct inode *inode, int);
 extern int affs_add_entry(struct inode *dir, struct inode *inode, struct dentry *dentry, s32 type);
 
diff --git a/fs/affs/amigaffs.c b/fs/affs/amigaffs.c
index f4de4b98004f..805573005de6 100644
--- a/fs/affs/amigaffs.c
+++ b/fs/affs/amigaffs.c
@@ -170,9 +170,11 @@ affs_remove_link(struct dentry *dentry)
 	if (!link_bh)
 		goto done;
 
-	dir = iget(sb, be32_to_cpu(AFFS_TAIL(sb, link_bh)->parent));
-	if (!dir)
+	dir = affs_iget(sb, be32_to_cpu(AFFS_TAIL(sb, link_bh)->parent));
+	if (IS_ERR(dir)) {
+		retval = PTR_ERR(dir);
 		goto done;
+	}
 
 	affs_lock_dir(dir);
 	affs_fix_dcache(dentry, link_ino);
diff --git a/fs/affs/inode.c b/fs/affs/inode.c
index 4609a6c13fe9..27fe6cbe43ae 100644
--- a/fs/affs/inode.c
+++ b/fs/affs/inode.c
@@ -15,20 +15,25 @@
 extern const struct inode_operations affs_symlink_inode_operations;
 extern struct timezone sys_tz;
 
-void
-affs_read_inode(struct inode *inode)
+struct inode *affs_iget(struct super_block *sb, unsigned long ino)
 {
-	struct super_block *sb = inode->i_sb;
 	struct affs_sb_info *sbi = AFFS_SB(sb);
 	struct buffer_head *bh;
 	struct affs_head *head;
 	struct affs_tail *tail;
+	struct inode *inode;
 	u32 block;
 	u32 size;
 	u32 prot;
 	u16 id;
 
-	pr_debug("AFFS: read_inode(%lu)\n",inode->i_ino);
+	inode = iget_locked(sb, ino);
+	if (!inode)
+		return ERR_PTR(-ENOMEM);
+	if (!(inode->i_state & I_NEW))
+		return inode;
+
+	pr_debug("AFFS: affs_iget(%lu)\n", inode->i_ino);
 
 	block = inode->i_ino;
 	bh = affs_bread(sb, block);
@@ -154,12 +159,13 @@ affs_read_inode(struct inode *inode)
 			sys_tz.tz_minuteswest * 60;
 	inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec = inode->i_atime.tv_nsec = 0;
 	affs_brelse(bh);
-	return;
+	unlock_new_inode(inode);
+	return inode;
 
 bad_inode:
-	make_bad_inode(inode);
 	affs_brelse(bh);
-	return;
+	iget_failed(inode);
+	return ERR_PTR(-EIO);
 }
 
 int
diff --git a/fs/affs/namei.c b/fs/affs/namei.c
index a42143ca0169..2218f1ee71ce 100644
--- a/fs/affs/namei.c
+++ b/fs/affs/namei.c
@@ -208,9 +208,8 @@ affs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
 	affs_lock_dir(dir);
 	bh = affs_find_entry(dir, dentry);
 	affs_unlock_dir(dir);
-	if (IS_ERR(bh)) {
-		return ERR_PTR(PTR_ERR(bh));
-	}
+	if (IS_ERR(bh))
+		return ERR_CAST(bh);
 	if (bh) {
 		u32 ino = bh->b_blocknr;
 
@@ -223,10 +222,9 @@ affs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
 			ino = be32_to_cpu(AFFS_TAIL(sb, bh)->original);
 		}
 		affs_brelse(bh);
-		inode = iget(sb, ino);
-		if (!inode) {
-			return ERR_PTR(-EACCES);
-		}
+		inode = affs_iget(sb, ino);
+		if (IS_ERR(inode))
+			return ERR_PTR(PTR_ERR(inode));
 	}
 	dentry->d_op = AFFS_SB(sb)->s_flags & SF_INTL ? &affs_intl_dentry_operations : &affs_dentry_operations;
 	d_add(dentry, inode);
diff --git a/fs/affs/super.c b/fs/affs/super.c
index b53e5d0ec65c..d2dc047cb479 100644
--- a/fs/affs/super.c
+++ b/fs/affs/super.c
@@ -113,7 +113,6 @@ static void destroy_inodecache(void)
 static const struct super_operations affs_sops = {
 	.alloc_inode = affs_alloc_inode,
 	.destroy_inode = affs_destroy_inode,
-	.read_inode = affs_read_inode,
 	.write_inode = affs_write_inode,
 	.put_inode = affs_put_inode,
 	.drop_inode = affs_drop_inode,
@@ -123,6 +122,7 @@ static const struct super_operations affs_sops = {
 	.write_super = affs_write_super,
 	.statfs = affs_statfs,
 	.remount_fs = affs_remount,
+	.show_options = generic_show_options,
 };
 
 enum {
@@ -271,6 +271,9 @@ static int affs_fill_super(struct super_block *sb, void *data, int silent)
 	unsigned long mount_flags;
 	int tmp_flags; /* fix remount prototype... */
 	u8 sig[4];
+	int ret = -EINVAL;
+
+	save_mount_options(sb, data);
 
 	pr_debug("AFFS: read_super(%s)\n",data ? (const char *)data : "no options");
 
@@ -444,7 +447,12 @@ got_root:
 
 	/* set up enough so that it can read an inode */
 
-	root_inode = iget(sb, root_block);
+	root_inode = affs_iget(sb, root_block);
+	if (IS_ERR(root_inode)) {
+		ret = PTR_ERR(root_inode);
+		goto out_error_noinode;
+	}
+
 	sb->s_root = d_alloc_root(root_inode);
 	if (!sb->s_root) {
 		printk(KERN_ERR "AFFS: Get root inode failed\n");
@@ -461,12 +469,13 @@ got_root:
 out_error:
 	if (root_inode)
 		iput(root_inode);
+out_error_noinode:
 	kfree(sbi->s_bitmap);
 	affs_brelse(root_bh);
 	kfree(sbi->s_prefix);
 	kfree(sbi);
 	sb->s_fs_info = NULL;
-	return -EINVAL;
+	return ret;
 }
 
 static int
@@ -481,14 +490,21 @@ affs_remount(struct super_block *sb, int *flags, char *data)
 	int root_block;
 	unsigned long mount_flags;
 	int res = 0;
+	char *new_opts = kstrdup(data, GFP_KERNEL);
 
 	pr_debug("AFFS: remount(flags=0x%x,opts=\"%s\")\n",*flags,data);
 
 	*flags |= MS_NODIRATIME;
 
-	if (!parse_options(data,&uid,&gid,&mode,&reserved,&root_block,
-		&blocksize,&sbi->s_prefix,sbi->s_volume,&mount_flags))
+	if (!parse_options(data, &uid, &gid, &mode, &reserved, &root_block,
+			   &blocksize, &sbi->s_prefix, sbi->s_volume,
+			   &mount_flags)) {
+		kfree(new_opts);
 		return -EINVAL;
+	}
+	kfree(sb->s_options);
+	sb->s_options = new_opts;
+
 	sbi->s_flags = mount_flags;
 	sbi->s_mode = mode;
 	sbi->s_uid = uid;
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index 0cc3597c1197..b58af8f18bc4 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -512,7 +512,7 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
 	key = afs_request_key(vnode->volume->cell);
 	if (IS_ERR(key)) {
 		_leave(" = %ld [key]", PTR_ERR(key));
-		return ERR_PTR(PTR_ERR(key));
+		return ERR_CAST(key);
 	}
 
 	ret = afs_validate(vnode, key);
@@ -540,7 +540,7 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
 	key_put(key);
 	if (IS_ERR(inode)) {
 		_leave(" = %ld", PTR_ERR(inode));
-		return ERR_PTR(PTR_ERR(inode));
+		return ERR_CAST(inode);
 	}
 
 	dentry->d_op = &afs_fs_dentry_operations;
diff --git a/fs/afs/inode.c b/fs/afs/inode.c
index 84750c8e9f95..08db82e1343a 100644
--- a/fs/afs/inode.c
+++ b/fs/afs/inode.c
@@ -196,10 +196,7 @@ struct inode *afs_iget(struct super_block *sb, struct key *key,
 
 	/* failure */
 bad_inode:
-	make_bad_inode(inode);
-	unlock_new_inode(inode);
-	iput(inode);
-
+	iget_failed(inode);
 	_leave(" = %d [bad]", ret);
 	return ERR_PTR(ret);
 }
diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c
index 5ce43b63c60e..a3510b8ba3e7 100644
--- a/fs/afs/mntpt.c
+++ b/fs/afs/mntpt.c
@@ -218,16 +218,16 @@ static void *afs_mntpt_follow_link(struct dentry *dentry, struct nameidata *nd)
 	_enter("%p{%s},{%s:%p{%s},}",
 	       dentry,
 	       dentry->d_name.name,
-	       nd->mnt->mnt_devname,
+	       nd->path.mnt->mnt_devname,
 	       dentry,
-	       nd->dentry->d_name.name);
+	       nd->path.dentry->d_name.name);
 
-	dput(nd->dentry);
-	nd->dentry = dget(dentry);
+	dput(nd->path.dentry);
+	nd->path.dentry = dget(dentry);
 
-	newmnt = afs_mntpt_do_automount(nd->dentry);
+	newmnt = afs_mntpt_do_automount(nd->path.dentry);
 	if (IS_ERR(newmnt)) {
-		path_release(nd);
+		path_put(&nd->path);
 		return (void *)newmnt;
 	}
 
@@ -235,17 +235,16 @@ static void *afs_mntpt_follow_link(struct dentry *dentry, struct nameidata *nd)
 	err = do_add_mount(newmnt, nd, MNT_SHRINKABLE, &afs_vfsmounts);
 	switch (err) {
 	case 0:
-		dput(nd->dentry);
-		mntput(nd->mnt);
-		nd->mnt = newmnt;
-		nd->dentry = dget(newmnt->mnt_root);
+		path_put(&nd->path);
+		nd->path.mnt = newmnt;
+		nd->path.dentry = dget(newmnt->mnt_root);
 		schedule_delayed_work(&afs_mntpt_expiry_timer,
 				      afs_mntpt_expiry_timeout * HZ);
 		break;
 	case -EBUSY:
 		/* someone else made a mount here whilst we were busy */
-		while (d_mountpoint(nd->dentry) &&
-		       follow_down(&nd->mnt, &nd->dentry))
+		while (d_mountpoint(nd->path.dentry) &&
+		       follow_down(&nd->path.mnt, &nd->path.dentry))
 			;
 		err = 0;
 	default:
diff --git a/fs/afs/security.c b/fs/afs/security.c
index 566fe712c682..3bcbeceba1bb 100644
--- a/fs/afs/security.c
+++ b/fs/afs/security.c
@@ -95,7 +95,7 @@ static struct afs_vnode *afs_get_auth_inode(struct afs_vnode *vnode,
 		auth_inode = afs_iget(vnode->vfs_inode.i_sb, key,
 				      &vnode->status.parent, NULL, NULL);
 		if (IS_ERR(auth_inode))
-			return ERR_PTR(PTR_ERR(auth_inode));
+			return ERR_CAST(auth_inode);
 	}
 
 	auth_vnode = AFS_FS_I(auth_inode);
@@ -287,7 +287,7 @@ static int afs_check_permit(struct afs_vnode *vnode, struct key *key,
 int afs_permission(struct inode *inode, int mask, struct nameidata *nd)
 {
 	struct afs_vnode *vnode = AFS_FS_I(inode);
-	afs_access_t access;
+	afs_access_t uninitialized_var(access);
 	struct key *key;
 	int ret;
 
diff --git a/fs/afs/super.c b/fs/afs/super.c
index 4b2558c42213..36bbce45f44b 100644
--- a/fs/afs/super.c
+++ b/fs/afs/super.c
@@ -52,6 +52,7 @@ static const struct super_operations afs_super_ops = {
 	.clear_inode = afs_clear_inode,
 	.umount_begin = afs_umount_begin,
 	.put_super = afs_put_super,
+	.show_options = generic_show_options,
 };
 
 static struct kmem_cache *afs_inode_cachep;
@@ -357,6 +358,7 @@ static int afs_get_sb(struct file_system_type *fs_type,
 	struct super_block *sb;
 	struct afs_volume *vol;
 	struct key *key;
+	char *new_opts = kstrdup(options, GFP_KERNEL);
 	int ret;
 
 	_enter(",,%s,%p", dev_name, options);
@@ -408,9 +410,11 @@ static int afs_get_sb(struct file_system_type *fs_type,
 			deactivate_super(sb);
 			goto error;
 		}
+		sb->s_options = new_opts;
 		sb->s_flags |= MS_ACTIVE;
 	} else {
 		_debug("reuse");
+		kfree(new_opts);
 		ASSERTCMP(sb->s_flags, &, MS_ACTIVE);
 	}
 
@@ -424,6 +428,7 @@ error:
 	afs_put_volume(params.volume);
 	afs_put_cell(params.cell);
 	key_put(params.key);
+	kfree(new_opts);
 	_leave(" = %d", ret);
 	return ret;
 }
diff --git a/fs/aio.c b/fs/aio.c
index 8a37dbbf3437..b74c567383bc 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -317,7 +317,7 @@ out:
 /* wait_on_sync_kiocb:
  * Waits on the given sync kiocb to complete.
  */
-ssize_t fastcall wait_on_sync_kiocb(struct kiocb *iocb)
+ssize_t wait_on_sync_kiocb(struct kiocb *iocb)
 {
 	while (iocb->ki_users) {
 		set_current_state(TASK_UNINTERRUPTIBLE);
@@ -336,7 +336,7 @@ ssize_t fastcall wait_on_sync_kiocb(struct kiocb *iocb)
  * go away, they will call put_ioctx and release any pinned memory
  * associated with the request (held via struct page * references).
  */
-void fastcall exit_aio(struct mm_struct *mm)
+void exit_aio(struct mm_struct *mm)
 {
 	struct kioctx *ctx = mm->ioctx_list;
 	mm->ioctx_list = NULL;
@@ -365,7 +365,7 @@ void fastcall exit_aio(struct mm_struct *mm)
  * Called when the last user of an aio context has gone away,
  * and the struct needs to be freed.
  */
-void fastcall __put_ioctx(struct kioctx *ctx)
+void __put_ioctx(struct kioctx *ctx)
 {
 	unsigned nr_events = ctx->max_reqs;
 
@@ -397,8 +397,7 @@ void fastcall __put_ioctx(struct kioctx *ctx)
  * This prevents races between the aio code path referencing the
  * req (after submitting it) and aio_complete() freeing the req.
  */
-static struct kiocb *__aio_get_req(struct kioctx *ctx);
-static struct kiocb fastcall *__aio_get_req(struct kioctx *ctx)
+static struct kiocb *__aio_get_req(struct kioctx *ctx)
 {
 	struct kiocb *req = NULL;
 	struct aio_ring *ring;
@@ -533,7 +532,7 @@ static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
  * Returns true if this put was the last user of the kiocb,
  * false if the request is still in use.
  */
-int fastcall aio_put_req(struct kiocb *req)
+int aio_put_req(struct kiocb *req)
 {
 	struct kioctx *ctx = req->ki_ctx;
 	int ret;
@@ -893,7 +892,7 @@ static void try_queue_kicked_iocb(struct kiocb *iocb)
  * The retry is usually executed by aio workqueue
  * threads (See aio_kick_handler).
  */
-void fastcall kick_iocb(struct kiocb *iocb)
+void kick_iocb(struct kiocb *iocb)
 {
 	/* sync iocbs are easy: they can only ever be executing from a
 	 * single context. */
@@ -912,7 +911,7 @@ EXPORT_SYMBOL(kick_iocb);
912 * Returns true if this is the last user of the request. The 911 * Returns true if this is the last user of the request. The
913 * only other user of the request can be the cancellation code. 912 * only other user of the request can be the cancellation code.
914 */ 913 */
915int fastcall aio_complete(struct kiocb *iocb, long res, long res2) 914int aio_complete(struct kiocb *iocb, long res, long res2)
916{ 915{
917 struct kioctx *ctx = iocb->ki_ctx; 916 struct kioctx *ctx = iocb->ki_ctx;
918 struct aio_ring_info *info; 917 struct aio_ring_info *info;
@@ -1330,6 +1329,10 @@ static ssize_t aio_rw_vect_retry(struct kiocb *iocb)
1330 opcode = IOCB_CMD_PWRITEV; 1329 opcode = IOCB_CMD_PWRITEV;
1331 } 1330 }
1332 1331
1332 /* This matches the pread()/pwrite() logic */
1333 if (iocb->ki_pos < 0)
1334 return -EINVAL;
1335
1333 do { 1336 do {
1334 ret = rw_op(iocb, &iocb->ki_iovec[iocb->ki_cur_seg], 1337 ret = rw_op(iocb, &iocb->ki_iovec[iocb->ki_cur_seg],
1335 iocb->ki_nr_segs - iocb->ki_cur_seg, 1338 iocb->ki_nr_segs - iocb->ki_cur_seg,
@@ -1348,6 +1351,13 @@ static ssize_t aio_rw_vect_retry(struct kiocb *iocb)
1348 if ((ret == 0) || (iocb->ki_left == 0)) 1351 if ((ret == 0) || (iocb->ki_left == 0))
1349 ret = iocb->ki_nbytes - iocb->ki_left; 1352 ret = iocb->ki_nbytes - iocb->ki_left;
1350 1353
1354 /* If we managed to write some out we return that, rather than
1355 * the eventual error. */
1356 if (opcode == IOCB_CMD_PWRITEV
1357 && ret < 0 && ret != -EIOCBQUEUED && ret != -EIOCBRETRY
1358 && iocb->ki_nbytes - iocb->ki_left)
1359 ret = iocb->ki_nbytes - iocb->ki_left;
1360
1351 return ret; 1361 return ret;
1352} 1362}
1353 1363
@@ -1523,7 +1533,7 @@ static int aio_wake_function(wait_queue_t *wait, unsigned mode,
1523 return 1; 1533 return 1;
1524} 1534}
1525 1535
1526int fastcall io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, 1536int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
1527 struct iocb *iocb) 1537 struct iocb *iocb)
1528{ 1538{
1529 struct kiocb *req; 1539 struct kiocb *req;
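
The two aio.c hunks above give aio_rw_vect_retry() the same semantics as pread()/pwrite(): a negative file position is rejected up front, and a short transfer followed by an error is reported as the byte count rather than the error. A minimal user-space sketch of that convention, assuming an ordinary write() loop instead of the aio retry machinery used here:

	#include <errno.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	/* Illustrative only: report partial progress instead of the eventual
	 * error, mirroring the "if we managed to write some out" rule above. */
	static ssize_t write_some(int fd, const void *buf, size_t len)
	{
		size_t done = 0;

		while (done < len) {
			ssize_t n = write(fd, (const char *)buf + done, len - done);

			if (n > 0) {
				done += n;
				continue;
			}
			if (n < 0 && errno == EINTR)
				continue;
			/* Error (or zero progress): partial progress wins. */
			return done ? (ssize_t)done : n;
		}
		return done;
	}

	int main(void)
	{
		const char msg[] = "hello\n";
		ssize_t n = write_some(STDOUT_FILENO, msg, strlen(msg));

		fprintf(stderr, "wrote %zd bytes\n", n);
		return n < 0;
	}
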
diff --git a/fs/autofs/autofs_i.h b/fs/autofs/autofs_i.h
index 8b4cca3c4705..901a3e67ec45 100644
--- a/fs/autofs/autofs_i.h
+++ b/fs/autofs/autofs_i.h
@@ -150,6 +150,7 @@ extern const struct file_operations autofs_root_operations;
150 150
151int autofs_fill_super(struct super_block *, void *, int); 151int autofs_fill_super(struct super_block *, void *, int);
152void autofs_kill_sb(struct super_block *sb); 152void autofs_kill_sb(struct super_block *sb);
153struct inode *autofs_iget(struct super_block *, unsigned long);
153 154
154/* Queue management functions */ 155/* Queue management functions */
155 156
diff --git a/fs/autofs/inode.c b/fs/autofs/inode.c
index 45f5992a0957..dda510d31f84 100644
--- a/fs/autofs/inode.c
+++ b/fs/autofs/inode.c
@@ -52,11 +52,9 @@ out_kill_sb:
52 kill_anon_super(sb); 52 kill_anon_super(sb);
53} 53}
54 54
55static void autofs_read_inode(struct inode *inode);
56
57static const struct super_operations autofs_sops = { 55static const struct super_operations autofs_sops = {
58 .read_inode = autofs_read_inode,
59 .statfs = simple_statfs, 56 .statfs = simple_statfs,
57 .show_options = generic_show_options,
60}; 58};
61 59
62enum {Opt_err, Opt_fd, Opt_uid, Opt_gid, Opt_pgrp, Opt_minproto, Opt_maxproto}; 60enum {Opt_err, Opt_fd, Opt_uid, Opt_gid, Opt_pgrp, Opt_minproto, Opt_maxproto};
@@ -143,6 +141,8 @@ int autofs_fill_super(struct super_block *s, void *data, int silent)
143 int minproto, maxproto; 141 int minproto, maxproto;
144 pid_t pgid; 142 pid_t pgid;
145 143
144 save_mount_options(s, data);
145
146 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL); 146 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
147 if (!sbi) 147 if (!sbi)
148 goto fail_unlock; 148 goto fail_unlock;
@@ -164,7 +164,9 @@ int autofs_fill_super(struct super_block *s, void *data, int silent)
164 s->s_time_gran = 1; 164 s->s_time_gran = 1;
165 sbi->sb = s; 165 sbi->sb = s;
166 166
167 root_inode = iget(s, AUTOFS_ROOT_INO); 167 root_inode = autofs_iget(s, AUTOFS_ROOT_INO);
168 if (IS_ERR(root_inode))
169 goto fail_free;
168 root = d_alloc_root(root_inode); 170 root = d_alloc_root(root_inode);
169 pipe = NULL; 171 pipe = NULL;
170 172
@@ -230,11 +232,17 @@ fail_unlock:
230 return -EINVAL; 232 return -EINVAL;
231} 233}
232 234
233static void autofs_read_inode(struct inode *inode) 235struct inode *autofs_iget(struct super_block *sb, unsigned long ino)
234{ 236{
235 ino_t ino = inode->i_ino;
236 unsigned int n; 237 unsigned int n;
237 struct autofs_sb_info *sbi = autofs_sbi(inode->i_sb); 238 struct autofs_sb_info *sbi = autofs_sbi(sb);
239 struct inode *inode;
240
241 inode = iget_locked(sb, ino);
242 if (!inode)
243 return ERR_PTR(-ENOMEM);
244 if (!(inode->i_state & I_NEW))
245 return inode;
238 246
239 /* Initialize to the default case (stub directory) */ 247 /* Initialize to the default case (stub directory) */
240 248
@@ -250,7 +258,7 @@ static void autofs_read_inode(struct inode *inode)
250 inode->i_op = &autofs_root_inode_operations; 258 inode->i_op = &autofs_root_inode_operations;
251 inode->i_fop = &autofs_root_operations; 259 inode->i_fop = &autofs_root_operations;
252 inode->i_uid = inode->i_gid = 0; /* Changed in read_super */ 260 inode->i_uid = inode->i_gid = 0; /* Changed in read_super */
253 return; 261 goto done;
254 } 262 }
255 263
256 inode->i_uid = inode->i_sb->s_root->d_inode->i_uid; 264 inode->i_uid = inode->i_sb->s_root->d_inode->i_uid;
@@ -263,7 +271,7 @@ static void autofs_read_inode(struct inode *inode)
263 n = ino - AUTOFS_FIRST_SYMLINK; 271 n = ino - AUTOFS_FIRST_SYMLINK;
264 if (n >= AUTOFS_MAX_SYMLINKS || !test_bit(n,sbi->symlink_bitmap)) { 272 if (n >= AUTOFS_MAX_SYMLINKS || !test_bit(n,sbi->symlink_bitmap)) {
265 printk("autofs: Looking for bad symlink inode %u\n", (unsigned int) ino); 273 printk("autofs: Looking for bad symlink inode %u\n", (unsigned int) ino);
266 return; 274 goto done;
267 } 275 }
268 276
269 inode->i_op = &autofs_symlink_inode_operations; 277 inode->i_op = &autofs_symlink_inode_operations;
@@ -275,4 +283,8 @@ static void autofs_read_inode(struct inode *inode)
275 inode->i_size = sl->len; 283 inode->i_size = sl->len;
276 inode->i_nlink = 1; 284 inode->i_nlink = 1;
277 } 285 }
286
287done:
288 unlock_new_inode(inode);
289 return inode;
278} 290}
diff --git a/fs/autofs/root.c b/fs/autofs/root.c
index 5efff3c0d886..8aacade56956 100644
--- a/fs/autofs/root.c
+++ b/fs/autofs/root.c
@@ -114,8 +114,8 @@ static int try_to_fill_dentry(struct dentry *dentry, struct super_block *sb, str
114 dentry->d_time = (unsigned long) ent; 114 dentry->d_time = (unsigned long) ent;
115 115
116 if (!dentry->d_inode) { 116 if (!dentry->d_inode) {
117 inode = iget(sb, ent->ino); 117 inode = autofs_iget(sb, ent->ino);
118 if (!inode) { 118 if (IS_ERR(inode)) {
119 /* Failed, but leave pending for next time */ 119 /* Failed, but leave pending for next time */
120 return 1; 120 return 1;
121 } 121 }
@@ -274,6 +274,7 @@ static int autofs_root_symlink(struct inode *dir, struct dentry *dentry, const c
274 unsigned int n; 274 unsigned int n;
275 int slsize; 275 int slsize;
276 struct autofs_symlink *sl; 276 struct autofs_symlink *sl;
277 struct inode *inode;
277 278
278 DPRINTK(("autofs_root_symlink: %s <- ", symname)); 279 DPRINTK(("autofs_root_symlink: %s <- ", symname));
279 autofs_say(dentry->d_name.name,dentry->d_name.len); 280 autofs_say(dentry->d_name.name,dentry->d_name.len);
@@ -331,7 +332,12 @@ static int autofs_root_symlink(struct inode *dir, struct dentry *dentry, const c
331 ent->dentry = NULL; /* We don't keep the dentry for symlinks */ 332 ent->dentry = NULL; /* We don't keep the dentry for symlinks */
332 333
333 autofs_hash_insert(dh,ent); 334 autofs_hash_insert(dh,ent);
334 d_instantiate(dentry, iget(dir->i_sb,ent->ino)); 335
336 inode = autofs_iget(dir->i_sb, ent->ino);
337 if (IS_ERR(inode))
338 return PTR_ERR(inode);
339
340 d_instantiate(dentry, inode);
335 unlock_kernel(); 341 unlock_kernel();
336 return 0; 342 return 0;
337} 343}
@@ -428,6 +434,7 @@ static int autofs_root_mkdir(struct inode *dir, struct dentry *dentry, int mode)
428 struct autofs_sb_info *sbi = autofs_sbi(dir->i_sb); 434 struct autofs_sb_info *sbi = autofs_sbi(dir->i_sb);
429 struct autofs_dirhash *dh = &sbi->dirhash; 435 struct autofs_dirhash *dh = &sbi->dirhash;
430 struct autofs_dir_ent *ent; 436 struct autofs_dir_ent *ent;
437 struct inode *inode;
431 ino_t ino; 438 ino_t ino;
432 439
433 lock_kernel(); 440 lock_kernel();
@@ -469,7 +476,14 @@ static int autofs_root_mkdir(struct inode *dir, struct dentry *dentry, int mode)
469 autofs_hash_insert(dh,ent); 476 autofs_hash_insert(dh,ent);
470 477
471 inc_nlink(dir); 478 inc_nlink(dir);
472 d_instantiate(dentry, iget(dir->i_sb,ino)); 479
480 inode = autofs_iget(dir->i_sb, ino);
481 if (IS_ERR(inode)) {
482 drop_nlink(dir);
483 return PTR_ERR(inode);
484 }
485
486 d_instantiate(dentry, inode);
473 unlock_kernel(); 487 unlock_kernel();
474 488
475 return 0; 489 return 0;
diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c
index 7f05d6ccdb13..2fdcf5e1d236 100644
--- a/fs/autofs4/inode.c
+++ b/fs/autofs4/inode.c
@@ -176,11 +176,16 @@ out_kill_sb:
176static int autofs4_show_options(struct seq_file *m, struct vfsmount *mnt) 176static int autofs4_show_options(struct seq_file *m, struct vfsmount *mnt)
177{ 177{
178 struct autofs_sb_info *sbi = autofs4_sbi(mnt->mnt_sb); 178 struct autofs_sb_info *sbi = autofs4_sbi(mnt->mnt_sb);
179 struct inode *root_inode = mnt->mnt_sb->s_root->d_inode;
179 180
180 if (!sbi) 181 if (!sbi)
181 return 0; 182 return 0;
182 183
183 seq_printf(m, ",fd=%d", sbi->pipefd); 184 seq_printf(m, ",fd=%d", sbi->pipefd);
185 if (root_inode->i_uid != 0)
186 seq_printf(m, ",uid=%u", root_inode->i_uid);
187 if (root_inode->i_gid != 0)
188 seq_printf(m, ",gid=%u", root_inode->i_gid);
184 seq_printf(m, ",pgrp=%d", sbi->oz_pgrp); 189 seq_printf(m, ",pgrp=%d", sbi->oz_pgrp);
185 seq_printf(m, ",timeout=%lu", sbi->exp_timeout/HZ); 190 seq_printf(m, ",timeout=%lu", sbi->exp_timeout/HZ);
186 seq_printf(m, ",minproto=%d", sbi->min_proto); 191 seq_printf(m, ",minproto=%d", sbi->min_proto);
diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
index 2bbcc8151dc3..a54a946a50ae 100644
--- a/fs/autofs4/root.c
+++ b/fs/autofs4/root.c
@@ -368,7 +368,8 @@ static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd)
368 * so we don't need to follow the mount. 368 * so we don't need to follow the mount.
369 */ 369 */
370 if (d_mountpoint(dentry)) { 370 if (d_mountpoint(dentry)) {
371 if (!autofs4_follow_mount(&nd->mnt, &nd->dentry)) { 371 if (!autofs4_follow_mount(&nd->path.mnt,
372 &nd->path.dentry)) {
372 status = -ENOENT; 373 status = -ENOENT;
373 goto out_error; 374 goto out_error;
374 } 375 }
@@ -382,7 +383,7 @@ done:
382 return NULL; 383 return NULL;
383 384
384out_error: 385out_error:
385 path_release(nd); 386 path_put(&nd->path);
386 return ERR_PTR(status); 387 return ERR_PTR(status);
387} 388}
388 389
diff --git a/fs/bad_inode.c b/fs/bad_inode.c
index 521ff7caadbd..f1c2ea8342f5 100644
--- a/fs/bad_inode.c
+++ b/fs/bad_inode.c
@@ -359,3 +359,17 @@ int is_bad_inode(struct inode *inode)
359} 359}
360 360
361EXPORT_SYMBOL(is_bad_inode); 361EXPORT_SYMBOL(is_bad_inode);
362
363/**
364 * iget_failed - Mark an under-construction inode as dead and release it
365 * @inode: The inode to discard
366 *
367 * Mark an under-construction inode as dead and release it.
368 */
369void iget_failed(struct inode *inode)
370{
371 make_bad_inode(inode);
372 unlock_new_inode(inode);
373 iput(inode);
374}
375EXPORT_SYMBOL(iget_failed);
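
iget_failed() is the error-path counterpart of the pattern these conversions adopt: look the inode up with iget_locked(), return it immediately if it was already cached, otherwise fill it in and unlock it, or discard it on failure. A schematic, kernel-context sketch of that caller shape, mirroring the bfs/befs conversions in this diff; myfs_iget and myfs_read_raw are hypothetical names and the snippet is not compilable outside a kernel tree:

	struct inode *myfs_iget(struct super_block *sb, unsigned long ino)
	{
		struct inode *inode;

		inode = iget_locked(sb, ino);
		if (!inode)
			return ERR_PTR(-ENOMEM);
		if (!(inode->i_state & I_NEW))
			return inode;		/* already cached and initialised */

		if (myfs_read_raw(inode)) {	/* hypothetical on-disk read helper */
			iget_failed(inode);	/* mark bad, unlock and drop it */
			return ERR_PTR(-EIO);
		}

		unlock_new_inode(inode);
		return inode;
	}
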
diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
index b28a20e61b80..82123ff3e1dd 100644
--- a/fs/befs/linuxvfs.c
+++ b/fs/befs/linuxvfs.c
@@ -35,7 +35,7 @@ static int befs_get_block(struct inode *, sector_t, struct buffer_head *, int);
35static int befs_readpage(struct file *file, struct page *page); 35static int befs_readpage(struct file *file, struct page *page);
36static sector_t befs_bmap(struct address_space *mapping, sector_t block); 36static sector_t befs_bmap(struct address_space *mapping, sector_t block);
37static struct dentry *befs_lookup(struct inode *, struct dentry *, struct nameidata *); 37static struct dentry *befs_lookup(struct inode *, struct dentry *, struct nameidata *);
38static void befs_read_inode(struct inode *ino); 38static struct inode *befs_iget(struct super_block *, unsigned long);
39static struct inode *befs_alloc_inode(struct super_block *sb); 39static struct inode *befs_alloc_inode(struct super_block *sb);
40static void befs_destroy_inode(struct inode *inode); 40static void befs_destroy_inode(struct inode *inode);
41static int befs_init_inodecache(void); 41static int befs_init_inodecache(void);
@@ -52,12 +52,12 @@ static int befs_statfs(struct dentry *, struct kstatfs *);
52static int parse_options(char *, befs_mount_options *); 52static int parse_options(char *, befs_mount_options *);
53 53
54static const struct super_operations befs_sops = { 54static const struct super_operations befs_sops = {
55 .read_inode = befs_read_inode, /* initialize & read inode */
56 .alloc_inode = befs_alloc_inode, /* allocate a new inode */ 55 .alloc_inode = befs_alloc_inode, /* allocate a new inode */
57 .destroy_inode = befs_destroy_inode, /* deallocate an inode */ 56 .destroy_inode = befs_destroy_inode, /* deallocate an inode */
58 .put_super = befs_put_super, /* uninit super */ 57 .put_super = befs_put_super, /* uninit super */
59 .statfs = befs_statfs, /* statfs */ 58 .statfs = befs_statfs, /* statfs */
60 .remount_fs = befs_remount, 59 .remount_fs = befs_remount,
60 .show_options = generic_show_options,
61}; 61};
62 62
63/* slab cache for befs_inode_info objects */ 63/* slab cache for befs_inode_info objects */
@@ -198,9 +198,9 @@ befs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
198 return ERR_PTR(-ENODATA); 198 return ERR_PTR(-ENODATA);
199 } 199 }
200 200
201 inode = iget(dir->i_sb, (ino_t) offset); 201 inode = befs_iget(dir->i_sb, (ino_t) offset);
202 if (!inode) 202 if (IS_ERR(inode))
203 return ERR_PTR(-EACCES); 203 return ERR_CAST(inode);
204 204
205 d_add(dentry, inode); 205 d_add(dentry, inode);
206 206
@@ -296,17 +296,23 @@ static void init_once(struct kmem_cache *cachep, void *foo)
296 inode_init_once(&bi->vfs_inode); 296 inode_init_once(&bi->vfs_inode);
297} 297}
298 298
299static void 299static struct inode *befs_iget(struct super_block *sb, unsigned long ino)
300befs_read_inode(struct inode *inode)
301{ 300{
302 struct buffer_head *bh = NULL; 301 struct buffer_head *bh = NULL;
303 befs_inode *raw_inode = NULL; 302 befs_inode *raw_inode = NULL;
304 303
305 struct super_block *sb = inode->i_sb;
306 befs_sb_info *befs_sb = BEFS_SB(sb); 304 befs_sb_info *befs_sb = BEFS_SB(sb);
307 befs_inode_info *befs_ino = NULL; 305 befs_inode_info *befs_ino = NULL;
306 struct inode *inode;
307 long ret = -EIO;
308
309 befs_debug(sb, "---> befs_read_inode() " "inode = %lu", ino);
308 310
309 befs_debug(sb, "---> befs_read_inode() " "inode = %lu", inode->i_ino); 311 inode = iget_locked(sb, ino);
312 if (IS_ERR(inode))
313 return inode;
314 if (!(inode->i_state & I_NEW))
315 return inode;
310 316
311 befs_ino = BEFS_I(inode); 317 befs_ino = BEFS_I(inode);
312 318
@@ -402,15 +408,16 @@ befs_read_inode(struct inode *inode)
402 408
403 brelse(bh); 409 brelse(bh);
404 befs_debug(sb, "<--- befs_read_inode()"); 410 befs_debug(sb, "<--- befs_read_inode()");
405 return; 411 unlock_new_inode(inode);
412 return inode;
406 413
407 unacquire_bh: 414 unacquire_bh:
408 brelse(bh); 415 brelse(bh);
409 416
410 unacquire_none: 417 unacquire_none:
411 make_bad_inode(inode); 418 iget_failed(inode);
412 befs_debug(sb, "<--- befs_read_inode() - Bad inode"); 419 befs_debug(sb, "<--- befs_read_inode() - Bad inode");
413 return; 420 return ERR_PTR(ret);
414} 421}
415 422
416/* Initialize the inode cache. Called at fs setup. 423/* Initialize the inode cache. Called at fs setup.
@@ -752,10 +759,12 @@ befs_fill_super(struct super_block *sb, void *data, int silent)
752 befs_sb_info *befs_sb; 759 befs_sb_info *befs_sb;
753 befs_super_block *disk_sb; 760 befs_super_block *disk_sb;
754 struct inode *root; 761 struct inode *root;
755 762 long ret = -EINVAL;
756 const unsigned long sb_block = 0; 763 const unsigned long sb_block = 0;
757 const off_t x86_sb_off = 512; 764 const off_t x86_sb_off = 512;
758 765
766 save_mount_options(sb, data);
767
759 sb->s_fs_info = kmalloc(sizeof (*befs_sb), GFP_KERNEL); 768 sb->s_fs_info = kmalloc(sizeof (*befs_sb), GFP_KERNEL);
760 if (sb->s_fs_info == NULL) { 769 if (sb->s_fs_info == NULL) {
761 printk(KERN_ERR 770 printk(KERN_ERR
@@ -833,7 +842,11 @@ befs_fill_super(struct super_block *sb, void *data, int silent)
833 /* Set real blocksize of fs */ 842 /* Set real blocksize of fs */
834 sb_set_blocksize(sb, (ulong) befs_sb->block_size); 843 sb_set_blocksize(sb, (ulong) befs_sb->block_size);
835 sb->s_op = (struct super_operations *) &befs_sops; 844 sb->s_op = (struct super_operations *) &befs_sops;
836 root = iget(sb, iaddr2blockno(sb, &(befs_sb->root_dir))); 845 root = befs_iget(sb, iaddr2blockno(sb, &(befs_sb->root_dir)));
846 if (IS_ERR(root)) {
847 ret = PTR_ERR(root);
848 goto unacquire_priv_sbp;
849 }
837 sb->s_root = d_alloc_root(root); 850 sb->s_root = d_alloc_root(root);
838 if (!sb->s_root) { 851 if (!sb->s_root) {
839 iput(root); 852 iput(root);
@@ -868,7 +881,7 @@ befs_fill_super(struct super_block *sb, void *data, int silent)
868 881
869 unacquire_none: 882 unacquire_none:
870 sb->s_fs_info = NULL; 883 sb->s_fs_info = NULL;
871 return -EINVAL; 884 return ret;
872} 885}
873 886
874static int 887static int
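
Callers such as befs_lookup() switch from the old NULL-on-failure iget() to checking IS_ERR() and propagating the error with ERR_CAST(), which is just a type-preserving cast of an error pointer. The kernel encodes small negative errno values in the pointer itself; a simplified user-space model of that encoding (the constants and helpers are paraphrased from include/linux/err.h, not copied exactly):

	#include <stdio.h>

	#define MAX_ERRNO	4095

	static inline void *ERR_PTR(long error)
	{
		return (void *)error;	/* errno values occupy the top 4095 addresses */
	}

	static inline long PTR_ERR(const void *ptr)
	{
		return (long)ptr;
	}

	static inline int IS_ERR(const void *ptr)
	{
		return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
	}

	int main(void)
	{
		void *p = ERR_PTR(-5);	/* pretend an -EIO came back */

		if (IS_ERR(p))
			printf("error pointer, errno = %ld\n", -PTR_ERR(p));
		return 0;
	}
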
diff --git a/fs/bfs/bfs.h b/fs/bfs/bfs.h
index ac7a8b1d6c3a..71faf4d23908 100644
--- a/fs/bfs/bfs.h
+++ b/fs/bfs/bfs.h
@@ -44,6 +44,8 @@ static inline struct bfs_inode_info *BFS_I(struct inode *inode)
44#define printf(format, args...) \ 44#define printf(format, args...) \
45 printk(KERN_ERR "BFS-fs: %s(): " format, __FUNCTION__, ## args) 45 printk(KERN_ERR "BFS-fs: %s(): " format, __FUNCTION__, ## args)
46 46
47/* inode.c */
48extern struct inode *bfs_iget(struct super_block *sb, unsigned long ino);
47 49
48/* file.c */ 50/* file.c */
49extern const struct inode_operations bfs_file_inops; 51extern const struct inode_operations bfs_file_inops;
diff --git a/fs/bfs/dir.c b/fs/bfs/dir.c
index 1fd056d0fc3d..034950cb3cbe 100644
--- a/fs/bfs/dir.c
+++ b/fs/bfs/dir.c
@@ -148,10 +148,10 @@ static struct dentry *bfs_lookup(struct inode *dir, struct dentry *dentry,
148 if (bh) { 148 if (bh) {
149 unsigned long ino = (unsigned long)le16_to_cpu(de->ino); 149 unsigned long ino = (unsigned long)le16_to_cpu(de->ino);
150 brelse(bh); 150 brelse(bh);
151 inode = iget(dir->i_sb, ino); 151 inode = bfs_iget(dir->i_sb, ino);
152 if (!inode) { 152 if (IS_ERR(inode)) {
153 unlock_kernel(); 153 unlock_kernel();
154 return ERR_PTR(-EACCES); 154 return ERR_CAST(inode);
155 } 155 }
156 } 156 }
157 unlock_kernel(); 157 unlock_kernel();
diff --git a/fs/bfs/inode.c b/fs/bfs/inode.c
index a64a71d444f5..8db623838b50 100644
--- a/fs/bfs/inode.c
+++ b/fs/bfs/inode.c
@@ -32,17 +32,22 @@ MODULE_LICENSE("GPL");
32 32
33void dump_imap(const char *prefix, struct super_block *s); 33void dump_imap(const char *prefix, struct super_block *s);
34 34
35static void bfs_read_inode(struct inode *inode) 35struct inode *bfs_iget(struct super_block *sb, unsigned long ino)
36{ 36{
37 unsigned long ino = inode->i_ino;
38 struct bfs_inode *di; 37 struct bfs_inode *di;
38 struct inode *inode;
39 struct buffer_head *bh; 39 struct buffer_head *bh;
40 int block, off; 40 int block, off;
41 41
42 inode = iget_locked(sb, ino);
43 if (IS_ERR(inode))
44 return ERR_PTR(-ENOMEM);
45 if (!(inode->i_state & I_NEW))
46 return inode;
47
42 if ((ino < BFS_ROOT_INO) || (ino > BFS_SB(inode->i_sb)->si_lasti)) { 48 if ((ino < BFS_ROOT_INO) || (ino > BFS_SB(inode->i_sb)->si_lasti)) {
43 printf("Bad inode number %s:%08lx\n", inode->i_sb->s_id, ino); 49 printf("Bad inode number %s:%08lx\n", inode->i_sb->s_id, ino);
44 make_bad_inode(inode); 50 goto error;
45 return;
46 } 51 }
47 52
48 block = (ino - BFS_ROOT_INO) / BFS_INODES_PER_BLOCK + 1; 53 block = (ino - BFS_ROOT_INO) / BFS_INODES_PER_BLOCK + 1;
@@ -50,8 +55,7 @@ static void bfs_read_inode(struct inode *inode)
50 if (!bh) { 55 if (!bh) {
51 printf("Unable to read inode %s:%08lx\n", inode->i_sb->s_id, 56 printf("Unable to read inode %s:%08lx\n", inode->i_sb->s_id,
52 ino); 57 ino);
53 make_bad_inode(inode); 58 goto error;
54 return;
55 } 59 }
56 60
57 off = (ino - BFS_ROOT_INO) % BFS_INODES_PER_BLOCK; 61 off = (ino - BFS_ROOT_INO) % BFS_INODES_PER_BLOCK;
@@ -85,6 +89,12 @@ static void bfs_read_inode(struct inode *inode)
85 inode->i_ctime.tv_nsec = 0; 89 inode->i_ctime.tv_nsec = 0;
86 90
87 brelse(bh); 91 brelse(bh);
92 unlock_new_inode(inode);
93 return inode;
94
95error:
96 iget_failed(inode);
97 return ERR_PTR(-EIO);
88} 98}
89 99
90static int bfs_write_inode(struct inode *inode, int unused) 100static int bfs_write_inode(struct inode *inode, int unused)
@@ -276,7 +286,6 @@ static void destroy_inodecache(void)
276static const struct super_operations bfs_sops = { 286static const struct super_operations bfs_sops = {
277 .alloc_inode = bfs_alloc_inode, 287 .alloc_inode = bfs_alloc_inode,
278 .destroy_inode = bfs_destroy_inode, 288 .destroy_inode = bfs_destroy_inode,
279 .read_inode = bfs_read_inode,
280 .write_inode = bfs_write_inode, 289 .write_inode = bfs_write_inode,
281 .delete_inode = bfs_delete_inode, 290 .delete_inode = bfs_delete_inode,
282 .put_super = bfs_put_super, 291 .put_super = bfs_put_super,
@@ -312,6 +321,7 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent)
312 struct inode *inode; 321 struct inode *inode;
313 unsigned i, imap_len; 322 unsigned i, imap_len;
314 struct bfs_sb_info *info; 323 struct bfs_sb_info *info;
324 long ret = -EINVAL;
315 325
316 info = kzalloc(sizeof(*info), GFP_KERNEL); 326 info = kzalloc(sizeof(*info), GFP_KERNEL);
317 if (!info) 327 if (!info)
@@ -346,14 +356,16 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent)
346 set_bit(i, info->si_imap); 356 set_bit(i, info->si_imap);
347 357
348 s->s_op = &bfs_sops; 358 s->s_op = &bfs_sops;
349 inode = iget(s, BFS_ROOT_INO); 359 inode = bfs_iget(s, BFS_ROOT_INO);
350 if (!inode) { 360 if (IS_ERR(inode)) {
361 ret = PTR_ERR(inode);
351 kfree(info->si_imap); 362 kfree(info->si_imap);
352 goto out; 363 goto out;
353 } 364 }
354 s->s_root = d_alloc_root(inode); 365 s->s_root = d_alloc_root(inode);
355 if (!s->s_root) { 366 if (!s->s_root) {
356 iput(inode); 367 iput(inode);
368 ret = -ENOMEM;
357 kfree(info->si_imap); 369 kfree(info->si_imap);
358 goto out; 370 goto out;
359 } 371 }
@@ -404,7 +416,7 @@ out:
404 brelse(bh); 416 brelse(bh);
405 kfree(info); 417 kfree(info);
406 s->s_fs_info = NULL; 418 s->s_fs_info = NULL;
407 return -EINVAL; 419 return ret;
408} 420}
409 421
410static int bfs_get_sb(struct file_system_type *fs_type, 422static int bfs_get_sb(struct file_system_type *fs_type,
diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
index 7596e1e94cde..a1bb2244cac7 100644
--- a/fs/binfmt_aout.c
+++ b/fs/binfmt_aout.c
@@ -28,6 +28,7 @@
28#include <asm/system.h> 28#include <asm/system.h>
29#include <asm/uaccess.h> 29#include <asm/uaccess.h>
30#include <asm/cacheflush.h> 30#include <asm/cacheflush.h>
31#include <asm/a.out-core.h>
31 32
32static int load_aout_binary(struct linux_binprm *, struct pt_regs * regs); 33static int load_aout_binary(struct linux_binprm *, struct pt_regs * regs);
33static int load_aout_library(struct file*); 34static int load_aout_library(struct file*);
@@ -115,10 +116,10 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, u
115 current->flags |= PF_DUMPCORE; 116 current->flags |= PF_DUMPCORE;
116 strncpy(dump.u_comm, current->comm, sizeof(dump.u_comm)); 117 strncpy(dump.u_comm, current->comm, sizeof(dump.u_comm));
117#ifndef __sparc__ 118#ifndef __sparc__
118 dump.u_ar0 = (void *)(((unsigned long)(&dump.regs)) - ((unsigned long)(&dump))); 119 dump.u_ar0 = offsetof(struct user, regs);
119#endif 120#endif
120 dump.signal = signr; 121 dump.signal = signr;
121 dump_thread(regs, &dump); 122 aout_dump_thread(regs, &dump);
122 123
123/* If the size of the dump file exceeds the rlimit, then see what would happen 124/* If the size of the dump file exceeds the rlimit, then see what would happen
124 if we wrote the stack, but not the data area. */ 125 if we wrote the stack, but not the data area. */
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 4628c42ca892..41a958a7585e 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -134,8 +134,7 @@ static int padzero(unsigned long elf_bss)
134 134
135static int 135static int
136create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec, 136create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
137 int interp_aout, unsigned long load_addr, 137 unsigned long load_addr, unsigned long interp_load_addr)
138 unsigned long interp_load_addr)
139{ 138{
140 unsigned long p = bprm->p; 139 unsigned long p = bprm->p;
141 int argc = bprm->argc; 140 int argc = bprm->argc;
@@ -223,12 +222,7 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
223 222
224 sp = STACK_ADD(p, ei_index); 223 sp = STACK_ADD(p, ei_index);
225 224
226 items = (argc + 1) + (envc + 1); 225 items = (argc + 1) + (envc + 1) + 1;
227 if (interp_aout) {
228 items += 3; /* a.out interpreters require argv & envp too */
229 } else {
230 items += 1; /* ELF interpreters only put argc on the stack */
231 }
232 bprm->p = STACK_ROUND(sp, items); 226 bprm->p = STACK_ROUND(sp, items);
233 227
234 /* Point sp at the lowest address on the stack */ 228 /* Point sp at the lowest address on the stack */
@@ -251,16 +245,8 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
251 /* Now, let's put argc (and argv, envp if appropriate) on the stack */ 245 /* Now, let's put argc (and argv, envp if appropriate) on the stack */
252 if (__put_user(argc, sp++)) 246 if (__put_user(argc, sp++))
253 return -EFAULT; 247 return -EFAULT;
254 if (interp_aout) { 248 argv = sp;
255 argv = sp + 2; 249 envp = argv + argc + 1;
256 envp = argv + argc + 1;
257 if (__put_user((elf_addr_t)(unsigned long)argv, sp++) ||
258 __put_user((elf_addr_t)(unsigned long)envp, sp++))
259 return -EFAULT;
260 } else {
261 argv = sp;
262 envp = argv + argc + 1;
263 }
264 250
265 /* Populate argv and envp */ 251 /* Populate argv and envp */
266 p = current->mm->arg_end = current->mm->arg_start; 252 p = current->mm->arg_end = current->mm->arg_start;
@@ -513,59 +499,12 @@ out:
513 return error; 499 return error;
514} 500}
515 501
516static unsigned long load_aout_interp(struct exec *interp_ex,
517 struct file *interpreter)
518{
519 unsigned long text_data, elf_entry = ~0UL;
520 char __user * addr;
521 loff_t offset;
522
523 current->mm->end_code = interp_ex->a_text;
524 text_data = interp_ex->a_text + interp_ex->a_data;
525 current->mm->end_data = text_data;
526 current->mm->brk = interp_ex->a_bss + text_data;
527
528 switch (N_MAGIC(*interp_ex)) {
529 case OMAGIC:
530 offset = 32;
531 addr = (char __user *)0;
532 break;
533 case ZMAGIC:
534 case QMAGIC:
535 offset = N_TXTOFF(*interp_ex);
536 addr = (char __user *)N_TXTADDR(*interp_ex);
537 break;
538 default:
539 goto out;
540 }
541
542 down_write(&current->mm->mmap_sem);
543 do_brk(0, text_data);
544 up_write(&current->mm->mmap_sem);
545 if (!interpreter->f_op || !interpreter->f_op->read)
546 goto out;
547 if (interpreter->f_op->read(interpreter, addr, text_data, &offset) < 0)
548 goto out;
549 flush_icache_range((unsigned long)addr,
550 (unsigned long)addr + text_data);
551
552 down_write(&current->mm->mmap_sem);
553 do_brk(ELF_PAGESTART(text_data + ELF_MIN_ALIGN - 1),
554 interp_ex->a_bss);
555 up_write(&current->mm->mmap_sem);
556 elf_entry = interp_ex->a_entry;
557
558out:
559 return elf_entry;
560}
561
562/* 502/*
563 * These are the functions used to load ELF style executables and shared 503 * These are the functions used to load ELF style executables and shared
564 * libraries. There is no binary dependent code anywhere else. 504 * libraries. There is no binary dependent code anywhere else.
565 */ 505 */
566 506
567#define INTERPRETER_NONE 0 507#define INTERPRETER_NONE 0
568#define INTERPRETER_AOUT 1
569#define INTERPRETER_ELF 2 508#define INTERPRETER_ELF 2
570 509
571#ifndef STACK_RND_MASK 510#ifndef STACK_RND_MASK
@@ -594,7 +533,6 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
594 unsigned long load_addr = 0, load_bias = 0; 533 unsigned long load_addr = 0, load_bias = 0;
595 int load_addr_set = 0; 534 int load_addr_set = 0;
596 char * elf_interpreter = NULL; 535 char * elf_interpreter = NULL;
597 unsigned int interpreter_type = INTERPRETER_NONE;
598 unsigned long error; 536 unsigned long error;
599 struct elf_phdr *elf_ppnt, *elf_phdata; 537 struct elf_phdr *elf_ppnt, *elf_phdata;
600 unsigned long elf_bss, elf_brk; 538 unsigned long elf_bss, elf_brk;
@@ -605,7 +543,6 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
605 unsigned long interp_load_addr = 0; 543 unsigned long interp_load_addr = 0;
606 unsigned long start_code, end_code, start_data, end_data; 544 unsigned long start_code, end_code, start_data, end_data;
607 unsigned long reloc_func_desc = 0; 545 unsigned long reloc_func_desc = 0;
608 char passed_fileno[6];
609 struct files_struct *files; 546 struct files_struct *files;
610 int executable_stack = EXSTACK_DEFAULT; 547 int executable_stack = EXSTACK_DEFAULT;
611 unsigned long def_flags = 0; 548 unsigned long def_flags = 0;
@@ -774,59 +711,18 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
774 711
775 /* Some simple consistency checks for the interpreter */ 712 /* Some simple consistency checks for the interpreter */
776 if (elf_interpreter) { 713 if (elf_interpreter) {
777 static int warn;
778 interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT;
779
780 /* Now figure out which format our binary is */
781 if ((N_MAGIC(loc->interp_ex) != OMAGIC) &&
782 (N_MAGIC(loc->interp_ex) != ZMAGIC) &&
783 (N_MAGIC(loc->interp_ex) != QMAGIC))
784 interpreter_type = INTERPRETER_ELF;
785
786 if (memcmp(loc->interp_elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
787 interpreter_type &= ~INTERPRETER_ELF;
788
789 if (interpreter_type == INTERPRETER_AOUT && warn < 10) {
790 printk(KERN_WARNING "a.out ELF interpreter %s is "
791 "deprecated and will not be supported "
792 "after Linux 2.6.25\n", elf_interpreter);
793 warn++;
794 }
795
796 retval = -ELIBBAD; 714 retval = -ELIBBAD;
797 if (!interpreter_type) 715 /* Not an ELF interpreter */
716 if (memcmp(loc->interp_elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
798 goto out_free_dentry; 717 goto out_free_dentry;
799
800 /* Make sure only one type was selected */
801 if ((interpreter_type & INTERPRETER_ELF) &&
802 interpreter_type != INTERPRETER_ELF) {
803 // FIXME - ratelimit this before re-enabling
804 // printk(KERN_WARNING "ELF: Ambiguous type, using ELF\n");
805 interpreter_type = INTERPRETER_ELF;
806 }
807 /* Verify the interpreter has a valid arch */ 718 /* Verify the interpreter has a valid arch */
808 if ((interpreter_type == INTERPRETER_ELF) && 719 if (!elf_check_arch(&loc->interp_elf_ex))
809 !elf_check_arch(&loc->interp_elf_ex))
810 goto out_free_dentry; 720 goto out_free_dentry;
811 } else { 721 } else {
812 /* Executables without an interpreter also need a personality */ 722 /* Executables without an interpreter also need a personality */
813 SET_PERSONALITY(loc->elf_ex, 0); 723 SET_PERSONALITY(loc->elf_ex, 0);
814 } 724 }
815 725
816 /* OK, we are done with that, now set up the arg stuff,
817 and then start this sucker up */
818 if ((!bprm->sh_bang) && (interpreter_type == INTERPRETER_AOUT)) {
819 char *passed_p = passed_fileno;
820 sprintf(passed_fileno, "%d", elf_exec_fileno);
821
822 if (elf_interpreter) {
823 retval = copy_strings_kernel(1, &passed_p, bprm);
824 if (retval)
825 goto out_free_dentry;
826 bprm->argc++;
827 }
828 }
829
830 /* Flush all traces of the currently running executable */ 726 /* Flush all traces of the currently running executable */
831 retval = flush_old_exec(bprm); 727 retval = flush_old_exec(bprm);
832 if (retval) 728 if (retval)
@@ -1004,24 +900,19 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
1004 } 900 }
1005 901
1006 if (elf_interpreter) { 902 if (elf_interpreter) {
1007 if (interpreter_type == INTERPRETER_AOUT) { 903 unsigned long uninitialized_var(interp_map_addr);
1008 elf_entry = load_aout_interp(&loc->interp_ex, 904
1009 interpreter); 905 elf_entry = load_elf_interp(&loc->interp_elf_ex,
1010 } else { 906 interpreter,
1011 unsigned long uninitialized_var(interp_map_addr); 907 &interp_map_addr,
1012 908 load_bias);
1013 elf_entry = load_elf_interp(&loc->interp_elf_ex, 909 if (!IS_ERR((void *)elf_entry)) {
1014 interpreter, 910 /*
1015 &interp_map_addr, 911 * load_elf_interp() returns relocation
1016 load_bias); 912 * adjustment
1017 if (!IS_ERR((void *)elf_entry)) { 913 */
1018 /* 914 interp_load_addr = elf_entry;
1019 * load_elf_interp() returns relocation 915 elf_entry += loc->interp_elf_ex.e_entry;
1020 * adjustment
1021 */
1022 interp_load_addr = elf_entry;
1023 elf_entry += loc->interp_elf_ex.e_entry;
1024 }
1025 } 916 }
1026 if (BAD_ADDR(elf_entry)) { 917 if (BAD_ADDR(elf_entry)) {
1027 force_sig(SIGSEGV, current); 918 force_sig(SIGSEGV, current);
@@ -1045,8 +936,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
1045 936
1046 kfree(elf_phdata); 937 kfree(elf_phdata);
1047 938
1048 if (interpreter_type != INTERPRETER_AOUT) 939 sys_close(elf_exec_fileno);
1049 sys_close(elf_exec_fileno);
1050 940
1051 set_binfmt(&elf_format); 941 set_binfmt(&elf_format);
1052 942
@@ -1061,15 +951,12 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
1061 compute_creds(bprm); 951 compute_creds(bprm);
1062 current->flags &= ~PF_FORKNOEXEC; 952 current->flags &= ~PF_FORKNOEXEC;
1063 retval = create_elf_tables(bprm, &loc->elf_ex, 953 retval = create_elf_tables(bprm, &loc->elf_ex,
1064 (interpreter_type == INTERPRETER_AOUT),
1065 load_addr, interp_load_addr); 954 load_addr, interp_load_addr);
1066 if (retval < 0) { 955 if (retval < 0) {
1067 send_sig(SIGKILL, current, 0); 956 send_sig(SIGKILL, current, 0);
1068 goto out; 957 goto out;
1069 } 958 }
1070 /* N.B. passed_fileno might not be initialized? */ 959 /* N.B. passed_fileno might not be initialized? */
1071 if (interpreter_type == INTERPRETER_AOUT)
1072 current->mm->arg_start += strlen(passed_fileno) + 1;
1073 current->mm->end_code = end_code; 960 current->mm->end_code = end_code;
1074 current->mm->start_code = start_code; 961 current->mm->start_code = start_code;
1075 current->mm->start_data = start_data; 962 current->mm->start_data = start_data;
@@ -1077,7 +964,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
1077 current->mm->start_stack = bprm->p; 964 current->mm->start_stack = bprm->p;
1078 965
1079#ifdef arch_randomize_brk 966#ifdef arch_randomize_brk
1080 if (current->flags & PF_RANDOMIZE) 967 if ((current->flags & PF_RANDOMIZE) && (randomize_va_space > 1))
1081 current->mm->brk = current->mm->start_brk = 968 current->mm->brk = current->mm->start_brk =
1082 arch_randomize_brk(current->mm); 969 arch_randomize_brk(current->mm);
1083#endif 970#endif
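
With the a.out interpreter path removed, create_elf_tables() always lays the initial stack out the ELF way: argc, the argv pointers plus their NULL terminator, then the envp pointers, so envp = argv + argc + 1. A small user-space check of that arithmetic; the three-argument main() is a common glibc/Linux extension rather than standard C:

	#include <stdio.h>

	int main(int argc, char **argv, char **envp)
	{
		/* On ELF/Linux the loader builds: argc, argv[0..argc-1], NULL,
		 * envp[0..], NULL, auxv entries.  envp therefore normally starts
		 * right after argv's NULL terminator, as in the hunk above. */
		printf("argv            = %p\n", (void *)argv);
		printf("argv + argc + 1 = %p\n", (void *)(argv + argc + 1));
		printf("envp            = %p\n", (void *)envp);
		return 0;
	}
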
diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
index 33764fd6db66..0498b181dd52 100644
--- a/fs/binfmt_flat.c
+++ b/fs/binfmt_flat.c
@@ -20,7 +20,6 @@
20#include <linux/sched.h> 20#include <linux/sched.h>
21#include <linux/mm.h> 21#include <linux/mm.h>
22#include <linux/mman.h> 22#include <linux/mman.h>
23#include <linux/a.out.h>
24#include <linux/errno.h> 23#include <linux/errno.h>
25#include <linux/signal.h> 24#include <linux/signal.h>
26#include <linux/string.h> 25#include <linux/string.h>
@@ -444,12 +443,12 @@ static int load_flat_file(struct linux_binprm * bprm,
444 443
445 if (strncmp(hdr->magic, "bFLT", 4)) { 444 if (strncmp(hdr->magic, "bFLT", 4)) {
446 /* 445 /*
446 * Previously, here was a printk to tell people
447 * "BINFMT_FLAT: bad header magic".
448 * But for the kernel which also use ELF FD-PIC format, this
449 * error message is confusing.
447 * because a lot of people do not manage to produce good 450 * because a lot of people do not manage to produce good
448 * flat binaries, we leave this printk to help them realise
449 * the problem. We only print the error if its not a script file
450 */ 451 */
451 if (strncmp(hdr->magic, "#!", 2))
452 printk("BINFMT_FLAT: bad header magic\n");
453 ret = -ENOEXEC; 452 ret = -ENOEXEC;
454 goto err; 453 goto err;
455 } 454 }
diff --git a/fs/binfmt_som.c b/fs/binfmt_som.c
index 9208c41209f9..14c63527c762 100644
--- a/fs/binfmt_som.c
+++ b/fs/binfmt_som.c
@@ -29,7 +29,6 @@
29#include <linux/personality.h> 29#include <linux/personality.h>
30#include <linux/init.h> 30#include <linux/init.h>
31 31
32#include <asm/a.out.h>
33#include <asm/uaccess.h> 32#include <asm/uaccess.h>
34#include <asm/pgtable.h> 33#include <asm/pgtable.h>
35 34
diff --git a/fs/block_dev.c b/fs/block_dev.c
index e48a630ae266..67fe72ce6ac7 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -534,7 +534,6 @@ void __init bdev_cache_init(void)
534 if (err) 534 if (err)
535 panic("Cannot register bdev pseudo-fs"); 535 panic("Cannot register bdev pseudo-fs");
536 bd_mnt = kern_mount(&bd_type); 536 bd_mnt = kern_mount(&bd_type);
537 err = PTR_ERR(bd_mnt);
538 if (IS_ERR(bd_mnt)) 537 if (IS_ERR(bd_mnt))
539 panic("Cannot create bdev pseudo-fs"); 538 panic("Cannot create bdev pseudo-fs");
540 blockdev_superblock = bd_mnt->mnt_sb; /* For writeback */ 539 blockdev_superblock = bd_mnt->mnt_sb; /* For writeback */
@@ -1398,19 +1397,19 @@ struct block_device *lookup_bdev(const char *path)
1398 if (error) 1397 if (error)
1399 return ERR_PTR(error); 1398 return ERR_PTR(error);
1400 1399
1401 inode = nd.dentry->d_inode; 1400 inode = nd.path.dentry->d_inode;
1402 error = -ENOTBLK; 1401 error = -ENOTBLK;
1403 if (!S_ISBLK(inode->i_mode)) 1402 if (!S_ISBLK(inode->i_mode))
1404 goto fail; 1403 goto fail;
1405 error = -EACCES; 1404 error = -EACCES;
1406 if (nd.mnt->mnt_flags & MNT_NODEV) 1405 if (nd.path.mnt->mnt_flags & MNT_NODEV)
1407 goto fail; 1406 goto fail;
1408 error = -ENOMEM; 1407 error = -ENOMEM;
1409 bdev = bd_acquire(inode); 1408 bdev = bd_acquire(inode);
1410 if (!bdev) 1409 if (!bdev)
1411 goto fail; 1410 goto fail;
1412out: 1411out:
1413 path_release(&nd); 1412 path_put(&nd.path);
1414 return bdev; 1413 return bdev;
1415fail: 1414fail:
1416 bdev = ERR_PTR(error); 1415 bdev = ERR_PTR(error);
diff --git a/fs/buffer.c b/fs/buffer.c
index 456c9ab7705b..3ebccf4aa7e3 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -67,14 +67,14 @@ static int sync_buffer(void *word)
67 return 0; 67 return 0;
68} 68}
69 69
70void fastcall __lock_buffer(struct buffer_head *bh) 70void __lock_buffer(struct buffer_head *bh)
71{ 71{
72 wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer, 72 wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
73 TASK_UNINTERRUPTIBLE); 73 TASK_UNINTERRUPTIBLE);
74} 74}
75EXPORT_SYMBOL(__lock_buffer); 75EXPORT_SYMBOL(__lock_buffer);
76 76
77void fastcall unlock_buffer(struct buffer_head *bh) 77void unlock_buffer(struct buffer_head *bh)
78{ 78{
79 smp_mb__before_clear_bit(); 79 smp_mb__before_clear_bit();
80 clear_buffer_locked(bh); 80 clear_buffer_locked(bh);
@@ -678,7 +678,7 @@ void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
678 } else { 678 } else {
679 BUG_ON(mapping->assoc_mapping != buffer_mapping); 679 BUG_ON(mapping->assoc_mapping != buffer_mapping);
680 } 680 }
681 if (list_empty(&bh->b_assoc_buffers)) { 681 if (!bh->b_assoc_map) {
682 spin_lock(&buffer_mapping->private_lock); 682 spin_lock(&buffer_mapping->private_lock);
683 list_move_tail(&bh->b_assoc_buffers, 683 list_move_tail(&bh->b_assoc_buffers,
684 &mapping->private_list); 684 &mapping->private_list);
@@ -794,6 +794,7 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
794{ 794{
795 struct buffer_head *bh; 795 struct buffer_head *bh;
796 struct list_head tmp; 796 struct list_head tmp;
797 struct address_space *mapping;
797 int err = 0, err2; 798 int err = 0, err2;
798 799
799 INIT_LIST_HEAD(&tmp); 800 INIT_LIST_HEAD(&tmp);
@@ -801,9 +802,14 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
801 spin_lock(lock); 802 spin_lock(lock);
802 while (!list_empty(list)) { 803 while (!list_empty(list)) {
803 bh = BH_ENTRY(list->next); 804 bh = BH_ENTRY(list->next);
805 mapping = bh->b_assoc_map;
804 __remove_assoc_queue(bh); 806 __remove_assoc_queue(bh);
807 /* Avoid race with mark_buffer_dirty_inode() which does
808 * a lockless check and we rely on seeing the dirty bit */
809 smp_mb();
805 if (buffer_dirty(bh) || buffer_locked(bh)) { 810 if (buffer_dirty(bh) || buffer_locked(bh)) {
806 list_add(&bh->b_assoc_buffers, &tmp); 811 list_add(&bh->b_assoc_buffers, &tmp);
812 bh->b_assoc_map = mapping;
807 if (buffer_dirty(bh)) { 813 if (buffer_dirty(bh)) {
808 get_bh(bh); 814 get_bh(bh);
809 spin_unlock(lock); 815 spin_unlock(lock);
@@ -822,8 +828,17 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
822 828
823 while (!list_empty(&tmp)) { 829 while (!list_empty(&tmp)) {
824 bh = BH_ENTRY(tmp.prev); 830 bh = BH_ENTRY(tmp.prev);
825 list_del_init(&bh->b_assoc_buffers);
826 get_bh(bh); 831 get_bh(bh);
832 mapping = bh->b_assoc_map;
833 __remove_assoc_queue(bh);
834 /* Avoid race with mark_buffer_dirty_inode() which does
835 * a lockless check and we rely on seeing the dirty bit */
836 smp_mb();
837 if (buffer_dirty(bh)) {
838 list_add(&bh->b_assoc_buffers,
839 &bh->b_assoc_map->private_list);
840 bh->b_assoc_map = mapping;
841 }
827 spin_unlock(lock); 842 spin_unlock(lock);
828 wait_on_buffer(bh); 843 wait_on_buffer(bh);
829 if (!buffer_uptodate(bh)) 844 if (!buffer_uptodate(bh))
@@ -1164,7 +1179,7 @@ __getblk_slow(struct block_device *bdev, sector_t block, int size)
1164 * mark_buffer_dirty() is atomic. It takes bh->b_page->mapping->private_lock, 1179 * mark_buffer_dirty() is atomic. It takes bh->b_page->mapping->private_lock,
1165 * mapping->tree_lock and the global inode_lock. 1180 * mapping->tree_lock and the global inode_lock.
1166 */ 1181 */
1167void fastcall mark_buffer_dirty(struct buffer_head *bh) 1182void mark_buffer_dirty(struct buffer_head *bh)
1168{ 1183{
1169 WARN_ON_ONCE(!buffer_uptodate(bh)); 1184 WARN_ON_ONCE(!buffer_uptodate(bh));
1170 if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh)) 1185 if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh))
@@ -1195,7 +1210,7 @@ void __brelse(struct buffer_head * buf)
1195void __bforget(struct buffer_head *bh) 1210void __bforget(struct buffer_head *bh)
1196{ 1211{
1197 clear_buffer_dirty(bh); 1212 clear_buffer_dirty(bh);
1198 if (!list_empty(&bh->b_assoc_buffers)) { 1213 if (bh->b_assoc_map) {
1199 struct address_space *buffer_mapping = bh->b_page->mapping; 1214 struct address_space *buffer_mapping = bh->b_page->mapping;
1200 1215
1201 spin_lock(&buffer_mapping->private_lock); 1216 spin_lock(&buffer_mapping->private_lock);
@@ -1436,6 +1451,7 @@ void invalidate_bh_lrus(void)
1436{ 1451{
1437 on_each_cpu(invalidate_bh_lru, NULL, 1, 1); 1452 on_each_cpu(invalidate_bh_lru, NULL, 1, 1);
1438} 1453}
1454EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
1439 1455
1440void set_bh_page(struct buffer_head *bh, 1456void set_bh_page(struct buffer_head *bh,
1441 struct page *page, unsigned long offset) 1457 struct page *page, unsigned long offset)
@@ -1798,7 +1814,7 @@ void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
1798 start = max(from, block_start); 1814 start = max(from, block_start);
1799 size = min(to, block_end) - start; 1815 size = min(to, block_end) - start;
1800 1816
1801 zero_user_page(page, start, size, KM_USER0); 1817 zero_user(page, start, size);
1802 set_buffer_uptodate(bh); 1818 set_buffer_uptodate(bh);
1803 } 1819 }
1804 1820
@@ -1861,19 +1877,10 @@ static int __block_prepare_write(struct inode *inode, struct page *page,
1861 mark_buffer_dirty(bh); 1877 mark_buffer_dirty(bh);
1862 continue; 1878 continue;
1863 } 1879 }
1864 if (block_end > to || block_start < from) { 1880 if (block_end > to || block_start < from)
1865 void *kaddr; 1881 zero_user_segments(page,
1866 1882 to, block_end,
1867 kaddr = kmap_atomic(page, KM_USER0); 1883 block_start, from);
1868 if (block_end > to)
1869 memset(kaddr+to, 0,
1870 block_end-to);
1871 if (block_start < from)
1872 memset(kaddr+block_start,
1873 0, from-block_start);
1874 flush_dcache_page(page);
1875 kunmap_atomic(kaddr, KM_USER0);
1876 }
1877 continue; 1884 continue;
1878 } 1885 }
1879 } 1886 }
@@ -2104,8 +2111,7 @@ int block_read_full_page(struct page *page, get_block_t *get_block)
2104 SetPageError(page); 2111 SetPageError(page);
2105 } 2112 }
2106 if (!buffer_mapped(bh)) { 2113 if (!buffer_mapped(bh)) {
2107 zero_user_page(page, i * blocksize, blocksize, 2114 zero_user(page, i * blocksize, blocksize);
2108 KM_USER0);
2109 if (!err) 2115 if (!err)
2110 set_buffer_uptodate(bh); 2116 set_buffer_uptodate(bh);
2111 continue; 2117 continue;
@@ -2218,7 +2224,7 @@ int cont_expand_zero(struct file *file, struct address_space *mapping,
2218 &page, &fsdata); 2224 &page, &fsdata);
2219 if (err) 2225 if (err)
2220 goto out; 2226 goto out;
2221 zero_user_page(page, zerofrom, len, KM_USER0); 2227 zero_user(page, zerofrom, len);
2222 err = pagecache_write_end(file, mapping, curpos, len, len, 2228 err = pagecache_write_end(file, mapping, curpos, len, len,
2223 page, fsdata); 2229 page, fsdata);
2224 if (err < 0) 2230 if (err < 0)
@@ -2245,7 +2251,7 @@ int cont_expand_zero(struct file *file, struct address_space *mapping,
2245 &page, &fsdata); 2251 &page, &fsdata);
2246 if (err) 2252 if (err)
2247 goto out; 2253 goto out;
2248 zero_user_page(page, zerofrom, len, KM_USER0); 2254 zero_user(page, zerofrom, len);
2249 err = pagecache_write_end(file, mapping, curpos, len, len, 2255 err = pagecache_write_end(file, mapping, curpos, len, len,
2250 page, fsdata); 2256 page, fsdata);
2251 if (err < 0) 2257 if (err < 0)
@@ -2422,7 +2428,6 @@ int nobh_write_begin(struct file *file, struct address_space *mapping,
2422 unsigned block_in_page; 2428 unsigned block_in_page;
2423 unsigned block_start, block_end; 2429 unsigned block_start, block_end;
2424 sector_t block_in_file; 2430 sector_t block_in_file;
2425 char *kaddr;
2426 int nr_reads = 0; 2431 int nr_reads = 0;
2427 int ret = 0; 2432 int ret = 0;
2428 int is_mapped_to_disk = 1; 2433 int is_mapped_to_disk = 1;
@@ -2493,13 +2498,8 @@ int nobh_write_begin(struct file *file, struct address_space *mapping,
2493 continue; 2498 continue;
2494 } 2499 }
2495 if (buffer_new(bh) || !buffer_mapped(bh)) { 2500 if (buffer_new(bh) || !buffer_mapped(bh)) {
2496 kaddr = kmap_atomic(page, KM_USER0); 2501 zero_user_segments(page, block_start, from,
2497 if (block_start < from) 2502 to, block_end);
2498 memset(kaddr+block_start, 0, from-block_start);
2499 if (block_end > to)
2500 memset(kaddr + to, 0, block_end - to);
2501 flush_dcache_page(page);
2502 kunmap_atomic(kaddr, KM_USER0);
2503 continue; 2503 continue;
2504 } 2504 }
2505 if (buffer_uptodate(bh)) 2505 if (buffer_uptodate(bh))
@@ -2636,7 +2636,7 @@ int nobh_writepage(struct page *page, get_block_t *get_block,
2636 * the page size, the remaining memory is zeroed when mapped, and 2636 * the page size, the remaining memory is zeroed when mapped, and
2637 * writes to that region are not written out to the file." 2637 * writes to that region are not written out to the file."
2638 */ 2638 */
2639 zero_user_page(page, offset, PAGE_CACHE_SIZE - offset, KM_USER0); 2639 zero_user_segment(page, offset, PAGE_CACHE_SIZE);
2640out: 2640out:
2641 ret = mpage_writepage(page, get_block, wbc); 2641 ret = mpage_writepage(page, get_block, wbc);
2642 if (ret == -EAGAIN) 2642 if (ret == -EAGAIN)
@@ -2709,7 +2709,7 @@ has_buffers:
2709 if (page_has_buffers(page)) 2709 if (page_has_buffers(page))
2710 goto has_buffers; 2710 goto has_buffers;
2711 } 2711 }
2712 zero_user_page(page, offset, length, KM_USER0); 2712 zero_user(page, offset, length);
2713 set_page_dirty(page); 2713 set_page_dirty(page);
2714 err = 0; 2714 err = 0;
2715 2715
@@ -2785,7 +2785,7 @@ int block_truncate_page(struct address_space *mapping,
2785 goto unlock; 2785 goto unlock;
2786 } 2786 }
2787 2787
2788 zero_user_page(page, offset, length, KM_USER0); 2788 zero_user(page, offset, length);
2789 mark_buffer_dirty(bh); 2789 mark_buffer_dirty(bh);
2790 err = 0; 2790 err = 0;
2791 2791
@@ -2831,7 +2831,7 @@ int block_write_full_page(struct page *page, get_block_t *get_block,
2831 * the page size, the remaining memory is zeroed when mapped, and 2831 * the page size, the remaining memory is zeroed when mapped, and
2832 * writes to that region are not written out to the file." 2832 * writes to that region are not written out to the file."
2833 */ 2833 */
2834 zero_user_page(page, offset, PAGE_CACHE_SIZE - offset, KM_USER0); 2834 zero_user_segment(page, offset, PAGE_CACHE_SIZE);
2835 return __block_write_full_page(inode, page, get_block, wbc); 2835 return __block_write_full_page(inode, page, get_block, wbc);
2836} 2836}
2837 2837
@@ -3037,7 +3037,7 @@ drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
3037 do { 3037 do {
3038 struct buffer_head *next = bh->b_this_page; 3038 struct buffer_head *next = bh->b_this_page;
3039 3039
3040 if (!list_empty(&bh->b_assoc_buffers)) 3040 if (bh->b_assoc_map)
3041 __remove_assoc_queue(bh); 3041 __remove_assoc_queue(bh);
3042 bh = next; 3042 bh = next;
3043 } while (bh != head); 3043 } while (bh != head);
@@ -3169,7 +3169,7 @@ static void recalc_bh_state(void)
3169 3169
3170struct buffer_head *alloc_buffer_head(gfp_t gfp_flags) 3170struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
3171{ 3171{
3172 struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, 3172 struct buffer_head *ret = kmem_cache_alloc(bh_cachep,
3173 set_migrateflags(gfp_flags, __GFP_RECLAIMABLE)); 3173 set_migrateflags(gfp_flags, __GFP_RECLAIMABLE));
3174 if (ret) { 3174 if (ret) {
3175 INIT_LIST_HEAD(&ret->b_assoc_buffers); 3175 INIT_LIST_HEAD(&ret->b_assoc_buffers);
@@ -3257,12 +3257,24 @@ int bh_submit_read(struct buffer_head *bh)
3257} 3257}
3258EXPORT_SYMBOL(bh_submit_read); 3258EXPORT_SYMBOL(bh_submit_read);
3259 3259
3260static void
3261init_buffer_head(struct kmem_cache *cachep, void *data)
3262{
3263 struct buffer_head *bh = data;
3264
3265 memset(bh, 0, sizeof(*bh));
3266 INIT_LIST_HEAD(&bh->b_assoc_buffers);
3267}
3268
3260void __init buffer_init(void) 3269void __init buffer_init(void)
3261{ 3270{
3262 int nrpages; 3271 int nrpages;
3263 3272
3264 bh_cachep = KMEM_CACHE(buffer_head, 3273 bh_cachep = kmem_cache_create("buffer_head",
3265 SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD); 3274 sizeof(struct buffer_head), 0,
3275 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
3276 SLAB_MEM_SPREAD),
3277 init_buffer_head);
3266 3278
3267 /* 3279 /*
3268 * Limit the bh occupancy to 10% of ZONE_NORMAL 3280 * Limit the bh occupancy to 10% of ZONE_NORMAL
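
The last buffer.c hunk trades the per-allocation zeroing of kmem_cache_zalloc() for a slab constructor: init_buffer_head() runs when a slab page is populated, not on every kmem_cache_alloc(), so objects must already be back in their constructed state by the time they are freed. A hypothetical user-space analogue of that design choice, using a trivial free list in place of the slab allocator:

	#include <stdlib.h>
	#include <string.h>

	struct obj {
		struct obj *next_free;	/* free-list link */
		int in_use_state;	/* example field callers reset before free */
	};

	static struct obj *free_list;

	static void obj_ctor(struct obj *o)
	{
		memset(o, 0, sizeof(*o));	/* full initialisation happens once */
	}

	static struct obj *obj_alloc(void)
	{
		struct obj *o = free_list;

		if (o) {
			free_list = o->next_free;
			return o;	/* reused object is already initialised */
		}
		o = malloc(sizeof(*o));
		if (o)
			obj_ctor(o);
		return o;
	}

	static void obj_free(struct obj *o)
	{
		o->in_use_state = 0;		/* hand it back in constructed state */
		o->next_free = free_list;
		free_list = o;
	}

	int main(void)
	{
		struct obj *o = obj_alloc();

		if (o) {
			o->in_use_state = 1;
			obj_free(o);
		}
		return 0;
	}
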
diff --git a/fs/char_dev.c b/fs/char_dev.c
index 2c7a8b5b4598..038674aa88a7 100644
--- a/fs/char_dev.c
+++ b/fs/char_dev.c
@@ -357,7 +357,7 @@ void cdev_put(struct cdev *p)
357/* 357/*
358 * Called every time a character special file is opened 358 * Called every time a character special file is opened
359 */ 359 */
360int chrdev_open(struct inode * inode, struct file * filp) 360static int chrdev_open(struct inode *inode, struct file *filp)
361{ 361{
362 struct cdev *p; 362 struct cdev *p;
363 struct cdev *new = NULL; 363 struct cdev *new = NULL;
diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c
index 413ee2349d1a..6ad447529961 100644
--- a/fs/cifs/cifs_dfs_ref.c
+++ b/fs/cifs/cifs_dfs_ref.c
@@ -259,18 +259,18 @@ static int add_mount_helper(struct vfsmount *newmnt, struct nameidata *nd,
259 int err; 259 int err;
260 260
261 mntget(newmnt); 261 mntget(newmnt);
262 err = do_add_mount(newmnt, nd, nd->mnt->mnt_flags, mntlist); 262 err = do_add_mount(newmnt, nd, nd->path.mnt->mnt_flags, mntlist);
263 switch (err) { 263 switch (err) {
264 case 0: 264 case 0:
265 dput(nd->dentry); 265 dput(nd->path.dentry);
266 mntput(nd->mnt); 266 mntput(nd->path.mnt);
267 nd->mnt = newmnt; 267 nd->path.mnt = newmnt;
268 nd->dentry = dget(newmnt->mnt_root); 268 nd->path.dentry = dget(newmnt->mnt_root);
269 break; 269 break;
270 case -EBUSY: 270 case -EBUSY:
271 /* someone else made a mount here whilst we were busy */ 271 /* someone else made a mount here whilst we were busy */
272 while (d_mountpoint(nd->dentry) && 272 while (d_mountpoint(nd->path.dentry) &&
273 follow_down(&nd->mnt, &nd->dentry)) 273 follow_down(&nd->path.mnt, &nd->path.dentry))
274 ; 274 ;
275 err = 0; 275 err = 0;
276 default: 276 default:
@@ -307,8 +307,8 @@ cifs_dfs_follow_mountpoint(struct dentry *dentry, struct nameidata *nd)
307 307
308 xid = GetXid(); 308 xid = GetXid();
309 309
310 dput(nd->dentry); 310 dput(nd->path.dentry);
311 nd->dentry = dget(dentry); 311 nd->path.dentry = dget(dentry);
312 312
313 cifs_sb = CIFS_SB(dentry->d_inode->i_sb); 313 cifs_sb = CIFS_SB(dentry->d_inode->i_sb);
314 ses = cifs_sb->tcon->ses; 314 ses = cifs_sb->tcon->ses;
@@ -340,7 +340,8 @@ cifs_dfs_follow_mountpoint(struct dentry *dentry, struct nameidata *nd)
340 rc = -EINVAL; 340 rc = -EINVAL;
341 goto out_err; 341 goto out_err;
342 } 342 }
343 mnt = cifs_dfs_do_refmount(nd->mnt, nd->dentry, 343 mnt = cifs_dfs_do_refmount(nd->path.mnt,
344 nd->path.dentry,
344 referrals[i].node_name); 345 referrals[i].node_name);
345 cFYI(1, ("%s: cifs_dfs_do_refmount:%s , mnt:%p", 346 cFYI(1, ("%s: cifs_dfs_do_refmount:%s , mnt:%p",
346 __FUNCTION__, 347 __FUNCTION__,
@@ -357,7 +358,7 @@ cifs_dfs_follow_mountpoint(struct dentry *dentry, struct nameidata *nd)
357 if (IS_ERR(mnt)) 358 if (IS_ERR(mnt))
358 goto out_err; 359 goto out_err;
359 360
360 nd->mnt->mnt_flags |= MNT_SHRINKABLE; 361 nd->path.mnt->mnt_flags |= MNT_SHRINKABLE;
361 rc = add_mount_helper(mnt, nd, &cifs_dfs_automount_list); 362 rc = add_mount_helper(mnt, nd, &cifs_dfs_automount_list);
362 363
363out: 364out:
@@ -367,7 +368,7 @@ out:
367 cFYI(1, ("leaving %s" , __FUNCTION__)); 368 cFYI(1, ("leaving %s" , __FUNCTION__));
368 return ERR_PTR(rc); 369 return ERR_PTR(rc);
369out_err: 370out_err:
370 path_release(nd); 371 path_put(&nd->path);
371 goto out; 372 goto out;
372} 373}
373 374
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index e9f4ec701092..fcc434227691 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -147,10 +147,11 @@ cifs_read_super(struct super_block *sb, void *data,
147#endif 147#endif
148 sb->s_blocksize = CIFS_MAX_MSGSIZE; 148 sb->s_blocksize = CIFS_MAX_MSGSIZE;
149 sb->s_blocksize_bits = 14; /* default 2**14 = CIFS_MAX_MSGSIZE */ 149 sb->s_blocksize_bits = 14; /* default 2**14 = CIFS_MAX_MSGSIZE */
150 inode = iget(sb, ROOT_I); 150 inode = cifs_iget(sb, ROOT_I);
151 151
152 if (!inode) { 152 if (IS_ERR(inode)) {
153 rc = -ENOMEM; 153 rc = PTR_ERR(inode);
154 inode = NULL;
154 goto out_no_root; 155 goto out_no_root;
155 } 156 }
156 157
@@ -520,7 +521,6 @@ static int cifs_remount(struct super_block *sb, int *flags, char *data)
520} 521}
521 522
522static const struct super_operations cifs_super_ops = { 523static const struct super_operations cifs_super_ops = {
523 .read_inode = cifs_read_inode,
524 .put_super = cifs_put_super, 524 .put_super = cifs_put_super,
525 .statfs = cifs_statfs, 525 .statfs = cifs_statfs,
526 .alloc_inode = cifs_alloc_inode, 526 .alloc_inode = cifs_alloc_inode,
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index 195b14de5567..68978306c3ca 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -44,6 +44,7 @@ extern void cifs_read_inode(struct inode *);
44 44
45/* Functions related to inodes */ 45/* Functions related to inodes */
46extern const struct inode_operations cifs_dir_inode_ops; 46extern const struct inode_operations cifs_dir_inode_ops;
47extern struct inode *cifs_iget(struct super_block *, unsigned long);
47extern int cifs_create(struct inode *, struct dentry *, int, 48extern int cifs_create(struct inode *, struct dentry *, int,
48 struct nameidata *); 49 struct nameidata *);
49extern struct dentry *cifs_lookup(struct inode *, struct dentry *, 50extern struct dentry *cifs_lookup(struct inode *, struct dentry *,
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index d9567ba2960b..b1a4a65eaa08 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -586,10 +586,18 @@ static const struct inode_operations cifs_ipc_inode_ops = {
586}; 586};
587 587
588/* gets root inode */ 588/* gets root inode */
589void cifs_read_inode(struct inode *inode) 589struct inode *cifs_iget(struct super_block *sb, unsigned long ino)
590{ 590{
591 int xid, rc; 591 int xid;
592 struct cifs_sb_info *cifs_sb; 592 struct cifs_sb_info *cifs_sb;
593 struct inode *inode;
594 long rc;
595
596 inode = iget_locked(sb, ino);
597 if (!inode)
598 return ERR_PTR(-ENOMEM);
599 if (!(inode->i_state & I_NEW))
600 return inode;
593 601
594 cifs_sb = CIFS_SB(inode->i_sb); 602 cifs_sb = CIFS_SB(inode->i_sb);
595 xid = GetXid(); 603 xid = GetXid();
@@ -606,10 +614,18 @@ void cifs_read_inode(struct inode *inode)
606 inode->i_fop = &simple_dir_operations; 614 inode->i_fop = &simple_dir_operations;
607 inode->i_uid = cifs_sb->mnt_uid; 615 inode->i_uid = cifs_sb->mnt_uid;
608 inode->i_gid = cifs_sb->mnt_gid; 616 inode->i_gid = cifs_sb->mnt_gid;
617 _FreeXid(xid);
618 iget_failed(inode);
619 return ERR_PTR(rc);
609 } 620 }
610 621
611 /* can not call macro FreeXid here since in a void func */ 622 unlock_new_inode(inode);
623
624 /* can not call macro FreeXid here since in a void func
625 * TODO: This is no longer true
626 */
612 _FreeXid(xid); 627 _FreeXid(xid);
628 return inode;
613} 629}
614 630
615int cifs_unlink(struct inode *inode, struct dentry *direntry) 631int cifs_unlink(struct inode *inode, struct dentry *direntry)
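
The iget() to cifs_iget() conversion above follows the standard iget_locked() pattern: look the inode up in the icache, return it directly if it was already initialized, otherwise fill it in and finish with unlock_new_inode() on success or iget_failed() on error. A minimal sketch of that shape, with hypothetical foofs_* names standing in for the filesystem-specific parts:

struct inode *foofs_iget(struct super_block *sb, unsigned long ino)
{
	struct inode *inode;
	int rc;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);	/* allocation failed */
	if (!(inode->i_state & I_NEW))
		return inode;			/* already cached and initialized */

	rc = foofs_fill_inode(inode);		/* hypothetical fill helper */
	if (rc) {
		iget_failed(inode);		/* unhashes and releases the half-built inode */
		return ERR_PTR(rc);
	}

	unlock_new_inode(inode);		/* clears I_NEW and wakes any waiters */
	return inode;
}
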
@@ -1386,7 +1402,7 @@ static int cifs_truncate_page(struct address_space *mapping, loff_t from)
1386 if (!page) 1402 if (!page)
1387 return -ENOMEM; 1403 return -ENOMEM;
1388 1404
1389 zero_user_page(page, offset, PAGE_CACHE_SIZE - offset, KM_USER0); 1405 zero_user_segment(page, offset, PAGE_CACHE_SIZE);
1390 unlock_page(page); 1406 unlock_page(page);
1391 page_cache_release(page); 1407 page_cache_release(page);
1392 return rc; 1408 return rc;
diff --git a/fs/coda/pioctl.c b/fs/coda/pioctl.c
index 2bf3026adc80..c21a1f552a63 100644
--- a/fs/coda/pioctl.c
+++ b/fs/coda/pioctl.c
@@ -75,12 +75,12 @@ static int coda_pioctl(struct inode * inode, struct file * filp,
75 if ( error ) { 75 if ( error ) {
76 return error; 76 return error;
77 } else { 77 } else {
78 target_inode = nd.dentry->d_inode; 78 target_inode = nd.path.dentry->d_inode;
79 } 79 }
80 80
81 /* return if it is not a Coda inode */ 81 /* return if it is not a Coda inode */
82 if ( target_inode->i_sb != inode->i_sb ) { 82 if ( target_inode->i_sb != inode->i_sb ) {
83 path_release(&nd); 83 path_put(&nd.path);
84 return -EINVAL; 84 return -EINVAL;
85 } 85 }
86 86
@@ -89,7 +89,7 @@ static int coda_pioctl(struct inode * inode, struct file * filp,
89 89
90 error = venus_pioctl(inode->i_sb, &(cnp->c_fid), cmd, &data); 90 error = venus_pioctl(inode->i_sb, &(cnp->c_fid), cmd, &data);
91 91
92 path_release(&nd); 92 path_put(&nd.path);
93 return error; 93 return error;
94} 94}
95 95
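
The coda change above is the same nameidata conversion seen throughout this series: the dentry/vfsmount pair now lives in nd.path, and path_release(&nd) becomes path_put(&nd.path). A hedged sketch of the resulting lookup-and-release shape (example_check_sb is a hypothetical helper, not from this patch):

static int example_check_sb(const char *name, struct super_block *sb)
{
	struct nameidata nd;
	int error;

	error = path_lookup(name, LOOKUP_FOLLOW, &nd);
	if (error)
		return error;

	/* dentry and vfsmount are carried together in nd.path */
	if (nd.path.dentry->d_inode->i_sb != sb)
		error = -EINVAL;

	path_put(&nd.path);		/* replaces path_release(&nd); drops both references */
	return error;
}
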
diff --git a/fs/compat.c b/fs/compat.c
index 5216c3fd7517..2ce4456aad30 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -241,10 +241,10 @@ asmlinkage long compat_sys_statfs(const char __user *path, struct compat_statfs
241 error = user_path_walk(path, &nd); 241 error = user_path_walk(path, &nd);
242 if (!error) { 242 if (!error) {
243 struct kstatfs tmp; 243 struct kstatfs tmp;
244 error = vfs_statfs(nd.dentry, &tmp); 244 error = vfs_statfs(nd.path.dentry, &tmp);
245 if (!error) 245 if (!error)
246 error = put_compat_statfs(buf, &tmp); 246 error = put_compat_statfs(buf, &tmp);
247 path_release(&nd); 247 path_put(&nd.path);
248 } 248 }
249 return error; 249 return error;
250} 250}
@@ -309,10 +309,10 @@ asmlinkage long compat_sys_statfs64(const char __user *path, compat_size_t sz, s
309 error = user_path_walk(path, &nd); 309 error = user_path_walk(path, &nd);
310 if (!error) { 310 if (!error) {
311 struct kstatfs tmp; 311 struct kstatfs tmp;
312 error = vfs_statfs(nd.dentry, &tmp); 312 error = vfs_statfs(nd.path.dentry, &tmp);
313 if (!error) 313 if (!error)
314 error = put_compat_statfs64(buf, &tmp); 314 error = put_compat_statfs64(buf, &tmp);
315 path_release(&nd); 315 path_put(&nd.path);
316 } 316 }
317 return error; 317 return error;
318} 318}
@@ -702,9 +702,6 @@ static int do_nfs4_super_data_conv(void *raw_data)
702 real->flags = raw->flags; 702 real->flags = raw->flags;
703 real->version = raw->version; 703 real->version = raw->version;
704 } 704 }
705 else {
706 return -EINVAL;
707 }
708 705
709 return 0; 706 return 0;
710} 707}
@@ -2083,51 +2080,6 @@ long asmlinkage compat_sys_nfsservctl(int cmd, void *notused, void *notused2)
2083 2080
2084#ifdef CONFIG_EPOLL 2081#ifdef CONFIG_EPOLL
2085 2082
2086#ifdef CONFIG_HAS_COMPAT_EPOLL_EVENT
2087asmlinkage long compat_sys_epoll_ctl(int epfd, int op, int fd,
2088 struct compat_epoll_event __user *event)
2089{
2090 long err = 0;
2091 struct compat_epoll_event user;
2092 struct epoll_event __user *kernel = NULL;
2093
2094 if (event) {
2095 if (copy_from_user(&user, event, sizeof(user)))
2096 return -EFAULT;
2097 kernel = compat_alloc_user_space(sizeof(struct epoll_event));
2098 err |= __put_user(user.events, &kernel->events);
2099 err |= __put_user(user.data, &kernel->data);
2100 }
2101
2102 return err ? err : sys_epoll_ctl(epfd, op, fd, kernel);
2103}
2104
2105
2106asmlinkage long compat_sys_epoll_wait(int epfd,
2107 struct compat_epoll_event __user *events,
2108 int maxevents, int timeout)
2109{
2110 long i, ret, err = 0;
2111 struct epoll_event __user *kbuf;
2112 struct epoll_event ev;
2113
2114 if ((maxevents <= 0) ||
2115 (maxevents > (INT_MAX / sizeof(struct epoll_event))))
2116 return -EINVAL;
2117 kbuf = compat_alloc_user_space(sizeof(struct epoll_event) * maxevents);
2118 ret = sys_epoll_wait(epfd, kbuf, maxevents, timeout);
2119 for (i = 0; i < ret; i++) {
2120 err |= __get_user(ev.events, &kbuf[i].events);
2121 err |= __get_user(ev.data, &kbuf[i].data);
2122 err |= __put_user(ev.events, &events->events);
2123 err |= __put_user_unaligned(ev.data, &events->data);
2124 events++;
2125 }
2126
2127 return err ? -EFAULT: ret;
2128}
2129#endif /* CONFIG_HAS_COMPAT_EPOLL_EVENT */
2130
2131#ifdef TIF_RESTORE_SIGMASK 2083#ifdef TIF_RESTORE_SIGMASK
2132asmlinkage long compat_sys_epoll_pwait(int epfd, 2084asmlinkage long compat_sys_epoll_pwait(int epfd,
2133 struct compat_epoll_event __user *events, 2085 struct compat_epoll_event __user *events,
@@ -2153,11 +2105,7 @@ asmlinkage long compat_sys_epoll_pwait(int epfd,
2153 sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved); 2105 sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
2154 } 2106 }
2155 2107
2156#ifdef CONFIG_HAS_COMPAT_EPOLL_EVENT
2157 err = compat_sys_epoll_wait(epfd, events, maxevents, timeout);
2158#else
2159 err = sys_epoll_wait(epfd, events, maxevents, timeout); 2108 err = sys_epoll_wait(epfd, events, maxevents, timeout);
2160#endif
2161 2109
2162 /* 2110 /*
2163 * If we changed the signal mask, we need to restore the original one. 2111 * If we changed the signal mask, we need to restore the original one.
@@ -2206,19 +2154,41 @@ asmlinkage long compat_sys_signalfd(int ufd,
2206 2154
2207#ifdef CONFIG_TIMERFD 2155#ifdef CONFIG_TIMERFD
2208 2156
2209asmlinkage long compat_sys_timerfd(int ufd, int clockid, int flags, 2157asmlinkage long compat_sys_timerfd_settime(int ufd, int flags,
2210 const struct compat_itimerspec __user *utmr) 2158 const struct compat_itimerspec __user *utmr,
2159 struct compat_itimerspec __user *otmr)
2211{ 2160{
2161 int error;
2212 struct itimerspec t; 2162 struct itimerspec t;
2213 struct itimerspec __user *ut; 2163 struct itimerspec __user *ut;
2214 2164
2215 if (get_compat_itimerspec(&t, utmr)) 2165 if (get_compat_itimerspec(&t, utmr))
2216 return -EFAULT; 2166 return -EFAULT;
2217 ut = compat_alloc_user_space(sizeof(*ut)); 2167 ut = compat_alloc_user_space(2 * sizeof(struct itimerspec));
2218 if (copy_to_user(ut, &t, sizeof(t))) 2168 if (copy_to_user(&ut[0], &t, sizeof(t)))
2219 return -EFAULT; 2169 return -EFAULT;
2170 error = sys_timerfd_settime(ufd, flags, &ut[0], &ut[1]);
2171 if (!error && otmr)
2172 error = (copy_from_user(&t, &ut[1], sizeof(struct itimerspec)) ||
2173 put_compat_itimerspec(otmr, &t)) ? -EFAULT: 0;
2220 2174
2221 return sys_timerfd(ufd, clockid, flags, ut); 2175 return error;
2176}
2177
2178asmlinkage long compat_sys_timerfd_gettime(int ufd,
2179 struct compat_itimerspec __user *otmr)
2180{
2181 int error;
2182 struct itimerspec t;
2183 struct itimerspec __user *ut;
2184
2185 ut = compat_alloc_user_space(sizeof(struct itimerspec));
2186 error = sys_timerfd_gettime(ufd, ut);
2187 if (!error)
2188 error = (copy_from_user(&t, ut, sizeof(struct itimerspec)) ||
2189 put_compat_itimerspec(otmr, &t)) ? -EFAULT: 0;
2190
2191 return error;
2222} 2192}
2223 2193
2224#endif /* CONFIG_TIMERFD */ 2194#endif /* CONFIG_TIMERFD */
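
The timerfd wrappers above show the usual compat pattern: convert the 32-bit structure to the native layout, stage it in memory obtained from compat_alloc_user_space(), invoke the native syscall, then convert any output back for the 32-bit caller. A generic sketch of that shape, with sys_foo, struct foo_ts and the get/put converters as hypothetical stand-ins:

asmlinkage long compat_sys_foo(int fd, const struct compat_foo_ts __user *uin,
			       struct compat_foo_ts __user *uout)
{
	struct foo_ts t;
	struct foo_ts __user *ut;
	int error;

	if (get_compat_foo_ts(&t, uin))		/* hypothetical 32-bit -> native converter */
		return -EFAULT;
	ut = compat_alloc_user_space(2 * sizeof(*ut));	/* ut[0] = input, ut[1] = output */
	if (copy_to_user(&ut[0], &t, sizeof(t)))
		return -EFAULT;
	error = sys_foo(fd, &ut[0], &ut[1]);	/* hypothetical native syscall */
	if (!error && uout)
		error = (copy_from_user(&t, &ut[1], sizeof(t)) ||
			 put_compat_foo_ts(uout, &t)) ? -EFAULT : 0;
	return error;
}
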
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index ffdc022cae64..c6e72aebd16b 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -78,7 +78,6 @@
78#include <linux/mii.h> 78#include <linux/mii.h>
79#include <linux/if_bonding.h> 79#include <linux/if_bonding.h>
80#include <linux/watchdog.h> 80#include <linux/watchdog.h>
81#include <linux/dm-ioctl.h>
82 81
83#include <linux/soundcard.h> 82#include <linux/soundcard.h>
84#include <linux/lp.h> 83#include <linux/lp.h>
@@ -1993,39 +1992,6 @@ COMPATIBLE_IOCTL(STOP_ARRAY_RO)
1993COMPATIBLE_IOCTL(RESTART_ARRAY_RW) 1992COMPATIBLE_IOCTL(RESTART_ARRAY_RW)
1994COMPATIBLE_IOCTL(GET_BITMAP_FILE) 1993COMPATIBLE_IOCTL(GET_BITMAP_FILE)
1995ULONG_IOCTL(SET_BITMAP_FILE) 1994ULONG_IOCTL(SET_BITMAP_FILE)
1996/* DM */
1997COMPATIBLE_IOCTL(DM_VERSION_32)
1998COMPATIBLE_IOCTL(DM_REMOVE_ALL_32)
1999COMPATIBLE_IOCTL(DM_LIST_DEVICES_32)
2000COMPATIBLE_IOCTL(DM_DEV_CREATE_32)
2001COMPATIBLE_IOCTL(DM_DEV_REMOVE_32)
2002COMPATIBLE_IOCTL(DM_DEV_RENAME_32)
2003COMPATIBLE_IOCTL(DM_DEV_SUSPEND_32)
2004COMPATIBLE_IOCTL(DM_DEV_STATUS_32)
2005COMPATIBLE_IOCTL(DM_DEV_WAIT_32)
2006COMPATIBLE_IOCTL(DM_TABLE_LOAD_32)
2007COMPATIBLE_IOCTL(DM_TABLE_CLEAR_32)
2008COMPATIBLE_IOCTL(DM_TABLE_DEPS_32)
2009COMPATIBLE_IOCTL(DM_TABLE_STATUS_32)
2010COMPATIBLE_IOCTL(DM_LIST_VERSIONS_32)
2011COMPATIBLE_IOCTL(DM_TARGET_MSG_32)
2012COMPATIBLE_IOCTL(DM_DEV_SET_GEOMETRY_32)
2013COMPATIBLE_IOCTL(DM_VERSION)
2014COMPATIBLE_IOCTL(DM_REMOVE_ALL)
2015COMPATIBLE_IOCTL(DM_LIST_DEVICES)
2016COMPATIBLE_IOCTL(DM_DEV_CREATE)
2017COMPATIBLE_IOCTL(DM_DEV_REMOVE)
2018COMPATIBLE_IOCTL(DM_DEV_RENAME)
2019COMPATIBLE_IOCTL(DM_DEV_SUSPEND)
2020COMPATIBLE_IOCTL(DM_DEV_STATUS)
2021COMPATIBLE_IOCTL(DM_DEV_WAIT)
2022COMPATIBLE_IOCTL(DM_TABLE_LOAD)
2023COMPATIBLE_IOCTL(DM_TABLE_CLEAR)
2024COMPATIBLE_IOCTL(DM_TABLE_DEPS)
2025COMPATIBLE_IOCTL(DM_TABLE_STATUS)
2026COMPATIBLE_IOCTL(DM_LIST_VERSIONS)
2027COMPATIBLE_IOCTL(DM_TARGET_MSG)
2028COMPATIBLE_IOCTL(DM_DEV_SET_GEOMETRY)
2029/* Big K */ 1995/* Big K */
2030COMPATIBLE_IOCTL(PIO_FONT) 1996COMPATIBLE_IOCTL(PIO_FONT)
2031COMPATIBLE_IOCTL(GIO_FONT) 1997COMPATIBLE_IOCTL(GIO_FONT)
@@ -2887,7 +2853,7 @@ static void compat_ioctl_error(struct file *filp, unsigned int fd,
2887 /* find the name of the device. */ 2853 /* find the name of the device. */
2888 path = (char *)__get_free_page(GFP_KERNEL); 2854 path = (char *)__get_free_page(GFP_KERNEL);
2889 if (path) { 2855 if (path) {
2890 fn = d_path(filp->f_path.dentry, filp->f_path.mnt, path, PAGE_SIZE); 2856 fn = d_path(&filp->f_path, path, PAGE_SIZE);
2891 if (IS_ERR(fn)) 2857 if (IS_ERR(fn))
2892 fn = "?"; 2858 fn = "?";
2893 } 2859 }
@@ -2986,7 +2952,7 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
2986 } 2952 }
2987 2953
2988 do_ioctl: 2954 do_ioctl:
2989 error = vfs_ioctl(filp, fd, cmd, arg); 2955 error = do_vfs_ioctl(filp, fd, cmd, arg);
2990 out_fput: 2956 out_fput:
2991 fput_light(filp, fput_needed); 2957 fput_light(filp, fput_needed);
2992 out: 2958 out:
diff --git a/fs/configfs/symlink.c b/fs/configfs/symlink.c
index 22700d2857da..78929ea84ff2 100644
--- a/fs/configfs/symlink.c
+++ b/fs/configfs/symlink.c
@@ -99,11 +99,11 @@ static int get_target(const char *symname, struct nameidata *nd,
99 99
100 ret = path_lookup(symname, LOOKUP_FOLLOW|LOOKUP_DIRECTORY, nd); 100 ret = path_lookup(symname, LOOKUP_FOLLOW|LOOKUP_DIRECTORY, nd);
101 if (!ret) { 101 if (!ret) {
102 if (nd->dentry->d_sb == configfs_sb) { 102 if (nd->path.dentry->d_sb == configfs_sb) {
103 *target = configfs_get_config_item(nd->dentry); 103 *target = configfs_get_config_item(nd->path.dentry);
104 if (!*target) { 104 if (!*target) {
105 ret = -ENOENT; 105 ret = -ENOENT;
106 path_release(nd); 106 path_put(&nd->path);
107 } 107 }
108 } else 108 } else
109 ret = -EPERM; 109 ret = -EPERM;
@@ -141,7 +141,7 @@ int configfs_symlink(struct inode *dir, struct dentry *dentry, const char *symna
141 ret = create_link(parent_item, target_item, dentry); 141 ret = create_link(parent_item, target_item, dentry);
142 142
143 config_item_put(target_item); 143 config_item_put(target_item);
144 path_release(&nd); 144 path_put(&nd.path);
145 145
146out_put: 146out_put:
147 config_item_put(parent_item); 147 config_item_put(parent_item);
diff --git a/fs/dcache.c b/fs/dcache.c
index d9ca1e5ceb92..43455776711e 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -89,12 +89,20 @@ static void d_free(struct dentry *dentry)
89 if (dentry->d_op && dentry->d_op->d_release) 89 if (dentry->d_op && dentry->d_op->d_release)
90 dentry->d_op->d_release(dentry); 90 dentry->d_op->d_release(dentry);
91 /* if dentry was never inserted into hash, immediate free is OK */ 91 /* if dentry was never inserted into hash, immediate free is OK */
92 if (dentry->d_hash.pprev == NULL) 92 if (hlist_unhashed(&dentry->d_hash))
93 __d_free(dentry); 93 __d_free(dentry);
94 else 94 else
95 call_rcu(&dentry->d_u.d_rcu, d_callback); 95 call_rcu(&dentry->d_u.d_rcu, d_callback);
96} 96}
97 97
98static void dentry_lru_remove(struct dentry *dentry)
99{
100 if (!list_empty(&dentry->d_lru)) {
101 list_del_init(&dentry->d_lru);
102 dentry_stat.nr_unused--;
103 }
104}
105
98/* 106/*
99 * Release the dentry's inode, using the filesystem 107 * Release the dentry's inode, using the filesystem
100 * d_iput() operation if defined. 108 * d_iput() operation if defined.
@@ -211,13 +219,7 @@ repeat:
211unhash_it: 219unhash_it:
212 __d_drop(dentry); 220 __d_drop(dentry);
213kill_it: 221kill_it:
214 /* If dentry was on d_lru list 222 dentry_lru_remove(dentry);
215 * delete it from there
216 */
217 if (!list_empty(&dentry->d_lru)) {
218 list_del(&dentry->d_lru);
219 dentry_stat.nr_unused--;
220 }
221 dentry = d_kill(dentry); 223 dentry = d_kill(dentry);
222 if (dentry) 224 if (dentry)
223 goto repeat; 225 goto repeat;
@@ -285,10 +287,7 @@ int d_invalidate(struct dentry * dentry)
285static inline struct dentry * __dget_locked(struct dentry *dentry) 287static inline struct dentry * __dget_locked(struct dentry *dentry)
286{ 288{
287 atomic_inc(&dentry->d_count); 289 atomic_inc(&dentry->d_count);
288 if (!list_empty(&dentry->d_lru)) { 290 dentry_lru_remove(dentry);
289 dentry_stat.nr_unused--;
290 list_del_init(&dentry->d_lru);
291 }
292 return dentry; 291 return dentry;
293} 292}
294 293
@@ -404,10 +403,7 @@ static void prune_one_dentry(struct dentry * dentry)
404 403
405 if (dentry->d_op && dentry->d_op->d_delete) 404 if (dentry->d_op && dentry->d_op->d_delete)
406 dentry->d_op->d_delete(dentry); 405 dentry->d_op->d_delete(dentry);
407 if (!list_empty(&dentry->d_lru)) { 406 dentry_lru_remove(dentry);
408 list_del(&dentry->d_lru);
409 dentry_stat.nr_unused--;
410 }
411 __d_drop(dentry); 407 __d_drop(dentry);
412 dentry = d_kill(dentry); 408 dentry = d_kill(dentry);
413 spin_lock(&dcache_lock); 409 spin_lock(&dcache_lock);
@@ -596,10 +592,7 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
596 592
597 /* detach this root from the system */ 593 /* detach this root from the system */
598 spin_lock(&dcache_lock); 594 spin_lock(&dcache_lock);
599 if (!list_empty(&dentry->d_lru)) { 595 dentry_lru_remove(dentry);
600 dentry_stat.nr_unused--;
601 list_del_init(&dentry->d_lru);
602 }
603 __d_drop(dentry); 596 __d_drop(dentry);
604 spin_unlock(&dcache_lock); 597 spin_unlock(&dcache_lock);
605 598
@@ -613,11 +606,7 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
613 spin_lock(&dcache_lock); 606 spin_lock(&dcache_lock);
614 list_for_each_entry(loop, &dentry->d_subdirs, 607 list_for_each_entry(loop, &dentry->d_subdirs,
615 d_u.d_child) { 608 d_u.d_child) {
616 if (!list_empty(&loop->d_lru)) { 609 dentry_lru_remove(loop);
617 dentry_stat.nr_unused--;
618 list_del_init(&loop->d_lru);
619 }
620
621 __d_drop(loop); 610 __d_drop(loop);
622 cond_resched_lock(&dcache_lock); 611 cond_resched_lock(&dcache_lock);
623 } 612 }
@@ -799,10 +788,7 @@ resume:
799 struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child); 788 struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
800 next = tmp->next; 789 next = tmp->next;
801 790
802 if (!list_empty(&dentry->d_lru)) { 791 dentry_lru_remove(dentry);
803 dentry_stat.nr_unused--;
804 list_del_init(&dentry->d_lru);
805 }
806 /* 792 /*
807 * move only zero ref count dentries to the end 793 * move only zero ref count dentries to the end
808 * of the unused list for prune_dcache 794 * of the unused list for prune_dcache
@@ -1408,9 +1394,6 @@ void d_delete(struct dentry * dentry)
1408 if (atomic_read(&dentry->d_count) == 1) { 1394 if (atomic_read(&dentry->d_count) == 1) {
1409 dentry_iput(dentry); 1395 dentry_iput(dentry);
1410 fsnotify_nameremove(dentry, isdir); 1396 fsnotify_nameremove(dentry, isdir);
1411
1412 /* remove this and other inotify debug checks after 2.6.18 */
1413 dentry->d_flags &= ~DCACHE_INOTIFY_PARENT_WATCHED;
1414 return; 1397 return;
1415 } 1398 }
1416 1399
@@ -1779,9 +1762,8 @@ shouldnt_be_hashed:
1779 * 1762 *
1780 * "buflen" should be positive. Caller holds the dcache_lock. 1763 * "buflen" should be positive. Caller holds the dcache_lock.
1781 */ 1764 */
1782static char * __d_path( struct dentry *dentry, struct vfsmount *vfsmnt, 1765static char *__d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
1783 struct dentry *root, struct vfsmount *rootmnt, 1766 struct path *root, char *buffer, int buflen)
1784 char *buffer, int buflen)
1785{ 1767{
1786 char * end = buffer+buflen; 1768 char * end = buffer+buflen;
1787 char * retval; 1769 char * retval;
@@ -1806,7 +1788,7 @@ static char * __d_path( struct dentry *dentry, struct vfsmount *vfsmnt,
1806 for (;;) { 1788 for (;;) {
1807 struct dentry * parent; 1789 struct dentry * parent;
1808 1790
1809 if (dentry == root && vfsmnt == rootmnt) 1791 if (dentry == root->dentry && vfsmnt == root->mnt)
1810 break; 1792 break;
1811 if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) { 1793 if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
1812 /* Global root? */ 1794 /* Global root? */
@@ -1847,13 +1829,23 @@ Elong:
1847 return ERR_PTR(-ENAMETOOLONG); 1829 return ERR_PTR(-ENAMETOOLONG);
1848} 1830}
1849 1831
1850/* write full pathname into buffer and return start of pathname */ 1832/**
1851char * d_path(struct dentry *dentry, struct vfsmount *vfsmnt, 1833 * d_path - return the path of a dentry
1852 char *buf, int buflen) 1834 * @path: path to report
1835 * @buf: buffer to return value in
1836 * @buflen: buffer length
1837 *
1838 * Convert a dentry into an ASCII path name. If the entry has been deleted
1839 * the string " (deleted)" is appended. Note that this is ambiguous.
1840 *
1841 * Returns the buffer or an error code if the path was too long.
1842 *
1843 * "buflen" should be positive. Caller holds the dcache_lock.
1844 */
1845char *d_path(struct path *path, char *buf, int buflen)
1853{ 1846{
1854 char *res; 1847 char *res;
1855 struct vfsmount *rootmnt; 1848 struct path root;
1856 struct dentry *root;
1857 1849
1858 /* 1850 /*
1859 * We have various synthetic filesystems that never get mounted. On 1851 * We have various synthetic filesystems that never get mounted. On
@@ -1862,18 +1854,17 @@ char * d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
1862 * user wants to identify the object in /proc/pid/fd/. The little hack 1854 * user wants to identify the object in /proc/pid/fd/. The little hack
1863 * below allows us to generate a name for these objects on demand: 1855 * below allows us to generate a name for these objects on demand:
1864 */ 1856 */
1865 if (dentry->d_op && dentry->d_op->d_dname) 1857 if (path->dentry->d_op && path->dentry->d_op->d_dname)
1866 return dentry->d_op->d_dname(dentry, buf, buflen); 1858 return path->dentry->d_op->d_dname(path->dentry, buf, buflen);
1867 1859
1868 read_lock(&current->fs->lock); 1860 read_lock(&current->fs->lock);
1869 rootmnt = mntget(current->fs->rootmnt); 1861 root = current->fs->root;
1870 root = dget(current->fs->root); 1862 path_get(&current->fs->root);
1871 read_unlock(&current->fs->lock); 1863 read_unlock(&current->fs->lock);
1872 spin_lock(&dcache_lock); 1864 spin_lock(&dcache_lock);
1873 res = __d_path(dentry, vfsmnt, root, rootmnt, buf, buflen); 1865 res = __d_path(path->dentry, path->mnt, &root, buf, buflen);
1874 spin_unlock(&dcache_lock); 1866 spin_unlock(&dcache_lock);
1875 dput(root); 1867 path_put(&root);
1876 mntput(rootmnt);
1877 return res; 1868 return res;
1878} 1869}
1879 1870
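
With the signature change above, d_path() callers stop juggling a dentry and a vfsmount separately and just pass the embedded struct path, as the compat_ioctl.c hunk earlier in this diff already does. A fragment assuming a struct file *filp and a page-sized buf are in scope:

	char *name;

	name = d_path(&filp->f_path, buf, PAGE_SIZE);
	if (IS_ERR(name))
		name = "?";		/* buffer was too small for the full path */
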
@@ -1919,28 +1910,27 @@ char *dynamic_dname(struct dentry *dentry, char *buffer, int buflen,
1919asmlinkage long sys_getcwd(char __user *buf, unsigned long size) 1910asmlinkage long sys_getcwd(char __user *buf, unsigned long size)
1920{ 1911{
1921 int error; 1912 int error;
1922 struct vfsmount *pwdmnt, *rootmnt; 1913 struct path pwd, root;
1923 struct dentry *pwd, *root;
1924 char *page = (char *) __get_free_page(GFP_USER); 1914 char *page = (char *) __get_free_page(GFP_USER);
1925 1915
1926 if (!page) 1916 if (!page)
1927 return -ENOMEM; 1917 return -ENOMEM;
1928 1918
1929 read_lock(&current->fs->lock); 1919 read_lock(&current->fs->lock);
1930 pwdmnt = mntget(current->fs->pwdmnt); 1920 pwd = current->fs->pwd;
1931 pwd = dget(current->fs->pwd); 1921 path_get(&current->fs->pwd);
1932 rootmnt = mntget(current->fs->rootmnt); 1922 root = current->fs->root;
1933 root = dget(current->fs->root); 1923 path_get(&current->fs->root);
1934 read_unlock(&current->fs->lock); 1924 read_unlock(&current->fs->lock);
1935 1925
1936 error = -ENOENT; 1926 error = -ENOENT;
1937 /* Has the current directory has been unlinked? */ 1927 /* Has the current directory has been unlinked? */
1938 spin_lock(&dcache_lock); 1928 spin_lock(&dcache_lock);
1939 if (pwd->d_parent == pwd || !d_unhashed(pwd)) { 1929 if (pwd.dentry->d_parent == pwd.dentry || !d_unhashed(pwd.dentry)) {
1940 unsigned long len; 1930 unsigned long len;
1941 char * cwd; 1931 char * cwd;
1942 1932
1943 cwd = __d_path(pwd, pwdmnt, root, rootmnt, page, PAGE_SIZE); 1933 cwd = __d_path(pwd.dentry, pwd.mnt, &root, page, PAGE_SIZE);
1944 spin_unlock(&dcache_lock); 1934 spin_unlock(&dcache_lock);
1945 1935
1946 error = PTR_ERR(cwd); 1936 error = PTR_ERR(cwd);
@@ -1958,10 +1948,8 @@ asmlinkage long sys_getcwd(char __user *buf, unsigned long size)
1958 spin_unlock(&dcache_lock); 1948 spin_unlock(&dcache_lock);
1959 1949
1960out: 1950out:
1961 dput(pwd); 1951 path_put(&pwd);
1962 mntput(pwdmnt); 1952 path_put(&root);
1963 dput(root);
1964 mntput(rootmnt);
1965 free_page((unsigned long) page); 1953 free_page((unsigned long) page);
1966 return error; 1954 return error;
1967} 1955}
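
The sys_getcwd() hunk above also illustrates how fs_struct members are handled after the conversion: current->fs->root and ->pwd are struct path values, copied under fs->lock and pinned with path_get(), then released with a single path_put(). A sketch of the reference-taking pattern, assuming the same fs_struct layout this series introduces:

	struct path root;

	read_lock(&current->fs->lock);
	root = current->fs->root;		/* copies both .mnt and .dentry */
	path_get(&current->fs->root);		/* takes a reference on each */
	read_unlock(&current->fs->lock);

	/* ... use root.mnt / root.dentry ... */

	path_put(&root);			/* drops the mnt and dentry references together */
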
diff --git a/fs/dcookies.c b/fs/dcookies.c
index 792cbf55fa95..855d4b1d619a 100644
--- a/fs/dcookies.c
+++ b/fs/dcookies.c
@@ -24,6 +24,7 @@
24#include <linux/errno.h> 24#include <linux/errno.h>
25#include <linux/dcookies.h> 25#include <linux/dcookies.h>
26#include <linux/mutex.h> 26#include <linux/mutex.h>
27#include <linux/path.h>
27#include <asm/uaccess.h> 28#include <asm/uaccess.h>
28 29
29/* The dcookies are allocated from a kmem_cache and 30/* The dcookies are allocated from a kmem_cache and
@@ -31,8 +32,7 @@
31 * code here is particularly performance critical 32 * code here is particularly performance critical
32 */ 33 */
33struct dcookie_struct { 34struct dcookie_struct {
34 struct dentry * dentry; 35 struct path path;
35 struct vfsmount * vfsmnt;
36 struct list_head hash_list; 36 struct list_head hash_list;
37}; 37};
38 38
@@ -51,7 +51,7 @@ static inline int is_live(void)
51/* The dentry is locked, its address will do for the cookie */ 51/* The dentry is locked, its address will do for the cookie */
52static inline unsigned long dcookie_value(struct dcookie_struct * dcs) 52static inline unsigned long dcookie_value(struct dcookie_struct * dcs)
53{ 53{
54 return (unsigned long)dcs->dentry; 54 return (unsigned long)dcs->path.dentry;
55} 55}
56 56
57 57
@@ -89,19 +89,17 @@ static void hash_dcookie(struct dcookie_struct * dcs)
89} 89}
90 90
91 91
92static struct dcookie_struct * alloc_dcookie(struct dentry * dentry, 92static struct dcookie_struct *alloc_dcookie(struct path *path)
93 struct vfsmount * vfsmnt)
94{ 93{
95 struct dcookie_struct * dcs = kmem_cache_alloc(dcookie_cache, GFP_KERNEL); 94 struct dcookie_struct *dcs = kmem_cache_alloc(dcookie_cache,
95 GFP_KERNEL);
96 if (!dcs) 96 if (!dcs)
97 return NULL; 97 return NULL;
98 98
99 dentry->d_cookie = dcs; 99 path->dentry->d_cookie = dcs;
100 100 dcs->path = *path;
101 dcs->dentry = dget(dentry); 101 path_get(path);
102 dcs->vfsmnt = mntget(vfsmnt);
103 hash_dcookie(dcs); 102 hash_dcookie(dcs);
104
105 return dcs; 103 return dcs;
106} 104}
107 105
@@ -109,8 +107,7 @@ static struct dcookie_struct * alloc_dcookie(struct dentry * dentry,
109/* This is the main kernel-side routine that retrieves the cookie 107/* This is the main kernel-side routine that retrieves the cookie
110 * value for a dentry/vfsmnt pair. 108 * value for a dentry/vfsmnt pair.
111 */ 109 */
112int get_dcookie(struct dentry * dentry, struct vfsmount * vfsmnt, 110int get_dcookie(struct path *path, unsigned long *cookie)
113 unsigned long * cookie)
114{ 111{
115 int err = 0; 112 int err = 0;
116 struct dcookie_struct * dcs; 113 struct dcookie_struct * dcs;
@@ -122,10 +119,10 @@ int get_dcookie(struct dentry * dentry, struct vfsmount * vfsmnt,
122 goto out; 119 goto out;
123 } 120 }
124 121
125 dcs = dentry->d_cookie; 122 dcs = path->dentry->d_cookie;
126 123
127 if (!dcs) 124 if (!dcs)
128 dcs = alloc_dcookie(dentry, vfsmnt); 125 dcs = alloc_dcookie(path);
129 126
130 if (!dcs) { 127 if (!dcs) {
131 err = -ENOMEM; 128 err = -ENOMEM;
@@ -174,7 +171,7 @@ asmlinkage long sys_lookup_dcookie(u64 cookie64, char __user * buf, size_t len)
174 goto out; 171 goto out;
175 172
176 /* FIXME: (deleted) ? */ 173 /* FIXME: (deleted) ? */
177 path = d_path(dcs->dentry, dcs->vfsmnt, kbuf, PAGE_SIZE); 174 path = d_path(&dcs->path, kbuf, PAGE_SIZE);
178 175
179 if (IS_ERR(path)) { 176 if (IS_ERR(path)) {
180 err = PTR_ERR(path); 177 err = PTR_ERR(path);
@@ -254,9 +251,8 @@ out_kmem:
254 251
255static void free_dcookie(struct dcookie_struct * dcs) 252static void free_dcookie(struct dcookie_struct * dcs)
256{ 253{
257 dcs->dentry->d_cookie = NULL; 254 dcs->path.dentry->d_cookie = NULL;
258 dput(dcs->dentry); 255 path_put(&dcs->path);
259 mntput(dcs->vfsmnt);
260 kmem_cache_free(dcookie_cache, dcs); 256 kmem_cache_free(dcookie_cache, dcs);
261} 257}
262 258
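
After the dcookie_struct change above, get_dcookie() takes a single struct path rather than a dentry/vfsmount pair, so a caller holding a struct file can simply pass its embedded path. A hedged fragment (filp is assumed to be in scope):

	unsigned long cookie;
	int err;

	err = get_dcookie(&filp->f_path, &cookie);	/* pins the path until the cookie is freed */
	if (err)
		return err;
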
diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c
index fa6b7f7ff914..fddffe4851f5 100644
--- a/fs/debugfs/file.c
+++ b/fs/debugfs/file.c
@@ -56,13 +56,15 @@ const struct inode_operations debugfs_link_operations = {
56 .follow_link = debugfs_follow_link, 56 .follow_link = debugfs_follow_link,
57}; 57};
58 58
59static void debugfs_u8_set(void *data, u64 val) 59static int debugfs_u8_set(void *data, u64 val)
60{ 60{
61 *(u8 *)data = val; 61 *(u8 *)data = val;
62 return 0;
62} 63}
63static u64 debugfs_u8_get(void *data) 64static int debugfs_u8_get(void *data, u64 *val)
64{ 65{
65 return *(u8 *)data; 66 *val = *(u8 *)data;
67 return 0;
66} 68}
67DEFINE_SIMPLE_ATTRIBUTE(fops_u8, debugfs_u8_get, debugfs_u8_set, "%llu\n"); 69DEFINE_SIMPLE_ATTRIBUTE(fops_u8, debugfs_u8_get, debugfs_u8_set, "%llu\n");
68 70
@@ -97,13 +99,15 @@ struct dentry *debugfs_create_u8(const char *name, mode_t mode,
97} 99}
98EXPORT_SYMBOL_GPL(debugfs_create_u8); 100EXPORT_SYMBOL_GPL(debugfs_create_u8);
99 101
100static void debugfs_u16_set(void *data, u64 val) 102static int debugfs_u16_set(void *data, u64 val)
101{ 103{
102 *(u16 *)data = val; 104 *(u16 *)data = val;
105 return 0;
103} 106}
104static u64 debugfs_u16_get(void *data) 107static int debugfs_u16_get(void *data, u64 *val)
105{ 108{
106 return *(u16 *)data; 109 *val = *(u16 *)data;
110 return 0;
107} 111}
108DEFINE_SIMPLE_ATTRIBUTE(fops_u16, debugfs_u16_get, debugfs_u16_set, "%llu\n"); 112DEFINE_SIMPLE_ATTRIBUTE(fops_u16, debugfs_u16_get, debugfs_u16_set, "%llu\n");
109 113
@@ -138,13 +142,15 @@ struct dentry *debugfs_create_u16(const char *name, mode_t mode,
138} 142}
139EXPORT_SYMBOL_GPL(debugfs_create_u16); 143EXPORT_SYMBOL_GPL(debugfs_create_u16);
140 144
141static void debugfs_u32_set(void *data, u64 val) 145static int debugfs_u32_set(void *data, u64 val)
142{ 146{
143 *(u32 *)data = val; 147 *(u32 *)data = val;
148 return 0;
144} 149}
145static u64 debugfs_u32_get(void *data) 150static int debugfs_u32_get(void *data, u64 *val)
146{ 151{
147 return *(u32 *)data; 152 *val = *(u32 *)data;
153 return 0;
148} 154}
149DEFINE_SIMPLE_ATTRIBUTE(fops_u32, debugfs_u32_get, debugfs_u32_set, "%llu\n"); 155DEFINE_SIMPLE_ATTRIBUTE(fops_u32, debugfs_u32_get, debugfs_u32_set, "%llu\n");
150 156
@@ -179,14 +185,16 @@ struct dentry *debugfs_create_u32(const char *name, mode_t mode,
179} 185}
180EXPORT_SYMBOL_GPL(debugfs_create_u32); 186EXPORT_SYMBOL_GPL(debugfs_create_u32);
181 187
182static void debugfs_u64_set(void *data, u64 val) 188static int debugfs_u64_set(void *data, u64 val)
183{ 189{
184 *(u64 *)data = val; 190 *(u64 *)data = val;
191 return 0;
185} 192}
186 193
187static u64 debugfs_u64_get(void *data) 194static int debugfs_u64_get(void *data, u64 *val)
188{ 195{
189 return *(u64 *)data; 196 *val = *(u64 *)data;
197 return 0;
190} 198}
191DEFINE_SIMPLE_ATTRIBUTE(fops_u64, debugfs_u64_get, debugfs_u64_set, "%llu\n"); 199DEFINE_SIMPLE_ATTRIBUTE(fops_u64, debugfs_u64_get, debugfs_u64_set, "%llu\n");
192 200
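
The debugfs change above switches the simple-attribute getters and setters from value-returning helpers to int-returning ones so errors can propagate; DEFINE_SIMPLE_ATTRIBUTE then ties a get/set pair to a printf format. A sketch of defining and registering such an attribute for a hypothetical module counter (my_counter and the parent dentry are assumptions, not from this patch):

static u64 my_counter;

static int my_counter_get(void *data, u64 *val)
{
	*val = *(u64 *)data;
	return 0;
}

static int my_counter_set(void *data, u64 val)
{
	*(u64 *)data = val;
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(fops_my_counter, my_counter_get, my_counter_set, "%llu\n");

/* later, with 'parent' an existing debugfs directory dentry: */
	debugfs_create_file("my_counter", 0644, parent, &my_counter, &fops_my_counter);
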
diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
index 06ef9a255c76..f120e1207874 100644
--- a/fs/devpts/inode.c
+++ b/fs/devpts/inode.c
@@ -20,9 +20,12 @@
20#include <linux/devpts_fs.h> 20#include <linux/devpts_fs.h>
21#include <linux/parser.h> 21#include <linux/parser.h>
22#include <linux/fsnotify.h> 22#include <linux/fsnotify.h>
23#include <linux/seq_file.h>
23 24
24#define DEVPTS_SUPER_MAGIC 0x1cd1 25#define DEVPTS_SUPER_MAGIC 0x1cd1
25 26
27#define DEVPTS_DEFAULT_MODE 0600
28
26static struct vfsmount *devpts_mnt; 29static struct vfsmount *devpts_mnt;
27static struct dentry *devpts_root; 30static struct dentry *devpts_root;
28 31
@@ -32,7 +35,7 @@ static struct {
32 uid_t uid; 35 uid_t uid;
33 gid_t gid; 36 gid_t gid;
34 umode_t mode; 37 umode_t mode;
35} config = {.mode = 0600}; 38} config = {.mode = DEVPTS_DEFAULT_MODE};
36 39
37enum { 40enum {
38 Opt_uid, Opt_gid, Opt_mode, 41 Opt_uid, Opt_gid, Opt_mode,
@@ -54,7 +57,7 @@ static int devpts_remount(struct super_block *sb, int *flags, char *data)
54 config.setgid = 0; 57 config.setgid = 0;
55 config.uid = 0; 58 config.uid = 0;
56 config.gid = 0; 59 config.gid = 0;
57 config.mode = 0600; 60 config.mode = DEVPTS_DEFAULT_MODE;
58 61
59 while ((p = strsep(&data, ",")) != NULL) { 62 while ((p = strsep(&data, ",")) != NULL) {
60 substring_t args[MAX_OPT_ARGS]; 63 substring_t args[MAX_OPT_ARGS];
@@ -81,7 +84,7 @@ static int devpts_remount(struct super_block *sb, int *flags, char *data)
81 case Opt_mode: 84 case Opt_mode:
82 if (match_octal(&args[0], &option)) 85 if (match_octal(&args[0], &option))
83 return -EINVAL; 86 return -EINVAL;
84 config.mode = option & ~S_IFMT; 87 config.mode = option & S_IALLUGO;
85 break; 88 break;
86 default: 89 default:
87 printk(KERN_ERR "devpts: called with bogus options\n"); 90 printk(KERN_ERR "devpts: called with bogus options\n");
@@ -92,9 +95,21 @@ static int devpts_remount(struct super_block *sb, int *flags, char *data)
92 return 0; 95 return 0;
93} 96}
94 97
98static int devpts_show_options(struct seq_file *seq, struct vfsmount *vfs)
99{
100 if (config.setuid)
101 seq_printf(seq, ",uid=%u", config.uid);
102 if (config.setgid)
103 seq_printf(seq, ",gid=%u", config.gid);
104 seq_printf(seq, ",mode=%03o", config.mode);
105
106 return 0;
107}
108
95static const struct super_operations devpts_sops = { 109static const struct super_operations devpts_sops = {
96 .statfs = simple_statfs, 110 .statfs = simple_statfs,
97 .remount_fs = devpts_remount, 111 .remount_fs = devpts_remount,
112 .show_options = devpts_show_options,
98}; 113};
99 114
100static int 115static int
diff --git a/fs/direct-io.c b/fs/direct-io.c
index acf0da1bd257..9e81addbd6ea 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -878,8 +878,8 @@ do_holes:
878 page_cache_release(page); 878 page_cache_release(page);
879 goto out; 879 goto out;
880 } 880 }
881 zero_user_page(page, block_in_page << blkbits, 881 zero_user(page, block_in_page << blkbits,
882 1 << blkbits, KM_USER0); 882 1 << blkbits);
883 dio->block_in_file++; 883 dio->block_in_file++;
884 block_in_page++; 884 block_in_page++;
885 goto next_block; 885 goto next_block;
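
Both this hunk and the cifs_truncate_page() hunk earlier replace zero_user_page(), which needed an explicit kmap slot, with the newer helpers that take only page coordinates. A quick comparison, assuming page, offset and len are in scope:

	/* old: zero_user_page(page, offset, len, KM_USER0); */
	zero_user(page, offset, len);				/* zero 'len' bytes at 'offset' */
	zero_user_segment(page, offset, PAGE_CACHE_SIZE);	/* zero from 'offset' to end of page */
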
diff --git a/fs/dlm/ast.c b/fs/dlm/ast.c
index 6308122890ca..8bf31e3fbf01 100644
--- a/fs/dlm/ast.c
+++ b/fs/dlm/ast.c
@@ -39,7 +39,6 @@ void dlm_add_ast(struct dlm_lkb *lkb, int type)
39 dlm_user_add_ast(lkb, type); 39 dlm_user_add_ast(lkb, type);
40 return; 40 return;
41 } 41 }
42 DLM_ASSERT(lkb->lkb_astaddr != DLM_FAKE_USER_AST, dlm_print_lkb(lkb););
43 42
44 spin_lock(&ast_queue_lock); 43 spin_lock(&ast_queue_lock);
45 if (!(lkb->lkb_ast_type & (AST_COMP | AST_BAST))) { 44 if (!(lkb->lkb_ast_type & (AST_COMP | AST_BAST))) {
@@ -58,8 +57,8 @@ static void process_asts(void)
58 struct dlm_ls *ls = NULL; 57 struct dlm_ls *ls = NULL;
59 struct dlm_rsb *r = NULL; 58 struct dlm_rsb *r = NULL;
60 struct dlm_lkb *lkb; 59 struct dlm_lkb *lkb;
61 void (*cast) (long param); 60 void (*cast) (void *astparam);
62 void (*bast) (long param, int mode); 61 void (*bast) (void *astparam, int mode);
63 int type = 0, found, bmode; 62 int type = 0, found, bmode;
64 63
65 for (;;) { 64 for (;;) {
@@ -83,8 +82,8 @@ static void process_asts(void)
83 if (!found) 82 if (!found)
84 break; 83 break;
85 84
86 cast = lkb->lkb_astaddr; 85 cast = lkb->lkb_astfn;
87 bast = lkb->lkb_bastaddr; 86 bast = lkb->lkb_bastfn;
88 bmode = lkb->lkb_bastmode; 87 bmode = lkb->lkb_bastmode;
89 88
90 if ((type & AST_COMP) && cast) 89 if ((type & AST_COMP) && cast)
diff --git a/fs/dlm/config.c b/fs/dlm/config.c
index 2f8e3c81bc19..c3ad1dff3b25 100644
--- a/fs/dlm/config.c
+++ b/fs/dlm/config.c
@@ -604,7 +604,7 @@ static struct clusters clusters_root = {
604 }, 604 },
605}; 605};
606 606
607int dlm_config_init(void) 607int __init dlm_config_init(void)
608{ 608{
609 config_group_init(&clusters_root.subsys.su_group); 609 config_group_init(&clusters_root.subsys.su_group);
610 mutex_init(&clusters_root.subsys.su_mutex); 610 mutex_init(&clusters_root.subsys.su_mutex);
diff --git a/fs/dlm/debug_fs.c b/fs/dlm/debug_fs.c
index 12c3bfd5e660..8fc24f4507a3 100644
--- a/fs/dlm/debug_fs.c
+++ b/fs/dlm/debug_fs.c
@@ -162,14 +162,12 @@ static int print_resource(struct dlm_rsb *res, struct seq_file *s)
162 162
163static void print_lock(struct seq_file *s, struct dlm_lkb *lkb, struct dlm_rsb *r) 163static void print_lock(struct seq_file *s, struct dlm_lkb *lkb, struct dlm_rsb *r)
164{ 164{
165 struct dlm_user_args *ua;
166 unsigned int waiting = 0; 165 unsigned int waiting = 0;
167 uint64_t xid = 0; 166 uint64_t xid = 0;
168 167
169 if (lkb->lkb_flags & DLM_IFL_USER) { 168 if (lkb->lkb_flags & DLM_IFL_USER) {
170 ua = (struct dlm_user_args *) lkb->lkb_astparam; 169 if (lkb->lkb_ua)
171 if (ua) 170 xid = lkb->lkb_ua->xid;
172 xid = ua->xid;
173 } 171 }
174 172
175 if (lkb->lkb_timestamp) 173 if (lkb->lkb_timestamp)
@@ -543,7 +541,7 @@ void dlm_delete_debug_file(struct dlm_ls *ls)
543 debugfs_remove(ls->ls_debug_locks_dentry); 541 debugfs_remove(ls->ls_debug_locks_dentry);
544} 542}
545 543
546int dlm_register_debugfs(void) 544int __init dlm_register_debugfs(void)
547{ 545{
548 mutex_init(&debug_buf_lock); 546 mutex_init(&debug_buf_lock);
549 dlm_root = debugfs_create_dir("dlm", NULL); 547 dlm_root = debugfs_create_dir("dlm", NULL);
diff --git a/fs/dlm/dir.c b/fs/dlm/dir.c
index ff97ba924333..85defeb64df4 100644
--- a/fs/dlm/dir.c
+++ b/fs/dlm/dir.c
@@ -220,6 +220,7 @@ int dlm_recover_directory(struct dlm_ls *ls)
220 last_len = 0; 220 last_len = 0;
221 221
222 for (;;) { 222 for (;;) {
223 int left;
223 error = dlm_recovery_stopped(ls); 224 error = dlm_recovery_stopped(ls);
224 if (error) 225 if (error)
225 goto out_free; 226 goto out_free;
@@ -235,12 +236,21 @@ int dlm_recover_directory(struct dlm_ls *ls)
235 * pick namelen/name pairs out of received buffer 236 * pick namelen/name pairs out of received buffer
236 */ 237 */
237 238
238 b = ls->ls_recover_buf + sizeof(struct dlm_rcom); 239 b = ls->ls_recover_buf->rc_buf;
240 left = ls->ls_recover_buf->rc_header.h_length;
241 left -= sizeof(struct dlm_rcom);
239 242
240 for (;;) { 243 for (;;) {
241 memcpy(&namelen, b, sizeof(uint16_t)); 244 __be16 v;
242 namelen = be16_to_cpu(namelen); 245
243 b += sizeof(uint16_t); 246 error = -EINVAL;
247 if (left < sizeof(__be16))
248 goto out_free;
249
250 memcpy(&v, b, sizeof(__be16));
251 namelen = be16_to_cpu(v);
252 b += sizeof(__be16);
253 left -= sizeof(__be16);
244 254
245 /* namelen of 0xFFFFF marks end of names for 255 /* namelen of 0xFFFFF marks end of names for
246 this node; namelen of 0 marks end of the 256 this node; namelen of 0 marks end of the
@@ -251,6 +261,12 @@ int dlm_recover_directory(struct dlm_ls *ls)
251 if (!namelen) 261 if (!namelen)
252 break; 262 break;
253 263
264 if (namelen > left)
265 goto out_free;
266
267 if (namelen > DLM_RESNAME_MAXLEN)
268 goto out_free;
269
254 error = -ENOMEM; 270 error = -ENOMEM;
255 de = get_free_de(ls, namelen); 271 de = get_free_de(ls, namelen);
256 if (!de) 272 if (!de)
@@ -262,6 +278,7 @@ int dlm_recover_directory(struct dlm_ls *ls)
262 memcpy(de->name, b, namelen); 278 memcpy(de->name, b, namelen);
263 memcpy(last_name, b, namelen); 279 memcpy(last_name, b, namelen);
264 b += namelen; 280 b += namelen;
281 left -= namelen;
265 282
266 add_entry_to_hash(ls, de); 283 add_entry_to_hash(ls, de);
267 count++; 284 count++;
@@ -302,6 +319,9 @@ static int get_entry(struct dlm_ls *ls, int nodeid, char *name,
302 319
303 write_unlock(&ls->ls_dirtbl[bucket].lock); 320 write_unlock(&ls->ls_dirtbl[bucket].lock);
304 321
322 if (namelen > DLM_RESNAME_MAXLEN)
323 return -EINVAL;
324
305 de = kzalloc(sizeof(struct dlm_direntry) + namelen, GFP_KERNEL); 325 de = kzalloc(sizeof(struct dlm_direntry) + namelen, GFP_KERNEL);
306 if (!de) 326 if (!de)
307 return -ENOMEM; 327 return -ENOMEM;
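
The dlm_recover_directory() changes above add bounds checking while walking the received buffer of big-endian length-prefixed names: the remaining byte count ("left") is tracked alongside the cursor, and each prefix and name is validated before being consumed. A stripped-down sketch of that walk (walk_names is a hypothetical helper, not part of the patch):

static int walk_names(const char *b, int left)
{
	__be16 v;
	uint16_t namelen;

	while (left >= sizeof(__be16)) {
		memcpy(&v, b, sizeof(__be16));
		namelen = be16_to_cpu(v);
		b += sizeof(__be16);
		left -= sizeof(__be16);

		if (!namelen)
			break;				/* end of this buffer */
		/* (the real loop also breaks on the per-node end marker noted above) */
		if (namelen > left || namelen > DLM_RESNAME_MAXLEN)
			return -EINVAL;			/* truncated or oversized entry */

		/* ... handle 'namelen' bytes of name at 'b' ... */
		b += namelen;
		left -= namelen;
	}
	return 0;
}
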
diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h
index ec61bbaf25df..d30ea8b433a2 100644
--- a/fs/dlm/dlm_internal.h
+++ b/fs/dlm/dlm_internal.h
@@ -92,8 +92,6 @@ do { \
92 } \ 92 } \
93} 93}
94 94
95#define DLM_FAKE_USER_AST ERR_PTR(-EINVAL)
96
97 95
98struct dlm_direntry { 96struct dlm_direntry {
99 struct list_head list; 97 struct list_head list;
@@ -146,9 +144,9 @@ struct dlm_recover {
146 144
147struct dlm_args { 145struct dlm_args {
148 uint32_t flags; 146 uint32_t flags;
149 void *astaddr; 147 void (*astfn) (void *astparam);
150 long astparam; 148 void *astparam;
151 void *bastaddr; 149 void (*bastfn) (void *astparam, int mode);
152 int mode; 150 int mode;
153 struct dlm_lksb *lksb; 151 struct dlm_lksb *lksb;
154 unsigned long timeout; 152 unsigned long timeout;
@@ -253,9 +251,12 @@ struct dlm_lkb {
253 251
254 char *lkb_lvbptr; 252 char *lkb_lvbptr;
255 struct dlm_lksb *lkb_lksb; /* caller's status block */ 253 struct dlm_lksb *lkb_lksb; /* caller's status block */
256 void *lkb_astaddr; /* caller's ast function */ 254 void (*lkb_astfn) (void *astparam);
257 void *lkb_bastaddr; /* caller's bast function */ 255 void (*lkb_bastfn) (void *astparam, int mode);
258 long lkb_astparam; /* caller's ast arg */ 256 union {
257 void *lkb_astparam; /* caller's ast arg */
258 struct dlm_user_args *lkb_ua;
259 };
259}; 260};
260 261
261 262
@@ -403,28 +404,34 @@ struct dlm_rcom {
403 char rc_buf[0]; 404 char rc_buf[0];
404}; 405};
405 406
407union dlm_packet {
408 struct dlm_header header; /* common to other two */
409 struct dlm_message message;
410 struct dlm_rcom rcom;
411};
412
406struct rcom_config { 413struct rcom_config {
407 uint32_t rf_lvblen; 414 __le32 rf_lvblen;
408 uint32_t rf_lsflags; 415 __le32 rf_lsflags;
409 uint64_t rf_unused; 416 __le64 rf_unused;
410}; 417};
411 418
412struct rcom_lock { 419struct rcom_lock {
413 uint32_t rl_ownpid; 420 __le32 rl_ownpid;
414 uint32_t rl_lkid; 421 __le32 rl_lkid;
415 uint32_t rl_remid; 422 __le32 rl_remid;
416 uint32_t rl_parent_lkid; 423 __le32 rl_parent_lkid;
417 uint32_t rl_parent_remid; 424 __le32 rl_parent_remid;
418 uint32_t rl_exflags; 425 __le32 rl_exflags;
419 uint32_t rl_flags; 426 __le32 rl_flags;
420 uint32_t rl_lvbseq; 427 __le32 rl_lvbseq;
421 int rl_result; 428 __le32 rl_result;
422 int8_t rl_rqmode; 429 int8_t rl_rqmode;
423 int8_t rl_grmode; 430 int8_t rl_grmode;
424 int8_t rl_status; 431 int8_t rl_status;
425 int8_t rl_asts; 432 int8_t rl_asts;
426 uint16_t rl_wait_type; 433 __le16 rl_wait_type;
427 uint16_t rl_namelen; 434 __le16 rl_namelen;
428 char rl_name[DLM_RESNAME_MAXLEN]; 435 char rl_name[DLM_RESNAME_MAXLEN];
429 char rl_lvb[0]; 436 char rl_lvb[0];
430}; 437};
@@ -494,7 +501,7 @@ struct dlm_ls {
494 struct rw_semaphore ls_recv_active; /* block dlm_recv */ 501 struct rw_semaphore ls_recv_active; /* block dlm_recv */
495 struct list_head ls_requestqueue;/* queue remote requests */ 502 struct list_head ls_requestqueue;/* queue remote requests */
496 struct mutex ls_requestqueue_mutex; 503 struct mutex ls_requestqueue_mutex;
497 char *ls_recover_buf; 504 struct dlm_rcom *ls_recover_buf;
498 int ls_recover_nodeid; /* for debugging */ 505 int ls_recover_nodeid; /* for debugging */
499 uint64_t ls_rcom_seq; 506 uint64_t ls_rcom_seq;
500 spinlock_t ls_rcom_spin; 507 spinlock_t ls_rcom_spin;
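
The struct changes above put rcom_config and rcom_lock into explicit little-endian wire format; the lock.c hunks that follow then convert at every access with cpu_to_le*/le*_to_cpu. A few representative lines mirroring the rcom_lock usage below:

	rl->rl_remid = cpu_to_le32(lkb->lkb_id);	/* CPU value -> wire field */
	lkb->lkb_remid = le32_to_cpu(rl->rl_remid);	/* wire field -> CPU value */
	/* comparisons can convert the constant instead of the field: */
	if (rl->rl_wait_type == cpu_to_le16(DLM_MSG_CONVERT))
		rl->rl_status = DLM_LKSTS_CONVERT;	/* rl_status stays int8_t, no conversion */
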
diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
index ff4a198fa677..8f250ac8b928 100644
--- a/fs/dlm/lock.c
+++ b/fs/dlm/lock.c
@@ -436,11 +436,15 @@ static int find_rsb(struct dlm_ls *ls, char *name, int namelen,
436{ 436{
437 struct dlm_rsb *r, *tmp; 437 struct dlm_rsb *r, *tmp;
438 uint32_t hash, bucket; 438 uint32_t hash, bucket;
439 int error = 0; 439 int error = -EINVAL;
440
441 if (namelen > DLM_RESNAME_MAXLEN)
442 goto out;
440 443
441 if (dlm_no_directory(ls)) 444 if (dlm_no_directory(ls))
442 flags |= R_CREATE; 445 flags |= R_CREATE;
443 446
447 error = 0;
444 hash = jhash(name, namelen, 0); 448 hash = jhash(name, namelen, 0);
445 bucket = hash & (ls->ls_rsbtbl_size - 1); 449 bucket = hash & (ls->ls_rsbtbl_size - 1);
446 450
@@ -1222,6 +1226,8 @@ static void set_lvb_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb,
1222 b = dlm_lvb_operations[lkb->lkb_grmode + 1][lkb->lkb_rqmode + 1]; 1226 b = dlm_lvb_operations[lkb->lkb_grmode + 1][lkb->lkb_rqmode + 1];
1223 if (b == 1) { 1227 if (b == 1) {
1224 int len = receive_extralen(ms); 1228 int len = receive_extralen(ms);
1229 if (len > DLM_RESNAME_MAXLEN)
1230 len = DLM_RESNAME_MAXLEN;
1225 memcpy(lkb->lkb_lvbptr, ms->m_extra, len); 1231 memcpy(lkb->lkb_lvbptr, ms->m_extra, len);
1226 lkb->lkb_lvbseq = ms->m_lvbseq; 1232 lkb->lkb_lvbseq = ms->m_lvbseq;
1227 } 1233 }
@@ -1775,7 +1781,7 @@ static void grant_pending_locks(struct dlm_rsb *r)
1775 */ 1781 */
1776 1782
1777 list_for_each_entry_safe(lkb, s, &r->res_grantqueue, lkb_statequeue) { 1783 list_for_each_entry_safe(lkb, s, &r->res_grantqueue, lkb_statequeue) {
1778 if (lkb->lkb_bastaddr && lock_requires_bast(lkb, high, cw)) { 1784 if (lkb->lkb_bastfn && lock_requires_bast(lkb, high, cw)) {
1779 if (cw && high == DLM_LOCK_PR) 1785 if (cw && high == DLM_LOCK_PR)
1780 queue_bast(r, lkb, DLM_LOCK_CW); 1786 queue_bast(r, lkb, DLM_LOCK_CW);
1781 else 1787 else
@@ -1805,7 +1811,7 @@ static void send_bast_queue(struct dlm_rsb *r, struct list_head *head,
1805 struct dlm_lkb *gr; 1811 struct dlm_lkb *gr;
1806 1812
1807 list_for_each_entry(gr, head, lkb_statequeue) { 1813 list_for_each_entry(gr, head, lkb_statequeue) {
1808 if (gr->lkb_bastaddr && modes_require_bast(gr, lkb)) { 1814 if (gr->lkb_bastfn && modes_require_bast(gr, lkb)) {
1809 queue_bast(r, gr, lkb->lkb_rqmode); 1815 queue_bast(r, gr, lkb->lkb_rqmode);
1810 gr->lkb_highbast = lkb->lkb_rqmode; 1816 gr->lkb_highbast = lkb->lkb_rqmode;
1811 } 1817 }
@@ -1960,8 +1966,11 @@ static void confirm_master(struct dlm_rsb *r, int error)
1960} 1966}
1961 1967
1962static int set_lock_args(int mode, struct dlm_lksb *lksb, uint32_t flags, 1968static int set_lock_args(int mode, struct dlm_lksb *lksb, uint32_t flags,
1963 int namelen, unsigned long timeout_cs, void *ast, 1969 int namelen, unsigned long timeout_cs,
1964 void *astarg, void *bast, struct dlm_args *args) 1970 void (*ast) (void *astparam),
1971 void *astparam,
1972 void (*bast) (void *astparam, int mode),
1973 struct dlm_args *args)
1965{ 1974{
1966 int rv = -EINVAL; 1975 int rv = -EINVAL;
1967 1976
@@ -2011,9 +2020,9 @@ static int set_lock_args(int mode, struct dlm_lksb *lksb, uint32_t flags,
2011 an active lkb cannot be modified before locking the rsb */ 2020 an active lkb cannot be modified before locking the rsb */
2012 2021
2013 args->flags = flags; 2022 args->flags = flags;
2014 args->astaddr = ast; 2023 args->astfn = ast;
2015 args->astparam = (long) astarg; 2024 args->astparam = astparam;
2016 args->bastaddr = bast; 2025 args->bastfn = bast;
2017 args->timeout = timeout_cs; 2026 args->timeout = timeout_cs;
2018 args->mode = mode; 2027 args->mode = mode;
2019 args->lksb = lksb; 2028 args->lksb = lksb;
@@ -2032,7 +2041,7 @@ static int set_unlock_args(uint32_t flags, void *astarg, struct dlm_args *args)
2032 return -EINVAL; 2041 return -EINVAL;
2033 2042
2034 args->flags = flags; 2043 args->flags = flags;
2035 args->astparam = (long) astarg; 2044 args->astparam = astarg;
2036 return 0; 2045 return 0;
2037} 2046}
2038 2047
@@ -2062,9 +2071,9 @@ static int validate_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
2062 2071
2063 lkb->lkb_exflags = args->flags; 2072 lkb->lkb_exflags = args->flags;
2064 lkb->lkb_sbflags = 0; 2073 lkb->lkb_sbflags = 0;
2065 lkb->lkb_astaddr = args->astaddr; 2074 lkb->lkb_astfn = args->astfn;
2066 lkb->lkb_astparam = args->astparam; 2075 lkb->lkb_astparam = args->astparam;
2067 lkb->lkb_bastaddr = args->bastaddr; 2076 lkb->lkb_bastfn = args->bastfn;
2068 lkb->lkb_rqmode = args->mode; 2077 lkb->lkb_rqmode = args->mode;
2069 lkb->lkb_lksb = args->lksb; 2078 lkb->lkb_lksb = args->lksb;
2070 lkb->lkb_lvbptr = args->lksb->sb_lvbptr; 2079 lkb->lkb_lvbptr = args->lksb->sb_lvbptr;
@@ -2711,9 +2720,9 @@ static void send_args(struct dlm_rsb *r, struct dlm_lkb *lkb,
2711 /* m_result and m_bastmode are set from function args, 2720 /* m_result and m_bastmode are set from function args,
2712 not from lkb fields */ 2721 not from lkb fields */
2713 2722
2714 if (lkb->lkb_bastaddr) 2723 if (lkb->lkb_bastfn)
2715 ms->m_asts |= AST_BAST; 2724 ms->m_asts |= AST_BAST;
2716 if (lkb->lkb_astaddr) 2725 if (lkb->lkb_astfn)
2717 ms->m_asts |= AST_COMP; 2726 ms->m_asts |= AST_COMP;
2718 2727
2719 /* compare with switch in create_message; send_remove() doesn't 2728 /* compare with switch in create_message; send_remove() doesn't
@@ -2989,11 +2998,23 @@ static int receive_lvb(struct dlm_ls *ls, struct dlm_lkb *lkb,
2989 if (!lkb->lkb_lvbptr) 2998 if (!lkb->lkb_lvbptr)
2990 return -ENOMEM; 2999 return -ENOMEM;
2991 len = receive_extralen(ms); 3000 len = receive_extralen(ms);
3001 if (len > DLM_RESNAME_MAXLEN)
3002 len = DLM_RESNAME_MAXLEN;
2992 memcpy(lkb->lkb_lvbptr, ms->m_extra, len); 3003 memcpy(lkb->lkb_lvbptr, ms->m_extra, len);
2993 } 3004 }
2994 return 0; 3005 return 0;
2995} 3006}
2996 3007
3008static void fake_bastfn(void *astparam, int mode)
3009{
3010 log_print("fake_bastfn should not be called");
3011}
3012
3013static void fake_astfn(void *astparam)
3014{
3015 log_print("fake_astfn should not be called");
3016}
3017
2997static int receive_request_args(struct dlm_ls *ls, struct dlm_lkb *lkb, 3018static int receive_request_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
2998 struct dlm_message *ms) 3019 struct dlm_message *ms)
2999{ 3020{
@@ -3002,8 +3023,9 @@ static int receive_request_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
3002 lkb->lkb_remid = ms->m_lkid; 3023 lkb->lkb_remid = ms->m_lkid;
3003 lkb->lkb_grmode = DLM_LOCK_IV; 3024 lkb->lkb_grmode = DLM_LOCK_IV;
3004 lkb->lkb_rqmode = ms->m_rqmode; 3025 lkb->lkb_rqmode = ms->m_rqmode;
3005 lkb->lkb_bastaddr = (void *) (long) (ms->m_asts & AST_BAST); 3026
3006 lkb->lkb_astaddr = (void *) (long) (ms->m_asts & AST_COMP); 3027 lkb->lkb_bastfn = (ms->m_asts & AST_BAST) ? &fake_bastfn : NULL;
3028 lkb->lkb_astfn = (ms->m_asts & AST_COMP) ? &fake_astfn : NULL;
3007 3029
3008 if (lkb->lkb_exflags & DLM_LKF_VALBLK) { 3030 if (lkb->lkb_exflags & DLM_LKF_VALBLK) {
3009 /* lkb was just created so there won't be an lvb yet */ 3031 /* lkb was just created so there won't be an lvb yet */
@@ -3802,7 +3824,7 @@ static void dlm_receive_message(struct dlm_ls *ls, struct dlm_message *ms,
3802 int nodeid) 3824 int nodeid)
3803{ 3825{
3804 if (dlm_locking_stopped(ls)) { 3826 if (dlm_locking_stopped(ls)) {
3805 dlm_add_requestqueue(ls, nodeid, (struct dlm_header *) ms); 3827 dlm_add_requestqueue(ls, nodeid, ms);
3806 } else { 3828 } else {
3807 dlm_wait_requestqueue(ls); 3829 dlm_wait_requestqueue(ls);
3808 _receive_message(ls, ms); 3830 _receive_message(ls, ms);
@@ -3822,21 +3844,20 @@ void dlm_receive_message_saved(struct dlm_ls *ls, struct dlm_message *ms)
3822 standard locking activity) or an RCOM (recovery message sent as part of 3844 standard locking activity) or an RCOM (recovery message sent as part of
3823 lockspace recovery). */ 3845 lockspace recovery). */
3824 3846
3825void dlm_receive_buffer(struct dlm_header *hd, int nodeid) 3847void dlm_receive_buffer(union dlm_packet *p, int nodeid)
3826{ 3848{
3827 struct dlm_message *ms = (struct dlm_message *) hd; 3849 struct dlm_header *hd = &p->header;
3828 struct dlm_rcom *rc = (struct dlm_rcom *) hd;
3829 struct dlm_ls *ls; 3850 struct dlm_ls *ls;
3830 int type = 0; 3851 int type = 0;
3831 3852
3832 switch (hd->h_cmd) { 3853 switch (hd->h_cmd) {
3833 case DLM_MSG: 3854 case DLM_MSG:
3834 dlm_message_in(ms); 3855 dlm_message_in(&p->message);
3835 type = ms->m_type; 3856 type = p->message.m_type;
3836 break; 3857 break;
3837 case DLM_RCOM: 3858 case DLM_RCOM:
3838 dlm_rcom_in(rc); 3859 dlm_rcom_in(&p->rcom);
3839 type = rc->rc_type; 3860 type = p->rcom.rc_type;
3840 break; 3861 break;
3841 default: 3862 default:
3842 log_print("invalid h_cmd %d from %u", hd->h_cmd, nodeid); 3863 log_print("invalid h_cmd %d from %u", hd->h_cmd, nodeid);
@@ -3856,7 +3877,7 @@ void dlm_receive_buffer(struct dlm_header *hd, int nodeid)
3856 hd->h_lockspace, nodeid, hd->h_cmd, type); 3877 hd->h_lockspace, nodeid, hd->h_cmd, type);
3857 3878
3858 if (hd->h_cmd == DLM_RCOM && type == DLM_RCOM_STATUS) 3879 if (hd->h_cmd == DLM_RCOM && type == DLM_RCOM_STATUS)
3859 dlm_send_ls_not_ready(nodeid, rc); 3880 dlm_send_ls_not_ready(nodeid, &p->rcom);
3860 return; 3881 return;
3861 } 3882 }
3862 3883
@@ -3865,9 +3886,9 @@ void dlm_receive_buffer(struct dlm_header *hd, int nodeid)
3865 3886
3866 down_read(&ls->ls_recv_active); 3887 down_read(&ls->ls_recv_active);
3867 if (hd->h_cmd == DLM_MSG) 3888 if (hd->h_cmd == DLM_MSG)
3868 dlm_receive_message(ls, ms, nodeid); 3889 dlm_receive_message(ls, &p->message, nodeid);
3869 else 3890 else
3870 dlm_receive_rcom(ls, rc, nodeid); 3891 dlm_receive_rcom(ls, &p->rcom, nodeid);
3871 up_read(&ls->ls_recv_active); 3892 up_read(&ls->ls_recv_active);
3872 3893
3873 dlm_put_lockspace(ls); 3894 dlm_put_lockspace(ls);
@@ -4267,32 +4288,34 @@ static struct dlm_lkb *search_remid(struct dlm_rsb *r, int nodeid,
4267 return NULL; 4288 return NULL;
4268} 4289}
4269 4290
4291/* needs at least dlm_rcom + rcom_lock */
4270static int receive_rcom_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb, 4292static int receive_rcom_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
4271 struct dlm_rsb *r, struct dlm_rcom *rc) 4293 struct dlm_rsb *r, struct dlm_rcom *rc)
4272{ 4294{
4273 struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf; 4295 struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf;
4274 int lvblen;
4275 4296
4276 lkb->lkb_nodeid = rc->rc_header.h_nodeid; 4297 lkb->lkb_nodeid = rc->rc_header.h_nodeid;
4277 lkb->lkb_ownpid = rl->rl_ownpid; 4298 lkb->lkb_ownpid = le32_to_cpu(rl->rl_ownpid);
4278 lkb->lkb_remid = rl->rl_lkid; 4299 lkb->lkb_remid = le32_to_cpu(rl->rl_lkid);
4279 lkb->lkb_exflags = rl->rl_exflags; 4300 lkb->lkb_exflags = le32_to_cpu(rl->rl_exflags);
4280 lkb->lkb_flags = rl->rl_flags & 0x0000FFFF; 4301 lkb->lkb_flags = le32_to_cpu(rl->rl_flags) & 0x0000FFFF;
4281 lkb->lkb_flags |= DLM_IFL_MSTCPY; 4302 lkb->lkb_flags |= DLM_IFL_MSTCPY;
4282 lkb->lkb_lvbseq = rl->rl_lvbseq; 4303 lkb->lkb_lvbseq = le32_to_cpu(rl->rl_lvbseq);
4283 lkb->lkb_rqmode = rl->rl_rqmode; 4304 lkb->lkb_rqmode = rl->rl_rqmode;
4284 lkb->lkb_grmode = rl->rl_grmode; 4305 lkb->lkb_grmode = rl->rl_grmode;
4285 /* don't set lkb_status because add_lkb wants to itself */ 4306 /* don't set lkb_status because add_lkb wants to itself */
4286 4307
4287 lkb->lkb_bastaddr = (void *) (long) (rl->rl_asts & AST_BAST); 4308 lkb->lkb_bastfn = (rl->rl_asts & AST_BAST) ? &fake_bastfn : NULL;
4288 lkb->lkb_astaddr = (void *) (long) (rl->rl_asts & AST_COMP); 4309 lkb->lkb_astfn = (rl->rl_asts & AST_COMP) ? &fake_astfn : NULL;
4289 4310
4290 if (lkb->lkb_exflags & DLM_LKF_VALBLK) { 4311 if (lkb->lkb_exflags & DLM_LKF_VALBLK) {
4312 int lvblen = rc->rc_header.h_length - sizeof(struct dlm_rcom) -
4313 sizeof(struct rcom_lock);
4314 if (lvblen > ls->ls_lvblen)
4315 return -EINVAL;
4291 lkb->lkb_lvbptr = dlm_allocate_lvb(ls); 4316 lkb->lkb_lvbptr = dlm_allocate_lvb(ls);
4292 if (!lkb->lkb_lvbptr) 4317 if (!lkb->lkb_lvbptr)
4293 return -ENOMEM; 4318 return -ENOMEM;
4294 lvblen = rc->rc_header.h_length - sizeof(struct dlm_rcom) -
4295 sizeof(struct rcom_lock);
4296 memcpy(lkb->lkb_lvbptr, rl->rl_lvb, lvblen); 4319 memcpy(lkb->lkb_lvbptr, rl->rl_lvb, lvblen);
4297 } 4320 }
4298 4321
@@ -4300,7 +4323,8 @@ static int receive_rcom_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
4300 The real granted mode of these converting locks cannot be determined 4323 The real granted mode of these converting locks cannot be determined
4301 until all locks have been rebuilt on the rsb (recover_conversion) */ 4324 until all locks have been rebuilt on the rsb (recover_conversion) */
4302 4325
4303 if (rl->rl_wait_type == DLM_MSG_CONVERT && middle_conversion(lkb)) { 4326 if (rl->rl_wait_type == cpu_to_le16(DLM_MSG_CONVERT) &&
4327 middle_conversion(lkb)) {
4304 rl->rl_status = DLM_LKSTS_CONVERT; 4328 rl->rl_status = DLM_LKSTS_CONVERT;
4305 lkb->lkb_grmode = DLM_LOCK_IV; 4329 lkb->lkb_grmode = DLM_LOCK_IV;
4306 rsb_set_flag(r, RSB_RECOVER_CONVERT); 4330 rsb_set_flag(r, RSB_RECOVER_CONVERT);
@@ -4315,6 +4339,7 @@ static int receive_rcom_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
4315 the given values and send back our lkid. We send back our lkid by sending 4339 the given values and send back our lkid. We send back our lkid by sending
4316 back the rcom_lock struct we got but with the remid field filled in. */ 4340 back the rcom_lock struct we got but with the remid field filled in. */
4317 4341
4342/* needs at least dlm_rcom + rcom_lock */
4318int dlm_recover_master_copy(struct dlm_ls *ls, struct dlm_rcom *rc) 4343int dlm_recover_master_copy(struct dlm_ls *ls, struct dlm_rcom *rc)
4319{ 4344{
4320 struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf; 4345 struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf;
@@ -4327,13 +4352,14 @@ int dlm_recover_master_copy(struct dlm_ls *ls, struct dlm_rcom *rc)
4327 goto out; 4352 goto out;
4328 } 4353 }
4329 4354
4330 error = find_rsb(ls, rl->rl_name, rl->rl_namelen, R_MASTER, &r); 4355 error = find_rsb(ls, rl->rl_name, le16_to_cpu(rl->rl_namelen),
4356 R_MASTER, &r);
4331 if (error) 4357 if (error)
4332 goto out; 4358 goto out;
4333 4359
4334 lock_rsb(r); 4360 lock_rsb(r);
4335 4361
4336 lkb = search_remid(r, rc->rc_header.h_nodeid, rl->rl_lkid); 4362 lkb = search_remid(r, rc->rc_header.h_nodeid, le32_to_cpu(rl->rl_lkid));
4337 if (lkb) { 4363 if (lkb) {
4338 error = -EEXIST; 4364 error = -EEXIST;
4339 goto out_remid; 4365 goto out_remid;
@@ -4356,18 +4382,20 @@ int dlm_recover_master_copy(struct dlm_ls *ls, struct dlm_rcom *rc)
4356 out_remid: 4382 out_remid:
4357 /* this is the new value returned to the lock holder for 4383 /* this is the new value returned to the lock holder for
4358 saving in its process-copy lkb */ 4384 saving in its process-copy lkb */
4359 rl->rl_remid = lkb->lkb_id; 4385 rl->rl_remid = cpu_to_le32(lkb->lkb_id);
4360 4386
4361 out_unlock: 4387 out_unlock:
4362 unlock_rsb(r); 4388 unlock_rsb(r);
4363 put_rsb(r); 4389 put_rsb(r);
4364 out: 4390 out:
4365 if (error) 4391 if (error)
4366 log_debug(ls, "recover_master_copy %d %x", error, rl->rl_lkid); 4392 log_debug(ls, "recover_master_copy %d %x", error,
4367 rl->rl_result = error; 4393 le32_to_cpu(rl->rl_lkid));
4394 rl->rl_result = cpu_to_le32(error);
4368 return error; 4395 return error;
4369} 4396}
4370 4397
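
Throughout these hunks the rcom_lock fields stay in little-endian wire order and are converted only where they are used: le32_to_cpu()/le16_to_cpu() on the receive side, cpu_to_le32() when the struct is echoed back with rl_remid and rl_result filled in. That convention is what lets the blanket rcom_lock_in()/rcom_lock_out() helpers be removed from util.c further down. A small illustrative fragment of the reply side (helper name invented):

static void fill_lock_reply(struct rcom_lock *rl, struct dlm_lkb *lkb, int error)
{
	/* rl is wire format end to end; convert each field as it is written */
	rl->rl_remid = cpu_to_le32(lkb->lkb_id);
	rl->rl_result = cpu_to_le32(error);
}
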
4398/* needs at least dlm_rcom + rcom_lock */
4371int dlm_recover_process_copy(struct dlm_ls *ls, struct dlm_rcom *rc) 4399int dlm_recover_process_copy(struct dlm_ls *ls, struct dlm_rcom *rc)
4372{ 4400{
4373 struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf; 4401 struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf;
@@ -4375,15 +4403,16 @@ int dlm_recover_process_copy(struct dlm_ls *ls, struct dlm_rcom *rc)
4375 struct dlm_lkb *lkb; 4403 struct dlm_lkb *lkb;
4376 int error; 4404 int error;
4377 4405
4378 error = find_lkb(ls, rl->rl_lkid, &lkb); 4406 error = find_lkb(ls, le32_to_cpu(rl->rl_lkid), &lkb);
4379 if (error) { 4407 if (error) {
4380 log_error(ls, "recover_process_copy no lkid %x", rl->rl_lkid); 4408 log_error(ls, "recover_process_copy no lkid %x",
4409 le32_to_cpu(rl->rl_lkid));
4381 return error; 4410 return error;
4382 } 4411 }
4383 4412
4384 DLM_ASSERT(is_process_copy(lkb), dlm_print_lkb(lkb);); 4413 DLM_ASSERT(is_process_copy(lkb), dlm_print_lkb(lkb););
4385 4414
4386 error = rl->rl_result; 4415 error = le32_to_cpu(rl->rl_result);
4387 4416
4388 r = lkb->lkb_resource; 4417 r = lkb->lkb_resource;
4389 hold_rsb(r); 4418 hold_rsb(r);
@@ -4402,7 +4431,7 @@ int dlm_recover_process_copy(struct dlm_ls *ls, struct dlm_rcom *rc)
4402 log_debug(ls, "master copy exists %x", lkb->lkb_id); 4431 log_debug(ls, "master copy exists %x", lkb->lkb_id);
4403 /* fall through */ 4432 /* fall through */
4404 case 0: 4433 case 0:
4405 lkb->lkb_remid = rl->rl_remid; 4434 lkb->lkb_remid = le32_to_cpu(rl->rl_remid);
4406 break; 4435 break;
4407 default: 4436 default:
4408 log_error(ls, "dlm_recover_process_copy unknown error %d %x", 4437 log_error(ls, "dlm_recover_process_copy unknown error %d %x",
@@ -4451,7 +4480,7 @@ int dlm_user_request(struct dlm_ls *ls, struct dlm_user_args *ua,
4451 lock and that lkb_astparam is the dlm_user_args structure. */ 4480 lock and that lkb_astparam is the dlm_user_args structure. */
4452 4481
4453 error = set_lock_args(mode, &ua->lksb, flags, namelen, timeout_cs, 4482 error = set_lock_args(mode, &ua->lksb, flags, namelen, timeout_cs,
4454 DLM_FAKE_USER_AST, ua, DLM_FAKE_USER_AST, &args); 4483 fake_astfn, ua, fake_bastfn, &args);
4455 lkb->lkb_flags |= DLM_IFL_USER; 4484 lkb->lkb_flags |= DLM_IFL_USER;
4456 ua->old_mode = DLM_LOCK_IV; 4485 ua->old_mode = DLM_LOCK_IV;
4457 4486
@@ -4504,7 +4533,7 @@ int dlm_user_convert(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
4504 /* user can change the params on its lock when it converts it, or 4533 /* user can change the params on its lock when it converts it, or
4505 add an lvb that didn't exist before */ 4534 add an lvb that didn't exist before */
4506 4535
4507 ua = (struct dlm_user_args *)lkb->lkb_astparam; 4536 ua = lkb->lkb_ua;
4508 4537
4509 if (flags & DLM_LKF_VALBLK && !ua->lksb.sb_lvbptr) { 4538 if (flags & DLM_LKF_VALBLK && !ua->lksb.sb_lvbptr) {
4510 ua->lksb.sb_lvbptr = kzalloc(DLM_USER_LVB_LEN, GFP_KERNEL); 4539 ua->lksb.sb_lvbptr = kzalloc(DLM_USER_LVB_LEN, GFP_KERNEL);
@@ -4525,7 +4554,7 @@ int dlm_user_convert(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
4525 ua->old_mode = lkb->lkb_grmode; 4554 ua->old_mode = lkb->lkb_grmode;
4526 4555
4527 error = set_lock_args(mode, &ua->lksb, flags, 0, timeout_cs, 4556 error = set_lock_args(mode, &ua->lksb, flags, 0, timeout_cs,
4528 DLM_FAKE_USER_AST, ua, DLM_FAKE_USER_AST, &args); 4557 fake_astfn, ua, fake_bastfn, &args);
4529 if (error) 4558 if (error)
4530 goto out_put; 4559 goto out_put;
4531 4560
@@ -4555,7 +4584,7 @@ int dlm_user_unlock(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
4555 if (error) 4584 if (error)
4556 goto out; 4585 goto out;
4557 4586
4558 ua = (struct dlm_user_args *)lkb->lkb_astparam; 4587 ua = lkb->lkb_ua;
4559 4588
4560 if (lvb_in && ua->lksb.sb_lvbptr) 4589 if (lvb_in && ua->lksb.sb_lvbptr)
4561 memcpy(ua->lksb.sb_lvbptr, lvb_in, DLM_USER_LVB_LEN); 4590 memcpy(ua->lksb.sb_lvbptr, lvb_in, DLM_USER_LVB_LEN);
@@ -4604,7 +4633,7 @@ int dlm_user_cancel(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
4604 if (error) 4633 if (error)
4605 goto out; 4634 goto out;
4606 4635
4607 ua = (struct dlm_user_args *)lkb->lkb_astparam; 4636 ua = lkb->lkb_ua;
4608 if (ua_tmp->castparam) 4637 if (ua_tmp->castparam)
4609 ua->castparam = ua_tmp->castparam; 4638 ua->castparam = ua_tmp->castparam;
4610 ua->user_lksb = ua_tmp->user_lksb; 4639 ua->user_lksb = ua_tmp->user_lksb;
@@ -4642,7 +4671,7 @@ int dlm_user_deadlock(struct dlm_ls *ls, uint32_t flags, uint32_t lkid)
4642 if (error) 4671 if (error)
4643 goto out; 4672 goto out;
4644 4673
4645 ua = (struct dlm_user_args *)lkb->lkb_astparam; 4674 ua = lkb->lkb_ua;
4646 4675
4647 error = set_unlock_args(flags, ua, &args); 4676 error = set_unlock_args(flags, ua, &args);
4648 if (error) 4677 if (error)
@@ -4681,7 +4710,6 @@ int dlm_user_deadlock(struct dlm_ls *ls, uint32_t flags, uint32_t lkid)
4681 4710
4682static int orphan_proc_lock(struct dlm_ls *ls, struct dlm_lkb *lkb) 4711static int orphan_proc_lock(struct dlm_ls *ls, struct dlm_lkb *lkb)
4683{ 4712{
4684 struct dlm_user_args *ua = (struct dlm_user_args *)lkb->lkb_astparam;
4685 struct dlm_args args; 4713 struct dlm_args args;
4686 int error; 4714 int error;
4687 4715
@@ -4690,7 +4718,7 @@ static int orphan_proc_lock(struct dlm_ls *ls, struct dlm_lkb *lkb)
4690 list_add_tail(&lkb->lkb_ownqueue, &ls->ls_orphans); 4718 list_add_tail(&lkb->lkb_ownqueue, &ls->ls_orphans);
4691 mutex_unlock(&ls->ls_orphans_mutex); 4719 mutex_unlock(&ls->ls_orphans_mutex);
4692 4720
4693 set_unlock_args(0, ua, &args); 4721 set_unlock_args(0, lkb->lkb_ua, &args);
4694 4722
4695 error = cancel_lock(ls, lkb, &args); 4723 error = cancel_lock(ls, lkb, &args);
4696 if (error == -DLM_ECANCEL) 4724 if (error == -DLM_ECANCEL)
@@ -4703,11 +4731,10 @@ static int orphan_proc_lock(struct dlm_ls *ls, struct dlm_lkb *lkb)
4703 4731
4704static int unlock_proc_lock(struct dlm_ls *ls, struct dlm_lkb *lkb) 4732static int unlock_proc_lock(struct dlm_ls *ls, struct dlm_lkb *lkb)
4705{ 4733{
4706 struct dlm_user_args *ua = (struct dlm_user_args *)lkb->lkb_astparam;
4707 struct dlm_args args; 4734 struct dlm_args args;
4708 int error; 4735 int error;
4709 4736
4710 set_unlock_args(DLM_LKF_FORCEUNLOCK, ua, &args); 4737 set_unlock_args(DLM_LKF_FORCEUNLOCK, lkb->lkb_ua, &args);
4711 4738
4712 error = unlock_lock(ls, lkb, &args); 4739 error = unlock_lock(ls, lkb, &args);
4713 if (error == -DLM_EUNLOCK) 4740 if (error == -DLM_EUNLOCK)
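
All of the (struct dlm_user_args *)lkb->lkb_astparam casts above become a typed lkb->lkb_ua access. One plausible shape for that change in struct dlm_lkb (the real layout lives in dlm_internal.h, which this patch also touches, and may differ) is an anonymous union over the ast parameter:

struct dlm_lkb_example {
	/* ... */
	union {
		void			*lkb_astparam;	/* caller's ast argument */
		struct dlm_user_args	*lkb_ua;	/* same storage, typed for user locks */
	};
	/* ... */
};
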
diff --git a/fs/dlm/lock.h b/fs/dlm/lock.h
index 27b6ed302911..05d9c82e646b 100644
--- a/fs/dlm/lock.h
+++ b/fs/dlm/lock.h
@@ -17,7 +17,7 @@ void dlm_print_rsb(struct dlm_rsb *r);
17void dlm_dump_rsb(struct dlm_rsb *r); 17void dlm_dump_rsb(struct dlm_rsb *r);
18void dlm_print_lkb(struct dlm_lkb *lkb); 18void dlm_print_lkb(struct dlm_lkb *lkb);
19void dlm_receive_message_saved(struct dlm_ls *ls, struct dlm_message *ms); 19void dlm_receive_message_saved(struct dlm_ls *ls, struct dlm_message *ms);
20void dlm_receive_buffer(struct dlm_header *hd, int nodeid); 20void dlm_receive_buffer(union dlm_packet *p, int nodeid);
21int dlm_modes_compat(int mode1, int mode2); 21int dlm_modes_compat(int mode1, int mode2);
22void dlm_put_rsb(struct dlm_rsb *r); 22void dlm_put_rsb(struct dlm_rsb *r);
23void dlm_hold_rsb(struct dlm_rsb *r); 23void dlm_hold_rsb(struct dlm_rsb *r);
diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
index b180fdc51085..b64e55e0515d 100644
--- a/fs/dlm/lockspace.c
+++ b/fs/dlm/lockspace.c
@@ -191,7 +191,7 @@ static int do_uevent(struct dlm_ls *ls, int in)
191} 191}
192 192
193 193
194int dlm_lockspace_init(void) 194int __init dlm_lockspace_init(void)
195{ 195{
196 ls_count = 0; 196 ls_count = 0;
197 mutex_init(&ls_lock); 197 mutex_init(&ls_lock);
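
dlm_lockspace_init(), and below it dlm_memory_init(), dlm_netlink_init()/dlm_netlink_exit() and dlm_user_init(), pick up the __init/__exit section annotations so the kernel can discard that code once module setup or teardown has run. The usual shape of these annotations, with placeholder names:

static int __init example_init(void)
{
	return 0;	/* this text is freed after the module finishes loading */
}

static void __exit example_exit(void)
{
	/* only kept when the module can actually be unloaded */
}

module_init(example_init);
module_exit(example_exit);
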
diff --git a/fs/dlm/memory.c b/fs/dlm/memory.c
index f7783867491a..54c14c6d06cb 100644
--- a/fs/dlm/memory.c
+++ b/fs/dlm/memory.c
@@ -18,7 +18,7 @@
18static struct kmem_cache *lkb_cache; 18static struct kmem_cache *lkb_cache;
19 19
20 20
21int dlm_memory_init(void) 21int __init dlm_memory_init(void)
22{ 22{
23 int ret = 0; 23 int ret = 0;
24 24
@@ -80,7 +80,7 @@ void dlm_free_lkb(struct dlm_lkb *lkb)
80{ 80{
81 if (lkb->lkb_flags & DLM_IFL_USER) { 81 if (lkb->lkb_flags & DLM_IFL_USER) {
82 struct dlm_user_args *ua; 82 struct dlm_user_args *ua;
83 ua = (struct dlm_user_args *)lkb->lkb_astparam; 83 ua = lkb->lkb_ua;
84 if (ua) { 84 if (ua) {
85 if (ua->lksb.sb_lvbptr) 85 if (ua->lksb.sb_lvbptr)
86 kfree(ua->lksb.sb_lvbptr); 86 kfree(ua->lksb.sb_lvbptr);
diff --git a/fs/dlm/midcomms.c b/fs/dlm/midcomms.c
index e69926e984db..07ac709f3ed7 100644
--- a/fs/dlm/midcomms.c
+++ b/fs/dlm/midcomms.c
@@ -61,9 +61,9 @@ int dlm_process_incoming_buffer(int nodeid, const void *base,
61 union { 61 union {
62 unsigned char __buf[DLM_INBUF_LEN]; 62 unsigned char __buf[DLM_INBUF_LEN];
63 /* this is to force proper alignment on some arches */ 63 /* this is to force proper alignment on some arches */
64 struct dlm_header dlm; 64 union dlm_packet p;
65 } __tmp; 65 } __tmp;
66 struct dlm_header *msg = &__tmp.dlm; 66 union dlm_packet *p = &__tmp.p;
67 int ret = 0; 67 int ret = 0;
68 int err = 0; 68 int err = 0;
69 uint16_t msglen; 69 uint16_t msglen;
@@ -75,15 +75,22 @@ int dlm_process_incoming_buffer(int nodeid, const void *base,
75 message may wrap around the end of the buffer back to the 75 message may wrap around the end of the buffer back to the
76 start, so we need to use a temp buffer and copy_from_cb. */ 76 start, so we need to use a temp buffer and copy_from_cb. */
77 77
78 copy_from_cb(msg, base, offset, sizeof(struct dlm_header), 78 copy_from_cb(p, base, offset, sizeof(struct dlm_header),
79 limit); 79 limit);
80 80
81 msglen = le16_to_cpu(msg->h_length); 81 msglen = le16_to_cpu(p->header.h_length);
82 lockspace = msg->h_lockspace; 82 lockspace = p->header.h_lockspace;
83 83
84 err = -EINVAL; 84 err = -EINVAL;
85 if (msglen < sizeof(struct dlm_header)) 85 if (msglen < sizeof(struct dlm_header))
86 break; 86 break;
87 if (p->header.h_cmd == DLM_MSG) {
88 if (msglen < sizeof(struct dlm_message))
89 break;
90 } else {
91 if (msglen < sizeof(struct dlm_rcom))
92 break;
93 }
87 err = -E2BIG; 94 err = -E2BIG;
88 if (msglen > dlm_config.ci_buffer_size) { 95 if (msglen > dlm_config.ci_buffer_size) {
89 log_print("message size %d from %d too big, buf len %d", 96 log_print("message size %d from %d too big, buf len %d",
@@ -104,26 +111,26 @@ int dlm_process_incoming_buffer(int nodeid, const void *base,
104 in the buffer on the stack (which should work for most 111 in the buffer on the stack (which should work for most
105 ordinary messages). */ 112 ordinary messages). */
106 113
107 if (msglen > DLM_INBUF_LEN && msg == &__tmp.dlm) { 114 if (msglen > sizeof(__tmp) && p == &__tmp.p) {
108 msg = kmalloc(dlm_config.ci_buffer_size, GFP_KERNEL); 115 p = kmalloc(dlm_config.ci_buffer_size, GFP_KERNEL);
109 if (msg == NULL) 116 if (p == NULL)
110 return ret; 117 return ret;
111 } 118 }
112 119
113 copy_from_cb(msg, base, offset, msglen, limit); 120 copy_from_cb(p, base, offset, msglen, limit);
114 121
115 BUG_ON(lockspace != msg->h_lockspace); 122 BUG_ON(lockspace != p->header.h_lockspace);
116 123
117 ret += msglen; 124 ret += msglen;
118 offset += msglen; 125 offset += msglen;
119 offset &= (limit - 1); 126 offset &= (limit - 1);
120 len -= msglen; 127 len -= msglen;
121 128
122 dlm_receive_buffer(msg, nodeid); 129 dlm_receive_buffer(p, nodeid);
123 } 130 }
124 131
125 if (msg != &__tmp.dlm) 132 if (p != &__tmp.p)
126 kfree(msg); 133 kfree(p);
127 134
128 return err ? err : ret; 135 return err ? err : ret;
129} 136}
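
dlm_process_incoming_buffer() now parses into a union dlm_packet: the on-stack union gives the copied-in header worst-case alignment, and the command byte selects which fixed-size structure must be fully present before the rest of the message is accepted. A condensed sketch of the validation step (function name invented; constants and types as used in the hunk):

static int check_incoming_length(const union dlm_packet *p)
{
	uint16_t msglen = le16_to_cpu(p->header.h_length);

	if (msglen < sizeof(struct dlm_header))
		return -EINVAL;
	/* each command type has a fixed part that must be present in full */
	if (p->header.h_cmd == DLM_MSG) {
		if (msglen < sizeof(struct dlm_message))
			return -EINVAL;
	} else {
		if (msglen < sizeof(struct dlm_rcom))
			return -EINVAL;
	}
	if (msglen > dlm_config.ci_buffer_size)
		return -E2BIG;
	return 0;
}
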
diff --git a/fs/dlm/netlink.c b/fs/dlm/netlink.c
index 863b87d0dc71..714593621f4f 100644
--- a/fs/dlm/netlink.c
+++ b/fs/dlm/netlink.c
@@ -78,7 +78,7 @@ static struct genl_ops dlm_nl_ops = {
78 .doit = user_cmd, 78 .doit = user_cmd,
79}; 79};
80 80
81int dlm_netlink_init(void) 81int __init dlm_netlink_init(void)
82{ 82{
83 int rv; 83 int rv;
84 84
@@ -95,7 +95,7 @@ int dlm_netlink_init(void)
95 return rv; 95 return rv;
96} 96}
97 97
98void dlm_netlink_exit(void) 98void __exit dlm_netlink_exit(void)
99{ 99{
100 genl_unregister_ops(&family, &dlm_nl_ops); 100 genl_unregister_ops(&family, &dlm_nl_ops);
101 genl_unregister_family(&family); 101 genl_unregister_family(&family);
@@ -104,7 +104,6 @@ void dlm_netlink_exit(void)
104static void fill_data(struct dlm_lock_data *data, struct dlm_lkb *lkb) 104static void fill_data(struct dlm_lock_data *data, struct dlm_lkb *lkb)
105{ 105{
106 struct dlm_rsb *r = lkb->lkb_resource; 106 struct dlm_rsb *r = lkb->lkb_resource;
107 struct dlm_user_args *ua = (struct dlm_user_args *) lkb->lkb_astparam;
108 107
109 memset(data, 0, sizeof(struct dlm_lock_data)); 108 memset(data, 0, sizeof(struct dlm_lock_data));
110 109
@@ -117,8 +116,8 @@ static void fill_data(struct dlm_lock_data *data, struct dlm_lkb *lkb)
117 data->grmode = lkb->lkb_grmode; 116 data->grmode = lkb->lkb_grmode;
118 data->rqmode = lkb->lkb_rqmode; 117 data->rqmode = lkb->lkb_rqmode;
119 data->timestamp = lkb->lkb_timestamp; 118 data->timestamp = lkb->lkb_timestamp;
120 if (ua) 119 if (lkb->lkb_ua)
121 data->xid = ua->xid; 120 data->xid = lkb->lkb_ua->xid;
122 if (r) { 121 if (r) {
123 data->lockspace_id = r->res_ls->ls_global_id; 122 data->lockspace_id = r->res_ls->ls_global_id;
124 data->resource_namelen = r->res_length; 123 data->resource_namelen = r->res_length;
diff --git a/fs/dlm/rcom.c b/fs/dlm/rcom.c
index 026824cd3acb..035e6f9990b0 100644
--- a/fs/dlm/rcom.c
+++ b/fs/dlm/rcom.c
@@ -78,13 +78,14 @@ static void send_rcom(struct dlm_ls *ls, struct dlm_mhandle *mh,
78 78
79static void make_config(struct dlm_ls *ls, struct rcom_config *rf) 79static void make_config(struct dlm_ls *ls, struct rcom_config *rf)
80{ 80{
81 rf->rf_lvblen = ls->ls_lvblen; 81 rf->rf_lvblen = cpu_to_le32(ls->ls_lvblen);
82 rf->rf_lsflags = ls->ls_exflags; 82 rf->rf_lsflags = cpu_to_le32(ls->ls_exflags);
83} 83}
84 84
85static int check_config(struct dlm_ls *ls, struct dlm_rcom *rc, int nodeid) 85static int check_config(struct dlm_ls *ls, struct dlm_rcom *rc, int nodeid)
86{ 86{
87 struct rcom_config *rf = (struct rcom_config *) rc->rc_buf; 87 struct rcom_config *rf = (struct rcom_config *) rc->rc_buf;
88 size_t conf_size = sizeof(struct dlm_rcom) + sizeof(struct rcom_config);
88 89
89 if ((rc->rc_header.h_version & 0xFFFF0000) != DLM_HEADER_MAJOR) { 90 if ((rc->rc_header.h_version & 0xFFFF0000) != DLM_HEADER_MAJOR) {
90 log_error(ls, "version mismatch: %x nodeid %d: %x", 91 log_error(ls, "version mismatch: %x nodeid %d: %x",
@@ -93,11 +94,18 @@ static int check_config(struct dlm_ls *ls, struct dlm_rcom *rc, int nodeid)
93 return -EPROTO; 94 return -EPROTO;
94 } 95 }
95 96
96 if (rf->rf_lvblen != ls->ls_lvblen || 97 if (rc->rc_header.h_length < conf_size) {
97 rf->rf_lsflags != ls->ls_exflags) { 98 log_error(ls, "config too short: %d nodeid %d",
99 rc->rc_header.h_length, nodeid);
100 return -EPROTO;
101 }
102
103 if (le32_to_cpu(rf->rf_lvblen) != ls->ls_lvblen ||
104 le32_to_cpu(rf->rf_lsflags) != ls->ls_exflags) {
98 log_error(ls, "config mismatch: %d,%x nodeid %d: %d,%x", 105 log_error(ls, "config mismatch: %d,%x nodeid %d: %d,%x",
99 ls->ls_lvblen, ls->ls_exflags, 106 ls->ls_lvblen, ls->ls_exflags, nodeid,
100 nodeid, rf->rf_lvblen, rf->rf_lsflags); 107 le32_to_cpu(rf->rf_lvblen),
108 le32_to_cpu(rf->rf_lsflags));
101 return -EPROTO; 109 return -EPROTO;
102 } 110 }
103 return 0; 111 return 0;
@@ -128,7 +136,7 @@ int dlm_rcom_status(struct dlm_ls *ls, int nodeid)
128 ls->ls_recover_nodeid = nodeid; 136 ls->ls_recover_nodeid = nodeid;
129 137
130 if (nodeid == dlm_our_nodeid()) { 138 if (nodeid == dlm_our_nodeid()) {
131 rc = (struct dlm_rcom *) ls->ls_recover_buf; 139 rc = ls->ls_recover_buf;
132 rc->rc_result = dlm_recover_status(ls); 140 rc->rc_result = dlm_recover_status(ls);
133 goto out; 141 goto out;
134 } 142 }
@@ -147,7 +155,7 @@ int dlm_rcom_status(struct dlm_ls *ls, int nodeid)
147 if (error) 155 if (error)
148 goto out; 156 goto out;
149 157
150 rc = (struct dlm_rcom *) ls->ls_recover_buf; 158 rc = ls->ls_recover_buf;
151 159
152 if (rc->rc_result == -ESRCH) { 160 if (rc->rc_result == -ESRCH) {
153 /* we pretend the remote lockspace exists with 0 status */ 161 /* we pretend the remote lockspace exists with 0 status */
@@ -201,14 +209,15 @@ int dlm_rcom_names(struct dlm_ls *ls, int nodeid, char *last_name, int last_len)
201{ 209{
202 struct dlm_rcom *rc; 210 struct dlm_rcom *rc;
203 struct dlm_mhandle *mh; 211 struct dlm_mhandle *mh;
204 int error = 0, len = sizeof(struct dlm_rcom); 212 int error = 0;
213 int max_size = dlm_config.ci_buffer_size - sizeof(struct dlm_rcom);
205 214
206 ls->ls_recover_nodeid = nodeid; 215 ls->ls_recover_nodeid = nodeid;
207 216
208 if (nodeid == dlm_our_nodeid()) { 217 if (nodeid == dlm_our_nodeid()) {
209 dlm_copy_master_names(ls, last_name, last_len, 218 dlm_copy_master_names(ls, last_name, last_len,
210 ls->ls_recover_buf + len, 219 ls->ls_recover_buf->rc_buf,
211 dlm_config.ci_buffer_size - len, nodeid); 220 max_size, nodeid);
212 goto out; 221 goto out;
213 } 222 }
214 223
@@ -299,22 +308,22 @@ static void pack_rcom_lock(struct dlm_rsb *r, struct dlm_lkb *lkb,
299{ 308{
300 memset(rl, 0, sizeof(*rl)); 309 memset(rl, 0, sizeof(*rl));
301 310
302 rl->rl_ownpid = lkb->lkb_ownpid; 311 rl->rl_ownpid = cpu_to_le32(lkb->lkb_ownpid);
303 rl->rl_lkid = lkb->lkb_id; 312 rl->rl_lkid = cpu_to_le32(lkb->lkb_id);
304 rl->rl_exflags = lkb->lkb_exflags; 313 rl->rl_exflags = cpu_to_le32(lkb->lkb_exflags);
305 rl->rl_flags = lkb->lkb_flags; 314 rl->rl_flags = cpu_to_le32(lkb->lkb_flags);
306 rl->rl_lvbseq = lkb->lkb_lvbseq; 315 rl->rl_lvbseq = cpu_to_le32(lkb->lkb_lvbseq);
307 rl->rl_rqmode = lkb->lkb_rqmode; 316 rl->rl_rqmode = lkb->lkb_rqmode;
308 rl->rl_grmode = lkb->lkb_grmode; 317 rl->rl_grmode = lkb->lkb_grmode;
309 rl->rl_status = lkb->lkb_status; 318 rl->rl_status = lkb->lkb_status;
310 rl->rl_wait_type = lkb->lkb_wait_type; 319 rl->rl_wait_type = cpu_to_le16(lkb->lkb_wait_type);
311 320
312 if (lkb->lkb_bastaddr) 321 if (lkb->lkb_bastfn)
313 rl->rl_asts |= AST_BAST; 322 rl->rl_asts |= AST_BAST;
314 if (lkb->lkb_astaddr) 323 if (lkb->lkb_astfn)
315 rl->rl_asts |= AST_COMP; 324 rl->rl_asts |= AST_COMP;
316 325
317 rl->rl_namelen = r->res_length; 326 rl->rl_namelen = cpu_to_le16(r->res_length);
318 memcpy(rl->rl_name, r->res_name, r->res_length); 327 memcpy(rl->rl_name, r->res_name, r->res_length);
319 328
320 /* FIXME: might we have an lvb without DLM_LKF_VALBLK set ? 329 /* FIXME: might we have an lvb without DLM_LKF_VALBLK set ?
@@ -348,6 +357,7 @@ int dlm_send_rcom_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
348 return error; 357 return error;
349} 358}
350 359
360/* needs at least dlm_rcom + rcom_lock */
351static void receive_rcom_lock(struct dlm_ls *ls, struct dlm_rcom *rc_in) 361static void receive_rcom_lock(struct dlm_ls *ls, struct dlm_rcom *rc_in)
352{ 362{
353 struct dlm_rcom *rc; 363 struct dlm_rcom *rc;
@@ -401,7 +411,7 @@ int dlm_send_ls_not_ready(int nodeid, struct dlm_rcom *rc_in)
401 rc->rc_result = -ESRCH; 411 rc->rc_result = -ESRCH;
402 412
403 rf = (struct rcom_config *) rc->rc_buf; 413 rf = (struct rcom_config *) rc->rc_buf;
404 rf->rf_lvblen = -1; 414 rf->rf_lvblen = cpu_to_le32(~0U);
405 415
406 dlm_rcom_out(rc); 416 dlm_rcom_out(rc);
407 dlm_lowcomms_commit_buffer(mh); 417 dlm_lowcomms_commit_buffer(mh);
@@ -439,6 +449,8 @@ static int is_old_reply(struct dlm_ls *ls, struct dlm_rcom *rc)
439 449
440void dlm_receive_rcom(struct dlm_ls *ls, struct dlm_rcom *rc, int nodeid) 450void dlm_receive_rcom(struct dlm_ls *ls, struct dlm_rcom *rc, int nodeid)
441{ 451{
452 int lock_size = sizeof(struct dlm_rcom) + sizeof(struct rcom_lock);
453
442 if (dlm_recovery_stopped(ls) && (rc->rc_type != DLM_RCOM_STATUS)) { 454 if (dlm_recovery_stopped(ls) && (rc->rc_type != DLM_RCOM_STATUS)) {
443 log_debug(ls, "ignoring recovery message %x from %d", 455 log_debug(ls, "ignoring recovery message %x from %d",
444 rc->rc_type, nodeid); 456 rc->rc_type, nodeid);
@@ -462,6 +474,8 @@ void dlm_receive_rcom(struct dlm_ls *ls, struct dlm_rcom *rc, int nodeid)
462 break; 474 break;
463 475
464 case DLM_RCOM_LOCK: 476 case DLM_RCOM_LOCK:
477 if (rc->rc_header.h_length < lock_size)
478 goto Eshort;
465 receive_rcom_lock(ls, rc); 479 receive_rcom_lock(ls, rc);
466 break; 480 break;
467 481
@@ -478,13 +492,18 @@ void dlm_receive_rcom(struct dlm_ls *ls, struct dlm_rcom *rc, int nodeid)
478 break; 492 break;
479 493
480 case DLM_RCOM_LOCK_REPLY: 494 case DLM_RCOM_LOCK_REPLY:
495 if (rc->rc_header.h_length < lock_size)
496 goto Eshort;
481 dlm_recover_process_copy(ls, rc); 497 dlm_recover_process_copy(ls, rc);
482 break; 498 break;
483 499
484 default: 500 default:
485 log_error(ls, "receive_rcom bad type %d", rc->rc_type); 501 log_error(ls, "receive_rcom bad type %d", rc->rc_type);
486 } 502 }
487 out: 503out:
488 return; 504 return;
505Eshort:
506 log_error(ls, "recovery message %x from %d is too short",
507 rc->rc_type, nodeid);
489} 508}
490 509
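
check_config() and the DLM_RCOM_LOCK / DLM_RCOM_LOCK_REPLY cases now confirm that h_length covers the fixed payload (rcom_config or rcom_lock) before any field of rc_buf is read; messages that are too short are logged through the Eshort label rather than dereferenced. In outline (a simplified, assumed form of those checks):

static int rcom_payload_ok(struct dlm_ls *ls, struct dlm_rcom *rc,
			   size_t payload, int nodeid)
{
	if (rc->rc_header.h_length < sizeof(struct dlm_rcom) + payload) {
		log_error(ls, "recovery message %x from %d is too short",
			  rc->rc_type, nodeid);
		return 0;
	}
	return 1;
}
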
diff --git a/fs/dlm/recover.c b/fs/dlm/recover.c
index df075dc300fa..80aba5bdd4a4 100644
--- a/fs/dlm/recover.c
+++ b/fs/dlm/recover.c
@@ -94,7 +94,7 @@ void dlm_set_recover_status(struct dlm_ls *ls, uint32_t status)
94 94
95static int wait_status_all(struct dlm_ls *ls, uint32_t wait_status) 95static int wait_status_all(struct dlm_ls *ls, uint32_t wait_status)
96{ 96{
97 struct dlm_rcom *rc = (struct dlm_rcom *) ls->ls_recover_buf; 97 struct dlm_rcom *rc = ls->ls_recover_buf;
98 struct dlm_member *memb; 98 struct dlm_member *memb;
99 int error = 0, delay; 99 int error = 0, delay;
100 100
@@ -123,7 +123,7 @@ static int wait_status_all(struct dlm_ls *ls, uint32_t wait_status)
123 123
124static int wait_status_low(struct dlm_ls *ls, uint32_t wait_status) 124static int wait_status_low(struct dlm_ls *ls, uint32_t wait_status)
125{ 125{
126 struct dlm_rcom *rc = (struct dlm_rcom *) ls->ls_recover_buf; 126 struct dlm_rcom *rc = ls->ls_recover_buf;
127 int error = 0, delay = 0, nodeid = ls->ls_low_nodeid; 127 int error = 0, delay = 0, nodeid = ls->ls_low_nodeid;
128 128
129 for (;;) { 129 for (;;) {
diff --git a/fs/dlm/requestqueue.c b/fs/dlm/requestqueue.c
index 0de04f17ccea..daa4183fbb84 100644
--- a/fs/dlm/requestqueue.c
+++ b/fs/dlm/requestqueue.c
@@ -20,7 +20,7 @@
20struct rq_entry { 20struct rq_entry {
21 struct list_head list; 21 struct list_head list;
22 int nodeid; 22 int nodeid;
23 char request[0]; 23 struct dlm_message request;
24}; 24};
25 25
26/* 26/*
@@ -30,10 +30,10 @@ struct rq_entry {
30 * lockspace is enabled on some while still suspended on others. 30 * lockspace is enabled on some while still suspended on others.
31 */ 31 */
32 32
33void dlm_add_requestqueue(struct dlm_ls *ls, int nodeid, struct dlm_header *hd) 33void dlm_add_requestqueue(struct dlm_ls *ls, int nodeid, struct dlm_message *ms)
34{ 34{
35 struct rq_entry *e; 35 struct rq_entry *e;
36 int length = hd->h_length; 36 int length = ms->m_header.h_length - sizeof(struct dlm_message);
37 37
38 e = kmalloc(sizeof(struct rq_entry) + length, GFP_KERNEL); 38 e = kmalloc(sizeof(struct rq_entry) + length, GFP_KERNEL);
39 if (!e) { 39 if (!e) {
@@ -42,7 +42,7 @@ void dlm_add_requestqueue(struct dlm_ls *ls, int nodeid, struct dlm_header *hd)
42 } 42 }
43 43
44 e->nodeid = nodeid; 44 e->nodeid = nodeid;
45 memcpy(e->request, hd, length); 45 memcpy(&e->request, ms, ms->m_header.h_length);
46 46
47 mutex_lock(&ls->ls_requestqueue_mutex); 47 mutex_lock(&ls->ls_requestqueue_mutex);
48 list_add_tail(&e->list, &ls->ls_requestqueue); 48 list_add_tail(&e->list, &ls->ls_requestqueue);
@@ -76,7 +76,7 @@ int dlm_process_requestqueue(struct dlm_ls *ls)
76 e = list_entry(ls->ls_requestqueue.next, struct rq_entry, list); 76 e = list_entry(ls->ls_requestqueue.next, struct rq_entry, list);
77 mutex_unlock(&ls->ls_requestqueue_mutex); 77 mutex_unlock(&ls->ls_requestqueue_mutex);
78 78
79 dlm_receive_message_saved(ls, (struct dlm_message *)e->request); 79 dlm_receive_message_saved(ls, &e->request);
80 80
81 mutex_lock(&ls->ls_requestqueue_mutex); 81 mutex_lock(&ls->ls_requestqueue_mutex);
82 list_del(&e->list); 82 list_del(&e->list);
@@ -176,7 +176,7 @@ void dlm_purge_requestqueue(struct dlm_ls *ls)
176 176
177 mutex_lock(&ls->ls_requestqueue_mutex); 177 mutex_lock(&ls->ls_requestqueue_mutex);
178 list_for_each_entry_safe(e, safe, &ls->ls_requestqueue, list) { 178 list_for_each_entry_safe(e, safe, &ls->ls_requestqueue, list) {
179 ms = (struct dlm_message *) e->request; 179 ms = &e->request;
180 180
181 if (purge_request(ls, ms, e->nodeid)) { 181 if (purge_request(ls, ms, e->nodeid)) {
182 list_del(&e->list); 182 list_del(&e->list);
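
The queued entry now embeds a struct dlm_message instead of an untyped char request[0], so a saved message can be handed straight to dlm_receive_message_saved() without a cast; only the name bytes past the fixed struct still need extra space in the same allocation. A hedged sketch of that allocation (helper name invented):

static struct rq_entry *save_message(int nodeid, struct dlm_message *ms)
{
	/* extra bytes carried beyond the fixed dlm_message */
	int extra = ms->m_header.h_length - sizeof(struct dlm_message);
	struct rq_entry *e = kmalloc(sizeof(struct rq_entry) + extra, GFP_KERNEL);

	if (!e)
		return NULL;
	e->nodeid = nodeid;
	memcpy(&e->request, ms, ms->m_header.h_length);
	return e;
}
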
diff --git a/fs/dlm/requestqueue.h b/fs/dlm/requestqueue.h
index aba34fc05ee4..10ce449b77da 100644
--- a/fs/dlm/requestqueue.h
+++ b/fs/dlm/requestqueue.h
@@ -13,7 +13,7 @@
13#ifndef __REQUESTQUEUE_DOT_H__ 13#ifndef __REQUESTQUEUE_DOT_H__
14#define __REQUESTQUEUE_DOT_H__ 14#define __REQUESTQUEUE_DOT_H__
15 15
16void dlm_add_requestqueue(struct dlm_ls *ls, int nodeid, struct dlm_header *hd); 16void dlm_add_requestqueue(struct dlm_ls *ls, int nodeid, struct dlm_message *ms);
17int dlm_process_requestqueue(struct dlm_ls *ls); 17int dlm_process_requestqueue(struct dlm_ls *ls);
18void dlm_wait_requestqueue(struct dlm_ls *ls); 18void dlm_wait_requestqueue(struct dlm_ls *ls);
19void dlm_purge_requestqueue(struct dlm_ls *ls); 19void dlm_purge_requestqueue(struct dlm_ls *ls);
diff --git a/fs/dlm/user.c b/fs/dlm/user.c
index 7cbc6826239b..ebbcf38fd33b 100644
--- a/fs/dlm/user.c
+++ b/fs/dlm/user.c
@@ -82,7 +82,7 @@ struct dlm_lock_result32 {
82 82
83static void compat_input(struct dlm_write_request *kb, 83static void compat_input(struct dlm_write_request *kb,
84 struct dlm_write_request32 *kb32, 84 struct dlm_write_request32 *kb32,
85 int max_namelen) 85 size_t count)
86{ 86{
87 kb->version[0] = kb32->version[0]; 87 kb->version[0] = kb32->version[0];
88 kb->version[1] = kb32->version[1]; 88 kb->version[1] = kb32->version[1];
@@ -94,7 +94,8 @@ static void compat_input(struct dlm_write_request *kb,
94 kb->cmd == DLM_USER_REMOVE_LOCKSPACE) { 94 kb->cmd == DLM_USER_REMOVE_LOCKSPACE) {
95 kb->i.lspace.flags = kb32->i.lspace.flags; 95 kb->i.lspace.flags = kb32->i.lspace.flags;
96 kb->i.lspace.minor = kb32->i.lspace.minor; 96 kb->i.lspace.minor = kb32->i.lspace.minor;
97 strcpy(kb->i.lspace.name, kb32->i.lspace.name); 97 memcpy(kb->i.lspace.name, kb32->i.lspace.name, count -
98 offsetof(struct dlm_write_request32, i.lspace.name));
98 } else if (kb->cmd == DLM_USER_PURGE) { 99 } else if (kb->cmd == DLM_USER_PURGE) {
99 kb->i.purge.nodeid = kb32->i.purge.nodeid; 100 kb->i.purge.nodeid = kb32->i.purge.nodeid;
100 kb->i.purge.pid = kb32->i.purge.pid; 101 kb->i.purge.pid = kb32->i.purge.pid;
@@ -112,11 +113,8 @@ static void compat_input(struct dlm_write_request *kb,
112 kb->i.lock.bastaddr = (void *)(long)kb32->i.lock.bastaddr; 113 kb->i.lock.bastaddr = (void *)(long)kb32->i.lock.bastaddr;
113 kb->i.lock.lksb = (void *)(long)kb32->i.lock.lksb; 114 kb->i.lock.lksb = (void *)(long)kb32->i.lock.lksb;
114 memcpy(kb->i.lock.lvb, kb32->i.lock.lvb, DLM_USER_LVB_LEN); 115 memcpy(kb->i.lock.lvb, kb32->i.lock.lvb, DLM_USER_LVB_LEN);
115 if (kb->i.lock.namelen <= max_namelen) 116 memcpy(kb->i.lock.name, kb32->i.lock.name, count -
116 memcpy(kb->i.lock.name, kb32->i.lock.name, 117 offsetof(struct dlm_write_request32, i.lock.name));
117 kb->i.lock.namelen);
118 else
119 kb->i.lock.namelen = max_namelen;
120 } 118 }
121} 119}
122 120
@@ -197,8 +195,8 @@ void dlm_user_add_ast(struct dlm_lkb *lkb, int type)
197 if (lkb->lkb_flags & (DLM_IFL_ORPHAN | DLM_IFL_DEAD)) 195 if (lkb->lkb_flags & (DLM_IFL_ORPHAN | DLM_IFL_DEAD))
198 goto out; 196 goto out;
199 197
200 DLM_ASSERT(lkb->lkb_astparam, dlm_print_lkb(lkb);); 198 DLM_ASSERT(lkb->lkb_ua, dlm_print_lkb(lkb););
201 ua = (struct dlm_user_args *)lkb->lkb_astparam; 199 ua = lkb->lkb_ua;
202 proc = ua->proc; 200 proc = ua->proc;
203 201
204 if (type == AST_BAST && ua->bastaddr == NULL) 202 if (type == AST_BAST && ua->bastaddr == NULL)
@@ -508,7 +506,7 @@ static ssize_t device_write(struct file *file, const char __user *buf,
508#endif 506#endif
509 return -EINVAL; 507 return -EINVAL;
510 508
511 kbuf = kmalloc(count, GFP_KERNEL); 509 kbuf = kzalloc(count + 1, GFP_KERNEL);
512 if (!kbuf) 510 if (!kbuf)
513 return -ENOMEM; 511 return -ENOMEM;
514 512
@@ -526,15 +524,14 @@ static ssize_t device_write(struct file *file, const char __user *buf,
526 if (!kbuf->is64bit) { 524 if (!kbuf->is64bit) {
527 struct dlm_write_request32 *k32buf; 525 struct dlm_write_request32 *k32buf;
528 k32buf = (struct dlm_write_request32 *)kbuf; 526 k32buf = (struct dlm_write_request32 *)kbuf;
529 kbuf = kmalloc(count + (sizeof(struct dlm_write_request) - 527 kbuf = kmalloc(count + 1 + (sizeof(struct dlm_write_request) -
530 sizeof(struct dlm_write_request32)), GFP_KERNEL); 528 sizeof(struct dlm_write_request32)), GFP_KERNEL);
531 if (!kbuf) 529 if (!kbuf)
532 return -ENOMEM; 530 return -ENOMEM;
533 531
534 if (proc) 532 if (proc)
535 set_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags); 533 set_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags);
536 compat_input(kbuf, k32buf, 534 compat_input(kbuf, k32buf, count + 1);
537 count - sizeof(struct dlm_write_request32));
538 kfree(k32buf); 535 kfree(k32buf);
539 } 536 }
540#endif 537#endif
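
compat_input() is now told how many bytes the caller actually wrote and bounds the trailing lockspace or resource name by count minus the offset of the name field, instead of trusting the user-supplied namelen; device_write() likewise switches to kzalloc(count + 1, ...) so the copied-in buffer always ends in a NUL byte. Roughly (an illustrative helper, not the exact dlm code):

static void copy_lock_name(struct dlm_write_request *kb,
			   const struct dlm_write_request32 *kb32,
			   size_t count)
{
	/* bound the variable-length tail by what the caller wrote */
	size_t name_bytes = count -
		offsetof(struct dlm_write_request32, i.lock.name);

	memcpy(kb->i.lock.name, kb32->i.lock.name, name_bytes);
}
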
@@ -774,7 +771,6 @@ static ssize_t device_read(struct file *file, char __user *buf, size_t count,
774{ 771{
775 struct dlm_user_proc *proc = file->private_data; 772 struct dlm_user_proc *proc = file->private_data;
776 struct dlm_lkb *lkb; 773 struct dlm_lkb *lkb;
777 struct dlm_user_args *ua;
778 DECLARE_WAITQUEUE(wait, current); 774 DECLARE_WAITQUEUE(wait, current);
779 int error, type=0, bmode=0, removed = 0; 775 int error, type=0, bmode=0, removed = 0;
780 776
@@ -845,8 +841,7 @@ static ssize_t device_read(struct file *file, char __user *buf, size_t count,
845 } 841 }
846 spin_unlock(&proc->asts_spin); 842 spin_unlock(&proc->asts_spin);
847 843
848 ua = (struct dlm_user_args *)lkb->lkb_astparam; 844 error = copy_result_to_user(lkb->lkb_ua,
849 error = copy_result_to_user(ua,
850 test_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags), 845 test_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags),
851 type, bmode, buf, count); 846 type, bmode, buf, count);
852 847
@@ -907,7 +902,7 @@ static struct miscdevice ctl_device = {
907 .minor = MISC_DYNAMIC_MINOR, 902 .minor = MISC_DYNAMIC_MINOR,
908}; 903};
909 904
910int dlm_user_init(void) 905int __init dlm_user_init(void)
911{ 906{
912 int error; 907 int error;
913 908
diff --git a/fs/dlm/util.c b/fs/dlm/util.c
index 4d9c1f4e1bd1..e36520af7cc0 100644
--- a/fs/dlm/util.c
+++ b/fs/dlm/util.c
@@ -131,52 +131,8 @@ void dlm_message_in(struct dlm_message *ms)
131 ms->m_result = from_dlm_errno(le32_to_cpu(ms->m_result)); 131 ms->m_result = from_dlm_errno(le32_to_cpu(ms->m_result));
132} 132}
133 133
134static void rcom_lock_out(struct rcom_lock *rl)
135{
136 rl->rl_ownpid = cpu_to_le32(rl->rl_ownpid);
137 rl->rl_lkid = cpu_to_le32(rl->rl_lkid);
138 rl->rl_remid = cpu_to_le32(rl->rl_remid);
139 rl->rl_parent_lkid = cpu_to_le32(rl->rl_parent_lkid);
140 rl->rl_parent_remid = cpu_to_le32(rl->rl_parent_remid);
141 rl->rl_exflags = cpu_to_le32(rl->rl_exflags);
142 rl->rl_flags = cpu_to_le32(rl->rl_flags);
143 rl->rl_lvbseq = cpu_to_le32(rl->rl_lvbseq);
144 rl->rl_result = cpu_to_le32(rl->rl_result);
145 rl->rl_wait_type = cpu_to_le16(rl->rl_wait_type);
146 rl->rl_namelen = cpu_to_le16(rl->rl_namelen);
147}
148
149static void rcom_lock_in(struct rcom_lock *rl)
150{
151 rl->rl_ownpid = le32_to_cpu(rl->rl_ownpid);
152 rl->rl_lkid = le32_to_cpu(rl->rl_lkid);
153 rl->rl_remid = le32_to_cpu(rl->rl_remid);
154 rl->rl_parent_lkid = le32_to_cpu(rl->rl_parent_lkid);
155 rl->rl_parent_remid = le32_to_cpu(rl->rl_parent_remid);
156 rl->rl_exflags = le32_to_cpu(rl->rl_exflags);
157 rl->rl_flags = le32_to_cpu(rl->rl_flags);
158 rl->rl_lvbseq = le32_to_cpu(rl->rl_lvbseq);
159 rl->rl_result = le32_to_cpu(rl->rl_result);
160 rl->rl_wait_type = le16_to_cpu(rl->rl_wait_type);
161 rl->rl_namelen = le16_to_cpu(rl->rl_namelen);
162}
163
164static void rcom_config_out(struct rcom_config *rf)
165{
166 rf->rf_lvblen = cpu_to_le32(rf->rf_lvblen);
167 rf->rf_lsflags = cpu_to_le32(rf->rf_lsflags);
168}
169
170static void rcom_config_in(struct rcom_config *rf)
171{
172 rf->rf_lvblen = le32_to_cpu(rf->rf_lvblen);
173 rf->rf_lsflags = le32_to_cpu(rf->rf_lsflags);
174}
175
176void dlm_rcom_out(struct dlm_rcom *rc) 134void dlm_rcom_out(struct dlm_rcom *rc)
177{ 135{
178 int type = rc->rc_type;
179
180 header_out(&rc->rc_header); 136 header_out(&rc->rc_header);
181 137
182 rc->rc_type = cpu_to_le32(rc->rc_type); 138 rc->rc_type = cpu_to_le32(rc->rc_type);
@@ -184,18 +140,10 @@ void dlm_rcom_out(struct dlm_rcom *rc)
184 rc->rc_id = cpu_to_le64(rc->rc_id); 140 rc->rc_id = cpu_to_le64(rc->rc_id);
185 rc->rc_seq = cpu_to_le64(rc->rc_seq); 141 rc->rc_seq = cpu_to_le64(rc->rc_seq);
186 rc->rc_seq_reply = cpu_to_le64(rc->rc_seq_reply); 142 rc->rc_seq_reply = cpu_to_le64(rc->rc_seq_reply);
187
188 if ((type == DLM_RCOM_LOCK) || (type == DLM_RCOM_LOCK_REPLY))
189 rcom_lock_out((struct rcom_lock *) rc->rc_buf);
190
191 else if (type == DLM_RCOM_STATUS_REPLY)
192 rcom_config_out((struct rcom_config *) rc->rc_buf);
193} 143}
194 144
195void dlm_rcom_in(struct dlm_rcom *rc) 145void dlm_rcom_in(struct dlm_rcom *rc)
196{ 146{
197 int type;
198
199 header_in(&rc->rc_header); 147 header_in(&rc->rc_header);
200 148
201 rc->rc_type = le32_to_cpu(rc->rc_type); 149 rc->rc_type = le32_to_cpu(rc->rc_type);
@@ -203,13 +151,4 @@ void dlm_rcom_in(struct dlm_rcom *rc)
203 rc->rc_id = le64_to_cpu(rc->rc_id); 151 rc->rc_id = le64_to_cpu(rc->rc_id);
204 rc->rc_seq = le64_to_cpu(rc->rc_seq); 152 rc->rc_seq = le64_to_cpu(rc->rc_seq);
205 rc->rc_seq_reply = le64_to_cpu(rc->rc_seq_reply); 153 rc->rc_seq_reply = le64_to_cpu(rc->rc_seq_reply);
206
207 type = rc->rc_type;
208
209 if ((type == DLM_RCOM_LOCK) || (type == DLM_RCOM_LOCK_REPLY))
210 rcom_lock_in((struct rcom_lock *) rc->rc_buf);
211
212 else if (type == DLM_RCOM_STATUS_REPLY)
213 rcom_config_in((struct rcom_config *) rc->rc_buf);
214} 154}
215
diff --git a/fs/dquot.c b/fs/dquot.c
index cee7c6f428f0..9c7feb62eed1 100644
--- a/fs/dquot.c
+++ b/fs/dquot.c
@@ -696,9 +696,8 @@ static int dqinit_needed(struct inode *inode, int type)
696/* This routine is guarded by dqonoff_mutex mutex */ 696/* This routine is guarded by dqonoff_mutex mutex */
697static void add_dquot_ref(struct super_block *sb, int type) 697static void add_dquot_ref(struct super_block *sb, int type)
698{ 698{
699 struct inode *inode; 699 struct inode *inode, *old_inode = NULL;
700 700
701restart:
702 spin_lock(&inode_lock); 701 spin_lock(&inode_lock);
703 list_for_each_entry(inode, &sb->s_inodes, i_sb_list) { 702 list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
704 if (!atomic_read(&inode->i_writecount)) 703 if (!atomic_read(&inode->i_writecount))
@@ -711,12 +710,18 @@ restart:
711 __iget(inode); 710 __iget(inode);
712 spin_unlock(&inode_lock); 711 spin_unlock(&inode_lock);
713 712
713 iput(old_inode);
714 sb->dq_op->initialize(inode, type); 714 sb->dq_op->initialize(inode, type);
715 iput(inode); 715 /* We hold a reference to 'inode' so it couldn't have been
716 /* As we may have blocked we had better restart... */ 716 * removed from s_inodes list while we dropped the inode_lock.
717 goto restart; 717 * We cannot iput the inode now as we can be holding the last
718 * reference and we cannot iput it under inode_lock. So we
719 * keep the reference and iput it later. */
720 old_inode = inode;
721 spin_lock(&inode_lock);
718 } 722 }
719 spin_unlock(&inode_lock); 723 spin_unlock(&inode_lock);
724 iput(old_inode);
720} 725}
721 726
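
add_dquot_ref() no longer restarts the whole s_inodes scan after each initialize() call: the reference taken with __iget() pins the inode on the list, so the walk can simply re-take inode_lock and continue, and the previous inode is released outside the lock on the next pass (and once more after the loop). The resulting pattern, in reduced form:

static void add_dquot_ref_sketch(struct super_block *sb, int type)
{
	struct inode *inode, *old_inode = NULL;

	spin_lock(&inode_lock);
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		if (!dqinit_needed(inode, type))
			continue;
		__iget(inode);
		spin_unlock(&inode_lock);

		iput(old_inode);		/* never under inode_lock */
		sb->dq_op->initialize(inode, type);
		old_inode = inode;		/* our reference keeps it on s_inodes */
		spin_lock(&inode_lock);
	}
	spin_unlock(&inode_lock);
	iput(old_inode);
}
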
722/* Return 0 if dqput() won't block (note that 1 doesn't necessarily mean blocking) */ 727/* Return 0 if dqput() won't block (note that 1 doesn't necessarily mean blocking) */
@@ -1628,16 +1633,17 @@ int vfs_quota_on(struct super_block *sb, int type, int format_id, char *path)
1628 error = path_lookup(path, LOOKUP_FOLLOW, &nd); 1633 error = path_lookup(path, LOOKUP_FOLLOW, &nd);
1629 if (error < 0) 1634 if (error < 0)
1630 return error; 1635 return error;
1631 error = security_quota_on(nd.dentry); 1636 error = security_quota_on(nd.path.dentry);
1632 if (error) 1637 if (error)
1633 goto out_path; 1638 goto out_path;
1634 /* Quota file not on the same filesystem? */ 1639 /* Quota file not on the same filesystem? */
1635 if (nd.mnt->mnt_sb != sb) 1640 if (nd.path.mnt->mnt_sb != sb)
1636 error = -EXDEV; 1641 error = -EXDEV;
1637 else 1642 else
1638 error = vfs_quota_on_inode(nd.dentry->d_inode, type, format_id); 1643 error = vfs_quota_on_inode(nd.path.dentry->d_inode, type,
1644 format_id);
1639out_path: 1645out_path:
1640 path_release(&nd); 1646 path_put(&nd.path);
1641 return error; 1647 return error;
1642} 1648}
1643 1649
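
vfs_quota_on() is updated for the nameidata conversion to struct path: the looked-up dentry and vfsmount now live in nd.path, and path_put() replaces path_release(). Trimmed down (hypothetical helper name):

static int quota_file_on_sb(struct super_block *sb, char *path)
{
	struct nameidata nd;
	int error = path_lookup(path, LOOKUP_FOLLOW, &nd);

	if (error)
		return error;
	/* the quota file must live on the filesystem being configured */
	if (nd.path.mnt->mnt_sb != sb)
		error = -EXDEV;
	path_put(&nd.path);	/* replaces path_release(&nd) */
	return error;
}
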
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
index f8ef0af919e7..a066e109ad9c 100644
--- a/fs/ecryptfs/crypto.c
+++ b/fs/ecryptfs/crypto.c
@@ -355,8 +355,11 @@ static int encrypt_scatterlist(struct ecryptfs_crypt_stat *crypt_stat,
355 } 355 }
356 /* Consider doing this once, when the file is opened */ 356 /* Consider doing this once, when the file is opened */
357 mutex_lock(&crypt_stat->cs_tfm_mutex); 357 mutex_lock(&crypt_stat->cs_tfm_mutex);
358 rc = crypto_blkcipher_setkey(crypt_stat->tfm, crypt_stat->key, 358 if (!(crypt_stat->flags & ECRYPTFS_KEY_SET)) {
359 crypt_stat->key_size); 359 rc = crypto_blkcipher_setkey(crypt_stat->tfm, crypt_stat->key,
360 crypt_stat->key_size);
361 crypt_stat->flags |= ECRYPTFS_KEY_SET;
362 }
360 if (rc) { 363 if (rc) {
 361 ecryptfs_printk(KERN_ERR, "Error setting key; rc = [%d]\n", 364 ecryptfs_printk(KERN_ERR, "Error setting key; rc = [%d]\n",
362 rc); 365 rc);
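
The cipher key is now pushed into the blkcipher transform only once per crypt_stat: the new ECRYPTFS_KEY_SET flag (added to ecryptfs_kernel.h below) records that crypto_blkcipher_setkey() already ran, and the test happens under cs_tfm_mutex so concurrent encrypt calls cannot race. One conservative way to express it (note the hunk above sets the flag regardless of the setkey return value):

static int set_key_once(struct ecryptfs_crypt_stat *crypt_stat)
{
	int rc = 0;

	mutex_lock(&crypt_stat->cs_tfm_mutex);
	if (!(crypt_stat->flags & ECRYPTFS_KEY_SET)) {
		rc = crypto_blkcipher_setkey(crypt_stat->tfm, crypt_stat->key,
					     crypt_stat->key_size);
		if (!rc)
			crypt_stat->flags |= ECRYPTFS_KEY_SET;
	}
	mutex_unlock(&crypt_stat->cs_tfm_mutex);
	return rc;
}
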
@@ -376,11 +379,10 @@ out:
376 * 379 *
377 * Convert an eCryptfs page index into a lower byte offset 380 * Convert an eCryptfs page index into a lower byte offset
378 */ 381 */
379void ecryptfs_lower_offset_for_extent(loff_t *offset, loff_t extent_num, 382static void ecryptfs_lower_offset_for_extent(loff_t *offset, loff_t extent_num,
380 struct ecryptfs_crypt_stat *crypt_stat) 383 struct ecryptfs_crypt_stat *crypt_stat)
381{ 384{
382 (*offset) = ((crypt_stat->extent_size 385 (*offset) = (crypt_stat->num_header_bytes_at_front
383 * crypt_stat->num_header_extents_at_front)
384 + (crypt_stat->extent_size * extent_num)); 386 + (crypt_stat->extent_size * extent_num));
385} 387}
386 388
@@ -842,15 +844,13 @@ void ecryptfs_set_default_sizes(struct ecryptfs_crypt_stat *crypt_stat)
842 set_extent_mask_and_shift(crypt_stat); 844 set_extent_mask_and_shift(crypt_stat);
843 crypt_stat->iv_bytes = ECRYPTFS_DEFAULT_IV_BYTES; 845 crypt_stat->iv_bytes = ECRYPTFS_DEFAULT_IV_BYTES;
844 if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR) 846 if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR)
845 crypt_stat->num_header_extents_at_front = 0; 847 crypt_stat->num_header_bytes_at_front = 0;
846 else { 848 else {
847 if (PAGE_CACHE_SIZE <= ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE) 849 if (PAGE_CACHE_SIZE <= ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE)
848 crypt_stat->num_header_extents_at_front = 850 crypt_stat->num_header_bytes_at_front =
849 (ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE 851 ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE;
850 / crypt_stat->extent_size);
851 else 852 else
852 crypt_stat->num_header_extents_at_front = 853 crypt_stat->num_header_bytes_at_front = PAGE_CACHE_SIZE;
853 (PAGE_CACHE_SIZE / crypt_stat->extent_size);
854 } 854 }
855} 855}
856 856
@@ -1128,7 +1128,7 @@ write_ecryptfs_flags(char *page_virt, struct ecryptfs_crypt_stat *crypt_stat,
1128 1128
1129struct ecryptfs_cipher_code_str_map_elem { 1129struct ecryptfs_cipher_code_str_map_elem {
1130 char cipher_str[16]; 1130 char cipher_str[16];
1131 u16 cipher_code; 1131 u8 cipher_code;
1132}; 1132};
1133 1133
1134/* Add support for additional ciphers by adding elements here. The 1134/* Add support for additional ciphers by adding elements here. The
@@ -1152,10 +1152,10 @@ ecryptfs_cipher_code_str_map[] = {
1152 * 1152 *
1153 * Returns zero on no match, or the cipher code on match 1153 * Returns zero on no match, or the cipher code on match
1154 */ 1154 */
1155u16 ecryptfs_code_for_cipher_string(struct ecryptfs_crypt_stat *crypt_stat) 1155u8 ecryptfs_code_for_cipher_string(struct ecryptfs_crypt_stat *crypt_stat)
1156{ 1156{
1157 int i; 1157 int i;
1158 u16 code = 0; 1158 u8 code = 0;
1159 struct ecryptfs_cipher_code_str_map_elem *map = 1159 struct ecryptfs_cipher_code_str_map_elem *map =
1160 ecryptfs_cipher_code_str_map; 1160 ecryptfs_cipher_code_str_map;
1161 1161
@@ -1187,7 +1187,7 @@ u16 ecryptfs_code_for_cipher_string(struct ecryptfs_crypt_stat *crypt_stat)
1187 * 1187 *
1188 * Returns zero on success 1188 * Returns zero on success
1189 */ 1189 */
1190int ecryptfs_cipher_code_to_string(char *str, u16 cipher_code) 1190int ecryptfs_cipher_code_to_string(char *str, u8 cipher_code)
1191{ 1191{
1192 int rc = 0; 1192 int rc = 0;
1193 int i; 1193 int i;
@@ -1236,7 +1236,8 @@ ecryptfs_write_header_metadata(char *virt,
1236 1236
1237 header_extent_size = (u32)crypt_stat->extent_size; 1237 header_extent_size = (u32)crypt_stat->extent_size;
1238 num_header_extents_at_front = 1238 num_header_extents_at_front =
1239 (u16)crypt_stat->num_header_extents_at_front; 1239 (u16)(crypt_stat->num_header_bytes_at_front
1240 / crypt_stat->extent_size);
1240 header_extent_size = cpu_to_be32(header_extent_size); 1241 header_extent_size = cpu_to_be32(header_extent_size);
1241 memcpy(virt, &header_extent_size, 4); 1242 memcpy(virt, &header_extent_size, 4);
1242 virt += 4; 1243 virt += 4;
@@ -1311,40 +1312,16 @@ static int ecryptfs_write_headers_virt(char *page_virt, size_t *size,
1311static int 1312static int
1312ecryptfs_write_metadata_to_contents(struct ecryptfs_crypt_stat *crypt_stat, 1313ecryptfs_write_metadata_to_contents(struct ecryptfs_crypt_stat *crypt_stat,
1313 struct dentry *ecryptfs_dentry, 1314 struct dentry *ecryptfs_dentry,
1314 char *page_virt) 1315 char *virt)
1315{ 1316{
1316 int current_header_page;
1317 int header_pages;
1318 int rc; 1317 int rc;
1319 1318
1320 rc = ecryptfs_write_lower(ecryptfs_dentry->d_inode, page_virt, 1319 rc = ecryptfs_write_lower(ecryptfs_dentry->d_inode, virt,
1321 0, PAGE_CACHE_SIZE); 1320 0, crypt_stat->num_header_bytes_at_front);
1322 if (rc) { 1321 if (rc)
1323 printk(KERN_ERR "%s: Error attempting to write header " 1322 printk(KERN_ERR "%s: Error attempting to write header "
1324 "information to lower file; rc = [%d]\n", __FUNCTION__, 1323 "information to lower file; rc = [%d]\n", __FUNCTION__,
1325 rc); 1324 rc);
1326 goto out;
1327 }
1328 header_pages = ((crypt_stat->extent_size
1329 * crypt_stat->num_header_extents_at_front)
1330 / PAGE_CACHE_SIZE);
1331 memset(page_virt, 0, PAGE_CACHE_SIZE);
1332 current_header_page = 1;
1333 while (current_header_page < header_pages) {
1334 loff_t offset;
1335
1336 offset = (((loff_t)current_header_page) << PAGE_CACHE_SHIFT);
1337 if ((rc = ecryptfs_write_lower(ecryptfs_dentry->d_inode,
1338 page_virt, offset,
1339 PAGE_CACHE_SIZE))) {
1340 printk(KERN_ERR "%s: Error attempting to write header "
1341 "information to lower file; rc = [%d]\n",
1342 __FUNCTION__, rc);
1343 goto out;
1344 }
1345 current_header_page++;
1346 }
1347out:
1348 return rc; 1325 return rc;
1349} 1326}
1350 1327
@@ -1370,15 +1347,13 @@ ecryptfs_write_metadata_to_xattr(struct dentry *ecryptfs_dentry,
1370 * retrieved via a prompt. Exactly what happens at this point should 1347 * retrieved via a prompt. Exactly what happens at this point should
1371 * be policy-dependent. 1348 * be policy-dependent.
1372 * 1349 *
1373 * TODO: Support header information spanning multiple pages
1374 *
1375 * Returns zero on success; non-zero on error 1350 * Returns zero on success; non-zero on error
1376 */ 1351 */
1377int ecryptfs_write_metadata(struct dentry *ecryptfs_dentry) 1352int ecryptfs_write_metadata(struct dentry *ecryptfs_dentry)
1378{ 1353{
1379 struct ecryptfs_crypt_stat *crypt_stat = 1354 struct ecryptfs_crypt_stat *crypt_stat =
1380 &ecryptfs_inode_to_private(ecryptfs_dentry->d_inode)->crypt_stat; 1355 &ecryptfs_inode_to_private(ecryptfs_dentry->d_inode)->crypt_stat;
1381 char *page_virt; 1356 char *virt;
1382 size_t size = 0; 1357 size_t size = 0;
1383 int rc = 0; 1358 int rc = 0;
1384 1359
@@ -1389,40 +1364,39 @@ int ecryptfs_write_metadata(struct dentry *ecryptfs_dentry)
1389 goto out; 1364 goto out;
1390 } 1365 }
1391 } else { 1366 } else {
1367 printk(KERN_WARNING "%s: Encrypted flag not set\n",
1368 __FUNCTION__);
1392 rc = -EINVAL; 1369 rc = -EINVAL;
1393 ecryptfs_printk(KERN_WARNING,
1394 "Called with crypt_stat->encrypted == 0\n");
1395 goto out; 1370 goto out;
1396 } 1371 }
1397 /* Released in this function */ 1372 /* Released in this function */
1398 page_virt = kmem_cache_zalloc(ecryptfs_header_cache_0, GFP_USER); 1373 virt = kzalloc(crypt_stat->num_header_bytes_at_front, GFP_KERNEL);
1399 if (!page_virt) { 1374 if (!virt) {
1400 ecryptfs_printk(KERN_ERR, "Out of memory\n"); 1375 printk(KERN_ERR "%s: Out of memory\n", __FUNCTION__);
1401 rc = -ENOMEM; 1376 rc = -ENOMEM;
1402 goto out; 1377 goto out;
1403 } 1378 }
1404 rc = ecryptfs_write_headers_virt(page_virt, &size, crypt_stat, 1379 rc = ecryptfs_write_headers_virt(virt, &size, crypt_stat,
1405 ecryptfs_dentry); 1380 ecryptfs_dentry);
1406 if (unlikely(rc)) { 1381 if (unlikely(rc)) {
1407 ecryptfs_printk(KERN_ERR, "Error whilst writing headers\n"); 1382 printk(KERN_ERR "%s: Error whilst writing headers; rc = [%d]\n",
1408 memset(page_virt, 0, PAGE_CACHE_SIZE); 1383 __FUNCTION__, rc);
1409 goto out_free; 1384 goto out_free;
1410 } 1385 }
1411 if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR) 1386 if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR)
1412 rc = ecryptfs_write_metadata_to_xattr(ecryptfs_dentry, 1387 rc = ecryptfs_write_metadata_to_xattr(ecryptfs_dentry,
1413 crypt_stat, page_virt, 1388 crypt_stat, virt, size);
1414 size);
1415 else 1389 else
1416 rc = ecryptfs_write_metadata_to_contents(crypt_stat, 1390 rc = ecryptfs_write_metadata_to_contents(crypt_stat,
1417 ecryptfs_dentry, 1391 ecryptfs_dentry, virt);
1418 page_virt);
1419 if (rc) { 1392 if (rc) {
1420 printk(KERN_ERR "Error writing metadata out to lower file; " 1393 printk(KERN_ERR "%s: Error writing metadata out to lower file; "
1421 "rc = [%d]\n", rc); 1394 "rc = [%d]\n", __FUNCTION__, rc);
1422 goto out_free; 1395 goto out_free;
1423 } 1396 }
1424out_free: 1397out_free:
1425 kmem_cache_free(ecryptfs_header_cache_0, page_virt); 1398 memset(virt, 0, crypt_stat->num_header_bytes_at_front);
1399 kfree(virt);
1426out: 1400out:
1427 return rc; 1401 return rc;
1428} 1402}
@@ -1442,16 +1416,16 @@ static int parse_header_metadata(struct ecryptfs_crypt_stat *crypt_stat,
1442 virt += sizeof(u32); 1416 virt += sizeof(u32);
1443 memcpy(&num_header_extents_at_front, virt, sizeof(u16)); 1417 memcpy(&num_header_extents_at_front, virt, sizeof(u16));
1444 num_header_extents_at_front = be16_to_cpu(num_header_extents_at_front); 1418 num_header_extents_at_front = be16_to_cpu(num_header_extents_at_front);
1445 crypt_stat->num_header_extents_at_front = 1419 crypt_stat->num_header_bytes_at_front =
1446 (int)num_header_extents_at_front; 1420 (((size_t)num_header_extents_at_front
1421 * (size_t)header_extent_size));
1447 (*bytes_read) = (sizeof(u32) + sizeof(u16)); 1422 (*bytes_read) = (sizeof(u32) + sizeof(u16));
1448 if ((validate_header_size == ECRYPTFS_VALIDATE_HEADER_SIZE) 1423 if ((validate_header_size == ECRYPTFS_VALIDATE_HEADER_SIZE)
1449 && ((crypt_stat->extent_size 1424 && (crypt_stat->num_header_bytes_at_front
1450 * crypt_stat->num_header_extents_at_front)
1451 < ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE)) { 1425 < ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE)) {
1452 rc = -EINVAL; 1426 rc = -EINVAL;
1453 printk(KERN_WARNING "Invalid number of header extents: [%zd]\n", 1427 printk(KERN_WARNING "Invalid header size: [%zd]\n",
1454 crypt_stat->num_header_extents_at_front); 1428 crypt_stat->num_header_bytes_at_front);
1455 } 1429 }
1456 return rc; 1430 return rc;
1457} 1431}
@@ -1466,7 +1440,8 @@ static int parse_header_metadata(struct ecryptfs_crypt_stat *crypt_stat,
1466 */ 1440 */
1467static void set_default_header_data(struct ecryptfs_crypt_stat *crypt_stat) 1441static void set_default_header_data(struct ecryptfs_crypt_stat *crypt_stat)
1468{ 1442{
1469 crypt_stat->num_header_extents_at_front = 2; 1443 crypt_stat->num_header_bytes_at_front =
1444 ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE;
1470} 1445}
1471 1446
1472/** 1447/**
@@ -1552,9 +1527,10 @@ int ecryptfs_read_xattr_region(char *page_virt, struct inode *ecryptfs_inode)
1552 size = ecryptfs_getxattr_lower(lower_dentry, ECRYPTFS_XATTR_NAME, 1527 size = ecryptfs_getxattr_lower(lower_dentry, ECRYPTFS_XATTR_NAME,
1553 page_virt, ECRYPTFS_DEFAULT_EXTENT_SIZE); 1528 page_virt, ECRYPTFS_DEFAULT_EXTENT_SIZE);
1554 if (size < 0) { 1529 if (size < 0) {
1555 printk(KERN_ERR "Error attempting to read the [%s] " 1530 if (unlikely(ecryptfs_verbosity > 0))
1556 "xattr from the lower file; return value = [%zd]\n", 1531 printk(KERN_INFO "Error attempting to read the [%s] "
1557 ECRYPTFS_XATTR_NAME, size); 1532 "xattr from the lower file; return value = "
1533 "[%zd]\n", ECRYPTFS_XATTR_NAME, size);
1558 rc = -EINVAL; 1534 rc = -EINVAL;
1559 goto out; 1535 goto out;
1560 } 1536 }
@@ -1802,7 +1778,7 @@ out:
1802} 1778}
1803 1779
1804struct kmem_cache *ecryptfs_key_tfm_cache; 1780struct kmem_cache *ecryptfs_key_tfm_cache;
1805struct list_head key_tfm_list; 1781static struct list_head key_tfm_list;
1806struct mutex key_tfm_list_mutex; 1782struct mutex key_tfm_list_mutex;
1807 1783
1808int ecryptfs_init_crypto(void) 1784int ecryptfs_init_crypto(void)
@@ -1812,6 +1788,11 @@ int ecryptfs_init_crypto(void)
1812 return 0; 1788 return 0;
1813} 1789}
1814 1790
1791/**
1792 * ecryptfs_destroy_crypto - free all cached key_tfms on key_tfm_list
1793 *
1794 * Called only at module unload time
1795 */
1815int ecryptfs_destroy_crypto(void) 1796int ecryptfs_destroy_crypto(void)
1816{ 1797{
1817 struct ecryptfs_key_tfm *key_tfm, *key_tfm_tmp; 1798 struct ecryptfs_key_tfm *key_tfm, *key_tfm_tmp;
@@ -1835,6 +1816,8 @@ ecryptfs_add_new_key_tfm(struct ecryptfs_key_tfm **key_tfm, char *cipher_name,
1835 struct ecryptfs_key_tfm *tmp_tfm; 1816 struct ecryptfs_key_tfm *tmp_tfm;
1836 int rc = 0; 1817 int rc = 0;
1837 1818
1819 BUG_ON(!mutex_is_locked(&key_tfm_list_mutex));
1820
1838 tmp_tfm = kmem_cache_alloc(ecryptfs_key_tfm_cache, GFP_KERNEL); 1821 tmp_tfm = kmem_cache_alloc(ecryptfs_key_tfm_cache, GFP_KERNEL);
1839 if (key_tfm != NULL) 1822 if (key_tfm != NULL)
1840 (*key_tfm) = tmp_tfm; 1823 (*key_tfm) = tmp_tfm;
@@ -1861,13 +1844,50 @@ ecryptfs_add_new_key_tfm(struct ecryptfs_key_tfm **key_tfm, char *cipher_name,
1861 (*key_tfm) = NULL; 1844 (*key_tfm) = NULL;
1862 goto out; 1845 goto out;
1863 } 1846 }
1864 mutex_lock(&key_tfm_list_mutex);
1865 list_add(&tmp_tfm->key_tfm_list, &key_tfm_list); 1847 list_add(&tmp_tfm->key_tfm_list, &key_tfm_list);
1866 mutex_unlock(&key_tfm_list_mutex);
1867out: 1848out:
1868 return rc; 1849 return rc;
1869} 1850}
1870 1851
1852/**
1853 * ecryptfs_tfm_exists - Search for existing tfm for cipher_name.
1854 * @cipher_name: the name of the cipher to search for
1855 * @key_tfm: set to corresponding tfm if found
1856 *
1857 * Searches for cached key_tfm matching @cipher_name
1858 * Must be called with &key_tfm_list_mutex held
1859 * Returns 1 if found, with @key_tfm set
1860 * Returns 0 if not found, with @key_tfm set to NULL
1861 */
1862int ecryptfs_tfm_exists(char *cipher_name, struct ecryptfs_key_tfm **key_tfm)
1863{
1864 struct ecryptfs_key_tfm *tmp_key_tfm;
1865
1866 BUG_ON(!mutex_is_locked(&key_tfm_list_mutex));
1867
1868 list_for_each_entry(tmp_key_tfm, &key_tfm_list, key_tfm_list) {
1869 if (strcmp(tmp_key_tfm->cipher_name, cipher_name) == 0) {
1870 if (key_tfm)
1871 (*key_tfm) = tmp_key_tfm;
1872 return 1;
1873 }
1874 }
1875 if (key_tfm)
1876 (*key_tfm) = NULL;
1877 return 0;
1878}
1879
1880/**
1881 * ecryptfs_get_tfm_and_mutex_for_cipher_name
1882 *
1883 * @tfm: set to cached tfm found, or new tfm created
1884 * @tfm_mutex: set to mutex for cached tfm found, or new tfm created
1885 * @cipher_name: the name of the cipher to search for and/or add
1886 *
1887 * Sets pointers to @tfm & @tfm_mutex matching @cipher_name.
1888 * Searches for cached item first, and creates new if not found.
1889 * Returns 0 on success, non-zero if adding new cipher failed
1890 */
1871int ecryptfs_get_tfm_and_mutex_for_cipher_name(struct crypto_blkcipher **tfm, 1891int ecryptfs_get_tfm_and_mutex_for_cipher_name(struct crypto_blkcipher **tfm,
1872 struct mutex **tfm_mutex, 1892 struct mutex **tfm_mutex,
1873 char *cipher_name) 1893 char *cipher_name)
@@ -1877,22 +1897,17 @@ int ecryptfs_get_tfm_and_mutex_for_cipher_name(struct crypto_blkcipher **tfm,
1877 1897
1878 (*tfm) = NULL; 1898 (*tfm) = NULL;
1879 (*tfm_mutex) = NULL; 1899 (*tfm_mutex) = NULL;
1900
1880 mutex_lock(&key_tfm_list_mutex); 1901 mutex_lock(&key_tfm_list_mutex);
1881 list_for_each_entry(key_tfm, &key_tfm_list, key_tfm_list) { 1902 if (!ecryptfs_tfm_exists(cipher_name, &key_tfm)) {
1882 if (strcmp(key_tfm->cipher_name, cipher_name) == 0) { 1903 rc = ecryptfs_add_new_key_tfm(&key_tfm, cipher_name, 0);
1883 (*tfm) = key_tfm->key_tfm; 1904 if (rc) {
1884 (*tfm_mutex) = &key_tfm->key_tfm_mutex; 1905 printk(KERN_ERR "Error adding new key_tfm to list; "
1885 mutex_unlock(&key_tfm_list_mutex); 1906 "rc = [%d]\n", rc);
1886 goto out; 1907 goto out;
1887 } 1908 }
1888 } 1909 }
1889 mutex_unlock(&key_tfm_list_mutex); 1910 mutex_unlock(&key_tfm_list_mutex);
1890 rc = ecryptfs_add_new_key_tfm(&key_tfm, cipher_name, 0);
1891 if (rc) {
1892 printk(KERN_ERR "Error adding new key_tfm to list; rc = [%d]\n",
1893 rc);
1894 goto out;
1895 }
1896 (*tfm) = key_tfm->key_tfm; 1911 (*tfm) = key_tfm->key_tfm;
1897 (*tfm_mutex) = &key_tfm->key_tfm_mutex; 1912 (*tfm_mutex) = &key_tfm->key_tfm_mutex;
1898out: 1913out:
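
ecryptfs_tfm_exists() factors the list walk out of ecryptfs_get_tfm_and_mutex_for_cipher_name(); it and ecryptfs_add_new_key_tfm() both assert with BUG_ON() that key_tfm_list_mutex is held, so lookup and creation happen inside one critical section and key_tfm_list itself can become static. Roughly, with the unlock hoisted so every path releases the mutex (a sketch, not the exact function above):

static int get_tfm_sketch(struct crypto_blkcipher **tfm,
			  struct mutex **tfm_mutex, char *cipher_name)
{
	struct ecryptfs_key_tfm *key_tfm;
	int rc = 0;

	mutex_lock(&key_tfm_list_mutex);
	if (!ecryptfs_tfm_exists(cipher_name, &key_tfm))
		rc = ecryptfs_add_new_key_tfm(&key_tfm, cipher_name, 0);
	mutex_unlock(&key_tfm_list_mutex);
	if (rc)
		return rc;
	(*tfm) = key_tfm->key_tfm;
	(*tfm_mutex) = &key_tfm->key_tfm_mutex;
	return 0;
}
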
diff --git a/fs/ecryptfs/dentry.c b/fs/ecryptfs/dentry.c
index cb20b964419f..841a032050a7 100644
--- a/fs/ecryptfs/dentry.c
+++ b/fs/ecryptfs/dentry.c
@@ -51,13 +51,13 @@ static int ecryptfs_d_revalidate(struct dentry *dentry, struct nameidata *nd)
51 51
52 if (!lower_dentry->d_op || !lower_dentry->d_op->d_revalidate) 52 if (!lower_dentry->d_op || !lower_dentry->d_op->d_revalidate)
53 goto out; 53 goto out;
54 dentry_save = nd->dentry; 54 dentry_save = nd->path.dentry;
55 vfsmount_save = nd->mnt; 55 vfsmount_save = nd->path.mnt;
56 nd->dentry = lower_dentry; 56 nd->path.dentry = lower_dentry;
57 nd->mnt = lower_mnt; 57 nd->path.mnt = lower_mnt;
58 rc = lower_dentry->d_op->d_revalidate(lower_dentry, nd); 58 rc = lower_dentry->d_op->d_revalidate(lower_dentry, nd);
59 nd->dentry = dentry_save; 59 nd->path.dentry = dentry_save;
60 nd->mnt = vfsmount_save; 60 nd->path.mnt = vfsmount_save;
61 if (dentry->d_inode) { 61 if (dentry->d_inode) {
62 struct inode *lower_inode = 62 struct inode *lower_inode =
63 ecryptfs_inode_to_lower(dentry->d_inode); 63 ecryptfs_inode_to_lower(dentry->d_inode);
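
With nameidata now carrying a struct path, the stacked-filesystem trick of pointing nd at the lower dentry and mount before calling the lower d_revalidate() is spelled nd->path.dentry / nd->path.mnt. The same save, swap and restore sequence, condensed into a hypothetical helper:

static int revalidate_lower(struct dentry *lower_dentry,
			    struct vfsmount *lower_mnt,
			    struct nameidata *nd)
{
	struct dentry *dentry_save = nd->path.dentry;
	struct vfsmount *vfsmount_save = nd->path.mnt;
	int rc;

	/* let the lower filesystem see its own objects in nd */
	nd->path.dentry = lower_dentry;
	nd->path.mnt = lower_mnt;
	rc = lower_dentry->d_op->d_revalidate(lower_dentry, nd);
	nd->path.dentry = dentry_save;
	nd->path.mnt = vfsmount_save;
	return rc;
}
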
diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h
index ce7a5d4aec36..5007f788da01 100644
--- a/fs/ecryptfs/ecryptfs_kernel.h
+++ b/fs/ecryptfs/ecryptfs_kernel.h
@@ -234,10 +234,11 @@ struct ecryptfs_crypt_stat {
234#define ECRYPTFS_KEY_VALID 0x00000080 234#define ECRYPTFS_KEY_VALID 0x00000080
235#define ECRYPTFS_METADATA_IN_XATTR 0x00000100 235#define ECRYPTFS_METADATA_IN_XATTR 0x00000100
236#define ECRYPTFS_VIEW_AS_ENCRYPTED 0x00000200 236#define ECRYPTFS_VIEW_AS_ENCRYPTED 0x00000200
237#define ECRYPTFS_KEY_SET 0x00000400
237 u32 flags; 238 u32 flags;
238 unsigned int file_version; 239 unsigned int file_version;
239 size_t iv_bytes; 240 size_t iv_bytes;
240 size_t num_header_extents_at_front; 241 size_t num_header_bytes_at_front;
241 size_t extent_size; /* Data extent size; default is 4096 */ 242 size_t extent_size; /* Data extent size; default is 4096 */
242 size_t key_size; 243 size_t key_size;
243 size_t extent_shift; 244 size_t extent_shift;
@@ -322,7 +323,6 @@ struct ecryptfs_key_tfm {
322 unsigned char cipher_name[ECRYPTFS_MAX_CIPHER_NAME_SIZE + 1]; 323 unsigned char cipher_name[ECRYPTFS_MAX_CIPHER_NAME_SIZE + 1];
323}; 324};
324 325
325extern struct list_head key_tfm_list;
326extern struct mutex key_tfm_list_mutex; 326extern struct mutex key_tfm_list_mutex;
327 327
328/** 328/**
@@ -521,11 +521,9 @@ extern struct kmem_cache *ecryptfs_file_info_cache;
521extern struct kmem_cache *ecryptfs_dentry_info_cache; 521extern struct kmem_cache *ecryptfs_dentry_info_cache;
522extern struct kmem_cache *ecryptfs_inode_info_cache; 522extern struct kmem_cache *ecryptfs_inode_info_cache;
523extern struct kmem_cache *ecryptfs_sb_info_cache; 523extern struct kmem_cache *ecryptfs_sb_info_cache;
524extern struct kmem_cache *ecryptfs_header_cache_0;
525extern struct kmem_cache *ecryptfs_header_cache_1; 524extern struct kmem_cache *ecryptfs_header_cache_1;
526extern struct kmem_cache *ecryptfs_header_cache_2; 525extern struct kmem_cache *ecryptfs_header_cache_2;
527extern struct kmem_cache *ecryptfs_xattr_cache; 526extern struct kmem_cache *ecryptfs_xattr_cache;
528extern struct kmem_cache *ecryptfs_lower_page_cache;
529extern struct kmem_cache *ecryptfs_key_record_cache; 527extern struct kmem_cache *ecryptfs_key_record_cache;
530extern struct kmem_cache *ecryptfs_key_sig_cache; 528extern struct kmem_cache *ecryptfs_key_sig_cache;
531extern struct kmem_cache *ecryptfs_global_auth_tok_cache; 529extern struct kmem_cache *ecryptfs_global_auth_tok_cache;
@@ -562,8 +560,8 @@ int ecryptfs_read_and_validate_header_region(char *data,
562 struct inode *ecryptfs_inode); 560 struct inode *ecryptfs_inode);
563int ecryptfs_read_and_validate_xattr_region(char *page_virt, 561int ecryptfs_read_and_validate_xattr_region(char *page_virt,
564 struct dentry *ecryptfs_dentry); 562 struct dentry *ecryptfs_dentry);
565u16 ecryptfs_code_for_cipher_string(struct ecryptfs_crypt_stat *crypt_stat); 563u8 ecryptfs_code_for_cipher_string(struct ecryptfs_crypt_stat *crypt_stat);
566int ecryptfs_cipher_code_to_string(char *str, u16 cipher_code); 564int ecryptfs_cipher_code_to_string(char *str, u8 cipher_code);
567void ecryptfs_set_default_sizes(struct ecryptfs_crypt_stat *crypt_stat); 565void ecryptfs_set_default_sizes(struct ecryptfs_crypt_stat *crypt_stat);
568int ecryptfs_generate_key_packet_set(char *dest_base, 566int ecryptfs_generate_key_packet_set(char *dest_base,
569 struct ecryptfs_crypt_stat *crypt_stat, 567 struct ecryptfs_crypt_stat *crypt_stat,
@@ -576,8 +574,6 @@ int ecryptfs_truncate(struct dentry *dentry, loff_t new_length);
576int ecryptfs_inode_test(struct inode *inode, void *candidate_lower_inode); 574int ecryptfs_inode_test(struct inode *inode, void *candidate_lower_inode);
577int ecryptfs_inode_set(struct inode *inode, void *lower_inode); 575int ecryptfs_inode_set(struct inode *inode, void *lower_inode);
578void ecryptfs_init_inode(struct inode *inode, struct inode *lower_inode); 576void ecryptfs_init_inode(struct inode *inode, struct inode *lower_inode);
579ssize_t ecryptfs_getxattr(struct dentry *dentry, const char *name, void *value,
580 size_t size);
581ssize_t 577ssize_t
582ecryptfs_getxattr_lower(struct dentry *lower_dentry, const char *name, 578ecryptfs_getxattr_lower(struct dentry *lower_dentry, const char *name,
583 void *value, size_t size); 579 void *value, size_t size);
@@ -623,6 +619,7 @@ ecryptfs_add_new_key_tfm(struct ecryptfs_key_tfm **key_tfm, char *cipher_name,
623 size_t key_size); 619 size_t key_size);
624int ecryptfs_init_crypto(void); 620int ecryptfs_init_crypto(void);
625int ecryptfs_destroy_crypto(void); 621int ecryptfs_destroy_crypto(void);
622int ecryptfs_tfm_exists(char *cipher_name, struct ecryptfs_key_tfm **key_tfm);
626int ecryptfs_get_tfm_and_mutex_for_cipher_name(struct crypto_blkcipher **tfm, 623int ecryptfs_get_tfm_and_mutex_for_cipher_name(struct crypto_blkcipher **tfm,
627 struct mutex **tfm_mutex, 624 struct mutex **tfm_mutex,
628 char *cipher_name); 625 char *cipher_name);
@@ -631,8 +628,6 @@ int ecryptfs_keyring_auth_tok_for_sig(struct key **auth_tok_key,
631 char *sig); 628 char *sig);
632int ecryptfs_write_zeros(struct file *file, pgoff_t index, int start, 629int ecryptfs_write_zeros(struct file *file, pgoff_t index, int start,
633 int num_zeros); 630 int num_zeros);
634void ecryptfs_lower_offset_for_extent(loff_t *offset, loff_t extent_num,
635 struct ecryptfs_crypt_stat *crypt_stat);
636int ecryptfs_write_lower(struct inode *ecryptfs_inode, char *data, 631int ecryptfs_write_lower(struct inode *ecryptfs_inode, char *data,
637 loff_t offset, size_t size); 632 loff_t offset, size_t size);
638int ecryptfs_write_lower_page_segment(struct inode *ecryptfs_inode, 633int ecryptfs_write_lower_page_segment(struct inode *ecryptfs_inode,
@@ -646,8 +641,6 @@ int ecryptfs_read_lower_page_segment(struct page *page_for_ecryptfs,
646 pgoff_t page_index, 641 pgoff_t page_index,
647 size_t offset_in_page, size_t size, 642 size_t offset_in_page, size_t size,
648 struct inode *ecryptfs_inode); 643 struct inode *ecryptfs_inode);
649int ecryptfs_read(char *data, loff_t offset, size_t size,
650 struct file *ecryptfs_file);
651struct page *ecryptfs_get_locked_page(struct file *file, loff_t index); 644struct page *ecryptfs_get_locked_page(struct file *file, loff_t index);
652 645
653#endif /* #ifndef ECRYPTFS_KERNEL_H */ 646#endif /* #ifndef ECRYPTFS_KERNEL_H */
diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
index c98c4690a771..2b8f5ed4adea 100644
--- a/fs/ecryptfs/file.c
+++ b/fs/ecryptfs/file.c
@@ -209,9 +209,10 @@ static int ecryptfs_open(struct inode *inode, struct file *file)
209 if (!(mount_crypt_stat->flags 209 if (!(mount_crypt_stat->flags
210 & ECRYPTFS_PLAINTEXT_PASSTHROUGH_ENABLED)) { 210 & ECRYPTFS_PLAINTEXT_PASSTHROUGH_ENABLED)) {
211 rc = -EIO; 211 rc = -EIO;
212 printk(KERN_WARNING "Attempt to read file that " 212 printk(KERN_WARNING "Either the lower file "
213 "is not in a valid eCryptfs format, " 213 "is not in a valid eCryptfs format, "
214 "and plaintext passthrough mode is not " 214 "or the key could not be retrieved. "
215 "Plaintext passthrough mode is not "
215 "enabled; returning -EIO\n"); 216 "enabled; returning -EIO\n");
216 mutex_unlock(&crypt_stat->cs_mutex); 217 mutex_unlock(&crypt_stat->cs_mutex);
217 goto out_free; 218 goto out_free;
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index 5a719180983c..e23861152101 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -77,13 +77,13 @@ ecryptfs_create_underlying_file(struct inode *lower_dir_inode,
77 struct vfsmount *vfsmount_save; 77 struct vfsmount *vfsmount_save;
78 int rc; 78 int rc;
79 79
80 dentry_save = nd->dentry; 80 dentry_save = nd->path.dentry;
81 vfsmount_save = nd->mnt; 81 vfsmount_save = nd->path.mnt;
82 nd->dentry = lower_dentry; 82 nd->path.dentry = lower_dentry;
83 nd->mnt = lower_mnt; 83 nd->path.mnt = lower_mnt;
84 rc = vfs_create(lower_dir_inode, lower_dentry, mode, nd); 84 rc = vfs_create(lower_dir_inode, lower_dentry, mode, nd);
85 nd->dentry = dentry_save; 85 nd->path.dentry = dentry_save;
86 nd->mnt = vfsmount_save; 86 nd->path.mnt = vfsmount_save;
87 return rc; 87 return rc;
88} 88}
89 89
@@ -365,8 +365,7 @@ static struct dentry *ecryptfs_lookup(struct inode *dir, struct dentry *dentry,
365 dentry->d_sb)->mount_crypt_stat; 365 dentry->d_sb)->mount_crypt_stat;
366 if (mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED) { 366 if (mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED) {
367 if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR) 367 if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR)
368 file_size = ((crypt_stat->extent_size 368 file_size = (crypt_stat->num_header_bytes_at_front
369 * crypt_stat->num_header_extents_at_front)
370 + i_size_read(lower_dentry->d_inode)); 369 + i_size_read(lower_dentry->d_inode));
371 else 370 else
372 file_size = i_size_read(lower_dentry->d_inode); 371 file_size = i_size_read(lower_dentry->d_inode);
@@ -685,7 +684,7 @@ ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
685 * @crypt_stat: Crypt_stat associated with file 684 * @crypt_stat: Crypt_stat associated with file
686 * @upper_size: Size of the upper file 685 * @upper_size: Size of the upper file
687 * 686 *
688 * Calculate the requried size of the lower file based on the 687 * Calculate the required size of the lower file based on the
689 * specified size of the upper file. This calculation is based on the 688 * specified size of the upper file. This calculation is based on the
690 * number of headers in the underlying file and the extent size. 689 * number of headers in the underlying file and the extent size.
691 * 690 *
@@ -697,8 +696,7 @@ upper_size_to_lower_size(struct ecryptfs_crypt_stat *crypt_stat,
697{ 696{
698 loff_t lower_size; 697 loff_t lower_size;
699 698
700 lower_size = (crypt_stat->extent_size 699 lower_size = crypt_stat->num_header_bytes_at_front;
701 * crypt_stat->num_header_extents_at_front);
702 if (upper_size != 0) { 700 if (upper_size != 0) {
703 loff_t num_extents; 701 loff_t num_extents;
704 702
@@ -821,14 +819,14 @@ ecryptfs_permission(struct inode *inode, int mask, struct nameidata *nd)
821 int rc; 819 int rc;
822 820
823 if (nd) { 821 if (nd) {
824 struct vfsmount *vfsmnt_save = nd->mnt; 822 struct vfsmount *vfsmnt_save = nd->path.mnt;
825 struct dentry *dentry_save = nd->dentry; 823 struct dentry *dentry_save = nd->path.dentry;
826 824
827 nd->mnt = ecryptfs_dentry_to_lower_mnt(nd->dentry); 825 nd->path.mnt = ecryptfs_dentry_to_lower_mnt(nd->path.dentry);
828 nd->dentry = ecryptfs_dentry_to_lower(nd->dentry); 826 nd->path.dentry = ecryptfs_dentry_to_lower(nd->path.dentry);
829 rc = permission(ecryptfs_inode_to_lower(inode), mask, nd); 827 rc = permission(ecryptfs_inode_to_lower(inode), mask, nd);
830 nd->mnt = vfsmnt_save; 828 nd->path.mnt = vfsmnt_save;
831 nd->dentry = dentry_save; 829 nd->path.dentry = dentry_save;
832 } else 830 } else
833 rc = permission(ecryptfs_inode_to_lower(inode), mask, NULL); 831 rc = permission(ecryptfs_inode_to_lower(inode), mask, NULL);
834 return rc; 832 return rc;
@@ -875,11 +873,11 @@ static int ecryptfs_setattr(struct dentry *dentry, struct iattr *ia)
875 if (!(mount_crypt_stat->flags 873 if (!(mount_crypt_stat->flags
876 & ECRYPTFS_PLAINTEXT_PASSTHROUGH_ENABLED)) { 874 & ECRYPTFS_PLAINTEXT_PASSTHROUGH_ENABLED)) {
877 rc = -EIO; 875 rc = -EIO;
878 printk(KERN_WARNING "Attempt to read file that " 876 printk(KERN_WARNING "Either the lower file "
879 "is not in a valid eCryptfs format, " 877 "is not in a valid eCryptfs format, "
880 "and plaintext passthrough mode is not " 878 "or the key could not be retrieved. "
879 "Plaintext passthrough mode is not "
881 "enabled; returning -EIO\n"); 880 "enabled; returning -EIO\n");
882
883 mutex_unlock(&crypt_stat->cs_mutex); 881 mutex_unlock(&crypt_stat->cs_mutex);
884 goto out; 882 goto out;
885 } 883 }
@@ -954,7 +952,7 @@ out:
954 return rc; 952 return rc;
955} 953}
956 954
957ssize_t 955static ssize_t
958ecryptfs_getxattr(struct dentry *dentry, const char *name, void *value, 956ecryptfs_getxattr(struct dentry *dentry, const char *name, void *value,
959 size_t size) 957 size_t size)
960{ 958{
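Note: with num_header_extents_at_front replaced by num_header_bytes_at_front, the lower-file size is computed from a byte count directly. A sketch of the calculation, with illustrative values of 4096-byte extents and an 8192-byte header region:

static loff_t sketch_lower_size(loff_t upper_size,
				size_t extent_size,		/* e.g. 4096 */
				size_t header_bytes_at_front)	/* e.g. 8192 */
{
	loff_t lower_size = header_bytes_at_front;

	if (upper_size != 0) {
		loff_t num_extents = upper_size / extent_size;

		if (upper_size % extent_size)
			num_extents++;		/* round a partial extent up */
		lower_size += num_extents * extent_size;
	}
	return lower_size;
}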
diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c
index f458c1f35565..682b1b2482c2 100644
--- a/fs/ecryptfs/keystore.c
+++ b/fs/ecryptfs/keystore.c
@@ -189,7 +189,7 @@ out:
189} 189}
190 190
191static int 191static int
192parse_tag_65_packet(struct ecryptfs_session_key *session_key, u16 *cipher_code, 192parse_tag_65_packet(struct ecryptfs_session_key *session_key, u8 *cipher_code,
193 struct ecryptfs_message *msg) 193 struct ecryptfs_message *msg)
194{ 194{
195 size_t i = 0; 195 size_t i = 0;
@@ -275,7 +275,7 @@ out:
275 275
276 276
277static int 277static int
278write_tag_66_packet(char *signature, size_t cipher_code, 278write_tag_66_packet(char *signature, u8 cipher_code,
279 struct ecryptfs_crypt_stat *crypt_stat, char **packet, 279 struct ecryptfs_crypt_stat *crypt_stat, char **packet,
280 size_t *packet_len) 280 size_t *packet_len)
281{ 281{
@@ -428,7 +428,7 @@ static int
428decrypt_pki_encrypted_session_key(struct ecryptfs_auth_tok *auth_tok, 428decrypt_pki_encrypted_session_key(struct ecryptfs_auth_tok *auth_tok,
429 struct ecryptfs_crypt_stat *crypt_stat) 429 struct ecryptfs_crypt_stat *crypt_stat)
430{ 430{
431 u16 cipher_code = 0; 431 u8 cipher_code = 0;
432 struct ecryptfs_msg_ctx *msg_ctx; 432 struct ecryptfs_msg_ctx *msg_ctx;
433 struct ecryptfs_message *msg = NULL; 433 struct ecryptfs_message *msg = NULL;
434 char *auth_tok_sig; 434 char *auth_tok_sig;
@@ -1537,7 +1537,7 @@ write_tag_3_packet(char *dest, size_t *remaining_bytes,
1537 struct scatterlist dst_sg; 1537 struct scatterlist dst_sg;
1538 struct scatterlist src_sg; 1538 struct scatterlist src_sg;
1539 struct mutex *tfm_mutex = NULL; 1539 struct mutex *tfm_mutex = NULL;
1540 size_t cipher_code; 1540 u8 cipher_code;
1541 size_t packet_size_length; 1541 size_t packet_size_length;
1542 size_t max_packet_size; 1542 size_t max_packet_size;
1543 struct ecryptfs_mount_crypt_stat *mount_crypt_stat = 1543 struct ecryptfs_mount_crypt_stat *mount_crypt_stat =
diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c
index 0249aa4ae181..d25ac9500a92 100644
--- a/fs/ecryptfs/main.c
+++ b/fs/ecryptfs/main.c
@@ -117,7 +117,7 @@ void __ecryptfs_printk(const char *fmt, ...)
117 * 117 *
118 * Returns zero on success; non-zero otherwise 118 * Returns zero on success; non-zero otherwise
119 */ 119 */
120int ecryptfs_init_persistent_file(struct dentry *ecryptfs_dentry) 120static int ecryptfs_init_persistent_file(struct dentry *ecryptfs_dentry)
121{ 121{
122 struct ecryptfs_inode_info *inode_info = 122 struct ecryptfs_inode_info *inode_info =
123 ecryptfs_inode_to_private(ecryptfs_dentry->d_inode); 123 ecryptfs_inode_to_private(ecryptfs_dentry->d_inode);
@@ -226,17 +226,15 @@ out:
226 return rc; 226 return rc;
227} 227}
228 228
229enum { ecryptfs_opt_sig, ecryptfs_opt_ecryptfs_sig, ecryptfs_opt_debug, 229enum { ecryptfs_opt_sig, ecryptfs_opt_ecryptfs_sig,
230 ecryptfs_opt_ecryptfs_debug, ecryptfs_opt_cipher, 230 ecryptfs_opt_cipher, ecryptfs_opt_ecryptfs_cipher,
231 ecryptfs_opt_ecryptfs_cipher, ecryptfs_opt_ecryptfs_key_bytes, 231 ecryptfs_opt_ecryptfs_key_bytes,
232 ecryptfs_opt_passthrough, ecryptfs_opt_xattr_metadata, 232 ecryptfs_opt_passthrough, ecryptfs_opt_xattr_metadata,
233 ecryptfs_opt_encrypted_view, ecryptfs_opt_err }; 233 ecryptfs_opt_encrypted_view, ecryptfs_opt_err };
234 234
235static match_table_t tokens = { 235static match_table_t tokens = {
236 {ecryptfs_opt_sig, "sig=%s"}, 236 {ecryptfs_opt_sig, "sig=%s"},
237 {ecryptfs_opt_ecryptfs_sig, "ecryptfs_sig=%s"}, 237 {ecryptfs_opt_ecryptfs_sig, "ecryptfs_sig=%s"},
238 {ecryptfs_opt_debug, "debug=%u"},
239 {ecryptfs_opt_ecryptfs_debug, "ecryptfs_debug=%u"},
240 {ecryptfs_opt_cipher, "cipher=%s"}, 238 {ecryptfs_opt_cipher, "cipher=%s"},
241 {ecryptfs_opt_ecryptfs_cipher, "ecryptfs_cipher=%s"}, 239 {ecryptfs_opt_ecryptfs_cipher, "ecryptfs_cipher=%s"},
242 {ecryptfs_opt_ecryptfs_key_bytes, "ecryptfs_key_bytes=%u"}, 240 {ecryptfs_opt_ecryptfs_key_bytes, "ecryptfs_key_bytes=%u"},
@@ -313,7 +311,6 @@ static int ecryptfs_parse_options(struct super_block *sb, char *options)
313 substring_t args[MAX_OPT_ARGS]; 311 substring_t args[MAX_OPT_ARGS];
314 int token; 312 int token;
315 char *sig_src; 313 char *sig_src;
316 char *debug_src;
317 char *cipher_name_dst; 314 char *cipher_name_dst;
318 char *cipher_name_src; 315 char *cipher_name_src;
319 char *cipher_key_bytes_src; 316 char *cipher_key_bytes_src;
@@ -341,16 +338,6 @@ static int ecryptfs_parse_options(struct super_block *sb, char *options)
341 } 338 }
342 sig_set = 1; 339 sig_set = 1;
343 break; 340 break;
344 case ecryptfs_opt_debug:
345 case ecryptfs_opt_ecryptfs_debug:
346 debug_src = args[0].from;
347 ecryptfs_verbosity =
348 (int)simple_strtol(debug_src, &debug_src,
349 0);
350 ecryptfs_printk(KERN_DEBUG,
351 "Verbosity set to [%d]" "\n",
352 ecryptfs_verbosity);
353 break;
354 case ecryptfs_opt_cipher: 341 case ecryptfs_opt_cipher:
355 case ecryptfs_opt_ecryptfs_cipher: 342 case ecryptfs_opt_ecryptfs_cipher:
356 cipher_name_src = args[0].from; 343 cipher_name_src = args[0].from;
@@ -423,9 +410,13 @@ static int ecryptfs_parse_options(struct super_block *sb, char *options)
423 if (!cipher_key_bytes_set) { 410 if (!cipher_key_bytes_set) {
424 mount_crypt_stat->global_default_cipher_key_size = 0; 411 mount_crypt_stat->global_default_cipher_key_size = 0;
425 } 412 }
426 rc = ecryptfs_add_new_key_tfm( 413 mutex_lock(&key_tfm_list_mutex);
427 NULL, mount_crypt_stat->global_default_cipher_name, 414 if (!ecryptfs_tfm_exists(mount_crypt_stat->global_default_cipher_name,
428 mount_crypt_stat->global_default_cipher_key_size); 415 NULL))
416 rc = ecryptfs_add_new_key_tfm(
417 NULL, mount_crypt_stat->global_default_cipher_name,
418 mount_crypt_stat->global_default_cipher_key_size);
419 mutex_unlock(&key_tfm_list_mutex);
429 if (rc) { 420 if (rc) {
430 printk(KERN_ERR "Error attempting to initialize cipher with " 421 printk(KERN_ERR "Error attempting to initialize cipher with "
431 "name = [%s] and key size = [%td]; rc = [%d]\n", 422 "name = [%s] and key size = [%td]; rc = [%d]\n",
@@ -522,8 +513,8 @@ static int ecryptfs_read_super(struct super_block *sb, const char *dev_name)
522 ecryptfs_printk(KERN_WARNING, "path_lookup() failed\n"); 513 ecryptfs_printk(KERN_WARNING, "path_lookup() failed\n");
523 goto out; 514 goto out;
524 } 515 }
525 lower_root = nd.dentry; 516 lower_root = nd.path.dentry;
526 lower_mnt = nd.mnt; 517 lower_mnt = nd.path.mnt;
527 ecryptfs_set_superblock_lower(sb, lower_root->d_sb); 518 ecryptfs_set_superblock_lower(sb, lower_root->d_sb);
528 sb->s_maxbytes = lower_root->d_sb->s_maxbytes; 519 sb->s_maxbytes = lower_root->d_sb->s_maxbytes;
529 sb->s_blocksize = lower_root->d_sb->s_blocksize; 520 sb->s_blocksize = lower_root->d_sb->s_blocksize;
@@ -535,7 +526,7 @@ static int ecryptfs_read_super(struct super_block *sb, const char *dev_name)
535 rc = 0; 526 rc = 0;
536 goto out; 527 goto out;
537out_free: 528out_free:
538 path_release(&nd); 529 path_put(&nd.path);
539out: 530out:
540 return rc; 531 return rc;
541} 532}
@@ -654,11 +645,6 @@ static struct ecryptfs_cache_info {
654 .size = sizeof(struct ecryptfs_sb_info), 645 .size = sizeof(struct ecryptfs_sb_info),
655 }, 646 },
656 { 647 {
657 .cache = &ecryptfs_header_cache_0,
658 .name = "ecryptfs_headers_0",
659 .size = PAGE_CACHE_SIZE,
660 },
661 {
662 .cache = &ecryptfs_header_cache_1, 648 .cache = &ecryptfs_header_cache_1,
663 .name = "ecryptfs_headers_1", 649 .name = "ecryptfs_headers_1",
664 .size = PAGE_CACHE_SIZE, 650 .size = PAGE_CACHE_SIZE,
@@ -821,6 +807,10 @@ static int __init ecryptfs_init(void)
821 "rc = [%d]\n", rc); 807 "rc = [%d]\n", rc);
822 goto out_release_messaging; 808 goto out_release_messaging;
823 } 809 }
810 if (ecryptfs_verbosity > 0)
811 printk(KERN_CRIT "eCryptfs verbosity set to %d. Secret values "
812 "will be written to the syslog!\n", ecryptfs_verbosity);
813
824 goto out; 814 goto out;
825out_release_messaging: 815out_release_messaging:
826 ecryptfs_release_messaging(ecryptfs_transport); 816 ecryptfs_release_messaging(ecryptfs_transport);
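Note: option parsing in main.c still uses the token tables from linux/parser.h; only the debug=/ecryptfs_debug= tokens are gone, with verbosity reported once at init instead. A minimal sketch of the match_token() usage (the my_opt_* names are illustrative, not eCryptfs symbols):

#include <linux/parser.h>

enum { my_opt_sig, my_opt_cipher, my_opt_err };

static match_table_t my_tokens = {
	{my_opt_sig,    "sig=%s"},
	{my_opt_cipher, "cipher=%s"},
	{my_opt_err,    NULL}
};

static int sketch_parse_one(char *p)
{
	substring_t args[MAX_OPT_ARGS];

	switch (match_token(p, my_tokens, args)) {
	case my_opt_sig:
		/* args[0].from points at the text after "sig=" */
		return 0;
	case my_opt_cipher:
		return 0;
	default:
		return -EINVAL;		/* unrecognized option */
	}
}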
diff --git a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c
index 32c5711d79a3..dc74b186145d 100644
--- a/fs/ecryptfs/mmap.c
+++ b/fs/ecryptfs/mmap.c
@@ -34,8 +34,6 @@
34#include <linux/scatterlist.h> 34#include <linux/scatterlist.h>
35#include "ecryptfs_kernel.h" 35#include "ecryptfs_kernel.h"
36 36
37struct kmem_cache *ecryptfs_lower_page_cache;
38
39/** 37/**
40 * ecryptfs_get_locked_page 38 * ecryptfs_get_locked_page
41 * 39 *
@@ -102,13 +100,14 @@ static void set_header_info(char *page_virt,
102 struct ecryptfs_crypt_stat *crypt_stat) 100 struct ecryptfs_crypt_stat *crypt_stat)
103{ 101{
104 size_t written; 102 size_t written;
105 int save_num_header_extents_at_front = 103 size_t save_num_header_bytes_at_front =
106 crypt_stat->num_header_extents_at_front; 104 crypt_stat->num_header_bytes_at_front;
107 105
108 crypt_stat->num_header_extents_at_front = 1; 106 crypt_stat->num_header_bytes_at_front =
107 ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE;
109 ecryptfs_write_header_metadata(page_virt + 20, crypt_stat, &written); 108 ecryptfs_write_header_metadata(page_virt + 20, crypt_stat, &written);
110 crypt_stat->num_header_extents_at_front = 109 crypt_stat->num_header_bytes_at_front =
111 save_num_header_extents_at_front; 110 save_num_header_bytes_at_front;
112} 111}
113 112
114/** 113/**
@@ -134,8 +133,11 @@ ecryptfs_copy_up_encrypted_with_header(struct page *page,
134 loff_t view_extent_num = ((((loff_t)page->index) 133 loff_t view_extent_num = ((((loff_t)page->index)
135 * num_extents_per_page) 134 * num_extents_per_page)
136 + extent_num_in_page); 135 + extent_num_in_page);
136 size_t num_header_extents_at_front =
137 (crypt_stat->num_header_bytes_at_front
138 / crypt_stat->extent_size);
137 139
138 if (view_extent_num < crypt_stat->num_header_extents_at_front) { 140 if (view_extent_num < num_header_extents_at_front) {
139 /* This is a header extent */ 141 /* This is a header extent */
140 char *page_virt; 142 char *page_virt;
141 143
@@ -157,9 +159,8 @@ ecryptfs_copy_up_encrypted_with_header(struct page *page,
157 } else { 159 } else {
158 /* This is an encrypted data extent */ 160 /* This is an encrypted data extent */
159 loff_t lower_offset = 161 loff_t lower_offset =
160 ((view_extent_num - 162 ((view_extent_num * crypt_stat->extent_size)
161 crypt_stat->num_header_extents_at_front) 163 - crypt_stat->num_header_bytes_at_front);
162 * crypt_stat->extent_size);
163 164
164 rc = ecryptfs_read_lower_page_segment( 165 rc = ecryptfs_read_lower_page_segment(
165 page, (lower_offset >> PAGE_CACHE_SHIFT), 166 page, (lower_offset >> PAGE_CACHE_SHIFT),
@@ -257,8 +258,7 @@ static int fill_zeros_to_end_of_page(struct page *page, unsigned int to)
257 end_byte_in_page = i_size_read(inode) % PAGE_CACHE_SIZE; 258 end_byte_in_page = i_size_read(inode) % PAGE_CACHE_SIZE;
258 if (to > end_byte_in_page) 259 if (to > end_byte_in_page)
259 end_byte_in_page = to; 260 end_byte_in_page = to;
260 zero_user_page(page, end_byte_in_page, 261 zero_user_segment(page, end_byte_in_page, PAGE_CACHE_SIZE);
261 PAGE_CACHE_SIZE - end_byte_in_page, KM_USER0);
262out: 262out:
263 return 0; 263 return 0;
264} 264}
@@ -307,7 +307,7 @@ static int ecryptfs_prepare_write(struct file *file, struct page *page,
307 */ 307 */
308 if ((i_size_read(page->mapping->host) == prev_page_end_size) && 308 if ((i_size_read(page->mapping->host) == prev_page_end_size) &&
309 (from != 0)) { 309 (from != 0)) {
310 zero_user_page(page, 0, PAGE_CACHE_SIZE, KM_USER0); 310 zero_user(page, 0, PAGE_CACHE_SIZE);
311 } 311 }
312out: 312out:
313 return rc; 313 return rc;
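Note: the mmap.c hunks switch from zero_user_page() to the newer helpers, which drop the kmap slot argument. Assuming the new helper signatures, the two call sites reduce to:

/* zero_user_segment() takes a [start, end) byte range within the page;
 * zero_user() takes a start offset and a length */
zero_user_segment(page, end_byte_in_page, PAGE_CACHE_SIZE);	/* zero the tail */
zero_user(page, 0, PAGE_CACHE_SIZE);				/* zero the whole page */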
diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c
index 948f57624c05..0c4928623bbc 100644
--- a/fs/ecryptfs/read_write.c
+++ b/fs/ecryptfs/read_write.c
@@ -293,6 +293,7 @@ int ecryptfs_read_lower_page_segment(struct page *page_for_ecryptfs,
293 return rc; 293 return rc;
294} 294}
295 295
296#if 0
296/** 297/**
297 * ecryptfs_read 298 * ecryptfs_read
298 * @data: The virtual address into which to write the data read (and 299 * @data: The virtual address into which to write the data read (and
@@ -371,3 +372,4 @@ int ecryptfs_read(char *data, loff_t offset, size_t size,
371out: 372out:
372 return rc; 373 return rc;
373} 374}
375#endif /* 0 */
diff --git a/fs/ecryptfs/super.c b/fs/ecryptfs/super.c
index 4859c4eecd65..c27ac2b358a1 100644
--- a/fs/ecryptfs/super.c
+++ b/fs/ecryptfs/super.c
@@ -156,32 +156,38 @@ static void ecryptfs_clear_inode(struct inode *inode)
156/** 156/**
157 * ecryptfs_show_options 157 * ecryptfs_show_options
158 * 158 *
159 * Prints the directory we are currently mounted over. 159 * Prints the mount options for a given superblock.
160 * Returns zero on success; non-zero otherwise 160 * Returns zero; does not fail.
161 */ 161 */
162static int ecryptfs_show_options(struct seq_file *m, struct vfsmount *mnt) 162static int ecryptfs_show_options(struct seq_file *m, struct vfsmount *mnt)
163{ 163{
164 struct super_block *sb = mnt->mnt_sb; 164 struct super_block *sb = mnt->mnt_sb;
165 struct dentry *lower_root_dentry = ecryptfs_dentry_to_lower(sb->s_root); 165 struct ecryptfs_mount_crypt_stat *mount_crypt_stat =
166 struct vfsmount *lower_mnt = ecryptfs_dentry_to_lower_mnt(sb->s_root); 166 &ecryptfs_superblock_to_private(sb)->mount_crypt_stat;
167 char *tmp_page; 167 struct ecryptfs_global_auth_tok *walker;
168 char *path; 168
169 int rc = 0; 169 mutex_lock(&mount_crypt_stat->global_auth_tok_list_mutex);
170 170 list_for_each_entry(walker,
171 tmp_page = (char *)__get_free_page(GFP_KERNEL); 171 &mount_crypt_stat->global_auth_tok_list,
172 if (!tmp_page) { 172 mount_crypt_stat_list) {
173 rc = -ENOMEM; 173 seq_printf(m, ",ecryptfs_sig=%s", walker->sig);
174 goto out;
175 }
176 path = d_path(lower_root_dentry, lower_mnt, tmp_page, PAGE_SIZE);
177 if (IS_ERR(path)) {
178 rc = PTR_ERR(path);
179 goto out;
180 } 174 }
181 seq_printf(m, ",dir=%s", path); 175 mutex_unlock(&mount_crypt_stat->global_auth_tok_list_mutex);
182 free_page((unsigned long)tmp_page); 176
183out: 177 seq_printf(m, ",ecryptfs_cipher=%s",
184 return rc; 178 mount_crypt_stat->global_default_cipher_name);
179
180 if (mount_crypt_stat->global_default_cipher_key_size)
181 seq_printf(m, ",ecryptfs_key_bytes=%zd",
182 mount_crypt_stat->global_default_cipher_key_size);
183 if (mount_crypt_stat->flags & ECRYPTFS_PLAINTEXT_PASSTHROUGH_ENABLED)
184 seq_printf(m, ",ecryptfs_passthrough");
185 if (mount_crypt_stat->flags & ECRYPTFS_XATTR_METADATA_ENABLED)
186 seq_printf(m, ",ecryptfs_xattr_metadata");
187 if (mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED)
188 seq_printf(m, ",ecryptfs_encrypted_view");
189
190 return 0;
185} 191}
186 192
187const struct super_operations ecryptfs_sops = { 193const struct super_operations ecryptfs_sops = {
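Note: ecryptfs_show_options() now emits mount options rather than the lower directory path. A ->show_options callback is just a sequence of seq_printf()/seq_puts() calls keyed off per-superblock state; a trimmed sketch with the field names from the hunk above:

static int sketch_show_options(struct seq_file *m, struct vfsmount *mnt)
{
	struct ecryptfs_mount_crypt_stat *mcs =
		&ecryptfs_superblock_to_private(mnt->mnt_sb)->mount_crypt_stat;

	seq_printf(m, ",ecryptfs_cipher=%s", mcs->global_default_cipher_name);
	if (mcs->flags & ECRYPTFS_PLAINTEXT_PASSTHROUGH_ENABLED)
		seq_puts(m, ",ecryptfs_passthrough");
	return 0;	/* ->show_options reports success with 0 */
}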
diff --git a/fs/efs/inode.c b/fs/efs/inode.c
index 174696f9bf14..627c3026946d 100644
--- a/fs/efs/inode.c
+++ b/fs/efs/inode.c
@@ -45,17 +45,26 @@ static inline void extent_copy(efs_extent *src, efs_extent *dst) {
45 return; 45 return;
46} 46}
47 47
48void efs_read_inode(struct inode *inode) 48struct inode *efs_iget(struct super_block *super, unsigned long ino)
49{ 49{
50 int i, inode_index; 50 int i, inode_index;
51 dev_t device; 51 dev_t device;
52 u32 rdev; 52 u32 rdev;
53 struct buffer_head *bh; 53 struct buffer_head *bh;
54 struct efs_sb_info *sb = SUPER_INFO(inode->i_sb); 54 struct efs_sb_info *sb = SUPER_INFO(super);
55 struct efs_inode_info *in = INODE_INFO(inode); 55 struct efs_inode_info *in;
56 efs_block_t block, offset; 56 efs_block_t block, offset;
57 struct efs_dinode *efs_inode; 57 struct efs_dinode *efs_inode;
58 58 struct inode *inode;
59
60 inode = iget_locked(super, ino);
61 if (IS_ERR(inode))
62 return ERR_PTR(-ENOMEM);
63 if (!(inode->i_state & I_NEW))
64 return inode;
65
66 in = INODE_INFO(inode);
67
59 /* 68 /*
60 ** EFS layout: 69 ** EFS layout:
61 ** 70 **
@@ -159,13 +168,13 @@ void efs_read_inode(struct inode *inode)
159 break; 168 break;
160 } 169 }
161 170
162 return; 171 unlock_new_inode(inode);
172 return inode;
163 173
164read_inode_error: 174read_inode_error:
165 printk(KERN_WARNING "EFS: failed to read inode %lu\n", inode->i_ino); 175 printk(KERN_WARNING "EFS: failed to read inode %lu\n", inode->i_ino);
166 make_bad_inode(inode); 176 iget_failed(inode);
167 177 return ERR_PTR(-EIO);
168 return;
169} 178}
170 179
171static inline efs_block_t 180static inline efs_block_t
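Note: efs_read_inode() becomes efs_iget(), the standard conversion away from ->read_inode that also appears in the ext2 hunks below: allocate with iget_locked(), fill the inode only when I_NEW is set, then unlock_new_inode() on success or iget_failed() on error, so callers get an ERR_PTR instead of a bad inode. Condensed shape of the pattern (fill_from_disk() is a hypothetical stand-in for the filesystem's read path):

struct inode *sketch_iget(struct super_block *sb, unsigned long ino)
{
	struct inode *inode = iget_locked(sb, ino);

	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;			/* already cached and filled */

	if (fill_from_disk(inode) < 0) {	/* hypothetical helper */
		iget_failed(inode);		/* unlocks and drops the inode */
		return ERR_PTR(-EIO);
	}
	unlock_new_inode(inode);
	return inode;
}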
diff --git a/fs/efs/namei.c b/fs/efs/namei.c
index f7f407075be1..e26704742d41 100644
--- a/fs/efs/namei.c
+++ b/fs/efs/namei.c
@@ -66,9 +66,10 @@ struct dentry *efs_lookup(struct inode *dir, struct dentry *dentry, struct namei
66 lock_kernel(); 66 lock_kernel();
67 inodenum = efs_find_entry(dir, dentry->d_name.name, dentry->d_name.len); 67 inodenum = efs_find_entry(dir, dentry->d_name.name, dentry->d_name.len);
68 if (inodenum) { 68 if (inodenum) {
69 if (!(inode = iget(dir->i_sb, inodenum))) { 69 inode = efs_iget(dir->i_sb, inodenum);
70 if (IS_ERR(inode)) {
70 unlock_kernel(); 71 unlock_kernel();
71 return ERR_PTR(-EACCES); 72 return ERR_CAST(inode);
72 } 73 }
73 } 74 }
74 unlock_kernel(); 75 unlock_kernel();
@@ -84,12 +85,11 @@ static struct inode *efs_nfs_get_inode(struct super_block *sb, u64 ino,
84 85
85 if (ino == 0) 86 if (ino == 0)
86 return ERR_PTR(-ESTALE); 87 return ERR_PTR(-ESTALE);
87 inode = iget(sb, ino); 88 inode = efs_iget(sb, ino);
88 if (inode == NULL) 89 if (IS_ERR(inode))
89 return ERR_PTR(-ENOMEM); 90 return ERR_CAST(inode);
90 91
91 if (is_bad_inode(inode) || 92 if (generation && inode->i_generation != generation) {
92 (generation && inode->i_generation != generation)) {
93 iput(inode); 93 iput(inode);
94 return ERR_PTR(-ESTALE); 94 return ERR_PTR(-ESTALE);
95 } 95 }
@@ -116,7 +116,7 @@ struct dentry *efs_get_parent(struct dentry *child)
116 struct dentry *parent; 116 struct dentry *parent;
117 struct inode *inode; 117 struct inode *inode;
118 efs_ino_t ino; 118 efs_ino_t ino;
119 int error; 119 long error;
120 120
121 lock_kernel(); 121 lock_kernel();
122 122
@@ -125,10 +125,11 @@ struct dentry *efs_get_parent(struct dentry *child)
125 if (!ino) 125 if (!ino)
126 goto fail; 126 goto fail;
127 127
128 error = -EACCES; 128 inode = efs_iget(child->d_inode->i_sb, ino);
129 inode = iget(child->d_inode->i_sb, ino); 129 if (IS_ERR(inode)) {
130 if (!inode) 130 error = PTR_ERR(inode);
131 goto fail; 131 goto fail;
132 }
132 133
133 error = -ENOMEM; 134 error = -ENOMEM;
134 parent = d_alloc_anon(inode); 135 parent = d_alloc_anon(inode);
diff --git a/fs/efs/super.c b/fs/efs/super.c
index c79bc627f107..14082405cdd1 100644
--- a/fs/efs/super.c
+++ b/fs/efs/super.c
@@ -107,7 +107,6 @@ static int efs_remount(struct super_block *sb, int *flags, char *data)
107static const struct super_operations efs_superblock_operations = { 107static const struct super_operations efs_superblock_operations = {
108 .alloc_inode = efs_alloc_inode, 108 .alloc_inode = efs_alloc_inode,
109 .destroy_inode = efs_destroy_inode, 109 .destroy_inode = efs_destroy_inode,
110 .read_inode = efs_read_inode,
111 .put_super = efs_put_super, 110 .put_super = efs_put_super,
112 .statfs = efs_statfs, 111 .statfs = efs_statfs,
113 .remount_fs = efs_remount, 112 .remount_fs = efs_remount,
@@ -247,6 +246,7 @@ static int efs_fill_super(struct super_block *s, void *d, int silent)
247 struct efs_sb_info *sb; 246 struct efs_sb_info *sb;
248 struct buffer_head *bh; 247 struct buffer_head *bh;
249 struct inode *root; 248 struct inode *root;
249 int ret = -EINVAL;
250 250
251 sb = kzalloc(sizeof(struct efs_sb_info), GFP_KERNEL); 251 sb = kzalloc(sizeof(struct efs_sb_info), GFP_KERNEL);
252 if (!sb) 252 if (!sb)
@@ -303,12 +303,18 @@ static int efs_fill_super(struct super_block *s, void *d, int silent)
303 } 303 }
304 s->s_op = &efs_superblock_operations; 304 s->s_op = &efs_superblock_operations;
305 s->s_export_op = &efs_export_ops; 305 s->s_export_op = &efs_export_ops;
306 root = iget(s, EFS_ROOTINODE); 306 root = efs_iget(s, EFS_ROOTINODE);
307 if (IS_ERR(root)) {
308 printk(KERN_ERR "EFS: get root inode failed\n");
309 ret = PTR_ERR(root);
310 goto out_no_fs;
311 }
312
307 s->s_root = d_alloc_root(root); 313 s->s_root = d_alloc_root(root);
308
309 if (!(s->s_root)) { 314 if (!(s->s_root)) {
310 printk(KERN_ERR "EFS: get root inode failed\n"); 315 printk(KERN_ERR "EFS: get root dentry failed\n");
311 iput(root); 316 iput(root);
317 ret = -ENOMEM;
312 goto out_no_fs; 318 goto out_no_fs;
313 } 319 }
314 320
@@ -318,7 +324,7 @@ out_no_fs_ul:
318out_no_fs: 324out_no_fs:
319 s->s_fs_info = NULL; 325 s->s_fs_info = NULL;
320 kfree(sb); 326 kfree(sb);
321 return -EINVAL; 327 return ret;
322} 328}
323 329
324static int efs_statfs(struct dentry *dentry, struct kstatfs *buf) { 330static int efs_statfs(struct dentry *dentry, struct kstatfs *buf) {
diff --git a/fs/eventfd.c b/fs/eventfd.c
index 2ce19c000d2a..a9f130cd50ac 100644
--- a/fs/eventfd.c
+++ b/fs/eventfd.c
@@ -15,6 +15,7 @@
15#include <linux/spinlock.h> 15#include <linux/spinlock.h>
16#include <linux/anon_inodes.h> 16#include <linux/anon_inodes.h>
17#include <linux/eventfd.h> 17#include <linux/eventfd.h>
18#include <linux/syscalls.h>
18 19
19struct eventfd_ctx { 20struct eventfd_ctx {
20 wait_queue_head_t wqh; 21 wait_queue_head_t wqh;
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 81c04abfb1aa..a415f42d32cf 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -353,7 +353,7 @@ static void ep_poll_safewake(struct poll_safewake *psw, wait_queue_head_t *wq)
353 spin_unlock_irqrestore(&psw->lock, flags); 353 spin_unlock_irqrestore(&psw->lock, flags);
354 354
355 /* Do really wake up now */ 355 /* Do really wake up now */
356 wake_up(wq); 356 wake_up_nested(wq, 1 + wake_nests);
357 357
358 /* Remove the current task from the list */ 358 /* Remove the current task from the list */
359 spin_lock_irqsave(&psw->lock, flags); 359 spin_lock_irqsave(&psw->lock, flags);
diff --git a/fs/exec.c b/fs/exec.c
index 282240afe99e..a44b142fb460 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -112,14 +112,14 @@ asmlinkage long sys_uselib(const char __user * library)
112 goto out; 112 goto out;
113 113
114 error = -EINVAL; 114 error = -EINVAL;
115 if (!S_ISREG(nd.dentry->d_inode->i_mode)) 115 if (!S_ISREG(nd.path.dentry->d_inode->i_mode))
116 goto exit; 116 goto exit;
117 117
118 error = vfs_permission(&nd, MAY_READ | MAY_EXEC); 118 error = vfs_permission(&nd, MAY_READ | MAY_EXEC);
119 if (error) 119 if (error)
120 goto exit; 120 goto exit;
121 121
122 file = nameidata_to_filp(&nd, O_RDONLY); 122 file = nameidata_to_filp(&nd, O_RDONLY|O_LARGEFILE);
123 error = PTR_ERR(file); 123 error = PTR_ERR(file);
124 if (IS_ERR(file)) 124 if (IS_ERR(file))
125 goto out; 125 goto out;
@@ -148,7 +148,7 @@ out:
148 return error; 148 return error;
149exit: 149exit:
150 release_open_intent(&nd); 150 release_open_intent(&nd);
151 path_release(&nd); 151 path_put(&nd.path);
152 goto out; 152 goto out;
153} 153}
154 154
@@ -652,13 +652,14 @@ struct file *open_exec(const char *name)
652 file = ERR_PTR(err); 652 file = ERR_PTR(err);
653 653
654 if (!err) { 654 if (!err) {
655 struct inode *inode = nd.dentry->d_inode; 655 struct inode *inode = nd.path.dentry->d_inode;
656 file = ERR_PTR(-EACCES); 656 file = ERR_PTR(-EACCES);
657 if (S_ISREG(inode->i_mode)) { 657 if (S_ISREG(inode->i_mode)) {
658 int err = vfs_permission(&nd, MAY_EXEC); 658 int err = vfs_permission(&nd, MAY_EXEC);
659 file = ERR_PTR(err); 659 file = ERR_PTR(err);
660 if (!err) { 660 if (!err) {
661 file = nameidata_to_filp(&nd, O_RDONLY); 661 file = nameidata_to_filp(&nd,
662 O_RDONLY|O_LARGEFILE);
662 if (!IS_ERR(file)) { 663 if (!IS_ERR(file)) {
663 err = deny_write_access(file); 664 err = deny_write_access(file);
664 if (err) { 665 if (err) {
@@ -671,7 +672,7 @@ out:
671 } 672 }
672 } 673 }
673 release_open_intent(&nd); 674 release_open_intent(&nd);
674 path_release(&nd); 675 path_put(&nd.path);
675 } 676 }
676 goto out; 677 goto out;
677} 678}
@@ -760,7 +761,7 @@ static int de_thread(struct task_struct *tsk)
760 */ 761 */
761 read_lock(&tasklist_lock); 762 read_lock(&tasklist_lock);
762 spin_lock_irq(lock); 763 spin_lock_irq(lock);
763 if (sig->flags & SIGNAL_GROUP_EXIT) { 764 if (signal_group_exit(sig)) {
764 /* 765 /*
765 * Another group action in progress, just 766 * Another group action in progress, just
766 * return so that the signal is processed. 767 * return so that the signal is processed.
@@ -778,31 +779,13 @@ static int de_thread(struct task_struct *tsk)
778 if (unlikely(tsk->group_leader == task_child_reaper(tsk))) 779 if (unlikely(tsk->group_leader == task_child_reaper(tsk)))
779 task_active_pid_ns(tsk)->child_reaper = tsk; 780 task_active_pid_ns(tsk)->child_reaper = tsk;
780 781
782 sig->group_exit_task = tsk;
781 zap_other_threads(tsk); 783 zap_other_threads(tsk);
782 read_unlock(&tasklist_lock); 784 read_unlock(&tasklist_lock);
783 785
784 /* 786 /* Account for the thread group leader hanging around: */
785 * Account for the thread group leader hanging around: 787 count = thread_group_leader(tsk) ? 1 : 2;
786 */
787 count = 1;
788 if (!thread_group_leader(tsk)) {
789 count = 2;
790 /*
791 * The SIGALRM timer survives the exec, but needs to point
792 * at us as the new group leader now. We have a race with
793 * a timer firing now getting the old leader, so we need to
794 * synchronize with any firing (by calling del_timer_sync)
795 * before we can safely let the old group leader die.
796 */
797 sig->tsk = tsk;
798 spin_unlock_irq(lock);
799 if (hrtimer_cancel(&sig->real_timer))
800 hrtimer_restart(&sig->real_timer);
801 spin_lock_irq(lock);
802 }
803
804 sig->notify_count = count; 788 sig->notify_count = count;
805 sig->group_exit_task = tsk;
806 while (atomic_read(&sig->count) > count) { 789 while (atomic_read(&sig->count) > count) {
807 __set_current_state(TASK_UNINTERRUPTIBLE); 790 __set_current_state(TASK_UNINTERRUPTIBLE);
808 spin_unlock_irq(lock); 791 spin_unlock_irq(lock);
@@ -871,15 +854,10 @@ static int de_thread(struct task_struct *tsk)
871 leader->exit_state = EXIT_DEAD; 854 leader->exit_state = EXIT_DEAD;
872 855
873 write_unlock_irq(&tasklist_lock); 856 write_unlock_irq(&tasklist_lock);
874 } 857 }
875 858
876 sig->group_exit_task = NULL; 859 sig->group_exit_task = NULL;
877 sig->notify_count = 0; 860 sig->notify_count = 0;
878 /*
879 * There may be one thread left which is just exiting,
880 * but it's safe to stop telling the group to kill themselves.
881 */
882 sig->flags = 0;
883 861
884no_thread_group: 862no_thread_group:
885 exit_itimers(sig); 863 exit_itimers(sig);
@@ -947,12 +925,13 @@ static void flush_old_files(struct files_struct * files)
947 spin_unlock(&files->file_lock); 925 spin_unlock(&files->file_lock);
948} 926}
949 927
950void get_task_comm(char *buf, struct task_struct *tsk) 928char *get_task_comm(char *buf, struct task_struct *tsk)
951{ 929{
952 /* buf must be at least sizeof(tsk->comm) in size */ 930 /* buf must be at least sizeof(tsk->comm) in size */
953 task_lock(tsk); 931 task_lock(tsk);
954 strncpy(buf, tsk->comm, sizeof(tsk->comm)); 932 strncpy(buf, tsk->comm, sizeof(tsk->comm));
955 task_unlock(tsk); 933 task_unlock(tsk);
934 return buf;
956} 935}
957 936
958void set_task_comm(struct task_struct *tsk, char *buf) 937void set_task_comm(struct task_struct *tsk, char *buf)
@@ -1188,7 +1167,7 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
1188{ 1167{
1189 int try,retval; 1168 int try,retval;
1190 struct linux_binfmt *fmt; 1169 struct linux_binfmt *fmt;
1191#ifdef __alpha__ 1170#if defined(__alpha__) && defined(CONFIG_ARCH_SUPPORTS_AOUT)
1192 /* handle /sbin/loader.. */ 1171 /* handle /sbin/loader.. */
1193 { 1172 {
1194 struct exec * eh = (struct exec *) bprm->buf; 1173 struct exec * eh = (struct exec *) bprm->buf;
@@ -1548,7 +1527,7 @@ static inline int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
1548 int err = -EAGAIN; 1527 int err = -EAGAIN;
1549 1528
1550 spin_lock_irq(&tsk->sighand->siglock); 1529 spin_lock_irq(&tsk->sighand->siglock);
1551 if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT)) { 1530 if (!signal_group_exit(tsk->signal)) {
1552 tsk->signal->group_exit_code = exit_code; 1531 tsk->signal->group_exit_code = exit_code;
1553 zap_process(tsk); 1532 zap_process(tsk);
1554 err = 0; 1533 err = 0;
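Note: among the exec.c changes, get_task_comm() now returns the buffer it filled, which lets call sites chain it. A one-line usage sketch:

char comm[TASK_COMM_LEN];	/* sizeof(tsk->comm) */

printk(KERN_INFO "exec by %s\n", get_task_comm(comm, current));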
diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
index 377ad172d74b..e7b2bafa1dd9 100644
--- a/fs/ext2/balloc.c
+++ b/fs/ext2/balloc.c
@@ -69,9 +69,53 @@ struct ext2_group_desc * ext2_get_group_desc(struct super_block * sb,
69 return desc + offset; 69 return desc + offset;
70} 70}
71 71
72static int ext2_valid_block_bitmap(struct super_block *sb,
73 struct ext2_group_desc *desc,
74 unsigned int block_group,
75 struct buffer_head *bh)
76{
77 ext2_grpblk_t offset;
78 ext2_grpblk_t next_zero_bit;
79 ext2_fsblk_t bitmap_blk;
80 ext2_fsblk_t group_first_block;
81
82 group_first_block = ext2_group_first_block_no(sb, block_group);
83
84 /* check whether block bitmap block number is set */
85 bitmap_blk = le32_to_cpu(desc->bg_block_bitmap);
86 offset = bitmap_blk - group_first_block;
87 if (!ext2_test_bit(offset, bh->b_data))
88 /* bad block bitmap */
89 goto err_out;
90
91 /* check whether the inode bitmap block number is set */
92 bitmap_blk = le32_to_cpu(desc->bg_inode_bitmap);
93 offset = bitmap_blk - group_first_block;
94 if (!ext2_test_bit(offset, bh->b_data))
95 /* bad block bitmap */
96 goto err_out;
97
98 /* check whether the inode table block number is set */
99 bitmap_blk = le32_to_cpu(desc->bg_inode_table);
100 offset = bitmap_blk - group_first_block;
101 next_zero_bit = ext2_find_next_zero_bit(bh->b_data,
102 offset + EXT2_SB(sb)->s_itb_per_group,
103 offset);
104 if (next_zero_bit >= offset + EXT2_SB(sb)->s_itb_per_group)
105 /* good bitmap for inode tables */
106 return 1;
107
108err_out:
109 ext2_error(sb, __FUNCTION__,
110 "Invalid block bitmap - "
111 "block_group = %d, block = %lu",
112 block_group, bitmap_blk);
113 return 0;
114}
115
72/* 116/*
73 * Read the bitmap for a given block_group, reading into the specified 117 * Read the bitmap for a given block_group,and validate the
74 * slot in the superblock's bitmap cache. 118 * bits for block/inode/inode tables are set in the bitmaps
75 * 119 *
76 * Return buffer_head on success or NULL in case of failure. 120 * Return buffer_head on success or NULL in case of failure.
77 */ 121 */
@@ -80,17 +124,36 @@ read_block_bitmap(struct super_block *sb, unsigned int block_group)
80{ 124{
81 struct ext2_group_desc * desc; 125 struct ext2_group_desc * desc;
82 struct buffer_head * bh = NULL; 126 struct buffer_head * bh = NULL;
83 127 ext2_fsblk_t bitmap_blk;
84 desc = ext2_get_group_desc (sb, block_group, NULL); 128
129 desc = ext2_get_group_desc(sb, block_group, NULL);
85 if (!desc) 130 if (!desc)
86 goto error_out; 131 return NULL;
87 bh = sb_bread(sb, le32_to_cpu(desc->bg_block_bitmap)); 132 bitmap_blk = le32_to_cpu(desc->bg_block_bitmap);
88 if (!bh) 133 bh = sb_getblk(sb, bitmap_blk);
89 ext2_error (sb, "read_block_bitmap", 134 if (unlikely(!bh)) {
135 ext2_error(sb, __FUNCTION__,
136 "Cannot read block bitmap - "
137 "block_group = %d, block_bitmap = %u",
138 block_group, le32_to_cpu(desc->bg_block_bitmap));
139 return NULL;
140 }
141 if (likely(bh_uptodate_or_lock(bh)))
142 return bh;
143
144 if (bh_submit_read(bh) < 0) {
145 brelse(bh);
146 ext2_error(sb, __FUNCTION__,
90 "Cannot read block bitmap - " 147 "Cannot read block bitmap - "
91 "block_group = %d, block_bitmap = %u", 148 "block_group = %d, block_bitmap = %u",
92 block_group, le32_to_cpu(desc->bg_block_bitmap)); 149 block_group, le32_to_cpu(desc->bg_block_bitmap));
93error_out: 150 return NULL;
151 }
152 if (!ext2_valid_block_bitmap(sb, desc, block_group, bh)) {
153 brelse(bh);
154 return NULL;
155 }
156
94 return bh; 157 return bh;
95} 158}
96 159
@@ -474,11 +537,13 @@ do_more:
474 in_range (block, le32_to_cpu(desc->bg_inode_table), 537 in_range (block, le32_to_cpu(desc->bg_inode_table),
475 sbi->s_itb_per_group) || 538 sbi->s_itb_per_group) ||
476 in_range (block + count - 1, le32_to_cpu(desc->bg_inode_table), 539 in_range (block + count - 1, le32_to_cpu(desc->bg_inode_table),
477 sbi->s_itb_per_group)) 540 sbi->s_itb_per_group)) {
478 ext2_error (sb, "ext2_free_blocks", 541 ext2_error (sb, "ext2_free_blocks",
479 "Freeing blocks in system zones - " 542 "Freeing blocks in system zones - "
480 "Block = %lu, count = %lu", 543 "Block = %lu, count = %lu",
481 block, count); 544 block, count);
545 goto error_return;
546 }
482 547
483 for (i = 0, group_freed = 0; i < count; i++) { 548 for (i = 0, group_freed = 0; i < count; i++) {
484 if (!ext2_clear_bit_atomic(sb_bgl_lock(sbi, block_group), 549 if (!ext2_clear_bit_atomic(sb_bgl_lock(sbi, block_group),
@@ -1250,8 +1315,8 @@ retry_alloc:
1250 smp_rmb(); 1315 smp_rmb();
1251 1316
1252 /* 1317 /*
1253 * Now search the rest of the groups. We assume that 1318 * Now search the rest of the groups. We assume that
1254 * i and gdp correctly point to the last group visited. 1319 * group_no and gdp correctly point to the last group visited.
1255 */ 1320 */
1256 for (bgi = 0; bgi < ngroups; bgi++) { 1321 for (bgi = 0; bgi < ngroups; bgi++) {
1257 group_no++; 1322 group_no++;
@@ -1311,11 +1376,13 @@ allocated:
1311 in_range(ret_block, le32_to_cpu(gdp->bg_inode_table), 1376 in_range(ret_block, le32_to_cpu(gdp->bg_inode_table),
1312 EXT2_SB(sb)->s_itb_per_group) || 1377 EXT2_SB(sb)->s_itb_per_group) ||
1313 in_range(ret_block + num - 1, le32_to_cpu(gdp->bg_inode_table), 1378 in_range(ret_block + num - 1, le32_to_cpu(gdp->bg_inode_table),
1314 EXT2_SB(sb)->s_itb_per_group)) 1379 EXT2_SB(sb)->s_itb_per_group)) {
1315 ext2_error(sb, "ext2_new_blocks", 1380 ext2_error(sb, "ext2_new_blocks",
1316 "Allocating block in system zone - " 1381 "Allocating block in system zone - "
1317 "blocks from "E2FSBLK", length %lu", 1382 "blocks from "E2FSBLK", length %lu",
1318 ret_block, num); 1383 ret_block, num);
1384 goto out;
1385 }
1319 1386
1320 performed_allocation = 1; 1387 performed_allocation = 1;
1321 1388
@@ -1466,9 +1533,6 @@ int ext2_bg_has_super(struct super_block *sb, int group)
1466 */ 1533 */
1467unsigned long ext2_bg_num_gdb(struct super_block *sb, int group) 1534unsigned long ext2_bg_num_gdb(struct super_block *sb, int group)
1468{ 1535{
1469 if (EXT2_HAS_RO_COMPAT_FEATURE(sb,EXT2_FEATURE_RO_COMPAT_SPARSE_SUPER)&& 1536 return ext2_bg_has_super(sb, group) ? EXT2_SB(sb)->s_gdb_count : 0;
1470 !ext2_group_sparse(group))
1471 return 0;
1472 return EXT2_SB(sb)->s_gdb_count;
1473} 1537}
1474 1538
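Note: read_block_bitmap() in ext2 now reads via sb_getblk() plus bh_uptodate_or_lock()/bh_submit_read() and validates the bitmap before returning it. A trimmed sketch of that read-and-validate shape:

static struct buffer_head *sketch_read_bitmap(struct super_block *sb,
					      struct ext2_group_desc *desc,
					      unsigned int block_group)
{
	ext2_fsblk_t bitmap_blk = le32_to_cpu(desc->bg_block_bitmap);
	struct buffer_head *bh = sb_getblk(sb, bitmap_blk);

	if (!bh)
		return NULL;			/* could not map the block */
	if (!bh_uptodate_or_lock(bh) &&		/* 0: not uptodate, now locked */
	    bh_submit_read(bh) < 0) {		/* submits the read and waits */
		brelse(bh);
		return NULL;
	}
	if (!ext2_valid_block_bitmap(sb, desc, block_group, bh)) {
		brelse(bh);
		return NULL;			/* corrupt bitmap */
	}
	return bh;
}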
diff --git a/fs/ext2/dir.c b/fs/ext2/dir.c
index d868e26c15eb..8dededd80fe2 100644
--- a/fs/ext2/dir.c
+++ b/fs/ext2/dir.c
@@ -703,7 +703,7 @@ const struct file_operations ext2_dir_operations = {
703 .llseek = generic_file_llseek, 703 .llseek = generic_file_llseek,
704 .read = generic_read_dir, 704 .read = generic_read_dir,
705 .readdir = ext2_readdir, 705 .readdir = ext2_readdir,
706 .ioctl = ext2_ioctl, 706 .unlocked_ioctl = ext2_ioctl,
707#ifdef CONFIG_COMPAT 707#ifdef CONFIG_COMPAT
708 .compat_ioctl = ext2_compat_ioctl, 708 .compat_ioctl = ext2_compat_ioctl,
709#endif 709#endif
diff --git a/fs/ext2/ext2.h b/fs/ext2/ext2.h
index c87ae29c19cb..47d88da2d33b 100644
--- a/fs/ext2/ext2.h
+++ b/fs/ext2/ext2.h
@@ -124,9 +124,8 @@ extern void ext2_check_inodes_bitmap (struct super_block *);
124extern unsigned long ext2_count_free (struct buffer_head *, unsigned); 124extern unsigned long ext2_count_free (struct buffer_head *, unsigned);
125 125
126/* inode.c */ 126/* inode.c */
127extern void ext2_read_inode (struct inode *); 127extern struct inode *ext2_iget (struct super_block *, unsigned long);
128extern int ext2_write_inode (struct inode *, int); 128extern int ext2_write_inode (struct inode *, int);
129extern void ext2_put_inode (struct inode *);
130extern void ext2_delete_inode (struct inode *); 129extern void ext2_delete_inode (struct inode *);
131extern int ext2_sync_inode (struct inode *); 130extern int ext2_sync_inode (struct inode *);
132extern int ext2_get_block(struct inode *, sector_t, struct buffer_head *, int); 131extern int ext2_get_block(struct inode *, sector_t, struct buffer_head *, int);
@@ -139,8 +138,7 @@ int __ext2_write_begin(struct file *file, struct address_space *mapping,
139 struct page **pagep, void **fsdata); 138 struct page **pagep, void **fsdata);
140 139
141/* ioctl.c */ 140/* ioctl.c */
142extern int ext2_ioctl (struct inode *, struct file *, unsigned int, 141extern long ext2_ioctl(struct file *, unsigned int, unsigned long);
143 unsigned long);
144extern long ext2_compat_ioctl(struct file *, unsigned int, unsigned long); 142extern long ext2_compat_ioctl(struct file *, unsigned int, unsigned long);
145 143
146/* namei.c */ 144/* namei.c */
diff --git a/fs/ext2/file.c b/fs/ext2/file.c
index c051798459a1..5f2fa9c36293 100644
--- a/fs/ext2/file.c
+++ b/fs/ext2/file.c
@@ -48,7 +48,7 @@ const struct file_operations ext2_file_operations = {
48 .write = do_sync_write, 48 .write = do_sync_write,
49 .aio_read = generic_file_aio_read, 49 .aio_read = generic_file_aio_read,
50 .aio_write = generic_file_aio_write, 50 .aio_write = generic_file_aio_write,
51 .ioctl = ext2_ioctl, 51 .unlocked_ioctl = ext2_ioctl,
52#ifdef CONFIG_COMPAT 52#ifdef CONFIG_COMPAT
53 .compat_ioctl = ext2_compat_ioctl, 53 .compat_ioctl = ext2_compat_ioctl,
54#endif 54#endif
@@ -65,7 +65,7 @@ const struct file_operations ext2_xip_file_operations = {
65 .llseek = generic_file_llseek, 65 .llseek = generic_file_llseek,
66 .read = xip_file_read, 66 .read = xip_file_read,
67 .write = xip_file_write, 67 .write = xip_file_write,
68 .ioctl = ext2_ioctl, 68 .unlocked_ioctl = ext2_ioctl,
69#ifdef CONFIG_COMPAT 69#ifdef CONFIG_COMPAT
70 .compat_ioctl = ext2_compat_ioctl, 70 .compat_ioctl = ext2_compat_ioctl,
71#endif 71#endif
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index b1ab32ab5a77..c62006805427 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -286,15 +286,12 @@ static unsigned long ext2_find_near(struct inode *inode, Indirect *ind)
286 * ext2_find_goal - find a prefered place for allocation. 286 * ext2_find_goal - find a prefered place for allocation.
287 * @inode: owner 287 * @inode: owner
288 * @block: block we want 288 * @block: block we want
289 * @chain: chain of indirect blocks
290 * @partial: pointer to the last triple within a chain 289 * @partial: pointer to the last triple within a chain
291 * 290 *
292 * Returns preferred place for a block (the goal). 291 * Returns preferred place for a block (the goal).
293 */ 292 */
294 293
295static inline int ext2_find_goal(struct inode *inode, 294static inline int ext2_find_goal(struct inode *inode, long block,
296 long block,
297 Indirect chain[4],
298 Indirect *partial) 295 Indirect *partial)
299{ 296{
300 struct ext2_block_alloc_info *block_i; 297 struct ext2_block_alloc_info *block_i;
@@ -569,7 +566,6 @@ static void ext2_splice_branch(struct inode *inode,
569 * 566 *
570 * `handle' can be NULL if create == 0. 567 * `handle' can be NULL if create == 0.
571 * 568 *
572 * The BKL may not be held on entry here. Be sure to take it early.
573 * return > 0, # of blocks mapped or allocated. 569 * return > 0, # of blocks mapped or allocated.
574 * return = 0, if plain lookup failed. 570 * return = 0, if plain lookup failed.
575 * return < 0, error case. 571 * return < 0, error case.
@@ -639,7 +635,7 @@ reread:
639 if (S_ISREG(inode->i_mode) && (!ei->i_block_alloc_info)) 635 if (S_ISREG(inode->i_mode) && (!ei->i_block_alloc_info))
640 ext2_init_block_alloc_info(inode); 636 ext2_init_block_alloc_info(inode);
641 637
642 goal = ext2_find_goal(inode, iblock, chain, partial); 638 goal = ext2_find_goal(inode, iblock, partial);
643 639
644 /* the number of blocks need to allocate for [d,t]indirect blocks */ 640 /* the number of blocks need to allocate for [d,t]indirect blocks */
645 indirect_blks = (chain + depth) - partial - 1; 641 indirect_blks = (chain + depth) - partial - 1;
@@ -1185,22 +1181,33 @@ void ext2_get_inode_flags(struct ext2_inode_info *ei)
1185 ei->i_flags |= EXT2_DIRSYNC_FL; 1181 ei->i_flags |= EXT2_DIRSYNC_FL;
1186} 1182}
1187 1183
1188void ext2_read_inode (struct inode * inode) 1184struct inode *ext2_iget (struct super_block *sb, unsigned long ino)
1189{ 1185{
1190 struct ext2_inode_info *ei = EXT2_I(inode); 1186 struct ext2_inode_info *ei;
1191 ino_t ino = inode->i_ino;
1192 struct buffer_head * bh; 1187 struct buffer_head * bh;
1193 struct ext2_inode * raw_inode = ext2_get_inode(inode->i_sb, ino, &bh); 1188 struct ext2_inode *raw_inode;
1189 struct inode *inode;
1190 long ret = -EIO;
1194 int n; 1191 int n;
1195 1192
1193 inode = iget_locked(sb, ino);
1194 if (!inode)
1195 return ERR_PTR(-ENOMEM);
1196 if (!(inode->i_state & I_NEW))
1197 return inode;
1198
1199 ei = EXT2_I(inode);
1196#ifdef CONFIG_EXT2_FS_POSIX_ACL 1200#ifdef CONFIG_EXT2_FS_POSIX_ACL
1197 ei->i_acl = EXT2_ACL_NOT_CACHED; 1201 ei->i_acl = EXT2_ACL_NOT_CACHED;
1198 ei->i_default_acl = EXT2_ACL_NOT_CACHED; 1202 ei->i_default_acl = EXT2_ACL_NOT_CACHED;
1199#endif 1203#endif
1200 ei->i_block_alloc_info = NULL; 1204 ei->i_block_alloc_info = NULL;
1201 1205
1202 if (IS_ERR(raw_inode)) 1206 raw_inode = ext2_get_inode(inode->i_sb, ino, &bh);
1207 if (IS_ERR(raw_inode)) {
1208 ret = PTR_ERR(raw_inode);
1203 goto bad_inode; 1209 goto bad_inode;
1210 }
1204 1211
1205 inode->i_mode = le16_to_cpu(raw_inode->i_mode); 1212 inode->i_mode = le16_to_cpu(raw_inode->i_mode);
1206 inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low); 1213 inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
@@ -1224,6 +1231,7 @@ void ext2_read_inode (struct inode * inode)
1224 if (inode->i_nlink == 0 && (inode->i_mode == 0 || ei->i_dtime)) { 1231 if (inode->i_nlink == 0 && (inode->i_mode == 0 || ei->i_dtime)) {
1225 /* this inode is deleted */ 1232 /* this inode is deleted */
1226 brelse (bh); 1233 brelse (bh);
1234 ret = -ESTALE;
1227 goto bad_inode; 1235 goto bad_inode;
1228 } 1236 }
1229 inode->i_blocks = le32_to_cpu(raw_inode->i_blocks); 1237 inode->i_blocks = le32_to_cpu(raw_inode->i_blocks);
@@ -1290,11 +1298,12 @@ void ext2_read_inode (struct inode * inode)
1290 } 1298 }
1291 brelse (bh); 1299 brelse (bh);
1292 ext2_set_inode_flags(inode); 1300 ext2_set_inode_flags(inode);
1293 return; 1301 unlock_new_inode(inode);
1302 return inode;
1294 1303
1295bad_inode: 1304bad_inode:
1296 make_bad_inode(inode); 1305 iget_failed(inode);
1297 return; 1306 return ERR_PTR(ret);
1298} 1307}
1299 1308
1300static int ext2_update_inode(struct inode * inode, int do_sync) 1309static int ext2_update_inode(struct inode * inode, int do_sync)
diff --git a/fs/ext2/ioctl.c b/fs/ext2/ioctl.c
index 320b2cb3d4d2..b8ea11fee5c6 100644
--- a/fs/ext2/ioctl.c
+++ b/fs/ext2/ioctl.c
@@ -17,9 +17,9 @@
17#include <asm/uaccess.h> 17#include <asm/uaccess.h>
18 18
19 19
20int ext2_ioctl (struct inode * inode, struct file * filp, unsigned int cmd, 20long ext2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
21 unsigned long arg)
22{ 21{
22 struct inode *inode = filp->f_dentry->d_inode;
23 struct ext2_inode_info *ei = EXT2_I(inode); 23 struct ext2_inode_info *ei = EXT2_I(inode);
24 unsigned int flags; 24 unsigned int flags;
25 unsigned short rsv_window_size; 25 unsigned short rsv_window_size;
@@ -141,9 +141,6 @@ int ext2_ioctl (struct inode * inode, struct file * filp, unsigned int cmd,
141#ifdef CONFIG_COMPAT 141#ifdef CONFIG_COMPAT
142long ext2_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 142long ext2_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
143{ 143{
144 struct inode *inode = file->f_path.dentry->d_inode;
145 int ret;
146
147 /* These are just misnamed, they actually get/put from/to user an int */ 144 /* These are just misnamed, they actually get/put from/to user an int */
148 switch (cmd) { 145 switch (cmd) {
149 case EXT2_IOC32_GETFLAGS: 146 case EXT2_IOC32_GETFLAGS:
@@ -161,9 +158,6 @@ long ext2_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
161 default: 158 default:
162 return -ENOIOCTLCMD; 159 return -ENOIOCTLCMD;
163 } 160 }
164 lock_kernel(); 161 return ext2_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
165 ret = ext2_ioctl(inode, file, cmd, (unsigned long) compat_ptr(arg));
166 unlock_kernel();
167 return ret;
168} 162}
169#endif 163#endif
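Note: ext2_ioctl() is converted to the unlocked_ioctl prototype, so it takes no inode argument and runs without the BKL; the compat path simply forwards to it. The shape of such a handler, trimmed to one command for illustration:

long sketch_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct inode *inode = filp->f_dentry->d_inode;

	switch (cmd) {
	case EXT2_IOC_GETVERSION:
		return put_user(inode->i_generation, (int __user *) arg);
	default:
		return -ENOTTY;
	}
}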
diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c
index e69beed839ac..80c97fd8c571 100644
--- a/fs/ext2/namei.c
+++ b/fs/ext2/namei.c
@@ -63,9 +63,9 @@ static struct dentry *ext2_lookup(struct inode * dir, struct dentry *dentry, str
63 ino = ext2_inode_by_name(dir, dentry); 63 ino = ext2_inode_by_name(dir, dentry);
64 inode = NULL; 64 inode = NULL;
65 if (ino) { 65 if (ino) {
66 inode = iget(dir->i_sb, ino); 66 inode = ext2_iget(dir->i_sb, ino);
67 if (!inode) 67 if (IS_ERR(inode))
68 return ERR_PTR(-EACCES); 68 return ERR_CAST(inode);
69 } 69 }
70 return d_splice_alias(inode, dentry); 70 return d_splice_alias(inode, dentry);
71} 71}
@@ -83,10 +83,10 @@ struct dentry *ext2_get_parent(struct dentry *child)
83 ino = ext2_inode_by_name(child->d_inode, &dotdot); 83 ino = ext2_inode_by_name(child->d_inode, &dotdot);
84 if (!ino) 84 if (!ino)
85 return ERR_PTR(-ENOENT); 85 return ERR_PTR(-ENOENT);
86 inode = iget(child->d_inode->i_sb, ino); 86 inode = ext2_iget(child->d_inode->i_sb, ino);
87 87
88 if (!inode) 88 if (IS_ERR(inode))
89 return ERR_PTR(-EACCES); 89 return ERR_CAST(inode);
90 parent = d_alloc_anon(inode); 90 parent = d_alloc_anon(inode);
91 if (!parent) { 91 if (!parent) {
92 iput(inode); 92 iput(inode);
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index 6abaf75163f0..088b011bb97e 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -234,16 +234,16 @@ static int ext2_show_options(struct seq_file *seq, struct vfsmount *vfs)
234 le16_to_cpu(es->s_def_resgid) != EXT2_DEF_RESGID) { 234 le16_to_cpu(es->s_def_resgid) != EXT2_DEF_RESGID) {
235 seq_printf(seq, ",resgid=%u", sbi->s_resgid); 235 seq_printf(seq, ",resgid=%u", sbi->s_resgid);
236 } 236 }
237 if (test_opt(sb, ERRORS_CONT)) { 237 if (test_opt(sb, ERRORS_RO)) {
238 int def_errors = le16_to_cpu(es->s_errors); 238 int def_errors = le16_to_cpu(es->s_errors);
239 239
240 if (def_errors == EXT2_ERRORS_PANIC || 240 if (def_errors == EXT2_ERRORS_PANIC ||
241 def_errors == EXT2_ERRORS_RO) { 241 def_errors == EXT2_ERRORS_CONTINUE) {
242 seq_puts(seq, ",errors=continue"); 242 seq_puts(seq, ",errors=remount-ro");
243 } 243 }
244 } 244 }
245 if (test_opt(sb, ERRORS_RO)) 245 if (test_opt(sb, ERRORS_CONT))
246 seq_puts(seq, ",errors=remount-ro"); 246 seq_puts(seq, ",errors=continue");
247 if (test_opt(sb, ERRORS_PANIC)) 247 if (test_opt(sb, ERRORS_PANIC))
248 seq_puts(seq, ",errors=panic"); 248 seq_puts(seq, ",errors=panic");
249 if (test_opt(sb, NO_UID32)) 249 if (test_opt(sb, NO_UID32))
@@ -285,6 +285,9 @@ static int ext2_show_options(struct seq_file *seq, struct vfsmount *vfs)
285 seq_puts(seq, ",xip"); 285 seq_puts(seq, ",xip");
286#endif 286#endif
287 287
288 if (!test_opt(sb, RESERVATION))
289 seq_puts(seq, ",noreservation");
290
288 return 0; 291 return 0;
289} 292}
290 293
@@ -296,7 +299,6 @@ static ssize_t ext2_quota_write(struct super_block *sb, int type, const char *da
296static const struct super_operations ext2_sops = { 299static const struct super_operations ext2_sops = {
297 .alloc_inode = ext2_alloc_inode, 300 .alloc_inode = ext2_alloc_inode,
298 .destroy_inode = ext2_destroy_inode, 301 .destroy_inode = ext2_destroy_inode,
299 .read_inode = ext2_read_inode,
300 .write_inode = ext2_write_inode, 302 .write_inode = ext2_write_inode,
301 .delete_inode = ext2_delete_inode, 303 .delete_inode = ext2_delete_inode,
302 .put_super = ext2_put_super, 304 .put_super = ext2_put_super,
@@ -326,11 +328,10 @@ static struct inode *ext2_nfs_get_inode(struct super_block *sb,
326 * it might be "neater" to call ext2_get_inode first and check 328 * it might be "neater" to call ext2_get_inode first and check
327 * if the inode is valid..... 329 * if the inode is valid.....
328 */ 330 */
329 inode = iget(sb, ino); 331 inode = ext2_iget(sb, ino);
330 if (inode == NULL) 332 if (IS_ERR(inode))
331 return ERR_PTR(-ENOMEM); 333 return ERR_CAST(inode);
332 if (is_bad_inode(inode) || 334 if (generation && inode->i_generation != generation) {
333 (generation && inode->i_generation != generation)) {
334 /* we didn't find the right inode.. */ 335 /* we didn't find the right inode.. */
335 iput(inode); 336 iput(inode);
336 return ERR_PTR(-ESTALE); 337 return ERR_PTR(-ESTALE);
@@ -617,27 +618,24 @@ static int ext2_setup_super (struct super_block * sb,
617 return res; 618 return res;
618} 619}
619 620
620static int ext2_check_descriptors (struct super_block * sb) 621static int ext2_check_descriptors(struct super_block *sb)
621{ 622{
622 int i; 623 int i;
623 int desc_block = 0;
624 struct ext2_sb_info *sbi = EXT2_SB(sb); 624 struct ext2_sb_info *sbi = EXT2_SB(sb);
625 unsigned long first_block = le32_to_cpu(sbi->s_es->s_first_data_block); 625 unsigned long first_block = le32_to_cpu(sbi->s_es->s_first_data_block);
626 unsigned long last_block; 626 unsigned long last_block;
627 struct ext2_group_desc * gdp = NULL;
628 627
629 ext2_debug ("Checking group descriptors"); 628 ext2_debug ("Checking group descriptors");
630 629
631 for (i = 0; i < sbi->s_groups_count; i++) 630 for (i = 0; i < sbi->s_groups_count; i++) {
632 { 631 struct ext2_group_desc *gdp = ext2_get_group_desc(sb, i, NULL);
632
633 if (i == sbi->s_groups_count - 1) 633 if (i == sbi->s_groups_count - 1)
634 last_block = le32_to_cpu(sbi->s_es->s_blocks_count) - 1; 634 last_block = le32_to_cpu(sbi->s_es->s_blocks_count) - 1;
635 else 635 else
636 last_block = first_block + 636 last_block = first_block +
637 (EXT2_BLOCKS_PER_GROUP(sb) - 1); 637 (EXT2_BLOCKS_PER_GROUP(sb) - 1);
638 638
639 if ((i % EXT2_DESC_PER_BLOCK(sb)) == 0)
640 gdp = (struct ext2_group_desc *) sbi->s_group_desc[desc_block++]->b_data;
641 if (le32_to_cpu(gdp->bg_block_bitmap) < first_block || 639 if (le32_to_cpu(gdp->bg_block_bitmap) < first_block ||
642 le32_to_cpu(gdp->bg_block_bitmap) > last_block) 640 le32_to_cpu(gdp->bg_block_bitmap) > last_block)
643 { 641 {
@@ -667,7 +665,6 @@ static int ext2_check_descriptors (struct super_block * sb)
667 return 0; 665 return 0;
668 } 666 }
669 first_block += EXT2_BLOCKS_PER_GROUP(sb); 667 first_block += EXT2_BLOCKS_PER_GROUP(sb);
670 gdp++;
671 } 668 }
672 return 1; 669 return 1;
673} 670}
@@ -750,6 +747,7 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
750 unsigned long logic_sb_block; 747 unsigned long logic_sb_block;
751 unsigned long offset = 0; 748 unsigned long offset = 0;
752 unsigned long def_mount_opts; 749 unsigned long def_mount_opts;
750 long ret = -EINVAL;
753 int blocksize = BLOCK_SIZE; 751 int blocksize = BLOCK_SIZE;
754 int db_count; 752 int db_count;
755 int i, j; 753 int i, j;
@@ -820,10 +818,10 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
820 818
821 if (le16_to_cpu(sbi->s_es->s_errors) == EXT2_ERRORS_PANIC) 819 if (le16_to_cpu(sbi->s_es->s_errors) == EXT2_ERRORS_PANIC)
822 set_opt(sbi->s_mount_opt, ERRORS_PANIC); 820 set_opt(sbi->s_mount_opt, ERRORS_PANIC);
823 else if (le16_to_cpu(sbi->s_es->s_errors) == EXT2_ERRORS_RO) 821 else if (le16_to_cpu(sbi->s_es->s_errors) == EXT2_ERRORS_CONTINUE)
824 set_opt(sbi->s_mount_opt, ERRORS_RO);
825 else
826 set_opt(sbi->s_mount_opt, ERRORS_CONT); 822 set_opt(sbi->s_mount_opt, ERRORS_CONT);
823 else
824 set_opt(sbi->s_mount_opt, ERRORS_RO);
827 825
828 sbi->s_resuid = le16_to_cpu(es->s_def_resuid); 826 sbi->s_resuid = le16_to_cpu(es->s_def_resuid);
829 sbi->s_resgid = le16_to_cpu(es->s_def_resgid); 827 sbi->s_resgid = le16_to_cpu(es->s_def_resgid);
@@ -868,8 +866,7 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
868 866
869 blocksize = BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size); 867 blocksize = BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size);
870 868
871 if ((ext2_use_xip(sb)) && ((blocksize != PAGE_SIZE) || 869 if (ext2_use_xip(sb) && blocksize != PAGE_SIZE) {
872 (sb->s_blocksize != blocksize))) {
873 if (!silent) 870 if (!silent)
874 printk("XIP: Unsupported blocksize\n"); 871 printk("XIP: Unsupported blocksize\n");
875 goto failed_mount; 872 goto failed_mount;
@@ -1046,19 +1043,24 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
1046 sb->s_op = &ext2_sops; 1043 sb->s_op = &ext2_sops;
1047 sb->s_export_op = &ext2_export_ops; 1044 sb->s_export_op = &ext2_export_ops;
1048 sb->s_xattr = ext2_xattr_handlers; 1045 sb->s_xattr = ext2_xattr_handlers;
1049 root = iget(sb, EXT2_ROOT_INO); 1046 root = ext2_iget(sb, EXT2_ROOT_INO);
1050 sb->s_root = d_alloc_root(root); 1047 if (IS_ERR(root)) {
1051 if (!sb->s_root) { 1048 ret = PTR_ERR(root);
1052 iput(root);
1053 printk(KERN_ERR "EXT2-fs: get root inode failed\n");
1054 goto failed_mount3; 1049 goto failed_mount3;
1055 } 1050 }
1056 if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) { 1051 if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
1057 dput(sb->s_root); 1052 iput(root);
1058 sb->s_root = NULL;
1059 printk(KERN_ERR "EXT2-fs: corrupt root inode, run e2fsck\n"); 1053 printk(KERN_ERR "EXT2-fs: corrupt root inode, run e2fsck\n");
1060 goto failed_mount3; 1054 goto failed_mount3;
1061 } 1055 }
1056
1057 sb->s_root = d_alloc_root(root);
1058 if (!sb->s_root) {
1059 iput(root);
1060 printk(KERN_ERR "EXT2-fs: get root inode failed\n");
1061 ret = -ENOMEM;
1062 goto failed_mount3;
1063 }
1062 if (EXT2_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_HAS_JOURNAL)) 1064 if (EXT2_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_HAS_JOURNAL))
1063 ext2_warning(sb, __FUNCTION__, 1065 ext2_warning(sb, __FUNCTION__,
1064 "mounting ext3 filesystem as ext2"); 1066 "mounting ext3 filesystem as ext2");
@@ -1085,7 +1087,7 @@ failed_mount:
1085failed_sbi: 1087failed_sbi:
1086 sb->s_fs_info = NULL; 1088 sb->s_fs_info = NULL;
1087 kfree(sbi); 1089 kfree(sbi);
1088 return -EINVAL; 1090 return ret;
1089} 1091}
1090 1092
1091static void ext2_commit_super (struct super_block * sb, 1093static void ext2_commit_super (struct super_block * sb,
diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
index a8ba7e831278..da0cb2c0e437 100644
--- a/fs/ext3/balloc.c
+++ b/fs/ext3/balloc.c
@@ -80,13 +80,57 @@ struct ext3_group_desc * ext3_get_group_desc(struct super_block * sb,
80 return desc + offset; 80 return desc + offset;
81} 81}
82 82
83static int ext3_valid_block_bitmap(struct super_block *sb,
84 struct ext3_group_desc *desc,
85 unsigned int block_group,
86 struct buffer_head *bh)
87{
88 ext3_grpblk_t offset;
89 ext3_grpblk_t next_zero_bit;
90 ext3_fsblk_t bitmap_blk;
91 ext3_fsblk_t group_first_block;
92
93 group_first_block = ext3_group_first_block_no(sb, block_group);
94
95 /* check whether block bitmap block number is set */
96 bitmap_blk = le32_to_cpu(desc->bg_block_bitmap);
97 offset = bitmap_blk - group_first_block;
98 if (!ext3_test_bit(offset, bh->b_data))
99 /* bad block bitmap */
100 goto err_out;
101
102 /* check whether the inode bitmap block number is set */
103 bitmap_blk = le32_to_cpu(desc->bg_inode_bitmap);
104 offset = bitmap_blk - group_first_block;
105 if (!ext3_test_bit(offset, bh->b_data))
106 /* bad block bitmap */
107 goto err_out;
108
109 /* check whether the inode table block number is set */
110 bitmap_blk = le32_to_cpu(desc->bg_inode_table);
111 offset = bitmap_blk - group_first_block;
112 next_zero_bit = ext3_find_next_zero_bit(bh->b_data,
113 offset + EXT3_SB(sb)->s_itb_per_group,
114 offset);
115 if (next_zero_bit >= offset + EXT3_SB(sb)->s_itb_per_group)
116 /* good bitmap for inode tables */
117 return 1;
118
119err_out:
120 ext3_error(sb, __FUNCTION__,
121 "Invalid block bitmap - "
122 "block_group = %d, block = %lu",
123 block_group, bitmap_blk);
124 return 0;
125}
126
83/** 127/**
84 * read_block_bitmap() 128 * read_block_bitmap()
85 * @sb: super block 129 * @sb: super block
86 * @block_group: given block group 130 * @block_group: given block group
87 * 131 *
88 * Read the bitmap for a given block_group, reading into the specified 132 * Read the bitmap for a given block_group,and validate the
89 * slot in the superblock's bitmap cache. 133 * bits for block/inode/inode tables are set in the bitmaps
90 * 134 *
91 * Return buffer_head on success or NULL in case of failure. 135 * Return buffer_head on success or NULL in case of failure.
92 */ 136 */
@@ -95,17 +139,35 @@ read_block_bitmap(struct super_block *sb, unsigned int block_group)
95{ 139{
96 struct ext3_group_desc * desc; 140 struct ext3_group_desc * desc;
97 struct buffer_head * bh = NULL; 141 struct buffer_head * bh = NULL;
142 ext3_fsblk_t bitmap_blk;
98 143
99 desc = ext3_get_group_desc (sb, block_group, NULL); 144 desc = ext3_get_group_desc(sb, block_group, NULL);
100 if (!desc) 145 if (!desc)
101 goto error_out; 146 return NULL;
102 bh = sb_bread(sb, le32_to_cpu(desc->bg_block_bitmap)); 147 bitmap_blk = le32_to_cpu(desc->bg_block_bitmap);
103 if (!bh) 148 bh = sb_getblk(sb, bitmap_blk);
104 ext3_error (sb, "read_block_bitmap", 149 if (unlikely(!bh)) {
150 ext3_error(sb, __FUNCTION__,
105 "Cannot read block bitmap - " 151 "Cannot read block bitmap - "
106 "block_group = %d, block_bitmap = %u", 152 "block_group = %d, block_bitmap = %u",
107 block_group, le32_to_cpu(desc->bg_block_bitmap)); 153 block_group, le32_to_cpu(desc->bg_block_bitmap));
108error_out: 154 return NULL;
155 }
156 if (likely(bh_uptodate_or_lock(bh)))
157 return bh;
158
159 if (bh_submit_read(bh) < 0) {
160 brelse(bh);
161 ext3_error(sb, __FUNCTION__,
162 "Cannot read block bitmap - "
163 "block_group = %d, block_bitmap = %u",
164 block_group, le32_to_cpu(desc->bg_block_bitmap));
165 return NULL;
166 }
167 if (!ext3_valid_block_bitmap(sb, desc, block_group, bh)) {
168 brelse(bh);
169 return NULL;
170 }
109 return bh; 171 return bh;
110} 172}
111/* 173/*
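The rework replaces the unconditional sb_bread() with a get-then-read sequence so the bitmap can be validated before it is trusted. A minimal sketch of that buffer pattern, assuming the usual helper semantics (bh_uptodate_or_lock() returns non-zero if the buffer is already uptodate, otherwise returns zero with the buffer locked; bh_submit_read() issues the read on a locked buffer and waits); read_and_check is a placeholder name:

static struct buffer_head *read_and_check(struct super_block *sb, sector_t blk)
{
	struct buffer_head *bh = sb_getblk(sb, blk);	/* maps the block, no I/O yet */

	if (unlikely(!bh))
		return NULL;
	if (!bh_uptodate_or_lock(bh) && bh_submit_read(bh) < 0) {
		brelse(bh);
		return NULL;		/* read failed */
	}
	return bh;			/* caller validates the contents before using them */
}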
@@ -468,11 +530,13 @@ do_more:
468 in_range (block, le32_to_cpu(desc->bg_inode_table), 530 in_range (block, le32_to_cpu(desc->bg_inode_table),
469 sbi->s_itb_per_group) || 531 sbi->s_itb_per_group) ||
470 in_range (block + count - 1, le32_to_cpu(desc->bg_inode_table), 532 in_range (block + count - 1, le32_to_cpu(desc->bg_inode_table),
471 sbi->s_itb_per_group)) 533 sbi->s_itb_per_group)) {
472 ext3_error (sb, "ext3_free_blocks", 534 ext3_error (sb, "ext3_free_blocks",
473 "Freeing blocks in system zones - " 535 "Freeing blocks in system zones - "
474 "Block = "E3FSBLK", count = %lu", 536 "Block = "E3FSBLK", count = %lu",
475 block, count); 537 block, count);
538 goto error_return;
539 }
476 540
477 /* 541 /*
478 * We are about to start releasing blocks in the bitmap, 542 * We are about to start releasing blocks in the bitmap,
@@ -566,9 +630,7 @@ do_more:
566 jbd_unlock_bh_state(bitmap_bh); 630 jbd_unlock_bh_state(bitmap_bh);
567 631
568 spin_lock(sb_bgl_lock(sbi, block_group)); 632 spin_lock(sb_bgl_lock(sbi, block_group));
569 desc->bg_free_blocks_count = 633 le16_add_cpu(&desc->bg_free_blocks_count, group_freed);
570 cpu_to_le16(le16_to_cpu(desc->bg_free_blocks_count) +
571 group_freed);
572 spin_unlock(sb_bgl_lock(sbi, block_group)); 634 spin_unlock(sb_bgl_lock(sbi, block_group));
573 percpu_counter_add(&sbi->s_freeblocks_counter, count); 635 percpu_counter_add(&sbi->s_freeblocks_counter, count);
574 636
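Several hunks in this commit switch open-coded little-endian read-modify-write sequences to the le*_add_cpu() helpers. For reference, they are thin wrappers roughly equivalent to the following (sketch of the common byteorder helpers, not part of this diff):

static inline void le16_add_cpu(__le16 *var, u16 val)
{
	*var = cpu_to_le16(le16_to_cpu(*var) + val);
}

static inline void le32_add_cpu(__le32 *var, u32 val)
{
	*var = cpu_to_le32(le32_to_cpu(*var) + val);
}

So le16_add_cpu(&desc->bg_free_blocks_count, group_freed) is the old expression in shorter form, and passing a negative value (as in the -num cases below) gives the decrement.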
@@ -1508,7 +1570,7 @@ retry_alloc:
1508 1570
1509 /* 1571 /*
1510 * Now search the rest of the groups. We assume that 1572 * Now search the rest of the groups. We assume that
1511 * i and gdp correctly point to the last group visited. 1573 * group_no and gdp correctly point to the last group visited.
1512 */ 1574 */
1513 for (bgi = 0; bgi < ngroups; bgi++) { 1575 for (bgi = 0; bgi < ngroups; bgi++) {
1514 group_no++; 1576 group_no++;
@@ -1575,11 +1637,13 @@ allocated:
1575 in_range(ret_block, le32_to_cpu(gdp->bg_inode_table), 1637 in_range(ret_block, le32_to_cpu(gdp->bg_inode_table),
1576 EXT3_SB(sb)->s_itb_per_group) || 1638 EXT3_SB(sb)->s_itb_per_group) ||
1577 in_range(ret_block + num - 1, le32_to_cpu(gdp->bg_inode_table), 1639 in_range(ret_block + num - 1, le32_to_cpu(gdp->bg_inode_table),
1578 EXT3_SB(sb)->s_itb_per_group)) 1640 EXT3_SB(sb)->s_itb_per_group)) {
1579 ext3_error(sb, "ext3_new_block", 1641 ext3_error(sb, "ext3_new_block",
1580 "Allocating block in system zone - " 1642 "Allocating block in system zone - "
1581 "blocks from "E3FSBLK", length %lu", 1643 "blocks from "E3FSBLK", length %lu",
1582 ret_block, num); 1644 ret_block, num);
1645 goto out;
1646 }
1583 1647
1584 performed_allocation = 1; 1648 performed_allocation = 1;
1585 1649
@@ -1630,8 +1694,7 @@ allocated:
1630 ret_block, goal_hits, goal_attempts); 1694 ret_block, goal_hits, goal_attempts);
1631 1695
1632 spin_lock(sb_bgl_lock(sbi, group_no)); 1696 spin_lock(sb_bgl_lock(sbi, group_no));
1633 gdp->bg_free_blocks_count = 1697 le16_add_cpu(&gdp->bg_free_blocks_count, -num);
1634 cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count)-num);
1635 spin_unlock(sb_bgl_lock(sbi, group_no)); 1698 spin_unlock(sb_bgl_lock(sbi, group_no));
1636 percpu_counter_sub(&sbi->s_freeblocks_counter, num); 1699 percpu_counter_sub(&sbi->s_freeblocks_counter, num);
1637 1700
@@ -1782,11 +1845,7 @@ static unsigned long ext3_bg_num_gdb_meta(struct super_block *sb, int group)
1782 1845
1783static unsigned long ext3_bg_num_gdb_nometa(struct super_block *sb, int group) 1846static unsigned long ext3_bg_num_gdb_nometa(struct super_block *sb, int group)
1784{ 1847{
1785 if (EXT3_HAS_RO_COMPAT_FEATURE(sb, 1848 return ext3_bg_has_super(sb, group) ? EXT3_SB(sb)->s_gdb_count : 0;
1786 EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER) &&
1787 !ext3_group_sparse(group))
1788 return 0;
1789 return EXT3_SB(sb)->s_gdb_count;
1790} 1849}
1791 1850
1792/** 1851/**
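The collapsed ext3_bg_num_gdb_nometa() leans on ext3_bg_has_super() already encoding the sparse-superblock test; as far as I recall, that helper is essentially the following, which is why the old two-branch check and the new ternary return the same s_gdb_count-or-zero result:

/* sketch from memory, not part of this diff */
int ext3_bg_has_super(struct super_block *sb, int group)
{
	if (EXT3_HAS_RO_COMPAT_FEATURE(sb, EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER) &&
	    !ext3_group_sparse(group))
		return 0;
	return 1;
}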
diff --git a/fs/ext3/ialloc.c b/fs/ext3/ialloc.c
index 1bc8cd89c51d..4f4020c54683 100644
--- a/fs/ext3/ialloc.c
+++ b/fs/ext3/ialloc.c
@@ -164,11 +164,9 @@ void ext3_free_inode (handle_t *handle, struct inode * inode)
164 164
165 if (gdp) { 165 if (gdp) {
166 spin_lock(sb_bgl_lock(sbi, block_group)); 166 spin_lock(sb_bgl_lock(sbi, block_group));
167 gdp->bg_free_inodes_count = cpu_to_le16( 167 le16_add_cpu(&gdp->bg_free_inodes_count, 1);
168 le16_to_cpu(gdp->bg_free_inodes_count) + 1);
169 if (is_directory) 168 if (is_directory)
170 gdp->bg_used_dirs_count = cpu_to_le16( 169 le16_add_cpu(&gdp->bg_used_dirs_count, -1);
171 le16_to_cpu(gdp->bg_used_dirs_count) - 1);
172 spin_unlock(sb_bgl_lock(sbi, block_group)); 170 spin_unlock(sb_bgl_lock(sbi, block_group));
173 percpu_counter_inc(&sbi->s_freeinodes_counter); 171 percpu_counter_inc(&sbi->s_freeinodes_counter);
174 if (is_directory) 172 if (is_directory)
@@ -527,11 +525,9 @@ got:
527 err = ext3_journal_get_write_access(handle, bh2); 525 err = ext3_journal_get_write_access(handle, bh2);
528 if (err) goto fail; 526 if (err) goto fail;
529 spin_lock(sb_bgl_lock(sbi, group)); 527 spin_lock(sb_bgl_lock(sbi, group));
530 gdp->bg_free_inodes_count = 528 le16_add_cpu(&gdp->bg_free_inodes_count, -1);
531 cpu_to_le16(le16_to_cpu(gdp->bg_free_inodes_count) - 1);
532 if (S_ISDIR(mode)) { 529 if (S_ISDIR(mode)) {
533 gdp->bg_used_dirs_count = 530 le16_add_cpu(&gdp->bg_used_dirs_count, 1);
534 cpu_to_le16(le16_to_cpu(gdp->bg_used_dirs_count) + 1);
535 } 531 }
536 spin_unlock(sb_bgl_lock(sbi, group)); 532 spin_unlock(sb_bgl_lock(sbi, group));
537 BUFFER_TRACE(bh2, "call ext3_journal_dirty_metadata"); 533 BUFFER_TRACE(bh2, "call ext3_journal_dirty_metadata");
@@ -642,14 +638,15 @@ struct inode *ext3_orphan_get(struct super_block *sb, unsigned long ino)
642 unsigned long max_ino = le32_to_cpu(EXT3_SB(sb)->s_es->s_inodes_count); 638 unsigned long max_ino = le32_to_cpu(EXT3_SB(sb)->s_es->s_inodes_count);
643 unsigned long block_group; 639 unsigned long block_group;
644 int bit; 640 int bit;
645 struct buffer_head *bitmap_bh = NULL; 641 struct buffer_head *bitmap_bh;
646 struct inode *inode = NULL; 642 struct inode *inode = NULL;
643 long err = -EIO;
647 644
648 /* Error cases - e2fsck has already cleaned up for us */ 645 /* Error cases - e2fsck has already cleaned up for us */
649 if (ino > max_ino) { 646 if (ino > max_ino) {
650 ext3_warning(sb, __FUNCTION__, 647 ext3_warning(sb, __FUNCTION__,
651 "bad orphan ino %lu! e2fsck was run?", ino); 648 "bad orphan ino %lu! e2fsck was run?", ino);
652 goto out; 649 goto error;
653 } 650 }
654 651
655 block_group = (ino - 1) / EXT3_INODES_PER_GROUP(sb); 652 block_group = (ino - 1) / EXT3_INODES_PER_GROUP(sb);
@@ -658,38 +655,49 @@ struct inode *ext3_orphan_get(struct super_block *sb, unsigned long ino)
658 if (!bitmap_bh) { 655 if (!bitmap_bh) {
659 ext3_warning(sb, __FUNCTION__, 656 ext3_warning(sb, __FUNCTION__,
660 "inode bitmap error for orphan %lu", ino); 657 "inode bitmap error for orphan %lu", ino);
661 goto out; 658 goto error;
662 } 659 }
663 660
664 /* Having the inode bit set should be a 100% indicator that this 661 /* Having the inode bit set should be a 100% indicator that this
665 * is a valid orphan (no e2fsck run on fs). Orphans also include 662 * is a valid orphan (no e2fsck run on fs). Orphans also include
666 * inodes that were being truncated, so we can't check i_nlink==0. 663 * inodes that were being truncated, so we can't check i_nlink==0.
667 */ 664 */
668 if (!ext3_test_bit(bit, bitmap_bh->b_data) || 665 if (!ext3_test_bit(bit, bitmap_bh->b_data))
669 !(inode = iget(sb, ino)) || is_bad_inode(inode) || 666 goto bad_orphan;
670 NEXT_ORPHAN(inode) > max_ino) { 667
671 ext3_warning(sb, __FUNCTION__, 668 inode = ext3_iget(sb, ino);
672 "bad orphan inode %lu! e2fsck was run?", ino); 669 if (IS_ERR(inode))
673 printk(KERN_NOTICE "ext3_test_bit(bit=%d, block=%llu) = %d\n", 670 goto iget_failed;
674 bit, (unsigned long long)bitmap_bh->b_blocknr, 671
675 ext3_test_bit(bit, bitmap_bh->b_data)); 672 if (NEXT_ORPHAN(inode) > max_ino)
676 printk(KERN_NOTICE "inode=%p\n", inode); 673 goto bad_orphan;
677 if (inode) { 674 brelse(bitmap_bh);
678 printk(KERN_NOTICE "is_bad_inode(inode)=%d\n", 675 return inode;
679 is_bad_inode(inode)); 676
680 printk(KERN_NOTICE "NEXT_ORPHAN(inode)=%u\n", 677iget_failed:
681 NEXT_ORPHAN(inode)); 678 err = PTR_ERR(inode);
682 printk(KERN_NOTICE "max_ino=%lu\n", max_ino); 679 inode = NULL;
683 } 680bad_orphan:
681 ext3_warning(sb, __FUNCTION__,
682 "bad orphan inode %lu! e2fsck was run?", ino);
683 printk(KERN_NOTICE "ext3_test_bit(bit=%d, block=%llu) = %d\n",
684 bit, (unsigned long long)bitmap_bh->b_blocknr,
685 ext3_test_bit(bit, bitmap_bh->b_data));
686 printk(KERN_NOTICE "inode=%p\n", inode);
687 if (inode) {
688 printk(KERN_NOTICE "is_bad_inode(inode)=%d\n",
689 is_bad_inode(inode));
690 printk(KERN_NOTICE "NEXT_ORPHAN(inode)=%u\n",
691 NEXT_ORPHAN(inode));
692 printk(KERN_NOTICE "max_ino=%lu\n", max_ino);
684 /* Avoid freeing blocks if we got a bad deleted inode */ 693 /* Avoid freeing blocks if we got a bad deleted inode */
685 if (inode && inode->i_nlink == 0) 694 if (inode->i_nlink == 0)
686 inode->i_blocks = 0; 695 inode->i_blocks = 0;
687 iput(inode); 696 iput(inode);
688 inode = NULL;
689 } 697 }
690out:
691 brelse(bitmap_bh); 698 brelse(bitmap_bh);
692 return inode; 699error:
700 return ERR_PTR(err);
693} 701}
694 702
695unsigned long ext3_count_free_inodes (struct super_block * sb) 703unsigned long ext3_count_free_inodes (struct super_block * sb)
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index 9b162cd6c16c..eb95670a27eb 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -439,16 +439,14 @@ static ext3_fsblk_t ext3_find_near(struct inode *inode, Indirect *ind)
439 * ext3_find_goal - find a prefered place for allocation. 439 * ext3_find_goal - find a prefered place for allocation.
440 * @inode: owner 440 * @inode: owner
441 * @block: block we want 441 * @block: block we want
442 * @chain: chain of indirect blocks
443 * @partial: pointer to the last triple within a chain 442 * @partial: pointer to the last triple within a chain
444 * @goal: place to store the result.
445 * 443 *
446 * Normally this function find the prefered place for block allocation, 444 * Normally this function find the prefered place for block allocation,
447 * stores it in *@goal and returns zero. 445 * returns it.
448 */ 446 */
449 447
450static ext3_fsblk_t ext3_find_goal(struct inode *inode, long block, 448static ext3_fsblk_t ext3_find_goal(struct inode *inode, long block,
451 Indirect chain[4], Indirect *partial) 449 Indirect *partial)
452{ 450{
453 struct ext3_block_alloc_info *block_i; 451 struct ext3_block_alloc_info *block_i;
454 452
@@ -884,7 +882,7 @@ int ext3_get_blocks_handle(handle_t *handle, struct inode *inode,
884 if (S_ISREG(inode->i_mode) && (!ei->i_block_alloc_info)) 882 if (S_ISREG(inode->i_mode) && (!ei->i_block_alloc_info))
885 ext3_init_block_alloc_info(inode); 883 ext3_init_block_alloc_info(inode);
886 884
887 goal = ext3_find_goal(inode, iblock, chain, partial); 885 goal = ext3_find_goal(inode, iblock, partial);
888 886
889 /* the number of blocks need to allocate for [d,t]indirect blocks */ 887 /* the number of blocks need to allocate for [d,t]indirect blocks */
890 indirect_blks = (chain + depth) - partial - 1; 888 indirect_blks = (chain + depth) - partial - 1;
@@ -941,55 +939,45 @@ out:
941 return err; 939 return err;
942} 940}
943 941
944#define DIO_CREDITS (EXT3_RESERVE_TRANS_BLOCKS + 32) 942/* Maximum number of blocks we map for direct IO at once. */
943#define DIO_MAX_BLOCKS 4096
944/*
945 * Number of credits we need for writing DIO_MAX_BLOCKS:
946 * We need sb + group descriptor + bitmap + inode -> 4
947 * For B blocks with A block pointers per block we need:
948 * 1 (triple ind.) + (B/A/A + 2) (doubly ind.) + (B/A + 2) (indirect).
949 * If we plug in 4096 for B and 256 for A (for 1KB block size), we get 25.
950 */
951#define DIO_CREDITS 25
945 952
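Spelling out the arithmetic in the comment above: with B = 4096 and A = 256, B/A/A is 0 with integer division (doubly indirect level costs 0 + 2 = 2), B/A is 16 (indirect level costs 16 + 2 = 18), the triple indirect block adds 1, and the sb + group descriptor + bitmap + inode account for 4, so the total is 4 + 1 + 2 + 18 = 25, hence DIO_CREDITS 25.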
946static int ext3_get_block(struct inode *inode, sector_t iblock, 953static int ext3_get_block(struct inode *inode, sector_t iblock,
947 struct buffer_head *bh_result, int create) 954 struct buffer_head *bh_result, int create)
948{ 955{
949 handle_t *handle = ext3_journal_current_handle(); 956 handle_t *handle = ext3_journal_current_handle();
950 int ret = 0; 957 int ret = 0, started = 0;
951 unsigned max_blocks = bh_result->b_size >> inode->i_blkbits; 958 unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
952 959
953 if (!create) 960 if (create && !handle) { /* Direct IO write... */
954 goto get_block; /* A read */ 961 if (max_blocks > DIO_MAX_BLOCKS)
955 962 max_blocks = DIO_MAX_BLOCKS;
956 if (max_blocks == 1) 963 handle = ext3_journal_start(inode, DIO_CREDITS +
957 goto get_block; /* A single block get */ 964 2 * EXT3_QUOTA_TRANS_BLOCKS(inode->i_sb));
958 965 if (IS_ERR(handle)) {
959 if (handle->h_transaction->t_state == T_LOCKED) {
960 /*
961 * Huge direct-io writes can hold off commits for long
962 * periods of time. Let this commit run.
963 */
964 ext3_journal_stop(handle);
965 handle = ext3_journal_start(inode, DIO_CREDITS);
966 if (IS_ERR(handle))
967 ret = PTR_ERR(handle); 966 ret = PTR_ERR(handle);
968 goto get_block; 967 goto out;
969 }
970
971 if (handle->h_buffer_credits <= EXT3_RESERVE_TRANS_BLOCKS) {
972 /*
973 * Getting low on buffer credits...
974 */
975 ret = ext3_journal_extend(handle, DIO_CREDITS);
976 if (ret > 0) {
977 /*
978 * Couldn't extend the transaction. Start a new one.
979 */
980 ret = ext3_journal_restart(handle, DIO_CREDITS);
981 } 968 }
969 started = 1;
982 } 970 }
983 971
984get_block: 972 ret = ext3_get_blocks_handle(handle, inode, iblock,
985 if (ret == 0) {
986 ret = ext3_get_blocks_handle(handle, inode, iblock,
987 max_blocks, bh_result, create, 0); 973 max_blocks, bh_result, create, 0);
988 if (ret > 0) { 974 if (ret > 0) {
989 bh_result->b_size = (ret << inode->i_blkbits); 975 bh_result->b_size = (ret << inode->i_blkbits);
990 ret = 0; 976 ret = 0;
991 }
992 } 977 }
978 if (started)
979 ext3_journal_stop(handle);
980out:
993 return ret; 981 return ret;
994} 982}
995 983
@@ -1680,7 +1668,8 @@ static int ext3_releasepage(struct page *page, gfp_t wait)
1680 * if the machine crashes during the write. 1668 * if the machine crashes during the write.
1681 * 1669 *
1682 * If the O_DIRECT write is intantiating holes inside i_size and the machine 1670 * If the O_DIRECT write is intantiating holes inside i_size and the machine
1683 * crashes then stale disk data _may_ be exposed inside the file. 1671 * crashes then stale disk data _may_ be exposed inside the file. But current
1672 * VFS code falls back into buffered path in that case so we are safe.
1684 */ 1673 */
1685static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb, 1674static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb,
1686 const struct iovec *iov, loff_t offset, 1675 const struct iovec *iov, loff_t offset,
@@ -1689,7 +1678,7 @@ static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb,
1689 struct file *file = iocb->ki_filp; 1678 struct file *file = iocb->ki_filp;
1690 struct inode *inode = file->f_mapping->host; 1679 struct inode *inode = file->f_mapping->host;
1691 struct ext3_inode_info *ei = EXT3_I(inode); 1680 struct ext3_inode_info *ei = EXT3_I(inode);
1692 handle_t *handle = NULL; 1681 handle_t *handle;
1693 ssize_t ret; 1682 ssize_t ret;
1694 int orphan = 0; 1683 int orphan = 0;
1695 size_t count = iov_length(iov, nr_segs); 1684 size_t count = iov_length(iov, nr_segs);
@@ -1697,17 +1686,21 @@ static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb,
1697 if (rw == WRITE) { 1686 if (rw == WRITE) {
1698 loff_t final_size = offset + count; 1687 loff_t final_size = offset + count;
1699 1688
1700 handle = ext3_journal_start(inode, DIO_CREDITS);
1701 if (IS_ERR(handle)) {
1702 ret = PTR_ERR(handle);
1703 goto out;
1704 }
1705 if (final_size > inode->i_size) { 1689 if (final_size > inode->i_size) {
1690 /* Credits for sb + inode write */
1691 handle = ext3_journal_start(inode, 2);
1692 if (IS_ERR(handle)) {
1693 ret = PTR_ERR(handle);
1694 goto out;
1695 }
1706 ret = ext3_orphan_add(handle, inode); 1696 ret = ext3_orphan_add(handle, inode);
1707 if (ret) 1697 if (ret) {
1708 goto out_stop; 1698 ext3_journal_stop(handle);
1699 goto out;
1700 }
1709 orphan = 1; 1701 orphan = 1;
1710 ei->i_disksize = inode->i_size; 1702 ei->i_disksize = inode->i_size;
1703 ext3_journal_stop(handle);
1711 } 1704 }
1712 } 1705 }
1713 1706
@@ -1715,18 +1708,21 @@ static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb,
1715 offset, nr_segs, 1708 offset, nr_segs,
1716 ext3_get_block, NULL); 1709 ext3_get_block, NULL);
1717 1710
1718 /* 1711 if (orphan) {
1719 * Reacquire the handle: ext3_get_block() can restart the transaction
1720 */
1721 handle = ext3_journal_current_handle();
1722
1723out_stop:
1724 if (handle) {
1725 int err; 1712 int err;
1726 1713
1727 if (orphan && inode->i_nlink) 1714 /* Credits for sb + inode write */
1715 handle = ext3_journal_start(inode, 2);
1716 if (IS_ERR(handle)) {
1717 /* This is really bad luck. We've written the data
1718 * but cannot extend i_size. Bail out and pretend
1719 * the write failed... */
1720 ret = PTR_ERR(handle);
1721 goto out;
1722 }
1723 if (inode->i_nlink)
1728 ext3_orphan_del(handle, inode); 1724 ext3_orphan_del(handle, inode);
1729 if (orphan && ret > 0) { 1725 if (ret > 0) {
1730 loff_t end = offset + ret; 1726 loff_t end = offset + ret;
1731 if (end > inode->i_size) { 1727 if (end > inode->i_size) {
1732 ei->i_disksize = end; 1728 ei->i_disksize = end;
@@ -1845,7 +1841,7 @@ static int ext3_block_truncate_page(handle_t *handle, struct page *page,
1845 */ 1841 */
1846 if (!page_has_buffers(page) && test_opt(inode->i_sb, NOBH) && 1842 if (!page_has_buffers(page) && test_opt(inode->i_sb, NOBH) &&
1847 ext3_should_writeback_data(inode) && PageUptodate(page)) { 1843 ext3_should_writeback_data(inode) && PageUptodate(page)) {
1848 zero_user_page(page, offset, length, KM_USER0); 1844 zero_user(page, offset, length);
1849 set_page_dirty(page); 1845 set_page_dirty(page);
1850 goto unlock; 1846 goto unlock;
1851 } 1847 }
@@ -1898,7 +1894,7 @@ static int ext3_block_truncate_page(handle_t *handle, struct page *page,
1898 goto unlock; 1894 goto unlock;
1899 } 1895 }
1900 1896
1901 zero_user_page(page, offset, length, KM_USER0); 1897 zero_user(page, offset, length);
1902 BUFFER_TRACE(bh, "zeroed end of block"); 1898 BUFFER_TRACE(bh, "zeroed end of block");
1903 1899
1904 err = 0; 1900 err = 0;
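For context on the zero_user_page() to zero_user() switch: the new helper drops the explicit kmap slot argument, and if memory serves it is defined in <linux/highmem.h> of this era roughly as:

/* sketch from memory, not part of this diff */
static inline void zero_user(struct page *page, unsigned start, unsigned size)
{
	zero_user_segments(page, start, start + size, 0, 0);
}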
@@ -2658,21 +2654,31 @@ void ext3_get_inode_flags(struct ext3_inode_info *ei)
2658 ei->i_flags |= EXT3_DIRSYNC_FL; 2654 ei->i_flags |= EXT3_DIRSYNC_FL;
2659} 2655}
2660 2656
2661void ext3_read_inode(struct inode * inode) 2657struct inode *ext3_iget(struct super_block *sb, unsigned long ino)
2662{ 2658{
2663 struct ext3_iloc iloc; 2659 struct ext3_iloc iloc;
2664 struct ext3_inode *raw_inode; 2660 struct ext3_inode *raw_inode;
2665 struct ext3_inode_info *ei = EXT3_I(inode); 2661 struct ext3_inode_info *ei;
2666 struct buffer_head *bh; 2662 struct buffer_head *bh;
2663 struct inode *inode;
2664 long ret;
2667 int block; 2665 int block;
2668 2666
2667 inode = iget_locked(sb, ino);
2668 if (!inode)
2669 return ERR_PTR(-ENOMEM);
2670 if (!(inode->i_state & I_NEW))
2671 return inode;
2672
2673 ei = EXT3_I(inode);
2669#ifdef CONFIG_EXT3_FS_POSIX_ACL 2674#ifdef CONFIG_EXT3_FS_POSIX_ACL
2670 ei->i_acl = EXT3_ACL_NOT_CACHED; 2675 ei->i_acl = EXT3_ACL_NOT_CACHED;
2671 ei->i_default_acl = EXT3_ACL_NOT_CACHED; 2676 ei->i_default_acl = EXT3_ACL_NOT_CACHED;
2672#endif 2677#endif
2673 ei->i_block_alloc_info = NULL; 2678 ei->i_block_alloc_info = NULL;
2674 2679
2675 if (__ext3_get_inode_loc(inode, &iloc, 0)) 2680 ret = __ext3_get_inode_loc(inode, &iloc, 0);
2681 if (ret < 0)
2676 goto bad_inode; 2682 goto bad_inode;
2677 bh = iloc.bh; 2683 bh = iloc.bh;
2678 raw_inode = ext3_raw_inode(&iloc); 2684 raw_inode = ext3_raw_inode(&iloc);
@@ -2703,6 +2709,7 @@ void ext3_read_inode(struct inode * inode)
2703 !(EXT3_SB(inode->i_sb)->s_mount_state & EXT3_ORPHAN_FS)) { 2709 !(EXT3_SB(inode->i_sb)->s_mount_state & EXT3_ORPHAN_FS)) {
2704 /* this inode is deleted */ 2710 /* this inode is deleted */
2705 brelse (bh); 2711 brelse (bh);
2712 ret = -ESTALE;
2706 goto bad_inode; 2713 goto bad_inode;
2707 } 2714 }
2708 /* The only unlinked inodes we let through here have 2715 /* The only unlinked inodes we let through here have
@@ -2746,6 +2753,7 @@ void ext3_read_inode(struct inode * inode)
2746 if (EXT3_GOOD_OLD_INODE_SIZE + ei->i_extra_isize > 2753 if (EXT3_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
2747 EXT3_INODE_SIZE(inode->i_sb)) { 2754 EXT3_INODE_SIZE(inode->i_sb)) {
2748 brelse (bh); 2755 brelse (bh);
2756 ret = -EIO;
2749 goto bad_inode; 2757 goto bad_inode;
2750 } 2758 }
2751 if (ei->i_extra_isize == 0) { 2759 if (ei->i_extra_isize == 0) {
@@ -2787,11 +2795,12 @@ void ext3_read_inode(struct inode * inode)
2787 } 2795 }
2788 brelse (iloc.bh); 2796 brelse (iloc.bh);
2789 ext3_set_inode_flags(inode); 2797 ext3_set_inode_flags(inode);
2790 return; 2798 unlock_new_inode(inode);
2799 return inode;
2791 2800
2792bad_inode: 2801bad_inode:
2793 make_bad_inode(inode); 2802 iget_failed(inode);
2794 return; 2803 return ERR_PTR(ret);
2795} 2804}
2796 2805
2797/* 2806/*
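This is the core pattern of the series: each read_inode-style routine becomes a foo_iget() that drives the inode cache itself. A minimal sketch of the shape these conversions follow (foo_iget and foo_fill_inode_from_disk are placeholders, the rest is the stock icache API):

struct inode *foo_iget(struct super_block *sb, unsigned long ino)
{
	struct inode *inode;
	long ret;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);	/* allocation failure */
	if (!(inode->i_state & I_NEW))
		return inode;			/* cache hit, already initialized */

	ret = foo_fill_inode_from_disk(inode);	/* fs-specific, may fail with -EIO, -ESTALE, ... */
	if (ret) {
		iget_failed(inode);		/* unlock, mark bad, drop the reference */
		return ERR_PTR(ret);
	}
	unlock_new_inode(inode);		/* publish the now-valid inode */
	return inode;
}

Callers that used to check !inode || is_bad_inode(inode) now just test IS_ERR(), and the ->read_inode entry disappears from the super_operations, as the ext2 and ext3 hunks above show.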
diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c
index 4ab6f76e63d0..dec3e0d88ab1 100644
--- a/fs/ext3/namei.c
+++ b/fs/ext3/namei.c
@@ -860,14 +860,10 @@ static struct buffer_head * ext3_find_entry (struct dentry *dentry,
860 int nblocks, i, err; 860 int nblocks, i, err;
861 struct inode *dir = dentry->d_parent->d_inode; 861 struct inode *dir = dentry->d_parent->d_inode;
862 int namelen; 862 int namelen;
863 const u8 *name;
864 unsigned blocksize;
865 863
866 *res_dir = NULL; 864 *res_dir = NULL;
867 sb = dir->i_sb; 865 sb = dir->i_sb;
868 blocksize = sb->s_blocksize;
869 namelen = dentry->d_name.len; 866 namelen = dentry->d_name.len;
870 name = dentry->d_name.name;
871 if (namelen > EXT3_NAME_LEN) 867 if (namelen > EXT3_NAME_LEN)
872 return NULL; 868 return NULL;
873 if (is_dx(dir)) { 869 if (is_dx(dir)) {
@@ -1041,17 +1037,11 @@ static struct dentry *ext3_lookup(struct inode * dir, struct dentry *dentry, str
1041 if (!ext3_valid_inum(dir->i_sb, ino)) { 1037 if (!ext3_valid_inum(dir->i_sb, ino)) {
1042 ext3_error(dir->i_sb, "ext3_lookup", 1038 ext3_error(dir->i_sb, "ext3_lookup",
1043 "bad inode number: %lu", ino); 1039 "bad inode number: %lu", ino);
1044 inode = NULL; 1040 return ERR_PTR(-EIO);
1045 } else
1046 inode = iget(dir->i_sb, ino);
1047
1048 if (!inode)
1049 return ERR_PTR(-EACCES);
1050
1051 if (is_bad_inode(inode)) {
1052 iput(inode);
1053 return ERR_PTR(-ENOENT);
1054 } 1041 }
1042 inode = ext3_iget(dir->i_sb, ino);
1043 if (IS_ERR(inode))
1044 return ERR_CAST(inode);
1055 } 1045 }
1056 return d_splice_alias(inode, dentry); 1046 return d_splice_alias(inode, dentry);
1057} 1047}
@@ -1080,18 +1070,13 @@ struct dentry *ext3_get_parent(struct dentry *child)
1080 if (!ext3_valid_inum(child->d_inode->i_sb, ino)) { 1070 if (!ext3_valid_inum(child->d_inode->i_sb, ino)) {
1081 ext3_error(child->d_inode->i_sb, "ext3_get_parent", 1071 ext3_error(child->d_inode->i_sb, "ext3_get_parent",
1082 "bad inode number: %lu", ino); 1072 "bad inode number: %lu", ino);
1083 inode = NULL; 1073 return ERR_PTR(-EIO);
1084 } else
1085 inode = iget(child->d_inode->i_sb, ino);
1086
1087 if (!inode)
1088 return ERR_PTR(-EACCES);
1089
1090 if (is_bad_inode(inode)) {
1091 iput(inode);
1092 return ERR_PTR(-ENOENT);
1093 } 1074 }
1094 1075
1076 inode = ext3_iget(child->d_inode->i_sb, ino);
1077 if (IS_ERR(inode))
1078 return ERR_CAST(inode);
1079
1095 parent = d_alloc_anon(inode); 1080 parent = d_alloc_anon(inode);
1096 if (!parent) { 1081 if (!parent) {
1097 iput(inode); 1082 iput(inode);
diff --git a/fs/ext3/resize.c b/fs/ext3/resize.c
index 44de1453c301..9397d779c43d 100644
--- a/fs/ext3/resize.c
+++ b/fs/ext3/resize.c
@@ -518,8 +518,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
518 EXT3_SB(sb)->s_gdb_count++; 518 EXT3_SB(sb)->s_gdb_count++;
519 kfree(o_group_desc); 519 kfree(o_group_desc);
520 520
521 es->s_reserved_gdt_blocks = 521 le16_add_cpu(&es->s_reserved_gdt_blocks, -1);
522 cpu_to_le16(le16_to_cpu(es->s_reserved_gdt_blocks) - 1);
523 ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh); 522 ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh);
524 523
525 return 0; 524 return 0;
@@ -795,12 +794,11 @@ int ext3_group_add(struct super_block *sb, struct ext3_new_group_data *input)
795 "No reserved GDT blocks, can't resize"); 794 "No reserved GDT blocks, can't resize");
796 return -EPERM; 795 return -EPERM;
797 } 796 }
798 inode = iget(sb, EXT3_RESIZE_INO); 797 inode = ext3_iget(sb, EXT3_RESIZE_INO);
799 if (!inode || is_bad_inode(inode)) { 798 if (IS_ERR(inode)) {
800 ext3_warning(sb, __FUNCTION__, 799 ext3_warning(sb, __FUNCTION__,
801 "Error opening resize inode"); 800 "Error opening resize inode");
802 iput(inode); 801 return PTR_ERR(inode);
803 return -ENOENT;
804 } 802 }
805 } 803 }
806 804
@@ -891,10 +889,8 @@ int ext3_group_add(struct super_block *sb, struct ext3_new_group_data *input)
891 * blocks/inodes before the group is live won't actually let us 889 * blocks/inodes before the group is live won't actually let us
892 * allocate the new space yet. 890 * allocate the new space yet.
893 */ 891 */
894 es->s_blocks_count = cpu_to_le32(le32_to_cpu(es->s_blocks_count) + 892 le32_add_cpu(&es->s_blocks_count, input->blocks_count);
895 input->blocks_count); 893 le32_add_cpu(&es->s_inodes_count, EXT3_INODES_PER_GROUP(sb));
896 es->s_inodes_count = cpu_to_le32(le32_to_cpu(es->s_inodes_count) +
897 EXT3_INODES_PER_GROUP(sb));
898 894
899 /* 895 /*
900 * We need to protect s_groups_count against other CPUs seeing 896 * We need to protect s_groups_count against other CPUs seeing
@@ -927,8 +923,7 @@ int ext3_group_add(struct super_block *sb, struct ext3_new_group_data *input)
927 923
928 /* Update the reserved block counts only once the new group is 924 /* Update the reserved block counts only once the new group is
929 * active. */ 925 * active. */
930 es->s_r_blocks_count = cpu_to_le32(le32_to_cpu(es->s_r_blocks_count) + 926 le32_add_cpu(&es->s_r_blocks_count, input->reserved_blocks);
931 input->reserved_blocks);
932 927
933 /* Update the free space counts */ 928 /* Update the free space counts */
934 percpu_counter_add(&sbi->s_freeblocks_counter, 929 percpu_counter_add(&sbi->s_freeblocks_counter,
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index f3675cc630e9..18769cc32377 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -575,16 +575,16 @@ static int ext3_show_options(struct seq_file *seq, struct vfsmount *vfs)
575 le16_to_cpu(es->s_def_resgid) != EXT3_DEF_RESGID) { 575 le16_to_cpu(es->s_def_resgid) != EXT3_DEF_RESGID) {
576 seq_printf(seq, ",resgid=%u", sbi->s_resgid); 576 seq_printf(seq, ",resgid=%u", sbi->s_resgid);
577 } 577 }
578 if (test_opt(sb, ERRORS_CONT)) { 578 if (test_opt(sb, ERRORS_RO)) {
579 int def_errors = le16_to_cpu(es->s_errors); 579 int def_errors = le16_to_cpu(es->s_errors);
580 580
581 if (def_errors == EXT3_ERRORS_PANIC || 581 if (def_errors == EXT3_ERRORS_PANIC ||
582 def_errors == EXT3_ERRORS_RO) { 582 def_errors == EXT3_ERRORS_CONTINUE) {
583 seq_puts(seq, ",errors=continue"); 583 seq_puts(seq, ",errors=remount-ro");
584 } 584 }
585 } 585 }
586 if (test_opt(sb, ERRORS_RO)) 586 if (test_opt(sb, ERRORS_CONT))
587 seq_puts(seq, ",errors=remount-ro"); 587 seq_puts(seq, ",errors=continue");
588 if (test_opt(sb, ERRORS_PANIC)) 588 if (test_opt(sb, ERRORS_PANIC))
589 seq_puts(seq, ",errors=panic"); 589 seq_puts(seq, ",errors=panic");
590 if (test_opt(sb, NO_UID32)) 590 if (test_opt(sb, NO_UID32))
@@ -649,11 +649,10 @@ static struct inode *ext3_nfs_get_inode(struct super_block *sb,
649 * Currently we don't know the generation for parent directory, so 649 * Currently we don't know the generation for parent directory, so
650 * a generation of 0 means "accept any" 650 * a generation of 0 means "accept any"
651 */ 651 */
652 inode = iget(sb, ino); 652 inode = ext3_iget(sb, ino);
653 if (inode == NULL) 653 if (IS_ERR(inode))
654 return ERR_PTR(-ENOMEM); 654 return ERR_CAST(inode);
655 if (is_bad_inode(inode) || 655 if (generation && inode->i_generation != generation) {
656 (generation && inode->i_generation != generation)) {
657 iput(inode); 656 iput(inode);
658 return ERR_PTR(-ESTALE); 657 return ERR_PTR(-ESTALE);
659 } 658 }
@@ -722,7 +721,6 @@ static struct quotactl_ops ext3_qctl_operations = {
722static const struct super_operations ext3_sops = { 721static const struct super_operations ext3_sops = {
723 .alloc_inode = ext3_alloc_inode, 722 .alloc_inode = ext3_alloc_inode,
724 .destroy_inode = ext3_destroy_inode, 723 .destroy_inode = ext3_destroy_inode,
725 .read_inode = ext3_read_inode,
726 .write_inode = ext3_write_inode, 724 .write_inode = ext3_write_inode,
727 .dirty_inode = ext3_dirty_inode, 725 .dirty_inode = ext3_dirty_inode,
728 .delete_inode = ext3_delete_inode, 726 .delete_inode = ext3_delete_inode,
@@ -1224,7 +1222,7 @@ static int ext3_setup_super(struct super_block *sb, struct ext3_super_block *es,
1224#endif 1222#endif
1225 if (!(__s16) le16_to_cpu(es->s_max_mnt_count)) 1223 if (!(__s16) le16_to_cpu(es->s_max_mnt_count))
1226 es->s_max_mnt_count = cpu_to_le16(EXT3_DFL_MAX_MNT_COUNT); 1224 es->s_max_mnt_count = cpu_to_le16(EXT3_DFL_MAX_MNT_COUNT);
1227 es->s_mnt_count=cpu_to_le16(le16_to_cpu(es->s_mnt_count) + 1); 1225 le16_add_cpu(&es->s_mnt_count, 1);
1228 es->s_mtime = cpu_to_le32(get_seconds()); 1226 es->s_mtime = cpu_to_le32(get_seconds());
1229 ext3_update_dynamic_rev(sb); 1227 ext3_update_dynamic_rev(sb);
1230 EXT3_SET_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER); 1228 EXT3_SET_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER);
@@ -1252,28 +1250,24 @@ static int ext3_setup_super(struct super_block *sb, struct ext3_super_block *es,
1252} 1250}
1253 1251
1254/* Called at mount-time, super-block is locked */ 1252/* Called at mount-time, super-block is locked */
1255static int ext3_check_descriptors (struct super_block * sb) 1253static int ext3_check_descriptors(struct super_block *sb)
1256{ 1254{
1257 struct ext3_sb_info *sbi = EXT3_SB(sb); 1255 struct ext3_sb_info *sbi = EXT3_SB(sb);
1258 ext3_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block); 1256 ext3_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block);
1259 ext3_fsblk_t last_block; 1257 ext3_fsblk_t last_block;
1260 struct ext3_group_desc * gdp = NULL;
1261 int desc_block = 0;
1262 int i; 1258 int i;
1263 1259
1264 ext3_debug ("Checking group descriptors"); 1260 ext3_debug ("Checking group descriptors");
1265 1261
1266 for (i = 0; i < sbi->s_groups_count; i++) 1262 for (i = 0; i < sbi->s_groups_count; i++) {
1267 { 1263 struct ext3_group_desc *gdp = ext3_get_group_desc(sb, i, NULL);
1264
1268 if (i == sbi->s_groups_count - 1) 1265 if (i == sbi->s_groups_count - 1)
1269 last_block = le32_to_cpu(sbi->s_es->s_blocks_count) - 1; 1266 last_block = le32_to_cpu(sbi->s_es->s_blocks_count) - 1;
1270 else 1267 else
1271 last_block = first_block + 1268 last_block = first_block +
1272 (EXT3_BLOCKS_PER_GROUP(sb) - 1); 1269 (EXT3_BLOCKS_PER_GROUP(sb) - 1);
1273 1270
1274 if ((i % EXT3_DESC_PER_BLOCK(sb)) == 0)
1275 gdp = (struct ext3_group_desc *)
1276 sbi->s_group_desc[desc_block++]->b_data;
1277 if (le32_to_cpu(gdp->bg_block_bitmap) < first_block || 1271 if (le32_to_cpu(gdp->bg_block_bitmap) < first_block ||
1278 le32_to_cpu(gdp->bg_block_bitmap) > last_block) 1272 le32_to_cpu(gdp->bg_block_bitmap) > last_block)
1279 { 1273 {
@@ -1306,7 +1300,6 @@ static int ext3_check_descriptors (struct super_block * sb)
1306 return 0; 1300 return 0;
1307 } 1301 }
1308 first_block += EXT3_BLOCKS_PER_GROUP(sb); 1302 first_block += EXT3_BLOCKS_PER_GROUP(sb);
1309 gdp++;
1310 } 1303 }
1311 1304
1312 sbi->s_es->s_free_blocks_count=cpu_to_le32(ext3_count_free_blocks(sb)); 1305 sbi->s_es->s_free_blocks_count=cpu_to_le32(ext3_count_free_blocks(sb));
@@ -1383,8 +1376,8 @@ static void ext3_orphan_cleanup (struct super_block * sb,
1383 while (es->s_last_orphan) { 1376 while (es->s_last_orphan) {
1384 struct inode *inode; 1377 struct inode *inode;
1385 1378
1386 if (!(inode = 1379 inode = ext3_orphan_get(sb, le32_to_cpu(es->s_last_orphan));
1387 ext3_orphan_get(sb, le32_to_cpu(es->s_last_orphan)))) { 1380 if (IS_ERR(inode)) {
1388 es->s_last_orphan = 0; 1381 es->s_last_orphan = 0;
1389 break; 1382 break;
1390 } 1383 }
@@ -1513,6 +1506,7 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
1513 int db_count; 1506 int db_count;
1514 int i; 1507 int i;
1515 int needs_recovery; 1508 int needs_recovery;
1509 int ret = -EINVAL;
1516 __le32 features; 1510 __le32 features;
1517 int err; 1511 int err;
1518 1512
@@ -1583,10 +1577,10 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
1583 1577
1584 if (le16_to_cpu(sbi->s_es->s_errors) == EXT3_ERRORS_PANIC) 1578 if (le16_to_cpu(sbi->s_es->s_errors) == EXT3_ERRORS_PANIC)
1585 set_opt(sbi->s_mount_opt, ERRORS_PANIC); 1579 set_opt(sbi->s_mount_opt, ERRORS_PANIC);
1586 else if (le16_to_cpu(sbi->s_es->s_errors) == EXT3_ERRORS_RO) 1580 else if (le16_to_cpu(sbi->s_es->s_errors) == EXT3_ERRORS_CONTINUE)
1587 set_opt(sbi->s_mount_opt, ERRORS_RO);
1588 else
1589 set_opt(sbi->s_mount_opt, ERRORS_CONT); 1581 set_opt(sbi->s_mount_opt, ERRORS_CONT);
1582 else
1583 set_opt(sbi->s_mount_opt, ERRORS_RO);
1590 1584
1591 sbi->s_resuid = le16_to_cpu(es->s_def_resuid); 1585 sbi->s_resuid = le16_to_cpu(es->s_def_resuid);
1592 sbi->s_resgid = le16_to_cpu(es->s_def_resgid); 1586 sbi->s_resgid = le16_to_cpu(es->s_def_resgid);
@@ -1882,19 +1876,24 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
1882 * so we can safely mount the rest of the filesystem now. 1876 * so we can safely mount the rest of the filesystem now.
1883 */ 1877 */
1884 1878
1885 root = iget(sb, EXT3_ROOT_INO); 1879 root = ext3_iget(sb, EXT3_ROOT_INO);
1886 sb->s_root = d_alloc_root(root); 1880 if (IS_ERR(root)) {
1887 if (!sb->s_root) {
1888 printk(KERN_ERR "EXT3-fs: get root inode failed\n"); 1881 printk(KERN_ERR "EXT3-fs: get root inode failed\n");
1889 iput(root); 1882 ret = PTR_ERR(root);
1890 goto failed_mount4; 1883 goto failed_mount4;
1891 } 1884 }
1892 if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) { 1885 if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
1893 dput(sb->s_root); 1886 iput(root);
1894 sb->s_root = NULL;
1895 printk(KERN_ERR "EXT3-fs: corrupt root inode, run e2fsck\n"); 1887 printk(KERN_ERR "EXT3-fs: corrupt root inode, run e2fsck\n");
1896 goto failed_mount4; 1888 goto failed_mount4;
1897 } 1889 }
1890 sb->s_root = d_alloc_root(root);
1891 if (!sb->s_root) {
1892 printk(KERN_ERR "EXT3-fs: get root dentry failed\n");
1893 iput(root);
1894 ret = -ENOMEM;
1895 goto failed_mount4;
1896 }
1898 1897
1899 ext3_setup_super (sb, es, sb->s_flags & MS_RDONLY); 1898 ext3_setup_super (sb, es, sb->s_flags & MS_RDONLY);
1900 /* 1899 /*
@@ -1946,7 +1945,7 @@ out_fail:
1946 sb->s_fs_info = NULL; 1945 sb->s_fs_info = NULL;
1947 kfree(sbi); 1946 kfree(sbi);
1948 lock_kernel(); 1947 lock_kernel();
1949 return -EINVAL; 1948 return ret;
1950} 1949}
1951 1950
1952/* 1951/*
@@ -1982,8 +1981,8 @@ static journal_t *ext3_get_journal(struct super_block *sb,
1982 * things happen if we iget() an unused inode, as the subsequent 1981 * things happen if we iget() an unused inode, as the subsequent
1983 * iput() will try to delete it. */ 1982 * iput() will try to delete it. */
1984 1983
1985 journal_inode = iget(sb, journal_inum); 1984 journal_inode = ext3_iget(sb, journal_inum);
1986 if (!journal_inode) { 1985 if (IS_ERR(journal_inode)) {
1987 printk(KERN_ERR "EXT3-fs: no journal found.\n"); 1986 printk(KERN_ERR "EXT3-fs: no journal found.\n");
1988 return NULL; 1987 return NULL;
1989 } 1988 }
@@ -1996,7 +1995,7 @@ static journal_t *ext3_get_journal(struct super_block *sb,
1996 1995
1997 jbd_debug(2, "Journal inode found at %p: %Ld bytes\n", 1996 jbd_debug(2, "Journal inode found at %p: %Ld bytes\n",
1998 journal_inode, journal_inode->i_size); 1997 journal_inode, journal_inode->i_size);
1999 if (is_bad_inode(journal_inode) || !S_ISREG(journal_inode->i_mode)) { 1998 if (!S_ISREG(journal_inode->i_mode)) {
2000 printk(KERN_ERR "EXT3-fs: invalid journal inode.\n"); 1999 printk(KERN_ERR "EXT3-fs: invalid journal inode.\n");
2001 iput(journal_inode); 2000 iput(journal_inode);
2002 return NULL; 2001 return NULL;
@@ -2759,16 +2758,16 @@ static int ext3_quota_on(struct super_block *sb, int type, int format_id,
2759 if (err) 2758 if (err)
2760 return err; 2759 return err;
2761 /* Quotafile not on the same filesystem? */ 2760 /* Quotafile not on the same filesystem? */
2762 if (nd.mnt->mnt_sb != sb) { 2761 if (nd.path.mnt->mnt_sb != sb) {
2763 path_release(&nd); 2762 path_put(&nd.path);
2764 return -EXDEV; 2763 return -EXDEV;
2765 } 2764 }
2766 /* Quotafile not of fs root? */ 2765 /* Quotafile not of fs root? */
2767 if (nd.dentry->d_parent->d_inode != sb->s_root->d_inode) 2766 if (nd.path.dentry->d_parent->d_inode != sb->s_root->d_inode)
2768 printk(KERN_WARNING 2767 printk(KERN_WARNING
2769 "EXT3-fs: Quota file not on filesystem root. " 2768 "EXT3-fs: Quota file not on filesystem root. "
2770 "Journalled quota will not work.\n"); 2769 "Journalled quota will not work.\n");
2771 path_release(&nd); 2770 path_put(&nd.path);
2772 return vfs_quota_on(sb, type, format_id, path); 2771 return vfs_quota_on(sb, type, format_id, path);
2773} 2772}
2774 2773
diff --git a/fs/ext3/xattr.c b/fs/ext3/xattr.c
index 408373819e34..fb89c299bece 100644
--- a/fs/ext3/xattr.c
+++ b/fs/ext3/xattr.c
@@ -492,8 +492,7 @@ ext3_xattr_release_block(handle_t *handle, struct inode *inode,
492 get_bh(bh); 492 get_bh(bh);
493 ext3_forget(handle, 1, inode, bh, bh->b_blocknr); 493 ext3_forget(handle, 1, inode, bh, bh->b_blocknr);
494 } else { 494 } else {
495 BHDR(bh)->h_refcount = cpu_to_le32( 495 le32_add_cpu(&BHDR(bh)->h_refcount, -1);
496 le32_to_cpu(BHDR(bh)->h_refcount) - 1);
497 error = ext3_journal_dirty_metadata(handle, bh); 496 error = ext3_journal_dirty_metadata(handle, bh);
498 if (IS_SYNC(inode)) 497 if (IS_SYNC(inode))
499 handle->h_sync = 1; 498 handle->h_sync = 1;
@@ -780,8 +779,7 @@ inserted:
780 if (error) 779 if (error)
781 goto cleanup_dquot; 780 goto cleanup_dquot;
782 lock_buffer(new_bh); 781 lock_buffer(new_bh);
783 BHDR(new_bh)->h_refcount = cpu_to_le32(1 + 782 le32_add_cpu(&BHDR(new_bh)->h_refcount, 1);
784 le32_to_cpu(BHDR(new_bh)->h_refcount));
785 ea_bdebug(new_bh, "reusing; refcount now=%d", 783 ea_bdebug(new_bh, "reusing; refcount now=%d",
786 le32_to_cpu(BHDR(new_bh)->h_refcount)); 784 le32_to_cpu(BHDR(new_bh)->h_refcount));
787 unlock_buffer(new_bh); 785 unlock_buffer(new_bh);
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index ac75ea953d83..0737e05ba3dd 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -1700,7 +1700,7 @@ retry_alloc:
1700 1700
1701 /* 1701 /*
1702 * Now search the rest of the groups. We assume that 1702 * Now search the rest of the groups. We assume that
1703 * i and gdp correctly point to the last group visited. 1703 * group_no and gdp correctly point to the last group visited.
1704 */ 1704 */
1705 for (bgi = 0; bgi < ngroups; bgi++) { 1705 for (bgi = 0; bgi < ngroups; bgi++) {
1706 group_no++; 1706 group_no++;
@@ -2011,11 +2011,7 @@ static unsigned long ext4_bg_num_gdb_meta(struct super_block *sb,
2011static unsigned long ext4_bg_num_gdb_nometa(struct super_block *sb, 2011static unsigned long ext4_bg_num_gdb_nometa(struct super_block *sb,
2012 ext4_group_t group) 2012 ext4_group_t group)
2013{ 2013{
2014 if (EXT4_HAS_RO_COMPAT_FEATURE(sb, 2014 return ext4_bg_has_super(sb, group) ? EXT4_SB(sb)->s_gdb_count : 0;
2015 EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER) &&
2016 !ext4_group_sparse(group))
2017 return 0;
2018 return EXT4_SB(sb)->s_gdb_count;
2019} 2015}
2020 2016
2021/** 2017/**
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 575b5215c808..da18a74b966a 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -782,14 +782,15 @@ struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino)
782 unsigned long max_ino = le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count); 782 unsigned long max_ino = le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count);
783 ext4_group_t block_group; 783 ext4_group_t block_group;
784 int bit; 784 int bit;
785 struct buffer_head *bitmap_bh = NULL; 785 struct buffer_head *bitmap_bh;
786 struct inode *inode = NULL; 786 struct inode *inode = NULL;
787 long err = -EIO;
787 788
788 /* Error cases - e2fsck has already cleaned up for us */ 789 /* Error cases - e2fsck has already cleaned up for us */
789 if (ino > max_ino) { 790 if (ino > max_ino) {
790 ext4_warning(sb, __FUNCTION__, 791 ext4_warning(sb, __FUNCTION__,
791 "bad orphan ino %lu! e2fsck was run?", ino); 792 "bad orphan ino %lu! e2fsck was run?", ino);
792 goto out; 793 goto error;
793 } 794 }
794 795
795 block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb); 796 block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
@@ -798,38 +799,49 @@ struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino)
798 if (!bitmap_bh) { 799 if (!bitmap_bh) {
799 ext4_warning(sb, __FUNCTION__, 800 ext4_warning(sb, __FUNCTION__,
800 "inode bitmap error for orphan %lu", ino); 801 "inode bitmap error for orphan %lu", ino);
801 goto out; 802 goto error;
802 } 803 }
803 804
804 /* Having the inode bit set should be a 100% indicator that this 805 /* Having the inode bit set should be a 100% indicator that this
805 * is a valid orphan (no e2fsck run on fs). Orphans also include 806 * is a valid orphan (no e2fsck run on fs). Orphans also include
806 * inodes that were being truncated, so we can't check i_nlink==0. 807 * inodes that were being truncated, so we can't check i_nlink==0.
807 */ 808 */
808 if (!ext4_test_bit(bit, bitmap_bh->b_data) || 809 if (!ext4_test_bit(bit, bitmap_bh->b_data))
809 !(inode = iget(sb, ino)) || is_bad_inode(inode) || 810 goto bad_orphan;
810 NEXT_ORPHAN(inode) > max_ino) { 811
811 ext4_warning(sb, __FUNCTION__, 812 inode = ext4_iget(sb, ino);
812 "bad orphan inode %lu! e2fsck was run?", ino); 813 if (IS_ERR(inode))
813 printk(KERN_NOTICE "ext4_test_bit(bit=%d, block=%llu) = %d\n", 814 goto iget_failed;
814 bit, (unsigned long long)bitmap_bh->b_blocknr, 815
815 ext4_test_bit(bit, bitmap_bh->b_data)); 816 if (NEXT_ORPHAN(inode) > max_ino)
816 printk(KERN_NOTICE "inode=%p\n", inode); 817 goto bad_orphan;
817 if (inode) { 818 brelse(bitmap_bh);
818 printk(KERN_NOTICE "is_bad_inode(inode)=%d\n", 819 return inode;
819 is_bad_inode(inode)); 820
820 printk(KERN_NOTICE "NEXT_ORPHAN(inode)=%u\n", 821iget_failed:
821 NEXT_ORPHAN(inode)); 822 err = PTR_ERR(inode);
822 printk(KERN_NOTICE "max_ino=%lu\n", max_ino); 823 inode = NULL;
823 } 824bad_orphan:
825 ext4_warning(sb, __FUNCTION__,
826 "bad orphan inode %lu! e2fsck was run?", ino);
827 printk(KERN_NOTICE "ext4_test_bit(bit=%d, block=%llu) = %d\n",
828 bit, (unsigned long long)bitmap_bh->b_blocknr,
829 ext4_test_bit(bit, bitmap_bh->b_data));
830 printk(KERN_NOTICE "inode=%p\n", inode);
831 if (inode) {
832 printk(KERN_NOTICE "is_bad_inode(inode)=%d\n",
833 is_bad_inode(inode));
834 printk(KERN_NOTICE "NEXT_ORPHAN(inode)=%u\n",
835 NEXT_ORPHAN(inode));
836 printk(KERN_NOTICE "max_ino=%lu\n", max_ino);
824 /* Avoid freeing blocks if we got a bad deleted inode */ 837 /* Avoid freeing blocks if we got a bad deleted inode */
825 if (inode && inode->i_nlink == 0) 838 if (inode->i_nlink == 0)
826 inode->i_blocks = 0; 839 inode->i_blocks = 0;
827 iput(inode); 840 iput(inode);
828 inode = NULL;
829 } 841 }
830out:
831 brelse(bitmap_bh); 842 brelse(bitmap_bh);
832 return inode; 843error:
844 return ERR_PTR(err);
833} 845}
834 846
835unsigned long ext4_count_free_inodes (struct super_block * sb) 847unsigned long ext4_count_free_inodes (struct super_block * sb)
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index bb717cbb749c..7dd9b50d5ebc 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -429,16 +429,13 @@ static ext4_fsblk_t ext4_find_near(struct inode *inode, Indirect *ind)
429 * ext4_find_goal - find a prefered place for allocation. 429 * ext4_find_goal - find a prefered place for allocation.
430 * @inode: owner 430 * @inode: owner
431 * @block: block we want 431 * @block: block we want
432 * @chain: chain of indirect blocks
433 * @partial: pointer to the last triple within a chain 432 * @partial: pointer to the last triple within a chain
434 * @goal: place to store the result.
435 * 433 *
436 * Normally this function find the prefered place for block allocation, 434 * Normally this function find the prefered place for block allocation,
437 * stores it in *@goal and returns zero. 435 * returns it.
438 */ 436 */
439
440static ext4_fsblk_t ext4_find_goal(struct inode *inode, ext4_lblk_t block, 437static ext4_fsblk_t ext4_find_goal(struct inode *inode, ext4_lblk_t block,
441 Indirect chain[4], Indirect *partial) 438 Indirect *partial)
442{ 439{
443 struct ext4_block_alloc_info *block_i; 440 struct ext4_block_alloc_info *block_i;
444 441
@@ -839,7 +836,7 @@ int ext4_get_blocks_handle(handle_t *handle, struct inode *inode,
839 if (S_ISREG(inode->i_mode) && (!ei->i_block_alloc_info)) 836 if (S_ISREG(inode->i_mode) && (!ei->i_block_alloc_info))
840 ext4_init_block_alloc_info(inode); 837 ext4_init_block_alloc_info(inode);
841 838
842 goal = ext4_find_goal(inode, iblock, chain, partial); 839 goal = ext4_find_goal(inode, iblock, partial);
843 840
844 /* the number of blocks need to allocate for [d,t]indirect blocks */ 841 /* the number of blocks need to allocate for [d,t]indirect blocks */
845 indirect_blks = (chain + depth) - partial - 1; 842 indirect_blks = (chain + depth) - partial - 1;
@@ -895,7 +892,16 @@ out:
895 return err; 892 return err;
896} 893}
897 894
898#define DIO_CREDITS (EXT4_RESERVE_TRANS_BLOCKS + 32) 895/* Maximum number of blocks we map for direct IO at once. */
896#define DIO_MAX_BLOCKS 4096
897/*
898 * Number of credits we need for writing DIO_MAX_BLOCKS:
899 * We need sb + group descriptor + bitmap + inode -> 4
900 * For B blocks with A block pointers per block we need:
901 * 1 (triple ind.) + (B/A/A + 2) (doubly ind.) + (B/A + 2) (indirect).
902 * If we plug in 4096 for B and 256 for A (for 1KB block size), we get 25.
903 */
904#define DIO_CREDITS 25
899 905
900int ext4_get_blocks_wrap(handle_t *handle, struct inode *inode, sector_t block, 906int ext4_get_blocks_wrap(handle_t *handle, struct inode *inode, sector_t block,
901 unsigned long max_blocks, struct buffer_head *bh, 907 unsigned long max_blocks, struct buffer_head *bh,
@@ -942,49 +948,31 @@ static int ext4_get_block(struct inode *inode, sector_t iblock,
942 struct buffer_head *bh_result, int create) 948 struct buffer_head *bh_result, int create)
943{ 949{
944 handle_t *handle = ext4_journal_current_handle(); 950 handle_t *handle = ext4_journal_current_handle();
945 int ret = 0; 951 int ret = 0, started = 0;
946 unsigned max_blocks = bh_result->b_size >> inode->i_blkbits; 952 unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
947 953
948 if (!create) 954 if (create && !handle) {
949 goto get_block; /* A read */ 955 /* Direct IO write... */
950 956 if (max_blocks > DIO_MAX_BLOCKS)
951 if (max_blocks == 1) 957 max_blocks = DIO_MAX_BLOCKS;
952 goto get_block; /* A single block get */ 958 handle = ext4_journal_start(inode, DIO_CREDITS +
953 959 2 * EXT4_QUOTA_TRANS_BLOCKS(inode->i_sb));
954 if (handle->h_transaction->t_state == T_LOCKED) { 960 if (IS_ERR(handle)) {
955 /*
956 * Huge direct-io writes can hold off commits for long
957 * periods of time. Let this commit run.
958 */
959 ext4_journal_stop(handle);
960 handle = ext4_journal_start(inode, DIO_CREDITS);
961 if (IS_ERR(handle))
962 ret = PTR_ERR(handle); 961 ret = PTR_ERR(handle);
963 goto get_block; 962 goto out;
964 }
965
966 if (handle->h_buffer_credits <= EXT4_RESERVE_TRANS_BLOCKS) {
967 /*
968 * Getting low on buffer credits...
969 */
970 ret = ext4_journal_extend(handle, DIO_CREDITS);
971 if (ret > 0) {
972 /*
973 * Couldn't extend the transaction. Start a new one.
974 */
975 ret = ext4_journal_restart(handle, DIO_CREDITS);
976 } 963 }
964 started = 1;
977 } 965 }
978 966
979get_block: 967 ret = ext4_get_blocks_wrap(handle, inode, iblock,
980 if (ret == 0) {
981 ret = ext4_get_blocks_wrap(handle, inode, iblock,
982 max_blocks, bh_result, create, 0); 968 max_blocks, bh_result, create, 0);
983 if (ret > 0) { 969 if (ret > 0) {
984 bh_result->b_size = (ret << inode->i_blkbits); 970 bh_result->b_size = (ret << inode->i_blkbits);
985 ret = 0; 971 ret = 0;
986 }
987 } 972 }
973 if (started)
974 ext4_journal_stop(handle);
975out:
988 return ret; 976 return ret;
989} 977}
990 978
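
The new DIO_CREDITS comment above fixes the constant at 25, and the arithmetic is easy to re-check. A throwaway userspace calculation, using the same integer division the comment implies (B = 4096 blocks, A = 256 block pointers per 1KB block):

    #include <stdio.h>

    int main(void)
    {
            unsigned int B = 4096;  /* DIO_MAX_BLOCKS */
            unsigned int A = 256;   /* block pointers per block at 1KB block size */

            /* sb + group descriptor + bitmap + inode = 4, plus the indirect chain */
            unsigned int credits = 4 + 1 + (B / A / A + 2) + (B / A + 2);

            printf("%u\n", credits);        /* prints 25 */
            return 0;
    }
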
@@ -1674,7 +1662,8 @@ static int ext4_releasepage(struct page *page, gfp_t wait)
1674 * if the machine crashes during the write. 1662 * if the machine crashes during the write.
1675 * 1663 *
1676 * If the O_DIRECT write is instantiating holes inside i_size and the machine 1664 * If the O_DIRECT write is instantiating holes inside i_size and the machine
1677 * crashes then stale disk data _may_ be exposed inside the file. 1665 * crashes then stale disk data _may_ be exposed inside the file. But current
1666 * VFS code falls back into buffered path in that case so we are safe.
1678 */ 1667 */
1679static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb, 1668static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
1680 const struct iovec *iov, loff_t offset, 1669 const struct iovec *iov, loff_t offset,
@@ -1683,7 +1672,7 @@ static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
1683 struct file *file = iocb->ki_filp; 1672 struct file *file = iocb->ki_filp;
1684 struct inode *inode = file->f_mapping->host; 1673 struct inode *inode = file->f_mapping->host;
1685 struct ext4_inode_info *ei = EXT4_I(inode); 1674 struct ext4_inode_info *ei = EXT4_I(inode);
1686 handle_t *handle = NULL; 1675 handle_t *handle;
1687 ssize_t ret; 1676 ssize_t ret;
1688 int orphan = 0; 1677 int orphan = 0;
1689 size_t count = iov_length(iov, nr_segs); 1678 size_t count = iov_length(iov, nr_segs);
@@ -1691,17 +1680,21 @@ static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
1691 if (rw == WRITE) { 1680 if (rw == WRITE) {
1692 loff_t final_size = offset + count; 1681 loff_t final_size = offset + count;
1693 1682
1694 handle = ext4_journal_start(inode, DIO_CREDITS);
1695 if (IS_ERR(handle)) {
1696 ret = PTR_ERR(handle);
1697 goto out;
1698 }
1699 if (final_size > inode->i_size) { 1683 if (final_size > inode->i_size) {
1684 /* Credits for sb + inode write */
1685 handle = ext4_journal_start(inode, 2);
1686 if (IS_ERR(handle)) {
1687 ret = PTR_ERR(handle);
1688 goto out;
1689 }
1700 ret = ext4_orphan_add(handle, inode); 1690 ret = ext4_orphan_add(handle, inode);
1701 if (ret) 1691 if (ret) {
1702 goto out_stop; 1692 ext4_journal_stop(handle);
1693 goto out;
1694 }
1703 orphan = 1; 1695 orphan = 1;
1704 ei->i_disksize = inode->i_size; 1696 ei->i_disksize = inode->i_size;
1697 ext4_journal_stop(handle);
1705 } 1698 }
1706 } 1699 }
1707 1700
@@ -1709,18 +1702,21 @@ static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
1709 offset, nr_segs, 1702 offset, nr_segs,
1710 ext4_get_block, NULL); 1703 ext4_get_block, NULL);
1711 1704
1712 /* 1705 if (orphan) {
1713 * Reacquire the handle: ext4_get_block() can restart the transaction
1714 */
1715 handle = ext4_journal_current_handle();
1716
1717out_stop:
1718 if (handle) {
1719 int err; 1706 int err;
1720 1707
1721 if (orphan && inode->i_nlink) 1708 /* Credits for sb + inode write */
1709 handle = ext4_journal_start(inode, 2);
1710 if (IS_ERR(handle)) {
1711 /* This is really bad luck. We've written the data
1712 * but cannot extend i_size. Bail out and pretend
1713 * the write failed... */
1714 ret = PTR_ERR(handle);
1715 goto out;
1716 }
1717 if (inode->i_nlink)
1722 ext4_orphan_del(handle, inode); 1718 ext4_orphan_del(handle, inode);
1723 if (orphan && ret > 0) { 1719 if (ret > 0) {
1724 loff_t end = offset + ret; 1720 loff_t end = offset + ret;
1725 if (end > inode->i_size) { 1721 if (end > inode->i_size) {
1726 ei->i_disksize = end; 1722 ei->i_disksize = end;
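
The ext4_direct_IO() rework above no longer holds a transaction handle across blockdev_direct_IO(); a short handle is opened only for the orphan-list update before the IO and again for the orphan/i_size cleanup afterwards. Compressed into a sketch (error handling trimmed, extending_write is shorthand for the final_size > i_size test; this is not a verbatim copy of the function):

    if (extending_write) {
            handle = ext4_journal_start(inode, 2);   /* credits: sb + inode */
            ext4_orphan_add(handle, inode);          /* crash safety for the size extension */
            ext4_journal_stop(handle);               /* drop it before the possibly long IO */
    }

    ret = blockdev_direct_IO(rw, iocb, inode, ...);  /* ext4_get_block starts its own handles */

    if (extending_write) {
            handle = ext4_journal_start(inode, 2);
            if (inode->i_nlink)
                    ext4_orphan_del(handle, inode);  /* then i_disksize/i_size are pushed forward */
            ext4_journal_stop(handle);
    }
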
@@ -1840,7 +1836,7 @@ int ext4_block_truncate_page(handle_t *handle, struct page *page,
1840 */ 1836 */
1841 if (!page_has_buffers(page) && test_opt(inode->i_sb, NOBH) && 1837 if (!page_has_buffers(page) && test_opt(inode->i_sb, NOBH) &&
1842 ext4_should_writeback_data(inode) && PageUptodate(page)) { 1838 ext4_should_writeback_data(inode) && PageUptodate(page)) {
1843 zero_user_page(page, offset, length, KM_USER0); 1839 zero_user(page, offset, length);
1844 set_page_dirty(page); 1840 set_page_dirty(page);
1845 goto unlock; 1841 goto unlock;
1846 } 1842 }
@@ -1893,7 +1889,7 @@ int ext4_block_truncate_page(handle_t *handle, struct page *page,
1893 goto unlock; 1889 goto unlock;
1894 } 1890 }
1895 1891
1896 zero_user_page(page, offset, length, KM_USER0); 1892 zero_user(page, offset, length);
1897 1893
1898 BUFFER_TRACE(bh, "zeroed end of block"); 1894 BUFFER_TRACE(bh, "zeroed end of block");
1899 1895
@@ -2683,21 +2679,31 @@ static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode,
2683 } 2679 }
2684} 2680}
2685 2681
2686void ext4_read_inode(struct inode * inode) 2682struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
2687{ 2683{
2688 struct ext4_iloc iloc; 2684 struct ext4_iloc iloc;
2689 struct ext4_inode *raw_inode; 2685 struct ext4_inode *raw_inode;
2690 struct ext4_inode_info *ei = EXT4_I(inode); 2686 struct ext4_inode_info *ei;
2691 struct buffer_head *bh; 2687 struct buffer_head *bh;
2688 struct inode *inode;
2689 long ret;
2692 int block; 2690 int block;
2693 2691
2692 inode = iget_locked(sb, ino);
2693 if (!inode)
2694 return ERR_PTR(-ENOMEM);
2695 if (!(inode->i_state & I_NEW))
2696 return inode;
2697
2698 ei = EXT4_I(inode);
2694#ifdef CONFIG_EXT4DEV_FS_POSIX_ACL 2699#ifdef CONFIG_EXT4DEV_FS_POSIX_ACL
2695 ei->i_acl = EXT4_ACL_NOT_CACHED; 2700 ei->i_acl = EXT4_ACL_NOT_CACHED;
2696 ei->i_default_acl = EXT4_ACL_NOT_CACHED; 2701 ei->i_default_acl = EXT4_ACL_NOT_CACHED;
2697#endif 2702#endif
2698 ei->i_block_alloc_info = NULL; 2703 ei->i_block_alloc_info = NULL;
2699 2704
2700 if (__ext4_get_inode_loc(inode, &iloc, 0)) 2705 ret = __ext4_get_inode_loc(inode, &iloc, 0);
2706 if (ret < 0)
2701 goto bad_inode; 2707 goto bad_inode;
2702 bh = iloc.bh; 2708 bh = iloc.bh;
2703 raw_inode = ext4_raw_inode(&iloc); 2709 raw_inode = ext4_raw_inode(&iloc);
@@ -2723,6 +2729,7 @@ void ext4_read_inode(struct inode * inode)
2723 !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) { 2729 !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) {
2724 /* this inode is deleted */ 2730 /* this inode is deleted */
2725 brelse (bh); 2731 brelse (bh);
2732 ret = -ESTALE;
2726 goto bad_inode; 2733 goto bad_inode;
2727 } 2734 }
2728 /* The only unlinked inodes we let through here have 2735 /* The only unlinked inodes we let through here have
@@ -2750,17 +2757,12 @@ void ext4_read_inode(struct inode * inode)
2750 ei->i_data[block] = raw_inode->i_block[block]; 2757 ei->i_data[block] = raw_inode->i_block[block];
2751 INIT_LIST_HEAD(&ei->i_orphan); 2758 INIT_LIST_HEAD(&ei->i_orphan);
2752 2759
2753 if (inode->i_ino >= EXT4_FIRST_INO(inode->i_sb) + 1 && 2760 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
2754 EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
2755 /*
2756 * When mke2fs creates big inodes it does not zero out
2757 * the unused bytes above EXT4_GOOD_OLD_INODE_SIZE,
2758 * so ignore those first few inodes.
2759 */
2760 ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize); 2761 ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
2761 if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize > 2762 if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
2762 EXT4_INODE_SIZE(inode->i_sb)) { 2763 EXT4_INODE_SIZE(inode->i_sb)) {
2763 brelse (bh); 2764 brelse (bh);
2765 ret = -EIO;
2764 goto bad_inode; 2766 goto bad_inode;
2765 } 2767 }
2766 if (ei->i_extra_isize == 0) { 2768 if (ei->i_extra_isize == 0) {
@@ -2814,11 +2816,12 @@ void ext4_read_inode(struct inode * inode)
2814 } 2816 }
2815 brelse (iloc.bh); 2817 brelse (iloc.bh);
2816 ext4_set_inode_flags(inode); 2818 ext4_set_inode_flags(inode);
2817 return; 2819 unlock_new_inode(inode);
2820 return inode;
2818 2821
2819bad_inode: 2822bad_inode:
2820 make_bad_inode(inode); 2823 iget_failed(inode);
2821 return; 2824 return ERR_PTR(ret);
2822} 2825}
2823 2826
2824static int ext4_inode_blocks_set(handle_t *handle, 2827static int ext4_inode_blocks_set(handle_t *handle,
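
Dropping ext4_read_inode() in favour of ext4_iget() follows the generic iget_locked() pattern the VFS expects once ->read_inode is gone. A generic sketch of that pattern, with the hypothetical foofs_iget() and foofs_read_on_disk_inode() standing in for the filesystem-specific parts:

    struct inode *foofs_iget(struct super_block *sb, unsigned long ino)
    {
            struct inode *inode;
            int err;

            inode = iget_locked(sb, ino);
            if (!inode)
                    return ERR_PTR(-ENOMEM);
            if (!(inode->i_state & I_NEW))
                    return inode;            /* already cached and initialized */

            err = foofs_read_on_disk_inode(inode);   /* hypothetical reader */
            if (err) {
                    iget_failed(inode);      /* marks it bad, unlocks and drops the ref */
                    return ERR_PTR(err);
            }
            unlock_new_inode(inode);
            return inode;
    }
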
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 76e5fedc0a0b..dd0fcfcb35ce 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -420,6 +420,7 @@
420#define MB_DEFAULT_GROUP_PREALLOC 512 420#define MB_DEFAULT_GROUP_PREALLOC 512
421 421
422static struct kmem_cache *ext4_pspace_cachep; 422static struct kmem_cache *ext4_pspace_cachep;
423static struct kmem_cache *ext4_ac_cachep;
423 424
424#ifdef EXT4_BB_MAX_BLOCKS 425#ifdef EXT4_BB_MAX_BLOCKS
425#undef EXT4_BB_MAX_BLOCKS 426#undef EXT4_BB_MAX_BLOCKS
@@ -680,7 +681,6 @@ static void *mb_find_buddy(struct ext4_buddy *e4b, int order, int *max)
680{ 681{
681 char *bb; 682 char *bb;
682 683
683 /* FIXME!! is this needed */
684 BUG_ON(EXT4_MB_BITMAP(e4b) == EXT4_MB_BUDDY(e4b)); 684 BUG_ON(EXT4_MB_BITMAP(e4b) == EXT4_MB_BUDDY(e4b));
685 BUG_ON(max == NULL); 685 BUG_ON(max == NULL);
686 686
@@ -964,7 +964,7 @@ static void ext4_mb_generate_buddy(struct super_block *sb,
964 grp->bb_fragments = fragments; 964 grp->bb_fragments = fragments;
965 965
966 if (free != grp->bb_free) { 966 if (free != grp->bb_free) {
967 printk(KERN_DEBUG 967 ext4_error(sb, __FUNCTION__,
968 "EXT4-fs: group %lu: %u blocks in bitmap, %u in gd\n", 968 "EXT4-fs: group %lu: %u blocks in bitmap, %u in gd\n",
969 group, free, grp->bb_free); 969 group, free, grp->bb_free);
970 grp->bb_free = free; 970 grp->bb_free = free;
@@ -1821,13 +1821,24 @@ static void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
1821 i = ext4_find_next_zero_bit(bitmap, 1821 i = ext4_find_next_zero_bit(bitmap,
1822 EXT4_BLOCKS_PER_GROUP(sb), i); 1822 EXT4_BLOCKS_PER_GROUP(sb), i);
1823 if (i >= EXT4_BLOCKS_PER_GROUP(sb)) { 1823 if (i >= EXT4_BLOCKS_PER_GROUP(sb)) {
1824 BUG_ON(free != 0); 1824 /*
1825 * If the bitmap is corrupted we won't find any
1826 * free blocks even though group info says
1827 * we have free blocks
1828 */
1829 ext4_error(sb, __FUNCTION__, "%d free blocks as per "
1830 "group info. But bitmap says 0\n",
1831 free);
1825 break; 1832 break;
1826 } 1833 }
1827 1834
1828 mb_find_extent(e4b, 0, i, ac->ac_g_ex.fe_len, &ex); 1835 mb_find_extent(e4b, 0, i, ac->ac_g_ex.fe_len, &ex);
1829 BUG_ON(ex.fe_len <= 0); 1836 BUG_ON(ex.fe_len <= 0);
1830 BUG_ON(free < ex.fe_len); 1837 if (free < ex.fe_len) {
1838 ext4_error(sb, __FUNCTION__, "%d free blocks as per "
1839 "group info. But got %d blocks\n",
1840 free, ex.fe_len);
1841 }
1831 1842
1832 ext4_mb_measure_extent(ac, &ex, e4b); 1843 ext4_mb_measure_extent(ac, &ex, e4b);
1833 1844
@@ -2959,12 +2970,19 @@ int __init init_ext4_mballoc(void)
2959 if (ext4_pspace_cachep == NULL) 2970 if (ext4_pspace_cachep == NULL)
2960 return -ENOMEM; 2971 return -ENOMEM;
2961 2972
2973 ext4_ac_cachep =
2974 kmem_cache_create("ext4_alloc_context",
2975 sizeof(struct ext4_allocation_context),
2976 0, SLAB_RECLAIM_ACCOUNT, NULL);
2977 if (ext4_ac_cachep == NULL) {
2978 kmem_cache_destroy(ext4_pspace_cachep);
2979 return -ENOMEM;
2980 }
2962#ifdef CONFIG_PROC_FS 2981#ifdef CONFIG_PROC_FS
2963 proc_root_ext4 = proc_mkdir(EXT4_ROOT, proc_root_fs); 2982 proc_root_ext4 = proc_mkdir(EXT4_ROOT, proc_root_fs);
2964 if (proc_root_ext4 == NULL) 2983 if (proc_root_ext4 == NULL)
2965 printk(KERN_ERR "EXT4-fs: Unable to create %s\n", EXT4_ROOT); 2984 printk(KERN_ERR "EXT4-fs: Unable to create %s\n", EXT4_ROOT);
2966#endif 2985#endif
2967
2968 return 0; 2986 return 0;
2969} 2987}
2970 2988
@@ -2972,6 +2990,7 @@ void exit_ext4_mballoc(void)
2972{ 2990{
2973 /* XXX: synchronize_rcu(); */ 2991 /* XXX: synchronize_rcu(); */
2974 kmem_cache_destroy(ext4_pspace_cachep); 2992 kmem_cache_destroy(ext4_pspace_cachep);
2993 kmem_cache_destroy(ext4_ac_cachep);
2975#ifdef CONFIG_PROC_FS 2994#ifdef CONFIG_PROC_FS
2976 remove_proc_entry(EXT4_ROOT, proc_root_fs); 2995 remove_proc_entry(EXT4_ROOT, proc_root_fs);
2977#endif 2996#endif
@@ -3069,7 +3088,7 @@ static int ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
3069 3088
3070out_err: 3089out_err:
3071 sb->s_dirt = 1; 3090 sb->s_dirt = 1;
3072 put_bh(bitmap_bh); 3091 brelse(bitmap_bh);
3073 return err; 3092 return err;
3074} 3093}
3075 3094
@@ -3354,13 +3373,10 @@ static void ext4_mb_use_group_pa(struct ext4_allocation_context *ac,
3354 ac->ac_pa = pa; 3373 ac->ac_pa = pa;
3355 3374
3356 /* we don't correct pa_pstart or pa_plen here to avoid 3375 /* we don't correct pa_pstart or pa_plen here to avoid
3357 * possible race when tte group is being loaded concurrently 3376 * possible race when the group is being loaded concurrently
3358 * instead we correct pa later, after blocks are marked 3377 * instead we correct pa later, after blocks are marked
3359 * in on-disk bitmap -- see ext4_mb_release_context() */ 3378 * in on-disk bitmap -- see ext4_mb_release_context()
3360 /* 3379 * Other CPUs are prevented from allocating from this pa by lg_mutex
3361 * FIXME!! but the other CPUs can look at this particular
3362 * pa and think that it have enought free blocks if we
3363 * don't update pa_free here right ?
3364 */ 3380 */
3365 mb_debug("use %u/%u from group pa %p\n", pa->pa_lstart-len, len, pa); 3381 mb_debug("use %u/%u from group pa %p\n", pa->pa_lstart-len, len, pa);
3366} 3382}
@@ -3699,7 +3715,7 @@ static int ext4_mb_release_inode_pa(struct ext4_buddy *e4b,
3699 struct buffer_head *bitmap_bh, 3715 struct buffer_head *bitmap_bh,
3700 struct ext4_prealloc_space *pa) 3716 struct ext4_prealloc_space *pa)
3701{ 3717{
3702 struct ext4_allocation_context ac; 3718 struct ext4_allocation_context *ac;
3703 struct super_block *sb = e4b->bd_sb; 3719 struct super_block *sb = e4b->bd_sb;
3704 struct ext4_sb_info *sbi = EXT4_SB(sb); 3720 struct ext4_sb_info *sbi = EXT4_SB(sb);
3705 unsigned long end; 3721 unsigned long end;
@@ -3715,9 +3731,13 @@ static int ext4_mb_release_inode_pa(struct ext4_buddy *e4b,
3715 BUG_ON(group != e4b->bd_group && pa->pa_len != 0); 3731 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
3716 end = bit + pa->pa_len; 3732 end = bit + pa->pa_len;
3717 3733
3718 ac.ac_sb = sb; 3734 ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
3719 ac.ac_inode = pa->pa_inode; 3735
3720 ac.ac_op = EXT4_MB_HISTORY_DISCARD; 3736 if (ac) {
3737 ac->ac_sb = sb;
3738 ac->ac_inode = pa->pa_inode;
3739 ac->ac_op = EXT4_MB_HISTORY_DISCARD;
3740 }
3721 3741
3722 while (bit < end) { 3742 while (bit < end) {
3723 bit = ext4_find_next_zero_bit(bitmap_bh->b_data, end, bit); 3743 bit = ext4_find_next_zero_bit(bitmap_bh->b_data, end, bit);
@@ -3733,24 +3753,28 @@ static int ext4_mb_release_inode_pa(struct ext4_buddy *e4b,
3733 (unsigned) group); 3753 (unsigned) group);
3734 free += next - bit; 3754 free += next - bit;
3735 3755
3736 ac.ac_b_ex.fe_group = group; 3756 if (ac) {
3737 ac.ac_b_ex.fe_start = bit; 3757 ac->ac_b_ex.fe_group = group;
3738 ac.ac_b_ex.fe_len = next - bit; 3758 ac->ac_b_ex.fe_start = bit;
3739 ac.ac_b_ex.fe_logical = 0; 3759 ac->ac_b_ex.fe_len = next - bit;
3740 ext4_mb_store_history(&ac); 3760 ac->ac_b_ex.fe_logical = 0;
3761 ext4_mb_store_history(ac);
3762 }
3741 3763
3742 mb_free_blocks(pa->pa_inode, e4b, bit, next - bit); 3764 mb_free_blocks(pa->pa_inode, e4b, bit, next - bit);
3743 bit = next + 1; 3765 bit = next + 1;
3744 } 3766 }
3745 if (free != pa->pa_free) { 3767 if (free != pa->pa_free) {
3746 printk(KERN_ERR "pa %p: logic %lu, phys. %lu, len %lu\n", 3768 printk(KERN_CRIT "pa %p: logic %lu, phys. %lu, len %lu\n",
3747 pa, (unsigned long) pa->pa_lstart, 3769 pa, (unsigned long) pa->pa_lstart,
3748 (unsigned long) pa->pa_pstart, 3770 (unsigned long) pa->pa_pstart,
3749 (unsigned long) pa->pa_len); 3771 (unsigned long) pa->pa_len);
3750 printk(KERN_ERR "free %u, pa_free %u\n", free, pa->pa_free); 3772 ext4_error(sb, __FUNCTION__, "free %u, pa_free %u\n",
3773 free, pa->pa_free);
3751 } 3774 }
3752 BUG_ON(free != pa->pa_free);
3753 atomic_add(free, &sbi->s_mb_discarded); 3775 atomic_add(free, &sbi->s_mb_discarded);
3776 if (ac)
3777 kmem_cache_free(ext4_ac_cachep, ac);
3754 3778
3755 return err; 3779 return err;
3756} 3780}
@@ -3758,12 +3782,15 @@ static int ext4_mb_release_inode_pa(struct ext4_buddy *e4b,
3758static int ext4_mb_release_group_pa(struct ext4_buddy *e4b, 3782static int ext4_mb_release_group_pa(struct ext4_buddy *e4b,
3759 struct ext4_prealloc_space *pa) 3783 struct ext4_prealloc_space *pa)
3760{ 3784{
3761 struct ext4_allocation_context ac; 3785 struct ext4_allocation_context *ac;
3762 struct super_block *sb = e4b->bd_sb; 3786 struct super_block *sb = e4b->bd_sb;
3763 ext4_group_t group; 3787 ext4_group_t group;
3764 ext4_grpblk_t bit; 3788 ext4_grpblk_t bit;
3765 3789
3766 ac.ac_op = EXT4_MB_HISTORY_DISCARD; 3790 ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
3791
3792 if (ac)
3793 ac->ac_op = EXT4_MB_HISTORY_DISCARD;
3767 3794
3768 BUG_ON(pa->pa_deleted == 0); 3795 BUG_ON(pa->pa_deleted == 0);
3769 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit); 3796 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
@@ -3771,13 +3798,16 @@ static int ext4_mb_release_group_pa(struct ext4_buddy *e4b,
3771 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len); 3798 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
3772 atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded); 3799 atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
3773 3800
3774 ac.ac_sb = sb; 3801 if (ac) {
3775 ac.ac_inode = NULL; 3802 ac->ac_sb = sb;
3776 ac.ac_b_ex.fe_group = group; 3803 ac->ac_inode = NULL;
3777 ac.ac_b_ex.fe_start = bit; 3804 ac->ac_b_ex.fe_group = group;
3778 ac.ac_b_ex.fe_len = pa->pa_len; 3805 ac->ac_b_ex.fe_start = bit;
3779 ac.ac_b_ex.fe_logical = 0; 3806 ac->ac_b_ex.fe_len = pa->pa_len;
3780 ext4_mb_store_history(&ac); 3807 ac->ac_b_ex.fe_logical = 0;
3808 ext4_mb_store_history(ac);
3809 kmem_cache_free(ext4_ac_cachep, ac);
3810 }
3781 3811
3782 return 0; 3812 return 0;
3783} 3813}
@@ -4231,7 +4261,7 @@ static int ext4_mb_discard_preallocations(struct super_block *sb, int needed)
4231ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle, 4261ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
4232 struct ext4_allocation_request *ar, int *errp) 4262 struct ext4_allocation_request *ar, int *errp)
4233{ 4263{
4234 struct ext4_allocation_context ac; 4264 struct ext4_allocation_context *ac = NULL;
4235 struct ext4_sb_info *sbi; 4265 struct ext4_sb_info *sbi;
4236 struct super_block *sb; 4266 struct super_block *sb;
4237 ext4_fsblk_t block = 0; 4267 ext4_fsblk_t block = 0;
@@ -4257,53 +4287,60 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
4257 } 4287 }
4258 inquota = ar->len; 4288 inquota = ar->len;
4259 4289
4290 ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
4291 if (!ac) {
4292 *errp = -ENOMEM;
4293 return 0;
4294 }
4295
4260 ext4_mb_poll_new_transaction(sb, handle); 4296 ext4_mb_poll_new_transaction(sb, handle);
4261 4297
4262 *errp = ext4_mb_initialize_context(&ac, ar); 4298 *errp = ext4_mb_initialize_context(ac, ar);
4263 if (*errp) { 4299 if (*errp) {
4264 ar->len = 0; 4300 ar->len = 0;
4265 goto out; 4301 goto out;
4266 } 4302 }
4267 4303
4268 ac.ac_op = EXT4_MB_HISTORY_PREALLOC; 4304 ac->ac_op = EXT4_MB_HISTORY_PREALLOC;
4269 if (!ext4_mb_use_preallocated(&ac)) { 4305 if (!ext4_mb_use_preallocated(ac)) {
4270 4306
4271 ac.ac_op = EXT4_MB_HISTORY_ALLOC; 4307 ac->ac_op = EXT4_MB_HISTORY_ALLOC;
4272 ext4_mb_normalize_request(&ac, ar); 4308 ext4_mb_normalize_request(ac, ar);
4273 4309
4274repeat: 4310repeat:
4275 /* allocate space in core */ 4311 /* allocate space in core */
4276 ext4_mb_regular_allocator(&ac); 4312 ext4_mb_regular_allocator(ac);
4277 4313
4278 /* as we've just preallocated more space than 4314 /* as we've just preallocated more space than
4279 * user requested originally, we store allocated 4315 * user requested originally, we store allocated
4280 * space in a special descriptor */ 4316 * space in a special descriptor */
4281 if (ac.ac_status == AC_STATUS_FOUND && 4317 if (ac->ac_status == AC_STATUS_FOUND &&
4282 ac.ac_o_ex.fe_len < ac.ac_b_ex.fe_len) 4318 ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len)
4283 ext4_mb_new_preallocation(&ac); 4319 ext4_mb_new_preallocation(ac);
4284 } 4320 }
4285 4321
4286 if (likely(ac.ac_status == AC_STATUS_FOUND)) { 4322 if (likely(ac->ac_status == AC_STATUS_FOUND)) {
4287 ext4_mb_mark_diskspace_used(&ac, handle); 4323 ext4_mb_mark_diskspace_used(ac, handle);
4288 *errp = 0; 4324 *errp = 0;
4289 block = ext4_grp_offs_to_block(sb, &ac.ac_b_ex); 4325 block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
4290 ar->len = ac.ac_b_ex.fe_len; 4326 ar->len = ac->ac_b_ex.fe_len;
4291 } else { 4327 } else {
4292 freed = ext4_mb_discard_preallocations(sb, ac.ac_o_ex.fe_len); 4328 freed = ext4_mb_discard_preallocations(sb, ac->ac_o_ex.fe_len);
4293 if (freed) 4329 if (freed)
4294 goto repeat; 4330 goto repeat;
4295 *errp = -ENOSPC; 4331 *errp = -ENOSPC;
4296 ac.ac_b_ex.fe_len = 0; 4332 ac->ac_b_ex.fe_len = 0;
4297 ar->len = 0; 4333 ar->len = 0;
4298 ext4_mb_show_ac(&ac); 4334 ext4_mb_show_ac(ac);
4299 } 4335 }
4300 4336
4301 ext4_mb_release_context(&ac); 4337 ext4_mb_release_context(ac);
4302 4338
4303out: 4339out:
4304 if (ar->len < inquota) 4340 if (ar->len < inquota)
4305 DQUOT_FREE_BLOCK(ar->inode, inquota - ar->len); 4341 DQUOT_FREE_BLOCK(ar->inode, inquota - ar->len);
4306 4342
4343 kmem_cache_free(ext4_ac_cachep, ac);
4307 return block; 4344 return block;
4308} 4345}
4309static void ext4_mb_poll_new_transaction(struct super_block *sb, 4346static void ext4_mb_poll_new_transaction(struct super_block *sb,
@@ -4405,9 +4442,9 @@ void ext4_mb_free_blocks(handle_t *handle, struct inode *inode,
4405 unsigned long block, unsigned long count, 4442 unsigned long block, unsigned long count,
4406 int metadata, unsigned long *freed) 4443 int metadata, unsigned long *freed)
4407{ 4444{
4408 struct buffer_head *bitmap_bh = 0; 4445 struct buffer_head *bitmap_bh = NULL;
4409 struct super_block *sb = inode->i_sb; 4446 struct super_block *sb = inode->i_sb;
4410 struct ext4_allocation_context ac; 4447 struct ext4_allocation_context *ac = NULL;
4411 struct ext4_group_desc *gdp; 4448 struct ext4_group_desc *gdp;
4412 struct ext4_super_block *es; 4449 struct ext4_super_block *es;
4413 unsigned long overflow; 4450 unsigned long overflow;
@@ -4436,9 +4473,12 @@ void ext4_mb_free_blocks(handle_t *handle, struct inode *inode,
4436 4473
4437 ext4_debug("freeing block %lu\n", block); 4474 ext4_debug("freeing block %lu\n", block);
4438 4475
4439 ac.ac_op = EXT4_MB_HISTORY_FREE; 4476 ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
4440 ac.ac_inode = inode; 4477 if (ac) {
4441 ac.ac_sb = sb; 4478 ac->ac_op = EXT4_MB_HISTORY_FREE;
4479 ac->ac_inode = inode;
4480 ac->ac_sb = sb;
4481 }
4442 4482
4443do_more: 4483do_more:
4444 overflow = 0; 4484 overflow = 0;
@@ -4504,10 +4544,12 @@ do_more:
4504 BUFFER_TRACE(bitmap_bh, "dirtied bitmap block"); 4544 BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
4505 err = ext4_journal_dirty_metadata(handle, bitmap_bh); 4545 err = ext4_journal_dirty_metadata(handle, bitmap_bh);
4506 4546
4507 ac.ac_b_ex.fe_group = block_group; 4547 if (ac) {
4508 ac.ac_b_ex.fe_start = bit; 4548 ac->ac_b_ex.fe_group = block_group;
4509 ac.ac_b_ex.fe_len = count; 4549 ac->ac_b_ex.fe_start = bit;
4510 ext4_mb_store_history(&ac); 4550 ac->ac_b_ex.fe_len = count;
4551 ext4_mb_store_history(ac);
4552 }
4511 4553
4512 if (metadata) { 4554 if (metadata) {
4513 /* blocks being freed are metadata. these blocks shouldn't 4555 /* blocks being freed are metadata. these blocks shouldn't
@@ -4548,5 +4590,7 @@ do_more:
4548error_return: 4590error_return:
4549 brelse(bitmap_bh); 4591 brelse(bitmap_bh);
4550 ext4_std_error(sb, err); 4592 ext4_std_error(sb, err);
4593 if (ac)
4594 kmem_cache_free(ext4_ac_cachep, ac);
4551 return; 4595 return;
4552} 4596}
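
The mballoc changes move struct ext4_allocation_context off the kernel stack (it is a sizeable structure) into a dedicated slab cache, and in the discard/free paths an allocation failure simply means the history record is skipped rather than being treated as fatal. The shape of that best-effort pattern, reduced to its essentials (illustrative only):

    struct ext4_allocation_context *ac;

    ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
    if (ac) {
            /* statistics/history only -- acceptable to lose under memory pressure */
            ac->ac_sb = sb;
            ac->ac_op = EXT4_MB_HISTORY_FREE;
    }

    /* ... do the real work whether or not ac was allocated ... */

    if (ac) {
            ext4_mb_store_history(ac);
            kmem_cache_free(ext4_ac_cachep, ac);
    }

Only ext4_mb_new_blocks() turns a failed allocation into a hard -ENOMEM, since there the context drives the allocation itself.
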
diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c
index 3ebc2332f52e..8c6c685b9d22 100644
--- a/fs/ext4/migrate.c
+++ b/fs/ext4/migrate.c
@@ -61,10 +61,9 @@ static int finish_range(handle_t *handle, struct inode *inode,
61 retval = ext4_journal_restart(handle, needed); 61 retval = ext4_journal_restart(handle, needed);
62 if (retval) 62 if (retval)
63 goto err_out; 63 goto err_out;
64 } 64 } else if (needed) {
65 if (needed) {
66 retval = ext4_journal_extend(handle, needed); 65 retval = ext4_journal_extend(handle, needed);
67 if (retval != 0) { 66 if (retval) {
68 /* 67 /*
69 * IF not able to extend the journal restart the journal 68 * IF not able to extend the journal restart the journal
70 */ 69 */
@@ -220,6 +219,26 @@ static int update_tind_extent_range(handle_t *handle, struct inode *inode,
220 219
221} 220}
222 221
222static int extend_credit_for_blkdel(handle_t *handle, struct inode *inode)
223{
224 int retval = 0, needed;
225
226 if (handle->h_buffer_credits > EXT4_RESERVE_TRANS_BLOCKS)
227 return 0;
228 /*
229 * We are freeing blocks. During this we touch
230 * superblock, group descriptor and block bitmap.
231 * So allocate a credit of 3. We may update
232 * quota (user and group).
233 */
234 needed = 3 + 2*EXT4_QUOTA_TRANS_BLOCKS(inode->i_sb);
235
236 if (ext4_journal_extend(handle, needed) != 0)
237 retval = ext4_journal_restart(handle, needed);
238
239 return retval;
240}
241
223static int free_dind_blocks(handle_t *handle, 242static int free_dind_blocks(handle_t *handle,
224 struct inode *inode, __le32 i_data) 243 struct inode *inode, __le32 i_data)
225{ 244{
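
extend_credit_for_blkdel() above is an instance of the usual "extend the running handle if the journal has room, otherwise restart it" idiom used before freeing blocks. Generalized (threshold and needed are placeholders here, not values taken from this patch):

    if (handle->h_buffer_credits <= threshold) {
            if (ext4_journal_extend(handle, needed) != 0)
                    err = ext4_journal_restart(handle, needed);
    }
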
@@ -234,11 +253,14 @@ static int free_dind_blocks(handle_t *handle,
234 253
235 tmp_idata = (__le32 *)bh->b_data; 254 tmp_idata = (__le32 *)bh->b_data;
236 for (i = 0; i < max_entries; i++) { 255 for (i = 0; i < max_entries; i++) {
237 if (tmp_idata[i]) 256 if (tmp_idata[i]) {
257 extend_credit_for_blkdel(handle, inode);
238 ext4_free_blocks(handle, inode, 258 ext4_free_blocks(handle, inode,
239 le32_to_cpu(tmp_idata[i]), 1, 1); 259 le32_to_cpu(tmp_idata[i]), 1, 1);
260 }
240 } 261 }
241 put_bh(bh); 262 put_bh(bh);
263 extend_credit_for_blkdel(handle, inode);
242 ext4_free_blocks(handle, inode, le32_to_cpu(i_data), 1, 1); 264 ext4_free_blocks(handle, inode, le32_to_cpu(i_data), 1, 1);
243 return 0; 265 return 0;
244} 266}
@@ -267,29 +289,32 @@ static int free_tind_blocks(handle_t *handle,
267 } 289 }
268 } 290 }
269 put_bh(bh); 291 put_bh(bh);
292 extend_credit_for_blkdel(handle, inode);
270 ext4_free_blocks(handle, inode, le32_to_cpu(i_data), 1, 1); 293 ext4_free_blocks(handle, inode, le32_to_cpu(i_data), 1, 1);
271 return 0; 294 return 0;
272} 295}
273 296
274static int free_ind_block(handle_t *handle, struct inode *inode) 297static int free_ind_block(handle_t *handle, struct inode *inode, __le32 *i_data)
275{ 298{
276 int retval; 299 int retval;
277 struct ext4_inode_info *ei = EXT4_I(inode);
278 300
279 if (ei->i_data[EXT4_IND_BLOCK]) 301 /* ei->i_data[EXT4_IND_BLOCK] */
302 if (i_data[0]) {
303 extend_credit_for_blkdel(handle, inode);
280 ext4_free_blocks(handle, inode, 304 ext4_free_blocks(handle, inode,
281 le32_to_cpu(ei->i_data[EXT4_IND_BLOCK]), 1, 1); 305 le32_to_cpu(i_data[0]), 1, 1);
306 }
282 307
283 if (ei->i_data[EXT4_DIND_BLOCK]) { 308 /* ei->i_data[EXT4_DIND_BLOCK] */
284 retval = free_dind_blocks(handle, inode, 309 if (i_data[1]) {
285 ei->i_data[EXT4_DIND_BLOCK]); 310 retval = free_dind_blocks(handle, inode, i_data[1]);
286 if (retval) 311 if (retval)
287 return retval; 312 return retval;
288 } 313 }
289 314
290 if (ei->i_data[EXT4_TIND_BLOCK]) { 315 /* ei->i_data[EXT4_TIND_BLOCK] */
291 retval = free_tind_blocks(handle, inode, 316 if (i_data[2]) {
292 ei->i_data[EXT4_TIND_BLOCK]); 317 retval = free_tind_blocks(handle, inode, i_data[2]);
293 if (retval) 318 if (retval)
294 return retval; 319 return retval;
295 } 320 }
@@ -297,15 +322,13 @@ static int free_ind_block(handle_t *handle, struct inode *inode)
297} 322}
298 323
299static int ext4_ext_swap_inode_data(handle_t *handle, struct inode *inode, 324static int ext4_ext_swap_inode_data(handle_t *handle, struct inode *inode,
300 struct inode *tmp_inode, int retval) 325 struct inode *tmp_inode)
301{ 326{
327 int retval;
328 __le32 i_data[3];
302 struct ext4_inode_info *ei = EXT4_I(inode); 329 struct ext4_inode_info *ei = EXT4_I(inode);
303 struct ext4_inode_info *tmp_ei = EXT4_I(tmp_inode); 330 struct ext4_inode_info *tmp_ei = EXT4_I(tmp_inode);
304 331
305 retval = free_ind_block(handle, inode);
306 if (retval)
307 goto err_out;
308
309 /* 332 /*
310 * One credit accounted for writing the 333 * One credit accounted for writing the
311 * i_data field of the original inode 334 * i_data field of the original inode
@@ -317,6 +340,11 @@ static int ext4_ext_swap_inode_data(handle_t *handle, struct inode *inode,
317 goto err_out; 340 goto err_out;
318 } 341 }
319 342
343 i_data[0] = ei->i_data[EXT4_IND_BLOCK];
344 i_data[1] = ei->i_data[EXT4_DIND_BLOCK];
345 i_data[2] = ei->i_data[EXT4_TIND_BLOCK];
346
347 down_write(&EXT4_I(inode)->i_data_sem);
320 /* 348 /*
321 * We have the extent map built with the tmp inode. 349 * We have the extent map built with the tmp inode.
322 * Now copy the i_data across 350 * Now copy the i_data across
@@ -336,8 +364,15 @@ static int ext4_ext_swap_inode_data(handle_t *handle, struct inode *inode,
336 spin_lock(&inode->i_lock); 364 spin_lock(&inode->i_lock);
337 inode->i_blocks += tmp_inode->i_blocks; 365 inode->i_blocks += tmp_inode->i_blocks;
338 spin_unlock(&inode->i_lock); 366 spin_unlock(&inode->i_lock);
367 up_write(&EXT4_I(inode)->i_data_sem);
339 368
369 /*
370 * We mark the inode dirty after, because we decrement the
371 * i_blocks when freeing the indirect meta-data blocks
372 */
373 retval = free_ind_block(handle, inode, i_data);
340 ext4_mark_inode_dirty(handle, inode); 374 ext4_mark_inode_dirty(handle, inode);
375
341err_out: 376err_out:
342 return retval; 377 return retval;
343} 378}
@@ -365,6 +400,7 @@ static int free_ext_idx(handle_t *handle, struct inode *inode,
365 } 400 }
366 } 401 }
367 put_bh(bh); 402 put_bh(bh);
403 extend_credit_for_blkdel(handle, inode);
368 ext4_free_blocks(handle, inode, block, 1, 1); 404 ext4_free_blocks(handle, inode, block, 1, 1);
369 return retval; 405 return retval;
370} 406}
@@ -414,7 +450,12 @@ int ext4_ext_migrate(struct inode *inode, struct file *filp,
414 if ((EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)) 450 if ((EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL))
415 return -EINVAL; 451 return -EINVAL;
416 452
417 down_write(&EXT4_I(inode)->i_data_sem); 453 if (S_ISLNK(inode->i_mode) && inode->i_blocks == 0)
454 /*
455 * don't migrate fast symlink
456 */
457 return retval;
458
418 handle = ext4_journal_start(inode, 459 handle = ext4_journal_start(inode,
419 EXT4_DATA_TRANS_BLOCKS(inode->i_sb) + 460 EXT4_DATA_TRANS_BLOCKS(inode->i_sb) +
420 EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 + 461 EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 +
@@ -448,13 +489,6 @@ int ext4_ext_migrate(struct inode *inode, struct file *filp,
448 ext4_orphan_add(handle, tmp_inode); 489 ext4_orphan_add(handle, tmp_inode);
449 ext4_journal_stop(handle); 490 ext4_journal_stop(handle);
450 491
451 ei = EXT4_I(inode);
452 i_data = ei->i_data;
453 memset(&lb, 0, sizeof(lb));
454
455 /* 32 bit block address 4 bytes */
456 max_entries = inode->i_sb->s_blocksize >> 2;
457
458 /* 492 /*
459 * start with one credit accounted for 493 * start with one credit accounted for
460 * superblock modification. 494 * superblock modification.
@@ -463,7 +497,20 @@ int ext4_ext_migrate(struct inode *inode, struct file *filp,
463 * transaction that created the inode. Later as and 497 * transaction that created the inode. Later as and
464 * when we add extents we extend the journal 498 * when we add extents we extend the journal
465 */ 499 */
500 /*
501 * inode_mutex prevents write and truncate on the file. Read still goes
502 * through. We take i_data_sem in ext4_ext_swap_inode_data before we
503 * switch the inode format to prevent read.
504 */
505 mutex_lock(&(inode->i_mutex));
466 handle = ext4_journal_start(inode, 1); 506 handle = ext4_journal_start(inode, 1);
507
508 ei = EXT4_I(inode);
509 i_data = ei->i_data;
510 memset(&lb, 0, sizeof(lb));
511
512 /* 32 bit block address 4 bytes */
513 max_entries = inode->i_sb->s_blocksize >> 2;
467 for (i = 0; i < EXT4_NDIR_BLOCKS; i++, blk_count++) { 514 for (i = 0; i < EXT4_NDIR_BLOCKS; i++, blk_count++) {
468 if (i_data[i]) { 515 if (i_data[i]) {
469 retval = update_extent_range(handle, tmp_inode, 516 retval = update_extent_range(handle, tmp_inode,
@@ -501,19 +548,6 @@ int ext4_ext_migrate(struct inode *inode, struct file *filp,
501 */ 548 */
502 retval = finish_range(handle, tmp_inode, &lb); 549 retval = finish_range(handle, tmp_inode, &lb);
503err_out: 550err_out:
504 /*
505 * We are either freeing extent information or indirect
506 * blocks. During this we touch superblock, group descriptor
507 * and block bitmap. Later we mark the tmp_inode dirty
508 * via ext4_ext_tree_init. So allocate a credit of 4
509 * We may update quota (user and group).
510 *
511 * FIXME!! we may be touching bitmaps in different block groups.
512 */
513 if (ext4_journal_extend(handle,
514 4 + 2*EXT4_QUOTA_TRANS_BLOCKS(inode->i_sb)) != 0)
515 ext4_journal_restart(handle,
516 4 + 2*EXT4_QUOTA_TRANS_BLOCKS(inode->i_sb));
517 if (retval) 551 if (retval)
518 /* 552 /*
519 * Failure case delete the extent information with the 553 * Failure case delete the extent information with the
@@ -522,7 +556,11 @@ err_out:
522 free_ext_block(handle, tmp_inode); 556 free_ext_block(handle, tmp_inode);
523 else 557 else
524 retval = ext4_ext_swap_inode_data(handle, inode, 558 retval = ext4_ext_swap_inode_data(handle, inode,
525 tmp_inode, retval); 559 tmp_inode);
560
561 /* We mark the tmp_inode dirty via ext4_ext_tree_init. */
562 if (ext4_journal_extend(handle, 1) != 0)
563 ext4_journal_restart(handle, 1);
526 564
527 /* 565 /*
528 * Mark the tmp_inode as of size zero 566 * Mark the tmp_inode as of size zero
@@ -550,8 +588,7 @@ err_out:
550 tmp_inode->i_nlink = 0; 588 tmp_inode->i_nlink = 0;
551 589
552 ext4_journal_stop(handle); 590 ext4_journal_stop(handle);
553 591 mutex_unlock(&(inode->i_mutex));
554 up_write(&EXT4_I(inode)->i_data_sem);
555 592
556 if (tmp_inode) 593 if (tmp_inode)
557 iput(tmp_inode); 594 iput(tmp_inode);
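
Taken together, the migrate changes replace the old i_data_sem-for-everything locking with a two-level scheme: i_mutex is held for the whole conversion to keep writers and truncate out, and i_data_sem is taken only around the moment the block map is swapped. A compressed view of that ordering (names as in the hunks above, details omitted):

    mutex_lock(&inode->i_mutex);                  /* no writes/truncate during the migrate */
    /* ... build the extent tree in tmp_inode ... */

    down_write(&EXT4_I(inode)->i_data_sem);       /* block readers of the old block map */
    /* copy tmp_inode's i_data across and switch the inode to extents */
    up_write(&EXT4_I(inode)->i_data_sem);

    /* free the old indirect blocks after the swap, then mark the inode dirty */
    mutex_unlock(&inode->i_mutex);
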
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 67b6d8a1ceff..a9347fb43bcc 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -1039,17 +1039,11 @@ static struct dentry *ext4_lookup(struct inode * dir, struct dentry *dentry, str
1039 if (!ext4_valid_inum(dir->i_sb, ino)) { 1039 if (!ext4_valid_inum(dir->i_sb, ino)) {
1040 ext4_error(dir->i_sb, "ext4_lookup", 1040 ext4_error(dir->i_sb, "ext4_lookup",
1041 "bad inode number: %lu", ino); 1041 "bad inode number: %lu", ino);
1042 inode = NULL; 1042 return ERR_PTR(-EIO);
1043 } else
1044 inode = iget(dir->i_sb, ino);
1045
1046 if (!inode)
1047 return ERR_PTR(-EACCES);
1048
1049 if (is_bad_inode(inode)) {
1050 iput(inode);
1051 return ERR_PTR(-ENOENT);
1052 } 1043 }
1044 inode = ext4_iget(dir->i_sb, ino);
1045 if (IS_ERR(inode))
1046 return ERR_CAST(inode);
1053 } 1047 }
1054 return d_splice_alias(inode, dentry); 1048 return d_splice_alias(inode, dentry);
1055} 1049}
@@ -1078,18 +1072,13 @@ struct dentry *ext4_get_parent(struct dentry *child)
1078 if (!ext4_valid_inum(child->d_inode->i_sb, ino)) { 1072 if (!ext4_valid_inum(child->d_inode->i_sb, ino)) {
1079 ext4_error(child->d_inode->i_sb, "ext4_get_parent", 1073 ext4_error(child->d_inode->i_sb, "ext4_get_parent",
1080 "bad inode number: %lu", ino); 1074 "bad inode number: %lu", ino);
1081 inode = NULL; 1075 return ERR_PTR(-EIO);
1082 } else
1083 inode = iget(child->d_inode->i_sb, ino);
1084
1085 if (!inode)
1086 return ERR_PTR(-EACCES);
1087
1088 if (is_bad_inode(inode)) {
1089 iput(inode);
1090 return ERR_PTR(-ENOENT);
1091 } 1076 }
1092 1077
1078 inode = ext4_iget(child->d_inode->i_sb, ino);
1079 if (IS_ERR(inode))
1080 return ERR_CAST(inode);
1081
1093 parent = d_alloc_anon(inode); 1082 parent = d_alloc_anon(inode);
1094 if (!parent) { 1083 if (!parent) {
1095 iput(inode); 1084 iput(inode);
@@ -2234,6 +2223,7 @@ retry:
2234 inode->i_op = &ext4_fast_symlink_inode_operations; 2223 inode->i_op = &ext4_fast_symlink_inode_operations;
2235 memcpy((char*)&EXT4_I(inode)->i_data,symname,l); 2224 memcpy((char*)&EXT4_I(inode)->i_data,symname,l);
2236 inode->i_size = l-1; 2225 inode->i_size = l-1;
2226 EXT4_I(inode)->i_flags &= ~EXT4_EXTENTS_FL;
2237 } 2227 }
2238 EXT4_I(inode)->i_disksize = inode->i_size; 2228 EXT4_I(inode)->i_disksize = inode->i_size;
2239 err = ext4_add_nondir(handle, dentry, inode); 2229 err = ext4_add_nondir(handle, dentry, inode);
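
ext4_lookup() and ext4_get_parent() now share the error-pointer flow of ext4_iget(); ERR_CAST() merely re-types the pointer while preserving the encoded errno, so a failed struct inode * lookup can be returned where a struct dentry * is expected. In outline:

    inode = ext4_iget(dir->i_sb, ino);
    if (IS_ERR(inode))
            return ERR_CAST(inode);   /* same errno, now typed as a dentry error */
    return d_splice_alias(inode, dentry);
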
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index 4fbba60816f4..9477a2bd6ff2 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -779,12 +779,11 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
779 "No reserved GDT blocks, can't resize"); 779 "No reserved GDT blocks, can't resize");
780 return -EPERM; 780 return -EPERM;
781 } 781 }
782 inode = iget(sb, EXT4_RESIZE_INO); 782 inode = ext4_iget(sb, EXT4_RESIZE_INO);
783 if (!inode || is_bad_inode(inode)) { 783 if (IS_ERR(inode)) {
784 ext4_warning(sb, __FUNCTION__, 784 ext4_warning(sb, __FUNCTION__,
785 "Error opening resize inode"); 785 "Error opening resize inode");
786 iput(inode); 786 return PTR_ERR(inode);
787 return -ENOENT;
788 } 787 }
789 } 788 }
790 789
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 055a0cd0168e..13383ba18f1d 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -777,11 +777,10 @@ static struct inode *ext4_nfs_get_inode(struct super_block *sb,
777 * Currently we don't know the generation for parent directory, so 777 * Currently we don't know the generation for parent directory, so
778 * a generation of 0 means "accept any" 778 * a generation of 0 means "accept any"
779 */ 779 */
780 inode = iget(sb, ino); 780 inode = ext4_iget(sb, ino);
781 if (inode == NULL) 781 if (IS_ERR(inode))
782 return ERR_PTR(-ENOMEM); 782 return ERR_CAST(inode);
783 if (is_bad_inode(inode) || 783 if (generation && inode->i_generation != generation) {
784 (generation && inode->i_generation != generation)) {
785 iput(inode); 784 iput(inode);
786 return ERR_PTR(-ESTALE); 785 return ERR_PTR(-ESTALE);
787 } 786 }
@@ -850,7 +849,6 @@ static struct quotactl_ops ext4_qctl_operations = {
850static const struct super_operations ext4_sops = { 849static const struct super_operations ext4_sops = {
851 .alloc_inode = ext4_alloc_inode, 850 .alloc_inode = ext4_alloc_inode,
852 .destroy_inode = ext4_destroy_inode, 851 .destroy_inode = ext4_destroy_inode,
853 .read_inode = ext4_read_inode,
854 .write_inode = ext4_write_inode, 852 .write_inode = ext4_write_inode,
855 .dirty_inode = ext4_dirty_inode, 853 .dirty_inode = ext4_dirty_inode,
856 .delete_inode = ext4_delete_inode, 854 .delete_inode = ext4_delete_inode,
@@ -1458,7 +1456,7 @@ int ext4_group_desc_csum_verify(struct ext4_sb_info *sbi, __u32 block_group,
1458} 1456}
1459 1457
1460/* Called at mount-time, super-block is locked */ 1458/* Called at mount-time, super-block is locked */
1461static int ext4_check_descriptors (struct super_block * sb) 1459static int ext4_check_descriptors(struct super_block *sb)
1462{ 1460{
1463 struct ext4_sb_info *sbi = EXT4_SB(sb); 1461 struct ext4_sb_info *sbi = EXT4_SB(sb);
1464 ext4_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block); 1462 ext4_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block);
@@ -1466,8 +1464,6 @@ static int ext4_check_descriptors (struct super_block * sb)
1466 ext4_fsblk_t block_bitmap; 1464 ext4_fsblk_t block_bitmap;
1467 ext4_fsblk_t inode_bitmap; 1465 ext4_fsblk_t inode_bitmap;
1468 ext4_fsblk_t inode_table; 1466 ext4_fsblk_t inode_table;
1469 struct ext4_group_desc * gdp = NULL;
1470 int desc_block = 0;
1471 int flexbg_flag = 0; 1467 int flexbg_flag = 0;
1472 ext4_group_t i; 1468 ext4_group_t i;
1473 1469
@@ -1476,17 +1472,15 @@ static int ext4_check_descriptors (struct super_block * sb)
1476 1472
1477 ext4_debug ("Checking group descriptors"); 1473 ext4_debug ("Checking group descriptors");
1478 1474
1479 for (i = 0; i < sbi->s_groups_count; i++) 1475 for (i = 0; i < sbi->s_groups_count; i++) {
1480 { 1476 struct ext4_group_desc *gdp = ext4_get_group_desc(sb, i, NULL);
1477
1481 if (i == sbi->s_groups_count - 1 || flexbg_flag) 1478 if (i == sbi->s_groups_count - 1 || flexbg_flag)
1482 last_block = ext4_blocks_count(sbi->s_es) - 1; 1479 last_block = ext4_blocks_count(sbi->s_es) - 1;
1483 else 1480 else
1484 last_block = first_block + 1481 last_block = first_block +
1485 (EXT4_BLOCKS_PER_GROUP(sb) - 1); 1482 (EXT4_BLOCKS_PER_GROUP(sb) - 1);
1486 1483
1487 if ((i % EXT4_DESC_PER_BLOCK(sb)) == 0)
1488 gdp = (struct ext4_group_desc *)
1489 sbi->s_group_desc[desc_block++]->b_data;
1490 block_bitmap = ext4_block_bitmap(sb, gdp); 1484 block_bitmap = ext4_block_bitmap(sb, gdp);
1491 if (block_bitmap < first_block || block_bitmap > last_block) 1485 if (block_bitmap < first_block || block_bitmap > last_block)
1492 { 1486 {
@@ -1524,8 +1518,6 @@ static int ext4_check_descriptors (struct super_block * sb)
1524 } 1518 }
1525 if (!flexbg_flag) 1519 if (!flexbg_flag)
1526 first_block += EXT4_BLOCKS_PER_GROUP(sb); 1520 first_block += EXT4_BLOCKS_PER_GROUP(sb);
1527 gdp = (struct ext4_group_desc *)
1528 ((__u8 *)gdp + EXT4_DESC_SIZE(sb));
1529 } 1521 }
1530 1522
1531 ext4_free_blocks_count_set(sbi->s_es, ext4_count_free_blocks(sb)); 1523 ext4_free_blocks_count_set(sbi->s_es, ext4_count_free_blocks(sb));
@@ -1811,6 +1803,7 @@ static int ext4_fill_super (struct super_block *sb, void *data, int silent)
1811 unsigned long journal_devnum = 0; 1803 unsigned long journal_devnum = 0;
1812 unsigned long def_mount_opts; 1804 unsigned long def_mount_opts;
1813 struct inode *root; 1805 struct inode *root;
1806 int ret = -EINVAL;
1814 int blocksize; 1807 int blocksize;
1815 int db_count; 1808 int db_count;
1816 int i; 1809 int i;
@@ -1926,6 +1919,17 @@ static int ext4_fill_super (struct super_block *sb, void *data, int silent)
1926 printk(KERN_WARNING 1919 printk(KERN_WARNING
1927 "EXT4-fs warning: feature flags set on rev 0 fs, " 1920 "EXT4-fs warning: feature flags set on rev 0 fs, "
1928 "running e2fsck is recommended\n"); 1921 "running e2fsck is recommended\n");
1922
1923 /*
1924 * Since ext4 is still considered development code, we require
1925 * that the TEST_FILESYS flag in s->flags be set.
1926 */
1927 if (!(le32_to_cpu(es->s_flags) & EXT2_FLAGS_TEST_FILESYS)) {
1928 printk(KERN_WARNING "EXT4-fs: %s: not marked "
1929 "OK to use with test code.\n", sb->s_id);
1930 goto failed_mount;
1931 }
1932
1929 /* 1933 /*
1930 * Check feature flags regardless of the revision level, since we 1934 * Check feature flags regardless of the revision level, since we
1931 * previously didn't change the revision level when setting the flags, 1935 * previously didn't change the revision level when setting the flags,
@@ -2243,19 +2247,24 @@ static int ext4_fill_super (struct super_block *sb, void *data, int silent)
2243 * so we can safely mount the rest of the filesystem now. 2247 * so we can safely mount the rest of the filesystem now.
2244 */ 2248 */
2245 2249
2246 root = iget(sb, EXT4_ROOT_INO); 2250 root = ext4_iget(sb, EXT4_ROOT_INO);
2247 sb->s_root = d_alloc_root(root); 2251 if (IS_ERR(root)) {
2248 if (!sb->s_root) {
2249 printk(KERN_ERR "EXT4-fs: get root inode failed\n"); 2252 printk(KERN_ERR "EXT4-fs: get root inode failed\n");
2250 iput(root); 2253 ret = PTR_ERR(root);
2251 goto failed_mount4; 2254 goto failed_mount4;
2252 } 2255 }
2253 if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) { 2256 if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
2254 dput(sb->s_root); 2257 iput(root);
2255 sb->s_root = NULL;
2256 printk(KERN_ERR "EXT4-fs: corrupt root inode, run e2fsck\n"); 2258 printk(KERN_ERR "EXT4-fs: corrupt root inode, run e2fsck\n");
2257 goto failed_mount4; 2259 goto failed_mount4;
2258 } 2260 }
2261 sb->s_root = d_alloc_root(root);
2262 if (!sb->s_root) {
2263 printk(KERN_ERR "EXT4-fs: get root dentry failed\n");
2264 iput(root);
2265 ret = -ENOMEM;
2266 goto failed_mount4;
2267 }
2259 2268
2260 ext4_setup_super (sb, es, sb->s_flags & MS_RDONLY); 2269 ext4_setup_super (sb, es, sb->s_flags & MS_RDONLY);
2261 2270
@@ -2336,7 +2345,7 @@ out_fail:
2336 sb->s_fs_info = NULL; 2345 sb->s_fs_info = NULL;
2337 kfree(sbi); 2346 kfree(sbi);
2338 lock_kernel(); 2347 lock_kernel();
2339 return -EINVAL; 2348 return ret;
2340} 2349}
2341 2350
2342/* 2351/*
@@ -2372,8 +2381,8 @@ static journal_t *ext4_get_journal(struct super_block *sb,
2372 * things happen if we iget() an unused inode, as the subsequent 2381 * things happen if we iget() an unused inode, as the subsequent
2373 * iput() will try to delete it. */ 2382 * iput() will try to delete it. */
2374 2383
2375 journal_inode = iget(sb, journal_inum); 2384 journal_inode = ext4_iget(sb, journal_inum);
2376 if (!journal_inode) { 2385 if (IS_ERR(journal_inode)) {
2377 printk(KERN_ERR "EXT4-fs: no journal found.\n"); 2386 printk(KERN_ERR "EXT4-fs: no journal found.\n");
2378 return NULL; 2387 return NULL;
2379 } 2388 }
@@ -2386,7 +2395,7 @@ static journal_t *ext4_get_journal(struct super_block *sb,
2386 2395
2387 jbd_debug(2, "Journal inode found at %p: %Ld bytes\n", 2396 jbd_debug(2, "Journal inode found at %p: %Ld bytes\n",
2388 journal_inode, journal_inode->i_size); 2397 journal_inode, journal_inode->i_size);
2389 if (is_bad_inode(journal_inode) || !S_ISREG(journal_inode->i_mode)) { 2398 if (!S_ISREG(journal_inode->i_mode)) {
2390 printk(KERN_ERR "EXT4-fs: invalid journal inode.\n"); 2399 printk(KERN_ERR "EXT4-fs: invalid journal inode.\n");
2391 iput(journal_inode); 2400 iput(journal_inode);
2392 return NULL; 2401 return NULL;
@@ -3149,16 +3158,16 @@ static int ext4_quota_on(struct super_block *sb, int type, int format_id,
3149 if (err) 3158 if (err)
3150 return err; 3159 return err;
3151 /* Quotafile not on the same filesystem? */ 3160 /* Quotafile not on the same filesystem? */
3152 if (nd.mnt->mnt_sb != sb) { 3161 if (nd.path.mnt->mnt_sb != sb) {
3153 path_release(&nd); 3162 path_put(&nd.path);
3154 return -EXDEV; 3163 return -EXDEV;
3155 } 3164 }
3156 /* Quotafile not of fs root? */ 3165 /* Quotafile not of fs root? */
3157 if (nd.dentry->d_parent->d_inode != sb->s_root->d_inode) 3166 if (nd.path.dentry->d_parent->d_inode != sb->s_root->d_inode)
3158 printk(KERN_WARNING 3167 printk(KERN_WARNING
3159 "EXT4-fs: Quota file not on filesystem root. " 3168 "EXT4-fs: Quota file not on filesystem root. "
3160 "Journalled quota will not work.\n"); 3169 "Journalled quota will not work.\n");
3161 path_release(&nd); 3170 path_put(&nd.path);
3162 return vfs_quota_on(sb, type, format_id, path); 3171 return vfs_quota_on(sb, type, format_id, path);
3163} 3172}
3164 3173
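
With ext4_iget() reporting errors through the returned pointer, the root-mount path in ext4_fill_super() can validate the root inode before allocating the root dentry, and the function finally propagates a meaningful error code via ret. Condensed (the real code jumps to failed_mount4 instead of returning directly):

    root = ext4_iget(sb, EXT4_ROOT_INO);
    if (IS_ERR(root))
            return PTR_ERR(root);              /* nothing half-built to unwind */
    if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
            iput(root);
            return -EINVAL;                    /* corrupt root inode, run e2fsck */
    }
    sb->s_root = d_alloc_root(root);
    if (!sb->s_root) {
            iput(root);
            return -ENOMEM;
    }
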
diff --git a/fs/fat/file.c b/fs/fat/file.c
index 69a83b59dce8..c614175876e0 100644
--- a/fs/fat/file.c
+++ b/fs/fat/file.c
@@ -155,6 +155,42 @@ out:
155 return err; 155 return err;
156} 156}
157 157
158static int check_mode(const struct msdos_sb_info *sbi, mode_t mode)
159{
160 mode_t req = mode & ~S_IFMT;
161
162 /*
163 * Of the r and x bits, all (subject to umask) must be present. Of the
164 * w bits, either all (subject to umask) or none must be present.
165 */
166
167 if (S_ISREG(mode)) {
168 req &= ~sbi->options.fs_fmask;
169
170 if ((req & (S_IRUGO | S_IXUGO)) !=
171 ((S_IRUGO | S_IXUGO) & ~sbi->options.fs_fmask))
172 return -EPERM;
173
174 if ((req & S_IWUGO) != 0 &&
175 (req & S_IWUGO) != (S_IWUGO & ~sbi->options.fs_fmask))
176 return -EPERM;
177 } else if (S_ISDIR(mode)) {
178 req &= ~sbi->options.fs_dmask;
179
180 if ((req & (S_IRUGO | S_IXUGO)) !=
181 ((S_IRUGO | S_IXUGO) & ~sbi->options.fs_dmask))
182 return -EPERM;
183
184 if ((req & S_IWUGO) != 0 &&
185 (req & S_IWUGO) != (S_IWUGO & ~sbi->options.fs_dmask))
186 return -EPERM;
187 } else {
188 return -EPERM;
189 }
190
191 return 0;
192}
193
158int fat_notify_change(struct dentry *dentry, struct iattr *attr) 194int fat_notify_change(struct dentry *dentry, struct iattr *attr)
159{ 195{
160 struct msdos_sb_info *sbi = MSDOS_SB(dentry->d_sb); 196 struct msdos_sb_info *sbi = MSDOS_SB(dentry->d_sb);
@@ -186,9 +222,7 @@ int fat_notify_change(struct dentry *dentry, struct iattr *attr)
186 if (((attr->ia_valid & ATTR_UID) && 222 if (((attr->ia_valid & ATTR_UID) &&
187 (attr->ia_uid != sbi->options.fs_uid)) || 223 (attr->ia_uid != sbi->options.fs_uid)) ||
188 ((attr->ia_valid & ATTR_GID) && 224 ((attr->ia_valid & ATTR_GID) &&
189 (attr->ia_gid != sbi->options.fs_gid)) || 225 (attr->ia_gid != sbi->options.fs_gid)))
190 ((attr->ia_valid & ATTR_MODE) &&
191 (attr->ia_mode & ~MSDOS_VALID_MODE)))
192 error = -EPERM; 226 error = -EPERM;
193 227
194 if (error) { 228 if (error) {
@@ -196,6 +230,13 @@ int fat_notify_change(struct dentry *dentry, struct iattr *attr)
196 error = 0; 230 error = 0;
197 goto out; 231 goto out;
198 } 232 }
233
234 if (attr->ia_valid & ATTR_MODE) {
235 error = check_mode(sbi, attr->ia_mode);
236 if (error != 0 && !sbi->options.quiet)
237 goto out;
238 }
239
199 error = inode_setattr(inode, attr); 240 error = inode_setattr(inode, attr);
200 if (error) 241 if (error)
201 goto out; 242 goto out;
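
check_mode() above encodes a simple rule: after the fmask/dmask is applied, every read and execute bit must survive, and the write bits must survive either all together or not at all. A standalone recreation of the regular-file case, assuming fs_fmask = 022, shows which chmod requests FAT will now accept:

    #include <stdio.h>

    #define FMASK 022

    static int check_reg_mode(unsigned int mode)
    {
            unsigned int req = (mode & 07777) & ~FMASK;

            if ((req & 0555) != (0555 & ~FMASK))    /* r and x bits must all be present */
                    return -1;
            if ((req & 0222) != 0 &&
                (req & 0222) != (0222 & ~FMASK))    /* w bits: all or nothing */
                    return -1;
            return 0;
    }

    int main(void)
    {
            unsigned int modes[] = { 0755, 0555, 0700, 0644 };

            for (unsigned int i = 0; i < 4; i++)
                    printf("%04o -> %s\n", modes[i],
                           check_reg_mode(modes[i]) ? "EPERM" : "ok");
            return 0;
    }

0755 and 0555 pass; 0700 and 0644 are refused because they would drop read or execute bits that the mask says every FAT file carries.
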
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index 920a576e1c25..53f3cf62b7c1 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -634,8 +634,6 @@ static const struct super_operations fat_sops = {
634 .clear_inode = fat_clear_inode, 634 .clear_inode = fat_clear_inode,
635 .remount_fs = fat_remount, 635 .remount_fs = fat_remount,
636 636
637 .read_inode = make_bad_inode,
638
639 .show_options = fat_show_options, 637 .show_options = fat_show_options,
640}; 638};
641 639
@@ -663,8 +661,8 @@ static struct dentry *fat_fh_to_dentry(struct super_block *sb,
663 if (fh_len < 5 || fh_type != 3) 661 if (fh_len < 5 || fh_type != 3)
664 return NULL; 662 return NULL;
665 663
666 inode = iget(sb, fh[0]); 664 inode = ilookup(sb, fh[0]);
667 if (!inode || is_bad_inode(inode) || inode->i_generation != fh[1]) { 665 if (!inode || inode->i_generation != fh[1]) {
668 if (inode) 666 if (inode)
669 iput(inode); 667 iput(inode);
670 inode = NULL; 668 inode = NULL;
@@ -760,7 +758,7 @@ static struct dentry *fat_get_parent(struct dentry *child)
760 inode = fat_build_inode(child->d_sb, de, i_pos); 758 inode = fat_build_inode(child->d_sb, de, i_pos);
761 brelse(bh); 759 brelse(bh);
762 if (IS_ERR(inode)) { 760 if (IS_ERR(inode)) {
763 parent = ERR_PTR(PTR_ERR(inode)); 761 parent = ERR_CAST(inode);
764 goto out; 762 goto out;
765 } 763 }
766 parent = d_alloc_anon(inode); 764 parent = d_alloc_anon(inode);
@@ -839,6 +837,8 @@ static int fat_show_options(struct seq_file *m, struct vfsmount *mnt)
839 if (!opts->numtail) 837 if (!opts->numtail)
840 seq_puts(m, ",nonumtail"); 838 seq_puts(m, ",nonumtail");
841 } 839 }
840 if (sbi->options.flush)
841 seq_puts(m, ",flush");
842 842
843 return 0; 843 return 0;
844} 844}
@@ -1295,10 +1295,8 @@ int fat_fill_super(struct super_block *sb, void *data, int silent,
1295 1295
1296 fsinfo = (struct fat_boot_fsinfo *)fsinfo_bh->b_data; 1296 fsinfo = (struct fat_boot_fsinfo *)fsinfo_bh->b_data;
1297 if (!IS_FSINFO(fsinfo)) { 1297 if (!IS_FSINFO(fsinfo)) {
1298 printk(KERN_WARNING 1298 printk(KERN_WARNING "FAT: Invalid FSINFO signature: "
1299 "FAT: Did not find valid FSINFO signature.\n" 1299 "0x%08x, 0x%08x (sector = %lu)\n",
1300 " Found signature1 0x%08x signature2 0x%08x"
1301 " (sector = %lu)\n",
1302 le32_to_cpu(fsinfo->signature1), 1300 le32_to_cpu(fsinfo->signature1),
1303 le32_to_cpu(fsinfo->signature2), 1301 le32_to_cpu(fsinfo->signature2),
1304 sbi->fsinfo_sector); 1302 sbi->fsinfo_sector);
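
Switching fat_fh_to_dentry() from iget() to ilookup() is deliberate: with ->read_inode gone there is no way to materialize a FAT inode from a bare inode number, so NFS file handles are only resolved while the inode is still in the inode cache. Roughly what the hunk above does:

    inode = ilookup(sb, fh[0]);               /* cache lookup only, no disk read */
    if (inode && inode->i_generation != fh[1]) {
            iput(inode);
            inode = NULL;                     /* stale handle */
    }
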
diff --git a/fs/fat/misc.c b/fs/fat/misc.c
index 308f2b6b5026..61f23511eacf 100644
--- a/fs/fat/misc.c
+++ b/fs/fat/misc.c
@@ -55,9 +55,8 @@ void fat_clusters_flush(struct super_block *sb)
55 fsinfo = (struct fat_boot_fsinfo *)bh->b_data; 55 fsinfo = (struct fat_boot_fsinfo *)bh->b_data;
56 /* Sanity check */ 56 /* Sanity check */
57 if (!IS_FSINFO(fsinfo)) { 57 if (!IS_FSINFO(fsinfo)) {
58 printk(KERN_ERR "FAT: Did not find valid FSINFO signature.\n" 58 printk(KERN_ERR "FAT: Invalid FSINFO signature: "
59 " Found signature1 0x%08x signature2 0x%08x" 59 "0x%08x, 0x%08x (sector = %lu)\n",
60 " (sector = %lu)\n",
61 le32_to_cpu(fsinfo->signature1), 60 le32_to_cpu(fsinfo->signature1),
62 le32_to_cpu(fsinfo->signature2), 61 le32_to_cpu(fsinfo->signature2),
63 sbi->fsinfo_sector); 62 sbi->fsinfo_sector);
diff --git a/fs/fcntl.c b/fs/fcntl.c
index 8685263ccc4a..e632da761fc1 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -24,7 +24,7 @@
24#include <asm/siginfo.h> 24#include <asm/siginfo.h>
25#include <asm/uaccess.h> 25#include <asm/uaccess.h>
26 26
27void fastcall set_close_on_exec(unsigned int fd, int flag) 27void set_close_on_exec(unsigned int fd, int flag)
28{ 28{
29 struct files_struct *files = current->files; 29 struct files_struct *files = current->files;
30 struct fdtable *fdt; 30 struct fdtable *fdt;
@@ -309,7 +309,7 @@ pid_t f_getown(struct file *filp)
309{ 309{
310 pid_t pid; 310 pid_t pid;
311 read_lock(&filp->f_owner.lock); 311 read_lock(&filp->f_owner.lock);
312 pid = pid_nr_ns(filp->f_owner.pid, current->nsproxy->pid_ns); 312 pid = pid_vnr(filp->f_owner.pid);
313 if (filp->f_owner.pid_type == PIDTYPE_PGID) 313 if (filp->f_owner.pid_type == PIDTYPE_PGID)
314 pid = -pid; 314 pid = -pid;
315 read_unlock(&filp->f_owner.lock); 315 read_unlock(&filp->f_owner.lock);
diff --git a/fs/file.c b/fs/file.c
index c5575de01113..5110acb1c9ef 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -24,6 +24,8 @@ struct fdtable_defer {
24 struct fdtable *next; 24 struct fdtable *next;
25}; 25};
26 26
27int sysctl_nr_open __read_mostly = 1024*1024;
28
27/* 29/*
28 * We use this list to defer free fdtables that have vmalloced 30 * We use this list to defer free fdtables that have vmalloced
29 * sets/arrays. By keeping a per-cpu list, we avoid having to embed 31 * sets/arrays. By keeping a per-cpu list, we avoid having to embed
@@ -147,8 +149,8 @@ static struct fdtable * alloc_fdtable(unsigned int nr)
147 nr /= (1024 / sizeof(struct file *)); 149 nr /= (1024 / sizeof(struct file *));
148 nr = roundup_pow_of_two(nr + 1); 150 nr = roundup_pow_of_two(nr + 1);
149 nr *= (1024 / sizeof(struct file *)); 151 nr *= (1024 / sizeof(struct file *));
150 if (nr > NR_OPEN) 152 if (nr > sysctl_nr_open)
151 nr = NR_OPEN; 153 nr = sysctl_nr_open;
152 154
153 fdt = kmalloc(sizeof(struct fdtable), GFP_KERNEL); 155 fdt = kmalloc(sizeof(struct fdtable), GFP_KERNEL);
154 if (!fdt) 156 if (!fdt)
@@ -233,7 +235,7 @@ int expand_files(struct files_struct *files, int nr)
233 if (nr < fdt->max_fds) 235 if (nr < fdt->max_fds)
234 return 0; 236 return 0;
235 /* Can we expand? */ 237 /* Can we expand? */
236 if (nr >= NR_OPEN) 238 if (nr >= sysctl_nr_open)
237 return -EMFILE; 239 return -EMFILE;
238 240
239 /* All good, so we try */ 241 /* All good, so we try */
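
The compile-time NR_OPEN ceiling is replaced by sysctl_nr_open (declared above with a default of 1024*1024, presumably wired to an fs sysctl elsewhere in the series). The sizing arithmetic in alloc_fdtable() rounds a requested descriptor count up so the struct file * array fills whole kilobytes in a power-of-two multiple of that unit; a hypothetical worked trace assuming 8-byte pointers:

/* alloc_fdtable() sizing for nr = 300 on a 64-bit box (illustrative only):
 *   1024 / sizeof(struct file *) = 128 descriptors per KiB of array
 *   nr /= 128                    ->   2
 *   roundup_pow_of_two(2 + 1)    ->   4
 *   nr *= 128                    -> 512 descriptors, i.e. a 4 KiB pointer array
 * with the result now clamped to sysctl_nr_open instead of NR_OPEN.
 */
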
diff --git a/fs/file_table.c b/fs/file_table.c
index 664e3f2309b8..6d27befe2d48 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -197,7 +197,7 @@ int init_file(struct file *file, struct vfsmount *mnt, struct dentry *dentry,
197} 197}
198EXPORT_SYMBOL(init_file); 198EXPORT_SYMBOL(init_file);
199 199
200void fastcall fput(struct file *file) 200void fput(struct file *file)
201{ 201{
202 if (atomic_dec_and_test(&file->f_count)) 202 if (atomic_dec_and_test(&file->f_count))
203 __fput(file); 203 __fput(file);
@@ -208,7 +208,7 @@ EXPORT_SYMBOL(fput);
208/* __fput is called from task context when aio completion releases the last 208/* __fput is called from task context when aio completion releases the last
209 * last use of a struct file *. Do not use otherwise. 209 * last use of a struct file *. Do not use otherwise.
210 */ 210 */
211void fastcall __fput(struct file *file) 211void __fput(struct file *file)
212{ 212{
213 struct dentry *dentry = file->f_path.dentry; 213 struct dentry *dentry = file->f_path.dentry;
214 struct vfsmount *mnt = file->f_path.mnt; 214 struct vfsmount *mnt = file->f_path.mnt;
@@ -241,7 +241,7 @@ void fastcall __fput(struct file *file)
241 mntput(mnt); 241 mntput(mnt);
242} 242}
243 243
244struct file fastcall *fget(unsigned int fd) 244struct file *fget(unsigned int fd)
245{ 245{
246 struct file *file; 246 struct file *file;
247 struct files_struct *files = current->files; 247 struct files_struct *files = current->files;
@@ -269,7 +269,7 @@ EXPORT_SYMBOL(fget);
269 * and a flag is returned to be passed to the corresponding fput_light(). 269 * and a flag is returned to be passed to the corresponding fput_light().
270 * There must not be a cloning between an fget_light/fput_light pair. 270 * There must not be a cloning between an fget_light/fput_light pair.
271 */ 271 */
272struct file fastcall *fget_light(unsigned int fd, int *fput_needed) 272struct file *fget_light(unsigned int fd, int *fput_needed)
273{ 273{
274 struct file *file; 274 struct file *file;
275 struct files_struct *files = current->files; 275 struct files_struct *files = current->files;
diff --git a/fs/freevxfs/vxfs_extern.h b/fs/freevxfs/vxfs_extern.h
index 91ccee8723f7..2b46064f66b2 100644
--- a/fs/freevxfs/vxfs_extern.h
+++ b/fs/freevxfs/vxfs_extern.h
@@ -58,7 +58,7 @@ extern struct inode * vxfs_get_fake_inode(struct super_block *,
58extern void vxfs_put_fake_inode(struct inode *); 58extern void vxfs_put_fake_inode(struct inode *);
59extern struct vxfs_inode_info * vxfs_blkiget(struct super_block *, u_long, ino_t); 59extern struct vxfs_inode_info * vxfs_blkiget(struct super_block *, u_long, ino_t);
60extern struct vxfs_inode_info * vxfs_stiget(struct super_block *, ino_t); 60extern struct vxfs_inode_info * vxfs_stiget(struct super_block *, ino_t);
61extern void vxfs_read_inode(struct inode *); 61extern struct inode * vxfs_iget(struct super_block *, ino_t);
62extern void vxfs_clear_inode(struct inode *); 62extern void vxfs_clear_inode(struct inode *);
63 63
64/* vxfs_lookup.c */ 64/* vxfs_lookup.c */
diff --git a/fs/freevxfs/vxfs_inode.c b/fs/freevxfs/vxfs_inode.c
index d1f7c5b5b3c3..ad88d2364bc2 100644
--- a/fs/freevxfs/vxfs_inode.c
+++ b/fs/freevxfs/vxfs_inode.c
@@ -129,7 +129,7 @@ fail:
129 * Description: 129 * Description:
130 * Search the for inode number @ino in the filesystem 130 * Search the for inode number @ino in the filesystem
131 * described by @sbp. Use the specified inode table (@ilistp). 131 * described by @sbp. Use the specified inode table (@ilistp).
132 * Returns the matching VxFS inode on success, else a NULL pointer. 132 * Returns the matching VxFS inode on success, else an error code.
133 */ 133 */
134static struct vxfs_inode_info * 134static struct vxfs_inode_info *
135__vxfs_iget(ino_t ino, struct inode *ilistp) 135__vxfs_iget(ino_t ino, struct inode *ilistp)
@@ -157,12 +157,12 @@ __vxfs_iget(ino_t ino, struct inode *ilistp)
157 } 157 }
158 158
159 printk(KERN_WARNING "vxfs: error on page %p\n", pp); 159 printk(KERN_WARNING "vxfs: error on page %p\n", pp);
160 return NULL; 160 return ERR_CAST(pp);
161 161
162fail: 162fail:
163 printk(KERN_WARNING "vxfs: unable to read inode %ld\n", (unsigned long)ino); 163 printk(KERN_WARNING "vxfs: unable to read inode %ld\n", (unsigned long)ino);
164 vxfs_put_page(pp); 164 vxfs_put_page(pp);
165 return NULL; 165 return ERR_PTR(-ENOMEM);
166} 166}
167 167
168/** 168/**
@@ -178,7 +178,10 @@ fail:
178struct vxfs_inode_info * 178struct vxfs_inode_info *
179vxfs_stiget(struct super_block *sbp, ino_t ino) 179vxfs_stiget(struct super_block *sbp, ino_t ino)
180{ 180{
181 return __vxfs_iget(ino, VXFS_SBI(sbp)->vsi_stilist); 181 struct vxfs_inode_info *vip;
182
183 vip = __vxfs_iget(ino, VXFS_SBI(sbp)->vsi_stilist);
184 return IS_ERR(vip) ? NULL : vip;
182} 185}
183 186
184/** 187/**
@@ -282,23 +285,32 @@ vxfs_put_fake_inode(struct inode *ip)
282} 285}
283 286
284/** 287/**
285 * vxfs_read_inode - fill in inode information 288 * vxfs_iget - get an inode
286 * @ip: inode pointer to fill 289 * @sbp: the superblock to get the inode for
290 * @ino: the number of the inode to get
287 * 291 *
288 * Description: 292 * Description:
289 * vxfs_read_inode reads the disk inode for @ip and fills 293 * vxfs_read_inode creates an inode, reads the disk inode for @ino and fills
290 * in all relevant fields in @ip. 294 * in all relevant fields in the new inode.
291 */ 295 */
292void 296struct inode *
293vxfs_read_inode(struct inode *ip) 297vxfs_iget(struct super_block *sbp, ino_t ino)
294{ 298{
295 struct super_block *sbp = ip->i_sb;
296 struct vxfs_inode_info *vip; 299 struct vxfs_inode_info *vip;
297 const struct address_space_operations *aops; 300 const struct address_space_operations *aops;
298 ino_t ino = ip->i_ino; 301 struct inode *ip;
299 302
300 if (!(vip = __vxfs_iget(ino, VXFS_SBI(sbp)->vsi_ilist))) 303 ip = iget_locked(sbp, ino);
301 return; 304 if (!ip)
305 return ERR_PTR(-ENOMEM);
306 if (!(ip->i_state & I_NEW))
307 return ip;
308
309 vip = __vxfs_iget(ino, VXFS_SBI(sbp)->vsi_ilist);
310 if (IS_ERR(vip)) {
311 iget_failed(ip);
312 return ERR_CAST(vip);
313 }
302 314
303 vxfs_iinit(ip, vip); 315 vxfs_iinit(ip, vip);
304 316
@@ -323,7 +335,8 @@ vxfs_read_inode(struct inode *ip)
323 } else 335 } else
324 init_special_inode(ip, ip->i_mode, old_decode_dev(vip->vii_rdev)); 336 init_special_inode(ip, ip->i_mode, old_decode_dev(vip->vii_rdev));
325 337
326 return; 338 unlock_new_inode(ip);
339 return ip;
327} 340}
328 341
329/** 342/**
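
This is the general shape of the iget() removal seen in several filesystems below: each gains a foo_iget() helper built on iget_locked(), reads the on-disk inode only when the cache entry is I_NEW, and reports failure as an ERR_PTR value instead of leaving a half-initialised inode behind. A condensed sketch under hypothetical foofs_* names (iget_locked(), iget_failed() and unlock_new_inode() are the real helpers used above):

struct inode *foofs_iget(struct super_block *sb, unsigned long ino)
{
	struct inode *inode;
	int err;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);	/* could not allocate/hash an inode */
	if (!(inode->i_state & I_NEW))
		return inode;			/* cache hit, already initialised */

	err = foofs_read_inode_from_disk(inode);	/* hypothetical per-fs reader */
	if (err) {
		iget_failed(inode);		/* unhashes, unlocks and drops the inode */
		return ERR_PTR(err);
	}

	unlock_new_inode(inode);		/* clears I_NEW, wakes waiters */
	return inode;
}
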
diff --git a/fs/freevxfs/vxfs_lookup.c b/fs/freevxfs/vxfs_lookup.c
index bf86e5444ea6..aee049cb9f84 100644
--- a/fs/freevxfs/vxfs_lookup.c
+++ b/fs/freevxfs/vxfs_lookup.c
@@ -213,10 +213,10 @@ vxfs_lookup(struct inode *dip, struct dentry *dp, struct nameidata *nd)
213 lock_kernel(); 213 lock_kernel();
214 ino = vxfs_inode_by_name(dip, dp); 214 ino = vxfs_inode_by_name(dip, dp);
215 if (ino) { 215 if (ino) {
216 ip = iget(dip->i_sb, ino); 216 ip = vxfs_iget(dip->i_sb, ino);
217 if (!ip) { 217 if (IS_ERR(ip)) {
218 unlock_kernel(); 218 unlock_kernel();
219 return ERR_PTR(-EACCES); 219 return ERR_CAST(ip);
220 } 220 }
221 } 221 }
222 unlock_kernel(); 222 unlock_kernel();
diff --git a/fs/freevxfs/vxfs_super.c b/fs/freevxfs/vxfs_super.c
index 4f95572d2722..1dacda831577 100644
--- a/fs/freevxfs/vxfs_super.c
+++ b/fs/freevxfs/vxfs_super.c
@@ -60,7 +60,6 @@ static int vxfs_statfs(struct dentry *, struct kstatfs *);
60static int vxfs_remount(struct super_block *, int *, char *); 60static int vxfs_remount(struct super_block *, int *, char *);
61 61
62static const struct super_operations vxfs_super_ops = { 62static const struct super_operations vxfs_super_ops = {
63 .read_inode = vxfs_read_inode,
64 .clear_inode = vxfs_clear_inode, 63 .clear_inode = vxfs_clear_inode,
65 .put_super = vxfs_put_super, 64 .put_super = vxfs_put_super,
66 .statfs = vxfs_statfs, 65 .statfs = vxfs_statfs,
@@ -153,6 +152,7 @@ static int vxfs_fill_super(struct super_block *sbp, void *dp, int silent)
153 struct buffer_head *bp = NULL; 152 struct buffer_head *bp = NULL;
154 u_long bsize; 153 u_long bsize;
155 struct inode *root; 154 struct inode *root;
155 int ret = -EINVAL;
156 156
157 sbp->s_flags |= MS_RDONLY; 157 sbp->s_flags |= MS_RDONLY;
158 158
@@ -219,7 +219,11 @@ static int vxfs_fill_super(struct super_block *sbp, void *dp, int silent)
219 } 219 }
220 220
221 sbp->s_op = &vxfs_super_ops; 221 sbp->s_op = &vxfs_super_ops;
222 root = iget(sbp, VXFS_ROOT_INO); 222 root = vxfs_iget(sbp, VXFS_ROOT_INO);
223 if (IS_ERR(root)) {
224 ret = PTR_ERR(root);
225 goto out;
226 }
223 sbp->s_root = d_alloc_root(root); 227 sbp->s_root = d_alloc_root(root);
224 if (!sbp->s_root) { 228 if (!sbp->s_root) {
225 iput(root); 229 iput(root);
@@ -236,7 +240,7 @@ out_free_ilist:
236out: 240out:
237 brelse(bp); 241 brelse(bp);
238 kfree(infp); 242 kfree(infp);
239 return -EINVAL; 243 return ret;
240} 244}
241 245
242/* 246/*
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 300324bd563c..c0076077d338 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -284,7 +284,17 @@ __sync_single_inode(struct inode *inode, struct writeback_control *wbc)
284 * soon as the queue becomes uncongested. 284 * soon as the queue becomes uncongested.
285 */ 285 */
286 inode->i_state |= I_DIRTY_PAGES; 286 inode->i_state |= I_DIRTY_PAGES;
287 requeue_io(inode); 287 if (wbc->nr_to_write <= 0) {
288 /*
289 * slice used up: queue for next turn
290 */
291 requeue_io(inode);
292 } else {
293 /*
294 * somehow blocked: retry later
295 */
296 redirty_tail(inode);
297 }
288 } else { 298 } else {
289 /* 299 /*
290 * Otherwise fully redirty the inode so that 300 * Otherwise fully redirty the inode so that
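
The unconditional requeue_io() is split into two cases: if wbc->nr_to_write is exhausted the inode merely used up its slice and goes back on s_more_io for the next pass; otherwise writeback made no progress for some other reason (such as the congestion mentioned above) and the inode is redirtied to the tail to be retried later rather than spun on. Condensed, the new decision reads roughly as (hedged paraphrase, not a drop-in replacement):

	inode->i_state |= I_DIRTY_PAGES;	/* pages remain after this pass */
	if (wbc->nr_to_write <= 0)
		requeue_io(inode);		/* slice used up: next s_more_io turn */
	else
		redirty_tail(inode);		/* somehow blocked: retry later */
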
@@ -334,9 +344,6 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
334 WARN_ON(inode->i_state & I_WILL_FREE); 344 WARN_ON(inode->i_state & I_WILL_FREE);
335 345
336 if ((wbc->sync_mode != WB_SYNC_ALL) && (inode->i_state & I_SYNC)) { 346 if ((wbc->sync_mode != WB_SYNC_ALL) && (inode->i_state & I_SYNC)) {
337 struct address_space *mapping = inode->i_mapping;
338 int ret;
339
340 /* 347 /*
341 * We're skipping this inode because it's locked, and we're not 348 * We're skipping this inode because it's locked, and we're not
342 * doing writeback-for-data-integrity. Move it to s_more_io so 349 * doing writeback-for-data-integrity. Move it to s_more_io so
@@ -345,15 +352,7 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
345 * completed a full scan of s_io. 352 * completed a full scan of s_io.
346 */ 353 */
347 requeue_io(inode); 354 requeue_io(inode);
348 355 return 0;
349 /*
350 * Even if we don't actually write the inode itself here,
351 * we can at least start some of the data writeout..
352 */
353 spin_unlock(&inode_lock);
354 ret = do_writepages(mapping, wbc);
355 spin_lock(&inode_lock);
356 return ret;
357 } 356 }
358 357
359 /* 358 /*
@@ -479,8 +478,12 @@ sync_sb_inodes(struct super_block *sb, struct writeback_control *wbc)
479 iput(inode); 478 iput(inode);
480 cond_resched(); 479 cond_resched();
481 spin_lock(&inode_lock); 480 spin_lock(&inode_lock);
482 if (wbc->nr_to_write <= 0) 481 if (wbc->nr_to_write <= 0) {
482 wbc->more_io = 1;
483 break; 483 break;
484 }
485 if (!list_empty(&sb->s_more_io))
486 wbc->more_io = 1;
484 } 487 }
485 return; /* Leave any unwritten inodes on s_io */ 488 return; /* Leave any unwritten inodes on s_io */
486} 489}
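
The wbc->more_io flag set here lets a caller distinguish "nothing left to write" from "budget ran out or inodes were parked on s_more_io". A hypothetical caller loop showing how the flag would be consumed (illustrative only, not the actual pdflush code; congestion_wait() is used with its signature from this era):

	struct writeback_control wbc = {
		.sync_mode	= WB_SYNC_NONE,
		.range_start	= 0,
		.range_end	= LLONG_MAX,
	};

	for (;;) {
		wbc.more_io = 0;
		wbc.nr_to_write = 1024;		/* MAX_WRITEBACK_PAGES-style batch */
		writeback_inodes(&wbc);
		if (wbc.nr_to_write > 0 && !wbc.more_io)
			break;			/* truly clean: stop */
		congestion_wait(WRITE, HZ / 10);	/* more work pending: back off, retry */
	}
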
@@ -512,8 +515,7 @@ writeback_inodes(struct writeback_control *wbc)
512 might_sleep(); 515 might_sleep();
513 spin_lock(&sb_lock); 516 spin_lock(&sb_lock);
514restart: 517restart:
515 sb = sb_entry(super_blocks.prev); 518 list_for_each_entry_reverse(sb, &super_blocks, s_list) {
516 for (; sb != sb_entry(&super_blocks); sb = sb_entry(sb->s_list.prev)) {
517 if (sb_has_dirty_inodes(sb)) { 519 if (sb_has_dirty_inodes(sb)) {
518 /* we're making our own get_super here */ 520 /* we're making our own get_super here */
519 sb->s_count++; 521 sb->s_count++;
@@ -578,10 +580,8 @@ static void set_sb_syncing(int val)
578{ 580{
579 struct super_block *sb; 581 struct super_block *sb;
580 spin_lock(&sb_lock); 582 spin_lock(&sb_lock);
581 sb = sb_entry(super_blocks.prev); 583 list_for_each_entry_reverse(sb, &super_blocks, s_list)
582 for (; sb != sb_entry(&super_blocks); sb = sb_entry(sb->s_list.prev)) {
583 sb->s_syncing = val; 584 sb->s_syncing = val;
584 }
585 spin_unlock(&sb_lock); 585 spin_unlock(&sb_lock);
586} 586}
587 587
@@ -655,7 +655,7 @@ int write_inode_now(struct inode *inode, int sync)
655 int ret; 655 int ret;
656 struct writeback_control wbc = { 656 struct writeback_control wbc = {
657 .nr_to_write = LONG_MAX, 657 .nr_to_write = LONG_MAX,
658 .sync_mode = WB_SYNC_ALL, 658 .sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
659 .range_start = 0, 659 .range_start = 0,
660 .range_end = LLONG_MAX, 660 .range_end = LLONG_MAX,
661 }; 661 };
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index db534bcde45f..af639807524e 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -201,6 +201,55 @@ void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
201 } 201 }
202} 202}
203 203
204static unsigned len_args(unsigned numargs, struct fuse_arg *args)
205{
206 unsigned nbytes = 0;
207 unsigned i;
208
209 for (i = 0; i < numargs; i++)
210 nbytes += args[i].size;
211
212 return nbytes;
213}
214
215static u64 fuse_get_unique(struct fuse_conn *fc)
216{
217 fc->reqctr++;
218 /* zero is special */
219 if (fc->reqctr == 0)
220 fc->reqctr = 1;
221
222 return fc->reqctr;
223}
224
225static void queue_request(struct fuse_conn *fc, struct fuse_req *req)
226{
227 req->in.h.unique = fuse_get_unique(fc);
228 req->in.h.len = sizeof(struct fuse_in_header) +
229 len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
230 list_add_tail(&req->list, &fc->pending);
231 req->state = FUSE_REQ_PENDING;
232 if (!req->waiting) {
233 req->waiting = 1;
234 atomic_inc(&fc->num_waiting);
235 }
236 wake_up(&fc->waitq);
237 kill_fasync(&fc->fasync, SIGIO, POLL_IN);
238}
239
240static void flush_bg_queue(struct fuse_conn *fc)
241{
242 while (fc->active_background < FUSE_MAX_BACKGROUND &&
243 !list_empty(&fc->bg_queue)) {
244 struct fuse_req *req;
245
246 req = list_entry(fc->bg_queue.next, struct fuse_req, list);
247 list_del(&req->list);
248 fc->active_background++;
249 queue_request(fc, req);
250 }
251}
252
204/* 253/*
205 * This function is called when a request is finished. Either a reply 254 * This function is called when a request is finished. Either a reply
206 * has arrived or it was aborted (and not yet sent) or some error 255 * has arrived or it was aborted (and not yet sent) or some error
@@ -229,6 +278,8 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req)
229 clear_bdi_congested(&fc->bdi, WRITE); 278 clear_bdi_congested(&fc->bdi, WRITE);
230 } 279 }
231 fc->num_background--; 280 fc->num_background--;
281 fc->active_background--;
282 flush_bg_queue(fc);
232 } 283 }
233 spin_unlock(&fc->lock); 284 spin_unlock(&fc->lock);
234 wake_up(&req->waitq); 285 wake_up(&req->waitq);
@@ -320,42 +371,6 @@ static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
320 } 371 }
321} 372}
322 373
323static unsigned len_args(unsigned numargs, struct fuse_arg *args)
324{
325 unsigned nbytes = 0;
326 unsigned i;
327
328 for (i = 0; i < numargs; i++)
329 nbytes += args[i].size;
330
331 return nbytes;
332}
333
334static u64 fuse_get_unique(struct fuse_conn *fc)
335 {
336 fc->reqctr++;
337 /* zero is special */
338 if (fc->reqctr == 0)
339 fc->reqctr = 1;
340
341 return fc->reqctr;
342}
343
344static void queue_request(struct fuse_conn *fc, struct fuse_req *req)
345{
346 req->in.h.unique = fuse_get_unique(fc);
347 req->in.h.len = sizeof(struct fuse_in_header) +
348 len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
349 list_add_tail(&req->list, &fc->pending);
350 req->state = FUSE_REQ_PENDING;
351 if (!req->waiting) {
352 req->waiting = 1;
353 atomic_inc(&fc->num_waiting);
354 }
355 wake_up(&fc->waitq);
356 kill_fasync(&fc->fasync, SIGIO, POLL_IN);
357}
358
359void request_send(struct fuse_conn *fc, struct fuse_req *req) 374void request_send(struct fuse_conn *fc, struct fuse_req *req)
360{ 375{
361 req->isreply = 1; 376 req->isreply = 1;
@@ -375,20 +390,26 @@ void request_send(struct fuse_conn *fc, struct fuse_req *req)
375 spin_unlock(&fc->lock); 390 spin_unlock(&fc->lock);
376} 391}
377 392
393static void request_send_nowait_locked(struct fuse_conn *fc,
394 struct fuse_req *req)
395{
396 req->background = 1;
397 fc->num_background++;
398 if (fc->num_background == FUSE_MAX_BACKGROUND)
399 fc->blocked = 1;
400 if (fc->num_background == FUSE_CONGESTION_THRESHOLD) {
401 set_bdi_congested(&fc->bdi, READ);
402 set_bdi_congested(&fc->bdi, WRITE);
403 }
404 list_add_tail(&req->list, &fc->bg_queue);
405 flush_bg_queue(fc);
406}
407
378static void request_send_nowait(struct fuse_conn *fc, struct fuse_req *req) 408static void request_send_nowait(struct fuse_conn *fc, struct fuse_req *req)
379{ 409{
380 spin_lock(&fc->lock); 410 spin_lock(&fc->lock);
381 if (fc->connected) { 411 if (fc->connected) {
382 req->background = 1; 412 request_send_nowait_locked(fc, req);
383 fc->num_background++;
384 if (fc->num_background == FUSE_MAX_BACKGROUND)
385 fc->blocked = 1;
386 if (fc->num_background == FUSE_CONGESTION_THRESHOLD) {
387 set_bdi_congested(&fc->bdi, READ);
388 set_bdi_congested(&fc->bdi, WRITE);
389 }
390
391 queue_request(fc, req);
392 spin_unlock(&fc->lock); 413 spin_unlock(&fc->lock);
393 } else { 414 } else {
394 req->out.h.error = -ENOTCONN; 415 req->out.h.error = -ENOTCONN;
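
Background requests are no longer handed to userspace directly: they park on fc->bg_queue, flush_bg_queue() releases them to the pending list only while active_background stays under FUSE_MAX_BACKGROUND, and request_end() frees a slot and flushes again. The same bounded-dispatch pattern in generic form (struct conn, struct request, MAX_ACTIVE and dispatch() are illustrative stand-ins, not the fuse types):

#include <linux/list.h>

#define MAX_ACTIVE 12				/* mirrors FUSE_MAX_BACKGROUND */

struct request { struct list_head list; };
struct conn { unsigned active; struct list_head backlog; };

static void dispatch(struct conn *c, struct request *r);	/* consumer hand-off */

static void flush_queue(struct conn *c)
{
	while (c->active < MAX_ACTIVE && !list_empty(&c->backlog)) {
		struct request *r = list_first_entry(&c->backlog,
						     struct request, list);

		list_del(&r->list);
		c->active++;
		dispatch(c, r);			/* hand the request to the consumer */
	}
}

static void request_done(struct conn *c, struct request *r)
{
	c->active--;
	flush_queue(c);				/* a backlogged request takes the freed slot */
}
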
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 80d2f5292cf9..7fb514b6d852 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -269,12 +269,12 @@ static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry,
269 269
270 req = fuse_get_req(fc); 270 req = fuse_get_req(fc);
271 if (IS_ERR(req)) 271 if (IS_ERR(req))
272 return ERR_PTR(PTR_ERR(req)); 272 return ERR_CAST(req);
273 273
274 forget_req = fuse_get_req(fc); 274 forget_req = fuse_get_req(fc);
275 if (IS_ERR(forget_req)) { 275 if (IS_ERR(forget_req)) {
276 fuse_put_request(fc, req); 276 fuse_put_request(fc, req);
277 return ERR_PTR(PTR_ERR(forget_req)); 277 return ERR_CAST(forget_req);
278 } 278 }
279 279
280 attr_version = fuse_get_attr_version(fc); 280 attr_version = fuse_get_attr_version(fc);
@@ -416,6 +416,7 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, int mode,
416 fuse_put_request(fc, forget_req); 416 fuse_put_request(fc, forget_req);
417 d_instantiate(entry, inode); 417 d_instantiate(entry, inode);
418 fuse_change_entry_timeout(entry, &outentry); 418 fuse_change_entry_timeout(entry, &outentry);
419 fuse_invalidate_attr(dir);
419 file = lookup_instantiate_filp(nd, entry, generic_file_open); 420 file = lookup_instantiate_filp(nd, entry, generic_file_open);
420 if (IS_ERR(file)) { 421 if (IS_ERR(file)) {
421 ff->fh = outopen.fh; 422 ff->fh = outopen.fh;
@@ -1005,7 +1006,7 @@ static char *read_link(struct dentry *dentry)
1005 char *link; 1006 char *link;
1006 1007
1007 if (IS_ERR(req)) 1008 if (IS_ERR(req))
1008 return ERR_PTR(PTR_ERR(req)); 1009 return ERR_CAST(req);
1009 1010
1010 link = (char *) __get_free_page(GFP_KERNEL); 1011 link = (char *) __get_free_page(GFP_KERNEL);
1011 if (!link) { 1012 if (!link) {
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index bb05d227cf30..676b0bc8a86d 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -77,8 +77,8 @@ static struct fuse_file *fuse_file_get(struct fuse_file *ff)
77 77
78static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req) 78static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req)
79{ 79{
80 dput(req->dentry); 80 dput(req->misc.release.dentry);
81 mntput(req->vfsmount); 81 mntput(req->misc.release.vfsmount);
82 fuse_put_request(fc, req); 82 fuse_put_request(fc, req);
83} 83}
84 84
@@ -86,7 +86,8 @@ static void fuse_file_put(struct fuse_file *ff)
86{ 86{
87 if (atomic_dec_and_test(&ff->count)) { 87 if (atomic_dec_and_test(&ff->count)) {
88 struct fuse_req *req = ff->reserved_req; 88 struct fuse_req *req = ff->reserved_req;
89 struct fuse_conn *fc = get_fuse_conn(req->dentry->d_inode); 89 struct inode *inode = req->misc.release.dentry->d_inode;
90 struct fuse_conn *fc = get_fuse_conn(inode);
90 req->end = fuse_release_end; 91 req->end = fuse_release_end;
91 request_send_background(fc, req); 92 request_send_background(fc, req);
92 kfree(ff); 93 kfree(ff);
@@ -137,7 +138,7 @@ int fuse_open_common(struct inode *inode, struct file *file, int isdir)
137void fuse_release_fill(struct fuse_file *ff, u64 nodeid, int flags, int opcode) 138void fuse_release_fill(struct fuse_file *ff, u64 nodeid, int flags, int opcode)
138{ 139{
139 struct fuse_req *req = ff->reserved_req; 140 struct fuse_req *req = ff->reserved_req;
140 struct fuse_release_in *inarg = &req->misc.release_in; 141 struct fuse_release_in *inarg = &req->misc.release.in;
141 142
142 inarg->fh = ff->fh; 143 inarg->fh = ff->fh;
143 inarg->flags = flags; 144 inarg->flags = flags;
@@ -153,13 +154,14 @@ int fuse_release_common(struct inode *inode, struct file *file, int isdir)
153 struct fuse_file *ff = file->private_data; 154 struct fuse_file *ff = file->private_data;
154 if (ff) { 155 if (ff) {
155 struct fuse_conn *fc = get_fuse_conn(inode); 156 struct fuse_conn *fc = get_fuse_conn(inode);
157 struct fuse_req *req = ff->reserved_req;
156 158
157 fuse_release_fill(ff, get_node_id(inode), file->f_flags, 159 fuse_release_fill(ff, get_node_id(inode), file->f_flags,
158 isdir ? FUSE_RELEASEDIR : FUSE_RELEASE); 160 isdir ? FUSE_RELEASEDIR : FUSE_RELEASE);
159 161
160 /* Hold vfsmount and dentry until release is finished */ 162 /* Hold vfsmount and dentry until release is finished */
161 ff->reserved_req->vfsmount = mntget(file->f_path.mnt); 163 req->misc.release.vfsmount = mntget(file->f_path.mnt);
162 ff->reserved_req->dentry = dget(file->f_path.dentry); 164 req->misc.release.dentry = dget(file->f_path.dentry);
163 165
164 spin_lock(&fc->lock); 166 spin_lock(&fc->lock);
165 list_del(&ff->write_entry); 167 list_del(&ff->write_entry);
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index 3ab8a3048e8b..67aaf6ee38ea 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -215,7 +215,11 @@ struct fuse_req {
215 /** Data for asynchronous requests */ 215 /** Data for asynchronous requests */
216 union { 216 union {
217 struct fuse_forget_in forget_in; 217 struct fuse_forget_in forget_in;
218 struct fuse_release_in release_in; 218 struct {
219 struct fuse_release_in in;
220 struct vfsmount *vfsmount;
221 struct dentry *dentry;
222 } release;
219 struct fuse_init_in init_in; 223 struct fuse_init_in init_in;
220 struct fuse_init_out init_out; 224 struct fuse_init_out init_out;
221 struct fuse_read_in read_in; 225 struct fuse_read_in read_in;
@@ -238,12 +242,6 @@ struct fuse_req {
238 /** File used in the request (or NULL) */ 242 /** File used in the request (or NULL) */
239 struct fuse_file *ff; 243 struct fuse_file *ff;
240 244
241 /** vfsmount used in release */
242 struct vfsmount *vfsmount;
243
244 /** dentry used in release */
245 struct dentry *dentry;
246
247 /** Request completion callback */ 245 /** Request completion callback */
248 void (*end)(struct fuse_conn *, struct fuse_req *); 246 void (*end)(struct fuse_conn *, struct fuse_req *);
249 247
@@ -298,6 +296,12 @@ struct fuse_conn {
298 /** Number of requests currently in the background */ 296 /** Number of requests currently in the background */
299 unsigned num_background; 297 unsigned num_background;
300 298
299 /** Number of background requests currently queued for userspace */
300 unsigned active_background;
301
302 /** The list of background requests set aside for later queuing */
303 struct list_head bg_queue;
304
301 /** Pending interrupts */ 305 /** Pending interrupts */
302 struct list_head interrupts; 306 struct list_head interrupts;
303 307
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index e5e80d1a4687..033f7bdd47e8 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -29,6 +29,8 @@ DEFINE_MUTEX(fuse_mutex);
29 29
30#define FUSE_SUPER_MAGIC 0x65735546 30#define FUSE_SUPER_MAGIC 0x65735546
31 31
32#define FUSE_DEFAULT_BLKSIZE 512
33
32struct fuse_mount_data { 34struct fuse_mount_data {
33 int fd; 35 int fd;
34 unsigned rootmode; 36 unsigned rootmode;
@@ -76,11 +78,6 @@ static void fuse_destroy_inode(struct inode *inode)
76 kmem_cache_free(fuse_inode_cachep, inode); 78 kmem_cache_free(fuse_inode_cachep, inode);
77} 79}
78 80
79static void fuse_read_inode(struct inode *inode)
80{
81 /* No op */
82}
83
84void fuse_send_forget(struct fuse_conn *fc, struct fuse_req *req, 81void fuse_send_forget(struct fuse_conn *fc, struct fuse_req *req,
85 unsigned long nodeid, u64 nlookup) 82 unsigned long nodeid, u64 nlookup)
86{ 83{
@@ -360,7 +357,7 @@ static int parse_fuse_opt(char *opt, struct fuse_mount_data *d, int is_bdev)
360 char *p; 357 char *p;
361 memset(d, 0, sizeof(struct fuse_mount_data)); 358 memset(d, 0, sizeof(struct fuse_mount_data));
362 d->max_read = ~0; 359 d->max_read = ~0;
363 d->blksize = 512; 360 d->blksize = FUSE_DEFAULT_BLKSIZE;
364 361
365 while ((p = strsep(&opt, ",")) != NULL) { 362 while ((p = strsep(&opt, ",")) != NULL) {
366 int token; 363 int token;
@@ -445,6 +442,9 @@ static int fuse_show_options(struct seq_file *m, struct vfsmount *mnt)
445 seq_puts(m, ",allow_other"); 442 seq_puts(m, ",allow_other");
446 if (fc->max_read != ~0) 443 if (fc->max_read != ~0)
447 seq_printf(m, ",max_read=%u", fc->max_read); 444 seq_printf(m, ",max_read=%u", fc->max_read);
445 if (mnt->mnt_sb->s_bdev &&
446 mnt->mnt_sb->s_blocksize != FUSE_DEFAULT_BLKSIZE)
447 seq_printf(m, ",blksize=%lu", mnt->mnt_sb->s_blocksize);
448 return 0; 448 return 0;
449} 449}
450 450
@@ -465,6 +465,7 @@ static struct fuse_conn *new_conn(void)
465 INIT_LIST_HEAD(&fc->processing); 465 INIT_LIST_HEAD(&fc->processing);
466 INIT_LIST_HEAD(&fc->io); 466 INIT_LIST_HEAD(&fc->io);
467 INIT_LIST_HEAD(&fc->interrupts); 467 INIT_LIST_HEAD(&fc->interrupts);
468 INIT_LIST_HEAD(&fc->bg_queue);
468 atomic_set(&fc->num_waiting, 0); 469 atomic_set(&fc->num_waiting, 0);
469 fc->bdi.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE; 470 fc->bdi.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
470 fc->bdi.unplug_io_fn = default_unplug_io_fn; 471 fc->bdi.unplug_io_fn = default_unplug_io_fn;
@@ -514,7 +515,6 @@ static struct inode *get_root_inode(struct super_block *sb, unsigned mode)
514static const struct super_operations fuse_super_operations = { 515static const struct super_operations fuse_super_operations = {
515 .alloc_inode = fuse_alloc_inode, 516 .alloc_inode = fuse_alloc_inode,
516 .destroy_inode = fuse_destroy_inode, 517 .destroy_inode = fuse_destroy_inode,
517 .read_inode = fuse_read_inode,
518 .clear_inode = fuse_clear_inode, 518 .clear_inode = fuse_clear_inode,
519 .drop_inode = generic_delete_inode, 519 .drop_inode = generic_delete_inode,
520 .remount_fs = fuse_remount_fs, 520 .remount_fs = fuse_remount_fs,
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index e4effc47abfc..e9456ebd3bb6 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -932,7 +932,7 @@ static int gfs2_block_truncate_page(struct address_space *mapping)
932 if (!gfs2_is_writeback(ip)) 932 if (!gfs2_is_writeback(ip))
933 gfs2_trans_add_bh(ip->i_gl, bh, 0); 933 gfs2_trans_add_bh(ip->i_gl, bh, 0);
934 934
935 zero_user_page(page, offset, length, KM_USER0); 935 zero_user(page, offset, length);
936 936
937unlock: 937unlock:
938 unlock_page(page); 938 unlock_page(page);
diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c
index 57e2ed932adc..c34709512b19 100644
--- a/fs/gfs2/dir.c
+++ b/fs/gfs2/dir.c
@@ -1498,7 +1498,7 @@ struct inode *gfs2_dir_search(struct inode *dir, const struct qstr *name)
1498 dent = gfs2_dirent_search(dir, name, gfs2_dirent_find, &bh); 1498 dent = gfs2_dirent_search(dir, name, gfs2_dirent_find, &bh);
1499 if (dent) { 1499 if (dent) {
1500 if (IS_ERR(dent)) 1500 if (IS_ERR(dent))
1501 return ERR_PTR(PTR_ERR(dent)); 1501 return ERR_CAST(dent);
1502 inode = gfs2_inode_lookup(dir->i_sb, 1502 inode = gfs2_inode_lookup(dir->i_sb,
1503 be16_to_cpu(dent->de_type), 1503 be16_to_cpu(dent->de_type),
1504 be64_to_cpu(dent->de_inum.no_addr), 1504 be64_to_cpu(dent->de_inum.no_addr),
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 80e09c50590a..7175a4d06435 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -334,7 +334,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
334 gl->gl_state = LM_ST_UNLOCKED; 334 gl->gl_state = LM_ST_UNLOCKED;
335 gl->gl_demote_state = LM_ST_EXCLUSIVE; 335 gl->gl_demote_state = LM_ST_EXCLUSIVE;
336 gl->gl_hash = hash; 336 gl->gl_hash = hash;
337 gl->gl_owner_pid = 0; 337 gl->gl_owner_pid = NULL;
338 gl->gl_ip = 0; 338 gl->gl_ip = 0;
339 gl->gl_ops = glops; 339 gl->gl_ops = glops;
340 gl->gl_req_gh = NULL; 340 gl->gl_req_gh = NULL;
@@ -399,7 +399,7 @@ void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
399 INIT_LIST_HEAD(&gh->gh_list); 399 INIT_LIST_HEAD(&gh->gh_list);
400 gh->gh_gl = gl; 400 gh->gh_gl = gl;
401 gh->gh_ip = (unsigned long)__builtin_return_address(0); 401 gh->gh_ip = (unsigned long)__builtin_return_address(0);
402 gh->gh_owner_pid = current->pid; 402 gh->gh_owner_pid = get_pid(task_pid(current));
403 gh->gh_state = state; 403 gh->gh_state = state;
404 gh->gh_flags = flags; 404 gh->gh_flags = flags;
405 gh->gh_error = 0; 405 gh->gh_error = 0;
@@ -433,6 +433,7 @@ void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *
433 433
434void gfs2_holder_uninit(struct gfs2_holder *gh) 434void gfs2_holder_uninit(struct gfs2_holder *gh)
435{ 435{
436 put_pid(gh->gh_owner_pid);
436 gfs2_glock_put(gh->gh_gl); 437 gfs2_glock_put(gh->gh_gl);
437 gh->gh_gl = NULL; 438 gh->gh_gl = NULL;
438 gh->gh_ip = 0; 439 gh->gh_ip = 0;
@@ -631,7 +632,7 @@ static void gfs2_glmutex_lock(struct gfs2_glock *gl)
631 wait_on_holder(&gh); 632 wait_on_holder(&gh);
632 gfs2_holder_uninit(&gh); 633 gfs2_holder_uninit(&gh);
633 } else { 634 } else {
634 gl->gl_owner_pid = current->pid; 635 gl->gl_owner_pid = get_pid(task_pid(current));
635 gl->gl_ip = (unsigned long)__builtin_return_address(0); 636 gl->gl_ip = (unsigned long)__builtin_return_address(0);
636 spin_unlock(&gl->gl_spin); 637 spin_unlock(&gl->gl_spin);
637 } 638 }
@@ -652,7 +653,7 @@ static int gfs2_glmutex_trylock(struct gfs2_glock *gl)
652 if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) { 653 if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
653 acquired = 0; 654 acquired = 0;
654 } else { 655 } else {
655 gl->gl_owner_pid = current->pid; 656 gl->gl_owner_pid = get_pid(task_pid(current));
656 gl->gl_ip = (unsigned long)__builtin_return_address(0); 657 gl->gl_ip = (unsigned long)__builtin_return_address(0);
657 } 658 }
658 spin_unlock(&gl->gl_spin); 659 spin_unlock(&gl->gl_spin);
@@ -668,12 +669,17 @@ static int gfs2_glmutex_trylock(struct gfs2_glock *gl)
668 669
669static void gfs2_glmutex_unlock(struct gfs2_glock *gl) 670static void gfs2_glmutex_unlock(struct gfs2_glock *gl)
670{ 671{
672 struct pid *pid;
673
671 spin_lock(&gl->gl_spin); 674 spin_lock(&gl->gl_spin);
672 clear_bit(GLF_LOCK, &gl->gl_flags); 675 clear_bit(GLF_LOCK, &gl->gl_flags);
673 gl->gl_owner_pid = 0; 676 pid = gl->gl_owner_pid;
677 gl->gl_owner_pid = NULL;
674 gl->gl_ip = 0; 678 gl->gl_ip = 0;
675 run_queue(gl); 679 run_queue(gl);
676 spin_unlock(&gl->gl_spin); 680 spin_unlock(&gl->gl_spin);
681
682 put_pid(pid);
677} 683}
678 684
679/** 685/**
@@ -1045,7 +1051,7 @@ static int glock_wait_internal(struct gfs2_holder *gh)
1045} 1051}
1046 1052
1047static inline struct gfs2_holder * 1053static inline struct gfs2_holder *
1048find_holder_by_owner(struct list_head *head, pid_t pid) 1054find_holder_by_owner(struct list_head *head, struct pid *pid)
1049{ 1055{
1050 struct gfs2_holder *gh; 1056 struct gfs2_holder *gh;
1051 1057
@@ -1082,7 +1088,7 @@ static void add_to_queue(struct gfs2_holder *gh)
1082 struct gfs2_glock *gl = gh->gh_gl; 1088 struct gfs2_glock *gl = gh->gh_gl;
1083 struct gfs2_holder *existing; 1089 struct gfs2_holder *existing;
1084 1090
1085 BUG_ON(!gh->gh_owner_pid); 1091 BUG_ON(gh->gh_owner_pid == NULL);
1086 if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags)) 1092 if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
1087 BUG(); 1093 BUG();
1088 1094
@@ -1092,12 +1098,14 @@ static void add_to_queue(struct gfs2_holder *gh)
1092 if (existing) { 1098 if (existing) {
1093 print_symbol(KERN_WARNING "original: %s\n", 1099 print_symbol(KERN_WARNING "original: %s\n",
1094 existing->gh_ip); 1100 existing->gh_ip);
1095 printk(KERN_INFO "pid : %d\n", existing->gh_owner_pid); 1101 printk(KERN_INFO "pid : %d\n",
1102 pid_nr(existing->gh_owner_pid));
1096 printk(KERN_INFO "lock type : %d lock state : %d\n", 1103 printk(KERN_INFO "lock type : %d lock state : %d\n",
1097 existing->gh_gl->gl_name.ln_type, 1104 existing->gh_gl->gl_name.ln_type,
1098 existing->gh_gl->gl_state); 1105 existing->gh_gl->gl_state);
1099 print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip); 1106 print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
1100 printk(KERN_INFO "pid : %d\n", gh->gh_owner_pid); 1107 printk(KERN_INFO "pid : %d\n",
1108 pid_nr(gh->gh_owner_pid));
1101 printk(KERN_INFO "lock type : %d lock state : %d\n", 1109 printk(KERN_INFO "lock type : %d lock state : %d\n",
1102 gl->gl_name.ln_type, gl->gl_state); 1110 gl->gl_name.ln_type, gl->gl_state);
1103 BUG(); 1111 BUG();
@@ -1798,8 +1806,9 @@ static int dump_holder(struct glock_iter *gi, char *str,
1798 1806
1799 print_dbg(gi, " %s\n", str); 1807 print_dbg(gi, " %s\n", str);
1800 if (gh->gh_owner_pid) { 1808 if (gh->gh_owner_pid) {
1801 print_dbg(gi, " owner = %ld ", (long)gh->gh_owner_pid); 1809 print_dbg(gi, " owner = %ld ",
1802 gh_owner = find_task_by_pid(gh->gh_owner_pid); 1810 (long)pid_nr(gh->gh_owner_pid));
1811 gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID);
1803 if (gh_owner) 1812 if (gh_owner)
1804 print_dbg(gi, "(%s)\n", gh_owner->comm); 1813 print_dbg(gi, "(%s)\n", gh_owner->comm);
1805 else 1814 else
@@ -1877,13 +1886,13 @@ static int dump_glock(struct glock_iter *gi, struct gfs2_glock *gl)
1877 print_dbg(gi, " gl_ref = %d\n", atomic_read(&gl->gl_ref)); 1886 print_dbg(gi, " gl_ref = %d\n", atomic_read(&gl->gl_ref));
1878 print_dbg(gi, " gl_state = %u\n", gl->gl_state); 1887 print_dbg(gi, " gl_state = %u\n", gl->gl_state);
1879 if (gl->gl_owner_pid) { 1888 if (gl->gl_owner_pid) {
1880 gl_owner = find_task_by_pid(gl->gl_owner_pid); 1889 gl_owner = pid_task(gl->gl_owner_pid, PIDTYPE_PID);
1881 if (gl_owner) 1890 if (gl_owner)
1882 print_dbg(gi, " gl_owner = pid %d (%s)\n", 1891 print_dbg(gi, " gl_owner = pid %d (%s)\n",
1883 gl->gl_owner_pid, gl_owner->comm); 1892 pid_nr(gl->gl_owner_pid), gl_owner->comm);
1884 else 1893 else
1885 print_dbg(gi, " gl_owner = %d (ended)\n", 1894 print_dbg(gi, " gl_owner = %d (ended)\n",
1886 gl->gl_owner_pid); 1895 pid_nr(gl->gl_owner_pid));
1887 } else 1896 } else
1888 print_dbg(gi, " gl_owner = -1\n"); 1897 print_dbg(gi, " gl_owner = -1\n");
1889 print_dbg(gi, " gl_ip = %lu\n", gl->gl_ip); 1898 print_dbg(gi, " gl_ip = %lu\n", gl->gl_ip);
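
Recording a bare pid_t is fragile once the owning task can exit and its pid number be recycled; holding a reference-counted struct pid pins the identifier for as long as the glock remembers it. The conversion follows one small idiom (the 'owner' variable is a generic stand-in for gl_owner_pid/gh_owner_pid; get_pid(), put_pid(), pid_task() and pid_nr() are the real helpers used above):

#include <linux/pid.h>
#include <linux/sched.h>
#include <linux/rcupdate.h>

static struct pid *owner;			/* illustrative stand-in for gl_owner_pid */

static void take_ownership(void)
{
	owner = get_pid(task_pid(current));	/* grab a reference at lock time */
}

static void report_owner(void)
{
	struct task_struct *task;

	rcu_read_lock();
	task = pid_task(owner, PIDTYPE_PID);	/* NULL if the task already exited */
	printk(KERN_INFO "owner pid %d (%s)\n",
	       pid_nr(owner), task ? task->comm : "exited");
	rcu_read_unlock();
}

static void drop_ownership(void)
{
	put_pid(owner);				/* release the reference on unlock */
	owner = NULL;
}
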
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
index b16f604eea9f..2f9c6d136b37 100644
--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -36,11 +36,13 @@ static inline int gfs2_glock_is_locked_by_me(struct gfs2_glock *gl)
36{ 36{
37 struct gfs2_holder *gh; 37 struct gfs2_holder *gh;
38 int locked = 0; 38 int locked = 0;
39 struct pid *pid;
39 40
40 /* Look in glock's list of holders for one with current task as owner */ 41 /* Look in glock's list of holders for one with current task as owner */
41 spin_lock(&gl->gl_spin); 42 spin_lock(&gl->gl_spin);
43 pid = task_pid(current);
42 list_for_each_entry(gh, &gl->gl_holders, gh_list) { 44 list_for_each_entry(gh, &gl->gl_holders, gh_list) {
43 if (gh->gh_owner_pid == current->pid) { 45 if (gh->gh_owner_pid == pid) {
44 locked = 1; 46 locked = 1;
45 break; 47 break;
46 } 48 }
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 513aaf0dc0ab..525dcae352d6 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -151,7 +151,7 @@ struct gfs2_holder {
151 struct list_head gh_list; 151 struct list_head gh_list;
152 152
153 struct gfs2_glock *gh_gl; 153 struct gfs2_glock *gh_gl;
154 pid_t gh_owner_pid; 154 struct pid *gh_owner_pid;
155 unsigned int gh_state; 155 unsigned int gh_state;
156 unsigned gh_flags; 156 unsigned gh_flags;
157 157
@@ -182,7 +182,7 @@ struct gfs2_glock {
182 unsigned int gl_hash; 182 unsigned int gl_hash;
183 unsigned int gl_demote_state; /* state requested by remote node */ 183 unsigned int gl_demote_state; /* state requested by remote node */
184 unsigned long gl_demote_time; /* time of first demote request */ 184 unsigned long gl_demote_time; /* time of first demote request */
185 pid_t gl_owner_pid; 185 struct pid *gl_owner_pid;
186 unsigned long gl_ip; 186 unsigned long gl_ip;
187 struct list_head gl_holders; 187 struct list_head gl_holders;
188 struct list_head gl_waiters1; /* HIF_MUTEX */ 188 struct list_head gl_waiters1; /* HIF_MUTEX */
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index 728d3169e7bd..37725ade3c51 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -240,7 +240,7 @@ fail_put:
240 ip->i_gl->gl_object = NULL; 240 ip->i_gl->gl_object = NULL;
241 gfs2_glock_put(ip->i_gl); 241 gfs2_glock_put(ip->i_gl);
242fail: 242fail:
243 iput(inode); 243 iget_failed(inode);
244 return ERR_PTR(error); 244 return ERR_PTR(error);
245} 245}
246 246
diff --git a/fs/gfs2/ops_address.c b/fs/gfs2/ops_address.c
index 38dbe99a30ed..ac772b6d9dbb 100644
--- a/fs/gfs2/ops_address.c
+++ b/fs/gfs2/ops_address.c
@@ -446,7 +446,7 @@ static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
446 * so we need to supply one here. It doesn't happen often. 446 * so we need to supply one here. It doesn't happen often.
447 */ 447 */
448 if (unlikely(page->index)) { 448 if (unlikely(page->index)) {
449 zero_user_page(page, 0, PAGE_CACHE_SIZE, KM_USER0); 449 zero_user(page, 0, PAGE_CACHE_SIZE);
450 return 0; 450 return 0;
451 } 451 }
452 452
diff --git a/fs/gfs2/ops_export.c b/fs/gfs2/ops_export.c
index b9da62348a87..334c7f85351b 100644
--- a/fs/gfs2/ops_export.c
+++ b/fs/gfs2/ops_export.c
@@ -143,7 +143,7 @@ static struct dentry *gfs2_get_parent(struct dentry *child)
143 * have to return that as a(n invalid) pointer to dentry. 143 * have to return that as a(n invalid) pointer to dentry.
144 */ 144 */
145 if (IS_ERR(inode)) 145 if (IS_ERR(inode))
146 return ERR_PTR(PTR_ERR(inode)); 146 return ERR_CAST(inode);
147 147
148 dentry = d_alloc_anon(inode); 148 dentry = d_alloc_anon(inode);
149 if (!dentry) { 149 if (!dentry) {
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index 43d511bba52d..4bee6aa845e4 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -884,12 +884,13 @@ static struct super_block* get_gfs2_sb(const char *dev_name)
884 dev_name); 884 dev_name);
885 goto out; 885 goto out;
886 } 886 }
887 error = vfs_getattr(nd.mnt, nd.dentry, &stat); 887 error = vfs_getattr(nd.path.mnt, nd.path.dentry, &stat);
888 888
889 fstype = get_fs_type("gfs2"); 889 fstype = get_fs_type("gfs2");
890 list_for_each_entry(s, &fstype->fs_supers, s_instances) { 890 list_for_each_entry(s, &fstype->fs_supers, s_instances) {
891 if ((S_ISBLK(stat.mode) && s->s_dev == stat.rdev) || 891 if ((S_ISBLK(stat.mode) && s->s_dev == stat.rdev) ||
892 (S_ISDIR(stat.mode) && s == nd.dentry->d_inode->i_sb)) { 892 (S_ISDIR(stat.mode) &&
893 s == nd.path.dentry->d_inode->i_sb)) {
893 sb = s; 894 sb = s;
894 goto free_nd; 895 goto free_nd;
895 } 896 }
@@ -899,7 +900,7 @@ static struct super_block* get_gfs2_sb(const char *dev_name)
899 "mount point %s\n", dev_name); 900 "mount point %s\n", dev_name);
900 901
901free_nd: 902free_nd:
902 path_release(&nd); 903 path_put(&nd.path);
903out: 904out:
904 return sb; 905 return sb;
905} 906}
diff --git a/fs/gfs2/ops_inode.c b/fs/gfs2/ops_inode.c
index 9f71372c1757..e87412902bed 100644
--- a/fs/gfs2/ops_inode.c
+++ b/fs/gfs2/ops_inode.c
@@ -111,7 +111,7 @@ static struct dentry *gfs2_lookup(struct inode *dir, struct dentry *dentry,
111 111
112 inode = gfs2_lookupi(dir, &dentry->d_name, 0, nd); 112 inode = gfs2_lookupi(dir, &dentry->d_name, 0, nd);
113 if (inode && IS_ERR(inode)) 113 if (inode && IS_ERR(inode))
114 return ERR_PTR(PTR_ERR(inode)); 114 return ERR_CAST(inode);
115 115
116 if (inode) { 116 if (inode) {
117 struct gfs2_glock *gl = GFS2_I(inode)->i_gl; 117 struct gfs2_glock *gl = GFS2_I(inode)->i_gl;
diff --git a/fs/hfs/bfind.c b/fs/hfs/bfind.c
index f8452a0eab56..4129cdb3f0d8 100644
--- a/fs/hfs/bfind.c
+++ b/fs/hfs/bfind.c
@@ -52,9 +52,9 @@ int __hfs_brec_find(struct hfs_bnode *bnode, struct hfs_find_data *fd)
52 rec = (e + b) / 2; 52 rec = (e + b) / 2;
53 len = hfs_brec_lenoff(bnode, rec, &off); 53 len = hfs_brec_lenoff(bnode, rec, &off);
54 keylen = hfs_brec_keylen(bnode, rec); 54 keylen = hfs_brec_keylen(bnode, rec);
55 if (keylen == HFS_BAD_KEYLEN) { 55 if (keylen == 0) {
56 res = -EINVAL; 56 res = -EINVAL;
57 goto done; 57 goto fail;
58 } 58 }
59 hfs_bnode_read(bnode, fd->key, off, keylen); 59 hfs_bnode_read(bnode, fd->key, off, keylen);
60 cmpval = bnode->tree->keycmp(fd->key, fd->search_key); 60 cmpval = bnode->tree->keycmp(fd->key, fd->search_key);
@@ -71,9 +71,9 @@ int __hfs_brec_find(struct hfs_bnode *bnode, struct hfs_find_data *fd)
71 if (rec != e && e >= 0) { 71 if (rec != e && e >= 0) {
72 len = hfs_brec_lenoff(bnode, e, &off); 72 len = hfs_brec_lenoff(bnode, e, &off);
73 keylen = hfs_brec_keylen(bnode, e); 73 keylen = hfs_brec_keylen(bnode, e);
74 if (keylen == HFS_BAD_KEYLEN) { 74 if (keylen == 0) {
75 res = -EINVAL; 75 res = -EINVAL;
76 goto done; 76 goto fail;
77 } 77 }
78 hfs_bnode_read(bnode, fd->key, off, keylen); 78 hfs_bnode_read(bnode, fd->key, off, keylen);
79 } 79 }
@@ -83,6 +83,7 @@ done:
83 fd->keylength = keylen; 83 fd->keylength = keylen;
84 fd->entryoffset = off + keylen; 84 fd->entryoffset = off + keylen;
85 fd->entrylength = len - keylen; 85 fd->entrylength = len - keylen;
86fail:
86 return res; 87 return res;
87} 88}
88 89
@@ -206,7 +207,7 @@ int hfs_brec_goto(struct hfs_find_data *fd, int cnt)
206 207
207 len = hfs_brec_lenoff(bnode, fd->record, &off); 208 len = hfs_brec_lenoff(bnode, fd->record, &off);
208 keylen = hfs_brec_keylen(bnode, fd->record); 209 keylen = hfs_brec_keylen(bnode, fd->record);
209 if (keylen == HFS_BAD_KEYLEN) { 210 if (keylen == 0) {
210 res = -EINVAL; 211 res = -EINVAL;
211 goto out; 212 goto out;
212 } 213 }
diff --git a/fs/hfs/brec.c b/fs/hfs/brec.c
index 8626ee375ea8..878bf25dbc6a 100644
--- a/fs/hfs/brec.c
+++ b/fs/hfs/brec.c
@@ -49,14 +49,14 @@ u16 hfs_brec_keylen(struct hfs_bnode *node, u16 rec)
49 if (retval > node->tree->max_key_len + 2) { 49 if (retval > node->tree->max_key_len + 2) {
50 printk(KERN_ERR "hfs: keylen %d too large\n", 50 printk(KERN_ERR "hfs: keylen %d too large\n",
51 retval); 51 retval);
52 retval = HFS_BAD_KEYLEN; 52 retval = 0;
53 } 53 }
54 } else { 54 } else {
55 retval = (hfs_bnode_read_u8(node, recoff) | 1) + 1; 55 retval = (hfs_bnode_read_u8(node, recoff) | 1) + 1;
56 if (retval > node->tree->max_key_len + 1) { 56 if (retval > node->tree->max_key_len + 1) {
57 printk(KERN_ERR "hfs: keylen %d too large\n", 57 printk(KERN_ERR "hfs: keylen %d too large\n",
58 retval); 58 retval);
59 retval = HFS_BAD_KEYLEN; 59 retval = 0;
60 } 60 }
61 } 61 }
62 } 62 }
diff --git a/fs/hfs/btree.c b/fs/hfs/btree.c
index 110dd3515dc8..24cf6fc43021 100644
--- a/fs/hfs/btree.c
+++ b/fs/hfs/btree.c
@@ -81,15 +81,23 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp ke
81 goto fail_page; 81 goto fail_page;
82 if (!tree->node_count) 82 if (!tree->node_count)
83 goto fail_page; 83 goto fail_page;
84 if ((id == HFS_EXT_CNID) && (tree->max_key_len != HFS_MAX_EXT_KEYLEN)) { 84 switch (id) {
85 printk(KERN_ERR "hfs: invalid extent max_key_len %d\n", 85 case HFS_EXT_CNID:
86 tree->max_key_len); 86 if (tree->max_key_len != HFS_MAX_EXT_KEYLEN) {
87 goto fail_page; 87 printk(KERN_ERR "hfs: invalid extent max_key_len %d\n",
88 } 88 tree->max_key_len);
89 if ((id == HFS_CAT_CNID) && (tree->max_key_len != HFS_MAX_CAT_KEYLEN)) { 89 goto fail_page;
90 printk(KERN_ERR "hfs: invalid catalog max_key_len %d\n", 90 }
91 tree->max_key_len); 91 break;
92 goto fail_page; 92 case HFS_CAT_CNID:
93 if (tree->max_key_len != HFS_MAX_CAT_KEYLEN) {
94 printk(KERN_ERR "hfs: invalid catalog max_key_len %d\n",
95 tree->max_key_len);
96 goto fail_page;
97 }
98 break;
99 default:
100 BUG();
93 } 101 }
94 102
95 tree->node_size_shift = ffs(size) - 1; 103 tree->node_size_shift = ffs(size) - 1;
diff --git a/fs/hfs/hfs.h b/fs/hfs/hfs.h
index c6aae61adfe6..6f194d0768b6 100644
--- a/fs/hfs/hfs.h
+++ b/fs/hfs/hfs.h
@@ -28,8 +28,6 @@
28#define HFS_MAX_NAMELEN 128 28#define HFS_MAX_NAMELEN 128
29#define HFS_MAX_VALENCE 32767U 29#define HFS_MAX_VALENCE 32767U
30 30
31#define HFS_BAD_KEYLEN 0xFF
32
33/* Meanings of the drAtrb field of the MDB, 31/* Meanings of the drAtrb field of the MDB,
34 * Reference: _Inside Macintosh: Files_ p. 2-61 32 * Reference: _Inside Macintosh: Files_ p. 2-61
35 */ 33 */
diff --git a/fs/hfs/super.c b/fs/hfs/super.c
index 16cbd902f8b9..32de44ed0021 100644
--- a/fs/hfs/super.c
+++ b/fs/hfs/super.c
@@ -6,7 +6,7 @@
6 * This file may be distributed under the terms of the GNU General Public License. 6 * This file may be distributed under the terms of the GNU General Public License.
7 * 7 *
8 * This file contains hfs_read_super(), some of the super_ops and 8 * This file contains hfs_read_super(), some of the super_ops and
9 * init_module() and cleanup_module(). The remaining super_ops are in 9 * init_hfs_fs() and exit_hfs_fs(). The remaining super_ops are in
10 * inode.c since they deal with inodes. 10 * inode.c since they deal with inodes.
11 * 11 *
12 * Based on the minix file system code, (C) 1991, 1992 by Linus Torvalds 12 * Based on the minix file system code, (C) 1991, 1992 by Linus Torvalds
diff --git a/fs/hfsplus/btree.c b/fs/hfsplus/btree.c
index 050d29c0a5b5..bb5433608a42 100644
--- a/fs/hfsplus/btree.c
+++ b/fs/hfsplus/btree.c
@@ -22,6 +22,7 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id)
22 struct hfs_btree *tree; 22 struct hfs_btree *tree;
23 struct hfs_btree_header_rec *head; 23 struct hfs_btree_header_rec *head;
24 struct address_space *mapping; 24 struct address_space *mapping;
25 struct inode *inode;
25 struct page *page; 26 struct page *page;
26 unsigned int size; 27 unsigned int size;
27 28
@@ -33,9 +34,10 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id)
33 spin_lock_init(&tree->hash_lock); 34 spin_lock_init(&tree->hash_lock);
34 tree->sb = sb; 35 tree->sb = sb;
35 tree->cnid = id; 36 tree->cnid = id;
36 tree->inode = iget(sb, id); 37 inode = hfsplus_iget(sb, id);
37 if (!tree->inode) 38 if (IS_ERR(inode))
38 goto free_tree; 39 goto free_tree;
40 tree->inode = inode;
39 41
40 mapping = tree->inode->i_mapping; 42 mapping = tree->inode->i_mapping;
41 page = read_mapping_page(mapping, 0, NULL); 43 page = read_mapping_page(mapping, 0, NULL);
diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c
index 1955ee61251c..29683645fa0a 100644
--- a/fs/hfsplus/dir.c
+++ b/fs/hfsplus/dir.c
@@ -97,9 +97,9 @@ again:
97 goto fail; 97 goto fail;
98 } 98 }
99 hfs_find_exit(&fd); 99 hfs_find_exit(&fd);
100 inode = iget(dir->i_sb, cnid); 100 inode = hfsplus_iget(dir->i_sb, cnid);
101 if (!inode) 101 if (IS_ERR(inode))
102 return ERR_PTR(-EACCES); 102 return ERR_CAST(inode);
103 if (S_ISREG(inode->i_mode)) 103 if (S_ISREG(inode->i_mode))
104 HFSPLUS_I(inode).dev = linkid; 104 HFSPLUS_I(inode).dev = linkid;
105out: 105out:
diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h
index d9f5eda6d039..d72d0a8b25aa 100644
--- a/fs/hfsplus/hfsplus_fs.h
+++ b/fs/hfsplus/hfsplus_fs.h
@@ -345,6 +345,9 @@ int hfsplus_parse_options(char *, struct hfsplus_sb_info *);
345void hfsplus_fill_defaults(struct hfsplus_sb_info *); 345void hfsplus_fill_defaults(struct hfsplus_sb_info *);
346int hfsplus_show_options(struct seq_file *, struct vfsmount *); 346int hfsplus_show_options(struct seq_file *, struct vfsmount *);
347 347
348/* super.c */
349struct inode *hfsplus_iget(struct super_block *, unsigned long);
350
348/* tables.c */ 351/* tables.c */
349extern u16 hfsplus_case_fold_table[]; 352extern u16 hfsplus_case_fold_table[];
350extern u16 hfsplus_decompose_table[]; 353extern u16 hfsplus_decompose_table[];
diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
index ecf70dafb643..b0f9ad362d1d 100644
--- a/fs/hfsplus/super.c
+++ b/fs/hfsplus/super.c
@@ -20,11 +20,18 @@ static void hfsplus_destroy_inode(struct inode *inode);
20 20
21#include "hfsplus_fs.h" 21#include "hfsplus_fs.h"
22 22
23static void hfsplus_read_inode(struct inode *inode) 23struct inode *hfsplus_iget(struct super_block *sb, unsigned long ino)
24{ 24{
25 struct hfs_find_data fd; 25 struct hfs_find_data fd;
26 struct hfsplus_vh *vhdr; 26 struct hfsplus_vh *vhdr;
27 int err; 27 struct inode *inode;
28 long err = -EIO;
29
30 inode = iget_locked(sb, ino);
31 if (!inode)
32 return ERR_PTR(-ENOMEM);
33 if (!(inode->i_state & I_NEW))
34 return inode;
28 35
29 INIT_LIST_HEAD(&HFSPLUS_I(inode).open_dir_list); 36 INIT_LIST_HEAD(&HFSPLUS_I(inode).open_dir_list);
30 init_MUTEX(&HFSPLUS_I(inode).extents_lock); 37 init_MUTEX(&HFSPLUS_I(inode).extents_lock);
@@ -41,7 +48,7 @@ static void hfsplus_read_inode(struct inode *inode)
41 hfs_find_exit(&fd); 48 hfs_find_exit(&fd);
42 if (err) 49 if (err)
43 goto bad_inode; 50 goto bad_inode;
44 return; 51 goto done;
45 } 52 }
46 vhdr = HFSPLUS_SB(inode->i_sb).s_vhdr; 53 vhdr = HFSPLUS_SB(inode->i_sb).s_vhdr;
47 switch(inode->i_ino) { 54 switch(inode->i_ino) {
@@ -70,10 +77,13 @@ static void hfsplus_read_inode(struct inode *inode)
70 goto bad_inode; 77 goto bad_inode;
71 } 78 }
72 79
73 return; 80done:
81 unlock_new_inode(inode);
82 return inode;
74 83
75 bad_inode: 84bad_inode:
76 make_bad_inode(inode); 85 iget_failed(inode);
86 return ERR_PTR(err);
77} 87}
78 88
79static int hfsplus_write_inode(struct inode *inode, int unused) 89static int hfsplus_write_inode(struct inode *inode, int unused)
@@ -262,7 +272,6 @@ static int hfsplus_remount(struct super_block *sb, int *flags, char *data)
262static const struct super_operations hfsplus_sops = { 272static const struct super_operations hfsplus_sops = {
263 .alloc_inode = hfsplus_alloc_inode, 273 .alloc_inode = hfsplus_alloc_inode,
264 .destroy_inode = hfsplus_destroy_inode, 274 .destroy_inode = hfsplus_destroy_inode,
265 .read_inode = hfsplus_read_inode,
266 .write_inode = hfsplus_write_inode, 275 .write_inode = hfsplus_write_inode,
267 .clear_inode = hfsplus_clear_inode, 276 .clear_inode = hfsplus_clear_inode,
268 .put_super = hfsplus_put_super, 277 .put_super = hfsplus_put_super,
@@ -278,7 +287,7 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
278 struct hfsplus_sb_info *sbi; 287 struct hfsplus_sb_info *sbi;
279 hfsplus_cat_entry entry; 288 hfsplus_cat_entry entry;
280 struct hfs_find_data fd; 289 struct hfs_find_data fd;
281 struct inode *root; 290 struct inode *root, *inode;
282 struct qstr str; 291 struct qstr str;
283 struct nls_table *nls = NULL; 292 struct nls_table *nls = NULL;
284 int err = -EINVAL; 293 int err = -EINVAL;
@@ -366,18 +375,25 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
366 goto cleanup; 375 goto cleanup;
367 } 376 }
368 377
369 HFSPLUS_SB(sb).alloc_file = iget(sb, HFSPLUS_ALLOC_CNID); 378 inode = hfsplus_iget(sb, HFSPLUS_ALLOC_CNID);
370 if (!HFSPLUS_SB(sb).alloc_file) { 379 if (IS_ERR(inode)) {
371 printk(KERN_ERR "hfs: failed to load allocation file\n"); 380 printk(KERN_ERR "hfs: failed to load allocation file\n");
381 err = PTR_ERR(inode);
372 goto cleanup; 382 goto cleanup;
373 } 383 }
384 HFSPLUS_SB(sb).alloc_file = inode;
374 385
375 /* Load the root directory */ 386 /* Load the root directory */
376 root = iget(sb, HFSPLUS_ROOT_CNID); 387 root = hfsplus_iget(sb, HFSPLUS_ROOT_CNID);
388 if (IS_ERR(root)) {
389 printk(KERN_ERR "hfs: failed to load root directory\n");
390 err = PTR_ERR(root);
391 goto cleanup;
392 }
377 sb->s_root = d_alloc_root(root); 393 sb->s_root = d_alloc_root(root);
378 if (!sb->s_root) { 394 if (!sb->s_root) {
379 printk(KERN_ERR "hfs: failed to load root directory\n");
380 iput(root); 395 iput(root);
396 err = -ENOMEM;
381 goto cleanup; 397 goto cleanup;
382 } 398 }
383 sb->s_root->d_op = &hfsplus_dentry_operations; 399 sb->s_root->d_op = &hfsplus_dentry_operations;
@@ -390,9 +406,12 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
390 hfs_find_exit(&fd); 406 hfs_find_exit(&fd);
391 if (entry.type != cpu_to_be16(HFSPLUS_FOLDER)) 407 if (entry.type != cpu_to_be16(HFSPLUS_FOLDER))
392 goto cleanup; 408 goto cleanup;
393 HFSPLUS_SB(sb).hidden_dir = iget(sb, be32_to_cpu(entry.folder.id)); 409 inode = hfsplus_iget(sb, be32_to_cpu(entry.folder.id));
394 if (!HFSPLUS_SB(sb).hidden_dir) 410 if (IS_ERR(inode)) {
411 err = PTR_ERR(inode);
395 goto cleanup; 412 goto cleanup;
413 }
414 HFSPLUS_SB(sb).hidden_dir = inode;
396 } else 415 } else
397 hfs_find_exit(&fd); 416 hfs_find_exit(&fd);
398 417
diff --git a/fs/hfsplus/unicode.c b/fs/hfsplus/unicode.c
index 9e10f9444b64..628ccf6fa402 100644
--- a/fs/hfsplus/unicode.c
+++ b/fs/hfsplus/unicode.c
@@ -325,7 +325,7 @@ int hfsplus_hash_dentry(struct dentry *dentry, struct qstr *str)
325 struct super_block *sb = dentry->d_sb; 325 struct super_block *sb = dentry->d_sb;
326 const char *astr; 326 const char *astr;
327 const u16 *dstr; 327 const u16 *dstr;
328 int casefold, decompose, size, dsize, len; 328 int casefold, decompose, size, len;
329 unsigned long hash; 329 unsigned long hash;
330 wchar_t c; 330 wchar_t c;
331 u16 c2; 331 u16 c2;
@@ -336,6 +336,7 @@ int hfsplus_hash_dentry(struct dentry *dentry, struct qstr *str)
336 astr = str->name; 336 astr = str->name;
337 len = str->len; 337 len = str->len;
338 while (len > 0) { 338 while (len > 0) {
339 int uninitialized_var(dsize);
339 size = asc2unichar(sb, astr, len, &c); 340 size = asc2unichar(sb, astr, len, &c);
340 astr += size; 341 astr += size;
341 len -= size; 342 len -= size;
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index 8966b050196e..5222345ddccf 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -11,6 +11,8 @@
11#include <linux/mm.h> 11#include <linux/mm.h>
12#include <linux/pagemap.h> 12#include <linux/pagemap.h>
13#include <linux/statfs.h> 13#include <linux/statfs.h>
14#include <linux/seq_file.h>
15#include <linux/mount.h>
14#include "hostfs.h" 16#include "hostfs.h"
15#include "init.h" 17#include "init.h"
16#include "kern.h" 18#include "kern.h"
@@ -202,7 +204,7 @@ static char *follow_link(char *link)
202 return ERR_PTR(n); 204 return ERR_PTR(n);
203} 205}
204 206
205static int read_inode(struct inode *ino) 207static int hostfs_read_inode(struct inode *ino)
206{ 208{
207 char *name; 209 char *name;
208 int err = 0; 210 int err = 0;
@@ -233,6 +235,25 @@ static int read_inode(struct inode *ino)
233 return err; 235 return err;
234} 236}
235 237
238static struct inode *hostfs_iget(struct super_block *sb)
239{
240 struct inode *inode;
241 long ret;
242
243 inode = iget_locked(sb, 0);
244 if (!inode)
245 return ERR_PTR(-ENOMEM);
246 if (inode->i_state & I_NEW) {
247 ret = hostfs_read_inode(inode);
248 if (ret < 0) {
249 iget_failed(inode);
250 return ERR_PTR(ret);
251 }
252 unlock_new_inode(inode);
253 }
254 return inode;
255}
256
236int hostfs_statfs(struct dentry *dentry, struct kstatfs *sf) 257int hostfs_statfs(struct dentry *dentry, struct kstatfs *sf)
237{ 258{
238 /* 259 /*
@@ -303,9 +324,16 @@ static void hostfs_destroy_inode(struct inode *inode)
303 kfree(HOSTFS_I(inode)); 324 kfree(HOSTFS_I(inode));
304} 325}
305 326
306static void hostfs_read_inode(struct inode *inode) 327static int hostfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
307{ 328{
308 read_inode(inode); 329 struct inode *root = vfs->mnt_sb->s_root->d_inode;
330 const char *root_path = HOSTFS_I(root)->host_filename;
331 size_t offset = strlen(root_ino) + 1;
332
333 if (strlen(root_path) > offset)
334 seq_printf(seq, ",%s", root_path + offset);
335
336 return 0;
309} 337}
310 338
311static const struct super_operations hostfs_sbops = { 339static const struct super_operations hostfs_sbops = {
@@ -313,8 +341,8 @@ static const struct super_operations hostfs_sbops = {
313 .drop_inode = generic_delete_inode, 341 .drop_inode = generic_delete_inode,
314 .delete_inode = hostfs_delete_inode, 342 .delete_inode = hostfs_delete_inode,
315 .destroy_inode = hostfs_destroy_inode, 343 .destroy_inode = hostfs_destroy_inode,
316 .read_inode = hostfs_read_inode,
317 .statfs = hostfs_statfs, 344 .statfs = hostfs_statfs,
345 .show_options = hostfs_show_options,
318}; 346};
319 347
320int hostfs_readdir(struct file *file, void *ent, filldir_t filldir) 348int hostfs_readdir(struct file *file, void *ent, filldir_t filldir)
@@ -571,10 +599,11 @@ int hostfs_create(struct inode *dir, struct dentry *dentry, int mode,
571 char *name; 599 char *name;
572 int error, fd; 600 int error, fd;
573 601
574 error = -ENOMEM; 602 inode = hostfs_iget(dir->i_sb);
575 inode = iget(dir->i_sb, 0); 603 if (IS_ERR(inode)) {
576 if (inode == NULL) 604 error = PTR_ERR(inode);
577 goto out; 605 goto out;
606 }
578 607
579 error = init_inode(inode, dentry); 608 error = init_inode(inode, dentry);
580 if (error) 609 if (error)
@@ -615,10 +644,11 @@ struct dentry *hostfs_lookup(struct inode *ino, struct dentry *dentry,
615 char *name; 644 char *name;
616 int err; 645 int err;
617 646
618 err = -ENOMEM; 647 inode = hostfs_iget(ino->i_sb);
619 inode = iget(ino->i_sb, 0); 648 if (IS_ERR(inode)) {
620 if (inode == NULL) 649 err = PTR_ERR(inode);
621 goto out; 650 goto out;
651 }
622 652
623 err = init_inode(inode, dentry); 653 err = init_inode(inode, dentry);
624 if (err) 654 if (err)
@@ -736,11 +766,13 @@ int hostfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
736{ 766{
737 struct inode *inode; 767 struct inode *inode;
738 char *name; 768 char *name;
739 int err = -ENOMEM; 769 int err;
740 770
741 inode = iget(dir->i_sb, 0); 771 inode = hostfs_iget(dir->i_sb);
742 if (inode == NULL) 772 if (IS_ERR(inode)) {
773 err = PTR_ERR(inode);
743 goto out; 774 goto out;
775 }
744 776
745 err = init_inode(inode, dentry); 777 err = init_inode(inode, dentry);
746 if (err) 778 if (err)
@@ -952,9 +984,11 @@ static int hostfs_fill_sb_common(struct super_block *sb, void *d, int silent)
952 984
953 sprintf(host_root_path, "%s/%s", root_ino, req_root); 985 sprintf(host_root_path, "%s/%s", root_ino, req_root);
954 986
955 root_inode = iget(sb, 0); 987 root_inode = hostfs_iget(sb);
956 if (root_inode == NULL) 988 if (IS_ERR(root_inode)) {
989 err = PTR_ERR(root_inode);
957 goto out_free; 990 goto out_free;
991 }
958 992
959 err = init_inode(root_inode, NULL); 993 err = init_inode(root_inode, NULL);
960 if (err) 994 if (err)
@@ -972,7 +1006,7 @@ static int hostfs_fill_sb_common(struct super_block *sb, void *d, int silent)
972 if (sb->s_root == NULL) 1006 if (sb->s_root == NULL)
973 goto out_put; 1007 goto out_put;
974 1008
975 err = read_inode(root_inode); 1009 err = hostfs_read_inode(root_inode);
976 if (err) { 1010 if (err) {
977 /* No iput in this case because the dput does that for us */ 1011 /* No iput in this case because the dput does that for us */
978 dput(sb->s_root); 1012 dput(sb->s_root);
diff --git a/fs/hostfs/hostfs_user.c b/fs/hostfs/hostfs_user.c
index 35c1a9f33f47..53fd0a67c11a 100644
--- a/fs/hostfs/hostfs_user.c
+++ b/fs/hostfs/hostfs_user.c
@@ -285,17 +285,17 @@ int set_attr(const char *file, struct hostfs_iattr *attrs, int fd)
285 return err; 285 return err;
286 286
287 times[0].tv_sec = atime_ts.tv_sec; 287 times[0].tv_sec = atime_ts.tv_sec;
288 times[0].tv_usec = atime_ts.tv_nsec * 1000; 288 times[0].tv_usec = atime_ts.tv_nsec / 1000;
289 times[1].tv_sec = mtime_ts.tv_sec; 289 times[1].tv_sec = mtime_ts.tv_sec;
290 times[1].tv_usec = mtime_ts.tv_nsec * 1000; 290 times[1].tv_usec = mtime_ts.tv_nsec / 1000;
291 291
292 if (attrs->ia_valid & HOSTFS_ATTR_ATIME_SET) { 292 if (attrs->ia_valid & HOSTFS_ATTR_ATIME_SET) {
293 times[0].tv_sec = attrs->ia_atime.tv_sec; 293 times[0].tv_sec = attrs->ia_atime.tv_sec;
294 times[0].tv_usec = attrs->ia_atime.tv_nsec * 1000; 294 times[0].tv_usec = attrs->ia_atime.tv_nsec / 1000;
295 } 295 }
296 if (attrs->ia_valid & HOSTFS_ATTR_MTIME_SET) { 296 if (attrs->ia_valid & HOSTFS_ATTR_MTIME_SET) {
297 times[1].tv_sec = attrs->ia_mtime.tv_sec; 297 times[1].tv_sec = attrs->ia_mtime.tv_sec;
298 times[1].tv_usec = attrs->ia_mtime.tv_nsec * 1000; 298 times[1].tv_usec = attrs->ia_mtime.tv_nsec / 1000;
299 } 299 }
300 300
301 if (fd >= 0) { 301 if (fd >= 0) {
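
The set_attr() change above fixes the timespec-to-timeval conversion: struct timeval carries microseconds, so nanoseconds must be divided by 1000, not multiplied. A short illustration of the corrected conversion (plain userspace C, hypothetical helper name):

    #include <time.h>
    #include <sys/time.h>

    /* Convert a nanosecond-resolution timespec to a microsecond timeval. */
    static void timespec_to_timeval_us(const struct timespec *ts, struct timeval *tv)
    {
            tv->tv_sec  = ts->tv_sec;
            tv->tv_usec = ts->tv_nsec / 1000;   /* ns -> us; the old code used '* 1000' */
    }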
diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c
index 00971d999964..f63a699ec659 100644
--- a/fs/hpfs/super.c
+++ b/fs/hpfs/super.c
@@ -386,6 +386,7 @@ static int hpfs_remount_fs(struct super_block *s, int *flags, char *data)
386 int lowercase, conv, eas, chk, errs, chkdsk, timeshift; 386 int lowercase, conv, eas, chk, errs, chkdsk, timeshift;
387 int o; 387 int o;
388 struct hpfs_sb_info *sbi = hpfs_sb(s); 388 struct hpfs_sb_info *sbi = hpfs_sb(s);
389 char *new_opts = kstrdup(data, GFP_KERNEL);
389 390
390 *flags |= MS_NOATIME; 391 *flags |= MS_NOATIME;
391 392
@@ -398,15 +399,15 @@ static int hpfs_remount_fs(struct super_block *s, int *flags, char *data)
398 if (!(o = parse_opts(data, &uid, &gid, &umask, &lowercase, &conv, 399 if (!(o = parse_opts(data, &uid, &gid, &umask, &lowercase, &conv,
399 &eas, &chk, &errs, &chkdsk, &timeshift))) { 400 &eas, &chk, &errs, &chkdsk, &timeshift))) {
400 printk("HPFS: bad mount options.\n"); 401 printk("HPFS: bad mount options.\n");
401 return 1; 402 goto out_err;
402 } 403 }
403 if (o == 2) { 404 if (o == 2) {
404 hpfs_help(); 405 hpfs_help();
405 return 1; 406 goto out_err;
406 } 407 }
407 if (timeshift != sbi->sb_timeshift) { 408 if (timeshift != sbi->sb_timeshift) {
408 printk("HPFS: timeshift can't be changed using remount.\n"); 409 printk("HPFS: timeshift can't be changed using remount.\n");
409 return 1; 410 goto out_err;
410 } 411 }
411 412
412 unmark_dirty(s); 413 unmark_dirty(s);
@@ -419,7 +420,14 @@ static int hpfs_remount_fs(struct super_block *s, int *flags, char *data)
419 420
420 if (!(*flags & MS_RDONLY)) mark_dirty(s); 421 if (!(*flags & MS_RDONLY)) mark_dirty(s);
421 422
423 kfree(s->s_options);
424 s->s_options = new_opts;
425
422 return 0; 426 return 0;
427
428out_err:
429 kfree(new_opts);
430 return -EINVAL;
423} 431}
424 432
425/* Super operations */ 433/* Super operations */
@@ -432,6 +440,7 @@ static const struct super_operations hpfs_sops =
432 .put_super = hpfs_put_super, 440 .put_super = hpfs_put_super,
433 .statfs = hpfs_statfs, 441 .statfs = hpfs_statfs,
434 .remount_fs = hpfs_remount_fs, 442 .remount_fs = hpfs_remount_fs,
443 .show_options = generic_show_options,
435}; 444};
436 445
437static int hpfs_fill_super(struct super_block *s, void *options, int silent) 446static int hpfs_fill_super(struct super_block *s, void *options, int silent)
@@ -454,6 +463,8 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent)
454 463
455 int o; 464 int o;
456 465
466 save_mount_options(s, options);
467
457 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL); 468 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
458 if (!sbi) 469 if (!sbi)
459 return -ENOMEM; 470 return -ENOMEM;
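
hpfs (and hugetlbfs and isofs later in this diff) adopt the generic mount-option bookkeeping: fill_super records the option string with save_mount_options(), remount swaps in a fresh copy only on success, and .show_options is simply generic_show_options. A compressed sketch of that wiring, assuming the 2.6.25-era helpers used here and hypothetical foofs names:

    #include <linux/fs.h>
    #include <linux/slab.h>
    #include <linux/string.h>

    static int foofs_fill_super(struct super_block *sb, void *data, int silent)
    {
            save_mount_options(sb, data);       /* remembered for /proc/mounts */
            /* ... parse options, read the on-disk superblock ... */
            return 0;
    }

    static int foofs_remount(struct super_block *sb, int *flags, char *data)
    {
            char *new_opts = kstrdup(data, GFP_KERNEL);

            /* ... re-parse 'data'; on failure: kfree(new_opts); return -EINVAL; ... */

            kfree(sb->s_options);               /* replace the saved option string */
            sb->s_options = new_opts;
            return 0;
    }

    static const struct super_operations foofs_sops = {
            /* ... */
            .remount_fs   = foofs_remount,
            .show_options = generic_show_options, /* prints sb->s_options verbatim */
    };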
diff --git a/fs/hppfs/hppfs_kern.c b/fs/hppfs/hppfs_kern.c
index affb7412125e..a1e1f0f61aa5 100644
--- a/fs/hppfs/hppfs_kern.c
+++ b/fs/hppfs/hppfs_kern.c
@@ -155,6 +155,20 @@ static void hppfs_read_inode(struct inode *ino)
155 ino->i_blocks = proc_ino->i_blocks; 155 ino->i_blocks = proc_ino->i_blocks;
156} 156}
157 157
158static struct inode *hppfs_iget(struct super_block *sb)
159{
160 struct inode *inode;
161
162 inode = iget_locked(sb, 0);
163 if (!inode)
164 return ERR_PTR(-ENOMEM);
165 if (inode->i_state & I_NEW) {
166 hppfs_read_inode(inode);
167 unlock_new_inode(inode);
168 }
169 return inode;
170}
171
158static struct dentry *hppfs_lookup(struct inode *ino, struct dentry *dentry, 172static struct dentry *hppfs_lookup(struct inode *ino, struct dentry *dentry,
159 struct nameidata *nd) 173 struct nameidata *nd)
160{ 174{
@@ -190,9 +204,11 @@ static struct dentry *hppfs_lookup(struct inode *ino, struct dentry *dentry,
190 if(IS_ERR(proc_dentry)) 204 if(IS_ERR(proc_dentry))
191 return(proc_dentry); 205 return(proc_dentry);
192 206
193 inode = iget(ino->i_sb, 0); 207 inode = hppfs_iget(ino->i_sb);
194 if(inode == NULL) 208 if (IS_ERR(inode)) {
209 err = PTR_ERR(inode);
195 goto out_dput; 210 goto out_dput;
211 }
196 212
197 err = init_inode(inode, proc_dentry); 213 err = init_inode(inode, proc_dentry);
198 if(err) 214 if(err)
@@ -652,7 +668,6 @@ static void hppfs_destroy_inode(struct inode *inode)
652static const struct super_operations hppfs_sbops = { 668static const struct super_operations hppfs_sbops = {
653 .alloc_inode = hppfs_alloc_inode, 669 .alloc_inode = hppfs_alloc_inode,
654 .destroy_inode = hppfs_destroy_inode, 670 .destroy_inode = hppfs_destroy_inode,
655 .read_inode = hppfs_read_inode,
656 .delete_inode = hppfs_delete_inode, 671 .delete_inode = hppfs_delete_inode,
657 .statfs = hppfs_statfs, 672 .statfs = hppfs_statfs,
658}; 673};
@@ -745,9 +760,11 @@ static int hppfs_fill_super(struct super_block *sb, void *d, int silent)
745 sb->s_magic = HPPFS_SUPER_MAGIC; 760 sb->s_magic = HPPFS_SUPER_MAGIC;
746 sb->s_op = &hppfs_sbops; 761 sb->s_op = &hppfs_sbops;
747 762
748 root_inode = iget(sb, 0); 763 root_inode = hppfs_iget(sb);
749 if(root_inode == NULL) 764 if (IS_ERR(root_inode)) {
765 err = PTR_ERR(root_inode);
750 goto out; 766 goto out;
767 }
751 768
752 err = init_inode(root_inode, proc_sb->s_root); 769 err = init_inode(root_inode, proc_sb->s_root);
753 if(err) 770 if(err)
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 09ee07f02663..eee9487ae47f 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -734,6 +734,7 @@ static const struct super_operations hugetlbfs_ops = {
734 .delete_inode = hugetlbfs_delete_inode, 734 .delete_inode = hugetlbfs_delete_inode,
735 .drop_inode = hugetlbfs_drop_inode, 735 .drop_inode = hugetlbfs_drop_inode,
736 .put_super = hugetlbfs_put_super, 736 .put_super = hugetlbfs_put_super,
737 .show_options = generic_show_options,
737}; 738};
738 739
739static int 740static int
@@ -768,7 +769,7 @@ hugetlbfs_parse_options(char *options, struct hugetlbfs_config *pconfig)
768 case Opt_mode: 769 case Opt_mode:
769 if (match_octal(&args[0], &option)) 770 if (match_octal(&args[0], &option))
770 goto bad_val; 771 goto bad_val;
771 pconfig->mode = option & 0777U; 772 pconfig->mode = option & 01777U;
772 break; 773 break;
773 774
774 case Opt_size: { 775 case Opt_size: {
@@ -817,6 +818,8 @@ hugetlbfs_fill_super(struct super_block *sb, void *data, int silent)
817 struct hugetlbfs_config config; 818 struct hugetlbfs_config config;
818 struct hugetlbfs_sb_info *sbinfo; 819 struct hugetlbfs_sb_info *sbinfo;
819 820
821 save_mount_options(sb, data);
822
820 config.nr_blocks = -1; /* No limit on size by default */ 823 config.nr_blocks = -1; /* No limit on size by default */
821 config.nr_inodes = -1; /* No limit on number of inodes by default */ 824 config.nr_inodes = -1; /* No limit on number of inodes by default */
822 config.uid = current->fsuid; 825 config.uid = current->fsuid;
diff --git a/fs/inode.c b/fs/inode.c
index 276ffd6b6fdd..53245ffcf93d 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -928,8 +928,6 @@ EXPORT_SYMBOL(ilookup);
928 * @set: callback used to initialize a new struct inode 928 * @set: callback used to initialize a new struct inode
929 * @data: opaque data pointer to pass to @test and @set 929 * @data: opaque data pointer to pass to @test and @set
930 * 930 *
931 * This is iget() without the read_inode() portion of get_new_inode().
932 *
933 * iget5_locked() uses ifind() to search for the inode specified by @hashval 931 * iget5_locked() uses ifind() to search for the inode specified by @hashval
934 * and @data in the inode cache and if present it is returned with an increased 932 * and @data in the inode cache and if present it is returned with an increased
935 * reference count. This is a generalized version of iget_locked() for file 933 * reference count. This is a generalized version of iget_locked() for file
@@ -966,8 +964,6 @@ EXPORT_SYMBOL(iget5_locked);
966 * @sb: super block of file system 964 * @sb: super block of file system
967 * @ino: inode number to get 965 * @ino: inode number to get
968 * 966 *
969 * This is iget() without the read_inode() portion of get_new_inode_fast().
970 *
971 * iget_locked() uses ifind_fast() to search for the inode specified by @ino in 967 * iget_locked() uses ifind_fast() to search for the inode specified by @ino in
972 * the inode cache and if present it is returned with an increased reference 968 * the inode cache and if present it is returned with an increased reference
973 * count. This is for file systems where the inode number is sufficient for 969 * count. This is for file systems where the inode number is sufficient for
diff --git a/fs/inotify.c b/fs/inotify.c
index 2c5b92152876..690e72595e6e 100644
--- a/fs/inotify.c
+++ b/fs/inotify.c
@@ -168,20 +168,14 @@ static void set_dentry_child_flags(struct inode *inode, int watched)
168 struct dentry *child; 168 struct dentry *child;
169 169
170 list_for_each_entry(child, &alias->d_subdirs, d_u.d_child) { 170 list_for_each_entry(child, &alias->d_subdirs, d_u.d_child) {
171 if (!child->d_inode) { 171 if (!child->d_inode)
172 WARN_ON(child->d_flags & DCACHE_INOTIFY_PARENT_WATCHED);
173 continue; 172 continue;
174 } 173
175 spin_lock(&child->d_lock); 174 spin_lock(&child->d_lock);
176 if (watched) { 175 if (watched)
177 WARN_ON(child->d_flags &
178 DCACHE_INOTIFY_PARENT_WATCHED);
179 child->d_flags |= DCACHE_INOTIFY_PARENT_WATCHED; 176 child->d_flags |= DCACHE_INOTIFY_PARENT_WATCHED;
180 } else { 177 else
181 WARN_ON(!(child->d_flags & 178 child->d_flags &=~DCACHE_INOTIFY_PARENT_WATCHED;
182 DCACHE_INOTIFY_PARENT_WATCHED));
183 child->d_flags&=~DCACHE_INOTIFY_PARENT_WATCHED;
184 }
185 spin_unlock(&child->d_lock); 179 spin_unlock(&child->d_lock);
186 } 180 }
187 } 181 }
@@ -253,7 +247,6 @@ void inotify_d_instantiate(struct dentry *entry, struct inode *inode)
253 if (!inode) 247 if (!inode)
254 return; 248 return;
255 249
256 WARN_ON(entry->d_flags & DCACHE_INOTIFY_PARENT_WATCHED);
257 spin_lock(&entry->d_lock); 250 spin_lock(&entry->d_lock);
258 parent = entry->d_parent; 251 parent = entry->d_parent;
259 if (parent->d_inode && inotify_inode_watched(parent->d_inode)) 252 if (parent->d_inode && inotify_inode_watched(parent->d_inode))
@@ -627,6 +620,7 @@ s32 inotify_add_watch(struct inotify_handle *ih, struct inotify_watch *watch,
627 struct inode *inode, u32 mask) 620 struct inode *inode, u32 mask)
628{ 621{
629 int ret = 0; 622 int ret = 0;
623 int newly_watched;
630 624
631 /* don't allow invalid bits: we don't want flags set */ 625 /* don't allow invalid bits: we don't want flags set */
632 mask &= IN_ALL_EVENTS | IN_ONESHOT; 626 mask &= IN_ALL_EVENTS | IN_ONESHOT;
@@ -653,12 +647,18 @@ s32 inotify_add_watch(struct inotify_handle *ih, struct inotify_watch *watch,
653 */ 647 */
654 watch->inode = igrab(inode); 648 watch->inode = igrab(inode);
655 649
656 if (!inotify_inode_watched(inode))
657 set_dentry_child_flags(inode, 1);
658
659 /* Add the watch to the handle's and the inode's list */ 650 /* Add the watch to the handle's and the inode's list */
651 newly_watched = !inotify_inode_watched(inode);
660 list_add(&watch->h_list, &ih->watches); 652 list_add(&watch->h_list, &ih->watches);
661 list_add(&watch->i_list, &inode->inotify_watches); 653 list_add(&watch->i_list, &inode->inotify_watches);
654 /*
655 * Set child flags _after_ adding the watch, so there is no race
 656	 * window where newly instantiated children could miss their parent's
657 * watched flag.
658 */
659 if (newly_watched)
660 set_dentry_child_flags(inode, 1);
661
662out: 662out:
663 mutex_unlock(&ih->mutex); 663 mutex_unlock(&ih->mutex);
664 mutex_unlock(&inode->inotify_mutex); 664 mutex_unlock(&inode->inotify_mutex);
diff --git a/fs/inotify_user.c b/fs/inotify_user.c
index 5e009331c01f..7b94a1e3c015 100644
--- a/fs/inotify_user.c
+++ b/fs/inotify_user.c
@@ -41,9 +41,9 @@ static struct kmem_cache *event_cachep __read_mostly;
41static struct vfsmount *inotify_mnt __read_mostly; 41static struct vfsmount *inotify_mnt __read_mostly;
42 42
43/* these are configurable via /proc/sys/fs/inotify/ */ 43/* these are configurable via /proc/sys/fs/inotify/ */
44int inotify_max_user_instances __read_mostly; 44static int inotify_max_user_instances __read_mostly;
45int inotify_max_user_watches __read_mostly; 45static int inotify_max_user_watches __read_mostly;
46int inotify_max_queued_events __read_mostly; 46static int inotify_max_queued_events __read_mostly;
47 47
48/* 48/*
49 * Lock ordering: 49 * Lock ordering:
@@ -79,6 +79,7 @@ struct inotify_device {
79 atomic_t count; /* reference count */ 79 atomic_t count; /* reference count */
80 struct user_struct *user; /* user who opened this dev */ 80 struct user_struct *user; /* user who opened this dev */
81 struct inotify_handle *ih; /* inotify handle */ 81 struct inotify_handle *ih; /* inotify handle */
82 struct fasync_struct *fa; /* async notification */
82 unsigned int queue_size; /* size of the queue (bytes) */ 83 unsigned int queue_size; /* size of the queue (bytes) */
83 unsigned int event_count; /* number of pending events */ 84 unsigned int event_count; /* number of pending events */
84 unsigned int max_events; /* maximum number of events */ 85 unsigned int max_events; /* maximum number of events */
@@ -248,6 +249,19 @@ inotify_dev_get_event(struct inotify_device *dev)
248} 249}
249 250
250/* 251/*
252 * inotify_dev_get_last_event - return the last event in the given dev's queue
253 *
254 * Caller must hold dev->ev_mutex.
255 */
256static inline struct inotify_kernel_event *
257inotify_dev_get_last_event(struct inotify_device *dev)
258{
259 if (list_empty(&dev->events))
260 return NULL;
261 return list_entry(dev->events.prev, struct inotify_kernel_event, list);
262}
263
264/*
251 * inotify_dev_queue_event - event handler registered with core inotify, adds 265 * inotify_dev_queue_event - event handler registered with core inotify, adds
252 * a new event to the given device 266 * a new event to the given device
253 * 267 *
@@ -269,11 +283,11 @@ static void inotify_dev_queue_event(struct inotify_watch *w, u32 wd, u32 mask,
269 /* we can safely put the watch as we don't reference it while 283 /* we can safely put the watch as we don't reference it while
270 * generating the event 284 * generating the event
271 */ 285 */
272 if (mask & IN_IGNORED || mask & IN_ONESHOT) 286 if (mask & IN_IGNORED || w->mask & IN_ONESHOT)
273 put_inotify_watch(w); /* final put */ 287 put_inotify_watch(w); /* final put */
274 288
275 /* coalescing: drop this event if it is a dupe of the previous */ 289 /* coalescing: drop this event if it is a dupe of the previous */
276 last = inotify_dev_get_event(dev); 290 last = inotify_dev_get_last_event(dev);
277 if (last && last->event.mask == mask && last->event.wd == wd && 291 if (last && last->event.mask == mask && last->event.wd == wd &&
278 last->event.cookie == cookie) { 292 last->event.cookie == cookie) {
279 const char *lastname = last->name; 293 const char *lastname = last->name;
@@ -302,6 +316,7 @@ static void inotify_dev_queue_event(struct inotify_watch *w, u32 wd, u32 mask,
302 dev->queue_size += sizeof(struct inotify_event) + kevent->event.len; 316 dev->queue_size += sizeof(struct inotify_event) + kevent->event.len;
303 list_add_tail(&kevent->list, &dev->events); 317 list_add_tail(&kevent->list, &dev->events);
304 wake_up_interruptible(&dev->wq); 318 wake_up_interruptible(&dev->wq);
319 kill_fasync(&dev->fa, SIGIO, POLL_IN);
305 320
306out: 321out:
307 mutex_unlock(&dev->ev_mutex); 322 mutex_unlock(&dev->ev_mutex);
@@ -352,7 +367,7 @@ static int find_inode(const char __user *dirname, struct nameidata *nd,
352 /* you can only watch an inode if you have read permissions on it */ 367 /* you can only watch an inode if you have read permissions on it */
353 error = vfs_permission(nd, MAY_READ); 368 error = vfs_permission(nd, MAY_READ);
354 if (error) 369 if (error)
355 path_release(nd); 370 path_put(&nd->path);
356 return error; 371 return error;
357} 372}
358 373
@@ -490,6 +505,13 @@ static ssize_t inotify_read(struct file *file, char __user *buf,
490 return ret; 505 return ret;
491} 506}
492 507
508static int inotify_fasync(int fd, struct file *file, int on)
509{
510 struct inotify_device *dev = file->private_data;
511
512 return fasync_helper(fd, file, on, &dev->fa) >= 0 ? 0 : -EIO;
513}
514
493static int inotify_release(struct inode *ignored, struct file *file) 515static int inotify_release(struct inode *ignored, struct file *file)
494{ 516{
495 struct inotify_device *dev = file->private_data; 517 struct inotify_device *dev = file->private_data;
@@ -502,6 +524,9 @@ static int inotify_release(struct inode *ignored, struct file *file)
502 inotify_dev_event_dequeue(dev); 524 inotify_dev_event_dequeue(dev);
503 mutex_unlock(&dev->ev_mutex); 525 mutex_unlock(&dev->ev_mutex);
504 526
527 if (file->f_flags & FASYNC)
528 inotify_fasync(-1, file, 0);
529
505 /* free this device: the put matching the get in inotify_init() */ 530 /* free this device: the put matching the get in inotify_init() */
506 put_inotify_dev(dev); 531 put_inotify_dev(dev);
507 532
@@ -530,6 +555,7 @@ static long inotify_ioctl(struct file *file, unsigned int cmd,
530static const struct file_operations inotify_fops = { 555static const struct file_operations inotify_fops = {
531 .poll = inotify_poll, 556 .poll = inotify_poll,
532 .read = inotify_read, 557 .read = inotify_read,
558 .fasync = inotify_fasync,
533 .release = inotify_release, 559 .release = inotify_release,
534 .unlocked_ioctl = inotify_ioctl, 560 .unlocked_ioctl = inotify_ioctl,
535 .compat_ioctl = inotify_ioctl, 561 .compat_ioctl = inotify_ioctl,
@@ -577,6 +603,7 @@ asmlinkage long sys_inotify_init(void)
577 goto out_free_dev; 603 goto out_free_dev;
578 } 604 }
579 dev->ih = ih; 605 dev->ih = ih;
606 dev->fa = NULL;
580 607
581 filp->f_op = &inotify_fops; 608 filp->f_op = &inotify_fops;
582 filp->f_path.mnt = mntget(inotify_mnt); 609 filp->f_path.mnt = mntget(inotify_mnt);
@@ -640,7 +667,7 @@ asmlinkage long sys_inotify_add_watch(int fd, const char __user *path, u32 mask)
640 goto fput_and_out; 667 goto fput_and_out;
641 668
642 /* inode held in place by reference to nd; dev by fget on fd */ 669 /* inode held in place by reference to nd; dev by fget on fd */
643 inode = nd.dentry->d_inode; 670 inode = nd.path.dentry->d_inode;
644 dev = filp->private_data; 671 dev = filp->private_data;
645 672
646 mutex_lock(&dev->up_mutex); 673 mutex_lock(&dev->up_mutex);
@@ -649,7 +676,7 @@ asmlinkage long sys_inotify_add_watch(int fd, const char __user *path, u32 mask)
649 ret = create_watch(dev, inode, mask); 676 ret = create_watch(dev, inode, mask);
650 mutex_unlock(&dev->up_mutex); 677 mutex_unlock(&dev->up_mutex);
651 678
652 path_release(&nd); 679 path_put(&nd.path);
653fput_and_out: 680fput_and_out:
654 fput_light(filp, fput_needed); 681 fput_light(filp, fput_needed);
655 return ret; 682 return ret;
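
The inotify_user.c changes wire the device into the standard fasync machinery: a .fasync file operation registers the caller through fasync_helper(), the event-queueing path signals readers with kill_fasync(), and release drops the registration. In sketch form (hypothetical mydev names, same helpers as used above):

    #include <linux/fs.h>

    struct mydev {
            struct fasync_struct *fa;
            /* ... event queue, locks ... */
    };

    static int mydev_fasync(int fd, struct file *file, int on)
    {
            struct mydev *dev = file->private_data;

            return fasync_helper(fd, file, on, &dev->fa) >= 0 ? 0 : -EIO;
    }

    /* Called whenever a new event is queued for readers. */
    static void mydev_notify(struct mydev *dev)
    {
            kill_fasync(&dev->fa, SIGIO, POLL_IN);
    }

    static int mydev_release(struct inode *inode, struct file *file)
    {
            struct mydev *dev = file->private_data;

            if (file->f_flags & FASYNC)
                    mydev_fasync(-1, file, 0);  /* drop the fasync registration */
            /* ... free the device ... */
            return 0;
    }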
diff --git a/fs/ioctl.c b/fs/ioctl.c
index c2a773e8620b..f32fbde2175e 100644
--- a/fs/ioctl.c
+++ b/fs/ioctl.c
@@ -12,12 +12,24 @@
12#include <linux/fs.h> 12#include <linux/fs.h>
13#include <linux/security.h> 13#include <linux/security.h>
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/uaccess.h>
15 16
16#include <asm/uaccess.h>
17#include <asm/ioctls.h> 17#include <asm/ioctls.h>
18 18
19static long do_ioctl(struct file *filp, unsigned int cmd, 19/**
20 unsigned long arg) 20 * vfs_ioctl - call filesystem specific ioctl methods
21 * @filp: open file to invoke ioctl method on
22 * @cmd: ioctl command to execute
23 * @arg: command-specific argument for ioctl
24 *
25 * Invokes filesystem specific ->unlocked_ioctl, if one exists; otherwise
26 * invokes filesystem specific ->ioctl method. If neither method exists,
27 * returns -ENOTTY.
28 *
29 * Returns 0 on success, -errno on error.
30 */
31long vfs_ioctl(struct file *filp, unsigned int cmd,
32 unsigned long arg)
21{ 33{
22 int error = -ENOTTY; 34 int error = -ENOTTY;
23 35
@@ -40,123 +52,148 @@ static long do_ioctl(struct file *filp, unsigned int cmd,
40 return error; 52 return error;
41} 53}
42 54
55static int ioctl_fibmap(struct file *filp, int __user *p)
56{
57 struct address_space *mapping = filp->f_mapping;
58 int res, block;
59
60 /* do we support this mess? */
61 if (!mapping->a_ops->bmap)
62 return -EINVAL;
63 if (!capable(CAP_SYS_RAWIO))
64 return -EPERM;
65 res = get_user(block, p);
66 if (res)
67 return res;
68 lock_kernel();
69 res = mapping->a_ops->bmap(mapping, block);
70 unlock_kernel();
71 return put_user(res, p);
72}
73
43static int file_ioctl(struct file *filp, unsigned int cmd, 74static int file_ioctl(struct file *filp, unsigned int cmd,
44 unsigned long arg) 75 unsigned long arg)
45{ 76{
46 int error; 77 struct inode *inode = filp->f_path.dentry->d_inode;
47 int block;
48 struct inode * inode = filp->f_path.dentry->d_inode;
49 int __user *p = (int __user *)arg; 78 int __user *p = (int __user *)arg;
50 79
51 switch (cmd) { 80 switch (cmd) {
52 case FIBMAP: 81 case FIBMAP:
53 { 82 return ioctl_fibmap(filp, p);
54 struct address_space *mapping = filp->f_mapping; 83 case FIGETBSZ:
55 int res; 84 return put_user(inode->i_sb->s_blocksize, p);
56 /* do we support this mess? */ 85 case FIONREAD:
57 if (!mapping->a_ops->bmap) 86 return put_user(i_size_read(inode) - filp->f_pos, p);
58 return -EINVAL; 87 }
59 if (!capable(CAP_SYS_RAWIO))
60 return -EPERM;
61 if ((error = get_user(block, p)) != 0)
62 return error;
63 88
89 return vfs_ioctl(filp, cmd, arg);
90}
91
92static int ioctl_fionbio(struct file *filp, int __user *argp)
93{
94 unsigned int flag;
95 int on, error;
96
97 error = get_user(on, argp);
98 if (error)
99 return error;
100 flag = O_NONBLOCK;
101#ifdef __sparc__
102 /* SunOS compatibility item. */
103 if (O_NONBLOCK != O_NDELAY)
104 flag |= O_NDELAY;
105#endif
106 if (on)
107 filp->f_flags |= flag;
108 else
109 filp->f_flags &= ~flag;
110 return error;
111}
112
113static int ioctl_fioasync(unsigned int fd, struct file *filp,
114 int __user *argp)
115{
116 unsigned int flag;
117 int on, error;
118
119 error = get_user(on, argp);
120 if (error)
121 return error;
122 flag = on ? FASYNC : 0;
123
124 /* Did FASYNC state change ? */
125 if ((flag ^ filp->f_flags) & FASYNC) {
126 if (filp->f_op && filp->f_op->fasync) {
64 lock_kernel(); 127 lock_kernel();
65 res = mapping->a_ops->bmap(mapping, block); 128 error = filp->f_op->fasync(fd, filp, on);
66 unlock_kernel(); 129 unlock_kernel();
67 return put_user(res, p); 130 } else
68 } 131 error = -ENOTTY;
69 case FIGETBSZ:
70 return put_user(inode->i_sb->s_blocksize, p);
71 case FIONREAD:
72 return put_user(i_size_read(inode) - filp->f_pos, p);
73 } 132 }
133 if (error)
134 return error;
74 135
75 return do_ioctl(filp, cmd, arg); 136 if (on)
137 filp->f_flags |= FASYNC;
138 else
139 filp->f_flags &= ~FASYNC;
140 return error;
76} 141}
77 142
78/* 143/*
79 * When you add any new common ioctls to the switches above and below 144 * When you add any new common ioctls to the switches above and below
80 * please update compat_sys_ioctl() too. 145 * please update compat_sys_ioctl() too.
81 * 146 *
82 * vfs_ioctl() is not for drivers and not intended to be EXPORT_SYMBOL()'d. 147 * do_vfs_ioctl() is not for drivers and not intended to be EXPORT_SYMBOL()'d.
83 * It's just a simple helper for sys_ioctl and compat_sys_ioctl. 148 * It's just a simple helper for sys_ioctl and compat_sys_ioctl.
84 */ 149 */
85int vfs_ioctl(struct file *filp, unsigned int fd, unsigned int cmd, unsigned long arg) 150int do_vfs_ioctl(struct file *filp, unsigned int fd, unsigned int cmd,
151 unsigned long arg)
86{ 152{
87 unsigned int flag; 153 int error = 0;
88 int on, error = 0; 154 int __user *argp = (int __user *)arg;
89 155
90 switch (cmd) { 156 switch (cmd) {
91 case FIOCLEX: 157 case FIOCLEX:
92 set_close_on_exec(fd, 1); 158 set_close_on_exec(fd, 1);
93 break; 159 break;
94 160
95 case FIONCLEX: 161 case FIONCLEX:
96 set_close_on_exec(fd, 0); 162 set_close_on_exec(fd, 0);
97 break; 163 break;
98 164
99 case FIONBIO: 165 case FIONBIO:
100 if ((error = get_user(on, (int __user *)arg)) != 0) 166 error = ioctl_fionbio(filp, argp);
101 break; 167 break;
102 flag = O_NONBLOCK; 168
103#ifdef __sparc__ 169 case FIOASYNC:
104 /* SunOS compatibility item. */ 170 error = ioctl_fioasync(fd, filp, argp);
105 if(O_NONBLOCK != O_NDELAY) 171 break;
106 flag |= O_NDELAY; 172
107#endif 173 case FIOQSIZE:
108 if (on) 174 if (S_ISDIR(filp->f_path.dentry->d_inode->i_mode) ||
109 filp->f_flags |= flag; 175 S_ISREG(filp->f_path.dentry->d_inode->i_mode) ||
110 else 176 S_ISLNK(filp->f_path.dentry->d_inode->i_mode)) {
111 filp->f_flags &= ~flag; 177 loff_t res =
112 break; 178 inode_get_bytes(filp->f_path.dentry->d_inode);
113 179 error = copy_to_user((loff_t __user *)arg, &res,
114 case FIOASYNC: 180 sizeof(res)) ? -EFAULT : 0;
115 if ((error = get_user(on, (int __user *)arg)) != 0) 181 } else
116 break; 182 error = -ENOTTY;
117 flag = on ? FASYNC : 0; 183 break;
118 184 default:
119 /* Did FASYNC state change ? */ 185 if (S_ISREG(filp->f_path.dentry->d_inode->i_mode))
120 if ((flag ^ filp->f_flags) & FASYNC) { 186 error = file_ioctl(filp, cmd, arg);
121 if (filp->f_op && filp->f_op->fasync) { 187 else
122 lock_kernel(); 188 error = vfs_ioctl(filp, cmd, arg);
123 error = filp->f_op->fasync(fd, filp, on); 189 break;
124 unlock_kernel();
125 }
126 else error = -ENOTTY;
127 }
128 if (error != 0)
129 break;
130
131 if (on)
132 filp->f_flags |= FASYNC;
133 else
134 filp->f_flags &= ~FASYNC;
135 break;
136
137 case FIOQSIZE:
138 if (S_ISDIR(filp->f_path.dentry->d_inode->i_mode) ||
139 S_ISREG(filp->f_path.dentry->d_inode->i_mode) ||
140 S_ISLNK(filp->f_path.dentry->d_inode->i_mode)) {
141 loff_t res = inode_get_bytes(filp->f_path.dentry->d_inode);
142 error = copy_to_user((loff_t __user *)arg, &res, sizeof(res)) ? -EFAULT : 0;
143 }
144 else
145 error = -ENOTTY;
146 break;
147 default:
148 if (S_ISREG(filp->f_path.dentry->d_inode->i_mode))
149 error = file_ioctl(filp, cmd, arg);
150 else
151 error = do_ioctl(filp, cmd, arg);
152 break;
153 } 190 }
154 return error; 191 return error;
155} 192}
156 193
157asmlinkage long sys_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg) 194asmlinkage long sys_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg)
158{ 195{
159 struct file * filp; 196 struct file *filp;
160 int error = -EBADF; 197 int error = -EBADF;
161 int fput_needed; 198 int fput_needed;
162 199
@@ -168,7 +205,7 @@ asmlinkage long sys_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg)
168 if (error) 205 if (error)
169 goto out_fput; 206 goto out_fput;
170 207
171 error = vfs_ioctl(filp, fd, cmd, arg); 208 error = do_vfs_ioctl(filp, fd, cmd, arg);
172 out_fput: 209 out_fput:
173 fput_light(filp, fput_needed); 210 fput_light(filp, fput_needed);
174 out: 211 out:
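
After this refactor the generic switch lives in do_vfs_ioctl(): FIOCLEX/FIONCLEX/FIONBIO/FIOASYNC/FIOQSIZE are handled in the VFS, regular files additionally get FIBMAP/FIGETBSZ/FIONREAD via file_ioctl(), and anything unrecognized falls through to the filesystem's own method via vfs_ioctl(). For illustration only, a minimal filesystem-side ->unlocked_ioctl that honours the "-ENOTTY for unknown commands" contract (the command number is hypothetical):

    #include <linux/fs.h>
    #include <linux/ioctl.h>
    #include <linux/uaccess.h>

    #define FOOFS_IOC_GETFLAGS _IOR('f', 1, long)   /* hypothetical command */

    static long foofs_unlocked_ioctl(struct file *filp, unsigned int cmd,
                                     unsigned long arg)
    {
            switch (cmd) {
            case FOOFS_IOC_GETFLAGS:
                    return put_user(0L, (long __user *)arg); /* no flags in this sketch */
            default:
                    return -ENOTTY;     /* unknown command: callers see ENOTTY */
            }
    }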
diff --git a/fs/isofs/export.c b/fs/isofs/export.c
index 29f9753ae5e5..bb219138331a 100644
--- a/fs/isofs/export.c
+++ b/fs/isofs/export.c
@@ -26,11 +26,9 @@ isofs_export_iget(struct super_block *sb,
26 if (block == 0) 26 if (block == 0)
27 return ERR_PTR(-ESTALE); 27 return ERR_PTR(-ESTALE);
28 inode = isofs_iget(sb, block, offset); 28 inode = isofs_iget(sb, block, offset);
29 if (inode == NULL) 29 if (IS_ERR(inode))
30 return ERR_PTR(-ENOMEM); 30 return ERR_CAST(inode);
31 if (is_bad_inode(inode) 31 if (generation && inode->i_generation != generation) {
32 || (generation && inode->i_generation != generation))
33 {
34 iput(inode); 32 iput(inode);
35 return ERR_PTR(-ESTALE); 33 return ERR_PTR(-ESTALE);
36 } 34 }
@@ -110,8 +108,10 @@ static struct dentry *isofs_export_get_parent(struct dentry *child)
110 parent_inode = isofs_iget(child_inode->i_sb, 108 parent_inode = isofs_iget(child_inode->i_sb,
111 parent_block, 109 parent_block,
112 parent_offset); 110 parent_offset);
113 if (parent_inode == NULL) { 111 if (IS_ERR(parent_inode)) {
114 rv = ERR_PTR(-EACCES); 112 rv = ERR_CAST(parent_inode);
113 if (rv != ERR_PTR(-ENOMEM))
114 rv = ERR_PTR(-EACCES);
115 goto out; 115 goto out;
116 } 116 }
117 117
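
The isofs conversions lean on the kernel's pointer-encoded error convention: a helper returns either a valid pointer or ERR_PTR(-errno), and callers test it with IS_ERR(), decode it with PTR_ERR(), or pass it through unchanged with ERR_CAST(). Roughly, mirroring the new isofs_export_iget() above with hypothetical foofs names:

    #include <linux/err.h>
    #include <linux/fs.h>
    #include <linux/types.h>

    static struct inode *foofs_iget(struct super_block *sb, unsigned long ino); /* ERR_PTR()-returning helper */

    static struct inode *foofs_export_iget(struct super_block *sb, unsigned long ino,
                                           __u32 generation)
    {
            struct inode *inode = foofs_iget(sb, ino);

            if (IS_ERR(inode))
                    return ERR_CAST(inode);          /* propagate -errno unchanged */
            if (generation && inode->i_generation != generation) {
                    iput(inode);
                    return ERR_PTR(-ESTALE);         /* stale NFS handle */
            }
            return inode;
    }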
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
index 09e3d306e96f..044a254d526b 100644
--- a/fs/isofs/inode.c
+++ b/fs/isofs/inode.c
@@ -54,7 +54,7 @@ static void isofs_put_super(struct super_block *sb)
54 return; 54 return;
55} 55}
56 56
57static void isofs_read_inode(struct inode *); 57static int isofs_read_inode(struct inode *);
58static int isofs_statfs (struct dentry *, struct kstatfs *); 58static int isofs_statfs (struct dentry *, struct kstatfs *);
59 59
60static struct kmem_cache *isofs_inode_cachep; 60static struct kmem_cache *isofs_inode_cachep;
@@ -107,10 +107,10 @@ static int isofs_remount(struct super_block *sb, int *flags, char *data)
107static const struct super_operations isofs_sops = { 107static const struct super_operations isofs_sops = {
108 .alloc_inode = isofs_alloc_inode, 108 .alloc_inode = isofs_alloc_inode,
109 .destroy_inode = isofs_destroy_inode, 109 .destroy_inode = isofs_destroy_inode,
110 .read_inode = isofs_read_inode,
111 .put_super = isofs_put_super, 110 .put_super = isofs_put_super,
112 .statfs = isofs_statfs, 111 .statfs = isofs_statfs,
113 .remount_fs = isofs_remount, 112 .remount_fs = isofs_remount,
113 .show_options = generic_show_options,
114}; 114};
115 115
116 116
@@ -145,7 +145,8 @@ struct iso9660_options{
145 char nocompress; 145 char nocompress;
146 unsigned char check; 146 unsigned char check;
147 unsigned int blocksize; 147 unsigned int blocksize;
148 mode_t mode; 148 mode_t fmode;
149 mode_t dmode;
149 gid_t gid; 150 gid_t gid;
150 uid_t uid; 151 uid_t uid;
151 char *iocharset; 152 char *iocharset;
@@ -306,7 +307,7 @@ enum {
306 Opt_block, Opt_check_r, Opt_check_s, Opt_cruft, Opt_gid, Opt_ignore, 307 Opt_block, Opt_check_r, Opt_check_s, Opt_cruft, Opt_gid, Opt_ignore,
307 Opt_iocharset, Opt_map_a, Opt_map_n, Opt_map_o, Opt_mode, Opt_nojoliet, 308 Opt_iocharset, Opt_map_a, Opt_map_n, Opt_map_o, Opt_mode, Opt_nojoliet,
308 Opt_norock, Opt_sb, Opt_session, Opt_uid, Opt_unhide, Opt_utf8, Opt_err, 309 Opt_norock, Opt_sb, Opt_session, Opt_uid, Opt_unhide, Opt_utf8, Opt_err,
309 Opt_nocompress, Opt_hide, Opt_showassoc, 310 Opt_nocompress, Opt_hide, Opt_showassoc, Opt_dmode,
310}; 311};
311 312
312static match_table_t tokens = { 313static match_table_t tokens = {
@@ -333,6 +334,7 @@ static match_table_t tokens = {
333 {Opt_uid, "uid=%u"}, 334 {Opt_uid, "uid=%u"},
334 {Opt_gid, "gid=%u"}, 335 {Opt_gid, "gid=%u"},
335 {Opt_mode, "mode=%u"}, 336 {Opt_mode, "mode=%u"},
337 {Opt_dmode, "dmode=%u"},
336 {Opt_block, "block=%u"}, 338 {Opt_block, "block=%u"},
337 {Opt_ignore, "conv=binary"}, 339 {Opt_ignore, "conv=binary"},
338 {Opt_ignore, "conv=b"}, 340 {Opt_ignore, "conv=b"},
@@ -360,7 +362,7 @@ static int parse_options(char *options, struct iso9660_options *popt)
360 popt->check = 'u'; /* unset */ 362 popt->check = 'u'; /* unset */
361 popt->nocompress = 0; 363 popt->nocompress = 0;
362 popt->blocksize = 1024; 364 popt->blocksize = 1024;
363 popt->mode = S_IRUGO | S_IXUGO; /* 365 popt->fmode = popt->dmode = S_IRUGO | S_IXUGO; /*
364 * r-x for all. The disc could 366 * r-x for all. The disc could
365 * be shared with DOS machines so 367 * be shared with DOS machines so
366 * virtually anything could be 368 * virtually anything could be
@@ -452,7 +454,12 @@ static int parse_options(char *options, struct iso9660_options *popt)
452 case Opt_mode: 454 case Opt_mode:
453 if (match_int(&args[0], &option)) 455 if (match_int(&args[0], &option))
454 return 0; 456 return 0;
455 popt->mode = option; 457 popt->fmode = option;
458 break;
459 case Opt_dmode:
460 if (match_int(&args[0], &option))
461 return 0;
462 popt->dmode = option;
456 break; 463 break;
457 case Opt_block: 464 case Opt_block:
458 if (match_int(&args[0], &option)) 465 if (match_int(&args[0], &option))
@@ -552,9 +559,11 @@ static int isofs_fill_super(struct super_block *s, void *data, int silent)
552 int joliet_level = 0; 559 int joliet_level = 0;
553 int iso_blknum, block; 560 int iso_blknum, block;
554 int orig_zonesize; 561 int orig_zonesize;
555 int table; 562 int table, error = -EINVAL;
556 unsigned int vol_desc_start; 563 unsigned int vol_desc_start;
557 564
565 save_mount_options(s, data);
566
558 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL); 567 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
559 if (!sbi) 568 if (!sbi)
560 return -ENOMEM; 569 return -ENOMEM;
@@ -802,7 +811,8 @@ root_found:
802 * on the disk as suid, so we merely allow them to set the default 811 * on the disk as suid, so we merely allow them to set the default
803 * permissions. 812 * permissions.
804 */ 813 */
805 sbi->s_mode = opt.mode & 0777; 814 sbi->s_fmode = opt.fmode & 0777;
815 sbi->s_dmode = opt.dmode & 0777;
806 816
807 /* 817 /*
808 * Read the root inode, which _may_ result in changing 818 * Read the root inode, which _may_ result in changing
@@ -810,6 +820,8 @@ root_found:
810 * we then decide whether to use the Joliet descriptor. 820 * we then decide whether to use the Joliet descriptor.
811 */ 821 */
812 inode = isofs_iget(s, sbi->s_firstdatazone, 0); 822 inode = isofs_iget(s, sbi->s_firstdatazone, 0);
823 if (IS_ERR(inode))
824 goto out_no_root;
813 825
814 /* 826 /*
815 * If this disk has both Rock Ridge and Joliet on it, then we 827 * If this disk has both Rock Ridge and Joliet on it, then we
@@ -829,6 +841,8 @@ root_found:
829 "ISOFS: changing to secondary root\n"); 841 "ISOFS: changing to secondary root\n");
830 iput(inode); 842 iput(inode);
831 inode = isofs_iget(s, sbi->s_firstdatazone, 0); 843 inode = isofs_iget(s, sbi->s_firstdatazone, 0);
844 if (IS_ERR(inode))
845 goto out_no_root;
832 } 846 }
833 } 847 }
834 848
@@ -842,8 +856,6 @@ root_found:
842 sbi->s_joliet_level = joliet_level; 856 sbi->s_joliet_level = joliet_level;
843 857
844 /* check the root inode */ 858 /* check the root inode */
845 if (!inode)
846 goto out_no_root;
847 if (!inode->i_op) 859 if (!inode->i_op)
848 goto out_bad_root; 860 goto out_bad_root;
849 861
@@ -876,11 +888,14 @@ root_found:
876 */ 888 */
877out_bad_root: 889out_bad_root:
878 printk(KERN_WARNING "%s: root inode not initialized\n", __func__); 890 printk(KERN_WARNING "%s: root inode not initialized\n", __func__);
879 goto out_iput;
880out_no_root:
881 printk(KERN_WARNING "%s: get root inode failed\n", __func__);
882out_iput: 891out_iput:
883 iput(inode); 892 iput(inode);
893 goto out_no_inode;
894out_no_root:
895 error = PTR_ERR(inode);
896 if (error != -ENOMEM)
897 printk(KERN_WARNING "%s: get root inode failed\n", __func__);
898out_no_inode:
884#ifdef CONFIG_JOLIET 899#ifdef CONFIG_JOLIET
885 if (sbi->s_nls_iocharset) 900 if (sbi->s_nls_iocharset)
886 unload_nls(sbi->s_nls_iocharset); 901 unload_nls(sbi->s_nls_iocharset);
@@ -908,7 +923,7 @@ out_freesbi:
908 kfree(opt.iocharset); 923 kfree(opt.iocharset);
909 kfree(sbi); 924 kfree(sbi);
910 s->s_fs_info = NULL; 925 s->s_fs_info = NULL;
911 return -EINVAL; 926 return error;
912} 927}
913 928
914static int isofs_statfs (struct dentry *dentry, struct kstatfs *buf) 929static int isofs_statfs (struct dentry *dentry, struct kstatfs *buf)
@@ -930,7 +945,7 @@ static int isofs_statfs (struct dentry *dentry, struct kstatfs *buf)
930/* 945/*
931 * Get a set of blocks; filling in buffer_heads if already allocated 946 * Get a set of blocks; filling in buffer_heads if already allocated
932 * or getblk() if they are not. Returns the number of blocks inserted 947 * or getblk() if they are not. Returns the number of blocks inserted
933 * (0 == error.) 948 * (-ve == error.)
934 */ 949 */
935int isofs_get_blocks(struct inode *inode, sector_t iblock_s, 950int isofs_get_blocks(struct inode *inode, sector_t iblock_s,
936 struct buffer_head **bh, unsigned long nblocks) 951 struct buffer_head **bh, unsigned long nblocks)
@@ -940,11 +955,12 @@ int isofs_get_blocks(struct inode *inode, sector_t iblock_s,
940 unsigned int firstext; 955 unsigned int firstext;
941 unsigned long nextblk, nextoff; 956 unsigned long nextblk, nextoff;
942 long iblock = (long)iblock_s; 957 long iblock = (long)iblock_s;
943 int section, rv; 958 int section, rv, error;
944 struct iso_inode_info *ei = ISOFS_I(inode); 959 struct iso_inode_info *ei = ISOFS_I(inode);
945 960
946 lock_kernel(); 961 lock_kernel();
947 962
963 error = -EIO;
948 rv = 0; 964 rv = 0;
949 if (iblock < 0 || iblock != iblock_s) { 965 if (iblock < 0 || iblock != iblock_s) {
950 printk(KERN_DEBUG "%s: block number too large\n", __func__); 966 printk(KERN_DEBUG "%s: block number too large\n", __func__);
@@ -983,8 +999,10 @@ int isofs_get_blocks(struct inode *inode, sector_t iblock_s,
983 999
984 offset += sect_size; 1000 offset += sect_size;
985 ninode = isofs_iget(inode->i_sb, nextblk, nextoff); 1001 ninode = isofs_iget(inode->i_sb, nextblk, nextoff);
986 if (!ninode) 1002 if (IS_ERR(ninode)) {
1003 error = PTR_ERR(ninode);
987 goto abort; 1004 goto abort;
1005 }
988 firstext = ISOFS_I(ninode)->i_first_extent; 1006 firstext = ISOFS_I(ninode)->i_first_extent;
989 sect_size = ISOFS_I(ninode)->i_section_size >> ISOFS_BUFFER_BITS(ninode); 1007 sect_size = ISOFS_I(ninode)->i_section_size >> ISOFS_BUFFER_BITS(ninode);
990 nextblk = ISOFS_I(ninode)->i_next_section_block; 1008 nextblk = ISOFS_I(ninode)->i_next_section_block;
@@ -1015,9 +1033,10 @@ int isofs_get_blocks(struct inode *inode, sector_t iblock_s,
1015 rv++; 1033 rv++;
1016 } 1034 }
1017 1035
1036 error = 0;
1018abort: 1037abort:
1019 unlock_kernel(); 1038 unlock_kernel();
1020 return rv; 1039 return rv != 0 ? rv : error;
1021} 1040}
1022 1041
1023/* 1042/*
@@ -1026,12 +1045,15 @@ abort:
1026static int isofs_get_block(struct inode *inode, sector_t iblock, 1045static int isofs_get_block(struct inode *inode, sector_t iblock,
1027 struct buffer_head *bh_result, int create) 1046 struct buffer_head *bh_result, int create)
1028{ 1047{
1048 int ret;
1049
1029 if (create) { 1050 if (create) {
1030 printk(KERN_DEBUG "%s: Kernel tries to allocate a block\n", __func__); 1051 printk(KERN_DEBUG "%s: Kernel tries to allocate a block\n", __func__);
1031 return -EROFS; 1052 return -EROFS;
1032 } 1053 }
1033 1054
1034 return isofs_get_blocks(inode, iblock, &bh_result, 1) ? 0 : -EIO; 1055 ret = isofs_get_blocks(inode, iblock, &bh_result, 1);
1056 return ret < 0 ? ret : 0;
1035} 1057}
1036 1058
1037static int isofs_bmap(struct inode *inode, sector_t block) 1059static int isofs_bmap(struct inode *inode, sector_t block)
@@ -1186,7 +1208,7 @@ out_toomany:
1186 goto out; 1208 goto out;
1187} 1209}
1188 1210
1189static void isofs_read_inode(struct inode *inode) 1211static int isofs_read_inode(struct inode *inode)
1190{ 1212{
1191 struct super_block *sb = inode->i_sb; 1213 struct super_block *sb = inode->i_sb;
1192 struct isofs_sb_info *sbi = ISOFS_SB(sb); 1214 struct isofs_sb_info *sbi = ISOFS_SB(sb);
@@ -1199,6 +1221,7 @@ static void isofs_read_inode(struct inode *inode)
1199 unsigned int de_len; 1221 unsigned int de_len;
1200 unsigned long offset; 1222 unsigned long offset;
1201 struct iso_inode_info *ei = ISOFS_I(inode); 1223 struct iso_inode_info *ei = ISOFS_I(inode);
1224 int ret = -EIO;
1202 1225
1203 block = ei->i_iget5_block; 1226 block = ei->i_iget5_block;
1204 bh = sb_bread(inode->i_sb, block); 1227 bh = sb_bread(inode->i_sb, block);
@@ -1216,6 +1239,7 @@ static void isofs_read_inode(struct inode *inode)
1216 tmpde = kmalloc(de_len, GFP_KERNEL); 1239 tmpde = kmalloc(de_len, GFP_KERNEL);
1217 if (tmpde == NULL) { 1240 if (tmpde == NULL) {
1218 printk(KERN_INFO "%s: out of memory\n", __func__); 1241 printk(KERN_INFO "%s: out of memory\n", __func__);
1242 ret = -ENOMEM;
1219 goto fail; 1243 goto fail;
1220 } 1244 }
1221 memcpy(tmpde, bh->b_data + offset, frag1); 1245 memcpy(tmpde, bh->b_data + offset, frag1);
@@ -1235,7 +1259,7 @@ static void isofs_read_inode(struct inode *inode)
1235 ei->i_file_format = isofs_file_normal; 1259 ei->i_file_format = isofs_file_normal;
1236 1260
1237 if (de->flags[-high_sierra] & 2) { 1261 if (de->flags[-high_sierra] & 2) {
1238 inode->i_mode = S_IRUGO | S_IXUGO | S_IFDIR; 1262 inode->i_mode = sbi->s_dmode | S_IFDIR;
1239 inode->i_nlink = 1; /* 1263 inode->i_nlink = 1; /*
1240 * Set to 1. We know there are 2, but 1264 * Set to 1. We know there are 2, but
1241 * the find utility tries to optimize 1265 * the find utility tries to optimize
@@ -1245,9 +1269,8 @@ static void isofs_read_inode(struct inode *inode)
1245 */ 1269 */
1246 } else { 1270 } else {
1247 /* Everybody gets to read the file. */ 1271 /* Everybody gets to read the file. */
1248 inode->i_mode = sbi->s_mode; 1272 inode->i_mode = sbi->s_fmode | S_IFREG;
1249 inode->i_nlink = 1; 1273 inode->i_nlink = 1;
1250 inode->i_mode |= S_IFREG;
1251 } 1274 }
1252 inode->i_uid = sbi->s_uid; 1275 inode->i_uid = sbi->s_uid;
1253 inode->i_gid = sbi->s_gid; 1276 inode->i_gid = sbi->s_gid;
@@ -1259,8 +1282,10 @@ static void isofs_read_inode(struct inode *inode)
1259 1282
1260 ei->i_section_size = isonum_733(de->size); 1283 ei->i_section_size = isonum_733(de->size);
1261 if (de->flags[-high_sierra] & 0x80) { 1284 if (de->flags[-high_sierra] & 0x80) {
1262 if(isofs_read_level3_size(inode)) 1285 ret = isofs_read_level3_size(inode);
1286 if (ret < 0)
1263 goto fail; 1287 goto fail;
1288 ret = -EIO;
1264 } else { 1289 } else {
1265 ei->i_next_section_block = 0; 1290 ei->i_next_section_block = 0;
1266 ei->i_next_section_offset = 0; 1291 ei->i_next_section_offset = 0;
@@ -1346,16 +1371,16 @@ static void isofs_read_inode(struct inode *inode)
1346 /* XXX - parse_rock_ridge_inode() had already set i_rdev. */ 1371 /* XXX - parse_rock_ridge_inode() had already set i_rdev. */
1347 init_special_inode(inode, inode->i_mode, inode->i_rdev); 1372 init_special_inode(inode, inode->i_mode, inode->i_rdev);
1348 1373
1374 ret = 0;
1349out: 1375out:
1350 kfree(tmpde); 1376 kfree(tmpde);
1351 if (bh) 1377 if (bh)
1352 brelse(bh); 1378 brelse(bh);
1353 return; 1379 return ret;
1354 1380
1355out_badread: 1381out_badread:
1356 printk(KERN_WARNING "ISOFS: unable to read i-node block\n"); 1382 printk(KERN_WARNING "ISOFS: unable to read i-node block\n");
1357fail: 1383fail:
1358 make_bad_inode(inode);
1359 goto out; 1384 goto out;
1360} 1385}
1361 1386
@@ -1394,9 +1419,10 @@ struct inode *isofs_iget(struct super_block *sb,
1394 unsigned long hashval; 1419 unsigned long hashval;
1395 struct inode *inode; 1420 struct inode *inode;
1396 struct isofs_iget5_callback_data data; 1421 struct isofs_iget5_callback_data data;
1422 long ret;
1397 1423
1398 if (offset >= 1ul << sb->s_blocksize_bits) 1424 if (offset >= 1ul << sb->s_blocksize_bits)
1399 return NULL; 1425 return ERR_PTR(-EINVAL);
1400 1426
1401 data.block = block; 1427 data.block = block;
1402 data.offset = offset; 1428 data.offset = offset;
@@ -1406,9 +1432,17 @@ struct inode *isofs_iget(struct super_block *sb,
1406 inode = iget5_locked(sb, hashval, &isofs_iget5_test, 1432 inode = iget5_locked(sb, hashval, &isofs_iget5_test,
1407 &isofs_iget5_set, &data); 1433 &isofs_iget5_set, &data);
1408 1434
1409 if (inode && (inode->i_state & I_NEW)) { 1435 if (!inode)
1410 sb->s_op->read_inode(inode); 1436 return ERR_PTR(-ENOMEM);
1411 unlock_new_inode(inode); 1437
1438 if (inode->i_state & I_NEW) {
1439 ret = isofs_read_inode(inode);
1440 if (ret < 0) {
1441 iget_failed(inode);
1442 inode = ERR_PTR(ret);
1443 } else {
1444 unlock_new_inode(inode);
1445 }
1412 } 1446 }
1413 1447
1414 return inode; 1448 return inode;
diff --git a/fs/isofs/isofs.h b/fs/isofs/isofs.h
index f3213f9f89af..d1bdf8adb351 100644
--- a/fs/isofs/isofs.h
+++ b/fs/isofs/isofs.h
@@ -51,7 +51,8 @@ struct isofs_sb_info {
51 unsigned char s_hide; 51 unsigned char s_hide;
52 unsigned char s_showassoc; 52 unsigned char s_showassoc;
53 53
54 mode_t s_mode; 54 mode_t s_fmode;
55 mode_t s_dmode;
55 gid_t s_gid; 56 gid_t s_gid;
56 uid_t s_uid; 57 uid_t s_uid;
57 struct nls_table *s_nls_iocharset; /* Native language support table */ 58 struct nls_table *s_nls_iocharset; /* Native language support table */
diff --git a/fs/isofs/namei.c b/fs/isofs/namei.c
index e2b4dad39ca9..344b247bc29a 100644
--- a/fs/isofs/namei.c
+++ b/fs/isofs/namei.c
@@ -179,9 +179,9 @@ struct dentry *isofs_lookup(struct inode *dir, struct dentry *dentry, struct nam
179 inode = NULL; 179 inode = NULL;
180 if (found) { 180 if (found) {
181 inode = isofs_iget(dir->i_sb, block, offset); 181 inode = isofs_iget(dir->i_sb, block, offset);
182 if (!inode) { 182 if (IS_ERR(inode)) {
183 unlock_kernel(); 183 unlock_kernel();
184 return ERR_PTR(-EACCES); 184 return ERR_CAST(inode);
185 } 185 }
186 } 186 }
187 unlock_kernel(); 187 unlock_kernel();
diff --git a/fs/isofs/rock.c b/fs/isofs/rock.c
index f3a1db3098de..6bd48f0a7047 100644
--- a/fs/isofs/rock.c
+++ b/fs/isofs/rock.c
@@ -474,8 +474,10 @@ repeat:
474 isofs_iget(inode->i_sb, 474 isofs_iget(inode->i_sb,
475 ISOFS_I(inode)->i_first_extent, 475 ISOFS_I(inode)->i_first_extent,
476 0); 476 0);
477 if (!reloc) 477 if (IS_ERR(reloc)) {
478 ret = PTR_ERR(reloc);
478 goto out; 479 goto out;
480 }
479 inode->i_mode = reloc->i_mode; 481 inode->i_mode = reloc->i_mode;
480 inode->i_nlink = reloc->i_nlink; 482 inode->i_nlink = reloc->i_nlink;
481 inode->i_uid = reloc->i_uid; 483 inode->i_uid = reloc->i_uid;
diff --git a/fs/jbd/commit.c b/fs/jbd/commit.c
index 31853eb65b4c..a38c7186c570 100644
--- a/fs/jbd/commit.c
+++ b/fs/jbd/commit.c
@@ -104,7 +104,8 @@ static int journal_write_commit_record(journal_t *journal,
104{ 104{
105 struct journal_head *descriptor; 105 struct journal_head *descriptor;
106 struct buffer_head *bh; 106 struct buffer_head *bh;
107 int i, ret; 107 journal_header_t *header;
108 int ret;
108 int barrier_done = 0; 109 int barrier_done = 0;
109 110
110 if (is_journal_aborted(journal)) 111 if (is_journal_aborted(journal))
@@ -116,13 +117,10 @@ static int journal_write_commit_record(journal_t *journal,
116 117
117 bh = jh2bh(descriptor); 118 bh = jh2bh(descriptor);
118 119
119 /* AKPM: buglet - add `i' to tmp! */ 120 header = (journal_header_t *)(bh->b_data);
120 for (i = 0; i < bh->b_size; i += 512) { 121 header->h_magic = cpu_to_be32(JFS_MAGIC_NUMBER);
121 journal_header_t *tmp = (journal_header_t*)bh->b_data; 122 header->h_blocktype = cpu_to_be32(JFS_COMMIT_BLOCK);
122 tmp->h_magic = cpu_to_be32(JFS_MAGIC_NUMBER); 123 header->h_sequence = cpu_to_be32(commit_transaction->t_tid);
123 tmp->h_blocktype = cpu_to_be32(JFS_COMMIT_BLOCK);
124 tmp->h_sequence = cpu_to_be32(commit_transaction->t_tid);
125 }
126 124
127 JBUFFER_TRACE(descriptor, "write commit block"); 125 JBUFFER_TRACE(descriptor, "write commit block");
128 set_buffer_dirty(bh); 126 set_buffer_dirty(bh);
@@ -131,6 +129,8 @@ static int journal_write_commit_record(journal_t *journal,
131 barrier_done = 1; 129 barrier_done = 1;
132 } 130 }
133 ret = sync_dirty_buffer(bh); 131 ret = sync_dirty_buffer(bh);
132 if (barrier_done)
133 clear_buffer_ordered(bh);
134 /* is it possible for another commit to fail at roughly 134 /* is it possible for another commit to fail at roughly
135 * the same time as this one? If so, we don't want to 135 * the same time as this one? If so, we don't want to
136 * trust the barrier flag in the super, but instead want 136 * trust the barrier flag in the super, but instead want
@@ -148,7 +148,6 @@ static int journal_write_commit_record(journal_t *journal,
148 spin_unlock(&journal->j_state_lock); 148 spin_unlock(&journal->j_state_lock);
149 149
150 /* And try again, without the barrier */ 150 /* And try again, without the barrier */
151 clear_buffer_ordered(bh);
152 set_buffer_uptodate(bh); 151 set_buffer_uptodate(bh);
153 set_buffer_dirty(bh); 152 set_buffer_dirty(bh);
154 ret = sync_dirty_buffer(bh); 153 ret = sync_dirty_buffer(bh);
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c
index 5d14243499d4..3943a8905eb2 100644
--- a/fs/jbd/journal.c
+++ b/fs/jbd/journal.c
@@ -1457,7 +1457,7 @@ static const char *journal_dev_name(journal_t *journal, char *buffer)
1457 * Aborts hard --- we mark the abort as occurred, but do _nothing_ else, 1457 * Aborts hard --- we mark the abort as occurred, but do _nothing_ else,
1458 * and don't attempt to make any other journal updates. 1458 * and don't attempt to make any other journal updates.
1459 */ 1459 */
1460void __journal_abort_hard(journal_t *journal) 1460static void __journal_abort_hard(journal_t *journal)
1461{ 1461{
1462 transaction_t *transaction; 1462 transaction_t *transaction;
1463 char b[BDEVNAME_SIZE]; 1463 char b[BDEVNAME_SIZE];
diff --git a/fs/jbd/recovery.c b/fs/jbd/recovery.c
index c5d9694b6a2f..2b8edf4d6eaa 100644
--- a/fs/jbd/recovery.c
+++ b/fs/jbd/recovery.c
@@ -354,7 +354,7 @@ static int do_one_pass(journal_t *journal,
354 struct buffer_head * obh; 354 struct buffer_head * obh;
355 struct buffer_head * nbh; 355 struct buffer_head * nbh;
356 356
357 cond_resched(); /* We're under lock_kernel() */ 357 cond_resched();
358 358
359 /* If we already know where to stop the log traversal, 359 /* If we already know where to stop the log traversal,
360 * check right now that we haven't gone past the end of 360 * check right now that we haven't gone past the end of
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index 4f302d279279..a8173081f831 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -136,18 +136,20 @@ static int journal_submit_commit_record(journal_t *journal,
136 136
137 JBUFFER_TRACE(descriptor, "submit commit block"); 137 JBUFFER_TRACE(descriptor, "submit commit block");
138 lock_buffer(bh); 138 lock_buffer(bh);
139 139 get_bh(bh);
140 set_buffer_dirty(bh); 140 set_buffer_dirty(bh);
141 set_buffer_uptodate(bh); 141 set_buffer_uptodate(bh);
142 bh->b_end_io = journal_end_buffer_io_sync; 142 bh->b_end_io = journal_end_buffer_io_sync;
143 143
144 if (journal->j_flags & JBD2_BARRIER && 144 if (journal->j_flags & JBD2_BARRIER &&
145 !JBD2_HAS_COMPAT_FEATURE(journal, 145 !JBD2_HAS_INCOMPAT_FEATURE(journal,
146 JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)) { 146 JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)) {
147 set_buffer_ordered(bh); 147 set_buffer_ordered(bh);
148 barrier_done = 1; 148 barrier_done = 1;
149 } 149 }
150 ret = submit_bh(WRITE, bh); 150 ret = submit_bh(WRITE, bh);
151 if (barrier_done)
152 clear_buffer_ordered(bh);
151 153
152 /* is it possible for another commit to fail at roughly 154 /* is it possible for another commit to fail at roughly
153 * the same time as this one? If so, we don't want to 155 * the same time as this one? If so, we don't want to
@@ -166,7 +168,6 @@ static int journal_submit_commit_record(journal_t *journal,
166 spin_unlock(&journal->j_state_lock); 168 spin_unlock(&journal->j_state_lock);
167 169
168 /* And try again, without the barrier */ 170 /* And try again, without the barrier */
169 clear_buffer_ordered(bh);
170 set_buffer_uptodate(bh); 171 set_buffer_uptodate(bh);
171 set_buffer_dirty(bh); 172 set_buffer_dirty(bh);
172 ret = submit_bh(WRITE, bh); 173 ret = submit_bh(WRITE, bh);
@@ -872,7 +873,8 @@ wait_for_iobuf:
872 if (err) 873 if (err)
873 __jbd2_journal_abort_hard(journal); 874 __jbd2_journal_abort_hard(journal);
874 } 875 }
875 err = journal_wait_on_commit_record(cbh); 876 if (!err && !is_journal_aborted(journal))
877 err = journal_wait_on_commit_record(cbh);
876 878
877 if (err) 879 if (err)
878 jbd2_journal_abort(journal, err); 880 jbd2_journal_abort(journal, err);
diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c
index 921680663fa2..146411387ada 100644
--- a/fs/jbd2/recovery.c
+++ b/fs/jbd2/recovery.c
@@ -397,7 +397,7 @@ static int do_one_pass(journal_t *journal,
397 struct buffer_head * obh; 397 struct buffer_head * obh;
398 struct buffer_head * nbh; 398 struct buffer_head * nbh;
399 399
400 cond_resched(); /* We're under lock_kernel() */ 400 cond_resched();
401 401
402 /* If we already know where to stop the log traversal, 402 /* If we already know where to stop the log traversal,
403 * check right now that we haven't gone past the end of 403 * check right now that we haven't gone past the end of
@@ -641,7 +641,7 @@ static int do_one_pass(journal_t *journal,
641 if (chksum_err) { 641 if (chksum_err) {
642 info->end_transaction = next_commit_ID; 642 info->end_transaction = next_commit_ID;
643 643
644 if (!JBD2_HAS_COMPAT_FEATURE(journal, 644 if (!JBD2_HAS_INCOMPAT_FEATURE(journal,
645 JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)){ 645 JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)){
646 printk(KERN_ERR 646 printk(KERN_ERR
647 "JBD: Transaction %u " 647 "JBD: Transaction %u "
diff --git a/fs/jffs2/acl.c b/fs/jffs2/acl.c
index 77fc5838609c..4c80404a9aba 100644
--- a/fs/jffs2/acl.c
+++ b/fs/jffs2/acl.c
@@ -176,7 +176,7 @@ static void jffs2_iset_acl(struct inode *inode, struct posix_acl **i_acl, struct
176 spin_unlock(&inode->i_lock); 176 spin_unlock(&inode->i_lock);
177} 177}
178 178
179struct posix_acl *jffs2_get_acl(struct inode *inode, int type) 179static struct posix_acl *jffs2_get_acl(struct inode *inode, int type)
180{ 180{
181 struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); 181 struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
182 struct posix_acl *acl; 182 struct posix_acl *acl;
@@ -345,8 +345,10 @@ int jffs2_init_acl_pre(struct inode *dir_i, struct inode *inode, int *i_mode)
345 if (!clone) 345 if (!clone)
346 return -ENOMEM; 346 return -ENOMEM;
347 rc = posix_acl_create_masq(clone, (mode_t *)i_mode); 347 rc = posix_acl_create_masq(clone, (mode_t *)i_mode);
348 if (rc < 0) 348 if (rc < 0) {
349 posix_acl_release(clone);
349 return rc; 350 return rc;
351 }
350 if (rc > 0) 352 if (rc > 0)
351 jffs2_iset_acl(inode, &f->i_acl_access, clone); 353 jffs2_iset_acl(inode, &f->i_acl_access, clone);
352 354
diff --git a/fs/jffs2/acl.h b/fs/jffs2/acl.h
index 76c6ebd1acd9..0bb7f003fd80 100644
--- a/fs/jffs2/acl.h
+++ b/fs/jffs2/acl.h
@@ -28,7 +28,6 @@ struct jffs2_acl_header {
28 28
29#define JFFS2_ACL_NOT_CACHED ((void *)-1) 29#define JFFS2_ACL_NOT_CACHED ((void *)-1)
30 30
31extern struct posix_acl *jffs2_get_acl(struct inode *inode, int type);
32extern int jffs2_permission(struct inode *, int, struct nameidata *); 31extern int jffs2_permission(struct inode *, int, struct nameidata *);
33extern int jffs2_acl_chmod(struct inode *); 32extern int jffs2_acl_chmod(struct inode *);
34extern int jffs2_init_acl_pre(struct inode *, struct inode *, int *); 33extern int jffs2_init_acl_pre(struct inode *, struct inode *, int *);
@@ -40,7 +39,6 @@ extern struct xattr_handler jffs2_acl_default_xattr_handler;
40 39
41#else 40#else
42 41
43#define jffs2_get_acl(inode, type) (NULL)
44#define jffs2_permission (NULL) 42#define jffs2_permission (NULL)
45#define jffs2_acl_chmod(inode) (0) 43#define jffs2_acl_chmod(inode) (0)
46#define jffs2_init_acl_pre(dir_i,inode,mode) (0) 44#define jffs2_init_acl_pre(dir_i,inode,mode) (0)
diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c
index 787e392ffd41..f948f7e6ec82 100644
--- a/fs/jffs2/dir.c
+++ b/fs/jffs2/dir.c
@@ -101,10 +101,10 @@ static struct dentry *jffs2_lookup(struct inode *dir_i, struct dentry *target,
101 ino = fd->ino; 101 ino = fd->ino;
102 up(&dir_f->sem); 102 up(&dir_f->sem);
103 if (ino) { 103 if (ino) {
104 inode = iget(dir_i->i_sb, ino); 104 inode = jffs2_iget(dir_i->i_sb, ino);
105 if (!inode) { 105 if (IS_ERR(inode)) {
106 printk(KERN_WARNING "iget() failed for ino #%u\n", ino); 106 printk(KERN_WARNING "iget() failed for ino #%u\n", ino);
107 return (ERR_PTR(-EIO)); 107 return ERR_CAST(inode);
108 } 108 }
109 } 109 }
110 110
diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c
index d2e06f7ea96f..e26ea78c7892 100644
--- a/fs/jffs2/fs.c
+++ b/fs/jffs2/fs.c
@@ -97,11 +97,7 @@ int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
97 ri->gid = cpu_to_je16((ivalid & ATTR_GID)?iattr->ia_gid:inode->i_gid); 97 ri->gid = cpu_to_je16((ivalid & ATTR_GID)?iattr->ia_gid:inode->i_gid);
98 98
99 if (ivalid & ATTR_MODE) 99 if (ivalid & ATTR_MODE)
100 if (iattr->ia_mode & S_ISGID && 100 ri->mode = cpu_to_jemode(iattr->ia_mode);
101 !in_group_p(je16_to_cpu(ri->gid)) && !capable(CAP_FSETID))
102 ri->mode = cpu_to_jemode(iattr->ia_mode & ~S_ISGID);
103 else
104 ri->mode = cpu_to_jemode(iattr->ia_mode);
105 else 101 else
106 ri->mode = cpu_to_jemode(inode->i_mode); 102 ri->mode = cpu_to_jemode(inode->i_mode);
107 103
@@ -230,16 +226,23 @@ void jffs2_clear_inode (struct inode *inode)
230 jffs2_do_clear_inode(c, f); 226 jffs2_do_clear_inode(c, f);
231} 227}
232 228
233void jffs2_read_inode (struct inode *inode) 229struct inode *jffs2_iget(struct super_block *sb, unsigned long ino)
234{ 230{
235 struct jffs2_inode_info *f; 231 struct jffs2_inode_info *f;
236 struct jffs2_sb_info *c; 232 struct jffs2_sb_info *c;
237 struct jffs2_raw_inode latest_node; 233 struct jffs2_raw_inode latest_node;
238 union jffs2_device_node jdev; 234 union jffs2_device_node jdev;
235 struct inode *inode;
239 dev_t rdev = 0; 236 dev_t rdev = 0;
240 int ret; 237 int ret;
241 238
242 D1(printk(KERN_DEBUG "jffs2_read_inode(): inode->i_ino == %lu\n", inode->i_ino)); 239 D1(printk(KERN_DEBUG "jffs2_iget(): ino == %lu\n", ino));
240
241 inode = iget_locked(sb, ino);
242 if (!inode)
243 return ERR_PTR(-ENOMEM);
244 if (!(inode->i_state & I_NEW))
245 return inode;
243 246
244 f = JFFS2_INODE_INFO(inode); 247 f = JFFS2_INODE_INFO(inode);
245 c = JFFS2_SB_INFO(inode->i_sb); 248 c = JFFS2_SB_INFO(inode->i_sb);
@@ -250,9 +253,9 @@ void jffs2_read_inode (struct inode *inode)
250 ret = jffs2_do_read_inode(c, f, inode->i_ino, &latest_node); 253 ret = jffs2_do_read_inode(c, f, inode->i_ino, &latest_node);
251 254
252 if (ret) { 255 if (ret) {
253 make_bad_inode(inode);
254 up(&f->sem); 256 up(&f->sem);
255 return; 257 iget_failed(inode);
258 return ERR_PTR(ret);
256 } 259 }
257 inode->i_mode = jemode_to_cpu(latest_node.mode); 260 inode->i_mode = jemode_to_cpu(latest_node.mode);
258 inode->i_uid = je16_to_cpu(latest_node.uid); 261 inode->i_uid = je16_to_cpu(latest_node.uid);
@@ -303,19 +306,14 @@ void jffs2_read_inode (struct inode *inode)
303 if (f->metadata->size != sizeof(jdev.old) && 306 if (f->metadata->size != sizeof(jdev.old) &&
304 f->metadata->size != sizeof(jdev.new)) { 307 f->metadata->size != sizeof(jdev.new)) {
305 printk(KERN_NOTICE "Device node has strange size %d\n", f->metadata->size); 308 printk(KERN_NOTICE "Device node has strange size %d\n", f->metadata->size);
306 up(&f->sem); 309 goto error_io;
307 jffs2_do_clear_inode(c, f);
308 make_bad_inode(inode);
309 return;
310 } 310 }
311 D1(printk(KERN_DEBUG "Reading device numbers from flash\n")); 311 D1(printk(KERN_DEBUG "Reading device numbers from flash\n"));
312 if (jffs2_read_dnode(c, f, f->metadata, (char *)&jdev, 0, f->metadata->size) < 0) { 312 ret = jffs2_read_dnode(c, f, f->metadata, (char *)&jdev, 0, f->metadata->size);
313 if (ret < 0) {
313 /* Eep */ 314 /* Eep */
314 printk(KERN_NOTICE "Read device numbers for inode %lu failed\n", (unsigned long)inode->i_ino); 315 printk(KERN_NOTICE "Read device numbers for inode %lu failed\n", (unsigned long)inode->i_ino);
315 up(&f->sem); 316 goto error;
316 jffs2_do_clear_inode(c, f);
317 make_bad_inode(inode);
318 return;
319 } 317 }
320 if (f->metadata->size == sizeof(jdev.old)) 318 if (f->metadata->size == sizeof(jdev.old))
321 rdev = old_decode_dev(je16_to_cpu(jdev.old)); 319 rdev = old_decode_dev(je16_to_cpu(jdev.old));
@@ -335,6 +333,16 @@ void jffs2_read_inode (struct inode *inode)
335 up(&f->sem); 333 up(&f->sem);
336 334
337 D1(printk(KERN_DEBUG "jffs2_read_inode() returning\n")); 335 D1(printk(KERN_DEBUG "jffs2_read_inode() returning\n"));
336 unlock_new_inode(inode);
337 return inode;
338
339error_io:
340 ret = -EIO;
341error:
342 up(&f->sem);
343 jffs2_do_clear_inode(c, f);
344 iget_failed(inode);
345 return ERR_PTR(ret);
338} 346}
339 347
340void jffs2_dirty_inode(struct inode *inode) 348void jffs2_dirty_inode(struct inode *inode)
@@ -522,15 +530,16 @@ int jffs2_do_fill_super(struct super_block *sb, void *data, int silent)
522 if ((ret = jffs2_do_mount_fs(c))) 530 if ((ret = jffs2_do_mount_fs(c)))
523 goto out_inohash; 531 goto out_inohash;
524 532
525 ret = -EINVAL;
526
527 D1(printk(KERN_DEBUG "jffs2_do_fill_super(): Getting root inode\n")); 533 D1(printk(KERN_DEBUG "jffs2_do_fill_super(): Getting root inode\n"));
528 root_i = iget(sb, 1); 534 root_i = jffs2_iget(sb, 1);
529 if (is_bad_inode(root_i)) { 535 if (IS_ERR(root_i)) {
530 D1(printk(KERN_WARNING "get root inode failed\n")); 536 D1(printk(KERN_WARNING "get root inode failed\n"));
531 goto out_root_i; 537 ret = PTR_ERR(root_i);
538 goto out_root;
532 } 539 }
533 540
541 ret = -ENOMEM;
542
534 D1(printk(KERN_DEBUG "jffs2_do_fill_super(): d_alloc_root()\n")); 543 D1(printk(KERN_DEBUG "jffs2_do_fill_super(): d_alloc_root()\n"));
535 sb->s_root = d_alloc_root(root_i); 544 sb->s_root = d_alloc_root(root_i);
536 if (!sb->s_root) 545 if (!sb->s_root)
@@ -546,6 +555,7 @@ int jffs2_do_fill_super(struct super_block *sb, void *data, int silent)
546 555
547 out_root_i: 556 out_root_i:
548 iput(root_i); 557 iput(root_i);
558out_root:
549 jffs2_free_ino_caches(c); 559 jffs2_free_ino_caches(c);
550 jffs2_free_raw_node_refs(c); 560 jffs2_free_raw_node_refs(c);
551 if (jffs2_blocks_use_vmalloc(c)) 561 if (jffs2_blocks_use_vmalloc(c))
@@ -615,9 +625,9 @@ struct jffs2_inode_info *jffs2_gc_fetch_inode(struct jffs2_sb_info *c,
615 jffs2_do_unlink() would need the alloc_sem and we have it. 625 jffs2_do_unlink() would need the alloc_sem and we have it.
616 Just iget() it, and if read_inode() is necessary that's OK. 626 Just iget() it, and if read_inode() is necessary that's OK.
617 */ 627 */
618 inode = iget(OFNI_BS_2SFFJ(c), inum); 628 inode = jffs2_iget(OFNI_BS_2SFFJ(c), inum);
619 if (!inode) 629 if (IS_ERR(inode))
620 return ERR_PTR(-ENOMEM); 630 return ERR_CAST(inode);
621 } 631 }
622 if (is_bad_inode(inode)) { 632 if (is_bad_inode(inode)) {
623 printk(KERN_NOTICE "Eep. read_inode() failed for ino #%u. nlink %d\n", 633 printk(KERN_NOTICE "Eep. read_inode() failed for ino #%u. nlink %d\n",
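The fs/jffs2/fs.c change above, and the jfs and minix changes later in this listing, all make the same conversion: the filesystem's read_inode() hook becomes a foo_iget() helper built on iget_locked(), returning an ERR_PTR on failure instead of leaving a bad inode in the cache. A minimal sketch of that shared pattern follows; foo_iget() and foo_read_raw() are placeholder names for illustration, not functions from this patch, while iget_locked(), unlock_new_inode() and iget_failed() are the real helpers used by the hunks above.

#include <linux/fs.h>
#include <linux/err.h>

static int foo_read_raw(struct inode *inode);	/* placeholder: fs-specific on-disk read */

struct inode *foo_iget(struct super_block *sb, unsigned long ino)
{
	struct inode *inode;
	int err;

	/* Look the inode up in the cache, allocating a locked, I_NEW one
	 * if it is not there yet. */
	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;			/* cached and already set up */

	err = foo_read_raw(inode);
	if (err) {
		/* Marks the inode bad, unlocks it and drops the reference,
		 * so concurrent lookups see the failure too. */
		iget_failed(inode);
		return ERR_PTR(err);
	}

	unlock_new_inode(inode);
	return inode;
}

Callers such as lookup() or fill_super() then test IS_ERR() on the result, which is exactly what the jffs2_lookup() and jffs2_do_fill_super() hunks above switch to.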
diff --git a/fs/jffs2/nodelist.c b/fs/jffs2/nodelist.c
index 4bf86088b3ae..87c6f555e1a0 100644
--- a/fs/jffs2/nodelist.c
+++ b/fs/jffs2/nodelist.c
@@ -32,15 +32,18 @@ void jffs2_add_fd_to_list(struct jffs2_sb_info *c, struct jffs2_full_dirent *new
32 if ((*prev)->nhash == new->nhash && !strcmp((*prev)->name, new->name)) { 32 if ((*prev)->nhash == new->nhash && !strcmp((*prev)->name, new->name)) {
33 /* Duplicate. Free one */ 33 /* Duplicate. Free one */
34 if (new->version < (*prev)->version) { 34 if (new->version < (*prev)->version) {
35 dbg_dentlist("Eep! Marking new dirent node is obsolete, old is \"%s\", ino #%u\n", 35 dbg_dentlist("Eep! Marking new dirent node obsolete, old is \"%s\", ino #%u\n",
36 (*prev)->name, (*prev)->ino); 36 (*prev)->name, (*prev)->ino);
37 jffs2_mark_node_obsolete(c, new->raw); 37 jffs2_mark_node_obsolete(c, new->raw);
38 jffs2_free_full_dirent(new); 38 jffs2_free_full_dirent(new);
39 } else { 39 } else {
40 dbg_dentlist("marking old dirent \"%s\", ino #%u bsolete\n", 40 dbg_dentlist("marking old dirent \"%s\", ino #%u obsolete\n",
41 (*prev)->name, (*prev)->ino); 41 (*prev)->name, (*prev)->ino);
42 new->next = (*prev)->next; 42 new->next = (*prev)->next;
43 jffs2_mark_node_obsolete(c, ((*prev)->raw)); 43 /* It may have been a 'placeholder' deletion dirent,
44 if jffs2_can_mark_obsolete() (see jffs2_do_unlink()) */
45 if ((*prev)->raw)
46 jffs2_mark_node_obsolete(c, ((*prev)->raw));
44 jffs2_free_full_dirent(*prev); 47 jffs2_free_full_dirent(*prev);
45 *prev = new; 48 *prev = new;
46 } 49 }
diff --git a/fs/jffs2/os-linux.h b/fs/jffs2/os-linux.h
index bf64686cf098..1b10d2594092 100644
--- a/fs/jffs2/os-linux.h
+++ b/fs/jffs2/os-linux.h
@@ -175,7 +175,7 @@ extern const struct inode_operations jffs2_symlink_inode_operations;
175/* fs.c */ 175/* fs.c */
176int jffs2_setattr (struct dentry *, struct iattr *); 176int jffs2_setattr (struct dentry *, struct iattr *);
177int jffs2_do_setattr (struct inode *, struct iattr *); 177int jffs2_do_setattr (struct inode *, struct iattr *);
178void jffs2_read_inode (struct inode *); 178struct inode *jffs2_iget(struct super_block *, unsigned long);
179void jffs2_clear_inode (struct inode *); 179void jffs2_clear_inode (struct inode *);
180void jffs2_dirty_inode(struct inode *inode); 180void jffs2_dirty_inode(struct inode *inode);
181struct inode *jffs2_new_inode (struct inode *dir_i, int mode, 181struct inode *jffs2_new_inode (struct inode *dir_i, int mode,
diff --git a/fs/jffs2/readinode.c b/fs/jffs2/readinode.c
index 6c1ba3566f58..e512a93d6249 100644
--- a/fs/jffs2/readinode.c
+++ b/fs/jffs2/readinode.c
@@ -37,23 +37,24 @@ static int check_node_data(struct jffs2_sb_info *c, struct jffs2_tmp_dnode_info
37 37
38 BUG_ON(tn->csize == 0); 38 BUG_ON(tn->csize == 0);
39 39
40 if (!jffs2_is_writebuffered(c))
41 goto adj_acc;
42
43 /* Calculate how many bytes were already checked */ 40 /* Calculate how many bytes were already checked */
44 ofs = ref_offset(ref) + sizeof(struct jffs2_raw_inode); 41 ofs = ref_offset(ref) + sizeof(struct jffs2_raw_inode);
45 len = ofs % c->wbuf_pagesize; 42 len = tn->csize;
46 if (likely(len)) 43
47 len = c->wbuf_pagesize - len; 44 if (jffs2_is_writebuffered(c)) {
48 45 int adj = ofs % c->wbuf_pagesize;
49 if (len >= tn->csize) { 46 if (likely(adj))
50 dbg_readinode("no need to check node at %#08x, data length %u, data starts at %#08x - it has already been checked.\n", 47 adj = c->wbuf_pagesize - adj;
51 ref_offset(ref), tn->csize, ofs); 48
52 goto adj_acc; 49 if (adj >= tn->csize) {
53 } 50 dbg_readinode("no need to check node at %#08x, data length %u, data starts at %#08x - it has already been checked.\n",
51 ref_offset(ref), tn->csize, ofs);
52 goto adj_acc;
53 }
54 54
55 ofs += len; 55 ofs += adj;
56 len = tn->csize - len; 56 len -= adj;
57 }
57 58
58 dbg_readinode("check node at %#08x, data length %u, partial CRC %#08x, correct CRC %#08x, data starts at %#08x, start checking from %#08x - %u bytes.\n", 59 dbg_readinode("check node at %#08x, data length %u, partial CRC %#08x, correct CRC %#08x, data starts at %#08x, start checking from %#08x - %u bytes.\n",
59 ref_offset(ref), tn->csize, tn->partial_crc, tn->data_crc, ofs - len, ofs, len); 60 ref_offset(ref), tn->csize, tn->partial_crc, tn->data_crc, ofs - len, ofs, len);
@@ -63,7 +64,7 @@ static int check_node_data(struct jffs2_sb_info *c, struct jffs2_tmp_dnode_info
63 * adding and jffs2_flash_read_end() interface. */ 64 * adding and jffs2_flash_read_end() interface. */
64 if (c->mtd->point) { 65 if (c->mtd->point) {
65 err = c->mtd->point(c->mtd, ofs, len, &retlen, &buffer); 66 err = c->mtd->point(c->mtd, ofs, len, &retlen, &buffer);
66 if (!err && retlen < tn->csize) { 67 if (!err && retlen < len) {
67 JFFS2_WARNING("MTD point returned len too short: %zu instead of %u.\n", retlen, tn->csize); 68 JFFS2_WARNING("MTD point returned len too short: %zu instead of %u.\n", retlen, tn->csize);
68 c->mtd->unpoint(c->mtd, buffer, ofs, retlen); 69 c->mtd->unpoint(c->mtd, buffer, ofs, retlen);
69 } else if (err) 70 } else if (err)
diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
index ffa447511e6a..4677355996cc 100644
--- a/fs/jffs2/super.c
+++ b/fs/jffs2/super.c
@@ -65,7 +65,6 @@ static const struct super_operations jffs2_super_operations =
65{ 65{
66 .alloc_inode = jffs2_alloc_inode, 66 .alloc_inode = jffs2_alloc_inode,
67 .destroy_inode =jffs2_destroy_inode, 67 .destroy_inode =jffs2_destroy_inode,
68 .read_inode = jffs2_read_inode,
69 .put_super = jffs2_put_super, 68 .put_super = jffs2_put_super,
70 .write_super = jffs2_write_super, 69 .write_super = jffs2_write_super,
71 .statfs = jffs2_statfs, 70 .statfs = jffs2_statfs,
diff --git a/fs/jffs2/write.c b/fs/jffs2/write.c
index 147e2cbee9e4..776f13cbf2b5 100644
--- a/fs/jffs2/write.c
+++ b/fs/jffs2/write.c
@@ -177,7 +177,7 @@ struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2
177 void *hold_err = fn->raw; 177 void *hold_err = fn->raw;
178 /* Release the full_dnode which is now useless, and return */ 178 /* Release the full_dnode which is now useless, and return */
179 jffs2_free_full_dnode(fn); 179 jffs2_free_full_dnode(fn);
180 return ERR_PTR(PTR_ERR(hold_err)); 180 return ERR_CAST(hold_err);
181 } 181 }
182 fn->ofs = je32_to_cpu(ri->offset); 182 fn->ofs = je32_to_cpu(ri->offset);
183 fn->size = je32_to_cpu(ri->dsize); 183 fn->size = je32_to_cpu(ri->dsize);
@@ -313,7 +313,7 @@ struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jff
313 void *hold_err = fd->raw; 313 void *hold_err = fd->raw;
314 /* Release the full_dirent which is now useless, and return */ 314 /* Release the full_dirent which is now useless, and return */
315 jffs2_free_full_dirent(fd); 315 jffs2_free_full_dirent(fd);
316 return ERR_PTR(PTR_ERR(hold_err)); 316 return ERR_CAST(hold_err);
317 } 317 }
318 318
319 if (retried) { 319 if (retried) {
@@ -582,7 +582,7 @@ int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f,
582 jffs2_add_fd_to_list(c, fd, &dir_f->dents); 582 jffs2_add_fd_to_list(c, fd, &dir_f->dents);
583 up(&dir_f->sem); 583 up(&dir_f->sem);
584 } else { 584 } else {
585 struct jffs2_full_dirent **prev = &dir_f->dents; 585 struct jffs2_full_dirent *fd = dir_f->dents;
586 uint32_t nhash = full_name_hash(name, namelen); 586 uint32_t nhash = full_name_hash(name, namelen);
587 587
588 /* We don't actually want to reserve any space, but we do 588 /* We don't actually want to reserve any space, but we do
@@ -590,21 +590,22 @@ int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f,
590 down(&c->alloc_sem); 590 down(&c->alloc_sem);
591 down(&dir_f->sem); 591 down(&dir_f->sem);
592 592
593 while ((*prev) && (*prev)->nhash <= nhash) { 593 for (fd = dir_f->dents; fd; fd = fd->next) {
594 if ((*prev)->nhash == nhash && 594 if (fd->nhash == nhash &&
595 !memcmp((*prev)->name, name, namelen) && 595 !memcmp(fd->name, name, namelen) &&
596 !(*prev)->name[namelen]) { 596 !fd->name[namelen]) {
597 struct jffs2_full_dirent *this = *prev;
598 597
599 D1(printk(KERN_DEBUG "Marking old dirent node (ino #%u) @%08x obsolete\n", 598 D1(printk(KERN_DEBUG "Marking old dirent node (ino #%u) @%08x obsolete\n",
600 this->ino, ref_offset(this->raw))); 599 fd->ino, ref_offset(fd->raw)));
601 600 jffs2_mark_node_obsolete(c, fd->raw);
602 *prev = this->next; 601 /* We don't want to remove it from the list immediately,
603 jffs2_mark_node_obsolete(c, (this->raw)); 602 because that screws up getdents()/seek() semantics even
604 jffs2_free_full_dirent(this); 603 more than they're screwed already. Turn it into a
604 node-less deletion dirent instead -- a placeholder */
605 fd->raw = NULL;
606 fd->ino = 0;
605 break; 607 break;
606 } 608 }
607 prev = &((*prev)->next);
608 } 609 }
609 up(&dir_f->sem); 610 up(&dir_f->sem);
610 } 611 }
@@ -630,7 +631,8 @@ int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f,
630 D1(printk(KERN_DEBUG "Removing deletion dirent for \"%s\" from dir ino #%u\n", 631 D1(printk(KERN_DEBUG "Removing deletion dirent for \"%s\" from dir ino #%u\n",
631 fd->name, dead_f->inocache->ino)); 632 fd->name, dead_f->inocache->ino));
632 } 633 }
633 jffs2_mark_node_obsolete(c, fd->raw); 634 if (fd->raw)
635 jffs2_mark_node_obsolete(c, fd->raw);
634 jffs2_free_full_dirent(fd); 636 jffs2_free_full_dirent(fd);
635 } 637 }
636 } 638 }
diff --git a/fs/jfs/file.c b/fs/jfs/file.c
index 87eb93694af7..7f6063acaa3b 100644
--- a/fs/jfs/file.c
+++ b/fs/jfs/file.c
@@ -112,5 +112,8 @@ const struct file_operations jfs_file_operations = {
112 .splice_write = generic_file_splice_write, 112 .splice_write = generic_file_splice_write,
113 .fsync = jfs_fsync, 113 .fsync = jfs_fsync,
114 .release = jfs_release, 114 .release = jfs_release,
115 .ioctl = jfs_ioctl, 115 .unlocked_ioctl = jfs_ioctl,
116#ifdef CONFIG_COMPAT
117 .compat_ioctl = jfs_compat_ioctl,
118#endif
116}; 119};
diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c
index 4672013802e1..210339784b56 100644
--- a/fs/jfs/inode.c
+++ b/fs/jfs/inode.c
@@ -31,11 +31,21 @@
31#include "jfs_debug.h" 31#include "jfs_debug.h"
32 32
33 33
34void jfs_read_inode(struct inode *inode) 34struct inode *jfs_iget(struct super_block *sb, unsigned long ino)
35{ 35{
36 if (diRead(inode)) { 36 struct inode *inode;
37 make_bad_inode(inode); 37 int ret;
38 return; 38
39 inode = iget_locked(sb, ino);
40 if (!inode)
41 return ERR_PTR(-ENOMEM);
42 if (!(inode->i_state & I_NEW))
43 return inode;
44
45 ret = diRead(inode);
46 if (ret < 0) {
47 iget_failed(inode);
48 return ERR_PTR(ret);
39 } 49 }
40 50
41 if (S_ISREG(inode->i_mode)) { 51 if (S_ISREG(inode->i_mode)) {
@@ -55,6 +65,8 @@ void jfs_read_inode(struct inode *inode)
55 inode->i_op = &jfs_file_inode_operations; 65 inode->i_op = &jfs_file_inode_operations;
56 init_special_inode(inode, inode->i_mode, inode->i_rdev); 66 init_special_inode(inode, inode->i_mode, inode->i_rdev);
57 } 67 }
68 unlock_new_inode(inode);
69 return inode;
58} 70}
59 71
60/* 72/*
diff --git a/fs/jfs/ioctl.c b/fs/jfs/ioctl.c
index dfda12a073e1..a1f8e375ad21 100644
--- a/fs/jfs/ioctl.c
+++ b/fs/jfs/ioctl.c
@@ -51,9 +51,9 @@ static long jfs_map_ext2(unsigned long flags, int from)
51} 51}
52 52
53 53
54int jfs_ioctl(struct inode * inode, struct file * filp, unsigned int cmd, 54long jfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
55 unsigned long arg)
56{ 55{
56 struct inode *inode = filp->f_dentry->d_inode;
57 struct jfs_inode_info *jfs_inode = JFS_IP(inode); 57 struct jfs_inode_info *jfs_inode = JFS_IP(inode);
58 unsigned int flags; 58 unsigned int flags;
59 59
@@ -82,6 +82,10 @@ int jfs_ioctl(struct inode * inode, struct file * filp, unsigned int cmd,
82 /* Is it quota file? Do not allow user to mess with it */ 82 /* Is it quota file? Do not allow user to mess with it */
83 if (IS_NOQUOTA(inode)) 83 if (IS_NOQUOTA(inode))
84 return -EPERM; 84 return -EPERM;
85
86 /* Lock against other parallel changes of flags */
87 mutex_lock(&inode->i_mutex);
88
85 jfs_get_inode_flags(jfs_inode); 89 jfs_get_inode_flags(jfs_inode);
86 oldflags = jfs_inode->mode2; 90 oldflags = jfs_inode->mode2;
87 91
@@ -92,8 +96,10 @@ int jfs_ioctl(struct inode * inode, struct file * filp, unsigned int cmd,
92 if ((oldflags & JFS_IMMUTABLE_FL) || 96 if ((oldflags & JFS_IMMUTABLE_FL) ||
93 ((flags ^ oldflags) & 97 ((flags ^ oldflags) &
94 (JFS_APPEND_FL | JFS_IMMUTABLE_FL))) { 98 (JFS_APPEND_FL | JFS_IMMUTABLE_FL))) {
95 if (!capable(CAP_LINUX_IMMUTABLE)) 99 if (!capable(CAP_LINUX_IMMUTABLE)) {
100 mutex_unlock(&inode->i_mutex);
96 return -EPERM; 101 return -EPERM;
102 }
97 } 103 }
98 104
99 flags = flags & JFS_FL_USER_MODIFIABLE; 105 flags = flags & JFS_FL_USER_MODIFIABLE;
@@ -101,6 +107,7 @@ int jfs_ioctl(struct inode * inode, struct file * filp, unsigned int cmd,
101 jfs_inode->mode2 = flags; 107 jfs_inode->mode2 = flags;
102 108
103 jfs_set_inode_flags(inode); 109 jfs_set_inode_flags(inode);
110 mutex_unlock(&inode->i_mutex);
104 inode->i_ctime = CURRENT_TIME_SEC; 111 inode->i_ctime = CURRENT_TIME_SEC;
105 mark_inode_dirty(inode); 112 mark_inode_dirty(inode);
106 return 0; 113 return 0;
@@ -110,3 +117,21 @@ int jfs_ioctl(struct inode * inode, struct file * filp, unsigned int cmd,
110 } 117 }
111} 118}
112 119
120#ifdef CONFIG_COMPAT
121long jfs_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
122{
123 /* While these ioctl numbers defined with 'long' and have different
124 * numbers than the 64bit ABI,
125 * the actual implementation only deals with ints and is compatible.
126 */
127 switch (cmd) {
128 case JFS_IOC_GETFLAGS32:
129 cmd = JFS_IOC_GETFLAGS;
130 break;
131 case JFS_IOC_SETFLAGS32:
132 cmd = JFS_IOC_SETFLAGS;
133 break;
134 }
135 return jfs_ioctl(filp, cmd, arg);
136}
137#endif
diff --git a/fs/jfs/jfs_dinode.h b/fs/jfs/jfs_dinode.h
index c387540d3425..395c4c0d0f06 100644
--- a/fs/jfs/jfs_dinode.h
+++ b/fs/jfs/jfs_dinode.h
@@ -170,5 +170,7 @@ struct dinode {
170#define JFS_IOC_GETFLAGS _IOR('f', 1, long) 170#define JFS_IOC_GETFLAGS _IOR('f', 1, long)
171#define JFS_IOC_SETFLAGS _IOW('f', 2, long) 171#define JFS_IOC_SETFLAGS _IOW('f', 2, long)
172 172
173#define JFS_IOC_GETFLAGS32 _IOR('f', 1, int)
174#define JFS_IOC_SETFLAGS32 _IOW('f', 2, int)
173 175
174#endif /*_H_JFS_DINODE */ 176#endif /*_H_JFS_DINODE */
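For context on the two *32 numbers added above: on most architectures _IOR()/_IOW() pack dir<<30 | size<<16 | type<<8 | nr, with size taken from sizeof(type). sizeof(long) is 8 in the 64-bit kernel ABI but 4 for a 32-bit task, so the same 'f'/1 ioctl reaches the kernel under a different command value. The lines below only restate the defines from this hunk with that assumed encoding spelled out; the bit layout is the generic one, not something this patch introduces.

#define JFS_IOC_GETFLAGS	_IOR('f', 1, long)	/* size field = sizeof(long) = 8 on a 64-bit kernel */
#define JFS_IOC_GETFLAGS32	_IOR('f', 1, int)	/* size field = sizeof(int)  = 4, what a 32-bit task sends */

Only the size field differs, which is why jfs_compat_ioctl() can simply rewrite the command number and fall through to jfs_ioctl().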
diff --git a/fs/jfs/jfs_inode.h b/fs/jfs/jfs_inode.h
index 8e2cf2cde185..adb2fafcc544 100644
--- a/fs/jfs/jfs_inode.h
+++ b/fs/jfs/jfs_inode.h
@@ -22,9 +22,9 @@ struct fid;
22 22
23extern struct inode *ialloc(struct inode *, umode_t); 23extern struct inode *ialloc(struct inode *, umode_t);
24extern int jfs_fsync(struct file *, struct dentry *, int); 24extern int jfs_fsync(struct file *, struct dentry *, int);
25extern int jfs_ioctl(struct inode *, struct file *, 25extern long jfs_ioctl(struct file *, unsigned int, unsigned long);
26 unsigned int, unsigned long); 26extern long jfs_compat_ioctl(struct file *, unsigned int, unsigned long);
27extern void jfs_read_inode(struct inode *); 27extern struct inode *jfs_iget(struct super_block *, unsigned long);
28extern int jfs_commit_inode(struct inode *, int); 28extern int jfs_commit_inode(struct inode *, int);
29extern int jfs_write_inode(struct inode*, int); 29extern int jfs_write_inode(struct inode*, int);
30extern void jfs_delete_inode(struct inode *); 30extern void jfs_delete_inode(struct inode *);
diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c
index f8718de3505e..0ba6778edaa2 100644
--- a/fs/jfs/namei.c
+++ b/fs/jfs/namei.c
@@ -1462,12 +1462,10 @@ static struct dentry *jfs_lookup(struct inode *dip, struct dentry *dentry, struc
1462 } 1462 }
1463 } 1463 }
1464 1464
1465 ip = iget(dip->i_sb, inum); 1465 ip = jfs_iget(dip->i_sb, inum);
1466 if (ip == NULL || is_bad_inode(ip)) { 1466 if (IS_ERR(ip)) {
1467 jfs_err("jfs_lookup: iget failed on inum %d", (uint) inum); 1467 jfs_err("jfs_lookup: iget failed on inum %d", (uint) inum);
1468 if (ip) 1468 return ERR_CAST(ip);
1469 iput(ip);
1470 return ERR_PTR(-EACCES);
1471 } 1469 }
1472 1470
1473 dentry = d_splice_alias(ip, dentry); 1471 dentry = d_splice_alias(ip, dentry);
@@ -1485,12 +1483,11 @@ static struct inode *jfs_nfs_get_inode(struct super_block *sb,
1485 1483
1486 if (ino == 0) 1484 if (ino == 0)
1487 return ERR_PTR(-ESTALE); 1485 return ERR_PTR(-ESTALE);
1488 inode = iget(sb, ino); 1486 inode = jfs_iget(sb, ino);
1489 if (inode == NULL) 1487 if (IS_ERR(inode))
1490 return ERR_PTR(-ENOMEM); 1488 return ERR_CAST(inode);
1491 1489
1492 if (is_bad_inode(inode) || 1490 if (generation && inode->i_generation != generation) {
1493 (generation && inode->i_generation != generation)) {
1494 iput(inode); 1491 iput(inode);
1495 return ERR_PTR(-ESTALE); 1492 return ERR_PTR(-ESTALE);
1496 } 1493 }
@@ -1521,17 +1518,14 @@ struct dentry *jfs_get_parent(struct dentry *dentry)
1521 1518
1522 parent_ino = 1519 parent_ino =
1523 le32_to_cpu(JFS_IP(dentry->d_inode)->i_dtroot.header.idotdot); 1520 le32_to_cpu(JFS_IP(dentry->d_inode)->i_dtroot.header.idotdot);
1524 inode = iget(sb, parent_ino); 1521 inode = jfs_iget(sb, parent_ino);
1525 if (inode) { 1522 if (IS_ERR(inode)) {
1526 if (is_bad_inode(inode)) { 1523 parent = ERR_CAST(inode);
1524 } else {
1525 parent = d_alloc_anon(inode);
1526 if (!parent) {
1527 parent = ERR_PTR(-ENOMEM);
1527 iput(inode); 1528 iput(inode);
1528 parent = ERR_PTR(-EACCES);
1529 } else {
1530 parent = d_alloc_anon(inode);
1531 if (!parent) {
1532 parent = ERR_PTR(-ENOMEM);
1533 iput(inode);
1534 }
1535 } 1529 }
1536 } 1530 }
1537 1531
@@ -1562,7 +1556,10 @@ const struct file_operations jfs_dir_operations = {
1562 .read = generic_read_dir, 1556 .read = generic_read_dir,
1563 .readdir = jfs_readdir, 1557 .readdir = jfs_readdir,
1564 .fsync = jfs_fsync, 1558 .fsync = jfs_fsync,
1565 .ioctl = jfs_ioctl, 1559 .unlocked_ioctl = jfs_ioctl,
1560#ifdef CONFIG_COMPAT
1561 .compat_ioctl = jfs_compat_ioctl,
1562#endif
1566}; 1563};
1567 1564
1568static int jfs_ci_hash(struct dentry *dir, struct qstr *this) 1565static int jfs_ci_hash(struct dentry *dir, struct qstr *this)
diff --git a/fs/jfs/super.c b/fs/jfs/super.c
index 70a14001c98f..50ea65451732 100644
--- a/fs/jfs/super.c
+++ b/fs/jfs/super.c
@@ -414,7 +414,7 @@ static int jfs_fill_super(struct super_block *sb, void *data, int silent)
414 struct inode *inode; 414 struct inode *inode;
415 int rc; 415 int rc;
416 s64 newLVSize = 0; 416 s64 newLVSize = 0;
417 int flag; 417 int flag, ret = -EINVAL;
418 418
419 jfs_info("In jfs_read_super: s_flags=0x%lx", sb->s_flags); 419 jfs_info("In jfs_read_super: s_flags=0x%lx", sb->s_flags);
420 420
@@ -461,8 +461,10 @@ static int jfs_fill_super(struct super_block *sb, void *data, int silent)
461 * Initialize direct-mapping inode/address-space 461 * Initialize direct-mapping inode/address-space
462 */ 462 */
463 inode = new_inode(sb); 463 inode = new_inode(sb);
464 if (inode == NULL) 464 if (inode == NULL) {
465 ret = -ENOMEM;
465 goto out_kfree; 466 goto out_kfree;
467 }
466 inode->i_ino = 0; 468 inode->i_ino = 0;
467 inode->i_nlink = 1; 469 inode->i_nlink = 1;
468 inode->i_size = sb->s_bdev->bd_inode->i_size; 470 inode->i_size = sb->s_bdev->bd_inode->i_size;
@@ -494,9 +496,11 @@ static int jfs_fill_super(struct super_block *sb, void *data, int silent)
494 496
495 sb->s_magic = JFS_SUPER_MAGIC; 497 sb->s_magic = JFS_SUPER_MAGIC;
496 498
497 inode = iget(sb, ROOT_I); 499 inode = jfs_iget(sb, ROOT_I);
498 if (!inode || is_bad_inode(inode)) 500 if (IS_ERR(inode)) {
501 ret = PTR_ERR(inode);
499 goto out_no_root; 502 goto out_no_root;
503 }
500 sb->s_root = d_alloc_root(inode); 504 sb->s_root = d_alloc_root(inode);
501 if (!sb->s_root) 505 if (!sb->s_root)
502 goto out_no_root; 506 goto out_no_root;
@@ -536,7 +540,7 @@ out_kfree:
536 if (sbi->nls_tab) 540 if (sbi->nls_tab)
537 unload_nls(sbi->nls_tab); 541 unload_nls(sbi->nls_tab);
538 kfree(sbi); 542 kfree(sbi);
539 return -EINVAL; 543 return ret;
540} 544}
541 545
542static void jfs_write_super_lockfs(struct super_block *sb) 546static void jfs_write_super_lockfs(struct super_block *sb)
@@ -726,7 +730,6 @@ out:
726static const struct super_operations jfs_super_operations = { 730static const struct super_operations jfs_super_operations = {
727 .alloc_inode = jfs_alloc_inode, 731 .alloc_inode = jfs_alloc_inode,
728 .destroy_inode = jfs_destroy_inode, 732 .destroy_inode = jfs_destroy_inode,
729 .read_inode = jfs_read_inode,
730 .dirty_inode = jfs_dirty_inode, 733 .dirty_inode = jfs_dirty_inode,
731 .write_inode = jfs_write_inode, 734 .write_inode = jfs_write_inode,
732 .delete_inode = jfs_delete_inode, 735 .delete_inode = jfs_delete_inode,
diff --git a/fs/libfs.c b/fs/libfs.c
index 6e68b700958d..b004dfadd891 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -341,13 +341,10 @@ int simple_prepare_write(struct file *file, struct page *page,
341 unsigned from, unsigned to) 341 unsigned from, unsigned to)
342{ 342{
343 if (!PageUptodate(page)) { 343 if (!PageUptodate(page)) {
344 if (to - from != PAGE_CACHE_SIZE) { 344 if (to - from != PAGE_CACHE_SIZE)
345 void *kaddr = kmap_atomic(page, KM_USER0); 345 zero_user_segments(page,
346 memset(kaddr, 0, from); 346 0, from,
347 memset(kaddr + to, 0, PAGE_CACHE_SIZE - to); 347 to, PAGE_CACHE_SIZE);
348 flush_dcache_page(page);
349 kunmap_atomic(kaddr, KM_USER0);
350 }
351 } 348 }
352 return 0; 349 return 0;
353} 350}
@@ -586,8 +583,8 @@ int simple_transaction_release(struct inode *inode, struct file *file)
586/* Simple attribute files */ 583/* Simple attribute files */
587 584
588struct simple_attr { 585struct simple_attr {
589 u64 (*get)(void *); 586 int (*get)(void *, u64 *);
590 void (*set)(void *, u64); 587 int (*set)(void *, u64);
591 char get_buf[24]; /* enough to store a u64 and "\n\0" */ 588 char get_buf[24]; /* enough to store a u64 and "\n\0" */
592 char set_buf[24]; 589 char set_buf[24];
593 void *data; 590 void *data;
@@ -598,7 +595,7 @@ struct simple_attr {
598/* simple_attr_open is called by an actual attribute open file operation 595/* simple_attr_open is called by an actual attribute open file operation
599 * to set the attribute specific access operations. */ 596 * to set the attribute specific access operations. */
600int simple_attr_open(struct inode *inode, struct file *file, 597int simple_attr_open(struct inode *inode, struct file *file,
601 u64 (*get)(void *), void (*set)(void *, u64), 598 int (*get)(void *, u64 *), int (*set)(void *, u64),
602 const char *fmt) 599 const char *fmt)
603{ 600{
604 struct simple_attr *attr; 601 struct simple_attr *attr;
@@ -618,7 +615,7 @@ int simple_attr_open(struct inode *inode, struct file *file,
618 return nonseekable_open(inode, file); 615 return nonseekable_open(inode, file);
619} 616}
620 617
621int simple_attr_close(struct inode *inode, struct file *file) 618int simple_attr_release(struct inode *inode, struct file *file)
622{ 619{
623 kfree(file->private_data); 620 kfree(file->private_data);
624 return 0; 621 return 0;
@@ -637,15 +634,24 @@ ssize_t simple_attr_read(struct file *file, char __user *buf,
637 if (!attr->get) 634 if (!attr->get)
638 return -EACCES; 635 return -EACCES;
639 636
640 mutex_lock(&attr->mutex); 637 ret = mutex_lock_interruptible(&attr->mutex);
641 if (*ppos) /* continued read */ 638 if (ret)
639 return ret;
640
641 if (*ppos) { /* continued read */
642 size = strlen(attr->get_buf); 642 size = strlen(attr->get_buf);
643 else /* first read */ 643 } else { /* first read */
644 u64 val;
645 ret = attr->get(attr->data, &val);
646 if (ret)
647 goto out;
648
644 size = scnprintf(attr->get_buf, sizeof(attr->get_buf), 649 size = scnprintf(attr->get_buf, sizeof(attr->get_buf),
645 attr->fmt, 650 attr->fmt, (unsigned long long)val);
646 (unsigned long long)attr->get(attr->data)); 651 }
647 652
648 ret = simple_read_from_buffer(buf, len, ppos, attr->get_buf, size); 653 ret = simple_read_from_buffer(buf, len, ppos, attr->get_buf, size);
654out:
649 mutex_unlock(&attr->mutex); 655 mutex_unlock(&attr->mutex);
650 return ret; 656 return ret;
651} 657}
@@ -660,11 +666,13 @@ ssize_t simple_attr_write(struct file *file, const char __user *buf,
660 ssize_t ret; 666 ssize_t ret;
661 667
662 attr = file->private_data; 668 attr = file->private_data;
663
664 if (!attr->set) 669 if (!attr->set)
665 return -EACCES; 670 return -EACCES;
666 671
667 mutex_lock(&attr->mutex); 672 ret = mutex_lock_interruptible(&attr->mutex);
673 if (ret)
674 return ret;
675
668 ret = -EFAULT; 676 ret = -EFAULT;
669 size = min(sizeof(attr->set_buf) - 1, len); 677 size = min(sizeof(attr->set_buf) - 1, len);
670 if (copy_from_user(attr->set_buf, buf, size)) 678 if (copy_from_user(attr->set_buf, buf, size))
@@ -796,6 +804,6 @@ EXPORT_SYMBOL(simple_transaction_get);
796EXPORT_SYMBOL(simple_transaction_read); 804EXPORT_SYMBOL(simple_transaction_read);
797EXPORT_SYMBOL(simple_transaction_release); 805EXPORT_SYMBOL(simple_transaction_release);
798EXPORT_SYMBOL_GPL(simple_attr_open); 806EXPORT_SYMBOL_GPL(simple_attr_open);
799EXPORT_SYMBOL_GPL(simple_attr_close); 807EXPORT_SYMBOL_GPL(simple_attr_release);
800EXPORT_SYMBOL_GPL(simple_attr_read); 808EXPORT_SYMBOL_GPL(simple_attr_read);
801EXPORT_SYMBOL_GPL(simple_attr_write); 809EXPORT_SYMBOL_GPL(simple_attr_write);
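The fs/libfs.c hunks above change the simple-attribute accessors from u64 get(void *) and void set(void *, u64) to error-returning variants, make the mutex acquisition interruptible, and rename simple_attr_close() to simple_attr_release(). A hedged sketch of what a user of these helpers looks like after the change; the example_* names are invented for illustration, and DEFINE_SIMPLE_ATTRIBUTE() is the stock wrapper whose matching update lives outside fs/ and is therefore not part of this listing.

#include <linux/fs.h>

static u64 example_value;

static int example_get(void *data, u64 *val)
{
	*val = example_value;
	return 0;			/* getters may now fail with -Exxx */
}

static int example_set(void *data, u64 val)
{
	if (val > 100)
		return -EINVAL;		/* errors propagate to read()/write() */
	example_value = val;
	return 0;
}

/* Wires the accessors into simple_attr_open()/simple_attr_read()/
 * simple_attr_write(); the release hook is now simple_attr_release(). */
DEFINE_SIMPLE_ATTRIBUTE(example_fops, example_get, example_set, "%llu\n");

The resulting example_fops would typically be registered with debugfs_create_file().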
diff --git a/fs/lockd/host.c b/fs/lockd/host.c
index ca6b16fc3101..f1ef49fff118 100644
--- a/fs/lockd/host.c
+++ b/fs/lockd/host.c
@@ -243,10 +243,18 @@ nlm_bind_host(struct nlm_host *host)
243 .program = &nlm_program, 243 .program = &nlm_program,
244 .version = host->h_version, 244 .version = host->h_version,
245 .authflavor = RPC_AUTH_UNIX, 245 .authflavor = RPC_AUTH_UNIX,
246 .flags = (RPC_CLNT_CREATE_HARDRTRY | 246 .flags = (RPC_CLNT_CREATE_NOPING |
247 RPC_CLNT_CREATE_AUTOBIND), 247 RPC_CLNT_CREATE_AUTOBIND),
248 }; 248 };
249 249
250 /*
251 * lockd retries server side blocks automatically so we want
252 * those to be soft RPC calls. Client side calls need to be
253 * hard RPC tasks.
254 */
255 if (!host->h_server)
256 args.flags |= RPC_CLNT_CREATE_HARDRTRY;
257
250 clnt = rpc_create(&args); 258 clnt = rpc_create(&args);
251 if (!IS_ERR(clnt)) 259 if (!IS_ERR(clnt))
252 host->h_rpcclnt = clnt; 260 host->h_rpcclnt = clnt;
diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c
index 2f4d8fa66689..fe9bdb4a220c 100644
--- a/fs/lockd/svclock.c
+++ b/fs/lockd/svclock.c
@@ -763,11 +763,20 @@ callback:
763 dprintk("lockd: GRANTing blocked lock.\n"); 763 dprintk("lockd: GRANTing blocked lock.\n");
764 block->b_granted = 1; 764 block->b_granted = 1;
765 765
766 /* Schedule next grant callback in 30 seconds */ 766 /* keep block on the list, but don't reattempt until the RPC
767 nlmsvc_insert_block(block, 30 * HZ); 767 * completes or the submission fails
768 */
769 nlmsvc_insert_block(block, NLM_NEVER);
770
771 /* Call the client -- use a soft RPC task since nlmsvc_retry_blocked
772 * will queue up a new one if this one times out
773 */
774 error = nlm_async_call(block->b_call, NLMPROC_GRANTED_MSG,
775 &nlmsvc_grant_ops);
768 776
769 /* Call the client */ 777 /* RPC submission failed, wait a bit and retry */
770 nlm_async_call(block->b_call, NLMPROC_GRANTED_MSG, &nlmsvc_grant_ops); 778 if (error < 0)
779 nlmsvc_insert_block(block, 10 * HZ);
771} 780}
772 781
773/* 782/*
@@ -786,6 +795,17 @@ static void nlmsvc_grant_callback(struct rpc_task *task, void *data)
786 795
787 dprintk("lockd: GRANT_MSG RPC callback\n"); 796 dprintk("lockd: GRANT_MSG RPC callback\n");
788 797
798 /* if the block is not on a list at this point then it has
799 * been invalidated. Don't try to requeue it.
800 *
801 * FIXME: it's possible that the block is removed from the list
802 * after this check but before the nlmsvc_insert_block. In that
803 * case it will be added back. Perhaps we need better locking
804 * for nlm_blocked?
805 */
806 if (list_empty(&block->b_list))
807 return;
808
789 /* Technically, we should down the file semaphore here. Since we 809 /* Technically, we should down the file semaphore here. Since we
790 * move the block towards the head of the queue only, no harm 810 * move the block towards the head of the queue only, no harm
791 * can be done, though. */ 811 * can be done, though. */
diff --git a/fs/locks.c b/fs/locks.c
index 49354b9c7dc1..f36f0e61558d 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -658,8 +658,7 @@ posix_test_lock(struct file *filp, struct file_lock *fl)
658 if (cfl) { 658 if (cfl) {
659 __locks_copy_lock(fl, cfl); 659 __locks_copy_lock(fl, cfl);
660 if (cfl->fl_nspid) 660 if (cfl->fl_nspid)
661 fl->fl_pid = pid_nr_ns(cfl->fl_nspid, 661 fl->fl_pid = pid_vnr(cfl->fl_nspid);
662 task_active_pid_ns(current));
663 } else 662 } else
664 fl->fl_type = F_UNLCK; 663 fl->fl_type = F_UNLCK;
665 unlock_kernel(); 664 unlock_kernel();
@@ -2084,7 +2083,7 @@ static void lock_get_status(struct seq_file *f, struct file_lock *fl,
2084 unsigned int fl_pid; 2083 unsigned int fl_pid;
2085 2084
2086 if (fl->fl_nspid) 2085 if (fl->fl_nspid)
2087 fl_pid = pid_nr_ns(fl->fl_nspid, task_active_pid_ns(current)); 2086 fl_pid = pid_vnr(fl->fl_nspid);
2088 else 2087 else
2089 fl_pid = fl->fl_pid; 2088 fl_pid = fl->fl_pid;
2090 2089
diff --git a/fs/minix/inode.c b/fs/minix/inode.c
index bf4cd316af81..84f6242ba6fc 100644
--- a/fs/minix/inode.c
+++ b/fs/minix/inode.c
@@ -18,7 +18,6 @@
18#include <linux/highuid.h> 18#include <linux/highuid.h>
19#include <linux/vfs.h> 19#include <linux/vfs.h>
20 20
21static void minix_read_inode(struct inode * inode);
22static int minix_write_inode(struct inode * inode, int wait); 21static int minix_write_inode(struct inode * inode, int wait);
23static int minix_statfs(struct dentry *dentry, struct kstatfs *buf); 22static int minix_statfs(struct dentry *dentry, struct kstatfs *buf);
24static int minix_remount (struct super_block * sb, int * flags, char * data); 23static int minix_remount (struct super_block * sb, int * flags, char * data);
@@ -96,7 +95,6 @@ static void destroy_inodecache(void)
96static const struct super_operations minix_sops = { 95static const struct super_operations minix_sops = {
97 .alloc_inode = minix_alloc_inode, 96 .alloc_inode = minix_alloc_inode,
98 .destroy_inode = minix_destroy_inode, 97 .destroy_inode = minix_destroy_inode,
99 .read_inode = minix_read_inode,
100 .write_inode = minix_write_inode, 98 .write_inode = minix_write_inode,
101 .delete_inode = minix_delete_inode, 99 .delete_inode = minix_delete_inode,
102 .put_super = minix_put_super, 100 .put_super = minix_put_super,
@@ -149,6 +147,7 @@ static int minix_fill_super(struct super_block *s, void *data, int silent)
149 unsigned long i, block; 147 unsigned long i, block;
150 struct inode *root_inode; 148 struct inode *root_inode;
151 struct minix_sb_info *sbi; 149 struct minix_sb_info *sbi;
150 int ret = -EINVAL;
152 151
153 sbi = kzalloc(sizeof(struct minix_sb_info), GFP_KERNEL); 152 sbi = kzalloc(sizeof(struct minix_sb_info), GFP_KERNEL);
154 if (!sbi) 153 if (!sbi)
@@ -246,10 +245,13 @@ static int minix_fill_super(struct super_block *s, void *data, int silent)
246 245
247 /* set up enough so that it can read an inode */ 246 /* set up enough so that it can read an inode */
248 s->s_op = &minix_sops; 247 s->s_op = &minix_sops;
249 root_inode = iget(s, MINIX_ROOT_INO); 248 root_inode = minix_iget(s, MINIX_ROOT_INO);
250 if (!root_inode || is_bad_inode(root_inode)) 249 if (IS_ERR(root_inode)) {
250 ret = PTR_ERR(root_inode);
251 goto out_no_root; 251 goto out_no_root;
252 }
252 253
254 ret = -ENOMEM;
253 s->s_root = d_alloc_root(root_inode); 255 s->s_root = d_alloc_root(root_inode);
254 if (!s->s_root) 256 if (!s->s_root)
255 goto out_iput; 257 goto out_iput;
@@ -290,6 +292,7 @@ out_freemap:
290 goto out_release; 292 goto out_release;
291 293
292out_no_map: 294out_no_map:
295 ret = -ENOMEM;
293 if (!silent) 296 if (!silent)
294 printk("MINIX-fs: can't allocate map\n"); 297 printk("MINIX-fs: can't allocate map\n");
295 goto out_release; 298 goto out_release;
@@ -316,7 +319,7 @@ out_bad_sb:
316out: 319out:
317 s->s_fs_info = NULL; 320 s->s_fs_info = NULL;
318 kfree(sbi); 321 kfree(sbi);
319 return -EINVAL; 322 return ret;
320} 323}
321 324
322static int minix_statfs(struct dentry *dentry, struct kstatfs *buf) 325static int minix_statfs(struct dentry *dentry, struct kstatfs *buf)
@@ -409,7 +412,7 @@ void minix_set_inode(struct inode *inode, dev_t rdev)
409/* 412/*
410 * The minix V1 function to read an inode. 413 * The minix V1 function to read an inode.
411 */ 414 */
412static void V1_minix_read_inode(struct inode * inode) 415static struct inode *V1_minix_iget(struct inode *inode)
413{ 416{
414 struct buffer_head * bh; 417 struct buffer_head * bh;
415 struct minix_inode * raw_inode; 418 struct minix_inode * raw_inode;
@@ -418,8 +421,8 @@ static void V1_minix_read_inode(struct inode * inode)
418 421
419 raw_inode = minix_V1_raw_inode(inode->i_sb, inode->i_ino, &bh); 422 raw_inode = minix_V1_raw_inode(inode->i_sb, inode->i_ino, &bh);
420 if (!raw_inode) { 423 if (!raw_inode) {
421 make_bad_inode(inode); 424 iget_failed(inode);
422 return; 425 return ERR_PTR(-EIO);
423 } 426 }
424 inode->i_mode = raw_inode->i_mode; 427 inode->i_mode = raw_inode->i_mode;
425 inode->i_uid = (uid_t)raw_inode->i_uid; 428 inode->i_uid = (uid_t)raw_inode->i_uid;
@@ -435,12 +438,14 @@ static void V1_minix_read_inode(struct inode * inode)
435 minix_inode->u.i1_data[i] = raw_inode->i_zone[i]; 438 minix_inode->u.i1_data[i] = raw_inode->i_zone[i];
436 minix_set_inode(inode, old_decode_dev(raw_inode->i_zone[0])); 439 minix_set_inode(inode, old_decode_dev(raw_inode->i_zone[0]));
437 brelse(bh); 440 brelse(bh);
441 unlock_new_inode(inode);
442 return inode;
438} 443}
439 444
440/* 445/*
441 * The minix V2 function to read an inode. 446 * The minix V2 function to read an inode.
442 */ 447 */
443static void V2_minix_read_inode(struct inode * inode) 448static struct inode *V2_minix_iget(struct inode *inode)
444{ 449{
445 struct buffer_head * bh; 450 struct buffer_head * bh;
446 struct minix2_inode * raw_inode; 451 struct minix2_inode * raw_inode;
@@ -449,8 +454,8 @@ static void V2_minix_read_inode(struct inode * inode)
449 454
450 raw_inode = minix_V2_raw_inode(inode->i_sb, inode->i_ino, &bh); 455 raw_inode = minix_V2_raw_inode(inode->i_sb, inode->i_ino, &bh);
451 if (!raw_inode) { 456 if (!raw_inode) {
452 make_bad_inode(inode); 457 iget_failed(inode);
453 return; 458 return ERR_PTR(-EIO);
454 } 459 }
455 inode->i_mode = raw_inode->i_mode; 460 inode->i_mode = raw_inode->i_mode;
456 inode->i_uid = (uid_t)raw_inode->i_uid; 461 inode->i_uid = (uid_t)raw_inode->i_uid;
@@ -468,17 +473,27 @@ static void V2_minix_read_inode(struct inode * inode)
468 minix_inode->u.i2_data[i] = raw_inode->i_zone[i]; 473 minix_inode->u.i2_data[i] = raw_inode->i_zone[i];
469 minix_set_inode(inode, old_decode_dev(raw_inode->i_zone[0])); 474 minix_set_inode(inode, old_decode_dev(raw_inode->i_zone[0]));
470 brelse(bh); 475 brelse(bh);
476 unlock_new_inode(inode);
477 return inode;
471} 478}
472 479
473/* 480/*
474 * The global function to read an inode. 481 * The global function to read an inode.
475 */ 482 */
476static void minix_read_inode(struct inode * inode) 483struct inode *minix_iget(struct super_block *sb, unsigned long ino)
477{ 484{
485 struct inode *inode;
486
487 inode = iget_locked(sb, ino);
488 if (!inode)
489 return ERR_PTR(-ENOMEM);
490 if (!(inode->i_state & I_NEW))
491 return inode;
492
478 if (INODE_VERSION(inode) == MINIX_V1) 493 if (INODE_VERSION(inode) == MINIX_V1)
479 V1_minix_read_inode(inode); 494 return V1_minix_iget(inode);
480 else 495 else
481 V2_minix_read_inode(inode); 496 return V2_minix_iget(inode);
482} 497}
483 498
484/* 499/*
diff --git a/fs/minix/minix.h b/fs/minix/minix.h
index ac5d3a75cb0d..326edfe96108 100644
--- a/fs/minix/minix.h
+++ b/fs/minix/minix.h
@@ -45,6 +45,7 @@ struct minix_sb_info {
45 unsigned short s_version; 45 unsigned short s_version;
46}; 46};
47 47
48extern struct inode *minix_iget(struct super_block *, unsigned long);
48extern struct minix_inode * minix_V1_raw_inode(struct super_block *, ino_t, struct buffer_head **); 49extern struct minix_inode * minix_V1_raw_inode(struct super_block *, ino_t, struct buffer_head **);
49extern struct minix2_inode * minix_V2_raw_inode(struct super_block *, ino_t, struct buffer_head **); 50extern struct minix2_inode * minix_V2_raw_inode(struct super_block *, ino_t, struct buffer_head **);
50extern struct inode * minix_new_inode(const struct inode * dir, int * error); 51extern struct inode * minix_new_inode(const struct inode * dir, int * error);
diff --git a/fs/minix/namei.c b/fs/minix/namei.c
index f4aa7a939040..102241bc9c79 100644
--- a/fs/minix/namei.c
+++ b/fs/minix/namei.c
@@ -54,10 +54,9 @@ static struct dentry *minix_lookup(struct inode * dir, struct dentry *dentry, st
54 54
55 ino = minix_inode_by_name(dentry); 55 ino = minix_inode_by_name(dentry);
56 if (ino) { 56 if (ino) {
57 inode = iget(dir->i_sb, ino); 57 inode = minix_iget(dir->i_sb, ino);
58 58 if (IS_ERR(inode))
59 if (!inode) 59 return ERR_CAST(inode);
60 return ERR_PTR(-EACCES);
61 } 60 }
62 d_add(dentry, inode); 61 d_add(dentry, inode);
63 return NULL; 62 return NULL;
diff --git a/fs/mpage.c b/fs/mpage.c
index d54f8f897224..5df564366f36 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -276,9 +276,7 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
276 } 276 }
277 277
278 if (first_hole != blocks_per_page) { 278 if (first_hole != blocks_per_page) {
279 zero_user_page(page, first_hole << blkbits, 279 zero_user_segment(page, first_hole << blkbits, PAGE_CACHE_SIZE);
280 PAGE_CACHE_SIZE - (first_hole << blkbits),
281 KM_USER0);
282 if (first_hole == 0) { 280 if (first_hole == 0) {
283 SetPageUptodate(page); 281 SetPageUptodate(page);
284 unlock_page(page); 282 unlock_page(page);
@@ -571,8 +569,7 @@ page_is_mapped:
571 569
572 if (page->index > end_index || !offset) 570 if (page->index > end_index || !offset)
573 goto confused; 571 goto confused;
574 zero_user_page(page, offset, PAGE_CACHE_SIZE - offset, 572 zero_user_segment(page, offset, PAGE_CACHE_SIZE);
575 KM_USER0);
576 } 573 }
577 574
578 /* 575 /*
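Both do_mpage hunks above, like the simple_prepare_write() hunk in fs/libfs.c earlier, replace zero_user_page() or open-coded kmap_atomic()/memset() with the zero_user_segment()/zero_user_segments() helpers, which take half-open [start, end) byte ranges within the page rather than an offset plus a length. A small equivalence sketch, intended only to make the conversion easier to check; example_zero() is a made-up wrapper, the helpers themselves are real.

#include <linux/highmem.h>	/* zero_user_segment(), zero_user_segments() */
#include <linux/pagemap.h>	/* PAGE_CACHE_SIZE */

static void example_zero(struct page *page, unsigned int offset,
			 unsigned int from, unsigned int to)
{
	/* Old form: offset + length (+ an explicit kmap slot):
	 *	zero_user_page(page, offset, PAGE_CACHE_SIZE - offset, KM_USER0);
	 * New form: byte range inside the page. */
	zero_user_segment(page, offset, PAGE_CACHE_SIZE);

	/* Two disjoint ranges in one call, as in simple_prepare_write(). */
	zero_user_segments(page, 0, from, to, PAGE_CACHE_SIZE);
}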
diff --git a/fs/namei.c b/fs/namei.c
index 73e2e665817a..941c8e8228c0 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -106,7 +106,7 @@
106 * any extra contention... 106 * any extra contention...
107 */ 107 */
108 108
109static int fastcall link_path_walk(const char *name, struct nameidata *nd); 109static int link_path_walk(const char *name, struct nameidata *nd);
110 110
111/* In order to reduce some races, while at the same time doing additional 111/* In order to reduce some races, while at the same time doing additional
112 * checking and hopefully speeding things up, we copy filenames to the 112 * checking and hopefully speeding things up, we copy filenames to the
@@ -231,7 +231,7 @@ int permission(struct inode *inode, int mask, struct nameidata *nd)
231 struct vfsmount *mnt = NULL; 231 struct vfsmount *mnt = NULL;
232 232
233 if (nd) 233 if (nd)
234 mnt = nd->mnt; 234 mnt = nd->path.mnt;
235 235
236 if (mask & MAY_WRITE) { 236 if (mask & MAY_WRITE) {
237 umode_t mode = inode->i_mode; 237 umode_t mode = inode->i_mode;
@@ -296,7 +296,7 @@ int permission(struct inode *inode, int mask, struct nameidata *nd)
296 */ 296 */
297int vfs_permission(struct nameidata *nd, int mask) 297int vfs_permission(struct nameidata *nd, int mask)
298{ 298{
299 return permission(nd->dentry->d_inode, mask, nd); 299 return permission(nd->path.dentry->d_inode, mask, nd);
300} 300}
301 301
302/** 302/**
@@ -362,21 +362,31 @@ int deny_write_access(struct file * file)
362 return 0; 362 return 0;
363} 363}
364 364
365void path_release(struct nameidata *nd) 365/**
366 * path_get - get a reference to a path
367 * @path: path to get the reference to
368 *
369 * Given a path increment the reference count to the dentry and the vfsmount.
370 */
371void path_get(struct path *path)
366{ 372{
367 dput(nd->dentry); 373 mntget(path->mnt);
368 mntput(nd->mnt); 374 dget(path->dentry);
369} 375}
376EXPORT_SYMBOL(path_get);
370 377
371/* 378/**
372 * umount() mustn't call path_release()/mntput() as that would clear 379 * path_put - put a reference to a path
373 * mnt_expiry_mark 380 * @path: path to put the reference to
381 *
382 * Given a path decrement the reference count to the dentry and the vfsmount.
374 */ 383 */
375void path_release_on_umount(struct nameidata *nd) 384void path_put(struct path *path)
376{ 385{
377 dput(nd->dentry); 386 dput(path->dentry);
378 mntput_no_expire(nd->mnt); 387 mntput(path->mnt);
379} 388}
389EXPORT_SYMBOL(path_put);
380 390
381/** 391/**
382 * release_open_intent - free up open intent resources 392 * release_open_intent - free up open intent resources
@@ -539,16 +549,16 @@ walk_init_root(const char *name, struct nameidata *nd)
539 struct fs_struct *fs = current->fs; 549 struct fs_struct *fs = current->fs;
540 550
541 read_lock(&fs->lock); 551 read_lock(&fs->lock);
542 if (fs->altroot && !(nd->flags & LOOKUP_NOALT)) { 552 if (fs->altroot.dentry && !(nd->flags & LOOKUP_NOALT)) {
543 nd->mnt = mntget(fs->altrootmnt); 553 nd->path = fs->altroot;
544 nd->dentry = dget(fs->altroot); 554 path_get(&fs->altroot);
545 read_unlock(&fs->lock); 555 read_unlock(&fs->lock);
546 if (__emul_lookup_dentry(name,nd)) 556 if (__emul_lookup_dentry(name,nd))
547 return 0; 557 return 0;
548 read_lock(&fs->lock); 558 read_lock(&fs->lock);
549 } 559 }
550 nd->mnt = mntget(fs->rootmnt); 560 nd->path = fs->root;
551 nd->dentry = dget(fs->root); 561 path_get(&fs->root);
552 read_unlock(&fs->lock); 562 read_unlock(&fs->lock);
553 return 1; 563 return 1;
554} 564}
@@ -561,7 +571,7 @@ static __always_inline int __vfs_follow_link(struct nameidata *nd, const char *l
561 goto fail; 571 goto fail;
562 572
563 if (*link == '/') { 573 if (*link == '/') {
564 path_release(nd); 574 path_put(&nd->path);
565 if (!walk_init_root(link, nd)) 575 if (!walk_init_root(link, nd))
566 /* weird __emul_prefix() stuff did it */ 576 /* weird __emul_prefix() stuff did it */
567 goto out; 577 goto out;
@@ -577,31 +587,31 @@ out:
577 */ 587 */
578 name = __getname(); 588 name = __getname();
579 if (unlikely(!name)) { 589 if (unlikely(!name)) {
580 path_release(nd); 590 path_put(&nd->path);
581 return -ENOMEM; 591 return -ENOMEM;
582 } 592 }
583 strcpy(name, nd->last.name); 593 strcpy(name, nd->last.name);
584 nd->last.name = name; 594 nd->last.name = name;
585 return 0; 595 return 0;
586fail: 596fail:
587 path_release(nd); 597 path_put(&nd->path);
588 return PTR_ERR(link); 598 return PTR_ERR(link);
589} 599}
590 600
591static inline void dput_path(struct path *path, struct nameidata *nd) 601static void path_put_conditional(struct path *path, struct nameidata *nd)
592{ 602{
593 dput(path->dentry); 603 dput(path->dentry);
594 if (path->mnt != nd->mnt) 604 if (path->mnt != nd->path.mnt)
595 mntput(path->mnt); 605 mntput(path->mnt);
596} 606}
597 607
598static inline void path_to_nameidata(struct path *path, struct nameidata *nd) 608static inline void path_to_nameidata(struct path *path, struct nameidata *nd)
599{ 609{
600 dput(nd->dentry); 610 dput(nd->path.dentry);
601 if (nd->mnt != path->mnt) 611 if (nd->path.mnt != path->mnt)
602 mntput(nd->mnt); 612 mntput(nd->path.mnt);
603 nd->mnt = path->mnt; 613 nd->path.mnt = path->mnt;
604 nd->dentry = path->dentry; 614 nd->path.dentry = path->dentry;
605} 615}
606 616
607static __always_inline int __do_follow_link(struct path *path, struct nameidata *nd) 617static __always_inline int __do_follow_link(struct path *path, struct nameidata *nd)
@@ -613,7 +623,7 @@ static __always_inline int __do_follow_link(struct path *path, struct nameidata
613 touch_atime(path->mnt, dentry); 623 touch_atime(path->mnt, dentry);
614 nd_set_link(nd, NULL); 624 nd_set_link(nd, NULL);
615 625
616 if (path->mnt != nd->mnt) { 626 if (path->mnt != nd->path.mnt) {
617 path_to_nameidata(path, nd); 627 path_to_nameidata(path, nd);
618 dget(dentry); 628 dget(dentry);
619 } 629 }
@@ -628,8 +638,7 @@ static __always_inline int __do_follow_link(struct path *path, struct nameidata
628 if (dentry->d_inode->i_op->put_link) 638 if (dentry->d_inode->i_op->put_link)
629 dentry->d_inode->i_op->put_link(dentry, nd, cookie); 639 dentry->d_inode->i_op->put_link(dentry, nd, cookie);
630 } 640 }
631 dput(dentry); 641 path_put(path);
632 mntput(path->mnt);
633 642
634 return error; 643 return error;
635} 644}
@@ -661,8 +670,8 @@ static inline int do_follow_link(struct path *path, struct nameidata *nd)
661 nd->depth--; 670 nd->depth--;
662 return err; 671 return err;
663loop: 672loop:
664 dput_path(path, nd); 673 path_put_conditional(path, nd);
665 path_release(nd); 674 path_put(&nd->path);
666 return err; 675 return err;
667} 676}
668 677
@@ -743,37 +752,37 @@ static __always_inline void follow_dotdot(struct nameidata *nd)
743 752
744 while(1) { 753 while(1) {
745 struct vfsmount *parent; 754 struct vfsmount *parent;
746 struct dentry *old = nd->dentry; 755 struct dentry *old = nd->path.dentry;
747 756
748 read_lock(&fs->lock); 757 read_lock(&fs->lock);
749 if (nd->dentry == fs->root && 758 if (nd->path.dentry == fs->root.dentry &&
750 nd->mnt == fs->rootmnt) { 759 nd->path.mnt == fs->root.mnt) {
751 read_unlock(&fs->lock); 760 read_unlock(&fs->lock);
752 break; 761 break;
753 } 762 }
754 read_unlock(&fs->lock); 763 read_unlock(&fs->lock);
755 spin_lock(&dcache_lock); 764 spin_lock(&dcache_lock);
756 if (nd->dentry != nd->mnt->mnt_root) { 765 if (nd->path.dentry != nd->path.mnt->mnt_root) {
757 nd->dentry = dget(nd->dentry->d_parent); 766 nd->path.dentry = dget(nd->path.dentry->d_parent);
758 spin_unlock(&dcache_lock); 767 spin_unlock(&dcache_lock);
759 dput(old); 768 dput(old);
760 break; 769 break;
761 } 770 }
762 spin_unlock(&dcache_lock); 771 spin_unlock(&dcache_lock);
763 spin_lock(&vfsmount_lock); 772 spin_lock(&vfsmount_lock);
764 parent = nd->mnt->mnt_parent; 773 parent = nd->path.mnt->mnt_parent;
765 if (parent == nd->mnt) { 774 if (parent == nd->path.mnt) {
766 spin_unlock(&vfsmount_lock); 775 spin_unlock(&vfsmount_lock);
767 break; 776 break;
768 } 777 }
769 mntget(parent); 778 mntget(parent);
770 nd->dentry = dget(nd->mnt->mnt_mountpoint); 779 nd->path.dentry = dget(nd->path.mnt->mnt_mountpoint);
771 spin_unlock(&vfsmount_lock); 780 spin_unlock(&vfsmount_lock);
772 dput(old); 781 dput(old);
773 mntput(nd->mnt); 782 mntput(nd->path.mnt);
774 nd->mnt = parent; 783 nd->path.mnt = parent;
775 } 784 }
776 follow_mount(&nd->mnt, &nd->dentry); 785 follow_mount(&nd->path.mnt, &nd->path.dentry);
777} 786}
778 787
779/* 788/*
@@ -784,8 +793,8 @@ static __always_inline void follow_dotdot(struct nameidata *nd)
784static int do_lookup(struct nameidata *nd, struct qstr *name, 793static int do_lookup(struct nameidata *nd, struct qstr *name,
785 struct path *path) 794 struct path *path)
786{ 795{
787 struct vfsmount *mnt = nd->mnt; 796 struct vfsmount *mnt = nd->path.mnt;
788 struct dentry *dentry = __d_lookup(nd->dentry, name); 797 struct dentry *dentry = __d_lookup(nd->path.dentry, name);
789 798
790 if (!dentry) 799 if (!dentry)
791 goto need_lookup; 800 goto need_lookup;
@@ -798,7 +807,7 @@ done:
798 return 0; 807 return 0;
799 808
800need_lookup: 809need_lookup:
801 dentry = real_lookup(nd->dentry, name, nd); 810 dentry = real_lookup(nd->path.dentry, name, nd);
802 if (IS_ERR(dentry)) 811 if (IS_ERR(dentry))
803 goto fail; 812 goto fail;
804 goto done; 813 goto done;
@@ -823,7 +832,7 @@ fail:
823 * Returns 0 and nd will have valid dentry and mnt on success. 832 * Returns 0 and nd will have valid dentry and mnt on success.
824 * Returns error and drops reference to input namei data on failure. 833 * Returns error and drops reference to input namei data on failure.
825 */ 834 */
826static fastcall int __link_path_walk(const char * name, struct nameidata *nd) 835static int __link_path_walk(const char *name, struct nameidata *nd)
827{ 836{
828 struct path next; 837 struct path next;
829 struct inode *inode; 838 struct inode *inode;
@@ -835,7 +844,7 @@ static fastcall int __link_path_walk(const char * name, struct nameidata *nd)
835 if (!*name) 844 if (!*name)
836 goto return_reval; 845 goto return_reval;
837 846
838 inode = nd->dentry->d_inode; 847 inode = nd->path.dentry->d_inode;
839 if (nd->depth) 848 if (nd->depth)
840 lookup_flags = LOOKUP_FOLLOW | (nd->flags & LOOKUP_CONTINUE); 849 lookup_flags = LOOKUP_FOLLOW | (nd->flags & LOOKUP_CONTINUE);
841 850
@@ -883,7 +892,7 @@ static fastcall int __link_path_walk(const char * name, struct nameidata *nd)
883 if (this.name[1] != '.') 892 if (this.name[1] != '.')
884 break; 893 break;
885 follow_dotdot(nd); 894 follow_dotdot(nd);
886 inode = nd->dentry->d_inode; 895 inode = nd->path.dentry->d_inode;
887 /* fallthrough */ 896 /* fallthrough */
888 case 1: 897 case 1:
889 continue; 898 continue;
@@ -892,8 +901,9 @@ static fastcall int __link_path_walk(const char * name, struct nameidata *nd)
892 * See if the low-level filesystem might want 901 * See if the low-level filesystem might want
893 * to use its own hash.. 902 * to use its own hash..
894 */ 903 */
895 if (nd->dentry->d_op && nd->dentry->d_op->d_hash) { 904 if (nd->path.dentry->d_op && nd->path.dentry->d_op->d_hash) {
896 err = nd->dentry->d_op->d_hash(nd->dentry, &this); 905 err = nd->path.dentry->d_op->d_hash(nd->path.dentry,
906 &this);
897 if (err < 0) 907 if (err < 0)
898 break; 908 break;
899 } 909 }
@@ -915,7 +925,7 @@ static fastcall int __link_path_walk(const char * name, struct nameidata *nd)
915 if (err) 925 if (err)
916 goto return_err; 926 goto return_err;
917 err = -ENOENT; 927 err = -ENOENT;
918 inode = nd->dentry->d_inode; 928 inode = nd->path.dentry->d_inode;
919 if (!inode) 929 if (!inode)
920 break; 930 break;
921 err = -ENOTDIR; 931 err = -ENOTDIR;
@@ -943,13 +953,14 @@ last_component:
943 if (this.name[1] != '.') 953 if (this.name[1] != '.')
944 break; 954 break;
945 follow_dotdot(nd); 955 follow_dotdot(nd);
946 inode = nd->dentry->d_inode; 956 inode = nd->path.dentry->d_inode;
947 /* fallthrough */ 957 /* fallthrough */
948 case 1: 958 case 1:
949 goto return_reval; 959 goto return_reval;
950 } 960 }
951 if (nd->dentry->d_op && nd->dentry->d_op->d_hash) { 961 if (nd->path.dentry->d_op && nd->path.dentry->d_op->d_hash) {
952 err = nd->dentry->d_op->d_hash(nd->dentry, &this); 962 err = nd->path.dentry->d_op->d_hash(nd->path.dentry,
963 &this);
953 if (err < 0) 964 if (err < 0)
954 break; 965 break;
955 } 966 }
@@ -962,7 +973,7 @@ last_component:
962 err = do_follow_link(&next, nd); 973 err = do_follow_link(&next, nd);
963 if (err) 974 if (err)
964 goto return_err; 975 goto return_err;
965 inode = nd->dentry->d_inode; 976 inode = nd->path.dentry->d_inode;
966 } else 977 } else
967 path_to_nameidata(&next, nd); 978 path_to_nameidata(&next, nd);
968 err = -ENOENT; 979 err = -ENOENT;
@@ -990,20 +1001,21 @@ return_reval:
990 * We bypassed the ordinary revalidation routines. 1001 * We bypassed the ordinary revalidation routines.
991 * We may need to check the cached dentry for staleness. 1002 * We may need to check the cached dentry for staleness.
992 */ 1003 */
993 if (nd->dentry && nd->dentry->d_sb && 1004 if (nd->path.dentry && nd->path.dentry->d_sb &&
994 (nd->dentry->d_sb->s_type->fs_flags & FS_REVAL_DOT)) { 1005 (nd->path.dentry->d_sb->s_type->fs_flags & FS_REVAL_DOT)) {
995 err = -ESTALE; 1006 err = -ESTALE;
996 /* Note: we do not d_invalidate() */ 1007 /* Note: we do not d_invalidate() */
997 if (!nd->dentry->d_op->d_revalidate(nd->dentry, nd)) 1008 if (!nd->path.dentry->d_op->d_revalidate(
1009 nd->path.dentry, nd))
998 break; 1010 break;
999 } 1011 }
1000return_base: 1012return_base:
1001 return 0; 1013 return 0;
1002out_dput: 1014out_dput:
1003 dput_path(&next, nd); 1015 path_put_conditional(&next, nd);
1004 break; 1016 break;
1005 } 1017 }
1006 path_release(nd); 1018 path_put(&nd->path);
1007return_err: 1019return_err:
1008 return err; 1020 return err;
1009} 1021}
@@ -1015,31 +1027,30 @@ return_err:
1015 * Retry the whole path once, forcing real lookup requests 1027 * Retry the whole path once, forcing real lookup requests
1016 * instead of relying on the dcache. 1028 * instead of relying on the dcache.
1017 */ 1029 */
1018static int fastcall link_path_walk(const char *name, struct nameidata *nd) 1030static int link_path_walk(const char *name, struct nameidata *nd)
1019{ 1031{
1020 struct nameidata save = *nd; 1032 struct nameidata save = *nd;
1021 int result; 1033 int result;
1022 1034
1023 /* make sure the stuff we saved doesn't go away */ 1035 /* make sure the stuff we saved doesn't go away */
1024 dget(save.dentry); 1036 dget(save.path.dentry);
1025 mntget(save.mnt); 1037 mntget(save.path.mnt);
1026 1038
1027 result = __link_path_walk(name, nd); 1039 result = __link_path_walk(name, nd);
1028 if (result == -ESTALE) { 1040 if (result == -ESTALE) {
1029 *nd = save; 1041 *nd = save;
1030 dget(nd->dentry); 1042 dget(nd->path.dentry);
1031 mntget(nd->mnt); 1043 mntget(nd->path.mnt);
1032 nd->flags |= LOOKUP_REVAL; 1044 nd->flags |= LOOKUP_REVAL;
1033 result = __link_path_walk(name, nd); 1045 result = __link_path_walk(name, nd);
1034 } 1046 }
1035 1047
1036 dput(save.dentry); 1048 path_put(&save.path);
1037 mntput(save.mnt);
1038 1049
1039 return result; 1050 return result;
1040} 1051}
1041 1052
1042static int fastcall path_walk(const char * name, struct nameidata *nd) 1053static int path_walk(const char *name, struct nameidata *nd)
1043{ 1054{
1044 current->total_link_count = 0; 1055 current->total_link_count = 0;
1045 return link_path_walk(name, nd); 1056 return link_path_walk(name, nd);
@@ -1054,9 +1065,9 @@ static int __emul_lookup_dentry(const char *name, struct nameidata *nd)
1054 if (path_walk(name, nd)) 1065 if (path_walk(name, nd))
1055 return 0; /* something went wrong... */ 1066 return 0; /* something went wrong... */
1056 1067
1057 if (!nd->dentry->d_inode || S_ISDIR(nd->dentry->d_inode->i_mode)) { 1068 if (!nd->path.dentry->d_inode ||
1058 struct dentry *old_dentry = nd->dentry; 1069 S_ISDIR(nd->path.dentry->d_inode->i_mode)) {
1059 struct vfsmount *old_mnt = nd->mnt; 1070 struct path old_path = nd->path;
1060 struct qstr last = nd->last; 1071 struct qstr last = nd->last;
1061 int last_type = nd->last_type; 1072 int last_type = nd->last_type;
1062 struct fs_struct *fs = current->fs; 1073 struct fs_struct *fs = current->fs;
@@ -1067,19 +1078,17 @@ static int __emul_lookup_dentry(const char *name, struct nameidata *nd)
1067 */ 1078 */
1068 nd->last_type = LAST_ROOT; 1079 nd->last_type = LAST_ROOT;
1069 read_lock(&fs->lock); 1080 read_lock(&fs->lock);
1070 nd->mnt = mntget(fs->rootmnt); 1081 nd->path = fs->root;
1071 nd->dentry = dget(fs->root); 1082 path_get(&fs->root);
1072 read_unlock(&fs->lock); 1083 read_unlock(&fs->lock);
1073 if (path_walk(name, nd) == 0) { 1084 if (path_walk(name, nd) == 0) {
1074 if (nd->dentry->d_inode) { 1085 if (nd->path.dentry->d_inode) {
1075 dput(old_dentry); 1086 path_put(&old_path);
1076 mntput(old_mnt);
1077 return 1; 1087 return 1;
1078 } 1088 }
1079 path_release(nd); 1089 path_put(&nd->path);
1080 } 1090 }
1081 nd->dentry = old_dentry; 1091 nd->path = old_path;
1082 nd->mnt = old_mnt;
1083 nd->last = last; 1092 nd->last = last;
1084 nd->last_type = last_type; 1093 nd->last_type = last_type;
1085 } 1094 }
@@ -1090,33 +1099,26 @@ void set_fs_altroot(void)
1090{ 1099{
1091 char *emul = __emul_prefix(); 1100 char *emul = __emul_prefix();
1092 struct nameidata nd; 1101 struct nameidata nd;
1093 struct vfsmount *mnt = NULL, *oldmnt; 1102 struct path path = {}, old_path;
1094 struct dentry *dentry = NULL, *olddentry;
1095 int err; 1103 int err;
1096 struct fs_struct *fs = current->fs; 1104 struct fs_struct *fs = current->fs;
1097 1105
1098 if (!emul) 1106 if (!emul)
1099 goto set_it; 1107 goto set_it;
1100 err = path_lookup(emul, LOOKUP_FOLLOW|LOOKUP_DIRECTORY|LOOKUP_NOALT, &nd); 1108 err = path_lookup(emul, LOOKUP_FOLLOW|LOOKUP_DIRECTORY|LOOKUP_NOALT, &nd);
1101 if (!err) { 1109 if (!err)
1102 mnt = nd.mnt; 1110 path = nd.path;
1103 dentry = nd.dentry;
1104 }
1105set_it: 1111set_it:
1106 write_lock(&fs->lock); 1112 write_lock(&fs->lock);
1107 oldmnt = fs->altrootmnt; 1113 old_path = fs->altroot;
1108 olddentry = fs->altroot; 1114 fs->altroot = path;
1109 fs->altrootmnt = mnt;
1110 fs->altroot = dentry;
1111 write_unlock(&fs->lock); 1115 write_unlock(&fs->lock);
1112 if (olddentry) { 1116 if (old_path.dentry)
1113 dput(olddentry); 1117 path_put(&old_path);
1114 mntput(oldmnt);
1115 }
1116} 1118}
1117 1119
1118/* Returns 0 and nd will be valid on success; Retuns error, otherwise. */ 1120/* Returns 0 and nd will be valid on success; Retuns error, otherwise. */
1119static int fastcall do_path_lookup(int dfd, const char *name, 1121static int do_path_lookup(int dfd, const char *name,
1120 unsigned int flags, struct nameidata *nd) 1122 unsigned int flags, struct nameidata *nd)
1121{ 1123{
1122 int retval = 0; 1124 int retval = 0;
@@ -1130,21 +1132,21 @@ static int fastcall do_path_lookup(int dfd, const char *name,
1130 1132
1131 if (*name=='/') { 1133 if (*name=='/') {
1132 read_lock(&fs->lock); 1134 read_lock(&fs->lock);
1133 if (fs->altroot && !(nd->flags & LOOKUP_NOALT)) { 1135 if (fs->altroot.dentry && !(nd->flags & LOOKUP_NOALT)) {
1134 nd->mnt = mntget(fs->altrootmnt); 1136 nd->path = fs->altroot;
1135 nd->dentry = dget(fs->altroot); 1137 path_get(&fs->altroot);
1136 read_unlock(&fs->lock); 1138 read_unlock(&fs->lock);
1137 if (__emul_lookup_dentry(name,nd)) 1139 if (__emul_lookup_dentry(name,nd))
1138 goto out; /* found in altroot */ 1140 goto out; /* found in altroot */
1139 read_lock(&fs->lock); 1141 read_lock(&fs->lock);
1140 } 1142 }
1141 nd->mnt = mntget(fs->rootmnt); 1143 nd->path = fs->root;
1142 nd->dentry = dget(fs->root); 1144 path_get(&fs->root);
1143 read_unlock(&fs->lock); 1145 read_unlock(&fs->lock);
1144 } else if (dfd == AT_FDCWD) { 1146 } else if (dfd == AT_FDCWD) {
1145 read_lock(&fs->lock); 1147 read_lock(&fs->lock);
1146 nd->mnt = mntget(fs->pwdmnt); 1148 nd->path = fs->pwd;
1147 nd->dentry = dget(fs->pwd); 1149 path_get(&fs->pwd);
1148 read_unlock(&fs->lock); 1150 read_unlock(&fs->lock);
1149 } else { 1151 } else {
1150 struct dentry *dentry; 1152 struct dentry *dentry;
@@ -1164,17 +1166,17 @@ static int fastcall do_path_lookup(int dfd, const char *name,
1164 if (retval) 1166 if (retval)
1165 goto fput_fail; 1167 goto fput_fail;
1166 1168
1167 nd->mnt = mntget(file->f_path.mnt); 1169 nd->path = file->f_path;
1168 nd->dentry = dget(dentry); 1170 path_get(&file->f_path);
1169 1171
1170 fput_light(file, fput_needed); 1172 fput_light(file, fput_needed);
1171 } 1173 }
1172 1174
1173 retval = path_walk(name, nd); 1175 retval = path_walk(name, nd);
1174out: 1176out:
1175 if (unlikely(!retval && !audit_dummy_context() && nd->dentry && 1177 if (unlikely(!retval && !audit_dummy_context() && nd->path.dentry &&
1176 nd->dentry->d_inode)) 1178 nd->path.dentry->d_inode))
1177 audit_inode(name, nd->dentry); 1179 audit_inode(name, nd->path.dentry);
1178out_fail: 1180out_fail:
1179 return retval; 1181 return retval;
1180 1182
@@ -1183,7 +1185,7 @@ fput_fail:
1183 goto out_fail; 1185 goto out_fail;
1184} 1186}
1185 1187
1186int fastcall path_lookup(const char *name, unsigned int flags, 1188int path_lookup(const char *name, unsigned int flags,
1187 struct nameidata *nd) 1189 struct nameidata *nd)
1188{ 1190{
1189 return do_path_lookup(AT_FDCWD, name, flags, nd); 1191 return do_path_lookup(AT_FDCWD, name, flags, nd);
@@ -1208,13 +1210,13 @@ int vfs_path_lookup(struct dentry *dentry, struct vfsmount *mnt,
1208 nd->flags = flags; 1210 nd->flags = flags;
1209 nd->depth = 0; 1211 nd->depth = 0;
1210 1212
1211 nd->mnt = mntget(mnt); 1213 nd->path.mnt = mntget(mnt);
1212 nd->dentry = dget(dentry); 1214 nd->path.dentry = dget(dentry);
1213 1215
1214 retval = path_walk(name, nd); 1216 retval = path_walk(name, nd);
1215 if (unlikely(!retval && !audit_dummy_context() && nd->dentry && 1217 if (unlikely(!retval && !audit_dummy_context() && nd->path.dentry &&
1216 nd->dentry->d_inode)) 1218 nd->path.dentry->d_inode))
1217 audit_inode(name, nd->dentry); 1219 audit_inode(name, nd->path.dentry);
1218 1220
1219 return retval; 1221 return retval;
1220 1222
@@ -1236,7 +1238,7 @@ static int __path_lookup_intent_open(int dfd, const char *name,
1236 if (IS_ERR(nd->intent.open.file)) { 1238 if (IS_ERR(nd->intent.open.file)) {
1237 if (err == 0) { 1239 if (err == 0) {
1238 err = PTR_ERR(nd->intent.open.file); 1240 err = PTR_ERR(nd->intent.open.file);
1239 path_release(nd); 1241 path_put(&nd->path);
1240 } 1242 }
1241 } else if (err != 0) 1243 } else if (err != 0)
1242 release_open_intent(nd); 1244 release_open_intent(nd);
@@ -1333,10 +1335,10 @@ static struct dentry *lookup_hash(struct nameidata *nd)
1333{ 1335{
1334 int err; 1336 int err;
1335 1337
1336 err = permission(nd->dentry->d_inode, MAY_EXEC, nd); 1338 err = permission(nd->path.dentry->d_inode, MAY_EXEC, nd);
1337 if (err) 1339 if (err)
1338 return ERR_PTR(err); 1340 return ERR_PTR(err);
1339 return __lookup_hash(&nd->last, nd->dentry, nd); 1341 return __lookup_hash(&nd->last, nd->path.dentry, nd);
1340} 1342}
1341 1343
1342static int __lookup_one_len(const char *name, struct qstr *this, 1344static int __lookup_one_len(const char *name, struct qstr *this,
@@ -1409,7 +1411,7 @@ struct dentry *lookup_one_noperm(const char *name, struct dentry *base)
1409 return __lookup_hash(&this, base, NULL); 1411 return __lookup_hash(&this, base, NULL);
1410} 1412}
1411 1413
1412int fastcall __user_walk_fd(int dfd, const char __user *name, unsigned flags, 1414int __user_walk_fd(int dfd, const char __user *name, unsigned flags,
1413 struct nameidata *nd) 1415 struct nameidata *nd)
1414{ 1416{
1415 char *tmp = getname(name); 1417 char *tmp = getname(name);
@@ -1422,7 +1424,7 @@ int fastcall __user_walk_fd(int dfd, const char __user *name, unsigned flags,
1422 return err; 1424 return err;
1423} 1425}
1424 1426
1425int fastcall __user_walk(const char __user *name, unsigned flags, struct nameidata *nd) 1427int __user_walk(const char __user *name, unsigned flags, struct nameidata *nd)
1426{ 1428{
1427 return __user_walk_fd(AT_FDCWD, name, flags, nd); 1429 return __user_walk_fd(AT_FDCWD, name, flags, nd);
1428} 1430}
@@ -1595,7 +1597,7 @@ int vfs_create(struct inode *dir, struct dentry *dentry, int mode,
1595 1597
1596int may_open(struct nameidata *nd, int acc_mode, int flag) 1598int may_open(struct nameidata *nd, int acc_mode, int flag)
1597{ 1599{
1598 struct dentry *dentry = nd->dentry; 1600 struct dentry *dentry = nd->path.dentry;
1599 struct inode *inode = dentry->d_inode; 1601 struct inode *inode = dentry->d_inode;
1600 int error; 1602 int error;
1601 1603
@@ -1616,7 +1618,7 @@ int may_open(struct nameidata *nd, int acc_mode, int flag)
1616 if (S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) { 1618 if (S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
1617 flag &= ~O_TRUNC; 1619 flag &= ~O_TRUNC;
1618 } else if (S_ISBLK(inode->i_mode) || S_ISCHR(inode->i_mode)) { 1620 } else if (S_ISBLK(inode->i_mode) || S_ISCHR(inode->i_mode)) {
1619 if (nd->mnt->mnt_flags & MNT_NODEV) 1621 if (nd->path.mnt->mnt_flags & MNT_NODEV)
1620 return -EACCES; 1622 return -EACCES;
1621 1623
1622 flag &= ~O_TRUNC; 1624 flag &= ~O_TRUNC;
@@ -1678,14 +1680,14 @@ static int open_namei_create(struct nameidata *nd, struct path *path,
1678 int flag, int mode) 1680 int flag, int mode)
1679{ 1681{
1680 int error; 1682 int error;
1681 struct dentry *dir = nd->dentry; 1683 struct dentry *dir = nd->path.dentry;
1682 1684
1683 if (!IS_POSIXACL(dir->d_inode)) 1685 if (!IS_POSIXACL(dir->d_inode))
1684 mode &= ~current->fs->umask; 1686 mode &= ~current->fs->umask;
1685 error = vfs_create(dir->d_inode, path->dentry, mode, nd); 1687 error = vfs_create(dir->d_inode, path->dentry, mode, nd);
1686 mutex_unlock(&dir->d_inode->i_mutex); 1688 mutex_unlock(&dir->d_inode->i_mutex);
1687 dput(nd->dentry); 1689 dput(nd->path.dentry);
1688 nd->dentry = path->dentry; 1690 nd->path.dentry = path->dentry;
1689 if (error) 1691 if (error)
1690 return error; 1692 return error;
1691 /* Don't check for write permission, don't truncate */ 1693 /* Don't check for write permission, don't truncate */
@@ -1752,11 +1754,11 @@ int open_namei(int dfd, const char *pathname, int flag,
1752 if (nd->last_type != LAST_NORM || nd->last.name[nd->last.len]) 1754 if (nd->last_type != LAST_NORM || nd->last.name[nd->last.len])
1753 goto exit; 1755 goto exit;
1754 1756
1755 dir = nd->dentry; 1757 dir = nd->path.dentry;
1756 nd->flags &= ~LOOKUP_PARENT; 1758 nd->flags &= ~LOOKUP_PARENT;
1757 mutex_lock(&dir->d_inode->i_mutex); 1759 mutex_lock(&dir->d_inode->i_mutex);
1758 path.dentry = lookup_hash(nd); 1760 path.dentry = lookup_hash(nd);
1759 path.mnt = nd->mnt; 1761 path.mnt = nd->path.mnt;
1760 1762
1761do_last: 1763do_last:
1762 error = PTR_ERR(path.dentry); 1764 error = PTR_ERR(path.dentry);
@@ -1812,11 +1814,11 @@ ok:
1812 return 0; 1814 return 0;
1813 1815
1814exit_dput: 1816exit_dput:
1815 dput_path(&path, nd); 1817 path_put_conditional(&path, nd);
1816exit: 1818exit:
1817 if (!IS_ERR(nd->intent.open.file)) 1819 if (!IS_ERR(nd->intent.open.file))
1818 release_open_intent(nd); 1820 release_open_intent(nd);
1819 path_release(nd); 1821 path_put(&nd->path);
1820 return error; 1822 return error;
1821 1823
1822do_link: 1824do_link:
@@ -1861,10 +1863,10 @@ do_link:
1861 __putname(nd->last.name); 1863 __putname(nd->last.name);
1862 goto exit; 1864 goto exit;
1863 } 1865 }
1864 dir = nd->dentry; 1866 dir = nd->path.dentry;
1865 mutex_lock(&dir->d_inode->i_mutex); 1867 mutex_lock(&dir->d_inode->i_mutex);
1866 path.dentry = lookup_hash(nd); 1868 path.dentry = lookup_hash(nd);
1867 path.mnt = nd->mnt; 1869 path.mnt = nd->path.mnt;
1868 __putname(nd->last.name); 1870 __putname(nd->last.name);
1869 goto do_last; 1871 goto do_last;
1870} 1872}
@@ -1877,13 +1879,13 @@ do_link:
1877 * Simple function to lookup and return a dentry and create it 1879 * Simple function to lookup and return a dentry and create it
1878 * if it doesn't exist. Is SMP-safe. 1880 * if it doesn't exist. Is SMP-safe.
1879 * 1881 *
1880 * Returns with nd->dentry->d_inode->i_mutex locked. 1882 * Returns with nd->path.dentry->d_inode->i_mutex locked.
1881 */ 1883 */
1882struct dentry *lookup_create(struct nameidata *nd, int is_dir) 1884struct dentry *lookup_create(struct nameidata *nd, int is_dir)
1883{ 1885{
1884 struct dentry *dentry = ERR_PTR(-EEXIST); 1886 struct dentry *dentry = ERR_PTR(-EEXIST);
1885 1887
1886 mutex_lock_nested(&nd->dentry->d_inode->i_mutex, I_MUTEX_PARENT); 1888 mutex_lock_nested(&nd->path.dentry->d_inode->i_mutex, I_MUTEX_PARENT);
1887 /* 1889 /*
1888 * Yucky last component or no last component at all? 1890 * Yucky last component or no last component at all?
1889 * (foo/., foo/.., /////) 1891 * (foo/., foo/.., /////)
@@ -1962,19 +1964,19 @@ asmlinkage long sys_mknodat(int dfd, const char __user *filename, int mode,
1962 dentry = lookup_create(&nd, 0); 1964 dentry = lookup_create(&nd, 0);
1963 error = PTR_ERR(dentry); 1965 error = PTR_ERR(dentry);
1964 1966
1965 if (!IS_POSIXACL(nd.dentry->d_inode)) 1967 if (!IS_POSIXACL(nd.path.dentry->d_inode))
1966 mode &= ~current->fs->umask; 1968 mode &= ~current->fs->umask;
1967 if (!IS_ERR(dentry)) { 1969 if (!IS_ERR(dentry)) {
1968 switch (mode & S_IFMT) { 1970 switch (mode & S_IFMT) {
1969 case 0: case S_IFREG: 1971 case 0: case S_IFREG:
1970 error = vfs_create(nd.dentry->d_inode,dentry,mode,&nd); 1972 error = vfs_create(nd.path.dentry->d_inode,dentry,mode,&nd);
1971 break; 1973 break;
1972 case S_IFCHR: case S_IFBLK: 1974 case S_IFCHR: case S_IFBLK:
1973 error = vfs_mknod(nd.dentry->d_inode,dentry,mode, 1975 error = vfs_mknod(nd.path.dentry->d_inode,dentry,mode,
1974 new_decode_dev(dev)); 1976 new_decode_dev(dev));
1975 break; 1977 break;
1976 case S_IFIFO: case S_IFSOCK: 1978 case S_IFIFO: case S_IFSOCK:
1977 error = vfs_mknod(nd.dentry->d_inode,dentry,mode,0); 1979 error = vfs_mknod(nd.path.dentry->d_inode,dentry,mode,0);
1978 break; 1980 break;
1979 case S_IFDIR: 1981 case S_IFDIR:
1980 error = -EPERM; 1982 error = -EPERM;
@@ -1984,8 +1986,8 @@ asmlinkage long sys_mknodat(int dfd, const char __user *filename, int mode,
1984 } 1986 }
1985 dput(dentry); 1987 dput(dentry);
1986 } 1988 }
1987 mutex_unlock(&nd.dentry->d_inode->i_mutex); 1989 mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
1988 path_release(&nd); 1990 path_put(&nd.path);
1989out: 1991out:
1990 putname(tmp); 1992 putname(tmp);
1991 1993
@@ -2039,13 +2041,13 @@ asmlinkage long sys_mkdirat(int dfd, const char __user *pathname, int mode)
2039 if (IS_ERR(dentry)) 2041 if (IS_ERR(dentry))
2040 goto out_unlock; 2042 goto out_unlock;
2041 2043
2042 if (!IS_POSIXACL(nd.dentry->d_inode)) 2044 if (!IS_POSIXACL(nd.path.dentry->d_inode))
2043 mode &= ~current->fs->umask; 2045 mode &= ~current->fs->umask;
2044 error = vfs_mkdir(nd.dentry->d_inode, dentry, mode); 2046 error = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode);
2045 dput(dentry); 2047 dput(dentry);
2046out_unlock: 2048out_unlock:
2047 mutex_unlock(&nd.dentry->d_inode->i_mutex); 2049 mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
2048 path_release(&nd); 2050 path_put(&nd.path);
2049out: 2051out:
2050 putname(tmp); 2052 putname(tmp);
2051out_err: 2053out_err:
@@ -2143,17 +2145,17 @@ static long do_rmdir(int dfd, const char __user *pathname)
2143 error = -EBUSY; 2145 error = -EBUSY;
2144 goto exit1; 2146 goto exit1;
2145 } 2147 }
2146 mutex_lock_nested(&nd.dentry->d_inode->i_mutex, I_MUTEX_PARENT); 2148 mutex_lock_nested(&nd.path.dentry->d_inode->i_mutex, I_MUTEX_PARENT);
2147 dentry = lookup_hash(&nd); 2149 dentry = lookup_hash(&nd);
2148 error = PTR_ERR(dentry); 2150 error = PTR_ERR(dentry);
2149 if (IS_ERR(dentry)) 2151 if (IS_ERR(dentry))
2150 goto exit2; 2152 goto exit2;
2151 error = vfs_rmdir(nd.dentry->d_inode, dentry); 2153 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
2152 dput(dentry); 2154 dput(dentry);
2153exit2: 2155exit2:
2154 mutex_unlock(&nd.dentry->d_inode->i_mutex); 2156 mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
2155exit1: 2157exit1:
2156 path_release(&nd); 2158 path_put(&nd.path);
2157exit: 2159exit:
2158 putname(name); 2160 putname(name);
2159 return error; 2161 return error;
@@ -2188,6 +2190,7 @@ int vfs_unlink(struct inode *dir, struct dentry *dentry)
2188 2190
2189 /* We don't d_delete() NFS sillyrenamed files--they still exist. */ 2191 /* We don't d_delete() NFS sillyrenamed files--they still exist. */
2190 if (!error && !(dentry->d_flags & DCACHE_NFSFS_RENAMED)) { 2192 if (!error && !(dentry->d_flags & DCACHE_NFSFS_RENAMED)) {
2193 fsnotify_link_count(dentry->d_inode);
2191 d_delete(dentry); 2194 d_delete(dentry);
2192 } 2195 }
2193 2196
@@ -2218,7 +2221,7 @@ static long do_unlinkat(int dfd, const char __user *pathname)
2218 error = -EISDIR; 2221 error = -EISDIR;
2219 if (nd.last_type != LAST_NORM) 2222 if (nd.last_type != LAST_NORM)
2220 goto exit1; 2223 goto exit1;
2221 mutex_lock_nested(&nd.dentry->d_inode->i_mutex, I_MUTEX_PARENT); 2224 mutex_lock_nested(&nd.path.dentry->d_inode->i_mutex, I_MUTEX_PARENT);
2222 dentry = lookup_hash(&nd); 2225 dentry = lookup_hash(&nd);
2223 error = PTR_ERR(dentry); 2226 error = PTR_ERR(dentry);
2224 if (!IS_ERR(dentry)) { 2227 if (!IS_ERR(dentry)) {
@@ -2228,15 +2231,15 @@ static long do_unlinkat(int dfd, const char __user *pathname)
2228 inode = dentry->d_inode; 2231 inode = dentry->d_inode;
2229 if (inode) 2232 if (inode)
2230 atomic_inc(&inode->i_count); 2233 atomic_inc(&inode->i_count);
2231 error = vfs_unlink(nd.dentry->d_inode, dentry); 2234 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
2232 exit2: 2235 exit2:
2233 dput(dentry); 2236 dput(dentry);
2234 } 2237 }
2235 mutex_unlock(&nd.dentry->d_inode->i_mutex); 2238 mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
2236 if (inode) 2239 if (inode)
2237 iput(inode); /* truncate the inode here */ 2240 iput(inode); /* truncate the inode here */
2238exit1: 2241exit1:
2239 path_release(&nd); 2242 path_put(&nd.path);
2240exit: 2243exit:
2241 putname(name); 2244 putname(name);
2242 return error; 2245 return error;
@@ -2309,11 +2312,11 @@ asmlinkage long sys_symlinkat(const char __user *oldname,
2309 if (IS_ERR(dentry)) 2312 if (IS_ERR(dentry))
2310 goto out_unlock; 2313 goto out_unlock;
2311 2314
2312 error = vfs_symlink(nd.dentry->d_inode, dentry, from, S_IALLUGO); 2315 error = vfs_symlink(nd.path.dentry->d_inode, dentry, from, S_IALLUGO);
2313 dput(dentry); 2316 dput(dentry);
2314out_unlock: 2317out_unlock:
2315 mutex_unlock(&nd.dentry->d_inode->i_mutex); 2318 mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
2316 path_release(&nd); 2319 path_put(&nd.path);
2317out: 2320out:
2318 putname(to); 2321 putname(to);
2319out_putname: 2322out_putname:
@@ -2360,7 +2363,7 @@ int vfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_de
2360 error = dir->i_op->link(old_dentry, dir, new_dentry); 2363 error = dir->i_op->link(old_dentry, dir, new_dentry);
2361 mutex_unlock(&old_dentry->d_inode->i_mutex); 2364 mutex_unlock(&old_dentry->d_inode->i_mutex);
2362 if (!error) 2365 if (!error)
2363 fsnotify_create(dir, new_dentry); 2366 fsnotify_link(dir, old_dentry->d_inode, new_dentry);
2364 return error; 2367 return error;
2365} 2368}
2366 2369
@@ -2398,20 +2401,20 @@ asmlinkage long sys_linkat(int olddfd, const char __user *oldname,
2398 if (error) 2401 if (error)
2399 goto out; 2402 goto out;
2400 error = -EXDEV; 2403 error = -EXDEV;
2401 if (old_nd.mnt != nd.mnt) 2404 if (old_nd.path.mnt != nd.path.mnt)
2402 goto out_release; 2405 goto out_release;
2403 new_dentry = lookup_create(&nd, 0); 2406 new_dentry = lookup_create(&nd, 0);
2404 error = PTR_ERR(new_dentry); 2407 error = PTR_ERR(new_dentry);
2405 if (IS_ERR(new_dentry)) 2408 if (IS_ERR(new_dentry))
2406 goto out_unlock; 2409 goto out_unlock;
2407 error = vfs_link(old_nd.dentry, nd.dentry->d_inode, new_dentry); 2410 error = vfs_link(old_nd.path.dentry, nd.path.dentry->d_inode, new_dentry);
2408 dput(new_dentry); 2411 dput(new_dentry);
2409out_unlock: 2412out_unlock:
2410 mutex_unlock(&nd.dentry->d_inode->i_mutex); 2413 mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
2411out_release: 2414out_release:
2412 path_release(&nd); 2415 path_put(&nd.path);
2413out: 2416out:
2414 path_release(&old_nd); 2417 path_put(&old_nd.path);
2415exit: 2418exit:
2416 putname(to); 2419 putname(to);
2417 2420
@@ -2587,15 +2590,15 @@ static int do_rename(int olddfd, const char *oldname,
2587 goto exit1; 2590 goto exit1;
2588 2591
2589 error = -EXDEV; 2592 error = -EXDEV;
2590 if (oldnd.mnt != newnd.mnt) 2593 if (oldnd.path.mnt != newnd.path.mnt)
2591 goto exit2; 2594 goto exit2;
2592 2595
2593 old_dir = oldnd.dentry; 2596 old_dir = oldnd.path.dentry;
2594 error = -EBUSY; 2597 error = -EBUSY;
2595 if (oldnd.last_type != LAST_NORM) 2598 if (oldnd.last_type != LAST_NORM)
2596 goto exit2; 2599 goto exit2;
2597 2600
2598 new_dir = newnd.dentry; 2601 new_dir = newnd.path.dentry;
2599 if (newnd.last_type != LAST_NORM) 2602 if (newnd.last_type != LAST_NORM)
2600 goto exit2; 2603 goto exit2;
2601 2604
@@ -2639,9 +2642,9 @@ exit4:
2639exit3: 2642exit3:
2640 unlock_rename(new_dir, old_dir); 2643 unlock_rename(new_dir, old_dir);
2641exit2: 2644exit2:
2642 path_release(&newnd); 2645 path_put(&newnd.path);
2643exit1: 2646exit1:
2644 path_release(&oldnd); 2647 path_put(&oldnd.path);
2645exit: 2648exit:
2646 return error; 2649 return error;
2647} 2650}
@@ -2815,7 +2818,6 @@ EXPORT_SYMBOL(page_symlink);
2815EXPORT_SYMBOL(page_symlink_inode_operations); 2818EXPORT_SYMBOL(page_symlink_inode_operations);
2816EXPORT_SYMBOL(path_lookup); 2819EXPORT_SYMBOL(path_lookup);
2817EXPORT_SYMBOL(vfs_path_lookup); 2820EXPORT_SYMBOL(vfs_path_lookup);
2818EXPORT_SYMBOL(path_release);
2819EXPORT_SYMBOL(permission); 2821EXPORT_SYMBOL(permission);
2820EXPORT_SYMBOL(vfs_permission); 2822EXPORT_SYMBOL(vfs_permission);
2821EXPORT_SYMBOL(file_permission); 2823EXPORT_SYMBOL(file_permission);
diff --git a/fs/namespace.c b/fs/namespace.c
index 61bf376e29e8..7953c96a2071 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -25,18 +25,21 @@
25#include <linux/security.h> 25#include <linux/security.h>
26#include <linux/mount.h> 26#include <linux/mount.h>
27#include <linux/ramfs.h> 27#include <linux/ramfs.h>
28#include <linux/log2.h>
28#include <asm/uaccess.h> 29#include <asm/uaccess.h>
29#include <asm/unistd.h> 30#include <asm/unistd.h>
30#include "pnode.h" 31#include "pnode.h"
31#include "internal.h" 32#include "internal.h"
32 33
34#define HASH_SHIFT ilog2(PAGE_SIZE / sizeof(struct list_head))
35#define HASH_SIZE (1UL << HASH_SHIFT)
36
33/* spinlock for vfsmount related operations, inplace of dcache_lock */ 37/* spinlock for vfsmount related operations, inplace of dcache_lock */
34__cacheline_aligned_in_smp DEFINE_SPINLOCK(vfsmount_lock); 38__cacheline_aligned_in_smp DEFINE_SPINLOCK(vfsmount_lock);
35 39
36static int event; 40static int event;
37 41
38static struct list_head *mount_hashtable __read_mostly; 42static struct list_head *mount_hashtable __read_mostly;
39static int hash_mask __read_mostly, hash_bits __read_mostly;
40static struct kmem_cache *mnt_cache __read_mostly; 43static struct kmem_cache *mnt_cache __read_mostly;
41static struct rw_semaphore namespace_sem; 44static struct rw_semaphore namespace_sem;
42 45
@@ -48,8 +51,8 @@ static inline unsigned long hash(struct vfsmount *mnt, struct dentry *dentry)
48{ 51{
49 unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES); 52 unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES);
50 tmp += ((unsigned long)dentry / L1_CACHE_BYTES); 53 tmp += ((unsigned long)dentry / L1_CACHE_BYTES);
51 tmp = tmp + (tmp >> hash_bits); 54 tmp = tmp + (tmp >> HASH_SHIFT);
52 return tmp & hash_mask; 55 return tmp & (HASH_SIZE - 1);
53} 56}
54 57
55struct vfsmount *alloc_vfsmnt(const char *name) 58struct vfsmount *alloc_vfsmnt(const char *name)
@@ -154,13 +157,13 @@ static void __touch_mnt_namespace(struct mnt_namespace *ns)
154 157
155static void detach_mnt(struct vfsmount *mnt, struct nameidata *old_nd) 158static void detach_mnt(struct vfsmount *mnt, struct nameidata *old_nd)
156{ 159{
157 old_nd->dentry = mnt->mnt_mountpoint; 160 old_nd->path.dentry = mnt->mnt_mountpoint;
158 old_nd->mnt = mnt->mnt_parent; 161 old_nd->path.mnt = mnt->mnt_parent;
159 mnt->mnt_parent = mnt; 162 mnt->mnt_parent = mnt;
160 mnt->mnt_mountpoint = mnt->mnt_root; 163 mnt->mnt_mountpoint = mnt->mnt_root;
161 list_del_init(&mnt->mnt_child); 164 list_del_init(&mnt->mnt_child);
162 list_del_init(&mnt->mnt_hash); 165 list_del_init(&mnt->mnt_hash);
163 old_nd->dentry->d_mounted--; 166 old_nd->path.dentry->d_mounted--;
164} 167}
165 168
166void mnt_set_mountpoint(struct vfsmount *mnt, struct dentry *dentry, 169void mnt_set_mountpoint(struct vfsmount *mnt, struct dentry *dentry,
@@ -173,10 +176,10 @@ void mnt_set_mountpoint(struct vfsmount *mnt, struct dentry *dentry,
173 176
174static void attach_mnt(struct vfsmount *mnt, struct nameidata *nd) 177static void attach_mnt(struct vfsmount *mnt, struct nameidata *nd)
175{ 178{
176 mnt_set_mountpoint(nd->mnt, nd->dentry, mnt); 179 mnt_set_mountpoint(nd->path.mnt, nd->path.dentry, mnt);
177 list_add_tail(&mnt->mnt_hash, mount_hashtable + 180 list_add_tail(&mnt->mnt_hash, mount_hashtable +
178 hash(nd->mnt, nd->dentry)); 181 hash(nd->path.mnt, nd->path.dentry));
179 list_add_tail(&mnt->mnt_child, &nd->mnt->mnt_mounts); 182 list_add_tail(&mnt->mnt_child, &nd->path.mnt->mnt_mounts);
180} 183}
181 184
182/* 185/*
@@ -317,6 +320,50 @@ void mnt_unpin(struct vfsmount *mnt)
317 320
318EXPORT_SYMBOL(mnt_unpin); 321EXPORT_SYMBOL(mnt_unpin);
319 322
323static inline void mangle(struct seq_file *m, const char *s)
324{
325 seq_escape(m, s, " \t\n\\");
326}
327
328/*
329 * Simple .show_options callback for filesystems which don't want to
330 * implement more complex mount option showing.
331 *
332 * See also save_mount_options().
333 */
334int generic_show_options(struct seq_file *m, struct vfsmount *mnt)
335{
336 const char *options = mnt->mnt_sb->s_options;
337
338 if (options != NULL && options[0]) {
339 seq_putc(m, ',');
340 mangle(m, options);
341 }
342
343 return 0;
344}
345EXPORT_SYMBOL(generic_show_options);
346
347/*
348 * If filesystem uses generic_show_options(), this function should be
349 * called from the fill_super() callback.
350 *
351 * The .remount_fs callback usually needs to be handled in a special
352 * way, to make sure, that previous options are not overwritten if the
353 * remount fails.
354 *
355 * Also note, that if the filesystem's .remount_fs function doesn't
356 * reset all options to their default value, but changes only newly
357 * given options, then the displayed options will not reflect reality
358 * any more.
359 */
360void save_mount_options(struct super_block *sb, char *options)
361{
362 kfree(sb->s_options);
363 sb->s_options = kstrdup(options, GFP_KERNEL);
364}
365EXPORT_SYMBOL(save_mount_options);
366
320/* iterator */ 367/* iterator */
321static void *m_start(struct seq_file *m, loff_t *pos) 368static void *m_start(struct seq_file *m, loff_t *pos)
322{ 369{
@@ -338,11 +385,6 @@ static void m_stop(struct seq_file *m, void *v)
338 up_read(&namespace_sem); 385 up_read(&namespace_sem);
339} 386}
340 387
341static inline void mangle(struct seq_file *m, const char *s)
342{
343 seq_escape(m, s, " \t\n\\");
344}
345
346static int show_vfsmnt(struct seq_file *m, void *v) 388static int show_vfsmnt(struct seq_file *m, void *v)
347{ 389{
348 struct vfsmount *mnt = list_entry(v, struct vfsmount, mnt_list); 390 struct vfsmount *mnt = list_entry(v, struct vfsmount, mnt_list);
@@ -366,10 +408,11 @@ static int show_vfsmnt(struct seq_file *m, void *v)
366 { 0, NULL } 408 { 0, NULL }
367 }; 409 };
368 struct proc_fs_info *fs_infop; 410 struct proc_fs_info *fs_infop;
411 struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };
369 412
370 mangle(m, mnt->mnt_devname ? mnt->mnt_devname : "none"); 413 mangle(m, mnt->mnt_devname ? mnt->mnt_devname : "none");
371 seq_putc(m, ' '); 414 seq_putc(m, ' ');
372 seq_path(m, mnt, mnt->mnt_root, " \t\n\\"); 415 seq_path(m, &mnt_path, " \t\n\\");
373 seq_putc(m, ' '); 416 seq_putc(m, ' ');
374 mangle(m, mnt->mnt_sb->s_type->name); 417 mangle(m, mnt->mnt_sb->s_type->name);
375 if (mnt->mnt_sb->s_subtype && mnt->mnt_sb->s_subtype[0]) { 418 if (mnt->mnt_sb->s_subtype && mnt->mnt_sb->s_subtype[0]) {
@@ -401,6 +444,7 @@ struct seq_operations mounts_op = {
401static int show_vfsstat(struct seq_file *m, void *v) 444static int show_vfsstat(struct seq_file *m, void *v)
402{ 445{
403 struct vfsmount *mnt = list_entry(v, struct vfsmount, mnt_list); 446 struct vfsmount *mnt = list_entry(v, struct vfsmount, mnt_list);
447 struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };
404 int err = 0; 448 int err = 0;
405 449
406 /* device */ 450 /* device */
@@ -412,7 +456,7 @@ static int show_vfsstat(struct seq_file *m, void *v)
412 456
413 /* mount point */ 457 /* mount point */
414 seq_puts(m, " mounted on "); 458 seq_puts(m, " mounted on ");
415 seq_path(m, mnt, mnt->mnt_root, " \t\n\\"); 459 seq_path(m, &mnt_path, " \t\n\\");
416 seq_putc(m, ' '); 460 seq_putc(m, ' ');
417 461
418 /* file system type */ 462 /* file system type */
@@ -551,7 +595,7 @@ static int do_umount(struct vfsmount *mnt, int flags)
551 * (2) the usage count == 1 [parent vfsmount] + 1 [sys_umount] 595 * (2) the usage count == 1 [parent vfsmount] + 1 [sys_umount]
552 */ 596 */
553 if (flags & MNT_EXPIRE) { 597 if (flags & MNT_EXPIRE) {
554 if (mnt == current->fs->rootmnt || 598 if (mnt == current->fs->root.mnt ||
555 flags & (MNT_FORCE | MNT_DETACH)) 599 flags & (MNT_FORCE | MNT_DETACH))
556 return -EINVAL; 600 return -EINVAL;
557 601
@@ -586,7 +630,7 @@ static int do_umount(struct vfsmount *mnt, int flags)
586 * /reboot - static binary that would close all descriptors and 630 * /reboot - static binary that would close all descriptors and
587 * call reboot(9). Then init(8) could umount root and exec /reboot. 631 * call reboot(9). Then init(8) could umount root and exec /reboot.
588 */ 632 */
589 if (mnt == current->fs->rootmnt && !(flags & MNT_DETACH)) { 633 if (mnt == current->fs->root.mnt && !(flags & MNT_DETACH)) {
590 /* 634 /*
591 * Special case for "unmounting" root ... 635 * Special case for "unmounting" root ...
592 * we just try to remount it readonly. 636 * we just try to remount it readonly.
@@ -637,18 +681,20 @@ asmlinkage long sys_umount(char __user * name, int flags)
637 if (retval) 681 if (retval)
638 goto out; 682 goto out;
639 retval = -EINVAL; 683 retval = -EINVAL;
640 if (nd.dentry != nd.mnt->mnt_root) 684 if (nd.path.dentry != nd.path.mnt->mnt_root)
641 goto dput_and_out; 685 goto dput_and_out;
642 if (!check_mnt(nd.mnt)) 686 if (!check_mnt(nd.path.mnt))
643 goto dput_and_out; 687 goto dput_and_out;
644 688
645 retval = -EPERM; 689 retval = -EPERM;
646 if (!capable(CAP_SYS_ADMIN)) 690 if (!capable(CAP_SYS_ADMIN))
647 goto dput_and_out; 691 goto dput_and_out;
648 692
649 retval = do_umount(nd.mnt, flags); 693 retval = do_umount(nd.path.mnt, flags);
650dput_and_out: 694dput_and_out:
651 path_release_on_umount(&nd); 695 /* we mustn't call path_put() as that would clear mnt_expiry_mark */
696 dput(nd.path.dentry);
697 mntput_no_expire(nd.path.mnt);
652out: 698out:
653 return retval; 699 return retval;
654} 700}
@@ -671,10 +717,10 @@ static int mount_is_safe(struct nameidata *nd)
671 return 0; 717 return 0;
672 return -EPERM; 718 return -EPERM;
673#ifdef notyet 719#ifdef notyet
674 if (S_ISLNK(nd->dentry->d_inode->i_mode)) 720 if (S_ISLNK(nd->path.dentry->d_inode->i_mode))
675 return -EPERM; 721 return -EPERM;
676 if (nd->dentry->d_inode->i_mode & S_ISVTX) { 722 if (nd->path.dentry->d_inode->i_mode & S_ISVTX) {
677 if (current->uid != nd->dentry->d_inode->i_uid) 723 if (current->uid != nd->path.dentry->d_inode->i_uid)
678 return -EPERM; 724 return -EPERM;
679 } 725 }
680 if (vfs_permission(nd, MAY_WRITE)) 726 if (vfs_permission(nd, MAY_WRITE))
@@ -723,8 +769,8 @@ struct vfsmount *copy_tree(struct vfsmount *mnt, struct dentry *dentry,
723 q = q->mnt_parent; 769 q = q->mnt_parent;
724 } 770 }
725 p = s; 771 p = s;
726 nd.mnt = q; 772 nd.path.mnt = q;
727 nd.dentry = p->mnt_mountpoint; 773 nd.path.dentry = p->mnt_mountpoint;
728 q = clone_mnt(p, p->mnt_root, flag); 774 q = clone_mnt(p, p->mnt_root, flag);
729 if (!q) 775 if (!q)
730 goto Enomem; 776 goto Enomem;
@@ -833,8 +879,8 @@ static int attach_recursive_mnt(struct vfsmount *source_mnt,
833 struct nameidata *nd, struct nameidata *parent_nd) 879 struct nameidata *nd, struct nameidata *parent_nd)
834{ 880{
835 LIST_HEAD(tree_list); 881 LIST_HEAD(tree_list);
836 struct vfsmount *dest_mnt = nd->mnt; 882 struct vfsmount *dest_mnt = nd->path.mnt;
837 struct dentry *dest_dentry = nd->dentry; 883 struct dentry *dest_dentry = nd->path.dentry;
838 struct vfsmount *child, *p; 884 struct vfsmount *child, *p;
839 885
840 if (propagate_mnt(dest_mnt, dest_dentry, source_mnt, &tree_list)) 886 if (propagate_mnt(dest_mnt, dest_dentry, source_mnt, &tree_list))
@@ -869,13 +915,13 @@ static int graft_tree(struct vfsmount *mnt, struct nameidata *nd)
869 if (mnt->mnt_sb->s_flags & MS_NOUSER) 915 if (mnt->mnt_sb->s_flags & MS_NOUSER)
870 return -EINVAL; 916 return -EINVAL;
871 917
872 if (S_ISDIR(nd->dentry->d_inode->i_mode) != 918 if (S_ISDIR(nd->path.dentry->d_inode->i_mode) !=
873 S_ISDIR(mnt->mnt_root->d_inode->i_mode)) 919 S_ISDIR(mnt->mnt_root->d_inode->i_mode))
874 return -ENOTDIR; 920 return -ENOTDIR;
875 921
876 err = -ENOENT; 922 err = -ENOENT;
877 mutex_lock(&nd->dentry->d_inode->i_mutex); 923 mutex_lock(&nd->path.dentry->d_inode->i_mutex);
878 if (IS_DEADDIR(nd->dentry->d_inode)) 924 if (IS_DEADDIR(nd->path.dentry->d_inode))
879 goto out_unlock; 925 goto out_unlock;
880 926
881 err = security_sb_check_sb(mnt, nd); 927 err = security_sb_check_sb(mnt, nd);
@@ -883,10 +929,10 @@ static int graft_tree(struct vfsmount *mnt, struct nameidata *nd)
883 goto out_unlock; 929 goto out_unlock;
884 930
885 err = -ENOENT; 931 err = -ENOENT;
886 if (IS_ROOT(nd->dentry) || !d_unhashed(nd->dentry)) 932 if (IS_ROOT(nd->path.dentry) || !d_unhashed(nd->path.dentry))
887 err = attach_recursive_mnt(mnt, nd, NULL); 933 err = attach_recursive_mnt(mnt, nd, NULL);
888out_unlock: 934out_unlock:
889 mutex_unlock(&nd->dentry->d_inode->i_mutex); 935 mutex_unlock(&nd->path.dentry->d_inode->i_mutex);
890 if (!err) 936 if (!err)
891 security_sb_post_addmount(mnt, nd); 937 security_sb_post_addmount(mnt, nd);
892 return err; 938 return err;
@@ -894,17 +940,18 @@ out_unlock:
894 940
895/* 941/*
896 * recursively change the type of the mountpoint. 942 * recursively change the type of the mountpoint.
943 * noinline this do_mount helper to save do_mount stack space.
897 */ 944 */
898static int do_change_type(struct nameidata *nd, int flag) 945static noinline int do_change_type(struct nameidata *nd, int flag)
899{ 946{
900 struct vfsmount *m, *mnt = nd->mnt; 947 struct vfsmount *m, *mnt = nd->path.mnt;
901 int recurse = flag & MS_REC; 948 int recurse = flag & MS_REC;
902 int type = flag & ~MS_REC; 949 int type = flag & ~MS_REC;
903 950
904 if (!capable(CAP_SYS_ADMIN)) 951 if (!capable(CAP_SYS_ADMIN))
905 return -EPERM; 952 return -EPERM;
906 953
907 if (nd->dentry != nd->mnt->mnt_root) 954 if (nd->path.dentry != nd->path.mnt->mnt_root)
908 return -EINVAL; 955 return -EINVAL;
909 956
910 down_write(&namespace_sem); 957 down_write(&namespace_sem);
@@ -918,8 +965,10 @@ static int do_change_type(struct nameidata *nd, int flag)
918 965
919/* 966/*
920 * do loopback mount. 967 * do loopback mount.
968 * noinline this do_mount helper to save do_mount stack space.
921 */ 969 */
922static int do_loopback(struct nameidata *nd, char *old_name, int recurse) 970static noinline int do_loopback(struct nameidata *nd, char *old_name,
971 int recurse)
923{ 972{
924 struct nameidata old_nd; 973 struct nameidata old_nd;
925 struct vfsmount *mnt = NULL; 974 struct vfsmount *mnt = NULL;
@@ -934,17 +983,17 @@ static int do_loopback(struct nameidata *nd, char *old_name, int recurse)
934 983
935 down_write(&namespace_sem); 984 down_write(&namespace_sem);
936 err = -EINVAL; 985 err = -EINVAL;
937 if (IS_MNT_UNBINDABLE(old_nd.mnt)) 986 if (IS_MNT_UNBINDABLE(old_nd.path.mnt))
938 goto out; 987 goto out;
939 988
940 if (!check_mnt(nd->mnt) || !check_mnt(old_nd.mnt)) 989 if (!check_mnt(nd->path.mnt) || !check_mnt(old_nd.path.mnt))
941 goto out; 990 goto out;
942 991
943 err = -ENOMEM; 992 err = -ENOMEM;
944 if (recurse) 993 if (recurse)
945 mnt = copy_tree(old_nd.mnt, old_nd.dentry, 0); 994 mnt = copy_tree(old_nd.path.mnt, old_nd.path.dentry, 0);
946 else 995 else
947 mnt = clone_mnt(old_nd.mnt, old_nd.dentry, 0); 996 mnt = clone_mnt(old_nd.path.mnt, old_nd.path.dentry, 0);
948 997
949 if (!mnt) 998 if (!mnt)
950 goto out; 999 goto out;
@@ -960,7 +1009,7 @@ static int do_loopback(struct nameidata *nd, char *old_name, int recurse)
960 1009
961out: 1010out:
962 up_write(&namespace_sem); 1011 up_write(&namespace_sem);
963 path_release(&old_nd); 1012 path_put(&old_nd.path);
964 return err; 1013 return err;
965} 1014}
966 1015
@@ -968,29 +1017,30 @@ out:
968 * change filesystem flags. dir should be a physical root of filesystem. 1017 * change filesystem flags. dir should be a physical root of filesystem.
969 * If you've mounted a non-root directory somewhere and want to do remount 1018 * If you've mounted a non-root directory somewhere and want to do remount
970 * on it - tough luck. 1019 * on it - tough luck.
1020 * noinline this do_mount helper to save do_mount stack space.
971 */ 1021 */
972static int do_remount(struct nameidata *nd, int flags, int mnt_flags, 1022static noinline int do_remount(struct nameidata *nd, int flags, int mnt_flags,
973 void *data) 1023 void *data)
974{ 1024{
975 int err; 1025 int err;
976 struct super_block *sb = nd->mnt->mnt_sb; 1026 struct super_block *sb = nd->path.mnt->mnt_sb;
977 1027
978 if (!capable(CAP_SYS_ADMIN)) 1028 if (!capable(CAP_SYS_ADMIN))
979 return -EPERM; 1029 return -EPERM;
980 1030
981 if (!check_mnt(nd->mnt)) 1031 if (!check_mnt(nd->path.mnt))
982 return -EINVAL; 1032 return -EINVAL;
983 1033
984 if (nd->dentry != nd->mnt->mnt_root) 1034 if (nd->path.dentry != nd->path.mnt->mnt_root)
985 return -EINVAL; 1035 return -EINVAL;
986 1036
987 down_write(&sb->s_umount); 1037 down_write(&sb->s_umount);
988 err = do_remount_sb(sb, flags, data, 0); 1038 err = do_remount_sb(sb, flags, data, 0);
989 if (!err) 1039 if (!err)
990 nd->mnt->mnt_flags = mnt_flags; 1040 nd->path.mnt->mnt_flags = mnt_flags;
991 up_write(&sb->s_umount); 1041 up_write(&sb->s_umount);
992 if (!err) 1042 if (!err)
993 security_sb_post_remount(nd->mnt, flags, data); 1043 security_sb_post_remount(nd->path.mnt, flags, data);
994 return err; 1044 return err;
995} 1045}
996 1046
@@ -1004,7 +1054,10 @@ static inline int tree_contains_unbindable(struct vfsmount *mnt)
1004 return 0; 1054 return 0;
1005} 1055}
1006 1056
1007static int do_move_mount(struct nameidata *nd, char *old_name) 1057/*
1058 * noinline this do_mount helper to save do_mount stack space.
1059 */
1060static noinline int do_move_mount(struct nameidata *nd, char *old_name)
1008{ 1061{
1009 struct nameidata old_nd, parent_nd; 1062 struct nameidata old_nd, parent_nd;
1010 struct vfsmount *p; 1063 struct vfsmount *p;
@@ -1018,69 +1071,74 @@ static int do_move_mount(struct nameidata *nd, char *old_name)
1018 return err; 1071 return err;
1019 1072
1020 down_write(&namespace_sem); 1073 down_write(&namespace_sem);
1021 while (d_mountpoint(nd->dentry) && follow_down(&nd->mnt, &nd->dentry)) 1074 while (d_mountpoint(nd->path.dentry) &&
1075 follow_down(&nd->path.mnt, &nd->path.dentry))
1022 ; 1076 ;
1023 err = -EINVAL; 1077 err = -EINVAL;
1024 if (!check_mnt(nd->mnt) || !check_mnt(old_nd.mnt)) 1078 if (!check_mnt(nd->path.mnt) || !check_mnt(old_nd.path.mnt))
1025 goto out; 1079 goto out;
1026 1080
1027 err = -ENOENT; 1081 err = -ENOENT;
1028 mutex_lock(&nd->dentry->d_inode->i_mutex); 1082 mutex_lock(&nd->path.dentry->d_inode->i_mutex);
1029 if (IS_DEADDIR(nd->dentry->d_inode)) 1083 if (IS_DEADDIR(nd->path.dentry->d_inode))
1030 goto out1; 1084 goto out1;
1031 1085
1032 if (!IS_ROOT(nd->dentry) && d_unhashed(nd->dentry)) 1086 if (!IS_ROOT(nd->path.dentry) && d_unhashed(nd->path.dentry))
1033 goto out1; 1087 goto out1;
1034 1088
1035 err = -EINVAL; 1089 err = -EINVAL;
1036 if (old_nd.dentry != old_nd.mnt->mnt_root) 1090 if (old_nd.path.dentry != old_nd.path.mnt->mnt_root)
1037 goto out1; 1091 goto out1;
1038 1092
1039 if (old_nd.mnt == old_nd.mnt->mnt_parent) 1093 if (old_nd.path.mnt == old_nd.path.mnt->mnt_parent)
1040 goto out1; 1094 goto out1;
1041 1095
1042 if (S_ISDIR(nd->dentry->d_inode->i_mode) != 1096 if (S_ISDIR(nd->path.dentry->d_inode->i_mode) !=
1043 S_ISDIR(old_nd.dentry->d_inode->i_mode)) 1097 S_ISDIR(old_nd.path.dentry->d_inode->i_mode))
1044 goto out1; 1098 goto out1;
1045 /* 1099 /*
1046 * Don't move a mount residing in a shared parent. 1100 * Don't move a mount residing in a shared parent.
1047 */ 1101 */
1048 if (old_nd.mnt->mnt_parent && IS_MNT_SHARED(old_nd.mnt->mnt_parent)) 1102 if (old_nd.path.mnt->mnt_parent &&
1103 IS_MNT_SHARED(old_nd.path.mnt->mnt_parent))
1049 goto out1; 1104 goto out1;
1050 /* 1105 /*
1051 * Don't move a mount tree containing unbindable mounts to a destination 1106 * Don't move a mount tree containing unbindable mounts to a destination
1052 * mount which is shared. 1107 * mount which is shared.
1053 */ 1108 */
1054 if (IS_MNT_SHARED(nd->mnt) && tree_contains_unbindable(old_nd.mnt)) 1109 if (IS_MNT_SHARED(nd->path.mnt) &&
1110 tree_contains_unbindable(old_nd.path.mnt))
1055 goto out1; 1111 goto out1;
1056 err = -ELOOP; 1112 err = -ELOOP;
1057 for (p = nd->mnt; p->mnt_parent != p; p = p->mnt_parent) 1113 for (p = nd->path.mnt; p->mnt_parent != p; p = p->mnt_parent)
1058 if (p == old_nd.mnt) 1114 if (p == old_nd.path.mnt)
1059 goto out1; 1115 goto out1;
1060 1116
1061 if ((err = attach_recursive_mnt(old_nd.mnt, nd, &parent_nd))) 1117 err = attach_recursive_mnt(old_nd.path.mnt, nd, &parent_nd);
1118 if (err)
1062 goto out1; 1119 goto out1;
1063 1120
1064 spin_lock(&vfsmount_lock); 1121 spin_lock(&vfsmount_lock);
1065 /* if the mount is moved, it should no longer be expire 1122 /* if the mount is moved, it should no longer be expire
1066 * automatically */ 1123 * automatically */
1067 list_del_init(&old_nd.mnt->mnt_expire); 1124 list_del_init(&old_nd.path.mnt->mnt_expire);
1068 spin_unlock(&vfsmount_lock); 1125 spin_unlock(&vfsmount_lock);
1069out1: 1126out1:
1070 mutex_unlock(&nd->dentry->d_inode->i_mutex); 1127 mutex_unlock(&nd->path.dentry->d_inode->i_mutex);
1071out: 1128out:
1072 up_write(&namespace_sem); 1129 up_write(&namespace_sem);
1073 if (!err) 1130 if (!err)
1074 path_release(&parent_nd); 1131 path_put(&parent_nd.path);
1075 path_release(&old_nd); 1132 path_put(&old_nd.path);
1076 return err; 1133 return err;
1077} 1134}
1078 1135
1079/* 1136/*
1080 * create a new mount for userspace and request it to be added into the 1137 * create a new mount for userspace and request it to be added into the
1081 * namespace's tree 1138 * namespace's tree
1139 * noinline this do_mount helper to save do_mount stack space.
1082 */ 1140 */
1083static int do_new_mount(struct nameidata *nd, char *type, int flags, 1141static noinline int do_new_mount(struct nameidata *nd, char *type, int flags,
1084 int mnt_flags, char *name, void *data) 1142 int mnt_flags, char *name, void *data)
1085{ 1143{
1086 struct vfsmount *mnt; 1144 struct vfsmount *mnt;
@@ -1110,16 +1168,17 @@ int do_add_mount(struct vfsmount *newmnt, struct nameidata *nd,
1110 1168
1111 down_write(&namespace_sem); 1169 down_write(&namespace_sem);
1112 /* Something was mounted here while we slept */ 1170 /* Something was mounted here while we slept */
1113 while (d_mountpoint(nd->dentry) && follow_down(&nd->mnt, &nd->dentry)) 1171 while (d_mountpoint(nd->path.dentry) &&
1172 follow_down(&nd->path.mnt, &nd->path.dentry))
1114 ; 1173 ;
1115 err = -EINVAL; 1174 err = -EINVAL;
1116 if (!check_mnt(nd->mnt)) 1175 if (!check_mnt(nd->path.mnt))
1117 goto unlock; 1176 goto unlock;
1118 1177
1119 /* Refuse the same filesystem on the same mount point */ 1178 /* Refuse the same filesystem on the same mount point */
1120 err = -EBUSY; 1179 err = -EBUSY;
1121 if (nd->mnt->mnt_sb == newmnt->mnt_sb && 1180 if (nd->path.mnt->mnt_sb == newmnt->mnt_sb &&
1122 nd->mnt->mnt_root == nd->dentry) 1181 nd->path.mnt->mnt_root == nd->path.dentry)
1123 goto unlock; 1182 goto unlock;
1124 1183
1125 err = -EINVAL; 1184 err = -EINVAL;
@@ -1455,7 +1514,7 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
1455 retval = do_new_mount(&nd, type_page, flags, mnt_flags, 1514 retval = do_new_mount(&nd, type_page, flags, mnt_flags,
1456 dev_name, data_page); 1515 dev_name, data_page);
1457dput_out: 1516dput_out:
1458 path_release(&nd); 1517 path_put(&nd.path);
1459 return retval; 1518 return retval;
1460} 1519}
1461 1520
@@ -1502,17 +1561,17 @@ static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns,
1502 while (p) { 1561 while (p) {
1503 q->mnt_ns = new_ns; 1562 q->mnt_ns = new_ns;
1504 if (fs) { 1563 if (fs) {
1505 if (p == fs->rootmnt) { 1564 if (p == fs->root.mnt) {
1506 rootmnt = p; 1565 rootmnt = p;
1507 fs->rootmnt = mntget(q); 1566 fs->root.mnt = mntget(q);
1508 } 1567 }
1509 if (p == fs->pwdmnt) { 1568 if (p == fs->pwd.mnt) {
1510 pwdmnt = p; 1569 pwdmnt = p;
1511 fs->pwdmnt = mntget(q); 1570 fs->pwd.mnt = mntget(q);
1512 } 1571 }
1513 if (p == fs->altrootmnt) { 1572 if (p == fs->altroot.mnt) {
1514 altrootmnt = p; 1573 altrootmnt = p;
1515 fs->altrootmnt = mntget(q); 1574 fs->altroot.mnt = mntget(q);
1516 } 1575 }
1517 } 1576 }
1518 p = next_mnt(p, mnt_ns->root); 1577 p = next_mnt(p, mnt_ns->root);
@@ -1593,44 +1652,35 @@ out1:
1593 * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values. 1652 * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
1594 * It can block. Requires the big lock held. 1653 * It can block. Requires the big lock held.
1595 */ 1654 */
1596void set_fs_root(struct fs_struct *fs, struct vfsmount *mnt, 1655void set_fs_root(struct fs_struct *fs, struct path *path)
1597 struct dentry *dentry)
1598{ 1656{
1599 struct dentry *old_root; 1657 struct path old_root;
1600 struct vfsmount *old_rootmnt; 1658
1601 write_lock(&fs->lock); 1659 write_lock(&fs->lock);
1602 old_root = fs->root; 1660 old_root = fs->root;
1603 old_rootmnt = fs->rootmnt; 1661 fs->root = *path;
1604 fs->rootmnt = mntget(mnt); 1662 path_get(path);
1605 fs->root = dget(dentry);
1606 write_unlock(&fs->lock); 1663 write_unlock(&fs->lock);
1607 if (old_root) { 1664 if (old_root.dentry)
1608 dput(old_root); 1665 path_put(&old_root);
1609 mntput(old_rootmnt);
1610 }
1611} 1666}
1612 1667
1613/* 1668/*
1614 * Replace the fs->{pwdmnt,pwd} with {mnt,dentry}. Put the old values. 1669 * Replace the fs->{pwdmnt,pwd} with {mnt,dentry}. Put the old values.
1615 * It can block. Requires the big lock held. 1670 * It can block. Requires the big lock held.
1616 */ 1671 */
1617void set_fs_pwd(struct fs_struct *fs, struct vfsmount *mnt, 1672void set_fs_pwd(struct fs_struct *fs, struct path *path)
1618 struct dentry *dentry)
1619{ 1673{
1620 struct dentry *old_pwd; 1674 struct path old_pwd;
1621 struct vfsmount *old_pwdmnt;
1622 1675
1623 write_lock(&fs->lock); 1676 write_lock(&fs->lock);
1624 old_pwd = fs->pwd; 1677 old_pwd = fs->pwd;
1625 old_pwdmnt = fs->pwdmnt; 1678 fs->pwd = *path;
1626 fs->pwdmnt = mntget(mnt); 1679 path_get(path);
1627 fs->pwd = dget(dentry);
1628 write_unlock(&fs->lock); 1680 write_unlock(&fs->lock);
1629 1681
1630 if (old_pwd) { 1682 if (old_pwd.dentry)
1631 dput(old_pwd); 1683 path_put(&old_pwd);
1632 mntput(old_pwdmnt);
1633 }
1634} 1684}
1635 1685
1636static void chroot_fs_refs(struct nameidata *old_nd, struct nameidata *new_nd) 1686static void chroot_fs_refs(struct nameidata *old_nd, struct nameidata *new_nd)
@@ -1645,12 +1695,12 @@ static void chroot_fs_refs(struct nameidata *old_nd, struct nameidata *new_nd)
1645 if (fs) { 1695 if (fs) {
1646 atomic_inc(&fs->count); 1696 atomic_inc(&fs->count);
1647 task_unlock(p); 1697 task_unlock(p);
1648 if (fs->root == old_nd->dentry 1698 if (fs->root.dentry == old_nd->path.dentry
1649 && fs->rootmnt == old_nd->mnt) 1699 && fs->root.mnt == old_nd->path.mnt)
1650 set_fs_root(fs, new_nd->mnt, new_nd->dentry); 1700 set_fs_root(fs, &new_nd->path);
1651 if (fs->pwd == old_nd->dentry 1701 if (fs->pwd.dentry == old_nd->path.dentry
1652 && fs->pwdmnt == old_nd->mnt) 1702 && fs->pwd.mnt == old_nd->path.mnt)
1653 set_fs_pwd(fs, new_nd->mnt, new_nd->dentry); 1703 set_fs_pwd(fs, &new_nd->path);
1654 put_fs_struct(fs); 1704 put_fs_struct(fs);
1655 } else 1705 } else
1656 task_unlock(p); 1706 task_unlock(p);
@@ -1700,7 +1750,7 @@ asmlinkage long sys_pivot_root(const char __user * new_root,
1700 if (error) 1750 if (error)
1701 goto out0; 1751 goto out0;
1702 error = -EINVAL; 1752 error = -EINVAL;
1703 if (!check_mnt(new_nd.mnt)) 1753 if (!check_mnt(new_nd.path.mnt))
1704 goto out1; 1754 goto out1;
1705 1755
1706 error = __user_walk(put_old, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &old_nd); 1756 error = __user_walk(put_old, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &old_nd);
@@ -1709,74 +1759,78 @@ asmlinkage long sys_pivot_root(const char __user * new_root,
1709 1759
1710 error = security_sb_pivotroot(&old_nd, &new_nd); 1760 error = security_sb_pivotroot(&old_nd, &new_nd);
1711 if (error) { 1761 if (error) {
1712 path_release(&old_nd); 1762 path_put(&old_nd.path);
1713 goto out1; 1763 goto out1;
1714 } 1764 }
1715 1765
1716 read_lock(&current->fs->lock); 1766 read_lock(&current->fs->lock);
1717 user_nd.mnt = mntget(current->fs->rootmnt); 1767 user_nd.path = current->fs->root;
1718 user_nd.dentry = dget(current->fs->root); 1768 path_get(&current->fs->root);
1719 read_unlock(&current->fs->lock); 1769 read_unlock(&current->fs->lock);
1720 down_write(&namespace_sem); 1770 down_write(&namespace_sem);
1721 mutex_lock(&old_nd.dentry->d_inode->i_mutex); 1771 mutex_lock(&old_nd.path.dentry->d_inode->i_mutex);
1722 error = -EINVAL; 1772 error = -EINVAL;
1723 if (IS_MNT_SHARED(old_nd.mnt) || 1773 if (IS_MNT_SHARED(old_nd.path.mnt) ||
1724 IS_MNT_SHARED(new_nd.mnt->mnt_parent) || 1774 IS_MNT_SHARED(new_nd.path.mnt->mnt_parent) ||
1725 IS_MNT_SHARED(user_nd.mnt->mnt_parent)) 1775 IS_MNT_SHARED(user_nd.path.mnt->mnt_parent))
1726 goto out2; 1776 goto out2;
1727 if (!check_mnt(user_nd.mnt)) 1777 if (!check_mnt(user_nd.path.mnt))
1728 goto out2; 1778 goto out2;
1729 error = -ENOENT; 1779 error = -ENOENT;
1730 if (IS_DEADDIR(new_nd.dentry->d_inode)) 1780 if (IS_DEADDIR(new_nd.path.dentry->d_inode))
1731 goto out2; 1781 goto out2;
1732 if (d_unhashed(new_nd.dentry) && !IS_ROOT(new_nd.dentry)) 1782 if (d_unhashed(new_nd.path.dentry) && !IS_ROOT(new_nd.path.dentry))
1733 goto out2; 1783 goto out2;
1734 if (d_unhashed(old_nd.dentry) && !IS_ROOT(old_nd.dentry)) 1784 if (d_unhashed(old_nd.path.dentry) && !IS_ROOT(old_nd.path.dentry))
1735 goto out2; 1785 goto out2;
1736 error = -EBUSY; 1786 error = -EBUSY;
1737 if (new_nd.mnt == user_nd.mnt || old_nd.mnt == user_nd.mnt) 1787 if (new_nd.path.mnt == user_nd.path.mnt ||
1788 old_nd.path.mnt == user_nd.path.mnt)
1738 goto out2; /* loop, on the same file system */ 1789 goto out2; /* loop, on the same file system */
1739 error = -EINVAL; 1790 error = -EINVAL;
1740 if (user_nd.mnt->mnt_root != user_nd.dentry) 1791 if (user_nd.path.mnt->mnt_root != user_nd.path.dentry)
1741 goto out2; /* not a mountpoint */ 1792 goto out2; /* not a mountpoint */
1742 if (user_nd.mnt->mnt_parent == user_nd.mnt) 1793 if (user_nd.path.mnt->mnt_parent == user_nd.path.mnt)
1743 goto out2; /* not attached */ 1794 goto out2; /* not attached */
1744 if (new_nd.mnt->mnt_root != new_nd.dentry) 1795 if (new_nd.path.mnt->mnt_root != new_nd.path.dentry)
1745 goto out2; /* not a mountpoint */ 1796 goto out2; /* not a mountpoint */
1746 if (new_nd.mnt->mnt_parent == new_nd.mnt) 1797 if (new_nd.path.mnt->mnt_parent == new_nd.path.mnt)
1747 goto out2; /* not attached */ 1798 goto out2; /* not attached */
1748 tmp = old_nd.mnt; /* make sure we can reach put_old from new_root */ 1799 /* make sure we can reach put_old from new_root */
1800 tmp = old_nd.path.mnt;
1749 spin_lock(&vfsmount_lock); 1801 spin_lock(&vfsmount_lock);
1750 if (tmp != new_nd.mnt) { 1802 if (tmp != new_nd.path.mnt) {
1751 for (;;) { 1803 for (;;) {
1752 if (tmp->mnt_parent == tmp) 1804 if (tmp->mnt_parent == tmp)
1753 goto out3; /* already mounted on put_old */ 1805 goto out3; /* already mounted on put_old */
1754 if (tmp->mnt_parent == new_nd.mnt) 1806 if (tmp->mnt_parent == new_nd.path.mnt)
1755 break; 1807 break;
1756 tmp = tmp->mnt_parent; 1808 tmp = tmp->mnt_parent;
1757 } 1809 }
1758 if (!is_subdir(tmp->mnt_mountpoint, new_nd.dentry)) 1810 if (!is_subdir(tmp->mnt_mountpoint, new_nd.path.dentry))
1759 goto out3; 1811 goto out3;
1760 } else if (!is_subdir(old_nd.dentry, new_nd.dentry)) 1812 } else if (!is_subdir(old_nd.path.dentry, new_nd.path.dentry))
1761 goto out3; 1813 goto out3;
1762 detach_mnt(new_nd.mnt, &parent_nd); 1814 detach_mnt(new_nd.path.mnt, &parent_nd);
1763 detach_mnt(user_nd.mnt, &root_parent); 1815 detach_mnt(user_nd.path.mnt, &root_parent);
1764 attach_mnt(user_nd.mnt, &old_nd); /* mount old root on put_old */ 1816 /* mount old root on put_old */
1765 attach_mnt(new_nd.mnt, &root_parent); /* mount new_root on / */ 1817 attach_mnt(user_nd.path.mnt, &old_nd);
1818 /* mount new_root on / */
1819 attach_mnt(new_nd.path.mnt, &root_parent);
1766 touch_mnt_namespace(current->nsproxy->mnt_ns); 1820 touch_mnt_namespace(current->nsproxy->mnt_ns);
1767 spin_unlock(&vfsmount_lock); 1821 spin_unlock(&vfsmount_lock);
1768 chroot_fs_refs(&user_nd, &new_nd); 1822 chroot_fs_refs(&user_nd, &new_nd);
1769 security_sb_post_pivotroot(&user_nd, &new_nd); 1823 security_sb_post_pivotroot(&user_nd, &new_nd);
1770 error = 0; 1824 error = 0;
1771 path_release(&root_parent); 1825 path_put(&root_parent.path);
1772 path_release(&parent_nd); 1826 path_put(&parent_nd.path);
1773out2: 1827out2:
1774 mutex_unlock(&old_nd.dentry->d_inode->i_mutex); 1828 mutex_unlock(&old_nd.path.dentry->d_inode->i_mutex);
1775 up_write(&namespace_sem); 1829 up_write(&namespace_sem);
1776 path_release(&user_nd); 1830 path_put(&user_nd.path);
1777 path_release(&old_nd); 1831 path_put(&old_nd.path);
1778out1: 1832out1:
1779 path_release(&new_nd); 1833 path_put(&new_nd.path);
1780out0: 1834out0:
1781 unlock_kernel(); 1835 unlock_kernel();
1782 return error; 1836 return error;
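For context on the checks above (both paths must be mount points, put_old must sit beneath new_root, and neither may share a mount with the caller's root), here is a hypothetical userspace sketch of how sys_pivot_root() is driven. The two directory names are placeholders, and the call goes through syscall(2) since glibc provides no wrapper.

/* Hypothetical userspace sketch: the call only succeeds when the
 * constraints checked in sys_pivot_root() above hold; otherwise the
 * kernel returns an error such as -EINVAL, -EBUSY or -ENOENT. */
#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
	/* Placeholder paths: new_root must be a mount point and put_old
	 * must be a directory underneath it. */
	if (syscall(SYS_pivot_root, "/mnt/newroot", "/mnt/newroot/oldroot") < 0) {
		perror("pivot_root");
		return 1;
	}
	return chdir("/") ? 1 : 0;	/* re-anchor cwd in the new root */
}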
@@ -1789,6 +1843,7 @@ static void __init init_mount_tree(void)
1789{ 1843{
1790 struct vfsmount *mnt; 1844 struct vfsmount *mnt;
1791 struct mnt_namespace *ns; 1845 struct mnt_namespace *ns;
1846 struct path root;
1792 1847
1793 mnt = do_kern_mount("rootfs", 0, "rootfs", NULL); 1848 mnt = do_kern_mount("rootfs", 0, "rootfs", NULL);
1794 if (IS_ERR(mnt)) 1849 if (IS_ERR(mnt))
@@ -1807,15 +1862,16 @@ static void __init init_mount_tree(void)
1807 init_task.nsproxy->mnt_ns = ns; 1862 init_task.nsproxy->mnt_ns = ns;
1808 get_mnt_ns(ns); 1863 get_mnt_ns(ns);
1809 1864
1810 set_fs_pwd(current->fs, ns->root, ns->root->mnt_root); 1865 root.mnt = ns->root;
1811 set_fs_root(current->fs, ns->root, ns->root->mnt_root); 1866 root.dentry = ns->root->mnt_root;
1867
1868 set_fs_pwd(current->fs, &root);
1869 set_fs_root(current->fs, &root);
1812} 1870}
1813 1871
1814void __init mnt_init(void) 1872void __init mnt_init(void)
1815{ 1873{
1816 struct list_head *d; 1874 unsigned u;
1817 unsigned int nr_hash;
1818 int i;
1819 int err; 1875 int err;
1820 1876
1821 init_rwsem(&namespace_sem); 1877 init_rwsem(&namespace_sem);
@@ -1828,35 +1884,11 @@ void __init mnt_init(void)
1828 if (!mount_hashtable) 1884 if (!mount_hashtable)
1829 panic("Failed to allocate mount hash table\n"); 1885 panic("Failed to allocate mount hash table\n");
1830 1886
1831 /* 1887 printk("Mount-cache hash table entries: %lu\n", HASH_SIZE);
1832 * Find the power-of-two list-heads that can fit into the allocation.. 1888
1833 * We don't guarantee that "sizeof(struct list_head)" is necessarily 1889 for (u = 0; u < HASH_SIZE; u++)
1834 * a power-of-two. 1890 INIT_LIST_HEAD(&mount_hashtable[u]);
1835 */
1836 nr_hash = PAGE_SIZE / sizeof(struct list_head);
1837 hash_bits = 0;
1838 do {
1839 hash_bits++;
1840 } while ((nr_hash >> hash_bits) != 0);
1841 hash_bits--;
1842 1891
1843 /*
1844 * Re-calculate the actual number of entries and the mask
1845 * from the number of bits we can fit.
1846 */
1847 nr_hash = 1UL << hash_bits;
1848 hash_mask = nr_hash - 1;
1849
1850 printk("Mount-cache hash table entries: %d\n", nr_hash);
1851
1852 /* And initialize the newly allocated array */
1853 d = mount_hashtable;
1854 i = nr_hash;
1855 do {
1856 INIT_LIST_HEAD(d);
1857 d++;
1858 i--;
1859 } while (i);
1860 err = sysfs_init(); 1892 err = sysfs_init();
1861 if (err) 1893 if (err)
1862 printk(KERN_WARNING "%s: sysfs_init error: %d\n", 1894 printk(KERN_WARNING "%s: sysfs_init error: %d\n",
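The rewritten mnt_init() drops the run-time size computation on the assumption that HASH_SIZE is now a compile-time power of two. The definitions below are a guess at what that looks like (they are not visible in this hunk, hence the DEMO_ prefix), and demo_init_hash() is a hypothetical stand-in for the loop above.

/* Sketch under assumptions: HASH_SHIFT/HASH_SIZE are presumed to be
 * defined elsewhere in the series roughly as below, which is what lets
 * the old hash_bits loop collapse to a plain for loop. */
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/mm.h>

#define DEMO_HASH_SHIFT	ilog2(PAGE_SIZE / sizeof(struct list_head))
#define DEMO_HASH_SIZE	(1UL << DEMO_HASH_SHIFT)

static struct list_head *demo_hashtable;	/* hypothetical table */

static void demo_init_hash(void)
{
	unsigned long u;

	for (u = 0; u < DEMO_HASH_SIZE; u++)
		INIT_LIST_HEAD(&demo_hashtable[u]);
}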
diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
index e1cb70c643f8..fbbb9f7afa1a 100644
--- a/fs/ncpfs/inode.c
+++ b/fs/ncpfs/inode.c
@@ -28,6 +28,8 @@
28#include <linux/init.h> 28#include <linux/init.h>
29#include <linux/smp_lock.h> 29#include <linux/smp_lock.h>
30#include <linux/vfs.h> 30#include <linux/vfs.h>
31#include <linux/mount.h>
32#include <linux/seq_file.h>
31 33
32#include <linux/ncp_fs.h> 34#include <linux/ncp_fs.h>
33 35
@@ -36,9 +38,15 @@
36#include "ncplib_kernel.h" 38#include "ncplib_kernel.h"
37#include "getopt.h" 39#include "getopt.h"
38 40
41#define NCP_DEFAULT_FILE_MODE 0600
42#define NCP_DEFAULT_DIR_MODE 0700
43#define NCP_DEFAULT_TIME_OUT 10
44#define NCP_DEFAULT_RETRY_COUNT 20
45
39static void ncp_delete_inode(struct inode *); 46static void ncp_delete_inode(struct inode *);
40static void ncp_put_super(struct super_block *); 47static void ncp_put_super(struct super_block *);
41static int ncp_statfs(struct dentry *, struct kstatfs *); 48static int ncp_statfs(struct dentry *, struct kstatfs *);
49static int ncp_show_options(struct seq_file *, struct vfsmount *);
42 50
43static struct kmem_cache * ncp_inode_cachep; 51static struct kmem_cache * ncp_inode_cachep;
44 52
@@ -96,6 +104,7 @@ static const struct super_operations ncp_sops =
96 .put_super = ncp_put_super, 104 .put_super = ncp_put_super,
97 .statfs = ncp_statfs, 105 .statfs = ncp_statfs,
98 .remount_fs = ncp_remount, 106 .remount_fs = ncp_remount,
107 .show_options = ncp_show_options,
99}; 108};
100 109
101extern struct dentry_operations ncp_root_dentry_operations; 110extern struct dentry_operations ncp_root_dentry_operations;
@@ -304,6 +313,37 @@ static void ncp_stop_tasks(struct ncp_server *server) {
304 flush_scheduled_work(); 313 flush_scheduled_work();
305} 314}
306 315
316static int ncp_show_options(struct seq_file *seq, struct vfsmount *mnt)
317{
318 struct ncp_server *server = NCP_SBP(mnt->mnt_sb);
319 unsigned int tmp;
320
321 if (server->m.uid != 0)
322 seq_printf(seq, ",uid=%u", server->m.uid);
323 if (server->m.gid != 0)
324 seq_printf(seq, ",gid=%u", server->m.gid);
325 if (server->m.mounted_uid != 0)
326 seq_printf(seq, ",owner=%u", server->m.mounted_uid);
327 tmp = server->m.file_mode & S_IALLUGO;
328 if (tmp != NCP_DEFAULT_FILE_MODE)
329 seq_printf(seq, ",mode=0%o", tmp);
330 tmp = server->m.dir_mode & S_IALLUGO;
331 if (tmp != NCP_DEFAULT_DIR_MODE)
332 seq_printf(seq, ",dirmode=0%o", tmp);
333 if (server->m.time_out != NCP_DEFAULT_TIME_OUT * HZ / 100) {
334 tmp = server->m.time_out * 100 / HZ;
335 seq_printf(seq, ",timeout=%u", tmp);
336 }
337 if (server->m.retry_count != NCP_DEFAULT_RETRY_COUNT)
338 seq_printf(seq, ",retry=%u", server->m.retry_count);
339 if (server->m.flags != 0)
340 seq_printf(seq, ",flags=%lu", server->m.flags);
341 if (server->m.wdog_pid != NULL)
342 seq_printf(seq, ",wdogpid=%u", pid_vnr(server->m.wdog_pid));
343
344 return 0;
345}
346
307static const struct ncp_option ncp_opts[] = { 347static const struct ncp_option ncp_opts[] = {
308 { "uid", OPT_INT, 'u' }, 348 { "uid", OPT_INT, 'u' },
309 { "gid", OPT_INT, 'g' }, 349 { "gid", OPT_INT, 'g' },
@@ -331,12 +371,12 @@ static int ncp_parse_options(struct ncp_mount_data_kernel *data, char *options)
331 data->mounted_uid = 0; 371 data->mounted_uid = 0;
332 data->wdog_pid = NULL; 372 data->wdog_pid = NULL;
333 data->ncp_fd = ~0; 373 data->ncp_fd = ~0;
334 data->time_out = 10; 374 data->time_out = NCP_DEFAULT_TIME_OUT;
335 data->retry_count = 20; 375 data->retry_count = NCP_DEFAULT_RETRY_COUNT;
336 data->uid = 0; 376 data->uid = 0;
337 data->gid = 0; 377 data->gid = 0;
338 data->file_mode = 0600; 378 data->file_mode = NCP_DEFAULT_FILE_MODE;
339 data->dir_mode = 0700; 379 data->dir_mode = NCP_DEFAULT_DIR_MODE;
340 data->info_fd = -1; 380 data->info_fd = -1;
341 data->mounted_vol[0] = 0; 381 data->mounted_vol[0] = 0;
342 382
@@ -982,12 +1022,13 @@ static struct file_system_type ncp_fs_type = {
982 .name = "ncpfs", 1022 .name = "ncpfs",
983 .get_sb = ncp_get_sb, 1023 .get_sb = ncp_get_sb,
984 .kill_sb = kill_anon_super, 1024 .kill_sb = kill_anon_super,
1025 .fs_flags = FS_BINARY_MOUNTDATA,
985}; 1026};
986 1027
987static int __init init_ncp_fs(void) 1028static int __init init_ncp_fs(void)
988{ 1029{
989 int err; 1030 int err;
990 DPRINTK("ncpfs: init_module called\n"); 1031 DPRINTK("ncpfs: init_ncp_fs called\n");
991 1032
992 err = init_inodecache(); 1033 err = init_inodecache();
993 if (err) 1034 if (err)
@@ -1004,7 +1045,7 @@ out1:
1004 1045
1005static void __exit exit_ncp_fs(void) 1046static void __exit exit_ncp_fs(void)
1006{ 1047{
1007 DPRINTK("ncpfs: cleanup_module called\n"); 1048 DPRINTK("ncpfs: exit_ncp_fs called\n");
1008 unregister_filesystem(&ncp_fs_type); 1049 unregister_filesystem(&ncp_fs_type);
1009 destroy_inodecache(); 1050 destroy_inodecache();
1010} 1051}
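One visible effect of the new ncp_show_options(): non-default ncpfs mount options now appear in /proc/mounts. A hypothetical userspace check, using the standard getmntent() interface, looks like this.

/* Hypothetical check: list ncpfs mounts and the options the new
 * .show_options callback exposes through /proc/mounts. */
#include <mntent.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *f = setmntent("/proc/mounts", "r");
	struct mntent *m;

	if (!f)
		return 1;
	while ((m = getmntent(f)) != NULL)
		if (strcmp(m->mnt_type, "ncpfs") == 0)
			printf("%s on %s (%s)\n",
			       m->mnt_fsname, m->mnt_dir, m->mnt_opts);
	endmntent(f);
	return 0;
}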
diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
index bd185a572a23..ecc06c619494 100644
--- a/fs/nfs/callback.c
+++ b/fs/nfs/callback.c
@@ -105,7 +105,7 @@ static void nfs_callback_svc(struct svc_rqst *rqstp)
105 */ 105 */
106int nfs_callback_up(void) 106int nfs_callback_up(void)
107{ 107{
108 struct svc_serv *serv; 108 struct svc_serv *serv = NULL;
109 int ret = 0; 109 int ret = 0;
110 110
111 lock_kernel(); 111 lock_kernel();
@@ -122,24 +122,30 @@ int nfs_callback_up(void)
122 ret = svc_create_xprt(serv, "tcp", nfs_callback_set_tcpport, 122 ret = svc_create_xprt(serv, "tcp", nfs_callback_set_tcpport,
123 SVC_SOCK_ANONYMOUS); 123 SVC_SOCK_ANONYMOUS);
124 if (ret <= 0) 124 if (ret <= 0)
125 goto out_destroy; 125 goto out_err;
126 nfs_callback_tcpport = ret; 126 nfs_callback_tcpport = ret;
127 dprintk("Callback port = 0x%x\n", nfs_callback_tcpport); 127 dprintk("Callback port = 0x%x\n", nfs_callback_tcpport);
128 128
129 ret = svc_create_thread(nfs_callback_svc, serv); 129 ret = svc_create_thread(nfs_callback_svc, serv);
130 if (ret < 0) 130 if (ret < 0)
131 goto out_destroy; 131 goto out_err;
132 nfs_callback_info.serv = serv; 132 nfs_callback_info.serv = serv;
133 wait_for_completion(&nfs_callback_info.started); 133 wait_for_completion(&nfs_callback_info.started);
134out: 134out:
135 /*
136 * svc_create creates the svc_serv with sv_nrthreads == 1, and then
137 * svc_create_thread increments that. So we need to call svc_destroy
138 * on both success and failure so that the refcount is 1 when the
139 * thread exits.
140 */
141 if (serv)
142 svc_destroy(serv);
135 mutex_unlock(&nfs_callback_mutex); 143 mutex_unlock(&nfs_callback_mutex);
136 unlock_kernel(); 144 unlock_kernel();
137 return ret; 145 return ret;
138out_destroy: 146out_err:
139 dprintk("Couldn't create callback socket or server thread; err = %d\n", 147 dprintk("Couldn't create callback socket or server thread; err = %d\n",
140 ret); 148 ret);
141 svc_destroy(serv);
142out_err:
143 nfs_callback_info.users--; 149 nfs_callback_info.users--;
144 goto out; 150 goto out;
145} 151}
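The reworked error path above follows a common kernel idiom: initialise the resource pointer to NULL, funnel every exit through one label, and release the reference unconditionally there, which is what keeps the svc_serv refcount at one when the callback thread finally exits. A minimal sketch of the idiom, with hypothetical demo_* helpers standing in for svc_create()/svc_create_thread()/svc_destroy():

/* Sketch of the single-exit cleanup idiom; every name here is a
 * hypothetical stand-in, only the shape mirrors nfs_callback_up(). */
#include <linux/errno.h>

struct demo_serv;
struct demo_serv *demo_create(void);		/* returns with one ref held */
int demo_create_thread(struct demo_serv *s);	/* takes its own ref on success */
void demo_destroy(struct demo_serv *s);		/* drops one ref */

static int demo_up(void)
{
	struct demo_serv *serv = NULL;
	int ret = -ENOMEM;

	serv = demo_create();
	if (!serv)
		goto out;
	ret = demo_create_thread(serv);
out:
	if (serv)
		demo_destroy(serv);	/* drop our ref on success and failure */
	return ret;
}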
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 476cb0f837fd..ae04892a5e5d 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -154,7 +154,6 @@ typedef struct {
154 struct nfs_entry *entry; 154 struct nfs_entry *entry;
155 decode_dirent_t decode; 155 decode_dirent_t decode;
156 int plus; 156 int plus;
157 int error;
158 unsigned long timestamp; 157 unsigned long timestamp;
159 int timestamp_valid; 158 int timestamp_valid;
160} nfs_readdir_descriptor_t; 159} nfs_readdir_descriptor_t;
@@ -213,7 +212,6 @@ int nfs_readdir_filler(nfs_readdir_descriptor_t *desc, struct page *page)
213 return 0; 212 return 0;
214 error: 213 error:
215 unlock_page(page); 214 unlock_page(page);
216 desc->error = error;
217 return -EIO; 215 return -EIO;
218} 216}
219 217
@@ -483,13 +481,13 @@ int uncached_readdir(nfs_readdir_descriptor_t *desc, void *dirent,
483 goto out; 481 goto out;
484 } 482 }
485 timestamp = jiffies; 483 timestamp = jiffies;
486 desc->error = NFS_PROTO(inode)->readdir(file->f_path.dentry, cred, *desc->dir_cookie, 484 status = NFS_PROTO(inode)->readdir(file->f_path.dentry, cred,
487 page, 485 *desc->dir_cookie, page,
488 NFS_SERVER(inode)->dtsize, 486 NFS_SERVER(inode)->dtsize,
489 desc->plus); 487 desc->plus);
490 desc->page = page; 488 desc->page = page;
491 desc->ptr = kmap(page); /* matching kunmap in nfs_do_filldir */ 489 desc->ptr = kmap(page); /* matching kunmap in nfs_do_filldir */
492 if (desc->error >= 0) { 490 if (status >= 0) {
493 desc->timestamp = timestamp; 491 desc->timestamp = timestamp;
494 desc->timestamp_valid = 1; 492 desc->timestamp_valid = 1;
495 if ((status = dir_decode(desc)) == 0) 493 if ((status = dir_decode(desc)) == 0)
diff --git a/fs/nfs/getroot.c b/fs/nfs/getroot.c
index e6242cdbaf91..fae97196daad 100644
--- a/fs/nfs/getroot.c
+++ b/fs/nfs/getroot.c
@@ -96,7 +96,7 @@ struct dentry *nfs_get_root(struct super_block *sb, struct nfs_fh *mntfh)
96 inode = nfs_fhget(sb, mntfh, fsinfo.fattr); 96 inode = nfs_fhget(sb, mntfh, fsinfo.fattr);
97 if (IS_ERR(inode)) { 97 if (IS_ERR(inode)) {
98 dprintk("nfs_get_root: get root inode failed\n"); 98 dprintk("nfs_get_root: get root inode failed\n");
99 return ERR_PTR(PTR_ERR(inode)); 99 return ERR_CAST(inode);
100 } 100 }
101 101
102 error = nfs_superblock_set_dummy_root(sb, inode); 102 error = nfs_superblock_set_dummy_root(sb, inode);
@@ -266,7 +266,7 @@ struct dentry *nfs4_get_root(struct super_block *sb, struct nfs_fh *mntfh)
266 inode = nfs_fhget(sb, mntfh, &fattr); 266 inode = nfs_fhget(sb, mntfh, &fattr);
267 if (IS_ERR(inode)) { 267 if (IS_ERR(inode)) {
268 dprintk("nfs_get_root: get root inode failed\n"); 268 dprintk("nfs_get_root: get root inode failed\n");
269 return ERR_PTR(PTR_ERR(inode)); 269 return ERR_CAST(inode);
270 } 270 }
271 271
272 error = nfs_superblock_set_dummy_root(sb, inode); 272 error = nfs_superblock_set_dummy_root(sb, inode);
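ERR_CAST() is the helper these two hunks switch to: it re-types an error-carrying pointer without the ERR_PTR(PTR_ERR(...)) round trip through long. A minimal sketch with hypothetical demo_* types:

/* Sketch: propagate an error pointer across pointer types the way
 * nfs_get_root()/nfs4_get_root() now do.  demo_lookup() and the demo
 * structs are hypothetical. */
#include <linux/err.h>

struct demo_inode;
struct demo_dentry;
struct demo_inode *demo_lookup(void);

static struct demo_dentry *demo_get_root(void)
{
	struct demo_inode *inode = demo_lookup();

	if (IS_ERR(inode))
		return ERR_CAST(inode);	/* same error value, new pointer type */
	/* ...build and return the real dentry here... */
	return NULL;
}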
diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c
index be4ce1c3a3d8..607f6eb9cdb5 100644
--- a/fs/nfs/namespace.c
+++ b/fs/nfs/namespace.c
@@ -107,38 +107,40 @@ static void * nfs_follow_mountpoint(struct dentry *dentry, struct nameidata *nd)
107 107
108 BUG_ON(IS_ROOT(dentry)); 108 BUG_ON(IS_ROOT(dentry));
109 dprintk("%s: enter\n", __FUNCTION__); 109 dprintk("%s: enter\n", __FUNCTION__);
110 dput(nd->dentry); 110 dput(nd->path.dentry);
111 nd->dentry = dget(dentry); 111 nd->path.dentry = dget(dentry);
112 112
113 /* Look it up again */ 113 /* Look it up again */
114 parent = dget_parent(nd->dentry); 114 parent = dget_parent(nd->path.dentry);
115 err = server->nfs_client->rpc_ops->lookup(parent->d_inode, 115 err = server->nfs_client->rpc_ops->lookup(parent->d_inode,
116 &nd->dentry->d_name, 116 &nd->path.dentry->d_name,
117 &fh, &fattr); 117 &fh, &fattr);
118 dput(parent); 118 dput(parent);
119 if (err != 0) 119 if (err != 0)
120 goto out_err; 120 goto out_err;
121 121
122 if (fattr.valid & NFS_ATTR_FATTR_V4_REFERRAL) 122 if (fattr.valid & NFS_ATTR_FATTR_V4_REFERRAL)
123 mnt = nfs_do_refmount(nd->mnt, nd->dentry); 123 mnt = nfs_do_refmount(nd->path.mnt, nd->path.dentry);
124 else 124 else
125 mnt = nfs_do_submount(nd->mnt, nd->dentry, &fh, &fattr); 125 mnt = nfs_do_submount(nd->path.mnt, nd->path.dentry, &fh,
126 &fattr);
126 err = PTR_ERR(mnt); 127 err = PTR_ERR(mnt);
127 if (IS_ERR(mnt)) 128 if (IS_ERR(mnt))
128 goto out_err; 129 goto out_err;
129 130
130 mntget(mnt); 131 mntget(mnt);
131 err = do_add_mount(mnt, nd, nd->mnt->mnt_flags|MNT_SHRINKABLE, &nfs_automount_list); 132 err = do_add_mount(mnt, nd, nd->path.mnt->mnt_flags|MNT_SHRINKABLE,
133 &nfs_automount_list);
132 if (err < 0) { 134 if (err < 0) {
133 mntput(mnt); 135 mntput(mnt);
134 if (err == -EBUSY) 136 if (err == -EBUSY)
135 goto out_follow; 137 goto out_follow;
136 goto out_err; 138 goto out_err;
137 } 139 }
138 mntput(nd->mnt); 140 mntput(nd->path.mnt);
139 dput(nd->dentry); 141 dput(nd->path.dentry);
140 nd->mnt = mnt; 142 nd->path.mnt = mnt;
141 nd->dentry = dget(mnt->mnt_root); 143 nd->path.dentry = dget(mnt->mnt_root);
142 schedule_delayed_work(&nfs_automount_task, nfs_mountpoint_expiry_timeout); 144 schedule_delayed_work(&nfs_automount_task, nfs_mountpoint_expiry_timeout);
143out: 145out:
144 dprintk("%s: done, returned %d\n", __FUNCTION__, err); 146 dprintk("%s: done, returned %d\n", __FUNCTION__, err);
@@ -146,10 +148,11 @@ out:
146 dprintk("<-- nfs_follow_mountpoint() = %d\n", err); 148 dprintk("<-- nfs_follow_mountpoint() = %d\n", err);
147 return ERR_PTR(err); 149 return ERR_PTR(err);
148out_err: 150out_err:
149 path_release(nd); 151 path_put(&nd->path);
150 goto out; 152 goto out;
151out_follow: 153out_follow:
152 while(d_mountpoint(nd->dentry) && follow_down(&nd->mnt, &nd->dentry)) 154 while (d_mountpoint(nd->path.dentry) &&
155 follow_down(&nd->path.mnt, &nd->path.dentry))
153 ; 156 ;
154 err = 0; 157 err = 0;
155 goto out; 158 goto out;
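The tail of nfs_follow_mountpoint() above drops the old nd->path by hand (mntput() plus dput()) before installing the submount. With the struct path helpers the same switch can be written more compactly; the sketch below is an equivalence, not part of the patch, demo_switch_nd_path() is hypothetical, and it assumes, as the hunk does, that the new vfsmount was already pinned with mntget().

/* Sketch (equivalent to the manual mntput()/dput() pair above). */
#include <linux/dcache.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/path.h>

static void demo_switch_nd_path(struct nameidata *nd, struct vfsmount *mnt)
{
	path_put(&nd->path);			/* old mnt + dentry go together */
	nd->path.mnt = mnt;			/* reference taken by the caller */
	nd->path.dentry = dget(mnt->mnt_root);	/* pin the submount's root */
}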
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 027e1095256e..7ce07862c2fb 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -1384,11 +1384,11 @@ out_close:
1384struct dentry * 1384struct dentry *
1385nfs4_atomic_open(struct inode *dir, struct dentry *dentry, struct nameidata *nd) 1385nfs4_atomic_open(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
1386{ 1386{
1387 struct dentry *parent;
1388 struct path path = { 1387 struct path path = {
1389 .mnt = nd->mnt, 1388 .mnt = nd->path.mnt,
1390 .dentry = dentry, 1389 .dentry = dentry,
1391 }; 1390 };
1391 struct dentry *parent;
1392 struct iattr attr; 1392 struct iattr attr;
1393 struct rpc_cred *cred; 1393 struct rpc_cred *cred;
1394 struct nfs4_state *state; 1394 struct nfs4_state *state;
@@ -1433,7 +1433,7 @@ int
1433nfs4_open_revalidate(struct inode *dir, struct dentry *dentry, int openflags, struct nameidata *nd) 1433nfs4_open_revalidate(struct inode *dir, struct dentry *dentry, int openflags, struct nameidata *nd)
1434{ 1434{
1435 struct path path = { 1435 struct path path = {
1436 .mnt = nd->mnt, 1436 .mnt = nd->path.mnt,
1437 .dentry = dentry, 1437 .dentry = dentry,
1438 }; 1438 };
1439 struct rpc_cred *cred; 1439 struct rpc_cred *cred;
@@ -1885,7 +1885,7 @@ nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
1885 int flags, struct nameidata *nd) 1885 int flags, struct nameidata *nd)
1886{ 1886{
1887 struct path path = { 1887 struct path path = {
1888 .mnt = nd->mnt, 1888 .mnt = nd->path.mnt,
1889 .dentry = dentry, 1889 .dentry = dentry,
1890 }; 1890 };
1891 struct nfs4_state *state; 1891 struct nfs4_state *state;
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index f9c7432471dc..6233eb5e98c1 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -682,8 +682,8 @@ static void nfs_increment_seqid(int status, struct nfs_seqid *seqid)
682 if (seqid->sequence->flags & NFS_SEQID_CONFIRMED) 682 if (seqid->sequence->flags & NFS_SEQID_CONFIRMED)
683 return; 683 return;
684 printk(KERN_WARNING "NFS: v4 server returned a bad" 684 printk(KERN_WARNING "NFS: v4 server returned a bad"
685 "sequence-id error on an" 685 " sequence-id error on an"
686 "unconfirmed sequence %p!\n", 686 " unconfirmed sequence %p!\n",
687 seqid->sequence); 687 seqid->sequence);
688 case -NFS4ERR_STALE_CLIENTID: 688 case -NFS4ERR_STALE_CLIENTID:
689 case -NFS4ERR_STALE_STATEID: 689 case -NFS4ERR_STALE_STATEID:
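The printk change above is worth a second look: adjacent C string literals concatenate with no implicit separator, so the missing spaces produced "a badsequence-id" in the log. A tiny standalone illustration:

#include <stdio.h>

int main(void)
{
	/* Without the leading space the words run together: */
	printf("%s\n", "returned a bad" "sequence-id error");
	/* With it, the message reads as intended: */
	printf("%s\n", "returned a bad" " sequence-id error");
	return 0;
}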
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index 8fd6dfbe1bc3..3d7d9631e125 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -79,7 +79,7 @@ void nfs_readdata_release(void *data)
79static 79static
80int nfs_return_empty_page(struct page *page) 80int nfs_return_empty_page(struct page *page)
81{ 81{
82 zero_user_page(page, 0, PAGE_CACHE_SIZE, KM_USER0); 82 zero_user(page, 0, PAGE_CACHE_SIZE);
83 SetPageUptodate(page); 83 SetPageUptodate(page);
84 unlock_page(page); 84 unlock_page(page);
85 return 0; 85 return 0;
@@ -103,10 +103,10 @@ static void nfs_readpage_truncate_uninitialised_page(struct nfs_read_data *data)
103 pglen = PAGE_CACHE_SIZE - base; 103 pglen = PAGE_CACHE_SIZE - base;
104 for (;;) { 104 for (;;) {
105 if (remainder <= pglen) { 105 if (remainder <= pglen) {
106 zero_user_page(*pages, base, remainder, KM_USER0); 106 zero_user(*pages, base, remainder);
107 break; 107 break;
108 } 108 }
109 zero_user_page(*pages, base, pglen, KM_USER0); 109 zero_user(*pages, base, pglen);
110 pages++; 110 pages++;
111 remainder -= pglen; 111 remainder -= pglen;
112 pglen = PAGE_CACHE_SIZE; 112 pglen = PAGE_CACHE_SIZE;
@@ -130,7 +130,7 @@ static int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
130 return PTR_ERR(new); 130 return PTR_ERR(new);
131 } 131 }
132 if (len < PAGE_CACHE_SIZE) 132 if (len < PAGE_CACHE_SIZE)
133 zero_user_page(page, len, PAGE_CACHE_SIZE - len, KM_USER0); 133 zero_user_segment(page, len, PAGE_CACHE_SIZE);
134 134
135 nfs_list_add_request(new, &one_request); 135 nfs_list_add_request(new, &one_request);
136 if (NFS_SERVER(inode)->rsize < PAGE_CACHE_SIZE) 136 if (NFS_SERVER(inode)->rsize < PAGE_CACHE_SIZE)
@@ -532,7 +532,7 @@ readpage_async_filler(void *data, struct page *page)
532 goto out_error; 532 goto out_error;
533 533
534 if (len < PAGE_CACHE_SIZE) 534 if (len < PAGE_CACHE_SIZE)
535 zero_user_page(page, len, PAGE_CACHE_SIZE - len, KM_USER0); 535 zero_user_segment(page, len, PAGE_CACHE_SIZE);
536 nfs_pageio_add_request(desc->pgio, new); 536 nfs_pageio_add_request(desc->pgio, new);
537 return 0; 537 return 0;
538out_error: 538out_error:
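The read-path conversions above replace zero_user_page() with two helpers: zero_user(page, offset, size) takes an offset and a length, while zero_user_segment(page, start, end) takes a half-open byte range, and neither needs the explicit KM_USER0 kmap slot. The sketch below only restates those semantics; demo_zero_tail() is hypothetical and assumes the helpers are declared in linux/highmem.h as used here.

/* Sketch: zero everything past 'len' in a page, expressed both ways. */
#include <linux/highmem.h>

static void demo_zero_tail(struct page *page, unsigned int len)
{
	/* As a [start, end) range... */
	zero_user_segment(page, len, PAGE_SIZE);

	/* ...or, equivalently, as offset + length. */
	zero_user(page, len, PAGE_SIZE - len);
}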
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 7f4505f6ac6f..1fb381843650 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -190,6 +190,10 @@ static match_table_t nfs_secflavor_tokens = {
190 { Opt_sec_lkeyi, "lkeyi" }, 190 { Opt_sec_lkeyi, "lkeyi" },
191 { Opt_sec_lkeyp, "lkeyp" }, 191 { Opt_sec_lkeyp, "lkeyp" },
192 192
193 { Opt_sec_spkm, "spkm3" },
194 { Opt_sec_spkmi, "spkm3i" },
195 { Opt_sec_spkmp, "spkm3p" },
196
193 { Opt_sec_err, NULL } 197 { Opt_sec_err, NULL }
194}; 198};
195 199
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 522efff3e2c5..f55c437124a2 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -665,9 +665,7 @@ zero_page:
665 * then we need to zero any uninitalised data. */ 665 * then we need to zero any uninitalised data. */
666 if (req->wb_pgbase == 0 && req->wb_bytes != PAGE_CACHE_SIZE 666 if (req->wb_pgbase == 0 && req->wb_bytes != PAGE_CACHE_SIZE
667 && !PageUptodate(req->wb_page)) 667 && !PageUptodate(req->wb_page))
668 zero_user_page(req->wb_page, req->wb_bytes, 668 zero_user_segment(req->wb_page, req->wb_bytes, PAGE_CACHE_SIZE);
669 PAGE_CACHE_SIZE - req->wb_bytes,
670 KM_USER0);
671 return req; 669 return req;
672} 670}
673 671
@@ -699,6 +697,17 @@ int nfs_flush_incompatible(struct file *file, struct page *page)
699} 697}
700 698
701/* 699/*
700 * If the page cache is marked as unsafe or invalid, then we can't rely on
701 * the PageUptodate() flag. In this case, we will need to turn off
702 * write optimisations that depend on the page contents being correct.
703 */
704static int nfs_write_pageuptodate(struct page *page, struct inode *inode)
705{
706 return PageUptodate(page) &&
707 !(NFS_I(inode)->cache_validity & (NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_DATA));
708}
709
710/*
702 * Update and possibly write a cached page of an NFS file. 711 * Update and possibly write a cached page of an NFS file.
703 * 712 *
704 * XXX: Keep an eye on generic_file_read to make sure it doesn't do bad 713 * XXX: Keep an eye on generic_file_read to make sure it doesn't do bad
@@ -719,10 +728,13 @@ int nfs_updatepage(struct file *file, struct page *page,
719 (long long)(page_offset(page) +offset)); 728 (long long)(page_offset(page) +offset));
720 729
721 /* If we're not using byte range locks, and we know the page 730 /* If we're not using byte range locks, and we know the page
722 * is entirely in cache, it may be more efficient to avoid 731 * is up to date, it may be more efficient to extend the write
723 * fragmenting write requests. 732 * to cover the entire page in order to avoid fragmentation
733 * inefficiencies.
724 */ 734 */
725 if (PageUptodate(page) && inode->i_flock == NULL && !(file->f_mode & O_SYNC)) { 735 if (nfs_write_pageuptodate(page, inode) &&
736 inode->i_flock == NULL &&
737 !(file->f_mode & O_SYNC)) {
726 count = max(count + offset, nfs_page_length(page)); 738 count = max(count + offset, nfs_page_length(page));
727 offset = 0; 739 offset = 0;
728 } 740 }
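The nfs_updatepage() hunk keeps the existing extension arithmetic: when the page is known up to date (by the stricter nfs_write_pageuptodate() test), the request is widened to count = max(count + offset, nfs_page_length(page)) and offset = 0. A small worked example with illustrative numbers:

/* Illustrative numbers only: a 100-byte write at offset 300 into a
 * fully cached 4096-byte page is widened to cover the whole page. */
#include <stdio.h>

static unsigned int max_u(unsigned int a, unsigned int b)
{
	return a > b ? a : b;
}

int main(void)
{
	unsigned int offset = 300, count = 100;
	unsigned int page_len = 4096;	/* stand-in for nfs_page_length() */

	count = max_u(count + offset, page_len);	/* max(400, 4096) = 4096 */
	offset = 0;
	printf("write now covers bytes [%u, %u)\n", offset, count);
	return 0;
}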
diff --git a/fs/nfsctl.c b/fs/nfsctl.c
index 51f1b31acbf6..aed8145d9087 100644
--- a/fs/nfsctl.c
+++ b/fs/nfsctl.c
@@ -41,9 +41,9 @@ static struct file *do_open(char *name, int flags)
41 error = may_open(&nd, MAY_WRITE, FMODE_WRITE); 41 error = may_open(&nd, MAY_WRITE, FMODE_WRITE);
42 42
43 if (!error) 43 if (!error)
44 return dentry_open(nd.dentry, nd.mnt, flags); 44 return dentry_open(nd.path.dentry, nd.path.mnt, flags);
45 45
46 path_release(&nd); 46 path_put(&nd.path);
47 return ERR_PTR(error); 47 return ERR_PTR(error);
48} 48}
49 49
diff --git a/fs/nfsd/auth.c b/fs/nfsd/auth.c
index 21928056e35e..d13403e33622 100644
--- a/fs/nfsd/auth.c
+++ b/fs/nfsd/auth.c
@@ -11,8 +11,6 @@
11#include <linux/nfsd/nfsd.h> 11#include <linux/nfsd/nfsd.h>
12#include <linux/nfsd/export.h> 12#include <linux/nfsd/export.h>
13 13
14#define CAP_NFSD_MASK (CAP_FS_MASK|CAP_TO_MASK(CAP_SYS_RESOURCE))
15
16int nfsexp_flags(struct svc_rqst *rqstp, struct svc_export *exp) 14int nfsexp_flags(struct svc_rqst *rqstp, struct svc_export *exp)
17{ 15{
18 struct exp_flavor_info *f; 16 struct exp_flavor_info *f;
@@ -69,10 +67,12 @@ int nfsd_setuser(struct svc_rqst *rqstp, struct svc_export *exp)
69 ret = set_current_groups(cred.cr_group_info); 67 ret = set_current_groups(cred.cr_group_info);
70 put_group_info(cred.cr_group_info); 68 put_group_info(cred.cr_group_info);
71 if ((cred.cr_uid)) { 69 if ((cred.cr_uid)) {
72 cap_t(current->cap_effective) &= ~CAP_NFSD_MASK; 70 current->cap_effective =
71 cap_drop_nfsd_set(current->cap_effective);
73 } else { 72 } else {
74 cap_t(current->cap_effective) |= (CAP_NFSD_MASK & 73 current->cap_effective =
75 current->cap_permitted); 74 cap_raise_nfsd_set(current->cap_effective,
75 current->cap_permitted);
76 } 76 }
77 return ret; 77 return ret;
78} 78}
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
index 79b4bf812960..8a6f7c924c75 100644
--- a/fs/nfsd/export.c
+++ b/fs/nfsd/export.c
@@ -63,10 +63,8 @@ static void expkey_put(struct kref *ref)
63 struct svc_expkey *key = container_of(ref, struct svc_expkey, h.ref); 63 struct svc_expkey *key = container_of(ref, struct svc_expkey, h.ref);
64 64
65 if (test_bit(CACHE_VALID, &key->h.flags) && 65 if (test_bit(CACHE_VALID, &key->h.flags) &&
66 !test_bit(CACHE_NEGATIVE, &key->h.flags)) { 66 !test_bit(CACHE_NEGATIVE, &key->h.flags))
67 dput(key->ek_dentry); 67 path_put(&key->ek_path);
68 mntput(key->ek_mnt);
69 }
70 auth_domain_put(key->ek_client); 68 auth_domain_put(key->ek_client);
71 kfree(key); 69 kfree(key);
72} 70}
@@ -169,15 +167,14 @@ static int expkey_parse(struct cache_detail *cd, char *mesg, int mlen)
169 goto out; 167 goto out;
170 168
171 dprintk("Found the path %s\n", buf); 169 dprintk("Found the path %s\n", buf);
172 key.ek_mnt = nd.mnt; 170 key.ek_path = nd.path;
173 key.ek_dentry = nd.dentry; 171
174
175 ek = svc_expkey_update(&key, ek); 172 ek = svc_expkey_update(&key, ek);
176 if (ek) 173 if (ek)
177 cache_put(&ek->h, &svc_expkey_cache); 174 cache_put(&ek->h, &svc_expkey_cache);
178 else 175 else
179 err = -ENOMEM; 176 err = -ENOMEM;
180 path_release(&nd); 177 path_put(&nd.path);
181 } 178 }
182 cache_flush(); 179 cache_flush();
183 out: 180 out:
@@ -206,7 +203,7 @@ static int expkey_show(struct seq_file *m,
206 if (test_bit(CACHE_VALID, &h->flags) && 203 if (test_bit(CACHE_VALID, &h->flags) &&
207 !test_bit(CACHE_NEGATIVE, &h->flags)) { 204 !test_bit(CACHE_NEGATIVE, &h->flags)) {
208 seq_printf(m, " "); 205 seq_printf(m, " ");
209 seq_path(m, ek->ek_mnt, ek->ek_dentry, "\\ \t\n"); 206 seq_path(m, &ek->ek_path, "\\ \t\n");
210 } 207 }
211 seq_printf(m, "\n"); 208 seq_printf(m, "\n");
212 return 0; 209 return 0;
@@ -243,8 +240,8 @@ static inline void expkey_update(struct cache_head *cnew,
243 struct svc_expkey *new = container_of(cnew, struct svc_expkey, h); 240 struct svc_expkey *new = container_of(cnew, struct svc_expkey, h);
244 struct svc_expkey *item = container_of(citem, struct svc_expkey, h); 241 struct svc_expkey *item = container_of(citem, struct svc_expkey, h);
245 242
246 new->ek_mnt = mntget(item->ek_mnt); 243 new->ek_path = item->ek_path;
247 new->ek_dentry = dget(item->ek_dentry); 244 path_get(&item->ek_path);
248} 245}
249 246
250static struct cache_head *expkey_alloc(void) 247static struct cache_head *expkey_alloc(void)
@@ -332,10 +329,9 @@ static void nfsd4_fslocs_free(struct nfsd4_fs_locations *fsloc)
332static void svc_export_put(struct kref *ref) 329static void svc_export_put(struct kref *ref)
333{ 330{
334 struct svc_export *exp = container_of(ref, struct svc_export, h.ref); 331 struct svc_export *exp = container_of(ref, struct svc_export, h.ref);
335 dput(exp->ex_dentry); 332 path_put(&exp->ex_path);
336 mntput(exp->ex_mnt);
337 auth_domain_put(exp->ex_client); 333 auth_domain_put(exp->ex_client);
338 kfree(exp->ex_path); 334 kfree(exp->ex_pathname);
339 nfsd4_fslocs_free(&exp->ex_fslocs); 335 nfsd4_fslocs_free(&exp->ex_fslocs);
340 kfree(exp); 336 kfree(exp);
341} 337}
@@ -349,7 +345,7 @@ static void svc_export_request(struct cache_detail *cd,
349 char *pth; 345 char *pth;
350 346
351 qword_add(bpp, blen, exp->ex_client->name); 347 qword_add(bpp, blen, exp->ex_client->name);
352 pth = d_path(exp->ex_dentry, exp->ex_mnt, *bpp, *blen); 348 pth = d_path(&exp->ex_path, *bpp, *blen);
353 if (IS_ERR(pth)) { 349 if (IS_ERR(pth)) {
354 /* is this correct? */ 350 /* is this correct? */
355 (*bpp)[0] = '\n'; 351 (*bpp)[0] = '\n';
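svc_export_request() now hands d_path() a single struct path instead of a dentry/vfsmount pair. A minimal sketch of that call shape, with a hypothetical demo_log_path() wrapper (the signature is taken from its use above):

/* Sketch: resolve a struct path to a string the way the export code
 * above does; demo_log_path() is hypothetical. */
#include <linux/dcache.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/path.h>

static void demo_log_path(struct path *p, char *buf, int buflen)
{
	char *s = d_path(p, buf, buflen);

	if (!IS_ERR(s))
		printk(KERN_DEBUG "export path: %s\n", s);
}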
@@ -507,8 +503,8 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
507 struct svc_export exp, *expp; 503 struct svc_export exp, *expp;
508 int an_int; 504 int an_int;
509 505
510 nd.dentry = NULL; 506 nd.path.dentry = NULL;
511 exp.ex_path = NULL; 507 exp.ex_pathname = NULL;
512 508
513 /* fs locations */ 509 /* fs locations */
514 exp.ex_fslocs.locations = NULL; 510 exp.ex_fslocs.locations = NULL;
@@ -547,11 +543,11 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
547 543
548 exp.h.flags = 0; 544 exp.h.flags = 0;
549 exp.ex_client = dom; 545 exp.ex_client = dom;
550 exp.ex_mnt = nd.mnt; 546 exp.ex_path.mnt = nd.path.mnt;
551 exp.ex_dentry = nd.dentry; 547 exp.ex_path.dentry = nd.path.dentry;
552 exp.ex_path = kstrdup(buf, GFP_KERNEL); 548 exp.ex_pathname = kstrdup(buf, GFP_KERNEL);
553 err = -ENOMEM; 549 err = -ENOMEM;
554 if (!exp.ex_path) 550 if (!exp.ex_pathname)
555 goto out; 551 goto out;
556 552
557 /* expiry */ 553 /* expiry */
@@ -610,7 +606,7 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
610 goto out; 606 goto out;
611 } 607 }
612 608
613 err = check_export(nd.dentry->d_inode, exp.ex_flags, 609 err = check_export(nd.path.dentry->d_inode, exp.ex_flags,
614 exp.ex_uuid); 610 exp.ex_uuid);
615 if (err) goto out; 611 if (err) goto out;
616 } 612 }
@@ -628,9 +624,9 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
628 out: 624 out:
629 nfsd4_fslocs_free(&exp.ex_fslocs); 625 nfsd4_fslocs_free(&exp.ex_fslocs);
630 kfree(exp.ex_uuid); 626 kfree(exp.ex_uuid);
631 kfree(exp.ex_path); 627 kfree(exp.ex_pathname);
632 if (nd.dentry) 628 if (nd.path.dentry)
633 path_release(&nd); 629 path_put(&nd.path);
634 out_no_path: 630 out_no_path:
635 if (dom) 631 if (dom)
636 auth_domain_put(dom); 632 auth_domain_put(dom);
@@ -653,7 +649,7 @@ static int svc_export_show(struct seq_file *m,
653 return 0; 649 return 0;
654 } 650 }
655 exp = container_of(h, struct svc_export, h); 651 exp = container_of(h, struct svc_export, h);
656 seq_path(m, exp->ex_mnt, exp->ex_dentry, " \t\n\\"); 652 seq_path(m, &exp->ex_path, " \t\n\\");
657 seq_putc(m, '\t'); 653 seq_putc(m, '\t');
658 seq_escape(m, exp->ex_client->name, " \t\n\\"); 654 seq_escape(m, exp->ex_client->name, " \t\n\\");
659 seq_putc(m, '('); 655 seq_putc(m, '(');
@@ -680,8 +676,8 @@ static int svc_export_match(struct cache_head *a, struct cache_head *b)
680 struct svc_export *orig = container_of(a, struct svc_export, h); 676 struct svc_export *orig = container_of(a, struct svc_export, h);
681 struct svc_export *new = container_of(b, struct svc_export, h); 677 struct svc_export *new = container_of(b, struct svc_export, h);
682 return orig->ex_client == new->ex_client && 678 return orig->ex_client == new->ex_client &&
683 orig->ex_dentry == new->ex_dentry && 679 orig->ex_path.dentry == new->ex_path.dentry &&
684 orig->ex_mnt == new->ex_mnt; 680 orig->ex_path.mnt == new->ex_path.mnt;
685} 681}
686 682
687static void svc_export_init(struct cache_head *cnew, struct cache_head *citem) 683static void svc_export_init(struct cache_head *cnew, struct cache_head *citem)
@@ -691,9 +687,9 @@ static void svc_export_init(struct cache_head *cnew, struct cache_head *citem)
691 687
692 kref_get(&item->ex_client->ref); 688 kref_get(&item->ex_client->ref);
693 new->ex_client = item->ex_client; 689 new->ex_client = item->ex_client;
694 new->ex_dentry = dget(item->ex_dentry); 690 new->ex_path.dentry = dget(item->ex_path.dentry);
695 new->ex_mnt = mntget(item->ex_mnt); 691 new->ex_path.mnt = mntget(item->ex_path.mnt);
696 new->ex_path = NULL; 692 new->ex_pathname = NULL;
697 new->ex_fslocs.locations = NULL; 693 new->ex_fslocs.locations = NULL;
698 new->ex_fslocs.locations_count = 0; 694 new->ex_fslocs.locations_count = 0;
699 new->ex_fslocs.migrated = 0; 695 new->ex_fslocs.migrated = 0;
@@ -711,8 +707,8 @@ static void export_update(struct cache_head *cnew, struct cache_head *citem)
711 new->ex_fsid = item->ex_fsid; 707 new->ex_fsid = item->ex_fsid;
712 new->ex_uuid = item->ex_uuid; 708 new->ex_uuid = item->ex_uuid;
713 item->ex_uuid = NULL; 709 item->ex_uuid = NULL;
714 new->ex_path = item->ex_path; 710 new->ex_pathname = item->ex_pathname;
715 item->ex_path = NULL; 711 item->ex_pathname = NULL;
716 new->ex_fslocs.locations = item->ex_fslocs.locations; 712 new->ex_fslocs.locations = item->ex_fslocs.locations;
717 item->ex_fslocs.locations = NULL; 713 item->ex_fslocs.locations = NULL;
718 new->ex_fslocs.locations_count = item->ex_fslocs.locations_count; 714 new->ex_fslocs.locations_count = item->ex_fslocs.locations_count;
@@ -755,8 +751,8 @@ svc_export_lookup(struct svc_export *exp)
755 struct cache_head *ch; 751 struct cache_head *ch;
756 int hash; 752 int hash;
757 hash = hash_ptr(exp->ex_client, EXPORT_HASHBITS); 753 hash = hash_ptr(exp->ex_client, EXPORT_HASHBITS);
758 hash ^= hash_ptr(exp->ex_dentry, EXPORT_HASHBITS); 754 hash ^= hash_ptr(exp->ex_path.dentry, EXPORT_HASHBITS);
759 hash ^= hash_ptr(exp->ex_mnt, EXPORT_HASHBITS); 755 hash ^= hash_ptr(exp->ex_path.mnt, EXPORT_HASHBITS);
760 756
761 ch = sunrpc_cache_lookup(&svc_export_cache, &exp->h, 757 ch = sunrpc_cache_lookup(&svc_export_cache, &exp->h,
762 hash); 758 hash);
@@ -772,8 +768,8 @@ svc_export_update(struct svc_export *new, struct svc_export *old)
772 struct cache_head *ch; 768 struct cache_head *ch;
773 int hash; 769 int hash;
774 hash = hash_ptr(old->ex_client, EXPORT_HASHBITS); 770 hash = hash_ptr(old->ex_client, EXPORT_HASHBITS);
775 hash ^= hash_ptr(old->ex_dentry, EXPORT_HASHBITS); 771 hash ^= hash_ptr(old->ex_path.dentry, EXPORT_HASHBITS);
776 hash ^= hash_ptr(old->ex_mnt, EXPORT_HASHBITS); 772 hash ^= hash_ptr(old->ex_path.mnt, EXPORT_HASHBITS);
777 773
778 ch = sunrpc_cache_update(&svc_export_cache, &new->h, 774 ch = sunrpc_cache_update(&svc_export_cache, &new->h,
779 &old->h, 775 &old->h,
@@ -815,8 +811,7 @@ static int exp_set_key(svc_client *clp, int fsid_type, u32 *fsidv,
815 key.ek_client = clp; 811 key.ek_client = clp;
816 key.ek_fsidtype = fsid_type; 812 key.ek_fsidtype = fsid_type;
817 memcpy(key.ek_fsid, fsidv, key_len(fsid_type)); 813 memcpy(key.ek_fsid, fsidv, key_len(fsid_type));
818 key.ek_mnt = exp->ex_mnt; 814 key.ek_path = exp->ex_path;
819 key.ek_dentry = exp->ex_dentry;
820 key.h.expiry_time = NEVER; 815 key.h.expiry_time = NEVER;
821 key.h.flags = 0; 816 key.h.flags = 0;
822 817
@@ -865,13 +860,13 @@ static svc_export *exp_get_by_name(svc_client *clp, struct vfsmount *mnt,
865{ 860{
866 struct svc_export *exp, key; 861 struct svc_export *exp, key;
867 int err; 862 int err;
868 863
869 if (!clp) 864 if (!clp)
870 return ERR_PTR(-ENOENT); 865 return ERR_PTR(-ENOENT);
871 866
872 key.ex_client = clp; 867 key.ex_client = clp;
873 key.ex_mnt = mnt; 868 key.ex_path.mnt = mnt;
874 key.ex_dentry = dentry; 869 key.ex_path.dentry = dentry;
875 870
876 exp = svc_export_lookup(&key); 871 exp = svc_export_lookup(&key);
877 if (exp == NULL) 872 if (exp == NULL)
@@ -968,7 +963,7 @@ static int exp_fsid_hash(svc_client *clp, struct svc_export *exp)
968static int exp_hash(struct auth_domain *clp, struct svc_export *exp) 963static int exp_hash(struct auth_domain *clp, struct svc_export *exp)
969{ 964{
970 u32 fsid[2]; 965 u32 fsid[2];
971 struct inode *inode = exp->ex_dentry->d_inode; 966 struct inode *inode = exp->ex_path.dentry->d_inode;
972 dev_t dev = inode->i_sb->s_dev; 967 dev_t dev = inode->i_sb->s_dev;
973 968
974 if (old_valid_dev(dev)) { 969 if (old_valid_dev(dev)) {
@@ -982,7 +977,7 @@ static int exp_hash(struct auth_domain *clp, struct svc_export *exp)
982static void exp_unhash(struct svc_export *exp) 977static void exp_unhash(struct svc_export *exp)
983{ 978{
984 struct svc_expkey *ek; 979 struct svc_expkey *ek;
985 struct inode *inode = exp->ex_dentry->d_inode; 980 struct inode *inode = exp->ex_path.dentry->d_inode;
986 981
987 ek = exp_get_key(exp->ex_client, inode->i_sb->s_dev, inode->i_ino); 982 ek = exp_get_key(exp->ex_client, inode->i_sb->s_dev, inode->i_ino);
988 if (!IS_ERR(ek)) { 983 if (!IS_ERR(ek)) {
@@ -1030,15 +1025,16 @@ exp_export(struct nfsctl_export *nxp)
1030 goto out_unlock; 1025 goto out_unlock;
1031 err = -EINVAL; 1026 err = -EINVAL;
1032 1027
1033 exp = exp_get_by_name(clp, nd.mnt, nd.dentry, NULL); 1028 exp = exp_get_by_name(clp, nd.path.mnt, nd.path.dentry, NULL);
1034 1029
1035 memset(&new, 0, sizeof(new)); 1030 memset(&new, 0, sizeof(new));
1036 1031
1037 /* must make sure there won't be an ex_fsid clash */ 1032 /* must make sure there won't be an ex_fsid clash */
1038 if ((nxp->ex_flags & NFSEXP_FSID) && 1033 if ((nxp->ex_flags & NFSEXP_FSID) &&
1039 (!IS_ERR(fsid_key = exp_get_fsid_key(clp, nxp->ex_dev))) && 1034 (!IS_ERR(fsid_key = exp_get_fsid_key(clp, nxp->ex_dev))) &&
1040 fsid_key->ek_mnt && 1035 fsid_key->ek_path.mnt &&
1041 (fsid_key->ek_mnt != nd.mnt || fsid_key->ek_dentry != nd.dentry) ) 1036 (fsid_key->ek_path.mnt != nd.path.mnt ||
1037 fsid_key->ek_path.dentry != nd.path.dentry))
1042 goto finish; 1038 goto finish;
1043 1039
1044 if (!IS_ERR(exp)) { 1040 if (!IS_ERR(exp)) {
@@ -1054,7 +1050,7 @@ exp_export(struct nfsctl_export *nxp)
1054 goto finish; 1050 goto finish;
1055 } 1051 }
1056 1052
1057 err = check_export(nd.dentry->d_inode, nxp->ex_flags, NULL); 1053 err = check_export(nd.path.dentry->d_inode, nxp->ex_flags, NULL);
1058 if (err) goto finish; 1054 if (err) goto finish;
1059 1055
1060 err = -ENOMEM; 1056 err = -ENOMEM;
@@ -1063,12 +1059,11 @@ exp_export(struct nfsctl_export *nxp)
1063 1059
1064 new.h.expiry_time = NEVER; 1060 new.h.expiry_time = NEVER;
1065 new.h.flags = 0; 1061 new.h.flags = 0;
1066 new.ex_path = kstrdup(nxp->ex_path, GFP_KERNEL); 1062 new.ex_pathname = kstrdup(nxp->ex_path, GFP_KERNEL);
1067 if (!new.ex_path) 1063 if (!new.ex_pathname)
1068 goto finish; 1064 goto finish;
1069 new.ex_client = clp; 1065 new.ex_client = clp;
1070 new.ex_mnt = nd.mnt; 1066 new.ex_path = nd.path;
1071 new.ex_dentry = nd.dentry;
1072 new.ex_flags = nxp->ex_flags; 1067 new.ex_flags = nxp->ex_flags;
1073 new.ex_anon_uid = nxp->ex_anon_uid; 1068 new.ex_anon_uid = nxp->ex_anon_uid;
1074 new.ex_anon_gid = nxp->ex_anon_gid; 1069 new.ex_anon_gid = nxp->ex_anon_gid;
@@ -1089,15 +1084,14 @@ exp_export(struct nfsctl_export *nxp)
1089 } else 1084 } else
1090 err = 0; 1085 err = 0;
1091finish: 1086finish:
1092 if (new.ex_path) 1087 kfree(new.ex_pathname);
1093 kfree(new.ex_path);
1094 if (exp) 1088 if (exp)
1095 exp_put(exp); 1089 exp_put(exp);
1096 if (fsid_key && !IS_ERR(fsid_key)) 1090 if (fsid_key && !IS_ERR(fsid_key))
1097 cache_put(&fsid_key->h, &svc_expkey_cache); 1091 cache_put(&fsid_key->h, &svc_expkey_cache);
1098 if (clp) 1092 if (clp)
1099 auth_domain_put(clp); 1093 auth_domain_put(clp);
1100 path_release(&nd); 1094 path_put(&nd.path);
1101out_unlock: 1095out_unlock:
1102 exp_writeunlock(); 1096 exp_writeunlock();
1103out: 1097out:
@@ -1148,8 +1142,8 @@ exp_unexport(struct nfsctl_export *nxp)
1148 goto out_domain; 1142 goto out_domain;
1149 1143
1150 err = -EINVAL; 1144 err = -EINVAL;
1151 exp = exp_get_by_name(dom, nd.mnt, nd.dentry, NULL); 1145 exp = exp_get_by_name(dom, nd.path.mnt, nd.path.dentry, NULL);
1152 path_release(&nd); 1146 path_put(&nd.path);
1153 if (IS_ERR(exp)) 1147 if (IS_ERR(exp))
1154 goto out_domain; 1148 goto out_domain;
1155 1149
@@ -1185,12 +1179,12 @@ exp_rootfh(svc_client *clp, char *path, struct knfsd_fh *f, int maxsize)
1185 printk("nfsd: exp_rootfh path not found %s", path); 1179 printk("nfsd: exp_rootfh path not found %s", path);
1186 return err; 1180 return err;
1187 } 1181 }
1188 inode = nd.dentry->d_inode; 1182 inode = nd.path.dentry->d_inode;
1189 1183
1190 dprintk("nfsd: exp_rootfh(%s [%p] %s:%s/%ld)\n", 1184 dprintk("nfsd: exp_rootfh(%s [%p] %s:%s/%ld)\n",
1191 path, nd.dentry, clp->name, 1185 path, nd.path.dentry, clp->name,
1192 inode->i_sb->s_id, inode->i_ino); 1186 inode->i_sb->s_id, inode->i_ino);
1193 exp = exp_parent(clp, nd.mnt, nd.dentry, NULL); 1187 exp = exp_parent(clp, nd.path.mnt, nd.path.dentry, NULL);
1194 if (IS_ERR(exp)) { 1188 if (IS_ERR(exp)) {
1195 err = PTR_ERR(exp); 1189 err = PTR_ERR(exp);
1196 goto out; 1190 goto out;
@@ -1200,7 +1194,7 @@ exp_rootfh(svc_client *clp, char *path, struct knfsd_fh *f, int maxsize)
1200 * fh must be initialized before calling fh_compose 1194 * fh must be initialized before calling fh_compose
1201 */ 1195 */
1202 fh_init(&fh, maxsize); 1196 fh_init(&fh, maxsize);
1203 if (fh_compose(&fh, exp, nd.dentry, NULL)) 1197 if (fh_compose(&fh, exp, nd.path.dentry, NULL))
1204 err = -EINVAL; 1198 err = -EINVAL;
1205 else 1199 else
1206 err = 0; 1200 err = 0;
@@ -1208,7 +1202,7 @@ exp_rootfh(svc_client *clp, char *path, struct knfsd_fh *f, int maxsize)
1208 fh_put(&fh); 1202 fh_put(&fh);
1209 exp_put(exp); 1203 exp_put(exp);
1210out: 1204out:
1211 path_release(&nd); 1205 path_put(&nd.path);
1212 return err; 1206 return err;
1213} 1207}
1214 1208
@@ -1218,13 +1212,13 @@ static struct svc_export *exp_find(struct auth_domain *clp, int fsid_type,
1218 struct svc_export *exp; 1212 struct svc_export *exp;
1219 struct svc_expkey *ek = exp_find_key(clp, fsid_type, fsidv, reqp); 1213 struct svc_expkey *ek = exp_find_key(clp, fsid_type, fsidv, reqp);
1220 if (IS_ERR(ek)) 1214 if (IS_ERR(ek))
1221 return ERR_PTR(PTR_ERR(ek)); 1215 return ERR_CAST(ek);
1222 1216
1223 exp = exp_get_by_name(clp, ek->ek_mnt, ek->ek_dentry, reqp); 1217 exp = exp_get_by_name(clp, ek->ek_path.mnt, ek->ek_path.dentry, reqp);
1224 cache_put(&ek->h, &svc_expkey_cache); 1218 cache_put(&ek->h, &svc_expkey_cache);
1225 1219
1226 if (IS_ERR(exp)) 1220 if (IS_ERR(exp))
1227 return ERR_PTR(PTR_ERR(exp)); 1221 return ERR_CAST(exp);
1228 return exp; 1222 return exp;
1229} 1223}
1230 1224
@@ -1359,7 +1353,7 @@ exp_pseudoroot(struct svc_rqst *rqstp, struct svc_fh *fhp)
1359 exp = rqst_exp_find(rqstp, FSID_NUM, fsidv); 1353 exp = rqst_exp_find(rqstp, FSID_NUM, fsidv);
1360 if (IS_ERR(exp)) 1354 if (IS_ERR(exp))
1361 return nfserrno(PTR_ERR(exp)); 1355 return nfserrno(PTR_ERR(exp));
1362 rv = fh_compose(fhp, exp, exp->ex_dentry, NULL); 1356 rv = fh_compose(fhp, exp, exp->ex_path.dentry, NULL);
1363 if (rv) 1357 if (rv)
1364 goto out; 1358 goto out;
1365 rv = check_nfsd_access(exp, rqstp); 1359 rv = check_nfsd_access(exp, rqstp);
diff --git a/fs/nfsd/nfs3proc.c b/fs/nfsd/nfs3proc.c
index eac82830bfd7..c721a1e6e9dd 100644
--- a/fs/nfsd/nfs3proc.c
+++ b/fs/nfsd/nfs3proc.c
@@ -67,7 +67,7 @@ nfsd3_proc_getattr(struct svc_rqst *rqstp, struct nfsd_fhandle *argp,
67 if (nfserr) 67 if (nfserr)
68 RETURN_STATUS(nfserr); 68 RETURN_STATUS(nfserr);
69 69
70 err = vfs_getattr(resp->fh.fh_export->ex_mnt, 70 err = vfs_getattr(resp->fh.fh_export->ex_path.mnt,
71 resp->fh.fh_dentry, &resp->stat); 71 resp->fh.fh_dentry, &resp->stat);
72 nfserr = nfserrno(err); 72 nfserr = nfserrno(err);
73 73
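The GETATTR conversions above keep vfs_getattr()'s (vfsmount, dentry, kstat) signature but feed it from the export's struct path. A minimal sketch, with a hypothetical demo_stat_path() helper:

/* Sketch: stat whatever a struct path points at, as the NFSD proc/xdr
 * code above does. */
#include <linux/fs.h>
#include <linux/path.h>
#include <linux/stat.h>

static int demo_stat_path(struct path *p, struct kstat *stat)
{
	return vfs_getattr(p->mnt, p->dentry, stat);
}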
diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c
index d7647f70e02b..17d0dd997204 100644
--- a/fs/nfsd/nfs3xdr.c
+++ b/fs/nfsd/nfs3xdr.c
@@ -218,7 +218,7 @@ encode_post_op_attr(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp)
218 int err; 218 int err;
219 struct kstat stat; 219 struct kstat stat;
220 220
221 err = vfs_getattr(fhp->fh_export->ex_mnt, dentry, &stat); 221 err = vfs_getattr(fhp->fh_export->ex_path.mnt, dentry, &stat);
222 if (!err) { 222 if (!err) {
223 *p++ = xdr_one; /* attributes follow */ 223 *p++ = xdr_one; /* attributes follow */
224 lease_get_mtime(dentry->d_inode, &stat.mtime); 224 lease_get_mtime(dentry->d_inode, &stat.mtime);
@@ -270,7 +270,7 @@ void fill_post_wcc(struct svc_fh *fhp)
270 if (fhp->fh_post_saved) 270 if (fhp->fh_post_saved)
271 printk("nfsd: inode locked twice during operation.\n"); 271 printk("nfsd: inode locked twice during operation.\n");
272 272
273 err = vfs_getattr(fhp->fh_export->ex_mnt, fhp->fh_dentry, 273 err = vfs_getattr(fhp->fh_export->ex_path.mnt, fhp->fh_dentry,
274 &fhp->fh_post_attr); 274 &fhp->fh_post_attr);
275 if (err) 275 if (err)
276 fhp->fh_post_saved = 0; 276 fhp->fh_post_saved = 0;
diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c
index 1602cd00dd45..1ff90625860f 100644
--- a/fs/nfsd/nfs4recover.c
+++ b/fs/nfsd/nfs4recover.c
@@ -120,9 +120,9 @@ out_no_tfm:
120static void 120static void
121nfsd4_sync_rec_dir(void) 121nfsd4_sync_rec_dir(void)
122{ 122{
123 mutex_lock(&rec_dir.dentry->d_inode->i_mutex); 123 mutex_lock(&rec_dir.path.dentry->d_inode->i_mutex);
124 nfsd_sync_dir(rec_dir.dentry); 124 nfsd_sync_dir(rec_dir.path.dentry);
125 mutex_unlock(&rec_dir.dentry->d_inode->i_mutex); 125 mutex_unlock(&rec_dir.path.dentry->d_inode->i_mutex);
126} 126}
127 127
128int 128int
@@ -142,9 +142,9 @@ nfsd4_create_clid_dir(struct nfs4_client *clp)
142 nfs4_save_user(&uid, &gid); 142 nfs4_save_user(&uid, &gid);
143 143
144 /* lock the parent */ 144 /* lock the parent */
145 mutex_lock(&rec_dir.dentry->d_inode->i_mutex); 145 mutex_lock(&rec_dir.path.dentry->d_inode->i_mutex);
146 146
147 dentry = lookup_one_len(dname, rec_dir.dentry, HEXDIR_LEN-1); 147 dentry = lookup_one_len(dname, rec_dir.path.dentry, HEXDIR_LEN-1);
148 if (IS_ERR(dentry)) { 148 if (IS_ERR(dentry)) {
149 status = PTR_ERR(dentry); 149 status = PTR_ERR(dentry);
150 goto out_unlock; 150 goto out_unlock;
@@ -154,11 +154,11 @@ nfsd4_create_clid_dir(struct nfs4_client *clp)
154 dprintk("NFSD: nfsd4_create_clid_dir: DIRECTORY EXISTS\n"); 154 dprintk("NFSD: nfsd4_create_clid_dir: DIRECTORY EXISTS\n");
155 goto out_put; 155 goto out_put;
156 } 156 }
157 status = vfs_mkdir(rec_dir.dentry->d_inode, dentry, S_IRWXU); 157 status = vfs_mkdir(rec_dir.path.dentry->d_inode, dentry, S_IRWXU);
158out_put: 158out_put:
159 dput(dentry); 159 dput(dentry);
160out_unlock: 160out_unlock:
161 mutex_unlock(&rec_dir.dentry->d_inode->i_mutex); 161 mutex_unlock(&rec_dir.path.dentry->d_inode->i_mutex);
162 if (status == 0) { 162 if (status == 0) {
163 clp->cl_firststate = 1; 163 clp->cl_firststate = 1;
164 nfsd4_sync_rec_dir(); 164 nfsd4_sync_rec_dir();
@@ -221,7 +221,7 @@ nfsd4_list_rec_dir(struct dentry *dir, recdir_func *f)
221 221
222 nfs4_save_user(&uid, &gid); 222 nfs4_save_user(&uid, &gid);
223 223
224 filp = dentry_open(dget(dir), mntget(rec_dir.mnt), O_RDONLY); 224 filp = dentry_open(dget(dir), mntget(rec_dir.path.mnt), O_RDONLY);
225 status = PTR_ERR(filp); 225 status = PTR_ERR(filp);
226 if (IS_ERR(filp)) 226 if (IS_ERR(filp))
227 goto out; 227 goto out;
@@ -286,9 +286,9 @@ nfsd4_unlink_clid_dir(char *name, int namlen)
286 286
287 dprintk("NFSD: nfsd4_unlink_clid_dir. name %.*s\n", namlen, name); 287 dprintk("NFSD: nfsd4_unlink_clid_dir. name %.*s\n", namlen, name);
288 288
289 mutex_lock(&rec_dir.dentry->d_inode->i_mutex); 289 mutex_lock(&rec_dir.path.dentry->d_inode->i_mutex);
290 dentry = lookup_one_len(name, rec_dir.dentry, namlen); 290 dentry = lookup_one_len(name, rec_dir.path.dentry, namlen);
291 mutex_unlock(&rec_dir.dentry->d_inode->i_mutex); 291 mutex_unlock(&rec_dir.path.dentry->d_inode->i_mutex);
292 if (IS_ERR(dentry)) { 292 if (IS_ERR(dentry)) {
293 status = PTR_ERR(dentry); 293 status = PTR_ERR(dentry);
294 return status; 294 return status;
@@ -297,7 +297,7 @@ nfsd4_unlink_clid_dir(char *name, int namlen)
297 if (!dentry->d_inode) 297 if (!dentry->d_inode)
298 goto out; 298 goto out;
299 299
300 status = nfsd4_clear_clid_dir(rec_dir.dentry, dentry); 300 status = nfsd4_clear_clid_dir(rec_dir.path.dentry, dentry);
301out: 301out:
302 dput(dentry); 302 dput(dentry);
303 return status; 303 return status;
@@ -347,12 +347,12 @@ nfsd4_recdir_purge_old(void) {
347 347
348 if (!rec_dir_init) 348 if (!rec_dir_init)
349 return; 349 return;
350 status = nfsd4_list_rec_dir(rec_dir.dentry, purge_old); 350 status = nfsd4_list_rec_dir(rec_dir.path.dentry, purge_old);
351 if (status == 0) 351 if (status == 0)
352 nfsd4_sync_rec_dir(); 352 nfsd4_sync_rec_dir();
353 if (status) 353 if (status)
354 printk("nfsd4: failed to purge old clients from recovery" 354 printk("nfsd4: failed to purge old clients from recovery"
355 " directory %s\n", rec_dir.dentry->d_name.name); 355 " directory %s\n", rec_dir.path.dentry->d_name.name);
356 return; 356 return;
357} 357}
358 358
@@ -373,10 +373,10 @@ int
373nfsd4_recdir_load(void) { 373nfsd4_recdir_load(void) {
374 int status; 374 int status;
375 375
376 status = nfsd4_list_rec_dir(rec_dir.dentry, load_recdir); 376 status = nfsd4_list_rec_dir(rec_dir.path.dentry, load_recdir);
377 if (status) 377 if (status)
378 printk("nfsd4: failed loading clients from recovery" 378 printk("nfsd4: failed loading clients from recovery"
379 " directory %s\n", rec_dir.dentry->d_name.name); 379 " directory %s\n", rec_dir.path.dentry->d_name.name);
380 return status; 380 return status;
381} 381}
382 382
@@ -415,5 +415,5 @@ nfsd4_shutdown_recdir(void)
415 if (!rec_dir_init) 415 if (!rec_dir_init)
416 return; 416 return;
417 rec_dir_init = 0; 417 rec_dir_init = 0;
418 path_release(&rec_dir); 418 path_put(&rec_dir.path);
419} 419}
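nfsd4_list_rec_dir() above opens the recovery directory with dentry_open(), which consumes one dentry and one vfsmount reference from the caller, hence the explicit dget()/mntget(). A minimal sketch of the same call shape starting from a struct path; demo_open_dir() is hypothetical:

/* Sketch: open a directory given a struct path, donating one reference
 * of each kind to dentry_open() as the recovery code above does. */
#include <linux/dcache.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/path.h>

static struct file *demo_open_dir(struct path *p)
{
	return dentry_open(dget(p->dentry), mntget(p->mnt), O_RDONLY);
}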
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index f6744bc03dae..bcb97d8e8b8b 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -3261,11 +3261,11 @@ nfs4_reset_recoverydir(char *recdir)
3261 if (status) 3261 if (status)
3262 return status; 3262 return status;
3263 status = -ENOTDIR; 3263 status = -ENOTDIR;
3264 if (S_ISDIR(nd.dentry->d_inode->i_mode)) { 3264 if (S_ISDIR(nd.path.dentry->d_inode->i_mode)) {
3265 nfs4_set_recdir(recdir); 3265 nfs4_set_recdir(recdir);
3266 status = 0; 3266 status = 0;
3267 } 3267 }
3268 path_release(&nd); 3268 path_put(&nd.path);
3269 return status; 3269 return status;
3270} 3270}
3271 3271
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index b0592e7c378d..0e6a179eccaf 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -1330,9 +1330,9 @@ static char *nfsd4_path(struct svc_rqst *rqstp, struct svc_export *exp, __be32 *
1330 *stat = exp_pseudoroot(rqstp, &tmp_fh); 1330 *stat = exp_pseudoroot(rqstp, &tmp_fh);
1331 if (*stat) 1331 if (*stat)
1332 return NULL; 1332 return NULL;
1333 rootpath = tmp_fh.fh_export->ex_path; 1333 rootpath = tmp_fh.fh_export->ex_pathname;
1334 1334
1335 path = exp->ex_path; 1335 path = exp->ex_pathname;
1336 1336
1337 if (strncmp(path, rootpath, strlen(rootpath))) { 1337 if (strncmp(path, rootpath, strlen(rootpath))) {
1338 dprintk("nfsd: fs_locations failed;" 1338 dprintk("nfsd: fs_locations failed;"
@@ -1481,7 +1481,7 @@ nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
1481 goto out; 1481 goto out;
1482 } 1482 }
1483 1483
1484 err = vfs_getattr(exp->ex_mnt, dentry, &stat); 1484 err = vfs_getattr(exp->ex_path.mnt, dentry, &stat);
1485 if (err) 1485 if (err)
1486 goto out_nfserr; 1486 goto out_nfserr;
1487 if ((bmval0 & (FATTR4_WORD0_FILES_FREE | FATTR4_WORD0_FILES_TOTAL | 1487 if ((bmval0 & (FATTR4_WORD0_FILES_FREE | FATTR4_WORD0_FILES_TOTAL |
@@ -1838,9 +1838,9 @@ out_acl:
1838 * and this is the root of a cross-mounted filesystem. 1838 * and this is the root of a cross-mounted filesystem.
1839 */ 1839 */
1840 if (ignore_crossmnt == 0 && 1840 if (ignore_crossmnt == 0 &&
1841 exp->ex_mnt->mnt_root->d_inode == dentry->d_inode) { 1841 exp->ex_path.mnt->mnt_root->d_inode == dentry->d_inode) {
1842 err = vfs_getattr(exp->ex_mnt->mnt_parent, 1842 err = vfs_getattr(exp->ex_path.mnt->mnt_parent,
1843 exp->ex_mnt->mnt_mountpoint, &stat); 1843 exp->ex_path.mnt->mnt_mountpoint, &stat);
1844 if (err) 1844 if (err)
1845 goto out_nfserr; 1845 goto out_nfserr;
1846 } 1846 }
diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c
index 8fbd2dc08a92..0130b345234d 100644
--- a/fs/nfsd/nfsfh.c
+++ b/fs/nfsd/nfsfh.c
@@ -47,7 +47,7 @@ static int nfsd_acceptable(void *expv, struct dentry *dentry)
47 return 1; 47 return 1;
48 48
49 tdentry = dget(dentry); 49 tdentry = dget(dentry);
50 while (tdentry != exp->ex_dentry && ! IS_ROOT(tdentry)) { 50 while (tdentry != exp->ex_path.dentry && !IS_ROOT(tdentry)) {
51 /* make sure parents give x permission to user */ 51 /* make sure parents give x permission to user */
52 int err; 52 int err;
53 parent = dget_parent(tdentry); 53 parent = dget_parent(tdentry);
@@ -59,9 +59,9 @@ static int nfsd_acceptable(void *expv, struct dentry *dentry)
59 dput(tdentry); 59 dput(tdentry);
60 tdentry = parent; 60 tdentry = parent;
61 } 61 }
62 if (tdentry != exp->ex_dentry) 62 if (tdentry != exp->ex_path.dentry)
63 dprintk("nfsd_acceptable failed at %p %s\n", tdentry, tdentry->d_name.name); 63 dprintk("nfsd_acceptable failed at %p %s\n", tdentry, tdentry->d_name.name);
64 rv = (tdentry == exp->ex_dentry); 64 rv = (tdentry == exp->ex_path.dentry);
65 dput(tdentry); 65 dput(tdentry);
66 return rv; 66 return rv;
67} 67}
@@ -209,9 +209,9 @@ fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, int type, int access)
209 fileid_type = fh->fh_fileid_type; 209 fileid_type = fh->fh_fileid_type;
210 210
211 if (fileid_type == FILEID_ROOT) 211 if (fileid_type == FILEID_ROOT)
212 dentry = dget(exp->ex_dentry); 212 dentry = dget(exp->ex_path.dentry);
213 else { 213 else {
214 dentry = exportfs_decode_fh(exp->ex_mnt, fid, 214 dentry = exportfs_decode_fh(exp->ex_path.mnt, fid,
215 data_left, fileid_type, 215 data_left, fileid_type,
216 nfsd_acceptable, exp); 216 nfsd_acceptable, exp);
217 } 217 }
@@ -299,7 +299,7 @@ out:
299static void _fh_update(struct svc_fh *fhp, struct svc_export *exp, 299static void _fh_update(struct svc_fh *fhp, struct svc_export *exp,
300 struct dentry *dentry) 300 struct dentry *dentry)
301{ 301{
302 if (dentry != exp->ex_dentry) { 302 if (dentry != exp->ex_path.dentry) {
303 struct fid *fid = (struct fid *) 303 struct fid *fid = (struct fid *)
304 (fhp->fh_handle.fh_auth + fhp->fh_handle.fh_size/4 - 1); 304 (fhp->fh_handle.fh_auth + fhp->fh_handle.fh_size/4 - 1);
305 int maxsize = (fhp->fh_maxsize - fhp->fh_handle.fh_size)/4; 305 int maxsize = (fhp->fh_maxsize - fhp->fh_handle.fh_size)/4;
@@ -344,12 +344,12 @@ fh_compose(struct svc_fh *fhp, struct svc_export *exp, struct dentry *dentry,
344 struct inode * inode = dentry->d_inode; 344 struct inode * inode = dentry->d_inode;
345 struct dentry *parent = dentry->d_parent; 345 struct dentry *parent = dentry->d_parent;
346 __u32 *datap; 346 __u32 *datap;
347 dev_t ex_dev = exp->ex_dentry->d_inode->i_sb->s_dev; 347 dev_t ex_dev = exp->ex_path.dentry->d_inode->i_sb->s_dev;
348 int root_export = (exp->ex_dentry == exp->ex_dentry->d_sb->s_root); 348 int root_export = (exp->ex_path.dentry == exp->ex_path.dentry->d_sb->s_root);
349 349
350 dprintk("nfsd: fh_compose(exp %02x:%02x/%ld %s/%s, ino=%ld)\n", 350 dprintk("nfsd: fh_compose(exp %02x:%02x/%ld %s/%s, ino=%ld)\n",
351 MAJOR(ex_dev), MINOR(ex_dev), 351 MAJOR(ex_dev), MINOR(ex_dev),
352 (long) exp->ex_dentry->d_inode->i_ino, 352 (long) exp->ex_path.dentry->d_inode->i_ino,
353 parent->d_name.name, dentry->d_name.name, 353 parent->d_name.name, dentry->d_name.name,
354 (inode ? inode->i_ino : 0)); 354 (inode ? inode->i_ino : 0));
355 355
@@ -391,7 +391,7 @@ fh_compose(struct svc_fh *fhp, struct svc_export *exp, struct dentry *dentry,
391 /* FALL THROUGH */ 391 /* FALL THROUGH */
392 case FSID_MAJOR_MINOR: 392 case FSID_MAJOR_MINOR:
393 case FSID_ENCODE_DEV: 393 case FSID_ENCODE_DEV:
394 if (!(exp->ex_dentry->d_inode->i_sb->s_type->fs_flags 394 if (!(exp->ex_path.dentry->d_inode->i_sb->s_type->fs_flags
395 & FS_REQUIRES_DEV)) 395 & FS_REQUIRES_DEV))
396 goto retry; 396 goto retry;
397 break; 397 break;
@@ -454,7 +454,7 @@ fh_compose(struct svc_fh *fhp, struct svc_export *exp, struct dentry *dentry,
454 fhp->fh_handle.ofh_dev = old_encode_dev(ex_dev); 454 fhp->fh_handle.ofh_dev = old_encode_dev(ex_dev);
455 fhp->fh_handle.ofh_xdev = fhp->fh_handle.ofh_dev; 455 fhp->fh_handle.ofh_xdev = fhp->fh_handle.ofh_dev;
456 fhp->fh_handle.ofh_xino = 456 fhp->fh_handle.ofh_xino =
457 ino_t_to_u32(exp->ex_dentry->d_inode->i_ino); 457 ino_t_to_u32(exp->ex_path.dentry->d_inode->i_ino);
458 fhp->fh_handle.ofh_dirino = ino_t_to_u32(parent_ino(dentry)); 458 fhp->fh_handle.ofh_dirino = ino_t_to_u32(parent_ino(dentry));
459 if (inode) 459 if (inode)
460 _fh_update_old(dentry, exp, &fhp->fh_handle); 460 _fh_update_old(dentry, exp, &fhp->fh_handle);
@@ -465,7 +465,7 @@ fh_compose(struct svc_fh *fhp, struct svc_export *exp, struct dentry *dentry,
465 datap = fhp->fh_handle.fh_auth+0; 465 datap = fhp->fh_handle.fh_auth+0;
466 fhp->fh_handle.fh_fsid_type = fsid_type; 466 fhp->fh_handle.fh_fsid_type = fsid_type;
467 mk_fsid(fsid_type, datap, ex_dev, 467 mk_fsid(fsid_type, datap, ex_dev,
468 exp->ex_dentry->d_inode->i_ino, 468 exp->ex_path.dentry->d_inode->i_ino,
469 exp->ex_fsid, exp->ex_uuid); 469 exp->ex_fsid, exp->ex_uuid);
470 470
471 len = key_len(fsid_type); 471 len = key_len(fsid_type);
@@ -571,7 +571,7 @@ enum fsid_source fsid_source(struct svc_fh *fhp)
571 case FSID_DEV: 571 case FSID_DEV:
572 case FSID_ENCODE_DEV: 572 case FSID_ENCODE_DEV:
573 case FSID_MAJOR_MINOR: 573 case FSID_MAJOR_MINOR:
574 if (fhp->fh_export->ex_dentry->d_inode->i_sb->s_type->fs_flags 574 if (fhp->fh_export->ex_path.dentry->d_inode->i_sb->s_type->fs_flags
575 & FS_REQUIRES_DEV) 575 & FS_REQUIRES_DEV)
576 return FSIDSOURCE_DEV; 576 return FSIDSOURCE_DEV;
577 break; 577 break;
diff --git a/fs/nfsd/nfsproc.c b/fs/nfsd/nfsproc.c
index 977a71f64e19..6cfc96a12483 100644
--- a/fs/nfsd/nfsproc.c
+++ b/fs/nfsd/nfsproc.c
@@ -41,7 +41,7 @@ static __be32
41nfsd_return_attrs(__be32 err, struct nfsd_attrstat *resp) 41nfsd_return_attrs(__be32 err, struct nfsd_attrstat *resp)
42{ 42{
43 if (err) return err; 43 if (err) return err;
44 return nfserrno(vfs_getattr(resp->fh.fh_export->ex_mnt, 44 return nfserrno(vfs_getattr(resp->fh.fh_export->ex_path.mnt,
45 resp->fh.fh_dentry, 45 resp->fh.fh_dentry,
46 &resp->stat)); 46 &resp->stat));
47} 47}
@@ -49,7 +49,7 @@ static __be32
49nfsd_return_dirop(__be32 err, struct nfsd_diropres *resp) 49nfsd_return_dirop(__be32 err, struct nfsd_diropres *resp)
50{ 50{
51 if (err) return err; 51 if (err) return err;
52 return nfserrno(vfs_getattr(resp->fh.fh_export->ex_mnt, 52 return nfserrno(vfs_getattr(resp->fh.fh_export->ex_path.mnt,
53 resp->fh.fh_dentry, 53 resp->fh.fh_dentry,
54 &resp->stat)); 54 &resp->stat));
55} 55}
@@ -164,7 +164,7 @@ nfsd_proc_read(struct svc_rqst *rqstp, struct nfsd_readargs *argp,
164 &resp->count); 164 &resp->count);
165 165
166 if (nfserr) return nfserr; 166 if (nfserr) return nfserr;
167 return nfserrno(vfs_getattr(resp->fh.fh_export->ex_mnt, 167 return nfserrno(vfs_getattr(resp->fh.fh_export->ex_path.mnt,
168 resp->fh.fh_dentry, 168 resp->fh.fh_dentry,
169 &resp->stat)); 169 &resp->stat));
170} 170}
diff --git a/fs/nfsd/nfsxdr.c b/fs/nfsd/nfsxdr.c
index 61ad61743d94..afd08e2c90a5 100644
--- a/fs/nfsd/nfsxdr.c
+++ b/fs/nfsd/nfsxdr.c
@@ -207,7 +207,7 @@ encode_fattr(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp,
207__be32 *nfs2svc_encode_fattr(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp) 207__be32 *nfs2svc_encode_fattr(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp)
208{ 208{
209 struct kstat stat; 209 struct kstat stat;
210 vfs_getattr(fhp->fh_export->ex_mnt, fhp->fh_dentry, &stat); 210 vfs_getattr(fhp->fh_export->ex_path.mnt, fhp->fh_dentry, &stat);
211 return encode_fattr(rqstp, p, fhp, &stat); 211 return encode_fattr(rqstp, p, fhp, &stat);
212} 212}
213 213
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index cc75e4fcd02b..46f59d5365a0 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -101,7 +101,7 @@ nfsd_cross_mnt(struct svc_rqst *rqstp, struct dentry **dpp,
101{ 101{
102 struct svc_export *exp = *expp, *exp2 = NULL; 102 struct svc_export *exp = *expp, *exp2 = NULL;
103 struct dentry *dentry = *dpp; 103 struct dentry *dentry = *dpp;
104 struct vfsmount *mnt = mntget(exp->ex_mnt); 104 struct vfsmount *mnt = mntget(exp->ex_path.mnt);
105 struct dentry *mounts = dget(dentry); 105 struct dentry *mounts = dget(dentry);
106 int err = 0; 106 int err = 0;
107 107
@@ -156,15 +156,15 @@ nfsd_lookup_dentry(struct svc_rqst *rqstp, struct svc_fh *fhp,
156 if (isdotent(name, len)) { 156 if (isdotent(name, len)) {
157 if (len==1) 157 if (len==1)
158 dentry = dget(dparent); 158 dentry = dget(dparent);
159 else if (dparent != exp->ex_dentry) { 159 else if (dparent != exp->ex_path.dentry)
160 dentry = dget_parent(dparent); 160 dentry = dget_parent(dparent);
161 } else if (!EX_NOHIDE(exp)) 161 else if (!EX_NOHIDE(exp))
162 dentry = dget(dparent); /* .. == . just like at / */ 162 dentry = dget(dparent); /* .. == . just like at / */
163 else { 163 else {
164 /* checking mountpoint crossing is very different when stepping up */ 164 /* checking mountpoint crossing is very different when stepping up */
165 struct svc_export *exp2 = NULL; 165 struct svc_export *exp2 = NULL;
166 struct dentry *dp; 166 struct dentry *dp;
167 struct vfsmount *mnt = mntget(exp->ex_mnt); 167 struct vfsmount *mnt = mntget(exp->ex_path.mnt);
168 dentry = dget(dparent); 168 dentry = dget(dparent);
169 while(dentry == mnt->mnt_root && follow_up(&mnt, &dentry)) 169 while(dentry == mnt->mnt_root && follow_up(&mnt, &dentry))
170 ; 170 ;
@@ -721,7 +721,8 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
721 721
722 DQUOT_INIT(inode); 722 DQUOT_INIT(inode);
723 } 723 }
724 *filp = dentry_open(dget(dentry), mntget(fhp->fh_export->ex_mnt), flags); 724 *filp = dentry_open(dget(dentry), mntget(fhp->fh_export->ex_path.mnt),
725 flags);
725 if (IS_ERR(*filp)) 726 if (IS_ERR(*filp))
726 host_err = PTR_ERR(*filp); 727 host_err = PTR_ERR(*filp);
727out_nfserr: 728out_nfserr:
@@ -1462,7 +1463,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
1462 if (!inode->i_op || !inode->i_op->readlink) 1463 if (!inode->i_op || !inode->i_op->readlink)
1463 goto out; 1464 goto out;
1464 1465
1465 touch_atime(fhp->fh_export->ex_mnt, dentry); 1466 touch_atime(fhp->fh_export->ex_path.mnt, dentry);
1466 /* N.B. Why does this call need a get_fs()?? 1467 /* N.B. Why does this call need a get_fs()??
1467 * Remove the set_fs and watch the fireworks:-) --okir 1468 * Remove the set_fs and watch the fireworks:-) --okir
1468 */ 1469 */
diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c
index ad87cb01299b..00e9ccde8e42 100644
--- a/fs/ntfs/aops.c
+++ b/fs/ntfs/aops.c
@@ -87,13 +87,17 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
87 /* Check for the current buffer head overflowing. */ 87 /* Check for the current buffer head overflowing. */
88 if (unlikely(file_ofs + bh->b_size > init_size)) { 88 if (unlikely(file_ofs + bh->b_size > init_size)) {
89 int ofs; 89 int ofs;
90 void *kaddr;
90 91
91 ofs = 0; 92 ofs = 0;
92 if (file_ofs < init_size) 93 if (file_ofs < init_size)
93 ofs = init_size - file_ofs; 94 ofs = init_size - file_ofs;
94 local_irq_save(flags); 95 local_irq_save(flags);
95 zero_user_page(page, bh_offset(bh) + ofs, 96 kaddr = kmap_atomic(page, KM_BIO_SRC_IRQ);
96 bh->b_size - ofs, KM_BIO_SRC_IRQ); 97 memset(kaddr + bh_offset(bh) + ofs, 0,
98 bh->b_size - ofs);
99 flush_dcache_page(page);
100 kunmap_atomic(kaddr, KM_BIO_SRC_IRQ);
97 local_irq_restore(flags); 101 local_irq_restore(flags);
98 } 102 }
99 } else { 103 } else {
@@ -334,7 +338,7 @@ handle_hole:
334 bh->b_blocknr = -1UL; 338 bh->b_blocknr = -1UL;
335 clear_buffer_mapped(bh); 339 clear_buffer_mapped(bh);
336handle_zblock: 340handle_zblock:
337 zero_user_page(page, i * blocksize, blocksize, KM_USER0); 341 zero_user(page, i * blocksize, blocksize);
338 if (likely(!err)) 342 if (likely(!err))
339 set_buffer_uptodate(bh); 343 set_buffer_uptodate(bh);
340 } while (i++, iblock++, (bh = bh->b_this_page) != head); 344 } while (i++, iblock++, (bh = bh->b_this_page) != head);
@@ -410,7 +414,7 @@ retry_readpage:
410 /* Is the page fully outside i_size? (truncate in progress) */ 414 /* Is the page fully outside i_size? (truncate in progress) */
411 if (unlikely(page->index >= (i_size + PAGE_CACHE_SIZE - 1) >> 415 if (unlikely(page->index >= (i_size + PAGE_CACHE_SIZE - 1) >>
412 PAGE_CACHE_SHIFT)) { 416 PAGE_CACHE_SHIFT)) {
413 zero_user_page(page, 0, PAGE_CACHE_SIZE, KM_USER0); 417 zero_user(page, 0, PAGE_CACHE_SIZE);
414 ntfs_debug("Read outside i_size - truncated?"); 418 ntfs_debug("Read outside i_size - truncated?");
415 goto done; 419 goto done;
416 } 420 }
@@ -459,7 +463,7 @@ retry_readpage:
459 * ok to ignore the compressed flag here. 463 * ok to ignore the compressed flag here.
460 */ 464 */
461 if (unlikely(page->index > 0)) { 465 if (unlikely(page->index > 0)) {
462 zero_user_page(page, 0, PAGE_CACHE_SIZE, KM_USER0); 466 zero_user(page, 0, PAGE_CACHE_SIZE);
463 goto done; 467 goto done;
464 } 468 }
465 if (!NInoAttr(ni)) 469 if (!NInoAttr(ni))
@@ -788,8 +792,7 @@ lock_retry_remap:
788 if (err == -ENOENT || lcn == LCN_ENOENT) { 792 if (err == -ENOENT || lcn == LCN_ENOENT) {
789 bh->b_blocknr = -1; 793 bh->b_blocknr = -1;
790 clear_buffer_dirty(bh); 794 clear_buffer_dirty(bh);
791 zero_user_page(page, bh_offset(bh), blocksize, 795 zero_user(page, bh_offset(bh), blocksize);
792 KM_USER0);
793 set_buffer_uptodate(bh); 796 set_buffer_uptodate(bh);
794 err = 0; 797 err = 0;
795 continue; 798 continue;
@@ -1414,8 +1417,7 @@ retry_writepage:
1414 if (page->index >= (i_size >> PAGE_CACHE_SHIFT)) { 1417 if (page->index >= (i_size >> PAGE_CACHE_SHIFT)) {
1415 /* The page straddles i_size. */ 1418 /* The page straddles i_size. */
1416 unsigned int ofs = i_size & ~PAGE_CACHE_MASK; 1419 unsigned int ofs = i_size & ~PAGE_CACHE_MASK;
1417 zero_user_page(page, ofs, PAGE_CACHE_SIZE - ofs, 1420 zero_user_segment(page, ofs, PAGE_CACHE_SIZE);
1418 KM_USER0);
1419 } 1421 }
1420 /* Handle mst protected attributes. */ 1422 /* Handle mst protected attributes. */
1421 if (NInoMstProtected(ni)) 1423 if (NInoMstProtected(ni))
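
These ntfs hunks swap zero_user_page(page, offset, length, km_slot) for two new helpers: zero_user(page, offset, length) and zero_user_segment(page, start, end), neither of which takes a kmap slot. The one site that runs under local_irq_save() in the I/O completion path is open-coded with kmap_atomic(..., KM_BIO_SRC_IRQ) instead, presumably because the generic helpers always map with KM_USER0, which is not safe from that context. A small sketch showing that the two new spellings cover the same operation (the helper name is hypothetical):

	/* zero_user(page, off, len) clears len bytes starting at off;
	 * zero_user_segment(page, start, end) clears the range [start, end).
	 * The two calls below therefore do the same thing.
	 */
	static inline void zero_block_tail(struct page *page,
					   unsigned int ofs,
					   unsigned int blocksize)
	{
		zero_user(page, ofs, blocksize - ofs);
		/* equivalently: zero_user_segment(page, ofs, blocksize); */
	}
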
diff --git a/fs/ntfs/compress.c b/fs/ntfs/compress.c
index d1619d05eb23..33ff314cc507 100644
--- a/fs/ntfs/compress.c
+++ b/fs/ntfs/compress.c
@@ -565,7 +565,7 @@ int ntfs_read_compressed_block(struct page *page)
565 if (xpage >= max_page) { 565 if (xpage >= max_page) {
566 kfree(bhs); 566 kfree(bhs);
567 kfree(pages); 567 kfree(pages);
568 zero_user_page(page, 0, PAGE_CACHE_SIZE, KM_USER0); 568 zero_user(page, 0, PAGE_CACHE_SIZE);
569 ntfs_debug("Compressed read outside i_size - truncated?"); 569 ntfs_debug("Compressed read outside i_size - truncated?");
570 SetPageUptodate(page); 570 SetPageUptodate(page);
571 unlock_page(page); 571 unlock_page(page);
diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
index 6cd08dfdc2ed..3c5550cd11d6 100644
--- a/fs/ntfs/file.c
+++ b/fs/ntfs/file.c
@@ -607,8 +607,8 @@ do_next_page:
607 ntfs_submit_bh_for_read(bh); 607 ntfs_submit_bh_for_read(bh);
608 *wait_bh++ = bh; 608 *wait_bh++ = bh;
609 } else { 609 } else {
610 zero_user_page(page, bh_offset(bh), 610 zero_user(page, bh_offset(bh),
611 blocksize, KM_USER0); 611 blocksize);
612 set_buffer_uptodate(bh); 612 set_buffer_uptodate(bh);
613 } 613 }
614 } 614 }
@@ -683,9 +683,8 @@ map_buffer_cached:
683 ntfs_submit_bh_for_read(bh); 683 ntfs_submit_bh_for_read(bh);
684 *wait_bh++ = bh; 684 *wait_bh++ = bh;
685 } else { 685 } else {
686 zero_user_page(page, 686 zero_user(page, bh_offset(bh),
687 bh_offset(bh), 687 blocksize);
688 blocksize, KM_USER0);
689 set_buffer_uptodate(bh); 688 set_buffer_uptodate(bh);
690 } 689 }
691 } 690 }
@@ -703,8 +702,8 @@ map_buffer_cached:
703 */ 702 */
704 if (bh_end <= pos || bh_pos >= end) { 703 if (bh_end <= pos || bh_pos >= end) {
705 if (!buffer_uptodate(bh)) { 704 if (!buffer_uptodate(bh)) {
706 zero_user_page(page, bh_offset(bh), 705 zero_user(page, bh_offset(bh),
707 blocksize, KM_USER0); 706 blocksize);
708 set_buffer_uptodate(bh); 707 set_buffer_uptodate(bh);
709 } 708 }
710 mark_buffer_dirty(bh); 709 mark_buffer_dirty(bh);
@@ -743,8 +742,7 @@ map_buffer_cached:
743 if (!buffer_uptodate(bh)) 742 if (!buffer_uptodate(bh))
744 set_buffer_uptodate(bh); 743 set_buffer_uptodate(bh);
745 } else if (!buffer_uptodate(bh)) { 744 } else if (!buffer_uptodate(bh)) {
746 zero_user_page(page, bh_offset(bh), blocksize, 745 zero_user(page, bh_offset(bh), blocksize);
747 KM_USER0);
748 set_buffer_uptodate(bh); 746 set_buffer_uptodate(bh);
749 } 747 }
750 continue; 748 continue;
@@ -868,8 +866,8 @@ rl_not_mapped_enoent:
868 if (!buffer_uptodate(bh)) 866 if (!buffer_uptodate(bh))
869 set_buffer_uptodate(bh); 867 set_buffer_uptodate(bh);
870 } else if (!buffer_uptodate(bh)) { 868 } else if (!buffer_uptodate(bh)) {
871 zero_user_page(page, bh_offset(bh), 869 zero_user(page, bh_offset(bh),
872 blocksize, KM_USER0); 870 blocksize);
873 set_buffer_uptodate(bh); 871 set_buffer_uptodate(bh);
874 } 872 }
875 continue; 873 continue;
@@ -1128,8 +1126,8 @@ rl_not_mapped_enoent:
1128 1126
1129 if (likely(bh_pos < initialized_size)) 1127 if (likely(bh_pos < initialized_size))
1130 ofs = initialized_size - bh_pos; 1128 ofs = initialized_size - bh_pos;
1131 zero_user_page(page, bh_offset(bh) + ofs, 1129 zero_user_segment(page, bh_offset(bh) + ofs,
1132 blocksize - ofs, KM_USER0); 1130 blocksize);
1133 } 1131 }
1134 } else /* if (unlikely(!buffer_uptodate(bh))) */ 1132 } else /* if (unlikely(!buffer_uptodate(bh))) */
1135 err = -EIO; 1133 err = -EIO;
@@ -1269,8 +1267,8 @@ rl_not_mapped_enoent:
1269 if (PageUptodate(page)) 1267 if (PageUptodate(page))
1270 set_buffer_uptodate(bh); 1268 set_buffer_uptodate(bh);
1271 else { 1269 else {
1272 zero_user_page(page, bh_offset(bh), 1270 zero_user(page, bh_offset(bh),
1273 blocksize, KM_USER0); 1271 blocksize);
1274 set_buffer_uptodate(bh); 1272 set_buffer_uptodate(bh);
1275 } 1273 }
1276 } 1274 }
@@ -1330,7 +1328,7 @@ err_out:
1330 len = PAGE_CACHE_SIZE; 1328 len = PAGE_CACHE_SIZE;
1331 if (len > bytes) 1329 if (len > bytes)
1332 len = bytes; 1330 len = bytes;
1333 zero_user_page(*pages, 0, len, KM_USER0); 1331 zero_user(*pages, 0, len);
1334 } 1332 }
1335 goto out; 1333 goto out;
1336} 1334}
@@ -1451,7 +1449,7 @@ err_out:
1451 len = PAGE_CACHE_SIZE; 1449 len = PAGE_CACHE_SIZE;
1452 if (len > bytes) 1450 if (len > bytes)
1453 len = bytes; 1451 len = bytes;
1454 zero_user_page(*pages, 0, len, KM_USER0); 1452 zero_user(*pages, 0, len);
1455 } 1453 }
1456 goto out; 1454 goto out;
1457} 1455}
diff --git a/fs/ntfs/malloc.h b/fs/ntfs/malloc.h
index e38e402e4103..cd0be3f5c3cd 100644
--- a/fs/ntfs/malloc.h
+++ b/fs/ntfs/malloc.h
@@ -85,8 +85,7 @@ static inline void *ntfs_malloc_nofs_nofail(unsigned long size)
85 85
86static inline void ntfs_free(void *addr) 86static inline void ntfs_free(void *addr)
87{ 87{
88 if (likely(((unsigned long)addr < VMALLOC_START) || 88 if (!is_vmalloc_addr(addr)) {
89 ((unsigned long)addr >= VMALLOC_END ))) {
90 kfree(addr); 89 kfree(addr);
91 /* free_page((unsigned long)addr); */ 90 /* free_page((unsigned long)addr); */
92 return; 91 return;
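
ntfs_free() drops its open-coded VMALLOC_START/VMALLOC_END range check in favour of is_vmalloc_addr(), which asks the same question ("did this pointer come from vmalloc?") without depending on the architecture's address layout. The surrounding free logic, sketched with a hypothetical helper (the vfree branch is not shown in the hunk and is assumed here):

	#include <linux/mm.h>		/* is_vmalloc_addr() */
	#include <linux/slab.h>		/* kfree() */
	#include <linux/vmalloc.h>	/* vfree() */

	/* Hypothetical: free a buffer that may be kmalloc'd or vmalloc'd. */
	static void free_either(void *addr)
	{
		if (!is_vmalloc_addr(addr))
			kfree(addr);
		else
			vfree(addr);
	}
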
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index 64713e149e46..447206eb5c2e 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -5670,7 +5670,7 @@ static void ocfs2_map_and_dirty_page(struct inode *inode, handle_t *handle,
5670 mlog_errno(ret); 5670 mlog_errno(ret);
5671 5671
5672 if (zero) 5672 if (zero)
5673 zero_user_page(page, from, to - from, KM_USER0); 5673 zero_user_segment(page, from, to);
5674 5674
5675 /* 5675 /*
5676 * Need to set the buffers we zero'd into uptodate 5676 * Need to set the buffers we zero'd into uptodate
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index bc7b4cbbe8ec..82243127eebf 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -307,7 +307,7 @@ static int ocfs2_readpage(struct file *file, struct page *page)
307 * XXX sys_readahead() seems to get that wrong? 307 * XXX sys_readahead() seems to get that wrong?
308 */ 308 */
309 if (start >= i_size_read(inode)) { 309 if (start >= i_size_read(inode)) {
310 zero_user_page(page, 0, PAGE_SIZE, KM_USER0); 310 zero_user(page, 0, PAGE_SIZE);
311 SetPageUptodate(page); 311 SetPageUptodate(page);
312 ret = 0; 312 ret = 0;
313 goto out_alloc; 313 goto out_alloc;
@@ -869,7 +869,7 @@ int ocfs2_map_page_blocks(struct page *page, u64 *p_blkno,
869 if (block_start >= to) 869 if (block_start >= to)
870 break; 870 break;
871 871
872 zero_user_page(page, block_start, bh->b_size, KM_USER0); 872 zero_user(page, block_start, bh->b_size);
873 set_buffer_uptodate(bh); 873 set_buffer_uptodate(bh);
874 mark_buffer_dirty(bh); 874 mark_buffer_dirty(bh);
875 875
@@ -1034,7 +1034,7 @@ static void ocfs2_zero_new_buffers(struct page *page, unsigned from, unsigned to
1034 start = max(from, block_start); 1034 start = max(from, block_start);
1035 end = min(to, block_end); 1035 end = min(to, block_end);
1036 1036
1037 zero_user_page(page, start, end - start, KM_USER0); 1037 zero_user_segment(page, start, end);
1038 set_buffer_uptodate(bh); 1038 set_buffer_uptodate(bh);
1039 } 1039 }
1040 1040
diff --git a/fs/ocfs2/cluster/endian.h b/fs/ocfs2/cluster/endian.h
deleted file mode 100644
index 2df9082f4e35..000000000000
--- a/fs/ocfs2/cluster/endian.h
+++ /dev/null
@@ -1,30 +0,0 @@
1/* -*- mode: c; c-basic-offset: 8; -*-
2 * vim: noexpandtab sw=8 ts=8 sts=0:
3 *
4 * Copyright (C) 2005 Oracle. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public
17 * License along with this program; if not, write to the
18 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
19 * Boston, MA 021110-1307, USA.
20 */
21
22#ifndef OCFS2_CLUSTER_ENDIAN_H
23#define OCFS2_CLUSTER_ENDIAN_H
24
25static inline void be32_add_cpu(__be32 *var, u32 val)
26{
27 *var = cpu_to_be32(be32_to_cpu(*var) + val);
28}
29
30#endif /* OCFS2_CLUSTER_ENDIAN_H */
diff --git a/fs/ocfs2/cluster/nodemanager.c b/fs/ocfs2/cluster/nodemanager.c
index af2070da308b..709fba25bf7e 100644
--- a/fs/ocfs2/cluster/nodemanager.c
+++ b/fs/ocfs2/cluster/nodemanager.c
@@ -24,7 +24,6 @@
24#include <linux/sysctl.h> 24#include <linux/sysctl.h>
25#include <linux/configfs.h> 25#include <linux/configfs.h>
26 26
27#include "endian.h"
28#include "tcp.h" 27#include "tcp.h"
29#include "nodemanager.h" 28#include "nodemanager.h"
30#include "heartbeat.h" 29#include "heartbeat.h"
diff --git a/fs/ocfs2/cluster/tcp_internal.h b/fs/ocfs2/cluster/tcp_internal.h
index b2e832aca567..d25b9af28500 100644
--- a/fs/ocfs2/cluster/tcp_internal.h
+++ b/fs/ocfs2/cluster/tcp_internal.h
@@ -38,6 +38,15 @@
38 * locking semantics of the file system using the protocol. It should 38 * locking semantics of the file system using the protocol. It should
39 * be somewhere else, I'm sure, but right now it isn't. 39 * be somewhere else, I'm sure, but right now it isn't.
40 * 40 *
41 * With version 11, we separate out the filesystem locking portion. The
42 * filesystem now has a major.minor version it negotiates. Version 11
43 * introduces this negotiation to the o2dlm protocol, and as such the
44 * version here in tcp_internal.h should not need to be bumped for
45 * filesystem locking changes.
46 *
47 * New in version 11
48 * - Negotiation of filesystem locking in the dlm join.
49 *
41 * New in version 10: 50 * New in version 10:
42 * - Meta/data locks combined 51 * - Meta/data locks combined
43 * 52 *
@@ -66,7 +75,7 @@
66 * - full 64 bit i_size in the metadata lock lvbs 75 * - full 64 bit i_size in the metadata lock lvbs
67 * - introduction of "rw" lock and pushing meta/data locking down 76 * - introduction of "rw" lock and pushing meta/data locking down
68 */ 77 */
69#define O2NET_PROTOCOL_VERSION 10ULL 78#define O2NET_PROTOCOL_VERSION 11ULL
70struct o2net_handshake { 79struct o2net_handshake {
71 __be64 protocol_version; 80 __be64 protocol_version;
72 __be64 connector_id; 81 __be64 connector_id;
diff --git a/fs/ocfs2/dlm/dlmapi.h b/fs/ocfs2/dlm/dlmapi.h
index cfd5cb65cab0..b5786a787fab 100644
--- a/fs/ocfs2/dlm/dlmapi.h
+++ b/fs/ocfs2/dlm/dlmapi.h
@@ -193,7 +193,12 @@ enum dlm_status dlmunlock(struct dlm_ctxt *dlm,
193 dlm_astunlockfunc_t *unlockast, 193 dlm_astunlockfunc_t *unlockast,
194 void *data); 194 void *data);
195 195
196struct dlm_ctxt * dlm_register_domain(const char *domain, u32 key); 196struct dlm_protocol_version {
197 u8 pv_major;
198 u8 pv_minor;
199};
200struct dlm_ctxt * dlm_register_domain(const char *domain, u32 key,
201 struct dlm_protocol_version *fs_proto);
197 202
198void dlm_unregister_domain(struct dlm_ctxt *dlm); 203void dlm_unregister_domain(struct dlm_ctxt *dlm);
199 204
diff --git a/fs/ocfs2/dlm/dlmast.c b/fs/ocfs2/dlm/dlmast.c
index 2fd8bded38f3..644bee55d8ba 100644
--- a/fs/ocfs2/dlm/dlmast.c
+++ b/fs/ocfs2/dlm/dlmast.c
@@ -43,7 +43,6 @@
43#include "cluster/heartbeat.h" 43#include "cluster/heartbeat.h"
44#include "cluster/nodemanager.h" 44#include "cluster/nodemanager.h"
45#include "cluster/tcp.h" 45#include "cluster/tcp.h"
46#include "cluster/endian.h"
47 46
48#include "dlmapi.h" 47#include "dlmapi.h"
49#include "dlmcommon.h" 48#include "dlmcommon.h"
diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h
index e90b92f9ece1..9843ee17ea27 100644
--- a/fs/ocfs2/dlm/dlmcommon.h
+++ b/fs/ocfs2/dlm/dlmcommon.h
@@ -142,6 +142,12 @@ struct dlm_ctxt
142 spinlock_t work_lock; 142 spinlock_t work_lock;
143 struct list_head dlm_domain_handlers; 143 struct list_head dlm_domain_handlers;
144 struct list_head dlm_eviction_callbacks; 144 struct list_head dlm_eviction_callbacks;
145
146 /* The filesystem specifies this at domain registration. We
147 * cache it here to know what to tell other nodes. */
148 struct dlm_protocol_version fs_locking_proto;
149 /* This is the inter-dlm communication version */
150 struct dlm_protocol_version dlm_locking_proto;
145}; 151};
146 152
147static inline struct hlist_head *dlm_lockres_hash(struct dlm_ctxt *dlm, unsigned i) 153static inline struct hlist_head *dlm_lockres_hash(struct dlm_ctxt *dlm, unsigned i)
@@ -589,10 +595,24 @@ struct dlm_proxy_ast
589#define DLM_PROXY_AST_MAX_LEN (sizeof(struct dlm_proxy_ast)+DLM_LVB_LEN) 595#define DLM_PROXY_AST_MAX_LEN (sizeof(struct dlm_proxy_ast)+DLM_LVB_LEN)
590 596
591#define DLM_MOD_KEY (0x666c6172) 597#define DLM_MOD_KEY (0x666c6172)
592enum dlm_query_join_response { 598enum dlm_query_join_response_code {
593 JOIN_DISALLOW = 0, 599 JOIN_DISALLOW = 0,
594 JOIN_OK, 600 JOIN_OK,
595 JOIN_OK_NO_MAP, 601 JOIN_OK_NO_MAP,
602 JOIN_PROTOCOL_MISMATCH,
603};
604
605union dlm_query_join_response {
606 u32 intval;
607 struct {
608 u8 code; /* Response code. dlm_minor and fs_minor
609 are only valid if this is JOIN_OK */
610 u8 dlm_minor; /* The minor version of the protocol the
611 dlm is speaking. */
612 u8 fs_minor; /* The minor version of the protocol the
613 filesystem is speaking. */
614 u8 reserved;
615 } packet;
596}; 616};
597 617
598struct dlm_lock_request 618struct dlm_lock_request
@@ -633,6 +653,8 @@ struct dlm_query_join_request
633 u8 node_idx; 653 u8 node_idx;
634 u8 pad1[2]; 654 u8 pad1[2];
635 u8 name_len; 655 u8 name_len;
656 struct dlm_protocol_version dlm_proto;
657 struct dlm_protocol_version fs_proto;
636 u8 domain[O2NM_MAX_NAME_LEN]; 658 u8 domain[O2NM_MAX_NAME_LEN];
637 u8 node_map[BITS_TO_BYTES(O2NM_MAX_NODES)]; 659 u8 node_map[BITS_TO_BYTES(O2NM_MAX_NODES)];
638}; 660};
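
The join response used to be a bare enum dlm_query_join_response returned as the handler's status; it is now a union so the same 32-bit value can carry the response code together with the dlm and fs minor versions that were negotiated. A standalone sketch of how the packing is used on both ends (the union layout is the one in the hunk above; the demo function itself is hypothetical):

	static void demo_join_response(void)
	{
		union dlm_query_join_response resp = {
			.packet.code = JOIN_OK,
			.packet.dlm_minor = 0,
			.packet.fs_minor = 0,
		};
		u32 wire = resp.intval;		/* what the handler returns */

		/* The requesting node reads the same 32 bits back: */
		union dlm_query_join_response seen = { .intval = wire };

		if (seen.packet.code == JOIN_OK)
			printk(KERN_INFO "joined: dlm minor %u, fs minor %u\n",
			       seen.packet.dlm_minor, seen.packet.fs_minor);
	}
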
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index 6954565b8ccb..638d2ebb892b 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -123,6 +123,17 @@ DEFINE_SPINLOCK(dlm_domain_lock);
123LIST_HEAD(dlm_domains); 123LIST_HEAD(dlm_domains);
124static DECLARE_WAIT_QUEUE_HEAD(dlm_domain_events); 124static DECLARE_WAIT_QUEUE_HEAD(dlm_domain_events);
125 125
126/*
127 * The supported protocol version for DLM communication. Running domains
128 * will have a negotiated version with the same major number and a minor
129 * number equal or smaller. The dlm_ctxt->dlm_locking_proto field should
130 * be used to determine what a running domain is actually using.
131 */
132static const struct dlm_protocol_version dlm_protocol = {
133 .pv_major = 1,
134 .pv_minor = 0,
135};
136
126#define DLM_DOMAIN_BACKOFF_MS 200 137#define DLM_DOMAIN_BACKOFF_MS 200
127 138
128static int dlm_query_join_handler(struct o2net_msg *msg, u32 len, void *data, 139static int dlm_query_join_handler(struct o2net_msg *msg, u32 len, void *data,
@@ -133,6 +144,8 @@ static int dlm_cancel_join_handler(struct o2net_msg *msg, u32 len, void *data,
133 void **ret_data); 144 void **ret_data);
134static int dlm_exit_domain_handler(struct o2net_msg *msg, u32 len, void *data, 145static int dlm_exit_domain_handler(struct o2net_msg *msg, u32 len, void *data,
135 void **ret_data); 146 void **ret_data);
147static int dlm_protocol_compare(struct dlm_protocol_version *existing,
148 struct dlm_protocol_version *request);
136 149
137static void dlm_unregister_domain_handlers(struct dlm_ctxt *dlm); 150static void dlm_unregister_domain_handlers(struct dlm_ctxt *dlm);
138 151
@@ -668,11 +681,45 @@ void dlm_unregister_domain(struct dlm_ctxt *dlm)
668} 681}
669EXPORT_SYMBOL_GPL(dlm_unregister_domain); 682EXPORT_SYMBOL_GPL(dlm_unregister_domain);
670 683
684static int dlm_query_join_proto_check(char *proto_type, int node,
685 struct dlm_protocol_version *ours,
686 struct dlm_protocol_version *request)
687{
688 int rc;
689 struct dlm_protocol_version proto = *request;
690
691 if (!dlm_protocol_compare(ours, &proto)) {
692 mlog(0,
693 "node %u wanted to join with %s locking protocol "
694 "%u.%u, we respond with %u.%u\n",
695 node, proto_type,
696 request->pv_major,
697 request->pv_minor,
698 proto.pv_major, proto.pv_minor);
699 request->pv_minor = proto.pv_minor;
700 rc = 0;
701 } else {
702 mlog(ML_NOTICE,
703 "Node %u wanted to join with %s locking "
704 "protocol %u.%u, but we have %u.%u, disallowing\n",
705 node, proto_type,
706 request->pv_major,
707 request->pv_minor,
708 ours->pv_major,
709 ours->pv_minor);
710 rc = 1;
711 }
712
713 return rc;
714}
715
671static int dlm_query_join_handler(struct o2net_msg *msg, u32 len, void *data, 716static int dlm_query_join_handler(struct o2net_msg *msg, u32 len, void *data,
672 void **ret_data) 717 void **ret_data)
673{ 718{
674 struct dlm_query_join_request *query; 719 struct dlm_query_join_request *query;
675 enum dlm_query_join_response response; 720 union dlm_query_join_response response = {
721 .packet.code = JOIN_DISALLOW,
722 };
676 struct dlm_ctxt *dlm = NULL; 723 struct dlm_ctxt *dlm = NULL;
677 u8 nodenum; 724 u8 nodenum;
678 725
@@ -690,11 +737,11 @@ static int dlm_query_join_handler(struct o2net_msg *msg, u32 len, void *data,
690 mlog(0, "node %u is not in our live map yet\n", 737 mlog(0, "node %u is not in our live map yet\n",
691 query->node_idx); 738 query->node_idx);
692 739
693 response = JOIN_DISALLOW; 740 response.packet.code = JOIN_DISALLOW;
694 goto respond; 741 goto respond;
695 } 742 }
696 743
697 response = JOIN_OK_NO_MAP; 744 response.packet.code = JOIN_OK_NO_MAP;
698 745
699 spin_lock(&dlm_domain_lock); 746 spin_lock(&dlm_domain_lock);
700 dlm = __dlm_lookup_domain_full(query->domain, query->name_len); 747 dlm = __dlm_lookup_domain_full(query->domain, query->name_len);
@@ -713,7 +760,7 @@ static int dlm_query_join_handler(struct o2net_msg *msg, u32 len, void *data,
713 mlog(0, "disallow join as node %u does not " 760 mlog(0, "disallow join as node %u does not "
714 "have node %u in its nodemap\n", 761 "have node %u in its nodemap\n",
715 query->node_idx, nodenum); 762 query->node_idx, nodenum);
716 response = JOIN_DISALLOW; 763 response.packet.code = JOIN_DISALLOW;
717 goto unlock_respond; 764 goto unlock_respond;
718 } 765 }
719 } 766 }
@@ -733,30 +780,48 @@ static int dlm_query_join_handler(struct o2net_msg *msg, u32 len, void *data,
733 /*If this is a brand new context and we 780 /*If this is a brand new context and we
734 * haven't started our join process yet, then 781 * haven't started our join process yet, then
735 * the other node won the race. */ 782 * the other node won the race. */
736 response = JOIN_OK_NO_MAP; 783 response.packet.code = JOIN_OK_NO_MAP;
737 } else if (dlm->joining_node != DLM_LOCK_RES_OWNER_UNKNOWN) { 784 } else if (dlm->joining_node != DLM_LOCK_RES_OWNER_UNKNOWN) {
738 /* Disallow parallel joins. */ 785 /* Disallow parallel joins. */
739 response = JOIN_DISALLOW; 786 response.packet.code = JOIN_DISALLOW;
740 } else if (dlm->reco.state & DLM_RECO_STATE_ACTIVE) { 787 } else if (dlm->reco.state & DLM_RECO_STATE_ACTIVE) {
741 mlog(0, "node %u trying to join, but recovery " 788 mlog(0, "node %u trying to join, but recovery "
742 "is ongoing.\n", bit); 789 "is ongoing.\n", bit);
743 response = JOIN_DISALLOW; 790 response.packet.code = JOIN_DISALLOW;
744 } else if (test_bit(bit, dlm->recovery_map)) { 791 } else if (test_bit(bit, dlm->recovery_map)) {
745 mlog(0, "node %u trying to join, but it " 792 mlog(0, "node %u trying to join, but it "
746 "still needs recovery.\n", bit); 793 "still needs recovery.\n", bit);
747 response = JOIN_DISALLOW; 794 response.packet.code = JOIN_DISALLOW;
748 } else if (test_bit(bit, dlm->domain_map)) { 795 } else if (test_bit(bit, dlm->domain_map)) {
749 mlog(0, "node %u trying to join, but it " 796 mlog(0, "node %u trying to join, but it "
750 "is still in the domain! needs recovery?\n", 797 "is still in the domain! needs recovery?\n",
751 bit); 798 bit);
752 response = JOIN_DISALLOW; 799 response.packet.code = JOIN_DISALLOW;
753 } else { 800 } else {
754 /* Alright we're fully a part of this domain 801 /* Alright we're fully a part of this domain
755 * so we keep some state as to who's joining 802 * so we keep some state as to who's joining
756 * and indicate to him that needs to be fixed 803 * and indicate to him that needs to be fixed
757 * up. */ 804 * up. */
758 response = JOIN_OK; 805
759 __dlm_set_joining_node(dlm, query->node_idx); 806 /* Make sure we speak compatible locking protocols. */
807 if (dlm_query_join_proto_check("DLM", bit,
808 &dlm->dlm_locking_proto,
809 &query->dlm_proto)) {
810 response.packet.code =
811 JOIN_PROTOCOL_MISMATCH;
812 } else if (dlm_query_join_proto_check("fs", bit,
813 &dlm->fs_locking_proto,
814 &query->fs_proto)) {
815 response.packet.code =
816 JOIN_PROTOCOL_MISMATCH;
817 } else {
818 response.packet.dlm_minor =
819 query->dlm_proto.pv_minor;
820 response.packet.fs_minor =
821 query->fs_proto.pv_minor;
822 response.packet.code = JOIN_OK;
823 __dlm_set_joining_node(dlm, query->node_idx);
824 }
760 } 825 }
761 826
762 spin_unlock(&dlm->spinlock); 827 spin_unlock(&dlm->spinlock);
@@ -765,9 +830,9 @@ unlock_respond:
765 spin_unlock(&dlm_domain_lock); 830 spin_unlock(&dlm_domain_lock);
766 831
767respond: 832respond:
768 mlog(0, "We respond with %u\n", response); 833 mlog(0, "We respond with %u\n", response.packet.code);
769 834
770 return response; 835 return response.intval;
771} 836}
772 837
773static int dlm_assert_joined_handler(struct o2net_msg *msg, u32 len, void *data, 838static int dlm_assert_joined_handler(struct o2net_msg *msg, u32 len, void *data,
@@ -899,10 +964,11 @@ static int dlm_send_join_cancels(struct dlm_ctxt *dlm,
899 964
900static int dlm_request_join(struct dlm_ctxt *dlm, 965static int dlm_request_join(struct dlm_ctxt *dlm,
901 int node, 966 int node,
902 enum dlm_query_join_response *response) 967 enum dlm_query_join_response_code *response)
903{ 968{
904 int status, retval; 969 int status;
905 struct dlm_query_join_request join_msg; 970 struct dlm_query_join_request join_msg;
971 union dlm_query_join_response join_resp;
906 972
907 mlog(0, "querying node %d\n", node); 973 mlog(0, "querying node %d\n", node);
908 974
@@ -910,12 +976,15 @@ static int dlm_request_join(struct dlm_ctxt *dlm,
910 join_msg.node_idx = dlm->node_num; 976 join_msg.node_idx = dlm->node_num;
911 join_msg.name_len = strlen(dlm->name); 977 join_msg.name_len = strlen(dlm->name);
912 memcpy(join_msg.domain, dlm->name, join_msg.name_len); 978 memcpy(join_msg.domain, dlm->name, join_msg.name_len);
979 join_msg.dlm_proto = dlm->dlm_locking_proto;
980 join_msg.fs_proto = dlm->fs_locking_proto;
913 981
914 /* copy live node map to join message */ 982 /* copy live node map to join message */
915 byte_copymap(join_msg.node_map, dlm->live_nodes_map, O2NM_MAX_NODES); 983 byte_copymap(join_msg.node_map, dlm->live_nodes_map, O2NM_MAX_NODES);
916 984
917 status = o2net_send_message(DLM_QUERY_JOIN_MSG, DLM_MOD_KEY, &join_msg, 985 status = o2net_send_message(DLM_QUERY_JOIN_MSG, DLM_MOD_KEY, &join_msg,
918 sizeof(join_msg), node, &retval); 986 sizeof(join_msg), node,
987 &join_resp.intval);
919 if (status < 0 && status != -ENOPROTOOPT) { 988 if (status < 0 && status != -ENOPROTOOPT) {
920 mlog_errno(status); 989 mlog_errno(status);
921 goto bail; 990 goto bail;
@@ -928,14 +997,41 @@ static int dlm_request_join(struct dlm_ctxt *dlm,
928 if (status == -ENOPROTOOPT) { 997 if (status == -ENOPROTOOPT) {
929 status = 0; 998 status = 0;
930 *response = JOIN_OK_NO_MAP; 999 *response = JOIN_OK_NO_MAP;
931 } else if (retval == JOIN_DISALLOW || 1000 } else if (join_resp.packet.code == JOIN_DISALLOW ||
932 retval == JOIN_OK || 1001 join_resp.packet.code == JOIN_OK_NO_MAP) {
933 retval == JOIN_OK_NO_MAP) { 1002 *response = join_resp.packet.code;
934 *response = retval; 1003 } else if (join_resp.packet.code == JOIN_PROTOCOL_MISMATCH) {
1004 mlog(ML_NOTICE,
1005 "This node requested DLM locking protocol %u.%u and "
1006 "filesystem locking protocol %u.%u. At least one of "
1007 "the protocol versions on node %d is not compatible, "
1008 "disconnecting\n",
1009 dlm->dlm_locking_proto.pv_major,
1010 dlm->dlm_locking_proto.pv_minor,
1011 dlm->fs_locking_proto.pv_major,
1012 dlm->fs_locking_proto.pv_minor,
1013 node);
1014 status = -EPROTO;
1015 *response = join_resp.packet.code;
1016 } else if (join_resp.packet.code == JOIN_OK) {
1017 *response = join_resp.packet.code;
1018 /* Use the same locking protocol as the remote node */
1019 dlm->dlm_locking_proto.pv_minor =
1020 join_resp.packet.dlm_minor;
1021 dlm->fs_locking_proto.pv_minor =
1022 join_resp.packet.fs_minor;
1023 mlog(0,
1024 "Node %d responds JOIN_OK with DLM locking protocol "
1025 "%u.%u and fs locking protocol %u.%u\n",
1026 node,
1027 dlm->dlm_locking_proto.pv_major,
1028 dlm->dlm_locking_proto.pv_minor,
1029 dlm->fs_locking_proto.pv_major,
1030 dlm->fs_locking_proto.pv_minor);
935 } else { 1031 } else {
936 status = -EINVAL; 1032 status = -EINVAL;
937 mlog(ML_ERROR, "invalid response %d from node %u\n", retval, 1033 mlog(ML_ERROR, "invalid response %d from node %u\n",
938 node); 1034 join_resp.packet.code, node);
939 } 1035 }
940 1036
941 mlog(0, "status %d, node %d response is %d\n", status, node, 1037 mlog(0, "status %d, node %d response is %d\n", status, node,
@@ -1008,7 +1104,7 @@ struct domain_join_ctxt {
1008 1104
1009static int dlm_should_restart_join(struct dlm_ctxt *dlm, 1105static int dlm_should_restart_join(struct dlm_ctxt *dlm,
1010 struct domain_join_ctxt *ctxt, 1106 struct domain_join_ctxt *ctxt,
1011 enum dlm_query_join_response response) 1107 enum dlm_query_join_response_code response)
1012{ 1108{
1013 int ret; 1109 int ret;
1014 1110
@@ -1034,7 +1130,7 @@ static int dlm_try_to_join_domain(struct dlm_ctxt *dlm)
1034{ 1130{
1035 int status = 0, tmpstat, node; 1131 int status = 0, tmpstat, node;
1036 struct domain_join_ctxt *ctxt; 1132 struct domain_join_ctxt *ctxt;
1037 enum dlm_query_join_response response = JOIN_DISALLOW; 1133 enum dlm_query_join_response_code response = JOIN_DISALLOW;
1038 1134
1039 mlog_entry("%p", dlm); 1135 mlog_entry("%p", dlm);
1040 1136
@@ -1450,10 +1546,38 @@ leave:
1450} 1546}
1451 1547
1452/* 1548/*
1453 * dlm_register_domain: one-time setup per "domain" 1549 * Compare a requested locking protocol version against the current one.
1550 *
1551 * If the major numbers are different, they are incompatible.
1552 * If the current minor is greater than the request, they are incompatible.
1553 * If the current minor is less than or equal to the request, they are
1554 * compatible, and the requester should run at the current minor version.
1555 */
1556static int dlm_protocol_compare(struct dlm_protocol_version *existing,
1557 struct dlm_protocol_version *request)
1558{
1559 if (existing->pv_major != request->pv_major)
1560 return 1;
1561
1562 if (existing->pv_minor > request->pv_minor)
1563 return 1;
1564
1565 if (existing->pv_minor < request->pv_minor)
1566 request->pv_minor = existing->pv_minor;
1567
1568 return 0;
1569}
1570
1571/*
1572 * dlm_register_domain: one-time setup per "domain".
1573 *
1574 * The filesystem passes in the requested locking version via proto.
1575 * If registration was successful, proto will contain the negotiated
1576 * locking protocol.
1454 */ 1577 */
1455struct dlm_ctxt * dlm_register_domain(const char *domain, 1578struct dlm_ctxt * dlm_register_domain(const char *domain,
1456 u32 key) 1579 u32 key,
1580 struct dlm_protocol_version *fs_proto)
1457{ 1581{
1458 int ret; 1582 int ret;
1459 struct dlm_ctxt *dlm = NULL; 1583 struct dlm_ctxt *dlm = NULL;
@@ -1496,6 +1620,15 @@ retry:
1496 goto retry; 1620 goto retry;
1497 } 1621 }
1498 1622
1623 if (dlm_protocol_compare(&dlm->fs_locking_proto, fs_proto)) {
1624 mlog(ML_ERROR,
1625 "Requested locking protocol version is not "
1626 "compatible with already registered domain "
1627 "\"%s\"\n", domain);
1628 ret = -EPROTO;
1629 goto leave;
1630 }
1631
1499 __dlm_get(dlm); 1632 __dlm_get(dlm);
1500 dlm->num_joins++; 1633 dlm->num_joins++;
1501 1634
@@ -1526,6 +1659,13 @@ retry:
1526 list_add_tail(&dlm->list, &dlm_domains); 1659 list_add_tail(&dlm->list, &dlm_domains);
1527 spin_unlock(&dlm_domain_lock); 1660 spin_unlock(&dlm_domain_lock);
1528 1661
1662 /*
1663 * Pass the locking protocol version into the join. If the join
1664 * succeeds, it will have the negotiated protocol set.
1665 */
1666 dlm->dlm_locking_proto = dlm_protocol;
1667 dlm->fs_locking_proto = *fs_proto;
1668
1529 ret = dlm_join_domain(dlm); 1669 ret = dlm_join_domain(dlm);
1530 if (ret) { 1670 if (ret) {
1531 mlog_errno(ret); 1671 mlog_errno(ret);
@@ -1533,6 +1673,9 @@ retry:
1533 goto leave; 1673 goto leave;
1534 } 1674 }
1535 1675
1676 /* Tell the caller what locking protocol we negotiated */
1677 *fs_proto = dlm->fs_locking_proto;
1678
1536 ret = 0; 1679 ret = 0;
1537leave: 1680leave:
1538 if (new_ctxt) 1681 if (new_ctxt)
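
dlm_protocol_compare() above implements the rule spelled out in its comment: major numbers must match exactly, the requester's minor must be at least the existing minor, and a compatible requester is clamped down to the existing minor. A worked check of those three cases (hypothetical test harness; it would have to live in dlmdomain.c, since the function is static):

	static void demo_protocol_compare(void)
	{
		struct dlm_protocol_version domain   = { .pv_major = 1, .pv_minor = 2 };
		struct dlm_protocol_version newer    = { .pv_major = 1, .pv_minor = 5 };
		struct dlm_protocol_version older    = { .pv_major = 1, .pv_minor = 1 };
		struct dlm_protocol_version othermaj = { .pv_major = 2, .pv_minor = 0 };

		/* 1.5 against a 1.2 domain: compatible, clamped to 1.2. */
		BUG_ON(dlm_protocol_compare(&domain, &newer) != 0);
		BUG_ON(newer.pv_minor != 2);

		/* 1.1 against a 1.2 domain: the domain is already ahead. */
		BUG_ON(dlm_protocol_compare(&domain, &older) == 0);

		/* Different major versions never interoperate. */
		BUG_ON(dlm_protocol_compare(&domain, &othermaj) == 0);
	}
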
diff --git a/fs/ocfs2/dlm/dlmfs.c b/fs/ocfs2/dlm/dlmfs.c
index 6639baab0798..61a000f8524c 100644
--- a/fs/ocfs2/dlm/dlmfs.c
+++ b/fs/ocfs2/dlm/dlmfs.c
@@ -60,6 +60,8 @@
60#define MLOG_MASK_PREFIX ML_DLMFS 60#define MLOG_MASK_PREFIX ML_DLMFS
61#include "cluster/masklog.h" 61#include "cluster/masklog.h"
62 62
63#include "ocfs2_lockingver.h"
64
63static const struct super_operations dlmfs_ops; 65static const struct super_operations dlmfs_ops;
64static const struct file_operations dlmfs_file_operations; 66static const struct file_operations dlmfs_file_operations;
65static const struct inode_operations dlmfs_dir_inode_operations; 67static const struct inode_operations dlmfs_dir_inode_operations;
@@ -70,6 +72,16 @@ static struct kmem_cache *dlmfs_inode_cache;
70struct workqueue_struct *user_dlm_worker; 72struct workqueue_struct *user_dlm_worker;
71 73
72/* 74/*
75 * This is the userdlmfs locking protocol version.
76 *
77 * See fs/ocfs2/dlmglue.c for more details on locking versions.
78 */
79static const struct dlm_protocol_version user_locking_protocol = {
80 .pv_major = OCFS2_LOCKING_PROTOCOL_MAJOR,
81 .pv_minor = OCFS2_LOCKING_PROTOCOL_MINOR,
82};
83
84/*
73 * decodes a set of open flags into a valid lock level and a set of flags. 85 * decodes a set of open flags into a valid lock level and a set of flags.
74 * returns < 0 if we have invalid flags 86 * returns < 0 if we have invalid flags
75 * flags which mean something to us: 87 * flags which mean something to us:
@@ -416,6 +428,7 @@ static int dlmfs_mkdir(struct inode * dir,
416 struct qstr *domain = &dentry->d_name; 428 struct qstr *domain = &dentry->d_name;
417 struct dlmfs_inode_private *ip; 429 struct dlmfs_inode_private *ip;
418 struct dlm_ctxt *dlm; 430 struct dlm_ctxt *dlm;
431 struct dlm_protocol_version proto = user_locking_protocol;
419 432
420 mlog(0, "mkdir %.*s\n", domain->len, domain->name); 433 mlog(0, "mkdir %.*s\n", domain->len, domain->name);
421 434
@@ -435,7 +448,7 @@ static int dlmfs_mkdir(struct inode * dir,
435 448
436 ip = DLMFS_I(inode); 449 ip = DLMFS_I(inode);
437 450
438 dlm = user_dlm_register_context(domain); 451 dlm = user_dlm_register_context(domain, &proto);
439 if (IS_ERR(dlm)) { 452 if (IS_ERR(dlm)) {
440 status = PTR_ERR(dlm); 453 status = PTR_ERR(dlm);
441 mlog(ML_ERROR, "Error %d could not register domain \"%.*s\"\n", 454 mlog(ML_ERROR, "Error %d could not register domain \"%.*s\"\n",
diff --git a/fs/ocfs2/dlm/userdlm.c b/fs/ocfs2/dlm/userdlm.c
index 7d2f578b267d..4cb1d3dae250 100644
--- a/fs/ocfs2/dlm/userdlm.c
+++ b/fs/ocfs2/dlm/userdlm.c
@@ -645,7 +645,8 @@ bail:
645 return status; 645 return status;
646} 646}
647 647
648struct dlm_ctxt *user_dlm_register_context(struct qstr *name) 648struct dlm_ctxt *user_dlm_register_context(struct qstr *name,
649 struct dlm_protocol_version *proto)
649{ 650{
650 struct dlm_ctxt *dlm; 651 struct dlm_ctxt *dlm;
651 u32 dlm_key; 652 u32 dlm_key;
@@ -661,7 +662,7 @@ struct dlm_ctxt *user_dlm_register_context(struct qstr *name)
661 662
662 snprintf(domain, name->len + 1, "%.*s", name->len, name->name); 663 snprintf(domain, name->len + 1, "%.*s", name->len, name->name);
663 664
664 dlm = dlm_register_domain(domain, dlm_key); 665 dlm = dlm_register_domain(domain, dlm_key, proto);
665 if (IS_ERR(dlm)) 666 if (IS_ERR(dlm))
666 mlog_errno(PTR_ERR(dlm)); 667 mlog_errno(PTR_ERR(dlm));
667 668
diff --git a/fs/ocfs2/dlm/userdlm.h b/fs/ocfs2/dlm/userdlm.h
index c400e93bbf79..39ec27738499 100644
--- a/fs/ocfs2/dlm/userdlm.h
+++ b/fs/ocfs2/dlm/userdlm.h
@@ -83,7 +83,8 @@ void user_dlm_write_lvb(struct inode *inode,
83void user_dlm_read_lvb(struct inode *inode, 83void user_dlm_read_lvb(struct inode *inode,
84 char *val, 84 char *val,
85 unsigned int len); 85 unsigned int len);
86struct dlm_ctxt *user_dlm_register_context(struct qstr *name); 86struct dlm_ctxt *user_dlm_register_context(struct qstr *name,
87 struct dlm_protocol_version *proto);
87void user_dlm_unregister_context(struct dlm_ctxt *dlm); 88void user_dlm_unregister_context(struct dlm_ctxt *dlm);
88 89
89struct dlmfs_inode_private { 90struct dlmfs_inode_private {
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index 3867244fb144..351130c9b734 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -43,6 +43,7 @@
43#include <cluster/masklog.h> 43#include <cluster/masklog.h>
44 44
45#include "ocfs2.h" 45#include "ocfs2.h"
46#include "ocfs2_lockingver.h"
46 47
47#include "alloc.h" 48#include "alloc.h"
48#include "dcache.h" 49#include "dcache.h"
@@ -258,6 +259,31 @@ static struct ocfs2_lock_res_ops ocfs2_flock_lops = {
258 .flags = 0, 259 .flags = 0,
259}; 260};
260 261
262/*
263 * This is the filesystem locking protocol version.
264 *
265 * Whenever the filesystem does new things with locks (adds or removes a
266 * lock, orders them differently, does different things underneath a lock),
267 * the version must be changed. The protocol is negotiated when joining
268 * the dlm domain. A node may join the domain if its major version is
269 * identical to all other nodes and its minor version is greater than
270 * or equal to all other nodes. When its minor version is greater than
271 * the other nodes, it will run at the minor version specified by the
272 * other nodes.
273 *
274 * If a locking change is made that will not be compatible with older
275 * versions, the major number must be increased and the minor version set
276 * to zero. If a change merely adds a behavior that can be disabled when
277 * speaking to older versions, the minor version must be increased. If a
278 * change adds a fully backwards compatible change (eg, LVB changes that
279 * are just ignored by older versions), the version does not need to be
280 * updated.
281 */
282const struct dlm_protocol_version ocfs2_locking_protocol = {
283 .pv_major = OCFS2_LOCKING_PROTOCOL_MAJOR,
284 .pv_minor = OCFS2_LOCKING_PROTOCOL_MINOR,
285};
286
261static inline int ocfs2_is_inode_lock(struct ocfs2_lock_res *lockres) 287static inline int ocfs2_is_inode_lock(struct ocfs2_lock_res *lockres)
262{ 288{
263 return lockres->l_type == OCFS2_LOCK_TYPE_META || 289 return lockres->l_type == OCFS2_LOCK_TYPE_META ||
@@ -2506,7 +2532,8 @@ int ocfs2_dlm_init(struct ocfs2_super *osb)
2506 dlm_key = crc32_le(0, osb->uuid_str, strlen(osb->uuid_str)); 2532 dlm_key = crc32_le(0, osb->uuid_str, strlen(osb->uuid_str));
2507 2533
2508 /* for now, uuid == domain */ 2534 /* for now, uuid == domain */
2509 dlm = dlm_register_domain(osb->uuid_str, dlm_key); 2535 dlm = dlm_register_domain(osb->uuid_str, dlm_key,
2536 &osb->osb_locking_proto);
2510 if (IS_ERR(dlm)) { 2537 if (IS_ERR(dlm)) {
2511 status = PTR_ERR(dlm); 2538 status = PTR_ERR(dlm);
2512 mlog_errno(status); 2539 mlog_errno(status);
diff --git a/fs/ocfs2/dlmglue.h b/fs/ocfs2/dlmglue.h
index 5f17243ba501..1d5b0699d0a9 100644
--- a/fs/ocfs2/dlmglue.h
+++ b/fs/ocfs2/dlmglue.h
@@ -116,4 +116,5 @@ void ocfs2_wake_downconvert_thread(struct ocfs2_super *osb);
116struct ocfs2_dlm_debug *ocfs2_new_dlm_debug(void); 116struct ocfs2_dlm_debug *ocfs2_new_dlm_debug(void);
117void ocfs2_put_dlm_debug(struct ocfs2_dlm_debug *dlm_debug); 117void ocfs2_put_dlm_debug(struct ocfs2_dlm_debug *dlm_debug);
118 118
119extern const struct dlm_protocol_version ocfs2_locking_protocol;
119#endif /* DLMGLUE_H */ 120#endif /* DLMGLUE_H */
diff --git a/fs/ocfs2/endian.h b/fs/ocfs2/endian.h
deleted file mode 100644
index 1942e09f6ee5..000000000000
--- a/fs/ocfs2/endian.h
+++ /dev/null
@@ -1,45 +0,0 @@
1/* -*- mode: c; c-basic-offset: 8; -*-
2 * vim: noexpandtab sw=8 ts=8 sts=0:
3 *
4 * Copyright (C) 2005 Oracle. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public
17 * License along with this program; if not, write to the
18 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
19 * Boston, MA 021110-1307, USA.
20 */
21
22#ifndef OCFS2_ENDIAN_H
23#define OCFS2_ENDIAN_H
24
25static inline void le16_add_cpu(__le16 *var, u16 val)
26{
27 *var = cpu_to_le16(le16_to_cpu(*var) + val);
28}
29
30static inline void le32_add_cpu(__le32 *var, u32 val)
31{
32 *var = cpu_to_le32(le32_to_cpu(*var) + val);
33}
34
35static inline void le64_add_cpu(__le64 *var, u64 val)
36{
37 *var = cpu_to_le64(le64_to_cpu(*var) + val);
38}
39
40static inline void be32_add_cpu(__be32 *var, u32 val)
41{
42 *var = cpu_to_be32(be32_to_cpu(*var) + val);
43}
44
45#endif /* OCFS2_ENDIAN_H */
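
Both deleted endian.h headers contained nothing but open-coded {le,be}*_add_cpu() helpers; removing them only works if equivalent helpers are now available from the common byteorder headers, which this diff does not show and is assumed here. Call sites keep the same spelling, e.g.:

	/* Assumed generic replacement for the deleted local helpers. */
	static inline void demo_add_clusters(struct ocfs2_dinode *di, u32 n)
	{
		le32_add_cpu(&di->i_clusters, n);	/* i_clusters += n, kept LE */
	}
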
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index d08480580470..6546cef212e3 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -43,7 +43,6 @@
43#include "dlm/dlmapi.h" 43#include "dlm/dlmapi.h"
44 44
45#include "ocfs2_fs.h" 45#include "ocfs2_fs.h"
46#include "endian.h"
47#include "ocfs2_lockid.h" 46#include "ocfs2_lockid.h"
48 47
49/* Most user visible OCFS2 inodes will have very few pieces of 48/* Most user visible OCFS2 inodes will have very few pieces of
@@ -251,6 +250,7 @@ struct ocfs2_super
251 struct ocfs2_lock_res osb_rename_lockres; 250 struct ocfs2_lock_res osb_rename_lockres;
252 struct dlm_eviction_cb osb_eviction_cb; 251 struct dlm_eviction_cb osb_eviction_cb;
253 struct ocfs2_dlm_debug *osb_dlm_debug; 252 struct ocfs2_dlm_debug *osb_dlm_debug;
253 struct dlm_protocol_version osb_locking_proto;
254 254
255 struct dentry *osb_debug_root; 255 struct dentry *osb_debug_root;
256 256
diff --git a/fs/ocfs2/ocfs2_lockingver.h b/fs/ocfs2/ocfs2_lockingver.h
new file mode 100644
index 000000000000..82d5eeac0fff
--- /dev/null
+++ b/fs/ocfs2/ocfs2_lockingver.h
@@ -0,0 +1,30 @@
1/* -*- mode: c; c-basic-offset: 8; -*-
2 * vim: noexpandtab sw=8 ts=8 sts=0:
3 *
4 * ocfs2_lockingver.h
5 *
6 * Defines OCFS2 Locking version values.
7 *
8 * Copyright (C) 2008 Oracle. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public
12 * License, version 2, as published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 */
19
20#ifndef OCFS2_LOCKINGVER_H
21#define OCFS2_LOCKINGVER_H
22
23/*
24 * The protocol version for ocfs2 cluster locking. See dlmglue.c for
25 * more details.
26 */
27#define OCFS2_LOCKING_PROTOCOL_MAJOR 1
28#define OCFS2_LOCKING_PROTOCOL_MINOR 0
29
30#endif /* OCFS2_LOCKINGVER_H */
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 01fe40ee5ea9..bec75aff3d9f 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -1355,6 +1355,7 @@ static int ocfs2_initialize_super(struct super_block *sb,
1355 sb->s_fs_info = osb; 1355 sb->s_fs_info = osb;
1356 sb->s_op = &ocfs2_sops; 1356 sb->s_op = &ocfs2_sops;
1357 sb->s_export_op = &ocfs2_export_ops; 1357 sb->s_export_op = &ocfs2_export_ops;
1358 osb->osb_locking_proto = ocfs2_locking_protocol;
1358 sb->s_time_gran = 1; 1359 sb->s_time_gran = 1;
1359 sb->s_flags |= MS_NOATIME; 1360 sb->s_flags |= MS_NOATIME;
1360 /* this is needed to support O_LARGEFILE */ 1361 /* this is needed to support O_LARGEFILE */
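
Taken together, the dlmglue.c and super.c hunks wire the filesystem's locking version through the join: the compiled-in constant is copied into the super block at mount, passed by reference into dlm_register_domain(), and comes back holding whatever minor version the cluster agreed on. A condensed, hypothetical sketch of that flow with error handling elided (field and function names exactly as in the hunks):

	/* Hypothetical condensation of ocfs2_initialize_super() + ocfs2_dlm_init(). */
	static int demo_dlm_bringup(struct ocfs2_super *osb, u32 dlm_key)
	{
		struct dlm_ctxt *dlm;

		/* Start from the build-time version (1.0 here). */
		osb->osb_locking_proto = ocfs2_locking_protocol;

		dlm = dlm_register_domain(osb->uuid_str, dlm_key,
					  &osb->osb_locking_proto);
		if (IS_ERR(dlm))
			return PTR_ERR(dlm);

		/* osb_locking_proto.pv_minor now reflects what the
		 * cluster negotiated. */
		return 0;
	}
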
diff --git a/fs/open.c b/fs/open.c
index 4932b4d1da05..54198538b67e 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -127,10 +127,10 @@ asmlinkage long sys_statfs(const char __user * path, struct statfs __user * buf)
127 error = user_path_walk(path, &nd); 127 error = user_path_walk(path, &nd);
128 if (!error) { 128 if (!error) {
129 struct statfs tmp; 129 struct statfs tmp;
130 error = vfs_statfs_native(nd.dentry, &tmp); 130 error = vfs_statfs_native(nd.path.dentry, &tmp);
131 if (!error && copy_to_user(buf, &tmp, sizeof(tmp))) 131 if (!error && copy_to_user(buf, &tmp, sizeof(tmp)))
132 error = -EFAULT; 132 error = -EFAULT;
133 path_release(&nd); 133 path_put(&nd.path);
134 } 134 }
135 return error; 135 return error;
136} 136}
@@ -146,10 +146,10 @@ asmlinkage long sys_statfs64(const char __user *path, size_t sz, struct statfs64
146 error = user_path_walk(path, &nd); 146 error = user_path_walk(path, &nd);
147 if (!error) { 147 if (!error) {
148 struct statfs64 tmp; 148 struct statfs64 tmp;
149 error = vfs_statfs64(nd.dentry, &tmp); 149 error = vfs_statfs64(nd.path.dentry, &tmp);
150 if (!error && copy_to_user(buf, &tmp, sizeof(tmp))) 150 if (!error && copy_to_user(buf, &tmp, sizeof(tmp)))
151 error = -EFAULT; 151 error = -EFAULT;
152 path_release(&nd); 152 path_put(&nd.path);
153 } 153 }
154 return error; 154 return error;
155} 155}
@@ -233,7 +233,7 @@ static long do_sys_truncate(const char __user * path, loff_t length)
233 error = user_path_walk(path, &nd); 233 error = user_path_walk(path, &nd);
234 if (error) 234 if (error)
235 goto out; 235 goto out;
236 inode = nd.dentry->d_inode; 236 inode = nd.path.dentry->d_inode;
237 237
238 /* For directories it's -EISDIR, for other non-regulars - -EINVAL */ 238 /* For directories it's -EISDIR, for other non-regulars - -EINVAL */
239 error = -EISDIR; 239 error = -EISDIR;
@@ -271,13 +271,13 @@ static long do_sys_truncate(const char __user * path, loff_t length)
271 error = locks_verify_truncate(inode, NULL, length); 271 error = locks_verify_truncate(inode, NULL, length);
272 if (!error) { 272 if (!error) {
273 DQUOT_INIT(inode); 273 DQUOT_INIT(inode);
274 error = do_truncate(nd.dentry, length, 0, NULL); 274 error = do_truncate(nd.path.dentry, length, 0, NULL);
275 } 275 }
276 276
277put_write_and_out: 277put_write_and_out:
278 put_write_access(inode); 278 put_write_access(inode);
279dput_and_out: 279dput_and_out:
280 path_release(&nd); 280 path_put(&nd.path);
281out: 281out:
282 return error; 282 return error;
283} 283}
@@ -455,14 +455,14 @@ asmlinkage long sys_faccessat(int dfd, const char __user *filename, int mode)
455 res = vfs_permission(&nd, mode); 455 res = vfs_permission(&nd, mode);
456 /* SuS v2 requires we report a read only fs too */ 456 /* SuS v2 requires we report a read only fs too */
457 if(res || !(mode & S_IWOTH) || 457 if(res || !(mode & S_IWOTH) ||
458 special_file(nd.dentry->d_inode->i_mode)) 458 special_file(nd.path.dentry->d_inode->i_mode))
459 goto out_path_release; 459 goto out_path_release;
460 460
461 if(IS_RDONLY(nd.dentry->d_inode)) 461 if(IS_RDONLY(nd.path.dentry->d_inode))
462 res = -EROFS; 462 res = -EROFS;
463 463
464out_path_release: 464out_path_release:
465 path_release(&nd); 465 path_put(&nd.path);
466out: 466out:
467 current->fsuid = old_fsuid; 467 current->fsuid = old_fsuid;
468 current->fsgid = old_fsgid; 468 current->fsgid = old_fsgid;
@@ -490,10 +490,10 @@ asmlinkage long sys_chdir(const char __user * filename)
490 if (error) 490 if (error)
491 goto dput_and_out; 491 goto dput_and_out;
492 492
493 set_fs_pwd(current->fs, nd.mnt, nd.dentry); 493 set_fs_pwd(current->fs, &nd.path);
494 494
495dput_and_out: 495dput_and_out:
496 path_release(&nd); 496 path_put(&nd.path);
497out: 497out:
498 return error; 498 return error;
499} 499}
@@ -501,9 +501,7 @@ out:
501asmlinkage long sys_fchdir(unsigned int fd) 501asmlinkage long sys_fchdir(unsigned int fd)
502{ 502{
503 struct file *file; 503 struct file *file;
504 struct dentry *dentry;
505 struct inode *inode; 504 struct inode *inode;
506 struct vfsmount *mnt;
507 int error; 505 int error;
508 506
509 error = -EBADF; 507 error = -EBADF;
@@ -511,9 +509,7 @@ asmlinkage long sys_fchdir(unsigned int fd)
511 if (!file) 509 if (!file)
512 goto out; 510 goto out;
513 511
514 dentry = file->f_path.dentry; 512 inode = file->f_path.dentry->d_inode;
515 mnt = file->f_path.mnt;
516 inode = dentry->d_inode;
517 513
518 error = -ENOTDIR; 514 error = -ENOTDIR;
519 if (!S_ISDIR(inode->i_mode)) 515 if (!S_ISDIR(inode->i_mode))
@@ -521,7 +517,7 @@ asmlinkage long sys_fchdir(unsigned int fd)
521 517
522 error = file_permission(file, MAY_EXEC); 518 error = file_permission(file, MAY_EXEC);
523 if (!error) 519 if (!error)
524 set_fs_pwd(current->fs, mnt, dentry); 520 set_fs_pwd(current->fs, &file->f_path);
525out_putf: 521out_putf:
526 fput(file); 522 fput(file);
527out: 523out:
@@ -545,11 +541,11 @@ asmlinkage long sys_chroot(const char __user * filename)
545 if (!capable(CAP_SYS_CHROOT)) 541 if (!capable(CAP_SYS_CHROOT))
546 goto dput_and_out; 542 goto dput_and_out;
547 543
548 set_fs_root(current->fs, nd.mnt, nd.dentry); 544 set_fs_root(current->fs, &nd.path);
549 set_fs_altroot(); 545 set_fs_altroot();
550 error = 0; 546 error = 0;
551dput_and_out: 547dput_and_out:
552 path_release(&nd); 548 path_put(&nd.path);
553out: 549out:
554 return error; 550 return error;
555} 551}
@@ -602,7 +598,7 @@ asmlinkage long sys_fchmodat(int dfd, const char __user *filename,
602 error = __user_walk_fd(dfd, filename, LOOKUP_FOLLOW, &nd); 598 error = __user_walk_fd(dfd, filename, LOOKUP_FOLLOW, &nd);
603 if (error) 599 if (error)
604 goto out; 600 goto out;
605 inode = nd.dentry->d_inode; 601 inode = nd.path.dentry->d_inode;
606 602
607 error = -EROFS; 603 error = -EROFS;
608 if (IS_RDONLY(inode)) 604 if (IS_RDONLY(inode))
@@ -617,11 +613,11 @@ asmlinkage long sys_fchmodat(int dfd, const char __user *filename,
617 mode = inode->i_mode; 613 mode = inode->i_mode;
618 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO); 614 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
619 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME; 615 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
620 error = notify_change(nd.dentry, &newattrs); 616 error = notify_change(nd.path.dentry, &newattrs);
621 mutex_unlock(&inode->i_mutex); 617 mutex_unlock(&inode->i_mutex);
622 618
623dput_and_out: 619dput_and_out:
624 path_release(&nd); 620 path_put(&nd.path);
625out: 621out:
626 return error; 622 return error;
627} 623}
@@ -675,8 +671,8 @@ asmlinkage long sys_chown(const char __user * filename, uid_t user, gid_t group)
675 error = user_path_walk(filename, &nd); 671 error = user_path_walk(filename, &nd);
676 if (error) 672 if (error)
677 goto out; 673 goto out;
678 error = chown_common(nd.dentry, user, group); 674 error = chown_common(nd.path.dentry, user, group);
679 path_release(&nd); 675 path_put(&nd.path);
680out: 676out:
681 return error; 677 return error;
682} 678}
@@ -695,8 +691,8 @@ asmlinkage long sys_fchownat(int dfd, const char __user *filename, uid_t user,
695 error = __user_walk_fd(dfd, filename, follow, &nd); 691 error = __user_walk_fd(dfd, filename, follow, &nd);
696 if (error) 692 if (error)
697 goto out; 693 goto out;
698 error = chown_common(nd.dentry, user, group); 694 error = chown_common(nd.path.dentry, user, group);
699 path_release(&nd); 695 path_put(&nd.path);
700out: 696out:
701 return error; 697 return error;
702} 698}
@@ -709,8 +705,8 @@ asmlinkage long sys_lchown(const char __user * filename, uid_t user, gid_t group
709 error = user_path_walk_link(filename, &nd); 705 error = user_path_walk_link(filename, &nd);
710 if (error) 706 if (error)
711 goto out; 707 goto out;
712 error = chown_common(nd.dentry, user, group); 708 error = chown_common(nd.path.dentry, user, group);
713 path_release(&nd); 709 path_put(&nd.path);
714out: 710out:
715 return error; 711 return error;
716} 712}
@@ -863,7 +859,7 @@ struct file *lookup_instantiate_filp(struct nameidata *nd, struct dentry *dentry
863 goto out; 859 goto out;
864 if (IS_ERR(dentry)) 860 if (IS_ERR(dentry))
865 goto out_err; 861 goto out_err;
866 nd->intent.open.file = __dentry_open(dget(dentry), mntget(nd->mnt), 862 nd->intent.open.file = __dentry_open(dget(dentry), mntget(nd->path.mnt),
867 nd->intent.open.flags - 1, 863 nd->intent.open.flags - 1,
868 nd->intent.open.file, 864 nd->intent.open.file,
869 open); 865 open);
@@ -891,9 +887,10 @@ struct file *nameidata_to_filp(struct nameidata *nd, int flags)
891 filp = nd->intent.open.file; 887 filp = nd->intent.open.file;
892 /* Has the filesystem initialised the file for us? */ 888 /* Has the filesystem initialised the file for us? */
893 if (filp->f_path.dentry == NULL) 889 if (filp->f_path.dentry == NULL)
894 filp = __dentry_open(nd->dentry, nd->mnt, flags, filp, NULL); 890 filp = __dentry_open(nd->path.dentry, nd->path.mnt, flags, filp,
891 NULL);
895 else 892 else
896 path_release(nd); 893 path_put(&nd->path);
897 return filp; 894 return filp;
898} 895}
899 896
@@ -991,7 +988,7 @@ static void __put_unused_fd(struct files_struct *files, unsigned int fd)
991 files->next_fd = fd; 988 files->next_fd = fd;
992} 989}
993 990
994void fastcall put_unused_fd(unsigned int fd) 991void put_unused_fd(unsigned int fd)
995{ 992{
996 struct files_struct *files = current->files; 993 struct files_struct *files = current->files;
997 spin_lock(&files->file_lock); 994 spin_lock(&files->file_lock);
@@ -1014,7 +1011,7 @@ EXPORT_SYMBOL(put_unused_fd);
1014 * will follow. 1011 * will follow.
1015 */ 1012 */
1016 1013
1017void fastcall fd_install(unsigned int fd, struct file * file) 1014void fd_install(unsigned int fd, struct file *file)
1018{ 1015{
1019 struct files_struct *files = current->files; 1016 struct files_struct *files = current->files;
1020 struct fdtable *fdt; 1017 struct fdtable *fdt;
@@ -1061,7 +1058,6 @@ asmlinkage long sys_open(const char __user *filename, int flags, int mode)
1061 prevent_tail_call(ret); 1058 prevent_tail_call(ret);
1062 return ret; 1059 return ret;
1063} 1060}
1064EXPORT_UNUSED_SYMBOL_GPL(sys_open); /* To be deleted for 2.6.25 */
1065 1061
1066asmlinkage long sys_openat(int dfd, const char __user *filename, int flags, 1062asmlinkage long sys_openat(int dfd, const char __user *filename, int flags,
1067 int mode) 1063 int mode)
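
The fs/open.c hunks above all follow one mechanical pattern: the separate nd.dentry/nd.mnt fields of struct nameidata become an embedded struct path, and path_release(&nd) becomes path_put(&nd.path). A minimal sketch of that calling convention, mirroring the sys_statfs() shape shown above (illustrative only; example_statfs is not part of the diff):

	#include <linux/fs.h>
	#include <linux/namei.h>
	#include <linux/statfs.h>

	/* Sketch: old nd.dentry / path_release(&nd) usage rewritten in the
	 * nd.path / path_put(&nd.path) style used throughout the diff above. */
	static long example_statfs(const char __user *pathname, struct statfs *st)
	{
		struct nameidata nd;
		int error;

		error = user_path_walk(pathname, &nd);
		if (error)
			return error;

		/* dentry and vfsmount now live together in nd.path */
		error = vfs_statfs_native(nd.path.dentry, st);

		/* one call drops both the dentry and the mount reference */
		path_put(&nd.path);
		return error;
	}
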
diff --git a/fs/openpromfs/inode.c b/fs/openpromfs/inode.c
index 6b7ff1618945..d17b4fd204e1 100644
--- a/fs/openpromfs/inode.c
+++ b/fs/openpromfs/inode.c
@@ -38,6 +38,8 @@ struct op_inode_info {
38 union op_inode_data u; 38 union op_inode_data u;
39}; 39};
40 40
41static struct inode *openprom_iget(struct super_block *sb, ino_t ino);
42
41static inline struct op_inode_info *OP_I(struct inode *inode) 43static inline struct op_inode_info *OP_I(struct inode *inode)
42{ 44{
43 return container_of(inode, struct op_inode_info, vfs_inode); 45 return container_of(inode, struct op_inode_info, vfs_inode);
@@ -226,10 +228,10 @@ static struct dentry *openpromfs_lookup(struct inode *dir, struct dentry *dentry
226 return ERR_PTR(-ENOENT); 228 return ERR_PTR(-ENOENT);
227 229
228found: 230found:
229 inode = iget(dir->i_sb, ino); 231 inode = openprom_iget(dir->i_sb, ino);
230 mutex_unlock(&op_mutex); 232 mutex_unlock(&op_mutex);
231 if (!inode) 233 if (IS_ERR(inode))
232 return ERR_PTR(-EINVAL); 234 return ERR_CAST(inode);
233 ent_oi = OP_I(inode); 235 ent_oi = OP_I(inode);
234 ent_oi->type = ent_type; 236 ent_oi->type = ent_type;
235 ent_oi->u = ent_data; 237 ent_oi->u = ent_data;
@@ -348,14 +350,23 @@ static void openprom_destroy_inode(struct inode *inode)
348 kmem_cache_free(op_inode_cachep, OP_I(inode)); 350 kmem_cache_free(op_inode_cachep, OP_I(inode));
349} 351}
350 352
351static void openprom_read_inode(struct inode * inode) 353static struct inode *openprom_iget(struct super_block *sb, ino_t ino)
352{ 354{
353 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; 355 struct inode *inode;
354 if (inode->i_ino == OPENPROM_ROOT_INO) { 356
355 inode->i_op = &openprom_inode_operations; 357 inode = iget_locked(sb, ino);
356 inode->i_fop = &openprom_operations; 358 if (!inode)
357 inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO; 359 return ERR_PTR(-ENOMEM);
360 if (inode->i_state & I_NEW) {
361 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
362 if (inode->i_ino == OPENPROM_ROOT_INO) {
363 inode->i_op = &openprom_inode_operations;
364 inode->i_fop = &openprom_operations;
365 inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO;
366 }
367 unlock_new_inode(inode);
358 } 368 }
369 return inode;
359} 370}
360 371
361static int openprom_remount(struct super_block *sb, int *flags, char *data) 372static int openprom_remount(struct super_block *sb, int *flags, char *data)
@@ -367,7 +378,6 @@ static int openprom_remount(struct super_block *sb, int *flags, char *data)
367static const struct super_operations openprom_sops = { 378static const struct super_operations openprom_sops = {
368 .alloc_inode = openprom_alloc_inode, 379 .alloc_inode = openprom_alloc_inode,
369 .destroy_inode = openprom_destroy_inode, 380 .destroy_inode = openprom_destroy_inode,
370 .read_inode = openprom_read_inode,
371 .statfs = simple_statfs, 381 .statfs = simple_statfs,
372 .remount_fs = openprom_remount, 382 .remount_fs = openprom_remount,
373}; 383};
@@ -376,6 +386,7 @@ static int openprom_fill_super(struct super_block *s, void *data, int silent)
376{ 386{
377 struct inode *root_inode; 387 struct inode *root_inode;
378 struct op_inode_info *oi; 388 struct op_inode_info *oi;
389 int ret;
379 390
380 s->s_flags |= MS_NOATIME; 391 s->s_flags |= MS_NOATIME;
381 s->s_blocksize = 1024; 392 s->s_blocksize = 1024;
@@ -383,9 +394,11 @@ static int openprom_fill_super(struct super_block *s, void *data, int silent)
383 s->s_magic = OPENPROM_SUPER_MAGIC; 394 s->s_magic = OPENPROM_SUPER_MAGIC;
384 s->s_op = &openprom_sops; 395 s->s_op = &openprom_sops;
385 s->s_time_gran = 1; 396 s->s_time_gran = 1;
386 root_inode = iget(s, OPENPROM_ROOT_INO); 397 root_inode = openprom_iget(s, OPENPROM_ROOT_INO);
387 if (!root_inode) 398 if (IS_ERR(root_inode)) {
399 ret = PTR_ERR(root_inode);
388 goto out_no_root; 400 goto out_no_root;
401 }
389 402
390 oi = OP_I(root_inode); 403 oi = OP_I(root_inode);
391 oi->type = op_inode_node; 404 oi->type = op_inode_node;
@@ -393,13 +406,15 @@ static int openprom_fill_super(struct super_block *s, void *data, int silent)
393 406
394 s->s_root = d_alloc_root(root_inode); 407 s->s_root = d_alloc_root(root_inode);
395 if (!s->s_root) 408 if (!s->s_root)
396 goto out_no_root; 409 goto out_no_root_dentry;
397 return 0; 410 return 0;
398 411
412out_no_root_dentry:
413 iput(root_inode);
414 ret = -ENOMEM;
399out_no_root: 415out_no_root:
400 printk("openprom_fill_super: get root inode failed\n"); 416 printk("openprom_fill_super: get root inode failed\n");
401 iput(root_inode); 417 return ret;
402 return -ENOMEM;
403} 418}
404 419
405static int openprom_get_sb(struct file_system_type *fs_type, 420static int openprom_get_sb(struct file_system_type *fs_type,
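
The openpromfs change is one instance of the tree-wide removal of ->read_inode(): the filesystem gains a private iget helper built on iget_locked(), initialises the inode only when I_NEW is set, and reports failure with ERR_PTR() so callers use IS_ERR()/ERR_CAST() instead of checking for NULL. A generic sketch of that shape (example_iget and the mode shown are placeholders, not openpromfs code):

	#include <linux/fs.h>
	#include <linux/err.h>

	/* Sketch of the iget_locked() pattern that replaces ->read_inode(). */
	static struct inode *example_iget(struct super_block *sb, unsigned long ino)
	{
		struct inode *inode;

		inode = iget_locked(sb, ino);
		if (!inode)
			return ERR_PTR(-ENOMEM);
		if (inode->i_state & I_NEW) {
			/* first lookup of this inode: initialise it here */
			inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
			inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO;
			unlock_new_inode(inode);
		}
		return inode;
	}
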
diff --git a/fs/partitions/Kconfig b/fs/partitions/Kconfig
index a99acd8de353..cb5f0a3f1b03 100644
--- a/fs/partitions/Kconfig
+++ b/fs/partitions/Kconfig
@@ -198,7 +198,7 @@ config LDM_DEBUG
198 198
199config SGI_PARTITION 199config SGI_PARTITION
200 bool "SGI partition support" if PARTITION_ADVANCED 200 bool "SGI partition support" if PARTITION_ADVANCED
201 default y if (SGI_IP22 || SGI_IP27 || ((MACH_JAZZ || SNI_RM) && !CPU_LITTLE_ENDIAN)) 201 default y if DEFAULT_SGI_PARTITION
202 help 202 help
203 Say Y here if you would like to be able to read the hard disk 203 Say Y here if you would like to be able to read the hard disk
204 partition table format used by SGI machines. 204 partition table format used by SGI machines.
diff --git a/fs/partitions/check.c b/fs/partitions/check.c
index 739da701ae7b..03f808c5b79d 100644
--- a/fs/partitions/check.c
+++ b/fs/partitions/check.c
@@ -18,6 +18,7 @@
18#include <linux/fs.h> 18#include <linux/fs.h>
19#include <linux/kmod.h> 19#include <linux/kmod.h>
20#include <linux/ctype.h> 20#include <linux/ctype.h>
21#include <linux/genhd.h>
21 22
22#include "check.h" 23#include "check.h"
23 24
@@ -215,9 +216,25 @@ static ssize_t part_stat_show(struct device *dev,
215{ 216{
216 struct hd_struct *p = dev_to_part(dev); 217 struct hd_struct *p = dev_to_part(dev);
217 218
218 return sprintf(buf, "%8u %8llu %8u %8llu\n", 219 preempt_disable();
219 p->ios[0], (unsigned long long)p->sectors[0], 220 part_round_stats(p);
220 p->ios[1], (unsigned long long)p->sectors[1]); 221 preempt_enable();
222 return sprintf(buf,
223 "%8lu %8lu %8llu %8u "
224 "%8lu %8lu %8llu %8u "
225 "%8u %8u %8u"
226 "\n",
227 part_stat_read(p, ios[READ]),
228 part_stat_read(p, merges[READ]),
229 (unsigned long long)part_stat_read(p, sectors[READ]),
230 jiffies_to_msecs(part_stat_read(p, ticks[READ])),
231 part_stat_read(p, ios[WRITE]),
232 part_stat_read(p, merges[WRITE]),
233 (unsigned long long)part_stat_read(p, sectors[WRITE]),
234 jiffies_to_msecs(part_stat_read(p, ticks[WRITE])),
235 p->in_flight,
236 jiffies_to_msecs(part_stat_read(p, io_ticks)),
237 jiffies_to_msecs(part_stat_read(p, time_in_queue)));
221} 238}
222 239
223#ifdef CONFIG_FAIL_MAKE_REQUEST 240#ifdef CONFIG_FAIL_MAKE_REQUEST
@@ -273,6 +290,7 @@ static struct attribute_group *part_attr_groups[] = {
273static void part_release(struct device *dev) 290static void part_release(struct device *dev)
274{ 291{
275 struct hd_struct *p = dev_to_part(dev); 292 struct hd_struct *p = dev_to_part(dev);
293 free_part_stats(p);
276 kfree(p); 294 kfree(p);
277} 295}
278 296
@@ -312,13 +330,20 @@ void delete_partition(struct gendisk *disk, int part)
312 disk->part[part-1] = NULL; 330 disk->part[part-1] = NULL;
313 p->start_sect = 0; 331 p->start_sect = 0;
314 p->nr_sects = 0; 332 p->nr_sects = 0;
315 p->ios[0] = p->ios[1] = 0; 333 part_stat_set_all(p, 0);
316 p->sectors[0] = p->sectors[1] = 0;
317 kobject_put(p->holder_dir); 334 kobject_put(p->holder_dir);
318 device_del(&p->dev); 335 device_del(&p->dev);
319 put_device(&p->dev); 336 put_device(&p->dev);
320} 337}
321 338
339static ssize_t whole_disk_show(struct device *dev,
340 struct device_attribute *attr, char *buf)
341{
342 return 0;
343}
344static DEVICE_ATTR(whole_disk, S_IRUSR | S_IRGRP | S_IROTH,
345 whole_disk_show, NULL);
346
322void add_partition(struct gendisk *disk, int part, sector_t start, sector_t len, int flags) 347void add_partition(struct gendisk *disk, int part, sector_t start, sector_t len, int flags)
323{ 348{
324 struct hd_struct *p; 349 struct hd_struct *p;
@@ -328,6 +353,10 @@ void add_partition(struct gendisk *disk, int part, sector_t start, sector_t len,
328 if (!p) 353 if (!p)
329 return; 354 return;
330 355
356 if (!init_part_stats(p)) {
357 kfree(p);
358 return;
359 }
331 p->start_sect = start; 360 p->start_sect = start;
332 p->nr_sects = len; 361 p->nr_sects = len;
333 p->partno = part; 362 p->partno = part;
@@ -352,13 +381,8 @@ void add_partition(struct gendisk *disk, int part, sector_t start, sector_t len,
352 device_add(&p->dev); 381 device_add(&p->dev);
353 partition_sysfs_add_subdir(p); 382 partition_sysfs_add_subdir(p);
354 p->dev.uevent_suppress = 0; 383 p->dev.uevent_suppress = 0;
355 if (flags & ADDPART_FLAG_WHOLEDISK) { 384 if (flags & ADDPART_FLAG_WHOLEDISK)
356 static struct attribute addpartattr = { 385 err = device_create_file(&p->dev, &dev_attr_whole_disk);
357 .name = "whole_disk",
358 .mode = S_IRUSR | S_IRGRP | S_IROTH,
359 };
360 err = sysfs_create_file(&p->dev.kobj, &addpartattr);
361 }
362 386
363 /* suppress uevent if the disk supresses it */ 387 /* suppress uevent if the disk supresses it */
364 if (!disk->dev.uevent_suppress) 388 if (!disk->dev.uevent_suppress)
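
Besides switching the partition statistics to the part_stat_* accessors, the check.c hunks replace the hand-rolled struct attribute for "whole_disk" with a regular device attribute. A sketch of that DEVICE_ATTR() pattern (example_* names are placeholders):

	#include <linux/device.h>

	/* Sketch of the DEVICE_ATTR() pattern now used for "whole_disk":
	 * a show() callback plus a static struct device_attribute, registered
	 * with device_create_file() instead of sysfs_create_file(). */
	static ssize_t example_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
	{
		return sprintf(buf, "%d\n", 0);
	}
	static DEVICE_ATTR(example, S_IRUGO, example_show, NULL);

	/* at registration time:
	 *	err = device_create_file(dev, &dev_attr_example);
	 * and on teardown:
	 *	device_remove_file(dev, &dev_attr_example);
	 */
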
diff --git a/fs/pipe.c b/fs/pipe.c
index e66ec48e95d8..3c185b6527bc 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -171,7 +171,7 @@ static void anon_pipe_buf_release(struct pipe_inode_info *pipe,
171 * 171 *
172 * Description: 172 * Description:
173 * This function returns a kernel virtual address mapping for the 173 * This function returns a kernel virtual address mapping for the
174 * passed in @pipe_buffer. If @atomic is set, an atomic map is provided 174 * pipe_buffer passed in @buf. If @atomic is set, an atomic map is provided
175 * and the caller has to be careful not to fault before calling 175 * and the caller has to be careful not to fault before calling
176 * the unmap function. 176 * the unmap function.
177 * 177 *
@@ -208,15 +208,15 @@ void generic_pipe_buf_unmap(struct pipe_inode_info *pipe,
208} 208}
209 209
210/** 210/**
211 * generic_pipe_buf_steal - attempt to take ownership of a @pipe_buffer 211 * generic_pipe_buf_steal - attempt to take ownership of a &pipe_buffer
212 * @pipe: the pipe that the buffer belongs to 212 * @pipe: the pipe that the buffer belongs to
213 * @buf: the buffer to attempt to steal 213 * @buf: the buffer to attempt to steal
214 * 214 *
215 * Description: 215 * Description:
216 * This function attempts to steal the @struct page attached to 216 * This function attempts to steal the &struct page attached to
217 * @buf. If successful, this function returns 0 and returns with 217 * @buf. If successful, this function returns 0 and returns with
218 * the page locked. The caller may then reuse the page for whatever 218 * the page locked. The caller may then reuse the page for whatever
219 * he wishes, the typical use is insertion into a different file 219 * he wishes; the typical use is insertion into a different file
220 * page cache. 220 * page cache.
221 */ 221 */
222int generic_pipe_buf_steal(struct pipe_inode_info *pipe, 222int generic_pipe_buf_steal(struct pipe_inode_info *pipe,
@@ -238,7 +238,7 @@ int generic_pipe_buf_steal(struct pipe_inode_info *pipe,
238} 238}
239 239
240/** 240/**
241 * generic_pipe_buf_get - get a reference to a @struct pipe_buffer 241 * generic_pipe_buf_get - get a reference to a &struct pipe_buffer
242 * @pipe: the pipe that the buffer belongs to 242 * @pipe: the pipe that the buffer belongs to
243 * @buf: the buffer to get a reference to 243 * @buf: the buffer to get a reference to
244 * 244 *
@@ -576,9 +576,7 @@ bad_pipe_w(struct file *filp, const char __user *buf, size_t count,
576 return -EBADF; 576 return -EBADF;
577} 577}
578 578
579static int 579static long pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
580pipe_ioctl(struct inode *pino, struct file *filp,
581 unsigned int cmd, unsigned long arg)
582{ 580{
583 struct inode *inode = filp->f_path.dentry->d_inode; 581 struct inode *inode = filp->f_path.dentry->d_inode;
584 struct pipe_inode_info *pipe; 582 struct pipe_inode_info *pipe;
@@ -785,7 +783,7 @@ const struct file_operations read_fifo_fops = {
785 .aio_read = pipe_read, 783 .aio_read = pipe_read,
786 .write = bad_pipe_w, 784 .write = bad_pipe_w,
787 .poll = pipe_poll, 785 .poll = pipe_poll,
788 .ioctl = pipe_ioctl, 786 .unlocked_ioctl = pipe_ioctl,
789 .open = pipe_read_open, 787 .open = pipe_read_open,
790 .release = pipe_read_release, 788 .release = pipe_read_release,
791 .fasync = pipe_read_fasync, 789 .fasync = pipe_read_fasync,
@@ -797,7 +795,7 @@ const struct file_operations write_fifo_fops = {
797 .write = do_sync_write, 795 .write = do_sync_write,
798 .aio_write = pipe_write, 796 .aio_write = pipe_write,
799 .poll = pipe_poll, 797 .poll = pipe_poll,
800 .ioctl = pipe_ioctl, 798 .unlocked_ioctl = pipe_ioctl,
801 .open = pipe_write_open, 799 .open = pipe_write_open,
802 .release = pipe_write_release, 800 .release = pipe_write_release,
803 .fasync = pipe_write_fasync, 801 .fasync = pipe_write_fasync,
@@ -810,7 +808,7 @@ const struct file_operations rdwr_fifo_fops = {
810 .write = do_sync_write, 808 .write = do_sync_write,
811 .aio_write = pipe_write, 809 .aio_write = pipe_write,
812 .poll = pipe_poll, 810 .poll = pipe_poll,
813 .ioctl = pipe_ioctl, 811 .unlocked_ioctl = pipe_ioctl,
814 .open = pipe_rdwr_open, 812 .open = pipe_rdwr_open,
815 .release = pipe_rdwr_release, 813 .release = pipe_rdwr_release,
816 .fasync = pipe_rdwr_fasync, 814 .fasync = pipe_rdwr_fasync,
@@ -822,7 +820,7 @@ static const struct file_operations read_pipe_fops = {
822 .aio_read = pipe_read, 820 .aio_read = pipe_read,
823 .write = bad_pipe_w, 821 .write = bad_pipe_w,
824 .poll = pipe_poll, 822 .poll = pipe_poll,
825 .ioctl = pipe_ioctl, 823 .unlocked_ioctl = pipe_ioctl,
826 .open = pipe_read_open, 824 .open = pipe_read_open,
827 .release = pipe_read_release, 825 .release = pipe_read_release,
828 .fasync = pipe_read_fasync, 826 .fasync = pipe_read_fasync,
@@ -834,7 +832,7 @@ static const struct file_operations write_pipe_fops = {
834 .write = do_sync_write, 832 .write = do_sync_write,
835 .aio_write = pipe_write, 833 .aio_write = pipe_write,
836 .poll = pipe_poll, 834 .poll = pipe_poll,
837 .ioctl = pipe_ioctl, 835 .unlocked_ioctl = pipe_ioctl,
838 .open = pipe_write_open, 836 .open = pipe_write_open,
839 .release = pipe_write_release, 837 .release = pipe_write_release,
840 .fasync = pipe_write_fasync, 838 .fasync = pipe_write_fasync,
@@ -847,7 +845,7 @@ static const struct file_operations rdwr_pipe_fops = {
847 .write = do_sync_write, 845 .write = do_sync_write,
848 .aio_write = pipe_write, 846 .aio_write = pipe_write,
849 .poll = pipe_poll, 847 .poll = pipe_poll,
850 .ioctl = pipe_ioctl, 848 .unlocked_ioctl = pipe_ioctl,
851 .open = pipe_rdwr_open, 849 .open = pipe_rdwr_open,
852 .release = pipe_rdwr_release, 850 .release = pipe_rdwr_release,
853 .fasync = pipe_rdwr_fasync, 851 .fasync = pipe_rdwr_fasync,
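
The pipe.c change converts pipe_ioctl() from the old ->ioctl entry point (called under the big kernel lock, passed the inode, returning int) to ->unlocked_ioctl (passed only the file, returning long). A sketch of the converted shape, with placeholder names:

	#include <linux/fs.h>

	/* Sketch of the ->ioctl to ->unlocked_ioctl conversion shown above.
	 * The inode is derived from the file, and any locking is explicit. */
	static long example_unlocked_ioctl(struct file *filp, unsigned int cmd,
					   unsigned long arg)
	{
		struct inode *inode = filp->f_path.dentry->d_inode;

		/* ... per-command handling goes here ... */
		(void)inode;
		return -ENOIOCTLCMD;
	}

	/* wired into the file_operations table as
	 *	.unlocked_ioctl = example_unlocked_ioctl,
	 * in place of the old .ioctl member. */
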
diff --git a/fs/pnode.c b/fs/pnode.c
index 89940f243fc2..05ba692bc540 100644
--- a/fs/pnode.c
+++ b/fs/pnode.c
@@ -83,6 +83,8 @@ void change_mnt_propagation(struct vfsmount *mnt, int type)
83 mnt->mnt_master = NULL; 83 mnt->mnt_master = NULL;
84 if (type == MS_UNBINDABLE) 84 if (type == MS_UNBINDABLE)
85 mnt->mnt_flags |= MNT_UNBINDABLE; 85 mnt->mnt_flags |= MNT_UNBINDABLE;
86 else
87 mnt->mnt_flags &= ~MNT_UNBINDABLE;
86 } 88 }
87} 89}
88 90
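
The pnode.c hunk makes change_mnt_propagation() clear MNT_UNBINDABLE when the propagation type changes to anything other than MS_UNBINDABLE, instead of leaving a stale flag behind. The flag handling is equivalent to this small sketch (example_set_unbindable is a placeholder name):

	#include <linux/fs.h>
	#include <linux/mount.h>

	/* Sketch: MNT_UNBINDABLE is set only for MS_UNBINDABLE and cleared
	 * again on any other propagation change. */
	static void example_set_unbindable(struct vfsmount *mnt, int type)
	{
		if (type == MS_UNBINDABLE)
			mnt->mnt_flags |= MNT_UNBINDABLE;
		else
			mnt->mnt_flags &= ~MNT_UNBINDABLE;
	}
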
diff --git a/fs/proc/array.c b/fs/proc/array.c
index b380313092bd..07d6c4853fe8 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -77,6 +77,7 @@
77#include <linux/cpuset.h> 77#include <linux/cpuset.h>
78#include <linux/rcupdate.h> 78#include <linux/rcupdate.h>
79#include <linux/delayacct.h> 79#include <linux/delayacct.h>
80#include <linux/seq_file.h>
80#include <linux/pid_namespace.h> 81#include <linux/pid_namespace.h>
81 82
82#include <asm/pgtable.h> 83#include <asm/pgtable.h>
@@ -88,18 +89,21 @@
88do { memcpy(buffer, string, strlen(string)); \ 89do { memcpy(buffer, string, strlen(string)); \
89 buffer += strlen(string); } while (0) 90 buffer += strlen(string); } while (0)
90 91
91static inline char *task_name(struct task_struct *p, char *buf) 92static inline void task_name(struct seq_file *m, struct task_struct *p)
92{ 93{
93 int i; 94 int i;
95 char *buf, *end;
94 char *name; 96 char *name;
95 char tcomm[sizeof(p->comm)]; 97 char tcomm[sizeof(p->comm)];
96 98
97 get_task_comm(tcomm, p); 99 get_task_comm(tcomm, p);
98 100
99 ADDBUF(buf, "Name:\t"); 101 seq_printf(m, "Name:\t");
102 end = m->buf + m->size;
103 buf = m->buf + m->count;
100 name = tcomm; 104 name = tcomm;
101 i = sizeof(tcomm); 105 i = sizeof(tcomm);
102 do { 106 while (i && (buf < end)) {
103 unsigned char c = *name; 107 unsigned char c = *name;
104 name++; 108 name++;
105 i--; 109 i--;
@@ -107,20 +111,21 @@ static inline char *task_name(struct task_struct *p, char *buf)
107 if (!c) 111 if (!c)
108 break; 112 break;
109 if (c == '\\') { 113 if (c == '\\') {
110 buf[1] = c; 114 buf++;
111 buf += 2; 115 if (buf < end)
116 *buf++ = c;
112 continue; 117 continue;
113 } 118 }
114 if (c == '\n') { 119 if (c == '\n') {
115 buf[0] = '\\'; 120 *buf++ = '\\';
116 buf[1] = 'n'; 121 if (buf < end)
117 buf += 2; 122 *buf++ = 'n';
118 continue; 123 continue;
119 } 124 }
120 buf++; 125 buf++;
121 } while (i); 126 }
122 *buf = '\n'; 127 m->count = buf - m->buf;
123 return buf+1; 128 seq_printf(m, "\n");
124} 129}
125 130
126/* 131/*
@@ -151,21 +156,20 @@ static inline const char *get_task_state(struct task_struct *tsk)
151 return *p; 156 return *p;
152} 157}
153 158
154static inline char *task_state(struct task_struct *p, char *buffer) 159static inline void task_state(struct seq_file *m, struct pid_namespace *ns,
160 struct pid *pid, struct task_struct *p)
155{ 161{
156 struct group_info *group_info; 162 struct group_info *group_info;
157 int g; 163 int g;
158 struct fdtable *fdt = NULL; 164 struct fdtable *fdt = NULL;
159 struct pid_namespace *ns;
160 pid_t ppid, tpid; 165 pid_t ppid, tpid;
161 166
162 ns = current->nsproxy->pid_ns;
163 rcu_read_lock(); 167 rcu_read_lock();
164 ppid = pid_alive(p) ? 168 ppid = pid_alive(p) ?
165 task_tgid_nr_ns(rcu_dereference(p->real_parent), ns) : 0; 169 task_tgid_nr_ns(rcu_dereference(p->real_parent), ns) : 0;
166 tpid = pid_alive(p) && p->ptrace ? 170 tpid = pid_alive(p) && p->ptrace ?
167 task_pid_nr_ns(rcu_dereference(p->parent), ns) : 0; 171 task_pid_nr_ns(rcu_dereference(p->parent), ns) : 0;
168 buffer += sprintf(buffer, 172 seq_printf(m,
169 "State:\t%s\n" 173 "State:\t%s\n"
170 "Tgid:\t%d\n" 174 "Tgid:\t%d\n"
171 "Pid:\t%d\n" 175 "Pid:\t%d\n"
@@ -175,7 +179,7 @@ static inline char *task_state(struct task_struct *p, char *buffer)
175 "Gid:\t%d\t%d\t%d\t%d\n", 179 "Gid:\t%d\t%d\t%d\t%d\n",
176 get_task_state(p), 180 get_task_state(p),
177 task_tgid_nr_ns(p, ns), 181 task_tgid_nr_ns(p, ns),
178 task_pid_nr_ns(p, ns), 182 pid_nr_ns(pid, ns),
179 ppid, tpid, 183 ppid, tpid,
180 p->uid, p->euid, p->suid, p->fsuid, 184 p->uid, p->euid, p->suid, p->fsuid,
181 p->gid, p->egid, p->sgid, p->fsgid); 185 p->gid, p->egid, p->sgid, p->fsgid);
@@ -183,7 +187,7 @@ static inline char *task_state(struct task_struct *p, char *buffer)
183 task_lock(p); 187 task_lock(p);
184 if (p->files) 188 if (p->files)
185 fdt = files_fdtable(p->files); 189 fdt = files_fdtable(p->files);
186 buffer += sprintf(buffer, 190 seq_printf(m,
187 "FDSize:\t%d\n" 191 "FDSize:\t%d\n"
188 "Groups:\t", 192 "Groups:\t",
189 fdt ? fdt->max_fds : 0); 193 fdt ? fdt->max_fds : 0);
@@ -194,20 +198,18 @@ static inline char *task_state(struct task_struct *p, char *buffer)
194 task_unlock(p); 198 task_unlock(p);
195 199
196 for (g = 0; g < min(group_info->ngroups, NGROUPS_SMALL); g++) 200 for (g = 0; g < min(group_info->ngroups, NGROUPS_SMALL); g++)
197 buffer += sprintf(buffer, "%d ", GROUP_AT(group_info, g)); 201 seq_printf(m, "%d ", GROUP_AT(group_info, g));
198 put_group_info(group_info); 202 put_group_info(group_info);
199 203
200 buffer += sprintf(buffer, "\n"); 204 seq_printf(m, "\n");
201 return buffer;
202} 205}
203 206
204static char *render_sigset_t(const char *header, sigset_t *set, char *buffer) 207static void render_sigset_t(struct seq_file *m, const char *header,
208 sigset_t *set)
205{ 209{
206 int i, len; 210 int i;
207 211
208 len = strlen(header); 212 seq_printf(m, "%s", header);
209 memcpy(buffer, header, len);
210 buffer += len;
211 213
212 i = _NSIG; 214 i = _NSIG;
213 do { 215 do {
@@ -218,12 +220,10 @@ static char *render_sigset_t(const char *header, sigset_t *set, char *buffer)
218 if (sigismember(set, i+2)) x |= 2; 220 if (sigismember(set, i+2)) x |= 2;
219 if (sigismember(set, i+3)) x |= 4; 221 if (sigismember(set, i+3)) x |= 4;
220 if (sigismember(set, i+4)) x |= 8; 222 if (sigismember(set, i+4)) x |= 8;
221 *buffer++ = (x < 10 ? '0' : 'a' - 10) + x; 223 seq_printf(m, "%x", x);
222 } while (i >= 4); 224 } while (i >= 4);
223 225
224 *buffer++ = '\n'; 226 seq_printf(m, "\n");
225 *buffer = 0;
226 return buffer;
227} 227}
228 228
229static void collect_sigign_sigcatch(struct task_struct *p, sigset_t *ign, 229static void collect_sigign_sigcatch(struct task_struct *p, sigset_t *ign,
@@ -241,7 +241,7 @@ static void collect_sigign_sigcatch(struct task_struct *p, sigset_t *ign,
241 } 241 }
242} 242}
243 243
244static inline char *task_sig(struct task_struct *p, char *buffer) 244static inline void task_sig(struct seq_file *m, struct task_struct *p)
245{ 245{
246 unsigned long flags; 246 unsigned long flags;
247 sigset_t pending, shpending, blocked, ignored, caught; 247 sigset_t pending, shpending, blocked, ignored, caught;
@@ -268,58 +268,66 @@ static inline char *task_sig(struct task_struct *p, char *buffer)
268 } 268 }
269 rcu_read_unlock(); 269 rcu_read_unlock();
270 270
271 buffer += sprintf(buffer, "Threads:\t%d\n", num_threads); 271 seq_printf(m, "Threads:\t%d\n", num_threads);
272 buffer += sprintf(buffer, "SigQ:\t%lu/%lu\n", qsize, qlim); 272 seq_printf(m, "SigQ:\t%lu/%lu\n", qsize, qlim);
273 273
274 /* render them all */ 274 /* render them all */
275 buffer = render_sigset_t("SigPnd:\t", &pending, buffer); 275 render_sigset_t(m, "SigPnd:\t", &pending);
276 buffer = render_sigset_t("ShdPnd:\t", &shpending, buffer); 276 render_sigset_t(m, "ShdPnd:\t", &shpending);
277 buffer = render_sigset_t("SigBlk:\t", &blocked, buffer); 277 render_sigset_t(m, "SigBlk:\t", &blocked);
278 buffer = render_sigset_t("SigIgn:\t", &ignored, buffer); 278 render_sigset_t(m, "SigIgn:\t", &ignored);
279 buffer = render_sigset_t("SigCgt:\t", &caught, buffer); 279 render_sigset_t(m, "SigCgt:\t", &caught);
280}
280 281
281 return buffer; 282static void render_cap_t(struct seq_file *m, const char *header,
283 kernel_cap_t *a)
284{
285 unsigned __capi;
286
287 seq_printf(m, "%s", header);
288 CAP_FOR_EACH_U32(__capi) {
289 seq_printf(m, "%08x",
290 a->cap[(_LINUX_CAPABILITY_U32S-1) - __capi]);
291 }
292 seq_printf(m, "\n");
282} 293}
283 294
284static inline char *task_cap(struct task_struct *p, char *buffer) 295static inline void task_cap(struct seq_file *m, struct task_struct *p)
285{ 296{
286 return buffer + sprintf(buffer, "CapInh:\t%016x\n" 297 render_cap_t(m, "CapInh:\t", &p->cap_inheritable);
287 "CapPrm:\t%016x\n" 298 render_cap_t(m, "CapPrm:\t", &p->cap_permitted);
288 "CapEff:\t%016x\n", 299 render_cap_t(m, "CapEff:\t", &p->cap_effective);
289 cap_t(p->cap_inheritable),
290 cap_t(p->cap_permitted),
291 cap_t(p->cap_effective));
292} 300}
293 301
294static inline char *task_context_switch_counts(struct task_struct *p, 302static inline void task_context_switch_counts(struct seq_file *m,
295 char *buffer) 303 struct task_struct *p)
296{ 304{
297 return buffer + sprintf(buffer, "voluntary_ctxt_switches:\t%lu\n" 305 seq_printf(m, "voluntary_ctxt_switches:\t%lu\n"
298 "nonvoluntary_ctxt_switches:\t%lu\n", 306 "nonvoluntary_ctxt_switches:\t%lu\n",
299 p->nvcsw, 307 p->nvcsw,
300 p->nivcsw); 308 p->nivcsw);
301} 309}
302 310
303int proc_pid_status(struct task_struct *task, char *buffer) 311int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
312 struct pid *pid, struct task_struct *task)
304{ 313{
305 char *orig = buffer;
306 struct mm_struct *mm = get_task_mm(task); 314 struct mm_struct *mm = get_task_mm(task);
307 315
308 buffer = task_name(task, buffer); 316 task_name(m, task);
309 buffer = task_state(task, buffer); 317 task_state(m, ns, pid, task);
310 318
311 if (mm) { 319 if (mm) {
312 buffer = task_mem(mm, buffer); 320 task_mem(m, mm);
313 mmput(mm); 321 mmput(mm);
314 } 322 }
315 buffer = task_sig(task, buffer); 323 task_sig(m, task);
316 buffer = task_cap(task, buffer); 324 task_cap(m, task);
317 buffer = cpuset_task_status_allowed(task, buffer); 325 cpuset_task_status_allowed(m, task);
318#if defined(CONFIG_S390) 326#if defined(CONFIG_S390)
319 buffer = task_show_regs(task, buffer); 327 task_show_regs(m, task);
320#endif 328#endif
321 buffer = task_context_switch_counts(task, buffer); 329 task_context_switch_counts(m, task);
322 return buffer - orig; 330 return 0;
323} 331}
324 332
325/* 333/*
@@ -381,14 +389,14 @@ static cputime_t task_gtime(struct task_struct *p)
381 return p->gtime; 389 return p->gtime;
382} 390}
383 391
384static int do_task_stat(struct task_struct *task, char *buffer, int whole) 392static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
393 struct pid *pid, struct task_struct *task, int whole)
385{ 394{
386 unsigned long vsize, eip, esp, wchan = ~0UL; 395 unsigned long vsize, eip, esp, wchan = ~0UL;
387 long priority, nice; 396 long priority, nice;
388 int tty_pgrp = -1, tty_nr = 0; 397 int tty_pgrp = -1, tty_nr = 0;
389 sigset_t sigign, sigcatch; 398 sigset_t sigign, sigcatch;
390 char state; 399 char state;
391 int res;
392 pid_t ppid = 0, pgid = -1, sid = -1; 400 pid_t ppid = 0, pgid = -1, sid = -1;
393 int num_threads = 0; 401 int num_threads = 0;
394 struct mm_struct *mm; 402 struct mm_struct *mm;
@@ -400,9 +408,6 @@ static int do_task_stat(struct task_struct *task, char *buffer, int whole)
400 unsigned long rsslim = 0; 408 unsigned long rsslim = 0;
401 char tcomm[sizeof(task->comm)]; 409 char tcomm[sizeof(task->comm)];
402 unsigned long flags; 410 unsigned long flags;
403 struct pid_namespace *ns;
404
405 ns = current->nsproxy->pid_ns;
406 411
407 state = *get_task_state(task); 412 state = *get_task_state(task);
408 vsize = eip = esp = 0; 413 vsize = eip = esp = 0;
@@ -489,10 +494,10 @@ static int do_task_stat(struct task_struct *task, char *buffer, int whole)
489 /* convert nsec -> ticks */ 494 /* convert nsec -> ticks */
490 start_time = nsec_to_clock_t(start_time); 495 start_time = nsec_to_clock_t(start_time);
491 496
492 res = sprintf(buffer, "%d (%s) %c %d %d %d %d %d %u %lu \ 497 seq_printf(m, "%d (%s) %c %d %d %d %d %d %u %lu \
493%lu %lu %lu %lu %lu %ld %ld %ld %ld %d 0 %llu %lu %ld %lu %lu %lu %lu %lu \ 498%lu %lu %lu %lu %lu %ld %ld %ld %ld %d 0 %llu %lu %ld %lu %lu %lu %lu %lu \
494%lu %lu %lu %lu %lu %lu %lu %lu %d %d %u %u %llu %lu %ld\n", 499%lu %lu %lu %lu %lu %lu %lu %lu %d %d %u %u %llu %lu %ld\n",
495 task_pid_nr_ns(task, ns), 500 pid_nr_ns(pid, ns),
496 tcomm, 501 tcomm,
497 state, 502 state,
498 ppid, 503 ppid,
@@ -541,20 +546,23 @@ static int do_task_stat(struct task_struct *task, char *buffer, int whole)
541 cputime_to_clock_t(cgtime)); 546 cputime_to_clock_t(cgtime));
542 if (mm) 547 if (mm)
543 mmput(mm); 548 mmput(mm);
544 return res; 549 return 0;
545} 550}
546 551
547int proc_tid_stat(struct task_struct *task, char *buffer) 552int proc_tid_stat(struct seq_file *m, struct pid_namespace *ns,
553 struct pid *pid, struct task_struct *task)
548{ 554{
549 return do_task_stat(task, buffer, 0); 555 return do_task_stat(m, ns, pid, task, 0);
550} 556}
551 557
552int proc_tgid_stat(struct task_struct *task, char *buffer) 558int proc_tgid_stat(struct seq_file *m, struct pid_namespace *ns,
559 struct pid *pid, struct task_struct *task)
553{ 560{
554 return do_task_stat(task, buffer, 1); 561 return do_task_stat(m, ns, pid, task, 1);
555} 562}
556 563
557int proc_pid_statm(struct task_struct *task, char *buffer) 564int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
565 struct pid *pid, struct task_struct *task)
558{ 566{
559 int size = 0, resident = 0, shared = 0, text = 0, lib = 0, data = 0; 567 int size = 0, resident = 0, shared = 0, text = 0, lib = 0, data = 0;
560 struct mm_struct *mm = get_task_mm(task); 568 struct mm_struct *mm = get_task_mm(task);
@@ -563,7 +571,8 @@ int proc_pid_statm(struct task_struct *task, char *buffer)
563 size = task_statm(mm, &shared, &text, &data, &resident); 571 size = task_statm(mm, &shared, &text, &data, &resident);
564 mmput(mm); 572 mmput(mm);
565 } 573 }
574 seq_printf(m, "%d %d %d %d %d %d %d\n",
575 size, resident, shared, text, lib, data, 0);
566 576
567 return sprintf(buffer, "%d %d %d %d %d %d %d\n", 577 return 0;
568 size, resident, shared, text, lib, data, 0);
569} 578}
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 33537487f5ab..88f8edf18258 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -88,10 +88,6 @@
88 * in /proc for a task before it execs a suid executable. 88 * in /proc for a task before it execs a suid executable.
89 */ 89 */
90 90
91
92/* Worst case buffer size needed for holding an integer. */
93#define PROC_NUMBUF 13
94
95struct pid_entry { 91struct pid_entry {
96 char *name; 92 char *name;
97 int len; 93 int len;
@@ -125,6 +121,10 @@ struct pid_entry {
125 NOD(NAME, (S_IFREG|(MODE)), \ 121 NOD(NAME, (S_IFREG|(MODE)), \
126 NULL, &proc_info_file_operations, \ 122 NULL, &proc_info_file_operations, \
127 { .proc_read = &proc_##OTYPE } ) 123 { .proc_read = &proc_##OTYPE } )
124#define ONE(NAME, MODE, OTYPE) \
125 NOD(NAME, (S_IFREG|(MODE)), \
126 NULL, &proc_single_file_operations, \
127 { .proc_show = &proc_##OTYPE } )
128 128
129int maps_protect; 129int maps_protect;
130EXPORT_SYMBOL(maps_protect); 130EXPORT_SYMBOL(maps_protect);
@@ -153,7 +153,7 @@ static int get_nr_threads(struct task_struct *tsk)
153 return count; 153 return count;
154} 154}
155 155
156static int proc_cwd_link(struct inode *inode, struct dentry **dentry, struct vfsmount **mnt) 156static int proc_cwd_link(struct inode *inode, struct path *path)
157{ 157{
158 struct task_struct *task = get_proc_task(inode); 158 struct task_struct *task = get_proc_task(inode);
159 struct fs_struct *fs = NULL; 159 struct fs_struct *fs = NULL;
@@ -165,8 +165,8 @@ static int proc_cwd_link(struct inode *inode, struct dentry **dentry, struct vfs
165 } 165 }
166 if (fs) { 166 if (fs) {
167 read_lock(&fs->lock); 167 read_lock(&fs->lock);
168 *mnt = mntget(fs->pwdmnt); 168 *path = fs->pwd;
169 *dentry = dget(fs->pwd); 169 path_get(&fs->pwd);
170 read_unlock(&fs->lock); 170 read_unlock(&fs->lock);
171 result = 0; 171 result = 0;
172 put_fs_struct(fs); 172 put_fs_struct(fs);
@@ -174,7 +174,7 @@ static int proc_cwd_link(struct inode *inode, struct dentry **dentry, struct vfs
174 return result; 174 return result;
175} 175}
176 176
177static int proc_root_link(struct inode *inode, struct dentry **dentry, struct vfsmount **mnt) 177static int proc_root_link(struct inode *inode, struct path *path)
178{ 178{
179 struct task_struct *task = get_proc_task(inode); 179 struct task_struct *task = get_proc_task(inode);
180 struct fs_struct *fs = NULL; 180 struct fs_struct *fs = NULL;
@@ -186,8 +186,8 @@ static int proc_root_link(struct inode *inode, struct dentry **dentry, struct vf
186 } 186 }
187 if (fs) { 187 if (fs) {
188 read_lock(&fs->lock); 188 read_lock(&fs->lock);
189 *mnt = mntget(fs->rootmnt); 189 *path = fs->root;
190 *dentry = dget(fs->root); 190 path_get(&fs->root);
191 read_unlock(&fs->lock); 191 read_unlock(&fs->lock);
192 result = 0; 192 result = 0;
193 put_fs_struct(fs); 193 put_fs_struct(fs);
@@ -506,7 +506,7 @@ static const struct inode_operations proc_def_inode_operations = {
506 .setattr = proc_setattr, 506 .setattr = proc_setattr,
507}; 507};
508 508
509extern struct seq_operations mounts_op; 509extern const struct seq_operations mounts_op;
510struct proc_mounts { 510struct proc_mounts {
511 struct seq_file m; 511 struct seq_file m;
512 int event; 512 int event;
@@ -585,7 +585,7 @@ static const struct file_operations proc_mounts_operations = {
585 .poll = mounts_poll, 585 .poll = mounts_poll,
586}; 586};
587 587
588extern struct seq_operations mountstats_op; 588extern const struct seq_operations mountstats_op;
589static int mountstats_open(struct inode *inode, struct file *file) 589static int mountstats_open(struct inode *inode, struct file *file)
590{ 590{
591 int ret = seq_open(file, &mountstats_op); 591 int ret = seq_open(file, &mountstats_op);
@@ -662,6 +662,45 @@ static const struct file_operations proc_info_file_operations = {
662 .read = proc_info_read, 662 .read = proc_info_read,
663}; 663};
664 664
665static int proc_single_show(struct seq_file *m, void *v)
666{
667 struct inode *inode = m->private;
668 struct pid_namespace *ns;
669 struct pid *pid;
670 struct task_struct *task;
671 int ret;
672
673 ns = inode->i_sb->s_fs_info;
674 pid = proc_pid(inode);
675 task = get_pid_task(pid, PIDTYPE_PID);
676 if (!task)
677 return -ESRCH;
678
679 ret = PROC_I(inode)->op.proc_show(m, ns, pid, task);
680
681 put_task_struct(task);
682 return ret;
683}
684
685static int proc_single_open(struct inode *inode, struct file *filp)
686{
687 int ret;
688 ret = single_open(filp, proc_single_show, NULL);
689 if (!ret) {
690 struct seq_file *m = filp->private_data;
691
692 m->private = inode;
693 }
694 return ret;
695}
696
697static const struct file_operations proc_single_file_operations = {
698 .open = proc_single_open,
699 .read = seq_read,
700 .llseek = seq_lseek,
701 .release = single_release,
702};
703
665static int mem_open(struct inode* inode, struct file* file) 704static int mem_open(struct inode* inode, struct file* file)
666{ 705{
667 file->private_data = (void*)((long)current->self_exec_id); 706 file->private_data = (void*)((long)current->self_exec_id);
@@ -787,7 +826,7 @@ out_no_task:
787} 826}
788#endif 827#endif
789 828
790static loff_t mem_lseek(struct file * file, loff_t offset, int orig) 829loff_t mem_lseek(struct file *file, loff_t offset, int orig)
791{ 830{
792 switch (orig) { 831 switch (orig) {
793 case 0: 832 case 0:
@@ -935,42 +974,6 @@ static const struct file_operations proc_oom_adjust_operations = {
935 .write = oom_adjust_write, 974 .write = oom_adjust_write,
936}; 975};
937 976
938#ifdef CONFIG_MMU
939static ssize_t clear_refs_write(struct file *file, const char __user *buf,
940 size_t count, loff_t *ppos)
941{
942 struct task_struct *task;
943 char buffer[PROC_NUMBUF], *end;
944 struct mm_struct *mm;
945
946 memset(buffer, 0, sizeof(buffer));
947 if (count > sizeof(buffer) - 1)
948 count = sizeof(buffer) - 1;
949 if (copy_from_user(buffer, buf, count))
950 return -EFAULT;
951 if (!simple_strtol(buffer, &end, 0))
952 return -EINVAL;
953 if (*end == '\n')
954 end++;
955 task = get_proc_task(file->f_path.dentry->d_inode);
956 if (!task)
957 return -ESRCH;
958 mm = get_task_mm(task);
959 if (mm) {
960 clear_refs_smap(mm);
961 mmput(mm);
962 }
963 put_task_struct(task);
964 if (end - buffer == 0)
965 return -EIO;
966 return end - buffer;
967}
968
969static struct file_operations proc_clear_refs_operations = {
970 .write = clear_refs_write,
971};
972#endif
973
974#ifdef CONFIG_AUDITSYSCALL 977#ifdef CONFIG_AUDITSYSCALL
975#define TMPBUFLEN 21 978#define TMPBUFLEN 21
976static ssize_t proc_loginuid_read(struct file * file, char __user * buf, 979static ssize_t proc_loginuid_read(struct file * file, char __user * buf,
@@ -1161,39 +1164,36 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
1161 int error = -EACCES; 1164 int error = -EACCES;
1162 1165
1163 /* We don't need a base pointer in the /proc filesystem */ 1166 /* We don't need a base pointer in the /proc filesystem */
1164 path_release(nd); 1167 path_put(&nd->path);
1165 1168
1166 /* Are we allowed to snoop on the tasks file descriptors? */ 1169 /* Are we allowed to snoop on the tasks file descriptors? */
1167 if (!proc_fd_access_allowed(inode)) 1170 if (!proc_fd_access_allowed(inode))
1168 goto out; 1171 goto out;
1169 1172
1170 error = PROC_I(inode)->op.proc_get_link(inode, &nd->dentry, &nd->mnt); 1173 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
1171 nd->last_type = LAST_BIND; 1174 nd->last_type = LAST_BIND;
1172out: 1175out:
1173 return ERR_PTR(error); 1176 return ERR_PTR(error);
1174} 1177}
1175 1178
1176static int do_proc_readlink(struct dentry *dentry, struct vfsmount *mnt, 1179static int do_proc_readlink(struct path *path, char __user *buffer, int buflen)
1177 char __user *buffer, int buflen)
1178{ 1180{
1179 struct inode * inode;
1180 char *tmp = (char*)__get_free_page(GFP_TEMPORARY); 1181 char *tmp = (char*)__get_free_page(GFP_TEMPORARY);
1181 char *path; 1182 char *pathname;
1182 int len; 1183 int len;
1183 1184
1184 if (!tmp) 1185 if (!tmp)
1185 return -ENOMEM; 1186 return -ENOMEM;
1186 1187
1187 inode = dentry->d_inode; 1188 pathname = d_path(path, tmp, PAGE_SIZE);
1188 path = d_path(dentry, mnt, tmp, PAGE_SIZE); 1189 len = PTR_ERR(pathname);
1189 len = PTR_ERR(path); 1190 if (IS_ERR(pathname))
1190 if (IS_ERR(path))
1191 goto out; 1191 goto out;
1192 len = tmp + PAGE_SIZE - 1 - path; 1192 len = tmp + PAGE_SIZE - 1 - pathname;
1193 1193
1194 if (len > buflen) 1194 if (len > buflen)
1195 len = buflen; 1195 len = buflen;
1196 if (copy_to_user(buffer, path, len)) 1196 if (copy_to_user(buffer, pathname, len))
1197 len = -EFAULT; 1197 len = -EFAULT;
1198 out: 1198 out:
1199 free_page((unsigned long)tmp); 1199 free_page((unsigned long)tmp);
@@ -1204,20 +1204,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
1204{ 1204{
1205 int error = -EACCES; 1205 int error = -EACCES;
1206 struct inode *inode = dentry->d_inode; 1206 struct inode *inode = dentry->d_inode;
1207 struct dentry *de; 1207 struct path path;
1208 struct vfsmount *mnt = NULL;
1209 1208
1210 /* Are we allowed to snoop on the tasks file descriptors? */ 1209 /* Are we allowed to snoop on the tasks file descriptors? */
1211 if (!proc_fd_access_allowed(inode)) 1210 if (!proc_fd_access_allowed(inode))
1212 goto out; 1211 goto out;
1213 1212
1214 error = PROC_I(inode)->op.proc_get_link(inode, &de, &mnt); 1213 error = PROC_I(inode)->op.proc_get_link(inode, &path);
1215 if (error) 1214 if (error)
1216 goto out; 1215 goto out;
1217 1216
1218 error = do_proc_readlink(de, mnt, buffer, buflen); 1217 error = do_proc_readlink(&path, buffer, buflen);
1219 dput(de); 1218 path_put(&path);
1220 mntput(mnt);
1221out: 1219out:
1222 return error; 1220 return error;
1223} 1221}
@@ -1444,8 +1442,7 @@ out:
1444 1442
1445#define PROC_FDINFO_MAX 64 1443#define PROC_FDINFO_MAX 64
1446 1444
1447static int proc_fd_info(struct inode *inode, struct dentry **dentry, 1445static int proc_fd_info(struct inode *inode, struct path *path, char *info)
1448 struct vfsmount **mnt, char *info)
1449{ 1446{
1450 struct task_struct *task = get_proc_task(inode); 1447 struct task_struct *task = get_proc_task(inode);
1451 struct files_struct *files = NULL; 1448 struct files_struct *files = NULL;
@@ -1464,10 +1461,10 @@ static int proc_fd_info(struct inode *inode, struct dentry **dentry,
1464 spin_lock(&files->file_lock); 1461 spin_lock(&files->file_lock);
1465 file = fcheck_files(files, fd); 1462 file = fcheck_files(files, fd);
1466 if (file) { 1463 if (file) {
1467 if (mnt) 1464 if (path) {
1468 *mnt = mntget(file->f_path.mnt); 1465 *path = file->f_path;
1469 if (dentry) 1466 path_get(&file->f_path);
1470 *dentry = dget(file->f_path.dentry); 1467 }
1471 if (info) 1468 if (info)
1472 snprintf(info, PROC_FDINFO_MAX, 1469 snprintf(info, PROC_FDINFO_MAX,
1473 "pos:\t%lli\n" 1470 "pos:\t%lli\n"
@@ -1484,10 +1481,9 @@ static int proc_fd_info(struct inode *inode, struct dentry **dentry,
1484 return -ENOENT; 1481 return -ENOENT;
1485} 1482}
1486 1483
1487static int proc_fd_link(struct inode *inode, struct dentry **dentry, 1484static int proc_fd_link(struct inode *inode, struct path *path)
1488 struct vfsmount **mnt)
1489{ 1485{
1490 return proc_fd_info(inode, dentry, mnt, NULL); 1486 return proc_fd_info(inode, path, NULL);
1491} 1487}
1492 1488
1493static int tid_fd_revalidate(struct dentry *dentry, struct nameidata *nd) 1489static int tid_fd_revalidate(struct dentry *dentry, struct nameidata *nd)
@@ -1681,7 +1677,7 @@ static ssize_t proc_fdinfo_read(struct file *file, char __user *buf,
1681 size_t len, loff_t *ppos) 1677 size_t len, loff_t *ppos)
1682{ 1678{
1683 char tmp[PROC_FDINFO_MAX]; 1679 char tmp[PROC_FDINFO_MAX];
1684 int err = proc_fd_info(file->f_path.dentry->d_inode, NULL, NULL, tmp); 1680 int err = proc_fd_info(file->f_path.dentry->d_inode, NULL, tmp);
1685 if (!err) 1681 if (!err)
1686 err = simple_read_from_buffer(buf, len, ppos, tmp, strlen(tmp)); 1682 err = simple_read_from_buffer(buf, len, ppos, tmp, strlen(tmp));
1687 return err; 1683 return err;
@@ -2098,15 +2094,23 @@ static const struct file_operations proc_coredump_filter_operations = {
2098static int proc_self_readlink(struct dentry *dentry, char __user *buffer, 2094static int proc_self_readlink(struct dentry *dentry, char __user *buffer,
2099 int buflen) 2095 int buflen)
2100{ 2096{
2097 struct pid_namespace *ns = dentry->d_sb->s_fs_info;
2098 pid_t tgid = task_tgid_nr_ns(current, ns);
2101 char tmp[PROC_NUMBUF]; 2099 char tmp[PROC_NUMBUF];
2102 sprintf(tmp, "%d", task_tgid_vnr(current)); 2100 if (!tgid)
2101 return -ENOENT;
2102 sprintf(tmp, "%d", tgid);
2103 return vfs_readlink(dentry,buffer,buflen,tmp); 2103 return vfs_readlink(dentry,buffer,buflen,tmp);
2104} 2104}
2105 2105
2106static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd) 2106static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
2107{ 2107{
2108 struct pid_namespace *ns = dentry->d_sb->s_fs_info;
2109 pid_t tgid = task_tgid_nr_ns(current, ns);
2108 char tmp[PROC_NUMBUF]; 2110 char tmp[PROC_NUMBUF];
2109 sprintf(tmp, "%d", task_tgid_vnr(current)); 2111 if (!tgid)
2112 return ERR_PTR(-ENOENT);
2113 sprintf(tmp, "%d", task_tgid_nr_ns(current, ns));
2110 return ERR_PTR(vfs_follow_link(nd,tmp)); 2114 return ERR_PTR(vfs_follow_link(nd,tmp));
2111} 2115}
2112 2116
@@ -2271,14 +2275,14 @@ static const struct pid_entry tgid_base_stuff[] = {
2271 DIR("fdinfo", S_IRUSR|S_IXUSR, fdinfo), 2275 DIR("fdinfo", S_IRUSR|S_IXUSR, fdinfo),
2272 REG("environ", S_IRUSR, environ), 2276 REG("environ", S_IRUSR, environ),
2273 INF("auxv", S_IRUSR, pid_auxv), 2277 INF("auxv", S_IRUSR, pid_auxv),
2274 INF("status", S_IRUGO, pid_status), 2278 ONE("status", S_IRUGO, pid_status),
2275 INF("limits", S_IRUSR, pid_limits), 2279 INF("limits", S_IRUSR, pid_limits),
2276#ifdef CONFIG_SCHED_DEBUG 2280#ifdef CONFIG_SCHED_DEBUG
2277 REG("sched", S_IRUGO|S_IWUSR, pid_sched), 2281 REG("sched", S_IRUGO|S_IWUSR, pid_sched),
2278#endif 2282#endif
2279 INF("cmdline", S_IRUGO, pid_cmdline), 2283 INF("cmdline", S_IRUGO, pid_cmdline),
2280 INF("stat", S_IRUGO, tgid_stat), 2284 ONE("stat", S_IRUGO, tgid_stat),
2281 INF("statm", S_IRUGO, pid_statm), 2285 ONE("statm", S_IRUGO, pid_statm),
2282 REG("maps", S_IRUGO, maps), 2286 REG("maps", S_IRUGO, maps),
2283#ifdef CONFIG_NUMA 2287#ifdef CONFIG_NUMA
2284 REG("numa_maps", S_IRUGO, numa_maps), 2288 REG("numa_maps", S_IRUGO, numa_maps),
@@ -2289,9 +2293,10 @@ static const struct pid_entry tgid_base_stuff[] = {
2289 LNK("exe", exe), 2293 LNK("exe", exe),
2290 REG("mounts", S_IRUGO, mounts), 2294 REG("mounts", S_IRUGO, mounts),
2291 REG("mountstats", S_IRUSR, mountstats), 2295 REG("mountstats", S_IRUSR, mountstats),
2292#ifdef CONFIG_MMU 2296#ifdef CONFIG_PROC_PAGE_MONITOR
2293 REG("clear_refs", S_IWUSR, clear_refs), 2297 REG("clear_refs", S_IWUSR, clear_refs),
2294 REG("smaps", S_IRUGO, smaps), 2298 REG("smaps", S_IRUGO, smaps),
2299 REG("pagemap", S_IRUSR, pagemap),
2295#endif 2300#endif
2296#ifdef CONFIG_SECURITY 2301#ifdef CONFIG_SECURITY
2297 DIR("attr", S_IRUGO|S_IXUGO, attr_dir), 2302 DIR("attr", S_IRUGO|S_IXUGO, attr_dir),
@@ -2360,7 +2365,8 @@ static void proc_flush_task_mnt(struct vfsmount *mnt, pid_t pid, pid_t tgid)
2360 name.len = snprintf(buf, sizeof(buf), "%d", pid); 2365 name.len = snprintf(buf, sizeof(buf), "%d", pid);
2361 dentry = d_hash_and_lookup(mnt->mnt_root, &name); 2366 dentry = d_hash_and_lookup(mnt->mnt_root, &name);
2362 if (dentry) { 2367 if (dentry) {
2363 shrink_dcache_parent(dentry); 2368 if (!(current->flags & PF_EXITING))
2369 shrink_dcache_parent(dentry);
2364 d_drop(dentry); 2370 d_drop(dentry);
2365 dput(dentry); 2371 dput(dentry);
2366 } 2372 }
@@ -2600,14 +2606,14 @@ static const struct pid_entry tid_base_stuff[] = {
2600 DIR("fdinfo", S_IRUSR|S_IXUSR, fdinfo), 2606 DIR("fdinfo", S_IRUSR|S_IXUSR, fdinfo),
2601 REG("environ", S_IRUSR, environ), 2607 REG("environ", S_IRUSR, environ),
2602 INF("auxv", S_IRUSR, pid_auxv), 2608 INF("auxv", S_IRUSR, pid_auxv),
2603 INF("status", S_IRUGO, pid_status), 2609 ONE("status", S_IRUGO, pid_status),
2604 INF("limits", S_IRUSR, pid_limits), 2610 INF("limits", S_IRUSR, pid_limits),
2605#ifdef CONFIG_SCHED_DEBUG 2611#ifdef CONFIG_SCHED_DEBUG
2606 REG("sched", S_IRUGO|S_IWUSR, pid_sched), 2612 REG("sched", S_IRUGO|S_IWUSR, pid_sched),
2607#endif 2613#endif
2608 INF("cmdline", S_IRUGO, pid_cmdline), 2614 INF("cmdline", S_IRUGO, pid_cmdline),
2609 INF("stat", S_IRUGO, tid_stat), 2615 ONE("stat", S_IRUGO, tid_stat),
2610 INF("statm", S_IRUGO, pid_statm), 2616 ONE("statm", S_IRUGO, pid_statm),
2611 REG("maps", S_IRUGO, maps), 2617 REG("maps", S_IRUGO, maps),
2612#ifdef CONFIG_NUMA 2618#ifdef CONFIG_NUMA
2613 REG("numa_maps", S_IRUGO, numa_maps), 2619 REG("numa_maps", S_IRUGO, numa_maps),
@@ -2617,9 +2623,10 @@ static const struct pid_entry tid_base_stuff[] = {
2617 LNK("root", root), 2623 LNK("root", root),
2618 LNK("exe", exe), 2624 LNK("exe", exe),
2619 REG("mounts", S_IRUGO, mounts), 2625 REG("mounts", S_IRUGO, mounts),
2620#ifdef CONFIG_MMU 2626#ifdef CONFIG_PROC_PAGE_MONITOR
2621 REG("clear_refs", S_IWUSR, clear_refs), 2627 REG("clear_refs", S_IWUSR, clear_refs),
2622 REG("smaps", S_IRUGO, smaps), 2628 REG("smaps", S_IRUGO, smaps),
2629 REG("pagemap", S_IRUSR, pagemap),
2623#endif 2630#endif
2624#ifdef CONFIG_SECURITY 2631#ifdef CONFIG_SECURITY
2625 DIR("attr", S_IRUGO|S_IXUGO, attr_dir), 2632 DIR("attr", S_IRUGO|S_IXUGO, attr_dir),
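
The new ONE() entries in proc/base.c are backed by proc_single_file_operations, which is the standard single_open()/seq_read()/single_release() trio with the inode made available to the show routine. A generic sketch of that single_open() pattern (example_* names are placeholders, not the proc code above):

	#include <linux/fs.h>
	#include <linux/seq_file.h>

	/* Sketch of the single_open() pattern behind the new ONE() entries:
	 * one show() callback, opened through single_open(), read with
	 * seq_read(), torn down with single_release(). */
	static int example_single_show(struct seq_file *m, void *v)
	{
		seq_printf(m, "hello from a single_open file\n");
		return 0;
	}

	static int example_single_open(struct inode *inode, struct file *filp)
	{
		/* passing the inode here stores it in m->private for show() */
		return single_open(filp, example_single_show, inode);
	}

	static const struct file_operations example_single_fops = {
		.open		= example_single_open,
		.read		= seq_read,
		.llseek		= seq_lseek,
		.release	= single_release,
	};
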
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index 6a2fe5187b62..68971e66cd41 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -25,12 +25,6 @@
25 25
26#include "internal.h" 26#include "internal.h"
27 27
28static ssize_t proc_file_read(struct file *file, char __user *buf,
29 size_t nbytes, loff_t *ppos);
30static ssize_t proc_file_write(struct file *file, const char __user *buffer,
31 size_t count, loff_t *ppos);
32static loff_t proc_file_lseek(struct file *, loff_t, int);
33
34DEFINE_SPINLOCK(proc_subdir_lock); 28DEFINE_SPINLOCK(proc_subdir_lock);
35 29
36static int proc_match(int len, const char *name, struct proc_dir_entry *de) 30static int proc_match(int len, const char *name, struct proc_dir_entry *de)
@@ -40,12 +34,6 @@ static int proc_match(int len, const char *name, struct proc_dir_entry *de)
40 return !memcmp(name, de->name, len); 34 return !memcmp(name, de->name, len);
41} 35}
42 36
43static const struct file_operations proc_file_operations = {
44 .llseek = proc_file_lseek,
45 .read = proc_file_read,
46 .write = proc_file_write,
47};
48
49/* buffer size is one page but our output routines use some slack for overruns */ 37/* buffer size is one page but our output routines use some slack for overruns */
50#define PROC_BLOCK_SIZE (PAGE_SIZE - 1024) 38#define PROC_BLOCK_SIZE (PAGE_SIZE - 1024)
51 39
@@ -233,6 +221,12 @@ proc_file_lseek(struct file *file, loff_t offset, int orig)
233 return retval; 221 return retval;
234} 222}
235 223
224static const struct file_operations proc_file_operations = {
225 .llseek = proc_file_lseek,
226 .read = proc_file_read,
227 .write = proc_file_write,
228};
229
236static int proc_notify_change(struct dentry *dentry, struct iattr *iattr) 230static int proc_notify_change(struct dentry *dentry, struct iattr *iattr)
237{ 231{
238 struct inode *inode = dentry->d_inode; 232 struct inode *inode = dentry->d_inode;
@@ -406,12 +400,12 @@ struct dentry *proc_lookup(struct inode * dir, struct dentry *dentry, struct nam
406 spin_unlock(&proc_subdir_lock); 400 spin_unlock(&proc_subdir_lock);
407 error = -EINVAL; 401 error = -EINVAL;
408 inode = proc_get_inode(dir->i_sb, ino, de); 402 inode = proc_get_inode(dir->i_sb, ino, de);
409 spin_lock(&proc_subdir_lock); 403 goto out_unlock;
410 break;
411 } 404 }
412 } 405 }
413 } 406 }
414 spin_unlock(&proc_subdir_lock); 407 spin_unlock(&proc_subdir_lock);
408out_unlock:
415 unlock_kernel(); 409 unlock_kernel();
416 410
417 if (inode) { 411 if (inode) {
@@ -527,6 +521,7 @@ static const struct inode_operations proc_dir_inode_operations = {
527static int proc_register(struct proc_dir_entry * dir, struct proc_dir_entry * dp) 521static int proc_register(struct proc_dir_entry * dir, struct proc_dir_entry * dp)
528{ 522{
529 unsigned int i; 523 unsigned int i;
524 struct proc_dir_entry *tmp;
530 525
531 i = get_inode_number(); 526 i = get_inode_number();
532 if (i == 0) 527 if (i == 0)
@@ -550,6 +545,15 @@ static int proc_register(struct proc_dir_entry * dir, struct proc_dir_entry * dp
550 } 545 }
551 546
552 spin_lock(&proc_subdir_lock); 547 spin_lock(&proc_subdir_lock);
548
549 for (tmp = dir->subdir; tmp; tmp = tmp->next)
550 if (strcmp(tmp->name, dp->name) == 0) {
551 printk(KERN_WARNING "proc_dir_entry '%s' already "
552 "registered\n", dp->name);
553 dump_stack();
554 break;
555 }
556
553 dp->next = dir->subdir; 557 dp->next = dir->subdir;
554 dp->parent = dir; 558 dp->parent = dir;
555 dir->subdir = dp; 559 dir->subdir = dp;
@@ -558,7 +562,7 @@ static int proc_register(struct proc_dir_entry * dir, struct proc_dir_entry * dp
558 return 0; 562 return 0;
559} 563}
560 564
561static struct proc_dir_entry *proc_create(struct proc_dir_entry **parent, 565static struct proc_dir_entry *__proc_create(struct proc_dir_entry **parent,
562 const char *name, 566 const char *name,
563 mode_t mode, 567 mode_t mode,
564 nlink_t nlink) 568 nlink_t nlink)
@@ -601,7 +605,7 @@ struct proc_dir_entry *proc_symlink(const char *name,
601{ 605{
602 struct proc_dir_entry *ent; 606 struct proc_dir_entry *ent;
603 607
604 ent = proc_create(&parent,name, 608 ent = __proc_create(&parent, name,
605 (S_IFLNK | S_IRUGO | S_IWUGO | S_IXUGO),1); 609 (S_IFLNK | S_IRUGO | S_IWUGO | S_IXUGO),1);
606 610
607 if (ent) { 611 if (ent) {
@@ -626,7 +630,7 @@ struct proc_dir_entry *proc_mkdir_mode(const char *name, mode_t mode,
626{ 630{
627 struct proc_dir_entry *ent; 631 struct proc_dir_entry *ent;
628 632
629 ent = proc_create(&parent, name, S_IFDIR | mode, 2); 633 ent = __proc_create(&parent, name, S_IFDIR | mode, 2);
630 if (ent) { 634 if (ent) {
631 if (proc_register(parent, ent) < 0) { 635 if (proc_register(parent, ent) < 0) {
632 kfree(ent); 636 kfree(ent);
@@ -660,7 +664,7 @@ struct proc_dir_entry *create_proc_entry(const char *name, mode_t mode,
660 nlink = 1; 664 nlink = 1;
661 } 665 }
662 666
663 ent = proc_create(&parent,name,mode,nlink); 667 ent = __proc_create(&parent, name, mode, nlink);
664 if (ent) { 668 if (ent) {
665 if (proc_register(parent, ent) < 0) { 669 if (proc_register(parent, ent) < 0) {
666 kfree(ent); 670 kfree(ent);
@@ -670,6 +674,38 @@ struct proc_dir_entry *create_proc_entry(const char *name, mode_t mode,
670 return ent; 674 return ent;
671} 675}
672 676
677struct proc_dir_entry *proc_create(const char *name, mode_t mode,
678 struct proc_dir_entry *parent,
679 const struct file_operations *proc_fops)
680{
681 struct proc_dir_entry *pde;
682 nlink_t nlink;
683
684 if (S_ISDIR(mode)) {
685 if ((mode & S_IALLUGO) == 0)
686 mode |= S_IRUGO | S_IXUGO;
687 nlink = 2;
688 } else {
689 if ((mode & S_IFMT) == 0)
690 mode |= S_IFREG;
691 if ((mode & S_IALLUGO) == 0)
692 mode |= S_IRUGO;
693 nlink = 1;
694 }
695
696 pde = __proc_create(&parent, name, mode, nlink);
697 if (!pde)
698 goto out;
699 pde->proc_fops = proc_fops;
700 if (proc_register(parent, pde) < 0)
701 goto out_free;
702 return pde;
703out_free:
704 kfree(pde);
705out:
706 return NULL;
707}
708
673void free_proc_entry(struct proc_dir_entry *de) 709void free_proc_entry(struct proc_dir_entry *de)
674{ 710{
675 unsigned int ino = de->low_ino; 711 unsigned int ino = de->low_ino;
@@ -679,7 +715,7 @@ void free_proc_entry(struct proc_dir_entry *de)
679 715
680 release_inode_number(ino); 716 release_inode_number(ino);
681 717
682 if (S_ISLNK(de->mode) && de->data) 718 if (S_ISLNK(de->mode))
683 kfree(de->data); 719 kfree(de->data);
684 kfree(de); 720 kfree(de);
685} 721}
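
The proc_create() helper added above closes the window where an entry made with create_proc_entry() is visible in /proc before its proc_fops pointer is assigned. A minimal sketch of the conversion a caller would make; the entry name and fops structure here are hypothetical, not part of this patch:

	#include <linux/init.h>
	#include <linux/proc_fs.h>
	#include <linux/stat.h>

	/* example_stats_fops is hypothetical; any const struct
	 * file_operations the caller already owns would do. */
	extern const struct file_operations example_stats_fops;

	static int __init example_stats_init(void)
	{
		/*
		 * Replaces the old two-step sequence
		 *	pde = create_proc_entry("example_stats", S_IRUGO, NULL);
		 *	if (pde)
		 *		pde->proc_fops = &example_stats_fops;
		 * which left the entry briefly registered without fops.
		 */
		if (!proc_create("example_stats", S_IRUGO, NULL,
				 &example_stats_fops))
			return -ENOMEM;
		return 0;
	}

The proc_net_fops_create() change later in this diff is exactly this conversion applied inside procfs itself.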
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index 1a551d92e1d8..82b3a1b5a70b 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -73,11 +73,6 @@ static void proc_delete_inode(struct inode *inode)
73 73
74struct vfsmount *proc_mnt; 74struct vfsmount *proc_mnt;
75 75
76static void proc_read_inode(struct inode * inode)
77{
78 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
79}
80
81static struct kmem_cache * proc_inode_cachep; 76static struct kmem_cache * proc_inode_cachep;
82 77
83static struct inode *proc_alloc_inode(struct super_block *sb) 78static struct inode *proc_alloc_inode(struct super_block *sb)
@@ -128,7 +123,6 @@ static int proc_remount(struct super_block *sb, int *flags, char *data)
128static const struct super_operations proc_sops = { 123static const struct super_operations proc_sops = {
129 .alloc_inode = proc_alloc_inode, 124 .alloc_inode = proc_alloc_inode,
130 .destroy_inode = proc_destroy_inode, 125 .destroy_inode = proc_destroy_inode,
131 .read_inode = proc_read_inode,
132 .drop_inode = generic_delete_inode, 126 .drop_inode = generic_delete_inode,
133 .delete_inode = proc_delete_inode, 127 .delete_inode = proc_delete_inode,
134 .statfs = simple_statfs, 128 .statfs = simple_statfs,
@@ -401,39 +395,41 @@ struct inode *proc_get_inode(struct super_block *sb, unsigned int ino,
401 if (de != NULL && !try_module_get(de->owner)) 395 if (de != NULL && !try_module_get(de->owner))
402 goto out_mod; 396 goto out_mod;
403 397
404 inode = iget(sb, ino); 398 inode = iget_locked(sb, ino);
405 if (!inode) 399 if (!inode)
406 goto out_ino; 400 goto out_ino;
407 401 if (inode->i_state & I_NEW) {
408 PROC_I(inode)->fd = 0; 402 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
409 PROC_I(inode)->pde = de; 403 PROC_I(inode)->fd = 0;
410 if (de) { 404 PROC_I(inode)->pde = de;
411 if (de->mode) { 405 if (de) {
412 inode->i_mode = de->mode; 406 if (de->mode) {
413 inode->i_uid = de->uid; 407 inode->i_mode = de->mode;
414 inode->i_gid = de->gid; 408 inode->i_uid = de->uid;
415 } 409 inode->i_gid = de->gid;
416 if (de->size) 410 }
417 inode->i_size = de->size; 411 if (de->size)
418 if (de->nlink) 412 inode->i_size = de->size;
419 inode->i_nlink = de->nlink; 413 if (de->nlink)
420 if (de->proc_iops) 414 inode->i_nlink = de->nlink;
421 inode->i_op = de->proc_iops; 415 if (de->proc_iops)
422 if (de->proc_fops) { 416 inode->i_op = de->proc_iops;
423 if (S_ISREG(inode->i_mode)) { 417 if (de->proc_fops) {
418 if (S_ISREG(inode->i_mode)) {
424#ifdef CONFIG_COMPAT 419#ifdef CONFIG_COMPAT
425 if (!de->proc_fops->compat_ioctl) 420 if (!de->proc_fops->compat_ioctl)
426 inode->i_fop = 421 inode->i_fop =
427 &proc_reg_file_ops_no_compat; 422 &proc_reg_file_ops_no_compat;
428 else 423 else
429#endif 424#endif
430 inode->i_fop = &proc_reg_file_ops; 425 inode->i_fop = &proc_reg_file_ops;
426 } else {
427 inode->i_fop = de->proc_fops;
428 }
431 } 429 }
432 else
433 inode->i_fop = de->proc_fops;
434 } 430 }
431 unlock_new_inode(inode);
435 } 432 }
436
437 return inode; 433 return inode;
438 434
439out_ino: 435out_ino:
@@ -471,4 +467,3 @@ out_no_root:
471 de_put(&proc_root); 467 de_put(&proc_root);
472 return -ENOMEM; 468 return -ENOMEM;
473} 469}
474MODULE_LICENSE("GPL");
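
With ->read_inode dropped from proc_sops, inode initialization moves into proc_get_inode() via iget_locked(), as shown above. The general shape that such conversions follow (the qnx4_iget() change later in this diff is another instance), sketched with placeholder foo_* names:

	#include <linux/fs.h>
	#include <linux/err.h>

	struct inode *foo_iget(struct super_block *sb, unsigned long ino)
	{
		struct inode *inode;

		inode = iget_locked(sb, ino);
		if (!inode)
			return ERR_PTR(-ENOMEM);
		if (!(inode->i_state & I_NEW))
			return inode;	/* already cached and initialized */

		/*
		 * Only a freshly allocated inode carries I_NEW; fill it in
		 * here (what ->read_inode used to do), then publish it.
		 */
		/* ... read on-disk data, set i_mode, i_size, i_op, i_fop ... */

		unlock_new_inode(inode);	/* clears I_NEW, wakes waiters */
		return inode;
	}

proc_get_inode() keeps its NULL-on-failure convention rather than ERR_PTR(), but the I_NEW / unlock_new_inode() handshake is the same.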
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index 05b3e9006262..1c81c8f1aeed 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -46,21 +46,24 @@ extern int nommu_vma_show(struct seq_file *, struct vm_area_struct *);
46 46
47extern int maps_protect; 47extern int maps_protect;
48 48
49extern void create_seq_entry(char *name, mode_t mode, const struct file_operations *f); 49extern void create_seq_entry(char *name, mode_t mode,
50extern int proc_exe_link(struct inode *, struct dentry **, struct vfsmount **); 50 const struct file_operations *f);
51extern int proc_tid_stat(struct task_struct *, char *); 51extern int proc_exe_link(struct inode *, struct path *);
52extern int proc_tgid_stat(struct task_struct *, char *); 52extern int proc_tid_stat(struct seq_file *m, struct pid_namespace *ns,
53extern int proc_pid_status(struct task_struct *, char *); 53 struct pid *pid, struct task_struct *task);
54extern int proc_pid_statm(struct task_struct *, char *); 54extern int proc_tgid_stat(struct seq_file *m, struct pid_namespace *ns,
55 struct pid *pid, struct task_struct *task);
56extern int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
57 struct pid *pid, struct task_struct *task);
58extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
59 struct pid *pid, struct task_struct *task);
60extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
55 61
56extern const struct file_operations proc_maps_operations; 62extern const struct file_operations proc_maps_operations;
57extern const struct file_operations proc_numa_maps_operations; 63extern const struct file_operations proc_numa_maps_operations;
58extern const struct file_operations proc_smaps_operations; 64extern const struct file_operations proc_smaps_operations;
59 65extern const struct file_operations proc_clear_refs_operations;
60extern const struct file_operations proc_maps_operations; 66extern const struct file_operations proc_pagemap_operations;
61extern const struct file_operations proc_numa_maps_operations;
62extern const struct file_operations proc_smaps_operations;
63
64 67
65void free_proc_entry(struct proc_dir_entry *de); 68void free_proc_entry(struct proc_dir_entry *de);
66 69
diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
index 1be73082edd3..e78c81fcf547 100644
--- a/fs/proc/kcore.c
+++ b/fs/proc/kcore.c
@@ -12,7 +12,6 @@
12#include <linux/mm.h> 12#include <linux/mm.h>
13#include <linux/proc_fs.h> 13#include <linux/proc_fs.h>
14#include <linux/user.h> 14#include <linux/user.h>
15#include <linux/a.out.h>
16#include <linux/capability.h> 15#include <linux/capability.h>
17#include <linux/elf.h> 16#include <linux/elf.h>
18#include <linux/elfcore.h> 17#include <linux/elfcore.h>
@@ -325,7 +324,7 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
325 if (m == NULL) { 324 if (m == NULL) {
326 if (clear_user(buffer, tsz)) 325 if (clear_user(buffer, tsz))
327 return -EFAULT; 326 return -EFAULT;
328 } else if ((start >= VMALLOC_START) && (start < VMALLOC_END)) { 327 } else if (is_vmalloc_addr((void *)start)) {
329 char * elf_buf; 328 char * elf_buf;
330 struct vm_struct *m; 329 struct vm_struct *m;
331 unsigned long curstart = start; 330 unsigned long curstart = start;
diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
index 22f789de3909..941e95114b5a 100644
--- a/fs/proc/nommu.c
+++ b/fs/proc/nommu.c
@@ -67,7 +67,7 @@ int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
67 if (len < 1) 67 if (len < 1)
68 len = 1; 68 len = 1;
69 seq_printf(m, "%*c", len, ' '); 69 seq_printf(m, "%*c", len, ' ');
70 seq_path(m, file->f_path.mnt, file->f_path.dentry, ""); 70 seq_path(m, &file->f_path, "");
71 } 71 }
72 72
73 seq_putc(m, '\n'); 73 seq_putc(m, '\n');
@@ -116,7 +116,7 @@ static void *nommu_vma_list_next(struct seq_file *m, void *v, loff_t *pos)
116 return rb_next((struct rb_node *) v); 116 return rb_next((struct rb_node *) v);
117} 117}
118 118
119static struct seq_operations proc_nommu_vma_list_seqop = { 119static const struct seq_operations proc_nommu_vma_list_seqop = {
120 .start = nommu_vma_list_start, 120 .start = nommu_vma_list_start,
121 .next = nommu_vma_list_next, 121 .next = nommu_vma_list_next,
122 .stop = nommu_vma_list_stop, 122 .stop = nommu_vma_list_stop,
diff --git a/fs/proc/proc_misc.c b/fs/proc/proc_misc.c
index 3462bfde89f6..468805d40e2b 100644
--- a/fs/proc/proc_misc.c
+++ b/fs/proc/proc_misc.c
@@ -29,6 +29,7 @@
29#include <linux/mm.h> 29#include <linux/mm.h>
30#include <linux/mmzone.h> 30#include <linux/mmzone.h>
31#include <linux/pagemap.h> 31#include <linux/pagemap.h>
32#include <linux/interrupt.h>
32#include <linux/swap.h> 33#include <linux/swap.h>
33#include <linux/slab.h> 34#include <linux/slab.h>
34#include <linux/smp.h> 35#include <linux/smp.h>
@@ -46,6 +47,7 @@
46#include <linux/vmalloc.h> 47#include <linux/vmalloc.h>
47#include <linux/crash_dump.h> 48#include <linux/crash_dump.h>
48#include <linux/pid_namespace.h> 49#include <linux/pid_namespace.h>
50#include <linux/bootmem.h>
49#include <asm/uaccess.h> 51#include <asm/uaccess.h>
50#include <asm/pgtable.h> 52#include <asm/pgtable.h>
51#include <asm/io.h> 53#include <asm/io.h>
@@ -63,7 +65,6 @@
63 */ 65 */
64extern int get_hardware_list(char *); 66extern int get_hardware_list(char *);
65extern int get_stram_list(char *); 67extern int get_stram_list(char *);
66extern int get_filesystem_list(char *);
67extern int get_exec_domain_list(char *); 68extern int get_exec_domain_list(char *);
68extern int get_dma_list(char *); 69extern int get_dma_list(char *);
69 70
@@ -83,10 +84,15 @@ static int loadavg_read_proc(char *page, char **start, off_t off,
83{ 84{
84 int a, b, c; 85 int a, b, c;
85 int len; 86 int len;
87 unsigned long seq;
88
89 do {
90 seq = read_seqbegin(&xtime_lock);
91 a = avenrun[0] + (FIXED_1/200);
92 b = avenrun[1] + (FIXED_1/200);
93 c = avenrun[2] + (FIXED_1/200);
94 } while (read_seqretry(&xtime_lock, seq));
86 95
87 a = avenrun[0] + (FIXED_1/200);
88 b = avenrun[1] + (FIXED_1/200);
89 c = avenrun[2] + (FIXED_1/200);
90 len = sprintf(page,"%d.%02d %d.%02d %d.%02d %ld/%d %d\n", 96 len = sprintf(page,"%d.%02d %d.%02d %d.%02d %ld/%d %d\n",
91 LOAD_INT(a), LOAD_FRAC(a), 97 LOAD_INT(a), LOAD_FRAC(a),
92 LOAD_INT(b), LOAD_FRAC(b), 98 LOAD_INT(b), LOAD_FRAC(b),
@@ -216,7 +222,7 @@ static int meminfo_read_proc(char *page, char **start, off_t off,
216#undef K 222#undef K
217} 223}
218 224
219extern struct seq_operations fragmentation_op; 225extern const struct seq_operations fragmentation_op;
220static int fragmentation_open(struct inode *inode, struct file *file) 226static int fragmentation_open(struct inode *inode, struct file *file)
221{ 227{
222 (void)inode; 228 (void)inode;
@@ -230,7 +236,7 @@ static const struct file_operations fragmentation_file_operations = {
230 .release = seq_release, 236 .release = seq_release,
231}; 237};
232 238
233extern struct seq_operations pagetypeinfo_op; 239extern const struct seq_operations pagetypeinfo_op;
234static int pagetypeinfo_open(struct inode *inode, struct file *file) 240static int pagetypeinfo_open(struct inode *inode, struct file *file)
235{ 241{
236 return seq_open(file, &pagetypeinfo_op); 242 return seq_open(file, &pagetypeinfo_op);
@@ -243,7 +249,7 @@ static const struct file_operations pagetypeinfo_file_ops = {
243 .release = seq_release, 249 .release = seq_release,
244}; 250};
245 251
246extern struct seq_operations zoneinfo_op; 252extern const struct seq_operations zoneinfo_op;
247static int zoneinfo_open(struct inode *inode, struct file *file) 253static int zoneinfo_open(struct inode *inode, struct file *file)
248{ 254{
249 return seq_open(file, &zoneinfo_op); 255 return seq_open(file, &zoneinfo_op);
@@ -268,7 +274,7 @@ static int version_read_proc(char *page, char **start, off_t off,
268 return proc_calc_metrics(page, start, off, count, eof, len); 274 return proc_calc_metrics(page, start, off, count, eof, len);
269} 275}
270 276
271extern struct seq_operations cpuinfo_op; 277extern const struct seq_operations cpuinfo_op;
272static int cpuinfo_open(struct inode *inode, struct file *file) 278static int cpuinfo_open(struct inode *inode, struct file *file)
273{ 279{
274 return seq_open(file, &cpuinfo_op); 280 return seq_open(file, &cpuinfo_op);
@@ -321,7 +327,7 @@ static void devinfo_stop(struct seq_file *f, void *v)
321 /* Nothing to do */ 327 /* Nothing to do */
322} 328}
323 329
324static struct seq_operations devinfo_ops = { 330static const struct seq_operations devinfo_ops = {
325 .start = devinfo_start, 331 .start = devinfo_start,
326 .next = devinfo_next, 332 .next = devinfo_next,
327 .stop = devinfo_stop, 333 .stop = devinfo_stop,
@@ -340,7 +346,7 @@ static const struct file_operations proc_devinfo_operations = {
340 .release = seq_release, 346 .release = seq_release,
341}; 347};
342 348
343extern struct seq_operations vmstat_op; 349extern const struct seq_operations vmstat_op;
344static int vmstat_open(struct inode *inode, struct file *file) 350static int vmstat_open(struct inode *inode, struct file *file)
345{ 351{
346 return seq_open(file, &vmstat_op); 352 return seq_open(file, &vmstat_op);
@@ -371,7 +377,7 @@ static int stram_read_proc(char *page, char **start, off_t off,
371#endif 377#endif
372 378
373#ifdef CONFIG_BLOCK 379#ifdef CONFIG_BLOCK
374extern struct seq_operations partitions_op; 380extern const struct seq_operations partitions_op;
375static int partitions_open(struct inode *inode, struct file *file) 381static int partitions_open(struct inode *inode, struct file *file)
376{ 382{
377 return seq_open(file, &partitions_op); 383 return seq_open(file, &partitions_op);
@@ -383,7 +389,7 @@ static const struct file_operations proc_partitions_operations = {
383 .release = seq_release, 389 .release = seq_release,
384}; 390};
385 391
386extern struct seq_operations diskstats_op; 392extern const struct seq_operations diskstats_op;
387static int diskstats_open(struct inode *inode, struct file *file) 393static int diskstats_open(struct inode *inode, struct file *file)
388{ 394{
389 return seq_open(file, &diskstats_op); 395 return seq_open(file, &diskstats_op);
@@ -397,7 +403,7 @@ static const struct file_operations proc_diskstats_operations = {
397#endif 403#endif
398 404
399#ifdef CONFIG_MODULES 405#ifdef CONFIG_MODULES
400extern struct seq_operations modules_op; 406extern const struct seq_operations modules_op;
401static int modules_open(struct inode *inode, struct file *file) 407static int modules_open(struct inode *inode, struct file *file)
402{ 408{
403 return seq_open(file, &modules_op); 409 return seq_open(file, &modules_op);
@@ -424,7 +430,7 @@ static const struct file_operations proc_slabinfo_operations = {
424}; 430};
425 431
426#ifdef CONFIG_DEBUG_SLAB_LEAK 432#ifdef CONFIG_DEBUG_SLAB_LEAK
427extern struct seq_operations slabstats_op; 433extern const struct seq_operations slabstats_op;
428static int slabstats_open(struct inode *inode, struct file *file) 434static int slabstats_open(struct inode *inode, struct file *file)
429{ 435{
430 unsigned long *n = kzalloc(PAGE_SIZE, GFP_KERNEL); 436 unsigned long *n = kzalloc(PAGE_SIZE, GFP_KERNEL);
@@ -598,8 +604,7 @@ static void int_seq_stop(struct seq_file *f, void *v)
598} 604}
599 605
600 606
601extern int show_interrupts(struct seq_file *f, void *v); /* In arch code */ 607static const struct seq_operations int_seq_ops = {
602static struct seq_operations int_seq_ops = {
603 .start = int_seq_start, 608 .start = int_seq_start,
604 .next = int_seq_next, 609 .next = int_seq_next,
605 .stop = int_seq_stop, 610 .stop = int_seq_stop,
@@ -675,6 +680,137 @@ static const struct file_operations proc_sysrq_trigger_operations = {
675}; 680};
676#endif 681#endif
677 682
683#ifdef CONFIG_PROC_PAGE_MONITOR
684#define KPMSIZE sizeof(u64)
685#define KPMMASK (KPMSIZE - 1)
686/* /proc/kpagecount - an array exposing page counts
687 *
688 * Each entry is a u64 representing the corresponding
689 * physical page count.
690 */
691static ssize_t kpagecount_read(struct file *file, char __user *buf,
692 size_t count, loff_t *ppos)
693{
694 u64 __user *out = (u64 __user *)buf;
695 struct page *ppage;
696 unsigned long src = *ppos;
697 unsigned long pfn;
698 ssize_t ret = 0;
699 u64 pcount;
700
701 pfn = src / KPMSIZE;
702 count = min_t(size_t, count, (max_pfn * KPMSIZE) - src);
703 if (src & KPMMASK || count & KPMMASK)
704 return -EIO;
705
706 while (count > 0) {
707 ppage = NULL;
708 if (pfn_valid(pfn))
709 ppage = pfn_to_page(pfn);
710 pfn++;
711 if (!ppage)
712 pcount = 0;
713 else
714 pcount = atomic_read(&ppage->_count);
715
716 if (put_user(pcount, out++)) {
717 ret = -EFAULT;
718 break;
719 }
720
721 count -= KPMSIZE;
722 }
723
724 *ppos += (char __user *)out - buf;
725 if (!ret)
726 ret = (char __user *)out - buf;
727 return ret;
728}
729
730static struct file_operations proc_kpagecount_operations = {
731 .llseek = mem_lseek,
732 .read = kpagecount_read,
733};
734
735/* /proc/kpageflags - an array exposing page flags
736 *
737 * Each entry is a u64 representing the corresponding
738 * physical page flags.
739 */
740
741/* These macros are used to decouple internal flags from exported ones */
742
743#define KPF_LOCKED 0
744#define KPF_ERROR 1
745#define KPF_REFERENCED 2
746#define KPF_UPTODATE 3
747#define KPF_DIRTY 4
748#define KPF_LRU 5
749#define KPF_ACTIVE 6
750#define KPF_SLAB 7
751#define KPF_WRITEBACK 8
752#define KPF_RECLAIM 9
753#define KPF_BUDDY 10
754
755#define kpf_copy_bit(flags, srcpos, dstpos) (((flags >> srcpos) & 1) << dstpos)
756
757static ssize_t kpageflags_read(struct file *file, char __user *buf,
758 size_t count, loff_t *ppos)
759{
760 u64 __user *out = (u64 __user *)buf;
761 struct page *ppage;
762 unsigned long src = *ppos;
763 unsigned long pfn;
764 ssize_t ret = 0;
765 u64 kflags, uflags;
766
767 pfn = src / KPMSIZE;
768 count = min_t(unsigned long, count, (max_pfn * KPMSIZE) - src);
769 if (src & KPMMASK || count & KPMMASK)
770 return -EIO;
771
772 while (count > 0) {
773 ppage = NULL;
774 if (pfn_valid(pfn))
775 ppage = pfn_to_page(pfn);
776 pfn++;
777 if (!ppage)
778 kflags = 0;
779 else
780 kflags = ppage->flags;
781
 782 uflags = kpf_copy_bit(kflags, KPF_LOCKED, PG_locked) |
783 kpf_copy_bit(kflags, KPF_ERROR, PG_error) |
784 kpf_copy_bit(kflags, KPF_REFERENCED, PG_referenced) |
785 kpf_copy_bit(kflags, KPF_UPTODATE, PG_uptodate) |
786 kpf_copy_bit(kflags, KPF_DIRTY, PG_dirty) |
787 kpf_copy_bit(kflags, KPF_LRU, PG_lru) |
788 kpf_copy_bit(kflags, KPF_ACTIVE, PG_active) |
789 kpf_copy_bit(kflags, KPF_SLAB, PG_slab) |
790 kpf_copy_bit(kflags, KPF_WRITEBACK, PG_writeback) |
791 kpf_copy_bit(kflags, KPF_RECLAIM, PG_reclaim) |
792 kpf_copy_bit(kflags, KPF_BUDDY, PG_buddy);
793
794 if (put_user(uflags, out++)) {
795 ret = -EFAULT;
796 break;
797 }
798
799 count -= KPMSIZE;
800 }
801
802 *ppos += (char __user *)out - buf;
803 if (!ret)
804 ret = (char __user *)out - buf;
805 return ret;
806}
807
808static struct file_operations proc_kpageflags_operations = {
809 .llseek = mem_lseek,
810 .read = kpageflags_read,
811};
812#endif /* CONFIG_PROC_PAGE_MONITOR */
813
678struct proc_dir_entry *proc_root_kcore; 814struct proc_dir_entry *proc_root_kcore;
679 815
680void create_seq_entry(char *name, mode_t mode, const struct file_operations *f) 816void create_seq_entry(char *name, mode_t mode, const struct file_operations *f)
@@ -755,6 +891,10 @@ void __init proc_misc_init(void)
755 (size_t)high_memory - PAGE_OFFSET + PAGE_SIZE; 891 (size_t)high_memory - PAGE_OFFSET + PAGE_SIZE;
756 } 892 }
757#endif 893#endif
894#ifdef CONFIG_PROC_PAGE_MONITOR
895 create_seq_entry("kpagecount", S_IRUSR, &proc_kpagecount_operations);
896 create_seq_entry("kpageflags", S_IRUSR, &proc_kpageflags_operations);
897#endif
758#ifdef CONFIG_PROC_VMCORE 898#ifdef CONFIG_PROC_VMCORE
759 proc_vmcore = create_proc_entry("vmcore", S_IRUSR, NULL); 899 proc_vmcore = create_proc_entry("vmcore", S_IRUSR, NULL);
760 if (proc_vmcore) 900 if (proc_vmcore)
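
A hedged userspace sketch of how the new /proc/kpagecount array might be sampled: each entry is a u64 page count indexed by physical page frame number, so the file offset is simply pfn * 8. The pfn below is made up, and root access is assumed given the S_IRUSR mode; /proc/kpageflags is read the same way but returns the KPF_* bit vector defined above.

	#include <stdio.h>
	#include <stdint.h>
	#include <fcntl.h>
	#include <unistd.h>

	int main(void)
	{
		unsigned long pfn = 0x1000;	/* hypothetical page frame number */
		uint64_t count;
		int fd = open("/proc/kpagecount", O_RDONLY);

		if (fd < 0)
			return 1;
		/* one 8-byte entry per pfn, so seek to pfn * sizeof(u64) */
		if (pread(fd, &count, sizeof(count), pfn * sizeof(count)) ==
		    (ssize_t)sizeof(count))
			printf("pfn %#lx reference count: %llu\n", pfn,
			       (unsigned long long)count);
		close(fd);
		return 0;
	}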
diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
index 4823c9677fac..14e9b5aaf863 100644
--- a/fs/proc/proc_net.c
+++ b/fs/proc/proc_net.c
@@ -67,12 +67,7 @@ EXPORT_SYMBOL_GPL(seq_release_net);
67struct proc_dir_entry *proc_net_fops_create(struct net *net, 67struct proc_dir_entry *proc_net_fops_create(struct net *net,
68 const char *name, mode_t mode, const struct file_operations *fops) 68 const char *name, mode_t mode, const struct file_operations *fops)
69{ 69{
70 struct proc_dir_entry *res; 70 return proc_create(name, mode, net->proc_net, fops);
71
72 res = create_proc_entry(name, mode, net->proc_net);
73 if (res)
74 res->proc_fops = fops;
75 return res;
76} 71}
77EXPORT_SYMBOL_GPL(proc_net_fops_create); 72EXPORT_SYMBOL_GPL(proc_net_fops_create);
78 73
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index 4e57fcf85982..614c34b6d1c2 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -9,7 +9,7 @@
9 9
10static struct dentry_operations proc_sys_dentry_operations; 10static struct dentry_operations proc_sys_dentry_operations;
11static const struct file_operations proc_sys_file_operations; 11static const struct file_operations proc_sys_file_operations;
12static struct inode_operations proc_sys_inode_operations; 12static const struct inode_operations proc_sys_inode_operations;
13 13
14static void proc_sys_refresh_inode(struct inode *inode, struct ctl_table *table) 14static void proc_sys_refresh_inode(struct inode *inode, struct ctl_table *table)
15{ 15{
@@ -407,7 +407,7 @@ static int proc_sys_permission(struct inode *inode, int mask, struct nameidata *
407 if (!nd || !depth) 407 if (!nd || !depth)
408 goto out; 408 goto out;
409 409
410 dentry = nd->dentry; 410 dentry = nd->path.dentry;
411 table = do_proc_sys_lookup(dentry->d_parent, &dentry->d_name, &head); 411 table = do_proc_sys_lookup(dentry->d_parent, &dentry->d_name, &head);
412 412
413 /* If the entry does not exist deny permission */ 413 /* If the entry does not exist deny permission */
@@ -446,7 +446,7 @@ static const struct file_operations proc_sys_file_operations = {
446 .readdir = proc_sys_readdir, 446 .readdir = proc_sys_readdir,
447}; 447};
448 448
449static struct inode_operations proc_sys_inode_operations = { 449static const struct inode_operations proc_sys_inode_operations = {
450 .lookup = proc_sys_lookup, 450 .lookup = proc_sys_lookup,
451 .permission = proc_sys_permission, 451 .permission = proc_sys_permission,
452 .setattr = proc_sys_setattr, 452 .setattr = proc_sys_setattr,
diff --git a/fs/proc/proc_tty.c b/fs/proc/proc_tty.c
index 22846225acfa..49816e00b51a 100644
--- a/fs/proc/proc_tty.c
+++ b/fs/proc/proc_tty.c
@@ -15,9 +15,6 @@
15#include <linux/seq_file.h> 15#include <linux/seq_file.h>
16#include <linux/bitops.h> 16#include <linux/bitops.h>
17 17
18static int tty_ldiscs_read_proc(char *page, char **start, off_t off,
19 int count, int *eof, void *data);
20
21/* 18/*
22 * The /proc/tty directory inodes... 19 * The /proc/tty directory inodes...
23 */ 20 */
@@ -120,7 +117,7 @@ static void t_stop(struct seq_file *m, void *v)
120 mutex_unlock(&tty_mutex); 117 mutex_unlock(&tty_mutex);
121} 118}
122 119
123static struct seq_operations tty_drivers_op = { 120static const struct seq_operations tty_drivers_op = {
124 .start = t_start, 121 .start = t_start,
125 .next = t_next, 122 .next = t_next,
126 .stop = t_stop, 123 .stop = t_stop,
diff --git a/fs/proc/root.c b/fs/proc/root.c
index 81f99e691f99..ef0fb57fc9ef 100644
--- a/fs/proc/root.c
+++ b/fs/proc/root.c
@@ -232,6 +232,7 @@ void pid_ns_release_proc(struct pid_namespace *ns)
232EXPORT_SYMBOL(proc_symlink); 232EXPORT_SYMBOL(proc_symlink);
233EXPORT_SYMBOL(proc_mkdir); 233EXPORT_SYMBOL(proc_mkdir);
234EXPORT_SYMBOL(create_proc_entry); 234EXPORT_SYMBOL(create_proc_entry);
235EXPORT_SYMBOL(proc_create);
235EXPORT_SYMBOL(remove_proc_entry); 236EXPORT_SYMBOL(remove_proc_entry);
236EXPORT_SYMBOL(proc_root); 237EXPORT_SYMBOL(proc_root);
237EXPORT_SYMBOL(proc_root_fs); 238EXPORT_SYMBOL(proc_root_fs);
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 8043a3eab52c..49958cffbd8d 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -5,14 +5,18 @@
5#include <linux/highmem.h> 5#include <linux/highmem.h>
6#include <linux/ptrace.h> 6#include <linux/ptrace.h>
7#include <linux/pagemap.h> 7#include <linux/pagemap.h>
8#include <linux/ptrace.h>
8#include <linux/mempolicy.h> 9#include <linux/mempolicy.h>
10#include <linux/swap.h>
11#include <linux/swapops.h>
12#include <linux/seq_file.h>
9 13
10#include <asm/elf.h> 14#include <asm/elf.h>
11#include <asm/uaccess.h> 15#include <asm/uaccess.h>
12#include <asm/tlbflush.h> 16#include <asm/tlbflush.h>
13#include "internal.h" 17#include "internal.h"
14 18
15char *task_mem(struct mm_struct *mm, char *buffer) 19void task_mem(struct seq_file *m, struct mm_struct *mm)
16{ 20{
17 unsigned long data, text, lib; 21 unsigned long data, text, lib;
18 unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss; 22 unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;
@@ -34,7 +38,7 @@ char *task_mem(struct mm_struct *mm, char *buffer)
34 data = mm->total_vm - mm->shared_vm - mm->stack_vm; 38 data = mm->total_vm - mm->shared_vm - mm->stack_vm;
35 text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10; 39 text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10;
36 lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text; 40 lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text;
37 buffer += sprintf(buffer, 41 seq_printf(m,
38 "VmPeak:\t%8lu kB\n" 42 "VmPeak:\t%8lu kB\n"
39 "VmSize:\t%8lu kB\n" 43 "VmSize:\t%8lu kB\n"
40 "VmLck:\t%8lu kB\n" 44 "VmLck:\t%8lu kB\n"
@@ -53,7 +57,6 @@ char *task_mem(struct mm_struct *mm, char *buffer)
53 data << (PAGE_SHIFT-10), 57 data << (PAGE_SHIFT-10),
54 mm->stack_vm << (PAGE_SHIFT-10), text, lib, 58 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
55 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10); 59 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10);
56 return buffer;
57} 60}
58 61
59unsigned long task_vsize(struct mm_struct *mm) 62unsigned long task_vsize(struct mm_struct *mm)
@@ -72,7 +75,7 @@ int task_statm(struct mm_struct *mm, int *shared, int *text,
72 return mm->total_vm; 75 return mm->total_vm;
73} 76}
74 77
75int proc_exe_link(struct inode *inode, struct dentry **dentry, struct vfsmount **mnt) 78int proc_exe_link(struct inode *inode, struct path *path)
76{ 79{
77 struct vm_area_struct * vma; 80 struct vm_area_struct * vma;
78 int result = -ENOENT; 81 int result = -ENOENT;
@@ -95,8 +98,8 @@ int proc_exe_link(struct inode *inode, struct dentry **dentry, struct vfsmount *
95 } 98 }
96 99
97 if (vma) { 100 if (vma) {
98 *mnt = mntget(vma->vm_file->f_path.mnt); 101 *path = vma->vm_file->f_path;
99 *dentry = dget(vma->vm_file->f_path.dentry); 102 path_get(&vma->vm_file->f_path);
100 result = 0; 103 result = 0;
101 } 104 }
102 105
@@ -114,24 +117,124 @@ static void pad_len_spaces(struct seq_file *m, int len)
114 seq_printf(m, "%*c", len, ' '); 117 seq_printf(m, "%*c", len, ' ');
115} 118}
116 119
117struct mem_size_stats 120static void vma_stop(struct proc_maps_private *priv, struct vm_area_struct *vma)
118{ 121{
119 unsigned long resident; 122 if (vma && vma != priv->tail_vma) {
120 unsigned long shared_clean; 123 struct mm_struct *mm = vma->vm_mm;
121 unsigned long shared_dirty; 124 up_read(&mm->mmap_sem);
122 unsigned long private_clean; 125 mmput(mm);
123 unsigned long private_dirty; 126 }
124 unsigned long referenced; 127}
125};
126 128
127struct pmd_walker { 129static void *m_start(struct seq_file *m, loff_t *pos)
128 struct vm_area_struct *vma; 130{
129 void *private; 131 struct proc_maps_private *priv = m->private;
130 void (*action)(struct vm_area_struct *, pmd_t *, unsigned long, 132 unsigned long last_addr = m->version;
131 unsigned long, void *); 133 struct mm_struct *mm;
132}; 134 struct vm_area_struct *vma, *tail_vma = NULL;
135 loff_t l = *pos;
136
137 /* Clear the per syscall fields in priv */
138 priv->task = NULL;
139 priv->tail_vma = NULL;
140
141 /*
142 * We remember last_addr rather than next_addr to hit with
143 * mmap_cache most of the time. We have zero last_addr at
144 * the beginning and also after lseek. We will have -1 last_addr
145 * after the end of the vmas.
146 */
147
148 if (last_addr == -1UL)
149 return NULL;
150
151 priv->task = get_pid_task(priv->pid, PIDTYPE_PID);
152 if (!priv->task)
153 return NULL;
154
155 mm = mm_for_maps(priv->task);
156 if (!mm)
157 return NULL;
158
159 tail_vma = get_gate_vma(priv->task);
160 priv->tail_vma = tail_vma;
161
162 /* Start with last addr hint */
163 vma = find_vma(mm, last_addr);
164 if (last_addr && vma) {
165 vma = vma->vm_next;
166 goto out;
167 }
168
169 /*
170 * Check the vma index is within the range and do
171 * sequential scan until m_index.
172 */
173 vma = NULL;
174 if ((unsigned long)l < mm->map_count) {
175 vma = mm->mmap;
176 while (l-- && vma)
177 vma = vma->vm_next;
178 goto out;
179 }
180
181 if (l != mm->map_count)
182 tail_vma = NULL; /* After gate vma */
183
184out:
185 if (vma)
186 return vma;
187
188 /* End of vmas has been reached */
189 m->version = (tail_vma != NULL)? 0: -1UL;
190 up_read(&mm->mmap_sem);
191 mmput(mm);
192 return tail_vma;
193}
194
195static void *m_next(struct seq_file *m, void *v, loff_t *pos)
196{
197 struct proc_maps_private *priv = m->private;
198 struct vm_area_struct *vma = v;
199 struct vm_area_struct *tail_vma = priv->tail_vma;
200
201 (*pos)++;
202 if (vma && (vma != tail_vma) && vma->vm_next)
203 return vma->vm_next;
204 vma_stop(priv, vma);
205 return (vma != tail_vma)? tail_vma: NULL;
206}
133 207
134static int show_map_internal(struct seq_file *m, void *v, struct mem_size_stats *mss) 208static void m_stop(struct seq_file *m, void *v)
209{
210 struct proc_maps_private *priv = m->private;
211 struct vm_area_struct *vma = v;
212
213 vma_stop(priv, vma);
214 if (priv->task)
215 put_task_struct(priv->task);
216}
217
218static int do_maps_open(struct inode *inode, struct file *file,
219 const struct seq_operations *ops)
220{
221 struct proc_maps_private *priv;
222 int ret = -ENOMEM;
223 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
224 if (priv) {
225 priv->pid = proc_pid(inode);
226 ret = seq_open(file, ops);
227 if (!ret) {
228 struct seq_file *m = file->private_data;
229 m->private = priv;
230 } else {
231 kfree(priv);
232 }
233 }
234 return ret;
235}
236
237static int show_map(struct seq_file *m, void *v)
135{ 238{
136 struct proc_maps_private *priv = m->private; 239 struct proc_maps_private *priv = m->private;
137 struct task_struct *task = priv->task; 240 struct task_struct *task = priv->task;
@@ -168,7 +271,7 @@ static int show_map_internal(struct seq_file *m, void *v, struct mem_size_stats
168 */ 271 */
169 if (file) { 272 if (file) {
170 pad_len_spaces(m, len); 273 pad_len_spaces(m, len);
171 seq_path(m, file->f_path.mnt, file->f_path.dentry, "\n"); 274 seq_path(m, &file->f_path, "\n");
172 } else { 275 } else {
173 const char *name = arch_vma_name(vma); 276 const char *name = arch_vma_name(vma);
174 if (!name) { 277 if (!name) {
@@ -191,41 +294,71 @@ static int show_map_internal(struct seq_file *m, void *v, struct mem_size_stats
191 } 294 }
192 seq_putc(m, '\n'); 295 seq_putc(m, '\n');
193 296
194 if (mss)
195 seq_printf(m,
196 "Size: %8lu kB\n"
197 "Rss: %8lu kB\n"
198 "Shared_Clean: %8lu kB\n"
199 "Shared_Dirty: %8lu kB\n"
200 "Private_Clean: %8lu kB\n"
201 "Private_Dirty: %8lu kB\n"
202 "Referenced: %8lu kB\n",
203 (vma->vm_end - vma->vm_start) >> 10,
204 mss->resident >> 10,
205 mss->shared_clean >> 10,
206 mss->shared_dirty >> 10,
207 mss->private_clean >> 10,
208 mss->private_dirty >> 10,
209 mss->referenced >> 10);
210
211 if (m->count < m->size) /* vma is copied successfully */ 297 if (m->count < m->size) /* vma is copied successfully */
212 m->version = (vma != get_gate_vma(task))? vma->vm_start: 0; 298 m->version = (vma != get_gate_vma(task))? vma->vm_start: 0;
213 return 0; 299 return 0;
214} 300}
215 301
216static int show_map(struct seq_file *m, void *v) 302static const struct seq_operations proc_pid_maps_op = {
303 .start = m_start,
304 .next = m_next,
305 .stop = m_stop,
306 .show = show_map
307};
308
309static int maps_open(struct inode *inode, struct file *file)
217{ 310{
218 return show_map_internal(m, v, NULL); 311 return do_maps_open(inode, file, &proc_pid_maps_op);
219} 312}
220 313
221static void smaps_pte_range(struct vm_area_struct *vma, pmd_t *pmd, 314const struct file_operations proc_maps_operations = {
222 unsigned long addr, unsigned long end, 315 .open = maps_open,
223 void *private) 316 .read = seq_read,
317 .llseek = seq_lseek,
318 .release = seq_release_private,
319};
320
321/*
322 * Proportional Set Size(PSS): my share of RSS.
323 *
324 * PSS of a process is the count of pages it has in memory, where each
325 * page is divided by the number of processes sharing it. So if a
326 * process has 1000 pages all to itself, and 1000 shared with one other
327 * process, its PSS will be 1500.
328 *
329 * To keep (accumulated) division errors low, we adopt a 64bit
330 * fixed-point pss counter to minimize division errors. So (pss >>
331 * PSS_SHIFT) would be the real byte count.
332 *
333 * A shift of 12 before division means (assuming 4K page size):
334 * - 1M 3-user-pages add up to 8KB errors;
335 * - supports mapcount up to 2^24, or 16M;
336 * - supports PSS up to 2^52 bytes, or 4PB.
337 */
338#define PSS_SHIFT 12
339
340#ifdef CONFIG_PROC_PAGE_MONITOR
341struct mem_size_stats
342{
343 struct vm_area_struct *vma;
344 unsigned long resident;
345 unsigned long shared_clean;
346 unsigned long shared_dirty;
347 unsigned long private_clean;
348 unsigned long private_dirty;
349 unsigned long referenced;
350 u64 pss;
351};
352
353static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
354 void *private)
224{ 355{
225 struct mem_size_stats *mss = private; 356 struct mem_size_stats *mss = private;
357 struct vm_area_struct *vma = mss->vma;
226 pte_t *pte, ptent; 358 pte_t *pte, ptent;
227 spinlock_t *ptl; 359 spinlock_t *ptl;
228 struct page *page; 360 struct page *page;
361 int mapcount;
229 362
230 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); 363 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
231 for (; addr != end; pte++, addr += PAGE_SIZE) { 364 for (; addr != end; pte++, addr += PAGE_SIZE) {
@@ -242,26 +375,88 @@ static void smaps_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
242 /* Accumulate the size in pages that have been accessed. */ 375 /* Accumulate the size in pages that have been accessed. */
243 if (pte_young(ptent) || PageReferenced(page)) 376 if (pte_young(ptent) || PageReferenced(page))
244 mss->referenced += PAGE_SIZE; 377 mss->referenced += PAGE_SIZE;
245 if (page_mapcount(page) >= 2) { 378 mapcount = page_mapcount(page);
379 if (mapcount >= 2) {
246 if (pte_dirty(ptent)) 380 if (pte_dirty(ptent))
247 mss->shared_dirty += PAGE_SIZE; 381 mss->shared_dirty += PAGE_SIZE;
248 else 382 else
249 mss->shared_clean += PAGE_SIZE; 383 mss->shared_clean += PAGE_SIZE;
384 mss->pss += (PAGE_SIZE << PSS_SHIFT) / mapcount;
250 } else { 385 } else {
251 if (pte_dirty(ptent)) 386 if (pte_dirty(ptent))
252 mss->private_dirty += PAGE_SIZE; 387 mss->private_dirty += PAGE_SIZE;
253 else 388 else
254 mss->private_clean += PAGE_SIZE; 389 mss->private_clean += PAGE_SIZE;
390 mss->pss += (PAGE_SIZE << PSS_SHIFT);
255 } 391 }
256 } 392 }
257 pte_unmap_unlock(pte - 1, ptl); 393 pte_unmap_unlock(pte - 1, ptl);
258 cond_resched(); 394 cond_resched();
395 return 0;
396}
397
398static struct mm_walk smaps_walk = { .pmd_entry = smaps_pte_range };
399
400static int show_smap(struct seq_file *m, void *v)
401{
402 struct vm_area_struct *vma = v;
403 struct mem_size_stats mss;
404 int ret;
405
406 memset(&mss, 0, sizeof mss);
407 mss.vma = vma;
408 if (vma->vm_mm && !is_vm_hugetlb_page(vma))
409 walk_page_range(vma->vm_mm, vma->vm_start, vma->vm_end,
410 &smaps_walk, &mss);
411
412 ret = show_map(m, v);
413 if (ret)
414 return ret;
415
416 seq_printf(m,
417 "Size: %8lu kB\n"
418 "Rss: %8lu kB\n"
419 "Pss: %8lu kB\n"
420 "Shared_Clean: %8lu kB\n"
421 "Shared_Dirty: %8lu kB\n"
422 "Private_Clean: %8lu kB\n"
423 "Private_Dirty: %8lu kB\n"
424 "Referenced: %8lu kB\n",
425 (vma->vm_end - vma->vm_start) >> 10,
426 mss.resident >> 10,
427 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
428 mss.shared_clean >> 10,
429 mss.shared_dirty >> 10,
430 mss.private_clean >> 10,
431 mss.private_dirty >> 10,
432 mss.referenced >> 10);
433
434 return ret;
435}
436
437static const struct seq_operations proc_pid_smaps_op = {
438 .start = m_start,
439 .next = m_next,
440 .stop = m_stop,
441 .show = show_smap
442};
443
444static int smaps_open(struct inode *inode, struct file *file)
445{
446 return do_maps_open(inode, file, &proc_pid_smaps_op);
259} 447}
260 448
261static void clear_refs_pte_range(struct vm_area_struct *vma, pmd_t *pmd, 449const struct file_operations proc_smaps_operations = {
262 unsigned long addr, unsigned long end, 450 .open = smaps_open,
263 void *private) 451 .read = seq_read,
452 .llseek = seq_lseek,
453 .release = seq_release_private,
454};
455
456static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
457 unsigned long end, void *private)
264{ 458{
459 struct vm_area_struct *vma = private;
265 pte_t *pte, ptent; 460 pte_t *pte, ptent;
266 spinlock_t *ptl; 461 spinlock_t *ptl;
267 struct page *page; 462 struct page *page;
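
To make the PSS fixed-point bookkeeping above concrete, a standalone arithmetic sketch mirroring the 1000-private / 1000-shared example in the comment; the 4K page size and the mapcounts are assumed for illustration only:

	#include <stdio.h>
	#include <stdint.h>

	#define PSS_SHIFT	12
	#define EX_PAGE_SIZE	4096ULL		/* assumed 4K page size */

	int main(void)
	{
		uint64_t pss = 0;
		int i;

		/* 1000 private pages: each contributes a whole page */
		for (i = 0; i < 1000; i++)
			pss += EX_PAGE_SIZE << PSS_SHIFT;
		/* 1000 pages shared with one other process: mapcount == 2 */
		for (i = 0; i < 1000; i++)
			pss += (EX_PAGE_SIZE << PSS_SHIFT) / 2;

		/* same conversion show_smap() uses: prints "Pss: 6000 kB",
		 * i.e. the 1500 effective pages from the comment's example */
		printf("Pss: %llu kB\n",
		       (unsigned long long)(pss >> (10 + PSS_SHIFT)));
		return 0;
	}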
@@ -282,235 +477,248 @@ static void clear_refs_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
282 } 477 }
283 pte_unmap_unlock(pte - 1, ptl); 478 pte_unmap_unlock(pte - 1, ptl);
284 cond_resched(); 479 cond_resched();
480 return 0;
285} 481}
286 482
287static inline void walk_pmd_range(struct pmd_walker *walker, pud_t *pud, 483static struct mm_walk clear_refs_walk = { .pmd_entry = clear_refs_pte_range };
288 unsigned long addr, unsigned long end) 484
485static ssize_t clear_refs_write(struct file *file, const char __user *buf,
486 size_t count, loff_t *ppos)
289{ 487{
290 pmd_t *pmd; 488 struct task_struct *task;
291 unsigned long next; 489 char buffer[PROC_NUMBUF], *end;
490 struct mm_struct *mm;
491 struct vm_area_struct *vma;
292 492
293 for (pmd = pmd_offset(pud, addr); addr != end; 493 memset(buffer, 0, sizeof(buffer));
294 pmd++, addr = next) { 494 if (count > sizeof(buffer) - 1)
295 next = pmd_addr_end(addr, end); 495 count = sizeof(buffer) - 1;
296 if (pmd_none_or_clear_bad(pmd)) 496 if (copy_from_user(buffer, buf, count))
297 continue; 497 return -EFAULT;
298 walker->action(walker->vma, pmd, addr, next, walker->private); 498 if (!simple_strtol(buffer, &end, 0))
499 return -EINVAL;
500 if (*end == '\n')
501 end++;
502 task = get_proc_task(file->f_path.dentry->d_inode);
503 if (!task)
504 return -ESRCH;
505 mm = get_task_mm(task);
506 if (mm) {
507 down_read(&mm->mmap_sem);
508 for (vma = mm->mmap; vma; vma = vma->vm_next)
509 if (!is_vm_hugetlb_page(vma))
510 walk_page_range(mm, vma->vm_start, vma->vm_end,
511 &clear_refs_walk, vma);
512 flush_tlb_mm(mm);
513 up_read(&mm->mmap_sem);
514 mmput(mm);
299 } 515 }
516 put_task_struct(task);
517 if (end - buffer == 0)
518 return -EIO;
519 return end - buffer;
300} 520}
301 521
302static inline void walk_pud_range(struct pmd_walker *walker, pgd_t *pgd, 522const struct file_operations proc_clear_refs_operations = {
303 unsigned long addr, unsigned long end) 523 .write = clear_refs_write,
304{ 524};
305 pud_t *pud;
306 unsigned long next;
307 525
308 for (pud = pud_offset(pgd, addr); addr != end; 526struct pagemapread {
309 pud++, addr = next) { 527 char __user *out, *end;
310 next = pud_addr_end(addr, end); 528};
311 if (pud_none_or_clear_bad(pud)) 529
312 continue; 530#define PM_ENTRY_BYTES sizeof(u64)
313 walk_pmd_range(walker, pud, addr, next); 531#define PM_RESERVED_BITS 3
532#define PM_RESERVED_OFFSET (64 - PM_RESERVED_BITS)
533#define PM_RESERVED_MASK (((1LL<<PM_RESERVED_BITS)-1) << PM_RESERVED_OFFSET)
534#define PM_SPECIAL(nr) (((nr) << PM_RESERVED_OFFSET) | PM_RESERVED_MASK)
535#define PM_NOT_PRESENT PM_SPECIAL(1LL)
536#define PM_SWAP PM_SPECIAL(2LL)
537#define PM_END_OF_BUFFER 1
538
539static int add_to_pagemap(unsigned long addr, u64 pfn,
540 struct pagemapread *pm)
541{
542 /*
543 * Make sure there's room in the buffer for an
544 * entire entry. Otherwise, only copy part of
545 * the pfn.
546 */
547 if (pm->out + PM_ENTRY_BYTES >= pm->end) {
548 if (copy_to_user(pm->out, &pfn, pm->end - pm->out))
549 return -EFAULT;
550 pm->out = pm->end;
551 return PM_END_OF_BUFFER;
314 } 552 }
553
554 if (put_user(pfn, pm->out))
555 return -EFAULT;
556 pm->out += PM_ENTRY_BYTES;
557 return 0;
315} 558}
316 559
317/* 560static int pagemap_pte_hole(unsigned long start, unsigned long end,
318 * walk_page_range - walk the page tables of a VMA with a callback 561 void *private)
319 * @vma - VMA to walk
320 * @action - callback invoked for every bottom-level (PTE) page table
321 * @private - private data passed to the callback function
322 *
323 * Recursively walk the page table for the memory area in a VMA, calling
324 * a callback for every bottom-level (PTE) page table.
325 */
326static inline void walk_page_range(struct vm_area_struct *vma,
327 void (*action)(struct vm_area_struct *,
328 pmd_t *, unsigned long,
329 unsigned long, void *),
330 void *private)
331{ 562{
332 unsigned long addr = vma->vm_start; 563 struct pagemapread *pm = private;
333 unsigned long end = vma->vm_end; 564 unsigned long addr;
334 struct pmd_walker walker = { 565 int err = 0;
335 .vma = vma, 566 for (addr = start; addr < end; addr += PAGE_SIZE) {
336 .private = private, 567 err = add_to_pagemap(addr, PM_NOT_PRESENT, pm);
337 .action = action, 568 if (err)
338 }; 569 break;
339 pgd_t *pgd;
340 unsigned long next;
341
342 for (pgd = pgd_offset(vma->vm_mm, addr); addr != end;
343 pgd++, addr = next) {
344 next = pgd_addr_end(addr, end);
345 if (pgd_none_or_clear_bad(pgd))
346 continue;
347 walk_pud_range(&walker, pgd, addr, next);
348 } 570 }
571 return err;
349} 572}
350 573
351static int show_smap(struct seq_file *m, void *v) 574u64 swap_pte_to_pagemap_entry(pte_t pte)
352{ 575{
353 struct vm_area_struct *vma = v; 576 swp_entry_t e = pte_to_swp_entry(pte);
354 struct mem_size_stats mss; 577 return PM_SWAP | swp_type(e) | (swp_offset(e) << MAX_SWAPFILES_SHIFT);
355
356 memset(&mss, 0, sizeof mss);
357 if (vma->vm_mm && !is_vm_hugetlb_page(vma))
358 walk_page_range(vma, smaps_pte_range, &mss);
359 return show_map_internal(m, v, &mss);
360} 578}
361 579
362void clear_refs_smap(struct mm_struct *mm) 580static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
581 void *private)
363{ 582{
364 struct vm_area_struct *vma; 583 struct pagemapread *pm = private;
584 pte_t *pte;
585 int err = 0;
586
587 for (; addr != end; addr += PAGE_SIZE) {
588 u64 pfn = PM_NOT_PRESENT;
589 pte = pte_offset_map(pmd, addr);
590 if (is_swap_pte(*pte))
591 pfn = swap_pte_to_pagemap_entry(*pte);
592 else if (pte_present(*pte))
593 pfn = pte_pfn(*pte);
594 /* unmap so we're not in atomic when we copy to userspace */
595 pte_unmap(pte);
596 err = add_to_pagemap(addr, pfn, pm);
597 if (err)
598 return err;
599 }
365 600
366 down_read(&mm->mmap_sem); 601 cond_resched();
367 for (vma = mm->mmap; vma; vma = vma->vm_next) 602
368 if (vma->vm_mm && !is_vm_hugetlb_page(vma)) 603 return err;
369 walk_page_range(vma, clear_refs_pte_range, NULL);
370 flush_tlb_mm(mm);
371 up_read(&mm->mmap_sem);
372} 604}
373 605
374static void *m_start(struct seq_file *m, loff_t *pos) 606static struct mm_walk pagemap_walk = {
607 .pmd_entry = pagemap_pte_range,
608 .pte_hole = pagemap_pte_hole
609};
610
611/*
612 * /proc/pid/pagemap - an array mapping virtual pages to pfns
613 *
614 * For each page in the address space, this file contains one 64-bit
615 * entry representing the corresponding physical page frame number
616 * (PFN) if the page is present. If there is a swap entry for the
617 * physical page, then an encoding of the swap file number and the
618 * page's offset into the swap file are returned. If no page is
619 * present at all, PM_NOT_PRESENT is returned. This allows determining
620 * precisely which pages are mapped (or in swap) and comparing mapped
621 * pages between processes.
622 *
623 * Efficient users of this interface will use /proc/pid/maps to
624 * determine which areas of memory are actually mapped and llseek to
625 * skip over unmapped regions.
626 */
627static ssize_t pagemap_read(struct file *file, char __user *buf,
628 size_t count, loff_t *ppos)
375{ 629{
376 struct proc_maps_private *priv = m->private; 630 struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
377 unsigned long last_addr = m->version; 631 struct page **pages, *page;
632 unsigned long uaddr, uend;
378 struct mm_struct *mm; 633 struct mm_struct *mm;
379 struct vm_area_struct *vma, *tail_vma = NULL; 634 struct pagemapread pm;
380 loff_t l = *pos; 635 int pagecount;
381 636 int ret = -ESRCH;
382 /* Clear the per syscall fields in priv */
383 priv->task = NULL;
384 priv->tail_vma = NULL;
385 637
386 /* 638 if (!task)
387 * We remember last_addr rather than next_addr to hit with 639 goto out;
388 * mmap_cache most of the time. We have zero last_addr at
389 * the beginning and also after lseek. We will have -1 last_addr
390 * after the end of the vmas.
391 */
392 640
393 if (last_addr == -1UL) 641 ret = -EACCES;
394 return NULL; 642 if (!ptrace_may_attach(task))
643 goto out;
395 644
396 priv->task = get_pid_task(priv->pid, PIDTYPE_PID); 645 ret = -EINVAL;
397 if (!priv->task) 646 /* file position must be aligned */
398 return NULL; 647 if (*ppos % PM_ENTRY_BYTES)
648 goto out;
399 649
400 mm = mm_for_maps(priv->task); 650 ret = 0;
651 mm = get_task_mm(task);
401 if (!mm) 652 if (!mm)
402 return NULL;
403
404 priv->tail_vma = tail_vma = get_gate_vma(priv->task);
405
406 /* Start with last addr hint */
407 if (last_addr && (vma = find_vma(mm, last_addr))) {
408 vma = vma->vm_next;
409 goto out; 653 goto out;
410 }
411 654
412 /* 655 ret = -ENOMEM;
413 * Check the vma index is within the range and do 656 uaddr = (unsigned long)buf & PAGE_MASK;
414 * sequential scan until m_index. 657 uend = (unsigned long)(buf + count);
415 */ 658 pagecount = (PAGE_ALIGN(uend) - uaddr) / PAGE_SIZE;
416 vma = NULL; 659 pages = kmalloc(pagecount * sizeof(struct page *), GFP_KERNEL);
417 if ((unsigned long)l < mm->map_count) { 660 if (!pages)
418 vma = mm->mmap; 661 goto out_task;
419 while (l-- && vma)
420 vma = vma->vm_next;
421 goto out;
422 }
423 662
424 if (l != mm->map_count) 663 down_read(&current->mm->mmap_sem);
425 tail_vma = NULL; /* After gate vma */ 664 ret = get_user_pages(current, current->mm, uaddr, pagecount,
665 1, 0, pages, NULL);
666 up_read(&current->mm->mmap_sem);
426 667
427out: 668 if (ret < 0)
428 if (vma) 669 goto out_free;
429 return vma;
430 670
431 /* End of vmas has been reached */ 671 pm.out = buf;
432 m->version = (tail_vma != NULL)? 0: -1UL; 672 pm.end = buf + count;
433 up_read(&mm->mmap_sem);
434 mmput(mm);
435 return tail_vma;
436}
437 673
438static void vma_stop(struct proc_maps_private *priv, struct vm_area_struct *vma) 674 if (!ptrace_may_attach(task)) {
439{ 675 ret = -EIO;
440 if (vma && vma != priv->tail_vma) { 676 } else {
441 struct mm_struct *mm = vma->vm_mm; 677 unsigned long src = *ppos;
442 up_read(&mm->mmap_sem); 678 unsigned long svpfn = src / PM_ENTRY_BYTES;
443 mmput(mm); 679 unsigned long start_vaddr = svpfn << PAGE_SHIFT;
680 unsigned long end_vaddr = TASK_SIZE_OF(task);
681
682 /* watch out for wraparound */
683 if (svpfn > TASK_SIZE_OF(task) >> PAGE_SHIFT)
684 start_vaddr = end_vaddr;
685
686 /*
687 * The odds are that this will stop walking way
688 * before end_vaddr, because the length of the
689 * user buffer is tracked in "pm", and the walk
690 * will stop when we hit the end of the buffer.
691 */
692 ret = walk_page_range(mm, start_vaddr, end_vaddr,
693 &pagemap_walk, &pm);
694 if (ret == PM_END_OF_BUFFER)
695 ret = 0;
696 /* don't need mmap_sem for these, but this looks cleaner */
697 *ppos += pm.out - buf;
698 if (!ret)
699 ret = pm.out - buf;
444 } 700 }
445}
446
447static void *m_next(struct seq_file *m, void *v, loff_t *pos)
448{
449 struct proc_maps_private *priv = m->private;
450 struct vm_area_struct *vma = v;
451 struct vm_area_struct *tail_vma = priv->tail_vma;
452
453 (*pos)++;
454 if (vma && (vma != tail_vma) && vma->vm_next)
455 return vma->vm_next;
456 vma_stop(priv, vma);
457 return (vma != tail_vma)? tail_vma: NULL;
458}
459
460static void m_stop(struct seq_file *m, void *v)
461{
462 struct proc_maps_private *priv = m->private;
463 struct vm_area_struct *vma = v;
464
465 vma_stop(priv, vma);
466 if (priv->task)
467 put_task_struct(priv->task);
468}
469
470static struct seq_operations proc_pid_maps_op = {
471 .start = m_start,
472 .next = m_next,
473 .stop = m_stop,
474 .show = show_map
475};
476
477static struct seq_operations proc_pid_smaps_op = {
478 .start = m_start,
479 .next = m_next,
480 .stop = m_stop,
481 .show = show_smap
482};
483 701
484static int do_maps_open(struct inode *inode, struct file *file, 702 for (; pagecount; pagecount--) {
485 struct seq_operations *ops) 703 page = pages[pagecount-1];
486{ 704 if (!PageReserved(page))
487 struct proc_maps_private *priv; 705 SetPageDirty(page);
488 int ret = -ENOMEM; 706 page_cache_release(page);
489 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
490 if (priv) {
491 priv->pid = proc_pid(inode);
492 ret = seq_open(file, ops);
493 if (!ret) {
494 struct seq_file *m = file->private_data;
495 m->private = priv;
496 } else {
497 kfree(priv);
498 }
499 } 707 }
708 mmput(mm);
709out_free:
710 kfree(pages);
711out_task:
712 put_task_struct(task);
713out:
500 return ret; 714 return ret;
501} 715}
502 716
503static int maps_open(struct inode *inode, struct file *file) 717const struct file_operations proc_pagemap_operations = {
504{ 718 .llseek = mem_lseek, /* borrow this */
505 return do_maps_open(inode, file, &proc_pid_maps_op); 719 .read = pagemap_read,
506}
507
508const struct file_operations proc_maps_operations = {
509 .open = maps_open,
510 .read = seq_read,
511 .llseek = seq_lseek,
512 .release = seq_release_private,
513}; 720};
721#endif /* CONFIG_PROC_PAGE_MONITOR */
514 722
515#ifdef CONFIG_NUMA 723#ifdef CONFIG_NUMA
516extern int show_numa_map(struct seq_file *m, void *v); 724extern int show_numa_map(struct seq_file *m, void *v);
@@ -526,7 +734,7 @@ static int show_numa_map_checked(struct seq_file *m, void *v)
526 return show_numa_map(m, v); 734 return show_numa_map(m, v);
527} 735}
528 736
529static struct seq_operations proc_pid_numa_maps_op = { 737static const struct seq_operations proc_pid_numa_maps_op = {
530 .start = m_start, 738 .start = m_start,
531 .next = m_next, 739 .next = m_next,
532 .stop = m_stop, 740 .stop = m_stop,
@@ -545,15 +753,3 @@ const struct file_operations proc_numa_maps_operations = {
545 .release = seq_release_private, 753 .release = seq_release_private,
546}; 754};
547#endif 755#endif
548
549static int smaps_open(struct inode *inode, struct file *file)
550{
551 return do_maps_open(inode, file, &proc_pid_smaps_op);
552}
553
554const struct file_operations proc_smaps_operations = {
555 .open = smaps_open,
556 .read = seq_read,
557 .llseek = seq_lseek,
558 .release = seq_release_private,
559};
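
A hedged userspace sketch of the pagemap access pattern the comment above describes: pick a mapped virtual address, seek to its 64-bit slot, and read back the raw entry (a pfn, PM_NOT_PRESENT, or PM_SWAP under this patch's encoding). It assumes the pagemap file is also wired up under /proc/self/ the same way it is added to tid_base_stuff earlier in this diff.

	#include <stdio.h>
	#include <stdint.h>
	#include <fcntl.h>
	#include <unistd.h>

	int main(void)
	{
		long page_size = sysconf(_SC_PAGESIZE);
		int probe = 42;			/* any mapped variable will do */
		uintptr_t vaddr = (uintptr_t)&probe;
		uint64_t entry;
		int fd = open("/proc/self/pagemap", O_RDONLY);

		if (fd < 0)
			return 1;
		/* one u64 entry per virtual page, indexed by vaddr / page size */
		if (pread(fd, &entry, sizeof(entry),
			  (off_t)(vaddr / page_size) * sizeof(entry)) ==
		    (ssize_t)sizeof(entry))
			printf("pagemap entry for %p: 0x%016llx\n",
			       (void *)vaddr, (unsigned long long)entry);
		close(fd);
		return 0;
	}

Efficient consumers would, as the comment notes, first parse /proc/pid/maps and then lseek over unmapped ranges rather than reading the whole address space.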
diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
index 1932c2ca3457..8011528518bd 100644
--- a/fs/proc/task_nommu.c
+++ b/fs/proc/task_nommu.c
@@ -12,7 +12,7 @@
12 * each process that owns it. Non-shared memory is counted 12 * each process that owns it. Non-shared memory is counted
13 * accurately. 13 * accurately.
14 */ 14 */
15char *task_mem(struct mm_struct *mm, char *buffer) 15void task_mem(struct seq_file *m, struct mm_struct *mm)
16{ 16{
17 struct vm_list_struct *vml; 17 struct vm_list_struct *vml;
18 unsigned long bytes = 0, sbytes = 0, slack = 0; 18 unsigned long bytes = 0, sbytes = 0, slack = 0;
@@ -58,14 +58,13 @@ char *task_mem(struct mm_struct *mm, char *buffer)
58 58
59 bytes += kobjsize(current); /* includes kernel stack */ 59 bytes += kobjsize(current); /* includes kernel stack */
60 60
61 buffer += sprintf(buffer, 61 seq_printf(m,
62 "Mem:\t%8lu bytes\n" 62 "Mem:\t%8lu bytes\n"
63 "Slack:\t%8lu bytes\n" 63 "Slack:\t%8lu bytes\n"
64 "Shared:\t%8lu bytes\n", 64 "Shared:\t%8lu bytes\n",
65 bytes, slack, sbytes); 65 bytes, slack, sbytes);
66 66
67 up_read(&mm->mmap_sem); 67 up_read(&mm->mmap_sem);
68 return buffer;
69} 68}
70 69
71unsigned long task_vsize(struct mm_struct *mm) 70unsigned long task_vsize(struct mm_struct *mm)
@@ -104,7 +103,7 @@ int task_statm(struct mm_struct *mm, int *shared, int *text,
104 return size; 103 return size;
105} 104}
106 105
107int proc_exe_link(struct inode *inode, struct dentry **dentry, struct vfsmount **mnt) 106int proc_exe_link(struct inode *inode, struct path *path)
108{ 107{
109 struct vm_list_struct *vml; 108 struct vm_list_struct *vml;
110 struct vm_area_struct *vma; 109 struct vm_area_struct *vma;
@@ -127,8 +126,8 @@ int proc_exe_link(struct inode *inode, struct dentry **dentry, struct vfsmount *
127 } 126 }
128 127
129 if (vma) { 128 if (vma) {
130 *mnt = mntget(vma->vm_file->f_path.mnt); 129 *path = vma->vm_file->f_path;
131 *dentry = dget(vma->vm_file->f_path.dentry); 130 path_get(&vma->vm_file->f_path);
132 result = 0; 131 result = 0;
133 } 132 }
134 133
@@ -199,7 +198,7 @@ static void *m_next(struct seq_file *m, void *_vml, loff_t *pos)
199 return vml ? vml->next : NULL; 198 return vml ? vml->next : NULL;
200} 199}
201 200
202static struct seq_operations proc_pid_maps_ops = { 201static const struct seq_operations proc_pid_maps_ops = {
203 .start = m_start, 202 .start = m_start,
204 .next = m_next, 203 .next = m_next,
205 .stop = m_stop, 204 .stop = m_stop,
diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
index 523e1098ae88..9ac0f5e064e0 100644
--- a/fs/proc/vmcore.c
+++ b/fs/proc/vmcore.c
@@ -10,7 +10,6 @@
10#include <linux/mm.h> 10#include <linux/mm.h>
11#include <linux/proc_fs.h> 11#include <linux/proc_fs.h>
12#include <linux/user.h> 12#include <linux/user.h>
13#include <linux/a.out.h>
14#include <linux/elf.h> 13#include <linux/elf.h>
15#include <linux/elfcore.h> 14#include <linux/elfcore.h>
16#include <linux/highmem.h> 15#include <linux/highmem.h>
diff --git a/fs/qnx4/inode.c b/fs/qnx4/inode.c
index 638bdb963213..b31ab78052b3 100644
--- a/fs/qnx4/inode.c
+++ b/fs/qnx4/inode.c
@@ -125,7 +125,6 @@ static int qnx4_write_inode(struct inode *inode, int unused)
125static void qnx4_put_super(struct super_block *sb); 125static void qnx4_put_super(struct super_block *sb);
126static struct inode *qnx4_alloc_inode(struct super_block *sb); 126static struct inode *qnx4_alloc_inode(struct super_block *sb);
127static void qnx4_destroy_inode(struct inode *inode); 127static void qnx4_destroy_inode(struct inode *inode);
128static void qnx4_read_inode(struct inode *);
129static int qnx4_remount(struct super_block *sb, int *flags, char *data); 128static int qnx4_remount(struct super_block *sb, int *flags, char *data);
130static int qnx4_statfs(struct dentry *, struct kstatfs *); 129static int qnx4_statfs(struct dentry *, struct kstatfs *);
131 130
@@ -133,7 +132,6 @@ static const struct super_operations qnx4_sops =
133{ 132{
134 .alloc_inode = qnx4_alloc_inode, 133 .alloc_inode = qnx4_alloc_inode,
135 .destroy_inode = qnx4_destroy_inode, 134 .destroy_inode = qnx4_destroy_inode,
136 .read_inode = qnx4_read_inode,
137 .put_super = qnx4_put_super, 135 .put_super = qnx4_put_super,
138 .statfs = qnx4_statfs, 136 .statfs = qnx4_statfs,
139 .remount_fs = qnx4_remount, 137 .remount_fs = qnx4_remount,
@@ -357,6 +355,7 @@ static int qnx4_fill_super(struct super_block *s, void *data, int silent)
357 struct inode *root; 355 struct inode *root;
358 const char *errmsg; 356 const char *errmsg;
359 struct qnx4_sb_info *qs; 357 struct qnx4_sb_info *qs;
358 int ret = -EINVAL;
360 359
361 qs = kzalloc(sizeof(struct qnx4_sb_info), GFP_KERNEL); 360 qs = kzalloc(sizeof(struct qnx4_sb_info), GFP_KERNEL);
362 if (!qs) 361 if (!qs)
@@ -396,12 +395,14 @@ static int qnx4_fill_super(struct super_block *s, void *data, int silent)
396 } 395 }
397 396
398 /* does root not have inode number QNX4_ROOT_INO ?? */ 397 /* does root not have inode number QNX4_ROOT_INO ?? */
399 root = iget(s, QNX4_ROOT_INO * QNX4_INODES_PER_BLOCK); 398 root = qnx4_iget(s, QNX4_ROOT_INO * QNX4_INODES_PER_BLOCK);
400 if (!root) { 399 if (IS_ERR(root)) {
401 printk("qnx4: get inode failed\n"); 400 printk("qnx4: get inode failed\n");
401 ret = PTR_ERR(root);
402 goto out; 402 goto out;
403 } 403 }
404 404
405 ret = -ENOMEM;
405 s->s_root = d_alloc_root(root); 406 s->s_root = d_alloc_root(root);
406 if (s->s_root == NULL) 407 if (s->s_root == NULL)
407 goto outi; 408 goto outi;
@@ -417,7 +418,7 @@ static int qnx4_fill_super(struct super_block *s, void *data, int silent)
417 outnobh: 418 outnobh:
418 kfree(qs); 419 kfree(qs);
419 s->s_fs_info = NULL; 420 s->s_fs_info = NULL;
420 return -EINVAL; 421 return ret;
421} 422}
422 423
423static void qnx4_put_super(struct super_block *sb) 424static void qnx4_put_super(struct super_block *sb)
@@ -462,29 +463,38 @@ static const struct address_space_operations qnx4_aops = {
462 .bmap = qnx4_bmap 463 .bmap = qnx4_bmap
463}; 464};
464 465
465static void qnx4_read_inode(struct inode *inode) 466struct inode *qnx4_iget(struct super_block *sb, unsigned long ino)
466{ 467{
467 struct buffer_head *bh; 468 struct buffer_head *bh;
468 struct qnx4_inode_entry *raw_inode; 469 struct qnx4_inode_entry *raw_inode;
469 int block, ino; 470 int block;
470 struct super_block *sb = inode->i_sb; 471 struct qnx4_inode_entry *qnx4_inode;
471 struct qnx4_inode_entry *qnx4_inode = qnx4_raw_inode(inode); 472 struct inode *inode;
472 473
473 ino = inode->i_ino; 474 inode = iget_locked(sb, ino);
475 if (!inode)
476 return ERR_PTR(-ENOMEM);
477 if (!(inode->i_state & I_NEW))
478 return inode;
479
480 qnx4_inode = qnx4_raw_inode(inode);
474 inode->i_mode = 0; 481 inode->i_mode = 0;
475 482
476 QNX4DEBUG(("Reading inode : [%d]\n", ino)); 483 QNX4DEBUG(("Reading inode : [%d]\n", ino));
477 if (!ino) { 484 if (!ino) {
478 printk("qnx4: bad inode number on dev %s: %d is out of range\n", 485 printk(KERN_ERR "qnx4: bad inode number on dev %s: %lu is "
486 "out of range\n",
479 sb->s_id, ino); 487 sb->s_id, ino);
480 return; 488 iget_failed(inode);
489 return ERR_PTR(-EIO);
481 } 490 }
482 block = ino / QNX4_INODES_PER_BLOCK; 491 block = ino / QNX4_INODES_PER_BLOCK;
483 492
484 if (!(bh = sb_bread(sb, block))) { 493 if (!(bh = sb_bread(sb, block))) {
485 printk("qnx4: major problem: unable to read inode from dev " 494 printk("qnx4: major problem: unable to read inode from dev "
486 "%s\n", sb->s_id); 495 "%s\n", sb->s_id);
487 return; 496 iget_failed(inode);
497 return ERR_PTR(-EIO);
488 } 498 }
489 raw_inode = ((struct qnx4_inode_entry *) bh->b_data) + 499 raw_inode = ((struct qnx4_inode_entry *) bh->b_data) +
490 (ino % QNX4_INODES_PER_BLOCK); 500 (ino % QNX4_INODES_PER_BLOCK);
@@ -515,9 +525,16 @@ static void qnx4_read_inode(struct inode *inode)
515 inode->i_op = &page_symlink_inode_operations; 525 inode->i_op = &page_symlink_inode_operations;
516 inode->i_mapping->a_ops = &qnx4_aops; 526 inode->i_mapping->a_ops = &qnx4_aops;
517 qnx4_i(inode)->mmu_private = inode->i_size; 527 qnx4_i(inode)->mmu_private = inode->i_size;
518 } else 528 } else {
519 printk("qnx4: bad inode %d on dev %s\n",ino,sb->s_id); 529 printk(KERN_ERR "qnx4: bad inode %lu on dev %s\n",
530 ino, sb->s_id);
531 iget_failed(inode);
532 brelse(bh);
533 return ERR_PTR(-EIO);
534 }
520 brelse(bh); 535 brelse(bh);
536 unlock_new_inode(inode);
537 return inode;
521} 538}
522 539
523static struct kmem_cache *qnx4_inode_cachep; 540static struct kmem_cache *qnx4_inode_cachep;
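
qnx4_iget() above, like the romfs and sysv conversions later in this patch, replaces the old ->read_inode super operation with the iget_locked() shape, and callers switch from NULL checks to IS_ERR(). A condensed sketch of that shape; example_iget() and example_read_raw() are placeholders, not real kernel symbols:

	#include <linux/fs.h>
	#include <linux/err.h>

	/* filesystem-specific on-disk read, assumed to return 0 or -errno */
	extern int example_read_raw(struct super_block *sb, unsigned long ino,
				    struct inode *inode);

	struct inode *example_iget(struct super_block *sb, unsigned long ino)
	{
		struct inode *inode = iget_locked(sb, ino);

		if (!inode)
			return ERR_PTR(-ENOMEM);
		if (!(inode->i_state & I_NEW))
			return inode;		/* already in the inode cache, fully set up */

		if (example_read_raw(sb, ino, inode)) {
			iget_failed(inode);	/* unhashes and releases the half-built inode */
			return ERR_PTR(-EIO);
		}

		unlock_new_inode(inode);
		return inode;
	}
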
diff --git a/fs/qnx4/namei.c b/fs/qnx4/namei.c
index 733cdf01d645..775eed3a4085 100644
--- a/fs/qnx4/namei.c
+++ b/fs/qnx4/namei.c
@@ -128,10 +128,12 @@ struct dentry * qnx4_lookup(struct inode *dir, struct dentry *dentry, struct nam
128 } 128 }
129 brelse(bh); 129 brelse(bh);
130 130
131 if ((foundinode = iget(dir->i_sb, ino)) == NULL) { 131 foundinode = qnx4_iget(dir->i_sb, ino);
132 if (IS_ERR(foundinode)) {
132 unlock_kernel(); 133 unlock_kernel();
133 QNX4DEBUG(("qnx4: lookup->iget -> NULL\n")); 134 QNX4DEBUG(("qnx4: lookup->iget -> error %ld\n",
134 return ERR_PTR(-EACCES); 135 PTR_ERR(foundinode)));
136 return ERR_CAST(foundinode);
135 } 137 }
136out: 138out:
137 unlock_kernel(); 139 unlock_kernel();
diff --git a/fs/quota.c b/fs/quota.c
index 99b24b52bfc8..84f28dd72116 100644
--- a/fs/quota.c
+++ b/fs/quota.c
@@ -341,11 +341,11 @@ static inline struct super_block *quotactl_block(const char __user *special)
341 char *tmp = getname(special); 341 char *tmp = getname(special);
342 342
343 if (IS_ERR(tmp)) 343 if (IS_ERR(tmp))
344 return ERR_PTR(PTR_ERR(tmp)); 344 return ERR_CAST(tmp);
345 bdev = lookup_bdev(tmp); 345 bdev = lookup_bdev(tmp);
346 putname(tmp); 346 putname(tmp);
347 if (IS_ERR(bdev)) 347 if (IS_ERR(bdev))
348 return ERR_PTR(PTR_ERR(bdev)); 348 return ERR_CAST(bdev);
349 sb = get_super(bdev); 349 sb = get_super(bdev);
350 bdput(bdev); 350 bdput(bdev);
351 if (!sb) 351 if (!sb)
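
Several hunks in this series (quota.c above, reiserfs, qnx4) replace the double conversion ERR_PTR(PTR_ERR(x)) with ERR_CAST(x), which keeps the encoded errno while only changing the pointer type. A small userspace re-implementation of the idea, simplified from the kernel's err.h helpers for illustration:

	#include <stdio.h>

	#define MAX_ERRNO 4095
	#define IS_ERR_VALUE(x) ((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

	static inline void *ERR_PTR(long error)      { return (void *)error; }
	static inline long  PTR_ERR(const void *ptr) { return (long)ptr; }
	static inline int   IS_ERR(const void *ptr)  { return IS_ERR_VALUE((unsigned long)ptr); }
	static inline void *ERR_CAST(const void *ptr){ return (void *)ptr; }

	int main(void)
	{
		void *p = ERR_PTR(-2);		/* pretend a lookup failed with -ENOENT */
		/* ERR_CAST keeps the encoded error while changing the pointer type,
		 * which is what the hunks above substitute for ERR_PTR(PTR_ERR(p)) */
		void *q = ERR_CAST(p);

		printf("IS_ERR(q)=%d err=%ld\n", IS_ERR(q), PTR_ERR(q));
		return 0;
	}
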
diff --git a/fs/read_write.c b/fs/read_write.c
index 1c177f29e1b7..49a98718ecdf 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -366,7 +366,6 @@ asmlinkage ssize_t sys_read(unsigned int fd, char __user * buf, size_t count)
366 366
367 return ret; 367 return ret;
368} 368}
369EXPORT_UNUSED_SYMBOL_GPL(sys_read); /* to be deleted for 2.6.25 */
370 369
371asmlinkage ssize_t sys_write(unsigned int fd, const char __user * buf, size_t count) 370asmlinkage ssize_t sys_write(unsigned int fd, const char __user * buf, size_t count)
372{ 371{
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index 231fd5ccadc5..57917932212e 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -1536,7 +1536,7 @@ static struct dentry *reiserfs_get_dentry(struct super_block *sb,
1536 if (!inode) 1536 if (!inode)
1537 inode = ERR_PTR(-ESTALE); 1537 inode = ERR_PTR(-ESTALE);
1538 if (IS_ERR(inode)) 1538 if (IS_ERR(inode))
1539 return ERR_PTR(PTR_ERR(inode)); 1539 return ERR_CAST(inode);
1540 result = d_alloc_anon(inode); 1540 result = d_alloc_anon(inode);
1541 if (!result) { 1541 if (!result) {
1542 iput(inode); 1542 iput(inode);
@@ -2143,7 +2143,7 @@ int reiserfs_truncate_file(struct inode *p_s_inode, int update_timestamps)
2143 /* if we are not on a block boundary */ 2143 /* if we are not on a block boundary */
2144 if (length) { 2144 if (length) {
2145 length = blocksize - length; 2145 length = blocksize - length;
2146 zero_user_page(page, offset, length, KM_USER0); 2146 zero_user(page, offset, length);
2147 if (buffer_mapped(bh) && bh->b_blocknr != 0) { 2147 if (buffer_mapped(bh) && bh->b_blocknr != 0) {
2148 mark_buffer_dirty(bh); 2148 mark_buffer_dirty(bh);
2149 } 2149 }
@@ -2367,7 +2367,7 @@ static int reiserfs_write_full_page(struct page *page,
2367 unlock_page(page); 2367 unlock_page(page);
2368 return 0; 2368 return 0;
2369 } 2369 }
2370 zero_user_page(page, last_offset, PAGE_CACHE_SIZE - last_offset, KM_USER0); 2370 zero_user_segment(page, last_offset, PAGE_CACHE_SIZE);
2371 } 2371 }
2372 bh = head; 2372 bh = head;
2373 block = page->index << (PAGE_CACHE_SHIFT - s->s_blocksize_bits); 2373 block = page->index << (PAGE_CACHE_SHIFT - s->s_blocksize_bits);
diff --git a/fs/reiserfs/prints.c b/fs/reiserfs/prints.c
index 5e7388b32d02..740bb8c0c1ae 100644
--- a/fs/reiserfs/prints.c
+++ b/fs/reiserfs/prints.c
@@ -575,6 +575,8 @@ void print_block(struct buffer_head *bh, ...) //int print_mode, int first, int l
575 printk 575 printk
576 ("Block %llu contains unformatted data\n", 576 ("Block %llu contains unformatted data\n",
577 (unsigned long long)bh->b_blocknr); 577 (unsigned long long)bh->b_blocknr);
578
579 va_end(args);
578} 580}
579 581
580static char print_tb_buf[2048]; 582static char print_tb_buf[2048];
diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
index 001144621672..8f86c52b30d8 100644
--- a/fs/reiserfs/procfs.c
+++ b/fs/reiserfs/procfs.c
@@ -444,7 +444,7 @@ static int r_show(struct seq_file *m, void *v)
444 return show(m, v); 444 return show(m, v);
445} 445}
446 446
447static struct seq_operations r_ops = { 447static const struct seq_operations r_ops = {
448 .start = r_start, 448 .start = r_start,
449 .next = r_next, 449 .next = r_next,
450 .stop = r_stop, 450 .stop = r_stop,
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index 5cd85fe5df5d..6841452e0dea 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -617,6 +617,7 @@ static const struct super_operations reiserfs_sops = {
617 .unlockfs = reiserfs_unlockfs, 617 .unlockfs = reiserfs_unlockfs,
618 .statfs = reiserfs_statfs, 618 .statfs = reiserfs_statfs,
619 .remount_fs = reiserfs_remount, 619 .remount_fs = reiserfs_remount,
620 .show_options = generic_show_options,
620#ifdef CONFIG_QUOTA 621#ifdef CONFIG_QUOTA
621 .quota_read = reiserfs_quota_read, 622 .quota_read = reiserfs_quota_read,
622 .quota_write = reiserfs_quota_write, 623 .quota_write = reiserfs_quota_write,
@@ -1138,6 +1139,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
1138 unsigned long safe_mask = 0; 1139 unsigned long safe_mask = 0;
1139 unsigned int commit_max_age = (unsigned int)-1; 1140 unsigned int commit_max_age = (unsigned int)-1;
1140 struct reiserfs_journal *journal = SB_JOURNAL(s); 1141 struct reiserfs_journal *journal = SB_JOURNAL(s);
1142 char *new_opts = kstrdup(arg, GFP_KERNEL);
1141 int err; 1143 int err;
1142#ifdef CONFIG_QUOTA 1144#ifdef CONFIG_QUOTA
1143 int i; 1145 int i;
@@ -1153,7 +1155,8 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
1153 REISERFS_SB(s)->s_qf_names[i] = NULL; 1155 REISERFS_SB(s)->s_qf_names[i] = NULL;
1154 } 1156 }
1155#endif 1157#endif
1156 return -EINVAL; 1158 err = -EINVAL;
1159 goto out_err;
1157 } 1160 }
1158 1161
1159 handle_attrs(s); 1162 handle_attrs(s);
@@ -1191,9 +1194,9 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
1191 } 1194 }
1192 1195
1193 if (blocks) { 1196 if (blocks) {
1194 int rc = reiserfs_resize(s, blocks); 1197 err = reiserfs_resize(s, blocks);
1195 if (rc != 0) 1198 if (err != 0)
1196 return rc; 1199 goto out_err;
1197 } 1200 }
1198 1201
1199 if (*mount_flags & MS_RDONLY) { 1202 if (*mount_flags & MS_RDONLY) {
@@ -1201,16 +1204,16 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
1201 /* remount read-only */ 1204 /* remount read-only */
1202 if (s->s_flags & MS_RDONLY) 1205 if (s->s_flags & MS_RDONLY)
1203 /* it is read-only already */ 1206 /* it is read-only already */
1204 return 0; 1207 goto out_ok;
1205 /* try to remount file system with read-only permissions */ 1208 /* try to remount file system with read-only permissions */
1206 if (sb_umount_state(rs) == REISERFS_VALID_FS 1209 if (sb_umount_state(rs) == REISERFS_VALID_FS
1207 || REISERFS_SB(s)->s_mount_state != REISERFS_VALID_FS) { 1210 || REISERFS_SB(s)->s_mount_state != REISERFS_VALID_FS) {
1208 return 0; 1211 goto out_ok;
1209 } 1212 }
1210 1213
1211 err = journal_begin(&th, s, 10); 1214 err = journal_begin(&th, s, 10);
1212 if (err) 1215 if (err)
1213 return err; 1216 goto out_err;
1214 1217
1215 /* Mounting a rw partition read-only. */ 1218 /* Mounting a rw partition read-only. */
1216 reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1); 1219 reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1);
@@ -1220,11 +1223,13 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
1220 /* remount read-write */ 1223 /* remount read-write */
1221 if (!(s->s_flags & MS_RDONLY)) { 1224 if (!(s->s_flags & MS_RDONLY)) {
1222 reiserfs_xattr_init(s, *mount_flags); 1225 reiserfs_xattr_init(s, *mount_flags);
1223 return 0; /* We are read-write already */ 1226 goto out_ok; /* We are read-write already */
1224 } 1227 }
1225 1228
1226 if (reiserfs_is_journal_aborted(journal)) 1229 if (reiserfs_is_journal_aborted(journal)) {
1227 return journal->j_errno; 1230 err = journal->j_errno;
1231 goto out_err;
1232 }
1228 1233
1229 handle_data_mode(s, mount_options); 1234 handle_data_mode(s, mount_options);
1230 handle_barrier_mode(s, mount_options); 1235 handle_barrier_mode(s, mount_options);
@@ -1232,7 +1237,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
1232 s->s_flags &= ~MS_RDONLY; /* now it is safe to call journal_begin */ 1237 s->s_flags &= ~MS_RDONLY; /* now it is safe to call journal_begin */
1233 err = journal_begin(&th, s, 10); 1238 err = journal_begin(&th, s, 10);
1234 if (err) 1239 if (err)
1235 return err; 1240 goto out_err;
1236 1241
1237 /* Mount a partition which is read-only, read-write */ 1242 /* Mount a partition which is read-only, read-write */
1238 reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1); 1243 reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1);
@@ -1247,7 +1252,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
1247 SB_JOURNAL(s)->j_must_wait = 1; 1252 SB_JOURNAL(s)->j_must_wait = 1;
1248 err = journal_end(&th, s, 10); 1253 err = journal_end(&th, s, 10);
1249 if (err) 1254 if (err)
1250 return err; 1255 goto out_err;
1251 s->s_dirt = 0; 1256 s->s_dirt = 0;
1252 1257
1253 if (!(*mount_flags & MS_RDONLY)) { 1258 if (!(*mount_flags & MS_RDONLY)) {
@@ -1255,7 +1260,14 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
1255 reiserfs_xattr_init(s, *mount_flags); 1260 reiserfs_xattr_init(s, *mount_flags);
1256 } 1261 }
1257 1262
1263out_ok:
1264 kfree(s->s_options);
1265 s->s_options = new_opts;
1258 return 0; 1266 return 0;
1267
1268out_err:
1269 kfree(new_opts);
1270 return err;
1259} 1271}
1260 1272
1261static int read_super_block(struct super_block *s, int offset) 1273static int read_super_block(struct super_block *s, int offset)
@@ -1559,6 +1571,8 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
1559 struct reiserfs_sb_info *sbi; 1571 struct reiserfs_sb_info *sbi;
1560 int errval = -EINVAL; 1572 int errval = -EINVAL;
1561 1573
1574 save_mount_options(s, data);
1575
1562 sbi = kzalloc(sizeof(struct reiserfs_sb_info), GFP_KERNEL); 1576 sbi = kzalloc(sizeof(struct reiserfs_sb_info), GFP_KERNEL);
1563 if (!sbi) { 1577 if (!sbi) {
1564 errval = -ENOMEM; 1578 errval = -ENOMEM;
@@ -2012,29 +2026,29 @@ static int reiserfs_quota_on(struct super_block *sb, int type, int format_id,
2012 if (err) 2026 if (err)
2013 return err; 2027 return err;
2014 /* Quotafile not on the same filesystem? */ 2028 /* Quotafile not on the same filesystem? */
2015 if (nd.mnt->mnt_sb != sb) { 2029 if (nd.path.mnt->mnt_sb != sb) {
2016 path_release(&nd); 2030 path_put(&nd.path);
2017 return -EXDEV; 2031 return -EXDEV;
2018 } 2032 }
2019 /* We must not pack tails for quota files on reiserfs for quota IO to work */ 2033 /* We must not pack tails for quota files on reiserfs for quota IO to work */
2020 if (!REISERFS_I(nd.dentry->d_inode)->i_flags & i_nopack_mask) { 2034 if (!REISERFS_I(nd.path.dentry->d_inode)->i_flags & i_nopack_mask) {
2021 reiserfs_warning(sb, 2035 reiserfs_warning(sb,
2022 "reiserfs: Quota file must have tail packing disabled."); 2036 "reiserfs: Quota file must have tail packing disabled.");
2023 path_release(&nd); 2037 path_put(&nd.path);
2024 return -EINVAL; 2038 return -EINVAL;
2025 } 2039 }
2026 /* Not journalling quota? No more tests needed... */ 2040 /* Not journalling quota? No more tests needed... */
2027 if (!REISERFS_SB(sb)->s_qf_names[USRQUOTA] && 2041 if (!REISERFS_SB(sb)->s_qf_names[USRQUOTA] &&
2028 !REISERFS_SB(sb)->s_qf_names[GRPQUOTA]) { 2042 !REISERFS_SB(sb)->s_qf_names[GRPQUOTA]) {
2029 path_release(&nd); 2043 path_put(&nd.path);
2030 return vfs_quota_on(sb, type, format_id, path); 2044 return vfs_quota_on(sb, type, format_id, path);
2031 } 2045 }
2032 /* Quotafile not of fs root? */ 2046 /* Quotafile not of fs root? */
2033 if (nd.dentry->d_parent->d_inode != sb->s_root->d_inode) 2047 if (nd.path.dentry->d_parent->d_inode != sb->s_root->d_inode)
2034 reiserfs_warning(sb, 2048 reiserfs_warning(sb,
2035 "reiserfs: Quota file not on filesystem root. " 2049 "reiserfs: Quota file not on filesystem root. "
2036 "Journalled quota will not work."); 2050 "Journalled quota will not work.");
2037 path_release(&nd); 2051 path_put(&nd.path);
2038 return vfs_quota_on(sb, type, format_id, path); 2052 return vfs_quota_on(sb, type, format_id, path);
2039} 2053}
2040 2054
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
index 1597f6b649e0..eba037b3338f 100644
--- a/fs/reiserfs/xattr.c
+++ b/fs/reiserfs/xattr.c
@@ -155,7 +155,7 @@ static struct dentry *get_xa_file_dentry(const struct inode *inode,
155 155
156 xadir = open_xa_dir(inode, flags); 156 xadir = open_xa_dir(inode, flags);
157 if (IS_ERR(xadir)) { 157 if (IS_ERR(xadir)) {
158 return ERR_PTR(PTR_ERR(xadir)); 158 return ERR_CAST(xadir);
159 } else if (xadir && !xadir->d_inode) { 159 } else if (xadir && !xadir->d_inode) {
160 dput(xadir); 160 dput(xadir);
161 return ERR_PTR(-ENODATA); 161 return ERR_PTR(-ENODATA);
@@ -164,7 +164,7 @@ static struct dentry *get_xa_file_dentry(const struct inode *inode,
164 xafile = lookup_one_len(name, xadir, strlen(name)); 164 xafile = lookup_one_len(name, xadir, strlen(name));
165 if (IS_ERR(xafile)) { 165 if (IS_ERR(xafile)) {
166 dput(xadir); 166 dput(xadir);
167 return ERR_PTR(PTR_ERR(xafile)); 167 return ERR_CAST(xafile);
168 } 168 }
169 169
170 if (xafile->d_inode) { /* file exists */ 170 if (xafile->d_inode) { /* file exists */
@@ -1084,7 +1084,7 @@ ssize_t reiserfs_listxattr(struct dentry * dentry, char *buffer, size_t size)
1084} 1084}
1085 1085
1086/* This is the implementation for the xattr plugin infrastructure */ 1086/* This is the implementation for the xattr plugin infrastructure */
1087static struct list_head xattr_handlers = LIST_HEAD_INIT(xattr_handlers); 1087static LIST_HEAD(xattr_handlers);
1088static DEFINE_RWLOCK(handler_lock); 1088static DEFINE_RWLOCK(handler_lock);
1089 1089
1090static struct reiserfs_xattr_handler *find_xattr_handler_prefix(const char 1090static struct reiserfs_xattr_handler *find_xattr_handler_prefix(const char
diff --git a/fs/romfs/inode.c b/fs/romfs/inode.c
index a49cf5b9a195..00b6f0a518c8 100644
--- a/fs/romfs/inode.c
+++ b/fs/romfs/inode.c
@@ -84,6 +84,8 @@ struct romfs_inode_info {
84 struct inode vfs_inode; 84 struct inode vfs_inode;
85}; 85};
86 86
87static struct inode *romfs_iget(struct super_block *, unsigned long);
88
87/* instead of private superblock data */ 89/* instead of private superblock data */
88static inline unsigned long romfs_maxsize(struct super_block *sb) 90static inline unsigned long romfs_maxsize(struct super_block *sb)
89{ 91{
@@ -117,7 +119,7 @@ static int romfs_fill_super(struct super_block *s, void *data, int silent)
117 struct buffer_head *bh; 119 struct buffer_head *bh;
118 struct romfs_super_block *rsb; 120 struct romfs_super_block *rsb;
119 struct inode *root; 121 struct inode *root;
120 int sz; 122 int sz, ret = -EINVAL;
121 123
122 /* I would parse the options here, but there are none.. :) */ 124 /* I would parse the options here, but there are none.. :) */
123 125
@@ -157,10 +159,13 @@ static int romfs_fill_super(struct super_block *s, void *data, int silent)
157 & ROMFH_MASK; 159 & ROMFH_MASK;
158 160
159 s->s_op = &romfs_ops; 161 s->s_op = &romfs_ops;
160 root = iget(s, sz); 162 root = romfs_iget(s, sz);
161 if (!root) 163 if (IS_ERR(root)) {
164 ret = PTR_ERR(root);
162 goto out; 165 goto out;
166 }
163 167
168 ret = -ENOMEM;
164 s->s_root = d_alloc_root(root); 169 s->s_root = d_alloc_root(root);
165 if (!s->s_root) 170 if (!s->s_root)
166 goto outiput; 171 goto outiput;
@@ -173,7 +178,7 @@ outiput:
173out: 178out:
174 brelse(bh); 179 brelse(bh);
175outnobh: 180outnobh:
176 return -EINVAL; 181 return ret;
177} 182}
178 183
179/* That's simple too. */ 184/* That's simple too. */
@@ -389,8 +394,11 @@ romfs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
389 if ((be32_to_cpu(ri.next) & ROMFH_TYPE) == ROMFH_HRD) 394 if ((be32_to_cpu(ri.next) & ROMFH_TYPE) == ROMFH_HRD)
390 offset = be32_to_cpu(ri.spec) & ROMFH_MASK; 395 offset = be32_to_cpu(ri.spec) & ROMFH_MASK;
391 396
392 if ((inode = iget(dir->i_sb, offset))) 397 inode = romfs_iget(dir->i_sb, offset);
393 goto outi; 398 if (IS_ERR(inode)) {
399 res = PTR_ERR(inode);
400 goto out;
401 }
394 402
395 /* 403 /*
396 * it's a bit funky, _lookup needs to return an error code 404 * it's a bit funky, _lookup needs to return an error code
@@ -402,7 +410,7 @@ romfs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
402 */ 410 */
403 411
404out0: inode = NULL; 412out0: inode = NULL;
405outi: res = 0; 413 res = 0;
406 d_add (dentry, inode); 414 d_add (dentry, inode);
407 415
408out: unlock_kernel(); 416out: unlock_kernel();
@@ -478,20 +486,29 @@ static mode_t romfs_modemap[] =
478 S_IFBLK+0600, S_IFCHR+0600, S_IFSOCK+0644, S_IFIFO+0644 486 S_IFBLK+0600, S_IFCHR+0600, S_IFSOCK+0644, S_IFIFO+0644
479}; 487};
480 488
481static void 489static struct inode *
482romfs_read_inode(struct inode *i) 490romfs_iget(struct super_block *sb, unsigned long ino)
483{ 491{
484 int nextfh, ino; 492 int nextfh;
485 struct romfs_inode ri; 493 struct romfs_inode ri;
494 struct inode *i;
495
496 ino &= ROMFH_MASK;
497 i = iget_locked(sb, ino);
498 if (!i)
499 return ERR_PTR(-ENOMEM);
500 if (!(i->i_state & I_NEW))
501 return i;
486 502
487 ino = i->i_ino & ROMFH_MASK;
488 i->i_mode = 0; 503 i->i_mode = 0;
489 504
490 /* Loop for finding the real hard link */ 505 /* Loop for finding the real hard link */
491 for(;;) { 506 for(;;) {
492 if (romfs_copyfrom(i, &ri, ino, ROMFH_SIZE) <= 0) { 507 if (romfs_copyfrom(i, &ri, ino, ROMFH_SIZE) <= 0) {
493 printk("romfs: read error for inode 0x%x\n", ino); 508 printk(KERN_ERR "romfs: read error for inode 0x%lx\n",
494 return; 509 ino);
510 iget_failed(i);
511 return ERR_PTR(-EIO);
495 } 512 }
496 /* XXX: do romfs_checksum here too (with name) */ 513 /* XXX: do romfs_checksum here too (with name) */
497 514
@@ -548,6 +565,8 @@ romfs_read_inode(struct inode *i)
548 init_special_inode(i, ino, 565 init_special_inode(i, ino,
549 MKDEV(nextfh>>16,nextfh&0xffff)); 566 MKDEV(nextfh>>16,nextfh&0xffff));
550 } 567 }
568 unlock_new_inode(i);
569 return i;
551} 570}
552 571
553static struct kmem_cache * romfs_inode_cachep; 572static struct kmem_cache * romfs_inode_cachep;
@@ -599,7 +618,6 @@ static int romfs_remount(struct super_block *sb, int *flags, char *data)
599static const struct super_operations romfs_ops = { 618static const struct super_operations romfs_ops = {
600 .alloc_inode = romfs_alloc_inode, 619 .alloc_inode = romfs_alloc_inode,
601 .destroy_inode = romfs_destroy_inode, 620 .destroy_inode = romfs_destroy_inode,
602 .read_inode = romfs_read_inode,
603 .statfs = romfs_statfs, 621 .statfs = romfs_statfs,
604 .remount_fs = romfs_remount, 622 .remount_fs = romfs_remount,
605}; 623};
diff --git a/fs/select.c b/fs/select.c
index 47f47925aea2..5633fe980781 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -739,7 +739,7 @@ asmlinkage long sys_poll(struct pollfd __user *ufds, unsigned int nfds,
739 timeout_jiffies = -1; 739 timeout_jiffies = -1;
740 else 740 else
741#endif 741#endif
742 timeout_jiffies = msecs_to_jiffies(timeout_msecs); 742 timeout_jiffies = msecs_to_jiffies(timeout_msecs) + 1;
743 } else { 743 } else {
744 /* Infinite (< 0) or no (0) timeout */ 744 /* Infinite (< 0) or no (0) timeout */
745 timeout_jiffies = timeout_msecs; 745 timeout_jiffies = timeout_msecs;
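
The +1 added to the converted poll timeout above compensates for the partially elapsed current tick: msecs_to_jiffies() rounds up to whole jiffies, but the first jiffy can expire almost immediately, so without the extra tick the wait could end up shorter than requested. A rough worked example; HZ and the simplified conversion are illustrative only:

	#include <stdio.h>

	int main(void)
	{
		unsigned int hz = 100;			/* 10 ms per jiffy on this config */
		unsigned int timeout_msecs = 15;
		unsigned int per_jiffy = 1000 / hz;
		unsigned long jiffies = (timeout_msecs + per_jiffy - 1) / per_jiffy;

		/* 15 ms -> 2 jiffies; scheduling 3 guarantees at least 15 ms even
		 * if the current tick is about to fire */
		printf("converted: %lu jiffies, scheduled: %lu\n", jiffies, jiffies + 1);
		return 0;
	}
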
diff --git a/fs/seq_file.c b/fs/seq_file.c
index ca71c115bdaa..853770274f20 100644
--- a/fs/seq_file.c
+++ b/fs/seq_file.c
@@ -342,13 +342,11 @@ int seq_printf(struct seq_file *m, const char *f, ...)
342} 342}
343EXPORT_SYMBOL(seq_printf); 343EXPORT_SYMBOL(seq_printf);
344 344
345int seq_path(struct seq_file *m, 345int seq_path(struct seq_file *m, struct path *path, char *esc)
346 struct vfsmount *mnt, struct dentry *dentry,
347 char *esc)
348{ 346{
349 if (m->count < m->size) { 347 if (m->count < m->size) {
350 char *s = m->buf + m->count; 348 char *s = m->buf + m->count;
351 char *p = d_path(dentry, mnt, s, m->size - m->count); 349 char *p = d_path(path, s, m->size - m->count);
352 if (!IS_ERR(p)) { 350 if (!IS_ERR(p)) {
353 while (s <= p) { 351 while (s <= p) {
354 char c = *p++; 352 char c = *p++;
diff --git a/fs/signalfd.c b/fs/signalfd.c
index 2d3e107da2d3..cb2b63ae0bf4 100644
--- a/fs/signalfd.c
+++ b/fs/signalfd.c
@@ -27,6 +27,7 @@
27#include <linux/list.h> 27#include <linux/list.h>
28#include <linux/anon_inodes.h> 28#include <linux/anon_inodes.h>
29#include <linux/signalfd.h> 29#include <linux/signalfd.h>
30#include <linux/syscalls.h>
30 31
31struct signalfd_ctx { 32struct signalfd_ctx {
32 sigset_t sigmask; 33 sigset_t sigmask;
diff --git a/fs/smbfs/inode.c b/fs/smbfs/inode.c
index 9416ead0c7aa..376ef3ee6ed7 100644
--- a/fs/smbfs/inode.c
+++ b/fs/smbfs/inode.c
@@ -500,6 +500,13 @@ static int smb_fill_super(struct super_block *sb, void *raw_data, int silent)
500 struct smb_fattr root; 500 struct smb_fattr root;
501 int ver; 501 int ver;
502 void *mem; 502 void *mem;
503 static int warn_count;
504
505 if (warn_count < 5) {
506 warn_count++;
507 printk(KERN_EMERG "smbfs is deprecated and will be removed"
508 " from the 2.6.27 kernel. Please migrate to cifs\n");
509 }
503 510
504 if (!raw_data) 511 if (!raw_data)
505 goto out_no_data; 512 goto out_no_data;
diff --git a/fs/smbfs/sock.c b/fs/smbfs/sock.c
index e48bd8235a8e..e37fe4deebd0 100644
--- a/fs/smbfs/sock.c
+++ b/fs/smbfs/sock.c
@@ -329,9 +329,8 @@ smb_receive(struct smb_sb_info *server, struct smb_request *req)
329 msg.msg_control = NULL; 329 msg.msg_control = NULL;
330 330
331 /* Dont repeat bytes and count available bufferspace */ 331 /* Dont repeat bytes and count available bufferspace */
332 rlen = smb_move_iov(&p, &num, iov, req->rq_bytes_recvd); 332 rlen = min_t(int, smb_move_iov(&p, &num, iov, req->rq_bytes_recvd),
333 if (req->rq_rlen < rlen) 333 (req->rq_rlen - req->rq_bytes_recvd));
334 rlen = req->rq_rlen;
335 334
336 result = kernel_recvmsg(sock, &msg, p, num, rlen, flags); 335 result = kernel_recvmsg(sock, &msg, p, num, rlen, flags);
337 336
diff --git a/fs/splice.c b/fs/splice.c
index 4ee49e86edde..9b559ee711a8 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -1179,6 +1179,9 @@ static int copy_from_user_mmap_sem(void *dst, const void __user *src, size_t n)
1179{ 1179{
1180 int partial; 1180 int partial;
1181 1181
1182 if (!access_ok(VERIFY_READ, src, n))
1183 return -EFAULT;
1184
1182 pagefault_disable(); 1185 pagefault_disable();
1183 partial = __copy_from_user_inatomic(dst, src, n); 1186 partial = __copy_from_user_inatomic(dst, src, n);
1184 pagefault_enable(); 1187 pagefault_enable();
@@ -1231,7 +1234,7 @@ static int get_iovec_page_array(const struct iovec __user *iov,
1231 if (unlikely(!len)) 1234 if (unlikely(!len))
1232 break; 1235 break;
1233 error = -EFAULT; 1236 error = -EFAULT;
1234 if (unlikely(!base)) 1237 if (!access_ok(VERIFY_READ, base, len))
1235 break; 1238 break;
1236 1239
1237 /* 1240 /*
@@ -1387,6 +1390,11 @@ static long vmsplice_to_user(struct file *file, const struct iovec __user *iov,
1387 break; 1390 break;
1388 } 1391 }
1389 1392
1393 if (unlikely(!access_ok(VERIFY_WRITE, base, len))) {
1394 error = -EFAULT;
1395 break;
1396 }
1397
1390 sd.len = 0; 1398 sd.len = 0;
1391 sd.total_len = len; 1399 sd.total_len = len;
1392 sd.flags = flags; 1400 sd.flags = flags;
diff --git a/fs/stat.c b/fs/stat.c
index 68510068a641..9cf41f719d50 100644
--- a/fs/stat.c
+++ b/fs/stat.c
@@ -62,8 +62,8 @@ int vfs_stat_fd(int dfd, char __user *name, struct kstat *stat)
62 62
63 error = __user_walk_fd(dfd, name, LOOKUP_FOLLOW, &nd); 63 error = __user_walk_fd(dfd, name, LOOKUP_FOLLOW, &nd);
64 if (!error) { 64 if (!error) {
65 error = vfs_getattr(nd.mnt, nd.dentry, stat); 65 error = vfs_getattr(nd.path.mnt, nd.path.dentry, stat);
66 path_release(&nd); 66 path_put(&nd.path);
67 } 67 }
68 return error; 68 return error;
69} 69}
@@ -82,8 +82,8 @@ int vfs_lstat_fd(int dfd, char __user *name, struct kstat *stat)
82 82
83 error = __user_walk_fd(dfd, name, 0, &nd); 83 error = __user_walk_fd(dfd, name, 0, &nd);
84 if (!error) { 84 if (!error) {
85 error = vfs_getattr(nd.mnt, nd.dentry, stat); 85 error = vfs_getattr(nd.path.mnt, nd.path.dentry, stat);
86 path_release(&nd); 86 path_put(&nd.path);
87 } 87 }
88 return error; 88 return error;
89} 89}
@@ -302,17 +302,18 @@ asmlinkage long sys_readlinkat(int dfd, const char __user *path,
302 302
303 error = __user_walk_fd(dfd, path, 0, &nd); 303 error = __user_walk_fd(dfd, path, 0, &nd);
304 if (!error) { 304 if (!error) {
305 struct inode * inode = nd.dentry->d_inode; 305 struct inode *inode = nd.path.dentry->d_inode;
306 306
307 error = -EINVAL; 307 error = -EINVAL;
308 if (inode->i_op && inode->i_op->readlink) { 308 if (inode->i_op && inode->i_op->readlink) {
309 error = security_inode_readlink(nd.dentry); 309 error = security_inode_readlink(nd.path.dentry);
310 if (!error) { 310 if (!error) {
311 touch_atime(nd.mnt, nd.dentry); 311 touch_atime(nd.path.mnt, nd.path.dentry);
312 error = inode->i_op->readlink(nd.dentry, buf, bufsiz); 312 error = inode->i_op->readlink(nd.path.dentry,
313 buf, bufsiz);
313 } 314 }
314 } 315 }
315 path_release(&nd); 316 path_put(&nd.path);
316 } 317 }
317 return error; 318 return error;
318} 319}
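
The fs/stat.c hunks above, like the reiserfs and seq_file changes earlier, are part of the nameidata conversion: the dentry/vfsmount pair now travels as nd.path and is taken and dropped as one unit with path_get()/path_put() instead of separate dget()/mntget() and path_release(). A compressed sketch of the calling pattern, with field names as used in this series; example_consume() is a stand-in for whatever the caller does with the result:

	#include <linux/fs.h>
	#include <linux/namei.h>

	extern void example_consume(struct vfsmount *mnt, struct dentry *dentry);

	static int example_lookup_and_use(const char *name)
	{
		struct nameidata nd;
		int error = path_lookup(name, LOOKUP_FOLLOW, &nd);

		if (error)
			return error;
		example_consume(nd.path.mnt, nd.path.dentry);	/* was nd.mnt / nd.dentry */
		path_put(&nd.path);				/* was path_release(&nd) */
		return 0;
	}
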
diff --git a/fs/super.c b/fs/super.c
index ceaf2e3d594c..88811f60c8de 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -105,6 +105,7 @@ static inline void destroy_super(struct super_block *s)
105{ 105{
106 security_sb_free(s); 106 security_sb_free(s);
107 kfree(s->s_subtype); 107 kfree(s->s_subtype);
108 kfree(s->s_options);
108 kfree(s); 109 kfree(s);
109} 110}
110 111
@@ -603,6 +604,7 @@ int do_remount_sb(struct super_block *sb, int flags, void *data, int force)
603 mark_files_ro(sb); 604 mark_files_ro(sb);
604 else if (!fs_may_remount_ro(sb)) 605 else if (!fs_may_remount_ro(sb))
605 return -EBUSY; 606 return -EBUSY;
607 DQUOT_OFF(sb);
606 } 608 }
607 609
608 if (sb->s_op->remount_fs) { 610 if (sb->s_op->remount_fs) {
diff --git a/fs/sysfs/group.c b/fs/sysfs/group.c
index 0871c3dadce1..477904915032 100644
--- a/fs/sysfs/group.c
+++ b/fs/sysfs/group.c
@@ -77,7 +77,12 @@ void sysfs_remove_group(struct kobject * kobj,
77 77
78 if (grp->name) { 78 if (grp->name) {
79 sd = sysfs_get_dirent(dir_sd, grp->name); 79 sd = sysfs_get_dirent(dir_sd, grp->name);
80 BUG_ON(!sd); 80 if (!sd) {
81 printk(KERN_WARNING "sysfs group %p not found for "
82 "kobject '%s'\n", grp, kobject_name(kobj));
83 WARN_ON(!sd);
84 return;
85 }
81 } else 86 } else
82 sd = sysfs_get(dir_sd); 87 sd = sysfs_get(dir_sd);
83 88
diff --git a/fs/sysv/inode.c b/fs/sysv/inode.c
index 81ec6c548c07..c5d60de0658f 100644
--- a/fs/sysv/inode.c
+++ b/fs/sysv/inode.c
@@ -169,20 +169,27 @@ void sysv_set_inode(struct inode *inode, dev_t rdev)
169 init_special_inode(inode, inode->i_mode, rdev); 169 init_special_inode(inode, inode->i_mode, rdev);
170} 170}
171 171
172static void sysv_read_inode(struct inode *inode) 172struct inode *sysv_iget(struct super_block *sb, unsigned int ino)
173{ 173{
174 struct super_block * sb = inode->i_sb;
175 struct sysv_sb_info * sbi = SYSV_SB(sb); 174 struct sysv_sb_info * sbi = SYSV_SB(sb);
176 struct buffer_head * bh; 175 struct buffer_head * bh;
177 struct sysv_inode * raw_inode; 176 struct sysv_inode * raw_inode;
178 struct sysv_inode_info * si; 177 struct sysv_inode_info * si;
179 unsigned int block, ino = inode->i_ino; 178 struct inode *inode;
179 unsigned int block;
180 180
181 if (!ino || ino > sbi->s_ninodes) { 181 if (!ino || ino > sbi->s_ninodes) {
182 printk("Bad inode number on dev %s: %d is out of range\n", 182 printk("Bad inode number on dev %s: %d is out of range\n",
183 inode->i_sb->s_id, ino); 183 sb->s_id, ino);
184 goto bad_inode; 184 return ERR_PTR(-EIO);
185 } 185 }
186
187 inode = iget_locked(sb, ino);
188 if (!inode)
189 return ERR_PTR(-ENOMEM);
190 if (!(inode->i_state & I_NEW))
191 return inode;
192
186 raw_inode = sysv_raw_inode(sb, ino, &bh); 193 raw_inode = sysv_raw_inode(sb, ino, &bh);
187 if (!raw_inode) { 194 if (!raw_inode) {
188 printk("Major problem: unable to read inode from dev %s\n", 195 printk("Major problem: unable to read inode from dev %s\n",
@@ -214,11 +221,12 @@ static void sysv_read_inode(struct inode *inode)
214 old_decode_dev(fs32_to_cpu(sbi, si->i_data[0]))); 221 old_decode_dev(fs32_to_cpu(sbi, si->i_data[0])));
215 else 222 else
216 sysv_set_inode(inode, 0); 223 sysv_set_inode(inode, 0);
217 return; 224 unlock_new_inode(inode);
225 return inode;
218 226
219bad_inode: 227bad_inode:
220 make_bad_inode(inode); 228 iget_failed(inode);
221 return; 229 return ERR_PTR(-EIO);
222} 230}
223 231
224static struct buffer_head * sysv_update_inode(struct inode * inode) 232static struct buffer_head * sysv_update_inode(struct inode * inode)
@@ -328,7 +336,6 @@ static void init_once(struct kmem_cache *cachep, void *p)
328const struct super_operations sysv_sops = { 336const struct super_operations sysv_sops = {
329 .alloc_inode = sysv_alloc_inode, 337 .alloc_inode = sysv_alloc_inode,
330 .destroy_inode = sysv_destroy_inode, 338 .destroy_inode = sysv_destroy_inode,
331 .read_inode = sysv_read_inode,
332 .write_inode = sysv_write_inode, 339 .write_inode = sysv_write_inode,
333 .delete_inode = sysv_delete_inode, 340 .delete_inode = sysv_delete_inode,
334 .put_super = sysv_put_super, 341 .put_super = sysv_put_super,
diff --git a/fs/sysv/namei.c b/fs/sysv/namei.c
index 6bd850b7641a..a1f1ef33e81c 100644
--- a/fs/sysv/namei.c
+++ b/fs/sysv/namei.c
@@ -53,9 +53,9 @@ static struct dentry *sysv_lookup(struct inode * dir, struct dentry * dentry, st
53 ino = sysv_inode_by_name(dentry); 53 ino = sysv_inode_by_name(dentry);
54 54
55 if (ino) { 55 if (ino) {
56 inode = iget(dir->i_sb, ino); 56 inode = sysv_iget(dir->i_sb, ino);
57 if (!inode) 57 if (IS_ERR(inode))
58 return ERR_PTR(-EACCES); 58 return ERR_CAST(inode);
59 } 59 }
60 d_add(dentry, inode); 60 d_add(dentry, inode);
61 return NULL; 61 return NULL;
diff --git a/fs/sysv/super.c b/fs/sysv/super.c
index 6f9707a1b954..5a903da54551 100644
--- a/fs/sysv/super.c
+++ b/fs/sysv/super.c
@@ -332,8 +332,8 @@ static int complete_read_super(struct super_block *sb, int silent, int size)
332 sb->s_magic = SYSV_MAGIC_BASE + sbi->s_type; 332 sb->s_magic = SYSV_MAGIC_BASE + sbi->s_type;
333 /* set up enough so that it can read an inode */ 333 /* set up enough so that it can read an inode */
334 sb->s_op = &sysv_sops; 334 sb->s_op = &sysv_sops;
335 root_inode = iget(sb,SYSV_ROOT_INO); 335 root_inode = sysv_iget(sb, SYSV_ROOT_INO);
336 if (!root_inode || is_bad_inode(root_inode)) { 336 if (IS_ERR(root_inode)) {
337 printk("SysV FS: get root inode failed\n"); 337 printk("SysV FS: get root inode failed\n");
338 return 0; 338 return 0;
339 } 339 }
diff --git a/fs/sysv/sysv.h b/fs/sysv/sysv.h
index 64c03bdf06a5..42d51d1c05cd 100644
--- a/fs/sysv/sysv.h
+++ b/fs/sysv/sysv.h
@@ -141,6 +141,7 @@ extern int __sysv_write_begin(struct file *file, struct address_space *mapping,
141 struct page **pagep, void **fsdata); 141 struct page **pagep, void **fsdata);
142 142
143/* inode.c */ 143/* inode.c */
144extern struct inode *sysv_iget(struct super_block *, unsigned int);
144extern int sysv_write_inode(struct inode *, int); 145extern int sysv_write_inode(struct inode *, int);
145extern int sysv_sync_inode(struct inode *); 146extern int sysv_sync_inode(struct inode *);
146extern int sysv_sync_file(struct file *, struct dentry *, int); 147extern int sysv_sync_file(struct file *, struct dentry *, int);
diff --git a/fs/timerfd.c b/fs/timerfd.c
index 61983f3b107c..10c80b59ec4b 100644
--- a/fs/timerfd.c
+++ b/fs/timerfd.c
@@ -25,13 +25,15 @@ struct timerfd_ctx {
25 struct hrtimer tmr; 25 struct hrtimer tmr;
26 ktime_t tintv; 26 ktime_t tintv;
27 wait_queue_head_t wqh; 27 wait_queue_head_t wqh;
28 u64 ticks;
28 int expired; 29 int expired;
30 int clockid;
29}; 31};
30 32
31/* 33/*
32 * This gets called when the timer event triggers. We set the "expired" 34 * This gets called when the timer event triggers. We set the "expired"
33 * flag, but we do not re-arm the timer (in case it's necessary, 35 * flag, but we do not re-arm the timer (in case it's necessary,
34 * tintv.tv64 != 0) until the timer is read. 36 * tintv.tv64 != 0) until the timer is accessed.
35 */ 37 */
36static enum hrtimer_restart timerfd_tmrproc(struct hrtimer *htmr) 38static enum hrtimer_restart timerfd_tmrproc(struct hrtimer *htmr)
37{ 39{
@@ -40,13 +42,24 @@ static enum hrtimer_restart timerfd_tmrproc(struct hrtimer *htmr)
40 42
41 spin_lock_irqsave(&ctx->wqh.lock, flags); 43 spin_lock_irqsave(&ctx->wqh.lock, flags);
42 ctx->expired = 1; 44 ctx->expired = 1;
45 ctx->ticks++;
43 wake_up_locked(&ctx->wqh); 46 wake_up_locked(&ctx->wqh);
44 spin_unlock_irqrestore(&ctx->wqh.lock, flags); 47 spin_unlock_irqrestore(&ctx->wqh.lock, flags);
45 48
46 return HRTIMER_NORESTART; 49 return HRTIMER_NORESTART;
47} 50}
48 51
49static void timerfd_setup(struct timerfd_ctx *ctx, int clockid, int flags, 52static ktime_t timerfd_get_remaining(struct timerfd_ctx *ctx)
53{
54 ktime_t now, remaining;
55
56 now = ctx->tmr.base->get_time();
57 remaining = ktime_sub(ctx->tmr.expires, now);
58
59 return remaining.tv64 < 0 ? ktime_set(0, 0): remaining;
60}
61
62static void timerfd_setup(struct timerfd_ctx *ctx, int flags,
50 const struct itimerspec *ktmr) 63 const struct itimerspec *ktmr)
51{ 64{
52 enum hrtimer_mode htmode; 65 enum hrtimer_mode htmode;
@@ -57,8 +70,9 @@ static void timerfd_setup(struct timerfd_ctx *ctx, int clockid, int flags,
57 70
58 texp = timespec_to_ktime(ktmr->it_value); 71 texp = timespec_to_ktime(ktmr->it_value);
59 ctx->expired = 0; 72 ctx->expired = 0;
73 ctx->ticks = 0;
60 ctx->tintv = timespec_to_ktime(ktmr->it_interval); 74 ctx->tintv = timespec_to_ktime(ktmr->it_interval);
61 hrtimer_init(&ctx->tmr, clockid, htmode); 75 hrtimer_init(&ctx->tmr, ctx->clockid, htmode);
62 ctx->tmr.expires = texp; 76 ctx->tmr.expires = texp;
63 ctx->tmr.function = timerfd_tmrproc; 77 ctx->tmr.function = timerfd_tmrproc;
64 if (texp.tv64 != 0) 78 if (texp.tv64 != 0)
@@ -83,7 +97,7 @@ static unsigned int timerfd_poll(struct file *file, poll_table *wait)
83 poll_wait(file, &ctx->wqh, wait); 97 poll_wait(file, &ctx->wqh, wait);
84 98
85 spin_lock_irqsave(&ctx->wqh.lock, flags); 99 spin_lock_irqsave(&ctx->wqh.lock, flags);
86 if (ctx->expired) 100 if (ctx->ticks)
87 events |= POLLIN; 101 events |= POLLIN;
88 spin_unlock_irqrestore(&ctx->wqh.lock, flags); 102 spin_unlock_irqrestore(&ctx->wqh.lock, flags);
89 103
@@ -102,11 +116,11 @@ static ssize_t timerfd_read(struct file *file, char __user *buf, size_t count,
102 return -EINVAL; 116 return -EINVAL;
103 spin_lock_irq(&ctx->wqh.lock); 117 spin_lock_irq(&ctx->wqh.lock);
104 res = -EAGAIN; 118 res = -EAGAIN;
105 if (!ctx->expired && !(file->f_flags & O_NONBLOCK)) { 119 if (!ctx->ticks && !(file->f_flags & O_NONBLOCK)) {
106 __add_wait_queue(&ctx->wqh, &wait); 120 __add_wait_queue(&ctx->wqh, &wait);
107 for (res = 0;;) { 121 for (res = 0;;) {
108 set_current_state(TASK_INTERRUPTIBLE); 122 set_current_state(TASK_INTERRUPTIBLE);
109 if (ctx->expired) { 123 if (ctx->ticks) {
110 res = 0; 124 res = 0;
111 break; 125 break;
112 } 126 }
@@ -121,22 +135,21 @@ static ssize_t timerfd_read(struct file *file, char __user *buf, size_t count,
121 __remove_wait_queue(&ctx->wqh, &wait); 135 __remove_wait_queue(&ctx->wqh, &wait);
122 __set_current_state(TASK_RUNNING); 136 __set_current_state(TASK_RUNNING);
123 } 137 }
124 if (ctx->expired) { 138 if (ctx->ticks) {
125 ctx->expired = 0; 139 ticks = ctx->ticks;
126 if (ctx->tintv.tv64 != 0) { 140 if (ctx->expired && ctx->tintv.tv64) {
127 /* 141 /*
128 * If tintv.tv64 != 0, this is a periodic timer that 142 * If tintv.tv64 != 0, this is a periodic timer that
129 * needs to be re-armed. We avoid doing it in the timer 143 * needs to be re-armed. We avoid doing it in the timer
130 * callback to avoid DoS attacks specifying a very 144 * callback to avoid DoS attacks specifying a very
131 * short timer period. 145 * short timer period.
132 */ 146 */
133 ticks = (u64) 147 ticks += hrtimer_forward_now(&ctx->tmr,
134 hrtimer_forward(&ctx->tmr, 148 ctx->tintv) - 1;
135 hrtimer_cb_get_time(&ctx->tmr),
136 ctx->tintv);
137 hrtimer_restart(&ctx->tmr); 149 hrtimer_restart(&ctx->tmr);
138 } else 150 }
139 ticks = 1; 151 ctx->expired = 0;
152 ctx->ticks = 0;
140 } 153 }
141 spin_unlock_irq(&ctx->wqh.lock); 154 spin_unlock_irq(&ctx->wqh.lock);
142 if (ticks) 155 if (ticks)
@@ -150,76 +163,132 @@ static const struct file_operations timerfd_fops = {
150 .read = timerfd_read, 163 .read = timerfd_read,
151}; 164};
152 165
153asmlinkage long sys_timerfd(int ufd, int clockid, int flags, 166static struct file *timerfd_fget(int fd)
154 const struct itimerspec __user *utmr) 167{
168 struct file *file;
169
170 file = fget(fd);
171 if (!file)
172 return ERR_PTR(-EBADF);
173 if (file->f_op != &timerfd_fops) {
174 fput(file);
175 return ERR_PTR(-EINVAL);
176 }
177
178 return file;
179}
180
181asmlinkage long sys_timerfd_create(int clockid, int flags)
155{ 182{
156 int error; 183 int error, ufd;
157 struct timerfd_ctx *ctx; 184 struct timerfd_ctx *ctx;
158 struct file *file; 185 struct file *file;
159 struct inode *inode; 186 struct inode *inode;
160 struct itimerspec ktmr;
161
162 if (copy_from_user(&ktmr, utmr, sizeof(ktmr)))
163 return -EFAULT;
164 187
188 if (flags)
189 return -EINVAL;
165 if (clockid != CLOCK_MONOTONIC && 190 if (clockid != CLOCK_MONOTONIC &&
166 clockid != CLOCK_REALTIME) 191 clockid != CLOCK_REALTIME)
167 return -EINVAL; 192 return -EINVAL;
193
194 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
195 if (!ctx)
196 return -ENOMEM;
197
198 init_waitqueue_head(&ctx->wqh);
199 ctx->clockid = clockid;
200 hrtimer_init(&ctx->tmr, clockid, HRTIMER_MODE_ABS);
201
202 error = anon_inode_getfd(&ufd, &inode, &file, "[timerfd]",
203 &timerfd_fops, ctx);
204 if (error) {
205 kfree(ctx);
206 return error;
207 }
208
209 return ufd;
210}
211
212asmlinkage long sys_timerfd_settime(int ufd, int flags,
213 const struct itimerspec __user *utmr,
214 struct itimerspec __user *otmr)
215{
216 struct file *file;
217 struct timerfd_ctx *ctx;
218 struct itimerspec ktmr, kotmr;
219
220 if (copy_from_user(&ktmr, utmr, sizeof(ktmr)))
221 return -EFAULT;
222
168 if (!timespec_valid(&ktmr.it_value) || 223 if (!timespec_valid(&ktmr.it_value) ||
169 !timespec_valid(&ktmr.it_interval)) 224 !timespec_valid(&ktmr.it_interval))
170 return -EINVAL; 225 return -EINVAL;
171 226
172 if (ufd == -1) { 227 file = timerfd_fget(ufd);
173 ctx = kmalloc(sizeof(*ctx), GFP_KERNEL); 228 if (IS_ERR(file))
174 if (!ctx) 229 return PTR_ERR(file);
175 return -ENOMEM; 230 ctx = file->private_data;
176
177 init_waitqueue_head(&ctx->wqh);
178
179 timerfd_setup(ctx, clockid, flags, &ktmr);
180
181 /*
182 * When we call this, the initialization must be complete, since
183 * anon_inode_getfd() will install the fd.
184 */
185 error = anon_inode_getfd(&ufd, &inode, &file, "[timerfd]",
186 &timerfd_fops, ctx);
187 if (error)
188 goto err_tmrcancel;
189 } else {
190 file = fget(ufd);
191 if (!file)
192 return -EBADF;
193 ctx = file->private_data;
194 if (file->f_op != &timerfd_fops) {
195 fput(file);
196 return -EINVAL;
197 }
198 /*
199 * We need to stop the existing timer before reprogramming
200 * it to the new values.
201 */
202 for (;;) {
203 spin_lock_irq(&ctx->wqh.lock);
204 if (hrtimer_try_to_cancel(&ctx->tmr) >= 0)
205 break;
206 spin_unlock_irq(&ctx->wqh.lock);
207 cpu_relax();
208 }
209 /*
210 * Re-program the timer to the new value ...
211 */
212 timerfd_setup(ctx, clockid, flags, &ktmr);
213 231
232 /*
233 * We need to stop the existing timer before reprogramming
234 * it to the new values.
235 */
236 for (;;) {
237 spin_lock_irq(&ctx->wqh.lock);
238 if (hrtimer_try_to_cancel(&ctx->tmr) >= 0)
239 break;
214 spin_unlock_irq(&ctx->wqh.lock); 240 spin_unlock_irq(&ctx->wqh.lock);
215 fput(file); 241 cpu_relax();
216 } 242 }
217 243
218 return ufd; 244 /*
245 * If the timer is expired and it's periodic, we need to advance it
246 * because the caller may want to know the previous expiration time.
247 * We do not update "ticks" and "expired" since the timer will be
248 * re-programmed again in the following timerfd_setup() call.
249 */
250 if (ctx->expired && ctx->tintv.tv64)
251 hrtimer_forward_now(&ctx->tmr, ctx->tintv);
219 252
220err_tmrcancel: 253 kotmr.it_value = ktime_to_timespec(timerfd_get_remaining(ctx));
221 hrtimer_cancel(&ctx->tmr); 254 kotmr.it_interval = ktime_to_timespec(ctx->tintv);
222 kfree(ctx); 255
223 return error; 256 /*
257 * Re-program the timer to the new value ...
258 */
259 timerfd_setup(ctx, flags, &ktmr);
260
261 spin_unlock_irq(&ctx->wqh.lock);
262 fput(file);
263 if (otmr && copy_to_user(otmr, &kotmr, sizeof(kotmr)))
264 return -EFAULT;
265
266 return 0;
267}
268
269asmlinkage long sys_timerfd_gettime(int ufd, struct itimerspec __user *otmr)
270{
271 struct file *file;
272 struct timerfd_ctx *ctx;
273 struct itimerspec kotmr;
274
275 file = timerfd_fget(ufd);
276 if (IS_ERR(file))
277 return PTR_ERR(file);
278 ctx = file->private_data;
279
280 spin_lock_irq(&ctx->wqh.lock);
281 if (ctx->expired && ctx->tintv.tv64) {
282 ctx->expired = 0;
283 ctx->ticks +=
284 hrtimer_forward_now(&ctx->tmr, ctx->tintv) - 1;
285 hrtimer_restart(&ctx->tmr);
286 }
287 kotmr.it_value = ktime_to_timespec(timerfd_get_remaining(ctx));
288 kotmr.it_interval = ktime_to_timespec(ctx->tintv);
289 spin_unlock_irq(&ctx->wqh.lock);
290 fput(file);
291
292 return copy_to_user(otmr, &kotmr, sizeof(kotmr)) ? -EFAULT: 0;
224} 293}
225 294
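
The rework above splits the old sys_timerfd() into timerfd_create/settime/gettime and makes reads return the number of expirations ("ticks") rather than a single expired flag. From userspace, via the glibc wrappers that later accompanied these syscalls (shown for illustration, not part of this patch), the API looks like:

	#include <sys/timerfd.h>
	#include <time.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		struct itimerspec its = {
			.it_value    = { .tv_sec = 1, .tv_nsec = 0 },	/* first expiry in 1s */
			.it_interval = { .tv_sec = 1, .tv_nsec = 0 },	/* then every 1s */
		};
		uint64_t ticks;
		int fd = timerfd_create(CLOCK_MONOTONIC, 0);

		if (fd < 0 || timerfd_settime(fd, 0, &its, NULL) < 0) {
			perror("timerfd");
			return 1;
		}
		/* each read blocks until at least one expiry, then reports how many */
		if (read(fd, &ticks, sizeof(ticks)) == sizeof(ticks))
			printf("expirations since last read: %llu\n",
			       (unsigned long long)ticks);
		close(fd);
		return 0;
	}
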
diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c
index ab26176f6b91..f855dcbbdfb8 100644
--- a/fs/udf/balloc.c
+++ b/fs/udf/balloc.c
@@ -28,15 +28,16 @@
28#include "udf_i.h" 28#include "udf_i.h"
29#include "udf_sb.h" 29#include "udf_sb.h"
30 30
31#define udf_clear_bit(nr,addr) ext2_clear_bit(nr,addr) 31#define udf_clear_bit(nr, addr) ext2_clear_bit(nr, addr)
32#define udf_set_bit(nr,addr) ext2_set_bit(nr,addr) 32#define udf_set_bit(nr, addr) ext2_set_bit(nr, addr)
33#define udf_test_bit(nr, addr) ext2_test_bit(nr, addr) 33#define udf_test_bit(nr, addr) ext2_test_bit(nr, addr)
34#define udf_find_first_one_bit(addr, size) find_first_one_bit(addr, size) 34#define udf_find_first_one_bit(addr, size) find_first_one_bit(addr, size)
35#define udf_find_next_one_bit(addr, size, offset) find_next_one_bit(addr, size, offset) 35#define udf_find_next_one_bit(addr, size, offset) \
36 find_next_one_bit(addr, size, offset)
36 37
37#define leBPL_to_cpup(x) leNUM_to_cpup(BITS_PER_LONG, x) 38#define leBPL_to_cpup(x) leNUM_to_cpup(BITS_PER_LONG, x)
38#define leNUM_to_cpup(x,y) xleNUM_to_cpup(x,y) 39#define leNUM_to_cpup(x, y) xleNUM_to_cpup(x, y)
39#define xleNUM_to_cpup(x,y) (le ## x ## _to_cpup(y)) 40#define xleNUM_to_cpup(x, y) (le ## x ## _to_cpup(y))
40#define uintBPL_t uint(BITS_PER_LONG) 41#define uintBPL_t uint(BITS_PER_LONG)
41#define uint(x) xuint(x) 42#define uint(x) xuint(x)
42#define xuint(x) __le ## x 43#define xuint(x) __le ## x
@@ -62,7 +63,8 @@ static inline int find_next_one_bit(void *addr, int size, int offset)
62 result += BITS_PER_LONG; 63 result += BITS_PER_LONG;
63 } 64 }
64 while (size & ~(BITS_PER_LONG - 1)) { 65 while (size & ~(BITS_PER_LONG - 1)) {
65 if ((tmp = leBPL_to_cpup(p++))) 66 tmp = leBPL_to_cpup(p++);
67 if (tmp)
66 goto found_middle; 68 goto found_middle;
67 result += BITS_PER_LONG; 69 result += BITS_PER_LONG;
68 size -= BITS_PER_LONG; 70 size -= BITS_PER_LONG;
@@ -88,12 +90,12 @@ static int read_block_bitmap(struct super_block *sb,
88 kernel_lb_addr loc; 90 kernel_lb_addr loc;
89 91
90 loc.logicalBlockNum = bitmap->s_extPosition; 92 loc.logicalBlockNum = bitmap->s_extPosition;
91 loc.partitionReferenceNum = UDF_SB_PARTITION(sb); 93 loc.partitionReferenceNum = UDF_SB(sb)->s_partition;
92 94
93 bh = udf_tread(sb, udf_get_lb_pblock(sb, loc, block)); 95 bh = udf_tread(sb, udf_get_lb_pblock(sb, loc, block));
94 if (!bh) { 96 if (!bh)
95 retval = -EIO; 97 retval = -EIO;
96 } 98
97 bitmap->s_block_bitmap[bitmap_nr] = bh; 99 bitmap->s_block_bitmap[bitmap_nr] = bh;
98 return retval; 100 return retval;
99} 101}
@@ -138,6 +140,20 @@ static inline int load_block_bitmap(struct super_block *sb,
138 return slot; 140 return slot;
139} 141}
140 142
143static bool udf_add_free_space(struct udf_sb_info *sbi,
144 u16 partition, u32 cnt)
145{
146 struct logicalVolIntegrityDesc *lvid;
147
148 if (sbi->s_lvid_bh == NULL)
149 return false;
150
151 lvid = (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data;
152 lvid->freeSpaceTable[partition] = cpu_to_le32(le32_to_cpu(
153 lvid->freeSpaceTable[partition]) + cnt);
154 return true;
155}
156
141static void udf_bitmap_free_blocks(struct super_block *sb, 157static void udf_bitmap_free_blocks(struct super_block *sb,
142 struct inode *inode, 158 struct inode *inode,
143 struct udf_bitmap *bitmap, 159 struct udf_bitmap *bitmap,
@@ -155,57 +171,58 @@ static void udf_bitmap_free_blocks(struct super_block *sb,
155 171
156 mutex_lock(&sbi->s_alloc_mutex); 172 mutex_lock(&sbi->s_alloc_mutex);
157 if (bloc.logicalBlockNum < 0 || 173 if (bloc.logicalBlockNum < 0 ||
158 (bloc.logicalBlockNum + count) > UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum)) { 174 (bloc.logicalBlockNum + count) >
175 sbi->s_partmaps[bloc.partitionReferenceNum].s_partition_len) {
159 udf_debug("%d < %d || %d + %d > %d\n", 176 udf_debug("%d < %d || %d + %d > %d\n",
160 bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count, 177 bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
161 UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum)); 178 sbi->s_partmaps[bloc.partitionReferenceNum].
179 s_partition_len);
162 goto error_return; 180 goto error_return;
163 } 181 }
164 182
165 block = bloc.logicalBlockNum + offset + (sizeof(struct spaceBitmapDesc) << 3); 183 block = bloc.logicalBlockNum + offset +
184 (sizeof(struct spaceBitmapDesc) << 3);
166 185
167do_more: 186 do {
168 overflow = 0; 187 overflow = 0;
169 block_group = block >> (sb->s_blocksize_bits + 3); 188 block_group = block >> (sb->s_blocksize_bits + 3);
170 bit = block % (sb->s_blocksize << 3); 189 bit = block % (sb->s_blocksize << 3);
171 190
172 /* 191 /*
173 * Check to see if we are freeing blocks across a group boundary. 192 * Check to see if we are freeing blocks across a group boundary.
174 */ 193 */
175 if (bit + count > (sb->s_blocksize << 3)) { 194 if (bit + count > (sb->s_blocksize << 3)) {
176 overflow = bit + count - (sb->s_blocksize << 3); 195 overflow = bit + count - (sb->s_blocksize << 3);
177 count -= overflow; 196 count -= overflow;
178 } 197 }
179 bitmap_nr = load_block_bitmap(sb, bitmap, block_group); 198 bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
180 if (bitmap_nr < 0) 199 if (bitmap_nr < 0)
181 goto error_return; 200 goto error_return;
182 201
183 bh = bitmap->s_block_bitmap[bitmap_nr]; 202 bh = bitmap->s_block_bitmap[bitmap_nr];
184 for (i = 0; i < count; i++) { 203 for (i = 0; i < count; i++) {
185 if (udf_set_bit(bit + i, bh->b_data)) { 204 if (udf_set_bit(bit + i, bh->b_data)) {
186 udf_debug("bit %ld already set\n", bit + i); 205 udf_debug("bit %ld already set\n", bit + i);
187 udf_debug("byte=%2x\n", ((char *)bh->b_data)[(bit + i) >> 3]); 206 udf_debug("byte=%2x\n",
188 } else { 207 ((char *)bh->b_data)[(bit + i) >> 3]);
189 if (inode) 208 } else {
190 DQUOT_FREE_BLOCK(inode, 1); 209 if (inode)
191 if (UDF_SB_LVIDBH(sb)) { 210 DQUOT_FREE_BLOCK(inode, 1);
192 UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)] = 211 udf_add_free_space(sbi, sbi->s_partition, 1);
193 cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)]) + 1);
194 } 212 }
195 } 213 }
196 } 214 mark_buffer_dirty(bh);
197 mark_buffer_dirty(bh); 215 if (overflow) {
198 if (overflow) { 216 block += count;
199 block += count; 217 count = overflow;
200 count = overflow; 218 }
201 goto do_more; 219 } while (overflow);
202 } 220
203error_return: 221error_return:
204 sb->s_dirt = 1; 222 sb->s_dirt = 1;
205 if (UDF_SB_LVIDBH(sb)) 223 if (sbi->s_lvid_bh)
206 mark_buffer_dirty(UDF_SB_LVIDBH(sb)); 224 mark_buffer_dirty(sbi->s_lvid_bh);
207 mutex_unlock(&sbi->s_alloc_mutex); 225 mutex_unlock(&sbi->s_alloc_mutex);
208 return;
209} 226}
210 227
211static int udf_bitmap_prealloc_blocks(struct super_block *sb, 228static int udf_bitmap_prealloc_blocks(struct super_block *sb,
@@ -219,53 +236,50 @@ static int udf_bitmap_prealloc_blocks(struct super_block *sb,
219 int bit, block, block_group, group_start; 236 int bit, block, block_group, group_start;
220 int nr_groups, bitmap_nr; 237 int nr_groups, bitmap_nr;
221 struct buffer_head *bh; 238 struct buffer_head *bh;
239 __u32 part_len;
222 240
223 mutex_lock(&sbi->s_alloc_mutex); 241 mutex_lock(&sbi->s_alloc_mutex);
224 if (first_block < 0 || first_block >= UDF_SB_PARTLEN(sb, partition)) 242 part_len = sbi->s_partmaps[partition].s_partition_len;
243 if (first_block < 0 || first_block >= part_len)
225 goto out; 244 goto out;
226 245
227 if (first_block + block_count > UDF_SB_PARTLEN(sb, partition)) 246 if (first_block + block_count > part_len)
228 block_count = UDF_SB_PARTLEN(sb, partition) - first_block; 247 block_count = part_len - first_block;
229 248
230repeat: 249 do {
231 nr_groups = (UDF_SB_PARTLEN(sb, partition) + 250 nr_groups = udf_compute_nr_groups(sb, partition);
232 (sizeof(struct spaceBitmapDesc) << 3) + 251 block = first_block + (sizeof(struct spaceBitmapDesc) << 3);
233 (sb->s_blocksize * 8) - 1) / (sb->s_blocksize * 8); 252 block_group = block >> (sb->s_blocksize_bits + 3);
234 block = first_block + (sizeof(struct spaceBitmapDesc) << 3); 253 group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);
235 block_group = block >> (sb->s_blocksize_bits + 3);
236 group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);
237 254
238 bitmap_nr = load_block_bitmap(sb, bitmap, block_group); 255 bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
239 if (bitmap_nr < 0) 256 if (bitmap_nr < 0)
240 goto out; 257 goto out;
241 bh = bitmap->s_block_bitmap[bitmap_nr]; 258 bh = bitmap->s_block_bitmap[bitmap_nr];
242 259
243 bit = block % (sb->s_blocksize << 3); 260 bit = block % (sb->s_blocksize << 3);
244 261
245 while (bit < (sb->s_blocksize << 3) && block_count > 0) { 262 while (bit < (sb->s_blocksize << 3) && block_count > 0) {
246 if (!udf_test_bit(bit, bh->b_data)) { 263 if (!udf_test_bit(bit, bh->b_data))
247 goto out; 264 goto out;
248 } else if (DQUOT_PREALLOC_BLOCK(inode, 1)) { 265 else if (DQUOT_PREALLOC_BLOCK(inode, 1))
249 goto out; 266 goto out;
250 } else if (!udf_clear_bit(bit, bh->b_data)) { 267 else if (!udf_clear_bit(bit, bh->b_data)) {
251 udf_debug("bit already cleared for block %d\n", bit); 268 udf_debug("bit already cleared for block %d\n", bit);
252 DQUOT_FREE_BLOCK(inode, 1); 269 DQUOT_FREE_BLOCK(inode, 1);
253 goto out; 270 goto out;
271 }
272 block_count--;
273 alloc_count++;
274 bit++;
275 block++;
254 } 276 }
255 block_count--; 277 mark_buffer_dirty(bh);
256 alloc_count++; 278 } while (block_count > 0);
257 bit++; 279
258 block++;
259 }
260 mark_buffer_dirty(bh);
261 if (block_count > 0)
262 goto repeat;
263out: 280out:
264 if (UDF_SB_LVIDBH(sb)) { 281 if (udf_add_free_space(sbi, partition, -alloc_count))
265 UDF_SB_LVID(sb)->freeSpaceTable[partition] = 282 mark_buffer_dirty(sbi->s_lvid_bh);
266 cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[partition]) - alloc_count);
267 mark_buffer_dirty(UDF_SB_LVIDBH(sb));
268 }
269 sb->s_dirt = 1; 283 sb->s_dirt = 1;
270 mutex_unlock(&sbi->s_alloc_mutex); 284 mutex_unlock(&sbi->s_alloc_mutex);
271 return alloc_count; 285 return alloc_count;
@@ -287,7 +301,7 @@ static int udf_bitmap_new_block(struct super_block *sb,
287 mutex_lock(&sbi->s_alloc_mutex); 301 mutex_lock(&sbi->s_alloc_mutex);
288 302
289repeat: 303repeat:
290 if (goal < 0 || goal >= UDF_SB_PARTLEN(sb, partition)) 304 if (goal < 0 || goal >= sbi->s_partmaps[partition].s_partition_len)
291 goal = 0; 305 goal = 0;
292 306
293 nr_groups = bitmap->s_nr_groups; 307 nr_groups = bitmap->s_nr_groups;
@@ -312,14 +326,16 @@ repeat:
312 if (bit < end_goal) 326 if (bit < end_goal)
313 goto got_block; 327 goto got_block;
314 328
315 ptr = memscan((char *)bh->b_data + (bit >> 3), 0xFF, sb->s_blocksize - ((bit + 7) >> 3)); 329 ptr = memscan((char *)bh->b_data + (bit >> 3), 0xFF,
330 sb->s_blocksize - ((bit + 7) >> 3));
316 newbit = (ptr - ((char *)bh->b_data)) << 3; 331 newbit = (ptr - ((char *)bh->b_data)) << 3;
317 if (newbit < sb->s_blocksize << 3) { 332 if (newbit < sb->s_blocksize << 3) {
318 bit = newbit; 333 bit = newbit;
319 goto search_back; 334 goto search_back;
320 } 335 }
321 336
322 newbit = udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3, bit); 337 newbit = udf_find_next_one_bit(bh->b_data,
338 sb->s_blocksize << 3, bit);
323 if (newbit < sb->s_blocksize << 3) { 339 if (newbit < sb->s_blocksize << 3) {
324 bit = newbit; 340 bit = newbit;
325 goto got_block; 341 goto got_block;
@@ -358,15 +374,20 @@ repeat:
358 if (bit < sb->s_blocksize << 3) 374 if (bit < sb->s_blocksize << 3)
359 goto search_back; 375 goto search_back;
360 else 376 else
361 bit = udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3, group_start << 3); 377 bit = udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3,
378 group_start << 3);
362 if (bit >= sb->s_blocksize << 3) { 379 if (bit >= sb->s_blocksize << 3) {
363 mutex_unlock(&sbi->s_alloc_mutex); 380 mutex_unlock(&sbi->s_alloc_mutex);
364 return 0; 381 return 0;
365 } 382 }
366 383
367search_back: 384search_back:
368 for (i = 0; i < 7 && bit > (group_start << 3) && udf_test_bit(bit - 1, bh->b_data); i++, bit--) 385 i = 0;
369 ; /* empty loop */ 386 while (i < 7 && bit > (group_start << 3) &&
387 udf_test_bit(bit - 1, bh->b_data)) {
388 ++i;
389 --bit;
390 }
370 391
371got_block: 392got_block:
372 393
@@ -389,11 +410,8 @@ got_block:
389 410
390 mark_buffer_dirty(bh); 411 mark_buffer_dirty(bh);
391 412
392 if (UDF_SB_LVIDBH(sb)) { 413 if (udf_add_free_space(sbi, partition, -1))
393 UDF_SB_LVID(sb)->freeSpaceTable[partition] = 414 mark_buffer_dirty(sbi->s_lvid_bh);
394 cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[partition]) - 1);
395 mark_buffer_dirty(UDF_SB_LVIDBH(sb));
396 }
397 sb->s_dirt = 1; 415 sb->s_dirt = 1;
398 mutex_unlock(&sbi->s_alloc_mutex); 416 mutex_unlock(&sbi->s_alloc_mutex);
399 *err = 0; 417 *err = 0;
@@ -418,56 +436,70 @@ static void udf_table_free_blocks(struct super_block *sb,
418 struct extent_position oepos, epos; 436 struct extent_position oepos, epos;
419 int8_t etype; 437 int8_t etype;
420 int i; 438 int i;
439 struct udf_inode_info *iinfo;
421 440
422 mutex_lock(&sbi->s_alloc_mutex); 441 mutex_lock(&sbi->s_alloc_mutex);
423 if (bloc.logicalBlockNum < 0 || 442 if (bloc.logicalBlockNum < 0 ||
424 (bloc.logicalBlockNum + count) > UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum)) { 443 (bloc.logicalBlockNum + count) >
444 sbi->s_partmaps[bloc.partitionReferenceNum].s_partition_len) {
425 udf_debug("%d < %d || %d + %d > %d\n", 445 udf_debug("%d < %d || %d + %d > %d\n",
426 bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count, 446 bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
427 UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum)); 447 sbi->s_partmaps[bloc.partitionReferenceNum].
448 s_partition_len);
428 goto error_return; 449 goto error_return;
429 } 450 }
430 451
431 /* We do this up front - There are some error conditions that could occure, 452 iinfo = UDF_I(table);
432 but.. oh well */ 453 /* We do this up front - There are some error conditions that
454 could occure, but.. oh well */
433 if (inode) 455 if (inode)
434 DQUOT_FREE_BLOCK(inode, count); 456 DQUOT_FREE_BLOCK(inode, count);
435 if (UDF_SB_LVIDBH(sb)) { 457 if (udf_add_free_space(sbi, sbi->s_partition, count))
436 UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)] = 458 mark_buffer_dirty(sbi->s_lvid_bh);
437 cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)]) + count);
438 mark_buffer_dirty(UDF_SB_LVIDBH(sb));
439 }
440 459
441 start = bloc.logicalBlockNum + offset; 460 start = bloc.logicalBlockNum + offset;
442 end = bloc.logicalBlockNum + offset + count - 1; 461 end = bloc.logicalBlockNum + offset + count - 1;
443 462
444 epos.offset = oepos.offset = sizeof(struct unallocSpaceEntry); 463 epos.offset = oepos.offset = sizeof(struct unallocSpaceEntry);
445 elen = 0; 464 elen = 0;
446 epos.block = oepos.block = UDF_I_LOCATION(table); 465 epos.block = oepos.block = iinfo->i_location;
447 epos.bh = oepos.bh = NULL; 466 epos.bh = oepos.bh = NULL;
448 467
449 while (count && 468 while (count &&
450 (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) { 469 (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
451 if (((eloc.logicalBlockNum + (elen >> sb->s_blocksize_bits)) == start)) { 470 if (((eloc.logicalBlockNum +
452 if ((0x3FFFFFFF - elen) < (count << sb->s_blocksize_bits)) { 471 (elen >> sb->s_blocksize_bits)) == start)) {
453 count -= ((0x3FFFFFFF - elen) >> sb->s_blocksize_bits); 472 if ((0x3FFFFFFF - elen) <
454 start += ((0x3FFFFFFF - elen) >> sb->s_blocksize_bits); 473 (count << sb->s_blocksize_bits)) {
455 elen = (etype << 30) | (0x40000000 - sb->s_blocksize); 474 uint32_t tmp = ((0x3FFFFFFF - elen) >>
475 sb->s_blocksize_bits);
476 count -= tmp;
477 start += tmp;
478 elen = (etype << 30) |
479 (0x40000000 - sb->s_blocksize);
456 } else { 480 } else {
457 elen = (etype << 30) | (elen + (count << sb->s_blocksize_bits)); 481 elen = (etype << 30) |
482 (elen +
483 (count << sb->s_blocksize_bits));
458 start += count; 484 start += count;
459 count = 0; 485 count = 0;
460 } 486 }
461 udf_write_aext(table, &oepos, eloc, elen, 1); 487 udf_write_aext(table, &oepos, eloc, elen, 1);
462 } else if (eloc.logicalBlockNum == (end + 1)) { 488 } else if (eloc.logicalBlockNum == (end + 1)) {
463 if ((0x3FFFFFFF - elen) < (count << sb->s_blocksize_bits)) { 489 if ((0x3FFFFFFF - elen) <
464 count -= ((0x3FFFFFFF - elen) >> sb->s_blocksize_bits); 490 (count << sb->s_blocksize_bits)) {
465 end -= ((0x3FFFFFFF - elen) >> sb->s_blocksize_bits); 491 uint32_t tmp = ((0x3FFFFFFF - elen) >>
466 eloc.logicalBlockNum -= ((0x3FFFFFFF - elen) >> sb->s_blocksize_bits); 492 sb->s_blocksize_bits);
467 elen = (etype << 30) | (0x40000000 - sb->s_blocksize); 493 count -= tmp;
494 end -= tmp;
495 eloc.logicalBlockNum -= tmp;
496 elen = (etype << 30) |
497 (0x40000000 - sb->s_blocksize);
468 } else { 498 } else {
469 eloc.logicalBlockNum = start; 499 eloc.logicalBlockNum = start;
470 elen = (etype << 30) | (elen + (count << sb->s_blocksize_bits)); 500 elen = (etype << 30) |
501 (elen +
502 (count << sb->s_blocksize_bits));
471 end -= count; 503 end -= count;
472 count = 0; 504 count = 0;
473 } 505 }
@@ -488,9 +520,9 @@ static void udf_table_free_blocks(struct super_block *sb,
488 520
489 if (count) { 521 if (count) {
490 /* 522 /*
491 * NOTE: we CANNOT use udf_add_aext here, as it can try to allocate 523 * NOTE: we CANNOT use udf_add_aext here, as it can try to
492 * a new block, and since we hold the super block lock already 524 * allocate a new block, and since we hold the super block
493 * very bad things would happen :) 525 * lock already very bad things would happen :)
494 * 526 *
495 * We copy the behavior of udf_add_aext, but instead of 527 * We copy the behavior of udf_add_aext, but instead of
496 * trying to allocate a new block close to the existing one, 528 * trying to allocate a new block close to the existing one,
@@ -509,11 +541,11 @@ static void udf_table_free_blocks(struct super_block *sb,
509 elen = EXT_RECORDED_ALLOCATED | 541 elen = EXT_RECORDED_ALLOCATED |
510 (count << sb->s_blocksize_bits); 542 (count << sb->s_blocksize_bits);
511 543
512 if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_SHORT) { 544 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
513 adsize = sizeof(short_ad); 545 adsize = sizeof(short_ad);
514 } else if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_LONG) { 546 else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
515 adsize = sizeof(long_ad); 547 adsize = sizeof(long_ad);
516 } else { 548 else {
517 brelse(oepos.bh); 549 brelse(oepos.bh);
518 brelse(epos.bh); 550 brelse(epos.bh);
519 goto error_return; 551 goto error_return;
@@ -531,56 +563,70 @@ static void udf_table_free_blocks(struct super_block *sb,
531 eloc.logicalBlockNum++; 563 eloc.logicalBlockNum++;
532 elen -= sb->s_blocksize; 564 elen -= sb->s_blocksize;
533 565
534 if (!(epos.bh = udf_tread(sb, udf_get_lb_pblock(sb, epos.block, 0)))) { 566 epos.bh = udf_tread(sb,
567 udf_get_lb_pblock(sb, epos.block, 0));
568 if (!epos.bh) {
535 brelse(oepos.bh); 569 brelse(oepos.bh);
536 goto error_return; 570 goto error_return;
537 } 571 }
538 aed = (struct allocExtDesc *)(epos.bh->b_data); 572 aed = (struct allocExtDesc *)(epos.bh->b_data);
539 aed->previousAllocExtLocation = cpu_to_le32(oepos.block.logicalBlockNum); 573 aed->previousAllocExtLocation =
574 cpu_to_le32(oepos.block.logicalBlockNum);
540 if (epos.offset + adsize > sb->s_blocksize) { 575 if (epos.offset + adsize > sb->s_blocksize) {
541 loffset = epos.offset; 576 loffset = epos.offset;
542 aed->lengthAllocDescs = cpu_to_le32(adsize); 577 aed->lengthAllocDescs = cpu_to_le32(adsize);
543 sptr = UDF_I_DATA(table) + epos.offset - adsize; 578 sptr = iinfo->i_ext.i_data + epos.offset
544 dptr = epos.bh->b_data + sizeof(struct allocExtDesc); 579 - adsize;
580 dptr = epos.bh->b_data +
581 sizeof(struct allocExtDesc);
545 memcpy(dptr, sptr, adsize); 582 memcpy(dptr, sptr, adsize);
546 epos.offset = sizeof(struct allocExtDesc) + adsize; 583 epos.offset = sizeof(struct allocExtDesc) +
584 adsize;
547 } else { 585 } else {
548 loffset = epos.offset + adsize; 586 loffset = epos.offset + adsize;
549 aed->lengthAllocDescs = cpu_to_le32(0); 587 aed->lengthAllocDescs = cpu_to_le32(0);
550 if (oepos.bh) { 588 if (oepos.bh) {
551 sptr = oepos.bh->b_data + epos.offset; 589 sptr = oepos.bh->b_data + epos.offset;
552 aed = (struct allocExtDesc *)oepos.bh->b_data; 590 aed = (struct allocExtDesc *)
591 oepos.bh->b_data;
553 aed->lengthAllocDescs = 592 aed->lengthAllocDescs =
554 cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize); 593 cpu_to_le32(le32_to_cpu(
594 aed->lengthAllocDescs) +
595 adsize);
555 } else { 596 } else {
556 sptr = UDF_I_DATA(table) + epos.offset; 597 sptr = iinfo->i_ext.i_data +
557 UDF_I_LENALLOC(table) += adsize; 598 epos.offset;
599 iinfo->i_lenAlloc += adsize;
558 mark_inode_dirty(table); 600 mark_inode_dirty(table);
559 } 601 }
560 epos.offset = sizeof(struct allocExtDesc); 602 epos.offset = sizeof(struct allocExtDesc);
561 } 603 }
562 if (UDF_SB_UDFREV(sb) >= 0x0200) 604 if (sbi->s_udfrev >= 0x0200)
563 udf_new_tag(epos.bh->b_data, TAG_IDENT_AED, 3, 1, 605 udf_new_tag(epos.bh->b_data, TAG_IDENT_AED,
564 epos.block.logicalBlockNum, sizeof(tag)); 606 3, 1, epos.block.logicalBlockNum,
607 sizeof(tag));
565 else 608 else
566 udf_new_tag(epos.bh->b_data, TAG_IDENT_AED, 2, 1, 609 udf_new_tag(epos.bh->b_data, TAG_IDENT_AED,
567 epos.block.logicalBlockNum, sizeof(tag)); 610 2, 1, epos.block.logicalBlockNum,
568 611 sizeof(tag));
569 switch (UDF_I_ALLOCTYPE(table)) { 612
570 case ICBTAG_FLAG_AD_SHORT: 613 switch (iinfo->i_alloc_type) {
571 sad = (short_ad *)sptr; 614 case ICBTAG_FLAG_AD_SHORT:
572 sad->extLength = cpu_to_le32( 615 sad = (short_ad *)sptr;
573 EXT_NEXT_EXTENT_ALLOCDECS | 616 sad->extLength = cpu_to_le32(
574 sb->s_blocksize); 617 EXT_NEXT_EXTENT_ALLOCDECS |
575 sad->extPosition = cpu_to_le32(epos.block.logicalBlockNum); 618 sb->s_blocksize);
576 break; 619 sad->extPosition =
577 case ICBTAG_FLAG_AD_LONG: 620 cpu_to_le32(epos.block.logicalBlockNum);
578 lad = (long_ad *)sptr; 621 break;
579 lad->extLength = cpu_to_le32( 622 case ICBTAG_FLAG_AD_LONG:
580 EXT_NEXT_EXTENT_ALLOCDECS | 623 lad = (long_ad *)sptr;
581 sb->s_blocksize); 624 lad->extLength = cpu_to_le32(
582 lad->extLocation = cpu_to_lelb(epos.block); 625 EXT_NEXT_EXTENT_ALLOCDECS |
583 break; 626 sb->s_blocksize);
627 lad->extLocation =
628 cpu_to_lelb(epos.block);
629 break;
584 } 630 }
585 if (oepos.bh) { 631 if (oepos.bh) {
586 udf_update_tag(oepos.bh->b_data, loffset); 632 udf_update_tag(oepos.bh->b_data, loffset);
@@ -590,16 +636,18 @@ static void udf_table_free_blocks(struct super_block *sb,
590 } 636 }
591 } 637 }
592 638
593 if (elen) { /* It's possible that stealing the block emptied the extent */ 639 /* It's possible that stealing the block emptied the extent */
640 if (elen) {
594 udf_write_aext(table, &epos, eloc, elen, 1); 641 udf_write_aext(table, &epos, eloc, elen, 1);
595 642
596 if (!epos.bh) { 643 if (!epos.bh) {
597 UDF_I_LENALLOC(table) += adsize; 644 iinfo->i_lenAlloc += adsize;
598 mark_inode_dirty(table); 645 mark_inode_dirty(table);
599 } else { 646 } else {
600 aed = (struct allocExtDesc *)epos.bh->b_data; 647 aed = (struct allocExtDesc *)epos.bh->b_data;
601 aed->lengthAllocDescs = 648 aed->lengthAllocDescs =
602 cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize); 649 cpu_to_le32(le32_to_cpu(
650 aed->lengthAllocDescs) + adsize);
603 udf_update_tag(epos.bh->b_data, epos.offset); 651 udf_update_tag(epos.bh->b_data, epos.offset);
604 mark_buffer_dirty(epos.bh); 652 mark_buffer_dirty(epos.bh);
605 } 653 }
@@ -626,20 +674,23 @@ static int udf_table_prealloc_blocks(struct super_block *sb,
626 kernel_lb_addr eloc; 674 kernel_lb_addr eloc;
627 struct extent_position epos; 675 struct extent_position epos;
628 int8_t etype = -1; 676 int8_t etype = -1;
677 struct udf_inode_info *iinfo;
629 678
630 if (first_block < 0 || first_block >= UDF_SB_PARTLEN(sb, partition)) 679 if (first_block < 0 ||
680 first_block >= sbi->s_partmaps[partition].s_partition_len)
631 return 0; 681 return 0;
632 682
633 if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_SHORT) 683 iinfo = UDF_I(table);
684 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
634 adsize = sizeof(short_ad); 685 adsize = sizeof(short_ad);
635 else if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_LONG) 686 else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
636 adsize = sizeof(long_ad); 687 adsize = sizeof(long_ad);
637 else 688 else
638 return 0; 689 return 0;
639 690
640 mutex_lock(&sbi->s_alloc_mutex); 691 mutex_lock(&sbi->s_alloc_mutex);
641 epos.offset = sizeof(struct unallocSpaceEntry); 692 epos.offset = sizeof(struct unallocSpaceEntry);
642 epos.block = UDF_I_LOCATION(table); 693 epos.block = iinfo->i_location;
643 epos.bh = NULL; 694 epos.bh = NULL;
644 eloc.logicalBlockNum = 0xFFFFFFFF; 695 eloc.logicalBlockNum = 0xFFFFFFFF;
645 696
@@ -654,26 +705,26 @@ static int udf_table_prealloc_blocks(struct super_block *sb,
654 epos.offset -= adsize; 705 epos.offset -= adsize;
655 706
656 alloc_count = (elen >> sb->s_blocksize_bits); 707 alloc_count = (elen >> sb->s_blocksize_bits);
657 if (inode && DQUOT_PREALLOC_BLOCK(inode, alloc_count > block_count ? block_count : alloc_count)) { 708 if (inode && DQUOT_PREALLOC_BLOCK(inode,
709 alloc_count > block_count ? block_count : alloc_count))
658 alloc_count = 0; 710 alloc_count = 0;
659 } else if (alloc_count > block_count) { 711 else if (alloc_count > block_count) {
660 alloc_count = block_count; 712 alloc_count = block_count;
661 eloc.logicalBlockNum += alloc_count; 713 eloc.logicalBlockNum += alloc_count;
662 elen -= (alloc_count << sb->s_blocksize_bits); 714 elen -= (alloc_count << sb->s_blocksize_bits);
663 udf_write_aext(table, &epos, eloc, (etype << 30) | elen, 1); 715 udf_write_aext(table, &epos, eloc,
664 } else { 716 (etype << 30) | elen, 1);
665 udf_delete_aext(table, epos, eloc, (etype << 30) | elen); 717 } else
666 } 718 udf_delete_aext(table, epos, eloc,
719 (etype << 30) | elen);
667 } else { 720 } else {
668 alloc_count = 0; 721 alloc_count = 0;
669 } 722 }
670 723
671 brelse(epos.bh); 724 brelse(epos.bh);
672 725
673 if (alloc_count && UDF_SB_LVIDBH(sb)) { 726 if (alloc_count && udf_add_free_space(sbi, partition, -alloc_count)) {
674 UDF_SB_LVID(sb)->freeSpaceTable[partition] = 727 mark_buffer_dirty(sbi->s_lvid_bh);
675 cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[partition]) - alloc_count);
676 mark_buffer_dirty(UDF_SB_LVIDBH(sb));
677 sb->s_dirt = 1; 728 sb->s_dirt = 1;
678 } 729 }
679 mutex_unlock(&sbi->s_alloc_mutex); 730 mutex_unlock(&sbi->s_alloc_mutex);
@@ -692,33 +743,35 @@ static int udf_table_new_block(struct super_block *sb,
692 kernel_lb_addr eloc, uninitialized_var(goal_eloc); 743 kernel_lb_addr eloc, uninitialized_var(goal_eloc);
693 struct extent_position epos, goal_epos; 744 struct extent_position epos, goal_epos;
694 int8_t etype; 745 int8_t etype;
746 struct udf_inode_info *iinfo = UDF_I(table);
695 747
696 *err = -ENOSPC; 748 *err = -ENOSPC;
697 749
698 if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_SHORT) 750 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
699 adsize = sizeof(short_ad); 751 adsize = sizeof(short_ad);
700 else if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_LONG) 752 else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
701 adsize = sizeof(long_ad); 753 adsize = sizeof(long_ad);
702 else 754 else
703 return newblock; 755 return newblock;
704 756
705 mutex_lock(&sbi->s_alloc_mutex); 757 mutex_lock(&sbi->s_alloc_mutex);
706 if (goal < 0 || goal >= UDF_SB_PARTLEN(sb, partition)) 758 if (goal < 0 || goal >= sbi->s_partmaps[partition].s_partition_len)
707 goal = 0; 759 goal = 0;
708 760
709 /* We search for the closest matching block to goal. If we find a exact hit, 761 /* We search for the closest matching block to goal. If we find
710 we stop. Otherwise we keep going till we run out of extents. 762 a exact hit, we stop. Otherwise we keep going till we run out
711 We store the buffer_head, bloc, and extoffset of the current closest 763 of extents. We store the buffer_head, bloc, and extoffset
712 match and use that when we are done. 764 of the current closest match and use that when we are done.
713 */ 765 */
714 epos.offset = sizeof(struct unallocSpaceEntry); 766 epos.offset = sizeof(struct unallocSpaceEntry);
715 epos.block = UDF_I_LOCATION(table); 767 epos.block = iinfo->i_location;
716 epos.bh = goal_epos.bh = NULL; 768 epos.bh = goal_epos.bh = NULL;
717 769
718 while (spread && 770 while (spread &&
719 (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) { 771 (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
720 if (goal >= eloc.logicalBlockNum) { 772 if (goal >= eloc.logicalBlockNum) {
721 if (goal < eloc.logicalBlockNum + (elen >> sb->s_blocksize_bits)) 773 if (goal < eloc.logicalBlockNum +
774 (elen >> sb->s_blocksize_bits))
722 nspread = 0; 775 nspread = 0;
723 else 776 else
724 nspread = goal - eloc.logicalBlockNum - 777 nspread = goal - eloc.logicalBlockNum -
@@ -771,11 +824,8 @@ static int udf_table_new_block(struct super_block *sb,
771 udf_delete_aext(table, goal_epos, goal_eloc, goal_elen); 824 udf_delete_aext(table, goal_epos, goal_eloc, goal_elen);
772 brelse(goal_epos.bh); 825 brelse(goal_epos.bh);
773 826
774 if (UDF_SB_LVIDBH(sb)) { 827 if (udf_add_free_space(sbi, partition, -1))
775 UDF_SB_LVID(sb)->freeSpaceTable[partition] = 828 mark_buffer_dirty(sbi->s_lvid_bh);
776 cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[partition]) - 1);
777 mark_buffer_dirty(UDF_SB_LVIDBH(sb));
778 }
779 829
780 sb->s_dirt = 1; 830 sb->s_dirt = 1;
781 mutex_unlock(&sbi->s_alloc_mutex); 831 mutex_unlock(&sbi->s_alloc_mutex);
@@ -789,22 +839,23 @@ inline void udf_free_blocks(struct super_block *sb,
789 uint32_t count) 839 uint32_t count)
790{ 840{
791 uint16_t partition = bloc.partitionReferenceNum; 841 uint16_t partition = bloc.partitionReferenceNum;
842 struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];
792 843
793 if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_BITMAP) { 844 if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) {
794 return udf_bitmap_free_blocks(sb, inode, 845 return udf_bitmap_free_blocks(sb, inode,
795 UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_bitmap, 846 map->s_uspace.s_bitmap,
796 bloc, offset, count); 847 bloc, offset, count);
797 } else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_TABLE) { 848 } else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) {
798 return udf_table_free_blocks(sb, inode, 849 return udf_table_free_blocks(sb, inode,
799 UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_table, 850 map->s_uspace.s_table,
800 bloc, offset, count); 851 bloc, offset, count);
801 } else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_BITMAP) { 852 } else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP) {
802 return udf_bitmap_free_blocks(sb, inode, 853 return udf_bitmap_free_blocks(sb, inode,
803 UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_bitmap, 854 map->s_fspace.s_bitmap,
804 bloc, offset, count); 855 bloc, offset, count);
805 } else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_TABLE) { 856 } else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE) {
806 return udf_table_free_blocks(sb, inode, 857 return udf_table_free_blocks(sb, inode,
807 UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_table, 858 map->s_fspace.s_table,
808 bloc, offset, count); 859 bloc, offset, count);
809 } else { 860 } else {
810 return; 861 return;
@@ -816,51 +867,55 @@ inline int udf_prealloc_blocks(struct super_block *sb,
816 uint16_t partition, uint32_t first_block, 867 uint16_t partition, uint32_t first_block,
817 uint32_t block_count) 868 uint32_t block_count)
818{ 869{
819 if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_BITMAP) { 870 struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];
871
872 if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
820 return udf_bitmap_prealloc_blocks(sb, inode, 873 return udf_bitmap_prealloc_blocks(sb, inode,
821 UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_bitmap, 874 map->s_uspace.s_bitmap,
822 partition, first_block, block_count); 875 partition, first_block,
823 } else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_TABLE) { 876 block_count);
877 else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
824 return udf_table_prealloc_blocks(sb, inode, 878 return udf_table_prealloc_blocks(sb, inode,
825 UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_table, 879 map->s_uspace.s_table,
826 partition, first_block, block_count); 880 partition, first_block,
827 } else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_BITMAP) { 881 block_count);
882 else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP)
828 return udf_bitmap_prealloc_blocks(sb, inode, 883 return udf_bitmap_prealloc_blocks(sb, inode,
829 UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_bitmap, 884 map->s_fspace.s_bitmap,
830 partition, first_block, block_count); 885 partition, first_block,
831 } else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_TABLE) { 886 block_count);
887 else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE)
832 return udf_table_prealloc_blocks(sb, inode, 888 return udf_table_prealloc_blocks(sb, inode,
833 UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_table, 889 map->s_fspace.s_table,
834 partition, first_block, block_count); 890 partition, first_block,
835 } else { 891 block_count);
892 else
836 return 0; 893 return 0;
837 }
838} 894}
839 895
840inline int udf_new_block(struct super_block *sb, 896inline int udf_new_block(struct super_block *sb,
841 struct inode *inode, 897 struct inode *inode,
842 uint16_t partition, uint32_t goal, int *err) 898 uint16_t partition, uint32_t goal, int *err)
843{ 899{
844 int ret; 900 struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];
845 901
846 if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_BITMAP) { 902 if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
847 ret = udf_bitmap_new_block(sb, inode, 903 return udf_bitmap_new_block(sb, inode,
848 UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_bitmap, 904 map->s_uspace.s_bitmap,
849 partition, goal, err); 905 partition, goal, err);
850 return ret; 906 else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
851 } else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_TABLE) {
852 return udf_table_new_block(sb, inode, 907 return udf_table_new_block(sb, inode,
853 UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_table, 908 map->s_uspace.s_table,
854 partition, goal, err); 909 partition, goal, err);
855 } else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_BITMAP) { 910 else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP)
856 return udf_bitmap_new_block(sb, inode, 911 return udf_bitmap_new_block(sb, inode,
857 UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_bitmap, 912 map->s_fspace.s_bitmap,
858 partition, goal, err); 913 partition, goal, err);
859 } else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_TABLE) { 914 else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE)
860 return udf_table_new_block(sb, inode, 915 return udf_table_new_block(sb, inode,
861 UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_table, 916 map->s_fspace.s_table,
862 partition, goal, err); 917 partition, goal, err);
863 } else { 918 else {
864 *err = -EIO; 919 *err = -EIO;
865 return 0; 920 return 0;
866 } 921 }
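
Note on the balloc.c hunks above: every open-coded update of the LVID free-space table (the cpu_to_le32(le32_to_cpu(...freeSpaceTable[...]) +/- n) pairs) is replaced by a call to udf_add_free_space(sbi, partition, cnt). The helper's body is not shown in this hunk; judging only from the call sites — it accepts a negative count at the allocation paths and its nonzero return tells the caller to mark sbi->s_lvid_bh dirty — a minimal sketch could look like the following (the real implementation may differ in detail):

static int udf_add_free_space(struct udf_sb_info *sbi, u16 partition, int cnt)
{
	struct logicalVolIntegrityDesc *lvid;

	/* Without a loaded LVID there is no free-space table to update. */
	if (!sbi->s_lvid_bh)
		return 0;

	lvid = (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data;
	/* cnt is negative at the allocation sites (-1, -alloc_count). */
	lvid->freeSpaceTable[partition] =
		cpu_to_le32(le32_to_cpu(lvid->freeSpaceTable[partition]) + cnt);

	/* Nonzero return tells the caller to mark sbi->s_lvid_bh dirty. */
	return 1;
}

Centralizing the update keeps the little-endian conversion in one place instead of repeating it at every allocation and free site, which is also what lets the callers shrink to a single if () around mark_buffer_dirty().
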
diff --git a/fs/udf/crc.c b/fs/udf/crc.c
index 85aaee5fab26..b1661296e786 100644
--- a/fs/udf/crc.c
+++ b/fs/udf/crc.c
@@ -79,7 +79,7 @@ static uint16_t crc_table[256] = {
79 * July 21, 1997 - Andrew E. Mileski 79 * July 21, 1997 - Andrew E. Mileski
80 * Adapted from OSTA-UDF(tm) 1.50 standard. 80 * Adapted from OSTA-UDF(tm) 1.50 standard.
81 */ 81 */
82uint16_t udf_crc(uint8_t * data, uint32_t size, uint16_t crc) 82uint16_t udf_crc(uint8_t *data, uint32_t size, uint16_t crc)
83{ 83{
84 while (size--) 84 while (size--)
85 crc = crc_table[(crc >> 8 ^ *(data++)) & 0xffU] ^ (crc << 8); 85 crc = crc_table[(crc >> 8 ^ *(data++)) & 0xffU] ^ (crc << 8);
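
The crc.c change is purely stylistic (pointer-declarator spacing); the table-driven CRC-CCITT loop itself is untouched. For readers skimming the diff, the same update step restated as a standalone routine, with the 256-entry lookup table passed in explicitly rather than taken from the file-scope crc_table[], would be:

#include <stdint.h>

/*
 * Same recurrence as udf_crc() above: fold each input byte through a
 * precomputed CRC-CCITT lookup table.
 */
static uint16_t crc16_ccitt_update(const uint16_t table[256], uint16_t crc,
				   const uint8_t *data, uint32_t size)
{
	while (size--)
		crc = table[(crc >> 8 ^ *data++) & 0xffU] ^ (crc << 8);
	return crc;
}
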
diff --git a/fs/udf/dir.c b/fs/udf/dir.c
index 9e3b9f97ddbc..8d8643ada199 100644
--- a/fs/udf/dir.c
+++ b/fs/udf/dir.c
@@ -36,80 +36,20 @@
36#include "udf_i.h" 36#include "udf_i.h"
37#include "udf_sb.h" 37#include "udf_sb.h"
38 38
39/* Prototypes for file operations */ 39static int do_udf_readdir(struct inode *dir, struct file *filp,
40static int udf_readdir(struct file *, void *, filldir_t); 40 filldir_t filldir, void *dirent)
41static int do_udf_readdir(struct inode *, struct file *, filldir_t, void *);
42
43/* readdir and lookup functions */
44
45const struct file_operations udf_dir_operations = {
46 .read = generic_read_dir,
47 .readdir = udf_readdir,
48 .ioctl = udf_ioctl,
49 .fsync = udf_fsync_file,
50};
51
52/*
53 * udf_readdir
54 *
55 * PURPOSE
56 * Read a directory entry.
57 *
58 * DESCRIPTION
59 * Optional - sys_getdents() will return -ENOTDIR if this routine is not
60 * available.
61 *
62 * Refer to sys_getdents() in fs/readdir.c
63 * sys_getdents() -> .
64 *
65 * PRE-CONDITIONS
66 * filp Pointer to directory file.
67 * buf Pointer to directory entry buffer.
68 * filldir Pointer to filldir function.
69 *
70 * POST-CONDITIONS
71 * <return> >=0 on success.
72 *
73 * HISTORY
74 * July 1, 1997 - Andrew E. Mileski
75 * Written, tested, and released.
76 */
77
78int udf_readdir(struct file *filp, void *dirent, filldir_t filldir)
79{
80 struct inode *dir = filp->f_path.dentry->d_inode;
81 int result;
82
83 lock_kernel();
84
85 if (filp->f_pos == 0) {
86 if (filldir(dirent, ".", 1, filp->f_pos, dir->i_ino, DT_DIR) < 0) {
87 unlock_kernel();
88 return 0;
89 }
90 filp->f_pos++;
91 }
92
93 result = do_udf_readdir(dir, filp, filldir, dirent);
94 unlock_kernel();
95 return result;
96}
97
98static int
99do_udf_readdir(struct inode *dir, struct file *filp, filldir_t filldir,
100 void *dirent)
101{ 41{
102 struct udf_fileident_bh fibh; 42 struct udf_fileident_bh fibh;
103 struct fileIdentDesc *fi = NULL; 43 struct fileIdentDesc *fi = NULL;
104 struct fileIdentDesc cfi; 44 struct fileIdentDesc cfi;
105 int block, iblock; 45 int block, iblock;
106 loff_t nf_pos = filp->f_pos - 1; 46 loff_t nf_pos = (filp->f_pos - 1) << 2;
107 int flen; 47 int flen;
108 char fname[UDF_NAME_LEN]; 48 char fname[UDF_NAME_LEN];
109 char *nameptr; 49 char *nameptr;
110 uint16_t liu; 50 uint16_t liu;
111 uint8_t lfi; 51 uint8_t lfi;
112 loff_t size = (udf_ext0_offset(dir) + dir->i_size) >> 2; 52 loff_t size = udf_ext0_offset(dir) + dir->i_size;
113 struct buffer_head *tmp, *bha[16]; 53 struct buffer_head *tmp, *bha[16];
114 kernel_lb_addr eloc; 54 kernel_lb_addr eloc;
115 uint32_t elen; 55 uint32_t elen;
@@ -117,23 +57,26 @@ do_udf_readdir(struct inode *dir, struct file *filp, filldir_t filldir,
117 int i, num; 57 int i, num;
118 unsigned int dt_type; 58 unsigned int dt_type;
119 struct extent_position epos = { NULL, 0, {0, 0} }; 59 struct extent_position epos = { NULL, 0, {0, 0} };
60 struct udf_inode_info *iinfo;
120 61
121 if (nf_pos >= size) 62 if (nf_pos >= size)
122 return 0; 63 return 0;
123 64
124 if (nf_pos == 0) 65 if (nf_pos == 0)
125 nf_pos = (udf_ext0_offset(dir) >> 2); 66 nf_pos = udf_ext0_offset(dir);
126 67
127 fibh.soffset = fibh.eoffset = (nf_pos & ((dir->i_sb->s_blocksize - 1) >> 2)) << 2; 68 fibh.soffset = fibh.eoffset = nf_pos & (dir->i_sb->s_blocksize - 1);
128 if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB) { 69 iinfo = UDF_I(dir);
70 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
129 fibh.sbh = fibh.ebh = NULL; 71 fibh.sbh = fibh.ebh = NULL;
130 } else if (inode_bmap(dir, nf_pos >> (dir->i_sb->s_blocksize_bits - 2), 72 } else if (inode_bmap(dir, nf_pos >> dir->i_sb->s_blocksize_bits,
131 &epos, &eloc, &elen, &offset) == (EXT_RECORDED_ALLOCATED >> 30)) { 73 &epos, &eloc, &elen, &offset) == (EXT_RECORDED_ALLOCATED >> 30)) {
132 block = udf_get_lb_pblock(dir->i_sb, eloc, offset); 74 block = udf_get_lb_pblock(dir->i_sb, eloc, offset);
133 if ((++offset << dir->i_sb->s_blocksize_bits) < elen) { 75 if ((++offset << dir->i_sb->s_blocksize_bits) < elen) {
134 if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_SHORT) 76 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
135 epos.offset -= sizeof(short_ad); 77 epos.offset -= sizeof(short_ad);
136 else if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_LONG) 78 else if (iinfo->i_alloc_type ==
79 ICBTAG_FLAG_AD_LONG)
137 epos.offset -= sizeof(long_ad); 80 epos.offset -= sizeof(long_ad);
138 } else { 81 } else {
139 offset = 0; 82 offset = 0;
@@ -168,7 +111,7 @@ do_udf_readdir(struct inode *dir, struct file *filp, filldir_t filldir,
168 } 111 }
169 112
170 while (nf_pos < size) { 113 while (nf_pos < size) {
171 filp->f_pos = nf_pos + 1; 114 filp->f_pos = (nf_pos >> 2) + 1;
172 115
173 fi = udf_fileident_read(dir, &nf_pos, &fibh, &cfi, &epos, &eloc, 116 fi = udf_fileident_read(dir, &nf_pos, &fibh, &cfi, &epos, &eloc,
174 &elen, &offset); 117 &elen, &offset);
@@ -235,7 +178,7 @@ do_udf_readdir(struct inode *dir, struct file *filp, filldir_t filldir,
235 } 178 }
236 } /* end while */ 179 } /* end while */
237 180
238 filp->f_pos = nf_pos + 1; 181 filp->f_pos = (nf_pos >> 2) + 1;
239 182
240 if (fibh.sbh != fibh.ebh) 183 if (fibh.sbh != fibh.ebh)
241 brelse(fibh.ebh); 184 brelse(fibh.ebh);
@@ -244,3 +187,57 @@ do_udf_readdir(struct inode *dir, struct file *filp, filldir_t filldir,
244 187
245 return 0; 188 return 0;
246} 189}
190
191/*
192 * udf_readdir
193 *
194 * PURPOSE
195 * Read a directory entry.
196 *
197 * DESCRIPTION
198 * Optional - sys_getdents() will return -ENOTDIR if this routine is not
199 * available.
200 *
201 * Refer to sys_getdents() in fs/readdir.c
202 * sys_getdents() -> .
203 *
204 * PRE-CONDITIONS
205 * filp Pointer to directory file.
206 * buf Pointer to directory entry buffer.
207 * filldir Pointer to filldir function.
208 *
209 * POST-CONDITIONS
210 * <return> >=0 on success.
211 *
212 * HISTORY
213 * July 1, 1997 - Andrew E. Mileski
214 * Written, tested, and released.
215 */
216
217static int udf_readdir(struct file *filp, void *dirent, filldir_t filldir)
218{
219 struct inode *dir = filp->f_path.dentry->d_inode;
220 int result;
221
222 lock_kernel();
223
224 if (filp->f_pos == 0) {
225 if (filldir(dirent, ".", 1, filp->f_pos, dir->i_ino, DT_DIR) < 0) {
226 unlock_kernel();
227 return 0;
228 }
229 filp->f_pos++;
230 }
231
232 result = do_udf_readdir(dir, filp, filldir, dirent);
233 unlock_kernel();
234 return result;
235}
236
237/* readdir and lookup functions */
238const struct file_operations udf_dir_operations = {
239 .read = generic_read_dir,
240 .readdir = udf_readdir,
241 .ioctl = udf_ioctl,
242 .fsync = udf_fsync_file,
243};
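
The dir.c hunks do two things: they move udf_readdir() and the udf_dir_operations table below do_udf_readdir() so the forward prototypes can go, and they switch do_udf_readdir() to tracking the directory position (nf_pos, size) in plain bytes instead of 4-byte units, converting to and from the externally visible filp->f_pos only at the boundaries. The conversion is done inline in the hunk; written out as helpers (names invented here purely for illustration) it amounts to:

/*
 * filp->f_pos for UDF directories counts 4-byte units, offset by 1 for the
 * synthetic "." entry, while the reworked do_udf_readdir() works in byte
 * offsets internally. Illustrative helpers for the two conversions:
 */
static inline loff_t udf_fpos_to_bytes(loff_t f_pos)
{
	return (f_pos - 1) << 2;	/* drop the "." slot, scale to bytes */
}

static inline loff_t udf_bytes_to_fpos(loff_t nf_pos)
{
	return (nf_pos >> 2) + 1;
}

Keeping byte offsets internally removes the repeated ">> 2" / "<< 2" scaling that the old code had to apply around udf_fileident_read() and inode_bmap().
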
diff --git a/fs/udf/directory.c b/fs/udf/directory.c
index ff8c08fd7bf5..2820f8fcf4cc 100644
--- a/fs/udf/directory.c
+++ b/fs/udf/directory.c
@@ -19,7 +19,7 @@
19#include <linux/buffer_head.h> 19#include <linux/buffer_head.h>
20 20
21#if 0 21#if 0
22static uint8_t *udf_filead_read(struct inode *dir, uint8_t * tmpad, 22static uint8_t *udf_filead_read(struct inode *dir, uint8_t *tmpad,
23 uint8_t ad_size, kernel_lb_addr fe_loc, 23 uint8_t ad_size, kernel_lb_addr fe_loc,
24 int *pos, int *offset, struct buffer_head **bh, 24 int *pos, int *offset, struct buffer_head **bh,
25 int *error) 25 int *error)
@@ -45,7 +45,8 @@ static uint8_t *udf_filead_read(struct inode *dir, uint8_t * tmpad,
45 block = udf_get_lb_pblock(dir->i_sb, fe_loc, ++*pos); 45 block = udf_get_lb_pblock(dir->i_sb, fe_loc, ++*pos);
46 if (!block) 46 if (!block)
47 return NULL; 47 return NULL;
48 if (!(*bh = udf_tread(dir->i_sb, block))) 48 *bh = udf_tread(dir->i_sb, block);
49 if (!*bh)
49 return NULL; 50 return NULL;
50 } else if (*offset > dir->i_sb->s_blocksize) { 51 } else if (*offset > dir->i_sb->s_blocksize) {
51 ad = tmpad; 52 ad = tmpad;
@@ -57,10 +58,12 @@ static uint8_t *udf_filead_read(struct inode *dir, uint8_t * tmpad,
57 block = udf_get_lb_pblock(dir->i_sb, fe_loc, ++*pos); 58 block = udf_get_lb_pblock(dir->i_sb, fe_loc, ++*pos);
58 if (!block) 59 if (!block)
59 return NULL; 60 return NULL;
60 if (!((*bh) = udf_tread(dir->i_sb, block))) 61 (*bh) = udf_tread(dir->i_sb, block);
62 if (!*bh)
61 return NULL; 63 return NULL;
62 64
63 memcpy((uint8_t *)ad + remainder, (*bh)->b_data, ad_size - remainder); 65 memcpy((uint8_t *)ad + remainder, (*bh)->b_data,
66 ad_size - remainder);
64 *offset = ad_size - remainder; 67 *offset = ad_size - remainder;
65 } 68 }
66 69
@@ -68,29 +71,31 @@ static uint8_t *udf_filead_read(struct inode *dir, uint8_t * tmpad,
68} 71}
69#endif 72#endif
70 73
71struct fileIdentDesc *udf_fileident_read(struct inode *dir, loff_t * nf_pos, 74struct fileIdentDesc *udf_fileident_read(struct inode *dir, loff_t *nf_pos,
72 struct udf_fileident_bh *fibh, 75 struct udf_fileident_bh *fibh,
73 struct fileIdentDesc *cfi, 76 struct fileIdentDesc *cfi,
74 struct extent_position *epos, 77 struct extent_position *epos,
75 kernel_lb_addr * eloc, uint32_t * elen, 78 kernel_lb_addr *eloc, uint32_t *elen,
76 sector_t * offset) 79 sector_t *offset)
77{ 80{
78 struct fileIdentDesc *fi; 81 struct fileIdentDesc *fi;
79 int i, num, block; 82 int i, num, block;
80 struct buffer_head *tmp, *bha[16]; 83 struct buffer_head *tmp, *bha[16];
84 struct udf_inode_info *iinfo = UDF_I(dir);
81 85
82 fibh->soffset = fibh->eoffset; 86 fibh->soffset = fibh->eoffset;
83 87
84 if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB) { 88 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
85 fi = udf_get_fileident(UDF_I_DATA(dir) - 89 fi = udf_get_fileident(iinfo->i_ext.i_data -
86 (UDF_I_EFE(dir) ? 90 (iinfo->i_efe ?
87 sizeof(struct extendedFileEntry) : 91 sizeof(struct extendedFileEntry) :
88 sizeof(struct fileEntry)), 92 sizeof(struct fileEntry)),
89 dir->i_sb->s_blocksize, &(fibh->eoffset)); 93 dir->i_sb->s_blocksize,
94 &(fibh->eoffset));
90 if (!fi) 95 if (!fi)
91 return NULL; 96 return NULL;
92 97
93 *nf_pos += ((fibh->eoffset - fibh->soffset) >> 2); 98 *nf_pos += fibh->eoffset - fibh->soffset;
94 99
95 memcpy((uint8_t *)cfi, (uint8_t *)fi, 100 memcpy((uint8_t *)cfi, (uint8_t *)fi,
96 sizeof(struct fileIdentDesc)); 101 sizeof(struct fileIdentDesc));
@@ -100,6 +105,7 @@ struct fileIdentDesc *udf_fileident_read(struct inode *dir, loff_t * nf_pos,
100 105
101 if (fibh->eoffset == dir->i_sb->s_blocksize) { 106 if (fibh->eoffset == dir->i_sb->s_blocksize) {
102 int lextoffset = epos->offset; 107 int lextoffset = epos->offset;
108 unsigned char blocksize_bits = dir->i_sb->s_blocksize_bits;
103 109
104 if (udf_next_aext(dir, epos, eloc, elen, 1) != 110 if (udf_next_aext(dir, epos, eloc, elen, 1) !=
105 (EXT_RECORDED_ALLOCATED >> 30)) 111 (EXT_RECORDED_ALLOCATED >> 30))
@@ -109,24 +115,27 @@ struct fileIdentDesc *udf_fileident_read(struct inode *dir, loff_t * nf_pos,
109 115
110 (*offset)++; 116 (*offset)++;
111 117
112 if ((*offset << dir->i_sb->s_blocksize_bits) >= *elen) 118 if ((*offset << blocksize_bits) >= *elen)
113 *offset = 0; 119 *offset = 0;
114 else 120 else
115 epos->offset = lextoffset; 121 epos->offset = lextoffset;
116 122
117 brelse(fibh->sbh); 123 brelse(fibh->sbh);
118 if (!(fibh->sbh = fibh->ebh = udf_tread(dir->i_sb, block))) 124 fibh->sbh = fibh->ebh = udf_tread(dir->i_sb, block);
125 if (!fibh->sbh)
119 return NULL; 126 return NULL;
120 fibh->soffset = fibh->eoffset = 0; 127 fibh->soffset = fibh->eoffset = 0;
121 128
122 if (!(*offset & ((16 >> (dir->i_sb->s_blocksize_bits - 9)) - 1))) { 129 if (!(*offset & ((16 >> (blocksize_bits - 9)) - 1))) {
123 i = 16 >> (dir->i_sb->s_blocksize_bits - 9); 130 i = 16 >> (blocksize_bits - 9);
124 if (i + *offset > (*elen >> dir->i_sb->s_blocksize_bits)) 131 if (i + *offset > (*elen >> blocksize_bits))
125 i = (*elen >> dir->i_sb->s_blocksize_bits)-*offset; 132 i = (*elen >> blocksize_bits)-*offset;
126 for (num = 0; i > 0; i--) { 133 for (num = 0; i > 0; i--) {
127 block = udf_get_lb_pblock(dir->i_sb, *eloc, *offset + i); 134 block = udf_get_lb_pblock(dir->i_sb, *eloc,
135 *offset + i);
128 tmp = udf_tgetblk(dir->i_sb, block); 136 tmp = udf_tgetblk(dir->i_sb, block);
129 if (tmp && !buffer_uptodate(tmp) && !buffer_locked(tmp)) 137 if (tmp && !buffer_uptodate(tmp) &&
138 !buffer_locked(tmp))
130 bha[num++] = tmp; 139 bha[num++] = tmp;
131 else 140 else
132 brelse(tmp); 141 brelse(tmp);
@@ -148,7 +157,7 @@ struct fileIdentDesc *udf_fileident_read(struct inode *dir, loff_t * nf_pos,
148 if (!fi) 157 if (!fi)
149 return NULL; 158 return NULL;
150 159
151 *nf_pos += ((fibh->eoffset - fibh->soffset) >> 2); 160 *nf_pos += fibh->eoffset - fibh->soffset;
152 161
153 if (fibh->eoffset <= dir->i_sb->s_blocksize) { 162 if (fibh->eoffset <= dir->i_sb->s_blocksize) {
154 memcpy((uint8_t *)cfi, (uint8_t *)fi, 163 memcpy((uint8_t *)cfi, (uint8_t *)fi,
@@ -172,20 +181,23 @@ struct fileIdentDesc *udf_fileident_read(struct inode *dir, loff_t * nf_pos,
172 fibh->soffset -= dir->i_sb->s_blocksize; 181 fibh->soffset -= dir->i_sb->s_blocksize;
173 fibh->eoffset -= dir->i_sb->s_blocksize; 182 fibh->eoffset -= dir->i_sb->s_blocksize;
174 183
175 if (!(fibh->ebh = udf_tread(dir->i_sb, block))) 184 fibh->ebh = udf_tread(dir->i_sb, block);
185 if (!fibh->ebh)
176 return NULL; 186 return NULL;
177 187
178 if (sizeof(struct fileIdentDesc) > -fibh->soffset) { 188 if (sizeof(struct fileIdentDesc) > -fibh->soffset) {
179 int fi_len; 189 int fi_len;
180 190
181 memcpy((uint8_t *)cfi, (uint8_t *)fi, -fibh->soffset); 191 memcpy((uint8_t *)cfi, (uint8_t *)fi, -fibh->soffset);
182 memcpy((uint8_t *)cfi - fibh->soffset, fibh->ebh->b_data, 192 memcpy((uint8_t *)cfi - fibh->soffset,
193 fibh->ebh->b_data,
183 sizeof(struct fileIdentDesc) + fibh->soffset); 194 sizeof(struct fileIdentDesc) + fibh->soffset);
184 195
185 fi_len = (sizeof(struct fileIdentDesc) + cfi->lengthFileIdent + 196 fi_len = (sizeof(struct fileIdentDesc) +
197 cfi->lengthFileIdent +
186 le16_to_cpu(cfi->lengthOfImpUse) + 3) & ~3; 198 le16_to_cpu(cfi->lengthOfImpUse) + 3) & ~3;
187 199
188 *nf_pos += ((fi_len - (fibh->eoffset - fibh->soffset)) >> 2); 200 *nf_pos += fi_len - (fibh->eoffset - fibh->soffset);
189 fibh->eoffset = fibh->soffset + fi_len; 201 fibh->eoffset = fibh->soffset + fi_len;
190 } else { 202 } else {
191 memcpy((uint8_t *)cfi, (uint8_t *)fi, 203 memcpy((uint8_t *)cfi, (uint8_t *)fi,
@@ -210,11 +222,10 @@ struct fileIdentDesc *udf_get_fileident(void *buffer, int bufsize, int *offset)
210 222
211 ptr = buffer; 223 ptr = buffer;
212 224
213 if ((*offset > 0) && (*offset < bufsize)) { 225 if ((*offset > 0) && (*offset < bufsize))
214 ptr += *offset; 226 ptr += *offset;
215 }
216 fi = (struct fileIdentDesc *)ptr; 227 fi = (struct fileIdentDesc *)ptr;
217 if (le16_to_cpu(fi->descTag.tagIdent) != TAG_IDENT_FID) { 228 if (fi->descTag.tagIdent != cpu_to_le16(TAG_IDENT_FID)) {
218 udf_debug("0x%x != TAG_IDENT_FID\n", 229 udf_debug("0x%x != TAG_IDENT_FID\n",
219 le16_to_cpu(fi->descTag.tagIdent)); 230 le16_to_cpu(fi->descTag.tagIdent));
220 udf_debug("offset: %u sizeof: %lu bufsize: %u\n", 231 udf_debug("offset: %u sizeof: %lu bufsize: %u\n",
@@ -222,12 +233,11 @@ struct fileIdentDesc *udf_get_fileident(void *buffer, int bufsize, int *offset)
222 bufsize); 233 bufsize);
223 return NULL; 234 return NULL;
224 } 235 }
225 if ((*offset + sizeof(struct fileIdentDesc)) > bufsize) { 236 if ((*offset + sizeof(struct fileIdentDesc)) > bufsize)
226 lengthThisIdent = sizeof(struct fileIdentDesc); 237 lengthThisIdent = sizeof(struct fileIdentDesc);
227 } else { 238 else
228 lengthThisIdent = sizeof(struct fileIdentDesc) + 239 lengthThisIdent = sizeof(struct fileIdentDesc) +
229 fi->lengthFileIdent + le16_to_cpu(fi->lengthOfImpUse); 240 fi->lengthFileIdent + le16_to_cpu(fi->lengthOfImpUse);
230 }
231 241
232 /* we need to figure padding, too! */ 242 /* we need to figure padding, too! */
233 padlen = lengthThisIdent % UDF_NAME_PAD; 243 padlen = lengthThisIdent % UDF_NAME_PAD;
@@ -252,17 +262,17 @@ static extent_ad *udf_get_fileextent(void *buffer, int bufsize, int *offset)
252 262
253 fe = (struct fileEntry *)buffer; 263 fe = (struct fileEntry *)buffer;
254 264
255 if (le16_to_cpu(fe->descTag.tagIdent) != TAG_IDENT_FE) { 265 if (fe->descTag.tagIdent != cpu_to_le16(TAG_IDENT_FE)) {
256 udf_debug("0x%x != TAG_IDENT_FE\n", 266 udf_debug("0x%x != TAG_IDENT_FE\n",
257 le16_to_cpu(fe->descTag.tagIdent)); 267 le16_to_cpu(fe->descTag.tagIdent));
258 return NULL; 268 return NULL;
259 } 269 }
260 270
261 ptr = (uint8_t *)(fe->extendedAttr) + le32_to_cpu(fe->lengthExtendedAttr); 271 ptr = (uint8_t *)(fe->extendedAttr) +
272 le32_to_cpu(fe->lengthExtendedAttr);
262 273
263 if ((*offset > 0) && (*offset < le32_to_cpu(fe->lengthAllocDescs))) { 274 if ((*offset > 0) && (*offset < le32_to_cpu(fe->lengthAllocDescs)))
264 ptr += *offset; 275 ptr += *offset;
265 }
266 276
267 ext = (extent_ad *)ptr; 277 ext = (extent_ad *)ptr;
268 278
@@ -271,7 +281,7 @@ static extent_ad *udf_get_fileextent(void *buffer, int bufsize, int *offset)
271} 281}
272#endif 282#endif
273 283
274short_ad *udf_get_fileshortad(uint8_t *ptr, int maxoffset, int *offset, 284short_ad *udf_get_fileshortad(uint8_t *ptr, int maxoffset, uint32_t *offset,
275 int inc) 285 int inc)
276{ 286{
277 short_ad *sa; 287 short_ad *sa;
@@ -281,17 +291,20 @@ short_ad *udf_get_fileshortad(uint8_t *ptr, int maxoffset, int *offset,
281 return NULL; 291 return NULL;
282 } 292 }
283 293
284 if ((*offset < 0) || ((*offset + sizeof(short_ad)) > maxoffset)) 294 if ((*offset + sizeof(short_ad)) > maxoffset)
285 return NULL;
286 else if ((sa = (short_ad *)ptr)->extLength == 0)
287 return NULL; 295 return NULL;
296 else {
297 sa = (short_ad *)ptr;
298 if (sa->extLength == 0)
299 return NULL;
300 }
288 301
289 if (inc) 302 if (inc)
290 *offset += sizeof(short_ad); 303 *offset += sizeof(short_ad);
291 return sa; 304 return sa;
292} 305}
293 306
294long_ad *udf_get_filelongad(uint8_t *ptr, int maxoffset, int *offset, int inc) 307long_ad *udf_get_filelongad(uint8_t *ptr, int maxoffset, uint32_t *offset, int inc)
295{ 308{
296 long_ad *la; 309 long_ad *la;
297 310
@@ -300,10 +313,13 @@ long_ad *udf_get_filelongad(uint8_t *ptr, int maxoffset, int *offset, int inc)
300 return NULL; 313 return NULL;
301 } 314 }
302 315
303 if ((*offset < 0) || ((*offset + sizeof(long_ad)) > maxoffset)) 316 if ((*offset + sizeof(long_ad)) > maxoffset)
304 return NULL;
305 else if ((la = (long_ad *)ptr)->extLength == 0)
306 return NULL; 317 return NULL;
318 else {
319 la = (long_ad *)ptr;
320 if (la->extLength == 0)
321 return NULL;
322 }
307 323
308 if (inc) 324 if (inc)
309 *offset += sizeof(long_ad); 325 *offset += sizeof(long_ad);
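
In directory.c the *nf_pos accounting follows the same bytes-instead-of-quads change as dir.c, and udf_get_fileshortad()/udf_get_filelongad() now take a uint32_t *offset instead of int *offset. That is why the old "*offset < 0 ||" half of the guard disappears: an unsigned offset can only violate the upper bound. A minimal standalone sketch of the remaining check (the function name and entry_size parameter are invented for illustration):

#include <stdint.h>
#include <stddef.h>

/*
 * With an unsigned offset only the upper bound needs checking.
 * entry_size stands in for sizeof(short_ad) or sizeof(long_ad).
 */
static int udf_ad_in_bounds(uint32_t offset, size_t entry_size, size_t maxoffset)
{
	return offset + entry_size <= maxoffset;
}
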
diff --git a/fs/udf/file.c b/fs/udf/file.c
index 7c7a1b39d56c..97c71ae7c689 100644
--- a/fs/udf/file.c
+++ b/fs/udf/file.c
@@ -45,12 +45,13 @@ static int udf_adinicb_readpage(struct file *file, struct page *page)
45{ 45{
46 struct inode *inode = page->mapping->host; 46 struct inode *inode = page->mapping->host;
47 char *kaddr; 47 char *kaddr;
48 struct udf_inode_info *iinfo = UDF_I(inode);
48 49
49 BUG_ON(!PageLocked(page)); 50 BUG_ON(!PageLocked(page));
50 51
51 kaddr = kmap(page); 52 kaddr = kmap(page);
52 memset(kaddr, 0, PAGE_CACHE_SIZE); 53 memset(kaddr, 0, PAGE_CACHE_SIZE);
53 memcpy(kaddr, UDF_I_DATA(inode) + UDF_I_LENEATTR(inode), inode->i_size); 54 memcpy(kaddr, iinfo->i_ext.i_data + iinfo->i_lenEAttr, inode->i_size);
54 flush_dcache_page(page); 55 flush_dcache_page(page);
55 SetPageUptodate(page); 56 SetPageUptodate(page);
56 kunmap(page); 57 kunmap(page);
@@ -59,15 +60,17 @@ static int udf_adinicb_readpage(struct file *file, struct page *page)
59 return 0; 60 return 0;
60} 61}
61 62
62static int udf_adinicb_writepage(struct page *page, struct writeback_control *wbc) 63static int udf_adinicb_writepage(struct page *page,
64 struct writeback_control *wbc)
63{ 65{
64 struct inode *inode = page->mapping->host; 66 struct inode *inode = page->mapping->host;
65 char *kaddr; 67 char *kaddr;
68 struct udf_inode_info *iinfo = UDF_I(inode);
66 69
67 BUG_ON(!PageLocked(page)); 70 BUG_ON(!PageLocked(page));
68 71
69 kaddr = kmap(page); 72 kaddr = kmap(page);
70 memcpy(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode), kaddr, inode->i_size); 73 memcpy(iinfo->i_ext.i_data + iinfo->i_lenEAttr, kaddr, inode->i_size);
71 mark_inode_dirty(inode); 74 mark_inode_dirty(inode);
72 SetPageUptodate(page); 75 SetPageUptodate(page);
73 kunmap(page); 76 kunmap(page);
@@ -84,9 +87,10 @@ static int udf_adinicb_write_end(struct file *file,
84 struct inode *inode = mapping->host; 87 struct inode *inode = mapping->host;
85 unsigned offset = pos & (PAGE_CACHE_SIZE - 1); 88 unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
86 char *kaddr; 89 char *kaddr;
90 struct udf_inode_info *iinfo = UDF_I(inode);
87 91
88 kaddr = kmap_atomic(page, KM_USER0); 92 kaddr = kmap_atomic(page, KM_USER0);
89 memcpy(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode) + offset, 93 memcpy(iinfo->i_ext.i_data + iinfo->i_lenEAttr + offset,
90 kaddr + offset, copied); 94 kaddr + offset, copied);
91 kunmap_atomic(kaddr, KM_USER0); 95 kunmap_atomic(kaddr, KM_USER0);
92 96
@@ -109,25 +113,27 @@ static ssize_t udf_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
109 struct inode *inode = file->f_path.dentry->d_inode; 113 struct inode *inode = file->f_path.dentry->d_inode;
110 int err, pos; 114 int err, pos;
111 size_t count = iocb->ki_left; 115 size_t count = iocb->ki_left;
116 struct udf_inode_info *iinfo = UDF_I(inode);
112 117
113 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB) { 118 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
114 if (file->f_flags & O_APPEND) 119 if (file->f_flags & O_APPEND)
115 pos = inode->i_size; 120 pos = inode->i_size;
116 else 121 else
117 pos = ppos; 122 pos = ppos;
118 123
119 if (inode->i_sb->s_blocksize < (udf_file_entry_alloc_offset(inode) + 124 if (inode->i_sb->s_blocksize <
125 (udf_file_entry_alloc_offset(inode) +
120 pos + count)) { 126 pos + count)) {
121 udf_expand_file_adinicb(inode, pos + count, &err); 127 udf_expand_file_adinicb(inode, pos + count, &err);
122 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB) { 128 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
123 udf_debug("udf_expand_adinicb: err=%d\n", err); 129 udf_debug("udf_expand_adinicb: err=%d\n", err);
124 return err; 130 return err;
125 } 131 }
126 } else { 132 } else {
127 if (pos + count > inode->i_size) 133 if (pos + count > inode->i_size)
128 UDF_I_LENALLOC(inode) = pos + count; 134 iinfo->i_lenAlloc = pos + count;
129 else 135 else
130 UDF_I_LENALLOC(inode) = inode->i_size; 136 iinfo->i_lenAlloc = inode->i_size;
131 } 137 }
132 } 138 }
133 139
@@ -191,23 +197,28 @@ int udf_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
191 197
192 switch (cmd) { 198 switch (cmd) {
193 case UDF_GETVOLIDENT: 199 case UDF_GETVOLIDENT:
194 return copy_to_user((char __user *)arg, 200 if (copy_to_user((char __user *)arg,
195 UDF_SB_VOLIDENT(inode->i_sb), 32) ? -EFAULT : 0; 201 UDF_SB(inode->i_sb)->s_volume_ident, 32))
202 return -EFAULT;
203 else
204 return 0;
196 case UDF_RELOCATE_BLOCKS: 205 case UDF_RELOCATE_BLOCKS:
197 if (!capable(CAP_SYS_ADMIN)) 206 if (!capable(CAP_SYS_ADMIN))
198 return -EACCES; 207 return -EACCES;
199 if (get_user(old_block, (long __user *)arg)) 208 if (get_user(old_block, (long __user *)arg))
200 return -EFAULT; 209 return -EFAULT;
201 if ((result = udf_relocate_blocks(inode->i_sb, 210 result = udf_relocate_blocks(inode->i_sb,
202 old_block, &new_block)) == 0) 211 old_block, &new_block);
212 if (result == 0)
203 result = put_user(new_block, (long __user *)arg); 213 result = put_user(new_block, (long __user *)arg);
204 return result; 214 return result;
205 case UDF_GETEASIZE: 215 case UDF_GETEASIZE:
206 result = put_user(UDF_I_LENEATTR(inode), (int __user *)arg); 216 result = put_user(UDF_I(inode)->i_lenEAttr, (int __user *)arg);
207 break; 217 break;
208 case UDF_GETEABLOCK: 218 case UDF_GETEABLOCK:
209 result = copy_to_user((char __user *)arg, UDF_I_DATA(inode), 219 result = copy_to_user((char __user *)arg,
210 UDF_I_LENEATTR(inode)) ? -EFAULT : 0; 220 UDF_I(inode)->i_ext.i_data,
221 UDF_I(inode)->i_lenEAttr) ? -EFAULT : 0;
211 break; 222 break;
212 } 223 }
213 224
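
In file.c, the UDF_GETVOLIDENT branch of udf_ioctl() is rewritten from a ternary return into an explicit if/else around copy_to_user(), and UDF_RELOCATE_BLOCKS no longer assigns inside the if condition. Both are checkpatch-style cleanups; the error-handling pattern itself is unchanged. In isolation it looks like this (the helper name, vol_ident and len are placeholders, not part of the patch):

#include <linux/uaccess.h>

/*
 * copy_to_user() returns the number of bytes it could NOT copy, so any
 * nonzero result maps to -EFAULT and zero means success.
 */
static int udf_copy_volident_to_user(char __user *arg,
				     const char *vol_ident, size_t len)
{
	if (copy_to_user(arg, vol_ident, len))
		return -EFAULT;
	return 0;
}
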
diff --git a/fs/udf/ialloc.c b/fs/udf/ialloc.c
index 636d8f613929..84360315aca2 100644
--- a/fs/udf/ialloc.c
+++ b/fs/udf/ialloc.c
@@ -43,19 +43,21 @@ void udf_free_inode(struct inode *inode)
43 clear_inode(inode); 43 clear_inode(inode);
44 44
45 mutex_lock(&sbi->s_alloc_mutex); 45 mutex_lock(&sbi->s_alloc_mutex);
46 if (sbi->s_lvidbh) { 46 if (sbi->s_lvid_bh) {
47 struct logicalVolIntegrityDescImpUse *lvidiu =
48 udf_sb_lvidiu(sbi);
47 if (S_ISDIR(inode->i_mode)) 49 if (S_ISDIR(inode->i_mode))
48 UDF_SB_LVIDIU(sb)->numDirs = 50 lvidiu->numDirs =
49 cpu_to_le32(le32_to_cpu(UDF_SB_LVIDIU(sb)->numDirs) - 1); 51 cpu_to_le32(le32_to_cpu(lvidiu->numDirs) - 1);
50 else 52 else
51 UDF_SB_LVIDIU(sb)->numFiles = 53 lvidiu->numFiles =
52 cpu_to_le32(le32_to_cpu(UDF_SB_LVIDIU(sb)->numFiles) - 1); 54 cpu_to_le32(le32_to_cpu(lvidiu->numFiles) - 1);
53 55
54 mark_buffer_dirty(sbi->s_lvidbh); 56 mark_buffer_dirty(sbi->s_lvid_bh);
55 } 57 }
56 mutex_unlock(&sbi->s_alloc_mutex); 58 mutex_unlock(&sbi->s_alloc_mutex);
57 59
58 udf_free_blocks(sb, NULL, UDF_I_LOCATION(inode), 0, 1); 60 udf_free_blocks(sb, NULL, UDF_I(inode)->i_location, 0, 1);
59} 61}
60 62
61struct inode *udf_new_inode(struct inode *dir, int mode, int *err) 63struct inode *udf_new_inode(struct inode *dir, int mode, int *err)
@@ -64,7 +66,9 @@ struct inode *udf_new_inode(struct inode *dir, int mode, int *err)
64 struct udf_sb_info *sbi = UDF_SB(sb); 66 struct udf_sb_info *sbi = UDF_SB(sb);
65 struct inode *inode; 67 struct inode *inode;
66 int block; 68 int block;
67 uint32_t start = UDF_I_LOCATION(dir).logicalBlockNum; 69 uint32_t start = UDF_I(dir)->i_location.logicalBlockNum;
70 struct udf_inode_info *iinfo;
71 struct udf_inode_info *dinfo = UDF_I(dir);
68 72
69 inode = new_inode(sb); 73 inode = new_inode(sb);
70 74
@@ -74,13 +78,15 @@ struct inode *udf_new_inode(struct inode *dir, int mode, int *err)
74 } 78 }
75 *err = -ENOSPC; 79 *err = -ENOSPC;
76 80
77 UDF_I_UNIQUE(inode) = 0; 81 iinfo = UDF_I(inode);
78 UDF_I_LENEXTENTS(inode) = 0; 82 iinfo->i_unique = 0;
79 UDF_I_NEXT_ALLOC_BLOCK(inode) = 0; 83 iinfo->i_lenExtents = 0;
80 UDF_I_NEXT_ALLOC_GOAL(inode) = 0; 84 iinfo->i_next_alloc_block = 0;
81 UDF_I_STRAT4096(inode) = 0; 85 iinfo->i_next_alloc_goal = 0;
86 iinfo->i_strat4096 = 0;
82 87
83 block = udf_new_block(dir->i_sb, NULL, UDF_I_LOCATION(dir).partitionReferenceNum, 88 block = udf_new_block(dir->i_sb, NULL,
89 dinfo->i_location.partitionReferenceNum,
84 start, err); 90 start, err);
85 if (*err) { 91 if (*err) {
86 iput(inode); 92 iput(inode);
@@ -88,21 +94,27 @@ struct inode *udf_new_inode(struct inode *dir, int mode, int *err)
88 } 94 }
89 95
90 mutex_lock(&sbi->s_alloc_mutex); 96 mutex_lock(&sbi->s_alloc_mutex);
91 if (UDF_SB_LVIDBH(sb)) { 97 if (sbi->s_lvid_bh) {
98 struct logicalVolIntegrityDesc *lvid =
99 (struct logicalVolIntegrityDesc *)
100 sbi->s_lvid_bh->b_data;
101 struct logicalVolIntegrityDescImpUse *lvidiu =
102 udf_sb_lvidiu(sbi);
92 struct logicalVolHeaderDesc *lvhd; 103 struct logicalVolHeaderDesc *lvhd;
93 uint64_t uniqueID; 104 uint64_t uniqueID;
94 lvhd = (struct logicalVolHeaderDesc *)(UDF_SB_LVID(sb)->logicalVolContentsUse); 105 lvhd = (struct logicalVolHeaderDesc *)
106 (lvid->logicalVolContentsUse);
95 if (S_ISDIR(mode)) 107 if (S_ISDIR(mode))
96 UDF_SB_LVIDIU(sb)->numDirs = 108 lvidiu->numDirs =
97 cpu_to_le32(le32_to_cpu(UDF_SB_LVIDIU(sb)->numDirs) + 1); 109 cpu_to_le32(le32_to_cpu(lvidiu->numDirs) + 1);
98 else 110 else
99 UDF_SB_LVIDIU(sb)->numFiles = 111 lvidiu->numFiles =
100 cpu_to_le32(le32_to_cpu(UDF_SB_LVIDIU(sb)->numFiles) + 1); 112 cpu_to_le32(le32_to_cpu(lvidiu->numFiles) + 1);
101 UDF_I_UNIQUE(inode) = uniqueID = le64_to_cpu(lvhd->uniqueID); 113 iinfo->i_unique = uniqueID = le64_to_cpu(lvhd->uniqueID);
102 if (!(++uniqueID & 0x00000000FFFFFFFFUL)) 114 if (!(++uniqueID & 0x00000000FFFFFFFFUL))
103 uniqueID += 16; 115 uniqueID += 16;
104 lvhd->uniqueID = cpu_to_le64(uniqueID); 116 lvhd->uniqueID = cpu_to_le64(uniqueID);
105 mark_buffer_dirty(UDF_SB_LVIDBH(sb)); 117 mark_buffer_dirty(sbi->s_lvid_bh);
106 } 118 }
107 inode->i_mode = mode; 119 inode->i_mode = mode;
108 inode->i_uid = current->fsuid; 120 inode->i_uid = current->fsuid;
@@ -114,35 +126,41 @@ struct inode *udf_new_inode(struct inode *dir, int mode, int *err)
114 inode->i_gid = current->fsgid; 126 inode->i_gid = current->fsgid;
115 } 127 }
116 128
117 UDF_I_LOCATION(inode).logicalBlockNum = block; 129 iinfo->i_location.logicalBlockNum = block;
118 UDF_I_LOCATION(inode).partitionReferenceNum = UDF_I_LOCATION(dir).partitionReferenceNum; 130 iinfo->i_location.partitionReferenceNum =
119 inode->i_ino = udf_get_lb_pblock(sb, UDF_I_LOCATION(inode), 0); 131 dinfo->i_location.partitionReferenceNum;
132 inode->i_ino = udf_get_lb_pblock(sb, iinfo->i_location, 0);
120 inode->i_blocks = 0; 133 inode->i_blocks = 0;
121 UDF_I_LENEATTR(inode) = 0; 134 iinfo->i_lenEAttr = 0;
122 UDF_I_LENALLOC(inode) = 0; 135 iinfo->i_lenAlloc = 0;
123 UDF_I_USE(inode) = 0; 136 iinfo->i_use = 0;
124 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_EXTENDED_FE)) { 137 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_EXTENDED_FE)) {
125 UDF_I_EFE(inode) = 1; 138 iinfo->i_efe = 1;
126 UDF_UPDATE_UDFREV(inode->i_sb, UDF_VERS_USE_EXTENDED_FE); 139 if (UDF_VERS_USE_EXTENDED_FE > sbi->s_udfrev)
127 UDF_I_DATA(inode) = kzalloc(inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry), GFP_KERNEL); 140 sbi->s_udfrev = UDF_VERS_USE_EXTENDED_FE;
141 iinfo->i_ext.i_data = kzalloc(inode->i_sb->s_blocksize -
142 sizeof(struct extendedFileEntry),
143 GFP_KERNEL);
128 } else { 144 } else {
129 UDF_I_EFE(inode) = 0; 145 iinfo->i_efe = 0;
130 UDF_I_DATA(inode) = kzalloc(inode->i_sb->s_blocksize - sizeof(struct fileEntry), GFP_KERNEL); 146 iinfo->i_ext.i_data = kzalloc(inode->i_sb->s_blocksize -
147 sizeof(struct fileEntry),
148 GFP_KERNEL);
131 } 149 }
132 if (!UDF_I_DATA(inode)) { 150 if (!iinfo->i_ext.i_data) {
133 iput(inode); 151 iput(inode);
134 *err = -ENOMEM; 152 *err = -ENOMEM;
135 mutex_unlock(&sbi->s_alloc_mutex); 153 mutex_unlock(&sbi->s_alloc_mutex);
136 return NULL; 154 return NULL;
137 } 155 }
138 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_AD_IN_ICB)) 156 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_AD_IN_ICB))
139 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_IN_ICB; 157 iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB;
140 else if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD)) 158 else if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
141 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_SHORT; 159 iinfo->i_alloc_type = ICBTAG_FLAG_AD_SHORT;
142 else 160 else
143 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_LONG; 161 iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG;
144 inode->i_mtime = inode->i_atime = inode->i_ctime = 162 inode->i_mtime = inode->i_atime = inode->i_ctime =
145 UDF_I_CRTIME(inode) = current_fs_time(inode->i_sb); 163 iinfo->i_crtime = current_fs_time(inode->i_sb);
146 insert_inode_hash(inode); 164 insert_inode_hash(inode);
147 mark_inode_dirty(inode); 165 mark_inode_dirty(inode);
148 mutex_unlock(&sbi->s_alloc_mutex); 166 mutex_unlock(&sbi->s_alloc_mutex);
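The hunks above and below follow one pattern: every UDF_I_<FIELD>(inode) macro becomes a plain field access through a cached pointer, iinfo = UDF_I(inode) (dinfo for the parent directory). A minimal sketch of the private inode structure and accessor is given here for orientation; the field names are taken from the members these hunks touch, but the real definition lives in fs/udf/udf_i.h (not part of this diff), so the types, field order and the omitted members are assumptions.

	/* Sketch only -- inferred from the fields used in the hunks; the
	 * authoritative definition is in fs/udf/udf_i.h. Relies on UDF's own
	 * kernel_lb_addr type and <linux/fs.h>. */
	struct udf_inode_info {
		struct timespec	i_crtime;
		kernel_lb_addr	i_location;	/* on-disc ICB location */
		uint64_t	i_unique;
		uint32_t	i_lenEAttr;
		uint32_t	i_lenAlloc;
		uint64_t	i_lenExtents;
		uint32_t	i_next_alloc_block;
		uint32_t	i_next_alloc_goal;
		unsigned	i_alloc_type : 3;	/* ICBTAG_FLAG_AD_* */
		unsigned	i_efe : 1;	/* extended file entry? */
		unsigned	i_use : 1;	/* unallocated space entry? */
		unsigned	i_strat4096 : 1;
		union {
			uint8_t *i_data;	/* in-ICB data / alloc descs */
			/* short_ad / long_ad views omitted in this sketch */
		} i_ext;
		struct inode	vfs_inode;	/* embedded VFS inode */
	};

	static inline struct udf_inode_info *UDF_I(struct inode *inode)
	{
		return container_of(inode, struct udf_inode_info, vfs_inode);
	}

Caching the result once per function is what lets the converted lines fit in 80 columns without re-expanding the macro at every use.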
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index 6ff8151984cf..24cfa55d0fdc 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -19,7 +19,8 @@
19 * 10/04/98 dgb Added rudimentary directory functions 19 * 10/04/98 dgb Added rudimentary directory functions
20 * 10/07/98 Fully working udf_block_map! It works! 20 * 10/07/98 Fully working udf_block_map! It works!
21 * 11/25/98 bmap altered to better support extents 21 * 11/25/98 bmap altered to better support extents
22 * 12/06/98 blf partition support in udf_iget, udf_block_map and udf_read_inode 22 * 12/06/98 blf partition support in udf_iget, udf_block_map
23 * and udf_read_inode
23 * 12/12/98 rewrote udf_block_map to handle next extents and descs across 24 * 12/12/98 rewrote udf_block_map to handle next extents and descs across
24 * block boundaries (which is not actually allowed) 25 * block boundaries (which is not actually allowed)
25 * 12/20/98 added support for strategy 4096 26 * 12/20/98 added support for strategy 4096
@@ -51,7 +52,7 @@ static int udf_update_inode(struct inode *, int);
51static void udf_fill_inode(struct inode *, struct buffer_head *); 52static void udf_fill_inode(struct inode *, struct buffer_head *);
52static int udf_alloc_i_data(struct inode *inode, size_t size); 53static int udf_alloc_i_data(struct inode *inode, size_t size);
53static struct buffer_head *inode_getblk(struct inode *, sector_t, int *, 54static struct buffer_head *inode_getblk(struct inode *, sector_t, int *,
54 long *, int *); 55 sector_t *, int *);
55static int8_t udf_insert_aext(struct inode *, struct extent_position, 56static int8_t udf_insert_aext(struct inode *, struct extent_position,
56 kernel_lb_addr, uint32_t); 57 kernel_lb_addr, uint32_t);
57static void udf_split_extents(struct inode *, int *, int, int, 58static void udf_split_extents(struct inode *, int *, int, int,
@@ -111,16 +112,18 @@ no_delete:
111 */ 112 */
112void udf_clear_inode(struct inode *inode) 113void udf_clear_inode(struct inode *inode)
113{ 114{
115 struct udf_inode_info *iinfo;
114 if (!(inode->i_sb->s_flags & MS_RDONLY)) { 116 if (!(inode->i_sb->s_flags & MS_RDONLY)) {
115 lock_kernel(); 117 lock_kernel();
116 /* Discard preallocation for directories, symlinks, etc. */ 118 /* Discard preallocation for directories, symlinks, etc. */
117 udf_discard_prealloc(inode); 119 udf_discard_prealloc(inode);
118 udf_truncate_tail_extent(inode); 120 udf_truncate_tail_extent(inode);
119 unlock_kernel(); 121 unlock_kernel();
120 write_inode_now(inode, 1); 122 write_inode_now(inode, 0);
121 } 123 }
122 kfree(UDF_I_DATA(inode)); 124 iinfo = UDF_I(inode);
123 UDF_I_DATA(inode) = NULL; 125 kfree(iinfo->i_ext.i_data);
126 iinfo->i_ext.i_data = NULL;
124} 127}
125 128
126static int udf_writepage(struct page *page, struct writeback_control *wbc) 129static int udf_writepage(struct page *page, struct writeback_control *wbc)
@@ -160,6 +163,7 @@ void udf_expand_file_adinicb(struct inode *inode, int newsize, int *err)
160{ 163{
161 struct page *page; 164 struct page *page;
162 char *kaddr; 165 char *kaddr;
166 struct udf_inode_info *iinfo = UDF_I(inode);
163 struct writeback_control udf_wbc = { 167 struct writeback_control udf_wbc = {
164 .sync_mode = WB_SYNC_NONE, 168 .sync_mode = WB_SYNC_NONE,
165 .nr_to_write = 1, 169 .nr_to_write = 1,
@@ -168,11 +172,11 @@ void udf_expand_file_adinicb(struct inode *inode, int newsize, int *err)
168 /* from now on we have normal address_space methods */ 172 /* from now on we have normal address_space methods */
169 inode->i_data.a_ops = &udf_aops; 173 inode->i_data.a_ops = &udf_aops;
170 174
171 if (!UDF_I_LENALLOC(inode)) { 175 if (!iinfo->i_lenAlloc) {
172 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD)) 176 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
173 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_SHORT; 177 iinfo->i_alloc_type = ICBTAG_FLAG_AD_SHORT;
174 else 178 else
175 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_LONG; 179 iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG;
176 mark_inode_dirty(inode); 180 mark_inode_dirty(inode);
177 return; 181 return;
178 } 182 }
@@ -182,21 +186,21 @@ void udf_expand_file_adinicb(struct inode *inode, int newsize, int *err)
182 186
183 if (!PageUptodate(page)) { 187 if (!PageUptodate(page)) {
184 kaddr = kmap(page); 188 kaddr = kmap(page);
185 memset(kaddr + UDF_I_LENALLOC(inode), 0x00, 189 memset(kaddr + iinfo->i_lenAlloc, 0x00,
186 PAGE_CACHE_SIZE - UDF_I_LENALLOC(inode)); 190 PAGE_CACHE_SIZE - iinfo->i_lenAlloc);
187 memcpy(kaddr, UDF_I_DATA(inode) + UDF_I_LENEATTR(inode), 191 memcpy(kaddr, iinfo->i_ext.i_data + iinfo->i_lenEAttr,
188 UDF_I_LENALLOC(inode)); 192 iinfo->i_lenAlloc);
189 flush_dcache_page(page); 193 flush_dcache_page(page);
190 SetPageUptodate(page); 194 SetPageUptodate(page);
191 kunmap(page); 195 kunmap(page);
192 } 196 }
193 memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode), 0x00, 197 memset(iinfo->i_ext.i_data + iinfo->i_lenEAttr, 0x00,
194 UDF_I_LENALLOC(inode)); 198 iinfo->i_lenAlloc);
195 UDF_I_LENALLOC(inode) = 0; 199 iinfo->i_lenAlloc = 0;
196 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD)) 200 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
197 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_SHORT; 201 iinfo->i_alloc_type = ICBTAG_FLAG_AD_SHORT;
198 else 202 else
199 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_LONG; 203 iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG;
200 204
201 inode->i_data.a_ops->writepage(page, &udf_wbc); 205 inode->i_data.a_ops->writepage(page, &udf_wbc);
202 page_cache_release(page); 206 page_cache_release(page);
@@ -215,9 +219,10 @@ struct buffer_head *udf_expand_dir_adinicb(struct inode *inode, int *block,
215 struct extent_position epos; 219 struct extent_position epos;
216 220
217 struct udf_fileident_bh sfibh, dfibh; 221 struct udf_fileident_bh sfibh, dfibh;
218 loff_t f_pos = udf_ext0_offset(inode) >> 2; 222 loff_t f_pos = udf_ext0_offset(inode);
219 int size = (udf_ext0_offset(inode) + inode->i_size) >> 2; 223 int size = udf_ext0_offset(inode) + inode->i_size;
220 struct fileIdentDesc cfi, *sfi, *dfi; 224 struct fileIdentDesc cfi, *sfi, *dfi;
225 struct udf_inode_info *iinfo = UDF_I(inode);
221 226
222 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD)) 227 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
223 alloctype = ICBTAG_FLAG_AD_SHORT; 228 alloctype = ICBTAG_FLAG_AD_SHORT;
@@ -225,19 +230,20 @@ struct buffer_head *udf_expand_dir_adinicb(struct inode *inode, int *block,
225 alloctype = ICBTAG_FLAG_AD_LONG; 230 alloctype = ICBTAG_FLAG_AD_LONG;
226 231
227 if (!inode->i_size) { 232 if (!inode->i_size) {
228 UDF_I_ALLOCTYPE(inode) = alloctype; 233 iinfo->i_alloc_type = alloctype;
229 mark_inode_dirty(inode); 234 mark_inode_dirty(inode);
230 return NULL; 235 return NULL;
231 } 236 }
232 237
233 /* alloc block, and copy data to it */ 238 /* alloc block, and copy data to it */
234 *block = udf_new_block(inode->i_sb, inode, 239 *block = udf_new_block(inode->i_sb, inode,
235 UDF_I_LOCATION(inode).partitionReferenceNum, 240 iinfo->i_location.partitionReferenceNum,
236 UDF_I_LOCATION(inode).logicalBlockNum, err); 241 iinfo->i_location.logicalBlockNum, err);
237 if (!(*block)) 242 if (!(*block))
238 return NULL; 243 return NULL;
239 newblock = udf_get_pblock(inode->i_sb, *block, 244 newblock = udf_get_pblock(inode->i_sb, *block,
240 UDF_I_LOCATION(inode).partitionReferenceNum, 0); 245 iinfo->i_location.partitionReferenceNum,
246 0);
241 if (!newblock) 247 if (!newblock)
242 return NULL; 248 return NULL;
243 dbh = udf_tgetblk(inode->i_sb, newblock); 249 dbh = udf_tgetblk(inode->i_sb, newblock);
@@ -249,39 +255,44 @@ struct buffer_head *udf_expand_dir_adinicb(struct inode *inode, int *block,
249 unlock_buffer(dbh); 255 unlock_buffer(dbh);
250 mark_buffer_dirty_inode(dbh, inode); 256 mark_buffer_dirty_inode(dbh, inode);
251 257
252 sfibh.soffset = sfibh.eoffset = (f_pos & ((inode->i_sb->s_blocksize - 1) >> 2)) << 2; 258 sfibh.soffset = sfibh.eoffset =
259 f_pos & (inode->i_sb->s_blocksize - 1);
253 sfibh.sbh = sfibh.ebh = NULL; 260 sfibh.sbh = sfibh.ebh = NULL;
254 dfibh.soffset = dfibh.eoffset = 0; 261 dfibh.soffset = dfibh.eoffset = 0;
255 dfibh.sbh = dfibh.ebh = dbh; 262 dfibh.sbh = dfibh.ebh = dbh;
256 while ((f_pos < size)) { 263 while (f_pos < size) {
257 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_IN_ICB; 264 iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB;
258 sfi = udf_fileident_read(inode, &f_pos, &sfibh, &cfi, NULL, NULL, NULL, NULL); 265 sfi = udf_fileident_read(inode, &f_pos, &sfibh, &cfi, NULL,
266 NULL, NULL, NULL);
259 if (!sfi) { 267 if (!sfi) {
260 brelse(dbh); 268 brelse(dbh);
261 return NULL; 269 return NULL;
262 } 270 }
263 UDF_I_ALLOCTYPE(inode) = alloctype; 271 iinfo->i_alloc_type = alloctype;
264 sfi->descTag.tagLocation = cpu_to_le32(*block); 272 sfi->descTag.tagLocation = cpu_to_le32(*block);
265 dfibh.soffset = dfibh.eoffset; 273 dfibh.soffset = dfibh.eoffset;
266 dfibh.eoffset += (sfibh.eoffset - sfibh.soffset); 274 dfibh.eoffset += (sfibh.eoffset - sfibh.soffset);
267 dfi = (struct fileIdentDesc *)(dbh->b_data + dfibh.soffset); 275 dfi = (struct fileIdentDesc *)(dbh->b_data + dfibh.soffset);
268 if (udf_write_fi(inode, sfi, dfi, &dfibh, sfi->impUse, 276 if (udf_write_fi(inode, sfi, dfi, &dfibh, sfi->impUse,
269 sfi->fileIdent + le16_to_cpu(sfi->lengthOfImpUse))) { 277 sfi->fileIdent +
270 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_IN_ICB; 278 le16_to_cpu(sfi->lengthOfImpUse))) {
279 iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB;
271 brelse(dbh); 280 brelse(dbh);
272 return NULL; 281 return NULL;
273 } 282 }
274 } 283 }
275 mark_buffer_dirty_inode(dbh, inode); 284 mark_buffer_dirty_inode(dbh, inode);
276 285
277 memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode), 0, UDF_I_LENALLOC(inode)); 286 memset(iinfo->i_ext.i_data + iinfo->i_lenEAttr, 0,
278 UDF_I_LENALLOC(inode) = 0; 287 iinfo->i_lenAlloc);
288 iinfo->i_lenAlloc = 0;
279 eloc.logicalBlockNum = *block; 289 eloc.logicalBlockNum = *block;
280 eloc.partitionReferenceNum = UDF_I_LOCATION(inode).partitionReferenceNum; 290 eloc.partitionReferenceNum =
281 elen = inode->i_size; 291 iinfo->i_location.partitionReferenceNum;
282 UDF_I_LENEXTENTS(inode) = elen; 292 elen = inode->i_sb->s_blocksize;
293 iinfo->i_lenExtents = elen;
283 epos.bh = NULL; 294 epos.bh = NULL;
284 epos.block = UDF_I_LOCATION(inode); 295 epos.block = iinfo->i_location;
285 epos.offset = udf_file_entry_alloc_offset(inode); 296 epos.offset = udf_file_entry_alloc_offset(inode);
286 udf_add_aext(inode, &epos, eloc, elen, 0); 297 udf_add_aext(inode, &epos, eloc, elen, 0);
287 /* UniqueID stuff */ 298 /* UniqueID stuff */
@@ -296,7 +307,8 @@ static int udf_get_block(struct inode *inode, sector_t block,
296{ 307{
297 int err, new; 308 int err, new;
298 struct buffer_head *bh; 309 struct buffer_head *bh;
299 unsigned long phys; 310 sector_t phys = 0;
311 struct udf_inode_info *iinfo;
300 312
301 if (!create) { 313 if (!create) {
302 phys = udf_block_map(inode, block); 314 phys = udf_block_map(inode, block);
@@ -314,9 +326,10 @@ static int udf_get_block(struct inode *inode, sector_t block,
314 if (block < 0) 326 if (block < 0)
315 goto abort_negative; 327 goto abort_negative;
316 328
317 if (block == UDF_I_NEXT_ALLOC_BLOCK(inode) + 1) { 329 iinfo = UDF_I(inode);
318 UDF_I_NEXT_ALLOC_BLOCK(inode)++; 330 if (block == iinfo->i_next_alloc_block + 1) {
319 UDF_I_NEXT_ALLOC_GOAL(inode)++; 331 iinfo->i_next_alloc_block++;
332 iinfo->i_next_alloc_goal++;
320 } 333 }
321 334
322 err = 0; 335 err = 0;
@@ -366,32 +379,35 @@ static struct buffer_head *udf_getblk(struct inode *inode, long block,
366 379
367/* Extend the file by 'blocks' blocks, return the number of extents added */ 380/* Extend the file by 'blocks' blocks, return the number of extents added */
368int udf_extend_file(struct inode *inode, struct extent_position *last_pos, 381int udf_extend_file(struct inode *inode, struct extent_position *last_pos,
369 kernel_long_ad * last_ext, sector_t blocks) 382 kernel_long_ad *last_ext, sector_t blocks)
370{ 383{
371 sector_t add; 384 sector_t add;
372 int count = 0, fake = !(last_ext->extLength & UDF_EXTENT_LENGTH_MASK); 385 int count = 0, fake = !(last_ext->extLength & UDF_EXTENT_LENGTH_MASK);
373 struct super_block *sb = inode->i_sb; 386 struct super_block *sb = inode->i_sb;
374 kernel_lb_addr prealloc_loc = {}; 387 kernel_lb_addr prealloc_loc = {};
375 int prealloc_len = 0; 388 int prealloc_len = 0;
389 struct udf_inode_info *iinfo;
376 390
377 /* The previous extent is fake and we should not extend by anything 391 /* The previous extent is fake and we should not extend by anything
378 * - there's nothing to do... */ 392 * - there's nothing to do... */
379 if (!blocks && fake) 393 if (!blocks && fake)
380 return 0; 394 return 0;
381 395
396 iinfo = UDF_I(inode);
382 /* Round the last extent up to a multiple of block size */ 397 /* Round the last extent up to a multiple of block size */
383 if (last_ext->extLength & (sb->s_blocksize - 1)) { 398 if (last_ext->extLength & (sb->s_blocksize - 1)) {
384 last_ext->extLength = 399 last_ext->extLength =
385 (last_ext->extLength & UDF_EXTENT_FLAG_MASK) | 400 (last_ext->extLength & UDF_EXTENT_FLAG_MASK) |
386 (((last_ext->extLength & UDF_EXTENT_LENGTH_MASK) + 401 (((last_ext->extLength & UDF_EXTENT_LENGTH_MASK) +
387 sb->s_blocksize - 1) & ~(sb->s_blocksize - 1)); 402 sb->s_blocksize - 1) & ~(sb->s_blocksize - 1));
388 UDF_I_LENEXTENTS(inode) = 403 iinfo->i_lenExtents =
389 (UDF_I_LENEXTENTS(inode) + sb->s_blocksize - 1) & 404 (iinfo->i_lenExtents + sb->s_blocksize - 1) &
390 ~(sb->s_blocksize - 1); 405 ~(sb->s_blocksize - 1);
391 } 406 }
392 407
393 /* Last extent are just preallocated blocks? */ 408 /* Last extent are just preallocated blocks? */
394 if ((last_ext->extLength & UDF_EXTENT_FLAG_MASK) == EXT_NOT_RECORDED_ALLOCATED) { 409 if ((last_ext->extLength & UDF_EXTENT_FLAG_MASK) ==
410 EXT_NOT_RECORDED_ALLOCATED) {
395 /* Save the extent so that we can reattach it to the end */ 411 /* Save the extent so that we can reattach it to the end */
396 prealloc_loc = last_ext->extLocation; 412 prealloc_loc = last_ext->extLocation;
397 prealloc_len = last_ext->extLength; 413 prealloc_len = last_ext->extLength;
@@ -399,13 +415,15 @@ int udf_extend_file(struct inode *inode, struct extent_position *last_pos,
399 last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED | 415 last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
400 (last_ext->extLength & UDF_EXTENT_LENGTH_MASK); 416 (last_ext->extLength & UDF_EXTENT_LENGTH_MASK);
401 last_ext->extLocation.logicalBlockNum = 0; 417 last_ext->extLocation.logicalBlockNum = 0;
402 last_ext->extLocation.partitionReferenceNum = 0; 418 last_ext->extLocation.partitionReferenceNum = 0;
403 } 419 }
404 420
405 /* Can we merge with the previous extent? */ 421 /* Can we merge with the previous extent? */
406 if ((last_ext->extLength & UDF_EXTENT_FLAG_MASK) == EXT_NOT_RECORDED_NOT_ALLOCATED) { 422 if ((last_ext->extLength & UDF_EXTENT_FLAG_MASK) ==
407 add = ((1 << 30) - sb->s_blocksize - (last_ext->extLength & 423 EXT_NOT_RECORDED_NOT_ALLOCATED) {
408 UDF_EXTENT_LENGTH_MASK)) >> sb->s_blocksize_bits; 424 add = ((1 << 30) - sb->s_blocksize -
425 (last_ext->extLength & UDF_EXTENT_LENGTH_MASK)) >>
426 sb->s_blocksize_bits;
409 if (add > blocks) 427 if (add > blocks)
410 add = blocks; 428 add = blocks;
411 blocks -= add; 429 blocks -= add;
@@ -416,9 +434,9 @@ int udf_extend_file(struct inode *inode, struct extent_position *last_pos,
416 udf_add_aext(inode, last_pos, last_ext->extLocation, 434 udf_add_aext(inode, last_pos, last_ext->extLocation,
417 last_ext->extLength, 1); 435 last_ext->extLength, 1);
418 count++; 436 count++;
419 } else { 437 } else
420 udf_write_aext(inode, last_pos, last_ext->extLocation, last_ext->extLength, 1); 438 udf_write_aext(inode, last_pos, last_ext->extLocation,
421 } 439 last_ext->extLength, 1);
422 440
423 /* Managed to do everything necessary? */ 441 /* Managed to do everything necessary? */
424 if (!blocks) 442 if (!blocks)
@@ -426,9 +444,10 @@ int udf_extend_file(struct inode *inode, struct extent_position *last_pos,
426 444
427 /* All further extents will be NOT_RECORDED_NOT_ALLOCATED */ 445 /* All further extents will be NOT_RECORDED_NOT_ALLOCATED */
428 last_ext->extLocation.logicalBlockNum = 0; 446 last_ext->extLocation.logicalBlockNum = 0;
429 last_ext->extLocation.partitionReferenceNum = 0; 447 last_ext->extLocation.partitionReferenceNum = 0;
430 add = (1 << (30-sb->s_blocksize_bits)) - 1; 448 add = (1 << (30-sb->s_blocksize_bits)) - 1;
431 last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED | (add << sb->s_blocksize_bits); 449 last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
450 (add << sb->s_blocksize_bits);
432 451
433 /* Create enough extents to cover the whole hole */ 452 /* Create enough extents to cover the whole hole */
434 while (blocks > add) { 453 while (blocks > add) {
@@ -450,7 +469,8 @@ int udf_extend_file(struct inode *inode, struct extent_position *last_pos,
450out: 469out:
451 /* Do we have some preallocated blocks saved? */ 470 /* Do we have some preallocated blocks saved? */
452 if (prealloc_len) { 471 if (prealloc_len) {
453 if (udf_add_aext(inode, last_pos, prealloc_loc, prealloc_len, 1) == -1) 472 if (udf_add_aext(inode, last_pos, prealloc_loc,
473 prealloc_len, 1) == -1)
454 return -1; 474 return -1;
455 last_ext->extLocation = prealloc_loc; 475 last_ext->extLocation = prealloc_loc;
456 last_ext->extLength = prealloc_len; 476 last_ext->extLength = prealloc_len;
@@ -458,9 +478,9 @@ out:
458 } 478 }
459 479
460 /* last_pos should point to the last written extent... */ 480 /* last_pos should point to the last written extent... */
461 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT) 481 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
462 last_pos->offset -= sizeof(short_ad); 482 last_pos->offset -= sizeof(short_ad);
463 else if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_LONG) 483 else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
464 last_pos->offset -= sizeof(long_ad); 484 last_pos->offset -= sizeof(long_ad);
465 else 485 else
466 return -1; 486 return -1;
@@ -469,7 +489,7 @@ out:
469} 489}
470 490
471static struct buffer_head *inode_getblk(struct inode *inode, sector_t block, 491static struct buffer_head *inode_getblk(struct inode *inode, sector_t block,
472 int *err, long *phys, int *new) 492 int *err, sector_t *phys, int *new)
473{ 493{
474 static sector_t last_block; 494 static sector_t last_block;
475 struct buffer_head *result = NULL; 495 struct buffer_head *result = NULL;
@@ -483,11 +503,12 @@ static struct buffer_head *inode_getblk(struct inode *inode, sector_t block,
483 uint32_t newblocknum, newblock; 503 uint32_t newblocknum, newblock;
484 sector_t offset = 0; 504 sector_t offset = 0;
485 int8_t etype; 505 int8_t etype;
486 int goal = 0, pgoal = UDF_I_LOCATION(inode).logicalBlockNum; 506 struct udf_inode_info *iinfo = UDF_I(inode);
507 int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
487 int lastblock = 0; 508 int lastblock = 0;
488 509
489 prev_epos.offset = udf_file_entry_alloc_offset(inode); 510 prev_epos.offset = udf_file_entry_alloc_offset(inode);
490 prev_epos.block = UDF_I_LOCATION(inode); 511 prev_epos.block = iinfo->i_location;
491 prev_epos.bh = NULL; 512 prev_epos.bh = NULL;
492 cur_epos = next_epos = prev_epos; 513 cur_epos = next_epos = prev_epos;
493 b_off = (loff_t)block << inode->i_sb->s_blocksize_bits; 514 b_off = (loff_t)block << inode->i_sb->s_blocksize_bits;
@@ -515,7 +536,8 @@ static struct buffer_head *inode_getblk(struct inode *inode, sector_t block,
515 prev_epos.offset = cur_epos.offset; 536 prev_epos.offset = cur_epos.offset;
516 cur_epos.offset = next_epos.offset; 537 cur_epos.offset = next_epos.offset;
517 538
518 if ((etype = udf_next_aext(inode, &next_epos, &eloc, &elen, 1)) == -1) 539 etype = udf_next_aext(inode, &next_epos, &eloc, &elen, 1);
540 if (etype == -1)
519 break; 541 break;
520 542
521 c = !c; 543 c = !c;
@@ -569,9 +591,11 @@ static struct buffer_head *inode_getblk(struct inode *inode, sector_t block,
569 startnum = 1; 591 startnum = 1;
570 } else { 592 } else {
571 /* Create a fake extent when there's not one */ 593 /* Create a fake extent when there's not one */
572 memset(&laarr[0].extLocation, 0x00, sizeof(kernel_lb_addr)); 594 memset(&laarr[0].extLocation, 0x00,
595 sizeof(kernel_lb_addr));
573 laarr[0].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED; 596 laarr[0].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED;
574 /* Will udf_extend_file() create real extent from a fake one? */ 597 /* Will udf_extend_file() create real extent from
598 a fake one? */
575 startnum = (offset > 0); 599 startnum = (offset > 0);
576 } 600 }
577 /* Create extents for the hole between EOF and offset */ 601 /* Create extents for the hole between EOF and offset */
@@ -589,14 +613,16 @@ static struct buffer_head *inode_getblk(struct inode *inode, sector_t block,
589 offset = 0; 613 offset = 0;
590 count += ret; 614 count += ret;
591 /* We are not covered by a preallocated extent? */ 615 /* We are not covered by a preallocated extent? */
592 if ((laarr[0].extLength & UDF_EXTENT_FLAG_MASK) != EXT_NOT_RECORDED_ALLOCATED) { 616 if ((laarr[0].extLength & UDF_EXTENT_FLAG_MASK) !=
617 EXT_NOT_RECORDED_ALLOCATED) {
593 /* Is there any real extent? - otherwise we overwrite 618 /* Is there any real extent? - otherwise we overwrite
594 * the fake one... */ 619 * the fake one... */
595 if (count) 620 if (count)
596 c = !c; 621 c = !c;
597 laarr[c].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED | 622 laarr[c].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
598 inode->i_sb->s_blocksize; 623 inode->i_sb->s_blocksize;
599 memset(&laarr[c].extLocation, 0x00, sizeof(kernel_lb_addr)); 624 memset(&laarr[c].extLocation, 0x00,
625 sizeof(kernel_lb_addr));
600 count++; 626 count++;
601 endnum++; 627 endnum++;
602 } 628 }
@@ -605,7 +631,8 @@ static struct buffer_head *inode_getblk(struct inode *inode, sector_t block,
605 } else { 631 } else {
606 endnum = startnum = ((count > 2) ? 2 : count); 632 endnum = startnum = ((count > 2) ? 2 : count);
607 633
608 /* if the current extent is in position 0, swap it with the previous */ 634 /* if the current extent is in position 0,
635 swap it with the previous */
609 if (!c && count != 1) { 636 if (!c && count != 1) {
610 laarr[2] = laarr[0]; 637 laarr[2] = laarr[0];
611 laarr[0] = laarr[1]; 638 laarr[0] = laarr[1];
@@ -613,44 +640,47 @@ static struct buffer_head *inode_getblk(struct inode *inode, sector_t block,
613 c = 1; 640 c = 1;
614 } 641 }
615 642
616 /* if the current block is located in an extent, read the next extent */ 643 /* if the current block is located in an extent,
617 if ((etype = udf_next_aext(inode, &next_epos, &eloc, &elen, 0)) != -1) { 644 read the next extent */
645 etype = udf_next_aext(inode, &next_epos, &eloc, &elen, 0);
646 if (etype != -1) {
618 laarr[c + 1].extLength = (etype << 30) | elen; 647 laarr[c + 1].extLength = (etype << 30) | elen;
619 laarr[c + 1].extLocation = eloc; 648 laarr[c + 1].extLocation = eloc;
620 count++; 649 count++;
621 startnum++; 650 startnum++;
622 endnum++; 651 endnum++;
623 } else { 652 } else
624 lastblock = 1; 653 lastblock = 1;
625 }
626 } 654 }
627 655
628 /* if the current extent is not recorded but allocated, get the 656 /* if the current extent is not recorded but allocated, get the
629 * block in the extent corresponding to the requested block */ 657 * block in the extent corresponding to the requested block */
630 if ((laarr[c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30)) { 658 if ((laarr[c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
631 newblocknum = laarr[c].extLocation.logicalBlockNum + offset; 659 newblocknum = laarr[c].extLocation.logicalBlockNum + offset;
632 } else { /* otherwise, allocate a new block */ 660 else { /* otherwise, allocate a new block */
633 if (UDF_I_NEXT_ALLOC_BLOCK(inode) == block) 661 if (iinfo->i_next_alloc_block == block)
634 goal = UDF_I_NEXT_ALLOC_GOAL(inode); 662 goal = iinfo->i_next_alloc_goal;
635 663
636 if (!goal) { 664 if (!goal) {
637 if (!(goal = pgoal)) 665 if (!(goal = pgoal)) /* XXX: what was intended here? */
638 goal = UDF_I_LOCATION(inode).logicalBlockNum + 1; 666 goal = iinfo->i_location.logicalBlockNum + 1;
639 } 667 }
640 668
641 if (!(newblocknum = udf_new_block(inode->i_sb, inode, 669 newblocknum = udf_new_block(inode->i_sb, inode,
642 UDF_I_LOCATION(inode).partitionReferenceNum, 670 iinfo->i_location.partitionReferenceNum,
643 goal, err))) { 671 goal, err);
672 if (!newblocknum) {
644 brelse(prev_epos.bh); 673 brelse(prev_epos.bh);
645 *err = -ENOSPC; 674 *err = -ENOSPC;
646 return NULL; 675 return NULL;
647 } 676 }
648 UDF_I_LENEXTENTS(inode) += inode->i_sb->s_blocksize; 677 iinfo->i_lenExtents += inode->i_sb->s_blocksize;
649 } 678 }
650 679
651 /* if the extent the requested block is located in contains multiple blocks, 680 /* if the extent the requested block is located in contains multiple
652 * split the extent into at most three extents. blocks prior to requested 681 * blocks, split the extent into at most three extents. blocks prior
653 * block, requested block, and blocks after requested block */ 682 * to requested block, requested block, and blocks after requested
683 * block */
654 udf_split_extents(inode, &c, offset, newblocknum, laarr, &endnum); 684 udf_split_extents(inode, &c, offset, newblocknum, laarr, &endnum);
655 685
656#ifdef UDF_PREALLOCATE 686#ifdef UDF_PREALLOCATE
@@ -668,15 +698,15 @@ static struct buffer_head *inode_getblk(struct inode *inode, sector_t block,
668 698
669 brelse(prev_epos.bh); 699 brelse(prev_epos.bh);
670 700
671 if (!(newblock = udf_get_pblock(inode->i_sb, newblocknum, 701 newblock = udf_get_pblock(inode->i_sb, newblocknum,
672 UDF_I_LOCATION(inode).partitionReferenceNum, 0))) { 702 iinfo->i_location.partitionReferenceNum, 0);
703 if (!newblock)
673 return NULL; 704 return NULL;
674 }
675 *phys = newblock; 705 *phys = newblock;
676 *err = 0; 706 *err = 0;
677 *new = 1; 707 *new = 1;
678 UDF_I_NEXT_ALLOC_BLOCK(inode) = block; 708 iinfo->i_next_alloc_block = block;
679 UDF_I_NEXT_ALLOC_GOAL(inode) = newblocknum; 709 iinfo->i_next_alloc_goal = newblocknum;
680 inode->i_ctime = current_fs_time(inode->i_sb); 710 inode->i_ctime = current_fs_time(inode->i_sb);
681 711
682 if (IS_SYNC(inode)) 712 if (IS_SYNC(inode))
@@ -692,16 +722,20 @@ static void udf_split_extents(struct inode *inode, int *c, int offset,
692 kernel_long_ad laarr[EXTENT_MERGE_SIZE], 722 kernel_long_ad laarr[EXTENT_MERGE_SIZE],
693 int *endnum) 723 int *endnum)
694{ 724{
725 unsigned long blocksize = inode->i_sb->s_blocksize;
726 unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;
727
695 if ((laarr[*c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30) || 728 if ((laarr[*c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30) ||
696 (laarr[*c].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) { 729 (laarr[*c].extLength >> 30) ==
730 (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) {
697 int curr = *c; 731 int curr = *c;
698 int blen = ((laarr[curr].extLength & UDF_EXTENT_LENGTH_MASK) + 732 int blen = ((laarr[curr].extLength & UDF_EXTENT_LENGTH_MASK) +
699 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits; 733 blocksize - 1) >> blocksize_bits;
700 int8_t etype = (laarr[curr].extLength >> 30); 734 int8_t etype = (laarr[curr].extLength >> 30);
701 735
702 if (blen == 1) { 736 if (blen == 1)
703 ; 737 ;
704 } else if (!offset || blen == offset + 1) { 738 else if (!offset || blen == offset + 1) {
705 laarr[curr + 2] = laarr[curr + 1]; 739 laarr[curr + 2] = laarr[curr + 1];
706 laarr[curr + 1] = laarr[curr]; 740 laarr[curr + 1] = laarr[curr];
707 } else { 741 } else {
@@ -711,15 +745,18 @@ static void udf_split_extents(struct inode *inode, int *c, int offset,
711 745
712 if (offset) { 746 if (offset) {
713 if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30)) { 747 if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30)) {
714 udf_free_blocks(inode->i_sb, inode, laarr[curr].extLocation, 0, offset); 748 udf_free_blocks(inode->i_sb, inode,
715 laarr[curr].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED | 749 laarr[curr].extLocation,
716 (offset << inode->i_sb->s_blocksize_bits); 750 0, offset);
751 laarr[curr].extLength =
752 EXT_NOT_RECORDED_NOT_ALLOCATED |
753 (offset << blocksize_bits);
717 laarr[curr].extLocation.logicalBlockNum = 0; 754 laarr[curr].extLocation.logicalBlockNum = 0;
718 laarr[curr].extLocation.partitionReferenceNum = 0; 755 laarr[curr].extLocation.
719 } else { 756 partitionReferenceNum = 0;
757 } else
720 laarr[curr].extLength = (etype << 30) | 758 laarr[curr].extLength = (etype << 30) |
721 (offset << inode->i_sb->s_blocksize_bits); 759 (offset << blocksize_bits);
722 }
723 curr++; 760 curr++;
724 (*c)++; 761 (*c)++;
725 (*endnum)++; 762 (*endnum)++;
@@ -728,16 +765,17 @@ static void udf_split_extents(struct inode *inode, int *c, int offset,
728 laarr[curr].extLocation.logicalBlockNum = newblocknum; 765 laarr[curr].extLocation.logicalBlockNum = newblocknum;
729 if (etype == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) 766 if (etype == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
730 laarr[curr].extLocation.partitionReferenceNum = 767 laarr[curr].extLocation.partitionReferenceNum =
731 UDF_I_LOCATION(inode).partitionReferenceNum; 768 UDF_I(inode)->i_location.partitionReferenceNum;
732 laarr[curr].extLength = EXT_RECORDED_ALLOCATED | 769 laarr[curr].extLength = EXT_RECORDED_ALLOCATED |
733 inode->i_sb->s_blocksize; 770 blocksize;
734 curr++; 771 curr++;
735 772
736 if (blen != offset + 1) { 773 if (blen != offset + 1) {
737 if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30)) 774 if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30))
738 laarr[curr].extLocation.logicalBlockNum += (offset + 1); 775 laarr[curr].extLocation.logicalBlockNum +=
776 offset + 1;
739 laarr[curr].extLength = (etype << 30) | 777 laarr[curr].extLength = (etype << 30) |
740 ((blen - (offset + 1)) << inode->i_sb->s_blocksize_bits); 778 ((blen - (offset + 1)) << blocksize_bits);
741 curr++; 779 curr++;
742 (*endnum)++; 780 (*endnum)++;
743 } 781 }
@@ -756,69 +794,86 @@ static void udf_prealloc_extents(struct inode *inode, int c, int lastblock,
756 else 794 else
757 start = c; 795 start = c;
758 } else { 796 } else {
759 if ((laarr[c + 1].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30)) { 797 if ((laarr[c + 1].extLength >> 30) ==
798 (EXT_NOT_RECORDED_ALLOCATED >> 30)) {
760 start = c + 1; 799 start = c + 1;
761 length = currlength = (((laarr[c + 1].extLength & UDF_EXTENT_LENGTH_MASK) + 800 length = currlength =
762 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits); 801 (((laarr[c + 1].extLength &
763 } else { 802 UDF_EXTENT_LENGTH_MASK) +
803 inode->i_sb->s_blocksize - 1) >>
804 inode->i_sb->s_blocksize_bits);
805 } else
764 start = c; 806 start = c;
765 }
766 } 807 }
767 808
768 for (i = start + 1; i <= *endnum; i++) { 809 for (i = start + 1; i <= *endnum; i++) {
769 if (i == *endnum) { 810 if (i == *endnum) {
770 if (lastblock) 811 if (lastblock)
771 length += UDF_DEFAULT_PREALLOC_BLOCKS; 812 length += UDF_DEFAULT_PREALLOC_BLOCKS;
772 } else if ((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) { 813 } else if ((laarr[i].extLength >> 30) ==
773 length += (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) + 814 (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) {
774 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits); 815 length += (((laarr[i].extLength &
775 } else { 816 UDF_EXTENT_LENGTH_MASK) +
817 inode->i_sb->s_blocksize - 1) >>
818 inode->i_sb->s_blocksize_bits);
819 } else
776 break; 820 break;
777 }
778 } 821 }
779 822
780 if (length) { 823 if (length) {
781 int next = laarr[start].extLocation.logicalBlockNum + 824 int next = laarr[start].extLocation.logicalBlockNum +
782 (((laarr[start].extLength & UDF_EXTENT_LENGTH_MASK) + 825 (((laarr[start].extLength & UDF_EXTENT_LENGTH_MASK) +
783 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits); 826 inode->i_sb->s_blocksize - 1) >>
827 inode->i_sb->s_blocksize_bits);
784 int numalloc = udf_prealloc_blocks(inode->i_sb, inode, 828 int numalloc = udf_prealloc_blocks(inode->i_sb, inode,
785 laarr[start].extLocation.partitionReferenceNum, 829 laarr[start].extLocation.partitionReferenceNum,
786 next, (UDF_DEFAULT_PREALLOC_BLOCKS > length ? length : 830 next, (UDF_DEFAULT_PREALLOC_BLOCKS > length ?
787 UDF_DEFAULT_PREALLOC_BLOCKS) - currlength); 831 length : UDF_DEFAULT_PREALLOC_BLOCKS) -
832 currlength);
788 if (numalloc) { 833 if (numalloc) {
789 if (start == (c + 1)) { 834 if (start == (c + 1))
790 laarr[start].extLength += 835 laarr[start].extLength +=
791 (numalloc << inode->i_sb->s_blocksize_bits); 836 (numalloc <<
792 } else { 837 inode->i_sb->s_blocksize_bits);
838 else {
793 memmove(&laarr[c + 2], &laarr[c + 1], 839 memmove(&laarr[c + 2], &laarr[c + 1],
794 sizeof(long_ad) * (*endnum - (c + 1))); 840 sizeof(long_ad) * (*endnum - (c + 1)));
795 (*endnum)++; 841 (*endnum)++;
796 laarr[c + 1].extLocation.logicalBlockNum = next; 842 laarr[c + 1].extLocation.logicalBlockNum = next;
797 laarr[c + 1].extLocation.partitionReferenceNum = 843 laarr[c + 1].extLocation.partitionReferenceNum =
798 laarr[c].extLocation.partitionReferenceNum; 844 laarr[c].extLocation.
799 laarr[c + 1].extLength = EXT_NOT_RECORDED_ALLOCATED | 845 partitionReferenceNum;
800 (numalloc << inode->i_sb->s_blocksize_bits); 846 laarr[c + 1].extLength =
847 EXT_NOT_RECORDED_ALLOCATED |
848 (numalloc <<
849 inode->i_sb->s_blocksize_bits);
801 start = c + 1; 850 start = c + 1;
802 } 851 }
803 852
804 for (i = start + 1; numalloc && i < *endnum; i++) { 853 for (i = start + 1; numalloc && i < *endnum; i++) {
805 int elen = ((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) + 854 int elen = ((laarr[i].extLength &
806 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits; 855 UDF_EXTENT_LENGTH_MASK) +
856 inode->i_sb->s_blocksize - 1) >>
857 inode->i_sb->s_blocksize_bits;
807 858
808 if (elen > numalloc) { 859 if (elen > numalloc) {
809 laarr[i].extLength -= 860 laarr[i].extLength -=
810 (numalloc << inode->i_sb->s_blocksize_bits); 861 (numalloc <<
862 inode->i_sb->s_blocksize_bits);
811 numalloc = 0; 863 numalloc = 0;
812 } else { 864 } else {
813 numalloc -= elen; 865 numalloc -= elen;
814 if (*endnum > (i + 1)) 866 if (*endnum > (i + 1))
815 memmove(&laarr[i], &laarr[i + 1], 867 memmove(&laarr[i],
816 sizeof(long_ad) * (*endnum - (i + 1))); 868 &laarr[i + 1],
869 sizeof(long_ad) *
870 (*endnum - (i + 1)));
817 i--; 871 i--;
818 (*endnum)--; 872 (*endnum)--;
819 } 873 }
820 } 874 }
821 UDF_I_LENEXTENTS(inode) += numalloc << inode->i_sb->s_blocksize_bits; 875 UDF_I(inode)->i_lenExtents +=
876 numalloc << inode->i_sb->s_blocksize_bits;
822 } 877 }
823 } 878 }
824} 879}
@@ -828,70 +883,97 @@ static void udf_merge_extents(struct inode *inode,
828 int *endnum) 883 int *endnum)
829{ 884{
830 int i; 885 int i;
886 unsigned long blocksize = inode->i_sb->s_blocksize;
887 unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;
831 888
832 for (i = 0; i < (*endnum - 1); i++) { 889 for (i = 0; i < (*endnum - 1); i++) {
833 if ((laarr[i].extLength >> 30) == (laarr[i + 1].extLength >> 30)) { 890 kernel_long_ad *li /*l[i]*/ = &laarr[i];
834 if (((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) || 891 kernel_long_ad *lip1 /*l[i plus 1]*/ = &laarr[i + 1];
835 ((laarr[i + 1].extLocation.logicalBlockNum - laarr[i].extLocation.logicalBlockNum) == 892
836 (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) + 893 if (((li->extLength >> 30) == (lip1->extLength >> 30)) &&
837 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits))) { 894 (((li->extLength >> 30) ==
838 if (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) + 895 (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) ||
839 (laarr[i + 1].extLength & UDF_EXTENT_LENGTH_MASK) + 896 ((lip1->extLocation.logicalBlockNum -
840 inode->i_sb->s_blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK) { 897 li->extLocation.logicalBlockNum) ==
841 laarr[i + 1].extLength = (laarr[i + 1].extLength - 898 (((li->extLength & UDF_EXTENT_LENGTH_MASK) +
842 (laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) + 899 blocksize - 1) >> blocksize_bits)))) {
843 UDF_EXTENT_LENGTH_MASK) & ~(inode->i_sb->s_blocksize - 1); 900
844 laarr[i].extLength = (laarr[i].extLength & UDF_EXTENT_FLAG_MASK) + 901 if (((li->extLength & UDF_EXTENT_LENGTH_MASK) +
845 (UDF_EXTENT_LENGTH_MASK + 1) - inode->i_sb->s_blocksize; 902 (lip1->extLength & UDF_EXTENT_LENGTH_MASK) +
846 laarr[i + 1].extLocation.logicalBlockNum = 903 blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK) {
847 laarr[i].extLocation.logicalBlockNum + 904 lip1->extLength = (lip1->extLength -
848 ((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) >> 905 (li->extLength &
849 inode->i_sb->s_blocksize_bits); 906 UDF_EXTENT_LENGTH_MASK) +
850 } else { 907 UDF_EXTENT_LENGTH_MASK) &
851 laarr[i].extLength = laarr[i + 1].extLength + 908 ~(blocksize - 1);
852 (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) + 909 li->extLength = (li->extLength &
853 inode->i_sb->s_blocksize - 1) & ~(inode->i_sb->s_blocksize - 1)); 910 UDF_EXTENT_FLAG_MASK) +
854 if (*endnum > (i + 2)) 911 (UDF_EXTENT_LENGTH_MASK + 1) -
855 memmove(&laarr[i + 1], &laarr[i + 2], 912 blocksize;
856 sizeof(long_ad) * (*endnum - (i + 2))); 913 lip1->extLocation.logicalBlockNum =
857 i--; 914 li->extLocation.logicalBlockNum +
858 (*endnum)--; 915 ((li->extLength &
859 } 916 UDF_EXTENT_LENGTH_MASK) >>
917 blocksize_bits);
918 } else {
919 li->extLength = lip1->extLength +
920 (((li->extLength &
921 UDF_EXTENT_LENGTH_MASK) +
922 blocksize - 1) & ~(blocksize - 1));
923 if (*endnum > (i + 2))
924 memmove(&laarr[i + 1], &laarr[i + 2],
925 sizeof(long_ad) *
926 (*endnum - (i + 2)));
927 i--;
928 (*endnum)--;
860 } 929 }
861 } else if (((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30)) && 930 } else if (((li->extLength >> 30) ==
862 ((laarr[i + 1].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))) { 931 (EXT_NOT_RECORDED_ALLOCATED >> 30)) &&
863 udf_free_blocks(inode->i_sb, inode, laarr[i].extLocation, 0, 932 ((lip1->extLength >> 30) ==
864 ((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) + 933 (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))) {
865 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits); 934 udf_free_blocks(inode->i_sb, inode, li->extLocation, 0,
866 laarr[i].extLocation.logicalBlockNum = 0; 935 ((li->extLength &
867 laarr[i].extLocation.partitionReferenceNum = 0; 936 UDF_EXTENT_LENGTH_MASK) +
868 937 blocksize - 1) >> blocksize_bits);
869 if (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) + 938 li->extLocation.logicalBlockNum = 0;
870 (laarr[i + 1].extLength & UDF_EXTENT_LENGTH_MASK) + 939 li->extLocation.partitionReferenceNum = 0;
871 inode->i_sb->s_blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK) { 940
872 laarr[i + 1].extLength = (laarr[i + 1].extLength - 941 if (((li->extLength & UDF_EXTENT_LENGTH_MASK) +
873 (laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) + 942 (lip1->extLength & UDF_EXTENT_LENGTH_MASK) +
874 UDF_EXTENT_LENGTH_MASK) & ~(inode->i_sb->s_blocksize - 1); 943 blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK) {
875 laarr[i].extLength = (laarr[i].extLength & UDF_EXTENT_FLAG_MASK) + 944 lip1->extLength = (lip1->extLength -
876 (UDF_EXTENT_LENGTH_MASK + 1) - inode->i_sb->s_blocksize; 945 (li->extLength &
946 UDF_EXTENT_LENGTH_MASK) +
947 UDF_EXTENT_LENGTH_MASK) &
948 ~(blocksize - 1);
949 li->extLength = (li->extLength &
950 UDF_EXTENT_FLAG_MASK) +
951 (UDF_EXTENT_LENGTH_MASK + 1) -
952 blocksize;
877 } else { 953 } else {
878 laarr[i].extLength = laarr[i + 1].extLength + 954 li->extLength = lip1->extLength +
879 (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) + 955 (((li->extLength &
880 inode->i_sb->s_blocksize - 1) & ~(inode->i_sb->s_blocksize - 1)); 956 UDF_EXTENT_LENGTH_MASK) +
957 blocksize - 1) & ~(blocksize - 1));
881 if (*endnum > (i + 2)) 958 if (*endnum > (i + 2))
882 memmove(&laarr[i + 1], &laarr[i + 2], 959 memmove(&laarr[i + 1], &laarr[i + 2],
883 sizeof(long_ad) * (*endnum - (i + 2))); 960 sizeof(long_ad) *
961 (*endnum - (i + 2)));
884 i--; 962 i--;
885 (*endnum)--; 963 (*endnum)--;
886 } 964 }
887 } else if ((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30)) { 965 } else if ((li->extLength >> 30) ==
888 udf_free_blocks(inode->i_sb, inode, laarr[i].extLocation, 0, 966 (EXT_NOT_RECORDED_ALLOCATED >> 30)) {
889 ((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) + 967 udf_free_blocks(inode->i_sb, inode,
890 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits); 968 li->extLocation, 0,
891 laarr[i].extLocation.logicalBlockNum = 0; 969 ((li->extLength &
892 laarr[i].extLocation.partitionReferenceNum = 0; 970 UDF_EXTENT_LENGTH_MASK) +
893 laarr[i].extLength = (laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) | 971 blocksize - 1) >> blocksize_bits);
894 EXT_NOT_RECORDED_NOT_ALLOCATED; 972 li->extLocation.logicalBlockNum = 0;
973 li->extLocation.partitionReferenceNum = 0;
974 li->extLength = (li->extLength &
975 UDF_EXTENT_LENGTH_MASK) |
976 EXT_NOT_RECORDED_NOT_ALLOCATED;
895 } 977 }
896 } 978 }
897} 979}
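The split, prealloc and merge hunks above keep repeating one piece of arithmetic: masking the two flag bits out of an extent length and rounding the remaining byte count up to whole blocks. The helper below is hypothetical (the patch deliberately keeps the expression open-coded and only hoists s_blocksize/s_blocksize_bits into locals); it is included only to spell out what that expression computes.

	/* Hypothetical helper, not part of the diff: number of blocks an
	 * extent covers, rounding its byte length up to a full block. */
	static inline uint32_t udf_ext_nblocks(struct super_block *sb,
					       uint32_t ext_length)
	{
		return ((ext_length & UDF_EXTENT_LENGTH_MASK) +
			sb->s_blocksize - 1) >> sb->s_blocksize_bits;
	}

With it, blen in udf_split_extents() would read blen = udf_ext_nblocks(inode->i_sb, laarr[curr].extLength), which is exactly what the wrapped multi-line form above evaluates to.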
@@ -953,6 +1035,7 @@ void udf_truncate(struct inode *inode)
953{ 1035{
954 int offset; 1036 int offset;
955 int err; 1037 int err;
1038 struct udf_inode_info *iinfo;
956 1039
957 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || 1040 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
958 S_ISLNK(inode->i_mode))) 1041 S_ISLNK(inode->i_mode)))
@@ -961,25 +1044,28 @@ void udf_truncate(struct inode *inode)
961 return; 1044 return;
962 1045
963 lock_kernel(); 1046 lock_kernel();
964 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB) { 1047 iinfo = UDF_I(inode);
965 if (inode->i_sb->s_blocksize < (udf_file_entry_alloc_offset(inode) + 1048 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
966 inode->i_size)) { 1049 if (inode->i_sb->s_blocksize <
1050 (udf_file_entry_alloc_offset(inode) +
1051 inode->i_size)) {
967 udf_expand_file_adinicb(inode, inode->i_size, &err); 1052 udf_expand_file_adinicb(inode, inode->i_size, &err);
968 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB) { 1053 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
969 inode->i_size = UDF_I_LENALLOC(inode); 1054 inode->i_size = iinfo->i_lenAlloc;
970 unlock_kernel(); 1055 unlock_kernel();
971 return; 1056 return;
972 } else { 1057 } else
973 udf_truncate_extents(inode); 1058 udf_truncate_extents(inode);
974 }
975 } else { 1059 } else {
976 offset = inode->i_size & (inode->i_sb->s_blocksize - 1); 1060 offset = inode->i_size & (inode->i_sb->s_blocksize - 1);
977 memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode) + offset, 0x00, 1061 memset(iinfo->i_ext.i_data + iinfo->i_lenEAttr + offset,
978 inode->i_sb->s_blocksize - offset - udf_file_entry_alloc_offset(inode)); 1062 0x00, inode->i_sb->s_blocksize -
979 UDF_I_LENALLOC(inode) = inode->i_size; 1063 offset - udf_file_entry_alloc_offset(inode));
1064 iinfo->i_lenAlloc = inode->i_size;
980 } 1065 }
981 } else { 1066 } else {
982 block_truncate_page(inode->i_mapping, inode->i_size, udf_get_block); 1067 block_truncate_page(inode->i_mapping, inode->i_size,
1068 udf_get_block);
983 udf_truncate_extents(inode); 1069 udf_truncate_extents(inode);
984 } 1070 }
985 1071
@@ -996,6 +1082,7 @@ static void __udf_read_inode(struct inode *inode)
996 struct buffer_head *bh = NULL; 1082 struct buffer_head *bh = NULL;
997 struct fileEntry *fe; 1083 struct fileEntry *fe;
998 uint16_t ident; 1084 uint16_t ident;
1085 struct udf_inode_info *iinfo = UDF_I(inode);
999 1086
1000 /* 1087 /*
1001 * Set defaults, but the inode is still incomplete! 1088 * Set defaults, but the inode is still incomplete!
@@ -1009,7 +1096,7 @@ static void __udf_read_inode(struct inode *inode)
1009 * i_nlink = 1 1096 * i_nlink = 1
1010 * i_op = NULL; 1097 * i_op = NULL;
1011 */ 1098 */
1012 bh = udf_read_ptagged(inode->i_sb, UDF_I_LOCATION(inode), 0, &ident); 1099 bh = udf_read_ptagged(inode->i_sb, iinfo->i_location, 0, &ident);
1013 if (!bh) { 1100 if (!bh) {
1014 printk(KERN_ERR "udf: udf_read_inode(ino %ld) failed !bh\n", 1101 printk(KERN_ERR "udf: udf_read_inode(ino %ld) failed !bh\n",
1015 inode->i_ino); 1102 inode->i_ino);
@@ -1019,8 +1106,8 @@ static void __udf_read_inode(struct inode *inode)
1019 1106
1020 if (ident != TAG_IDENT_FE && ident != TAG_IDENT_EFE && 1107 if (ident != TAG_IDENT_FE && ident != TAG_IDENT_EFE &&
1021 ident != TAG_IDENT_USE) { 1108 ident != TAG_IDENT_USE) {
1022 printk(KERN_ERR "udf: udf_read_inode(ino %ld) failed ident=%d\n", 1109 printk(KERN_ERR "udf: udf_read_inode(ino %ld) "
1023 inode->i_ino, ident); 1110 "failed ident=%d\n", inode->i_ino, ident);
1024 brelse(bh); 1111 brelse(bh);
1025 make_bad_inode(inode); 1112 make_bad_inode(inode);
1026 return; 1113 return;
@@ -1028,11 +1115,12 @@ static void __udf_read_inode(struct inode *inode)
1028 1115
1029 fe = (struct fileEntry *)bh->b_data; 1116 fe = (struct fileEntry *)bh->b_data;
1030 1117
1031 if (le16_to_cpu(fe->icbTag.strategyType) == 4096) { 1118 if (fe->icbTag.strategyType == cpu_to_le16(4096)) {
1032 struct buffer_head *ibh = NULL, *nbh = NULL; 1119 struct buffer_head *ibh = NULL, *nbh = NULL;
1033 struct indirectEntry *ie; 1120 struct indirectEntry *ie;
1034 1121
1035 ibh = udf_read_ptagged(inode->i_sb, UDF_I_LOCATION(inode), 1, &ident); 1122 ibh = udf_read_ptagged(inode->i_sb, iinfo->i_location, 1,
1123 &ident);
1036 if (ident == TAG_IDENT_IE) { 1124 if (ident == TAG_IDENT_IE) {
1037 if (ibh) { 1125 if (ibh) {
1038 kernel_lb_addr loc; 1126 kernel_lb_addr loc;
@@ -1041,10 +1129,12 @@ static void __udf_read_inode(struct inode *inode)
1041 loc = lelb_to_cpu(ie->indirectICB.extLocation); 1129 loc = lelb_to_cpu(ie->indirectICB.extLocation);
1042 1130
1043 if (ie->indirectICB.extLength && 1131 if (ie->indirectICB.extLength &&
1044 (nbh = udf_read_ptagged(inode->i_sb, loc, 0, &ident))) { 1132 (nbh = udf_read_ptagged(inode->i_sb, loc, 0,
1133 &ident))) {
1045 if (ident == TAG_IDENT_FE || 1134 if (ident == TAG_IDENT_FE ||
1046 ident == TAG_IDENT_EFE) { 1135 ident == TAG_IDENT_EFE) {
1047 memcpy(&UDF_I_LOCATION(inode), &loc, 1136 memcpy(&iinfo->i_location,
1137 &loc,
1048 sizeof(kernel_lb_addr)); 1138 sizeof(kernel_lb_addr));
1049 brelse(bh); 1139 brelse(bh);
1050 brelse(ibh); 1140 brelse(ibh);
@@ -1062,7 +1152,7 @@ static void __udf_read_inode(struct inode *inode)
1062 } else { 1152 } else {
1063 brelse(ibh); 1153 brelse(ibh);
1064 } 1154 }
1065 } else if (le16_to_cpu(fe->icbTag.strategyType) != 4) { 1155 } else if (fe->icbTag.strategyType != cpu_to_le16(4)) {
1066 printk(KERN_ERR "udf: unsupported strategy type: %d\n", 1156 printk(KERN_ERR "udf: unsupported strategy type: %d\n",
1067 le16_to_cpu(fe->icbTag.strategyType)); 1157 le16_to_cpu(fe->icbTag.strategyType));
1068 brelse(bh); 1158 brelse(bh);
@@ -1081,51 +1171,63 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
1081 time_t convtime; 1171 time_t convtime;
1082 long convtime_usec; 1172 long convtime_usec;
1083 int offset; 1173 int offset;
1174 struct udf_sb_info *sbi = UDF_SB(inode->i_sb);
1175 struct udf_inode_info *iinfo = UDF_I(inode);
1084 1176
1085 fe = (struct fileEntry *)bh->b_data; 1177 fe = (struct fileEntry *)bh->b_data;
1086 efe = (struct extendedFileEntry *)bh->b_data; 1178 efe = (struct extendedFileEntry *)bh->b_data;
1087 1179
1088 if (le16_to_cpu(fe->icbTag.strategyType) == 4) 1180 if (fe->icbTag.strategyType == cpu_to_le16(4))
1089 UDF_I_STRAT4096(inode) = 0; 1181 iinfo->i_strat4096 = 0;
1090 else /* if (le16_to_cpu(fe->icbTag.strategyType) == 4096) */ 1182 else /* if (fe->icbTag.strategyType == cpu_to_le16(4096)) */
1091 UDF_I_STRAT4096(inode) = 1; 1183 iinfo->i_strat4096 = 1;
1092 1184
1093 UDF_I_ALLOCTYPE(inode) = le16_to_cpu(fe->icbTag.flags) & ICBTAG_FLAG_AD_MASK; 1185 iinfo->i_alloc_type = le16_to_cpu(fe->icbTag.flags) &
1094 UDF_I_UNIQUE(inode) = 0; 1186 ICBTAG_FLAG_AD_MASK;
1095 UDF_I_LENEATTR(inode) = 0; 1187 iinfo->i_unique = 0;
1096 UDF_I_LENEXTENTS(inode) = 0; 1188 iinfo->i_lenEAttr = 0;
1097 UDF_I_LENALLOC(inode) = 0; 1189 iinfo->i_lenExtents = 0;
1098 UDF_I_NEXT_ALLOC_BLOCK(inode) = 0; 1190 iinfo->i_lenAlloc = 0;
1099 UDF_I_NEXT_ALLOC_GOAL(inode) = 0; 1191 iinfo->i_next_alloc_block = 0;
1100 if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_EFE) { 1192 iinfo->i_next_alloc_goal = 0;
1101 UDF_I_EFE(inode) = 1; 1193 if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_EFE)) {
1102 UDF_I_USE(inode) = 0; 1194 iinfo->i_efe = 1;
1103 if (udf_alloc_i_data(inode, inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry))) { 1195 iinfo->i_use = 0;
1196 if (udf_alloc_i_data(inode, inode->i_sb->s_blocksize -
1197 sizeof(struct extendedFileEntry))) {
1104 make_bad_inode(inode); 1198 make_bad_inode(inode);
1105 return; 1199 return;
1106 } 1200 }
1107 memcpy(UDF_I_DATA(inode), bh->b_data + sizeof(struct extendedFileEntry), 1201 memcpy(iinfo->i_ext.i_data,
1108 inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry)); 1202 bh->b_data + sizeof(struct extendedFileEntry),
1109 } else if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_FE) { 1203 inode->i_sb->s_blocksize -
1110 UDF_I_EFE(inode) = 0; 1204 sizeof(struct extendedFileEntry));
1111 UDF_I_USE(inode) = 0; 1205 } else if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_FE)) {
1112 if (udf_alloc_i_data(inode, inode->i_sb->s_blocksize - sizeof(struct fileEntry))) { 1206 iinfo->i_efe = 0;
1207 iinfo->i_use = 0;
1208 if (udf_alloc_i_data(inode, inode->i_sb->s_blocksize -
1209 sizeof(struct fileEntry))) {
1113 make_bad_inode(inode); 1210 make_bad_inode(inode);
1114 return; 1211 return;
1115 } 1212 }
1116 memcpy(UDF_I_DATA(inode), bh->b_data + sizeof(struct fileEntry), 1213 memcpy(iinfo->i_ext.i_data,
1214 bh->b_data + sizeof(struct fileEntry),
1117 inode->i_sb->s_blocksize - sizeof(struct fileEntry)); 1215 inode->i_sb->s_blocksize - sizeof(struct fileEntry));
1118 } else if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_USE) { 1216 } else if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_USE)) {
1119 UDF_I_EFE(inode) = 0; 1217 iinfo->i_efe = 0;
1120 UDF_I_USE(inode) = 1; 1218 iinfo->i_use = 1;
1121 UDF_I_LENALLOC(inode) = 1219 iinfo->i_lenAlloc = le32_to_cpu(
1122 le32_to_cpu(((struct unallocSpaceEntry *)bh->b_data)->lengthAllocDescs); 1220 ((struct unallocSpaceEntry *)bh->b_data)->
1123 if (udf_alloc_i_data(inode, inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry))) { 1221 lengthAllocDescs);
1222 if (udf_alloc_i_data(inode, inode->i_sb->s_blocksize -
1223 sizeof(struct unallocSpaceEntry))) {
1124 make_bad_inode(inode); 1224 make_bad_inode(inode);
1125 return; 1225 return;
1126 } 1226 }
1127 memcpy(UDF_I_DATA(inode), bh->b_data + sizeof(struct unallocSpaceEntry), 1227 memcpy(iinfo->i_ext.i_data,
1128 inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry)); 1228 bh->b_data + sizeof(struct unallocSpaceEntry),
1229 inode->i_sb->s_blocksize -
1230 sizeof(struct unallocSpaceEntry));
1129 return; 1231 return;
1130 } 1232 }
1131 1233
@@ -1146,12 +1248,12 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
1146 inode->i_nlink = 1; 1248 inode->i_nlink = 1;
1147 1249
1148 inode->i_size = le64_to_cpu(fe->informationLength); 1250 inode->i_size = le64_to_cpu(fe->informationLength);
1149 UDF_I_LENEXTENTS(inode) = inode->i_size; 1251 iinfo->i_lenExtents = inode->i_size;
1150 1252
1151 inode->i_mode = udf_convert_permissions(fe); 1253 inode->i_mode = udf_convert_permissions(fe);
1152 inode->i_mode &= ~UDF_SB(inode->i_sb)->s_umask; 1254 inode->i_mode &= ~UDF_SB(inode->i_sb)->s_umask;
1153 1255
1154 if (UDF_I_EFE(inode) == 0) { 1256 if (iinfo->i_efe == 0) {
1155 inode->i_blocks = le64_to_cpu(fe->logicalBlocksRecorded) << 1257 inode->i_blocks = le64_to_cpu(fe->logicalBlocksRecorded) <<
1156 (inode->i_sb->s_blocksize_bits - 9); 1258 (inode->i_sb->s_blocksize_bits - 9);
1157 1259
@@ -1160,7 +1262,7 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
1160 inode->i_atime.tv_sec = convtime; 1262 inode->i_atime.tv_sec = convtime;
1161 inode->i_atime.tv_nsec = convtime_usec * 1000; 1263 inode->i_atime.tv_nsec = convtime_usec * 1000;
1162 } else { 1264 } else {
1163 inode->i_atime = UDF_SB_RECORDTIME(inode->i_sb); 1265 inode->i_atime = sbi->s_record_time;
1164 } 1266 }
1165 1267
1166 if (udf_stamp_to_time(&convtime, &convtime_usec, 1268 if (udf_stamp_to_time(&convtime, &convtime_usec,
@@ -1168,7 +1270,7 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
1168 inode->i_mtime.tv_sec = convtime; 1270 inode->i_mtime.tv_sec = convtime;
1169 inode->i_mtime.tv_nsec = convtime_usec * 1000; 1271 inode->i_mtime.tv_nsec = convtime_usec * 1000;
1170 } else { 1272 } else {
1171 inode->i_mtime = UDF_SB_RECORDTIME(inode->i_sb); 1273 inode->i_mtime = sbi->s_record_time;
1172 } 1274 }
1173 1275
1174 if (udf_stamp_to_time(&convtime, &convtime_usec, 1276 if (udf_stamp_to_time(&convtime, &convtime_usec,
@@ -1176,13 +1278,13 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
1176 inode->i_ctime.tv_sec = convtime; 1278 inode->i_ctime.tv_sec = convtime;
1177 inode->i_ctime.tv_nsec = convtime_usec * 1000; 1279 inode->i_ctime.tv_nsec = convtime_usec * 1000;
1178 } else { 1280 } else {
1179 inode->i_ctime = UDF_SB_RECORDTIME(inode->i_sb); 1281 inode->i_ctime = sbi->s_record_time;
1180 } 1282 }
1181 1283
1182 UDF_I_UNIQUE(inode) = le64_to_cpu(fe->uniqueID); 1284 iinfo->i_unique = le64_to_cpu(fe->uniqueID);
1183 UDF_I_LENEATTR(inode) = le32_to_cpu(fe->lengthExtendedAttr); 1285 iinfo->i_lenEAttr = le32_to_cpu(fe->lengthExtendedAttr);
1184 UDF_I_LENALLOC(inode) = le32_to_cpu(fe->lengthAllocDescs); 1286 iinfo->i_lenAlloc = le32_to_cpu(fe->lengthAllocDescs);
1185 offset = sizeof(struct fileEntry) + UDF_I_LENEATTR(inode); 1287 offset = sizeof(struct fileEntry) + iinfo->i_lenEAttr;
1186 } else { 1288 } else {
1187 inode->i_blocks = le64_to_cpu(efe->logicalBlocksRecorded) << 1289 inode->i_blocks = le64_to_cpu(efe->logicalBlocksRecorded) <<
1188 (inode->i_sb->s_blocksize_bits - 9); 1290 (inode->i_sb->s_blocksize_bits - 9);
@@ -1192,7 +1294,7 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
1192 inode->i_atime.tv_sec = convtime; 1294 inode->i_atime.tv_sec = convtime;
1193 inode->i_atime.tv_nsec = convtime_usec * 1000; 1295 inode->i_atime.tv_nsec = convtime_usec * 1000;
1194 } else { 1296 } else {
1195 inode->i_atime = UDF_SB_RECORDTIME(inode->i_sb); 1297 inode->i_atime = sbi->s_record_time;
1196 } 1298 }
1197 1299
1198 if (udf_stamp_to_time(&convtime, &convtime_usec, 1300 if (udf_stamp_to_time(&convtime, &convtime_usec,
@@ -1200,15 +1302,15 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
1200 inode->i_mtime.tv_sec = convtime; 1302 inode->i_mtime.tv_sec = convtime;
1201 inode->i_mtime.tv_nsec = convtime_usec * 1000; 1303 inode->i_mtime.tv_nsec = convtime_usec * 1000;
1202 } else { 1304 } else {
1203 inode->i_mtime = UDF_SB_RECORDTIME(inode->i_sb); 1305 inode->i_mtime = sbi->s_record_time;
1204 } 1306 }
1205 1307
1206 if (udf_stamp_to_time(&convtime, &convtime_usec, 1308 if (udf_stamp_to_time(&convtime, &convtime_usec,
1207 lets_to_cpu(efe->createTime))) { 1309 lets_to_cpu(efe->createTime))) {
1208 UDF_I_CRTIME(inode).tv_sec = convtime; 1310 iinfo->i_crtime.tv_sec = convtime;
1209 UDF_I_CRTIME(inode).tv_nsec = convtime_usec * 1000; 1311 iinfo->i_crtime.tv_nsec = convtime_usec * 1000;
1210 } else { 1312 } else {
1211 UDF_I_CRTIME(inode) = UDF_SB_RECORDTIME(inode->i_sb); 1313 iinfo->i_crtime = sbi->s_record_time;
1212 } 1314 }
1213 1315
1214 if (udf_stamp_to_time(&convtime, &convtime_usec, 1316 if (udf_stamp_to_time(&convtime, &convtime_usec,
@@ -1216,13 +1318,14 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
1216 inode->i_ctime.tv_sec = convtime; 1318 inode->i_ctime.tv_sec = convtime;
1217 inode->i_ctime.tv_nsec = convtime_usec * 1000; 1319 inode->i_ctime.tv_nsec = convtime_usec * 1000;
1218 } else { 1320 } else {
1219 inode->i_ctime = UDF_SB_RECORDTIME(inode->i_sb); 1321 inode->i_ctime = sbi->s_record_time;
1220 } 1322 }
1221 1323
1222 UDF_I_UNIQUE(inode) = le64_to_cpu(efe->uniqueID); 1324 iinfo->i_unique = le64_to_cpu(efe->uniqueID);
1223 UDF_I_LENEATTR(inode) = le32_to_cpu(efe->lengthExtendedAttr); 1325 iinfo->i_lenEAttr = le32_to_cpu(efe->lengthExtendedAttr);
1224 UDF_I_LENALLOC(inode) = le32_to_cpu(efe->lengthAllocDescs); 1326 iinfo->i_lenAlloc = le32_to_cpu(efe->lengthAllocDescs);
1225 offset = sizeof(struct extendedFileEntry) + UDF_I_LENEATTR(inode); 1327 offset = sizeof(struct extendedFileEntry) +
1328 iinfo->i_lenEAttr;
1226 } 1329 }
1227 1330
1228 switch (fe->icbTag.fileType) { 1331 switch (fe->icbTag.fileType) {
@@ -1235,7 +1338,7 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
1235 case ICBTAG_FILE_TYPE_REALTIME: 1338 case ICBTAG_FILE_TYPE_REALTIME:
1236 case ICBTAG_FILE_TYPE_REGULAR: 1339 case ICBTAG_FILE_TYPE_REGULAR:
1237 case ICBTAG_FILE_TYPE_UNDEF: 1340 case ICBTAG_FILE_TYPE_UNDEF:
1238 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB) 1341 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
1239 inode->i_data.a_ops = &udf_adinicb_aops; 1342 inode->i_data.a_ops = &udf_adinicb_aops;
1240 else 1343 else
1241 inode->i_data.a_ops = &udf_aops; 1344 inode->i_data.a_ops = &udf_aops;
@@ -1261,31 +1364,33 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
1261 inode->i_mode = S_IFLNK | S_IRWXUGO; 1364 inode->i_mode = S_IFLNK | S_IRWXUGO;
1262 break; 1365 break;
1263 default: 1366 default:
1264 printk(KERN_ERR "udf: udf_fill_inode(ino %ld) failed unknown file type=%d\n", 1367 printk(KERN_ERR "udf: udf_fill_inode(ino %ld) failed unknown "
1265 inode->i_ino, fe->icbTag.fileType); 1368 "file type=%d\n", inode->i_ino,
1369 fe->icbTag.fileType);
1266 make_bad_inode(inode); 1370 make_bad_inode(inode);
1267 return; 1371 return;
1268 } 1372 }
1269 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) { 1373 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
1270 struct deviceSpec *dsea = (struct deviceSpec *)udf_get_extendedattr(inode, 12, 1); 1374 struct deviceSpec *dsea =
1375 (struct deviceSpec *)udf_get_extendedattr(inode, 12, 1);
1271 if (dsea) { 1376 if (dsea) {
1272 init_special_inode(inode, inode->i_mode, 1377 init_special_inode(inode, inode->i_mode,
1273 MKDEV(le32_to_cpu(dsea->majorDeviceIdent), 1378 MKDEV(le32_to_cpu(dsea->majorDeviceIdent),
1274 le32_to_cpu(dsea->minorDeviceIdent))); 1379 le32_to_cpu(dsea->minorDeviceIdent)));
1275 /* Developer ID ??? */ 1380 /* Developer ID ??? */
1276 } else { 1381 } else
1277 make_bad_inode(inode); 1382 make_bad_inode(inode);
1278 }
1279 } 1383 }
1280} 1384}
1281 1385
1282static int udf_alloc_i_data(struct inode *inode, size_t size) 1386static int udf_alloc_i_data(struct inode *inode, size_t size)
1283{ 1387{
1284 UDF_I_DATA(inode) = kmalloc(size, GFP_KERNEL); 1388 struct udf_inode_info *iinfo = UDF_I(inode);
1389 iinfo->i_ext.i_data = kmalloc(size, GFP_KERNEL);
1285 1390
1286 if (!UDF_I_DATA(inode)) { 1391 if (!iinfo->i_ext.i_data) {
1287 printk(KERN_ERR "udf:udf_alloc_i_data (ino %ld) no free memory\n", 1392 printk(KERN_ERR "udf:udf_alloc_i_data (ino %ld) "
1288 inode->i_ino); 1393 "no free memory\n", inode->i_ino);
1289 return -ENOMEM; 1394 return -ENOMEM;
1290 } 1395 }
1291 1396
@@ -1301,12 +1406,12 @@ static mode_t udf_convert_permissions(struct fileEntry *fe)
1301 permissions = le32_to_cpu(fe->permissions); 1406 permissions = le32_to_cpu(fe->permissions);
1302 flags = le16_to_cpu(fe->icbTag.flags); 1407 flags = le16_to_cpu(fe->icbTag.flags);
1303 1408
1304 mode = (( permissions ) & S_IRWXO) | 1409 mode = ((permissions) & S_IRWXO) |
1305 (( permissions >> 2 ) & S_IRWXG) | 1410 ((permissions >> 2) & S_IRWXG) |
1306 (( permissions >> 4 ) & S_IRWXU) | 1411 ((permissions >> 4) & S_IRWXU) |
1307 (( flags & ICBTAG_FLAG_SETUID) ? S_ISUID : 0) | 1412 ((flags & ICBTAG_FLAG_SETUID) ? S_ISUID : 0) |
1308 (( flags & ICBTAG_FLAG_SETGID) ? S_ISGID : 0) | 1413 ((flags & ICBTAG_FLAG_SETGID) ? S_ISGID : 0) |
1309 (( flags & ICBTAG_FLAG_STICKY) ? S_ISVTX : 0); 1414 ((flags & ICBTAG_FLAG_STICKY) ? S_ISVTX : 0);
1310 1415
1311 return mode; 1416 return mode;
1312} 1417}
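The udf_convert_permissions() hunk above maps the low bits of the on-disk UDF permission word onto POSIX rwx bits (the "other", "group" and "owner" classes sit 2 and 4 bit positions apart, so plain shifts line them up) and then folds the setuid/setgid/sticky ICB flags in on top. A minimal user-space sketch of that mapping, with placeholder values for the ICBTAG_FLAG_* constants (the real definitions live in the UDF on-disk headers), would look roughly like this:

/* Sketch only: mirrors the shift-based mapping shown in
 * udf_convert_permissions().  The ICBTAG_FLAG_* values below are
 * placeholders for illustration, not the real header definitions. */
#include <stdint.h>
#include <stdio.h>
#include <sys/stat.h>

#define ICBTAG_FLAG_SETUID 0x0040	/* assumed value */
#define ICBTAG_FLAG_SETGID 0x0080	/* assumed value */
#define ICBTAG_FLAG_STICKY 0x0100	/* assumed value */

static mode_t convert_permissions(uint32_t permissions, uint16_t flags)
{
	/* "other", "group" and "owner" rwx bits are 2 and 4 positions
	 * apart in the UDF word, so shifting aligns them with POSIX. */
	return ((permissions)      & S_IRWXO) |
	       ((permissions >> 2) & S_IRWXG) |
	       ((permissions >> 4) & S_IRWXU) |
	       ((flags & ICBTAG_FLAG_SETUID) ? S_ISUID : 0) |
	       ((flags & ICBTAG_FLAG_SETGID) ? S_ISGID : 0) |
	       ((flags & ICBTAG_FLAG_STICKY) ? S_ISVTX : 0);
}

int main(void)
{
	/* 0x1CE7 has the rwx bits of all three classes set under the
	 * shifts above, so this prints 0777. */
	printf("%04o\n", (unsigned)convert_permissions(0x1CE7, 0));
	return 0;
}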
@@ -1350,11 +1455,15 @@ static int udf_update_inode(struct inode *inode, int do_sync)
1350 uint32_t udfperms; 1455 uint32_t udfperms;
1351 uint16_t icbflags; 1456 uint16_t icbflags;
1352 uint16_t crclen; 1457 uint16_t crclen;
1353 int i;
1354 kernel_timestamp cpu_time; 1458 kernel_timestamp cpu_time;
1355 int err = 0; 1459 int err = 0;
1460 struct udf_sb_info *sbi = UDF_SB(inode->i_sb);
1461 unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;
1462 struct udf_inode_info *iinfo = UDF_I(inode);
1356 1463
1357 bh = udf_tread(inode->i_sb, udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode), 0)); 1464 bh = udf_tread(inode->i_sb,
1465 udf_get_lb_pblock(inode->i_sb,
1466 iinfo->i_location, 0));
1358 if (!bh) { 1467 if (!bh) {
1359 udf_debug("bread failure\n"); 1468 udf_debug("bread failure\n");
1360 return -EIO; 1469 return -EIO;
@@ -1365,23 +1474,24 @@ static int udf_update_inode(struct inode *inode, int do_sync)
1365 fe = (struct fileEntry *)bh->b_data; 1474 fe = (struct fileEntry *)bh->b_data;
1366 efe = (struct extendedFileEntry *)bh->b_data; 1475 efe = (struct extendedFileEntry *)bh->b_data;
1367 1476
1368 if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_USE) { 1477 if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_USE)) {
1369 struct unallocSpaceEntry *use = 1478 struct unallocSpaceEntry *use =
1370 (struct unallocSpaceEntry *)bh->b_data; 1479 (struct unallocSpaceEntry *)bh->b_data;
1371 1480
1372 use->lengthAllocDescs = cpu_to_le32(UDF_I_LENALLOC(inode)); 1481 use->lengthAllocDescs = cpu_to_le32(iinfo->i_lenAlloc);
1373 memcpy(bh->b_data + sizeof(struct unallocSpaceEntry), UDF_I_DATA(inode), 1482 memcpy(bh->b_data + sizeof(struct unallocSpaceEntry),
1374 inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry)); 1483 iinfo->i_ext.i_data, inode->i_sb->s_blocksize -
1375 crclen = sizeof(struct unallocSpaceEntry) + UDF_I_LENALLOC(inode) - sizeof(tag); 1484 sizeof(struct unallocSpaceEntry));
1376 use->descTag.tagLocation = cpu_to_le32(UDF_I_LOCATION(inode).logicalBlockNum); 1485 crclen = sizeof(struct unallocSpaceEntry) +
1486 iinfo->i_lenAlloc - sizeof(tag);
1487 use->descTag.tagLocation = cpu_to_le32(
1488 iinfo->i_location.
1489 logicalBlockNum);
1377 use->descTag.descCRCLength = cpu_to_le16(crclen); 1490 use->descTag.descCRCLength = cpu_to_le16(crclen);
1378 use->descTag.descCRC = cpu_to_le16(udf_crc((char *)use + sizeof(tag), crclen, 0)); 1491 use->descTag.descCRC = cpu_to_le16(udf_crc((char *)use +
1379 1492 sizeof(tag), crclen,
1380 use->descTag.tagChecksum = 0; 1493 0));
1381 for (i = 0; i < 16; i++) { 1494 use->descTag.tagChecksum = udf_tag_checksum(&use->descTag);
1382 if (i != 4)
1383 use->descTag.tagChecksum += ((uint8_t *)&(use->descTag))[i];
1384 }
1385 1495
1386 mark_buffer_dirty(bh); 1496 mark_buffer_dirty(bh);
1387 brelse(bh); 1497 brelse(bh);
@@ -1398,14 +1508,14 @@ static int udf_update_inode(struct inode *inode, int do_sync)
1398 else 1508 else
1399 fe->gid = cpu_to_le32(inode->i_gid); 1509 fe->gid = cpu_to_le32(inode->i_gid);
1400 1510
1401 udfperms = ((inode->i_mode & S_IRWXO) ) | 1511 udfperms = ((inode->i_mode & S_IRWXO)) |
1402 ((inode->i_mode & S_IRWXG) << 2) | 1512 ((inode->i_mode & S_IRWXG) << 2) |
1403 ((inode->i_mode & S_IRWXU) << 4); 1513 ((inode->i_mode & S_IRWXU) << 4);
1404 1514
1405 udfperms |= (le32_to_cpu(fe->permissions) & 1515 udfperms |= (le32_to_cpu(fe->permissions) &
1406 (FE_PERM_O_DELETE | FE_PERM_O_CHATTR | 1516 (FE_PERM_O_DELETE | FE_PERM_O_CHATTR |
1407 FE_PERM_G_DELETE | FE_PERM_G_CHATTR | 1517 FE_PERM_G_DELETE | FE_PERM_G_CHATTR |
1408 FE_PERM_U_DELETE | FE_PERM_U_CHATTR)); 1518 FE_PERM_U_DELETE | FE_PERM_U_CHATTR));
1409 fe->permissions = cpu_to_le32(udfperms); 1519 fe->permissions = cpu_to_le32(udfperms);
1410 1520
1411 if (S_ISDIR(inode->i_mode)) 1521 if (S_ISDIR(inode->i_mode))
@@ -1426,8 +1536,9 @@ static int udf_update_inode(struct inode *inode, int do_sync)
1426 sizeof(regid), 12, 0x3); 1536 sizeof(regid), 12, 0x3);
1427 dsea->attrType = cpu_to_le32(12); 1537 dsea->attrType = cpu_to_le32(12);
1428 dsea->attrSubtype = 1; 1538 dsea->attrSubtype = 1;
1429 dsea->attrLength = cpu_to_le32(sizeof(struct deviceSpec) + 1539 dsea->attrLength = cpu_to_le32(
1430 sizeof(regid)); 1540 sizeof(struct deviceSpec) +
1541 sizeof(regid));
1431 dsea->impUseLength = cpu_to_le32(sizeof(regid)); 1542 dsea->impUseLength = cpu_to_le32(sizeof(regid));
1432 } 1543 }
1433 eid = (regid *)dsea->impUse; 1544 eid = (regid *)dsea->impUse;
@@ -1439,12 +1550,13 @@ static int udf_update_inode(struct inode *inode, int do_sync)
1439 dsea->minorDeviceIdent = cpu_to_le32(iminor(inode)); 1550 dsea->minorDeviceIdent = cpu_to_le32(iminor(inode));
1440 } 1551 }
1441 1552
1442 if (UDF_I_EFE(inode) == 0) { 1553 if (iinfo->i_efe == 0) {
1443 memcpy(bh->b_data + sizeof(struct fileEntry), UDF_I_DATA(inode), 1554 memcpy(bh->b_data + sizeof(struct fileEntry),
1555 iinfo->i_ext.i_data,
1444 inode->i_sb->s_blocksize - sizeof(struct fileEntry)); 1556 inode->i_sb->s_blocksize - sizeof(struct fileEntry));
1445 fe->logicalBlocksRecorded = cpu_to_le64( 1557 fe->logicalBlocksRecorded = cpu_to_le64(
1446 (inode->i_blocks + (1 << (inode->i_sb->s_blocksize_bits - 9)) - 1) >> 1558 (inode->i_blocks + (1 << (blocksize_bits - 9)) - 1) >>
1447 (inode->i_sb->s_blocksize_bits - 9)); 1559 (blocksize_bits - 9));
1448 1560
1449 if (udf_time_to_stamp(&cpu_time, inode->i_atime)) 1561 if (udf_time_to_stamp(&cpu_time, inode->i_atime))
1450 fe->accessTime = cpu_to_lets(cpu_time); 1562 fe->accessTime = cpu_to_lets(cpu_time);
@@ -1456,40 +1568,41 @@ static int udf_update_inode(struct inode *inode, int do_sync)
1456 strcpy(fe->impIdent.ident, UDF_ID_DEVELOPER); 1568 strcpy(fe->impIdent.ident, UDF_ID_DEVELOPER);
1457 fe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX; 1569 fe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
1458 fe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX; 1570 fe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
1459 fe->uniqueID = cpu_to_le64(UDF_I_UNIQUE(inode)); 1571 fe->uniqueID = cpu_to_le64(iinfo->i_unique);
1460 fe->lengthExtendedAttr = cpu_to_le32(UDF_I_LENEATTR(inode)); 1572 fe->lengthExtendedAttr = cpu_to_le32(iinfo->i_lenEAttr);
1461 fe->lengthAllocDescs = cpu_to_le32(UDF_I_LENALLOC(inode)); 1573 fe->lengthAllocDescs = cpu_to_le32(iinfo->i_lenAlloc);
1462 fe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_FE); 1574 fe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_FE);
1463 crclen = sizeof(struct fileEntry); 1575 crclen = sizeof(struct fileEntry);
1464 } else { 1576 } else {
1465 memcpy(bh->b_data + sizeof(struct extendedFileEntry), UDF_I_DATA(inode), 1577 memcpy(bh->b_data + sizeof(struct extendedFileEntry),
1466 inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry)); 1578 iinfo->i_ext.i_data,
1579 inode->i_sb->s_blocksize -
1580 sizeof(struct extendedFileEntry));
1467 efe->objectSize = cpu_to_le64(inode->i_size); 1581 efe->objectSize = cpu_to_le64(inode->i_size);
1468 efe->logicalBlocksRecorded = cpu_to_le64( 1582 efe->logicalBlocksRecorded = cpu_to_le64(
1469 (inode->i_blocks + (1 << (inode->i_sb->s_blocksize_bits - 9)) - 1) >> 1583 (inode->i_blocks + (1 << (blocksize_bits - 9)) - 1) >>
1470 (inode->i_sb->s_blocksize_bits - 9)); 1584 (blocksize_bits - 9));
1471 1585
1472 if (UDF_I_CRTIME(inode).tv_sec > inode->i_atime.tv_sec || 1586 if (iinfo->i_crtime.tv_sec > inode->i_atime.tv_sec ||
1473 (UDF_I_CRTIME(inode).tv_sec == inode->i_atime.tv_sec && 1587 (iinfo->i_crtime.tv_sec == inode->i_atime.tv_sec &&
1474 UDF_I_CRTIME(inode).tv_nsec > inode->i_atime.tv_nsec)) { 1588 iinfo->i_crtime.tv_nsec > inode->i_atime.tv_nsec))
1475 UDF_I_CRTIME(inode) = inode->i_atime; 1589 iinfo->i_crtime = inode->i_atime;
1476 } 1590
1477 if (UDF_I_CRTIME(inode).tv_sec > inode->i_mtime.tv_sec || 1591 if (iinfo->i_crtime.tv_sec > inode->i_mtime.tv_sec ||
1478 (UDF_I_CRTIME(inode).tv_sec == inode->i_mtime.tv_sec && 1592 (iinfo->i_crtime.tv_sec == inode->i_mtime.tv_sec &&
1479 UDF_I_CRTIME(inode).tv_nsec > inode->i_mtime.tv_nsec)) { 1593 iinfo->i_crtime.tv_nsec > inode->i_mtime.tv_nsec))
1480 UDF_I_CRTIME(inode) = inode->i_mtime; 1594 iinfo->i_crtime = inode->i_mtime;
1481 } 1595
1482 if (UDF_I_CRTIME(inode).tv_sec > inode->i_ctime.tv_sec || 1596 if (iinfo->i_crtime.tv_sec > inode->i_ctime.tv_sec ||
1483 (UDF_I_CRTIME(inode).tv_sec == inode->i_ctime.tv_sec && 1597 (iinfo->i_crtime.tv_sec == inode->i_ctime.tv_sec &&
1484 UDF_I_CRTIME(inode).tv_nsec > inode->i_ctime.tv_nsec)) { 1598 iinfo->i_crtime.tv_nsec > inode->i_ctime.tv_nsec))
1485 UDF_I_CRTIME(inode) = inode->i_ctime; 1599 iinfo->i_crtime = inode->i_ctime;
1486 }
1487 1600
1488 if (udf_time_to_stamp(&cpu_time, inode->i_atime)) 1601 if (udf_time_to_stamp(&cpu_time, inode->i_atime))
1489 efe->accessTime = cpu_to_lets(cpu_time); 1602 efe->accessTime = cpu_to_lets(cpu_time);
1490 if (udf_time_to_stamp(&cpu_time, inode->i_mtime)) 1603 if (udf_time_to_stamp(&cpu_time, inode->i_mtime))
1491 efe->modificationTime = cpu_to_lets(cpu_time); 1604 efe->modificationTime = cpu_to_lets(cpu_time);
1492 if (udf_time_to_stamp(&cpu_time, UDF_I_CRTIME(inode))) 1605 if (udf_time_to_stamp(&cpu_time, iinfo->i_crtime))
1493 efe->createTime = cpu_to_lets(cpu_time); 1606 efe->createTime = cpu_to_lets(cpu_time);
1494 if (udf_time_to_stamp(&cpu_time, inode->i_ctime)) 1607 if (udf_time_to_stamp(&cpu_time, inode->i_ctime))
1495 efe->attrTime = cpu_to_lets(cpu_time); 1608 efe->attrTime = cpu_to_lets(cpu_time);
@@ -1498,13 +1611,13 @@ static int udf_update_inode(struct inode *inode, int do_sync)
1498 strcpy(efe->impIdent.ident, UDF_ID_DEVELOPER); 1611 strcpy(efe->impIdent.ident, UDF_ID_DEVELOPER);
1499 efe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX; 1612 efe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
1500 efe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX; 1613 efe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
1501 efe->uniqueID = cpu_to_le64(UDF_I_UNIQUE(inode)); 1614 efe->uniqueID = cpu_to_le64(iinfo->i_unique);
1502 efe->lengthExtendedAttr = cpu_to_le32(UDF_I_LENEATTR(inode)); 1615 efe->lengthExtendedAttr = cpu_to_le32(iinfo->i_lenEAttr);
1503 efe->lengthAllocDescs = cpu_to_le32(UDF_I_LENALLOC(inode)); 1616 efe->lengthAllocDescs = cpu_to_le32(iinfo->i_lenAlloc);
1504 efe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_EFE); 1617 efe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_EFE);
1505 crclen = sizeof(struct extendedFileEntry); 1618 crclen = sizeof(struct extendedFileEntry);
1506 } 1619 }
1507 if (UDF_I_STRAT4096(inode)) { 1620 if (iinfo->i_strat4096) {
1508 fe->icbTag.strategyType = cpu_to_le16(4096); 1621 fe->icbTag.strategyType = cpu_to_le16(4096);
1509 fe->icbTag.strategyParameter = cpu_to_le16(1); 1622 fe->icbTag.strategyParameter = cpu_to_le16(1);
1510 fe->icbTag.numEntries = cpu_to_le16(2); 1623 fe->icbTag.numEntries = cpu_to_le16(2);
@@ -1528,7 +1641,7 @@ static int udf_update_inode(struct inode *inode, int do_sync)
1528 else if (S_ISSOCK(inode->i_mode)) 1641 else if (S_ISSOCK(inode->i_mode))
1529 fe->icbTag.fileType = ICBTAG_FILE_TYPE_SOCKET; 1642 fe->icbTag.fileType = ICBTAG_FILE_TYPE_SOCKET;
1530 1643
1531 icbflags = UDF_I_ALLOCTYPE(inode) | 1644 icbflags = iinfo->i_alloc_type |
1532 ((inode->i_mode & S_ISUID) ? ICBTAG_FLAG_SETUID : 0) | 1645 ((inode->i_mode & S_ISUID) ? ICBTAG_FLAG_SETUID : 0) |
1533 ((inode->i_mode & S_ISGID) ? ICBTAG_FLAG_SETGID : 0) | 1646 ((inode->i_mode & S_ISGID) ? ICBTAG_FLAG_SETGID : 0) |
1534 ((inode->i_mode & S_ISVTX) ? ICBTAG_FLAG_STICKY : 0) | 1647 ((inode->i_mode & S_ISVTX) ? ICBTAG_FLAG_STICKY : 0) |
@@ -1537,29 +1650,28 @@ static int udf_update_inode(struct inode *inode, int do_sync)
1537 ICBTAG_FLAG_SETGID | ICBTAG_FLAG_STICKY)); 1650 ICBTAG_FLAG_SETGID | ICBTAG_FLAG_STICKY));
1538 1651
1539 fe->icbTag.flags = cpu_to_le16(icbflags); 1652 fe->icbTag.flags = cpu_to_le16(icbflags);
1540 if (UDF_SB_UDFREV(inode->i_sb) >= 0x0200) 1653 if (sbi->s_udfrev >= 0x0200)
1541 fe->descTag.descVersion = cpu_to_le16(3); 1654 fe->descTag.descVersion = cpu_to_le16(3);
1542 else 1655 else
1543 fe->descTag.descVersion = cpu_to_le16(2); 1656 fe->descTag.descVersion = cpu_to_le16(2);
1544 fe->descTag.tagSerialNum = cpu_to_le16(UDF_SB_SERIALNUM(inode->i_sb)); 1657 fe->descTag.tagSerialNum = cpu_to_le16(sbi->s_serial_number);
1545 fe->descTag.tagLocation = cpu_to_le32(UDF_I_LOCATION(inode).logicalBlockNum); 1658 fe->descTag.tagLocation = cpu_to_le32(
1546 crclen += UDF_I_LENEATTR(inode) + UDF_I_LENALLOC(inode) - sizeof(tag); 1659 iinfo->i_location.logicalBlockNum);
1660 crclen += iinfo->i_lenEAttr + iinfo->i_lenAlloc -
1661 sizeof(tag);
1547 fe->descTag.descCRCLength = cpu_to_le16(crclen); 1662 fe->descTag.descCRCLength = cpu_to_le16(crclen);
1548 fe->descTag.descCRC = cpu_to_le16(udf_crc((char *)fe + sizeof(tag), crclen, 0)); 1663 fe->descTag.descCRC = cpu_to_le16(udf_crc((char *)fe + sizeof(tag),
1549 1664 crclen, 0));
1550 fe->descTag.tagChecksum = 0; 1665 fe->descTag.tagChecksum = udf_tag_checksum(&fe->descTag);
1551 for (i = 0; i < 16; i++) {
1552 if (i != 4)
1553 fe->descTag.tagChecksum += ((uint8_t *)&(fe->descTag))[i];
1554 }
1555 1666
1556 /* write the data blocks */ 1667 /* write the data blocks */
1557 mark_buffer_dirty(bh); 1668 mark_buffer_dirty(bh);
1558 if (do_sync) { 1669 if (do_sync) {
1559 sync_dirty_buffer(bh); 1670 sync_dirty_buffer(bh);
1560 if (buffer_req(bh) && !buffer_uptodate(bh)) { 1671 if (buffer_req(bh) && !buffer_uptodate(bh)) {
1561 printk("IO error syncing udf inode [%s:%08lx]\n", 1672 printk(KERN_WARNING "IO error syncing udf inode "
1562 inode->i_sb->s_id, inode->i_ino); 1673 "[%s:%08lx]\n", inode->i_sb->s_id,
1674 inode->i_ino);
1563 err = -EIO; 1675 err = -EIO;
1564 } 1676 }
1565 } 1677 }
@@ -1577,7 +1689,7 @@ struct inode *udf_iget(struct super_block *sb, kernel_lb_addr ino)
1577 return NULL; 1689 return NULL;
1578 1690
1579 if (inode->i_state & I_NEW) { 1691 if (inode->i_state & I_NEW) {
1580 memcpy(&UDF_I_LOCATION(inode), &ino, sizeof(kernel_lb_addr)); 1692 memcpy(&UDF_I(inode)->i_location, &ino, sizeof(kernel_lb_addr));
1581 __udf_read_inode(inode); 1693 __udf_read_inode(inode);
1582 unlock_new_inode(inode); 1694 unlock_new_inode(inode);
1583 } 1695 }
@@ -1585,7 +1697,8 @@ struct inode *udf_iget(struct super_block *sb, kernel_lb_addr ino)
1585 if (is_bad_inode(inode)) 1697 if (is_bad_inode(inode))
1586 goto out_iput; 1698 goto out_iput;
1587 1699
1588 if (ino.logicalBlockNum >= UDF_SB_PARTLEN(sb, ino.partitionReferenceNum)) { 1700 if (ino.logicalBlockNum >= UDF_SB(sb)->
1701 s_partmaps[ino.partitionReferenceNum].s_partition_len) {
1589 udf_debug("block=%d, partition=%d out of range\n", 1702 udf_debug("block=%d, partition=%d out of range\n",
1590 ino.logicalBlockNum, ino.partitionReferenceNum); 1703 ino.logicalBlockNum, ino.partitionReferenceNum);
1591 make_bad_inode(inode); 1704 make_bad_inode(inode);
@@ -1599,7 +1712,7 @@ struct inode *udf_iget(struct super_block *sb, kernel_lb_addr ino)
1599 return NULL; 1712 return NULL;
1600} 1713}
1601 1714
1602int8_t udf_add_aext(struct inode * inode, struct extent_position * epos, 1715int8_t udf_add_aext(struct inode *inode, struct extent_position *epos,
1603 kernel_lb_addr eloc, uint32_t elen, int inc) 1716 kernel_lb_addr eloc, uint32_t elen, int inc)
1604{ 1717{
1605 int adsize; 1718 int adsize;
@@ -1608,15 +1721,18 @@ int8_t udf_add_aext(struct inode * inode, struct extent_position * epos,
1608 struct allocExtDesc *aed; 1721 struct allocExtDesc *aed;
1609 int8_t etype; 1722 int8_t etype;
1610 uint8_t *ptr; 1723 uint8_t *ptr;
1724 struct udf_inode_info *iinfo = UDF_I(inode);
1611 1725
1612 if (!epos->bh) 1726 if (!epos->bh)
1613 ptr = UDF_I_DATA(inode) + epos->offset - udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode); 1727 ptr = iinfo->i_ext.i_data + epos->offset -
1728 udf_file_entry_alloc_offset(inode) +
1729 iinfo->i_lenEAttr;
1614 else 1730 else
1615 ptr = epos->bh->b_data + epos->offset; 1731 ptr = epos->bh->b_data + epos->offset;
1616 1732
1617 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT) 1733 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
1618 adsize = sizeof(short_ad); 1734 adsize = sizeof(short_ad);
1619 else if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_LONG) 1735 else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
1620 adsize = sizeof(long_ad); 1736 adsize = sizeof(long_ad);
1621 else 1737 else
1622 return -1; 1738 return -1;
@@ -1627,15 +1743,16 @@ int8_t udf_add_aext(struct inode * inode, struct extent_position * epos,
1627 int err, loffset; 1743 int err, loffset;
1628 kernel_lb_addr obloc = epos->block; 1744 kernel_lb_addr obloc = epos->block;
1629 1745
1630 if (!(epos->block.logicalBlockNum = udf_new_block(inode->i_sb, NULL, 1746 epos->block.logicalBlockNum = udf_new_block(inode->i_sb, NULL,
1631 obloc.partitionReferenceNum, 1747 obloc.partitionReferenceNum,
1632 obloc.logicalBlockNum, &err))) { 1748 obloc.logicalBlockNum, &err);
1749 if (!epos->block.logicalBlockNum)
1633 return -1; 1750 return -1;
1634 } 1751 nbh = udf_tgetblk(inode->i_sb, udf_get_lb_pblock(inode->i_sb,
1635 if (!(nbh = udf_tgetblk(inode->i_sb, udf_get_lb_pblock(inode->i_sb, 1752 epos->block,
1636 epos->block, 0)))) { 1753 0));
1754 if (!nbh)
1637 return -1; 1755 return -1;
1638 }
1639 lock_buffer(nbh); 1756 lock_buffer(nbh);
1640 memset(nbh->b_data, 0x00, inode->i_sb->s_blocksize); 1757 memset(nbh->b_data, 0x00, inode->i_sb->s_blocksize);
1641 set_buffer_uptodate(nbh); 1758 set_buffer_uptodate(nbh);
@@ -1644,7 +1761,8 @@ int8_t udf_add_aext(struct inode * inode, struct extent_position * epos,
1644 1761
1645 aed = (struct allocExtDesc *)(nbh->b_data); 1762 aed = (struct allocExtDesc *)(nbh->b_data);
1646 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT)) 1763 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT))
1647 aed->previousAllocExtLocation = cpu_to_le32(obloc.logicalBlockNum); 1764 aed->previousAllocExtLocation =
1765 cpu_to_le32(obloc.logicalBlockNum);
1648 if (epos->offset + adsize > inode->i_sb->s_blocksize) { 1766 if (epos->offset + adsize > inode->i_sb->s_blocksize) {
1649 loffset = epos->offset; 1767 loffset = epos->offset;
1650 aed->lengthAllocDescs = cpu_to_le32(adsize); 1768 aed->lengthAllocDescs = cpu_to_le32(adsize);
@@ -1661,24 +1779,26 @@ int8_t udf_add_aext(struct inode * inode, struct extent_position * epos,
1661 if (epos->bh) { 1779 if (epos->bh) {
1662 aed = (struct allocExtDesc *)epos->bh->b_data; 1780 aed = (struct allocExtDesc *)epos->bh->b_data;
1663 aed->lengthAllocDescs = 1781 aed->lengthAllocDescs =
1664 cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize); 1782 cpu_to_le32(le32_to_cpu(
1783 aed->lengthAllocDescs) + adsize);
1665 } else { 1784 } else {
1666 UDF_I_LENALLOC(inode) += adsize; 1785 iinfo->i_lenAlloc += adsize;
1667 mark_inode_dirty(inode); 1786 mark_inode_dirty(inode);
1668 } 1787 }
1669 } 1788 }
1670 if (UDF_SB_UDFREV(inode->i_sb) >= 0x0200) 1789 if (UDF_SB(inode->i_sb)->s_udfrev >= 0x0200)
1671 udf_new_tag(nbh->b_data, TAG_IDENT_AED, 3, 1, 1790 udf_new_tag(nbh->b_data, TAG_IDENT_AED, 3, 1,
1672 epos->block.logicalBlockNum, sizeof(tag)); 1791 epos->block.logicalBlockNum, sizeof(tag));
1673 else 1792 else
1674 udf_new_tag(nbh->b_data, TAG_IDENT_AED, 2, 1, 1793 udf_new_tag(nbh->b_data, TAG_IDENT_AED, 2, 1,
1675 epos->block.logicalBlockNum, sizeof(tag)); 1794 epos->block.logicalBlockNum, sizeof(tag));
1676 switch (UDF_I_ALLOCTYPE(inode)) { 1795 switch (iinfo->i_alloc_type) {
1677 case ICBTAG_FLAG_AD_SHORT: 1796 case ICBTAG_FLAG_AD_SHORT:
1678 sad = (short_ad *)sptr; 1797 sad = (short_ad *)sptr;
1679 sad->extLength = cpu_to_le32(EXT_NEXT_EXTENT_ALLOCDECS | 1798 sad->extLength = cpu_to_le32(EXT_NEXT_EXTENT_ALLOCDECS |
1680 inode->i_sb->s_blocksize); 1799 inode->i_sb->s_blocksize);
1681 sad->extPosition = cpu_to_le32(epos->block.logicalBlockNum); 1800 sad->extPosition =
1801 cpu_to_le32(epos->block.logicalBlockNum);
1682 break; 1802 break;
1683 case ICBTAG_FLAG_AD_LONG: 1803 case ICBTAG_FLAG_AD_LONG:
1684 lad = (long_ad *)sptr; 1804 lad = (long_ad *)sptr;
@@ -1690,10 +1810,11 @@ int8_t udf_add_aext(struct inode * inode, struct extent_position * epos,
1690 } 1810 }
1691 if (epos->bh) { 1811 if (epos->bh) {
1692 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || 1812 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
1693 UDF_SB_UDFREV(inode->i_sb) >= 0x0201) 1813 UDF_SB(inode->i_sb)->s_udfrev >= 0x0201)
1694 udf_update_tag(epos->bh->b_data, loffset); 1814 udf_update_tag(epos->bh->b_data, loffset);
1695 else 1815 else
1696 udf_update_tag(epos->bh->b_data, sizeof(struct allocExtDesc)); 1816 udf_update_tag(epos->bh->b_data,
1817 sizeof(struct allocExtDesc));
1697 mark_buffer_dirty_inode(epos->bh, inode); 1818 mark_buffer_dirty_inode(epos->bh, inode);
1698 brelse(epos->bh); 1819 brelse(epos->bh);
1699 } else { 1820 } else {
@@ -1705,36 +1826,43 @@ int8_t udf_add_aext(struct inode * inode, struct extent_position * epos,
1705 etype = udf_write_aext(inode, epos, eloc, elen, inc); 1826 etype = udf_write_aext(inode, epos, eloc, elen, inc);
1706 1827
1707 if (!epos->bh) { 1828 if (!epos->bh) {
1708 UDF_I_LENALLOC(inode) += adsize; 1829 iinfo->i_lenAlloc += adsize;
1709 mark_inode_dirty(inode); 1830 mark_inode_dirty(inode);
1710 } else { 1831 } else {
1711 aed = (struct allocExtDesc *)epos->bh->b_data; 1832 aed = (struct allocExtDesc *)epos->bh->b_data;
1712 aed->lengthAllocDescs = 1833 aed->lengthAllocDescs =
1713 cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize); 1834 cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) +
1714 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201) 1835 adsize);
1715 udf_update_tag(epos->bh->b_data, epos->offset + (inc ? 0 : adsize)); 1836 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
1837 UDF_SB(inode->i_sb)->s_udfrev >= 0x0201)
1838 udf_update_tag(epos->bh->b_data,
1839 epos->offset + (inc ? 0 : adsize));
1716 else 1840 else
1717 udf_update_tag(epos->bh->b_data, sizeof(struct allocExtDesc)); 1841 udf_update_tag(epos->bh->b_data,
1842 sizeof(struct allocExtDesc));
1718 mark_buffer_dirty_inode(epos->bh, inode); 1843 mark_buffer_dirty_inode(epos->bh, inode);
1719 } 1844 }
1720 1845
1721 return etype; 1846 return etype;
1722} 1847}
1723 1848
1724int8_t udf_write_aext(struct inode * inode, struct extent_position * epos, 1849int8_t udf_write_aext(struct inode *inode, struct extent_position *epos,
1725 kernel_lb_addr eloc, uint32_t elen, int inc) 1850 kernel_lb_addr eloc, uint32_t elen, int inc)
1726{ 1851{
1727 int adsize; 1852 int adsize;
1728 uint8_t *ptr; 1853 uint8_t *ptr;
1729 short_ad *sad; 1854 short_ad *sad;
1730 long_ad *lad; 1855 long_ad *lad;
1856 struct udf_inode_info *iinfo = UDF_I(inode);
1731 1857
1732 if (!epos->bh) 1858 if (!epos->bh)
1733 ptr = UDF_I_DATA(inode) + epos->offset - udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode); 1859 ptr = iinfo->i_ext.i_data + epos->offset -
1860 udf_file_entry_alloc_offset(inode) +
1861 iinfo->i_lenEAttr;
1734 else 1862 else
1735 ptr = epos->bh->b_data + epos->offset; 1863 ptr = epos->bh->b_data + epos->offset;
1736 1864
1737 switch (UDF_I_ALLOCTYPE(inode)) { 1865 switch (iinfo->i_alloc_type) {
1738 case ICBTAG_FLAG_AD_SHORT: 1866 case ICBTAG_FLAG_AD_SHORT:
1739 sad = (short_ad *)ptr; 1867 sad = (short_ad *)ptr;
1740 sad->extLength = cpu_to_le32(elen); 1868 sad->extLength = cpu_to_le32(elen);
@@ -1754,10 +1882,12 @@ int8_t udf_write_aext(struct inode * inode, struct extent_position * epos,
1754 1882
1755 if (epos->bh) { 1883 if (epos->bh) {
1756 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || 1884 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
1757 UDF_SB_UDFREV(inode->i_sb) >= 0x0201) { 1885 UDF_SB(inode->i_sb)->s_udfrev >= 0x0201) {
1758 struct allocExtDesc *aed = (struct allocExtDesc *)epos->bh->b_data; 1886 struct allocExtDesc *aed =
1887 (struct allocExtDesc *)epos->bh->b_data;
1759 udf_update_tag(epos->bh->b_data, 1888 udf_update_tag(epos->bh->b_data,
1760 le32_to_cpu(aed->lengthAllocDescs) + sizeof(struct allocExtDesc)); 1889 le32_to_cpu(aed->lengthAllocDescs) +
1890 sizeof(struct allocExtDesc));
1761 } 1891 }
1762 mark_buffer_dirty_inode(epos->bh, inode); 1892 mark_buffer_dirty_inode(epos->bh, inode);
1763 } else { 1893 } else {
@@ -1770,19 +1900,21 @@ int8_t udf_write_aext(struct inode * inode, struct extent_position * epos,
1770 return (elen >> 30); 1900 return (elen >> 30);
1771} 1901}
1772 1902
1773int8_t udf_next_aext(struct inode * inode, struct extent_position * epos, 1903int8_t udf_next_aext(struct inode *inode, struct extent_position *epos,
1774 kernel_lb_addr * eloc, uint32_t * elen, int inc) 1904 kernel_lb_addr *eloc, uint32_t *elen, int inc)
1775{ 1905{
1776 int8_t etype; 1906 int8_t etype;
1777 1907
1778 while ((etype = udf_current_aext(inode, epos, eloc, elen, inc)) == 1908 while ((etype = udf_current_aext(inode, epos, eloc, elen, inc)) ==
1779 (EXT_NEXT_EXTENT_ALLOCDECS >> 30)) { 1909 (EXT_NEXT_EXTENT_ALLOCDECS >> 30)) {
1910 int block;
1780 epos->block = *eloc; 1911 epos->block = *eloc;
1781 epos->offset = sizeof(struct allocExtDesc); 1912 epos->offset = sizeof(struct allocExtDesc);
1782 brelse(epos->bh); 1913 brelse(epos->bh);
1783 if (!(epos->bh = udf_tread(inode->i_sb, udf_get_lb_pblock(inode->i_sb, epos->block, 0)))) { 1914 block = udf_get_lb_pblock(inode->i_sb, epos->block, 0);
1784 udf_debug("reading block %d failed!\n", 1915 epos->bh = udf_tread(inode->i_sb, block);
1785 udf_get_lb_pblock(inode->i_sb, epos->block, 0)); 1916 if (!epos->bh) {
1917 udf_debug("reading block %d failed!\n", block);
1786 return -1; 1918 return -1;
1787 } 1919 }
1788 } 1920 }
@@ -1790,47 +1922,55 @@ int8_t udf_next_aext(struct inode * inode, struct extent_position * epos,
1790 return etype; 1922 return etype;
1791} 1923}
1792 1924
1793int8_t udf_current_aext(struct inode * inode, struct extent_position * epos, 1925int8_t udf_current_aext(struct inode *inode, struct extent_position *epos,
1794 kernel_lb_addr * eloc, uint32_t * elen, int inc) 1926 kernel_lb_addr *eloc, uint32_t *elen, int inc)
1795{ 1927{
1796 int alen; 1928 int alen;
1797 int8_t etype; 1929 int8_t etype;
1798 uint8_t *ptr; 1930 uint8_t *ptr;
1799 short_ad *sad; 1931 short_ad *sad;
1800 long_ad *lad; 1932 long_ad *lad;
1801 1933 struct udf_inode_info *iinfo = UDF_I(inode);
1802 1934
1803 if (!epos->bh) { 1935 if (!epos->bh) {
1804 if (!epos->offset) 1936 if (!epos->offset)
1805 epos->offset = udf_file_entry_alloc_offset(inode); 1937 epos->offset = udf_file_entry_alloc_offset(inode);
1806 ptr = UDF_I_DATA(inode) + epos->offset - udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode); 1938 ptr = iinfo->i_ext.i_data + epos->offset -
1807 alen = udf_file_entry_alloc_offset(inode) + UDF_I_LENALLOC(inode); 1939 udf_file_entry_alloc_offset(inode) +
1940 iinfo->i_lenEAttr;
1941 alen = udf_file_entry_alloc_offset(inode) +
1942 iinfo->i_lenAlloc;
1808 } else { 1943 } else {
1809 if (!epos->offset) 1944 if (!epos->offset)
1810 epos->offset = sizeof(struct allocExtDesc); 1945 epos->offset = sizeof(struct allocExtDesc);
1811 ptr = epos->bh->b_data + epos->offset; 1946 ptr = epos->bh->b_data + epos->offset;
1812 alen = sizeof(struct allocExtDesc) + 1947 alen = sizeof(struct allocExtDesc) +
1813 le32_to_cpu(((struct allocExtDesc *)epos->bh->b_data)->lengthAllocDescs); 1948 le32_to_cpu(((struct allocExtDesc *)epos->bh->b_data)->
1949 lengthAllocDescs);
1814 } 1950 }
1815 1951
1816 switch (UDF_I_ALLOCTYPE(inode)) { 1952 switch (iinfo->i_alloc_type) {
1817 case ICBTAG_FLAG_AD_SHORT: 1953 case ICBTAG_FLAG_AD_SHORT:
1818 if (!(sad = udf_get_fileshortad(ptr, alen, &epos->offset, inc))) 1954 sad = udf_get_fileshortad(ptr, alen, &epos->offset, inc);
1955 if (!sad)
1819 return -1; 1956 return -1;
1820 etype = le32_to_cpu(sad->extLength) >> 30; 1957 etype = le32_to_cpu(sad->extLength) >> 30;
1821 eloc->logicalBlockNum = le32_to_cpu(sad->extPosition); 1958 eloc->logicalBlockNum = le32_to_cpu(sad->extPosition);
1822 eloc->partitionReferenceNum = UDF_I_LOCATION(inode).partitionReferenceNum; 1959 eloc->partitionReferenceNum =
1960 iinfo->i_location.partitionReferenceNum;
1823 *elen = le32_to_cpu(sad->extLength) & UDF_EXTENT_LENGTH_MASK; 1961 *elen = le32_to_cpu(sad->extLength) & UDF_EXTENT_LENGTH_MASK;
1824 break; 1962 break;
1825 case ICBTAG_FLAG_AD_LONG: 1963 case ICBTAG_FLAG_AD_LONG:
1826 if (!(lad = udf_get_filelongad(ptr, alen, &epos->offset, inc))) 1964 lad = udf_get_filelongad(ptr, alen, &epos->offset, inc);
1965 if (!lad)
1827 return -1; 1966 return -1;
1828 etype = le32_to_cpu(lad->extLength) >> 30; 1967 etype = le32_to_cpu(lad->extLength) >> 30;
1829 *eloc = lelb_to_cpu(lad->extLocation); 1968 *eloc = lelb_to_cpu(lad->extLocation);
1830 *elen = le32_to_cpu(lad->extLength) & UDF_EXTENT_LENGTH_MASK; 1969 *elen = le32_to_cpu(lad->extLength) & UDF_EXTENT_LENGTH_MASK;
1831 break; 1970 break;
1832 default: 1971 default:
1833 udf_debug("alloc_type = %d unsupported\n", UDF_I_ALLOCTYPE(inode)); 1972 udf_debug("alloc_type = %d unsupported\n",
1973 iinfo->i_alloc_type);
1834 return -1; 1974 return -1;
1835 } 1975 }
1836 1976
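udf_current_aext() above reads one allocation descriptor at a time: the top two bits of the little-endian extLength word give the extent type and the low 30 bits give its length in bytes, while the descriptor itself is either a short_ad (position only, partition taken from the inode) or a long_ad (position plus partition). A stand-alone sketch of that decoding, using a simplified short_ad layout and skipping the le32_to_cpu byte-swapping the kernel code performs, might look like this:

/* Sketch: decodes extLength the way the diff's udf_current_aext()
 * does.  The struct below is a simplified stand-in for the real
 * packed, little-endian short_ad definition in the UDF headers. */
#include <stdint.h>
#include <stdio.h>

#define UDF_EXTENT_LENGTH_MASK 0x3FFFFFFF	/* low 30 bits = length */

struct short_ad {
	uint32_t extLength;
	uint32_t extPosition;
};

static void decode_short_ad(const struct short_ad *sad,
			    int8_t *etype, uint32_t *elen, uint32_t *block)
{
	*etype = sad->extLength >> 30;			/* extent type */
	*elen  = sad->extLength & UDF_EXTENT_LENGTH_MASK; /* length in bytes */
	*block = sad->extPosition;			/* logical block number */
}

int main(void)
{
	struct short_ad sad = { .extLength = (1u << 30) | 4096, .extPosition = 42 };
	int8_t etype;
	uint32_t elen, block;

	decode_short_ad(&sad, &etype, &elen, &block);
	printf("type=%d len=%u block=%u\n", etype, (unsigned)elen, (unsigned)block);
	return 0;
}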
@@ -1858,22 +1998,24 @@ static int8_t udf_insert_aext(struct inode *inode, struct extent_position epos,
1858 return (nelen >> 30); 1998 return (nelen >> 30);
1859} 1999}
1860 2000
1861int8_t udf_delete_aext(struct inode * inode, struct extent_position epos, 2001int8_t udf_delete_aext(struct inode *inode, struct extent_position epos,
1862 kernel_lb_addr eloc, uint32_t elen) 2002 kernel_lb_addr eloc, uint32_t elen)
1863{ 2003{
1864 struct extent_position oepos; 2004 struct extent_position oepos;
1865 int adsize; 2005 int adsize;
1866 int8_t etype; 2006 int8_t etype;
1867 struct allocExtDesc *aed; 2007 struct allocExtDesc *aed;
2008 struct udf_inode_info *iinfo;
1868 2009
1869 if (epos.bh) { 2010 if (epos.bh) {
1870 get_bh(epos.bh); 2011 get_bh(epos.bh);
1871 get_bh(epos.bh); 2012 get_bh(epos.bh);
1872 } 2013 }
1873 2014
1874 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT) 2015 iinfo = UDF_I(inode);
2016 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
1875 adsize = sizeof(short_ad); 2017 adsize = sizeof(short_ad);
1876 else if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_LONG) 2018 else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
1877 adsize = sizeof(long_ad); 2019 adsize = sizeof(long_ad);
1878 else 2020 else
1879 adsize = 0; 2021 adsize = 0;
@@ -1900,33 +2042,39 @@ int8_t udf_delete_aext(struct inode * inode, struct extent_position epos,
1900 udf_write_aext(inode, &oepos, eloc, elen, 1); 2042 udf_write_aext(inode, &oepos, eloc, elen, 1);
1901 udf_write_aext(inode, &oepos, eloc, elen, 1); 2043 udf_write_aext(inode, &oepos, eloc, elen, 1);
1902 if (!oepos.bh) { 2044 if (!oepos.bh) {
1903 UDF_I_LENALLOC(inode) -= (adsize * 2); 2045 iinfo->i_lenAlloc -= (adsize * 2);
1904 mark_inode_dirty(inode); 2046 mark_inode_dirty(inode);
1905 } else { 2047 } else {
1906 aed = (struct allocExtDesc *)oepos.bh->b_data; 2048 aed = (struct allocExtDesc *)oepos.bh->b_data;
1907 aed->lengthAllocDescs = 2049 aed->lengthAllocDescs =
1908 cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) - (2 * adsize)); 2050 cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) -
2051 (2 * adsize));
1909 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || 2052 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
1910 UDF_SB_UDFREV(inode->i_sb) >= 0x0201) 2053 UDF_SB(inode->i_sb)->s_udfrev >= 0x0201)
1911 udf_update_tag(oepos.bh->b_data, oepos.offset - (2 * adsize)); 2054 udf_update_tag(oepos.bh->b_data,
2055 oepos.offset - (2 * adsize));
1912 else 2056 else
1913 udf_update_tag(oepos.bh->b_data, sizeof(struct allocExtDesc)); 2057 udf_update_tag(oepos.bh->b_data,
2058 sizeof(struct allocExtDesc));
1914 mark_buffer_dirty_inode(oepos.bh, inode); 2059 mark_buffer_dirty_inode(oepos.bh, inode);
1915 } 2060 }
1916 } else { 2061 } else {
1917 udf_write_aext(inode, &oepos, eloc, elen, 1); 2062 udf_write_aext(inode, &oepos, eloc, elen, 1);
1918 if (!oepos.bh) { 2063 if (!oepos.bh) {
1919 UDF_I_LENALLOC(inode) -= adsize; 2064 iinfo->i_lenAlloc -= adsize;
1920 mark_inode_dirty(inode); 2065 mark_inode_dirty(inode);
1921 } else { 2066 } else {
1922 aed = (struct allocExtDesc *)oepos.bh->b_data; 2067 aed = (struct allocExtDesc *)oepos.bh->b_data;
1923 aed->lengthAllocDescs = 2068 aed->lengthAllocDescs =
1924 cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) - adsize); 2069 cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) -
2070 adsize);
1925 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || 2071 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
1926 UDF_SB_UDFREV(inode->i_sb) >= 0x0201) 2072 UDF_SB(inode->i_sb)->s_udfrev >= 0x0201)
1927 udf_update_tag(oepos.bh->b_data, epos.offset - adsize); 2073 udf_update_tag(oepos.bh->b_data,
2074 epos.offset - adsize);
1928 else 2075 else
1929 udf_update_tag(oepos.bh->b_data, sizeof(struct allocExtDesc)); 2076 udf_update_tag(oepos.bh->b_data,
2077 sizeof(struct allocExtDesc));
1930 mark_buffer_dirty_inode(oepos.bh, inode); 2078 mark_buffer_dirty_inode(oepos.bh, inode);
1931 } 2079 }
1932 } 2080 }
@@ -1937,34 +2085,38 @@ int8_t udf_delete_aext(struct inode * inode, struct extent_position epos,
1937 return (elen >> 30); 2085 return (elen >> 30);
1938} 2086}
1939 2087
1940int8_t inode_bmap(struct inode * inode, sector_t block, 2088int8_t inode_bmap(struct inode *inode, sector_t block,
1941 struct extent_position * pos, kernel_lb_addr * eloc, 2089 struct extent_position *pos, kernel_lb_addr *eloc,
1942 uint32_t * elen, sector_t * offset) 2090 uint32_t *elen, sector_t *offset)
1943{ 2091{
2092 unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;
1944 loff_t lbcount = 0, bcount = 2093 loff_t lbcount = 0, bcount =
1945 (loff_t) block << inode->i_sb->s_blocksize_bits; 2094 (loff_t) block << blocksize_bits;
1946 int8_t etype; 2095 int8_t etype;
2096 struct udf_inode_info *iinfo;
1947 2097
1948 if (block < 0) { 2098 if (block < 0) {
1949 printk(KERN_ERR "udf: inode_bmap: block < 0\n"); 2099 printk(KERN_ERR "udf: inode_bmap: block < 0\n");
1950 return -1; 2100 return -1;
1951 } 2101 }
1952 2102
2103 iinfo = UDF_I(inode);
1953 pos->offset = 0; 2104 pos->offset = 0;
1954 pos->block = UDF_I_LOCATION(inode); 2105 pos->block = iinfo->i_location;
1955 pos->bh = NULL; 2106 pos->bh = NULL;
1956 *elen = 0; 2107 *elen = 0;
1957 2108
1958 do { 2109 do {
1959 if ((etype = udf_next_aext(inode, pos, eloc, elen, 1)) == -1) { 2110 etype = udf_next_aext(inode, pos, eloc, elen, 1);
1960 *offset = (bcount - lbcount) >> inode->i_sb->s_blocksize_bits; 2111 if (etype == -1) {
1961 UDF_I_LENEXTENTS(inode) = lbcount; 2112 *offset = (bcount - lbcount) >> blocksize_bits;
2113 iinfo->i_lenExtents = lbcount;
1962 return -1; 2114 return -1;
1963 } 2115 }
1964 lbcount += *elen; 2116 lbcount += *elen;
1965 } while (lbcount <= bcount); 2117 } while (lbcount <= bcount);
1966 2118
1967 *offset = (bcount + *elen - lbcount) >> inode->i_sb->s_blocksize_bits; 2119 *offset = (bcount + *elen - lbcount) >> blocksize_bits;
1968 2120
1969 return etype; 2121 return etype;
1970} 2122}
@@ -1979,7 +2131,8 @@ long udf_block_map(struct inode *inode, sector_t block)
1979 2131
1980 lock_kernel(); 2132 lock_kernel();
1981 2133
1982 if (inode_bmap(inode, block, &epos, &eloc, &elen, &offset) == (EXT_RECORDED_ALLOCATED >> 30)) 2134 if (inode_bmap(inode, block, &epos, &eloc, &elen, &offset) ==
2135 (EXT_RECORDED_ALLOCATED >> 30))
1983 ret = udf_get_lb_pblock(inode->i_sb, eloc, offset); 2136 ret = udf_get_lb_pblock(inode->i_sb, eloc, offset);
1984 else 2137 else
1985 ret = 0; 2138 ret = 0;
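Most of the inode.c churn above is mechanical: the UDF_I_FIELD(inode) accessor macros are replaced by one cached struct udf_inode_info pointer, iinfo = UDF_I(inode), that later statements dereference directly. The usual kernel idiom behind such an accessor, shown here only as an assumption about how UDF_I() is implemented, is a filesystem-private structure that embeds the VFS inode and is recovered with container_of(); field names in this sketch are illustrative, not the real udf_inode_info layout:

/* Sketch of the embed-and-container_of idiom UDF_I() is assumed to use. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct inode {				/* stand-in for the VFS inode */
	unsigned long i_ino;
};

struct udf_inode_info {			/* fs-private data wrapping the inode */
	unsigned long long i_unique;
	struct inode vfs_inode;		/* embedded VFS inode */
};

static struct udf_inode_info *UDF_I(struct inode *inode)
{
	return container_of(inode, struct udf_inode_info, vfs_inode);
}

int main(void)
{
	struct udf_inode_info info = { .i_unique = 7, .vfs_inode = { .i_ino = 12 } };
	struct inode *inode = &info.vfs_inode;

	/* Caching the pointer once, as the patch does, avoids re-deriving
	 * it for every field access. */
	struct udf_inode_info *iinfo = UDF_I(inode);
	printf("ino=%lu unique=%llu\n", inode->i_ino, iinfo->i_unique);
	return 0;
}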
diff --git a/fs/udf/misc.c b/fs/udf/misc.c
index 15297deb5051..a1d6da0caf71 100644
--- a/fs/udf/misc.c
+++ b/fs/udf/misc.c
@@ -51,18 +51,18 @@ struct genericFormat *udf_add_extendedattr(struct inode *inode, uint32_t size,
51 uint8_t *ea = NULL, *ad = NULL; 51 uint8_t *ea = NULL, *ad = NULL;
52 int offset; 52 int offset;
53 uint16_t crclen; 53 uint16_t crclen;
54 int i; 54 struct udf_inode_info *iinfo = UDF_I(inode);
55 55
56 ea = UDF_I_DATA(inode); 56 ea = iinfo->i_ext.i_data;
57 if (UDF_I_LENEATTR(inode)) { 57 if (iinfo->i_lenEAttr) {
58 ad = UDF_I_DATA(inode) + UDF_I_LENEATTR(inode); 58 ad = iinfo->i_ext.i_data + iinfo->i_lenEAttr;
59 } else { 59 } else {
60 ad = ea; 60 ad = ea;
61 size += sizeof(struct extendedAttrHeaderDesc); 61 size += sizeof(struct extendedAttrHeaderDesc);
62 } 62 }
63 63
64 offset = inode->i_sb->s_blocksize - udf_file_entry_alloc_offset(inode) - 64 offset = inode->i_sb->s_blocksize - udf_file_entry_alloc_offset(inode) -
65 UDF_I_LENALLOC(inode); 65 iinfo->i_lenAlloc;
66 66
67 /* TODO - Check for FreeEASpace */ 67 /* TODO - Check for FreeEASpace */
68 68
@@ -70,69 +70,80 @@ struct genericFormat *udf_add_extendedattr(struct inode *inode, uint32_t size,
70 struct extendedAttrHeaderDesc *eahd; 70 struct extendedAttrHeaderDesc *eahd;
71 eahd = (struct extendedAttrHeaderDesc *)ea; 71 eahd = (struct extendedAttrHeaderDesc *)ea;
72 72
73 if (UDF_I_LENALLOC(inode)) { 73 if (iinfo->i_lenAlloc)
74 memmove(&ad[size], ad, UDF_I_LENALLOC(inode)); 74 memmove(&ad[size], ad, iinfo->i_lenAlloc);
75 }
76 75
77 if (UDF_I_LENEATTR(inode)) { 76 if (iinfo->i_lenEAttr) {
78 /* check checksum/crc */ 77 /* check checksum/crc */
79 if (le16_to_cpu(eahd->descTag.tagIdent) != TAG_IDENT_EAHD || 78 if (eahd->descTag.tagIdent !=
80 le32_to_cpu(eahd->descTag.tagLocation) != UDF_I_LOCATION(inode).logicalBlockNum) { 79 cpu_to_le16(TAG_IDENT_EAHD) ||
80 le32_to_cpu(eahd->descTag.tagLocation) !=
81 iinfo->i_location.logicalBlockNum)
81 return NULL; 82 return NULL;
82 }
83 } else { 83 } else {
84 struct udf_sb_info *sbi = UDF_SB(inode->i_sb);
85
84 size -= sizeof(struct extendedAttrHeaderDesc); 86 size -= sizeof(struct extendedAttrHeaderDesc);
85 UDF_I_LENEATTR(inode) += sizeof(struct extendedAttrHeaderDesc); 87 iinfo->i_lenEAttr +=
88 sizeof(struct extendedAttrHeaderDesc);
86 eahd->descTag.tagIdent = cpu_to_le16(TAG_IDENT_EAHD); 89 eahd->descTag.tagIdent = cpu_to_le16(TAG_IDENT_EAHD);
87 if (UDF_SB_UDFREV(inode->i_sb) >= 0x0200) 90 if (sbi->s_udfrev >= 0x0200)
88 eahd->descTag.descVersion = cpu_to_le16(3); 91 eahd->descTag.descVersion = cpu_to_le16(3);
89 else 92 else
90 eahd->descTag.descVersion = cpu_to_le16(2); 93 eahd->descTag.descVersion = cpu_to_le16(2);
91 eahd->descTag.tagSerialNum = cpu_to_le16(UDF_SB_SERIALNUM(inode->i_sb)); 94 eahd->descTag.tagSerialNum =
92 eahd->descTag.tagLocation = cpu_to_le32(UDF_I_LOCATION(inode).logicalBlockNum); 95 cpu_to_le16(sbi->s_serial_number);
96 eahd->descTag.tagLocation = cpu_to_le32(
97 iinfo->i_location.logicalBlockNum);
93 eahd->impAttrLocation = cpu_to_le32(0xFFFFFFFF); 98 eahd->impAttrLocation = cpu_to_le32(0xFFFFFFFF);
94 eahd->appAttrLocation = cpu_to_le32(0xFFFFFFFF); 99 eahd->appAttrLocation = cpu_to_le32(0xFFFFFFFF);
95 } 100 }
96 101
97 offset = UDF_I_LENEATTR(inode); 102 offset = iinfo->i_lenEAttr;
98 if (type < 2048) { 103 if (type < 2048) {
99 if (le32_to_cpu(eahd->appAttrLocation) < UDF_I_LENEATTR(inode)) { 104 if (le32_to_cpu(eahd->appAttrLocation) <
100 uint32_t aal = le32_to_cpu(eahd->appAttrLocation); 105 iinfo->i_lenEAttr) {
106 uint32_t aal =
107 le32_to_cpu(eahd->appAttrLocation);
101 memmove(&ea[offset - aal + size], 108 memmove(&ea[offset - aal + size],
102 &ea[aal], offset - aal); 109 &ea[aal], offset - aal);
103 offset -= aal; 110 offset -= aal;
104 eahd->appAttrLocation = cpu_to_le32(aal + size); 111 eahd->appAttrLocation =
112 cpu_to_le32(aal + size);
105 } 113 }
106 if (le32_to_cpu(eahd->impAttrLocation) < UDF_I_LENEATTR(inode)) { 114 if (le32_to_cpu(eahd->impAttrLocation) <
107 uint32_t ial = le32_to_cpu(eahd->impAttrLocation); 115 iinfo->i_lenEAttr) {
116 uint32_t ial =
117 le32_to_cpu(eahd->impAttrLocation);
108 memmove(&ea[offset - ial + size], 118 memmove(&ea[offset - ial + size],
109 &ea[ial], offset - ial); 119 &ea[ial], offset - ial);
110 offset -= ial; 120 offset -= ial;
111 eahd->impAttrLocation = cpu_to_le32(ial + size); 121 eahd->impAttrLocation =
122 cpu_to_le32(ial + size);
112 } 123 }
113 } else if (type < 65536) { 124 } else if (type < 65536) {
114 if (le32_to_cpu(eahd->appAttrLocation) < UDF_I_LENEATTR(inode)) { 125 if (le32_to_cpu(eahd->appAttrLocation) <
115 uint32_t aal = le32_to_cpu(eahd->appAttrLocation); 126 iinfo->i_lenEAttr) {
127 uint32_t aal =
128 le32_to_cpu(eahd->appAttrLocation);
116 memmove(&ea[offset - aal + size], 129 memmove(&ea[offset - aal + size],
117 &ea[aal], offset - aal); 130 &ea[aal], offset - aal);
118 offset -= aal; 131 offset -= aal;
119 eahd->appAttrLocation = cpu_to_le32(aal + size); 132 eahd->appAttrLocation =
133 cpu_to_le32(aal + size);
120 } 134 }
121 } 135 }
122 /* rewrite CRC + checksum of eahd */ 136 /* rewrite CRC + checksum of eahd */
123 crclen = sizeof(struct extendedAttrHeaderDesc) - sizeof(tag); 137 crclen = sizeof(struct extendedAttrHeaderDesc) - sizeof(tag);
124 eahd->descTag.descCRCLength = cpu_to_le16(crclen); 138 eahd->descTag.descCRCLength = cpu_to_le16(crclen);
125 eahd->descTag.descCRC = cpu_to_le16(udf_crc((char *)eahd + 139 eahd->descTag.descCRC = cpu_to_le16(udf_crc((char *)eahd +
126 sizeof(tag), crclen, 0)); 140 sizeof(tag), crclen, 0));
127 eahd->descTag.tagChecksum = 0; 141 eahd->descTag.tagChecksum = udf_tag_checksum(&eahd->descTag);
128 for (i = 0; i < 16; i++) 142 iinfo->i_lenEAttr += size;
129 if (i != 4)
130 eahd->descTag.tagChecksum += ((uint8_t *)&(eahd->descTag))[i];
131 UDF_I_LENEATTR(inode) += size;
132 return (struct genericFormat *)&ea[offset]; 143 return (struct genericFormat *)&ea[offset];
133 } 144 }
134 if (loc & 0x02) { 145 if (loc & 0x02)
135 } 146 ;
136 147
137 return NULL; 148 return NULL;
138} 149}
@@ -143,18 +154,20 @@ struct genericFormat *udf_get_extendedattr(struct inode *inode, uint32_t type,
143 struct genericFormat *gaf; 154 struct genericFormat *gaf;
144 uint8_t *ea = NULL; 155 uint8_t *ea = NULL;
145 uint32_t offset; 156 uint32_t offset;
157 struct udf_inode_info *iinfo = UDF_I(inode);
146 158
147 ea = UDF_I_DATA(inode); 159 ea = iinfo->i_ext.i_data;
148 160
149 if (UDF_I_LENEATTR(inode)) { 161 if (iinfo->i_lenEAttr) {
150 struct extendedAttrHeaderDesc *eahd; 162 struct extendedAttrHeaderDesc *eahd;
151 eahd = (struct extendedAttrHeaderDesc *)ea; 163 eahd = (struct extendedAttrHeaderDesc *)ea;
152 164
153 /* check checksum/crc */ 165 /* check checksum/crc */
154 if (le16_to_cpu(eahd->descTag.tagIdent) != TAG_IDENT_EAHD || 166 if (eahd->descTag.tagIdent !=
155 le32_to_cpu(eahd->descTag.tagLocation) != UDF_I_LOCATION(inode).logicalBlockNum) { 167 cpu_to_le16(TAG_IDENT_EAHD) ||
168 le32_to_cpu(eahd->descTag.tagLocation) !=
169 iinfo->i_location.logicalBlockNum)
156 return NULL; 170 return NULL;
157 }
158 171
159 if (type < 2048) 172 if (type < 2048)
160 offset = sizeof(struct extendedAttrHeaderDesc); 173 offset = sizeof(struct extendedAttrHeaderDesc);
@@ -163,9 +176,10 @@ struct genericFormat *udf_get_extendedattr(struct inode *inode, uint32_t type,
163 else 176 else
164 offset = le32_to_cpu(eahd->appAttrLocation); 177 offset = le32_to_cpu(eahd->appAttrLocation);
165 178
166 while (offset < UDF_I_LENEATTR(inode)) { 179 while (offset < iinfo->i_lenEAttr) {
167 gaf = (struct genericFormat *)&ea[offset]; 180 gaf = (struct genericFormat *)&ea[offset];
168 if (le32_to_cpu(gaf->attrType) == type && gaf->attrSubtype == subtype) 181 if (le32_to_cpu(gaf->attrType) == type &&
182 gaf->attrSubtype == subtype)
169 return gaf; 183 return gaf;
170 else 184 else
171 offset += le32_to_cpu(gaf->attrLength); 185 offset += le32_to_cpu(gaf->attrLength);
@@ -186,21 +200,20 @@ struct genericFormat *udf_get_extendedattr(struct inode *inode, uint32_t type,
186 * Written, tested, and released. 200 * Written, tested, and released.
187 */ 201 */
188struct buffer_head *udf_read_tagged(struct super_block *sb, uint32_t block, 202struct buffer_head *udf_read_tagged(struct super_block *sb, uint32_t block,
189 uint32_t location, uint16_t * ident) 203 uint32_t location, uint16_t *ident)
190{ 204{
191 tag *tag_p; 205 tag *tag_p;
192 struct buffer_head *bh = NULL; 206 struct buffer_head *bh = NULL;
193 register uint8_t checksum; 207 struct udf_sb_info *sbi = UDF_SB(sb);
194 register int i;
195 208
196 /* Read the block */ 209 /* Read the block */
197 if (block == 0xFFFFFFFF) 210 if (block == 0xFFFFFFFF)
198 return NULL; 211 return NULL;
199 212
200 bh = udf_tread(sb, block + UDF_SB_SESSION(sb)); 213 bh = udf_tread(sb, block + sbi->s_session);
201 if (!bh) { 214 if (!bh) {
202 udf_debug("block=%d, location=%d: read failed\n", 215 udf_debug("block=%d, location=%d: read failed\n",
203 block + UDF_SB_SESSION(sb), location); 216 block + sbi->s_session, location);
204 return NULL; 217 return NULL;
205 } 218 }
206 219
@@ -210,24 +223,20 @@ struct buffer_head *udf_read_tagged(struct super_block *sb, uint32_t block,
210 223
211 if (location != le32_to_cpu(tag_p->tagLocation)) { 224 if (location != le32_to_cpu(tag_p->tagLocation)) {
212 udf_debug("location mismatch block %u, tag %u != %u\n", 225 udf_debug("location mismatch block %u, tag %u != %u\n",
213 block + UDF_SB_SESSION(sb), le32_to_cpu(tag_p->tagLocation), location); 226 block + sbi->s_session,
227 le32_to_cpu(tag_p->tagLocation), location);
214 goto error_out; 228 goto error_out;
215 } 229 }
216 230
217 /* Verify the tag checksum */ 231 /* Verify the tag checksum */
218 checksum = 0U; 232 if (udf_tag_checksum(tag_p) != tag_p->tagChecksum) {
219 for (i = 0; i < 4; i++)
220 checksum += (uint8_t)(bh->b_data[i]);
221 for (i = 5; i < 16; i++)
222 checksum += (uint8_t)(bh->b_data[i]);
223 if (checksum != tag_p->tagChecksum) {
224 printk(KERN_ERR "udf: tag checksum failed block %d\n", block); 233 printk(KERN_ERR "udf: tag checksum failed block %d\n", block);
225 goto error_out; 234 goto error_out;
226 } 235 }
227 236
228 /* Verify the tag version */ 237 /* Verify the tag version */
229 if (le16_to_cpu(tag_p->descVersion) != 0x0002U && 238 if (tag_p->descVersion != cpu_to_le16(0x0002U) &&
230 le16_to_cpu(tag_p->descVersion) != 0x0003U) { 239 tag_p->descVersion != cpu_to_le16(0x0003U)) {
231 udf_debug("tag version 0x%04x != 0x0002 || 0x0003 block %d\n", 240 udf_debug("tag version 0x%04x != 0x0002 || 0x0003 block %d\n",
232 le16_to_cpu(tag_p->descVersion), block); 241 le16_to_cpu(tag_p->descVersion), block);
233 goto error_out; 242 goto error_out;
@@ -236,11 +245,11 @@ struct buffer_head *udf_read_tagged(struct super_block *sb, uint32_t block,
236 /* Verify the descriptor CRC */ 245 /* Verify the descriptor CRC */
237 if (le16_to_cpu(tag_p->descCRCLength) + sizeof(tag) > sb->s_blocksize || 246 if (le16_to_cpu(tag_p->descCRCLength) + sizeof(tag) > sb->s_blocksize ||
238 le16_to_cpu(tag_p->descCRC) == udf_crc(bh->b_data + sizeof(tag), 247 le16_to_cpu(tag_p->descCRC) == udf_crc(bh->b_data + sizeof(tag),
239 le16_to_cpu(tag_p->descCRCLength), 0)) { 248 le16_to_cpu(tag_p->descCRCLength), 0))
240 return bh; 249 return bh;
241 } 250
242 udf_debug("Crc failure block %d: crc = %d, crclen = %d\n", 251 udf_debug("Crc failure block %d: crc = %d, crclen = %d\n",
243 block + UDF_SB_SESSION(sb), le16_to_cpu(tag_p->descCRC), 252 block + sbi->s_session, le16_to_cpu(tag_p->descCRC),
244 le16_to_cpu(tag_p->descCRCLength)); 253 le16_to_cpu(tag_p->descCRCLength));
245 254
246error_out: 255error_out:
@@ -249,7 +258,7 @@ error_out:
249} 258}
250 259
251struct buffer_head *udf_read_ptagged(struct super_block *sb, kernel_lb_addr loc, 260struct buffer_head *udf_read_ptagged(struct super_block *sb, kernel_lb_addr loc,
252 uint32_t offset, uint16_t * ident) 261 uint32_t offset, uint16_t *ident)
253{ 262{
254 return udf_read_tagged(sb, udf_get_lb_pblock(sb, loc, offset), 263 return udf_read_tagged(sb, udf_get_lb_pblock(sb, loc, offset),
255 loc.logicalBlockNum + offset, ident); 264 loc.logicalBlockNum + offset, ident);
@@ -258,17 +267,11 @@ struct buffer_head *udf_read_ptagged(struct super_block *sb, kernel_lb_addr loc,
258void udf_update_tag(char *data, int length) 267void udf_update_tag(char *data, int length)
259{ 268{
260 tag *tptr = (tag *)data; 269 tag *tptr = (tag *)data;
261 int i;
262
263 length -= sizeof(tag); 270 length -= sizeof(tag);
264 271
265 tptr->tagChecksum = 0;
266 tptr->descCRCLength = cpu_to_le16(length); 272 tptr->descCRCLength = cpu_to_le16(length);
267 tptr->descCRC = cpu_to_le16(udf_crc(data + sizeof(tag), length, 0)); 273 tptr->descCRC = cpu_to_le16(udf_crc(data + sizeof(tag), length, 0));
268 274 tptr->tagChecksum = udf_tag_checksum(tptr);
269 for (i = 0; i < 16; i++)
270 if (i != 4)
271 tptr->tagChecksum += (uint8_t)(data[i]);
272} 275}
273 276
274void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum, 277void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
@@ -281,3 +284,14 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
281 tptr->tagLocation = cpu_to_le32(loc); 284 tptr->tagLocation = cpu_to_le32(loc);
282 udf_update_tag(data, length); 285 udf_update_tag(data, length);
283} 286}
287
288u8 udf_tag_checksum(const tag *t)
289{
290 u8 *data = (u8 *)t;
291 u8 checksum = 0;
292 int i;
293 for (i = 0; i < sizeof(tag); ++i)
294 if (i != 4) /* position of checksum */
295 checksum += data[i];
296 return checksum;
297}
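The new udf_tag_checksum() helper above is what lets the patch drop the open-coded "sum every byte except index 4" loops from inode.c, misc.c and namei.c: byte 4 of the 16-byte descriptor tag is the checksum field itself, so it is skipped while the remaining bytes are summed. A self-contained sketch of the same rule over a simplified tag layout:

/* Sketch: same byte-sum-skipping-offset-4 rule as udf_tag_checksum()
 * in the patch; the tag struct below is a simplified stand-in. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct tag {				/* 16 bytes, no padding on common ABIs */
	uint16_t tagIdent;
	uint16_t descVersion;
	uint8_t  tagChecksum;		/* byte 4: excluded from its own sum */
	uint8_t  reserved;
	uint16_t tagSerialNum;
	uint16_t descCRC;
	uint16_t descCRCLength;
	uint32_t tagLocation;
};

static uint8_t tag_checksum(const struct tag *t)
{
	const uint8_t *data = (const uint8_t *)t;
	uint8_t checksum = 0;
	int i;

	for (i = 0; i < (int)sizeof(struct tag); ++i)
		if (i != 4)		/* position of the checksum byte */
			checksum += data[i];
	return checksum;
}

int main(void)
{
	struct tag t;

	memset(&t, 0, sizeof(t));
	t.tagIdent = 0x0105;		/* arbitrary sample values */
	t.tagLocation = 1234;
	t.tagChecksum = tag_checksum(&t); /* checksum is stored last */
	printf("checksum=%u\n", (unsigned)t.tagChecksum);
	return 0;
}

Callers such as udf_update_tag() in the misc.c hunk fill in descCRCLength and descCRC first and store the tag checksum last, matching the order in this sketch.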
diff --git a/fs/udf/namei.c b/fs/udf/namei.c
index bec96a6b3343..112a5fb0b27b 100644
--- a/fs/udf/namei.c
+++ b/fs/udf/namei.c
@@ -43,12 +43,10 @@ static inline int udf_match(int len1, const char *name1, int len2,
43 43
44int udf_write_fi(struct inode *inode, struct fileIdentDesc *cfi, 44int udf_write_fi(struct inode *inode, struct fileIdentDesc *cfi,
45 struct fileIdentDesc *sfi, struct udf_fileident_bh *fibh, 45 struct fileIdentDesc *sfi, struct udf_fileident_bh *fibh,
46 uint8_t * impuse, uint8_t * fileident) 46 uint8_t *impuse, uint8_t *fileident)
47{ 47{
48 uint16_t crclen = fibh->eoffset - fibh->soffset - sizeof(tag); 48 uint16_t crclen = fibh->eoffset - fibh->soffset - sizeof(tag);
49 uint16_t crc; 49 uint16_t crc;
50 uint8_t checksum = 0;
51 int i;
52 int offset; 50 int offset;
53 uint16_t liu = le16_to_cpu(cfi->lengthOfImpUse); 51 uint16_t liu = le16_to_cpu(cfi->lengthOfImpUse);
54 uint8_t lfi = cfi->lengthFileIdent; 52 uint8_t lfi = cfi->lengthFileIdent;
@@ -56,7 +54,7 @@ int udf_write_fi(struct inode *inode, struct fileIdentDesc *cfi,
56 sizeof(struct fileIdentDesc); 54 sizeof(struct fileIdentDesc);
57 int adinicb = 0; 55 int adinicb = 0;
58 56
59 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB) 57 if (UDF_I(inode)->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
60 adinicb = 1; 58 adinicb = 1;
61 59
62 offset = fibh->soffset + sizeof(struct fileIdentDesc); 60 offset = fibh->soffset + sizeof(struct fileIdentDesc);
@@ -68,7 +66,8 @@ int udf_write_fi(struct inode *inode, struct fileIdentDesc *cfi,
68 memcpy(fibh->ebh->b_data + offset, impuse, liu); 66 memcpy(fibh->ebh->b_data + offset, impuse, liu);
69 } else { 67 } else {
70 memcpy((uint8_t *)sfi->impUse, impuse, -offset); 68 memcpy((uint8_t *)sfi->impUse, impuse, -offset);
71 memcpy(fibh->ebh->b_data, impuse - offset, liu + offset); 69 memcpy(fibh->ebh->b_data, impuse - offset,
70 liu + offset);
72 } 71 }
73 } 72 }
74 73
@@ -80,8 +79,10 @@ int udf_write_fi(struct inode *inode, struct fileIdentDesc *cfi,
80 } else if (offset >= 0) { 79 } else if (offset >= 0) {
81 memcpy(fibh->ebh->b_data + offset, fileident, lfi); 80 memcpy(fibh->ebh->b_data + offset, fileident, lfi);
82 } else { 81 } else {
83 memcpy((uint8_t *)sfi->fileIdent + liu, fileident, -offset); 82 memcpy((uint8_t *)sfi->fileIdent + liu, fileident,
84 memcpy(fibh->ebh->b_data, fileident - offset, lfi + offset); 83 -offset);
84 memcpy(fibh->ebh->b_data, fileident - offset,
85 lfi + offset);
85 } 86 }
86 } 87 }
87 88
@@ -101,27 +102,29 @@ int udf_write_fi(struct inode *inode, struct fileIdentDesc *cfi,
101 102
102 if (fibh->sbh == fibh->ebh) { 103 if (fibh->sbh == fibh->ebh) {
103 crc = udf_crc((uint8_t *)sfi->impUse, 104 crc = udf_crc((uint8_t *)sfi->impUse,
104 crclen + sizeof(tag) - sizeof(struct fileIdentDesc), crc); 105 crclen + sizeof(tag) -
106 sizeof(struct fileIdentDesc), crc);
105 } else if (sizeof(struct fileIdentDesc) >= -fibh->soffset) { 107 } else if (sizeof(struct fileIdentDesc) >= -fibh->soffset) {
106 crc = udf_crc(fibh->ebh->b_data + sizeof(struct fileIdentDesc) + fibh->soffset, 108 crc = udf_crc(fibh->ebh->b_data +
107 crclen + sizeof(tag) - sizeof(struct fileIdentDesc), crc); 109 sizeof(struct fileIdentDesc) +
110 fibh->soffset,
111 crclen + sizeof(tag) -
112 sizeof(struct fileIdentDesc),
113 crc);
108 } else { 114 } else {
109 crc = udf_crc((uint8_t *)sfi->impUse, 115 crc = udf_crc((uint8_t *)sfi->impUse,
110 -fibh->soffset - sizeof(struct fileIdentDesc), crc); 116 -fibh->soffset - sizeof(struct fileIdentDesc),
117 crc);
111 crc = udf_crc(fibh->ebh->b_data, fibh->eoffset, crc); 118 crc = udf_crc(fibh->ebh->b_data, fibh->eoffset, crc);
112 } 119 }
113 120
114 cfi->descTag.descCRC = cpu_to_le16(crc); 121 cfi->descTag.descCRC = cpu_to_le16(crc);
115 cfi->descTag.descCRCLength = cpu_to_le16(crclen); 122 cfi->descTag.descCRCLength = cpu_to_le16(crclen);
123 cfi->descTag.tagChecksum = udf_tag_checksum(&cfi->descTag);
116 124
117 for (i = 0; i < 16; i++) {
118 if (i != 4)
119 checksum += ((uint8_t *)&cfi->descTag)[i];
120 }
121
122 cfi->descTag.tagChecksum = checksum;
123 if (adinicb || (sizeof(struct fileIdentDesc) <= -fibh->soffset)) { 125 if (adinicb || (sizeof(struct fileIdentDesc) <= -fibh->soffset)) {
124 memcpy((uint8_t *)sfi, (uint8_t *)cfi, sizeof(struct fileIdentDesc)); 126 memcpy((uint8_t *)sfi, (uint8_t *)cfi,
127 sizeof(struct fileIdentDesc));
125 } else { 128 } else {
126 memcpy((uint8_t *)sfi, (uint8_t *)cfi, -fibh->soffset); 129 memcpy((uint8_t *)sfi, (uint8_t *)cfi, -fibh->soffset);
127 memcpy(fibh->ebh->b_data, (uint8_t *)cfi - fibh->soffset, 130 memcpy(fibh->ebh->b_data, (uint8_t *)cfi - fibh->soffset,
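The hunk above drops the open-coded checksum loop in favour of a udf_tag_checksum() helper. The sketch below is a minimal user-space rendering of the same computation, assuming the helper simply sums the 16 bytes of the descriptor tag while skipping byte 4, which is the tagChecksum field itself (exactly what the removed loop did); the standalone function name and raw-byte interface are illustrative, not the kernel prototype.

#include <stdint.h>

/* Sum the 16 descriptor-tag bytes, skipping byte 4 (the checksum field),
 * mirroring the loop removed from udf_write_fi() above. */
static uint8_t tag_checksum(const uint8_t tag[16])
{
	uint8_t sum = 0;
	int i;

	for (i = 0; i < 16; i++)
		if (i != 4)
			sum += tag[i];
	return sum;
}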
@@ -155,26 +158,28 @@ static struct fileIdentDesc *udf_find_entry(struct inode *dir,
155 uint32_t elen; 158 uint32_t elen;
156 sector_t offset; 159 sector_t offset;
157 struct extent_position epos = {}; 160 struct extent_position epos = {};
161 struct udf_inode_info *dinfo = UDF_I(dir);
158 162
159 size = (udf_ext0_offset(dir) + dir->i_size) >> 2; 163 size = udf_ext0_offset(dir) + dir->i_size;
160 f_pos = (udf_ext0_offset(dir) >> 2); 164 f_pos = udf_ext0_offset(dir);
161 165
162 fibh->soffset = fibh->eoffset = (f_pos & ((dir->i_sb->s_blocksize - 1) >> 2)) << 2; 166 fibh->soffset = fibh->eoffset = f_pos & (dir->i_sb->s_blocksize - 1);
163 if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB) { 167 if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
164 fibh->sbh = fibh->ebh = NULL; 168 fibh->sbh = fibh->ebh = NULL;
165 } else if (inode_bmap(dir, f_pos >> (dir->i_sb->s_blocksize_bits - 2), 169 else if (inode_bmap(dir, f_pos >> dir->i_sb->s_blocksize_bits,
166 &epos, &eloc, &elen, &offset) == (EXT_RECORDED_ALLOCATED >> 30)) { 170 &epos, &eloc, &elen, &offset) ==
171 (EXT_RECORDED_ALLOCATED >> 30)) {
167 block = udf_get_lb_pblock(dir->i_sb, eloc, offset); 172 block = udf_get_lb_pblock(dir->i_sb, eloc, offset);
168 if ((++offset << dir->i_sb->s_blocksize_bits) < elen) { 173 if ((++offset << dir->i_sb->s_blocksize_bits) < elen) {
169 if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_SHORT) 174 if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
170 epos.offset -= sizeof(short_ad); 175 epos.offset -= sizeof(short_ad);
171 else if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_LONG) 176 else if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
172 epos.offset -= sizeof(long_ad); 177 epos.offset -= sizeof(long_ad);
173 } else { 178 } else
174 offset = 0; 179 offset = 0;
175 }
176 180
177 if (!(fibh->sbh = fibh->ebh = udf_tread(dir->i_sb, block))) { 181 fibh->sbh = fibh->ebh = udf_tread(dir->i_sb, block);
182 if (!fibh->sbh) {
178 brelse(epos.bh); 183 brelse(epos.bh);
179 return NULL; 184 return NULL;
180 } 185 }
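This hunk also changes the unit of f_pos and size in udf_find_entry() from 4-byte words (hence the >> 2 and blocksize_bits - 2 shifts in the old code) to plain bytes. The resulting block index is the same either way; the snippet below, with made-up example values, just checks that equivalence.

#include <assert.h>
#include <stdint.h>

int main(void)
{
	unsigned int blocksize_bits = 11;	/* 2048-byte blocks, for example */
	uint64_t byte_pos = 5000;		/* byte offset into the directory */
	uint64_t word_pos = byte_pos >> 2;	/* old code tracked 4-byte units */

	/* old: word_pos >> (bits - 2), new: byte_pos >> bits; same block */
	assert((word_pos >> (blocksize_bits - 2)) == (byte_pos >> blocksize_bits));
	return 0;
}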
@@ -183,7 +188,7 @@ static struct fileIdentDesc *udf_find_entry(struct inode *dir,
183 return NULL; 188 return NULL;
184 } 189 }
185 190
186 while ((f_pos < size)) { 191 while (f_pos < size) {
187 fi = udf_fileident_read(dir, &f_pos, fibh, cfi, &epos, &eloc, 192 fi = udf_fileident_read(dir, &f_pos, fibh, cfi, &epos, &eloc,
188 &elen, &offset); 193 &elen, &offset);
189 if (!fi) { 194 if (!fi) {
@@ -202,14 +207,18 @@ static struct fileIdentDesc *udf_find_entry(struct inode *dir,
202 } else { 207 } else {
203 int poffset; /* Unpaded ending offset */ 208 int poffset; /* Unpaded ending offset */
204 209
205 poffset = fibh->soffset + sizeof(struct fileIdentDesc) + liu + lfi; 210 poffset = fibh->soffset + sizeof(struct fileIdentDesc) +
211 liu + lfi;
206 212
207 if (poffset >= lfi) { 213 if (poffset >= lfi)
208 nameptr = (uint8_t *)(fibh->ebh->b_data + poffset - lfi); 214 nameptr = (uint8_t *)(fibh->ebh->b_data +
209 } else { 215 poffset - lfi);
216 else {
210 nameptr = fname; 217 nameptr = fname;
211 memcpy(nameptr, fi->fileIdent + liu, lfi - poffset); 218 memcpy(nameptr, fi->fileIdent + liu,
212 memcpy(nameptr + lfi - poffset, fibh->ebh->b_data, poffset); 219 lfi - poffset);
220 memcpy(nameptr + lfi - poffset,
221 fibh->ebh->b_data, poffset);
213 } 222 }
214 } 223 }
215 224
@@ -226,11 +235,11 @@ static struct fileIdentDesc *udf_find_entry(struct inode *dir,
226 if (!lfi) 235 if (!lfi)
227 continue; 236 continue;
228 237
229 if ((flen = udf_get_filename(dir->i_sb, nameptr, fname, lfi))) { 238 flen = udf_get_filename(dir->i_sb, nameptr, fname, lfi);
230 if (udf_match(flen, fname, dentry->d_name.len, dentry->d_name.name)) { 239 if (flen && udf_match(flen, fname, dentry->d_name.len,
231 brelse(epos.bh); 240 dentry->d_name.name)) {
232 return fi; 241 brelse(epos.bh);
233 } 242 return fi;
234 } 243 }
235 } 244 }
236 245
@@ -291,16 +300,16 @@ static struct dentry *udf_lookup(struct inode *dir, struct dentry *dentry,
291 if (!strncmp(dentry->d_name.name, ".B=", 3)) { 300 if (!strncmp(dentry->d_name.name, ".B=", 3)) {
292 kernel_lb_addr lb = { 301 kernel_lb_addr lb = {
293 .logicalBlockNum = 0, 302 .logicalBlockNum = 0,
294 .partitionReferenceNum = simple_strtoul(dentry->d_name.name + 3, 303 .partitionReferenceNum =
295 NULL, 0), 304 simple_strtoul(dentry->d_name.name + 3,
305 NULL, 0),
296 }; 306 };
297 inode = udf_iget(dir->i_sb, lb); 307 inode = udf_iget(dir->i_sb, lb);
298 if (!inode) { 308 if (!inode) {
299 unlock_kernel(); 309 unlock_kernel();
300 return ERR_PTR(-EACCES); 310 return ERR_PTR(-EACCES);
301 } 311 }
302 } 312 } else
303 else
304#endif /* UDF_RECOVERY */ 313#endif /* UDF_RECOVERY */
305 314
306 if (udf_find_entry(dir, dentry, &fibh, &cfi)) { 315 if (udf_find_entry(dir, dentry, &fibh, &cfi)) {
@@ -325,14 +334,14 @@ static struct fileIdentDesc *udf_add_entry(struct inode *dir,
325 struct udf_fileident_bh *fibh, 334 struct udf_fileident_bh *fibh,
326 struct fileIdentDesc *cfi, int *err) 335 struct fileIdentDesc *cfi, int *err)
327{ 336{
328 struct super_block *sb; 337 struct super_block *sb = dir->i_sb;
329 struct fileIdentDesc *fi = NULL; 338 struct fileIdentDesc *fi = NULL;
330 char name[UDF_NAME_LEN], fname[UDF_NAME_LEN]; 339 char name[UDF_NAME_LEN], fname[UDF_NAME_LEN];
331 int namelen; 340 int namelen;
332 loff_t f_pos; 341 loff_t f_pos;
333 int flen; 342 int flen;
334 char *nameptr; 343 char *nameptr;
335 loff_t size = (udf_ext0_offset(dir) + dir->i_size) >> 2; 344 loff_t size = udf_ext0_offset(dir) + dir->i_size;
336 int nfidlen; 345 int nfidlen;
337 uint8_t lfi; 346 uint8_t lfi;
338 uint16_t liu; 347 uint16_t liu;
@@ -341,16 +350,16 @@ static struct fileIdentDesc *udf_add_entry(struct inode *dir,
341 uint32_t elen; 350 uint32_t elen;
342 sector_t offset; 351 sector_t offset;
343 struct extent_position epos = {}; 352 struct extent_position epos = {};
344 353 struct udf_inode_info *dinfo;
345 sb = dir->i_sb;
346 354
347 if (dentry) { 355 if (dentry) {
348 if (!dentry->d_name.len) { 356 if (!dentry->d_name.len) {
349 *err = -EINVAL; 357 *err = -EINVAL;
350 return NULL; 358 return NULL;
351 } 359 }
352 if (!(namelen = udf_put_filename(sb, dentry->d_name.name, name, 360 namelen = udf_put_filename(sb, dentry->d_name.name, name,
353 dentry->d_name.len))) { 361 dentry->d_name.len);
362 if (!namelen) {
354 *err = -ENAMETOOLONG; 363 *err = -ENAMETOOLONG;
355 return NULL; 364 return NULL;
356 } 365 }
@@ -360,39 +369,40 @@ static struct fileIdentDesc *udf_add_entry(struct inode *dir,
360 369
361 nfidlen = (sizeof(struct fileIdentDesc) + namelen + 3) & ~3; 370 nfidlen = (sizeof(struct fileIdentDesc) + namelen + 3) & ~3;
362 371
363 f_pos = (udf_ext0_offset(dir) >> 2); 372 f_pos = udf_ext0_offset(dir);
364 373
365 fibh->soffset = fibh->eoffset = (f_pos & ((dir->i_sb->s_blocksize - 1) >> 2)) << 2; 374 fibh->soffset = fibh->eoffset = f_pos & (dir->i_sb->s_blocksize - 1);
366 if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB) { 375 dinfo = UDF_I(dir);
376 if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
367 fibh->sbh = fibh->ebh = NULL; 377 fibh->sbh = fibh->ebh = NULL;
368 } else if (inode_bmap(dir, f_pos >> (dir->i_sb->s_blocksize_bits - 2), 378 else if (inode_bmap(dir, f_pos >> dir->i_sb->s_blocksize_bits,
369 &epos, &eloc, &elen, &offset) == (EXT_RECORDED_ALLOCATED >> 30)) { 379 &epos, &eloc, &elen, &offset) ==
380 (EXT_RECORDED_ALLOCATED >> 30)) {
370 block = udf_get_lb_pblock(dir->i_sb, eloc, offset); 381 block = udf_get_lb_pblock(dir->i_sb, eloc, offset);
371 if ((++offset << dir->i_sb->s_blocksize_bits) < elen) { 382 if ((++offset << dir->i_sb->s_blocksize_bits) < elen) {
372 if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_SHORT) 383 if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
373 epos.offset -= sizeof(short_ad); 384 epos.offset -= sizeof(short_ad);
374 else if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_LONG) 385 else if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
375 epos.offset -= sizeof(long_ad); 386 epos.offset -= sizeof(long_ad);
376 } else { 387 } else
377 offset = 0; 388 offset = 0;
378 }
379 389
380 if (!(fibh->sbh = fibh->ebh = udf_tread(dir->i_sb, block))) { 390 fibh->sbh = fibh->ebh = udf_tread(dir->i_sb, block);
391 if (!fibh->sbh) {
381 brelse(epos.bh); 392 brelse(epos.bh);
382 *err = -EIO; 393 *err = -EIO;
383 return NULL; 394 return NULL;
384 } 395 }
385 396
386 block = UDF_I_LOCATION(dir).logicalBlockNum; 397 block = dinfo->i_location.logicalBlockNum;
387
388 } else { 398 } else {
389 block = udf_get_lb_pblock(dir->i_sb, UDF_I_LOCATION(dir), 0); 399 block = udf_get_lb_pblock(dir->i_sb, dinfo->i_location, 0);
390 fibh->sbh = fibh->ebh = NULL; 400 fibh->sbh = fibh->ebh = NULL;
391 fibh->soffset = fibh->eoffset = sb->s_blocksize; 401 fibh->soffset = fibh->eoffset = sb->s_blocksize;
392 goto add; 402 goto add;
393 } 403 }
394 404
395 while ((f_pos < size)) { 405 while (f_pos < size) {
396 fi = udf_fileident_read(dir, &f_pos, fibh, cfi, &epos, &eloc, 406 fi = udf_fileident_read(dir, &f_pos, fibh, cfi, &epos, &eloc,
397 &elen, &offset); 407 &elen, &offset);
398 408
@@ -408,33 +418,39 @@ static struct fileIdentDesc *udf_add_entry(struct inode *dir,
408 liu = le16_to_cpu(cfi->lengthOfImpUse); 418 liu = le16_to_cpu(cfi->lengthOfImpUse);
409 lfi = cfi->lengthFileIdent; 419 lfi = cfi->lengthFileIdent;
410 420
411 if (fibh->sbh == fibh->ebh) { 421 if (fibh->sbh == fibh->ebh)
412 nameptr = fi->fileIdent + liu; 422 nameptr = fi->fileIdent + liu;
413 } else { 423 else {
414 int poffset; /* Unpaded ending offset */ 424 int poffset; /* Unpaded ending offset */
415 425
416 poffset = fibh->soffset + sizeof(struct fileIdentDesc) + liu + lfi; 426 poffset = fibh->soffset + sizeof(struct fileIdentDesc) +
427 liu + lfi;
417 428
418 if (poffset >= lfi) { 429 if (poffset >= lfi)
419 nameptr = (char *)(fibh->ebh->b_data + poffset - lfi); 430 nameptr = (char *)(fibh->ebh->b_data +
420 } else { 431 poffset - lfi);
432 else {
421 nameptr = fname; 433 nameptr = fname;
422 memcpy(nameptr, fi->fileIdent + liu, lfi - poffset); 434 memcpy(nameptr, fi->fileIdent + liu,
423 memcpy(nameptr + lfi - poffset, fibh->ebh->b_data, poffset); 435 lfi - poffset);
436 memcpy(nameptr + lfi - poffset,
437 fibh->ebh->b_data, poffset);
424 } 438 }
425 } 439 }
426 440
427 if ((cfi->fileCharacteristics & FID_FILE_CHAR_DELETED) != 0) { 441 if ((cfi->fileCharacteristics & FID_FILE_CHAR_DELETED) != 0) {
428 if (((sizeof(struct fileIdentDesc) + liu + lfi + 3) & ~3) == nfidlen) { 442 if (((sizeof(struct fileIdentDesc) +
443 liu + lfi + 3) & ~3) == nfidlen) {
429 brelse(epos.bh); 444 brelse(epos.bh);
430 cfi->descTag.tagSerialNum = cpu_to_le16(1); 445 cfi->descTag.tagSerialNum = cpu_to_le16(1);
431 cfi->fileVersionNum = cpu_to_le16(1); 446 cfi->fileVersionNum = cpu_to_le16(1);
432 cfi->fileCharacteristics = 0; 447 cfi->fileCharacteristics = 0;
433 cfi->lengthFileIdent = namelen; 448 cfi->lengthFileIdent = namelen;
434 cfi->lengthOfImpUse = cpu_to_le16(0); 449 cfi->lengthOfImpUse = cpu_to_le16(0);
435 if (!udf_write_fi(dir, cfi, fi, fibh, NULL, name)) { 450 if (!udf_write_fi(dir, cfi, fi, fibh, NULL,
451 name))
436 return fi; 452 return fi;
437 } else { 453 else {
438 *err = -EIO; 454 *err = -EIO;
439 return NULL; 455 return NULL;
440 } 456 }
@@ -444,8 +460,9 @@ static struct fileIdentDesc *udf_add_entry(struct inode *dir,
444 if (!lfi || !dentry) 460 if (!lfi || !dentry)
445 continue; 461 continue;
446 462
447 if ((flen = udf_get_filename(dir->i_sb, nameptr, fname, lfi)) && 463 flen = udf_get_filename(dir->i_sb, nameptr, fname, lfi);
448 udf_match(flen, fname, dentry->d_name.len, dentry->d_name.name)) { 464 if (flen && udf_match(flen, fname, dentry->d_name.len,
465 dentry->d_name.name)) {
449 if (fibh->sbh != fibh->ebh) 466 if (fibh->sbh != fibh->ebh)
450 brelse(fibh->ebh); 467 brelse(fibh->ebh);
451 brelse(fibh->sbh); 468 brelse(fibh->sbh);
@@ -456,29 +473,34 @@ static struct fileIdentDesc *udf_add_entry(struct inode *dir,
456 } 473 }
457 474
458add: 475add:
476 if (dinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
477 elen = (elen + sb->s_blocksize - 1) & ~(sb->s_blocksize - 1);
478 if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
479 epos.offset -= sizeof(short_ad);
480 else if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
481 epos.offset -= sizeof(long_ad);
482 udf_write_aext(dir, &epos, eloc, elen, 1);
483 }
459 f_pos += nfidlen; 484 f_pos += nfidlen;
460 485
461 if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB && 486 if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB &&
462 sb->s_blocksize - fibh->eoffset < nfidlen) { 487 sb->s_blocksize - fibh->eoffset < nfidlen) {
463 brelse(epos.bh); 488 brelse(epos.bh);
464 epos.bh = NULL; 489 epos.bh = NULL;
465 fibh->soffset -= udf_ext0_offset(dir); 490 fibh->soffset -= udf_ext0_offset(dir);
466 fibh->eoffset -= udf_ext0_offset(dir); 491 fibh->eoffset -= udf_ext0_offset(dir);
467 f_pos -= (udf_ext0_offset(dir) >> 2); 492 f_pos -= udf_ext0_offset(dir);
468 if (fibh->sbh != fibh->ebh) 493 if (fibh->sbh != fibh->ebh)
469 brelse(fibh->ebh); 494 brelse(fibh->ebh);
470 brelse(fibh->sbh); 495 brelse(fibh->sbh);
471 if (!(fibh->sbh = fibh->ebh = udf_expand_dir_adinicb(dir, &block, err))) 496 fibh->sbh = fibh->ebh =
497 udf_expand_dir_adinicb(dir, &block, err);
498 if (!fibh->sbh)
472 return NULL; 499 return NULL;
473 epos.block = UDF_I_LOCATION(dir); 500 epos.block = dinfo->i_location;
474 eloc.logicalBlockNum = block;
475 eloc.partitionReferenceNum = UDF_I_LOCATION(dir).partitionReferenceNum;
476 elen = dir->i_sb->s_blocksize;
477 epos.offset = udf_file_entry_alloc_offset(dir); 501 epos.offset = udf_file_entry_alloc_offset(dir);
478 if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_SHORT) 502 /* Load extent udf_expand_dir_adinicb() has created */
479 epos.offset += sizeof(short_ad); 503 udf_current_aext(dir, &epos, &eloc, &elen, 1);
480 else if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_LONG)
481 epos.offset += sizeof(long_ad);
482 } 504 }
483 505
484 if (sb->s_blocksize - fibh->eoffset >= nfidlen) { 506 if (sb->s_blocksize - fibh->eoffset >= nfidlen) {
@@ -489,15 +511,19 @@ add:
489 fibh->sbh = fibh->ebh; 511 fibh->sbh = fibh->ebh;
490 } 512 }
491 513
492 if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB) { 514 if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
493 block = UDF_I_LOCATION(dir).logicalBlockNum; 515 block = dinfo->i_location.logicalBlockNum;
494 fi = (struct fileIdentDesc *)(UDF_I_DATA(dir) + fibh->soffset - 516 fi = (struct fileIdentDesc *)
495 udf_ext0_offset(dir) + 517 (dinfo->i_ext.i_data +
496 UDF_I_LENEATTR(dir)); 518 fibh->soffset -
519 udf_ext0_offset(dir) +
520 dinfo->i_lenEAttr);
497 } else { 521 } else {
498 block = eloc.logicalBlockNum + ((elen - 1) >> 522 block = eloc.logicalBlockNum +
499 dir->i_sb->s_blocksize_bits); 523 ((elen - 1) >>
500 fi = (struct fileIdentDesc *)(fibh->sbh->b_data + fibh->soffset); 524 dir->i_sb->s_blocksize_bits);
525 fi = (struct fileIdentDesc *)
526 (fibh->sbh->b_data + fibh->soffset);
501 } 527 }
502 } else { 528 } else {
503 fibh->soffset = fibh->eoffset - sb->s_blocksize; 529 fibh->soffset = fibh->eoffset - sb->s_blocksize;
@@ -509,7 +535,8 @@ add:
509 535
510 block = eloc.logicalBlockNum + ((elen - 1) >> 536 block = eloc.logicalBlockNum + ((elen - 1) >>
511 dir->i_sb->s_blocksize_bits); 537 dir->i_sb->s_blocksize_bits);
512 fibh->ebh = udf_bread(dir, f_pos >> (dir->i_sb->s_blocksize_bits - 2), 1, err); 538 fibh->ebh = udf_bread(dir,
539 f_pos >> dir->i_sb->s_blocksize_bits, 1, err);
513 if (!fibh->ebh) { 540 if (!fibh->ebh) {
514 brelse(epos.bh); 541 brelse(epos.bh);
515 brelse(fibh->sbh); 542 brelse(fibh->sbh);
@@ -521,32 +548,34 @@ add:
521 (EXT_RECORDED_ALLOCATED >> 30)) { 548 (EXT_RECORDED_ALLOCATED >> 30)) {
522 block = eloc.logicalBlockNum + ((elen - 1) >> 549 block = eloc.logicalBlockNum + ((elen - 1) >>
523 dir->i_sb->s_blocksize_bits); 550 dir->i_sb->s_blocksize_bits);
524 } else { 551 } else
525 block++; 552 block++;
526 }
527 553
528 brelse(fibh->sbh); 554 brelse(fibh->sbh);
529 fibh->sbh = fibh->ebh; 555 fibh->sbh = fibh->ebh;
530 fi = (struct fileIdentDesc *)(fibh->sbh->b_data); 556 fi = (struct fileIdentDesc *)(fibh->sbh->b_data);
531 } else { 557 } else {
532 fi = (struct fileIdentDesc *) 558 fi = (struct fileIdentDesc *)
533 (fibh->sbh->b_data + sb->s_blocksize + fibh->soffset); 559 (fibh->sbh->b_data + sb->s_blocksize +
560 fibh->soffset);
534 } 561 }
535 } 562 }
536 563
537 memset(cfi, 0, sizeof(struct fileIdentDesc)); 564 memset(cfi, 0, sizeof(struct fileIdentDesc));
538 if (UDF_SB_UDFREV(sb) >= 0x0200) 565 if (UDF_SB(sb)->s_udfrev >= 0x0200)
539 udf_new_tag((char *)cfi, TAG_IDENT_FID, 3, 1, block, sizeof(tag)); 566 udf_new_tag((char *)cfi, TAG_IDENT_FID, 3, 1, block,
567 sizeof(tag));
540 else 568 else
541 udf_new_tag((char *)cfi, TAG_IDENT_FID, 2, 1, block, sizeof(tag)); 569 udf_new_tag((char *)cfi, TAG_IDENT_FID, 2, 1, block,
570 sizeof(tag));
542 cfi->fileVersionNum = cpu_to_le16(1); 571 cfi->fileVersionNum = cpu_to_le16(1);
543 cfi->lengthFileIdent = namelen; 572 cfi->lengthFileIdent = namelen;
544 cfi->lengthOfImpUse = cpu_to_le16(0); 573 cfi->lengthOfImpUse = cpu_to_le16(0);
545 if (!udf_write_fi(dir, cfi, fi, fibh, NULL, name)) { 574 if (!udf_write_fi(dir, cfi, fi, fibh, NULL, name)) {
546 brelse(epos.bh); 575 brelse(epos.bh);
547 dir->i_size += nfidlen; 576 dir->i_size += nfidlen;
548 if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB) 577 if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
549 UDF_I_LENALLOC(dir) += nfidlen; 578 dinfo->i_lenAlloc += nfidlen;
550 mark_inode_dirty(dir); 579 mark_inode_dirty(dir);
551 return fi; 580 return fi;
552 } else { 581 } else {
@@ -578,6 +607,7 @@ static int udf_create(struct inode *dir, struct dentry *dentry, int mode,
578 struct inode *inode; 607 struct inode *inode;
579 struct fileIdentDesc cfi, *fi; 608 struct fileIdentDesc cfi, *fi;
580 int err; 609 int err;
610 struct udf_inode_info *iinfo;
581 611
582 lock_kernel(); 612 lock_kernel();
583 inode = udf_new_inode(dir, mode, &err); 613 inode = udf_new_inode(dir, mode, &err);
@@ -586,7 +616,8 @@ static int udf_create(struct inode *dir, struct dentry *dentry, int mode,
586 return err; 616 return err;
587 } 617 }
588 618
589 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB) 619 iinfo = UDF_I(inode);
620 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
590 inode->i_data.a_ops = &udf_adinicb_aops; 621 inode->i_data.a_ops = &udf_adinicb_aops;
591 else 622 else
592 inode->i_data.a_ops = &udf_aops; 623 inode->i_data.a_ops = &udf_aops;
@@ -595,7 +626,8 @@ static int udf_create(struct inode *dir, struct dentry *dentry, int mode,
595 inode->i_mode = mode; 626 inode->i_mode = mode;
596 mark_inode_dirty(inode); 627 mark_inode_dirty(inode);
597 628
598 if (!(fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err))) { 629 fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err);
630 if (!fi) {
599 inode->i_nlink--; 631 inode->i_nlink--;
600 mark_inode_dirty(inode); 632 mark_inode_dirty(inode);
601 iput(inode); 633 iput(inode);
@@ -603,13 +635,12 @@ static int udf_create(struct inode *dir, struct dentry *dentry, int mode,
603 return err; 635 return err;
604 } 636 }
605 cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize); 637 cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize);
606 cfi.icb.extLocation = cpu_to_lelb(UDF_I_LOCATION(inode)); 638 cfi.icb.extLocation = cpu_to_lelb(iinfo->i_location);
607 *(__le32 *)((struct allocDescImpUse *)cfi.icb.impUse)->impUse = 639 *(__le32 *)((struct allocDescImpUse *)cfi.icb.impUse)->impUse =
608 cpu_to_le32(UDF_I_UNIQUE(inode) & 0x00000000FFFFFFFFUL); 640 cpu_to_le32(iinfo->i_unique & 0x00000000FFFFFFFFUL);
609 udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL); 641 udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL);
610 if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB) { 642 if (UDF_I(dir)->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
611 mark_inode_dirty(dir); 643 mark_inode_dirty(dir);
612 }
613 if (fibh.sbh != fibh.ebh) 644 if (fibh.sbh != fibh.ebh)
614 brelse(fibh.ebh); 645 brelse(fibh.ebh);
615 brelse(fibh.sbh); 646 brelse(fibh.sbh);
@@ -626,6 +657,7 @@ static int udf_mknod(struct inode *dir, struct dentry *dentry, int mode,
626 struct udf_fileident_bh fibh; 657 struct udf_fileident_bh fibh;
627 struct fileIdentDesc cfi, *fi; 658 struct fileIdentDesc cfi, *fi;
628 int err; 659 int err;
660 struct udf_inode_info *iinfo;
629 661
630 if (!old_valid_dev(rdev)) 662 if (!old_valid_dev(rdev))
631 return -EINVAL; 663 return -EINVAL;
@@ -636,9 +668,11 @@ static int udf_mknod(struct inode *dir, struct dentry *dentry, int mode,
636 if (!inode) 668 if (!inode)
637 goto out; 669 goto out;
638 670
671 iinfo = UDF_I(inode);
639 inode->i_uid = current->fsuid; 672 inode->i_uid = current->fsuid;
640 init_special_inode(inode, mode, rdev); 673 init_special_inode(inode, mode, rdev);
641 if (!(fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err))) { 674 fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err);
675 if (!fi) {
642 inode->i_nlink--; 676 inode->i_nlink--;
643 mark_inode_dirty(inode); 677 mark_inode_dirty(inode);
644 iput(inode); 678 iput(inode);
@@ -646,13 +680,12 @@ static int udf_mknod(struct inode *dir, struct dentry *dentry, int mode,
646 return err; 680 return err;
647 } 681 }
648 cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize); 682 cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize);
649 cfi.icb.extLocation = cpu_to_lelb(UDF_I_LOCATION(inode)); 683 cfi.icb.extLocation = cpu_to_lelb(iinfo->i_location);
650 *(__le32 *)((struct allocDescImpUse *)cfi.icb.impUse)->impUse = 684 *(__le32 *)((struct allocDescImpUse *)cfi.icb.impUse)->impUse =
651 cpu_to_le32(UDF_I_UNIQUE(inode) & 0x00000000FFFFFFFFUL); 685 cpu_to_le32(iinfo->i_unique & 0x00000000FFFFFFFFUL);
652 udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL); 686 udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL);
653 if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB) { 687 if (UDF_I(dir)->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
654 mark_inode_dirty(dir); 688 mark_inode_dirty(dir);
655 }
656 mark_inode_dirty(inode); 689 mark_inode_dirty(inode);
657 690
658 if (fibh.sbh != fibh.ebh) 691 if (fibh.sbh != fibh.ebh)
@@ -672,6 +705,8 @@ static int udf_mkdir(struct inode *dir, struct dentry *dentry, int mode)
672 struct udf_fileident_bh fibh; 705 struct udf_fileident_bh fibh;
673 struct fileIdentDesc cfi, *fi; 706 struct fileIdentDesc cfi, *fi;
674 int err; 707 int err;
708 struct udf_inode_info *dinfo = UDF_I(dir);
709 struct udf_inode_info *iinfo;
675 710
676 lock_kernel(); 711 lock_kernel();
677 err = -EMLINK; 712 err = -EMLINK;
@@ -683,9 +718,11 @@ static int udf_mkdir(struct inode *dir, struct dentry *dentry, int mode)
683 if (!inode) 718 if (!inode)
684 goto out; 719 goto out;
685 720
721 iinfo = UDF_I(inode);
686 inode->i_op = &udf_dir_inode_operations; 722 inode->i_op = &udf_dir_inode_operations;
687 inode->i_fop = &udf_dir_operations; 723 inode->i_fop = &udf_dir_operations;
688 if (!(fi = udf_add_entry(inode, NULL, &fibh, &cfi, &err))) { 724 fi = udf_add_entry(inode, NULL, &fibh, &cfi, &err);
725 if (!fi) {
689 inode->i_nlink--; 726 inode->i_nlink--;
690 mark_inode_dirty(inode); 727 mark_inode_dirty(inode);
691 iput(inode); 728 iput(inode);
@@ -693,10 +730,11 @@ static int udf_mkdir(struct inode *dir, struct dentry *dentry, int mode)
693 } 730 }
694 inode->i_nlink = 2; 731 inode->i_nlink = 2;
695 cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize); 732 cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize);
696 cfi.icb.extLocation = cpu_to_lelb(UDF_I_LOCATION(dir)); 733 cfi.icb.extLocation = cpu_to_lelb(dinfo->i_location);
697 *(__le32 *)((struct allocDescImpUse *)cfi.icb.impUse)->impUse = 734 *(__le32 *)((struct allocDescImpUse *)cfi.icb.impUse)->impUse =
698 cpu_to_le32(UDF_I_UNIQUE(dir) & 0x00000000FFFFFFFFUL); 735 cpu_to_le32(dinfo->i_unique & 0x00000000FFFFFFFFUL);
699 cfi.fileCharacteristics = FID_FILE_CHAR_DIRECTORY | FID_FILE_CHAR_PARENT; 736 cfi.fileCharacteristics =
737 FID_FILE_CHAR_DIRECTORY | FID_FILE_CHAR_PARENT;
700 udf_write_fi(inode, &cfi, fi, &fibh, NULL, NULL); 738 udf_write_fi(inode, &cfi, fi, &fibh, NULL, NULL);
701 brelse(fibh.sbh); 739 brelse(fibh.sbh);
702 inode->i_mode = S_IFDIR | mode; 740 inode->i_mode = S_IFDIR | mode;
@@ -704,16 +742,17 @@ static int udf_mkdir(struct inode *dir, struct dentry *dentry, int mode)
704 inode->i_mode |= S_ISGID; 742 inode->i_mode |= S_ISGID;
705 mark_inode_dirty(inode); 743 mark_inode_dirty(inode);
706 744
707 if (!(fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err))) { 745 fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err);
746 if (!fi) {
708 inode->i_nlink = 0; 747 inode->i_nlink = 0;
709 mark_inode_dirty(inode); 748 mark_inode_dirty(inode);
710 iput(inode); 749 iput(inode);
711 goto out; 750 goto out;
712 } 751 }
713 cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize); 752 cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize);
714 cfi.icb.extLocation = cpu_to_lelb(UDF_I_LOCATION(inode)); 753 cfi.icb.extLocation = cpu_to_lelb(iinfo->i_location);
715 *(__le32 *)((struct allocDescImpUse *)cfi.icb.impUse)->impUse = 754 *(__le32 *)((struct allocDescImpUse *)cfi.icb.impUse)->impUse =
716 cpu_to_le32(UDF_I_UNIQUE(inode) & 0x00000000FFFFFFFFUL); 755 cpu_to_le32(iinfo->i_unique & 0x00000000FFFFFFFFUL);
717 cfi.fileCharacteristics |= FID_FILE_CHAR_DIRECTORY; 756 cfi.fileCharacteristics |= FID_FILE_CHAR_DIRECTORY;
718 udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL); 757 udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL);
719 inc_nlink(dir); 758 inc_nlink(dir);
@@ -734,32 +773,33 @@ static int empty_dir(struct inode *dir)
734 struct fileIdentDesc *fi, cfi; 773 struct fileIdentDesc *fi, cfi;
735 struct udf_fileident_bh fibh; 774 struct udf_fileident_bh fibh;
736 loff_t f_pos; 775 loff_t f_pos;
737 loff_t size = (udf_ext0_offset(dir) + dir->i_size) >> 2; 776 loff_t size = udf_ext0_offset(dir) + dir->i_size;
738 int block; 777 int block;
739 kernel_lb_addr eloc; 778 kernel_lb_addr eloc;
740 uint32_t elen; 779 uint32_t elen;
741 sector_t offset; 780 sector_t offset;
742 struct extent_position epos = {}; 781 struct extent_position epos = {};
782 struct udf_inode_info *dinfo = UDF_I(dir);
743 783
744 f_pos = (udf_ext0_offset(dir) >> 2); 784 f_pos = udf_ext0_offset(dir);
785 fibh.soffset = fibh.eoffset = f_pos & (dir->i_sb->s_blocksize - 1);
745 786
746 fibh.soffset = fibh.eoffset = (f_pos & ((dir->i_sb->s_blocksize - 1) >> 2)) << 2; 787 if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
747
748 if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB) {
749 fibh.sbh = fibh.ebh = NULL; 788 fibh.sbh = fibh.ebh = NULL;
750 } else if (inode_bmap(dir, f_pos >> (dir->i_sb->s_blocksize_bits - 2), 789 else if (inode_bmap(dir, f_pos >> dir->i_sb->s_blocksize_bits,
751 &epos, &eloc, &elen, &offset) == (EXT_RECORDED_ALLOCATED >> 30)) { 790 &epos, &eloc, &elen, &offset) ==
791 (EXT_RECORDED_ALLOCATED >> 30)) {
752 block = udf_get_lb_pblock(dir->i_sb, eloc, offset); 792 block = udf_get_lb_pblock(dir->i_sb, eloc, offset);
753 if ((++offset << dir->i_sb->s_blocksize_bits) < elen) { 793 if ((++offset << dir->i_sb->s_blocksize_bits) < elen) {
754 if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_SHORT) 794 if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
755 epos.offset -= sizeof(short_ad); 795 epos.offset -= sizeof(short_ad);
756 else if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_LONG) 796 else if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
757 epos.offset -= sizeof(long_ad); 797 epos.offset -= sizeof(long_ad);
758 } else { 798 } else
759 offset = 0; 799 offset = 0;
760 }
761 800
762 if (!(fibh.sbh = fibh.ebh = udf_tread(dir->i_sb, block))) { 801 fibh.sbh = fibh.ebh = udf_tread(dir->i_sb, block);
802 if (!fibh.sbh) {
763 brelse(epos.bh); 803 brelse(epos.bh);
764 return 0; 804 return 0;
765 } 805 }
@@ -768,7 +808,7 @@ static int empty_dir(struct inode *dir)
768 return 0; 808 return 0;
769 } 809 }
770 810
771 while ((f_pos < size)) { 811 while (f_pos < size) {
772 fi = udf_fileident_read(dir, &f_pos, &fibh, &cfi, &epos, &eloc, 812 fi = udf_fileident_read(dir, &f_pos, &fibh, &cfi, &epos, &eloc,
773 &elen, &offset); 813 &elen, &offset);
774 if (!fi) { 814 if (!fi) {
@@ -828,7 +868,8 @@ static int udf_rmdir(struct inode *dir, struct dentry *dentry)
828 clear_nlink(inode); 868 clear_nlink(inode);
829 inode->i_size = 0; 869 inode->i_size = 0;
830 inode_dec_link_count(dir); 870 inode_dec_link_count(dir);
831 inode->i_ctime = dir->i_ctime = dir->i_mtime = current_fs_time(dir->i_sb); 871 inode->i_ctime = dir->i_ctime = dir->i_mtime =
872 current_fs_time(dir->i_sb);
832 mark_inode_dirty(dir); 873 mark_inode_dirty(dir);
833 874
834end_rmdir: 875end_rmdir:
@@ -901,36 +942,42 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry,
901 int block; 942 int block;
902 char name[UDF_NAME_LEN]; 943 char name[UDF_NAME_LEN];
903 int namelen; 944 int namelen;
945 struct buffer_head *bh;
946 struct udf_inode_info *iinfo;
904 947
905 lock_kernel(); 948 lock_kernel();
906 if (!(inode = udf_new_inode(dir, S_IFLNK, &err))) 949 inode = udf_new_inode(dir, S_IFLNK, &err);
950 if (!inode)
907 goto out; 951 goto out;
908 952
953 iinfo = UDF_I(inode);
909 inode->i_mode = S_IFLNK | S_IRWXUGO; 954 inode->i_mode = S_IFLNK | S_IRWXUGO;
910 inode->i_data.a_ops = &udf_symlink_aops; 955 inode->i_data.a_ops = &udf_symlink_aops;
911 inode->i_op = &page_symlink_inode_operations; 956 inode->i_op = &page_symlink_inode_operations;
912 957
913 if (UDF_I_ALLOCTYPE(inode) != ICBTAG_FLAG_AD_IN_ICB) { 958 if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
914 kernel_lb_addr eloc; 959 kernel_lb_addr eloc;
915 uint32_t elen; 960 uint32_t elen;
916 961
917 block = udf_new_block(inode->i_sb, inode, 962 block = udf_new_block(inode->i_sb, inode,
918 UDF_I_LOCATION(inode).partitionReferenceNum, 963 iinfo->i_location.partitionReferenceNum,
919 UDF_I_LOCATION(inode).logicalBlockNum, &err); 964 iinfo->i_location.logicalBlockNum, &err);
920 if (!block) 965 if (!block)
921 goto out_no_entry; 966 goto out_no_entry;
922 epos.block = UDF_I_LOCATION(inode); 967 epos.block = iinfo->i_location;
923 epos.offset = udf_file_entry_alloc_offset(inode); 968 epos.offset = udf_file_entry_alloc_offset(inode);
924 epos.bh = NULL; 969 epos.bh = NULL;
925 eloc.logicalBlockNum = block; 970 eloc.logicalBlockNum = block;
926 eloc.partitionReferenceNum = UDF_I_LOCATION(inode).partitionReferenceNum; 971 eloc.partitionReferenceNum =
972 iinfo->i_location.partitionReferenceNum;
927 elen = inode->i_sb->s_blocksize; 973 elen = inode->i_sb->s_blocksize;
928 UDF_I_LENEXTENTS(inode) = elen; 974 iinfo->i_lenExtents = elen;
929 udf_add_aext(inode, &epos, eloc, elen, 0); 975 udf_add_aext(inode, &epos, eloc, elen, 0);
930 brelse(epos.bh); 976 brelse(epos.bh);
931 977
932 block = udf_get_pblock(inode->i_sb, block, 978 block = udf_get_pblock(inode->i_sb, block,
933 UDF_I_LOCATION(inode).partitionReferenceNum, 0); 979 iinfo->i_location.partitionReferenceNum,
980 0);
934 epos.bh = udf_tread(inode->i_sb, block); 981 epos.bh = udf_tread(inode->i_sb, block);
935 lock_buffer(epos.bh); 982 lock_buffer(epos.bh);
936 memset(epos.bh->b_data, 0x00, inode->i_sb->s_blocksize); 983 memset(epos.bh->b_data, 0x00, inode->i_sb->s_blocksize);
@@ -938,9 +985,8 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry,
938 unlock_buffer(epos.bh); 985 unlock_buffer(epos.bh);
939 mark_buffer_dirty_inode(epos.bh, inode); 986 mark_buffer_dirty_inode(epos.bh, inode);
940 ea = epos.bh->b_data + udf_ext0_offset(inode); 987 ea = epos.bh->b_data + udf_ext0_offset(inode);
941 } else { 988 } else
942 ea = UDF_I_DATA(inode) + UDF_I_LENEATTR(inode); 989 ea = iinfo->i_ext.i_data + iinfo->i_lenEAttr;
943 }
944 990
945 eoffset = inode->i_sb->s_blocksize - udf_ext0_offset(inode); 991 eoffset = inode->i_sb->s_blocksize - udf_ext0_offset(inode);
946 pc = (struct pathComponent *)ea; 992 pc = (struct pathComponent *)ea;
@@ -977,7 +1023,8 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry,
977 if (compstart[0] == '.') { 1023 if (compstart[0] == '.') {
978 if ((symname - compstart) == 1) 1024 if ((symname - compstart) == 1)
979 pc->componentType = 4; 1025 pc->componentType = 4;
980 else if ((symname - compstart) == 2 && compstart[1] == '.') 1026 else if ((symname - compstart) == 2 &&
1027 compstart[1] == '.')
981 pc->componentType = 3; 1028 pc->componentType = 3;
982 } 1029 }
983 1030
@@ -987,7 +1034,8 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry,
987 if (!namelen) 1034 if (!namelen)
988 goto out_no_entry; 1035 goto out_no_entry;
989 1036
990 if (elen + sizeof(struct pathComponent) + namelen > eoffset) 1037 if (elen + sizeof(struct pathComponent) + namelen >
1038 eoffset)
991 goto out_no_entry; 1039 goto out_no_entry;
992 else 1040 else
993 pc->lengthComponentIdent = namelen; 1041 pc->lengthComponentIdent = namelen;
@@ -1006,30 +1054,34 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry,
1006 1054
1007 brelse(epos.bh); 1055 brelse(epos.bh);
1008 inode->i_size = elen; 1056 inode->i_size = elen;
1009 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB) 1057 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
1010 UDF_I_LENALLOC(inode) = inode->i_size; 1058 iinfo->i_lenAlloc = inode->i_size;
1011 mark_inode_dirty(inode); 1059 mark_inode_dirty(inode);
1012 1060
1013 if (!(fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err))) 1061 fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err);
1062 if (!fi)
1014 goto out_no_entry; 1063 goto out_no_entry;
1015 cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize); 1064 cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize);
1016 cfi.icb.extLocation = cpu_to_lelb(UDF_I_LOCATION(inode)); 1065 cfi.icb.extLocation = cpu_to_lelb(iinfo->i_location);
1017 if (UDF_SB_LVIDBH(inode->i_sb)) { 1066 bh = UDF_SB(inode->i_sb)->s_lvid_bh;
1067 if (bh) {
1068 struct logicalVolIntegrityDesc *lvid =
1069 (struct logicalVolIntegrityDesc *)bh->b_data;
1018 struct logicalVolHeaderDesc *lvhd; 1070 struct logicalVolHeaderDesc *lvhd;
1019 uint64_t uniqueID; 1071 uint64_t uniqueID;
1020 lvhd = (struct logicalVolHeaderDesc *)(UDF_SB_LVID(inode->i_sb)->logicalVolContentsUse); 1072 lvhd = (struct logicalVolHeaderDesc *)
1073 lvid->logicalVolContentsUse;
1021 uniqueID = le64_to_cpu(lvhd->uniqueID); 1074 uniqueID = le64_to_cpu(lvhd->uniqueID);
1022 *(__le32 *)((struct allocDescImpUse *)cfi.icb.impUse)->impUse = 1075 *(__le32 *)((struct allocDescImpUse *)cfi.icb.impUse)->impUse =
1023 cpu_to_le32(uniqueID & 0x00000000FFFFFFFFUL); 1076 cpu_to_le32(uniqueID & 0x00000000FFFFFFFFUL);
1024 if (!(++uniqueID & 0x00000000FFFFFFFFUL)) 1077 if (!(++uniqueID & 0x00000000FFFFFFFFUL))
1025 uniqueID += 16; 1078 uniqueID += 16;
1026 lvhd->uniqueID = cpu_to_le64(uniqueID); 1079 lvhd->uniqueID = cpu_to_le64(uniqueID);
1027 mark_buffer_dirty(UDF_SB_LVIDBH(inode->i_sb)); 1080 mark_buffer_dirty(bh);
1028 } 1081 }
1029 udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL); 1082 udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL);
1030 if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB) { 1083 if (UDF_I(dir)->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
1031 mark_inode_dirty(dir); 1084 mark_inode_dirty(dir);
1032 }
1033 if (fibh.sbh != fibh.ebh) 1085 if (fibh.sbh != fibh.ebh)
1034 brelse(fibh.ebh); 1086 brelse(fibh.ebh);
1035 brelse(fibh.sbh); 1087 brelse(fibh.sbh);
@@ -1053,6 +1105,7 @@ static int udf_link(struct dentry *old_dentry, struct inode *dir,
1053 struct udf_fileident_bh fibh; 1105 struct udf_fileident_bh fibh;
1054 struct fileIdentDesc cfi, *fi; 1106 struct fileIdentDesc cfi, *fi;
1055 int err; 1107 int err;
1108 struct buffer_head *bh;
1056 1109
1057 lock_kernel(); 1110 lock_kernel();
1058 if (inode->i_nlink >= (256 << sizeof(inode->i_nlink)) - 1) { 1111 if (inode->i_nlink >= (256 << sizeof(inode->i_nlink)) - 1) {
@@ -1060,28 +1113,32 @@ static int udf_link(struct dentry *old_dentry, struct inode *dir,
1060 return -EMLINK; 1113 return -EMLINK;
1061 } 1114 }
1062 1115
1063 if (!(fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err))) { 1116 fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err);
1117 if (!fi) {
1064 unlock_kernel(); 1118 unlock_kernel();
1065 return err; 1119 return err;
1066 } 1120 }
1067 cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize); 1121 cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize);
1068 cfi.icb.extLocation = cpu_to_lelb(UDF_I_LOCATION(inode)); 1122 cfi.icb.extLocation = cpu_to_lelb(UDF_I(inode)->i_location);
1069 if (UDF_SB_LVIDBH(inode->i_sb)) { 1123 bh = UDF_SB(inode->i_sb)->s_lvid_bh;
1124 if (bh) {
1125 struct logicalVolIntegrityDesc *lvid =
1126 (struct logicalVolIntegrityDesc *)bh->b_data;
1070 struct logicalVolHeaderDesc *lvhd; 1127 struct logicalVolHeaderDesc *lvhd;
1071 uint64_t uniqueID; 1128 uint64_t uniqueID;
1072 lvhd = (struct logicalVolHeaderDesc *)(UDF_SB_LVID(inode->i_sb)->logicalVolContentsUse); 1129 lvhd = (struct logicalVolHeaderDesc *)
1130 (lvid->logicalVolContentsUse);
1073 uniqueID = le64_to_cpu(lvhd->uniqueID); 1131 uniqueID = le64_to_cpu(lvhd->uniqueID);
1074 *(__le32 *)((struct allocDescImpUse *)cfi.icb.impUse)->impUse = 1132 *(__le32 *)((struct allocDescImpUse *)cfi.icb.impUse)->impUse =
1075 cpu_to_le32(uniqueID & 0x00000000FFFFFFFFUL); 1133 cpu_to_le32(uniqueID & 0x00000000FFFFFFFFUL);
1076 if (!(++uniqueID & 0x00000000FFFFFFFFUL)) 1134 if (!(++uniqueID & 0x00000000FFFFFFFFUL))
1077 uniqueID += 16; 1135 uniqueID += 16;
1078 lvhd->uniqueID = cpu_to_le64(uniqueID); 1136 lvhd->uniqueID = cpu_to_le64(uniqueID);
1079 mark_buffer_dirty(UDF_SB_LVIDBH(inode->i_sb)); 1137 mark_buffer_dirty(bh);
1080 } 1138 }
1081 udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL); 1139 udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL);
1082 if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB) { 1140 if (UDF_I(dir)->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
1083 mark_inode_dirty(dir); 1141 mark_inode_dirty(dir);
1084 }
1085 1142
1086 if (fibh.sbh != fibh.ebh) 1143 if (fibh.sbh != fibh.ebh)
1087 brelse(fibh.ebh); 1144 brelse(fibh.ebh);
@@ -1105,13 +1162,16 @@ static int udf_rename(struct inode *old_dir, struct dentry *old_dentry,
1105 struct inode *old_inode = old_dentry->d_inode; 1162 struct inode *old_inode = old_dentry->d_inode;
1106 struct inode *new_inode = new_dentry->d_inode; 1163 struct inode *new_inode = new_dentry->d_inode;
1107 struct udf_fileident_bh ofibh, nfibh; 1164 struct udf_fileident_bh ofibh, nfibh;
1108 struct fileIdentDesc *ofi = NULL, *nfi = NULL, *dir_fi = NULL, ocfi, ncfi; 1165 struct fileIdentDesc *ofi = NULL, *nfi = NULL, *dir_fi = NULL;
1166 struct fileIdentDesc ocfi, ncfi;
1109 struct buffer_head *dir_bh = NULL; 1167 struct buffer_head *dir_bh = NULL;
1110 int retval = -ENOENT; 1168 int retval = -ENOENT;
1111 kernel_lb_addr tloc; 1169 kernel_lb_addr tloc;
1170 struct udf_inode_info *old_iinfo = UDF_I(old_inode);
1112 1171
1113 lock_kernel(); 1172 lock_kernel();
1114 if ((ofi = udf_find_entry(old_dir, old_dentry, &ofibh, &ocfi))) { 1173 ofi = udf_find_entry(old_dir, old_dentry, &ofibh, &ocfi);
1174 if (ofi) {
1115 if (ofibh.sbh != ofibh.ebh) 1175 if (ofibh.sbh != ofibh.ebh)
1116 brelse(ofibh.ebh); 1176 brelse(ofibh.ebh);
1117 brelse(ofibh.sbh); 1177 brelse(ofibh.sbh);
@@ -1131,7 +1191,7 @@ static int udf_rename(struct inode *old_dir, struct dentry *old_dentry,
1131 } 1191 }
1132 } 1192 }
1133 if (S_ISDIR(old_inode->i_mode)) { 1193 if (S_ISDIR(old_inode->i_mode)) {
1134 uint32_t offset = udf_ext0_offset(old_inode); 1194 int offset = udf_ext0_offset(old_inode);
1135 1195
1136 if (new_inode) { 1196 if (new_inode) {
1137 retval = -ENOTEMPTY; 1197 retval = -ENOTEMPTY;
@@ -1139,30 +1199,36 @@ static int udf_rename(struct inode *old_dir, struct dentry *old_dentry,
1139 goto end_rename; 1199 goto end_rename;
1140 } 1200 }
1141 retval = -EIO; 1201 retval = -EIO;
1142 if (UDF_I_ALLOCTYPE(old_inode) == ICBTAG_FLAG_AD_IN_ICB) { 1202 if (old_iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
1143 dir_fi = udf_get_fileident(UDF_I_DATA(old_inode) - 1203 dir_fi = udf_get_fileident(
1144 (UDF_I_EFE(old_inode) ? 1204 old_iinfo->i_ext.i_data -
1145 sizeof(struct extendedFileEntry) : 1205 (old_iinfo->i_efe ?
1146 sizeof(struct fileEntry)), 1206 sizeof(struct extendedFileEntry) :
1147 old_inode->i_sb->s_blocksize, &offset); 1207 sizeof(struct fileEntry)),
1208 old_inode->i_sb->s_blocksize, &offset);
1148 } else { 1209 } else {
1149 dir_bh = udf_bread(old_inode, 0, 0, &retval); 1210 dir_bh = udf_bread(old_inode, 0, 0, &retval);
1150 if (!dir_bh) 1211 if (!dir_bh)
1151 goto end_rename; 1212 goto end_rename;
1152 dir_fi = udf_get_fileident(dir_bh->b_data, old_inode->i_sb->s_blocksize, &offset); 1213 dir_fi = udf_get_fileident(dir_bh->b_data,
1214 old_inode->i_sb->s_blocksize, &offset);
1153 } 1215 }
1154 if (!dir_fi) 1216 if (!dir_fi)
1155 goto end_rename; 1217 goto end_rename;
1156 tloc = lelb_to_cpu(dir_fi->icb.extLocation); 1218 tloc = lelb_to_cpu(dir_fi->icb.extLocation);
1157 if (udf_get_lb_pblock(old_inode->i_sb, tloc, 0) != old_dir->i_ino) 1219 if (udf_get_lb_pblock(old_inode->i_sb, tloc, 0) !=
1220 old_dir->i_ino)
1158 goto end_rename; 1221 goto end_rename;
1159 1222
1160 retval = -EMLINK; 1223 retval = -EMLINK;
1161 if (!new_inode && new_dir->i_nlink >= (256 << sizeof(new_dir->i_nlink)) - 1) 1224 if (!new_inode &&
1225 new_dir->i_nlink >=
1226 (256 << sizeof(new_dir->i_nlink)) - 1)
1162 goto end_rename; 1227 goto end_rename;
1163 } 1228 }
1164 if (!nfi) { 1229 if (!nfi) {
1165 nfi = udf_add_entry(new_dir, new_dentry, &nfibh, &ncfi, &retval); 1230 nfi = udf_add_entry(new_dir, new_dentry, &nfibh, &ncfi,
1231 &retval);
1166 if (!nfi) 1232 if (!nfi)
1167 goto end_rename; 1233 goto end_rename;
1168 } 1234 }
@@ -1194,18 +1260,19 @@ static int udf_rename(struct inode *old_dir, struct dentry *old_dentry,
1194 mark_inode_dirty(old_dir); 1260 mark_inode_dirty(old_dir);
1195 1261
1196 if (dir_fi) { 1262 if (dir_fi) {
1197 dir_fi->icb.extLocation = cpu_to_lelb(UDF_I_LOCATION(new_dir)); 1263 dir_fi->icb.extLocation = cpu_to_lelb(UDF_I(new_dir)->i_location);
1198 udf_update_tag((char *)dir_fi, (sizeof(struct fileIdentDesc) + 1264 udf_update_tag((char *)dir_fi,
1199 le16_to_cpu(dir_fi->lengthOfImpUse) + 3) & ~3); 1265 (sizeof(struct fileIdentDesc) +
1200 if (UDF_I_ALLOCTYPE(old_inode) == ICBTAG_FLAG_AD_IN_ICB) { 1266 le16_to_cpu(dir_fi->lengthOfImpUse) + 3) & ~3);
1267 if (old_iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
1201 mark_inode_dirty(old_inode); 1268 mark_inode_dirty(old_inode);
1202 } else { 1269 else
1203 mark_buffer_dirty_inode(dir_bh, old_inode); 1270 mark_buffer_dirty_inode(dir_bh, old_inode);
1204 } 1271
1205 inode_dec_link_count(old_dir); 1272 inode_dec_link_count(old_dir);
1206 if (new_inode) { 1273 if (new_inode)
1207 inode_dec_link_count(new_inode); 1274 inode_dec_link_count(new_inode);
1208 } else { 1275 else {
1209 inc_nlink(new_dir); 1276 inc_nlink(new_dir);
1210 mark_inode_dirty(new_dir); 1277 mark_inode_dirty(new_dir);
1211 } 1278 }
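The recurring change throughout namei.c is the retirement of the UDF_I_ALLOCTYPE(), UDF_I_LOCATION(), UDF_I_UNIQUE() and related macros in favour of a cached struct udf_inode_info pointer obtained once via UDF_I(). A minimal sketch of that accessor pattern follows, assuming the conventional embedded-vfs_inode/container_of layout; the stub types and the subset of fields shown are stand-ins for the real definitions in the UDF headers, which are not part of this diff.

#include <stddef.h>
#include <stdint.h>

struct inode { unsigned long i_ino; };	/* stand-in for the VFS inode */
typedef struct {
	uint32_t logicalBlockNum;
	uint16_t partitionReferenceNum;
} kernel_lb_addr;

struct udf_inode_info {
	unsigned int	i_alloc_type;	/* ICBTAG_FLAG_AD_* */
	kernel_lb_addr	i_location;
	unsigned long long i_unique;
	/* remaining fields omitted in this sketch */
	struct inode	vfs_inode;	/* VFS inode embedded at the end */
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static inline struct udf_inode_info *UDF_I(struct inode *inode)
{
	return container_of(inode, struct udf_inode_info, vfs_inode);
}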
diff --git a/fs/udf/partition.c b/fs/udf/partition.c
index aaab24c8c498..fc533345ab89 100644
--- a/fs/udf/partition.c
+++ b/fs/udf/partition.c
@@ -31,15 +31,18 @@
31inline uint32_t udf_get_pblock(struct super_block *sb, uint32_t block, 31inline uint32_t udf_get_pblock(struct super_block *sb, uint32_t block,
32 uint16_t partition, uint32_t offset) 32 uint16_t partition, uint32_t offset)
33{ 33{
34 if (partition >= UDF_SB_NUMPARTS(sb)) { 34 struct udf_sb_info *sbi = UDF_SB(sb);
35 udf_debug("block=%d, partition=%d, offset=%d: invalid partition\n", 35 struct udf_part_map *map;
36 block, partition, offset); 36 if (partition >= sbi->s_partitions) {
37 udf_debug("block=%d, partition=%d, offset=%d: "
38 "invalid partition\n", block, partition, offset);
37 return 0xFFFFFFFF; 39 return 0xFFFFFFFF;
38 } 40 }
39 if (UDF_SB_PARTFUNC(sb, partition)) 41 map = &sbi->s_partmaps[partition];
40 return UDF_SB_PARTFUNC(sb, partition)(sb, block, partition, offset); 42 if (map->s_partition_func)
43 return map->s_partition_func(sb, block, partition, offset);
41 else 44 else
42 return UDF_SB_PARTROOT(sb, partition) + block + offset; 45 return map->s_partition_root + block + offset;
43} 46}
44 47
45uint32_t udf_get_pblock_virt15(struct super_block *sb, uint32_t block, 48uint32_t udf_get_pblock_virt15(struct super_block *sb, uint32_t block,
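The rewritten udf_get_pblock() now looks the partition map up once and then either calls the map's translation hook (virtual or sparable partitions) or applies the flat root-plus-offset mapping. A simplified user-space sketch of that dispatch is below; the struct layout is an assumption reduced to the fields the new code touches.

#include <stdint.h>
#include <stddef.h>

struct super_block;

struct udf_part_map {
	uint32_t s_partition_root;
	uint32_t (*s_partition_func)(struct super_block *, uint32_t,
				     uint16_t, uint32_t);
};

static uint32_t get_pblock(struct super_block *sb, struct udf_part_map *maps,
			   unsigned int nparts, uint32_t block,
			   uint16_t partition, uint32_t offset)
{
	struct udf_part_map *map;

	if (partition >= nparts)
		return 0xFFFFFFFF;		/* invalid partition */
	map = &maps[partition];
	if (map->s_partition_func)		/* e.g. VAT or sparing remap */
		return map->s_partition_func(sb, block, partition, offset);
	return map->s_partition_root + block + offset;
}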
@@ -49,12 +52,18 @@ uint32_t udf_get_pblock_virt15(struct super_block *sb, uint32_t block,
49 uint32_t newblock; 52 uint32_t newblock;
50 uint32_t index; 53 uint32_t index;
51 uint32_t loc; 54 uint32_t loc;
55 struct udf_sb_info *sbi = UDF_SB(sb);
56 struct udf_part_map *map;
57 struct udf_virtual_data *vdata;
58 struct udf_inode_info *iinfo;
52 59
53 index = (sb->s_blocksize - UDF_SB_TYPEVIRT(sb,partition).s_start_offset) / sizeof(uint32_t); 60 map = &sbi->s_partmaps[partition];
61 vdata = &map->s_type_specific.s_virtual;
62 index = (sb->s_blocksize - vdata->s_start_offset) / sizeof(uint32_t);
54 63
55 if (block > UDF_SB_TYPEVIRT(sb,partition).s_num_entries) { 64 if (block > vdata->s_num_entries) {
56 udf_debug("Trying to access block beyond end of VAT (%d max %d)\n", 65 udf_debug("Trying to access block beyond end of VAT "
57 block, UDF_SB_TYPEVIRT(sb,partition).s_num_entries); 66 "(%d max %d)\n", block, vdata->s_num_entries);
58 return 0xFFFFFFFF; 67 return 0xFFFFFFFF;
59 } 68 }
60 69
@@ -64,12 +73,13 @@ uint32_t udf_get_pblock_virt15(struct super_block *sb, uint32_t block,
64 index = block % (sb->s_blocksize / sizeof(uint32_t)); 73 index = block % (sb->s_blocksize / sizeof(uint32_t));
65 } else { 74 } else {
66 newblock = 0; 75 newblock = 0;
67 index = UDF_SB_TYPEVIRT(sb,partition).s_start_offset / sizeof(uint32_t) + block; 76 index = vdata->s_start_offset / sizeof(uint32_t) + block;
68 } 77 }
69 78
70 loc = udf_block_map(UDF_SB_VAT(sb), newblock); 79 loc = udf_block_map(sbi->s_vat_inode, newblock);
71 80
72 if (!(bh = sb_bread(sb, loc))) { 81 bh = sb_bread(sb, loc);
82 if (!bh) {
73 udf_debug("get_pblock(UDF_VIRTUAL_MAP:%p,%d,%d) VAT: %d[%d]\n", 83 udf_debug("get_pblock(UDF_VIRTUAL_MAP:%p,%d,%d) VAT: %d[%d]\n",
74 sb, block, partition, loc, index); 84 sb, block, partition, loc, index);
75 return 0xFFFFFFFF; 85 return 0xFFFFFFFF;
@@ -79,50 +89,61 @@ uint32_t udf_get_pblock_virt15(struct super_block *sb, uint32_t block,
79 89
80 brelse(bh); 90 brelse(bh);
81 91
82 if (UDF_I_LOCATION(UDF_SB_VAT(sb)).partitionReferenceNum == partition) { 92 iinfo = UDF_I(sbi->s_vat_inode);
93 if (iinfo->i_location.partitionReferenceNum == partition) {
83 udf_debug("recursive call to udf_get_pblock!\n"); 94 udf_debug("recursive call to udf_get_pblock!\n");
84 return 0xFFFFFFFF; 95 return 0xFFFFFFFF;
85 } 96 }
86 97
87 return udf_get_pblock(sb, loc, 98 return udf_get_pblock(sb, loc,
88 UDF_I_LOCATION(UDF_SB_VAT(sb)).partitionReferenceNum, 99 iinfo->i_location.partitionReferenceNum,
89 offset); 100 offset);
90} 101}
91 102
92inline uint32_t udf_get_pblock_virt20(struct super_block * sb, uint32_t block, 103inline uint32_t udf_get_pblock_virt20(struct super_block *sb, uint32_t block,
93 uint16_t partition, uint32_t offset) 104 uint16_t partition, uint32_t offset)
94{ 105{
95 return udf_get_pblock_virt15(sb, block, partition, offset); 106 return udf_get_pblock_virt15(sb, block, partition, offset);
96} 107}
97 108
98uint32_t udf_get_pblock_spar15(struct super_block * sb, uint32_t block, 109uint32_t udf_get_pblock_spar15(struct super_block *sb, uint32_t block,
99 uint16_t partition, uint32_t offset) 110 uint16_t partition, uint32_t offset)
100{ 111{
101 int i; 112 int i;
102 struct sparingTable *st = NULL; 113 struct sparingTable *st = NULL;
103 uint32_t packet = (block + offset) & ~(UDF_SB_TYPESPAR(sb,partition).s_packet_len - 1); 114 struct udf_sb_info *sbi = UDF_SB(sb);
115 struct udf_part_map *map;
116 uint32_t packet;
117 struct udf_sparing_data *sdata;
118
119 map = &sbi->s_partmaps[partition];
120 sdata = &map->s_type_specific.s_sparing;
121 packet = (block + offset) & ~(sdata->s_packet_len - 1);
104 122
105 for (i = 0; i < 4; i++) { 123 for (i = 0; i < 4; i++) {
106 if (UDF_SB_TYPESPAR(sb,partition).s_spar_map[i] != NULL) { 124 if (sdata->s_spar_map[i] != NULL) {
107 st = (struct sparingTable *)UDF_SB_TYPESPAR(sb,partition).s_spar_map[i]->b_data; 125 st = (struct sparingTable *)
126 sdata->s_spar_map[i]->b_data;
108 break; 127 break;
109 } 128 }
110 } 129 }
111 130
112 if (st) { 131 if (st) {
113 for (i = 0; i < le16_to_cpu(st->reallocationTableLen); i++) { 132 for (i = 0; i < le16_to_cpu(st->reallocationTableLen); i++) {
114 if (le32_to_cpu(st->mapEntry[i].origLocation) >= 0xFFFFFFF0) { 133 struct sparingEntry *entry = &st->mapEntry[i];
134 u32 origLoc = le32_to_cpu(entry->origLocation);
135 if (origLoc >= 0xFFFFFFF0)
115 break; 136 break;
116 } else if (le32_to_cpu(st->mapEntry[i].origLocation) == packet) { 137 else if (origLoc == packet)
117 return le32_to_cpu(st->mapEntry[i].mappedLocation) + 138 return le32_to_cpu(entry->mappedLocation) +
118 ((block + offset) & (UDF_SB_TYPESPAR(sb,partition).s_packet_len - 1)); 139 ((block + offset) &
119 } else if (le32_to_cpu(st->mapEntry[i].origLocation) > packet) { 140 (sdata->s_packet_len - 1));
141 else if (origLoc > packet)
120 break; 142 break;
121 }
122 } 143 }
123 } 144 }
124 145
125 return UDF_SB_PARTROOT(sb,partition) + block + offset; 146 return map->s_partition_root + block + offset;
126} 147}
127 148
128int udf_relocate_blocks(struct super_block *sb, long old_block, long *new_block) 149int udf_relocate_blocks(struct super_block *sb, long old_block, long *new_block)
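udf_get_pblock_spar15() keeps the same sparing logic but hoists the map and packet computation into locals: the block is rounded down to its packet, the sorted sparing table is scanned, and a matching entry redirects the access to the spare area while preserving the offset within the packet. A host-order, table-in-memory sketch of that remap follows; s_packet_len is assumed to be a power of two, as the mask arithmetic requires.

#include <stdint.h>

struct spar_entry { uint32_t orig; uint32_t mapped; };

/* Remap one block through a simplified sparing table. */
static uint32_t spar_remap(uint32_t block, uint32_t packet_len,
			   const struct spar_entry *tab, int n)
{
	uint32_t packet = block & ~(packet_len - 1);	/* packet start */
	int i;

	for (i = 0; i < n; i++) {
		if (tab[i].orig >= 0xFFFFFFF0 || tab[i].orig > packet)
			break;			/* table is sorted; no match */
		if (tab[i].orig == packet)
			return tab[i].mapped + (block & (packet_len - 1));
	}
	return block;				/* not spared: use as-is */
}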
@@ -132,69 +153,109 @@ int udf_relocate_blocks(struct super_block *sb, long old_block, long *new_block)
132 struct sparingEntry mapEntry; 153 struct sparingEntry mapEntry;
133 uint32_t packet; 154 uint32_t packet;
134 int i, j, k, l; 155 int i, j, k, l;
156 struct udf_sb_info *sbi = UDF_SB(sb);
157 u16 reallocationTableLen;
158 struct buffer_head *bh;
135 159
136 for (i = 0; i < UDF_SB_NUMPARTS(sb); i++) { 160 for (i = 0; i < sbi->s_partitions; i++) {
137 if (old_block > UDF_SB_PARTROOT(sb,i) && 161 struct udf_part_map *map = &sbi->s_partmaps[i];
138 old_block < UDF_SB_PARTROOT(sb,i) + UDF_SB_PARTLEN(sb,i)) { 162 if (old_block > map->s_partition_root &&
139 sdata = &UDF_SB_TYPESPAR(sb,i); 163 old_block < map->s_partition_root + map->s_partition_len) {
140 packet = (old_block - UDF_SB_PARTROOT(sb,i)) & ~(sdata->s_packet_len - 1); 164 sdata = &map->s_type_specific.s_sparing;
165 packet = (old_block - map->s_partition_root) &
166 ~(sdata->s_packet_len - 1);
141 167
142 for (j = 0; j < 4; j++) { 168 for (j = 0; j < 4; j++)
143 if (UDF_SB_TYPESPAR(sb,i).s_spar_map[j] != NULL) { 169 if (sdata->s_spar_map[j] != NULL) {
144 st = (struct sparingTable *)sdata->s_spar_map[j]->b_data; 170 st = (struct sparingTable *)
171 sdata->s_spar_map[j]->b_data;
145 break; 172 break;
146 } 173 }
147 }
148 174
149 if (!st) 175 if (!st)
150 return 1; 176 return 1;
151 177
152 for (k = 0; k < le16_to_cpu(st->reallocationTableLen); k++) { 178 reallocationTableLen =
153 if (le32_to_cpu(st->mapEntry[k].origLocation) == 0xFFFFFFFF) { 179 le16_to_cpu(st->reallocationTableLen);
180 for (k = 0; k < reallocationTableLen; k++) {
181 struct sparingEntry *entry = &st->mapEntry[k];
182 u32 origLoc = le32_to_cpu(entry->origLocation);
183
184 if (origLoc == 0xFFFFFFFF) {
154 for (; j < 4; j++) { 185 for (; j < 4; j++) {
155 if (sdata->s_spar_map[j]) { 186 int len;
156 st = (struct sparingTable *)sdata->s_spar_map[j]->b_data; 187 bh = sdata->s_spar_map[j];
157 st->mapEntry[k].origLocation = cpu_to_le32(packet); 188 if (!bh)
158 udf_update_tag((char *)st, sizeof(struct sparingTable) + le16_to_cpu(st->reallocationTableLen) * sizeof(struct sparingEntry)); 189 continue;
159 mark_buffer_dirty(sdata->s_spar_map[j]); 190
160 } 191 st = (struct sparingTable *)
192 bh->b_data;
193 entry->origLocation =
194 cpu_to_le32(packet);
195 len =
196 sizeof(struct sparingTable) +
197 reallocationTableLen *
198 sizeof(struct sparingEntry);
199 udf_update_tag((char *)st, len);
200 mark_buffer_dirty(bh);
161 } 201 }
162 *new_block = le32_to_cpu(st->mapEntry[k].mappedLocation) + 202 *new_block = le32_to_cpu(
163 ((old_block - UDF_SB_PARTROOT(sb,i)) & (sdata->s_packet_len - 1)); 203 entry->mappedLocation) +
204 ((old_block -
205 map->s_partition_root) &
206 (sdata->s_packet_len - 1));
164 return 0; 207 return 0;
165 } else if (le32_to_cpu(st->mapEntry[k].origLocation) == packet) { 208 } else if (origLoc == packet) {
166 *new_block = le32_to_cpu(st->mapEntry[k].mappedLocation) + 209 *new_block = le32_to_cpu(
167 ((old_block - UDF_SB_PARTROOT(sb,i)) & (sdata->s_packet_len - 1)); 210 entry->mappedLocation) +
211 ((old_block -
212 map->s_partition_root) &
213 (sdata->s_packet_len - 1));
168 return 0; 214 return 0;
169 } else if (le32_to_cpu(st->mapEntry[k].origLocation) > packet) { 215 } else if (origLoc > packet)
170 break; 216 break;
171 }
172 } 217 }
173 218
174 for (l = k; l < le16_to_cpu(st->reallocationTableLen); l++) { 219 for (l = k; l < reallocationTableLen; l++) {
175 if (le32_to_cpu(st->mapEntry[l].origLocation) == 0xFFFFFFFF) { 220 struct sparingEntry *entry = &st->mapEntry[l];
176 for (; j < 4; j++) { 221 u32 origLoc = le32_to_cpu(entry->origLocation);
177 if (sdata->s_spar_map[j]) { 222
178 st = (struct sparingTable *)sdata->s_spar_map[j]->b_data; 223 if (origLoc != 0xFFFFFFFF)
179 mapEntry = st->mapEntry[l]; 224 continue;
180 mapEntry.origLocation = cpu_to_le32(packet); 225
181 memmove(&st->mapEntry[k + 1], &st->mapEntry[k], (l - k) * sizeof(struct sparingEntry)); 226 for (; j < 4; j++) {
182 st->mapEntry[k] = mapEntry; 227 bh = sdata->s_spar_map[j];
183 udf_update_tag((char *)st, sizeof(struct sparingTable) + le16_to_cpu(st->reallocationTableLen) * sizeof(struct sparingEntry)); 228 if (!bh)
184 mark_buffer_dirty(sdata->s_spar_map[j]); 229 continue;
185 } 230
186 } 231 st = (struct sparingTable *)bh->b_data;
187 *new_block = le32_to_cpu(st->mapEntry[k].mappedLocation) + 232 mapEntry = st->mapEntry[l];
188 ((old_block - UDF_SB_PARTROOT(sb,i)) & (sdata->s_packet_len - 1)); 233 mapEntry.origLocation =
189 return 0; 234 cpu_to_le32(packet);
235 memmove(&st->mapEntry[k + 1],
236 &st->mapEntry[k],
237 (l - k) *
238 sizeof(struct sparingEntry));
239 st->mapEntry[k] = mapEntry;
240 udf_update_tag((char *)st,
241 sizeof(struct sparingTable) +
242 reallocationTableLen *
243 sizeof(struct sparingEntry));
244 mark_buffer_dirty(bh);
190 } 245 }
246 *new_block =
247 le32_to_cpu(
248 st->mapEntry[k].mappedLocation) +
249 ((old_block - map->s_partition_root) &
250 (sdata->s_packet_len - 1));
251 return 0;
191 } 252 }
192 253
193 return 1; 254 return 1;
194 } /* if old_block */ 255 } /* if old_block */
195 } 256 }
196 257
197 if (i == UDF_SB_NUMPARTS(sb)) { 258 if (i == sbi->s_partitions) {
198 /* outside of partitions */ 259 /* outside of partitions */
199 /* for now, fail =) */ 260 /* for now, fail =) */
200 return 1; 261 return 1;
diff --git a/fs/udf/super.c b/fs/udf/super.c
index 4360c7a05743..f3ac4abfc946 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -33,8 +33,8 @@
33 * 10/17/98 added freespace count for "df" 33 * 10/17/98 added freespace count for "df"
34 * 11/11/98 gr added novrs option 34 * 11/11/98 gr added novrs option
35 * 11/26/98 dgb added fileset,anchor mount options 35 * 11/26/98 dgb added fileset,anchor mount options
36 * 12/06/98 blf really hosed things royally. vat/sparing support. sequenced vol descs 36 * 12/06/98 blf really hosed things royally. vat/sparing support. sequenced
37 * rewrote option handling based on isofs 37 * vol descs. rewrote option handling based on isofs
38 * 12/20/98 find the free space bitmap (if it exists) 38 * 12/20/98 find the free space bitmap (if it exists)
39 */ 39 */
40 40
@@ -52,6 +52,9 @@
52#include <linux/buffer_head.h> 52#include <linux/buffer_head.h>
53#include <linux/vfs.h> 53#include <linux/vfs.h>
54#include <linux/vmalloc.h> 54#include <linux/vmalloc.h>
55#include <linux/errno.h>
56#include <linux/mount.h>
57#include <linux/seq_file.h>
55#include <asm/byteorder.h> 58#include <asm/byteorder.h>
56 59
57#include <linux/udf_fs.h> 60#include <linux/udf_fs.h>
@@ -70,6 +73,8 @@
70#define VDS_POS_TERMINATING_DESC 6 73#define VDS_POS_TERMINATING_DESC 6
71#define VDS_POS_LENGTH 7 74#define VDS_POS_LENGTH 7
72 75
76#define UDF_DEFAULT_BLOCKSIZE 2048
77
73static char error_buf[1024]; 78static char error_buf[1024];
74 79
75/* These are the "meat" - everything else is stuffing */ 80/* These are the "meat" - everything else is stuffing */
@@ -94,6 +99,17 @@ static void udf_open_lvid(struct super_block *);
94static void udf_close_lvid(struct super_block *); 99static void udf_close_lvid(struct super_block *);
95static unsigned int udf_count_free(struct super_block *); 100static unsigned int udf_count_free(struct super_block *);
96static int udf_statfs(struct dentry *, struct kstatfs *); 101static int udf_statfs(struct dentry *, struct kstatfs *);
102static int udf_show_options(struct seq_file *, struct vfsmount *);
103
104struct logicalVolIntegrityDescImpUse *udf_sb_lvidiu(struct udf_sb_info *sbi)
105{
106 struct logicalVolIntegrityDesc *lvid =
107 (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data;
108 __u32 number_of_partitions = le32_to_cpu(lvid->numOfPartitions);
109 __u32 offset = number_of_partitions * 2 *
110 sizeof(uint32_t)/sizeof(uint8_t);
111 return (struct logicalVolIntegrityDescImpUse *)&(lvid->impUse[offset]);
112}
97 113
98/* UDF filesystem type */ 114/* UDF filesystem type */
99static int udf_get_sb(struct file_system_type *fs_type, 115static int udf_get_sb(struct file_system_type *fs_type,
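/*
 * Sketch of the offset arithmetic behind the udf_sb_lvidiu() helper added
 * above, using a simplified stand-in for struct logicalVolIntegrityDesc
 * (assumption for the example: impUse[] is preceded by a free-space table
 * and a size table, each holding one 32-bit entry per partition).
 */
#include <stdint.h>
#include <stddef.h>

struct lvid_tail {
	uint32_t num_partitions;	/* numOfPartitions in the real descriptor */
	uint8_t  imp_use[];		/* freeSpaceTable + sizeTable + ImpUse    */
};

static uint8_t *lvid_imp_use(struct lvid_tail *lvid)
{
	/* skip numOfPartitions entries of freeSpaceTable and of sizeTable */
	size_t offset = (size_t)lvid->num_partitions * 2 * sizeof(uint32_t);

	return &lvid->imp_use[offset];
}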
@@ -116,7 +132,7 @@ static struct kmem_cache *udf_inode_cachep;
116static struct inode *udf_alloc_inode(struct super_block *sb) 132static struct inode *udf_alloc_inode(struct super_block *sb)
117{ 133{
118 struct udf_inode_info *ei; 134 struct udf_inode_info *ei;
119 ei = (struct udf_inode_info *)kmem_cache_alloc(udf_inode_cachep, GFP_KERNEL); 135 ei = kmem_cache_alloc(udf_inode_cachep, GFP_KERNEL);
120 if (!ei) 136 if (!ei)
121 return NULL; 137 return NULL;
122 138
@@ -170,6 +186,7 @@ static const struct super_operations udf_sb_ops = {
170 .write_super = udf_write_super, 186 .write_super = udf_write_super,
171 .statfs = udf_statfs, 187 .statfs = udf_statfs,
172 .remount_fs = udf_remount_fs, 188 .remount_fs = udf_remount_fs,
189 .show_options = udf_show_options,
173}; 190};
174 191
175struct udf_options { 192struct udf_options {
@@ -218,6 +235,79 @@ static void __exit exit_udf_fs(void)
218module_init(init_udf_fs) 235module_init(init_udf_fs)
219module_exit(exit_udf_fs) 236module_exit(exit_udf_fs)
220 237
238static int udf_sb_alloc_partition_maps(struct super_block *sb, u32 count)
239{
240 struct udf_sb_info *sbi = UDF_SB(sb);
241
242 sbi->s_partmaps = kcalloc(count, sizeof(struct udf_part_map),
243 GFP_KERNEL);
244 if (!sbi->s_partmaps) {
245 udf_error(sb, __FUNCTION__,
246 "Unable to allocate space for %d partition maps",
247 count);
248 sbi->s_partitions = 0;
249 return -ENOMEM;
250 }
251
252 sbi->s_partitions = count;
253 return 0;
254}
255
256static int udf_show_options(struct seq_file *seq, struct vfsmount *mnt)
257{
258 struct super_block *sb = mnt->mnt_sb;
259 struct udf_sb_info *sbi = UDF_SB(sb);
260
261 if (!UDF_QUERY_FLAG(sb, UDF_FLAG_STRICT))
262 seq_puts(seq, ",nostrict");
263 if (sb->s_blocksize != UDF_DEFAULT_BLOCKSIZE)
264 seq_printf(seq, ",bs=%lu", sb->s_blocksize);
265 if (UDF_QUERY_FLAG(sb, UDF_FLAG_UNHIDE))
266 seq_puts(seq, ",unhide");
267 if (UDF_QUERY_FLAG(sb, UDF_FLAG_UNDELETE))
268 seq_puts(seq, ",undelete");
269 if (!UDF_QUERY_FLAG(sb, UDF_FLAG_USE_AD_IN_ICB))
270 seq_puts(seq, ",noadinicb");
271 if (UDF_QUERY_FLAG(sb, UDF_FLAG_USE_SHORT_AD))
272 seq_puts(seq, ",shortad");
273 if (UDF_QUERY_FLAG(sb, UDF_FLAG_UID_FORGET))
274 seq_puts(seq, ",uid=forget");
275 if (UDF_QUERY_FLAG(sb, UDF_FLAG_UID_IGNORE))
276 seq_puts(seq, ",uid=ignore");
277 if (UDF_QUERY_FLAG(sb, UDF_FLAG_GID_FORGET))
278 seq_puts(seq, ",gid=forget");
279 if (UDF_QUERY_FLAG(sb, UDF_FLAG_GID_IGNORE))
280 seq_puts(seq, ",gid=ignore");
281 if (UDF_QUERY_FLAG(sb, UDF_FLAG_UID_SET))
282 seq_printf(seq, ",uid=%u", sbi->s_uid);
283 if (UDF_QUERY_FLAG(sb, UDF_FLAG_GID_SET))
284 seq_printf(seq, ",gid=%u", sbi->s_gid);
285 if (sbi->s_umask != 0)
286 seq_printf(seq, ",umask=%o", sbi->s_umask);
287 if (UDF_QUERY_FLAG(sb, UDF_FLAG_SESSION_SET))
288 seq_printf(seq, ",session=%u", sbi->s_session);
289 if (UDF_QUERY_FLAG(sb, UDF_FLAG_LASTBLOCK_SET))
290 seq_printf(seq, ",lastblock=%u", sbi->s_last_block);
291 /*
292 * s_anchor[2] could be zeroed out in case there is no anchor
293 * in the specified block, but then the "anchor=N" option
294 * originally given by the user wasn't effective, so it's OK
295 * if we don't show it.
296 */
297 if (sbi->s_anchor[2] != 0)
298 seq_printf(seq, ",anchor=%u", sbi->s_anchor[2]);
299 /*
300 * volume, partition, fileset and rootdir seem to be ignored
301 * currently
302 */
303 if (UDF_QUERY_FLAG(sb, UDF_FLAG_UTF8))
304 seq_puts(seq, ",utf8");
305 if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP) && sbi->s_nls_map)
306 seq_printf(seq, ",iocharset=%s", sbi->s_nls_map->charset);
307
308 return 0;
309}
310
221/* 311/*
222 * udf_parse_options 312 * udf_parse_options
223 * 313 *
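/*
 * User-space sketch of the ->show_options pattern introduced above: every
 * option that differs from the default is emitted with a leading comma so
 * the pieces concatenate into a valid mount-option string.  Illustration
 * only; the kernel version writes through seq_puts()/seq_printf(), and the
 * flag names below are made up for the example.
 */
#include <stdio.h>

#define EX_FLAG_UNHIDE   (1 << 0)
#define EX_FLAG_UNDELETE (1 << 1)

static void example_show_options(unsigned int flags, unsigned int umask)
{
	if (flags & EX_FLAG_UNHIDE)
		printf(",unhide");
	if (flags & EX_FLAG_UNDELETE)
		printf(",undelete");
	if (umask != 0)
		printf(",umask=%o", umask);
	printf("\n");
}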
@@ -310,13 +400,14 @@ static match_table_t tokens = {
310 {Opt_err, NULL} 400 {Opt_err, NULL}
311}; 401};
312 402
313static int udf_parse_options(char *options, struct udf_options *uopt) 403static int udf_parse_options(char *options, struct udf_options *uopt,
404 bool remount)
314{ 405{
315 char *p; 406 char *p;
316 int option; 407 int option;
317 408
318 uopt->novrs = 0; 409 uopt->novrs = 0;
319 uopt->blocksize = 2048; 410 uopt->blocksize = UDF_DEFAULT_BLOCKSIZE;
320 uopt->partition = 0xFFFF; 411 uopt->partition = 0xFFFF;
321 uopt->session = 0xFFFFFFFF; 412 uopt->session = 0xFFFFFFFF;
322 uopt->lastblock = 0; 413 uopt->lastblock = 0;
@@ -386,11 +477,15 @@ static int udf_parse_options(char *options, struct udf_options *uopt)
386 if (match_int(args, &option)) 477 if (match_int(args, &option))
387 return 0; 478 return 0;
388 uopt->session = option; 479 uopt->session = option;
480 if (!remount)
481 uopt->flags |= (1 << UDF_FLAG_SESSION_SET);
389 break; 482 break;
390 case Opt_lastblock: 483 case Opt_lastblock:
391 if (match_int(args, &option)) 484 if (match_int(args, &option))
392 return 0; 485 return 0;
393 uopt->lastblock = option; 486 uopt->lastblock = option;
487 if (!remount)
488 uopt->flags |= (1 << UDF_FLAG_LASTBLOCK_SET);
394 break; 489 break;
395 case Opt_anchor: 490 case Opt_anchor:
396 if (match_int(args, &option)) 491 if (match_int(args, &option))
@@ -447,7 +542,7 @@ static int udf_parse_options(char *options, struct udf_options *uopt)
447 return 1; 542 return 1;
448} 543}
449 544
450void udf_write_super(struct super_block *sb) 545static void udf_write_super(struct super_block *sb)
451{ 546{
452 lock_kernel(); 547 lock_kernel();
453 548
@@ -461,22 +556,23 @@ void udf_write_super(struct super_block *sb)
461static int udf_remount_fs(struct super_block *sb, int *flags, char *options) 556static int udf_remount_fs(struct super_block *sb, int *flags, char *options)
462{ 557{
463 struct udf_options uopt; 558 struct udf_options uopt;
559 struct udf_sb_info *sbi = UDF_SB(sb);
464 560
465 uopt.flags = UDF_SB(sb)->s_flags; 561 uopt.flags = sbi->s_flags;
466 uopt.uid = UDF_SB(sb)->s_uid; 562 uopt.uid = sbi->s_uid;
467 uopt.gid = UDF_SB(sb)->s_gid; 563 uopt.gid = sbi->s_gid;
468 uopt.umask = UDF_SB(sb)->s_umask; 564 uopt.umask = sbi->s_umask;
469 565
470 if (!udf_parse_options(options, &uopt)) 566 if (!udf_parse_options(options, &uopt, true))
471 return -EINVAL; 567 return -EINVAL;
472 568
473 UDF_SB(sb)->s_flags = uopt.flags; 569 sbi->s_flags = uopt.flags;
474 UDF_SB(sb)->s_uid = uopt.uid; 570 sbi->s_uid = uopt.uid;
475 UDF_SB(sb)->s_gid = uopt.gid; 571 sbi->s_gid = uopt.gid;
476 UDF_SB(sb)->s_umask = uopt.umask; 572 sbi->s_umask = uopt.umask;
477 573
478 if (UDF_SB_LVIDBH(sb)) { 574 if (sbi->s_lvid_bh) {
479 int write_rev = le16_to_cpu(UDF_SB_LVIDIU(sb)->minUDFWriteRev); 575 int write_rev = le16_to_cpu(udf_sb_lvidiu(sbi)->minUDFWriteRev);
480 if (write_rev > UDF_MAX_WRITE_VERSION) 576 if (write_rev > UDF_MAX_WRITE_VERSION)
481 *flags |= MS_RDONLY; 577 *flags |= MS_RDONLY;
482 } 578 }
@@ -538,17 +634,19 @@ static int udf_vrs(struct super_block *sb, int silent)
538 int iso9660 = 0; 634 int iso9660 = 0;
539 int nsr02 = 0; 635 int nsr02 = 0;
540 int nsr03 = 0; 636 int nsr03 = 0;
637 struct udf_sb_info *sbi;
541 638
542 /* Block size must be a multiple of 512 */ 639 /* Block size must be a multiple of 512 */
543 if (sb->s_blocksize & 511) 640 if (sb->s_blocksize & 511)
544 return 0; 641 return 0;
642 sbi = UDF_SB(sb);
545 643
546 if (sb->s_blocksize < sizeof(struct volStructDesc)) 644 if (sb->s_blocksize < sizeof(struct volStructDesc))
547 sectorsize = sizeof(struct volStructDesc); 645 sectorsize = sizeof(struct volStructDesc);
548 else 646 else
549 sectorsize = sb->s_blocksize; 647 sectorsize = sb->s_blocksize;
550 648
551 sector += (UDF_SB_SESSION(sb) << sb->s_blocksize_bits); 649 sector += (sbi->s_session << sb->s_blocksize_bits);
552 650
553 udf_debug("Starting at sector %u (%ld byte sectors)\n", 651 udf_debug("Starting at sector %u (%ld byte sectors)\n",
554 (sector >> sb->s_blocksize_bits), sb->s_blocksize); 652 (sector >> sb->s_blocksize_bits), sb->s_blocksize);
@@ -561,47 +659,52 @@ static int udf_vrs(struct super_block *sb, int silent)
561 659
562 /* Look for ISO descriptors */ 660 /* Look for ISO descriptors */
563 vsd = (struct volStructDesc *)(bh->b_data + 661 vsd = (struct volStructDesc *)(bh->b_data +
564 (sector & (sb->s_blocksize - 1))); 662 (sector & (sb->s_blocksize - 1)));
565 663
566 if (vsd->stdIdent[0] == 0) { 664 if (vsd->stdIdent[0] == 0) {
567 brelse(bh); 665 brelse(bh);
568 break; 666 break;
569 } else if (!strncmp(vsd->stdIdent, VSD_STD_ID_CD001, VSD_STD_ID_LEN)) { 667 } else if (!strncmp(vsd->stdIdent, VSD_STD_ID_CD001,
668 VSD_STD_ID_LEN)) {
570 iso9660 = sector; 669 iso9660 = sector;
571 switch (vsd->structType) { 670 switch (vsd->structType) {
572 case 0: 671 case 0:
573 udf_debug("ISO9660 Boot Record found\n"); 672 udf_debug("ISO9660 Boot Record found\n");
574 break; 673 break;
575 case 1: 674 case 1:
576 udf_debug 675 udf_debug("ISO9660 Primary Volume Descriptor "
577 ("ISO9660 Primary Volume Descriptor found\n"); 676 "found\n");
578 break; 677 break;
579 case 2: 678 case 2:
580 udf_debug 679 udf_debug("ISO9660 Supplementary Volume "
581 ("ISO9660 Supplementary Volume Descriptor found\n"); 680 "Descriptor found\n");
582 break; 681 break;
583 case 3: 682 case 3:
584 udf_debug 683 udf_debug("ISO9660 Volume Partition Descriptor "
585 ("ISO9660 Volume Partition Descriptor found\n"); 684 "found\n");
586 break; 685 break;
587 case 255: 686 case 255:
588 udf_debug 687 udf_debug("ISO9660 Volume Descriptor Set "
589 ("ISO9660 Volume Descriptor Set Terminator found\n"); 688 "Terminator found\n");
590 break; 689 break;
591 default: 690 default:
592 udf_debug("ISO9660 VRS (%u) found\n", 691 udf_debug("ISO9660 VRS (%u) found\n",
593 vsd->structType); 692 vsd->structType);
594 break; 693 break;
595 } 694 }
596 } else if (!strncmp(vsd->stdIdent, VSD_STD_ID_BEA01, VSD_STD_ID_LEN)) { 695 } else if (!strncmp(vsd->stdIdent, VSD_STD_ID_BEA01,
597 } else if (!strncmp(vsd->stdIdent, VSD_STD_ID_TEA01, VSD_STD_ID_LEN)) { 696 VSD_STD_ID_LEN))
697 ; /* nothing */
698 else if (!strncmp(vsd->stdIdent, VSD_STD_ID_TEA01,
699 VSD_STD_ID_LEN)) {
598 brelse(bh); 700 brelse(bh);
599 break; 701 break;
600 } else if (!strncmp(vsd->stdIdent, VSD_STD_ID_NSR02, VSD_STD_ID_LEN)) { 702 } else if (!strncmp(vsd->stdIdent, VSD_STD_ID_NSR02,
703 VSD_STD_ID_LEN))
601 nsr02 = sector; 704 nsr02 = sector;
602 } else if (!strncmp(vsd->stdIdent, VSD_STD_ID_NSR03, VSD_STD_ID_LEN)) { 705 else if (!strncmp(vsd->stdIdent, VSD_STD_ID_NSR03,
706 VSD_STD_ID_LEN))
603 nsr03 = sector; 707 nsr03 = sector;
604 }
605 brelse(bh); 708 brelse(bh);
606 } 709 }
607 710
@@ -609,7 +712,7 @@ static int udf_vrs(struct super_block *sb, int silent)
609 return nsr03; 712 return nsr03;
610 else if (nsr02) 713 else if (nsr02)
611 return nsr02; 714 return nsr02;
612 else if (sector - (UDF_SB_SESSION(sb) << sb->s_blocksize_bits) == 32768) 715 else if (sector - (sbi->s_session << sb->s_blocksize_bits) == 32768)
613 return -1; 716 return -1;
614 else 717 else
615 return 0; 718 return 0;
@@ -634,11 +737,15 @@ static int udf_vrs(struct super_block *sb, int silent)
634 */ 737 */
635static void udf_find_anchor(struct super_block *sb) 738static void udf_find_anchor(struct super_block *sb)
636{ 739{
637 int lastblock = UDF_SB_LASTBLOCK(sb); 740 int lastblock;
638 struct buffer_head *bh = NULL; 741 struct buffer_head *bh = NULL;
639 uint16_t ident; 742 uint16_t ident;
640 uint32_t location; 743 uint32_t location;
641 int i; 744 int i;
745 struct udf_sb_info *sbi;
746
747 sbi = UDF_SB(sb);
748 lastblock = sbi->s_last_block;
642 749
643 if (lastblock) { 750 if (lastblock) {
644 int varlastblock = udf_variable_to_fixed(lastblock); 751 int varlastblock = udf_variable_to_fixed(lastblock);
@@ -658,57 +765,83 @@ static void udf_find_anchor(struct super_block *sb)
658 * however, if the disc isn't closed, it could be 512 */ 765 * however, if the disc isn't closed, it could be 512 */
659 766
660 for (i = 0; !lastblock && i < ARRAY_SIZE(last); i++) { 767 for (i = 0; !lastblock && i < ARRAY_SIZE(last); i++) {
661 if (last[i] < 0 || !(bh = sb_bread(sb, last[i]))) { 768 ident = location = 0;
662 ident = location = 0; 769 if (last[i] >= 0) {
663 } else { 770 bh = sb_bread(sb, last[i]);
664 ident = le16_to_cpu(((tag *)bh->b_data)->tagIdent); 771 if (bh) {
665 location = le32_to_cpu(((tag *)bh->b_data)->tagLocation); 772 tag *t = (tag *)bh->b_data;
666 brelse(bh); 773 ident = le16_to_cpu(t->tagIdent);
774 location = le32_to_cpu(t->tagLocation);
775 brelse(bh);
776 }
667 } 777 }
668 778
669 if (ident == TAG_IDENT_AVDP) { 779 if (ident == TAG_IDENT_AVDP) {
670 if (location == last[i] - UDF_SB_SESSION(sb)) { 780 if (location == last[i] - sbi->s_session) {
671 lastblock = UDF_SB_ANCHOR(sb)[0] = last[i] - UDF_SB_SESSION(sb); 781 lastblock = last[i] - sbi->s_session;
672 UDF_SB_ANCHOR(sb)[1] = last[i] - 256 - UDF_SB_SESSION(sb); 782 sbi->s_anchor[0] = lastblock;
673 } else if (location == udf_variable_to_fixed(last[i]) - UDF_SB_SESSION(sb)) { 783 sbi->s_anchor[1] = lastblock - 256;
784 } else if (location ==
785 udf_variable_to_fixed(last[i]) -
786 sbi->s_session) {
674 UDF_SET_FLAG(sb, UDF_FLAG_VARCONV); 787 UDF_SET_FLAG(sb, UDF_FLAG_VARCONV);
675 lastblock = UDF_SB_ANCHOR(sb)[0] = udf_variable_to_fixed(last[i]) - UDF_SB_SESSION(sb); 788 lastblock =
676 UDF_SB_ANCHOR(sb)[1] = lastblock - 256 - UDF_SB_SESSION(sb); 789 udf_variable_to_fixed(last[i]) -
790 sbi->s_session;
791 sbi->s_anchor[0] = lastblock;
792 sbi->s_anchor[1] = lastblock - 256 -
793 sbi->s_session;
677 } else { 794 } else {
678 udf_debug("Anchor found at block %d, location mismatch %d.\n", 795 udf_debug("Anchor found at block %d, "
796 "location mismatch %d.\n",
679 last[i], location); 797 last[i], location);
680 } 798 }
681 } else if (ident == TAG_IDENT_FE || ident == TAG_IDENT_EFE) { 799 } else if (ident == TAG_IDENT_FE ||
800 ident == TAG_IDENT_EFE) {
682 lastblock = last[i]; 801 lastblock = last[i];
683 UDF_SB_ANCHOR(sb)[3] = 512; 802 sbi->s_anchor[3] = 512;
684 } else { 803 } else {
685 if (last[i] < 256 || !(bh = sb_bread(sb, last[i] - 256))) { 804 ident = location = 0;
686 ident = location = 0; 805 if (last[i] >= 256) {
687 } else { 806 bh = sb_bread(sb, last[i] - 256);
688 ident = le16_to_cpu(((tag *)bh->b_data)->tagIdent); 807 if (bh) {
689 location = le32_to_cpu(((tag *)bh->b_data)->tagLocation); 808 tag *t = (tag *)bh->b_data;
690 brelse(bh); 809 ident = le16_to_cpu(
810 t->tagIdent);
811 location = le32_to_cpu(
812 t->tagLocation);
813 brelse(bh);
814 }
691 } 815 }
692 816
693 if (ident == TAG_IDENT_AVDP && 817 if (ident == TAG_IDENT_AVDP &&
694 location == last[i] - 256 - UDF_SB_SESSION(sb)) { 818 location == last[i] - 256 -
819 sbi->s_session) {
695 lastblock = last[i]; 820 lastblock = last[i];
696 UDF_SB_ANCHOR(sb)[1] = last[i] - 256; 821 sbi->s_anchor[1] = last[i] - 256;
697 } else { 822 } else {
698 if (last[i] < 312 + UDF_SB_SESSION(sb) || 823 ident = location = 0;
699 !(bh = sb_bread(sb, last[i] - 312 - UDF_SB_SESSION(sb)))) { 824 if (last[i] >= 312 + sbi->s_session) {
700 ident = location = 0; 825 bh = sb_bread(sb,
701 } else { 826 last[i] - 312 -
702 ident = le16_to_cpu(((tag *)bh->b_data)->tagIdent); 827 sbi->s_session);
703 location = le32_to_cpu(((tag *)bh->b_data)->tagLocation); 828 if (bh) {
704 brelse(bh); 829 tag *t = (tag *)
830 bh->b_data;
831 ident = le16_to_cpu(
832 t->tagIdent);
833 location = le32_to_cpu(
834 t->tagLocation);
835 brelse(bh);
836 }
705 } 837 }
706 838
707 if (ident == TAG_IDENT_AVDP && 839 if (ident == TAG_IDENT_AVDP &&
708 location == udf_variable_to_fixed(last[i]) - 256) { 840 location == udf_variable_to_fixed(last[i]) - 256) {
709 UDF_SET_FLAG(sb, UDF_FLAG_VARCONV); 841 UDF_SET_FLAG(sb,
842 UDF_FLAG_VARCONV);
710 lastblock = udf_variable_to_fixed(last[i]); 843 lastblock = udf_variable_to_fixed(last[i]);
711 UDF_SB_ANCHOR(sb)[1] = lastblock - 256; 844 sbi->s_anchor[1] = lastblock - 256;
712 } 845 }
713 } 846 }
714 } 847 }
@@ -716,10 +849,12 @@ static void udf_find_anchor(struct super_block *sb)
716 } 849 }
717 850
718 if (!lastblock) { 851 if (!lastblock) {
719 /* We havn't found the lastblock. check 312 */ 852 /* We haven't found the lastblock. check 312 */
720 if ((bh = sb_bread(sb, 312 + UDF_SB_SESSION(sb)))) { 853 bh = sb_bread(sb, 312 + sbi->s_session);
721 ident = le16_to_cpu(((tag *)bh->b_data)->tagIdent); 854 if (bh) {
722 location = le32_to_cpu(((tag *)bh->b_data)->tagLocation); 855 tag *t = (tag *)bh->b_data;
856 ident = le16_to_cpu(t->tagIdent);
857 location = le32_to_cpu(t->tagLocation);
723 brelse(bh); 858 brelse(bh);
724 859
725 if (ident == TAG_IDENT_AVDP && location == 256) 860 if (ident == TAG_IDENT_AVDP && location == 256)
@@ -727,29 +862,33 @@ static void udf_find_anchor(struct super_block *sb)
727 } 862 }
728 } 863 }
729 864
730 for (i = 0; i < ARRAY_SIZE(UDF_SB_ANCHOR(sb)); i++) { 865 for (i = 0; i < ARRAY_SIZE(sbi->s_anchor); i++) {
731 if (UDF_SB_ANCHOR(sb)[i]) { 866 if (sbi->s_anchor[i]) {
732 if (!(bh = udf_read_tagged(sb, UDF_SB_ANCHOR(sb)[i], 867 bh = udf_read_tagged(sb, sbi->s_anchor[i],
733 UDF_SB_ANCHOR(sb)[i], &ident))) { 868 sbi->s_anchor[i], &ident);
734 UDF_SB_ANCHOR(sb)[i] = 0; 869 if (!bh)
735 } else { 870 sbi->s_anchor[i] = 0;
871 else {
736 brelse(bh); 872 brelse(bh);
737 if ((ident != TAG_IDENT_AVDP) && 873 if ((ident != TAG_IDENT_AVDP) &&
738 (i || (ident != TAG_IDENT_FE && ident != TAG_IDENT_EFE))) { 874 (i || (ident != TAG_IDENT_FE &&
739 UDF_SB_ANCHOR(sb)[i] = 0; 875 ident != TAG_IDENT_EFE)))
740 } 876 sbi->s_anchor[i] = 0;
741 } 877 }
742 } 878 }
743 } 879 }
744 880
745 UDF_SB_LASTBLOCK(sb) = lastblock; 881 sbi->s_last_block = lastblock;
746} 882}
747 883
748static int udf_find_fileset(struct super_block *sb, kernel_lb_addr *fileset, kernel_lb_addr *root) 884static int udf_find_fileset(struct super_block *sb,
885 kernel_lb_addr *fileset,
886 kernel_lb_addr *root)
749{ 887{
750 struct buffer_head *bh = NULL; 888 struct buffer_head *bh = NULL;
751 long lastblock; 889 long lastblock;
752 uint16_t ident; 890 uint16_t ident;
891 struct udf_sb_info *sbi;
753 892
754 if (fileset->logicalBlockNum != 0xFFFFFFFF || 893 if (fileset->logicalBlockNum != 0xFFFFFFFF ||
755 fileset->partitionReferenceNum != 0xFFFF) { 894 fileset->partitionReferenceNum != 0xFFFF) {
@@ -764,22 +903,27 @@ static int udf_find_fileset(struct super_block *sb, kernel_lb_addr *fileset, ker
764 903
765 } 904 }
766 905
767 if (!bh) { /* Search backwards through the partitions */ 906 sbi = UDF_SB(sb);
907 if (!bh) {
908 /* Search backwards through the partitions */
768 kernel_lb_addr newfileset; 909 kernel_lb_addr newfileset;
769 910
770/* --> cvg: FIXME - is it reasonable? */ 911/* --> cvg: FIXME - is it reasonable? */
771 return 1; 912 return 1;
772 913
773 for (newfileset.partitionReferenceNum = UDF_SB_NUMPARTS(sb) - 1; 914 for (newfileset.partitionReferenceNum = sbi->s_partitions - 1;
774 (newfileset.partitionReferenceNum != 0xFFFF && 915 (newfileset.partitionReferenceNum != 0xFFFF &&
775 fileset->logicalBlockNum == 0xFFFFFFFF && 916 fileset->logicalBlockNum == 0xFFFFFFFF &&
776 fileset->partitionReferenceNum == 0xFFFF); 917 fileset->partitionReferenceNum == 0xFFFF);
777 newfileset.partitionReferenceNum--) { 918 newfileset.partitionReferenceNum--) {
778 lastblock = UDF_SB_PARTLEN(sb, newfileset.partitionReferenceNum); 919 lastblock = sbi->s_partmaps
920 [newfileset.partitionReferenceNum]
921 .s_partition_len;
779 newfileset.logicalBlockNum = 0; 922 newfileset.logicalBlockNum = 0;
780 923
781 do { 924 do {
782 bh = udf_read_ptagged(sb, newfileset, 0, &ident); 925 bh = udf_read_ptagged(sb, newfileset, 0,
926 &ident);
783 if (!bh) { 927 if (!bh) {
784 newfileset.logicalBlockNum++; 928 newfileset.logicalBlockNum++;
785 continue; 929 continue;
@@ -789,11 +933,12 @@ static int udf_find_fileset(struct super_block *sb, kernel_lb_addr *fileset, ker
789 case TAG_IDENT_SBD: 933 case TAG_IDENT_SBD:
790 { 934 {
791 struct spaceBitmapDesc *sp; 935 struct spaceBitmapDesc *sp;
792 sp = (struct spaceBitmapDesc *)bh->b_data; 936 sp = (struct spaceBitmapDesc *)
937 bh->b_data;
793 newfileset.logicalBlockNum += 1 + 938 newfileset.logicalBlockNum += 1 +
794 ((le32_to_cpu(sp->numOfBytes) + 939 ((le32_to_cpu(sp->numOfBytes) +
795 sizeof(struct spaceBitmapDesc) - 1) 940 sizeof(struct spaceBitmapDesc)
796 >> sb->s_blocksize_bits); 941 - 1) >> sb->s_blocksize_bits);
797 brelse(bh); 942 brelse(bh);
798 break; 943 break;
799 } 944 }
@@ -818,7 +963,7 @@ static int udf_find_fileset(struct super_block *sb, kernel_lb_addr *fileset, ker
818 fileset->logicalBlockNum, 963 fileset->logicalBlockNum,
819 fileset->partitionReferenceNum); 964 fileset->partitionReferenceNum);
820 965
821 UDF_SB_PARTITION(sb) = fileset->partitionReferenceNum; 966 sbi->s_partition = fileset->partitionReferenceNum;
822 udf_load_fileset(sb, bh, root); 967 udf_load_fileset(sb, bh, root);
823 brelse(bh); 968 brelse(bh);
824 return 0; 969 return 0;
@@ -840,26 +985,26 @@ static void udf_load_pvoldesc(struct super_block *sb, struct buffer_head *bh)
840 lets_to_cpu(pvoldesc->recordingDateAndTime))) { 985 lets_to_cpu(pvoldesc->recordingDateAndTime))) {
841 kernel_timestamp ts; 986 kernel_timestamp ts;
842 ts = lets_to_cpu(pvoldesc->recordingDateAndTime); 987 ts = lets_to_cpu(pvoldesc->recordingDateAndTime);
843 udf_debug("recording time %ld/%ld, %04u/%02u/%02u %02u:%02u (%x)\n", 988 udf_debug("recording time %ld/%ld, %04u/%02u/%02u"
989 " %02u:%02u (%x)\n",
844 recording, recording_usec, 990 recording, recording_usec,
845 ts.year, ts.month, ts.day, ts.hour, 991 ts.year, ts.month, ts.day, ts.hour,
846 ts.minute, ts.typeAndTimezone); 992 ts.minute, ts.typeAndTimezone);
847 UDF_SB_RECORDTIME(sb).tv_sec = recording; 993 UDF_SB(sb)->s_record_time.tv_sec = recording;
848 UDF_SB_RECORDTIME(sb).tv_nsec = recording_usec * 1000; 994 UDF_SB(sb)->s_record_time.tv_nsec = recording_usec * 1000;
849 } 995 }
850 996
851 if (!udf_build_ustr(&instr, pvoldesc->volIdent, 32)) { 997 if (!udf_build_ustr(&instr, pvoldesc->volIdent, 32))
852 if (udf_CS0toUTF8(&outstr, &instr)) { 998 if (udf_CS0toUTF8(&outstr, &instr)) {
853 strncpy(UDF_SB_VOLIDENT(sb), outstr.u_name, 999 strncpy(UDF_SB(sb)->s_volume_ident, outstr.u_name,
854 outstr.u_len > 31 ? 31 : outstr.u_len); 1000 outstr.u_len > 31 ? 31 : outstr.u_len);
855 udf_debug("volIdent[] = '%s'\n", UDF_SB_VOLIDENT(sb)); 1001 udf_debug("volIdent[] = '%s'\n",
1002 UDF_SB(sb)->s_volume_ident);
856 } 1003 }
857 }
858 1004
859 if (!udf_build_ustr(&instr, pvoldesc->volSetIdent, 128)) { 1005 if (!udf_build_ustr(&instr, pvoldesc->volSetIdent, 128))
860 if (udf_CS0toUTF8(&outstr, &instr)) 1006 if (udf_CS0toUTF8(&outstr, &instr))
861 udf_debug("volSetIdent[] = '%s'\n", outstr.u_name); 1007 udf_debug("volSetIdent[] = '%s'\n", outstr.u_name);
862 }
863} 1008}
864 1009
865static void udf_load_fileset(struct super_block *sb, struct buffer_head *bh, 1010static void udf_load_fileset(struct super_block *sb, struct buffer_head *bh,
@@ -871,65 +1016,124 @@ static void udf_load_fileset(struct super_block *sb, struct buffer_head *bh,
871 1016
872 *root = lelb_to_cpu(fset->rootDirectoryICB.extLocation); 1017 *root = lelb_to_cpu(fset->rootDirectoryICB.extLocation);
873 1018
874 UDF_SB_SERIALNUM(sb) = le16_to_cpu(fset->descTag.tagSerialNum); 1019 UDF_SB(sb)->s_serial_number = le16_to_cpu(fset->descTag.tagSerialNum);
875 1020
876 udf_debug("Rootdir at block=%d, partition=%d\n", 1021 udf_debug("Rootdir at block=%d, partition=%d\n",
877 root->logicalBlockNum, root->partitionReferenceNum); 1022 root->logicalBlockNum, root->partitionReferenceNum);
878} 1023}
879 1024
1025int udf_compute_nr_groups(struct super_block *sb, u32 partition)
1026{
1027 struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];
1028 return (map->s_partition_len +
1029 (sizeof(struct spaceBitmapDesc) << 3) +
1030 (sb->s_blocksize * 8) - 1) /
1031 (sb->s_blocksize * 8);
1032}
1033
1034static struct udf_bitmap *udf_sb_alloc_bitmap(struct super_block *sb, u32 index)
1035{
1036 struct udf_bitmap *bitmap;
1037 int nr_groups;
1038 int size;
1039
1040 nr_groups = udf_compute_nr_groups(sb, index);
1041 size = sizeof(struct udf_bitmap) +
1042 (sizeof(struct buffer_head *) * nr_groups);
1043
1044 if (size <= PAGE_SIZE)
1045 bitmap = kmalloc(size, GFP_KERNEL);
1046 else
1047 bitmap = vmalloc(size); /* TODO: get rid of vmalloc */
1048
1049 if (bitmap == NULL) {
1050 udf_error(sb, __FUNCTION__,
1051 "Unable to allocate space for bitmap "
1052 "and %d buffer_head pointers", nr_groups);
1053 return NULL;
1054 }
1055
1056 memset(bitmap, 0x00, size);
1057 bitmap->s_block_bitmap = (struct buffer_head **)(bitmap + 1);
1058 bitmap->s_nr_groups = nr_groups;
1059 return bitmap;
1060}
1061
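/*
 * Sketch of the group count computed by udf_compute_nr_groups() above: one
 * in-memory "group" per block of the on-disc space bitmap, where each block
 * carries blocksize * 8 allocation bits and the bitmap is preceded by a
 * spaceBitmapDesc header.  The concrete numbers below are assumptions made
 * for the example only.
 */
#include <stdio.h>

static unsigned int nr_groups(unsigned long partition_len_blocks,
			      unsigned int blocksize,
			      unsigned int bitmap_desc_bytes)
{
	unsigned long bits_per_block = blocksize * 8UL;
	unsigned long total_bits = partition_len_blocks +
				   bitmap_desc_bytes * 8UL;

	return (total_bits + bits_per_block - 1) / bits_per_block; /* ceil */
}

int main(void)
{
	/* e.g. a partition of 524288 blocks, 2 KiB block size, 24-byte header */
	printf("%u groups\n", nr_groups(524288, 2048, 24));
	return 0;
}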
880static int udf_load_partdesc(struct super_block *sb, struct buffer_head *bh) 1062static int udf_load_partdesc(struct super_block *sb, struct buffer_head *bh)
881{ 1063{
882 struct partitionDesc *p; 1064 struct partitionDesc *p;
883 int i; 1065 int i;
1066 struct udf_part_map *map;
1067 struct udf_sb_info *sbi;
884 1068
885 p = (struct partitionDesc *)bh->b_data; 1069 p = (struct partitionDesc *)bh->b_data;
1070 sbi = UDF_SB(sb);
886 1071
887 for (i = 0; i < UDF_SB_NUMPARTS(sb); i++) { 1072 for (i = 0; i < sbi->s_partitions; i++) {
1073 map = &sbi->s_partmaps[i];
888 udf_debug("Searching map: (%d == %d)\n", 1074 udf_debug("Searching map: (%d == %d)\n",
889 UDF_SB_PARTMAPS(sb)[i].s_partition_num, le16_to_cpu(p->partitionNumber)); 1075 map->s_partition_num,
890 if (UDF_SB_PARTMAPS(sb)[i].s_partition_num == le16_to_cpu(p->partitionNumber)) { 1076 le16_to_cpu(p->partitionNumber));
891 UDF_SB_PARTLEN(sb,i) = le32_to_cpu(p->partitionLength); /* blocks */ 1077 if (map->s_partition_num ==
892 UDF_SB_PARTROOT(sb,i) = le32_to_cpu(p->partitionStartingLocation); 1078 le16_to_cpu(p->partitionNumber)) {
893 if (le32_to_cpu(p->accessType) == PD_ACCESS_TYPE_READ_ONLY) 1079 map->s_partition_len =
894 UDF_SB_PARTFLAGS(sb,i) |= UDF_PART_FLAG_READ_ONLY; 1080 le32_to_cpu(p->partitionLength); /* blocks */
895 if (le32_to_cpu(p->accessType) == PD_ACCESS_TYPE_WRITE_ONCE) 1081 map->s_partition_root =
896 UDF_SB_PARTFLAGS(sb,i) |= UDF_PART_FLAG_WRITE_ONCE; 1082 le32_to_cpu(p->partitionStartingLocation);
897 if (le32_to_cpu(p->accessType) == PD_ACCESS_TYPE_REWRITABLE) 1083 if (p->accessType ==
898 UDF_SB_PARTFLAGS(sb,i) |= UDF_PART_FLAG_REWRITABLE; 1084 cpu_to_le32(PD_ACCESS_TYPE_READ_ONLY))
899 if (le32_to_cpu(p->accessType) == PD_ACCESS_TYPE_OVERWRITABLE) 1085 map->s_partition_flags |=
900 UDF_SB_PARTFLAGS(sb,i) |= UDF_PART_FLAG_OVERWRITABLE; 1086 UDF_PART_FLAG_READ_ONLY;
901 1087 if (p->accessType ==
902 if (!strcmp(p->partitionContents.ident, PD_PARTITION_CONTENTS_NSR02) || 1088 cpu_to_le32(PD_ACCESS_TYPE_WRITE_ONCE))
903 !strcmp(p->partitionContents.ident, PD_PARTITION_CONTENTS_NSR03)) { 1089 map->s_partition_flags |=
1090 UDF_PART_FLAG_WRITE_ONCE;
1091 if (p->accessType ==
1092 cpu_to_le32(PD_ACCESS_TYPE_REWRITABLE))
1093 map->s_partition_flags |=
1094 UDF_PART_FLAG_REWRITABLE;
1095 if (p->accessType ==
1096 cpu_to_le32(PD_ACCESS_TYPE_OVERWRITABLE))
1097 map->s_partition_flags |=
1098 UDF_PART_FLAG_OVERWRITABLE;
1099
1100 if (!strcmp(p->partitionContents.ident,
1101 PD_PARTITION_CONTENTS_NSR02) ||
1102 !strcmp(p->partitionContents.ident,
1103 PD_PARTITION_CONTENTS_NSR03)) {
904 struct partitionHeaderDesc *phd; 1104 struct partitionHeaderDesc *phd;
905 1105
906 phd = (struct partitionHeaderDesc *)(p->partitionContentsUse); 1106 phd = (struct partitionHeaderDesc *)
1107 (p->partitionContentsUse);
907 if (phd->unallocSpaceTable.extLength) { 1108 if (phd->unallocSpaceTable.extLength) {
908 kernel_lb_addr loc = { 1109 kernel_lb_addr loc = {
909 .logicalBlockNum = le32_to_cpu(phd->unallocSpaceTable.extPosition), 1110 .logicalBlockNum = le32_to_cpu(phd->unallocSpaceTable.extPosition),
910 .partitionReferenceNum = i, 1111 .partitionReferenceNum = i,
911 }; 1112 };
912 1113
913 UDF_SB_PARTMAPS(sb)[i].s_uspace.s_table = 1114 map->s_uspace.s_table =
914 udf_iget(sb, loc); 1115 udf_iget(sb, loc);
915 if (!UDF_SB_PARTMAPS(sb)[i].s_uspace.s_table) { 1116 if (!map->s_uspace.s_table) {
916 udf_debug("cannot load unallocSpaceTable (part %d)\n", i); 1117 udf_debug("cannot load unallocSpaceTable (part %d)\n", i);
917 return 1; 1118 return 1;
918 } 1119 }
919 UDF_SB_PARTFLAGS(sb,i) |= UDF_PART_FLAG_UNALLOC_TABLE; 1120 map->s_partition_flags |=
1121 UDF_PART_FLAG_UNALLOC_TABLE;
920 udf_debug("unallocSpaceTable (part %d) @ %ld\n", 1122 udf_debug("unallocSpaceTable (part %d) @ %ld\n",
921 i, UDF_SB_PARTMAPS(sb)[i].s_uspace.s_table->i_ino); 1123 i, map->s_uspace.s_table->i_ino);
922 } 1124 }
923 if (phd->unallocSpaceBitmap.extLength) { 1125 if (phd->unallocSpaceBitmap.extLength) {
924 UDF_SB_ALLOC_BITMAP(sb, i, s_uspace); 1126 struct udf_bitmap *bitmap =
925 if (UDF_SB_PARTMAPS(sb)[i].s_uspace.s_bitmap != NULL) { 1127 udf_sb_alloc_bitmap(sb, i);
926 UDF_SB_PARTMAPS(sb)[i].s_uspace.s_bitmap->s_extLength = 1128 map->s_uspace.s_bitmap = bitmap;
1129 if (bitmap != NULL) {
1130 bitmap->s_extLength =
927 le32_to_cpu(phd->unallocSpaceBitmap.extLength); 1131 le32_to_cpu(phd->unallocSpaceBitmap.extLength);
928 UDF_SB_PARTMAPS(sb)[i].s_uspace.s_bitmap->s_extPosition = 1132 bitmap->s_extPosition =
929 le32_to_cpu(phd->unallocSpaceBitmap.extPosition); 1133 le32_to_cpu(phd->unallocSpaceBitmap.extPosition);
930 UDF_SB_PARTFLAGS(sb,i) |= UDF_PART_FLAG_UNALLOC_BITMAP; 1134 map->s_partition_flags |= UDF_PART_FLAG_UNALLOC_BITMAP;
931 udf_debug("unallocSpaceBitmap (part %d) @ %d\n", 1135 udf_debug("unallocSpaceBitmap (part %d) @ %d\n",
932 i, UDF_SB_PARTMAPS(sb)[i].s_uspace.s_bitmap->s_extPosition); 1136 i, bitmap->s_extPosition);
933 } 1137 }
934 } 1138 }
935 if (phd->partitionIntegrityTable.extLength) 1139 if (phd->partitionIntegrityTable.extLength)
@@ -940,40 +1144,45 @@ static int udf_load_partdesc(struct super_block *sb, struct buffer_head *bh)
940 .partitionReferenceNum = i, 1144 .partitionReferenceNum = i,
941 }; 1145 };
942 1146
943 UDF_SB_PARTMAPS(sb)[i].s_fspace.s_table = 1147 map->s_fspace.s_table =
944 udf_iget(sb, loc); 1148 udf_iget(sb, loc);
945 if (!UDF_SB_PARTMAPS(sb)[i].s_fspace.s_table) { 1149 if (!map->s_fspace.s_table) {
946 udf_debug("cannot load freedSpaceTable (part %d)\n", i); 1150 udf_debug("cannot load freedSpaceTable (part %d)\n", i);
947 return 1; 1151 return 1;
948 } 1152 }
949 UDF_SB_PARTFLAGS(sb,i) |= UDF_PART_FLAG_FREED_TABLE; 1153 map->s_partition_flags |=
1154 UDF_PART_FLAG_FREED_TABLE;
950 udf_debug("freedSpaceTable (part %d) @ %ld\n", 1155 udf_debug("freedSpaceTable (part %d) @ %ld\n",
951 i, UDF_SB_PARTMAPS(sb)[i].s_fspace.s_table->i_ino); 1156 i, map->s_fspace.s_table->i_ino);
952 } 1157 }
953 if (phd->freedSpaceBitmap.extLength) { 1158 if (phd->freedSpaceBitmap.extLength) {
954 UDF_SB_ALLOC_BITMAP(sb, i, s_fspace); 1159 struct udf_bitmap *bitmap =
955 if (UDF_SB_PARTMAPS(sb)[i].s_fspace.s_bitmap != NULL) { 1160 udf_sb_alloc_bitmap(sb, i);
956 UDF_SB_PARTMAPS(sb)[i].s_fspace.s_bitmap->s_extLength = 1161 map->s_fspace.s_bitmap = bitmap;
1162 if (bitmap != NULL) {
1163 bitmap->s_extLength =
957 le32_to_cpu(phd->freedSpaceBitmap.extLength); 1164 le32_to_cpu(phd->freedSpaceBitmap.extLength);
958 UDF_SB_PARTMAPS(sb)[i].s_fspace.s_bitmap->s_extPosition = 1165 bitmap->s_extPosition =
959 le32_to_cpu(phd->freedSpaceBitmap.extPosition); 1166 le32_to_cpu(phd->freedSpaceBitmap.extPosition);
960 UDF_SB_PARTFLAGS(sb,i) |= UDF_PART_FLAG_FREED_BITMAP; 1167 map->s_partition_flags |= UDF_PART_FLAG_FREED_BITMAP;
961 udf_debug("freedSpaceBitmap (part %d) @ %d\n", 1168 udf_debug("freedSpaceBitmap (part %d) @ %d\n",
962 i, UDF_SB_PARTMAPS(sb)[i].s_fspace.s_bitmap->s_extPosition); 1169 i, bitmap->s_extPosition);
963 } 1170 }
964 } 1171 }
965 } 1172 }
966 break; 1173 break;
967 } 1174 }
968 } 1175 }
969 if (i == UDF_SB_NUMPARTS(sb)) { 1176 if (i == sbi->s_partitions)
970 udf_debug("Partition (%d) not found in partition map\n", 1177 udf_debug("Partition (%d) not found in partition map\n",
971 le16_to_cpu(p->partitionNumber)); 1178 le16_to_cpu(p->partitionNumber));
972 } else { 1179 else
973 udf_debug("Partition (%d:%d type %x) starts at physical %d, block length %d\n", 1180 udf_debug("Partition (%d:%d type %x) starts at physical %d, "
974 le16_to_cpu(p->partitionNumber), i, UDF_SB_PARTTYPE(sb,i), 1181 "block length %d\n",
975 UDF_SB_PARTROOT(sb,i), UDF_SB_PARTLEN(sb,i)); 1182 le16_to_cpu(p->partitionNumber), i,
976 } 1183 map->s_partition_type,
1184 map->s_partition_root,
1185 map->s_partition_len);
977 return 0; 1186 return 0;
978} 1187}
979 1188
@@ -983,70 +1192,105 @@ static int udf_load_logicalvol(struct super_block *sb, struct buffer_head *bh,
983 struct logicalVolDesc *lvd; 1192 struct logicalVolDesc *lvd;
984 int i, j, offset; 1193 int i, j, offset;
985 uint8_t type; 1194 uint8_t type;
1195 struct udf_sb_info *sbi = UDF_SB(sb);
1196 struct genericPartitionMap *gpm;
986 1197
987 lvd = (struct logicalVolDesc *)bh->b_data; 1198 lvd = (struct logicalVolDesc *)bh->b_data;
988 1199
989 UDF_SB_ALLOC_PARTMAPS(sb, le32_to_cpu(lvd->numPartitionMaps)); 1200 i = udf_sb_alloc_partition_maps(sb, le32_to_cpu(lvd->numPartitionMaps));
1201 if (i != 0)
1202 return i;
990 1203
991 for (i = 0, offset = 0; 1204 for (i = 0, offset = 0;
992 i < UDF_SB_NUMPARTS(sb) && offset < le32_to_cpu(lvd->mapTableLength); 1205 i < sbi->s_partitions && offset < le32_to_cpu(lvd->mapTableLength);
993 i++, offset += ((struct genericPartitionMap *)&(lvd->partitionMaps[offset]))->partitionMapLength) { 1206 i++, offset += gpm->partitionMapLength) {
994 type = ((struct genericPartitionMap *)&(lvd->partitionMaps[offset]))->partitionMapType; 1207 struct udf_part_map *map = &sbi->s_partmaps[i];
1208 gpm = (struct genericPartitionMap *)
1209 &(lvd->partitionMaps[offset]);
1210 type = gpm->partitionMapType;
995 if (type == 1) { 1211 if (type == 1) {
996 struct genericPartitionMap1 *gpm1 = (struct genericPartitionMap1 *)&(lvd->partitionMaps[offset]); 1212 struct genericPartitionMap1 *gpm1 =
997 UDF_SB_PARTTYPE(sb,i) = UDF_TYPE1_MAP15; 1213 (struct genericPartitionMap1 *)gpm;
998 UDF_SB_PARTVSN(sb,i) = le16_to_cpu(gpm1->volSeqNum); 1214 map->s_partition_type = UDF_TYPE1_MAP15;
999 UDF_SB_PARTNUM(sb,i) = le16_to_cpu(gpm1->partitionNum); 1215 map->s_volumeseqnum = le16_to_cpu(gpm1->volSeqNum);
1000 UDF_SB_PARTFUNC(sb,i) = NULL; 1216 map->s_partition_num = le16_to_cpu(gpm1->partitionNum);
1217 map->s_partition_func = NULL;
1001 } else if (type == 2) { 1218 } else if (type == 2) {
1002 struct udfPartitionMap2 *upm2 = (struct udfPartitionMap2 *)&(lvd->partitionMaps[offset]); 1219 struct udfPartitionMap2 *upm2 =
1003 if (!strncmp(upm2->partIdent.ident, UDF_ID_VIRTUAL, strlen(UDF_ID_VIRTUAL))) { 1220 (struct udfPartitionMap2 *)gpm;
1004 if (le16_to_cpu(((__le16 *)upm2->partIdent.identSuffix)[0]) == 0x0150) { 1221 if (!strncmp(upm2->partIdent.ident, UDF_ID_VIRTUAL,
1005 UDF_SB_PARTTYPE(sb,i) = UDF_VIRTUAL_MAP15; 1222 strlen(UDF_ID_VIRTUAL))) {
1006 UDF_SB_PARTFUNC(sb,i) = udf_get_pblock_virt15; 1223 u16 suf =
1007 } else if (le16_to_cpu(((__le16 *)upm2->partIdent.identSuffix)[0]) == 0x0200) { 1224 le16_to_cpu(((__le16 *)upm2->partIdent.
1008 UDF_SB_PARTTYPE(sb,i) = UDF_VIRTUAL_MAP20; 1225 identSuffix)[0]);
1009 UDF_SB_PARTFUNC(sb,i) = udf_get_pblock_virt20; 1226 if (suf == 0x0150) {
1227 map->s_partition_type =
1228 UDF_VIRTUAL_MAP15;
1229 map->s_partition_func =
1230 udf_get_pblock_virt15;
1231 } else if (suf == 0x0200) {
1232 map->s_partition_type =
1233 UDF_VIRTUAL_MAP20;
1234 map->s_partition_func =
1235 udf_get_pblock_virt20;
1010 } 1236 }
1011 } else if (!strncmp(upm2->partIdent.ident, UDF_ID_SPARABLE, strlen(UDF_ID_SPARABLE))) { 1237 } else if (!strncmp(upm2->partIdent.ident,
1238 UDF_ID_SPARABLE,
1239 strlen(UDF_ID_SPARABLE))) {
1012 uint32_t loc; 1240 uint32_t loc;
1013 uint16_t ident; 1241 uint16_t ident;
1014 struct sparingTable *st; 1242 struct sparingTable *st;
1015 struct sparablePartitionMap *spm = (struct sparablePartitionMap *)&(lvd->partitionMaps[offset]); 1243 struct sparablePartitionMap *spm =
1244 (struct sparablePartitionMap *)gpm;
1016 1245
1017 UDF_SB_PARTTYPE(sb,i) = UDF_SPARABLE_MAP15; 1246 map->s_partition_type = UDF_SPARABLE_MAP15;
1018 UDF_SB_TYPESPAR(sb,i).s_packet_len = le16_to_cpu(spm->packetLength); 1247 map->s_type_specific.s_sparing.s_packet_len =
1248 le16_to_cpu(spm->packetLength);
1019 for (j = 0; j < spm->numSparingTables; j++) { 1249 for (j = 0; j < spm->numSparingTables; j++) {
1020 loc = le32_to_cpu(spm->locSparingTable[j]); 1250 struct buffer_head *bh2;
1021 UDF_SB_TYPESPAR(sb,i).s_spar_map[j] = 1251
1022 udf_read_tagged(sb, loc, loc, &ident); 1252 loc = le32_to_cpu(
1023 if (UDF_SB_TYPESPAR(sb,i).s_spar_map[j] != NULL) { 1253 spm->locSparingTable[j]);
1024 st = (struct sparingTable *)UDF_SB_TYPESPAR(sb,i).s_spar_map[j]->b_data; 1254 bh2 = udf_read_tagged(sb, loc, loc,
1025 if (ident != 0 || 1255 &ident);
1026 strncmp(st->sparingIdent.ident, UDF_ID_SPARING, strlen(UDF_ID_SPARING))) { 1256 map->s_type_specific.s_sparing.
1027 brelse(UDF_SB_TYPESPAR(sb,i).s_spar_map[j]); 1257 s_spar_map[j] = bh2;
1028 UDF_SB_TYPESPAR(sb,i).s_spar_map[j] = NULL; 1258
1259 if (bh2 != NULL) {
1260 st = (struct sparingTable *)
1261 bh2->b_data;
1262 if (ident != 0 || strncmp(
1263 st->sparingIdent.ident,
1264 UDF_ID_SPARING,
1265 strlen(UDF_ID_SPARING))) {
1266 brelse(bh2);
1267 map->s_type_specific.
1268 s_sparing.
1269 s_spar_map[j] =
1270 NULL;
1029 } 1271 }
1030 } 1272 }
1031 } 1273 }
1032 UDF_SB_PARTFUNC(sb,i) = udf_get_pblock_spar15; 1274 map->s_partition_func = udf_get_pblock_spar15;
1033 } else { 1275 } else {
1034 udf_debug("Unknown ident: %s\n", upm2->partIdent.ident); 1276 udf_debug("Unknown ident: %s\n",
1277 upm2->partIdent.ident);
1035 continue; 1278 continue;
1036 } 1279 }
1037 UDF_SB_PARTVSN(sb,i) = le16_to_cpu(upm2->volSeqNum); 1280 map->s_volumeseqnum = le16_to_cpu(upm2->volSeqNum);
1038 UDF_SB_PARTNUM(sb,i) = le16_to_cpu(upm2->partitionNum); 1281 map->s_partition_num = le16_to_cpu(upm2->partitionNum);
1039 } 1282 }
1040 udf_debug("Partition (%d:%d) type %d on volume %d\n", 1283 udf_debug("Partition (%d:%d) type %d on volume %d\n",
1041 i, UDF_SB_PARTNUM(sb,i), type, UDF_SB_PARTVSN(sb,i)); 1284 i, map->s_partition_num, type,
1285 map->s_volumeseqnum);
1042 } 1286 }
1043 1287
1044 if (fileset) { 1288 if (fileset) {
1045 long_ad *la = (long_ad *)&(lvd->logicalVolContentsUse[0]); 1289 long_ad *la = (long_ad *)&(lvd->logicalVolContentsUse[0]);
1046 1290
1047 *fileset = lelb_to_cpu(la->extLocation); 1291 *fileset = lelb_to_cpu(la->extLocation);
1048 udf_debug("FileSet found in LogicalVolDesc at block=%d, partition=%d\n", 1292 udf_debug("FileSet found in LogicalVolDesc at block=%d, "
1049 fileset->logicalBlockNum, 1293 "partition=%d\n", fileset->logicalBlockNum,
1050 fileset->partitionReferenceNum); 1294 fileset->partitionReferenceNum);
1051 } 1295 }
1052 if (lvd->integritySeqExt.extLength) 1296 if (lvd->integritySeqExt.extLength)
@@ -1063,22 +1307,26 @@ static void udf_load_logicalvolint(struct super_block *sb, kernel_extent_ad loc)
1063{ 1307{
1064 struct buffer_head *bh = NULL; 1308 struct buffer_head *bh = NULL;
1065 uint16_t ident; 1309 uint16_t ident;
1310 struct udf_sb_info *sbi = UDF_SB(sb);
1311 struct logicalVolIntegrityDesc *lvid;
1066 1312
1067 while (loc.extLength > 0 && 1313 while (loc.extLength > 0 &&
1068 (bh = udf_read_tagged(sb, loc.extLocation, 1314 (bh = udf_read_tagged(sb, loc.extLocation,
1069 loc.extLocation, &ident)) && 1315 loc.extLocation, &ident)) &&
1070 ident == TAG_IDENT_LVID) { 1316 ident == TAG_IDENT_LVID) {
1071 UDF_SB_LVIDBH(sb) = bh; 1317 sbi->s_lvid_bh = bh;
1318 lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
1072 1319
1073 if (UDF_SB_LVID(sb)->nextIntegrityExt.extLength) 1320 if (lvid->nextIntegrityExt.extLength)
1074 udf_load_logicalvolint(sb, leea_to_cpu(UDF_SB_LVID(sb)->nextIntegrityExt)); 1321 udf_load_logicalvolint(sb,
1322 leea_to_cpu(lvid->nextIntegrityExt));
1075 1323
1076 if (UDF_SB_LVIDBH(sb) != bh) 1324 if (sbi->s_lvid_bh != bh)
1077 brelse(bh); 1325 brelse(bh);
1078 loc.extLength -= sb->s_blocksize; 1326 loc.extLength -= sb->s_blocksize;
1079 loc.extLocation++; 1327 loc.extLocation++;
1080 } 1328 }
1081 if (UDF_SB_LVIDBH(sb) != bh) 1329 if (sbi->s_lvid_bh != bh)
1082 brelse(bh); 1330 brelse(bh);
1083} 1331}
1084 1332
@@ -1097,11 +1345,12 @@ static void udf_load_logicalvolint(struct super_block *sb, kernel_extent_ad loc)
1097 * July 1, 1997 - Andrew E. Mileski 1345 * July 1, 1997 - Andrew E. Mileski
1098 * Written, tested, and released. 1346 * Written, tested, and released.
1099 */ 1347 */
1100static int udf_process_sequence(struct super_block *sb, long block, long lastblock, 1348static int udf_process_sequence(struct super_block *sb, long block,
1101 kernel_lb_addr *fileset) 1349 long lastblock, kernel_lb_addr *fileset)
1102{ 1350{
1103 struct buffer_head *bh = NULL; 1351 struct buffer_head *bh = NULL;
1104 struct udf_vds_record vds[VDS_POS_LENGTH]; 1352 struct udf_vds_record vds[VDS_POS_LENGTH];
1353 struct udf_vds_record *curr;
1105 struct generic_desc *gd; 1354 struct generic_desc *gd;
1106 struct volDescPtr *vdp; 1355 struct volDescPtr *vdp;
1107 int done = 0; 1356 int done = 0;
@@ -1124,43 +1373,51 @@ static int udf_process_sequence(struct super_block *sb, long block, long lastblo
1124 vdsn = le32_to_cpu(gd->volDescSeqNum); 1373 vdsn = le32_to_cpu(gd->volDescSeqNum);
1125 switch (ident) { 1374 switch (ident) {
1126 case TAG_IDENT_PVD: /* ISO 13346 3/10.1 */ 1375 case TAG_IDENT_PVD: /* ISO 13346 3/10.1 */
1127 if (vdsn >= vds[VDS_POS_PRIMARY_VOL_DESC].volDescSeqNum) { 1376 curr = &vds[VDS_POS_PRIMARY_VOL_DESC];
1128 vds[VDS_POS_PRIMARY_VOL_DESC].volDescSeqNum = vdsn; 1377 if (vdsn >= curr->volDescSeqNum) {
1129 vds[VDS_POS_PRIMARY_VOL_DESC].block = block; 1378 curr->volDescSeqNum = vdsn;
1379 curr->block = block;
1130 } 1380 }
1131 break; 1381 break;
1132 case TAG_IDENT_VDP: /* ISO 13346 3/10.3 */ 1382 case TAG_IDENT_VDP: /* ISO 13346 3/10.3 */
1133 if (vdsn >= vds[VDS_POS_VOL_DESC_PTR].volDescSeqNum) { 1383 curr = &vds[VDS_POS_VOL_DESC_PTR];
1134 vds[VDS_POS_VOL_DESC_PTR].volDescSeqNum = vdsn; 1384 if (vdsn >= curr->volDescSeqNum) {
1135 vds[VDS_POS_VOL_DESC_PTR].block = block; 1385 curr->volDescSeqNum = vdsn;
1386 curr->block = block;
1136 1387
1137 vdp = (struct volDescPtr *)bh->b_data; 1388 vdp = (struct volDescPtr *)bh->b_data;
1138 next_s = le32_to_cpu(vdp->nextVolDescSeqExt.extLocation); 1389 next_s = le32_to_cpu(
1139 next_e = le32_to_cpu(vdp->nextVolDescSeqExt.extLength); 1390 vdp->nextVolDescSeqExt.extLocation);
1391 next_e = le32_to_cpu(
1392 vdp->nextVolDescSeqExt.extLength);
1140 next_e = next_e >> sb->s_blocksize_bits; 1393 next_e = next_e >> sb->s_blocksize_bits;
1141 next_e += next_s; 1394 next_e += next_s;
1142 } 1395 }
1143 break; 1396 break;
1144 case TAG_IDENT_IUVD: /* ISO 13346 3/10.4 */ 1397 case TAG_IDENT_IUVD: /* ISO 13346 3/10.4 */
1145 if (vdsn >= vds[VDS_POS_IMP_USE_VOL_DESC].volDescSeqNum) { 1398 curr = &vds[VDS_POS_IMP_USE_VOL_DESC];
1146 vds[VDS_POS_IMP_USE_VOL_DESC].volDescSeqNum = vdsn; 1399 if (vdsn >= curr->volDescSeqNum) {
1147 vds[VDS_POS_IMP_USE_VOL_DESC].block = block; 1400 curr->volDescSeqNum = vdsn;
1401 curr->block = block;
1148 } 1402 }
1149 break; 1403 break;
1150 case TAG_IDENT_PD: /* ISO 13346 3/10.5 */ 1404 case TAG_IDENT_PD: /* ISO 13346 3/10.5 */
1151 if (!vds[VDS_POS_PARTITION_DESC].block) 1405 curr = &vds[VDS_POS_PARTITION_DESC];
1152 vds[VDS_POS_PARTITION_DESC].block = block; 1406 if (!curr->block)
1407 curr->block = block;
1153 break; 1408 break;
1154 case TAG_IDENT_LVD: /* ISO 13346 3/10.6 */ 1409 case TAG_IDENT_LVD: /* ISO 13346 3/10.6 */
1155 if (vdsn >= vds[VDS_POS_LOGICAL_VOL_DESC].volDescSeqNum) { 1410 curr = &vds[VDS_POS_LOGICAL_VOL_DESC];
1156 vds[VDS_POS_LOGICAL_VOL_DESC].volDescSeqNum = vdsn; 1411 if (vdsn >= curr->volDescSeqNum) {
1157 vds[VDS_POS_LOGICAL_VOL_DESC].block = block; 1412 curr->volDescSeqNum = vdsn;
1413 curr->block = block;
1158 } 1414 }
1159 break; 1415 break;
1160 case TAG_IDENT_USD: /* ISO 13346 3/10.8 */ 1416 case TAG_IDENT_USD: /* ISO 13346 3/10.8 */
1161 if (vdsn >= vds[VDS_POS_UNALLOC_SPACE_DESC].volDescSeqNum) { 1417 curr = &vds[VDS_POS_UNALLOC_SPACE_DESC];
1162 vds[VDS_POS_UNALLOC_SPACE_DESC].volDescSeqNum = vdsn; 1418 if (vdsn >= curr->volDescSeqNum) {
1163 vds[VDS_POS_UNALLOC_SPACE_DESC].block = block; 1419 curr->volDescSeqNum = vdsn;
1420 curr->block = block;
1164 } 1421 }
1165 break; 1422 break;
1166 case TAG_IDENT_TD: /* ISO 13346 3/10.9 */ 1423 case TAG_IDENT_TD: /* ISO 13346 3/10.9 */
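/*
 * Sketch of the selection rule applied per descriptor type in
 * udf_process_sequence() above: keep the block of the descriptor with the
 * highest volume-descriptor sequence number seen so far, so a later
 * descriptor of the same type supersedes an earlier one.  Simplified types.
 */
struct vds_slot {
	unsigned int  vol_desc_seq_num;
	unsigned long block;
};

static void vds_remember(struct vds_slot *slot,
			 unsigned int vdsn, unsigned long block)
{
	if (vdsn >= slot->vol_desc_seq_num) {	/* later descriptor wins */
		slot->vol_desc_seq_num = vdsn;
		slot->block = block;
	}
}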
@@ -1169,32 +1426,38 @@ static int udf_process_sequence(struct super_block *sb, long block, long lastblo
1169 block = next_s; 1426 block = next_s;
1170 lastblock = next_e; 1427 lastblock = next_e;
1171 next_s = next_e = 0; 1428 next_s = next_e = 0;
1172 } else { 1429 } else
1173 done = 1; 1430 done = 1;
1174 }
1175 break; 1431 break;
1176 } 1432 }
1177 brelse(bh); 1433 brelse(bh);
1178 } 1434 }
1179 for (i = 0; i < VDS_POS_LENGTH; i++) { 1435 for (i = 0; i < VDS_POS_LENGTH; i++) {
1180 if (vds[i].block) { 1436 if (vds[i].block) {
1181 bh = udf_read_tagged(sb, vds[i].block, vds[i].block, &ident); 1437 bh = udf_read_tagged(sb, vds[i].block, vds[i].block,
1438 &ident);
1182 1439
1183 if (i == VDS_POS_PRIMARY_VOL_DESC) { 1440 if (i == VDS_POS_PRIMARY_VOL_DESC) {
1184 udf_load_pvoldesc(sb, bh); 1441 udf_load_pvoldesc(sb, bh);
1185 } else if (i == VDS_POS_LOGICAL_VOL_DESC) { 1442 } else if (i == VDS_POS_LOGICAL_VOL_DESC) {
1186 udf_load_logicalvol(sb, bh, fileset); 1443 if (udf_load_logicalvol(sb, bh, fileset)) {
1444 brelse(bh);
1445 return 1;
1446 }
1187 } else if (i == VDS_POS_PARTITION_DESC) { 1447 } else if (i == VDS_POS_PARTITION_DESC) {
1188 struct buffer_head *bh2 = NULL; 1448 struct buffer_head *bh2 = NULL;
1189 if (udf_load_partdesc(sb, bh)) { 1449 if (udf_load_partdesc(sb, bh)) {
1190 brelse(bh); 1450 brelse(bh);
1191 return 1; 1451 return 1;
1192 } 1452 }
1193 for (j = vds[i].block + 1; j < vds[VDS_POS_TERMINATING_DESC].block; j++) { 1453 for (j = vds[i].block + 1;
1454 j < vds[VDS_POS_TERMINATING_DESC].block;
1455 j++) {
1194 bh2 = udf_read_tagged(sb, j, j, &ident); 1456 bh2 = udf_read_tagged(sb, j, j, &ident);
1195 gd = (struct generic_desc *)bh2->b_data; 1457 gd = (struct generic_desc *)bh2->b_data;
1196 if (ident == TAG_IDENT_PD) 1458 if (ident == TAG_IDENT_PD)
1197 if (udf_load_partdesc(sb, bh2)) { 1459 if (udf_load_partdesc(sb,
1460 bh2)) {
1198 brelse(bh); 1461 brelse(bh);
1199 brelse(bh2); 1462 brelse(bh2);
1200 return 1; 1463 return 1;
@@ -1222,14 +1485,17 @@ static int udf_check_valid(struct super_block *sb, int novrs, int silent)
1222 } 1485 }
1223 /* Check that it is NSR02 compliant */ 1486 /* Check that it is NSR02 compliant */
1224 /* Process any "CD-ROM Volume Descriptor Set" (ECMA 167 2/8.3.1) */ 1487 /* Process any "CD-ROM Volume Descriptor Set" (ECMA 167 2/8.3.1) */
1225 else if ((block = udf_vrs(sb, silent)) == -1) { 1488 else {
1226 udf_debug("Failed to read byte 32768. Assuming open disc. " 1489 block = udf_vrs(sb, silent);
1227 "Skipping validity check\n"); 1490 if (block == -1) {
1228 if (!UDF_SB_LASTBLOCK(sb)) 1491 struct udf_sb_info *sbi = UDF_SB(sb);
1229 UDF_SB_LASTBLOCK(sb) = udf_get_last_block(sb); 1492 udf_debug("Failed to read byte 32768. Assuming open "
1230 return 0; 1493 "disc. Skipping validity check\n");
1231 } else { 1494 if (!sbi->s_last_block)
1232 return !block; 1495 sbi->s_last_block = udf_get_last_block(sb);
1496 return 0;
1497 } else
1498 return !block;
1233 } 1499 }
1234} 1500}
1235 1501
@@ -1240,100 +1506,121 @@ static int udf_load_partition(struct super_block *sb, kernel_lb_addr *fileset)
1240 struct buffer_head *bh; 1506 struct buffer_head *bh;
1241 long main_s, main_e, reserve_s, reserve_e; 1507 long main_s, main_e, reserve_s, reserve_e;
1242 int i, j; 1508 int i, j;
1509 struct udf_sb_info *sbi;
1243 1510
1244 if (!sb) 1511 if (!sb)
1245 return 1; 1512 return 1;
1513 sbi = UDF_SB(sb);
1246 1514
1247 for (i = 0; i < ARRAY_SIZE(UDF_SB_ANCHOR(sb)); i++) { 1515 for (i = 0; i < ARRAY_SIZE(sbi->s_anchor); i++) {
1248 if (UDF_SB_ANCHOR(sb)[i] && 1516 if (!sbi->s_anchor[i])
1249 (bh = udf_read_tagged(sb, UDF_SB_ANCHOR(sb)[i], 1517 continue;
1250 UDF_SB_ANCHOR(sb)[i], &ident))) { 1518 bh = udf_read_tagged(sb, sbi->s_anchor[i], sbi->s_anchor[i],
1251 anchor = (struct anchorVolDescPtr *)bh->b_data; 1519 &ident);
1520 if (!bh)
1521 continue;
1252 1522
1253 /* Locate the main sequence */ 1523 anchor = (struct anchorVolDescPtr *)bh->b_data;
1254 main_s = le32_to_cpu(anchor->mainVolDescSeqExt.extLocation);
1255 main_e = le32_to_cpu(anchor->mainVolDescSeqExt.extLength );
1256 main_e = main_e >> sb->s_blocksize_bits;
1257 main_e += main_s;
1258 1524
1259 /* Locate the reserve sequence */ 1525 /* Locate the main sequence */
1260 reserve_s = le32_to_cpu(anchor->reserveVolDescSeqExt.extLocation); 1526 main_s = le32_to_cpu(anchor->mainVolDescSeqExt.extLocation);
1261 reserve_e = le32_to_cpu(anchor->reserveVolDescSeqExt.extLength); 1527 main_e = le32_to_cpu(anchor->mainVolDescSeqExt.extLength);
1262 reserve_e = reserve_e >> sb->s_blocksize_bits; 1528 main_e = main_e >> sb->s_blocksize_bits;
1263 reserve_e += reserve_s; 1529 main_e += main_s;
1264 1530
1265 brelse(bh); 1531 /* Locate the reserve sequence */
1532 reserve_s = le32_to_cpu(
1533 anchor->reserveVolDescSeqExt.extLocation);
1534 reserve_e = le32_to_cpu(
1535 anchor->reserveVolDescSeqExt.extLength);
1536 reserve_e = reserve_e >> sb->s_blocksize_bits;
1537 reserve_e += reserve_s;
1266 1538
1267 /* Process the main & reserve sequences */ 1539 brelse(bh);
1268 /* responsible for finding the PartitionDesc(s) */ 1540
1269 if (!(udf_process_sequence(sb, main_s, main_e, fileset) && 1541 /* Process the main & reserve sequences */
1270 udf_process_sequence(sb, reserve_s, reserve_e, fileset))) { 1542 /* responsible for finding the PartitionDesc(s) */
1271 break; 1543 if (!(udf_process_sequence(sb, main_s, main_e,
1272 } 1544 fileset) &&
1273 } 1545 udf_process_sequence(sb, reserve_s, reserve_e,
1546 fileset)))
1547 break;
1274 } 1548 }
1275 1549
1276 if (i == ARRAY_SIZE(UDF_SB_ANCHOR(sb))) { 1550 if (i == ARRAY_SIZE(sbi->s_anchor)) {
1277 udf_debug("No Anchor block found\n"); 1551 udf_debug("No Anchor block found\n");
1278 return 1; 1552 return 1;
1279 } else 1553 }
1280 udf_debug("Using anchor in block %d\n", UDF_SB_ANCHOR(sb)[i]); 1554 udf_debug("Using anchor in block %d\n", sbi->s_anchor[i]);
1281 1555
1282 for (i = 0; i < UDF_SB_NUMPARTS(sb); i++) { 1556 for (i = 0; i < sbi->s_partitions; i++) {
1283 kernel_lb_addr uninitialized_var(ino); 1557 kernel_lb_addr uninitialized_var(ino);
1284 switch (UDF_SB_PARTTYPE(sb, i)) { 1558 struct udf_part_map *map = &sbi->s_partmaps[i];
1559 switch (map->s_partition_type) {
1285 case UDF_VIRTUAL_MAP15: 1560 case UDF_VIRTUAL_MAP15:
1286 case UDF_VIRTUAL_MAP20: 1561 case UDF_VIRTUAL_MAP20:
1287 if (!UDF_SB_LASTBLOCK(sb)) { 1562 if (!sbi->s_last_block) {
1288 UDF_SB_LASTBLOCK(sb) = udf_get_last_block(sb); 1563 sbi->s_last_block = udf_get_last_block(sb);
1289 udf_find_anchor(sb); 1564 udf_find_anchor(sb);
1290 } 1565 }
1291 1566
1292 if (!UDF_SB_LASTBLOCK(sb)) { 1567 if (!sbi->s_last_block) {
1293 udf_debug("Unable to determine Lastblock (For " 1568 udf_debug("Unable to determine Lastblock (For "
1294 "Virtual Partition)\n"); 1569 "Virtual Partition)\n");
1295 return 1; 1570 return 1;
1296 } 1571 }
1297 1572
1298 for (j = 0; j < UDF_SB_NUMPARTS(sb); j++) { 1573 for (j = 0; j < sbi->s_partitions; j++) {
1574 struct udf_part_map *map2 = &sbi->s_partmaps[j];
1299 if (j != i && 1575 if (j != i &&
1300 UDF_SB_PARTVSN(sb, i) == UDF_SB_PARTVSN(sb, j) && 1576 map->s_volumeseqnum ==
1301 UDF_SB_PARTNUM(sb, i) == UDF_SB_PARTNUM(sb, j)) { 1577 map2->s_volumeseqnum &&
1578 map->s_partition_num ==
1579 map2->s_partition_num) {
1302 ino.partitionReferenceNum = j; 1580 ino.partitionReferenceNum = j;
1303 ino.logicalBlockNum = UDF_SB_LASTBLOCK(sb) - UDF_SB_PARTROOT(sb, j); 1581 ino.logicalBlockNum =
1582 sbi->s_last_block -
1583 map2->s_partition_root;
1304 break; 1584 break;
1305 } 1585 }
1306 } 1586 }
1307 1587
1308 if (j == UDF_SB_NUMPARTS(sb)) 1588 if (j == sbi->s_partitions)
1309 return 1; 1589 return 1;
1310 1590
1311 if (!(UDF_SB_VAT(sb) = udf_iget(sb, ino))) 1591 sbi->s_vat_inode = udf_iget(sb, ino);
1592 if (!sbi->s_vat_inode)
1312 return 1; 1593 return 1;
1313 1594
1314 if (UDF_SB_PARTTYPE(sb, i) == UDF_VIRTUAL_MAP15) { 1595 if (map->s_partition_type == UDF_VIRTUAL_MAP15) {
1315 UDF_SB_TYPEVIRT(sb, i).s_start_offset = 1596 map->s_type_specific.s_virtual.s_start_offset =
1316 udf_ext0_offset(UDF_SB_VAT(sb)); 1597 udf_ext0_offset(sbi->s_vat_inode);
1317 UDF_SB_TYPEVIRT(sb, i).s_num_entries = 1598 map->s_type_specific.s_virtual.s_num_entries =
1318 (UDF_SB_VAT(sb)->i_size - 36) >> 2; 1599 (sbi->s_vat_inode->i_size - 36) >> 2;
1319 } else if (UDF_SB_PARTTYPE(sb, i) == UDF_VIRTUAL_MAP20) { 1600 } else if (map->s_partition_type == UDF_VIRTUAL_MAP20) {
1320 struct buffer_head *bh = NULL;
1321 uint32_t pos; 1601 uint32_t pos;
1602 struct virtualAllocationTable20 *vat20;
1322 1603
1323 pos = udf_block_map(UDF_SB_VAT(sb), 0); 1604 pos = udf_block_map(sbi->s_vat_inode, 0);
1324 bh = sb_bread(sb, pos); 1605 bh = sb_bread(sb, pos);
1325 if (!bh) 1606 if (!bh)
1326 return 1; 1607 return 1;
1327 UDF_SB_TYPEVIRT(sb, i).s_start_offset = 1608 vat20 = (struct virtualAllocationTable20 *)
1328 le16_to_cpu(((struct virtualAllocationTable20 *)bh->b_data + 1609 bh->b_data +
1329 udf_ext0_offset(UDF_SB_VAT(sb)))->lengthHeader) + 1610 udf_ext0_offset(sbi->s_vat_inode);
1330 udf_ext0_offset(UDF_SB_VAT(sb)); 1611 map->s_type_specific.s_virtual.s_start_offset =
1331 UDF_SB_TYPEVIRT(sb, i).s_num_entries = (UDF_SB_VAT(sb)->i_size - 1612 le16_to_cpu(vat20->lengthHeader) +
1332 UDF_SB_TYPEVIRT(sb, i).s_start_offset) >> 2; 1613 udf_ext0_offset(sbi->s_vat_inode);
1614 map->s_type_specific.s_virtual.s_num_entries =
1615 (sbi->s_vat_inode->i_size -
1616 map->s_type_specific.s_virtual.
1617 s_start_offset) >> 2;
1333 brelse(bh); 1618 brelse(bh);
1334 } 1619 }
1335 UDF_SB_PARTROOT(sb, i) = udf_get_pblock(sb, 0, i, 0); 1620 map->s_partition_root = udf_get_pblock(sb, 0, i, 0);
1336 UDF_SB_PARTLEN(sb, i) = UDF_SB_PARTLEN(sb, ino.partitionReferenceNum); 1621 map->s_partition_len =
1622 sbi->s_partmaps[ino.partitionReferenceNum].
1623 s_partition_len;
1337 } 1624 }
1338 } 1625 }
1339 return 0; 1626 return 0;
@@ -1341,62 +1628,86 @@ static int udf_load_partition(struct super_block *sb, kernel_lb_addr *fileset)
1341 1628
1342static void udf_open_lvid(struct super_block *sb) 1629static void udf_open_lvid(struct super_block *sb)
1343{ 1630{
1344 if (UDF_SB_LVIDBH(sb)) { 1631 struct udf_sb_info *sbi = UDF_SB(sb);
1345 int i; 1632 struct buffer_head *bh = sbi->s_lvid_bh;
1633 if (bh) {
1346 kernel_timestamp cpu_time; 1634 kernel_timestamp cpu_time;
1635 struct logicalVolIntegrityDesc *lvid =
1636 (struct logicalVolIntegrityDesc *)bh->b_data;
1637 struct logicalVolIntegrityDescImpUse *lvidiu =
1638 udf_sb_lvidiu(sbi);
1347 1639
1348 UDF_SB_LVIDIU(sb)->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX; 1640 lvidiu->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
1349 UDF_SB_LVIDIU(sb)->impIdent.identSuffix[1] = UDF_OS_ID_LINUX; 1641 lvidiu->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
1350 if (udf_time_to_stamp(&cpu_time, CURRENT_TIME)) 1642 if (udf_time_to_stamp(&cpu_time, CURRENT_TIME))
1351 UDF_SB_LVID(sb)->recordingDateAndTime = cpu_to_lets(cpu_time); 1643 lvid->recordingDateAndTime = cpu_to_lets(cpu_time);
1352 UDF_SB_LVID(sb)->integrityType = LVID_INTEGRITY_TYPE_OPEN; 1644 lvid->integrityType = LVID_INTEGRITY_TYPE_OPEN;
1353
1354 UDF_SB_LVID(sb)->descTag.descCRC = cpu_to_le16(udf_crc((char *)UDF_SB_LVID(sb) + sizeof(tag),
1355 le16_to_cpu(UDF_SB_LVID(sb)->descTag.descCRCLength), 0));
1356 1645
1357 UDF_SB_LVID(sb)->descTag.tagChecksum = 0; 1646 lvid->descTag.descCRC = cpu_to_le16(
1358 for (i = 0; i < 16; i++) 1647 udf_crc((char *)lvid + sizeof(tag),
1359 if (i != 4) 1648 le16_to_cpu(lvid->descTag.descCRCLength),
1360 UDF_SB_LVID(sb)->descTag.tagChecksum += 1649 0));
1361 ((uint8_t *) &(UDF_SB_LVID(sb)->descTag))[i];
1362 1650
1363 mark_buffer_dirty(UDF_SB_LVIDBH(sb)); 1651 lvid->descTag.tagChecksum = udf_tag_checksum(&lvid->descTag);
1652 mark_buffer_dirty(bh);
1364 } 1653 }
1365} 1654}
1366 1655
1367static void udf_close_lvid(struct super_block *sb) 1656static void udf_close_lvid(struct super_block *sb)
1368{ 1657{
1369 kernel_timestamp cpu_time; 1658 kernel_timestamp cpu_time;
1370 int i; 1659 struct udf_sb_info *sbi = UDF_SB(sb);
1660 struct buffer_head *bh = sbi->s_lvid_bh;
1661 struct logicalVolIntegrityDesc *lvid;
1662
1663 if (!bh)
1664 return;
1665
1666 lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
1371 1667
1372 if (UDF_SB_LVIDBH(sb) && 1668 if (lvid->integrityType == LVID_INTEGRITY_TYPE_OPEN) {
1373 UDF_SB_LVID(sb)->integrityType == LVID_INTEGRITY_TYPE_OPEN) { 1669 struct logicalVolIntegrityDescImpUse *lvidiu =
1374 UDF_SB_LVIDIU(sb)->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX; 1670 udf_sb_lvidiu(sbi);
1375 UDF_SB_LVIDIU(sb)->impIdent.identSuffix[1] = UDF_OS_ID_LINUX; 1671 lvidiu->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
1672 lvidiu->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
1376 if (udf_time_to_stamp(&cpu_time, CURRENT_TIME)) 1673 if (udf_time_to_stamp(&cpu_time, CURRENT_TIME))
1377 UDF_SB_LVID(sb)->recordingDateAndTime = cpu_to_lets(cpu_time); 1674 lvid->recordingDateAndTime = cpu_to_lets(cpu_time);
1378 if (UDF_MAX_WRITE_VERSION > le16_to_cpu(UDF_SB_LVIDIU(sb)->maxUDFWriteRev)) 1675 if (UDF_MAX_WRITE_VERSION > le16_to_cpu(lvidiu->maxUDFWriteRev))
1379 UDF_SB_LVIDIU(sb)->maxUDFWriteRev = cpu_to_le16(UDF_MAX_WRITE_VERSION); 1676 lvidiu->maxUDFWriteRev =
1380 if (UDF_SB_UDFREV(sb) > le16_to_cpu(UDF_SB_LVIDIU(sb)->minUDFReadRev)) 1677 cpu_to_le16(UDF_MAX_WRITE_VERSION);
1381 UDF_SB_LVIDIU(sb)->minUDFReadRev = cpu_to_le16(UDF_SB_UDFREV(sb)); 1678 if (sbi->s_udfrev > le16_to_cpu(lvidiu->minUDFReadRev))
1382 if (UDF_SB_UDFREV(sb) > le16_to_cpu(UDF_SB_LVIDIU(sb)->minUDFWriteRev)) 1679 lvidiu->minUDFReadRev = cpu_to_le16(sbi->s_udfrev);
1383 UDF_SB_LVIDIU(sb)->minUDFWriteRev = cpu_to_le16(UDF_SB_UDFREV(sb)); 1680 if (sbi->s_udfrev > le16_to_cpu(lvidiu->minUDFWriteRev))
1384 UDF_SB_LVID(sb)->integrityType = cpu_to_le32(LVID_INTEGRITY_TYPE_CLOSE); 1681 lvidiu->minUDFWriteRev = cpu_to_le16(sbi->s_udfrev);
1385 1682 lvid->integrityType = cpu_to_le32(LVID_INTEGRITY_TYPE_CLOSE);
1386 UDF_SB_LVID(sb)->descTag.descCRC = 1683
1387 cpu_to_le16(udf_crc((char *)UDF_SB_LVID(sb) + sizeof(tag), 1684 lvid->descTag.descCRC = cpu_to_le16(
1388 le16_to_cpu(UDF_SB_LVID(sb)->descTag.descCRCLength), 0)); 1685 udf_crc((char *)lvid + sizeof(tag),
1389 1686 le16_to_cpu(lvid->descTag.descCRCLength),
1390 UDF_SB_LVID(sb)->descTag.tagChecksum = 0; 1687 0));
1391 for (i = 0; i < 16; i++) 1688
1392 if (i != 4) 1689 lvid->descTag.tagChecksum = udf_tag_checksum(&lvid->descTag);
1393 UDF_SB_LVID(sb)->descTag.tagChecksum += 1690 mark_buffer_dirty(bh);
1394 ((uint8_t *)&(UDF_SB_LVID(sb)->descTag))[i];
1395
1396 mark_buffer_dirty(UDF_SB_LVIDBH(sb));
1397 } 1691 }
1398} 1692}
1399 1693
1694static void udf_sb_free_bitmap(struct udf_bitmap *bitmap)
1695{
1696 int i;
1697 int nr_groups = bitmap->s_nr_groups;
1698 int size = sizeof(struct udf_bitmap) + (sizeof(struct buffer_head *) *
1699 nr_groups);
1700
1701 for (i = 0; i < nr_groups; i++)
1702 if (bitmap->s_block_bitmap[i])
1703 brelse(bitmap->s_block_bitmap[i]);
1704
1705 if (size <= PAGE_SIZE)
1706 kfree(bitmap);
1707 else
1708 vfree(bitmap);
1709}
1710
1400/* 1711/*
1401 * udf_read_super 1712 * udf_read_super
1402 * 1713 *
@@ -1426,16 +1737,15 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
1426 uopt.gid = -1; 1737 uopt.gid = -1;
1427 uopt.umask = 0; 1738 uopt.umask = 0;
1428 1739
1429 sbi = kmalloc(sizeof(struct udf_sb_info), GFP_KERNEL); 1740 sbi = kzalloc(sizeof(struct udf_sb_info), GFP_KERNEL);
1430 if (!sbi) 1741 if (!sbi)
1431 return -ENOMEM; 1742 return -ENOMEM;
1432 1743
1433 sb->s_fs_info = sbi; 1744 sb->s_fs_info = sbi;
1434 memset(UDF_SB(sb), 0x00, sizeof(struct udf_sb_info));
1435 1745
1436 mutex_init(&sbi->s_alloc_mutex); 1746 mutex_init(&sbi->s_alloc_mutex);
1437 1747
1438 if (!udf_parse_options((char *)options, &uopt)) 1748 if (!udf_parse_options((char *)options, &uopt, false))
1439 goto error_out; 1749 goto error_out;
1440 1750
1441 if (uopt.flags & (1 << UDF_FLAG_UTF8) && 1751 if (uopt.flags & (1 << UDF_FLAG_UTF8) &&
@@ -1459,30 +1769,31 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
1459 fileset.logicalBlockNum = 0xFFFFFFFF; 1769 fileset.logicalBlockNum = 0xFFFFFFFF;
1460 fileset.partitionReferenceNum = 0xFFFF; 1770 fileset.partitionReferenceNum = 0xFFFF;
1461 1771
1462 UDF_SB(sb)->s_flags = uopt.flags; 1772 sbi->s_flags = uopt.flags;
1463 UDF_SB(sb)->s_uid = uopt.uid; 1773 sbi->s_uid = uopt.uid;
1464 UDF_SB(sb)->s_gid = uopt.gid; 1774 sbi->s_gid = uopt.gid;
1465 UDF_SB(sb)->s_umask = uopt.umask; 1775 sbi->s_umask = uopt.umask;
1466 UDF_SB(sb)->s_nls_map = uopt.nls_map; 1776 sbi->s_nls_map = uopt.nls_map;
1467 1777
1468 /* Set the block size for all transfers */ 1778 /* Set the block size for all transfers */
1469 if (!udf_set_blocksize(sb, uopt.blocksize)) 1779 if (!udf_set_blocksize(sb, uopt.blocksize))
1470 goto error_out; 1780 goto error_out;
1471 1781
1472 if (uopt.session == 0xFFFFFFFF) 1782 if (uopt.session == 0xFFFFFFFF)
1473 UDF_SB_SESSION(sb) = udf_get_last_session(sb); 1783 sbi->s_session = udf_get_last_session(sb);
1474 else 1784 else
1475 UDF_SB_SESSION(sb) = uopt.session; 1785 sbi->s_session = uopt.session;
1476 1786
1477 udf_debug("Multi-session=%d\n", UDF_SB_SESSION(sb)); 1787 udf_debug("Multi-session=%d\n", sbi->s_session);
1478 1788
1479 UDF_SB_LASTBLOCK(sb) = uopt.lastblock; 1789 sbi->s_last_block = uopt.lastblock;
1480 UDF_SB_ANCHOR(sb)[0] = UDF_SB_ANCHOR(sb)[1] = 0; 1790 sbi->s_anchor[0] = sbi->s_anchor[1] = 0;
1481 UDF_SB_ANCHOR(sb)[2] = uopt.anchor; 1791 sbi->s_anchor[2] = uopt.anchor;
1482 UDF_SB_ANCHOR(sb)[3] = 256; 1792 sbi->s_anchor[3] = 256;
1483 1793
1484 if (udf_check_valid(sb, uopt.novrs, silent)) { /* read volume recognition sequences */ 1794 if (udf_check_valid(sb, uopt.novrs, silent)) {
1485 printk("UDF-fs: No VRS found\n"); 1795 /* read volume recognition sequences */
1796 printk(KERN_WARNING "UDF-fs: No VRS found\n");
1486 goto error_out; 1797 goto error_out;
1487 } 1798 }
1488 1799
@@ -1496,27 +1807,30 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
1496 sb->s_time_gran = 1000; 1807 sb->s_time_gran = 1000;
1497 1808
1498 if (udf_load_partition(sb, &fileset)) { 1809 if (udf_load_partition(sb, &fileset)) {
1499 printk("UDF-fs: No partition found (1)\n"); 1810 printk(KERN_WARNING "UDF-fs: No partition found (1)\n");
1500 goto error_out; 1811 goto error_out;
1501 } 1812 }
1502 1813
1503 udf_debug("Lastblock=%d\n", UDF_SB_LASTBLOCK(sb)); 1814 udf_debug("Lastblock=%d\n", sbi->s_last_block);
1504 1815
1505 if (UDF_SB_LVIDBH(sb)) { 1816 if (sbi->s_lvid_bh) {
1506 uint16_t minUDFReadRev = le16_to_cpu(UDF_SB_LVIDIU(sb)->minUDFReadRev); 1817 struct logicalVolIntegrityDescImpUse *lvidiu =
1507 uint16_t minUDFWriteRev = le16_to_cpu(UDF_SB_LVIDIU(sb)->minUDFWriteRev); 1818 udf_sb_lvidiu(sbi);
1508 /* uint16_t maxUDFWriteRev = le16_to_cpu(UDF_SB_LVIDIU(sb)->maxUDFWriteRev); */ 1819 uint16_t minUDFReadRev = le16_to_cpu(lvidiu->minUDFReadRev);
1820 uint16_t minUDFWriteRev = le16_to_cpu(lvidiu->minUDFWriteRev);
1821 /* uint16_t maxUDFWriteRev =
1822 le16_to_cpu(lvidiu->maxUDFWriteRev); */
1509 1823
1510 if (minUDFReadRev > UDF_MAX_READ_VERSION) { 1824 if (minUDFReadRev > UDF_MAX_READ_VERSION) {
1511 printk("UDF-fs: minUDFReadRev=%x (max is %x)\n", 1825 printk(KERN_ERR "UDF-fs: minUDFReadRev=%x "
1512 le16_to_cpu(UDF_SB_LVIDIU(sb)->minUDFReadRev), 1826 "(max is %x)\n",
1827 le16_to_cpu(lvidiu->minUDFReadRev),
1513 UDF_MAX_READ_VERSION); 1828 UDF_MAX_READ_VERSION);
1514 goto error_out; 1829 goto error_out;
1515 } else if (minUDFWriteRev > UDF_MAX_WRITE_VERSION) { 1830 } else if (minUDFWriteRev > UDF_MAX_WRITE_VERSION)
1516 sb->s_flags |= MS_RDONLY; 1831 sb->s_flags |= MS_RDONLY;
1517 }
1518 1832
1519 UDF_SB_UDFREV(sb) = minUDFWriteRev; 1833 sbi->s_udfrev = minUDFWriteRev;
1520 1834
1521 if (minUDFReadRev >= UDF_VERS_USE_EXTENDED_FE) 1835 if (minUDFReadRev >= UDF_VERS_USE_EXTENDED_FE)
1522 UDF_SET_FLAG(sb, UDF_FLAG_USE_EXTENDED_FE); 1836 UDF_SET_FLAG(sb, UDF_FLAG_USE_EXTENDED_FE);
@@ -1524,29 +1838,30 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
1524 UDF_SET_FLAG(sb, UDF_FLAG_USE_STREAMS); 1838 UDF_SET_FLAG(sb, UDF_FLAG_USE_STREAMS);
1525 } 1839 }
1526 1840
1527 if (!UDF_SB_NUMPARTS(sb)) { 1841 if (!sbi->s_partitions) {
1528 printk("UDF-fs: No partition found (2)\n"); 1842 printk(KERN_WARNING "UDF-fs: No partition found (2)\n");
1529 goto error_out; 1843 goto error_out;
1530 } 1844 }
1531 1845
1532 if (UDF_SB_PARTFLAGS(sb, UDF_SB_PARTITION(sb)) & UDF_PART_FLAG_READ_ONLY) { 1846 if (sbi->s_partmaps[sbi->s_partition].s_partition_flags &
1533 printk("UDF-fs: Partition marked readonly; forcing readonly mount\n"); 1847 UDF_PART_FLAG_READ_ONLY) {
1848 printk(KERN_NOTICE "UDF-fs: Partition marked readonly; "
1849 "forcing readonly mount\n");
1534 sb->s_flags |= MS_RDONLY; 1850 sb->s_flags |= MS_RDONLY;
1535 } 1851 }
1536 1852
1537 if (udf_find_fileset(sb, &fileset, &rootdir)) { 1853 if (udf_find_fileset(sb, &fileset, &rootdir)) {
1538 printk("UDF-fs: No fileset found\n"); 1854 printk(KERN_WARNING "UDF-fs: No fileset found\n");
1539 goto error_out; 1855 goto error_out;
1540 } 1856 }
1541 1857
1542 if (!silent) { 1858 if (!silent) {
1543 kernel_timestamp ts; 1859 kernel_timestamp ts;
1544 udf_time_to_stamp(&ts, UDF_SB_RECORDTIME(sb)); 1860 udf_time_to_stamp(&ts, sbi->s_record_time);
1545 udf_info("UDF %s (%s) Mounting volume '%s', " 1861 udf_info("UDF: Mounting volume '%s', "
1546 "timestamp %04u/%02u/%02u %02u:%02u (%x)\n", 1862 "timestamp %04u/%02u/%02u %02u:%02u (%x)\n",
1547 UDFFS_VERSION, UDFFS_DATE, 1863 sbi->s_volume_ident, ts.year, ts.month, ts.day,
1548 UDF_SB_VOLIDENT(sb), ts.year, ts.month, ts.day, ts.hour, ts.minute, 1864 ts.hour, ts.minute, ts.typeAndTimezone);
1549 ts.typeAndTimezone);
1550 } 1865 }
1551 if (!(sb->s_flags & MS_RDONLY)) 1866 if (!(sb->s_flags & MS_RDONLY))
1552 udf_open_lvid(sb); 1867 udf_open_lvid(sb);
@@ -1556,7 +1871,8 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
1556 /* perhaps it's not extensible enough, but for now ... */ 1871 /* perhaps it's not extensible enough, but for now ... */
1557 inode = udf_iget(sb, rootdir); 1872 inode = udf_iget(sb, rootdir);
1558 if (!inode) { 1873 if (!inode) {
1559 printk("UDF-fs: Error in udf_iget, block=%d, partition=%d\n", 1874 printk(KERN_ERR "UDF-fs: Error in udf_iget, block=%d, "
1875 "partition=%d\n",
1560 rootdir.logicalBlockNum, rootdir.partitionReferenceNum); 1876 rootdir.logicalBlockNum, rootdir.partitionReferenceNum);
1561 goto error_out; 1877 goto error_out;
1562 } 1878 }
@@ -1564,7 +1880,7 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
1564 /* Allocate a dentry for the root inode */ 1880 /* Allocate a dentry for the root inode */
1565 sb->s_root = d_alloc_root(inode); 1881 sb->s_root = d_alloc_root(inode);
1566 if (!sb->s_root) { 1882 if (!sb->s_root) {
1567 printk("UDF-fs: Couldn't allocate root dentry\n"); 1883 printk(KERN_ERR "UDF-fs: Couldn't allocate root dentry\n");
1568 iput(inode); 1884 iput(inode);
1569 goto error_out; 1885 goto error_out;
1570 } 1886 }
@@ -1572,30 +1888,32 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
1572 return 0; 1888 return 0;
1573 1889
1574error_out: 1890error_out:
1575 if (UDF_SB_VAT(sb)) 1891 if (sbi->s_vat_inode)
1576 iput(UDF_SB_VAT(sb)); 1892 iput(sbi->s_vat_inode);
1577 if (UDF_SB_NUMPARTS(sb)) { 1893 if (sbi->s_partitions) {
1578 if (UDF_SB_PARTFLAGS(sb, UDF_SB_PARTITION(sb)) & UDF_PART_FLAG_UNALLOC_TABLE) 1894 struct udf_part_map *map = &sbi->s_partmaps[sbi->s_partition];
1579 iput(UDF_SB_PARTMAPS(sb)[UDF_SB_PARTITION(sb)].s_uspace.s_table); 1895 if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
1580 if (UDF_SB_PARTFLAGS(sb, UDF_SB_PARTITION(sb)) & UDF_PART_FLAG_FREED_TABLE) 1896 iput(map->s_uspace.s_table);
1581 iput(UDF_SB_PARTMAPS(sb)[UDF_SB_PARTITION(sb)].s_fspace.s_table); 1897 if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE)
1582 if (UDF_SB_PARTFLAGS(sb, UDF_SB_PARTITION(sb)) & UDF_PART_FLAG_UNALLOC_BITMAP) 1898 iput(map->s_fspace.s_table);
1583 UDF_SB_FREE_BITMAP(sb,UDF_SB_PARTITION(sb), s_uspace); 1899 if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
1584 if (UDF_SB_PARTFLAGS(sb, UDF_SB_PARTITION(sb)) & UDF_PART_FLAG_FREED_BITMAP) 1900 udf_sb_free_bitmap(map->s_uspace.s_bitmap);
1585 UDF_SB_FREE_BITMAP(sb,UDF_SB_PARTITION(sb), s_fspace); 1901 if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP)
1586 if (UDF_SB_PARTTYPE(sb, UDF_SB_PARTITION(sb)) == UDF_SPARABLE_MAP15) { 1902 udf_sb_free_bitmap(map->s_fspace.s_bitmap);
1903 if (map->s_partition_type == UDF_SPARABLE_MAP15)
1587 for (i = 0; i < 4; i++) 1904 for (i = 0; i < 4; i++)
1588 brelse(UDF_SB_TYPESPAR(sb, UDF_SB_PARTITION(sb)).s_spar_map[i]); 1905 brelse(map->s_type_specific.s_sparing.
1589 } 1906 s_spar_map[i]);
1590 } 1907 }
1591#ifdef CONFIG_UDF_NLS 1908#ifdef CONFIG_UDF_NLS
1592 if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP)) 1909 if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP))
1593 unload_nls(UDF_SB(sb)->s_nls_map); 1910 unload_nls(sbi->s_nls_map);
1594#endif 1911#endif
1595 if (!(sb->s_flags & MS_RDONLY)) 1912 if (!(sb->s_flags & MS_RDONLY))
1596 udf_close_lvid(sb); 1913 udf_close_lvid(sb);
1597 brelse(UDF_SB_LVIDBH(sb)); 1914 brelse(sbi->s_lvid_bh);
1598 UDF_SB_FREE(sb); 1915
1916 kfree(sbi->s_partmaps);
1599 kfree(sbi); 1917 kfree(sbi);
1600 sb->s_fs_info = NULL; 1918 sb->s_fs_info = NULL;
1601 1919
@@ -1614,7 +1932,7 @@ void udf_error(struct super_block *sb, const char *function,
1614 va_start(args, fmt); 1932 va_start(args, fmt);
1615 vsnprintf(error_buf, sizeof(error_buf), fmt, args); 1933 vsnprintf(error_buf, sizeof(error_buf), fmt, args);
1616 va_end(args); 1934 va_end(args);
1617 printk (KERN_CRIT "UDF-fs error (device %s): %s: %s\n", 1935 printk(KERN_CRIT "UDF-fs error (device %s): %s: %s\n",
1618 sb->s_id, function, error_buf); 1936 sb->s_id, function, error_buf);
1619} 1937}
1620 1938
@@ -1646,31 +1964,34 @@ void udf_warning(struct super_block *sb, const char *function,
1646static void udf_put_super(struct super_block *sb) 1964static void udf_put_super(struct super_block *sb)
1647{ 1965{
1648 int i; 1966 int i;
1967 struct udf_sb_info *sbi;
1649 1968
1650 if (UDF_SB_VAT(sb)) 1969 sbi = UDF_SB(sb);
1651 iput(UDF_SB_VAT(sb)); 1970 if (sbi->s_vat_inode)
1652 if (UDF_SB_NUMPARTS(sb)) { 1971 iput(sbi->s_vat_inode);
1653 if (UDF_SB_PARTFLAGS(sb, UDF_SB_PARTITION(sb)) & UDF_PART_FLAG_UNALLOC_TABLE) 1972 if (sbi->s_partitions) {
1654 iput(UDF_SB_PARTMAPS(sb)[UDF_SB_PARTITION(sb)].s_uspace.s_table); 1973 struct udf_part_map *map = &sbi->s_partmaps[sbi->s_partition];
1655 if (UDF_SB_PARTFLAGS(sb, UDF_SB_PARTITION(sb)) & UDF_PART_FLAG_FREED_TABLE) 1974 if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
1656 iput(UDF_SB_PARTMAPS(sb)[UDF_SB_PARTITION(sb)].s_fspace.s_table); 1975 iput(map->s_uspace.s_table);
1657 if (UDF_SB_PARTFLAGS(sb, UDF_SB_PARTITION(sb)) & UDF_PART_FLAG_UNALLOC_BITMAP) 1976 if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE)
1658 UDF_SB_FREE_BITMAP(sb,UDF_SB_PARTITION(sb), s_uspace); 1977 iput(map->s_fspace.s_table);
1659 if (UDF_SB_PARTFLAGS(sb, UDF_SB_PARTITION(sb)) & UDF_PART_FLAG_FREED_BITMAP) 1978 if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
1660 UDF_SB_FREE_BITMAP(sb,UDF_SB_PARTITION(sb), s_fspace); 1979 udf_sb_free_bitmap(map->s_uspace.s_bitmap);
1661 if (UDF_SB_PARTTYPE(sb, UDF_SB_PARTITION(sb)) == UDF_SPARABLE_MAP15) { 1980 if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP)
1981 udf_sb_free_bitmap(map->s_fspace.s_bitmap);
1982 if (map->s_partition_type == UDF_SPARABLE_MAP15)
1662 for (i = 0; i < 4; i++) 1983 for (i = 0; i < 4; i++)
1663 brelse(UDF_SB_TYPESPAR(sb, UDF_SB_PARTITION(sb)).s_spar_map[i]); 1984 brelse(map->s_type_specific.s_sparing.
1664 } 1985 s_spar_map[i]);
1665 } 1986 }
1666#ifdef CONFIG_UDF_NLS 1987#ifdef CONFIG_UDF_NLS
1667 if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP)) 1988 if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP))
1668 unload_nls(UDF_SB(sb)->s_nls_map); 1989 unload_nls(sbi->s_nls_map);
1669#endif 1990#endif
1670 if (!(sb->s_flags & MS_RDONLY)) 1991 if (!(sb->s_flags & MS_RDONLY))
1671 udf_close_lvid(sb); 1992 udf_close_lvid(sb);
1672 brelse(UDF_SB_LVIDBH(sb)); 1993 brelse(sbi->s_lvid_bh);
1673 UDF_SB_FREE(sb); 1994 kfree(sbi->s_partmaps);
1674 kfree(sb->s_fs_info); 1995 kfree(sb->s_fs_info);
1675 sb->s_fs_info = NULL; 1996 sb->s_fs_info = NULL;
1676} 1997}
@@ -1691,15 +2012,22 @@ static void udf_put_super(struct super_block *sb)
1691static int udf_statfs(struct dentry *dentry, struct kstatfs *buf) 2012static int udf_statfs(struct dentry *dentry, struct kstatfs *buf)
1692{ 2013{
1693 struct super_block *sb = dentry->d_sb; 2014 struct super_block *sb = dentry->d_sb;
2015 struct udf_sb_info *sbi = UDF_SB(sb);
2016 struct logicalVolIntegrityDescImpUse *lvidiu;
2017
2018 if (sbi->s_lvid_bh != NULL)
2019 lvidiu = udf_sb_lvidiu(sbi);
2020 else
2021 lvidiu = NULL;
1694 2022
1695 buf->f_type = UDF_SUPER_MAGIC; 2023 buf->f_type = UDF_SUPER_MAGIC;
1696 buf->f_bsize = sb->s_blocksize; 2024 buf->f_bsize = sb->s_blocksize;
1697 buf->f_blocks = UDF_SB_PARTLEN(sb, UDF_SB_PARTITION(sb)); 2025 buf->f_blocks = sbi->s_partmaps[sbi->s_partition].s_partition_len;
1698 buf->f_bfree = udf_count_free(sb); 2026 buf->f_bfree = udf_count_free(sb);
1699 buf->f_bavail = buf->f_bfree; 2027 buf->f_bavail = buf->f_bfree;
1700 buf->f_files = (UDF_SB_LVIDBH(sb) ? 2028 buf->f_files = (lvidiu != NULL ? (le32_to_cpu(lvidiu->numFiles) +
1701 (le32_to_cpu(UDF_SB_LVIDIU(sb)->numFiles) + 2029 le32_to_cpu(lvidiu->numDirs)) : 0)
1702 le32_to_cpu(UDF_SB_LVIDIU(sb)->numDirs)) : 0) + buf->f_bfree; 2030 + buf->f_bfree;
1703 buf->f_ffree = buf->f_bfree; 2031 buf->f_ffree = buf->f_bfree;
1704 /* __kernel_fsid_t f_fsid */ 2032 /* __kernel_fsid_t f_fsid */
1705 buf->f_namelen = UDF_NAME_LEN - 2; 2033 buf->f_namelen = UDF_NAME_LEN - 2;
@@ -1711,7 +2039,8 @@ static unsigned char udf_bitmap_lookup[16] = {
1711 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4 2039 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4
1712}; 2040};
1713 2041
1714static unsigned int udf_count_free_bitmap(struct super_block *sb, struct udf_bitmap *bitmap) 2042static unsigned int udf_count_free_bitmap(struct super_block *sb,
2043 struct udf_bitmap *bitmap)
1715{ 2044{
1716 struct buffer_head *bh = NULL; 2045 struct buffer_head *bh = NULL;
1717 unsigned int accum = 0; 2046 unsigned int accum = 0;
@@ -1727,7 +2056,7 @@ static unsigned int udf_count_free_bitmap(struct super_block *sb, struct udf_bit
1727 lock_kernel(); 2056 lock_kernel();
1728 2057
1729 loc.logicalBlockNum = bitmap->s_extPosition; 2058 loc.logicalBlockNum = bitmap->s_extPosition;
1730 loc.partitionReferenceNum = UDF_SB_PARTITION(sb); 2059 loc.partitionReferenceNum = UDF_SB(sb)->s_partition;
1731 bh = udf_read_ptagged(sb, loc, 0, &ident); 2060 bh = udf_read_ptagged(sb, loc, 0, &ident);
1732 2061
1733 if (!bh) { 2062 if (!bh) {
@@ -1772,7 +2101,8 @@ out:
1772 return accum; 2101 return accum;
1773} 2102}
1774 2103
1775static unsigned int udf_count_free_table(struct super_block *sb, struct inode *table) 2104static unsigned int udf_count_free_table(struct super_block *sb,
2105 struct inode *table)
1776{ 2106{
1777 unsigned int accum = 0; 2107 unsigned int accum = 0;
1778 uint32_t elen; 2108 uint32_t elen;
@@ -1782,13 +2112,13 @@ static unsigned int udf_count_free_table(struct super_block *sb, struct inode *t
1782 2112
1783 lock_kernel(); 2113 lock_kernel();
1784 2114
1785 epos.block = UDF_I_LOCATION(table); 2115 epos.block = UDF_I(table)->i_location;
1786 epos.offset = sizeof(struct unallocSpaceEntry); 2116 epos.offset = sizeof(struct unallocSpaceEntry);
1787 epos.bh = NULL; 2117 epos.bh = NULL;
1788 2118
1789 while ((etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) { 2119 while ((etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1)
1790 accum += (elen >> table->i_sb->s_blocksize_bits); 2120 accum += (elen >> table->i_sb->s_blocksize_bits);
1791 } 2121
1792 brelse(epos.bh); 2122 brelse(epos.bh);
1793 2123
1794 unlock_kernel(); 2124 unlock_kernel();
@@ -1799,10 +2129,17 @@ static unsigned int udf_count_free_table(struct super_block *sb, struct inode *t
1799static unsigned int udf_count_free(struct super_block *sb) 2129static unsigned int udf_count_free(struct super_block *sb)
1800{ 2130{
1801 unsigned int accum = 0; 2131 unsigned int accum = 0;
1802 2132 struct udf_sb_info *sbi;
1803 if (UDF_SB_LVIDBH(sb)) { 2133 struct udf_part_map *map;
1804 if (le32_to_cpu(UDF_SB_LVID(sb)->numOfPartitions) > UDF_SB_PARTITION(sb)) { 2134
1805 accum = le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)]); 2135 sbi = UDF_SB(sb);
2136 if (sbi->s_lvid_bh) {
2137 struct logicalVolIntegrityDesc *lvid =
2138 (struct logicalVolIntegrityDesc *)
2139 sbi->s_lvid_bh->b_data;
2140 if (le32_to_cpu(lvid->numOfPartitions) > sbi->s_partition) {
2141 accum = le32_to_cpu(
2142 lvid->freeSpaceTable[sbi->s_partition]);
1806 if (accum == 0xFFFFFFFF) 2143 if (accum == 0xFFFFFFFF)
1807 accum = 0; 2144 accum = 0;
1808 } 2145 }
@@ -1811,24 +2148,25 @@ static unsigned int udf_count_free(struct super_block *sb)
1811 if (accum) 2148 if (accum)
1812 return accum; 2149 return accum;
1813 2150
1814 if (UDF_SB_PARTFLAGS(sb,UDF_SB_PARTITION(sb)) & UDF_PART_FLAG_UNALLOC_BITMAP) { 2151 map = &sbi->s_partmaps[sbi->s_partition];
2152 if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) {
1815 accum += udf_count_free_bitmap(sb, 2153 accum += udf_count_free_bitmap(sb,
1816 UDF_SB_PARTMAPS(sb)[UDF_SB_PARTITION(sb)].s_uspace.s_bitmap); 2154 map->s_uspace.s_bitmap);
1817 } 2155 }
1818 if (UDF_SB_PARTFLAGS(sb,UDF_SB_PARTITION(sb)) & UDF_PART_FLAG_FREED_BITMAP) { 2156 if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP) {
1819 accum += udf_count_free_bitmap(sb, 2157 accum += udf_count_free_bitmap(sb,
1820 UDF_SB_PARTMAPS(sb)[UDF_SB_PARTITION(sb)].s_fspace.s_bitmap); 2158 map->s_fspace.s_bitmap);
1821 } 2159 }
1822 if (accum) 2160 if (accum)
1823 return accum; 2161 return accum;
1824 2162
1825 if (UDF_SB_PARTFLAGS(sb,UDF_SB_PARTITION(sb)) & UDF_PART_FLAG_UNALLOC_TABLE) { 2163 if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) {
1826 accum += udf_count_free_table(sb, 2164 accum += udf_count_free_table(sb,
1827 UDF_SB_PARTMAPS(sb)[UDF_SB_PARTITION(sb)].s_uspace.s_table); 2165 map->s_uspace.s_table);
1828 } 2166 }
1829 if (UDF_SB_PARTFLAGS(sb,UDF_SB_PARTITION(sb)) & UDF_PART_FLAG_FREED_TABLE) { 2167 if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE) {
1830 accum += udf_count_free_table(sb, 2168 accum += udf_count_free_table(sb,
1831 UDF_SB_PARTMAPS(sb)[UDF_SB_PARTITION(sb)].s_fspace.s_table); 2169 map->s_fspace.s_table);
1832 } 2170 }
1833 2171
1834 return accum; 2172 return accum;
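In the super.c hunks above, the open-coded 16-byte descriptor-tag checksum loop in udf_open_lvid()/udf_close_lvid() is replaced by a single udf_tag_checksum() call (its prototype is added in the udfdecl.h hunk further down). A minimal sketch of what that helper presumably computes, reconstructed only from the loop it replaces and assuming the usual 16-byte struct tag layout; the real implementation lives outside this diff:

	u8 udf_tag_checksum(const tag *t)
	{
		u8 *data = (u8 *)t;
		u8 checksum = 0;
		int i;

		/* Sum every tag byte except byte 4, which holds the checksum
		 * itself; this is exactly what the removed
		 * "for (i = 0; i < 16; i++) if (i != 4)" loop did. */
		for (i = 0; i < sizeof(tag); i++)
			if (i != 4)
				checksum += data[i];

		return checksum;
	}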
diff --git a/fs/udf/symlink.c b/fs/udf/symlink.c
index e6f933dd6a7b..6ec99221e50c 100644
--- a/fs/udf/symlink.c
+++ b/fs/udf/symlink.c
@@ -33,7 +33,8 @@
33#include <linux/buffer_head.h> 33#include <linux/buffer_head.h>
34#include "udf_i.h" 34#include "udf_i.h"
35 35
36static void udf_pc_to_char(struct super_block *sb, char *from, int fromlen, char *to) 36static void udf_pc_to_char(struct super_block *sb, char *from, int fromlen,
37 char *to)
37{ 38{
38 struct pathComponent *pc; 39 struct pathComponent *pc;
39 int elen = 0; 40 int elen = 0;
@@ -78,10 +79,12 @@ static int udf_symlink_filler(struct file *file, struct page *page)
78 char *symlink; 79 char *symlink;
79 int err = -EIO; 80 int err = -EIO;
80 char *p = kmap(page); 81 char *p = kmap(page);
82 struct udf_inode_info *iinfo;
81 83
82 lock_kernel(); 84 lock_kernel();
83 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB) { 85 iinfo = UDF_I(inode);
84 symlink = UDF_I_DATA(inode) + UDF_I_LENEATTR(inode); 86 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
87 symlink = iinfo->i_ext.i_data + iinfo->i_lenEAttr;
85 } else { 88 } else {
86 bh = sb_bread(inode->i_sb, udf_block_map(inode, 0)); 89 bh = sb_bread(inode->i_sb, udf_block_map(inode, 0));
87 90
diff --git a/fs/udf/truncate.c b/fs/udf/truncate.c
index 7fc3912885a5..fe61be17cdab 100644
--- a/fs/udf/truncate.c
+++ b/fs/udf/truncate.c
@@ -74,17 +74,18 @@ void udf_truncate_tail_extent(struct inode *inode)
74 uint64_t lbcount = 0; 74 uint64_t lbcount = 0;
75 int8_t etype = -1, netype; 75 int8_t etype = -1, netype;
76 int adsize; 76 int adsize;
77 struct udf_inode_info *iinfo = UDF_I(inode);
77 78
78 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB || 79 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB ||
79 inode->i_size == UDF_I_LENEXTENTS(inode)) 80 inode->i_size == iinfo->i_lenExtents)
80 return; 81 return;
81 /* Are we going to delete the file anyway? */ 82 /* Are we going to delete the file anyway? */
82 if (inode->i_nlink == 0) 83 if (inode->i_nlink == 0)
83 return; 84 return;
84 85
85 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT) 86 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
86 adsize = sizeof(short_ad); 87 adsize = sizeof(short_ad);
87 else if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_LONG) 88 else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
88 adsize = sizeof(long_ad); 89 adsize = sizeof(long_ad);
89 else 90 else
90 BUG(); 91 BUG();
@@ -117,7 +118,7 @@ void udf_truncate_tail_extent(struct inode *inode)
117 } 118 }
118 /* This inode entry is in-memory only and thus we don't have to mark 119 /* This inode entry is in-memory only and thus we don't have to mark
119 * the inode dirty */ 120 * the inode dirty */
120 UDF_I_LENEXTENTS(inode) = inode->i_size; 121 iinfo->i_lenExtents = inode->i_size;
121 brelse(epos.bh); 122 brelse(epos.bh);
122} 123}
123 124
@@ -129,19 +130,20 @@ void udf_discard_prealloc(struct inode *inode)
129 uint64_t lbcount = 0; 130 uint64_t lbcount = 0;
130 int8_t etype = -1, netype; 131 int8_t etype = -1, netype;
131 int adsize; 132 int adsize;
133 struct udf_inode_info *iinfo = UDF_I(inode);
132 134
133 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB || 135 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB ||
134 inode->i_size == UDF_I_LENEXTENTS(inode)) 136 inode->i_size == iinfo->i_lenExtents)
135 return; 137 return;
136 138
137 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT) 139 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
138 adsize = sizeof(short_ad); 140 adsize = sizeof(short_ad);
139 else if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_LONG) 141 else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
140 adsize = sizeof(long_ad); 142 adsize = sizeof(long_ad);
141 else 143 else
142 adsize = 0; 144 adsize = 0;
143 145
144 epos.block = UDF_I_LOCATION(inode); 146 epos.block = iinfo->i_location;
145 147
146 /* Find the last extent in the file */ 148 /* Find the last extent in the file */
147 while ((netype = udf_next_aext(inode, &epos, &eloc, &elen, 1)) != -1) { 149 while ((netype = udf_next_aext(inode, &epos, &eloc, &elen, 1)) != -1) {
@@ -153,8 +155,9 @@ void udf_discard_prealloc(struct inode *inode)
153 lbcount -= elen; 155 lbcount -= elen;
154 extent_trunc(inode, &epos, eloc, etype, elen, 0); 156 extent_trunc(inode, &epos, eloc, etype, elen, 0);
155 if (!epos.bh) { 157 if (!epos.bh) {
156 UDF_I_LENALLOC(inode) = 158 iinfo->i_lenAlloc =
157 epos.offset - udf_file_entry_alloc_offset(inode); 159 epos.offset -
160 udf_file_entry_alloc_offset(inode);
158 mark_inode_dirty(inode); 161 mark_inode_dirty(inode);
159 } else { 162 } else {
160 struct allocExtDesc *aed = 163 struct allocExtDesc *aed =
@@ -163,7 +166,7 @@ void udf_discard_prealloc(struct inode *inode)
163 cpu_to_le32(epos.offset - 166 cpu_to_le32(epos.offset -
164 sizeof(struct allocExtDesc)); 167 sizeof(struct allocExtDesc));
165 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || 168 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
166 UDF_SB_UDFREV(inode->i_sb) >= 0x0201) 169 UDF_SB(inode->i_sb)->s_udfrev >= 0x0201)
167 udf_update_tag(epos.bh->b_data, epos.offset); 170 udf_update_tag(epos.bh->b_data, epos.offset);
168 else 171 else
169 udf_update_tag(epos.bh->b_data, 172 udf_update_tag(epos.bh->b_data,
@@ -173,7 +176,7 @@ void udf_discard_prealloc(struct inode *inode)
173 } 176 }
174 /* This inode entry is in-memory only and thus we don't have to mark 177 /* This inode entry is in-memory only and thus we don't have to mark
175 * the inode dirty */ 178 * the inode dirty */
176 UDF_I_LENEXTENTS(inode) = lbcount; 179 iinfo->i_lenExtents = lbcount;
177 brelse(epos.bh); 180 brelse(epos.bh);
178} 181}
179 182
@@ -184,13 +187,15 @@ void udf_truncate_extents(struct inode *inode)
184 uint32_t elen, nelen = 0, indirect_ext_len = 0, lenalloc; 187 uint32_t elen, nelen = 0, indirect_ext_len = 0, lenalloc;
185 int8_t etype; 188 int8_t etype;
186 struct super_block *sb = inode->i_sb; 189 struct super_block *sb = inode->i_sb;
190 struct udf_sb_info *sbi = UDF_SB(sb);
187 sector_t first_block = inode->i_size >> sb->s_blocksize_bits, offset; 191 sector_t first_block = inode->i_size >> sb->s_blocksize_bits, offset;
188 loff_t byte_offset; 192 loff_t byte_offset;
189 int adsize; 193 int adsize;
194 struct udf_inode_info *iinfo = UDF_I(inode);
190 195
191 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT) 196 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
192 adsize = sizeof(short_ad); 197 adsize = sizeof(short_ad);
193 else if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_LONG) 198 else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
194 adsize = sizeof(long_ad); 199 adsize = sizeof(long_ad);
195 else 200 else
196 BUG(); 201 BUG();
@@ -212,7 +217,8 @@ void udf_truncate_extents(struct inode *inode)
212 else 217 else
213 lenalloc -= sizeof(struct allocExtDesc); 218 lenalloc -= sizeof(struct allocExtDesc);
214 219
215 while ((etype = udf_current_aext(inode, &epos, &eloc, &elen, 0)) != -1) { 220 while ((etype = udf_current_aext(inode, &epos, &eloc,
221 &elen, 0)) != -1) {
216 if (etype == (EXT_NEXT_EXTENT_ALLOCDECS >> 30)) { 222 if (etype == (EXT_NEXT_EXTENT_ALLOCDECS >> 30)) {
217 udf_write_aext(inode, &epos, neloc, nelen, 0); 223 udf_write_aext(inode, &epos, neloc, nelen, 0);
218 if (indirect_ext_len) { 224 if (indirect_ext_len) {
@@ -224,35 +230,43 @@ void udf_truncate_extents(struct inode *inode)
224 0, indirect_ext_len); 230 0, indirect_ext_len);
225 } else { 231 } else {
226 if (!epos.bh) { 232 if (!epos.bh) {
227 UDF_I_LENALLOC(inode) = lenalloc; 233 iinfo->i_lenAlloc =
234 lenalloc;
228 mark_inode_dirty(inode); 235 mark_inode_dirty(inode);
229 } else { 236 } else {
230 struct allocExtDesc *aed = 237 struct allocExtDesc *aed =
231 (struct allocExtDesc *)(epos.bh->b_data); 238 (struct allocExtDesc *)
239 (epos.bh->b_data);
240 int len =
241 sizeof(struct allocExtDesc);
242
232 aed->lengthAllocDescs = 243 aed->lengthAllocDescs =
233 cpu_to_le32(lenalloc); 244 cpu_to_le32(lenalloc);
234 if (!UDF_QUERY_FLAG(sb, UDF_FLAG_STRICT) || 245 if (!UDF_QUERY_FLAG(sb,
235 UDF_SB_UDFREV(sb) >= 0x0201) 246 UDF_FLAG_STRICT) ||
236 udf_update_tag(epos.bh->b_data, 247 sbi->s_udfrev >= 0x0201)
237 lenalloc + 248 len += lenalloc;
238 sizeof(struct allocExtDesc)); 249
239 else 250 udf_update_tag(epos.bh->b_data,
240 udf_update_tag(epos.bh->b_data, 251 len);
241 sizeof(struct allocExtDesc)); 252 mark_buffer_dirty_inode(
242 mark_buffer_dirty_inode(epos.bh, inode); 253 epos.bh, inode);
243 } 254 }
244 } 255 }
245 brelse(epos.bh); 256 brelse(epos.bh);
246 epos.offset = sizeof(struct allocExtDesc); 257 epos.offset = sizeof(struct allocExtDesc);
247 epos.block = eloc; 258 epos.block = eloc;
248 epos.bh = udf_tread(sb, udf_get_lb_pblock(sb, eloc, 0)); 259 epos.bh = udf_tread(sb,
260 udf_get_lb_pblock(sb, eloc, 0));
249 if (elen) 261 if (elen)
250 indirect_ext_len = (elen + sb->s_blocksize -1) >> 262 indirect_ext_len =
263 (elen + sb->s_blocksize - 1) >>
251 sb->s_blocksize_bits; 264 sb->s_blocksize_bits;
252 else 265 else
253 indirect_ext_len = 1; 266 indirect_ext_len = 1;
254 } else { 267 } else {
255 extent_trunc(inode, &epos, eloc, etype, elen, 0); 268 extent_trunc(inode, &epos, eloc, etype,
269 elen, 0);
256 epos.offset += adsize; 270 epos.offset += adsize;
257 } 271 }
258 } 272 }
@@ -264,19 +278,20 @@ void udf_truncate_extents(struct inode *inode)
264 indirect_ext_len); 278 indirect_ext_len);
265 } else { 279 } else {
266 if (!epos.bh) { 280 if (!epos.bh) {
267 UDF_I_LENALLOC(inode) = lenalloc; 281 iinfo->i_lenAlloc = lenalloc;
268 mark_inode_dirty(inode); 282 mark_inode_dirty(inode);
269 } else { 283 } else {
270 struct allocExtDesc *aed = 284 struct allocExtDesc *aed =
271 (struct allocExtDesc *)(epos.bh->b_data); 285 (struct allocExtDesc *)(epos.bh->b_data);
272 aed->lengthAllocDescs = cpu_to_le32(lenalloc); 286 aed->lengthAllocDescs = cpu_to_le32(lenalloc);
273 if (!UDF_QUERY_FLAG(sb, UDF_FLAG_STRICT) || 287 if (!UDF_QUERY_FLAG(sb, UDF_FLAG_STRICT) ||
274 UDF_SB_UDFREV(sb) >= 0x0201) 288 sbi->s_udfrev >= 0x0201)
275 udf_update_tag(epos.bh->b_data, 289 udf_update_tag(epos.bh->b_data,
276 lenalloc + sizeof(struct allocExtDesc)); 290 lenalloc +
291 sizeof(struct allocExtDesc));
277 else 292 else
278 udf_update_tag(epos.bh->b_data, 293 udf_update_tag(epos.bh->b_data,
279 sizeof(struct allocExtDesc)); 294 sizeof(struct allocExtDesc));
280 mark_buffer_dirty_inode(epos.bh, inode); 295 mark_buffer_dirty_inode(epos.bh, inode);
281 } 296 }
282 } 297 }
@@ -290,13 +305,16 @@ void udf_truncate_extents(struct inode *inode)
290 * extending the file by 'offset' blocks. 305 * extending the file by 'offset' blocks.
291 */ 306 */
292 if ((!epos.bh && 307 if ((!epos.bh &&
293 epos.offset == udf_file_entry_alloc_offset(inode)) || 308 epos.offset ==
294 (epos.bh && epos.offset == sizeof(struct allocExtDesc))) { 309 udf_file_entry_alloc_offset(inode)) ||
310 (epos.bh && epos.offset ==
311 sizeof(struct allocExtDesc))) {
295 /* File has no extents at all or has empty last 312 /* File has no extents at all or has empty last
296 * indirect extent! Create a fake extent... */ 313 * indirect extent! Create a fake extent... */
297 extent.extLocation.logicalBlockNum = 0; 314 extent.extLocation.logicalBlockNum = 0;
298 extent.extLocation.partitionReferenceNum = 0; 315 extent.extLocation.partitionReferenceNum = 0;
299 extent.extLength = EXT_NOT_RECORDED_NOT_ALLOCATED; 316 extent.extLength =
317 EXT_NOT_RECORDED_NOT_ALLOCATED;
300 } else { 318 } else {
301 epos.offset -= adsize; 319 epos.offset -= adsize;
302 etype = udf_next_aext(inode, &epos, 320 etype = udf_next_aext(inode, &epos,
@@ -305,10 +323,12 @@ void udf_truncate_extents(struct inode *inode)
305 extent.extLength |= etype << 30; 323 extent.extLength |= etype << 30;
306 } 324 }
307 udf_extend_file(inode, &epos, &extent, 325 udf_extend_file(inode, &epos, &extent,
308 offset + ((inode->i_size & (sb->s_blocksize - 1)) != 0)); 326 offset +
327 ((inode->i_size &
328 (sb->s_blocksize - 1)) != 0));
309 } 329 }
310 } 330 }
311 UDF_I_LENEXTENTS(inode) = inode->i_size; 331 iinfo->i_lenExtents = inode->i_size;
312 332
313 brelse(epos.bh); 333 brelse(epos.bh);
314} 334}
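The most fiddly part of the udf_truncate_extents() hunks above is that the two udf_update_tag() call sites collapse into one. Read flat, the new logic is roughly the following sketch, assembled from the wrapped lines above rather than quoted verbatim:

	struct allocExtDesc *aed = (struct allocExtDesc *)epos.bh->b_data;
	int len = sizeof(struct allocExtDesc);

	aed->lengthAllocDescs = cpu_to_le32(lenalloc);
	/* For UDF >= 2.01, or when strict mode is off, the tag is presumably
	 * updated over the allocation descriptors as well, so their length
	 * is included. */
	if (!UDF_QUERY_FLAG(sb, UDF_FLAG_STRICT) || sbi->s_udfrev >= 0x0201)
		len += lenalloc;
	udf_update_tag(epos.bh->b_data, len);
	mark_buffer_dirty_inode(epos.bh, inode);

Behaviour is the same as the old if/else pair; only the duplicated call is folded into one.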
diff --git a/fs/udf/udf_i.h b/fs/udf/udf_i.h
index d7dbe6f3ba0c..ccc52f16bf7d 100644
--- a/fs/udf/udf_i.h
+++ b/fs/udf/udf_i.h
@@ -7,20 +7,4 @@ static inline struct udf_inode_info *UDF_I(struct inode *inode)
7 return list_entry(inode, struct udf_inode_info, vfs_inode); 7 return list_entry(inode, struct udf_inode_info, vfs_inode);
8} 8}
9 9
10#define UDF_I_LOCATION(X) ( UDF_I(X)->i_location )
11#define UDF_I_LENEATTR(X) ( UDF_I(X)->i_lenEAttr )
12#define UDF_I_LENALLOC(X) ( UDF_I(X)->i_lenAlloc )
13#define UDF_I_LENEXTENTS(X) ( UDF_I(X)->i_lenExtents )
14#define UDF_I_UNIQUE(X) ( UDF_I(X)->i_unique )
15#define UDF_I_ALLOCTYPE(X) ( UDF_I(X)->i_alloc_type )
16#define UDF_I_EFE(X) ( UDF_I(X)->i_efe )
17#define UDF_I_USE(X) ( UDF_I(X)->i_use )
18#define UDF_I_STRAT4096(X) ( UDF_I(X)->i_strat4096 )
19#define UDF_I_NEXT_ALLOC_BLOCK(X) ( UDF_I(X)->i_next_alloc_block )
20#define UDF_I_NEXT_ALLOC_GOAL(X) ( UDF_I(X)->i_next_alloc_goal )
21#define UDF_I_CRTIME(X) ( UDF_I(X)->i_crtime )
22#define UDF_I_SAD(X) ( UDF_I(X)->i_ext.i_sad )
23#define UDF_I_LAD(X) ( UDF_I(X)->i_ext.i_lad )
24#define UDF_I_DATA(X) ( UDF_I(X)->i_ext.i_data )
25
26#endif /* !defined(_LINUX_UDF_I_H) */ 10#endif /* !defined(_LINUX_UDF_I_H) */
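The udf_i.h hunk above deletes the whole UDF_I_* accessor layer; callers now fetch the inode-private struct once and dereference its fields directly, which is the pattern running through the truncate.c and symlink.c hunks. As a small illustration, using only names taken from the removed macros:

	/* old style */
	if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(short_ad);

	/* new style */
	struct udf_inode_info *iinfo = UDF_I(inode);
	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(short_ad);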
diff --git a/fs/udf/udf_sb.h b/fs/udf/udf_sb.h
index 3c2982017c6d..737d1c604eea 100644
--- a/fs/udf/udf_sb.h
+++ b/fs/udf/udf_sb.h
@@ -26,6 +26,8 @@
26#define UDF_FLAG_GID_IGNORE 14 26#define UDF_FLAG_GID_IGNORE 14
27#define UDF_FLAG_UID_SET 15 27#define UDF_FLAG_UID_SET 15
28#define UDF_FLAG_GID_SET 16 28#define UDF_FLAG_GID_SET 16
29#define UDF_FLAG_SESSION_SET 17
30#define UDF_FLAG_LASTBLOCK_SET 18
29 31
30#define UDF_PART_FLAG_UNALLOC_BITMAP 0x0001 32#define UDF_PART_FLAG_UNALLOC_BITMAP 0x0001
31#define UDF_PART_FLAG_UNALLOC_TABLE 0x0002 33#define UDF_PART_FLAG_UNALLOC_TABLE 0x0002
@@ -41,96 +43,12 @@ static inline struct udf_sb_info *UDF_SB(struct super_block *sb)
41 return sb->s_fs_info; 43 return sb->s_fs_info;
42} 44}
43 45
44#define UDF_SB_FREE(X)\ 46struct logicalVolIntegrityDescImpUse *udf_sb_lvidiu(struct udf_sb_info *sbi);
45{\
46 if (UDF_SB(X)) {\
47 kfree(UDF_SB_PARTMAPS(X));\
48 UDF_SB_PARTMAPS(X) = NULL;\
49 }\
50}
51
52#define UDF_SB_ALLOC_PARTMAPS(X,Y)\
53{\
54 UDF_SB_PARTMAPS(X) = kmalloc(sizeof(struct udf_part_map) * Y, GFP_KERNEL);\
55 if (UDF_SB_PARTMAPS(X) != NULL) {\
56 UDF_SB_NUMPARTS(X) = Y;\
57 memset(UDF_SB_PARTMAPS(X), 0x00, sizeof(struct udf_part_map) * Y);\
58 } else {\
59 UDF_SB_NUMPARTS(X) = 0;\
60 udf_error(X, __FUNCTION__, "Unable to allocate space for %d partition maps", Y);\
61 }\
62}
63
64#define UDF_SB_ALLOC_BITMAP(X,Y,Z)\
65{\
66 int nr_groups = ((UDF_SB_PARTLEN((X),(Y)) + (sizeof(struct spaceBitmapDesc) << 3) +\
67 ((X)->s_blocksize * 8) - 1) / ((X)->s_blocksize * 8));\
68 int size = sizeof(struct udf_bitmap) + (sizeof(struct buffer_head *) * nr_groups);\
69 if (size <= PAGE_SIZE)\
70 UDF_SB_PARTMAPS(X)[(Y)].Z.s_bitmap = kmalloc(size, GFP_KERNEL);\
71 else\
72 UDF_SB_PARTMAPS(X)[(Y)].Z.s_bitmap = vmalloc(size);\
73 if (UDF_SB_PARTMAPS(X)[(Y)].Z.s_bitmap != NULL) {\
74 memset(UDF_SB_PARTMAPS(X)[(Y)].Z.s_bitmap, 0x00, size);\
75 UDF_SB_PARTMAPS(X)[(Y)].Z.s_bitmap->s_block_bitmap =\
76 (struct buffer_head **)(UDF_SB_PARTMAPS(X)[(Y)].Z.s_bitmap + 1);\
77 UDF_SB_PARTMAPS(X)[(Y)].Z.s_bitmap->s_nr_groups = nr_groups;\
78 } else {\
79 udf_error(X, __FUNCTION__, "Unable to allocate space for bitmap and %d buffer_head pointers", nr_groups);\
80 }\
81}
82 47
83#define UDF_SB_FREE_BITMAP(X,Y,Z)\ 48int udf_compute_nr_groups(struct super_block *sb, u32 partition);
84{\
85 int i;\
86 int nr_groups = UDF_SB_BITMAP_NR_GROUPS(X,Y,Z);\
87 int size = sizeof(struct udf_bitmap) + (sizeof(struct buffer_head *) * nr_groups);\
88 for (i = 0; i < nr_groups; i++) {\
89 if (UDF_SB_BITMAP(X,Y,Z,i))\
90 brelse(UDF_SB_BITMAP(X,Y,Z,i));\
91 }\
92 if (size <= PAGE_SIZE)\
93 kfree(UDF_SB_PARTMAPS(X)[Y].Z.s_bitmap);\
94 else\
95 vfree(UDF_SB_PARTMAPS(X)[Y].Z.s_bitmap);\
96}
97 49
98#define UDF_QUERY_FLAG(X,Y) ( UDF_SB(X)->s_flags & ( 1 << (Y) ) ) 50#define UDF_QUERY_FLAG(X,Y) ( UDF_SB(X)->s_flags & ( 1 << (Y) ) )
99#define UDF_SET_FLAG(X,Y) ( UDF_SB(X)->s_flags |= ( 1 << (Y) ) ) 51#define UDF_SET_FLAG(X,Y) ( UDF_SB(X)->s_flags |= ( 1 << (Y) ) )
100#define UDF_CLEAR_FLAG(X,Y) ( UDF_SB(X)->s_flags &= ~( 1 << (Y) ) ) 52#define UDF_CLEAR_FLAG(X,Y) ( UDF_SB(X)->s_flags &= ~( 1 << (Y) ) )
101 53
102#define UDF_UPDATE_UDFREV(X,Y) ( ((Y) > UDF_SB_UDFREV(X)) ? UDF_SB_UDFREV(X) = (Y) : UDF_SB_UDFREV(X) )
103
104#define UDF_SB_PARTMAPS(X) ( UDF_SB(X)->s_partmaps )
105#define UDF_SB_PARTTYPE(X,Y) ( UDF_SB_PARTMAPS(X)[(Y)].s_partition_type )
106#define UDF_SB_PARTROOT(X,Y) ( UDF_SB_PARTMAPS(X)[(Y)].s_partition_root )
107#define UDF_SB_PARTLEN(X,Y) ( UDF_SB_PARTMAPS(X)[(Y)].s_partition_len )
108#define UDF_SB_PARTVSN(X,Y) ( UDF_SB_PARTMAPS(X)[(Y)].s_volumeseqnum )
109#define UDF_SB_PARTNUM(X,Y) ( UDF_SB_PARTMAPS(X)[(Y)].s_partition_num )
110#define UDF_SB_TYPESPAR(X,Y) ( UDF_SB_PARTMAPS(X)[(Y)].s_type_specific.s_sparing )
111#define UDF_SB_TYPEVIRT(X,Y) ( UDF_SB_PARTMAPS(X)[(Y)].s_type_specific.s_virtual )
112#define UDF_SB_PARTFUNC(X,Y) ( UDF_SB_PARTMAPS(X)[(Y)].s_partition_func )
113#define UDF_SB_PARTFLAGS(X,Y) ( UDF_SB_PARTMAPS(X)[(Y)].s_partition_flags )
114#define UDF_SB_BITMAP(X,Y,Z,I) ( UDF_SB_PARTMAPS(X)[(Y)].Z.s_bitmap->s_block_bitmap[I] )
115#define UDF_SB_BITMAP_NR_GROUPS(X,Y,Z) ( UDF_SB_PARTMAPS(X)[(Y)].Z.s_bitmap->s_nr_groups )
116
117#define UDF_SB_VOLIDENT(X) ( UDF_SB(X)->s_volident )
118#define UDF_SB_NUMPARTS(X) ( UDF_SB(X)->s_partitions )
119#define UDF_SB_PARTITION(X) ( UDF_SB(X)->s_partition )
120#define UDF_SB_SESSION(X) ( UDF_SB(X)->s_session )
121#define UDF_SB_ANCHOR(X) ( UDF_SB(X)->s_anchor )
122#define UDF_SB_LASTBLOCK(X) ( UDF_SB(X)->s_lastblock )
123#define UDF_SB_LVIDBH(X) ( UDF_SB(X)->s_lvidbh )
124#define UDF_SB_LVID(X) ( (struct logicalVolIntegrityDesc *)UDF_SB_LVIDBH(X)->b_data )
125#define UDF_SB_LVIDIU(X) ( (struct logicalVolIntegrityDescImpUse *)&(UDF_SB_LVID(X)->impUse[le32_to_cpu(UDF_SB_LVID(X)->numOfPartitions) * 2 * sizeof(uint32_t)/sizeof(uint8_t)]) )
126
127#define UDF_SB_UMASK(X) ( UDF_SB(X)->s_umask )
128#define UDF_SB_GID(X) ( UDF_SB(X)->s_gid )
129#define UDF_SB_UID(X) ( UDF_SB(X)->s_uid )
130#define UDF_SB_RECORDTIME(X) ( UDF_SB(X)->s_recordtime )
131#define UDF_SB_SERIALNUM(X) ( UDF_SB(X)->s_serialnum )
132#define UDF_SB_UDFREV(X) ( UDF_SB(X)->s_udfrev )
133#define UDF_SB_FLAGS(X) ( UDF_SB(X)->s_flags )
134#define UDF_SB_VAT(X) ( UDF_SB(X)->s_vat )
135
136#endif /* __LINUX_UDF_SB_H */ 54#endif /* __LINUX_UDF_SB_H */
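Two of the macros removed in the udf_sb.h hunk above return as proper functions whose prototypes are added here: udf_sb_lvidiu() and udf_compute_nr_groups(). Their implementations live in super.c and are not part of this diff; the sketches below are hedged reconstructions from the deleted macro bodies only, assuming the existing struct logicalVolIntegrityDesc and struct spaceBitmapDesc definitions:

	struct logicalVolIntegrityDescImpUse *udf_sb_lvidiu(struct udf_sb_info *sbi)
	{
		struct logicalVolIntegrityDesc *lvid =
			(struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data;
		__u32 n_partitions = le32_to_cpu(lvid->numOfPartitions);
		/* Skip the two 32-bit tables (free-space and size, each with
		 * numOfPartitions entries) that precede the imp-use area,
		 * exactly as the old UDF_SB_LVIDIU macro computed it. */
		__u32 offset = n_partitions * 2 * sizeof(uint32_t);

		return (struct logicalVolIntegrityDescImpUse *)&lvid->impUse[offset];
	}

	int udf_compute_nr_groups(struct super_block *sb, u32 partition)
	{
		struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];

		/* Same arithmetic as the removed UDF_SB_ALLOC_BITMAP macro:
		 * the number of (blocksize * 8)-bit bitmap groups needed to
		 * cover the partition plus the space bitmap descriptor
		 * header. */
		return (map->s_partition_len +
			(sizeof(struct spaceBitmapDesc) << 3) +
			(sb->s_blocksize * 8) - 1) / (sb->s_blocksize * 8);
	}

The matching release path, formerly the UDF_SB_FREE_BITMAP macro, is already visible as udf_sb_free_bitmap() in the super.c hunk earlier in this patch.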
diff --git a/fs/udf/udfdecl.h b/fs/udf/udfdecl.h
index c8016cc9e7e6..681dc2b66cdb 100644
--- a/fs/udf/udfdecl.h
+++ b/fs/udf/udfdecl.h
@@ -24,18 +24,21 @@
24#define UDF_PATH_LEN 1023 24#define UDF_PATH_LEN 1023
25 25
26#define udf_file_entry_alloc_offset(inode)\ 26#define udf_file_entry_alloc_offset(inode)\
27 (UDF_I_USE(inode) ?\ 27 (UDF_I(inode)->i_use ?\
28 sizeof(struct unallocSpaceEntry) :\ 28 sizeof(struct unallocSpaceEntry) :\
29 ((UDF_I_EFE(inode) ?\ 29 ((UDF_I(inode)->i_efe ?\
30 sizeof(struct extendedFileEntry) :\ 30 sizeof(struct extendedFileEntry) :\
31 sizeof(struct fileEntry)) + UDF_I_LENEATTR(inode))) 31 sizeof(struct fileEntry)) + UDF_I(inode)->i_lenEAttr))
32 32
33#define udf_ext0_offset(inode)\ 33#define udf_ext0_offset(inode)\
34 (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB ?\ 34 (UDF_I(inode)->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB ?\
35 udf_file_entry_alloc_offset(inode) : 0) 35 udf_file_entry_alloc_offset(inode) : 0)
36 36
37#define udf_get_lb_pblock(sb,loc,offset) udf_get_pblock((sb), (loc).logicalBlockNum, (loc).partitionReferenceNum, (offset)) 37#define udf_get_lb_pblock(sb,loc,offset) udf_get_pblock((sb), (loc).logicalBlockNum, (loc).partitionReferenceNum, (offset))
38 38
39/* computes tag checksum */
40u8 udf_tag_checksum(const tag *t);
41
39struct dentry; 42struct dentry;
40struct inode; 43struct inode;
41struct task_struct; 44struct task_struct;
@@ -185,8 +188,8 @@ extern struct fileIdentDesc *udf_fileident_read(struct inode *, loff_t *,
185 sector_t *); 188 sector_t *);
186extern struct fileIdentDesc *udf_get_fileident(void *buffer, int bufsize, 189extern struct fileIdentDesc *udf_get_fileident(void *buffer, int bufsize,
187 int *offset); 190 int *offset);
188extern long_ad *udf_get_filelongad(uint8_t *, int, int *, int); 191extern long_ad *udf_get_filelongad(uint8_t *, int, uint32_t *, int);
189extern short_ad *udf_get_fileshortad(uint8_t *, int, int *, int); 192extern short_ad *udf_get_fileshortad(uint8_t *, int, uint32_t *, int);
190 193
191/* crc.c */ 194/* crc.c */
192extern uint16_t udf_crc(uint8_t *, uint32_t, uint16_t); 195extern uint16_t udf_crc(uint8_t *, uint32_t, uint16_t);
diff --git a/fs/udf/udftime.c b/fs/udf/udftime.c
index adcb87c2da7e..ce595732ba6f 100644
--- a/fs/udf/udftime.c
+++ b/fs/udf/udftime.c
@@ -18,8 +18,10 @@
18 Boston, MA 02111-1307, USA. */ 18 Boston, MA 02111-1307, USA. */
19 19
20/* 20/*
21 * dgb 10/02/98: ripped this from glibc source to help convert timestamps to unix time 21 * dgb 10/02/98: ripped this from glibc source to help convert timestamps
22 * 10/04/98: added new table-based lookup after seeing how ugly the gnu code is 22 * to unix time
23 * 10/04/98: added new table-based lookup after seeing how ugly
24 * the gnu code is
23 * blf 09/27/99: ripped out all the old code and inserted new table from 25 * blf 09/27/99: ripped out all the old code and inserted new table from
24 * John Brockmeyer (without leap second corrections) 26 * John Brockmeyer (without leap second corrections)
25 * rewrote udf_stamp_to_time and fixed timezone accounting in 27 * rewrote udf_stamp_to_time and fixed timezone accounting in
@@ -55,27 +57,27 @@ static const unsigned short int __mon_yday[2][13] = {
55 57
56#define MAX_YEAR_SECONDS 69 58#define MAX_YEAR_SECONDS 69
57#define SPD 0x15180 /*3600*24 */ 59#define SPD 0x15180 /*3600*24 */
58#define SPY(y,l,s) (SPD * (365*y+l)+s) 60#define SPY(y, l, s) (SPD * (365 * y + l) + s)
59 61
60static time_t year_seconds[MAX_YEAR_SECONDS]= { 62static time_t year_seconds[MAX_YEAR_SECONDS] = {
61/*1970*/ SPY( 0, 0,0), SPY( 1, 0,0), SPY( 2, 0,0), SPY( 3, 1,0), 63/*1970*/ SPY(0, 0, 0), SPY(1, 0, 0), SPY(2, 0, 0), SPY(3, 1, 0),
62/*1974*/ SPY( 4, 1,0), SPY( 5, 1,0), SPY( 6, 1,0), SPY( 7, 2,0), 64/*1974*/ SPY(4, 1, 0), SPY(5, 1, 0), SPY(6, 1, 0), SPY(7, 2, 0),
63/*1978*/ SPY( 8, 2,0), SPY( 9, 2,0), SPY(10, 2,0), SPY(11, 3,0), 65/*1978*/ SPY(8, 2, 0), SPY(9, 2, 0), SPY(10, 2, 0), SPY(11, 3, 0),
64/*1982*/ SPY(12, 3,0), SPY(13, 3,0), SPY(14, 3,0), SPY(15, 4,0), 66/*1982*/ SPY(12, 3, 0), SPY(13, 3, 0), SPY(14, 3, 0), SPY(15, 4, 0),
65/*1986*/ SPY(16, 4,0), SPY(17, 4,0), SPY(18, 4,0), SPY(19, 5,0), 67/*1986*/ SPY(16, 4, 0), SPY(17, 4, 0), SPY(18, 4, 0), SPY(19, 5, 0),
66/*1990*/ SPY(20, 5,0), SPY(21, 5,0), SPY(22, 5,0), SPY(23, 6,0), 68/*1990*/ SPY(20, 5, 0), SPY(21, 5, 0), SPY(22, 5, 0), SPY(23, 6, 0),
67/*1994*/ SPY(24, 6,0), SPY(25, 6,0), SPY(26, 6,0), SPY(27, 7,0), 69/*1994*/ SPY(24, 6, 0), SPY(25, 6, 0), SPY(26, 6, 0), SPY(27, 7, 0),
68/*1998*/ SPY(28, 7,0), SPY(29, 7,0), SPY(30, 7,0), SPY(31, 8,0), 70/*1998*/ SPY(28, 7, 0), SPY(29, 7, 0), SPY(30, 7, 0), SPY(31, 8, 0),
69/*2002*/ SPY(32, 8,0), SPY(33, 8,0), SPY(34, 8,0), SPY(35, 9,0), 71/*2002*/ SPY(32, 8, 0), SPY(33, 8, 0), SPY(34, 8, 0), SPY(35, 9, 0),
70/*2006*/ SPY(36, 9,0), SPY(37, 9,0), SPY(38, 9,0), SPY(39,10,0), 72/*2006*/ SPY(36, 9, 0), SPY(37, 9, 0), SPY(38, 9, 0), SPY(39, 10, 0),
71/*2010*/ SPY(40,10,0), SPY(41,10,0), SPY(42,10,0), SPY(43,11,0), 73/*2010*/ SPY(40, 10, 0), SPY(41, 10, 0), SPY(42, 10, 0), SPY(43, 11, 0),
72/*2014*/ SPY(44,11,0), SPY(45,11,0), SPY(46,11,0), SPY(47,12,0), 74/*2014*/ SPY(44, 11, 0), SPY(45, 11, 0), SPY(46, 11, 0), SPY(47, 12, 0),
73/*2018*/ SPY(48,12,0), SPY(49,12,0), SPY(50,12,0), SPY(51,13,0), 75/*2018*/ SPY(48, 12, 0), SPY(49, 12, 0), SPY(50, 12, 0), SPY(51, 13, 0),
74/*2022*/ SPY(52,13,0), SPY(53,13,0), SPY(54,13,0), SPY(55,14,0), 76/*2022*/ SPY(52, 13, 0), SPY(53, 13, 0), SPY(54, 13, 0), SPY(55, 14, 0),
75/*2026*/ SPY(56,14,0), SPY(57,14,0), SPY(58,14,0), SPY(59,15,0), 77/*2026*/ SPY(56, 14, 0), SPY(57, 14, 0), SPY(58, 14, 0), SPY(59, 15, 0),
76/*2030*/ SPY(60,15,0), SPY(61,15,0), SPY(62,15,0), SPY(63,16,0), 78/*2030*/ SPY(60, 15, 0), SPY(61, 15, 0), SPY(62, 15, 0), SPY(63, 16, 0),
77/*2034*/ SPY(64,16,0), SPY(65,16,0), SPY(66,16,0), SPY(67,17,0), 79/*2034*/ SPY(64, 16, 0), SPY(65, 16, 0), SPY(66, 16, 0), SPY(67, 17, 0),
78/*2038*/ SPY(68,17,0) 80/*2038*/ SPY(68, 17, 0)
79}; 81};
80 82
81extern struct timezone sys_tz; 83extern struct timezone sys_tz;
@@ -115,7 +117,7 @@ time_t *udf_stamp_to_time(time_t *dest, long *dest_usec, kernel_timestamp src)
115 return dest; 117 return dest;
116} 118}
117 119
118kernel_timestamp *udf_time_to_stamp(kernel_timestamp * dest, struct timespec ts) 120kernel_timestamp *udf_time_to_stamp(kernel_timestamp *dest, struct timespec ts)
119{ 121{
120 long int days, rem, y; 122 long int days, rem, y;
121 const unsigned short int *ip; 123 const unsigned short int *ip;
@@ -137,7 +139,7 @@ kernel_timestamp *udf_time_to_stamp(kernel_timestamp * dest, struct timespec ts)
137 dest->second = rem % 60; 139 dest->second = rem % 60;
138 y = 1970; 140 y = 1970;
139 141
140#define DIV(a,b) ((a) / (b) - ((a) % (b) < 0)) 142#define DIV(a, b) ((a) / (b) - ((a) % (b) < 0))
141#define LEAPS_THRU_END_OF(y) (DIV (y, 4) - DIV (y, 100) + DIV (y, 400)) 143#define LEAPS_THRU_END_OF(y) (DIV (y, 4) - DIV (y, 100) + DIV (y, 400))
142 144
143 while (days < 0 || days >= (__isleap(y) ? 366 : 365)) { 145 while (days < 0 || days >= (__isleap(y) ? 366 : 365)) {
@@ -145,8 +147,8 @@ kernel_timestamp *udf_time_to_stamp(kernel_timestamp * dest, struct timespec ts)
145 147
146 /* Adjust DAYS and Y to match the guessed year. */ 148 /* Adjust DAYS and Y to match the guessed year. */
147 days -= ((yg - y) * 365 149 days -= ((yg - y) * 365
148 + LEAPS_THRU_END_OF (yg - 1) 150 + LEAPS_THRU_END_OF(yg - 1)
149 - LEAPS_THRU_END_OF (y - 1)); 151 - LEAPS_THRU_END_OF(y - 1));
150 y = yg; 152 y = yg;
151 } 153 }
152 dest->year = y; 154 dest->year = y;
@@ -158,7 +160,8 @@ kernel_timestamp *udf_time_to_stamp(kernel_timestamp * dest, struct timespec ts)
158 dest->day = days + 1; 160 dest->day = days + 1;
159 161
160 dest->centiseconds = ts.tv_nsec / 10000000; 162 dest->centiseconds = ts.tv_nsec / 10000000;
161 dest->hundredsOfMicroseconds = (ts.tv_nsec / 1000 - dest->centiseconds * 10000) / 100; 163 dest->hundredsOfMicroseconds = (ts.tv_nsec / 1000 -
164 dest->centiseconds * 10000) / 100;
162 dest->microseconds = (ts.tv_nsec / 1000 - dest->centiseconds * 10000 - 165 dest->microseconds = (ts.tv_nsec / 1000 - dest->centiseconds * 10000 -
163 dest->hundredsOfMicroseconds * 100); 166 dest->hundredsOfMicroseconds * 100);
164 return dest; 167 return dest;
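As a quick worked example of the sub-second split at the end of udf_time_to_stamp(), using the formulas as reflowed above: for ts.tv_nsec = 123456789 (123456.789 µs), centiseconds = 123456789 / 10000000 = 12, hundredsOfMicroseconds = (123456 - 12 * 10000) / 100 = 34, and microseconds = 123456 - 12 * 10000 - 34 * 100 = 56; that is 120 ms + 3.4 ms + 56 µs = 123.456 ms, with the trailing 789 ns truncated. Only the line wrapping changes in this hunk, not the arithmetic.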
diff --git a/fs/udf/unicode.c b/fs/udf/unicode.c
index 9e6099c26c27..e533b11703bf 100644
--- a/fs/udf/unicode.c
+++ b/fs/udf/unicode.c
@@ -136,12 +136,18 @@ int udf_CS0toUTF8(struct ustr *utf_o, struct ustr *ocu_i)
136 if (c < 0x80U) { 136 if (c < 0x80U) {
137 utf_o->u_name[utf_o->u_len++] = (uint8_t)c; 137 utf_o->u_name[utf_o->u_len++] = (uint8_t)c;
138 } else if (c < 0x800U) { 138 } else if (c < 0x800U) {
139 utf_o->u_name[utf_o->u_len++] = (uint8_t)(0xc0 | (c >> 6)); 139 utf_o->u_name[utf_o->u_len++] =
140 utf_o->u_name[utf_o->u_len++] = (uint8_t)(0x80 | (c & 0x3f)); 140 (uint8_t)(0xc0 | (c >> 6));
141 utf_o->u_name[utf_o->u_len++] =
142 (uint8_t)(0x80 | (c & 0x3f));
141 } else { 143 } else {
142 utf_o->u_name[utf_o->u_len++] = (uint8_t)(0xe0 | (c >> 12)); 144 utf_o->u_name[utf_o->u_len++] =
143 utf_o->u_name[utf_o->u_len++] = (uint8_t)(0x80 | ((c >> 6) & 0x3f)); 145 (uint8_t)(0xe0 | (c >> 12));
144 utf_o->u_name[utf_o->u_len++] = (uint8_t)(0x80 | (c & 0x3f)); 146 utf_o->u_name[utf_o->u_len++] =
147 (uint8_t)(0x80 |
148 ((c >> 6) & 0x3f));
149 utf_o->u_name[utf_o->u_len++] =
150 (uint8_t)(0x80 | (c & 0x3f));
145 } 151 }
146 } 152 }
147 utf_o->u_cmpID = 8; 153 utf_o->u_cmpID = 8;
@@ -232,9 +238,8 @@ try_again:
232 goto error_out; 238 goto error_out;
233 } 239 }
234 240
235 if (max_val == 0xffffU) { 241 if (max_val == 0xffffU)
236 ocu[++u_len] = (uint8_t)(utf_char >> 8); 242 ocu[++u_len] = (uint8_t)(utf_char >> 8);
237 }
238 ocu[++u_len] = (uint8_t)(utf_char & 0xffU); 243 ocu[++u_len] = (uint8_t)(utf_char & 0xffU);
239 } 244 }
240 245
@@ -330,29 +335,29 @@ int udf_get_filename(struct super_block *sb, uint8_t *sname, uint8_t *dname,
330 struct ustr filename, unifilename; 335 struct ustr filename, unifilename;
331 int len; 336 int len;
332 337
333 if (udf_build_ustr_exact(&unifilename, sname, flen)) { 338 if (udf_build_ustr_exact(&unifilename, sname, flen))
334 return 0; 339 return 0;
335 }
336 340
337 if (UDF_QUERY_FLAG(sb, UDF_FLAG_UTF8)) { 341 if (UDF_QUERY_FLAG(sb, UDF_FLAG_UTF8)) {
338 if (!udf_CS0toUTF8(&filename, &unifilename)) { 342 if (!udf_CS0toUTF8(&filename, &unifilename)) {
339 udf_debug("Failed in udf_get_filename: sname = %s\n", sname); 343 udf_debug("Failed in udf_get_filename: sname = %s\n",
344 sname);
340 return 0; 345 return 0;
341 } 346 }
342 } else if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP)) { 347 } else if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP)) {
343 if (!udf_CS0toNLS(UDF_SB(sb)->s_nls_map, &filename, &unifilename)) { 348 if (!udf_CS0toNLS(UDF_SB(sb)->s_nls_map, &filename,
344 udf_debug("Failed in udf_get_filename: sname = %s\n", sname); 349 &unifilename)) {
350 udf_debug("Failed in udf_get_filename: sname = %s\n",
351 sname);
345 return 0; 352 return 0;
346 } 353 }
347 } else { 354 } else
348 return 0; 355 return 0;
349 }
350 356
351 len = udf_translate_to_linux(dname, filename.u_name, filename.u_len, 357 len = udf_translate_to_linux(dname, filename.u_name, filename.u_len,
352 unifilename.u_name, unifilename.u_len); 358 unifilename.u_name, unifilename.u_len);
353 if (len) { 359 if (len)
354 return len; 360 return len;
355 }
356 361
357 return 0; 362 return 0;
358} 363}
@@ -363,23 +368,20 @@ int udf_put_filename(struct super_block *sb, const uint8_t *sname,
363 struct ustr unifilename; 368 struct ustr unifilename;
364 int namelen; 369 int namelen;
365 370
366 if (!(udf_char_to_ustr(&unifilename, sname, flen))) { 371 if (!udf_char_to_ustr(&unifilename, sname, flen))
367 return 0; 372 return 0;
368 }
369 373
370 if (UDF_QUERY_FLAG(sb, UDF_FLAG_UTF8)) { 374 if (UDF_QUERY_FLAG(sb, UDF_FLAG_UTF8)) {
371 namelen = udf_UTF8toCS0(dname, &unifilename, UDF_NAME_LEN); 375 namelen = udf_UTF8toCS0(dname, &unifilename, UDF_NAME_LEN);
372 if (!namelen) { 376 if (!namelen)
373 return 0; 377 return 0;
374 }
375 } else if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP)) { 378 } else if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP)) {
376 namelen = udf_NLStoCS0(UDF_SB(sb)->s_nls_map, dname, &unifilename, UDF_NAME_LEN); 379 namelen = udf_NLStoCS0(UDF_SB(sb)->s_nls_map, dname,
377 if (!namelen) { 380 &unifilename, UDF_NAME_LEN);
381 if (!namelen)
378 return 0; 382 return 0;
379 } 383 } else
380 } else {
381 return 0; 384 return 0;
382 }
383 385
384 return namelen; 386 return namelen;
385} 387}
@@ -389,8 +391,9 @@ int udf_put_filename(struct super_block *sb, const uint8_t *sname,
389#define CRC_MARK '#' 391#define CRC_MARK '#'
390#define EXT_SIZE 5 392#define EXT_SIZE 5
391 393
392static int udf_translate_to_linux(uint8_t *newName, uint8_t *udfName, int udfLen, 394static int udf_translate_to_linux(uint8_t *newName, uint8_t *udfName,
393 uint8_t *fidName, int fidNameLen) 395 int udfLen, uint8_t *fidName,
396 int fidNameLen)
394{ 397{
395 int index, newIndex = 0, needsCRC = 0; 398 int index, newIndex = 0, needsCRC = 0;
396 int extIndex = 0, newExtIndex = 0, hasExt = 0; 399 int extIndex = 0, newExtIndex = 0, hasExt = 0;
@@ -409,13 +412,16 @@ static int udf_translate_to_linux(uint8_t *newName, uint8_t *udfName, int udfLen
409 if (curr == '/' || curr == 0) { 412 if (curr == '/' || curr == 0) {
410 needsCRC = 1; 413 needsCRC = 1;
411 curr = ILLEGAL_CHAR_MARK; 414 curr = ILLEGAL_CHAR_MARK;
412 while (index + 1 < udfLen && (udfName[index + 1] == '/' || 415 while (index + 1 < udfLen &&
413 udfName[index + 1] == 0)) 416 (udfName[index + 1] == '/' ||
417 udfName[index + 1] == 0))
414 index++; 418 index++;
415 } if (curr == EXT_MARK && (udfLen - index - 1) <= EXT_SIZE) { 419 }
416 if (udfLen == index + 1) { 420 if (curr == EXT_MARK &&
421 (udfLen - index - 1) <= EXT_SIZE) {
422 if (udfLen == index + 1)
417 hasExt = 0; 423 hasExt = 0;
418 } else { 424 else {
419 hasExt = 1; 425 hasExt = 1;
420 extIndex = index; 426 extIndex = index;
421 newExtIndex = newIndex; 427 newExtIndex = newIndex;
@@ -433,16 +439,18 @@ static int udf_translate_to_linux(uint8_t *newName, uint8_t *udfName, int udfLen
433 439
434 if (hasExt) { 440 if (hasExt) {
435 int maxFilenameLen; 441 int maxFilenameLen;
436 for(index = 0; index < EXT_SIZE && extIndex + index + 1 < udfLen; index++) { 442 for (index = 0;
443 index < EXT_SIZE && extIndex + index + 1 < udfLen;
444 index++) {
437 curr = udfName[extIndex + index + 1]; 445 curr = udfName[extIndex + index + 1];
438 446
439 if (curr == '/' || curr == 0) { 447 if (curr == '/' || curr == 0) {
440 needsCRC = 1; 448 needsCRC = 1;
441 curr = ILLEGAL_CHAR_MARK; 449 curr = ILLEGAL_CHAR_MARK;
442 while(extIndex + index + 2 < udfLen && 450 while (extIndex + index + 2 < udfLen &&
443 (index + 1 < EXT_SIZE 451 (index + 1 < EXT_SIZE &&
444 && (udfName[extIndex + index + 2] == '/' || 452 (udfName[extIndex + index + 2] == '/' ||
445 udfName[extIndex + index + 2] == 0))) 453 udfName[extIndex + index + 2] == 0)))
446 index++; 454 index++;
447 } 455 }
448 ext[localExtIndex++] = curr; 456 ext[localExtIndex++] = curr;
@@ -452,9 +460,8 @@ static int udf_translate_to_linux(uint8_t *newName, uint8_t *udfName, int udfLen
452 newIndex = maxFilenameLen; 460 newIndex = maxFilenameLen;
453 else 461 else
454 newIndex = newExtIndex; 462 newIndex = newExtIndex;
455 } else if (newIndex > 250) { 463 } else if (newIndex > 250)
456 newIndex = 250; 464 newIndex = 250;
457 }
458 newName[newIndex++] = CRC_MARK; 465 newName[newIndex++] = CRC_MARK;
459 valueCRC = udf_crc(fidName, fidNameLen, 0); 466 valueCRC = udf_crc(fidName, fidNameLen, 0);
460 newName[newIndex++] = hexChar[(valueCRC & 0xf000) >> 12]; 467 newName[newIndex++] = hexChar[(valueCRC & 0xf000) >> 12];
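
The hunk above is whitespace-only: it re-wraps the long lines in udf_translate_to_linux() without changing behaviour. What that function does is squash characters that are illegal in a Linux name ('/' and NUL) to ILLEGAL_CHAR_MARK and, whenever the name had to be rewritten or truncated, append CRC_MARK plus four hex digits of a 16-bit checksum of the raw FID name. Below is a stripped-down userspace sketch of just that suffixing step; toy_crc16() is a placeholder invented here and is not the kernel's udf_crc(), and the extension-preserving and length-capping logic is omitted.

#include <stdio.h>
#include <stdint.h>

/* Illustrative stand-ins; the real values and udf_crc() live in fs/udf. */
#define ILLEGAL_CHAR_MARK '_'	/* assumed marker for '/' and NUL */
#define CRC_MARK          '#'

/* Toy 16-bit checksum -- NOT the kernel's udf_crc(). */
static uint16_t toy_crc16(const unsigned char *p, int len)
{
	uint16_t crc = 0;

	while (len--)
		crc = (uint16_t)((crc << 3) ^ (crc >> 13) ^ *p++);
	return crc;
}

int main(void)
{
	static const char hexChar[] = "0123456789ABCDEF";
	const unsigned char fid[] = "bad/name.txt";	/* raw FID name */
	char out[64];
	int i, n = 0, needsCRC = 0;

	for (i = 0; fid[i]; i++) {
		unsigned char c = fid[i];

		if (c == '/' || c == 0) {	/* illegal in a Linux name */
			c = ILLEGAL_CHAR_MARK;
			needsCRC = 1;
		}
		out[n++] = c;
	}
	if (needsCRC) {				/* append the "#xxxx" suffix */
		uint16_t crc = toy_crc16(fid, i);

		out[n++] = CRC_MARK;
		out[n++] = hexChar[(crc & 0xf000) >> 12];
		out[n++] = hexChar[(crc & 0x0f00) >> 8];
		out[n++] = hexChar[(crc & 0x00f0) >> 4];
		out[n++] = hexChar[crc & 0x000f];
	}
	out[n] = '\0';
	printf("%s -> %s\n", (const char *)fid, out);
	return 0;
}
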
diff --git a/fs/ufs/balloc.c b/fs/ufs/balloc.c
index f63a09ce8683..1fca381f0ce2 100644
--- a/fs/ufs/balloc.c
+++ b/fs/ufs/balloc.c
@@ -9,7 +9,6 @@
9 */ 9 */
10 10
11#include <linux/fs.h> 11#include <linux/fs.h>
12#include <linux/ufs_fs.h>
13#include <linux/stat.h> 12#include <linux/stat.h>
14#include <linux/time.h> 13#include <linux/time.h>
15#include <linux/string.h> 14#include <linux/string.h>
@@ -19,6 +18,7 @@
19#include <linux/bitops.h> 18#include <linux/bitops.h>
20#include <asm/byteorder.h> 19#include <asm/byteorder.h>
21 20
21#include "ufs_fs.h"
22#include "ufs.h" 22#include "ufs.h"
23#include "swab.h" 23#include "swab.h"
24#include "util.h" 24#include "util.h"
diff --git a/fs/ufs/cylinder.c b/fs/ufs/cylinder.c
index 2a815665644f..b4676322ddb6 100644
--- a/fs/ufs/cylinder.c
+++ b/fs/ufs/cylinder.c
@@ -9,7 +9,6 @@
9 */ 9 */
10 10
11#include <linux/fs.h> 11#include <linux/fs.h>
12#include <linux/ufs_fs.h>
13#include <linux/time.h> 12#include <linux/time.h>
14#include <linux/stat.h> 13#include <linux/stat.h>
15#include <linux/string.h> 14#include <linux/string.h>
@@ -17,6 +16,7 @@
17 16
18#include <asm/byteorder.h> 17#include <asm/byteorder.h>
19 18
19#include "ufs_fs.h"
20#include "ufs.h" 20#include "ufs.h"
21#include "swab.h" 21#include "swab.h"
22#include "util.h" 22#include "util.h"
diff --git a/fs/ufs/dir.c b/fs/ufs/dir.c
index aaf2878305ce..ef563fc8d72c 100644
--- a/fs/ufs/dir.c
+++ b/fs/ufs/dir.c
@@ -18,9 +18,9 @@
18 18
19#include <linux/time.h> 19#include <linux/time.h>
20#include <linux/fs.h> 20#include <linux/fs.h>
21#include <linux/ufs_fs.h>
22#include <linux/swap.h> 21#include <linux/swap.h>
23 22
23#include "ufs_fs.h"
24#include "ufs.h" 24#include "ufs.h"
25#include "swab.h" 25#include "swab.h"
26#include "util.h" 26#include "util.h"
diff --git a/fs/ufs/file.c b/fs/ufs/file.c
index a46c97bf023f..625ef17c6f83 100644
--- a/fs/ufs/file.c
+++ b/fs/ufs/file.c
@@ -24,9 +24,9 @@
24 */ 24 */
25 25
26#include <linux/fs.h> 26#include <linux/fs.h>
27#include <linux/ufs_fs.h>
28#include <linux/buffer_head.h> /* for sync_mapping_buffers() */ 27#include <linux/buffer_head.h> /* for sync_mapping_buffers() */
29 28
29#include "ufs_fs.h"
30#include "ufs.h" 30#include "ufs.h"
31 31
32 32
diff --git a/fs/ufs/ialloc.c b/fs/ufs/ialloc.c
index 7e260bc0d94f..ac181f6806a3 100644
--- a/fs/ufs/ialloc.c
+++ b/fs/ufs/ialloc.c
@@ -24,7 +24,6 @@
24 */ 24 */
25 25
26#include <linux/fs.h> 26#include <linux/fs.h>
27#include <linux/ufs_fs.h>
28#include <linux/time.h> 27#include <linux/time.h>
29#include <linux/stat.h> 28#include <linux/stat.h>
30#include <linux/string.h> 29#include <linux/string.h>
@@ -34,6 +33,7 @@
34#include <linux/bitops.h> 33#include <linux/bitops.h>
35#include <asm/byteorder.h> 34#include <asm/byteorder.h>
36 35
36#include "ufs_fs.h"
37#include "ufs.h" 37#include "ufs.h"
38#include "swab.h" 38#include "swab.h"
39#include "util.h" 39#include "util.h"
diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c
index 4320782761ae..5446b888fc8e 100644
--- a/fs/ufs/inode.c
+++ b/fs/ufs/inode.c
@@ -30,7 +30,6 @@
30 30
31#include <linux/errno.h> 31#include <linux/errno.h>
32#include <linux/fs.h> 32#include <linux/fs.h>
33#include <linux/ufs_fs.h>
34#include <linux/time.h> 33#include <linux/time.h>
35#include <linux/stat.h> 34#include <linux/stat.h>
36#include <linux/string.h> 35#include <linux/string.h>
@@ -38,6 +37,7 @@
38#include <linux/smp_lock.h> 37#include <linux/smp_lock.h>
39#include <linux/buffer_head.h> 38#include <linux/buffer_head.h>
40 39
40#include "ufs_fs.h"
41#include "ufs.h" 41#include "ufs.h"
42#include "swab.h" 42#include "swab.h"
43#include "util.h" 43#include "util.h"
@@ -714,26 +714,30 @@ static int ufs2_read_inode(struct inode *inode, struct ufs2_inode *ufs2_inode)
714 return 0; 714 return 0;
715} 715}
716 716
717void ufs_read_inode(struct inode * inode) 717struct inode *ufs_iget(struct super_block *sb, unsigned long ino)
718{ 718{
719 struct ufs_inode_info *ufsi = UFS_I(inode); 719 struct ufs_inode_info *ufsi;
720 struct super_block * sb; 720 struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
721 struct ufs_sb_private_info * uspi;
722 struct buffer_head * bh; 721 struct buffer_head * bh;
722 struct inode *inode;
723 int err; 723 int err;
724 724
725 UFSD("ENTER, ino %lu\n", inode->i_ino); 725 UFSD("ENTER, ino %lu\n", ino);
726
727 sb = inode->i_sb;
728 uspi = UFS_SB(sb)->s_uspi;
729 726
730 if (inode->i_ino < UFS_ROOTINO || 727 if (ino < UFS_ROOTINO || ino > (uspi->s_ncg * uspi->s_ipg)) {
731 inode->i_ino > (uspi->s_ncg * uspi->s_ipg)) {
732 ufs_warning(sb, "ufs_read_inode", "bad inode number (%lu)\n", 728 ufs_warning(sb, "ufs_read_inode", "bad inode number (%lu)\n",
733 inode->i_ino); 729 ino);
734 goto bad_inode; 730 return ERR_PTR(-EIO);
735 } 731 }
736 732
733 inode = iget_locked(sb, ino);
734 if (!inode)
735 return ERR_PTR(-ENOMEM);
736 if (!(inode->i_state & I_NEW))
737 return inode;
738
739 ufsi = UFS_I(inode);
740
737 bh = sb_bread(sb, uspi->s_sbbase + ufs_inotofsba(inode->i_ino)); 741 bh = sb_bread(sb, uspi->s_sbbase + ufs_inotofsba(inode->i_ino));
738 if (!bh) { 742 if (!bh) {
739 ufs_warning(sb, "ufs_read_inode", "unable to read inode %lu\n", 743 ufs_warning(sb, "ufs_read_inode", "unable to read inode %lu\n",
@@ -765,10 +769,12 @@ void ufs_read_inode(struct inode * inode)
765 brelse(bh); 769 brelse(bh);
766 770
767 UFSD("EXIT\n"); 771 UFSD("EXIT\n");
768 return; 772 unlock_new_inode(inode);
773 return inode;
769 774
770bad_inode: 775bad_inode:
771 make_bad_inode(inode); 776 iget_failed(inode);
777 return ERR_PTR(-EIO);
772} 778}
773 779
774static void ufs1_update_inode(struct inode *inode, struct ufs_inode *ufs_inode) 780static void ufs1_update_inode(struct inode *inode, struct ufs_inode *ufs_inode)
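
The rewrite above replaces the old ->read_inode() hook with a ufs_iget() helper built on iget_locked(), the pattern the VFS was converging on at the time: look the inode up in the inode cache, fill it only if it is newly allocated (I_NEW), publish it with unlock_new_inode(), and report failures with iget_failed() plus an ERR_PTR return instead of make_bad_inode(). A minimal kernel-context sketch of that shape, using invented foo_* names rather than the ufs code itself:

#include <linux/fs.h>
#include <linux/err.h>

/* Hypothetical filesystem; only the locking and error shape matter here. */
static int foo_read_on_disk(struct inode *inode);

struct inode *foo_iget(struct super_block *sb, unsigned long ino)
{
	struct inode *inode;

	inode = iget_locked(sb, ino);		/* find in or add to the inode cache */
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))		/* cached and initialised: done */
		return inode;

	if (foo_read_on_disk(inode)) {		/* fill the new inode from disk */
		iget_failed(inode);		/* mark bad, unlock and release */
		return ERR_PTR(-EIO);
	}

	unlock_new_inode(inode);		/* clear I_NEW and wake waiters */
	return inode;
}
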
diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c
index d8bfbee2fe2b..e3a9b1fac75a 100644
--- a/fs/ufs/namei.c
+++ b/fs/ufs/namei.c
@@ -29,8 +29,9 @@
29 29
30#include <linux/time.h> 30#include <linux/time.h>
31#include <linux/fs.h> 31#include <linux/fs.h>
32#include <linux/ufs_fs.h>
33#include <linux/smp_lock.h> 32#include <linux/smp_lock.h>
33
34#include "ufs_fs.h"
34#include "ufs.h" 35#include "ufs.h"
35#include "util.h" 36#include "util.h"
36 37
@@ -57,10 +58,10 @@ static struct dentry *ufs_lookup(struct inode * dir, struct dentry *dentry, stru
57 lock_kernel(); 58 lock_kernel();
58 ino = ufs_inode_by_name(dir, dentry); 59 ino = ufs_inode_by_name(dir, dentry);
59 if (ino) { 60 if (ino) {
60 inode = iget(dir->i_sb, ino); 61 inode = ufs_iget(dir->i_sb, ino);
61 if (!inode) { 62 if (IS_ERR(inode)) {
62 unlock_kernel(); 63 unlock_kernel();
63 return ERR_PTR(-EACCES); 64 return ERR_CAST(inode);
64 } 65 }
65 } 66 }
66 unlock_kernel(); 67 unlock_kernel();
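
On the lookup side the caller now checks IS_ERR() and forwards the real error with ERR_CAST() instead of collapsing every failure to -EACCES. A small kernel-context sketch of that caller idiom; foo_iget() is the hypothetical provider from the sketch above and foo_lookup() is likewise invented:

#include <linux/err.h>
#include <linux/fs.h>
#include <linux/dcache.h>

struct inode *foo_iget(struct super_block *sb, unsigned long ino);	/* from the sketch above */

static struct dentry *foo_lookup(struct inode *dir, struct dentry *dentry,
				 unsigned long ino)
{
	struct inode *inode = foo_iget(dir->i_sb, ino);

	if (IS_ERR(inode))
		return ERR_CAST(inode);	/* forward -EIO/-ENOMEM, not a blanket -EACCES */

	d_add(dentry, inode);		/* attach and return NULL: use this dentry */
	return NULL;
}
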
diff --git a/fs/ufs/super.c b/fs/ufs/super.c
index 0072cb33ebec..85b22b5977fa 100644
--- a/fs/ufs/super.c
+++ b/fs/ufs/super.c
@@ -76,7 +76,6 @@
76 76
77#include <linux/errno.h> 77#include <linux/errno.h>
78#include <linux/fs.h> 78#include <linux/fs.h>
79#include <linux/ufs_fs.h>
80#include <linux/slab.h> 79#include <linux/slab.h>
81#include <linux/time.h> 80#include <linux/time.h>
82#include <linux/stat.h> 81#include <linux/stat.h>
@@ -91,6 +90,7 @@
91#include <linux/mount.h> 90#include <linux/mount.h>
92#include <linux/seq_file.h> 91#include <linux/seq_file.h>
93 92
93#include "ufs_fs.h"
94#include "ufs.h" 94#include "ufs.h"
95#include "swab.h" 95#include "swab.h"
96#include "util.h" 96#include "util.h"
@@ -131,6 +131,8 @@ static void ufs_print_super_stuff(struct super_block *sb,
131 printk(KERN_INFO" cs_nffree(Num of free frags): %llu\n", 131 printk(KERN_INFO" cs_nffree(Num of free frags): %llu\n",
132 (unsigned long long) 132 (unsigned long long)
133 fs64_to_cpu(sb, usb3->fs_un1.fs_u2.cs_nffree)); 133 fs64_to_cpu(sb, usb3->fs_un1.fs_u2.cs_nffree));
134 printk(KERN_INFO" fs_maxsymlinklen: %u\n",
135 fs32_to_cpu(sb, usb3->fs_un2.fs_44.fs_maxsymlinklen));
134 } else { 136 } else {
135 printk(" sblkno: %u\n", fs32_to_cpu(sb, usb1->fs_sblkno)); 137 printk(" sblkno: %u\n", fs32_to_cpu(sb, usb1->fs_sblkno));
136 printk(" cblkno: %u\n", fs32_to_cpu(sb, usb1->fs_cblkno)); 138 printk(" cblkno: %u\n", fs32_to_cpu(sb, usb1->fs_cblkno));
@@ -633,6 +635,7 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent)
633 unsigned block_size, super_block_size; 635 unsigned block_size, super_block_size;
634 unsigned flags; 636 unsigned flags;
635 unsigned super_block_offset; 637 unsigned super_block_offset;
638 int ret = -EINVAL;
636 639
637 uspi = NULL; 640 uspi = NULL;
638 ubh = NULL; 641 ubh = NULL;
@@ -1060,17 +1063,21 @@ magic_found:
1060 uspi->s_bpf = uspi->s_fsize << 3; 1063 uspi->s_bpf = uspi->s_fsize << 3;
1061 uspi->s_bpfshift = uspi->s_fshift + 3; 1064 uspi->s_bpfshift = uspi->s_fshift + 3;
1062 uspi->s_bpfmask = uspi->s_bpf - 1; 1065 uspi->s_bpfmask = uspi->s_bpf - 1;
1063 if ((sbi->s_mount_opt & UFS_MOUNT_UFSTYPE) == 1066 if ((sbi->s_mount_opt & UFS_MOUNT_UFSTYPE) == UFS_MOUNT_UFSTYPE_44BSD ||
1064 UFS_MOUNT_UFSTYPE_44BSD) 1067 (sbi->s_mount_opt & UFS_MOUNT_UFSTYPE) == UFS_MOUNT_UFSTYPE_UFS2)
1065 uspi->s_maxsymlinklen = 1068 uspi->s_maxsymlinklen =
1066 fs32_to_cpu(sb, usb3->fs_un2.fs_44.fs_maxsymlinklen); 1069 fs32_to_cpu(sb, usb3->fs_un2.fs_44.fs_maxsymlinklen);
1067 1070
1068 inode = iget(sb, UFS_ROOTINO); 1071 inode = ufs_iget(sb, UFS_ROOTINO);
1069 if (!inode || is_bad_inode(inode)) 1072 if (IS_ERR(inode)) {
1073 ret = PTR_ERR(inode);
1070 goto failed; 1074 goto failed;
1075 }
1071 sb->s_root = d_alloc_root(inode); 1076 sb->s_root = d_alloc_root(inode);
1072 if (!sb->s_root) 1077 if (!sb->s_root) {
1078 ret = -ENOMEM;
1073 goto dalloc_failed; 1079 goto dalloc_failed;
1080 }
1074 1081
1075 ufs_setup_cstotal(sb); 1082 ufs_setup_cstotal(sb);
1076 /* 1083 /*
@@ -1092,7 +1099,7 @@ failed:
1092 kfree(sbi); 1099 kfree(sbi);
1093 sb->s_fs_info = NULL; 1100 sb->s_fs_info = NULL;
1094 UFSD("EXIT (FAILED)\n"); 1101 UFSD("EXIT (FAILED)\n");
1095 return -EINVAL; 1102 return ret;
1096 1103
1097failed_nomem: 1104failed_nomem:
1098 UFSD("EXIT (NOMEM)\n"); 1105 UFSD("EXIT (NOMEM)\n");
@@ -1326,7 +1333,6 @@ static ssize_t ufs_quota_write(struct super_block *, int, const char *, size_t,
1326static const struct super_operations ufs_super_ops = { 1333static const struct super_operations ufs_super_ops = {
1327 .alloc_inode = ufs_alloc_inode, 1334 .alloc_inode = ufs_alloc_inode,
1328 .destroy_inode = ufs_destroy_inode, 1335 .destroy_inode = ufs_destroy_inode,
1329 .read_inode = ufs_read_inode,
1330 .write_inode = ufs_write_inode, 1336 .write_inode = ufs_write_inode,
1331 .delete_inode = ufs_delete_inode, 1337 .delete_inode = ufs_delete_inode,
1332 .put_super = ufs_put_super, 1338 .put_super = ufs_put_super,
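
In ufs_fill_super() the root inode is now fetched explicitly and each failure path reports its own errno through the new ret variable, which is what allows .read_inode to be dropped from ufs_super_ops above. A rough sketch of that mount-time shape, again with invented foo_* names and reusing foo_iget() from the earlier sketch:

#include <linux/fs.h>
#include <linux/err.h>

#define FOO_ROOT_INO 2			/* invented root inode number */

struct inode *foo_iget(struct super_block *sb, unsigned long ino);	/* as sketched earlier */

static int foo_fill_super(struct super_block *sb, void *data, int silent)
{
	struct inode *root;
	int ret = -EINVAL;			/* default: unrecognised superblock */

	/* ... read and validate the on-disk superblock here ... */

	root = foo_iget(sb, FOO_ROOT_INO);	/* was: iget() + is_bad_inode() */
	if (IS_ERR(root)) {
		ret = PTR_ERR(root);		/* propagate -EIO, -ENOMEM, ... */
		goto failed;
	}

	sb->s_root = d_alloc_root(root);
	if (!sb->s_root) {
		iput(root);
		ret = -ENOMEM;
		goto failed;
	}
	return 0;

failed:
	/* ... release any private superblock state ... */
	return ret;
}
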
diff --git a/fs/ufs/symlink.c b/fs/ufs/symlink.c
index 43ac10e75a4a..c0156eda44bc 100644
--- a/fs/ufs/symlink.c
+++ b/fs/ufs/symlink.c
@@ -27,7 +27,8 @@
27 27
28#include <linux/fs.h> 28#include <linux/fs.h>
29#include <linux/namei.h> 29#include <linux/namei.h>
30#include <linux/ufs_fs.h> 30
31#include "ufs_fs.h"
31#include "ufs.h" 32#include "ufs.h"
32 33
33 34
diff --git a/fs/ufs/truncate.c b/fs/ufs/truncate.c
index 311ded34c2b2..41dd431ce228 100644
--- a/fs/ufs/truncate.c
+++ b/fs/ufs/truncate.c
@@ -36,7 +36,6 @@
36 36
37#include <linux/errno.h> 37#include <linux/errno.h>
38#include <linux/fs.h> 38#include <linux/fs.h>
39#include <linux/ufs_fs.h>
40#include <linux/fcntl.h> 39#include <linux/fcntl.h>
41#include <linux/time.h> 40#include <linux/time.h>
42#include <linux/stat.h> 41#include <linux/stat.h>
@@ -46,6 +45,7 @@
46#include <linux/blkdev.h> 45#include <linux/blkdev.h>
47#include <linux/sched.h> 46#include <linux/sched.h>
48 47
48#include "ufs_fs.h"
49#include "ufs.h" 49#include "ufs.h"
50#include "swab.h" 50#include "swab.h"
51#include "util.h" 51#include "util.h"
diff --git a/fs/ufs/ufs.h b/fs/ufs/ufs.h
index 7faa4cd71a27..fcb9231bb9ed 100644
--- a/fs/ufs/ufs.h
+++ b/fs/ufs/ufs.h
@@ -106,7 +106,7 @@ extern void ufs_free_inode (struct inode *inode);
106extern struct inode * ufs_new_inode (struct inode *, int); 106extern struct inode * ufs_new_inode (struct inode *, int);
107 107
108/* inode.c */ 108/* inode.c */
109extern void ufs_read_inode (struct inode *); 109extern struct inode *ufs_iget(struct super_block *, unsigned long);
110extern void ufs_put_inode (struct inode *); 110extern void ufs_put_inode (struct inode *);
111extern int ufs_write_inode (struct inode *, int); 111extern int ufs_write_inode (struct inode *, int);
112extern int ufs_sync_inode (struct inode *); 112extern int ufs_sync_inode (struct inode *);
diff --git a/fs/ufs/ufs_fs.h b/fs/ufs/ufs_fs.h
new file mode 100644
index 000000000000..54bde1895a80
--- /dev/null
+++ b/fs/ufs/ufs_fs.h
@@ -0,0 +1,947 @@
1/*
2 * linux/include/linux/ufs_fs.h
3 *
4 * Copyright (C) 1996
5 * Adrian Rodriguez (adrian@franklins-tower.rutgers.edu)
6 * Laboratory for Computer Science Research Computing Facility
7 * Rutgers, The State University of New Jersey
8 *
9 * Clean swab support by Fare <fare@tunes.org>
10 * just hope no one is using NNUUXXI on __?64 structure elements
11 * 64-bit clean thanks to Maciej W. Rozycki <macro@ds2.pg.gda.pl>
12 *
13 * 4.4BSD (FreeBSD) support added on February 1st 1998 by
14 * Niels Kristian Bech Jensen <nkbj@image.dk> partially based
15 * on code by Martin von Loewis <martin@mira.isdn.cs.tu-berlin.de>.
16 *
17 * NeXTstep support added on February 5th 1998 by
18 * Niels Kristian Bech Jensen <nkbj@image.dk>.
19 *
20 * Write support by Daniel Pirkl <daniel.pirkl@email.cz>
21 *
22 * HP/UX hfs filesystem support added by
23 * Martin K. Petersen <mkp@mkp.net>, August 1999
24 *
25 * UFS2 (of FreeBSD 5.x) support added by
26 * Niraj Kumar <niraj17@iitbombay.org> , Jan 2004
27 *
28 */
29
30#ifndef __LINUX_UFS_FS_H
31#define __LINUX_UFS_FS_H
32
33#include <linux/types.h>
34#include <linux/kernel.h>
35#include <linux/stat.h>
36#include <linux/fs.h>
37
38#include <asm/div64.h>
39typedef __u64 __bitwise __fs64;
40typedef __u32 __bitwise __fs32;
41typedef __u16 __bitwise __fs16;
42
43#define UFS_BBLOCK 0
44#define UFS_BBSIZE 8192
45#define UFS_SBLOCK 8192
46#define UFS_SBSIZE 8192
47
48#define UFS_SECTOR_SIZE 512
49#define UFS_SECTOR_BITS 9
50#define UFS_MAGIC 0x00011954
51#define UFS2_MAGIC 0x19540119
52#define UFS_CIGAM 0x54190100 /* byteswapped MAGIC */
53
54/* Copied from FreeBSD */
55/*
56 * Each disk drive contains some number of filesystems.
57 * A filesystem consists of a number of cylinder groups.
58 * Each cylinder group has inodes and data.
59 *
60 * A filesystem is described by its super-block, which in turn
61 * describes the cylinder groups. The super-block is critical
62 * data and is replicated in each cylinder group to protect against
63 * catastrophic loss. This is done at `newfs' time and the critical
64 * super-block data does not change, so the copies need not be
65 * referenced further unless disaster strikes.
66 *
67 * For filesystem fs, the offsets of the various blocks of interest
68 * are given in the super block as:
69 * [fs->fs_sblkno] Super-block
70 * [fs->fs_cblkno] Cylinder group block
71 * [fs->fs_iblkno] Inode blocks
72 * [fs->fs_dblkno] Data blocks
73 * The beginning of cylinder group cg in fs, is given by
74 * the ``cgbase(fs, cg)'' macro.
75 *
76 * Depending on the architecture and the media, the superblock may
77 * reside in any one of four places. For tiny media where every block
78 * counts, it is placed at the very front of the partition. Historically,
79 * UFS1 placed it 8K from the front to leave room for the disk label and
80 * a small bootstrap. For UFS2 it got moved to 64K from the front to leave
81 * room for the disk label and a bigger bootstrap, and for really piggy
82 * systems we check at 256K from the front if the first three fail. In
83 * all cases the size of the superblock will be SBLOCKSIZE. All values are
84 * given in byte-offset form, so they do not imply a sector size. The
85 * SBLOCKSEARCH specifies the order in which the locations should be searched.
86 */
87#define SBLOCK_FLOPPY 0
88#define SBLOCK_UFS1 8192
89#define SBLOCK_UFS2 65536
90#define SBLOCK_PIGGY 262144
91#define SBLOCKSIZE 8192
92#define SBLOCKSEARCH \
93 { SBLOCK_UFS2, SBLOCK_UFS1, SBLOCK_FLOPPY, SBLOCK_PIGGY, -1 }
94
95
96/* HP specific MAGIC values */
97
98#define UFS_MAGIC_LFN 0x00095014 /* fs supports filenames > 14 chars */
99#define UFS_CIGAM_LFN 0x14500900 /* srahc 41 < semanelif stroppus sf */
100
101#define UFS_MAGIC_SEC 0x00612195 /* B1 security fs */
102#define UFS_CIGAM_SEC 0x95216100
103
104#define UFS_MAGIC_FEA 0x00195612 /* fs_featurebits supported */
105#define UFS_CIGAM_FEA 0x12561900
106
107#define UFS_MAGIC_4GB 0x05231994 /* fs > 4 GB && fs_featurebits */
108#define UFS_CIGAM_4GB 0x94192305
109
110/* Seems somebody at HP goofed here. B1 and lfs are both 0x2 !?! */
111#define UFS_FSF_LFN 0x00000001 /* long file names */
112#define UFS_FSF_B1 0x00000002 /* B1 security */
113#define UFS_FSF_LFS 0x00000002 /* large files */
114#define UFS_FSF_LUID 0x00000004 /* large UIDs */
115
116/* End of HP stuff */
117
118
119#define UFS_BSIZE 8192
120#define UFS_MINBSIZE 4096
121#define UFS_FSIZE 1024
122#define UFS_MAXFRAG (UFS_BSIZE / UFS_FSIZE)
123
124#define UFS_NDADDR 12
125#define UFS_NINDIR 3
126
127#define UFS_IND_BLOCK (UFS_NDADDR + 0)
128#define UFS_DIND_BLOCK (UFS_NDADDR + 1)
129#define UFS_TIND_BLOCK (UFS_NDADDR + 2)
130
131#define UFS_NDIR_FRAGMENT (UFS_NDADDR << uspi->s_fpbshift)
132#define UFS_IND_FRAGMENT (UFS_IND_BLOCK << uspi->s_fpbshift)
133#define UFS_DIND_FRAGMENT (UFS_DIND_BLOCK << uspi->s_fpbshift)
134#define UFS_TIND_FRAGMENT (UFS_TIND_BLOCK << uspi->s_fpbshift)
135
136#define UFS_ROOTINO 2
137#define UFS_FIRST_INO (UFS_ROOTINO + 1)
138
139#define UFS_USEEFT ((__u16)65535)
140
141#define UFS_FSOK 0x7c269d38
142#define UFS_FSACTIVE ((__s8)0x00)
143#define UFS_FSCLEAN ((__s8)0x01)
144#define UFS_FSSTABLE ((__s8)0x02)
145#define UFS_FSOSF1 ((__s8)0x03) /* is this correct for DEC OSF/1? */
146#define UFS_FSBAD ((__s8)0xff)
147
148/* From here to next blank line, s_flags for ufs_sb_info */
149/* directory entry encoding */
150#define UFS_DE_MASK 0x00000010 /* mask for the following */
151#define UFS_DE_OLD 0x00000000
152#define UFS_DE_44BSD 0x00000010
153/* uid encoding */
154#define UFS_UID_MASK 0x00000060 /* mask for the following */
155#define UFS_UID_OLD 0x00000000
156#define UFS_UID_44BSD 0x00000020
157#define UFS_UID_EFT 0x00000040
158/* superblock state encoding */
159#define UFS_ST_MASK 0x00000700 /* mask for the following */
160#define UFS_ST_OLD 0x00000000
161#define UFS_ST_44BSD 0x00000100
162#define UFS_ST_SUN 0x00000200 /* Solaris */
163#define UFS_ST_SUNOS 0x00000300
164#define UFS_ST_SUNx86 0x00000400 /* Solaris x86 */
165/*cylinder group encoding */
166#define UFS_CG_MASK 0x00003000 /* mask for the following */
167#define UFS_CG_OLD 0x00000000
168#define UFS_CG_44BSD 0x00002000
169#define UFS_CG_SUN 0x00001000
170/* filesystem type encoding */
171#define UFS_TYPE_MASK 0x00010000 /* mask for the following */
172#define UFS_TYPE_UFS1 0x00000000
173#define UFS_TYPE_UFS2 0x00010000
174
175
176/* fs_inodefmt options */
177#define UFS_42INODEFMT -1
178#define UFS_44INODEFMT 2
179
180/*
181 * MINFREE gives the minimum acceptable percentage of file system
182 * blocks which may be free. If the freelist drops below this level
183 * only the superuser may continue to allocate blocks. This may
184 * be set to 0 if no reserve of free blocks is deemed necessary,
185 * however throughput drops by fifty percent if the file system
186 * is run at between 95% and 100% full; thus the minimum default
187 * value of fs_minfree is 5%. However, to get good clustering
188 * performance, 10% is a better choice. hence we use 10% as our
189 * default value. With 10% free space, fragmentation is not a
190 * problem, so we choose to optimize for time.
191 */
192#define UFS_MINFREE 5
193#define UFS_DEFAULTOPT UFS_OPTTIME
194
195/*
196 * Turn file system block numbers into disk block addresses.
197 * This maps file system blocks to device size blocks.
198 */
199#define ufs_fsbtodb(uspi, b) ((b) << (uspi)->s_fsbtodb)
200#define ufs_dbtofsb(uspi, b) ((b) >> (uspi)->s_fsbtodb)
201
202/*
203 * Cylinder group macros to locate things in cylinder groups.
204 * They calc file system addresses of cylinder group data structures.
205 */
206#define ufs_cgbase(c) (uspi->s_fpg * (c))
207#define ufs_cgstart(c) ((uspi)->fs_magic == UFS2_MAGIC ? ufs_cgbase(c) : \
208 (ufs_cgbase(c) + uspi->s_cgoffset * ((c) & ~uspi->s_cgmask)))
209#define ufs_cgsblock(c) (ufs_cgstart(c) + uspi->s_sblkno) /* super blk */
210#define ufs_cgcmin(c) (ufs_cgstart(c) + uspi->s_cblkno) /* cg block */
211#define ufs_cgimin(c) (ufs_cgstart(c) + uspi->s_iblkno) /* inode blk */
212#define ufs_cgdmin(c) (ufs_cgstart(c) + uspi->s_dblkno) /* 1st data */
213
214/*
215 * Macros for handling inode numbers:
216 * inode number to file system block offset.
217 * inode number to cylinder group number.
218 * inode number to file system block address.
219 */
220#define ufs_inotocg(x) ((x) / uspi->s_ipg)
221#define ufs_inotocgoff(x) ((x) % uspi->s_ipg)
222#define ufs_inotofsba(x) (((u64)ufs_cgimin(ufs_inotocg(x))) + ufs_inotocgoff(x) / uspi->s_inopf)
223#define ufs_inotofsbo(x) ((x) % uspi->s_inopf)
224
225/*
226 * Compute the cylinder and rotational position of a cyl block addr.
227 */
228#define ufs_cbtocylno(bno) \
229 ((bno) * uspi->s_nspf / uspi->s_spc)
230#define ufs_cbtorpos(bno) \
231 ((((bno) * uspi->s_nspf % uspi->s_spc / uspi->s_nsect \
232 * uspi->s_trackskew + (bno) * uspi->s_nspf % uspi->s_spc \
233 % uspi->s_nsect * uspi->s_interleave) % uspi->s_nsect \
234 * uspi->s_nrpos) / uspi->s_npsect)
235
236/*
237 * The following macros optimize certain frequently calculated
238 * quantities by using shifts and masks in place of divisions
239 * modulos and multiplications.
240 */
241#define ufs_blkoff(loc) ((loc) & uspi->s_qbmask)
242#define ufs_fragoff(loc) ((loc) & uspi->s_qfmask)
243#define ufs_lblktosize(blk) ((blk) << uspi->s_bshift)
244#define ufs_lblkno(loc) ((loc) >> uspi->s_bshift)
245#define ufs_numfrags(loc) ((loc) >> uspi->s_fshift)
246#define ufs_blkroundup(size) (((size) + uspi->s_qbmask) & uspi->s_bmask)
247#define ufs_fragroundup(size) (((size) + uspi->s_qfmask) & uspi->s_fmask)
248#define ufs_fragstoblks(frags) ((frags) >> uspi->s_fpbshift)
249#define ufs_blkstofrags(blks) ((blks) << uspi->s_fpbshift)
250#define ufs_fragnum(fsb) ((fsb) & uspi->s_fpbmask)
251#define ufs_blknum(fsb) ((fsb) & ~uspi->s_fpbmask)
252
253#define UFS_MAXNAMLEN 255
254#define UFS_MAXMNTLEN 512
255#define UFS2_MAXMNTLEN 468
256#define UFS2_MAXVOLLEN 32
257#define UFS_MAXCSBUFS 31
258#define UFS_LINK_MAX 32000
259/*
260#define UFS2_NOCSPTRS ((128 / sizeof(void *)) - 4)
261*/
262#define UFS2_NOCSPTRS 28
263
264/*
265 * UFS_DIR_PAD defines the directory entries boundaries
266 * (must be a multiple of 4)
267 */
268#define UFS_DIR_PAD 4
269#define UFS_DIR_ROUND (UFS_DIR_PAD - 1)
270#define UFS_DIR_REC_LEN(name_len) (((name_len) + 1 + 8 + UFS_DIR_ROUND) & ~UFS_DIR_ROUND)
271
272struct ufs_timeval {
273 __fs32 tv_sec;
274 __fs32 tv_usec;
275};
276
277struct ufs_dir_entry {
278 __fs32 d_ino; /* inode number of this entry */
279 __fs16 d_reclen; /* length of this entry */
280 union {
281 __fs16 d_namlen; /* actual length of d_name */
282 struct {
283 __u8 d_type; /* file type */
284 __u8 d_namlen; /* length of string in d_name */
285 } d_44;
286 } d_u;
287 __u8 d_name[UFS_MAXNAMLEN + 1]; /* file name */
288};
289
290struct ufs_csum {
291 __fs32 cs_ndir; /* number of directories */
292 __fs32 cs_nbfree; /* number of free blocks */
293 __fs32 cs_nifree; /* number of free inodes */
294 __fs32 cs_nffree; /* number of free frags */
295};
296struct ufs2_csum_total {
297 __fs64 cs_ndir; /* number of directories */
298 __fs64 cs_nbfree; /* number of free blocks */
299 __fs64 cs_nifree; /* number of free inodes */
300 __fs64 cs_nffree; /* number of free frags */
301 __fs64 cs_numclusters; /* number of free clusters */
302 __fs64 cs_spare[3]; /* future expansion */
303};
304
305struct ufs_csum_core {
306 __u64 cs_ndir; /* number of directories */
307 __u64 cs_nbfree; /* number of free blocks */
308 __u64 cs_nifree; /* number of free inodes */
309 __u64 cs_nffree; /* number of free frags */
310 __u64 cs_numclusters; /* number of free clusters */
311};
312
313/*
314 * File system flags
315 */
316#define UFS_UNCLEAN 0x01 /* file system not clean at mount (unused) */
317#define UFS_DOSOFTDEP 0x02 /* file system using soft dependencies */
318#define UFS_NEEDSFSCK 0x04 /* needs sync fsck (FreeBSD compat, unused) */
319#define UFS_INDEXDIRS 0x08 /* kernel supports indexed directories */
320#define UFS_ACLS 0x10 /* file system has ACLs enabled */
321#define UFS_MULTILABEL 0x20 /* file system is MAC multi-label */
322#define UFS_FLAGS_UPDATED 0x80 /* flags have been moved to new location */
323
324#if 0
325/*
326 * This is the actual superblock, as it is laid out on the disk.
327 * Do NOT use this structure, because of sizeof(ufs_super_block) > 512 and
328 * it may occupy several blocks, use
329 * struct ufs_super_block_(first,second,third) instead.
330 */
331struct ufs_super_block {
332 union {
333 struct {
334 __fs32 fs_link; /* UNUSED */
335 } fs_42;
336 struct {
337 __fs32 fs_state; /* file system state flag */
338 } fs_sun;
339 } fs_u0;
340 __fs32 fs_rlink; /* UNUSED */
341 __fs32 fs_sblkno; /* addr of super-block in filesys */
342 __fs32 fs_cblkno; /* offset of cyl-block in filesys */
343 __fs32 fs_iblkno; /* offset of inode-blocks in filesys */
344 __fs32 fs_dblkno; /* offset of first data after cg */
345 __fs32 fs_cgoffset; /* cylinder group offset in cylinder */
346 __fs32 fs_cgmask; /* used to calc mod fs_ntrak */
347 __fs32 fs_time; /* last time written -- time_t */
348 __fs32 fs_size; /* number of blocks in fs */
349 __fs32 fs_dsize; /* number of data blocks in fs */
350 __fs32 fs_ncg; /* number of cylinder groups */
351 __fs32 fs_bsize; /* size of basic blocks in fs */
352 __fs32 fs_fsize; /* size of frag blocks in fs */
353 __fs32 fs_frag; /* number of frags in a block in fs */
354/* these are configuration parameters */
355 __fs32 fs_minfree; /* minimum percentage of free blocks */
356 __fs32 fs_rotdelay; /* num of ms for optimal next block */
357 __fs32 fs_rps; /* disk revolutions per second */
358/* these fields can be computed from the others */
359 __fs32 fs_bmask; /* ``blkoff'' calc of blk offsets */
360 __fs32 fs_fmask; /* ``fragoff'' calc of frag offsets */
361 __fs32 fs_bshift; /* ``lblkno'' calc of logical blkno */
362 __fs32 fs_fshift; /* ``numfrags'' calc number of frags */
363/* these are configuration parameters */
364 __fs32 fs_maxcontig; /* max number of contiguous blks */
365 __fs32 fs_maxbpg; /* max number of blks per cyl group */
366/* these fields can be computed from the others */
367 __fs32 fs_fragshift; /* block to frag shift */
368 __fs32 fs_fsbtodb; /* fsbtodb and dbtofsb shift constant */
369 __fs32 fs_sbsize; /* actual size of super block */
370 __fs32 fs_csmask; /* csum block offset */
371 __fs32 fs_csshift; /* csum block number */
372 __fs32 fs_nindir; /* value of NINDIR */
373 __fs32 fs_inopb; /* value of INOPB */
374 __fs32 fs_nspf; /* value of NSPF */
375/* yet another configuration parameter */
376 __fs32 fs_optim; /* optimization preference, see below */
377/* these fields are derived from the hardware */
378 union {
379 struct {
380 __fs32 fs_npsect; /* # sectors/track including spares */
381 } fs_sun;
382 struct {
383 __fs32 fs_state; /* file system state time stamp */
384 } fs_sunx86;
385 } fs_u1;
386 __fs32 fs_interleave; /* hardware sector interleave */
387 __fs32 fs_trackskew; /* sector 0 skew, per track */
388/* a unique id for this filesystem (currently unused and unmaintained) */
389/* In 4.3 Tahoe this space is used by fs_headswitch and fs_trkseek */
390/* Neither of those fields is used in the Tahoe code right now but */
391/* there could be problems if they are. */
392 __fs32 fs_id[2]; /* file system id */
393/* sizes determined by number of cylinder groups and their sizes */
394 __fs32 fs_csaddr; /* blk addr of cyl grp summary area */
395 __fs32 fs_cssize; /* size of cyl grp summary area */
396 __fs32 fs_cgsize; /* cylinder group size */
397/* these fields are derived from the hardware */
398 __fs32 fs_ntrak; /* tracks per cylinder */
399 __fs32 fs_nsect; /* sectors per track */
400 __fs32 fs_spc; /* sectors per cylinder */
401/* this comes from the disk driver partitioning */
402 __fs32 fs_ncyl; /* cylinders in file system */
403/* these fields can be computed from the others */
404 __fs32 fs_cpg; /* cylinders per group */
405 __fs32 fs_ipg; /* inodes per cylinder group */
406 __fs32 fs_fpg; /* blocks per group * fs_frag */
407/* this data must be re-computed after crashes */
408 struct ufs_csum fs_cstotal; /* cylinder summary information */
409/* these fields are cleared at mount time */
410 __s8 fs_fmod; /* super block modified flag */
411 __s8 fs_clean; /* file system is clean flag */
412 __s8 fs_ronly; /* mounted read-only flag */
413 __s8 fs_flags;
414 union {
415 struct {
416 __s8 fs_fsmnt[UFS_MAXMNTLEN];/* name mounted on */
417 __fs32 fs_cgrotor; /* last cg searched */
418 __fs32 fs_csp[UFS_MAXCSBUFS];/*list of fs_cs info buffers */
419 __fs32 fs_maxcluster;
420 __fs32 fs_cpc; /* cyl per cycle in postbl */
421 __fs16 fs_opostbl[16][8]; /* old rotation block list head */
422 } fs_u1;
423 struct {
424 __s8 fs_fsmnt[UFS2_MAXMNTLEN]; /* name mounted on */
425 __u8 fs_volname[UFS2_MAXVOLLEN]; /* volume name */
426 __fs64 fs_swuid; /* system-wide uid */
427 __fs32 fs_pad; /* due to alignment of fs_swuid */
428 __fs32 fs_cgrotor; /* last cg searched */
429 __fs32 fs_ocsp[UFS2_NOCSPTRS]; /*list of fs_cs info buffers */
430 __fs32 fs_contigdirs;/*# of contiguously allocated dirs */
431 __fs32 fs_csp; /* cg summary info buffer for fs_cs */
432 __fs32 fs_maxcluster;
433 __fs32 fs_active;/* used by snapshots to track fs */
434 __fs32 fs_old_cpc; /* cyl per cycle in postbl */
435 __fs32 fs_maxbsize;/*maximum blocking factor permitted */
436 __fs64 fs_sparecon64[17];/*old rotation block list head */
437 __fs64 fs_sblockloc; /* byte offset of standard superblock */
438 struct ufs2_csum_total fs_cstotal;/*cylinder summary information*/
439 struct ufs_timeval fs_time; /* last time written */
440 __fs64 fs_size; /* number of blocks in fs */
441 __fs64 fs_dsize; /* number of data blocks in fs */
442 __fs64 fs_csaddr; /* blk addr of cyl grp summary area */
443 __fs64 fs_pendingblocks;/* blocks in process of being freed */
444 __fs32 fs_pendinginodes;/*inodes in process of being freed */
445 } fs_u2;
446 } fs_u11;
447 union {
448 struct {
449 __fs32 fs_sparecon[53];/* reserved for future constants */
450 __fs32 fs_reclaim;
451 __fs32 fs_sparecon2[1];
452 __fs32 fs_state; /* file system state time stamp */
453 __fs32 fs_qbmask[2]; /* ~usb_bmask */
454 __fs32 fs_qfmask[2]; /* ~usb_fmask */
455 } fs_sun;
456 struct {
457 __fs32 fs_sparecon[53];/* reserved for future constants */
458 __fs32 fs_reclaim;
459 __fs32 fs_sparecon2[1];
460 __fs32 fs_npsect; /* # sectors/track including spares */
461 __fs32 fs_qbmask[2]; /* ~usb_bmask */
462 __fs32 fs_qfmask[2]; /* ~usb_fmask */
463 } fs_sunx86;
464 struct {
465 __fs32 fs_sparecon[50];/* reserved for future constants */
466 __fs32 fs_contigsumsize;/* size of cluster summary array */
467 __fs32 fs_maxsymlinklen;/* max length of an internal symlink */
468 __fs32 fs_inodefmt; /* format of on-disk inodes */
469 __fs32 fs_maxfilesize[2]; /* max representable file size */
470 __fs32 fs_qbmask[2]; /* ~usb_bmask */
471 __fs32 fs_qfmask[2]; /* ~usb_fmask */
472 __fs32 fs_state; /* file system state time stamp */
473 } fs_44;
474 } fs_u2;
475 __fs32 fs_postblformat; /* format of positional layout tables */
476 __fs32 fs_nrpos; /* number of rotational positions */
477 __fs32 fs_postbloff; /* (__s16) rotation block list head */
478 __fs32 fs_rotbloff; /* (__u8) blocks for each rotation */
479 __fs32 fs_magic; /* magic number */
480 __u8 fs_space[1]; /* list of blocks for each rotation */
481};
482#endif/*struct ufs_super_block*/
483
484/*
485 * Preference for optimization.
486 */
487#define UFS_OPTTIME 0 /* minimize allocation time */
488#define UFS_OPTSPACE 1 /* minimize disk fragmentation */
489
490/*
491 * Rotational layout table format types
492 */
493#define UFS_42POSTBLFMT -1 /* 4.2BSD rotational table format */
494#define UFS_DYNAMICPOSTBLFMT 1 /* dynamic rotational table format */
495
496/*
497 * Convert cylinder group to base address of its global summary info.
498 */
499#define fs_cs(indx) s_csp[(indx)]
500
501/*
502 * Cylinder group block for a file system.
503 *
504 * Writable fields in the cylinder group are protected by the associated
505 * super block lock fs->fs_lock.
506 */
507#define CG_MAGIC 0x090255
508#define ufs_cg_chkmagic(sb, ucg) \
509 (fs32_to_cpu((sb), (ucg)->cg_magic) == CG_MAGIC)
510/*
511 * Macros for access to old cylinder group array structures
512 */
513#define ufs_ocg_blktot(sb, ucg) fs32_to_cpu((sb), ((struct ufs_old_cylinder_group *)(ucg))->cg_btot)
514#define ufs_ocg_blks(sb, ucg, cylno) fs32_to_cpu((sb), ((struct ufs_old_cylinder_group *)(ucg))->cg_b[cylno])
515#define ufs_ocg_inosused(sb, ucg) fs32_to_cpu((sb), ((struct ufs_old_cylinder_group *)(ucg))->cg_iused)
516#define ufs_ocg_blksfree(sb, ucg) fs32_to_cpu((sb), ((struct ufs_old_cylinder_group *)(ucg))->cg_free)
517#define ufs_ocg_chkmagic(sb, ucg) \
518 (fs32_to_cpu((sb), ((struct ufs_old_cylinder_group *)(ucg))->cg_magic) == CG_MAGIC)
519
520/*
521 * size of this structure is 172 B
522 */
523struct ufs_cylinder_group {
524 __fs32 cg_link; /* linked list of cyl groups */
525 __fs32 cg_magic; /* magic number */
526 __fs32 cg_time; /* time last written */
527 __fs32 cg_cgx; /* we are the cgx'th cylinder group */
528 __fs16 cg_ncyl; /* number of cyl's this cg */
529 __fs16 cg_niblk; /* number of inode blocks this cg */
530 __fs32 cg_ndblk; /* number of data blocks this cg */
531 struct ufs_csum cg_cs; /* cylinder summary information */
532 __fs32 cg_rotor; /* position of last used block */
533 __fs32 cg_frotor; /* position of last used frag */
534 __fs32 cg_irotor; /* position of last used inode */
535 __fs32 cg_frsum[UFS_MAXFRAG]; /* counts of available frags */
536 __fs32 cg_btotoff; /* (__u32) block totals per cylinder */
537 __fs32 cg_boff; /* (short) free block positions */
538 __fs32 cg_iusedoff; /* (char) used inode map */
539 __fs32 cg_freeoff; /* (u_char) free block map */
540 __fs32 cg_nextfreeoff; /* (u_char) next available space */
541 union {
542 struct {
543 __fs32 cg_clustersumoff; /* (u_int32) counts of avail clusters */
544 __fs32 cg_clusteroff; /* (u_int8) free cluster map */
545 __fs32 cg_nclusterblks; /* number of clusters this cg */
546 __fs32 cg_sparecon[13]; /* reserved for future use */
547 } cg_44;
548 struct {
549 __fs32 cg_clustersumoff;/* (u_int32) counts of avail clusters */
550 __fs32 cg_clusteroff; /* (u_int8) free cluster map */
551 __fs32 cg_nclusterblks;/* number of clusters this cg */
552 __fs32 cg_niblk; /* number of inode blocks this cg */
553 __fs32 cg_initediblk; /* last initialized inode */
554 __fs32 cg_sparecon32[3];/* reserved for future use */
555 __fs64 cg_time; /* time last written */
556 __fs64 cg_sparecon[3]; /* reserved for future use */
557 } cg_u2;
558 __fs32 cg_sparecon[16]; /* reserved for future use */
559 } cg_u;
560 __u8 cg_space[1]; /* space for cylinder group maps */
561/* actually longer */
562};
563
564/* Historic Cylinder group info */
565struct ufs_old_cylinder_group {
566 __fs32 cg_link; /* linked list of cyl groups */
567 __fs32 cg_rlink; /* for incore cyl groups */
568 __fs32 cg_time; /* time last written */
569 __fs32 cg_cgx; /* we are the cgx'th cylinder group */
570 __fs16 cg_ncyl; /* number of cyl's this cg */
571 __fs16 cg_niblk; /* number of inode blocks this cg */
572 __fs32 cg_ndblk; /* number of data blocks this cg */
573 struct ufs_csum cg_cs; /* cylinder summary information */
574 __fs32 cg_rotor; /* position of last used block */
575 __fs32 cg_frotor; /* position of last used frag */
576 __fs32 cg_irotor; /* position of last used inode */
577 __fs32 cg_frsum[8]; /* counts of available frags */
578 __fs32 cg_btot[32]; /* block totals per cylinder */
579 __fs16 cg_b[32][8]; /* positions of free blocks */
580 __u8 cg_iused[256]; /* used inode map */
581 __fs32 cg_magic; /* magic number */
582 __u8 cg_free[1]; /* free block map */
583/* actually longer */
584};
585
586/*
587 * structure of an on-disk inode
588 */
589struct ufs_inode {
590 __fs16 ui_mode; /* 0x0 */
591 __fs16 ui_nlink; /* 0x2 */
592 union {
593 struct {
594 __fs16 ui_suid; /* 0x4 */
595 __fs16 ui_sgid; /* 0x6 */
596 } oldids;
597 __fs32 ui_inumber; /* 0x4 lsf: inode number */
598 __fs32 ui_author; /* 0x4 GNU HURD: author */
599 } ui_u1;
600 __fs64 ui_size; /* 0x8 */
601 struct ufs_timeval ui_atime; /* 0x10 access */
602 struct ufs_timeval ui_mtime; /* 0x18 modification */
603 struct ufs_timeval ui_ctime; /* 0x20 creation */
604 union {
605 struct {
606 __fs32 ui_db[UFS_NDADDR];/* 0x28 data blocks */
607 __fs32 ui_ib[UFS_NINDIR];/* 0x58 indirect blocks */
608 } ui_addr;
609 __u8 ui_symlink[4*(UFS_NDADDR+UFS_NINDIR)];/* 0x28 fast symlink */
610 } ui_u2;
611 __fs32 ui_flags; /* 0x64 immutable, append-only... */
612 __fs32 ui_blocks; /* 0x68 blocks in use */
613 __fs32 ui_gen; /* 0x6c like ext2 i_version, for NFS support */
614 union {
615 struct {
616 __fs32 ui_shadow; /* 0x70 shadow inode with security data */
617 __fs32 ui_uid; /* 0x74 long EFT version of uid */
618 __fs32 ui_gid; /* 0x78 long EFT version of gid */
619 __fs32 ui_oeftflag; /* 0x7c reserved */
620 } ui_sun;
621 struct {
622 __fs32 ui_uid; /* 0x70 File owner */
623 __fs32 ui_gid; /* 0x74 File group */
624 __fs32 ui_spare[2]; /* 0x78 reserved */
625 } ui_44;
626 struct {
627 __fs32 ui_uid; /* 0x70 */
628 __fs32 ui_gid; /* 0x74 */
629 __fs16 ui_modeh; /* 0x78 mode high bits */
630 __fs16 ui_spare; /* 0x7A unused */
631 __fs32 ui_trans; /* 0x7c filesystem translator */
632 } ui_hurd;
633 } ui_u3;
634};
635
636#define UFS_NXADDR 2 /* External addresses in inode. */
637struct ufs2_inode {
638 __fs16 ui_mode; /* 0: IFMT, permissions; see below. */
639 __fs16 ui_nlink; /* 2: File link count. */
640 __fs32 ui_uid; /* 4: File owner. */
641 __fs32 ui_gid; /* 8: File group. */
642 __fs32 ui_blksize; /* 12: Inode blocksize. */
643 __fs64 ui_size; /* 16: File byte count. */
644 __fs64 ui_blocks; /* 24: Bytes actually held. */
645 __fs64 ui_atime; /* 32: Last access time. */
646 __fs64 ui_mtime; /* 40: Last modified time. */
647 __fs64 ui_ctime; /* 48: Last inode change time. */
648 __fs64 ui_birthtime; /* 56: Inode creation time. */
649 __fs32 ui_mtimensec; /* 64: Last modified time. */
650 __fs32 ui_atimensec; /* 68: Last access time. */
651 __fs32 ui_ctimensec; /* 72: Last inode change time. */
652 __fs32 ui_birthnsec; /* 76: Inode creation time. */
653 __fs32 ui_gen; /* 80: Generation number. */
654 __fs32 ui_kernflags; /* 84: Kernel flags. */
655 __fs32 ui_flags; /* 88: Status flags (chflags). */
656 __fs32 ui_extsize; /* 92: External attributes block. */
657 __fs64 ui_extb[UFS_NXADDR];/* 96: External attributes block. */
658 union {
659 struct {
660 __fs64 ui_db[UFS_NDADDR]; /* 112: Direct disk blocks. */
661 __fs64 ui_ib[UFS_NINDIR];/* 208: Indirect disk blocks.*/
662 } ui_addr;
663 __u8 ui_symlink[2*4*(UFS_NDADDR+UFS_NINDIR)];/* 0x28 fast symlink */
664 } ui_u2;
665 __fs64 ui_spare[3]; /* 232: Reserved; currently unused */
666};
667
668
669/* FreeBSD has these in sys/stat.h */
670/* ui_flags that can be set by a file owner */
671#define UFS_UF_SETTABLE 0x0000ffff
672#define UFS_UF_NODUMP 0x00000001 /* do not dump */
673#define UFS_UF_IMMUTABLE 0x00000002 /* immutable (can't "change") */
674#define UFS_UF_APPEND 0x00000004 /* append-only */
675#define UFS_UF_OPAQUE 0x00000008 /* directory is opaque (unionfs) */
676#define UFS_UF_NOUNLINK 0x00000010 /* can't be removed or renamed */
677/* ui_flags that only root can set */
678#define UFS_SF_SETTABLE 0xffff0000
679#define UFS_SF_ARCHIVED 0x00010000 /* archived */
680#define UFS_SF_IMMUTABLE 0x00020000 /* immutable (can't "change") */
681#define UFS_SF_APPEND 0x00040000 /* append-only */
682#define UFS_SF_NOUNLINK 0x00100000 /* can't be removed or renamed */
683
684/*
685 * This structure is used for reading disk structures larger
686 * than the size of fragment.
687 */
688struct ufs_buffer_head {
689 __u64 fragment; /* first fragment */
690 __u64 count; /* number of fragments */
691 struct buffer_head * bh[UFS_MAXFRAG]; /* buffers */
692};
693
694struct ufs_cg_private_info {
695 struct ufs_buffer_head c_ubh;
 696 __u32 c_cgx; /* number of cylinder group */
697 __u16 c_ncyl; /* number of cyl's this cg */
698 __u16 c_niblk; /* number of inode blocks this cg */
699 __u32 c_ndblk; /* number of data blocks this cg */
700 __u32 c_rotor; /* position of last used block */
701 __u32 c_frotor; /* position of last used frag */
702 __u32 c_irotor; /* position of last used inode */
703 __u32 c_btotoff; /* (__u32) block totals per cylinder */
704 __u32 c_boff; /* (short) free block positions */
705 __u32 c_iusedoff; /* (char) used inode map */
706 __u32 c_freeoff; /* (u_char) free block map */
707 __u32 c_nextfreeoff; /* (u_char) next available space */
708 __u32 c_clustersumoff;/* (u_int32) counts of avail clusters */
709 __u32 c_clusteroff; /* (u_int8) free cluster map */
710 __u32 c_nclusterblks; /* number of clusters this cg */
711};
712
713
714struct ufs_sb_private_info {
715 struct ufs_buffer_head s_ubh; /* buffer containing super block */
716 struct ufs_csum_core cs_total;
717 __u32 s_sblkno; /* offset of super-blocks in filesys */
718 __u32 s_cblkno; /* offset of cg-block in filesys */
719 __u32 s_iblkno; /* offset of inode-blocks in filesys */
720 __u32 s_dblkno; /* offset of first data after cg */
721 __u32 s_cgoffset; /* cylinder group offset in cylinder */
722 __u32 s_cgmask; /* used to calc mod fs_ntrak */
723 __u32 s_size; /* number of blocks (fragments) in fs */
724 __u32 s_dsize; /* number of data blocks in fs */
725 __u64 s_u2_size; /* ufs2: number of blocks (fragments) in fs */
726 __u64 s_u2_dsize; /*ufs2: number of data blocks in fs */
727 __u32 s_ncg; /* number of cylinder groups */
728 __u32 s_bsize; /* size of basic blocks */
729 __u32 s_fsize; /* size of fragments */
730 __u32 s_fpb; /* fragments per block */
731 __u32 s_minfree; /* minimum percentage of free blocks */
732 __u32 s_bmask; /* `blkoff'' calc of blk offsets */
733 __u32 s_fmask; /* s_fsize mask */
734 __u32 s_bshift; /* `lblkno'' calc of logical blkno */
735 __u32 s_fshift; /* s_fsize shift */
736 __u32 s_fpbshift; /* fragments per block shift */
737 __u32 s_fsbtodb; /* fsbtodb and dbtofsb shift constant */
738 __u32 s_sbsize; /* actual size of super block */
739 __u32 s_csmask; /* csum block offset */
740 __u32 s_csshift; /* csum block number */
741 __u32 s_nindir; /* value of NINDIR */
742 __u32 s_inopb; /* value of INOPB */
743 __u32 s_nspf; /* value of NSPF */
744 __u32 s_npsect; /* # sectors/track including spares */
745 __u32 s_interleave; /* hardware sector interleave */
746 __u32 s_trackskew; /* sector 0 skew, per track */
747 __u64 s_csaddr; /* blk addr of cyl grp summary area */
748 __u32 s_cssize; /* size of cyl grp summary area */
749 __u32 s_cgsize; /* cylinder group size */
750 __u32 s_ntrak; /* tracks per cylinder */
751 __u32 s_nsect; /* sectors per track */
752 __u32 s_spc; /* sectors per cylinder */
753 __u32 s_ipg; /* inodes per cylinder group */
754 __u32 s_fpg; /* fragments per group */
755 __u32 s_cpc; /* cyl per cycle in postbl */
756 __s32 s_contigsumsize;/* size of cluster summary array, 44bsd */
757 __s64 s_qbmask; /* ~usb_bmask */
758 __s64 s_qfmask; /* ~usb_fmask */
759 __s32 s_postblformat; /* format of positional layout tables */
760 __s32 s_nrpos; /* number of rotational positions */
761 __s32 s_postbloff; /* (__s16) rotation block list head */
762 __s32 s_rotbloff; /* (__u8) blocks for each rotation */
763
764 __u32 s_fpbmask; /* fragments per block mask */
765 __u32 s_apb; /* address per block */
766 __u32 s_2apb; /* address per block^2 */
767 __u32 s_3apb; /* address per block^3 */
768 __u32 s_apbmask; /* address per block mask */
769 __u32 s_apbshift; /* address per block shift */
770 __u32 s_2apbshift; /* address per block shift * 2 */
771 __u32 s_3apbshift; /* address per block shift * 3 */
772 __u32 s_nspfshift; /* number of sector per fragment shift */
773 __u32 s_nspb; /* number of sector per block */
774 __u32 s_inopf; /* inodes per fragment */
775 __u32 s_sbbase; /* offset of NeXTstep superblock */
776 __u32 s_bpf; /* bits per fragment */
777 __u32 s_bpfshift; /* bits per fragment shift*/
778 __u32 s_bpfmask; /* bits per fragment mask */
779
780 __u32 s_maxsymlinklen;/* upper limit on fast symlinks' size */
781 __s32 fs_magic; /* filesystem magic */
782 unsigned int s_dirblksize;
783};
784
785/*
 786 * Sizes of these structures are:
787 * ufs_super_block_first 512
788 * ufs_super_block_second 512
789 * ufs_super_block_third 356
790 */
791struct ufs_super_block_first {
792 union {
793 struct {
794 __fs32 fs_link; /* UNUSED */
795 } fs_42;
796 struct {
797 __fs32 fs_state; /* file system state flag */
798 } fs_sun;
799 } fs_u0;
800 __fs32 fs_rlink;
801 __fs32 fs_sblkno;
802 __fs32 fs_cblkno;
803 __fs32 fs_iblkno;
804 __fs32 fs_dblkno;
805 __fs32 fs_cgoffset;
806 __fs32 fs_cgmask;
807 __fs32 fs_time;
808 __fs32 fs_size;
809 __fs32 fs_dsize;
810 __fs32 fs_ncg;
811 __fs32 fs_bsize;
812 __fs32 fs_fsize;
813 __fs32 fs_frag;
814 __fs32 fs_minfree;
815 __fs32 fs_rotdelay;
816 __fs32 fs_rps;
817 __fs32 fs_bmask;
818 __fs32 fs_fmask;
819 __fs32 fs_bshift;
820 __fs32 fs_fshift;
821 __fs32 fs_maxcontig;
822 __fs32 fs_maxbpg;
823 __fs32 fs_fragshift;
824 __fs32 fs_fsbtodb;
825 __fs32 fs_sbsize;
826 __fs32 fs_csmask;
827 __fs32 fs_csshift;
828 __fs32 fs_nindir;
829 __fs32 fs_inopb;
830 __fs32 fs_nspf;
831 __fs32 fs_optim;
832 union {
833 struct {
834 __fs32 fs_npsect;
835 } fs_sun;
836 struct {
837 __fs32 fs_state;
838 } fs_sunx86;
839 } fs_u1;
840 __fs32 fs_interleave;
841 __fs32 fs_trackskew;
842 __fs32 fs_id[2];
843 __fs32 fs_csaddr;
844 __fs32 fs_cssize;
845 __fs32 fs_cgsize;
846 __fs32 fs_ntrak;
847 __fs32 fs_nsect;
848 __fs32 fs_spc;
849 __fs32 fs_ncyl;
850 __fs32 fs_cpg;
851 __fs32 fs_ipg;
852 __fs32 fs_fpg;
853 struct ufs_csum fs_cstotal;
854 __s8 fs_fmod;
855 __s8 fs_clean;
856 __s8 fs_ronly;
857 __s8 fs_flags;
858 __s8 fs_fsmnt[UFS_MAXMNTLEN - 212];
859
860};
861
862struct ufs_super_block_second {
863 union {
864 struct {
865 __s8 fs_fsmnt[212];
866 __fs32 fs_cgrotor;
867 __fs32 fs_csp[UFS_MAXCSBUFS];
868 __fs32 fs_maxcluster;
869 __fs32 fs_cpc;
870 __fs16 fs_opostbl[82];
871 } fs_u1;
872 struct {
873 __s8 fs_fsmnt[UFS2_MAXMNTLEN - UFS_MAXMNTLEN + 212];
874 __u8 fs_volname[UFS2_MAXVOLLEN];
875 __fs64 fs_swuid;
876 __fs32 fs_pad;
877 __fs32 fs_cgrotor;
878 __fs32 fs_ocsp[UFS2_NOCSPTRS];
879 __fs32 fs_contigdirs;
880 __fs32 fs_csp;
881 __fs32 fs_maxcluster;
882 __fs32 fs_active;
883 __fs32 fs_old_cpc;
884 __fs32 fs_maxbsize;
885 __fs64 fs_sparecon64[17];
886 __fs64 fs_sblockloc;
887 __fs64 cs_ndir;
888 __fs64 cs_nbfree;
889 } fs_u2;
890 } fs_un;
891};
892
893struct ufs_super_block_third {
894 union {
895 struct {
896 __fs16 fs_opostbl[46];
897 } fs_u1;
898 struct {
899 __fs64 cs_nifree; /* number of free inodes */
900 __fs64 cs_nffree; /* number of free frags */
901 __fs64 cs_numclusters; /* number of free clusters */
902 __fs64 cs_spare[3]; /* future expansion */
903 struct ufs_timeval fs_time; /* last time written */
904 __fs64 fs_size; /* number of blocks in fs */
905 __fs64 fs_dsize; /* number of data blocks in fs */
906 __fs64 fs_csaddr; /* blk addr of cyl grp summary area */
907 __fs64 fs_pendingblocks;/* blocks in process of being freed */
908 __fs32 fs_pendinginodes;/*inodes in process of being freed */
909 } __attribute__ ((packed)) fs_u2;
910 } fs_un1;
911 union {
912 struct {
913 __fs32 fs_sparecon[53];/* reserved for future constants */
914 __fs32 fs_reclaim;
915 __fs32 fs_sparecon2[1];
916 __fs32 fs_state; /* file system state time stamp */
917 __fs32 fs_qbmask[2]; /* ~usb_bmask */
918 __fs32 fs_qfmask[2]; /* ~usb_fmask */
919 } fs_sun;
920 struct {
921 __fs32 fs_sparecon[53];/* reserved for future constants */
922 __fs32 fs_reclaim;
923 __fs32 fs_sparecon2[1];
924 __fs32 fs_npsect; /* # sectors/track including spares */
925 __fs32 fs_qbmask[2]; /* ~usb_bmask */
926 __fs32 fs_qfmask[2]; /* ~usb_fmask */
927 } fs_sunx86;
928 struct {
929 __fs32 fs_sparecon[50];/* reserved for future constants */
930 __fs32 fs_contigsumsize;/* size of cluster summary array */
931 __fs32 fs_maxsymlinklen;/* max length of an internal symlink */
932 __fs32 fs_inodefmt; /* format of on-disk inodes */
933 __fs32 fs_maxfilesize[2]; /* max representable file size */
934 __fs32 fs_qbmask[2]; /* ~usb_bmask */
935 __fs32 fs_qfmask[2]; /* ~usb_fmask */
936 __fs32 fs_state; /* file system state time stamp */
937 } fs_44;
938 } fs_un2;
939 __fs32 fs_postblformat;
940 __fs32 fs_nrpos;
941 __fs32 fs_postbloff;
942 __fs32 fs_rotbloff;
943 __fs32 fs_magic;
944 __u8 fs_space[1];
945};
946
947#endif /* __LINUX_UFS_FS_H */
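
The addressing macros near the top of this header (ufs_inotocg(), ufs_inotocgoff(), ufs_inotofsba(), ufs_inotofsbo()) reduce to plain integer division and remainder against the geometry kept in ufs_sb_private_info. A standalone worked example with made-up numbers (1920 inodes per cylinder group, 4 inodes per fragment, and a pretend inode-area start for group 2 — none of these are values from a real disk):

#include <stdio.h>

int main(void)
{
	/* Invented geometry, for illustration only. */
	unsigned long s_ipg   = 1920;	/* inodes per cylinder group */
	unsigned long s_inopf = 4;	/* inodes per fragment */
	unsigned long cgimin2 = 5000;	/* pretend start of cg 2's inode area */

	unsigned long ino = 4000;

	unsigned long cg    = ino / s_ipg;		  /* ufs_inotocg    -> 2    */
	unsigned long cgoff = ino % s_ipg;		  /* ufs_inotocgoff -> 160  */
	unsigned long fsba  = cgimin2 + cgoff / s_inopf; /* ufs_inotofsba  -> 5040 */
	unsigned long fsbo  = ino % s_inopf;		  /* ufs_inotofsbo  -> 0    */

	printf("ino %lu: cg=%lu offset=%lu fragment=%lu index=%lu\n",
	       ino, cg, cgoff, fsba, fsbo);
	return 0;
}

Compiled and run, this prints "ino 4000: cg=2 offset=160 fragment=5040 index=0".
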
diff --git a/fs/ufs/util.c b/fs/ufs/util.c
index 410084dae389..85a7fc9e4a4e 100644
--- a/fs/ufs/util.c
+++ b/fs/ufs/util.c
@@ -8,9 +8,9 @@
8 8
9#include <linux/string.h> 9#include <linux/string.h>
10#include <linux/slab.h> 10#include <linux/slab.h>
11#include <linux/ufs_fs.h>
12#include <linux/buffer_head.h> 11#include <linux/buffer_head.h>
13 12
13#include "ufs_fs.h"
14#include "ufs.h" 14#include "ufs.h"
15#include "swab.h" 15#include "swab.h"
16#include "util.h" 16#include "util.h"
diff --git a/fs/utimes.c b/fs/utimes.c
index b9912ecbee24..b18da9c0b97f 100644
--- a/fs/utimes.c
+++ b/fs/utimes.c
@@ -6,6 +6,7 @@
6#include <linux/sched.h> 6#include <linux/sched.h>
7#include <linux/stat.h> 7#include <linux/stat.h>
8#include <linux/utime.h> 8#include <linux/utime.h>
9#include <linux/syscalls.h>
9#include <asm/uaccess.h> 10#include <asm/uaccess.h>
10#include <asm/unistd.h> 11#include <asm/unistd.h>
11 12
@@ -83,7 +84,7 @@ long do_utimes(int dfd, char __user *filename, struct timespec *times, int flags
83 if (error) 84 if (error)
84 goto out; 85 goto out;
85 86
86 dentry = nd.dentry; 87 dentry = nd.path.dentry;
87 } 88 }
88 89
89 inode = dentry->d_inode; 90 inode = dentry->d_inode;
@@ -137,7 +138,7 @@ dput_and_out:
137 if (f) 138 if (f)
138 fput(f); 139 fput(f);
139 else 140 else
140 path_release(&nd); 141 path_put(&nd.path);
141out: 142out:
142 return error; 143 return error;
143} 144}
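
The nd.dentry -> nd.path.dentry and path_release() -> path_put() substitutions in this file (and in fs/xattr.c and fs/xfs below) come from the VFS change that embedded a struct path inside struct nameidata. A kernel-context sketch of the resulting caller pattern, with foo_path_size() invented for illustration:

#include <linux/fs.h>
#include <linux/namei.h>

static loff_t foo_path_size(const char __user *filename)
{
	struct nameidata nd;
	loff_t size;
	int error;

	error = user_path_walk(filename, &nd);		/* resolve the user path */
	if (error)
		return error;

	size = i_size_read(nd.path.dentry->d_inode);	/* was: nd.dentry->d_inode */
	path_put(&nd.path);				/* was: path_release(&nd) */
	return size;
}
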
diff --git a/fs/vfat/namei.c b/fs/vfat/namei.c
index c28add2fbe95..cd450bea9f1a 100644
--- a/fs/vfat/namei.c
+++ b/fs/vfat/namei.c
@@ -705,7 +705,7 @@ static struct dentry *vfat_lookup(struct inode *dir, struct dentry *dentry,
705 brelse(sinfo.bh); 705 brelse(sinfo.bh);
706 if (IS_ERR(inode)) { 706 if (IS_ERR(inode)) {
707 unlock_kernel(); 707 unlock_kernel();
708 return ERR_PTR(PTR_ERR(inode)); 708 return ERR_CAST(inode);
709 } 709 }
710 alias = d_find_alias(inode); 710 alias = d_find_alias(inode);
711 if (alias) { 711 if (alias) {
diff --git a/fs/xattr.c b/fs/xattr.c
index 6645b7313b33..3acab1615460 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -105,6 +105,33 @@ out:
105EXPORT_SYMBOL_GPL(vfs_setxattr); 105EXPORT_SYMBOL_GPL(vfs_setxattr);
106 106
107ssize_t 107ssize_t
108xattr_getsecurity(struct inode *inode, const char *name, void *value,
109 size_t size)
110{
111 void *buffer = NULL;
112 ssize_t len;
113
114 if (!value || !size) {
115 len = security_inode_getsecurity(inode, name, &buffer, false);
116 goto out_noalloc;
117 }
118
119 len = security_inode_getsecurity(inode, name, &buffer, true);
120 if (len < 0)
121 return len;
122 if (size < len) {
123 len = -ERANGE;
124 goto out;
125 }
126 memcpy(value, buffer, len);
127out:
128 security_release_secctx(buffer, len);
129out_noalloc:
130 return len;
131}
132EXPORT_SYMBOL_GPL(xattr_getsecurity);
133
134ssize_t
108vfs_getxattr(struct dentry *dentry, char *name, void *value, size_t size) 135vfs_getxattr(struct dentry *dentry, char *name, void *value, size_t size)
109{ 136{
110 struct inode *inode = dentry->d_inode; 137 struct inode *inode = dentry->d_inode;
@@ -118,23 +145,23 @@ vfs_getxattr(struct dentry *dentry, char *name, void *value, size_t size)
118 if (error) 145 if (error)
119 return error; 146 return error;
120 147
121 if (inode->i_op->getxattr)
122 error = inode->i_op->getxattr(dentry, name, value, size);
123 else
124 error = -EOPNOTSUPP;
125
126 if (!strncmp(name, XATTR_SECURITY_PREFIX, 148 if (!strncmp(name, XATTR_SECURITY_PREFIX,
127 XATTR_SECURITY_PREFIX_LEN)) { 149 XATTR_SECURITY_PREFIX_LEN)) {
128 const char *suffix = name + XATTR_SECURITY_PREFIX_LEN; 150 const char *suffix = name + XATTR_SECURITY_PREFIX_LEN;
129 int ret = security_inode_getsecurity(inode, suffix, value, 151 int ret = xattr_getsecurity(inode, suffix, value, size);
130 size, error);
131 /* 152 /*
132 * Only overwrite the return value if a security module 153 * Only overwrite the return value if a security module
133 * is actually active. 154 * is actually active.
134 */ 155 */
135 if (ret != -EOPNOTSUPP) 156 if (ret == -EOPNOTSUPP)
136 error = ret; 157 goto nolsm;
158 return ret;
137 } 159 }
160nolsm:
161 if (inode->i_op->getxattr)
162 error = inode->i_op->getxattr(dentry, name, value, size);
163 else
164 error = -EOPNOTSUPP;
138 165
139 return error; 166 return error;
140} 167}
@@ -235,8 +262,8 @@ sys_setxattr(char __user *path, char __user *name, void __user *value,
235 error = user_path_walk(path, &nd); 262 error = user_path_walk(path, &nd);
236 if (error) 263 if (error)
237 return error; 264 return error;
238 error = setxattr(nd.dentry, name, value, size, flags); 265 error = setxattr(nd.path.dentry, name, value, size, flags);
239 path_release(&nd); 266 path_put(&nd.path);
240 return error; 267 return error;
241} 268}
242 269
@@ -250,8 +277,8 @@ sys_lsetxattr(char __user *path, char __user *name, void __user *value,
250 error = user_path_walk_link(path, &nd); 277 error = user_path_walk_link(path, &nd);
251 if (error) 278 if (error)
252 return error; 279 return error;
253 error = setxattr(nd.dentry, name, value, size, flags); 280 error = setxattr(nd.path.dentry, name, value, size, flags);
254 path_release(&nd); 281 path_put(&nd.path);
255 return error; 282 return error;
256} 283}
257 284
@@ -320,8 +347,8 @@ sys_getxattr(char __user *path, char __user *name, void __user *value,
320 error = user_path_walk(path, &nd); 347 error = user_path_walk(path, &nd);
321 if (error) 348 if (error)
322 return error; 349 return error;
323 error = getxattr(nd.dentry, name, value, size); 350 error = getxattr(nd.path.dentry, name, value, size);
324 path_release(&nd); 351 path_put(&nd.path);
325 return error; 352 return error;
326} 353}
327 354
@@ -335,8 +362,8 @@ sys_lgetxattr(char __user *path, char __user *name, void __user *value,
335 error = user_path_walk_link(path, &nd); 362 error = user_path_walk_link(path, &nd);
336 if (error) 363 if (error)
337 return error; 364 return error;
338 error = getxattr(nd.dentry, name, value, size); 365 error = getxattr(nd.path.dentry, name, value, size);
339 path_release(&nd); 366 path_put(&nd.path);
340 return error; 367 return error;
341} 368}
342 369
@@ -394,8 +421,8 @@ sys_listxattr(char __user *path, char __user *list, size_t size)
394 error = user_path_walk(path, &nd); 421 error = user_path_walk(path, &nd);
395 if (error) 422 if (error)
396 return error; 423 return error;
397 error = listxattr(nd.dentry, list, size); 424 error = listxattr(nd.path.dentry, list, size);
398 path_release(&nd); 425 path_put(&nd.path);
399 return error; 426 return error;
400} 427}
401 428
@@ -408,8 +435,8 @@ sys_llistxattr(char __user *path, char __user *list, size_t size)
408 error = user_path_walk_link(path, &nd); 435 error = user_path_walk_link(path, &nd);
409 if (error) 436 if (error)
410 return error; 437 return error;
411 error = listxattr(nd.dentry, list, size); 438 error = listxattr(nd.path.dentry, list, size);
412 path_release(&nd); 439 path_put(&nd.path);
413 return error; 440 return error;
414} 441}
415 442
@@ -455,8 +482,8 @@ sys_removexattr(char __user *path, char __user *name)
455 error = user_path_walk(path, &nd); 482 error = user_path_walk(path, &nd);
456 if (error) 483 if (error)
457 return error; 484 return error;
458 error = removexattr(nd.dentry, name); 485 error = removexattr(nd.path.dentry, name);
459 path_release(&nd); 486 path_put(&nd.path);
460 return error; 487 return error;
461} 488}
462 489
@@ -469,8 +496,8 @@ sys_lremovexattr(char __user *path, char __user *name)
469 error = user_path_walk_link(path, &nd); 496 error = user_path_walk_link(path, &nd);
470 if (error) 497 if (error)
471 return error; 498 return error;
472 error = removexattr(nd.dentry, name); 499 error = removexattr(nd.path.dentry, name);
473 path_release(&nd); 500 path_put(&nd.path);
474 return error; 501 return error;
475} 502}
476 503
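
All of the xattr syscall hunks above are the same mechanical conversion: struct nameidata now embeds a struct path, so the looked-up dentry is reached through nd.path.dentry and the reference is dropped with path_put() instead of path_release(). A minimal sketch of the resulting calling pattern (getxattr shown; the helper names are the ones visible in this diff):

	struct nameidata nd;
	ssize_t error;

	error = user_path_walk(path, &nd);
	if (error)
		return error;
	/* dentry and vfsmount now live inside the embedded struct path */
	error = getxattr(nd.path.dentry, name, value, size);
	path_put(&nd.path);	/* drops both the dentry and the mount refs */
	return error;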
diff --git a/fs/xfs/linux-2.6/kmem.c b/fs/xfs/linux-2.6/kmem.c
index ed2b16dff914..e040f1ce1b6a 100644
--- a/fs/xfs/linux-2.6/kmem.c
+++ b/fs/xfs/linux-2.6/kmem.c
@@ -92,8 +92,7 @@ kmem_zalloc_greedy(size_t *size, size_t minsize, size_t maxsize,
92void 92void
93kmem_free(void *ptr, size_t size) 93kmem_free(void *ptr, size_t size)
94{ 94{
95 if (((unsigned long)ptr < VMALLOC_START) || 95 if (!is_vmalloc_addr(ptr)) {
96 ((unsigned long)ptr >= VMALLOC_END)) {
97 kfree(ptr); 96 kfree(ptr);
98 } else { 97 } else {
99 vfree(ptr); 98 vfree(ptr);
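
kmem_free() (and mem_to_page() in the next hunk) stop open-coding the VMALLOC_START/VMALLOC_END range check and use the generic is_vmalloc_addr() predicate instead. A rough sketch of what that predicate amounts to on MMU configurations; the real definition lives in the generic mm headers and is not part of this diff:

	/* Hedged sketch: true when the pointer falls inside the vmalloc
	 * virtual address range, i.e. it came from vmalloc() and must be
	 * freed with vfree() / translated with vmalloc_to_page(). */
	static inline int is_vmalloc_addr_sketch(const void *x)
	{
		unsigned long addr = (unsigned long)x;

		return addr >= VMALLOC_START && addr < VMALLOC_END;
	}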
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 302273f8e2a9..e347bfd47c91 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -700,8 +700,7 @@ static inline struct page *
700mem_to_page( 700mem_to_page(
701 void *addr) 701 void *addr)
702{ 702{
703 if (((unsigned long)addr < VMALLOC_START) || 703 if ((!is_vmalloc_addr(addr))) {
704 ((unsigned long)addr >= VMALLOC_END)) {
705 return virt_to_page(addr); 704 return virt_to_page(addr);
706 } else { 705 } else {
707 return vmalloc_to_page(addr); 706 return vmalloc_to_page(addr);
diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c
index 4c82a050a3a8..a9952e490ac9 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl.c
@@ -91,10 +91,10 @@ xfs_find_handle(
91 if (error) 91 if (error)
92 return error; 92 return error;
93 93
94 ASSERT(nd.dentry); 94 ASSERT(nd.path.dentry);
95 ASSERT(nd.dentry->d_inode); 95 ASSERT(nd.path.dentry->d_inode);
96 inode = igrab(nd.dentry->d_inode); 96 inode = igrab(nd.path.dentry->d_inode);
97 path_release(&nd); 97 path_put(&nd.path);
98 break; 98 break;
99 } 99 }
100 100
diff --git a/fs/xfs/linux-2.6/xfs_lrw.c b/fs/xfs/linux-2.6/xfs_lrw.c
index 6c3a846a5267..166353388490 100644
--- a/fs/xfs/linux-2.6/xfs_lrw.c
+++ b/fs/xfs/linux-2.6/xfs_lrw.c
@@ -152,7 +152,7 @@ xfs_iozero(
152 if (status) 152 if (status)
153 break; 153 break;
154 154
155 zero_user_page(page, offset, bytes, KM_USER0); 155 zero_user(page, offset, bytes);
156 156
157 status = pagecache_write_end(NULL, mapping, pos, bytes, bytes, 157 status = pagecache_write_end(NULL, mapping, pos, bytes, bytes,
158 page, fsdata); 158 page, fsdata);
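
xfs_iozero() switches from zero_user_page(page, offset, bytes, KM_USER0) to zero_user(page, offset, bytes): the new helper chooses the atomic kmap slot itself, so callers no longer pass a KM_* constant. A hedged sketch of the operation being performed (the real helper sits in the highmem header and may be implemented via segment-clearing primitives):

	/* Hedged sketch: clear 'bytes' bytes of 'page' starting at 'offset'
	 * through a temporary kernel mapping. */
	static inline void zero_user_sketch(struct page *page,
					    unsigned offset, unsigned bytes)
	{
		char *kaddr = kmap_atomic(page, KM_USER0);

		memset(kaddr + offset, 0, bytes);
		flush_dcache_page(page);
		kunmap_atomic(kaddr, KM_USER0);
	}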
diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c
index 35582fe9d648..1f3da5b8657b 100644
--- a/fs/xfs/quota/xfs_qm.c
+++ b/fs/xfs/quota/xfs_qm.c
@@ -1648,14 +1648,14 @@ xfs_qm_quotacheck_dqadjust(
1648 * Adjust the inode count and the block count to reflect this inode's 1648 * Adjust the inode count and the block count to reflect this inode's
1649 * resource usage. 1649 * resource usage.
1650 */ 1650 */
1651 be64_add(&dqp->q_core.d_icount, 1); 1651 be64_add_cpu(&dqp->q_core.d_icount, 1);
1652 dqp->q_res_icount++; 1652 dqp->q_res_icount++;
1653 if (nblks) { 1653 if (nblks) {
1654 be64_add(&dqp->q_core.d_bcount, nblks); 1654 be64_add_cpu(&dqp->q_core.d_bcount, nblks);
1655 dqp->q_res_bcount += nblks; 1655 dqp->q_res_bcount += nblks;
1656 } 1656 }
1657 if (rtblks) { 1657 if (rtblks) {
1658 be64_add(&dqp->q_core.d_rtbcount, rtblks); 1658 be64_add_cpu(&dqp->q_core.d_rtbcount, rtblks);
1659 dqp->q_res_rtbcount += rtblks; 1659 dqp->q_res_rtbcount += rtblks;
1660 } 1660 }
1661 1661
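
The bulk of the remaining XFS hunks are a mechanical rename of the local be16_add()/be32_add()/be64_add() helpers to the generic be16_add_cpu()/be32_add_cpu()/be64_add_cpu() family; the local definitions are removed from fs/xfs/xfs_arch.h further down. The semantics are unchanged: add a CPU-order delta to a big-endian on-disk field in place, as in this sketch mirroring the removed xfs_arch.h helper:

	/* Same behaviour as the old xfs be64_add(): convert from big-endian,
	 * add the (possibly negative) CPU-order delta, convert back. */
	static inline void be64_add_cpu_sketch(__be64 *a, __s64 b)
	{
		*a = cpu_to_be64(be64_to_cpu(*a) + b);
	}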
diff --git a/fs/xfs/quota/xfs_trans_dquot.c b/fs/xfs/quota/xfs_trans_dquot.c
index 7de6874bf1b8..f441f836ca8b 100644
--- a/fs/xfs/quota/xfs_trans_dquot.c
+++ b/fs/xfs/quota/xfs_trans_dquot.c
@@ -421,13 +421,13 @@ xfs_trans_apply_dquot_deltas(
421 (xfs_qcnt_t) -qtrx->qt_icount_delta); 421 (xfs_qcnt_t) -qtrx->qt_icount_delta);
422#endif 422#endif
423 if (totalbdelta) 423 if (totalbdelta)
424 be64_add(&d->d_bcount, (xfs_qcnt_t)totalbdelta); 424 be64_add_cpu(&d->d_bcount, (xfs_qcnt_t)totalbdelta);
425 425
426 if (qtrx->qt_icount_delta) 426 if (qtrx->qt_icount_delta)
427 be64_add(&d->d_icount, (xfs_qcnt_t)qtrx->qt_icount_delta); 427 be64_add_cpu(&d->d_icount, (xfs_qcnt_t)qtrx->qt_icount_delta);
428 428
429 if (totalrtbdelta) 429 if (totalrtbdelta)
430 be64_add(&d->d_rtbcount, (xfs_qcnt_t)totalrtbdelta); 430 be64_add_cpu(&d->d_rtbcount, (xfs_qcnt_t)totalrtbdelta);
431 431
432 /* 432 /*
433 * Get any default limits in use. 433 * Get any default limits in use.
diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c
index ea6aa60ace06..bdbfbbee4959 100644
--- a/fs/xfs/xfs_alloc.c
+++ b/fs/xfs/xfs_alloc.c
@@ -592,7 +592,7 @@ xfs_alloc_ag_vextent(
592 if (!(args->wasfromfl)) { 592 if (!(args->wasfromfl)) {
593 593
594 agf = XFS_BUF_TO_AGF(args->agbp); 594 agf = XFS_BUF_TO_AGF(args->agbp);
595 be32_add(&agf->agf_freeblks, -(args->len)); 595 be32_add_cpu(&agf->agf_freeblks, -(args->len));
596 xfs_trans_agblocks_delta(args->tp, 596 xfs_trans_agblocks_delta(args->tp,
597 -((long)(args->len))); 597 -((long)(args->len)));
598 args->pag->pagf_freeblks -= args->len; 598 args->pag->pagf_freeblks -= args->len;
@@ -1720,7 +1720,7 @@ xfs_free_ag_extent(
1720 1720
1721 agf = XFS_BUF_TO_AGF(agbp); 1721 agf = XFS_BUF_TO_AGF(agbp);
1722 pag = &mp->m_perag[agno]; 1722 pag = &mp->m_perag[agno];
1723 be32_add(&agf->agf_freeblks, len); 1723 be32_add_cpu(&agf->agf_freeblks, len);
1724 xfs_trans_agblocks_delta(tp, len); 1724 xfs_trans_agblocks_delta(tp, len);
1725 pag->pagf_freeblks += len; 1725 pag->pagf_freeblks += len;
1726 XFS_WANT_CORRUPTED_GOTO( 1726 XFS_WANT_CORRUPTED_GOTO(
@@ -2008,18 +2008,18 @@ xfs_alloc_get_freelist(
2008 * Get the block number and update the data structures. 2008 * Get the block number and update the data structures.
2009 */ 2009 */
2010 bno = be32_to_cpu(agfl->agfl_bno[be32_to_cpu(agf->agf_flfirst)]); 2010 bno = be32_to_cpu(agfl->agfl_bno[be32_to_cpu(agf->agf_flfirst)]);
2011 be32_add(&agf->agf_flfirst, 1); 2011 be32_add_cpu(&agf->agf_flfirst, 1);
2012 xfs_trans_brelse(tp, agflbp); 2012 xfs_trans_brelse(tp, agflbp);
2013 if (be32_to_cpu(agf->agf_flfirst) == XFS_AGFL_SIZE(mp)) 2013 if (be32_to_cpu(agf->agf_flfirst) == XFS_AGFL_SIZE(mp))
2014 agf->agf_flfirst = 0; 2014 agf->agf_flfirst = 0;
2015 pag = &mp->m_perag[be32_to_cpu(agf->agf_seqno)]; 2015 pag = &mp->m_perag[be32_to_cpu(agf->agf_seqno)];
2016 be32_add(&agf->agf_flcount, -1); 2016 be32_add_cpu(&agf->agf_flcount, -1);
2017 xfs_trans_agflist_delta(tp, -1); 2017 xfs_trans_agflist_delta(tp, -1);
2018 pag->pagf_flcount--; 2018 pag->pagf_flcount--;
2019 2019
2020 logflags = XFS_AGF_FLFIRST | XFS_AGF_FLCOUNT; 2020 logflags = XFS_AGF_FLFIRST | XFS_AGF_FLCOUNT;
2021 if (btreeblk) { 2021 if (btreeblk) {
2022 be32_add(&agf->agf_btreeblks, 1); 2022 be32_add_cpu(&agf->agf_btreeblks, 1);
2023 pag->pagf_btreeblks++; 2023 pag->pagf_btreeblks++;
2024 logflags |= XFS_AGF_BTREEBLKS; 2024 logflags |= XFS_AGF_BTREEBLKS;
2025 } 2025 }
@@ -2117,17 +2117,17 @@ xfs_alloc_put_freelist(
2117 be32_to_cpu(agf->agf_seqno), &agflbp))) 2117 be32_to_cpu(agf->agf_seqno), &agflbp)))
2118 return error; 2118 return error;
2119 agfl = XFS_BUF_TO_AGFL(agflbp); 2119 agfl = XFS_BUF_TO_AGFL(agflbp);
2120 be32_add(&agf->agf_fllast, 1); 2120 be32_add_cpu(&agf->agf_fllast, 1);
2121 if (be32_to_cpu(agf->agf_fllast) == XFS_AGFL_SIZE(mp)) 2121 if (be32_to_cpu(agf->agf_fllast) == XFS_AGFL_SIZE(mp))
2122 agf->agf_fllast = 0; 2122 agf->agf_fllast = 0;
2123 pag = &mp->m_perag[be32_to_cpu(agf->agf_seqno)]; 2123 pag = &mp->m_perag[be32_to_cpu(agf->agf_seqno)];
2124 be32_add(&agf->agf_flcount, 1); 2124 be32_add_cpu(&agf->agf_flcount, 1);
2125 xfs_trans_agflist_delta(tp, 1); 2125 xfs_trans_agflist_delta(tp, 1);
2126 pag->pagf_flcount++; 2126 pag->pagf_flcount++;
2127 2127
2128 logflags = XFS_AGF_FLLAST | XFS_AGF_FLCOUNT; 2128 logflags = XFS_AGF_FLLAST | XFS_AGF_FLCOUNT;
2129 if (btreeblk) { 2129 if (btreeblk) {
2130 be32_add(&agf->agf_btreeblks, -1); 2130 be32_add_cpu(&agf->agf_btreeblks, -1);
2131 pag->pagf_btreeblks--; 2131 pag->pagf_btreeblks--;
2132 logflags |= XFS_AGF_BTREEBLKS; 2132 logflags |= XFS_AGF_BTREEBLKS;
2133 } 2133 }
diff --git a/fs/xfs/xfs_alloc_btree.c b/fs/xfs/xfs_alloc_btree.c
index 1603ce595853..3ce2645508ae 100644
--- a/fs/xfs/xfs_alloc_btree.c
+++ b/fs/xfs/xfs_alloc_btree.c
@@ -221,7 +221,7 @@ xfs_alloc_delrec(
221 */ 221 */
222 bno = be32_to_cpu(agf->agf_roots[cur->bc_btnum]); 222 bno = be32_to_cpu(agf->agf_roots[cur->bc_btnum]);
223 agf->agf_roots[cur->bc_btnum] = *lpp; 223 agf->agf_roots[cur->bc_btnum] = *lpp;
224 be32_add(&agf->agf_levels[cur->bc_btnum], -1); 224 be32_add_cpu(&agf->agf_levels[cur->bc_btnum], -1);
225 mp->m_perag[be32_to_cpu(agf->agf_seqno)].pagf_levels[cur->bc_btnum]--; 225 mp->m_perag[be32_to_cpu(agf->agf_seqno)].pagf_levels[cur->bc_btnum]--;
226 /* 226 /*
227 * Put this buffer/block on the ag's freelist. 227 * Put this buffer/block on the ag's freelist.
@@ -1256,9 +1256,9 @@ xfs_alloc_lshift(
1256 /* 1256 /*
1257 * Bump and log left's numrecs, decrement and log right's numrecs. 1257 * Bump and log left's numrecs, decrement and log right's numrecs.
1258 */ 1258 */
1259 be16_add(&left->bb_numrecs, 1); 1259 be16_add_cpu(&left->bb_numrecs, 1);
1260 xfs_alloc_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS); 1260 xfs_alloc_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS);
1261 be16_add(&right->bb_numrecs, -1); 1261 be16_add_cpu(&right->bb_numrecs, -1);
1262 xfs_alloc_log_block(cur->bc_tp, rbp, XFS_BB_NUMRECS); 1262 xfs_alloc_log_block(cur->bc_tp, rbp, XFS_BB_NUMRECS);
1263 /* 1263 /*
1264 * Slide the contents of right down one entry. 1264 * Slide the contents of right down one entry.
@@ -1346,7 +1346,7 @@ xfs_alloc_newroot(
1346 1346
1347 agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp); 1347 agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);
1348 agf->agf_roots[cur->bc_btnum] = cpu_to_be32(nbno); 1348 agf->agf_roots[cur->bc_btnum] = cpu_to_be32(nbno);
1349 be32_add(&agf->agf_levels[cur->bc_btnum], 1); 1349 be32_add_cpu(&agf->agf_levels[cur->bc_btnum], 1);
1350 seqno = be32_to_cpu(agf->agf_seqno); 1350 seqno = be32_to_cpu(agf->agf_seqno);
1351 mp->m_perag[seqno].pagf_levels[cur->bc_btnum]++; 1351 mp->m_perag[seqno].pagf_levels[cur->bc_btnum]++;
1352 xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp, 1352 xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp,
@@ -1558,9 +1558,9 @@ xfs_alloc_rshift(
1558 /* 1558 /*
1559 * Decrement and log left's numrecs, bump and log right's numrecs. 1559 * Decrement and log left's numrecs, bump and log right's numrecs.
1560 */ 1560 */
1561 be16_add(&left->bb_numrecs, -1); 1561 be16_add_cpu(&left->bb_numrecs, -1);
1562 xfs_alloc_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS); 1562 xfs_alloc_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS);
1563 be16_add(&right->bb_numrecs, 1); 1563 be16_add_cpu(&right->bb_numrecs, 1);
1564 xfs_alloc_log_block(cur->bc_tp, rbp, XFS_BB_NUMRECS); 1564 xfs_alloc_log_block(cur->bc_tp, rbp, XFS_BB_NUMRECS);
1565 /* 1565 /*
1566 * Using a temporary cursor, update the parent key values of the 1566 * Using a temporary cursor, update the parent key values of the
@@ -1643,7 +1643,7 @@ xfs_alloc_split(
1643 */ 1643 */
1644 if ((be16_to_cpu(left->bb_numrecs) & 1) && 1644 if ((be16_to_cpu(left->bb_numrecs) & 1) &&
1645 cur->bc_ptrs[level] <= be16_to_cpu(right->bb_numrecs) + 1) 1645 cur->bc_ptrs[level] <= be16_to_cpu(right->bb_numrecs) + 1)
1646 be16_add(&right->bb_numrecs, 1); 1646 be16_add_cpu(&right->bb_numrecs, 1);
1647 i = be16_to_cpu(left->bb_numrecs) - be16_to_cpu(right->bb_numrecs) + 1; 1647 i = be16_to_cpu(left->bb_numrecs) - be16_to_cpu(right->bb_numrecs) + 1;
1648 /* 1648 /*
1649 * For non-leaf blocks, copy keys and addresses over to the new block. 1649 * For non-leaf blocks, copy keys and addresses over to the new block.
@@ -1689,7 +1689,7 @@ xfs_alloc_split(
1689 * Adjust numrecs, sibling pointers. 1689 * Adjust numrecs, sibling pointers.
1690 */ 1690 */
1691 lbno = XFS_DADDR_TO_AGBNO(cur->bc_mp, XFS_BUF_ADDR(lbp)); 1691 lbno = XFS_DADDR_TO_AGBNO(cur->bc_mp, XFS_BUF_ADDR(lbp));
1692 be16_add(&left->bb_numrecs, -(be16_to_cpu(right->bb_numrecs))); 1692 be16_add_cpu(&left->bb_numrecs, -(be16_to_cpu(right->bb_numrecs)));
1693 right->bb_rightsib = left->bb_rightsib; 1693 right->bb_rightsib = left->bb_rightsib;
1694 left->bb_rightsib = cpu_to_be32(rbno); 1694 left->bb_rightsib = cpu_to_be32(rbno);
1695 right->bb_leftsib = cpu_to_be32(lbno); 1695 right->bb_leftsib = cpu_to_be32(lbno);
diff --git a/fs/xfs/xfs_arch.h b/fs/xfs/xfs_arch.h
index c4836890b726..f9472a2076d4 100644
--- a/fs/xfs/xfs_arch.h
+++ b/fs/xfs/xfs_arch.h
@@ -170,21 +170,6 @@
170 } \ 170 } \
171} 171}
172 172
173static inline void be16_add(__be16 *a, __s16 b)
174{
175 *a = cpu_to_be16(be16_to_cpu(*a) + b);
176}
177
178static inline void be32_add(__be32 *a, __s32 b)
179{
180 *a = cpu_to_be32(be32_to_cpu(*a) + b);
181}
182
183static inline void be64_add(__be64 *a, __s64 b)
184{
185 *a = cpu_to_be64(be64_to_cpu(*a) + b);
186}
187
188/* 173/*
189 * In directories inode numbers are stored as unaligned arrays of unsigned 174 * In directories inode numbers are stored as unaligned arrays of unsigned
190 * 8bit integers on disk. 175 * 8bit integers on disk.
diff --git a/fs/xfs/xfs_attr_leaf.c b/fs/xfs/xfs_attr_leaf.c
index eb3815ebb7aa..b08e2a2a8add 100644
--- a/fs/xfs/xfs_attr_leaf.c
+++ b/fs/xfs/xfs_attr_leaf.c
@@ -317,7 +317,7 @@ xfs_attr_shortform_add(xfs_da_args_t *args, int forkoff)
317 memcpy(sfe->nameval, args->name, args->namelen); 317 memcpy(sfe->nameval, args->name, args->namelen);
318 memcpy(&sfe->nameval[args->namelen], args->value, args->valuelen); 318 memcpy(&sfe->nameval[args->namelen], args->value, args->valuelen);
319 sf->hdr.count++; 319 sf->hdr.count++;
320 be16_add(&sf->hdr.totsize, size); 320 be16_add_cpu(&sf->hdr.totsize, size);
321 xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_ADATA); 321 xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_ADATA);
322 322
323 xfs_sbversion_add_attr2(mp, args->trans); 323 xfs_sbversion_add_attr2(mp, args->trans);
@@ -363,7 +363,7 @@ xfs_attr_shortform_remove(xfs_da_args_t *args)
363 if (end != totsize) 363 if (end != totsize)
364 memmove(&((char *)sf)[base], &((char *)sf)[end], totsize - end); 364 memmove(&((char *)sf)[base], &((char *)sf)[end], totsize - end);
365 sf->hdr.count--; 365 sf->hdr.count--;
366 be16_add(&sf->hdr.totsize, -size); 366 be16_add_cpu(&sf->hdr.totsize, -size);
367 367
368 /* 368 /*
369 * Fix up the start offset of the attribute fork 369 * Fix up the start offset of the attribute fork
@@ -1133,7 +1133,7 @@ xfs_attr_leaf_add_work(xfs_dabuf_t *bp, xfs_da_args_t *args, int mapindex)
1133 xfs_da_log_buf(args->trans, bp, 1133 xfs_da_log_buf(args->trans, bp,
1134 XFS_DA_LOGRANGE(leaf, entry, tmp + sizeof(*entry))); 1134 XFS_DA_LOGRANGE(leaf, entry, tmp + sizeof(*entry)));
1135 } 1135 }
1136 be16_add(&hdr->count, 1); 1136 be16_add_cpu(&hdr->count, 1);
1137 1137
1138 /* 1138 /*
1139 * Allocate space for the new string (at the end of the run). 1139 * Allocate space for the new string (at the end of the run).
@@ -1147,7 +1147,7 @@ xfs_attr_leaf_add_work(xfs_dabuf_t *bp, xfs_da_args_t *args, int mapindex)
1147 mp->m_sb.sb_blocksize, NULL)); 1147 mp->m_sb.sb_blocksize, NULL));
1148 ASSERT(be16_to_cpu(map->size) < XFS_LBSIZE(mp)); 1148 ASSERT(be16_to_cpu(map->size) < XFS_LBSIZE(mp));
1149 ASSERT((be16_to_cpu(map->size) & 0x3) == 0); 1149 ASSERT((be16_to_cpu(map->size) & 0x3) == 0);
1150 be16_add(&map->size, 1150 be16_add_cpu(&map->size,
1151 -xfs_attr_leaf_newentsize(args->namelen, args->valuelen, 1151 -xfs_attr_leaf_newentsize(args->namelen, args->valuelen,
1152 mp->m_sb.sb_blocksize, &tmp)); 1152 mp->m_sb.sb_blocksize, &tmp));
1153 entry->nameidx = cpu_to_be16(be16_to_cpu(map->base) + 1153 entry->nameidx = cpu_to_be16(be16_to_cpu(map->base) +
@@ -1214,12 +1214,12 @@ xfs_attr_leaf_add_work(xfs_dabuf_t *bp, xfs_da_args_t *args, int mapindex)
1214 map = &hdr->freemap[0]; 1214 map = &hdr->freemap[0];
1215 for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; map++, i++) { 1215 for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; map++, i++) {
1216 if (be16_to_cpu(map->base) == tmp) { 1216 if (be16_to_cpu(map->base) == tmp) {
1217 be16_add(&map->base, sizeof(xfs_attr_leaf_entry_t)); 1217 be16_add_cpu(&map->base, sizeof(xfs_attr_leaf_entry_t));
1218 be16_add(&map->size, 1218 be16_add_cpu(&map->size,
1219 -((int)sizeof(xfs_attr_leaf_entry_t))); 1219 -((int)sizeof(xfs_attr_leaf_entry_t)));
1220 } 1220 }
1221 } 1221 }
1222 be16_add(&hdr->usedbytes, xfs_attr_leaf_entsize(leaf, args->index)); 1222 be16_add_cpu(&hdr->usedbytes, xfs_attr_leaf_entsize(leaf, args->index));
1223 xfs_da_log_buf(args->trans, bp, 1223 xfs_da_log_buf(args->trans, bp,
1224 XFS_DA_LOGRANGE(leaf, hdr, sizeof(*hdr))); 1224 XFS_DA_LOGRANGE(leaf, hdr, sizeof(*hdr)));
1225 return(0); 1225 return(0);
@@ -1727,9 +1727,9 @@ xfs_attr_leaf_remove(xfs_dabuf_t *bp, xfs_da_args_t *args)
1727 ASSERT(be16_to_cpu(map->base) < XFS_LBSIZE(mp)); 1727 ASSERT(be16_to_cpu(map->base) < XFS_LBSIZE(mp));
1728 ASSERT(be16_to_cpu(map->size) < XFS_LBSIZE(mp)); 1728 ASSERT(be16_to_cpu(map->size) < XFS_LBSIZE(mp));
1729 if (be16_to_cpu(map->base) == tablesize) { 1729 if (be16_to_cpu(map->base) == tablesize) {
1730 be16_add(&map->base, 1730 be16_add_cpu(&map->base,
1731 -((int)sizeof(xfs_attr_leaf_entry_t))); 1731 -((int)sizeof(xfs_attr_leaf_entry_t)));
1732 be16_add(&map->size, sizeof(xfs_attr_leaf_entry_t)); 1732 be16_add_cpu(&map->size, sizeof(xfs_attr_leaf_entry_t));
1733 } 1733 }
1734 1734
1735 if ((be16_to_cpu(map->base) + be16_to_cpu(map->size)) 1735 if ((be16_to_cpu(map->base) + be16_to_cpu(map->size))
@@ -1751,19 +1751,19 @@ xfs_attr_leaf_remove(xfs_dabuf_t *bp, xfs_da_args_t *args)
1751 if ((before >= 0) || (after >= 0)) { 1751 if ((before >= 0) || (after >= 0)) {
1752 if ((before >= 0) && (after >= 0)) { 1752 if ((before >= 0) && (after >= 0)) {
1753 map = &hdr->freemap[before]; 1753 map = &hdr->freemap[before];
1754 be16_add(&map->size, entsize); 1754 be16_add_cpu(&map->size, entsize);
1755 be16_add(&map->size, 1755 be16_add_cpu(&map->size,
1756 be16_to_cpu(hdr->freemap[after].size)); 1756 be16_to_cpu(hdr->freemap[after].size));
1757 hdr->freemap[after].base = 0; 1757 hdr->freemap[after].base = 0;
1758 hdr->freemap[after].size = 0; 1758 hdr->freemap[after].size = 0;
1759 } else if (before >= 0) { 1759 } else if (before >= 0) {
1760 map = &hdr->freemap[before]; 1760 map = &hdr->freemap[before];
1761 be16_add(&map->size, entsize); 1761 be16_add_cpu(&map->size, entsize);
1762 } else { 1762 } else {
1763 map = &hdr->freemap[after]; 1763 map = &hdr->freemap[after];
1764 /* both on-disk, don't endian flip twice */ 1764 /* both on-disk, don't endian flip twice */
1765 map->base = entry->nameidx; 1765 map->base = entry->nameidx;
1766 be16_add(&map->size, entsize); 1766 be16_add_cpu(&map->size, entsize);
1767 } 1767 }
1768 } else { 1768 } else {
1769 /* 1769 /*
@@ -1788,7 +1788,7 @@ xfs_attr_leaf_remove(xfs_dabuf_t *bp, xfs_da_args_t *args)
1788 * Compress the remaining entries and zero out the removed stuff. 1788 * Compress the remaining entries and zero out the removed stuff.
1789 */ 1789 */
1790 memset(XFS_ATTR_LEAF_NAME(leaf, args->index), 0, entsize); 1790 memset(XFS_ATTR_LEAF_NAME(leaf, args->index), 0, entsize);
1791 be16_add(&hdr->usedbytes, -entsize); 1791 be16_add_cpu(&hdr->usedbytes, -entsize);
1792 xfs_da_log_buf(args->trans, bp, 1792 xfs_da_log_buf(args->trans, bp,
1793 XFS_DA_LOGRANGE(leaf, XFS_ATTR_LEAF_NAME(leaf, args->index), 1793 XFS_DA_LOGRANGE(leaf, XFS_ATTR_LEAF_NAME(leaf, args->index),
1794 entsize)); 1794 entsize));
@@ -1796,7 +1796,7 @@ xfs_attr_leaf_remove(xfs_dabuf_t *bp, xfs_da_args_t *args)
1796 tmp = (be16_to_cpu(hdr->count) - args->index) 1796 tmp = (be16_to_cpu(hdr->count) - args->index)
1797 * sizeof(xfs_attr_leaf_entry_t); 1797 * sizeof(xfs_attr_leaf_entry_t);
1798 memmove((char *)entry, (char *)(entry+1), tmp); 1798 memmove((char *)entry, (char *)(entry+1), tmp);
1799 be16_add(&hdr->count, -1); 1799 be16_add_cpu(&hdr->count, -1);
1800 xfs_da_log_buf(args->trans, bp, 1800 xfs_da_log_buf(args->trans, bp,
1801 XFS_DA_LOGRANGE(leaf, entry, tmp + sizeof(*entry))); 1801 XFS_DA_LOGRANGE(leaf, entry, tmp + sizeof(*entry)));
1802 entry = &leaf->entries[be16_to_cpu(hdr->count)]; 1802 entry = &leaf->entries[be16_to_cpu(hdr->count)];
@@ -2182,15 +2182,15 @@ xfs_attr_leaf_moveents(xfs_attr_leafblock_t *leaf_s, int start_s,
2182 */ 2182 */
2183 if (entry_s->flags & XFS_ATTR_INCOMPLETE) { /* skip partials? */ 2183 if (entry_s->flags & XFS_ATTR_INCOMPLETE) { /* skip partials? */
2184 memset(XFS_ATTR_LEAF_NAME(leaf_s, start_s + i), 0, tmp); 2184 memset(XFS_ATTR_LEAF_NAME(leaf_s, start_s + i), 0, tmp);
2185 be16_add(&hdr_s->usedbytes, -tmp); 2185 be16_add_cpu(&hdr_s->usedbytes, -tmp);
2186 be16_add(&hdr_s->count, -1); 2186 be16_add_cpu(&hdr_s->count, -1);
2187 entry_d--; /* to compensate for ++ in loop hdr */ 2187 entry_d--; /* to compensate for ++ in loop hdr */
2188 desti--; 2188 desti--;
2189 if ((start_s + i) < offset) 2189 if ((start_s + i) < offset)
2190 result++; /* insertion index adjustment */ 2190 result++; /* insertion index adjustment */
2191 } else { 2191 } else {
2192#endif /* GROT */ 2192#endif /* GROT */
2193 be16_add(&hdr_d->firstused, -tmp); 2193 be16_add_cpu(&hdr_d->firstused, -tmp);
2194 /* both on-disk, don't endian flip twice */ 2194 /* both on-disk, don't endian flip twice */
2195 entry_d->hashval = entry_s->hashval; 2195 entry_d->hashval = entry_s->hashval;
2196 /* both on-disk, don't endian flip twice */ 2196 /* both on-disk, don't endian flip twice */
@@ -2203,10 +2203,10 @@ xfs_attr_leaf_moveents(xfs_attr_leafblock_t *leaf_s, int start_s,
2203 ASSERT(be16_to_cpu(entry_s->nameidx) + tmp 2203 ASSERT(be16_to_cpu(entry_s->nameidx) + tmp
2204 <= XFS_LBSIZE(mp)); 2204 <= XFS_LBSIZE(mp));
2205 memset(XFS_ATTR_LEAF_NAME(leaf_s, start_s + i), 0, tmp); 2205 memset(XFS_ATTR_LEAF_NAME(leaf_s, start_s + i), 0, tmp);
2206 be16_add(&hdr_s->usedbytes, -tmp); 2206 be16_add_cpu(&hdr_s->usedbytes, -tmp);
2207 be16_add(&hdr_d->usedbytes, tmp); 2207 be16_add_cpu(&hdr_d->usedbytes, tmp);
2208 be16_add(&hdr_s->count, -1); 2208 be16_add_cpu(&hdr_s->count, -1);
2209 be16_add(&hdr_d->count, 1); 2209 be16_add_cpu(&hdr_d->count, 1);
2210 tmp = be16_to_cpu(hdr_d->count) 2210 tmp = be16_to_cpu(hdr_d->count)
2211 * sizeof(xfs_attr_leaf_entry_t) 2211 * sizeof(xfs_attr_leaf_entry_t)
2212 + sizeof(xfs_attr_leaf_hdr_t); 2212 + sizeof(xfs_attr_leaf_hdr_t);
@@ -2247,7 +2247,7 @@ xfs_attr_leaf_moveents(xfs_attr_leafblock_t *leaf_s, int start_s,
2247 * Fill in the freemap information 2247 * Fill in the freemap information
2248 */ 2248 */
2249 hdr_d->freemap[0].base = cpu_to_be16(sizeof(xfs_attr_leaf_hdr_t)); 2249 hdr_d->freemap[0].base = cpu_to_be16(sizeof(xfs_attr_leaf_hdr_t));
2250 be16_add(&hdr_d->freemap[0].base, be16_to_cpu(hdr_d->count) * 2250 be16_add_cpu(&hdr_d->freemap[0].base, be16_to_cpu(hdr_d->count) *
2251 sizeof(xfs_attr_leaf_entry_t)); 2251 sizeof(xfs_attr_leaf_entry_t));
2252 hdr_d->freemap[0].size = cpu_to_be16(be16_to_cpu(hdr_d->firstused) 2252 hdr_d->freemap[0].size = cpu_to_be16(be16_to_cpu(hdr_d->firstused)
2253 - be16_to_cpu(hdr_d->freemap[0].base)); 2253 - be16_to_cpu(hdr_d->freemap[0].base));
diff --git a/fs/xfs/xfs_bmap_btree.c b/fs/xfs/xfs_bmap_btree.c
index c4181d85605c..bd18987326a3 100644
--- a/fs/xfs/xfs_bmap_btree.c
+++ b/fs/xfs/xfs_bmap_btree.c
@@ -631,7 +631,7 @@ xfs_bmbt_delrec(
631 memcpy(lrp, rrp, numrrecs * sizeof(*lrp)); 631 memcpy(lrp, rrp, numrrecs * sizeof(*lrp));
632 xfs_bmbt_log_recs(cur, lbp, numlrecs + 1, numlrecs + numrrecs); 632 xfs_bmbt_log_recs(cur, lbp, numlrecs + 1, numlrecs + numrrecs);
633 } 633 }
634 be16_add(&left->bb_numrecs, numrrecs); 634 be16_add_cpu(&left->bb_numrecs, numrrecs);
635 left->bb_rightsib = right->bb_rightsib; 635 left->bb_rightsib = right->bb_rightsib;
636 xfs_bmbt_log_block(cur, lbp, XFS_BB_RIGHTSIB | XFS_BB_NUMRECS); 636 xfs_bmbt_log_block(cur, lbp, XFS_BB_RIGHTSIB | XFS_BB_NUMRECS);
637 if (be64_to_cpu(left->bb_rightsib) != NULLDFSBNO) { 637 if (be64_to_cpu(left->bb_rightsib) != NULLDFSBNO) {
@@ -924,7 +924,7 @@ xfs_bmbt_killroot(
924 xfs_iroot_realloc(ip, i, cur->bc_private.b.whichfork); 924 xfs_iroot_realloc(ip, i, cur->bc_private.b.whichfork);
925 block = ifp->if_broot; 925 block = ifp->if_broot;
926 } 926 }
927 be16_add(&block->bb_numrecs, i); 927 be16_add_cpu(&block->bb_numrecs, i);
928 ASSERT(block->bb_numrecs == cblock->bb_numrecs); 928 ASSERT(block->bb_numrecs == cblock->bb_numrecs);
929 kp = XFS_BMAP_KEY_IADDR(block, 1, cur); 929 kp = XFS_BMAP_KEY_IADDR(block, 1, cur);
930 ckp = XFS_BMAP_KEY_IADDR(cblock, 1, cur); 930 ckp = XFS_BMAP_KEY_IADDR(cblock, 1, cur);
@@ -947,7 +947,7 @@ xfs_bmbt_killroot(
947 XFS_TRANS_DQ_BCOUNT, -1L); 947 XFS_TRANS_DQ_BCOUNT, -1L);
948 xfs_trans_binval(cur->bc_tp, cbp); 948 xfs_trans_binval(cur->bc_tp, cbp);
949 cur->bc_bufs[level - 1] = NULL; 949 cur->bc_bufs[level - 1] = NULL;
950 be16_add(&block->bb_level, -1); 950 be16_add_cpu(&block->bb_level, -1);
951 xfs_trans_log_inode(cur->bc_tp, ip, 951 xfs_trans_log_inode(cur->bc_tp, ip,
952 XFS_ILOG_CORE | XFS_ILOG_FBROOT(cur->bc_private.b.whichfork)); 952 XFS_ILOG_CORE | XFS_ILOG_FBROOT(cur->bc_private.b.whichfork));
953 cur->bc_nlevels--; 953 cur->bc_nlevels--;
@@ -1401,9 +1401,9 @@ xfs_bmbt_rshift(
1401 key.br_startoff = cpu_to_be64(xfs_bmbt_disk_get_startoff(rrp)); 1401 key.br_startoff = cpu_to_be64(xfs_bmbt_disk_get_startoff(rrp));
1402 rkp = &key; 1402 rkp = &key;
1403 } 1403 }
1404 be16_add(&left->bb_numrecs, -1); 1404 be16_add_cpu(&left->bb_numrecs, -1);
1405 xfs_bmbt_log_block(cur, lbp, XFS_BB_NUMRECS); 1405 xfs_bmbt_log_block(cur, lbp, XFS_BB_NUMRECS);
1406 be16_add(&right->bb_numrecs, 1); 1406 be16_add_cpu(&right->bb_numrecs, 1);
1407#ifdef DEBUG 1407#ifdef DEBUG
1408 if (level > 0) 1408 if (level > 0)
1409 xfs_btree_check_key(XFS_BTNUM_BMAP, rkp, rkp + 1); 1409 xfs_btree_check_key(XFS_BTNUM_BMAP, rkp, rkp + 1);
@@ -1535,7 +1535,7 @@ xfs_bmbt_split(
1535 right->bb_numrecs = cpu_to_be16(be16_to_cpu(left->bb_numrecs) / 2); 1535 right->bb_numrecs = cpu_to_be16(be16_to_cpu(left->bb_numrecs) / 2);
1536 if ((be16_to_cpu(left->bb_numrecs) & 1) && 1536 if ((be16_to_cpu(left->bb_numrecs) & 1) &&
1537 cur->bc_ptrs[level] <= be16_to_cpu(right->bb_numrecs) + 1) 1537 cur->bc_ptrs[level] <= be16_to_cpu(right->bb_numrecs) + 1)
1538 be16_add(&right->bb_numrecs, 1); 1538 be16_add_cpu(&right->bb_numrecs, 1);
1539 i = be16_to_cpu(left->bb_numrecs) - be16_to_cpu(right->bb_numrecs) + 1; 1539 i = be16_to_cpu(left->bb_numrecs) - be16_to_cpu(right->bb_numrecs) + 1;
1540 if (level > 0) { 1540 if (level > 0) {
1541 lkp = XFS_BMAP_KEY_IADDR(left, i, cur); 1541 lkp = XFS_BMAP_KEY_IADDR(left, i, cur);
@@ -1562,7 +1562,7 @@ xfs_bmbt_split(
1562 xfs_bmbt_log_recs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs)); 1562 xfs_bmbt_log_recs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs));
1563 *startoff = xfs_bmbt_disk_get_startoff(rrp); 1563 *startoff = xfs_bmbt_disk_get_startoff(rrp);
1564 } 1564 }
1565 be16_add(&left->bb_numrecs, -(be16_to_cpu(right->bb_numrecs))); 1565 be16_add_cpu(&left->bb_numrecs, -(be16_to_cpu(right->bb_numrecs)));
1566 right->bb_rightsib = left->bb_rightsib; 1566 right->bb_rightsib = left->bb_rightsib;
1567 left->bb_rightsib = cpu_to_be64(args.fsbno); 1567 left->bb_rightsib = cpu_to_be64(args.fsbno);
1568 right->bb_leftsib = cpu_to_be64(lbno); 1568 right->bb_leftsib = cpu_to_be64(lbno);
@@ -2240,7 +2240,7 @@ xfs_bmbt_newroot(
2240 bp = xfs_btree_get_bufl(args.mp, cur->bc_tp, args.fsbno, 0); 2240 bp = xfs_btree_get_bufl(args.mp, cur->bc_tp, args.fsbno, 0);
2241 cblock = XFS_BUF_TO_BMBT_BLOCK(bp); 2241 cblock = XFS_BUF_TO_BMBT_BLOCK(bp);
2242 *cblock = *block; 2242 *cblock = *block;
2243 be16_add(&block->bb_level, 1); 2243 be16_add_cpu(&block->bb_level, 1);
2244 block->bb_numrecs = cpu_to_be16(1); 2244 block->bb_numrecs = cpu_to_be16(1);
2245 cur->bc_nlevels++; 2245 cur->bc_nlevels++;
2246 cur->bc_ptrs[level + 1] = 1; 2246 cur->bc_ptrs[level + 1] = 1;
diff --git a/fs/xfs/xfs_da_btree.c b/fs/xfs/xfs_da_btree.c
index 1b446849fb3d..021a8f7e563f 100644
--- a/fs/xfs/xfs_da_btree.c
+++ b/fs/xfs/xfs_da_btree.c
@@ -511,12 +511,12 @@ xfs_da_node_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
511 * Move the req'd B-tree elements from high in node1 to 511 * Move the req'd B-tree elements from high in node1 to
512 * low in node2. 512 * low in node2.
513 */ 513 */
514 be16_add(&node2->hdr.count, count); 514 be16_add_cpu(&node2->hdr.count, count);
515 tmp = count * (uint)sizeof(xfs_da_node_entry_t); 515 tmp = count * (uint)sizeof(xfs_da_node_entry_t);
516 btree_s = &node1->btree[be16_to_cpu(node1->hdr.count) - count]; 516 btree_s = &node1->btree[be16_to_cpu(node1->hdr.count) - count];
517 btree_d = &node2->btree[0]; 517 btree_d = &node2->btree[0];
518 memcpy(btree_d, btree_s, tmp); 518 memcpy(btree_d, btree_s, tmp);
519 be16_add(&node1->hdr.count, -count); 519 be16_add_cpu(&node1->hdr.count, -count);
520 } else { 520 } else {
521 /* 521 /*
522 * Move the req'd B-tree elements from low in node2 to 522 * Move the req'd B-tree elements from low in node2 to
@@ -527,7 +527,7 @@ xfs_da_node_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
527 btree_s = &node2->btree[0]; 527 btree_s = &node2->btree[0];
528 btree_d = &node1->btree[be16_to_cpu(node1->hdr.count)]; 528 btree_d = &node1->btree[be16_to_cpu(node1->hdr.count)];
529 memcpy(btree_d, btree_s, tmp); 529 memcpy(btree_d, btree_s, tmp);
530 be16_add(&node1->hdr.count, count); 530 be16_add_cpu(&node1->hdr.count, count);
531 xfs_da_log_buf(tp, blk1->bp, 531 xfs_da_log_buf(tp, blk1->bp,
532 XFS_DA_LOGRANGE(node1, btree_d, tmp)); 532 XFS_DA_LOGRANGE(node1, btree_d, tmp));
533 533
@@ -539,7 +539,7 @@ xfs_da_node_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
539 btree_s = &node2->btree[count]; 539 btree_s = &node2->btree[count];
540 btree_d = &node2->btree[0]; 540 btree_d = &node2->btree[0];
541 memmove(btree_d, btree_s, tmp); 541 memmove(btree_d, btree_s, tmp);
542 be16_add(&node2->hdr.count, -count); 542 be16_add_cpu(&node2->hdr.count, -count);
543 } 543 }
544 544
545 /* 545 /*
@@ -604,7 +604,7 @@ xfs_da_node_add(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk,
604 btree->before = cpu_to_be32(newblk->blkno); 604 btree->before = cpu_to_be32(newblk->blkno);
605 xfs_da_log_buf(state->args->trans, oldblk->bp, 605 xfs_da_log_buf(state->args->trans, oldblk->bp,
606 XFS_DA_LOGRANGE(node, btree, tmp + sizeof(*btree))); 606 XFS_DA_LOGRANGE(node, btree, tmp + sizeof(*btree)));
607 be16_add(&node->hdr.count, 1); 607 be16_add_cpu(&node->hdr.count, 1);
608 xfs_da_log_buf(state->args->trans, oldblk->bp, 608 xfs_da_log_buf(state->args->trans, oldblk->bp,
609 XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr))); 609 XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr)));
610 610
@@ -959,7 +959,7 @@ xfs_da_node_remove(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk)
959 memset((char *)btree, 0, sizeof(xfs_da_node_entry_t)); 959 memset((char *)btree, 0, sizeof(xfs_da_node_entry_t));
960 xfs_da_log_buf(state->args->trans, drop_blk->bp, 960 xfs_da_log_buf(state->args->trans, drop_blk->bp,
961 XFS_DA_LOGRANGE(node, btree, sizeof(*btree))); 961 XFS_DA_LOGRANGE(node, btree, sizeof(*btree)));
962 be16_add(&node->hdr.count, -1); 962 be16_add_cpu(&node->hdr.count, -1);
963 xfs_da_log_buf(state->args->trans, drop_blk->bp, 963 xfs_da_log_buf(state->args->trans, drop_blk->bp,
964 XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr))); 964 XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr)));
965 965
@@ -1018,7 +1018,7 @@ xfs_da_node_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
1018 */ 1018 */
1019 tmp = be16_to_cpu(drop_node->hdr.count) * (uint)sizeof(xfs_da_node_entry_t); 1019 tmp = be16_to_cpu(drop_node->hdr.count) * (uint)sizeof(xfs_da_node_entry_t);
1020 memcpy(btree, &drop_node->btree[0], tmp); 1020 memcpy(btree, &drop_node->btree[0], tmp);
1021 be16_add(&save_node->hdr.count, be16_to_cpu(drop_node->hdr.count)); 1021 be16_add_cpu(&save_node->hdr.count, be16_to_cpu(drop_node->hdr.count));
1022 1022
1023 xfs_da_log_buf(tp, save_blk->bp, 1023 xfs_da_log_buf(tp, save_blk->bp,
1024 XFS_DA_LOGRANGE(save_node, &save_node->hdr, 1024 XFS_DA_LOGRANGE(save_node, &save_node->hdr,
diff --git a/fs/xfs/xfs_dir2_block.c b/fs/xfs/xfs_dir2_block.c
index a5f4f4fb8868..fb5a556725b3 100644
--- a/fs/xfs/xfs_dir2_block.c
+++ b/fs/xfs/xfs_dir2_block.c
@@ -271,7 +271,7 @@ xfs_dir2_block_addname(
271 } 271 }
272 lfloglow = toidx + 1 - (be32_to_cpu(btp->stale) - 1); 272 lfloglow = toidx + 1 - (be32_to_cpu(btp->stale) - 1);
273 lfloghigh -= be32_to_cpu(btp->stale) - 1; 273 lfloghigh -= be32_to_cpu(btp->stale) - 1;
274 be32_add(&btp->count, -(be32_to_cpu(btp->stale) - 1)); 274 be32_add_cpu(&btp->count, -(be32_to_cpu(btp->stale) - 1));
275 xfs_dir2_data_make_free(tp, bp, 275 xfs_dir2_data_make_free(tp, bp,
276 (xfs_dir2_data_aoff_t)((char *)blp - (char *)block), 276 (xfs_dir2_data_aoff_t)((char *)blp - (char *)block),
277 (xfs_dir2_data_aoff_t)((be32_to_cpu(btp->stale) - 1) * sizeof(*blp)), 277 (xfs_dir2_data_aoff_t)((be32_to_cpu(btp->stale) - 1) * sizeof(*blp)),
@@ -326,7 +326,7 @@ xfs_dir2_block_addname(
326 /* 326 /*
327 * Update the tail (entry count). 327 * Update the tail (entry count).
328 */ 328 */
329 be32_add(&btp->count, 1); 329 be32_add_cpu(&btp->count, 1);
330 /* 330 /*
331 * If we now need to rebuild the bestfree map, do so. 331 * If we now need to rebuild the bestfree map, do so.
332 * This needs to happen before the next call to use_free. 332 * This needs to happen before the next call to use_free.
@@ -387,7 +387,7 @@ xfs_dir2_block_addname(
387 lfloglow = MIN(mid, lfloglow); 387 lfloglow = MIN(mid, lfloglow);
388 lfloghigh = MAX(highstale, lfloghigh); 388 lfloghigh = MAX(highstale, lfloghigh);
389 } 389 }
390 be32_add(&btp->stale, -1); 390 be32_add_cpu(&btp->stale, -1);
391 } 391 }
392 /* 392 /*
393 * Point to the new data entry. 393 * Point to the new data entry.
@@ -767,7 +767,7 @@ xfs_dir2_block_removename(
767 /* 767 /*
768 * Fix up the block tail. 768 * Fix up the block tail.
769 */ 769 */
770 be32_add(&btp->stale, 1); 770 be32_add_cpu(&btp->stale, 1);
771 xfs_dir2_block_log_tail(tp, bp); 771 xfs_dir2_block_log_tail(tp, bp);
772 /* 772 /*
773 * Remove the leaf entry by marking it stale. 773 * Remove the leaf entry by marking it stale.
diff --git a/fs/xfs/xfs_dir2_data.c b/fs/xfs/xfs_dir2_data.c
index d2452699e9b1..fb8c9e08b23d 100644
--- a/fs/xfs/xfs_dir2_data.c
+++ b/fs/xfs/xfs_dir2_data.c
@@ -587,7 +587,7 @@ xfs_dir2_data_make_free(
587 /* 587 /*
588 * Fix up the new big freespace. 588 * Fix up the new big freespace.
589 */ 589 */
590 be16_add(&prevdup->length, len + be16_to_cpu(postdup->length)); 590 be16_add_cpu(&prevdup->length, len + be16_to_cpu(postdup->length));
591 *xfs_dir2_data_unused_tag_p(prevdup) = 591 *xfs_dir2_data_unused_tag_p(prevdup) =
592 cpu_to_be16((char *)prevdup - (char *)d); 592 cpu_to_be16((char *)prevdup - (char *)d);
593 xfs_dir2_data_log_unused(tp, bp, prevdup); 593 xfs_dir2_data_log_unused(tp, bp, prevdup);
@@ -621,7 +621,7 @@ xfs_dir2_data_make_free(
621 */ 621 */
622 else if (prevdup) { 622 else if (prevdup) {
623 dfp = xfs_dir2_data_freefind(d, prevdup); 623 dfp = xfs_dir2_data_freefind(d, prevdup);
624 be16_add(&prevdup->length, len); 624 be16_add_cpu(&prevdup->length, len);
625 *xfs_dir2_data_unused_tag_p(prevdup) = 625 *xfs_dir2_data_unused_tag_p(prevdup) =
626 cpu_to_be16((char *)prevdup - (char *)d); 626 cpu_to_be16((char *)prevdup - (char *)d);
627 xfs_dir2_data_log_unused(tp, bp, prevdup); 627 xfs_dir2_data_log_unused(tp, bp, prevdup);
diff --git a/fs/xfs/xfs_dir2_leaf.c b/fs/xfs/xfs_dir2_leaf.c
index 0ca0020ba09f..bc52b803d79b 100644
--- a/fs/xfs/xfs_dir2_leaf.c
+++ b/fs/xfs/xfs_dir2_leaf.c
@@ -359,7 +359,7 @@ xfs_dir2_leaf_addname(
359 bestsp--; 359 bestsp--;
360 memmove(&bestsp[0], &bestsp[1], 360 memmove(&bestsp[0], &bestsp[1],
361 be32_to_cpu(ltp->bestcount) * sizeof(bestsp[0])); 361 be32_to_cpu(ltp->bestcount) * sizeof(bestsp[0]));
362 be32_add(&ltp->bestcount, 1); 362 be32_add_cpu(&ltp->bestcount, 1);
363 xfs_dir2_leaf_log_tail(tp, lbp); 363 xfs_dir2_leaf_log_tail(tp, lbp);
364 xfs_dir2_leaf_log_bests(tp, lbp, 0, be32_to_cpu(ltp->bestcount) - 1); 364 xfs_dir2_leaf_log_bests(tp, lbp, 0, be32_to_cpu(ltp->bestcount) - 1);
365 } 365 }
@@ -445,7 +445,7 @@ xfs_dir2_leaf_addname(
445 */ 445 */
446 lfloglow = index; 446 lfloglow = index;
447 lfloghigh = be16_to_cpu(leaf->hdr.count); 447 lfloghigh = be16_to_cpu(leaf->hdr.count);
448 be16_add(&leaf->hdr.count, 1); 448 be16_add_cpu(&leaf->hdr.count, 1);
449 } 449 }
450 /* 450 /*
451 * There are stale entries. 451 * There are stale entries.
@@ -523,7 +523,7 @@ xfs_dir2_leaf_addname(
523 lfloglow = MIN(index, lfloglow); 523 lfloglow = MIN(index, lfloglow);
524 lfloghigh = MAX(highstale, lfloghigh); 524 lfloghigh = MAX(highstale, lfloghigh);
525 } 525 }
526 be16_add(&leaf->hdr.stale, -1); 526 be16_add_cpu(&leaf->hdr.stale, -1);
527 } 527 }
528 /* 528 /*
529 * Fill in the new leaf entry. 529 * Fill in the new leaf entry.
@@ -626,7 +626,7 @@ xfs_dir2_leaf_compact(
626 * Update and log the header, log the leaf entries. 626 * Update and log the header, log the leaf entries.
627 */ 627 */
628 ASSERT(be16_to_cpu(leaf->hdr.stale) == from - to); 628 ASSERT(be16_to_cpu(leaf->hdr.stale) == from - to);
629 be16_add(&leaf->hdr.count, -(be16_to_cpu(leaf->hdr.stale))); 629 be16_add_cpu(&leaf->hdr.count, -(be16_to_cpu(leaf->hdr.stale)));
630 leaf->hdr.stale = 0; 630 leaf->hdr.stale = 0;
631 xfs_dir2_leaf_log_header(args->trans, bp); 631 xfs_dir2_leaf_log_header(args->trans, bp);
632 if (loglow != -1) 632 if (loglow != -1)
@@ -728,7 +728,7 @@ xfs_dir2_leaf_compact_x1(
728 /* 728 /*
729 * Adjust the leaf header values. 729 * Adjust the leaf header values.
730 */ 730 */
731 be16_add(&leaf->hdr.count, -(from - to)); 731 be16_add_cpu(&leaf->hdr.count, -(from - to));
732 leaf->hdr.stale = cpu_to_be16(1); 732 leaf->hdr.stale = cpu_to_be16(1);
733 /* 733 /*
734 * Remember the low/high stale value only in the "right" 734 * Remember the low/high stale value only in the "right"
@@ -1470,7 +1470,7 @@ xfs_dir2_leaf_removename(
1470 /* 1470 /*
1471 * We just mark the leaf entry stale by putting a null in it. 1471 * We just mark the leaf entry stale by putting a null in it.
1472 */ 1472 */
1473 be16_add(&leaf->hdr.stale, 1); 1473 be16_add_cpu(&leaf->hdr.stale, 1);
1474 xfs_dir2_leaf_log_header(tp, lbp); 1474 xfs_dir2_leaf_log_header(tp, lbp);
1475 lep->address = cpu_to_be32(XFS_DIR2_NULL_DATAPTR); 1475 lep->address = cpu_to_be32(XFS_DIR2_NULL_DATAPTR);
1476 xfs_dir2_leaf_log_ents(tp, lbp, index, index); 1476 xfs_dir2_leaf_log_ents(tp, lbp, index, index);
@@ -1531,7 +1531,7 @@ xfs_dir2_leaf_removename(
1531 */ 1531 */
1532 memmove(&bestsp[db - i], bestsp, 1532 memmove(&bestsp[db - i], bestsp,
1533 (be32_to_cpu(ltp->bestcount) - (db - i)) * sizeof(*bestsp)); 1533 (be32_to_cpu(ltp->bestcount) - (db - i)) * sizeof(*bestsp));
1534 be32_add(&ltp->bestcount, -(db - i)); 1534 be32_add_cpu(&ltp->bestcount, -(db - i));
1535 xfs_dir2_leaf_log_tail(tp, lbp); 1535 xfs_dir2_leaf_log_tail(tp, lbp);
1536 xfs_dir2_leaf_log_bests(tp, lbp, 0, be32_to_cpu(ltp->bestcount) - 1); 1536 xfs_dir2_leaf_log_bests(tp, lbp, 0, be32_to_cpu(ltp->bestcount) - 1);
1537 } else 1537 } else
@@ -1712,7 +1712,7 @@ xfs_dir2_leaf_trim_data(
1712 * Eliminate the last bests entry from the table. 1712 * Eliminate the last bests entry from the table.
1713 */ 1713 */
1714 bestsp = xfs_dir2_leaf_bests_p(ltp); 1714 bestsp = xfs_dir2_leaf_bests_p(ltp);
1715 be32_add(&ltp->bestcount, -1); 1715 be32_add_cpu(&ltp->bestcount, -1);
1716 memmove(&bestsp[1], &bestsp[0], be32_to_cpu(ltp->bestcount) * sizeof(*bestsp)); 1716 memmove(&bestsp[1], &bestsp[0], be32_to_cpu(ltp->bestcount) * sizeof(*bestsp));
1717 xfs_dir2_leaf_log_tail(tp, lbp); 1717 xfs_dir2_leaf_log_tail(tp, lbp);
1718 xfs_dir2_leaf_log_bests(tp, lbp, 0, be32_to_cpu(ltp->bestcount) - 1); 1718 xfs_dir2_leaf_log_bests(tp, lbp, 0, be32_to_cpu(ltp->bestcount) - 1);
diff --git a/fs/xfs/xfs_dir2_node.c b/fs/xfs/xfs_dir2_node.c
index eb18e399e836..8dade711f099 100644
--- a/fs/xfs/xfs_dir2_node.c
+++ b/fs/xfs/xfs_dir2_node.c
@@ -254,7 +254,7 @@ xfs_dir2_leafn_add(
254 (be16_to_cpu(leaf->hdr.count) - index) * sizeof(*lep)); 254 (be16_to_cpu(leaf->hdr.count) - index) * sizeof(*lep));
255 lfloglow = index; 255 lfloglow = index;
256 lfloghigh = be16_to_cpu(leaf->hdr.count); 256 lfloghigh = be16_to_cpu(leaf->hdr.count);
257 be16_add(&leaf->hdr.count, 1); 257 be16_add_cpu(&leaf->hdr.count, 1);
258 } 258 }
259 /* 259 /*
260 * There are stale entries. We'll use one for the new entry. 260 * There are stale entries. We'll use one for the new entry.
@@ -322,7 +322,7 @@ xfs_dir2_leafn_add(
322 lfloglow = MIN(index, lfloglow); 322 lfloglow = MIN(index, lfloglow);
323 lfloghigh = MAX(highstale, lfloghigh); 323 lfloghigh = MAX(highstale, lfloghigh);
324 } 324 }
325 be16_add(&leaf->hdr.stale, -1); 325 be16_add_cpu(&leaf->hdr.stale, -1);
326 } 326 }
327 /* 327 /*
328 * Insert the new entry, log everything. 328 * Insert the new entry, log everything.
@@ -697,10 +697,10 @@ xfs_dir2_leafn_moveents(
697 /* 697 /*
698 * Update the headers and log them. 698 * Update the headers and log them.
699 */ 699 */
700 be16_add(&leaf_s->hdr.count, -(count)); 700 be16_add_cpu(&leaf_s->hdr.count, -(count));
701 be16_add(&leaf_s->hdr.stale, -(stale)); 701 be16_add_cpu(&leaf_s->hdr.stale, -(stale));
702 be16_add(&leaf_d->hdr.count, count); 702 be16_add_cpu(&leaf_d->hdr.count, count);
703 be16_add(&leaf_d->hdr.stale, stale); 703 be16_add_cpu(&leaf_d->hdr.stale, stale);
704 xfs_dir2_leaf_log_header(tp, bp_s); 704 xfs_dir2_leaf_log_header(tp, bp_s);
705 xfs_dir2_leaf_log_header(tp, bp_d); 705 xfs_dir2_leaf_log_header(tp, bp_d);
706 xfs_dir2_leafn_check(args->dp, bp_s); 706 xfs_dir2_leafn_check(args->dp, bp_s);
@@ -885,7 +885,7 @@ xfs_dir2_leafn_remove(
885 * Kill the leaf entry by marking it stale. 885 * Kill the leaf entry by marking it stale.
886 * Log the leaf block changes. 886 * Log the leaf block changes.
887 */ 887 */
888 be16_add(&leaf->hdr.stale, 1); 888 be16_add_cpu(&leaf->hdr.stale, 1);
889 xfs_dir2_leaf_log_header(tp, bp); 889 xfs_dir2_leaf_log_header(tp, bp);
890 lep->address = cpu_to_be32(XFS_DIR2_NULL_DATAPTR); 890 lep->address = cpu_to_be32(XFS_DIR2_NULL_DATAPTR);
891 xfs_dir2_leaf_log_ents(tp, bp, index, index); 891 xfs_dir2_leaf_log_ents(tp, bp, index, index);
@@ -971,7 +971,7 @@ xfs_dir2_leafn_remove(
971 /* 971 /*
972 * One less used entry in the free table. 972 * One less used entry in the free table.
973 */ 973 */
974 be32_add(&free->hdr.nused, -1); 974 be32_add_cpu(&free->hdr.nused, -1);
975 xfs_dir2_free_log_header(tp, fbp); 975 xfs_dir2_free_log_header(tp, fbp);
976 /* 976 /*
977 * If this was the last entry in the table, we can 977 * If this was the last entry in the table, we can
@@ -1642,7 +1642,7 @@ xfs_dir2_node_addname_int(
1642 * (this should always be true) then update the header. 1642 * (this should always be true) then update the header.
1643 */ 1643 */
1644 if (be16_to_cpu(free->bests[findex]) == NULLDATAOFF) { 1644 if (be16_to_cpu(free->bests[findex]) == NULLDATAOFF) {
1645 be32_add(&free->hdr.nused, 1); 1645 be32_add_cpu(&free->hdr.nused, 1);
1646 xfs_dir2_free_log_header(tp, fbp); 1646 xfs_dir2_free_log_header(tp, fbp);
1647 } 1647 }
1648 /* 1648 /*
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index b8de7f3cc17e..eadc1591c795 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -318,7 +318,7 @@ xfs_growfs_data_private(
318 } 318 }
319 ASSERT(bp); 319 ASSERT(bp);
320 agi = XFS_BUF_TO_AGI(bp); 320 agi = XFS_BUF_TO_AGI(bp);
321 be32_add(&agi->agi_length, new); 321 be32_add_cpu(&agi->agi_length, new);
322 ASSERT(nagcount == oagcount || 322 ASSERT(nagcount == oagcount ||
323 be32_to_cpu(agi->agi_length) == mp->m_sb.sb_agblocks); 323 be32_to_cpu(agi->agi_length) == mp->m_sb.sb_agblocks);
324 xfs_ialloc_log_agi(tp, bp, XFS_AGI_LENGTH); 324 xfs_ialloc_log_agi(tp, bp, XFS_AGI_LENGTH);
@@ -331,7 +331,7 @@ xfs_growfs_data_private(
331 } 331 }
332 ASSERT(bp); 332 ASSERT(bp);
333 agf = XFS_BUF_TO_AGF(bp); 333 agf = XFS_BUF_TO_AGF(bp);
334 be32_add(&agf->agf_length, new); 334 be32_add_cpu(&agf->agf_length, new);
335 ASSERT(be32_to_cpu(agf->agf_length) == 335 ASSERT(be32_to_cpu(agf->agf_length) ==
336 be32_to_cpu(agi->agi_length)); 336 be32_to_cpu(agi->agi_length));
337 xfs_alloc_log_agf(tp, bp, XFS_AGF_LENGTH); 337 xfs_alloc_log_agf(tp, bp, XFS_AGF_LENGTH);
diff --git a/fs/xfs/xfs_ialloc.c b/fs/xfs/xfs_ialloc.c
index 1409c2d61c11..c5836b951d0c 100644
--- a/fs/xfs/xfs_ialloc.c
+++ b/fs/xfs/xfs_ialloc.c
@@ -301,8 +301,8 @@ xfs_ialloc_ag_alloc(
301 } 301 }
302 xfs_trans_inode_alloc_buf(tp, fbuf); 302 xfs_trans_inode_alloc_buf(tp, fbuf);
303 } 303 }
304 be32_add(&agi->agi_count, newlen); 304 be32_add_cpu(&agi->agi_count, newlen);
305 be32_add(&agi->agi_freecount, newlen); 305 be32_add_cpu(&agi->agi_freecount, newlen);
306 agno = be32_to_cpu(agi->agi_seqno); 306 agno = be32_to_cpu(agi->agi_seqno);
307 down_read(&args.mp->m_peraglock); 307 down_read(&args.mp->m_peraglock);
308 args.mp->m_perag[agno].pagi_freecount += newlen; 308 args.mp->m_perag[agno].pagi_freecount += newlen;
@@ -885,7 +885,7 @@ nextag:
885 if ((error = xfs_inobt_update(cur, rec.ir_startino, rec.ir_freecount, 885 if ((error = xfs_inobt_update(cur, rec.ir_startino, rec.ir_freecount,
886 rec.ir_free))) 886 rec.ir_free)))
887 goto error0; 887 goto error0;
888 be32_add(&agi->agi_freecount, -1); 888 be32_add_cpu(&agi->agi_freecount, -1);
889 xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT); 889 xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT);
890 down_read(&mp->m_peraglock); 890 down_read(&mp->m_peraglock);
891 mp->m_perag[tagno].pagi_freecount--; 891 mp->m_perag[tagno].pagi_freecount--;
@@ -1065,8 +1065,8 @@ xfs_difree(
1065 * to be freed when the transaction is committed. 1065 * to be freed when the transaction is committed.
1066 */ 1066 */
1067 ilen = XFS_IALLOC_INODES(mp); 1067 ilen = XFS_IALLOC_INODES(mp);
1068 be32_add(&agi->agi_count, -ilen); 1068 be32_add_cpu(&agi->agi_count, -ilen);
1069 be32_add(&agi->agi_freecount, -(ilen - 1)); 1069 be32_add_cpu(&agi->agi_freecount, -(ilen - 1));
1070 xfs_ialloc_log_agi(tp, agbp, XFS_AGI_COUNT | XFS_AGI_FREECOUNT); 1070 xfs_ialloc_log_agi(tp, agbp, XFS_AGI_COUNT | XFS_AGI_FREECOUNT);
1071 down_read(&mp->m_peraglock); 1071 down_read(&mp->m_peraglock);
1072 mp->m_perag[agno].pagi_freecount -= ilen - 1; 1072 mp->m_perag[agno].pagi_freecount -= ilen - 1;
@@ -1095,7 +1095,7 @@ xfs_difree(
1095 /* 1095 /*
1096 * Change the inode free counts and log the ag/sb changes. 1096 * Change the inode free counts and log the ag/sb changes.
1097 */ 1097 */
1098 be32_add(&agi->agi_freecount, 1); 1098 be32_add_cpu(&agi->agi_freecount, 1);
1099 xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT); 1099 xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT);
1100 down_read(&mp->m_peraglock); 1100 down_read(&mp->m_peraglock);
1101 mp->m_perag[agno].pagi_freecount++; 1101 mp->m_perag[agno].pagi_freecount++;
diff --git a/fs/xfs/xfs_ialloc_btree.c b/fs/xfs/xfs_ialloc_btree.c
index 8cdeeaf8632b..e5310c90e50f 100644
--- a/fs/xfs/xfs_ialloc_btree.c
+++ b/fs/xfs/xfs_ialloc_btree.c
@@ -189,7 +189,7 @@ xfs_inobt_delrec(
189 */ 189 */
190 bno = be32_to_cpu(agi->agi_root); 190 bno = be32_to_cpu(agi->agi_root);
191 agi->agi_root = *pp; 191 agi->agi_root = *pp;
192 be32_add(&agi->agi_level, -1); 192 be32_add_cpu(&agi->agi_level, -1);
193 /* 193 /*
194 * Free the block. 194 * Free the block.
195 */ 195 */
@@ -1132,7 +1132,7 @@ xfs_inobt_lshift(
1132 /* 1132 /*
1133 * Bump and log left's numrecs, decrement and log right's numrecs. 1133 * Bump and log left's numrecs, decrement and log right's numrecs.
1134 */ 1134 */
1135 be16_add(&left->bb_numrecs, 1); 1135 be16_add_cpu(&left->bb_numrecs, 1);
1136 xfs_inobt_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS); 1136 xfs_inobt_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS);
1137#ifdef DEBUG 1137#ifdef DEBUG
1138 if (level > 0) 1138 if (level > 0)
@@ -1140,7 +1140,7 @@ xfs_inobt_lshift(
1140 else 1140 else
1141 xfs_btree_check_rec(cur->bc_btnum, lrp - 1, lrp); 1141 xfs_btree_check_rec(cur->bc_btnum, lrp - 1, lrp);
1142#endif 1142#endif
1143 be16_add(&right->bb_numrecs, -1); 1143 be16_add_cpu(&right->bb_numrecs, -1);
1144 xfs_inobt_log_block(cur->bc_tp, rbp, XFS_BB_NUMRECS); 1144 xfs_inobt_log_block(cur->bc_tp, rbp, XFS_BB_NUMRECS);
1145 /* 1145 /*
1146 * Slide the contents of right down one entry. 1146 * Slide the contents of right down one entry.
@@ -1232,7 +1232,7 @@ xfs_inobt_newroot(
1232 * Set the root data in the a.g. inode structure. 1232 * Set the root data in the a.g. inode structure.
1233 */ 1233 */
1234 agi->agi_root = cpu_to_be32(args.agbno); 1234 agi->agi_root = cpu_to_be32(args.agbno);
1235 be32_add(&agi->agi_level, 1); 1235 be32_add_cpu(&agi->agi_level, 1);
1236 xfs_ialloc_log_agi(args.tp, cur->bc_private.i.agbp, 1236 xfs_ialloc_log_agi(args.tp, cur->bc_private.i.agbp,
1237 XFS_AGI_ROOT | XFS_AGI_LEVEL); 1237 XFS_AGI_ROOT | XFS_AGI_LEVEL);
1238 /* 1238 /*
@@ -1426,9 +1426,9 @@ xfs_inobt_rshift(
1426 /* 1426 /*
1427 * Decrement and log left's numrecs, bump and log right's numrecs. 1427 * Decrement and log left's numrecs, bump and log right's numrecs.
1428 */ 1428 */
1429 be16_add(&left->bb_numrecs, -1); 1429 be16_add_cpu(&left->bb_numrecs, -1);
1430 xfs_inobt_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS); 1430 xfs_inobt_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS);
1431 be16_add(&right->bb_numrecs, 1); 1431 be16_add_cpu(&right->bb_numrecs, 1);
1432#ifdef DEBUG 1432#ifdef DEBUG
1433 if (level > 0) 1433 if (level > 0)
1434 xfs_btree_check_key(cur->bc_btnum, rkp, rkp + 1); 1434 xfs_btree_check_key(cur->bc_btnum, rkp, rkp + 1);
@@ -1529,7 +1529,7 @@ xfs_inobt_split(
1529 */ 1529 */
1530 if ((be16_to_cpu(left->bb_numrecs) & 1) && 1530 if ((be16_to_cpu(left->bb_numrecs) & 1) &&
1531 cur->bc_ptrs[level] <= be16_to_cpu(right->bb_numrecs) + 1) 1531 cur->bc_ptrs[level] <= be16_to_cpu(right->bb_numrecs) + 1)
1532 be16_add(&right->bb_numrecs, 1); 1532 be16_add_cpu(&right->bb_numrecs, 1);
1533 i = be16_to_cpu(left->bb_numrecs) - be16_to_cpu(right->bb_numrecs) + 1; 1533 i = be16_to_cpu(left->bb_numrecs) - be16_to_cpu(right->bb_numrecs) + 1;
1534 /* 1534 /*
1535 * For non-leaf blocks, copy keys and addresses over to the new block. 1535 * For non-leaf blocks, copy keys and addresses over to the new block.
@@ -1565,7 +1565,7 @@ xfs_inobt_split(
1565 * Find the left block number by looking in the buffer. 1565 * Find the left block number by looking in the buffer.
1566 * Adjust numrecs, sibling pointers. 1566 * Adjust numrecs, sibling pointers.
1567 */ 1567 */
1568 be16_add(&left->bb_numrecs, -(be16_to_cpu(right->bb_numrecs))); 1568 be16_add_cpu(&left->bb_numrecs, -(be16_to_cpu(right->bb_numrecs)));
1569 right->bb_rightsib = left->bb_rightsib; 1569 right->bb_rightsib = left->bb_rightsib;
1570 left->bb_rightsib = cpu_to_be32(args.agbno); 1570 left->bb_rightsib = cpu_to_be32(args.agbno);
1571 right->bb_leftsib = cpu_to_be32(lbno); 1571 right->bb_leftsib = cpu_to_be32(lbno);
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index b3ac3805d3c4..a75edca1860f 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -1509,9 +1509,9 @@ xlog_sync(xlog_t *log,
1509 * case, though. 1509 * case, though.
1510 */ 1510 */
1511 for (i = 0; i < split; i += BBSIZE) { 1511 for (i = 0; i < split; i += BBSIZE) {
1512 be32_add((__be32 *)dptr, 1); 1512 be32_add_cpu((__be32 *)dptr, 1);
1513 if (be32_to_cpu(*(__be32 *)dptr) == XLOG_HEADER_MAGIC_NUM) 1513 if (be32_to_cpu(*(__be32 *)dptr) == XLOG_HEADER_MAGIC_NUM)
1514 be32_add((__be32 *)dptr, 1); 1514 be32_add_cpu((__be32 *)dptr, 1);
1515 dptr += BBSIZE; 1515 dptr += BBSIZE;
1516 } 1516 }
1517 1517
@@ -1600,7 +1600,7 @@ xlog_state_finish_copy(xlog_t *log,
1600{ 1600{
1601 spin_lock(&log->l_icloglock); 1601 spin_lock(&log->l_icloglock);
1602 1602
1603 be32_add(&iclog->ic_header.h_num_logops, record_cnt); 1603 be32_add_cpu(&iclog->ic_header.h_num_logops, record_cnt);
1604 iclog->ic_offset += copy_bytes; 1604 iclog->ic_offset += copy_bytes;
1605 1605
1606 spin_unlock(&log->l_icloglock); 1606 spin_unlock(&log->l_icloglock);
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index 71e4c8dcc69b..140386434aa3 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -567,26 +567,26 @@ xfs_trans_apply_sb_deltas(
567 */ 567 */
568 if (!xfs_sb_version_haslazysbcount(&(tp->t_mountp->m_sb))) { 568 if (!xfs_sb_version_haslazysbcount(&(tp->t_mountp->m_sb))) {
569 if (tp->t_icount_delta) 569 if (tp->t_icount_delta)
570 be64_add(&sbp->sb_icount, tp->t_icount_delta); 570 be64_add_cpu(&sbp->sb_icount, tp->t_icount_delta);
571 if (tp->t_ifree_delta) 571 if (tp->t_ifree_delta)
572 be64_add(&sbp->sb_ifree, tp->t_ifree_delta); 572 be64_add_cpu(&sbp->sb_ifree, tp->t_ifree_delta);
573 if (tp->t_fdblocks_delta) 573 if (tp->t_fdblocks_delta)
574 be64_add(&sbp->sb_fdblocks, tp->t_fdblocks_delta); 574 be64_add_cpu(&sbp->sb_fdblocks, tp->t_fdblocks_delta);
575 if (tp->t_res_fdblocks_delta) 575 if (tp->t_res_fdblocks_delta)
576 be64_add(&sbp->sb_fdblocks, tp->t_res_fdblocks_delta); 576 be64_add_cpu(&sbp->sb_fdblocks, tp->t_res_fdblocks_delta);
577 } 577 }
578 578
579 if (tp->t_frextents_delta) 579 if (tp->t_frextents_delta)
580 be64_add(&sbp->sb_frextents, tp->t_frextents_delta); 580 be64_add_cpu(&sbp->sb_frextents, tp->t_frextents_delta);
581 if (tp->t_res_frextents_delta) 581 if (tp->t_res_frextents_delta)
582 be64_add(&sbp->sb_frextents, tp->t_res_frextents_delta); 582 be64_add_cpu(&sbp->sb_frextents, tp->t_res_frextents_delta);
583 583
584 if (tp->t_dblocks_delta) { 584 if (tp->t_dblocks_delta) {
585 be64_add(&sbp->sb_dblocks, tp->t_dblocks_delta); 585 be64_add_cpu(&sbp->sb_dblocks, tp->t_dblocks_delta);
586 whole = 1; 586 whole = 1;
587 } 587 }
588 if (tp->t_agcount_delta) { 588 if (tp->t_agcount_delta) {
589 be32_add(&sbp->sb_agcount, tp->t_agcount_delta); 589 be32_add_cpu(&sbp->sb_agcount, tp->t_agcount_delta);
590 whole = 1; 590 whole = 1;
591 } 591 }
592 if (tp->t_imaxpct_delta) { 592 if (tp->t_imaxpct_delta) {
@@ -594,19 +594,19 @@ xfs_trans_apply_sb_deltas(
594 whole = 1; 594 whole = 1;
595 } 595 }
596 if (tp->t_rextsize_delta) { 596 if (tp->t_rextsize_delta) {
597 be32_add(&sbp->sb_rextsize, tp->t_rextsize_delta); 597 be32_add_cpu(&sbp->sb_rextsize, tp->t_rextsize_delta);
598 whole = 1; 598 whole = 1;
599 } 599 }
600 if (tp->t_rbmblocks_delta) { 600 if (tp->t_rbmblocks_delta) {
601 be32_add(&sbp->sb_rbmblocks, tp->t_rbmblocks_delta); 601 be32_add_cpu(&sbp->sb_rbmblocks, tp->t_rbmblocks_delta);
602 whole = 1; 602 whole = 1;
603 } 603 }
604 if (tp->t_rblocks_delta) { 604 if (tp->t_rblocks_delta) {
605 be64_add(&sbp->sb_rblocks, tp->t_rblocks_delta); 605 be64_add_cpu(&sbp->sb_rblocks, tp->t_rblocks_delta);
606 whole = 1; 606 whole = 1;
607 } 607 }
608 if (tp->t_rextents_delta) { 608 if (tp->t_rextents_delta) {
609 be64_add(&sbp->sb_rextents, tp->t_rextents_delta); 609 be64_add_cpu(&sbp->sb_rextents, tp->t_rextents_delta);
610 whole = 1; 610 whole = 1;
611 } 611 }
612 if (tp->t_rextslog_delta) { 612 if (tp->t_rextslog_delta) {