aboutsummaryrefslogtreecommitdiffstats
path: root/fs/xfs
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2009-04-03 12:52:29 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2009-04-03 12:52:29 -0400
commitac7c1a776dfe1a9c83ea7885f858f5f1a144d8af (patch)
tree4ad3158dd6dc5029a221421ae9a3c339f10c3f10 /fs/xfs
parent3ba113d14cedcd88105a3b9c90f8ecce829e1095 (diff)
parentf36345ff9a4a77f2cc576a2777b6256d5c8798fa (diff)
Merge branch 'for-linus' of git://oss.sgi.com/xfs/xfs
* 'for-linus' of git://oss.sgi.com/xfs/xfs: (61 commits) Revert "xfs: increase the maximum number of supported ACL entries" xfs: cleanup uuid handling xfs: remove m_attroffset xfs: fix various typos xfs: pagecache usage optimization xfs: remove m_litino xfs: kill ino64 mount option xfs: kill mutex_t typedef xfs: increase the maximum number of supported ACL entries xfs: factor out code to find the longest free extent in the AG xfs: kill VN_BAD xfs: kill vn_atime_* helpers. xfs: cleanup xlog_bread xfs: cleanup xlog_recover_do_trans xfs: remove another leftover of the old inode log item format xfs: cleanup log unmount handling Fix xfs debug build breakage by pushing xfs_error.h after xfs: include header files for prototypes xfs: make symbols static xfs: move declaration to header file ...
Diffstat (limited to 'fs/xfs')
-rw-r--r--fs/xfs/Makefile1
-rw-r--r--fs/xfs/linux-2.6/mutex.h25
-rw-r--r--fs/xfs/linux-2.6/xfs_aops.c1
-rw-r--r--fs/xfs/linux-2.6/xfs_ioctl.c107
-rw-r--r--fs/xfs/linux-2.6/xfs_iops.c33
-rw-r--r--fs/xfs/linux-2.6/xfs_linux.h13
-rw-r--r--fs/xfs/linux-2.6/xfs_quotaops.c157
-rw-r--r--fs/xfs/linux-2.6/xfs_super.c137
-rw-r--r--fs/xfs/linux-2.6/xfs_super.h1
-rw-r--r--fs/xfs/linux-2.6/xfs_sync.h1
-rw-r--r--fs/xfs/linux-2.6/xfs_vnode.h32
-rw-r--r--fs/xfs/quota/xfs_dquot.c28
-rw-r--r--fs/xfs/quota/xfs_dquot.h18
-rw-r--r--fs/xfs/quota/xfs_qm.c212
-rw-r--r--fs/xfs/quota/xfs_qm.h26
-rw-r--r--fs/xfs/quota/xfs_qm_bhv.c1
-rw-r--r--fs/xfs/quota/xfs_qm_syscalls.c190
-rw-r--r--fs/xfs/quota/xfs_quota_priv.h40
-rw-r--r--fs/xfs/quota/xfs_trans_dquot.c16
-rw-r--r--fs/xfs/support/debug.c1
-rw-r--r--fs/xfs/support/uuid.c71
-rw-r--r--fs/xfs/support/uuid.h4
-rw-r--r--fs/xfs/xfs_ag.h4
-rw-r--r--fs/xfs/xfs_alloc.c26
-rw-r--r--fs/xfs/xfs_alloc.h6
-rw-r--r--fs/xfs/xfs_attr_leaf.c58
-rw-r--r--fs/xfs/xfs_bmap.c76
-rw-r--r--fs/xfs/xfs_bmap.h6
-rw-r--r--fs/xfs/xfs_btree.c4
-rw-r--r--fs/xfs/xfs_btree.h2
-rw-r--r--fs/xfs/xfs_da_btree.c2
-rw-r--r--fs/xfs/xfs_da_btree.h9
-rw-r--r--fs/xfs/xfs_dfrag.c68
-rw-r--r--fs/xfs/xfs_dinode.h4
-rw-r--r--fs/xfs/xfs_dir2.c2
-rw-r--r--fs/xfs/xfs_dir2_block.c7
-rw-r--r--fs/xfs/xfs_dir2_data.h2
-rw-r--r--fs/xfs/xfs_dir2_leaf.c17
-rw-r--r--fs/xfs/xfs_dir2_node.c2
-rw-r--r--fs/xfs/xfs_dir2_sf.c13
-rw-r--r--fs/xfs/xfs_extfree_item.h6
-rw-r--r--fs/xfs/xfs_filestream.c9
-rw-r--r--fs/xfs/xfs_fsops.c2
-rw-r--r--fs/xfs/xfs_ialloc.c12
-rw-r--r--fs/xfs/xfs_ialloc_btree.c2
-rw-r--r--fs/xfs/xfs_ialloc_btree.h22
-rw-r--r--fs/xfs/xfs_inode.h2
-rw-r--r--fs/xfs/xfs_inode_item.h2
-rw-r--r--fs/xfs/xfs_iomap.h2
-rw-r--r--fs/xfs/xfs_itable.c9
-rw-r--r--fs/xfs/xfs_log.c67
-rw-r--r--fs/xfs/xfs_log.h3
-rw-r--r--fs/xfs/xfs_log_priv.h3
-rw-r--r--fs/xfs/xfs_log_recover.c308
-rw-r--r--fs/xfs/xfs_mount.c253
-rw-r--r--fs/xfs/xfs_mount.h19
-rw-r--r--fs/xfs/xfs_qmops.c1
-rw-r--r--fs/xfs/xfs_quota.h3
-rw-r--r--fs/xfs/xfs_rtalloc.c10
-rw-r--r--fs/xfs/xfs_rtalloc.h8
-rw-r--r--fs/xfs/xfs_trans.h24
-rw-r--r--fs/xfs/xfs_trans_ail.c4
-rw-r--r--fs/xfs/xfs_trans_item.c2
-rw-r--r--fs/xfs/xfs_trans_space.h2
-rw-r--r--fs/xfs/xfs_types.h8
-rw-r--r--fs/xfs/xfs_utils.c2
-rw-r--r--fs/xfs/xfs_vnodeops.c408
-rw-r--r--fs/xfs/xfs_vnodeops.h3
68 files changed, 1025 insertions, 1594 deletions
diff --git a/fs/xfs/Makefile b/fs/xfs/Makefile
index c3dc491fff89..60f107e47fe9 100644
--- a/fs/xfs/Makefile
+++ b/fs/xfs/Makefile
@@ -33,6 +33,7 @@ xfs-$(CONFIG_XFS_QUOTA) += $(addprefix quota/, \
33 xfs_qm_syscalls.o \ 33 xfs_qm_syscalls.o \
34 xfs_qm_bhv.o \ 34 xfs_qm_bhv.o \
35 xfs_qm.o) 35 xfs_qm.o)
36xfs-$(CONFIG_XFS_QUOTA) += linux-2.6/xfs_quotaops.o
36 37
37ifeq ($(CONFIG_XFS_QUOTA),y) 38ifeq ($(CONFIG_XFS_QUOTA),y)
38xfs-$(CONFIG_PROC_FS) += quota/xfs_qm_stats.o 39xfs-$(CONFIG_PROC_FS) += quota/xfs_qm_stats.o
diff --git a/fs/xfs/linux-2.6/mutex.h b/fs/xfs/linux-2.6/mutex.h
deleted file mode 100644
index 2a88d56c4dc2..000000000000
--- a/fs/xfs/linux-2.6/mutex.h
+++ /dev/null
@@ -1,25 +0,0 @@
1/*
2 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18#ifndef __XFS_SUPPORT_MUTEX_H__
19#define __XFS_SUPPORT_MUTEX_H__
20
21#include <linux/mutex.h>
22
23typedef struct mutex mutex_t;
24
25#endif /* __XFS_SUPPORT_MUTEX_H__ */
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index de3a198f771e..c13f67300fe7 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -1623,4 +1623,5 @@ const struct address_space_operations xfs_address_space_operations = {
1623 .bmap = xfs_vm_bmap, 1623 .bmap = xfs_vm_bmap,
1624 .direct_IO = xfs_vm_direct_IO, 1624 .direct_IO = xfs_vm_direct_IO,
1625 .migratepage = buffer_migrate_page, 1625 .migratepage = buffer_migrate_page,
1626 .is_partially_uptodate = block_is_partially_uptodate,
1626}; 1627};
diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c
index 4bd112313f33..d0b499418a7d 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl.c
@@ -34,6 +34,7 @@
34#include "xfs_dir2_sf.h" 34#include "xfs_dir2_sf.h"
35#include "xfs_dinode.h" 35#include "xfs_dinode.h"
36#include "xfs_inode.h" 36#include "xfs_inode.h"
37#include "xfs_ioctl.h"
37#include "xfs_btree.h" 38#include "xfs_btree.h"
38#include "xfs_ialloc.h" 39#include "xfs_ialloc.h"
39#include "xfs_rtalloc.h" 40#include "xfs_rtalloc.h"
@@ -78,92 +79,74 @@ xfs_find_handle(
78 int hsize; 79 int hsize;
79 xfs_handle_t handle; 80 xfs_handle_t handle;
80 struct inode *inode; 81 struct inode *inode;
82 struct file *file = NULL;
83 struct path path;
84 int error;
85 struct xfs_inode *ip;
81 86
82 memset((char *)&handle, 0, sizeof(handle)); 87 if (cmd == XFS_IOC_FD_TO_HANDLE) {
83 88 file = fget(hreq->fd);
84 switch (cmd) { 89 if (!file)
85 case XFS_IOC_PATH_TO_FSHANDLE: 90 return -EBADF;
86 case XFS_IOC_PATH_TO_HANDLE: { 91 inode = file->f_path.dentry->d_inode;
87 struct path path; 92 } else {
88 int error = user_lpath((const char __user *)hreq->path, &path); 93 error = user_lpath((const char __user *)hreq->path, &path);
89 if (error) 94 if (error)
90 return error; 95 return error;
91 96 inode = path.dentry->d_inode;
92 ASSERT(path.dentry);
93 ASSERT(path.dentry->d_inode);
94 inode = igrab(path.dentry->d_inode);
95 path_put(&path);
96 break;
97 } 97 }
98 ip = XFS_I(inode);
98 99
99 case XFS_IOC_FD_TO_HANDLE: { 100 /*
100 struct file *file; 101 * We can only generate handles for inodes residing on a XFS filesystem,
101 102 * and only for regular files, directories or symbolic links.
102 file = fget(hreq->fd); 103 */
103 if (!file) 104 error = -EINVAL;
104 return -EBADF; 105 if (inode->i_sb->s_magic != XFS_SB_MAGIC)
106 goto out_put;
105 107
106 ASSERT(file->f_path.dentry); 108 error = -EBADF;
107 ASSERT(file->f_path.dentry->d_inode); 109 if (!S_ISREG(inode->i_mode) &&
108 inode = igrab(file->f_path.dentry->d_inode); 110 !S_ISDIR(inode->i_mode) &&
109 fput(file); 111 !S_ISLNK(inode->i_mode))
110 break; 112 goto out_put;
111 }
112 113
113 default:
114 ASSERT(0);
115 return -XFS_ERROR(EINVAL);
116 }
117 114
118 if (inode->i_sb->s_magic != XFS_SB_MAGIC) { 115 memcpy(&handle.ha_fsid, ip->i_mount->m_fixedfsid, sizeof(xfs_fsid_t));
119 /* we're not in XFS anymore, Toto */
120 iput(inode);
121 return -XFS_ERROR(EINVAL);
122 }
123 116
124 switch (inode->i_mode & S_IFMT) { 117 if (cmd == XFS_IOC_PATH_TO_FSHANDLE) {
125 case S_IFREG: 118 /*
126 case S_IFDIR: 119 * This handle only contains an fsid, zero the rest.
127 case S_IFLNK: 120 */
128 break; 121 memset(&handle.ha_fid, 0, sizeof(handle.ha_fid));
129 default: 122 hsize = sizeof(xfs_fsid_t);
130 iput(inode); 123 } else {
131 return -XFS_ERROR(EBADF);
132 }
133
134 /* now we can grab the fsid */
135 memcpy(&handle.ha_fsid, XFS_I(inode)->i_mount->m_fixedfsid,
136 sizeof(xfs_fsid_t));
137 hsize = sizeof(xfs_fsid_t);
138
139 if (cmd != XFS_IOC_PATH_TO_FSHANDLE) {
140 xfs_inode_t *ip = XFS_I(inode);
141 int lock_mode; 124 int lock_mode;
142 125
143 /* need to get access to the xfs_inode to read the generation */
144 lock_mode = xfs_ilock_map_shared(ip); 126 lock_mode = xfs_ilock_map_shared(ip);
145
146 /* fill in fid section of handle from inode */
147 handle.ha_fid.fid_len = sizeof(xfs_fid_t) - 127 handle.ha_fid.fid_len = sizeof(xfs_fid_t) -
148 sizeof(handle.ha_fid.fid_len); 128 sizeof(handle.ha_fid.fid_len);
149 handle.ha_fid.fid_pad = 0; 129 handle.ha_fid.fid_pad = 0;
150 handle.ha_fid.fid_gen = ip->i_d.di_gen; 130 handle.ha_fid.fid_gen = ip->i_d.di_gen;
151 handle.ha_fid.fid_ino = ip->i_ino; 131 handle.ha_fid.fid_ino = ip->i_ino;
152
153 xfs_iunlock_map_shared(ip, lock_mode); 132 xfs_iunlock_map_shared(ip, lock_mode);
154 133
155 hsize = XFS_HSIZE(handle); 134 hsize = XFS_HSIZE(handle);
156 } 135 }
157 136
158 /* now copy our handle into the user buffer & write out the size */ 137 error = -EFAULT;
159 if (copy_to_user(hreq->ohandle, &handle, hsize) || 138 if (copy_to_user(hreq->ohandle, &handle, hsize) ||
160 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32))) { 139 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
161 iput(inode); 140 goto out_put;
162 return -XFS_ERROR(EFAULT);
163 }
164 141
165 iput(inode); 142 error = 0;
166 return 0; 143
144 out_put:
145 if (cmd == XFS_IOC_FD_TO_HANDLE)
146 fput(file);
147 else
148 path_put(&path);
149 return error;
167} 150}
168 151
169/* 152/*
diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
index 2940612e3aeb..6075382336d7 100644
--- a/fs/xfs/linux-2.6/xfs_iops.c
+++ b/fs/xfs/linux-2.6/xfs_iops.c
@@ -211,8 +211,13 @@ xfs_vn_mknod(
211 * Irix uses Missed'em'V split, but doesn't want to see 211 * Irix uses Missed'em'V split, but doesn't want to see
212 * the upper 5 bits of (14bit) major. 212 * the upper 5 bits of (14bit) major.
213 */ 213 */
214 if (unlikely(!sysv_valid_dev(rdev) || MAJOR(rdev) & ~0x1ff)) 214 if (S_ISCHR(mode) || S_ISBLK(mode)) {
215 return -EINVAL; 215 if (unlikely(!sysv_valid_dev(rdev) || MAJOR(rdev) & ~0x1ff))
216 return -EINVAL;
217 rdev = sysv_encode_dev(rdev);
218 } else {
219 rdev = 0;
220 }
216 221
217 if (test_default_acl && test_default_acl(dir)) { 222 if (test_default_acl && test_default_acl(dir)) {
218 if (!_ACL_ALLOC(default_acl)) { 223 if (!_ACL_ALLOC(default_acl)) {
@@ -224,28 +229,11 @@ xfs_vn_mknod(
224 } 229 }
225 } 230 }
226 231
227 xfs_dentry_to_name(&name, dentry);
228
229 if (IS_POSIXACL(dir) && !default_acl) 232 if (IS_POSIXACL(dir) && !default_acl)
230 mode &= ~current_umask(); 233 mode &= ~current_umask();
231 234
232 switch (mode & S_IFMT) { 235 xfs_dentry_to_name(&name, dentry);
233 case S_IFCHR: 236 error = xfs_create(XFS_I(dir), &name, mode, rdev, &ip, NULL);
234 case S_IFBLK:
235 case S_IFIFO:
236 case S_IFSOCK:
237 rdev = sysv_encode_dev(rdev);
238 case S_IFREG:
239 error = xfs_create(XFS_I(dir), &name, mode, rdev, &ip, NULL);
240 break;
241 case S_IFDIR:
242 error = xfs_mkdir(XFS_I(dir), &name, mode, &ip, NULL);
243 break;
244 default:
245 error = EINVAL;
246 break;
247 }
248
249 if (unlikely(error)) 237 if (unlikely(error))
250 goto out_free_acl; 238 goto out_free_acl;
251 239
@@ -553,9 +541,6 @@ xfs_vn_getattr(
553 stat->uid = ip->i_d.di_uid; 541 stat->uid = ip->i_d.di_uid;
554 stat->gid = ip->i_d.di_gid; 542 stat->gid = ip->i_d.di_gid;
555 stat->ino = ip->i_ino; 543 stat->ino = ip->i_ino;
556#if XFS_BIG_INUMS
557 stat->ino += mp->m_inoadd;
558#endif
559 stat->atime = inode->i_atime; 544 stat->atime = inode->i_atime;
560 stat->mtime.tv_sec = ip->i_d.di_mtime.t_sec; 545 stat->mtime.tv_sec = ip->i_d.di_mtime.t_sec;
561 stat->mtime.tv_nsec = ip->i_d.di_mtime.t_nsec; 546 stat->mtime.tv_nsec = ip->i_d.di_mtime.t_nsec;
diff --git a/fs/xfs/linux-2.6/xfs_linux.h b/fs/xfs/linux-2.6/xfs_linux.h
index 507492d6dccd..f65a53f8752f 100644
--- a/fs/xfs/linux-2.6/xfs_linux.h
+++ b/fs/xfs/linux-2.6/xfs_linux.h
@@ -38,7 +38,6 @@
38#include <kmem.h> 38#include <kmem.h>
39#include <mrlock.h> 39#include <mrlock.h>
40#include <sv.h> 40#include <sv.h>
41#include <mutex.h>
42#include <time.h> 41#include <time.h>
43 42
44#include <support/ktrace.h> 43#include <support/ktrace.h>
@@ -51,6 +50,7 @@
51#include <linux/blkdev.h> 50#include <linux/blkdev.h>
52#include <linux/slab.h> 51#include <linux/slab.h>
53#include <linux/module.h> 52#include <linux/module.h>
53#include <linux/mutex.h>
54#include <linux/file.h> 54#include <linux/file.h>
55#include <linux/swap.h> 55#include <linux/swap.h>
56#include <linux/errno.h> 56#include <linux/errno.h>
@@ -147,17 +147,6 @@
147#define SYNCHRONIZE() barrier() 147#define SYNCHRONIZE() barrier()
148#define __return_address __builtin_return_address(0) 148#define __return_address __builtin_return_address(0)
149 149
150/*
151 * IRIX (BSD) quotactl makes use of separate commands for user/group,
152 * whereas on Linux the syscall encodes this information into the cmd
153 * field (see the QCMD macro in quota.h). These macros help keep the
154 * code portable - they are not visible from the syscall interface.
155 */
156#define Q_XSETGQLIM XQM_CMD(8) /* set groups disk limits */
157#define Q_XGETGQUOTA XQM_CMD(9) /* get groups disk limits */
158#define Q_XSETPQLIM XQM_CMD(10) /* set projects disk limits */
159#define Q_XGETPQUOTA XQM_CMD(11) /* get projects disk limits */
160
161#define dfltprid 0 150#define dfltprid 0
162#define MAXPATHLEN 1024 151#define MAXPATHLEN 1024
163 152
diff --git a/fs/xfs/linux-2.6/xfs_quotaops.c b/fs/xfs/linux-2.6/xfs_quotaops.c
new file mode 100644
index 000000000000..94d9a633d3d9
--- /dev/null
+++ b/fs/xfs/linux-2.6/xfs_quotaops.c
@@ -0,0 +1,157 @@
1/*
2 * Copyright (c) 2008, Christoph Hellwig
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18#include "xfs.h"
19#include "xfs_dmapi.h"
20#include "xfs_sb.h"
21#include "xfs_inum.h"
22#include "xfs_ag.h"
23#include "xfs_mount.h"
24#include "xfs_quota.h"
25#include "xfs_log.h"
26#include "xfs_trans.h"
27#include "xfs_bmap_btree.h"
28#include "xfs_inode.h"
29#include "quota/xfs_qm.h"
30#include <linux/quota.h>
31
32
33STATIC int
34xfs_quota_type(int type)
35{
36 switch (type) {
37 case USRQUOTA:
38 return XFS_DQ_USER;
39 case GRPQUOTA:
40 return XFS_DQ_GROUP;
41 default:
42 return XFS_DQ_PROJ;
43 }
44}
45
46STATIC int
47xfs_fs_quota_sync(
48 struct super_block *sb,
49 int type)
50{
51 struct xfs_mount *mp = XFS_M(sb);
52
53 if (!XFS_IS_QUOTA_RUNNING(mp))
54 return -ENOSYS;
55 return -xfs_sync_inodes(mp, SYNC_DELWRI);
56}
57
58STATIC int
59xfs_fs_get_xstate(
60 struct super_block *sb,
61 struct fs_quota_stat *fqs)
62{
63 struct xfs_mount *mp = XFS_M(sb);
64
65 if (!XFS_IS_QUOTA_RUNNING(mp))
66 return -ENOSYS;
67 return -xfs_qm_scall_getqstat(mp, fqs);
68}
69
70STATIC int
71xfs_fs_set_xstate(
72 struct super_block *sb,
73 unsigned int uflags,
74 int op)
75{
76 struct xfs_mount *mp = XFS_M(sb);
77 unsigned int flags = 0;
78
79 if (sb->s_flags & MS_RDONLY)
80 return -EROFS;
81 if (!XFS_IS_QUOTA_RUNNING(mp))
82 return -ENOSYS;
83 if (!capable(CAP_SYS_ADMIN))
84 return -EPERM;
85
86 if (uflags & XFS_QUOTA_UDQ_ACCT)
87 flags |= XFS_UQUOTA_ACCT;
88 if (uflags & XFS_QUOTA_PDQ_ACCT)
89 flags |= XFS_PQUOTA_ACCT;
90 if (uflags & XFS_QUOTA_GDQ_ACCT)
91 flags |= XFS_GQUOTA_ACCT;
92 if (uflags & XFS_QUOTA_UDQ_ENFD)
93 flags |= XFS_UQUOTA_ENFD;
94 if (uflags & (XFS_QUOTA_PDQ_ENFD|XFS_QUOTA_GDQ_ENFD))
95 flags |= XFS_OQUOTA_ENFD;
96
97 switch (op) {
98 case Q_XQUOTAON:
99 return -xfs_qm_scall_quotaon(mp, flags);
100 case Q_XQUOTAOFF:
101 if (!XFS_IS_QUOTA_ON(mp))
102 return -EINVAL;
103 return -xfs_qm_scall_quotaoff(mp, flags);
104 case Q_XQUOTARM:
105 if (XFS_IS_QUOTA_ON(mp))
106 return -EINVAL;
107 return -xfs_qm_scall_trunc_qfiles(mp, flags);
108 }
109
110 return -EINVAL;
111}
112
113STATIC int
114xfs_fs_get_xquota(
115 struct super_block *sb,
116 int type,
117 qid_t id,
118 struct fs_disk_quota *fdq)
119{
120 struct xfs_mount *mp = XFS_M(sb);
121
122 if (!XFS_IS_QUOTA_RUNNING(mp))
123 return -ENOSYS;
124 if (!XFS_IS_QUOTA_ON(mp))
125 return -ESRCH;
126
127 return -xfs_qm_scall_getquota(mp, id, xfs_quota_type(type), fdq);
128}
129
130STATIC int
131xfs_fs_set_xquota(
132 struct super_block *sb,
133 int type,
134 qid_t id,
135 struct fs_disk_quota *fdq)
136{
137 struct xfs_mount *mp = XFS_M(sb);
138
139 if (sb->s_flags & MS_RDONLY)
140 return -EROFS;
141 if (!XFS_IS_QUOTA_RUNNING(mp))
142 return -ENOSYS;
143 if (!XFS_IS_QUOTA_ON(mp))
144 return -ESRCH;
145 if (!capable(CAP_SYS_ADMIN))
146 return -EPERM;
147
148 return -xfs_qm_scall_setqlim(mp, id, xfs_quota_type(type), fdq);
149}
150
151struct quotactl_ops xfs_quotactl_operations = {
152 .quota_sync = xfs_fs_quota_sync,
153 .get_xstate = xfs_fs_get_xstate,
154 .set_xstate = xfs_fs_set_xstate,
155 .get_xquota = xfs_fs_get_xquota,
156 .set_xquota = xfs_fs_set_xquota,
157};
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index 32ae5028e96b..bb685269f832 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -68,7 +68,6 @@
68#include <linux/freezer.h> 68#include <linux/freezer.h>
69#include <linux/parser.h> 69#include <linux/parser.h>
70 70
71static struct quotactl_ops xfs_quotactl_operations;
72static struct super_operations xfs_super_operations; 71static struct super_operations xfs_super_operations;
73static kmem_zone_t *xfs_ioend_zone; 72static kmem_zone_t *xfs_ioend_zone;
74mempool_t *xfs_ioend_pool; 73mempool_t *xfs_ioend_pool;
@@ -79,7 +78,6 @@ mempool_t *xfs_ioend_pool;
79#define MNTOPT_RTDEV "rtdev" /* realtime I/O device */ 78#define MNTOPT_RTDEV "rtdev" /* realtime I/O device */
80#define MNTOPT_BIOSIZE "biosize" /* log2 of preferred buffered io size */ 79#define MNTOPT_BIOSIZE "biosize" /* log2 of preferred buffered io size */
81#define MNTOPT_WSYNC "wsync" /* safe-mode nfs compatible mount */ 80#define MNTOPT_WSYNC "wsync" /* safe-mode nfs compatible mount */
82#define MNTOPT_INO64 "ino64" /* force inodes into 64-bit range */
83#define MNTOPT_NOALIGN "noalign" /* turn off stripe alignment */ 81#define MNTOPT_NOALIGN "noalign" /* turn off stripe alignment */
84#define MNTOPT_SWALLOC "swalloc" /* turn on stripe width allocation */ 82#define MNTOPT_SWALLOC "swalloc" /* turn on stripe width allocation */
85#define MNTOPT_SUNIT "sunit" /* data volume stripe unit */ 83#define MNTOPT_SUNIT "sunit" /* data volume stripe unit */
@@ -180,7 +178,7 @@ xfs_parseargs(
180 int dswidth = 0; 178 int dswidth = 0;
181 int iosize = 0; 179 int iosize = 0;
182 int dmapi_implies_ikeep = 1; 180 int dmapi_implies_ikeep = 1;
183 uchar_t iosizelog = 0; 181 __uint8_t iosizelog = 0;
184 182
185 /* 183 /*
186 * Copy binary VFS mount flags we are interested in. 184 * Copy binary VFS mount flags we are interested in.
@@ -291,16 +289,6 @@ xfs_parseargs(
291 mp->m_flags |= XFS_MOUNT_OSYNCISOSYNC; 289 mp->m_flags |= XFS_MOUNT_OSYNCISOSYNC;
292 } else if (!strcmp(this_char, MNTOPT_NORECOVERY)) { 290 } else if (!strcmp(this_char, MNTOPT_NORECOVERY)) {
293 mp->m_flags |= XFS_MOUNT_NORECOVERY; 291 mp->m_flags |= XFS_MOUNT_NORECOVERY;
294 } else if (!strcmp(this_char, MNTOPT_INO64)) {
295#if XFS_BIG_INUMS
296 mp->m_flags |= XFS_MOUNT_INO64;
297 mp->m_inoadd = XFS_INO64_OFFSET;
298#else
299 cmn_err(CE_WARN,
300 "XFS: %s option not allowed on this system",
301 this_char);
302 return EINVAL;
303#endif
304 } else if (!strcmp(this_char, MNTOPT_NOALIGN)) { 292 } else if (!strcmp(this_char, MNTOPT_NOALIGN)) {
305 mp->m_flags |= XFS_MOUNT_NOALIGN; 293 mp->m_flags |= XFS_MOUNT_NOALIGN;
306 } else if (!strcmp(this_char, MNTOPT_SWALLOC)) { 294 } else if (!strcmp(this_char, MNTOPT_SWALLOC)) {
@@ -529,7 +517,6 @@ xfs_showargs(
529 /* the few simple ones we can get from the mount struct */ 517 /* the few simple ones we can get from the mount struct */
530 { XFS_MOUNT_IKEEP, "," MNTOPT_IKEEP }, 518 { XFS_MOUNT_IKEEP, "," MNTOPT_IKEEP },
531 { XFS_MOUNT_WSYNC, "," MNTOPT_WSYNC }, 519 { XFS_MOUNT_WSYNC, "," MNTOPT_WSYNC },
532 { XFS_MOUNT_INO64, "," MNTOPT_INO64 },
533 { XFS_MOUNT_NOALIGN, "," MNTOPT_NOALIGN }, 520 { XFS_MOUNT_NOALIGN, "," MNTOPT_NOALIGN },
534 { XFS_MOUNT_SWALLOC, "," MNTOPT_SWALLOC }, 521 { XFS_MOUNT_SWALLOC, "," MNTOPT_SWALLOC },
535 { XFS_MOUNT_NOUUID, "," MNTOPT_NOUUID }, 522 { XFS_MOUNT_NOUUID, "," MNTOPT_NOUUID },
@@ -634,7 +621,7 @@ xfs_max_file_offset(
634 return (((__uint64_t)pagefactor) << bitshift) - 1; 621 return (((__uint64_t)pagefactor) << bitshift) - 1;
635} 622}
636 623
637int 624STATIC int
638xfs_blkdev_get( 625xfs_blkdev_get(
639 xfs_mount_t *mp, 626 xfs_mount_t *mp,
640 const char *name, 627 const char *name,
@@ -651,7 +638,7 @@ xfs_blkdev_get(
651 return -error; 638 return -error;
652} 639}
653 640
654void 641STATIC void
655xfs_blkdev_put( 642xfs_blkdev_put(
656 struct block_device *bdev) 643 struct block_device *bdev)
657{ 644{
@@ -872,7 +859,7 @@ xfsaild_wakeup(
872 wake_up_process(ailp->xa_task); 859 wake_up_process(ailp->xa_task);
873} 860}
874 861
875int 862STATIC int
876xfsaild( 863xfsaild(
877 void *data) 864 void *data)
878{ 865{
@@ -990,26 +977,57 @@ xfs_fs_write_inode(
990 int sync) 977 int sync)
991{ 978{
992 struct xfs_inode *ip = XFS_I(inode); 979 struct xfs_inode *ip = XFS_I(inode);
980 struct xfs_mount *mp = ip->i_mount;
993 int error = 0; 981 int error = 0;
994 int flags = 0;
995 982
996 xfs_itrace_entry(ip); 983 xfs_itrace_entry(ip);
984
985 if (XFS_FORCED_SHUTDOWN(mp))
986 return XFS_ERROR(EIO);
987
997 if (sync) { 988 if (sync) {
998 error = xfs_wait_on_pages(ip, 0, -1); 989 error = xfs_wait_on_pages(ip, 0, -1);
999 if (error) 990 if (error)
1000 goto out_error; 991 goto out;
1001 flags |= FLUSH_SYNC;
1002 } 992 }
1003 error = xfs_inode_flush(ip, flags);
1004 993
1005out_error: 994 /*
995 * Bypass inodes which have already been cleaned by
996 * the inode flush clustering code inside xfs_iflush
997 */
998 if (xfs_inode_clean(ip))
999 goto out;
1000
1001 /*
1002 * We make this non-blocking if the inode is contended, return
1003 * EAGAIN to indicate to the caller that they did not succeed.
1004 * This prevents the flush path from blocking on inodes inside
1005 * another operation right now, they get caught later by xfs_sync.
1006 */
1007 if (sync) {
1008 xfs_ilock(ip, XFS_ILOCK_SHARED);
1009 xfs_iflock(ip);
1010
1011 error = xfs_iflush(ip, XFS_IFLUSH_SYNC);
1012 } else {
1013 error = EAGAIN;
1014 if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED))
1015 goto out;
1016 if (xfs_ipincount(ip) || !xfs_iflock_nowait(ip))
1017 goto out_unlock;
1018
1019 error = xfs_iflush(ip, XFS_IFLUSH_ASYNC_NOBLOCK);
1020 }
1021
1022 out_unlock:
1023 xfs_iunlock(ip, XFS_ILOCK_SHARED);
1024 out:
1006 /* 1025 /*
1007 * if we failed to write out the inode then mark 1026 * if we failed to write out the inode then mark
1008 * it dirty again so we'll try again later. 1027 * it dirty again so we'll try again later.
1009 */ 1028 */
1010 if (error) 1029 if (error)
1011 xfs_mark_inode_dirty_sync(ip); 1030 xfs_mark_inode_dirty_sync(ip);
1012
1013 return -error; 1031 return -error;
1014} 1032}
1015 1033
@@ -1169,18 +1187,12 @@ xfs_fs_statfs(
1169 statp->f_bfree = statp->f_bavail = 1187 statp->f_bfree = statp->f_bavail =
1170 sbp->sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp); 1188 sbp->sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
1171 fakeinos = statp->f_bfree << sbp->sb_inopblog; 1189 fakeinos = statp->f_bfree << sbp->sb_inopblog;
1172#if XFS_BIG_INUMS
1173 fakeinos += mp->m_inoadd;
1174#endif
1175 statp->f_files = 1190 statp->f_files =
1176 MIN(sbp->sb_icount + fakeinos, (__uint64_t)XFS_MAXINUMBER); 1191 MIN(sbp->sb_icount + fakeinos, (__uint64_t)XFS_MAXINUMBER);
1177 if (mp->m_maxicount) 1192 if (mp->m_maxicount)
1178#if XFS_BIG_INUMS 1193 statp->f_files = min_t(typeof(statp->f_files),
1179 if (!mp->m_inoadd) 1194 statp->f_files,
1180#endif 1195 mp->m_maxicount);
1181 statp->f_files = min_t(typeof(statp->f_files),
1182 statp->f_files,
1183 mp->m_maxicount);
1184 statp->f_ffree = statp->f_files - (sbp->sb_icount - sbp->sb_ifree); 1196 statp->f_ffree = statp->f_files - (sbp->sb_icount - sbp->sb_ifree);
1185 spin_unlock(&mp->m_sb_lock); 1197 spin_unlock(&mp->m_sb_lock);
1186 1198
@@ -1302,57 +1314,6 @@ xfs_fs_show_options(
1302 return -xfs_showargs(XFS_M(mnt->mnt_sb), m); 1314 return -xfs_showargs(XFS_M(mnt->mnt_sb), m);
1303} 1315}
1304 1316
1305STATIC int
1306xfs_fs_quotasync(
1307 struct super_block *sb,
1308 int type)
1309{
1310 return -XFS_QM_QUOTACTL(XFS_M(sb), Q_XQUOTASYNC, 0, NULL);
1311}
1312
1313STATIC int
1314xfs_fs_getxstate(
1315 struct super_block *sb,
1316 struct fs_quota_stat *fqs)
1317{
1318 return -XFS_QM_QUOTACTL(XFS_M(sb), Q_XGETQSTAT, 0, (caddr_t)fqs);
1319}
1320
1321STATIC int
1322xfs_fs_setxstate(
1323 struct super_block *sb,
1324 unsigned int flags,
1325 int op)
1326{
1327 return -XFS_QM_QUOTACTL(XFS_M(sb), op, 0, (caddr_t)&flags);
1328}
1329
1330STATIC int
1331xfs_fs_getxquota(
1332 struct super_block *sb,
1333 int type,
1334 qid_t id,
1335 struct fs_disk_quota *fdq)
1336{
1337 return -XFS_QM_QUOTACTL(XFS_M(sb),
1338 (type == USRQUOTA) ? Q_XGETQUOTA :
1339 ((type == GRPQUOTA) ? Q_XGETGQUOTA :
1340 Q_XGETPQUOTA), id, (caddr_t)fdq);
1341}
1342
1343STATIC int
1344xfs_fs_setxquota(
1345 struct super_block *sb,
1346 int type,
1347 qid_t id,
1348 struct fs_disk_quota *fdq)
1349{
1350 return -XFS_QM_QUOTACTL(XFS_M(sb),
1351 (type == USRQUOTA) ? Q_XSETQLIM :
1352 ((type == GRPQUOTA) ? Q_XSETGQLIM :
1353 Q_XSETPQLIM), id, (caddr_t)fdq);
1354}
1355
1356/* 1317/*
1357 * This function fills in xfs_mount_t fields based on mount args. 1318 * This function fills in xfs_mount_t fields based on mount args.
1358 * Note: the superblock _has_ now been read in. 1319 * Note: the superblock _has_ now been read in.
@@ -1435,7 +1396,9 @@ xfs_fs_fill_super(
1435 sb_min_blocksize(sb, BBSIZE); 1396 sb_min_blocksize(sb, BBSIZE);
1436 sb->s_xattr = xfs_xattr_handlers; 1397 sb->s_xattr = xfs_xattr_handlers;
1437 sb->s_export_op = &xfs_export_operations; 1398 sb->s_export_op = &xfs_export_operations;
1399#ifdef CONFIG_XFS_QUOTA
1438 sb->s_qcop = &xfs_quotactl_operations; 1400 sb->s_qcop = &xfs_quotactl_operations;
1401#endif
1439 sb->s_op = &xfs_super_operations; 1402 sb->s_op = &xfs_super_operations;
1440 1403
1441 error = xfs_dmops_get(mp); 1404 error = xfs_dmops_get(mp);
@@ -1578,14 +1541,6 @@ static struct super_operations xfs_super_operations = {
1578 .show_options = xfs_fs_show_options, 1541 .show_options = xfs_fs_show_options,
1579}; 1542};
1580 1543
1581static struct quotactl_ops xfs_quotactl_operations = {
1582 .quota_sync = xfs_fs_quotasync,
1583 .get_xstate = xfs_fs_getxstate,
1584 .set_xstate = xfs_fs_setxstate,
1585 .get_xquota = xfs_fs_getxquota,
1586 .set_xquota = xfs_fs_setxquota,
1587};
1588
1589static struct file_system_type xfs_fs_type = { 1544static struct file_system_type xfs_fs_type = {
1590 .owner = THIS_MODULE, 1545 .owner = THIS_MODULE,
1591 .name = "xfs", 1546 .name = "xfs",
diff --git a/fs/xfs/linux-2.6/xfs_super.h b/fs/xfs/linux-2.6/xfs_super.h
index d5d776d4cd67..5a2ea3a21781 100644
--- a/fs/xfs/linux-2.6/xfs_super.h
+++ b/fs/xfs/linux-2.6/xfs_super.h
@@ -93,6 +93,7 @@ extern void xfs_blkdev_issue_flush(struct xfs_buftarg *);
93 93
94extern const struct export_operations xfs_export_operations; 94extern const struct export_operations xfs_export_operations;
95extern struct xattr_handler *xfs_xattr_handlers[]; 95extern struct xattr_handler *xfs_xattr_handlers[];
96extern struct quotactl_ops xfs_quotactl_operations;
96 97
97#define XFS_M(sb) ((struct xfs_mount *)((sb)->s_fs_info)) 98#define XFS_M(sb) ((struct xfs_mount *)((sb)->s_fs_info))
98 99
diff --git a/fs/xfs/linux-2.6/xfs_sync.h b/fs/xfs/linux-2.6/xfs_sync.h
index 5f6de1efe1f6..04f058c848ae 100644
--- a/fs/xfs/linux-2.6/xfs_sync.h
+++ b/fs/xfs/linux-2.6/xfs_sync.h
@@ -19,6 +19,7 @@
19#define XFS_SYNC_H 1 19#define XFS_SYNC_H 1
20 20
21struct xfs_mount; 21struct xfs_mount;
22struct xfs_perag;
22 23
23typedef struct bhv_vfs_sync_work { 24typedef struct bhv_vfs_sync_work {
24 struct list_head w_list; 25 struct list_head w_list;
diff --git a/fs/xfs/linux-2.6/xfs_vnode.h b/fs/xfs/linux-2.6/xfs_vnode.h
index f65983a230d3..ad7fbead4c97 100644
--- a/fs/xfs/linux-2.6/xfs_vnode.h
+++ b/fs/xfs/linux-2.6/xfs_vnode.h
@@ -41,11 +41,6 @@ struct attrlist_cursor_kern;
41#define IO_INVIS 0x00020 /* don't update inode timestamps */ 41#define IO_INVIS 0x00020 /* don't update inode timestamps */
42 42
43/* 43/*
44 * Flags for xfs_inode_flush
45 */
46#define FLUSH_SYNC 1 /* wait for flush to complete */
47
48/*
49 * Flush/Invalidate options for vop_toss/flush/flushinval_pages. 44 * Flush/Invalidate options for vop_toss/flush/flushinval_pages.
50 */ 45 */
51#define FI_NONE 0 /* none */ 46#define FI_NONE 0 /* none */
@@ -55,33 +50,6 @@ struct attrlist_cursor_kern;
55 the operation completes. */ 50 the operation completes. */
56 51
57/* 52/*
58 * Dealing with bad inodes
59 */
60static inline int VN_BAD(struct inode *vp)
61{
62 return is_bad_inode(vp);
63}
64
65/*
66 * Extracting atime values in various formats
67 */
68static inline void vn_atime_to_bstime(struct inode *vp, xfs_bstime_t *bs_atime)
69{
70 bs_atime->tv_sec = vp->i_atime.tv_sec;
71 bs_atime->tv_nsec = vp->i_atime.tv_nsec;
72}
73
74static inline void vn_atime_to_timespec(struct inode *vp, struct timespec *ts)
75{
76 *ts = vp->i_atime;
77}
78
79static inline void vn_atime_to_time_t(struct inode *vp, time_t *tt)
80{
81 *tt = vp->i_atime.tv_sec;
82}
83
84/*
85 * Some useful predicates. 53 * Some useful predicates.
86 */ 54 */
87#define VN_MAPPED(vp) mapping_mapped(vp->i_mapping) 55#define VN_MAPPED(vp) mapping_mapped(vp->i_mapping)
diff --git a/fs/xfs/quota/xfs_dquot.c b/fs/xfs/quota/xfs_dquot.c
index 6543c0b29753..e4babcc63423 100644
--- a/fs/xfs/quota/xfs_dquot.c
+++ b/fs/xfs/quota/xfs_dquot.c
@@ -804,7 +804,7 @@ xfs_qm_dqlookup(
804 uint flist_locked; 804 uint flist_locked;
805 xfs_dquot_t *d; 805 xfs_dquot_t *d;
806 806
807 ASSERT(XFS_DQ_IS_HASH_LOCKED(qh)); 807 ASSERT(mutex_is_locked(&qh->qh_lock));
808 808
809 flist_locked = B_FALSE; 809 flist_locked = B_FALSE;
810 810
@@ -877,7 +877,7 @@ xfs_qm_dqlookup(
877 /* 877 /*
878 * move the dquot to the front of the hashchain 878 * move the dquot to the front of the hashchain
879 */ 879 */
880 ASSERT(XFS_DQ_IS_HASH_LOCKED(qh)); 880 ASSERT(mutex_is_locked(&qh->qh_lock));
881 if (dqp->HL_PREVP != &qh->qh_next) { 881 if (dqp->HL_PREVP != &qh->qh_next) {
882 xfs_dqtrace_entry(dqp, 882 xfs_dqtrace_entry(dqp,
883 "DQLOOKUP: HASH MOVETOFRONT"); 883 "DQLOOKUP: HASH MOVETOFRONT");
@@ -892,13 +892,13 @@ xfs_qm_dqlookup(
892 } 892 }
893 xfs_dqtrace_entry(dqp, "LOOKUP END"); 893 xfs_dqtrace_entry(dqp, "LOOKUP END");
894 *O_dqpp = dqp; 894 *O_dqpp = dqp;
895 ASSERT(XFS_DQ_IS_HASH_LOCKED(qh)); 895 ASSERT(mutex_is_locked(&qh->qh_lock));
896 return (0); 896 return (0);
897 } 897 }
898 } 898 }
899 899
900 *O_dqpp = NULL; 900 *O_dqpp = NULL;
901 ASSERT(XFS_DQ_IS_HASH_LOCKED(qh)); 901 ASSERT(mutex_is_locked(&qh->qh_lock));
902 return (1); 902 return (1);
903} 903}
904 904
@@ -956,7 +956,7 @@ xfs_qm_dqget(
956 ASSERT(ip->i_gdquot == NULL); 956 ASSERT(ip->i_gdquot == NULL);
957 } 957 }
958#endif 958#endif
959 XFS_DQ_HASH_LOCK(h); 959 mutex_lock(&h->qh_lock);
960 960
961 /* 961 /*
962 * Look in the cache (hashtable). 962 * Look in the cache (hashtable).
@@ -971,7 +971,7 @@ xfs_qm_dqget(
971 */ 971 */
972 ASSERT(*O_dqpp); 972 ASSERT(*O_dqpp);
973 ASSERT(XFS_DQ_IS_LOCKED(*O_dqpp)); 973 ASSERT(XFS_DQ_IS_LOCKED(*O_dqpp));
974 XFS_DQ_HASH_UNLOCK(h); 974 mutex_unlock(&h->qh_lock);
975 xfs_dqtrace_entry(*O_dqpp, "DQGET DONE (FROM CACHE)"); 975 xfs_dqtrace_entry(*O_dqpp, "DQGET DONE (FROM CACHE)");
976 return (0); /* success */ 976 return (0); /* success */
977 } 977 }
@@ -991,7 +991,7 @@ xfs_qm_dqget(
991 * we don't keep the lock across a disk read 991 * we don't keep the lock across a disk read
992 */ 992 */
993 version = h->qh_version; 993 version = h->qh_version;
994 XFS_DQ_HASH_UNLOCK(h); 994 mutex_unlock(&h->qh_lock);
995 995
996 /* 996 /*
997 * Allocate the dquot on the kernel heap, and read the ondisk 997 * Allocate the dquot on the kernel heap, and read the ondisk
@@ -1056,7 +1056,7 @@ xfs_qm_dqget(
1056 /* 1056 /*
1057 * Hashlock comes after ilock in lock order 1057 * Hashlock comes after ilock in lock order
1058 */ 1058 */
1059 XFS_DQ_HASH_LOCK(h); 1059 mutex_lock(&h->qh_lock);
1060 if (version != h->qh_version) { 1060 if (version != h->qh_version) {
1061 xfs_dquot_t *tmpdqp; 1061 xfs_dquot_t *tmpdqp;
1062 /* 1062 /*
@@ -1072,7 +1072,7 @@ xfs_qm_dqget(
1072 * and start over. 1072 * and start over.
1073 */ 1073 */
1074 xfs_qm_dqput(tmpdqp); 1074 xfs_qm_dqput(tmpdqp);
1075 XFS_DQ_HASH_UNLOCK(h); 1075 mutex_unlock(&h->qh_lock);
1076 xfs_qm_dqdestroy(dqp); 1076 xfs_qm_dqdestroy(dqp);
1077 XQM_STATS_INC(xqmstats.xs_qm_dquot_dups); 1077 XQM_STATS_INC(xqmstats.xs_qm_dquot_dups);
1078 goto again; 1078 goto again;
@@ -1083,7 +1083,7 @@ xfs_qm_dqget(
1083 * Put the dquot at the beginning of the hash-chain and mp's list 1083 * Put the dquot at the beginning of the hash-chain and mp's list
1084 * LOCK ORDER: hashlock, freelistlock, mplistlock, udqlock, gdqlock .. 1084 * LOCK ORDER: hashlock, freelistlock, mplistlock, udqlock, gdqlock ..
1085 */ 1085 */
1086 ASSERT(XFS_DQ_IS_HASH_LOCKED(h)); 1086 ASSERT(mutex_is_locked(&h->qh_lock));
1087 dqp->q_hash = h; 1087 dqp->q_hash = h;
1088 XQM_HASHLIST_INSERT(h, dqp); 1088 XQM_HASHLIST_INSERT(h, dqp);
1089 1089
@@ -1102,7 +1102,7 @@ xfs_qm_dqget(
1102 XQM_MPLIST_INSERT(&(XFS_QI_MPL_LIST(mp)), dqp); 1102 XQM_MPLIST_INSERT(&(XFS_QI_MPL_LIST(mp)), dqp);
1103 1103
1104 xfs_qm_mplist_unlock(mp); 1104 xfs_qm_mplist_unlock(mp);
1105 XFS_DQ_HASH_UNLOCK(h); 1105 mutex_unlock(&h->qh_lock);
1106 dqret: 1106 dqret:
1107 ASSERT((ip == NULL) || xfs_isilocked(ip, XFS_ILOCK_EXCL)); 1107 ASSERT((ip == NULL) || xfs_isilocked(ip, XFS_ILOCK_EXCL));
1108 xfs_dqtrace_entry(dqp, "DQGET DONE"); 1108 xfs_dqtrace_entry(dqp, "DQGET DONE");
@@ -1440,7 +1440,7 @@ xfs_qm_dqpurge(
1440 xfs_mount_t *mp = dqp->q_mount; 1440 xfs_mount_t *mp = dqp->q_mount;
1441 1441
1442 ASSERT(XFS_QM_IS_MPLIST_LOCKED(mp)); 1442 ASSERT(XFS_QM_IS_MPLIST_LOCKED(mp));
1443 ASSERT(XFS_DQ_IS_HASH_LOCKED(dqp->q_hash)); 1443 ASSERT(mutex_is_locked(&dqp->q_hash->qh_lock));
1444 1444
1445 xfs_dqlock(dqp); 1445 xfs_dqlock(dqp);
1446 /* 1446 /*
@@ -1453,7 +1453,7 @@ xfs_qm_dqpurge(
1453 */ 1453 */
1454 if (dqp->q_nrefs != 0) { 1454 if (dqp->q_nrefs != 0) {
1455 xfs_dqunlock(dqp); 1455 xfs_dqunlock(dqp);
1456 XFS_DQ_HASH_UNLOCK(dqp->q_hash); 1456 mutex_unlock(&dqp->q_hash->qh_lock);
1457 return (1); 1457 return (1);
1458 } 1458 }
1459 1459
@@ -1517,7 +1517,7 @@ xfs_qm_dqpurge(
1517 memset(&dqp->q_core, 0, sizeof(dqp->q_core)); 1517 memset(&dqp->q_core, 0, sizeof(dqp->q_core));
1518 xfs_dqfunlock(dqp); 1518 xfs_dqfunlock(dqp);
1519 xfs_dqunlock(dqp); 1519 xfs_dqunlock(dqp);
1520 XFS_DQ_HASH_UNLOCK(thishash); 1520 mutex_unlock(&thishash->qh_lock);
1521 return (0); 1521 return (0);
1522} 1522}
1523 1523
diff --git a/fs/xfs/quota/xfs_dquot.h b/fs/xfs/quota/xfs_dquot.h
index d443e93b4331..de0f402ddb4c 100644
--- a/fs/xfs/quota/xfs_dquot.h
+++ b/fs/xfs/quota/xfs_dquot.h
@@ -34,7 +34,7 @@
34 */ 34 */
35typedef struct xfs_dqhash { 35typedef struct xfs_dqhash {
36 struct xfs_dquot *qh_next; 36 struct xfs_dquot *qh_next;
37 mutex_t qh_lock; 37 struct mutex qh_lock;
38 uint qh_version; /* ever increasing version */ 38 uint qh_version; /* ever increasing version */
39 uint qh_nelems; /* number of dquots on the list */ 39 uint qh_nelems; /* number of dquots on the list */
40} xfs_dqhash_t; 40} xfs_dqhash_t;
@@ -81,7 +81,7 @@ typedef struct xfs_dquot {
81 xfs_qcnt_t q_res_bcount; /* total regular nblks used+reserved */ 81 xfs_qcnt_t q_res_bcount; /* total regular nblks used+reserved */
82 xfs_qcnt_t q_res_icount; /* total inos allocd+reserved */ 82 xfs_qcnt_t q_res_icount; /* total inos allocd+reserved */
83 xfs_qcnt_t q_res_rtbcount;/* total realtime blks used+reserved */ 83 xfs_qcnt_t q_res_rtbcount;/* total realtime blks used+reserved */
84 mutex_t q_qlock; /* quota lock */ 84 struct mutex q_qlock; /* quota lock */
85 struct completion q_flush; /* flush completion queue */ 85 struct completion q_flush; /* flush completion queue */
86 atomic_t q_pincount; /* dquot pin count */ 86 atomic_t q_pincount; /* dquot pin count */
87 wait_queue_head_t q_pinwait; /* dquot pinning wait queue */ 87 wait_queue_head_t q_pinwait; /* dquot pinning wait queue */
@@ -109,19 +109,6 @@ enum {
109 109
110#define XFS_DQHOLD(dqp) ((dqp)->q_nrefs++) 110#define XFS_DQHOLD(dqp) ((dqp)->q_nrefs++)
111 111
112#ifdef DEBUG
113static inline int
114XFS_DQ_IS_LOCKED(xfs_dquot_t *dqp)
115{
116 if (mutex_trylock(&dqp->q_qlock)) {
117 mutex_unlock(&dqp->q_qlock);
118 return 0;
119 }
120 return 1;
121}
122#endif
123
124
125/* 112/*
126 * Manage the q_flush completion queue embedded in the dquot. This completion 113 * Manage the q_flush completion queue embedded in the dquot. This completion
127 * queue synchronizes processes attempting to flush the in-core dquot back to 114 * queue synchronizes processes attempting to flush the in-core dquot back to
@@ -142,6 +129,7 @@ static inline void xfs_dqfunlock(xfs_dquot_t *dqp)
142 complete(&dqp->q_flush); 129 complete(&dqp->q_flush);
143} 130}
144 131
132#define XFS_DQ_IS_LOCKED(dqp) (mutex_is_locked(&((dqp)->q_qlock)))
145#define XFS_DQ_IS_ON_FREELIST(dqp) ((dqp)->dq_flnext != (dqp)) 133#define XFS_DQ_IS_ON_FREELIST(dqp) ((dqp)->dq_flnext != (dqp))
146#define XFS_DQ_IS_DIRTY(dqp) ((dqp)->dq_flags & XFS_DQ_DIRTY) 134#define XFS_DQ_IS_DIRTY(dqp) ((dqp)->dq_flags & XFS_DQ_DIRTY)
147#define XFS_QM_ISUDQ(dqp) ((dqp)->dq_flags & XFS_DQ_USER) 135#define XFS_QM_ISUDQ(dqp) ((dqp)->dq_flags & XFS_DQ_USER)
diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c
index 7a2beb64314f..5b6695049e00 100644
--- a/fs/xfs/quota/xfs_qm.c
+++ b/fs/xfs/quota/xfs_qm.c
@@ -55,7 +55,7 @@
55 * quota functionality, including maintaining the freelist and hash 55 * quota functionality, including maintaining the freelist and hash
56 * tables of dquots. 56 * tables of dquots.
57 */ 57 */
58mutex_t xfs_Gqm_lock; 58struct mutex xfs_Gqm_lock;
59struct xfs_qm *xfs_Gqm; 59struct xfs_qm *xfs_Gqm;
60uint ndquot; 60uint ndquot;
61 61
@@ -69,8 +69,6 @@ STATIC void xfs_qm_list_destroy(xfs_dqlist_t *);
69 69
70STATIC void xfs_qm_freelist_init(xfs_frlist_t *); 70STATIC void xfs_qm_freelist_init(xfs_frlist_t *);
71STATIC void xfs_qm_freelist_destroy(xfs_frlist_t *); 71STATIC void xfs_qm_freelist_destroy(xfs_frlist_t *);
72STATIC int xfs_qm_mplist_nowait(xfs_mount_t *);
73STATIC int xfs_qm_dqhashlock_nowait(xfs_dquot_t *);
74 72
75STATIC int xfs_qm_init_quotainos(xfs_mount_t *); 73STATIC int xfs_qm_init_quotainos(xfs_mount_t *);
76STATIC int xfs_qm_init_quotainfo(xfs_mount_t *); 74STATIC int xfs_qm_init_quotainfo(xfs_mount_t *);
@@ -82,7 +80,7 @@ static struct shrinker xfs_qm_shaker = {
82}; 80};
83 81
84#ifdef DEBUG 82#ifdef DEBUG
85extern mutex_t qcheck_lock; 83extern struct mutex qcheck_lock;
86#endif 84#endif
87 85
88#ifdef QUOTADEBUG 86#ifdef QUOTADEBUG
@@ -219,7 +217,7 @@ xfs_qm_hold_quotafs_ref(
219 * the structure could disappear between the entry to this routine and 217 * the structure could disappear between the entry to this routine and
220 * a HOLD operation if not locked. 218 * a HOLD operation if not locked.
221 */ 219 */
222 XFS_QM_LOCK(xfs_Gqm); 220 mutex_lock(&xfs_Gqm_lock);
223 221
224 if (xfs_Gqm == NULL) 222 if (xfs_Gqm == NULL)
225 xfs_Gqm = xfs_Gqm_init(); 223 xfs_Gqm = xfs_Gqm_init();
@@ -228,8 +226,8 @@ xfs_qm_hold_quotafs_ref(
228 * debugging and statistical purposes, but ... 226 * debugging and statistical purposes, but ...
229 * Just take a reference and get out. 227 * Just take a reference and get out.
230 */ 228 */
231 XFS_QM_HOLD(xfs_Gqm); 229 xfs_Gqm->qm_nrefs++;
232 XFS_QM_UNLOCK(xfs_Gqm); 230 mutex_unlock(&xfs_Gqm_lock);
233 231
234 return 0; 232 return 0;
235} 233}
@@ -277,13 +275,12 @@ xfs_qm_rele_quotafs_ref(
277 * Destroy the entire XQM. If somebody mounts with quotaon, this'll 275 * Destroy the entire XQM. If somebody mounts with quotaon, this'll
278 * be restarted. 276 * be restarted.
279 */ 277 */
280 XFS_QM_LOCK(xfs_Gqm); 278 mutex_lock(&xfs_Gqm_lock);
281 XFS_QM_RELE(xfs_Gqm); 279 if (--xfs_Gqm->qm_nrefs == 0) {
282 if (xfs_Gqm->qm_nrefs == 0) {
283 xfs_qm_destroy(xfs_Gqm); 280 xfs_qm_destroy(xfs_Gqm);
284 xfs_Gqm = NULL; 281 xfs_Gqm = NULL;
285 } 282 }
286 XFS_QM_UNLOCK(xfs_Gqm); 283 mutex_unlock(&xfs_Gqm_lock);
287} 284}
288 285
289/* 286/*
@@ -577,10 +574,10 @@ xfs_qm_dqpurge_int(
577 continue; 574 continue;
578 } 575 }
579 576
580 if (! xfs_qm_dqhashlock_nowait(dqp)) { 577 if (!mutex_trylock(&dqp->q_hash->qh_lock)) {
581 nrecl = XFS_QI_MPLRECLAIMS(mp); 578 nrecl = XFS_QI_MPLRECLAIMS(mp);
582 xfs_qm_mplist_unlock(mp); 579 xfs_qm_mplist_unlock(mp);
583 XFS_DQ_HASH_LOCK(dqp->q_hash); 580 mutex_lock(&dqp->q_hash->qh_lock);
584 xfs_qm_mplist_lock(mp); 581 xfs_qm_mplist_lock(mp);
585 582
586 /* 583 /*
@@ -590,7 +587,7 @@ xfs_qm_dqpurge_int(
590 * this point, but somebody might be taking things off. 587 * this point, but somebody might be taking things off.
591 */ 588 */
592 if (nrecl != XFS_QI_MPLRECLAIMS(mp)) { 589 if (nrecl != XFS_QI_MPLRECLAIMS(mp)) {
593 XFS_DQ_HASH_UNLOCK(dqp->q_hash); 590 mutex_unlock(&dqp->q_hash->qh_lock);
594 goto again; 591 goto again;
595 } 592 }
596 } 593 }
@@ -632,7 +629,6 @@ xfs_qm_dqattach_one(
632 xfs_dqid_t id, 629 xfs_dqid_t id,
633 uint type, 630 uint type,
634 uint doalloc, 631 uint doalloc,
635 uint dolock,
636 xfs_dquot_t *udqhint, /* hint */ 632 xfs_dquot_t *udqhint, /* hint */
637 xfs_dquot_t **IO_idqpp) 633 xfs_dquot_t **IO_idqpp)
638{ 634{
@@ -641,16 +637,16 @@ xfs_qm_dqattach_one(
641 637
642 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); 638 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
643 error = 0; 639 error = 0;
640
644 /* 641 /*
645 * See if we already have it in the inode itself. IO_idqpp is 642 * See if we already have it in the inode itself. IO_idqpp is
646 * &i_udquot or &i_gdquot. This made the code look weird, but 643 * &i_udquot or &i_gdquot. This made the code look weird, but
647 * made the logic a lot simpler. 644 * made the logic a lot simpler.
648 */ 645 */
649 if ((dqp = *IO_idqpp)) { 646 dqp = *IO_idqpp;
650 if (dolock) 647 if (dqp) {
651 xfs_dqlock(dqp);
652 xfs_dqtrace_entry(dqp, "DQATTACH: found in ip"); 648 xfs_dqtrace_entry(dqp, "DQATTACH: found in ip");
653 goto done; 649 return 0;
654 } 650 }
655 651
656 /* 652 /*
@@ -659,38 +655,38 @@ xfs_qm_dqattach_one(
659 * lookup by dqid (xfs_qm_dqget) by caching a group dquot inside 655 * lookup by dqid (xfs_qm_dqget) by caching a group dquot inside
660 * the user dquot. 656 * the user dquot.
661 */ 657 */
662 ASSERT(!udqhint || type == XFS_DQ_GROUP || type == XFS_DQ_PROJ); 658 if (udqhint) {
663 if (udqhint && !dolock) 659 ASSERT(type == XFS_DQ_GROUP || type == XFS_DQ_PROJ);
664 xfs_dqlock(udqhint); 660 xfs_dqlock(udqhint);
665 661
666 /* 662 /*
667 * No need to take dqlock to look at the id. 663 * No need to take dqlock to look at the id.
668 * The ID can't change until it gets reclaimed, and it won't 664 *
669 * be reclaimed as long as we have a ref from inode and we hold 665 * The ID can't change until it gets reclaimed, and it won't
670 * the ilock. 666 * be reclaimed as long as we have a ref from inode and we
671 */ 667 * hold the ilock.
672 if (udqhint && 668 */
673 (dqp = udqhint->q_gdquot) && 669 dqp = udqhint->q_gdquot;
674 (be32_to_cpu(dqp->q_core.d_id) == id)) { 670 if (dqp && be32_to_cpu(dqp->q_core.d_id) == id) {
675 ASSERT(XFS_DQ_IS_LOCKED(udqhint)); 671 xfs_dqlock(dqp);
676 xfs_dqlock(dqp); 672 XFS_DQHOLD(dqp);
677 XFS_DQHOLD(dqp); 673 ASSERT(*IO_idqpp == NULL);
678 ASSERT(*IO_idqpp == NULL); 674 *IO_idqpp = dqp;
679 *IO_idqpp = dqp; 675
680 if (!dolock) {
681 xfs_dqunlock(dqp); 676 xfs_dqunlock(dqp);
682 xfs_dqunlock(udqhint); 677 xfs_dqunlock(udqhint);
678 return 0;
683 } 679 }
684 goto done; 680
685 } 681 /*
686 /* 682 * We can't hold a dquot lock when we call the dqget code.
687 * We can't hold a dquot lock when we call the dqget code. 683 * We'll deadlock in no time, because of (not conforming to)
688 * We'll deadlock in no time, because of (not conforming to) 684 * lock ordering - the inodelock comes before any dquot lock,
689 * lock ordering - the inodelock comes before any dquot lock, 685 * and we may drop and reacquire the ilock in xfs_qm_dqget().
690 * and we may drop and reacquire the ilock in xfs_qm_dqget(). 686 */
691 */
692 if (udqhint)
693 xfs_dqunlock(udqhint); 687 xfs_dqunlock(udqhint);
688 }
689
694 /* 690 /*
695 * Find the dquot from somewhere. This bumps the 691 * Find the dquot from somewhere. This bumps the
696 * reference count of dquot and returns it locked. 692 * reference count of dquot and returns it locked.
@@ -698,48 +694,19 @@ xfs_qm_dqattach_one(
698 * disk and we didn't ask it to allocate; 694 * disk and we didn't ask it to allocate;
699 * ESRCH if quotas got turned off suddenly. 695 * ESRCH if quotas got turned off suddenly.
700 */ 696 */
701 if ((error = xfs_qm_dqget(ip->i_mount, ip, id, type, 697 error = xfs_qm_dqget(ip->i_mount, ip, id, type, XFS_QMOPT_DOWARN, &dqp);
702 doalloc|XFS_QMOPT_DOWARN, &dqp))) { 698 if (error)
703 if (udqhint && dolock) 699 return error;
704 xfs_dqlock(udqhint);
705 goto done;
706 }
707 700
708 xfs_dqtrace_entry(dqp, "DQATTACH: found by dqget"); 701 xfs_dqtrace_entry(dqp, "DQATTACH: found by dqget");
702
709 /* 703 /*
710 * dqget may have dropped and re-acquired the ilock, but it guarantees 704 * dqget may have dropped and re-acquired the ilock, but it guarantees
711 * that the dquot returned is the one that should go in the inode. 705 * that the dquot returned is the one that should go in the inode.
712 */ 706 */
713 *IO_idqpp = dqp; 707 *IO_idqpp = dqp;
714 ASSERT(dqp); 708 xfs_dqunlock(dqp);
715 ASSERT(XFS_DQ_IS_LOCKED(dqp)); 709 return 0;
716 if (! dolock) {
717 xfs_dqunlock(dqp);
718 goto done;
719 }
720 if (! udqhint)
721 goto done;
722
723 ASSERT(udqhint);
724 ASSERT(dolock);
725 ASSERT(XFS_DQ_IS_LOCKED(dqp));
726 if (! xfs_qm_dqlock_nowait(udqhint)) {
727 xfs_dqunlock(dqp);
728 xfs_dqlock(udqhint);
729 xfs_dqlock(dqp);
730 }
731 done:
732#ifdef QUOTADEBUG
733 if (udqhint) {
734 if (dolock)
735 ASSERT(XFS_DQ_IS_LOCKED(udqhint));
736 }
737 if (! error) {
738 if (dolock)
739 ASSERT(XFS_DQ_IS_LOCKED(dqp));
740 }
741#endif
742 return error;
743} 710}
744 711
745 712
@@ -754,24 +721,15 @@ xfs_qm_dqattach_one(
754STATIC void 721STATIC void
755xfs_qm_dqattach_grouphint( 722xfs_qm_dqattach_grouphint(
756 xfs_dquot_t *udq, 723 xfs_dquot_t *udq,
757 xfs_dquot_t *gdq, 724 xfs_dquot_t *gdq)
758 uint locked)
759{ 725{
760 xfs_dquot_t *tmp; 726 xfs_dquot_t *tmp;
761 727
762#ifdef QUOTADEBUG 728 xfs_dqlock(udq);
763 if (locked) {
764 ASSERT(XFS_DQ_IS_LOCKED(udq));
765 ASSERT(XFS_DQ_IS_LOCKED(gdq));
766 }
767#endif
768 if (! locked)
769 xfs_dqlock(udq);
770 729
771 if ((tmp = udq->q_gdquot)) { 730 if ((tmp = udq->q_gdquot)) {
772 if (tmp == gdq) { 731 if (tmp == gdq) {
773 if (! locked) 732 xfs_dqunlock(udq);
774 xfs_dqunlock(udq);
775 return; 733 return;
776 } 734 }
777 735
@@ -781,8 +739,6 @@ xfs_qm_dqattach_grouphint(
781 * because the freelist lock comes before dqlocks. 739 * because the freelist lock comes before dqlocks.
782 */ 740 */
783 xfs_dqunlock(udq); 741 xfs_dqunlock(udq);
784 if (locked)
785 xfs_dqunlock(gdq);
786 /* 742 /*
787 * we took a hard reference once upon a time in dqget, 743 * we took a hard reference once upon a time in dqget,
788 * so give it back when the udquot no longer points at it 744 * so give it back when the udquot no longer points at it
@@ -795,9 +751,7 @@ xfs_qm_dqattach_grouphint(
795 751
796 } else { 752 } else {
797 ASSERT(XFS_DQ_IS_LOCKED(udq)); 753 ASSERT(XFS_DQ_IS_LOCKED(udq));
798 if (! locked) { 754 xfs_dqlock(gdq);
799 xfs_dqlock(gdq);
800 }
801 } 755 }
802 756
803 ASSERT(XFS_DQ_IS_LOCKED(udq)); 757 ASSERT(XFS_DQ_IS_LOCKED(udq));
@@ -810,10 +764,9 @@ xfs_qm_dqattach_grouphint(
810 XFS_DQHOLD(gdq); 764 XFS_DQHOLD(gdq);
811 udq->q_gdquot = gdq; 765 udq->q_gdquot = gdq;
812 } 766 }
813 if (! locked) { 767
814 xfs_dqunlock(gdq); 768 xfs_dqunlock(gdq);
815 xfs_dqunlock(udq); 769 xfs_dqunlock(udq);
816 }
817} 770}
818 771
819 772
@@ -821,8 +774,6 @@ xfs_qm_dqattach_grouphint(
821 * Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON 774 * Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON
822 * into account. 775 * into account.
823 * If XFS_QMOPT_DQALLOC, the dquot(s) will be allocated if needed. 776 * If XFS_QMOPT_DQALLOC, the dquot(s) will be allocated if needed.
824 * If XFS_QMOPT_DQLOCK, the dquot(s) will be returned locked. This option pretty
825 * much made this code a complete mess, but it has been pretty useful.
826 * If XFS_QMOPT_ILOCKED, then inode sent is already locked EXCL. 777 * If XFS_QMOPT_ILOCKED, then inode sent is already locked EXCL.
827 * Inode may get unlocked and relocked in here, and the caller must deal with 778 * Inode may get unlocked and relocked in here, and the caller must deal with
828 * the consequences. 779 * the consequences.
@@ -851,7 +802,6 @@ xfs_qm_dqattach(
851 if (XFS_IS_UQUOTA_ON(mp)) { 802 if (XFS_IS_UQUOTA_ON(mp)) {
852 error = xfs_qm_dqattach_one(ip, ip->i_d.di_uid, XFS_DQ_USER, 803 error = xfs_qm_dqattach_one(ip, ip->i_d.di_uid, XFS_DQ_USER,
853 flags & XFS_QMOPT_DQALLOC, 804 flags & XFS_QMOPT_DQALLOC,
854 flags & XFS_QMOPT_DQLOCK,
855 NULL, &ip->i_udquot); 805 NULL, &ip->i_udquot);
856 if (error) 806 if (error)
857 goto done; 807 goto done;
@@ -863,11 +813,9 @@ xfs_qm_dqattach(
863 error = XFS_IS_GQUOTA_ON(mp) ? 813 error = XFS_IS_GQUOTA_ON(mp) ?
864 xfs_qm_dqattach_one(ip, ip->i_d.di_gid, XFS_DQ_GROUP, 814 xfs_qm_dqattach_one(ip, ip->i_d.di_gid, XFS_DQ_GROUP,
865 flags & XFS_QMOPT_DQALLOC, 815 flags & XFS_QMOPT_DQALLOC,
866 flags & XFS_QMOPT_DQLOCK,
867 ip->i_udquot, &ip->i_gdquot) : 816 ip->i_udquot, &ip->i_gdquot) :
868 xfs_qm_dqattach_one(ip, ip->i_d.di_projid, XFS_DQ_PROJ, 817 xfs_qm_dqattach_one(ip, ip->i_d.di_projid, XFS_DQ_PROJ,
869 flags & XFS_QMOPT_DQALLOC, 818 flags & XFS_QMOPT_DQALLOC,
870 flags & XFS_QMOPT_DQLOCK,
871 ip->i_udquot, &ip->i_gdquot); 819 ip->i_udquot, &ip->i_gdquot);
872 /* 820 /*
873 * Don't worry about the udquot that we may have 821 * Don't worry about the udquot that we may have
@@ -898,22 +846,13 @@ xfs_qm_dqattach(
898 /* 846 /*
899 * Attach i_gdquot to the gdquot hint inside the i_udquot. 847 * Attach i_gdquot to the gdquot hint inside the i_udquot.
900 */ 848 */
901 xfs_qm_dqattach_grouphint(ip->i_udquot, ip->i_gdquot, 849 xfs_qm_dqattach_grouphint(ip->i_udquot, ip->i_gdquot);
902 flags & XFS_QMOPT_DQLOCK);
903 } 850 }
904 851
905 done: 852 done:
906 853
907#ifdef QUOTADEBUG 854#ifdef QUOTADEBUG
908 if (! error) { 855 if (! error) {
909 if (ip->i_udquot) {
910 if (flags & XFS_QMOPT_DQLOCK)
911 ASSERT(XFS_DQ_IS_LOCKED(ip->i_udquot));
912 }
913 if (ip->i_gdquot) {
914 if (flags & XFS_QMOPT_DQLOCK)
915 ASSERT(XFS_DQ_IS_LOCKED(ip->i_gdquot));
916 }
917 if (XFS_IS_UQUOTA_ON(mp)) 856 if (XFS_IS_UQUOTA_ON(mp))
918 ASSERT(ip->i_udquot); 857 ASSERT(ip->i_udquot);
919 if (XFS_IS_OQUOTA_ON(mp)) 858 if (XFS_IS_OQUOTA_ON(mp))
@@ -2086,7 +2025,7 @@ xfs_qm_shake_freelist(
2086 * a dqlookup process that holds the hashlock that is 2025 * a dqlookup process that holds the hashlock that is
2087 * waiting for the freelist lock. 2026 * waiting for the freelist lock.
2088 */ 2027 */
2089 if (! xfs_qm_dqhashlock_nowait(dqp)) { 2028 if (!mutex_trylock(&dqp->q_hash->qh_lock)) {
2090 xfs_dqfunlock(dqp); 2029 xfs_dqfunlock(dqp);
2091 xfs_dqunlock(dqp); 2030 xfs_dqunlock(dqp);
2092 dqp = dqp->dq_flnext; 2031 dqp = dqp->dq_flnext;
@@ -2103,7 +2042,7 @@ xfs_qm_shake_freelist(
2103 /* XXX put a sentinel so that we can come back here */ 2042 /* XXX put a sentinel so that we can come back here */
2104 xfs_dqfunlock(dqp); 2043 xfs_dqfunlock(dqp);
2105 xfs_dqunlock(dqp); 2044 xfs_dqunlock(dqp);
2106 XFS_DQ_HASH_UNLOCK(hash); 2045 mutex_unlock(&hash->qh_lock);
2107 xfs_qm_freelist_unlock(xfs_Gqm); 2046 xfs_qm_freelist_unlock(xfs_Gqm);
2108 if (++restarts >= XFS_QM_RECLAIM_MAX_RESTARTS) 2047 if (++restarts >= XFS_QM_RECLAIM_MAX_RESTARTS)
2109 return nreclaimed; 2048 return nreclaimed;
@@ -2120,7 +2059,7 @@ xfs_qm_shake_freelist(
2120 XQM_HASHLIST_REMOVE(hash, dqp); 2059 XQM_HASHLIST_REMOVE(hash, dqp);
2121 xfs_dqfunlock(dqp); 2060 xfs_dqfunlock(dqp);
2122 xfs_qm_mplist_unlock(dqp->q_mount); 2061 xfs_qm_mplist_unlock(dqp->q_mount);
2123 XFS_DQ_HASH_UNLOCK(hash); 2062 mutex_unlock(&hash->qh_lock);
2124 2063
2125 off_freelist: 2064 off_freelist:
2126 XQM_FREELIST_REMOVE(dqp); 2065 XQM_FREELIST_REMOVE(dqp);
@@ -2262,7 +2201,7 @@ xfs_qm_dqreclaim_one(void)
2262 continue; 2201 continue;
2263 } 2202 }
2264 2203
2265 if (! xfs_qm_dqhashlock_nowait(dqp)) 2204 if (!mutex_trylock(&dqp->q_hash->qh_lock))
2266 goto mplistunlock; 2205 goto mplistunlock;
2267 2206
2268 ASSERT(dqp->q_nrefs == 0); 2207 ASSERT(dqp->q_nrefs == 0);
@@ -2271,7 +2210,7 @@ xfs_qm_dqreclaim_one(void)
2271 XQM_HASHLIST_REMOVE(dqp->q_hash, dqp); 2210 XQM_HASHLIST_REMOVE(dqp->q_hash, dqp);
2272 XQM_FREELIST_REMOVE(dqp); 2211 XQM_FREELIST_REMOVE(dqp);
2273 dqpout = dqp; 2212 dqpout = dqp;
2274 XFS_DQ_HASH_UNLOCK(dqp->q_hash); 2213 mutex_unlock(&dqp->q_hash->qh_lock);
2275 mplistunlock: 2214 mplistunlock:
2276 xfs_qm_mplist_unlock(dqp->q_mount); 2215 xfs_qm_mplist_unlock(dqp->q_mount);
2277 xfs_dqfunlock(dqp); 2216 xfs_dqfunlock(dqp);
@@ -2774,34 +2713,3 @@ xfs_qm_freelist_append(xfs_frlist_t *ql, xfs_dquot_t *dq)
2774{ 2713{
2775 xfs_qm_freelist_insert((xfs_frlist_t *)ql->qh_prev, dq); 2714 xfs_qm_freelist_insert((xfs_frlist_t *)ql->qh_prev, dq);
2776} 2715}
2777
2778STATIC int
2779xfs_qm_dqhashlock_nowait(
2780 xfs_dquot_t *dqp)
2781{
2782 int locked;
2783
2784 locked = mutex_trylock(&((dqp)->q_hash->qh_lock));
2785 return locked;
2786}
2787
2788int
2789xfs_qm_freelist_lock_nowait(
2790 xfs_qm_t *xqm)
2791{
2792 int locked;
2793
2794 locked = mutex_trylock(&(xqm->qm_dqfreelist.qh_lock));
2795 return locked;
2796}
2797
2798STATIC int
2799xfs_qm_mplist_nowait(
2800 xfs_mount_t *mp)
2801{
2802 int locked;
2803
2804 ASSERT(mp->m_quotainfo);
2805 locked = mutex_trylock(&(XFS_QI_MPLLOCK(mp)));
2806 return locked;
2807}
diff --git a/fs/xfs/quota/xfs_qm.h b/fs/xfs/quota/xfs_qm.h
index ddf09166387c..a371954cae1b 100644
--- a/fs/xfs/quota/xfs_qm.h
+++ b/fs/xfs/quota/xfs_qm.h
@@ -27,7 +27,7 @@ struct xfs_qm;
27struct xfs_inode; 27struct xfs_inode;
28 28
29extern uint ndquot; 29extern uint ndquot;
30extern mutex_t xfs_Gqm_lock; 30extern struct mutex xfs_Gqm_lock;
31extern struct xfs_qm *xfs_Gqm; 31extern struct xfs_qm *xfs_Gqm;
32extern kmem_zone_t *qm_dqzone; 32extern kmem_zone_t *qm_dqzone;
33extern kmem_zone_t *qm_dqtrxzone; 33extern kmem_zone_t *qm_dqtrxzone;
@@ -79,7 +79,7 @@ typedef xfs_dqhash_t xfs_dqlist_t;
79typedef struct xfs_frlist { 79typedef struct xfs_frlist {
80 struct xfs_dquot *qh_next; 80 struct xfs_dquot *qh_next;
81 struct xfs_dquot *qh_prev; 81 struct xfs_dquot *qh_prev;
82 mutex_t qh_lock; 82 struct mutex qh_lock;
83 uint qh_version; 83 uint qh_version;
84 uint qh_nelems; 84 uint qh_nelems;
85} xfs_frlist_t; 85} xfs_frlist_t;
@@ -115,7 +115,7 @@ typedef struct xfs_quotainfo {
115 xfs_qwarncnt_t qi_bwarnlimit; /* limit for blks warnings */ 115 xfs_qwarncnt_t qi_bwarnlimit; /* limit for blks warnings */
116 xfs_qwarncnt_t qi_iwarnlimit; /* limit for inodes warnings */ 116 xfs_qwarncnt_t qi_iwarnlimit; /* limit for inodes warnings */
117 xfs_qwarncnt_t qi_rtbwarnlimit;/* limit for rt blks warnings */ 117 xfs_qwarncnt_t qi_rtbwarnlimit;/* limit for rt blks warnings */
118 mutex_t qi_quotaofflock;/* to serialize quotaoff */ 118 struct mutex qi_quotaofflock;/* to serialize quotaoff */
119 xfs_filblks_t qi_dqchunklen; /* # BBs in a chunk of dqs */ 119 xfs_filblks_t qi_dqchunklen; /* # BBs in a chunk of dqs */
120 uint qi_dqperchunk; /* # ondisk dqs in above chunk */ 120 uint qi_dqperchunk; /* # ondisk dqs in above chunk */
121 xfs_qcnt_t qi_bhardlimit; /* default data blk hard limit */ 121 xfs_qcnt_t qi_bhardlimit; /* default data blk hard limit */
@@ -158,11 +158,6 @@ typedef struct xfs_dquot_acct {
158#define XFS_QM_IWARNLIMIT 5 158#define XFS_QM_IWARNLIMIT 5
159#define XFS_QM_RTBWARNLIMIT 5 159#define XFS_QM_RTBWARNLIMIT 5
160 160
161#define XFS_QM_LOCK(xqm) (mutex_lock(&xqm##_lock))
162#define XFS_QM_UNLOCK(xqm) (mutex_unlock(&xqm##_lock))
163#define XFS_QM_HOLD(xqm) ((xqm)->qm_nrefs++)
164#define XFS_QM_RELE(xqm) ((xqm)->qm_nrefs--)
165
166extern void xfs_qm_destroy_quotainfo(xfs_mount_t *); 161extern void xfs_qm_destroy_quotainfo(xfs_mount_t *);
167extern void xfs_qm_mount_quotas(xfs_mount_t *); 162extern void xfs_qm_mount_quotas(xfs_mount_t *);
168extern int xfs_qm_quotacheck(xfs_mount_t *); 163extern int xfs_qm_quotacheck(xfs_mount_t *);
@@ -178,6 +173,16 @@ extern void xfs_qm_dqdetach(xfs_inode_t *);
178extern int xfs_qm_dqpurge_all(xfs_mount_t *, uint); 173extern int xfs_qm_dqpurge_all(xfs_mount_t *, uint);
179extern void xfs_qm_dqrele_all_inodes(xfs_mount_t *, uint); 174extern void xfs_qm_dqrele_all_inodes(xfs_mount_t *, uint);
180 175
176/* quota ops */
177extern int xfs_qm_scall_trunc_qfiles(xfs_mount_t *, uint);
178extern int xfs_qm_scall_getquota(xfs_mount_t *, xfs_dqid_t, uint,
179 fs_disk_quota_t *);
180extern int xfs_qm_scall_setqlim(xfs_mount_t *, xfs_dqid_t, uint,
181 fs_disk_quota_t *);
182extern int xfs_qm_scall_getqstat(xfs_mount_t *, fs_quota_stat_t *);
183extern int xfs_qm_scall_quotaon(xfs_mount_t *, uint);
184extern int xfs_qm_scall_quotaoff(xfs_mount_t *, uint);
185
181/* vop stuff */ 186/* vop stuff */
182extern int xfs_qm_vop_dqalloc(xfs_mount_t *, xfs_inode_t *, 187extern int xfs_qm_vop_dqalloc(xfs_mount_t *, xfs_inode_t *,
183 uid_t, gid_t, prid_t, uint, 188 uid_t, gid_t, prid_t, uint,
@@ -194,11 +199,6 @@ extern int xfs_qm_vop_chown_reserve(xfs_trans_t *, xfs_inode_t *,
194/* list stuff */ 199/* list stuff */
195extern void xfs_qm_freelist_append(xfs_frlist_t *, xfs_dquot_t *); 200extern void xfs_qm_freelist_append(xfs_frlist_t *, xfs_dquot_t *);
196extern void xfs_qm_freelist_unlink(xfs_dquot_t *); 201extern void xfs_qm_freelist_unlink(xfs_dquot_t *);
197extern int xfs_qm_freelist_lock_nowait(xfs_qm_t *);
198
199/* system call interface */
200extern int xfs_qm_quotactl(struct xfs_mount *, int, int,
201 xfs_caddr_t);
202 202
203#ifdef DEBUG 203#ifdef DEBUG
204extern int xfs_qm_internalqcheck(xfs_mount_t *); 204extern int xfs_qm_internalqcheck(xfs_mount_t *);
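
The XFS_QM_LOCK/UNLOCK/HOLD/RELE helpers deleted from this header were thin wrappers over the global mutex and a plain reference count, so their users are expected to open-code the equivalent. A sketch of what a call site looks like without them, assuming only the declarations kept above (xfs_Gqm, xfs_Gqm_lock) and the qm_nrefs counter implied by the old HOLD/RELE definitions:

    /* Sketch only, not an actual XFS call site. */
    static void hold_quota_manager(void)
    {
            mutex_lock(&xfs_Gqm_lock);       /* was XFS_QM_LOCK(xfs_Gqm)   */
            xfs_Gqm->qm_nrefs++;             /* was XFS_QM_HOLD(xfs_Gqm)   */
            mutex_unlock(&xfs_Gqm_lock);     /* was XFS_QM_UNLOCK(xfs_Gqm) */
    }
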
diff --git a/fs/xfs/quota/xfs_qm_bhv.c b/fs/xfs/quota/xfs_qm_bhv.c
index bc6c5cca3e12..63037c689a4b 100644
--- a/fs/xfs/quota/xfs_qm_bhv.c
+++ b/fs/xfs/quota/xfs_qm_bhv.c
@@ -235,7 +235,6 @@ struct xfs_qmops xfs_qmcore_xfs = {
235 .xfs_dqvopchownresv = xfs_qm_vop_chown_reserve, 235 .xfs_dqvopchownresv = xfs_qm_vop_chown_reserve,
236 .xfs_dqstatvfs = xfs_qm_statvfs, 236 .xfs_dqstatvfs = xfs_qm_statvfs,
237 .xfs_dqsync = xfs_qm_sync, 237 .xfs_dqsync = xfs_qm_sync,
238 .xfs_quotactl = xfs_qm_quotactl,
239 .xfs_dqtrxops = &xfs_trans_dquot_ops, 238 .xfs_dqtrxops = &xfs_trans_dquot_ops,
240}; 239};
241EXPORT_SYMBOL(xfs_qmcore_xfs); 240EXPORT_SYMBOL(xfs_qmcore_xfs);
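
Dropping .xfs_quotactl from this ops vector means quota control requests no longer funnel through a single per-filesystem dispatcher; the large command switch deleted from xfs_qm_syscalls.c below goes with it, leaving the individual xfs_qm_scall_* helpers as the entry points. A generic illustration of the switch-versus-callback trade (hypothetical types and names, deliberately not the Linux quota API):

    /* Illustrative only. The caller selects a callback, so no big switch is needed. */
    struct qctx { unsigned int flags; };

    struct quota_ops {
            int (*quota_on)(struct qctx *q, unsigned int type);
            int (*quota_off)(struct qctx *q, unsigned int type);
    };

    static int example_quota_on(struct qctx *q, unsigned int type)
    {
            q->flags |= type;
            return 0;
    }

    static int example_quota_off(struct qctx *q, unsigned int type)
    {
            q->flags &= ~type;
            return 0;
    }

    static const struct quota_ops example_quota_ops = {
            .quota_on  = example_quota_on,
            .quota_off = example_quota_off,
    };
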
diff --git a/fs/xfs/quota/xfs_qm_syscalls.c b/fs/xfs/quota/xfs_qm_syscalls.c
index 68139b38aede..c7b66f6506ce 100644
--- a/fs/xfs/quota/xfs_qm_syscalls.c
+++ b/fs/xfs/quota/xfs_qm_syscalls.c
@@ -57,135 +57,16 @@
57# define qdprintk(s, args...) do { } while (0) 57# define qdprintk(s, args...) do { } while (0)
58#endif 58#endif
59 59
60STATIC int xfs_qm_scall_trunc_qfiles(xfs_mount_t *, uint);
61STATIC int xfs_qm_scall_getquota(xfs_mount_t *, xfs_dqid_t, uint,
62 fs_disk_quota_t *);
63STATIC int xfs_qm_scall_getqstat(xfs_mount_t *, fs_quota_stat_t *);
64STATIC int xfs_qm_scall_setqlim(xfs_mount_t *, xfs_dqid_t, uint,
65 fs_disk_quota_t *);
66STATIC int xfs_qm_scall_quotaon(xfs_mount_t *, uint);
67STATIC int xfs_qm_scall_quotaoff(xfs_mount_t *, uint, boolean_t);
68STATIC int xfs_qm_log_quotaoff(xfs_mount_t *, xfs_qoff_logitem_t **, uint); 60STATIC int xfs_qm_log_quotaoff(xfs_mount_t *, xfs_qoff_logitem_t **, uint);
69STATIC int xfs_qm_log_quotaoff_end(xfs_mount_t *, xfs_qoff_logitem_t *, 61STATIC int xfs_qm_log_quotaoff_end(xfs_mount_t *, xfs_qoff_logitem_t *,
70 uint); 62 uint);
71STATIC uint xfs_qm_import_flags(uint);
72STATIC uint xfs_qm_export_flags(uint); 63STATIC uint xfs_qm_export_flags(uint);
73STATIC uint xfs_qm_import_qtype_flags(uint);
74STATIC uint xfs_qm_export_qtype_flags(uint); 64STATIC uint xfs_qm_export_qtype_flags(uint);
75STATIC void xfs_qm_export_dquot(xfs_mount_t *, xfs_disk_dquot_t *, 65STATIC void xfs_qm_export_dquot(xfs_mount_t *, xfs_disk_dquot_t *,
76 fs_disk_quota_t *); 66 fs_disk_quota_t *);
77 67
78 68
79/* 69/*
80 * The main distribution switch of all XFS quotactl system calls.
81 */
82int
83xfs_qm_quotactl(
84 xfs_mount_t *mp,
85 int cmd,
86 int id,
87 xfs_caddr_t addr)
88{
89 int error;
90
91 ASSERT(addr != NULL || cmd == Q_XQUOTASYNC);
92
93 /*
94 * The following commands are valid even when quotaoff.
95 */
96 switch (cmd) {
97 case Q_XQUOTARM:
98 /*
99 * Truncate quota files. quota must be off.
100 */
101 if (XFS_IS_QUOTA_ON(mp))
102 return XFS_ERROR(EINVAL);
103 if (mp->m_flags & XFS_MOUNT_RDONLY)
104 return XFS_ERROR(EROFS);
105 return (xfs_qm_scall_trunc_qfiles(mp,
106 xfs_qm_import_qtype_flags(*(uint *)addr)));
107
108 case Q_XGETQSTAT:
109 /*
110 * Get quota status information.
111 */
112 return (xfs_qm_scall_getqstat(mp, (fs_quota_stat_t *)addr));
113
114 case Q_XQUOTAON:
115 /*
116 * QUOTAON - enabling quota enforcement.
117 * Quota accounting must be turned on at mount time.
118 */
119 if (mp->m_flags & XFS_MOUNT_RDONLY)
120 return XFS_ERROR(EROFS);
121 return (xfs_qm_scall_quotaon(mp,
122 xfs_qm_import_flags(*(uint *)addr)));
123
124 case Q_XQUOTAOFF:
125 if (mp->m_flags & XFS_MOUNT_RDONLY)
126 return XFS_ERROR(EROFS);
127 break;
128
129 case Q_XQUOTASYNC:
130 return xfs_sync_inodes(mp, SYNC_DELWRI);
131
132 default:
133 break;
134 }
135
136 if (! XFS_IS_QUOTA_ON(mp))
137 return XFS_ERROR(ESRCH);
138
139 switch (cmd) {
140 case Q_XQUOTAOFF:
141 if (mp->m_flags & XFS_MOUNT_RDONLY)
142 return XFS_ERROR(EROFS);
143 error = xfs_qm_scall_quotaoff(mp,
144 xfs_qm_import_flags(*(uint *)addr),
145 B_FALSE);
146 break;
147
148 case Q_XGETQUOTA:
149 error = xfs_qm_scall_getquota(mp, (xfs_dqid_t)id, XFS_DQ_USER,
150 (fs_disk_quota_t *)addr);
151 break;
152 case Q_XGETGQUOTA:
153 error = xfs_qm_scall_getquota(mp, (xfs_dqid_t)id, XFS_DQ_GROUP,
154 (fs_disk_quota_t *)addr);
155 break;
156 case Q_XGETPQUOTA:
157 error = xfs_qm_scall_getquota(mp, (xfs_dqid_t)id, XFS_DQ_PROJ,
158 (fs_disk_quota_t *)addr);
159 break;
160
161 case Q_XSETQLIM:
162 if (mp->m_flags & XFS_MOUNT_RDONLY)
163 return XFS_ERROR(EROFS);
164 error = xfs_qm_scall_setqlim(mp, (xfs_dqid_t)id, XFS_DQ_USER,
165 (fs_disk_quota_t *)addr);
166 break;
167 case Q_XSETGQLIM:
168 if (mp->m_flags & XFS_MOUNT_RDONLY)
169 return XFS_ERROR(EROFS);
170 error = xfs_qm_scall_setqlim(mp, (xfs_dqid_t)id, XFS_DQ_GROUP,
171 (fs_disk_quota_t *)addr);
172 break;
173 case Q_XSETPQLIM:
174 if (mp->m_flags & XFS_MOUNT_RDONLY)
175 return XFS_ERROR(EROFS);
176 error = xfs_qm_scall_setqlim(mp, (xfs_dqid_t)id, XFS_DQ_PROJ,
177 (fs_disk_quota_t *)addr);
178 break;
179
180 default:
181 error = XFS_ERROR(EINVAL);
182 break;
183 }
184
185 return (error);
186}
187
188/*
189 * Turn off quota accounting and/or enforcement for all udquots and/or 70 * Turn off quota accounting and/or enforcement for all udquots and/or
190 * gdquots. Called only at unmount time. 71 * gdquots. Called only at unmount time.
191 * 72 *
@@ -193,11 +74,10 @@ xfs_qm_quotactl(
193 * incore, and modifies the ondisk dquot directly. Therefore, for example, 74 * incore, and modifies the ondisk dquot directly. Therefore, for example,
194 * it is an error to call this twice, without purging the cache. 75 * it is an error to call this twice, without purging the cache.
195 */ 76 */
196STATIC int 77int
197xfs_qm_scall_quotaoff( 78xfs_qm_scall_quotaoff(
198 xfs_mount_t *mp, 79 xfs_mount_t *mp,
199 uint flags, 80 uint flags)
200 boolean_t force)
201{ 81{
202 uint dqtype; 82 uint dqtype;
203 int error; 83 int error;
@@ -205,8 +85,6 @@ xfs_qm_scall_quotaoff(
205 xfs_qoff_logitem_t *qoffstart; 85 xfs_qoff_logitem_t *qoffstart;
206 int nculprits; 86 int nculprits;
207 87
208 if (!force && !capable(CAP_SYS_ADMIN))
209 return XFS_ERROR(EPERM);
210 /* 88 /*
211 * No file system can have quotas enabled on disk but not in core. 89 * No file system can have quotas enabled on disk but not in core.
212 * Note that quota utilities (like quotaoff) _expect_ 90 * Note that quota utilities (like quotaoff) _expect_
@@ -375,7 +253,7 @@ out_error:
375 return (error); 253 return (error);
376} 254}
377 255
378STATIC int 256int
379xfs_qm_scall_trunc_qfiles( 257xfs_qm_scall_trunc_qfiles(
380 xfs_mount_t *mp, 258 xfs_mount_t *mp,
381 uint flags) 259 uint flags)
@@ -383,8 +261,6 @@ xfs_qm_scall_trunc_qfiles(
383 int error = 0, error2 = 0; 261 int error = 0, error2 = 0;
384 xfs_inode_t *qip; 262 xfs_inode_t *qip;
385 263
386 if (!capable(CAP_SYS_ADMIN))
387 return XFS_ERROR(EPERM);
388 if (!xfs_sb_version_hasquota(&mp->m_sb) || flags == 0) { 264 if (!xfs_sb_version_hasquota(&mp->m_sb) || flags == 0) {
389 qdprintk("qtrunc flags=%x m_qflags=%x\n", flags, mp->m_qflags); 265 qdprintk("qtrunc flags=%x m_qflags=%x\n", flags, mp->m_qflags);
390 return XFS_ERROR(EINVAL); 266 return XFS_ERROR(EINVAL);
@@ -416,7 +292,7 @@ xfs_qm_scall_trunc_qfiles(
416 * effect immediately. 292 * effect immediately.
417 * (Switching on quota accounting must be done at mount time.) 293 * (Switching on quota accounting must be done at mount time.)
418 */ 294 */
419STATIC int 295int
420xfs_qm_scall_quotaon( 296xfs_qm_scall_quotaon(
421 xfs_mount_t *mp, 297 xfs_mount_t *mp,
422 uint flags) 298 uint flags)
@@ -426,9 +302,6 @@ xfs_qm_scall_quotaon(
426 uint accflags; 302 uint accflags;
427 __int64_t sbflags; 303 __int64_t sbflags;
428 304
429 if (!capable(CAP_SYS_ADMIN))
430 return XFS_ERROR(EPERM);
431
432 flags &= (XFS_ALL_QUOTA_ACCT | XFS_ALL_QUOTA_ENFD); 305 flags &= (XFS_ALL_QUOTA_ACCT | XFS_ALL_QUOTA_ENFD);
433 /* 306 /*
434 * Switching on quota accounting must be done at mount time. 307 * Switching on quota accounting must be done at mount time.
@@ -517,7 +390,7 @@ xfs_qm_scall_quotaon(
517/* 390/*
518 * Return quota status information, such as uquota-off, enforcements, etc. 391 * Return quota status information, such as uquota-off, enforcements, etc.
519 */ 392 */
520STATIC int 393int
521xfs_qm_scall_getqstat( 394xfs_qm_scall_getqstat(
522 xfs_mount_t *mp, 395 xfs_mount_t *mp,
523 fs_quota_stat_t *out) 396 fs_quota_stat_t *out)
@@ -582,7 +455,7 @@ xfs_qm_scall_getqstat(
582/* 455/*
583 * Adjust quota limits, and start/stop timers accordingly. 456 * Adjust quota limits, and start/stop timers accordingly.
584 */ 457 */
585STATIC int 458int
586xfs_qm_scall_setqlim( 459xfs_qm_scall_setqlim(
587 xfs_mount_t *mp, 460 xfs_mount_t *mp,
588 xfs_dqid_t id, 461 xfs_dqid_t id,
@@ -595,9 +468,6 @@ xfs_qm_scall_setqlim(
595 int error; 468 int error;
596 xfs_qcnt_t hard, soft; 469 xfs_qcnt_t hard, soft;
597 470
598 if (!capable(CAP_SYS_ADMIN))
599 return XFS_ERROR(EPERM);
600
601 if ((newlim->d_fieldmask & 471 if ((newlim->d_fieldmask &
602 (FS_DQ_LIMIT_MASK|FS_DQ_TIMER_MASK|FS_DQ_WARNS_MASK)) == 0) 472 (FS_DQ_LIMIT_MASK|FS_DQ_TIMER_MASK|FS_DQ_WARNS_MASK)) == 0)
603 return (0); 473 return (0);
@@ -742,7 +612,7 @@ xfs_qm_scall_setqlim(
742 return error; 612 return error;
743} 613}
744 614
745STATIC int 615int
746xfs_qm_scall_getquota( 616xfs_qm_scall_getquota(
747 xfs_mount_t *mp, 617 xfs_mount_t *mp,
748 xfs_dqid_t id, 618 xfs_dqid_t id,
@@ -935,30 +805,6 @@ xfs_qm_export_dquot(
935} 805}
936 806
937STATIC uint 807STATIC uint
938xfs_qm_import_qtype_flags(
939 uint uflags)
940{
941 uint oflags = 0;
942
943 /*
944 * Can't be more than one, or none.
945 */
946 if (((uflags & (XFS_GROUP_QUOTA | XFS_USER_QUOTA)) ==
947 (XFS_GROUP_QUOTA | XFS_USER_QUOTA)) ||
948 ((uflags & (XFS_GROUP_QUOTA | XFS_PROJ_QUOTA)) ==
949 (XFS_GROUP_QUOTA | XFS_PROJ_QUOTA)) ||
950 ((uflags & (XFS_USER_QUOTA | XFS_PROJ_QUOTA)) ==
951 (XFS_USER_QUOTA | XFS_PROJ_QUOTA)) ||
952 ((uflags & (XFS_GROUP_QUOTA|XFS_USER_QUOTA|XFS_PROJ_QUOTA)) == 0))
953 return (0);
954
955 oflags |= (uflags & XFS_USER_QUOTA) ? XFS_DQ_USER : 0;
956 oflags |= (uflags & XFS_PROJ_QUOTA) ? XFS_DQ_PROJ : 0;
957 oflags |= (uflags & XFS_GROUP_QUOTA) ? XFS_DQ_GROUP: 0;
958 return oflags;
959}
960
961STATIC uint
962xfs_qm_export_qtype_flags( 808xfs_qm_export_qtype_flags(
963 uint flags) 809 uint flags)
964{ 810{
@@ -979,26 +825,6 @@ xfs_qm_export_qtype_flags(
979} 825}
980 826
981STATIC uint 827STATIC uint
982xfs_qm_import_flags(
983 uint uflags)
984{
985 uint flags = 0;
986
987 if (uflags & XFS_QUOTA_UDQ_ACCT)
988 flags |= XFS_UQUOTA_ACCT;
989 if (uflags & XFS_QUOTA_PDQ_ACCT)
990 flags |= XFS_PQUOTA_ACCT;
991 if (uflags & XFS_QUOTA_GDQ_ACCT)
992 flags |= XFS_GQUOTA_ACCT;
993 if (uflags & XFS_QUOTA_UDQ_ENFD)
994 flags |= XFS_UQUOTA_ENFD;
995 if (uflags & (XFS_QUOTA_PDQ_ENFD|XFS_QUOTA_GDQ_ENFD))
996 flags |= XFS_OQUOTA_ENFD;
997 return (flags);
998}
999
1000
1001STATIC uint
1002xfs_qm_export_flags( 828xfs_qm_export_flags(
1003 uint flags) 829 uint flags)
1004{ 830{
@@ -1134,7 +960,7 @@ xfs_dqhash_t *qmtest_udqtab;
1134xfs_dqhash_t *qmtest_gdqtab; 960xfs_dqhash_t *qmtest_gdqtab;
1135int qmtest_hashmask; 961int qmtest_hashmask;
1136int qmtest_nfails; 962int qmtest_nfails;
1137mutex_t qcheck_lock; 963struct mutex qcheck_lock;
1138 964
1139#define DQTEST_HASHVAL(mp, id) (((__psunsigned_t)(mp) + \ 965#define DQTEST_HASHVAL(mp, id) (((__psunsigned_t)(mp) + \
1140 (__psunsigned_t)(id)) & \ 966 (__psunsigned_t)(id)) & \
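
Alongside losing their STATIC marking, the scall helpers above also lose their capable(CAP_SYS_ADMIN) guards; the assumption is that the caller now performs the permission check before invoking them. For reference, the dropped guard amounts to this (sketch, not a quoted call site):

    #include <linux/capability.h>
    #include <linux/errno.h>

    /* The check each helper used to open with; now assumed to be the caller's job. */
    static int require_quota_admin(void)
    {
            if (!capable(CAP_SYS_ADMIN))
                    return -EPERM;           /* the XFS code returned XFS_ERROR(EPERM) here */
            return 0;
    }
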
diff --git a/fs/xfs/quota/xfs_quota_priv.h b/fs/xfs/quota/xfs_quota_priv.h
index c4fcea600bc2..8286b2842b6b 100644
--- a/fs/xfs/quota/xfs_quota_priv.h
+++ b/fs/xfs/quota/xfs_quota_priv.h
@@ -42,34 +42,24 @@
42#define XFS_QI_QOFFLOCK(mp) ((mp)->m_quotainfo->qi_quotaofflock) 42#define XFS_QI_QOFFLOCK(mp) ((mp)->m_quotainfo->qi_quotaofflock)
43 43
44#define XFS_QI_MPL_LIST(mp) ((mp)->m_quotainfo->qi_dqlist) 44#define XFS_QI_MPL_LIST(mp) ((mp)->m_quotainfo->qi_dqlist)
45#define XFS_QI_MPLLOCK(mp) ((mp)->m_quotainfo->qi_dqlist.qh_lock)
46#define XFS_QI_MPLNEXT(mp) ((mp)->m_quotainfo->qi_dqlist.qh_next) 45#define XFS_QI_MPLNEXT(mp) ((mp)->m_quotainfo->qi_dqlist.qh_next)
47#define XFS_QI_MPLNDQUOTS(mp) ((mp)->m_quotainfo->qi_dqlist.qh_nelems) 46#define XFS_QI_MPLNDQUOTS(mp) ((mp)->m_quotainfo->qi_dqlist.qh_nelems)
48 47
 49#define XQMLCK(h) (mutex_lock(&((h)->qh_lock)))
 50#define XQMUNLCK(h) (mutex_unlock(&((h)->qh_lock)))
 51#ifdef DEBUG
 52struct xfs_dqhash;
 53static inline int XQMISLCKD(struct xfs_dqhash *h)
 54{
 55 if (mutex_trylock(&h->qh_lock)) {
 56 mutex_unlock(&h->qh_lock);
 57 return 0;
 58 }
 59 return 1;
 60}
 61#endif
 62
 63#define XFS_DQ_HASH_LOCK(h) XQMLCK(h)
 64#define XFS_DQ_HASH_UNLOCK(h) XQMUNLCK(h)
 65#define XFS_DQ_IS_HASH_LOCKED(h) XQMISLCKD(h)
 66
 67#define xfs_qm_mplist_lock(mp) XQMLCK(&(XFS_QI_MPL_LIST(mp)))
 68#define xfs_qm_mplist_unlock(mp) XQMUNLCK(&(XFS_QI_MPL_LIST(mp)))
 69#define XFS_QM_IS_MPLIST_LOCKED(mp) XQMISLCKD(&(XFS_QI_MPL_LIST(mp)))
 70
 71#define xfs_qm_freelist_lock(qm) XQMLCK(&((qm)->qm_dqfreelist))
 72#define xfs_qm_freelist_unlock(qm) XQMUNLCK(&((qm)->qm_dqfreelist))
 48#define xfs_qm_mplist_lock(mp) \
 49 mutex_lock(&(XFS_QI_MPL_LIST(mp).qh_lock))
 50#define xfs_qm_mplist_nowait(mp) \
 51 mutex_trylock(&(XFS_QI_MPL_LIST(mp).qh_lock))
 52#define xfs_qm_mplist_unlock(mp) \
 53 mutex_unlock(&(XFS_QI_MPL_LIST(mp).qh_lock))
 54#define XFS_QM_IS_MPLIST_LOCKED(mp) \
 55 mutex_is_locked(&(XFS_QI_MPL_LIST(mp).qh_lock))
 56
 57#define xfs_qm_freelist_lock(qm) \
 58 mutex_lock(&((qm)->qm_dqfreelist.qh_lock))
 59#define xfs_qm_freelist_lock_nowait(qm) \
 60 mutex_trylock(&((qm)->qm_dqfreelist.qh_lock))
 61#define xfs_qm_freelist_unlock(qm) \
 62 mutex_unlock(&((qm)->qm_dqfreelist.qh_lock))
73 63
74/* 64/*
75 * Hash into a bucket in the dquot hash table, based on <mp, id>. 65 * Hash into a bucket in the dquot hash table, based on <mp, id>.
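
One behavioural detail in the hunk above: the old DEBUG helper XQMISLCKD probed the lock with mutex_trylock()/mutex_unlock(), which actually takes and releases the mutex if it happens to be free, whereas the new XFS_QM_IS_MPLIST_LOCKED uses mutex_is_locked() and only reads the lock state. A small sketch of an assertion built the new way (illustrative, not code from this patch; qh_lock is the field used above):

    #include <linux/mutex.h>
    #include <linux/bug.h>

    #define ASSERT_CHAIN_LOCKED(chain) \
            WARN_ON(!mutex_is_locked(&(chain)->qh_lock))
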
diff --git a/fs/xfs/quota/xfs_trans_dquot.c b/fs/xfs/quota/xfs_trans_dquot.c
index 99611381e740..447173bcf96d 100644
--- a/fs/xfs/quota/xfs_trans_dquot.c
+++ b/fs/xfs/quota/xfs_trans_dquot.c
@@ -624,10 +624,9 @@ xfs_trans_dqresv(
624 xfs_qcnt_t *resbcountp; 624 xfs_qcnt_t *resbcountp;
625 xfs_quotainfo_t *q = mp->m_quotainfo; 625 xfs_quotainfo_t *q = mp->m_quotainfo;
626 626
627 if (! (flags & XFS_QMOPT_DQLOCK)) { 627
628 xfs_dqlock(dqp); 628 xfs_dqlock(dqp);
629 } 629
630 ASSERT(XFS_DQ_IS_LOCKED(dqp));
631 if (flags & XFS_TRANS_DQ_RES_BLKS) { 630 if (flags & XFS_TRANS_DQ_RES_BLKS) {
632 hardlimit = be64_to_cpu(dqp->q_core.d_blk_hardlimit); 631 hardlimit = be64_to_cpu(dqp->q_core.d_blk_hardlimit);
633 if (!hardlimit) 632 if (!hardlimit)
@@ -740,10 +739,8 @@ xfs_trans_dqresv(
740 ASSERT(dqp->q_res_icount >= be64_to_cpu(dqp->q_core.d_icount)); 739 ASSERT(dqp->q_res_icount >= be64_to_cpu(dqp->q_core.d_icount));
741 740
742error_return: 741error_return:
743 if (! (flags & XFS_QMOPT_DQLOCK)) { 742 xfs_dqunlock(dqp);
744 xfs_dqunlock(dqp); 743 return error;
745 }
746 return (error);
747} 744}
748 745
749 746
@@ -753,8 +750,7 @@ error_return:
753 * grp/prj quotas is important, because this follows a both-or-nothing 750 * grp/prj quotas is important, because this follows a both-or-nothing
754 * approach. 751 * approach.
755 * 752 *
756 * flags = XFS_QMOPT_DQLOCK indicate if dquot(s) need to be locked. 753 * flags = XFS_QMOPT_FORCE_RES evades limit enforcement. Used by chown.
757 * XFS_QMOPT_FORCE_RES evades limit enforcement. Used by chown.
758 * XFS_QMOPT_ENOSPC returns ENOSPC not EDQUOT. Used by pquota. 754 * XFS_QMOPT_ENOSPC returns ENOSPC not EDQUOT. Used by pquota.
759 * XFS_TRANS_DQ_RES_BLKS reserves regular disk blocks 755 * XFS_TRANS_DQ_RES_BLKS reserves regular disk blocks
760 * XFS_TRANS_DQ_RES_RTBLKS reserves realtime disk blocks 756 * XFS_TRANS_DQ_RES_RTBLKS reserves realtime disk blocks
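
The xfs_trans_dqresv() hunks above remove the XFS_QMOPT_DQLOCK special case: the function now always takes and drops the dquot lock itself, so callers hand it an unlocked dquot. A minimal sketch of that simplified contract, with stand-in types rather than the XFS ones:

    #include <linux/mutex.h>
    #include <linux/errno.h>

    struct dq {                              /* stand-in for xfs_dquot_t      */
            struct mutex lock;
            long         res_bcount;
            long         hardlimit;          /* 0 means "no limit"            */
    };

    /* Lock and unlock internally; the caller never pre-locks the dquot. */
    static int reserve_blocks(struct dq *d, long nblks)
    {
            int error = 0;

            mutex_lock(&d->lock);            /* plays the role of xfs_dqlock()   */
            if (d->hardlimit && d->res_bcount + nblks > d->hardlimit)
                    error = -EDQUOT;
            else
                    d->res_bcount += nblks;
            mutex_unlock(&d->lock);          /* plays the role of xfs_dqunlock() */
            return error;
    }
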
diff --git a/fs/xfs/support/debug.c b/fs/xfs/support/debug.c
index ae5482965424..3f3610a7ee05 100644
--- a/fs/xfs/support/debug.c
+++ b/fs/xfs/support/debug.c
@@ -24,6 +24,7 @@
24#include "xfs_ag.h" 24#include "xfs_ag.h"
25#include "xfs_dmapi.h" 25#include "xfs_dmapi.h"
26#include "xfs_mount.h" 26#include "xfs_mount.h"
27#include "xfs_error.h"
27 28
28static char message[1024]; /* keep it off the stack */ 29static char message[1024]; /* keep it off the stack */
29static DEFINE_SPINLOCK(xfs_err_lock); 30static DEFINE_SPINLOCK(xfs_err_lock);
diff --git a/fs/xfs/support/uuid.c b/fs/xfs/support/uuid.c
index 5830c040ea7e..b83f76b6d410 100644
--- a/fs/xfs/support/uuid.c
+++ b/fs/xfs/support/uuid.c
@@ -17,10 +17,6 @@
17 */ 17 */
18#include <xfs.h> 18#include <xfs.h>
19 19
20static DEFINE_MUTEX(uuid_monitor);
21static int uuid_table_size;
22static uuid_t *uuid_table;
23
24/* IRIX interpretation of an uuid_t */ 20/* IRIX interpretation of an uuid_t */
25typedef struct { 21typedef struct {
26 __be32 uu_timelow; 22 __be32 uu_timelow;
@@ -46,12 +42,6 @@ uuid_getnodeuniq(uuid_t *uuid, int fsid [2])
46 fsid[1] = be32_to_cpu(uup->uu_timelow); 42 fsid[1] = be32_to_cpu(uup->uu_timelow);
47} 43}
48 44
49void
50uuid_create_nil(uuid_t *uuid)
51{
52 memset(uuid, 0, sizeof(*uuid));
53}
54
55int 45int
56uuid_is_nil(uuid_t *uuid) 46uuid_is_nil(uuid_t *uuid)
57{ 47{
@@ -71,64 +61,3 @@ uuid_equal(uuid_t *uuid1, uuid_t *uuid2)
71{ 61{
72 return memcmp(uuid1, uuid2, sizeof(uuid_t)) ? 0 : 1; 62 return memcmp(uuid1, uuid2, sizeof(uuid_t)) ? 0 : 1;
73} 63}
74
75/*
76 * Given a 128-bit uuid, return a 64-bit value by adding the top and bottom
77 * 64-bit words. NOTE: This function can not be changed EVER. Although
78 * brain-dead, some applications depend on this 64-bit value remaining
79 * persistent. Specifically, DMI vendors store the value as a persistent
80 * filehandle.
81 */
82__uint64_t
83uuid_hash64(uuid_t *uuid)
84{
85 __uint64_t *sp = (__uint64_t *)uuid;
86
87 return sp[0] + sp[1];
88}
89
90int
91uuid_table_insert(uuid_t *uuid)
92{
93 int i, hole;
94
95 mutex_lock(&uuid_monitor);
96 for (i = 0, hole = -1; i < uuid_table_size; i++) {
97 if (uuid_is_nil(&uuid_table[i])) {
98 hole = i;
99 continue;
100 }
101 if (uuid_equal(uuid, &uuid_table[i])) {
102 mutex_unlock(&uuid_monitor);
103 return 0;
104 }
105 }
106 if (hole < 0) {
107 uuid_table = kmem_realloc(uuid_table,
108 (uuid_table_size + 1) * sizeof(*uuid_table),
109 uuid_table_size * sizeof(*uuid_table),
110 KM_SLEEP);
111 hole = uuid_table_size++;
112 }
113 uuid_table[hole] = *uuid;
114 mutex_unlock(&uuid_monitor);
115 return 1;
116}
117
118void
119uuid_table_remove(uuid_t *uuid)
120{
121 int i;
122
123 mutex_lock(&uuid_monitor);
124 for (i = 0; i < uuid_table_size; i++) {
125 if (uuid_is_nil(&uuid_table[i]))
126 continue;
127 if (!uuid_equal(uuid, &uuid_table[i]))
128 continue;
129 uuid_create_nil(&uuid_table[i]);
130 break;
131 }
132 ASSERT(i < uuid_table_size);
133 mutex_unlock(&uuid_monitor);
134}
diff --git a/fs/xfs/support/uuid.h b/fs/xfs/support/uuid.h
index cff5b607d445..4732d71262cc 100644
--- a/fs/xfs/support/uuid.h
+++ b/fs/xfs/support/uuid.h
@@ -22,12 +22,8 @@ typedef struct {
22 unsigned char __u_bits[16]; 22 unsigned char __u_bits[16];
23} uuid_t; 23} uuid_t;
24 24
25extern void uuid_create_nil(uuid_t *uuid);
26extern int uuid_is_nil(uuid_t *uuid); 25extern int uuid_is_nil(uuid_t *uuid);
27extern int uuid_equal(uuid_t *uuid1, uuid_t *uuid2); 26extern int uuid_equal(uuid_t *uuid1, uuid_t *uuid2);
28extern void uuid_getnodeuniq(uuid_t *uuid, int fsid [2]); 27extern void uuid_getnodeuniq(uuid_t *uuid, int fsid [2]);
29extern __uint64_t uuid_hash64(uuid_t *uuid);
30extern int uuid_table_insert(uuid_t *uuid);
31extern void uuid_table_remove(uuid_t *uuid);
32 28
33#endif /* __XFS_SUPPORT_UUID_H__ */ 29#endif /* __XFS_SUPPORT_UUID_H__ */
diff --git a/fs/xfs/xfs_ag.h b/fs/xfs/xfs_ag.h
index 143d63ecb20a..c8641f713caa 100644
--- a/fs/xfs/xfs_ag.h
+++ b/fs/xfs/xfs_ag.h
@@ -223,8 +223,8 @@ typedef struct xfs_perag
223 be32_to_cpu((a)->agf_levels[XFS_BTNUM_CNTi]), mp)) 223 be32_to_cpu((a)->agf_levels[XFS_BTNUM_CNTi]), mp))
224#define XFS_MIN_FREELIST_PAG(pag,mp) \ 224#define XFS_MIN_FREELIST_PAG(pag,mp) \
225 (XFS_MIN_FREELIST_RAW( \ 225 (XFS_MIN_FREELIST_RAW( \
226 (uint_t)(pag)->pagf_levels[XFS_BTNUM_BNOi], \ 226 (unsigned int)(pag)->pagf_levels[XFS_BTNUM_BNOi], \
227 (uint_t)(pag)->pagf_levels[XFS_BTNUM_CNTi], mp)) 227 (unsigned int)(pag)->pagf_levels[XFS_BTNUM_CNTi], mp))
228 228
229#define XFS_AGB_TO_FSB(mp,agno,agbno) \ 229#define XFS_AGB_TO_FSB(mp,agno,agbno) \
230 (((xfs_fsblock_t)(agno) << (mp)->m_sb.sb_agblklog) | (agbno)) 230 (((xfs_fsblock_t)(agno) << (mp)->m_sb.sb_agblklog) | (agbno))
diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c
index 028e44e58ea9..2cf944eb796d 100644
--- a/fs/xfs/xfs_alloc.c
+++ b/fs/xfs/xfs_alloc.c
@@ -1872,6 +1872,25 @@ xfs_alloc_compute_maxlevels(
1872} 1872}
1873 1873
1874/* 1874/*
1875 * Find the length of the longest extent in an AG.
1876 */
1877xfs_extlen_t
1878xfs_alloc_longest_free_extent(
1879 struct xfs_mount *mp,
1880 struct xfs_perag *pag)
1881{
1882 xfs_extlen_t need, delta = 0;
1883
1884 need = XFS_MIN_FREELIST_PAG(pag, mp);
1885 if (need > pag->pagf_flcount)
1886 delta = need - pag->pagf_flcount;
1887
1888 if (pag->pagf_longest > delta)
1889 return pag->pagf_longest - delta;
1890 return pag->pagf_flcount > 0 || pag->pagf_longest > 0;
1891}
1892
1893/*
1875 * Decide whether to use this allocation group for this allocation. 1894 * Decide whether to use this allocation group for this allocation.
1876 * If so, fix up the btree freelist's size. 1895 * If so, fix up the btree freelist's size.
1877 */ 1896 */
@@ -1923,15 +1942,12 @@ xfs_alloc_fix_freelist(
1923 } 1942 }
1924 1943
1925 if (!(flags & XFS_ALLOC_FLAG_FREEING)) { 1944 if (!(flags & XFS_ALLOC_FLAG_FREEING)) {
1926 need = XFS_MIN_FREELIST_PAG(pag, mp);
1927 delta = need > pag->pagf_flcount ? need - pag->pagf_flcount : 0;
1928 /* 1945 /*
1929 * If it looks like there isn't a long enough extent, or enough 1946 * If it looks like there isn't a long enough extent, or enough
1930 * total blocks, reject it. 1947 * total blocks, reject it.
1931 */ 1948 */
1932 longest = (pag->pagf_longest > delta) ? 1949 need = XFS_MIN_FREELIST_PAG(pag, mp);
1933 (pag->pagf_longest - delta) : 1950 longest = xfs_alloc_longest_free_extent(mp, pag);
1934 (pag->pagf_flcount > 0 || pag->pagf_longest > 0);
1935 if ((args->minlen + args->alignment + args->minalignslop - 1) > 1951 if ((args->minlen + args->alignment + args->minalignslop - 1) >
1936 longest || 1952 longest ||
1937 ((int)(pag->pagf_freeblks + pag->pagf_flcount - 1953 ((int)(pag->pagf_freeblks + pag->pagf_flcount -
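
The new xfs_alloc_longest_free_extent() centralises arithmetic that xfs_alloc_fix_freelist() (above) and xfs_bmap_btalloc() (later in this series) used to open-code: any blocks the AG freelist is still owed (need minus pagf_flcount, when positive) are deducted from pagf_longest, and when nothing usable remains the expression pagf_flcount > 0 || pagf_longest > 0 evaluates to 1 or 0, i.e. "at most one block" versus "nothing at all". A plain-integer restatement of the same logic (sketch, not the kernel function):

    /* Same arithmetic as xfs_alloc_longest_free_extent(), with plain types. */
    static unsigned int longest_free_extent(unsigned int need,     /* min freelist blocks  */
                                            unsigned int flcount,  /* blocks already on it */
                                            unsigned int longest)  /* longest free extent  */
    {
            unsigned int delta = 0;

            if (need > flcount)
                    delta = need - flcount;  /* blocks a freelist refill would still consume */

            if (longest > delta)
                    return longest - delta;

            /* 1 if any free space exists at all, otherwise 0 */
            return flcount > 0 || longest > 0;
    }
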
diff --git a/fs/xfs/xfs_alloc.h b/fs/xfs/xfs_alloc.h
index 588172796f7b..e704caee10df 100644
--- a/fs/xfs/xfs_alloc.h
+++ b/fs/xfs/xfs_alloc.h
@@ -100,6 +100,12 @@ typedef struct xfs_alloc_arg {
100#define XFS_ALLOC_USERDATA 1 /* allocation is for user data*/ 100#define XFS_ALLOC_USERDATA 1 /* allocation is for user data*/
101#define XFS_ALLOC_INITIAL_USER_DATA 2 /* special case start of file */ 101#define XFS_ALLOC_INITIAL_USER_DATA 2 /* special case start of file */
102 102
103/*
104 * Find the length of the longest extent in an AG.
105 */
106xfs_extlen_t
107xfs_alloc_longest_free_extent(struct xfs_mount *mp,
108 struct xfs_perag *pag);
103 109
104#ifdef __KERNEL__ 110#ifdef __KERNEL__
105 111
diff --git a/fs/xfs/xfs_attr_leaf.c b/fs/xfs/xfs_attr_leaf.c
index 6c323f8a4cd1..afdc8911637d 100644
--- a/fs/xfs/xfs_attr_leaf.c
+++ b/fs/xfs/xfs_attr_leaf.c
@@ -155,7 +155,8 @@ xfs_attr_shortform_bytesfit(xfs_inode_t *dp, int bytes)
155 * minimum offset only needs to be the space required for 155 * minimum offset only needs to be the space required for
156 * the btree root. 156 * the btree root.
157 */ 157 */
158 if (!dp->i_d.di_forkoff && dp->i_df.if_bytes > mp->m_attroffset) 158 if (!dp->i_d.di_forkoff && dp->i_df.if_bytes >
159 xfs_default_attroffset(dp))
159 dsize = XFS_BMDR_SPACE_CALC(MINDBTPTRS); 160 dsize = XFS_BMDR_SPACE_CALC(MINDBTPTRS);
160 break; 161 break;
161 162
@@ -298,6 +299,26 @@ xfs_attr_shortform_add(xfs_da_args_t *args, int forkoff)
298} 299}
299 300
300/* 301/*
302 * After the last attribute is removed revert to original inode format,
303 * making all literal area available to the data fork once more.
304 */
305STATIC void
306xfs_attr_fork_reset(
307 struct xfs_inode *ip,
308 struct xfs_trans *tp)
309{
310 xfs_idestroy_fork(ip, XFS_ATTR_FORK);
311 ip->i_d.di_forkoff = 0;
312 ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
313
314 ASSERT(ip->i_d.di_anextents == 0);
315 ASSERT(ip->i_afp == NULL);
316
317 ip->i_df.if_ext_max = XFS_IFORK_DSIZE(ip) / sizeof(xfs_bmbt_rec_t);
318 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
319}
320
321/*
301 * Remove an attribute from the shortform attribute list structure. 322 * Remove an attribute from the shortform attribute list structure.
302 */ 323 */
303int 324int
@@ -344,22 +365,10 @@ xfs_attr_shortform_remove(xfs_da_args_t *args)
344 */ 365 */
345 totsize -= size; 366 totsize -= size;
346 if (totsize == sizeof(xfs_attr_sf_hdr_t) && 367 if (totsize == sizeof(xfs_attr_sf_hdr_t) &&
 347 !(args->op_flags & XFS_DA_OP_ADDNAME) &&
 348 (mp->m_flags & XFS_MOUNT_ATTR2) &&
 349 (dp->i_d.di_format != XFS_DINODE_FMT_BTREE)) {
 350 /*
 351 * Last attribute now removed, revert to original
 352 * inode format making all literal area available
 353 * to the data fork once more.
 354 */
 355 xfs_idestroy_fork(dp, XFS_ATTR_FORK);
 356 dp->i_d.di_forkoff = 0;
 357 dp->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
 358 ASSERT(dp->i_d.di_anextents == 0);
 359 ASSERT(dp->i_afp == NULL);
 360 dp->i_df.if_ext_max =
 361 XFS_IFORK_DSIZE(dp) / (uint)sizeof(xfs_bmbt_rec_t);
 362 xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE);
 368 (mp->m_flags & XFS_MOUNT_ATTR2) &&
 369 (dp->i_d.di_format != XFS_DINODE_FMT_BTREE) &&
 370 !(args->op_flags & XFS_DA_OP_ADDNAME)) {
 371 xfs_attr_fork_reset(dp, args->trans);
363 } else { 372 } else {
364 xfs_idata_realloc(dp, -size, XFS_ATTR_FORK); 373 xfs_idata_realloc(dp, -size, XFS_ATTR_FORK);
365 dp->i_d.di_forkoff = xfs_attr_shortform_bytesfit(dp, totsize); 374 dp->i_d.di_forkoff = xfs_attr_shortform_bytesfit(dp, totsize);
@@ -786,20 +795,7 @@ xfs_attr_leaf_to_shortform(xfs_dabuf_t *bp, xfs_da_args_t *args, int forkoff)
786 if (forkoff == -1) { 795 if (forkoff == -1) {
787 ASSERT(dp->i_mount->m_flags & XFS_MOUNT_ATTR2); 796 ASSERT(dp->i_mount->m_flags & XFS_MOUNT_ATTR2);
788 ASSERT(dp->i_d.di_format != XFS_DINODE_FMT_BTREE); 797 ASSERT(dp->i_d.di_format != XFS_DINODE_FMT_BTREE);
789 798 xfs_attr_fork_reset(dp, args->trans);
790 /*
791 * Last attribute was removed, revert to original
792 * inode format making all literal area available
793 * to the data fork once more.
794 */
795 xfs_idestroy_fork(dp, XFS_ATTR_FORK);
796 dp->i_d.di_forkoff = 0;
797 dp->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
798 ASSERT(dp->i_d.di_anextents == 0);
799 ASSERT(dp->i_afp == NULL);
800 dp->i_df.if_ext_max =
801 XFS_IFORK_DSIZE(dp) / (uint)sizeof(xfs_bmbt_rec_t);
802 xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE);
803 goto out; 799 goto out;
804 } 800 }
805 801
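
In xfs_attr_fork_reset() above, if_ext_max is recomputed as XFS_IFORK_DSIZE(ip) / sizeof(xfs_bmbt_rec_t): with the attribute fork gone, the whole literal area can hold inline data-fork extent records again. As a worked example with assumed sizes (a 16-byte xfs_bmbt_rec_t and, say, a 96-byte literal area), that leaves room for six inline extents:

    /* Worked example; the 96 is an assumed literal-area size, not taken from a real inode. */
    static unsigned int max_inline_extents(unsigned int fork_bytes, unsigned int rec_bytes)
    {
            return fork_bytes / rec_bytes;   /* 96 / 16 == 6 */
    }
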
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
index c852cd65aaea..3a6ed426327a 100644
--- a/fs/xfs/xfs_bmap.c
+++ b/fs/xfs/xfs_bmap.c
@@ -2479,7 +2479,7 @@ xfs_bmap_adjacent(
2479 fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, ap->firstblock); 2479 fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, ap->firstblock);
2480 /* 2480 /*
2481 * If allocating at eof, and there's a previous real block, 2481 * If allocating at eof, and there's a previous real block,
2482 * try to use it's last block as our starting point. 2482 * try to use its last block as our starting point.
2483 */ 2483 */
2484 if (ap->eof && ap->prevp->br_startoff != NULLFILEOFF && 2484 if (ap->eof && ap->prevp->br_startoff != NULLFILEOFF &&
2485 !isnullstartblock(ap->prevp->br_startblock) && 2485 !isnullstartblock(ap->prevp->br_startblock) &&
@@ -2712,9 +2712,6 @@ xfs_bmap_btalloc(
2712 xfs_agnumber_t startag; 2712 xfs_agnumber_t startag;
2713 xfs_alloc_arg_t args; 2713 xfs_alloc_arg_t args;
2714 xfs_extlen_t blen; 2714 xfs_extlen_t blen;
2715 xfs_extlen_t delta;
2716 xfs_extlen_t longest;
2717 xfs_extlen_t need;
2718 xfs_extlen_t nextminlen = 0; 2715 xfs_extlen_t nextminlen = 0;
2719 xfs_perag_t *pag; 2716 xfs_perag_t *pag;
2720 int nullfb; /* true if ap->firstblock isn't set */ 2717 int nullfb; /* true if ap->firstblock isn't set */
@@ -2796,13 +2793,8 @@ xfs_bmap_btalloc(
2796 * See xfs_alloc_fix_freelist... 2793 * See xfs_alloc_fix_freelist...
2797 */ 2794 */
2798 if (pag->pagf_init) { 2795 if (pag->pagf_init) {
 2799 need = XFS_MIN_FREELIST_PAG(pag, mp);
 2800 delta = need > pag->pagf_flcount ?
 2801 need - pag->pagf_flcount : 0;
 2802 longest = (pag->pagf_longest > delta) ?
 2803 (pag->pagf_longest - delta) :
 2804 (pag->pagf_flcount > 0 ||
 2805 pag->pagf_longest > 0);
 2796 xfs_extlen_t longest;
 2797 longest = xfs_alloc_longest_free_extent(mp, pag);
2806 if (blen < longest) 2798 if (blen < longest)
2807 blen = longest; 2799 blen = longest;
2808 } else 2800 } else
@@ -3577,6 +3569,27 @@ xfs_bmap_extents_to_btree(
3577} 3569}
3578 3570
3579/* 3571/*
3572 * Calculate the default attribute fork offset for newly created inodes.
3573 */
3574uint
3575xfs_default_attroffset(
3576 struct xfs_inode *ip)
3577{
3578 struct xfs_mount *mp = ip->i_mount;
3579 uint offset;
3580
3581 if (mp->m_sb.sb_inodesize == 256) {
3582 offset = XFS_LITINO(mp) -
3583 XFS_BMDR_SPACE_CALC(MINABTPTRS);
3584 } else {
3585 offset = XFS_BMDR_SPACE_CALC(6 * MINABTPTRS);
3586 }
3587
3588 ASSERT(offset < XFS_LITINO(mp));
3589 return offset;
3590}
3591
3592/*
3580 * Helper routine to reset inode di_forkoff field when switching 3593 * Helper routine to reset inode di_forkoff field when switching
3581 * attribute fork from local to extent format - we reset it where 3594 * attribute fork from local to extent format - we reset it where
3582 * possible to make space available for inline data fork extents. 3595 * possible to make space available for inline data fork extents.
@@ -3588,15 +3601,18 @@ xfs_bmap_forkoff_reset(
3588 int whichfork) 3601 int whichfork)
3589{ 3602{
3590 if (whichfork == XFS_ATTR_FORK && 3603 if (whichfork == XFS_ATTR_FORK &&
 3591 (ip->i_d.di_format != XFS_DINODE_FMT_DEV) &&
 3592 (ip->i_d.di_format != XFS_DINODE_FMT_UUID) &&
 3593 (ip->i_d.di_format != XFS_DINODE_FMT_BTREE) &&
 3594 ((mp->m_attroffset >> 3) > ip->i_d.di_forkoff)) {
 3595 ip->i_d.di_forkoff = mp->m_attroffset >> 3;
 3596 ip->i_df.if_ext_max = XFS_IFORK_DSIZE(ip) /
 3597 (uint)sizeof(xfs_bmbt_rec_t);
 3598 ip->i_afp->if_ext_max = XFS_IFORK_ASIZE(ip) /
 3599 (uint)sizeof(xfs_bmbt_rec_t);
 3604 ip->i_d.di_format != XFS_DINODE_FMT_DEV &&
 3605 ip->i_d.di_format != XFS_DINODE_FMT_UUID &&
 3606 ip->i_d.di_format != XFS_DINODE_FMT_BTREE) {
 3607 uint dfl_forkoff = xfs_default_attroffset(ip) >> 3;
 3608
 3609 if (dfl_forkoff > ip->i_d.di_forkoff) {
 3610 ip->i_d.di_forkoff = dfl_forkoff;
 3611 ip->i_df.if_ext_max =
 3612 XFS_IFORK_DSIZE(ip) / sizeof(xfs_bmbt_rec_t);
 3613 ip->i_afp->if_ext_max =
 3614 XFS_IFORK_ASIZE(ip) / sizeof(xfs_bmbt_rec_t);
 3615 }
3600 } 3616 }
3601} 3617}
3602 3618
@@ -4065,7 +4081,7 @@ xfs_bmap_add_attrfork(
4065 case XFS_DINODE_FMT_BTREE: 4081 case XFS_DINODE_FMT_BTREE:
4066 ip->i_d.di_forkoff = xfs_attr_shortform_bytesfit(ip, size); 4082 ip->i_d.di_forkoff = xfs_attr_shortform_bytesfit(ip, size);
4067 if (!ip->i_d.di_forkoff) 4083 if (!ip->i_d.di_forkoff)
4068 ip->i_d.di_forkoff = mp->m_attroffset >> 3; 4084 ip->i_d.di_forkoff = xfs_default_attroffset(ip) >> 3;
4069 else if (mp->m_flags & XFS_MOUNT_ATTR2) 4085 else if (mp->m_flags & XFS_MOUNT_ATTR2)
4070 version = 2; 4086 version = 2;
4071 break; 4087 break;
@@ -4212,12 +4228,12 @@ xfs_bmap_compute_maxlevels(
4212 * (a signed 16-bit number, xfs_aextnum_t). 4228 * (a signed 16-bit number, xfs_aextnum_t).
4213 * 4229 *
4214 * Note that we can no longer assume that if we are in ATTR1 that 4230 * Note that we can no longer assume that if we are in ATTR1 that
4215 * the fork offset of all the inodes will be (m_attroffset >> 3) 4231 * the fork offset of all the inodes will be
4216 * because we could have mounted with ATTR2 and then mounted back 4232 * (xfs_default_attroffset(ip) >> 3) because we could have mounted
4217 * with ATTR1, keeping the di_forkoff's fixed but probably at 4233 * with ATTR2 and then mounted back with ATTR1, keeping the
4218 * various positions. Therefore, for both ATTR1 and ATTR2 4234 * di_forkoff's fixed but probably at various positions. Therefore,
4219 * we have to assume the worst case scenario of a minimum size 4235 * for both ATTR1 and ATTR2 we have to assume the worst case scenario
4220 * available. 4236 * of a minimum size available.
4221 */ 4237 */
4222 if (whichfork == XFS_DATA_FORK) { 4238 if (whichfork == XFS_DATA_FORK) {
4223 maxleafents = MAXEXTNUM; 4239 maxleafents = MAXEXTNUM;
@@ -4804,7 +4820,7 @@ xfs_bmapi(
4804 xfs_extlen_t minlen; /* min allocation size */ 4820 xfs_extlen_t minlen; /* min allocation size */
4805 xfs_mount_t *mp; /* xfs mount structure */ 4821 xfs_mount_t *mp; /* xfs mount structure */
4806 int n; /* current extent index */ 4822 int n; /* current extent index */
4807 int nallocs; /* number of extents alloc\'d */ 4823 int nallocs; /* number of extents alloc'd */
4808 xfs_extnum_t nextents; /* number of extents in file */ 4824 xfs_extnum_t nextents; /* number of extents in file */
4809 xfs_fileoff_t obno; /* old block number (offset) */ 4825 xfs_fileoff_t obno; /* old block number (offset) */
4810 xfs_bmbt_irec_t prev; /* previous file extent record */ 4826 xfs_bmbt_irec_t prev; /* previous file extent record */
@@ -6204,7 +6220,7 @@ xfs_bmap_get_bp(
6204 return(bp); 6220 return(bp);
6205} 6221}
6206 6222
6207void 6223STATIC void
6208xfs_check_block( 6224xfs_check_block(
6209 struct xfs_btree_block *block, 6225 struct xfs_btree_block *block,
6210 xfs_mount_t *mp, 6226 xfs_mount_t *mp,
@@ -6494,7 +6510,7 @@ xfs_bmap_count_tree(
6494 block = XFS_BUF_TO_BLOCK(bp); 6510 block = XFS_BUF_TO_BLOCK(bp);
6495 6511
6496 if (--level) { 6512 if (--level) {
6497 /* Not at node above leafs, count this level of nodes */ 6513 /* Not at node above leaves, count this level of nodes */
6498 nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib); 6514 nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
6499 while (nextbno != NULLFSBLOCK) { 6515 while (nextbno != NULLFSBLOCK) {
6500 if ((error = xfs_btree_read_bufl(mp, tp, nextbno, 6516 if ((error = xfs_btree_read_bufl(mp, tp, nextbno,
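
Several hunks above assign xfs_default_attroffset(ip) >> 3 (previously m_attroffset >> 3) to di_forkoff: the inode's fork-offset field counts 8-byte units rather than bytes, hence the shift. A small sketch making the conversion explicit (illustrative helpers, not XFS macros):

    /* di_forkoff is kept in 8-byte units; these helpers spell out the shift. */
    static inline unsigned char attroffset_to_forkoff(unsigned int bytes)
    {
            return (unsigned char)(bytes >> 3);
    }

    static inline unsigned int forkoff_to_attroffset(unsigned char forkoff)
    {
            return (unsigned int)forkoff << 3;
    }
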
diff --git a/fs/xfs/xfs_bmap.h b/fs/xfs/xfs_bmap.h
index be2979d88d32..1b8ff9256bd0 100644
--- a/fs/xfs/xfs_bmap.h
+++ b/fs/xfs/xfs_bmap.h
@@ -125,7 +125,7 @@ typedef struct xfs_bmalloca {
125 struct xfs_bmbt_irec *gotp; /* extent after, or delayed */ 125 struct xfs_bmbt_irec *gotp; /* extent after, or delayed */
126 xfs_extlen_t alen; /* i/o length asked/allocated */ 126 xfs_extlen_t alen; /* i/o length asked/allocated */
127 xfs_extlen_t total; /* total blocks needed for xaction */ 127 xfs_extlen_t total; /* total blocks needed for xaction */
128 xfs_extlen_t minlen; /* mininum allocation size (blocks) */ 128 xfs_extlen_t minlen; /* minimum allocation size (blocks) */
129 xfs_extlen_t minleft; /* amount must be left after alloc */ 129 xfs_extlen_t minleft; /* amount must be left after alloc */
130 char eof; /* set if allocating past last extent */ 130 char eof; /* set if allocating past last extent */
131 char wasdel; /* replacing a delayed allocation */ 131 char wasdel; /* replacing a delayed allocation */
@@ -338,6 +338,10 @@ xfs_check_nostate_extents(
338 xfs_extnum_t idx, 338 xfs_extnum_t idx,
339 xfs_extnum_t num); 339 xfs_extnum_t num);
340 340
341uint
342xfs_default_attroffset(
343 struct xfs_inode *ip);
344
341#ifdef __KERNEL__ 345#ifdef __KERNEL__
342 346
343/* 347/*
diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c
index e73c332eb23f..e9df99574829 100644
--- a/fs/xfs/xfs_btree.c
+++ b/fs/xfs/xfs_btree.c
@@ -1883,7 +1883,7 @@ xfs_btree_lshift(
1883 1883
1884 /* 1884 /*
1885 * We add one entry to the left side and remove one for the right side. 1885 * We add one entry to the left side and remove one for the right side.
1886 * Accout for it here, the changes will be updated on disk and logged 1886 * Account for it here, the changes will be updated on disk and logged
1887 * later. 1887 * later.
1888 */ 1888 */
1889 lrecs++; 1889 lrecs++;
@@ -3535,7 +3535,7 @@ xfs_btree_delrec(
3535 XFS_BTREE_STATS_INC(cur, join); 3535 XFS_BTREE_STATS_INC(cur, join);
3536 3536
3537 /* 3537 /*
3538 * Fix up the the number of records and right block pointer in the 3538 * Fix up the number of records and right block pointer in the
3539 * surviving block, and log it. 3539 * surviving block, and log it.
3540 */ 3540 */
3541 xfs_btree_set_numrecs(left, lrecs + rrecs); 3541 xfs_btree_set_numrecs(left, lrecs + rrecs);
diff --git a/fs/xfs/xfs_btree.h b/fs/xfs/xfs_btree.h
index 789fffdf8b2f..4f852b735b96 100644
--- a/fs/xfs/xfs_btree.h
+++ b/fs/xfs/xfs_btree.h
@@ -41,7 +41,7 @@ extern kmem_zone_t *xfs_btree_cur_zone;
41/* 41/*
42 * Generic btree header. 42 * Generic btree header.
43 * 43 *
44 * This is a comination of the actual format used on disk for short and long 44 * This is a combination of the actual format used on disk for short and long
45 * format btrees. The first three fields are shared by both format, but 45 * format btrees. The first three fields are shared by both format, but
46 * the pointers are different and should be used with care. 46 * the pointers are different and should be used with care.
47 * 47 *
diff --git a/fs/xfs/xfs_da_btree.c b/fs/xfs/xfs_da_btree.c
index c45f74ff1a5b..9ff6e57a5075 100644
--- a/fs/xfs/xfs_da_btree.c
+++ b/fs/xfs/xfs_da_btree.c
@@ -1503,7 +1503,7 @@ xfs_da_path_shift(xfs_da_state_t *state, xfs_da_state_path_t *path,
1503 * This is implemented with some source-level loop unrolling. 1503 * This is implemented with some source-level loop unrolling.
1504 */ 1504 */
1505xfs_dahash_t 1505xfs_dahash_t
1506xfs_da_hashname(const uchar_t *name, int namelen) 1506xfs_da_hashname(const __uint8_t *name, int namelen)
1507{ 1507{
1508 xfs_dahash_t hash; 1508 xfs_dahash_t hash;
1509 1509
diff --git a/fs/xfs/xfs_da_btree.h b/fs/xfs/xfs_da_btree.h
index 70b710c1792d..8c536167bf75 100644
--- a/fs/xfs/xfs_da_btree.h
+++ b/fs/xfs/xfs_da_btree.h
@@ -91,9 +91,9 @@ enum xfs_dacmp {
91 * Structure to ease passing around component names. 91 * Structure to ease passing around component names.
92 */ 92 */
93typedef struct xfs_da_args { 93typedef struct xfs_da_args {
94 const uchar_t *name; /* string (maybe not NULL terminated) */ 94 const __uint8_t *name; /* string (maybe not NULL terminated) */
95 int namelen; /* length of string (maybe no NULL) */ 95 int namelen; /* length of string (maybe no NULL) */
96 uchar_t *value; /* set of bytes (maybe contain NULLs) */ 96 __uint8_t *value; /* set of bytes (maybe contain NULLs) */
97 int valuelen; /* length of value */ 97 int valuelen; /* length of value */
98 int flags; /* argument flags (eg: ATTR_NOCREATE) */ 98 int flags; /* argument flags (eg: ATTR_NOCREATE) */
99 xfs_dahash_t hashval; /* hash value of name */ 99 xfs_dahash_t hashval; /* hash value of name */
@@ -185,7 +185,7 @@ typedef struct xfs_da_state {
185 unsigned char inleaf; /* insert into 1->lf, 0->splf */ 185 unsigned char inleaf; /* insert into 1->lf, 0->splf */
186 unsigned char extravalid; /* T/F: extrablk is in use */ 186 unsigned char extravalid; /* T/F: extrablk is in use */
187 unsigned char extraafter; /* T/F: extrablk is after new */ 187 unsigned char extraafter; /* T/F: extrablk is after new */
188 xfs_da_state_blk_t extrablk; /* for double-splits on leafs */ 188 xfs_da_state_blk_t extrablk; /* for double-splits on leaves */
189 /* for dirv2 extrablk is data */ 189 /* for dirv2 extrablk is data */
190} xfs_da_state_t; 190} xfs_da_state_t;
191 191
@@ -251,7 +251,7 @@ xfs_daddr_t xfs_da_reada_buf(struct xfs_trans *trans, struct xfs_inode *dp,
251int xfs_da_shrink_inode(xfs_da_args_t *args, xfs_dablk_t dead_blkno, 251int xfs_da_shrink_inode(xfs_da_args_t *args, xfs_dablk_t dead_blkno,
252 xfs_dabuf_t *dead_buf); 252 xfs_dabuf_t *dead_buf);
253 253
254uint xfs_da_hashname(const uchar_t *name_string, int name_length); 254uint xfs_da_hashname(const __uint8_t *name_string, int name_length);
255enum xfs_dacmp xfs_da_compname(struct xfs_da_args *args, 255enum xfs_dacmp xfs_da_compname(struct xfs_da_args *args,
256 const char *name, int len); 256 const char *name, int len);
257 257
@@ -268,5 +268,6 @@ xfs_daddr_t xfs_da_blkno(xfs_dabuf_t *dabuf);
268 268
269extern struct kmem_zone *xfs_da_state_zone; 269extern struct kmem_zone *xfs_da_state_zone;
270extern struct kmem_zone *xfs_dabuf_zone; 270extern struct kmem_zone *xfs_dabuf_zone;
271extern const struct xfs_nameops xfs_default_nameops;
271 272
272#endif /* __XFS_DA_BTREE_H__ */ 273#endif /* __XFS_DA_BTREE_H__ */
diff --git a/fs/xfs/xfs_dfrag.c b/fs/xfs/xfs_dfrag.c
index f8278cfcc1d3..e6d839bddbf0 100644
--- a/fs/xfs/xfs_dfrag.c
+++ b/fs/xfs/xfs_dfrag.c
@@ -79,6 +79,12 @@ xfs_swapext(
79 goto out_put_target_file; 79 goto out_put_target_file;
80 } 80 }
81 81
82 if (IS_SWAPFILE(file->f_path.dentry->d_inode) ||
83 IS_SWAPFILE(target_file->f_path.dentry->d_inode)) {
84 error = XFS_ERROR(EINVAL);
85 goto out_put_target_file;
86 }
87
82 ip = XFS_I(file->f_path.dentry->d_inode); 88 ip = XFS_I(file->f_path.dentry->d_inode);
83 tip = XFS_I(target_file->f_path.dentry->d_inode); 89 tip = XFS_I(target_file->f_path.dentry->d_inode);
84 90
@@ -118,19 +124,17 @@ xfs_swap_extents(
118 xfs_bstat_t *sbp = &sxp->sx_stat; 124 xfs_bstat_t *sbp = &sxp->sx_stat;
119 xfs_ifork_t *tempifp, *ifp, *tifp; 125 xfs_ifork_t *tempifp, *ifp, *tifp;
120 int ilf_fields, tilf_fields; 126 int ilf_fields, tilf_fields;
121 static uint lock_flags = XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL;
122 int error = 0; 127 int error = 0;
123 int aforkblks = 0; 128 int aforkblks = 0;
124 int taforkblks = 0; 129 int taforkblks = 0;
125 __uint64_t tmp; 130 __uint64_t tmp;
126 char locked = 0;
127 131
128 mp = ip->i_mount; 132 mp = ip->i_mount;
129 133
130 tempifp = kmem_alloc(sizeof(xfs_ifork_t), KM_MAYFAIL); 134 tempifp = kmem_alloc(sizeof(xfs_ifork_t), KM_MAYFAIL);
131 if (!tempifp) { 135 if (!tempifp) {
132 error = XFS_ERROR(ENOMEM); 136 error = XFS_ERROR(ENOMEM);
133 goto error0; 137 goto out;
134 } 138 }
135 139
136 sbp = &sxp->sx_stat; 140 sbp = &sxp->sx_stat;
@@ -143,25 +147,24 @@ xfs_swap_extents(
143 */ 147 */
144 xfs_lock_two_inodes(ip, tip, XFS_IOLOCK_EXCL); 148 xfs_lock_two_inodes(ip, tip, XFS_IOLOCK_EXCL);
145 xfs_lock_two_inodes(ip, tip, XFS_ILOCK_EXCL); 149 xfs_lock_two_inodes(ip, tip, XFS_ILOCK_EXCL);
146 locked = 1;
147 150
148 /* Verify that both files have the same format */ 151 /* Verify that both files have the same format */
149 if ((ip->i_d.di_mode & S_IFMT) != (tip->i_d.di_mode & S_IFMT)) { 152 if ((ip->i_d.di_mode & S_IFMT) != (tip->i_d.di_mode & S_IFMT)) {
150 error = XFS_ERROR(EINVAL); 153 error = XFS_ERROR(EINVAL);
151 goto error0; 154 goto out_unlock;
152 } 155 }
153 156
154 /* Verify both files are either real-time or non-realtime */ 157 /* Verify both files are either real-time or non-realtime */
155 if (XFS_IS_REALTIME_INODE(ip) != XFS_IS_REALTIME_INODE(tip)) { 158 if (XFS_IS_REALTIME_INODE(ip) != XFS_IS_REALTIME_INODE(tip)) {
156 error = XFS_ERROR(EINVAL); 159 error = XFS_ERROR(EINVAL);
157 goto error0; 160 goto out_unlock;
158 } 161 }
159 162
160 /* Should never get a local format */ 163 /* Should never get a local format */
161 if (ip->i_d.di_format == XFS_DINODE_FMT_LOCAL || 164 if (ip->i_d.di_format == XFS_DINODE_FMT_LOCAL ||
162 tip->i_d.di_format == XFS_DINODE_FMT_LOCAL) { 165 tip->i_d.di_format == XFS_DINODE_FMT_LOCAL) {
163 error = XFS_ERROR(EINVAL); 166 error = XFS_ERROR(EINVAL);
164 goto error0; 167 goto out_unlock;
165 } 168 }
166 169
167 if (VN_CACHED(VFS_I(tip)) != 0) { 170 if (VN_CACHED(VFS_I(tip)) != 0) {
@@ -169,13 +172,13 @@ xfs_swap_extents(
169 error = xfs_flushinval_pages(tip, 0, -1, 172 error = xfs_flushinval_pages(tip, 0, -1,
170 FI_REMAPF_LOCKED); 173 FI_REMAPF_LOCKED);
171 if (error) 174 if (error)
172 goto error0; 175 goto out_unlock;
173 } 176 }
174 177
175 /* Verify O_DIRECT for ftmp */ 178 /* Verify O_DIRECT for ftmp */
176 if (VN_CACHED(VFS_I(tip)) != 0) { 179 if (VN_CACHED(VFS_I(tip)) != 0) {
177 error = XFS_ERROR(EINVAL); 180 error = XFS_ERROR(EINVAL);
178 goto error0; 181 goto out_unlock;
179 } 182 }
180 183
181 /* Verify all data are being swapped */ 184 /* Verify all data are being swapped */
@@ -183,7 +186,7 @@ xfs_swap_extents(
183 sxp->sx_length != ip->i_d.di_size || 186 sxp->sx_length != ip->i_d.di_size ||
184 sxp->sx_length != tip->i_d.di_size) { 187 sxp->sx_length != tip->i_d.di_size) {
185 error = XFS_ERROR(EFAULT); 188 error = XFS_ERROR(EFAULT);
186 goto error0; 189 goto out_unlock;
187 } 190 }
188 191
189 /* 192 /*
@@ -193,7 +196,7 @@ xfs_swap_extents(
193 */ 196 */
194 if ( XFS_IFORK_Q(ip) != XFS_IFORK_Q(tip) ) { 197 if ( XFS_IFORK_Q(ip) != XFS_IFORK_Q(tip) ) {
195 error = XFS_ERROR(EINVAL); 198 error = XFS_ERROR(EINVAL);
196 goto error0; 199 goto out_unlock;
197 } 200 }
198 201
199 /* 202 /*
@@ -208,7 +211,7 @@ xfs_swap_extents(
208 (sbp->bs_mtime.tv_sec != ip->i_d.di_mtime.t_sec) || 211 (sbp->bs_mtime.tv_sec != ip->i_d.di_mtime.t_sec) ||
209 (sbp->bs_mtime.tv_nsec != ip->i_d.di_mtime.t_nsec)) { 212 (sbp->bs_mtime.tv_nsec != ip->i_d.di_mtime.t_nsec)) {
210 error = XFS_ERROR(EBUSY); 213 error = XFS_ERROR(EBUSY);
211 goto error0; 214 goto out_unlock;
212 } 215 }
213 216
214 /* We need to fail if the file is memory mapped. Once we have tossed 217 /* We need to fail if the file is memory mapped. Once we have tossed
@@ -219,7 +222,7 @@ xfs_swap_extents(
219 */ 222 */
220 if (VN_MAPPED(VFS_I(ip))) { 223 if (VN_MAPPED(VFS_I(ip))) {
221 error = XFS_ERROR(EBUSY); 224 error = XFS_ERROR(EBUSY);
222 goto error0; 225 goto out_unlock;
223 } 226 }
224 227
225 xfs_iunlock(ip, XFS_ILOCK_EXCL); 228 xfs_iunlock(ip, XFS_ILOCK_EXCL);
@@ -242,8 +245,7 @@ xfs_swap_extents(
242 xfs_iunlock(ip, XFS_IOLOCK_EXCL); 245 xfs_iunlock(ip, XFS_IOLOCK_EXCL);
243 xfs_iunlock(tip, XFS_IOLOCK_EXCL); 246 xfs_iunlock(tip, XFS_IOLOCK_EXCL);
244 xfs_trans_cancel(tp, 0); 247 xfs_trans_cancel(tp, 0);
245 locked = 0; 248 goto out;
246 goto error0;
247 } 249 }
248 xfs_lock_two_inodes(ip, tip, XFS_ILOCK_EXCL); 250 xfs_lock_two_inodes(ip, tip, XFS_ILOCK_EXCL);
249 251
@@ -253,19 +255,15 @@ xfs_swap_extents(
253 if ( ((XFS_IFORK_Q(ip) != 0) && (ip->i_d.di_anextents > 0)) && 255 if ( ((XFS_IFORK_Q(ip) != 0) && (ip->i_d.di_anextents > 0)) &&
254 (ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) { 256 (ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
255 error = xfs_bmap_count_blocks(tp, ip, XFS_ATTR_FORK, &aforkblks); 257 error = xfs_bmap_count_blocks(tp, ip, XFS_ATTR_FORK, &aforkblks);
256 if (error) { 258 if (error)
257 xfs_trans_cancel(tp, 0); 259 goto out_trans_cancel;
258 goto error0;
259 }
260 } 260 }
261 if ( ((XFS_IFORK_Q(tip) != 0) && (tip->i_d.di_anextents > 0)) && 261 if ( ((XFS_IFORK_Q(tip) != 0) && (tip->i_d.di_anextents > 0)) &&
262 (tip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) { 262 (tip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
263 error = xfs_bmap_count_blocks(tp, tip, XFS_ATTR_FORK, 263 error = xfs_bmap_count_blocks(tp, tip, XFS_ATTR_FORK,
264 &taforkblks); 264 &taforkblks);
265 if (error) { 265 if (error)
266 xfs_trans_cancel(tp, 0); 266 goto out_trans_cancel;
267 goto error0;
268 }
269 } 267 }
270 268
271 /* 269 /*
@@ -332,10 +330,10 @@ xfs_swap_extents(
332 330
333 331
334 IHOLD(ip); 332 IHOLD(ip);
335 xfs_trans_ijoin(tp, ip, lock_flags); 333 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
336 334
337 IHOLD(tip); 335 IHOLD(tip);
338 xfs_trans_ijoin(tp, tip, lock_flags); 336 xfs_trans_ijoin(tp, tip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
339 337
340 xfs_trans_log_inode(tp, ip, ilf_fields); 338 xfs_trans_log_inode(tp, ip, ilf_fields);
341 xfs_trans_log_inode(tp, tip, tilf_fields); 339 xfs_trans_log_inode(tp, tip, tilf_fields);
@@ -344,19 +342,19 @@ xfs_swap_extents(
344 * If this is a synchronous mount, make sure that the 342 * If this is a synchronous mount, make sure that the
345 * transaction goes to disk before returning to the user. 343 * transaction goes to disk before returning to the user.
346 */ 344 */
347 if (mp->m_flags & XFS_MOUNT_WSYNC) { 345 if (mp->m_flags & XFS_MOUNT_WSYNC)
348 xfs_trans_set_sync(tp); 346 xfs_trans_set_sync(tp);
349 }
350 347
351 error = xfs_trans_commit(tp, XFS_TRANS_SWAPEXT); 348 error = xfs_trans_commit(tp, XFS_TRANS_SWAPEXT);
352 locked = 0;
353 349
354 error0: 350out_unlock:
355 if (locked) { 351 xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
356 xfs_iunlock(ip, lock_flags); 352 xfs_iunlock(tip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
357 xfs_iunlock(tip, lock_flags); 353out:
358 } 354 kmem_free(tempifp);
359 if (tempifp != NULL)
360 kmem_free(tempifp);
361 return error; 355 return error;
356
357out_trans_cancel:
358 xfs_trans_cancel(tp, 0);
359 goto out_unlock;
362} 360}
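
The xfs_swap_extents() rework above replaces the locked flag and the single error0: label with the usual kernel unwind style: each failure jumps to a label that releases exactly what has been acquired so far, and the transaction-cancel path falls through into the unlock path. A compact, self-contained sketch of that shape (plain C stubs, not the XFS calls):

    #include <stdlib.h>

    struct ctx { int locked; int in_trans; };

    static void lock_pair(struct ctx *c)    { c->locked = 1; }
    static void unlock_pair(struct ctx *c)  { c->locked = 0; }
    static int  start_trans(struct ctx *c)  { c->in_trans = 1; return 0; }
    static int  commit_trans(struct ctx *c) { c->in_trans = 0; return 0; }
    static void cancel_trans(struct ctx *c) { c->in_trans = 0; }
    static int  do_swap(struct ctx *c)      { (void)c; return 0; }

    static int swap_extents(struct ctx *c)
    {
            void *scratch;
            int error;

            scratch = malloc(64);
            if (!scratch)
                    return -1;               /* nothing acquired yet: plain return */

            lock_pair(c);

            error = start_trans(c);
            if (error)
                    goto out_unlock;         /* only the locks and scratch to undo */

            error = do_swap(c);
            if (error)
                    goto out_trans_cancel;   /* cancel, then fall into the unlock path */

            error = commit_trans(c);

    out_unlock:
            unlock_pair(c);
            free(scratch);
            return error;

    out_trans_cancel:
            cancel_trans(c);
            goto out_unlock;
    }
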
diff --git a/fs/xfs/xfs_dinode.h b/fs/xfs/xfs_dinode.h
index 162e8726df5e..e5b153b2e6a3 100644
--- a/fs/xfs/xfs_dinode.h
+++ b/fs/xfs/xfs_dinode.h
@@ -103,7 +103,9 @@ typedef enum xfs_dinode_fmt {
103/* 103/*
104 * Inode size for given fs. 104 * Inode size for given fs.
105 */ 105 */
106#define XFS_LITINO(mp) ((mp)->m_litino) 106#define XFS_LITINO(mp) \
107 ((int)(((mp)->m_sb.sb_inodesize) - sizeof(struct xfs_dinode)))
108
107#define XFS_BROOT_SIZE_ADJ \ 109#define XFS_BROOT_SIZE_ADJ \
108 (XFS_BTREE_LBLOCK_LEN - sizeof(xfs_bmdr_block_t)) 110 (XFS_BTREE_LBLOCK_LEN - sizeof(xfs_bmdr_block_t))
109 111
diff --git a/fs/xfs/xfs_dir2.c b/fs/xfs/xfs_dir2.c
index 1afb12278b8d..c657bec6d951 100644
--- a/fs/xfs/xfs_dir2.c
+++ b/fs/xfs/xfs_dir2.c
@@ -46,8 +46,6 @@
46 46
47struct xfs_name xfs_name_dotdot = {"..", 2}; 47struct xfs_name xfs_name_dotdot = {"..", 2};
48 48
49extern const struct xfs_nameops xfs_default_nameops;
50
51/* 49/*
52 * ASCII case-insensitive (ie. A-Z) support for directories that was 50 * ASCII case-insensitive (ie. A-Z) support for directories that was
53 * used in IRIX. 51 * used in IRIX.
diff --git a/fs/xfs/xfs_dir2_block.c b/fs/xfs/xfs_dir2_block.c
index e1f0a06aaf04..ab52e9e1c1ee 100644
--- a/fs/xfs/xfs_dir2_block.c
+++ b/fs/xfs/xfs_dir2_block.c
@@ -448,7 +448,6 @@ xfs_dir2_block_getdents(
448 xfs_mount_t *mp; /* filesystem mount point */ 448 xfs_mount_t *mp; /* filesystem mount point */
449 char *ptr; /* current data entry */ 449 char *ptr; /* current data entry */
450 int wantoff; /* starting block offset */ 450 int wantoff; /* starting block offset */
451 xfs_ino_t ino;
452 xfs_off_t cook; 451 xfs_off_t cook;
453 452
454 mp = dp->i_mount; 453 mp = dp->i_mount;
@@ -509,16 +508,12 @@ xfs_dir2_block_getdents(
509 508
510 cook = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk, 509 cook = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk,
511 (char *)dep - (char *)block); 510 (char *)dep - (char *)block);
512 ino = be64_to_cpu(dep->inumber);
513#if XFS_BIG_INUMS
514 ino += mp->m_inoadd;
515#endif
516 511
517 /* 512 /*
518 * If it didn't fit, set the final offset to here & return. 513 * If it didn't fit, set the final offset to here & return.
519 */ 514 */
520 if (filldir(dirent, dep->name, dep->namelen, cook & 0x7fffffff, 515 if (filldir(dirent, dep->name, dep->namelen, cook & 0x7fffffff,
521 ino, DT_UNKNOWN)) { 516 be64_to_cpu(dep->inumber), DT_UNKNOWN)) {
522 *offset = cook & 0x7fffffff; 517 *offset = cook & 0x7fffffff;
523 xfs_da_brelse(NULL, bp); 518 xfs_da_brelse(NULL, bp);
524 return 0; 519 return 0;
diff --git a/fs/xfs/xfs_dir2_data.h b/fs/xfs/xfs_dir2_data.h
index b816e0252739..efbc290c7fec 100644
--- a/fs/xfs/xfs_dir2_data.h
+++ b/fs/xfs/xfs_dir2_data.h
@@ -38,7 +38,7 @@ struct xfs_trans;
38 38
39/* 39/*
40 * Directory address space divided into sections, 40 * Directory address space divided into sections,
41 * spaces separated by 32gb. 41 * spaces separated by 32GB.
42 */ 42 */
43#define XFS_DIR2_SPACE_SIZE (1ULL << (32 + XFS_DIR2_DATA_ALIGN_LOG)) 43#define XFS_DIR2_SPACE_SIZE (1ULL << (32 + XFS_DIR2_DATA_ALIGN_LOG))
44#define XFS_DIR2_DATA_SPACE 0 44#define XFS_DIR2_DATA_SPACE 0
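
To spell out the constant behind the comment fix above: assuming XFS_DIR2_DATA_ALIGN_LOG is 3 (8-byte alignment of directory data), the section spacing works out as

    XFS_DIR2_SPACE_SIZE = 1ULL << (32 + 3) = 2^35 bytes = 32 GiB

so successive directory address-space sections start 32GB apart in the directory's logical address space.
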
diff --git a/fs/xfs/xfs_dir2_leaf.c b/fs/xfs/xfs_dir2_leaf.c
index ef805a374eec..fa913e459442 100644
--- a/fs/xfs/xfs_dir2_leaf.c
+++ b/fs/xfs/xfs_dir2_leaf.c
@@ -549,7 +549,7 @@ xfs_dir2_leaf_addname(
549 * Check the internal consistency of a leaf1 block. 549 * Check the internal consistency of a leaf1 block.
550 * Pop an assert if something is wrong. 550 * Pop an assert if something is wrong.
551 */ 551 */
552void 552STATIC void
553xfs_dir2_leaf_check( 553xfs_dir2_leaf_check(
554 xfs_inode_t *dp, /* incore directory inode */ 554 xfs_inode_t *dp, /* incore directory inode */
555 xfs_dabuf_t *bp) /* leaf's buffer */ 555 xfs_dabuf_t *bp) /* leaf's buffer */
@@ -780,7 +780,6 @@ xfs_dir2_leaf_getdents(
780 int ra_index; /* *map index for read-ahead */ 780 int ra_index; /* *map index for read-ahead */
781 int ra_offset; /* map entry offset for ra */ 781 int ra_offset; /* map entry offset for ra */
782 int ra_want; /* readahead count wanted */ 782 int ra_want; /* readahead count wanted */
783 xfs_ino_t ino;
784 783
785 /* 784 /*
786 * If the offset is at or past the largest allowed value, 785 * If the offset is at or past the largest allowed value,
@@ -1076,24 +1075,12 @@ xfs_dir2_leaf_getdents(
1076 continue; 1075 continue;
1077 } 1076 }
1078 1077
1079 /*
1080 * Copy the entry into the putargs, and try formatting it.
1081 */
1082 dep = (xfs_dir2_data_entry_t *)ptr; 1078 dep = (xfs_dir2_data_entry_t *)ptr;
1083
1084 length = xfs_dir2_data_entsize(dep->namelen); 1079 length = xfs_dir2_data_entsize(dep->namelen);
1085 1080
1086 ino = be64_to_cpu(dep->inumber);
1087#if XFS_BIG_INUMS
1088 ino += mp->m_inoadd;
1089#endif
1090
1091 /*
1092 * Won't fit. Return to caller.
1093 */
1094 if (filldir(dirent, dep->name, dep->namelen, 1081 if (filldir(dirent, dep->name, dep->namelen,
1095 xfs_dir2_byte_to_dataptr(mp, curoff) & 0x7fffffff, 1082 xfs_dir2_byte_to_dataptr(mp, curoff) & 0x7fffffff,
1096 ino, DT_UNKNOWN)) 1083 be64_to_cpu(dep->inumber), DT_UNKNOWN))
1097 break; 1084 break;
1098 1085
1099 /* 1086 /*
diff --git a/fs/xfs/xfs_dir2_node.c b/fs/xfs/xfs_dir2_node.c
index fa6c3a5ddbc6..5a81ccd1045b 100644
--- a/fs/xfs/xfs_dir2_node.c
+++ b/fs/xfs/xfs_dir2_node.c
@@ -1104,7 +1104,7 @@ xfs_dir2_leafn_remove(
1104 } 1104 }
1105 xfs_dir2_leafn_check(dp, bp); 1105 xfs_dir2_leafn_check(dp, bp);
1106 /* 1106 /*
1107 * Return indication of whether this leaf block is emtpy enough 1107 * Return indication of whether this leaf block is empty enough
1108 * to justify trying to join it with a neighbor. 1108 * to justify trying to join it with a neighbor.
1109 */ 1109 */
1110 *rval = 1110 *rval =
diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
index a8a8a6efad5b..e89734e84646 100644
--- a/fs/xfs/xfs_dir2_sf.c
+++ b/fs/xfs/xfs_dir2_sf.c
@@ -748,11 +748,7 @@ xfs_dir2_sf_getdents(
748 * Put . entry unless we're starting past it. 748 * Put . entry unless we're starting past it.
749 */ 749 */
750 if (*offset <= dot_offset) { 750 if (*offset <= dot_offset) {
751 ino = dp->i_ino; 751 if (filldir(dirent, ".", 1, dot_offset & 0x7fffffff, dp->i_ino, DT_DIR)) {
752#if XFS_BIG_INUMS
753 ino += mp->m_inoadd;
754#endif
755 if (filldir(dirent, ".", 1, dot_offset & 0x7fffffff, ino, DT_DIR)) {
756 *offset = dot_offset & 0x7fffffff; 752 *offset = dot_offset & 0x7fffffff;
757 return 0; 753 return 0;
758 } 754 }
@@ -763,9 +759,6 @@ xfs_dir2_sf_getdents(
763 */ 759 */
764 if (*offset <= dotdot_offset) { 760 if (*offset <= dotdot_offset) {
765 ino = xfs_dir2_sf_get_inumber(sfp, &sfp->hdr.parent); 761 ino = xfs_dir2_sf_get_inumber(sfp, &sfp->hdr.parent);
766#if XFS_BIG_INUMS
767 ino += mp->m_inoadd;
768#endif
769 if (filldir(dirent, "..", 2, dotdot_offset & 0x7fffffff, ino, DT_DIR)) { 762 if (filldir(dirent, "..", 2, dotdot_offset & 0x7fffffff, ino, DT_DIR)) {
770 *offset = dotdot_offset & 0x7fffffff; 763 *offset = dotdot_offset & 0x7fffffff;
771 return 0; 764 return 0;
@@ -786,10 +779,6 @@ xfs_dir2_sf_getdents(
786 } 779 }
787 780
788 ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep)); 781 ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep));
789#if XFS_BIG_INUMS
790 ino += mp->m_inoadd;
791#endif
792
793 if (filldir(dirent, sfep->name, sfep->namelen, 782 if (filldir(dirent, sfep->name, sfep->namelen,
794 off & 0x7fffffff, ino, DT_UNKNOWN)) { 783 off & 0x7fffffff, ino, DT_UNKNOWN)) {
795 *offset = off & 0x7fffffff; 784 *offset = off & 0x7fffffff;
diff --git a/fs/xfs/xfs_extfree_item.h b/fs/xfs/xfs_extfree_item.h
index 2f049f63e85f..0d22c56fdf64 100644
--- a/fs/xfs/xfs_extfree_item.h
+++ b/fs/xfs/xfs_extfree_item.h
@@ -33,12 +33,10 @@ typedef struct xfs_extent {
33 * conversion routine. 33 * conversion routine.
34 */ 34 */
35 35
36#ifndef HAVE_FORMAT32
37typedef struct xfs_extent_32 { 36typedef struct xfs_extent_32 {
38 __uint64_t ext_start; 37 __uint64_t ext_start;
39 __uint32_t ext_len; 38 __uint32_t ext_len;
40} __attribute__((packed)) xfs_extent_32_t; 39} __attribute__((packed)) xfs_extent_32_t;
41#endif
42 40
43typedef struct xfs_extent_64 { 41typedef struct xfs_extent_64 {
44 __uint64_t ext_start; 42 __uint64_t ext_start;
@@ -59,7 +57,6 @@ typedef struct xfs_efi_log_format {
59 xfs_extent_t efi_extents[1]; /* array of extents to free */ 57 xfs_extent_t efi_extents[1]; /* array of extents to free */
60} xfs_efi_log_format_t; 58} xfs_efi_log_format_t;
61 59
62#ifndef HAVE_FORMAT32
63typedef struct xfs_efi_log_format_32 { 60typedef struct xfs_efi_log_format_32 {
64 __uint16_t efi_type; /* efi log item type */ 61 __uint16_t efi_type; /* efi log item type */
65 __uint16_t efi_size; /* size of this item */ 62 __uint16_t efi_size; /* size of this item */
@@ -67,7 +64,6 @@ typedef struct xfs_efi_log_format_32 {
67 __uint64_t efi_id; /* efi identifier */ 64 __uint64_t efi_id; /* efi identifier */
68 xfs_extent_32_t efi_extents[1]; /* array of extents to free */ 65 xfs_extent_32_t efi_extents[1]; /* array of extents to free */
69} __attribute__((packed)) xfs_efi_log_format_32_t; 66} __attribute__((packed)) xfs_efi_log_format_32_t;
70#endif
71 67
72typedef struct xfs_efi_log_format_64 { 68typedef struct xfs_efi_log_format_64 {
73 __uint16_t efi_type; /* efi log item type */ 69 __uint16_t efi_type; /* efi log item type */
@@ -90,7 +86,6 @@ typedef struct xfs_efd_log_format {
90 xfs_extent_t efd_extents[1]; /* array of extents freed */ 86 xfs_extent_t efd_extents[1]; /* array of extents freed */
91} xfs_efd_log_format_t; 87} xfs_efd_log_format_t;
92 88
93#ifndef HAVE_FORMAT32
94typedef struct xfs_efd_log_format_32 { 89typedef struct xfs_efd_log_format_32 {
95 __uint16_t efd_type; /* efd log item type */ 90 __uint16_t efd_type; /* efd log item type */
96 __uint16_t efd_size; /* size of this item */ 91 __uint16_t efd_size; /* size of this item */
@@ -98,7 +93,6 @@ typedef struct xfs_efd_log_format_32 {
98 __uint64_t efd_efi_id; /* id of corresponding efi */ 93 __uint64_t efd_efi_id; /* id of corresponding efi */
99 xfs_extent_32_t efd_extents[1]; /* array of extents freed */ 94 xfs_extent_32_t efd_extents[1]; /* array of extents freed */
100} __attribute__((packed)) xfs_efd_log_format_32_t; 95} __attribute__((packed)) xfs_efd_log_format_32_t;
101#endif
102 96
103typedef struct xfs_efd_log_format_64 { 97typedef struct xfs_efd_log_format_64 {
104 __uint16_t efd_type; /* efd log item type */ 98 __uint16_t efd_type; /* efd log item type */
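
Dropping the HAVE_FORMAT32 guards makes the packed 32-bit log-format variants unconditionally visible. They exist because a struct ending in a 32-bit field after a 64-bit one pads differently on 32-bit and 64-bit ABIs, so log recovery needs both layouts spelled out. A hedged sketch of the size difference, assuming a typical i386 vs x86-64 ABI; the "natural" struct below is only an illustration, not necessarily byte-for-byte xfs_extent_64_t:

/*
 * Why separate _32/_64 log formats exist: on 32-bit x86 a u64 member is
 * 4-byte aligned, on x86-64 it is 8-byte aligned, so the same C struct
 * can have two in-log sizes. The packed variant pins the 32-bit layout.
 */
#include <stdint.h>
#include <stdio.h>

struct extent_32 {			/* mirrors xfs_extent_32_t */
	uint64_t ext_start;
	uint32_t ext_len;
} __attribute__((packed));

struct extent_natural {			/* what an unpacked layout gives */
	uint64_t ext_start;
	uint32_t ext_len;
};

int main(void)
{
	printf("packed 32-bit layout:  %zu bytes\n", sizeof(struct extent_32));
	printf("natural 64-bit layout: %zu bytes\n",
	       sizeof(struct extent_natural));	/* padded to 16 on x86-64 */
	return 0;
}
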
diff --git a/fs/xfs/xfs_filestream.c b/fs/xfs/xfs_filestream.c
index f3bb75da384e..6c87c8f304ef 100644
--- a/fs/xfs/xfs_filestream.c
+++ b/fs/xfs/xfs_filestream.c
@@ -140,7 +140,7 @@ _xfs_filestream_pick_ag(
140 xfs_extlen_t minlen) 140 xfs_extlen_t minlen)
141{ 141{
142 int err, trylock, nscan; 142 int err, trylock, nscan;
143 xfs_extlen_t delta, longest, need, free, minfree, maxfree = 0; 143 xfs_extlen_t longest, free, minfree, maxfree = 0;
144 xfs_agnumber_t ag, max_ag = NULLAGNUMBER; 144 xfs_agnumber_t ag, max_ag = NULLAGNUMBER;
145 struct xfs_perag *pag; 145 struct xfs_perag *pag;
146 146
@@ -186,12 +186,7 @@ _xfs_filestream_pick_ag(
186 goto next_ag; 186 goto next_ag;
187 } 187 }
188 188
189 need = XFS_MIN_FREELIST_PAG(pag, mp); 189 longest = xfs_alloc_longest_free_extent(mp, pag);
190 delta = need > pag->pagf_flcount ? need - pag->pagf_flcount : 0;
191 longest = (pag->pagf_longest > delta) ?
192 (pag->pagf_longest - delta) :
193 (pag->pagf_flcount > 0 || pag->pagf_longest > 0);
194
195 if (((minlen && longest >= minlen) || 190 if (((minlen && longest >= minlen) ||
196 (!minlen && pag->pagf_freeblks >= minfree)) && 191 (!minlen && pag->pagf_freeblks >= minfree)) &&
197 (!pag->pagf_metadata || !(flags & XFS_PICK_USERDATA) || 192 (!pag->pagf_metadata || !(flags & XFS_PICK_USERDATA) ||
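
The open-coded estimate removed here is what the new xfs_alloc_longest_free_extent() helper factors out ("xfs: factor out code to find the longest free extent in the AG"). A standalone model of that arithmetic, reconstructed from the deleted lines; the helper's actual body is not shown in this diff, so treat this as an illustration only:

/*
 * Model of the removed computation in _xfs_filestream_pick_ag(): reserve
 * enough blocks to refill the AG freelist, then report how long an
 * extent is still guaranteed to be available.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint32_t xfs_extlen_t;

static xfs_extlen_t
longest_free_extent(xfs_extlen_t need,		/* XFS_MIN_FREELIST_PAG() */
		    xfs_extlen_t flcount,	/* pag->pagf_flcount */
		    xfs_extlen_t longest)	/* pag->pagf_longest */
{
	/* blocks the freelist may still take out of the longest extent */
	xfs_extlen_t delta = need > flcount ? need - flcount : 0;

	/*
	 * If the longest extent covers the shortfall, report what is left;
	 * otherwise report 1 if any free space exists at all, else 0.
	 */
	return longest > delta ? longest - delta
			       : (flcount > 0 || longest > 0);
}

int main(void)
{
	printf("%u\n", longest_free_extent(6, 2, 100));	/* 96 */
	printf("%u\n", longest_free_extent(6, 2, 3));	/* 1: some space left */
	printf("%u\n", longest_free_extent(6, 0, 0));	/* 0: AG empty */
	return 0;
}
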
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index 680d0e0ec932..8379e3bca26c 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -576,7 +576,7 @@ out:
576 if (fdblks_delta) { 576 if (fdblks_delta) {
577 /* 577 /*
578 * If we are putting blocks back here, m_resblks_avail is 578 * If we are putting blocks back here, m_resblks_avail is
579 * already at it's max so this will put it in the free pool. 579 * already at its max so this will put it in the free pool.
580 * 580 *
581 * If we need space, we'll either succeed in getting it 581 * If we need space, we'll either succeed in getting it
582 * from the free block count or we'll get an enospc. If 582 * from the free block count or we'll get an enospc. If
diff --git a/fs/xfs/xfs_ialloc.c b/fs/xfs/xfs_ialloc.c
index ab016e5ae7be..3120a3a5e20f 100644
--- a/fs/xfs/xfs_ialloc.c
+++ b/fs/xfs/xfs_ialloc.c
@@ -230,7 +230,7 @@ xfs_ialloc_ag_alloc(
230 args.minalignslop = xfs_ialloc_cluster_alignment(&args) - 1; 230 args.minalignslop = xfs_ialloc_cluster_alignment(&args) - 1;
231 231
232 /* Allow space for the inode btree to split. */ 232 /* Allow space for the inode btree to split. */
233 args.minleft = XFS_IN_MAXLEVELS(args.mp) - 1; 233 args.minleft = args.mp->m_in_maxlevels - 1;
234 if ((error = xfs_alloc_vextent(&args))) 234 if ((error = xfs_alloc_vextent(&args)))
235 return error; 235 return error;
236 } else 236 } else
@@ -270,7 +270,7 @@ xfs_ialloc_ag_alloc(
270 /* 270 /*
271 * Allow space for the inode btree to split. 271 * Allow space for the inode btree to split.
272 */ 272 */
273 args.minleft = XFS_IN_MAXLEVELS(args.mp) - 1; 273 args.minleft = args.mp->m_in_maxlevels - 1;
274 if ((error = xfs_alloc_vextent(&args))) 274 if ((error = xfs_alloc_vextent(&args)))
275 return error; 275 return error;
276 } 276 }
@@ -349,7 +349,7 @@ xfs_ialloc_ag_alloc(
349 * Initialize all inodes in this buffer and then log them. 349 * Initialize all inodes in this buffer and then log them.
350 * 350 *
351 * XXX: It would be much better if we had just one transaction to 351 * XXX: It would be much better if we had just one transaction to
352 * log a whole cluster of inodes instead of all the indivdual 352 * log a whole cluster of inodes instead of all the individual
353 * transactions causing a lot of log traffic. 353 * transactions causing a lot of log traffic.
354 */ 354 */
355 xfs_biozero(fbuf, 0, ninodes << args.mp->m_sb.sb_inodelog); 355 xfs_biozero(fbuf, 0, ninodes << args.mp->m_sb.sb_inodelog);
@@ -943,7 +943,7 @@ nextag:
943 ASSERT((XFS_AGINO_TO_OFFSET(mp, rec.ir_startino) % 943 ASSERT((XFS_AGINO_TO_OFFSET(mp, rec.ir_startino) %
944 XFS_INODES_PER_CHUNK) == 0); 944 XFS_INODES_PER_CHUNK) == 0);
945 ino = XFS_AGINO_TO_INO(mp, agno, rec.ir_startino + offset); 945 ino = XFS_AGINO_TO_INO(mp, agno, rec.ir_startino + offset);
946 XFS_INOBT_CLR_FREE(&rec, offset); 946 rec.ir_free &= ~XFS_INOBT_MASK(offset);
947 rec.ir_freecount--; 947 rec.ir_freecount--;
948 if ((error = xfs_inobt_update(cur, rec.ir_startino, rec.ir_freecount, 948 if ((error = xfs_inobt_update(cur, rec.ir_startino, rec.ir_freecount,
949 rec.ir_free))) 949 rec.ir_free)))
@@ -1105,11 +1105,11 @@ xfs_difree(
1105 */ 1105 */
1106 off = agino - rec.ir_startino; 1106 off = agino - rec.ir_startino;
1107 ASSERT(off >= 0 && off < XFS_INODES_PER_CHUNK); 1107 ASSERT(off >= 0 && off < XFS_INODES_PER_CHUNK);
1108 ASSERT(!XFS_INOBT_IS_FREE(&rec, off)); 1108 ASSERT(!(rec.ir_free & XFS_INOBT_MASK(off)));
1109 /* 1109 /*
1110 * Mark the inode free & increment the count. 1110 * Mark the inode free & increment the count.
1111 */ 1111 */
1112 XFS_INOBT_SET_FREE(&rec, off); 1112 rec.ir_free |= XFS_INOBT_MASK(off);
1113 rec.ir_freecount++; 1113 rec.ir_freecount++;
1114 1114
1115 /* 1115 /*
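
The XFS_INOBT_SET_FREE/CLR_FREE macros used here previously (removed further down in xfs_ialloc_btree.h) were plain bit operations on the 64-bit ir_free mask. A self-contained sketch of the open-coded equivalents, using a simplified record type:

/*
 * ir_free bit twiddling: bit set means the inode is free. The diff
 * open-codes what the old macros did, using XFS_INOBT_MASK().
 */
#include <assert.h>
#include <stdint.h>

typedef uint64_t xfs_inofree_t;
#define XFS_INOBT_MASK(i)	((xfs_inofree_t)1 << (i))

struct inobt_rec {
	xfs_inofree_t	ir_free;	/* bit set => inode is free */
	int		ir_freecount;
};

int main(void)
{
	struct inobt_rec rec = { .ir_free = ~0ULL, .ir_freecount = 64 };
	int offset = 17;			/* inode picked for allocation */

	/* allocate: was XFS_INOBT_CLR_FREE(&rec, offset) */
	rec.ir_free &= ~XFS_INOBT_MASK(offset);
	rec.ir_freecount--;
	assert(!(rec.ir_free & XFS_INOBT_MASK(offset)));

	/* free it again: was XFS_INOBT_SET_FREE(&rec, offset) */
	rec.ir_free |= XFS_INOBT_MASK(offset);
	rec.ir_freecount++;
	assert(rec.ir_free & XFS_INOBT_MASK(offset));
	return 0;
}
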
diff --git a/fs/xfs/xfs_ialloc_btree.c b/fs/xfs/xfs_ialloc_btree.c
index 99f2408e8d8e..c282a9af5393 100644
--- a/fs/xfs/xfs_ialloc_btree.c
+++ b/fs/xfs/xfs_ialloc_btree.c
@@ -164,7 +164,7 @@ xfs_inobt_init_rec_from_cur(
164} 164}
165 165
166/* 166/*
167 * intial value of ptr for lookup 167 * initial value of ptr for lookup
168 */ 168 */
169STATIC void 169STATIC void
170xfs_inobt_init_ptr_from_cur( 170xfs_inobt_init_ptr_from_cur(
diff --git a/fs/xfs/xfs_ialloc_btree.h b/fs/xfs/xfs_ialloc_btree.h
index 5580e255ff06..f782ad0c4769 100644
--- a/fs/xfs/xfs_ialloc_btree.h
+++ b/fs/xfs/xfs_ialloc_btree.h
@@ -32,14 +32,14 @@ struct xfs_mount;
32#define XFS_IBT_MAGIC 0x49414254 /* 'IABT' */ 32#define XFS_IBT_MAGIC 0x49414254 /* 'IABT' */
33 33
34typedef __uint64_t xfs_inofree_t; 34typedef __uint64_t xfs_inofree_t;
35#define XFS_INODES_PER_CHUNK (NBBY * sizeof(xfs_inofree_t)) 35#define XFS_INODES_PER_CHUNK (NBBY * sizeof(xfs_inofree_t))
36#define XFS_INODES_PER_CHUNK_LOG (XFS_NBBYLOG + 3) 36#define XFS_INODES_PER_CHUNK_LOG (XFS_NBBYLOG + 3)
37#define XFS_INOBT_ALL_FREE ((xfs_inofree_t)-1) 37#define XFS_INOBT_ALL_FREE ((xfs_inofree_t)-1)
38#define XFS_INOBT_MASK(i) ((xfs_inofree_t)1 << (i))
38 39
39static inline xfs_inofree_t xfs_inobt_maskn(int i, int n) 40static inline xfs_inofree_t xfs_inobt_maskn(int i, int n)
40{ 41{
41 return (((n) >= XFS_INODES_PER_CHUNK ? \ 42 return ((n >= XFS_INODES_PER_CHUNK ? 0 : XFS_INOBT_MASK(n)) - 1) << i;
42 (xfs_inofree_t)0 : ((xfs_inofree_t)1 << (n))) - 1) << (i);
43} 43}
44 44
45/* 45/*
@@ -69,20 +69,6 @@ typedef struct xfs_inobt_key {
69typedef __be32 xfs_inobt_ptr_t; 69typedef __be32 xfs_inobt_ptr_t;
70 70
71/* 71/*
72 * Bit manipulations for ir_free.
73 */
74#define XFS_INOBT_MASK(i) ((xfs_inofree_t)1 << (i))
75#define XFS_INOBT_IS_FREE(rp,i) \
76 (((rp)->ir_free & XFS_INOBT_MASK(i)) != 0)
77#define XFS_INOBT_SET_FREE(rp,i) ((rp)->ir_free |= XFS_INOBT_MASK(i))
78#define XFS_INOBT_CLR_FREE(rp,i) ((rp)->ir_free &= ~XFS_INOBT_MASK(i))
79
80/*
81 * Maximum number of inode btree levels.
82 */
83#define XFS_IN_MAXLEVELS(mp) ((mp)->m_in_maxlevels)
84
85/*
86 * block numbers in the AG. 72 * block numbers in the AG.
87 */ 73 */
88#define XFS_IBT_BLOCK(mp) ((xfs_agblock_t)(XFS_CNT_BLOCK(mp) + 1)) 74#define XFS_IBT_BLOCK(mp) ((xfs_agblock_t)(XFS_CNT_BLOCK(mp) + 1))
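
The rewritten xfs_inobt_maskn() is meant to be behaviourally identical to the old expression. A quick userspace equivalence check, modelling NBBY with CHAR_BIT and exercising every in-range (i, n) pair:

/*
 * xfs_inobt_maskn(i, n): a mask of n bits starting at bit i of the
 * 64-bit inode-chunk free mask. Old and new forms must agree.
 */
#include <assert.h>
#include <limits.h>
#include <stdint.h>

typedef uint64_t xfs_inofree_t;
#define XFS_INODES_PER_CHUNK	(CHAR_BIT * sizeof(xfs_inofree_t))
#define XFS_INOBT_MASK(i)	((xfs_inofree_t)1 << (i))

static xfs_inofree_t maskn_old(int i, int n)
{
	return (((n) >= (int)XFS_INODES_PER_CHUNK ?
		 (xfs_inofree_t)0 : ((xfs_inofree_t)1 << (n))) - 1) << (i);
}

static xfs_inofree_t maskn_new(int i, int n)	/* matches the diff */
{
	return ((n >= (int)XFS_INODES_PER_CHUNK ? 0 : XFS_INOBT_MASK(n)) - 1) << i;
}

int main(void)
{
	for (int i = 0; i < 64; i++)
		for (int n = 0; n + i <= 64; n++)
			assert(maskn_old(i, n) == maskn_new(i, n));
	return 0;
}
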
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index 1f175fa34b22..f879c1bc4b96 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -122,7 +122,7 @@ typedef struct xfs_ictimestamp {
122 122
123/* 123/*
124 * NOTE: This structure must be kept identical to struct xfs_dinode 124 * NOTE: This structure must be kept identical to struct xfs_dinode
125 * in xfs_dinode.h except for the endianess annotations. 125 * in xfs_dinode.h except for the endianness annotations.
126 */ 126 */
127typedef struct xfs_icdinode { 127typedef struct xfs_icdinode {
128 __uint16_t di_magic; /* inode magic # = XFS_DINODE_MAGIC */ 128 __uint16_t di_magic; /* inode magic # = XFS_DINODE_MAGIC */
diff --git a/fs/xfs/xfs_inode_item.h b/fs/xfs/xfs_inode_item.h
index 9957d0602d54..a52ac125f055 100644
--- a/fs/xfs/xfs_inode_item.h
+++ b/fs/xfs/xfs_inode_item.h
@@ -40,7 +40,6 @@ typedef struct xfs_inode_log_format {
40 __int32_t ilf_boffset; /* off of inode in buffer */ 40 __int32_t ilf_boffset; /* off of inode in buffer */
41} xfs_inode_log_format_t; 41} xfs_inode_log_format_t;
42 42
43#ifndef HAVE_FORMAT32
44typedef struct xfs_inode_log_format_32 { 43typedef struct xfs_inode_log_format_32 {
45 __uint16_t ilf_type; /* inode log item type */ 44 __uint16_t ilf_type; /* inode log item type */
46 __uint16_t ilf_size; /* size of this item */ 45 __uint16_t ilf_size; /* size of this item */
@@ -56,7 +55,6 @@ typedef struct xfs_inode_log_format_32 {
56 __int32_t ilf_len; /* len of inode buffer */ 55 __int32_t ilf_len; /* len of inode buffer */
57 __int32_t ilf_boffset; /* off of inode in buffer */ 56 __int32_t ilf_boffset; /* off of inode in buffer */
58} __attribute__((packed)) xfs_inode_log_format_32_t; 57} __attribute__((packed)) xfs_inode_log_format_32_t;
59#endif
60 58
61typedef struct xfs_inode_log_format_64 { 59typedef struct xfs_inode_log_format_64 {
62 __uint16_t ilf_type; /* inode log item type */ 60 __uint16_t ilf_type; /* inode log item type */
diff --git a/fs/xfs/xfs_iomap.h b/fs/xfs/xfs_iomap.h
index ee1a0c134cc2..a1cc1322fc0f 100644
--- a/fs/xfs/xfs_iomap.h
+++ b/fs/xfs/xfs_iomap.h
@@ -63,7 +63,7 @@ typedef enum {
63 */ 63 */
64 64
65typedef struct xfs_iomap { 65typedef struct xfs_iomap {
66 xfs_daddr_t iomap_bn; /* first 512b blk of mapping */ 66 xfs_daddr_t iomap_bn; /* first 512B blk of mapping */
67 xfs_buftarg_t *iomap_target; 67 xfs_buftarg_t *iomap_target;
68 xfs_off_t iomap_offset; /* offset of mapping, bytes */ 68 xfs_off_t iomap_offset; /* offset of mapping, bytes */
69 xfs_off_t iomap_bsize; /* size of mapping, bytes */ 69 xfs_off_t iomap_bsize; /* size of mapping, bytes */
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index cf98a805ec90..aeb2d2221c7d 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -83,7 +83,12 @@ xfs_bulkstat_one_iget(
83 buf->bs_uid = dic->di_uid; 83 buf->bs_uid = dic->di_uid;
84 buf->bs_gid = dic->di_gid; 84 buf->bs_gid = dic->di_gid;
85 buf->bs_size = dic->di_size; 85 buf->bs_size = dic->di_size;
86 vn_atime_to_bstime(VFS_I(ip), &buf->bs_atime); 86 /*
87 * We are reading the atime from the Linux inode because the
88 * dinode might not be uptodate.
89 */
90 buf->bs_atime.tv_sec = VFS_I(ip)->i_atime.tv_sec;
91 buf->bs_atime.tv_nsec = VFS_I(ip)->i_atime.tv_nsec;
87 buf->bs_mtime.tv_sec = dic->di_mtime.t_sec; 92 buf->bs_mtime.tv_sec = dic->di_mtime.t_sec;
88 buf->bs_mtime.tv_nsec = dic->di_mtime.t_nsec; 93 buf->bs_mtime.tv_nsec = dic->di_mtime.t_nsec;
89 buf->bs_ctime.tv_sec = dic->di_ctime.t_sec; 94 buf->bs_ctime.tv_sec = dic->di_ctime.t_sec;
@@ -579,7 +584,7 @@ xfs_bulkstat(
579 * first inode of the cluster. 584 * first inode of the cluster.
580 * 585 *
581 * Careful with clustidx. There can be 586 * Careful with clustidx. There can be
582 * multple clusters per chunk, a single 587 * multiple clusters per chunk, a single
583 * cluster per chunk or a cluster that has 588 * cluster per chunk or a cluster that has
584 * inodes represented from several different 589 * inodes represented from several different
585 * chunks (if blocksize is large). 590 * chunks (if blocksize is large).
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index f4726f702a9e..f76c6d7cea21 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -574,7 +574,7 @@ xfs_log_mount(
574 error = xfs_trans_ail_init(mp); 574 error = xfs_trans_ail_init(mp);
575 if (error) { 575 if (error) {
576 cmn_err(CE_WARN, "XFS: AIL initialisation failed: error %d", error); 576 cmn_err(CE_WARN, "XFS: AIL initialisation failed: error %d", error);
577 goto error; 577 goto out_free_log;
578 } 578 }
579 mp->m_log->l_ailp = mp->m_ail; 579 mp->m_log->l_ailp = mp->m_ail;
580 580
@@ -594,20 +594,22 @@ xfs_log_mount(
594 mp->m_flags |= XFS_MOUNT_RDONLY; 594 mp->m_flags |= XFS_MOUNT_RDONLY;
595 if (error) { 595 if (error) {
596 cmn_err(CE_WARN, "XFS: log mount/recovery failed: error %d", error); 596 cmn_err(CE_WARN, "XFS: log mount/recovery failed: error %d", error);
597 goto error; 597 goto out_destroy_ail;
598 } 598 }
599 } 599 }
600 600
601 /* Normal transactions can now occur */ 601 /* Normal transactions can now occur */
602 mp->m_log->l_flags &= ~XLOG_ACTIVE_RECOVERY; 602 mp->m_log->l_flags &= ~XLOG_ACTIVE_RECOVERY;
603 603
604 /* End mounting message in xfs_log_mount_finish */
605 return 0; 604 return 0;
606error: 605
607 xfs_log_unmount_dealloc(mp); 606out_destroy_ail:
607 xfs_trans_ail_destroy(mp);
608out_free_log:
609 xlog_dealloc_log(mp->m_log);
608out: 610out:
609 return error; 611 return error;
610} /* xfs_log_mount */ 612}
611 613
612/* 614/*
613 * Finish the recovery of the file system. This is separate from 615 * Finish the recovery of the file system. This is separate from
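
The xfs_log_mount() hunk above replaces the single catch-all error label with per-resource unwind labels. A generic sketch of that pattern, with hypothetical setup/teardown helpers standing in for the real log and AIL routines:

/*
 * Error-unwind style this hunk adopts: one label per acquired resource,
 * unwound in reverse order, so a failure only tears down what was
 * actually set up. All helpers are stand-ins, not real XFS functions.
 */
#include <errno.h>
#include <stdio.h>

static int setup_log(void)      { return 0; }
static int setup_ail(void)      { return 0; }
static int run_recovery(void)   { return -EIO; }	/* fails for demo */
static void destroy_ail(void)   { puts("unwound: ail destroyed"); }
static void free_log(void)      { puts("unwound: log freed"); }

static int mount_like_sequence(void)
{
	int error;

	error = setup_log();
	if (error)
		goto out;
	error = setup_ail();
	if (error)
		goto out_free_log;
	error = run_recovery();
	if (error)
		goto out_destroy_ail;	/* undo only what succeeded */
	return 0;

out_destroy_ail:
	destroy_ail();
out_free_log:
	free_log();
out:
	return error;
}

int main(void)
{
	return mount_like_sequence() ? 1 : 0;
}
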
@@ -633,19 +635,6 @@ xfs_log_mount_finish(xfs_mount_t *mp)
633} 635}
634 636
635/* 637/*
636 * Unmount processing for the log.
637 */
638int
639xfs_log_unmount(xfs_mount_t *mp)
640{
641 int error;
642
643 error = xfs_log_unmount_write(mp);
644 xfs_log_unmount_dealloc(mp);
645 return error;
646}
647
648/*
649 * Final log writes as part of unmount. 638 * Final log writes as part of unmount.
650 * 639 *
651 * Mark the filesystem clean as unmount happens. Note that during relocation 640 * Mark the filesystem clean as unmount happens. Note that during relocation
@@ -795,7 +784,7 @@ xfs_log_unmount_write(xfs_mount_t *mp)
795 * and deallocate the log as the aild references the log. 784 * and deallocate the log as the aild references the log.
796 */ 785 */
797void 786void
798xfs_log_unmount_dealloc(xfs_mount_t *mp) 787xfs_log_unmount(xfs_mount_t *mp)
799{ 788{
800 xfs_trans_ail_destroy(mp); 789 xfs_trans_ail_destroy(mp);
801 xlog_dealloc_log(mp->m_log); 790 xlog_dealloc_log(mp->m_log);
@@ -1109,7 +1098,7 @@ xlog_bdstrat_cb(struct xfs_buf *bp)
1109/* 1098/*
1110 * Return size of each in-core log record buffer. 1099 * Return size of each in-core log record buffer.
1111 * 1100 *
1112 * All machines get 8 x 32KB buffers by default, unless tuned otherwise. 1101 * All machines get 8 x 32kB buffers by default, unless tuned otherwise.
1113 * 1102 *
1114 * If the filesystem blocksize is too large, we may need to choose a 1103 * If the filesystem blocksize is too large, we may need to choose a
1115 * larger size since the directory code currently logs entire blocks. 1104 * larger size since the directory code currently logs entire blocks.
@@ -1139,8 +1128,8 @@ xlog_get_iclog_buffer_size(xfs_mount_t *mp,
1139 } 1128 }
1140 1129
1141 if (xfs_sb_version_haslogv2(&mp->m_sb)) { 1130 if (xfs_sb_version_haslogv2(&mp->m_sb)) {
1142 /* # headers = size / 32K 1131 /* # headers = size / 32k
1143 * one header holds cycles from 32K of data 1132 * one header holds cycles from 32k of data
1144 */ 1133 */
1145 1134
1146 xhdrs = mp->m_logbsize / XLOG_HEADER_CYCLE_SIZE; 1135 xhdrs = mp->m_logbsize / XLOG_HEADER_CYCLE_SIZE;
@@ -1156,7 +1145,7 @@ xlog_get_iclog_buffer_size(xfs_mount_t *mp,
1156 goto done; 1145 goto done;
1157 } 1146 }
1158 1147
1159 /* All machines use 32KB buffers by default. */ 1148 /* All machines use 32kB buffers by default. */
1160 log->l_iclog_size = XLOG_BIG_RECORD_BSIZE; 1149 log->l_iclog_size = XLOG_BIG_RECORD_BSIZE;
1161 log->l_iclog_size_log = XLOG_BIG_RECORD_BSHIFT; 1150 log->l_iclog_size_log = XLOG_BIG_RECORD_BSHIFT;
1162 1151
@@ -1164,32 +1153,8 @@ xlog_get_iclog_buffer_size(xfs_mount_t *mp,
1164 log->l_iclog_hsize = BBSIZE; 1153 log->l_iclog_hsize = BBSIZE;
1165 log->l_iclog_heads = 1; 1154 log->l_iclog_heads = 1;
1166 1155
1167 /* 1156done:
1168 * For 16KB, we use 3 32KB buffers. For 32KB block sizes, we use 1157 /* are we being asked to make the sizes selected above visible? */
1169 * 4 32KB buffers. For 64KB block sizes, we use 8 32KB buffers.
1170 */
1171 if (mp->m_sb.sb_blocksize >= 16*1024) {
1172 log->l_iclog_size = XLOG_BIG_RECORD_BSIZE;
1173 log->l_iclog_size_log = XLOG_BIG_RECORD_BSHIFT;
1174 if (mp->m_logbufs <= 0) {
1175 switch (mp->m_sb.sb_blocksize) {
1176 case 16*1024: /* 16 KB */
1177 log->l_iclog_bufs = 3;
1178 break;
1179 case 32*1024: /* 32 KB */
1180 log->l_iclog_bufs = 4;
1181 break;
1182 case 64*1024: /* 64 KB */
1183 log->l_iclog_bufs = 8;
1184 break;
1185 default:
1186 xlog_panic("XFS: Invalid blocksize");
1187 break;
1188 }
1189 }
1190 }
1191
1192done: /* are we being asked to make the sizes selected above visible? */
1193 if (mp->m_logbufs == 0) 1158 if (mp->m_logbufs == 0)
1194 mp->m_logbufs = log->l_iclog_bufs; 1159 mp->m_logbufs = log->l_iclog_bufs;
1195 if (mp->m_logbsize == 0) 1160 if (mp->m_logbsize == 0)
@@ -3214,7 +3179,7 @@ xlog_state_want_sync(xlog_t *log, xlog_in_core_t *iclog)
3214 */ 3179 */
3215 3180
3216/* 3181/*
3217 * Free a used ticket when it's refcount falls to zero. 3182 * Free a used ticket when its refcount falls to zero.
3218 */ 3183 */
3219void 3184void
3220xfs_log_ticket_put( 3185xfs_log_ticket_put(
diff --git a/fs/xfs/xfs_log.h b/fs/xfs/xfs_log.h
index 8a3e84e900a3..d0c9baa50b1a 100644
--- a/fs/xfs/xfs_log.h
+++ b/fs/xfs/xfs_log.h
@@ -170,9 +170,8 @@ int xfs_log_write(struct xfs_mount *mp,
170 int nentries, 170 int nentries,
171 xfs_log_ticket_t ticket, 171 xfs_log_ticket_t ticket,
172 xfs_lsn_t *start_lsn); 172 xfs_lsn_t *start_lsn);
173int xfs_log_unmount(struct xfs_mount *mp);
174int xfs_log_unmount_write(struct xfs_mount *mp); 173int xfs_log_unmount_write(struct xfs_mount *mp);
175void xfs_log_unmount_dealloc(struct xfs_mount *mp); 174void xfs_log_unmount(struct xfs_mount *mp);
176int xfs_log_force_umount(struct xfs_mount *mp, int logerror); 175int xfs_log_force_umount(struct xfs_mount *mp, int logerror);
177int xfs_log_need_covered(struct xfs_mount *mp); 176int xfs_log_need_covered(struct xfs_mount *mp);
178 177
diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h
index 654167be0efb..bcad5f4c1fd1 100644
--- a/fs/xfs/xfs_log_priv.h
+++ b/fs/xfs/xfs_log_priv.h
@@ -359,7 +359,7 @@ typedef struct xlog_in_core {
359 int ic_size; 359 int ic_size;
360 int ic_offset; 360 int ic_offset;
361 int ic_bwritecnt; 361 int ic_bwritecnt;
362 ushort_t ic_state; 362 unsigned short ic_state;
363 char *ic_datap; /* pointer to iclog data */ 363 char *ic_datap; /* pointer to iclog data */
364#ifdef XFS_LOG_TRACE 364#ifdef XFS_LOG_TRACE
365 struct ktrace *ic_trace; 365 struct ktrace *ic_trace;
@@ -455,7 +455,6 @@ extern void xlog_recover_process_iunlinks(xlog_t *log);
455 455
456extern struct xfs_buf *xlog_get_bp(xlog_t *, int); 456extern struct xfs_buf *xlog_get_bp(xlog_t *, int);
457extern void xlog_put_bp(struct xfs_buf *); 457extern void xlog_put_bp(struct xfs_buf *);
458extern int xlog_bread(xlog_t *, xfs_daddr_t, int, struct xfs_buf *);
459 458
460extern kmem_zone_t *xfs_log_ticket_zone; 459extern kmem_zone_t *xfs_log_ticket_zone;
461 460
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 61af610d79b3..7ba450116d4f 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -94,12 +94,30 @@ xlog_put_bp(
94 xfs_buf_free(bp); 94 xfs_buf_free(bp);
95} 95}
96 96
97STATIC xfs_caddr_t
98xlog_align(
99 xlog_t *log,
100 xfs_daddr_t blk_no,
101 int nbblks,
102 xfs_buf_t *bp)
103{
104 xfs_caddr_t ptr;
105
106 if (!log->l_sectbb_log)
107 return XFS_BUF_PTR(bp);
108
109 ptr = XFS_BUF_PTR(bp) + BBTOB((int)blk_no & log->l_sectbb_mask);
110 ASSERT(XFS_BUF_SIZE(bp) >=
111 BBTOB(nbblks + (blk_no & log->l_sectbb_mask)));
112 return ptr;
113}
114
97 115
98/* 116/*
99 * nbblks should be uint, but oh well. Just want to catch that 32-bit length. 117 * nbblks should be uint, but oh well. Just want to catch that 32-bit length.
100 */ 118 */
101int 119STATIC int
102xlog_bread( 120xlog_bread_noalign(
103 xlog_t *log, 121 xlog_t *log,
104 xfs_daddr_t blk_no, 122 xfs_daddr_t blk_no,
105 int nbblks, 123 int nbblks,
@@ -137,6 +155,24 @@ xlog_bread(
137 return error; 155 return error;
138} 156}
139 157
158STATIC int
159xlog_bread(
160 xlog_t *log,
161 xfs_daddr_t blk_no,
162 int nbblks,
163 xfs_buf_t *bp,
164 xfs_caddr_t *offset)
165{
166 int error;
167
168 error = xlog_bread_noalign(log, blk_no, nbblks, bp);
169 if (error)
170 return error;
171
172 *offset = xlog_align(log, blk_no, nbblks, bp);
173 return 0;
174}
175
140/* 176/*
141 * Write out the buffer at the given block for the given number of blocks. 177 * Write out the buffer at the given block for the given number of blocks.
142 * The buffer is kept locked across the write and is returned locked. 178 * The buffer is kept locked across the write and is returned locked.
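
The new xlog_bread() wrapper folds the xlog_align() call into every read, so callers receive the sector-aligned data pointer directly. A small model of the offset xlog_align() computes, treating BBTOB() as a shift by 9 (512-byte basic blocks) and ignoring the buffer bookkeeping:

/*
 * When the log sector size spans several 512-byte basic blocks, a read
 * is rounded down to the sector boundary and the caller's data starts
 * part-way into the buffer; this models that offset.
 */
#include <stdint.h>
#include <stdio.h>

#define BBSHIFT		9
#define BBTOB(bbs)	((uint64_t)(bbs) << BBSHIFT)

/* byte offset of block blk_no inside a sector-aligned read buffer */
static uint64_t log_align_offset(int sectbb_log, uint64_t blk_no)
{
	uint64_t sectbb_mask = (1ULL << sectbb_log) - 1;

	if (!sectbb_log)		/* sector == basic block: no skew */
		return 0;
	return BBTOB(blk_no & sectbb_mask);
}

int main(void)
{
	/* 4 basic blocks per sector (2kB sectors), reading block 7 */
	printf("data starts %llu bytes into the buffer\n",
	       (unsigned long long)log_align_offset(2, 7));	/* 1536 */
	return 0;
}

With the wrapper in place, the call sites converted in the hunks below no longer repeat this computation after each read.
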
@@ -180,24 +216,6 @@ xlog_bwrite(
180 return error; 216 return error;
181} 217}
182 218
183STATIC xfs_caddr_t
184xlog_align(
185 xlog_t *log,
186 xfs_daddr_t blk_no,
187 int nbblks,
188 xfs_buf_t *bp)
189{
190 xfs_caddr_t ptr;
191
192 if (!log->l_sectbb_log)
193 return XFS_BUF_PTR(bp);
194
195 ptr = XFS_BUF_PTR(bp) + BBTOB((int)blk_no & log->l_sectbb_mask);
196 ASSERT(XFS_BUF_SIZE(bp) >=
197 BBTOB(nbblks + (blk_no & log->l_sectbb_mask)));
198 return ptr;
199}
200
201#ifdef DEBUG 219#ifdef DEBUG
202/* 220/*
203 * dump debug superblock and log record information 221 * dump debug superblock and log record information
@@ -211,11 +229,11 @@ xlog_header_check_dump(
211 229
212 cmn_err(CE_DEBUG, "%s: SB : uuid = ", __func__); 230 cmn_err(CE_DEBUG, "%s: SB : uuid = ", __func__);
213 for (b = 0; b < 16; b++) 231 for (b = 0; b < 16; b++)
214 cmn_err(CE_DEBUG, "%02x", ((uchar_t *)&mp->m_sb.sb_uuid)[b]); 232 cmn_err(CE_DEBUG, "%02x", ((__uint8_t *)&mp->m_sb.sb_uuid)[b]);
215 cmn_err(CE_DEBUG, ", fmt = %d\n", XLOG_FMT); 233 cmn_err(CE_DEBUG, ", fmt = %d\n", XLOG_FMT);
216 cmn_err(CE_DEBUG, " log : uuid = "); 234 cmn_err(CE_DEBUG, " log : uuid = ");
217 for (b = 0; b < 16; b++) 235 for (b = 0; b < 16; b++)
218 cmn_err(CE_DEBUG, "%02x",((uchar_t *)&head->h_fs_uuid)[b]); 236 cmn_err(CE_DEBUG, "%02x", ((__uint8_t *)&head->h_fs_uuid)[b]);
219 cmn_err(CE_DEBUG, ", fmt = %d\n", be32_to_cpu(head->h_fmt)); 237 cmn_err(CE_DEBUG, ", fmt = %d\n", be32_to_cpu(head->h_fmt));
220} 238}
221#else 239#else
@@ -321,9 +339,9 @@ xlog_find_cycle_start(
321 339
322 mid_blk = BLK_AVG(first_blk, *last_blk); 340 mid_blk = BLK_AVG(first_blk, *last_blk);
323 while (mid_blk != first_blk && mid_blk != *last_blk) { 341 while (mid_blk != first_blk && mid_blk != *last_blk) {
324 if ((error = xlog_bread(log, mid_blk, 1, bp))) 342 error = xlog_bread(log, mid_blk, 1, bp, &offset);
343 if (error)
325 return error; 344 return error;
326 offset = xlog_align(log, mid_blk, 1, bp);
327 mid_cycle = xlog_get_cycle(offset); 345 mid_cycle = xlog_get_cycle(offset);
328 if (mid_cycle == cycle) { 346 if (mid_cycle == cycle) {
329 *last_blk = mid_blk; 347 *last_blk = mid_blk;
@@ -379,10 +397,10 @@ xlog_find_verify_cycle(
379 397
380 bcount = min(bufblks, (start_blk + nbblks - i)); 398 bcount = min(bufblks, (start_blk + nbblks - i));
381 399
382 if ((error = xlog_bread(log, i, bcount, bp))) 400 error = xlog_bread(log, i, bcount, bp, &buf);
401 if (error)
383 goto out; 402 goto out;
384 403
385 buf = xlog_align(log, i, bcount, bp);
386 for (j = 0; j < bcount; j++) { 404 for (j = 0; j < bcount; j++) {
387 cycle = xlog_get_cycle(buf); 405 cycle = xlog_get_cycle(buf);
388 if (cycle == stop_on_cycle_no) { 406 if (cycle == stop_on_cycle_no) {
@@ -436,9 +454,9 @@ xlog_find_verify_log_record(
436 return ENOMEM; 454 return ENOMEM;
437 smallmem = 1; 455 smallmem = 1;
438 } else { 456 } else {
439 if ((error = xlog_bread(log, start_blk, num_blks, bp))) 457 error = xlog_bread(log, start_blk, num_blks, bp, &offset);
458 if (error)
440 goto out; 459 goto out;
441 offset = xlog_align(log, start_blk, num_blks, bp);
442 offset += ((num_blks - 1) << BBSHIFT); 460 offset += ((num_blks - 1) << BBSHIFT);
443 } 461 }
444 462
@@ -453,9 +471,9 @@ xlog_find_verify_log_record(
453 } 471 }
454 472
455 if (smallmem) { 473 if (smallmem) {
456 if ((error = xlog_bread(log, i, 1, bp))) 474 error = xlog_bread(log, i, 1, bp, &offset);
475 if (error)
457 goto out; 476 goto out;
458 offset = xlog_align(log, i, 1, bp);
459 } 477 }
460 478
461 head = (xlog_rec_header_t *)offset; 479 head = (xlog_rec_header_t *)offset;
@@ -559,15 +577,18 @@ xlog_find_head(
559 bp = xlog_get_bp(log, 1); 577 bp = xlog_get_bp(log, 1);
560 if (!bp) 578 if (!bp)
561 return ENOMEM; 579 return ENOMEM;
562 if ((error = xlog_bread(log, 0, 1, bp))) 580
581 error = xlog_bread(log, 0, 1, bp, &offset);
582 if (error)
563 goto bp_err; 583 goto bp_err;
564 offset = xlog_align(log, 0, 1, bp); 584
565 first_half_cycle = xlog_get_cycle(offset); 585 first_half_cycle = xlog_get_cycle(offset);
566 586
567 last_blk = head_blk = log_bbnum - 1; /* get cycle # of last block */ 587 last_blk = head_blk = log_bbnum - 1; /* get cycle # of last block */
568 if ((error = xlog_bread(log, last_blk, 1, bp))) 588 error = xlog_bread(log, last_blk, 1, bp, &offset);
589 if (error)
569 goto bp_err; 590 goto bp_err;
570 offset = xlog_align(log, last_blk, 1, bp); 591
571 last_half_cycle = xlog_get_cycle(offset); 592 last_half_cycle = xlog_get_cycle(offset);
572 ASSERT(last_half_cycle != 0); 593 ASSERT(last_half_cycle != 0);
573 594
@@ -817,9 +838,10 @@ xlog_find_tail(
817 if (!bp) 838 if (!bp)
818 return ENOMEM; 839 return ENOMEM;
819 if (*head_blk == 0) { /* special case */ 840 if (*head_blk == 0) { /* special case */
820 if ((error = xlog_bread(log, 0, 1, bp))) 841 error = xlog_bread(log, 0, 1, bp, &offset);
842 if (error)
821 goto bread_err; 843 goto bread_err;
822 offset = xlog_align(log, 0, 1, bp); 844
823 if (xlog_get_cycle(offset) == 0) { 845 if (xlog_get_cycle(offset) == 0) {
824 *tail_blk = 0; 846 *tail_blk = 0;
825 /* leave all other log inited values alone */ 847 /* leave all other log inited values alone */
@@ -832,9 +854,10 @@ xlog_find_tail(
832 */ 854 */
833 ASSERT(*head_blk < INT_MAX); 855 ASSERT(*head_blk < INT_MAX);
834 for (i = (int)(*head_blk) - 1; i >= 0; i--) { 856 for (i = (int)(*head_blk) - 1; i >= 0; i--) {
835 if ((error = xlog_bread(log, i, 1, bp))) 857 error = xlog_bread(log, i, 1, bp, &offset);
858 if (error)
836 goto bread_err; 859 goto bread_err;
837 offset = xlog_align(log, i, 1, bp); 860
838 if (XLOG_HEADER_MAGIC_NUM == be32_to_cpu(*(__be32 *)offset)) { 861 if (XLOG_HEADER_MAGIC_NUM == be32_to_cpu(*(__be32 *)offset)) {
839 found = 1; 862 found = 1;
840 break; 863 break;
@@ -848,9 +871,10 @@ xlog_find_tail(
848 */ 871 */
849 if (!found) { 872 if (!found) {
850 for (i = log->l_logBBsize - 1; i >= (int)(*head_blk); i--) { 873 for (i = log->l_logBBsize - 1; i >= (int)(*head_blk); i--) {
851 if ((error = xlog_bread(log, i, 1, bp))) 874 error = xlog_bread(log, i, 1, bp, &offset);
875 if (error)
852 goto bread_err; 876 goto bread_err;
853 offset = xlog_align(log, i, 1, bp); 877
854 if (XLOG_HEADER_MAGIC_NUM == 878 if (XLOG_HEADER_MAGIC_NUM ==
855 be32_to_cpu(*(__be32 *)offset)) { 879 be32_to_cpu(*(__be32 *)offset)) {
856 found = 2; 880 found = 2;
@@ -922,10 +946,10 @@ xlog_find_tail(
922 if (*head_blk == after_umount_blk && 946 if (*head_blk == after_umount_blk &&
923 be32_to_cpu(rhead->h_num_logops) == 1) { 947 be32_to_cpu(rhead->h_num_logops) == 1) {
924 umount_data_blk = (i + hblks) % log->l_logBBsize; 948 umount_data_blk = (i + hblks) % log->l_logBBsize;
925 if ((error = xlog_bread(log, umount_data_blk, 1, bp))) { 949 error = xlog_bread(log, umount_data_blk, 1, bp, &offset);
950 if (error)
926 goto bread_err; 951 goto bread_err;
927 } 952
928 offset = xlog_align(log, umount_data_blk, 1, bp);
929 op_head = (xlog_op_header_t *)offset; 953 op_head = (xlog_op_header_t *)offset;
930 if (op_head->oh_flags & XLOG_UNMOUNT_TRANS) { 954 if (op_head->oh_flags & XLOG_UNMOUNT_TRANS) {
931 /* 955 /*
@@ -1017,9 +1041,10 @@ xlog_find_zeroed(
1017 bp = xlog_get_bp(log, 1); 1041 bp = xlog_get_bp(log, 1);
1018 if (!bp) 1042 if (!bp)
1019 return ENOMEM; 1043 return ENOMEM;
1020 if ((error = xlog_bread(log, 0, 1, bp))) 1044 error = xlog_bread(log, 0, 1, bp, &offset);
1045 if (error)
1021 goto bp_err; 1046 goto bp_err;
1022 offset = xlog_align(log, 0, 1, bp); 1047
1023 first_cycle = xlog_get_cycle(offset); 1048 first_cycle = xlog_get_cycle(offset);
1024 if (first_cycle == 0) { /* completely zeroed log */ 1049 if (first_cycle == 0) { /* completely zeroed log */
1025 *blk_no = 0; 1050 *blk_no = 0;
@@ -1028,9 +1053,10 @@ xlog_find_zeroed(
1028 } 1053 }
1029 1054
1030 /* check partially zeroed log */ 1055 /* check partially zeroed log */
1031 if ((error = xlog_bread(log, log_bbnum-1, 1, bp))) 1056 error = xlog_bread(log, log_bbnum-1, 1, bp, &offset);
1057 if (error)
1032 goto bp_err; 1058 goto bp_err;
1033 offset = xlog_align(log, log_bbnum-1, 1, bp); 1059
1034 last_cycle = xlog_get_cycle(offset); 1060 last_cycle = xlog_get_cycle(offset);
1035 if (last_cycle != 0) { /* log completely written to */ 1061 if (last_cycle != 0) { /* log completely written to */
1036 xlog_put_bp(bp); 1062 xlog_put_bp(bp);
@@ -1152,10 +1178,10 @@ xlog_write_log_records(
1152 */ 1178 */
1153 balign = XLOG_SECTOR_ROUNDDOWN_BLKNO(log, start_block); 1179 balign = XLOG_SECTOR_ROUNDDOWN_BLKNO(log, start_block);
1154 if (balign != start_block) { 1180 if (balign != start_block) {
1155 if ((error = xlog_bread(log, start_block, 1, bp))) { 1181 error = xlog_bread_noalign(log, start_block, 1, bp);
1156 xlog_put_bp(bp); 1182 if (error)
1157 return error; 1183 goto out_put_bp;
1158 } 1184
1159 j = start_block - balign; 1185 j = start_block - balign;
1160 } 1186 }
1161 1187
@@ -1175,10 +1201,14 @@ xlog_write_log_records(
1175 balign = BBTOB(ealign - start_block); 1201 balign = BBTOB(ealign - start_block);
1176 error = XFS_BUF_SET_PTR(bp, offset + balign, 1202 error = XFS_BUF_SET_PTR(bp, offset + balign,
1177 BBTOB(sectbb)); 1203 BBTOB(sectbb));
1178 if (!error) 1204 if (error)
1179 error = xlog_bread(log, ealign, sectbb, bp); 1205 break;
1180 if (!error) 1206
1181 error = XFS_BUF_SET_PTR(bp, offset, bufblks); 1207 error = xlog_bread_noalign(log, ealign, sectbb, bp);
1208 if (error)
1209 break;
1210
1211 error = XFS_BUF_SET_PTR(bp, offset, bufblks);
1182 if (error) 1212 if (error)
1183 break; 1213 break;
1184 } 1214 }
@@ -1195,6 +1225,8 @@ xlog_write_log_records(
1195 start_block += endcount; 1225 start_block += endcount;
1196 j = 0; 1226 j = 0;
1197 } 1227 }
1228
1229 out_put_bp:
1198 xlog_put_bp(bp); 1230 xlog_put_bp(bp);
1199 return error; 1231 return error;
1200} 1232}
@@ -2511,16 +2543,10 @@ xlog_recover_do_inode_trans(
2511 } 2543 }
2512 2544
2513write_inode_buffer: 2545write_inode_buffer:
2514 if (ITEM_TYPE(item) == XFS_LI_INODE) { 2546 ASSERT(bp->b_mount == NULL || bp->b_mount == mp);
2515 ASSERT(bp->b_mount == NULL || bp->b_mount == mp); 2547 bp->b_mount = mp;
2516 bp->b_mount = mp; 2548 XFS_BUF_SET_IODONE_FUNC(bp, xlog_recover_iodone);
2517 XFS_BUF_SET_IODONE_FUNC(bp, xlog_recover_iodone); 2549 xfs_bdwrite(mp, bp);
2518 xfs_bdwrite(mp, bp);
2519 } else {
2520 XFS_BUF_STALE(bp);
2521 error = xfs_bwrite(mp, bp);
2522 }
2523
2524error: 2550error:
2525 if (need_free) 2551 if (need_free)
2526 kmem_free(in_f); 2552 kmem_free(in_f);
@@ -2769,51 +2795,48 @@ xlog_recover_do_trans(
2769 int error = 0; 2795 int error = 0;
2770 xlog_recover_item_t *item, *first_item; 2796 xlog_recover_item_t *item, *first_item;
2771 2797
2772 if ((error = xlog_recover_reorder_trans(trans))) 2798 error = xlog_recover_reorder_trans(trans);
2799 if (error)
2773 return error; 2800 return error;
2801
2774 first_item = item = trans->r_itemq; 2802 first_item = item = trans->r_itemq;
2775 do { 2803 do {
2776 /* 2804 switch (ITEM_TYPE(item)) {
2777 * we don't need to worry about the block number being 2805 case XFS_LI_BUF:
2778 * truncated in > 1 TB buffers because in user-land, 2806 error = xlog_recover_do_buffer_trans(log, item, pass);
2779 * we're now n32 or 64-bit so xfs_daddr_t is 64-bits so 2807 break;
2780 * the blknos will get through the user-mode buffer 2808 case XFS_LI_INODE:
2781 * cache properly. The only bad case is o32 kernels 2809 error = xlog_recover_do_inode_trans(log, item, pass);
2782 * where xfs_daddr_t is 32-bits but mount will warn us 2810 break;
2783 * off a > 1 TB filesystem before we get here. 2811 case XFS_LI_EFI:
2784 */ 2812 error = xlog_recover_do_efi_trans(log, item,
2785 if ((ITEM_TYPE(item) == XFS_LI_BUF)) { 2813 trans->r_lsn, pass);
2786 if ((error = xlog_recover_do_buffer_trans(log, item, 2814 break;
2787 pass))) 2815 case XFS_LI_EFD:
2788 break;
2789 } else if ((ITEM_TYPE(item) == XFS_LI_INODE)) {
2790 if ((error = xlog_recover_do_inode_trans(log, item,
2791 pass)))
2792 break;
2793 } else if (ITEM_TYPE(item) == XFS_LI_EFI) {
2794 if ((error = xlog_recover_do_efi_trans(log, item, trans->r_lsn,
2795 pass)))
2796 break;
2797 } else if (ITEM_TYPE(item) == XFS_LI_EFD) {
2798 xlog_recover_do_efd_trans(log, item, pass); 2816 xlog_recover_do_efd_trans(log, item, pass);
2799 } else if (ITEM_TYPE(item) == XFS_LI_DQUOT) { 2817 error = 0;
2800 if ((error = xlog_recover_do_dquot_trans(log, item, 2818 break;
2801 pass))) 2819 case XFS_LI_DQUOT:
2802 break; 2820 error = xlog_recover_do_dquot_trans(log, item, pass);
2803 } else if ((ITEM_TYPE(item) == XFS_LI_QUOTAOFF)) { 2821 break;
2804 if ((error = xlog_recover_do_quotaoff_trans(log, item, 2822 case XFS_LI_QUOTAOFF:
2805 pass))) 2823 error = xlog_recover_do_quotaoff_trans(log, item,
2806 break; 2824 pass);
2807 } else { 2825 break;
2808 xlog_warn("XFS: xlog_recover_do_trans"); 2826 default:
2827 xlog_warn(
2828 "XFS: invalid item type (%d) xlog_recover_do_trans", ITEM_TYPE(item));
2809 ASSERT(0); 2829 ASSERT(0);
2810 error = XFS_ERROR(EIO); 2830 error = XFS_ERROR(EIO);
2811 break; 2831 break;
2812 } 2832 }
2833
2834 if (error)
2835 return error;
2813 item = item->ri_next; 2836 item = item->ri_next;
2814 } while (first_item != item); 2837 } while (first_item != item);
2815 2838
2816 return error; 2839 return 0;
2817} 2840}
2818 2841
2819/* 2842/*
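
The rewritten xlog_recover_do_trans() dispatches on ITEM_TYPE() with a switch and stops at the first error. A stripped-down sketch of that loop shape over the circular ri_next ring, with simplified item types and stand-in handlers:

/*
 * Walk the circular item ring once, dispatch per item type, return on
 * the first failure. Types and handlers here are simplified stand-ins.
 */
#include <errno.h>
#include <stdio.h>

enum item_type { LI_BUF, LI_INODE, LI_UNKNOWN };

struct recover_item {
	enum item_type		type;
	struct recover_item	*ri_next;	/* circular list */
};

static int do_buffer_trans(struct recover_item *i) { (void)i; return 0; }
static int do_inode_trans(struct recover_item *i)  { (void)i; return 0; }

static int recover_do_trans(struct recover_item *first_item)
{
	struct recover_item *item = first_item;
	int error;

	do {
		switch (item->type) {
		case LI_BUF:
			error = do_buffer_trans(item);
			break;
		case LI_INODE:
			error = do_inode_trans(item);
			break;
		default:
			fprintf(stderr, "invalid item type (%d)\n", item->type);
			error = -EIO;
			break;
		}
		if (error)
			return error;	/* stop at the first failure */
		item = item->ri_next;
	} while (item != first_item);

	return 0;
}

int main(void)
{
	struct recover_item a, b;

	a.type = LI_BUF;   a.ri_next = &b;
	b.type = LI_INODE; b.ri_next = &a;
	return recover_do_trans(&a);
}
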
@@ -3490,9 +3513,11 @@ xlog_do_recovery_pass(
3490 hbp = xlog_get_bp(log, 1); 3513 hbp = xlog_get_bp(log, 1);
3491 if (!hbp) 3514 if (!hbp)
3492 return ENOMEM; 3515 return ENOMEM;
3493 if ((error = xlog_bread(log, tail_blk, 1, hbp))) 3516
3517 error = xlog_bread(log, tail_blk, 1, hbp, &offset);
3518 if (error)
3494 goto bread_err1; 3519 goto bread_err1;
3495 offset = xlog_align(log, tail_blk, 1, hbp); 3520
3496 rhead = (xlog_rec_header_t *)offset; 3521 rhead = (xlog_rec_header_t *)offset;
3497 error = xlog_valid_rec_header(log, rhead, tail_blk); 3522 error = xlog_valid_rec_header(log, rhead, tail_blk);
3498 if (error) 3523 if (error)
@@ -3526,9 +3551,10 @@ xlog_do_recovery_pass(
3526 memset(rhash, 0, sizeof(rhash)); 3551 memset(rhash, 0, sizeof(rhash));
3527 if (tail_blk <= head_blk) { 3552 if (tail_blk <= head_blk) {
3528 for (blk_no = tail_blk; blk_no < head_blk; ) { 3553 for (blk_no = tail_blk; blk_no < head_blk; ) {
3529 if ((error = xlog_bread(log, blk_no, hblks, hbp))) 3554 error = xlog_bread(log, blk_no, hblks, hbp, &offset);
3555 if (error)
3530 goto bread_err2; 3556 goto bread_err2;
3531 offset = xlog_align(log, blk_no, hblks, hbp); 3557
3532 rhead = (xlog_rec_header_t *)offset; 3558 rhead = (xlog_rec_header_t *)offset;
3533 error = xlog_valid_rec_header(log, rhead, blk_no); 3559 error = xlog_valid_rec_header(log, rhead, blk_no);
3534 if (error) 3560 if (error)
@@ -3536,10 +3562,11 @@ xlog_do_recovery_pass(
3536 3562
3537 /* blocks in data section */ 3563 /* blocks in data section */
3538 bblks = (int)BTOBB(be32_to_cpu(rhead->h_len)); 3564 bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
3539 error = xlog_bread(log, blk_no + hblks, bblks, dbp); 3565 error = xlog_bread(log, blk_no + hblks, bblks, dbp,
3566 &offset);
3540 if (error) 3567 if (error)
3541 goto bread_err2; 3568 goto bread_err2;
3542 offset = xlog_align(log, blk_no + hblks, bblks, dbp); 3569
3543 xlog_unpack_data(rhead, offset, log); 3570 xlog_unpack_data(rhead, offset, log);
3544 if ((error = xlog_recover_process_data(log, 3571 if ((error = xlog_recover_process_data(log,
3545 rhash, rhead, offset, pass))) 3572 rhash, rhead, offset, pass)))
@@ -3562,10 +3589,10 @@ xlog_do_recovery_pass(
3562 wrapped_hblks = 0; 3589 wrapped_hblks = 0;
3563 if (blk_no + hblks <= log->l_logBBsize) { 3590 if (blk_no + hblks <= log->l_logBBsize) {
3564 /* Read header in one read */ 3591 /* Read header in one read */
3565 error = xlog_bread(log, blk_no, hblks, hbp); 3592 error = xlog_bread(log, blk_no, hblks, hbp,
3593 &offset);
3566 if (error) 3594 if (error)
3567 goto bread_err2; 3595 goto bread_err2;
3568 offset = xlog_align(log, blk_no, hblks, hbp);
3569 } else { 3596 } else {
3570 /* This LR is split across physical log end */ 3597 /* This LR is split across physical log end */
3571 if (blk_no != log->l_logBBsize) { 3598 if (blk_no != log->l_logBBsize) {
@@ -3573,12 +3600,13 @@ xlog_do_recovery_pass(
3573 ASSERT(blk_no <= INT_MAX); 3600 ASSERT(blk_no <= INT_MAX);
3574 split_hblks = log->l_logBBsize - (int)blk_no; 3601 split_hblks = log->l_logBBsize - (int)blk_no;
3575 ASSERT(split_hblks > 0); 3602 ASSERT(split_hblks > 0);
3576 if ((error = xlog_bread(log, blk_no, 3603 error = xlog_bread(log, blk_no,
3577 split_hblks, hbp))) 3604 split_hblks, hbp,
3605 &offset);
3606 if (error)
3578 goto bread_err2; 3607 goto bread_err2;
3579 offset = xlog_align(log, blk_no,
3580 split_hblks, hbp);
3581 } 3608 }
3609
3582 /* 3610 /*
3583 * Note: this black magic still works with 3611 * Note: this black magic still works with
3584 * large sector sizes (non-512) only because: 3612 * large sector sizes (non-512) only because:
@@ -3596,14 +3624,19 @@ xlog_do_recovery_pass(
3596 error = XFS_BUF_SET_PTR(hbp, 3624 error = XFS_BUF_SET_PTR(hbp,
3597 bufaddr + BBTOB(split_hblks), 3625 bufaddr + BBTOB(split_hblks),
3598 BBTOB(hblks - split_hblks)); 3626 BBTOB(hblks - split_hblks));
3599 if (!error) 3627 if (error)
3600 error = xlog_bread(log, 0, 3628 goto bread_err2;
3601 wrapped_hblks, hbp); 3629
3602 if (!error) 3630 error = xlog_bread_noalign(log, 0,
3603 error = XFS_BUF_SET_PTR(hbp, bufaddr, 3631 wrapped_hblks, hbp);
3632 if (error)
3633 goto bread_err2;
3634
3635 error = XFS_BUF_SET_PTR(hbp, bufaddr,
3604 BBTOB(hblks)); 3636 BBTOB(hblks));
3605 if (error) 3637 if (error)
3606 goto bread_err2; 3638 goto bread_err2;
3639
3607 if (!offset) 3640 if (!offset)
3608 offset = xlog_align(log, 0, 3641 offset = xlog_align(log, 0,
3609 wrapped_hblks, hbp); 3642 wrapped_hblks, hbp);
@@ -3619,10 +3652,10 @@ xlog_do_recovery_pass(
3619 3652
3620 /* Read in data for log record */ 3653 /* Read in data for log record */
3621 if (blk_no + bblks <= log->l_logBBsize) { 3654 if (blk_no + bblks <= log->l_logBBsize) {
3622 error = xlog_bread(log, blk_no, bblks, dbp); 3655 error = xlog_bread(log, blk_no, bblks, dbp,
3656 &offset);
3623 if (error) 3657 if (error)
3624 goto bread_err2; 3658 goto bread_err2;
3625 offset = xlog_align(log, blk_no, bblks, dbp);
3626 } else { 3659 } else {
3627 /* This log record is split across the 3660 /* This log record is split across the
3628 * physical end of log */ 3661 * physical end of log */
@@ -3636,12 +3669,13 @@ xlog_do_recovery_pass(
3636 split_bblks = 3669 split_bblks =
3637 log->l_logBBsize - (int)blk_no; 3670 log->l_logBBsize - (int)blk_no;
3638 ASSERT(split_bblks > 0); 3671 ASSERT(split_bblks > 0);
3639 if ((error = xlog_bread(log, blk_no, 3672 error = xlog_bread(log, blk_no,
3640 split_bblks, dbp))) 3673 split_bblks, dbp,
3674 &offset);
3675 if (error)
3641 goto bread_err2; 3676 goto bread_err2;
3642 offset = xlog_align(log, blk_no,
3643 split_bblks, dbp);
3644 } 3677 }
3678
3645 /* 3679 /*
3646 * Note: this black magic still works with 3680 * Note: this black magic still works with
3647 * large sector sizes (non-512) only because: 3681 * large sector sizes (non-512) only because:
@@ -3658,15 +3692,19 @@ xlog_do_recovery_pass(
3658 error = XFS_BUF_SET_PTR(dbp, 3692 error = XFS_BUF_SET_PTR(dbp,
3659 bufaddr + BBTOB(split_bblks), 3693 bufaddr + BBTOB(split_bblks),
3660 BBTOB(bblks - split_bblks)); 3694 BBTOB(bblks - split_bblks));
3661 if (!error)
3662 error = xlog_bread(log, wrapped_hblks,
3663 bblks - split_bblks,
3664 dbp);
3665 if (!error)
3666 error = XFS_BUF_SET_PTR(dbp, bufaddr,
3667 h_size);
3668 if (error) 3695 if (error)
3669 goto bread_err2; 3696 goto bread_err2;
3697
3698 error = xlog_bread_noalign(log, wrapped_hblks,
3699 bblks - split_bblks,
3700 dbp);
3701 if (error)
3702 goto bread_err2;
3703
3704 error = XFS_BUF_SET_PTR(dbp, bufaddr, h_size);
3705 if (error)
3706 goto bread_err2;
3707
3670 if (!offset) 3708 if (!offset)
3671 offset = xlog_align(log, wrapped_hblks, 3709 offset = xlog_align(log, wrapped_hblks,
3672 bblks - split_bblks, dbp); 3710 bblks - split_bblks, dbp);
@@ -3683,17 +3721,21 @@ xlog_do_recovery_pass(
3683 3721
3684 /* read first part of physical log */ 3722 /* read first part of physical log */
3685 while (blk_no < head_blk) { 3723 while (blk_no < head_blk) {
3686 if ((error = xlog_bread(log, blk_no, hblks, hbp))) 3724 error = xlog_bread(log, blk_no, hblks, hbp, &offset);
3725 if (error)
3687 goto bread_err2; 3726 goto bread_err2;
3688 offset = xlog_align(log, blk_no, hblks, hbp); 3727
3689 rhead = (xlog_rec_header_t *)offset; 3728 rhead = (xlog_rec_header_t *)offset;
3690 error = xlog_valid_rec_header(log, rhead, blk_no); 3729 error = xlog_valid_rec_header(log, rhead, blk_no);
3691 if (error) 3730 if (error)
3692 goto bread_err2; 3731 goto bread_err2;
3732
3693 bblks = (int)BTOBB(be32_to_cpu(rhead->h_len)); 3733 bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
3694 if ((error = xlog_bread(log, blk_no+hblks, bblks, dbp))) 3734 error = xlog_bread(log, blk_no+hblks, bblks, dbp,
3735 &offset);
3736 if (error)
3695 goto bread_err2; 3737 goto bread_err2;
3696 offset = xlog_align(log, blk_no+hblks, bblks, dbp); 3738
3697 xlog_unpack_data(rhead, offset, log); 3739 xlog_unpack_data(rhead, offset, log);
3698 if ((error = xlog_recover_process_data(log, rhash, 3740 if ((error = xlog_recover_process_data(log, rhash,
3699 rhead, offset, pass))) 3741 rhead, offset, pass)))
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 35300250e86d..b101990df027 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -45,7 +45,6 @@
45#include "xfs_fsops.h" 45#include "xfs_fsops.h"
46#include "xfs_utils.h" 46#include "xfs_utils.h"
47 47
48STATIC int xfs_uuid_mount(xfs_mount_t *);
49STATIC void xfs_unmountfs_wait(xfs_mount_t *); 48STATIC void xfs_unmountfs_wait(xfs_mount_t *);
50 49
51 50
@@ -121,6 +120,84 @@ static const struct {
121 { sizeof(xfs_sb_t), 0 } 120 { sizeof(xfs_sb_t), 0 }
122}; 121};
123 122
123static DEFINE_MUTEX(xfs_uuid_table_mutex);
124static int xfs_uuid_table_size;
125static uuid_t *xfs_uuid_table;
126
127/*
128 * See if the UUID is unique among mounted XFS filesystems.
129 * Mount fails if UUID is nil or a FS with the same UUID is already mounted.
130 */
131STATIC int
132xfs_uuid_mount(
133 struct xfs_mount *mp)
134{
135 uuid_t *uuid = &mp->m_sb.sb_uuid;
136 int hole, i;
137
138 if (mp->m_flags & XFS_MOUNT_NOUUID)
139 return 0;
140
141 if (uuid_is_nil(uuid)) {
142 cmn_err(CE_WARN,
143 "XFS: Filesystem %s has nil UUID - can't mount",
144 mp->m_fsname);
145 return XFS_ERROR(EINVAL);
146 }
147
148 mutex_lock(&xfs_uuid_table_mutex);
149 for (i = 0, hole = -1; i < xfs_uuid_table_size; i++) {
150 if (uuid_is_nil(&xfs_uuid_table[i])) {
151 hole = i;
152 continue;
153 }
154 if (uuid_equal(uuid, &xfs_uuid_table[i]))
155 goto out_duplicate;
156 }
157
158 if (hole < 0) {
159 xfs_uuid_table = kmem_realloc(xfs_uuid_table,
160 (xfs_uuid_table_size + 1) * sizeof(*xfs_uuid_table),
161 xfs_uuid_table_size * sizeof(*xfs_uuid_table),
162 KM_SLEEP);
163 hole = xfs_uuid_table_size++;
164 }
165 xfs_uuid_table[hole] = *uuid;
166 mutex_unlock(&xfs_uuid_table_mutex);
167
168 return 0;
169
170 out_duplicate:
171 mutex_unlock(&xfs_uuid_table_mutex);
172 cmn_err(CE_WARN, "XFS: Filesystem %s has duplicate UUID - can't mount",
173 mp->m_fsname);
174 return XFS_ERROR(EINVAL);
175}
176
177STATIC void
178xfs_uuid_unmount(
179 struct xfs_mount *mp)
180{
181 uuid_t *uuid = &mp->m_sb.sb_uuid;
182 int i;
183
184 if (mp->m_flags & XFS_MOUNT_NOUUID)
185 return;
186
187 mutex_lock(&xfs_uuid_table_mutex);
188 for (i = 0; i < xfs_uuid_table_size; i++) {
189 if (uuid_is_nil(&xfs_uuid_table[i]))
190 continue;
191 if (!uuid_equal(uuid, &xfs_uuid_table[i]))
192 continue;
193 memset(&xfs_uuid_table[i], 0, sizeof(uuid_t));
194 break;
195 }
196 ASSERT(i < xfs_uuid_table_size);
197 mutex_unlock(&xfs_uuid_table_mutex);
198}
199
200
124/* 201/*
125 * Free up the resources associated with a mount structure. Assume that 202 * Free up the resources associated with a mount structure. Assume that
126 * the structure was initially zeroed, so we can tell which fields got 203 * the structure was initially zeroed, so we can tell which fields got
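
The new xfs_uuid_mount()/xfs_uuid_unmount() pair keeps mounted-filesystem UUIDs in a single mutex-protected table, reusing nil slots and rejecting duplicates. A userspace analog of that logic, with pthread, memcmp and realloc standing in for the kernel primitives and error handling on realloc elided:

/*
 * Global, mutex-protected UUID registry: register at mount, clear the
 * slot at unmount, reuse holes, reject nil and duplicate UUIDs.
 */
#include <pthread.h>
#include <stdlib.h>
#include <string.h>

typedef struct { unsigned char b[16]; } fs_uuid_t;

static pthread_mutex_t uuid_table_mutex = PTHREAD_MUTEX_INITIALIZER;
static fs_uuid_t *uuid_table;
static int uuid_table_size;

static int uuid_is_nil_(const fs_uuid_t *u)
{
	static const fs_uuid_t nil;
	return !memcmp(u, &nil, sizeof(nil));
}

/* returns 0 on success, -1 for nil or duplicate UUIDs */
int uuid_register(const fs_uuid_t *uuid)
{
	int i, hole = -1;

	if (uuid_is_nil_(uuid))
		return -1;

	pthread_mutex_lock(&uuid_table_mutex);
	for (i = 0; i < uuid_table_size; i++) {
		if (uuid_is_nil_(&uuid_table[i])) {
			hole = i;		/* remember a reusable slot */
			continue;
		}
		if (!memcmp(uuid, &uuid_table[i], sizeof(*uuid))) {
			pthread_mutex_unlock(&uuid_table_mutex);
			return -1;		/* already mounted */
		}
	}
	if (hole < 0) {				/* grow the table by one */
		uuid_table = realloc(uuid_table,
				     (uuid_table_size + 1) * sizeof(*uuid_table));
		hole = uuid_table_size++;
	}
	uuid_table[hole] = *uuid;
	pthread_mutex_unlock(&uuid_table_mutex);
	return 0;
}

/* clears the slot again at unmount time */
void uuid_unregister(const fs_uuid_t *uuid)
{
	int i;

	pthread_mutex_lock(&uuid_table_mutex);
	for (i = 0; i < uuid_table_size; i++) {
		if (!memcmp(uuid, &uuid_table[i], sizeof(*uuid))) {
			memset(&uuid_table[i], 0, sizeof(uuid_table[i]));
			break;
		}
	}
	pthread_mutex_unlock(&uuid_table_mutex);
}

int main(void)
{
	fs_uuid_t u = { .b = { 1, 2, 3 } };

	if (uuid_register(&u))		/* first mount: ok */
		return 1;
	if (!uuid_register(&u))		/* duplicate must be rejected */
		return 1;
	uuid_unregister(&u);
	return uuid_register(&u);	/* slot reusable after unmount */
}
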
@@ -256,6 +333,22 @@ xfs_mount_validate_sb(
256 return XFS_ERROR(ENOSYS); 333 return XFS_ERROR(ENOSYS);
257 } 334 }
258 335
336 /*
337 * Currently only very few inode sizes are supported.
338 */
339 switch (sbp->sb_inodesize) {
340 case 256:
341 case 512:
342 case 1024:
343 case 2048:
344 break;
345 default:
346 xfs_fs_mount_cmn_err(flags,
347 "inode size of %d bytes not supported",
348 sbp->sb_inodesize);
349 return XFS_ERROR(ENOSYS);
350 }
351
259 if (xfs_sb_validate_fsb_count(sbp, sbp->sb_dblocks) || 352 if (xfs_sb_validate_fsb_count(sbp, sbp->sb_dblocks) ||
260 xfs_sb_validate_fsb_count(sbp, sbp->sb_rblocks)) { 353 xfs_sb_validate_fsb_count(sbp, sbp->sb_rblocks)) {
261 xfs_fs_mount_cmn_err(flags, 354 xfs_fs_mount_cmn_err(flags,
@@ -574,32 +667,10 @@ xfs_mount_common(xfs_mount_t *mp, xfs_sb_t *sbp)
574 mp->m_sectbb_log = sbp->sb_sectlog - BBSHIFT; 667 mp->m_sectbb_log = sbp->sb_sectlog - BBSHIFT;
575 mp->m_agno_log = xfs_highbit32(sbp->sb_agcount - 1) + 1; 668 mp->m_agno_log = xfs_highbit32(sbp->sb_agcount - 1) + 1;
576 mp->m_agino_log = sbp->sb_inopblog + sbp->sb_agblklog; 669 mp->m_agino_log = sbp->sb_inopblog + sbp->sb_agblklog;
577 mp->m_litino = sbp->sb_inodesize - sizeof(struct xfs_dinode);
578 mp->m_blockmask = sbp->sb_blocksize - 1; 670 mp->m_blockmask = sbp->sb_blocksize - 1;
579 mp->m_blockwsize = sbp->sb_blocksize >> XFS_WORDLOG; 671 mp->m_blockwsize = sbp->sb_blocksize >> XFS_WORDLOG;
580 mp->m_blockwmask = mp->m_blockwsize - 1; 672 mp->m_blockwmask = mp->m_blockwsize - 1;
581 673
582 /*
583 * Setup for attributes, in case they get created.
584 * This value is for inodes getting attributes for the first time,
585 * the per-inode value is for old attribute values.
586 */
587 ASSERT(sbp->sb_inodesize >= 256 && sbp->sb_inodesize <= 2048);
588 switch (sbp->sb_inodesize) {
589 case 256:
590 mp->m_attroffset = XFS_LITINO(mp) -
591 XFS_BMDR_SPACE_CALC(MINABTPTRS);
592 break;
593 case 512:
594 case 1024:
595 case 2048:
596 mp->m_attroffset = XFS_BMDR_SPACE_CALC(6 * MINABTPTRS);
597 break;
598 default:
599 ASSERT(0);
600 }
601 ASSERT(mp->m_attroffset < XFS_LITINO(mp));
602
603 mp->m_alloc_mxr[0] = xfs_allocbt_maxrecs(mp, sbp->sb_blocksize, 1); 674 mp->m_alloc_mxr[0] = xfs_allocbt_maxrecs(mp, sbp->sb_blocksize, 1);
604 mp->m_alloc_mxr[1] = xfs_allocbt_maxrecs(mp, sbp->sb_blocksize, 0); 675 mp->m_alloc_mxr[1] = xfs_allocbt_maxrecs(mp, sbp->sb_blocksize, 0);
605 mp->m_alloc_mnr[0] = mp->m_alloc_mxr[0] / 2; 676 mp->m_alloc_mnr[0] = mp->m_alloc_mxr[0] / 2;
@@ -645,7 +716,7 @@ xfs_initialize_perag_data(xfs_mount_t *mp, xfs_agnumber_t agcount)
645 for (index = 0; index < agcount; index++) { 716 for (index = 0; index < agcount; index++) {
646 /* 717 /*
647 * read the agf, then the agi. This gets us 718 * read the agf, then the agi. This gets us
648 * all the inforamtion we need and populates the 719 * all the information we need and populates the
649 * per-ag structures for us. 720 * per-ag structures for us.
650 */ 721 */
651 error = xfs_alloc_pagf_init(mp, NULL, index, 0); 722 error = xfs_alloc_pagf_init(mp, NULL, index, 0);
@@ -886,8 +957,6 @@ xfs_check_sizes(xfs_mount_t *mp)
886} 957}
887 958
888/* 959/*
889 * xfs_mountfs
890 *
891 * This function does the following on an initial mount of a file system: 960 * This function does the following on an initial mount of a file system:
892 * - reads the superblock from disk and init the mount struct 961 * - reads the superblock from disk and init the mount struct
893 * - if we're a 32-bit kernel, do a size check on the superblock 962 * - if we're a 32-bit kernel, do a size check on the superblock
@@ -905,7 +974,6 @@ xfs_mountfs(
905 xfs_inode_t *rip; 974 xfs_inode_t *rip;
906 __uint64_t resblks; 975 __uint64_t resblks;
907 uint quotamount, quotaflags; 976 uint quotamount, quotaflags;
908 int uuid_mounted = 0;
909 int error = 0; 977 int error = 0;
910 978
911 xfs_mount_common(mp, sbp); 979 xfs_mount_common(mp, sbp);
@@ -960,7 +1028,7 @@ xfs_mountfs(
960 */ 1028 */
961 error = xfs_update_alignment(mp); 1029 error = xfs_update_alignment(mp);
962 if (error) 1030 if (error)
963 goto error1; 1031 goto out;
964 1032
965 xfs_alloc_compute_maxlevels(mp); 1033 xfs_alloc_compute_maxlevels(mp);
966 xfs_bmap_compute_maxlevels(mp, XFS_DATA_FORK); 1034 xfs_bmap_compute_maxlevels(mp, XFS_DATA_FORK);
@@ -971,19 +1039,9 @@ xfs_mountfs(
971 1039
972 mp->m_maxioffset = xfs_max_file_offset(sbp->sb_blocklog); 1040 mp->m_maxioffset = xfs_max_file_offset(sbp->sb_blocklog);
973 1041
974 /* 1042 error = xfs_uuid_mount(mp);
975 * XFS uses the uuid from the superblock as the unique 1043 if (error)
976 * identifier for fsid. We can not use the uuid from the volume 1044 goto out;
977 * since a single partition filesystem is identical to a single
978 * partition volume/filesystem.
979 */
980 if ((mp->m_flags & XFS_MOUNT_NOUUID) == 0) {
981 if (xfs_uuid_mount(mp)) {
982 error = XFS_ERROR(EINVAL);
983 goto error1;
984 }
985 uuid_mounted=1;
986 }
987 1045
988 /* 1046 /*
989 * Set the minimum read and write sizes 1047 * Set the minimum read and write sizes
@@ -1007,7 +1065,7 @@ xfs_mountfs(
1007 */ 1065 */
1008 error = xfs_check_sizes(mp); 1066 error = xfs_check_sizes(mp);
1009 if (error) 1067 if (error)
1010 goto error1; 1068 goto out_remove_uuid;
1011 1069
1012 /* 1070 /*
1013 * Initialize realtime fields in the mount structure 1071 * Initialize realtime fields in the mount structure
@@ -1015,7 +1073,7 @@ xfs_mountfs(
1015 error = xfs_rtmount_init(mp); 1073 error = xfs_rtmount_init(mp);
1016 if (error) { 1074 if (error) {
1017 cmn_err(CE_WARN, "XFS: RT mount failed"); 1075 cmn_err(CE_WARN, "XFS: RT mount failed");
1018 goto error1; 1076 goto out_remove_uuid;
1019 } 1077 }
1020 1078
1021 /* 1079 /*
@@ -1045,26 +1103,26 @@ xfs_mountfs(
1045 mp->m_perag = kmem_zalloc(sbp->sb_agcount * sizeof(xfs_perag_t), 1103 mp->m_perag = kmem_zalloc(sbp->sb_agcount * sizeof(xfs_perag_t),
1046 KM_MAYFAIL); 1104 KM_MAYFAIL);
1047 if (!mp->m_perag) 1105 if (!mp->m_perag)
1048 goto error1; 1106 goto out_remove_uuid;
1049 1107
1050 mp->m_maxagi = xfs_initialize_perag(mp, sbp->sb_agcount); 1108 mp->m_maxagi = xfs_initialize_perag(mp, sbp->sb_agcount);
1051 1109
1110 if (!sbp->sb_logblocks) {
1111 cmn_err(CE_WARN, "XFS: no log defined");
1112 XFS_ERROR_REPORT("xfs_mountfs", XFS_ERRLEVEL_LOW, mp);
1113 error = XFS_ERROR(EFSCORRUPTED);
1114 goto out_free_perag;
1115 }
1116
1052 /* 1117 /*
1053 * log's mount-time initialization. Perform 1st part recovery if needed 1118 * log's mount-time initialization. Perform 1st part recovery if needed
1054 */ 1119 */
1055 if (likely(sbp->sb_logblocks > 0)) { /* check for volume case */ 1120 error = xfs_log_mount(mp, mp->m_logdev_targp,
1056 error = xfs_log_mount(mp, mp->m_logdev_targp, 1121 XFS_FSB_TO_DADDR(mp, sbp->sb_logstart),
1057 XFS_FSB_TO_DADDR(mp, sbp->sb_logstart), 1122 XFS_FSB_TO_BB(mp, sbp->sb_logblocks));
1058 XFS_FSB_TO_BB(mp, sbp->sb_logblocks)); 1123 if (error) {
1059 if (error) { 1124 cmn_err(CE_WARN, "XFS: log mount failed");
1060 cmn_err(CE_WARN, "XFS: log mount failed"); 1125 goto out_free_perag;
1061 goto error2;
1062 }
1063 } else { /* No log has been defined */
1064 cmn_err(CE_WARN, "XFS: no log defined");
1065 XFS_ERROR_REPORT("xfs_mountfs_int(1)", XFS_ERRLEVEL_LOW, mp);
1066 error = XFS_ERROR(EFSCORRUPTED);
1067 goto error2;
1068 } 1126 }
1069 1127
1070 /* 1128 /*
@@ -1086,15 +1144,14 @@ xfs_mountfs(
1086 * If we are currently making the filesystem, the initialisation will 1144 * If we are currently making the filesystem, the initialisation will
1087 * fail as the perag data is in an undefined state. 1145 * fail as the perag data is in an undefined state.
1088 */ 1146 */
1089
1090 if (xfs_sb_version_haslazysbcount(&mp->m_sb) && 1147 if (xfs_sb_version_haslazysbcount(&mp->m_sb) &&
1091 !XFS_LAST_UNMOUNT_WAS_CLEAN(mp) && 1148 !XFS_LAST_UNMOUNT_WAS_CLEAN(mp) &&
1092 !mp->m_sb.sb_inprogress) { 1149 !mp->m_sb.sb_inprogress) {
1093 error = xfs_initialize_perag_data(mp, sbp->sb_agcount); 1150 error = xfs_initialize_perag_data(mp, sbp->sb_agcount);
1094 if (error) { 1151 if (error)
1095 goto error2; 1152 goto out_free_perag;
1096 }
1097 } 1153 }
1154
1098 /* 1155 /*
1099 * Get and sanity-check the root inode. 1156 * Get and sanity-check the root inode.
1100 * Save the pointer to it in the mount structure. 1157 * Save the pointer to it in the mount structure.
@@ -1102,7 +1159,7 @@ xfs_mountfs(
1102 error = xfs_iget(mp, NULL, sbp->sb_rootino, 0, XFS_ILOCK_EXCL, &rip, 0); 1159 error = xfs_iget(mp, NULL, sbp->sb_rootino, 0, XFS_ILOCK_EXCL, &rip, 0);
1103 if (error) { 1160 if (error) {
1104 cmn_err(CE_WARN, "XFS: failed to read root inode"); 1161 cmn_err(CE_WARN, "XFS: failed to read root inode");
1105 goto error3; 1162 goto out_log_dealloc;
1106 } 1163 }
1107 1164
1108 ASSERT(rip != NULL); 1165 ASSERT(rip != NULL);
@@ -1116,7 +1173,7 @@ xfs_mountfs(
1116 XFS_ERROR_REPORT("xfs_mountfs_int(2)", XFS_ERRLEVEL_LOW, 1173 XFS_ERROR_REPORT("xfs_mountfs_int(2)", XFS_ERRLEVEL_LOW,
1117 mp); 1174 mp);
1118 error = XFS_ERROR(EFSCORRUPTED); 1175 error = XFS_ERROR(EFSCORRUPTED);
1119 goto error4; 1176 goto out_rele_rip;
1120 } 1177 }
1121 mp->m_rootip = rip; /* save it */ 1178 mp->m_rootip = rip; /* save it */
1122 1179
@@ -1131,7 +1188,7 @@ xfs_mountfs(
1131 * Free up the root inode. 1188 * Free up the root inode.
1132 */ 1189 */
1133 cmn_err(CE_WARN, "XFS: failed to read RT inodes"); 1190 cmn_err(CE_WARN, "XFS: failed to read RT inodes");
1134 goto error4; 1191 goto out_rele_rip;
1135 } 1192 }
1136 1193
1137 /* 1194 /*
@@ -1143,7 +1200,7 @@ xfs_mountfs(
1143 error = xfs_mount_log_sb(mp, mp->m_update_flags); 1200 error = xfs_mount_log_sb(mp, mp->m_update_flags);
1144 if (error) { 1201 if (error) {
1145 cmn_err(CE_WARN, "XFS: failed to write sb changes"); 1202 cmn_err(CE_WARN, "XFS: failed to write sb changes");
1146 goto error4; 1203 goto out_rtunmount;
1147 } 1204 }
1148 } 1205 }
1149 1206
@@ -1152,7 +1209,7 @@ xfs_mountfs(
1152 */ 1209 */
1153 error = XFS_QM_INIT(mp, &quotamount, &quotaflags); 1210 error = XFS_QM_INIT(mp, &quotamount, &quotaflags);
1154 if (error) 1211 if (error)
1155 goto error4; 1212 goto out_rtunmount;
1156 1213
1157 /* 1214 /*
1158 * Finish recovering the file system. This part needed to be 1215 * Finish recovering the file system. This part needed to be
@@ -1162,7 +1219,7 @@ xfs_mountfs(
1162 error = xfs_log_mount_finish(mp); 1219 error = xfs_log_mount_finish(mp);
1163 if (error) { 1220 if (error) {
1164 cmn_err(CE_WARN, "XFS: log mount finish failed"); 1221 cmn_err(CE_WARN, "XFS: log mount finish failed");
1165 goto error4; 1222 goto out_rtunmount;
1166 } 1223 }
1167 1224
1168 /* 1225 /*
@@ -1170,7 +1227,7 @@ xfs_mountfs(
1170 */ 1227 */
1171 error = XFS_QM_MOUNT(mp, quotamount, quotaflags); 1228 error = XFS_QM_MOUNT(mp, quotamount, quotaflags);
1172 if (error) 1229 if (error)
1173 goto error4; 1230 goto out_rtunmount;
1174 1231
1175 /* 1232 /*
1176 * Now we are mounted, reserve a small amount of unused space for 1233 * Now we are mounted, reserve a small amount of unused space for
@@ -1194,18 +1251,17 @@ xfs_mountfs(
1194 1251
1195 return 0; 1252 return 0;
1196 1253
1197 error4: 1254 out_rtunmount:
1198 /* 1255 xfs_rtunmount_inodes(mp);
1199 * Free up the root inode. 1256 out_rele_rip:
1200 */
1201 IRELE(rip); 1257 IRELE(rip);
1202 error3: 1258 out_log_dealloc:
1203 xfs_log_unmount_dealloc(mp); 1259 xfs_log_unmount(mp);
1204 error2: 1260 out_free_perag:
1205 xfs_free_perag(mp); 1261 xfs_free_perag(mp);
1206 error1: 1262 out_remove_uuid:
1207 if (uuid_mounted) 1263 xfs_uuid_unmount(mp);
1208 uuid_table_remove(&mp->m_sb.sb_uuid); 1264 out:
1209 return error; 1265 return error;
1210} 1266}
1211 1267
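[Editor's note] The hunk above swaps the old numbered error labels (error1..error4) for labels named after the teardown step they perform (out_rtunmount, out_rele_rip, out_log_dealloc, out_free_perag, out_remove_uuid), so each failure site jumps straight to the first undo action it actually needs. A minimal userspace sketch of that unwind pattern, using made-up helpers (grab_uuid, grab_perag, grab_log) rather than the real XFS calls:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy stand-ins for resources acquired in order during a mount. */
static int grab_uuid(void)    { return 0; }
static void drop_uuid(void)   { puts("uuid released"); }
static void *grab_perag(void) { return malloc(64); }
static int grab_log(void)     { return EIO; }   /* pretend the log init fails */

static int mount_like(void)
{
        void *perag;
        int error;

        error = grab_uuid();
        if (error)
                goto out;

        perag = grab_perag();
        if (!perag) {
                error = ENOMEM;
                goto out_remove_uuid;
        }

        error = grab_log();
        if (error)
                goto out_free_perag;

        return 0;       /* success: none of the unwind code below runs */

        /*
         * Unwind in reverse order of acquisition; each label undoes only
         * the steps that are known to have succeeded at that point.
         */
 out_free_perag:
        free(perag);
 out_remove_uuid:
        drop_uuid();
 out:
        return error;
}

int main(void)
{
        return mount_like();
}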
@@ -1226,15 +1282,12 @@ xfs_unmountfs(
1226 */ 1282 */
1227 XFS_QM_UNMOUNT(mp); 1283 XFS_QM_UNMOUNT(mp);
1228 1284
1229 if (mp->m_rbmip) 1285 xfs_rtunmount_inodes(mp);
1230 IRELE(mp->m_rbmip);
1231 if (mp->m_rsumip)
1232 IRELE(mp->m_rsumip);
1233 IRELE(mp->m_rootip); 1286 IRELE(mp->m_rootip);
1234 1287
1235 /* 1288 /*
1236 * We can potentially deadlock here if we have an inode cluster 1289 * We can potentially deadlock here if we have an inode cluster
1237 * that has been freed has it's buffer still pinned in memory because 1290 * that has been freed has its buffer still pinned in memory because
1238 * the transaction is still sitting in a iclog. The stale inodes 1291 * the transaction is still sitting in a iclog. The stale inodes
1239 * on that buffer will have their flush locks held until the 1292 * on that buffer will have their flush locks held until the
1240 * transaction hits the disk and the callbacks run. the inode 1293 * transaction hits the disk and the callbacks run. the inode
@@ -1266,7 +1319,7 @@ xfs_unmountfs(
1266 * Unreserve any blocks we have so that when we unmount we don't account 1319 * Unreserve any blocks we have so that when we unmount we don't account
1267 * the reserved free space as used. This is really only necessary for 1320 * the reserved free space as used. This is really only necessary for
1268 * lazy superblock counting because it trusts the incore superblock 1321 * lazy superblock counting because it trusts the incore superblock
1269 * counters to be aboslutely correct on clean unmount. 1322 * counters to be absolutely correct on clean unmount.
1270 * 1323 *
1271 * We don't bother correcting this elsewhere for lazy superblock 1324 * We don't bother correcting this elsewhere for lazy superblock
1272 * counting because on mount of an unclean filesystem we reconstruct the 1325 * counting because on mount of an unclean filesystem we reconstruct the
@@ -1288,10 +1341,9 @@ xfs_unmountfs(
1288 "Freespace may not be correct on next mount."); 1341 "Freespace may not be correct on next mount.");
1289 xfs_unmountfs_writesb(mp); 1342 xfs_unmountfs_writesb(mp);
1290 xfs_unmountfs_wait(mp); /* wait for async bufs */ 1343 xfs_unmountfs_wait(mp); /* wait for async bufs */
1291 xfs_log_unmount(mp); /* Done! No more fs ops. */ 1344 xfs_log_unmount_write(mp);
1292 1345 xfs_log_unmount(mp);
1293 if ((mp->m_flags & XFS_MOUNT_NOUUID) == 0) 1346 xfs_uuid_unmount(mp);
1294 uuid_table_remove(&mp->m_sb.sb_uuid);
1295 1347
1296#if defined(DEBUG) 1348#if defined(DEBUG)
1297 xfs_errortag_clearall(mp, 0); 1349 xfs_errortag_clearall(mp, 0);
@@ -1793,29 +1845,6 @@ xfs_freesb(
1793} 1845}
1794 1846
1795/* 1847/*
1796 * See if the UUID is unique among mounted XFS filesystems.
1797 * Mount fails if UUID is nil or a FS with the same UUID is already mounted.
1798 */
1799STATIC int
1800xfs_uuid_mount(
1801 xfs_mount_t *mp)
1802{
1803 if (uuid_is_nil(&mp->m_sb.sb_uuid)) {
1804 cmn_err(CE_WARN,
1805 "XFS: Filesystem %s has nil UUID - can't mount",
1806 mp->m_fsname);
1807 return -1;
1808 }
1809 if (!uuid_table_insert(&mp->m_sb.sb_uuid)) {
1810 cmn_err(CE_WARN,
1811 "XFS: Filesystem %s has duplicate UUID - can't mount",
1812 mp->m_fsname);
1813 return -1;
1814 }
1815 return 0;
1816}
1817
1818/*
1819 * Used to log changes to the superblock unit and width fields which could 1848 * Used to log changes to the superblock unit and width fields which could
1820 * be altered by the mount options, as well as any potential sb_features2 1849 * be altered by the mount options, as well as any potential sb_features2
1821 * fixup. Only the first superblock is updated. 1850 * fixup. Only the first superblock is updated.
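[Editor's note] The deleted helper above failed the mount when the superblock UUID was nil or already registered as mounted; the new code in xfs_mountfs calls a reworked xfs_uuid_mount that returns an error code directly (its body is not part of this hunk). A rough userspace sketch of the same idea, using a fixed-size array and invented names (toy_uuid_mount), not the real uuid table code:

#include <stdio.h>
#include <string.h>

typedef struct { unsigned char b[16]; } toy_uuid_t;

#define MAX_MOUNTS 8
static toy_uuid_t mounted[MAX_MOUNTS];
static int nmounted;

static int uuid_is_nil(const toy_uuid_t *u)
{
        static const toy_uuid_t nil;    /* all zeroes */
        return memcmp(u, &nil, sizeof(nil)) == 0;
}

/* Fail if the UUID is nil or a filesystem with the same UUID is mounted. */
static int toy_uuid_mount(const toy_uuid_t *u)
{
        int i;

        if (uuid_is_nil(u)) {
                fprintf(stderr, "nil UUID - can't mount\n");
                return -1;
        }
        for (i = 0; i < nmounted; i++) {
                if (memcmp(&mounted[i], u, sizeof(*u)) == 0) {
                        fprintf(stderr, "duplicate UUID - can't mount\n");
                        return -1;
                }
        }
        if (nmounted == MAX_MOUNTS)
                return -1;      /* table full */
        mounted[nmounted++] = *u;
        return 0;
}

int main(void)
{
        toy_uuid_t a = { { 1, 2, 3 } };

        printf("first mount:  %d\n", toy_uuid_mount(&a));
        printf("second mount: %d\n", toy_uuid_mount(&a));  /* rejected */
        return 0;
}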
@@ -1868,7 +1897,7 @@ xfs_mount_log_sb(
1868 * we disable the per-cpu counter and go through the slow path. 1897 * we disable the per-cpu counter and go through the slow path.
1869 * 1898 *
1870 * The slow path is the current xfs_mod_incore_sb() function. This means that 1899 * The slow path is the current xfs_mod_incore_sb() function. This means that
1871 * when we disable a per-cpu counter, we need to drain it's resources back to 1900 * when we disable a per-cpu counter, we need to drain its resources back to
1872 * the global superblock. We do this after disabling the counter to prevent 1901 * the global superblock. We do this after disabling the counter to prevent
1873 * more threads from queueing up on the counter. 1902 * more threads from queueing up on the counter.
1874 * 1903 *
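[Editor's note] The comment block above describes the per-cpu superblock counter scheme: when a counter can no longer satisfy a modification from its local share, it is disabled and every CPU's remainder is drained back into the global in-core value before falling back to the slow, global path (xfs_mod_incore_sb). A single-threaded toy that only models the disable-and-drain step; the invented arrays below are not the real per-cpu data or locking:

#include <stdio.h>

#define NR_CPUS 4

static long percpu_free[NR_CPUS] = { 100, 40, 7, 250 };
static long global_free;                /* the "in-core superblock" count */
static int counter_disabled;

/*
 * Fold every CPU's share back into the global counter, then disable the
 * per-cpu fast path so later callers use the global value.
 */
static void disable_and_drain(void)
{
        int cpu;

        counter_disabled = 1;
        for (cpu = 0; cpu < NR_CPUS; cpu++) {
                global_free += percpu_free[cpu];
                percpu_free[cpu] = 0;
        }
}

static int modify_free(long delta)
{
        if (!counter_disabled && percpu_free[0] + delta >= 0) {
                percpu_free[0] += delta;        /* fast path: local to "cpu 0" */
                return 0;
        }
        disable_and_drain();                    /* slow path: one global counter */
        if (global_free + delta < 0)
                return -1;                      /* ENOSPC in the real code */
        global_free += delta;
        return 0;
}

int main(void)
{
        printf("small alloc: %d\n", modify_free(-50));
        printf("large alloc: %d\n", modify_free(-300)); /* forces the drain */
        printf("global now %ld, disabled=%d\n", global_free, counter_disabled);
        return 0;
}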
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index f5e9937f9bdb..7af44adffc8f 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -136,7 +136,6 @@ typedef int (*xfs_dqvopchownresv_t)(struct xfs_trans *, struct xfs_inode *,
136 struct xfs_dquot *, struct xfs_dquot *, uint); 136 struct xfs_dquot *, struct xfs_dquot *, uint);
137typedef void (*xfs_dqstatvfs_t)(struct xfs_inode *, struct kstatfs *); 137typedef void (*xfs_dqstatvfs_t)(struct xfs_inode *, struct kstatfs *);
138typedef int (*xfs_dqsync_t)(struct xfs_mount *, int flags); 138typedef int (*xfs_dqsync_t)(struct xfs_mount *, int flags);
139typedef int (*xfs_quotactl_t)(struct xfs_mount *, int, int, xfs_caddr_t);
140 139
141typedef struct xfs_qmops { 140typedef struct xfs_qmops {
142 xfs_qminit_t xfs_qminit; 141 xfs_qminit_t xfs_qminit;
@@ -154,7 +153,6 @@ typedef struct xfs_qmops {
154 xfs_dqvopchownresv_t xfs_dqvopchownresv; 153 xfs_dqvopchownresv_t xfs_dqvopchownresv;
155 xfs_dqstatvfs_t xfs_dqstatvfs; 154 xfs_dqstatvfs_t xfs_dqstatvfs;
156 xfs_dqsync_t xfs_dqsync; 155 xfs_dqsync_t xfs_dqsync;
157 xfs_quotactl_t xfs_quotactl;
158 struct xfs_dqtrxops *xfs_dqtrxops; 156 struct xfs_dqtrxops *xfs_dqtrxops;
159} xfs_qmops_t; 157} xfs_qmops_t;
160 158
@@ -188,8 +186,6 @@ typedef struct xfs_qmops {
188 (*(ip)->i_mount->m_qm_ops->xfs_dqstatvfs)(ip, statp) 186 (*(ip)->i_mount->m_qm_ops->xfs_dqstatvfs)(ip, statp)
189#define XFS_QM_DQSYNC(mp, flags) \ 187#define XFS_QM_DQSYNC(mp, flags) \
190 (*(mp)->m_qm_ops->xfs_dqsync)(mp, flags) 188 (*(mp)->m_qm_ops->xfs_dqsync)(mp, flags)
191#define XFS_QM_QUOTACTL(mp, cmd, id, addr) \
192 (*(mp)->m_qm_ops->xfs_quotactl)(mp, cmd, id, addr)
193 189
194#ifdef HAVE_PERCPU_SB 190#ifdef HAVE_PERCPU_SB
195 191
@@ -273,19 +269,17 @@ typedef struct xfs_mount {
273 uint m_inobt_mnr[2]; /* min inobt btree records */ 269 uint m_inobt_mnr[2]; /* min inobt btree records */
274 uint m_ag_maxlevels; /* XFS_AG_MAXLEVELS */ 270 uint m_ag_maxlevels; /* XFS_AG_MAXLEVELS */
275 uint m_bm_maxlevels[2]; /* XFS_BM_MAXLEVELS */ 271 uint m_bm_maxlevels[2]; /* XFS_BM_MAXLEVELS */
276 uint m_in_maxlevels; /* XFS_IN_MAXLEVELS */ 272 uint m_in_maxlevels; /* max inobt btree levels. */
277 struct xfs_perag *m_perag; /* per-ag accounting info */ 273 struct xfs_perag *m_perag; /* per-ag accounting info */
278 struct rw_semaphore m_peraglock; /* lock for m_perag (pointer) */ 274 struct rw_semaphore m_peraglock; /* lock for m_perag (pointer) */
279 struct mutex m_growlock; /* growfs mutex */ 275 struct mutex m_growlock; /* growfs mutex */
280 int m_fixedfsid[2]; /* unchanged for life of FS */ 276 int m_fixedfsid[2]; /* unchanged for life of FS */
281 uint m_dmevmask; /* DMI events for this FS */ 277 uint m_dmevmask; /* DMI events for this FS */
282 __uint64_t m_flags; /* global mount flags */ 278 __uint64_t m_flags; /* global mount flags */
283 uint m_attroffset; /* inode attribute offset */
284 uint m_dir_node_ents; /* #entries in a dir danode */ 279 uint m_dir_node_ents; /* #entries in a dir danode */
285 uint m_attr_node_ents; /* #entries in attr danode */ 280 uint m_attr_node_ents; /* #entries in attr danode */
286 int m_ialloc_inos; /* inodes in inode allocation */ 281 int m_ialloc_inos; /* inodes in inode allocation */
287 int m_ialloc_blks; /* blocks in inode allocation */ 282 int m_ialloc_blks; /* blocks in inode allocation */
288 int m_litino; /* size of inode union area */
289 int m_inoalign_mask;/* mask sb_inoalignmt if used */ 283 int m_inoalign_mask;/* mask sb_inoalignmt if used */
290 uint m_qflags; /* quota status flags */ 284 uint m_qflags; /* quota status flags */
291 xfs_trans_reservations_t m_reservations;/* precomputed res values */ 285 xfs_trans_reservations_t m_reservations;/* precomputed res values */
@@ -293,9 +287,6 @@ typedef struct xfs_mount {
293 __uint64_t m_maxioffset; /* maximum inode offset */ 287 __uint64_t m_maxioffset; /* maximum inode offset */
294 __uint64_t m_resblks; /* total reserved blocks */ 288 __uint64_t m_resblks; /* total reserved blocks */
295 __uint64_t m_resblks_avail;/* available reserved blocks */ 289 __uint64_t m_resblks_avail;/* available reserved blocks */
296#if XFS_BIG_INUMS
297 xfs_ino_t m_inoadd; /* add value for ino64_offset */
298#endif
299 int m_dalign; /* stripe unit */ 290 int m_dalign; /* stripe unit */
300 int m_swidth; /* stripe width */ 291 int m_swidth; /* stripe width */
301 int m_sinoalign; /* stripe unit inode alignment */ 292 int m_sinoalign; /* stripe unit inode alignment */
@@ -337,7 +328,6 @@ typedef struct xfs_mount {
337#define XFS_MOUNT_WSYNC (1ULL << 0) /* for nfs - all metadata ops 328#define XFS_MOUNT_WSYNC (1ULL << 0) /* for nfs - all metadata ops
338 must be synchronous except 329 must be synchronous except
339 for space allocations */ 330 for space allocations */
340#define XFS_MOUNT_INO64 (1ULL << 1)
341#define XFS_MOUNT_DMAPI (1ULL << 2) /* dmapi is enabled */ 331#define XFS_MOUNT_DMAPI (1ULL << 2) /* dmapi is enabled */
342#define XFS_MOUNT_WAS_CLEAN (1ULL << 3) 332#define XFS_MOUNT_WAS_CLEAN (1ULL << 3)
343#define XFS_MOUNT_FS_SHUTDOWN (1ULL << 4) /* atomic stop of all filesystem 333#define XFS_MOUNT_FS_SHUTDOWN (1ULL << 4) /* atomic stop of all filesystem
@@ -389,8 +379,8 @@ typedef struct xfs_mount {
389 * Synchronous read and write sizes. This should be 379 * Synchronous read and write sizes. This should be
390 * better for NFSv2 wsync filesystems. 380 * better for NFSv2 wsync filesystems.
391 */ 381 */
392#define XFS_WSYNC_READIO_LOG 15 /* 32K */ 382#define XFS_WSYNC_READIO_LOG 15 /* 32k */
393#define XFS_WSYNC_WRITEIO_LOG 14 /* 16K */ 383#define XFS_WSYNC_WRITEIO_LOG 14 /* 16k */
394 384
395/* 385/*
396 * Allow large block sizes to be reported to userspace programs if the 386 * Allow large block sizes to be reported to userspace programs if the
@@ -500,9 +490,6 @@ typedef struct xfs_mod_sb {
500 int64_t msb_delta; /* Change to make to specified field */ 490 int64_t msb_delta; /* Change to make to specified field */
501} xfs_mod_sb_t; 491} xfs_mod_sb_t;
502 492
503#define XFS_MOUNT_ILOCK(mp) mutex_lock(&((mp)->m_ilock))
504#define XFS_MOUNT_IUNLOCK(mp) mutex_unlock(&((mp)->m_ilock))
505
506extern int xfs_log_sbcount(xfs_mount_t *, uint); 493extern int xfs_log_sbcount(xfs_mount_t *, uint);
507extern int xfs_mountfs(xfs_mount_t *mp); 494extern int xfs_mountfs(xfs_mount_t *mp);
508extern void xfs_mountfs_check_barriers(xfs_mount_t *mp); 495extern void xfs_mountfs_check_barriers(xfs_mount_t *mp);
diff --git a/fs/xfs/xfs_qmops.c b/fs/xfs/xfs_qmops.c
index 27f80581520a..e101790ea8e7 100644
--- a/fs/xfs/xfs_qmops.c
+++ b/fs/xfs/xfs_qmops.c
@@ -126,7 +126,6 @@ static struct xfs_qmops xfs_qmcore_stub = {
126 .xfs_dqvopchownresv = (xfs_dqvopchownresv_t) fs_noerr, 126 .xfs_dqvopchownresv = (xfs_dqvopchownresv_t) fs_noerr,
127 .xfs_dqstatvfs = (xfs_dqstatvfs_t) fs_noval, 127 .xfs_dqstatvfs = (xfs_dqstatvfs_t) fs_noval,
128 .xfs_dqsync = (xfs_dqsync_t) fs_noerr, 128 .xfs_dqsync = (xfs_dqsync_t) fs_noerr,
129 .xfs_quotactl = (xfs_quotactl_t) fs_nosys,
130}; 129};
131 130
132int 131int
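[Editor's note] For context on the hunk above: xfs_qmcore_stub is a table of function pointers that stands in for the quota subsystem when it is not active, with each slot pointing at a trivial stub (fs_noerr, fs_noval, fs_nosys); the quotactl slot is simply dropped from that table here. A small self-contained sketch of the vtable-with-stubs pattern, with invented names (toy_qmops, stub_nosys) and matching prototypes instead of the casts the real code uses:

#include <errno.h>
#include <stdio.h>

struct toy_inode { int ino; };

/* The "ops" table: one function pointer per operation. */
struct toy_qmops {
        int (*qm_init)(void);
        int (*qm_sync)(struct toy_inode *ip, int flags);
};

/* Default stubs used when the subsystem is compiled out or disabled. */
static int stub_noerr(void)
{
        return 0;
}

static int stub_nosys(struct toy_inode *ip, int flags)
{
        (void)ip;
        (void)flags;
        return ENOSYS;
}

static const struct toy_qmops toy_qm_stub = {
        .qm_init = stub_noerr,
        .qm_sync = stub_nosys,
};

int main(void)
{
        const struct toy_qmops *ops = &toy_qm_stub;
        struct toy_inode ino = { 42 };

        printf("init -> %d\n", ops->qm_init());
        printf("sync -> %d (ENOSYS means 'not implemented')\n",
               ops->qm_sync(&ino, 0));
        return 0;
}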
diff --git a/fs/xfs/xfs_quota.h b/fs/xfs/xfs_quota.h
index 48965ecaa155..f5d1202dde25 100644
--- a/fs/xfs/xfs_quota.h
+++ b/fs/xfs/xfs_quota.h
@@ -18,6 +18,8 @@
18#ifndef __XFS_QUOTA_H__ 18#ifndef __XFS_QUOTA_H__
19#define __XFS_QUOTA_H__ 19#define __XFS_QUOTA_H__
20 20
21struct xfs_trans;
22
21/* 23/*
22 * The ondisk form of a dquot structure. 24 * The ondisk form of a dquot structure.
23 */ 25 */
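[Editor's note] The added "struct xfs_trans;" line is a forward declaration: it lets the rest of the header refer to struct xfs_trans pointers without including the header that defines the full type. A tiny standalone illustration of why an incomplete type is enough for prototypes and pointer members (struct widget is invented for the example):

#include <stdio.h>

struct widget;                  /* forward declaration: type is incomplete here */

/* Pointers to an incomplete type are fine in structs and prototypes... */
struct handle {
        struct widget *w;
};
int widget_id(const struct widget *w);

/* ...the full definition is only needed where members are accessed. */
struct widget {
        int id;
};

int widget_id(const struct widget *w)
{
        return w->id;
}

int main(void)
{
        struct widget w = { 7 };
        struct handle h = { &w };

        printf("%d\n", widget_id(h.w));
        return 0;
}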
@@ -185,7 +187,6 @@ typedef struct xfs_qoff_logformat {
185 * to a single function. None of these XFS_QMOPT_* flags are meant to have 187 * to a single function. None of these XFS_QMOPT_* flags are meant to have
186 * persistent values (ie. their values can and will change between versions) 188 * persistent values (ie. their values can and will change between versions)
187 */ 189 */
188#define XFS_QMOPT_DQLOCK 0x0000001 /* dqlock */
189#define XFS_QMOPT_DQALLOC 0x0000002 /* alloc dquot ondisk if needed */ 190#define XFS_QMOPT_DQALLOC 0x0000002 /* alloc dquot ondisk if needed */
190#define XFS_QMOPT_UQUOTA 0x0000004 /* user dquot requested */ 191#define XFS_QMOPT_UQUOTA 0x0000004 /* user dquot requested */
191#define XFS_QMOPT_PQUOTA 0x0000008 /* project dquot requested */ 192#define XFS_QMOPT_PQUOTA 0x0000008 /* project dquot requested */
diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
index c5bb86f3ec05..385f6dceba5d 100644
--- a/fs/xfs/xfs_rtalloc.c
+++ b/fs/xfs/xfs_rtalloc.c
@@ -2288,6 +2288,16 @@ xfs_rtmount_inodes(
2288 return 0; 2288 return 0;
2289} 2289}
2290 2290
2291void
2292xfs_rtunmount_inodes(
2293 struct xfs_mount *mp)
2294{
2295 if (mp->m_rbmip)
2296 IRELE(mp->m_rbmip);
2297 if (mp->m_rsumip)
2298 IRELE(mp->m_rsumip);
2299}
2300
2291/* 2301/*
2292 * Pick an extent for allocation at the start of a new realtime file. 2302 * Pick an extent for allocation at the start of a new realtime file.
2293 * Use the sequence number stored in the atime field of the bitmap inode. 2303 * Use the sequence number stored in the atime field of the bitmap inode.
diff --git a/fs/xfs/xfs_rtalloc.h b/fs/xfs/xfs_rtalloc.h
index 8d8dcd215716..b2d67adb6a08 100644
--- a/fs/xfs/xfs_rtalloc.h
+++ b/fs/xfs/xfs_rtalloc.h
@@ -23,8 +23,8 @@ struct xfs_trans;
23 23
24/* Min and max rt extent sizes, specified in bytes */ 24/* Min and max rt extent sizes, specified in bytes */
25#define XFS_MAX_RTEXTSIZE (1024 * 1024 * 1024) /* 1GB */ 25#define XFS_MAX_RTEXTSIZE (1024 * 1024 * 1024) /* 1GB */
26#define XFS_DFL_RTEXTSIZE (64 * 1024) /* 64KB */ 26#define XFS_DFL_RTEXTSIZE (64 * 1024) /* 64kB */
27#define XFS_MIN_RTEXTSIZE (4 * 1024) /* 4KB */ 27#define XFS_MIN_RTEXTSIZE (4 * 1024) /* 4kB */
28 28
29/* 29/*
30 * Constants for bit manipulations. 30 * Constants for bit manipulations.
@@ -108,6 +108,9 @@ xfs_rtfree_extent(
108int /* error */ 108int /* error */
109xfs_rtmount_init( 109xfs_rtmount_init(
110 struct xfs_mount *mp); /* file system mount structure */ 110 struct xfs_mount *mp); /* file system mount structure */
111void
112xfs_rtunmount_inodes(
113 struct xfs_mount *mp);
111 114
112/* 115/*
113 * Get the bitmap and summary inodes into the mount structure 116 * Get the bitmap and summary inodes into the mount structure
@@ -146,6 +149,7 @@ xfs_growfs_rt(
146# define xfs_growfs_rt(mp,in) (ENOSYS) 149# define xfs_growfs_rt(mp,in) (ENOSYS)
147# define xfs_rtmount_init(m) (((mp)->m_sb.sb_rblocks == 0)? 0 : (ENOSYS)) 150# define xfs_rtmount_init(m) (((mp)->m_sb.sb_rblocks == 0)? 0 : (ENOSYS))
148# define xfs_rtmount_inodes(m) (((mp)->m_sb.sb_rblocks == 0)? 0 : (ENOSYS)) 151# define xfs_rtmount_inodes(m) (((mp)->m_sb.sb_rblocks == 0)? 0 : (ENOSYS))
152# define xfs_rtunmount_inodes(m)
149#endif /* CONFIG_XFS_RT */ 153#endif /* CONFIG_XFS_RT */
150 154
151#endif /* __KERNEL__ */ 155#endif /* __KERNEL__ */
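[Editor's note] The "# define xfs_rtunmount_inodes(m)" stub added above is the usual trick for compiling a subsystem out: when CONFIG_XFS_RT is not set, the call sites stay unchanged and the preprocessor reduces them to nothing. A small standalone example of the pattern; FEATURE_RT is an invented switch, not a real kconfig symbol:

#include <stdio.h>

/* #define FEATURE_RT 1           -- uncomment to compile the real code in */

#ifdef FEATURE_RT
static void rt_unmount(int fs)
{
        printf("releasing realtime inodes for fs %d\n", fs);
}
#else
/* Subsystem compiled out: the call below expands to an empty statement. */
# define rt_unmount(fs)
#endif

int main(void)
{
        rt_unmount(1);          /* harmless either way */
        puts("unmount done");
        return 0;
}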
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index d6fe4a88d79f..775249a54f6f 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -292,7 +292,7 @@ xfs_lic_desc_to_chunk(xfs_log_item_desc_t *dp)
292 * In a write transaction we can allocate a maximum of 2 292 * In a write transaction we can allocate a maximum of 2
293 * extents. This gives: 293 * extents. This gives:
294 * the inode getting the new extents: inode size 294 * the inode getting the new extents: inode size
295 * the inode\'s bmap btree: max depth * block size 295 * the inode's bmap btree: max depth * block size
296 * the agfs of the ags from which the extents are allocated: 2 * sector 296 * the agfs of the ags from which the extents are allocated: 2 * sector
297 * the superblock free block counter: sector size 297 * the superblock free block counter: sector size
298 * the allocation btrees: 2 exts * 2 trees * (2 * max depth - 1) * block size 298 * the allocation btrees: 2 exts * 2 trees * (2 * max depth - 1) * block size
@@ -321,7 +321,7 @@ xfs_lic_desc_to_chunk(xfs_log_item_desc_t *dp)
321/* 321/*
322 * In truncating a file we free up to two extents at once. We can modify: 322 * In truncating a file we free up to two extents at once. We can modify:
323 * the inode being truncated: inode size 323 * the inode being truncated: inode size
324 * the inode\'s bmap btree: (max depth + 1) * block size 324 * the inode's bmap btree: (max depth + 1) * block size
325 * And the bmap_finish transaction can free the blocks and bmap blocks: 325 * And the bmap_finish transaction can free the blocks and bmap blocks:
326 * the agf for each of the ags: 4 * sector size 326 * the agf for each of the ags: 4 * sector size
327 * the agfl for each of the ags: 4 * sector size 327 * the agfl for each of the ags: 4 * sector size
@@ -343,7 +343,7 @@ xfs_lic_desc_to_chunk(xfs_log_item_desc_t *dp)
343 (128 * (9 + XFS_ALLOCFREE_LOG_COUNT(mp, 4))) + \ 343 (128 * (9 + XFS_ALLOCFREE_LOG_COUNT(mp, 4))) + \
344 (128 * 5) + \ 344 (128 * 5) + \
345 XFS_ALLOCFREE_LOG_RES(mp, 1) + \ 345 XFS_ALLOCFREE_LOG_RES(mp, 1) + \
346 (128 * (2 + XFS_IALLOC_BLOCKS(mp) + XFS_IN_MAXLEVELS(mp) + \ 346 (128 * (2 + XFS_IALLOC_BLOCKS(mp) + (mp)->m_in_maxlevels + \
347 XFS_ALLOCFREE_LOG_COUNT(mp, 1)))))) 347 XFS_ALLOCFREE_LOG_COUNT(mp, 1))))))
348 348
349#define XFS_ITRUNCATE_LOG_RES(mp) ((mp)->m_reservations.tr_itruncate) 349#define XFS_ITRUNCATE_LOG_RES(mp) ((mp)->m_reservations.tr_itruncate)
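[Editor's note] The reservation macros above (this hunk only swaps the XFS_IN_MAXLEVELS(mp) macro for the m_in_maxlevels field) are assembled the way the surrounding comments describe: add up the size of every metadata item the transaction may dirty, apparently plus a fixed 128-byte overhead per logged region, and take the maximum over the alternative code paths. A toy calculation purely to show the shape of such a formula; the constants and item sizes below are made up, not XFS's real values:

#include <stdio.h>

#define SECTSIZE        512     /* assumed sector size  */
#define BLOCKSIZE       4096    /* assumed block size   */
#define BMAP_MAXDEPTH   5       /* assumed btree depth  */
#define REGION_OVERHEAD 128     /* per logged region    */

/* Each logged item costs its size plus a fixed per-region overhead. */
static unsigned int item(unsigned int nregions, unsigned int size)
{
        return size + nregions * REGION_OVERHEAD;
}

#define MAX(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
        /* path 1: truncating the file in place */
        unsigned int truncate_path =
                item(1, 512) +                          /* the inode itself */
                item(BMAP_MAXDEPTH + 1,
                     (BMAP_MAXDEPTH + 1) * BLOCKSIZE);  /* its bmap btree */

        /* path 2: freeing the extents afterwards */
        unsigned int free_path =
                item(4, 4 * SECTSIZE) +                 /* agf headers */
                item(4, 4 * SECTSIZE) +                 /* agfl headers */
                item(1, SECTSIZE) +                     /* superblock counters */
                item(4, 4 * BLOCKSIZE);                 /* allocation btrees */

        printf("toy log reservation: %u bytes\n",
               MAX(truncate_path, free_path));
        return 0;
}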
@@ -431,8 +431,8 @@ xfs_lic_desc_to_chunk(xfs_log_item_desc_t *dp)
431 * the new inode: inode size 431 * the new inode: inode size
432 * the inode btree entry: 1 block 432 * the inode btree entry: 1 block
433 * the directory btree: (max depth + v2) * dir block size 433 * the directory btree: (max depth + v2) * dir block size
434 * the directory inode\'s bmap btree: (max depth + v2) * block size 434 * the directory inode's bmap btree: (max depth + v2) * block size
435 * the blocks for the symlink: 1 KB 435 * the blocks for the symlink: 1 kB
436 * Or in the first xact we allocate some inodes giving: 436 * Or in the first xact we allocate some inodes giving:
437 * the agi and agf of the ag getting the new inodes: 2 * sectorsize 437 * the agi and agf of the ag getting the new inodes: 2 * sectorsize
438 * the inode blocks allocated: XFS_IALLOC_BLOCKS * blocksize 438 * the inode blocks allocated: XFS_IALLOC_BLOCKS * blocksize
@@ -449,9 +449,9 @@ xfs_lic_desc_to_chunk(xfs_log_item_desc_t *dp)
449 (128 * (4 + XFS_DIROP_LOG_COUNT(mp)))), \ 449 (128 * (4 + XFS_DIROP_LOG_COUNT(mp)))), \
450 (2 * (mp)->m_sb.sb_sectsize + \ 450 (2 * (mp)->m_sb.sb_sectsize + \
451 XFS_FSB_TO_B((mp), XFS_IALLOC_BLOCKS((mp))) + \ 451 XFS_FSB_TO_B((mp), XFS_IALLOC_BLOCKS((mp))) + \
452 XFS_FSB_TO_B((mp), XFS_IN_MAXLEVELS(mp)) + \ 452 XFS_FSB_TO_B((mp), (mp)->m_in_maxlevels) + \
453 XFS_ALLOCFREE_LOG_RES(mp, 1) + \ 453 XFS_ALLOCFREE_LOG_RES(mp, 1) + \
454 (128 * (2 + XFS_IALLOC_BLOCKS(mp) + XFS_IN_MAXLEVELS(mp) + \ 454 (128 * (2 + XFS_IALLOC_BLOCKS(mp) + (mp)->m_in_maxlevels + \
455 XFS_ALLOCFREE_LOG_COUNT(mp, 1)))))) 455 XFS_ALLOCFREE_LOG_COUNT(mp, 1))))))
456 456
457#define XFS_SYMLINK_LOG_RES(mp) ((mp)->m_reservations.tr_symlink) 457#define XFS_SYMLINK_LOG_RES(mp) ((mp)->m_reservations.tr_symlink)
@@ -463,7 +463,7 @@ xfs_lic_desc_to_chunk(xfs_log_item_desc_t *dp)
463 * the inode btree entry: block size 463 * the inode btree entry: block size
464 * the superblock for the nlink flag: sector size 464 * the superblock for the nlink flag: sector size
465 * the directory btree: (max depth + v2) * dir block size 465 * the directory btree: (max depth + v2) * dir block size
466 * the directory inode\'s bmap btree: (max depth + v2) * block size 466 * the directory inode's bmap btree: (max depth + v2) * block size
467 * Or in the first xact we allocate some inodes giving: 467 * Or in the first xact we allocate some inodes giving:
468 * the agi and agf of the ag getting the new inodes: 2 * sectorsize 468 * the agi and agf of the ag getting the new inodes: 2 * sectorsize
469 * the superblock for the nlink flag: sector size 469 * the superblock for the nlink flag: sector size
@@ -481,9 +481,9 @@ xfs_lic_desc_to_chunk(xfs_log_item_desc_t *dp)
481 (128 * (3 + XFS_DIROP_LOG_COUNT(mp)))), \ 481 (128 * (3 + XFS_DIROP_LOG_COUNT(mp)))), \
482 (3 * (mp)->m_sb.sb_sectsize + \ 482 (3 * (mp)->m_sb.sb_sectsize + \
483 XFS_FSB_TO_B((mp), XFS_IALLOC_BLOCKS((mp))) + \ 483 XFS_FSB_TO_B((mp), XFS_IALLOC_BLOCKS((mp))) + \
484 XFS_FSB_TO_B((mp), XFS_IN_MAXLEVELS(mp)) + \ 484 XFS_FSB_TO_B((mp), (mp)->m_in_maxlevels) + \
485 XFS_ALLOCFREE_LOG_RES(mp, 1) + \ 485 XFS_ALLOCFREE_LOG_RES(mp, 1) + \
486 (128 * (2 + XFS_IALLOC_BLOCKS(mp) + XFS_IN_MAXLEVELS(mp) + \ 486 (128 * (2 + XFS_IALLOC_BLOCKS(mp) + (mp)->m_in_maxlevels + \
487 XFS_ALLOCFREE_LOG_COUNT(mp, 1)))))) 487 XFS_ALLOCFREE_LOG_COUNT(mp, 1))))))
488 488
489#define XFS_CREATE_LOG_RES(mp) ((mp)->m_reservations.tr_create) 489#define XFS_CREATE_LOG_RES(mp) ((mp)->m_reservations.tr_create)
@@ -513,7 +513,7 @@ xfs_lic_desc_to_chunk(xfs_log_item_desc_t *dp)
513 MAX((__uint16_t)XFS_FSB_TO_B((mp), 1), XFS_INODE_CLUSTER_SIZE(mp)) + \ 513 MAX((__uint16_t)XFS_FSB_TO_B((mp), 1), XFS_INODE_CLUSTER_SIZE(mp)) + \
514 (128 * 5) + \ 514 (128 * 5) + \
515 XFS_ALLOCFREE_LOG_RES(mp, 1) + \ 515 XFS_ALLOCFREE_LOG_RES(mp, 1) + \
516 (128 * (2 + XFS_IALLOC_BLOCKS(mp) + XFS_IN_MAXLEVELS(mp) + \ 516 (128 * (2 + XFS_IALLOC_BLOCKS(mp) + (mp)->m_in_maxlevels + \
517 XFS_ALLOCFREE_LOG_COUNT(mp, 1)))) 517 XFS_ALLOCFREE_LOG_COUNT(mp, 1))))
518 518
519 519
@@ -637,7 +637,7 @@ xfs_lic_desc_to_chunk(xfs_log_item_desc_t *dp)
637/* 637/*
638 * Removing the attribute fork of a file 638 * Removing the attribute fork of a file
639 * the inode being truncated: inode size 639 * the inode being truncated: inode size
640 * the inode\'s bmap btree: max depth * block size 640 * the inode's bmap btree: max depth * block size
641 * And the bmap_finish transaction can free the blocks and bmap blocks: 641 * And the bmap_finish transaction can free the blocks and bmap blocks:
642 * the agf for each of the ags: 4 * sector size 642 * the agf for each of the ags: 4 * sector size
643 * the agfl for each of the ags: 4 * sector size 643 * the agfl for each of the ags: 4 * sector size
diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c
index 2d47f10f8bed..f31271c30de9 100644
--- a/fs/xfs/xfs_trans_ail.c
+++ b/fs/xfs/xfs_trans_ail.c
@@ -79,7 +79,7 @@ xfs_trans_ail_tail(
79 * the push is run asynchronously in a separate thread, so we return the tail 79 * the push is run asynchronously in a separate thread, so we return the tail
80 * of the log right now instead of the tail after the push. This means we will 80 * of the log right now instead of the tail after the push. This means we will
81 * either continue right away, or we will sleep waiting on the async thread to 81 * either continue right away, or we will sleep waiting on the async thread to
82 * do it's work. 82 * do its work.
83 * 83 *
84 * We do this unlocked - we only need to know whether there is anything in the 84 * We do this unlocked - we only need to know whether there is anything in the
85 * AIL at the time we are called. We don't need to access the contents of 85 * AIL at the time we are called. We don't need to access the contents of
@@ -160,7 +160,7 @@ xfs_trans_ail_cursor_next(
160/* 160/*
161 * Now that the traversal is complete, we need to remove the cursor 161 * Now that the traversal is complete, we need to remove the cursor
162 * from the list of traversing cursors. Avoid removing the embedded 162 * from the list of traversing cursors. Avoid removing the embedded
163 * push cursor, but use the fact it is alway present to make the 163 * push cursor, but use the fact it is always present to make the
164 * list deletion simple. 164 * list deletion simple.
165 */ 165 */
166void 166void
diff --git a/fs/xfs/xfs_trans_item.c b/fs/xfs/xfs_trans_item.c
index e110bf57d7f4..eb3fc57f9eef 100644
--- a/fs/xfs/xfs_trans_item.c
+++ b/fs/xfs/xfs_trans_item.c
@@ -22,7 +22,7 @@
22#include "xfs_inum.h" 22#include "xfs_inum.h"
23#include "xfs_trans.h" 23#include "xfs_trans.h"
24#include "xfs_trans_priv.h" 24#include "xfs_trans_priv.h"
25/* XXX: from here down needed until struct xfs_trans has it's own ailp */ 25/* XXX: from here down needed until struct xfs_trans has its own ailp */
26#include "xfs_bit.h" 26#include "xfs_bit.h"
27#include "xfs_buf_item.h" 27#include "xfs_buf_item.h"
28#include "xfs_sb.h" 28#include "xfs_sb.h"
diff --git a/fs/xfs/xfs_trans_space.h b/fs/xfs/xfs_trans_space.h
index 4ea2e5074bdd..7d2c920dfb9c 100644
--- a/fs/xfs/xfs_trans_space.h
+++ b/fs/xfs/xfs_trans_space.h
@@ -47,7 +47,7 @@
47#define XFS_DIRREMOVE_SPACE_RES(mp) \ 47#define XFS_DIRREMOVE_SPACE_RES(mp) \
48 XFS_DAREMOVE_SPACE_RES(mp, XFS_DATA_FORK) 48 XFS_DAREMOVE_SPACE_RES(mp, XFS_DATA_FORK)
49#define XFS_IALLOC_SPACE_RES(mp) \ 49#define XFS_IALLOC_SPACE_RES(mp) \
50 (XFS_IALLOC_BLOCKS(mp) + XFS_IN_MAXLEVELS(mp)-1) 50 (XFS_IALLOC_BLOCKS(mp) + (mp)->m_in_maxlevels - 1)
51 51
52/* 52/*
53 * Space reservation values for various transactions. 53 * Space reservation values for various transactions.
diff --git a/fs/xfs/xfs_types.h b/fs/xfs/xfs_types.h
index b2f724502f1b..d725428c9df6 100644
--- a/fs/xfs/xfs_types.h
+++ b/fs/xfs/xfs_types.h
@@ -21,14 +21,6 @@
21#ifdef __KERNEL__ 21#ifdef __KERNEL__
22 22
23/* 23/*
24 * POSIX Extensions
25 */
26typedef unsigned char uchar_t;
27typedef unsigned short ushort_t;
28typedef unsigned int uint_t;
29typedef unsigned long ulong_t;
30
31/*
32 * Additional type declarations for XFS 24 * Additional type declarations for XFS
33 */ 25 */
34typedef signed char __int8_t; 26typedef signed char __int8_t;
diff --git a/fs/xfs/xfs_utils.c b/fs/xfs/xfs_utils.c
index fcc2285d03ed..79b9e5ea5359 100644
--- a/fs/xfs/xfs_utils.c
+++ b/fs/xfs/xfs_utils.c
@@ -374,7 +374,7 @@ xfs_truncate_file(
374 374
375 /* 375 /*
376 * Follow the normal truncate locking protocol. Since we 376 * Follow the normal truncate locking protocol. Since we
377 * hold the inode in the transaction, we know that it's number 377 * hold the inode in the transaction, we know that its number
378 * of references will stay constant. 378 * of references will stay constant.
379 */ 379 */
380 xfs_ilock(ip, XFS_ILOCK_EXCL); 380 xfs_ilock(ip, XFS_ILOCK_EXCL);
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
index 0e55c5d7db5f..7394c7af5de5 100644
--- a/fs/xfs/xfs_vnodeops.c
+++ b/fs/xfs/xfs_vnodeops.c
@@ -1136,7 +1136,7 @@ xfs_inactive(
1136 * If the inode is already free, then there can be nothing 1136 * If the inode is already free, then there can be nothing
1137 * to clean up here. 1137 * to clean up here.
1138 */ 1138 */
1139 if (ip->i_d.di_mode == 0 || VN_BAD(VFS_I(ip))) { 1139 if (ip->i_d.di_mode == 0 || is_bad_inode(VFS_I(ip))) {
1140 ASSERT(ip->i_df.if_real_bytes == 0); 1140 ASSERT(ip->i_df.if_real_bytes == 0);
1141 ASSERT(ip->i_df.if_broot_bytes == 0); 1141 ASSERT(ip->i_df.if_broot_bytes == 0);
1142 return VN_INACTIVE_CACHE; 1142 return VN_INACTIVE_CACHE;
@@ -1387,23 +1387,28 @@ xfs_create(
1387 xfs_inode_t **ipp, 1387 xfs_inode_t **ipp,
1388 cred_t *credp) 1388 cred_t *credp)
1389{ 1389{
1390 xfs_mount_t *mp = dp->i_mount; 1390 int is_dir = S_ISDIR(mode);
1391 xfs_inode_t *ip; 1391 struct xfs_mount *mp = dp->i_mount;
1392 xfs_trans_t *tp; 1392 struct xfs_inode *ip = NULL;
1393 struct xfs_trans *tp = NULL;
1393 int error; 1394 int error;
1394 xfs_bmap_free_t free_list; 1395 xfs_bmap_free_t free_list;
1395 xfs_fsblock_t first_block; 1396 xfs_fsblock_t first_block;
1396 boolean_t unlock_dp_on_error = B_FALSE; 1397 boolean_t unlock_dp_on_error = B_FALSE;
1397 int dm_event_sent = 0;
1398 uint cancel_flags; 1398 uint cancel_flags;
1399 int committed; 1399 int committed;
1400 xfs_prid_t prid; 1400 xfs_prid_t prid;
1401 struct xfs_dquot *udqp, *gdqp; 1401 struct xfs_dquot *udqp = NULL;
1402 struct xfs_dquot *gdqp = NULL;
1402 uint resblks; 1403 uint resblks;
1404 uint log_res;
1405 uint log_count;
1403 1406
1404 ASSERT(!*ipp);
1405 xfs_itrace_entry(dp); 1407 xfs_itrace_entry(dp);
1406 1408
1409 if (XFS_FORCED_SHUTDOWN(mp))
1410 return XFS_ERROR(EIO);
1411
1407 if (DM_EVENT_ENABLED(dp, DM_EVENT_CREATE)) { 1412 if (DM_EVENT_ENABLED(dp, DM_EVENT_CREATE)) {
1408 error = XFS_SEND_NAMESP(mp, DM_EVENT_CREATE, 1413 error = XFS_SEND_NAMESP(mp, DM_EVENT_CREATE,
1409 dp, DM_RIGHT_NULL, NULL, 1414 dp, DM_RIGHT_NULL, NULL,
@@ -1412,84 +1417,97 @@ xfs_create(
1412 1417
1413 if (error) 1418 if (error)
1414 return error; 1419 return error;
1415 dm_event_sent = 1;
1416 } 1420 }
1417 1421
1418 if (XFS_FORCED_SHUTDOWN(mp))
1419 return XFS_ERROR(EIO);
1420
1421 /* Return through std_return after this point. */
1422
1423 udqp = gdqp = NULL;
1424 if (dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) 1422 if (dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
1425 prid = dp->i_d.di_projid; 1423 prid = dp->i_d.di_projid;
1426 else 1424 else
1427 prid = (xfs_prid_t)dfltprid; 1425 prid = dfltprid;
1428 1426
1429 /* 1427 /*
1430 * Make sure that we have allocated dquot(s) on disk. 1428 * Make sure that we have allocated dquot(s) on disk.
1431 */ 1429 */
1432 error = XFS_QM_DQVOPALLOC(mp, dp, 1430 error = XFS_QM_DQVOPALLOC(mp, dp,
1433 current_fsuid(), current_fsgid(), prid, 1431 current_fsuid(), current_fsgid(), prid,
1434 XFS_QMOPT_QUOTALL|XFS_QMOPT_INHERIT, &udqp, &gdqp); 1432 XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT, &udqp, &gdqp);
1435 if (error) 1433 if (error)
1436 goto std_return; 1434 goto std_return;
1437 1435
1438 ip = NULL; 1436 if (is_dir) {
1437 rdev = 0;
1438 resblks = XFS_MKDIR_SPACE_RES(mp, name->len);
1439 log_res = XFS_MKDIR_LOG_RES(mp);
1440 log_count = XFS_MKDIR_LOG_COUNT;
1441 tp = xfs_trans_alloc(mp, XFS_TRANS_MKDIR);
1442 } else {
1443 resblks = XFS_CREATE_SPACE_RES(mp, name->len);
1444 log_res = XFS_CREATE_LOG_RES(mp);
1445 log_count = XFS_CREATE_LOG_COUNT;
1446 tp = xfs_trans_alloc(mp, XFS_TRANS_CREATE);
1447 }
1439 1448
1440 tp = xfs_trans_alloc(mp, XFS_TRANS_CREATE);
1441 cancel_flags = XFS_TRANS_RELEASE_LOG_RES; 1449 cancel_flags = XFS_TRANS_RELEASE_LOG_RES;
1442 resblks = XFS_CREATE_SPACE_RES(mp, name->len); 1450
1443 /* 1451 /*
1444 * Initially assume that the file does not exist and 1452 * Initially assume that the file does not exist and
1445 * reserve the resources for that case. If that is not 1453 * reserve the resources for that case. If that is not
1446 * the case we'll drop the one we have and get a more 1454 * the case we'll drop the one we have and get a more
1447 * appropriate transaction later. 1455 * appropriate transaction later.
1448 */ 1456 */
1449 error = xfs_trans_reserve(tp, resblks, XFS_CREATE_LOG_RES(mp), 0, 1457 error = xfs_trans_reserve(tp, resblks, log_res, 0,
1450 XFS_TRANS_PERM_LOG_RES, XFS_CREATE_LOG_COUNT); 1458 XFS_TRANS_PERM_LOG_RES, log_count);
1451 if (error == ENOSPC) { 1459 if (error == ENOSPC) {
1452 resblks = 0; 1460 resblks = 0;
1453 error = xfs_trans_reserve(tp, 0, XFS_CREATE_LOG_RES(mp), 0, 1461 error = xfs_trans_reserve(tp, 0, log_res, 0,
1454 XFS_TRANS_PERM_LOG_RES, XFS_CREATE_LOG_COUNT); 1462 XFS_TRANS_PERM_LOG_RES, log_count);
1455 } 1463 }
1456 if (error) { 1464 if (error) {
1457 cancel_flags = 0; 1465 cancel_flags = 0;
1458 goto error_return; 1466 goto out_trans_cancel;
1459 } 1467 }
1460 1468
1461 xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT); 1469 xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
1462 unlock_dp_on_error = B_TRUE; 1470 unlock_dp_on_error = B_TRUE;
1463 1471
1464 xfs_bmap_init(&free_list, &first_block); 1472 /*
1473 * Check for directory link count overflow.
1474 */
1475 if (is_dir && dp->i_d.di_nlink >= XFS_MAXLINK) {
1476 error = XFS_ERROR(EMLINK);
1477 goto out_trans_cancel;
1478 }
1465 1479
1466 ASSERT(ip == NULL); 1480 xfs_bmap_init(&free_list, &first_block);
1467 1481
1468 /* 1482 /*
1469 * Reserve disk quota and the inode. 1483 * Reserve disk quota and the inode.
1470 */ 1484 */
1471 error = XFS_TRANS_RESERVE_QUOTA(mp, tp, udqp, gdqp, resblks, 1, 0); 1485 error = XFS_TRANS_RESERVE_QUOTA(mp, tp, udqp, gdqp, resblks, 1, 0);
1472 if (error) 1486 if (error)
1473 goto error_return; 1487 goto out_trans_cancel;
1474 1488
1475 error = xfs_dir_canenter(tp, dp, name, resblks); 1489 error = xfs_dir_canenter(tp, dp, name, resblks);
1476 if (error) 1490 if (error)
1477 goto error_return; 1491 goto out_trans_cancel;
1478 error = xfs_dir_ialloc(&tp, dp, mode, 1, 1492
1479 rdev, credp, prid, resblks > 0, 1493 /*
1480 &ip, &committed); 1494 * A newly created regular or special file just has one directory
1495 * entry pointing to it, but a directory also has the "." entry
1496 * pointing to itself.
1497 */
1498 error = xfs_dir_ialloc(&tp, dp, mode, is_dir ? 2 : 1, rdev, credp,
1499 prid, resblks > 0, &ip, &committed);
1481 if (error) { 1500 if (error) {
1482 if (error == ENOSPC) 1501 if (error == ENOSPC)
1483 goto error_return; 1502 goto out_trans_cancel;
1484 goto abort_return; 1503 goto out_trans_abort;
1485 } 1504 }
1486 xfs_itrace_ref(ip);
1487 1505
1488 /* 1506 /*
1489 * At this point, we've gotten a newly allocated inode. 1507 * At this point, we've gotten a newly allocated inode.
1490 * It is locked (and joined to the transaction). 1508 * It is locked (and joined to the transaction).
1491 */ 1509 */
1492 1510 xfs_itrace_ref(ip);
1493 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); 1511 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1494 1512
1495 /* 1513 /*
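[Editor's note] The new code above folds mkdir into xfs_create by picking the transaction type, space reservation, log reservation and log count up front from a single is_dir flag, and by giving a new directory an initial link count of 2 rather than 1. A compact sketch of that select-parameters-then-share-the-rest structure, with invented numbers standing in for the real reservation macros:

#include <stdio.h>

struct txn_params {
        const char *type;
        unsigned int space_res;
        unsigned int log_res;
        unsigned int log_count;
        unsigned int nlink;     /* initial link count of the new inode */
};

/*
 * Pick everything that differs between "create a file" and "make a
 * directory" in one place; the rest of the code path is shared.
 */
static struct txn_params pick_params(int is_dir, unsigned int namelen)
{
        struct txn_params p;

        if (is_dir) {
                p.type = "MKDIR";
                p.space_res = 40 + namelen;     /* stand-in for XFS_MKDIR_SPACE_RES */
                p.log_res = 6000;               /* stand-in for XFS_MKDIR_LOG_RES */
                p.log_count = 3;
                p.nlink = 2;                    /* "." points back at the new dir */
        } else {
                p.type = "CREATE";
                p.space_res = 32 + namelen;
                p.log_res = 5000;
                p.log_count = 2;
                p.nlink = 1;                    /* just the parent's entry */
        }
        return p;
}

int main(void)
{
        struct txn_params p = pick_params(1, 3);        /* e.g. mkdir("foo") */

        printf("%s: space %u, log %u x%u, nlink %u\n",
               p.type, p.space_res, p.log_res, p.log_count, p.nlink);
        return 0;
}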
@@ -1508,19 +1526,28 @@ xfs_create(
1508 resblks - XFS_IALLOC_SPACE_RES(mp) : 0); 1526 resblks - XFS_IALLOC_SPACE_RES(mp) : 0);
1509 if (error) { 1527 if (error) {
1510 ASSERT(error != ENOSPC); 1528 ASSERT(error != ENOSPC);
1511 goto abort_return; 1529 goto out_trans_abort;
1512 } 1530 }
1513 xfs_ichgtime(dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); 1531 xfs_ichgtime(dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
1514 xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE); 1532 xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
1515 1533
1534 if (is_dir) {
1535 error = xfs_dir_init(tp, ip, dp);
1536 if (error)
1537 goto out_bmap_cancel;
1538
1539 error = xfs_bumplink(tp, dp);
1540 if (error)
1541 goto out_bmap_cancel;
1542 }
1543
1516 /* 1544 /*
1517 * If this is a synchronous mount, make sure that the 1545 * If this is a synchronous mount, make sure that the
1518 * create transaction goes to disk before returning to 1546 * create transaction goes to disk before returning to
1519 * the user. 1547 * the user.
1520 */ 1548 */
1521 if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC)) { 1549 if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
1522 xfs_trans_set_sync(tp); 1550 xfs_trans_set_sync(tp);
1523 }
1524 1551
1525 /* 1552 /*
1526 * Attach the dquot(s) to the inodes and modify them incore. 1553 * Attach the dquot(s) to the inodes and modify them incore.
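[Editor's note] The is_dir block added in the hunk above does the two extra pieces of bookkeeping a directory needs: xfs_dir_init sets up the new directory (its "." and ".." entries), and xfs_bumplink raises the parent's link count because the child's ".." will refer to it. A toy model of just the link-count arithmetic:

#include <stdio.h>

struct toy_inode {
        const char *name;
        int is_dir;
        int nlink;
};

/*
 * After mkdir: the child starts at 2 ("." plus the parent's entry for it)
 * and the parent gains one link for the child's "..".
 */
static void toy_mkdir(struct toy_inode *parent, struct toy_inode *child)
{
        child->is_dir = 1;
        child->nlink = 2;
        parent->nlink++;
}

int main(void)
{
        struct toy_inode root = { "root", 1, 2 };       /* empty dir: "." + parent */
        struct toy_inode sub = { "sub", 0, 0 };

        toy_mkdir(&root, &sub);
        printf("%s nlink=%d, %s nlink=%d\n",
               root.name, root.nlink, sub.name, sub.nlink);
        return 0;
}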
@@ -1537,16 +1564,13 @@ xfs_create(
1537 IHOLD(ip); 1564 IHOLD(ip);
1538 1565
1539 error = xfs_bmap_finish(&tp, &free_list, &committed); 1566 error = xfs_bmap_finish(&tp, &free_list, &committed);
1540 if (error) { 1567 if (error)
1541 xfs_bmap_cancel(&free_list); 1568 goto out_abort_rele;
1542 goto abort_rele;
1543 }
1544 1569
1545 error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES); 1570 error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
1546 if (error) { 1571 if (error) {
1547 IRELE(ip); 1572 IRELE(ip);
1548 tp = NULL; 1573 goto out_dqrele;
1549 goto error_return;
1550 } 1574 }
1551 1575
1552 XFS_QM_DQRELE(mp, udqp); 1576 XFS_QM_DQRELE(mp, udqp);
@@ -1555,26 +1579,22 @@ xfs_create(
1555 *ipp = ip; 1579 *ipp = ip;
1556 1580
1557 /* Fallthrough to std_return with error = 0 */ 1581 /* Fallthrough to std_return with error = 0 */
1558 1582 std_return:
1559std_return: 1583 if (DM_EVENT_ENABLED(dp, DM_EVENT_POSTCREATE)) {
1560 if ((*ipp || (error != 0 && dm_event_sent != 0)) && 1584 XFS_SEND_NAMESP(mp, DM_EVENT_POSTCREATE, dp, DM_RIGHT_NULL,
1561 DM_EVENT_ENABLED(dp, DM_EVENT_POSTCREATE)) { 1585 ip, DM_RIGHT_NULL, name->name, NULL, mode,
1562 (void) XFS_SEND_NAMESP(mp, DM_EVENT_POSTCREATE, 1586 error, 0);
1563 dp, DM_RIGHT_NULL,
1564 *ipp ? ip : NULL,
1565 DM_RIGHT_NULL, name->name, NULL,
1566 mode, error, 0);
1567 } 1587 }
1588
1568 return error; 1589 return error;
1569 1590
1570 abort_return: 1591 out_bmap_cancel:
1592 xfs_bmap_cancel(&free_list);
1593 out_trans_abort:
1571 cancel_flags |= XFS_TRANS_ABORT; 1594 cancel_flags |= XFS_TRANS_ABORT;
1572 /* FALLTHROUGH */ 1595 out_trans_cancel:
1573 1596 xfs_trans_cancel(tp, cancel_flags);
1574 error_return: 1597 out_dqrele:
1575 if (tp != NULL)
1576 xfs_trans_cancel(tp, cancel_flags);
1577
1578 XFS_QM_DQRELE(mp, udqp); 1598 XFS_QM_DQRELE(mp, udqp);
1579 XFS_QM_DQRELE(mp, gdqp); 1599 XFS_QM_DQRELE(mp, gdqp);
1580 1600
@@ -1583,20 +1603,18 @@ std_return:
1583 1603
1584 goto std_return; 1604 goto std_return;
1585 1605
1586 abort_rele: 1606 out_abort_rele:
1587 /* 1607 /*
1588 * Wait until after the current transaction is aborted to 1608 * Wait until after the current transaction is aborted to
1589 * release the inode. This prevents recursive transactions 1609 * release the inode. This prevents recursive transactions
1590 * and deadlocks from xfs_inactive. 1610 * and deadlocks from xfs_inactive.
1591 */ 1611 */
1612 xfs_bmap_cancel(&free_list);
1592 cancel_flags |= XFS_TRANS_ABORT; 1613 cancel_flags |= XFS_TRANS_ABORT;
1593 xfs_trans_cancel(tp, cancel_flags); 1614 xfs_trans_cancel(tp, cancel_flags);
1594 IRELE(ip); 1615 IRELE(ip);
1595 1616 unlock_dp_on_error = B_FALSE;
1596 XFS_QM_DQRELE(mp, udqp); 1617 goto out_dqrele;
1597 XFS_QM_DQRELE(mp, gdqp);
1598
1599 goto std_return;
1600} 1618}
1601 1619
1602#ifdef DEBUG 1620#ifdef DEBUG
@@ -2004,8 +2022,10 @@ xfs_link(
2004 /* Return through std_return after this point. */ 2022 /* Return through std_return after this point. */
2005 2023
2006 error = XFS_QM_DQATTACH(mp, sip, 0); 2024 error = XFS_QM_DQATTACH(mp, sip, 0);
2007 if (!error && sip != tdp) 2025 if (error)
2008 error = XFS_QM_DQATTACH(mp, tdp, 0); 2026 goto std_return;
2027
2028 error = XFS_QM_DQATTACH(mp, tdp, 0);
2009 if (error) 2029 if (error)
2010 goto std_return; 2030 goto std_return;
2011 2031
@@ -2110,209 +2130,6 @@ std_return:
2110 goto std_return; 2130 goto std_return;
2111} 2131}
2112 2132
2113
2114int
2115xfs_mkdir(
2116 xfs_inode_t *dp,
2117 struct xfs_name *dir_name,
2118 mode_t mode,
2119 xfs_inode_t **ipp,
2120 cred_t *credp)
2121{
2122 xfs_mount_t *mp = dp->i_mount;
2123 xfs_inode_t *cdp; /* inode of created dir */
2124 xfs_trans_t *tp;
2125 int cancel_flags;
2126 int error;
2127 int committed;
2128 xfs_bmap_free_t free_list;
2129 xfs_fsblock_t first_block;
2130 boolean_t unlock_dp_on_error = B_FALSE;
2131 boolean_t created = B_FALSE;
2132 int dm_event_sent = 0;
2133 xfs_prid_t prid;
2134 struct xfs_dquot *udqp, *gdqp;
2135 uint resblks;
2136
2137 if (XFS_FORCED_SHUTDOWN(mp))
2138 return XFS_ERROR(EIO);
2139
2140 tp = NULL;
2141
2142 if (DM_EVENT_ENABLED(dp, DM_EVENT_CREATE)) {
2143 error = XFS_SEND_NAMESP(mp, DM_EVENT_CREATE,
2144 dp, DM_RIGHT_NULL, NULL,
2145 DM_RIGHT_NULL, dir_name->name, NULL,
2146 mode, 0, 0);
2147 if (error)
2148 return error;
2149 dm_event_sent = 1;
2150 }
2151
2152 /* Return through std_return after this point. */
2153
2154 xfs_itrace_entry(dp);
2155
2156 mp = dp->i_mount;
2157 udqp = gdqp = NULL;
2158 if (dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
2159 prid = dp->i_d.di_projid;
2160 else
2161 prid = (xfs_prid_t)dfltprid;
2162
2163 /*
2164 * Make sure that we have allocated dquot(s) on disk.
2165 */
2166 error = XFS_QM_DQVOPALLOC(mp, dp,
2167 current_fsuid(), current_fsgid(), prid,
2168 XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT, &udqp, &gdqp);
2169 if (error)
2170 goto std_return;
2171
2172 tp = xfs_trans_alloc(mp, XFS_TRANS_MKDIR);
2173 cancel_flags = XFS_TRANS_RELEASE_LOG_RES;
2174 resblks = XFS_MKDIR_SPACE_RES(mp, dir_name->len);
2175 error = xfs_trans_reserve(tp, resblks, XFS_MKDIR_LOG_RES(mp), 0,
2176 XFS_TRANS_PERM_LOG_RES, XFS_MKDIR_LOG_COUNT);
2177 if (error == ENOSPC) {
2178 resblks = 0;
2179 error = xfs_trans_reserve(tp, 0, XFS_MKDIR_LOG_RES(mp), 0,
2180 XFS_TRANS_PERM_LOG_RES,
2181 XFS_MKDIR_LOG_COUNT);
2182 }
2183 if (error) {
2184 cancel_flags = 0;
2185 goto error_return;
2186 }
2187
2188 xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
2189 unlock_dp_on_error = B_TRUE;
2190
2191 /*
2192 * Check for directory link count overflow.
2193 */
2194 if (dp->i_d.di_nlink >= XFS_MAXLINK) {
2195 error = XFS_ERROR(EMLINK);
2196 goto error_return;
2197 }
2198
2199 /*
2200 * Reserve disk quota and the inode.
2201 */
2202 error = XFS_TRANS_RESERVE_QUOTA(mp, tp, udqp, gdqp, resblks, 1, 0);
2203 if (error)
2204 goto error_return;
2205
2206 error = xfs_dir_canenter(tp, dp, dir_name, resblks);
2207 if (error)
2208 goto error_return;
2209 /*
2210 * create the directory inode.
2211 */
2212 error = xfs_dir_ialloc(&tp, dp, mode, 2,
2213 0, credp, prid, resblks > 0,
2214 &cdp, NULL);
2215 if (error) {
2216 if (error == ENOSPC)
2217 goto error_return;
2218 goto abort_return;
2219 }
2220 xfs_itrace_ref(cdp);
2221
2222 /*
2223 * Now we add the directory inode to the transaction.
2224 * We waited until now since xfs_dir_ialloc might start
2225 * a new transaction. Had we joined the transaction
2226 * earlier, the locks might have gotten released. An error
2227 * from here on will result in the transaction cancel
2228 * unlocking dp so don't do it explicitly in the error path.
2229 */
2230 IHOLD(dp);
2231 xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
2232 unlock_dp_on_error = B_FALSE;
2233
2234 xfs_bmap_init(&free_list, &first_block);
2235
2236 error = xfs_dir_createname(tp, dp, dir_name, cdp->i_ino,
2237 &first_block, &free_list, resblks ?
2238 resblks - XFS_IALLOC_SPACE_RES(mp) : 0);
2239 if (error) {
2240 ASSERT(error != ENOSPC);
2241 goto error1;
2242 }
2243 xfs_ichgtime(dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
2244
2245 error = xfs_dir_init(tp, cdp, dp);
2246 if (error)
2247 goto error2;
2248
2249 error = xfs_bumplink(tp, dp);
2250 if (error)
2251 goto error2;
2252
2253 created = B_TRUE;
2254
2255 *ipp = cdp;
2256 IHOLD(cdp);
2257
2258 /*
2259 * Attach the dquots to the new inode and modify the icount incore.
2260 */
2261 XFS_QM_DQVOPCREATE(mp, tp, cdp, udqp, gdqp);
2262
2263 /*
2264 * If this is a synchronous mount, make sure that the
2265 * mkdir transaction goes to disk before returning to
2266 * the user.
2267 */
2268 if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC)) {
2269 xfs_trans_set_sync(tp);
2270 }
2271
2272 error = xfs_bmap_finish(&tp, &free_list, &committed);
2273 if (error) {
2274 IRELE(cdp);
2275 goto error2;
2276 }
2277
2278 error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
2279 XFS_QM_DQRELE(mp, udqp);
2280 XFS_QM_DQRELE(mp, gdqp);
2281 if (error) {
2282 IRELE(cdp);
2283 }
2284
2285 /* Fall through to std_return with error = 0 or errno from
2286 * xfs_trans_commit. */
2287
2288std_return:
2289 if ((created || (error != 0 && dm_event_sent != 0)) &&
2290 DM_EVENT_ENABLED(dp, DM_EVENT_POSTCREATE)) {
2291 (void) XFS_SEND_NAMESP(mp, DM_EVENT_POSTCREATE,
2292 dp, DM_RIGHT_NULL,
2293 created ? cdp : NULL,
2294 DM_RIGHT_NULL,
2295 dir_name->name, NULL,
2296 mode, error, 0);
2297 }
2298 return error;
2299
2300 error2:
2301 error1:
2302 xfs_bmap_cancel(&free_list);
2303 abort_return:
2304 cancel_flags |= XFS_TRANS_ABORT;
2305 error_return:
2306 xfs_trans_cancel(tp, cancel_flags);
2307 XFS_QM_DQRELE(mp, udqp);
2308 XFS_QM_DQRELE(mp, gdqp);
2309
2310 if (unlock_dp_on_error)
2311 xfs_iunlock(dp, XFS_ILOCK_EXCL);
2312
2313 goto std_return;
2314}
2315
2316int 2133int
2317xfs_symlink( 2134xfs_symlink(
2318 xfs_inode_t *dp, 2135 xfs_inode_t *dp,
@@ -2587,51 +2404,6 @@ std_return:
2587} 2404}
2588 2405
2589int 2406int
2590xfs_inode_flush(
2591 xfs_inode_t *ip,
2592 int flags)
2593{
2594 xfs_mount_t *mp = ip->i_mount;
2595 int error = 0;
2596
2597 if (XFS_FORCED_SHUTDOWN(mp))
2598 return XFS_ERROR(EIO);
2599
2600 /*
2601 * Bypass inodes which have already been cleaned by
2602 * the inode flush clustering code inside xfs_iflush
2603 */
2604 if (xfs_inode_clean(ip))
2605 return 0;
2606
2607 /*
2608 * We make this non-blocking if the inode is contended,
2609 * return EAGAIN to indicate to the caller that they
2610 * did not succeed. This prevents the flush path from
2611 * blocking on inodes inside another operation right
2612 * now, they get caught later by xfs_sync.
2613 */
2614 if (flags & FLUSH_SYNC) {
2615 xfs_ilock(ip, XFS_ILOCK_SHARED);
2616 xfs_iflock(ip);
2617 } else if (xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) {
2618 if (xfs_ipincount(ip) || !xfs_iflock_nowait(ip)) {
2619 xfs_iunlock(ip, XFS_ILOCK_SHARED);
2620 return EAGAIN;
2621 }
2622 } else {
2623 return EAGAIN;
2624 }
2625
2626 error = xfs_iflush(ip, (flags & FLUSH_SYNC) ? XFS_IFLUSH_SYNC
2627 : XFS_IFLUSH_ASYNC_NOBLOCK);
2628 xfs_iunlock(ip, XFS_ILOCK_SHARED);
2629
2630 return error;
2631}
2632
2633
2634int
2635xfs_set_dmattrs( 2407xfs_set_dmattrs(
2636 xfs_inode_t *ip, 2408 xfs_inode_t *ip,
2637 u_int evmask, 2409 u_int evmask,
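[Editor's note] The deleted xfs_inode_flush above used a "try to lock, and return EAGAIN rather than block" strategy for the non-sync case, so a background flush never stalls behind an inode that is busy in another operation. A small pthread illustration of that pattern; this is generic userspace code, not the XFS locking API:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t inode_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Non-blocking flush attempt: if the inode is contended, give up with
 * EAGAIN and let a later sync pass pick it up.
 */
static int try_flush(int sync)
{
        if (sync) {
                pthread_mutex_lock(&inode_lock);        /* sync callers may block */
        } else if (pthread_mutex_trylock(&inode_lock) != 0) {
                return EAGAIN;
        }

        /* ... write the inode back here ... */

        pthread_mutex_unlock(&inode_lock);
        return 0;
}

int main(void)
{
        pthread_mutex_lock(&inode_lock);                /* simulate contention */
        printf("async flush while busy -> %d (EAGAIN=%d)\n",
               try_flush(0), EAGAIN);
        pthread_mutex_unlock(&inode_lock);

        printf("async flush when idle -> %d\n", try_flush(0));
        return 0;
}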
@@ -2676,7 +2448,7 @@ xfs_reclaim(
2676 ASSERT(!VN_MAPPED(VFS_I(ip))); 2448 ASSERT(!VN_MAPPED(VFS_I(ip)));
2677 2449
2678 /* bad inode, get out here ASAP */ 2450 /* bad inode, get out here ASAP */
2679 if (VN_BAD(VFS_I(ip))) { 2451 if (is_bad_inode(VFS_I(ip))) {
2680 xfs_ireclaim(ip); 2452 xfs_ireclaim(ip);
2681 return 0; 2453 return 0;
2682 } 2454 }
@@ -3090,7 +2862,7 @@ xfs_free_file_space(
3090 2862
3091 /* 2863 /*
3092 * Need to zero the stuff we're not freeing, on disk. 2864 * Need to zero the stuff we're not freeing, on disk.
3093 * If its a realtime file & can't use unwritten extents then we 2865 * If it's a realtime file & can't use unwritten extents then we
3094 * actually need to zero the extent edges. Otherwise xfs_bunmapi 2866 * actually need to zero the extent edges. Otherwise xfs_bunmapi
3095 * will take care of it for us. 2867 * will take care of it for us.
3096 */ 2868 */
diff --git a/fs/xfs/xfs_vnodeops.h b/fs/xfs/xfs_vnodeops.h
index 76df328c61b4..04373c6c61ff 100644
--- a/fs/xfs/xfs_vnodeops.h
+++ b/fs/xfs/xfs_vnodeops.h
@@ -31,14 +31,11 @@ int xfs_remove(struct xfs_inode *dp, struct xfs_name *name,
31 struct xfs_inode *ip); 31 struct xfs_inode *ip);
32int xfs_link(struct xfs_inode *tdp, struct xfs_inode *sip, 32int xfs_link(struct xfs_inode *tdp, struct xfs_inode *sip,
33 struct xfs_name *target_name); 33 struct xfs_name *target_name);
34int xfs_mkdir(struct xfs_inode *dp, struct xfs_name *dir_name,
35 mode_t mode, struct xfs_inode **ipp, cred_t *credp);
36int xfs_readdir(struct xfs_inode *dp, void *dirent, size_t bufsize, 34int xfs_readdir(struct xfs_inode *dp, void *dirent, size_t bufsize,
37 xfs_off_t *offset, filldir_t filldir); 35 xfs_off_t *offset, filldir_t filldir);
38int xfs_symlink(struct xfs_inode *dp, struct xfs_name *link_name, 36int xfs_symlink(struct xfs_inode *dp, struct xfs_name *link_name,
39 const char *target_path, mode_t mode, struct xfs_inode **ipp, 37 const char *target_path, mode_t mode, struct xfs_inode **ipp,
40 cred_t *credp); 38 cred_t *credp);
41int xfs_inode_flush(struct xfs_inode *ip, int flags);
42int xfs_set_dmattrs(struct xfs_inode *ip, u_int evmask, u_int16_t state); 39int xfs_set_dmattrs(struct xfs_inode *ip, u_int evmask, u_int16_t state);
43int xfs_reclaim(struct xfs_inode *ip); 40int xfs_reclaim(struct xfs_inode *ip);
44int xfs_change_file_space(struct xfs_inode *ip, int cmd, 41int xfs_change_file_space(struct xfs_inode *ip, int cmd,