-rw-r--r--  fs/xfs/Kconfig                      1
-rw-r--r--  fs/xfs/Makefile                     5
-rw-r--r--  fs/xfs/linux-2.6/xfs_acl.c        523
-rw-r--r--  fs/xfs/linux-2.6/xfs_ioctl.c       25
-rw-r--r--  fs/xfs/linux-2.6/xfs_iops.c        51
-rw-r--r--  fs/xfs/linux-2.6/xfs_lrw.c          1
-rw-r--r--  fs/xfs/linux-2.6/xfs_quotaops.c     4
-rw-r--r--  fs/xfs/linux-2.6/xfs_super.c       49
-rw-r--r--  fs/xfs/linux-2.6/xfs_sync.c       479
-rw-r--r--  fs/xfs/linux-2.6/xfs_sync.h        19
-rw-r--r--  fs/xfs/linux-2.6/xfs_xattr.c       67
-rw-r--r--  fs/xfs/quota/xfs_dquot.c            5
-rw-r--r--  fs/xfs/quota/xfs_dquot.h            1
-rw-r--r--  fs/xfs/quota/xfs_dquot_item.c       1
-rw-r--r--  fs/xfs/quota/xfs_qm.c             168
-rw-r--r--  fs/xfs/quota/xfs_qm.h              21
-rw-r--r--  fs/xfs/quota/xfs_qm_bhv.c          77
-rw-r--r--  fs/xfs/quota/xfs_qm_stats.c         1
-rw-r--r--  fs/xfs/quota/xfs_qm_syscalls.c    113
-rw-r--r--  fs/xfs/quota/xfs_trans_dquot.c     66
-rw-r--r--  fs/xfs/xfs_acl.c                  874
-rw-r--r--  fs/xfs/xfs_acl.h                   97
-rw-r--r--  fs/xfs/xfs_ag.h                     2
-rw-r--r--  fs/xfs/xfs_arch.h                  32
-rw-r--r--  fs/xfs/xfs_attr.c                  13
-rw-r--r--  fs/xfs/xfs_bmap.c                  33
-rw-r--r--  fs/xfs/xfs_bmap_btree.c             4
-rw-r--r--  fs/xfs/xfs_filestream.c             6
-rw-r--r--  fs/xfs/xfs_fs.h                    11
-rw-r--r--  fs/xfs/xfs_iget.c                   8
-rw-r--r--  fs/xfs/xfs_inode.c                  1
-rw-r--r--  fs/xfs/xfs_inode.h                  6
-rw-r--r--  fs/xfs/xfs_iomap.c                 13
-rw-r--r--  fs/xfs/xfs_log_recover.c           38
-rw-r--r--  fs/xfs/xfs_mount.c                105
-rw-r--r--  fs/xfs/xfs_mount.h                 84
-rw-r--r--  fs/xfs/xfs_qmops.c                152
-rw-r--r--  fs/xfs/xfs_quota.h                122
-rw-r--r--  fs/xfs/xfs_rename.c                 3
-rw-r--r--  fs/xfs/xfs_rw.c                     1
-rw-r--r--  fs/xfs/xfs_trans.c                 15
-rw-r--r--  fs/xfs/xfs_utils.c                  2
-rw-r--r--  fs/xfs/xfs_vnodeops.c             114
-rw-r--r--  fs/xfs/xfs_vnodeops.h               1
44 files changed, 1332 insertions, 2082 deletions
diff --git a/fs/xfs/Kconfig b/fs/xfs/Kconfig
index 29228f5899cd..480f28127f09 100644
--- a/fs/xfs/Kconfig
+++ b/fs/xfs/Kconfig
@@ -39,6 +39,7 @@ config XFS_QUOTA
39config XFS_POSIX_ACL 39config XFS_POSIX_ACL
40 bool "XFS POSIX ACL support" 40 bool "XFS POSIX ACL support"
41 depends on XFS_FS 41 depends on XFS_FS
42 select FS_POSIX_ACL
42 help 43 help
43 POSIX Access Control Lists (ACLs) support permissions for users and 44 POSIX Access Control Lists (ACLs) support permissions for users and
44 groups beyond the owner/group/world scheme. 45 groups beyond the owner/group/world scheme.
diff --git a/fs/xfs/Makefile b/fs/xfs/Makefile
index 60f107e47fe9..7a59daed1782 100644
--- a/fs/xfs/Makefile
+++ b/fs/xfs/Makefile
@@ -40,7 +40,7 @@ xfs-$(CONFIG_PROC_FS) += quota/xfs_qm_stats.o
40endif 40endif
41 41
42xfs-$(CONFIG_XFS_RT) += xfs_rtalloc.o 42xfs-$(CONFIG_XFS_RT) += xfs_rtalloc.o
43xfs-$(CONFIG_XFS_POSIX_ACL) += xfs_acl.o 43xfs-$(CONFIG_XFS_POSIX_ACL) += $(XFS_LINUX)/xfs_acl.o
44xfs-$(CONFIG_PROC_FS) += $(XFS_LINUX)/xfs_stats.o 44xfs-$(CONFIG_PROC_FS) += $(XFS_LINUX)/xfs_stats.o
45xfs-$(CONFIG_SYSCTL) += $(XFS_LINUX)/xfs_sysctl.o 45xfs-$(CONFIG_SYSCTL) += $(XFS_LINUX)/xfs_sysctl.o
46xfs-$(CONFIG_COMPAT) += $(XFS_LINUX)/xfs_ioctl32.o 46xfs-$(CONFIG_COMPAT) += $(XFS_LINUX)/xfs_ioctl32.o
@@ -88,8 +88,7 @@ xfs-y += xfs_alloc.o \
88 xfs_utils.o \ 88 xfs_utils.o \
89 xfs_vnodeops.o \ 89 xfs_vnodeops.o \
90 xfs_rw.o \ 90 xfs_rw.o \
91 xfs_dmops.o \ 91 xfs_dmops.o
92 xfs_qmops.o
93 92
94xfs-$(CONFIG_XFS_TRACE) += xfs_btree_trace.o \ 93xfs-$(CONFIG_XFS_TRACE) += xfs_btree_trace.o \
95 xfs_dir2_trace.o 94 xfs_dir2_trace.o
diff --git a/fs/xfs/linux-2.6/xfs_acl.c b/fs/xfs/linux-2.6/xfs_acl.c
new file mode 100644
index 000000000000..1e9d1246eebc
--- /dev/null
+++ b/fs/xfs/linux-2.6/xfs_acl.c
@@ -0,0 +1,523 @@
1/*
2 * Copyright (c) 2008, Christoph Hellwig
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18#include "xfs.h"
19#include "xfs_acl.h"
20#include "xfs_attr.h"
21#include "xfs_bmap_btree.h"
22#include "xfs_inode.h"
23#include "xfs_vnodeops.h"
24#include <linux/xattr.h>
25#include <linux/posix_acl_xattr.h>
26
27
28#define XFS_ACL_NOT_CACHED ((void *)-1)
29
30/*
31 * Locking scheme:
32 * - all ACL updates are protected by inode->i_mutex, which is taken before
33 * calling into this file.
34 * - access and updates to the ip->i_acl and ip->i_default_acl pointers are
35 * protected by inode->i_lock.
36 */
37
38STATIC struct posix_acl *
39xfs_acl_from_disk(struct xfs_acl *aclp)
40{
41 struct posix_acl_entry *acl_e;
42 struct posix_acl *acl;
43 struct xfs_acl_entry *ace;
44 int count, i;
45
46 count = be32_to_cpu(aclp->acl_cnt);
47
48 acl = posix_acl_alloc(count, GFP_KERNEL);
49 if (!acl)
50 return ERR_PTR(-ENOMEM);
51
52 for (i = 0; i < count; i++) {
53 acl_e = &acl->a_entries[i];
54 ace = &aclp->acl_entry[i];
55
56 /*
57 * The tag is 32 bits on disk and 16 bits in core.
58 *
59 * Because every access to it goes through the core
60 * format first this is not a problem.
61 */
62 acl_e->e_tag = be32_to_cpu(ace->ae_tag);
63 acl_e->e_perm = be16_to_cpu(ace->ae_perm);
64
65 switch (acl_e->e_tag) {
66 case ACL_USER:
67 case ACL_GROUP:
68 acl_e->e_id = be32_to_cpu(ace->ae_id);
69 break;
70 case ACL_USER_OBJ:
71 case ACL_GROUP_OBJ:
72 case ACL_MASK:
73 case ACL_OTHER:
74 acl_e->e_id = ACL_UNDEFINED_ID;
75 break;
76 default:
77 goto fail;
78 }
79 }
80 return acl;
81
82fail:
83 posix_acl_release(acl);
84 return ERR_PTR(-EINVAL);
85}
86
87STATIC void
88xfs_acl_to_disk(struct xfs_acl *aclp, const struct posix_acl *acl)
89{
90 const struct posix_acl_entry *acl_e;
91 struct xfs_acl_entry *ace;
92 int i;
93
94 aclp->acl_cnt = cpu_to_be32(acl->a_count);
95 for (i = 0; i < acl->a_count; i++) {
96 ace = &aclp->acl_entry[i];
97 acl_e = &acl->a_entries[i];
98
99 ace->ae_tag = cpu_to_be32(acl_e->e_tag);
100 ace->ae_id = cpu_to_be32(acl_e->e_id);
101 ace->ae_perm = cpu_to_be16(acl_e->e_perm);
102 }
103}
104
105/*
106 * Update the cached ACL pointer in the inode.
107 *
108 * Because we don't hold any locks while reading/writing the attribute
109 * from/to disk another thread could have raced and updated the cached
110 * ACL value before us. In that case we release the previous cached value
111 * and update it with our new value.
112 */
113STATIC void
114xfs_update_cached_acl(struct inode *inode, struct posix_acl **p_acl,
115 struct posix_acl *acl)
116{
117 spin_lock(&inode->i_lock);
118 if (*p_acl && *p_acl != XFS_ACL_NOT_CACHED)
119 posix_acl_release(*p_acl);
120 *p_acl = posix_acl_dup(acl);
121 spin_unlock(&inode->i_lock);
122}
123
124struct posix_acl *
125xfs_get_acl(struct inode *inode, int type)
126{
127 struct xfs_inode *ip = XFS_I(inode);
128 struct posix_acl *acl = NULL, **p_acl;
129 struct xfs_acl *xfs_acl;
130 int len = sizeof(struct xfs_acl);
131 char *ea_name;
132 int error;
133
134 switch (type) {
135 case ACL_TYPE_ACCESS:
136 ea_name = SGI_ACL_FILE;
137 p_acl = &ip->i_acl;
138 break;
139 case ACL_TYPE_DEFAULT:
140 ea_name = SGI_ACL_DEFAULT;
141 p_acl = &ip->i_default_acl;
142 break;
143 default:
144 return ERR_PTR(-EINVAL);
145 }
146
147 spin_lock(&inode->i_lock);
148 if (*p_acl != XFS_ACL_NOT_CACHED)
149 acl = posix_acl_dup(*p_acl);
150 spin_unlock(&inode->i_lock);
151
152 /*
153 * If we have a cached ACL value just return it, no need to
154 * go out to the disk.
155 */
156 if (acl)
157 return acl;
158
159 xfs_acl = kzalloc(sizeof(struct xfs_acl), GFP_KERNEL);
160 if (!xfs_acl)
161 return ERR_PTR(-ENOMEM);
162
163 error = -xfs_attr_get(ip, ea_name, (char *)xfs_acl, &len, ATTR_ROOT);
164 if (error) {
165 /*
166 * If the attribute doesn't exist make sure we have a negative
167 * cache entry, for any other error assume it is transient and
168 * leave the cache entry as XFS_ACL_NOT_CACHED.
169 */
170 if (error == -ENOATTR) {
171 acl = NULL;
172 goto out_update_cache;
173 }
174 goto out;
175 }
176
177 acl = xfs_acl_from_disk(xfs_acl);
178 if (IS_ERR(acl))
179 goto out;
180
181 out_update_cache:
182 xfs_update_cached_acl(inode, p_acl, acl);
183 out:
184 kfree(xfs_acl);
185 return acl;
186}
187
188STATIC int
189xfs_set_acl(struct inode *inode, int type, struct posix_acl *acl)
190{
191 struct xfs_inode *ip = XFS_I(inode);
192 struct posix_acl **p_acl;
193 char *ea_name;
194 int error;
195
196 if (S_ISLNK(inode->i_mode))
197 return -EOPNOTSUPP;
198
199 switch (type) {
200 case ACL_TYPE_ACCESS:
201 ea_name = SGI_ACL_FILE;
202 p_acl = &ip->i_acl;
203 break;
204 case ACL_TYPE_DEFAULT:
205 if (!S_ISDIR(inode->i_mode))
206 return acl ? -EACCES : 0;
207 ea_name = SGI_ACL_DEFAULT;
208 p_acl = &ip->i_default_acl;
209 break;
210 default:
211 return -EINVAL;
212 }
213
214 if (acl) {
215 struct xfs_acl *xfs_acl;
216 int len;
217
218 xfs_acl = kzalloc(sizeof(struct xfs_acl), GFP_KERNEL);
219 if (!xfs_acl)
220 return -ENOMEM;
221
222 xfs_acl_to_disk(xfs_acl, acl);
223 len = sizeof(struct xfs_acl) -
224 (sizeof(struct xfs_acl_entry) *
225 (XFS_ACL_MAX_ENTRIES - acl->a_count));
226
227 error = -xfs_attr_set(ip, ea_name, (char *)xfs_acl,
228 len, ATTR_ROOT);
229
230 kfree(xfs_acl);
231 } else {
232 /*
233 * A NULL ACL argument means we want to remove the ACL.
234 */
235 error = -xfs_attr_remove(ip, ea_name, ATTR_ROOT);
236
237 /*
238 * If the attribute didn't exist to start with that's fine.
239 */
240 if (error == -ENOATTR)
241 error = 0;
242 }
243
244 if (!error)
245 xfs_update_cached_acl(inode, p_acl, acl);
246 return error;
247}
248
249int
250xfs_check_acl(struct inode *inode, int mask)
251{
252 struct xfs_inode *ip = XFS_I(inode);
253 struct posix_acl *acl;
254 int error = -EAGAIN;
255
256 xfs_itrace_entry(ip);
257
258 /*
259 * If there is no attribute fork no ACL exists on this inode and
260 * we can skip the whole exercise.
261 */
262 if (!XFS_IFORK_Q(ip))
263 return -EAGAIN;
264
265 acl = xfs_get_acl(inode, ACL_TYPE_ACCESS);
266 if (IS_ERR(acl))
267 return PTR_ERR(acl);
268 if (acl) {
269 error = posix_acl_permission(inode, acl, mask);
270 posix_acl_release(acl);
271 }
272
273 return error;
274}
275
276static int
277xfs_set_mode(struct inode *inode, mode_t mode)
278{
279 int error = 0;
280
281 if (mode != inode->i_mode) {
282 struct iattr iattr;
283
284 iattr.ia_valid = ATTR_MODE;
285 iattr.ia_mode = mode;
286
287 error = -xfs_setattr(XFS_I(inode), &iattr, XFS_ATTR_NOACL);
288 }
289
290 return error;
291}
292
293static int
294xfs_acl_exists(struct inode *inode, char *name)
295{
296 int len = sizeof(struct xfs_acl);
297
298 return (xfs_attr_get(XFS_I(inode), name, NULL, &len,
299 ATTR_ROOT|ATTR_KERNOVAL) == 0);
300}
301
302int
303posix_acl_access_exists(struct inode *inode)
304{
305 return xfs_acl_exists(inode, SGI_ACL_FILE);
306}
307
308int
309posix_acl_default_exists(struct inode *inode)
310{
311 if (!S_ISDIR(inode->i_mode))
312 return 0;
313 return xfs_acl_exists(inode, SGI_ACL_DEFAULT);
314}
315
316/*
317 * No need for i_mutex because the inode is not yet exposed to the VFS.
318 */
319int
320xfs_inherit_acl(struct inode *inode, struct posix_acl *default_acl)
321{
322 struct posix_acl *clone;
323 mode_t mode;
324 int error = 0, inherit = 0;
325
326 if (S_ISDIR(inode->i_mode)) {
327 error = xfs_set_acl(inode, ACL_TYPE_DEFAULT, default_acl);
328 if (error)
329 return error;
330 }
331
332 clone = posix_acl_clone(default_acl, GFP_KERNEL);
333 if (!clone)
334 return -ENOMEM;
335
336 mode = inode->i_mode;
337 error = posix_acl_create_masq(clone, &mode);
338 if (error < 0)
339 goto out_release_clone;
340
341 /*
342 * If posix_acl_create_masq returns a positive value we need to
343 * inherit a permission that can't be represented using the Unix
344 * mode bits and we actually need to set an ACL.
345 */
346 if (error > 0)
347 inherit = 1;
348
349 error = xfs_set_mode(inode, mode);
350 if (error)
351 goto out_release_clone;
352
353 if (inherit)
354 error = xfs_set_acl(inode, ACL_TYPE_ACCESS, clone);
355
356 out_release_clone:
357 posix_acl_release(clone);
358 return error;
359}
360
361int
362xfs_acl_chmod(struct inode *inode)
363{
364 struct posix_acl *acl, *clone;
365 int error;
366
367 if (S_ISLNK(inode->i_mode))
368 return -EOPNOTSUPP;
369
370 acl = xfs_get_acl(inode, ACL_TYPE_ACCESS);
371 if (IS_ERR(acl) || !acl)
372 return PTR_ERR(acl);
373
374 clone = posix_acl_clone(acl, GFP_KERNEL);
375 posix_acl_release(acl);
376 if (!clone)
377 return -ENOMEM;
378
379 error = posix_acl_chmod_masq(clone, inode->i_mode);
380 if (!error)
381 error = xfs_set_acl(inode, ACL_TYPE_ACCESS, clone);
382
383 posix_acl_release(clone);
384 return error;
385}
386
387void
388xfs_inode_init_acls(struct xfs_inode *ip)
389{
390 /*
391 * No need for locking, inode is not live yet.
392 */
393 ip->i_acl = XFS_ACL_NOT_CACHED;
394 ip->i_default_acl = XFS_ACL_NOT_CACHED;
395}
396
397void
398xfs_inode_clear_acls(struct xfs_inode *ip)
399{
400 /*
401 * No need for locking here, the inode is not live anymore
402 * and just about to be freed.
403 */
404 if (ip->i_acl != XFS_ACL_NOT_CACHED)
405 posix_acl_release(ip->i_acl);
406 if (ip->i_default_acl != XFS_ACL_NOT_CACHED)
407 posix_acl_release(ip->i_default_acl);
408}
409
410
411/*
412 * System xattr handlers.
413 *
414 * Currently Posix ACLs are the only system namespace extended attribute
415 * handlers supported by XFS, so we just implement the handlers here.
416 * If we ever support other system extended attributes this will need
417 * some refactoring.
418 */
419
420static int
421xfs_decode_acl(const char *name)
422{
423 if (strcmp(name, "posix_acl_access") == 0)
424 return ACL_TYPE_ACCESS;
425 else if (strcmp(name, "posix_acl_default") == 0)
426 return ACL_TYPE_DEFAULT;
427 return -EINVAL;
428}
429
430static int
431xfs_xattr_system_get(struct inode *inode, const char *name,
432 void *value, size_t size)
433{
434 struct posix_acl *acl;
435 int type, error;
436
437 type = xfs_decode_acl(name);
438 if (type < 0)
439 return type;
440
441 acl = xfs_get_acl(inode, type);
442 if (IS_ERR(acl))
443 return PTR_ERR(acl);
444 if (acl == NULL)
445 return -ENODATA;
446
447 error = posix_acl_to_xattr(acl, value, size);
448 posix_acl_release(acl);
449
450 return error;
451}
452
453static int
454xfs_xattr_system_set(struct inode *inode, const char *name,
455 const void *value, size_t size, int flags)
456{
457 struct posix_acl *acl = NULL;
458 int error = 0, type;
459
460 type = xfs_decode_acl(name);
461 if (type < 0)
462 return type;
463 if (flags & XATTR_CREATE)
464 return -EINVAL;
465 if (type == ACL_TYPE_DEFAULT && !S_ISDIR(inode->i_mode))
466 return value ? -EACCES : 0;
467 if ((current_fsuid() != inode->i_uid) && !capable(CAP_FOWNER))
468 return -EPERM;
469
470 if (!value)
471 goto set_acl;
472
473 acl = posix_acl_from_xattr(value, size);
474 if (!acl) {
475 /*
476 * acl_set_file(3) may request that we set default ACLs with
477 * zero length -- defend (gracefully) against that here.
478 */
479 goto out;
480 }
481 if (IS_ERR(acl)) {
482 error = PTR_ERR(acl);
483 goto out;
484 }
485
486 error = posix_acl_valid(acl);
487 if (error)
488 goto out_release;
489
490 error = -EINVAL;
491 if (acl->a_count > XFS_ACL_MAX_ENTRIES)
492 goto out_release;
493
494 if (type == ACL_TYPE_ACCESS) {
495 mode_t mode = inode->i_mode;
496 error = posix_acl_equiv_mode(acl, &mode);
497
498 if (error <= 0) {
499 posix_acl_release(acl);
500 acl = NULL;
501
502 if (error < 0)
503 return error;
504 }
505
506 error = xfs_set_mode(inode, mode);
507 if (error)
508 goto out_release;
509 }
510
511 set_acl:
512 error = xfs_set_acl(inode, type, acl);
513 out_release:
514 posix_acl_release(acl);
515 out:
516 return error;
517}
518
519struct xattr_handler xfs_xattr_system_handler = {
520 .prefix = XATTR_SYSTEM_PREFIX,
521 .get = xfs_xattr_system_get,
522 .set = xfs_xattr_system_set,
523};
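
The lookup path in the new file above hinges on distinguishing an unknown cache state from a negative one. As a minimal illustrative sketch (not part of the patch; the helper name is made up), the three states of ip->i_acl that xfs_get_acl() has to tell apart are:

/*
 * Illustrative sketch only, not part of the patch: the three states of
 * the cached ACL pointers (ip->i_acl, ip->i_default_acl) used above.
 */
static struct posix_acl *
xfs_cached_acl_sketch(struct inode *inode)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct posix_acl	*acl = NULL;

	spin_lock(&inode->i_lock);
	if (ip->i_acl == XFS_ACL_NOT_CACHED) {
		/* state 1: nothing cached yet, must read SGI_ACL_FILE from disk */
	} else if (ip->i_acl == NULL) {
		/* state 2: negative cache, the inode has no access ACL */
	} else {
		/* state 3: cached ACL, take a reference before returning it */
		acl = posix_acl_dup(ip->i_acl);
	}
	spin_unlock(&inode->i_lock);
	return acl;
}
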
diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c
index 34eaab608e6e..5bb523d7f37e 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl.c
@@ -41,7 +41,6 @@
41#include "xfs_itable.h" 41#include "xfs_itable.h"
42#include "xfs_error.h" 42#include "xfs_error.h"
43#include "xfs_rw.h" 43#include "xfs_rw.h"
44#include "xfs_acl.h"
45#include "xfs_attr.h" 44#include "xfs_attr.h"
46#include "xfs_bmap.h" 45#include "xfs_bmap.h"
47#include "xfs_buf_item.h" 46#include "xfs_buf_item.h"
@@ -899,7 +898,8 @@ xfs_ioctl_setattr(
899 struct xfs_mount *mp = ip->i_mount; 898 struct xfs_mount *mp = ip->i_mount;
900 struct xfs_trans *tp; 899 struct xfs_trans *tp;
901 unsigned int lock_flags = 0; 900 unsigned int lock_flags = 0;
902 struct xfs_dquot *udqp = NULL, *gdqp = NULL; 901 struct xfs_dquot *udqp = NULL;
902 struct xfs_dquot *gdqp = NULL;
903 struct xfs_dquot *olddquot = NULL; 903 struct xfs_dquot *olddquot = NULL;
904 int code; 904 int code;
905 905
@@ -919,7 +919,7 @@ xfs_ioctl_setattr(
919 * because the i_*dquot fields will get updated anyway. 919 * because the i_*dquot fields will get updated anyway.
920 */ 920 */
921 if (XFS_IS_QUOTA_ON(mp) && (mask & FSX_PROJID)) { 921 if (XFS_IS_QUOTA_ON(mp) && (mask & FSX_PROJID)) {
922 code = XFS_QM_DQVOPALLOC(mp, ip, ip->i_d.di_uid, 922 code = xfs_qm_vop_dqalloc(ip, ip->i_d.di_uid,
923 ip->i_d.di_gid, fa->fsx_projid, 923 ip->i_d.di_gid, fa->fsx_projid,
924 XFS_QMOPT_PQUOTA, &udqp, &gdqp); 924 XFS_QMOPT_PQUOTA, &udqp, &gdqp);
925 if (code) 925 if (code)
@@ -954,10 +954,11 @@ xfs_ioctl_setattr(
954 * Do a quota reservation only if projid is actually going to change. 954 * Do a quota reservation only if projid is actually going to change.
955 */ 955 */
956 if (mask & FSX_PROJID) { 956 if (mask & FSX_PROJID) {
957 if (XFS_IS_PQUOTA_ON(mp) && 957 if (XFS_IS_QUOTA_RUNNING(mp) &&
958 XFS_IS_PQUOTA_ON(mp) &&
958 ip->i_d.di_projid != fa->fsx_projid) { 959 ip->i_d.di_projid != fa->fsx_projid) {
959 ASSERT(tp); 960 ASSERT(tp);
960 code = XFS_QM_DQVOPCHOWNRESV(mp, tp, ip, udqp, gdqp, 961 code = xfs_qm_vop_chown_reserve(tp, ip, udqp, gdqp,
961 capable(CAP_FOWNER) ? 962 capable(CAP_FOWNER) ?
962 XFS_QMOPT_FORCE_RES : 0); 963 XFS_QMOPT_FORCE_RES : 0);
963 if (code) /* out of quota */ 964 if (code) /* out of quota */
@@ -1059,8 +1060,8 @@ xfs_ioctl_setattr(
1059 * in the transaction. 1060 * in the transaction.
1060 */ 1061 */
1061 if (ip->i_d.di_projid != fa->fsx_projid) { 1062 if (ip->i_d.di_projid != fa->fsx_projid) {
1062 if (XFS_IS_PQUOTA_ON(mp)) { 1063 if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_PQUOTA_ON(mp)) {
1063 olddquot = XFS_QM_DQVOPCHOWN(mp, tp, ip, 1064 olddquot = xfs_qm_vop_chown(tp, ip,
1064 &ip->i_gdquot, gdqp); 1065 &ip->i_gdquot, gdqp);
1065 } 1066 }
1066 ip->i_d.di_projid = fa->fsx_projid; 1067 ip->i_d.di_projid = fa->fsx_projid;
@@ -1106,9 +1107,9 @@ xfs_ioctl_setattr(
1106 /* 1107 /*
1107 * Release any dquot(s) the inode had kept before chown. 1108 * Release any dquot(s) the inode had kept before chown.
1108 */ 1109 */
1109 XFS_QM_DQRELE(mp, olddquot); 1110 xfs_qm_dqrele(olddquot);
1110 XFS_QM_DQRELE(mp, udqp); 1111 xfs_qm_dqrele(udqp);
1111 XFS_QM_DQRELE(mp, gdqp); 1112 xfs_qm_dqrele(gdqp);
1112 1113
1113 if (code) 1114 if (code)
1114 return code; 1115 return code;
@@ -1122,8 +1123,8 @@ xfs_ioctl_setattr(
1122 return 0; 1123 return 0;
1123 1124
1124 error_return: 1125 error_return:
1125 XFS_QM_DQRELE(mp, udqp); 1126 xfs_qm_dqrele(udqp);
1126 XFS_QM_DQRELE(mp, gdqp); 1127 xfs_qm_dqrele(gdqp);
1127 xfs_trans_cancel(tp, 0); 1128 xfs_trans_cancel(tp, 0);
1128 if (lock_flags) 1129 if (lock_flags)
1129 xfs_iunlock(ip, lock_flags); 1130 xfs_iunlock(ip, lock_flags);
diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
index 6075382336d7..800dd4f98d4e 100644
--- a/fs/xfs/linux-2.6/xfs_iops.c
+++ b/fs/xfs/linux-2.6/xfs_iops.c
@@ -17,6 +17,7 @@
17 */ 17 */
18#include "xfs.h" 18#include "xfs.h"
19#include "xfs_fs.h" 19#include "xfs_fs.h"
20#include "xfs_acl.h"
20#include "xfs_bit.h" 21#include "xfs_bit.h"
21#include "xfs_log.h" 22#include "xfs_log.h"
22#include "xfs_inum.h" 23#include "xfs_inum.h"
@@ -51,6 +52,7 @@
51#include <linux/capability.h> 52#include <linux/capability.h>
52#include <linux/xattr.h> 53#include <linux/xattr.h>
53#include <linux/namei.h> 54#include <linux/namei.h>
55#include <linux/posix_acl.h>
54#include <linux/security.h> 56#include <linux/security.h>
55#include <linux/falloc.h> 57#include <linux/falloc.h>
56#include <linux/fiemap.h> 58#include <linux/fiemap.h>
@@ -202,9 +204,8 @@ xfs_vn_mknod(
202{ 204{
203 struct inode *inode; 205 struct inode *inode;
204 struct xfs_inode *ip = NULL; 206 struct xfs_inode *ip = NULL;
205 xfs_acl_t *default_acl = NULL; 207 struct posix_acl *default_acl = NULL;
206 struct xfs_name name; 208 struct xfs_name name;
207 int (*test_default_acl)(struct inode *) = _ACL_DEFAULT_EXISTS;
208 int error; 209 int error;
209 210
210 /* 211 /*
@@ -219,18 +220,14 @@ xfs_vn_mknod(
219 rdev = 0; 220 rdev = 0;
220 } 221 }
221 222
222 if (test_default_acl && test_default_acl(dir)) { 223 if (IS_POSIXACL(dir)) {
223 if (!_ACL_ALLOC(default_acl)) { 224 default_acl = xfs_get_acl(dir, ACL_TYPE_DEFAULT);
224 return -ENOMEM; 225 if (IS_ERR(default_acl))
225 } 226 return -PTR_ERR(default_acl);
226 if (!_ACL_GET_DEFAULT(dir, default_acl)) {
227 _ACL_FREE(default_acl);
228 default_acl = NULL;
229 }
230 }
231 227
232 if (IS_POSIXACL(dir) && !default_acl) 228 if (!default_acl)
233 mode &= ~current_umask(); 229 mode &= ~current_umask();
230 }
234 231
235 xfs_dentry_to_name(&name, dentry); 232 xfs_dentry_to_name(&name, dentry);
236 error = xfs_create(XFS_I(dir), &name, mode, rdev, &ip, NULL); 233 error = xfs_create(XFS_I(dir), &name, mode, rdev, &ip, NULL);
@@ -244,10 +241,10 @@ xfs_vn_mknod(
244 goto out_cleanup_inode; 241 goto out_cleanup_inode;
245 242
246 if (default_acl) { 243 if (default_acl) {
247 error = _ACL_INHERIT(inode, mode, default_acl); 244 error = -xfs_inherit_acl(inode, default_acl);
248 if (unlikely(error)) 245 if (unlikely(error))
249 goto out_cleanup_inode; 246 goto out_cleanup_inode;
250 _ACL_FREE(default_acl); 247 posix_acl_release(default_acl);
251 } 248 }
252 249
253 250
@@ -257,8 +254,7 @@ xfs_vn_mknod(
257 out_cleanup_inode: 254 out_cleanup_inode:
258 xfs_cleanup_inode(dir, inode, dentry); 255 xfs_cleanup_inode(dir, inode, dentry);
259 out_free_acl: 256 out_free_acl:
260 if (default_acl) 257 posix_acl_release(default_acl);
261 _ACL_FREE(default_acl);
262 return -error; 258 return -error;
263} 259}
264 260
@@ -488,26 +484,6 @@ xfs_vn_put_link(
488 kfree(s); 484 kfree(s);
489} 485}
490 486
491#ifdef CONFIG_XFS_POSIX_ACL
492STATIC int
493xfs_check_acl(
494 struct inode *inode,
495 int mask)
496{
497 struct xfs_inode *ip = XFS_I(inode);
498 int error;
499
500 xfs_itrace_entry(ip);
501
502 if (XFS_IFORK_Q(ip)) {
503 error = xfs_acl_iaccess(ip, mask, NULL);
504 if (error != -1)
505 return -error;
506 }
507
508 return -EAGAIN;
509}
510
511STATIC int 487STATIC int
512xfs_vn_permission( 488xfs_vn_permission(
513 struct inode *inode, 489 struct inode *inode,
@@ -515,9 +491,6 @@ xfs_vn_permission(
515{ 491{
516 return generic_permission(inode, mask, xfs_check_acl); 492 return generic_permission(inode, mask, xfs_check_acl);
517} 493}
518#else
519#define xfs_vn_permission NULL
520#endif
521 494
522STATIC int 495STATIC int
523xfs_vn_getattr( 496xfs_vn_getattr(
diff --git a/fs/xfs/linux-2.6/xfs_lrw.c b/fs/xfs/linux-2.6/xfs_lrw.c
index 9142192ccbe6..7078974a6eee 100644
--- a/fs/xfs/linux-2.6/xfs_lrw.c
+++ b/fs/xfs/linux-2.6/xfs_lrw.c
@@ -42,7 +42,6 @@
42#include "xfs_error.h" 42#include "xfs_error.h"
43#include "xfs_itable.h" 43#include "xfs_itable.h"
44#include "xfs_rw.h" 44#include "xfs_rw.h"
45#include "xfs_acl.h"
46#include "xfs_attr.h" 45#include "xfs_attr.h"
47#include "xfs_inode_item.h" 46#include "xfs_inode_item.h"
48#include "xfs_buf_item.h" 47#include "xfs_buf_item.h"
diff --git a/fs/xfs/linux-2.6/xfs_quotaops.c b/fs/xfs/linux-2.6/xfs_quotaops.c
index 94d9a633d3d9..cb6e2cca214f 100644
--- a/fs/xfs/linux-2.6/xfs_quotaops.c
+++ b/fs/xfs/linux-2.6/xfs_quotaops.c
@@ -50,9 +50,11 @@ xfs_fs_quota_sync(
50{ 50{
51 struct xfs_mount *mp = XFS_M(sb); 51 struct xfs_mount *mp = XFS_M(sb);
52 52
53 if (sb->s_flags & MS_RDONLY)
54 return -EROFS;
53 if (!XFS_IS_QUOTA_RUNNING(mp)) 55 if (!XFS_IS_QUOTA_RUNNING(mp))
54 return -ENOSYS; 56 return -ENOSYS;
55 return -xfs_sync_inodes(mp, SYNC_DELWRI); 57 return -xfs_sync_data(mp, 0);
56} 58}
57 59
58STATIC int 60STATIC int
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index bb685269f832..36fb8a657c55 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -43,7 +43,6 @@
43#include "xfs_itable.h" 43#include "xfs_itable.h"
44#include "xfs_fsops.h" 44#include "xfs_fsops.h"
45#include "xfs_rw.h" 45#include "xfs_rw.h"
46#include "xfs_acl.h"
47#include "xfs_attr.h" 46#include "xfs_attr.h"
48#include "xfs_buf_item.h" 47#include "xfs_buf_item.h"
49#include "xfs_utils.h" 48#include "xfs_utils.h"
@@ -405,6 +404,14 @@ xfs_parseargs(
405 return EINVAL; 404 return EINVAL;
406 } 405 }
407 406
407#ifndef CONFIG_XFS_QUOTA
408 if (XFS_IS_QUOTA_RUNNING(mp)) {
409 cmn_err(CE_WARN,
410 "XFS: quota support not available in this kernel.");
411 return EINVAL;
412 }
413#endif
414
408 if ((mp->m_qflags & (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE)) && 415 if ((mp->m_qflags & (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE)) &&
409 (mp->m_qflags & (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE))) { 416 (mp->m_qflags & (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE))) {
410 cmn_err(CE_WARN, 417 cmn_err(CE_WARN,
@@ -1063,7 +1070,18 @@ xfs_fs_put_super(
1063 int unmount_event_flags = 0; 1070 int unmount_event_flags = 0;
1064 1071
1065 xfs_syncd_stop(mp); 1072 xfs_syncd_stop(mp);
1066 xfs_sync_inodes(mp, SYNC_ATTR|SYNC_DELWRI); 1073
1074 if (!(sb->s_flags & MS_RDONLY)) {
1075 /*
1076 * XXX(hch): this should be SYNC_WAIT.
1077 *
1078 * Or more likely not needed at all because the VFS is already
1079 * calling ->sync_fs after shutting down all filesystem
1080 * operations and just before calling ->put_super.
1081 */
1082 xfs_sync_data(mp, 0);
1083 xfs_sync_attr(mp, 0);
1084 }
1067 1085
1068#ifdef HAVE_DMAPI 1086#ifdef HAVE_DMAPI
1069 if (mp->m_flags & XFS_MOUNT_DMAPI) { 1087 if (mp->m_flags & XFS_MOUNT_DMAPI) {
@@ -1098,7 +1116,6 @@ xfs_fs_put_super(
1098 xfs_freesb(mp); 1116 xfs_freesb(mp);
1099 xfs_icsb_destroy_counters(mp); 1117 xfs_icsb_destroy_counters(mp);
1100 xfs_close_devices(mp); 1118 xfs_close_devices(mp);
1101 xfs_qmops_put(mp);
1102 xfs_dmops_put(mp); 1119 xfs_dmops_put(mp);
1103 xfs_free_fsname(mp); 1120 xfs_free_fsname(mp);
1104 kfree(mp); 1121 kfree(mp);
@@ -1168,6 +1185,7 @@ xfs_fs_statfs(
1168{ 1185{
1169 struct xfs_mount *mp = XFS_M(dentry->d_sb); 1186 struct xfs_mount *mp = XFS_M(dentry->d_sb);
1170 xfs_sb_t *sbp = &mp->m_sb; 1187 xfs_sb_t *sbp = &mp->m_sb;
1188 struct xfs_inode *ip = XFS_I(dentry->d_inode);
1171 __uint64_t fakeinos, id; 1189 __uint64_t fakeinos, id;
1172 xfs_extlen_t lsize; 1190 xfs_extlen_t lsize;
1173 1191
@@ -1196,7 +1214,10 @@ xfs_fs_statfs(
1196 statp->f_ffree = statp->f_files - (sbp->sb_icount - sbp->sb_ifree); 1214 statp->f_ffree = statp->f_files - (sbp->sb_icount - sbp->sb_ifree);
1197 spin_unlock(&mp->m_sb_lock); 1215 spin_unlock(&mp->m_sb_lock);
1198 1216
1199 XFS_QM_DQSTATVFS(XFS_I(dentry->d_inode), statp); 1217 if ((ip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) ||
1218 ((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_OQUOTA_ENFD))) ==
1219 (XFS_PQUOTA_ACCT|XFS_OQUOTA_ENFD))
1220 xfs_qm_statvfs(ip, statp);
1200 return 0; 1221 return 0;
1201} 1222}
1202 1223
@@ -1404,16 +1425,13 @@ xfs_fs_fill_super(
1404 error = xfs_dmops_get(mp); 1425 error = xfs_dmops_get(mp);
1405 if (error) 1426 if (error)
1406 goto out_free_fsname; 1427 goto out_free_fsname;
1407 error = xfs_qmops_get(mp);
1408 if (error)
1409 goto out_put_dmops;
1410 1428
1411 if (silent) 1429 if (silent)
1412 flags |= XFS_MFSI_QUIET; 1430 flags |= XFS_MFSI_QUIET;
1413 1431
1414 error = xfs_open_devices(mp); 1432 error = xfs_open_devices(mp);
1415 if (error) 1433 if (error)
1416 goto out_put_qmops; 1434 goto out_put_dmops;
1417 1435
1418 if (xfs_icsb_init_counters(mp)) 1436 if (xfs_icsb_init_counters(mp))
1419 mp->m_flags |= XFS_MOUNT_NO_PERCPU_SB; 1437 mp->m_flags |= XFS_MOUNT_NO_PERCPU_SB;
@@ -1482,8 +1500,6 @@ xfs_fs_fill_super(
1482 out_destroy_counters: 1500 out_destroy_counters:
1483 xfs_icsb_destroy_counters(mp); 1501 xfs_icsb_destroy_counters(mp);
1484 xfs_close_devices(mp); 1502 xfs_close_devices(mp);
1485 out_put_qmops:
1486 xfs_qmops_put(mp);
1487 out_put_dmops: 1503 out_put_dmops:
1488 xfs_dmops_put(mp); 1504 xfs_dmops_put(mp);
1489 out_free_fsname: 1505 out_free_fsname:
@@ -1718,18 +1734,8 @@ xfs_init_zones(void)
1718 if (!xfs_ili_zone) 1734 if (!xfs_ili_zone)
1719 goto out_destroy_inode_zone; 1735 goto out_destroy_inode_zone;
1720 1736
1721#ifdef CONFIG_XFS_POSIX_ACL
1722 xfs_acl_zone = kmem_zone_init(sizeof(xfs_acl_t), "xfs_acl");
1723 if (!xfs_acl_zone)
1724 goto out_destroy_ili_zone;
1725#endif
1726
1727 return 0; 1737 return 0;
1728 1738
1729#ifdef CONFIG_XFS_POSIX_ACL
1730 out_destroy_ili_zone:
1731#endif
1732 kmem_zone_destroy(xfs_ili_zone);
1733 out_destroy_inode_zone: 1739 out_destroy_inode_zone:
1734 kmem_zone_destroy(xfs_inode_zone); 1740 kmem_zone_destroy(xfs_inode_zone);
1735 out_destroy_efi_zone: 1741 out_destroy_efi_zone:
@@ -1763,9 +1769,6 @@ xfs_init_zones(void)
1763STATIC void 1769STATIC void
1764xfs_destroy_zones(void) 1770xfs_destroy_zones(void)
1765{ 1771{
1766#ifdef CONFIG_XFS_POSIX_ACL
1767 kmem_zone_destroy(xfs_acl_zone);
1768#endif
1769 kmem_zone_destroy(xfs_ili_zone); 1772 kmem_zone_destroy(xfs_ili_zone);
1770 kmem_zone_destroy(xfs_inode_zone); 1773 kmem_zone_destroy(xfs_inode_zone);
1771 kmem_zone_destroy(xfs_efi_zone); 1774 kmem_zone_destroy(xfs_efi_zone);
diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c
index f7ba76633c29..b619d6b8ca43 100644
--- a/fs/xfs/linux-2.6/xfs_sync.c
+++ b/fs/xfs/linux-2.6/xfs_sync.c
@@ -43,166 +43,267 @@
43#include "xfs_buf_item.h" 43#include "xfs_buf_item.h"
44#include "xfs_inode_item.h" 44#include "xfs_inode_item.h"
45#include "xfs_rw.h" 45#include "xfs_rw.h"
46#include "xfs_quota.h"
46 47
47#include <linux/kthread.h> 48#include <linux/kthread.h>
48#include <linux/freezer.h> 49#include <linux/freezer.h>
49 50
50/*
51 * Sync all the inodes in the given AG according to the
52 * direction given by the flags.
53 */
54STATIC int
55xfs_sync_inodes_ag(
56 xfs_mount_t *mp,
57 int ag,
58 int flags)
59{
60 xfs_perag_t *pag = &mp->m_perag[ag];
61 int nr_found;
62 uint32_t first_index = 0;
63 int error = 0;
64 int last_error = 0;
65 51
66 do { 52STATIC xfs_inode_t *
67 struct inode *inode; 53xfs_inode_ag_lookup(
68 xfs_inode_t *ip = NULL; 54 struct xfs_mount *mp,
69 int lock_flags = XFS_ILOCK_SHARED; 55 struct xfs_perag *pag,
56 uint32_t *first_index,
57 int tag)
58{
59 int nr_found;
60 struct xfs_inode *ip;
70 61
71 /* 62 /*
72 * use a gang lookup to find the next inode in the tree 63 * use a gang lookup to find the next inode in the tree
73 * as the tree is sparse and a gang lookup walks to find 64 * as the tree is sparse and a gang lookup walks to find
74 * the number of objects requested. 65 * the number of objects requested.
75 */ 66 */
76 read_lock(&pag->pag_ici_lock); 67 read_lock(&pag->pag_ici_lock);
68 if (tag == XFS_ICI_NO_TAG) {
77 nr_found = radix_tree_gang_lookup(&pag->pag_ici_root, 69 nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
78 (void**)&ip, first_index, 1); 70 (void **)&ip, *first_index, 1);
71 } else {
72 nr_found = radix_tree_gang_lookup_tag(&pag->pag_ici_root,
73 (void **)&ip, *first_index, 1, tag);
74 }
75 if (!nr_found)
76 goto unlock;
79 77
80 if (!nr_found) { 78 /*
81 read_unlock(&pag->pag_ici_lock); 79 * Update the index for the next lookup. Catch overflows
82 break; 80 * into the next AG range which can occur if we have inodes
83 } 81 * in the last block of the AG and we are currently
82 * pointing to the last inode.
83 */
84 *first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
85 if (*first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
86 goto unlock;
84 87
85 /* 88 return ip;
86 * Update the index for the next lookup. Catch overflows
87 * into the next AG range which can occur if we have inodes
88 * in the last block of the AG and we are currently
89 * pointing to the last inode.
90 */
91 first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
92 if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino)) {
93 read_unlock(&pag->pag_ici_lock);
94 break;
95 }
96 89
97 /* nothing to sync during shutdown */ 90unlock:
98 if (XFS_FORCED_SHUTDOWN(mp)) { 91 read_unlock(&pag->pag_ici_lock);
99 read_unlock(&pag->pag_ici_lock); 92 return NULL;
100 return 0; 93}
101 }
102 94
103 /* 95STATIC int
104 * If we can't get a reference on the inode, it must be 96xfs_inode_ag_walk(
105 * in reclaim. Leave it for the reclaim code to flush. 97 struct xfs_mount *mp,
106 */ 98 xfs_agnumber_t ag,
107 inode = VFS_I(ip); 99 int (*execute)(struct xfs_inode *ip,
108 if (!igrab(inode)) { 100 struct xfs_perag *pag, int flags),
109 read_unlock(&pag->pag_ici_lock); 101 int flags,
110 continue; 102 int tag)
111 } 103{
112 read_unlock(&pag->pag_ici_lock); 104 struct xfs_perag *pag = &mp->m_perag[ag];
105 uint32_t first_index;
106 int last_error = 0;
107 int skipped;
113 108
114 /* avoid new or bad inodes */ 109restart:
115 if (is_bad_inode(inode) || 110 skipped = 0;
116 xfs_iflags_test(ip, XFS_INEW)) { 111 first_index = 0;
117 IRELE(ip); 112 do {
118 continue; 113 int error = 0;
119 } 114 xfs_inode_t *ip;
120 115
121 /* 116 ip = xfs_inode_ag_lookup(mp, pag, &first_index, tag);
122 * If we have to flush data or wait for I/O completion 117 if (!ip)
123 * we need to hold the iolock. 118 break;
124 */
125 if (flags & SYNC_DELWRI) {
126 if (VN_DIRTY(inode)) {
127 if (flags & SYNC_TRYLOCK) {
128 if (xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED))
129 lock_flags |= XFS_IOLOCK_SHARED;
130 } else {
131 xfs_ilock(ip, XFS_IOLOCK_SHARED);
132 lock_flags |= XFS_IOLOCK_SHARED;
133 }
134 if (lock_flags & XFS_IOLOCK_SHARED) {
135 error = xfs_flush_pages(ip, 0, -1,
136 (flags & SYNC_WAIT) ? 0
137 : XFS_B_ASYNC,
138 FI_NONE);
139 }
140 }
141 if (VN_CACHED(inode) && (flags & SYNC_IOWAIT))
142 xfs_ioend_wait(ip);
143 }
144 xfs_ilock(ip, XFS_ILOCK_SHARED);
145
146 if ((flags & SYNC_ATTR) && !xfs_inode_clean(ip)) {
147 if (flags & SYNC_WAIT) {
148 xfs_iflock(ip);
149 if (!xfs_inode_clean(ip))
150 error = xfs_iflush(ip, XFS_IFLUSH_SYNC);
151 else
152 xfs_ifunlock(ip);
153 } else if (xfs_iflock_nowait(ip)) {
154 if (!xfs_inode_clean(ip))
155 error = xfs_iflush(ip, XFS_IFLUSH_DELWRI);
156 else
157 xfs_ifunlock(ip);
158 }
159 }
160 xfs_iput(ip, lock_flags);
161 119
120 error = execute(ip, pag, flags);
121 if (error == EAGAIN) {
122 skipped++;
123 continue;
124 }
162 if (error) 125 if (error)
163 last_error = error; 126 last_error = error;
164 /* 127 /*
165 * bail out if the filesystem is corrupted. 128 * bail out if the filesystem is corrupted.
166 */ 129 */
167 if (error == EFSCORRUPTED) 130 if (error == EFSCORRUPTED)
168 return XFS_ERROR(error); 131 break;
169 132
170 } while (nr_found); 133 } while (1);
134
135 if (skipped) {
136 delay(1);
137 goto restart;
138 }
171 139
140 xfs_put_perag(mp, pag);
172 return last_error; 141 return last_error;
173} 142}
174 143
175int 144int
176xfs_sync_inodes( 145xfs_inode_ag_iterator(
177 xfs_mount_t *mp, 146 struct xfs_mount *mp,
178 int flags) 147 int (*execute)(struct xfs_inode *ip,
148 struct xfs_perag *pag, int flags),
149 int flags,
150 int tag)
179{ 151{
180 int error; 152 int error = 0;
181 int last_error; 153 int last_error = 0;
182 int i; 154 xfs_agnumber_t ag;
183 int lflags = XFS_LOG_FORCE;
184 155
185 if (mp->m_flags & XFS_MOUNT_RDONLY) 156 for (ag = 0; ag < mp->m_sb.sb_agcount; ag++) {
186 return 0; 157 if (!mp->m_perag[ag].pag_ici_init)
187 error = 0; 158 continue;
188 last_error = 0; 159 error = xfs_inode_ag_walk(mp, ag, execute, flags, tag);
160 if (error) {
161 last_error = error;
162 if (error == EFSCORRUPTED)
163 break;
164 }
165 }
166 return XFS_ERROR(last_error);
167}
168
169/* must be called with pag_ici_lock held and releases it */
170int
171xfs_sync_inode_valid(
172 struct xfs_inode *ip,
173 struct xfs_perag *pag)
174{
175 struct inode *inode = VFS_I(ip);
176
177 /* nothing to sync during shutdown */
178 if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
179 read_unlock(&pag->pag_ici_lock);
180 return EFSCORRUPTED;
181 }
182
183 /*
184 * If we can't get a reference on the inode, it must be in reclaim.
185 * Leave it for the reclaim code to flush. Also avoid inodes that
186 * haven't been fully initialised.
187 */
188 if (!igrab(inode)) {
189 read_unlock(&pag->pag_ici_lock);
190 return ENOENT;
191 }
192 read_unlock(&pag->pag_ici_lock);
193
194 if (is_bad_inode(inode) || xfs_iflags_test(ip, XFS_INEW)) {
195 IRELE(ip);
196 return ENOENT;
197 }
198
199 return 0;
200}
201
202STATIC int
203xfs_sync_inode_data(
204 struct xfs_inode *ip,
205 struct xfs_perag *pag,
206 int flags)
207{
208 struct inode *inode = VFS_I(ip);
209 struct address_space *mapping = inode->i_mapping;
210 int error = 0;
211
212 error = xfs_sync_inode_valid(ip, pag);
213 if (error)
214 return error;
215
216 if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
217 goto out_wait;
218
219 if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED)) {
220 if (flags & SYNC_TRYLOCK)
221 goto out_wait;
222 xfs_ilock(ip, XFS_IOLOCK_SHARED);
223 }
224
225 error = xfs_flush_pages(ip, 0, -1, (flags & SYNC_WAIT) ?
226 0 : XFS_B_ASYNC, FI_NONE);
227 xfs_iunlock(ip, XFS_IOLOCK_SHARED);
189 228
229 out_wait:
190 if (flags & SYNC_WAIT) 230 if (flags & SYNC_WAIT)
191 lflags |= XFS_LOG_SYNC; 231 xfs_ioend_wait(ip);
232 IRELE(ip);
233 return error;
234}
192 235
193 for (i = 0; i < mp->m_sb.sb_agcount; i++) { 236STATIC int
194 if (!mp->m_perag[i].pag_ici_init) 237xfs_sync_inode_attr(
195 continue; 238 struct xfs_inode *ip,
196 error = xfs_sync_inodes_ag(mp, i, flags); 239 struct xfs_perag *pag,
197 if (error) 240 int flags)
198 last_error = error; 241{
199 if (error == EFSCORRUPTED) 242 int error = 0;
200 break; 243
244 error = xfs_sync_inode_valid(ip, pag);
245 if (error)
246 return error;
247
248 xfs_ilock(ip, XFS_ILOCK_SHARED);
249 if (xfs_inode_clean(ip))
250 goto out_unlock;
251 if (!xfs_iflock_nowait(ip)) {
252 if (!(flags & SYNC_WAIT))
253 goto out_unlock;
254 xfs_iflock(ip);
201 } 255 }
202 if (flags & SYNC_DELWRI)
203 xfs_log_force(mp, 0, lflags);
204 256
205 return XFS_ERROR(last_error); 257 if (xfs_inode_clean(ip)) {
258 xfs_ifunlock(ip);
259 goto out_unlock;
260 }
261
262 error = xfs_iflush(ip, (flags & SYNC_WAIT) ?
263 XFS_IFLUSH_SYNC : XFS_IFLUSH_DELWRI);
264
265 out_unlock:
266 xfs_iunlock(ip, XFS_ILOCK_SHARED);
267 IRELE(ip);
268 return error;
269}
270
271/*
272 * Write out pagecache data for the whole filesystem.
273 */
274int
275xfs_sync_data(
276 struct xfs_mount *mp,
277 int flags)
278{
279 int error;
280
281 ASSERT((flags & ~(SYNC_TRYLOCK|SYNC_WAIT)) == 0);
282
283 error = xfs_inode_ag_iterator(mp, xfs_sync_inode_data, flags,
284 XFS_ICI_NO_TAG);
285 if (error)
286 return XFS_ERROR(error);
287
288 xfs_log_force(mp, 0,
289 (flags & SYNC_WAIT) ?
290 XFS_LOG_FORCE | XFS_LOG_SYNC :
291 XFS_LOG_FORCE);
292 return 0;
293}
294
295/*
296 * Write out inode metadata (attributes) for the whole filesystem.
297 */
298int
299xfs_sync_attr(
300 struct xfs_mount *mp,
301 int flags)
302{
303 ASSERT((flags & ~SYNC_WAIT) == 0);
304
305 return xfs_inode_ag_iterator(mp, xfs_sync_inode_attr, flags,
306 XFS_ICI_NO_TAG);
206} 307}
207 308
208STATIC int 309STATIC int
@@ -252,7 +353,7 @@ xfs_sync_fsdata(
252 * If this is xfssyncd() then only sync the superblock if we can 353 * If this is xfssyncd() then only sync the superblock if we can
253 * lock it without sleeping and it is not pinned. 354 * lock it without sleeping and it is not pinned.
254 */ 355 */
255 if (flags & SYNC_BDFLUSH) { 356 if (flags & SYNC_TRYLOCK) {
256 ASSERT(!(flags & SYNC_WAIT)); 357 ASSERT(!(flags & SYNC_WAIT));
257 358
258 bp = xfs_getsb(mp, XFS_BUF_TRYLOCK); 359 bp = xfs_getsb(mp, XFS_BUF_TRYLOCK);
@@ -316,13 +417,13 @@ xfs_quiesce_data(
316 int error; 417 int error;
317 418
318 /* push non-blocking */ 419 /* push non-blocking */
319 xfs_sync_inodes(mp, SYNC_DELWRI|SYNC_BDFLUSH); 420 xfs_sync_data(mp, 0);
320 XFS_QM_DQSYNC(mp, SYNC_BDFLUSH); 421 xfs_qm_sync(mp, SYNC_TRYLOCK);
321 xfs_filestream_flush(mp); 422 xfs_filestream_flush(mp);
322 423
323 /* push and block */ 424 /* push and block */
324 xfs_sync_inodes(mp, SYNC_DELWRI|SYNC_WAIT|SYNC_IOWAIT); 425 xfs_sync_data(mp, SYNC_WAIT);
325 XFS_QM_DQSYNC(mp, SYNC_WAIT); 426 xfs_qm_sync(mp, SYNC_WAIT);
326 427
327 /* write superblock and hoover up shutdown errors */ 428 /* write superblock and hoover up shutdown errors */
328 error = xfs_sync_fsdata(mp, 0); 429 error = xfs_sync_fsdata(mp, 0);
@@ -341,7 +442,7 @@ xfs_quiesce_fs(
341 int count = 0, pincount; 442 int count = 0, pincount;
342 443
343 xfs_flush_buftarg(mp->m_ddev_targp, 0); 444 xfs_flush_buftarg(mp->m_ddev_targp, 0);
344 xfs_reclaim_inodes(mp, 0, XFS_IFLUSH_DELWRI_ELSE_ASYNC); 445 xfs_reclaim_inodes(mp, XFS_IFLUSH_DELWRI_ELSE_ASYNC);
345 446
346 /* 447 /*
347 * This loop must run at least twice. The first instance of the loop 448 * This loop must run at least twice. The first instance of the loop
@@ -350,7 +451,7 @@ xfs_quiesce_fs(
350 * logged before we can write the unmount record. 451 * logged before we can write the unmount record.
351 */ 452 */
352 do { 453 do {
353 xfs_sync_inodes(mp, SYNC_ATTR|SYNC_WAIT); 454 xfs_sync_attr(mp, SYNC_WAIT);
354 pincount = xfs_flush_buftarg(mp->m_ddev_targp, 1); 455 pincount = xfs_flush_buftarg(mp->m_ddev_targp, 1);
355 if (!pincount) { 456 if (!pincount) {
356 delay(50); 457 delay(50);
@@ -433,8 +534,8 @@ xfs_flush_inodes_work(
433 void *arg) 534 void *arg)
434{ 535{
435 struct inode *inode = arg; 536 struct inode *inode = arg;
436 xfs_sync_inodes(mp, SYNC_DELWRI | SYNC_TRYLOCK); 537 xfs_sync_data(mp, SYNC_TRYLOCK);
437 xfs_sync_inodes(mp, SYNC_DELWRI | SYNC_TRYLOCK | SYNC_IOWAIT); 538 xfs_sync_data(mp, SYNC_TRYLOCK | SYNC_WAIT);
438 iput(inode); 539 iput(inode);
439} 540}
440 541
@@ -465,10 +566,10 @@ xfs_sync_worker(
465 566
466 if (!(mp->m_flags & XFS_MOUNT_RDONLY)) { 567 if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
467 xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE); 568 xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE);
468 xfs_reclaim_inodes(mp, 0, XFS_IFLUSH_DELWRI_ELSE_ASYNC); 569 xfs_reclaim_inodes(mp, XFS_IFLUSH_DELWRI_ELSE_ASYNC);
469 /* dgc: errors ignored here */ 570 /* dgc: errors ignored here */
470 error = XFS_QM_DQSYNC(mp, SYNC_BDFLUSH); 571 error = xfs_qm_sync(mp, SYNC_TRYLOCK);
471 error = xfs_sync_fsdata(mp, SYNC_BDFLUSH); 572 error = xfs_sync_fsdata(mp, SYNC_TRYLOCK);
472 if (xfs_log_need_covered(mp)) 573 if (xfs_log_need_covered(mp))
473 error = xfs_commit_dummy_trans(mp, XFS_LOG_FORCE); 574 error = xfs_commit_dummy_trans(mp, XFS_LOG_FORCE);
474 } 575 }
@@ -569,7 +670,7 @@ xfs_reclaim_inode(
569 xfs_ifunlock(ip); 670 xfs_ifunlock(ip);
570 xfs_iunlock(ip, XFS_ILOCK_EXCL); 671 xfs_iunlock(ip, XFS_ILOCK_EXCL);
571 } 672 }
572 return 1; 673 return -EAGAIN;
573 } 674 }
574 __xfs_iflags_set(ip, XFS_IRECLAIM); 675 __xfs_iflags_set(ip, XFS_IRECLAIM);
575 spin_unlock(&ip->i_flags_lock); 676 spin_unlock(&ip->i_flags_lock);
@@ -654,101 +755,27 @@ xfs_inode_clear_reclaim_tag(
654 xfs_put_perag(mp, pag); 755 xfs_put_perag(mp, pag);
655} 756}
656 757
657 758STATIC int
658STATIC void 759xfs_reclaim_inode_now(
659xfs_reclaim_inodes_ag( 760 struct xfs_inode *ip,
660 xfs_mount_t *mp, 761 struct xfs_perag *pag,
661 int ag, 762 int flags)
662 int noblock,
663 int mode)
664{ 763{
665 xfs_inode_t *ip = NULL; 764 /* ignore if already under reclaim */
666 xfs_perag_t *pag = &mp->m_perag[ag]; 765 if (xfs_iflags_test(ip, XFS_IRECLAIM)) {
667 int nr_found;
668 uint32_t first_index;
669 int skipped;
670
671restart:
672 first_index = 0;
673 skipped = 0;
674 do {
675 /*
676 * use a gang lookup to find the next inode in the tree
677 * as the tree is sparse and a gang lookup walks to find
678 * the number of objects requested.
679 */
680 read_lock(&pag->pag_ici_lock);
681 nr_found = radix_tree_gang_lookup_tag(&pag->pag_ici_root,
682 (void**)&ip, first_index, 1,
683 XFS_ICI_RECLAIM_TAG);
684
685 if (!nr_found) {
686 read_unlock(&pag->pag_ici_lock);
687 break;
688 }
689
690 /*
691 * Update the index for the next lookup. Catch overflows
692 * into the next AG range which can occur if we have inodes
693 * in the last block of the AG and we are currently
694 * pointing to the last inode.
695 */
696 first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
697 if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino)) {
698 read_unlock(&pag->pag_ici_lock);
699 break;
700 }
701
702 /* ignore if already under reclaim */
703 if (xfs_iflags_test(ip, XFS_IRECLAIM)) {
704 read_unlock(&pag->pag_ici_lock);
705 continue;
706 }
707
708 if (noblock) {
709 if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
710 read_unlock(&pag->pag_ici_lock);
711 continue;
712 }
713 if (xfs_ipincount(ip) ||
714 !xfs_iflock_nowait(ip)) {
715 xfs_iunlock(ip, XFS_ILOCK_EXCL);
716 read_unlock(&pag->pag_ici_lock);
717 continue;
718 }
719 }
720 read_unlock(&pag->pag_ici_lock); 766 read_unlock(&pag->pag_ici_lock);
721 767 return 0;
722 /*
723 * hmmm - this is an inode already in reclaim. Do
724 * we even bother catching it here?
725 */
726 if (xfs_reclaim_inode(ip, noblock, mode))
727 skipped++;
728 } while (nr_found);
729
730 if (skipped) {
731 delay(1);
732 goto restart;
733 } 768 }
734 return; 769 read_unlock(&pag->pag_ici_lock);
735 770
771 return xfs_reclaim_inode(ip, 0, flags);
736} 772}
737 773
738int 774int
739xfs_reclaim_inodes( 775xfs_reclaim_inodes(
740 xfs_mount_t *mp, 776 xfs_mount_t *mp,
741 int noblock,
742 int mode) 777 int mode)
743{ 778{
744 int i; 779 return xfs_inode_ag_iterator(mp, xfs_reclaim_inode_now, mode,
745 780 XFS_ICI_RECLAIM_TAG);
746 for (i = 0; i < mp->m_sb.sb_agcount; i++) {
747 if (!mp->m_perag[i].pag_ici_init)
748 continue;
749 xfs_reclaim_inodes_ag(mp, i, noblock, mode);
750 }
751 return 0;
752} 781}
753
754
diff --git a/fs/xfs/linux-2.6/xfs_sync.h b/fs/xfs/linux-2.6/xfs_sync.h
index 308d5bf6dfbd..2a10301c99c7 100644
--- a/fs/xfs/linux-2.6/xfs_sync.h
+++ b/fs/xfs/linux-2.6/xfs_sync.h
@@ -29,17 +29,14 @@ typedef struct xfs_sync_work {
29 struct completion *w_completion; 29 struct completion *w_completion;
30} xfs_sync_work_t; 30} xfs_sync_work_t;
31 31
32#define SYNC_ATTR 0x0001 /* sync attributes */ 32#define SYNC_WAIT 0x0001 /* wait for i/o to complete */
33#define SYNC_DELWRI 0x0002 /* look at delayed writes */ 33#define SYNC_TRYLOCK 0x0002 /* only try to lock inodes */
34#define SYNC_WAIT 0x0004 /* wait for i/o to complete */
35#define SYNC_BDFLUSH 0x0008 /* BDFLUSH is calling -- don't block */
36#define SYNC_IOWAIT 0x0010 /* wait for all I/O to complete */
37#define SYNC_TRYLOCK 0x0020 /* only try to lock inodes */
38 34
39int xfs_syncd_init(struct xfs_mount *mp); 35int xfs_syncd_init(struct xfs_mount *mp);
40void xfs_syncd_stop(struct xfs_mount *mp); 36void xfs_syncd_stop(struct xfs_mount *mp);
41 37
42int xfs_sync_inodes(struct xfs_mount *mp, int flags); 38int xfs_sync_attr(struct xfs_mount *mp, int flags);
39int xfs_sync_data(struct xfs_mount *mp, int flags);
43int xfs_sync_fsdata(struct xfs_mount *mp, int flags); 40int xfs_sync_fsdata(struct xfs_mount *mp, int flags);
44 41
45int xfs_quiesce_data(struct xfs_mount *mp); 42int xfs_quiesce_data(struct xfs_mount *mp);
@@ -48,10 +45,16 @@ void xfs_quiesce_attr(struct xfs_mount *mp);
48void xfs_flush_inodes(struct xfs_inode *ip); 45void xfs_flush_inodes(struct xfs_inode *ip);
49 46
50int xfs_reclaim_inode(struct xfs_inode *ip, int locked, int sync_mode); 47int xfs_reclaim_inode(struct xfs_inode *ip, int locked, int sync_mode);
51int xfs_reclaim_inodes(struct xfs_mount *mp, int noblock, int mode); 48int xfs_reclaim_inodes(struct xfs_mount *mp, int mode);
52 49
53void xfs_inode_set_reclaim_tag(struct xfs_inode *ip); 50void xfs_inode_set_reclaim_tag(struct xfs_inode *ip);
54void xfs_inode_clear_reclaim_tag(struct xfs_inode *ip); 51void xfs_inode_clear_reclaim_tag(struct xfs_inode *ip);
55void __xfs_inode_clear_reclaim_tag(struct xfs_mount *mp, struct xfs_perag *pag, 52void __xfs_inode_clear_reclaim_tag(struct xfs_mount *mp, struct xfs_perag *pag,
56 struct xfs_inode *ip); 53 struct xfs_inode *ip);
54
55int xfs_sync_inode_valid(struct xfs_inode *ip, struct xfs_perag *pag);
56int xfs_inode_ag_iterator(struct xfs_mount *mp,
57 int (*execute)(struct xfs_inode *ip, struct xfs_perag *pag, int flags),
58 int flags, int tag);
59
57#endif 60#endif
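
With the flags reduced to SYNC_WAIT and SYNC_TRYLOCK, the data and attribute sync paths above are now just callbacks fed to the generic AG iterator. As a rough usage sketch (not part of the patch; the callback name is hypothetical), a new per-inode pass built on xfs_inode_ag_iterator() and xfs_sync_inode_valid() would look like this:

/*
 * Usage sketch only, not part of the patch.  xfs_inode_ag_lookup()
 * returns the inode with pag_ici_lock held; xfs_sync_inode_valid()
 * takes an inode reference and drops that lock for us.
 */
STATIC int
xfs_example_inode_pass(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	int			flags)
{
	int			error;

	error = xfs_sync_inode_valid(ip, pag);
	if (error)
		return error;

	/* ... per-inode work goes here ... */

	IRELE(ip);
	return 0;
}

/* walk every initialised AG without a radix tree tag filter:
 *	error = xfs_inode_ag_iterator(mp, xfs_example_inode_pass, 0,
 *				      XFS_ICI_NO_TAG);
 */
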
diff --git a/fs/xfs/linux-2.6/xfs_xattr.c b/fs/xfs/linux-2.6/xfs_xattr.c
index 964621fde6ed..497c7fb75cc1 100644
--- a/fs/xfs/linux-2.6/xfs_xattr.c
+++ b/fs/xfs/linux-2.6/xfs_xattr.c
@@ -29,67 +29,6 @@
29#include <linux/xattr.h> 29#include <linux/xattr.h>
30 30
31 31
32/*
33 * ACL handling. Should eventually be moved into xfs_acl.c
34 */
35
36static int
37xfs_decode_acl(const char *name)
38{
39 if (strcmp(name, "posix_acl_access") == 0)
40 return _ACL_TYPE_ACCESS;
41 else if (strcmp(name, "posix_acl_default") == 0)
42 return _ACL_TYPE_DEFAULT;
43 return -EINVAL;
44}
45
46/*
47 * Get system extended attributes which at the moment only
48 * includes Posix ACLs.
49 */
50static int
51xfs_xattr_system_get(struct inode *inode, const char *name,
52 void *buffer, size_t size)
53{
54 int acl;
55
56 acl = xfs_decode_acl(name);
57 if (acl < 0)
58 return acl;
59
60 return xfs_acl_vget(inode, buffer, size, acl);
61}
62
63static int
64xfs_xattr_system_set(struct inode *inode, const char *name,
65 const void *value, size_t size, int flags)
66{
67 int acl;
68
69 acl = xfs_decode_acl(name);
70 if (acl < 0)
71 return acl;
72 if (flags & XATTR_CREATE)
73 return -EINVAL;
74
75 if (!value)
76 return xfs_acl_vremove(inode, acl);
77
78 return xfs_acl_vset(inode, (void *)value, size, acl);
79}
80
81static struct xattr_handler xfs_xattr_system_handler = {
82 .prefix = XATTR_SYSTEM_PREFIX,
83 .get = xfs_xattr_system_get,
84 .set = xfs_xattr_system_set,
85};
86
87
88/*
89 * Real xattr handling. The only difference between the namespaces is
90 * a flag passed to the low-level attr code.
91 */
92
93static int 32static int
94__xfs_xattr_get(struct inode *inode, const char *name, 33__xfs_xattr_get(struct inode *inode, const char *name,
95 void *value, size_t size, int xflags) 34 void *value, size_t size, int xflags)
@@ -199,7 +138,9 @@ struct xattr_handler *xfs_xattr_handlers[] = {
199 &xfs_xattr_user_handler, 138 &xfs_xattr_user_handler,
200 &xfs_xattr_trusted_handler, 139 &xfs_xattr_trusted_handler,
201 &xfs_xattr_security_handler, 140 &xfs_xattr_security_handler,
141#ifdef CONFIG_XFS_POSIX_ACL
202 &xfs_xattr_system_handler, 142 &xfs_xattr_system_handler,
143#endif
203 NULL 144 NULL
204}; 145};
205 146
@@ -310,7 +251,7 @@ xfs_vn_listxattr(struct dentry *dentry, char *data, size_t size)
310 /* 251 /*
311 * Then add the two synthetic ACL attributes. 252 * Then add the two synthetic ACL attributes.
312 */ 253 */
313 if (xfs_acl_vhasacl_access(inode)) { 254 if (posix_acl_access_exists(inode)) {
314 error = list_one_attr(POSIX_ACL_XATTR_ACCESS, 255 error = list_one_attr(POSIX_ACL_XATTR_ACCESS,
315 strlen(POSIX_ACL_XATTR_ACCESS) + 1, 256 strlen(POSIX_ACL_XATTR_ACCESS) + 1,
316 data, size, &context.count); 257 data, size, &context.count);
@@ -318,7 +259,7 @@ xfs_vn_listxattr(struct dentry *dentry, char *data, size_t size)
318 return error; 259 return error;
319 } 260 }
320 261
321 if (xfs_acl_vhasacl_default(inode)) { 262 if (posix_acl_default_exists(inode)) {
322 error = list_one_attr(POSIX_ACL_XATTR_DEFAULT, 263 error = list_one_attr(POSIX_ACL_XATTR_DEFAULT,
323 strlen(POSIX_ACL_XATTR_DEFAULT) + 1, 264 strlen(POSIX_ACL_XATTR_DEFAULT) + 1,
324 data, size, &context.count); 265 data, size, &context.count);
diff --git a/fs/xfs/quota/xfs_dquot.c b/fs/xfs/quota/xfs_dquot.c
index e4babcc63423..2f3f2229eaaf 100644
--- a/fs/xfs/quota/xfs_dquot.c
+++ b/fs/xfs/quota/xfs_dquot.c
@@ -42,7 +42,6 @@
42#include "xfs_error.h" 42#include "xfs_error.h"
43#include "xfs_itable.h" 43#include "xfs_itable.h"
44#include "xfs_rw.h" 44#include "xfs_rw.h"
45#include "xfs_acl.h"
46#include "xfs_attr.h" 45#include "xfs_attr.h"
47#include "xfs_buf_item.h" 46#include "xfs_buf_item.h"
48#include "xfs_trans_space.h" 47#include "xfs_trans_space.h"
@@ -1194,7 +1193,9 @@ void
1194xfs_qm_dqrele( 1193xfs_qm_dqrele(
1195 xfs_dquot_t *dqp) 1194 xfs_dquot_t *dqp)
1196{ 1195{
1197 ASSERT(dqp); 1196 if (!dqp)
1197 return;
1198
1198 xfs_dqtrace_entry(dqp, "DQRELE"); 1199 xfs_dqtrace_entry(dqp, "DQRELE");
1199 1200
1200 xfs_dqlock(dqp); 1201 xfs_dqlock(dqp);
diff --git a/fs/xfs/quota/xfs_dquot.h b/fs/xfs/quota/xfs_dquot.h
index de0f402ddb4c..6533ead9b889 100644
--- a/fs/xfs/quota/xfs_dquot.h
+++ b/fs/xfs/quota/xfs_dquot.h
@@ -181,7 +181,6 @@ extern void xfs_qm_adjust_dqlimits(xfs_mount_t *,
181extern int xfs_qm_dqget(xfs_mount_t *, xfs_inode_t *, 181extern int xfs_qm_dqget(xfs_mount_t *, xfs_inode_t *,
182 xfs_dqid_t, uint, uint, xfs_dquot_t **); 182 xfs_dqid_t, uint, uint, xfs_dquot_t **);
183extern void xfs_qm_dqput(xfs_dquot_t *); 183extern void xfs_qm_dqput(xfs_dquot_t *);
184extern void xfs_qm_dqrele(xfs_dquot_t *);
185extern void xfs_dqlock(xfs_dquot_t *); 184extern void xfs_dqlock(xfs_dquot_t *);
186extern void xfs_dqlock2(xfs_dquot_t *, xfs_dquot_t *); 185extern void xfs_dqlock2(xfs_dquot_t *, xfs_dquot_t *);
187extern void xfs_dqunlock(xfs_dquot_t *); 186extern void xfs_dqunlock(xfs_dquot_t *);
diff --git a/fs/xfs/quota/xfs_dquot_item.c b/fs/xfs/quota/xfs_dquot_item.c
index 1728f6a7c4f5..d0d4a9a0bbd7 100644
--- a/fs/xfs/quota/xfs_dquot_item.c
+++ b/fs/xfs/quota/xfs_dquot_item.c
@@ -42,7 +42,6 @@
42#include "xfs_error.h" 42#include "xfs_error.h"
43#include "xfs_itable.h" 43#include "xfs_itable.h"
44#include "xfs_rw.h" 44#include "xfs_rw.h"
45#include "xfs_acl.h"
46#include "xfs_attr.h" 45#include "xfs_attr.h"
47#include "xfs_buf_item.h" 46#include "xfs_buf_item.h"
48#include "xfs_trans_priv.h" 47#include "xfs_trans_priv.h"
diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c
index 5b6695049e00..45b1bfef7388 100644
--- a/fs/xfs/quota/xfs_qm.c
+++ b/fs/xfs/quota/xfs_qm.c
@@ -42,7 +42,6 @@
42#include "xfs_error.h" 42#include "xfs_error.h"
43#include "xfs_bmap.h" 43#include "xfs_bmap.h"
44#include "xfs_rw.h" 44#include "xfs_rw.h"
45#include "xfs_acl.h"
46#include "xfs_attr.h" 45#include "xfs_attr.h"
47#include "xfs_buf_item.h" 46#include "xfs_buf_item.h"
48#include "xfs_trans_space.h" 47#include "xfs_trans_space.h"
@@ -287,11 +286,13 @@ xfs_qm_rele_quotafs_ref(
287 * Just destroy the quotainfo structure. 286 * Just destroy the quotainfo structure.
288 */ 287 */
289void 288void
290xfs_qm_unmount_quotadestroy( 289xfs_qm_unmount(
291 xfs_mount_t *mp) 290 struct xfs_mount *mp)
292{ 291{
293 if (mp->m_quotainfo) 292 if (mp->m_quotainfo) {
293 xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL | XFS_QMOPT_UMOUNTING);
294 xfs_qm_destroy_quotainfo(mp); 294 xfs_qm_destroy_quotainfo(mp);
295 }
295} 296}
296 297
297 298
@@ -385,8 +386,13 @@ xfs_qm_mount_quotas(
385 if (error) { 386 if (error) {
386 xfs_fs_cmn_err(CE_WARN, mp, 387 xfs_fs_cmn_err(CE_WARN, mp,
387 "Failed to initialize disk quotas."); 388 "Failed to initialize disk quotas.");
389 return;
388 } 390 }
389 return; 391
392#ifdef QUOTADEBUG
393 if (XFS_IS_QUOTA_ON(mp))
394 xfs_qm_internalqcheck(mp);
395#endif
390} 396}
391 397
392/* 398/*
@@ -774,12 +780,11 @@ xfs_qm_dqattach_grouphint(
774 * Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON 780 * Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON
775 * into account. 781 * into account.
776 * If XFS_QMOPT_DQALLOC, the dquot(s) will be allocated if needed. 782 * If XFS_QMOPT_DQALLOC, the dquot(s) will be allocated if needed.
777 * If XFS_QMOPT_ILOCKED, then inode sent is already locked EXCL.
778 * Inode may get unlocked and relocked in here, and the caller must deal with 783 * Inode may get unlocked and relocked in here, and the caller must deal with
779 * the consequences. 784 * the consequences.
780 */ 785 */
781int 786int
782xfs_qm_dqattach( 787xfs_qm_dqattach_locked(
783 xfs_inode_t *ip, 788 xfs_inode_t *ip,
784 uint flags) 789 uint flags)
785{ 790{
@@ -787,17 +792,14 @@ xfs_qm_dqattach(
787 uint nquotas = 0; 792 uint nquotas = 0;
788 int error = 0; 793 int error = 0;
789 794
790 if ((! XFS_IS_QUOTA_ON(mp)) || 795 if (!XFS_IS_QUOTA_RUNNING(mp) ||
791 (! XFS_NOT_DQATTACHED(mp, ip)) || 796 !XFS_IS_QUOTA_ON(mp) ||
792 (ip->i_ino == mp->m_sb.sb_uquotino) || 797 !XFS_NOT_DQATTACHED(mp, ip) ||
793 (ip->i_ino == mp->m_sb.sb_gquotino)) 798 ip->i_ino == mp->m_sb.sb_uquotino ||
799 ip->i_ino == mp->m_sb.sb_gquotino)
794 return 0; 800 return 0;
795 801
796 ASSERT((flags & XFS_QMOPT_ILOCKED) == 0 || 802 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
797 xfs_isilocked(ip, XFS_ILOCK_EXCL));
798
799 if (! (flags & XFS_QMOPT_ILOCKED))
800 xfs_ilock(ip, XFS_ILOCK_EXCL);
801 803
802 if (XFS_IS_UQUOTA_ON(mp)) { 804 if (XFS_IS_UQUOTA_ON(mp)) {
803 error = xfs_qm_dqattach_one(ip, ip->i_d.di_uid, XFS_DQ_USER, 805 error = xfs_qm_dqattach_one(ip, ip->i_d.di_uid, XFS_DQ_USER,
@@ -849,8 +851,7 @@ xfs_qm_dqattach(
849 xfs_qm_dqattach_grouphint(ip->i_udquot, ip->i_gdquot); 851 xfs_qm_dqattach_grouphint(ip->i_udquot, ip->i_gdquot);
850 } 852 }
851 853
852 done: 854 done:
853
854#ifdef QUOTADEBUG 855#ifdef QUOTADEBUG
855 if (! error) { 856 if (! error) {
856 if (XFS_IS_UQUOTA_ON(mp)) 857 if (XFS_IS_UQUOTA_ON(mp))
@@ -858,15 +859,22 @@ xfs_qm_dqattach(
858 if (XFS_IS_OQUOTA_ON(mp)) 859 if (XFS_IS_OQUOTA_ON(mp))
859 ASSERT(ip->i_gdquot); 860 ASSERT(ip->i_gdquot);
860 } 861 }
862 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
861#endif 863#endif
864 return error;
865}
862 866
863 if (! (flags & XFS_QMOPT_ILOCKED)) 867int
864 xfs_iunlock(ip, XFS_ILOCK_EXCL); 868xfs_qm_dqattach(
869 struct xfs_inode *ip,
870 uint flags)
871{
872 int error;
873
874 xfs_ilock(ip, XFS_ILOCK_EXCL);
875 error = xfs_qm_dqattach_locked(ip, flags);
876 xfs_iunlock(ip, XFS_ILOCK_EXCL);
865 877
866#ifdef QUOTADEBUG
867 else
868 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
869#endif
870 return error; 878 return error;
871} 879}
872 880
@@ -896,11 +904,6 @@ xfs_qm_dqdetach(
896 } 904 }
897} 905}
898 906
899/*
900 * This is called to sync quotas. We can be told to use non-blocking
901 * semantics by either the SYNC_BDFLUSH flag or the absence of the
902 * SYNC_WAIT flag.
903 */
904int 907int
905xfs_qm_sync( 908xfs_qm_sync(
906 xfs_mount_t *mp, 909 xfs_mount_t *mp,
@@ -909,17 +912,13 @@ xfs_qm_sync(
909 int recl, restarts; 912 int recl, restarts;
910 xfs_dquot_t *dqp; 913 xfs_dquot_t *dqp;
911 uint flush_flags; 914 uint flush_flags;
912 boolean_t nowait;
913 int error; 915 int error;
914 916
915 if (! XFS_IS_QUOTA_ON(mp)) 917 if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
916 return 0; 918 return 0;
917 919
920 flush_flags = (flags & SYNC_WAIT) ? XFS_QMOPT_SYNC : XFS_QMOPT_DELWRI;
918 restarts = 0; 921 restarts = 0;
919 /*
920 * We won't block unless we are asked to.
921 */
922 nowait = (boolean_t)(flags & SYNC_BDFLUSH || (flags & SYNC_WAIT) == 0);
923 922
924 again: 923 again:
925 xfs_qm_mplist_lock(mp); 924 xfs_qm_mplist_lock(mp);
@@ -939,18 +938,10 @@ xfs_qm_sync(
939 * don't 'seem' to be dirty. ie. don't acquire dqlock. 938 * don't 'seem' to be dirty. ie. don't acquire dqlock.
940 * This is very similar to what xfs_sync does with inodes. 939 * This is very similar to what xfs_sync does with inodes.
941 */ 940 */
942 if (flags & SYNC_BDFLUSH) { 941 if (flags & SYNC_TRYLOCK) {
943 if (! XFS_DQ_IS_DIRTY(dqp)) 942 if (!XFS_DQ_IS_DIRTY(dqp))
944 continue; 943 continue;
945 } 944 if (!xfs_qm_dqlock_nowait(dqp))
946
947 if (nowait) {
948 /*
949 * Try to acquire the dquot lock. We are NOT out of
950 * lock order, but we just don't want to wait for this
951 * lock, unless somebody wanted us to.
952 */
953 if (! xfs_qm_dqlock_nowait(dqp))
954 continue; 945 continue;
955 } else { 946 } else {
956 xfs_dqlock(dqp); 947 xfs_dqlock(dqp);
@@ -967,7 +958,7 @@ xfs_qm_sync(
967 /* XXX a sentinel would be better */ 958 /* XXX a sentinel would be better */
968 recl = XFS_QI_MPLRECLAIMS(mp); 959 recl = XFS_QI_MPLRECLAIMS(mp);
969 if (!xfs_dqflock_nowait(dqp)) { 960 if (!xfs_dqflock_nowait(dqp)) {
970 if (nowait) { 961 if (flags & SYNC_TRYLOCK) {
971 xfs_dqunlock(dqp); 962 xfs_dqunlock(dqp);
972 continue; 963 continue;
973 } 964 }
@@ -985,7 +976,6 @@ xfs_qm_sync(
985 * Let go of the mplist lock. We don't want to hold it 976 * Let go of the mplist lock. We don't want to hold it
986 * across a disk write 977 * across a disk write
987 */ 978 */
988 flush_flags = (nowait) ? XFS_QMOPT_DELWRI : XFS_QMOPT_SYNC;
989 xfs_qm_mplist_unlock(mp); 979 xfs_qm_mplist_unlock(mp);
990 xfs_dqtrace_entry(dqp, "XQM_SYNC: DQFLUSH"); 980 xfs_dqtrace_entry(dqp, "XQM_SYNC: DQFLUSH");
991 error = xfs_qm_dqflush(dqp, flush_flags); 981 error = xfs_qm_dqflush(dqp, flush_flags);
@@ -2319,20 +2309,20 @@ xfs_qm_write_sb_changes(
2319 */ 2309 */
2320int 2310int
2321xfs_qm_vop_dqalloc( 2311xfs_qm_vop_dqalloc(
2322 xfs_mount_t *mp, 2312 struct xfs_inode *ip,
2323 xfs_inode_t *ip, 2313 uid_t uid,
2324 uid_t uid, 2314 gid_t gid,
2325 gid_t gid, 2315 prid_t prid,
2326 prid_t prid, 2316 uint flags,
2327 uint flags, 2317 struct xfs_dquot **O_udqpp,
2328 xfs_dquot_t **O_udqpp, 2318 struct xfs_dquot **O_gdqpp)
2329 xfs_dquot_t **O_gdqpp)
2330{ 2319{
2331 int error; 2320 struct xfs_mount *mp = ip->i_mount;
2332 xfs_dquot_t *uq, *gq; 2321 struct xfs_dquot *uq, *gq;
2333 uint lockflags; 2322 int error;
2323 uint lockflags;
2334 2324
2335 if (!XFS_IS_QUOTA_ON(mp)) 2325 if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
2336 return 0; 2326 return 0;
2337 2327
2338 lockflags = XFS_ILOCK_EXCL; 2328 lockflags = XFS_ILOCK_EXCL;
@@ -2346,8 +2336,8 @@ xfs_qm_vop_dqalloc(
2346 * if necessary. The dquot(s) will not be locked. 2336 * if necessary. The dquot(s) will not be locked.
2347 */ 2337 */
2348 if (XFS_NOT_DQATTACHED(mp, ip)) { 2338 if (XFS_NOT_DQATTACHED(mp, ip)) {
2349 if ((error = xfs_qm_dqattach(ip, XFS_QMOPT_DQALLOC | 2339 error = xfs_qm_dqattach_locked(ip, XFS_QMOPT_DQALLOC);
2350 XFS_QMOPT_ILOCKED))) { 2340 if (error) {
2351 xfs_iunlock(ip, lockflags); 2341 xfs_iunlock(ip, lockflags);
2352 return error; 2342 return error;
2353 } 2343 }
@@ -2469,6 +2459,7 @@ xfs_qm_vop_chown(
2469 uint bfield = XFS_IS_REALTIME_INODE(ip) ? 2459 uint bfield = XFS_IS_REALTIME_INODE(ip) ?
2470 XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT; 2460 XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;
2471 2461
2462
2472 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); 2463 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
2473 ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount)); 2464 ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount));
2474 2465
@@ -2508,13 +2499,13 @@ xfs_qm_vop_chown_reserve(
2508 xfs_dquot_t *gdqp, 2499 xfs_dquot_t *gdqp,
2509 uint flags) 2500 uint flags)
2510{ 2501{
2511 int error; 2502 xfs_mount_t *mp = ip->i_mount;
2512 xfs_mount_t *mp;
2513 uint delblks, blkflags, prjflags = 0; 2503 uint delblks, blkflags, prjflags = 0;
2514 xfs_dquot_t *unresudq, *unresgdq, *delblksudq, *delblksgdq; 2504 xfs_dquot_t *unresudq, *unresgdq, *delblksudq, *delblksgdq;
2505 int error;
2506
2515 2507
2516 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)); 2508 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
2517 mp = ip->i_mount;
2518 ASSERT(XFS_IS_QUOTA_RUNNING(mp)); 2509 ASSERT(XFS_IS_QUOTA_RUNNING(mp));
2519 2510
2520 delblks = ip->i_delayed_blks; 2511 delblks = ip->i_delayed_blks;
@@ -2582,28 +2573,23 @@ xfs_qm_vop_chown_reserve(
2582 2573
2583int 2574int
2584xfs_qm_vop_rename_dqattach( 2575xfs_qm_vop_rename_dqattach(
2585 xfs_inode_t **i_tab) 2576 struct xfs_inode **i_tab)
2586{ 2577{
2587 xfs_inode_t *ip; 2578 struct xfs_mount *mp = i_tab[0]->i_mount;
2588 int i; 2579 int i;
2589 int error;
2590 2580
2591 ip = i_tab[0]; 2581 if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
2592
2593 if (! XFS_IS_QUOTA_ON(ip->i_mount))
2594 return 0; 2582 return 0;
2595 2583
2596 if (XFS_NOT_DQATTACHED(ip->i_mount, ip)) { 2584 for (i = 0; (i < 4 && i_tab[i]); i++) {
2597 error = xfs_qm_dqattach(ip, 0); 2585 struct xfs_inode *ip = i_tab[i];
2598 if (error) 2586 int error;
2599 return error; 2587
2600 }
2601 for (i = 1; (i < 4 && i_tab[i]); i++) {
2602 /* 2588 /*
2603 * Watch out for duplicate entries in the table. 2589 * Watch out for duplicate entries in the table.
2604 */ 2590 */
2605 if ((ip = i_tab[i]) != i_tab[i-1]) { 2591 if (i == 0 || ip != i_tab[i-1]) {
2606 if (XFS_NOT_DQATTACHED(ip->i_mount, ip)) { 2592 if (XFS_NOT_DQATTACHED(mp, ip)) {
2607 error = xfs_qm_dqattach(ip, 0); 2593 error = xfs_qm_dqattach(ip, 0);
2608 if (error) 2594 if (error)
2609 return error; 2595 return error;
@@ -2614,17 +2600,19 @@ xfs_qm_vop_rename_dqattach(
2614} 2600}
2615 2601
2616void 2602void
2617xfs_qm_vop_dqattach_and_dqmod_newinode( 2603xfs_qm_vop_create_dqattach(
2618 xfs_trans_t *tp, 2604 struct xfs_trans *tp,
2619 xfs_inode_t *ip, 2605 struct xfs_inode *ip,
2620 xfs_dquot_t *udqp, 2606 struct xfs_dquot *udqp,
2621 xfs_dquot_t *gdqp) 2607 struct xfs_dquot *gdqp)
2622{ 2608{
2623 if (!XFS_IS_QUOTA_ON(tp->t_mountp)) 2609 struct xfs_mount *mp = tp->t_mountp;
2610
2611 if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
2624 return; 2612 return;
2625 2613
2626 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); 2614 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
2627 ASSERT(XFS_IS_QUOTA_RUNNING(tp->t_mountp)); 2615 ASSERT(XFS_IS_QUOTA_RUNNING(mp));
2628 2616
2629 if (udqp) { 2617 if (udqp) {
2630 xfs_dqlock(udqp); 2618 xfs_dqlock(udqp);
@@ -2632,7 +2620,7 @@ xfs_qm_vop_dqattach_and_dqmod_newinode(
2632 xfs_dqunlock(udqp); 2620 xfs_dqunlock(udqp);
2633 ASSERT(ip->i_udquot == NULL); 2621 ASSERT(ip->i_udquot == NULL);
2634 ip->i_udquot = udqp; 2622 ip->i_udquot = udqp;
2635 ASSERT(XFS_IS_UQUOTA_ON(tp->t_mountp)); 2623 ASSERT(XFS_IS_UQUOTA_ON(mp));
2636 ASSERT(ip->i_d.di_uid == be32_to_cpu(udqp->q_core.d_id)); 2624 ASSERT(ip->i_d.di_uid == be32_to_cpu(udqp->q_core.d_id));
2637 xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1); 2625 xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1);
2638 } 2626 }
@@ -2642,8 +2630,8 @@ xfs_qm_vop_dqattach_and_dqmod_newinode(
2642 xfs_dqunlock(gdqp); 2630 xfs_dqunlock(gdqp);
2643 ASSERT(ip->i_gdquot == NULL); 2631 ASSERT(ip->i_gdquot == NULL);
2644 ip->i_gdquot = gdqp; 2632 ip->i_gdquot = gdqp;
2645 ASSERT(XFS_IS_OQUOTA_ON(tp->t_mountp)); 2633 ASSERT(XFS_IS_OQUOTA_ON(mp));
2646 ASSERT((XFS_IS_GQUOTA_ON(tp->t_mountp) ? 2634 ASSERT((XFS_IS_GQUOTA_ON(mp) ?
2647 ip->i_d.di_gid : ip->i_d.di_projid) == 2635 ip->i_d.di_gid : ip->i_d.di_projid) ==
2648 be32_to_cpu(gdqp->q_core.d_id)); 2636 be32_to_cpu(gdqp->q_core.d_id));
2649 xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1); 2637 xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
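Note on the dqattach rework above: the old single entry point is split into xfs_qm_dqattach_locked(), which asserts that the caller already holds the inode lock, and a thin xfs_qm_dqattach() wrapper that takes and drops XFS_ILOCK_EXCL around it, replacing the old XFS_QMOPT_ILOCKED flag. Below is a minimal userspace sketch of that locked/unlocked split; the struct, the attach_dquots*() names and the lock_held debug field are invented for illustration and are not XFS code.

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

struct inode {
        pthread_mutex_t ilock;          /* stands in for the XFS inode lock */
        int             dquots_attached;
        int             lock_held;      /* debug aid for the assert below */
};

/* Caller must already hold ip->ilock (mirrors xfs_qm_dqattach_locked). */
static int attach_dquots_locked(struct inode *ip)
{
        assert(ip->lock_held);          /* like ASSERT(xfs_isilocked(...)) */
        if (ip->dquots_attached)
                return 0;               /* nothing to do, already set up */
        ip->dquots_attached = 1;
        return 0;
}

/* Convenience wrapper that handles the locking itself (mirrors xfs_qm_dqattach). */
static int attach_dquots(struct inode *ip)
{
        int error;

        pthread_mutex_lock(&ip->ilock);
        ip->lock_held = 1;
        error = attach_dquots_locked(ip);
        ip->lock_held = 0;
        pthread_mutex_unlock(&ip->ilock);
        return error;
}

int main(void)
{
        struct inode ip = { .ilock = PTHREAD_MUTEX_INITIALIZER };

        printf("attach: %d, attached=%d\n", attach_dquots(&ip), ip.dquots_attached);
        return 0;
}

The point of the split is that callers which already hold the lock, such as xfs_qm_vop_dqalloc() in the hunk above, can call the _locked variant directly instead of passing a flag that suppresses the locking.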
diff --git a/fs/xfs/quota/xfs_qm.h b/fs/xfs/quota/xfs_qm.h
index a371954cae1b..495564b8af38 100644
--- a/fs/xfs/quota/xfs_qm.h
+++ b/fs/xfs/quota/xfs_qm.h
@@ -127,8 +127,6 @@ typedef struct xfs_quotainfo {
127} xfs_quotainfo_t; 127} xfs_quotainfo_t;
128 128
129 129
130extern xfs_dqtrxops_t xfs_trans_dquot_ops;
131
132extern void xfs_trans_mod_dquot(xfs_trans_t *, xfs_dquot_t *, uint, long); 130extern void xfs_trans_mod_dquot(xfs_trans_t *, xfs_dquot_t *, uint, long);
133extern int xfs_trans_reserve_quota_bydquots(xfs_trans_t *, xfs_mount_t *, 131extern int xfs_trans_reserve_quota_bydquots(xfs_trans_t *, xfs_mount_t *,
134 xfs_dquot_t *, xfs_dquot_t *, long, long, uint); 132 xfs_dquot_t *, xfs_dquot_t *, long, long, uint);
@@ -159,17 +157,11 @@ typedef struct xfs_dquot_acct {
159#define XFS_QM_RTBWARNLIMIT 5 157#define XFS_QM_RTBWARNLIMIT 5
160 158
161extern void xfs_qm_destroy_quotainfo(xfs_mount_t *); 159extern void xfs_qm_destroy_quotainfo(xfs_mount_t *);
162extern void xfs_qm_mount_quotas(xfs_mount_t *);
163extern int xfs_qm_quotacheck(xfs_mount_t *); 160extern int xfs_qm_quotacheck(xfs_mount_t *);
164extern void xfs_qm_unmount_quotadestroy(xfs_mount_t *);
165extern void xfs_qm_unmount_quotas(xfs_mount_t *);
166extern int xfs_qm_write_sb_changes(xfs_mount_t *, __int64_t); 161extern int xfs_qm_write_sb_changes(xfs_mount_t *, __int64_t);
167extern int xfs_qm_sync(xfs_mount_t *, int);
168 162
169/* dquot stuff */ 163/* dquot stuff */
170extern boolean_t xfs_qm_dqalloc_incore(xfs_dquot_t **); 164extern boolean_t xfs_qm_dqalloc_incore(xfs_dquot_t **);
171extern int xfs_qm_dqattach(xfs_inode_t *, uint);
172extern void xfs_qm_dqdetach(xfs_inode_t *);
173extern int xfs_qm_dqpurge_all(xfs_mount_t *, uint); 165extern int xfs_qm_dqpurge_all(xfs_mount_t *, uint);
174extern void xfs_qm_dqrele_all_inodes(xfs_mount_t *, uint); 166extern void xfs_qm_dqrele_all_inodes(xfs_mount_t *, uint);
175 167
@@ -183,19 +175,6 @@ extern int xfs_qm_scall_getqstat(xfs_mount_t *, fs_quota_stat_t *);
183extern int xfs_qm_scall_quotaon(xfs_mount_t *, uint); 175extern int xfs_qm_scall_quotaon(xfs_mount_t *, uint);
184extern int xfs_qm_scall_quotaoff(xfs_mount_t *, uint); 176extern int xfs_qm_scall_quotaoff(xfs_mount_t *, uint);
185 177
186/* vop stuff */
187extern int xfs_qm_vop_dqalloc(xfs_mount_t *, xfs_inode_t *,
188 uid_t, gid_t, prid_t, uint,
189 xfs_dquot_t **, xfs_dquot_t **);
190extern void xfs_qm_vop_dqattach_and_dqmod_newinode(
191 xfs_trans_t *, xfs_inode_t *,
192 xfs_dquot_t *, xfs_dquot_t *);
193extern int xfs_qm_vop_rename_dqattach(xfs_inode_t **);
194extern xfs_dquot_t * xfs_qm_vop_chown(xfs_trans_t *, xfs_inode_t *,
195 xfs_dquot_t **, xfs_dquot_t *);
196extern int xfs_qm_vop_chown_reserve(xfs_trans_t *, xfs_inode_t *,
197 xfs_dquot_t *, xfs_dquot_t *, uint);
198
199/* list stuff */ 178/* list stuff */
200extern void xfs_qm_freelist_append(xfs_frlist_t *, xfs_dquot_t *); 179extern void xfs_qm_freelist_append(xfs_frlist_t *, xfs_dquot_t *);
201extern void xfs_qm_freelist_unlink(xfs_dquot_t *); 180extern void xfs_qm_freelist_unlink(xfs_dquot_t *);
diff --git a/fs/xfs/quota/xfs_qm_bhv.c b/fs/xfs/quota/xfs_qm_bhv.c
index 63037c689a4b..a5346630dfae 100644
--- a/fs/xfs/quota/xfs_qm_bhv.c
+++ b/fs/xfs/quota/xfs_qm_bhv.c
@@ -42,7 +42,6 @@
42#include "xfs_rtalloc.h" 42#include "xfs_rtalloc.h"
43#include "xfs_error.h" 43#include "xfs_error.h"
44#include "xfs_rw.h" 44#include "xfs_rw.h"
45#include "xfs_acl.h"
46#include "xfs_attr.h" 45#include "xfs_attr.h"
47#include "xfs_buf_item.h" 46#include "xfs_buf_item.h"
48#include "xfs_qm.h" 47#include "xfs_qm.h"
@@ -84,7 +83,7 @@ xfs_fill_statvfs_from_dquot(
84 * return a statvfs of the project, not the entire filesystem. 83 * return a statvfs of the project, not the entire filesystem.
85 * This makes such trees appear as if they are filesystems in themselves. 84 * This makes such trees appear as if they are filesystems in themselves.
86 */ 85 */
87STATIC void 86void
88xfs_qm_statvfs( 87xfs_qm_statvfs(
89 xfs_inode_t *ip, 88 xfs_inode_t *ip,
90 struct kstatfs *statp) 89 struct kstatfs *statp)
@@ -92,20 +91,13 @@ xfs_qm_statvfs(
92 xfs_mount_t *mp = ip->i_mount; 91 xfs_mount_t *mp = ip->i_mount;
93 xfs_dquot_t *dqp; 92 xfs_dquot_t *dqp;
94 93
95 if (!(ip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) ||
96 !((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_OQUOTA_ENFD))) ==
97 (XFS_PQUOTA_ACCT|XFS_OQUOTA_ENFD))
98 return;
99
100 if (!xfs_qm_dqget(mp, NULL, ip->i_d.di_projid, XFS_DQ_PROJ, 0, &dqp)) { 94 if (!xfs_qm_dqget(mp, NULL, ip->i_d.di_projid, XFS_DQ_PROJ, 0, &dqp)) {
101 xfs_disk_dquot_t *dp = &dqp->q_core; 95 xfs_fill_statvfs_from_dquot(statp, &dqp->q_core);
102
103 xfs_fill_statvfs_from_dquot(statp, dp);
104 xfs_qm_dqput(dqp); 96 xfs_qm_dqput(dqp);
105 } 97 }
106} 98}
107 99
108STATIC int 100int
109xfs_qm_newmount( 101xfs_qm_newmount(
110 xfs_mount_t *mp, 102 xfs_mount_t *mp,
111 uint *needquotamount, 103 uint *needquotamount,
@@ -114,9 +106,6 @@ xfs_qm_newmount(
114 uint quotaondisk; 106 uint quotaondisk;
115 uint uquotaondisk = 0, gquotaondisk = 0, pquotaondisk = 0; 107 uint uquotaondisk = 0, gquotaondisk = 0, pquotaondisk = 0;
116 108
117 *quotaflags = 0;
118 *needquotamount = B_FALSE;
119
120 quotaondisk = xfs_sb_version_hasquota(&mp->m_sb) && 109 quotaondisk = xfs_sb_version_hasquota(&mp->m_sb) &&
121 (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_ACCT); 110 (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_ACCT);
122 111
@@ -179,66 +168,6 @@ xfs_qm_newmount(
179 return 0; 168 return 0;
180} 169}
181 170
182STATIC int
183xfs_qm_endmount(
184 xfs_mount_t *mp,
185 uint needquotamount,
186 uint quotaflags)
187{
188 if (needquotamount) {
189 ASSERT(mp->m_qflags == 0);
190 mp->m_qflags = quotaflags;
191 xfs_qm_mount_quotas(mp);
192 }
193
194#if defined(DEBUG) && defined(XFS_LOUD_RECOVERY)
195 if (! (XFS_IS_QUOTA_ON(mp)))
196 xfs_fs_cmn_err(CE_NOTE, mp, "Disk quotas not turned on");
197 else
198 xfs_fs_cmn_err(CE_NOTE, mp, "Disk quotas turned on");
199#endif
200
201#ifdef QUOTADEBUG
202 if (XFS_IS_QUOTA_ON(mp) && xfs_qm_internalqcheck(mp))
203 cmn_err(CE_WARN, "XFS: mount internalqcheck failed");
204#endif
205
206 return 0;
207}
208
209STATIC void
210xfs_qm_dqrele_null(
211 xfs_dquot_t *dq)
212{
213 /*
214 * Called from XFS, where we always check first for a NULL dquot.
215 */
216 if (!dq)
217 return;
218 xfs_qm_dqrele(dq);
219}
220
221
222struct xfs_qmops xfs_qmcore_xfs = {
223 .xfs_qminit = xfs_qm_newmount,
224 .xfs_qmdone = xfs_qm_unmount_quotadestroy,
225 .xfs_qmmount = xfs_qm_endmount,
226 .xfs_qmunmount = xfs_qm_unmount_quotas,
227 .xfs_dqrele = xfs_qm_dqrele_null,
228 .xfs_dqattach = xfs_qm_dqattach,
229 .xfs_dqdetach = xfs_qm_dqdetach,
230 .xfs_dqpurgeall = xfs_qm_dqpurge_all,
231 .xfs_dqvopalloc = xfs_qm_vop_dqalloc,
232 .xfs_dqvopcreate = xfs_qm_vop_dqattach_and_dqmod_newinode,
233 .xfs_dqvoprename = xfs_qm_vop_rename_dqattach,
234 .xfs_dqvopchown = xfs_qm_vop_chown,
235 .xfs_dqvopchownresv = xfs_qm_vop_chown_reserve,
236 .xfs_dqstatvfs = xfs_qm_statvfs,
237 .xfs_dqsync = xfs_qm_sync,
238 .xfs_dqtrxops = &xfs_trans_dquot_ops,
239};
240EXPORT_SYMBOL(xfs_qmcore_xfs);
241
242void __init 171void __init
243xfs_qm_init(void) 172xfs_qm_init(void)
244{ 173{
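The hunk above deletes the struct xfs_qmops vector (xfs_qmcore_xfs) through which the core filesystem used to reach the quota code; xfs_qm_statvfs(), xfs_qm_newmount() and friends lose their STATIC marking and are called directly, with each entry point guarding itself via XFS_IS_QUOTA_RUNNING()/XFS_IS_QUOTA_ON() checks. Here is a rough, self-contained model of that shift from an indirection table to guarded direct calls; the mount struct, the flag names, quota_sync() and the CONFIG_QUOTA switch are stand-ins for the real XFS symbols and build option, not the actual interfaces.

#include <stdio.h>

/* Stand-in for the mount structure's quota state flags. */
struct mount {
        unsigned int qflags;
};
#define QUOTA_RUNNING   0x1     /* models XFS_IS_QUOTA_RUNNING() */
#define QUOTA_ON        0x2     /* models XFS_IS_QUOTA_ON() */

#ifdef CONFIG_QUOTA
/* Direct implementation, compiled only when quota support is built in. */
static int quota_sync(struct mount *mp, int wait)
{
        /*
         * Every entry point checks the runtime state itself instead of
         * relying on an ops pointer being NULL when quota is disabled.
         */
        if (!(mp->qflags & QUOTA_RUNNING) || !(mp->qflags & QUOTA_ON))
                return 0;
        printf("syncing dquots (wait=%d)\n", wait);
        return 0;
}
#else
/* With quota compiled out, the call collapses to a no-op stub. */
static inline int quota_sync(struct mount *mp, int wait) { return 0; }
#endif

int main(void)
{
        struct mount mp = { .qflags = QUOTA_RUNNING | QUOTA_ON };

        return quota_sync(&mp, 1);
}

Presumably the same idea is what lets a build without XFS quota support compile the callers against no-op stubs, which is what the CONFIG_QUOTA switch stands in for in this sketch.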
diff --git a/fs/xfs/quota/xfs_qm_stats.c b/fs/xfs/quota/xfs_qm_stats.c
index 709f5f545cf5..21b08c0396a1 100644
--- a/fs/xfs/quota/xfs_qm_stats.c
+++ b/fs/xfs/quota/xfs_qm_stats.c
@@ -42,7 +42,6 @@
42#include "xfs_rtalloc.h" 42#include "xfs_rtalloc.h"
43#include "xfs_error.h" 43#include "xfs_error.h"
44#include "xfs_rw.h" 44#include "xfs_rw.h"
45#include "xfs_acl.h"
46#include "xfs_attr.h" 45#include "xfs_attr.h"
47#include "xfs_buf_item.h" 46#include "xfs_buf_item.h"
48#include "xfs_qm.h" 47#include "xfs_qm.h"
diff --git a/fs/xfs/quota/xfs_qm_syscalls.c b/fs/xfs/quota/xfs_qm_syscalls.c
index c7b66f6506ce..4e4276b956e8 100644
--- a/fs/xfs/quota/xfs_qm_syscalls.c
+++ b/fs/xfs/quota/xfs_qm_syscalls.c
@@ -45,7 +45,6 @@
45#include "xfs_rtalloc.h" 45#include "xfs_rtalloc.h"
46#include "xfs_error.h" 46#include "xfs_error.h"
47#include "xfs_rw.h" 47#include "xfs_rw.h"
48#include "xfs_acl.h"
49#include "xfs_attr.h" 48#include "xfs_attr.h"
50#include "xfs_buf_item.h" 49#include "xfs_buf_item.h"
51#include "xfs_utils.h" 50#include "xfs_utils.h"
@@ -847,105 +846,55 @@ xfs_qm_export_flags(
847} 846}
848 847
849 848
850/* 849STATIC int
851 * Release all the dquots on the inodes in an AG. 850xfs_dqrele_inode(
852 */ 851 struct xfs_inode *ip,
853STATIC void 852 struct xfs_perag *pag,
854xfs_qm_dqrele_inodes_ag( 853 int flags)
855 xfs_mount_t *mp,
856 int ag,
857 uint flags)
858{ 854{
859 xfs_inode_t *ip = NULL; 855 int error;
860 xfs_perag_t *pag = &mp->m_perag[ag];
861 int first_index = 0;
862 int nr_found;
863
864 do {
865 /*
866 * use a gang lookup to find the next inode in the tree
867 * as the tree is sparse and a gang lookup walks to find
868 * the number of objects requested.
869 */
870 read_lock(&pag->pag_ici_lock);
871 nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
872 (void**)&ip, first_index, 1);
873
874 if (!nr_found) {
875 read_unlock(&pag->pag_ici_lock);
876 break;
877 }
878
879 /*
880 * Update the index for the next lookup. Catch overflows
881 * into the next AG range which can occur if we have inodes
882 * in the last block of the AG and we are currently
883 * pointing to the last inode.
884 */
885 first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
886 if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino)) {
887 read_unlock(&pag->pag_ici_lock);
888 break;
889 }
890
891 /* skip quota inodes */
892 if (ip == XFS_QI_UQIP(mp) || ip == XFS_QI_GQIP(mp)) {
893 ASSERT(ip->i_udquot == NULL);
894 ASSERT(ip->i_gdquot == NULL);
895 read_unlock(&pag->pag_ici_lock);
896 continue;
897 }
898 856
899 /* 857 /* skip quota inodes */
900 * If we can't get a reference on the inode, it must be 858 if (ip == XFS_QI_UQIP(ip->i_mount) || ip == XFS_QI_GQIP(ip->i_mount)) {
901 * in reclaim. Leave it for the reclaim code to flush. 859 ASSERT(ip->i_udquot == NULL);
902 */ 860 ASSERT(ip->i_gdquot == NULL);
903 if (!igrab(VFS_I(ip))) {
904 read_unlock(&pag->pag_ici_lock);
905 continue;
906 }
907 read_unlock(&pag->pag_ici_lock); 861 read_unlock(&pag->pag_ici_lock);
862 return 0;
863 }
908 864
909 /* avoid new inodes though we shouldn't find any here */ 865 error = xfs_sync_inode_valid(ip, pag);
910 if (xfs_iflags_test(ip, XFS_INEW)) { 866 if (error)
911 IRELE(ip); 867 return error;
912 continue;
913 }
914 868
915 xfs_ilock(ip, XFS_ILOCK_EXCL); 869 xfs_ilock(ip, XFS_ILOCK_EXCL);
916 if ((flags & XFS_UQUOTA_ACCT) && ip->i_udquot) { 870 if ((flags & XFS_UQUOTA_ACCT) && ip->i_udquot) {
917 xfs_qm_dqrele(ip->i_udquot); 871 xfs_qm_dqrele(ip->i_udquot);
918 ip->i_udquot = NULL; 872 ip->i_udquot = NULL;
919 } 873 }
920 if (flags & (XFS_PQUOTA_ACCT|XFS_GQUOTA_ACCT) && 874 if (flags & (XFS_PQUOTA_ACCT|XFS_GQUOTA_ACCT) && ip->i_gdquot) {
921 ip->i_gdquot) { 875 xfs_qm_dqrele(ip->i_gdquot);
922 xfs_qm_dqrele(ip->i_gdquot); 876 ip->i_gdquot = NULL;
923 ip->i_gdquot = NULL; 877 }
924 } 878 xfs_iput(ip, XFS_ILOCK_EXCL);
925 xfs_iput(ip, XFS_ILOCK_EXCL); 879 IRELE(ip);
926 880
927 } while (nr_found); 881 return 0;
928} 882}
929 883
884
930/* 885/*
931 * Go thru all the inodes in the file system, releasing their dquots. 886 * Go thru all the inodes in the file system, releasing their dquots.
887 *
932 * Note that the mount structure gets modified to indicate that quotas are off 888 * Note that the mount structure gets modified to indicate that quotas are off
933 * AFTER this, in the case of quotaoff. This also gets called from 889 * AFTER this, in the case of quotaoff.
934 * xfs_rootumount.
935 */ 890 */
936void 891void
937xfs_qm_dqrele_all_inodes( 892xfs_qm_dqrele_all_inodes(
938 struct xfs_mount *mp, 893 struct xfs_mount *mp,
939 uint flags) 894 uint flags)
940{ 895{
941 int i;
942
943 ASSERT(mp->m_quotainfo); 896 ASSERT(mp->m_quotainfo);
944 for (i = 0; i < mp->m_sb.sb_agcount; i++) { 897 xfs_inode_ag_iterator(mp, xfs_dqrele_inode, flags, XFS_ICI_NO_TAG);
945 if (!mp->m_perag[i].pag_ici_init)
946 continue;
947 xfs_qm_dqrele_inodes_ag(mp, i, flags);
948 }
949} 898}
950 899
951/*------------------------------------------------------------------------*/ 900/*------------------------------------------------------------------------*/
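Above, the open-coded per-AG radix-tree walk in xfs_qm_dqrele_inodes_ag() is replaced by a small per-inode callback, xfs_dqrele_inode(), handed to the generic xfs_inode_ag_iterator() (with XFS_ICI_NO_TAG for an untagged lookup, per the xfs_ag.h hunk later in this diff). A simplified standalone model of that iterator/callback split follows; the array-backed walker and the inode_iterator()/dqrele_inode() names are illustrative only, the real iterator walks per-AG radix trees.

#include <stddef.h>
#include <stdio.h>

struct inode {
        unsigned long   ino;
        int             has_dquot;      /* stands in for ip->i_udquot / i_gdquot */
};

/* Generic walker: applies @execute to every inode and stops on error. */
static int inode_iterator(struct inode *inodes, size_t count,
                          int (*execute)(struct inode *ip, int flags),
                          int flags)
{
        size_t i;

        for (i = 0; i < count; i++) {
                int error = execute(&inodes[i], flags);
                if (error)
                        return error;
        }
        return 0;
}

/* Per-inode work, kept free of any iteration logic (like xfs_dqrele_inode). */
static int dqrele_inode(struct inode *ip, int flags)
{
        (void)flags;
        if (ip->has_dquot) {
                printf("releasing dquot on inode %lu\n", ip->ino);
                ip->has_dquot = 0;
        }
        return 0;
}

int main(void)
{
        struct inode inodes[] = {
                { .ino = 128, .has_dquot = 1 },
                { .ino = 129, .has_dquot = 0 },
                { .ino = 130, .has_dquot = 1 },
        };

        return inode_iterator(inodes, 3, dqrele_inode, 0);
}

Keeping the per-inode work separate from the traversal is what lets the gang lookup, igrab and AG-overflow handling that the removed code open-coded live in one shared iterator instead of being duplicated in every caller.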
diff --git a/fs/xfs/quota/xfs_trans_dquot.c b/fs/xfs/quota/xfs_trans_dquot.c
index 447173bcf96d..97ac9640be98 100644
--- a/fs/xfs/quota/xfs_trans_dquot.c
+++ b/fs/xfs/quota/xfs_trans_dquot.c
@@ -42,7 +42,6 @@
42#include "xfs_rtalloc.h" 42#include "xfs_rtalloc.h"
43#include "xfs_error.h" 43#include "xfs_error.h"
44#include "xfs_rw.h" 44#include "xfs_rw.h"
45#include "xfs_acl.h"
46#include "xfs_attr.h" 45#include "xfs_attr.h"
47#include "xfs_buf_item.h" 46#include "xfs_buf_item.h"
48#include "xfs_trans_priv.h" 47#include "xfs_trans_priv.h"
@@ -111,7 +110,7 @@ xfs_trans_log_dquot(
111 * Carry forward whatever is left of the quota blk reservation to 110 * Carry forward whatever is left of the quota blk reservation to
112 * the spanky new transaction 111 * the spanky new transaction
113 */ 112 */
114STATIC void 113void
115xfs_trans_dup_dqinfo( 114xfs_trans_dup_dqinfo(
116 xfs_trans_t *otp, 115 xfs_trans_t *otp,
117 xfs_trans_t *ntp) 116 xfs_trans_t *ntp)
@@ -167,19 +166,17 @@ xfs_trans_dup_dqinfo(
167/* 166/*
168 * Wrap around mod_dquot to account for both user and group quotas. 167 * Wrap around mod_dquot to account for both user and group quotas.
169 */ 168 */
170STATIC void 169void
171xfs_trans_mod_dquot_byino( 170xfs_trans_mod_dquot_byino(
172 xfs_trans_t *tp, 171 xfs_trans_t *tp,
173 xfs_inode_t *ip, 172 xfs_inode_t *ip,
174 uint field, 173 uint field,
175 long delta) 174 long delta)
176{ 175{
177 xfs_mount_t *mp; 176 xfs_mount_t *mp = tp->t_mountp;
178
179 ASSERT(tp);
180 mp = tp->t_mountp;
181 177
182 if (!XFS_IS_QUOTA_ON(mp) || 178 if (!XFS_IS_QUOTA_RUNNING(mp) ||
179 !XFS_IS_QUOTA_ON(mp) ||
183 ip->i_ino == mp->m_sb.sb_uquotino || 180 ip->i_ino == mp->m_sb.sb_uquotino ||
184 ip->i_ino == mp->m_sb.sb_gquotino) 181 ip->i_ino == mp->m_sb.sb_gquotino)
185 return; 182 return;
@@ -229,6 +226,7 @@ xfs_trans_mod_dquot(
229 xfs_dqtrx_t *qtrx; 226 xfs_dqtrx_t *qtrx;
230 227
231 ASSERT(tp); 228 ASSERT(tp);
229 ASSERT(XFS_IS_QUOTA_RUNNING(tp->t_mountp));
232 qtrx = NULL; 230 qtrx = NULL;
233 231
234 if (tp->t_dqinfo == NULL) 232 if (tp->t_dqinfo == NULL)
@@ -346,7 +344,7 @@ xfs_trans_dqlockedjoin(
346 * Unreserve just the reservations done by this transaction. 344 * Unreserve just the reservations done by this transaction.
347 * dquot is still left locked at exit. 345 * dquot is still left locked at exit.
348 */ 346 */
349STATIC void 347void
350xfs_trans_apply_dquot_deltas( 348xfs_trans_apply_dquot_deltas(
351 xfs_trans_t *tp) 349 xfs_trans_t *tp)
352{ 350{
@@ -357,7 +355,7 @@ xfs_trans_apply_dquot_deltas(
357 long totalbdelta; 355 long totalbdelta;
358 long totalrtbdelta; 356 long totalrtbdelta;
359 357
360 if (! (tp->t_flags & XFS_TRANS_DQ_DIRTY)) 358 if (!(tp->t_flags & XFS_TRANS_DQ_DIRTY))
361 return; 359 return;
362 360
363 ASSERT(tp->t_dqinfo); 361 ASSERT(tp->t_dqinfo);
@@ -531,7 +529,7 @@ xfs_trans_apply_dquot_deltas(
531 * we simply throw those away, since that's the expected behavior 529 * we simply throw those away, since that's the expected behavior
532 * when a transaction is curtailed without a commit. 530 * when a transaction is curtailed without a commit.
533 */ 531 */
534STATIC void 532void
535xfs_trans_unreserve_and_mod_dquots( 533xfs_trans_unreserve_and_mod_dquots(
536 xfs_trans_t *tp) 534 xfs_trans_t *tp)
537{ 535{
@@ -768,7 +766,7 @@ xfs_trans_reserve_quota_bydquots(
768{ 766{
769 int resvd = 0, error; 767 int resvd = 0, error;
770 768
771 if (!XFS_IS_QUOTA_ON(mp)) 769 if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
772 return 0; 770 return 0;
773 771
774 if (tp && tp->t_dqinfo == NULL) 772 if (tp && tp->t_dqinfo == NULL)
@@ -811,18 +809,17 @@ xfs_trans_reserve_quota_bydquots(
811 * This doesn't change the actual usage, just the reservation. 809 * This doesn't change the actual usage, just the reservation.
812 * The inode sent in is locked. 810 * The inode sent in is locked.
813 */ 811 */
814STATIC int 812int
815xfs_trans_reserve_quota_nblks( 813xfs_trans_reserve_quota_nblks(
816 xfs_trans_t *tp, 814 struct xfs_trans *tp,
817 xfs_mount_t *mp, 815 struct xfs_inode *ip,
818 xfs_inode_t *ip, 816 long nblks,
819 long nblks, 817 long ninos,
820 long ninos, 818 uint flags)
821 uint flags)
822{ 819{
823 int error; 820 struct xfs_mount *mp = ip->i_mount;
824 821
825 if (!XFS_IS_QUOTA_ON(mp)) 822 if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
826 return 0; 823 return 0;
827 if (XFS_IS_PQUOTA_ON(mp)) 824 if (XFS_IS_PQUOTA_ON(mp))
828 flags |= XFS_QMOPT_ENOSPC; 825 flags |= XFS_QMOPT_ENOSPC;
@@ -831,7 +828,6 @@ xfs_trans_reserve_quota_nblks(
831 ASSERT(ip->i_ino != mp->m_sb.sb_gquotino); 828 ASSERT(ip->i_ino != mp->m_sb.sb_gquotino);
832 829
833 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); 830 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
834 ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount));
835 ASSERT((flags & ~(XFS_QMOPT_FORCE_RES | XFS_QMOPT_ENOSPC)) == 831 ASSERT((flags & ~(XFS_QMOPT_FORCE_RES | XFS_QMOPT_ENOSPC)) ==
836 XFS_TRANS_DQ_RES_RTBLKS || 832 XFS_TRANS_DQ_RES_RTBLKS ||
837 (flags & ~(XFS_QMOPT_FORCE_RES | XFS_QMOPT_ENOSPC)) == 833 (flags & ~(XFS_QMOPT_FORCE_RES | XFS_QMOPT_ENOSPC)) ==
@@ -840,11 +836,9 @@ xfs_trans_reserve_quota_nblks(
840 /* 836 /*
841 * Reserve nblks against these dquots, with trans as the mediator. 837 * Reserve nblks against these dquots, with trans as the mediator.
842 */ 838 */
843 error = xfs_trans_reserve_quota_bydquots(tp, mp, 839 return xfs_trans_reserve_quota_bydquots(tp, mp,
844 ip->i_udquot, ip->i_gdquot, 840 ip->i_udquot, ip->i_gdquot,
845 nblks, ninos, 841 nblks, ninos, flags);
846 flags);
847 return error;
848} 842}
849 843
850/* 844/*
@@ -895,25 +889,15 @@ STATIC void
895xfs_trans_alloc_dqinfo( 889xfs_trans_alloc_dqinfo(
896 xfs_trans_t *tp) 890 xfs_trans_t *tp)
897{ 891{
898 (tp)->t_dqinfo = kmem_zone_zalloc(xfs_Gqm->qm_dqtrxzone, KM_SLEEP); 892 tp->t_dqinfo = kmem_zone_zalloc(xfs_Gqm->qm_dqtrxzone, KM_SLEEP);
899} 893}
900 894
901STATIC void 895void
902xfs_trans_free_dqinfo( 896xfs_trans_free_dqinfo(
903 xfs_trans_t *tp) 897 xfs_trans_t *tp)
904{ 898{
905 if (!tp->t_dqinfo) 899 if (!tp->t_dqinfo)
906 return; 900 return;
907 kmem_zone_free(xfs_Gqm->qm_dqtrxzone, (tp)->t_dqinfo); 901 kmem_zone_free(xfs_Gqm->qm_dqtrxzone, tp->t_dqinfo);
908 (tp)->t_dqinfo = NULL; 902 tp->t_dqinfo = NULL;
909} 903}
910
911xfs_dqtrxops_t xfs_trans_dquot_ops = {
912 .qo_dup_dqinfo = xfs_trans_dup_dqinfo,
913 .qo_free_dqinfo = xfs_trans_free_dqinfo,
914 .qo_mod_dquot_byino = xfs_trans_mod_dquot_byino,
915 .qo_apply_dquot_deltas = xfs_trans_apply_dquot_deltas,
916 .qo_reserve_quota_nblks = xfs_trans_reserve_quota_nblks,
917 .qo_reserve_quota_bydquots = xfs_trans_reserve_quota_bydquots,
918 .qo_unreserve_and_mod_dquots = xfs_trans_unreserve_and_mod_dquots,
919};
diff --git a/fs/xfs/xfs_acl.c b/fs/xfs/xfs_acl.c
deleted file mode 100644
index a8cdd73999a4..000000000000
--- a/fs/xfs/xfs_acl.c
+++ /dev/null
@@ -1,874 +0,0 @@
1/*
2 * Copyright (c) 2001-2002,2005 Silicon Graphics, Inc.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18#include "xfs.h"
19#include "xfs_fs.h"
20#include "xfs_types.h"
21#include "xfs_bit.h"
22#include "xfs_inum.h"
23#include "xfs_ag.h"
24#include "xfs_dir2.h"
25#include "xfs_bmap_btree.h"
26#include "xfs_alloc_btree.h"
27#include "xfs_ialloc_btree.h"
28#include "xfs_dir2_sf.h"
29#include "xfs_attr_sf.h"
30#include "xfs_dinode.h"
31#include "xfs_inode.h"
32#include "xfs_btree.h"
33#include "xfs_acl.h"
34#include "xfs_attr.h"
35#include "xfs_vnodeops.h"
36
37#include <linux/capability.h>
38#include <linux/posix_acl_xattr.h>
39
40STATIC int xfs_acl_setmode(struct inode *, xfs_acl_t *, int *);
41STATIC void xfs_acl_filter_mode(mode_t, xfs_acl_t *);
42STATIC void xfs_acl_get_endian(xfs_acl_t *);
43STATIC int xfs_acl_access(uid_t, gid_t, xfs_acl_t *, mode_t, cred_t *);
44STATIC int xfs_acl_invalid(xfs_acl_t *);
45STATIC void xfs_acl_sync_mode(mode_t, xfs_acl_t *);
46STATIC void xfs_acl_get_attr(struct inode *, xfs_acl_t *, int, int, int *);
47STATIC void xfs_acl_set_attr(struct inode *, xfs_acl_t *, int, int *);
48STATIC int xfs_acl_allow_set(struct inode *, int);
49
50kmem_zone_t *xfs_acl_zone;
51
52
53/*
54 * Test for existence of access ACL attribute as efficiently as possible.
55 */
56int
57xfs_acl_vhasacl_access(
58 struct inode *vp)
59{
60 int error;
61
62 xfs_acl_get_attr(vp, NULL, _ACL_TYPE_ACCESS, ATTR_KERNOVAL, &error);
63 return (error == 0);
64}
65
66/*
67 * Test for existence of default ACL attribute as efficiently as possible.
68 */
69int
70xfs_acl_vhasacl_default(
71 struct inode *vp)
72{
73 int error;
74
75 if (!S_ISDIR(vp->i_mode))
76 return 0;
77 xfs_acl_get_attr(vp, NULL, _ACL_TYPE_DEFAULT, ATTR_KERNOVAL, &error);
78 return (error == 0);
79}
80
81/*
82 * Convert from extended attribute representation to in-memory for XFS.
83 */
84STATIC int
85posix_acl_xattr_to_xfs(
86 posix_acl_xattr_header *src,
87 size_t size,
88 xfs_acl_t *dest)
89{
90 posix_acl_xattr_entry *src_entry;
91 xfs_acl_entry_t *dest_entry;
92 int n;
93
94 if (!src || !dest)
95 return EINVAL;
96
97 if (size < sizeof(posix_acl_xattr_header))
98 return EINVAL;
99
100 if (src->a_version != cpu_to_le32(POSIX_ACL_XATTR_VERSION))
101 return EOPNOTSUPP;
102
103 memset(dest, 0, sizeof(xfs_acl_t));
104 dest->acl_cnt = posix_acl_xattr_count(size);
105 if (dest->acl_cnt < 0 || dest->acl_cnt > XFS_ACL_MAX_ENTRIES)
106 return EINVAL;
107
108 /*
109 * acl_set_file(3) may request that we set default ACLs with
110 * zero length -- defend (gracefully) against that here.
111 */
112 if (!dest->acl_cnt)
113 return 0;
114
115 src_entry = (posix_acl_xattr_entry *)((char *)src + sizeof(*src));
116 dest_entry = &dest->acl_entry[0];
117
118 for (n = 0; n < dest->acl_cnt; n++, src_entry++, dest_entry++) {
119 dest_entry->ae_perm = le16_to_cpu(src_entry->e_perm);
120 if (_ACL_PERM_INVALID(dest_entry->ae_perm))
121 return EINVAL;
122 dest_entry->ae_tag = le16_to_cpu(src_entry->e_tag);
123 switch(dest_entry->ae_tag) {
124 case ACL_USER:
125 case ACL_GROUP:
126 dest_entry->ae_id = le32_to_cpu(src_entry->e_id);
127 break;
128 case ACL_USER_OBJ:
129 case ACL_GROUP_OBJ:
130 case ACL_MASK:
131 case ACL_OTHER:
132 dest_entry->ae_id = ACL_UNDEFINED_ID;
133 break;
134 default:
135 return EINVAL;
136 }
137 }
138 if (xfs_acl_invalid(dest))
139 return EINVAL;
140
141 return 0;
142}
143
144/*
145 * Comparison function called from xfs_sort().
146 * Primary key is ae_tag, secondary key is ae_id.
147 */
148STATIC int
149xfs_acl_entry_compare(
150 const void *va,
151 const void *vb)
152{
153 xfs_acl_entry_t *a = (xfs_acl_entry_t *)va,
154 *b = (xfs_acl_entry_t *)vb;
155
156 if (a->ae_tag == b->ae_tag)
157 return (a->ae_id - b->ae_id);
158 return (a->ae_tag - b->ae_tag);
159}
160
161/*
162 * Convert from in-memory XFS to extended attribute representation.
163 */
164STATIC int
165posix_acl_xfs_to_xattr(
166 xfs_acl_t *src,
167 posix_acl_xattr_header *dest,
168 size_t size)
169{
170 int n;
171 size_t new_size = posix_acl_xattr_size(src->acl_cnt);
172 posix_acl_xattr_entry *dest_entry;
173 xfs_acl_entry_t *src_entry;
174
175 if (size < new_size)
176 return -ERANGE;
177
178 /* Need to sort src XFS ACL by <ae_tag,ae_id> */
179 xfs_sort(src->acl_entry, src->acl_cnt, sizeof(src->acl_entry[0]),
180 xfs_acl_entry_compare);
181
182 dest->a_version = cpu_to_le32(POSIX_ACL_XATTR_VERSION);
183 dest_entry = &dest->a_entries[0];
184 src_entry = &src->acl_entry[0];
185 for (n = 0; n < src->acl_cnt; n++, dest_entry++, src_entry++) {
186 dest_entry->e_perm = cpu_to_le16(src_entry->ae_perm);
187 if (_ACL_PERM_INVALID(src_entry->ae_perm))
188 return -EINVAL;
189 dest_entry->e_tag = cpu_to_le16(src_entry->ae_tag);
190 switch (src_entry->ae_tag) {
191 case ACL_USER:
192 case ACL_GROUP:
193 dest_entry->e_id = cpu_to_le32(src_entry->ae_id);
194 break;
195 case ACL_USER_OBJ:
196 case ACL_GROUP_OBJ:
197 case ACL_MASK:
198 case ACL_OTHER:
199 dest_entry->e_id = cpu_to_le32(ACL_UNDEFINED_ID);
200 break;
201 default:
202 return -EINVAL;
203 }
204 }
205 return new_size;
206}
207
208int
209xfs_acl_vget(
210 struct inode *vp,
211 void *acl,
212 size_t size,
213 int kind)
214{
215 int error;
216 xfs_acl_t *xfs_acl = NULL;
217 posix_acl_xattr_header *ext_acl = acl;
218 int flags = 0;
219
220 if(size) {
221 if (!(_ACL_ALLOC(xfs_acl))) {
222 error = ENOMEM;
223 goto out;
224 }
225 memset(xfs_acl, 0, sizeof(xfs_acl_t));
226 } else
227 flags = ATTR_KERNOVAL;
228
229 xfs_acl_get_attr(vp, xfs_acl, kind, flags, &error);
230 if (error)
231 goto out;
232
233 if (!size) {
234 error = -posix_acl_xattr_size(XFS_ACL_MAX_ENTRIES);
235 } else {
236 if (xfs_acl_invalid(xfs_acl)) {
237 error = EINVAL;
238 goto out;
239 }
240 if (kind == _ACL_TYPE_ACCESS)
241 xfs_acl_sync_mode(XFS_I(vp)->i_d.di_mode, xfs_acl);
242 error = -posix_acl_xfs_to_xattr(xfs_acl, ext_acl, size);
243 }
244out:
245 if(xfs_acl)
246 _ACL_FREE(xfs_acl);
247 return -error;
248}
249
250int
251xfs_acl_vremove(
252 struct inode *vp,
253 int kind)
254{
255 int error;
256
257 error = xfs_acl_allow_set(vp, kind);
258 if (!error) {
259 error = xfs_attr_remove(XFS_I(vp),
260 kind == _ACL_TYPE_DEFAULT?
261 SGI_ACL_DEFAULT: SGI_ACL_FILE,
262 ATTR_ROOT);
263 if (error == ENOATTR)
264 error = 0; /* 'scool */
265 }
266 return -error;
267}
268
269int
270xfs_acl_vset(
271 struct inode *vp,
272 void *acl,
273 size_t size,
274 int kind)
275{
276 posix_acl_xattr_header *ext_acl = acl;
277 xfs_acl_t *xfs_acl;
278 int error;
279 int basicperms = 0; /* more than std unix perms? */
280
281 if (!acl)
282 return -EINVAL;
283
284 if (!(_ACL_ALLOC(xfs_acl)))
285 return -ENOMEM;
286
287 error = posix_acl_xattr_to_xfs(ext_acl, size, xfs_acl);
288 if (error) {
289 _ACL_FREE(xfs_acl);
290 return -error;
291 }
292 if (!xfs_acl->acl_cnt) {
293 _ACL_FREE(xfs_acl);
294 return 0;
295 }
296
297 error = xfs_acl_allow_set(vp, kind);
298
299 /* Incoming ACL exists, set file mode based on its value */
300 if (!error && kind == _ACL_TYPE_ACCESS)
301 error = xfs_acl_setmode(vp, xfs_acl, &basicperms);
302
303 if (error)
304 goto out;
305
306 /*
307 * If we have more than std unix permissions, set up the actual attr.
308 * Otherwise, delete any existing attr. This prevents us from
309 * having actual attrs for permissions that can be stored in the
310 * standard permission bits.
311 */
312 if (!basicperms) {
313 xfs_acl_set_attr(vp, xfs_acl, kind, &error);
314 } else {
315 error = -xfs_acl_vremove(vp, _ACL_TYPE_ACCESS);
316 }
317
318out:
319 _ACL_FREE(xfs_acl);
320 return -error;
321}
322
323int
324xfs_acl_iaccess(
325 xfs_inode_t *ip,
326 mode_t mode,
327 cred_t *cr)
328{
329 xfs_acl_t *acl;
330 int rval;
331 struct xfs_name acl_name = {SGI_ACL_FILE, SGI_ACL_FILE_SIZE};
332
333 if (!(_ACL_ALLOC(acl)))
334 return -1;
335
336 /* If the file has no ACL return -1. */
337 rval = sizeof(xfs_acl_t);
338 if (xfs_attr_fetch(ip, &acl_name, (char *)acl, &rval, ATTR_ROOT)) {
339 _ACL_FREE(acl);
340 return -1;
341 }
342 xfs_acl_get_endian(acl);
343
344 /* If the file has an empty ACL return -1. */
345 if (acl->acl_cnt == XFS_ACL_NOT_PRESENT) {
346 _ACL_FREE(acl);
347 return -1;
348 }
349
350 /* Synchronize ACL with mode bits */
351 xfs_acl_sync_mode(ip->i_d.di_mode, acl);
352
353 rval = xfs_acl_access(ip->i_d.di_uid, ip->i_d.di_gid, acl, mode, cr);
354 _ACL_FREE(acl);
355 return rval;
356}
357
358STATIC int
359xfs_acl_allow_set(
360 struct inode *vp,
361 int kind)
362{
363 if (vp->i_flags & (S_IMMUTABLE|S_APPEND))
364 return EPERM;
365 if (kind == _ACL_TYPE_DEFAULT && !S_ISDIR(vp->i_mode))
366 return ENOTDIR;
367 if (vp->i_sb->s_flags & MS_RDONLY)
368 return EROFS;
369 if (XFS_I(vp)->i_d.di_uid != current_fsuid() && !capable(CAP_FOWNER))
370 return EPERM;
371 return 0;
372}
373
374/*
375 * Note: cr is only used here for the capability check if the ACL test fails.
376 * It is not used to find out the credentials uid or groups etc, as was
377 * done in IRIX. It is assumed that the uid and groups for the current
378 * thread are taken from "current" instead of the cr parameter.
379 */
380STATIC int
381xfs_acl_access(
382 uid_t fuid,
383 gid_t fgid,
384 xfs_acl_t *fap,
385 mode_t md,
386 cred_t *cr)
387{
388 xfs_acl_entry_t matched;
389 int i, allows;
390 int maskallows = -1; /* true, but not 1, either */
391 int seen_userobj = 0;
392
393 matched.ae_tag = 0; /* Invalid type */
394 matched.ae_perm = 0;
395
396 for (i = 0; i < fap->acl_cnt; i++) {
397 /*
398 * Break out if we've got a user_obj entry or
399 * a user entry and the mask (and have processed USER_OBJ)
400 */
401 if (matched.ae_tag == ACL_USER_OBJ)
402 break;
403 if (matched.ae_tag == ACL_USER) {
404 if (maskallows != -1 && seen_userobj)
405 break;
406 if (fap->acl_entry[i].ae_tag != ACL_MASK &&
407 fap->acl_entry[i].ae_tag != ACL_USER_OBJ)
408 continue;
409 }
410 /* True if this entry allows the requested access */
411 allows = ((fap->acl_entry[i].ae_perm & md) == md);
412
413 switch (fap->acl_entry[i].ae_tag) {
414 case ACL_USER_OBJ:
415 seen_userobj = 1;
416 if (fuid != current_fsuid())
417 continue;
418 matched.ae_tag = ACL_USER_OBJ;
419 matched.ae_perm = allows;
420 break;
421 case ACL_USER:
422 if (fap->acl_entry[i].ae_id != current_fsuid())
423 continue;
424 matched.ae_tag = ACL_USER;
425 matched.ae_perm = allows;
426 break;
427 case ACL_GROUP_OBJ:
428 if ((matched.ae_tag == ACL_GROUP_OBJ ||
429 matched.ae_tag == ACL_GROUP) && !allows)
430 continue;
431 if (!in_group_p(fgid))
432 continue;
433 matched.ae_tag = ACL_GROUP_OBJ;
434 matched.ae_perm = allows;
435 break;
436 case ACL_GROUP:
437 if ((matched.ae_tag == ACL_GROUP_OBJ ||
438 matched.ae_tag == ACL_GROUP) && !allows)
439 continue;
440 if (!in_group_p(fap->acl_entry[i].ae_id))
441 continue;
442 matched.ae_tag = ACL_GROUP;
443 matched.ae_perm = allows;
444 break;
445 case ACL_MASK:
446 maskallows = allows;
447 break;
448 case ACL_OTHER:
449 if (matched.ae_tag != 0)
450 continue;
451 matched.ae_tag = ACL_OTHER;
452 matched.ae_perm = allows;
453 break;
454 }
455 }
456 /*
457 * First possibility is that no matched entry allows access.
458 * The capability to override DAC may exist, so check for it.
459 */
460 switch (matched.ae_tag) {
461 case ACL_OTHER:
462 case ACL_USER_OBJ:
463 if (matched.ae_perm)
464 return 0;
465 break;
466 case ACL_USER:
467 case ACL_GROUP_OBJ:
468 case ACL_GROUP:
469 if (maskallows && matched.ae_perm)
470 return 0;
471 break;
472 case 0:
473 break;
474 }
475
476 /* EACCES tells generic_permission to check for capability overrides */
477 return EACCES;
478}
479
480/*
481 * ACL validity checker.
482 * This acl validation routine checks each ACL entry read in makes sense.
483 */
484STATIC int
485xfs_acl_invalid(
486 xfs_acl_t *aclp)
487{
488 xfs_acl_entry_t *entry, *e;
489 int user = 0, group = 0, other = 0, mask = 0;
490 int mask_required = 0;
491 int i, j;
492
493 if (!aclp)
494 goto acl_invalid;
495
496 if (aclp->acl_cnt > XFS_ACL_MAX_ENTRIES)
497 goto acl_invalid;
498
499 for (i = 0; i < aclp->acl_cnt; i++) {
500 entry = &aclp->acl_entry[i];
501 switch (entry->ae_tag) {
502 case ACL_USER_OBJ:
503 if (user++)
504 goto acl_invalid;
505 break;
506 case ACL_GROUP_OBJ:
507 if (group++)
508 goto acl_invalid;
509 break;
510 case ACL_OTHER:
511 if (other++)
512 goto acl_invalid;
513 break;
514 case ACL_USER:
515 case ACL_GROUP:
516 for (j = i + 1; j < aclp->acl_cnt; j++) {
517 e = &aclp->acl_entry[j];
518 if (e->ae_id == entry->ae_id &&
519 e->ae_tag == entry->ae_tag)
520 goto acl_invalid;
521 }
522 mask_required++;
523 break;
524 case ACL_MASK:
525 if (mask++)
526 goto acl_invalid;
527 break;
528 default:
529 goto acl_invalid;
530 }
531 }
532 if (!user || !group || !other || (mask_required && !mask))
533 goto acl_invalid;
534 else
535 return 0;
536acl_invalid:
537 return EINVAL;
538}
539
540/*
541 * Do ACL endian conversion.
542 */
543STATIC void
544xfs_acl_get_endian(
545 xfs_acl_t *aclp)
546{
547 xfs_acl_entry_t *ace, *end;
548
549 INT_SET(aclp->acl_cnt, ARCH_CONVERT, aclp->acl_cnt);
550 end = &aclp->acl_entry[0]+aclp->acl_cnt;
551 for (ace = &aclp->acl_entry[0]; ace < end; ace++) {
552 INT_SET(ace->ae_tag, ARCH_CONVERT, ace->ae_tag);
553 INT_SET(ace->ae_id, ARCH_CONVERT, ace->ae_id);
554 INT_SET(ace->ae_perm, ARCH_CONVERT, ace->ae_perm);
555 }
556}
557
558/*
559 * Get the ACL from the EA and do endian conversion.
560 */
561STATIC void
562xfs_acl_get_attr(
563 struct inode *vp,
564 xfs_acl_t *aclp,
565 int kind,
566 int flags,
567 int *error)
568{
569 int len = sizeof(xfs_acl_t);
570
571 ASSERT((flags & ATTR_KERNOVAL) ? (aclp == NULL) : 1);
572 flags |= ATTR_ROOT;
573 *error = xfs_attr_get(XFS_I(vp),
574 kind == _ACL_TYPE_ACCESS ?
575 SGI_ACL_FILE : SGI_ACL_DEFAULT,
576 (char *)aclp, &len, flags);
577 if (*error || (flags & ATTR_KERNOVAL))
578 return;
579 xfs_acl_get_endian(aclp);
580}
581
582/*
583 * Set the EA with the ACL and do endian conversion.
584 */
585STATIC void
586xfs_acl_set_attr(
587 struct inode *vp,
588 xfs_acl_t *aclp,
589 int kind,
590 int *error)
591{
592 xfs_acl_entry_t *ace, *newace, *end;
593 xfs_acl_t *newacl;
594 int len;
595
596 if (!(_ACL_ALLOC(newacl))) {
597 *error = ENOMEM;
598 return;
599 }
600
601 len = sizeof(xfs_acl_t) -
602 (sizeof(xfs_acl_entry_t) * (XFS_ACL_MAX_ENTRIES - aclp->acl_cnt));
603 end = &aclp->acl_entry[0]+aclp->acl_cnt;
604 for (ace = &aclp->acl_entry[0], newace = &newacl->acl_entry[0];
605 ace < end;
606 ace++, newace++) {
607 INT_SET(newace->ae_tag, ARCH_CONVERT, ace->ae_tag);
608 INT_SET(newace->ae_id, ARCH_CONVERT, ace->ae_id);
609 INT_SET(newace->ae_perm, ARCH_CONVERT, ace->ae_perm);
610 }
611 INT_SET(newacl->acl_cnt, ARCH_CONVERT, aclp->acl_cnt);
612 *error = xfs_attr_set(XFS_I(vp),
613 kind == _ACL_TYPE_ACCESS ?
614 SGI_ACL_FILE: SGI_ACL_DEFAULT,
615 (char *)newacl, len, ATTR_ROOT);
616 _ACL_FREE(newacl);
617}
618
619int
620xfs_acl_vtoacl(
621 struct inode *vp,
622 xfs_acl_t *access_acl,
623 xfs_acl_t *default_acl)
624{
625 int error = 0;
626
627 if (access_acl) {
628 /*
629 * Get the Access ACL and the mode. If either cannot
630 * be obtained for some reason, invalidate the access ACL.
631 */
632 xfs_acl_get_attr(vp, access_acl, _ACL_TYPE_ACCESS, 0, &error);
633 if (error)
634 access_acl->acl_cnt = XFS_ACL_NOT_PRESENT;
635 else /* We have a good ACL and the file mode, synchronize. */
636 xfs_acl_sync_mode(XFS_I(vp)->i_d.di_mode, access_acl);
637 }
638
639 if (default_acl) {
640 xfs_acl_get_attr(vp, default_acl, _ACL_TYPE_DEFAULT, 0, &error);
641 if (error)
642 default_acl->acl_cnt = XFS_ACL_NOT_PRESENT;
643 }
644 return error;
645}
646
647/*
648 * This function retrieves the parent directory's acl, processes it
649 * and lets the child inherit the acl(s) that it should.
650 */
651int
652xfs_acl_inherit(
653 struct inode *vp,
654 mode_t mode,
655 xfs_acl_t *pdaclp)
656{
657 xfs_acl_t *cacl;
658 int error = 0;
659 int basicperms = 0;
660
661 /*
662 * If the parent does not have a default ACL, or it's an
663 * invalid ACL, we're done.
664 */
665 if (!vp)
666 return 0;
667 if (!pdaclp || xfs_acl_invalid(pdaclp))
668 return 0;
669
670 /*
671 * Copy the default ACL of the containing directory to
672 * the access ACL of the new file and use the mode that
673 * was passed in to set up the correct initial values for
674 * the u::,g::[m::], and o:: entries. This is what makes
675 * umask() "work" with ACL's.
676 */
677
678 if (!(_ACL_ALLOC(cacl)))
679 return ENOMEM;
680
681 memcpy(cacl, pdaclp, sizeof(xfs_acl_t));
682 xfs_acl_filter_mode(mode, cacl);
683 error = xfs_acl_setmode(vp, cacl, &basicperms);
684 if (error)
685 goto out_error;
686
687 /*
688 * Set the Default and Access ACL on the file. The mode is already
689 * set on the file, so we don't need to worry about that.
690 *
691 * If the new file is a directory, its default ACL is a copy of
692 * the containing directory's default ACL.
693 */
694 if (S_ISDIR(vp->i_mode))
695 xfs_acl_set_attr(vp, pdaclp, _ACL_TYPE_DEFAULT, &error);
696 if (!error && !basicperms)
697 xfs_acl_set_attr(vp, cacl, _ACL_TYPE_ACCESS, &error);
698out_error:
699 _ACL_FREE(cacl);
700 return error;
701}
702
703/*
704 * Set up the correct mode on the file based on the supplied ACL. This
705 * makes sure that the mode on the file reflects the state of the
706 * u::,g::[m::], and o:: entries in the ACL. Since the mode is where
707 * the ACL is going to get the permissions for these entries, we must
708 * synchronize the mode whenever we set the ACL on a file.
709 */
710STATIC int
711xfs_acl_setmode(
712 struct inode *vp,
713 xfs_acl_t *acl,
714 int *basicperms)
715{
716 struct iattr iattr;
717 xfs_acl_entry_t *ap;
718 xfs_acl_entry_t *gap = NULL;
719 int i, nomask = 1;
720
721 *basicperms = 1;
722
723 if (acl->acl_cnt == XFS_ACL_NOT_PRESENT)
724 return 0;
725
726 /*
727 * Copy the u::, g::, o::, and m:: bits from the ACL into the
728 * mode. The m:: bits take precedence over the g:: bits.
729 */
730 iattr.ia_valid = ATTR_MODE;
731 iattr.ia_mode = XFS_I(vp)->i_d.di_mode;
732 iattr.ia_mode &= ~(S_IRWXU|S_IRWXG|S_IRWXO);
733 ap = acl->acl_entry;
734 for (i = 0; i < acl->acl_cnt; ++i) {
735 switch (ap->ae_tag) {
736 case ACL_USER_OBJ:
737 iattr.ia_mode |= ap->ae_perm << 6;
738 break;
739 case ACL_GROUP_OBJ:
740 gap = ap;
741 break;
742 case ACL_MASK: /* more than just standard modes */
743 nomask = 0;
744 iattr.ia_mode |= ap->ae_perm << 3;
745 *basicperms = 0;
746 break;
747 case ACL_OTHER:
748 iattr.ia_mode |= ap->ae_perm;
749 break;
750 default: /* more than just standard modes */
751 *basicperms = 0;
752 break;
753 }
754 ap++;
755 }
756
757 /* Set the group bits from ACL_GROUP_OBJ if there's no ACL_MASK */
758 if (gap && nomask)
759 iattr.ia_mode |= gap->ae_perm << 3;
760
761 return xfs_setattr(XFS_I(vp), &iattr, 0);
762}
763
764/*
765 * The permissions for the special ACL entries (u::, g::[m::], o::) are
766 * actually stored in the file mode (if there is both a group and a mask,
767 * the group is stored in the ACL entry and the mask is stored on the file).
768 * This allows the mode to remain automatically in sync with the ACL without
769 * the need for a call-back to the ACL system at every point where the mode
770 * could change. This function takes the permissions from the specified mode
771 * and places it in the supplied ACL.
772 *
773 * This implementation draws its validity from the fact that, when the ACL
774 * was assigned, the mode was copied from the ACL.
775 * If the mode did not change, therefore, the mode remains exactly what was
776 * taken from the special ACL entries at assignment.
777 * If a subsequent chmod() was done, the POSIX spec says that the change in
778 * mode must cause an update to the ACL seen at user level and used for
779 * access checks. Before and after a mode change, therefore, the file mode
780 * most accurately reflects what the special ACL entries should permit/deny.
781 *
782 * CAVEAT: If someone sets the SGI_ACL_FILE attribute directly,
783 * the existing mode bits will override whatever is in the
784 * ACL. Similarly, if there is a pre-existing ACL that was
785 * never in sync with its mode (owing to a bug in 6.5 and
786 * before), it will now magically (or mystically) be
787 * synchronized. This could cause slight astonishment, but
788 * it is better than inconsistent permissions.
789 *
790 * The supplied ACL is a template that may contain any combination
791 * of special entries. These are treated as place holders when we fill
792 * out the ACL. This routine does not add or remove special entries, it
793 * simply unites each special entry with its associated set of permissions.
794 */
795STATIC void
796xfs_acl_sync_mode(
797 mode_t mode,
798 xfs_acl_t *acl)
799{
800 int i, nomask = 1;
801 xfs_acl_entry_t *ap;
802 xfs_acl_entry_t *gap = NULL;
803
804 /*
805 * Set ACL entries. POSIX1003.1eD16 requires that the MASK
806 * be set instead of the GROUP entry, if there is a MASK.
807 */
808 for (ap = acl->acl_entry, i = 0; i < acl->acl_cnt; ap++, i++) {
809 switch (ap->ae_tag) {
810 case ACL_USER_OBJ:
811 ap->ae_perm = (mode >> 6) & 0x7;
812 break;
813 case ACL_GROUP_OBJ:
814 gap = ap;
815 break;
816 case ACL_MASK:
817 nomask = 0;
818 ap->ae_perm = (mode >> 3) & 0x7;
819 break;
820 case ACL_OTHER:
821 ap->ae_perm = mode & 0x7;
822 break;
823 default:
824 break;
825 }
826 }
827 /* Set the ACL_GROUP_OBJ if there's no ACL_MASK */
828 if (gap && nomask)
829 gap->ae_perm = (mode >> 3) & 0x7;
830}
831
832/*
833 * When inheriting an Access ACL from a directory Default ACL,
834 * the ACL bits are set to the intersection of the ACL default
835 * permission bits and the file permission bits in mode. If there
836 * are no permission bits on the file then we must not give them
 837 * the ACL. This is what makes umask() work with ACLs.
838 */
839STATIC void
840xfs_acl_filter_mode(
841 mode_t mode,
842 xfs_acl_t *acl)
843{
844 int i, nomask = 1;
845 xfs_acl_entry_t *ap;
846 xfs_acl_entry_t *gap = NULL;
847
848 /*
849 * Set ACL entries. POSIX1003.1eD16 requires that the MASK
850 * be merged with GROUP entry, if there is a MASK.
851 */
852 for (ap = acl->acl_entry, i = 0; i < acl->acl_cnt; ap++, i++) {
853 switch (ap->ae_tag) {
854 case ACL_USER_OBJ:
855 ap->ae_perm &= (mode >> 6) & 0x7;
856 break;
857 case ACL_GROUP_OBJ:
858 gap = ap;
859 break;
860 case ACL_MASK:
861 nomask = 0;
862 ap->ae_perm &= (mode >> 3) & 0x7;
863 break;
864 case ACL_OTHER:
865 ap->ae_perm &= mode & 0x7;
866 break;
867 default:
868 break;
869 }
870 }
871 /* Set the ACL_GROUP_OBJ if there's no ACL_MASK */
872 if (gap && nomask)
873 gap->ae_perm &= (mode >> 3) & 0x7;
874}
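The deleted xfs_acl_setmode()/xfs_acl_sync_mode()/xfs_acl_filter_mode() helpers above all revolve around the same mapping that their comments describe: the permissions for the special u::, g:: (or m::, if a mask exists) and o:: entries live in the file mode, so keeping the two in step is a matter of shifting the rwx triplets in or out. A small standalone illustration of the sync direction (mode into ACL); the tag constants and the acl_sync_mode() name are simplified stand-ins for the kernel's ACL_* definitions.

#include <stdio.h>

enum { TAG_USER_OBJ, TAG_GROUP_OBJ, TAG_MASK, TAG_OTHER };

struct acl_entry {
        int             tag;
        unsigned short  perm;   /* rwx bits, 0-7 */
};

/*
 * Copy the owner/group(or mask)/other permission triplets from @mode into
 * the matching special entries, as xfs_acl_sync_mode() did.
 */
static void acl_sync_mode(unsigned int mode, struct acl_entry *acl, int cnt)
{
        struct acl_entry *group = NULL;
        int have_mask = 0;
        int i;

        for (i = 0; i < cnt; i++) {
                switch (acl[i].tag) {
                case TAG_USER_OBJ:
                        acl[i].perm = (mode >> 6) & 0x7;
                        break;
                case TAG_GROUP_OBJ:
                        group = &acl[i];        /* only used if no mask entry */
                        break;
                case TAG_MASK:
                        have_mask = 1;
                        acl[i].perm = (mode >> 3) & 0x7;
                        break;
                case TAG_OTHER:
                        acl[i].perm = mode & 0x7;
                        break;
                }
        }
        /*
         * POSIX 1003.1e: the group bits live in the MASK entry if present,
         * otherwise in GROUP_OBJ.
         */
        if (group && !have_mask)
                group->perm = (mode >> 3) & 0x7;
}

int main(void)
{
        struct acl_entry acl[] = {
                { TAG_USER_OBJ, 0 }, { TAG_GROUP_OBJ, 0 }, { TAG_OTHER, 0 },
        };
        int i;

        acl_sync_mode(0750, acl, 3);    /* rwxr-x--- */
        for (i = 0; i < 3; i++)
                printf("tag %d -> perm %o\n", acl[i].tag, acl[i].perm);
        return 0;
}

xfs_acl_filter_mode() was the intersection variant of the same loop, using &= instead of =, so that a restrictive mode (the umask case) can only take permissions away from the inherited default ACL.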
diff --git a/fs/xfs/xfs_acl.h b/fs/xfs/xfs_acl.h
index 642f1db4def4..63dc1f2efad5 100644
--- a/fs/xfs/xfs_acl.h
+++ b/fs/xfs/xfs_acl.h
@@ -18,81 +18,48 @@
18#ifndef __XFS_ACL_H__ 18#ifndef __XFS_ACL_H__
19#define __XFS_ACL_H__ 19#define __XFS_ACL_H__
20 20
21/* 21struct inode;
22 * Access Control Lists 22struct posix_acl;
23 */ 23struct xfs_inode;
24typedef __uint16_t xfs_acl_perm_t;
25typedef __int32_t xfs_acl_tag_t;
26typedef __int32_t xfs_acl_id_t;
27 24
28#define XFS_ACL_MAX_ENTRIES 25 25#define XFS_ACL_MAX_ENTRIES 25
29#define XFS_ACL_NOT_PRESENT (-1) 26#define XFS_ACL_NOT_PRESENT (-1)
30 27
31typedef struct xfs_acl_entry { 28/* On-disk XFS access control list structure */
32 xfs_acl_tag_t ae_tag; 29struct xfs_acl {
33 xfs_acl_id_t ae_id; 30 __be32 acl_cnt;
34 xfs_acl_perm_t ae_perm; 31 struct xfs_acl_entry {
35} xfs_acl_entry_t; 32 __be32 ae_tag;
36 33 __be32 ae_id;
37typedef struct xfs_acl { 34 __be16 ae_perm;
38 __int32_t acl_cnt; 35 } acl_entry[XFS_ACL_MAX_ENTRIES];
39 xfs_acl_entry_t acl_entry[XFS_ACL_MAX_ENTRIES]; 36};
40} xfs_acl_t;
41 37
42/* On-disk XFS extended attribute names */ 38/* On-disk XFS extended attribute names */
43#define SGI_ACL_FILE "SGI_ACL_FILE" 39#define SGI_ACL_FILE "SGI_ACL_FILE"
44#define SGI_ACL_DEFAULT "SGI_ACL_DEFAULT" 40#define SGI_ACL_DEFAULT "SGI_ACL_DEFAULT"
45#define SGI_ACL_FILE_SIZE (sizeof(SGI_ACL_FILE)-1) 41#define SGI_ACL_FILE_SIZE (sizeof(SGI_ACL_FILE)-1)
46#define SGI_ACL_DEFAULT_SIZE (sizeof(SGI_ACL_DEFAULT)-1) 42#define SGI_ACL_DEFAULT_SIZE (sizeof(SGI_ACL_DEFAULT)-1)
47 43
48#define _ACL_TYPE_ACCESS 1
49#define _ACL_TYPE_DEFAULT 2
50
51#ifdef CONFIG_XFS_POSIX_ACL 44#ifdef CONFIG_XFS_POSIX_ACL
45extern int xfs_check_acl(struct inode *inode, int mask);
46extern struct posix_acl *xfs_get_acl(struct inode *inode, int type);
47extern int xfs_inherit_acl(struct inode *inode, struct posix_acl *default_acl);
48extern int xfs_acl_chmod(struct inode *inode);
49extern void xfs_inode_init_acls(struct xfs_inode *ip);
50extern void xfs_inode_clear_acls(struct xfs_inode *ip);
51extern int posix_acl_access_exists(struct inode *inode);
52extern int posix_acl_default_exists(struct inode *inode);
52 53
53struct vattr; 54extern struct xattr_handler xfs_xattr_system_handler;
54struct xfs_inode;
55
56extern struct kmem_zone *xfs_acl_zone;
57#define xfs_acl_zone_init(zone, name) \
58 (zone) = kmem_zone_init(sizeof(xfs_acl_t), (name))
59#define xfs_acl_zone_destroy(zone) kmem_zone_destroy(zone)
60
61extern int xfs_acl_inherit(struct inode *, mode_t mode, xfs_acl_t *);
62extern int xfs_acl_iaccess(struct xfs_inode *, mode_t, cred_t *);
63extern int xfs_acl_vtoacl(struct inode *, xfs_acl_t *, xfs_acl_t *);
64extern int xfs_acl_vhasacl_access(struct inode *);
65extern int xfs_acl_vhasacl_default(struct inode *);
66extern int xfs_acl_vset(struct inode *, void *, size_t, int);
67extern int xfs_acl_vget(struct inode *, void *, size_t, int);
68extern int xfs_acl_vremove(struct inode *, int);
69
70#define _ACL_PERM_INVALID(perm) ((perm) & ~(ACL_READ|ACL_WRITE|ACL_EXECUTE))
71
72#define _ACL_INHERIT(c,m,d) (xfs_acl_inherit(c,m,d))
73#define _ACL_GET_ACCESS(pv,pa) (xfs_acl_vtoacl(pv,pa,NULL) == 0)
74#define _ACL_GET_DEFAULT(pv,pd) (xfs_acl_vtoacl(pv,NULL,pd) == 0)
75#define _ACL_ACCESS_EXISTS xfs_acl_vhasacl_access
76#define _ACL_DEFAULT_EXISTS xfs_acl_vhasacl_default
77
78#define _ACL_ALLOC(a) ((a) = kmem_zone_alloc(xfs_acl_zone, KM_SLEEP))
79#define _ACL_FREE(a) ((a)? kmem_zone_free(xfs_acl_zone, (a)):(void)0)
80
81#else 55#else
82#define xfs_acl_zone_init(zone,name) 56# define xfs_check_acl NULL
83#define xfs_acl_zone_destroy(zone) 57# define xfs_get_acl(inode, type) NULL
84#define xfs_acl_vset(v,p,sz,t) (-EOPNOTSUPP) 58# define xfs_inherit_acl(inode, default_acl) 0
85#define xfs_acl_vget(v,p,sz,t) (-EOPNOTSUPP) 59# define xfs_acl_chmod(inode) 0
86#define xfs_acl_vremove(v,t) (-EOPNOTSUPP) 60# define xfs_inode_init_acls(ip)
87#define xfs_acl_vhasacl_access(v) (0) 61# define xfs_inode_clear_acls(ip)
88#define xfs_acl_vhasacl_default(v) (0) 62# define posix_acl_access_exists(inode) 0
89#define _ACL_ALLOC(a) (1) /* successfully allocate nothing */ 63# define posix_acl_default_exists(inode) 0
90#define _ACL_FREE(a) ((void)0) 64#endif /* CONFIG_XFS_POSIX_ACL */
91#define _ACL_INHERIT(c,m,d) (0)
92#define _ACL_GET_ACCESS(pv,pa) (0)
93#define _ACL_GET_DEFAULT(pv,pd) (0)
94#define _ACL_ACCESS_EXISTS (NULL)
95#define _ACL_DEFAULT_EXISTS (NULL)
96#endif
97
98#endif /* __XFS_ACL_H__ */ 65#endif /* __XFS_ACL_H__ */
diff --git a/fs/xfs/xfs_ag.h b/fs/xfs/xfs_ag.h
index c8641f713caa..f24b50b68d03 100644
--- a/fs/xfs/xfs_ag.h
+++ b/fs/xfs/xfs_ag.h
@@ -212,6 +212,8 @@ typedef struct xfs_perag
212/* 212/*
213 * tags for inode radix tree 213 * tags for inode radix tree
214 */ 214 */
215#define XFS_ICI_NO_TAG (-1) /* special flag for an untagged lookup
216 in xfs_inode_ag_iterator */
215#define XFS_ICI_RECLAIM_TAG 0 /* inode is to be reclaimed */ 217#define XFS_ICI_RECLAIM_TAG 0 /* inode is to be reclaimed */
216 218
217#define XFS_AG_MAXLEVELS(mp) ((mp)->m_ag_maxlevels) 219#define XFS_AG_MAXLEVELS(mp) ((mp)->m_ag_maxlevels)
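
The new XFS_ICI_NO_TAG value lets xfs_inode_ag_iterator (in linux-2.6/xfs_sync.c, changed elsewhere in this patch but not shown here) do a plain, untagged walk of the per-AG inode radix tree. A sketch of how such a lookup would branch on the tag; example_ici_lookup is illustrative only, not the actual xfs_sync.c code:

	static int
	example_ici_lookup(struct xfs_perag *pag, struct xfs_inode **ipp,
			   unsigned long first_index, int tag)
	{
		if (tag == XFS_ICI_NO_TAG)
			return radix_tree_gang_lookup(&pag->pag_ici_root,
					(void **)ipp, first_index, 1);
		return radix_tree_gang_lookup_tag(&pag->pag_ici_root,
				(void **)ipp, first_index, 1, tag);
	}
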
diff --git a/fs/xfs/xfs_arch.h b/fs/xfs/xfs_arch.h
index 53d5e70d1360..0902249354a0 100644
--- a/fs/xfs/xfs_arch.h
+++ b/fs/xfs/xfs_arch.h
@@ -73,28 +73,6 @@ static inline void be64_add_cpu(__be64 *a, __s64 b)
73 73
74#endif /* __KERNEL__ */ 74#endif /* __KERNEL__ */
75 75
76/* do we need conversion? */
77#define ARCH_NOCONVERT 1
78#ifdef XFS_NATIVE_HOST
79# define ARCH_CONVERT ARCH_NOCONVERT
80#else
81# define ARCH_CONVERT 0
82#endif
83
84/* generic swapping macros */
85
86#ifndef HAVE_SWABMACROS
87#define INT_SWAP16(type,var) ((typeof(type))(__swab16((__u16)(var))))
88#define INT_SWAP32(type,var) ((typeof(type))(__swab32((__u32)(var))))
89#define INT_SWAP64(type,var) ((typeof(type))(__swab64((__u64)(var))))
90#endif
91
92#define INT_SWAP(type, var) \
93 ((sizeof(type) == 8) ? INT_SWAP64(type,var) : \
94 ((sizeof(type) == 4) ? INT_SWAP32(type,var) : \
95 ((sizeof(type) == 2) ? INT_SWAP16(type,var) : \
96 (var))))
97
98/* 76/*
99 * get and set integers from potentially unaligned locations 77 * get and set integers from potentially unaligned locations
100 */ 78 */
@@ -107,16 +85,6 @@ static inline void be64_add_cpu(__be64 *a, __s64 b)
107 ((__u8*)(pointer))[1] = (((value) ) & 0xff); \ 85 ((__u8*)(pointer))[1] = (((value) ) & 0xff); \
108 } 86 }
109 87
110/* does not return a value */
111#define INT_SET(reference,arch,valueref) \
112 (__builtin_constant_p(valueref) ? \
113 (void)( (reference) = ( ((arch) != ARCH_NOCONVERT) ? (INT_SWAP((reference),(valueref))) : (valueref)) ) : \
114 (void)( \
115 ((reference) = (valueref)), \
116 ( ((arch) != ARCH_NOCONVERT) ? (reference) = INT_SWAP((reference),(reference)) : 0 ) \
117 ) \
118 )
119
120/* 88/*
121 * In directories inode numbers are stored as unaligned arrays of unsigned 89 * In directories inode numbers are stored as unaligned arrays of unsigned
122 * 8bit integers on disk. 90 * 8bit integers on disk.
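
The ARCH_CONVERT/INT_SWAP/INT_SET machinery deleted here is dead wood: on-disk fields are declared with the kernel's fixed-endian types and converted explicitly at the point of use. An illustrative fragment of the replacement style (not taken from this patch):

	dip->di_magic = cpu_to_be16(XFS_DINODE_MAGIC);	/* store big-endian on disk */
	magic = be16_to_cpu(dip->di_magic);		/* convert when reading back */
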
diff --git a/fs/xfs/xfs_attr.c b/fs/xfs/xfs_attr.c
index 5fde1654b430..db15feb906ff 100644
--- a/fs/xfs/xfs_attr.c
+++ b/fs/xfs/xfs_attr.c
@@ -45,7 +45,6 @@
45#include "xfs_error.h" 45#include "xfs_error.h"
46#include "xfs_quota.h" 46#include "xfs_quota.h"
47#include "xfs_trans_space.h" 47#include "xfs_trans_space.h"
48#include "xfs_acl.h"
49#include "xfs_rw.h" 48#include "xfs_rw.h"
50#include "xfs_vnodeops.h" 49#include "xfs_vnodeops.h"
51 50
@@ -249,8 +248,9 @@ xfs_attr_set_int(xfs_inode_t *dp, struct xfs_name *name,
249 /* 248 /*
250 * Attach the dquots to the inode. 249 * Attach the dquots to the inode.
251 */ 250 */
252 if ((error = XFS_QM_DQATTACH(mp, dp, 0))) 251 error = xfs_qm_dqattach(dp, 0);
253 return (error); 252 if (error)
253 return error;
254 254
255 /* 255 /*
256 * If the inode doesn't have an attribute fork, add one. 256 * If the inode doesn't have an attribute fork, add one.
@@ -311,7 +311,7 @@ xfs_attr_set_int(xfs_inode_t *dp, struct xfs_name *name,
311 } 311 }
312 xfs_ilock(dp, XFS_ILOCK_EXCL); 312 xfs_ilock(dp, XFS_ILOCK_EXCL);
313 313
314 error = XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, args.trans, dp, args.total, 0, 314 error = xfs_trans_reserve_quota_nblks(args.trans, dp, args.total, 0,
315 rsvd ? XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES : 315 rsvd ? XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES :
316 XFS_QMOPT_RES_REGBLKS); 316 XFS_QMOPT_RES_REGBLKS);
317 if (error) { 317 if (error) {
@@ -501,8 +501,9 @@ xfs_attr_remove_int(xfs_inode_t *dp, struct xfs_name *name, int flags)
501 /* 501 /*
502 * Attach the dquots to the inode. 502 * Attach the dquots to the inode.
503 */ 503 */
504 if ((error = XFS_QM_DQATTACH(mp, dp, 0))) 504 error = xfs_qm_dqattach(dp, 0);
505 return (error); 505 if (error)
506 return error;
506 507
507 /* 508 /*
508 * Start our first transaction of the day. 509 * Start our first transaction of the day.
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
index ca7c6005a487..4b0f6efb046c 100644
--- a/fs/xfs/xfs_bmap.c
+++ b/fs/xfs/xfs_bmap.c
@@ -2691,7 +2691,7 @@ xfs_bmap_rtalloc(
2691 * Adjust the disk quota also. This was reserved 2691 * Adjust the disk quota also. This was reserved
2692 * earlier. 2692 * earlier.
2693 */ 2693 */
2694 XFS_TRANS_MOD_DQUOT_BYINO(mp, ap->tp, ap->ip, 2694 xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
2695 ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT : 2695 ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT :
2696 XFS_TRANS_DQ_RTBCOUNT, (long) ralen); 2696 XFS_TRANS_DQ_RTBCOUNT, (long) ralen);
2697 } else { 2697 } else {
@@ -2995,7 +2995,7 @@ xfs_bmap_btalloc(
2995 * Adjust the disk quota also. This was reserved 2995 * Adjust the disk quota also. This was reserved
2996 * earlier. 2996 * earlier.
2997 */ 2997 */
2998 XFS_TRANS_MOD_DQUOT_BYINO(mp, ap->tp, ap->ip, 2998 xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
2999 ap->wasdel ? XFS_TRANS_DQ_DELBCOUNT : 2999 ap->wasdel ? XFS_TRANS_DQ_DELBCOUNT :
3000 XFS_TRANS_DQ_BCOUNT, 3000 XFS_TRANS_DQ_BCOUNT,
3001 (long) args.len); 3001 (long) args.len);
@@ -3066,7 +3066,7 @@ xfs_bmap_btree_to_extents(
3066 return error; 3066 return error;
3067 xfs_bmap_add_free(cbno, 1, cur->bc_private.b.flist, mp); 3067 xfs_bmap_add_free(cbno, 1, cur->bc_private.b.flist, mp);
3068 ip->i_d.di_nblocks--; 3068 ip->i_d.di_nblocks--;
3069 XFS_TRANS_MOD_DQUOT_BYINO(mp, tp, ip, XFS_TRANS_DQ_BCOUNT, -1L); 3069 xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
3070 xfs_trans_binval(tp, cbp); 3070 xfs_trans_binval(tp, cbp);
3071 if (cur->bc_bufs[0] == cbp) 3071 if (cur->bc_bufs[0] == cbp)
3072 cur->bc_bufs[0] = NULL; 3072 cur->bc_bufs[0] = NULL;
@@ -3386,7 +3386,7 @@ xfs_bmap_del_extent(
3386 * Adjust quota data. 3386 * Adjust quota data.
3387 */ 3387 */
3388 if (qfield) 3388 if (qfield)
3389 XFS_TRANS_MOD_DQUOT_BYINO(mp, tp, ip, qfield, (long)-nblks); 3389 xfs_trans_mod_dquot_byino(tp, ip, qfield, (long)-nblks);
3390 3390
3391 /* 3391 /*
3392 * Account for change in delayed indirect blocks. 3392 * Account for change in delayed indirect blocks.
@@ -3523,7 +3523,7 @@ xfs_bmap_extents_to_btree(
3523 *firstblock = cur->bc_private.b.firstblock = args.fsbno; 3523 *firstblock = cur->bc_private.b.firstblock = args.fsbno;
3524 cur->bc_private.b.allocated++; 3524 cur->bc_private.b.allocated++;
3525 ip->i_d.di_nblocks++; 3525 ip->i_d.di_nblocks++;
3526 XFS_TRANS_MOD_DQUOT_BYINO(mp, tp, ip, XFS_TRANS_DQ_BCOUNT, 1L); 3526 xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
3527 abp = xfs_btree_get_bufl(mp, tp, args.fsbno, 0); 3527 abp = xfs_btree_get_bufl(mp, tp, args.fsbno, 0);
3528 /* 3528 /*
3529 * Fill in the child block. 3529 * Fill in the child block.
@@ -3690,7 +3690,7 @@ xfs_bmap_local_to_extents(
3690 XFS_BMAP_TRACE_POST_UPDATE("new", ip, 0, whichfork); 3690 XFS_BMAP_TRACE_POST_UPDATE("new", ip, 0, whichfork);
3691 XFS_IFORK_NEXT_SET(ip, whichfork, 1); 3691 XFS_IFORK_NEXT_SET(ip, whichfork, 1);
3692 ip->i_d.di_nblocks = 1; 3692 ip->i_d.di_nblocks = 1;
3693 XFS_TRANS_MOD_DQUOT_BYINO(args.mp, tp, ip, 3693 xfs_trans_mod_dquot_byino(tp, ip,
3694 XFS_TRANS_DQ_BCOUNT, 1L); 3694 XFS_TRANS_DQ_BCOUNT, 1L);
3695 flags |= xfs_ilog_fext(whichfork); 3695 flags |= xfs_ilog_fext(whichfork);
3696 } else { 3696 } else {
@@ -4048,7 +4048,7 @@ xfs_bmap_add_attrfork(
4048 XFS_TRANS_PERM_LOG_RES, XFS_ADDAFORK_LOG_COUNT))) 4048 XFS_TRANS_PERM_LOG_RES, XFS_ADDAFORK_LOG_COUNT)))
4049 goto error0; 4049 goto error0;
4050 xfs_ilock(ip, XFS_ILOCK_EXCL); 4050 xfs_ilock(ip, XFS_ILOCK_EXCL);
4051 error = XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, tp, ip, blks, 0, rsvd ? 4051 error = xfs_trans_reserve_quota_nblks(tp, ip, blks, 0, rsvd ?
4052 XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES : 4052 XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES :
4053 XFS_QMOPT_RES_REGBLKS); 4053 XFS_QMOPT_RES_REGBLKS);
4054 if (error) { 4054 if (error) {
@@ -4983,10 +4983,11 @@ xfs_bmapi(
4983 * adjusted later. We return if we haven't 4983 * adjusted later. We return if we haven't
4984 * allocated blocks already inside this loop. 4984 * allocated blocks already inside this loop.
4985 */ 4985 */
4986 if ((error = XFS_TRANS_RESERVE_QUOTA_NBLKS( 4986 error = xfs_trans_reserve_quota_nblks(
4987 mp, NULL, ip, (long)alen, 0, 4987 NULL, ip, (long)alen, 0,
4988 rt ? XFS_QMOPT_RES_RTBLKS : 4988 rt ? XFS_QMOPT_RES_RTBLKS :
4989 XFS_QMOPT_RES_REGBLKS))) { 4989 XFS_QMOPT_RES_REGBLKS);
4990 if (error) {
4990 if (n == 0) { 4991 if (n == 0) {
4991 *nmap = 0; 4992 *nmap = 0;
4992 ASSERT(cur == NULL); 4993 ASSERT(cur == NULL);
@@ -5035,8 +5036,8 @@ xfs_bmapi(
5035 if (XFS_IS_QUOTA_ON(mp)) 5036 if (XFS_IS_QUOTA_ON(mp))
5036 /* unreserve the blocks now */ 5037 /* unreserve the blocks now */
5037 (void) 5038 (void)
5038 XFS_TRANS_UNRESERVE_QUOTA_NBLKS( 5039 xfs_trans_unreserve_quota_nblks(
5039 mp, NULL, ip, 5040 NULL, ip,
5040 (long)alen, 0, rt ? 5041 (long)alen, 0, rt ?
5041 XFS_QMOPT_RES_RTBLKS : 5042 XFS_QMOPT_RES_RTBLKS :
5042 XFS_QMOPT_RES_REGBLKS); 5043 XFS_QMOPT_RES_REGBLKS);
@@ -5691,14 +5692,14 @@ xfs_bunmapi(
5691 do_div(rtexts, mp->m_sb.sb_rextsize); 5692 do_div(rtexts, mp->m_sb.sb_rextsize);
5692 xfs_mod_incore_sb(mp, XFS_SBS_FREXTENTS, 5693 xfs_mod_incore_sb(mp, XFS_SBS_FREXTENTS,
5693 (int64_t)rtexts, rsvd); 5694 (int64_t)rtexts, rsvd);
5694 (void)XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, 5695 (void)xfs_trans_reserve_quota_nblks(NULL,
5695 NULL, ip, -((long)del.br_blockcount), 0, 5696 ip, -((long)del.br_blockcount), 0,
5696 XFS_QMOPT_RES_RTBLKS); 5697 XFS_QMOPT_RES_RTBLKS);
5697 } else { 5698 } else {
5698 xfs_mod_incore_sb(mp, XFS_SBS_FDBLOCKS, 5699 xfs_mod_incore_sb(mp, XFS_SBS_FDBLOCKS,
5699 (int64_t)del.br_blockcount, rsvd); 5700 (int64_t)del.br_blockcount, rsvd);
5700 (void)XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, 5701 (void)xfs_trans_reserve_quota_nblks(NULL,
5701 NULL, ip, -((long)del.br_blockcount), 0, 5702 ip, -((long)del.br_blockcount), 0,
5702 XFS_QMOPT_RES_REGBLKS); 5703 XFS_QMOPT_RES_REGBLKS);
5703 } 5704 }
5704 ip->i_delayed_blks -= del.br_blockcount; 5705 ip->i_delayed_blks -= del.br_blockcount;
diff --git a/fs/xfs/xfs_bmap_btree.c b/fs/xfs/xfs_bmap_btree.c
index 0760d352586f..5c1ade06578e 100644
--- a/fs/xfs/xfs_bmap_btree.c
+++ b/fs/xfs/xfs_bmap_btree.c
@@ -590,7 +590,7 @@ xfs_bmbt_alloc_block(
590 cur->bc_private.b.allocated++; 590 cur->bc_private.b.allocated++;
591 cur->bc_private.b.ip->i_d.di_nblocks++; 591 cur->bc_private.b.ip->i_d.di_nblocks++;
592 xfs_trans_log_inode(args.tp, cur->bc_private.b.ip, XFS_ILOG_CORE); 592 xfs_trans_log_inode(args.tp, cur->bc_private.b.ip, XFS_ILOG_CORE);
593 XFS_TRANS_MOD_DQUOT_BYINO(args.mp, args.tp, cur->bc_private.b.ip, 593 xfs_trans_mod_dquot_byino(args.tp, cur->bc_private.b.ip,
594 XFS_TRANS_DQ_BCOUNT, 1L); 594 XFS_TRANS_DQ_BCOUNT, 1L);
595 595
596 new->l = cpu_to_be64(args.fsbno); 596 new->l = cpu_to_be64(args.fsbno);
@@ -618,7 +618,7 @@ xfs_bmbt_free_block(
618 ip->i_d.di_nblocks--; 618 ip->i_d.di_nblocks--;
619 619
620 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); 620 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
621 XFS_TRANS_MOD_DQUOT_BYINO(mp, tp, ip, XFS_TRANS_DQ_BCOUNT, -1L); 621 xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
622 xfs_trans_binval(tp, bp); 622 xfs_trans_binval(tp, bp);
623 return 0; 623 return 0;
624} 624}
diff --git a/fs/xfs/xfs_filestream.c b/fs/xfs/xfs_filestream.c
index 6c87c8f304ef..edf8bdf4141f 100644
--- a/fs/xfs/xfs_filestream.c
+++ b/fs/xfs/xfs_filestream.c
@@ -542,10 +542,8 @@ xfs_filestream_associate(
542 * waiting for the lock because someone else is waiting on the lock we 542 * waiting for the lock because someone else is waiting on the lock we
543 * hold and we cannot drop that as we are in a transaction here. 543 * hold and we cannot drop that as we are in a transaction here.
544 * 544 *
545 * Lucky for us, this inversion is rarely a problem because it's a 545 * Lucky for us, this inversion is not a problem because it's a
546 * directory inode that we are trying to lock here and that means the 546 * directory inode that we are trying to lock here.
547 * only place that matters is xfs_sync_inodes() and SYNC_DELWRI is
548 * used. i.e. freeze, remount-ro, quotasync or unmount.
549 * 547 *
550 * So, if we can't get the iolock without sleeping then just give up 548 * So, if we can't get the iolock without sleeping then just give up
551 */ 549 */
diff --git a/fs/xfs/xfs_fs.h b/fs/xfs/xfs_fs.h
index f7c06fac8229..c4ea51b55dce 100644
--- a/fs/xfs/xfs_fs.h
+++ b/fs/xfs/xfs_fs.h
@@ -239,10 +239,13 @@ typedef struct xfs_fsop_resblks {
239 * Minimum and maximum sizes need for growth checks 239 * Minimum and maximum sizes need for growth checks
240 */ 240 */
241#define XFS_MIN_AG_BLOCKS 64 241#define XFS_MIN_AG_BLOCKS 64
242#define XFS_MIN_LOG_BLOCKS 512 242#define XFS_MIN_LOG_BLOCKS 512ULL
243#define XFS_MAX_LOG_BLOCKS (64 * 1024) 243#define XFS_MAX_LOG_BLOCKS (1024 * 1024ULL)
244#define XFS_MIN_LOG_BYTES (256 * 1024) 244#define XFS_MIN_LOG_BYTES (10 * 1024 * 1024ULL)
245#define XFS_MAX_LOG_BYTES (128 * 1024 * 1024) 245
246/* keep the maximum size under 2^31 by a small amount */
247#define XFS_MAX_LOG_BYTES \
248 ((2 * 1024 * 1024 * 1024ULL) - XFS_MIN_LOG_BYTES)
246 249
247/* 250/*
248 * Structures for XFS_IOC_FSGROWFSDATA, XFS_IOC_FSGROWFSLOG & XFS_IOC_FSGROWFSRT 251 * Structures for XFS_IOC_FSGROWFSDATA, XFS_IOC_FSGROWFSLOG & XFS_IOC_FSGROWFSRT
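
For reference, the new limits work out as follows: XFS_MAX_LOG_BLOCKS rises from 64 * 1024 to 1024 * 1024 = 1,048,576 blocks, XFS_MIN_LOG_BYTES rises from 256 KiB to 10 * 1024 * 1024 = 10,485,760 bytes, and XFS_MAX_LOG_BYTES = 2 * 1024^3 - 10,485,760 = 2,136,997,888 bytes, which stays a little under 2^31 = 2,147,483,648 as the new comment requires.
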
diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c
index 89b81eedce6a..76c540f719e4 100644
--- a/fs/xfs/xfs_iget.c
+++ b/fs/xfs/xfs_iget.c
@@ -18,6 +18,7 @@
18#include "xfs.h" 18#include "xfs.h"
19#include "xfs_fs.h" 19#include "xfs_fs.h"
20#include "xfs_types.h" 20#include "xfs_types.h"
21#include "xfs_acl.h"
21#include "xfs_bit.h" 22#include "xfs_bit.h"
22#include "xfs_log.h" 23#include "xfs_log.h"
23#include "xfs_inum.h" 24#include "xfs_inum.h"
@@ -82,6 +83,7 @@ xfs_inode_alloc(
82 memset(&ip->i_d, 0, sizeof(xfs_icdinode_t)); 83 memset(&ip->i_d, 0, sizeof(xfs_icdinode_t));
83 ip->i_size = 0; 84 ip->i_size = 0;
84 ip->i_new_size = 0; 85 ip->i_new_size = 0;
86 xfs_inode_init_acls(ip);
85 87
86 /* 88 /*
87 * Initialize inode's trace buffers. 89 * Initialize inode's trace buffers.
@@ -500,10 +502,7 @@ xfs_ireclaim(
500 * ilock one but will still hold the iolock. 502 * ilock one but will still hold the iolock.
501 */ 503 */
502 xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); 504 xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
503 /* 505 xfs_qm_dqdetach(ip);
504 * Release dquots (and their references) if any.
505 */
506 XFS_QM_DQDETACH(ip->i_mount, ip);
507 xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); 506 xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
508 507
509 switch (ip->i_d.di_mode & S_IFMT) { 508 switch (ip->i_d.di_mode & S_IFMT) {
@@ -561,6 +560,7 @@ xfs_ireclaim(
561 ASSERT(atomic_read(&ip->i_pincount) == 0); 560 ASSERT(atomic_read(&ip->i_pincount) == 0);
562 ASSERT(!spin_is_locked(&ip->i_flags_lock)); 561 ASSERT(!spin_is_locked(&ip->i_flags_lock));
563 ASSERT(completion_done(&ip->i_flush)); 562 ASSERT(completion_done(&ip->i_flush));
563 xfs_inode_clear_acls(ip);
564 kmem_zone_free(xfs_inode_zone, ip); 564 kmem_zone_free(xfs_inode_zone, ip);
565} 565}
566 566
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 123b20c8cbf2..1f22d65fed0a 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -49,7 +49,6 @@
49#include "xfs_utils.h" 49#include "xfs_utils.h"
50#include "xfs_dir2_trace.h" 50#include "xfs_dir2_trace.h"
51#include "xfs_quota.h" 51#include "xfs_quota.h"
52#include "xfs_acl.h"
53#include "xfs_filestream.h" 52#include "xfs_filestream.h"
54#include "xfs_vnodeops.h" 53#include "xfs_vnodeops.h"
55 54
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index f879c1bc4b96..77016702938b 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -18,6 +18,7 @@
18#ifndef __XFS_INODE_H__ 18#ifndef __XFS_INODE_H__
19#define __XFS_INODE_H__ 19#define __XFS_INODE_H__
20 20
21struct posix_acl;
21struct xfs_dinode; 22struct xfs_dinode;
22struct xfs_inode; 23struct xfs_inode;
23 24
@@ -272,6 +273,11 @@ typedef struct xfs_inode {
272 /* VFS inode */ 273 /* VFS inode */
273 struct inode i_vnode; /* embedded VFS inode */ 274 struct inode i_vnode; /* embedded VFS inode */
274 275
276#ifdef CONFIG_XFS_POSIX_ACL
277 struct posix_acl *i_acl;
278 struct posix_acl *i_default_acl;
279#endif
280
275 /* Trace buffers per inode. */ 281 /* Trace buffers per inode. */
276#ifdef XFS_INODE_TRACE 282#ifdef XFS_INODE_TRACE
277 struct ktrace *i_trace; /* general inode trace */ 283 struct ktrace *i_trace; /* general inode trace */
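
The two cached-ACL pointers added here pair with the xfs_inode_init_acls()/xfs_inode_clear_acls() calls in xfs_iget.c above; the real bodies live in linux-2.6/xfs_acl.c. A plausible sketch of the lifecycle they imply, using the generic ACL_NOT_CACHED sentinel from <linux/posix_acl.h>; the helper names and bodies below are assumptions, not copied from the patch:

	static inline void example_init_acls(struct xfs_inode *ip)
	{
		/* freshly allocated inode: nothing cached yet */
		ip->i_acl = ACL_NOT_CACHED;
		ip->i_default_acl = ACL_NOT_CACHED;
	}

	static inline void example_clear_acls(struct xfs_inode *ip)
	{
		/* inode is being reclaimed: drop any cached references */
		if (ip->i_acl && ip->i_acl != ACL_NOT_CACHED)
			posix_acl_release(ip->i_acl);
		if (ip->i_default_acl && ip->i_default_acl != ACL_NOT_CACHED)
			posix_acl_release(ip->i_default_acl);
	}
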
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 5aaa2d7ec155..67ae5555a30a 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -42,7 +42,6 @@
42#include "xfs_error.h" 42#include "xfs_error.h"
43#include "xfs_itable.h" 43#include "xfs_itable.h"
44#include "xfs_rw.h" 44#include "xfs_rw.h"
45#include "xfs_acl.h"
46#include "xfs_attr.h" 45#include "xfs_attr.h"
47#include "xfs_buf_item.h" 46#include "xfs_buf_item.h"
48#include "xfs_trans_space.h" 47#include "xfs_trans_space.h"
@@ -385,7 +384,7 @@ xfs_iomap_write_direct(
385 * Make sure that the dquots are there. This doesn't hold 384 * Make sure that the dquots are there. This doesn't hold
386 * the ilock across a disk read. 385 * the ilock across a disk read.
387 */ 386 */
388 error = XFS_QM_DQATTACH(ip->i_mount, ip, XFS_QMOPT_ILOCKED); 387 error = xfs_qm_dqattach_locked(ip, 0);
389 if (error) 388 if (error)
390 return XFS_ERROR(error); 389 return XFS_ERROR(error);
391 390
@@ -444,8 +443,7 @@ xfs_iomap_write_direct(
444 if (error) 443 if (error)
445 goto error_out; 444 goto error_out;
446 445
447 error = XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, tp, ip, 446 error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks, 0, quota_flag);
448 qblocks, 0, quota_flag);
449 if (error) 447 if (error)
450 goto error1; 448 goto error1;
451 449
@@ -495,7 +493,7 @@ xfs_iomap_write_direct(
495 493
496error0: /* Cancel bmap, unlock inode, unreserve quota blocks, cancel trans */ 494error0: /* Cancel bmap, unlock inode, unreserve quota blocks, cancel trans */
497 xfs_bmap_cancel(&free_list); 495 xfs_bmap_cancel(&free_list);
498 XFS_TRANS_UNRESERVE_QUOTA_NBLKS(mp, tp, ip, qblocks, 0, quota_flag); 496 xfs_trans_unreserve_quota_nblks(tp, ip, qblocks, 0, quota_flag);
499 497
500error1: /* Just cancel transaction */ 498error1: /* Just cancel transaction */
501 xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT); 499 xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
@@ -582,7 +580,7 @@ xfs_iomap_write_delay(
582 * Make sure that the dquots are there. This doesn't hold 580 * Make sure that the dquots are there. This doesn't hold
583 * the ilock across a disk read. 581 * the ilock across a disk read.
584 */ 582 */
585 error = XFS_QM_DQATTACH(mp, ip, XFS_QMOPT_ILOCKED); 583 error = xfs_qm_dqattach_locked(ip, 0);
586 if (error) 584 if (error)
587 return XFS_ERROR(error); 585 return XFS_ERROR(error);
588 586
@@ -684,7 +682,8 @@ xfs_iomap_write_allocate(
684 /* 682 /*
685 * Make sure that the dquots are there. 683 * Make sure that the dquots are there.
686 */ 684 */
687 if ((error = XFS_QM_DQATTACH(mp, ip, 0))) 685 error = xfs_qm_dqattach(ip, 0);
686 if (error)
688 return XFS_ERROR(error); 687 return XFS_ERROR(error);
689 688
690 offset_fsb = XFS_B_TO_FSBT(mp, offset); 689 offset_fsb = XFS_B_TO_FSBT(mp, offset);
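
With XFS_QMOPT_ILOCKED gone, the locking rule is now encoded in the function name: xfs_qm_dqattach_locked() expects the caller to already hold XFS_ILOCK_EXCL (as in the two converted paths above), while xfs_qm_dqattach() takes and drops the lock itself. A sketch of the expected relationship between the two; the real implementation sits in quota/xfs_qm.c, changed earlier in this patch, and example_dqattach is only illustrative:

	int
	example_dqattach(struct xfs_inode *ip, uint flags)
	{
		int	error;

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		error = xfs_qm_dqattach_locked(ip, flags);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		return error;
	}
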
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 7ba450116d4f..47da2fb45377 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -1975,16 +1975,30 @@ xlog_recover_do_reg_buffer(
1975 error = 0; 1975 error = 0;
1976 if (buf_f->blf_flags & 1976 if (buf_f->blf_flags &
1977 (XFS_BLI_UDQUOT_BUF|XFS_BLI_PDQUOT_BUF|XFS_BLI_GDQUOT_BUF)) { 1977 (XFS_BLI_UDQUOT_BUF|XFS_BLI_PDQUOT_BUF|XFS_BLI_GDQUOT_BUF)) {
1978 if (item->ri_buf[i].i_addr == NULL) {
1979 cmn_err(CE_ALERT,
1980 "XFS: NULL dquot in %s.", __func__);
1981 goto next;
1982 }
1983 if (item->ri_buf[i].i_len < sizeof(xfs_dqblk_t)) {
1984 cmn_err(CE_ALERT,
1985 "XFS: dquot too small (%d) in %s.",
1986 item->ri_buf[i].i_len, __func__);
1987 goto next;
1988 }
1978 error = xfs_qm_dqcheck((xfs_disk_dquot_t *) 1989 error = xfs_qm_dqcheck((xfs_disk_dquot_t *)
1979 item->ri_buf[i].i_addr, 1990 item->ri_buf[i].i_addr,
1980 -1, 0, XFS_QMOPT_DOWARN, 1991 -1, 0, XFS_QMOPT_DOWARN,
1981 "dquot_buf_recover"); 1992 "dquot_buf_recover");
1993 if (error)
1994 goto next;
1982 } 1995 }
1983 if (!error) 1996
1984 memcpy(xfs_buf_offset(bp, 1997 memcpy(xfs_buf_offset(bp,
1985 (uint)bit << XFS_BLI_SHIFT), /* dest */ 1998 (uint)bit << XFS_BLI_SHIFT), /* dest */
1986 item->ri_buf[i].i_addr, /* source */ 1999 item->ri_buf[i].i_addr, /* source */
1987 nbits<<XFS_BLI_SHIFT); /* length */ 2000 nbits<<XFS_BLI_SHIFT); /* length */
2001 next:
1988 i++; 2002 i++;
1989 bit += nbits; 2003 bit += nbits;
1990 } 2004 }
@@ -2615,7 +2629,19 @@ xlog_recover_do_dquot_trans(
2615 return (0); 2629 return (0);
2616 2630
2617 recddq = (xfs_disk_dquot_t *)item->ri_buf[1].i_addr; 2631 recddq = (xfs_disk_dquot_t *)item->ri_buf[1].i_addr;
2618 ASSERT(recddq); 2632
2633 if (item->ri_buf[1].i_addr == NULL) {
2634 cmn_err(CE_ALERT,
2635 "XFS: NULL dquot in %s.", __func__);
2636 return XFS_ERROR(EIO);
2637 }
2638 if (item->ri_buf[1].i_len < sizeof(xfs_dqblk_t)) {
2639 cmn_err(CE_ALERT,
2640 "XFS: dquot too small (%d) in %s.",
2641 item->ri_buf[1].i_len, __func__);
2642 return XFS_ERROR(EIO);
2643 }
2644
2619 /* 2645 /*
2620 * This type of quotas was turned off, so ignore this record. 2646 * This type of quotas was turned off, so ignore this record.
2621 */ 2647 */
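
Both recovery paths now refuse to touch dquot data that is missing or shorter than a full xfs_dqblk_t before handing it to xfs_qm_dqcheck(), so a corrupted log can no longer feed garbage into the dquot buffers being replayed. The shared test could be written as a helper along these lines; the patch open-codes it in both places and example_dquot_item_ok is hypothetical:

	static int
	example_dquot_item_ok(struct xfs_log_iovec *iv)
	{
		if (iv->i_addr == NULL)
			return 0;		/* no payload at all */
		if (iv->i_len < sizeof(xfs_dqblk_t))
			return 0;		/* truncated dquot */
		return 1;
	}
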
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 65a99725d0cc..5c6f092659c1 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -960,6 +960,53 @@ xfs_check_sizes(xfs_mount_t *mp)
960} 960}
961 961
962/* 962/*
963 * Clear the quotaflags in memory and in the superblock.
964 */
965int
966xfs_mount_reset_sbqflags(
967 struct xfs_mount *mp)
968{
969 int error;
970 struct xfs_trans *tp;
971
972 mp->m_qflags = 0;
973
974 /*
975 * It is OK to look at sb_qflags here in mount path,
976 * without m_sb_lock.
977 */
978 if (mp->m_sb.sb_qflags == 0)
979 return 0;
980 spin_lock(&mp->m_sb_lock);
981 mp->m_sb.sb_qflags = 0;
982 spin_unlock(&mp->m_sb_lock);
983
984 /*
985 * If the fs is readonly, let the incore superblock run
986 * with quotas off but don't flush the update out to disk
987 */
988 if (mp->m_flags & XFS_MOUNT_RDONLY)
989 return 0;
990
991#ifdef QUOTADEBUG
992 xfs_fs_cmn_err(CE_NOTE, mp, "Writing superblock quota changes");
993#endif
994
995 tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE);
996 error = xfs_trans_reserve(tp, 0, mp->m_sb.sb_sectsize + 128, 0, 0,
997 XFS_DEFAULT_LOG_COUNT);
998 if (error) {
999 xfs_trans_cancel(tp, 0);
1000 xfs_fs_cmn_err(CE_ALERT, mp,
1001 "xfs_mount_reset_sbqflags: Superblock update failed!");
1002 return error;
1003 }
1004
1005 xfs_mod_sb(tp, XFS_SB_QFLAGS);
1006 return xfs_trans_commit(tp, 0);
1007}
1008
1009/*
963 * This function does the following on an initial mount of a file system: 1010 * This function does the following on an initial mount of a file system:
964 * - reads the superblock from disk and init the mount struct 1011 * - reads the superblock from disk and init the mount struct
965 * - if we're a 32-bit kernel, do a size check on the superblock 1012 * - if we're a 32-bit kernel, do a size check on the superblock
@@ -976,7 +1023,8 @@ xfs_mountfs(
976 xfs_sb_t *sbp = &(mp->m_sb); 1023 xfs_sb_t *sbp = &(mp->m_sb);
977 xfs_inode_t *rip; 1024 xfs_inode_t *rip;
978 __uint64_t resblks; 1025 __uint64_t resblks;
979 uint quotamount, quotaflags; 1026 uint quotamount = 0;
1027 uint quotaflags = 0;
980 int error = 0; 1028 int error = 0;
981 1029
982 xfs_mount_common(mp, sbp); 1030 xfs_mount_common(mp, sbp);
@@ -1210,9 +1258,28 @@ xfs_mountfs(
1210 /* 1258 /*
1211 * Initialise the XFS quota management subsystem for this mount 1259 * Initialise the XFS quota management subsystem for this mount
1212 */ 1260 */
1213 error = XFS_QM_INIT(mp, &quotamount, &quotaflags); 1261 if (XFS_IS_QUOTA_RUNNING(mp)) {
1214 if (error) 1262 error = xfs_qm_newmount(mp, &quotamount, &quotaflags);
1215 goto out_rtunmount; 1263 if (error)
1264 goto out_rtunmount;
1265 } else {
1266 ASSERT(!XFS_IS_QUOTA_ON(mp));
1267
1268 /*
1269 * If a file system had quotas running earlier, but decided to
1270 * mount without -o uquota/pquota/gquota options, revoke the
1271 * quotachecked license.
1272 */
1273 if (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_ACCT) {
1274 cmn_err(CE_NOTE,
1275 "XFS: resetting qflags for filesystem %s",
1276 mp->m_fsname);
1277
1278 error = xfs_mount_reset_sbqflags(mp);
1279 if (error)
1280 return error;
1281 }
1282 }
1216 1283
1217 /* 1284 /*
1218 * Finish recovering the file system. This part needed to be 1285 * Finish recovering the file system. This part needed to be
@@ -1228,9 +1295,19 @@ xfs_mountfs(
1228 /* 1295 /*
1229 * Complete the quota initialisation, post-log-replay component. 1296 * Complete the quota initialisation, post-log-replay component.
1230 */ 1297 */
1231 error = XFS_QM_MOUNT(mp, quotamount, quotaflags); 1298 if (quotamount) {
1232 if (error) 1299 ASSERT(mp->m_qflags == 0);
1233 goto out_rtunmount; 1300 mp->m_qflags = quotaflags;
1301
1302 xfs_qm_mount_quotas(mp);
1303 }
1304
1305#if defined(DEBUG) && defined(XFS_LOUD_RECOVERY)
1306 if (XFS_IS_QUOTA_ON(mp))
1307 xfs_fs_cmn_err(CE_NOTE, mp, "Disk quotas turned on");
1308 else
1309 xfs_fs_cmn_err(CE_NOTE, mp, "Disk quotas not turned on");
1310#endif
1234 1311
1235 /* 1312 /*
1236 * Now we are mounted, reserve a small amount of unused space for 1313 * Now we are mounted, reserve a small amount of unused space for
@@ -1279,12 +1356,7 @@ xfs_unmountfs(
1279 __uint64_t resblks; 1356 __uint64_t resblks;
1280 int error; 1357 int error;
1281 1358
1282 /* 1359 xfs_qm_unmount_quotas(mp);
1283 * Release dquot that rootinode, rbmino and rsumino might be holding,
1284 * and release the quota inodes.
1285 */
1286 XFS_QM_UNMOUNT(mp);
1287
1288 xfs_rtunmount_inodes(mp); 1360 xfs_rtunmount_inodes(mp);
1289 IRELE(mp->m_rootip); 1361 IRELE(mp->m_rootip);
1290 1362
@@ -1299,12 +1371,9 @@ xfs_unmountfs(
1299 * need to force the log first. 1371 * need to force the log first.
1300 */ 1372 */
1301 xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE | XFS_LOG_SYNC); 1373 xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE | XFS_LOG_SYNC);
1302 xfs_reclaim_inodes(mp, 0, XFS_IFLUSH_ASYNC); 1374 xfs_reclaim_inodes(mp, XFS_IFLUSH_ASYNC);
1303
1304 XFS_QM_DQPURGEALL(mp, XFS_QMOPT_QUOTALL | XFS_QMOPT_UMOUNTING);
1305 1375
1306 if (mp->m_quotainfo) 1376 xfs_qm_unmount(mp);
1307 XFS_QM_DONE(mp);
1308 1377
1309 /* 1378 /*
1310 * Flush out the log synchronously so that we know for sure 1379 * Flush out the log synchronously so that we know for sure
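
Condensed, the new mount-time quota sequence in xfs_mountfs amounts to the following; example_mount_quota_setup is a sketch distilled from the hunks above, not a verbatim copy:

	static int
	example_mount_quota_setup(struct xfs_mount *mp,
				  uint *quotamount, uint *quotaflags)
	{
		if (XFS_IS_QUOTA_RUNNING(mp))
			return xfs_qm_newmount(mp, quotamount, quotaflags);

		/*
		 * Mounted without -o uquota/pquota/gquota: any quotachecked
		 * state left in the superblock is stale, so clear it.
		 */
		if (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_ACCT)
			return xfs_mount_reset_sbqflags(mp);
		return 0;
	}

After log recovery, a nonzero quotamount is what copies quotaflags into mp->m_qflags and calls xfs_qm_mount_quotas(); since quotamount and quotaflags are now initialised to 0, the no-quota case simply skips that step instead of going through the old stub qmops.
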
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index d6a64392f983..a5122382afde 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -64,6 +64,8 @@ struct xfs_swapext;
64struct xfs_mru_cache; 64struct xfs_mru_cache;
65struct xfs_nameops; 65struct xfs_nameops;
66struct xfs_ail; 66struct xfs_ail;
67struct xfs_quotainfo;
68
67 69
68/* 70/*
69 * Prototypes and functions for the Data Migration subsystem. 71 * Prototypes and functions for the Data Migration subsystem.
@@ -107,86 +109,6 @@ typedef struct xfs_dmops {
107 (*(mp)->m_dm_ops->xfs_send_unmount)(mp,ip,right,mode,rval,fl) 109 (*(mp)->m_dm_ops->xfs_send_unmount)(mp,ip,right,mode,rval,fl)
108 110
109 111
110/*
111 * Prototypes and functions for the Quota Management subsystem.
112 */
113
114struct xfs_dquot;
115struct xfs_dqtrxops;
116struct xfs_quotainfo;
117
118typedef int (*xfs_qminit_t)(struct xfs_mount *, uint *, uint *);
119typedef int (*xfs_qmmount_t)(struct xfs_mount *, uint, uint);
120typedef void (*xfs_qmunmount_t)(struct xfs_mount *);
121typedef void (*xfs_qmdone_t)(struct xfs_mount *);
122typedef void (*xfs_dqrele_t)(struct xfs_dquot *);
123typedef int (*xfs_dqattach_t)(struct xfs_inode *, uint);
124typedef void (*xfs_dqdetach_t)(struct xfs_inode *);
125typedef int (*xfs_dqpurgeall_t)(struct xfs_mount *, uint);
126typedef int (*xfs_dqvopalloc_t)(struct xfs_mount *,
127 struct xfs_inode *, uid_t, gid_t, prid_t, uint,
128 struct xfs_dquot **, struct xfs_dquot **);
129typedef void (*xfs_dqvopcreate_t)(struct xfs_trans *, struct xfs_inode *,
130 struct xfs_dquot *, struct xfs_dquot *);
131typedef int (*xfs_dqvoprename_t)(struct xfs_inode **);
132typedef struct xfs_dquot * (*xfs_dqvopchown_t)(
133 struct xfs_trans *, struct xfs_inode *,
134 struct xfs_dquot **, struct xfs_dquot *);
135typedef int (*xfs_dqvopchownresv_t)(struct xfs_trans *, struct xfs_inode *,
136 struct xfs_dquot *, struct xfs_dquot *, uint);
137typedef void (*xfs_dqstatvfs_t)(struct xfs_inode *, struct kstatfs *);
138typedef int (*xfs_dqsync_t)(struct xfs_mount *, int flags);
139
140typedef struct xfs_qmops {
141 xfs_qminit_t xfs_qminit;
142 xfs_qmdone_t xfs_qmdone;
143 xfs_qmmount_t xfs_qmmount;
144 xfs_qmunmount_t xfs_qmunmount;
145 xfs_dqrele_t xfs_dqrele;
146 xfs_dqattach_t xfs_dqattach;
147 xfs_dqdetach_t xfs_dqdetach;
148 xfs_dqpurgeall_t xfs_dqpurgeall;
149 xfs_dqvopalloc_t xfs_dqvopalloc;
150 xfs_dqvopcreate_t xfs_dqvopcreate;
151 xfs_dqvoprename_t xfs_dqvoprename;
152 xfs_dqvopchown_t xfs_dqvopchown;
153 xfs_dqvopchownresv_t xfs_dqvopchownresv;
154 xfs_dqstatvfs_t xfs_dqstatvfs;
155 xfs_dqsync_t xfs_dqsync;
156 struct xfs_dqtrxops *xfs_dqtrxops;
157} xfs_qmops_t;
158
159#define XFS_QM_INIT(mp, mnt, fl) \
160 (*(mp)->m_qm_ops->xfs_qminit)(mp, mnt, fl)
161#define XFS_QM_MOUNT(mp, mnt, fl) \
162 (*(mp)->m_qm_ops->xfs_qmmount)(mp, mnt, fl)
163#define XFS_QM_UNMOUNT(mp) \
164 (*(mp)->m_qm_ops->xfs_qmunmount)(mp)
165#define XFS_QM_DONE(mp) \
166 (*(mp)->m_qm_ops->xfs_qmdone)(mp)
167#define XFS_QM_DQRELE(mp, dq) \
168 (*(mp)->m_qm_ops->xfs_dqrele)(dq)
169#define XFS_QM_DQATTACH(mp, ip, fl) \
170 (*(mp)->m_qm_ops->xfs_dqattach)(ip, fl)
171#define XFS_QM_DQDETACH(mp, ip) \
172 (*(mp)->m_qm_ops->xfs_dqdetach)(ip)
173#define XFS_QM_DQPURGEALL(mp, fl) \
174 (*(mp)->m_qm_ops->xfs_dqpurgeall)(mp, fl)
175#define XFS_QM_DQVOPALLOC(mp, ip, uid, gid, prid, fl, dq1, dq2) \
176 (*(mp)->m_qm_ops->xfs_dqvopalloc)(mp, ip, uid, gid, prid, fl, dq1, dq2)
177#define XFS_QM_DQVOPCREATE(mp, tp, ip, dq1, dq2) \
178 (*(mp)->m_qm_ops->xfs_dqvopcreate)(tp, ip, dq1, dq2)
179#define XFS_QM_DQVOPRENAME(mp, ip) \
180 (*(mp)->m_qm_ops->xfs_dqvoprename)(ip)
181#define XFS_QM_DQVOPCHOWN(mp, tp, ip, dqp, dq) \
182 (*(mp)->m_qm_ops->xfs_dqvopchown)(tp, ip, dqp, dq)
183#define XFS_QM_DQVOPCHOWNRESV(mp, tp, ip, dq1, dq2, fl) \
184 (*(mp)->m_qm_ops->xfs_dqvopchownresv)(tp, ip, dq1, dq2, fl)
185#define XFS_QM_DQSTATVFS(ip, statp) \
186 (*(ip)->i_mount->m_qm_ops->xfs_dqstatvfs)(ip, statp)
187#define XFS_QM_DQSYNC(mp, flags) \
188 (*(mp)->m_qm_ops->xfs_dqsync)(mp, flags)
189
190#ifdef HAVE_PERCPU_SB 112#ifdef HAVE_PERCPU_SB
191 113
192/* 114/*
@@ -510,8 +432,6 @@ extern int xfs_sb_validate_fsb_count(struct xfs_sb *, __uint64_t);
510 432
511extern int xfs_dmops_get(struct xfs_mount *); 433extern int xfs_dmops_get(struct xfs_mount *);
512extern void xfs_dmops_put(struct xfs_mount *); 434extern void xfs_dmops_put(struct xfs_mount *);
513extern int xfs_qmops_get(struct xfs_mount *);
514extern void xfs_qmops_put(struct xfs_mount *);
515 435
516extern struct xfs_dmops xfs_dmcore_xfs; 436extern struct xfs_dmops xfs_dmcore_xfs;
517 437
diff --git a/fs/xfs/xfs_qmops.c b/fs/xfs/xfs_qmops.c
deleted file mode 100644
index e101790ea8e7..000000000000
--- a/fs/xfs/xfs_qmops.c
+++ /dev/null
@@ -1,152 +0,0 @@
1/*
2 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18#include "xfs.h"
19#include "xfs_fs.h"
20#include "xfs_types.h"
21#include "xfs_log.h"
22#include "xfs_inum.h"
23#include "xfs_trans.h"
24#include "xfs_sb.h"
25#include "xfs_ag.h"
26#include "xfs_dir2.h"
27#include "xfs_dmapi.h"
28#include "xfs_mount.h"
29#include "xfs_quota.h"
30#include "xfs_error.h"
31
32
33STATIC struct xfs_dquot *
34xfs_dqvopchown_default(
35 struct xfs_trans *tp,
36 struct xfs_inode *ip,
37 struct xfs_dquot **dqp,
38 struct xfs_dquot *dq)
39{
40 return NULL;
41}
42
43/*
44 * Clear the quotaflags in memory and in the superblock.
45 */
46int
47xfs_mount_reset_sbqflags(xfs_mount_t *mp)
48{
49 int error;
50 xfs_trans_t *tp;
51
52 mp->m_qflags = 0;
53 /*
54 * It is OK to look at sb_qflags here in mount path,
55 * without m_sb_lock.
56 */
57 if (mp->m_sb.sb_qflags == 0)
58 return 0;
59 spin_lock(&mp->m_sb_lock);
60 mp->m_sb.sb_qflags = 0;
61 spin_unlock(&mp->m_sb_lock);
62
63 /*
64 * if the fs is readonly, let the incore superblock run
65 * with quotas off but don't flush the update out to disk
66 */
67 if (mp->m_flags & XFS_MOUNT_RDONLY)
68 return 0;
69#ifdef QUOTADEBUG
70 xfs_fs_cmn_err(CE_NOTE, mp, "Writing superblock quota changes");
71#endif
72 tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE);
73 if ((error = xfs_trans_reserve(tp, 0, mp->m_sb.sb_sectsize + 128, 0, 0,
74 XFS_DEFAULT_LOG_COUNT))) {
75 xfs_trans_cancel(tp, 0);
76 xfs_fs_cmn_err(CE_ALERT, mp,
77 "xfs_mount_reset_sbqflags: Superblock update failed!");
78 return error;
79 }
80 xfs_mod_sb(tp, XFS_SB_QFLAGS);
81 error = xfs_trans_commit(tp, 0);
82 return error;
83}
84
85STATIC int
86xfs_noquota_init(
87 xfs_mount_t *mp,
88 uint *needquotamount,
89 uint *quotaflags)
90{
91 int error = 0;
92
93 *quotaflags = 0;
94 *needquotamount = B_FALSE;
95
96 ASSERT(!XFS_IS_QUOTA_ON(mp));
97
98 /*
99 * If a file system had quotas running earlier, but decided to
100 * mount without -o uquota/pquota/gquota options, revoke the
101 * quotachecked license.
102 */
103 if (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_ACCT) {
104 cmn_err(CE_NOTE,
105 "XFS resetting qflags for filesystem %s",
106 mp->m_fsname);
107
108 error = xfs_mount_reset_sbqflags(mp);
109 }
110 return error;
111}
112
113static struct xfs_qmops xfs_qmcore_stub = {
114 .xfs_qminit = (xfs_qminit_t) xfs_noquota_init,
115 .xfs_qmdone = (xfs_qmdone_t) fs_noerr,
116 .xfs_qmmount = (xfs_qmmount_t) fs_noerr,
117 .xfs_qmunmount = (xfs_qmunmount_t) fs_noerr,
118 .xfs_dqrele = (xfs_dqrele_t) fs_noerr,
119 .xfs_dqattach = (xfs_dqattach_t) fs_noerr,
120 .xfs_dqdetach = (xfs_dqdetach_t) fs_noerr,
121 .xfs_dqpurgeall = (xfs_dqpurgeall_t) fs_noerr,
122 .xfs_dqvopalloc = (xfs_dqvopalloc_t) fs_noerr,
123 .xfs_dqvopcreate = (xfs_dqvopcreate_t) fs_noerr,
124 .xfs_dqvoprename = (xfs_dqvoprename_t) fs_noerr,
125 .xfs_dqvopchown = xfs_dqvopchown_default,
126 .xfs_dqvopchownresv = (xfs_dqvopchownresv_t) fs_noerr,
127 .xfs_dqstatvfs = (xfs_dqstatvfs_t) fs_noval,
128 .xfs_dqsync = (xfs_dqsync_t) fs_noerr,
129};
130
131int
132xfs_qmops_get(struct xfs_mount *mp)
133{
134 if (XFS_IS_QUOTA_RUNNING(mp)) {
135#ifdef CONFIG_XFS_QUOTA
136 mp->m_qm_ops = &xfs_qmcore_xfs;
137#else
138 cmn_err(CE_WARN,
139 "XFS: qouta support not available in this kernel.");
140 return EINVAL;
141#endif
142 } else {
143 mp->m_qm_ops = &xfs_qmcore_stub;
144 }
145
146 return 0;
147}
148
149void
150xfs_qmops_put(struct xfs_mount *mp)
151{
152}
diff --git a/fs/xfs/xfs_quota.h b/fs/xfs/xfs_quota.h
index f5d1202dde25..079d2e61c626 100644
--- a/fs/xfs/xfs_quota.h
+++ b/fs/xfs/xfs_quota.h
@@ -197,7 +197,6 @@ typedef struct xfs_qoff_logformat {
197#define XFS_QMOPT_UMOUNTING 0x0000100 /* filesys is being unmounted */ 197#define XFS_QMOPT_UMOUNTING 0x0000100 /* filesys is being unmounted */
198#define XFS_QMOPT_DOLOG 0x0000200 /* log buf changes (in quotacheck) */ 198#define XFS_QMOPT_DOLOG 0x0000200 /* log buf changes (in quotacheck) */
199#define XFS_QMOPT_DOWARN 0x0000400 /* increase warning cnt if needed */ 199#define XFS_QMOPT_DOWARN 0x0000400 /* increase warning cnt if needed */
200#define XFS_QMOPT_ILOCKED 0x0000800 /* inode is already locked (excl) */
201#define XFS_QMOPT_DQREPAIR 0x0001000 /* repair dquot if damaged */ 200#define XFS_QMOPT_DQREPAIR 0x0001000 /* repair dquot if damaged */
202#define XFS_QMOPT_GQUOTA 0x0002000 /* group dquot requested */ 201#define XFS_QMOPT_GQUOTA 0x0002000 /* group dquot requested */
203#define XFS_QMOPT_ENOSPC 0x0004000 /* enospc instead of edquot (prj) */ 202#define XFS_QMOPT_ENOSPC 0x0004000 /* enospc instead of edquot (prj) */
@@ -302,69 +301,72 @@ typedef struct xfs_dqtrx {
302 long qt_delrtb_delta; /* delayed RT blk count changes */ 301 long qt_delrtb_delta; /* delayed RT blk count changes */
303} xfs_dqtrx_t; 302} xfs_dqtrx_t;
304 303
305/* 304#ifdef CONFIG_XFS_QUOTA
306 * Dquot transaction functions, used if quota is enabled. 305extern void xfs_trans_dup_dqinfo(struct xfs_trans *, struct xfs_trans *);
307 */ 306extern void xfs_trans_free_dqinfo(struct xfs_trans *);
308typedef void (*qo_dup_dqinfo_t)(struct xfs_trans *, struct xfs_trans *); 307extern void xfs_trans_mod_dquot_byino(struct xfs_trans *, struct xfs_inode *,
309typedef void (*qo_mod_dquot_byino_t)(struct xfs_trans *, 308 uint, long);
310 struct xfs_inode *, uint, long); 309extern void xfs_trans_apply_dquot_deltas(struct xfs_trans *);
311typedef void (*qo_free_dqinfo_t)(struct xfs_trans *); 310extern void xfs_trans_unreserve_and_mod_dquots(struct xfs_trans *);
312typedef void (*qo_apply_dquot_deltas_t)(struct xfs_trans *); 311extern int xfs_trans_reserve_quota_nblks(struct xfs_trans *,
313typedef void (*qo_unreserve_and_mod_dquots_t)(struct xfs_trans *); 312 struct xfs_inode *, long, long, uint);
314typedef int (*qo_reserve_quota_nblks_t)( 313extern int xfs_trans_reserve_quota_bydquots(struct xfs_trans *,
315 struct xfs_trans *, struct xfs_mount *, 314 struct xfs_mount *, struct xfs_dquot *,
316 struct xfs_inode *, long, long, uint); 315 struct xfs_dquot *, long, long, uint);
317typedef int (*qo_reserve_quota_bydquots_t)( 316
318 struct xfs_trans *, struct xfs_mount *, 317extern int xfs_qm_vop_dqalloc(struct xfs_inode *, uid_t, gid_t, prid_t, uint,
319 struct xfs_dquot *, struct xfs_dquot *, 318 struct xfs_dquot **, struct xfs_dquot **);
320 long, long, uint); 319extern void xfs_qm_vop_create_dqattach(struct xfs_trans *, struct xfs_inode *,
321typedef struct xfs_dqtrxops { 320 struct xfs_dquot *, struct xfs_dquot *);
322 qo_dup_dqinfo_t qo_dup_dqinfo; 321extern int xfs_qm_vop_rename_dqattach(struct xfs_inode **);
323 qo_free_dqinfo_t qo_free_dqinfo; 322extern struct xfs_dquot *xfs_qm_vop_chown(struct xfs_trans *,
324 qo_mod_dquot_byino_t qo_mod_dquot_byino; 323 struct xfs_inode *, struct xfs_dquot **, struct xfs_dquot *);
325 qo_apply_dquot_deltas_t qo_apply_dquot_deltas; 324extern int xfs_qm_vop_chown_reserve(struct xfs_trans *, struct xfs_inode *,
326 qo_reserve_quota_nblks_t qo_reserve_quota_nblks; 325 struct xfs_dquot *, struct xfs_dquot *, uint);
327 qo_reserve_quota_bydquots_t qo_reserve_quota_bydquots; 326extern int xfs_qm_dqattach(struct xfs_inode *, uint);
328 qo_unreserve_and_mod_dquots_t qo_unreserve_and_mod_dquots; 327extern int xfs_qm_dqattach_locked(struct xfs_inode *, uint);
329} xfs_dqtrxops_t; 328extern void xfs_qm_dqdetach(struct xfs_inode *);
330 329extern void xfs_qm_dqrele(struct xfs_dquot *);
331#define XFS_DQTRXOP(mp, tp, op, args...) \ 330extern void xfs_qm_statvfs(struct xfs_inode *, struct kstatfs *);
332 ((mp)->m_qm_ops->xfs_dqtrxops ? \ 331extern int xfs_qm_sync(struct xfs_mount *, int);
333 ((mp)->m_qm_ops->xfs_dqtrxops->op)(tp, ## args) : 0) 332extern int xfs_qm_newmount(struct xfs_mount *, uint *, uint *);
334 333extern void xfs_qm_mount_quotas(struct xfs_mount *);
335#define XFS_DQTRXOP_VOID(mp, tp, op, args...) \ 334extern void xfs_qm_unmount(struct xfs_mount *);
336 ((mp)->m_qm_ops->xfs_dqtrxops ? \ 335extern void xfs_qm_unmount_quotas(struct xfs_mount *);
337 ((mp)->m_qm_ops->xfs_dqtrxops->op)(tp, ## args) : (void)0) 336
338 337#else
339#define XFS_TRANS_DUP_DQINFO(mp, otp, ntp) \ 338#define xfs_trans_dup_dqinfo(tp, tp2)
340 XFS_DQTRXOP_VOID(mp, otp, qo_dup_dqinfo, ntp) 339#define xfs_trans_free_dqinfo(tp)
341#define XFS_TRANS_FREE_DQINFO(mp, tp) \ 340#define xfs_trans_mod_dquot_byino(tp, ip, fields, delta)
342 XFS_DQTRXOP_VOID(mp, tp, qo_free_dqinfo) 341#define xfs_trans_apply_dquot_deltas(tp)
343#define XFS_TRANS_MOD_DQUOT_BYINO(mp, tp, ip, field, delta) \ 342#define xfs_trans_unreserve_and_mod_dquots(tp)
344 XFS_DQTRXOP_VOID(mp, tp, qo_mod_dquot_byino, ip, field, delta) 343#define xfs_trans_reserve_quota_nblks(tp, ip, nblks, ninos, flags) (0)
345#define XFS_TRANS_APPLY_DQUOT_DELTAS(mp, tp) \ 344#define xfs_trans_reserve_quota_bydquots(tp, mp, u, g, nb, ni, fl) (0)
346 XFS_DQTRXOP_VOID(mp, tp, qo_apply_dquot_deltas) 345#define xfs_qm_vop_dqalloc(ip, uid, gid, prid, fl, ou, og) (0)
347#define XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, tp, ip, nblks, ninos, fl) \ 346#define xfs_qm_vop_create_dqattach(tp, ip, u, g)
348 XFS_DQTRXOP(mp, tp, qo_reserve_quota_nblks, mp, ip, nblks, ninos, fl) 347#define xfs_qm_vop_rename_dqattach(it) (0)
349#define XFS_TRANS_RESERVE_QUOTA_BYDQUOTS(mp, tp, ud, gd, nb, ni, fl) \ 348#define xfs_qm_vop_chown(tp, ip, old, new) (NULL)
350 XFS_DQTRXOP(mp, tp, qo_reserve_quota_bydquots, mp, ud, gd, nb, ni, fl) 349#define xfs_qm_vop_chown_reserve(tp, ip, u, g, fl) (0)
351#define XFS_TRANS_UNRESERVE_AND_MOD_DQUOTS(mp, tp) \ 350#define xfs_qm_dqattach(ip, fl) (0)
352 XFS_DQTRXOP_VOID(mp, tp, qo_unreserve_and_mod_dquots) 351#define xfs_qm_dqattach_locked(ip, fl) (0)
353 352#define xfs_qm_dqdetach(ip)
354#define XFS_TRANS_UNRESERVE_QUOTA_NBLKS(mp, tp, ip, nblks, ninos, flags) \ 353#define xfs_qm_dqrele(d)
355 XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, tp, ip, -(nblks), -(ninos), flags) 354#define xfs_qm_statvfs(ip, s)
356#define XFS_TRANS_RESERVE_QUOTA(mp, tp, ud, gd, nb, ni, f) \ 355#define xfs_qm_sync(mp, fl) (0)
357 XFS_TRANS_RESERVE_QUOTA_BYDQUOTS(mp, tp, ud, gd, nb, ni, \ 356#define xfs_qm_newmount(mp, a, b) (0)
358 f | XFS_QMOPT_RES_REGBLKS) 357#define xfs_qm_mount_quotas(mp)
359#define XFS_TRANS_UNRESERVE_QUOTA(mp, tp, ud, gd, nb, ni, f) \ 358#define xfs_qm_unmount(mp)
360 XFS_TRANS_RESERVE_QUOTA_BYDQUOTS(mp, tp, ud, gd, -(nb), -(ni), \ 359#define xfs_qm_unmount_quotas(mp) (0)
360#endif /* CONFIG_XFS_QUOTA */
361
362#define xfs_trans_unreserve_quota_nblks(tp, ip, nblks, ninos, flags) \
363 xfs_trans_reserve_quota_nblks(tp, ip, -(nblks), -(ninos), flags)
364#define xfs_trans_reserve_quota(tp, mp, ud, gd, nb, ni, f) \
365 xfs_trans_reserve_quota_bydquots(tp, mp, ud, gd, nb, ni, \
361 f | XFS_QMOPT_RES_REGBLKS) 366 f | XFS_QMOPT_RES_REGBLKS)
362 367
363extern int xfs_qm_dqcheck(xfs_disk_dquot_t *, xfs_dqid_t, uint, uint, char *); 368extern int xfs_qm_dqcheck(xfs_disk_dquot_t *, xfs_dqid_t, uint, uint, char *);
364extern int xfs_mount_reset_sbqflags(struct xfs_mount *); 369extern int xfs_mount_reset_sbqflags(struct xfs_mount *);
365 370
366extern struct xfs_qmops xfs_qmcore_xfs;
367
368#endif /* __KERNEL__ */ 371#endif /* __KERNEL__ */
369
370#endif /* __XFS_QUOTA_H__ */ 372#endif /* __XFS_QUOTA_H__ */
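
The net effect of this header change is that quota operations are plain functions with compile-time stubs, rather than indirect calls through the removed m_qm_ops vtable; with CONFIG_XFS_QUOTA disabled they evaluate to 0 or nothing and the call sites converted throughout this patch compile unchanged. A usage sketch mirroring the xfs_bmapi conversion shown earlier (ip and alen come from that context; the snippet is illustrative only):

	error = xfs_trans_reserve_quota_nblks(NULL, ip, (long)alen, 0,
					      XFS_QMOPT_RES_REGBLKS);
	if (error)
		return error;

	/* on a later failure path, hand the reservation back */
	(void)xfs_trans_unreserve_quota_nblks(NULL, ip, (long)alen, 0,
					      XFS_QMOPT_RES_REGBLKS);

Note that xfs_trans_unreserve_quota_nblks() is simply the reserve call with negated counts, so one function covers both directions of the reservation.
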
diff --git a/fs/xfs/xfs_rename.c b/fs/xfs/xfs_rename.c
index 58f85e9cd11d..b81deea0ce19 100644
--- a/fs/xfs/xfs_rename.c
+++ b/fs/xfs/xfs_rename.c
@@ -166,7 +166,8 @@ xfs_rename(
166 /* 166 /*
167 * Attach the dquots to the inodes 167 * Attach the dquots to the inodes
168 */ 168 */
169 if ((error = XFS_QM_DQVOPRENAME(mp, inodes))) { 169 error = xfs_qm_vop_rename_dqattach(inodes);
170 if (error) {
170 xfs_trans_cancel(tp, cancel_flags); 171 xfs_trans_cancel(tp, cancel_flags);
171 goto std_return; 172 goto std_return;
172 } 173 }
diff --git a/fs/xfs/xfs_rw.c b/fs/xfs/xfs_rw.c
index 36f3a21c54d2..fea68615ed23 100644
--- a/fs/xfs/xfs_rw.c
+++ b/fs/xfs/xfs_rw.c
@@ -41,7 +41,6 @@
41#include "xfs_ialloc.h" 41#include "xfs_ialloc.h"
42#include "xfs_attr.h" 42#include "xfs_attr.h"
43#include "xfs_bmap.h" 43#include "xfs_bmap.h"
44#include "xfs_acl.h"
45#include "xfs_error.h" 44#include "xfs_error.h"
46#include "xfs_buf_item.h" 45#include "xfs_buf_item.h"
47#include "xfs_rw.h" 46#include "xfs_rw.h"
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index 8570b826fedd..fffabc0aefcb 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -297,7 +297,7 @@ xfs_trans_dup(
297 tp->t_rtx_res = tp->t_rtx_res_used; 297 tp->t_rtx_res = tp->t_rtx_res_used;
298 ntp->t_pflags = tp->t_pflags; 298 ntp->t_pflags = tp->t_pflags;
299 299
300 XFS_TRANS_DUP_DQINFO(tp->t_mountp, tp, ntp); 300 xfs_trans_dup_dqinfo(tp, ntp);
301 301
302 atomic_inc(&tp->t_mountp->m_active_trans); 302 atomic_inc(&tp->t_mountp->m_active_trans);
303 return ntp; 303 return ntp;
@@ -831,7 +831,7 @@ shut_us_down:
831 * means is that we have some (non-persistent) quota 831 * means is that we have some (non-persistent) quota
832 * reservations that need to be unreserved. 832 * reservations that need to be unreserved.
833 */ 833 */
834 XFS_TRANS_UNRESERVE_AND_MOD_DQUOTS(mp, tp); 834 xfs_trans_unreserve_and_mod_dquots(tp);
835 if (tp->t_ticket) { 835 if (tp->t_ticket) {
836 commit_lsn = xfs_log_done(mp, tp->t_ticket, 836 commit_lsn = xfs_log_done(mp, tp->t_ticket,
837 NULL, log_flags); 837 NULL, log_flags);
@@ -850,10 +850,9 @@ shut_us_down:
850 /* 850 /*
851 * If we need to update the superblock, then do it now. 851 * If we need to update the superblock, then do it now.
852 */ 852 */
853 if (tp->t_flags & XFS_TRANS_SB_DIRTY) { 853 if (tp->t_flags & XFS_TRANS_SB_DIRTY)
854 xfs_trans_apply_sb_deltas(tp); 854 xfs_trans_apply_sb_deltas(tp);
855 } 855 xfs_trans_apply_dquot_deltas(tp);
856 XFS_TRANS_APPLY_DQUOT_DELTAS(mp, tp);
857 856
858 /* 857 /*
859 * Ask each log item how many log_vector entries it will 858 * Ask each log item how many log_vector entries it will
@@ -1058,7 +1057,7 @@ xfs_trans_uncommit(
1058 } 1057 }
1059 1058
1060 xfs_trans_unreserve_and_mod_sb(tp); 1059 xfs_trans_unreserve_and_mod_sb(tp);
1061 XFS_TRANS_UNRESERVE_AND_MOD_DQUOTS(tp->t_mountp, tp); 1060 xfs_trans_unreserve_and_mod_dquots(tp);
1062 1061
1063 xfs_trans_free_items(tp, flags); 1062 xfs_trans_free_items(tp, flags);
1064 xfs_trans_free_busy(tp); 1063 xfs_trans_free_busy(tp);
@@ -1183,7 +1182,7 @@ xfs_trans_cancel(
1183 } 1182 }
1184#endif 1183#endif
1185 xfs_trans_unreserve_and_mod_sb(tp); 1184 xfs_trans_unreserve_and_mod_sb(tp);
1186 XFS_TRANS_UNRESERVE_AND_MOD_DQUOTS(mp, tp); 1185 xfs_trans_unreserve_and_mod_dquots(tp);
1187 1186
1188 if (tp->t_ticket) { 1187 if (tp->t_ticket) {
1189 if (flags & XFS_TRANS_RELEASE_LOG_RES) { 1188 if (flags & XFS_TRANS_RELEASE_LOG_RES) {
@@ -1213,7 +1212,7 @@ xfs_trans_free(
1213 xfs_trans_t *tp) 1212 xfs_trans_t *tp)
1214{ 1213{
1215 atomic_dec(&tp->t_mountp->m_active_trans); 1214 atomic_dec(&tp->t_mountp->m_active_trans);
1216 XFS_TRANS_FREE_DQINFO(tp->t_mountp, tp); 1215 xfs_trans_free_dqinfo(tp);
1217 kmem_zone_free(xfs_trans_zone, tp); 1216 kmem_zone_free(xfs_trans_zone, tp);
1218} 1217}
1219 1218
diff --git a/fs/xfs/xfs_utils.c b/fs/xfs/xfs_utils.c
index 79b9e5ea5359..4d88616bde91 100644
--- a/fs/xfs/xfs_utils.c
+++ b/fs/xfs/xfs_utils.c
@@ -166,7 +166,7 @@ xfs_dir_ialloc(
166 xfs_buf_relse(ialloc_context); 166 xfs_buf_relse(ialloc_context);
167 if (dqinfo) { 167 if (dqinfo) {
168 tp->t_dqinfo = dqinfo; 168 tp->t_dqinfo = dqinfo;
169 XFS_TRANS_FREE_DQINFO(tp->t_mountp, tp); 169 xfs_trans_free_dqinfo(tp);
170 } 170 }
171 *tpp = ntp; 171 *tpp = ntp;
172 *ipp = NULL; 172 *ipp = NULL;
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
index 19cf90a9c762..c4eca5ed5dab 100644
--- a/fs/xfs/xfs_vnodeops.c
+++ b/fs/xfs/xfs_vnodeops.c
@@ -42,6 +42,7 @@
42#include "xfs_ialloc.h" 42#include "xfs_ialloc.h"
43#include "xfs_alloc.h" 43#include "xfs_alloc.h"
44#include "xfs_bmap.h" 44#include "xfs_bmap.h"
45#include "xfs_acl.h"
45#include "xfs_attr.h" 46#include "xfs_attr.h"
46#include "xfs_rw.h" 47#include "xfs_rw.h"
47#include "xfs_error.h" 48#include "xfs_error.h"
@@ -118,7 +119,7 @@ xfs_setattr(
118 */ 119 */
119 ASSERT(udqp == NULL); 120 ASSERT(udqp == NULL);
120 ASSERT(gdqp == NULL); 121 ASSERT(gdqp == NULL);
121 code = XFS_QM_DQVOPALLOC(mp, ip, uid, gid, ip->i_d.di_projid, 122 code = xfs_qm_vop_dqalloc(ip, uid, gid, ip->i_d.di_projid,
122 qflags, &udqp, &gdqp); 123 qflags, &udqp, &gdqp);
123 if (code) 124 if (code)
124 return code; 125 return code;
@@ -180,10 +181,11 @@ xfs_setattr(
180 * Do a quota reservation only if uid/gid is actually 181 * Do a quota reservation only if uid/gid is actually
181 * going to change. 182 * going to change.
182 */ 183 */
183 if ((XFS_IS_UQUOTA_ON(mp) && iuid != uid) || 184 if (XFS_IS_QUOTA_RUNNING(mp) &&
184 (XFS_IS_GQUOTA_ON(mp) && igid != gid)) { 185 ((XFS_IS_UQUOTA_ON(mp) && iuid != uid) ||
186 (XFS_IS_GQUOTA_ON(mp) && igid != gid))) {
185 ASSERT(tp); 187 ASSERT(tp);
186 code = XFS_QM_DQVOPCHOWNRESV(mp, tp, ip, udqp, gdqp, 188 code = xfs_qm_vop_chown_reserve(tp, ip, udqp, gdqp,
187 capable(CAP_FOWNER) ? 189 capable(CAP_FOWNER) ?
188 XFS_QMOPT_FORCE_RES : 0); 190 XFS_QMOPT_FORCE_RES : 0);
189 if (code) /* out of quota */ 191 if (code) /* out of quota */
@@ -217,7 +219,7 @@ xfs_setattr(
217 /* 219 /*
218 * Make sure that the dquots are attached to the inode. 220 * Make sure that the dquots are attached to the inode.
219 */ 221 */
220 code = XFS_QM_DQATTACH(mp, ip, XFS_QMOPT_ILOCKED); 222 code = xfs_qm_dqattach_locked(ip, 0);
221 if (code) 223 if (code)
222 goto error_return; 224 goto error_return;
223 225
@@ -351,21 +353,21 @@ xfs_setattr(
351 * in the transaction. 353 * in the transaction.
352 */ 354 */
353 if (iuid != uid) { 355 if (iuid != uid) {
354 if (XFS_IS_UQUOTA_ON(mp)) { 356 if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_UQUOTA_ON(mp)) {
355 ASSERT(mask & ATTR_UID); 357 ASSERT(mask & ATTR_UID);
356 ASSERT(udqp); 358 ASSERT(udqp);
357 olddquot1 = XFS_QM_DQVOPCHOWN(mp, tp, ip, 359 olddquot1 = xfs_qm_vop_chown(tp, ip,
358 &ip->i_udquot, udqp); 360 &ip->i_udquot, udqp);
359 } 361 }
360 ip->i_d.di_uid = uid; 362 ip->i_d.di_uid = uid;
361 inode->i_uid = uid; 363 inode->i_uid = uid;
362 } 364 }
363 if (igid != gid) { 365 if (igid != gid) {
364 if (XFS_IS_GQUOTA_ON(mp)) { 366 if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_GQUOTA_ON(mp)) {
365 ASSERT(!XFS_IS_PQUOTA_ON(mp)); 367 ASSERT(!XFS_IS_PQUOTA_ON(mp));
366 ASSERT(mask & ATTR_GID); 368 ASSERT(mask & ATTR_GID);
367 ASSERT(gdqp); 369 ASSERT(gdqp);
368 olddquot2 = XFS_QM_DQVOPCHOWN(mp, tp, ip, 370 olddquot2 = xfs_qm_vop_chown(tp, ip,
369 &ip->i_gdquot, gdqp); 371 &ip->i_gdquot, gdqp);
370 } 372 }
371 ip->i_d.di_gid = gid; 373 ip->i_d.di_gid = gid;
@@ -461,13 +463,25 @@ xfs_setattr(
461 /* 463 /*
462 * Release any dquot(s) the inode had kept before chown. 464 * Release any dquot(s) the inode had kept before chown.
463 */ 465 */
464 XFS_QM_DQRELE(mp, olddquot1); 466 xfs_qm_dqrele(olddquot1);
465 XFS_QM_DQRELE(mp, olddquot2); 467 xfs_qm_dqrele(olddquot2);
466 XFS_QM_DQRELE(mp, udqp); 468 xfs_qm_dqrele(udqp);
467 XFS_QM_DQRELE(mp, gdqp); 469 xfs_qm_dqrele(gdqp);
468 470
469 if (code) { 471 if (code)
470 return code; 472 return code;
473
474 /*
475 * XXX(hch): Updating the ACL entries is not atomic vs the i_mode
476 * update. We could avoid this with linked transactions
477 * and passing down the transaction pointer all the way
478 * to attr_set. No previous user of the generic
479 * Posix ACL code seems to care about this issue either.
480 */
481 if ((mask & ATTR_MODE) && !(flags & XFS_ATTR_NOACL)) {
482 code = -xfs_acl_chmod(inode);
483 if (code)
484 return XFS_ERROR(code);
471 } 485 }
472 486
473 if (DM_EVENT_ENABLED(ip, DM_EVENT_ATTRIBUTE) && 487 if (DM_EVENT_ENABLED(ip, DM_EVENT_ATTRIBUTE) &&
@@ -482,8 +496,8 @@ xfs_setattr(
482 commit_flags |= XFS_TRANS_ABORT; 496 commit_flags |= XFS_TRANS_ABORT;
483 /* FALLTHROUGH */ 497 /* FALLTHROUGH */
484 error_return: 498 error_return:
485 XFS_QM_DQRELE(mp, udqp); 499 xfs_qm_dqrele(udqp);
486 XFS_QM_DQRELE(mp, gdqp); 500 xfs_qm_dqrele(gdqp);
487 if (tp) { 501 if (tp) {
488 xfs_trans_cancel(tp, commit_flags); 502 xfs_trans_cancel(tp, commit_flags);
489 } 503 }
@@ -739,7 +753,8 @@ xfs_free_eofblocks(
739 /* 753 /*
740 * Attach the dquots to the inode up front. 754 * Attach the dquots to the inode up front.
741 */ 755 */
742 if ((error = XFS_QM_DQATTACH(mp, ip, 0))) 756 error = xfs_qm_dqattach(ip, 0);
757 if (error)
743 return error; 758 return error;
744 759
745 /* 760 /*
@@ -1181,7 +1196,8 @@ xfs_inactive(
1181 1196
1182 ASSERT(ip->i_d.di_nlink == 0); 1197 ASSERT(ip->i_d.di_nlink == 0);
1183 1198
1184 if ((error = XFS_QM_DQATTACH(mp, ip, 0))) 1199 error = xfs_qm_dqattach(ip, 0);
1200 if (error)
1185 return VN_INACTIVE_CACHE; 1201 return VN_INACTIVE_CACHE;
1186 1202
1187 tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE); 1203 tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);
@@ -1307,7 +1323,7 @@ xfs_inactive(
1307 /* 1323 /*
1308 * Credit the quota account(s). The inode is gone. 1324 * Credit the quota account(s). The inode is gone.
1309 */ 1325 */
1310 XFS_TRANS_MOD_DQUOT_BYINO(mp, tp, ip, XFS_TRANS_DQ_ICOUNT, -1); 1326 xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_ICOUNT, -1);
1311 1327
1312 /* 1328 /*
1313 * Just ignore errors at this point. There is nothing we can 1329 * Just ignore errors at this point. There is nothing we can
@@ -1323,11 +1339,11 @@ xfs_inactive(
1323 xfs_fs_cmn_err(CE_NOTE, mp, "xfs_inactive: " 1339 xfs_fs_cmn_err(CE_NOTE, mp, "xfs_inactive: "
1324 "xfs_trans_commit() returned error %d", error); 1340 "xfs_trans_commit() returned error %d", error);
1325 } 1341 }
1342
1326 /* 1343 /*
1327 * Release the dquots held by inode, if any. 1344 * Release the dquots held by inode, if any.
1328 */ 1345 */
1329 XFS_QM_DQDETACH(mp, ip); 1346 xfs_qm_dqdetach(ip);
1330
1331 xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL); 1347 xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
1332 1348
1333 out: 1349 out:
@@ -1427,8 +1443,7 @@ xfs_create(
1427 /* 1443 /*
1428 * Make sure that we have allocated dquot(s) on disk. 1444 * Make sure that we have allocated dquot(s) on disk.
1429 */ 1445 */
1430 error = XFS_QM_DQVOPALLOC(mp, dp, 1446 error = xfs_qm_vop_dqalloc(dp, current_fsuid(), current_fsgid(), prid,
1431 current_fsuid(), current_fsgid(), prid,
1432 XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT, &udqp, &gdqp); 1447 XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT, &udqp, &gdqp);
1433 if (error) 1448 if (error)
1434 goto std_return; 1449 goto std_return;
@@ -1489,7 +1504,7 @@ xfs_create(
1489 /* 1504 /*
1490 * Reserve disk quota and the inode. 1505 * Reserve disk quota and the inode.
1491 */ 1506 */
1492 error = XFS_TRANS_RESERVE_QUOTA(mp, tp, udqp, gdqp, resblks, 1, 0); 1507 error = xfs_trans_reserve_quota(tp, mp, udqp, gdqp, resblks, 1, 0);
1493 if (error) 1508 if (error)
1494 goto out_trans_cancel; 1509 goto out_trans_cancel;
1495 1510
@@ -1561,7 +1576,7 @@ xfs_create(
1561 * These ids of the inode couldn't have changed since the new 1576 * These ids of the inode couldn't have changed since the new
1562 * inode has been locked ever since it was created. 1577 * inode has been locked ever since it was created.
1563 */ 1578 */
1564 XFS_QM_DQVOPCREATE(mp, tp, ip, udqp, gdqp); 1579 xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp);
1565 1580
1566 /* 1581 /*
1567 * xfs_trans_commit normally decrements the vnode ref count 1582 * xfs_trans_commit normally decrements the vnode ref count
@@ -1580,8 +1595,8 @@ xfs_create(
1580 goto out_dqrele; 1595 goto out_dqrele;
1581 } 1596 }
1582 1597
1583 XFS_QM_DQRELE(mp, udqp); 1598 xfs_qm_dqrele(udqp);
1584 XFS_QM_DQRELE(mp, gdqp); 1599 xfs_qm_dqrele(gdqp);
1585 1600
1586 *ipp = ip; 1601 *ipp = ip;
1587 1602
@@ -1602,8 +1617,8 @@ xfs_create(
1602 out_trans_cancel: 1617 out_trans_cancel:
1603 xfs_trans_cancel(tp, cancel_flags); 1618 xfs_trans_cancel(tp, cancel_flags);
1604 out_dqrele: 1619 out_dqrele:
1605 XFS_QM_DQRELE(mp, udqp); 1620 xfs_qm_dqrele(udqp);
1606 XFS_QM_DQRELE(mp, gdqp); 1621 xfs_qm_dqrele(gdqp);
1607 1622
1608 if (unlock_dp_on_error) 1623 if (unlock_dp_on_error)
1609 xfs_iunlock(dp, XFS_ILOCK_EXCL); 1624 xfs_iunlock(dp, XFS_ILOCK_EXCL);
@@ -1837,11 +1852,11 @@ xfs_remove(
 		return error;
 	}
 
-	error = XFS_QM_DQATTACH(mp, dp, 0);
+	error = xfs_qm_dqattach(dp, 0);
 	if (error)
 		goto std_return;
 
-	error = XFS_QM_DQATTACH(mp, ip, 0);
+	error = xfs_qm_dqattach(ip, 0);
 	if (error)
 		goto std_return;
 
@@ -2028,11 +2043,11 @@ xfs_link(
 
 	/* Return through std_return after this point. */
 
-	error = XFS_QM_DQATTACH(mp, sip, 0);
+	error = xfs_qm_dqattach(sip, 0);
 	if (error)
 		goto std_return;
 
-	error = XFS_QM_DQATTACH(mp, tdp, 0);
+	error = xfs_qm_dqattach(tdp, 0);
 	if (error)
 		goto std_return;
 
@@ -2205,8 +2220,7 @@ xfs_symlink(
 	/*
 	 * Make sure that we have allocated dquot(s) on disk.
 	 */
-	error = XFS_QM_DQVOPALLOC(mp, dp,
-			current_fsuid(), current_fsgid(), prid,
-			XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT, &udqp, &gdqp);
+	error = xfs_qm_vop_dqalloc(dp, current_fsuid(), current_fsgid(), prid,
+			XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT, &udqp, &gdqp);
 	if (error)
 		goto std_return;
@@ -2248,7 +2262,7 @@ xfs_symlink(
 	/*
 	 * Reserve disk quota : blocks and inode.
 	 */
-	error = XFS_TRANS_RESERVE_QUOTA(mp, tp, udqp, gdqp, resblks, 1, 0);
+	error = xfs_trans_reserve_quota(tp, mp, udqp, gdqp, resblks, 1, 0);
 	if (error)
 		goto error_return;
 
@@ -2288,7 +2302,7 @@ xfs_symlink(
 	/*
 	 * Also attach the dquot(s) to it, if applicable.
 	 */
-	XFS_QM_DQVOPCREATE(mp, tp, ip, udqp, gdqp);
+	xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp);
 
 	if (resblks)
 		resblks -= XFS_IALLOC_SPACE_RES(mp);
@@ -2376,8 +2390,8 @@ xfs_symlink(
 		goto error2;
 	}
 	error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
-	XFS_QM_DQRELE(mp, udqp);
-	XFS_QM_DQRELE(mp, gdqp);
+	xfs_qm_dqrele(udqp);
+	xfs_qm_dqrele(gdqp);
 
 	/* Fall through to std_return with error = 0 or errno from
 	 * xfs_trans_commit */
@@ -2401,8 +2415,8 @@ std_return:
 	cancel_flags |= XFS_TRANS_ABORT;
  error_return:
 	xfs_trans_cancel(tp, cancel_flags);
-	XFS_QM_DQRELE(mp, udqp);
-	XFS_QM_DQRELE(mp, gdqp);
+	xfs_qm_dqrele(udqp);
+	xfs_qm_dqrele(gdqp);
 
 	if (unlock_dp_on_error)
 		xfs_iunlock(dp, XFS_ILOCK_EXCL);
@@ -2541,7 +2555,8 @@ xfs_alloc_file_space(
 	if (XFS_FORCED_SHUTDOWN(mp))
 		return XFS_ERROR(EIO);
 
-	if ((error = XFS_QM_DQATTACH(mp, ip, 0)))
+	error = xfs_qm_dqattach(ip, 0);
+	if (error)
 		return error;
 
 	if (len <= 0)
@@ -2628,8 +2643,8 @@ retry:
 			break;
 		}
 		xfs_ilock(ip, XFS_ILOCK_EXCL);
-		error = XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, tp, ip,
-				qblocks, 0, quota_flag);
+		error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks,
+				0, quota_flag);
 		if (error)
 			goto error1;
 
@@ -2688,7 +2703,7 @@ dmapi_enospc_check:
 
 error0:	/* Cancel bmap, unlock inode, unreserve quota blocks, cancel trans */
 	xfs_bmap_cancel(&free_list);
-	XFS_TRANS_UNRESERVE_QUOTA_NBLKS(mp, tp, ip, qblocks, 0, quota_flag);
+	xfs_trans_unreserve_quota_nblks(tp, ip, qblocks, 0, quota_flag);
 
 error1:	/* Just cancel transaction */
 	xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
@@ -2827,7 +2842,8 @@ xfs_free_file_space(
 
 	xfs_itrace_entry(ip);
 
-	if ((error = XFS_QM_DQATTACH(mp, ip, 0)))
+	error = xfs_qm_dqattach(ip, 0);
+	if (error)
 		return error;
 
 	error = 0;
@@ -2953,9 +2969,9 @@ xfs_free_file_space(
 			break;
 		}
 		xfs_ilock(ip, XFS_ILOCK_EXCL);
-		error = XFS_TRANS_RESERVE_QUOTA(mp, tp,
-			ip->i_udquot, ip->i_gdquot, resblks, 0,
-			XFS_QMOPT_RES_REGBLKS);
+		error = xfs_trans_reserve_quota(tp, mp,
+				ip->i_udquot, ip->i_gdquot,
+				resblks, 0, XFS_QMOPT_RES_REGBLKS);
 		if (error)
 			goto error1;
 
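
A note on the pattern in the hunks above: every change in this file is the same substitution, replacing the old XFS_QM_*/XFS_TRANS_*_QUOTA* macros, which dispatched through a per-mount quota-ops table, with direct calls to the corresponding xfs_qm_*/xfs_trans_*_quota*() functions, so the struct xfs_mount argument disappears from most call sites. The stand-alone C sketch below illustrates only this calling-convention change; the struct members, the macro body, and the refcount handling are illustrative approximations, not the kernel definitions.

#include <stdio.h>

/* Illustrative stand-ins for the kernel types involved. */
struct xfs_dquot { int q_refcount; };

/* Old style: a per-mount quota-ops table reached through a macro. */
struct xfs_qmops {
	void	(*xfs_dqrele)(struct xfs_dquot *dqp);
};
struct xfs_mount {
	struct xfs_qmops	*m_qm_ops;
};
#define XFS_QM_DQRELE(mp, dq)	((mp)->m_qm_ops->xfs_dqrele)(dq)

/* New style: one direct call; in this sketch a NULL dquot is simply
 * ignored, so callers need neither the mount pointer nor a separate
 * quota-enabled check. */
static void xfs_qm_dqrele(struct xfs_dquot *dqp)
{
	if (!dqp)
		return;
	dqp->q_refcount--;
	printf("released dquot, refcount now %d\n", dqp->q_refcount);
}

int main(void)
{
	struct xfs_dquot udq = { .q_refcount = 1 };
	struct xfs_qmops ops = { .xfs_dqrele = xfs_qm_dqrele };
	struct xfs_mount mp  = { .m_qm_ops = &ops };

	XFS_QM_DQRELE(&mp, &udq);	/* old: indirect through the mount */
	xfs_qm_dqrele(NULL);		/* new: direct call */
	return 0;
}

The same shape applies to xfs_qm_dqattach(), xfs_qm_dqdetach(), xfs_qm_vop_dqalloc() and the xfs_trans_reserve_quota*() helpers seen above: each becomes an ordinary function called directly, keeping a mount argument only where it is genuinely needed.
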
diff --git a/fs/xfs/xfs_vnodeops.h b/fs/xfs/xfs_vnodeops.h
index 04373c6c61ff..a9e102de71a1 100644
--- a/fs/xfs/xfs_vnodeops.h
+++ b/fs/xfs/xfs_vnodeops.h
@@ -18,6 +18,7 @@ int xfs_setattr(struct xfs_inode *ip, struct iattr *vap, int flags);
 #define XFS_ATTR_DMI		0x01	/* invocation from a DMI function */
 #define XFS_ATTR_NONBLOCK	0x02	/* return EAGAIN if operation would block */
 #define XFS_ATTR_NOLOCK		0x04	/* Don't grab any conflicting locks */
+#define XFS_ATTR_NOACL		0x08	/* Don't call xfs_acl_chmod */
 
 int xfs_readlink(struct xfs_inode *ip, char *link);
 int xfs_fsync(struct xfs_inode *ip);
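
The one addition to xfs_vnodeops.h, XFS_ATTR_NOACL, lets a caller change inode attributes without xfs_setattr() re-running the ACL mode-propagation step. A hedged sketch of the intended use, assuming normal kernel context (struct iattr and ATTR_MODE from <linux/fs.h>); the helper name and body here are hypothetical, and only xfs_setattr() and XFS_ATTR_NOACL come from the declarations above:

/* Hypothetical helper: apply a mode that was itself derived from ACL
 * processing.  Passing XFS_ATTR_NOACL asks xfs_setattr() not to call
 * xfs_acl_chmod() again, avoiding recursion back into the ACL code. */
static int set_mode_from_acl(struct xfs_inode *ip, umode_t mode)
{
	struct iattr iattr = {
		.ia_valid = ATTR_MODE,
		.ia_mode  = mode,
	};

	return xfs_setattr(ip, &iattr, XFS_ATTR_NOACL);
}
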