aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2009-12-05 12:47:17 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2009-12-05 12:47:17 -0500
commit1ebb275afcf5a47092e995541d6c604eef96062a (patch)
tree56722c2a688f4034e5157a0fceec98d6ac3b36f0
parent83fdbfbfe6e7e8906e3a3f8f6bc074d887e92109 (diff)
parent26bb7505cf7db3560286be9f6384b6d3911f78b5 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/steve/gfs2-2.6-nmw
* git://git.kernel.org/pub/scm/linux/kernel/git/steve/gfs2-2.6-nmw: (31 commits) GFS2: Fix glock refcount issues writeback: remove unused nonblocking and congestion checks (gfs2) GFS2: drop rindex glock to refresh rindex list GFS2: Tag all metadata with jid GFS2: Locking order fix in gfs2_check_blk_state GFS2: Remove dirent_first() function GFS2: Display nobarrier option in /proc/mounts GFS2: add barrier/nobarrier mount options GFS2: remove division from new statfs code GFS2: Improve statfs and quota usability GFS2: Use dquot_send_warning() VFS: Export dquot_send_warning GFS2: Add set_xquota support GFS2: Add get_xquota support GFS2: Clean up gfs2_adjust_quota() and do_glock() GFS2: Remove constant argument from qd_get() GFS2: Remove constant argument from qdsb_get() GFS2: Add proper error reporting to quota sync via sysfs GFS2: Add get_xstate quota function GFS2: Remove obsolete code in quota.c ...
-rw-r--r--fs/gfs2/Kconfig2
-rw-r--r--fs/gfs2/acl.c357
-rw-r--r--fs/gfs2/acl.h24
-rw-r--r--fs/gfs2/aops.c20
-rw-r--r--fs/gfs2/dir.c34
-rw-r--r--fs/gfs2/glock.c31
-rw-r--r--fs/gfs2/glock.h9
-rw-r--r--fs/gfs2/glops.c5
-rw-r--r--fs/gfs2/incore.h5
-rw-r--r--fs/gfs2/inode.c4
-rw-r--r--fs/gfs2/log.c2
-rw-r--r--fs/gfs2/lops.c4
-rw-r--r--fs/gfs2/ops_fstype.c154
-rw-r--r--fs/gfs2/quota.c393
-rw-r--r--fs/gfs2/quota.h5
-rw-r--r--fs/gfs2/recovery.c2
-rw-r--r--fs/gfs2/rgrp.c14
-rw-r--r--fs/gfs2/super.c110
-rw-r--r--fs/gfs2/super.h4
-rw-r--r--fs/gfs2/sys.c14
-rw-r--r--fs/gfs2/xattr.c74
-rw-r--r--fs/gfs2/xattr.h8
-rw-r--r--fs/quota/Kconfig2
-rw-r--r--fs/quota/dquot.c93
-rw-r--r--fs/quota/quota.c93
-rw-r--r--fs/xattr_acl.c2
-rw-r--r--include/linux/gfs2_ondisk.h6
-rw-r--r--include/linux/posix_acl.h14
-rw-r--r--include/linux/quota.h11
29 files changed, 991 insertions, 505 deletions
diff --git a/fs/gfs2/Kconfig b/fs/gfs2/Kconfig
index 5971359d2090..4dcddf83326f 100644
--- a/fs/gfs2/Kconfig
+++ b/fs/gfs2/Kconfig
@@ -8,6 +8,8 @@ config GFS2_FS
8 select FS_POSIX_ACL 8 select FS_POSIX_ACL
9 select CRC32 9 select CRC32
10 select SLOW_WORK 10 select SLOW_WORK
11 select QUOTA
12 select QUOTACTL
11 help 13 help
12 A cluster filesystem. 14 A cluster filesystem.
13 15
diff --git a/fs/gfs2/acl.c b/fs/gfs2/acl.c
index 3fc4e3ac7d84..3eb1ea846173 100644
--- a/fs/gfs2/acl.c
+++ b/fs/gfs2/acl.c
@@ -12,6 +12,7 @@
12#include <linux/spinlock.h> 12#include <linux/spinlock.h>
13#include <linux/completion.h> 13#include <linux/completion.h>
14#include <linux/buffer_head.h> 14#include <linux/buffer_head.h>
15#include <linux/xattr.h>
15#include <linux/posix_acl.h> 16#include <linux/posix_acl.h>
16#include <linux/posix_acl_xattr.h> 17#include <linux/posix_acl_xattr.h>
17#include <linux/gfs2_ondisk.h> 18#include <linux/gfs2_ondisk.h>
@@ -26,108 +27,44 @@
26#include "trans.h" 27#include "trans.h"
27#include "util.h" 28#include "util.h"
28 29
29#define ACL_ACCESS 1 30static const char *gfs2_acl_name(int type)
30#define ACL_DEFAULT 0
31
32int gfs2_acl_validate_set(struct gfs2_inode *ip, int access,
33 struct gfs2_ea_request *er, int *remove, mode_t *mode)
34{ 31{
35 struct posix_acl *acl; 32 switch (type) {
36 int error; 33 case ACL_TYPE_ACCESS:
37 34 return GFS2_POSIX_ACL_ACCESS;
38 error = gfs2_acl_validate_remove(ip, access); 35 case ACL_TYPE_DEFAULT:
39 if (error) 36 return GFS2_POSIX_ACL_DEFAULT;
40 return error;
41
42 if (!er->er_data)
43 return -EINVAL;
44
45 acl = posix_acl_from_xattr(er->er_data, er->er_data_len);
46 if (IS_ERR(acl))
47 return PTR_ERR(acl);
48 if (!acl) {
49 *remove = 1;
50 return 0;
51 }
52
53 error = posix_acl_valid(acl);
54 if (error)
55 goto out;
56
57 if (access) {
58 error = posix_acl_equiv_mode(acl, mode);
59 if (!error)
60 *remove = 1;
61 else if (error > 0)
62 error = 0;
63 } 37 }
64 38 return NULL;
65out:
66 posix_acl_release(acl);
67 return error;
68}
69
70int gfs2_acl_validate_remove(struct gfs2_inode *ip, int access)
71{
72 if (!GFS2_SB(&ip->i_inode)->sd_args.ar_posix_acl)
73 return -EOPNOTSUPP;
74 if (!is_owner_or_cap(&ip->i_inode))
75 return -EPERM;
76 if (S_ISLNK(ip->i_inode.i_mode))
77 return -EOPNOTSUPP;
78 if (!access && !S_ISDIR(ip->i_inode.i_mode))
79 return -EACCES;
80
81 return 0;
82} 39}
83 40
84static int acl_get(struct gfs2_inode *ip, const char *name, 41static struct posix_acl *gfs2_acl_get(struct gfs2_inode *ip, int type)
85 struct posix_acl **acl, struct gfs2_ea_location *el,
86 char **datap, unsigned int *lenp)
87{ 42{
43 struct posix_acl *acl;
44 const char *name;
88 char *data; 45 char *data;
89 unsigned int len; 46 int len;
90 int error;
91
92 el->el_bh = NULL;
93 47
94 if (!ip->i_eattr) 48 if (!ip->i_eattr)
95 return 0; 49 return NULL;
96
97 error = gfs2_ea_find(ip, GFS2_EATYPE_SYS, name, el);
98 if (error)
99 return error;
100 if (!el->el_ea)
101 return 0;
102 if (!GFS2_EA_DATA_LEN(el->el_ea))
103 goto out;
104 50
105 len = GFS2_EA_DATA_LEN(el->el_ea); 51 acl = get_cached_acl(&ip->i_inode, type);
106 data = kmalloc(len, GFP_NOFS); 52 if (acl != ACL_NOT_CACHED)
107 error = -ENOMEM; 53 return acl;
108 if (!data)
109 goto out;
110 54
111 error = gfs2_ea_get_copy(ip, el, data, len); 55 name = gfs2_acl_name(type);
112 if (error < 0) 56 if (name == NULL)
113 goto out_kfree; 57 return ERR_PTR(-EINVAL);
114 error = 0;
115 58
116 if (acl) { 59 len = gfs2_xattr_acl_get(ip, name, &data);
117 *acl = posix_acl_from_xattr(data, len); 60 if (len < 0)
118 if (IS_ERR(*acl)) 61 return ERR_PTR(len);
119 error = PTR_ERR(*acl); 62 if (len == 0)
120 } 63 return NULL;
121 64
122out_kfree: 65 acl = posix_acl_from_xattr(data, len);
123 if (error || !datap) { 66 kfree(data);
124 kfree(data); 67 return acl;
125 } else {
126 *datap = data;
127 *lenp = len;
128 }
129out:
130 return error;
131} 68}
132 69
133/** 70/**
@@ -140,14 +77,12 @@ out:
140 77
141int gfs2_check_acl(struct inode *inode, int mask) 78int gfs2_check_acl(struct inode *inode, int mask)
142{ 79{
143 struct gfs2_ea_location el; 80 struct posix_acl *acl;
144 struct posix_acl *acl = NULL;
145 int error; 81 int error;
146 82
147 error = acl_get(GFS2_I(inode), GFS2_POSIX_ACL_ACCESS, &acl, &el, NULL, NULL); 83 acl = gfs2_acl_get(GFS2_I(inode), ACL_TYPE_ACCESS);
148 brelse(el.el_bh); 84 if (IS_ERR(acl))
149 if (error) 85 return PTR_ERR(acl);
150 return error;
151 86
152 if (acl) { 87 if (acl) {
153 error = posix_acl_permission(inode, acl, mask); 88 error = posix_acl_permission(inode, acl, mask);
@@ -158,57 +93,75 @@ int gfs2_check_acl(struct inode *inode, int mask)
158 return -EAGAIN; 93 return -EAGAIN;
159} 94}
160 95
161static int munge_mode(struct gfs2_inode *ip, mode_t mode) 96static int gfs2_set_mode(struct inode *inode, mode_t mode)
162{ 97{
163 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); 98 int error = 0;
164 struct buffer_head *dibh;
165 int error;
166 99
167 error = gfs2_trans_begin(sdp, RES_DINODE, 0); 100 if (mode != inode->i_mode) {
168 if (error) 101 struct iattr iattr;
169 return error;
170 102
171 error = gfs2_meta_inode_buffer(ip, &dibh); 103 iattr.ia_valid = ATTR_MODE;
172 if (!error) { 104 iattr.ia_mode = mode;
173 gfs2_assert_withdraw(sdp, 105
174 (ip->i_inode.i_mode & S_IFMT) == (mode & S_IFMT)); 106 error = gfs2_setattr_simple(GFS2_I(inode), &iattr);
175 ip->i_inode.i_mode = mode;
176 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
177 gfs2_dinode_out(ip, dibh->b_data);
178 brelse(dibh);
179 } 107 }
180 108
181 gfs2_trans_end(sdp); 109 return error;
110}
111
112static int gfs2_acl_set(struct inode *inode, int type, struct posix_acl *acl)
113{
114 int error;
115 int len;
116 char *data;
117 const char *name = gfs2_acl_name(type);
182 118
183 return 0; 119 BUG_ON(name == NULL);
120 len = posix_acl_to_xattr(acl, NULL, 0);
121 if (len == 0)
122 return 0;
123 data = kmalloc(len, GFP_NOFS);
124 if (data == NULL)
125 return -ENOMEM;
126 error = posix_acl_to_xattr(acl, data, len);
127 if (error < 0)
128 goto out;
129 error = gfs2_xattr_set(inode, GFS2_EATYPE_SYS, name, data, len, 0);
130 if (!error)
131 set_cached_acl(inode, type, acl);
132out:
133 kfree(data);
134 return error;
184} 135}
185 136
186int gfs2_acl_create(struct gfs2_inode *dip, struct gfs2_inode *ip) 137int gfs2_acl_create(struct gfs2_inode *dip, struct inode *inode)
187{ 138{
188 struct gfs2_ea_location el;
189 struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode); 139 struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
190 struct posix_acl *acl = NULL, *clone; 140 struct posix_acl *acl, *clone;
191 mode_t mode = ip->i_inode.i_mode; 141 mode_t mode = inode->i_mode;
192 char *data = NULL; 142 int error = 0;
193 unsigned int len;
194 int error;
195 143
196 if (!sdp->sd_args.ar_posix_acl) 144 if (!sdp->sd_args.ar_posix_acl)
197 return 0; 145 return 0;
198 if (S_ISLNK(ip->i_inode.i_mode)) 146 if (S_ISLNK(inode->i_mode))
199 return 0; 147 return 0;
200 148
201 error = acl_get(dip, GFS2_POSIX_ACL_DEFAULT, &acl, &el, &data, &len); 149 acl = gfs2_acl_get(dip, ACL_TYPE_DEFAULT);
202 brelse(el.el_bh); 150 if (IS_ERR(acl))
203 if (error) 151 return PTR_ERR(acl);
204 return error;
205 if (!acl) { 152 if (!acl) {
206 mode &= ~current_umask(); 153 mode &= ~current_umask();
207 if (mode != ip->i_inode.i_mode) 154 if (mode != inode->i_mode)
208 error = munge_mode(ip, mode); 155 error = gfs2_set_mode(inode, mode);
209 return error; 156 return error;
210 } 157 }
211 158
159 if (S_ISDIR(inode->i_mode)) {
160 error = gfs2_acl_set(inode, ACL_TYPE_DEFAULT, acl);
161 if (error)
162 goto out;
163 }
164
212 clone = posix_acl_clone(acl, GFP_NOFS); 165 clone = posix_acl_clone(acl, GFP_NOFS);
213 error = -ENOMEM; 166 error = -ENOMEM;
214 if (!clone) 167 if (!clone)
@@ -216,43 +169,32 @@ int gfs2_acl_create(struct gfs2_inode *dip, struct gfs2_inode *ip)
216 posix_acl_release(acl); 169 posix_acl_release(acl);
217 acl = clone; 170 acl = clone;
218 171
219 if (S_ISDIR(ip->i_inode.i_mode)) {
220 error = gfs2_xattr_set(&ip->i_inode, GFS2_EATYPE_SYS,
221 GFS2_POSIX_ACL_DEFAULT, data, len, 0);
222 if (error)
223 goto out;
224 }
225
226 error = posix_acl_create_masq(acl, &mode); 172 error = posix_acl_create_masq(acl, &mode);
227 if (error < 0) 173 if (error < 0)
228 goto out; 174 goto out;
229 if (error == 0) 175 if (error == 0)
230 goto munge; 176 goto munge;
231 177
232 posix_acl_to_xattr(acl, data, len); 178 error = gfs2_acl_set(inode, ACL_TYPE_ACCESS, acl);
233 error = gfs2_xattr_set(&ip->i_inode, GFS2_EATYPE_SYS,
234 GFS2_POSIX_ACL_ACCESS, data, len, 0);
235 if (error) 179 if (error)
236 goto out; 180 goto out;
237munge: 181munge:
238 error = munge_mode(ip, mode); 182 error = gfs2_set_mode(inode, mode);
239out: 183out:
240 posix_acl_release(acl); 184 posix_acl_release(acl);
241 kfree(data);
242 return error; 185 return error;
243} 186}
244 187
245int gfs2_acl_chmod(struct gfs2_inode *ip, struct iattr *attr) 188int gfs2_acl_chmod(struct gfs2_inode *ip, struct iattr *attr)
246{ 189{
247 struct posix_acl *acl = NULL, *clone; 190 struct posix_acl *acl, *clone;
248 struct gfs2_ea_location el;
249 char *data; 191 char *data;
250 unsigned int len; 192 unsigned int len;
251 int error; 193 int error;
252 194
253 error = acl_get(ip, GFS2_POSIX_ACL_ACCESS, &acl, &el, &data, &len); 195 acl = gfs2_acl_get(ip, ACL_TYPE_ACCESS);
254 if (error) 196 if (IS_ERR(acl))
255 goto out_brelse; 197 return PTR_ERR(acl);
256 if (!acl) 198 if (!acl)
257 return gfs2_setattr_simple(ip, attr); 199 return gfs2_setattr_simple(ip, attr);
258 200
@@ -265,15 +207,134 @@ int gfs2_acl_chmod(struct gfs2_inode *ip, struct iattr *attr)
265 207
266 error = posix_acl_chmod_masq(acl, attr->ia_mode); 208 error = posix_acl_chmod_masq(acl, attr->ia_mode);
267 if (!error) { 209 if (!error) {
210 len = posix_acl_to_xattr(acl, NULL, 0);
211 data = kmalloc(len, GFP_NOFS);
212 error = -ENOMEM;
213 if (data == NULL)
214 goto out;
268 posix_acl_to_xattr(acl, data, len); 215 posix_acl_to_xattr(acl, data, len);
269 error = gfs2_ea_acl_chmod(ip, &el, attr, data); 216 error = gfs2_xattr_acl_chmod(ip, attr, data);
217 kfree(data);
218 set_cached_acl(&ip->i_inode, ACL_TYPE_ACCESS, acl);
270 } 219 }
271 220
272out: 221out:
273 posix_acl_release(acl); 222 posix_acl_release(acl);
274 kfree(data);
275out_brelse:
276 brelse(el.el_bh);
277 return error; 223 return error;
278} 224}
279 225
226static int gfs2_acl_type(const char *name)
227{
228 if (strcmp(name, GFS2_POSIX_ACL_ACCESS) == 0)
229 return ACL_TYPE_ACCESS;
230 if (strcmp(name, GFS2_POSIX_ACL_DEFAULT) == 0)
231 return ACL_TYPE_DEFAULT;
232 return -EINVAL;
233}
234
235static int gfs2_xattr_system_get(struct inode *inode, const char *name,
236 void *buffer, size_t size)
237{
238 struct posix_acl *acl;
239 int type;
240 int error;
241
242 type = gfs2_acl_type(name);
243 if (type < 0)
244 return type;
245
246 acl = gfs2_acl_get(GFS2_I(inode), type);
247 if (IS_ERR(acl))
248 return PTR_ERR(acl);
249 if (acl == NULL)
250 return -ENODATA;
251
252 error = posix_acl_to_xattr(acl, buffer, size);
253 posix_acl_release(acl);
254
255 return error;
256}
257
258static int gfs2_xattr_system_set(struct inode *inode, const char *name,
259 const void *value, size_t size, int flags)
260{
261 struct gfs2_sbd *sdp = GFS2_SB(inode);
262 struct posix_acl *acl = NULL;
263 int error = 0, type;
264
265 if (!sdp->sd_args.ar_posix_acl)
266 return -EOPNOTSUPP;
267
268 type = gfs2_acl_type(name);
269 if (type < 0)
270 return type;
271 if (flags & XATTR_CREATE)
272 return -EINVAL;
273 if (type == ACL_TYPE_DEFAULT && !S_ISDIR(inode->i_mode))
274 return value ? -EACCES : 0;
275 if ((current_fsuid() != inode->i_uid) && !capable(CAP_FOWNER))
276 return -EPERM;
277 if (S_ISLNK(inode->i_mode))
278 return -EOPNOTSUPP;
279
280 if (!value)
281 goto set_acl;
282
283 acl = posix_acl_from_xattr(value, size);
284 if (!acl) {
285 /*
286 * acl_set_file(3) may request that we set default ACLs with
287 * zero length -- defend (gracefully) against that here.
288 */
289 goto out;
290 }
291 if (IS_ERR(acl)) {
292 error = PTR_ERR(acl);
293 goto out;
294 }
295
296 error = posix_acl_valid(acl);
297 if (error)
298 goto out_release;
299
300 error = -EINVAL;
301 if (acl->a_count > GFS2_ACL_MAX_ENTRIES)
302 goto out_release;
303
304 if (type == ACL_TYPE_ACCESS) {
305 mode_t mode = inode->i_mode;
306 error = posix_acl_equiv_mode(acl, &mode);
307
308 if (error <= 0) {
309 posix_acl_release(acl);
310 acl = NULL;
311
312 if (error < 0)
313 return error;
314 }
315
316 error = gfs2_set_mode(inode, mode);
317 if (error)
318 goto out_release;
319 }
320
321set_acl:
322 error = gfs2_xattr_set(inode, GFS2_EATYPE_SYS, name, value, size, 0);
323 if (!error) {
324 if (acl)
325 set_cached_acl(inode, type, acl);
326 else
327 forget_cached_acl(inode, type);
328 }
329out_release:
330 posix_acl_release(acl);
331out:
332 return error;
333}
334
335struct xattr_handler gfs2_xattr_system_handler = {
336 .prefix = XATTR_SYSTEM_PREFIX,
337 .get = gfs2_xattr_system_get,
338 .set = gfs2_xattr_system_set,
339};
340
diff --git a/fs/gfs2/acl.h b/fs/gfs2/acl.h
index 6751930bfb64..9306a2e6620c 100644
--- a/fs/gfs2/acl.h
+++ b/fs/gfs2/acl.h
@@ -13,26 +13,12 @@
13#include "incore.h" 13#include "incore.h"
14 14
15#define GFS2_POSIX_ACL_ACCESS "posix_acl_access" 15#define GFS2_POSIX_ACL_ACCESS "posix_acl_access"
16#define GFS2_POSIX_ACL_ACCESS_LEN 16
17#define GFS2_POSIX_ACL_DEFAULT "posix_acl_default" 16#define GFS2_POSIX_ACL_DEFAULT "posix_acl_default"
18#define GFS2_POSIX_ACL_DEFAULT_LEN 17 17#define GFS2_ACL_MAX_ENTRIES 25
19 18
20#define GFS2_ACL_IS_ACCESS(name, len) \ 19extern int gfs2_check_acl(struct inode *inode, int mask);
21 ((len) == GFS2_POSIX_ACL_ACCESS_LEN && \ 20extern int gfs2_acl_create(struct gfs2_inode *dip, struct inode *inode);
22 !memcmp(GFS2_POSIX_ACL_ACCESS, (name), (len))) 21extern int gfs2_acl_chmod(struct gfs2_inode *ip, struct iattr *attr);
23 22extern struct xattr_handler gfs2_xattr_system_handler;
24#define GFS2_ACL_IS_DEFAULT(name, len) \
25 ((len) == GFS2_POSIX_ACL_DEFAULT_LEN && \
26 !memcmp(GFS2_POSIX_ACL_DEFAULT, (name), (len)))
27
28struct gfs2_ea_request;
29
30int gfs2_acl_validate_set(struct gfs2_inode *ip, int access,
31 struct gfs2_ea_request *er,
32 int *remove, mode_t *mode);
33int gfs2_acl_validate_remove(struct gfs2_inode *ip, int access);
34int gfs2_check_acl(struct inode *inode, int mask);
35int gfs2_acl_create(struct gfs2_inode *dip, struct gfs2_inode *ip);
36int gfs2_acl_chmod(struct gfs2_inode *ip, struct iattr *attr);
37 23
38#endif /* __ACL_DOT_H__ */ 24#endif /* __ACL_DOT_H__ */
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index 694b5d48f036..7b8da9415267 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -269,7 +269,6 @@ static int gfs2_write_jdata_pagevec(struct address_space *mapping,
269 pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT; 269 pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
270 unsigned offset = i_size & (PAGE_CACHE_SIZE-1); 270 unsigned offset = i_size & (PAGE_CACHE_SIZE-1);
271 unsigned nrblocks = nr_pages * (PAGE_CACHE_SIZE/inode->i_sb->s_blocksize); 271 unsigned nrblocks = nr_pages * (PAGE_CACHE_SIZE/inode->i_sb->s_blocksize);
272 struct backing_dev_info *bdi = mapping->backing_dev_info;
273 int i; 272 int i;
274 int ret; 273 int ret;
275 274
@@ -313,11 +312,6 @@ static int gfs2_write_jdata_pagevec(struct address_space *mapping,
313 312
314 if (ret || (--(wbc->nr_to_write) <= 0)) 313 if (ret || (--(wbc->nr_to_write) <= 0))
315 ret = 1; 314 ret = 1;
316 if (wbc->nonblocking && bdi_write_congested(bdi)) {
317 wbc->encountered_congestion = 1;
318 ret = 1;
319 }
320
321 } 315 }
322 gfs2_trans_end(sdp); 316 gfs2_trans_end(sdp);
323 return ret; 317 return ret;
@@ -338,7 +332,6 @@ static int gfs2_write_jdata_pagevec(struct address_space *mapping,
338static int gfs2_write_cache_jdata(struct address_space *mapping, 332static int gfs2_write_cache_jdata(struct address_space *mapping,
339 struct writeback_control *wbc) 333 struct writeback_control *wbc)
340{ 334{
341 struct backing_dev_info *bdi = mapping->backing_dev_info;
342 int ret = 0; 335 int ret = 0;
343 int done = 0; 336 int done = 0;
344 struct pagevec pvec; 337 struct pagevec pvec;
@@ -348,11 +341,6 @@ static int gfs2_write_cache_jdata(struct address_space *mapping,
348 int scanned = 0; 341 int scanned = 0;
349 int range_whole = 0; 342 int range_whole = 0;
350 343
351 if (wbc->nonblocking && bdi_write_congested(bdi)) {
352 wbc->encountered_congestion = 1;
353 return 0;
354 }
355
356 pagevec_init(&pvec, 0); 344 pagevec_init(&pvec, 0);
357 if (wbc->range_cyclic) { 345 if (wbc->range_cyclic) {
358 index = mapping->writeback_index; /* Start from prev offset */ 346 index = mapping->writeback_index; /* Start from prev offset */
@@ -819,8 +807,10 @@ static int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh,
819 mark_inode_dirty(inode); 807 mark_inode_dirty(inode);
820 } 808 }
821 809
822 if (inode == sdp->sd_rindex) 810 if (inode == sdp->sd_rindex) {
823 adjust_fs_space(inode); 811 adjust_fs_space(inode);
812 ip->i_gh.gh_flags |= GL_NOCACHE;
813 }
824 814
825 brelse(dibh); 815 brelse(dibh);
826 gfs2_trans_end(sdp); 816 gfs2_trans_end(sdp);
@@ -889,8 +879,10 @@ static int gfs2_write_end(struct file *file, struct address_space *mapping,
889 mark_inode_dirty(inode); 879 mark_inode_dirty(inode);
890 } 880 }
891 881
892 if (inode == sdp->sd_rindex) 882 if (inode == sdp->sd_rindex) {
893 adjust_fs_space(inode); 883 adjust_fs_space(inode);
884 ip->i_gh.gh_flags |= GL_NOCACHE;
885 }
894 886
895 brelse(dibh); 887 brelse(dibh);
896 gfs2_trans_end(sdp); 888 gfs2_trans_end(sdp);
diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c
index 297d7e5cebad..25fddc100f18 100644
--- a/fs/gfs2/dir.c
+++ b/fs/gfs2/dir.c
@@ -525,38 +525,6 @@ consist_inode:
525 return ERR_PTR(-EIO); 525 return ERR_PTR(-EIO);
526} 526}
527 527
528
529/**
530 * dirent_first - Return the first dirent
531 * @dip: the directory
532 * @bh: The buffer
533 * @dent: Pointer to list of dirents
534 *
535 * return first dirent whether bh points to leaf or stuffed dinode
536 *
537 * Returns: IS_LEAF, IS_DINODE, or -errno
538 */
539
540static int dirent_first(struct gfs2_inode *dip, struct buffer_head *bh,
541 struct gfs2_dirent **dent)
542{
543 struct gfs2_meta_header *h = (struct gfs2_meta_header *)bh->b_data;
544
545 if (be32_to_cpu(h->mh_type) == GFS2_METATYPE_LF) {
546 if (gfs2_meta_check(GFS2_SB(&dip->i_inode), bh))
547 return -EIO;
548 *dent = (struct gfs2_dirent *)(bh->b_data +
549 sizeof(struct gfs2_leaf));
550 return IS_LEAF;
551 } else {
552 if (gfs2_metatype_check(GFS2_SB(&dip->i_inode), bh, GFS2_METATYPE_DI))
553 return -EIO;
554 *dent = (struct gfs2_dirent *)(bh->b_data +
555 sizeof(struct gfs2_dinode));
556 return IS_DINODE;
557 }
558}
559
560static int dirent_check_reclen(struct gfs2_inode *dip, 528static int dirent_check_reclen(struct gfs2_inode *dip,
561 const struct gfs2_dirent *d, const void *end_p) 529 const struct gfs2_dirent *d, const void *end_p)
562{ 530{
@@ -1006,7 +974,7 @@ static int dir_split_leaf(struct inode *inode, const struct qstr *name)
1006 divider = (start + half_len) << (32 - dip->i_depth); 974 divider = (start + half_len) << (32 - dip->i_depth);
1007 975
1008 /* Copy the entries */ 976 /* Copy the entries */
1009 dirent_first(dip, obh, &dent); 977 dent = (struct gfs2_dirent *)(obh->b_data + sizeof(struct gfs2_leaf));
1010 978
1011 do { 979 do {
1012 next = dent; 980 next = dent;
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 8b674b1f3a55..f455a03a09e2 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -241,15 +241,14 @@ int gfs2_glock_put(struct gfs2_glock *gl)
241 int rv = 0; 241 int rv = 0;
242 242
243 write_lock(gl_lock_addr(gl->gl_hash)); 243 write_lock(gl_lock_addr(gl->gl_hash));
244 if (atomic_dec_and_test(&gl->gl_ref)) { 244 if (atomic_dec_and_lock(&gl->gl_ref, &lru_lock)) {
245 hlist_del(&gl->gl_list); 245 hlist_del(&gl->gl_list);
246 write_unlock(gl_lock_addr(gl->gl_hash));
247 spin_lock(&lru_lock);
248 if (!list_empty(&gl->gl_lru)) { 246 if (!list_empty(&gl->gl_lru)) {
249 list_del_init(&gl->gl_lru); 247 list_del_init(&gl->gl_lru);
250 atomic_dec(&lru_count); 248 atomic_dec(&lru_count);
251 } 249 }
252 spin_unlock(&lru_lock); 250 spin_unlock(&lru_lock);
251 write_unlock(gl_lock_addr(gl->gl_hash));
253 GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders)); 252 GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
254 glock_free(gl); 253 glock_free(gl);
255 rv = 1; 254 rv = 1;
@@ -513,7 +512,6 @@ retry:
513 GLOCK_BUG_ON(gl, 1); 512 GLOCK_BUG_ON(gl, 1);
514 } 513 }
515 spin_unlock(&gl->gl_spin); 514 spin_unlock(&gl->gl_spin);
516 gfs2_glock_put(gl);
517 return; 515 return;
518 } 516 }
519 517
@@ -524,8 +522,6 @@ retry:
524 if (glops->go_xmote_bh) { 522 if (glops->go_xmote_bh) {
525 spin_unlock(&gl->gl_spin); 523 spin_unlock(&gl->gl_spin);
526 rv = glops->go_xmote_bh(gl, gh); 524 rv = glops->go_xmote_bh(gl, gh);
527 if (rv == -EAGAIN)
528 return;
529 spin_lock(&gl->gl_spin); 525 spin_lock(&gl->gl_spin);
530 if (rv) { 526 if (rv) {
531 do_error(gl, rv); 527 do_error(gl, rv);
@@ -540,7 +536,6 @@ out:
540 clear_bit(GLF_LOCK, &gl->gl_flags); 536 clear_bit(GLF_LOCK, &gl->gl_flags);
541out_locked: 537out_locked:
542 spin_unlock(&gl->gl_spin); 538 spin_unlock(&gl->gl_spin);
543 gfs2_glock_put(gl);
544} 539}
545 540
546static unsigned int gfs2_lm_lock(struct gfs2_sbd *sdp, void *lock, 541static unsigned int gfs2_lm_lock(struct gfs2_sbd *sdp, void *lock,
@@ -600,7 +595,6 @@ __acquires(&gl->gl_spin)
600 595
601 if (!(ret & LM_OUT_ASYNC)) { 596 if (!(ret & LM_OUT_ASYNC)) {
602 finish_xmote(gl, ret); 597 finish_xmote(gl, ret);
603 gfs2_glock_hold(gl);
604 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) 598 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
605 gfs2_glock_put(gl); 599 gfs2_glock_put(gl);
606 } else { 600 } else {
@@ -672,12 +666,17 @@ out:
672 return; 666 return;
673 667
674out_sched: 668out_sched:
669 clear_bit(GLF_LOCK, &gl->gl_flags);
670 smp_mb__after_clear_bit();
675 gfs2_glock_hold(gl); 671 gfs2_glock_hold(gl);
676 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) 672 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
677 gfs2_glock_put_nolock(gl); 673 gfs2_glock_put_nolock(gl);
674 return;
675
678out_unlock: 676out_unlock:
679 clear_bit(GLF_LOCK, &gl->gl_flags); 677 clear_bit(GLF_LOCK, &gl->gl_flags);
680 goto out; 678 smp_mb__after_clear_bit();
679 return;
681} 680}
682 681
683static void delete_work_func(struct work_struct *work) 682static void delete_work_func(struct work_struct *work)
@@ -707,9 +706,12 @@ static void glock_work_func(struct work_struct *work)
707{ 706{
708 unsigned long delay = 0; 707 unsigned long delay = 0;
709 struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work); 708 struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
709 int drop_ref = 0;
710 710
711 if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) 711 if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
712 finish_xmote(gl, gl->gl_reply); 712 finish_xmote(gl, gl->gl_reply);
713 drop_ref = 1;
714 }
713 down_read(&gfs2_umount_flush_sem); 715 down_read(&gfs2_umount_flush_sem);
714 spin_lock(&gl->gl_spin); 716 spin_lock(&gl->gl_spin);
715 if (test_and_clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) && 717 if (test_and_clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
@@ -727,6 +729,8 @@ static void glock_work_func(struct work_struct *work)
727 if (!delay || 729 if (!delay ||
728 queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0) 730 queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
729 gfs2_glock_put(gl); 731 gfs2_glock_put(gl);
732 if (drop_ref)
733 gfs2_glock_put(gl);
730} 734}
731 735
732/** 736/**
@@ -1361,10 +1365,6 @@ static int gfs2_shrink_glock_memory(int nr, gfp_t gfp_mask)
1361 list_del_init(&gl->gl_lru); 1365 list_del_init(&gl->gl_lru);
1362 atomic_dec(&lru_count); 1366 atomic_dec(&lru_count);
1363 1367
1364 /* Check if glock is about to be freed */
1365 if (atomic_read(&gl->gl_ref) == 0)
1366 continue;
1367
1368 /* Test for being demotable */ 1368 /* Test for being demotable */
1369 if (!test_and_set_bit(GLF_LOCK, &gl->gl_flags)) { 1369 if (!test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
1370 gfs2_glock_hold(gl); 1370 gfs2_glock_hold(gl);
@@ -1375,10 +1375,11 @@ static int gfs2_shrink_glock_memory(int nr, gfp_t gfp_mask)
1375 handle_callback(gl, LM_ST_UNLOCKED, 0); 1375 handle_callback(gl, LM_ST_UNLOCKED, 0);
1376 nr--; 1376 nr--;
1377 } 1377 }
1378 clear_bit(GLF_LOCK, &gl->gl_flags);
1379 smp_mb__after_clear_bit();
1378 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) 1380 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
1379 gfs2_glock_put_nolock(gl); 1381 gfs2_glock_put_nolock(gl);
1380 spin_unlock(&gl->gl_spin); 1382 spin_unlock(&gl->gl_spin);
1381 clear_bit(GLF_LOCK, &gl->gl_flags);
1382 spin_lock(&lru_lock); 1383 spin_lock(&lru_lock);
1383 continue; 1384 continue;
1384 } 1385 }
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
index c609894ec0d0..13f0bd228132 100644
--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -180,15 +180,6 @@ static inline int gfs2_glock_is_held_shrd(struct gfs2_glock *gl)
180 return gl->gl_state == LM_ST_SHARED; 180 return gl->gl_state == LM_ST_SHARED;
181} 181}
182 182
183static inline int gfs2_glock_is_blocking(struct gfs2_glock *gl)
184{
185 int ret;
186 spin_lock(&gl->gl_spin);
187 ret = test_bit(GLF_DEMOTE, &gl->gl_flags);
188 spin_unlock(&gl->gl_spin);
189 return ret;
190}
191
192int gfs2_glock_get(struct gfs2_sbd *sdp, 183int gfs2_glock_get(struct gfs2_sbd *sdp,
193 u64 number, const struct gfs2_glock_operations *glops, 184 u64 number, const struct gfs2_glock_operations *glops,
194 int create, struct gfs2_glock **glp); 185 int create, struct gfs2_glock **glp);
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index 6985eef06c39..78554acc0605 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -13,6 +13,7 @@
13#include <linux/buffer_head.h> 13#include <linux/buffer_head.h>
14#include <linux/gfs2_ondisk.h> 14#include <linux/gfs2_ondisk.h>
15#include <linux/bio.h> 15#include <linux/bio.h>
16#include <linux/posix_acl.h>
16 17
17#include "gfs2.h" 18#include "gfs2.h"
18#include "incore.h" 19#include "incore.h"
@@ -184,8 +185,10 @@ static void inode_go_inval(struct gfs2_glock *gl, int flags)
184 if (flags & DIO_METADATA) { 185 if (flags & DIO_METADATA) {
185 struct address_space *mapping = gl->gl_aspace->i_mapping; 186 struct address_space *mapping = gl->gl_aspace->i_mapping;
186 truncate_inode_pages(mapping, 0); 187 truncate_inode_pages(mapping, 0);
187 if (ip) 188 if (ip) {
188 set_bit(GIF_INVALID, &ip->i_flags); 189 set_bit(GIF_INVALID, &ip->i_flags);
190 forget_all_cached_acls(&ip->i_inode);
191 }
189 } 192 }
190 193
191 if (ip == GFS2_I(gl->gl_sbd->sd_rindex)) 194 if (ip == GFS2_I(gl->gl_sbd->sd_rindex))
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 6edb423f90b3..4792200978c8 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -429,7 +429,11 @@ struct gfs2_args {
429 unsigned int ar_meta:1; /* mount metafs */ 429 unsigned int ar_meta:1; /* mount metafs */
430 unsigned int ar_discard:1; /* discard requests */ 430 unsigned int ar_discard:1; /* discard requests */
431 unsigned int ar_errors:2; /* errors=withdraw | panic */ 431 unsigned int ar_errors:2; /* errors=withdraw | panic */
432 unsigned int ar_nobarrier:1; /* do not send barriers */
432 int ar_commit; /* Commit interval */ 433 int ar_commit; /* Commit interval */
434 int ar_statfs_quantum; /* The fast statfs interval */
435 int ar_quota_quantum; /* The quota interval */
436 int ar_statfs_percent; /* The % change to force sync */
433}; 437};
434 438
435struct gfs2_tune { 439struct gfs2_tune {
@@ -558,6 +562,7 @@ struct gfs2_sbd {
558 spinlock_t sd_statfs_spin; 562 spinlock_t sd_statfs_spin;
559 struct gfs2_statfs_change_host sd_statfs_master; 563 struct gfs2_statfs_change_host sd_statfs_master;
560 struct gfs2_statfs_change_host sd_statfs_local; 564 struct gfs2_statfs_change_host sd_statfs_local;
565 int sd_statfs_force_sync;
561 566
562 /* Resource group stuff */ 567 /* Resource group stuff */
563 568
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index fb15d3b1f409..26ba2a4c4a2d 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -871,7 +871,7 @@ struct inode *gfs2_createi(struct gfs2_holder *ghs, const struct qstr *name,
871 if (error) 871 if (error)
872 goto fail_gunlock2; 872 goto fail_gunlock2;
873 873
874 error = gfs2_acl_create(dip, GFS2_I(inode)); 874 error = gfs2_acl_create(dip, inode);
875 if (error) 875 if (error)
876 goto fail_gunlock2; 876 goto fail_gunlock2;
877 877
@@ -947,9 +947,7 @@ void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf)
947 947
948 str->di_header.mh_magic = cpu_to_be32(GFS2_MAGIC); 948 str->di_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
949 str->di_header.mh_type = cpu_to_be32(GFS2_METATYPE_DI); 949 str->di_header.mh_type = cpu_to_be32(GFS2_METATYPE_DI);
950 str->di_header.__pad0 = 0;
951 str->di_header.mh_format = cpu_to_be32(GFS2_FORMAT_DI); 950 str->di_header.mh_format = cpu_to_be32(GFS2_FORMAT_DI);
952 str->di_header.__pad1 = 0;
953 str->di_num.no_addr = cpu_to_be64(ip->i_no_addr); 951 str->di_num.no_addr = cpu_to_be64(ip->i_no_addr);
954 str->di_num.no_formal_ino = cpu_to_be64(ip->i_no_formal_ino); 952 str->di_num.no_formal_ino = cpu_to_be64(ip->i_no_formal_ino);
955 str->di_mode = cpu_to_be32(ip->i_inode.i_mode); 953 str->di_mode = cpu_to_be32(ip->i_inode.i_mode);
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index 13c6237c5f67..4511b08fc451 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -596,7 +596,9 @@ static void log_write_header(struct gfs2_sbd *sdp, u32 flags, int pull)
596 memset(lh, 0, sizeof(struct gfs2_log_header)); 596 memset(lh, 0, sizeof(struct gfs2_log_header));
597 lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC); 597 lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
598 lh->lh_header.mh_type = cpu_to_be32(GFS2_METATYPE_LH); 598 lh->lh_header.mh_type = cpu_to_be32(GFS2_METATYPE_LH);
599 lh->lh_header.__pad0 = cpu_to_be64(0);
599 lh->lh_header.mh_format = cpu_to_be32(GFS2_FORMAT_LH); 600 lh->lh_header.mh_format = cpu_to_be32(GFS2_FORMAT_LH);
601 lh->lh_header.mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid);
600 lh->lh_sequence = cpu_to_be64(sdp->sd_log_sequence++); 602 lh->lh_sequence = cpu_to_be64(sdp->sd_log_sequence++);
601 lh->lh_flags = cpu_to_be32(flags); 603 lh->lh_flags = cpu_to_be32(flags);
602 lh->lh_tail = cpu_to_be32(tail); 604 lh->lh_tail = cpu_to_be32(tail);
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index 9969ff062c5b..de97632ba32f 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -132,6 +132,7 @@ static struct buffer_head *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type)
132static void buf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le) 132static void buf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
133{ 133{
134 struct gfs2_bufdata *bd = container_of(le, struct gfs2_bufdata, bd_le); 134 struct gfs2_bufdata *bd = container_of(le, struct gfs2_bufdata, bd_le);
135 struct gfs2_meta_header *mh;
135 struct gfs2_trans *tr; 136 struct gfs2_trans *tr;
136 137
137 lock_buffer(bd->bd_bh); 138 lock_buffer(bd->bd_bh);
@@ -148,6 +149,9 @@ static void buf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
148 set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags); 149 set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags);
149 gfs2_meta_check(sdp, bd->bd_bh); 150 gfs2_meta_check(sdp, bd->bd_bh);
150 gfs2_pin(sdp, bd->bd_bh); 151 gfs2_pin(sdp, bd->bd_bh);
152 mh = (struct gfs2_meta_header *)bd->bd_bh->b_data;
153 mh->__pad0 = cpu_to_be64(0);
154 mh->mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid);
151 sdp->sd_log_num_buf++; 155 sdp->sd_log_num_buf++;
152 list_add(&le->le_list, &sdp->sd_log_le_buf); 156 list_add(&le->le_list, &sdp->sd_log_le_buf);
153 tr->tr_num_buf_new++; 157 tr->tr_num_buf_new++;
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index 52fb6c048981..edfee24f3636 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -18,6 +18,7 @@
18#include <linux/mount.h> 18#include <linux/mount.h>
19#include <linux/gfs2_ondisk.h> 19#include <linux/gfs2_ondisk.h>
20#include <linux/slow-work.h> 20#include <linux/slow-work.h>
21#include <linux/quotaops.h>
21 22
22#include "gfs2.h" 23#include "gfs2.h"
23#include "incore.h" 24#include "incore.h"
@@ -62,13 +63,10 @@ static void gfs2_tune_init(struct gfs2_tune *gt)
62 gt->gt_quota_warn_period = 10; 63 gt->gt_quota_warn_period = 10;
63 gt->gt_quota_scale_num = 1; 64 gt->gt_quota_scale_num = 1;
64 gt->gt_quota_scale_den = 1; 65 gt->gt_quota_scale_den = 1;
65 gt->gt_quota_quantum = 60;
66 gt->gt_new_files_jdata = 0; 66 gt->gt_new_files_jdata = 0;
67 gt->gt_max_readahead = 1 << 18; 67 gt->gt_max_readahead = 1 << 18;
68 gt->gt_stall_secs = 600; 68 gt->gt_stall_secs = 600;
69 gt->gt_complain_secs = 10; 69 gt->gt_complain_secs = 10;
70 gt->gt_statfs_quantum = 30;
71 gt->gt_statfs_slow = 0;
72} 70}
73 71
74static struct gfs2_sbd *init_sbd(struct super_block *sb) 72static struct gfs2_sbd *init_sbd(struct super_block *sb)
@@ -1114,7 +1112,7 @@ void gfs2_online_uevent(struct gfs2_sbd *sdp)
1114 * Returns: errno 1112 * Returns: errno
1115 */ 1113 */
1116 1114
1117static int fill_super(struct super_block *sb, void *data, int silent) 1115static int fill_super(struct super_block *sb, struct gfs2_args *args, int silent)
1118{ 1116{
1119 struct gfs2_sbd *sdp; 1117 struct gfs2_sbd *sdp;
1120 struct gfs2_holder mount_gh; 1118 struct gfs2_holder mount_gh;
@@ -1125,17 +1123,7 @@ static int fill_super(struct super_block *sb, void *data, int silent)
1125 printk(KERN_WARNING "GFS2: can't alloc struct gfs2_sbd\n"); 1123 printk(KERN_WARNING "GFS2: can't alloc struct gfs2_sbd\n");
1126 return -ENOMEM; 1124 return -ENOMEM;
1127 } 1125 }
1128 1126 sdp->sd_args = *args;
1129 sdp->sd_args.ar_quota = GFS2_QUOTA_DEFAULT;
1130 sdp->sd_args.ar_data = GFS2_DATA_DEFAULT;
1131 sdp->sd_args.ar_commit = 60;
1132 sdp->sd_args.ar_errors = GFS2_ERRORS_DEFAULT;
1133
1134 error = gfs2_mount_args(sdp, &sdp->sd_args, data);
1135 if (error) {
1136 printk(KERN_WARNING "GFS2: can't parse mount arguments\n");
1137 goto fail;
1138 }
1139 1127
1140 if (sdp->sd_args.ar_spectator) { 1128 if (sdp->sd_args.ar_spectator) {
1141 sb->s_flags |= MS_RDONLY; 1129 sb->s_flags |= MS_RDONLY;
@@ -1143,11 +1131,15 @@ static int fill_super(struct super_block *sb, void *data, int silent)
1143 } 1131 }
1144 if (sdp->sd_args.ar_posix_acl) 1132 if (sdp->sd_args.ar_posix_acl)
1145 sb->s_flags |= MS_POSIXACL; 1133 sb->s_flags |= MS_POSIXACL;
1134 if (sdp->sd_args.ar_nobarrier)
1135 set_bit(SDF_NOBARRIERS, &sdp->sd_flags);
1146 1136
1147 sb->s_magic = GFS2_MAGIC; 1137 sb->s_magic = GFS2_MAGIC;
1148 sb->s_op = &gfs2_super_ops; 1138 sb->s_op = &gfs2_super_ops;
1149 sb->s_export_op = &gfs2_export_ops; 1139 sb->s_export_op = &gfs2_export_ops;
1150 sb->s_xattr = gfs2_xattr_handlers; 1140 sb->s_xattr = gfs2_xattr_handlers;
1141 sb->s_qcop = &gfs2_quotactl_ops;
1142 sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE;
1151 sb->s_time_gran = 1; 1143 sb->s_time_gran = 1;
1152 sb->s_maxbytes = MAX_LFS_FILESIZE; 1144 sb->s_maxbytes = MAX_LFS_FILESIZE;
1153 1145
@@ -1160,6 +1152,15 @@ static int fill_super(struct super_block *sb, void *data, int silent)
1160 sdp->sd_fsb2bb = 1 << sdp->sd_fsb2bb_shift; 1152 sdp->sd_fsb2bb = 1 << sdp->sd_fsb2bb_shift;
1161 1153
1162 sdp->sd_tune.gt_log_flush_secs = sdp->sd_args.ar_commit; 1154 sdp->sd_tune.gt_log_flush_secs = sdp->sd_args.ar_commit;
1155 sdp->sd_tune.gt_quota_quantum = sdp->sd_args.ar_quota_quantum;
1156 if (sdp->sd_args.ar_statfs_quantum) {
1157 sdp->sd_tune.gt_statfs_slow = 0;
1158 sdp->sd_tune.gt_statfs_quantum = sdp->sd_args.ar_statfs_quantum;
1159 }
1160 else {
1161 sdp->sd_tune.gt_statfs_slow = 1;
1162 sdp->sd_tune.gt_statfs_quantum = 30;
1163 }
1163 1164
1164 error = init_names(sdp, silent); 1165 error = init_names(sdp, silent);
1165 if (error) 1166 if (error)
@@ -1243,18 +1244,127 @@ fail:
1243 return error; 1244 return error;
1244} 1245}
1245 1246
1246static int gfs2_get_sb(struct file_system_type *fs_type, int flags, 1247static int set_gfs2_super(struct super_block *s, void *data)
1247 const char *dev_name, void *data, struct vfsmount *mnt)
1248{ 1248{
1249 return get_sb_bdev(fs_type, flags, dev_name, data, fill_super, mnt); 1249 s->s_bdev = data;
1250 s->s_dev = s->s_bdev->bd_dev;
1251
1252 /*
1253 * We set the bdi here to the queue backing, file systems can
1254 * overwrite this in ->fill_super()
1255 */
1256 s->s_bdi = &bdev_get_queue(s->s_bdev)->backing_dev_info;
1257 return 0;
1250} 1258}
1251 1259
1252static int test_meta_super(struct super_block *s, void *ptr) 1260static int test_gfs2_super(struct super_block *s, void *ptr)
1253{ 1261{
1254 struct block_device *bdev = ptr; 1262 struct block_device *bdev = ptr;
1255 return (bdev == s->s_bdev); 1263 return (bdev == s->s_bdev);
1256} 1264}
1257 1265
1266/**
1267 * gfs2_get_sb - Get the GFS2 superblock
1268 * @fs_type: The GFS2 filesystem type
1269 * @flags: Mount flags
1270 * @dev_name: The name of the device
1271 * @data: The mount arguments
1272 * @mnt: The vfsmnt for this mount
1273 *
1274 * Q. Why not use get_sb_bdev() ?
1275 * A. We need to select one of two root directories to mount, independent
1276 * of whether this is the initial, or subsequent, mount of this sb
1277 *
1278 * Returns: 0 or -ve on error
1279 */
1280
1281static int gfs2_get_sb(struct file_system_type *fs_type, int flags,
1282 const char *dev_name, void *data, struct vfsmount *mnt)
1283{
1284 struct block_device *bdev;
1285 struct super_block *s;
1286 fmode_t mode = FMODE_READ;
1287 int error;
1288 struct gfs2_args args;
1289 struct gfs2_sbd *sdp;
1290
1291 if (!(flags & MS_RDONLY))
1292 mode |= FMODE_WRITE;
1293
1294 bdev = open_bdev_exclusive(dev_name, mode, fs_type);
1295 if (IS_ERR(bdev))
1296 return PTR_ERR(bdev);
1297
1298 /*
1299 * once the super is inserted into the list by sget, s_umount
1300 * will protect the lockfs code from trying to start a snapshot
1301 * while we are mounting
1302 */
1303 mutex_lock(&bdev->bd_fsfreeze_mutex);
1304 if (bdev->bd_fsfreeze_count > 0) {
1305 mutex_unlock(&bdev->bd_fsfreeze_mutex);
1306 error = -EBUSY;
1307 goto error_bdev;
1308 }
1309 s = sget(fs_type, test_gfs2_super, set_gfs2_super, bdev);
1310 mutex_unlock(&bdev->bd_fsfreeze_mutex);
1311 error = PTR_ERR(s);
1312 if (IS_ERR(s))
1313 goto error_bdev;
1314
1315 memset(&args, 0, sizeof(args));
1316 args.ar_quota = GFS2_QUOTA_DEFAULT;
1317 args.ar_data = GFS2_DATA_DEFAULT;
1318 args.ar_commit = 60;
1319 args.ar_statfs_quantum = 30;
1320 args.ar_quota_quantum = 60;
1321 args.ar_errors = GFS2_ERRORS_DEFAULT;
1322
1323 error = gfs2_mount_args(&args, data);
1324 if (error) {
1325 printk(KERN_WARNING "GFS2: can't parse mount arguments\n");
1326 if (s->s_root)
1327 goto error_super;
1328 deactivate_locked_super(s);
1329 return error;
1330 }
1331
1332 if (s->s_root) {
1333 error = -EBUSY;
1334 if ((flags ^ s->s_flags) & MS_RDONLY)
1335 goto error_super;
1336 close_bdev_exclusive(bdev, mode);
1337 } else {
1338 char b[BDEVNAME_SIZE];
1339
1340 s->s_flags = flags;
1341 s->s_mode = mode;
1342 strlcpy(s->s_id, bdevname(bdev, b), sizeof(s->s_id));
1343 sb_set_blocksize(s, block_size(bdev));
1344 error = fill_super(s, &args, flags & MS_SILENT ? 1 : 0);
1345 if (error) {
1346 deactivate_locked_super(s);
1347 return error;
1348 }
1349 s->s_flags |= MS_ACTIVE;
1350 bdev->bd_super = s;
1351 }
1352
1353 sdp = s->s_fs_info;
1354 mnt->mnt_sb = s;
1355 if (args.ar_meta)
1356 mnt->mnt_root = dget(sdp->sd_master_dir);
1357 else
1358 mnt->mnt_root = dget(sdp->sd_root_dir);
1359 return 0;
1360
1361error_super:
1362 deactivate_locked_super(s);
1363error_bdev:
1364 close_bdev_exclusive(bdev, mode);
1365 return error;
1366}
1367
1258static int set_meta_super(struct super_block *s, void *ptr) 1368static int set_meta_super(struct super_block *s, void *ptr)
1259{ 1369{
1260 return -EINVAL; 1370 return -EINVAL;
@@ -1274,13 +1384,17 @@ static int gfs2_get_sb_meta(struct file_system_type *fs_type, int flags,
1274 dev_name, error); 1384 dev_name, error);
1275 return error; 1385 return error;
1276 } 1386 }
1277 s = sget(&gfs2_fs_type, test_meta_super, set_meta_super, 1387 s = sget(&gfs2_fs_type, test_gfs2_super, set_meta_super,
1278 path.dentry->d_inode->i_sb->s_bdev); 1388 path.dentry->d_inode->i_sb->s_bdev);
1279 path_put(&path); 1389 path_put(&path);
1280 if (IS_ERR(s)) { 1390 if (IS_ERR(s)) {
1281 printk(KERN_WARNING "GFS2: gfs2 mount does not exist\n"); 1391 printk(KERN_WARNING "GFS2: gfs2 mount does not exist\n");
1282 return PTR_ERR(s); 1392 return PTR_ERR(s);
1283 } 1393 }
1394 if ((flags ^ s->s_flags) & MS_RDONLY) {
1395 deactivate_locked_super(s);
1396 return -EBUSY;
1397 }
1284 sdp = s->s_fs_info; 1398 sdp = s->s_fs_info;
1285 mnt->mnt_sb = s; 1399 mnt->mnt_sb = s;
1286 mnt->mnt_root = dget(sdp->sd_master_dir); 1400 mnt->mnt_root = dget(sdp->sd_master_dir);
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index 2e9b9326bfc9..e3bf6eab8750 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -15,7 +15,7 @@
15 * fuzziness in the current usage value of IDs that are being used on different 15 * fuzziness in the current usage value of IDs that are being used on different
16 * nodes in the cluster simultaneously. So, it is possible for a user on 16 * nodes in the cluster simultaneously. So, it is possible for a user on
17 * multiple nodes to overrun their quota, but that overrun is controlable. 17 * multiple nodes to overrun their quota, but that overrun is controlable.
18 * Since quota tags are part of transactions, there is no need to a quota check 18 * Since quota tags are part of transactions, there is no need for a quota check
19 * program to be run on node crashes or anything like that. 19 * program to be run on node crashes or anything like that.
20 * 20 *
21 * There are couple of knobs that let the administrator manage the quota 21 * There are couple of knobs that let the administrator manage the quota
@@ -47,6 +47,8 @@
47#include <linux/gfs2_ondisk.h> 47#include <linux/gfs2_ondisk.h>
48#include <linux/kthread.h> 48#include <linux/kthread.h>
49#include <linux/freezer.h> 49#include <linux/freezer.h>
50#include <linux/quota.h>
51#include <linux/dqblk_xfs.h>
50 52
51#include "gfs2.h" 53#include "gfs2.h"
52#include "incore.h" 54#include "incore.h"
@@ -65,13 +67,6 @@
65#define QUOTA_USER 1 67#define QUOTA_USER 1
66#define QUOTA_GROUP 0 68#define QUOTA_GROUP 0
67 69
68struct gfs2_quota_host {
69 u64 qu_limit;
70 u64 qu_warn;
71 s64 qu_value;
72 u32 qu_ll_next;
73};
74
75struct gfs2_quota_change_host { 70struct gfs2_quota_change_host {
76 u64 qc_change; 71 u64 qc_change;
77 u32 qc_flags; /* GFS2_QCF_... */ 72 u32 qc_flags; /* GFS2_QCF_... */
@@ -164,7 +159,7 @@ fail:
164 return error; 159 return error;
165} 160}
166 161
167static int qd_get(struct gfs2_sbd *sdp, int user, u32 id, int create, 162static int qd_get(struct gfs2_sbd *sdp, int user, u32 id,
168 struct gfs2_quota_data **qdp) 163 struct gfs2_quota_data **qdp)
169{ 164{
170 struct gfs2_quota_data *qd = NULL, *new_qd = NULL; 165 struct gfs2_quota_data *qd = NULL, *new_qd = NULL;
@@ -202,7 +197,7 @@ static int qd_get(struct gfs2_sbd *sdp, int user, u32 id, int create,
202 197
203 spin_unlock(&qd_lru_lock); 198 spin_unlock(&qd_lru_lock);
204 199
205 if (qd || !create) { 200 if (qd) {
206 if (new_qd) { 201 if (new_qd) {
207 gfs2_glock_put(new_qd->qd_gl); 202 gfs2_glock_put(new_qd->qd_gl);
208 kmem_cache_free(gfs2_quotad_cachep, new_qd); 203 kmem_cache_free(gfs2_quotad_cachep, new_qd);
@@ -461,12 +456,12 @@ static void qd_unlock(struct gfs2_quota_data *qd)
461 qd_put(qd); 456 qd_put(qd);
462} 457}
463 458
464static int qdsb_get(struct gfs2_sbd *sdp, int user, u32 id, int create, 459static int qdsb_get(struct gfs2_sbd *sdp, int user, u32 id,
465 struct gfs2_quota_data **qdp) 460 struct gfs2_quota_data **qdp)
466{ 461{
467 int error; 462 int error;
468 463
469 error = qd_get(sdp, user, id, create, qdp); 464 error = qd_get(sdp, user, id, qdp);
470 if (error) 465 if (error)
471 return error; 466 return error;
472 467
@@ -508,20 +503,20 @@ int gfs2_quota_hold(struct gfs2_inode *ip, u32 uid, u32 gid)
508 if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF) 503 if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
509 return 0; 504 return 0;
510 505
511 error = qdsb_get(sdp, QUOTA_USER, ip->i_inode.i_uid, CREATE, qd); 506 error = qdsb_get(sdp, QUOTA_USER, ip->i_inode.i_uid, qd);
512 if (error) 507 if (error)
513 goto out; 508 goto out;
514 al->al_qd_num++; 509 al->al_qd_num++;
515 qd++; 510 qd++;
516 511
517 error = qdsb_get(sdp, QUOTA_GROUP, ip->i_inode.i_gid, CREATE, qd); 512 error = qdsb_get(sdp, QUOTA_GROUP, ip->i_inode.i_gid, qd);
518 if (error) 513 if (error)
519 goto out; 514 goto out;
520 al->al_qd_num++; 515 al->al_qd_num++;
521 qd++; 516 qd++;
522 517
523 if (uid != NO_QUOTA_CHANGE && uid != ip->i_inode.i_uid) { 518 if (uid != NO_QUOTA_CHANGE && uid != ip->i_inode.i_uid) {
524 error = qdsb_get(sdp, QUOTA_USER, uid, CREATE, qd); 519 error = qdsb_get(sdp, QUOTA_USER, uid, qd);
525 if (error) 520 if (error)
526 goto out; 521 goto out;
527 al->al_qd_num++; 522 al->al_qd_num++;
@@ -529,7 +524,7 @@ int gfs2_quota_hold(struct gfs2_inode *ip, u32 uid, u32 gid)
529 } 524 }
530 525
531 if (gid != NO_QUOTA_CHANGE && gid != ip->i_inode.i_gid) { 526 if (gid != NO_QUOTA_CHANGE && gid != ip->i_inode.i_gid) {
532 error = qdsb_get(sdp, QUOTA_GROUP, gid, CREATE, qd); 527 error = qdsb_get(sdp, QUOTA_GROUP, gid, qd);
533 if (error) 528 if (error)
534 goto out; 529 goto out;
535 al->al_qd_num++; 530 al->al_qd_num++;
@@ -617,48 +612,36 @@ static void do_qc(struct gfs2_quota_data *qd, s64 change)
617 mutex_unlock(&sdp->sd_quota_mutex); 612 mutex_unlock(&sdp->sd_quota_mutex);
618} 613}
619 614
620static void gfs2_quota_in(struct gfs2_quota_host *qu, const void *buf)
621{
622 const struct gfs2_quota *str = buf;
623
624 qu->qu_limit = be64_to_cpu(str->qu_limit);
625 qu->qu_warn = be64_to_cpu(str->qu_warn);
626 qu->qu_value = be64_to_cpu(str->qu_value);
627 qu->qu_ll_next = be32_to_cpu(str->qu_ll_next);
628}
629
630static void gfs2_quota_out(const struct gfs2_quota_host *qu, void *buf)
631{
632 struct gfs2_quota *str = buf;
633
634 str->qu_limit = cpu_to_be64(qu->qu_limit);
635 str->qu_warn = cpu_to_be64(qu->qu_warn);
636 str->qu_value = cpu_to_be64(qu->qu_value);
637 str->qu_ll_next = cpu_to_be32(qu->qu_ll_next);
638 memset(&str->qu_reserved, 0, sizeof(str->qu_reserved));
639}
640
641/** 615/**
642 * gfs2_adjust_quota 616 * gfs2_adjust_quota - adjust record of current block usage
617 * @ip: The quota inode
618 * @loc: Offset of the entry in the quota file
619 * @change: The amount of usage change to record
620 * @qd: The quota data
621 * @fdq: The updated limits to record
643 * 622 *
644 * This function was mostly borrowed from gfs2_block_truncate_page which was 623 * This function was mostly borrowed from gfs2_block_truncate_page which was
645 * in turn mostly borrowed from ext3 624 * in turn mostly borrowed from ext3
625 *
626 * Returns: 0 or -ve on error
646 */ 627 */
628
647static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc, 629static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
648 s64 change, struct gfs2_quota_data *qd) 630 s64 change, struct gfs2_quota_data *qd,
631 struct fs_disk_quota *fdq)
649{ 632{
650 struct inode *inode = &ip->i_inode; 633 struct inode *inode = &ip->i_inode;
651 struct address_space *mapping = inode->i_mapping; 634 struct address_space *mapping = inode->i_mapping;
652 unsigned long index = loc >> PAGE_CACHE_SHIFT; 635 unsigned long index = loc >> PAGE_CACHE_SHIFT;
653 unsigned offset = loc & (PAGE_CACHE_SIZE - 1); 636 unsigned offset = loc & (PAGE_CACHE_SIZE - 1);
654 unsigned blocksize, iblock, pos; 637 unsigned blocksize, iblock, pos;
655 struct buffer_head *bh; 638 struct buffer_head *bh, *dibh;
656 struct page *page; 639 struct page *page;
657 void *kaddr; 640 void *kaddr;
658 char *ptr; 641 struct gfs2_quota *qp;
659 struct gfs2_quota_host qp;
660 s64 value; 642 s64 value;
661 int err = -EIO; 643 int err = -EIO;
644 u64 size;
662 645
663 if (gfs2_is_stuffed(ip)) 646 if (gfs2_is_stuffed(ip))
664 gfs2_unstuff_dinode(ip, NULL); 647 gfs2_unstuff_dinode(ip, NULL);
@@ -700,18 +683,38 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
700 gfs2_trans_add_bh(ip->i_gl, bh, 0); 683 gfs2_trans_add_bh(ip->i_gl, bh, 0);
701 684
702 kaddr = kmap_atomic(page, KM_USER0); 685 kaddr = kmap_atomic(page, KM_USER0);
703 ptr = kaddr + offset; 686 qp = kaddr + offset;
704 gfs2_quota_in(&qp, ptr); 687 value = (s64)be64_to_cpu(qp->qu_value) + change;
705 qp.qu_value += change; 688 qp->qu_value = cpu_to_be64(value);
706 value = qp.qu_value; 689 qd->qd_qb.qb_value = qp->qu_value;
707 gfs2_quota_out(&qp, ptr); 690 if (fdq) {
691 if (fdq->d_fieldmask & FS_DQ_BSOFT) {
692 qp->qu_warn = cpu_to_be64(fdq->d_blk_softlimit);
693 qd->qd_qb.qb_warn = qp->qu_warn;
694 }
695 if (fdq->d_fieldmask & FS_DQ_BHARD) {
696 qp->qu_limit = cpu_to_be64(fdq->d_blk_hardlimit);
697 qd->qd_qb.qb_limit = qp->qu_limit;
698 }
699 }
708 flush_dcache_page(page); 700 flush_dcache_page(page);
709 kunmap_atomic(kaddr, KM_USER0); 701 kunmap_atomic(kaddr, KM_USER0);
710 err = 0; 702
711 qd->qd_qb.qb_magic = cpu_to_be32(GFS2_MAGIC); 703 err = gfs2_meta_inode_buffer(ip, &dibh);
712 qd->qd_qb.qb_value = cpu_to_be64(value); 704 if (err)
713 ((struct gfs2_quota_lvb*)(qd->qd_gl->gl_lvb))->qb_magic = cpu_to_be32(GFS2_MAGIC); 705 goto unlock;
714 ((struct gfs2_quota_lvb*)(qd->qd_gl->gl_lvb))->qb_value = cpu_to_be64(value); 706
707 size = loc + sizeof(struct gfs2_quota);
708 if (size > inode->i_size) {
709 ip->i_disksize = size;
710 i_size_write(inode, size);
711 }
712 inode->i_mtime = inode->i_atime = CURRENT_TIME;
713 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
714 gfs2_dinode_out(ip, dibh->b_data);
715 brelse(dibh);
716 mark_inode_dirty(inode);
717
715unlock: 718unlock:
716 unlock_page(page); 719 unlock_page(page);
717 page_cache_release(page); 720 page_cache_release(page);
@@ -739,9 +742,9 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
739 return -ENOMEM; 742 return -ENOMEM;
740 743
741 sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL); 744 sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
745 mutex_lock_nested(&ip->i_inode.i_mutex, I_MUTEX_QUOTA);
742 for (qx = 0; qx < num_qd; qx++) { 746 for (qx = 0; qx < num_qd; qx++) {
743 error = gfs2_glock_nq_init(qda[qx]->qd_gl, 747 error = gfs2_glock_nq_init(qda[qx]->qd_gl, LM_ST_EXCLUSIVE,
744 LM_ST_EXCLUSIVE,
745 GL_NOCACHE, &ghs[qx]); 748 GL_NOCACHE, &ghs[qx]);
746 if (error) 749 if (error)
747 goto out; 750 goto out;
@@ -795,9 +798,7 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
795 for (x = 0; x < num_qd; x++) { 798 for (x = 0; x < num_qd; x++) {
796 qd = qda[x]; 799 qd = qda[x];
797 offset = qd2offset(qd); 800 offset = qd2offset(qd);
798 error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync, 801 error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync, qd, NULL);
799 (struct gfs2_quota_data *)
800 qd);
801 if (error) 802 if (error)
802 goto out_end_trans; 803 goto out_end_trans;
803 804
@@ -817,21 +818,44 @@ out_gunlock:
817out: 818out:
818 while (qx--) 819 while (qx--)
819 gfs2_glock_dq_uninit(&ghs[qx]); 820 gfs2_glock_dq_uninit(&ghs[qx]);
821 mutex_unlock(&ip->i_inode.i_mutex);
820 kfree(ghs); 822 kfree(ghs);
821 gfs2_log_flush(ip->i_gl->gl_sbd, ip->i_gl); 823 gfs2_log_flush(ip->i_gl->gl_sbd, ip->i_gl);
822 return error; 824 return error;
823} 825}
824 826
827static int update_qd(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd)
828{
829 struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
830 struct gfs2_quota q;
831 struct gfs2_quota_lvb *qlvb;
832 loff_t pos;
833 int error;
834
835 memset(&q, 0, sizeof(struct gfs2_quota));
836 pos = qd2offset(qd);
837 error = gfs2_internal_read(ip, NULL, (char *)&q, &pos, sizeof(q));
838 if (error < 0)
839 return error;
840
841 qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
842 qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
843 qlvb->__pad = 0;
844 qlvb->qb_limit = q.qu_limit;
845 qlvb->qb_warn = q.qu_warn;
846 qlvb->qb_value = q.qu_value;
847 qd->qd_qb = *qlvb;
848
849 return 0;
850}
851
825static int do_glock(struct gfs2_quota_data *qd, int force_refresh, 852static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
826 struct gfs2_holder *q_gh) 853 struct gfs2_holder *q_gh)
827{ 854{
828 struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd; 855 struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
829 struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode); 856 struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
830 struct gfs2_holder i_gh; 857 struct gfs2_holder i_gh;
831 struct gfs2_quota_host q;
832 char buf[sizeof(struct gfs2_quota)];
833 int error; 858 int error;
834 struct gfs2_quota_lvb *qlvb;
835 859
836restart: 860restart:
837 error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh); 861 error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh);
@@ -841,11 +865,9 @@ restart:
841 qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb; 865 qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
842 866
843 if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) { 867 if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) {
844 loff_t pos;
845 gfs2_glock_dq_uninit(q_gh); 868 gfs2_glock_dq_uninit(q_gh);
846 error = gfs2_glock_nq_init(qd->qd_gl, 869 error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE,
847 LM_ST_EXCLUSIVE, GL_NOCACHE, 870 GL_NOCACHE, q_gh);
848 q_gh);
849 if (error) 871 if (error)
850 return error; 872 return error;
851 873
@@ -853,29 +875,14 @@ restart:
853 if (error) 875 if (error)
854 goto fail; 876 goto fail;
855 877
856 memset(buf, 0, sizeof(struct gfs2_quota)); 878 error = update_qd(sdp, qd);
857 pos = qd2offset(qd); 879 if (error)
858 error = gfs2_internal_read(ip, NULL, buf, &pos,
859 sizeof(struct gfs2_quota));
860 if (error < 0)
861 goto fail_gunlock; 880 goto fail_gunlock;
862 881
863 gfs2_glock_dq_uninit(&i_gh); 882 gfs2_glock_dq_uninit(&i_gh);
864 883 gfs2_glock_dq_uninit(q_gh);
865 gfs2_quota_in(&q, buf); 884 force_refresh = 0;
866 qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb; 885 goto restart;
867 qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
868 qlvb->__pad = 0;
869 qlvb->qb_limit = cpu_to_be64(q.qu_limit);
870 qlvb->qb_warn = cpu_to_be64(q.qu_warn);
871 qlvb->qb_value = cpu_to_be64(q.qu_value);
872 qd->qd_qb = *qlvb;
873
874 if (gfs2_glock_is_blocking(qd->qd_gl)) {
875 gfs2_glock_dq_uninit(q_gh);
876 force_refresh = 0;
877 goto restart;
878 }
879 } 886 }
880 887
881 return 0; 888 return 0;
@@ -995,7 +1002,7 @@ static int print_message(struct gfs2_quota_data *qd, char *type)
995{ 1002{
996 struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd; 1003 struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
997 1004
998 printk(KERN_INFO "GFS2: fsid=%s: quota %s for %s %u\r\n", 1005 printk(KERN_INFO "GFS2: fsid=%s: quota %s for %s %u\n",
999 sdp->sd_fsname, type, 1006 sdp->sd_fsname, type,
1000 (test_bit(QDF_USER, &qd->qd_flags)) ? "user" : "group", 1007 (test_bit(QDF_USER, &qd->qd_flags)) ? "user" : "group",
1001 qd->qd_id); 1008 qd->qd_id);
@@ -1032,6 +1039,10 @@ int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid)
1032 1039
1033 if (be64_to_cpu(qd->qd_qb.qb_limit) && (s64)be64_to_cpu(qd->qd_qb.qb_limit) < value) { 1040 if (be64_to_cpu(qd->qd_qb.qb_limit) && (s64)be64_to_cpu(qd->qd_qb.qb_limit) < value) {
1034 print_message(qd, "exceeded"); 1041 print_message(qd, "exceeded");
1042 quota_send_warning(test_bit(QDF_USER, &qd->qd_flags) ?
1043 USRQUOTA : GRPQUOTA, qd->qd_id,
1044 sdp->sd_vfs->s_dev, QUOTA_NL_BHARDWARN);
1045
1035 error = -EDQUOT; 1046 error = -EDQUOT;
1036 break; 1047 break;
1037 } else if (be64_to_cpu(qd->qd_qb.qb_warn) && 1048 } else if (be64_to_cpu(qd->qd_qb.qb_warn) &&
@@ -1039,6 +1050,9 @@ int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid)
1039 time_after_eq(jiffies, qd->qd_last_warn + 1050 time_after_eq(jiffies, qd->qd_last_warn +
1040 gfs2_tune_get(sdp, 1051 gfs2_tune_get(sdp,
1041 gt_quota_warn_period) * HZ)) { 1052 gt_quota_warn_period) * HZ)) {
1053 quota_send_warning(test_bit(QDF_USER, &qd->qd_flags) ?
1054 USRQUOTA : GRPQUOTA, qd->qd_id,
1055 sdp->sd_vfs->s_dev, QUOTA_NL_BSOFTWARN);
1042 error = print_message(qd, "warning"); 1056 error = print_message(qd, "warning");
1043 qd->qd_last_warn = jiffies; 1057 qd->qd_last_warn = jiffies;
1044 } 1058 }
@@ -1069,8 +1083,9 @@ void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
1069 } 1083 }
1070} 1084}
1071 1085
1072int gfs2_quota_sync(struct gfs2_sbd *sdp) 1086int gfs2_quota_sync(struct super_block *sb, int type)
1073{ 1087{
1088 struct gfs2_sbd *sdp = sb->s_fs_info;
1074 struct gfs2_quota_data **qda; 1089 struct gfs2_quota_data **qda;
1075 unsigned int max_qd = gfs2_tune_get(sdp, gt_quota_simul_sync); 1090 unsigned int max_qd = gfs2_tune_get(sdp, gt_quota_simul_sync);
1076 unsigned int num_qd; 1091 unsigned int num_qd;
@@ -1118,7 +1133,7 @@ int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, u32 id)
1118 struct gfs2_holder q_gh; 1133 struct gfs2_holder q_gh;
1119 int error; 1134 int error;
1120 1135
1121 error = qd_get(sdp, user, id, CREATE, &qd); 1136 error = qd_get(sdp, user, id, &qd);
1122 if (error) 1137 if (error)
1123 return error; 1138 return error;
1124 1139
@@ -1127,7 +1142,6 @@ int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, u32 id)
1127 gfs2_glock_dq_uninit(&q_gh); 1142 gfs2_glock_dq_uninit(&q_gh);
1128 1143
1129 qd_put(qd); 1144 qd_put(qd);
1130
1131 return error; 1145 return error;
1132} 1146}
1133 1147
@@ -1298,12 +1312,12 @@ static void quotad_error(struct gfs2_sbd *sdp, const char *msg, int error)
1298} 1312}
1299 1313
1300static void quotad_check_timeo(struct gfs2_sbd *sdp, const char *msg, 1314static void quotad_check_timeo(struct gfs2_sbd *sdp, const char *msg,
1301 int (*fxn)(struct gfs2_sbd *sdp), 1315 int (*fxn)(struct super_block *sb, int type),
1302 unsigned long t, unsigned long *timeo, 1316 unsigned long t, unsigned long *timeo,
1303 unsigned int *new_timeo) 1317 unsigned int *new_timeo)
1304{ 1318{
1305 if (t >= *timeo) { 1319 if (t >= *timeo) {
1306 int error = fxn(sdp); 1320 int error = fxn(sdp->sd_vfs, 0);
1307 quotad_error(sdp, msg, error); 1321 quotad_error(sdp, msg, error);
1308 *timeo = gfs2_tune_get_i(&sdp->sd_tune, new_timeo) * HZ; 1322 *timeo = gfs2_tune_get_i(&sdp->sd_tune, new_timeo) * HZ;
1309 } else { 1323 } else {
@@ -1330,6 +1344,14 @@ static void quotad_check_trunc_list(struct gfs2_sbd *sdp)
1330 } 1344 }
1331} 1345}
1332 1346
1347void gfs2_wake_up_statfs(struct gfs2_sbd *sdp) {
1348 if (!sdp->sd_statfs_force_sync) {
1349 sdp->sd_statfs_force_sync = 1;
1350 wake_up(&sdp->sd_quota_wait);
1351 }
1352}
1353
1354
1333/** 1355/**
1334 * gfs2_quotad - Write cached quota changes into the quota file 1356 * gfs2_quotad - Write cached quota changes into the quota file
1335 * @sdp: Pointer to GFS2 superblock 1357 * @sdp: Pointer to GFS2 superblock
@@ -1349,8 +1371,15 @@ int gfs2_quotad(void *data)
1349 while (!kthread_should_stop()) { 1371 while (!kthread_should_stop()) {
1350 1372
1351 /* Update the master statfs file */ 1373 /* Update the master statfs file */
1352 quotad_check_timeo(sdp, "statfs", gfs2_statfs_sync, t, 1374 if (sdp->sd_statfs_force_sync) {
1353 &statfs_timeo, &tune->gt_statfs_quantum); 1375 int error = gfs2_statfs_sync(sdp->sd_vfs, 0);
1376 quotad_error(sdp, "statfs", error);
1377 statfs_timeo = gfs2_tune_get(sdp, gt_statfs_quantum) * HZ;
1378 }
1379 else
1380 quotad_check_timeo(sdp, "statfs", gfs2_statfs_sync, t,
1381 &statfs_timeo,
1382 &tune->gt_statfs_quantum);
1354 1383
1355 /* Update quota file */ 1384 /* Update quota file */
1356 quotad_check_timeo(sdp, "sync", gfs2_quota_sync, t, 1385 quotad_check_timeo(sdp, "sync", gfs2_quota_sync, t,
@@ -1367,7 +1396,7 @@ int gfs2_quotad(void *data)
1367 spin_lock(&sdp->sd_trunc_lock); 1396 spin_lock(&sdp->sd_trunc_lock);
1368 empty = list_empty(&sdp->sd_trunc_list); 1397 empty = list_empty(&sdp->sd_trunc_list);
1369 spin_unlock(&sdp->sd_trunc_lock); 1398 spin_unlock(&sdp->sd_trunc_lock);
1370 if (empty) 1399 if (empty && !sdp->sd_statfs_force_sync)
1371 t -= schedule_timeout(t); 1400 t -= schedule_timeout(t);
1372 else 1401 else
1373 t = 0; 1402 t = 0;
@@ -1377,3 +1406,181 @@ int gfs2_quotad(void *data)
1377 return 0; 1406 return 0;
1378} 1407}
1379 1408
1409static int gfs2_quota_get_xstate(struct super_block *sb,
1410 struct fs_quota_stat *fqs)
1411{
1412 struct gfs2_sbd *sdp = sb->s_fs_info;
1413
1414 memset(fqs, 0, sizeof(struct fs_quota_stat));
1415 fqs->qs_version = FS_QSTAT_VERSION;
1416 if (sdp->sd_args.ar_quota == GFS2_QUOTA_ON)
1417 fqs->qs_flags = (XFS_QUOTA_UDQ_ENFD | XFS_QUOTA_GDQ_ENFD);
1418 else if (sdp->sd_args.ar_quota == GFS2_QUOTA_ACCOUNT)
1419 fqs->qs_flags = (XFS_QUOTA_UDQ_ACCT | XFS_QUOTA_GDQ_ACCT);
1420 if (sdp->sd_quota_inode) {
1421 fqs->qs_uquota.qfs_ino = GFS2_I(sdp->sd_quota_inode)->i_no_addr;
1422 fqs->qs_uquota.qfs_nblks = sdp->sd_quota_inode->i_blocks;
1423 }
1424 fqs->qs_uquota.qfs_nextents = 1; /* unsupported */
1425 fqs->qs_gquota = fqs->qs_uquota; /* its the same inode in both cases */
1426 fqs->qs_incoredqs = atomic_read(&qd_lru_count);
1427 return 0;
1428}
1429
1430static int gfs2_xquota_get(struct super_block *sb, int type, qid_t id,
1431 struct fs_disk_quota *fdq)
1432{
1433 struct gfs2_sbd *sdp = sb->s_fs_info;
1434 struct gfs2_quota_lvb *qlvb;
1435 struct gfs2_quota_data *qd;
1436 struct gfs2_holder q_gh;
1437 int error;
1438
1439 memset(fdq, 0, sizeof(struct fs_disk_quota));
1440
1441 if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
1442 return -ESRCH; /* Crazy XFS error code */
1443
1444 if (type == USRQUOTA)
1445 type = QUOTA_USER;
1446 else if (type == GRPQUOTA)
1447 type = QUOTA_GROUP;
1448 else
1449 return -EINVAL;
1450
1451 error = qd_get(sdp, type, id, &qd);
1452 if (error)
1453 return error;
1454 error = do_glock(qd, FORCE, &q_gh);
1455 if (error)
1456 goto out;
1457
1458 qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
1459 fdq->d_version = FS_DQUOT_VERSION;
1460 fdq->d_flags = (type == QUOTA_USER) ? XFS_USER_QUOTA : XFS_GROUP_QUOTA;
1461 fdq->d_id = id;
1462 fdq->d_blk_hardlimit = be64_to_cpu(qlvb->qb_limit);
1463 fdq->d_blk_softlimit = be64_to_cpu(qlvb->qb_warn);
1464 fdq->d_bcount = be64_to_cpu(qlvb->qb_value);
1465
1466 gfs2_glock_dq_uninit(&q_gh);
1467out:
1468 qd_put(qd);
1469 return error;
1470}
1471
1472/* GFS2 only supports a subset of the XFS fields */
1473#define GFS2_FIELDMASK (FS_DQ_BSOFT|FS_DQ_BHARD)
1474
1475static int gfs2_xquota_set(struct super_block *sb, int type, qid_t id,
1476 struct fs_disk_quota *fdq)
1477{
1478 struct gfs2_sbd *sdp = sb->s_fs_info;
1479 struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
1480 struct gfs2_quota_data *qd;
1481 struct gfs2_holder q_gh, i_gh;
1482 unsigned int data_blocks, ind_blocks;
1483 unsigned int blocks = 0;
1484 int alloc_required;
1485 struct gfs2_alloc *al;
1486 loff_t offset;
1487 int error;
1488
1489 if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
1490 return -ESRCH; /* Crazy XFS error code */
1491
1492 switch(type) {
1493 case USRQUOTA:
1494 type = QUOTA_USER;
1495 if (fdq->d_flags != XFS_USER_QUOTA)
1496 return -EINVAL;
1497 break;
1498 case GRPQUOTA:
1499 type = QUOTA_GROUP;
1500 if (fdq->d_flags != XFS_GROUP_QUOTA)
1501 return -EINVAL;
1502 break;
1503 default:
1504 return -EINVAL;
1505 }
1506
1507 if (fdq->d_fieldmask & ~GFS2_FIELDMASK)
1508 return -EINVAL;
1509 if (fdq->d_id != id)
1510 return -EINVAL;
1511
1512 error = qd_get(sdp, type, id, &qd);
1513 if (error)
1514 return error;
1515
1516 mutex_lock(&ip->i_inode.i_mutex);
1517 error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE, 0, &q_gh);
1518 if (error)
1519 goto out_put;
1520 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
1521 if (error)
1522 goto out_q;
1523
1524 /* Check for existing entry, if none then alloc new blocks */
1525 error = update_qd(sdp, qd);
1526 if (error)
1527 goto out_i;
1528
1529 /* If nothing has changed, this is a no-op */
1530 if ((fdq->d_fieldmask & FS_DQ_BSOFT) &&
1531 (fdq->d_blk_softlimit == be64_to_cpu(qd->qd_qb.qb_warn)))
1532 fdq->d_fieldmask ^= FS_DQ_BSOFT;
1533 if ((fdq->d_fieldmask & FS_DQ_BHARD) &&
1534 (fdq->d_blk_hardlimit == be64_to_cpu(qd->qd_qb.qb_limit)))
1535 fdq->d_fieldmask ^= FS_DQ_BHARD;
1536 if (fdq->d_fieldmask == 0)
1537 goto out_i;
1538
1539 offset = qd2offset(qd);
1540 error = gfs2_write_alloc_required(ip, offset, sizeof(struct gfs2_quota),
1541 &alloc_required);
1542 if (error)
1543 goto out_i;
1544 if (alloc_required) {
1545 al = gfs2_alloc_get(ip);
1546 if (al == NULL)
1547 goto out_i;
1548 gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
1549 &data_blocks, &ind_blocks);
1550 blocks = al->al_requested = 1 + data_blocks + ind_blocks;
1551 error = gfs2_inplace_reserve(ip);
1552 if (error)
1553 goto out_alloc;
1554 }
1555
1556 error = gfs2_trans_begin(sdp, blocks + RES_DINODE + 1, 0);
1557 if (error)
1558 goto out_release;
1559
1560 /* Apply changes */
1561 error = gfs2_adjust_quota(ip, offset, 0, qd, fdq);
1562
1563 gfs2_trans_end(sdp);
1564out_release:
1565 if (alloc_required) {
1566 gfs2_inplace_release(ip);
1567out_alloc:
1568 gfs2_alloc_put(ip);
1569 }
1570out_i:
1571 gfs2_glock_dq_uninit(&i_gh);
1572out_q:
1573 gfs2_glock_dq_uninit(&q_gh);
1574out_put:
1575 mutex_unlock(&ip->i_inode.i_mutex);
1576 qd_put(qd);
1577 return error;
1578}
1579
1580const struct quotactl_ops gfs2_quotactl_ops = {
1581 .quota_sync = gfs2_quota_sync,
1582 .get_xstate = gfs2_quota_get_xstate,
1583 .get_xquota = gfs2_xquota_get,
1584 .set_xquota = gfs2_xquota_set,
1585};
1586
diff --git a/fs/gfs2/quota.h b/fs/gfs2/quota.h
index 0fa5fa63d0e8..e271fa07ad02 100644
--- a/fs/gfs2/quota.h
+++ b/fs/gfs2/quota.h
@@ -25,13 +25,15 @@ extern int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid);
25extern void gfs2_quota_change(struct gfs2_inode *ip, s64 change, 25extern void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
26 u32 uid, u32 gid); 26 u32 uid, u32 gid);
27 27
28extern int gfs2_quota_sync(struct gfs2_sbd *sdp); 28extern int gfs2_quota_sync(struct super_block *sb, int type);
29extern int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, u32 id); 29extern int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, u32 id);
30 30
31extern int gfs2_quota_init(struct gfs2_sbd *sdp); 31extern int gfs2_quota_init(struct gfs2_sbd *sdp);
32extern void gfs2_quota_cleanup(struct gfs2_sbd *sdp); 32extern void gfs2_quota_cleanup(struct gfs2_sbd *sdp);
33extern int gfs2_quotad(void *data); 33extern int gfs2_quotad(void *data);
34 34
35extern void gfs2_wake_up_statfs(struct gfs2_sbd *sdp);
36
35static inline int gfs2_quota_lock_check(struct gfs2_inode *ip) 37static inline int gfs2_quota_lock_check(struct gfs2_inode *ip)
36{ 38{
37 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); 39 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
@@ -50,5 +52,6 @@ static inline int gfs2_quota_lock_check(struct gfs2_inode *ip)
50} 52}
51 53
52extern int gfs2_shrink_qd_memory(int nr, gfp_t gfp_mask); 54extern int gfs2_shrink_qd_memory(int nr, gfp_t gfp_mask);
55extern const struct quotactl_ops gfs2_quotactl_ops;
53 56
54#endif /* __QUOTA_DOT_H__ */ 57#endif /* __QUOTA_DOT_H__ */
diff --git a/fs/gfs2/recovery.c b/fs/gfs2/recovery.c
index 09fa31965576..4b9bece3d437 100644
--- a/fs/gfs2/recovery.c
+++ b/fs/gfs2/recovery.c
@@ -410,7 +410,9 @@ static int clean_journal(struct gfs2_jdesc *jd, struct gfs2_log_header_host *hea
410 memset(lh, 0, sizeof(struct gfs2_log_header)); 410 memset(lh, 0, sizeof(struct gfs2_log_header));
411 lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC); 411 lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
412 lh->lh_header.mh_type = cpu_to_be32(GFS2_METATYPE_LH); 412 lh->lh_header.mh_type = cpu_to_be32(GFS2_METATYPE_LH);
413 lh->lh_header.__pad0 = cpu_to_be64(0);
413 lh->lh_header.mh_format = cpu_to_be32(GFS2_FORMAT_LH); 414 lh->lh_header.mh_format = cpu_to_be32(GFS2_FORMAT_LH);
415 lh->lh_header.mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid);
414 lh->lh_sequence = cpu_to_be64(head->lh_sequence + 1); 416 lh->lh_sequence = cpu_to_be64(head->lh_sequence + 1);
415 lh->lh_flags = cpu_to_be32(GFS2_LOG_HEAD_UNMOUNT); 417 lh->lh_flags = cpu_to_be32(GFS2_LOG_HEAD_UNMOUNT);
416 lh->lh_blkno = cpu_to_be32(lblock); 418 lh->lh_blkno = cpu_to_be32(lblock);
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index 8f1cfb02a6cb..0608f490c295 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -1710,11 +1710,16 @@ int gfs2_check_blk_type(struct gfs2_sbd *sdp, u64 no_addr, unsigned int type)
1710{ 1710{
1711 struct gfs2_rgrpd *rgd; 1711 struct gfs2_rgrpd *rgd;
1712 struct gfs2_holder ri_gh, rgd_gh; 1712 struct gfs2_holder ri_gh, rgd_gh;
1713 struct gfs2_inode *ip = GFS2_I(sdp->sd_rindex);
1714 int ri_locked = 0;
1713 int error; 1715 int error;
1714 1716
1715 error = gfs2_rindex_hold(sdp, &ri_gh); 1717 if (!gfs2_glock_is_locked_by_me(ip->i_gl)) {
1716 if (error) 1718 error = gfs2_rindex_hold(sdp, &ri_gh);
1717 goto fail; 1719 if (error)
1720 goto fail;
1721 ri_locked = 1;
1722 }
1718 1723
1719 error = -EINVAL; 1724 error = -EINVAL;
1720 rgd = gfs2_blk2rgrpd(sdp, no_addr); 1725 rgd = gfs2_blk2rgrpd(sdp, no_addr);
@@ -1730,7 +1735,8 @@ int gfs2_check_blk_type(struct gfs2_sbd *sdp, u64 no_addr, unsigned int type)
1730 1735
1731 gfs2_glock_dq_uninit(&rgd_gh); 1736 gfs2_glock_dq_uninit(&rgd_gh);
1732fail_rindex: 1737fail_rindex:
1733 gfs2_glock_dq_uninit(&ri_gh); 1738 if (ri_locked)
1739 gfs2_glock_dq_uninit(&ri_gh);
1734fail: 1740fail:
1735 return error; 1741 return error;
1736} 1742}
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index 0ec3ec672de1..c282ad41f3d1 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -70,6 +70,11 @@ enum {
70 Opt_commit, 70 Opt_commit,
71 Opt_err_withdraw, 71 Opt_err_withdraw,
72 Opt_err_panic, 72 Opt_err_panic,
73 Opt_statfs_quantum,
74 Opt_statfs_percent,
75 Opt_quota_quantum,
76 Opt_barrier,
77 Opt_nobarrier,
73 Opt_error, 78 Opt_error,
74}; 79};
75 80
@@ -101,18 +106,23 @@ static const match_table_t tokens = {
101 {Opt_commit, "commit=%d"}, 106 {Opt_commit, "commit=%d"},
102 {Opt_err_withdraw, "errors=withdraw"}, 107 {Opt_err_withdraw, "errors=withdraw"},
103 {Opt_err_panic, "errors=panic"}, 108 {Opt_err_panic, "errors=panic"},
109 {Opt_statfs_quantum, "statfs_quantum=%d"},
110 {Opt_statfs_percent, "statfs_percent=%d"},
111 {Opt_quota_quantum, "quota_quantum=%d"},
112 {Opt_barrier, "barrier"},
113 {Opt_nobarrier, "nobarrier"},
104 {Opt_error, NULL} 114 {Opt_error, NULL}
105}; 115};
106 116
107/** 117/**
108 * gfs2_mount_args - Parse mount options 118 * gfs2_mount_args - Parse mount options
109 * @sdp: 119 * @args: The structure into which the parsed options will be written
110 * @data: 120 * @options: The options to parse
111 * 121 *
112 * Return: errno 122 * Return: errno
113 */ 123 */
114 124
115int gfs2_mount_args(struct gfs2_sbd *sdp, struct gfs2_args *args, char *options) 125int gfs2_mount_args(struct gfs2_args *args, char *options)
116{ 126{
117 char *o; 127 char *o;
118 int token; 128 int token;
@@ -157,7 +167,7 @@ int gfs2_mount_args(struct gfs2_sbd *sdp, struct gfs2_args *args, char *options)
157 break; 167 break;
158 case Opt_debug: 168 case Opt_debug:
159 if (args->ar_errors == GFS2_ERRORS_PANIC) { 169 if (args->ar_errors == GFS2_ERRORS_PANIC) {
160 fs_info(sdp, "-o debug and -o errors=panic " 170 printk(KERN_WARNING "GFS2: -o debug and -o errors=panic "
161 "are mutually exclusive.\n"); 171 "are mutually exclusive.\n");
162 return -EINVAL; 172 return -EINVAL;
163 } 173 }
@@ -210,7 +220,29 @@ int gfs2_mount_args(struct gfs2_sbd *sdp, struct gfs2_args *args, char *options)
210 case Opt_commit: 220 case Opt_commit:
211 rv = match_int(&tmp[0], &args->ar_commit); 221 rv = match_int(&tmp[0], &args->ar_commit);
212 if (rv || args->ar_commit <= 0) { 222 if (rv || args->ar_commit <= 0) {
213 fs_info(sdp, "commit mount option requires a positive numeric argument\n"); 223 printk(KERN_WARNING "GFS2: commit mount option requires a positive numeric argument\n");
224 return rv ? rv : -EINVAL;
225 }
226 break;
227 case Opt_statfs_quantum:
228 rv = match_int(&tmp[0], &args->ar_statfs_quantum);
229 if (rv || args->ar_statfs_quantum < 0) {
230 printk(KERN_WARNING "GFS2: statfs_quantum mount option requires a non-negative numeric argument\n");
231 return rv ? rv : -EINVAL;
232 }
233 break;
234 case Opt_quota_quantum:
235 rv = match_int(&tmp[0], &args->ar_quota_quantum);
236 if (rv || args->ar_quota_quantum <= 0) {
237 printk(KERN_WARNING "GFS2: quota_quantum mount option requires a positive numeric argument\n");
238 return rv ? rv : -EINVAL;
239 }
240 break;
241 case Opt_statfs_percent:
242 rv = match_int(&tmp[0], &args->ar_statfs_percent);
243 if (rv || args->ar_statfs_percent < 0 ||
244 args->ar_statfs_percent > 100) {
245 printk(KERN_WARNING "statfs_percent mount option requires a numeric argument between 0 and 100\n");
214 return rv ? rv : -EINVAL; 246 return rv ? rv : -EINVAL;
215 } 247 }
216 break; 248 break;
@@ -219,15 +251,21 @@ int gfs2_mount_args(struct gfs2_sbd *sdp, struct gfs2_args *args, char *options)
219 break; 251 break;
220 case Opt_err_panic: 252 case Opt_err_panic:
221 if (args->ar_debug) { 253 if (args->ar_debug) {
222 fs_info(sdp, "-o debug and -o errors=panic " 254 printk(KERN_WARNING "GFS2: -o debug and -o errors=panic "
223 "are mutually exclusive.\n"); 255 "are mutually exclusive.\n");
224 return -EINVAL; 256 return -EINVAL;
225 } 257 }
226 args->ar_errors = GFS2_ERRORS_PANIC; 258 args->ar_errors = GFS2_ERRORS_PANIC;
227 break; 259 break;
260 case Opt_barrier:
261 args->ar_nobarrier = 0;
262 break;
263 case Opt_nobarrier:
264 args->ar_nobarrier = 1;
265 break;
228 case Opt_error: 266 case Opt_error:
229 default: 267 default:
230 fs_info(sdp, "invalid mount option: %s\n", o); 268 printk(KERN_WARNING "GFS2: invalid mount option: %s\n", o);
231 return -EINVAL; 269 return -EINVAL;
232 } 270 }
233 } 271 }
@@ -442,7 +480,10 @@ void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free,
442{ 480{
443 struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode); 481 struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
444 struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local; 482 struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
483 struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
445 struct buffer_head *l_bh; 484 struct buffer_head *l_bh;
485 s64 x, y;
486 int need_sync = 0;
446 int error; 487 int error;
447 488
448 error = gfs2_meta_inode_buffer(l_ip, &l_bh); 489 error = gfs2_meta_inode_buffer(l_ip, &l_bh);
@@ -456,9 +497,17 @@ void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free,
456 l_sc->sc_free += free; 497 l_sc->sc_free += free;
457 l_sc->sc_dinodes += dinodes; 498 l_sc->sc_dinodes += dinodes;
458 gfs2_statfs_change_out(l_sc, l_bh->b_data + sizeof(struct gfs2_dinode)); 499 gfs2_statfs_change_out(l_sc, l_bh->b_data + sizeof(struct gfs2_dinode));
500 if (sdp->sd_args.ar_statfs_percent) {
501 x = 100 * l_sc->sc_free;
502 y = m_sc->sc_free * sdp->sd_args.ar_statfs_percent;
503 if (x >= y || x <= -y)
504 need_sync = 1;
505 }
459 spin_unlock(&sdp->sd_statfs_spin); 506 spin_unlock(&sdp->sd_statfs_spin);
460 507
461 brelse(l_bh); 508 brelse(l_bh);
509 if (need_sync)
510 gfs2_wake_up_statfs(sdp);
462} 511}
463 512
464void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh, 513void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh,
@@ -484,8 +533,9 @@ void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh,
484 gfs2_statfs_change_out(m_sc, m_bh->b_data + sizeof(struct gfs2_dinode)); 533 gfs2_statfs_change_out(m_sc, m_bh->b_data + sizeof(struct gfs2_dinode));
485} 534}
486 535
487int gfs2_statfs_sync(struct gfs2_sbd *sdp) 536int gfs2_statfs_sync(struct super_block *sb, int type)
488{ 537{
538 struct gfs2_sbd *sdp = sb->s_fs_info;
489 struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode); 539 struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
490 struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode); 540 struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
491 struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master; 541 struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
@@ -521,6 +571,7 @@ int gfs2_statfs_sync(struct gfs2_sbd *sdp)
521 goto out_bh2; 571 goto out_bh2;
522 572
523 update_statfs(sdp, m_bh, l_bh); 573 update_statfs(sdp, m_bh, l_bh);
574 sdp->sd_statfs_force_sync = 0;
524 575
525 gfs2_trans_end(sdp); 576 gfs2_trans_end(sdp);
526 577
@@ -712,8 +763,8 @@ static int gfs2_make_fs_ro(struct gfs2_sbd *sdp)
712 int error; 763 int error;
713 764
714 flush_workqueue(gfs2_delete_workqueue); 765 flush_workqueue(gfs2_delete_workqueue);
715 gfs2_quota_sync(sdp); 766 gfs2_quota_sync(sdp->sd_vfs, 0);
716 gfs2_statfs_sync(sdp); 767 gfs2_statfs_sync(sdp->sd_vfs, 0);
717 768
718 error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_SHARED, GL_NOCACHE, 769 error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_SHARED, GL_NOCACHE,
719 &t_gh); 770 &t_gh);
@@ -1061,8 +1112,13 @@ static int gfs2_remount_fs(struct super_block *sb, int *flags, char *data)
1061 1112
1062 spin_lock(&gt->gt_spin); 1113 spin_lock(&gt->gt_spin);
1063 args.ar_commit = gt->gt_log_flush_secs; 1114 args.ar_commit = gt->gt_log_flush_secs;
1115 args.ar_quota_quantum = gt->gt_quota_quantum;
1116 if (gt->gt_statfs_slow)
1117 args.ar_statfs_quantum = 0;
1118 else
1119 args.ar_statfs_quantum = gt->gt_statfs_quantum;
1064 spin_unlock(&gt->gt_spin); 1120 spin_unlock(&gt->gt_spin);
1065 error = gfs2_mount_args(sdp, &args, data); 1121 error = gfs2_mount_args(&args, data);
1066 if (error) 1122 if (error)
1067 return error; 1123 return error;
1068 1124
@@ -1097,8 +1153,21 @@ static int gfs2_remount_fs(struct super_block *sb, int *flags, char *data)
1097 sb->s_flags |= MS_POSIXACL; 1153 sb->s_flags |= MS_POSIXACL;
1098 else 1154 else
1099 sb->s_flags &= ~MS_POSIXACL; 1155 sb->s_flags &= ~MS_POSIXACL;
1156 if (sdp->sd_args.ar_nobarrier)
1157 set_bit(SDF_NOBARRIERS, &sdp->sd_flags);
1158 else
1159 clear_bit(SDF_NOBARRIERS, &sdp->sd_flags);
1100 spin_lock(&gt->gt_spin); 1160 spin_lock(&gt->gt_spin);
1101 gt->gt_log_flush_secs = args.ar_commit; 1161 gt->gt_log_flush_secs = args.ar_commit;
1162 gt->gt_quota_quantum = args.ar_quota_quantum;
1163 if (args.ar_statfs_quantum) {
1164 gt->gt_statfs_slow = 0;
1165 gt->gt_statfs_quantum = args.ar_statfs_quantum;
1166 }
1167 else {
1168 gt->gt_statfs_slow = 1;
1169 gt->gt_statfs_quantum = 30;
1170 }
1102 spin_unlock(&gt->gt_spin); 1171 spin_unlock(&gt->gt_spin);
1103 1172
1104 gfs2_online_uevent(sdp); 1173 gfs2_online_uevent(sdp);
@@ -1179,7 +1248,7 @@ static int gfs2_show_options(struct seq_file *s, struct vfsmount *mnt)
1179{ 1248{
1180 struct gfs2_sbd *sdp = mnt->mnt_sb->s_fs_info; 1249 struct gfs2_sbd *sdp = mnt->mnt_sb->s_fs_info;
1181 struct gfs2_args *args = &sdp->sd_args; 1250 struct gfs2_args *args = &sdp->sd_args;
1182 int lfsecs; 1251 int val;
1183 1252
1184 if (is_ancestor(mnt->mnt_root, sdp->sd_master_dir)) 1253 if (is_ancestor(mnt->mnt_root, sdp->sd_master_dir))
1185 seq_printf(s, ",meta"); 1254 seq_printf(s, ",meta");
@@ -1240,9 +1309,17 @@ static int gfs2_show_options(struct seq_file *s, struct vfsmount *mnt)
1240 } 1309 }
1241 if (args->ar_discard) 1310 if (args->ar_discard)
1242 seq_printf(s, ",discard"); 1311 seq_printf(s, ",discard");
1243 lfsecs = sdp->sd_tune.gt_log_flush_secs; 1312 val = sdp->sd_tune.gt_log_flush_secs;
1244 if (lfsecs != 60) 1313 if (val != 60)
1245 seq_printf(s, ",commit=%d", lfsecs); 1314 seq_printf(s, ",commit=%d", val);
1315 val = sdp->sd_tune.gt_statfs_quantum;
1316 if (val != 30)
1317 seq_printf(s, ",statfs_quantum=%d", val);
1318 val = sdp->sd_tune.gt_quota_quantum;
1319 if (val != 60)
1320 seq_printf(s, ",quota_quantum=%d", val);
1321 if (args->ar_statfs_percent)
1322 seq_printf(s, ",statfs_percent=%d", args->ar_statfs_percent);
1246 if (args->ar_errors != GFS2_ERRORS_DEFAULT) { 1323 if (args->ar_errors != GFS2_ERRORS_DEFAULT) {
1247 const char *state; 1324 const char *state;
1248 1325
@@ -1259,6 +1336,9 @@ static int gfs2_show_options(struct seq_file *s, struct vfsmount *mnt)
1259 } 1336 }
1260 seq_printf(s, ",errors=%s", state); 1337 seq_printf(s, ",errors=%s", state);
1261 } 1338 }
1339 if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags))
1340 seq_printf(s, ",nobarrier");
1341
1262 return 0; 1342 return 0;
1263} 1343}
1264 1344
diff --git a/fs/gfs2/super.h b/fs/gfs2/super.h
index 235db3682885..3df60f2d84e3 100644
--- a/fs/gfs2/super.h
+++ b/fs/gfs2/super.h
@@ -27,7 +27,7 @@ static inline unsigned int gfs2_jindex_size(struct gfs2_sbd *sdp)
27 27
28extern void gfs2_jindex_free(struct gfs2_sbd *sdp); 28extern void gfs2_jindex_free(struct gfs2_sbd *sdp);
29 29
30extern int gfs2_mount_args(struct gfs2_sbd *sdp, struct gfs2_args *args, char *data); 30extern int gfs2_mount_args(struct gfs2_args *args, char *data);
31 31
32extern struct gfs2_jdesc *gfs2_jdesc_find(struct gfs2_sbd *sdp, unsigned int jid); 32extern struct gfs2_jdesc *gfs2_jdesc_find(struct gfs2_sbd *sdp, unsigned int jid);
33extern int gfs2_jdesc_check(struct gfs2_jdesc *jd); 33extern int gfs2_jdesc_check(struct gfs2_jdesc *jd);
@@ -44,7 +44,7 @@ extern void gfs2_statfs_change_in(struct gfs2_statfs_change_host *sc,
44 const void *buf); 44 const void *buf);
45extern void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh, 45extern void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh,
46 struct buffer_head *l_bh); 46 struct buffer_head *l_bh);
47extern int gfs2_statfs_sync(struct gfs2_sbd *sdp); 47extern int gfs2_statfs_sync(struct super_block *sb, int type);
48 48
49extern int gfs2_freeze_fs(struct gfs2_sbd *sdp); 49extern int gfs2_freeze_fs(struct gfs2_sbd *sdp);
50extern void gfs2_unfreeze_fs(struct gfs2_sbd *sdp); 50extern void gfs2_unfreeze_fs(struct gfs2_sbd *sdp);
diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c
index 446329728d52..c5dad1eb7b91 100644
--- a/fs/gfs2/sys.c
+++ b/fs/gfs2/sys.c
@@ -158,7 +158,7 @@ static ssize_t statfs_sync_store(struct gfs2_sbd *sdp, const char *buf,
158 if (simple_strtol(buf, NULL, 0) != 1) 158 if (simple_strtol(buf, NULL, 0) != 1)
159 return -EINVAL; 159 return -EINVAL;
160 160
161 gfs2_statfs_sync(sdp); 161 gfs2_statfs_sync(sdp->sd_vfs, 0);
162 return len; 162 return len;
163} 163}
164 164
@@ -171,13 +171,14 @@ static ssize_t quota_sync_store(struct gfs2_sbd *sdp, const char *buf,
171 if (simple_strtol(buf, NULL, 0) != 1) 171 if (simple_strtol(buf, NULL, 0) != 1)
172 return -EINVAL; 172 return -EINVAL;
173 173
174 gfs2_quota_sync(sdp); 174 gfs2_quota_sync(sdp->sd_vfs, 0);
175 return len; 175 return len;
176} 176}
177 177
178static ssize_t quota_refresh_user_store(struct gfs2_sbd *sdp, const char *buf, 178static ssize_t quota_refresh_user_store(struct gfs2_sbd *sdp, const char *buf,
179 size_t len) 179 size_t len)
180{ 180{
181 int error;
181 u32 id; 182 u32 id;
182 183
183 if (!capable(CAP_SYS_ADMIN)) 184 if (!capable(CAP_SYS_ADMIN))
@@ -185,13 +186,14 @@ static ssize_t quota_refresh_user_store(struct gfs2_sbd *sdp, const char *buf,
185 186
186 id = simple_strtoul(buf, NULL, 0); 187 id = simple_strtoul(buf, NULL, 0);
187 188
188 gfs2_quota_refresh(sdp, 1, id); 189 error = gfs2_quota_refresh(sdp, 1, id);
189 return len; 190 return error ? error : len;
190} 191}
191 192
192static ssize_t quota_refresh_group_store(struct gfs2_sbd *sdp, const char *buf, 193static ssize_t quota_refresh_group_store(struct gfs2_sbd *sdp, const char *buf,
193 size_t len) 194 size_t len)
194{ 195{
196 int error;
195 u32 id; 197 u32 id;
196 198
197 if (!capable(CAP_SYS_ADMIN)) 199 if (!capable(CAP_SYS_ADMIN))
@@ -199,8 +201,8 @@ static ssize_t quota_refresh_group_store(struct gfs2_sbd *sdp, const char *buf,
199 201
200 id = simple_strtoul(buf, NULL, 0); 202 id = simple_strtoul(buf, NULL, 0);
201 203
202 gfs2_quota_refresh(sdp, 0, id); 204 error = gfs2_quota_refresh(sdp, 0, id);
203 return len; 205 return error ? error : len;
204} 206}
205 207
206static ssize_t demote_rq_store(struct gfs2_sbd *sdp, const char *buf, size_t len) 208static ssize_t demote_rq_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
diff --git a/fs/gfs2/xattr.c b/fs/gfs2/xattr.c
index 8a0f8ef6ee27..912f5cbc4740 100644
--- a/fs/gfs2/xattr.c
+++ b/fs/gfs2/xattr.c
@@ -186,8 +186,8 @@ static int ea_find_i(struct gfs2_inode *ip, struct buffer_head *bh,
186 return 0; 186 return 0;
187} 187}
188 188
189int gfs2_ea_find(struct gfs2_inode *ip, int type, const char *name, 189static int gfs2_ea_find(struct gfs2_inode *ip, int type, const char *name,
190 struct gfs2_ea_location *el) 190 struct gfs2_ea_location *el)
191{ 191{
192 struct ea_find ef; 192 struct ea_find ef;
193 int error; 193 int error;
@@ -516,8 +516,8 @@ out:
516 return error; 516 return error;
517} 517}
518 518
519int gfs2_ea_get_copy(struct gfs2_inode *ip, struct gfs2_ea_location *el, 519static int gfs2_ea_get_copy(struct gfs2_inode *ip, struct gfs2_ea_location *el,
520 char *data, size_t size) 520 char *data, size_t size)
521{ 521{
522 int ret; 522 int ret;
523 size_t len = GFS2_EA_DATA_LEN(el->el_ea); 523 size_t len = GFS2_EA_DATA_LEN(el->el_ea);
@@ -534,6 +534,36 @@ int gfs2_ea_get_copy(struct gfs2_inode *ip, struct gfs2_ea_location *el,
534 return len; 534 return len;
535} 535}
536 536
537int gfs2_xattr_acl_get(struct gfs2_inode *ip, const char *name, char **ppdata)
538{
539 struct gfs2_ea_location el;
540 int error;
541 int len;
542 char *data;
543
544 error = gfs2_ea_find(ip, GFS2_EATYPE_SYS, name, &el);
545 if (error)
546 return error;
547 if (!el.el_ea)
548 goto out;
549 if (!GFS2_EA_DATA_LEN(el.el_ea))
550 goto out;
551
552 len = GFS2_EA_DATA_LEN(el.el_ea);
553 data = kmalloc(len, GFP_NOFS);
554 error = -ENOMEM;
555 if (data == NULL)
556 goto out;
557
558 error = gfs2_ea_get_copy(ip, &el, data, len);
559 if (error == 0)
560 error = len;
561 *ppdata = data;
562out:
563 brelse(el.el_bh);
564 return error;
565}
566
537/** 567/**
538 * gfs2_xattr_get - Get a GFS2 extended attribute 568 * gfs2_xattr_get - Get a GFS2 extended attribute
539 * @inode: The inode 569 * @inode: The inode
@@ -1259,22 +1289,26 @@ fail:
1259 return error; 1289 return error;
1260} 1290}
1261 1291
1262int gfs2_ea_acl_chmod(struct gfs2_inode *ip, struct gfs2_ea_location *el, 1292int gfs2_xattr_acl_chmod(struct gfs2_inode *ip, struct iattr *attr, char *data)
1263 struct iattr *attr, char *data)
1264{ 1293{
1294 struct gfs2_ea_location el;
1265 struct buffer_head *dibh; 1295 struct buffer_head *dibh;
1266 int error; 1296 int error;
1267 1297
1268 if (GFS2_EA_IS_STUFFED(el->el_ea)) { 1298 error = gfs2_ea_find(ip, GFS2_EATYPE_SYS, GFS2_POSIX_ACL_ACCESS, &el);
1299 if (error)
1300 return error;
1301
1302 if (GFS2_EA_IS_STUFFED(el.el_ea)) {
1269 error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE + RES_EATTR, 0); 1303 error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE + RES_EATTR, 0);
1270 if (error) 1304 if (error)
1271 return error; 1305 return error;
1272 1306
1273 gfs2_trans_add_bh(ip->i_gl, el->el_bh, 1); 1307 gfs2_trans_add_bh(ip->i_gl, el.el_bh, 1);
1274 memcpy(GFS2_EA2DATA(el->el_ea), data, 1308 memcpy(GFS2_EA2DATA(el.el_ea), data,
1275 GFS2_EA_DATA_LEN(el->el_ea)); 1309 GFS2_EA_DATA_LEN(el.el_ea));
1276 } else 1310 } else
1277 error = ea_acl_chmod_unstuffed(ip, el->el_ea, data); 1311 error = ea_acl_chmod_unstuffed(ip, el.el_ea, data);
1278 1312
1279 if (error) 1313 if (error)
1280 return error; 1314 return error;
@@ -1507,18 +1541,6 @@ static int gfs2_xattr_user_set(struct inode *inode, const char *name,
1507 return gfs2_xattr_set(inode, GFS2_EATYPE_USR, name, value, size, flags); 1541 return gfs2_xattr_set(inode, GFS2_EATYPE_USR, name, value, size, flags);
1508} 1542}
1509 1543
1510static int gfs2_xattr_system_get(struct inode *inode, const char *name,
1511 void *buffer, size_t size)
1512{
1513 return gfs2_xattr_get(inode, GFS2_EATYPE_SYS, name, buffer, size);
1514}
1515
1516static int gfs2_xattr_system_set(struct inode *inode, const char *name,
1517 const void *value, size_t size, int flags)
1518{
1519 return gfs2_xattr_set(inode, GFS2_EATYPE_SYS, name, value, size, flags);
1520}
1521
1522static int gfs2_xattr_security_get(struct inode *inode, const char *name, 1544static int gfs2_xattr_security_get(struct inode *inode, const char *name,
1523 void *buffer, size_t size) 1545 void *buffer, size_t size)
1524{ 1546{
@@ -1543,12 +1565,6 @@ static struct xattr_handler gfs2_xattr_security_handler = {
1543 .set = gfs2_xattr_security_set, 1565 .set = gfs2_xattr_security_set,
1544}; 1566};
1545 1567
1546static struct xattr_handler gfs2_xattr_system_handler = {
1547 .prefix = XATTR_SYSTEM_PREFIX,
1548 .get = gfs2_xattr_system_get,
1549 .set = gfs2_xattr_system_set,
1550};
1551
1552struct xattr_handler *gfs2_xattr_handlers[] = { 1568struct xattr_handler *gfs2_xattr_handlers[] = {
1553 &gfs2_xattr_user_handler, 1569 &gfs2_xattr_user_handler,
1554 &gfs2_xattr_security_handler, 1570 &gfs2_xattr_security_handler,
diff --git a/fs/gfs2/xattr.h b/fs/gfs2/xattr.h
index cbdfd7743733..8d6ae5813c4d 100644
--- a/fs/gfs2/xattr.h
+++ b/fs/gfs2/xattr.h
@@ -62,11 +62,7 @@ extern int gfs2_ea_dealloc(struct gfs2_inode *ip);
62 62
63/* Exported to acl.c */ 63/* Exported to acl.c */
64 64
65extern int gfs2_ea_find(struct gfs2_inode *ip, int type, const char *name, 65extern int gfs2_xattr_acl_get(struct gfs2_inode *ip, const char *name, char **data);
66 struct gfs2_ea_location *el); 66extern int gfs2_xattr_acl_chmod(struct gfs2_inode *ip, struct iattr *attr, char *data);
67extern int gfs2_ea_get_copy(struct gfs2_inode *ip, struct gfs2_ea_location *el,
68 char *data, size_t size);
69extern int gfs2_ea_acl_chmod(struct gfs2_inode *ip, struct gfs2_ea_location *el,
70 struct iattr *attr, char *data);
71 67
72#endif /* __EATTR_DOT_H__ */ 68#endif /* __EATTR_DOT_H__ */
diff --git a/fs/quota/Kconfig b/fs/quota/Kconfig
index 8047e01ef46b..353e78a9ebee 100644
--- a/fs/quota/Kconfig
+++ b/fs/quota/Kconfig
@@ -17,7 +17,7 @@ config QUOTA
17 17
18config QUOTA_NETLINK_INTERFACE 18config QUOTA_NETLINK_INTERFACE
19 bool "Report quota messages through netlink interface" 19 bool "Report quota messages through netlink interface"
20 depends on QUOTA && NET 20 depends on QUOTACTL && NET
21 help 21 help
22 If you say Y here, quota warnings (about exceeding softlimit, reaching 22 If you say Y here, quota warnings (about exceeding softlimit, reaching
23 hardlimit, etc.) will be reported through netlink interface. If unsure, 23 hardlimit, etc.) will be reported through netlink interface. If unsure,
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 39b49c42a7ed..9b6ad908dcb2 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -77,10 +77,6 @@
77#include <linux/capability.h> 77#include <linux/capability.h>
78#include <linux/quotaops.h> 78#include <linux/quotaops.h>
79#include <linux/writeback.h> /* for inode_lock, oddly enough.. */ 79#include <linux/writeback.h> /* for inode_lock, oddly enough.. */
80#ifdef CONFIG_QUOTA_NETLINK_INTERFACE
81#include <net/netlink.h>
82#include <net/genetlink.h>
83#endif
84 80
85#include <asm/uaccess.h> 81#include <asm/uaccess.h>
86 82
@@ -1071,73 +1067,6 @@ static void print_warning(struct dquot *dquot, const int warntype)
1071} 1067}
1072#endif 1068#endif
1073 1069
1074#ifdef CONFIG_QUOTA_NETLINK_INTERFACE
1075
1076/* Netlink family structure for quota */
1077static struct genl_family quota_genl_family = {
1078 .id = GENL_ID_GENERATE,
1079 .hdrsize = 0,
1080 .name = "VFS_DQUOT",
1081 .version = 1,
1082 .maxattr = QUOTA_NL_A_MAX,
1083};
1084
1085/* Send warning to userspace about user which exceeded quota */
1086static void send_warning(const struct dquot *dquot, const char warntype)
1087{
1088 static atomic_t seq;
1089 struct sk_buff *skb;
1090 void *msg_head;
1091 int ret;
1092 int msg_size = 4 * nla_total_size(sizeof(u32)) +
1093 2 * nla_total_size(sizeof(u64));
1094
1095 /* We have to allocate using GFP_NOFS as we are called from a
1096 * filesystem performing write and thus further recursion into
1097 * the fs to free some data could cause deadlocks. */
1098 skb = genlmsg_new(msg_size, GFP_NOFS);
1099 if (!skb) {
1100 printk(KERN_ERR
1101 "VFS: Not enough memory to send quota warning.\n");
1102 return;
1103 }
1104 msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
1105 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
1106 if (!msg_head) {
1107 printk(KERN_ERR
1108 "VFS: Cannot store netlink header in quota warning.\n");
1109 goto err_out;
1110 }
1111 ret = nla_put_u32(skb, QUOTA_NL_A_QTYPE, dquot->dq_type);
1112 if (ret)
1113 goto attr_err_out;
1114 ret = nla_put_u64(skb, QUOTA_NL_A_EXCESS_ID, dquot->dq_id);
1115 if (ret)
1116 goto attr_err_out;
1117 ret = nla_put_u32(skb, QUOTA_NL_A_WARNING, warntype);
1118 if (ret)
1119 goto attr_err_out;
1120 ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MAJOR,
1121 MAJOR(dquot->dq_sb->s_dev));
1122 if (ret)
1123 goto attr_err_out;
1124 ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MINOR,
1125 MINOR(dquot->dq_sb->s_dev));
1126 if (ret)
1127 goto attr_err_out;
1128 ret = nla_put_u64(skb, QUOTA_NL_A_CAUSED_ID, current_uid());
1129 if (ret)
1130 goto attr_err_out;
1131 genlmsg_end(skb, msg_head);
1132
1133 genlmsg_multicast(skb, 0, quota_genl_family.id, GFP_NOFS);
1134 return;
1135attr_err_out:
1136 printk(KERN_ERR "VFS: Not enough space to compose quota message!\n");
1137err_out:
1138 kfree_skb(skb);
1139}
1140#endif
1141/* 1070/*
1142 * Write warnings to the console and send warning messages over netlink. 1071 * Write warnings to the console and send warning messages over netlink.
1143 * 1072 *
@@ -1145,18 +1074,20 @@ err_out:
1145 */ 1074 */
1146static void flush_warnings(struct dquot *const *dquots, char *warntype) 1075static void flush_warnings(struct dquot *const *dquots, char *warntype)
1147{ 1076{
1077 struct dquot *dq;
1148 int i; 1078 int i;
1149 1079
1150 for (i = 0; i < MAXQUOTAS; i++) 1080 for (i = 0; i < MAXQUOTAS; i++) {
1151 if (dquots[i] && warntype[i] != QUOTA_NL_NOWARN && 1081 dq = dquots[i];
1152 !warning_issued(dquots[i], warntype[i])) { 1082 if (dq && warntype[i] != QUOTA_NL_NOWARN &&
1083 !warning_issued(dq, warntype[i])) {
1153#ifdef CONFIG_PRINT_QUOTA_WARNING 1084#ifdef CONFIG_PRINT_QUOTA_WARNING
1154 print_warning(dquots[i], warntype[i]); 1085 print_warning(dq, warntype[i]);
1155#endif
1156#ifdef CONFIG_QUOTA_NETLINK_INTERFACE
1157 send_warning(dquots[i], warntype[i]);
1158#endif 1086#endif
1087 quota_send_warning(dq->dq_type, dq->dq_id,
1088 dq->dq_sb->s_dev, warntype[i]);
1159 } 1089 }
1090 }
1160} 1091}
1161 1092
1162static int ignore_hardlimit(struct dquot *dquot) 1093static int ignore_hardlimit(struct dquot *dquot)
@@ -2607,12 +2538,6 @@ static int __init dquot_init(void)
2607 2538
2608 register_shrinker(&dqcache_shrinker); 2539 register_shrinker(&dqcache_shrinker);
2609 2540
2610#ifdef CONFIG_QUOTA_NETLINK_INTERFACE
2611 if (genl_register_family(&quota_genl_family) != 0)
2612 printk(KERN_ERR
2613 "VFS: Failed to create quota netlink interface.\n");
2614#endif
2615
2616 return 0; 2541 return 0;
2617} 2542}
2618module_init(dquot_init); 2543module_init(dquot_init);
diff --git a/fs/quota/quota.c b/fs/quota/quota.c
index 95c5b42384b2..ee91e2756950 100644
--- a/fs/quota/quota.c
+++ b/fs/quota/quota.c
@@ -18,6 +18,8 @@
18#include <linux/capability.h> 18#include <linux/capability.h>
19#include <linux/quotaops.h> 19#include <linux/quotaops.h>
20#include <linux/types.h> 20#include <linux/types.h>
21#include <net/netlink.h>
22#include <net/genetlink.h>
21 23
22/* Check validity of generic quotactl commands */ 24/* Check validity of generic quotactl commands */
23static int generic_quotactl_valid(struct super_block *sb, int type, int cmd, 25static int generic_quotactl_valid(struct super_block *sb, int type, int cmd,
@@ -525,3 +527,94 @@ asmlinkage long sys32_quotactl(unsigned int cmd, const char __user *special,
525 return ret; 527 return ret;
526} 528}
527#endif 529#endif
530
531
532#ifdef CONFIG_QUOTA_NETLINK_INTERFACE
533
534/* Netlink family structure for quota */
535static struct genl_family quota_genl_family = {
536 .id = GENL_ID_GENERATE,
537 .hdrsize = 0,
538 .name = "VFS_DQUOT",
539 .version = 1,
540 .maxattr = QUOTA_NL_A_MAX,
541};
542
543/**
544 * quota_send_warning - Send warning to userspace about exceeded quota
 545 * @type: The quota type: USRQUOTA, GRPQUOTA,...
546 * @id: The user or group id of the quota that was exceeded
547 * @dev: The device on which the fs is mounted (sb->s_dev)
548 * @warntype: The type of the warning: QUOTA_NL_...
549 *
550 * This can be used by filesystems (including those which don't use
551 * dquot) to send a message to userspace relating to quota limits.
552 *
553 */
554
555void quota_send_warning(short type, unsigned int id, dev_t dev,
556 const char warntype)
557{
558 static atomic_t seq;
559 struct sk_buff *skb;
560 void *msg_head;
561 int ret;
562 int msg_size = 4 * nla_total_size(sizeof(u32)) +
563 2 * nla_total_size(sizeof(u64));
564
565 /* We have to allocate using GFP_NOFS as we are called from a
566 * filesystem performing write and thus further recursion into
567 * the fs to free some data could cause deadlocks. */
568 skb = genlmsg_new(msg_size, GFP_NOFS);
569 if (!skb) {
570 printk(KERN_ERR
571 "VFS: Not enough memory to send quota warning.\n");
572 return;
573 }
574 msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
575 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
576 if (!msg_head) {
577 printk(KERN_ERR
578 "VFS: Cannot store netlink header in quota warning.\n");
579 goto err_out;
580 }
581 ret = nla_put_u32(skb, QUOTA_NL_A_QTYPE, type);
582 if (ret)
583 goto attr_err_out;
584 ret = nla_put_u64(skb, QUOTA_NL_A_EXCESS_ID, id);
585 if (ret)
586 goto attr_err_out;
587 ret = nla_put_u32(skb, QUOTA_NL_A_WARNING, warntype);
588 if (ret)
589 goto attr_err_out;
590 ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MAJOR, MAJOR(dev));
591 if (ret)
592 goto attr_err_out;
593 ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MINOR, MINOR(dev));
594 if (ret)
595 goto attr_err_out;
596 ret = nla_put_u64(skb, QUOTA_NL_A_CAUSED_ID, current_uid());
597 if (ret)
598 goto attr_err_out;
599 genlmsg_end(skb, msg_head);
600
601 genlmsg_multicast(skb, 0, quota_genl_family.id, GFP_NOFS);
602 return;
603attr_err_out:
604 printk(KERN_ERR "VFS: Not enough space to compose quota message!\n");
605err_out:
606 kfree_skb(skb);
607}
608EXPORT_SYMBOL(quota_send_warning);
609
610static int __init quota_init(void)
611{
612 if (genl_register_family(&quota_genl_family) != 0)
613 printk(KERN_ERR
614 "VFS: Failed to create quota netlink interface.\n");
615 return 0;
616};
617
618module_init(quota_init);
619#endif
620
diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
index c6ad7c7e3ee9..05ac0fe9c4d3 100644
--- a/fs/xattr_acl.c
+++ b/fs/xattr_acl.c
@@ -36,7 +36,7 @@ posix_acl_from_xattr(const void *value, size_t size)
36 if (count == 0) 36 if (count == 0)
37 return NULL; 37 return NULL;
38 38
39 acl = posix_acl_alloc(count, GFP_KERNEL); 39 acl = posix_acl_alloc(count, GFP_NOFS);
40 if (!acl) 40 if (!acl)
41 return ERR_PTR(-ENOMEM); 41 return ERR_PTR(-ENOMEM);
42 acl_e = acl->a_entries; 42 acl_e = acl->a_entries;
diff --git a/include/linux/gfs2_ondisk.h b/include/linux/gfs2_ondisk.h
index b80c88dedbbb..81f90a59cda6 100644
--- a/include/linux/gfs2_ondisk.h
+++ b/include/linux/gfs2_ondisk.h
@@ -81,7 +81,11 @@ struct gfs2_meta_header {
81 __be32 mh_type; 81 __be32 mh_type;
82 __be64 __pad0; /* Was generation number in gfs1 */ 82 __be64 __pad0; /* Was generation number in gfs1 */
83 __be32 mh_format; 83 __be32 mh_format;
84 __be32 __pad1; /* Was incarnation number in gfs1 */ 84 /* This union is to keep userspace happy */
85 union {
86 __be32 mh_jid; /* Was incarnation number in gfs1 */
87 __be32 __pad1;
88 };
85}; 89};
86 90
87/* 91/*
diff --git a/include/linux/posix_acl.h b/include/linux/posix_acl.h
index 065a3652a3ea..67608161df6b 100644
--- a/include/linux/posix_acl.h
+++ b/include/linux/posix_acl.h
@@ -147,6 +147,20 @@ static inline void forget_cached_acl(struct inode *inode, int type)
147 if (old != ACL_NOT_CACHED) 147 if (old != ACL_NOT_CACHED)
148 posix_acl_release(old); 148 posix_acl_release(old);
149} 149}
150
151static inline void forget_all_cached_acls(struct inode *inode)
152{
153 struct posix_acl *old_access, *old_default;
154 spin_lock(&inode->i_lock);
155 old_access = inode->i_acl;
156 old_default = inode->i_default_acl;
157 inode->i_acl = inode->i_default_acl = ACL_NOT_CACHED;
158 spin_unlock(&inode->i_lock);
159 if (old_access != ACL_NOT_CACHED)
160 posix_acl_release(old_access);
161 if (old_default != ACL_NOT_CACHED)
162 posix_acl_release(old_default);
163}
150#endif 164#endif
151 165
152static inline void cache_no_acl(struct inode *inode) 166static inline void cache_no_acl(struct inode *inode)
diff --git a/include/linux/quota.h b/include/linux/quota.h
index 78c48895b12a..ce9a9b2e5cd4 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -376,6 +376,17 @@ static inline unsigned int dquot_generic_flag(unsigned int flags, int type)
376 return flags >> _DQUOT_STATE_FLAGS; 376 return flags >> _DQUOT_STATE_FLAGS;
377} 377}
378 378
379#ifdef CONFIG_QUOTA_NETLINK_INTERFACE
380extern void quota_send_warning(short type, unsigned int id, dev_t dev,
381 const char warntype);
382#else
383static inline void quota_send_warning(short type, unsigned int id, dev_t dev,
384 const char warntype)
385{
386 return;
387}
388#endif /* CONFIG_QUOTA_NETLINK_INTERFACE */
389
379struct quota_info { 390struct quota_info {
380 unsigned int flags; /* Flags for diskquotas on this device */ 391 unsigned int flags; /* Flags for diskquotas on this device */
381 struct mutex dqio_mutex; /* lock device while I/O in progress */ 392 struct mutex dqio_mutex; /* lock device while I/O in progress */