author     Steve French <sfrench@us.ibm.com>   2008-04-24 11:26:50 -0400
committer  Steve French <sfrench@us.ibm.com>   2008-04-24 11:26:50 -0400
commit     36d99df2fb474222ab47fbe8ae7385661033223b (patch)
tree       962e068491b752a944f61c454fad3f8619a1ea3f /fs/gfs2
parent     076d8423a98659a92837b07aa494cb74bfefe77c (diff)
parent     3dc5063786b273f1aee545844f6bd4e9651ebffe (diff)
Merge branch 'master' of /pub/scm/linux/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'fs/gfs2')
-rw-r--r--  fs/gfs2/Kconfig                     2
-rw-r--r--  fs/gfs2/Makefile                    2
-rw-r--r--  fs/gfs2/acl.c                       6
-rw-r--r--  fs/gfs2/bmap.c                    670
-rw-r--r--  fs/gfs2/dir.c                      84
-rw-r--r--  fs/gfs2/eattr.c                    58
-rw-r--r--  fs/gfs2/glock.c                   188
-rw-r--r--  fs/gfs2/glock.h                    14
-rw-r--r--  fs/gfs2/glops.c                    10
-rw-r--r--  fs/gfs2/incore.h                   40
-rw-r--r--  fs/gfs2/inode.c                    72
-rw-r--r--  fs/gfs2/inode.h                    22
-rw-r--r--  fs/gfs2/lm.c                      210
-rw-r--r--  fs/gfs2/lm.h                       42
-rw-r--r--  fs/gfs2/locking/dlm/Makefile        2
-rw-r--r--  fs/gfs2/locking/dlm/lock.c          7
-rw-r--r--  fs/gfs2/locking/dlm/lock_dlm.h     13
-rw-r--r--  fs/gfs2/locking/dlm/main.c         10
-rw-r--r--  fs/gfs2/locking/dlm/mount.c        21
-rw-r--r--  fs/gfs2/locking/dlm/plock.c       406
-rw-r--r--  fs/gfs2/locking/dlm/sysfs.c         2
-rw-r--r--  fs/gfs2/locking/dlm/thread.c       10
-rw-r--r--  fs/gfs2/locking/nolock/main.c       2
-rw-r--r--  fs/gfs2/log.c                      19
-rw-r--r--  fs/gfs2/lops.c                     21
-rw-r--r--  fs/gfs2/lops.h                     11
-rw-r--r--  fs/gfs2/main.c                     10
-rw-r--r--  fs/gfs2/ops_address.c              44
-rw-r--r--  fs/gfs2/ops_dentry.c                4
-rw-r--r--  fs/gfs2/ops_export.c                2
-rw-r--r--  fs/gfs2/ops_file.c                 37
-rw-r--r--  fs/gfs2/ops_fstype.c               80
-rw-r--r--  fs/gfs2/ops_inode.c                42
-rw-r--r--  fs/gfs2/ops_inode.h                 1
-rw-r--r--  fs/gfs2/ops_super.c                 1
-rw-r--r--  fs/gfs2/quota.c                    74
-rw-r--r--  fs/gfs2/quota.h                    17
-rw-r--r--  fs/gfs2/recovery.c                 15
-rw-r--r--  fs/gfs2/rgrp.c                    370
-rw-r--r--  fs/gfs2/rgrp.h                      8
-rw-r--r--  fs/gfs2/super.c                     6
-rw-r--r--  fs/gfs2/super.h                     1
-rw-r--r--  fs/gfs2/sys.c                       7
-rw-r--r--  fs/gfs2/trans.c                    25
-rw-r--r--  fs/gfs2/trans.h                     2
-rw-r--r--  fs/gfs2/util.c                     24
-rw-r--r--  fs/gfs2/util.h                      2
47 files changed, 1161 insertions, 1555 deletions
diff --git a/fs/gfs2/Kconfig b/fs/gfs2/Kconfig
index de8e64c03f73..7f7947e3dfbb 100644
--- a/fs/gfs2/Kconfig
+++ b/fs/gfs2/Kconfig
@@ -1,6 +1,6 @@
 config GFS2_FS
 	tristate "GFS2 file system support"
-	depends on EXPERIMENTAL
+	depends on EXPERIMENTAL && (64BIT || (LSF && LBD))
 	select FS_POSIX_ACL
 	select CRC32
 	help
diff --git a/fs/gfs2/Makefile b/fs/gfs2/Makefile
index 8fff11058cee..e2350df02a07 100644
--- a/fs/gfs2/Makefile
+++ b/fs/gfs2/Makefile
@@ -1,6 +1,6 @@
 obj-$(CONFIG_GFS2_FS) += gfs2.o
 gfs2-y := acl.o bmap.o daemon.o dir.o eaops.o eattr.o glock.o \
-	glops.o inode.o lm.o log.o lops.o locking.o main.o meta_io.o \
+	glops.o inode.o log.o lops.o locking.o main.o meta_io.o \
 	mount.o ops_address.o ops_dentry.o ops_export.o ops_file.o \
 	ops_fstype.o ops_inode.o ops_super.o quota.o \
 	recovery.o rgrp.o super.o sys.o trans.o util.o
diff --git a/fs/gfs2/acl.c b/fs/gfs2/acl.c
index 1047a8c7226a..3e9bd46f27e3 100644
--- a/fs/gfs2/acl.c
+++ b/fs/gfs2/acl.c
@@ -116,7 +116,7 @@ static int acl_get(struct gfs2_inode *ip, int access, struct posix_acl **acl,
 		goto out;
 
 	er.er_data_len = GFS2_EA_DATA_LEN(el->el_ea);
-	er.er_data = kmalloc(er.er_data_len, GFP_KERNEL);
+	er.er_data = kmalloc(er.er_data_len, GFP_NOFS);
 	error = -ENOMEM;
 	if (!er.er_data)
 		goto out;
@@ -222,7 +222,7 @@ int gfs2_acl_create(struct gfs2_inode *dip, struct gfs2_inode *ip)
 		return error;
 	}
 
-	clone = posix_acl_clone(acl, GFP_KERNEL);
+	clone = posix_acl_clone(acl, GFP_NOFS);
 	error = -ENOMEM;
 	if (!clone)
 		goto out;
@@ -272,7 +272,7 @@ int gfs2_acl_chmod(struct gfs2_inode *ip, struct iattr *attr)
 	if (!acl)
 		return gfs2_setattr_simple(ip, attr);
 
-	clone = posix_acl_clone(acl, GFP_KERNEL);
+	clone = posix_acl_clone(acl, GFP_NOFS);
 	error = -ENOMEM;
 	if (!clone)
 		goto out;
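
The three acl.c hunks above are all the same fix: allocations made while the inode glock is held and a transaction may be open must not use GFP_KERNEL, because direct reclaim could re-enter GFS2 and deadlock. A minimal kernel-side sketch of the rule (illustrative fragment, not part of this patch):

#include <linux/slab.h>

/* While holding filesystem locks / inside a transaction, memory reclaim must
 * not recurse back into the filesystem, so GFP_NOFS replaces GFP_KERNEL. */
static void *alloc_under_fs_locks(size_t len)
{
	return kmalloc(len, GFP_NOFS);
}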
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index e9456ebd3bb6..c19184f2e70e 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -33,6 +33,7 @@
33 * keep it small. 33 * keep it small.
34 */ 34 */
35struct metapath { 35struct metapath {
36 struct buffer_head *mp_bh[GFS2_MAX_META_HEIGHT];
36 __u16 mp_list[GFS2_MAX_META_HEIGHT]; 37 __u16 mp_list[GFS2_MAX_META_HEIGHT];
37}; 38};
38 39
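
The struct change above is what enables the rest of this patch: a metapath now carries the buffer_head read at each level, not just the slot indices, so the walk can be split into lookup and release halves. A short orientation sketch of the lifecycle as the later hunks use it (all names taken from this patch, shown out of context):

struct metapath mp;
int ret;

memset(mp.mp_bh, 0, sizeof(mp.mp_bh));           /* every level starts empty        */
ret = gfs2_meta_inode_buffer(ip, &mp.mp_bh[0]);  /* level 0 is the dinode buffer    */
/* find_metapath() fills mp.mp_list[], lookup_metapath() fills mp.mp_bh[1..] */
release_metapath(&mp);                           /* brelse() every held buffer once */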
@@ -135,9 +136,10 @@ int gfs2_unstuff_dinode(struct gfs2_inode *ip, struct page *page)
135 /* Get a free block, fill it with the stuffed data, 136 /* Get a free block, fill it with the stuffed data,
136 and write it out to disk */ 137 and write it out to disk */
137 138
139 unsigned int n = 1;
140 block = gfs2_alloc_block(ip, &n);
138 if (isdir) { 141 if (isdir) {
139 block = gfs2_alloc_meta(ip); 142 gfs2_trans_add_unrevoke(GFS2_SB(&ip->i_inode), block, 1);
140
141 error = gfs2_dir_get_new_buffer(ip, block, &bh); 143 error = gfs2_dir_get_new_buffer(ip, block, &bh);
142 if (error) 144 if (error)
143 goto out_brelse; 145 goto out_brelse;
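
In the hunk above the old single-block allocators are replaced by the extent allocator: gfs2_alloc_block() takes the wanted count by reference and writes back how many contiguous blocks it actually granted, and freshly allocated metadata is handed to gfs2_trans_add_unrevoke() so a stale revoke from the block's previous life cannot discard it at journal replay. The calling pattern, copied from this patch (prototypes inferred from these call sites only):

unsigned int n = 1;                    /* ask for one block ...                   */
u64 block = gfs2_alloc_block(ip, &n);  /* ... n returns the extent length granted */

gfs2_trans_add_unrevoke(GFS2_SB(&ip->i_inode), block, 1);  /* cancel queued revokes */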
@@ -145,8 +147,6 @@ int gfs2_unstuff_dinode(struct gfs2_inode *ip, struct page *page)
145 dibh, sizeof(struct gfs2_dinode)); 147 dibh, sizeof(struct gfs2_dinode));
146 brelse(bh); 148 brelse(bh);
147 } else { 149 } else {
148 block = gfs2_alloc_data(ip);
149
150 error = gfs2_unstuffer_page(ip, dibh, block, page); 150 error = gfs2_unstuffer_page(ip, dibh, block, page);
151 if (error) 151 if (error)
152 goto out_brelse; 152 goto out_brelse;
@@ -161,12 +161,11 @@ int gfs2_unstuff_dinode(struct gfs2_inode *ip, struct page *page)
161 161
162 if (ip->i_di.di_size) { 162 if (ip->i_di.di_size) {
163 *(__be64 *)(di + 1) = cpu_to_be64(block); 163 *(__be64 *)(di + 1) = cpu_to_be64(block);
164 ip->i_di.di_blocks++; 164 gfs2_add_inode_blocks(&ip->i_inode, 1);
165 gfs2_set_inode_blocks(&ip->i_inode); 165 di->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(&ip->i_inode));
166 di->di_blocks = cpu_to_be64(ip->i_di.di_blocks);
167 } 166 }
168 167
169 ip->i_di.di_height = 1; 168 ip->i_height = 1;
170 di->di_height = cpu_to_be16(1); 169 di->di_height = cpu_to_be16(1);
171 170
172out_brelse: 171out_brelse:
@@ -176,114 +175,13 @@ out:
176 return error; 175 return error;
177} 176}
178 177
179/**
180 * calc_tree_height - Calculate the height of a metadata tree
181 * @ip: The GFS2 inode
182 * @size: The proposed size of the file
183 *
184 * Work out how tall a metadata tree needs to be in order to accommodate a
185 * file of a particular size. If size is less than the current size of
186 * the inode, then the current size of the inode is used instead of the
187 * supplied one.
188 *
189 * Returns: the height the tree should be
190 */
191
192static unsigned int calc_tree_height(struct gfs2_inode *ip, u64 size)
193{
194 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
195 u64 *arr;
196 unsigned int max, height;
197
198 if (ip->i_di.di_size > size)
199 size = ip->i_di.di_size;
200
201 if (gfs2_is_dir(ip)) {
202 arr = sdp->sd_jheightsize;
203 max = sdp->sd_max_jheight;
204 } else {
205 arr = sdp->sd_heightsize;
206 max = sdp->sd_max_height;
207 }
208
209 for (height = 0; height < max; height++)
210 if (arr[height] >= size)
211 break;
212
213 return height;
214}
215
216/**
217 * build_height - Build a metadata tree of the requested height
218 * @ip: The GFS2 inode
219 * @height: The height to build to
220 *
221 *
222 * Returns: errno
223 */
224
225static int build_height(struct inode *inode, unsigned height)
226{
227 struct gfs2_inode *ip = GFS2_I(inode);
228 unsigned new_height = height - ip->i_di.di_height;
229 struct buffer_head *dibh;
230 struct buffer_head *blocks[GFS2_MAX_META_HEIGHT];
231 struct gfs2_dinode *di;
232 int error;
233 __be64 *bp;
234 u64 bn;
235 unsigned n;
236
237 if (height <= ip->i_di.di_height)
238 return 0;
239
240 error = gfs2_meta_inode_buffer(ip, &dibh);
241 if (error)
242 return error;
243
244 for(n = 0; n < new_height; n++) {
245 bn = gfs2_alloc_meta(ip);
246 blocks[n] = gfs2_meta_new(ip->i_gl, bn);
247 gfs2_trans_add_bh(ip->i_gl, blocks[n], 1);
248 }
249
250 n = 0;
251 bn = blocks[0]->b_blocknr;
252 if (new_height > 1) {
253 for(; n < new_height-1; n++) {
254 gfs2_metatype_set(blocks[n], GFS2_METATYPE_IN,
255 GFS2_FORMAT_IN);
256 gfs2_buffer_clear_tail(blocks[n],
257 sizeof(struct gfs2_meta_header));
258 bp = (__be64 *)(blocks[n]->b_data +
259 sizeof(struct gfs2_meta_header));
260 *bp = cpu_to_be64(blocks[n+1]->b_blocknr);
261 brelse(blocks[n]);
262 blocks[n] = NULL;
263 }
264 }
265 gfs2_metatype_set(blocks[n], GFS2_METATYPE_IN, GFS2_FORMAT_IN);
266 gfs2_buffer_copy_tail(blocks[n], sizeof(struct gfs2_meta_header),
267 dibh, sizeof(struct gfs2_dinode));
268 brelse(blocks[n]);
269 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
270 di = (struct gfs2_dinode *)dibh->b_data;
271 gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
272 *(__be64 *)(di + 1) = cpu_to_be64(bn);
273 ip->i_di.di_height += new_height;
274 ip->i_di.di_blocks += new_height;
275 gfs2_set_inode_blocks(&ip->i_inode);
276 di->di_height = cpu_to_be16(ip->i_di.di_height);
277 di->di_blocks = cpu_to_be64(ip->i_di.di_blocks);
278 brelse(dibh);
279 return error;
280}
281 178
282/** 179/**
283 * find_metapath - Find path through the metadata tree 180 * find_metapath - Find path through the metadata tree
284 * @ip: The inode pointer 181 * @sdp: The superblock
285 * @mp: The metapath to return the result in 182 * @mp: The metapath to return the result in
286 * @block: The disk block to look up 183 * @block: The disk block to look up
184 * @height: The pre-calculated height of the metadata tree
287 * 185 *
288 * This routine returns a struct metapath structure that defines a path 186 * This routine returns a struct metapath structure that defines a path
289 * through the metadata of inode "ip" to get to block "block". 187 * through the metadata of inode "ip" to get to block "block".
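
The hunk above removes calc_tree_height() and build_height() outright: the height needed for a given file size is now read straight off the per-mount capacity tables (sd_heightsize[] / sd_jheightsize[]) inside gfs2_block_map(), and growing the tree is folded into gfs2_bmap_alloc() later in this patch. A self-contained illustration of the table walk; the capacities below assume a 4096-byte block with 483 dinode pointers and 509 indirect pointers, and are not taken from this diff:

#include <stdio.h>
#include <stdint.h>

/* Stand-in for sd_heightsize[]: the largest file (in bytes) a metadata tree of
 * each height can address.  Real values are computed at mount time. */
static const uint64_t heightsize[] = {
	4096 - 232,                   /* height 0: data stuffed in the dinode */
	483ULL * 4096,                /* height 1 */
	483ULL * 509 * 4096,          /* height 2 */
	483ULL * 509 * 509 * 4096,    /* height 3 */
};

static unsigned height_for_size(uint64_t size)
{
	unsigned h = 0;

	while (size > heightsize[h])  /* same walk as gfs2_block_map() below */
		h++;
	return h;
}

int main(void)
{
	printf("1MiB -> height %u\n", height_for_size(1ULL << 20));  /* 1 */
	printf("1GiB -> height %u\n", height_for_size(1ULL << 30));  /* 3 */
	return 0;
}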
@@ -338,21 +236,29 @@ static int build_height(struct inode *inode, unsigned height)
338 * 236 *
339 */ 237 */
340 238
341static void find_metapath(struct gfs2_inode *ip, u64 block, 239static void find_metapath(const struct gfs2_sbd *sdp, u64 block,
342 struct metapath *mp) 240 struct metapath *mp, unsigned int height)
343{ 241{
344 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
345 u64 b = block;
346 unsigned int i; 242 unsigned int i;
347 243
348 for (i = ip->i_di.di_height; i--;) 244 for (i = height; i--;)
349 mp->mp_list[i] = do_div(b, sdp->sd_inptrs); 245 mp->mp_list[i] = do_div(block, sdp->sd_inptrs);
246
247}
350 248
249static inline unsigned int zero_metapath_length(const struct metapath *mp,
250 unsigned height)
251{
252 unsigned int i;
253 for (i = 0; i < height - 1; i++) {
254 if (mp->mp_list[i] != 0)
255 return i;
256 }
257 return height;
351} 258}
352 259
353/** 260/**
354 * metapointer - Return pointer to start of metadata in a buffer 261 * metapointer - Return pointer to start of metadata in a buffer
355 * @bh: The buffer
356 * @height: The metadata height (0 = dinode) 262 * @height: The metadata height (0 = dinode)
357 * @mp: The metapath 263 * @mp: The metapath
358 * 264 *
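
The new find_metapath() above takes the superblock and a pre-computed height instead of digging both out of the inode, but the job is unchanged: express the logical block number in base sd_inptrs (pointers per indirect block), deepest level last. A user-space rendering with / and % standing in for do_div(), plus a worked example; 509 is the usual pointer count for a 4KiB block and is assumed here:

#include <stdio.h>
#include <stdint.h>

#define MAX_HEIGHT 10

/* Decompose a logical block number into one slot index per tree level. */
static void find_metapath(uint64_t block, unsigned inptrs,
			  unsigned height, uint16_t list[MAX_HEIGHT])
{
	unsigned i;

	for (i = height; i--;) {
		list[i] = block % inptrs;   /* do_div() returns this remainder ... */
		block /= inptrs;            /* ... and leaves the quotient behind  */
	}
}

int main(void)
{
	uint16_t list[MAX_HEIGHT];
	unsigned i;

	find_metapath(1234567, 509, 3, list);   /* height-3 tree, block 1234567 */
	for (i = 0; i < 3; i++)
		printf("level %u -> slot %u\n", i, list[i]);  /* 4, 389, 242 */
	return 0;
}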
@@ -361,93 +267,302 @@ static void find_metapath(struct gfs2_inode *ip, u64 block,
361 * metadata tree. 267 * metadata tree.
362 */ 268 */
363 269
364static inline __be64 *metapointer(struct buffer_head *bh, int *boundary, 270static inline __be64 *metapointer(unsigned int height, const struct metapath *mp)
365 unsigned int height, const struct metapath *mp)
366{ 271{
272 struct buffer_head *bh = mp->mp_bh[height];
367 unsigned int head_size = (height > 0) ? 273 unsigned int head_size = (height > 0) ?
368 sizeof(struct gfs2_meta_header) : sizeof(struct gfs2_dinode); 274 sizeof(struct gfs2_meta_header) : sizeof(struct gfs2_dinode);
369 __be64 *ptr; 275 return ((__be64 *)(bh->b_data + head_size)) + mp->mp_list[height];
370 *boundary = 0;
371 ptr = ((__be64 *)(bh->b_data + head_size)) + mp->mp_list[height];
372 if (ptr + 1 == (__be64 *)(bh->b_data + bh->b_size))
373 *boundary = 1;
374 return ptr;
375} 276}
376 277
377/** 278/**
378 * lookup_block - Get the next metadata block in metadata tree 279 * lookup_metapath - Walk the metadata tree to a specific point
379 * @ip: The GFS2 inode 280 * @ip: The inode
380 * @bh: Buffer containing the pointers to metadata blocks
381 * @height: The height of the tree (0 = dinode)
382 * @mp: The metapath 281 * @mp: The metapath
383 * @create: Non-zero if we may create a new meatdata block
384 * @new: Used to indicate if we did create a new metadata block
385 * @block: the returned disk block number
386 * 282 *
387 * Given a metatree, complete to a particular height, checks to see if the next 283 * Assumes that the inode's buffer has already been looked up and
388 * height of the tree exists. If not the next height of the tree is created. 284 * hooked onto mp->mp_bh[0] and that the metapath has been initialised
389 * The block number of the next height of the metadata tree is returned. 285 * by find_metapath().
286 *
287 * If this function encounters part of the tree which has not been
288 * allocated, it returns the current height of the tree at the point
289 * at which it found the unallocated block. Blocks which are found are
290 * added to the mp->mp_bh[] list.
390 * 291 *
292 * Returns: error or height of metadata tree
391 */ 293 */
392 294
393static int lookup_block(struct gfs2_inode *ip, struct buffer_head *bh, 295static int lookup_metapath(struct gfs2_inode *ip, struct metapath *mp)
394 unsigned int height, struct metapath *mp, int create,
395 int *new, u64 *block)
396{ 296{
397 int boundary; 297 unsigned int end_of_metadata = ip->i_height - 1;
398 __be64 *ptr = metapointer(bh, &boundary, height, mp); 298 unsigned int x;
299 __be64 *ptr;
300 u64 dblock;
301 int ret;
399 302
400 if (*ptr) { 303 for (x = 0; x < end_of_metadata; x++) {
401 *block = be64_to_cpu(*ptr); 304 ptr = metapointer(x, mp);
402 return boundary; 305 dblock = be64_to_cpu(*ptr);
403 } 306 if (!dblock)
307 return x + 1;
404 308
405 *block = 0; 309 ret = gfs2_meta_indirect_buffer(ip, x+1, dblock, 0, &mp->mp_bh[x+1]);
310 if (ret)
311 return ret;
312 }
406 313
407 if (!create) 314 return ip->i_height;
408 return 0; 315}
409 316
410 if (height == ip->i_di.di_height - 1 && !gfs2_is_dir(ip)) 317static inline void release_metapath(struct metapath *mp)
411 *block = gfs2_alloc_data(ip); 318{
412 else 319 int i;
413 *block = gfs2_alloc_meta(ip);
414 320
415 gfs2_trans_add_bh(ip->i_gl, bh, 1); 321 for (i = 0; i < GFS2_MAX_META_HEIGHT; i++) {
322 if (mp->mp_bh[i] == NULL)
323 break;
324 brelse(mp->mp_bh[i]);
325 }
326}
416 327
417 *ptr = cpu_to_be64(*block); 328/**
418 ip->i_di.di_blocks++; 329 * gfs2_extent_length - Returns length of an extent of blocks
419 gfs2_set_inode_blocks(&ip->i_inode); 330 * @start: Start of the buffer
331 * @len: Length of the buffer in bytes
332 * @ptr: Current position in the buffer
333 * @limit: Max extent length to return (0 = unlimited)
334 * @eob: Set to 1 if we hit "end of block"
335 *
336 * If the first block is zero (unallocated) it will return the number of
337 * unallocated blocks in the extent, otherwise it will return the number
338 * of contiguous blocks in the extent.
339 *
340 * Returns: The length of the extent (minimum of one block)
341 */
420 342
421 *new = 1; 343static inline unsigned int gfs2_extent_length(void *start, unsigned int len, __be64 *ptr, unsigned limit, int *eob)
422 return 0; 344{
345 const __be64 *end = (start + len);
346 const __be64 *first = ptr;
347 u64 d = be64_to_cpu(*ptr);
348
349 *eob = 0;
350 do {
351 ptr++;
352 if (ptr >= end)
353 break;
354 if (limit && --limit == 0)
355 break;
356 if (d)
357 d++;
358 } while(be64_to_cpu(*ptr) == d);
359 if (ptr >= end)
360 *eob = 1;
361 return (ptr - first);
423} 362}
424 363
425static inline void bmap_lock(struct inode *inode, int create) 364static inline void bmap_lock(struct gfs2_inode *ip, int create)
426{ 365{
427 struct gfs2_inode *ip = GFS2_I(inode);
428 if (create) 366 if (create)
429 down_write(&ip->i_rw_mutex); 367 down_write(&ip->i_rw_mutex);
430 else 368 else
431 down_read(&ip->i_rw_mutex); 369 down_read(&ip->i_rw_mutex);
432} 370}
433 371
434static inline void bmap_unlock(struct inode *inode, int create) 372static inline void bmap_unlock(struct gfs2_inode *ip, int create)
435{ 373{
436 struct gfs2_inode *ip = GFS2_I(inode);
437 if (create) 374 if (create)
438 up_write(&ip->i_rw_mutex); 375 up_write(&ip->i_rw_mutex);
439 else 376 else
440 up_read(&ip->i_rw_mutex); 377 up_read(&ip->i_rw_mutex);
441} 378}
442 379
380static inline __be64 *gfs2_indirect_init(struct metapath *mp,
381 struct gfs2_glock *gl, unsigned int i,
382 unsigned offset, u64 bn)
383{
384 __be64 *ptr = (__be64 *)(mp->mp_bh[i - 1]->b_data +
385 ((i > 1) ? sizeof(struct gfs2_meta_header) :
386 sizeof(struct gfs2_dinode)));
387 BUG_ON(i < 1);
388 BUG_ON(mp->mp_bh[i] != NULL);
389 mp->mp_bh[i] = gfs2_meta_new(gl, bn);
390 gfs2_trans_add_bh(gl, mp->mp_bh[i], 1);
391 gfs2_metatype_set(mp->mp_bh[i], GFS2_METATYPE_IN, GFS2_FORMAT_IN);
392 gfs2_buffer_clear_tail(mp->mp_bh[i], sizeof(struct gfs2_meta_header));
393 ptr += offset;
394 *ptr = cpu_to_be64(bn);
395 return ptr;
396}
397
398enum alloc_state {
399 ALLOC_DATA = 0,
400 ALLOC_GROW_DEPTH = 1,
401 ALLOC_GROW_HEIGHT = 2,
402 /* ALLOC_UNSTUFF = 3, TBD and rather complicated */
403};
404
405/**
406 * gfs2_bmap_alloc - Build a metadata tree of the requested height
407 * @inode: The GFS2 inode
408 * @lblock: The logical starting block of the extent
409 * @bh_map: This is used to return the mapping details
410 * @mp: The metapath
411 * @sheight: The starting height (i.e. whats already mapped)
412 * @height: The height to build to
413 * @maxlen: The max number of data blocks to alloc
414 *
415 * In this routine we may have to alloc:
416 * i) Indirect blocks to grow the metadata tree height
417 * ii) Indirect blocks to fill in lower part of the metadata tree
418 * iii) Data blocks
419 *
420 * The function is in two parts. The first part works out the total
421 * number of blocks which we need. The second part does the actual
422 * allocation asking for an extent at a time (if enough contiguous free
423 * blocks are available, there will only be one request per bmap call)
424 * and uses the state machine to initialise the blocks in order.
425 *
426 * Returns: errno on error
427 */
428
429static int gfs2_bmap_alloc(struct inode *inode, const sector_t lblock,
430 struct buffer_head *bh_map, struct metapath *mp,
431 const unsigned int sheight,
432 const unsigned int height,
433 const unsigned int maxlen)
434{
435 struct gfs2_inode *ip = GFS2_I(inode);
436 struct gfs2_sbd *sdp = GFS2_SB(inode);
437 struct buffer_head *dibh = mp->mp_bh[0];
438 u64 bn, dblock = 0;
439 unsigned n, i, blks, alloced = 0, iblks = 0, zmpl = 0;
440 unsigned dblks = 0;
441 unsigned ptrs_per_blk;
442 const unsigned end_of_metadata = height - 1;
443 int eob = 0;
444 enum alloc_state state;
445 __be64 *ptr;
446 __be64 zero_bn = 0;
447
448 BUG_ON(sheight < 1);
449 BUG_ON(dibh == NULL);
450
451 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
452
453 if (height == sheight) {
454 struct buffer_head *bh;
455 /* Bottom indirect block exists, find unalloced extent size */
456 ptr = metapointer(end_of_metadata, mp);
457 bh = mp->mp_bh[end_of_metadata];
458 dblks = gfs2_extent_length(bh->b_data, bh->b_size, ptr, maxlen,
459 &eob);
460 BUG_ON(dblks < 1);
461 state = ALLOC_DATA;
462 } else {
463 /* Need to allocate indirect blocks */
464 ptrs_per_blk = height > 1 ? sdp->sd_inptrs : sdp->sd_diptrs;
465 dblks = min(maxlen, ptrs_per_blk - mp->mp_list[end_of_metadata]);
466 if (height == ip->i_height) {
467 /* Writing into existing tree, extend tree down */
468 iblks = height - sheight;
469 state = ALLOC_GROW_DEPTH;
470 } else {
471 /* Building up tree height */
472 state = ALLOC_GROW_HEIGHT;
473 iblks = height - ip->i_height;
474 zmpl = zero_metapath_length(mp, height);
475 iblks -= zmpl;
476 iblks += height;
477 }
478 }
479
480 /* start of the second part of the function (state machine) */
481
482 blks = dblks + iblks;
483 i = sheight;
484 do {
485 n = blks - alloced;
486 bn = gfs2_alloc_block(ip, &n);
487 alloced += n;
488 if (state != ALLOC_DATA || gfs2_is_jdata(ip))
489 gfs2_trans_add_unrevoke(sdp, bn, n);
490 switch (state) {
491 /* Growing height of tree */
492 case ALLOC_GROW_HEIGHT:
493 if (i == 1) {
494 ptr = (__be64 *)(dibh->b_data +
495 sizeof(struct gfs2_dinode));
496 zero_bn = *ptr;
497 }
498 for (; i - 1 < height - ip->i_height && n > 0; i++, n--)
499 gfs2_indirect_init(mp, ip->i_gl, i, 0, bn++);
500 if (i - 1 == height - ip->i_height) {
501 i--;
502 gfs2_buffer_copy_tail(mp->mp_bh[i],
503 sizeof(struct gfs2_meta_header),
504 dibh, sizeof(struct gfs2_dinode));
505 gfs2_buffer_clear_tail(dibh,
506 sizeof(struct gfs2_dinode) +
507 sizeof(__be64));
508 ptr = (__be64 *)(mp->mp_bh[i]->b_data +
509 sizeof(struct gfs2_meta_header));
510 *ptr = zero_bn;
511 state = ALLOC_GROW_DEPTH;
512 for(i = zmpl; i < height; i++) {
513 if (mp->mp_bh[i] == NULL)
514 break;
515 brelse(mp->mp_bh[i]);
516 mp->mp_bh[i] = NULL;
517 }
518 i = zmpl;
519 }
520 if (n == 0)
521 break;
522 /* Branching from existing tree */
523 case ALLOC_GROW_DEPTH:
524 if (i > 1 && i < height)
525 gfs2_trans_add_bh(ip->i_gl, mp->mp_bh[i-1], 1);
526 for (; i < height && n > 0; i++, n--)
527 gfs2_indirect_init(mp, ip->i_gl, i,
528 mp->mp_list[i-1], bn++);
529 if (i == height)
530 state = ALLOC_DATA;
531 if (n == 0)
532 break;
533 /* Tree complete, adding data blocks */
534 case ALLOC_DATA:
535 BUG_ON(n > dblks);
536 BUG_ON(mp->mp_bh[end_of_metadata] == NULL);
537 gfs2_trans_add_bh(ip->i_gl, mp->mp_bh[end_of_metadata], 1);
538 dblks = n;
539 ptr = metapointer(end_of_metadata, mp);
540 dblock = bn;
541 while (n-- > 0)
542 *ptr++ = cpu_to_be64(bn++);
543 break;
544 }
545 } while (state != ALLOC_DATA);
546
547 ip->i_height = height;
548 gfs2_add_inode_blocks(&ip->i_inode, alloced);
549 gfs2_dinode_out(ip, mp->mp_bh[0]->b_data);
550 map_bh(bh_map, inode->i_sb, dblock);
551 bh_map->b_size = dblks << inode->i_blkbits;
552 set_buffer_new(bh_map);
553 return 0;
554}
555
443/** 556/**
444 * gfs2_block_map - Map a block from an inode to a disk block 557 * gfs2_block_map - Map a block from an inode to a disk block
445 * @inode: The inode 558 * @inode: The inode
446 * @lblock: The logical block number 559 * @lblock: The logical block number
447 * @bh_map: The bh to be mapped 560 * @bh_map: The bh to be mapped
561 * @create: True if its ok to alloc blocks to satify the request
448 * 562 *
449 * Find the block number on the current device which corresponds to an 563 * Sets buffer_mapped() if successful, sets buffer_boundary() if a
450 * inode's block. If the block had to be created, "new" will be set. 564 * read of metadata will be required before the next block can be
565 * mapped. Sets buffer_new() if new blocks were allocated.
451 * 566 *
452 * Returns: errno 567 * Returns: errno
453 */ 568 */
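
Among the additions in the hunk above, gfs2_extent_length() is the piece that turns block mapping into extent mapping: starting from one slot in an indirect block it counts how many following pointers continue the same run, either all unallocated (zero) or physically contiguous, and flags when the scan fell off the end of the block. A user-space rendering of the same scan over a big-endian pointer array (sample data invented):

#include <stdio.h>
#include <stdint.h>
#include <endian.h>

/* Count how many consecutive big-endian block pointers continue one extent:
 * either a run of zeros (a hole) or physically contiguous block numbers.
 * *eob is set if the scan ran into the end of the pointer block. */
static unsigned extent_length(const uint64_t *start, unsigned count,
			      const uint64_t *ptr, unsigned limit, int *eob)
{
	const uint64_t *end = start + count;
	const uint64_t *first = ptr;
	uint64_t d = be64toh(*ptr);

	*eob = 0;
	do {
		ptr++;
		if (ptr >= end)
			break;
		if (limit && --limit == 0)
			break;
		if (d)
			d++;                       /* expect the next physical block */
	} while (be64toh(*ptr) == d);
	if (ptr >= end)
		*eob = 1;
	return ptr - first;
}

int main(void)
{
	/* blocks 100,101,102 are contiguous, then a jump to 200 */
	uint64_t ptrs[] = { htobe64(100), htobe64(101), htobe64(102), htobe64(200) };
	int eob;
	unsigned len = extent_length(ptrs, 4, ptrs, 0, &eob);

	printf("extent length %u, end-of-block %d\n", len, eob);  /* 3, 0 */
	return 0;
}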
@@ -457,97 +572,78 @@ int gfs2_block_map(struct inode *inode, sector_t lblock,
457{ 572{
458 struct gfs2_inode *ip = GFS2_I(inode); 573 struct gfs2_inode *ip = GFS2_I(inode);
459 struct gfs2_sbd *sdp = GFS2_SB(inode); 574 struct gfs2_sbd *sdp = GFS2_SB(inode);
460 struct buffer_head *bh; 575 unsigned int bsize = sdp->sd_sb.sb_bsize;
461 unsigned int bsize; 576 const unsigned int maxlen = bh_map->b_size >> inode->i_blkbits;
462 unsigned int height; 577 const u64 *arr = sdp->sd_heightsize;
463 unsigned int end_of_metadata; 578 __be64 *ptr;
464 unsigned int x;
465 int error = 0;
466 int new = 0;
467 u64 dblock = 0;
468 int boundary;
469 unsigned int maxlen = bh_map->b_size >> inode->i_blkbits;
470 struct metapath mp;
471 u64 size; 579 u64 size;
472 struct buffer_head *dibh = NULL; 580 struct metapath mp;
581 int ret;
582 int eob;
583 unsigned int len;
584 struct buffer_head *bh;
585 u8 height;
473 586
474 BUG_ON(maxlen == 0); 587 BUG_ON(maxlen == 0);
475 588
476 if (gfs2_assert_warn(sdp, !gfs2_is_stuffed(ip))) 589 memset(mp.mp_bh, 0, sizeof(mp.mp_bh));
477 return 0; 590 bmap_lock(ip, create);
478
479 bmap_lock(inode, create);
480 clear_buffer_mapped(bh_map); 591 clear_buffer_mapped(bh_map);
481 clear_buffer_new(bh_map); 592 clear_buffer_new(bh_map);
482 clear_buffer_boundary(bh_map); 593 clear_buffer_boundary(bh_map);
483 bsize = gfs2_is_dir(ip) ? sdp->sd_jbsize : sdp->sd_sb.sb_bsize; 594 if (gfs2_is_dir(ip)) {
484 size = (lblock + 1) * bsize; 595 bsize = sdp->sd_jbsize;
485 596 arr = sdp->sd_jheightsize;
486 if (size > ip->i_di.di_size) {
487 height = calc_tree_height(ip, size);
488 if (ip->i_di.di_height < height) {
489 if (!create)
490 goto out_ok;
491
492 error = build_height(inode, height);
493 if (error)
494 goto out_fail;
495 }
496 } 597 }
497 598
498 find_metapath(ip, lblock, &mp); 599 ret = gfs2_meta_inode_buffer(ip, &mp.mp_bh[0]);
499 end_of_metadata = ip->i_di.di_height - 1; 600 if (ret)
500 error = gfs2_meta_inode_buffer(ip, &bh); 601 goto out;
501 if (error)
502 goto out_fail;
503 dibh = bh;
504 get_bh(dibh);
505 602
506 for (x = 0; x < end_of_metadata; x++) { 603 height = ip->i_height;
507 lookup_block(ip, bh, x, &mp, create, &new, &dblock); 604 size = (lblock + 1) * bsize;
508 brelse(bh); 605 while (size > arr[height])
509 if (!dblock) 606 height++;
510 goto out_ok; 607 find_metapath(sdp, lblock, &mp, height);
608 ret = 1;
609 if (height > ip->i_height || gfs2_is_stuffed(ip))
610 goto do_alloc;
611 ret = lookup_metapath(ip, &mp);
612 if (ret < 0)
613 goto out;
614 if (ret != ip->i_height)
615 goto do_alloc;
616 ptr = metapointer(ip->i_height - 1, &mp);
617 if (*ptr == 0)
618 goto do_alloc;
619 map_bh(bh_map, inode->i_sb, be64_to_cpu(*ptr));
620 bh = mp.mp_bh[ip->i_height - 1];
621 len = gfs2_extent_length(bh->b_data, bh->b_size, ptr, maxlen, &eob);
622 bh_map->b_size = (len << inode->i_blkbits);
623 if (eob)
624 set_buffer_boundary(bh_map);
625 ret = 0;
626out:
627 release_metapath(&mp);
628 bmap_unlock(ip, create);
629 return ret;
511 630
512 error = gfs2_meta_indirect_buffer(ip, x+1, dblock, new, &bh); 631do_alloc:
513 if (error) 632 /* All allocations are done here, firstly check create flag */
514 goto out_fail; 633 if (!create) {
634 BUG_ON(gfs2_is_stuffed(ip));
635 ret = 0;
636 goto out;
515 } 637 }
516 638
517 boundary = lookup_block(ip, bh, end_of_metadata, &mp, create, &new, &dblock); 639 /* At this point ret is the tree depth of already allocated blocks */
518 if (dblock) { 640 ret = gfs2_bmap_alloc(inode, lblock, bh_map, &mp, ret, height, maxlen);
519 map_bh(bh_map, inode->i_sb, dblock); 641 goto out;
520 if (boundary)
521 set_buffer_boundary(bh_map);
522 if (new) {
523 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
524 gfs2_dinode_out(ip, dibh->b_data);
525 set_buffer_new(bh_map);
526 goto out_brelse;
527 }
528 while(--maxlen && !buffer_boundary(bh_map)) {
529 u64 eblock;
530
531 mp.mp_list[end_of_metadata]++;
532 boundary = lookup_block(ip, bh, end_of_metadata, &mp, 0, &new, &eblock);
533 if (eblock != ++dblock)
534 break;
535 bh_map->b_size += (1 << inode->i_blkbits);
536 if (boundary)
537 set_buffer_boundary(bh_map);
538 }
539 }
540out_brelse:
541 brelse(bh);
542out_ok:
543 error = 0;
544out_fail:
545 if (dibh)
546 brelse(dibh);
547 bmap_unlock(inode, create);
548 return error;
549} 642}
550 643
644/*
645 * Deprecated: do not use in new code
646 */
551int gfs2_extent_map(struct inode *inode, u64 lblock, int *new, u64 *dblock, unsigned *extlen) 647int gfs2_extent_map(struct inode *inode, u64 lblock, int *new, u64 *dblock, unsigned *extlen)
552{ 648{
553 struct buffer_head bh = { .b_state = 0, .b_blocknr = 0 }; 649 struct buffer_head bh = { .b_state = 0, .b_blocknr = 0 };
@@ -558,7 +654,7 @@ int gfs2_extent_map(struct inode *inode, u64 lblock, int *new, u64 *dblock, unsi
558 BUG_ON(!dblock); 654 BUG_ON(!dblock);
559 BUG_ON(!new); 655 BUG_ON(!new);
560 656
561 bh.b_size = 1 << (inode->i_blkbits + 5); 657 bh.b_size = 1 << (inode->i_blkbits + (create ? 0 : 5));
562 ret = gfs2_block_map(inode, lblock, &bh, create); 658 ret = gfs2_block_map(inode, lblock, &bh, create);
563 *extlen = bh.b_size >> inode->i_blkbits; 659 *extlen = bh.b_size >> inode->i_blkbits;
564 *dblock = bh.b_blocknr; 660 *dblock = bh.b_blocknr;
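
The one-line change above, together with the rewritten gfs2_block_map(), settles the calling convention: bh_map->b_size carries the longest mapping the caller can use on the way in and the length of the extent actually mapped on the way out, with buffer_boundary() and buffer_new() carrying the usual hints. A kernel-side fragment restating that contract (it mirrors gfs2_extent_map() above and is not a standalone program):

struct buffer_head bh = { .b_state = 0, .b_blocknr = 0 };
int ret;

bh.b_size = 32 << inode->i_blkbits;             /* willing to take up to 32 blocks */
ret = gfs2_block_map(inode, lblock, &bh, 0);    /* create == 0: never allocate     */
if (!ret && buffer_mapped(&bh)) {
	u64 first = bh.b_blocknr;                          /* first physical block    */
	unsigned extlen = bh.b_size >> inode->i_blkbits;   /* contiguous blocks mapped */
}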
@@ -621,7 +717,7 @@ static int recursive_scan(struct gfs2_inode *ip, struct buffer_head *dibh,
621 if (error) 717 if (error)
622 goto out; 718 goto out;
623 719
624 if (height < ip->i_di.di_height - 1) 720 if (height < ip->i_height - 1)
625 for (; top < bottom; top++, first = 0) { 721 for (; top < bottom; top++, first = 0) {
626 if (!*top) 722 if (!*top)
627 continue; 723 continue;
@@ -679,7 +775,7 @@ static int do_strip(struct gfs2_inode *ip, struct buffer_head *dibh,
679 sm->sm_first = 0; 775 sm->sm_first = 0;
680 } 776 }
681 777
682 metadata = (height != ip->i_di.di_height - 1); 778 metadata = (height != ip->i_height - 1);
683 if (metadata) 779 if (metadata)
684 revokes = (height) ? sdp->sd_inptrs : sdp->sd_diptrs; 780 revokes = (height) ? sdp->sd_inptrs : sdp->sd_diptrs;
685 781
@@ -713,7 +809,7 @@ static int do_strip(struct gfs2_inode *ip, struct buffer_head *dibh,
713 else 809 else
714 goto out; /* Nothing to do */ 810 goto out; /* Nothing to do */
715 811
716 gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE, 0); 812 gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE);
717 813
718 for (x = 0; x < rlist.rl_rgrps; x++) { 814 for (x = 0; x < rlist.rl_rgrps; x++) {
719 struct gfs2_rgrpd *rgd; 815 struct gfs2_rgrpd *rgd;
@@ -760,10 +856,7 @@ static int do_strip(struct gfs2_inode *ip, struct buffer_head *dibh,
760 } 856 }
761 857
762 *p = 0; 858 *p = 0;
763 if (!ip->i_di.di_blocks) 859 gfs2_add_inode_blocks(&ip->i_inode, -1);
764 gfs2_consist_inode(ip);
765 ip->i_di.di_blocks--;
766 gfs2_set_inode_blocks(&ip->i_inode);
767 } 860 }
768 if (bstart) { 861 if (bstart) {
769 if (metadata) 862 if (metadata)
@@ -804,19 +897,16 @@ static int do_grow(struct gfs2_inode *ip, u64 size)
804 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); 897 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
805 struct gfs2_alloc *al; 898 struct gfs2_alloc *al;
806 struct buffer_head *dibh; 899 struct buffer_head *dibh;
807 unsigned int h;
808 int error; 900 int error;
809 901
810 al = gfs2_alloc_get(ip); 902 al = gfs2_alloc_get(ip);
903 if (!al)
904 return -ENOMEM;
811 905
812 error = gfs2_quota_lock(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE); 906 error = gfs2_quota_lock_check(ip);
813 if (error) 907 if (error)
814 goto out; 908 goto out;
815 909
816 error = gfs2_quota_check(ip, ip->i_inode.i_uid, ip->i_inode.i_gid);
817 if (error)
818 goto out_gunlock_q;
819
820 al->al_requested = sdp->sd_max_height + RES_DATA; 910 al->al_requested = sdp->sd_max_height + RES_DATA;
821 911
822 error = gfs2_inplace_reserve(ip); 912 error = gfs2_inplace_reserve(ip);
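
do_grow() above (and ea_alloc_skeleton() in eattr.c below) swaps the two-step gfs2_quota_lock() + gfs2_quota_check() for a single gfs2_quota_lock_check(). Its body is not part of this diff; the shape it presumably has, built only from the calls the old code made here, is:

/* Assumed implementation -- not shown in this diff. */
static int gfs2_quota_lock_check(struct gfs2_inode *ip)
{
	int error = gfs2_quota_lock(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);

	if (error)
		return error;
	error = gfs2_quota_check(ip, ip->i_inode.i_uid, ip->i_inode.i_gid);
	if (error)
		gfs2_quota_unlock(ip);   /* keep callers to a single error path */
	return error;
}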
@@ -829,34 +919,25 @@ static int do_grow(struct gfs2_inode *ip, u64 size)
829 if (error) 919 if (error)
830 goto out_ipres; 920 goto out_ipres;
831 921
922 error = gfs2_meta_inode_buffer(ip, &dibh);
923 if (error)
924 goto out_end_trans;
925
832 if (size > sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)) { 926 if (size > sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)) {
833 if (gfs2_is_stuffed(ip)) { 927 if (gfs2_is_stuffed(ip)) {
834 error = gfs2_unstuff_dinode(ip, NULL); 928 error = gfs2_unstuff_dinode(ip, NULL);
835 if (error) 929 if (error)
836 goto out_end_trans; 930 goto out_brelse;
837 }
838
839 h = calc_tree_height(ip, size);
840 if (ip->i_di.di_height < h) {
841 down_write(&ip->i_rw_mutex);
842 error = build_height(&ip->i_inode, h);
843 up_write(&ip->i_rw_mutex);
844 if (error)
845 goto out_end_trans;
846 } 931 }
847 } 932 }
848 933
849 ip->i_di.di_size = size; 934 ip->i_di.di_size = size;
850 ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME; 935 ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
851
852 error = gfs2_meta_inode_buffer(ip, &dibh);
853 if (error)
854 goto out_end_trans;
855
856 gfs2_trans_add_bh(ip->i_gl, dibh, 1); 936 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
857 gfs2_dinode_out(ip, dibh->b_data); 937 gfs2_dinode_out(ip, dibh->b_data);
858 brelse(dibh);
859 938
939out_brelse:
940 brelse(dibh);
860out_end_trans: 941out_end_trans:
861 gfs2_trans_end(sdp); 942 gfs2_trans_end(sdp);
862out_ipres: 943out_ipres:
@@ -986,7 +1067,8 @@ out:
986 1067
987static int trunc_dealloc(struct gfs2_inode *ip, u64 size) 1068static int trunc_dealloc(struct gfs2_inode *ip, u64 size)
988{ 1069{
989 unsigned int height = ip->i_di.di_height; 1070 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1071 unsigned int height = ip->i_height;
990 u64 lblock; 1072 u64 lblock;
991 struct metapath mp; 1073 struct metapath mp;
992 int error; 1074 int error;
@@ -994,10 +1076,11 @@ static int trunc_dealloc(struct gfs2_inode *ip, u64 size)
994 if (!size) 1076 if (!size)
995 lblock = 0; 1077 lblock = 0;
996 else 1078 else
997 lblock = (size - 1) >> GFS2_SB(&ip->i_inode)->sd_sb.sb_bsize_shift; 1079 lblock = (size - 1) >> sdp->sd_sb.sb_bsize_shift;
998 1080
999 find_metapath(ip, lblock, &mp); 1081 find_metapath(sdp, lblock, &mp, ip->i_height);
1000 gfs2_alloc_get(ip); 1082 if (!gfs2_alloc_get(ip))
1083 return -ENOMEM;
1001 1084
1002 error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE); 1085 error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
1003 if (error) 1086 if (error)
@@ -1037,10 +1120,8 @@ static int trunc_end(struct gfs2_inode *ip)
1037 goto out; 1120 goto out;
1038 1121
1039 if (!ip->i_di.di_size) { 1122 if (!ip->i_di.di_size) {
1040 ip->i_di.di_height = 0; 1123 ip->i_height = 0;
1041 ip->i_di.di_goal_meta = 1124 ip->i_goal = ip->i_no_addr;
1042 ip->i_di.di_goal_data =
1043 ip->i_no_addr;
1044 gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode)); 1125 gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
1045 } 1126 }
1046 ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME; 1127 ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
@@ -1197,10 +1278,9 @@ int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset,
1197 unsigned int len, int *alloc_required) 1278 unsigned int len, int *alloc_required)
1198{ 1279{
1199 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); 1280 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1200 u64 lblock, lblock_stop, dblock; 1281 struct buffer_head bh;
1201 u32 extlen; 1282 unsigned int shift;
1202 int new = 0; 1283 u64 lblock, lblock_stop, size;
1203 int error = 0;
1204 1284
1205 *alloc_required = 0; 1285 *alloc_required = 0;
1206 1286
@@ -1214,6 +1294,8 @@ int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset,
1214 return 0; 1294 return 0;
1215 } 1295 }
1216 1296
1297 *alloc_required = 1;
1298 shift = sdp->sd_sb.sb_bsize_shift;
1217 if (gfs2_is_dir(ip)) { 1299 if (gfs2_is_dir(ip)) {
1218 unsigned int bsize = sdp->sd_jbsize; 1300 unsigned int bsize = sdp->sd_jbsize;
1219 lblock = offset; 1301 lblock = offset;
@@ -1221,27 +1303,25 @@ int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset,
1221 lblock_stop = offset + len + bsize - 1; 1303 lblock_stop = offset + len + bsize - 1;
1222 do_div(lblock_stop, bsize); 1304 do_div(lblock_stop, bsize);
1223 } else { 1305 } else {
1224 unsigned int shift = sdp->sd_sb.sb_bsize_shift;
1225 u64 end_of_file = (ip->i_di.di_size + sdp->sd_sb.sb_bsize - 1) >> shift; 1306 u64 end_of_file = (ip->i_di.di_size + sdp->sd_sb.sb_bsize - 1) >> shift;
1226 lblock = offset >> shift; 1307 lblock = offset >> shift;
1227 lblock_stop = (offset + len + sdp->sd_sb.sb_bsize - 1) >> shift; 1308 lblock_stop = (offset + len + sdp->sd_sb.sb_bsize - 1) >> shift;
1228 if (lblock_stop > end_of_file) { 1309 if (lblock_stop > end_of_file)
1229 *alloc_required = 1;
1230 return 0; 1310 return 0;
1231 }
1232 } 1311 }
1233 1312
1234 for (; lblock < lblock_stop; lblock += extlen) { 1313 size = (lblock_stop - lblock) << shift;
1235 error = gfs2_extent_map(&ip->i_inode, lblock, &new, &dblock, &extlen); 1314 do {
1236 if (error) 1315 bh.b_state = 0;
1237 return error; 1316 bh.b_size = size;
1238 1317 gfs2_block_map(&ip->i_inode, lblock, &bh, 0);
1239 if (!dblock) { 1318 if (!buffer_mapped(&bh))
1240 *alloc_required = 1;
1241 return 0; 1319 return 0;
1242 } 1320 size -= bh.b_size;
1243 } 1321 lblock += (bh.b_size >> ip->i_inode.i_blkbits);
1322 } while(size > 0);
1244 1323
1324 *alloc_required = 0;
1245 return 0; 1325 return 0;
1246} 1326}
1247 1327
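
The rewritten gfs2_write_alloc_required() above no longer steps block by block through gfs2_extent_map(); it probes the write range with gfs2_block_map() (create == 0) an extent at a time and reports that allocation is needed as soon as any part of the range comes back unmapped. A toy, self-contained model of that loop, with a fake mapper standing in for gfs2_block_map():

#include <stdio.h>
#include <stdint.h>

/* Fake mapper: pretend blocks 0..99 are allocated, everything after is a hole.
 * It returns how many blocks from `lblock` are already backed (0 = hole). */
static unsigned fake_map(uint64_t lblock, unsigned want)
{
	if (lblock >= 100)
		return 0;
	return (lblock + want <= 100) ? want : (unsigned)(100 - lblock);
}

static int write_alloc_required(uint64_t lblock, uint64_t lblock_stop)
{
	while (lblock < lblock_stop) {
		unsigned got = fake_map(lblock, (unsigned)(lblock_stop - lblock));

		if (got == 0)
			return 1;        /* hit an unmapped block: must allocate */
		lblock += got;
	}
	return 0;
}

int main(void)
{
	printf("write to [90,110): alloc required = %d\n", write_alloc_required(90, 110)); /* 1 */
	printf("write to [10,50):  alloc required = %d\n", write_alloc_required(10, 50));  /* 0 */
	return 0;
}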
diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c
index c34709512b19..eed040d8ba3a 100644
--- a/fs/gfs2/dir.c
+++ b/fs/gfs2/dir.c
@@ -159,6 +159,7 @@ static int gfs2_dir_write_data(struct gfs2_inode *ip, const char *buf,
159 unsigned int o; 159 unsigned int o;
160 int copied = 0; 160 int copied = 0;
161 int error = 0; 161 int error = 0;
162 int new = 0;
162 163
163 if (!size) 164 if (!size)
164 return 0; 165 return 0;
@@ -183,7 +184,6 @@ static int gfs2_dir_write_data(struct gfs2_inode *ip, const char *buf,
183 while (copied < size) { 184 while (copied < size) {
184 unsigned int amount; 185 unsigned int amount;
185 struct buffer_head *bh; 186 struct buffer_head *bh;
186 int new = 0;
187 187
188 amount = size - copied; 188 amount = size - copied;
189 if (amount > sdp->sd_sb.sb_bsize - o) 189 if (amount > sdp->sd_sb.sb_bsize - o)
@@ -757,7 +757,7 @@ static struct gfs2_dirent *gfs2_dirent_search(struct inode *inode,
757 757
758 if (ip->i_di.di_flags & GFS2_DIF_EXHASH) { 758 if (ip->i_di.di_flags & GFS2_DIF_EXHASH) {
759 struct gfs2_leaf *leaf; 759 struct gfs2_leaf *leaf;
760 unsigned hsize = 1 << ip->i_di.di_depth; 760 unsigned hsize = 1 << ip->i_depth;
761 unsigned index; 761 unsigned index;
762 u64 ln; 762 u64 ln;
763 if (hsize * sizeof(u64) != ip->i_di.di_size) { 763 if (hsize * sizeof(u64) != ip->i_di.di_size) {
@@ -765,7 +765,7 @@ static struct gfs2_dirent *gfs2_dirent_search(struct inode *inode,
765 return ERR_PTR(-EIO); 765 return ERR_PTR(-EIO);
766 } 766 }
767 767
768 index = name->hash >> (32 - ip->i_di.di_depth); 768 index = name->hash >> (32 - ip->i_depth);
769 error = get_first_leaf(ip, index, &bh); 769 error = get_first_leaf(ip, index, &bh);
770 if (error) 770 if (error)
771 return ERR_PTR(error); 771 return ERR_PTR(error);
@@ -803,14 +803,15 @@ got_dent:
803static struct gfs2_leaf *new_leaf(struct inode *inode, struct buffer_head **pbh, u16 depth) 803static struct gfs2_leaf *new_leaf(struct inode *inode, struct buffer_head **pbh, u16 depth)
804{ 804{
805 struct gfs2_inode *ip = GFS2_I(inode); 805 struct gfs2_inode *ip = GFS2_I(inode);
806 u64 bn = gfs2_alloc_meta(ip); 806 unsigned int n = 1;
807 u64 bn = gfs2_alloc_block(ip, &n);
807 struct buffer_head *bh = gfs2_meta_new(ip->i_gl, bn); 808 struct buffer_head *bh = gfs2_meta_new(ip->i_gl, bn);
808 struct gfs2_leaf *leaf; 809 struct gfs2_leaf *leaf;
809 struct gfs2_dirent *dent; 810 struct gfs2_dirent *dent;
810 struct qstr name = { .name = "", .len = 0, .hash = 0 }; 811 struct qstr name = { .name = "", .len = 0, .hash = 0 };
811 if (!bh) 812 if (!bh)
812 return NULL; 813 return NULL;
813 814 gfs2_trans_add_unrevoke(GFS2_SB(inode), bn, 1);
814 gfs2_trans_add_bh(ip->i_gl, bh, 1); 815 gfs2_trans_add_bh(ip->i_gl, bh, 1);
815 gfs2_metatype_set(bh, GFS2_METATYPE_LF, GFS2_FORMAT_LF); 816 gfs2_metatype_set(bh, GFS2_METATYPE_LF, GFS2_FORMAT_LF);
816 leaf = (struct gfs2_leaf *)bh->b_data; 817 leaf = (struct gfs2_leaf *)bh->b_data;
@@ -905,12 +906,11 @@ static int dir_make_exhash(struct inode *inode)
905 *lp = cpu_to_be64(bn); 906 *lp = cpu_to_be64(bn);
906 907
907 dip->i_di.di_size = sdp->sd_sb.sb_bsize / 2; 908 dip->i_di.di_size = sdp->sd_sb.sb_bsize / 2;
908 dip->i_di.di_blocks++; 909 gfs2_add_inode_blocks(&dip->i_inode, 1);
909 gfs2_set_inode_blocks(&dip->i_inode);
910 dip->i_di.di_flags |= GFS2_DIF_EXHASH; 910 dip->i_di.di_flags |= GFS2_DIF_EXHASH;
911 911
912 for (x = sdp->sd_hash_ptrs, y = -1; x; x >>= 1, y++) ; 912 for (x = sdp->sd_hash_ptrs, y = -1; x; x >>= 1, y++) ;
913 dip->i_di.di_depth = y; 913 dip->i_depth = y;
914 914
915 gfs2_dinode_out(dip, dibh->b_data); 915 gfs2_dinode_out(dip, dibh->b_data);
916 916
@@ -941,7 +941,7 @@ static int dir_split_leaf(struct inode *inode, const struct qstr *name)
941 int x, moved = 0; 941 int x, moved = 0;
942 int error; 942 int error;
943 943
944 index = name->hash >> (32 - dip->i_di.di_depth); 944 index = name->hash >> (32 - dip->i_depth);
945 error = get_leaf_nr(dip, index, &leaf_no); 945 error = get_leaf_nr(dip, index, &leaf_no);
946 if (error) 946 if (error)
947 return error; 947 return error;
@@ -952,7 +952,7 @@ static int dir_split_leaf(struct inode *inode, const struct qstr *name)
952 return error; 952 return error;
953 953
954 oleaf = (struct gfs2_leaf *)obh->b_data; 954 oleaf = (struct gfs2_leaf *)obh->b_data;
955 if (dip->i_di.di_depth == be16_to_cpu(oleaf->lf_depth)) { 955 if (dip->i_depth == be16_to_cpu(oleaf->lf_depth)) {
956 brelse(obh); 956 brelse(obh);
957 return 1; /* can't split */ 957 return 1; /* can't split */
958 } 958 }
@@ -967,10 +967,10 @@ static int dir_split_leaf(struct inode *inode, const struct qstr *name)
967 bn = nbh->b_blocknr; 967 bn = nbh->b_blocknr;
968 968
969 /* Compute the start and len of leaf pointers in the hash table. */ 969 /* Compute the start and len of leaf pointers in the hash table. */
970 len = 1 << (dip->i_di.di_depth - be16_to_cpu(oleaf->lf_depth)); 970 len = 1 << (dip->i_depth - be16_to_cpu(oleaf->lf_depth));
971 half_len = len >> 1; 971 half_len = len >> 1;
972 if (!half_len) { 972 if (!half_len) {
973 printk(KERN_WARNING "di_depth %u lf_depth %u index %u\n", dip->i_di.di_depth, be16_to_cpu(oleaf->lf_depth), index); 973 printk(KERN_WARNING "i_depth %u lf_depth %u index %u\n", dip->i_depth, be16_to_cpu(oleaf->lf_depth), index);
974 gfs2_consist_inode(dip); 974 gfs2_consist_inode(dip);
975 error = -EIO; 975 error = -EIO;
976 goto fail_brelse; 976 goto fail_brelse;
@@ -997,7 +997,7 @@ static int dir_split_leaf(struct inode *inode, const struct qstr *name)
997 kfree(lp); 997 kfree(lp);
998 998
999 /* Compute the divider */ 999 /* Compute the divider */
1000 divider = (start + half_len) << (32 - dip->i_di.di_depth); 1000 divider = (start + half_len) << (32 - dip->i_depth);
1001 1001
1002 /* Copy the entries */ 1002 /* Copy the entries */
1003 dirent_first(dip, obh, &dent); 1003 dirent_first(dip, obh, &dent);
@@ -1021,13 +1021,13 @@ static int dir_split_leaf(struct inode *inode, const struct qstr *name)
1021 1021
1022 new->de_inum = dent->de_inum; /* No endian worries */ 1022 new->de_inum = dent->de_inum; /* No endian worries */
1023 new->de_type = dent->de_type; /* No endian worries */ 1023 new->de_type = dent->de_type; /* No endian worries */
1024 nleaf->lf_entries = cpu_to_be16(be16_to_cpu(nleaf->lf_entries)+1); 1024 be16_add_cpu(&nleaf->lf_entries, 1);
1025 1025
1026 dirent_del(dip, obh, prev, dent); 1026 dirent_del(dip, obh, prev, dent);
1027 1027
1028 if (!oleaf->lf_entries) 1028 if (!oleaf->lf_entries)
1029 gfs2_consist_inode(dip); 1029 gfs2_consist_inode(dip);
1030 oleaf->lf_entries = cpu_to_be16(be16_to_cpu(oleaf->lf_entries)-1); 1030 be16_add_cpu(&oleaf->lf_entries, -1);
1031 1031
1032 if (!prev) 1032 if (!prev)
1033 prev = dent; 1033 prev = dent;
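
The leaf-entry counters above switch from the open-coded cpu_to_be16(be16_to_cpu(x) + 1) dance to be16_add_cpu(), which adds a CPU-order delta to a big-endian field in place. A user-space equivalent showing exactly what that shorthand does:

#include <stdio.h>
#include <stdint.h>
#include <endian.h>

/* Add a CPU-order value to a field stored big-endian, in place. */
static void be16_add_cpu(uint16_t *var, int16_t val)
{
	*var = htobe16((uint16_t)(be16toh(*var) + val));
}

int main(void)
{
	uint16_t lf_entries = htobe16(41);

	be16_add_cpu(&lf_entries, 1);     /* replaces cpu_to_be16(be16_to_cpu(x) + 1) */
	printf("entries = %u\n", be16toh(lf_entries));   /* 42 */
	be16_add_cpu(&lf_entries, -1);
	printf("entries = %u\n", be16toh(lf_entries));   /* 41 */
	return 0;
}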
@@ -1044,8 +1044,7 @@ static int dir_split_leaf(struct inode *inode, const struct qstr *name)
1044 error = gfs2_meta_inode_buffer(dip, &dibh); 1044 error = gfs2_meta_inode_buffer(dip, &dibh);
1045 if (!gfs2_assert_withdraw(GFS2_SB(&dip->i_inode), !error)) { 1045 if (!gfs2_assert_withdraw(GFS2_SB(&dip->i_inode), !error)) {
1046 gfs2_trans_add_bh(dip->i_gl, dibh, 1); 1046 gfs2_trans_add_bh(dip->i_gl, dibh, 1);
1047 dip->i_di.di_blocks++; 1047 gfs2_add_inode_blocks(&dip->i_inode, 1);
1048 gfs2_set_inode_blocks(&dip->i_inode);
1049 gfs2_dinode_out(dip, dibh->b_data); 1048 gfs2_dinode_out(dip, dibh->b_data);
1050 brelse(dibh); 1049 brelse(dibh);
1051 } 1050 }
@@ -1082,7 +1081,7 @@ static int dir_double_exhash(struct gfs2_inode *dip)
1082 int x; 1081 int x;
1083 int error = 0; 1082 int error = 0;
1084 1083
1085 hsize = 1 << dip->i_di.di_depth; 1084 hsize = 1 << dip->i_depth;
1086 if (hsize * sizeof(u64) != dip->i_di.di_size) { 1085 if (hsize * sizeof(u64) != dip->i_di.di_size) {
1087 gfs2_consist_inode(dip); 1086 gfs2_consist_inode(dip);
1088 return -EIO; 1087 return -EIO;
@@ -1090,7 +1089,7 @@ static int dir_double_exhash(struct gfs2_inode *dip)
1090 1089
1091 /* Allocate both the "from" and "to" buffers in one big chunk */ 1090 /* Allocate both the "from" and "to" buffers in one big chunk */
1092 1091
1093 buf = kcalloc(3, sdp->sd_hash_bsize, GFP_KERNEL | __GFP_NOFAIL); 1092 buf = kcalloc(3, sdp->sd_hash_bsize, GFP_NOFS | __GFP_NOFAIL);
1094 1093
1095 for (block = dip->i_di.di_size >> sdp->sd_hash_bsize_shift; block--;) { 1094 for (block = dip->i_di.di_size >> sdp->sd_hash_bsize_shift; block--;) {
1096 error = gfs2_dir_read_data(dip, (char *)buf, 1095 error = gfs2_dir_read_data(dip, (char *)buf,
@@ -1125,7 +1124,7 @@ static int dir_double_exhash(struct gfs2_inode *dip)
1125 1124
1126 error = gfs2_meta_inode_buffer(dip, &dibh); 1125 error = gfs2_meta_inode_buffer(dip, &dibh);
1127 if (!gfs2_assert_withdraw(sdp, !error)) { 1126 if (!gfs2_assert_withdraw(sdp, !error)) {
1128 dip->i_di.di_depth++; 1127 dip->i_depth++;
1129 gfs2_dinode_out(dip, dibh->b_data); 1128 gfs2_dinode_out(dip, dibh->b_data);
1130 brelse(dibh); 1129 brelse(dibh);
1131 } 1130 }
@@ -1370,16 +1369,16 @@ static int dir_e_read(struct inode *inode, u64 *offset, void *opaque,
1370 int error = 0; 1369 int error = 0;
1371 unsigned depth = 0; 1370 unsigned depth = 0;
1372 1371
1373 hsize = 1 << dip->i_di.di_depth; 1372 hsize = 1 << dip->i_depth;
1374 if (hsize * sizeof(u64) != dip->i_di.di_size) { 1373 if (hsize * sizeof(u64) != dip->i_di.di_size) {
1375 gfs2_consist_inode(dip); 1374 gfs2_consist_inode(dip);
1376 return -EIO; 1375 return -EIO;
1377 } 1376 }
1378 1377
1379 hash = gfs2_dir_offset2hash(*offset); 1378 hash = gfs2_dir_offset2hash(*offset);
1380 index = hash >> (32 - dip->i_di.di_depth); 1379 index = hash >> (32 - dip->i_depth);
1381 1380
1382 lp = kmalloc(sdp->sd_hash_bsize, GFP_KERNEL); 1381 lp = kmalloc(sdp->sd_hash_bsize, GFP_NOFS);
1383 if (!lp) 1382 if (!lp)
1384 return -ENOMEM; 1383 return -ENOMEM;
1385 1384
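
The dir_e_read() hunk above shows the exhash directory convention used throughout this file: with 1 << i_depth leaf pointers in the hash table, a name's slot is simply the top i_depth bits of its 32-bit hash, so doubling the table (dir_double_exhash()) just consumes one more bit. A small self-contained illustration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t hash = 0xdeadbeef;   /* any 32-bit name hash */
	unsigned depth;

	for (depth = 1; depth <= 4; depth++)
		printf("depth %u: %u slots, name lands in slot %u\n",
		       depth, 1u << depth, hash >> (32 - depth));
	return 0;
}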
@@ -1405,7 +1404,7 @@ static int dir_e_read(struct inode *inode, u64 *offset, void *opaque,
1405 if (error) 1404 if (error)
1406 break; 1405 break;
1407 1406
1408 len = 1 << (dip->i_di.di_depth - depth); 1407 len = 1 << (dip->i_depth - depth);
1409 index = (index & ~(len - 1)) + len; 1408 index = (index & ~(len - 1)) + len;
1410 } 1409 }
1411 1410
@@ -1444,7 +1443,7 @@ int gfs2_dir_read(struct inode *inode, u64 *offset, void *opaque,
1444 1443
1445 error = -ENOMEM; 1444 error = -ENOMEM;
1446 /* 96 is max number of dirents which can be stuffed into an inode */ 1445 /* 96 is max number of dirents which can be stuffed into an inode */
1447 darr = kmalloc(96 * sizeof(struct gfs2_dirent *), GFP_KERNEL); 1446 darr = kmalloc(96 * sizeof(struct gfs2_dirent *), GFP_NOFS);
1448 if (darr) { 1447 if (darr) {
1449 g.pdent = darr; 1448 g.pdent = darr;
1450 g.offset = 0; 1449 g.offset = 0;
@@ -1549,7 +1548,7 @@ static int dir_new_leaf(struct inode *inode, const struct qstr *name)
1549 u32 index; 1548 u32 index;
1550 u64 bn; 1549 u64 bn;
1551 1550
1552 index = name->hash >> (32 - ip->i_di.di_depth); 1551 index = name->hash >> (32 - ip->i_depth);
1553 error = get_first_leaf(ip, index, &obh); 1552 error = get_first_leaf(ip, index, &obh);
1554 if (error) 1553 if (error)
1555 return error; 1554 return error;
@@ -1579,8 +1578,7 @@ static int dir_new_leaf(struct inode *inode, const struct qstr *name)
1579 if (error) 1578 if (error)
1580 return error; 1579 return error;
1581 gfs2_trans_add_bh(ip->i_gl, bh, 1); 1580 gfs2_trans_add_bh(ip->i_gl, bh, 1);
1582 ip->i_di.di_blocks++; 1581 gfs2_add_inode_blocks(&ip->i_inode, 1);
1583 gfs2_set_inode_blocks(&ip->i_inode);
1584 gfs2_dinode_out(ip, bh->b_data); 1582 gfs2_dinode_out(ip, bh->b_data);
1585 brelse(bh); 1583 brelse(bh);
1586 return 0; 1584 return 0;
@@ -1616,7 +1614,7 @@ int gfs2_dir_add(struct inode *inode, const struct qstr *name,
1616 dent->de_type = cpu_to_be16(type); 1614 dent->de_type = cpu_to_be16(type);
1617 if (ip->i_di.di_flags & GFS2_DIF_EXHASH) { 1615 if (ip->i_di.di_flags & GFS2_DIF_EXHASH) {
1618 leaf = (struct gfs2_leaf *)bh->b_data; 1616 leaf = (struct gfs2_leaf *)bh->b_data;
1619 leaf->lf_entries = cpu_to_be16(be16_to_cpu(leaf->lf_entries) + 1); 1617 be16_add_cpu(&leaf->lf_entries, 1);
1620 } 1618 }
1621 brelse(bh); 1619 brelse(bh);
1622 error = gfs2_meta_inode_buffer(ip, &bh); 1620 error = gfs2_meta_inode_buffer(ip, &bh);
@@ -1641,7 +1639,7 @@ int gfs2_dir_add(struct inode *inode, const struct qstr *name,
1641 continue; 1639 continue;
1642 if (error < 0) 1640 if (error < 0)
1643 break; 1641 break;
1644 if (ip->i_di.di_depth < GFS2_DIR_MAX_DEPTH) { 1642 if (ip->i_depth < GFS2_DIR_MAX_DEPTH) {
1645 error = dir_double_exhash(ip); 1643 error = dir_double_exhash(ip);
1646 if (error) 1644 if (error)
1647 break; 1645 break;
@@ -1785,13 +1783,13 @@ static int foreach_leaf(struct gfs2_inode *dip, leaf_call_t lc, void *data)
1785 u64 leaf_no; 1783 u64 leaf_no;
1786 int error = 0; 1784 int error = 0;
1787 1785
1788 hsize = 1 << dip->i_di.di_depth; 1786 hsize = 1 << dip->i_depth;
1789 if (hsize * sizeof(u64) != dip->i_di.di_size) { 1787 if (hsize * sizeof(u64) != dip->i_di.di_size) {
1790 gfs2_consist_inode(dip); 1788 gfs2_consist_inode(dip);
1791 return -EIO; 1789 return -EIO;
1792 } 1790 }
1793 1791
1794 lp = kmalloc(sdp->sd_hash_bsize, GFP_KERNEL); 1792 lp = kmalloc(sdp->sd_hash_bsize, GFP_NOFS);
1795 if (!lp) 1793 if (!lp)
1796 return -ENOMEM; 1794 return -ENOMEM;
1797 1795
@@ -1817,7 +1815,7 @@ static int foreach_leaf(struct gfs2_inode *dip, leaf_call_t lc, void *data)
1817 if (error) 1815 if (error)
1818 goto out; 1816 goto out;
1819 leaf = (struct gfs2_leaf *)bh->b_data; 1817 leaf = (struct gfs2_leaf *)bh->b_data;
1820 len = 1 << (dip->i_di.di_depth - be16_to_cpu(leaf->lf_depth)); 1818 len = 1 << (dip->i_depth - be16_to_cpu(leaf->lf_depth));
1821 brelse(bh); 1819 brelse(bh);
1822 1820
1823 error = lc(dip, index, len, leaf_no, data); 1821 error = lc(dip, index, len, leaf_no, data);
@@ -1866,15 +1864,18 @@ static int leaf_dealloc(struct gfs2_inode *dip, u32 index, u32 len,
1866 1864
1867 memset(&rlist, 0, sizeof(struct gfs2_rgrp_list)); 1865 memset(&rlist, 0, sizeof(struct gfs2_rgrp_list));
1868 1866
1869 ht = kzalloc(size, GFP_KERNEL); 1867 ht = kzalloc(size, GFP_NOFS);
1870 if (!ht) 1868 if (!ht)
1871 return -ENOMEM; 1869 return -ENOMEM;
1872 1870
1873 gfs2_alloc_get(dip); 1871 if (!gfs2_alloc_get(dip)) {
1872 error = -ENOMEM;
1873 goto out;
1874 }
1874 1875
1875 error = gfs2_quota_hold(dip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE); 1876 error = gfs2_quota_hold(dip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
1876 if (error) 1877 if (error)
1877 goto out; 1878 goto out_put;
1878 1879
1879 error = gfs2_rindex_hold(sdp, &dip->i_alloc->al_ri_gh); 1880 error = gfs2_rindex_hold(sdp, &dip->i_alloc->al_ri_gh);
1880 if (error) 1881 if (error)
@@ -1894,7 +1895,7 @@ static int leaf_dealloc(struct gfs2_inode *dip, u32 index, u32 len,
1894 l_blocks++; 1895 l_blocks++;
1895 } 1896 }
1896 1897
1897 gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE, 0); 1898 gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE);
1898 1899
1899 for (x = 0; x < rlist.rl_rgrps; x++) { 1900 for (x = 0; x < rlist.rl_rgrps; x++) {
1900 struct gfs2_rgrpd *rgd; 1901 struct gfs2_rgrpd *rgd;
@@ -1921,11 +1922,7 @@ static int leaf_dealloc(struct gfs2_inode *dip, u32 index, u32 len,
1921 brelse(bh); 1922 brelse(bh);
1922 1923
1923 gfs2_free_meta(dip, blk, 1); 1924 gfs2_free_meta(dip, blk, 1);
1924 1925 gfs2_add_inode_blocks(&dip->i_inode, -1);
1925 if (!dip->i_di.di_blocks)
1926 gfs2_consist_inode(dip);
1927 dip->i_di.di_blocks--;
1928 gfs2_set_inode_blocks(&dip->i_inode);
1929 } 1926 }
1930 1927
1931 error = gfs2_dir_write_data(dip, ht, index * sizeof(u64), size); 1928 error = gfs2_dir_write_data(dip, ht, index * sizeof(u64), size);
@@ -1952,8 +1949,9 @@ out_rlist:
1952 gfs2_glock_dq_uninit(&dip->i_alloc->al_ri_gh); 1949 gfs2_glock_dq_uninit(&dip->i_alloc->al_ri_gh);
1953out_qs: 1950out_qs:
1954 gfs2_quota_unhold(dip); 1951 gfs2_quota_unhold(dip);
1955out: 1952out_put:
1956 gfs2_alloc_put(dip); 1953 gfs2_alloc_put(dip);
1954out:
1957 kfree(ht); 1955 kfree(ht);
1958 return error; 1956 return error;
1959} 1957}
diff --git a/fs/gfs2/eattr.c b/fs/gfs2/eattr.c
index bee99704ea10..e3f76f451b0a 100644
--- a/fs/gfs2/eattr.c
+++ b/fs/gfs2/eattr.c
@@ -277,10 +277,7 @@ static int ea_dealloc_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
277 } 277 }
278 278
279 *dataptrs = 0; 279 *dataptrs = 0;
280 if (!ip->i_di.di_blocks) 280 gfs2_add_inode_blocks(&ip->i_inode, -1);
281 gfs2_consist_inode(ip);
282 ip->i_di.di_blocks--;
283 gfs2_set_inode_blocks(&ip->i_inode);
284 } 281 }
285 if (bstart) 282 if (bstart)
286 gfs2_free_meta(ip, bstart, blen); 283 gfs2_free_meta(ip, bstart, blen);
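
This eattr.c hunk repeats a pattern applied across the whole patch: the open-coded underflow check, di_blocks decrement and gfs2_set_inode_blocks() call collapse into gfs2_add_inode_blocks() with a signed delta. The helper's body is not in this diff; a plausible sketch, assuming the VFS i_blocks count is kept in 512-byte units and scaled from filesystem blocks:

/* Assumed shape only -- not shown in this diff. */
static inline void gfs2_add_inode_blocks(struct inode *inode, s64 change)
{
	s64 sectors = change * (GFS2_SB(inode)->sd_sb.sb_bsize >> 9);  /* assumption */

	BUG_ON(change < 0 && inode->i_blocks < (u64)(-sectors));
	inode->i_blocks += sectors;
}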
@@ -321,6 +318,8 @@ static int ea_remove_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
321 int error; 318 int error;
322 319
323 al = gfs2_alloc_get(ip); 320 al = gfs2_alloc_get(ip);
321 if (!al)
322 return -ENOMEM;
324 323
325 error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE); 324 error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
326 if (error) 325 if (error)
@@ -449,7 +448,7 @@ static int ea_get_unstuffed(struct gfs2_inode *ip, struct gfs2_ea_header *ea,
449 unsigned int x; 448 unsigned int x;
450 int error = 0; 449 int error = 0;
451 450
452 bh = kcalloc(nptrs, sizeof(struct buffer_head *), GFP_KERNEL); 451 bh = kcalloc(nptrs, sizeof(struct buffer_head *), GFP_NOFS);
453 if (!bh) 452 if (!bh)
454 return -ENOMEM; 453 return -ENOMEM;
455 454
@@ -582,10 +581,11 @@ static int ea_alloc_blk(struct gfs2_inode *ip, struct buffer_head **bhp)
582{ 581{
583 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); 582 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
584 struct gfs2_ea_header *ea; 583 struct gfs2_ea_header *ea;
584 unsigned int n = 1;
585 u64 block; 585 u64 block;
586 586
587 block = gfs2_alloc_meta(ip); 587 block = gfs2_alloc_block(ip, &n);
588 588 gfs2_trans_add_unrevoke(sdp, block, 1);
589 *bhp = gfs2_meta_new(ip->i_gl, block); 589 *bhp = gfs2_meta_new(ip->i_gl, block);
590 gfs2_trans_add_bh(ip->i_gl, *bhp, 1); 590 gfs2_trans_add_bh(ip->i_gl, *bhp, 1);
591 gfs2_metatype_set(*bhp, GFS2_METATYPE_EA, GFS2_FORMAT_EA); 591 gfs2_metatype_set(*bhp, GFS2_METATYPE_EA, GFS2_FORMAT_EA);
@@ -597,8 +597,7 @@ static int ea_alloc_blk(struct gfs2_inode *ip, struct buffer_head **bhp)
597 ea->ea_flags = GFS2_EAFLAG_LAST; 597 ea->ea_flags = GFS2_EAFLAG_LAST;
598 ea->ea_num_ptrs = 0; 598 ea->ea_num_ptrs = 0;
599 599
600 ip->i_di.di_blocks++; 600 gfs2_add_inode_blocks(&ip->i_inode, 1);
601 gfs2_set_inode_blocks(&ip->i_inode);
602 601
603 return 0; 602 return 0;
604} 603}
@@ -642,15 +641,15 @@ static int ea_write(struct gfs2_inode *ip, struct gfs2_ea_header *ea,
642 struct buffer_head *bh; 641 struct buffer_head *bh;
643 u64 block; 642 u64 block;
644 int mh_size = sizeof(struct gfs2_meta_header); 643 int mh_size = sizeof(struct gfs2_meta_header);
644 unsigned int n = 1;
645 645
646 block = gfs2_alloc_meta(ip); 646 block = gfs2_alloc_block(ip, &n);
647 647 gfs2_trans_add_unrevoke(sdp, block, 1);
648 bh = gfs2_meta_new(ip->i_gl, block); 648 bh = gfs2_meta_new(ip->i_gl, block);
649 gfs2_trans_add_bh(ip->i_gl, bh, 1); 649 gfs2_trans_add_bh(ip->i_gl, bh, 1);
650 gfs2_metatype_set(bh, GFS2_METATYPE_ED, GFS2_FORMAT_ED); 650 gfs2_metatype_set(bh, GFS2_METATYPE_ED, GFS2_FORMAT_ED);
651 651
652 ip->i_di.di_blocks++; 652 gfs2_add_inode_blocks(&ip->i_inode, 1);
653 gfs2_set_inode_blocks(&ip->i_inode);
654 653
655 copy = data_len > sdp->sd_jbsize ? sdp->sd_jbsize : 654 copy = data_len > sdp->sd_jbsize ? sdp->sd_jbsize :
656 data_len; 655 data_len;
@@ -684,15 +683,13 @@ static int ea_alloc_skeleton(struct gfs2_inode *ip, struct gfs2_ea_request *er,
684 int error; 683 int error;
685 684
686 al = gfs2_alloc_get(ip); 685 al = gfs2_alloc_get(ip);
686 if (!al)
687 return -ENOMEM;
687 688
688 error = gfs2_quota_lock(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE); 689 error = gfs2_quota_lock_check(ip);
689 if (error) 690 if (error)
690 goto out; 691 goto out;
691 692
692 error = gfs2_quota_check(ip, ip->i_inode.i_uid, ip->i_inode.i_gid);
693 if (error)
694 goto out_gunlock_q;
695
696 al->al_requested = blks; 693 al->al_requested = blks;
697 694
698 error = gfs2_inplace_reserve(ip); 695 error = gfs2_inplace_reserve(ip);
@@ -966,9 +963,9 @@ static int ea_set_block(struct gfs2_inode *ip, struct gfs2_ea_request *er,
966 gfs2_trans_add_bh(ip->i_gl, indbh, 1); 963 gfs2_trans_add_bh(ip->i_gl, indbh, 1);
967 } else { 964 } else {
968 u64 blk; 965 u64 blk;
969 966 unsigned int n = 1;
970 blk = gfs2_alloc_meta(ip); 967 blk = gfs2_alloc_block(ip, &n);
971 968 gfs2_trans_add_unrevoke(sdp, blk, 1);
972 indbh = gfs2_meta_new(ip->i_gl, blk); 969 indbh = gfs2_meta_new(ip->i_gl, blk);
973 gfs2_trans_add_bh(ip->i_gl, indbh, 1); 970 gfs2_trans_add_bh(ip->i_gl, indbh, 1);
974 gfs2_metatype_set(indbh, GFS2_METATYPE_IN, GFS2_FORMAT_IN); 971 gfs2_metatype_set(indbh, GFS2_METATYPE_IN, GFS2_FORMAT_IN);
@@ -978,8 +975,7 @@ static int ea_set_block(struct gfs2_inode *ip, struct gfs2_ea_request *er,
978 *eablk = cpu_to_be64(ip->i_di.di_eattr); 975 *eablk = cpu_to_be64(ip->i_di.di_eattr);
979 ip->i_di.di_eattr = blk; 976 ip->i_di.di_eattr = blk;
980 ip->i_di.di_flags |= GFS2_DIF_EA_INDIRECT; 977 ip->i_di.di_flags |= GFS2_DIF_EA_INDIRECT;
981 ip->i_di.di_blocks++; 978 gfs2_add_inode_blocks(&ip->i_inode, 1);
982 gfs2_set_inode_blocks(&ip->i_inode);
983 979
984 eablk++; 980 eablk++;
985 } 981 }
@@ -1210,7 +1206,7 @@ static int ea_acl_chmod_unstuffed(struct gfs2_inode *ip,
1210 unsigned int x; 1206 unsigned int x;
1211 int error; 1207 int error;
1212 1208
1213 bh = kcalloc(nptrs, sizeof(struct buffer_head *), GFP_KERNEL); 1209 bh = kcalloc(nptrs, sizeof(struct buffer_head *), GFP_NOFS);
1214 if (!bh) 1210 if (!bh)
1215 return -ENOMEM; 1211 return -ENOMEM;
1216 1212
@@ -1347,7 +1343,7 @@ static int ea_dealloc_indirect(struct gfs2_inode *ip)
1347 else 1343 else
1348 goto out; 1344 goto out;
1349 1345
1350 gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE, 0); 1346 gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE);
1351 1347
1352 for (x = 0; x < rlist.rl_rgrps; x++) { 1348 for (x = 0; x < rlist.rl_rgrps; x++) {
1353 struct gfs2_rgrpd *rgd; 1349 struct gfs2_rgrpd *rgd;
@@ -1387,10 +1383,7 @@ static int ea_dealloc_indirect(struct gfs2_inode *ip)
1387 } 1383 }
1388 1384
1389 *eablk = 0; 1385 *eablk = 0;
1390 if (!ip->i_di.di_blocks) 1386 gfs2_add_inode_blocks(&ip->i_inode, -1);
1391 gfs2_consist_inode(ip);
1392 ip->i_di.di_blocks--;
1393 gfs2_set_inode_blocks(&ip->i_inode);
1394 } 1387 }
1395 if (bstart) 1388 if (bstart)
1396 gfs2_free_meta(ip, bstart, blen); 1389 gfs2_free_meta(ip, bstart, blen);
@@ -1442,10 +1435,7 @@ static int ea_dealloc_block(struct gfs2_inode *ip)
1442 gfs2_free_meta(ip, ip->i_di.di_eattr, 1); 1435 gfs2_free_meta(ip, ip->i_di.di_eattr, 1);
1443 1436
1444 ip->i_di.di_eattr = 0; 1437 ip->i_di.di_eattr = 0;
1445 if (!ip->i_di.di_blocks) 1438 gfs2_add_inode_blocks(&ip->i_inode, -1);
1446 gfs2_consist_inode(ip);
1447 ip->i_di.di_blocks--;
1448 gfs2_set_inode_blocks(&ip->i_inode);
1449 1439
1450 error = gfs2_meta_inode_buffer(ip, &dibh); 1440 error = gfs2_meta_inode_buffer(ip, &dibh);
1451 if (!error) { 1441 if (!error) {
@@ -1474,6 +1464,8 @@ int gfs2_ea_dealloc(struct gfs2_inode *ip)
1474 int error; 1464 int error;
1475 1465
1476 al = gfs2_alloc_get(ip); 1466 al = gfs2_alloc_get(ip);
1467 if (!al)
1468 return -ENOMEM;
1477 1469
1478 error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE); 1470 error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
1479 if (error) 1471 if (error)
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 7175a4d06435..d636b3e80f5d 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. 2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved. 3 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
4 * 4 *
5 * This copyrighted material is made available to anyone wishing to use, 5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions 6 * modify, copy, or redistribute it subject to the terms and conditions
@@ -35,7 +35,6 @@
35#include "glock.h" 35#include "glock.h"
36#include "glops.h" 36#include "glops.h"
37#include "inode.h" 37#include "inode.h"
38#include "lm.h"
39#include "lops.h" 38#include "lops.h"
40#include "meta_io.h" 39#include "meta_io.h"
41#include "quota.h" 40#include "quota.h"
@@ -183,7 +182,8 @@ static void glock_free(struct gfs2_glock *gl)
183 struct gfs2_sbd *sdp = gl->gl_sbd; 182 struct gfs2_sbd *sdp = gl->gl_sbd;
184 struct inode *aspace = gl->gl_aspace; 183 struct inode *aspace = gl->gl_aspace;
185 184
186 gfs2_lm_put_lock(sdp, gl->gl_lock); 185 if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
186 sdp->sd_lockstruct.ls_ops->lm_put_lock(gl->gl_lock);
187 187
188 if (aspace) 188 if (aspace)
189 gfs2_aspace_put(aspace); 189 gfs2_aspace_put(aspace);
@@ -197,7 +197,7 @@ static void glock_free(struct gfs2_glock *gl)
197 * 197 *
198 */ 198 */
199 199
200void gfs2_glock_hold(struct gfs2_glock *gl) 200static void gfs2_glock_hold(struct gfs2_glock *gl)
201{ 201{
202 atomic_inc(&gl->gl_ref); 202 atomic_inc(&gl->gl_ref);
203} 203}
@@ -293,6 +293,16 @@ static void glock_work_func(struct work_struct *work)
293 gfs2_glock_put(gl); 293 gfs2_glock_put(gl);
294} 294}
295 295
296static int gfs2_lm_get_lock(struct gfs2_sbd *sdp, struct lm_lockname *name,
297 void **lockp)
298{
299 int error = -EIO;
300 if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
301 error = sdp->sd_lockstruct.ls_ops->lm_get_lock(
302 sdp->sd_lockstruct.ls_lockspace, name, lockp);
303 return error;
304}
305
296/** 306/**
297 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist 307 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
298 * @sdp: The GFS2 superblock 308 * @sdp: The GFS2 superblock
@@ -338,8 +348,6 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
338 gl->gl_ip = 0; 348 gl->gl_ip = 0;
339 gl->gl_ops = glops; 349 gl->gl_ops = glops;
340 gl->gl_req_gh = NULL; 350 gl->gl_req_gh = NULL;
341 gl->gl_req_bh = NULL;
342 gl->gl_vn = 0;
343 gl->gl_stamp = jiffies; 351 gl->gl_stamp = jiffies;
344 gl->gl_tchange = jiffies; 352 gl->gl_tchange = jiffies;
345 gl->gl_object = NULL; 353 gl->gl_object = NULL;
@@ -595,11 +603,12 @@ static void run_queue(struct gfs2_glock *gl)
595 blocked = rq_mutex(gh); 603 blocked = rq_mutex(gh);
596 } else if (test_bit(GLF_DEMOTE, &gl->gl_flags)) { 604 } else if (test_bit(GLF_DEMOTE, &gl->gl_flags)) {
597 blocked = rq_demote(gl); 605 blocked = rq_demote(gl);
598 if (gl->gl_waiters2 && !blocked) { 606 if (test_bit(GLF_WAITERS2, &gl->gl_flags) &&
607 !blocked) {
599 set_bit(GLF_DEMOTE, &gl->gl_flags); 608 set_bit(GLF_DEMOTE, &gl->gl_flags);
600 gl->gl_demote_state = LM_ST_UNLOCKED; 609 gl->gl_demote_state = LM_ST_UNLOCKED;
601 } 610 }
602 gl->gl_waiters2 = 0; 611 clear_bit(GLF_WAITERS2, &gl->gl_flags);
603 } else if (!list_empty(&gl->gl_waiters3)) { 612 } else if (!list_empty(&gl->gl_waiters3)) {
604 gh = list_entry(gl->gl_waiters3.next, 613 gh = list_entry(gl->gl_waiters3.next,
605 struct gfs2_holder, gh_list); 614 struct gfs2_holder, gh_list);
@@ -710,7 +719,7 @@ static void handle_callback(struct gfs2_glock *gl, unsigned int state,
710 } else if (gl->gl_demote_state != LM_ST_UNLOCKED && 719 } else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
711 gl->gl_demote_state != state) { 720 gl->gl_demote_state != state) {
712 if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) 721 if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
713 gl->gl_waiters2 = 1; 722 set_bit(GLF_WAITERS2, &gl->gl_flags);
714 else 723 else
715 gl->gl_demote_state = LM_ST_UNLOCKED; 724 gl->gl_demote_state = LM_ST_UNLOCKED;
716 } 725 }
@@ -743,6 +752,43 @@ static void state_change(struct gfs2_glock *gl, unsigned int new_state)
743} 752}
744 753
745/** 754/**
755 * drop_bh - Called after a lock module unlock completes
756 * @gl: the glock
757 * @ret: the return status
758 *
759 * Doesn't wake up the process waiting on the struct gfs2_holder (if any)
760 * Doesn't drop the reference on the glock the top half took out
761 *
762 */
763
764static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
765{
766 struct gfs2_sbd *sdp = gl->gl_sbd;
767 struct gfs2_holder *gh = gl->gl_req_gh;
768
769 gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
770 gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
771 gfs2_assert_warn(sdp, !ret);
772
773 state_change(gl, LM_ST_UNLOCKED);
774
775 if (test_and_clear_bit(GLF_CONV_DEADLK, &gl->gl_flags)) {
776 spin_lock(&gl->gl_spin);
777 gh->gh_error = 0;
778 spin_unlock(&gl->gl_spin);
779 gfs2_glock_xmote_th(gl, gl->gl_req_gh);
780 gfs2_glock_put(gl);
781 return;
782 }
783
784 spin_lock(&gl->gl_spin);
785 gfs2_demote_wake(gl);
786 clear_bit(GLF_LOCK, &gl->gl_flags);
787 spin_unlock(&gl->gl_spin);
788 gfs2_glock_put(gl);
789}
790
791/**
746 * xmote_bh - Called after the lock module is done acquiring a lock 792 * xmote_bh - Called after the lock module is done acquiring a lock
747 * @gl: The glock in question 793 * @gl: The glock in question
748 * @ret: the int returned from the lock module 794 * @ret: the int returned from the lock module
@@ -754,25 +800,19 @@ static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
754 struct gfs2_sbd *sdp = gl->gl_sbd; 800 struct gfs2_sbd *sdp = gl->gl_sbd;
755 const struct gfs2_glock_operations *glops = gl->gl_ops; 801 const struct gfs2_glock_operations *glops = gl->gl_ops;
756 struct gfs2_holder *gh = gl->gl_req_gh; 802 struct gfs2_holder *gh = gl->gl_req_gh;
757 int prev_state = gl->gl_state;
758 int op_done = 1; 803 int op_done = 1;
759 804
805 if (!gh && (ret & LM_OUT_ST_MASK) == LM_ST_UNLOCKED) {
806 drop_bh(gl, ret);
807 return;
808 }
809
760 gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags)); 810 gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
761 gfs2_assert_warn(sdp, list_empty(&gl->gl_holders)); 811 gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
762 gfs2_assert_warn(sdp, !(ret & LM_OUT_ASYNC)); 812 gfs2_assert_warn(sdp, !(ret & LM_OUT_ASYNC));
763 813
764 state_change(gl, ret & LM_OUT_ST_MASK); 814 state_change(gl, ret & LM_OUT_ST_MASK);
765 815
766 if (prev_state != LM_ST_UNLOCKED && !(ret & LM_OUT_CACHEABLE)) {
767 if (glops->go_inval)
768 glops->go_inval(gl, DIO_METADATA);
769 } else if (gl->gl_state == LM_ST_DEFERRED) {
770 /* We might not want to do this here.
771 Look at moving to the inode glops. */
772 if (glops->go_inval)
773 glops->go_inval(gl, 0);
774 }
775
776 /* Deal with each possible exit condition */ 816 /* Deal with each possible exit condition */
777 817
778 if (!gh) { 818 if (!gh) {
@@ -782,7 +822,6 @@ static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
782 } else { 822 } else {
783 spin_lock(&gl->gl_spin); 823 spin_lock(&gl->gl_spin);
784 if (gl->gl_state != gl->gl_demote_state) { 824 if (gl->gl_state != gl->gl_demote_state) {
785 gl->gl_req_bh = NULL;
786 spin_unlock(&gl->gl_spin); 825 spin_unlock(&gl->gl_spin);
787 gfs2_glock_drop_th(gl); 826 gfs2_glock_drop_th(gl);
788 gfs2_glock_put(gl); 827 gfs2_glock_put(gl);
@@ -793,6 +832,14 @@ static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
793 } 832 }
794 } else { 833 } else {
795 spin_lock(&gl->gl_spin); 834 spin_lock(&gl->gl_spin);
835 if (ret & LM_OUT_CONV_DEADLK) {
836 gh->gh_error = 0;
837 set_bit(GLF_CONV_DEADLK, &gl->gl_flags);
838 spin_unlock(&gl->gl_spin);
839 gfs2_glock_drop_th(gl);
840 gfs2_glock_put(gl);
841 return;
842 }
796 list_del_init(&gh->gh_list); 843 list_del_init(&gh->gh_list);
797 gh->gh_error = -EIO; 844 gh->gh_error = -EIO;
798 if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) 845 if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
@@ -824,7 +871,6 @@ out:
824 if (op_done) { 871 if (op_done) {
825 spin_lock(&gl->gl_spin); 872 spin_lock(&gl->gl_spin);
826 gl->gl_req_gh = NULL; 873 gl->gl_req_gh = NULL;
827 gl->gl_req_bh = NULL;
828 clear_bit(GLF_LOCK, &gl->gl_flags); 874 clear_bit(GLF_LOCK, &gl->gl_flags);
829 spin_unlock(&gl->gl_spin); 875 spin_unlock(&gl->gl_spin);
830 } 876 }
@@ -835,6 +881,17 @@ out:
835 gfs2_holder_wake(gh); 881 gfs2_holder_wake(gh);
836} 882}
837 883
884static unsigned int gfs2_lm_lock(struct gfs2_sbd *sdp, void *lock,
885 unsigned int cur_state, unsigned int req_state,
886 unsigned int flags)
887{
888 int ret = 0;
889 if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
890 ret = sdp->sd_lockstruct.ls_ops->lm_lock(lock, cur_state,
891 req_state, flags);
892 return ret;
893}
894
838/** 895/**
839 * gfs2_glock_xmote_th - Call into the lock module to acquire or change a glock 896 * gfs2_glock_xmote_th - Call into the lock module to acquire or change a glock
840 * @gl: The glock in question 897 * @gl: The glock in question
@@ -856,6 +913,8 @@ static void gfs2_glock_xmote_th(struct gfs2_glock *gl, struct gfs2_holder *gh)
856 913
857 if (glops->go_xmote_th) 914 if (glops->go_xmote_th)
858 glops->go_xmote_th(gl); 915 glops->go_xmote_th(gl);
916 if (state == LM_ST_DEFERRED && glops->go_inval)
917 glops->go_inval(gl, DIO_METADATA);
859 918
860 gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags)); 919 gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
861 gfs2_assert_warn(sdp, list_empty(&gl->gl_holders)); 920 gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
@@ -863,7 +922,6 @@ static void gfs2_glock_xmote_th(struct gfs2_glock *gl, struct gfs2_holder *gh)
863 gfs2_assert_warn(sdp, state != gl->gl_state); 922 gfs2_assert_warn(sdp, state != gl->gl_state);
864 923
865 gfs2_glock_hold(gl); 924 gfs2_glock_hold(gl);
866 gl->gl_req_bh = xmote_bh;
867 925
868 lck_ret = gfs2_lm_lock(sdp, gl->gl_lock, gl->gl_state, state, lck_flags); 926 lck_ret = gfs2_lm_lock(sdp, gl->gl_lock, gl->gl_state, state, lck_flags);
869 927
@@ -876,49 +934,13 @@ static void gfs2_glock_xmote_th(struct gfs2_glock *gl, struct gfs2_holder *gh)
876 xmote_bh(gl, lck_ret); 934 xmote_bh(gl, lck_ret);
877} 935}
878 936
879/** 937static unsigned int gfs2_lm_unlock(struct gfs2_sbd *sdp, void *lock,
880 * drop_bh - Called after a lock module unlock completes 938 unsigned int cur_state)
881 * @gl: the glock
882 * @ret: the return status
883 *
884 * Doesn't wake up the process waiting on the struct gfs2_holder (if any)
885 * Doesn't drop the reference on the glock the top half took out
886 *
887 */
888
889static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
890{ 939{
891 struct gfs2_sbd *sdp = gl->gl_sbd; 940 int ret = 0;
892 const struct gfs2_glock_operations *glops = gl->gl_ops; 941 if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
893 struct gfs2_holder *gh = gl->gl_req_gh; 942 ret = sdp->sd_lockstruct.ls_ops->lm_unlock(lock, cur_state);
894 943 return ret;
895 gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
896 gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
897 gfs2_assert_warn(sdp, !ret);
898
899 state_change(gl, LM_ST_UNLOCKED);
900
901 if (glops->go_inval)
902 glops->go_inval(gl, DIO_METADATA);
903
904 if (gh) {
905 spin_lock(&gl->gl_spin);
906 list_del_init(&gh->gh_list);
907 gh->gh_error = 0;
908 spin_unlock(&gl->gl_spin);
909 }
910
911 spin_lock(&gl->gl_spin);
912 gfs2_demote_wake(gl);
913 gl->gl_req_gh = NULL;
914 gl->gl_req_bh = NULL;
915 clear_bit(GLF_LOCK, &gl->gl_flags);
916 spin_unlock(&gl->gl_spin);
917
918 gfs2_glock_put(gl);
919
920 if (gh)
921 gfs2_holder_wake(gh);
922} 944}
923 945
924/** 946/**
@@ -935,13 +957,14 @@ static void gfs2_glock_drop_th(struct gfs2_glock *gl)
935 957
936 if (glops->go_xmote_th) 958 if (glops->go_xmote_th)
937 glops->go_xmote_th(gl); 959 glops->go_xmote_th(gl);
960 if (glops->go_inval)
961 glops->go_inval(gl, DIO_METADATA);
938 962
939 gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags)); 963 gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
940 gfs2_assert_warn(sdp, list_empty(&gl->gl_holders)); 964 gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
941 gfs2_assert_warn(sdp, gl->gl_state != LM_ST_UNLOCKED); 965 gfs2_assert_warn(sdp, gl->gl_state != LM_ST_UNLOCKED);
942 966
943 gfs2_glock_hold(gl); 967 gfs2_glock_hold(gl);
944 gl->gl_req_bh = drop_bh;
945 968
946 ret = gfs2_lm_unlock(sdp, gl->gl_lock, gl->gl_state); 969 ret = gfs2_lm_unlock(sdp, gl->gl_lock, gl->gl_state);
947 970
@@ -964,16 +987,17 @@ static void gfs2_glock_drop_th(struct gfs2_glock *gl)
964static void do_cancels(struct gfs2_holder *gh) 987static void do_cancels(struct gfs2_holder *gh)
965{ 988{
966 struct gfs2_glock *gl = gh->gh_gl; 989 struct gfs2_glock *gl = gh->gh_gl;
990 struct gfs2_sbd *sdp = gl->gl_sbd;
967 991
968 spin_lock(&gl->gl_spin); 992 spin_lock(&gl->gl_spin);
969 993
970 while (gl->gl_req_gh != gh && 994 while (gl->gl_req_gh != gh &&
971 !test_bit(HIF_HOLDER, &gh->gh_iflags) && 995 !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
972 !list_empty(&gh->gh_list)) { 996 !list_empty(&gh->gh_list)) {
973 if (gl->gl_req_bh && !(gl->gl_req_gh && 997 if (!(gl->gl_req_gh && (gl->gl_req_gh->gh_flags & GL_NOCANCEL))) {
974 (gl->gl_req_gh->gh_flags & GL_NOCANCEL))) {
975 spin_unlock(&gl->gl_spin); 998 spin_unlock(&gl->gl_spin);
976 gfs2_lm_cancel(gl->gl_sbd, gl->gl_lock); 999 if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
1000 sdp->sd_lockstruct.ls_ops->lm_cancel(gl->gl_lock);
977 msleep(100); 1001 msleep(100);
978 spin_lock(&gl->gl_spin); 1002 spin_lock(&gl->gl_spin);
979 } else { 1003 } else {
@@ -1041,7 +1065,6 @@ static int glock_wait_internal(struct gfs2_holder *gh)
1041 1065
1042 spin_lock(&gl->gl_spin); 1066 spin_lock(&gl->gl_spin);
1043 gl->gl_req_gh = NULL; 1067 gl->gl_req_gh = NULL;
1044 gl->gl_req_bh = NULL;
1045 clear_bit(GLF_LOCK, &gl->gl_flags); 1068 clear_bit(GLF_LOCK, &gl->gl_flags);
1046 run_queue(gl); 1069 run_queue(gl);
1047 spin_unlock(&gl->gl_spin); 1070 spin_unlock(&gl->gl_spin);
@@ -1428,6 +1451,14 @@ void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs)
1428 gfs2_glock_dq_uninit(&ghs[x]); 1451 gfs2_glock_dq_uninit(&ghs[x]);
1429} 1452}
1430 1453
1454static int gfs2_lm_hold_lvb(struct gfs2_sbd *sdp, void *lock, char **lvbp)
1455{
1456 int error = -EIO;
1457 if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
1458 error = sdp->sd_lockstruct.ls_ops->lm_hold_lvb(lock, lvbp);
1459 return error;
1460}
1461
1431/** 1462/**
1432 * gfs2_lvb_hold - attach a LVB from a glock 1463 * gfs2_lvb_hold - attach a LVB from a glock
1433 * @gl: The glock in question 1464 * @gl: The glock in question
@@ -1463,12 +1494,15 @@ int gfs2_lvb_hold(struct gfs2_glock *gl)
1463 1494
1464void gfs2_lvb_unhold(struct gfs2_glock *gl) 1495void gfs2_lvb_unhold(struct gfs2_glock *gl)
1465{ 1496{
1497 struct gfs2_sbd *sdp = gl->gl_sbd;
1498
1466 gfs2_glock_hold(gl); 1499 gfs2_glock_hold(gl);
1467 gfs2_glmutex_lock(gl); 1500 gfs2_glmutex_lock(gl);
1468 1501
1469 gfs2_assert(gl->gl_sbd, atomic_read(&gl->gl_lvb_count) > 0); 1502 gfs2_assert(gl->gl_sbd, atomic_read(&gl->gl_lvb_count) > 0);
1470 if (atomic_dec_and_test(&gl->gl_lvb_count)) { 1503 if (atomic_dec_and_test(&gl->gl_lvb_count)) {
1471 gfs2_lm_unhold_lvb(gl->gl_sbd, gl->gl_lock, gl->gl_lvb); 1504 if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
1505 sdp->sd_lockstruct.ls_ops->lm_unhold_lvb(gl->gl_lock, gl->gl_lvb);
1472 gl->gl_lvb = NULL; 1506 gl->gl_lvb = NULL;
1473 gfs2_glock_put(gl); 1507 gfs2_glock_put(gl);
1474 } 1508 }
@@ -1534,8 +1568,7 @@ void gfs2_glock_cb(void *cb_data, unsigned int type, void *data)
1534 gl = gfs2_glock_find(sdp, &async->lc_name); 1568 gl = gfs2_glock_find(sdp, &async->lc_name);
1535 if (gfs2_assert_warn(sdp, gl)) 1569 if (gfs2_assert_warn(sdp, gl))
1536 return; 1570 return;
1537 if (!gfs2_assert_warn(sdp, gl->gl_req_bh)) 1571 xmote_bh(gl, async->lc_ret);
1538 gl->gl_req_bh(gl, async->lc_ret);
1539 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) 1572 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
1540 gfs2_glock_put(gl); 1573 gfs2_glock_put(gl);
1541 up_read(&gfs2_umount_flush_sem); 1574 up_read(&gfs2_umount_flush_sem);
@@ -1594,10 +1627,10 @@ void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
1594 gfs2_glock_hold(gl); 1627 gfs2_glock_hold(gl);
1595 list_add(&gl->gl_reclaim, &sdp->sd_reclaim_list); 1628 list_add(&gl->gl_reclaim, &sdp->sd_reclaim_list);
1596 atomic_inc(&sdp->sd_reclaim_count); 1629 atomic_inc(&sdp->sd_reclaim_count);
1597 } 1630 spin_unlock(&sdp->sd_reclaim_lock);
1598 spin_unlock(&sdp->sd_reclaim_lock); 1631 wake_up(&sdp->sd_reclaim_wq);
1599 1632 } else
1600 wake_up(&sdp->sd_reclaim_wq); 1633 spin_unlock(&sdp->sd_reclaim_lock);
1601} 1634}
1602 1635
1603/** 1636/**
@@ -1897,7 +1930,6 @@ static int dump_glock(struct glock_iter *gi, struct gfs2_glock *gl)
1897 print_dbg(gi, " gl_owner = -1\n"); 1930 print_dbg(gi, " gl_owner = -1\n");
1898 print_dbg(gi, " gl_ip = %lu\n", gl->gl_ip); 1931 print_dbg(gi, " gl_ip = %lu\n", gl->gl_ip);
1899 print_dbg(gi, " req_gh = %s\n", (gl->gl_req_gh) ? "yes" : "no"); 1932 print_dbg(gi, " req_gh = %s\n", (gl->gl_req_gh) ? "yes" : "no");
1900 print_dbg(gi, " req_bh = %s\n", (gl->gl_req_bh) ? "yes" : "no");
1901 print_dbg(gi, " lvb_count = %d\n", atomic_read(&gl->gl_lvb_count)); 1933 print_dbg(gi, " lvb_count = %d\n", atomic_read(&gl->gl_lvb_count));
1902 print_dbg(gi, " object = %s\n", (gl->gl_object) ? "yes" : "no"); 1934 print_dbg(gi, " object = %s\n", (gl->gl_object) ? "yes" : "no");
1903 print_dbg(gi, " reclaim = %s\n", 1935 print_dbg(gi, " reclaim = %s\n",
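
The lock-module wrappers that glock.c still needs (get_lock, lock, unlock, cancel, hold/unhold LVB) now live in glock.c itself as small static helpers, each short-circuiting once SDF_SHUTDOWN is set so no call reaches the lock module after a withdraw. Below is a minimal userspace sketch of that guard-and-dispatch pattern; the demo_* names are hypothetical stand-ins, only the shape of the pattern comes from the diff.

/*
 * Illustrative sketch (not kernel code): an ops table reached only through
 * small static wrappers that check a "shutdown" flag first.
 */
#include <stdio.h>

struct demo_lock_ops {
        int (*lm_lock)(void *lock, unsigned int cur, unsigned int req);
};

struct demo_sb {
        int shutdown;                     /* stands in for SDF_SHUTDOWN */
        const struct demo_lock_ops *ops;  /* stands in for ls_ops */
};

static int demo_lm_lock(struct demo_sb *sb, void *lock,
                        unsigned int cur, unsigned int req)
{
        int ret = 0;                      /* no-op result after shutdown */

        if (!sb->shutdown)                /* only call out while healthy */
                ret = sb->ops->lm_lock(lock, cur, req);
        return ret;
}

static int noisy_lock(void *lock, unsigned int cur, unsigned int req)
{
        printf("lock module called: %u -> %u\n", cur, req);
        return (int)req;
}

int main(void)
{
        const struct demo_lock_ops ops = { .lm_lock = noisy_lock };
        struct demo_sb sb = { .shutdown = 0, .ops = &ops };

        demo_lm_lock(&sb, NULL, 0, 3);    /* reaches the lock module */
        sb.shutdown = 1;
        demo_lm_lock(&sb, NULL, 0, 3);    /* silently short-circuits */
        return 0;
}
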
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
index 2f9c6d136b37..cdad3e6f8150 100644
--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -32,24 +32,23 @@
32#define GLR_TRYFAILED 13 32#define GLR_TRYFAILED 13
33#define GLR_CANCELED 14 33#define GLR_CANCELED 14
34 34
35static inline int gfs2_glock_is_locked_by_me(struct gfs2_glock *gl) 35static inline struct gfs2_holder *gfs2_glock_is_locked_by_me(struct gfs2_glock *gl)
36{ 36{
37 struct gfs2_holder *gh; 37 struct gfs2_holder *gh;
38 int locked = 0;
39 struct pid *pid; 38 struct pid *pid;
40 39
41 /* Look in glock's list of holders for one with current task as owner */ 40 /* Look in glock's list of holders for one with current task as owner */
42 spin_lock(&gl->gl_spin); 41 spin_lock(&gl->gl_spin);
43 pid = task_pid(current); 42 pid = task_pid(current);
44 list_for_each_entry(gh, &gl->gl_holders, gh_list) { 43 list_for_each_entry(gh, &gl->gl_holders, gh_list) {
45 if (gh->gh_owner_pid == pid) { 44 if (gh->gh_owner_pid == pid)
46 locked = 1; 45 goto out;
47 break;
48 }
49 } 46 }
47 gh = NULL;
48out:
50 spin_unlock(&gl->gl_spin); 49 spin_unlock(&gl->gl_spin);
51 50
52 return locked; 51 return gh;
53} 52}
54 53
55static inline int gfs2_glock_is_held_excl(struct gfs2_glock *gl) 54static inline int gfs2_glock_is_held_excl(struct gfs2_glock *gl)
@@ -79,7 +78,6 @@ static inline int gfs2_glock_is_blocking(struct gfs2_glock *gl)
79int gfs2_glock_get(struct gfs2_sbd *sdp, 78int gfs2_glock_get(struct gfs2_sbd *sdp,
80 u64 number, const struct gfs2_glock_operations *glops, 79 u64 number, const struct gfs2_glock_operations *glops,
81 int create, struct gfs2_glock **glp); 80 int create, struct gfs2_glock **glp);
82void gfs2_glock_hold(struct gfs2_glock *gl);
83int gfs2_glock_put(struct gfs2_glock *gl); 81int gfs2_glock_put(struct gfs2_glock *gl);
84void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags, 82void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
85 struct gfs2_holder *gh); 83 struct gfs2_holder *gh);
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index c663b7a0f410..d31badadef8f 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. 2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved. 3 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
4 * 4 *
5 * This copyrighted material is made available to anyone wishing to use, 5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions 6 * modify, copy, or redistribute it subject to the terms and conditions
@@ -126,7 +126,13 @@ static void meta_go_inval(struct gfs2_glock *gl, int flags)
126 return; 126 return;
127 127
128 gfs2_meta_inval(gl); 128 gfs2_meta_inval(gl);
129 gl->gl_vn++; 129 if (gl->gl_object == GFS2_I(gl->gl_sbd->sd_rindex))
130 gl->gl_sbd->sd_rindex_uptodate = 0;
131 else if (gl->gl_ops == &gfs2_rgrp_glops && gl->gl_object) {
132 struct gfs2_rgrpd *rgd = (struct gfs2_rgrpd *)gl->gl_object;
133
134 rgd->rd_flags &= ~GFS2_RDF_UPTODATE;
135 }
130} 136}
131 137
132/** 138/**
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 525dcae352d6..9c2c0b90b22a 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. 2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved. 3 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
4 * 4 *
5 * This copyrighted material is made available to anyone wishing to use, 5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions 6 * modify, copy, or redistribute it subject to the terms and conditions
@@ -44,7 +44,6 @@ struct gfs2_log_header_host {
44 44
45struct gfs2_log_operations { 45struct gfs2_log_operations {
46 void (*lo_add) (struct gfs2_sbd *sdp, struct gfs2_log_element *le); 46 void (*lo_add) (struct gfs2_sbd *sdp, struct gfs2_log_element *le);
47 void (*lo_incore_commit) (struct gfs2_sbd *sdp, struct gfs2_trans *tr);
48 void (*lo_before_commit) (struct gfs2_sbd *sdp); 47 void (*lo_before_commit) (struct gfs2_sbd *sdp);
49 void (*lo_after_commit) (struct gfs2_sbd *sdp, struct gfs2_ail *ai); 48 void (*lo_after_commit) (struct gfs2_sbd *sdp, struct gfs2_ail *ai);
50 void (*lo_before_scan) (struct gfs2_jdesc *jd, 49 void (*lo_before_scan) (struct gfs2_jdesc *jd,
@@ -70,7 +69,6 @@ struct gfs2_bitmap {
70}; 69};
71 70
72struct gfs2_rgrp_host { 71struct gfs2_rgrp_host {
73 u32 rg_flags;
74 u32 rg_free; 72 u32 rg_free;
75 u32 rg_dinodes; 73 u32 rg_dinodes;
76 u64 rg_igeneration; 74 u64 rg_igeneration;
@@ -87,17 +85,17 @@ struct gfs2_rgrpd {
87 u32 rd_data; /* num of data blocks in rgrp */ 85 u32 rd_data; /* num of data blocks in rgrp */
88 u32 rd_bitbytes; /* number of bytes in data bitmaps */ 86 u32 rd_bitbytes; /* number of bytes in data bitmaps */
89 struct gfs2_rgrp_host rd_rg; 87 struct gfs2_rgrp_host rd_rg;
90 u64 rd_rg_vn;
91 struct gfs2_bitmap *rd_bits; 88 struct gfs2_bitmap *rd_bits;
92 unsigned int rd_bh_count; 89 unsigned int rd_bh_count;
93 struct mutex rd_mutex; 90 struct mutex rd_mutex;
94 u32 rd_free_clone; 91 u32 rd_free_clone;
95 struct gfs2_log_element rd_le; 92 struct gfs2_log_element rd_le;
96 u32 rd_last_alloc_data; 93 u32 rd_last_alloc;
97 u32 rd_last_alloc_meta;
98 struct gfs2_sbd *rd_sbd; 94 struct gfs2_sbd *rd_sbd;
99 unsigned long rd_flags; 95 unsigned char rd_flags;
100#define GFS2_RDF_CHECK 0x0001 /* Need to check for unlinked inodes */ 96#define GFS2_RDF_CHECK 0x01 /* Need to check for unlinked inodes */
97#define GFS2_RDF_NOALLOC 0x02 /* rg prohibits allocation */
98#define GFS2_RDF_UPTODATE 0x04 /* rg is up to date */
101}; 99};
102 100
103enum gfs2_state_bits { 101enum gfs2_state_bits {
@@ -168,6 +166,8 @@ enum {
168 GLF_DIRTY = 5, 166 GLF_DIRTY = 5,
169 GLF_DEMOTE_IN_PROGRESS = 6, 167 GLF_DEMOTE_IN_PROGRESS = 6,
170 GLF_LFLUSH = 7, 168 GLF_LFLUSH = 7,
169 GLF_WAITERS2 = 8,
170 GLF_CONV_DEADLK = 9,
171}; 171};
172 172
173struct gfs2_glock { 173struct gfs2_glock {
@@ -187,18 +187,15 @@ struct gfs2_glock {
187 struct list_head gl_holders; 187 struct list_head gl_holders;
188 struct list_head gl_waiters1; /* HIF_MUTEX */ 188 struct list_head gl_waiters1; /* HIF_MUTEX */
189 struct list_head gl_waiters3; /* HIF_PROMOTE */ 189 struct list_head gl_waiters3; /* HIF_PROMOTE */
190 int gl_waiters2; /* GIF_DEMOTE */
191 190
192 const struct gfs2_glock_operations *gl_ops; 191 const struct gfs2_glock_operations *gl_ops;
193 192
194 struct gfs2_holder *gl_req_gh; 193 struct gfs2_holder *gl_req_gh;
195 gfs2_glop_bh_t gl_req_bh;
196 194
197 void *gl_lock; 195 void *gl_lock;
198 char *gl_lvb; 196 char *gl_lvb;
199 atomic_t gl_lvb_count; 197 atomic_t gl_lvb_count;
200 198
201 u64 gl_vn;
202 unsigned long gl_stamp; 199 unsigned long gl_stamp;
203 unsigned long gl_tchange; 200 unsigned long gl_tchange;
204 void *gl_object; 201 void *gl_object;
@@ -213,6 +210,8 @@ struct gfs2_glock {
213 struct delayed_work gl_work; 210 struct delayed_work gl_work;
214}; 211};
215 212
213#define GFS2_MIN_LVB_SIZE 32 /* Min size of LVB that gfs2 supports */
214
216struct gfs2_alloc { 215struct gfs2_alloc {
217 /* Quota stuff */ 216 /* Quota stuff */
218 217
@@ -241,14 +240,9 @@ enum {
241 240
242struct gfs2_dinode_host { 241struct gfs2_dinode_host {
243 u64 di_size; /* number of bytes in file */ 242 u64 di_size; /* number of bytes in file */
244 u64 di_blocks; /* number of blocks in file */
245 u64 di_goal_meta; /* rgrp to alloc from next */
246 u64 di_goal_data; /* data block goal */
247 u64 di_generation; /* generation number for NFS */ 243 u64 di_generation; /* generation number for NFS */
248 u32 di_flags; /* GFS2_DIF_... */ 244 u32 di_flags; /* GFS2_DIF_... */
249 u16 di_height; /* height of metadata */
250 /* These only apply to directories */ 245 /* These only apply to directories */
251 u16 di_depth; /* Number of bits in the table */
252 u32 di_entries; /* The number of entries in the directory */ 246 u32 di_entries; /* The number of entries in the directory */
253 u64 di_eattr; /* extended attribute block number */ 247 u64 di_eattr; /* extended attribute block number */
254}; 248};
@@ -265,9 +259,10 @@ struct gfs2_inode {
265 struct gfs2_holder i_iopen_gh; 259 struct gfs2_holder i_iopen_gh;
266 struct gfs2_holder i_gh; /* for prepare/commit_write only */ 260 struct gfs2_holder i_gh; /* for prepare/commit_write only */
267 struct gfs2_alloc *i_alloc; 261 struct gfs2_alloc *i_alloc;
268 u64 i_last_rg_alloc; 262 u64 i_goal; /* goal block for allocations */
269
270 struct rw_semaphore i_rw_mutex; 263 struct rw_semaphore i_rw_mutex;
264 u8 i_height;
265 u8 i_depth;
271}; 266};
272 267
273/* 268/*
@@ -490,9 +485,9 @@ struct gfs2_sbd {
490 u32 sd_qc_per_block; 485 u32 sd_qc_per_block;
491 u32 sd_max_dirres; /* Max blocks needed to add a directory entry */ 486 u32 sd_max_dirres; /* Max blocks needed to add a directory entry */
492 u32 sd_max_height; /* Max height of a file's metadata tree */ 487 u32 sd_max_height; /* Max height of a file's metadata tree */
493 u64 sd_heightsize[GFS2_MAX_META_HEIGHT]; 488 u64 sd_heightsize[GFS2_MAX_META_HEIGHT + 1];
494 u32 sd_max_jheight; /* Max height of journaled file's meta tree */ 489 u32 sd_max_jheight; /* Max height of journaled file's meta tree */
495 u64 sd_jheightsize[GFS2_MAX_META_HEIGHT]; 490 u64 sd_jheightsize[GFS2_MAX_META_HEIGHT + 1];
496 491
497 struct gfs2_args sd_args; /* Mount arguments */ 492 struct gfs2_args sd_args; /* Mount arguments */
498 struct gfs2_tune sd_tune; /* Filesystem tuning structure */ 493 struct gfs2_tune sd_tune; /* Filesystem tuning structure */
@@ -533,7 +528,7 @@ struct gfs2_sbd {
533 528
534 /* Resource group stuff */ 529 /* Resource group stuff */
535 530
536 u64 sd_rindex_vn; 531 int sd_rindex_uptodate;
537 spinlock_t sd_rindex_spin; 532 spinlock_t sd_rindex_spin;
538 struct mutex sd_rindex_mutex; 533 struct mutex sd_rindex_mutex;
539 struct list_head sd_rindex_list; 534 struct list_head sd_rindex_list;
@@ -637,9 +632,6 @@ struct gfs2_sbd {
637 632
638 /* Counters */ 633 /* Counters */
639 634
640 atomic_t sd_glock_count;
641 atomic_t sd_glock_held_count;
642 atomic_t sd_inode_count;
643 atomic_t sd_reclaimed; 635 atomic_t sd_reclaimed;
644 636
645 char sd_fsname[GFS2_FSNAME_LEN]; 637 char sd_fsname[GFS2_FSNAME_LEN];
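
With gl_req_bh and the gl_waiters2 integer removed from struct gfs2_glock, the remaining per-glock booleans are tracked as bits in gl_flags (GLF_WAITERS2, GLF_CONV_DEADLK), matching the test_bit()/set_bit()/clear_bit() calls added in glock.c. A minimal sketch of that flag-word idiom follows; the demo_* helpers are stand-ins, not the kernel bitops.

/* Sketch only: several independent booleans packed into one flags word. */
#include <stdio.h>

#define DEMO_WAITERS2    8   /* mirrors GLF_WAITERS2 */
#define DEMO_CONV_DEADLK 9   /* mirrors GLF_CONV_DEADLK */

static void demo_set_bit(int nr, unsigned long *word)   { *word |=  (1UL << nr); }
static void demo_clear_bit(int nr, unsigned long *word) { *word &= ~(1UL << nr); }
static int  demo_test_bit(int nr, const unsigned long *word) { return (int)((*word >> nr) & 1); }

int main(void)
{
        unsigned long gl_flags = 0;

        demo_set_bit(DEMO_WAITERS2, &gl_flags);
        printf("waiters2=%d conv_deadlk=%d\n",
               demo_test_bit(DEMO_WAITERS2, &gl_flags),
               demo_test_bit(DEMO_CONV_DEADLK, &gl_flags));
        demo_clear_bit(DEMO_WAITERS2, &gl_flags);
        printf("waiters2=%d\n", demo_test_bit(DEMO_WAITERS2, &gl_flags));
        return 0;
}
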
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index 37725ade3c51..3a9ef526c308 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. 2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved. 3 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
4 * 4 *
5 * This copyrighted material is made available to anyone wishing to use, 5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions 6 * modify, copy, or redistribute it subject to the terms and conditions
@@ -149,7 +149,8 @@ void gfs2_set_iop(struct inode *inode)
149 } else if (S_ISLNK(mode)) { 149 } else if (S_ISLNK(mode)) {
150 inode->i_op = &gfs2_symlink_iops; 150 inode->i_op = &gfs2_symlink_iops;
151 } else { 151 } else {
152 inode->i_op = &gfs2_dev_iops; 152 inode->i_op = &gfs2_file_iops;
153 init_special_inode(inode, inode->i_mode, inode->i_rdev);
153 } 154 }
154 155
155 unlock_new_inode(inode); 156 unlock_new_inode(inode);
@@ -248,12 +249,10 @@ static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
248{ 249{
249 struct gfs2_dinode_host *di = &ip->i_di; 250 struct gfs2_dinode_host *di = &ip->i_di;
250 const struct gfs2_dinode *str = buf; 251 const struct gfs2_dinode *str = buf;
252 u16 height, depth;
251 253
252 if (ip->i_no_addr != be64_to_cpu(str->di_num.no_addr)) { 254 if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr)))
253 if (gfs2_consist_inode(ip)) 255 goto corrupt;
254 gfs2_dinode_print(ip);
255 return -EIO;
256 }
257 ip->i_no_formal_ino = be64_to_cpu(str->di_num.no_formal_ino); 256 ip->i_no_formal_ino = be64_to_cpu(str->di_num.no_formal_ino);
258 ip->i_inode.i_mode = be32_to_cpu(str->di_mode); 257 ip->i_inode.i_mode = be32_to_cpu(str->di_mode);
259 ip->i_inode.i_rdev = 0; 258 ip->i_inode.i_rdev = 0;
@@ -275,8 +274,7 @@ static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
275 ip->i_inode.i_nlink = be32_to_cpu(str->di_nlink); 274 ip->i_inode.i_nlink = be32_to_cpu(str->di_nlink);
276 di->di_size = be64_to_cpu(str->di_size); 275 di->di_size = be64_to_cpu(str->di_size);
277 i_size_write(&ip->i_inode, di->di_size); 276 i_size_write(&ip->i_inode, di->di_size);
278 di->di_blocks = be64_to_cpu(str->di_blocks); 277 gfs2_set_inode_blocks(&ip->i_inode, be64_to_cpu(str->di_blocks));
279 gfs2_set_inode_blocks(&ip->i_inode);
280 ip->i_inode.i_atime.tv_sec = be64_to_cpu(str->di_atime); 278 ip->i_inode.i_atime.tv_sec = be64_to_cpu(str->di_atime);
281 ip->i_inode.i_atime.tv_nsec = be32_to_cpu(str->di_atime_nsec); 279 ip->i_inode.i_atime.tv_nsec = be32_to_cpu(str->di_atime_nsec);
282 ip->i_inode.i_mtime.tv_sec = be64_to_cpu(str->di_mtime); 280 ip->i_inode.i_mtime.tv_sec = be64_to_cpu(str->di_mtime);
@@ -284,15 +282,20 @@ static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
284 ip->i_inode.i_ctime.tv_sec = be64_to_cpu(str->di_ctime); 282 ip->i_inode.i_ctime.tv_sec = be64_to_cpu(str->di_ctime);
285 ip->i_inode.i_ctime.tv_nsec = be32_to_cpu(str->di_ctime_nsec); 283 ip->i_inode.i_ctime.tv_nsec = be32_to_cpu(str->di_ctime_nsec);
286 284
287 di->di_goal_meta = be64_to_cpu(str->di_goal_meta); 285 ip->i_goal = be64_to_cpu(str->di_goal_meta);
288 di->di_goal_data = be64_to_cpu(str->di_goal_data);
289 di->di_generation = be64_to_cpu(str->di_generation); 286 di->di_generation = be64_to_cpu(str->di_generation);
290 287
291 di->di_flags = be32_to_cpu(str->di_flags); 288 di->di_flags = be32_to_cpu(str->di_flags);
292 gfs2_set_inode_flags(&ip->i_inode); 289 gfs2_set_inode_flags(&ip->i_inode);
293 di->di_height = be16_to_cpu(str->di_height); 290 height = be16_to_cpu(str->di_height);
294 291 if (unlikely(height > GFS2_MAX_META_HEIGHT))
295 di->di_depth = be16_to_cpu(str->di_depth); 292 goto corrupt;
293 ip->i_height = (u8)height;
294
295 depth = be16_to_cpu(str->di_depth);
296 if (unlikely(depth > GFS2_DIR_MAX_DEPTH))
297 goto corrupt;
298 ip->i_depth = (u8)depth;
296 di->di_entries = be32_to_cpu(str->di_entries); 299 di->di_entries = be32_to_cpu(str->di_entries);
297 300
298 di->di_eattr = be64_to_cpu(str->di_eattr); 301 di->di_eattr = be64_to_cpu(str->di_eattr);
@@ -300,6 +303,10 @@ static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
300 gfs2_set_aops(&ip->i_inode); 303 gfs2_set_aops(&ip->i_inode);
301 304
302 return 0; 305 return 0;
306corrupt:
307 if (gfs2_consist_inode(ip))
308 gfs2_dinode_print(ip);
309 return -EIO;
303} 310}
304 311
305/** 312/**
@@ -337,13 +344,15 @@ int gfs2_dinode_dealloc(struct gfs2_inode *ip)
337 struct gfs2_rgrpd *rgd; 344 struct gfs2_rgrpd *rgd;
338 int error; 345 int error;
339 346
340 if (ip->i_di.di_blocks != 1) { 347 if (gfs2_get_inode_blocks(&ip->i_inode) != 1) {
341 if (gfs2_consist_inode(ip)) 348 if (gfs2_consist_inode(ip))
342 gfs2_dinode_print(ip); 349 gfs2_dinode_print(ip);
343 return -EIO; 350 return -EIO;
344 } 351 }
345 352
346 al = gfs2_alloc_get(ip); 353 al = gfs2_alloc_get(ip);
354 if (!al)
355 return -ENOMEM;
347 356
348 error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE); 357 error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
349 if (error) 358 if (error)
@@ -487,7 +496,7 @@ struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name,
487 return dir; 496 return dir;
488 } 497 }
489 498
490 if (gfs2_glock_is_locked_by_me(dip->i_gl) == 0) { 499 if (gfs2_glock_is_locked_by_me(dip->i_gl) == NULL) {
491 error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh); 500 error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
492 if (error) 501 if (error)
493 return ERR_PTR(error); 502 return ERR_PTR(error);
@@ -818,7 +827,8 @@ static int make_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
818 int error; 827 int error;
819 828
820 munge_mode_uid_gid(dip, &mode, &uid, &gid); 829 munge_mode_uid_gid(dip, &mode, &uid, &gid);
821 gfs2_alloc_get(dip); 830 if (!gfs2_alloc_get(dip))
831 return -ENOMEM;
822 832
823 error = gfs2_quota_lock(dip, uid, gid); 833 error = gfs2_quota_lock(dip, uid, gid);
824 if (error) 834 if (error)
@@ -853,6 +863,8 @@ static int link_dinode(struct gfs2_inode *dip, const struct qstr *name,
853 int error; 863 int error;
854 864
855 al = gfs2_alloc_get(dip); 865 al = gfs2_alloc_get(dip);
866 if (!al)
867 return -ENOMEM;
856 868
857 error = gfs2_quota_lock(dip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE); 869 error = gfs2_quota_lock(dip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
858 if (error) 870 if (error)
@@ -1219,7 +1231,7 @@ int gfs2_readlinki(struct gfs2_inode *ip, char **buf, unsigned int *len)
1219 1231
1220 x = ip->i_di.di_size + 1; 1232 x = ip->i_di.di_size + 1;
1221 if (x > *len) { 1233 if (x > *len) {
1222 *buf = kmalloc(x, GFP_KERNEL); 1234 *buf = kmalloc(x, GFP_NOFS);
1223 if (!*buf) { 1235 if (!*buf) {
1224 error = -ENOMEM; 1236 error = -ENOMEM;
1225 goto out_brelse; 1237 goto out_brelse;
@@ -1391,21 +1403,21 @@ void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf)
1391 str->di_gid = cpu_to_be32(ip->i_inode.i_gid); 1403 str->di_gid = cpu_to_be32(ip->i_inode.i_gid);
1392 str->di_nlink = cpu_to_be32(ip->i_inode.i_nlink); 1404 str->di_nlink = cpu_to_be32(ip->i_inode.i_nlink);
1393 str->di_size = cpu_to_be64(di->di_size); 1405 str->di_size = cpu_to_be64(di->di_size);
1394 str->di_blocks = cpu_to_be64(di->di_blocks); 1406 str->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(&ip->i_inode));
1395 str->di_atime = cpu_to_be64(ip->i_inode.i_atime.tv_sec); 1407 str->di_atime = cpu_to_be64(ip->i_inode.i_atime.tv_sec);
1396 str->di_mtime = cpu_to_be64(ip->i_inode.i_mtime.tv_sec); 1408 str->di_mtime = cpu_to_be64(ip->i_inode.i_mtime.tv_sec);
1397 str->di_ctime = cpu_to_be64(ip->i_inode.i_ctime.tv_sec); 1409 str->di_ctime = cpu_to_be64(ip->i_inode.i_ctime.tv_sec);
1398 1410
1399 str->di_goal_meta = cpu_to_be64(di->di_goal_meta); 1411 str->di_goal_meta = cpu_to_be64(ip->i_goal);
1400 str->di_goal_data = cpu_to_be64(di->di_goal_data); 1412 str->di_goal_data = cpu_to_be64(ip->i_goal);
1401 str->di_generation = cpu_to_be64(di->di_generation); 1413 str->di_generation = cpu_to_be64(di->di_generation);
1402 1414
1403 str->di_flags = cpu_to_be32(di->di_flags); 1415 str->di_flags = cpu_to_be32(di->di_flags);
1404 str->di_height = cpu_to_be16(di->di_height); 1416 str->di_height = cpu_to_be16(ip->i_height);
1405 str->di_payload_format = cpu_to_be32(S_ISDIR(ip->i_inode.i_mode) && 1417 str->di_payload_format = cpu_to_be32(S_ISDIR(ip->i_inode.i_mode) &&
1406 !(ip->i_di.di_flags & GFS2_DIF_EXHASH) ? 1418 !(ip->i_di.di_flags & GFS2_DIF_EXHASH) ?
1407 GFS2_FORMAT_DE : 0); 1419 GFS2_FORMAT_DE : 0);
1408 str->di_depth = cpu_to_be16(di->di_depth); 1420 str->di_depth = cpu_to_be16(ip->i_depth);
1409 str->di_entries = cpu_to_be32(di->di_entries); 1421 str->di_entries = cpu_to_be32(di->di_entries);
1410 1422
1411 str->di_eattr = cpu_to_be64(di->di_eattr); 1423 str->di_eattr = cpu_to_be64(di->di_eattr);
@@ -1423,15 +1435,13 @@ void gfs2_dinode_print(const struct gfs2_inode *ip)
1423 printk(KERN_INFO " no_addr = %llu\n", 1435 printk(KERN_INFO " no_addr = %llu\n",
1424 (unsigned long long)ip->i_no_addr); 1436 (unsigned long long)ip->i_no_addr);
1425 printk(KERN_INFO " di_size = %llu\n", (unsigned long long)di->di_size); 1437 printk(KERN_INFO " di_size = %llu\n", (unsigned long long)di->di_size);
1426 printk(KERN_INFO " di_blocks = %llu\n", 1438 printk(KERN_INFO " blocks = %llu\n",
1427 (unsigned long long)di->di_blocks); 1439 (unsigned long long)gfs2_get_inode_blocks(&ip->i_inode));
1428 printk(KERN_INFO " di_goal_meta = %llu\n", 1440 printk(KERN_INFO " i_goal = %llu\n",
1429 (unsigned long long)di->di_goal_meta); 1441 (unsigned long long)ip->i_goal);
1430 printk(KERN_INFO " di_goal_data = %llu\n",
1431 (unsigned long long)di->di_goal_data);
1432 printk(KERN_INFO " di_flags = 0x%.8X\n", di->di_flags); 1442 printk(KERN_INFO " di_flags = 0x%.8X\n", di->di_flags);
1433 printk(KERN_INFO " di_height = %u\n", di->di_height); 1443 printk(KERN_INFO " i_height = %u\n", ip->i_height);
1434 printk(KERN_INFO " di_depth = %u\n", di->di_depth); 1444 printk(KERN_INFO " i_depth = %u\n", ip->i_depth);
1435 printk(KERN_INFO " di_entries = %u\n", di->di_entries); 1445 printk(KERN_INFO " di_entries = %u\n", di->di_entries);
1436 printk(KERN_INFO " di_eattr = %llu\n", 1446 printk(KERN_INFO " di_eattr = %llu\n",
1437 (unsigned long long)di->di_eattr); 1447 (unsigned long long)di->di_eattr);
diff --git a/fs/gfs2/inode.h b/fs/gfs2/inode.h
index d44650662615..580da454b38f 100644
--- a/fs/gfs2/inode.h
+++ b/fs/gfs2/inode.h
@@ -10,9 +10,11 @@
10#ifndef __INODE_DOT_H__ 10#ifndef __INODE_DOT_H__
11#define __INODE_DOT_H__ 11#define __INODE_DOT_H__
12 12
13#include "util.h"
14
13static inline int gfs2_is_stuffed(const struct gfs2_inode *ip) 15static inline int gfs2_is_stuffed(const struct gfs2_inode *ip)
14{ 16{
15 return !ip->i_di.di_height; 17 return !ip->i_height;
16} 18}
17 19
18static inline int gfs2_is_jdata(const struct gfs2_inode *ip) 20static inline int gfs2_is_jdata(const struct gfs2_inode *ip)
@@ -37,13 +39,25 @@ static inline int gfs2_is_dir(const struct gfs2_inode *ip)
37 return S_ISDIR(ip->i_inode.i_mode); 39 return S_ISDIR(ip->i_inode.i_mode);
38} 40}
39 41
40static inline void gfs2_set_inode_blocks(struct inode *inode) 42static inline void gfs2_set_inode_blocks(struct inode *inode, u64 blocks)
43{
44 inode->i_blocks = blocks <<
45 (GFS2_SB(inode)->sd_sb.sb_bsize_shift - GFS2_BASIC_BLOCK_SHIFT);
46}
47
48static inline u64 gfs2_get_inode_blocks(const struct inode *inode)
41{ 49{
42 struct gfs2_inode *ip = GFS2_I(inode); 50 return inode->i_blocks >>
43 inode->i_blocks = ip->i_di.di_blocks <<
44 (GFS2_SB(inode)->sd_sb.sb_bsize_shift - GFS2_BASIC_BLOCK_SHIFT); 51 (GFS2_SB(inode)->sd_sb.sb_bsize_shift - GFS2_BASIC_BLOCK_SHIFT);
45} 52}
46 53
54static inline void gfs2_add_inode_blocks(struct inode *inode, s64 change)
55{
56 gfs2_assert(GFS2_SB(inode), (change >= 0 || inode->i_blocks > -change));
57 change *= (GFS2_SB(inode)->sd_sb.sb_bsize/GFS2_BASIC_BLOCK);
58 inode->i_blocks += change;
59}
60
47static inline int gfs2_check_inum(const struct gfs2_inode *ip, u64 no_addr, 61static inline int gfs2_check_inum(const struct gfs2_inode *ip, u64 no_addr,
48 u64 no_formal_ino) 62 u64 no_formal_ino)
49{ 63{
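
The new inode.h helpers drop the cached di_blocks field and work directly on inode->i_blocks, which the VFS counts in 512-byte units; the shift by (sb_bsize_shift - GFS2_BASIC_BLOCK_SHIFT) converts between filesystem blocks and those units, and gfs2_add_inode_blocks applies a signed delta after an underflow assertion. The standalone sketch below shows the same arithmetic with assumed shifts (9 for the 512-byte basic block, 12 for a 4K filesystem block); the real values come from the superblock.

/* Sketch of the unit conversion behind the new block-accounting helpers. */
#include <stdio.h>
#include <stdint.h>

#define BASIC_BLOCK_SHIFT 9    /* 512-byte units, as i_blocks is counted */
#define FS_BLOCK_SHIFT    12   /* example 4K filesystem block size */

static uint64_t fs_blocks_to_i_blocks(uint64_t blocks)
{
        return blocks << (FS_BLOCK_SHIFT - BASIC_BLOCK_SHIFT);
}

static uint64_t i_blocks_to_fs_blocks(uint64_t i_blocks)
{
        return i_blocks >> (FS_BLOCK_SHIFT - BASIC_BLOCK_SHIFT);
}

static uint64_t add_fs_blocks(uint64_t i_blocks, int64_t change)
{
        /* one fs block is (1 << (FS_BLOCK_SHIFT - BASIC_BLOCK_SHIFT)) sectors */
        return i_blocks + change * (1 << (FS_BLOCK_SHIFT - BASIC_BLOCK_SHIFT));
}

int main(void)
{
        uint64_t i_blocks = fs_blocks_to_i_blocks(10);  /* 10 fs blocks */

        printf("i_blocks=%llu fs=%llu\n",
               (unsigned long long)i_blocks,
               (unsigned long long)i_blocks_to_fs_blocks(i_blocks));
        i_blocks = add_fs_blocks(i_blocks, -1);         /* drop one fs block */
        printf("after -1: fs=%llu\n",
               (unsigned long long)i_blocks_to_fs_blocks(i_blocks));
        return 0;
}
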
diff --git a/fs/gfs2/lm.c b/fs/gfs2/lm.c
deleted file mode 100644
index cfcc39b86a53..000000000000
--- a/fs/gfs2/lm.c
+++ /dev/null
@@ -1,210 +0,0 @@
1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
8 */
9
10#include <linux/slab.h>
11#include <linux/spinlock.h>
12#include <linux/completion.h>
13#include <linux/buffer_head.h>
14#include <linux/delay.h>
15#include <linux/gfs2_ondisk.h>
16#include <linux/lm_interface.h>
17
18#include "gfs2.h"
19#include "incore.h"
20#include "glock.h"
21#include "lm.h"
22#include "super.h"
23#include "util.h"
24
25/**
26 * gfs2_lm_mount - mount a locking protocol
27 * @sdp: the filesystem
28 * @args: mount arguments
29 * @silent: if 1, don't complain if the FS isn't a GFS2 fs
30 *
31 * Returns: errno
32 */
33
34int gfs2_lm_mount(struct gfs2_sbd *sdp, int silent)
35{
36 char *proto = sdp->sd_proto_name;
37 char *table = sdp->sd_table_name;
38 int flags = 0;
39 int error;
40
41 if (sdp->sd_args.ar_spectator)
42 flags |= LM_MFLAG_SPECTATOR;
43
44 fs_info(sdp, "Trying to join cluster \"%s\", \"%s\"\n", proto, table);
45
46 error = gfs2_mount_lockproto(proto, table, sdp->sd_args.ar_hostdata,
47 gfs2_glock_cb, sdp,
48 GFS2_MIN_LVB_SIZE, flags,
49 &sdp->sd_lockstruct, &sdp->sd_kobj);
50 if (error) {
51 fs_info(sdp, "can't mount proto=%s, table=%s, hostdata=%s\n",
52 proto, table, sdp->sd_args.ar_hostdata);
53 goto out;
54 }
55
56 if (gfs2_assert_warn(sdp, sdp->sd_lockstruct.ls_lockspace) ||
57 gfs2_assert_warn(sdp, sdp->sd_lockstruct.ls_ops) ||
58 gfs2_assert_warn(sdp, sdp->sd_lockstruct.ls_lvb_size >=
59 GFS2_MIN_LVB_SIZE)) {
60 gfs2_unmount_lockproto(&sdp->sd_lockstruct);
61 goto out;
62 }
63
64 if (sdp->sd_args.ar_spectator)
65 snprintf(sdp->sd_fsname, GFS2_FSNAME_LEN, "%s.s", table);
66 else
67 snprintf(sdp->sd_fsname, GFS2_FSNAME_LEN, "%s.%u", table,
68 sdp->sd_lockstruct.ls_jid);
69
70 fs_info(sdp, "Joined cluster. Now mounting FS...\n");
71
72 if ((sdp->sd_lockstruct.ls_flags & LM_LSFLAG_LOCAL) &&
73 !sdp->sd_args.ar_ignore_local_fs) {
74 sdp->sd_args.ar_localflocks = 1;
75 sdp->sd_args.ar_localcaching = 1;
76 }
77
78out:
79 return error;
80}
81
82void gfs2_lm_others_may_mount(struct gfs2_sbd *sdp)
83{
84 if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
85 sdp->sd_lockstruct.ls_ops->lm_others_may_mount(
86 sdp->sd_lockstruct.ls_lockspace);
87}
88
89void gfs2_lm_unmount(struct gfs2_sbd *sdp)
90{
91 if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
92 gfs2_unmount_lockproto(&sdp->sd_lockstruct);
93}
94
95int gfs2_lm_withdraw(struct gfs2_sbd *sdp, char *fmt, ...)
96{
97 va_list args;
98
99 if (test_and_set_bit(SDF_SHUTDOWN, &sdp->sd_flags))
100 return 0;
101
102 va_start(args, fmt);
103 vprintk(fmt, args);
104 va_end(args);
105
106 fs_err(sdp, "about to withdraw this file system\n");
107 BUG_ON(sdp->sd_args.ar_debug);
108
109 fs_err(sdp, "telling LM to withdraw\n");
110 gfs2_withdraw_lockproto(&sdp->sd_lockstruct);
111 fs_err(sdp, "withdrawn\n");
112 dump_stack();
113
114 return -1;
115}
116
117int gfs2_lm_get_lock(struct gfs2_sbd *sdp, struct lm_lockname *name,
118 void **lockp)
119{
120 int error = -EIO;
121 if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
122 error = sdp->sd_lockstruct.ls_ops->lm_get_lock(
123 sdp->sd_lockstruct.ls_lockspace, name, lockp);
124 return error;
125}
126
127void gfs2_lm_put_lock(struct gfs2_sbd *sdp, void *lock)
128{
129 if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
130 sdp->sd_lockstruct.ls_ops->lm_put_lock(lock);
131}
132
133unsigned int gfs2_lm_lock(struct gfs2_sbd *sdp, void *lock,
134 unsigned int cur_state, unsigned int req_state,
135 unsigned int flags)
136{
137 int ret = 0;
138 if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
139 ret = sdp->sd_lockstruct.ls_ops->lm_lock(lock, cur_state,
140 req_state, flags);
141 return ret;
142}
143
144unsigned int gfs2_lm_unlock(struct gfs2_sbd *sdp, void *lock,
145 unsigned int cur_state)
146{
147 int ret = 0;
148 if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
149 ret = sdp->sd_lockstruct.ls_ops->lm_unlock(lock, cur_state);
150 return ret;
151}
152
153void gfs2_lm_cancel(struct gfs2_sbd *sdp, void *lock)
154{
155 if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
156 sdp->sd_lockstruct.ls_ops->lm_cancel(lock);
157}
158
159int gfs2_lm_hold_lvb(struct gfs2_sbd *sdp, void *lock, char **lvbp)
160{
161 int error = -EIO;
162 if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
163 error = sdp->sd_lockstruct.ls_ops->lm_hold_lvb(lock, lvbp);
164 return error;
165}
166
167void gfs2_lm_unhold_lvb(struct gfs2_sbd *sdp, void *lock, char *lvb)
168{
169 if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
170 sdp->sd_lockstruct.ls_ops->lm_unhold_lvb(lock, lvb);
171}
172
173int gfs2_lm_plock_get(struct gfs2_sbd *sdp, struct lm_lockname *name,
174 struct file *file, struct file_lock *fl)
175{
176 int error = -EIO;
177 if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
178 error = sdp->sd_lockstruct.ls_ops->lm_plock_get(
179 sdp->sd_lockstruct.ls_lockspace, name, file, fl);
180 return error;
181}
182
183int gfs2_lm_plock(struct gfs2_sbd *sdp, struct lm_lockname *name,
184 struct file *file, int cmd, struct file_lock *fl)
185{
186 int error = -EIO;
187 if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
188 error = sdp->sd_lockstruct.ls_ops->lm_plock(
189 sdp->sd_lockstruct.ls_lockspace, name, file, cmd, fl);
190 return error;
191}
192
193int gfs2_lm_punlock(struct gfs2_sbd *sdp, struct lm_lockname *name,
194 struct file *file, struct file_lock *fl)
195{
196 int error = -EIO;
197 if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
198 error = sdp->sd_lockstruct.ls_ops->lm_punlock(
199 sdp->sd_lockstruct.ls_lockspace, name, file, fl);
200 return error;
201}
202
203void gfs2_lm_recovery_done(struct gfs2_sbd *sdp, unsigned int jid,
204 unsigned int message)
205{
206 if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
207 sdp->sd_lockstruct.ls_ops->lm_recovery_done(
208 sdp->sd_lockstruct.ls_lockspace, jid, message);
209}
210
diff --git a/fs/gfs2/lm.h b/fs/gfs2/lm.h
deleted file mode 100644
index 21cdc30ee08c..000000000000
--- a/fs/gfs2/lm.h
+++ /dev/null
@@ -1,42 +0,0 @@
1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
8 */
9
10#ifndef __LM_DOT_H__
11#define __LM_DOT_H__
12
13struct gfs2_sbd;
14
15#define GFS2_MIN_LVB_SIZE 32
16
17int gfs2_lm_mount(struct gfs2_sbd *sdp, int silent);
18void gfs2_lm_others_may_mount(struct gfs2_sbd *sdp);
19void gfs2_lm_unmount(struct gfs2_sbd *sdp);
20int gfs2_lm_withdraw(struct gfs2_sbd *sdp, char *fmt, ...)
21 __attribute__ ((format(printf, 2, 3)));
22int gfs2_lm_get_lock(struct gfs2_sbd *sdp, struct lm_lockname *name,
23 void **lockp);
24void gfs2_lm_put_lock(struct gfs2_sbd *sdp, void *lock);
25unsigned int gfs2_lm_lock(struct gfs2_sbd *sdp, void *lock,
26 unsigned int cur_state, unsigned int req_state,
27 unsigned int flags);
28unsigned int gfs2_lm_unlock(struct gfs2_sbd *sdp, void *lock,
29 unsigned int cur_state);
30void gfs2_lm_cancel(struct gfs2_sbd *sdp, void *lock);
31int gfs2_lm_hold_lvb(struct gfs2_sbd *sdp, void *lock, char **lvbp);
32void gfs2_lm_unhold_lvb(struct gfs2_sbd *sdp, void *lock, char *lvb);
33int gfs2_lm_plock_get(struct gfs2_sbd *sdp, struct lm_lockname *name,
34 struct file *file, struct file_lock *fl);
35int gfs2_lm_plock(struct gfs2_sbd *sdp, struct lm_lockname *name,
36 struct file *file, int cmd, struct file_lock *fl);
37int gfs2_lm_punlock(struct gfs2_sbd *sdp, struct lm_lockname *name,
38 struct file *file, struct file_lock *fl);
39void gfs2_lm_recovery_done(struct gfs2_sbd *sdp, unsigned int jid,
40 unsigned int message);
41
42#endif /* __LM_DOT_H__ */
diff --git a/fs/gfs2/locking/dlm/Makefile b/fs/gfs2/locking/dlm/Makefile
index 89b93b6b45cf..2609bb6cd013 100644
--- a/fs/gfs2/locking/dlm/Makefile
+++ b/fs/gfs2/locking/dlm/Makefile
@@ -1,3 +1,3 @@
1obj-$(CONFIG_GFS2_FS_LOCKING_DLM) += lock_dlm.o 1obj-$(CONFIG_GFS2_FS_LOCKING_DLM) += lock_dlm.o
2lock_dlm-y := lock.o main.o mount.o sysfs.o thread.o plock.o 2lock_dlm-y := lock.o main.o mount.o sysfs.o thread.o
3 3
diff --git a/fs/gfs2/locking/dlm/lock.c b/fs/gfs2/locking/dlm/lock.c
index 542a797ac89a..cf7ea8abec87 100644
--- a/fs/gfs2/locking/dlm/lock.c
+++ b/fs/gfs2/locking/dlm/lock.c
@@ -137,7 +137,8 @@ static inline unsigned int make_flags(struct gdlm_lock *lp,
137 137
138 /* Conversion deadlock avoidance by DLM */ 138 /* Conversion deadlock avoidance by DLM */
139 139
140 if (!test_bit(LFL_FORCE_PROMOTE, &lp->flags) && 140 if (!(lp->ls->fsflags & LM_MFLAG_CONV_NODROP) &&
141 !test_bit(LFL_FORCE_PROMOTE, &lp->flags) &&
141 !(lkf & DLM_LKF_NOQUEUE) && 142 !(lkf & DLM_LKF_NOQUEUE) &&
142 cur > DLM_LOCK_NL && req > DLM_LOCK_NL && cur != req) 143 cur > DLM_LOCK_NL && req > DLM_LOCK_NL && cur != req)
143 lkf |= DLM_LKF_CONVDEADLK; 144 lkf |= DLM_LKF_CONVDEADLK;
@@ -164,7 +165,7 @@ static int gdlm_create_lp(struct gdlm_ls *ls, struct lm_lockname *name,
164{ 165{
165 struct gdlm_lock *lp; 166 struct gdlm_lock *lp;
166 167
167 lp = kzalloc(sizeof(struct gdlm_lock), GFP_KERNEL); 168 lp = kzalloc(sizeof(struct gdlm_lock), GFP_NOFS);
168 if (!lp) 169 if (!lp)
169 return -ENOMEM; 170 return -ENOMEM;
170 171
@@ -382,7 +383,7 @@ static int gdlm_add_lvb(struct gdlm_lock *lp)
382{ 383{
383 char *lvb; 384 char *lvb;
384 385
385 lvb = kzalloc(GDLM_LVB_SIZE, GFP_KERNEL); 386 lvb = kzalloc(GDLM_LVB_SIZE, GFP_NOFS);
386 if (!lvb) 387 if (!lvb)
387 return -ENOMEM; 388 return -ENOMEM;
388 389
diff --git a/fs/gfs2/locking/dlm/lock_dlm.h b/fs/gfs2/locking/dlm/lock_dlm.h
index 9e8265d28377..a243cf69c54e 100644
--- a/fs/gfs2/locking/dlm/lock_dlm.h
+++ b/fs/gfs2/locking/dlm/lock_dlm.h
@@ -25,6 +25,7 @@
25#include <net/sock.h> 25#include <net/sock.h>
26 26
27#include <linux/dlm.h> 27#include <linux/dlm.h>
28#include <linux/dlm_plock.h>
28#include <linux/lm_interface.h> 29#include <linux/lm_interface.h>
29 30
30/* 31/*
@@ -173,15 +174,9 @@ void gdlm_cancel(void *);
173int gdlm_hold_lvb(void *, char **); 174int gdlm_hold_lvb(void *, char **);
174void gdlm_unhold_lvb(void *, char *); 175void gdlm_unhold_lvb(void *, char *);
175 176
176/* plock.c */ 177/* mount.c */
178
179extern const struct lm_lockops gdlm_ops;
177 180
178int gdlm_plock_init(void);
179void gdlm_plock_exit(void);
180int gdlm_plock(void *, struct lm_lockname *, struct file *, int,
181 struct file_lock *);
182int gdlm_plock_get(void *, struct lm_lockname *, struct file *,
183 struct file_lock *);
184int gdlm_punlock(void *, struct lm_lockname *, struct file *,
185 struct file_lock *);
186#endif 181#endif
187 182
diff --git a/fs/gfs2/locking/dlm/main.c b/fs/gfs2/locking/dlm/main.c
index a0e7eda643ed..b9a03a7ff801 100644
--- a/fs/gfs2/locking/dlm/main.c
+++ b/fs/gfs2/locking/dlm/main.c
@@ -11,8 +11,6 @@
11 11
12#include "lock_dlm.h" 12#include "lock_dlm.h"
13 13
14extern struct lm_lockops gdlm_ops;
15
16static int __init init_lock_dlm(void) 14static int __init init_lock_dlm(void)
17{ 15{
18 int error; 16 int error;
@@ -30,13 +28,6 @@ static int __init init_lock_dlm(void)
30 return error; 28 return error;
31 } 29 }
32 30
33 error = gdlm_plock_init();
34 if (error) {
35 gdlm_sysfs_exit();
36 gfs2_unregister_lockproto(&gdlm_ops);
37 return error;
38 }
39
40 printk(KERN_INFO 31 printk(KERN_INFO
41 "Lock_DLM (built %s %s) installed\n", __DATE__, __TIME__); 32 "Lock_DLM (built %s %s) installed\n", __DATE__, __TIME__);
42 return 0; 33 return 0;
@@ -44,7 +35,6 @@ static int __init init_lock_dlm(void)
44 35
45static void __exit exit_lock_dlm(void) 36static void __exit exit_lock_dlm(void)
46{ 37{
47 gdlm_plock_exit();
48 gdlm_sysfs_exit(); 38 gdlm_sysfs_exit();
49 gfs2_unregister_lockproto(&gdlm_ops); 39 gfs2_unregister_lockproto(&gdlm_ops);
50} 40}
diff --git a/fs/gfs2/locking/dlm/mount.c b/fs/gfs2/locking/dlm/mount.c
index f2efff424224..470bdf650b50 100644
--- a/fs/gfs2/locking/dlm/mount.c
+++ b/fs/gfs2/locking/dlm/mount.c
@@ -236,6 +236,27 @@ static void gdlm_withdraw(void *lockspace)
236 gdlm_kobject_release(ls); 236 gdlm_kobject_release(ls);
237} 237}
238 238
239static int gdlm_plock(void *lockspace, struct lm_lockname *name,
240 struct file *file, int cmd, struct file_lock *fl)
241{
242 struct gdlm_ls *ls = lockspace;
243 return dlm_posix_lock(ls->dlm_lockspace, name->ln_number, file, cmd, fl);
244}
245
246static int gdlm_punlock(void *lockspace, struct lm_lockname *name,
247 struct file *file, struct file_lock *fl)
248{
249 struct gdlm_ls *ls = lockspace;
250 return dlm_posix_unlock(ls->dlm_lockspace, name->ln_number, file, fl);
251}
252
253static int gdlm_plock_get(void *lockspace, struct lm_lockname *name,
254 struct file *file, struct file_lock *fl)
255{
256 struct gdlm_ls *ls = lockspace;
257 return dlm_posix_get(ls->dlm_lockspace, name->ln_number, file, fl);
258}
259
239const struct lm_lockops gdlm_ops = { 260const struct lm_lockops gdlm_ops = {
240 .lm_proto_name = "lock_dlm", 261 .lm_proto_name = "lock_dlm",
241 .lm_mount = gdlm_mount, 262 .lm_mount = gdlm_mount,
diff --git a/fs/gfs2/locking/dlm/plock.c b/fs/gfs2/locking/dlm/plock.c
deleted file mode 100644
index 2ebd374b3143..000000000000
--- a/fs/gfs2/locking/dlm/plock.c
+++ /dev/null
@@ -1,406 +0,0 @@
1/*
2 * Copyright (C) 2005 Red Hat, Inc. All rights reserved.
3 *
4 * This copyrighted material is made available to anyone wishing to use,
5 * modify, copy, or redistribute it subject to the terms and conditions
6 * of the GNU General Public License version 2.
7 */
8
9#include <linux/miscdevice.h>
10#include <linux/lock_dlm_plock.h>
11#include <linux/poll.h>
12
13#include "lock_dlm.h"
14
15
16static spinlock_t ops_lock;
17static struct list_head send_list;
18static struct list_head recv_list;
19static wait_queue_head_t send_wq;
20static wait_queue_head_t recv_wq;
21
22struct plock_op {
23 struct list_head list;
24 int done;
25 struct gdlm_plock_info info;
26};
27
28struct plock_xop {
29 struct plock_op xop;
30 void *callback;
31 void *fl;
32 void *file;
33 struct file_lock flc;
34};
35
36
37static inline void set_version(struct gdlm_plock_info *info)
38{
39 info->version[0] = GDLM_PLOCK_VERSION_MAJOR;
40 info->version[1] = GDLM_PLOCK_VERSION_MINOR;
41 info->version[2] = GDLM_PLOCK_VERSION_PATCH;
42}
43
44static int check_version(struct gdlm_plock_info *info)
45{
46 if ((GDLM_PLOCK_VERSION_MAJOR != info->version[0]) ||
47 (GDLM_PLOCK_VERSION_MINOR < info->version[1])) {
48 log_error("plock device version mismatch: "
49 "kernel (%u.%u.%u), user (%u.%u.%u)",
50 GDLM_PLOCK_VERSION_MAJOR,
51 GDLM_PLOCK_VERSION_MINOR,
52 GDLM_PLOCK_VERSION_PATCH,
53 info->version[0],
54 info->version[1],
55 info->version[2]);
56 return -EINVAL;
57 }
58 return 0;
59}
60
61static void send_op(struct plock_op *op)
62{
63 set_version(&op->info);
64 INIT_LIST_HEAD(&op->list);
65 spin_lock(&ops_lock);
66 list_add_tail(&op->list, &send_list);
67 spin_unlock(&ops_lock);
68 wake_up(&send_wq);
69}
70
71int gdlm_plock(void *lockspace, struct lm_lockname *name,
72 struct file *file, int cmd, struct file_lock *fl)
73{
74 struct gdlm_ls *ls = lockspace;
75 struct plock_op *op;
76 struct plock_xop *xop;
77 int rv;
78
79 xop = kzalloc(sizeof(*xop), GFP_KERNEL);
80 if (!xop)
81 return -ENOMEM;
82
83 op = &xop->xop;
84 op->info.optype = GDLM_PLOCK_OP_LOCK;
85 op->info.pid = fl->fl_pid;
86 op->info.ex = (fl->fl_type == F_WRLCK);
87 op->info.wait = IS_SETLKW(cmd);
88 op->info.fsid = ls->id;
89 op->info.number = name->ln_number;
90 op->info.start = fl->fl_start;
91 op->info.end = fl->fl_end;
92 if (fl->fl_lmops && fl->fl_lmops->fl_grant) {
93 /* fl_owner is lockd which doesn't distinguish
94 processes on the nfs client */
95 op->info.owner = (__u64) fl->fl_pid;
96 xop->callback = fl->fl_lmops->fl_grant;
97 locks_init_lock(&xop->flc);
98 locks_copy_lock(&xop->flc, fl);
99 xop->fl = fl;
100 xop->file = file;
101 } else {
102 op->info.owner = (__u64)(long) fl->fl_owner;
103 xop->callback = NULL;
104 }
105
106 send_op(op);
107
108 if (xop->callback == NULL)
109 wait_event(recv_wq, (op->done != 0));
110 else
111 return -EINPROGRESS;
112
113 spin_lock(&ops_lock);
114 if (!list_empty(&op->list)) {
115 printk(KERN_INFO "plock op on list\n");
116 list_del(&op->list);
117 }
118 spin_unlock(&ops_lock);
119
120 rv = op->info.rv;
121
122 if (!rv) {
123 if (posix_lock_file_wait(file, fl) < 0)
124 log_error("gdlm_plock: vfs lock error %x,%llx",
125 name->ln_type,
126 (unsigned long long)name->ln_number);
127 }
128
129 kfree(xop);
130 return rv;
131}
132
133/* Returns failure iff a successful lock operation should be canceled */
134static int gdlm_plock_callback(struct plock_op *op)
135{
136 struct file *file;
137 struct file_lock *fl;
138 struct file_lock *flc;
139 int (*notify)(void *, void *, int) = NULL;
140 struct plock_xop *xop = (struct plock_xop *)op;
141 int rv = 0;
142
143 spin_lock(&ops_lock);
144 if (!list_empty(&op->list)) {
145 printk(KERN_INFO "plock op on list\n");
146 list_del(&op->list);
147 }
148 spin_unlock(&ops_lock);
149
150 /* check if the following 2 are still valid or make a copy */
151 file = xop->file;
152 flc = &xop->flc;
153 fl = xop->fl;
154 notify = xop->callback;
155
156 if (op->info.rv) {
157 notify(flc, NULL, op->info.rv);
158 goto out;
159 }
160
161 /* got fs lock; bookkeep locally as well: */
162 flc->fl_flags &= ~FL_SLEEP;
163 if (posix_lock_file(file, flc, NULL)) {
164 /*
165 * This can only happen in the case of kmalloc() failure.
166 * The filesystem's own lock is the authoritative lock,
167 * so a failure to get the lock locally is not a disaster.
168 * As long as GFS cannot reliably cancel locks (especially
169 * in a low-memory situation), we're better off ignoring
170 * this failure than trying to recover.
171 */
172 log_error("gdlm_plock: vfs lock error file %p fl %p",
173 file, fl);
174 }
175
176 rv = notify(flc, NULL, 0);
177 if (rv) {
178 /* XXX: We need to cancel the fs lock here: */
179 printk("gfs2 lock granted after lock request failed;"
180 " dangling lock!\n");
181 goto out;
182 }
183
184out:
185 kfree(xop);
186 return rv;
187}
188
189int gdlm_punlock(void *lockspace, struct lm_lockname *name,
190 struct file *file, struct file_lock *fl)
191{
192 struct gdlm_ls *ls = lockspace;
193 struct plock_op *op;
194 int rv;
195
196 op = kzalloc(sizeof(*op), GFP_KERNEL);
197 if (!op)
198 return -ENOMEM;
199
200 if (posix_lock_file_wait(file, fl) < 0)
201 log_error("gdlm_punlock: vfs unlock error %x,%llx",
202 name->ln_type, (unsigned long long)name->ln_number);
203
204 op->info.optype = GDLM_PLOCK_OP_UNLOCK;
205 op->info.pid = fl->fl_pid;
206 op->info.fsid = ls->id;
207 op->info.number = name->ln_number;
208 op->info.start = fl->fl_start;
209 op->info.end = fl->fl_end;
210 if (fl->fl_lmops && fl->fl_lmops->fl_grant)
211 op->info.owner = (__u64) fl->fl_pid;
212 else
213 op->info.owner = (__u64)(long) fl->fl_owner;
214
215 send_op(op);
216 wait_event(recv_wq, (op->done != 0));
217
218 spin_lock(&ops_lock);
219 if (!list_empty(&op->list)) {
220 printk(KERN_INFO "punlock op on list\n");
221 list_del(&op->list);
222 }
223 spin_unlock(&ops_lock);
224
225 rv = op->info.rv;
226
227 if (rv == -ENOENT)
228 rv = 0;
229
230 kfree(op);
231 return rv;
232}
233
234int gdlm_plock_get(void *lockspace, struct lm_lockname *name,
235 struct file *file, struct file_lock *fl)
236{
237 struct gdlm_ls *ls = lockspace;
238 struct plock_op *op;
239 int rv;
240
241 op = kzalloc(sizeof(*op), GFP_KERNEL);
242 if (!op)
243 return -ENOMEM;
244
245 op->info.optype = GDLM_PLOCK_OP_GET;
246 op->info.pid = fl->fl_pid;
247 op->info.ex = (fl->fl_type == F_WRLCK);
248 op->info.fsid = ls->id;
249 op->info.number = name->ln_number;
250 op->info.start = fl->fl_start;
251 op->info.end = fl->fl_end;
252 if (fl->fl_lmops && fl->fl_lmops->fl_grant)
253 op->info.owner = (__u64) fl->fl_pid;
254 else
255 op->info.owner = (__u64)(long) fl->fl_owner;
256
257 send_op(op);
258 wait_event(recv_wq, (op->done != 0));
259
260 spin_lock(&ops_lock);
261 if (!list_empty(&op->list)) {
262 printk(KERN_INFO "plock_get op on list\n");
263 list_del(&op->list);
264 }
265 spin_unlock(&ops_lock);
266
267 /* info.rv from userspace is 1 for conflict, 0 for no-conflict,
268 -ENOENT if there are no locks on the file */
269
270 rv = op->info.rv;
271
272 fl->fl_type = F_UNLCK;
273 if (rv == -ENOENT)
274 rv = 0;
275 else if (rv > 0) {
276 fl->fl_type = (op->info.ex) ? F_WRLCK : F_RDLCK;
277 fl->fl_pid = op->info.pid;
278 fl->fl_start = op->info.start;
279 fl->fl_end = op->info.end;
280 rv = 0;
281 }
282
283 kfree(op);
284 return rv;
285}
286
287/* a read copies out one plock request from the send list */
288static ssize_t dev_read(struct file *file, char __user *u, size_t count,
289 loff_t *ppos)
290{
291 struct gdlm_plock_info info;
292 struct plock_op *op = NULL;
293
294 if (count < sizeof(info))
295 return -EINVAL;
296
297 spin_lock(&ops_lock);
298 if (!list_empty(&send_list)) {
299 op = list_entry(send_list.next, struct plock_op, list);
300 list_move(&op->list, &recv_list);
301 memcpy(&info, &op->info, sizeof(info));
302 }
303 spin_unlock(&ops_lock);
304
305 if (!op)
306 return -EAGAIN;
307
308 if (copy_to_user(u, &info, sizeof(info)))
309 return -EFAULT;
310 return sizeof(info);
311}
312
313/* a write copies in one plock result that should match a plock_op
314 on the recv list */
315static ssize_t dev_write(struct file *file, const char __user *u, size_t count,
316 loff_t *ppos)
317{
318 struct gdlm_plock_info info;
319 struct plock_op *op;
320 int found = 0;
321
322 if (count != sizeof(info))
323 return -EINVAL;
324
325 if (copy_from_user(&info, u, sizeof(info)))
326 return -EFAULT;
327
328 if (check_version(&info))
329 return -EINVAL;
330
331 spin_lock(&ops_lock);
332 list_for_each_entry(op, &recv_list, list) {
333 if (op->info.fsid == info.fsid && op->info.number == info.number &&
334 op->info.owner == info.owner) {
335 list_del_init(&op->list);
336 found = 1;
337 op->done = 1;
338 memcpy(&op->info, &info, sizeof(info));
339 break;
340 }
341 }
342 spin_unlock(&ops_lock);
343
344 if (found) {
345 struct plock_xop *xop;
346 xop = (struct plock_xop *)op;
347 if (xop->callback)
348 count = gdlm_plock_callback(op);
349 else
350 wake_up(&recv_wq);
351 } else
352 printk(KERN_INFO "gdlm dev_write no op %x %llx\n", info.fsid,
353 (unsigned long long)info.number);
354 return count;
355}
356
357static unsigned int dev_poll(struct file *file, poll_table *wait)
358{
359 unsigned int mask = 0;
360
361 poll_wait(file, &send_wq, wait);
362
363 spin_lock(&ops_lock);
364 if (!list_empty(&send_list))
365 mask = POLLIN | POLLRDNORM;
366 spin_unlock(&ops_lock);
367
368 return mask;
369}
370
371static const struct file_operations dev_fops = {
372 .read = dev_read,
373 .write = dev_write,
374 .poll = dev_poll,
375 .owner = THIS_MODULE
376};
377
378static struct miscdevice plock_dev_misc = {
379 .minor = MISC_DYNAMIC_MINOR,
380 .name = GDLM_PLOCK_MISC_NAME,
381 .fops = &dev_fops
382};
383
384int gdlm_plock_init(void)
385{
386 int rv;
387
388 spin_lock_init(&ops_lock);
389 INIT_LIST_HEAD(&send_list);
390 INIT_LIST_HEAD(&recv_list);
391 init_waitqueue_head(&send_wq);
392 init_waitqueue_head(&recv_wq);
393
394 rv = misc_register(&plock_dev_misc);
395 if (rv)
396 printk(KERN_INFO "gdlm_plock_init: misc_register failed %d",
397 rv);
398 return rv;
399}
400
401void gdlm_plock_exit(void)
402{
403 if (misc_deregister(&plock_dev_misc) < 0)
404 printk(KERN_INFO "gdlm_plock_exit: misc_deregister failed");
405}
406
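The file deleted above was GFS2's private plock channel: kernel requests were queued on send_list, handed to a userspace daemon through a misc device (dev_read), and completed when the daemon wrote the decided result back, matched by fsid/number/owner in dev_write. With this patch that role moves to the shared fs/dlm posix-lock code. As a hedged sketch only, the userspace half of the old protocol looked roughly like the loop below; the device node path and the availability of the header to userspace are assumptions, and all error handling is omitted.

#include <fcntl.h>
#include <unistd.h>
#include <linux/lock_dlm_plock.h>	/* struct gdlm_plock_info (assumed visible to userspace) */

static void plock_service_loop(void)
{
	struct gdlm_plock_info info;
	int fd = open("/dev/lock_dlm_plock", O_RDWR);	/* assumed udev name for GDLM_PLOCK_MISC_NAME */

	if (fd < 0)
		return;
	while (read(fd, &info, sizeof(info)) == sizeof(info)) {
		/* decide GDLM_PLOCK_OP_LOCK / UNLOCK / GET cluster-wide here */
		info.rv = 0;				/* 0 = granted / no conflict */
		write(fd, &info, sizeof(info));		/* wakes the waiting kernel op */
	}
	close(fd);
}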
diff --git a/fs/gfs2/locking/dlm/sysfs.c b/fs/gfs2/locking/dlm/sysfs.c
index a87b09839761..8479da47049c 100644
--- a/fs/gfs2/locking/dlm/sysfs.c
+++ b/fs/gfs2/locking/dlm/sysfs.c
@@ -12,8 +12,6 @@
12 12
13#include "lock_dlm.h" 13#include "lock_dlm.h"
14 14
15extern struct lm_lockops gdlm_ops;
16
17static ssize_t proto_name_show(struct gdlm_ls *ls, char *buf) 15static ssize_t proto_name_show(struct gdlm_ls *ls, char *buf)
18{ 16{
19 return sprintf(buf, "%s\n", gdlm_ops.lm_proto_name); 17 return sprintf(buf, "%s\n", gdlm_ops.lm_proto_name);
diff --git a/fs/gfs2/locking/dlm/thread.c b/fs/gfs2/locking/dlm/thread.c
index 521694fc19d6..e53db6fd28ab 100644
--- a/fs/gfs2/locking/dlm/thread.c
+++ b/fs/gfs2/locking/dlm/thread.c
@@ -135,7 +135,15 @@ static void process_complete(struct gdlm_lock *lp)
135 lp->lksb.sb_status, lp->lockname.ln_type, 135 lp->lksb.sb_status, lp->lockname.ln_type,
136 (unsigned long long)lp->lockname.ln_number, 136 (unsigned long long)lp->lockname.ln_number,
137 lp->flags); 137 lp->flags);
138 return; 138 if (lp->lksb.sb_status == -EDEADLOCK &&
139 lp->ls->fsflags & LM_MFLAG_CONV_NODROP) {
140 lp->req = lp->cur;
141 acb.lc_ret |= LM_OUT_CONV_DEADLK;
142 if (lp->cur == DLM_LOCK_IV)
143 lp->lksb.sb_lkid = 0;
144 goto out;
145 } else
146 return;
139 } 147 }
140 148
141 /* 149 /*
diff --git a/fs/gfs2/locking/nolock/main.c b/fs/gfs2/locking/nolock/main.c
index d3b8ce6fbbe3..284a5ece8d94 100644
--- a/fs/gfs2/locking/nolock/main.c
+++ b/fs/gfs2/locking/nolock/main.c
@@ -140,7 +140,7 @@ static int nolock_hold_lvb(void *lock, char **lvbp)
140 struct nolock_lockspace *nl = lock; 140 struct nolock_lockspace *nl = lock;
141 int error = 0; 141 int error = 0;
142 142
143 *lvbp = kzalloc(nl->nl_lvb_size, GFP_KERNEL); 143 *lvbp = kzalloc(nl->nl_lvb_size, GFP_NOFS);
144 if (!*lvbp) 144 if (!*lvbp)
145 error = -ENOMEM; 145 error = -ENOMEM;
146 146
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index 161ab6f2058e..548264b1836d 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -769,8 +769,8 @@ static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
769 sdp->sd_log_commited_revoke += tr->tr_num_revoke - tr->tr_num_revoke_rm; 769 sdp->sd_log_commited_revoke += tr->tr_num_revoke - tr->tr_num_revoke_rm;
770 gfs2_assert_withdraw(sdp, ((int)sdp->sd_log_commited_revoke) >= 0); 770 gfs2_assert_withdraw(sdp, ((int)sdp->sd_log_commited_revoke) >= 0);
771 reserved = calc_reserved(sdp); 771 reserved = calc_reserved(sdp);
772 gfs2_assert_withdraw(sdp, sdp->sd_log_blks_reserved + tr->tr_reserved >= reserved);
772 unused = sdp->sd_log_blks_reserved - reserved + tr->tr_reserved; 773 unused = sdp->sd_log_blks_reserved - reserved + tr->tr_reserved;
773 gfs2_assert_withdraw(sdp, unused >= 0);
774 atomic_add(unused, &sdp->sd_log_blks_free); 774 atomic_add(unused, &sdp->sd_log_blks_free);
775 gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <= 775 gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
776 sdp->sd_jdesc->jd_blocks); 776 sdp->sd_jdesc->jd_blocks);
@@ -779,6 +779,21 @@ static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
779 gfs2_log_unlock(sdp); 779 gfs2_log_unlock(sdp);
780} 780}
781 781
782static void buf_lo_incore_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
783{
784 struct list_head *head = &tr->tr_list_buf;
785 struct gfs2_bufdata *bd;
786
787 gfs2_log_lock(sdp);
788 while (!list_empty(head)) {
789 bd = list_entry(head->next, struct gfs2_bufdata, bd_list_tr);
790 list_del_init(&bd->bd_list_tr);
791 tr->tr_num_buf--;
792 }
793 gfs2_log_unlock(sdp);
794 gfs2_assert_warn(sdp, !tr->tr_num_buf);
795}
796
782/** 797/**
783 * gfs2_log_commit - Commit a transaction to the log 798 * gfs2_log_commit - Commit a transaction to the log
784 * @sdp: the filesystem 799 * @sdp: the filesystem
@@ -790,7 +805,7 @@ static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
790void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr) 805void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
791{ 806{
792 log_refund(sdp, tr); 807 log_refund(sdp, tr);
793 lops_incore_commit(sdp, tr); 808 buf_lo_incore_commit(sdp, tr);
794 809
795 sdp->sd_vfs->s_dirt = 1; 810 sdp->sd_vfs->s_dirt = 1;
796 up_read(&sdp->sd_log_flush_lock); 811 up_read(&sdp->sd_log_flush_lock);
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index fae59d69d01a..4390f6f4047d 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -152,21 +152,6 @@ out:
152 unlock_buffer(bd->bd_bh); 152 unlock_buffer(bd->bd_bh);
153} 153}
154 154
155static void buf_lo_incore_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
156{
157 struct list_head *head = &tr->tr_list_buf;
158 struct gfs2_bufdata *bd;
159
160 gfs2_log_lock(sdp);
161 while (!list_empty(head)) {
162 bd = list_entry(head->next, struct gfs2_bufdata, bd_list_tr);
163 list_del_init(&bd->bd_list_tr);
164 tr->tr_num_buf--;
165 }
166 gfs2_log_unlock(sdp);
167 gfs2_assert_warn(sdp, !tr->tr_num_buf);
168}
169
170static void buf_lo_before_commit(struct gfs2_sbd *sdp) 155static void buf_lo_before_commit(struct gfs2_sbd *sdp)
171{ 156{
172 struct buffer_head *bh; 157 struct buffer_head *bh;
@@ -419,8 +404,10 @@ static int revoke_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
419 blkno = be64_to_cpu(*(__be64 *)(bh->b_data + offset)); 404 blkno = be64_to_cpu(*(__be64 *)(bh->b_data + offset));
420 405
421 error = gfs2_revoke_add(sdp, blkno, start); 406 error = gfs2_revoke_add(sdp, blkno, start);
422 if (error < 0) 407 if (error < 0) {
408 brelse(bh);
423 return error; 409 return error;
410 }
424 else if (error) 411 else if (error)
425 sdp->sd_found_revokes++; 412 sdp->sd_found_revokes++;
426 413
@@ -737,7 +724,6 @@ static void databuf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
737 724
738const struct gfs2_log_operations gfs2_buf_lops = { 725const struct gfs2_log_operations gfs2_buf_lops = {
739 .lo_add = buf_lo_add, 726 .lo_add = buf_lo_add,
740 .lo_incore_commit = buf_lo_incore_commit,
741 .lo_before_commit = buf_lo_before_commit, 727 .lo_before_commit = buf_lo_before_commit,
742 .lo_after_commit = buf_lo_after_commit, 728 .lo_after_commit = buf_lo_after_commit,
743 .lo_before_scan = buf_lo_before_scan, 729 .lo_before_scan = buf_lo_before_scan,
@@ -763,7 +749,6 @@ const struct gfs2_log_operations gfs2_rg_lops = {
763 749
764const struct gfs2_log_operations gfs2_databuf_lops = { 750const struct gfs2_log_operations gfs2_databuf_lops = {
765 .lo_add = databuf_lo_add, 751 .lo_add = databuf_lo_add,
766 .lo_incore_commit = buf_lo_incore_commit,
767 .lo_before_commit = databuf_lo_before_commit, 752 .lo_before_commit = databuf_lo_before_commit,
768 .lo_after_commit = databuf_lo_after_commit, 753 .lo_after_commit = databuf_lo_after_commit,
769 .lo_scan_elements = databuf_lo_scan_elements, 754 .lo_scan_elements = databuf_lo_scan_elements,
diff --git a/fs/gfs2/lops.h b/fs/gfs2/lops.h
index 41a00df75587..3c0b2737658a 100644
--- a/fs/gfs2/lops.h
+++ b/fs/gfs2/lops.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. 2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved. 3 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
4 * 4 *
5 * This copyrighted material is made available to anyone wishing to use, 5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions 6 * modify, copy, or redistribute it subject to the terms and conditions
@@ -57,15 +57,6 @@ static inline void lops_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
57 le->le_ops->lo_add(sdp, le); 57 le->le_ops->lo_add(sdp, le);
58} 58}
59 59
60static inline void lops_incore_commit(struct gfs2_sbd *sdp,
61 struct gfs2_trans *tr)
62{
63 int x;
64 for (x = 0; gfs2_log_ops[x]; x++)
65 if (gfs2_log_ops[x]->lo_incore_commit)
66 gfs2_log_ops[x]->lo_incore_commit(sdp, tr);
67}
68
69static inline void lops_before_commit(struct gfs2_sbd *sdp) 60static inline void lops_before_commit(struct gfs2_sbd *sdp)
70{ 61{
71 int x; 62 int x;
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
index 9c7765c12d62..053e2ebbbd50 100644
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -89,6 +89,12 @@ static int __init init_gfs2_fs(void)
89 if (!gfs2_bufdata_cachep) 89 if (!gfs2_bufdata_cachep)
90 goto fail; 90 goto fail;
91 91
92 gfs2_rgrpd_cachep = kmem_cache_create("gfs2_rgrpd",
93 sizeof(struct gfs2_rgrpd),
94 0, 0, NULL);
95 if (!gfs2_rgrpd_cachep)
96 goto fail;
97
92 error = register_filesystem(&gfs2_fs_type); 98 error = register_filesystem(&gfs2_fs_type);
93 if (error) 99 if (error)
94 goto fail; 100 goto fail;
@@ -108,6 +114,9 @@ fail_unregister:
108fail: 114fail:
109 gfs2_glock_exit(); 115 gfs2_glock_exit();
110 116
117 if (gfs2_rgrpd_cachep)
118 kmem_cache_destroy(gfs2_rgrpd_cachep);
119
111 if (gfs2_bufdata_cachep) 120 if (gfs2_bufdata_cachep)
112 kmem_cache_destroy(gfs2_bufdata_cachep); 121 kmem_cache_destroy(gfs2_bufdata_cachep);
113 122
@@ -133,6 +142,7 @@ static void __exit exit_gfs2_fs(void)
133 unregister_filesystem(&gfs2_fs_type); 142 unregister_filesystem(&gfs2_fs_type);
134 unregister_filesystem(&gfs2meta_fs_type); 143 unregister_filesystem(&gfs2meta_fs_type);
135 144
145 kmem_cache_destroy(gfs2_rgrpd_cachep);
136 kmem_cache_destroy(gfs2_bufdata_cachep); 146 kmem_cache_destroy(gfs2_bufdata_cachep);
137 kmem_cache_destroy(gfs2_inode_cachep); 147 kmem_cache_destroy(gfs2_inode_cachep);
138 kmem_cache_destroy(gfs2_glock_cachep); 148 kmem_cache_destroy(gfs2_glock_cachep);
diff --git a/fs/gfs2/ops_address.c b/fs/gfs2/ops_address.c
index ac772b6d9dbb..90a04a6e3789 100644
--- a/fs/gfs2/ops_address.c
+++ b/fs/gfs2/ops_address.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. 2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved. 3 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
4 * 4 *
5 * This copyrighted material is made available to anyone wishing to use, 5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions 6 * modify, copy, or redistribute it subject to the terms and conditions
@@ -21,7 +21,6 @@
21#include <linux/gfs2_ondisk.h> 21#include <linux/gfs2_ondisk.h>
22#include <linux/lm_interface.h> 22#include <linux/lm_interface.h>
23#include <linux/backing-dev.h> 23#include <linux/backing-dev.h>
24#include <linux/pagevec.h>
25 24
26#include "gfs2.h" 25#include "gfs2.h"
27#include "incore.h" 26#include "incore.h"
@@ -104,11 +103,9 @@ static int gfs2_writepage_common(struct page *page,
104 loff_t i_size = i_size_read(inode); 103 loff_t i_size = i_size_read(inode);
105 pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT; 104 pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
106 unsigned offset; 105 unsigned offset;
107 int ret = -EIO;
108 106
109 if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl))) 107 if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
110 goto out; 108 goto out;
111 ret = 0;
112 if (current->journal_info) 109 if (current->journal_info)
113 goto redirty; 110 goto redirty;
114 /* Is the page fully outside i_size? (truncate in progress) */ 111 /* Is the page fully outside i_size? (truncate in progress) */
@@ -280,7 +277,7 @@ static int gfs2_write_jdata_pagevec(struct address_space *mapping,
280 int i; 277 int i;
281 int ret; 278 int ret;
282 279
283 ret = gfs2_trans_begin(sdp, nrblocks, 0); 280 ret = gfs2_trans_begin(sdp, nrblocks, nrblocks);
284 if (ret < 0) 281 if (ret < 0)
285 return ret; 282 return ret;
286 283
@@ -510,23 +507,26 @@ static int __gfs2_readpage(void *file, struct page *page)
510static int gfs2_readpage(struct file *file, struct page *page) 507static int gfs2_readpage(struct file *file, struct page *page)
511{ 508{
512 struct gfs2_inode *ip = GFS2_I(page->mapping->host); 509 struct gfs2_inode *ip = GFS2_I(page->mapping->host);
513 struct gfs2_holder gh; 510 struct gfs2_holder *gh;
514 int error; 511 int error;
515 512
516 gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME|LM_FLAG_TRY_1CB, &gh); 513 gh = gfs2_glock_is_locked_by_me(ip->i_gl);
517 error = gfs2_glock_nq_atime(&gh); 514 if (!gh) {
518 if (unlikely(error)) { 515 gh = kmalloc(sizeof(struct gfs2_holder), GFP_NOFS);
516 if (!gh)
517 return -ENOBUFS;
518 gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, gh);
519 unlock_page(page); 519 unlock_page(page);
520 goto out; 520 error = gfs2_glock_nq_atime(gh);
521 if (likely(error != 0))
522 goto out;
523 return AOP_TRUNCATED_PAGE;
521 } 524 }
522 error = __gfs2_readpage(file, page); 525 error = __gfs2_readpage(file, page);
523 gfs2_glock_dq(&gh); 526 gfs2_glock_dq(gh);
524out: 527out:
525 gfs2_holder_uninit(&gh); 528 gfs2_holder_uninit(gh);
526 if (error == GLR_TRYFAILED) { 529 kfree(gh);
527 yield();
528 return AOP_TRUNCATED_PAGE;
529 }
530 return error; 530 return error;
531} 531}
532 532
@@ -648,15 +648,15 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping,
648 648
649 if (alloc_required) { 649 if (alloc_required) {
650 al = gfs2_alloc_get(ip); 650 al = gfs2_alloc_get(ip);
651 if (!al) {
652 error = -ENOMEM;
653 goto out_unlock;
654 }
651 655
652 error = gfs2_quota_lock(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE); 656 error = gfs2_quota_lock_check(ip);
653 if (error) 657 if (error)
654 goto out_alloc_put; 658 goto out_alloc_put;
655 659
656 error = gfs2_quota_check(ip, ip->i_inode.i_uid, ip->i_inode.i_gid);
657 if (error)
658 goto out_qunlock;
659
660 al->al_requested = data_blocks + ind_blocks; 660 al->al_requested = data_blocks + ind_blocks;
661 error = gfs2_inplace_reserve(ip); 661 error = gfs2_inplace_reserve(ip);
662 if (error) 662 if (error)
@@ -828,7 +828,7 @@ static int gfs2_write_end(struct file *file, struct address_space *mapping,
828 unsigned int to = from + len; 828 unsigned int to = from + len;
829 int ret; 829 int ret;
830 830
831 BUG_ON(gfs2_glock_is_locked_by_me(ip->i_gl) == 0); 831 BUG_ON(gfs2_glock_is_locked_by_me(ip->i_gl) == NULL);
832 832
833 ret = gfs2_meta_inode_buffer(ip, &dibh); 833 ret = gfs2_meta_inode_buffer(ip, &dibh);
834 if (unlikely(ret)) { 834 if (unlikely(ret)) {
diff --git a/fs/gfs2/ops_dentry.c b/fs/gfs2/ops_dentry.c
index 793e334d098e..4a5e676b4420 100644
--- a/fs/gfs2/ops_dentry.c
+++ b/fs/gfs2/ops_dentry.c
@@ -43,7 +43,7 @@ static int gfs2_drevalidate(struct dentry *dentry, struct nameidata *nd)
43 struct gfs2_holder d_gh; 43 struct gfs2_holder d_gh;
44 struct gfs2_inode *ip = NULL; 44 struct gfs2_inode *ip = NULL;
45 int error; 45 int error;
46 int had_lock=0; 46 int had_lock = 0;
47 47
48 if (inode) { 48 if (inode) {
49 if (is_bad_inode(inode)) 49 if (is_bad_inode(inode))
@@ -54,7 +54,7 @@ static int gfs2_drevalidate(struct dentry *dentry, struct nameidata *nd)
54 if (sdp->sd_args.ar_localcaching) 54 if (sdp->sd_args.ar_localcaching)
55 goto valid; 55 goto valid;
56 56
57 had_lock = gfs2_glock_is_locked_by_me(dip->i_gl); 57 had_lock = (gfs2_glock_is_locked_by_me(dip->i_gl) != NULL);
58 if (!had_lock) { 58 if (!had_lock) {
59 error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh); 59 error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
60 if (error) 60 if (error)
diff --git a/fs/gfs2/ops_export.c b/fs/gfs2/ops_export.c
index 334c7f85351b..990d9f4bc463 100644
--- a/fs/gfs2/ops_export.c
+++ b/fs/gfs2/ops_export.c
@@ -204,8 +204,6 @@ static struct dentry *gfs2_get_dentry(struct super_block *sb,
204 inode = gfs2_inode_lookup(sb, DT_UNKNOWN, 204 inode = gfs2_inode_lookup(sb, DT_UNKNOWN,
205 inum->no_addr, 205 inum->no_addr,
206 0, 0); 206 0, 0);
207 if (!inode)
208 goto fail;
209 if (IS_ERR(inode)) { 207 if (IS_ERR(inode)) {
210 error = PTR_ERR(inode); 208 error = PTR_ERR(inode);
211 goto fail; 209 goto fail;
diff --git a/fs/gfs2/ops_file.c b/fs/gfs2/ops_file.c
index f4842f2548cd..e1b7d525a066 100644
--- a/fs/gfs2/ops_file.c
+++ b/fs/gfs2/ops_file.c
@@ -30,7 +30,6 @@
30#include "glock.h" 30#include "glock.h"
31#include "glops.h" 31#include "glops.h"
32#include "inode.h" 32#include "inode.h"
33#include "lm.h"
34#include "log.h" 33#include "log.h"
35#include "meta_io.h" 34#include "meta_io.h"
36#include "quota.h" 35#include "quota.h"
@@ -39,6 +38,7 @@
39#include "util.h" 38#include "util.h"
40#include "eaops.h" 39#include "eaops.h"
41#include "ops_address.h" 40#include "ops_address.h"
41#include "ops_inode.h"
42 42
43/** 43/**
44 * gfs2_llseek - seek to a location in a file 44 * gfs2_llseek - seek to a location in a file
@@ -369,12 +369,9 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct page *page)
369 if (al == NULL) 369 if (al == NULL)
370 goto out_unlock; 370 goto out_unlock;
371 371
372 ret = gfs2_quota_lock(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE); 372 ret = gfs2_quota_lock_check(ip);
373 if (ret) 373 if (ret)
374 goto out_alloc_put; 374 goto out_alloc_put;
375 ret = gfs2_quota_check(ip, ip->i_inode.i_uid, ip->i_inode.i_gid);
376 if (ret)
377 goto out_quota_unlock;
378 al->al_requested = data_blocks + ind_blocks; 375 al->al_requested = data_blocks + ind_blocks;
379 ret = gfs2_inplace_reserve(ip); 376 ret = gfs2_inplace_reserve(ip);
380 if (ret) 377 if (ret)
@@ -596,6 +593,36 @@ static int gfs2_setlease(struct file *file, long arg, struct file_lock **fl)
596 return generic_setlease(file, arg, fl); 593 return generic_setlease(file, arg, fl);
597} 594}
598 595
596static int gfs2_lm_plock_get(struct gfs2_sbd *sdp, struct lm_lockname *name,
597 struct file *file, struct file_lock *fl)
598{
599 int error = -EIO;
600 if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
601 error = sdp->sd_lockstruct.ls_ops->lm_plock_get(
602 sdp->sd_lockstruct.ls_lockspace, name, file, fl);
603 return error;
604}
605
606static int gfs2_lm_plock(struct gfs2_sbd *sdp, struct lm_lockname *name,
607 struct file *file, int cmd, struct file_lock *fl)
608{
609 int error = -EIO;
610 if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
611 error = sdp->sd_lockstruct.ls_ops->lm_plock(
612 sdp->sd_lockstruct.ls_lockspace, name, file, cmd, fl);
613 return error;
614}
615
616static int gfs2_lm_punlock(struct gfs2_sbd *sdp, struct lm_lockname *name,
617 struct file *file, struct file_lock *fl)
618{
619 int error = -EIO;
620 if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
621 error = sdp->sd_lockstruct.ls_ops->lm_punlock(
622 sdp->sd_lockstruct.ls_lockspace, name, file, fl);
623 return error;
624}
625
599/** 626/**
600 * gfs2_lock - acquire/release a posix lock on a file 627 * gfs2_lock - acquire/release a posix lock on a file
601 * @file: the file pointer 628 * @file: the file pointer
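The three wrappers added above reinstate, inside ops_file.c, the per-call shutdown check previously provided by the removed lm.c helpers: each returns -EIO once SDF_SHUTDOWN is set and otherwise calls straight through the lock module's ops table. A minimal sketch of the kind of caller they serve (the gfs2_lock() whose kernel-doc header is cut off above); this is illustrative, not the patch's exact body, and the inode field name is an assumption:

static int example_gfs2_lock(struct file *file, int cmd, struct file_lock *fl)
{
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(file->f_mapping->host);
	struct lm_lockname name = {
		.ln_number = ip->i_no_addr,	/* assumed field name for the inode's block address */
		.ln_type = LM_TYPE_PLOCK,
	};

	if (!(fl->fl_flags & FL_POSIX))
		return -ENOLCK;
	if (IS_GETLK(cmd))
		return gfs2_lm_plock_get(sdp, &name, file, fl);
	if (fl->fl_type == F_UNLCK)
		return gfs2_lm_punlock(sdp, &name, file, fl);
	return gfs2_lm_plock(sdp, &name, file, cmd, fl);
}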
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index 4bee6aa845e4..ef9c6c4f80f6 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. 2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved. 3 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
4 * 4 *
5 * This copyrighted material is made available to anyone wishing to use, 5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions 6 * modify, copy, or redistribute it subject to the terms and conditions
@@ -26,7 +26,6 @@
26#include "glock.h" 26#include "glock.h"
27#include "glops.h" 27#include "glops.h"
28#include "inode.h" 28#include "inode.h"
29#include "lm.h"
30#include "mount.h" 29#include "mount.h"
31#include "ops_fstype.h" 30#include "ops_fstype.h"
32#include "ops_dentry.h" 31#include "ops_dentry.h"
@@ -363,6 +362,13 @@ static int map_journal_extents(struct gfs2_sbd *sdp)
363 return rc; 362 return rc;
364} 363}
365 364
365static void gfs2_lm_others_may_mount(struct gfs2_sbd *sdp)
366{
367 if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
368 sdp->sd_lockstruct.ls_ops->lm_others_may_mount(
369 sdp->sd_lockstruct.ls_lockspace);
370}
371
366static int init_journal(struct gfs2_sbd *sdp, int undo) 372static int init_journal(struct gfs2_sbd *sdp, int undo)
367{ 373{
368 struct gfs2_holder ji_gh; 374 struct gfs2_holder ji_gh;
@@ -542,7 +548,7 @@ static int init_inodes(struct gfs2_sbd *sdp, int undo)
542 } 548 }
543 ip = GFS2_I(sdp->sd_rindex); 549 ip = GFS2_I(sdp->sd_rindex);
544 set_bit(GLF_STICKY, &ip->i_gl->gl_flags); 550 set_bit(GLF_STICKY, &ip->i_gl->gl_flags);
545 sdp->sd_rindex_vn = ip->i_gl->gl_vn - 1; 551 sdp->sd_rindex_uptodate = 0;
546 552
547 /* Read in the quota inode */ 553 /* Read in the quota inode */
548 sdp->sd_quota_inode = gfs2_lookup_simple(sdp->sd_master_dir, "quota"); 554 sdp->sd_quota_inode = gfs2_lookup_simple(sdp->sd_master_dir, "quota");
@@ -705,6 +711,69 @@ fail:
705} 711}
706 712
707/** 713/**
714 * gfs2_lm_mount - mount a locking protocol
715 * @sdp: the filesystem
716 * @args: mount arguments
717 * @silent: if 1, don't complain if the FS isn't a GFS2 fs
718 *
719 * Returns: errno
720 */
721
722static int gfs2_lm_mount(struct gfs2_sbd *sdp, int silent)
723{
724 char *proto = sdp->sd_proto_name;
725 char *table = sdp->sd_table_name;
726 int flags = LM_MFLAG_CONV_NODROP;
727 int error;
728
729 if (sdp->sd_args.ar_spectator)
730 flags |= LM_MFLAG_SPECTATOR;
731
732 fs_info(sdp, "Trying to join cluster \"%s\", \"%s\"\n", proto, table);
733
734 error = gfs2_mount_lockproto(proto, table, sdp->sd_args.ar_hostdata,
735 gfs2_glock_cb, sdp,
736 GFS2_MIN_LVB_SIZE, flags,
737 &sdp->sd_lockstruct, &sdp->sd_kobj);
738 if (error) {
739 fs_info(sdp, "can't mount proto=%s, table=%s, hostdata=%s\n",
740 proto, table, sdp->sd_args.ar_hostdata);
741 goto out;
742 }
743
744 if (gfs2_assert_warn(sdp, sdp->sd_lockstruct.ls_lockspace) ||
745 gfs2_assert_warn(sdp, sdp->sd_lockstruct.ls_ops) ||
746 gfs2_assert_warn(sdp, sdp->sd_lockstruct.ls_lvb_size >=
747 GFS2_MIN_LVB_SIZE)) {
748 gfs2_unmount_lockproto(&sdp->sd_lockstruct);
749 goto out;
750 }
751
752 if (sdp->sd_args.ar_spectator)
753 snprintf(sdp->sd_fsname, GFS2_FSNAME_LEN, "%s.s", table);
754 else
755 snprintf(sdp->sd_fsname, GFS2_FSNAME_LEN, "%s.%u", table,
756 sdp->sd_lockstruct.ls_jid);
757
758 fs_info(sdp, "Joined cluster. Now mounting FS...\n");
759
760 if ((sdp->sd_lockstruct.ls_flags & LM_LSFLAG_LOCAL) &&
761 !sdp->sd_args.ar_ignore_local_fs) {
762 sdp->sd_args.ar_localflocks = 1;
763 sdp->sd_args.ar_localcaching = 1;
764 }
765
766out:
767 return error;
768}
769
770void gfs2_lm_unmount(struct gfs2_sbd *sdp)
771{
772 if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
773 gfs2_unmount_lockproto(&sdp->sd_lockstruct);
774}
775
776/**
708 * fill_super - Read in superblock 777 * fill_super - Read in superblock
709 * @sb: The VFS superblock 778 * @sb: The VFS superblock
710 * @data: Mount options 779 * @data: Mount options
@@ -874,7 +943,6 @@ static struct super_block* get_gfs2_sb(const char *dev_name)
874{ 943{
875 struct kstat stat; 944 struct kstat stat;
876 struct nameidata nd; 945 struct nameidata nd;
877 struct file_system_type *fstype;
878 struct super_block *sb = NULL, *s; 946 struct super_block *sb = NULL, *s;
879 int error; 947 int error;
880 948
@@ -886,8 +954,7 @@ static struct super_block* get_gfs2_sb(const char *dev_name)
886 } 954 }
887 error = vfs_getattr(nd.path.mnt, nd.path.dentry, &stat); 955 error = vfs_getattr(nd.path.mnt, nd.path.dentry, &stat);
888 956
889 fstype = get_fs_type("gfs2"); 957 list_for_each_entry(s, &gfs2_fs_type.fs_supers, s_instances) {
890 list_for_each_entry(s, &fstype->fs_supers, s_instances) {
891 if ((S_ISBLK(stat.mode) && s->s_dev == stat.rdev) || 958 if ((S_ISBLK(stat.mode) && s->s_dev == stat.rdev) ||
892 (S_ISDIR(stat.mode) && 959 (S_ISDIR(stat.mode) &&
893 s == nd.path.dentry->d_inode->i_sb)) { 960 s == nd.path.dentry->d_inode->i_sb)) {
@@ -931,7 +998,6 @@ static int gfs2_get_sb_meta(struct file_system_type *fs_type, int flags,
931 error = PTR_ERR(new); 998 error = PTR_ERR(new);
932 goto error; 999 goto error;
933 } 1000 }
934 module_put(fs_type->owner);
935 new->s_flags = flags; 1001 new->s_flags = flags;
936 strlcpy(new->s_id, sb->s_id, sizeof(new->s_id)); 1002 strlcpy(new->s_id, sb->s_id, sizeof(new->s_id));
937 sb_set_blocksize(new, sb->s_blocksize); 1003 sb_set_blocksize(new, sb->s_blocksize);
diff --git a/fs/gfs2/ops_inode.c b/fs/gfs2/ops_inode.c
index e87412902bed..2686ad4c0029 100644
--- a/fs/gfs2/ops_inode.c
+++ b/fs/gfs2/ops_inode.c
@@ -200,15 +200,15 @@ static int gfs2_link(struct dentry *old_dentry, struct inode *dir,
200 200
201 if (alloc_required) { 201 if (alloc_required) {
202 struct gfs2_alloc *al = gfs2_alloc_get(dip); 202 struct gfs2_alloc *al = gfs2_alloc_get(dip);
203 if (!al) {
204 error = -ENOMEM;
205 goto out_gunlock;
206 }
203 207
204 error = gfs2_quota_lock(dip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE); 208 error = gfs2_quota_lock_check(dip);
205 if (error) 209 if (error)
206 goto out_alloc; 210 goto out_alloc;
207 211
208 error = gfs2_quota_check(dip, dip->i_inode.i_uid, dip->i_inode.i_gid);
209 if (error)
210 goto out_gunlock_q;
211
212 al->al_requested = sdp->sd_max_dirres; 212 al->al_requested = sdp->sd_max_dirres;
213 213
214 error = gfs2_inplace_reserve(dip); 214 error = gfs2_inplace_reserve(dip);
@@ -716,15 +716,15 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
716 716
717 if (alloc_required) { 717 if (alloc_required) {
718 struct gfs2_alloc *al = gfs2_alloc_get(ndip); 718 struct gfs2_alloc *al = gfs2_alloc_get(ndip);
719 if (!al) {
720 error = -ENOMEM;
721 goto out_gunlock;
722 }
719 723
720 error = gfs2_quota_lock(ndip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE); 724 error = gfs2_quota_lock_check(ndip);
721 if (error) 725 if (error)
722 goto out_alloc; 726 goto out_alloc;
723 727
724 error = gfs2_quota_check(ndip, ndip->i_inode.i_uid, ndip->i_inode.i_gid);
725 if (error)
726 goto out_gunlock_q;
727
728 al->al_requested = sdp->sd_max_dirres; 728 al->al_requested = sdp->sd_max_dirres;
729 729
730 error = gfs2_inplace_reserve(ndip); 730 error = gfs2_inplace_reserve(ndip);
@@ -898,7 +898,7 @@ static int gfs2_permission(struct inode *inode, int mask, struct nameidata *nd)
898 int error; 898 int error;
899 int unlock = 0; 899 int unlock = 0;
900 900
901 if (gfs2_glock_is_locked_by_me(ip->i_gl) == 0) { 901 if (gfs2_glock_is_locked_by_me(ip->i_gl) == NULL) {
902 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh); 902 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
903 if (error) 903 if (error)
904 return error; 904 return error;
@@ -953,7 +953,8 @@ static int setattr_chown(struct inode *inode, struct iattr *attr)
953 if (!(attr->ia_valid & ATTR_GID) || ogid == ngid) 953 if (!(attr->ia_valid & ATTR_GID) || ogid == ngid)
954 ogid = ngid = NO_QUOTA_CHANGE; 954 ogid = ngid = NO_QUOTA_CHANGE;
955 955
956 gfs2_alloc_get(ip); 956 if (!gfs2_alloc_get(ip))
957 return -ENOMEM;
957 958
958 error = gfs2_quota_lock(ip, nuid, ngid); 959 error = gfs2_quota_lock(ip, nuid, ngid);
959 if (error) 960 if (error)
@@ -981,8 +982,9 @@ static int setattr_chown(struct inode *inode, struct iattr *attr)
981 brelse(dibh); 982 brelse(dibh);
982 983
983 if (ouid != NO_QUOTA_CHANGE || ogid != NO_QUOTA_CHANGE) { 984 if (ouid != NO_QUOTA_CHANGE || ogid != NO_QUOTA_CHANGE) {
984 gfs2_quota_change(ip, -ip->i_di.di_blocks, ouid, ogid); 985 u64 blocks = gfs2_get_inode_blocks(&ip->i_inode);
985 gfs2_quota_change(ip, ip->i_di.di_blocks, nuid, ngid); 986 gfs2_quota_change(ip, -blocks, ouid, ogid);
987 gfs2_quota_change(ip, blocks, nuid, ngid);
986 } 988 }
987 989
988out_end_trans: 990out_end_trans:
@@ -1064,7 +1066,7 @@ static int gfs2_getattr(struct vfsmount *mnt, struct dentry *dentry,
1064 int error; 1066 int error;
1065 int unlock = 0; 1067 int unlock = 0;
1066 1068
1067 if (gfs2_glock_is_locked_by_me(ip->i_gl) == 0) { 1069 if (gfs2_glock_is_locked_by_me(ip->i_gl) == NULL) {
1068 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &gh); 1070 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &gh);
1069 if (error) 1071 if (error)
1070 return error; 1072 return error;
@@ -1148,16 +1150,6 @@ const struct inode_operations gfs2_file_iops = {
1148 .removexattr = gfs2_removexattr, 1150 .removexattr = gfs2_removexattr,
1149}; 1151};
1150 1152
1151const struct inode_operations gfs2_dev_iops = {
1152 .permission = gfs2_permission,
1153 .setattr = gfs2_setattr,
1154 .getattr = gfs2_getattr,
1155 .setxattr = gfs2_setxattr,
1156 .getxattr = gfs2_getxattr,
1157 .listxattr = gfs2_listxattr,
1158 .removexattr = gfs2_removexattr,
1159};
1160
1161const struct inode_operations gfs2_dir_iops = { 1153const struct inode_operations gfs2_dir_iops = {
1162 .create = gfs2_create, 1154 .create = gfs2_create,
1163 .lookup = gfs2_lookup, 1155 .lookup = gfs2_lookup,
diff --git a/fs/gfs2/ops_inode.h b/fs/gfs2/ops_inode.h
index fd8cee231e1d..14b4b797622a 100644
--- a/fs/gfs2/ops_inode.h
+++ b/fs/gfs2/ops_inode.h
@@ -15,7 +15,6 @@
15extern const struct inode_operations gfs2_file_iops; 15extern const struct inode_operations gfs2_file_iops;
16extern const struct inode_operations gfs2_dir_iops; 16extern const struct inode_operations gfs2_dir_iops;
17extern const struct inode_operations gfs2_symlink_iops; 17extern const struct inode_operations gfs2_symlink_iops;
18extern const struct inode_operations gfs2_dev_iops;
19extern const struct file_operations gfs2_file_fops; 18extern const struct file_operations gfs2_file_fops;
20extern const struct file_operations gfs2_dir_fops; 19extern const struct file_operations gfs2_dir_fops;
21extern const struct file_operations gfs2_file_fops_nolock; 20extern const struct file_operations gfs2_file_fops_nolock;
diff --git a/fs/gfs2/ops_super.c b/fs/gfs2/ops_super.c
index 5e524217944a..2278c68b7e35 100644
--- a/fs/gfs2/ops_super.c
+++ b/fs/gfs2/ops_super.c
@@ -25,7 +25,6 @@
25#include "incore.h" 25#include "incore.h"
26#include "glock.h" 26#include "glock.h"
27#include "inode.h" 27#include "inode.h"
28#include "lm.h"
29#include "log.h" 28#include "log.h"
30#include "mount.h" 29#include "mount.h"
31#include "ops_super.h" 30#include "ops_super.h"
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index a08dabd6ce90..56aaf915c59a 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -94,7 +94,7 @@ static int qd_alloc(struct gfs2_sbd *sdp, int user, u32 id,
94 struct gfs2_quota_data *qd; 94 struct gfs2_quota_data *qd;
95 int error; 95 int error;
96 96
97 qd = kzalloc(sizeof(struct gfs2_quota_data), GFP_KERNEL); 97 qd = kzalloc(sizeof(struct gfs2_quota_data), GFP_NOFS);
98 if (!qd) 98 if (!qd)
99 return -ENOMEM; 99 return -ENOMEM;
100 100
@@ -616,16 +616,9 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
616 s64 value; 616 s64 value;
617 int err = -EIO; 617 int err = -EIO;
618 618
619 if (gfs2_is_stuffed(ip)) { 619 if (gfs2_is_stuffed(ip))
620 struct gfs2_alloc *al = NULL;
621 al = gfs2_alloc_get(ip);
622 /* just request 1 blk */
623 al->al_requested = 1;
624 gfs2_inplace_reserve(ip);
625 gfs2_unstuff_dinode(ip, NULL); 620 gfs2_unstuff_dinode(ip, NULL);
626 gfs2_inplace_release(ip); 621
627 gfs2_alloc_put(ip);
628 }
629 page = grab_cache_page(mapping, index); 622 page = grab_cache_page(mapping, index);
630 if (!page) 623 if (!page)
631 return -ENOMEM; 624 return -ENOMEM;
@@ -690,14 +683,14 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
690 unsigned int qx, x; 683 unsigned int qx, x;
691 struct gfs2_quota_data *qd; 684 struct gfs2_quota_data *qd;
692 loff_t offset; 685 loff_t offset;
693 unsigned int nalloc = 0; 686 unsigned int nalloc = 0, blocks;
694 struct gfs2_alloc *al = NULL; 687 struct gfs2_alloc *al = NULL;
695 int error; 688 int error;
696 689
697 gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota), 690 gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
698 &data_blocks, &ind_blocks); 691 &data_blocks, &ind_blocks);
699 692
700 ghs = kcalloc(num_qd, sizeof(struct gfs2_holder), GFP_KERNEL); 693 ghs = kcalloc(num_qd, sizeof(struct gfs2_holder), GFP_NOFS);
701 if (!ghs) 694 if (!ghs)
702 return -ENOMEM; 695 return -ENOMEM;
703 696
@@ -727,30 +720,33 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
727 nalloc++; 720 nalloc++;
728 } 721 }
729 722
730 if (nalloc) { 723 al = gfs2_alloc_get(ip);
731 al = gfs2_alloc_get(ip); 724 if (!al) {
725 error = -ENOMEM;
726 goto out_gunlock;
727 }
728 /*
729 * 1 blk for unstuffing inode if stuffed. We add this extra
730 * block to the reservation unconditionally. If the inode
731 * doesn't need unstuffing, the block will be released to the
732 * rgrp since it won't be allocated during the transaction
733 */
734 al->al_requested = 1;
735 /* +1 in the end for block requested above for unstuffing */
736 blocks = num_qd * data_blocks + RES_DINODE + num_qd + 1;
732 737
733 al->al_requested = nalloc * (data_blocks + ind_blocks); 738 if (nalloc)
739 al->al_requested += nalloc * (data_blocks + ind_blocks);
740 error = gfs2_inplace_reserve(ip);
741 if (error)
742 goto out_alloc;
734 743
735 error = gfs2_inplace_reserve(ip); 744 if (nalloc)
736 if (error) 745 blocks += al->al_rgd->rd_length + nalloc * ind_blocks + RES_STATFS;
737 goto out_alloc; 746
738 747 error = gfs2_trans_begin(sdp, blocks, 0);
739 error = gfs2_trans_begin(sdp, 748 if (error)
740 al->al_rgd->rd_length + 749 goto out_ipres;
741 num_qd * data_blocks +
742 nalloc * ind_blocks +
743 RES_DINODE + num_qd +
744 RES_STATFS, 0);
745 if (error)
746 goto out_ipres;
747 } else {
748 error = gfs2_trans_begin(sdp,
749 num_qd * data_blocks +
750 RES_DINODE + num_qd, 0);
751 if (error)
752 goto out_gunlock;
753 }
754 750
755 for (x = 0; x < num_qd; x++) { 751 for (x = 0; x < num_qd; x++) {
756 qd = qda[x]; 752 qd = qda[x];
@@ -769,11 +765,9 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
769out_end_trans: 765out_end_trans:
770 gfs2_trans_end(sdp); 766 gfs2_trans_end(sdp);
771out_ipres: 767out_ipres:
772 if (nalloc) 768 gfs2_inplace_release(ip);
773 gfs2_inplace_release(ip);
774out_alloc: 769out_alloc:
775 if (nalloc) 770 gfs2_alloc_put(ip);
776 gfs2_alloc_put(ip);
777out_gunlock: 771out_gunlock:
778 gfs2_glock_dq_uninit(&i_gh); 772 gfs2_glock_dq_uninit(&i_gh);
779out: 773out:
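A rough worked example of the reworked reservation above (not from the patch, and assuming RES_DINODE accounts for a single block): syncing two quota entries that each fit in one data block, with no new quota-file blocks required (nalloc == 0), now asks the transaction for

	blocks = num_qd * data_blocks + RES_DINODE + num_qd + 1
	       = 2 * 1 + 1 + 2 + 1 = 6 blocks,

while al_requested stays at the single unconditional unstuffing block; only when nalloc is non-zero are the rgrp length, the per-allocation indirect blocks and RES_STATFS added on top.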
@@ -1124,12 +1118,12 @@ int gfs2_quota_init(struct gfs2_sbd *sdp)
1124 error = -ENOMEM; 1118 error = -ENOMEM;
1125 1119
1126 sdp->sd_quota_bitmap = kcalloc(sdp->sd_quota_chunks, 1120 sdp->sd_quota_bitmap = kcalloc(sdp->sd_quota_chunks,
1127 sizeof(unsigned char *), GFP_KERNEL); 1121 sizeof(unsigned char *), GFP_NOFS);
1128 if (!sdp->sd_quota_bitmap) 1122 if (!sdp->sd_quota_bitmap)
1129 return error; 1123 return error;
1130 1124
1131 for (x = 0; x < sdp->sd_quota_chunks; x++) { 1125 for (x = 0; x < sdp->sd_quota_chunks; x++) {
1132 sdp->sd_quota_bitmap[x] = kzalloc(PAGE_SIZE, GFP_KERNEL); 1126 sdp->sd_quota_bitmap[x] = kzalloc(PAGE_SIZE, GFP_NOFS);
1133 if (!sdp->sd_quota_bitmap[x]) 1127 if (!sdp->sd_quota_bitmap[x])
1134 goto fail; 1128 goto fail;
1135 } 1129 }
diff --git a/fs/gfs2/quota.h b/fs/gfs2/quota.h
index a8be1417051f..3b7f4b0e5dfe 100644
--- a/fs/gfs2/quota.h
+++ b/fs/gfs2/quota.h
@@ -32,4 +32,21 @@ int gfs2_quota_init(struct gfs2_sbd *sdp);
32void gfs2_quota_scan(struct gfs2_sbd *sdp); 32void gfs2_quota_scan(struct gfs2_sbd *sdp);
33void gfs2_quota_cleanup(struct gfs2_sbd *sdp); 33void gfs2_quota_cleanup(struct gfs2_sbd *sdp);
34 34
35static inline int gfs2_quota_lock_check(struct gfs2_inode *ip)
36{
37 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
38 int ret;
39 if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
40 return 0;
41 ret = gfs2_quota_lock(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
42 if (ret)
43 return ret;
44 if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
45 return 0;
46 ret = gfs2_quota_check(ip, ip->i_inode.i_uid, ip->i_inode.i_gid);
47 if (ret)
48 gfs2_quota_unlock(ip);
49 return ret;
50}
51
35#endif /* __QUOTA_DOT_H__ */ 52#endif /* __QUOTA_DOT_H__ */
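The new gfs2_quota_lock_check() folds the gfs2_quota_lock()/gfs2_quota_check() pair used by several callers into one helper that also drops the quota lock itself when the check fails, which lets those callers (see the ops_file.c and ops_inode.c hunks above) drop the extra error path that used to unlock quota separately. A hedged sketch of a caller in the new style; example_reserve is illustrative and not a function in this patch:

static int example_reserve(struct gfs2_inode *ip, unsigned int requested)
{
	struct gfs2_alloc *al = gfs2_alloc_get(ip);
	int error;

	if (!al)				/* gfs2_alloc_get() may now fail */
		return -ENOMEM;
	error = gfs2_quota_lock_check(ip);	/* lock + check in one call */
	if (error)
		goto out_put;			/* quota lock already dropped by the helper */
	al->al_requested = requested;
	error = gfs2_inplace_reserve(ip);
	if (error)
		gfs2_quota_unlock(ip);
out_put:
	if (error)
		gfs2_alloc_put(ip);
	return error;
}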
diff --git a/fs/gfs2/recovery.c b/fs/gfs2/recovery.c
index 6fb07d67ca8a..2888e4b4b1c5 100644
--- a/fs/gfs2/recovery.c
+++ b/fs/gfs2/recovery.c
@@ -20,7 +20,6 @@
20#include "bmap.h" 20#include "bmap.h"
21#include "glock.h" 21#include "glock.h"
22#include "glops.h" 22#include "glops.h"
23#include "lm.h"
24#include "lops.h" 23#include "lops.h"
25#include "meta_io.h" 24#include "meta_io.h"
26#include "recovery.h" 25#include "recovery.h"
@@ -69,7 +68,7 @@ int gfs2_revoke_add(struct gfs2_sbd *sdp, u64 blkno, unsigned int where)
69 return 0; 68 return 0;
70 } 69 }
71 70
72 rr = kmalloc(sizeof(struct gfs2_revoke_replay), GFP_KERNEL); 71 rr = kmalloc(sizeof(struct gfs2_revoke_replay), GFP_NOFS);
73 if (!rr) 72 if (!rr)
74 return -ENOMEM; 73 return -ENOMEM;
75 74
@@ -150,7 +149,7 @@ static int get_log_header(struct gfs2_jdesc *jd, unsigned int blk,
150 struct gfs2_log_header_host *head) 149 struct gfs2_log_header_host *head)
151{ 150{
152 struct buffer_head *bh; 151 struct buffer_head *bh;
153 struct gfs2_log_header_host lh; 152 struct gfs2_log_header_host uninitialized_var(lh);
154 const u32 nothing = 0; 153 const u32 nothing = 0;
155 u32 hash; 154 u32 hash;
156 int error; 155 int error;
@@ -425,6 +424,16 @@ static int clean_journal(struct gfs2_jdesc *jd, struct gfs2_log_header_host *hea
425 return error; 424 return error;
426} 425}
427 426
427
428static void gfs2_lm_recovery_done(struct gfs2_sbd *sdp, unsigned int jid,
429 unsigned int message)
430{
431 if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
432 sdp->sd_lockstruct.ls_ops->lm_recovery_done(
433 sdp->sd_lockstruct.ls_lockspace, jid, message);
434}
435
436
428/** 437/**
429 * gfs2_recover_journal - recover a given journal 438 * gfs2_recover_journal - recover a given journal
430 * @jd: the struct gfs2_jdesc describing the journal 439 * @jd: the struct gfs2_jdesc describing the journal
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index 3552110b2e5f..7e8f0b1d6c6e 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. 2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved. 3 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
4 * 4 *
5 * This copyrighted material is made available to anyone wishing to use, 5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions 6 * modify, copy, or redistribute it subject to the terms and conditions
@@ -14,6 +14,7 @@
14#include <linux/fs.h> 14#include <linux/fs.h>
15#include <linux/gfs2_ondisk.h> 15#include <linux/gfs2_ondisk.h>
16#include <linux/lm_interface.h> 16#include <linux/lm_interface.h>
17#include <linux/prefetch.h>
17 18
18#include "gfs2.h" 19#include "gfs2.h"
19#include "incore.h" 20#include "incore.h"
@@ -33,6 +34,16 @@
33#define BFITNOENT ((u32)~0) 34#define BFITNOENT ((u32)~0)
34#define NO_BLOCK ((u64)~0) 35#define NO_BLOCK ((u64)~0)
35 36
37#if BITS_PER_LONG == 32
38#define LBITMASK (0x55555555UL)
39#define LBITSKIP55 (0x55555555UL)
40#define LBITSKIP00 (0x00000000UL)
41#else
42#define LBITMASK (0x5555555555555555UL)
43#define LBITSKIP55 (0x5555555555555555UL)
44#define LBITSKIP00 (0x0000000000000000UL)
45#endif
46
36/* 47/*
37 * These routines are used by the resource group routines (rgrp.c) 48 * These routines are used by the resource group routines (rgrp.c)
38 * to keep track of block allocation. Each block is represented by two 49 * to keep track of block allocation. Each block is represented by two
@@ -53,7 +64,8 @@ static const char valid_change[16] = {
53}; 64};
54 65
55static u32 rgblk_search(struct gfs2_rgrpd *rgd, u32 goal, 66static u32 rgblk_search(struct gfs2_rgrpd *rgd, u32 goal,
56 unsigned char old_state, unsigned char new_state); 67 unsigned char old_state, unsigned char new_state,
68 unsigned int *n);
57 69
58/** 70/**
59 * gfs2_setbit - Set a bit in the bitmaps 71 * gfs2_setbit - Set a bit in the bitmaps
@@ -64,26 +76,32 @@ static u32 rgblk_search(struct gfs2_rgrpd *rgd, u32 goal,
64 * 76 *
65 */ 77 */
66 78
67static void gfs2_setbit(struct gfs2_rgrpd *rgd, unsigned char *buffer, 79static inline void gfs2_setbit(struct gfs2_rgrpd *rgd, unsigned char *buf1,
68 unsigned int buflen, u32 block, 80 unsigned char *buf2, unsigned int offset,
69 unsigned char new_state) 81 unsigned int buflen, u32 block,
82 unsigned char new_state)
70{ 83{
71 unsigned char *byte, *end, cur_state; 84 unsigned char *byte1, *byte2, *end, cur_state;
72 unsigned int bit; 85 const unsigned int bit = (block % GFS2_NBBY) * GFS2_BIT_SIZE;
73 86
74 byte = buffer + (block / GFS2_NBBY); 87 byte1 = buf1 + offset + (block / GFS2_NBBY);
75 bit = (block % GFS2_NBBY) * GFS2_BIT_SIZE; 88 end = buf1 + offset + buflen;
76 end = buffer + buflen;
77 89
78 gfs2_assert(rgd->rd_sbd, byte < end); 90 BUG_ON(byte1 >= end);
79 91
80 cur_state = (*byte >> bit) & GFS2_BIT_MASK; 92 cur_state = (*byte1 >> bit) & GFS2_BIT_MASK;
81 93
82 if (valid_change[new_state * 4 + cur_state]) { 94 if (unlikely(!valid_change[new_state * 4 + cur_state])) {
83 *byte ^= cur_state << bit;
84 *byte |= new_state << bit;
85 } else
86 gfs2_consist_rgrpd(rgd); 95 gfs2_consist_rgrpd(rgd);
96 return;
97 }
98 *byte1 ^= (cur_state ^ new_state) << bit;
99
100 if (buf2) {
101 byte2 = buf2 + offset + (block / GFS2_NBBY);
102 cur_state = (*byte2 >> bit) & GFS2_BIT_MASK;
103 *byte2 ^= (cur_state ^ new_state) << bit;
104 }
87} 105}
88 106
89/** 107/**
@@ -94,10 +112,12 @@ static void gfs2_setbit(struct gfs2_rgrpd *rgd, unsigned char *buffer,
94 * 112 *
95 */ 113 */
96 114
97static unsigned char gfs2_testbit(struct gfs2_rgrpd *rgd, unsigned char *buffer, 115static inline unsigned char gfs2_testbit(struct gfs2_rgrpd *rgd,
98 unsigned int buflen, u32 block) 116 const unsigned char *buffer,
117 unsigned int buflen, u32 block)
99{ 118{
100 unsigned char *byte, *end, cur_state; 119 const unsigned char *byte, *end;
120 unsigned char cur_state;
101 unsigned int bit; 121 unsigned int bit;
102 122
103 byte = buffer + (block / GFS2_NBBY); 123 byte = buffer + (block / GFS2_NBBY);
@@ -126,47 +146,66 @@ static unsigned char gfs2_testbit(struct gfs2_rgrpd *rgd, unsigned char *buffer,
126 * Return: the block number (bitmap buffer scope) that was found 146 * Return: the block number (bitmap buffer scope) that was found
127 */ 147 */
128 148
129static u32 gfs2_bitfit(unsigned char *buffer, unsigned int buflen, u32 goal, 149static u32 gfs2_bitfit(const u8 *buffer, unsigned int buflen, u32 goal,
130 unsigned char old_state) 150 u8 old_state)
131{ 151{
132 unsigned char *byte; 152 const u8 *byte, *start, *end;
133 u32 blk = goal; 153 int bit, startbit;
134 unsigned int bit, bitlong; 154 u32 g1, g2, misaligned;
135 unsigned long *plong, plong55; 155 unsigned long *plong;
136 156 unsigned long lskipval;
137 byte = buffer + (goal / GFS2_NBBY); 157
138 plong = (unsigned long *)(buffer + (goal / GFS2_NBBY)); 158 lskipval = (old_state & GFS2_BLKST_USED) ? LBITSKIP00 : LBITSKIP55;
139 bit = (goal % GFS2_NBBY) * GFS2_BIT_SIZE; 159 g1 = (goal / GFS2_NBBY);
140 bitlong = bit; 160 start = buffer + g1;
141#if BITS_PER_LONG == 32 161 byte = start;
142 plong55 = 0x55555555; 162 end = buffer + buflen;
143#else 163 g2 = ALIGN(g1, sizeof(unsigned long));
144 plong55 = 0x5555555555555555; 164 plong = (unsigned long *)(buffer + g2);
145#endif 165 startbit = bit = (goal % GFS2_NBBY) * GFS2_BIT_SIZE;
146 while (byte < buffer + buflen) { 166 misaligned = g2 - g1;
147 167 if (!misaligned)
148 if (bitlong == 0 && old_state == 0 && *plong == plong55) { 168 goto ulong_aligned;
149 plong++; 169/* parse the bitmap a byte at a time */
150 byte += sizeof(unsigned long); 170misaligned:
151 blk += sizeof(unsigned long) * GFS2_NBBY; 171 while (byte < end) {
152 continue; 172 if (((*byte >> bit) & GFS2_BIT_MASK) == old_state) {
173 return goal +
174 (((byte - start) * GFS2_NBBY) +
175 ((bit - startbit) >> 1));
153 } 176 }
154 if (((*byte >> bit) & GFS2_BIT_MASK) == old_state)
155 return blk;
156 bit += GFS2_BIT_SIZE; 177 bit += GFS2_BIT_SIZE;
157 if (bit >= 8) { 178 if (bit >= GFS2_NBBY * GFS2_BIT_SIZE) {
158 bit = 0; 179 bit = 0;
159 byte++; 180 byte++;
181 misaligned--;
182 if (!misaligned) {
183 plong = (unsigned long *)byte;
184 goto ulong_aligned;
185 }
160 } 186 }
161 bitlong += GFS2_BIT_SIZE;
162 if (bitlong >= sizeof(unsigned long) * 8) {
163 bitlong = 0;
164 plong++;
165 }
166
167 blk++;
168 } 187 }
188 return BFITNOENT;
169 189
190/* parse the bitmap an unsigned long at a time */
191ulong_aligned:
192 /* Stop at "end - 1" or else prefetch can go past the end and segfault.
193 We could "if" it but we'd lose some of the performance gained.
194 This way will only slow down searching the very last 4/8 bytes
195 depending on architecture. I've experimented with several ways
196 of writing this section such as using an else before the goto
197 but this one seems to be the fastest. */
198 while ((unsigned char *)plong < end - 1) {
199 prefetch(plong + 1);
200 if (((*plong) & LBITMASK) != lskipval)
201 break;
202 plong++;
203 }
204 if ((unsigned char *)plong < end) {
205 byte = (const u8 *)plong;
206 misaligned += sizeof(unsigned long) - 1;
207 goto misaligned;
208 }
170 return BFITNOENT; 209 return BFITNOENT;
171} 210}
172 211
@@ -179,14 +218,14 @@ static u32 gfs2_bitfit(unsigned char *buffer, unsigned int buflen, u32 goal,
179 * Returns: The number of bits 218 * Returns: The number of bits
180 */ 219 */
181 220
182static u32 gfs2_bitcount(struct gfs2_rgrpd *rgd, unsigned char *buffer, 221static u32 gfs2_bitcount(struct gfs2_rgrpd *rgd, const u8 *buffer,
183 unsigned int buflen, unsigned char state) 222 unsigned int buflen, u8 state)
184{ 223{
185 unsigned char *byte = buffer; 224 const u8 *byte = buffer;
186 unsigned char *end = buffer + buflen; 225 const u8 *end = buffer + buflen;
187 unsigned char state1 = state << 2; 226 const u8 state1 = state << 2;
188 unsigned char state2 = state << 4; 227 const u8 state2 = state << 4;
189 unsigned char state3 = state << 6; 228 const u8 state3 = state << 6;
190 u32 count = 0; 229 u32 count = 0;
191 230
192 for (; byte < end; byte++) { 231 for (; byte < end; byte++) {
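gfs2_bitcount (above) now takes a const u8 buffer and counts blocks in a given state by comparing each of a byte's four two-bit fields against the state pre-shifted to that field's position (the state1/state2/state3 values). A small userspace sketch of the same counting trick, with illustrative names:

/* Count 2-bit fields equal to `state` by comparing each field of a
 * byte against the state pre-shifted into that position.
 * Userspace sketch; names and the sample bitmap are illustrative. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t count_state(const uint8_t *buf, size_t len, uint8_t state)
{
	const uint8_t s0 = state, s1 = state << 2, s2 = state << 4, s3 = state << 6;
	uint32_t count = 0;
	size_t i;

	for (i = 0; i < len; i++) {
		uint8_t b = buf[i];

		count += ((b & 0x03) == s0) + ((b & 0x0c) == s1) +
			 ((b & 0x30) == s2) + ((b & 0xc0) == s3);
	}
	return count;
}

int main(void)
{
	uint8_t bitmap[] = { 0x00, 0x55, 0x41 };        /* 12 blocks */

	/* 0x55 holds four "used" (01) fields, 0x41 holds two. */
	printf("used blocks: %u\n", count_state(bitmap, sizeof(bitmap), 0x1));
	return 0;
}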
@@ -353,7 +392,7 @@ static void clear_rgrpdi(struct gfs2_sbd *sdp)
353 } 392 }
354 393
355 kfree(rgd->rd_bits); 394 kfree(rgd->rd_bits);
356 kfree(rgd); 395 kmem_cache_free(gfs2_rgrpd_cachep, rgd);
357 } 396 }
358} 397}
359 398
@@ -516,7 +555,7 @@ static int read_rindex_entry(struct gfs2_inode *ip,
516 return error; 555 return error;
517 } 556 }
518 557
519 rgd = kzalloc(sizeof(struct gfs2_rgrpd), GFP_NOFS); 558 rgd = kmem_cache_zalloc(gfs2_rgrpd_cachep, GFP_NOFS);
520 error = -ENOMEM; 559 error = -ENOMEM;
521 if (!rgd) 560 if (!rgd)
522 return error; 561 return error;
@@ -539,7 +578,7 @@ static int read_rindex_entry(struct gfs2_inode *ip,
539 return error; 578 return error;
540 579
541 rgd->rd_gl->gl_object = rgd; 580 rgd->rd_gl->gl_object = rgd;
542 rgd->rd_rg_vn = rgd->rd_gl->gl_vn - 1; 581 rgd->rd_flags &= ~GFS2_RDF_UPTODATE;
543 rgd->rd_flags |= GFS2_RDF_CHECK; 582 rgd->rd_flags |= GFS2_RDF_CHECK;
544 return error; 583 return error;
545} 584}
@@ -575,7 +614,7 @@ static int gfs2_ri_update(struct gfs2_inode *ip)
575 } 614 }
576 } 615 }
577 616
578 sdp->sd_rindex_vn = ip->i_gl->gl_vn; 617 sdp->sd_rindex_uptodate = 1;
579 return 0; 618 return 0;
580} 619}
581 620
@@ -609,7 +648,7 @@ static int gfs2_ri_update_special(struct gfs2_inode *ip)
609 } 648 }
610 } 649 }
611 650
612 sdp->sd_rindex_vn = ip->i_gl->gl_vn; 651 sdp->sd_rindex_uptodate = 1;
613 return 0; 652 return 0;
614} 653}
615 654
@@ -642,9 +681,9 @@ int gfs2_rindex_hold(struct gfs2_sbd *sdp, struct gfs2_holder *ri_gh)
642 return error; 681 return error;
643 682
644 /* Read new copy from disk if we don't have the latest */ 683 /* Read new copy from disk if we don't have the latest */
645 if (sdp->sd_rindex_vn != gl->gl_vn) { 684 if (!sdp->sd_rindex_uptodate) {
646 mutex_lock(&sdp->sd_rindex_mutex); 685 mutex_lock(&sdp->sd_rindex_mutex);
647 if (sdp->sd_rindex_vn != gl->gl_vn) { 686 if (!sdp->sd_rindex_uptodate) {
648 error = gfs2_ri_update(ip); 687 error = gfs2_ri_update(ip);
649 if (error) 688 if (error)
650 gfs2_glock_dq_uninit(ri_gh); 689 gfs2_glock_dq_uninit(ri_gh);
@@ -655,21 +694,31 @@ int gfs2_rindex_hold(struct gfs2_sbd *sdp, struct gfs2_holder *ri_gh)
655 return error; 694 return error;
656} 695}
657 696
658static void gfs2_rgrp_in(struct gfs2_rgrp_host *rg, const void *buf) 697static void gfs2_rgrp_in(struct gfs2_rgrpd *rgd, const void *buf)
659{ 698{
660 const struct gfs2_rgrp *str = buf; 699 const struct gfs2_rgrp *str = buf;
700 struct gfs2_rgrp_host *rg = &rgd->rd_rg;
701 u32 rg_flags;
661 702
662 rg->rg_flags = be32_to_cpu(str->rg_flags); 703 rg_flags = be32_to_cpu(str->rg_flags);
704 if (rg_flags & GFS2_RGF_NOALLOC)
705 rgd->rd_flags |= GFS2_RDF_NOALLOC;
706 else
707 rgd->rd_flags &= ~GFS2_RDF_NOALLOC;
663 rg->rg_free = be32_to_cpu(str->rg_free); 708 rg->rg_free = be32_to_cpu(str->rg_free);
664 rg->rg_dinodes = be32_to_cpu(str->rg_dinodes); 709 rg->rg_dinodes = be32_to_cpu(str->rg_dinodes);
665 rg->rg_igeneration = be64_to_cpu(str->rg_igeneration); 710 rg->rg_igeneration = be64_to_cpu(str->rg_igeneration);
666} 711}
667 712
668static void gfs2_rgrp_out(const struct gfs2_rgrp_host *rg, void *buf) 713static void gfs2_rgrp_out(struct gfs2_rgrpd *rgd, void *buf)
669{ 714{
670 struct gfs2_rgrp *str = buf; 715 struct gfs2_rgrp *str = buf;
716 struct gfs2_rgrp_host *rg = &rgd->rd_rg;
717 u32 rg_flags = 0;
671 718
672 str->rg_flags = cpu_to_be32(rg->rg_flags); 719 if (rgd->rd_flags & GFS2_RDF_NOALLOC)
720 rg_flags |= GFS2_RGF_NOALLOC;
721 str->rg_flags = cpu_to_be32(rg_flags);
673 str->rg_free = cpu_to_be32(rg->rg_free); 722 str->rg_free = cpu_to_be32(rg->rg_free);
674 str->rg_dinodes = cpu_to_be32(rg->rg_dinodes); 723 str->rg_dinodes = cpu_to_be32(rg->rg_dinodes);
675 str->__pad = cpu_to_be32(0); 724 str->__pad = cpu_to_be32(0);
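gfs2_rgrp_in()/gfs2_rgrp_out() above now take the rgrpd itself, convert between the big-endian on-disk header and host-order in-core fields, and fold the on-disk GFS2_RGF_NOALLOC flag into the in-core rd_flags word instead of keeping raw rg_flags around. A userspace sketch of that in/out pattern follows; the buffer layout and flag values are illustrative only.

/* Translate between a big-endian on-disk resource-group header and
 * host-order in-core state, mapping one on-disk flag to an in-core
 * flag bit.  Userspace sketch; layout and flag values are illustrative. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ONDISK_NOALLOC 0x00000001u      /* on-disk flag bit */
#define INCORE_NOALLOC 0x00000100u      /* in-core flag bit */

struct incore_rgrp {
	uint32_t rd_flags;              /* host order */
	uint32_t rd_free;
};

static uint32_t get_be32(const unsigned char *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8) | (uint32_t)p[3];
}

static void put_be32(unsigned char *p, uint32_t v)
{
	p[0] = v >> 24; p[1] = v >> 16; p[2] = v >> 8; p[3] = v;
}

/* buf layout: bytes 0-3 rg_flags, bytes 4-7 rg_free, both big-endian */
static void rgrp_in(struct incore_rgrp *rgd, const unsigned char *buf)
{
	uint32_t flags = get_be32(buf);

	if (flags & ONDISK_NOALLOC)
		rgd->rd_flags |= INCORE_NOALLOC;
	else
		rgd->rd_flags &= ~INCORE_NOALLOC;
	rgd->rd_free = get_be32(buf + 4);
}

static void rgrp_out(const struct incore_rgrp *rgd, unsigned char *buf)
{
	uint32_t flags = 0;

	if (rgd->rd_flags & INCORE_NOALLOC)
		flags |= ONDISK_NOALLOC;
	put_be32(buf, flags);
	put_be32(buf + 4, rgd->rd_free);
}

int main(void)
{
	unsigned char disk[8];
	struct incore_rgrp rgd = { .rd_flags = INCORE_NOALLOC, .rd_free = 42 };

	rgrp_out(&rgd, disk);
	memset(&rgd, 0, sizeof(rgd));
	rgrp_in(&rgd, disk);
	printf("noalloc=%d free=%u\n",
	       !!(rgd.rd_flags & INCORE_NOALLOC), rgd.rd_free);
	return 0;
}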
@@ -726,9 +775,9 @@ int gfs2_rgrp_bh_get(struct gfs2_rgrpd *rgd)
726 } 775 }
727 } 776 }
728 777
729 if (rgd->rd_rg_vn != gl->gl_vn) { 778 if (!(rgd->rd_flags & GFS2_RDF_UPTODATE)) {
730 gfs2_rgrp_in(&rgd->rd_rg, (rgd->rd_bits[0].bi_bh)->b_data); 779 gfs2_rgrp_in(rgd, (rgd->rd_bits[0].bi_bh)->b_data);
731 rgd->rd_rg_vn = gl->gl_vn; 780 rgd->rd_flags |= GFS2_RDF_UPTODATE;
732 } 781 }
733 782
734 spin_lock(&sdp->sd_rindex_spin); 783 spin_lock(&sdp->sd_rindex_spin);
@@ -840,7 +889,7 @@ static int try_rgrp_fit(struct gfs2_rgrpd *rgd, struct gfs2_alloc *al)
840 struct gfs2_sbd *sdp = rgd->rd_sbd; 889 struct gfs2_sbd *sdp = rgd->rd_sbd;
841 int ret = 0; 890 int ret = 0;
842 891
843 if (rgd->rd_rg.rg_flags & GFS2_RGF_NOALLOC) 892 if (rgd->rd_flags & GFS2_RDF_NOALLOC)
844 return 0; 893 return 0;
845 894
846 spin_lock(&sdp->sd_rindex_spin); 895 spin_lock(&sdp->sd_rindex_spin);
@@ -866,13 +915,15 @@ static struct inode *try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked)
866 u32 goal = 0, block; 915 u32 goal = 0, block;
867 u64 no_addr; 916 u64 no_addr;
868 struct gfs2_sbd *sdp = rgd->rd_sbd; 917 struct gfs2_sbd *sdp = rgd->rd_sbd;
918 unsigned int n;
869 919
870 for(;;) { 920 for(;;) {
871 if (goal >= rgd->rd_data) 921 if (goal >= rgd->rd_data)
872 break; 922 break;
873 down_write(&sdp->sd_log_flush_lock); 923 down_write(&sdp->sd_log_flush_lock);
924 n = 1;
874 block = rgblk_search(rgd, goal, GFS2_BLKST_UNLINKED, 925 block = rgblk_search(rgd, goal, GFS2_BLKST_UNLINKED,
875 GFS2_BLKST_UNLINKED); 926 GFS2_BLKST_UNLINKED, &n);
876 up_write(&sdp->sd_log_flush_lock); 927 up_write(&sdp->sd_log_flush_lock);
877 if (block == BFITNOENT) 928 if (block == BFITNOENT)
878 break; 929 break;
@@ -904,24 +955,20 @@ static struct inode *try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked)
904static struct gfs2_rgrpd *recent_rgrp_first(struct gfs2_sbd *sdp, 955static struct gfs2_rgrpd *recent_rgrp_first(struct gfs2_sbd *sdp,
905 u64 rglast) 956 u64 rglast)
906{ 957{
907 struct gfs2_rgrpd *rgd = NULL; 958 struct gfs2_rgrpd *rgd;
908 959
909 spin_lock(&sdp->sd_rindex_spin); 960 spin_lock(&sdp->sd_rindex_spin);
910 961
911 if (list_empty(&sdp->sd_rindex_recent_list)) 962 if (rglast) {
912 goto out; 963 list_for_each_entry(rgd, &sdp->sd_rindex_recent_list, rd_recent) {
913 964 if (rgrp_contains_block(rgd, rglast))
914 if (!rglast) 965 goto out;
915 goto first; 966 }
916
917 list_for_each_entry(rgd, &sdp->sd_rindex_recent_list, rd_recent) {
918 if (rgd->rd_addr == rglast)
919 goto out;
920 } 967 }
921 968 rgd = NULL;
922first: 969 if (!list_empty(&sdp->sd_rindex_recent_list))
923 rgd = list_entry(sdp->sd_rindex_recent_list.next, struct gfs2_rgrpd, 970 rgd = list_entry(sdp->sd_rindex_recent_list.next,
924 rd_recent); 971 struct gfs2_rgrpd, rd_recent);
925out: 972out:
926 spin_unlock(&sdp->sd_rindex_spin); 973 spin_unlock(&sdp->sd_rindex_spin);
927 return rgd; 974 return rgd;
@@ -1067,7 +1114,7 @@ static struct inode *get_local_rgrp(struct gfs2_inode *ip, u64 *last_unlinked)
1067 1114
1068 /* Try recently successful rgrps */ 1115 /* Try recently successful rgrps */
1069 1116
1070 rgd = recent_rgrp_first(sdp, ip->i_last_rg_alloc); 1117 rgd = recent_rgrp_first(sdp, ip->i_goal);
1071 1118
1072 while (rgd) { 1119 while (rgd) {
1073 rg_locked = 0; 1120 rg_locked = 0;
@@ -1151,8 +1198,6 @@ static struct inode *get_local_rgrp(struct gfs2_inode *ip, u64 *last_unlinked)
1151 } 1198 }
1152 1199
1153out: 1200out:
1154 ip->i_last_rg_alloc = rgd->rd_addr;
1155
1156 if (begin) { 1201 if (begin) {
1157 recent_rgrp_add(rgd); 1202 recent_rgrp_add(rgd);
1158 rgd = gfs2_rgrpd_get_next(rgd); 1203 rgd = gfs2_rgrpd_get_next(rgd);
@@ -1275,6 +1320,7 @@ unsigned char gfs2_get_block_type(struct gfs2_rgrpd *rgd, u64 block)
1275 * @goal: the goal block within the RG (start here to search for avail block) 1320 * @goal: the goal block within the RG (start here to search for avail block)
1276 * @old_state: GFS2_BLKST_XXX the before-allocation state to find 1321 * @old_state: GFS2_BLKST_XXX the before-allocation state to find
1277 * @new_state: GFS2_BLKST_XXX the after-allocation block state 1322 * @new_state: GFS2_BLKST_XXX the after-allocation block state
1323 * @n: The extent length
1278 * 1324 *
1279 * Walk rgrp's bitmap to find bits that represent a block in @old_state. 1325 * Walk rgrp's bitmap to find bits that represent a block in @old_state.
1280 * Add the found bitmap buffer to the transaction. 1326 * Add the found bitmap buffer to the transaction.
@@ -1290,13 +1336,17 @@ unsigned char gfs2_get_block_type(struct gfs2_rgrpd *rgd, u64 block)
1290 */ 1336 */
1291 1337
1292static u32 rgblk_search(struct gfs2_rgrpd *rgd, u32 goal, 1338static u32 rgblk_search(struct gfs2_rgrpd *rgd, u32 goal,
1293 unsigned char old_state, unsigned char new_state) 1339 unsigned char old_state, unsigned char new_state,
1340 unsigned int *n)
1294{ 1341{
1295 struct gfs2_bitmap *bi = NULL; 1342 struct gfs2_bitmap *bi = NULL;
1296 u32 length = rgd->rd_length; 1343 const u32 length = rgd->rd_length;
1297 u32 blk = 0; 1344 u32 blk = 0;
1298 unsigned int buf, x; 1345 unsigned int buf, x;
1346 const unsigned int elen = *n;
1347 const u8 *buffer;
1299 1348
1349 *n = 0;
1300 /* Find bitmap block that contains bits for goal block */ 1350 /* Find bitmap block that contains bits for goal block */
1301 for (buf = 0; buf < length; buf++) { 1351 for (buf = 0; buf < length; buf++) {
1302 bi = rgd->rd_bits + buf; 1352 bi = rgd->rd_bits + buf;
@@ -1317,12 +1367,11 @@ static u32 rgblk_search(struct gfs2_rgrpd *rgd, u32 goal,
1317 for (x = 0; x <= length; x++) { 1367 for (x = 0; x <= length; x++) {
1318 /* The GFS2_BLKST_UNLINKED state doesn't apply to the clone 1368 /* The GFS2_BLKST_UNLINKED state doesn't apply to the clone
1319 bitmaps, so we must search the originals for that. */ 1369 bitmaps, so we must search the originals for that. */
1370 buffer = bi->bi_bh->b_data + bi->bi_offset;
1320 if (old_state != GFS2_BLKST_UNLINKED && bi->bi_clone) 1371 if (old_state != GFS2_BLKST_UNLINKED && bi->bi_clone)
1321 blk = gfs2_bitfit(bi->bi_clone + bi->bi_offset, 1372 buffer = bi->bi_clone + bi->bi_offset;
1322 bi->bi_len, goal, old_state); 1373
1323 else 1374 blk = gfs2_bitfit(buffer, bi->bi_len, goal, old_state);
1324 blk = gfs2_bitfit(bi->bi_bh->b_data + bi->bi_offset,
1325 bi->bi_len, goal, old_state);
1326 if (blk != BFITNOENT) 1375 if (blk != BFITNOENT)
1327 break; 1376 break;
1328 1377
@@ -1333,12 +1382,23 @@ static u32 rgblk_search(struct gfs2_rgrpd *rgd, u32 goal,
1333 } 1382 }
1334 1383
1335 if (blk != BFITNOENT && old_state != new_state) { 1384 if (blk != BFITNOENT && old_state != new_state) {
1385 *n = 1;
1336 gfs2_trans_add_bh(rgd->rd_gl, bi->bi_bh, 1); 1386 gfs2_trans_add_bh(rgd->rd_gl, bi->bi_bh, 1);
1337 gfs2_setbit(rgd, bi->bi_bh->b_data + bi->bi_offset, 1387 gfs2_setbit(rgd, bi->bi_bh->b_data, bi->bi_clone, bi->bi_offset,
1338 bi->bi_len, blk, new_state); 1388 bi->bi_len, blk, new_state);
1339 if (bi->bi_clone) 1389 goal = blk;
1340 gfs2_setbit(rgd, bi->bi_clone + bi->bi_offset, 1390 while (*n < elen) {
1341 bi->bi_len, blk, new_state); 1391 goal++;
1392 if (goal >= (bi->bi_len * GFS2_NBBY))
1393 break;
1394 if (gfs2_testbit(rgd, buffer, bi->bi_len, goal) !=
1395 GFS2_BLKST_FREE)
1396 break;
1397 gfs2_setbit(rgd, bi->bi_bh->b_data, bi->bi_clone,
1398 bi->bi_offset, bi->bi_len, goal,
1399 new_state);
1400 (*n)++;
1401 }
1342 } 1402 }
1343 1403
1344 return (blk == BFITNOENT) ? blk : (bi->bi_start * GFS2_NBBY) + blk; 1404 return (blk == BFITNOENT) ? blk : (bi->bi_start * GFS2_NBBY) + blk;
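With the new *n argument, rgblk_search above marks the first matching block and then keeps extending the allocation while the following bitmap entries are still free, stopping at the end of the bitmap or at the requested extent length and reporting the actual count back through *n. A userspace sketch of that extend-while-free loop; get_state() and set_state() below stand in for the real gfs2_testbit/gfs2_setbit helpers.

/* Extend an allocation from a starting block while the following
 * 2-bit fields are still FREE, up to the requested length.
 * Userspace sketch; the helpers and encoding are illustrative. */
#include <stdint.h>
#include <stdio.h>

#define ST_FREE 0x0u
#define ST_USED 0x1u

static uint8_t get_state(const uint8_t *buf, uint32_t blk)
{
	return (buf[blk / 4] >> ((blk % 4) * 2)) & 0x3u;
}

static void set_state(uint8_t *buf, uint32_t blk, uint8_t state)
{
	unsigned int shift = (blk % 4) * 2;

	buf[blk / 4] = (buf[blk / 4] & ~(0x3u << shift)) | (state << shift);
}

/* Mark `blk` used, then keep going while blocks stay free; return the
 * number of blocks actually marked (at most `want`). */
static unsigned int alloc_extent(uint8_t *buf, uint32_t nblks,
				 uint32_t blk, unsigned int want)
{
	unsigned int n = 1;

	set_state(buf, blk, ST_USED);
	while (n < want) {
		blk++;
		if (blk >= nblks || get_state(buf, blk) != ST_FREE)
			break;
		set_state(buf, blk, ST_USED);
		n++;
	}
	return n;
}

int main(void)
{
	uint8_t bitmap[4] = { 0 };              /* 16 free blocks        */

	set_state(bitmap, 6, ST_USED);          /* block 6 already taken */
	printf("got %u blocks\n", alloc_extent(bitmap, 16, 3, 8));
	return 0;
}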
@@ -1393,7 +1453,7 @@ static struct gfs2_rgrpd *rgblk_free(struct gfs2_sbd *sdp, u64 bstart,
1393 bi->bi_len); 1453 bi->bi_len);
1394 } 1454 }
1395 gfs2_trans_add_bh(rgd->rd_gl, bi->bi_bh, 1); 1455 gfs2_trans_add_bh(rgd->rd_gl, bi->bi_bh, 1);
1396 gfs2_setbit(rgd, bi->bi_bh->b_data + bi->bi_offset, 1456 gfs2_setbit(rgd, bi->bi_bh->b_data, NULL, bi->bi_offset,
1397 bi->bi_len, buf_blk, new_state); 1457 bi->bi_len, buf_blk, new_state);
1398 } 1458 }
1399 1459
@@ -1401,13 +1461,13 @@ static struct gfs2_rgrpd *rgblk_free(struct gfs2_sbd *sdp, u64 bstart,
1401} 1461}
1402 1462
1403/** 1463/**
1404 * gfs2_alloc_data - Allocate a data block 1464 * gfs2_alloc_block - Allocate a block
1405 * @ip: the inode to allocate the data block for 1465 * @ip: the inode to allocate the block for
1406 * 1466 *
1407 * Returns: the allocated block 1467 * Returns: the allocated block
1408 */ 1468 */
1409 1469
1410u64 gfs2_alloc_data(struct gfs2_inode *ip) 1470u64 gfs2_alloc_block(struct gfs2_inode *ip, unsigned int *n)
1411{ 1471{
1412 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); 1472 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1413 struct gfs2_alloc *al = ip->i_alloc; 1473 struct gfs2_alloc *al = ip->i_alloc;
@@ -1415,77 +1475,31 @@ u64 gfs2_alloc_data(struct gfs2_inode *ip)
1415 u32 goal, blk; 1475 u32 goal, blk;
1416 u64 block; 1476 u64 block;
1417 1477
1418 if (rgrp_contains_block(rgd, ip->i_di.di_goal_data)) 1478 if (rgrp_contains_block(rgd, ip->i_goal))
1419 goal = ip->i_di.di_goal_data - rgd->rd_data0; 1479 goal = ip->i_goal - rgd->rd_data0;
1420 else 1480 else
1421 goal = rgd->rd_last_alloc_data; 1481 goal = rgd->rd_last_alloc;
1422 1482
1423 blk = rgblk_search(rgd, goal, GFS2_BLKST_FREE, GFS2_BLKST_USED); 1483 blk = rgblk_search(rgd, goal, GFS2_BLKST_FREE, GFS2_BLKST_USED, n);
1424 BUG_ON(blk == BFITNOENT); 1484 BUG_ON(blk == BFITNOENT);
1425 rgd->rd_last_alloc_data = blk;
1426 1485
1486 rgd->rd_last_alloc = blk;
1427 block = rgd->rd_data0 + blk; 1487 block = rgd->rd_data0 + blk;
1428 ip->i_di.di_goal_data = block; 1488 ip->i_goal = block;
1429 1489
1430 gfs2_assert_withdraw(sdp, rgd->rd_rg.rg_free); 1490 gfs2_assert_withdraw(sdp, rgd->rd_rg.rg_free >= *n);
1431 rgd->rd_rg.rg_free--; 1491 rgd->rd_rg.rg_free -= *n;
1432 1492
1433 gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1); 1493 gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
1434 gfs2_rgrp_out(&rgd->rd_rg, rgd->rd_bits[0].bi_bh->b_data); 1494 gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
1435 1495
1436 al->al_alloced++; 1496 al->al_alloced += *n;
1437 1497
1438 gfs2_statfs_change(sdp, 0, -1, 0); 1498 gfs2_statfs_change(sdp, 0, -*n, 0);
1439 gfs2_quota_change(ip, +1, ip->i_inode.i_uid, ip->i_inode.i_gid); 1499 gfs2_quota_change(ip, *n, ip->i_inode.i_uid, ip->i_inode.i_gid);
1440 1500
1441 spin_lock(&sdp->sd_rindex_spin); 1501 spin_lock(&sdp->sd_rindex_spin);
1442 rgd->rd_free_clone--; 1502 rgd->rd_free_clone -= *n;
1443 spin_unlock(&sdp->sd_rindex_spin);
1444
1445 return block;
1446}
1447
1448/**
1449 * gfs2_alloc_meta - Allocate a metadata block
1450 * @ip: the inode to allocate the metadata block for
1451 *
1452 * Returns: the allocated block
1453 */
1454
1455u64 gfs2_alloc_meta(struct gfs2_inode *ip)
1456{
1457 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1458 struct gfs2_alloc *al = ip->i_alloc;
1459 struct gfs2_rgrpd *rgd = al->al_rgd;
1460 u32 goal, blk;
1461 u64 block;
1462
1463 if (rgrp_contains_block(rgd, ip->i_di.di_goal_meta))
1464 goal = ip->i_di.di_goal_meta - rgd->rd_data0;
1465 else
1466 goal = rgd->rd_last_alloc_meta;
1467
1468 blk = rgblk_search(rgd, goal, GFS2_BLKST_FREE, GFS2_BLKST_USED);
1469 BUG_ON(blk == BFITNOENT);
1470 rgd->rd_last_alloc_meta = blk;
1471
1472 block = rgd->rd_data0 + blk;
1473 ip->i_di.di_goal_meta = block;
1474
1475 gfs2_assert_withdraw(sdp, rgd->rd_rg.rg_free);
1476 rgd->rd_rg.rg_free--;
1477
1478 gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
1479 gfs2_rgrp_out(&rgd->rd_rg, rgd->rd_bits[0].bi_bh->b_data);
1480
1481 al->al_alloced++;
1482
1483 gfs2_statfs_change(sdp, 0, -1, 0);
1484 gfs2_quota_change(ip, +1, ip->i_inode.i_uid, ip->i_inode.i_gid);
1485 gfs2_trans_add_unrevoke(sdp, block);
1486
1487 spin_lock(&sdp->sd_rindex_spin);
1488 rgd->rd_free_clone--;
1489 spin_unlock(&sdp->sd_rindex_spin); 1503 spin_unlock(&sdp->sd_rindex_spin);
1490 1504
1491 return block; 1505 return block;
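gfs2_alloc_data() and gfs2_alloc_meta() are merged above into a single gfs2_alloc_block() that takes the desired extent length through *n and hands back the number of blocks actually allocated, with the free-block, statfs and quota accounting all scaled by that count. The toy model below illustrates the in/out contract only; the structure and field names are invented for the sketch.

/* Model of the new allocation contract: the caller asks for up to
 * `want` blocks, the allocator returns the first block and reports
 * the number actually granted, and counters are adjusted by that
 * count rather than by one.  Userspace sketch; names illustrative. */
#include <stdint.h>
#include <stdio.h>

struct rgrp_model {
	uint64_t data0;         /* first block of the group                  */
	uint32_t free;          /* free-block counter                        */
	uint32_t last_alloc;    /* goal for the next search                  */
	uint32_t run;           /* length of the free run at last_alloc (toy) */
};

/* Toy allocator: grants min(want, run) blocks starting at last_alloc. */
static uint64_t alloc_block(struct rgrp_model *rgd, unsigned int *n)
{
	unsigned int want = *n;
	uint32_t blk = rgd->last_alloc;

	*n = want < rgd->run ? want : rgd->run;
	rgd->last_alloc = blk + *n;
	rgd->run -= *n;
	rgd->free -= *n;        /* statfs/quota changes would scale by *n too */
	return rgd->data0 + blk;
}

int main(void)
{
	struct rgrp_model rgd = { .data0 = 1000, .free = 100,
				  .last_alloc = 10, .run = 3 };
	unsigned int n = 8;
	uint64_t block = alloc_block(&rgd, &n);

	printf("got %u blocks starting at %llu, %u free left\n",
	       n, (unsigned long long)block, rgd.free);
	return 0;
}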
@@ -1505,12 +1519,13 @@ u64 gfs2_alloc_di(struct gfs2_inode *dip, u64 *generation)
1505 struct gfs2_rgrpd *rgd = al->al_rgd; 1519 struct gfs2_rgrpd *rgd = al->al_rgd;
1506 u32 blk; 1520 u32 blk;
1507 u64 block; 1521 u64 block;
1522 unsigned int n = 1;
1508 1523
1509 blk = rgblk_search(rgd, rgd->rd_last_alloc_meta, 1524 blk = rgblk_search(rgd, rgd->rd_last_alloc,
1510 GFS2_BLKST_FREE, GFS2_BLKST_DINODE); 1525 GFS2_BLKST_FREE, GFS2_BLKST_DINODE, &n);
1511 BUG_ON(blk == BFITNOENT); 1526 BUG_ON(blk == BFITNOENT);
1512 1527
1513 rgd->rd_last_alloc_meta = blk; 1528 rgd->rd_last_alloc = blk;
1514 1529
1515 block = rgd->rd_data0 + blk; 1530 block = rgd->rd_data0 + blk;
1516 1531
@@ -1519,12 +1534,12 @@ u64 gfs2_alloc_di(struct gfs2_inode *dip, u64 *generation)
1519 rgd->rd_rg.rg_dinodes++; 1534 rgd->rd_rg.rg_dinodes++;
1520 *generation = rgd->rd_rg.rg_igeneration++; 1535 *generation = rgd->rd_rg.rg_igeneration++;
1521 gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1); 1536 gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
1522 gfs2_rgrp_out(&rgd->rd_rg, rgd->rd_bits[0].bi_bh->b_data); 1537 gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
1523 1538
1524 al->al_alloced++; 1539 al->al_alloced++;
1525 1540
1526 gfs2_statfs_change(sdp, 0, -1, +1); 1541 gfs2_statfs_change(sdp, 0, -1, +1);
1527 gfs2_trans_add_unrevoke(sdp, block); 1542 gfs2_trans_add_unrevoke(sdp, block, 1);
1528 1543
1529 spin_lock(&sdp->sd_rindex_spin); 1544 spin_lock(&sdp->sd_rindex_spin);
1530 rgd->rd_free_clone--; 1545 rgd->rd_free_clone--;
@@ -1553,7 +1568,7 @@ void gfs2_free_data(struct gfs2_inode *ip, u64 bstart, u32 blen)
1553 rgd->rd_rg.rg_free += blen; 1568 rgd->rd_rg.rg_free += blen;
1554 1569
1555 gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1); 1570 gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
1556 gfs2_rgrp_out(&rgd->rd_rg, rgd->rd_bits[0].bi_bh->b_data); 1571 gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
1557 1572
1558 gfs2_trans_add_rg(rgd); 1573 gfs2_trans_add_rg(rgd);
1559 1574
@@ -1581,7 +1596,7 @@ void gfs2_free_meta(struct gfs2_inode *ip, u64 bstart, u32 blen)
1581 rgd->rd_rg.rg_free += blen; 1596 rgd->rd_rg.rg_free += blen;
1582 1597
1583 gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1); 1598 gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
1584 gfs2_rgrp_out(&rgd->rd_rg, rgd->rd_bits[0].bi_bh->b_data); 1599 gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
1585 1600
1586 gfs2_trans_add_rg(rgd); 1601 gfs2_trans_add_rg(rgd);
1587 1602
@@ -1601,7 +1616,7 @@ void gfs2_unlink_di(struct inode *inode)
1601 if (!rgd) 1616 if (!rgd)
1602 return; 1617 return;
1603 gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1); 1618 gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
1604 gfs2_rgrp_out(&rgd->rd_rg, rgd->rd_bits[0].bi_bh->b_data); 1619 gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
1605 gfs2_trans_add_rg(rgd); 1620 gfs2_trans_add_rg(rgd);
1606} 1621}
1607 1622
@@ -1621,7 +1636,7 @@ static void gfs2_free_uninit_di(struct gfs2_rgrpd *rgd, u64 blkno)
1621 rgd->rd_rg.rg_free++; 1636 rgd->rd_rg.rg_free++;
1622 1637
1623 gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1); 1638 gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
1624 gfs2_rgrp_out(&rgd->rd_rg, rgd->rd_bits[0].bi_bh->b_data); 1639 gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
1625 1640
1626 gfs2_statfs_change(sdp, 0, +1, -1); 1641 gfs2_statfs_change(sdp, 0, +1, -1);
1627 gfs2_trans_add_rg(rgd); 1642 gfs2_trans_add_rg(rgd);
@@ -1699,8 +1714,7 @@ void gfs2_rlist_add(struct gfs2_sbd *sdp, struct gfs2_rgrp_list *rlist,
1699 * 1714 *
1700 */ 1715 */
1701 1716
1702void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist, unsigned int state, 1717void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist, unsigned int state)
1703 int flags)
1704{ 1718{
1705 unsigned int x; 1719 unsigned int x;
1706 1720
@@ -1708,7 +1722,7 @@ void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist, unsigned int state,
1708 GFP_NOFS | __GFP_NOFAIL); 1722 GFP_NOFS | __GFP_NOFAIL);
1709 for (x = 0; x < rlist->rl_rgrps; x++) 1723 for (x = 0; x < rlist->rl_rgrps; x++)
1710 gfs2_holder_init(rlist->rl_rgd[x]->rd_gl, 1724 gfs2_holder_init(rlist->rl_rgd[x]->rd_gl,
1711 state, flags, 1725 state, 0,
1712 &rlist->rl_ghs[x]); 1726 &rlist->rl_ghs[x]);
1713} 1727}
1714 1728
diff --git a/fs/gfs2/rgrp.h b/fs/gfs2/rgrp.h
index 149bb161f4b6..3181c7e624bf 100644
--- a/fs/gfs2/rgrp.h
+++ b/fs/gfs2/rgrp.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. 2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved. 3 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
4 * 4 *
5 * This copyrighted material is made available to anyone wishing to use, 5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions 6 * modify, copy, or redistribute it subject to the terms and conditions
@@ -46,8 +46,7 @@ void gfs2_inplace_release(struct gfs2_inode *ip);
46 46
47unsigned char gfs2_get_block_type(struct gfs2_rgrpd *rgd, u64 block); 47unsigned char gfs2_get_block_type(struct gfs2_rgrpd *rgd, u64 block);
48 48
49u64 gfs2_alloc_data(struct gfs2_inode *ip); 49u64 gfs2_alloc_block(struct gfs2_inode *ip, unsigned int *n);
50u64 gfs2_alloc_meta(struct gfs2_inode *ip);
51u64 gfs2_alloc_di(struct gfs2_inode *ip, u64 *generation); 50u64 gfs2_alloc_di(struct gfs2_inode *ip, u64 *generation);
52 51
53void gfs2_free_data(struct gfs2_inode *ip, u64 bstart, u32 blen); 52void gfs2_free_data(struct gfs2_inode *ip, u64 bstart, u32 blen);
@@ -64,8 +63,7 @@ struct gfs2_rgrp_list {
64 63
65void gfs2_rlist_add(struct gfs2_sbd *sdp, struct gfs2_rgrp_list *rlist, 64void gfs2_rlist_add(struct gfs2_sbd *sdp, struct gfs2_rgrp_list *rlist,
66 u64 block); 65 u64 block);
67void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist, unsigned int state, 66void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist, unsigned int state);
68 int flags);
69void gfs2_rlist_free(struct gfs2_rgrp_list *rlist); 67void gfs2_rlist_free(struct gfs2_rgrp_list *rlist);
70u64 gfs2_ri_total(struct gfs2_sbd *sdp); 68u64 gfs2_ri_total(struct gfs2_sbd *sdp);
71 69
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index ef0562c3bc71..7aeacbc65f35 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -210,7 +210,7 @@ int gfs2_read_super(struct gfs2_sbd *sdp, sector_t sector)
210 struct page *page; 210 struct page *page;
211 struct bio *bio; 211 struct bio *bio;
212 212
213 page = alloc_page(GFP_KERNEL); 213 page = alloc_page(GFP_NOFS);
214 if (unlikely(!page)) 214 if (unlikely(!page))
215 return -ENOBUFS; 215 return -ENOBUFS;
216 216
@@ -218,7 +218,7 @@ int gfs2_read_super(struct gfs2_sbd *sdp, sector_t sector)
218 ClearPageDirty(page); 218 ClearPageDirty(page);
219 lock_page(page); 219 lock_page(page);
220 220
221 bio = bio_alloc(GFP_KERNEL, 1); 221 bio = bio_alloc(GFP_NOFS, 1);
222 if (unlikely(!bio)) { 222 if (unlikely(!bio)) {
223 __free_page(page); 223 __free_page(page);
224 return -ENOBUFS; 224 return -ENOBUFS;
@@ -316,6 +316,7 @@ int gfs2_read_sb(struct gfs2_sbd *sdp, struct gfs2_glock *gl, int silent)
316 sdp->sd_heightsize[x] = space; 316 sdp->sd_heightsize[x] = space;
317 } 317 }
318 sdp->sd_max_height = x; 318 sdp->sd_max_height = x;
319 sdp->sd_heightsize[x] = ~0;
319 gfs2_assert(sdp, sdp->sd_max_height <= GFS2_MAX_META_HEIGHT); 320 gfs2_assert(sdp, sdp->sd_max_height <= GFS2_MAX_META_HEIGHT);
320 321
321 sdp->sd_jheightsize[0] = sdp->sd_sb.sb_bsize - 322 sdp->sd_jheightsize[0] = sdp->sd_sb.sb_bsize -
@@ -334,6 +335,7 @@ int gfs2_read_sb(struct gfs2_sbd *sdp, struct gfs2_glock *gl, int silent)
334 sdp->sd_jheightsize[x] = space; 335 sdp->sd_jheightsize[x] = space;
335 } 336 }
336 sdp->sd_max_jheight = x; 337 sdp->sd_max_jheight = x;
338 sdp->sd_jheightsize[x] = ~0;
337 gfs2_assert(sdp, sdp->sd_max_jheight <= GFS2_MAX_META_HEIGHT); 339 gfs2_assert(sdp, sdp->sd_max_jheight <= GFS2_MAX_META_HEIGHT);
338 340
339 return 0; 341 return 0;
diff --git a/fs/gfs2/super.h b/fs/gfs2/super.h
index 60a870e430be..44361ecc44f7 100644
--- a/fs/gfs2/super.h
+++ b/fs/gfs2/super.h
@@ -17,6 +17,7 @@ void gfs2_tune_init(struct gfs2_tune *gt);
17int gfs2_check_sb(struct gfs2_sbd *sdp, struct gfs2_sb_host *sb, int silent); 17int gfs2_check_sb(struct gfs2_sbd *sdp, struct gfs2_sb_host *sb, int silent);
18int gfs2_read_sb(struct gfs2_sbd *sdp, struct gfs2_glock *gl, int silent); 18int gfs2_read_sb(struct gfs2_sbd *sdp, struct gfs2_glock *gl, int silent);
19int gfs2_read_super(struct gfs2_sbd *sdp, sector_t sector); 19int gfs2_read_super(struct gfs2_sbd *sdp, sector_t sector);
20void gfs2_lm_unmount(struct gfs2_sbd *sdp);
20 21
21static inline unsigned int gfs2_jindex_size(struct gfs2_sbd *sdp) 22static inline unsigned int gfs2_jindex_size(struct gfs2_sbd *sdp)
22{ 23{
diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c
index eaa3b7b2f99e..9ab9fc85ecd0 100644
--- a/fs/gfs2/sys.c
+++ b/fs/gfs2/sys.c
@@ -20,7 +20,6 @@
20 20
21#include "gfs2.h" 21#include "gfs2.h"
22#include "incore.h" 22#include "incore.h"
23#include "lm.h"
24#include "sys.h" 23#include "sys.h"
25#include "super.h" 24#include "super.h"
26#include "glock.h" 25#include "glock.h"
@@ -328,15 +327,9 @@ static ssize_t name##_show(struct gfs2_sbd *sdp, char *buf) \
328} \ 327} \
329static struct counters_attr counters_attr_##name = __ATTR_RO(name) 328static struct counters_attr counters_attr_##name = __ATTR_RO(name)
330 329
331COUNTERS_ATTR(glock_count, "%u\n");
332COUNTERS_ATTR(glock_held_count, "%u\n");
333COUNTERS_ATTR(inode_count, "%u\n");
334COUNTERS_ATTR(reclaimed, "%u\n"); 330COUNTERS_ATTR(reclaimed, "%u\n");
335 331
336static struct attribute *counters_attrs[] = { 332static struct attribute *counters_attrs[] = {
337 &counters_attr_glock_count.attr,
338 &counters_attr_glock_held_count.attr,
339 &counters_attr_inode_count.attr,
340 &counters_attr_reclaimed.attr, 333 &counters_attr_reclaimed.attr,
341 NULL, 334 NULL,
342}; 335};
diff --git a/fs/gfs2/trans.c b/fs/gfs2/trans.c
index 73e5d92a657c..f677b8a83f0c 100644
--- a/fs/gfs2/trans.c
+++ b/fs/gfs2/trans.c
@@ -146,30 +146,25 @@ void gfs2_trans_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
146 lops_add(sdp, &bd->bd_le); 146 lops_add(sdp, &bd->bd_le);
147} 147}
148 148
149void gfs2_trans_add_unrevoke(struct gfs2_sbd *sdp, u64 blkno) 149void gfs2_trans_add_unrevoke(struct gfs2_sbd *sdp, u64 blkno, unsigned int len)
150{ 150{
151 struct gfs2_bufdata *bd; 151 struct gfs2_bufdata *bd, *tmp;
152 int found = 0; 152 struct gfs2_trans *tr = current->journal_info;
153 unsigned int n = len;
153 154
154 gfs2_log_lock(sdp); 155 gfs2_log_lock(sdp);
155 156 list_for_each_entry_safe(bd, tmp, &sdp->sd_log_le_revoke, bd_le.le_list) {
156 list_for_each_entry(bd, &sdp->sd_log_le_revoke, bd_le.le_list) { 157 if ((bd->bd_blkno >= blkno) && (bd->bd_blkno < (blkno + len))) {
157 if (bd->bd_blkno == blkno) {
158 list_del_init(&bd->bd_le.le_list); 158 list_del_init(&bd->bd_le.le_list);
159 gfs2_assert_withdraw(sdp, sdp->sd_log_num_revoke); 159 gfs2_assert_withdraw(sdp, sdp->sd_log_num_revoke);
160 sdp->sd_log_num_revoke--; 160 sdp->sd_log_num_revoke--;
161 found = 1; 161 kmem_cache_free(gfs2_bufdata_cachep, bd);
162 break; 162 tr->tr_num_revoke_rm++;
163 if (--n == 0)
164 break;
163 } 165 }
164 } 166 }
165
166 gfs2_log_unlock(sdp); 167 gfs2_log_unlock(sdp);
167
168 if (found) {
169 struct gfs2_trans *tr = current->journal_info;
170 kmem_cache_free(gfs2_bufdata_cachep, bd);
171 tr->tr_num_revoke_rm++;
172 }
173} 168}
174 169
175void gfs2_trans_add_rg(struct gfs2_rgrpd *rgd) 170void gfs2_trans_add_rg(struct gfs2_rgrpd *rgd)
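gfs2_trans_add_unrevoke() above now drops every queued revoke whose block number falls in [blkno, blkno + len), and because entries are freed during the walk it switches to list_for_each_entry_safe(). A userspace sketch of the same remove-while-iterating pattern on a plain singly linked list; names are illustrative.

/* Remove every node whose block number falls in [start, start + len)
 * while walking the list, caching the next pointer before a possible
 * free, the same idea as list_for_each_entry_safe() in the kernel.
 * Userspace sketch with illustrative names. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct revoke {
	uint64_t blkno;
	struct revoke *next;
};

static void unrevoke_range(struct revoke **head, uint64_t start, unsigned int len)
{
	struct revoke **link = head;
	struct revoke *cur, *next;

	for (cur = *head; cur; cur = next) {
		next = cur->next;               /* cache before freeing */
		if (cur->blkno >= start && cur->blkno < start + len) {
			*link = next;           /* unlink and free      */
			free(cur);
			continue;
		}
		link = &cur->next;
	}
}

static void push(struct revoke **head, uint64_t blkno)
{
	struct revoke *r = malloc(sizeof(*r));

	if (!r)
		exit(1);
	r->blkno = blkno;
	r->next = *head;
	*head = r;
}

int main(void)
{
	struct revoke *head = NULL, *r;

	push(&head, 10); push(&head, 11); push(&head, 12); push(&head, 20);
	unrevoke_range(&head, 11, 2);           /* drops 11 and 12 */
	for (r = head; r; r = r->next)
		printf("still revoked: %llu\n", (unsigned long long)r->blkno);
	return 0;
}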
diff --git a/fs/gfs2/trans.h b/fs/gfs2/trans.h
index e826f0dab80a..edf9d4bd908e 100644
--- a/fs/gfs2/trans.h
+++ b/fs/gfs2/trans.h
@@ -32,7 +32,7 @@ void gfs2_trans_end(struct gfs2_sbd *sdp);
32 32
33void gfs2_trans_add_bh(struct gfs2_glock *gl, struct buffer_head *bh, int meta); 33void gfs2_trans_add_bh(struct gfs2_glock *gl, struct buffer_head *bh, int meta);
34void gfs2_trans_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd); 34void gfs2_trans_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd);
35void gfs2_trans_add_unrevoke(struct gfs2_sbd *sdp, u64 blkno); 35void gfs2_trans_add_unrevoke(struct gfs2_sbd *sdp, u64 blkno, unsigned int len);
36void gfs2_trans_add_rg(struct gfs2_rgrpd *rgd); 36void gfs2_trans_add_rg(struct gfs2_rgrpd *rgd);
37 37
38#endif /* __TRANS_DOT_H__ */ 38#endif /* __TRANS_DOT_H__ */
diff --git a/fs/gfs2/util.c b/fs/gfs2/util.c
index 424a0774eda8..d31e355c61fb 100644
--- a/fs/gfs2/util.c
+++ b/fs/gfs2/util.c
@@ -19,12 +19,12 @@
19#include "gfs2.h" 19#include "gfs2.h"
20#include "incore.h" 20#include "incore.h"
21#include "glock.h" 21#include "glock.h"
22#include "lm.h"
23#include "util.h" 22#include "util.h"
24 23
25struct kmem_cache *gfs2_glock_cachep __read_mostly; 24struct kmem_cache *gfs2_glock_cachep __read_mostly;
26struct kmem_cache *gfs2_inode_cachep __read_mostly; 25struct kmem_cache *gfs2_inode_cachep __read_mostly;
27struct kmem_cache *gfs2_bufdata_cachep __read_mostly; 26struct kmem_cache *gfs2_bufdata_cachep __read_mostly;
27struct kmem_cache *gfs2_rgrpd_cachep __read_mostly;
28 28
29void gfs2_assert_i(struct gfs2_sbd *sdp) 29void gfs2_assert_i(struct gfs2_sbd *sdp)
30{ 30{
@@ -32,6 +32,28 @@ void gfs2_assert_i(struct gfs2_sbd *sdp)
32 sdp->sd_fsname); 32 sdp->sd_fsname);
33} 33}
34 34
35int gfs2_lm_withdraw(struct gfs2_sbd *sdp, char *fmt, ...)
36{
37 va_list args;
38
39 if (test_and_set_bit(SDF_SHUTDOWN, &sdp->sd_flags))
40 return 0;
41
42 va_start(args, fmt);
43 vprintk(fmt, args);
44 va_end(args);
45
46 fs_err(sdp, "about to withdraw this file system\n");
47 BUG_ON(sdp->sd_args.ar_debug);
48
49 fs_err(sdp, "telling LM to withdraw\n");
50 gfs2_withdraw_lockproto(&sdp->sd_lockstruct);
51 fs_err(sdp, "withdrawn\n");
52 dump_stack();
53
54 return -1;
55}
56
35/** 57/**
36 * gfs2_assert_withdraw_i - Cause the machine to withdraw if @assertion is false 58 * gfs2_assert_withdraw_i - Cause the machine to withdraw if @assertion is false
37 * Returns: -1 if this call withdrew the machine, 59 * Returns: -1 if this call withdrew the machine,
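The gfs2_lm_withdraw() helper added to util.c above guards the error path with test_and_set_bit(SDF_SHUTDOWN, ...), so only the first caller prints the message and tells the lock module to withdraw while later callers return immediately. A userspace sketch of that one-shot pattern using a C11 atomic flag; names and messages are illustrative.

/* One-shot shutdown helper: only the first caller past the atomic
 * test-and-set performs the withdraw work; later callers bail out.
 * Userspace sketch using C11 atomics; names are illustrative. */
#include <stdarg.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_flag shutdown_flag = ATOMIC_FLAG_INIT;

static int withdraw(const char *fmt, ...)
{
	va_list args;

	if (atomic_flag_test_and_set(&shutdown_flag))
		return 0;                       /* someone already withdrew */

	va_start(args, fmt);
	vfprintf(stderr, fmt, args);
	va_end(args);

	fprintf(stderr, "about to withdraw this file system\n");
	/* ... tell the lock module to withdraw, dump state ... */
	return -1;
}

int main(void)
{
	withdraw("fatal: %s\n", "metadata inconsistency");
	withdraw("fatal: %s\n", "second report is ignored");
	return 0;
}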
diff --git a/fs/gfs2/util.h b/fs/gfs2/util.h
index 28938a46cf47..509c5d60bd80 100644
--- a/fs/gfs2/util.h
+++ b/fs/gfs2/util.h
@@ -147,6 +147,7 @@ gfs2_io_error_bh_i((sdp), (bh), __FUNCTION__, __FILE__, __LINE__);
147extern struct kmem_cache *gfs2_glock_cachep; 147extern struct kmem_cache *gfs2_glock_cachep;
148extern struct kmem_cache *gfs2_inode_cachep; 148extern struct kmem_cache *gfs2_inode_cachep;
149extern struct kmem_cache *gfs2_bufdata_cachep; 149extern struct kmem_cache *gfs2_bufdata_cachep;
150extern struct kmem_cache *gfs2_rgrpd_cachep;
150 151
151static inline unsigned int gfs2_tune_get_i(struct gfs2_tune *gt, 152static inline unsigned int gfs2_tune_get_i(struct gfs2_tune *gt,
152 unsigned int *p) 153 unsigned int *p)
@@ -163,6 +164,7 @@ gfs2_tune_get_i(&(sdp)->sd_tune, &(sdp)->sd_tune.field)
163 164
164void gfs2_icbit_munge(struct gfs2_sbd *sdp, unsigned char **bitmap, 165void gfs2_icbit_munge(struct gfs2_sbd *sdp, unsigned char **bitmap,
165 unsigned int bit, int new_value); 166 unsigned int bit, int new_value);
167int gfs2_lm_withdraw(struct gfs2_sbd *sdp, char *fmt, ...);
166 168
167#endif /* __UTIL_DOT_H__ */ 169#endif /* __UTIL_DOT_H__ */
168 170