author     Steven Whitehouse <swhiteho@redhat.com>  2008-02-22 11:09:31 -0500
committer  Steven Whitehouse <swhiteho@redhat.com>  2008-03-31 05:41:14 -0400
commit     9b8c81d1de49943ec69d157234b8981008c30d31 (patch)
tree       c0cbbd25fdcbf376c06c9dcfb7d25b8873caa6ff /fs
parent     7afd88d9166a752b52517648bcbe923e05d393fc (diff)
[GFS2] Allow bmap to allocate extents
We've supported mapping of extents when no block allocation is required for some time. This patch extends that to mapping of extents when an allocation has been requested. In that case we try to allocate as many blocks as are requested, but we might return fewer in case there is something preventing us from returning the complete amount (e.g. an already allocated block is in the way).

Currently the only code path which can actually request multiple data blocks in a single bmap call is the page_mkwrite path, and even then it only happens if there are multiple blocks per page.

What this patch does do, however, is merge the allocation requests for metadata (growing the metadata tree in either height or depth) with the allocation of the data blocks in the case that both are needed. This results in lower overheads even in the single block allocation case.

The one thing which we can't handle here at the moment is unstuffing. I would like to be able to do that, but the problem which arises is that in order to unstuff one has to get a locked page from the page cache, which results in locking problems in the (usual) case that the caller is holding the page lock on the page it wishes to map. So that case will have to be addressed in future patches.

Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
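[Editor's note] As an aside for readers new to this code, the toy program below is a minimal sketch (not GFS2 code) of the idea described above: a single allocator request covers the combined metadata-plus-data block count, and a small state machine spends whatever extent comes back on indirect blocks first and then on data pointers, possibly mapping fewer data blocks than were asked for. All names and numbers here (toy_alloc_extent, the block counts, the 16-block extent cap) are invented purely for illustration.

/* Illustration only: a toy model of the merged metadata/data allocation.
 * This is not the GFS2 implementation; names and numbers are made up. */
#include <stdio.h>

enum alloc_state { ALLOC_DATA, ALLOC_GROW_DEPTH, ALLOC_GROW_HEIGHT };

/* Pretend allocator: grants up to *n contiguous blocks, possibly fewer. */
static unsigned long toy_alloc_extent(unsigned *n)
{
	static unsigned long next_block = 1000;
	unsigned long first;

	if (*n > 16)		/* simulate a fragmented resource group */
		*n = 16;
	first = next_block;
	next_block += *n;
	return first;
}

int main(void)
{
	unsigned iblks = 3;	/* indirect blocks needed to grow the tree */
	unsigned dblks = 40;	/* data blocks requested by this bmap call */
	unsigned blks = iblks + dblks, alloced = 0;
	enum alloc_state state = ALLOC_GROW_HEIGHT;

	for (;;) {
		unsigned n = blks - alloced;
		unsigned long bn = toy_alloc_extent(&n); /* one request per extent */

		alloced += n;
		/* Spend the extent on metadata first ... */
		while (n && state != ALLOC_DATA) {
			printf("init indirect block %lu (state %d)\n", bn++, state);
			n--;
			if (--iblks == 0)
				state = ALLOC_DATA;
			else if (state == ALLOC_GROW_HEIGHT && iblks == 1)
				state = ALLOC_GROW_DEPTH;
		}
		/* ... then map one contiguous data extent and stop; like bmap,
		 * we may return fewer data blocks than were requested. */
		if (state == ALLOC_DATA && n) {
			printf("mapped %u of %u data blocks at %lu\n", n, dblks, bn);
			break;
		}
	}
	return 0;
}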
Diffstat (limited to 'fs')
-rw-r--r--   fs/gfs2/bmap.c   502
-rw-r--r--   fs/gfs2/dir.c      2
-rw-r--r--   fs/gfs2/rgrp.c    10
3 files changed, 295 insertions(+), 219 deletions(-)
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index f1f38ca77a52..c1ee6355ced1 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -175,74 +175,13 @@ out:
 	return error;
 }

-/**
- * build_height - Build a metadata tree of the requested height
- * @ip: The GFS2 inode
- * @height: The height to build to
- *
- *
- * Returns: errno
- */
-
-static int build_height(struct inode *inode, struct metapath *mp, unsigned height)
-{
-	struct gfs2_inode *ip = GFS2_I(inode);
-	unsigned new_height = height - ip->i_height;
-	struct buffer_head *dibh = mp->mp_bh[0];
-	struct gfs2_dinode *di;
-	__be64 *bp;
-	u64 bn;
-	unsigned n, i = 0;
-
-	BUG_ON(height <= ip->i_height);
-
-	do {
-		n = new_height - i;
-		bn = gfs2_alloc_block(ip, &n);
-		gfs2_trans_add_unrevoke(GFS2_SB(inode), bn, n);
-		do {
-			mp->mp_bh[i + 1] = gfs2_meta_new(ip->i_gl, bn++);
-			gfs2_trans_add_bh(ip->i_gl, mp->mp_bh[i + 1], 1);
-			i++;
-		} while(i < n);
-	} while(i < new_height);
-
-	n = 0;
-	bn = mp->mp_bh[1]->b_blocknr;
-	if (new_height > 1) {
-		for(; n < new_height-1; n++) {
-			gfs2_metatype_set(mp->mp_bh[n + 1], GFS2_METATYPE_IN,
-					  GFS2_FORMAT_IN);
-			gfs2_buffer_clear_tail(mp->mp_bh[n + 1],
-					       sizeof(struct gfs2_meta_header));
-			bp = (__be64 *)(mp->mp_bh[n + 1]->b_data +
-					sizeof(struct gfs2_meta_header));
-			*bp = cpu_to_be64(mp->mp_bh[n+2]->b_blocknr);
-			brelse(mp->mp_bh[n+1]);
-			mp->mp_bh[n+1] = NULL;
-		}
-	}
-	gfs2_metatype_set(mp->mp_bh[n+1], GFS2_METATYPE_IN, GFS2_FORMAT_IN);
-	gfs2_buffer_copy_tail(mp->mp_bh[n+1], sizeof(struct gfs2_meta_header),
-			      dibh, sizeof(struct gfs2_dinode));
-	brelse(mp->mp_bh[n+1]);
-	mp->mp_bh[n+1] = NULL;
-	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
-	di = (struct gfs2_dinode *)dibh->b_data;
-	gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
-	*(__be64 *)(di + 1) = cpu_to_be64(bn);
-	ip->i_height += new_height;
-	gfs2_add_inode_blocks(&ip->i_inode, new_height);
-	di->di_height = cpu_to_be16(ip->i_height);
-	di->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(&ip->i_inode));
-	return 0;
-}

 /**
  * find_metapath - Find path through the metadata tree
- * @ip: The inode pointer
+ * @sdp: The superblock
  * @mp: The metapath to return the result in
  * @block: The disk block to look up
+ * @height: The pre-calculated height of the metadata tree
  *
  * This routine returns a struct metapath structure that defines a path
  * through the metadata of inode "ip" to get to block "block".
@@ -297,17 +236,27 @@ static int build_height(struct inode *inode, struct metapath *mp, unsigned heigh
  *
  */

-static void find_metapath(struct gfs2_inode *ip, u64 block,
-			  struct metapath *mp)
+static void find_metapath(const struct gfs2_sbd *sdp, u64 block,
+			  struct metapath *mp, unsigned int height)
 {
-	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 	unsigned int i;

-	for (i = ip->i_height; i--;)
+	for (i = height; i--;)
 		mp->mp_list[i] = do_div(block, sdp->sd_inptrs);

 }

+static inline unsigned int zero_metapath_length(const struct metapath *mp,
+						unsigned height)
+{
+	unsigned int i;
+	for (i = 0; i < height - 1; i++) {
+		if (mp->mp_list[i] != 0)
+			return i;
+	}
+	return height;
+}
+
 /**
  * metapointer - Return pointer to start of metadata in a buffer
  * @height: The metadata height (0 = dinode)
@@ -318,95 +267,62 @@ static void find_metapath(struct gfs2_inode *ip, u64 block,
  * metadata tree.
  */

-static inline __be64 *metapointer(int *boundary, unsigned int height,
-				  const struct metapath *mp)
+static inline __be64 *metapointer(unsigned int height, const struct metapath *mp)
 {
 	struct buffer_head *bh = mp->mp_bh[height];
 	unsigned int head_size = (height > 0) ?
 		sizeof(struct gfs2_meta_header) : sizeof(struct gfs2_dinode);
-	__be64 *ptr;
-	*boundary = 0;
-	ptr = ((__be64 *)(bh->b_data + head_size)) + mp->mp_list[height];
-	if (ptr + 1 == (__be64 *)(bh->b_data + bh->b_size))
-		*boundary = 1;
-	return ptr;
+	return ((__be64 *)(bh->b_data + head_size)) + mp->mp_list[height];
 }

 /**
- * lookup_block - Get the next metadata block in metadata tree
- * @ip: The GFS2 inode
- * @height: The height of the tree (0 = dinode)
+ * lookup_metapath - Walk the metadata tree to a specific point
+ * @ip: The inode
  * @mp: The metapath
- * @create: Non-zero if we may create a new meatdata block
- * @new: Used to indicate if we did create a new metadata block
- * @block: the returned disk block number
  *
- * Given a metatree, complete to a particular height, checks to see if the next
- * height of the tree exists. If not the next height of the tree is created.
- * The block number of the next height of the metadata tree is returned.
+ * Assumes that the inode's buffer has already been looked up and
+ * hooked onto mp->mp_bh[0] and that the metapath has been initialised
+ * by find_metapath().
  *
+ * If this function encounters part of the tree which has not been
+ * allocated, it returns the current height of the tree at the point
+ * at which it found the unallocated block. Blocks which are found are
+ * added to the mp->mp_bh[] list.
+ *
+ * Returns: error or height of metadata tree
  */

-static int lookup_block(struct gfs2_inode *ip, unsigned int height,
-			struct metapath *mp, int create,
-			int *new, u64 *block)
+static int lookup_metapath(struct gfs2_inode *ip, struct metapath *mp)
 {
-	int boundary;
-	__be64 *ptr = metapointer(&boundary, height, mp);
-	unsigned int n = 1;
-
-	if (*ptr) {
-		*block = be64_to_cpu(*ptr);
-		return boundary;
-	}
-
-	*block = 0;
-
-	if (!create)
-		return 0;
-
-	*block = gfs2_alloc_block(ip, &n);
-	if (height != ip->i_height - 1 || gfs2_is_dir(ip))
-		gfs2_trans_add_unrevoke(GFS2_SB(&ip->i_inode), *block, 1);
-
-	gfs2_trans_add_bh(ip->i_gl, mp->mp_bh[height], 1);
-
-	*ptr = cpu_to_be64(*block);
-	gfs2_add_inode_blocks(&ip->i_inode, 1);
-
-	*new = 1;
-	return 0;
-}
-
-static int lookup_metapath(struct inode *inode, struct metapath *mp,
-			   int create, int *new, u64 *dblock)
-{
-	struct buffer_head *bh;
-	struct gfs2_inode *ip = GFS2_I(inode);
 	unsigned int end_of_metadata = ip->i_height - 1;
 	unsigned int x;
+	__be64 *ptr;
+	u64 dblock;
 	int ret;

 	for (x = 0; x < end_of_metadata; x++) {
-		lookup_block(ip, x, mp, create, new, dblock);
-		if (!*dblock)
-			return 0;
+		ptr = metapointer(x, mp);
+		dblock = be64_to_cpu(*ptr);
+		if (!dblock)
+			return x + 1;

-		ret = gfs2_meta_indirect_buffer(ip, x+1, *dblock, *new, &mp->mp_bh[x+1]);
+		ret = gfs2_meta_indirect_buffer(ip, x+1, dblock, 0, &mp->mp_bh[x+1]);
 		if (ret)
 			return ret;
 	}

-	return lookup_block(ip, end_of_metadata, mp, create, new, dblock);
+	return ip->i_height;
 }

-static void release_metapath(struct metapath *mp)
+static inline void release_metapath(struct metapath *mp)
 {
 	int i;

-	for (i = 0; i < GFS2_MAX_META_HEIGHT; i++)
-		if (mp->mp_bh[i])
-			brelse(mp->mp_bh[i]);
+	for (i = 0; i < GFS2_MAX_META_HEIGHT; i++) {
+		if (mp->mp_bh[i] == NULL)
+			break;
+		brelse(mp->mp_bh[i]);
+	}
 }

 /**
@@ -445,32 +361,208 @@ static inline unsigned int gfs2_extent_length(void *start, unsigned int len, __b
 	return (ptr - first);
 }

-static inline void bmap_lock(struct inode *inode, int create)
+static inline void bmap_lock(struct gfs2_inode *ip, int create)
 {
-	struct gfs2_inode *ip = GFS2_I(inode);
 	if (create)
 		down_write(&ip->i_rw_mutex);
 	else
 		down_read(&ip->i_rw_mutex);
 }

-static inline void bmap_unlock(struct inode *inode, int create)
+static inline void bmap_unlock(struct gfs2_inode *ip, int create)
 {
-	struct gfs2_inode *ip = GFS2_I(inode);
 	if (create)
 		up_write(&ip->i_rw_mutex);
 	else
 		up_read(&ip->i_rw_mutex);
 }

+static inline __be64 *gfs2_indirect_init(struct metapath *mp,
+					 struct gfs2_glock *gl, unsigned int i,
+					 unsigned offset, u64 bn)
+{
+	__be64 *ptr = (__be64 *)(mp->mp_bh[i - 1]->b_data +
+		       ((i > 1) ? sizeof(struct gfs2_meta_header) :
+				 sizeof(struct gfs2_dinode)));
+	BUG_ON(i < 1);
+	BUG_ON(mp->mp_bh[i] != NULL);
+	mp->mp_bh[i] = gfs2_meta_new(gl, bn);
+	gfs2_trans_add_bh(gl, mp->mp_bh[i], 1);
+	gfs2_metatype_set(mp->mp_bh[i], GFS2_METATYPE_IN, GFS2_FORMAT_IN);
+	gfs2_buffer_clear_tail(mp->mp_bh[i], sizeof(struct gfs2_meta_header));
+	ptr += offset;
+	*ptr = cpu_to_be64(bn);
+	return ptr;
+}
+
+enum alloc_state {
+	ALLOC_DATA = 0,
+	ALLOC_GROW_DEPTH = 1,
+	ALLOC_GROW_HEIGHT = 2,
+	/* ALLOC_UNSTUFF = 3,   TBD and rather complicated */
+};
+
+/**
+ * gfs2_bmap_alloc - Build a metadata tree of the requested height
+ * @inode: The GFS2 inode
+ * @lblock: The logical starting block of the extent
+ * @bh_map: This is used to return the mapping details
+ * @mp: The metapath
+ * @sheight: The starting height (i.e. whats already mapped)
+ * @height: The height to build to
+ * @maxlen: The max number of data blocks to alloc
+ *
+ * In this routine we may have to alloc:
+ *   i) Indirect blocks to grow the metadata tree height
+ *  ii) Indirect blocks to fill in lower part of the metadata tree
+ * iii) Data blocks
+ *
+ * The function is in two parts. The first part works out the total
+ * number of blocks which we need. The second part does the actual
+ * allocation asking for an extent at a time (if enough contiguous free
+ * blocks are available, there will only be one request per bmap call)
+ * and uses the state machine to initialise the blocks in order.
+ *
+ * Returns: errno on error
+ */
+
+static int gfs2_bmap_alloc(struct inode *inode, const sector_t lblock,
+			   struct buffer_head *bh_map, struct metapath *mp,
+			   const unsigned int sheight,
+			   const unsigned int height,
+			   const unsigned int maxlen)
+{
+	struct gfs2_inode *ip = GFS2_I(inode);
+	struct gfs2_sbd *sdp = GFS2_SB(inode);
+	struct buffer_head *dibh = mp->mp_bh[0];
+	u64 bn, dblock = 0;
+	unsigned n, i, blks, alloced = 0, iblks = 0, zmpl = 0;
+	unsigned dblks = 0;
+	unsigned ptrs_per_blk;
+	const unsigned end_of_metadata = height - 1;
+	int eob = 0;
+	enum alloc_state state;
+	__be64 *ptr;
+	__be64 zero_bn = 0;
+
+	BUG_ON(sheight < 1);
+	BUG_ON(dibh == NULL);
+
+	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
+
+	if (height == sheight) {
+		struct buffer_head *bh;
+		/* Bottom indirect block exists, find unalloced extent size */
+		ptr = metapointer(end_of_metadata, mp);
+		bh = mp->mp_bh[end_of_metadata];
+		dblks = gfs2_extent_length(bh->b_data, bh->b_size, ptr, maxlen,
+					   &eob);
+		BUG_ON(dblks < 1);
+		state = ALLOC_DATA;
+	} else {
+		/* Need to allocate indirect blocks */
+		ptrs_per_blk = height > 1 ? sdp->sd_inptrs : sdp->sd_diptrs;
+		dblks = min(maxlen, ptrs_per_blk - mp->mp_list[end_of_metadata]);
+		if (height == ip->i_height) {
+			/* Writing into existing tree, extend tree down */
+			iblks = height - sheight;
+			state = ALLOC_GROW_DEPTH;
+		} else {
+			/* Building up tree height */
+			state = ALLOC_GROW_HEIGHT;
+			iblks = height - ip->i_height;
+			zmpl = zero_metapath_length(mp, height);
+			iblks -= zmpl;
+			iblks += height;
+		}
+	}
+
+	/* start of the second part of the function (state machine) */
+
+	blks = dblks + iblks;
+	i = sheight;
+	do {
+		n = blks - alloced;
+		bn = gfs2_alloc_block(ip, &n);
+		alloced += n;
+		if (state != ALLOC_DATA || gfs2_is_jdata(ip))
+			gfs2_trans_add_unrevoke(sdp, bn, n);
+		switch (state) {
+		/* Growing height of tree */
+		case ALLOC_GROW_HEIGHT:
+			if (i == 1) {
+				ptr = (__be64 *)(dibh->b_data +
+						 sizeof(struct gfs2_dinode));
+				zero_bn = *ptr;
+			}
+			for (; i - 1 < height - ip->i_height && n > 0; i++, n--)
+				gfs2_indirect_init(mp, ip->i_gl, i, 0, bn++);
+			if (i - 1 == height - ip->i_height) {
+				i--;
+				gfs2_buffer_copy_tail(mp->mp_bh[i],
+						sizeof(struct gfs2_meta_header),
+						dibh, sizeof(struct gfs2_dinode));
+				gfs2_buffer_clear_tail(dibh,
+						sizeof(struct gfs2_dinode) +
+						sizeof(__be64));
+				ptr = (__be64 *)(mp->mp_bh[i]->b_data +
+					sizeof(struct gfs2_meta_header));
+				*ptr = zero_bn;
+				state = ALLOC_GROW_DEPTH;
+				for(i = zmpl; i < height; i++) {
+					if (mp->mp_bh[i] == NULL)
+						break;
+					brelse(mp->mp_bh[i]);
+					mp->mp_bh[i] = NULL;
+				}
+				i = zmpl;
+			}
+			if (n == 0)
+				break;
+		/* Branching from existing tree */
+		case ALLOC_GROW_DEPTH:
+			if (i > 1 && i < height)
+				gfs2_trans_add_bh(ip->i_gl, mp->mp_bh[i-1], 1);
+			for (; i < height && n > 0; i++, n--)
+				gfs2_indirect_init(mp, ip->i_gl, i,
+						   mp->mp_list[i-1], bn++);
+			if (i == height)
+				state = ALLOC_DATA;
+			if (n == 0)
+				break;
+		/* Tree complete, adding data blocks */
+		case ALLOC_DATA:
+			BUG_ON(n > dblks);
+			BUG_ON(mp->mp_bh[end_of_metadata] == NULL);
+			gfs2_trans_add_bh(ip->i_gl, mp->mp_bh[end_of_metadata], 1);
+			dblks = n;
+			ptr = metapointer(end_of_metadata, mp);
+			dblock = bn;
+			while (n-- > 0)
+				*ptr++ = cpu_to_be64(bn++);
+			break;
+		}
+	} while (state != ALLOC_DATA);
+
+	ip->i_height = height;
+	gfs2_add_inode_blocks(&ip->i_inode, alloced);
+	gfs2_dinode_out(ip, mp->mp_bh[0]->b_data);
+	map_bh(bh_map, inode->i_sb, dblock);
+	bh_map->b_size = dblks << inode->i_blkbits;
+	set_buffer_new(bh_map);
+	return 0;
+}
+
 /**
  * gfs2_block_map - Map a block from an inode to a disk block
  * @inode: The inode
  * @lblock: The logical block number
  * @bh_map: The bh to be mapped
+ * @create: True if its ok to alloc blocks to satify the request
  *
- * Find the block number on the current device which corresponds to an
- * inode's block. If the block had to be created, "new" will be set.
+ * Sets buffer_mapped() if successful, sets buffer_boundary() if a
+ * read of metadata will be required before the next block can be
+ * mapped. Sets buffer_new() if new blocks were allocated.
  *
  * Returns: errno
  */
@@ -481,21 +573,21 @@ int gfs2_block_map(struct inode *inode, sector_t lblock,
 	struct gfs2_inode *ip = GFS2_I(inode);
 	struct gfs2_sbd *sdp = GFS2_SB(inode);
 	unsigned int bsize = sdp->sd_sb.sb_bsize;
-	int error = 0;
-	int new = 0;
-	u64 dblock = 0;
-	int boundary;
-	unsigned int maxlen = bh_map->b_size >> inode->i_blkbits;
-	struct metapath mp;
-	u64 size;
+	const unsigned int maxlen = bh_map->b_size >> inode->i_blkbits;
 	const u64 *arr = sdp->sd_heightsize;
-	BUG_ON(maxlen == 0);
+	__be64 *ptr;
+	u64 size;
+	struct metapath mp;
+	int ret;
+	int eob;
+	unsigned int len;
+	struct buffer_head *bh;
+	u8 height;

-	if (gfs2_assert_warn(sdp, !gfs2_is_stuffed(ip)))
-		return 0;
+	BUG_ON(maxlen == 0);

 	memset(mp.mp_bh, 0, sizeof(mp.mp_bh));
-	bmap_lock(inode, create);
+	bmap_lock(ip, create);
 	clear_buffer_mapped(bh_map);
 	clear_buffer_new(bh_map);
 	clear_buffer_boundary(bh_map);
@@ -503,56 +595,50 @@ int gfs2_block_map(struct inode *inode, sector_t lblock,
 		bsize = sdp->sd_jbsize;
 		arr = sdp->sd_jheightsize;
 	}
-	size = (lblock + 1) * bsize;

-	error = gfs2_meta_inode_buffer(ip, &mp.mp_bh[0]);
-	if (error)
-		goto out_fail;
-
-	if (size > arr[ip->i_height]) {
-		u8 height = ip->i_height;
-		if (!create)
-			goto out_ok;
-		while (size > arr[height])
-			height++;
-		error = build_height(inode, &mp, height);
-		if (error)
-			goto out_fail;
-	}
+	ret = gfs2_meta_inode_buffer(ip, &mp.mp_bh[0]);
+	if (ret)
+		goto out;

-	find_metapath(ip, lblock, &mp);
-	error = lookup_metapath(inode, &mp, create, &new, &dblock);
-	if (error < 0)
-		goto out_fail;
-	boundary = error;
-
-	if (new) {
-		map_bh(bh_map, inode->i_sb, dblock);
-		if (boundary)
-			set_buffer_boundary(bh_map);
-		gfs2_trans_add_bh(ip->i_gl, mp.mp_bh[0], 1);
-		gfs2_dinode_out(ip, mp.mp_bh[0]->b_data);
-		set_buffer_new(bh_map);
-		goto out_ok;
-	}
+	height = ip->i_height;
+	size = (lblock + 1) * bsize;
+	while (size > arr[height])
+		height++;
+	find_metapath(sdp, lblock, &mp, height);
+	ret = 1;
+	if (height > ip->i_height || gfs2_is_stuffed(ip))
+		goto do_alloc;
+	ret = lookup_metapath(ip, &mp);
+	if (ret < 0)
+		goto out;
+	if (ret != ip->i_height)
+		goto do_alloc;
+	ptr = metapointer(ip->i_height - 1, &mp);
+	if (*ptr == 0)
+		goto do_alloc;
+	map_bh(bh_map, inode->i_sb, be64_to_cpu(*ptr));
+	bh = mp.mp_bh[ip->i_height - 1];
+	len = gfs2_extent_length(bh->b_data, bh->b_size, ptr, maxlen, &eob);
+	bh_map->b_size = (len << inode->i_blkbits);
+	if (eob)
+		set_buffer_boundary(bh_map);
+	ret = 0;
+out:
+	release_metapath(&mp);
+	bmap_unlock(ip, create);
+	return ret;

-	if (dblock) {
-		unsigned int len;
-		struct buffer_head *bh = mp.mp_bh[ip->i_height - 1];
-		__be64 *ptr = metapointer(&boundary, ip->i_height - 1, &mp);
-		map_bh(bh_map, inode->i_sb, dblock);
-		len = gfs2_extent_length(bh->b_data, bh->b_size, ptr, maxlen,
-					 &boundary);
-		bh_map->b_size = (len << inode->i_blkbits);
-		if (boundary)
-			set_buffer_boundary(bh_map);
+do_alloc:
+	/* All allocations are done here, firstly check create flag */
+	if (!create) {
+		BUG_ON(gfs2_is_stuffed(ip));
+		ret = 0;
+		goto out;
 	}
-out_ok:
-	error = 0;
-out_fail:
-	release_metapath(&mp);
-	bmap_unlock(inode, create);
-	return error;
+
+	/* At this point ret is the tree depth of already allocated blocks */
+	ret = gfs2_bmap_alloc(inode, lblock, bh_map, &mp, ret, height, maxlen);
+	goto out;
 }

 /*
@@ -568,7 +654,7 @@ int gfs2_extent_map(struct inode *inode, u64 lblock, int *new, u64 *dblock, unsi
 	BUG_ON(!dblock);
 	BUG_ON(!new);

-	bh.b_size = 1 << (inode->i_blkbits + 5);
+	bh.b_size = 1 << (inode->i_blkbits + (create ? 0 : 5));
 	ret = gfs2_block_map(inode, lblock, &bh, create);
 	*extlen = bh.b_size >> inode->i_blkbits;
 	*dblock = bh.b_blocknr;
@@ -835,38 +921,25 @@ static int do_grow(struct gfs2_inode *ip, u64 size)
 	if (error)
 		goto out_ipres;

+	error = gfs2_meta_inode_buffer(ip, &dibh);
+	if (error)
+		goto out_end_trans;
+
 	if (size > sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)) {
-		const u64 *arr = sdp->sd_heightsize;
 		if (gfs2_is_stuffed(ip)) {
 			error = gfs2_unstuff_dinode(ip, NULL);
 			if (error)
-				goto out_end_trans;
-		}
-
-		down_write(&ip->i_rw_mutex);
-		if (size > arr[ip->i_height]) {
-			struct metapath mp;
-			u8 height = ip->i_height;
-			while(size > arr[height])
-				height++;
-			error = build_height(&ip->i_inode, &mp, height);
+				goto out_brelse;
 		}
-		up_write(&ip->i_rw_mutex);
-		if (error)
-			goto out_end_trans;
 	}

 	ip->i_di.di_size = size;
 	ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
-
-	error = gfs2_meta_inode_buffer(ip, &dibh);
-	if (error)
-		goto out_end_trans;
-
 	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
 	gfs2_dinode_out(ip, dibh->b_data);
-	brelse(dibh);

+out_brelse:
+	brelse(dibh);
 out_end_trans:
 	gfs2_trans_end(sdp);
 out_ipres:
@@ -996,6 +1069,7 @@ out:
 
 static int trunc_dealloc(struct gfs2_inode *ip, u64 size)
 {
+	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 	unsigned int height = ip->i_height;
 	u64 lblock;
 	struct metapath mp;
@@ -1004,9 +1078,9 @@ static int trunc_dealloc(struct gfs2_inode *ip, u64 size)
 	if (!size)
 		lblock = 0;
 	else
-		lblock = (size - 1) >> GFS2_SB(&ip->i_inode)->sd_sb.sb_bsize_shift;
+		lblock = (size - 1) >> sdp->sd_sb.sb_bsize_shift;

-	find_metapath(ip, lblock, &mp);
+	find_metapath(sdp, lblock, &mp, ip->i_height);
 	gfs2_alloc_get(ip);

 	error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c
index 34dc8dfaba12..a3753c7989f7 100644
--- a/fs/gfs2/dir.c
+++ b/fs/gfs2/dir.c
@@ -159,6 +159,7 @@ static int gfs2_dir_write_data(struct gfs2_inode *ip, const char *buf,
 	unsigned int o;
 	int copied = 0;
 	int error = 0;
+	int new = 0;

 	if (!size)
 		return 0;
@@ -183,7 +184,6 @@ static int gfs2_dir_write_data(struct gfs2_inode *ip, const char *buf,
 	while (copied < size) {
 		unsigned int amount;
 		struct buffer_head *bh;
-		int new = 0;

 		amount = size - copied;
 		if (amount > sdp->sd_sb.sb_bsize - o)
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index 77eba0a38040..4291375cecc6 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -1357,16 +1357,18 @@ static u32 rgblk_search(struct gfs2_rgrpd *rgd, u32 goal,
 		gfs2_trans_add_bh(rgd->rd_gl, bi->bi_bh, 1);
 		gfs2_setbit(rgd, bi->bi_bh->b_data, bi->bi_clone, bi->bi_offset,
 			    bi->bi_len, blk, new_state);
-		while(*n < elen) {
+		goal = blk;
+		while (*n < elen) {
 			goal++;
-			if (goal >= (bi->bi_len / GFS2_NBBY))
+			if (goal >= (bi->bi_len * GFS2_NBBY))
 				break;
 			if (gfs2_testbit(rgd, buffer, bi->bi_len, goal) !=
 			    GFS2_BLKST_FREE)
 				break;
-			(*n)++;
 			gfs2_setbit(rgd, bi->bi_bh->b_data, bi->bi_clone,
-				    bi->bi_offset, bi->bi_len, blk, new_state);
+				    bi->bi_offset, bi->bi_len, goal,
+				    new_state);
+			(*n)++;
 		}
 	}
 