Diffstat (limited to 'fs/ext2/inode.c')
 -rw-r--r--  fs/ext2/inode.c | 524 +++++++++++++++++++++++++++++++-----------------------
 1 file changed, 308 insertions(+), 216 deletions(-)
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index 84818176fd9d..b1ab32ab5a77 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -54,19 +54,6 @@ static inline int ext2_inode_is_fast_symlink(struct inode *inode)
 }
 
 /*
- * Called at each iput().
- *
- * The inode may be "bad" if ext2_read_inode() saw an error from
- * ext2_get_inode(), so we need to check that to avoid freeing random disk
- * blocks.
- */
-void ext2_put_inode(struct inode *inode)
-{
-	if (!is_bad_inode(inode))
-		ext2_discard_prealloc(inode);
-}
-
-/*
  * Called at the last iput() if i_nlink is zero.
  */
 void ext2_delete_inode (struct inode * inode)
@@ -89,61 +76,6 @@ no_delete:
 	clear_inode(inode);	/* We must guarantee clearing of inode... */
 }
 
-void ext2_discard_prealloc (struct inode * inode)
-{
-#ifdef EXT2_PREALLOCATE
-	struct ext2_inode_info *ei = EXT2_I(inode);
-	write_lock(&ei->i_meta_lock);
-	if (ei->i_prealloc_count) {
-		unsigned short total = ei->i_prealloc_count;
-		unsigned long block = ei->i_prealloc_block;
-		ei->i_prealloc_count = 0;
-		ei->i_prealloc_block = 0;
-		write_unlock(&ei->i_meta_lock);
-		ext2_free_blocks (inode, block, total);
-		return;
-	} else
-		write_unlock(&ei->i_meta_lock);
-#endif
-}
-
-static int ext2_alloc_block (struct inode * inode, unsigned long goal, int *err)
-{
-#ifdef EXT2FS_DEBUG
-	static unsigned long alloc_hits, alloc_attempts;
-#endif
-	unsigned long result;
-
-
-#ifdef EXT2_PREALLOCATE
-	struct ext2_inode_info *ei = EXT2_I(inode);
-	write_lock(&ei->i_meta_lock);
-	if (ei->i_prealloc_count &&
-	    (goal == ei->i_prealloc_block || goal + 1 == ei->i_prealloc_block))
-	{
-		result = ei->i_prealloc_block++;
-		ei->i_prealloc_count--;
-		write_unlock(&ei->i_meta_lock);
-		ext2_debug ("preallocation hit (%lu/%lu).\n",
-			    ++alloc_hits, ++alloc_attempts);
-	} else {
-		write_unlock(&ei->i_meta_lock);
-		ext2_discard_prealloc (inode);
-		ext2_debug ("preallocation miss (%lu/%lu).\n",
-			    alloc_hits, ++alloc_attempts);
-		if (S_ISREG(inode->i_mode))
-			result = ext2_new_block (inode, goal,
-				 &ei->i_prealloc_count,
-				 &ei->i_prealloc_block, err);
-		else
-			result = ext2_new_block(inode, goal, NULL, NULL, err);
-	}
-#else
-	result = ext2_new_block (inode, goal, 0, 0, err);
-#endif
-	return result;
-}
-
 typedef struct {
 	__le32	*p;
 	__le32	key;
@@ -228,7 +160,8 @@ static int ext2_block_to_path(struct inode *inode,
 		ext2_warning (inode->i_sb, "ext2_block_to_path", "block > big");
 	}
 	if (boundary)
-		*boundary = (i_block & (ptrs - 1)) == (final - 1);
+		*boundary = final - 1 - (i_block & (ptrs - 1));
+
 	return n;
 }
 
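The hunk above changes the meaning of *boundary: the old expression stored a yes/no flag ("is i_block the last slot in this indirect block?"), while the new one stores how many slots remain before the indirect-block boundary, which is what later lets a single call map several blocks. A small userspace sketch of the two conventions; the concrete values of ptrs and i_block are invented here, and in the kernel ptrs comes from EXT2_ADDR_PER_BLOCK():

#include <stdio.h>

int main(void)
{
	unsigned long ptrs = 256;	/* pointers per indirect block (1K blocks) */
	unsigned long final = ptrs;	/* number of slots at this level */
	long i_block = 300;		/* logical block within this level */

	/* old convention: true only on the very last slot */
	int old_style = (i_block & (ptrs - 1)) == (final - 1);
	/* new convention: count of slots left before the boundary */
	long new_style = final - 1 - (i_block & (ptrs - 1));

	printf("old: %d, new: %ld slots left\n", old_style, new_style);
	return 0;
}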
@@ -355,39 +288,129 @@ static unsigned long ext2_find_near(struct inode *inode, Indirect *ind)
  * @block: block we want
  * @chain: chain of indirect blocks
  * @partial: pointer to the last triple within a chain
- * @goal: place to store the result.
  *
- * Normally this function find the prefered place for block allocation,
- * stores it in *@goal and returns zero. If the branch had been changed
- * under us we return -EAGAIN.
+ * Returns preferred place for a block (the goal).
  */
 
 static inline int ext2_find_goal(struct inode *inode,
 				 long block,
 				 Indirect chain[4],
-				 Indirect *partial,
-				 unsigned long *goal)
+				 Indirect *partial)
 {
-	struct ext2_inode_info *ei = EXT2_I(inode);
-	write_lock(&ei->i_meta_lock);
-	if ((block == ei->i_next_alloc_block + 1) && ei->i_next_alloc_goal) {
-		ei->i_next_alloc_block++;
-		ei->i_next_alloc_goal++;
-	}
-	if (verify_chain(chain, partial)) {
-		/*
-		 * try the heuristic for sequential allocation,
-		 * failing that at least try to get decent locality.
-		 */
-		if (block == ei->i_next_alloc_block)
-			*goal = ei->i_next_alloc_goal;
-		if (!*goal)
-			*goal = ext2_find_near(inode, partial);
-		write_unlock(&ei->i_meta_lock);
-		return 0;
+	struct ext2_block_alloc_info *block_i;
+
+	block_i = EXT2_I(inode)->i_block_alloc_info;
+
+	/*
+	 * try the heuristic for sequential allocation,
+	 * failing that at least try to get decent locality.
+	 */
+	if (block_i && (block == block_i->last_alloc_logical_block + 1)
+			&& (block_i->last_alloc_physical_block != 0)) {
+		return block_i->last_alloc_physical_block + 1;
 	}
-	write_unlock(&ei->i_meta_lock);
-	return -EAGAIN;
+
+	return ext2_find_near(inode, partial);
+}
+
+/**
+ * ext2_blks_to_allocate: Look up the block map and count the number
+ * of direct blocks that need to be allocated for the given branch.
+ *
+ * @branch: chain of indirect blocks
+ * @k: number of blocks needed for indirect blocks
+ * @blks: number of data blocks to be mapped.
+ * @blocks_to_boundary: the offset in the indirect block
+ *
+ * return the total number of blocks to be allocated, including the
+ * direct and indirect blocks.
+ */
+static int
+ext2_blks_to_allocate(Indirect *branch, int k, unsigned long blks,
+		int blocks_to_boundary)
+{
+	unsigned long count = 0;
+
+	/*
+	 * Simple case, [t,d]Indirect block(s) has not been allocated yet,
+	 * then it's clear that blocks on that path have not been allocated
+	 */
+	if (k > 0) {
+		/* right now we don't handle cross boundary allocation */
+		if (blks < blocks_to_boundary + 1)
+			count += blks;
+		else
+			count += blocks_to_boundary + 1;
+		return count;
+	}
+
+	count++;
+	while (count < blks && count <= blocks_to_boundary
+		&& le32_to_cpu(*(branch[0].p + count)) == 0) {
+		count++;
+	}
+	return count;
+}
+
+/**
+ * ext2_alloc_blocks: allocate the multiple blocks needed for a branch
+ * @indirect_blks: the number of blocks needed to allocate for the
+ *		   indirect blocks
+ *
+ * @new_blocks: on return it will store the new block numbers for
+ * the indirect blocks(if needed) and the first direct block,
+ * @blks: on return it will store the total number of allocated
+ *	   direct blocks
+ */
+static int ext2_alloc_blocks(struct inode *inode,
+			ext2_fsblk_t goal, int indirect_blks, int blks,
+			ext2_fsblk_t new_blocks[4], int *err)
+{
+	int target, i;
+	unsigned long count = 0;
+	int index = 0;
+	ext2_fsblk_t current_block = 0;
+	int ret = 0;
+
+	/*
+	 * Here we try to allocate the requested multiple blocks at once,
+	 * on a best-effort basis.
+	 * To build a branch, we should allocate blocks for
+	 * the indirect blocks(if not allocated yet), and at least
+	 * the first direct block of this branch.  That's the
+	 * minimum number of blocks we need to allocate(required)
+	 */
+	target = blks + indirect_blks;
+
+	while (1) {
+		count = target;
+		/* allocating blocks for indirect blocks and direct blocks */
+		current_block = ext2_new_blocks(inode, goal, &count, err);
+		if (*err)
+			goto failed_out;
+
+		target -= count;
+		/* allocate blocks for indirect blocks */
+		while (index < indirect_blks && count) {
+			new_blocks[index++] = current_block++;
+			count--;
+		}
+
+		if (count > 0)
+			break;
+	}
+
+	/* save the new block number for the first direct block */
+	new_blocks[index] = current_block;
+
+	/* total number of blocks allocated for direct blocks */
+	ret = count;
+	*err = 0;
+	return ret;
+failed_out:
+	for (i = 0; i < index; i++)
+		ext2_free_blocks(inode, new_blocks[i], 1);
+	return ret;
 }
 
 /**
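The ext2_alloc_blocks() helper added above loops because ext2_new_blocks() may return fewer blocks than requested. A userspace model of that best-effort loop (error handling dropped, and fake_new_blocks() is an invented stand-in that hands out at most two blocks per call):

#include <stdio.h>

static unsigned long next_free = 5000;	/* invented allocator state */

/* stand-in for ext2_new_blocks(): trims the request, returns extent start */
static unsigned long fake_new_blocks(unsigned long *count)
{
	unsigned long start = next_free;

	if (*count > 2)
		*count = 2;
	next_free += *count;
	return start;
}

int main(void)
{
	int indirect_blks = 2, blks = 4;	/* 2 metadata + up to 4 data blocks */
	unsigned long new_blocks[4];
	int index = 0, target = blks + indirect_blks;
	unsigned long count = 0, current_block = 0;

	while (1) {
		count = target;
		current_block = fake_new_blocks(&count);
		target -= count;
		/* metadata (indirect) blocks get their numbers first */
		while (index < indirect_blks && count) {
			new_blocks[index++] = current_block++;
			count--;
		}
		/* stop as soon as at least one direct block is in hand */
		if (count > 0)
			break;
	}
	new_blocks[index] = current_block;	/* first direct block */

	printf("indirect: %lu %lu, %lu direct block(s) from %lu\n",
	       new_blocks[0], new_blocks[1], count, new_blocks[index]);
	return 0;
}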
@@ -416,39 +439,49 @@ static inline int ext2_find_goal(struct inode *inode,
  */
 
 static int ext2_alloc_branch(struct inode *inode,
-			int num,
-			unsigned long goal,
-			int *offsets,
-			Indirect *branch)
+			int indirect_blks, int *blks, ext2_fsblk_t goal,
+			int *offsets, Indirect *branch)
 {
 	int blocksize = inode->i_sb->s_blocksize;
-	int n = 0;
-	int err;
-	int i;
-	int parent = ext2_alloc_block(inode, goal, &err);
-
-	branch[0].key = cpu_to_le32(parent);
-	if (parent) for (n = 1; n < num; n++) {
-		struct buffer_head *bh;
-		/* Allocate the next block */
-		int nr = ext2_alloc_block(inode, parent, &err);
-		if (!nr)
-			break;
-		branch[n].key = cpu_to_le32(nr);
+	int i, n = 0;
+	int err = 0;
+	struct buffer_head *bh;
+	int num;
+	ext2_fsblk_t new_blocks[4];
+	ext2_fsblk_t current_block;
+
+	num = ext2_alloc_blocks(inode, goal, indirect_blks,
+				*blks, new_blocks, &err);
+	if (err)
+		return err;
+
+	branch[0].key = cpu_to_le32(new_blocks[0]);
+	/*
+	 * metadata blocks and data blocks are allocated.
+	 */
+	for (n = 1; n <= indirect_blks; n++) {
 		/*
-		 * Get buffer_head for parent block, zero it out and set
-		 * the pointer to new one, then send parent to disk.
+		 * Get buffer_head for parent block, zero it out
+		 * and set the pointer to new one, then send
+		 * parent to disk.
 		 */
-		bh = sb_getblk(inode->i_sb, parent);
-		if (!bh) {
-			err = -EIO;
-			break;
-		}
+		bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
+		branch[n].bh = bh;
 		lock_buffer(bh);
 		memset(bh->b_data, 0, blocksize);
-		branch[n].bh = bh;
 		branch[n].p = (__le32 *) bh->b_data + offsets[n];
+		branch[n].key = cpu_to_le32(new_blocks[n]);
 		*branch[n].p = branch[n].key;
+		if (n == indirect_blks) {
+			current_block = new_blocks[n];
+			/*
+			 * End of chain, update the last new metablock of
+			 * the chain to point to the newly allocated
+			 * data block numbers.
+			 */
+			for (i = 1; i < num; i++)
+				*(branch[n].p + i) = cpu_to_le32(++current_block);
+		}
 		set_buffer_uptodate(bh);
 		unlock_buffer(bh);
 		mark_buffer_dirty_inode(bh, inode);
@@ -458,77 +491,68 @@ static int ext2_alloc_branch(struct inode *inode,
 	 */
 	if (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode))
 		sync_dirty_buffer(bh);
-		parent = nr;
 	}
-	if (n == num)
-		return 0;
-
-	/* Allocation failed, free what we already allocated */
-	for (i = 1; i < n; i++)
-		bforget(branch[i].bh);
-	for (i = 0; i < n; i++)
-		ext2_free_blocks(inode, le32_to_cpu(branch[i].key), 1);
+	*blks = num;
 	return err;
 }
 
 /**
  * ext2_splice_branch - splice the allocated branch onto inode.
  * @inode: owner
  * @block: (logical) number of block we are adding
  * @chain: chain of indirect blocks (with a missing link - see
  *	ext2_alloc_branch)
  * @where: location of missing link
- * @num: number of blocks we are adding
+ * @num: number of indirect blocks we are adding
+ * @blks: number of direct blocks we are adding
  *
- * This function verifies that chain (up to the missing link) had not
- * changed, fills the missing link and does all housekeeping needed in
- * inode (->i_blocks, etc.). In case of success we end up with the full
- * chain to new block and return 0. Otherwise (== chain had been changed)
- * we free the new blocks (forgetting their buffer_heads, indeed) and
- * return -EAGAIN.
+ * This function fills the missing link and does all housekeeping needed in
+ * inode (->i_blocks, etc.). In case of success we end up with the full
+ * chain to new block and return 0.
  */
-
-static inline int ext2_splice_branch(struct inode *inode,
-			long block,
-			Indirect chain[4],
-			Indirect *where,
-			int num)
+static void ext2_splice_branch(struct inode *inode,
+			long block, Indirect *where, int num, int blks)
 {
-	struct ext2_inode_info *ei = EXT2_I(inode);
 	int i;
+	struct ext2_block_alloc_info *block_i;
+	ext2_fsblk_t current_block;
 
-	/* Verify that place we are splicing to is still there and vacant */
-
-	write_lock(&ei->i_meta_lock);
-	if (!verify_chain(chain, where-1) || *where->p)
-		goto changed;
+	block_i = EXT2_I(inode)->i_block_alloc_info;
 
+	/* XXX LOCKING probably should have i_meta_lock ? */
 	/* That's it */
 
 	*where->p = where->key;
-	ei->i_next_alloc_block = block;
-	ei->i_next_alloc_goal = le32_to_cpu(where[num-1].key);
 
-	write_unlock(&ei->i_meta_lock);
+	/*
+	 * update the host buffer_head or inode to point to the just
+	 * allocated direct blocks
+	 */
+	if (num == 0 && blks > 1) {
+		current_block = le32_to_cpu(where->key) + 1;
+		for (i = 1; i < blks; i++)
+			*(where->p + i) = cpu_to_le32(current_block++);
+	}
 
-	/* We are done with atomic stuff, now do the rest of housekeeping */
+	/*
+	 * update the most recently allocated logical & physical block
+	 * in i_block_alloc_info, to assist in finding the proper goal block
+	 * for the next allocation
+	 */
+	if (block_i) {
+		block_i->last_alloc_logical_block = block + blks - 1;
+		block_i->last_alloc_physical_block =
+			le32_to_cpu(where[num].key) + blks - 1;
+	}
 
-	inode->i_ctime = CURRENT_TIME_SEC;
+	/* We are done with atomic stuff, now do the rest of housekeeping */
 
 	/* had we spliced it onto indirect block? */
 	if (where->bh)
 		mark_buffer_dirty_inode(where->bh, inode);
 
+	inode->i_ctime = CURRENT_TIME_SEC;
 	mark_inode_dirty(inode);
-	return 0;
-
-changed:
-	write_unlock(&ei->i_meta_lock);
-	for (i = 1; i < num; i++)
-		bforget(where[i].bh);
-	for (i = 0; i < num; i++)
-		ext2_free_blocks(inode, le32_to_cpu(where[i].key), 1);
-	return -EAGAIN;
 }
 
 /*
@@ -542,64 +566,99 @@ changed:
  * That has a nice additional property: no special recovery from the failed
  * allocations is needed - we simply release blocks and do not touch anything
  * reachable from inode.
+ *
+ * `handle' can be NULL if create == 0.
+ *
+ * The BKL may not be held on entry here.  Be sure to take it early.
+ * return > 0, # of blocks mapped or allocated.
+ * return = 0, if plain lookup failed.
+ * return < 0, error case.
  */
-
-int ext2_get_block(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create)
+static int ext2_get_blocks(struct inode *inode,
+			sector_t iblock, unsigned long maxblocks,
+			struct buffer_head *bh_result,
+			int create)
 {
 	int err = -EIO;
 	int offsets[4];
 	Indirect chain[4];
 	Indirect *partial;
-	unsigned long goal;
-	int left;
-	int boundary = 0;
-	int depth = ext2_block_to_path(inode, iblock, offsets, &boundary);
+	ext2_fsblk_t goal;
+	int indirect_blks;
+	int blocks_to_boundary = 0;
+	int depth;
+	struct ext2_inode_info *ei = EXT2_I(inode);
+	int count = 0;
+	ext2_fsblk_t first_block = 0;
 
-	if (depth == 0)
-		goto out;
+	depth = ext2_block_to_path(inode, iblock, offsets, &blocks_to_boundary);
 
+	if (depth == 0)
+		return err;
 reread:
 	partial = ext2_get_branch(inode, depth, offsets, chain, &err);
 
 	/* Simplest case - block found, no allocation needed */
 	if (!partial) {
-got_it:
-		map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
-		if (boundary)
-			set_buffer_boundary(bh_result);
-		/* Clean up and exit */
-		partial = chain+depth-1; /* the whole chain */
-		goto cleanup;
+		first_block = le32_to_cpu(chain[depth - 1].key);
+		clear_buffer_new(bh_result); /* What's this do? */
+		count++;
+		/* map more blocks */
+		while (count < maxblocks && count <= blocks_to_boundary) {
+			ext2_fsblk_t blk;
+
+			if (!verify_chain(chain, partial)) {
+				/*
+				 * Indirect block might be removed by
+				 * truncate while we were reading it.
+				 * Handling of that case: forget what we've
+				 * got now, go to reread.
+				 */
+				count = 0;
+				goto changed;
+			}
+			blk = le32_to_cpu(*(chain[depth-1].p + count));
+			if (blk == first_block + count)
+				count++;
+			else
+				break;
+		}
+		goto got_it;
 	}
 
 	/* Next simple case - plain lookup or failed read of indirect block */
-	if (!create || err == -EIO) {
-cleanup:
-		while (partial > chain) {
-			brelse(partial->bh);
-			partial--;
-		}
-out:
-		return err;
-	}
+	if (!create || err == -EIO)
+		goto cleanup;
+
+	mutex_lock(&ei->truncate_mutex);
 
 	/*
-	 * Indirect block might be removed by truncate while we were
-	 * reading it. Handling of that case (forget what we've got and
-	 * reread) is taken out of the main path.
-	 */
-	if (err == -EAGAIN)
-		goto changed;
+	 * Okay, we need to do block allocation.  Lazily initialize the block
+	 * allocation info here if necessary.
+	 */
+	if (S_ISREG(inode->i_mode) && (!ei->i_block_alloc_info))
+		ext2_init_block_alloc_info(inode);
 
-	goal = 0;
-	if (ext2_find_goal(inode, iblock, chain, partial, &goal) < 0)
-		goto changed;
+	goal = ext2_find_goal(inode, iblock, chain, partial);
 
-	left = (chain + depth) - partial;
-	err = ext2_alloc_branch(inode, left, goal,
-				offsets+(partial-chain), partial);
-	if (err)
+	/* the number of blocks we need to allocate for [d,t]indirect blocks */
+	indirect_blks = (chain + depth) - partial - 1;
+	/*
+	 * Next look up the indirect map to count the total number of
+	 * direct blocks to allocate for this branch.
+	 */
+	count = ext2_blks_to_allocate(partial, indirect_blks,
+					maxblocks, blocks_to_boundary);
+	/*
+	 * XXX ???? Block out ext2_truncate while we alter the tree
+	 */
+	err = ext2_alloc_branch(inode, indirect_blks, &count, goal,
+				offsets + (partial - chain), partial);
+
+	if (err) {
+		mutex_unlock(&ei->truncate_mutex);
 		goto cleanup;
+	}
 
 	if (ext2_use_xip(inode->i_sb)) {
 		/*
@@ -607,16 +666,28 @@ out:
 		 */
 		err = ext2_clear_xip_target (inode,
 			le32_to_cpu(chain[depth-1].key));
-		if (err)
+		if (err) {
+			mutex_unlock(&ei->truncate_mutex);
 			goto cleanup;
+		}
 	}
 
-	if (ext2_splice_branch(inode, iblock, chain, partial, left) < 0)
-		goto changed;
-
+	ext2_splice_branch(inode, iblock, partial, indirect_blks, count);
+	mutex_unlock(&ei->truncate_mutex);
 	set_buffer_new(bh_result);
-	goto got_it;
-
+got_it:
+	map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
+	if (count > blocks_to_boundary)
+		set_buffer_boundary(bh_result);
+	err = count;
+	/* Clean up and exit */
+	partial = chain + depth - 1;	/* the whole chain */
+cleanup:
+	while (partial > chain) {
+		brelse(partial->bh);
+		partial--;
+	}
+	return err;
 changed:
 	while (partial > chain) {
 		brelse(partial->bh);
@@ -625,6 +696,19 @@ changed:
 	goto reread;
 }
 
+int ext2_get_block(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create)
+{
+	unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
+	int ret = ext2_get_blocks(inode, iblock, max_blocks,
+			bh_result, create);
+	if (ret > 0) {
+		bh_result->b_size = (ret << inode->i_blkbits);
+		ret = 0;
+	}
+	return ret;
+
+}
+
 static int ext2_writepage(struct page *page, struct writeback_control *wbc)
 {
 	return block_write_full_page(page, ext2_get_block, wbc);
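The ext2_get_block() wrapper added above is what the generic code keeps calling: it passes the buffer_head's b_size down as a block count, and on success folds "N blocks mapped" back into b_size and returns 0. A userspace sketch of that contract, with simplified stand-in types and a stub in place of ext2_get_blocks() that pretends three blocks were mapped:

#include <stdio.h>

struct buffer_head { unsigned long b_size; };	/* stand-in type */

/* stub for ext2_get_blocks(): >0 = blocks mapped, 0 = hole, <0 = error */
static int stub_get_blocks(unsigned long maxblocks)
{
	return maxblocks >= 3 ? 3 : (int)maxblocks;
}

static int get_block(struct buffer_head *bh, unsigned int blkbits)
{
	unsigned long max_blocks = bh->b_size >> blkbits;
	int ret = stub_get_blocks(max_blocks);

	if (ret > 0) {
		bh->b_size = (unsigned long)ret << blkbits;
		ret = 0;
	}
	return ret;
}

int main(void)
{
	struct buffer_head bh = { .b_size = 16384 };	/* room for 16 1K blocks */
	int err = get_block(&bh, 10);			/* 2^10 = 1K block size */

	printf("err=%d b_size=%lu\n", err, bh.b_size);	/* err=0 b_size=3072 */
	return 0;
}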
@@ -913,9 +997,10 @@ static void ext2_free_branches(struct inode *inode, __le32 *p, __le32 *q, int de
 	ext2_free_data(inode, p, q);
 }
 
-void ext2_truncate (struct inode * inode)
+void ext2_truncate(struct inode *inode)
 {
 	__le32 *i_data = EXT2_I(inode)->i_data;
+	struct ext2_inode_info *ei = EXT2_I(inode);
 	int addr_per_block = EXT2_ADDR_PER_BLOCK(inode->i_sb);
 	int offsets[4];
 	Indirect chain[4];
@@ -933,8 +1018,6 @@ void ext2_truncate (struct inode * inode)
 	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
 		return;
 
-	ext2_discard_prealloc(inode);
-
 	blocksize = inode->i_sb->s_blocksize;
 	iblock = (inode->i_size + blocksize-1)
 			>> EXT2_BLOCK_SIZE_BITS(inode->i_sb);
@@ -952,6 +1035,12 @@ void ext2_truncate (struct inode * inode)
 	if (n == 0)
 		return;
 
+	/*
+	 * From here we block out all ext2_get_block() callers who want to
+	 * modify the block allocation tree.
+	 */
+	mutex_lock(&ei->truncate_mutex);
+
 	if (n == 1) {
 		ext2_free_data(inode, i_data+offsets[0],
 				i_data + EXT2_NDIR_BLOCKS);
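The mutex_lock() added above is the other half of the new serialization: the same truncate_mutex is taken in ext2_get_blocks() before the tree is grown, so growing and shrinking the block tree can no longer interleave, replacing the old optimistic verify_chain()/-EAGAIN retry. A userspace pthread sketch of the idea; the mutex name, the toy tree, and both thread bodies are invented:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t truncate_mutex = PTHREAD_MUTEX_INITIALIZER;
static int tree[8];			/* toy stand-in for the block tree */

static void *alloc_side(void *arg)	/* models ext2_get_blocks() */
{
	(void)arg;
	pthread_mutex_lock(&truncate_mutex);
	tree[3] = 1000;			/* splice in a new branch */
	pthread_mutex_unlock(&truncate_mutex);
	return NULL;
}

static void *truncate_side(void *arg)	/* models ext2_truncate() */
{
	(void)arg;
	pthread_mutex_lock(&truncate_mutex);
	for (int i = 0; i < 8; i++)	/* free everything past i_size */
		tree[i] = 0;
	pthread_mutex_unlock(&truncate_mutex);
	return NULL;
}

int main(void)
{
	pthread_t a, t;

	pthread_create(&a, NULL, alloc_side, NULL);
	pthread_create(&t, NULL, truncate_side, NULL);
	pthread_join(a, NULL);
	pthread_join(t, NULL);
	/* whichever side ran second saw a consistent tree, never half-spliced */
	printf("tree[3] = %d\n", tree[3]);
	return 0;
}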
@@ -1004,6 +1093,10 @@ do_indirects:
 	case EXT2_TIND_BLOCK:
 		;
 	}
+
+	ext2_discard_reservation(inode);
+
+	mutex_unlock(&ei->truncate_mutex);
 	inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
 	if (inode_needs_sync(inode)) {
 		sync_mapping_buffers(inode->i_mapping);
@@ -1104,6 +1197,8 @@ void ext2_read_inode (struct inode * inode)
 	ei->i_acl = EXT2_ACL_NOT_CACHED;
 	ei->i_default_acl = EXT2_ACL_NOT_CACHED;
 #endif
+	ei->i_block_alloc_info = NULL;
+
 	if (IS_ERR(raw_inode))
 		goto bad_inode;
 
@@ -1145,9 +1240,6 @@ void ext2_read_inode (struct inode * inode)
 	ei->i_dtime = 0;
 	inode->i_generation = le32_to_cpu(raw_inode->i_generation);
 	ei->i_state = 0;
-	ei->i_next_alloc_block = 0;
-	ei->i_next_alloc_goal = 0;
-	ei->i_prealloc_count = 0;
 	ei->i_block_group = (ino - 1) / EXT2_INODES_PER_GROUP(inode->i_sb);
 	ei->i_dir_start_lookup = 0;
 
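One more loop worth calling out from the ext2_get_blocks() hunk: once the first block is found, the code keeps counting while the on-disk pointers stay physically contiguous, so a single call can map a whole extent. A userspace model of that counting loop (verify_chain() and le32_to_cpu() are dropped, and the indirect-block contents are invented sample data):

#include <stdio.h>

int main(void)
{
	unsigned long ind[8] = { 1000, 1001, 1002, 1007, 0, 0, 0, 0 };
	unsigned long maxblocks = 8, blocks_to_boundary = 7;
	unsigned long first_block = ind[0];
	unsigned long count = 1;

	while (count < maxblocks && count <= blocks_to_boundary &&
	       ind[count] == first_block + count)
		count++;

	/* stops at ind[3] == 1007: only 1000..1002 are contiguous */
	printf("mapped %lu contiguous blocks starting at %lu\n",
	       count, first_block);
	return 0;
}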