Diffstat (limited to 'fs/ext3')
-rw-r--r--	fs/ext3/inode.c | 270
1 file changed, 193 insertions(+), 77 deletions(-)
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index fcfb10f77120..34e5b0dc9168 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -235,16 +235,6 @@ no_delete:
 	clear_inode(inode);	/* We must guarantee clearing of inode... */
 }
 
-static int ext3_alloc_block (handle_t *handle,
-			struct inode * inode, unsigned long goal, int *err)
-{
-	unsigned long result;
-
-	result = ext3_new_block(handle, inode, goal, err);
-	return result;
-}
-
-
 typedef struct {
 	__le32	*p;
 	__le32	key;
@@ -476,15 +466,115 @@ static unsigned long ext3_find_goal(struct inode *inode, long block,
 
 	return ext3_find_near(inode, partial);
 }
+/**
+ *	ext3_blks_to_allocate: Look up the block map and count the number
+ *	of direct blocks that need to be allocated for the given branch.
+ *
+ *	@branch: chain of indirect blocks
+ *	@k: number of blocks needed for the indirect blocks
+ *	@blks: number of data blocks to be mapped.
+ *	@blocks_to_boundary: the offset in the indirect block
+ *
+ *	return the total number of blocks to be allocated, including the
+ *	direct and indirect blocks.
+ */
+static int
+ext3_blks_to_allocate(Indirect * branch, int k, unsigned long blks,
+		int blocks_to_boundary)
+{
+	unsigned long count = 0;
+
+	/*
+	 * Simple case: the [t,d]Indirect block(s) have not been allocated yet,
+	 * so it's clear the blocks on that path have not been allocated
+	 */
+	if (k > 0) {
+		/* right now we don't handle cross-boundary allocation */
+		if (blks < blocks_to_boundary + 1)
+			count += blks;
+		else
+			count += blocks_to_boundary + 1;
+		return count;
+	}
+
+	count++;
+	while (count < blks && count <= blocks_to_boundary &&
+		le32_to_cpu(*(branch[0].p + count)) == 0) {
+		count++;
+	}
+	return count;
+}
+
+/**
+ *	ext3_alloc_blocks: allocate the multiple blocks needed for a branch
+ *	@indirect_blks: the number of blocks that need to be allocated for
+ *			the indirect blocks
+ *
+ *	@new_blocks: on return it will store the new block numbers for
+ *	the indirect blocks (if needed) and the first direct block,
+ *	@blks: on return it will store the total number of allocated
+ *		direct blocks
+ */
+static int ext3_alloc_blocks(handle_t *handle, struct inode *inode,
+			unsigned long goal, int indirect_blks, int blks,
+			unsigned long long new_blocks[4], int *err)
+{
+	int target, i;
+	unsigned long count = 0;
+	int index = 0;
+	unsigned long current_block = 0;
+	int ret = 0;
+
+	/*
+	 * Here we try to allocate the requested multiple blocks at once,
+	 * on a best-effort basis.
+	 * To build a branch, we should allocate blocks for
+	 * the indirect blocks (if not allocated yet), and at least
+	 * the first direct block of this branch.  That's the
+	 * minimum number of blocks we need to allocate (required)
+	 */
+	target = blks + indirect_blks;
+
+	while (1) {
+		count = target;
+		/* allocating blocks for indirect blocks and direct blocks */
+		current_block = ext3_new_blocks(handle, inode, goal, &count, err);
+		if (*err)
+			goto failed_out;
+
+		target -= count;
+		/* allocate blocks for indirect blocks */
+		while (index < indirect_blks && count) {
+			new_blocks[index++] = current_block++;
+			count--;
+		}
+
+		if (count > 0)
+			break;
+	}
+
+	/* save the new block number for the first direct block */
+	new_blocks[index] = current_block;
+
+	/* total number of blocks allocated for direct blocks */
+	ret = count;
+	*err = 0;
+	return ret;
+failed_out:
+	for (i = 0; i <index; i++)
+		ext3_free_blocks(handle, inode, new_blocks[i], 1);
+	return ret;
+}
 
 /**
  *	ext3_alloc_branch - allocate and set up a chain of blocks.
  *	@inode: owner
- *	@num: depth of the chain (number of blocks to allocate)
+ *	@indirect_blks: number of allocated indirect blocks
+ *	@blks: number of allocated direct blocks
  *	@offsets: offsets (in the blocks) to store the pointers to next.
  *	@branch: place to store the chain in.
  *
- *	This function allocates @num blocks, zeroes out all but the last one,
+ *	This function allocates blocks, zeroes out all but the last one,
  *	links them into chain and (if we are synchronous) writes them to disk.
  *	In other words, it prepares a branch that can be spliced onto the
  *	inode. It stores the information about that chain in the branch[], in
@@ -503,71 +593,79 @@ static unsigned long ext3_find_goal(struct inode *inode, long block,
  */
 
 static int ext3_alloc_branch(handle_t *handle, struct inode *inode,
-			int num,
-			unsigned long goal,
-			int *offsets,
-			Indirect *branch)
+			int indirect_blks, int *blks, unsigned long goal,
+			int *offsets, Indirect *branch)
 {
 	int blocksize = inode->i_sb->s_blocksize;
-	int n = 0, keys = 0;
+	int i, n = 0;
 	int err = 0;
-	int i;
-	int parent = ext3_alloc_block(handle, inode, goal, &err);
-
-	branch[0].key = cpu_to_le32(parent);
-	if (parent) {
-		for (n = 1; n < num; n++) {
-			struct buffer_head *bh;
-			/* Allocate the next block */
-			int nr = ext3_alloc_block(handle, inode, parent, &err);
-			if (!nr)
-				break;
-			branch[n].key = cpu_to_le32(nr);
+	struct buffer_head *bh;
+	int num;
+	unsigned long long new_blocks[4];
+	unsigned long long current_block;
 
-			/*
-			 * Get buffer_head for parent block, zero it out
-			 * and set the pointer to new one, then send
-			 * parent to disk.
-			 */
-			bh = sb_getblk(inode->i_sb, parent);
-			if (!bh)
-				break;
-			keys = n+1;
-			branch[n].bh = bh;
-			lock_buffer(bh);
-			BUFFER_TRACE(bh, "call get_create_access");
-			err = ext3_journal_get_create_access(handle, bh);
-			if (err) {
-				unlock_buffer(bh);
-				brelse(bh);
-				break;
-			}
+	num = ext3_alloc_blocks(handle, inode, goal, indirect_blks,
+				*blks, new_blocks, &err);
+	if (err)
+		return err;
 
-			memset(bh->b_data, 0, blocksize);
-			branch[n].p = (__le32*) bh->b_data + offsets[n];
-			*branch[n].p = branch[n].key;
-			BUFFER_TRACE(bh, "marking uptodate");
-			set_buffer_uptodate(bh);
+	branch[0].key = cpu_to_le32(new_blocks[0]);
+	/*
+	 * metadata blocks and data blocks are allocated.
+	 */
+	for (n = 1; n <= indirect_blks; n++) {
+		/*
+		 * Get buffer_head for parent block, zero it out
+		 * and set the pointer to new one, then send
+		 * parent to disk.
+		 */
+		bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
+		branch[n].bh = bh;
+		lock_buffer(bh);
+		BUFFER_TRACE(bh, "call get_create_access");
+		err = ext3_journal_get_create_access(handle, bh);
+		if (err) {
 			unlock_buffer(bh);
+			brelse(bh);
+			goto failed;
+		}
 
-			BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
-			err = ext3_journal_dirty_metadata(handle, bh);
-			if (err)
-				break;
-
-			parent = nr;
+		memset(bh->b_data, 0, blocksize);
+		branch[n].p = (__le32 *) bh->b_data + offsets[n];
+		branch[n].key = cpu_to_le32(new_blocks[n]);
+		*branch[n].p = branch[n].key;
+		if ( n == indirect_blks) {
+			current_block = new_blocks[n];
+			/*
+			 * End of chain: update the last new metablock of
+			 * the chain to point to the newly allocated
+			 * data block numbers
+			 */
+			for (i=1; i < num; i++)
+				*(branch[n].p + i) = cpu_to_le32(++current_block);
 		}
-	}
-	if (n == num)
-		return 0;
+		BUFFER_TRACE(bh, "marking uptodate");
+		set_buffer_uptodate(bh);
+		unlock_buffer(bh);
 
+		BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
+		err = ext3_journal_dirty_metadata(handle, bh);
+		if (err)
+			goto failed;
+	}
+	*blks = num;
+	return err;
+failed:
 	/* Allocation failed, free what we already allocated */
-	for (i = 1; i < keys; i++) {
+	for (i = 1; i <= n ; i++) {
 		BUFFER_TRACE(branch[i].bh, "call journal_forget");
 		ext3_journal_forget(handle, branch[i].bh);
 	}
-	for (i = 0; i < keys; i++)
-		ext3_free_blocks(handle, inode, le32_to_cpu(branch[i].key), 1);
+	for (i = 0; i <indirect_blks; i++)
+		ext3_free_blocks(handle, inode, new_blocks[i], 1);
+
+	ext3_free_blocks(handle, inode, new_blocks[i], num);
+
 	return err;
 }
 
@@ -578,7 +676,8 @@ static int ext3_alloc_branch(handle_t *handle, struct inode *inode,
  *	@chain: chain of indirect blocks (with a missing link - see
  *		ext3_alloc_branch)
  *	@where: location of missing link
- *	@num: number of blocks we are adding
+ *	@num: number of indirect blocks we are adding
+ *	@blks: number of direct blocks we are adding
  *
  *	This function fills the missing link and does all housekeeping needed in
  *	inode (->i_blocks, etc.). In case of success we end up with the full
@@ -586,12 +685,12 @@ static int ext3_alloc_branch(handle_t *handle, struct inode *inode,
  */
 
 static int ext3_splice_branch(handle_t *handle, struct inode *inode, long block,
-			Indirect chain[4], Indirect *where, int num)
+			Indirect *where, int num, int blks)
 {
 	int i;
 	int err = 0;
 	struct ext3_block_alloc_info *block_i = EXT3_I(inode)->i_block_alloc_info;
-
+	unsigned long current_block;
 	/*
 	 * If we're splicing into a [td]indirect block (as opposed to the
 	 * inode) then we need to get write access to the [td]indirect block
@@ -606,6 +705,13 @@ static int ext3_splice_branch(handle_t *handle, struct inode *inode, long block,
 	/* That's it */
 
 	*where->p = where->key;
+	/* update the host bufferhead or inode to point to
+	 * the additional just-allocated direct blocks */
+	if (num == 0 && blks > 1) {
+		current_block = le32_to_cpu(where->key + 1);
+		for (i = 1; i < blks; i++)
+			*(where->p + i ) = cpu_to_le32(current_block++);
+	}
 
 	/*
 	 * update the most recently allocated logical & physical block
@@ -613,8 +719,8 @@ static int ext3_splice_branch(handle_t *handle, struct inode *inode, long block,
 	 * allocation
 	 */
 	if (block_i) {
-		block_i->last_alloc_logical_block = block;
-		block_i->last_alloc_physical_block = le32_to_cpu(where[num-1].key);
+		block_i->last_alloc_logical_block = block + blks - 1;
+		block_i->last_alloc_physical_block = le32_to_cpu(where[num].key + blks - 1);
 	}
 
 	/* We are done with atomic stuff, now do the rest of housekeeping */
@@ -647,10 +753,13 @@ static int ext3_splice_branch(handle_t *handle, struct inode *inode, long block,
 	return err;
 
 err_out:
-	for (i = 1; i < num; i++) {
+	for (i = 1; i <= num; i++) {
 		BUFFER_TRACE(where[i].bh, "call journal_forget");
 		ext3_journal_forget(handle, where[i].bh);
+		ext3_free_blocks(handle, inode, le32_to_cpu(where[i-1].key), 1);
 	}
+	ext3_free_blocks(handle, inode, le32_to_cpu(where[num].key), blks);
+
 	return err;
 }
 
@@ -684,7 +793,7 @@ ext3_get_blocks_handle(handle_t *handle, struct inode *inode, sector_t iblock,
 	Indirect chain[4];
 	Indirect *partial;
 	unsigned long goal;
-	int left;
+	int indirect_blks;
 	int blocks_to_boundary = 0;
 	int depth;
 	struct ext3_inode_info *ei = EXT3_I(inode);
@@ -772,12 +881,19 @@ ext3_get_blocks_handle(handle_t *handle, struct inode *inode, sector_t iblock,
 
 	goal = ext3_find_goal(inode, iblock, chain, partial);
 
-	left = (chain + depth) - partial;
+	/* the number of blocks we need to allocate for [d,t]indirect blocks */
+	indirect_blks = (chain + depth) - partial - 1;
 
 	/*
+	 * Next look up the indirect map to count the total number of
+	 * direct blocks to allocate for this branch.
+	 */
+	count = ext3_blks_to_allocate(partial, indirect_blks,
+					maxblocks, blocks_to_boundary);
+	/*
 	 * Block out ext3_truncate while we alter the tree
 	 */
-	err = ext3_alloc_branch(handle, inode, left, goal,
+	err = ext3_alloc_branch(handle, inode, indirect_blks, &count, goal,
 				offsets + (partial - chain), partial);
 
 	/*
@@ -788,8 +904,8 @@ ext3_get_blocks_handle(handle_t *handle, struct inode *inode, sector_t iblock,
 	 * may need to return -EAGAIN upwards in the worst case. --sct
 	 */
 	if (!err)
-		err = ext3_splice_branch(handle, inode, iblock, chain,
-					partial, left);
+		err = ext3_splice_branch(handle, inode, iblock,
+					partial, indirect_blks, count);
 	/*
 	 * i_disksize growing is protected by truncate_mutex. Don't forget to
 	 * protect it if you're about to implement concurrent
@@ -824,8 +940,8 @@ out:
 
 static int
 ext3_direct_io_get_blocks(struct inode *inode, sector_t iblock,
-		unsigned long max_blocks, struct buffer_head *bh_result,
-		int create)
+		unsigned long max_blocks,
+		struct buffer_head *bh_result, int create)
 {
 	handle_t *handle = journal_current_handle();
 	int ret = 0;