Diffstat (limited to 'fs'):
 fs/ext3/inode.c | 239
 1 file changed, 114 insertions(+), 125 deletions(-)
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index 0cd126176bbb..e68587a7f366 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -44,16 +44,16 @@ static int ext3_writepage_trans_blocks(struct inode *inode);
 /*
  * Test whether an inode is a fast symlink.
  */
-static inline int ext3_inode_is_fast_symlink(struct inode *inode)
+static int ext3_inode_is_fast_symlink(struct inode *inode)
 {
 	int ea_blocks = EXT3_I(inode)->i_file_acl ?
 		(inode->i_sb->s_blocksize >> 9) : 0;
 
-	return (S_ISLNK(inode->i_mode) &&
-		inode->i_blocks - ea_blocks == 0);
+	return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
 }
 
-/* The ext3 forget function must perform a revoke if we are freeing data
+/*
+ * The ext3 forget function must perform a revoke if we are freeing data
  * which has been journaled. Metadata (eg. indirect blocks) must be
  * revoked in all cases.
  *
@@ -61,10 +61,8 @@ static inline int ext3_inode_is_fast_symlink(struct inode *inode)
  * but there may still be a record of it in the journal, and that record
  * still needs to be revoked.
  */
-
-int ext3_forget(handle_t *handle, int is_metadata,
-		struct inode *inode, struct buffer_head *bh,
-		int blocknr)
+int ext3_forget(handle_t *handle, int is_metadata, struct inode *inode,
+		struct buffer_head *bh, int blocknr)
 {
 	int err;
 
@@ -104,10 +102,9 @@ int ext3_forget(handle_t *handle, int is_metadata,
 }
 
 /*
- * Work out how many blocks we need to progress with the next chunk of a
+ * Work out how many blocks we need to proceed with the next chunk of a
  * truncate transaction.
  */
-
 static unsigned long blocks_for_truncate(struct inode *inode)
 {
 	unsigned long needed;
@@ -141,7 +138,6 @@ static unsigned long blocks_for_truncate(struct inode *inode)
  * extend fails, we need to propagate the failure up and restart the
  * transaction in the top-level truncate loop. --sct
  */
-
 static handle_t *start_transaction(struct inode *inode)
 {
 	handle_t *result;
@@ -194,9 +190,11 @@ void ext3_delete_inode (struct inode * inode)
 
 	handle = start_transaction(inode);
 	if (IS_ERR(handle)) {
-		/* If we're going to skip the normal cleanup, we still
-		 * need to make sure that the in-core orphan linked list
-		 * is properly cleaned up. */
+		/*
+		 * If we're going to skip the normal cleanup, we still need to
+		 * make sure that the in-core orphan linked list is properly
+		 * cleaned up.
+		 */
 		ext3_orphan_del(NULL, inode);
 		goto no_delete;
 	}
@@ -247,7 +245,7 @@ static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
 	p->bh = bh;
 }
 
-static inline int verify_chain(Indirect *from, Indirect *to)
+static int verify_chain(Indirect *from, Indirect *to)
 {
 	while (from <= to && from->key == *from->p)
 		from++;
@@ -317,7 +315,7 @@ static int ext3_block_to_path(struct inode *inode,
 		offsets[n++] = i_block & (ptrs - 1);
 		final = ptrs;
 	} else {
-		ext3_warning (inode->i_sb, "ext3_block_to_path", "block > big");
+		ext3_warning(inode->i_sb, "ext3_block_to_path", "block > big");
 	}
 	if (boundary)
 		*boundary = final - 1 - (i_block & (ptrs - 1));
@@ -409,7 +407,6 @@ no_block:
  *
  * Caller must make sure that @ind is valid and will stay that way.
  */
-
 static unsigned long ext3_find_near(struct inode *inode, Indirect *ind)
 {
 	struct ext3_inode_info *ei = EXT3_I(inode);
@@ -419,17 +416,18 @@ static unsigned long ext3_find_near(struct inode *inode, Indirect *ind)
 	unsigned long colour;
 
 	/* Try to find previous block */
-	for (p = ind->p - 1; p >= start; p--)
+	for (p = ind->p - 1; p >= start; p--) {
 		if (*p)
 			return le32_to_cpu(*p);
+	}
 
 	/* No such thing, so let's try location of indirect block */
 	if (ind->bh)
 		return ind->bh->b_blocknr;
 
 	/*
-	 * It is going to be refered from inode itself? OK, just put it into
-	 * the same cylinder group then.
+	 * It is going to be referred to from the inode itself? OK, just put it
+	 * into the same cylinder group then.
 	 */
 	bg_start = (ei->i_block_group * EXT3_BLOCKS_PER_GROUP(inode->i_sb)) +
 		le32_to_cpu(EXT3_SB(inode->i_sb)->s_es->s_first_data_block);
@@ -453,7 +451,9 @@ static unsigned long ext3_find_near(struct inode *inode, Indirect *ind)
 static unsigned long ext3_find_goal(struct inode *inode, long block,
 			Indirect chain[4], Indirect *partial)
 {
-	struct ext3_block_alloc_info *block_i = EXT3_I(inode)->i_block_alloc_info;
+	struct ext3_block_alloc_info *block_i;
+
+	block_i = EXT3_I(inode)->i_block_alloc_info;
 
 	/*
 	 * try the heuristic for sequential allocation,
@@ -466,6 +466,7 @@ static unsigned long ext3_find_goal(struct inode *inode, long block,
 
 	return ext3_find_near(inode, partial);
 }
+
 /**
  * ext3_blks_to_allocate: Look up the block map and count the number
  * of direct blocks need to be allocated for the given branch.
@@ -478,8 +479,7 @@ static unsigned long ext3_find_goal(struct inode *inode, long block,
  * return the total number of blocks to be allocate, including the
  * direct and indirect blocks.
  */
-static int
-ext3_blks_to_allocate(Indirect * branch, int k, unsigned long blks,
+static int ext3_blks_to_allocate(Indirect *branch, int k, unsigned long blks,
 		int blocks_to_boundary)
 {
 	unsigned long count = 0;
@@ -489,7 +489,7 @@ ext3_blks_to_allocate(Indirect * branch, int k, unsigned long blks,
 	 * then it's clear blocks on that path have not allocated
 	 */
 	if (k > 0) {
-		/* right now don't hanel cross boundary allocation */
+		/* right now we don't handle cross boundary allocation */
 		if (blks < blocks_to_boundary + 1)
 			count += blks;
 		else
@@ -538,7 +538,7 @@ static int ext3_alloc_blocks(handle_t *handle, struct inode *inode,
 	while (1) {
 		count = target;
 		/* allocating blocks for indirect blocks and direct blocks */
-		current_block = ext3_new_blocks(handle, inode, goal, &count, err);
+		current_block = ext3_new_blocks(handle,inode,goal,&count,err);
 		if (*err)
 			goto failed_out;
 
@@ -591,7 +591,6 @@ failed_out:
  * ext3_alloc_block() (normally -ENOSPC). Otherwise we set the chain
  * as described above and return 0.
  */
-
 static int ext3_alloc_branch(handle_t *handle, struct inode *inode,
 			int indirect_blks, int *blks, unsigned long goal,
 			int *offsets, Indirect *branch)
@@ -670,27 +669,28 @@ failed:
 }
 
 /**
  * ext3_splice_branch - splice the allocated branch onto inode.
  * @inode: owner
  * @block: (logical) number of block we are adding
  * @chain: chain of indirect blocks (with a missing link - see
  *	ext3_alloc_branch)
  * @where: location of missing link
  * @num: number of indirect blocks we are adding
  * @blks: number of direct blocks we are adding
  *
  * This function fills the missing link and does all housekeeping needed in
  * inode (->i_blocks, etc.). In case of success we end up with the full
  * chain to new block and return 0.
  */
-
-static int ext3_splice_branch(handle_t *handle, struct inode *inode, long block,
-			Indirect *where, int num, int blks)
+static int ext3_splice_branch(handle_t *handle, struct inode *inode,
+			long block, Indirect *where, int num, int blks)
 {
 	int i;
 	int err = 0;
-	struct ext3_block_alloc_info *block_i = EXT3_I(inode)->i_block_alloc_info;
+	struct ext3_block_alloc_info *block_i;
 	unsigned long current_block;
+
+	block_i = EXT3_I(inode)->i_block_alloc_info;
 	/*
 	 * If we're splicing into a [td]indirect block (as opposed to the
 	 * inode) then we need to get write access to the [td]indirect block
@@ -705,8 +705,11 @@ static int ext3_splice_branch(handle_t *handle, struct inode *inode, long block,
 	/* That's it */
 
 	*where->p = where->key;
-	/* update host bufferhead or inode to point to
-	 * more just allocated direct blocks blocks */
+
+	/*
+	 * Update the host buffer_head or inode to point to more just allocated
+	 * direct blocks blocks
+	 */
 	if (num == 0 && blks > 1) {
 		current_block = le32_to_cpu(where->key + 1);
 		for (i = 1; i < blks; i++)
@@ -720,7 +723,8 @@ static int ext3_splice_branch(handle_t *handle, struct inode *inode, long block,
 	 */
 	if (block_i) {
 		block_i->last_alloc_logical_block = block + blks - 1;
-		block_i->last_alloc_physical_block = le32_to_cpu(where[num].key + blks - 1);
+		block_i->last_alloc_physical_block =
+				le32_to_cpu(where[num].key + blks - 1);
 	}
 
 	/* We are done with atomic stuff, now do the rest of housekeeping */
@@ -731,7 +735,7 @@ static int ext3_splice_branch(handle_t *handle, struct inode *inode, long block,
 	/* had we spliced it onto indirect block? */
 	if (where->bh) {
 		/*
-		 * akpm: If we spliced it onto an indirect block, we haven't
+		 * If we spliced it onto an indirect block, we haven't
 		 * altered the inode. Note however that if it is being spliced
 		 * onto an indirect block at the very end of the file (the
 		 * file is growing) then we *will* alter the inode to reflect
@@ -756,7 +760,7 @@ err_out:
 	for (i = 1; i <= num; i++) {
 		BUFFER_TRACE(where[i].bh, "call journal_forget");
 		ext3_journal_forget(handle, where[i].bh);
-		ext3_free_blocks(handle, inode, le32_to_cpu(where[i-1].key), 1);
+		ext3_free_blocks(handle,inode,le32_to_cpu(where[i-1].key),1);
 	}
 	ext3_free_blocks(handle, inode, le32_to_cpu(where[num].key), blks);
 
@@ -775,17 +779,16 @@ err_out:
  * allocations is needed - we simply release blocks and do not touch anything
  * reachable from inode.
  *
- * akpm: `handle' can be NULL if create == 0.
+ * `handle' can be NULL if create == 0.
  *
  * The BKL may not be held on entry here. Be sure to take it early.
  * return > 0, # of blocks mapped or allocated.
  * return = 0, if plain lookup failed.
  * return < 0, error case.
 */
-
-int
-ext3_get_blocks_handle(handle_t *handle, struct inode *inode, sector_t iblock,
-		unsigned long maxblocks, struct buffer_head *bh_result,
+int ext3_get_blocks_handle(handle_t *handle, struct inode *inode,
+		sector_t iblock, unsigned long maxblocks,
+		struct buffer_head *bh_result,
 		int create, int extend_disksize)
 {
 	int err = -EIO;
@@ -802,7 +805,7 @@ ext3_get_blocks_handle(handle_t *handle, struct inode *inode, sector_t iblock,
 
 
 	J_ASSERT(handle != NULL || create == 0);
-	depth = ext3_block_to_path(inode, iblock, offsets, &blocks_to_boundary);
+	depth = ext3_block_to_path(inode,iblock,offsets,&blocks_to_boundary);
 
 	if (depth == 0)
 		goto out;
@@ -998,8 +1001,8 @@ static int ext3_get_block(struct inode *inode, sector_t iblock,
 /*
  * `handle' can be NULL if create is zero
  */
-struct buffer_head *ext3_getblk(handle_t *handle, struct inode * inode,
-				long block, int create, int * errp)
+struct buffer_head *ext3_getblk(handle_t *handle, struct inode *inode,
+				long block, int create, int *errp)
 {
 	struct buffer_head dummy;
 	int fatal = 0, err;
@@ -1029,17 +1032,18 @@ struct buffer_head *ext3_getblk(handle_t *handle, struct inode * inode,
 		J_ASSERT(create != 0);
 		J_ASSERT(handle != 0);
 
-		/* Now that we do not always journal data, we
-		   should keep in mind whether this should
-		   always journal the new buffer as metadata.
-		   For now, regular file writes use
-		   ext3_get_block instead, so it's not a
-		   problem. */
+		/*
+		 * Now that we do not always journal data, we should
+		 * keep in mind whether this should always journal the
+		 * new buffer as metadata. For now, regular file
+		 * writes use ext3_get_block instead, so it's not a
+		 * problem.
+		 */
 		lock_buffer(bh);
 		BUFFER_TRACE(bh, "call get_create_access");
 		fatal = ext3_journal_get_create_access(handle, bh);
 		if (!fatal && !buffer_uptodate(bh)) {
-			memset(bh->b_data, 0, inode->i_sb->s_blocksize);
+			memset(bh->b_data,0,inode->i_sb->s_blocksize);
 			set_buffer_uptodate(bh);
 		}
 		unlock_buffer(bh);
@@ -1061,7 +1065,7 @@ err:
 	return NULL;
 }
 
-struct buffer_head *ext3_bread(handle_t *handle, struct inode * inode,
+struct buffer_head *ext3_bread(handle_t *handle, struct inode *inode,
 			       int block, int create, int *err)
 {
 	struct buffer_head * bh;
@@ -1137,9 +1141,8 @@ static int walk_page_buffers( handle_t *handle,
  * is elevated. We'll still have enough credits for the tiny quotafile
  * write.
  */
-
-static int do_journal_get_write_access(handle_t *handle,
-			struct buffer_head *bh)
+static int do_journal_get_write_access(handle_t *handle,
+					struct buffer_head *bh)
 {
 	if (!buffer_mapped(bh) || buffer_freed(bh))
 		return 0;
@@ -1180,8 +1183,7 @@ out:
 	return ret;
 }
 
-int
-ext3_journal_dirty_data(handle_t *handle, struct buffer_head *bh)
+int ext3_journal_dirty_data(handle_t *handle, struct buffer_head *bh)
 {
 	int err = journal_dirty_data(handle, bh);
 	if (err)
@@ -1206,7 +1208,6 @@ static int commit_write_fn(handle_t *handle, struct buffer_head *bh)
  * ext3 never places buffers on inode->i_mapping->private_list. metadata
  * buffers are managed internally.
  */
-
 static int ext3_ordered_commit_write(struct file *file, struct page *page,
 			     unsigned from, unsigned to)
 {
@@ -1416,7 +1417,7 @@ static int journal_dirty_data_fn(handle_t *handle, struct buffer_head *bh)
  * we don't need to open a transaction here.
  */
 static int ext3_ordered_writepage(struct page *page,
-		struct writeback_control *wbc)
+				struct writeback_control *wbc)
 {
 	struct inode *inode = page->mapping->host;
 	struct buffer_head *page_bufs;
@@ -1907,11 +1908,8 @@ static inline int all_zeroes(__le32 *p, __le32 *q)
  * c) free the subtrees growing from the inode past the @chain[0].
  *	(no partially truncated stuff there). */
 
-static Indirect *ext3_find_shared(struct inode *inode,
-				int depth,
-				int offsets[4],
-				Indirect chain[4],
-				__le32 *top)
+static Indirect *ext3_find_shared(struct inode *inode, int depth,
+			int offsets[4], Indirect chain[4], __le32 *top)
 {
 	Indirect *partial, *p;
 	int k, err;
@@ -1950,8 +1948,7 @@ static Indirect *ext3_find_shared(struct inode *inode,
 	}
 	/* Writer: end */
 
-	while(partial > p)
-	{
+	while(partial > p) {
 		brelse(partial->bh);
 		partial--;
 	}
@@ -1967,10 +1964,9 @@ no_top:
  * We release `count' blocks on disk, but (last - first) may be greater
  * than `count' because there can be holes in there.
  */
-static void
-ext3_clear_blocks(handle_t *handle, struct inode *inode, struct buffer_head *bh,
-		unsigned long block_to_free, unsigned long count,
-		__le32 *first, __le32 *last)
+static void ext3_clear_blocks(handle_t *handle, struct inode *inode,
+		struct buffer_head *bh, unsigned long block_to_free,
+		unsigned long count, __le32 *first, __le32 *last)
 {
 	__le32 *p;
 	if (try_to_extend_transaction(handle, inode)) {
@@ -2231,8 +2227,7 @@ static void ext3_free_branches(handle_t *handle, struct inode *inode,
  * that's fine - as long as they are linked from the inode, the post-crash
  * ext3_truncate() run will find them and release them.
  */
-
-void ext3_truncate(struct inode * inode)
+void ext3_truncate(struct inode *inode)
 {
 	handle_t *handle;
 	struct ext3_inode_info *ei = EXT3_I(inode);
@@ -2356,29 +2351,26 @@ void ext3_truncate(struct inode * inode)
 do_indirects:
 	/* Kill the remaining (whole) subtrees */
 	switch (offsets[0]) {
-		default:
-			nr = i_data[EXT3_IND_BLOCK];
-			if (nr) {
-				ext3_free_branches(handle, inode, NULL,
-						&nr, &nr+1, 1);
-				i_data[EXT3_IND_BLOCK] = 0;
-			}
-		case EXT3_IND_BLOCK:
-			nr = i_data[EXT3_DIND_BLOCK];
-			if (nr) {
-				ext3_free_branches(handle, inode, NULL,
-						&nr, &nr+1, 2);
-				i_data[EXT3_DIND_BLOCK] = 0;
-			}
-		case EXT3_DIND_BLOCK:
-			nr = i_data[EXT3_TIND_BLOCK];
-			if (nr) {
-				ext3_free_branches(handle, inode, NULL,
-						&nr, &nr+1, 3);
-				i_data[EXT3_TIND_BLOCK] = 0;
-			}
-		case EXT3_TIND_BLOCK:
-			;
+	default:
+		nr = i_data[EXT3_IND_BLOCK];
+		if (nr) {
+			ext3_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
+			i_data[EXT3_IND_BLOCK] = 0;
+		}
+	case EXT3_IND_BLOCK:
+		nr = i_data[EXT3_DIND_BLOCK];
+		if (nr) {
+			ext3_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
+			i_data[EXT3_DIND_BLOCK] = 0;
+		}
+	case EXT3_DIND_BLOCK:
+		nr = i_data[EXT3_TIND_BLOCK];
+		if (nr) {
+			ext3_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
+			i_data[EXT3_TIND_BLOCK] = 0;
+		}
+	case EXT3_TIND_BLOCK:
+		;
 	}
 
 	ext3_discard_reservation(inode);
@@ -2387,8 +2379,10 @@ do_indirects:
 	inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
 	ext3_mark_inode_dirty(handle, inode);
 
-	/* In a multi-transaction truncate, we only make the final
-	 * transaction synchronous */
+	/*
+	 * In a multi-transaction truncate, we only make the final transaction
+	 * synchronous
+	 */
 	if (IS_SYNC(inode))
 		handle->h_sync = 1;
 out_stop:
@@ -2414,20 +2408,16 @@ static unsigned long ext3_get_inode_block(struct super_block *sb,
 	struct ext3_group_desc * gdp;
 
 
-	if ((ino != EXT3_ROOT_INO &&
-		ino != EXT3_JOURNAL_INO &&
-		ino != EXT3_RESIZE_INO &&
-		ino < EXT3_FIRST_INO(sb)) ||
-	    ino > le32_to_cpu(
-			EXT3_SB(sb)->s_es->s_inodes_count)) {
-		ext3_error (sb, "ext3_get_inode_block",
+	if ((ino != EXT3_ROOT_INO && ino != EXT3_JOURNAL_INO &&
+		ino != EXT3_RESIZE_INO && ino < EXT3_FIRST_INO(sb)) ||
+	    ino > le32_to_cpu(EXT3_SB(sb)->s_es->s_inodes_count)) {
+		ext3_error(sb, "ext3_get_inode_block",
 			    "bad inode number: %lu", ino);
 		return 0;
 	}
 	block_group = (ino - 1) / EXT3_INODES_PER_GROUP(sb);
 	if (block_group >= EXT3_SB(sb)->s_groups_count) {
-		ext3_error (sb, "ext3_get_inode_block",
-			    "group >= groups count");
+		ext3_error(sb,"ext3_get_inode_block","group >= groups count");
 		return 0;
 	}
 	smp_rmb();
@@ -2440,7 +2430,7 @@ static unsigned long ext3_get_inode_block(struct super_block *sb,
 		return 0;
 	}
 
-	gdp = (struct ext3_group_desc *) bh->b_data;
+	gdp = (struct ext3_group_desc *)bh->b_data;
 	/*
 	 * Figure out the offset within the block group inode table
 	 */
@@ -2989,7 +2979,7 @@ err_out:
 
 
 /*
- * akpm: how many blocks doth make a writepage()?
+ * How many blocks doth make a writepage()?
  *
  * With N blocks per page, it may be:
  * N data blocks
@@ -3079,8 +3069,8 @@ ext3_reserve_inode_write(handle_t *handle, struct inode *inode,
 }
 
 /*
- * akpm: What we do here is to mark the in-core inode as clean
- * with respect to inode dirtiness (it may still be data-dirty).
+ * What we do here is to mark the in-core inode as clean with respect to inode
+ * dirtiness (it may still be data-dirty).
  * This means that the in-core inode may be reaped by prune_icache
  * without having to perform any I/O. This is a very good thing,
  * because *any* task may call prune_icache - even ones which
@@ -3112,7 +3102,7 @@ int ext3_mark_inode_dirty(handle_t *handle, struct inode *inode)
 }
 
 /*
- * akpm: ext3_dirty_inode() is called from __mark_inode_dirty()
+ * ext3_dirty_inode() is called from __mark_inode_dirty()
  *
  * We're really interested in the case where a file is being extended.
  * i_size has been changed by generic_commit_write() and we thus need
@@ -3148,7 +3138,7 @@ out:
 	return;
 }
 
-#ifdef AKPM
+#if 0
 /*
  * Bind an inode's backing buffer_head into this transaction, to prevent
  * it from being flushed to disk early. Unlike
@@ -3156,8 +3146,7 @@ out:
  * returns no iloc structure, so the caller needs to repeat the iloc
  * lookup to mark the inode dirty later.
  */
-static inline int
-ext3_pin_inode(handle_t *handle, struct inode *inode)
+static int ext3_pin_inode(handle_t *handle, struct inode *inode)
 {
 	struct ext3_iloc iloc;
 