diff options
Diffstat (limited to 'fs/ext3')
-rw-r--r-- | fs/ext3/balloc.c | 109 | ||||
-rw-r--r-- | fs/ext3/dir.c | 5 | ||||
-rw-r--r-- | fs/ext3/inode.c | 582 | ||||
-rw-r--r-- | fs/ext3/super.c | 6 |
4 files changed, 448 insertions, 254 deletions
diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c index 46623f77666b..77927d6938f6 100644 --- a/fs/ext3/balloc.c +++ b/fs/ext3/balloc.c | |||
@@ -653,9 +653,11 @@ claim_block(spinlock_t *lock, int block, struct buffer_head *bh) | |||
653 | */ | 653 | */ |
654 | static int | 654 | static int |
655 | ext3_try_to_allocate(struct super_block *sb, handle_t *handle, int group, | 655 | ext3_try_to_allocate(struct super_block *sb, handle_t *handle, int group, |
656 | struct buffer_head *bitmap_bh, int goal, struct ext3_reserve_window *my_rsv) | 656 | struct buffer_head *bitmap_bh, int goal, |
657 | unsigned long *count, struct ext3_reserve_window *my_rsv) | ||
657 | { | 658 | { |
658 | int group_first_block, start, end; | 659 | int group_first_block, start, end; |
660 | unsigned long num = 0; | ||
659 | 661 | ||
660 | /* we do allocation within the reservation window if we have a window */ | 662 | /* we do allocation within the reservation window if we have a window */ |
661 | if (my_rsv) { | 663 | if (my_rsv) { |
@@ -713,8 +715,18 @@ repeat: | |||
713 | goto fail_access; | 715 | goto fail_access; |
714 | goto repeat; | 716 | goto repeat; |
715 | } | 717 | } |
716 | return goal; | 718 | num++; |
719 | goal++; | ||
720 | while (num < *count && goal < end | ||
721 | && ext3_test_allocatable(goal, bitmap_bh) | ||
722 | && claim_block(sb_bgl_lock(EXT3_SB(sb), group), goal, bitmap_bh)) { | ||
723 | num++; | ||
724 | goal++; | ||
725 | } | ||
726 | *count = num; | ||
727 | return goal - num; | ||
717 | fail_access: | 728 | fail_access: |
729 | *count = num; | ||
718 | return -1; | 730 | return -1; |
719 | } | 731 | } |
720 | 732 | ||
@@ -999,6 +1011,31 @@ retry: | |||
999 | goto retry; | 1011 | goto retry; |
1000 | } | 1012 | } |
1001 | 1013 | ||
1014 | static void try_to_extend_reservation(struct ext3_reserve_window_node *my_rsv, | ||
1015 | struct super_block *sb, int size) | ||
1016 | { | ||
1017 | struct ext3_reserve_window_node *next_rsv; | ||
1018 | struct rb_node *next; | ||
1019 | spinlock_t *rsv_lock = &EXT3_SB(sb)->s_rsv_window_lock; | ||
1020 | |||
1021 | if (!spin_trylock(rsv_lock)) | ||
1022 | return; | ||
1023 | |||
1024 | next = rb_next(&my_rsv->rsv_node); | ||
1025 | |||
1026 | if (!next) | ||
1027 | my_rsv->rsv_end += size; | ||
1028 | else { | ||
1029 | next_rsv = list_entry(next, struct ext3_reserve_window_node, rsv_node); | ||
1030 | |||
1031 | if ((next_rsv->rsv_start - my_rsv->rsv_end - 1) >= size) | ||
1032 | my_rsv->rsv_end += size; | ||
1033 | else | ||
1034 | my_rsv->rsv_end = next_rsv->rsv_start - 1; | ||
1035 | } | ||
1036 | spin_unlock(rsv_lock); | ||
1037 | } | ||
1038 | |||
1002 | /* | 1039 | /* |
1003 | * This is the main function used to allocate a new block and its reservation | 1040 | * This is the main function used to allocate a new block and its reservation |
1004 | * window. | 1041 | * window. |
@@ -1024,11 +1061,12 @@ static int | |||
1024 | ext3_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle, | 1061 | ext3_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle, |
1025 | unsigned int group, struct buffer_head *bitmap_bh, | 1062 | unsigned int group, struct buffer_head *bitmap_bh, |
1026 | int goal, struct ext3_reserve_window_node * my_rsv, | 1063 | int goal, struct ext3_reserve_window_node * my_rsv, |
1027 | int *errp) | 1064 | unsigned long *count, int *errp) |
1028 | { | 1065 | { |
1029 | unsigned long group_first_block; | 1066 | unsigned long group_first_block; |
1030 | int ret = 0; | 1067 | int ret = 0; |
1031 | int fatal; | 1068 | int fatal; |
1069 | unsigned long num = *count; | ||
1032 | 1070 | ||
1033 | *errp = 0; | 1071 | *errp = 0; |
1034 | 1072 | ||
@@ -1051,7 +1089,8 @@ ext3_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle, | |||
1051 | * or last attempt to allocate a block with reservation turned on failed | 1089 | * or last attempt to allocate a block with reservation turned on failed |
1052 | */ | 1090 | */ |
1053 | if (my_rsv == NULL ) { | 1091 | if (my_rsv == NULL ) { |
1054 | ret = ext3_try_to_allocate(sb, handle, group, bitmap_bh, goal, NULL); | 1092 | ret = ext3_try_to_allocate(sb, handle, group, bitmap_bh, |
1093 | goal, count, NULL); | ||
1055 | goto out; | 1094 | goto out; |
1056 | } | 1095 | } |
1057 | /* | 1096 | /* |
@@ -1081,6 +1120,8 @@ ext3_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle, | |||
1081 | while (1) { | 1120 | while (1) { |
1082 | if (rsv_is_empty(&my_rsv->rsv_window) || (ret < 0) || | 1121 | if (rsv_is_empty(&my_rsv->rsv_window) || (ret < 0) || |
1083 | !goal_in_my_reservation(&my_rsv->rsv_window, goal, group, sb)) { | 1122 | !goal_in_my_reservation(&my_rsv->rsv_window, goal, group, sb)) { |
1123 | if (my_rsv->rsv_goal_size < *count) | ||
1124 | my_rsv->rsv_goal_size = *count; | ||
1084 | ret = alloc_new_reservation(my_rsv, goal, sb, | 1125 | ret = alloc_new_reservation(my_rsv, goal, sb, |
1085 | group, bitmap_bh); | 1126 | group, bitmap_bh); |
1086 | if (ret < 0) | 1127 | if (ret < 0) |
@@ -1088,16 +1129,21 @@ ext3_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle, | |||
1088 | 1129 | ||
1089 | if (!goal_in_my_reservation(&my_rsv->rsv_window, goal, group, sb)) | 1130 | if (!goal_in_my_reservation(&my_rsv->rsv_window, goal, group, sb)) |
1090 | goal = -1; | 1131 | goal = -1; |
1091 | } | 1132 | } else if (goal > 0 && (my_rsv->rsv_end-goal+1) < *count) |
1133 | try_to_extend_reservation(my_rsv, sb, | ||
1134 | *count-my_rsv->rsv_end + goal - 1); | ||
1135 | |||
1092 | if ((my_rsv->rsv_start >= group_first_block + EXT3_BLOCKS_PER_GROUP(sb)) | 1136 | if ((my_rsv->rsv_start >= group_first_block + EXT3_BLOCKS_PER_GROUP(sb)) |
1093 | || (my_rsv->rsv_end < group_first_block)) | 1137 | || (my_rsv->rsv_end < group_first_block)) |
1094 | BUG(); | 1138 | BUG(); |
1095 | ret = ext3_try_to_allocate(sb, handle, group, bitmap_bh, goal, | 1139 | ret = ext3_try_to_allocate(sb, handle, group, bitmap_bh, goal, |
1096 | &my_rsv->rsv_window); | 1140 | &num, &my_rsv->rsv_window); |
1097 | if (ret >= 0) { | 1141 | if (ret >= 0) { |
1098 | my_rsv->rsv_alloc_hit++; | 1142 | my_rsv->rsv_alloc_hit += num; |
1143 | *count = num; | ||
1099 | break; /* succeed */ | 1144 | break; /* succeed */ |
1100 | } | 1145 | } |
1146 | num = *count; | ||
1101 | } | 1147 | } |
1102 | out: | 1148 | out: |
1103 | if (ret >= 0) { | 1149 | if (ret >= 0) { |
@@ -1154,8 +1200,8 @@ int ext3_should_retry_alloc(struct super_block *sb, int *retries) | |||
1154 | * bitmap, and then for any free bit if that fails. | 1200 | * bitmap, and then for any free bit if that fails. |
1155 | * This function also updates quota and i_blocks field. | 1201 | * This function also updates quota and i_blocks field. |
1156 | */ | 1202 | */ |
1157 | int ext3_new_block(handle_t *handle, struct inode *inode, | 1203 | int ext3_new_blocks(handle_t *handle, struct inode *inode, |
1158 | unsigned long goal, int *errp) | 1204 | unsigned long goal, unsigned long *count, int *errp) |
1159 | { | 1205 | { |
1160 | struct buffer_head *bitmap_bh = NULL; | 1206 | struct buffer_head *bitmap_bh = NULL; |
1161 | struct buffer_head *gdp_bh; | 1207 | struct buffer_head *gdp_bh; |
@@ -1178,6 +1224,7 @@ int ext3_new_block(handle_t *handle, struct inode *inode, | |||
1178 | static int goal_hits, goal_attempts; | 1224 | static int goal_hits, goal_attempts; |
1179 | #endif | 1225 | #endif |
1180 | unsigned long ngroups; | 1226 | unsigned long ngroups; |
1227 | unsigned long num = *count; | ||
1181 | 1228 | ||
1182 | *errp = -ENOSPC; | 1229 | *errp = -ENOSPC; |
1183 | sb = inode->i_sb; | 1230 | sb = inode->i_sb; |
@@ -1189,7 +1236,7 @@ int ext3_new_block(handle_t *handle, struct inode *inode, | |||
1189 | /* | 1236 | /* |
1190 | * Check quota for allocation of this block. | 1237 | * Check quota for allocation of this block. |
1191 | */ | 1238 | */ |
1192 | if (DQUOT_ALLOC_BLOCK(inode, 1)) { | 1239 | if (DQUOT_ALLOC_BLOCK(inode, num)) { |
1193 | *errp = -EDQUOT; | 1240 | *errp = -EDQUOT; |
1194 | return 0; | 1241 | return 0; |
1195 | } | 1242 | } |
@@ -1244,7 +1291,7 @@ retry: | |||
1244 | if (!bitmap_bh) | 1291 | if (!bitmap_bh) |
1245 | goto io_error; | 1292 | goto io_error; |
1246 | ret_block = ext3_try_to_allocate_with_rsv(sb, handle, group_no, | 1293 | ret_block = ext3_try_to_allocate_with_rsv(sb, handle, group_no, |
1247 | bitmap_bh, ret_block, my_rsv, &fatal); | 1294 | bitmap_bh, ret_block, my_rsv, &num, &fatal); |
1248 | if (fatal) | 1295 | if (fatal) |
1249 | goto out; | 1296 | goto out; |
1250 | if (ret_block >= 0) | 1297 | if (ret_block >= 0) |
@@ -1281,7 +1328,7 @@ retry: | |||
1281 | if (!bitmap_bh) | 1328 | if (!bitmap_bh) |
1282 | goto io_error; | 1329 | goto io_error; |
1283 | ret_block = ext3_try_to_allocate_with_rsv(sb, handle, group_no, | 1330 | ret_block = ext3_try_to_allocate_with_rsv(sb, handle, group_no, |
1284 | bitmap_bh, -1, my_rsv, &fatal); | 1331 | bitmap_bh, -1, my_rsv, &num, &fatal); |
1285 | if (fatal) | 1332 | if (fatal) |
1286 | goto out; | 1333 | goto out; |
1287 | if (ret_block >= 0) | 1334 | if (ret_block >= 0) |
@@ -1316,13 +1363,15 @@ allocated: | |||
1316 | target_block = ret_block + group_no * EXT3_BLOCKS_PER_GROUP(sb) | 1363 | target_block = ret_block + group_no * EXT3_BLOCKS_PER_GROUP(sb) |
1317 | + le32_to_cpu(es->s_first_data_block); | 1364 | + le32_to_cpu(es->s_first_data_block); |
1318 | 1365 | ||
1319 | if (target_block == le32_to_cpu(gdp->bg_block_bitmap) || | 1366 | if (in_range(le32_to_cpu(gdp->bg_block_bitmap), target_block, num) || |
1320 | target_block == le32_to_cpu(gdp->bg_inode_bitmap) || | 1367 | in_range(le32_to_cpu(gdp->bg_inode_bitmap), target_block, num) || |
1321 | in_range(target_block, le32_to_cpu(gdp->bg_inode_table), | 1368 | in_range(target_block, le32_to_cpu(gdp->bg_inode_table), |
1369 | EXT3_SB(sb)->s_itb_per_group) || | ||
1370 | in_range(target_block + num - 1, le32_to_cpu(gdp->bg_inode_table), | ||
1322 | EXT3_SB(sb)->s_itb_per_group)) | 1371 | EXT3_SB(sb)->s_itb_per_group)) |
1323 | ext3_error(sb, "ext3_new_block", | 1372 | ext3_error(sb, "ext3_new_block", |
1324 | "Allocating block in system zone - " | 1373 | "Allocating block in system zone - " |
1325 | "block = %u", target_block); | 1374 | "blocks from %u, length %lu", target_block, num); |
1326 | 1375 | ||
1327 | performed_allocation = 1; | 1376 | performed_allocation = 1; |
1328 | 1377 | ||
@@ -1341,10 +1390,14 @@ allocated: | |||
1341 | jbd_lock_bh_state(bitmap_bh); | 1390 | jbd_lock_bh_state(bitmap_bh); |
1342 | spin_lock(sb_bgl_lock(sbi, group_no)); | 1391 | spin_lock(sb_bgl_lock(sbi, group_no)); |
1343 | if (buffer_jbd(bitmap_bh) && bh2jh(bitmap_bh)->b_committed_data) { | 1392 | if (buffer_jbd(bitmap_bh) && bh2jh(bitmap_bh)->b_committed_data) { |
1344 | if (ext3_test_bit(ret_block, | 1393 | int i; |
1345 | bh2jh(bitmap_bh)->b_committed_data)) { | 1394 | |
1346 | printk("%s: block was unexpectedly set in " | 1395 | for (i = 0; i < num; i++) { |
1347 | "b_committed_data\n", __FUNCTION__); | 1396 | if (ext3_test_bit(ret_block, |
1397 | bh2jh(bitmap_bh)->b_committed_data)) { | ||
1398 | printk("%s: block was unexpectedly set in " | ||
1399 | "b_committed_data\n", __FUNCTION__); | ||
1400 | } | ||
1348 | } | 1401 | } |
1349 | } | 1402 | } |
1350 | ext3_debug("found bit %d\n", ret_block); | 1403 | ext3_debug("found bit %d\n", ret_block); |
@@ -1355,7 +1408,7 @@ allocated: | |||
1355 | /* ret_block was blockgroup-relative. Now it becomes fs-relative */ | 1408 | /* ret_block was blockgroup-relative. Now it becomes fs-relative */ |
1356 | ret_block = target_block; | 1409 | ret_block = target_block; |
1357 | 1410 | ||
1358 | if (ret_block >= le32_to_cpu(es->s_blocks_count)) { | 1411 | if (ret_block + num - 1 >= le32_to_cpu(es->s_blocks_count)) { |
1359 | ext3_error(sb, "ext3_new_block", | 1412 | ext3_error(sb, "ext3_new_block", |
1360 | "block(%d) >= blocks count(%d) - " | 1413 | "block(%d) >= blocks count(%d) - " |
1361 | "block_group = %d, es == %p ", ret_block, | 1414 | "block_group = %d, es == %p ", ret_block, |
@@ -1373,9 +1426,9 @@ allocated: | |||
1373 | 1426 | ||
1374 | spin_lock(sb_bgl_lock(sbi, group_no)); | 1427 | spin_lock(sb_bgl_lock(sbi, group_no)); |
1375 | gdp->bg_free_blocks_count = | 1428 | gdp->bg_free_blocks_count = |
1376 | cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count) - 1); | 1429 | cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count) - num); |
1377 | spin_unlock(sb_bgl_lock(sbi, group_no)); | 1430 | spin_unlock(sb_bgl_lock(sbi, group_no)); |
1378 | percpu_counter_mod(&sbi->s_freeblocks_counter, -1); | 1431 | percpu_counter_mod(&sbi->s_freeblocks_counter, -num); |
1379 | 1432 | ||
1380 | BUFFER_TRACE(gdp_bh, "journal_dirty_metadata for group descriptor"); | 1433 | BUFFER_TRACE(gdp_bh, "journal_dirty_metadata for group descriptor"); |
1381 | err = ext3_journal_dirty_metadata(handle, gdp_bh); | 1434 | err = ext3_journal_dirty_metadata(handle, gdp_bh); |
@@ -1388,6 +1441,8 @@ allocated: | |||
1388 | 1441 | ||
1389 | *errp = 0; | 1442 | *errp = 0; |
1390 | brelse(bitmap_bh); | 1443 | brelse(bitmap_bh); |
1444 | DQUOT_FREE_BLOCK(inode, *count-num); | ||
1445 | *count = num; | ||
1391 | return ret_block; | 1446 | return ret_block; |
1392 | 1447 | ||
1393 | io_error: | 1448 | io_error: |
@@ -1401,11 +1456,19 @@ out: | |||
1401 | * Undo the block allocation | 1456 | * Undo the block allocation |
1402 | */ | 1457 | */ |
1403 | if (!performed_allocation) | 1458 | if (!performed_allocation) |
1404 | DQUOT_FREE_BLOCK(inode, 1); | 1459 | DQUOT_FREE_BLOCK(inode, *count); |
1405 | brelse(bitmap_bh); | 1460 | brelse(bitmap_bh); |
1406 | return 0; | 1461 | return 0; |
1407 | } | 1462 | } |
1408 | 1463 | ||
1464 | int ext3_new_block(handle_t *handle, struct inode *inode, | ||
1465 | unsigned long goal, int *errp) | ||
1466 | { | ||
1467 | unsigned long count = 1; | ||
1468 | |||
1469 | return ext3_new_blocks(handle, inode, goal, &count, errp); | ||
1470 | } | ||
1471 | |||
1409 | unsigned long ext3_count_free_blocks(struct super_block *sb) | 1472 | unsigned long ext3_count_free_blocks(struct super_block *sb) |
1410 | { | 1473 | { |
1411 | unsigned long desc_count; | 1474 | unsigned long desc_count; |
diff --git a/fs/ext3/dir.c b/fs/ext3/dir.c index 773459164bb2..38bd3f6ec147 100644 --- a/fs/ext3/dir.c +++ b/fs/ext3/dir.c | |||
@@ -131,8 +131,9 @@ static int ext3_readdir(struct file * filp, | |||
131 | struct buffer_head *bh = NULL; | 131 | struct buffer_head *bh = NULL; |
132 | 132 | ||
133 | map_bh.b_state = 0; | 133 | map_bh.b_state = 0; |
134 | err = ext3_get_block_handle(NULL, inode, blk, &map_bh, 0, 0); | 134 | err = ext3_get_blocks_handle(NULL, inode, blk, 1, |
135 | if (!err) { | 135 | &map_bh, 0, 0); |
136 | if (err > 0) { | ||
136 | page_cache_readahead(sb->s_bdev->bd_inode->i_mapping, | 137 | page_cache_readahead(sb->s_bdev->bd_inode->i_mapping, |
137 | &filp->f_ra, | 138 | &filp->f_ra, |
138 | filp, | 139 | filp, |
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c index 2c361377e0a5..48ae0339af17 100644 --- a/fs/ext3/inode.c +++ b/fs/ext3/inode.c | |||
@@ -44,16 +44,16 @@ static int ext3_writepage_trans_blocks(struct inode *inode); | |||
44 | /* | 44 | /* |
45 | * Test whether an inode is a fast symlink. | 45 | * Test whether an inode is a fast symlink. |
46 | */ | 46 | */ |
47 | static inline int ext3_inode_is_fast_symlink(struct inode *inode) | 47 | static int ext3_inode_is_fast_symlink(struct inode *inode) |
48 | { | 48 | { |
49 | int ea_blocks = EXT3_I(inode)->i_file_acl ? | 49 | int ea_blocks = EXT3_I(inode)->i_file_acl ? |
50 | (inode->i_sb->s_blocksize >> 9) : 0; | 50 | (inode->i_sb->s_blocksize >> 9) : 0; |
51 | 51 | ||
52 | return (S_ISLNK(inode->i_mode) && | 52 | return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0); |
53 | inode->i_blocks - ea_blocks == 0); | ||
54 | } | 53 | } |
55 | 54 | ||
56 | /* The ext3 forget function must perform a revoke if we are freeing data | 55 | /* |
56 | * The ext3 forget function must perform a revoke if we are freeing data | ||
57 | * which has been journaled. Metadata (eg. indirect blocks) must be | 57 | * which has been journaled. Metadata (eg. indirect blocks) must be |
58 | * revoked in all cases. | 58 | * revoked in all cases. |
59 | * | 59 | * |
@@ -61,10 +61,8 @@ static inline int ext3_inode_is_fast_symlink(struct inode *inode) | |||
61 | * but there may still be a record of it in the journal, and that record | 61 | * but there may still be a record of it in the journal, and that record |
62 | * still needs to be revoked. | 62 | * still needs to be revoked. |
63 | */ | 63 | */ |
64 | 64 | int ext3_forget(handle_t *handle, int is_metadata, struct inode *inode, | |
65 | int ext3_forget(handle_t *handle, int is_metadata, | 65 | struct buffer_head *bh, int blocknr) |
66 | struct inode *inode, struct buffer_head *bh, | ||
67 | int blocknr) | ||
68 | { | 66 | { |
69 | int err; | 67 | int err; |
70 | 68 | ||
@@ -104,10 +102,9 @@ int ext3_forget(handle_t *handle, int is_metadata, | |||
104 | } | 102 | } |
105 | 103 | ||
106 | /* | 104 | /* |
107 | * Work out how many blocks we need to progress with the next chunk of a | 105 | * Work out how many blocks we need to proceed with the next chunk of a |
108 | * truncate transaction. | 106 | * truncate transaction. |
109 | */ | 107 | */ |
110 | |||
111 | static unsigned long blocks_for_truncate(struct inode *inode) | 108 | static unsigned long blocks_for_truncate(struct inode *inode) |
112 | { | 109 | { |
113 | unsigned long needed; | 110 | unsigned long needed; |
@@ -141,7 +138,6 @@ static unsigned long blocks_for_truncate(struct inode *inode) | |||
141 | * extend fails, we need to propagate the failure up and restart the | 138 | * extend fails, we need to propagate the failure up and restart the |
142 | * transaction in the top-level truncate loop. --sct | 139 | * transaction in the top-level truncate loop. --sct |
143 | */ | 140 | */ |
144 | |||
145 | static handle_t *start_transaction(struct inode *inode) | 141 | static handle_t *start_transaction(struct inode *inode) |
146 | { | 142 | { |
147 | handle_t *result; | 143 | handle_t *result; |
@@ -194,9 +190,11 @@ void ext3_delete_inode (struct inode * inode) | |||
194 | 190 | ||
195 | handle = start_transaction(inode); | 191 | handle = start_transaction(inode); |
196 | if (IS_ERR(handle)) { | 192 | if (IS_ERR(handle)) { |
197 | /* If we're going to skip the normal cleanup, we still | 193 | /* |
198 | * need to make sure that the in-core orphan linked list | 194 | * If we're going to skip the normal cleanup, we still need to |
199 | * is properly cleaned up. */ | 195 | * make sure that the in-core orphan linked list is properly |
196 | * cleaned up. | ||
197 | */ | ||
200 | ext3_orphan_del(NULL, inode); | 198 | ext3_orphan_del(NULL, inode); |
201 | goto no_delete; | 199 | goto no_delete; |
202 | } | 200 | } |
@@ -235,16 +233,6 @@ no_delete: | |||
235 | clear_inode(inode); /* We must guarantee clearing of inode... */ | 233 | clear_inode(inode); /* We must guarantee clearing of inode... */ |
236 | } | 234 | } |
237 | 235 | ||
238 | static int ext3_alloc_block (handle_t *handle, | ||
239 | struct inode * inode, unsigned long goal, int *err) | ||
240 | { | ||
241 | unsigned long result; | ||
242 | |||
243 | result = ext3_new_block(handle, inode, goal, err); | ||
244 | return result; | ||
245 | } | ||
246 | |||
247 | |||
248 | typedef struct { | 236 | typedef struct { |
249 | __le32 *p; | 237 | __le32 *p; |
250 | __le32 key; | 238 | __le32 key; |
@@ -257,7 +245,7 @@ static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v) | |||
257 | p->bh = bh; | 245 | p->bh = bh; |
258 | } | 246 | } |
259 | 247 | ||
260 | static inline int verify_chain(Indirect *from, Indirect *to) | 248 | static int verify_chain(Indirect *from, Indirect *to) |
261 | { | 249 | { |
262 | while (from <= to && from->key == *from->p) | 250 | while (from <= to && from->key == *from->p) |
263 | from++; | 251 | from++; |
@@ -327,10 +315,10 @@ static int ext3_block_to_path(struct inode *inode, | |||
327 | offsets[n++] = i_block & (ptrs - 1); | 315 | offsets[n++] = i_block & (ptrs - 1); |
328 | final = ptrs; | 316 | final = ptrs; |
329 | } else { | 317 | } else { |
330 | ext3_warning (inode->i_sb, "ext3_block_to_path", "block > big"); | 318 | ext3_warning(inode->i_sb, "ext3_block_to_path", "block > big"); |
331 | } | 319 | } |
332 | if (boundary) | 320 | if (boundary) |
333 | *boundary = (i_block & (ptrs - 1)) == (final - 1); | 321 | *boundary = final - 1 - (i_block & (ptrs - 1)); |
334 | return n; | 322 | return n; |
335 | } | 323 | } |
336 | 324 | ||
@@ -419,7 +407,6 @@ no_block: | |||
419 | * | 407 | * |
420 | * Caller must make sure that @ind is valid and will stay that way. | 408 | * Caller must make sure that @ind is valid and will stay that way. |
421 | */ | 409 | */ |
422 | |||
423 | static unsigned long ext3_find_near(struct inode *inode, Indirect *ind) | 410 | static unsigned long ext3_find_near(struct inode *inode, Indirect *ind) |
424 | { | 411 | { |
425 | struct ext3_inode_info *ei = EXT3_I(inode); | 412 | struct ext3_inode_info *ei = EXT3_I(inode); |
@@ -429,17 +416,18 @@ static unsigned long ext3_find_near(struct inode *inode, Indirect *ind) | |||
429 | unsigned long colour; | 416 | unsigned long colour; |
430 | 417 | ||
431 | /* Try to find previous block */ | 418 | /* Try to find previous block */ |
432 | for (p = ind->p - 1; p >= start; p--) | 419 | for (p = ind->p - 1; p >= start; p--) { |
433 | if (*p) | 420 | if (*p) |
434 | return le32_to_cpu(*p); | 421 | return le32_to_cpu(*p); |
422 | } | ||
435 | 423 | ||
436 | /* No such thing, so let's try location of indirect block */ | 424 | /* No such thing, so let's try location of indirect block */ |
437 | if (ind->bh) | 425 | if (ind->bh) |
438 | return ind->bh->b_blocknr; | 426 | return ind->bh->b_blocknr; |
439 | 427 | ||
440 | /* | 428 | /* |
441 | * It is going to be refered from inode itself? OK, just put it into | 429 | * It is going to be referred to from the inode itself? OK, just put it |
442 | * the same cylinder group then. | 430 | * into the same cylinder group then. |
443 | */ | 431 | */ |
444 | bg_start = (ei->i_block_group * EXT3_BLOCKS_PER_GROUP(inode->i_sb)) + | 432 | bg_start = (ei->i_block_group * EXT3_BLOCKS_PER_GROUP(inode->i_sb)) + |
445 | le32_to_cpu(EXT3_SB(inode->i_sb)->s_es->s_first_data_block); | 433 | le32_to_cpu(EXT3_SB(inode->i_sb)->s_es->s_first_data_block); |
@@ -463,7 +451,9 @@ static unsigned long ext3_find_near(struct inode *inode, Indirect *ind) | |||
463 | static unsigned long ext3_find_goal(struct inode *inode, long block, | 451 | static unsigned long ext3_find_goal(struct inode *inode, long block, |
464 | Indirect chain[4], Indirect *partial) | 452 | Indirect chain[4], Indirect *partial) |
465 | { | 453 | { |
466 | struct ext3_block_alloc_info *block_i = EXT3_I(inode)->i_block_alloc_info; | 454 | struct ext3_block_alloc_info *block_i; |
455 | |||
456 | block_i = EXT3_I(inode)->i_block_alloc_info; | ||
467 | 457 | ||
468 | /* | 458 | /* |
469 | * try the heuristic for sequential allocation, | 459 | * try the heuristic for sequential allocation, |
@@ -478,13 +468,113 @@ static unsigned long ext3_find_goal(struct inode *inode, long block, | |||
478 | } | 468 | } |
479 | 469 | ||
480 | /** | 470 | /** |
471 | * ext3_blks_to_allocate: Look up the block map and count the number | ||
472 | * of direct blocks need to be allocated for the given branch. | ||
473 | * | ||
474 | * @branch: chain of indirect blocks | ||
475 | * @k: number of blocks need for indirect blocks | ||
476 | * @blks: number of data blocks to be mapped. | ||
477 | * @blocks_to_boundary: the offset in the indirect block | ||
478 | * | ||
479 | * return the total number of blocks to be allocate, including the | ||
480 | * direct and indirect blocks. | ||
481 | */ | ||
482 | static int ext3_blks_to_allocate(Indirect *branch, int k, unsigned long blks, | ||
483 | int blocks_to_boundary) | ||
484 | { | ||
485 | unsigned long count = 0; | ||
486 | |||
487 | /* | ||
488 | * Simple case, [t,d]Indirect block(s) has not allocated yet | ||
489 | * then it's clear blocks on that path have not allocated | ||
490 | */ | ||
491 | if (k > 0) { | ||
492 | /* right now we don't handle cross boundary allocation */ | ||
493 | if (blks < blocks_to_boundary + 1) | ||
494 | count += blks; | ||
495 | else | ||
496 | count += blocks_to_boundary + 1; | ||
497 | return count; | ||
498 | } | ||
499 | |||
500 | count++; | ||
501 | while (count < blks && count <= blocks_to_boundary && | ||
502 | le32_to_cpu(*(branch[0].p + count)) == 0) { | ||
503 | count++; | ||
504 | } | ||
505 | return count; | ||
506 | } | ||
507 | |||
508 | /** | ||
509 | * ext3_alloc_blocks: multiple allocate blocks needed for a branch | ||
510 | * @indirect_blks: the number of blocks need to allocate for indirect | ||
511 | * blocks | ||
512 | * | ||
513 | * @new_blocks: on return it will store the new block numbers for | ||
514 | * the indirect blocks(if needed) and the first direct block, | ||
515 | * @blks: on return it will store the total number of allocated | ||
516 | * direct blocks | ||
517 | */ | ||
518 | static int ext3_alloc_blocks(handle_t *handle, struct inode *inode, | ||
519 | unsigned long goal, int indirect_blks, int blks, | ||
520 | unsigned long long new_blocks[4], int *err) | ||
521 | { | ||
522 | int target, i; | ||
523 | unsigned long count = 0; | ||
524 | int index = 0; | ||
525 | unsigned long current_block = 0; | ||
526 | int ret = 0; | ||
527 | |||
528 | /* | ||
529 | * Here we try to allocate the requested multiple blocks at once, | ||
530 | * on a best-effort basis. | ||
531 | * To build a branch, we should allocate blocks for | ||
532 | * the indirect blocks(if not allocated yet), and at least | ||
533 | * the first direct block of this branch. That's the | ||
534 | * minimum number of blocks need to allocate(required) | ||
535 | */ | ||
536 | target = blks + indirect_blks; | ||
537 | |||
538 | while (1) { | ||
539 | count = target; | ||
540 | /* allocating blocks for indirect blocks and direct blocks */ | ||
541 | current_block = ext3_new_blocks(handle,inode,goal,&count,err); | ||
542 | if (*err) | ||
543 | goto failed_out; | ||
544 | |||
545 | target -= count; | ||
546 | /* allocate blocks for indirect blocks */ | ||
547 | while (index < indirect_blks && count) { | ||
548 | new_blocks[index++] = current_block++; | ||
549 | count--; | ||
550 | } | ||
551 | |||
552 | if (count > 0) | ||
553 | break; | ||
554 | } | ||
555 | |||
556 | /* save the new block number for the first direct block */ | ||
557 | new_blocks[index] = current_block; | ||
558 | |||
559 | /* total number of blocks allocated for direct blocks */ | ||
560 | ret = count; | ||
561 | *err = 0; | ||
562 | return ret; | ||
563 | failed_out: | ||
564 | for (i = 0; i <index; i++) | ||
565 | ext3_free_blocks(handle, inode, new_blocks[i], 1); | ||
566 | return ret; | ||
567 | } | ||
568 | |||
569 | /** | ||
481 | * ext3_alloc_branch - allocate and set up a chain of blocks. | 570 | * ext3_alloc_branch - allocate and set up a chain of blocks. |
482 | * @inode: owner | 571 | * @inode: owner |
483 | * @num: depth of the chain (number of blocks to allocate) | 572 | * @indirect_blks: number of allocated indirect blocks |
573 | * @blks: number of allocated direct blocks | ||
484 | * @offsets: offsets (in the blocks) to store the pointers to next. | 574 | * @offsets: offsets (in the blocks) to store the pointers to next. |
485 | * @branch: place to store the chain in. | 575 | * @branch: place to store the chain in. |
486 | * | 576 | * |
487 | * This function allocates @num blocks, zeroes out all but the last one, | 577 | * This function allocates blocks, zeroes out all but the last one, |
488 | * links them into chain and (if we are synchronous) writes them to disk. | 578 | * links them into chain and (if we are synchronous) writes them to disk. |
489 | * In other words, it prepares a branch that can be spliced onto the | 579 | * In other words, it prepares a branch that can be spliced onto the |
490 | * inode. It stores the information about that chain in the branch[], in | 580 | * inode. It stores the information about that chain in the branch[], in |
@@ -501,97 +591,106 @@ static unsigned long ext3_find_goal(struct inode *inode, long block, | |||
501 | * ext3_alloc_block() (normally -ENOSPC). Otherwise we set the chain | 591 | * ext3_alloc_block() (normally -ENOSPC). Otherwise we set the chain |
502 | * as described above and return 0. | 592 | * as described above and return 0. |
503 | */ | 593 | */ |
504 | |||
505 | static int ext3_alloc_branch(handle_t *handle, struct inode *inode, | 594 | static int ext3_alloc_branch(handle_t *handle, struct inode *inode, |
506 | int num, | 595 | int indirect_blks, int *blks, unsigned long goal, |
507 | unsigned long goal, | 596 | int *offsets, Indirect *branch) |
508 | int *offsets, | ||
509 | Indirect *branch) | ||
510 | { | 597 | { |
511 | int blocksize = inode->i_sb->s_blocksize; | 598 | int blocksize = inode->i_sb->s_blocksize; |
512 | int n = 0, keys = 0; | 599 | int i, n = 0; |
513 | int err = 0; | 600 | int err = 0; |
514 | int i; | 601 | struct buffer_head *bh; |
515 | int parent = ext3_alloc_block(handle, inode, goal, &err); | 602 | int num; |
516 | 603 | unsigned long long new_blocks[4]; | |
517 | branch[0].key = cpu_to_le32(parent); | 604 | unsigned long long current_block; |
518 | if (parent) { | ||
519 | for (n = 1; n < num; n++) { | ||
520 | struct buffer_head *bh; | ||
521 | /* Allocate the next block */ | ||
522 | int nr = ext3_alloc_block(handle, inode, parent, &err); | ||
523 | if (!nr) | ||
524 | break; | ||
525 | branch[n].key = cpu_to_le32(nr); | ||
526 | 605 | ||
527 | /* | 606 | num = ext3_alloc_blocks(handle, inode, goal, indirect_blks, |
528 | * Get buffer_head for parent block, zero it out | 607 | *blks, new_blocks, &err); |
529 | * and set the pointer to new one, then send | 608 | if (err) |
530 | * parent to disk. | 609 | return err; |
531 | */ | ||
532 | bh = sb_getblk(inode->i_sb, parent); | ||
533 | if (!bh) | ||
534 | break; | ||
535 | keys = n+1; | ||
536 | branch[n].bh = bh; | ||
537 | lock_buffer(bh); | ||
538 | BUFFER_TRACE(bh, "call get_create_access"); | ||
539 | err = ext3_journal_get_create_access(handle, bh); | ||
540 | if (err) { | ||
541 | unlock_buffer(bh); | ||
542 | brelse(bh); | ||
543 | break; | ||
544 | } | ||
545 | 610 | ||
546 | memset(bh->b_data, 0, blocksize); | 611 | branch[0].key = cpu_to_le32(new_blocks[0]); |
547 | branch[n].p = (__le32*) bh->b_data + offsets[n]; | 612 | /* |
548 | *branch[n].p = branch[n].key; | 613 | * metadata blocks and data blocks are allocated. |
549 | BUFFER_TRACE(bh, "marking uptodate"); | 614 | */ |
550 | set_buffer_uptodate(bh); | 615 | for (n = 1; n <= indirect_blks; n++) { |
616 | /* | ||
617 | * Get buffer_head for parent block, zero it out | ||
618 | * and set the pointer to new one, then send | ||
619 | * parent to disk. | ||
620 | */ | ||
621 | bh = sb_getblk(inode->i_sb, new_blocks[n-1]); | ||
622 | branch[n].bh = bh; | ||
623 | lock_buffer(bh); | ||
624 | BUFFER_TRACE(bh, "call get_create_access"); | ||
625 | err = ext3_journal_get_create_access(handle, bh); | ||
626 | if (err) { | ||
551 | unlock_buffer(bh); | 627 | unlock_buffer(bh); |
628 | brelse(bh); | ||
629 | goto failed; | ||
630 | } | ||
552 | 631 | ||
553 | BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata"); | 632 | memset(bh->b_data, 0, blocksize); |
554 | err = ext3_journal_dirty_metadata(handle, bh); | 633 | branch[n].p = (__le32 *) bh->b_data + offsets[n]; |
555 | if (err) | 634 | branch[n].key = cpu_to_le32(new_blocks[n]); |
556 | break; | 635 | *branch[n].p = branch[n].key; |
557 | 636 | if ( n == indirect_blks) { | |
558 | parent = nr; | 637 | current_block = new_blocks[n]; |
638 | /* | ||
639 | * End of chain, update the last new metablock of | ||
640 | * the chain to point to the new allocated | ||
641 | * data blocks numbers | ||
642 | */ | ||
643 | for (i=1; i < num; i++) | ||
644 | *(branch[n].p + i) = cpu_to_le32(++current_block); | ||
559 | } | 645 | } |
560 | } | 646 | BUFFER_TRACE(bh, "marking uptodate"); |
561 | if (n == num) | 647 | set_buffer_uptodate(bh); |
562 | return 0; | 648 | unlock_buffer(bh); |
563 | 649 | ||
650 | BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata"); | ||
651 | err = ext3_journal_dirty_metadata(handle, bh); | ||
652 | if (err) | ||
653 | goto failed; | ||
654 | } | ||
655 | *blks = num; | ||
656 | return err; | ||
657 | failed: | ||
564 | /* Allocation failed, free what we already allocated */ | 658 | /* Allocation failed, free what we already allocated */ |
565 | for (i = 1; i < keys; i++) { | 659 | for (i = 1; i <= n ; i++) { |
566 | BUFFER_TRACE(branch[i].bh, "call journal_forget"); | 660 | BUFFER_TRACE(branch[i].bh, "call journal_forget"); |
567 | ext3_journal_forget(handle, branch[i].bh); | 661 | ext3_journal_forget(handle, branch[i].bh); |
568 | } | 662 | } |
569 | for (i = 0; i < keys; i++) | 663 | for (i = 0; i <indirect_blks; i++) |
570 | ext3_free_blocks(handle, inode, le32_to_cpu(branch[i].key), 1); | 664 | ext3_free_blocks(handle, inode, new_blocks[i], 1); |
665 | |||
666 | ext3_free_blocks(handle, inode, new_blocks[i], num); | ||
667 | |||
571 | return err; | 668 | return err; |
572 | } | 669 | } |
573 | 670 | ||
574 | /** | 671 | /** |
575 | * ext3_splice_branch - splice the allocated branch onto inode. | 672 | * ext3_splice_branch - splice the allocated branch onto inode. |
576 | * @inode: owner | 673 | * @inode: owner |
577 | * @block: (logical) number of block we are adding | 674 | * @block: (logical) number of block we are adding |
578 | * @chain: chain of indirect blocks (with a missing link - see | 675 | * @chain: chain of indirect blocks (with a missing link - see |
579 | * ext3_alloc_branch) | 676 | * ext3_alloc_branch) |
580 | * @where: location of missing link | 677 | * @where: location of missing link |
581 | * @num: number of blocks we are adding | 678 | * @num: number of indirect blocks we are adding |
582 | * | 679 | * @blks: number of direct blocks we are adding |
583 | * This function fills the missing link and does all housekeeping needed in | 680 | * |
584 | * inode (->i_blocks, etc.). In case of success we end up with the full | 681 | * This function fills the missing link and does all housekeeping needed in |
585 | * chain to new block and return 0. | 682 | * inode (->i_blocks, etc.). In case of success we end up with the full |
683 | * chain to new block and return 0. | ||
586 | */ | 684 | */ |
587 | 685 | static int ext3_splice_branch(handle_t *handle, struct inode *inode, | |
588 | static int ext3_splice_branch(handle_t *handle, struct inode *inode, long block, | 686 | long block, Indirect *where, int num, int blks) |
589 | Indirect chain[4], Indirect *where, int num) | ||
590 | { | 687 | { |
591 | int i; | 688 | int i; |
592 | int err = 0; | 689 | int err = 0; |
593 | struct ext3_block_alloc_info *block_i = EXT3_I(inode)->i_block_alloc_info; | 690 | struct ext3_block_alloc_info *block_i; |
691 | unsigned long current_block; | ||
594 | 692 | ||
693 | block_i = EXT3_I(inode)->i_block_alloc_info; | ||
595 | /* | 694 | /* |
596 | * If we're splicing into a [td]indirect block (as opposed to the | 695 | * If we're splicing into a [td]indirect block (as opposed to the |
597 | * inode) then we need to get write access to the [td]indirect block | 696 | * inode) then we need to get write access to the [td]indirect block |
@@ -608,13 +707,24 @@ static int ext3_splice_branch(handle_t *handle, struct inode *inode, long block, | |||
608 | *where->p = where->key; | 707 | *where->p = where->key; |
609 | 708 | ||
610 | /* | 709 | /* |
710 | * Update the host buffer_head or inode to point to the just | ||
711 | * allocated direct blocks | ||
712 | */ | ||
713 | if (num == 0 && blks > 1) { | ||
714 | current_block = le32_to_cpu(where->key) + 1; | ||
715 | for (i = 1; i < blks; i++) | ||
716 | *(where->p + i ) = cpu_to_le32(current_block++); | ||
717 | } | ||
718 | |||
719 | /* | ||
611 | * update the most recently allocated logical & physical block | 720 | * update the most recently allocated logical & physical block |
612 | * in i_block_alloc_info, to assist find the proper goal block for next | 721 | * in i_block_alloc_info, to assist find the proper goal block for next |
613 | * allocation | 722 | * allocation |
614 | */ | 723 | */ |
615 | if (block_i) { | 724 | if (block_i) { |
616 | block_i->last_alloc_logical_block = block; | 725 | block_i->last_alloc_logical_block = block + blks - 1; |
617 | block_i->last_alloc_physical_block = le32_to_cpu(where[num-1].key); | 726 | block_i->last_alloc_physical_block = |
727 | le32_to_cpu(where[num].key) + blks - 1; | ||
618 | } | 728 | } |
619 | 729 | ||
620 | /* We are done with atomic stuff, now do the rest of housekeeping */ | 730 | /* We are done with atomic stuff, now do the rest of housekeeping */ |
@@ -625,7 +735,7 @@ static int ext3_splice_branch(handle_t *handle, struct inode *inode, long block, | |||
625 | /* had we spliced it onto indirect block? */ | 735 | /* had we spliced it onto indirect block? */ |
626 | if (where->bh) { | 736 | if (where->bh) { |
627 | /* | 737 | /* |
628 | * akpm: If we spliced it onto an indirect block, we haven't | 738 | * If we spliced it onto an indirect block, we haven't |
629 | * altered the inode. Note however that if it is being spliced | 739 | * altered the inode. Note however that if it is being spliced |
630 | * onto an indirect block at the very end of the file (the | 740 | * onto an indirect block at the very end of the file (the |
631 | * file is growing) then we *will* alter the inode to reflect | 741 | * file is growing) then we *will* alter the inode to reflect |
@@ -647,10 +757,13 @@ static int ext3_splice_branch(handle_t *handle, struct inode *inode, long block, | |||
647 | return err; | 757 | return err; |
648 | 758 | ||
649 | err_out: | 759 | err_out: |
650 | for (i = 1; i < num; i++) { | 760 | for (i = 1; i <= num; i++) { |
651 | BUFFER_TRACE(where[i].bh, "call journal_forget"); | 761 | BUFFER_TRACE(where[i].bh, "call journal_forget"); |
652 | ext3_journal_forget(handle, where[i].bh); | 762 | ext3_journal_forget(handle, where[i].bh); |
763 | ext3_free_blocks(handle,inode,le32_to_cpu(where[i-1].key),1); | ||
653 | } | 764 | } |
765 | ext3_free_blocks(handle, inode, le32_to_cpu(where[num].key), blks); | ||
766 | |||
654 | return err; | 767 | return err; |
655 | } | 768 | } |
656 | 769 | ||
@@ -666,26 +779,33 @@ err_out: | |||
666 | * allocations is needed - we simply release blocks and do not touch anything | 779 | * allocations is needed - we simply release blocks and do not touch anything |
667 | * reachable from inode. | 780 | * reachable from inode. |
668 | * | 781 | * |
669 | * akpm: `handle' can be NULL if create == 0. | 782 | * `handle' can be NULL if create == 0. |
670 | * | 783 | * |
671 | * The BKL may not be held on entry here. Be sure to take it early. | 784 | * The BKL may not be held on entry here. Be sure to take it early. |
785 | * return > 0, # of blocks mapped or allocated. | ||
786 | * return = 0, if plain lookup failed. | ||
787 | * return < 0, error case. | ||
672 | */ | 788 | */ |
673 | 789 | int ext3_get_blocks_handle(handle_t *handle, struct inode *inode, | |
674 | int | 790 | sector_t iblock, unsigned long maxblocks, |
675 | ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock, | 791 | struct buffer_head *bh_result, |
676 | struct buffer_head *bh_result, int create, int extend_disksize) | 792 | int create, int extend_disksize) |
677 | { | 793 | { |
678 | int err = -EIO; | 794 | int err = -EIO; |
679 | int offsets[4]; | 795 | int offsets[4]; |
680 | Indirect chain[4]; | 796 | Indirect chain[4]; |
681 | Indirect *partial; | 797 | Indirect *partial; |
682 | unsigned long goal; | 798 | unsigned long goal; |
683 | int left; | 799 | int indirect_blks; |
684 | int boundary = 0; | 800 | int blocks_to_boundary = 0; |
685 | const int depth = ext3_block_to_path(inode, iblock, offsets, &boundary); | 801 | int depth; |
686 | struct ext3_inode_info *ei = EXT3_I(inode); | 802 | struct ext3_inode_info *ei = EXT3_I(inode); |
803 | int count = 0; | ||
804 | unsigned long first_block = 0; | ||
805 | |||
687 | 806 | ||
688 | J_ASSERT(handle != NULL || create == 0); | 807 | J_ASSERT(handle != NULL || create == 0); |
808 | depth = ext3_block_to_path(inode,iblock,offsets,&blocks_to_boundary); | ||
689 | 809 | ||
690 | if (depth == 0) | 810 | if (depth == 0) |
691 | goto out; | 811 | goto out; |
@@ -694,8 +814,31 @@ ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock, | |||
694 | 814 | ||
695 | /* Simplest case - block found, no allocation needed */ | 815 | /* Simplest case - block found, no allocation needed */ |
696 | if (!partial) { | 816 | if (!partial) { |
817 | first_block = chain[depth - 1].key; | ||
697 | clear_buffer_new(bh_result); | 818 | clear_buffer_new(bh_result); |
698 | goto got_it; | 819 | count++; |
820 | /*map more blocks*/ | ||
821 | while (count < maxblocks && count <= blocks_to_boundary) { | ||
822 | if (!verify_chain(chain, partial)) { | ||
823 | /* | ||
824 | * Indirect block might be removed by | ||
825 | * truncate while we were reading it. | ||
826 | * Handling of that case: forget what we've | ||
827 | * got now. Flag the err as EAGAIN, so it | ||
828 | * will reread. | ||
829 | */ | ||
830 | err = -EAGAIN; | ||
831 | count = 0; | ||
832 | break; | ||
833 | } | ||
834 | if (le32_to_cpu(*(chain[depth-1].p+count)) == | ||
835 | (first_block + count)) | ||
836 | count++; | ||
837 | else | ||
838 | break; | ||
839 | } | ||
840 | if (err != -EAGAIN) | ||
841 | goto got_it; | ||
699 | } | 842 | } |
700 | 843 | ||
701 | /* Next simple case - plain lookup or failed read of indirect block */ | 844 | /* Next simple case - plain lookup or failed read of indirect block */ |
@@ -723,6 +866,7 @@ ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock, | |||
723 | } | 866 | } |
724 | partial = ext3_get_branch(inode, depth, offsets, chain, &err); | 867 | partial = ext3_get_branch(inode, depth, offsets, chain, &err); |
725 | if (!partial) { | 868 | if (!partial) { |
869 | count++; | ||
726 | mutex_unlock(&ei->truncate_mutex); | 870 | mutex_unlock(&ei->truncate_mutex); |
727 | if (err) | 871 | if (err) |
728 | goto cleanup; | 872 | goto cleanup; |
@@ -740,12 +884,19 @@ ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock, | |||
740 | 884 | ||
741 | goal = ext3_find_goal(inode, iblock, chain, partial); | 885 | goal = ext3_find_goal(inode, iblock, chain, partial); |
742 | 886 | ||
743 | left = (chain + depth) - partial; | 887 | /* the number of blocks need to allocate for [d,t]indirect blocks */ |
888 | indirect_blks = (chain + depth) - partial - 1; | ||
744 | 889 | ||
745 | /* | 890 | /* |
891 | * Next look up the indirect map to count the total number of | ||
892 | * direct blocks to allocate for this branch. | ||
893 | */ | ||
894 | count = ext3_blks_to_allocate(partial, indirect_blks, | ||
895 | maxblocks, blocks_to_boundary); | ||
896 | /* | ||
746 | * Block out ext3_truncate while we alter the tree | 897 | * Block out ext3_truncate while we alter the tree |
747 | */ | 898 | */ |
748 | err = ext3_alloc_branch(handle, inode, left, goal, | 899 | err = ext3_alloc_branch(handle, inode, indirect_blks, &count, goal, |
749 | offsets + (partial - chain), partial); | 900 | offsets + (partial - chain), partial); |
750 | 901 | ||
751 | /* | 902 | /* |
@@ -756,8 +907,8 @@ ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock, | |||
756 | * may need to return -EAGAIN upwards in the worst case. --sct | 907 | * may need to return -EAGAIN upwards in the worst case. --sct |
757 | */ | 908 | */ |
758 | if (!err) | 909 | if (!err) |
759 | err = ext3_splice_branch(handle, inode, iblock, chain, | 910 | err = ext3_splice_branch(handle, inode, iblock, |
760 | partial, left); | 911 | partial, indirect_blks, count); |
761 | /* | 912 | /* |
762 | * i_disksize growing is protected by truncate_mutex. Don't forget to | 913 | * i_disksize growing is protected by truncate_mutex. Don't forget to |
763 | * protect it if you're about to implement concurrent | 914 | * protect it if you're about to implement concurrent |
@@ -772,8 +923,9 @@ ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock, | |||
772 | set_buffer_new(bh_result); | 923 | set_buffer_new(bh_result); |
773 | got_it: | 924 | got_it: |
774 | map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key)); | 925 | map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key)); |
775 | if (boundary) | 926 | if (blocks_to_boundary == 0) |
776 | set_buffer_boundary(bh_result); | 927 | set_buffer_boundary(bh_result); |
928 | err = count; | ||
777 | /* Clean up and exit */ | 929 | /* Clean up and exit */ |
778 | partial = chain + depth - 1; /* the whole chain */ | 930 | partial = chain + depth - 1; /* the whole chain */ |
779 | cleanup: | 931 | cleanup: |
@@ -787,34 +939,21 @@ out: | |||
787 | return err; | 939 | return err; |
788 | } | 940 | } |
789 | 941 | ||
790 | static int ext3_get_block(struct inode *inode, sector_t iblock, | ||
791 | struct buffer_head *bh_result, int create) | ||
792 | { | ||
793 | handle_t *handle = NULL; | ||
794 | int ret; | ||
795 | |||
796 | if (create) { | ||
797 | handle = ext3_journal_current_handle(); | ||
798 | J_ASSERT(handle != 0); | ||
799 | } | ||
800 | ret = ext3_get_block_handle(handle, inode, iblock, | ||
801 | bh_result, create, 1); | ||
802 | return ret; | ||
803 | } | ||
804 | |||
805 | #define DIO_CREDITS (EXT3_RESERVE_TRANS_BLOCKS + 32) | 942 | #define DIO_CREDITS (EXT3_RESERVE_TRANS_BLOCKS + 32) |
806 | 943 | ||
807 | static int | 944 | static int ext3_get_block(struct inode *inode, sector_t iblock, |
808 | ext3_direct_io_get_blocks(struct inode *inode, sector_t iblock, | 945 | struct buffer_head *bh_result, int create) |
809 | unsigned long max_blocks, struct buffer_head *bh_result, | ||
810 | int create) | ||
811 | { | 946 | { |
812 | handle_t *handle = journal_current_handle(); | 947 | handle_t *handle = journal_current_handle(); |
813 | int ret = 0; | 948 | int ret = 0; |
949 | unsigned max_blocks = bh_result->b_size >> inode->i_blkbits; | ||
814 | 950 | ||
815 | if (!handle) | 951 | if (!create) |
816 | goto get_block; /* A read */ | 952 | goto get_block; /* A read */ |
817 | 953 | ||
954 | if (max_blocks == 1) | ||
955 | goto get_block; /* A single block get */ | ||
956 | |||
818 | if (handle->h_transaction->t_state == T_LOCKED) { | 957 | if (handle->h_transaction->t_state == T_LOCKED) { |
819 | /* | 958 | /* |
820 | * Huge direct-io writes can hold off commits for long | 959 | * Huge direct-io writes can hold off commits for long |
@@ -841,18 +980,22 @@ ext3_direct_io_get_blocks(struct inode *inode, sector_t iblock, | |||
841 | } | 980 | } |
842 | 981 | ||
843 | get_block: | 982 | get_block: |
844 | if (ret == 0) | 983 | if (ret == 0) { |
845 | ret = ext3_get_block_handle(handle, inode, iblock, | 984 | ret = ext3_get_blocks_handle(handle, inode, iblock, |
846 | bh_result, create, 0); | 985 | max_blocks, bh_result, create, 0); |
847 | bh_result->b_size = (1 << inode->i_blkbits); | 986 | if (ret > 0) { |
987 | bh_result->b_size = (ret << inode->i_blkbits); | ||
988 | ret = 0; | ||
989 | } | ||
990 | } | ||
848 | return ret; | 991 | return ret; |
849 | } | 992 | } |
850 | 993 | ||
851 | /* | 994 | /* |
852 | * `handle' can be NULL if create is zero | 995 | * `handle' can be NULL if create is zero |
853 | */ | 996 | */ |
854 | struct buffer_head *ext3_getblk(handle_t *handle, struct inode * inode, | 997 | struct buffer_head *ext3_getblk(handle_t *handle, struct inode *inode, |
855 | long block, int create, int * errp) | 998 | long block, int create, int *errp) |
856 | { | 999 | { |
857 | struct buffer_head dummy; | 1000 | struct buffer_head dummy; |
858 | int fatal = 0, err; | 1001 | int fatal = 0, err; |
@@ -862,8 +1005,16 @@ struct buffer_head *ext3_getblk(handle_t *handle, struct inode * inode, | |||
862 | dummy.b_state = 0; | 1005 | dummy.b_state = 0; |
863 | dummy.b_blocknr = -1000; | 1006 | dummy.b_blocknr = -1000; |
864 | buffer_trace_init(&dummy.b_history); | 1007 | buffer_trace_init(&dummy.b_history); |
865 | *errp = ext3_get_block_handle(handle, inode, block, &dummy, create, 1); | 1008 | err = ext3_get_blocks_handle(handle, inode, block, 1, |
866 | if (!*errp && buffer_mapped(&dummy)) { | 1009 | &dummy, create, 1); |
1010 | if (err == 1) { | ||
1011 | err = 0; | ||
1012 | } else if (err >= 0) { | ||
1013 | WARN_ON(1); | ||
1014 | err = -EIO; | ||
1015 | } | ||
1016 | *errp = err; | ||
1017 | if (!err && buffer_mapped(&dummy)) { | ||
867 | struct buffer_head *bh; | 1018 | struct buffer_head *bh; |
868 | bh = sb_getblk(inode->i_sb, dummy.b_blocknr); | 1019 | bh = sb_getblk(inode->i_sb, dummy.b_blocknr); |
869 | if (!bh) { | 1020 | if (!bh) { |
@@ -874,17 +1025,18 @@ struct buffer_head *ext3_getblk(handle_t *handle, struct inode * inode, | |||
874 | J_ASSERT(create != 0); | 1025 | J_ASSERT(create != 0); |
875 | J_ASSERT(handle != 0); | 1026 | J_ASSERT(handle != 0); |
876 | 1027 | ||
877 | /* Now that we do not always journal data, we | 1028 | /* |
878 | should keep in mind whether this should | 1029 | * Now that we do not always journal data, we should |
879 | always journal the new buffer as metadata. | 1030 | * keep in mind whether this should always journal the |
880 | For now, regular file writes use | 1031 | * new buffer as metadata. For now, regular file |
881 | ext3_get_block instead, so it's not a | 1032 | * writes use ext3_get_block instead, so it's not a |
882 | problem. */ | 1033 | * problem. |
1034 | */ | ||
883 | lock_buffer(bh); | 1035 | lock_buffer(bh); |
884 | BUFFER_TRACE(bh, "call get_create_access"); | 1036 | BUFFER_TRACE(bh, "call get_create_access"); |
885 | fatal = ext3_journal_get_create_access(handle, bh); | 1037 | fatal = ext3_journal_get_create_access(handle, bh); |
886 | if (!fatal && !buffer_uptodate(bh)) { | 1038 | if (!fatal && !buffer_uptodate(bh)) { |
887 | memset(bh->b_data, 0, inode->i_sb->s_blocksize); | 1039 | memset(bh->b_data,0,inode->i_sb->s_blocksize); |
888 | set_buffer_uptodate(bh); | 1040 | set_buffer_uptodate(bh); |
889 | } | 1041 | } |
890 | unlock_buffer(bh); | 1042 | unlock_buffer(bh); |
@@ -906,7 +1058,7 @@ err: | |||
906 | return NULL; | 1058 | return NULL; |
907 | } | 1059 | } |
908 | 1060 | ||
909 | struct buffer_head *ext3_bread(handle_t *handle, struct inode * inode, | 1061 | struct buffer_head *ext3_bread(handle_t *handle, struct inode *inode, |
910 | int block, int create, int *err) | 1062 | int block, int create, int *err) |
911 | { | 1063 | { |
912 | struct buffer_head * bh; | 1064 | struct buffer_head * bh; |
@@ -982,9 +1134,8 @@ static int walk_page_buffers( handle_t *handle, | |||
982 | * is elevated. We'll still have enough credits for the tiny quotafile | 1134 | * is elevated. We'll still have enough credits for the tiny quotafile |
983 | * write. | 1135 | * write. |
984 | */ | 1136 | */ |
985 | 1137 | static int do_journal_get_write_access(handle_t *handle, | |
986 | static int do_journal_get_write_access(handle_t *handle, | 1138 | struct buffer_head *bh) |
987 | struct buffer_head *bh) | ||
988 | { | 1139 | { |
989 | if (!buffer_mapped(bh) || buffer_freed(bh)) | 1140 | if (!buffer_mapped(bh) || buffer_freed(bh)) |
990 | return 0; | 1141 | return 0; |
@@ -1025,8 +1176,7 @@ out: | |||
1025 | return ret; | 1176 | return ret; |
1026 | } | 1177 | } |
1027 | 1178 | ||
1028 | int | 1179 | int ext3_journal_dirty_data(handle_t *handle, struct buffer_head *bh) |
1029 | ext3_journal_dirty_data(handle_t *handle, struct buffer_head *bh) | ||
1030 | { | 1180 | { |
1031 | int err = journal_dirty_data(handle, bh); | 1181 | int err = journal_dirty_data(handle, bh); |
1032 | if (err) | 1182 | if (err) |
@@ -1051,7 +1201,6 @@ static int commit_write_fn(handle_t *handle, struct buffer_head *bh) | |||
1051 | * ext3 never places buffers on inode->i_mapping->private_list. metadata | 1201 | * ext3 never places buffers on inode->i_mapping->private_list. metadata |
1052 | * buffers are managed internally. | 1202 | * buffers are managed internally. |
1053 | */ | 1203 | */ |
1054 | |||
1055 | static int ext3_ordered_commit_write(struct file *file, struct page *page, | 1204 | static int ext3_ordered_commit_write(struct file *file, struct page *page, |
1056 | unsigned from, unsigned to) | 1205 | unsigned from, unsigned to) |
1057 | { | 1206 | { |
@@ -1261,7 +1410,7 @@ static int journal_dirty_data_fn(handle_t *handle, struct buffer_head *bh) | |||
1261 | * we don't need to open a transaction here. | 1410 | * we don't need to open a transaction here. |
1262 | */ | 1411 | */ |
1263 | static int ext3_ordered_writepage(struct page *page, | 1412 | static int ext3_ordered_writepage(struct page *page, |
1264 | struct writeback_control *wbc) | 1413 | struct writeback_control *wbc) |
1265 | { | 1414 | { |
1266 | struct inode *inode = page->mapping->host; | 1415 | struct inode *inode = page->mapping->host; |
1267 | struct buffer_head *page_bufs; | 1416 | struct buffer_head *page_bufs; |
@@ -1430,7 +1579,7 @@ ext3_readpages(struct file *file, struct address_space *mapping, | |||
1430 | return mpage_readpages(mapping, pages, nr_pages, ext3_get_block); | 1579 | return mpage_readpages(mapping, pages, nr_pages, ext3_get_block); |
1431 | } | 1580 | } |
1432 | 1581 | ||
1433 | static int ext3_invalidatepage(struct page *page, unsigned long offset) | 1582 | static void ext3_invalidatepage(struct page *page, unsigned long offset) |
1434 | { | 1583 | { |
1435 | journal_t *journal = EXT3_JOURNAL(page->mapping->host); | 1584 | journal_t *journal = EXT3_JOURNAL(page->mapping->host); |
1436 | 1585 | ||
@@ -1440,7 +1589,7 @@ static int ext3_invalidatepage(struct page *page, unsigned long offset) | |||
1440 | if (offset == 0) | 1589 | if (offset == 0) |
1441 | ClearPageChecked(page); | 1590 | ClearPageChecked(page); |
1442 | 1591 | ||
1443 | return journal_invalidatepage(journal, page, offset); | 1592 | journal_invalidatepage(journal, page, offset); |
1444 | } | 1593 | } |
1445 | 1594 | ||
1446 | static int ext3_releasepage(struct page *page, gfp_t wait) | 1595 | static int ext3_releasepage(struct page *page, gfp_t wait) |
@@ -1492,11 +1641,10 @@ static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb, | |||
1492 | 1641 | ||
1493 | ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, | 1642 | ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, |
1494 | offset, nr_segs, | 1643 | offset, nr_segs, |
1495 | ext3_direct_io_get_blocks, NULL); | 1644 | ext3_get_block, NULL); |
1496 | 1645 | ||
1497 | /* | 1646 | /* |
1498 | * Reacquire the handle: ext3_direct_io_get_block() can restart the | 1647 | * Reacquire the handle: ext3_get_block() can restart the transaction |
1499 | * transaction | ||
1500 | */ | 1648 | */ |
1501 | handle = journal_current_handle(); | 1649 | handle = journal_current_handle(); |
1502 | 1650 | ||
@@ -1752,11 +1900,8 @@ static inline int all_zeroes(__le32 *p, __le32 *q) | |||
1752 | * c) free the subtrees growing from the inode past the @chain[0]. | 1900 | * c) free the subtrees growing from the inode past the @chain[0]. |
1753 | * (no partially truncated stuff there). */ | 1901 | * (no partially truncated stuff there). */ |
1754 | 1902 | ||
1755 | static Indirect *ext3_find_shared(struct inode *inode, | 1903 | static Indirect *ext3_find_shared(struct inode *inode, int depth, |
1756 | int depth, | 1904 | int offsets[4], Indirect chain[4], __le32 *top) |
1757 | int offsets[4], | ||
1758 | Indirect chain[4], | ||
1759 | __le32 *top) | ||
1760 | { | 1905 | { |
1761 | Indirect *partial, *p; | 1906 | Indirect *partial, *p; |
1762 | int k, err; | 1907 | int k, err; |
@@ -1795,8 +1940,7 @@ static Indirect *ext3_find_shared(struct inode *inode, | |||
1795 | } | 1940 | } |
1796 | /* Writer: end */ | 1941 | /* Writer: end */ |
1797 | 1942 | ||
1798 | while(partial > p) | 1943 | while(partial > p) { |
1799 | { | ||
1800 | brelse(partial->bh); | 1944 | brelse(partial->bh); |
1801 | partial--; | 1945 | partial--; |
1802 | } | 1946 | } |
@@ -1812,10 +1956,9 @@ no_top: | |||
1812 | * We release `count' blocks on disk, but (last - first) may be greater | 1956 | * We release `count' blocks on disk, but (last - first) may be greater |
1813 | * than `count' because there can be holes in there. | 1957 | * than `count' because there can be holes in there. |
1814 | */ | 1958 | */ |
1815 | static void | 1959 | static void ext3_clear_blocks(handle_t *handle, struct inode *inode, |
1816 | ext3_clear_blocks(handle_t *handle, struct inode *inode, struct buffer_head *bh, | 1960 | struct buffer_head *bh, unsigned long block_to_free, |
1817 | unsigned long block_to_free, unsigned long count, | 1961 | unsigned long count, __le32 *first, __le32 *last) |
1818 | __le32 *first, __le32 *last) | ||
1819 | { | 1962 | { |
1820 | __le32 *p; | 1963 | __le32 *p; |
1821 | if (try_to_extend_transaction(handle, inode)) { | 1964 | if (try_to_extend_transaction(handle, inode)) { |
@@ -2076,8 +2219,7 @@ static void ext3_free_branches(handle_t *handle, struct inode *inode, | |||
2076 | * that's fine - as long as they are linked from the inode, the post-crash | 2219 | * that's fine - as long as they are linked from the inode, the post-crash |
2077 | * ext3_truncate() run will find them and release them. | 2220 | * ext3_truncate() run will find them and release them. |
2078 | */ | 2221 | */ |
2079 | 2222 | void ext3_truncate(struct inode *inode) | |
2080 | void ext3_truncate(struct inode * inode) | ||
2081 | { | 2223 | { |
2082 | handle_t *handle; | 2224 | handle_t *handle; |
2083 | struct ext3_inode_info *ei = EXT3_I(inode); | 2225 | struct ext3_inode_info *ei = EXT3_I(inode); |
@@ -2201,29 +2343,26 @@ void ext3_truncate(struct inode * inode) | |||
2201 | do_indirects: | 2343 | do_indirects: |
2202 | /* Kill the remaining (whole) subtrees */ | 2344 | /* Kill the remaining (whole) subtrees */ |
2203 | switch (offsets[0]) { | 2345 | switch (offsets[0]) { |
2204 | default: | 2346 | default: |
2205 | nr = i_data[EXT3_IND_BLOCK]; | 2347 | nr = i_data[EXT3_IND_BLOCK]; |
2206 | if (nr) { | 2348 | if (nr) { |
2207 | ext3_free_branches(handle, inode, NULL, | 2349 | ext3_free_branches(handle, inode, NULL, &nr, &nr+1, 1); |
2208 | &nr, &nr+1, 1); | 2350 | i_data[EXT3_IND_BLOCK] = 0; |
2209 | i_data[EXT3_IND_BLOCK] = 0; | 2351 | } |
2210 | } | 2352 | case EXT3_IND_BLOCK: |
2211 | case EXT3_IND_BLOCK: | 2353 | nr = i_data[EXT3_DIND_BLOCK]; |
2212 | nr = i_data[EXT3_DIND_BLOCK]; | 2354 | if (nr) { |
2213 | if (nr) { | 2355 | ext3_free_branches(handle, inode, NULL, &nr, &nr+1, 2); |
2214 | ext3_free_branches(handle, inode, NULL, | 2356 | i_data[EXT3_DIND_BLOCK] = 0; |
2215 | &nr, &nr+1, 2); | 2357 | } |
2216 | i_data[EXT3_DIND_BLOCK] = 0; | 2358 | case EXT3_DIND_BLOCK: |
2217 | } | 2359 | nr = i_data[EXT3_TIND_BLOCK]; |
2218 | case EXT3_DIND_BLOCK: | 2360 | if (nr) { |
2219 | nr = i_data[EXT3_TIND_BLOCK]; | 2361 | ext3_free_branches(handle, inode, NULL, &nr, &nr+1, 3); |
2220 | if (nr) { | 2362 | i_data[EXT3_TIND_BLOCK] = 0; |
2221 | ext3_free_branches(handle, inode, NULL, | 2363 | } |
2222 | &nr, &nr+1, 3); | 2364 | case EXT3_TIND_BLOCK: |
2223 | i_data[EXT3_TIND_BLOCK] = 0; | 2365 | ; |
2224 | } | ||
2225 | case EXT3_TIND_BLOCK: | ||
2226 | ; | ||
2227 | } | 2366 | } |
2228 | 2367 | ||
2229 | ext3_discard_reservation(inode); | 2368 | ext3_discard_reservation(inode); |
@@ -2232,8 +2371,10 @@ do_indirects: | |||
2232 | inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC; | 2371 | inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC; |
2233 | ext3_mark_inode_dirty(handle, inode); | 2372 | ext3_mark_inode_dirty(handle, inode); |
2234 | 2373 | ||
2235 | /* In a multi-transaction truncate, we only make the final | 2374 | /* |
2236 | * transaction synchronous */ | 2375 | * In a multi-transaction truncate, we only make the final transaction |
2376 | * synchronous | ||
2377 | */ | ||
2237 | if (IS_SYNC(inode)) | 2378 | if (IS_SYNC(inode)) |
2238 | handle->h_sync = 1; | 2379 | handle->h_sync = 1; |
2239 | out_stop: | 2380 | out_stop: |
@@ -2259,20 +2400,16 @@ static unsigned long ext3_get_inode_block(struct super_block *sb, | |||
2259 | struct ext3_group_desc * gdp; | 2400 | struct ext3_group_desc * gdp; |
2260 | 2401 | ||
2261 | 2402 | ||
2262 | if ((ino != EXT3_ROOT_INO && | 2403 | if ((ino != EXT3_ROOT_INO && ino != EXT3_JOURNAL_INO && |
2263 | ino != EXT3_JOURNAL_INO && | 2404 | ino != EXT3_RESIZE_INO && ino < EXT3_FIRST_INO(sb)) || |
2264 | ino != EXT3_RESIZE_INO && | 2405 | ino > le32_to_cpu(EXT3_SB(sb)->s_es->s_inodes_count)) { |
2265 | ino < EXT3_FIRST_INO(sb)) || | 2406 | ext3_error(sb, "ext3_get_inode_block", |
2266 | ino > le32_to_cpu( | ||
2267 | EXT3_SB(sb)->s_es->s_inodes_count)) { | ||
2268 | ext3_error (sb, "ext3_get_inode_block", | ||
2269 | "bad inode number: %lu", ino); | 2407 | "bad inode number: %lu", ino); |
2270 | return 0; | 2408 | return 0; |
2271 | } | 2409 | } |
2272 | block_group = (ino - 1) / EXT3_INODES_PER_GROUP(sb); | 2410 | block_group = (ino - 1) / EXT3_INODES_PER_GROUP(sb); |
2273 | if (block_group >= EXT3_SB(sb)->s_groups_count) { | 2411 | if (block_group >= EXT3_SB(sb)->s_groups_count) { |
2274 | ext3_error (sb, "ext3_get_inode_block", | 2412 | ext3_error(sb,"ext3_get_inode_block","group >= groups count"); |
2275 | "group >= groups count"); | ||
2276 | return 0; | 2413 | return 0; |
2277 | } | 2414 | } |
2278 | smp_rmb(); | 2415 | smp_rmb(); |
@@ -2285,7 +2422,7 @@ static unsigned long ext3_get_inode_block(struct super_block *sb, | |||
2285 | return 0; | 2422 | return 0; |
2286 | } | 2423 | } |
2287 | 2424 | ||
2288 | gdp = (struct ext3_group_desc *) bh->b_data; | 2425 | gdp = (struct ext3_group_desc *)bh->b_data; |
2289 | /* | 2426 | /* |
2290 | * Figure out the offset within the block group inode table | 2427 | * Figure out the offset within the block group inode table |
2291 | */ | 2428 | */ |
@@ -2834,7 +2971,7 @@ err_out: | |||
2834 | 2971 | ||
2835 | 2972 | ||
2836 | /* | 2973 | /* |
2837 | * akpm: how many blocks doth make a writepage()? | 2974 | * How many blocks doth make a writepage()? |
2838 | * | 2975 | * |
2839 | * With N blocks per page, it may be: | 2976 | * With N blocks per page, it may be: |
2840 | * N data blocks | 2977 | * N data blocks |
@@ -2924,8 +3061,8 @@ ext3_reserve_inode_write(handle_t *handle, struct inode *inode, | |||
2924 | } | 3061 | } |
2925 | 3062 | ||
2926 | /* | 3063 | /* |
2927 | * akpm: What we do here is to mark the in-core inode as clean | 3064 | * What we do here is to mark the in-core inode as clean with respect to inode |
2928 | * with respect to inode dirtiness (it may still be data-dirty). | 3065 | * dirtiness (it may still be data-dirty). |
2929 | * This means that the in-core inode may be reaped by prune_icache | 3066 | * This means that the in-core inode may be reaped by prune_icache |
2930 | * without having to perform any I/O. This is a very good thing, | 3067 | * without having to perform any I/O. This is a very good thing, |
2931 | * because *any* task may call prune_icache - even ones which | 3068 | * because *any* task may call prune_icache - even ones which |
@@ -2957,7 +3094,7 @@ int ext3_mark_inode_dirty(handle_t *handle, struct inode *inode) | |||
2957 | } | 3094 | } |
2958 | 3095 | ||
2959 | /* | 3096 | /* |
2960 | * akpm: ext3_dirty_inode() is called from __mark_inode_dirty() | 3097 | * ext3_dirty_inode() is called from __mark_inode_dirty() |
2961 | * | 3098 | * |
2962 | * We're really interested in the case where a file is being extended. | 3099 | * We're really interested in the case where a file is being extended. |
2963 | * i_size has been changed by generic_commit_write() and we thus need | 3100 | * i_size has been changed by generic_commit_write() and we thus need |
@@ -2993,7 +3130,7 @@ out: | |||
2993 | return; | 3130 | return; |
2994 | } | 3131 | } |
2995 | 3132 | ||
2996 | #ifdef AKPM | 3133 | #if 0 |
2997 | /* | 3134 | /* |
2998 | * Bind an inode's backing buffer_head into this transaction, to prevent | 3135 | * Bind an inode's backing buffer_head into this transaction, to prevent |
2999 | * it from being flushed to disk early. Unlike | 3136 | * it from being flushed to disk early. Unlike |
@@ -3001,8 +3138,7 @@ out: | |||
3001 | * returns no iloc structure, so the caller needs to repeat the iloc | 3138 | * returns no iloc structure, so the caller needs to repeat the iloc |
3002 | * lookup to mark the inode dirty later. | 3139 | * lookup to mark the inode dirty later. |
3003 | */ | 3140 | */ |
3004 | static inline int | 3141 | static int ext3_pin_inode(handle_t *handle, struct inode *inode) |
3005 | ext3_pin_inode(handle_t *handle, struct inode *inode) | ||
3006 | { | 3142 | { |
3007 | struct ext3_iloc iloc; | 3143 | struct ext3_iloc iloc; |
3008 | 3144 | ||
diff --git a/fs/ext3/super.c b/fs/ext3/super.c index 86e443182de4..f8a5266ea1ff 100644 --- a/fs/ext3/super.c +++ b/fs/ext3/super.c | |||
@@ -1678,12 +1678,6 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent) | |||
1678 | } | 1678 | } |
1679 | 1679 | ||
1680 | if (test_opt(sb, NOBH)) { | 1680 | if (test_opt(sb, NOBH)) { |
1681 | if (sb->s_blocksize_bits != PAGE_CACHE_SHIFT) { | ||
1682 | printk(KERN_WARNING "EXT3-fs: Ignoring nobh option " | ||
1683 | "since filesystem blocksize doesn't match " | ||
1684 | "pagesize\n"); | ||
1685 | clear_opt(sbi->s_mount_opt, NOBH); | ||
1686 | } | ||
1687 | if (!(test_opt(sb, DATA_FLAGS) == EXT3_MOUNT_WRITEBACK_DATA)) { | 1681 | if (!(test_opt(sb, DATA_FLAGS) == EXT3_MOUNT_WRITEBACK_DATA)) { |
1688 | printk(KERN_WARNING "EXT3-fs: Ignoring nobh option - " | 1682 | printk(KERN_WARNING "EXT3-fs: Ignoring nobh option - " |
1689 | "its supported only with writeback mode\n"); | 1683 | "its supported only with writeback mode\n"); |