author     Ryusuke Konishi <konishi.ryusuke@lab.ntt.co.jp>   2009-05-21 12:07:13 -0400
committer  Ryusuke Konishi <konishi.ryusuke@lab.ntt.co.jp>   2009-06-10 10:41:10 -0400
commit     f198dbb9cf580c09644ebdf46846115c6daff14e (patch)
tree       0792f2044153e35bea1c2b93d69ba8045cd86551
parent     9f098900ad34edfe3bcc2498cfa372f588b96c62 (diff)
nilfs2: move get block functions in bmap.c into btree codes
The two get-block functions for btree nodes, nilfs_bmap_get_block() and
nilfs_bmap_get_new_block(), are called only from the btree code.
Relocating them there increases the opportunities for compiler optimization.
Signed-off-by: Ryusuke Konishi <konishi.ryusuke@lab.ntt.co.jp>
-rw-r--r--  fs/nilfs2/bmap.c  | 20
-rw-r--r--  fs/nilfs2/bmap.h  |  6
-rw-r--r--  fs/nilfs2/btree.c | 57
3 files changed, 35 insertions(+), 48 deletions(-)
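Why the relocation can help: once a helper is defined static in the translation unit that holds all of its callers, the compiler is free to inline it and drop the out-of-line copy, something it cannot do across translation units without link-time optimization. The fragment below is only an illustrative sketch of that effect; the function names are hypothetical and are not part of the patch.

/* Illustrative sketch only -- hypothetical names, not from the patch.
 * A file-local (static) helper whose every caller lives in the same
 * translation unit can be inlined by the compiler (e.g. gcc -O2),
 * much like the relocated nilfs_btree_get_block() helpers.
 */
static int example_get(int key)
{
        return key * 2;
}

int example_caller(int key)
{
        /* With inlining, this reduces to "key * 2 + 1" at compile time. */
        return example_get(key) + 1;
}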
diff --git a/fs/nilfs2/bmap.c b/fs/nilfs2/bmap.c
index d7dbedfbd2b4..cf5c5d2cfa00 100644
--- a/fs/nilfs2/bmap.c
+++ b/fs/nilfs2/bmap.c
@@ -417,26 +417,6 @@ void nilfs_bmap_sub_blocks(const struct nilfs_bmap *bmap, int n)
         mark_inode_dirty(bmap->b_inode);
 }
 
-int nilfs_bmap_get_block(const struct nilfs_bmap *bmap, __u64 ptr,
-                         struct buffer_head **bhp)
-{
-        return nilfs_btnode_get(&NILFS_BMAP_I(bmap)->i_btnode_cache,
-                                ptr, 0, bhp, 0);
-}
-
-int nilfs_bmap_get_new_block(const struct nilfs_bmap *bmap, __u64 ptr,
-                             struct buffer_head **bhp)
-{
-        int ret;
-
-        ret = nilfs_btnode_get(&NILFS_BMAP_I(bmap)->i_btnode_cache,
-                                ptr, 0, bhp, 1);
-        if (ret < 0)
-                return ret;
-        set_buffer_nilfs_volatile(*bhp);
-        return 0;
-}
-
 __u64 nilfs_bmap_data_get_key(const struct nilfs_bmap *bmap,
                               const struct buffer_head *bh)
 {
diff --git a/fs/nilfs2/bmap.h b/fs/nilfs2/bmap.h
index 0af3190931ec..978073bf5650 100644
--- a/fs/nilfs2/bmap.h
+++ b/fs/nilfs2/bmap.h
@@ -202,12 +202,6 @@ void nilfs_bmap_add_blocks(const struct nilfs_bmap *, int);
 void nilfs_bmap_sub_blocks(const struct nilfs_bmap *, int);
 
 
-int nilfs_bmap_get_block(const struct nilfs_bmap *, __u64,
-                         struct buffer_head **);
-int nilfs_bmap_get_new_block(const struct nilfs_bmap *, __u64,
-                             struct buffer_head **);
-
-
 /* Assume that bmap semaphore is locked. */
 static inline int nilfs_bmap_dirty(const struct nilfs_bmap *bmap)
 {
diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c
index 61e70d719b4d..63ee35080fbc 100644
--- a/fs/nilfs2/btree.c
+++ b/fs/nilfs2/btree.c
@@ -122,10 +122,29 @@ static void nilfs_btree_clear_path(const struct nilfs_btree *btree,
         }
 }
 
-
 /*
  * B-tree node operations
  */
+static int nilfs_btree_get_block(const struct nilfs_btree *btree, __u64 ptr,
+                                 struct buffer_head **bhp)
+{
+        struct address_space *btnc =
+                &NILFS_BMAP_I((struct nilfs_bmap *)btree)->i_btnode_cache;
+        return nilfs_btnode_get(btnc, ptr, 0, bhp, 0);
+}
+
+static int nilfs_btree_get_new_block(const struct nilfs_btree *btree,
+                                     __u64 ptr, struct buffer_head **bhp)
+{
+        struct address_space *btnc =
+                &NILFS_BMAP_I((struct nilfs_bmap *)btree)->i_btnode_cache;
+        int ret;
+
+        ret = nilfs_btnode_get(btnc, ptr, 0, bhp, 1);
+        if (!ret)
+                set_buffer_nilfs_volatile(*bhp);
+        return ret;
+}
 
 static inline int
 nilfs_btree_node_get_flags(const struct nilfs_btree *btree,
@@ -487,8 +506,7 @@ static int nilfs_btree_do_lookup(const struct nilfs_btree *btree,
         path[level].bp_index = index;
 
         for (level--; level >= minlevel; level--) {
-                ret = nilfs_bmap_get_block(&btree->bt_bmap, ptr,
-                                           &path[level].bp_bh);
+                ret = nilfs_btree_get_block(btree, ptr, &path[level].bp_bh);
                 if (ret < 0)
                         return ret;
                 node = nilfs_btree_get_nonroot_node(btree, path, level);
@@ -534,8 +552,7 @@ static int nilfs_btree_do_lookup_last(const struct nilfs_btree *btree,
         path[level].bp_index = index;
 
         for (level--; level > 0; level--) {
-                ret = nilfs_bmap_get_block(&btree->bt_bmap, ptr,
-                                           &path[level].bp_bh);
+                ret = nilfs_btree_get_block(btree, ptr, &path[level].bp_bh);
                 if (ret < 0)
                         return ret;
                 node = nilfs_btree_get_nonroot_node(btree, path, level);
@@ -923,8 +940,7 @@ static int nilfs_btree_prepare_insert(struct nilfs_btree *btree,
                 if (pindex > 0) {
                         sibptr = nilfs_btree_node_get_ptr(btree, parent,
                                                           pindex - 1);
-                        ret = nilfs_bmap_get_block(&btree->bt_bmap, sibptr,
-                                                   &bh);
+                        ret = nilfs_btree_get_block(btree, sibptr, &bh);
                         if (ret < 0)
                                 goto err_out_child_node;
                         sib = (struct nilfs_btree_node *)bh->b_data;
@@ -943,8 +959,7 @@ static int nilfs_btree_prepare_insert(struct nilfs_btree *btree,
                     nilfs_btree_node_get_nchildren(btree, parent) - 1) {
                         sibptr = nilfs_btree_node_get_ptr(btree, parent,
                                                           pindex + 1);
-                        ret = nilfs_bmap_get_block(&btree->bt_bmap, sibptr,
-                                                   &bh);
+                        ret = nilfs_btree_get_block(btree, sibptr, &bh);
                         if (ret < 0)
                                 goto err_out_child_node;
                         sib = (struct nilfs_btree_node *)bh->b_data;
@@ -965,9 +980,9 @@ static int nilfs_btree_prepare_insert(struct nilfs_btree *btree,
                         &btree->bt_bmap, &path[level].bp_newreq);
                 if (ret < 0)
                         goto err_out_child_node;
-                ret = nilfs_bmap_get_new_block(&btree->bt_bmap,
-                                               path[level].bp_newreq.bpr_ptr,
-                                               &bh);
+                ret = nilfs_btree_get_new_block(btree,
+                                                path[level].bp_newreq.bpr_ptr,
+                                                &bh);
                 if (ret < 0)
                         goto err_out_curr_node;
 
@@ -997,8 +1012,8 @@ static int nilfs_btree_prepare_insert(struct nilfs_btree *btree,
                 &btree->bt_bmap, &path[level].bp_newreq);
         if (ret < 0)
                 goto err_out_child_node;
-        ret = nilfs_bmap_get_new_block(&btree->bt_bmap,
-                                       path[level].bp_newreq.bpr_ptr, &bh);
+        ret = nilfs_btree_get_new_block(btree, path[level].bp_newreq.bpr_ptr,
+                                        &bh);
         if (ret < 0)
                 goto err_out_curr_node;
 
@@ -1320,8 +1335,7 @@ static int nilfs_btree_prepare_delete(struct nilfs_btree *btree,
                         /* left sibling */
                         sibptr = nilfs_btree_node_get_ptr(btree, parent,
                                                           pindex - 1);
-                        ret = nilfs_bmap_get_block(&btree->bt_bmap, sibptr,
-                                                   &bh);
+                        ret = nilfs_btree_get_block(btree, sibptr, &bh);
                         if (ret < 0)
                                 goto err_out_curr_node;
                         sib = (struct nilfs_btree_node *)bh->b_data;
@@ -1342,8 +1356,7 @@ static int nilfs_btree_prepare_delete(struct nilfs_btree *btree,
                         /* right sibling */
                         sibptr = nilfs_btree_node_get_ptr(btree, parent,
                                                           pindex + 1);
-                        ret = nilfs_bmap_get_block(&btree->bt_bmap, sibptr,
-                                                   &bh);
+                        ret = nilfs_btree_get_block(btree, sibptr, &bh);
                         if (ret < 0)
                                 goto err_out_curr_node;
                         sib = (struct nilfs_btree_node *)bh->b_data;
@@ -1500,7 +1513,7 @@ static int nilfs_btree_check_delete(struct nilfs_bmap *bmap, __u64 key)
         if (nchildren > 1)
                 return 0;
         ptr = nilfs_btree_node_get_ptr(btree, root, nchildren - 1);
-        ret = nilfs_bmap_get_block(bmap, ptr, &bh);
+        ret = nilfs_btree_get_block(btree, ptr, &bh);
         if (ret < 0)
                 return ret;
         node = (struct nilfs_btree_node *)bh->b_data;
@@ -1541,7 +1554,7 @@ static int nilfs_btree_gather_data(struct nilfs_bmap *bmap,
         nchildren = nilfs_btree_node_get_nchildren(btree, root);
         WARN_ON(nchildren > 1);
         ptr = nilfs_btree_node_get_ptr(btree, root, nchildren - 1);
-        ret = nilfs_bmap_get_block(bmap, ptr, &bh);
+        ret = nilfs_btree_get_block(btree, ptr, &bh);
         if (ret < 0)
                 return ret;
         node = (struct nilfs_btree_node *)bh->b_data;
@@ -1598,7 +1611,7 @@ nilfs_btree_prepare_convert_and_insert(struct nilfs_bmap *bmap, __u64 key,
         if (ret < 0)
                 goto err_out_dreq;
 
-        ret = nilfs_bmap_get_new_block(bmap, nreq->bpr_ptr, &bh);
+        ret = nilfs_btree_get_new_block(btree, nreq->bpr_ptr, &bh);
         if (ret < 0)
                 goto err_out_nreq;
 
@@ -2167,7 +2180,7 @@ static int nilfs_btree_mark(struct nilfs_bmap *bmap, __u64 key, int level)
                 WARN_ON(ret == -ENOENT);
                 goto out;
         }
-        ret = nilfs_bmap_get_block(&btree->bt_bmap, ptr, &bh);
+        ret = nilfs_btree_get_block(btree, ptr, &bh);
         if (ret < 0) {
                 WARN_ON(ret == -ENOENT);
                 goto out;