about summary refs log tree commit diff stats
path: root/fs/nilfs2/btree.c
diff options
context:
space:
mode:
authorRyusuke Konishi <konishi.ryusuke@lab.ntt.co.jp>2009-05-23 14:25:44 -0400
committerRyusuke Konishi <konishi.ryusuke@lab.ntt.co.jp>2009-06-10 10:41:10 -0400
commitd4b961576df2769b936bd967b01e8c607c3c9ad8 (patch)
tree3d5d31cb7b4ba31a5b8232d0cd0edac2cf368fae /fs/nilfs2/btree.c
parent3033342a0b76048e32ce1faebfa85cf8f1aa93b5 (diff)
nilfs2: remove bmap pointer operations
Previously, the bmap codes of nilfs used three types of function tables. The abuse of indirect function calls decreased source readability and suffered many indirect jumps which would confuse branch prediction of processors. This eliminates one type of the function tables, nilfs_bmap_ptr_operations, which was used to dispatch low level pointer operations of the nilfs bmap. This adds a new integer variable "b_ptr_type" to nilfs_bmap struct, and uses the value to select the pointer operations. Signed-off-by: Ryusuke Konishi <konishi.ryusuke@lab.ntt.co.jp>
Diffstat (limited to 'fs/nilfs2/btree.c')
-rw-r--r-- fs/nilfs2/btree.c 100
1 file changed, 46 insertions(+), 54 deletions(-)
diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c
index f5a0ec64e1a..20e3fd0f4d4 100644
--- a/fs/nilfs2/btree.c
+++ b/fs/nilfs2/btree.c
@@ -917,8 +917,8 @@ static int nilfs_btree_prepare_insert(struct nilfs_btree *btree,
917 path[level].bp_newreq.bpr_ptr = 917 path[level].bp_newreq.bpr_ptr =
918 btree->bt_ops->btop_find_target(btree, path, key); 918 btree->bt_ops->btop_find_target(btree, path, key);
919 919
920 ret = btree->bt_bmap.b_pops->bpop_prepare_alloc_ptr( 920 ret = nilfs_bmap_prepare_alloc_ptr(&btree->bt_bmap,
921 &btree->bt_bmap, &path[level].bp_newreq); 921 &path[level].bp_newreq);
922 if (ret < 0) 922 if (ret < 0)
923 goto err_out_data; 923 goto err_out_data;
924 924
@@ -976,8 +976,8 @@ static int nilfs_btree_prepare_insert(struct nilfs_btree *btree,
976 /* split */ 976 /* split */
977 path[level].bp_newreq.bpr_ptr = 977 path[level].bp_newreq.bpr_ptr =
978 path[level - 1].bp_newreq.bpr_ptr + 1; 978 path[level - 1].bp_newreq.bpr_ptr + 1;
979 ret = btree->bt_bmap.b_pops->bpop_prepare_alloc_ptr( 979 ret = nilfs_bmap_prepare_alloc_ptr(&btree->bt_bmap,
980 &btree->bt_bmap, &path[level].bp_newreq); 980 &path[level].bp_newreq);
981 if (ret < 0) 981 if (ret < 0)
982 goto err_out_child_node; 982 goto err_out_child_node;
983 ret = nilfs_btree_get_new_block(btree, 983 ret = nilfs_btree_get_new_block(btree,
@@ -1008,8 +1008,8 @@ static int nilfs_btree_prepare_insert(struct nilfs_btree *btree,
1008 1008
1009 /* grow */ 1009 /* grow */
1010 path[level].bp_newreq.bpr_ptr = path[level - 1].bp_newreq.bpr_ptr + 1; 1010 path[level].bp_newreq.bpr_ptr = path[level - 1].bp_newreq.bpr_ptr + 1;
1011 ret = btree->bt_bmap.b_pops->bpop_prepare_alloc_ptr( 1011 ret = nilfs_bmap_prepare_alloc_ptr(&btree->bt_bmap,
1012 &btree->bt_bmap, &path[level].bp_newreq); 1012 &path[level].bp_newreq);
1013 if (ret < 0) 1013 if (ret < 0)
1014 goto err_out_child_node; 1014 goto err_out_child_node;
1015 ret = nilfs_btree_get_new_block(btree, path[level].bp_newreq.bpr_ptr, 1015 ret = nilfs_btree_get_new_block(btree, path[level].bp_newreq.bpr_ptr,
@@ -1037,18 +1037,16 @@ static int nilfs_btree_prepare_insert(struct nilfs_btree *btree,
1037 1037
1038 /* error */ 1038 /* error */
1039 err_out_curr_node: 1039 err_out_curr_node:
1040 btree->bt_bmap.b_pops->bpop_abort_alloc_ptr(&btree->bt_bmap, 1040 nilfs_bmap_abort_alloc_ptr(&btree->bt_bmap, &path[level].bp_newreq);
1041 &path[level].bp_newreq);
1042 err_out_child_node: 1041 err_out_child_node:
1043 for (level--; level > NILFS_BTREE_LEVEL_DATA; level--) { 1042 for (level--; level > NILFS_BTREE_LEVEL_DATA; level--) {
1044 nilfs_btnode_delete(path[level].bp_sib_bh); 1043 nilfs_btnode_delete(path[level].bp_sib_bh);
1045 btree->bt_bmap.b_pops->bpop_abort_alloc_ptr( 1044 nilfs_bmap_abort_alloc_ptr(&btree->bt_bmap,
1046 &btree->bt_bmap, &path[level].bp_newreq); 1045 &path[level].bp_newreq);
1047 1046
1048 } 1047 }
1049 1048
1050 btree->bt_bmap.b_pops->bpop_abort_alloc_ptr(&btree->bt_bmap, 1049 nilfs_bmap_abort_alloc_ptr(&btree->bt_bmap, &path[level].bp_newreq);
1051 &path[level].bp_newreq);
1052 err_out_data: 1050 err_out_data:
1053 *levelp = level; 1051 *levelp = level;
1054 stats->bs_nblocks = 0; 1052 stats->bs_nblocks = 0;
@@ -1067,8 +1065,8 @@ static void nilfs_btree_commit_insert(struct nilfs_btree *btree,
1067 btree->bt_ops->btop_set_target(btree, key, ptr); 1065 btree->bt_ops->btop_set_target(btree, key, ptr);
1068 1066
1069 for (level = NILFS_BTREE_LEVEL_NODE_MIN; level <= maxlevel; level++) { 1067 for (level = NILFS_BTREE_LEVEL_NODE_MIN; level <= maxlevel; level++) {
1070 btree->bt_bmap.b_pops->bpop_commit_alloc_ptr( 1068 nilfs_bmap_commit_alloc_ptr(&btree->bt_bmap,
1071 &btree->bt_bmap, &path[level - 1].bp_newreq); 1069 &path[level - 1].bp_newreq);
1072 path[level].bp_op(btree, path, level, &key, &ptr); 1070 path[level].bp_op(btree, path, level, &key, &ptr);
1073 } 1071 }
1074 1072
@@ -1312,12 +1310,10 @@ static int nilfs_btree_prepare_delete(struct nilfs_btree *btree,
1312 path[level].bp_oldreq.bpr_ptr = 1310 path[level].bp_oldreq.bpr_ptr =
1313 nilfs_btree_node_get_ptr(btree, node, 1311 nilfs_btree_node_get_ptr(btree, node,
1314 path[level].bp_index); 1312 path[level].bp_index);
1315 if (btree->bt_bmap.b_pops->bpop_prepare_end_ptr != NULL) { 1313 ret = nilfs_bmap_prepare_end_ptr(&btree->bt_bmap,
1316 ret = btree->bt_bmap.b_pops->bpop_prepare_end_ptr( 1314 &path[level].bp_oldreq);
1317 &btree->bt_bmap, &path[level].bp_oldreq); 1315 if (ret < 0)
1318 if (ret < 0) 1316 goto err_out_child_node;
1319 goto err_out_child_node;
1320 }
1321 1317
1322 if (nilfs_btree_node_get_nchildren(btree, node) > 1318 if (nilfs_btree_node_get_nchildren(btree, node) >
1323 nilfs_btree_node_nchildren_min(btree, node)) { 1319 nilfs_btree_node_nchildren_min(btree, node)) {
@@ -1391,12 +1387,12 @@ static int nilfs_btree_prepare_delete(struct nilfs_btree *btree,
1391 node = nilfs_btree_get_root(btree); 1387 node = nilfs_btree_get_root(btree);
1392 path[level].bp_oldreq.bpr_ptr = 1388 path[level].bp_oldreq.bpr_ptr =
1393 nilfs_btree_node_get_ptr(btree, node, path[level].bp_index); 1389 nilfs_btree_node_get_ptr(btree, node, path[level].bp_index);
1394 if (btree->bt_bmap.b_pops->bpop_prepare_end_ptr != NULL) { 1390
1395 ret = btree->bt_bmap.b_pops->bpop_prepare_end_ptr( 1391 ret = nilfs_bmap_prepare_end_ptr(&btree->bt_bmap,
1396 &btree->bt_bmap, &path[level].bp_oldreq); 1392 &path[level].bp_oldreq);
1397 if (ret < 0) 1393 if (ret < 0)
1398 goto err_out_child_node; 1394 goto err_out_child_node;
1399 } 1395
1400 /* child of the root node is deleted */ 1396 /* child of the root node is deleted */
1401 path[level].bp_op = nilfs_btree_do_delete; 1397 path[level].bp_op = nilfs_btree_do_delete;
1402 stats->bs_nblocks++; 1398 stats->bs_nblocks++;
@@ -1408,15 +1404,12 @@ static int nilfs_btree_prepare_delete(struct nilfs_btree *btree,
1408 1404
1409 /* error */ 1405 /* error */
1410 err_out_curr_node: 1406 err_out_curr_node:
1411 if (btree->bt_bmap.b_pops->bpop_abort_end_ptr != NULL) 1407 nilfs_bmap_abort_end_ptr(&btree->bt_bmap, &path[level].bp_oldreq);
1412 btree->bt_bmap.b_pops->bpop_abort_end_ptr(
1413 &btree->bt_bmap, &path[level].bp_oldreq);
1414 err_out_child_node: 1408 err_out_child_node:
1415 for (level--; level >= NILFS_BTREE_LEVEL_NODE_MIN; level--) { 1409 for (level--; level >= NILFS_BTREE_LEVEL_NODE_MIN; level--) {
1416 brelse(path[level].bp_sib_bh); 1410 brelse(path[level].bp_sib_bh);
1417 if (btree->bt_bmap.b_pops->bpop_abort_end_ptr != NULL) 1411 nilfs_bmap_abort_end_ptr(&btree->bt_bmap,
1418 btree->bt_bmap.b_pops->bpop_abort_end_ptr( 1412 &path[level].bp_oldreq);
1419 &btree->bt_bmap, &path[level].bp_oldreq);
1420 } 1413 }
1421 *levelp = level; 1414 *levelp = level;
1422 stats->bs_nblocks = 0; 1415 stats->bs_nblocks = 0;
@@ -1430,9 +1423,8 @@ static void nilfs_btree_commit_delete(struct nilfs_btree *btree,
1430 int level; 1423 int level;
1431 1424
1432 for (level = NILFS_BTREE_LEVEL_NODE_MIN; level <= maxlevel; level++) { 1425 for (level = NILFS_BTREE_LEVEL_NODE_MIN; level <= maxlevel; level++) {
1433 if (btree->bt_bmap.b_pops->bpop_commit_end_ptr != NULL) 1426 nilfs_bmap_commit_end_ptr(&btree->bt_bmap,
1434 btree->bt_bmap.b_pops->bpop_commit_end_ptr( 1427 &path[level].bp_oldreq);
1435 &btree->bt_bmap, &path[level].bp_oldreq);
1436 path[level].bp_op(btree, path, level, NULL, NULL); 1428 path[level].bp_op(btree, path, level, NULL, NULL);
1437 } 1429 }
1438 1430
@@ -1597,7 +1589,7 @@ nilfs_btree_prepare_convert_and_insert(struct nilfs_bmap *bmap, __u64 key,
1597 if (btree->bt_ops->btop_find_target != NULL) 1589 if (btree->bt_ops->btop_find_target != NULL)
1598 dreq->bpr_ptr 1590 dreq->bpr_ptr
1599 = btree->bt_ops->btop_find_target(btree, NULL, key); 1591 = btree->bt_ops->btop_find_target(btree, NULL, key);
1600 ret = bmap->b_pops->bpop_prepare_alloc_ptr(bmap, dreq); 1592 ret = nilfs_bmap_prepare_alloc_ptr(bmap, dreq);
1601 if (ret < 0) 1593 if (ret < 0)
1602 return ret; 1594 return ret;
1603 1595
@@ -1605,7 +1597,7 @@ nilfs_btree_prepare_convert_and_insert(struct nilfs_bmap *bmap, __u64 key,
1605 stats->bs_nblocks++; 1597 stats->bs_nblocks++;
1606 if (nreq != NULL) { 1598 if (nreq != NULL) {
1607 nreq->bpr_ptr = dreq->bpr_ptr + 1; 1599 nreq->bpr_ptr = dreq->bpr_ptr + 1;
1608 ret = bmap->b_pops->bpop_prepare_alloc_ptr(bmap, nreq); 1600 ret = nilfs_bmap_prepare_alloc_ptr(bmap, nreq);
1609 if (ret < 0) 1601 if (ret < 0)
1610 goto err_out_dreq; 1602 goto err_out_dreq;
1611 1603
@@ -1622,9 +1614,9 @@ nilfs_btree_prepare_convert_and_insert(struct nilfs_bmap *bmap, __u64 key,
1622 1614
1623 /* error */ 1615 /* error */
1624 err_out_nreq: 1616 err_out_nreq:
1625 bmap->b_pops->bpop_abort_alloc_ptr(bmap, nreq); 1617 nilfs_bmap_abort_alloc_ptr(bmap, nreq);
1626 err_out_dreq: 1618 err_out_dreq:
1627 bmap->b_pops->bpop_abort_alloc_ptr(bmap, dreq); 1619 nilfs_bmap_abort_alloc_ptr(bmap, dreq);
1628 stats->bs_nblocks = 0; 1620 stats->bs_nblocks = 0;
1629 return ret; 1621 return ret;
1630 1622
@@ -1654,8 +1646,8 @@ nilfs_btree_commit_convert_and_insert(struct nilfs_bmap *bmap,
1654 btree = (struct nilfs_btree *)bmap; 1646 btree = (struct nilfs_btree *)bmap;
1655 nilfs_btree_init(bmap); 1647 nilfs_btree_init(bmap);
1656 if (nreq != NULL) { 1648 if (nreq != NULL) {
1657 bmap->b_pops->bpop_commit_alloc_ptr(bmap, dreq); 1649 nilfs_bmap_commit_alloc_ptr(bmap, dreq);
1658 bmap->b_pops->bpop_commit_alloc_ptr(bmap, nreq); 1650 nilfs_bmap_commit_alloc_ptr(bmap, nreq);
1659 1651
1660 /* create child node at level 1 */ 1652 /* create child node at level 1 */
1661 lock_buffer(bh); 1653 lock_buffer(bh);
@@ -1677,7 +1669,7 @@ nilfs_btree_commit_convert_and_insert(struct nilfs_bmap *bmap,
1677 nilfs_btree_node_init(btree, node, NILFS_BTREE_NODE_ROOT, 1669 nilfs_btree_node_init(btree, node, NILFS_BTREE_NODE_ROOT,
1678 2, 1, &keys[0], &tmpptr); 1670 2, 1, &keys[0], &tmpptr);
1679 } else { 1671 } else {
1680 bmap->b_pops->bpop_commit_alloc_ptr(bmap, dreq); 1672 nilfs_bmap_commit_alloc_ptr(bmap, dreq);
1681 1673
1682 /* create root node at level 1 */ 1674 /* create root node at level 1 */
1683 node = nilfs_btree_get_root(btree); 1675 node = nilfs_btree_get_root(btree);
@@ -1758,9 +1750,9 @@ static int nilfs_btree_prepare_update_v(struct nilfs_btree *btree,
1758 nilfs_btree_node_get_ptr(btree, parent, 1750 nilfs_btree_node_get_ptr(btree, parent,
1759 path[level + 1].bp_index); 1751 path[level + 1].bp_index);
1760 path[level].bp_newreq.bpr_ptr = path[level].bp_oldreq.bpr_ptr + 1; 1752 path[level].bp_newreq.bpr_ptr = path[level].bp_oldreq.bpr_ptr + 1;
1761 ret = nilfs_bmap_prepare_update(&btree->bt_bmap, 1753 ret = nilfs_bmap_prepare_update_v(&btree->bt_bmap,
1762 &path[level].bp_oldreq, 1754 &path[level].bp_oldreq,
1763 &path[level].bp_newreq); 1755 &path[level].bp_newreq);
1764 if (ret < 0) 1756 if (ret < 0)
1765 return ret; 1757 return ret;
1766 1758
@@ -1772,9 +1764,9 @@ static int nilfs_btree_prepare_update_v(struct nilfs_btree *btree,
1772 &NILFS_BMAP_I(&btree->bt_bmap)->i_btnode_cache, 1764 &NILFS_BMAP_I(&btree->bt_bmap)->i_btnode_cache,
1773 &path[level].bp_ctxt); 1765 &path[level].bp_ctxt);
1774 if (ret < 0) { 1766 if (ret < 0) {
1775 nilfs_bmap_abort_update(&btree->bt_bmap, 1767 nilfs_bmap_abort_update_v(&btree->bt_bmap,
1776 &path[level].bp_oldreq, 1768 &path[level].bp_oldreq,
1777 &path[level].bp_newreq); 1769 &path[level].bp_newreq);
1778 return ret; 1770 return ret;
1779 } 1771 }
1780 } 1772 }
@@ -1788,9 +1780,9 @@ static void nilfs_btree_commit_update_v(struct nilfs_btree *btree,
1788{ 1780{
1789 struct nilfs_btree_node *parent; 1781 struct nilfs_btree_node *parent;
1790 1782
1791 nilfs_bmap_commit_update(&btree->bt_bmap, 1783 nilfs_bmap_commit_update_v(&btree->bt_bmap,
1792 &path[level].bp_oldreq, 1784 &path[level].bp_oldreq,
1793 &path[level].bp_newreq); 1785 &path[level].bp_newreq);
1794 1786
1795 if (buffer_nilfs_node(path[level].bp_bh)) { 1787 if (buffer_nilfs_node(path[level].bp_bh)) {
1796 nilfs_btnode_commit_change_key( 1788 nilfs_btnode_commit_change_key(
@@ -1809,9 +1801,9 @@ static void nilfs_btree_abort_update_v(struct nilfs_btree *btree,
1809 struct nilfs_btree_path *path, 1801 struct nilfs_btree_path *path,
1810 int level) 1802 int level)
1811{ 1803{
1812 nilfs_bmap_abort_update(&btree->bt_bmap, 1804 nilfs_bmap_abort_update_v(&btree->bt_bmap,
1813 &path[level].bp_oldreq, 1805 &path[level].bp_oldreq,
1814 &path[level].bp_newreq); 1806 &path[level].bp_newreq);
1815 if (buffer_nilfs_node(path[level].bp_bh)) 1807 if (buffer_nilfs_node(path[level].bp_bh))
1816 nilfs_btnode_abort_change_key( 1808 nilfs_btnode_abort_change_key(
1817 &NILFS_BMAP_I(&btree->bt_bmap)->i_btnode_cache, 1809 &NILFS_BMAP_I(&btree->bt_bmap)->i_btnode_cache,