author    Ryusuke Konishi <konishi.ryusuke@lab.ntt.co.jp>  2009-08-15 02:34:33 -0400
committer Ryusuke Konishi <konishi.ryusuke@lab.ntt.co.jp>  2009-09-14 05:27:16 -0400
commit    2e0c2c73923fed27337039ddfd69985e6c4b91fe (patch)
tree      a6c4a318776f5804f53994acad7254c7404418f0 /fs/nilfs2/btree.c
parent    bd8169efae8bdd292675c386010f6b35f0771057 (diff)
nilfs2: allow btree code to directly call dat operations
The current btree code is written so that btree functions call dat operations through wrapper functions in bmap.c when they allocate, free, or modify virtual block addresses. This abstraction requires additional function calls and leads to frequent calls of nilfs_bmap_get_dat(), which every wrapper function uses. This patch removes the wrapper functions and lets btree.c and direct.c call the dat operations directly, which increases the opportunity for compiler optimization.

Signed-off-by: Ryusuke Konishi <konishi.ryusuke@lab.ntt.co.jp>
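For illustration only (this snippet is not part of the patch): after the change, a caller obtains the DAT inode once and threads it through the bmap helpers instead of each helper fetching it itself. A minimal sketch of that pattern, assuming a hypothetical wrapper example_prepare() and using only functions that appear in the diff below:

	static int example_prepare(struct nilfs_btree *btree,
				   struct nilfs_btree_path *path, int level)
	{
		struct inode *dat = NULL;

		/* A DAT inode exists only for bmaps that use virtual block
		 * numbers (VBN); other bmaps keep dat == NULL.
		 */
		if (NILFS_BMAP_USE_VBN(&btree->bt_bmap))
			dat = nilfs_bmap_get_dat(&btree->bt_bmap);

		/* The helper presumably skips the DAT operations when dat is
		 * NULL, as the non-VBN call sites in the diff pass NULL.
		 */
		return nilfs_bmap_prepare_alloc_ptr(&btree->bt_bmap,
						    &path[level].bp_newreq, dat);
	}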
Diffstat (limited to 'fs/nilfs2/btree.c')
 fs/nilfs2/btree.c | 151 ++++++++++++++++++++++++++++++---------------------
 1 file changed, 84 insertions(+), 67 deletions(-)
diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c
index 115b157d508b..e25b507a474f 100644
--- a/fs/nilfs2/btree.c
+++ b/fs/nilfs2/btree.c
@@ -940,17 +940,20 @@ static int nilfs_btree_prepare_insert(struct nilfs_btree *btree,
 	struct nilfs_btree_node *node, *parent, *sib;
 	__u64 sibptr;
 	int pindex, level, ret;
+	struct inode *dat = NULL;
 
 	stats->bs_nblocks = 0;
 	level = NILFS_BTREE_LEVEL_DATA;
 
 	/* allocate a new ptr for data block */
-	if (NILFS_BMAP_USE_VBN(&btree->bt_bmap))
+	if (NILFS_BMAP_USE_VBN(&btree->bt_bmap)) {
 		path[level].bp_newreq.bpr_ptr =
 			nilfs_btree_find_target_v(btree, path, key);
+		dat = nilfs_bmap_get_dat(&btree->bt_bmap);
+	}
 
 	ret = nilfs_bmap_prepare_alloc_ptr(&btree->bt_bmap,
-					   &path[level].bp_newreq);
+					   &path[level].bp_newreq, dat);
 	if (ret < 0)
 		goto err_out_data;
 
@@ -1009,7 +1012,7 @@ static int nilfs_btree_prepare_insert(struct nilfs_btree *btree,
 			path[level].bp_newreq.bpr_ptr =
 				path[level - 1].bp_newreq.bpr_ptr + 1;
 			ret = nilfs_bmap_prepare_alloc_ptr(&btree->bt_bmap,
-							   &path[level].bp_newreq);
+							   &path[level].bp_newreq, dat);
 			if (ret < 0)
 				goto err_out_child_node;
 			ret = nilfs_btree_get_new_block(btree,
@@ -1041,7 +1044,7 @@ static int nilfs_btree_prepare_insert(struct nilfs_btree *btree,
 	/* grow */
 	path[level].bp_newreq.bpr_ptr = path[level - 1].bp_newreq.bpr_ptr + 1;
 	ret = nilfs_bmap_prepare_alloc_ptr(&btree->bt_bmap,
-					   &path[level].bp_newreq);
+					   &path[level].bp_newreq, dat);
 	if (ret < 0)
 		goto err_out_child_node;
 	ret = nilfs_btree_get_new_block(btree, path[level].bp_newreq.bpr_ptr,
@@ -1069,16 +1072,18 @@ static int nilfs_btree_prepare_insert(struct nilfs_btree *btree,
 
 	/* error */
  err_out_curr_node:
-	nilfs_bmap_abort_alloc_ptr(&btree->bt_bmap, &path[level].bp_newreq);
+	nilfs_bmap_abort_alloc_ptr(&btree->bt_bmap, &path[level].bp_newreq,
+				   dat);
  err_out_child_node:
 	for (level--; level > NILFS_BTREE_LEVEL_DATA; level--) {
 		nilfs_btnode_delete(path[level].bp_sib_bh);
 		nilfs_bmap_abort_alloc_ptr(&btree->bt_bmap,
-					   &path[level].bp_newreq);
+					   &path[level].bp_newreq, dat);
 
 	}
 
-	nilfs_bmap_abort_alloc_ptr(&btree->bt_bmap, &path[level].bp_newreq);
+	nilfs_bmap_abort_alloc_ptr(&btree->bt_bmap, &path[level].bp_newreq,
+				   dat);
  err_out_data:
 	*levelp = level;
 	stats->bs_nblocks = 0;
@@ -1089,16 +1094,19 @@ static void nilfs_btree_commit_insert(struct nilfs_btree *btree,
 				      struct nilfs_btree_path *path,
 				      int maxlevel, __u64 key, __u64 ptr)
 {
+	struct inode *dat = NULL;
 	int level;
 
 	set_buffer_nilfs_volatile((struct buffer_head *)((unsigned long)ptr));
 	ptr = path[NILFS_BTREE_LEVEL_DATA].bp_newreq.bpr_ptr;
-	if (NILFS_BMAP_USE_VBN(&btree->bt_bmap))
+	if (NILFS_BMAP_USE_VBN(&btree->bt_bmap)) {
 		nilfs_btree_set_target_v(btree, key, ptr);
+		dat = nilfs_bmap_get_dat(&btree->bt_bmap);
+	}
 
 	for (level = NILFS_BTREE_LEVEL_NODE_MIN; level <= maxlevel; level++) {
 		nilfs_bmap_commit_alloc_ptr(&btree->bt_bmap,
-					    &path[level - 1].bp_newreq);
+					    &path[level - 1].bp_newreq, dat);
 		path[level].bp_op(btree, path, level, &key, &ptr);
 	}
 
@@ -1326,7 +1334,8 @@ static void nilfs_btree_shrink(struct nilfs_btree *btree,
 static int nilfs_btree_prepare_delete(struct nilfs_btree *btree,
 				      struct nilfs_btree_path *path,
 				      int *levelp,
-				      struct nilfs_bmap_stats *stats)
+				      struct nilfs_bmap_stats *stats,
+				      struct inode *dat)
 {
 	struct buffer_head *bh;
 	struct nilfs_btree_node *node, *parent, *sib;
@@ -1343,7 +1352,7 @@ static int nilfs_btree_prepare_delete(struct nilfs_btree *btree,
 			nilfs_btree_node_get_ptr(btree, node,
 						 path[level].bp_index);
 		ret = nilfs_bmap_prepare_end_ptr(&btree->bt_bmap,
-						 &path[level].bp_oldreq);
+						 &path[level].bp_oldreq, dat);
 		if (ret < 0)
 			goto err_out_child_node;
 
@@ -1421,7 +1430,7 @@ static int nilfs_btree_prepare_delete(struct nilfs_btree *btree,
 		nilfs_btree_node_get_ptr(btree, node, path[level].bp_index);
 
 	ret = nilfs_bmap_prepare_end_ptr(&btree->bt_bmap,
-					 &path[level].bp_oldreq);
+					 &path[level].bp_oldreq, dat);
 	if (ret < 0)
 		goto err_out_child_node;
 
@@ -1436,12 +1445,12 @@ static int nilfs_btree_prepare_delete(struct nilfs_btree *btree,
 
 	/* error */
  err_out_curr_node:
-	nilfs_bmap_abort_end_ptr(&btree->bt_bmap, &path[level].bp_oldreq);
+	nilfs_bmap_abort_end_ptr(&btree->bt_bmap, &path[level].bp_oldreq, dat);
  err_out_child_node:
 	for (level--; level >= NILFS_BTREE_LEVEL_NODE_MIN; level--) {
 		brelse(path[level].bp_sib_bh);
 		nilfs_bmap_abort_end_ptr(&btree->bt_bmap,
-					 &path[level].bp_oldreq);
+					 &path[level].bp_oldreq, dat);
 	}
 	*levelp = level;
 	stats->bs_nblocks = 0;
@@ -1450,13 +1459,13 @@ static int nilfs_btree_prepare_delete(struct nilfs_btree *btree,
 
 static void nilfs_btree_commit_delete(struct nilfs_btree *btree,
 				      struct nilfs_btree_path *path,
-				      int maxlevel)
+				      int maxlevel, struct inode *dat)
 {
 	int level;
 
 	for (level = NILFS_BTREE_LEVEL_NODE_MIN; level <= maxlevel; level++) {
 		nilfs_bmap_commit_end_ptr(&btree->bt_bmap,
-					  &path[level].bp_oldreq);
+					  &path[level].bp_oldreq, dat);
 		path[level].bp_op(btree, path, level, NULL, NULL);
 	}
 
@@ -1470,6 +1479,7 @@ static int nilfs_btree_delete(struct nilfs_bmap *bmap, __u64 key)
 	struct nilfs_btree *btree;
 	struct nilfs_btree_path *path;
 	struct nilfs_bmap_stats stats;
+	struct inode *dat;
 	int level, ret;
 
 	btree = (struct nilfs_btree *)bmap;
@@ -1482,10 +1492,14 @@ static int nilfs_btree_delete(struct nilfs_bmap *bmap, __u64 key)
 	if (ret < 0)
 		goto out;
 
-	ret = nilfs_btree_prepare_delete(btree, path, &level, &stats);
+
+	dat = NILFS_BMAP_USE_VBN(&btree->bt_bmap) ?
+		nilfs_bmap_get_dat(&btree->bt_bmap) : NULL;
+
+	ret = nilfs_btree_prepare_delete(btree, path, &level, &stats, dat);
 	if (ret < 0)
 		goto out;
-	nilfs_btree_commit_delete(btree, path, level);
+	nilfs_btree_commit_delete(btree, path, level, dat);
 	nilfs_bmap_sub_blocks(bmap, stats.bs_nblocks);
 
 out:
@@ -1610,18 +1624,20 @@ nilfs_btree_prepare_convert_and_insert(struct nilfs_bmap *bmap, __u64 key,
 				       struct nilfs_bmap_stats *stats)
 {
 	struct buffer_head *bh;
-	struct nilfs_btree *btree;
+	struct nilfs_btree *btree = (struct nilfs_btree *)bmap;
+	struct inode *dat = NULL;
 	int ret;
 
-	btree = (struct nilfs_btree *)bmap;
 	stats->bs_nblocks = 0;
 
 	/* for data */
 	/* cannot find near ptr */
-	if (NILFS_BMAP_USE_VBN(bmap))
+	if (NILFS_BMAP_USE_VBN(bmap)) {
 		dreq->bpr_ptr = nilfs_btree_find_target_v(btree, NULL, key);
+		dat = nilfs_bmap_get_dat(bmap);
+	}
 
-	ret = nilfs_bmap_prepare_alloc_ptr(bmap, dreq);
+	ret = nilfs_bmap_prepare_alloc_ptr(bmap, dreq, dat);
 	if (ret < 0)
 		return ret;
 
@@ -1629,7 +1645,7 @@ nilfs_btree_prepare_convert_and_insert(struct nilfs_bmap *bmap, __u64 key,
 	stats->bs_nblocks++;
 	if (nreq != NULL) {
 		nreq->bpr_ptr = dreq->bpr_ptr + 1;
-		ret = nilfs_bmap_prepare_alloc_ptr(bmap, nreq);
+		ret = nilfs_bmap_prepare_alloc_ptr(bmap, nreq, dat);
 		if (ret < 0)
 			goto err_out_dreq;
 
@@ -1646,9 +1662,9 @@ nilfs_btree_prepare_convert_and_insert(struct nilfs_bmap *bmap, __u64 key,
 
 	/* error */
  err_out_nreq:
-	nilfs_bmap_abort_alloc_ptr(bmap, nreq);
+	nilfs_bmap_abort_alloc_ptr(bmap, nreq, dat);
  err_out_dreq:
-	nilfs_bmap_abort_alloc_ptr(bmap, dreq);
+	nilfs_bmap_abort_alloc_ptr(bmap, dreq, dat);
 	stats->bs_nblocks = 0;
 	return ret;
 
@@ -1663,8 +1679,9 @@ nilfs_btree_commit_convert_and_insert(struct nilfs_bmap *bmap,
 				      union nilfs_bmap_ptr_req *nreq,
 				      struct buffer_head *bh)
 {
-	struct nilfs_btree *btree;
+	struct nilfs_btree *btree = (struct nilfs_btree *)bmap;
 	struct nilfs_btree_node *node;
+	struct inode *dat;
 	__u64 tmpptr;
 
 	/* free resources */
@@ -1675,11 +1692,11 @@ nilfs_btree_commit_convert_and_insert(struct nilfs_bmap *bmap,
 	set_buffer_nilfs_volatile((struct buffer_head *)((unsigned long)ptr));
 
 	/* convert and insert */
-	btree = (struct nilfs_btree *)bmap;
+	dat = NILFS_BMAP_USE_VBN(bmap) ? nilfs_bmap_get_dat(bmap) : NULL;
 	nilfs_btree_init(bmap);
 	if (nreq != NULL) {
-		nilfs_bmap_commit_alloc_ptr(bmap, dreq);
-		nilfs_bmap_commit_alloc_ptr(bmap, nreq);
+		nilfs_bmap_commit_alloc_ptr(bmap, dreq, dat);
+		nilfs_bmap_commit_alloc_ptr(bmap, nreq, dat);
 
 		/* create child node at level 1 */
 		lock_buffer(bh);
@@ -1701,7 +1718,7 @@ nilfs_btree_commit_convert_and_insert(struct nilfs_bmap *bmap,
 		nilfs_btree_node_init(btree, node, NILFS_BTREE_NODE_ROOT,
 				      2, 1, &keys[0], &tmpptr);
 	} else {
-		nilfs_bmap_commit_alloc_ptr(bmap, dreq);
+		nilfs_bmap_commit_alloc_ptr(bmap, dreq, dat);
 
 		/* create root node at level 1 */
 		node = nilfs_btree_get_root(btree);
@@ -1772,7 +1789,7 @@ static int nilfs_btree_propagate_p(struct nilfs_btree *btree,
 
 static int nilfs_btree_prepare_update_v(struct nilfs_btree *btree,
 					struct nilfs_btree_path *path,
-					int level)
+					int level, struct inode *dat)
 {
 	struct nilfs_btree_node *parent;
 	int ret;
@@ -1782,9 +1799,8 @@ static int nilfs_btree_prepare_update_v(struct nilfs_btree *btree,
 		nilfs_btree_node_get_ptr(btree, parent,
 					 path[level + 1].bp_index);
 	path[level].bp_newreq.bpr_ptr = path[level].bp_oldreq.bpr_ptr + 1;
-	ret = nilfs_bmap_prepare_update_v(&btree->bt_bmap,
-					  &path[level].bp_oldreq,
-					  &path[level].bp_newreq);
+	ret = nilfs_dat_prepare_update(dat, &path[level].bp_oldreq.bpr_req,
+				       &path[level].bp_newreq.bpr_req);
 	if (ret < 0)
 		return ret;
 
@@ -1796,9 +1812,9 @@ static int nilfs_btree_prepare_update_v(struct nilfs_btree *btree,
 			&NILFS_BMAP_I(&btree->bt_bmap)->i_btnode_cache,
 			&path[level].bp_ctxt);
 		if (ret < 0) {
-			nilfs_bmap_abort_update_v(&btree->bt_bmap,
-						  &path[level].bp_oldreq,
-						  &path[level].bp_newreq);
+			nilfs_dat_abort_update(dat,
+					       &path[level].bp_oldreq.bpr_req,
+					       &path[level].bp_newreq.bpr_req);
 			return ret;
 		}
 	}
@@ -1808,13 +1824,13 @@ static int nilfs_btree_prepare_update_v(struct nilfs_btree *btree,
 
 static void nilfs_btree_commit_update_v(struct nilfs_btree *btree,
 					struct nilfs_btree_path *path,
-					int level)
+					int level, struct inode *dat)
 {
 	struct nilfs_btree_node *parent;
 
-	nilfs_bmap_commit_update_v(&btree->bt_bmap,
-				   &path[level].bp_oldreq,
-				   &path[level].bp_newreq);
+	nilfs_dat_commit_update(dat, &path[level].bp_oldreq.bpr_req,
+				&path[level].bp_newreq.bpr_req,
+				btree->bt_bmap.b_ptr_type == NILFS_BMAP_PTR_VS);
 
 	if (buffer_nilfs_node(path[level].bp_bh)) {
 		nilfs_btnode_commit_change_key(
@@ -1831,11 +1847,10 @@ static void nilfs_btree_commit_update_v(struct nilfs_btree *btree,
 
 static void nilfs_btree_abort_update_v(struct nilfs_btree *btree,
 				       struct nilfs_btree_path *path,
-				       int level)
+				       int level, struct inode *dat)
 {
-	nilfs_bmap_abort_update_v(&btree->bt_bmap,
-				  &path[level].bp_oldreq,
-				  &path[level].bp_newreq);
+	nilfs_dat_abort_update(dat, &path[level].bp_oldreq.bpr_req,
+			       &path[level].bp_newreq.bpr_req);
 	if (buffer_nilfs_node(path[level].bp_bh))
 		nilfs_btnode_abort_change_key(
 			&NILFS_BMAP_I(&btree->bt_bmap)->i_btnode_cache,
@@ -1844,14 +1859,14 @@ static void nilfs_btree_abort_update_v(struct nilfs_btree *btree,
 
 static int nilfs_btree_prepare_propagate_v(struct nilfs_btree *btree,
 					   struct nilfs_btree_path *path,
-					   int minlevel,
-					   int *maxlevelp)
+					   int minlevel, int *maxlevelp,
+					   struct inode *dat)
 {
 	int level, ret;
 
 	level = minlevel;
 	if (!buffer_nilfs_volatile(path[level].bp_bh)) {
-		ret = nilfs_btree_prepare_update_v(btree, path, level);
+		ret = nilfs_btree_prepare_update_v(btree, path, level, dat);
 		if (ret < 0)
 			return ret;
 	}
@@ -1859,7 +1874,7 @@ static int nilfs_btree_prepare_propagate_v(struct nilfs_btree *btree,
 	       !buffer_dirty(path[level].bp_bh)) {
 
 		WARN_ON(buffer_nilfs_volatile(path[level].bp_bh));
-		ret = nilfs_btree_prepare_update_v(btree, path, level);
+		ret = nilfs_btree_prepare_update_v(btree, path, level, dat);
 		if (ret < 0)
 			goto out;
 	}
@@ -1871,39 +1886,40 @@ static int nilfs_btree_prepare_propagate_v(struct nilfs_btree *btree,
 	/* error */
  out:
 	while (--level > minlevel)
-		nilfs_btree_abort_update_v(btree, path, level);
+		nilfs_btree_abort_update_v(btree, path, level, dat);
 	if (!buffer_nilfs_volatile(path[level].bp_bh))
-		nilfs_btree_abort_update_v(btree, path, level);
+		nilfs_btree_abort_update_v(btree, path, level, dat);
 	return ret;
 }
 
 static void nilfs_btree_commit_propagate_v(struct nilfs_btree *btree,
 					   struct nilfs_btree_path *path,
-					   int minlevel,
-					   int maxlevel,
-					   struct buffer_head *bh)
+					   int minlevel, int maxlevel,
+					   struct buffer_head *bh,
+					   struct inode *dat)
 {
 	int level;
 
 	if (!buffer_nilfs_volatile(path[minlevel].bp_bh))
-		nilfs_btree_commit_update_v(btree, path, minlevel);
+		nilfs_btree_commit_update_v(btree, path, minlevel, dat);
 
 	for (level = minlevel + 1; level <= maxlevel; level++)
-		nilfs_btree_commit_update_v(btree, path, level);
+		nilfs_btree_commit_update_v(btree, path, level, dat);
 }
 
 static int nilfs_btree_propagate_v(struct nilfs_btree *btree,
 				   struct nilfs_btree_path *path,
-				   int level,
-				   struct buffer_head *bh)
+				   int level, struct buffer_head *bh)
 {
 	int maxlevel, ret;
 	struct nilfs_btree_node *parent;
+	struct inode *dat = nilfs_bmap_get_dat(&btree->bt_bmap);
 	__u64 ptr;
 
 	get_bh(bh);
 	path[level].bp_bh = bh;
-	ret = nilfs_btree_prepare_propagate_v(btree, path, level, &maxlevel);
+	ret = nilfs_btree_prepare_propagate_v(btree, path, level, &maxlevel,
+					      dat);
 	if (ret < 0)
 		goto out;
 
@@ -1911,12 +1927,12 @@ static int nilfs_btree_propagate_v(struct nilfs_btree *btree,
 		parent = nilfs_btree_get_node(btree, path, level + 1);
 		ptr = nilfs_btree_node_get_ptr(btree, parent,
 					       path[level + 1].bp_index);
-		ret = nilfs_bmap_mark_dirty(&btree->bt_bmap, ptr);
+		ret = nilfs_dat_mark_dirty(dat, ptr);
 		if (ret < 0)
 			goto out;
 	}
 
-	nilfs_btree_commit_propagate_v(btree, path, level, maxlevel, bh);
+	nilfs_btree_commit_propagate_v(btree, path, level, maxlevel, bh, dat);
 
  out:
 	brelse(path[level].bp_bh);
@@ -1972,7 +1988,7 @@ static int nilfs_btree_propagate(const struct nilfs_bmap *bmap,
 static int nilfs_btree_propagate_gc(const struct nilfs_bmap *bmap,
 				    struct buffer_head *bh)
 {
-	return nilfs_bmap_mark_dirty(bmap, bh->b_blocknr);
+	return nilfs_dat_mark_dirty(nilfs_bmap_get_dat(bmap), bh->b_blocknr);
 }
 
 static void nilfs_btree_add_dirty_buffer(struct nilfs_btree *btree,
@@ -2086,6 +2102,7 @@ static int nilfs_btree_assign_v(struct nilfs_btree *btree,
 				union nilfs_binfo *binfo)
 {
 	struct nilfs_btree_node *parent;
+	struct inode *dat = nilfs_bmap_get_dat(&btree->bt_bmap);
 	__u64 key;
 	__u64 ptr;
 	union nilfs_bmap_ptr_req req;
@@ -2095,9 +2112,10 @@ static int nilfs_btree_assign_v(struct nilfs_btree *btree,
 	ptr = nilfs_btree_node_get_ptr(btree, parent,
 				       path[level + 1].bp_index);
 	req.bpr_ptr = ptr;
-	ret = nilfs_bmap_start_v(&btree->bt_bmap, &req, blocknr);
-	if (unlikely(ret < 0))
+	ret = nilfs_dat_prepare_start(dat, &req.bpr_req);
+	if (ret < 0)
 		return ret;
+	nilfs_dat_commit_start(dat, &req.bpr_req, blocknr);
 
 	key = nilfs_btree_node_get_key(parent, path[level + 1].bp_index);
 	/* on-disk format */
@@ -2155,13 +2173,12 @@ static int nilfs_btree_assign_gc(struct nilfs_bmap *bmap,
 				 sector_t blocknr,
 				 union nilfs_binfo *binfo)
 {
-	struct nilfs_btree *btree;
 	struct nilfs_btree_node *node;
 	__u64 key;
 	int ret;
 
-	btree = (struct nilfs_btree *)bmap;
-	ret = nilfs_bmap_move_v(bmap, (*bh)->b_blocknr, blocknr);
+	ret = nilfs_dat_move(nilfs_bmap_get_dat(bmap), (*bh)->b_blocknr,
+			     blocknr);
 	if (ret < 0)
 		return ret;
 