author     Pekka Enberg <penberg@cs.helsinki.fi>    2009-04-06 22:01:49 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>    2009-04-07 11:31:17 -0400
commit     8acfbf0939e98cc77dab94c24899c9930ddd1e13 (patch)
tree       0f261ee58584db9a89a874f85646ab827105143f /fs/nilfs2/btree.c
parent     7fa10d20012296300dfe645cb3e628a4e9a0d5ef (diff)
nilfs2: clean up indirect function calling conventions
This cleans up the strange indirect function calling convention used in nilfs
to follow the normal kernel coding style.

Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
Acked-by: Ryusuke Konishi <konishi.ryusuke@lab.ntt.co.jp>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'fs/nilfs2/btree.c')
-rw-r--r--  fs/nilfs2/btree.c  |  62
1 file changed, 31 insertions(+), 31 deletions(-)
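A note on the idiom being cleaned up, before the diff itself: in C, explicitly dereferencing a function pointer before calling it is redundant, so (*ops->fn)(args) and ops->fn(args) perform exactly the same call. The standalone sketch below is not nilfs2 code; the demo_ops/demo_prepare names are invented purely to illustrate the two forms the patch switches between.

/* Minimal sketch, not nilfs2 code: struct and function names are made up
 * to show that the two call styles are equivalent. */
#include <stdio.h>

struct demo_ops {
        int (*prepare)(int arg);        /* stands in for an ops-table callback */
};

static int demo_prepare(int arg)
{
        return arg + 1;
}

int main(void)
{
        struct demo_ops ops = { .prepare = demo_prepare };

        int old_style = (*ops.prepare)(41);     /* explicit dereference, as before the patch */
        int new_style = ops.prepare(41);        /* plain call, as after the patch */

        printf("%d %d\n", old_style, new_style);        /* prints "42 42" */
        return 0;
}

Both forms compile to the identical call; the patch only standardizes on the shorter spelling used elsewhere in the kernel.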
diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c
index 893f0190a61f..53f0d4c31cb0 100644
--- a/fs/nilfs2/btree.c
+++ b/fs/nilfs2/btree.c
@@ -902,9 +902,9 @@ static int nilfs_btree_prepare_insert(struct nilfs_btree *btree,
         /* allocate a new ptr for data block */
         if (btree->bt_ops->btop_find_target != NULL)
                 path[level].bp_newreq.bpr_ptr =
-                        (*btree->bt_ops->btop_find_target)(btree, path, key);
+                        btree->bt_ops->btop_find_target(btree, path, key);
 
-        ret = (*btree->bt_bmap.b_pops->bpop_prepare_alloc_ptr)(
+        ret = btree->bt_bmap.b_pops->bpop_prepare_alloc_ptr(
                 &btree->bt_bmap, &path[level].bp_newreq);
         if (ret < 0)
                 goto err_out_data;
@@ -965,7 +965,7 @@ static int nilfs_btree_prepare_insert(struct nilfs_btree *btree,
                 /* split */
                 path[level].bp_newreq.bpr_ptr =
                         path[level - 1].bp_newreq.bpr_ptr + 1;
-                ret = (*btree->bt_bmap.b_pops->bpop_prepare_alloc_ptr)(
+                ret = btree->bt_bmap.b_pops->bpop_prepare_alloc_ptr(
                         &btree->bt_bmap, &path[level].bp_newreq);
                 if (ret < 0)
                         goto err_out_child_node;
@@ -997,7 +997,7 @@ static int nilfs_btree_prepare_insert(struct nilfs_btree *btree,
 
         /* grow */
         path[level].bp_newreq.bpr_ptr = path[level - 1].bp_newreq.bpr_ptr + 1;
-        ret = (*btree->bt_bmap.b_pops->bpop_prepare_alloc_ptr)(
+        ret = btree->bt_bmap.b_pops->bpop_prepare_alloc_ptr(
                 &btree->bt_bmap, &path[level].bp_newreq);
         if (ret < 0)
                 goto err_out_child_node;
@@ -1026,17 +1026,17 @@ static int nilfs_btree_prepare_insert(struct nilfs_btree *btree,
 
         /* error */
 err_out_curr_node:
-        (*btree->bt_bmap.b_pops->bpop_abort_alloc_ptr)(&btree->bt_bmap,
+        btree->bt_bmap.b_pops->bpop_abort_alloc_ptr(&btree->bt_bmap,
                                                 &path[level].bp_newreq);
 err_out_child_node:
         for (level--; level > NILFS_BTREE_LEVEL_DATA; level--) {
                 nilfs_bmap_delete_block(&btree->bt_bmap, path[level].bp_sib_bh);
-                (*btree->bt_bmap.b_pops->bpop_abort_alloc_ptr)(
+                btree->bt_bmap.b_pops->bpop_abort_alloc_ptr(
                         &btree->bt_bmap, &path[level].bp_newreq);
 
         }
 
-        (*btree->bt_bmap.b_pops->bpop_abort_alloc_ptr)(&btree->bt_bmap,
+        btree->bt_bmap.b_pops->bpop_abort_alloc_ptr(&btree->bt_bmap,
                                                 &path[level].bp_newreq);
 err_out_data:
         *levelp = level;
@@ -1053,14 +1053,14 @@ static void nilfs_btree_commit_insert(struct nilfs_btree *btree,
         set_buffer_nilfs_volatile((struct buffer_head *)((unsigned long)ptr));
         ptr = path[NILFS_BTREE_LEVEL_DATA].bp_newreq.bpr_ptr;
         if (btree->bt_ops->btop_set_target != NULL)
-                (*btree->bt_ops->btop_set_target)(btree, key, ptr);
+                btree->bt_ops->btop_set_target(btree, key, ptr);
 
         for (level = NILFS_BTREE_LEVEL_NODE_MIN; level <= maxlevel; level++) {
                 if (btree->bt_bmap.b_pops->bpop_commit_alloc_ptr != NULL) {
-                        (*btree->bt_bmap.b_pops->bpop_commit_alloc_ptr)(
+                        btree->bt_bmap.b_pops->bpop_commit_alloc_ptr(
                                 &btree->bt_bmap, &path[level - 1].bp_newreq);
                 }
-                (*path[level].bp_op)(btree, path, level, &key, &ptr);
+                path[level].bp_op(btree, path, level, &key, &ptr);
         }
 
         if (!nilfs_bmap_dirty(&btree->bt_bmap))
@@ -1304,7 +1304,7 @@ static int nilfs_btree_prepare_delete(struct nilfs_btree *btree,
                         nilfs_btree_node_get_ptr(btree, node,
                                                  path[level].bp_index);
                 if (btree->bt_bmap.b_pops->bpop_prepare_end_ptr != NULL) {
-                        ret = (*btree->bt_bmap.b_pops->bpop_prepare_end_ptr)(
+                        ret = btree->bt_bmap.b_pops->bpop_prepare_end_ptr(
                                 &btree->bt_bmap, &path[level].bp_oldreq);
                         if (ret < 0)
                                 goto err_out_child_node;
@@ -1385,7 +1385,7 @@ static int nilfs_btree_prepare_delete(struct nilfs_btree *btree,
         path[level].bp_oldreq.bpr_ptr =
                 nilfs_btree_node_get_ptr(btree, node, path[level].bp_index);
         if (btree->bt_bmap.b_pops->bpop_prepare_end_ptr != NULL) {
-                ret = (*btree->bt_bmap.b_pops->bpop_prepare_end_ptr)(
+                ret = btree->bt_bmap.b_pops->bpop_prepare_end_ptr(
                         &btree->bt_bmap, &path[level].bp_oldreq);
                 if (ret < 0)
                         goto err_out_child_node;
@@ -1402,13 +1402,13 @@ static int nilfs_btree_prepare_delete(struct nilfs_btree *btree,
         /* error */
 err_out_curr_node:
         if (btree->bt_bmap.b_pops->bpop_abort_end_ptr != NULL)
-                (*btree->bt_bmap.b_pops->bpop_abort_end_ptr)(
+                btree->bt_bmap.b_pops->bpop_abort_end_ptr(
                         &btree->bt_bmap, &path[level].bp_oldreq);
 err_out_child_node:
         for (level--; level >= NILFS_BTREE_LEVEL_NODE_MIN; level--) {
                 nilfs_bmap_put_block(&btree->bt_bmap, path[level].bp_sib_bh);
                 if (btree->bt_bmap.b_pops->bpop_abort_end_ptr != NULL)
-                        (*btree->bt_bmap.b_pops->bpop_abort_end_ptr)(
+                        btree->bt_bmap.b_pops->bpop_abort_end_ptr(
                                 &btree->bt_bmap, &path[level].bp_oldreq);
         }
         *levelp = level;
@@ -1424,9 +1424,9 @@ static void nilfs_btree_commit_delete(struct nilfs_btree *btree,
 
         for (level = NILFS_BTREE_LEVEL_NODE_MIN; level <= maxlevel; level++) {
                 if (btree->bt_bmap.b_pops->bpop_commit_end_ptr != NULL)
-                        (*btree->bt_bmap.b_pops->bpop_commit_end_ptr)(
+                        btree->bt_bmap.b_pops->bpop_commit_end_ptr(
                                 &btree->bt_bmap, &path[level].bp_oldreq);
-                (*path[level].bp_op)(btree, path, level, NULL, NULL);
+                path[level].bp_op(btree, path, level, NULL, NULL);
         }
 
         if (!nilfs_bmap_dirty(&btree->bt_bmap))
@@ -1589,8 +1589,8 @@ nilfs_btree_prepare_convert_and_insert(struct nilfs_bmap *bmap, __u64 key,
         /* cannot find near ptr */
         if (btree->bt_ops->btop_find_target != NULL)
                 dreq->bpr_ptr
-                        = (*btree->bt_ops->btop_find_target)(btree, NULL, key);
-        ret = (*bmap->b_pops->bpop_prepare_alloc_ptr)(bmap, dreq);
+                        = btree->bt_ops->btop_find_target(btree, NULL, key);
+        ret = bmap->b_pops->bpop_prepare_alloc_ptr(bmap, dreq);
         if (ret < 0)
                 return ret;
 
@@ -1598,7 +1598,7 @@ nilfs_btree_prepare_convert_and_insert(struct nilfs_bmap *bmap, __u64 key,
         stats->bs_nblocks++;
         if (nreq != NULL) {
                 nreq->bpr_ptr = dreq->bpr_ptr + 1;
-                ret = (*bmap->b_pops->bpop_prepare_alloc_ptr)(bmap, nreq);
+                ret = bmap->b_pops->bpop_prepare_alloc_ptr(bmap, nreq);
                 if (ret < 0)
                         goto err_out_dreq;
 
@@ -1615,9 +1615,9 @@ nilfs_btree_prepare_convert_and_insert(struct nilfs_bmap *bmap, __u64 key,
 
         /* error */
 err_out_nreq:
-        (*bmap->b_pops->bpop_abort_alloc_ptr)(bmap, nreq);
+        bmap->b_pops->bpop_abort_alloc_ptr(bmap, nreq);
 err_out_dreq:
-        (*bmap->b_pops->bpop_abort_alloc_ptr)(bmap, dreq);
+        bmap->b_pops->bpop_abort_alloc_ptr(bmap, dreq);
         stats->bs_nblocks = 0;
         return ret;
 
@@ -1638,7 +1638,7 @@ nilfs_btree_commit_convert_and_insert(struct nilfs_bmap *bmap,
 
         /* free resources */
         if (bmap->b_ops->bop_clear != NULL)
-                (*bmap->b_ops->bop_clear)(bmap);
+                bmap->b_ops->bop_clear(bmap);
 
         /* ptr must be a pointer to a buffer head. */
         set_buffer_nilfs_volatile((struct buffer_head *)((unsigned long)ptr));
@@ -1648,8 +1648,8 @@ nilfs_btree_commit_convert_and_insert(struct nilfs_bmap *bmap,
         nilfs_btree_init(bmap, low, high);
         if (nreq != NULL) {
                 if (bmap->b_pops->bpop_commit_alloc_ptr != NULL) {
-                        (*bmap->b_pops->bpop_commit_alloc_ptr)(bmap, dreq);
-                        (*bmap->b_pops->bpop_commit_alloc_ptr)(bmap, nreq);
+                        bmap->b_pops->bpop_commit_alloc_ptr(bmap, dreq);
+                        bmap->b_pops->bpop_commit_alloc_ptr(bmap, nreq);
                 }
 
                 /* create child node at level 1 */
@@ -1673,7 +1673,7 @@ nilfs_btree_commit_convert_and_insert(struct nilfs_bmap *bmap,
                                         2, 1, &keys[0], &tmpptr);
         } else {
                 if (bmap->b_pops->bpop_commit_alloc_ptr != NULL)
-                        (*bmap->b_pops->bpop_commit_alloc_ptr)(bmap, dreq);
+                        bmap->b_pops->bpop_commit_alloc_ptr(bmap, dreq);
 
                 /* create root node at level 1 */
                 node = nilfs_btree_get_root(btree);
@@ -1686,7 +1686,7 @@ nilfs_btree_commit_convert_and_insert(struct nilfs_bmap *bmap,
         }
 
         if (btree->bt_ops->btop_set_target != NULL)
-                (*btree->bt_ops->btop_set_target)(btree, key, dreq->bpr_ptr);
+                btree->bt_ops->btop_set_target(btree, key, dreq->bpr_ptr);
 }
 
 /**
@@ -1937,7 +1937,7 @@ static int nilfs_btree_propagate(const struct nilfs_bmap *bmap,
                 goto out;
         }
 
-        ret = (*btree->bt_ops->btop_propagate)(btree, path, level, bh);
+        ret = btree->bt_ops->btop_propagate(btree, path, level, bh);
 
 out:
         nilfs_btree_clear_path(btree, path);
@@ -2073,11 +2073,11 @@ static int nilfs_btree_assign_v(struct nilfs_btree *btree,
         ptr = nilfs_btree_node_get_ptr(btree, parent,
                                        path[level + 1].bp_index);
         req.bpr_ptr = ptr;
-        ret = (*btree->bt_bmap.b_pops->bpop_prepare_start_ptr)(&btree->bt_bmap,
+        ret = btree->bt_bmap.b_pops->bpop_prepare_start_ptr(&btree->bt_bmap,
                                                              &req);
         if (ret < 0)
                 return ret;
-        (*btree->bt_bmap.b_pops->bpop_commit_start_ptr)(&btree->bt_bmap,
+        btree->bt_bmap.b_pops->bpop_commit_start_ptr(&btree->bt_bmap,
                                                       &req, blocknr);
 
         key = nilfs_btree_node_get_key(btree, parent,
@@ -2121,7 +2121,7 @@ static int nilfs_btree_assign(struct nilfs_bmap *bmap,
                 goto out;
         }
 
-        ret = (*btree->bt_ops->btop_assign)(btree, path, level, bh,
+        ret = btree->bt_ops->btop_assign(btree, path, level, bh,
                                             blocknr, binfo);
 
 out: