-rw-r--r--  fs/nilfs2/bmap.c   |  85
-rw-r--r--  fs/nilfs2/bmap.h   |  69
-rw-r--r--  fs/nilfs2/btree.c  | 151
-rw-r--r--  fs/nilfs2/direct.c | 161
4 files changed, 167 insertions, 299 deletions
diff --git a/fs/nilfs2/bmap.c b/fs/nilfs2/bmap.c
index 13e95a907ece..f98c5c4cf6e7 100644
--- a/fs/nilfs2/bmap.c
+++ b/fs/nilfs2/bmap.c
@@ -469,91 +469,6 @@ __u64 nilfs_bmap_find_target_in_group(const struct nilfs_bmap *bmap)
 		(entries_per_group / NILFS_BMAP_GROUP_DIV);
 }
 
-int nilfs_bmap_prepare_alloc_v(struct nilfs_bmap *bmap,
-			       union nilfs_bmap_ptr_req *req)
-{
-	return nilfs_dat_prepare_alloc(nilfs_bmap_get_dat(bmap), &req->bpr_req);
-}
-
-void nilfs_bmap_commit_alloc_v(struct nilfs_bmap *bmap,
-			       union nilfs_bmap_ptr_req *req)
-{
-	nilfs_dat_commit_alloc(nilfs_bmap_get_dat(bmap), &req->bpr_req);
-}
-
-void nilfs_bmap_abort_alloc_v(struct nilfs_bmap *bmap,
-			      union nilfs_bmap_ptr_req *req)
-{
-	nilfs_dat_abort_alloc(nilfs_bmap_get_dat(bmap), &req->bpr_req);
-}
-
-int nilfs_bmap_start_v(struct nilfs_bmap *bmap, union nilfs_bmap_ptr_req *req,
-		       sector_t blocknr)
-{
-	struct inode *dat = nilfs_bmap_get_dat(bmap);
-	int ret;
-
-	ret = nilfs_dat_prepare_start(dat, &req->bpr_req);
-	if (likely(!ret))
-		nilfs_dat_commit_start(dat, &req->bpr_req, blocknr);
-	return ret;
-}
-
-int nilfs_bmap_prepare_end_v(struct nilfs_bmap *bmap,
-			     union nilfs_bmap_ptr_req *req)
-{
-	return nilfs_dat_prepare_end(nilfs_bmap_get_dat(bmap), &req->bpr_req);
-}
-
-void nilfs_bmap_commit_end_v(struct nilfs_bmap *bmap,
-			     union nilfs_bmap_ptr_req *req)
-{
-	nilfs_dat_commit_end(nilfs_bmap_get_dat(bmap), &req->bpr_req,
-			     bmap->b_ptr_type == NILFS_BMAP_PTR_VS);
-}
-
-void nilfs_bmap_abort_end_v(struct nilfs_bmap *bmap,
-			    union nilfs_bmap_ptr_req *req)
-{
-	nilfs_dat_abort_end(nilfs_bmap_get_dat(bmap), &req->bpr_req);
-}
-
-int nilfs_bmap_move_v(const struct nilfs_bmap *bmap, __u64 vblocknr,
-		      sector_t blocknr)
-{
-	return nilfs_dat_move(nilfs_bmap_get_dat(bmap), vblocknr, blocknr);
-}
-
-int nilfs_bmap_mark_dirty(const struct nilfs_bmap *bmap, __u64 vblocknr)
-{
-	return nilfs_dat_mark_dirty(nilfs_bmap_get_dat(bmap), vblocknr);
-}
-
-int nilfs_bmap_prepare_update_v(struct nilfs_bmap *bmap,
-				union nilfs_bmap_ptr_req *oldreq,
-				union nilfs_bmap_ptr_req *newreq)
-{
-	return nilfs_dat_prepare_update(nilfs_bmap_get_dat(bmap),
-					&oldreq->bpr_req, &newreq->bpr_req);
-}
-
-void nilfs_bmap_commit_update_v(struct nilfs_bmap *bmap,
-				union nilfs_bmap_ptr_req *oldreq,
-				union nilfs_bmap_ptr_req *newreq)
-{
-	nilfs_dat_commit_update(nilfs_bmap_get_dat(bmap),
-				&oldreq->bpr_req, &newreq->bpr_req,
-				bmap->b_ptr_type == NILFS_BMAP_PTR_VS);
-}
-
-void nilfs_bmap_abort_update_v(struct nilfs_bmap *bmap,
-			       union nilfs_bmap_ptr_req *oldreq,
-			       union nilfs_bmap_ptr_req *newreq)
-{
-	nilfs_dat_abort_update(nilfs_bmap_get_dat(bmap),
-			       &oldreq->bpr_req, &newreq->bpr_req);
-}
-
 static struct lock_class_key nilfs_bmap_dat_lock_key;
 static struct lock_class_key nilfs_bmap_mdt_lock_key;
 
diff --git a/fs/nilfs2/bmap.h b/fs/nilfs2/bmap.h
index b2890cdcef12..a4f64e54424c 100644
--- a/fs/nilfs2/bmap.h
+++ b/fs/nilfs2/bmap.h
@@ -28,6 +28,7 @@
 #include <linux/buffer_head.h>
 #include <linux/nilfs2_fs.h>
 #include "alloc.h"
+#include "dat.h"
 
 #define NILFS_BMAP_INVALID_PTR	0
 
@@ -164,86 +165,66 @@ void nilfs_bmap_commit_gcdat(struct nilfs_bmap *, struct nilfs_bmap *);
  * Internal use only
  */
 struct inode *nilfs_bmap_get_dat(const struct nilfs_bmap *);
-int nilfs_bmap_prepare_alloc_v(struct nilfs_bmap *,
-			       union nilfs_bmap_ptr_req *);
-void nilfs_bmap_commit_alloc_v(struct nilfs_bmap *,
-			       union nilfs_bmap_ptr_req *);
-void nilfs_bmap_abort_alloc_v(struct nilfs_bmap *,
-			      union nilfs_bmap_ptr_req *);
 
 static inline int nilfs_bmap_prepare_alloc_ptr(struct nilfs_bmap *bmap,
-					       union nilfs_bmap_ptr_req *req)
+					       union nilfs_bmap_ptr_req *req,
+					       struct inode *dat)
 {
-	if (NILFS_BMAP_USE_VBN(bmap))
-		return nilfs_bmap_prepare_alloc_v(bmap, req);
+	if (dat)
+		return nilfs_dat_prepare_alloc(dat, &req->bpr_req);
 	/* ignore target ptr */
 	req->bpr_ptr = bmap->b_last_allocated_ptr++;
 	return 0;
 }
 
 static inline void nilfs_bmap_commit_alloc_ptr(struct nilfs_bmap *bmap,
-					       union nilfs_bmap_ptr_req *req)
+					       union nilfs_bmap_ptr_req *req,
+					       struct inode *dat)
 {
-	if (NILFS_BMAP_USE_VBN(bmap))
-		nilfs_bmap_commit_alloc_v(bmap, req);
+	if (dat)
+		nilfs_dat_commit_alloc(dat, &req->bpr_req);
 }
 
 static inline void nilfs_bmap_abort_alloc_ptr(struct nilfs_bmap *bmap,
-					      union nilfs_bmap_ptr_req *req)
+					      union nilfs_bmap_ptr_req *req,
+					      struct inode *dat)
 {
-	if (NILFS_BMAP_USE_VBN(bmap))
-		nilfs_bmap_abort_alloc_v(bmap, req);
+	if (dat)
+		nilfs_dat_abort_alloc(dat, &req->bpr_req);
 	else
 		bmap->b_last_allocated_ptr--;
 }
 
-int nilfs_bmap_prepare_end_v(struct nilfs_bmap *, union nilfs_bmap_ptr_req *);
-void nilfs_bmap_commit_end_v(struct nilfs_bmap *, union nilfs_bmap_ptr_req *);
-void nilfs_bmap_abort_end_v(struct nilfs_bmap *, union nilfs_bmap_ptr_req *);
-
 static inline int nilfs_bmap_prepare_end_ptr(struct nilfs_bmap *bmap,
-					     union nilfs_bmap_ptr_req *req)
+					     union nilfs_bmap_ptr_req *req,
+					     struct inode *dat)
 {
-	return NILFS_BMAP_USE_VBN(bmap) ?
-		nilfs_bmap_prepare_end_v(bmap, req) : 0;
+	return dat ? nilfs_dat_prepare_end(dat, &req->bpr_req) : 0;
 }
 
 static inline void nilfs_bmap_commit_end_ptr(struct nilfs_bmap *bmap,
-					     union nilfs_bmap_ptr_req *req)
+					     union nilfs_bmap_ptr_req *req,
+					     struct inode *dat)
 {
-	if (NILFS_BMAP_USE_VBN(bmap))
-		nilfs_bmap_commit_end_v(bmap, req);
+	if (dat)
+		nilfs_dat_commit_end(dat, &req->bpr_req,
+				     bmap->b_ptr_type == NILFS_BMAP_PTR_VS);
 }
 
 static inline void nilfs_bmap_abort_end_ptr(struct nilfs_bmap *bmap,
-					    union nilfs_bmap_ptr_req *req)
+					    union nilfs_bmap_ptr_req *req,
+					    struct inode *dat)
 {
-	if (NILFS_BMAP_USE_VBN(bmap))
-		nilfs_bmap_abort_end_v(bmap, req);
+	if (dat)
+		nilfs_dat_abort_end(dat, &req->bpr_req);
 }
 
-int nilfs_bmap_start_v(struct nilfs_bmap *, union nilfs_bmap_ptr_req *,
-		       sector_t);
-int nilfs_bmap_move_v(const struct nilfs_bmap *, __u64, sector_t);
-int nilfs_bmap_mark_dirty(const struct nilfs_bmap *, __u64);
-
-
 __u64 nilfs_bmap_data_get_key(const struct nilfs_bmap *,
 			      const struct buffer_head *);
 
 __u64 nilfs_bmap_find_target_seq(const struct nilfs_bmap *, __u64);
 __u64 nilfs_bmap_find_target_in_group(const struct nilfs_bmap *);
 
-int nilfs_bmap_prepare_update_v(struct nilfs_bmap *,
-				union nilfs_bmap_ptr_req *,
-				union nilfs_bmap_ptr_req *);
-void nilfs_bmap_commit_update_v(struct nilfs_bmap *,
-				union nilfs_bmap_ptr_req *,
-				union nilfs_bmap_ptr_req *);
-void nilfs_bmap_abort_update_v(struct nilfs_bmap *,
-			       union nilfs_bmap_ptr_req *,
-			       union nilfs_bmap_ptr_req *);
-
 void nilfs_bmap_add_blocks(const struct nilfs_bmap *, int);
 void nilfs_bmap_sub_blocks(const struct nilfs_bmap *, int);
 
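For reference, a minimal caller sketch (not part of the patch; example_alloc is a made-up name) of the calling convention the reworked bmap.h inlines expect: the caller resolves the DAT inode once, passes NULL for bmaps that do not use virtual block numbers, and the helpers dispatch on that pointer instead of re-checking NILFS_BMAP_USE_VBN() internally:

	static int example_alloc(struct nilfs_bmap *bmap,
				 union nilfs_bmap_ptr_req *req)
	{
		/* Resolve the DAT inode once; NULL selects the non-VBN path. */
		struct inode *dat = NILFS_BMAP_USE_VBN(bmap) ?
			nilfs_bmap_get_dat(bmap) : NULL;
		int ret;

		ret = nilfs_bmap_prepare_alloc_ptr(bmap, req, dat);
		if (ret < 0)
			return ret;
		/* on a later failure: nilfs_bmap_abort_alloc_ptr(bmap, req, dat); */
		nilfs_bmap_commit_alloc_ptr(bmap, req, dat);
		return 0;
	}
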
diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c
index 115b157d508b..e25b507a474f 100644
--- a/fs/nilfs2/btree.c
+++ b/fs/nilfs2/btree.c
@@ -940,17 +940,20 @@ static int nilfs_btree_prepare_insert(struct nilfs_btree *btree,
 	struct nilfs_btree_node *node, *parent, *sib;
 	__u64 sibptr;
 	int pindex, level, ret;
+	struct inode *dat = NULL;
 
 	stats->bs_nblocks = 0;
 	level = NILFS_BTREE_LEVEL_DATA;
 
 	/* allocate a new ptr for data block */
-	if (NILFS_BMAP_USE_VBN(&btree->bt_bmap))
+	if (NILFS_BMAP_USE_VBN(&btree->bt_bmap)) {
 		path[level].bp_newreq.bpr_ptr =
 			nilfs_btree_find_target_v(btree, path, key);
+		dat = nilfs_bmap_get_dat(&btree->bt_bmap);
+	}
 
 	ret = nilfs_bmap_prepare_alloc_ptr(&btree->bt_bmap,
-					   &path[level].bp_newreq);
+					   &path[level].bp_newreq, dat);
 	if (ret < 0)
 		goto err_out_data;
 
@@ -1009,7 +1012,7 @@ static int nilfs_btree_prepare_insert(struct nilfs_btree *btree,
 		path[level].bp_newreq.bpr_ptr =
 			path[level - 1].bp_newreq.bpr_ptr + 1;
 		ret = nilfs_bmap_prepare_alloc_ptr(&btree->bt_bmap,
-						   &path[level].bp_newreq);
+						   &path[level].bp_newreq, dat);
 		if (ret < 0)
 			goto err_out_child_node;
 		ret = nilfs_btree_get_new_block(btree,
@@ -1041,7 +1044,7 @@ static int nilfs_btree_prepare_insert(struct nilfs_btree *btree,
 	/* grow */
 	path[level].bp_newreq.bpr_ptr = path[level - 1].bp_newreq.bpr_ptr + 1;
 	ret = nilfs_bmap_prepare_alloc_ptr(&btree->bt_bmap,
-					   &path[level].bp_newreq);
+					   &path[level].bp_newreq, dat);
 	if (ret < 0)
 		goto err_out_child_node;
 	ret = nilfs_btree_get_new_block(btree, path[level].bp_newreq.bpr_ptr,
@@ -1069,16 +1072,18 @@ static int nilfs_btree_prepare_insert(struct nilfs_btree *btree,
 
 	/* error */
  err_out_curr_node:
-	nilfs_bmap_abort_alloc_ptr(&btree->bt_bmap, &path[level].bp_newreq);
+	nilfs_bmap_abort_alloc_ptr(&btree->bt_bmap, &path[level].bp_newreq,
+				   dat);
  err_out_child_node:
 	for (level--; level > NILFS_BTREE_LEVEL_DATA; level--) {
 		nilfs_btnode_delete(path[level].bp_sib_bh);
 		nilfs_bmap_abort_alloc_ptr(&btree->bt_bmap,
-					   &path[level].bp_newreq);
+					   &path[level].bp_newreq, dat);
 
 	}
 
-	nilfs_bmap_abort_alloc_ptr(&btree->bt_bmap, &path[level].bp_newreq);
+	nilfs_bmap_abort_alloc_ptr(&btree->bt_bmap, &path[level].bp_newreq,
+				   dat);
  err_out_data:
 	*levelp = level;
 	stats->bs_nblocks = 0;
@@ -1089,16 +1094,19 @@ static void nilfs_btree_commit_insert(struct nilfs_btree *btree,
 					struct nilfs_btree_path *path,
 					int maxlevel, __u64 key, __u64 ptr)
 {
+	struct inode *dat = NULL;
 	int level;
 
 	set_buffer_nilfs_volatile((struct buffer_head *)((unsigned long)ptr));
 	ptr = path[NILFS_BTREE_LEVEL_DATA].bp_newreq.bpr_ptr;
-	if (NILFS_BMAP_USE_VBN(&btree->bt_bmap))
+	if (NILFS_BMAP_USE_VBN(&btree->bt_bmap)) {
 		nilfs_btree_set_target_v(btree, key, ptr);
+		dat = nilfs_bmap_get_dat(&btree->bt_bmap);
+	}
 
 	for (level = NILFS_BTREE_LEVEL_NODE_MIN; level <= maxlevel; level++) {
 		nilfs_bmap_commit_alloc_ptr(&btree->bt_bmap,
-					    &path[level - 1].bp_newreq);
+					    &path[level - 1].bp_newreq, dat);
 		path[level].bp_op(btree, path, level, &key, &ptr);
 	}
 
@@ -1326,7 +1334,8 @@ static void nilfs_btree_shrink(struct nilfs_btree *btree,
 static int nilfs_btree_prepare_delete(struct nilfs_btree *btree,
 				      struct nilfs_btree_path *path,
 				      int *levelp,
-				      struct nilfs_bmap_stats *stats)
+				      struct nilfs_bmap_stats *stats,
+				      struct inode *dat)
 {
 	struct buffer_head *bh;
 	struct nilfs_btree_node *node, *parent, *sib;
@@ -1343,7 +1352,7 @@ static int nilfs_btree_prepare_delete(struct nilfs_btree *btree,
 			nilfs_btree_node_get_ptr(btree, node,
 						 path[level].bp_index);
 		ret = nilfs_bmap_prepare_end_ptr(&btree->bt_bmap,
-						 &path[level].bp_oldreq);
+						 &path[level].bp_oldreq, dat);
 		if (ret < 0)
 			goto err_out_child_node;
 
@@ -1421,7 +1430,7 @@ static int nilfs_btree_prepare_delete(struct nilfs_btree *btree,
 		nilfs_btree_node_get_ptr(btree, node, path[level].bp_index);
 
 	ret = nilfs_bmap_prepare_end_ptr(&btree->bt_bmap,
-					 &path[level].bp_oldreq);
+					 &path[level].bp_oldreq, dat);
 	if (ret < 0)
 		goto err_out_child_node;
 
@@ -1436,12 +1445,12 @@ static int nilfs_btree_prepare_delete(struct nilfs_btree *btree,
 
 	/* error */
  err_out_curr_node:
-	nilfs_bmap_abort_end_ptr(&btree->bt_bmap, &path[level].bp_oldreq);
+	nilfs_bmap_abort_end_ptr(&btree->bt_bmap, &path[level].bp_oldreq, dat);
  err_out_child_node:
 	for (level--; level >= NILFS_BTREE_LEVEL_NODE_MIN; level--) {
 		brelse(path[level].bp_sib_bh);
 		nilfs_bmap_abort_end_ptr(&btree->bt_bmap,
-					 &path[level].bp_oldreq);
+					 &path[level].bp_oldreq, dat);
 	}
 	*levelp = level;
 	stats->bs_nblocks = 0;
@@ -1450,13 +1459,13 @@ static int nilfs_btree_prepare_delete(struct nilfs_btree *btree,
 
 static void nilfs_btree_commit_delete(struct nilfs_btree *btree,
 				      struct nilfs_btree_path *path,
-				      int maxlevel)
+				      int maxlevel, struct inode *dat)
 {
 	int level;
 
 	for (level = NILFS_BTREE_LEVEL_NODE_MIN; level <= maxlevel; level++) {
 		nilfs_bmap_commit_end_ptr(&btree->bt_bmap,
-					  &path[level].bp_oldreq);
+					  &path[level].bp_oldreq, dat);
 		path[level].bp_op(btree, path, level, NULL, NULL);
 	}
 
@@ -1470,6 +1479,7 @@ static int nilfs_btree_delete(struct nilfs_bmap *bmap, __u64 key)
 	struct nilfs_btree *btree;
 	struct nilfs_btree_path *path;
 	struct nilfs_bmap_stats stats;
+	struct inode *dat;
 	int level, ret;
 
 	btree = (struct nilfs_btree *)bmap;
@@ -1482,10 +1492,14 @@ static int nilfs_btree_delete(struct nilfs_bmap *bmap, __u64 key)
 	if (ret < 0)
 		goto out;
 
-	ret = nilfs_btree_prepare_delete(btree, path, &level, &stats);
+
+	dat = NILFS_BMAP_USE_VBN(&btree->bt_bmap) ?
+		nilfs_bmap_get_dat(&btree->bt_bmap) : NULL;
+
+	ret = nilfs_btree_prepare_delete(btree, path, &level, &stats, dat);
 	if (ret < 0)
 		goto out;
-	nilfs_btree_commit_delete(btree, path, level);
+	nilfs_btree_commit_delete(btree, path, level, dat);
 	nilfs_bmap_sub_blocks(bmap, stats.bs_nblocks);
 
 out:
@@ -1610,18 +1624,20 @@ nilfs_btree_prepare_convert_and_insert(struct nilfs_bmap *bmap, __u64 key,
 				       struct nilfs_bmap_stats *stats)
 {
 	struct buffer_head *bh;
-	struct nilfs_btree *btree;
+	struct nilfs_btree *btree = (struct nilfs_btree *)bmap;
+	struct inode *dat = NULL;
 	int ret;
 
-	btree = (struct nilfs_btree *)bmap;
 	stats->bs_nblocks = 0;
 
 	/* for data */
 	/* cannot find near ptr */
-	if (NILFS_BMAP_USE_VBN(bmap))
+	if (NILFS_BMAP_USE_VBN(bmap)) {
 		dreq->bpr_ptr = nilfs_btree_find_target_v(btree, NULL, key);
+		dat = nilfs_bmap_get_dat(bmap);
+	}
 
-	ret = nilfs_bmap_prepare_alloc_ptr(bmap, dreq);
+	ret = nilfs_bmap_prepare_alloc_ptr(bmap, dreq, dat);
 	if (ret < 0)
 		return ret;
 
@@ -1629,7 +1645,7 @@ nilfs_btree_prepare_convert_and_insert(struct nilfs_bmap *bmap, __u64 key,
 	stats->bs_nblocks++;
 	if (nreq != NULL) {
 		nreq->bpr_ptr = dreq->bpr_ptr + 1;
-		ret = nilfs_bmap_prepare_alloc_ptr(bmap, nreq);
+		ret = nilfs_bmap_prepare_alloc_ptr(bmap, nreq, dat);
 		if (ret < 0)
 			goto err_out_dreq;
 
@@ -1646,9 +1662,9 @@ nilfs_btree_prepare_convert_and_insert(struct nilfs_bmap *bmap, __u64 key,
 
 	/* error */
  err_out_nreq:
-	nilfs_bmap_abort_alloc_ptr(bmap, nreq);
+	nilfs_bmap_abort_alloc_ptr(bmap, nreq, dat);
  err_out_dreq:
-	nilfs_bmap_abort_alloc_ptr(bmap, dreq);
+	nilfs_bmap_abort_alloc_ptr(bmap, dreq, dat);
 	stats->bs_nblocks = 0;
 	return ret;
 
@@ -1663,8 +1679,9 @@ nilfs_btree_commit_convert_and_insert(struct nilfs_bmap *bmap,
 				      union nilfs_bmap_ptr_req *nreq,
 				      struct buffer_head *bh)
 {
-	struct nilfs_btree *btree;
+	struct nilfs_btree *btree = (struct nilfs_btree *)bmap;
 	struct nilfs_btree_node *node;
+	struct inode *dat;
 	__u64 tmpptr;
 
 	/* free resources */
@@ -1675,11 +1692,11 @@ nilfs_btree_commit_convert_and_insert(struct nilfs_bmap *bmap,
 	set_buffer_nilfs_volatile((struct buffer_head *)((unsigned long)ptr));
 
 	/* convert and insert */
-	btree = (struct nilfs_btree *)bmap;
+	dat = NILFS_BMAP_USE_VBN(bmap) ? nilfs_bmap_get_dat(bmap) : NULL;
 	nilfs_btree_init(bmap);
 	if (nreq != NULL) {
-		nilfs_bmap_commit_alloc_ptr(bmap, dreq);
-		nilfs_bmap_commit_alloc_ptr(bmap, nreq);
+		nilfs_bmap_commit_alloc_ptr(bmap, dreq, dat);
+		nilfs_bmap_commit_alloc_ptr(bmap, nreq, dat);
 
 		/* create child node at level 1 */
 		lock_buffer(bh);
@@ -1701,7 +1718,7 @@ nilfs_btree_commit_convert_and_insert(struct nilfs_bmap *bmap,
 		nilfs_btree_node_init(btree, node, NILFS_BTREE_NODE_ROOT,
 				      2, 1, &keys[0], &tmpptr);
 	} else {
-		nilfs_bmap_commit_alloc_ptr(bmap, dreq);
+		nilfs_bmap_commit_alloc_ptr(bmap, dreq, dat);
 
 		/* create root node at level 1 */
 		node = nilfs_btree_get_root(btree);
@@ -1772,7 +1789,7 @@ static int nilfs_btree_propagate_p(struct nilfs_btree *btree,
 
 static int nilfs_btree_prepare_update_v(struct nilfs_btree *btree,
 					 struct nilfs_btree_path *path,
-					 int level)
+					 int level, struct inode *dat)
 {
 	struct nilfs_btree_node *parent;
 	int ret;
@@ -1782,9 +1799,8 @@ static int nilfs_btree_prepare_update_v(struct nilfs_btree *btree,
 		nilfs_btree_node_get_ptr(btree, parent,
 					 path[level + 1].bp_index);
 	path[level].bp_newreq.bpr_ptr = path[level].bp_oldreq.bpr_ptr + 1;
-	ret = nilfs_bmap_prepare_update_v(&btree->bt_bmap,
-					  &path[level].bp_oldreq,
-					  &path[level].bp_newreq);
+	ret = nilfs_dat_prepare_update(dat, &path[level].bp_oldreq.bpr_req,
+				       &path[level].bp_newreq.bpr_req);
 	if (ret < 0)
 		return ret;
 
@@ -1796,9 +1812,9 @@ static int nilfs_btree_prepare_update_v(struct nilfs_btree *btree,
 			&NILFS_BMAP_I(&btree->bt_bmap)->i_btnode_cache,
 			&path[level].bp_ctxt);
 		if (ret < 0) {
-			nilfs_bmap_abort_update_v(&btree->bt_bmap,
-						  &path[level].bp_oldreq,
-						  &path[level].bp_newreq);
+			nilfs_dat_abort_update(dat,
+					       &path[level].bp_oldreq.bpr_req,
+					       &path[level].bp_newreq.bpr_req);
 			return ret;
 		}
 	}
@@ -1808,13 +1824,13 @@ static int nilfs_btree_prepare_update_v(struct nilfs_btree *btree,
 
 static void nilfs_btree_commit_update_v(struct nilfs_btree *btree,
 					struct nilfs_btree_path *path,
-					int level)
+					int level, struct inode *dat)
 {
 	struct nilfs_btree_node *parent;
 
-	nilfs_bmap_commit_update_v(&btree->bt_bmap,
-				   &path[level].bp_oldreq,
-				   &path[level].bp_newreq);
+	nilfs_dat_commit_update(dat, &path[level].bp_oldreq.bpr_req,
+				&path[level].bp_newreq.bpr_req,
+				btree->bt_bmap.b_ptr_type == NILFS_BMAP_PTR_VS);
 
 	if (buffer_nilfs_node(path[level].bp_bh)) {
 		nilfs_btnode_commit_change_key(
@@ -1831,11 +1847,10 @@ static void nilfs_btree_commit_update_v(struct nilfs_btree *btree,
 
 static void nilfs_btree_abort_update_v(struct nilfs_btree *btree,
 				       struct nilfs_btree_path *path,
-				       int level)
+				       int level, struct inode *dat)
 {
-	nilfs_bmap_abort_update_v(&btree->bt_bmap,
-				  &path[level].bp_oldreq,
-				  &path[level].bp_newreq);
+	nilfs_dat_abort_update(dat, &path[level].bp_oldreq.bpr_req,
+			       &path[level].bp_newreq.bpr_req);
 	if (buffer_nilfs_node(path[level].bp_bh))
 		nilfs_btnode_abort_change_key(
 			&NILFS_BMAP_I(&btree->bt_bmap)->i_btnode_cache,
@@ -1844,14 +1859,14 @@ static void nilfs_btree_abort_update_v(struct nilfs_btree *btree,
 
 static int nilfs_btree_prepare_propagate_v(struct nilfs_btree *btree,
 					   struct nilfs_btree_path *path,
-					   int minlevel,
-					   int *maxlevelp)
+					   int minlevel, int *maxlevelp,
+					   struct inode *dat)
 {
 	int level, ret;
 
 	level = minlevel;
 	if (!buffer_nilfs_volatile(path[level].bp_bh)) {
-		ret = nilfs_btree_prepare_update_v(btree, path, level);
+		ret = nilfs_btree_prepare_update_v(btree, path, level, dat);
 		if (ret < 0)
 			return ret;
 	}
@@ -1859,7 +1874,7 @@ static int nilfs_btree_prepare_propagate_v(struct nilfs_btree *btree,
 	       !buffer_dirty(path[level].bp_bh)) {
 
 		WARN_ON(buffer_nilfs_volatile(path[level].bp_bh));
-		ret = nilfs_btree_prepare_update_v(btree, path, level);
+		ret = nilfs_btree_prepare_update_v(btree, path, level, dat);
 		if (ret < 0)
 			goto out;
 	}
@@ -1871,39 +1886,40 @@ static int nilfs_btree_prepare_propagate_v(struct nilfs_btree *btree,
 	/* error */
  out:
 	while (--level > minlevel)
-		nilfs_btree_abort_update_v(btree, path, level);
+		nilfs_btree_abort_update_v(btree, path, level, dat);
 	if (!buffer_nilfs_volatile(path[level].bp_bh))
-		nilfs_btree_abort_update_v(btree, path, level);
+		nilfs_btree_abort_update_v(btree, path, level, dat);
 	return ret;
 }
 
 static void nilfs_btree_commit_propagate_v(struct nilfs_btree *btree,
 					   struct nilfs_btree_path *path,
-					   int minlevel,
-					   int maxlevel,
-					   struct buffer_head *bh)
+					   int minlevel, int maxlevel,
+					   struct buffer_head *bh,
+					   struct inode *dat)
 {
 	int level;
 
 	if (!buffer_nilfs_volatile(path[minlevel].bp_bh))
-		nilfs_btree_commit_update_v(btree, path, minlevel);
+		nilfs_btree_commit_update_v(btree, path, minlevel, dat);
 
 	for (level = minlevel + 1; level <= maxlevel; level++)
-		nilfs_btree_commit_update_v(btree, path, level);
+		nilfs_btree_commit_update_v(btree, path, level, dat);
 }
 
 static int nilfs_btree_propagate_v(struct nilfs_btree *btree,
 				   struct nilfs_btree_path *path,
-				   int level,
-				   struct buffer_head *bh)
+				   int level, struct buffer_head *bh)
 {
 	int maxlevel, ret;
 	struct nilfs_btree_node *parent;
+	struct inode *dat = nilfs_bmap_get_dat(&btree->bt_bmap);
 	__u64 ptr;
 
 	get_bh(bh);
 	path[level].bp_bh = bh;
-	ret = nilfs_btree_prepare_propagate_v(btree, path, level, &maxlevel);
+	ret = nilfs_btree_prepare_propagate_v(btree, path, level, &maxlevel,
+					      dat);
 	if (ret < 0)
 		goto out;
 
@@ -1911,12 +1927,12 @@ static int nilfs_btree_propagate_v(struct nilfs_btree *btree,
 		parent = nilfs_btree_get_node(btree, path, level + 1);
 		ptr = nilfs_btree_node_get_ptr(btree, parent,
 					       path[level + 1].bp_index);
-		ret = nilfs_bmap_mark_dirty(&btree->bt_bmap, ptr);
+		ret = nilfs_dat_mark_dirty(dat, ptr);
 		if (ret < 0)
 			goto out;
 	}
 
-	nilfs_btree_commit_propagate_v(btree, path, level, maxlevel, bh);
+	nilfs_btree_commit_propagate_v(btree, path, level, maxlevel, bh, dat);
 
  out:
 	brelse(path[level].bp_bh);
@@ -1972,7 +1988,7 @@ static int nilfs_btree_propagate(const struct nilfs_bmap *bmap,
 static int nilfs_btree_propagate_gc(const struct nilfs_bmap *bmap,
 				    struct buffer_head *bh)
 {
-	return nilfs_bmap_mark_dirty(bmap, bh->b_blocknr);
+	return nilfs_dat_mark_dirty(nilfs_bmap_get_dat(bmap), bh->b_blocknr);
 }
 
 static void nilfs_btree_add_dirty_buffer(struct nilfs_btree *btree,
@@ -2086,6 +2102,7 @@ static int nilfs_btree_assign_v(struct nilfs_btree *btree,
 			       union nilfs_binfo *binfo)
 {
 	struct nilfs_btree_node *parent;
+	struct inode *dat = nilfs_bmap_get_dat(&btree->bt_bmap);
 	__u64 key;
 	__u64 ptr;
 	union nilfs_bmap_ptr_req req;
@@ -2095,9 +2112,10 @@ static int nilfs_btree_assign_v(struct nilfs_btree *btree,
 	ptr = nilfs_btree_node_get_ptr(btree, parent,
 				       path[level + 1].bp_index);
 	req.bpr_ptr = ptr;
-	ret = nilfs_bmap_start_v(&btree->bt_bmap, &req, blocknr);
-	if (unlikely(ret < 0))
+	ret = nilfs_dat_prepare_start(dat, &req.bpr_req);
+	if (ret < 0)
 		return ret;
+	nilfs_dat_commit_start(dat, &req.bpr_req, blocknr);
 
 	key = nilfs_btree_node_get_key(parent, path[level + 1].bp_index);
 	/* on-disk format */
@@ -2155,13 +2173,12 @@ static int nilfs_btree_assign_gc(struct nilfs_bmap *bmap,
 				      sector_t blocknr,
 				      union nilfs_binfo *binfo)
 {
-	struct nilfs_btree *btree;
 	struct nilfs_btree_node *node;
 	__u64 key;
 	int ret;
 
-	btree = (struct nilfs_btree *)bmap;
-	ret = nilfs_bmap_move_v(bmap, (*bh)->b_blocknr, blocknr);
+	ret = nilfs_dat_move(nilfs_bmap_get_dat(bmap), (*bh)->b_blocknr,
+			     blocknr);
 	if (ret < 0)
 		return ret;
 
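As a point of comparison, the removed nilfs_bmap_start_v() wrapper collapses into the prepare/commit pair that nilfs_btree_assign_v() above (and nilfs_direct_assign_v() below) now open-code against the DAT inode. A sketch under that assumption (example_start is a hypothetical name; error handling is trimmed):

	static int example_start(struct inode *dat, union nilfs_bmap_ptr_req *req,
				 sector_t blocknr)
	{
		/* Reserve the virtual block, then bind it to a disk block. */
		int ret = nilfs_dat_prepare_start(dat, &req->bpr_req);

		if (!ret)
			nilfs_dat_commit_start(dat, &req->bpr_req, blocknr);
		return ret;
	}
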
diff --git a/fs/nilfs2/direct.c b/fs/nilfs2/direct.c
index 342d9765df8d..d369ac718277 100644
--- a/fs/nilfs2/direct.c
+++ b/fs/nilfs2/direct.c
@@ -125,106 +125,64 @@ static void nilfs_direct_set_target_v(struct nilfs_direct *direct,
 	direct->d_bmap.b_last_allocated_ptr = ptr;
 }
 
-static int nilfs_direct_prepare_insert(struct nilfs_direct *direct,
-				       __u64 key,
-				       union nilfs_bmap_ptr_req *req,
-				       struct nilfs_bmap_stats *stats)
-{
-	int ret;
-
-	if (NILFS_BMAP_USE_VBN(&direct->d_bmap))
-		req->bpr_ptr = nilfs_direct_find_target_v(direct, key);
-	ret = nilfs_bmap_prepare_alloc_ptr(&direct->d_bmap, req);
-	if (ret < 0)
-		return ret;
-
-	stats->bs_nblocks = 1;
-	return 0;
-}
-
-static void nilfs_direct_commit_insert(struct nilfs_direct *direct,
-				       union nilfs_bmap_ptr_req *req,
-				       __u64 key, __u64 ptr)
-{
-	struct buffer_head *bh;
-
-	/* ptr must be a pointer to a buffer head. */
-	bh = (struct buffer_head *)((unsigned long)ptr);
-	set_buffer_nilfs_volatile(bh);
-
-	nilfs_bmap_commit_alloc_ptr(&direct->d_bmap, req);
-	nilfs_direct_set_ptr(direct, key, req->bpr_ptr);
-
-	if (!nilfs_bmap_dirty(&direct->d_bmap))
-		nilfs_bmap_set_dirty(&direct->d_bmap);
-
-	if (NILFS_BMAP_USE_VBN(&direct->d_bmap))
-		nilfs_direct_set_target_v(direct, key, req->bpr_ptr);
-}
-
 static int nilfs_direct_insert(struct nilfs_bmap *bmap, __u64 key, __u64 ptr)
 {
-	struct nilfs_direct *direct;
+	struct nilfs_direct *direct = (struct nilfs_direct *)bmap;
 	union nilfs_bmap_ptr_req req;
-	struct nilfs_bmap_stats stats;
+	struct inode *dat = NULL;
+	struct buffer_head *bh;
 	int ret;
 
-	direct = (struct nilfs_direct *)bmap;
 	if (key > NILFS_DIRECT_KEY_MAX)
 		return -ENOENT;
 	if (nilfs_direct_get_ptr(direct, key) != NILFS_BMAP_INVALID_PTR)
 		return -EEXIST;
 
-	ret = nilfs_direct_prepare_insert(direct, key, &req, &stats);
-	if (ret < 0)
-		return ret;
-	nilfs_direct_commit_insert(direct, &req, key, ptr);
-	nilfs_bmap_add_blocks(bmap, stats.bs_nblocks);
+	if (NILFS_BMAP_USE_VBN(bmap)) {
+		req.bpr_ptr = nilfs_direct_find_target_v(direct, key);
+		dat = nilfs_bmap_get_dat(bmap);
+	}
+	ret = nilfs_bmap_prepare_alloc_ptr(bmap, &req, dat);
+	if (!ret) {
+		/* ptr must be a pointer to a buffer head. */
+		bh = (struct buffer_head *)((unsigned long)ptr);
+		set_buffer_nilfs_volatile(bh);
 
-	return 0;
-}
+		nilfs_bmap_commit_alloc_ptr(bmap, &req, dat);
+		nilfs_direct_set_ptr(direct, key, req.bpr_ptr);
 
-static int nilfs_direct_prepare_delete(struct nilfs_direct *direct,
-				       union nilfs_bmap_ptr_req *req,
-				       __u64 key,
-				       struct nilfs_bmap_stats *stats)
-{
-	int ret;
+		if (!nilfs_bmap_dirty(bmap))
+			nilfs_bmap_set_dirty(bmap);
 
-	req->bpr_ptr = nilfs_direct_get_ptr(direct, key);
-	ret = nilfs_bmap_prepare_end_ptr(&direct->d_bmap, req);
-	if (!ret)
-		stats->bs_nblocks = 1;
-	return ret;
-}
+		if (NILFS_BMAP_USE_VBN(bmap))
+			nilfs_direct_set_target_v(direct, key, req.bpr_ptr);
 
-static void nilfs_direct_commit_delete(struct nilfs_direct *direct,
-				       union nilfs_bmap_ptr_req *req,
-				       __u64 key)
-{
-	nilfs_bmap_commit_end_ptr(&direct->d_bmap, req);
-	nilfs_direct_set_ptr(direct, key, NILFS_BMAP_INVALID_PTR);
+		nilfs_bmap_add_blocks(bmap, 1);
+	}
+	return ret;
 }
 
 static int nilfs_direct_delete(struct nilfs_bmap *bmap, __u64 key)
 {
-	struct nilfs_direct *direct;
+	struct nilfs_direct *direct = (struct nilfs_direct *)bmap;
 	union nilfs_bmap_ptr_req req;
-	struct nilfs_bmap_stats stats;
+	struct inode *dat;
 	int ret;
 
-	direct = (struct nilfs_direct *)bmap;
-	if ((key > NILFS_DIRECT_KEY_MAX) ||
+	if (key > NILFS_DIRECT_KEY_MAX ||
 	    nilfs_direct_get_ptr(direct, key) == NILFS_BMAP_INVALID_PTR)
 		return -ENOENT;
 
-	ret = nilfs_direct_prepare_delete(direct, &req, key, &stats);
-	if (ret < 0)
-		return ret;
-	nilfs_direct_commit_delete(direct, &req, key);
-	nilfs_bmap_sub_blocks(bmap, stats.bs_nblocks);
+	dat = NILFS_BMAP_USE_VBN(bmap) ? nilfs_bmap_get_dat(bmap) : NULL;
+	req.bpr_ptr = nilfs_direct_get_ptr(direct, key);
 
-	return 0;
+	ret = nilfs_bmap_prepare_end_ptr(bmap, &req, dat);
+	if (!ret) {
+		nilfs_bmap_commit_end_ptr(bmap, &req, dat);
+		nilfs_direct_set_ptr(direct, key, NILFS_BMAP_INVALID_PTR);
+		nilfs_bmap_sub_blocks(bmap, 1);
+	}
+	return ret;
 }
 
 static int nilfs_direct_last_key(const struct nilfs_bmap *bmap, __u64 *keyp)
@@ -310,59 +268,56 @@ int nilfs_direct_delete_and_convert(struct nilfs_bmap *bmap,
 	return 0;
 }
 
-static int nilfs_direct_propagate_v(struct nilfs_direct *direct,
-				    struct buffer_head *bh)
+static int nilfs_direct_propagate(const struct nilfs_bmap *bmap,
+				  struct buffer_head *bh)
 {
-	union nilfs_bmap_ptr_req oldreq, newreq;
+	struct nilfs_direct *direct = (struct nilfs_direct *)bmap;
+	struct nilfs_palloc_req oldreq, newreq;
+	struct inode *dat;
 	__u64 key;
 	__u64 ptr;
 	int ret;
 
-	key = nilfs_bmap_data_get_key(&direct->d_bmap, bh);
+	if (!NILFS_BMAP_USE_VBN(bmap))
+		return 0;
+
+	dat = nilfs_bmap_get_dat(bmap);
+	key = nilfs_bmap_data_get_key(bmap, bh);
 	ptr = nilfs_direct_get_ptr(direct, key);
 	if (!buffer_nilfs_volatile(bh)) {
-		oldreq.bpr_ptr = ptr;
-		newreq.bpr_ptr = ptr;
-		ret = nilfs_bmap_prepare_update_v(&direct->d_bmap, &oldreq,
-						  &newreq);
+		oldreq.pr_entry_nr = ptr;
+		newreq.pr_entry_nr = ptr;
+		ret = nilfs_dat_prepare_update(dat, &oldreq, &newreq);
 		if (ret < 0)
 			return ret;
-		nilfs_bmap_commit_update_v(&direct->d_bmap, &oldreq, &newreq);
+		nilfs_dat_commit_update(dat, &oldreq, &newreq,
+					bmap->b_ptr_type == NILFS_BMAP_PTR_VS);
 		set_buffer_nilfs_volatile(bh);
-		nilfs_direct_set_ptr(direct, key, newreq.bpr_ptr);
+		nilfs_direct_set_ptr(direct, key, newreq.pr_entry_nr);
 	} else
-		ret = nilfs_bmap_mark_dirty(&direct->d_bmap, ptr);
+		ret = nilfs_dat_mark_dirty(dat, ptr);
 
 	return ret;
 }
 
-static int nilfs_direct_propagate(const struct nilfs_bmap *bmap,
-				  struct buffer_head *bh)
-{
-	struct nilfs_direct *direct = (struct nilfs_direct *)bmap;
-
-	return NILFS_BMAP_USE_VBN(bmap) ?
-		nilfs_direct_propagate_v(direct, bh) : 0;
-}
-
 static int nilfs_direct_assign_v(struct nilfs_direct *direct,
 				 __u64 key, __u64 ptr,
 				 struct buffer_head **bh,
 				 sector_t blocknr,
 				 union nilfs_binfo *binfo)
 {
+	struct inode *dat = nilfs_bmap_get_dat(&direct->d_bmap);
 	union nilfs_bmap_ptr_req req;
 	int ret;
 
 	req.bpr_ptr = ptr;
-	ret = nilfs_bmap_start_v(&direct->d_bmap, &req, blocknr);
-	if (unlikely(ret < 0))
-		return ret;
-
-	binfo->bi_v.bi_vblocknr = nilfs_bmap_ptr_to_dptr(ptr);
-	binfo->bi_v.bi_blkoff = nilfs_bmap_key_to_dkey(key);
-
-	return 0;
+	ret = nilfs_dat_prepare_start(dat, &req.bpr_req);
+	if (!ret) {
+		nilfs_dat_commit_start(dat, &req.bpr_req, blocknr);
+		binfo->bi_v.bi_vblocknr = nilfs_bmap_ptr_to_dptr(ptr);
+		binfo->bi_v.bi_blkoff = nilfs_bmap_key_to_dkey(key);
+	}
+	return ret;
 }
 
 static int nilfs_direct_assign_p(struct nilfs_direct *direct,