author    Ryusuke Konishi <konishi.ryusuke@lab.ntt.co.jp>  2009-05-23 14:25:44 -0400
committer Ryusuke Konishi <konishi.ryusuke@lab.ntt.co.jp>  2009-06-10 10:41:10 -0400
commit    d4b961576df2769b936bd967b01e8c607c3c9ad8 (patch)
tree      3d5d31cb7b4ba31a5b8232d0cd0edac2cf368fae /fs
parent    3033342a0b76048e32ce1faebfa85cf8f1aa93b5 (diff)
nilfs2: remove bmap pointer operations
Previously, the bmap code of nilfs used three types of function tables. The heavy reliance on indirect function calls hurt source readability and produced many indirect jumps, which tend to defeat the branch prediction of processors.

This eliminates one of those function tables, nilfs_bmap_ptr_operations, which was used to dispatch the low-level pointer operations of the nilfs bmap.

A new integer field, "b_ptr_type", is added to the nilfs_bmap struct, and its value is used to select the pointer operations.

Signed-off-by: Ryusuke Konishi <konishi.ryusuke@lab.ntt.co.jp>
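To make the pattern concrete: the change swaps a per-bmap table of function pointers for an integer type field that small inline helpers test before calling the backing routines directly. The following is a minimal, self-contained C sketch of that dispatch style; the names (ptr_type, prepare_alloc_ptr, and so on) are illustrative stand-ins, not the nilfs2 identifiers used in the patch below.

    #include <stdio.h>

    /* Illustrative stand-ins, not the nilfs2 structures. */
    enum ptr_type {
            PTR_P  = 0,     /* physical block numbers */
            PTR_VS = 1,     /* virtual block numbers, single version */
            PTR_VM = 2,     /* virtual block numbers, multiple versions */
            PTR_U  = -1,    /* pointer operations never performed */
    };

    struct bmap {
            enum ptr_type ptr_type;         /* replaces a function table */
            unsigned long last_allocated_ptr;
    };

    /* Analogue of a "use virtual block numbers" predicate. */
    static inline int use_vbn(const struct bmap *b)
    {
            return b->ptr_type > 0;
    }

    static int prepare_alloc_v(struct bmap *b)
    {
            printf("virtual allocation path\n");  /* would consult an allocator */
            return 0;
    }

    /*
     * Dispatch by testing the integer type instead of jumping through a
     * function pointer; the compiler sees both branches and can inline them.
     */
    static inline int prepare_alloc_ptr(struct bmap *b)
    {
            if (use_vbn(b))
                    return prepare_alloc_v(b);
            b->last_allocated_ptr++;        /* physical: hand out the next block */
            return 0;
    }

    int main(void)
    {
            struct bmap dat_bmap  = { .ptr_type = PTR_P  };
            struct bmap file_bmap = { .ptr_type = PTR_VM };

            prepare_alloc_ptr(&dat_bmap);   /* takes the physical branch */
            prepare_alloc_ptr(&file_bmap);  /* takes the virtual branch */
            return 0;
    }

The direct, predictable branch and the possibility of inlining both arms are the readability and branch-prediction gains the message refers to.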
Diffstat (limited to 'fs')
-rw-r--r--  fs/nilfs2/bmap.c    181
-rw-r--r--  fs/nilfs2/bmap.h    110
-rw-r--r--  fs/nilfs2/btree.c   100
-rw-r--r--  fs/nilfs2/direct.c   30
4 files changed, 187 insertions(+), 234 deletions(-)
diff --git a/fs/nilfs2/bmap.c b/fs/nilfs2/bmap.c
index de67d2a12515..51824c764294 100644
--- a/fs/nilfs2/bmap.c
+++ b/fs/nilfs2/bmap.c
@@ -31,21 +31,26 @@
 #include "dat.h"
 #include "alloc.h"
 
+static struct inode *nilfs_bmap_get_dat(const struct nilfs_bmap *bmap)
+{
+	return nilfs_dat_inode(NILFS_I_NILFS(bmap->b_inode));
+}
+
 int nilfs_bmap_lookup_at_level(struct nilfs_bmap *bmap, __u64 key, int level,
 			       __u64 *ptrp)
 {
-	__u64 ptr;
+	sector_t blocknr;
 	int ret;
 
 	down_read(&bmap->b_sem);
 	ret = bmap->b_ops->bop_lookup(bmap, key, level, ptrp);
 	if (ret < 0)
 		goto out;
-	if (bmap->b_pops->bpop_translate != NULL) {
-		ret = bmap->b_pops->bpop_translate(bmap, *ptrp, &ptr);
-		if (ret < 0)
-			goto out;
-		*ptrp = ptr;
+	if (NILFS_BMAP_USE_VBN(bmap)) {
+		ret = nilfs_dat_translate(nilfs_bmap_get_dat(bmap), *ptrp,
+					  &blocknr);
+		if (!ret)
+			*ptrp = blocknr;
 	}
 
  out:
@@ -442,11 +447,6 @@ __u64 nilfs_bmap_find_target_seq(const struct nilfs_bmap *bmap, __u64 key)
 	return NILFS_BMAP_INVALID_PTR;
 }
 
-static struct inode *nilfs_bmap_get_dat(const struct nilfs_bmap *bmap)
-{
-	return nilfs_dat_inode(NILFS_I_NILFS(bmap->b_inode));
-}
-
 #define NILFS_BMAP_GROUP_DIV	8
 __u64 nilfs_bmap_find_target_in_group(const struct nilfs_bmap *bmap)
 {
@@ -459,20 +459,20 @@ __u64 nilfs_bmap_find_target_in_group(const struct nilfs_bmap *bmap)
 		(entries_per_group / NILFS_BMAP_GROUP_DIV);
 }
 
-static int nilfs_bmap_prepare_alloc_v(struct nilfs_bmap *bmap,
+int nilfs_bmap_prepare_alloc_v(struct nilfs_bmap *bmap,
 			       union nilfs_bmap_ptr_req *req)
 {
 	return nilfs_dat_prepare_alloc(nilfs_bmap_get_dat(bmap), &req->bpr_req);
 }
 
-static void nilfs_bmap_commit_alloc_v(struct nilfs_bmap *bmap,
+void nilfs_bmap_commit_alloc_v(struct nilfs_bmap *bmap,
 			       union nilfs_bmap_ptr_req *req)
 {
 	nilfs_dat_commit_alloc(nilfs_bmap_get_dat(bmap), &req->bpr_req);
 }
 
-static void nilfs_bmap_abort_alloc_v(struct nilfs_bmap *bmap,
+void nilfs_bmap_abort_alloc_v(struct nilfs_bmap *bmap,
 			      union nilfs_bmap_ptr_req *req)
 {
 	nilfs_dat_abort_alloc(nilfs_bmap_get_dat(bmap), &req->bpr_req);
 }
@@ -489,26 +489,21 @@ int nilfs_bmap_start_v(struct nilfs_bmap *bmap, union nilfs_bmap_ptr_req *req,
 	return ret;
 }
 
-static int nilfs_bmap_prepare_end_v(struct nilfs_bmap *bmap,
+int nilfs_bmap_prepare_end_v(struct nilfs_bmap *bmap,
 			     union nilfs_bmap_ptr_req *req)
 {
 	return nilfs_dat_prepare_end(nilfs_bmap_get_dat(bmap), &req->bpr_req);
 }
 
-static void nilfs_bmap_commit_end_v(struct nilfs_bmap *bmap,
+void nilfs_bmap_commit_end_v(struct nilfs_bmap *bmap,
 			     union nilfs_bmap_ptr_req *req)
-{
-	nilfs_dat_commit_end(nilfs_bmap_get_dat(bmap), &req->bpr_req, 0);
-}
-
-static void nilfs_bmap_commit_end_vmdt(struct nilfs_bmap *bmap,
-				       union nilfs_bmap_ptr_req *req)
 {
-	nilfs_dat_commit_end(nilfs_bmap_get_dat(bmap), &req->bpr_req, 1);
+	nilfs_dat_commit_end(nilfs_bmap_get_dat(bmap), &req->bpr_req,
+			     bmap->b_ptr_type == NILFS_BMAP_PTR_VS);
 }
 
-static void nilfs_bmap_abort_end_v(struct nilfs_bmap *bmap,
+void nilfs_bmap_abort_end_v(struct nilfs_bmap *bmap,
 			    union nilfs_bmap_ptr_req *req)
 {
 	nilfs_dat_abort_end(nilfs_bmap_get_dat(bmap), &req->bpr_req);
 }
@@ -524,116 +519,44 @@ int nilfs_bmap_mark_dirty(const struct nilfs_bmap *bmap, __u64 vblocknr)
 	return nilfs_dat_mark_dirty(nilfs_bmap_get_dat(bmap), vblocknr);
 }
 
-int nilfs_bmap_prepare_update(struct nilfs_bmap *bmap,
+int nilfs_bmap_prepare_update_v(struct nilfs_bmap *bmap,
 			      union nilfs_bmap_ptr_req *oldreq,
 			      union nilfs_bmap_ptr_req *newreq)
 {
+	struct inode *dat = nilfs_bmap_get_dat(bmap);
 	int ret;
 
-	ret = bmap->b_pops->bpop_prepare_end_ptr(bmap, oldreq);
+	ret = nilfs_dat_prepare_end(dat, &oldreq->bpr_req);
 	if (ret < 0)
 		return ret;
-	ret = bmap->b_pops->bpop_prepare_alloc_ptr(bmap, newreq);
+	ret = nilfs_dat_prepare_alloc(dat, &newreq->bpr_req);
 	if (ret < 0)
-		bmap->b_pops->bpop_abort_end_ptr(bmap, oldreq);
+		nilfs_dat_abort_end(dat, &oldreq->bpr_req);
 
 	return ret;
 }
 
-void nilfs_bmap_commit_update(struct nilfs_bmap *bmap,
+void nilfs_bmap_commit_update_v(struct nilfs_bmap *bmap,
 			      union nilfs_bmap_ptr_req *oldreq,
 			      union nilfs_bmap_ptr_req *newreq)
-{
-	bmap->b_pops->bpop_commit_end_ptr(bmap, oldreq);
-	bmap->b_pops->bpop_commit_alloc_ptr(bmap, newreq);
-}
-
-void nilfs_bmap_abort_update(struct nilfs_bmap *bmap,
-			     union nilfs_bmap_ptr_req *oldreq,
-			     union nilfs_bmap_ptr_req *newreq)
-{
-	bmap->b_pops->bpop_abort_end_ptr(bmap, oldreq);
-	bmap->b_pops->bpop_abort_alloc_ptr(bmap, newreq);
-}
-
-static int nilfs_bmap_translate_v(const struct nilfs_bmap *bmap, __u64 ptr,
-				  __u64 *ptrp)
 {
-	sector_t blocknr;
-	int ret;
-
-	ret = nilfs_dat_translate(nilfs_bmap_get_dat(bmap), ptr, &blocknr);
-	if (ret < 0)
-		return ret;
-	if (ptrp != NULL)
-		*ptrp = blocknr;
-	return 0;
-}
+	struct inode *dat = nilfs_bmap_get_dat(bmap);
 
-static int nilfs_bmap_prepare_alloc_p(struct nilfs_bmap *bmap,
-				      union nilfs_bmap_ptr_req *req)
-{
-	/* ignore target ptr */
-	req->bpr_ptr = bmap->b_last_allocated_ptr++;
-	return 0;
+	nilfs_dat_commit_end(dat, &oldreq->bpr_req,
+			     bmap->b_ptr_type == NILFS_BMAP_PTR_VS);
+	nilfs_dat_commit_alloc(dat, &newreq->bpr_req);
 }
 
-static void nilfs_bmap_commit_alloc_p(struct nilfs_bmap *bmap,
-				      union nilfs_bmap_ptr_req *req)
+void nilfs_bmap_abort_update_v(struct nilfs_bmap *bmap,
+			       union nilfs_bmap_ptr_req *oldreq,
+			       union nilfs_bmap_ptr_req *newreq)
 {
-	/* do nothing */
-}
+	struct inode *dat = nilfs_bmap_get_dat(bmap);
 
-static void nilfs_bmap_abort_alloc_p(struct nilfs_bmap *bmap,
-				     union nilfs_bmap_ptr_req *req)
-{
-	bmap->b_last_allocated_ptr--;
+	nilfs_dat_abort_end(dat, &oldreq->bpr_req);
+	nilfs_dat_abort_alloc(dat, &newreq->bpr_req);
 }
 
-static const struct nilfs_bmap_ptr_operations nilfs_bmap_ptr_ops_v = {
-	.bpop_prepare_alloc_ptr = nilfs_bmap_prepare_alloc_v,
-	.bpop_commit_alloc_ptr = nilfs_bmap_commit_alloc_v,
-	.bpop_abort_alloc_ptr = nilfs_bmap_abort_alloc_v,
-	.bpop_prepare_end_ptr = nilfs_bmap_prepare_end_v,
-	.bpop_commit_end_ptr = nilfs_bmap_commit_end_v,
-	.bpop_abort_end_ptr = nilfs_bmap_abort_end_v,
-
-	.bpop_translate = nilfs_bmap_translate_v,
-};
-
-static const struct nilfs_bmap_ptr_operations nilfs_bmap_ptr_ops_vmdt = {
-	.bpop_prepare_alloc_ptr = nilfs_bmap_prepare_alloc_v,
-	.bpop_commit_alloc_ptr = nilfs_bmap_commit_alloc_v,
-	.bpop_abort_alloc_ptr = nilfs_bmap_abort_alloc_v,
-	.bpop_prepare_end_ptr = nilfs_bmap_prepare_end_v,
-	.bpop_commit_end_ptr = nilfs_bmap_commit_end_vmdt,
-	.bpop_abort_end_ptr = nilfs_bmap_abort_end_v,
-
-	.bpop_translate = nilfs_bmap_translate_v,
-};
-
-static const struct nilfs_bmap_ptr_operations nilfs_bmap_ptr_ops_p = {
-	.bpop_prepare_alloc_ptr = nilfs_bmap_prepare_alloc_p,
-	.bpop_commit_alloc_ptr = nilfs_bmap_commit_alloc_p,
-	.bpop_abort_alloc_ptr = nilfs_bmap_abort_alloc_p,
-	.bpop_prepare_end_ptr = NULL,
-	.bpop_commit_end_ptr = NULL,
-	.bpop_abort_end_ptr = NULL,
-
-	.bpop_translate = NULL,
-};
-
-static const struct nilfs_bmap_ptr_operations nilfs_bmap_ptr_ops_gc = {
-	.bpop_prepare_alloc_ptr = NULL,
-	.bpop_commit_alloc_ptr = NULL,
-	.bpop_abort_alloc_ptr = NULL,
-	.bpop_prepare_end_ptr = NULL,
-	.bpop_commit_end_ptr = NULL,
-	.bpop_abort_end_ptr = NULL,
-
-	.bpop_translate = NULL,
-};
-
 static struct lock_class_key nilfs_bmap_dat_lock_key;
 
 /**
@@ -660,20 +583,20 @@ int nilfs_bmap_read(struct nilfs_bmap *bmap, struct nilfs_inode *raw_inode)
 	bmap->b_inode = &NILFS_BMAP_I(bmap)->vfs_inode;
 	switch (bmap->b_inode->i_ino) {
 	case NILFS_DAT_INO:
-		bmap->b_pops = &nilfs_bmap_ptr_ops_p;
-		bmap->b_last_allocated_key = 0;	/* XXX: use macro */
+		bmap->b_ptr_type = NILFS_BMAP_PTR_P;
+		bmap->b_last_allocated_key = 0;
 		bmap->b_last_allocated_ptr = NILFS_BMAP_NEW_PTR_INIT;
 		lockdep_set_class(&bmap->b_sem, &nilfs_bmap_dat_lock_key);
 		break;
 	case NILFS_CPFILE_INO:
 	case NILFS_SUFILE_INO:
-		bmap->b_pops = &nilfs_bmap_ptr_ops_vmdt;
-		bmap->b_last_allocated_key = 0;	/* XXX: use macro */
+		bmap->b_ptr_type = NILFS_BMAP_PTR_VS;
+		bmap->b_last_allocated_key = 0;
 		bmap->b_last_allocated_ptr = NILFS_BMAP_INVALID_PTR;
 		break;
 	default:
-		bmap->b_pops = &nilfs_bmap_ptr_ops_v;
-		bmap->b_last_allocated_key = 0;	/* XXX: use macro */
+		bmap->b_ptr_type = NILFS_BMAP_PTR_VM;
+		bmap->b_last_allocated_key = 0;
 		bmap->b_last_allocated_ptr = NILFS_BMAP_INVALID_PTR;
 		break;
 	}
@@ -705,7 +628,7 @@ void nilfs_bmap_init_gc(struct nilfs_bmap *bmap)
 	memset(&bmap->b_u, 0, NILFS_BMAP_SIZE);
 	init_rwsem(&bmap->b_sem);
 	bmap->b_inode = &NILFS_BMAP_I(bmap)->vfs_inode;
-	bmap->b_pops = &nilfs_bmap_ptr_ops_gc;
+	bmap->b_ptr_type = NILFS_BMAP_PTR_U;
 	bmap->b_last_allocated_key = 0;
 	bmap->b_last_allocated_ptr = NILFS_BMAP_INVALID_PTR;
 	bmap->b_state = 0;
diff --git a/fs/nilfs2/bmap.h b/fs/nilfs2/bmap.h
index 28c77bb71bb7..47f5b74713c0 100644
--- a/fs/nilfs2/bmap.h
+++ b/fs/nilfs2/bmap.h
@@ -86,27 +86,6 @@ struct nilfs_bmap_operations {
 };
 
 
-/**
- * struct nilfs_bmap_ptr_operations - bmap ptr operation table
- */
-struct nilfs_bmap_ptr_operations {
-	int (*bpop_prepare_alloc_ptr)(struct nilfs_bmap *,
-				      union nilfs_bmap_ptr_req *);
-	void (*bpop_commit_alloc_ptr)(struct nilfs_bmap *,
-				      union nilfs_bmap_ptr_req *);
-	void (*bpop_abort_alloc_ptr)(struct nilfs_bmap *,
-				     union nilfs_bmap_ptr_req *);
-	int (*bpop_prepare_end_ptr)(struct nilfs_bmap *,
-				    union nilfs_bmap_ptr_req *);
-	void (*bpop_commit_end_ptr)(struct nilfs_bmap *,
-				    union nilfs_bmap_ptr_req *);
-	void (*bpop_abort_end_ptr)(struct nilfs_bmap *,
-				   union nilfs_bmap_ptr_req *);
-
-	int (*bpop_translate)(const struct nilfs_bmap *, __u64, __u64 *);
-};
-
-
 #define NILFS_BMAP_SIZE		(NILFS_INODE_BMAP_SIZE * sizeof(__le64))
 #define NILFS_BMAP_KEY_BIT	(sizeof(unsigned long) * 8 /* CHAR_BIT */)
 #define NILFS_BMAP_NEW_PTR_INIT	\
@@ -124,9 +103,9 @@ static inline int nilfs_bmap_is_new_ptr(unsigned long ptr)
  * @b_sem: semaphore
  * @b_inode: owner of bmap
  * @b_ops: bmap operation table
- * @b_pops: bmap ptr operation table
 * @b_last_allocated_key: last allocated key for data block
 * @b_last_allocated_ptr: last allocated ptr for data block
+ * @b_ptr_type: pointer type
 * @b_state: state
 */
 struct nilfs_bmap {
@@ -137,12 +116,22 @@ struct nilfs_bmap {
 	struct rw_semaphore b_sem;
 	struct inode *b_inode;
 	const struct nilfs_bmap_operations *b_ops;
-	const struct nilfs_bmap_ptr_operations *b_pops;
 	__u64 b_last_allocated_key;
 	__u64 b_last_allocated_ptr;
+	int b_ptr_type;
 	int b_state;
 };
 
+/* pointer type */
+#define NILFS_BMAP_PTR_P	0	/* physical block number (i.e. LBN) */
+#define NILFS_BMAP_PTR_VS	1	/* virtual block number (single
+					   version) */
+#define NILFS_BMAP_PTR_VM	2	/* virtual block number (has multiple
+					   versions) */
+#define NILFS_BMAP_PTR_U	(-1)	/* never perform pointer operations */
+
+#define NILFS_BMAP_USE_VBN(bmap)	((bmap)->b_ptr_type > 0)
+
 /* state */
 #define NILFS_BMAP_DIRTY	0x00000001
 
@@ -171,6 +160,63 @@ void nilfs_bmap_commit_gcdat(struct nilfs_bmap *, struct nilfs_bmap *);
 /*
  * Internal use only
  */
+int nilfs_bmap_prepare_alloc_v(struct nilfs_bmap *,
+			       union nilfs_bmap_ptr_req *);
+void nilfs_bmap_commit_alloc_v(struct nilfs_bmap *,
+			       union nilfs_bmap_ptr_req *);
+void nilfs_bmap_abort_alloc_v(struct nilfs_bmap *,
+			      union nilfs_bmap_ptr_req *);
+
+static inline int nilfs_bmap_prepare_alloc_ptr(struct nilfs_bmap *bmap,
+					       union nilfs_bmap_ptr_req *req)
+{
+	if (NILFS_BMAP_USE_VBN(bmap))
+		return nilfs_bmap_prepare_alloc_v(bmap, req);
+	/* ignore target ptr */
+	req->bpr_ptr = bmap->b_last_allocated_ptr++;
+	return 0;
+}
+
+static inline void nilfs_bmap_commit_alloc_ptr(struct nilfs_bmap *bmap,
+					       union nilfs_bmap_ptr_req *req)
+{
+	if (NILFS_BMAP_USE_VBN(bmap))
+		nilfs_bmap_commit_alloc_v(bmap, req);
+}
+
+static inline void nilfs_bmap_abort_alloc_ptr(struct nilfs_bmap *bmap,
+					      union nilfs_bmap_ptr_req *req)
+{
+	if (NILFS_BMAP_USE_VBN(bmap))
+		nilfs_bmap_abort_alloc_v(bmap, req);
+	else
+		bmap->b_last_allocated_ptr--;
+}
+
+int nilfs_bmap_prepare_end_v(struct nilfs_bmap *, union nilfs_bmap_ptr_req *);
+void nilfs_bmap_commit_end_v(struct nilfs_bmap *, union nilfs_bmap_ptr_req *);
+void nilfs_bmap_abort_end_v(struct nilfs_bmap *, union nilfs_bmap_ptr_req *);
+
+static inline int nilfs_bmap_prepare_end_ptr(struct nilfs_bmap *bmap,
+					     union nilfs_bmap_ptr_req *req)
+{
+	return NILFS_BMAP_USE_VBN(bmap) ?
+		nilfs_bmap_prepare_end_v(bmap, req) : 0;
+}
+
+static inline void nilfs_bmap_commit_end_ptr(struct nilfs_bmap *bmap,
+					     union nilfs_bmap_ptr_req *req)
+{
+	if (NILFS_BMAP_USE_VBN(bmap))
+		nilfs_bmap_commit_end_v(bmap, req);
+}
+
+static inline void nilfs_bmap_abort_end_ptr(struct nilfs_bmap *bmap,
+					    union nilfs_bmap_ptr_req *req)
+{
+	if (NILFS_BMAP_USE_VBN(bmap))
+		nilfs_bmap_abort_end_v(bmap, req);
+}
 
 int nilfs_bmap_start_v(struct nilfs_bmap *, union nilfs_bmap_ptr_req *,
 		       sector_t);
@@ -184,15 +230,15 @@ __u64 nilfs_bmap_data_get_key(const struct nilfs_bmap *,
 __u64 nilfs_bmap_find_target_seq(const struct nilfs_bmap *, __u64);
 __u64 nilfs_bmap_find_target_in_group(const struct nilfs_bmap *);
 
-int nilfs_bmap_prepare_update(struct nilfs_bmap *,
+int nilfs_bmap_prepare_update_v(struct nilfs_bmap *,
 			      union nilfs_bmap_ptr_req *,
 			      union nilfs_bmap_ptr_req *);
-void nilfs_bmap_commit_update(struct nilfs_bmap *,
+void nilfs_bmap_commit_update_v(struct nilfs_bmap *,
 			      union nilfs_bmap_ptr_req *,
 			      union nilfs_bmap_ptr_req *);
-void nilfs_bmap_abort_update(struct nilfs_bmap *,
+void nilfs_bmap_abort_update_v(struct nilfs_bmap *,
 			     union nilfs_bmap_ptr_req *,
 			     union nilfs_bmap_ptr_req *);
 
 void nilfs_bmap_add_blocks(const struct nilfs_bmap *, int);
 void nilfs_bmap_sub_blocks(const struct nilfs_bmap *, int);
diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c
index f5a0ec64e1aa..20e3fd0f4d4f 100644
--- a/fs/nilfs2/btree.c
+++ b/fs/nilfs2/btree.c
@@ -917,8 +917,8 @@ static int nilfs_btree_prepare_insert(struct nilfs_btree *btree,
 	path[level].bp_newreq.bpr_ptr =
 		btree->bt_ops->btop_find_target(btree, path, key);
 
-	ret = btree->bt_bmap.b_pops->bpop_prepare_alloc_ptr(
-		&btree->bt_bmap, &path[level].bp_newreq);
+	ret = nilfs_bmap_prepare_alloc_ptr(&btree->bt_bmap,
+					   &path[level].bp_newreq);
 	if (ret < 0)
 		goto err_out_data;
 
@@ -976,8 +976,8 @@ static int nilfs_btree_prepare_insert(struct nilfs_btree *btree,
 		/* split */
 		path[level].bp_newreq.bpr_ptr =
 			path[level - 1].bp_newreq.bpr_ptr + 1;
-		ret = btree->bt_bmap.b_pops->bpop_prepare_alloc_ptr(
-			&btree->bt_bmap, &path[level].bp_newreq);
+		ret = nilfs_bmap_prepare_alloc_ptr(&btree->bt_bmap,
+						   &path[level].bp_newreq);
 		if (ret < 0)
 			goto err_out_child_node;
 		ret = nilfs_btree_get_new_block(btree,
@@ -1008,8 +1008,8 @@ static int nilfs_btree_prepare_insert(struct nilfs_btree *btree,
 
 	/* grow */
 	path[level].bp_newreq.bpr_ptr = path[level - 1].bp_newreq.bpr_ptr + 1;
-	ret = btree->bt_bmap.b_pops->bpop_prepare_alloc_ptr(
-		&btree->bt_bmap, &path[level].bp_newreq);
+	ret = nilfs_bmap_prepare_alloc_ptr(&btree->bt_bmap,
+					   &path[level].bp_newreq);
 	if (ret < 0)
 		goto err_out_child_node;
 	ret = nilfs_btree_get_new_block(btree, path[level].bp_newreq.bpr_ptr,
@@ -1037,18 +1037,16 @@ static int nilfs_btree_prepare_insert(struct nilfs_btree *btree,
 
 	/* error */
  err_out_curr_node:
-	btree->bt_bmap.b_pops->bpop_abort_alloc_ptr(&btree->bt_bmap,
-						    &path[level].bp_newreq);
+	nilfs_bmap_abort_alloc_ptr(&btree->bt_bmap, &path[level].bp_newreq);
  err_out_child_node:
 	for (level--; level > NILFS_BTREE_LEVEL_DATA; level--) {
 		nilfs_btnode_delete(path[level].bp_sib_bh);
-		btree->bt_bmap.b_pops->bpop_abort_alloc_ptr(
-			&btree->bt_bmap, &path[level].bp_newreq);
+		nilfs_bmap_abort_alloc_ptr(&btree->bt_bmap,
+					   &path[level].bp_newreq);
 
 	}
 
-	btree->bt_bmap.b_pops->bpop_abort_alloc_ptr(&btree->bt_bmap,
-						    &path[level].bp_newreq);
+	nilfs_bmap_abort_alloc_ptr(&btree->bt_bmap, &path[level].bp_newreq);
  err_out_data:
 	*levelp = level;
 	stats->bs_nblocks = 0;
@@ -1067,8 +1065,8 @@ static void nilfs_btree_commit_insert(struct nilfs_btree *btree,
 	btree->bt_ops->btop_set_target(btree, key, ptr);
 
 	for (level = NILFS_BTREE_LEVEL_NODE_MIN; level <= maxlevel; level++) {
-		btree->bt_bmap.b_pops->bpop_commit_alloc_ptr(
-			&btree->bt_bmap, &path[level - 1].bp_newreq);
+		nilfs_bmap_commit_alloc_ptr(&btree->bt_bmap,
+					    &path[level - 1].bp_newreq);
 		path[level].bp_op(btree, path, level, &key, &ptr);
 	}
 
@@ -1312,12 +1310,10 @@ static int nilfs_btree_prepare_delete(struct nilfs_btree *btree,
 		path[level].bp_oldreq.bpr_ptr =
 			nilfs_btree_node_get_ptr(btree, node,
 						 path[level].bp_index);
-		if (btree->bt_bmap.b_pops->bpop_prepare_end_ptr != NULL) {
-			ret = btree->bt_bmap.b_pops->bpop_prepare_end_ptr(
-				&btree->bt_bmap, &path[level].bp_oldreq);
-			if (ret < 0)
-				goto err_out_child_node;
-		}
+		ret = nilfs_bmap_prepare_end_ptr(&btree->bt_bmap,
+						 &path[level].bp_oldreq);
+		if (ret < 0)
+			goto err_out_child_node;
 
 		if (nilfs_btree_node_get_nchildren(btree, node) >
 		    nilfs_btree_node_nchildren_min(btree, node)) {
@@ -1391,12 +1387,12 @@ static int nilfs_btree_prepare_delete(struct nilfs_btree *btree,
 	node = nilfs_btree_get_root(btree);
 	path[level].bp_oldreq.bpr_ptr =
 		nilfs_btree_node_get_ptr(btree, node, path[level].bp_index);
-	if (btree->bt_bmap.b_pops->bpop_prepare_end_ptr != NULL) {
-		ret = btree->bt_bmap.b_pops->bpop_prepare_end_ptr(
-			&btree->bt_bmap, &path[level].bp_oldreq);
-		if (ret < 0)
-			goto err_out_child_node;
-	}
+
+	ret = nilfs_bmap_prepare_end_ptr(&btree->bt_bmap,
+					 &path[level].bp_oldreq);
+	if (ret < 0)
+		goto err_out_child_node;
+
 	/* child of the root node is deleted */
 	path[level].bp_op = nilfs_btree_do_delete;
 	stats->bs_nblocks++;
@@ -1408,15 +1404,12 @@ static int nilfs_btree_prepare_delete(struct nilfs_btree *btree,
 
 	/* error */
  err_out_curr_node:
-	if (btree->bt_bmap.b_pops->bpop_abort_end_ptr != NULL)
-		btree->bt_bmap.b_pops->bpop_abort_end_ptr(
-			&btree->bt_bmap, &path[level].bp_oldreq);
+	nilfs_bmap_abort_end_ptr(&btree->bt_bmap, &path[level].bp_oldreq);
  err_out_child_node:
 	for (level--; level >= NILFS_BTREE_LEVEL_NODE_MIN; level--) {
 		brelse(path[level].bp_sib_bh);
-		if (btree->bt_bmap.b_pops->bpop_abort_end_ptr != NULL)
-			btree->bt_bmap.b_pops->bpop_abort_end_ptr(
-				&btree->bt_bmap, &path[level].bp_oldreq);
+		nilfs_bmap_abort_end_ptr(&btree->bt_bmap,
+					 &path[level].bp_oldreq);
 	}
 	*levelp = level;
 	stats->bs_nblocks = 0;
@@ -1430,9 +1423,8 @@ static void nilfs_btree_commit_delete(struct nilfs_btree *btree,
 	int level;
 
 	for (level = NILFS_BTREE_LEVEL_NODE_MIN; level <= maxlevel; level++) {
-		if (btree->bt_bmap.b_pops->bpop_commit_end_ptr != NULL)
-			btree->bt_bmap.b_pops->bpop_commit_end_ptr(
-				&btree->bt_bmap, &path[level].bp_oldreq);
+		nilfs_bmap_commit_end_ptr(&btree->bt_bmap,
+					  &path[level].bp_oldreq);
 		path[level].bp_op(btree, path, level, NULL, NULL);
 	}
 
@@ -1597,7 +1589,7 @@ nilfs_btree_prepare_convert_and_insert(struct nilfs_bmap *bmap, __u64 key,
 	if (btree->bt_ops->btop_find_target != NULL)
 		dreq->bpr_ptr
 			= btree->bt_ops->btop_find_target(btree, NULL, key);
-	ret = bmap->b_pops->bpop_prepare_alloc_ptr(bmap, dreq);
+	ret = nilfs_bmap_prepare_alloc_ptr(bmap, dreq);
 	if (ret < 0)
 		return ret;
 
@@ -1605,7 +1597,7 @@ nilfs_btree_prepare_convert_and_insert(struct nilfs_bmap *bmap, __u64 key,
 	stats->bs_nblocks++;
 	if (nreq != NULL) {
 		nreq->bpr_ptr = dreq->bpr_ptr + 1;
-		ret = bmap->b_pops->bpop_prepare_alloc_ptr(bmap, nreq);
+		ret = nilfs_bmap_prepare_alloc_ptr(bmap, nreq);
 		if (ret < 0)
 			goto err_out_dreq;
 
@@ -1622,9 +1614,9 @@ nilfs_btree_prepare_convert_and_insert(struct nilfs_bmap *bmap, __u64 key,
 
 	/* error */
  err_out_nreq:
-	bmap->b_pops->bpop_abort_alloc_ptr(bmap, nreq);
+	nilfs_bmap_abort_alloc_ptr(bmap, nreq);
  err_out_dreq:
-	bmap->b_pops->bpop_abort_alloc_ptr(bmap, dreq);
+	nilfs_bmap_abort_alloc_ptr(bmap, dreq);
 	stats->bs_nblocks = 0;
 	return ret;
 
@@ -1654,8 +1646,8 @@ nilfs_btree_commit_convert_and_insert(struct nilfs_bmap *bmap,
 	btree = (struct nilfs_btree *)bmap;
 	nilfs_btree_init(bmap);
 	if (nreq != NULL) {
-		bmap->b_pops->bpop_commit_alloc_ptr(bmap, dreq);
-		bmap->b_pops->bpop_commit_alloc_ptr(bmap, nreq);
+		nilfs_bmap_commit_alloc_ptr(bmap, dreq);
+		nilfs_bmap_commit_alloc_ptr(bmap, nreq);
 
 		/* create child node at level 1 */
 		lock_buffer(bh);
@@ -1677,7 +1669,7 @@ nilfs_btree_commit_convert_and_insert(struct nilfs_bmap *bmap,
 		nilfs_btree_node_init(btree, node, NILFS_BTREE_NODE_ROOT,
 				      2, 1, &keys[0], &tmpptr);
 	} else {
-		bmap->b_pops->bpop_commit_alloc_ptr(bmap, dreq);
+		nilfs_bmap_commit_alloc_ptr(bmap, dreq);
 
 		/* create root node at level 1 */
 		node = nilfs_btree_get_root(btree);
@@ -1758,9 +1750,9 @@ static int nilfs_btree_prepare_update_v(struct nilfs_btree *btree,
 		nilfs_btree_node_get_ptr(btree, parent,
 					 path[level + 1].bp_index);
 	path[level].bp_newreq.bpr_ptr = path[level].bp_oldreq.bpr_ptr + 1;
-	ret = nilfs_bmap_prepare_update(&btree->bt_bmap,
-					&path[level].bp_oldreq,
-					&path[level].bp_newreq);
+	ret = nilfs_bmap_prepare_update_v(&btree->bt_bmap,
+					  &path[level].bp_oldreq,
+					  &path[level].bp_newreq);
 	if (ret < 0)
 		return ret;
 
@@ -1772,9 +1764,9 @@ static int nilfs_btree_prepare_update_v(struct nilfs_btree *btree,
 			&NILFS_BMAP_I(&btree->bt_bmap)->i_btnode_cache,
 			&path[level].bp_ctxt);
 		if (ret < 0) {
-			nilfs_bmap_abort_update(&btree->bt_bmap,
-						&path[level].bp_oldreq,
-						&path[level].bp_newreq);
+			nilfs_bmap_abort_update_v(&btree->bt_bmap,
+						  &path[level].bp_oldreq,
+						  &path[level].bp_newreq);
 			return ret;
 		}
 	}
@@ -1788,9 +1780,9 @@ static void nilfs_btree_commit_update_v(struct nilfs_btree *btree,
 {
 	struct nilfs_btree_node *parent;
 
-	nilfs_bmap_commit_update(&btree->bt_bmap,
-				 &path[level].bp_oldreq,
-				 &path[level].bp_newreq);
+	nilfs_bmap_commit_update_v(&btree->bt_bmap,
+				   &path[level].bp_oldreq,
+				   &path[level].bp_newreq);
 
 	if (buffer_nilfs_node(path[level].bp_bh)) {
 		nilfs_btnode_commit_change_key(
@@ -1809,9 +1801,9 @@ static void nilfs_btree_abort_update_v(struct nilfs_btree *btree,
 				       struct nilfs_btree_path *path,
 				       int level)
 {
-	nilfs_bmap_abort_update(&btree->bt_bmap,
-				&path[level].bp_oldreq,
-				&path[level].bp_newreq);
+	nilfs_bmap_abort_update_v(&btree->bt_bmap,
+				  &path[level].bp_oldreq,
+				  &path[level].bp_newreq);
 	if (buffer_nilfs_node(path[level].bp_bh))
 		nilfs_btnode_abort_change_key(
 			&NILFS_BMAP_I(&btree->bt_bmap)->i_btnode_cache,
diff --git a/fs/nilfs2/direct.c b/fs/nilfs2/direct.c
index 8e8095c2e1bb..2a546c8f784e 100644
--- a/fs/nilfs2/direct.c
+++ b/fs/nilfs2/direct.c
@@ -92,8 +92,7 @@ static int nilfs_direct_prepare_insert(struct nilfs_direct *direct,
 
 	if (direct->d_ops->dop_find_target != NULL)
 		req->bpr_ptr = direct->d_ops->dop_find_target(direct, key);
-	ret = direct->d_bmap.b_pops->bpop_prepare_alloc_ptr(&direct->d_bmap,
-							    req);
+	ret = nilfs_bmap_prepare_alloc_ptr(&direct->d_bmap, req);
 	if (ret < 0)
 		return ret;
 
@@ -111,7 +110,7 @@ static void nilfs_direct_commit_insert(struct nilfs_direct *direct,
 	bh = (struct buffer_head *)((unsigned long)ptr);
 	set_buffer_nilfs_volatile(bh);
 
-	direct->d_bmap.b_pops->bpop_commit_alloc_ptr(&direct->d_bmap, req);
+	nilfs_bmap_commit_alloc_ptr(&direct->d_bmap, req);
 	nilfs_direct_set_ptr(direct, key, req->bpr_ptr);
 
 	if (!nilfs_bmap_dirty(&direct->d_bmap))
@@ -150,25 +149,18 @@ static int nilfs_direct_prepare_delete(struct nilfs_direct *direct,
 {
 	int ret;
 
-	if (direct->d_bmap.b_pops->bpop_prepare_end_ptr != NULL) {
-		req->bpr_ptr = nilfs_direct_get_ptr(direct, key);
-		ret = direct->d_bmap.b_pops->bpop_prepare_end_ptr(
-			&direct->d_bmap, req);
-		if (ret < 0)
-			return ret;
-	}
-
-	stats->bs_nblocks = 1;
-	return 0;
+	req->bpr_ptr = nilfs_direct_get_ptr(direct, key);
+	ret = nilfs_bmap_prepare_end_ptr(&direct->d_bmap, req);
+	if (!ret)
+		stats->bs_nblocks = 1;
+	return ret;
 }
 
 static void nilfs_direct_commit_delete(struct nilfs_direct *direct,
 				       union nilfs_bmap_ptr_req *req,
 				       __u64 key)
 {
-	if (direct->d_bmap.b_pops->bpop_commit_end_ptr != NULL)
-		direct->d_bmap.b_pops->bpop_commit_end_ptr(
-			&direct->d_bmap, req);
+	nilfs_bmap_commit_end_ptr(&direct->d_bmap, req);
 	nilfs_direct_set_ptr(direct, key, NILFS_BMAP_INVALID_PTR);
 }
 
@@ -289,11 +281,11 @@ static int nilfs_direct_propagate_v(struct nilfs_direct *direct,
 	if (!buffer_nilfs_volatile(bh)) {
 		oldreq.bpr_ptr = ptr;
 		newreq.bpr_ptr = ptr;
-		ret = nilfs_bmap_prepare_update(&direct->d_bmap, &oldreq,
-						&newreq);
+		ret = nilfs_bmap_prepare_update_v(&direct->d_bmap, &oldreq,
+						  &newreq);
 		if (ret < 0)
 			return ret;
-		nilfs_bmap_commit_update(&direct->d_bmap, &oldreq, &newreq);
+		nilfs_bmap_commit_update_v(&direct->d_bmap, &oldreq, &newreq);
 		set_buffer_nilfs_volatile(bh);
 		nilfs_direct_set_ptr(direct, key, newreq.bpr_ptr);
 	} else