-rw-r--r--  Documentation/filesystems/nilfs2.txt |   5
-rw-r--r--  fs/nilfs2/bmap.c                     | 272
-rw-r--r--  fs/nilfs2/bmap.h                     | 135
-rw-r--r--  fs/nilfs2/btnode.c                   |   9
-rw-r--r--  fs/nilfs2/btnode.h                   |   2
-rw-r--r--  fs/nilfs2/btree.c                    | 366
-rw-r--r--  fs/nilfs2/btree.h                    |  31
-rw-r--r--  fs/nilfs2/cpfile.c                   |  47
-rw-r--r--  fs/nilfs2/cpfile.h                   |   4
-rw-r--r--  fs/nilfs2/dat.c                      |  36
-rw-r--r--  fs/nilfs2/dat.h                      |   2
-rw-r--r--  fs/nilfs2/direct.c                   | 139
-rw-r--r--  fs/nilfs2/direct.h                   |  20
-rw-r--r--  fs/nilfs2/gcinode.c                  |   5
-rw-r--r--  fs/nilfs2/inode.c                    |  18
-rw-r--r--  fs/nilfs2/ioctl.c                    |  35
-rw-r--r--  fs/nilfs2/mdt.c                      |   3
-rw-r--r--  fs/nilfs2/nilfs.h                    |   1
-rw-r--r--  fs/nilfs2/recovery.c                 |  37
-rw-r--r--  fs/nilfs2/segbuf.c                   |   3
-rw-r--r--  fs/nilfs2/seglist.h                  |  85
-rw-r--r--  fs/nilfs2/segment.c                  | 130
-rw-r--r--  fs/nilfs2/segment.h                  |  12
-rw-r--r--  fs/nilfs2/sufile.c                   | 119
-rw-r--r--  fs/nilfs2/sufile.h                   |  62
-rw-r--r--  fs/nilfs2/super.c                    |   9
-rw-r--r--  fs/nilfs2/the_nilfs.c                |   1
27 files changed, 727 insertions(+), 861 deletions(-)
diff --git a/Documentation/filesystems/nilfs2.txt b/Documentation/filesystems/nilfs2.txt
index 55c4300abfcb..01539f410676 100644
--- a/Documentation/filesystems/nilfs2.txt
+++ b/Documentation/filesystems/nilfs2.txt
@@ -39,9 +39,8 @@ Features which NILFS2 does not support yet:
 	- extended attributes
 	- POSIX ACLs
 	- quotas
-	- writable snapshots
-	- remote backup (CDP)
-	- data integrity
+	- fsck
+	- resize
 	- defragmentation
 
 Mount options
diff --git a/fs/nilfs2/bmap.c b/fs/nilfs2/bmap.c
index 064279e33bbb..36df60b6d8a4 100644
--- a/fs/nilfs2/bmap.c
+++ b/fs/nilfs2/bmap.c
@@ -31,21 +31,26 @@
 #include "dat.h"
 #include "alloc.h"
 
+struct inode *nilfs_bmap_get_dat(const struct nilfs_bmap *bmap)
+{
+	return nilfs_dat_inode(NILFS_I_NILFS(bmap->b_inode));
+}
+
 int nilfs_bmap_lookup_at_level(struct nilfs_bmap *bmap, __u64 key, int level,
 			       __u64 *ptrp)
 {
-	__u64 ptr;
+	sector_t blocknr;
 	int ret;
 
 	down_read(&bmap->b_sem);
 	ret = bmap->b_ops->bop_lookup(bmap, key, level, ptrp);
 	if (ret < 0)
 		goto out;
-	if (bmap->b_pops->bpop_translate != NULL) {
-		ret = bmap->b_pops->bpop_translate(bmap, *ptrp, &ptr);
-		if (ret < 0)
-			goto out;
-		*ptrp = ptr;
+	if (NILFS_BMAP_USE_VBN(bmap)) {
+		ret = nilfs_dat_translate(nilfs_bmap_get_dat(bmap), *ptrp,
+					  &blocknr);
+		if (!ret)
+			*ptrp = blocknr;
 	}
 
  out:
@@ -53,6 +58,16 @@ int nilfs_bmap_lookup_at_level(struct nilfs_bmap *bmap, __u64 key, int level,
 	return ret;
 }
 
+int nilfs_bmap_lookup_contig(struct nilfs_bmap *bmap, __u64 key, __u64 *ptrp,
+			     unsigned maxblocks)
+{
+	int ret;
+
+	down_read(&bmap->b_sem);
+	ret = bmap->b_ops->bop_lookup_contig(bmap, key, ptrp, maxblocks);
+	up_read(&bmap->b_sem);
+	return ret;
+}
 
 /**
  * nilfs_bmap_lookup - find a record
@@ -101,8 +116,7 @@ static int nilfs_bmap_do_insert(struct nilfs_bmap *bmap, __u64 key, __u64 ptr)
 			if (n < 0)
 				return n;
 			ret = nilfs_btree_convert_and_insert(
-				bmap, key, ptr, keys, ptrs, n,
-				NILFS_BMAP_LARGE_LOW, NILFS_BMAP_LARGE_HIGH);
+				bmap, key, ptr, keys, ptrs, n);
 			if (ret == 0)
 				bmap->b_u.u_flags |= NILFS_BMAP_LARGE;
 
@@ -158,8 +172,7 @@ static int nilfs_bmap_do_delete(struct nilfs_bmap *bmap, __u64 key)
 			if (n < 0)
 				return n;
 			ret = nilfs_direct_delete_and_convert(
-				bmap, key, keys, ptrs, n,
-				NILFS_BMAP_SMALL_LOW, NILFS_BMAP_SMALL_HIGH);
+				bmap, key, keys, ptrs, n);
 			if (ret == 0)
 				bmap->b_u.u_flags &= ~NILFS_BMAP_LARGE;
 
@@ -417,38 +430,6 @@ void nilfs_bmap_sub_blocks(const struct nilfs_bmap *bmap, int n)
 	mark_inode_dirty(bmap->b_inode);
 }
 
-int nilfs_bmap_get_block(const struct nilfs_bmap *bmap, __u64 ptr,
-			 struct buffer_head **bhp)
-{
-	return nilfs_btnode_get(&NILFS_BMAP_I(bmap)->i_btnode_cache,
-				ptr, 0, bhp, 0);
-}
-
-void nilfs_bmap_put_block(const struct nilfs_bmap *bmap,
-			  struct buffer_head *bh)
-{
-	brelse(bh);
-}
-
-int nilfs_bmap_get_new_block(const struct nilfs_bmap *bmap, __u64 ptr,
-			     struct buffer_head **bhp)
-{
-	int ret;
-
-	ret = nilfs_btnode_get(&NILFS_BMAP_I(bmap)->i_btnode_cache,
-			       ptr, 0, bhp, 1);
-	if (ret < 0)
-		return ret;
-	set_buffer_nilfs_volatile(*bhp);
-	return 0;
-}
-
-void nilfs_bmap_delete_block(const struct nilfs_bmap *bmap,
-			     struct buffer_head *bh)
-{
-	nilfs_btnode_delete(bh);
-}
-
 __u64 nilfs_bmap_data_get_key(const struct nilfs_bmap *bmap,
 			      const struct buffer_head *bh)
 {
@@ -476,11 +457,6 @@ __u64 nilfs_bmap_find_target_seq(const struct nilfs_bmap *bmap, __u64 key)
 	return NILFS_BMAP_INVALID_PTR;
 }
 
-static struct inode *nilfs_bmap_get_dat(const struct nilfs_bmap *bmap)
-{
-	return nilfs_dat_inode(NILFS_I_NILFS(bmap->b_inode));
-}
-
 #define NILFS_BMAP_GROUP_DIV	8
 __u64 nilfs_bmap_find_target_in_group(const struct nilfs_bmap *bmap)
 {
@@ -493,64 +469,51 @@ __u64 nilfs_bmap_find_target_in_group(const struct nilfs_bmap *bmap)
 		(entries_per_group / NILFS_BMAP_GROUP_DIV);
 }
 
-static int nilfs_bmap_prepare_alloc_v(struct nilfs_bmap *bmap,
-				      union nilfs_bmap_ptr_req *req)
+int nilfs_bmap_prepare_alloc_v(struct nilfs_bmap *bmap,
+			       union nilfs_bmap_ptr_req *req)
 {
 	return nilfs_dat_prepare_alloc(nilfs_bmap_get_dat(bmap), &req->bpr_req);
 }
 
-static void nilfs_bmap_commit_alloc_v(struct nilfs_bmap *bmap,
-				      union nilfs_bmap_ptr_req *req)
+void nilfs_bmap_commit_alloc_v(struct nilfs_bmap *bmap,
+			       union nilfs_bmap_ptr_req *req)
 {
 	nilfs_dat_commit_alloc(nilfs_bmap_get_dat(bmap), &req->bpr_req);
 }
 
-static void nilfs_bmap_abort_alloc_v(struct nilfs_bmap *bmap,
-				     union nilfs_bmap_ptr_req *req)
+void nilfs_bmap_abort_alloc_v(struct nilfs_bmap *bmap,
+			      union nilfs_bmap_ptr_req *req)
 {
 	nilfs_dat_abort_alloc(nilfs_bmap_get_dat(bmap), &req->bpr_req);
 }
 
-static int nilfs_bmap_prepare_start_v(struct nilfs_bmap *bmap,
-				      union nilfs_bmap_ptr_req *req)
+int nilfs_bmap_start_v(struct nilfs_bmap *bmap, union nilfs_bmap_ptr_req *req,
+		       sector_t blocknr)
 {
-	return nilfs_dat_prepare_start(nilfs_bmap_get_dat(bmap), &req->bpr_req);
-}
-
-static void nilfs_bmap_commit_start_v(struct nilfs_bmap *bmap,
-				      union nilfs_bmap_ptr_req *req,
-				      sector_t blocknr)
-{
-	nilfs_dat_commit_start(nilfs_bmap_get_dat(bmap), &req->bpr_req,
-			       blocknr);
-}
+	struct inode *dat = nilfs_bmap_get_dat(bmap);
+	int ret;
 
-static void nilfs_bmap_abort_start_v(struct nilfs_bmap *bmap,
-				     union nilfs_bmap_ptr_req *req)
-{
-	nilfs_dat_abort_start(nilfs_bmap_get_dat(bmap), &req->bpr_req);
+	ret = nilfs_dat_prepare_start(dat, &req->bpr_req);
+	if (likely(!ret))
+		nilfs_dat_commit_start(dat, &req->bpr_req, blocknr);
+	return ret;
 }
 
-static int nilfs_bmap_prepare_end_v(struct nilfs_bmap *bmap,
-				    union nilfs_bmap_ptr_req *req)
+int nilfs_bmap_prepare_end_v(struct nilfs_bmap *bmap,
+			     union nilfs_bmap_ptr_req *req)
 {
 	return nilfs_dat_prepare_end(nilfs_bmap_get_dat(bmap), &req->bpr_req);
 }
 
-static void nilfs_bmap_commit_end_v(struct nilfs_bmap *bmap,
-				    union nilfs_bmap_ptr_req *req)
-{
-	nilfs_dat_commit_end(nilfs_bmap_get_dat(bmap), &req->bpr_req, 0);
-}
-
-static void nilfs_bmap_commit_end_vmdt(struct nilfs_bmap *bmap,
-				       union nilfs_bmap_ptr_req *req)
+void nilfs_bmap_commit_end_v(struct nilfs_bmap *bmap,
+			     union nilfs_bmap_ptr_req *req)
 {
-	nilfs_dat_commit_end(nilfs_bmap_get_dat(bmap), &req->bpr_req, 1);
+	nilfs_dat_commit_end(nilfs_bmap_get_dat(bmap), &req->bpr_req,
+			     bmap->b_ptr_type == NILFS_BMAP_PTR_VS);
 }
 
-static void nilfs_bmap_abort_end_v(struct nilfs_bmap *bmap,
-				   union nilfs_bmap_ptr_req *req)
+void nilfs_bmap_abort_end_v(struct nilfs_bmap *bmap,
+			    union nilfs_bmap_ptr_req *req)
 {
 	nilfs_dat_abort_end(nilfs_bmap_get_dat(bmap), &req->bpr_req);
 }
@@ -566,128 +529,44 @@ int nilfs_bmap_mark_dirty(const struct nilfs_bmap *bmap, __u64 vblocknr)
 	return nilfs_dat_mark_dirty(nilfs_bmap_get_dat(bmap), vblocknr);
 }
 
-int nilfs_bmap_prepare_update(struct nilfs_bmap *bmap,
-			      union nilfs_bmap_ptr_req *oldreq,
-			      union nilfs_bmap_ptr_req *newreq)
+int nilfs_bmap_prepare_update_v(struct nilfs_bmap *bmap,
+				union nilfs_bmap_ptr_req *oldreq,
+				union nilfs_bmap_ptr_req *newreq)
 {
+	struct inode *dat = nilfs_bmap_get_dat(bmap);
 	int ret;
 
-	ret = bmap->b_pops->bpop_prepare_end_ptr(bmap, oldreq);
+	ret = nilfs_dat_prepare_end(dat, &oldreq->bpr_req);
 	if (ret < 0)
 		return ret;
-	ret = bmap->b_pops->bpop_prepare_alloc_ptr(bmap, newreq);
+	ret = nilfs_dat_prepare_alloc(dat, &newreq->bpr_req);
 	if (ret < 0)
-		bmap->b_pops->bpop_abort_end_ptr(bmap, oldreq);
+		nilfs_dat_abort_end(dat, &oldreq->bpr_req);
 
 	return ret;
 }
 
-void nilfs_bmap_commit_update(struct nilfs_bmap *bmap,
-			      union nilfs_bmap_ptr_req *oldreq,
-			      union nilfs_bmap_ptr_req *newreq)
+void nilfs_bmap_commit_update_v(struct nilfs_bmap *bmap,
+				union nilfs_bmap_ptr_req *oldreq,
+				union nilfs_bmap_ptr_req *newreq)
 {
-	bmap->b_pops->bpop_commit_end_ptr(bmap, oldreq);
-	bmap->b_pops->bpop_commit_alloc_ptr(bmap, newreq);
-}
+	struct inode *dat = nilfs_bmap_get_dat(bmap);
 
-void nilfs_bmap_abort_update(struct nilfs_bmap *bmap,
-			     union nilfs_bmap_ptr_req *oldreq,
-			     union nilfs_bmap_ptr_req *newreq)
-{
-	bmap->b_pops->bpop_abort_end_ptr(bmap, oldreq);
-	bmap->b_pops->bpop_abort_alloc_ptr(bmap, newreq);
+	nilfs_dat_commit_end(dat, &oldreq->bpr_req,
+			     bmap->b_ptr_type == NILFS_BMAP_PTR_VS);
+	nilfs_dat_commit_alloc(dat, &newreq->bpr_req);
 }
 
-static int nilfs_bmap_translate_v(const struct nilfs_bmap *bmap, __u64 ptr,
-				  __u64 *ptrp)
+void nilfs_bmap_abort_update_v(struct nilfs_bmap *bmap,
+			       union nilfs_bmap_ptr_req *oldreq,
+			       union nilfs_bmap_ptr_req *newreq)
 {
-	sector_t blocknr;
-	int ret;
-
-	ret = nilfs_dat_translate(nilfs_bmap_get_dat(bmap), ptr, &blocknr);
-	if (ret < 0)
-		return ret;
-	if (ptrp != NULL)
-		*ptrp = blocknr;
-	return 0;
-}
+	struct inode *dat = nilfs_bmap_get_dat(bmap);
 
-static int nilfs_bmap_prepare_alloc_p(struct nilfs_bmap *bmap,
-				      union nilfs_bmap_ptr_req *req)
-{
-	/* ignore target ptr */
-	req->bpr_ptr = bmap->b_last_allocated_ptr++;
-	return 0;
+	nilfs_dat_abort_end(dat, &oldreq->bpr_req);
+	nilfs_dat_abort_alloc(dat, &newreq->bpr_req);
 }
 
-static void nilfs_bmap_commit_alloc_p(struct nilfs_bmap *bmap,
-				      union nilfs_bmap_ptr_req *req)
-{
-	/* do nothing */
-}
-
-static void nilfs_bmap_abort_alloc_p(struct nilfs_bmap *bmap,
-				     union nilfs_bmap_ptr_req *req)
-{
-	bmap->b_last_allocated_ptr--;
-}
-
-static const struct nilfs_bmap_ptr_operations nilfs_bmap_ptr_ops_v = {
-	.bpop_prepare_alloc_ptr	= nilfs_bmap_prepare_alloc_v,
-	.bpop_commit_alloc_ptr	= nilfs_bmap_commit_alloc_v,
-	.bpop_abort_alloc_ptr	= nilfs_bmap_abort_alloc_v,
-	.bpop_prepare_start_ptr	= nilfs_bmap_prepare_start_v,
-	.bpop_commit_start_ptr	= nilfs_bmap_commit_start_v,
-	.bpop_abort_start_ptr	= nilfs_bmap_abort_start_v,
-	.bpop_prepare_end_ptr	= nilfs_bmap_prepare_end_v,
-	.bpop_commit_end_ptr	= nilfs_bmap_commit_end_v,
-	.bpop_abort_end_ptr	= nilfs_bmap_abort_end_v,
-
-	.bpop_translate		= nilfs_bmap_translate_v,
-};
-
-static const struct nilfs_bmap_ptr_operations nilfs_bmap_ptr_ops_vmdt = {
-	.bpop_prepare_alloc_ptr	= nilfs_bmap_prepare_alloc_v,
-	.bpop_commit_alloc_ptr	= nilfs_bmap_commit_alloc_v,
-	.bpop_abort_alloc_ptr	= nilfs_bmap_abort_alloc_v,
-	.bpop_prepare_start_ptr	= nilfs_bmap_prepare_start_v,
-	.bpop_commit_start_ptr	= nilfs_bmap_commit_start_v,
-	.bpop_abort_start_ptr	= nilfs_bmap_abort_start_v,
-	.bpop_prepare_end_ptr	= nilfs_bmap_prepare_end_v,
-	.bpop_commit_end_ptr	= nilfs_bmap_commit_end_vmdt,
-	.bpop_abort_end_ptr	= nilfs_bmap_abort_end_v,
-
-	.bpop_translate		= nilfs_bmap_translate_v,
-};
-
-static const struct nilfs_bmap_ptr_operations nilfs_bmap_ptr_ops_p = {
-	.bpop_prepare_alloc_ptr	= nilfs_bmap_prepare_alloc_p,
-	.bpop_commit_alloc_ptr	= nilfs_bmap_commit_alloc_p,
-	.bpop_abort_alloc_ptr	= nilfs_bmap_abort_alloc_p,
-	.bpop_prepare_start_ptr	= NULL,
-	.bpop_commit_start_ptr	= NULL,
-	.bpop_abort_start_ptr	= NULL,
-	.bpop_prepare_end_ptr	= NULL,
-	.bpop_commit_end_ptr	= NULL,
-	.bpop_abort_end_ptr	= NULL,
-
-	.bpop_translate		= NULL,
-};
-
-static const struct nilfs_bmap_ptr_operations nilfs_bmap_ptr_ops_gc = {
-	.bpop_prepare_alloc_ptr	= NULL,
-	.bpop_commit_alloc_ptr	= NULL,
-	.bpop_abort_alloc_ptr	= NULL,
-	.bpop_prepare_start_ptr	= NULL,
-	.bpop_commit_start_ptr	= NULL,
-	.bpop_abort_start_ptr	= NULL,
-	.bpop_prepare_end_ptr	= NULL,
-	.bpop_commit_end_ptr	= NULL,
-	.bpop_abort_end_ptr	= NULL,
-
-	.bpop_translate		= NULL,
-};
-
 static struct lock_class_key nilfs_bmap_dat_lock_key;
 
 /**
@@ -714,31 +593,26 @@ int nilfs_bmap_read(struct nilfs_bmap *bmap, struct nilfs_inode *raw_inode)
 	bmap->b_inode = &NILFS_BMAP_I(bmap)->vfs_inode;
 	switch (bmap->b_inode->i_ino) {
 	case NILFS_DAT_INO:
-		bmap->b_pops = &nilfs_bmap_ptr_ops_p;
-		bmap->b_last_allocated_key = 0;	/* XXX: use macro */
+		bmap->b_ptr_type = NILFS_BMAP_PTR_P;
+		bmap->b_last_allocated_key = 0;
 		bmap->b_last_allocated_ptr = NILFS_BMAP_NEW_PTR_INIT;
 		lockdep_set_class(&bmap->b_sem, &nilfs_bmap_dat_lock_key);
 		break;
 	case NILFS_CPFILE_INO:
 	case NILFS_SUFILE_INO:
-		bmap->b_pops = &nilfs_bmap_ptr_ops_vmdt;
-		bmap->b_last_allocated_key = 0;	/* XXX: use macro */
+		bmap->b_ptr_type = NILFS_BMAP_PTR_VS;
+		bmap->b_last_allocated_key = 0;
 		bmap->b_last_allocated_ptr = NILFS_BMAP_INVALID_PTR;
 		break;
 	default:
-		bmap->b_pops = &nilfs_bmap_ptr_ops_v;
-		bmap->b_last_allocated_key = 0;	/* XXX: use macro */
+		bmap->b_ptr_type = NILFS_BMAP_PTR_VM;
+		bmap->b_last_allocated_key = 0;
 		bmap->b_last_allocated_ptr = NILFS_BMAP_INVALID_PTR;
 		break;
 	}
 
 	return (bmap->b_u.u_flags & NILFS_BMAP_LARGE) ?
-		nilfs_btree_init(bmap,
-				 NILFS_BMAP_LARGE_LOW,
-				 NILFS_BMAP_LARGE_HIGH) :
-		nilfs_direct_init(bmap,
-				  NILFS_BMAP_SMALL_LOW,
-				  NILFS_BMAP_SMALL_HIGH);
+		nilfs_btree_init(bmap) : nilfs_direct_init(bmap);
 }
 
 /**
@@ -764,7 +638,7 @@ void nilfs_bmap_init_gc(struct nilfs_bmap *bmap)
 	memset(&bmap->b_u, 0, NILFS_BMAP_SIZE);
 	init_rwsem(&bmap->b_sem);
 	bmap->b_inode = &NILFS_BMAP_I(bmap)->vfs_inode;
-	bmap->b_pops = &nilfs_bmap_ptr_ops_gc;
+	bmap->b_ptr_type = NILFS_BMAP_PTR_U;
 	bmap->b_last_allocated_key = 0;
 	bmap->b_last_allocated_ptr = NILFS_BMAP_INVALID_PTR;
 	bmap->b_state = 0;
diff --git a/fs/nilfs2/bmap.h b/fs/nilfs2/bmap.h
index 4f2708abb1ba..b2890cdcef12 100644
--- a/fs/nilfs2/bmap.h
+++ b/fs/nilfs2/bmap.h
@@ -64,6 +64,8 @@ struct nilfs_bmap_stats {
  */
 struct nilfs_bmap_operations {
 	int (*bop_lookup)(const struct nilfs_bmap *, __u64, int, __u64 *);
+	int (*bop_lookup_contig)(const struct nilfs_bmap *, __u64, __u64 *,
+				 unsigned);
 	int (*bop_insert)(struct nilfs_bmap *, __u64, __u64);
 	int (*bop_delete)(struct nilfs_bmap *, __u64);
 	void (*bop_clear)(struct nilfs_bmap *);
@@ -86,34 +88,6 @@ struct nilfs_bmap_operations {
 };
 
 
-/**
- * struct nilfs_bmap_ptr_operations - bmap ptr operation table
- */
-struct nilfs_bmap_ptr_operations {
-	int (*bpop_prepare_alloc_ptr)(struct nilfs_bmap *,
-				      union nilfs_bmap_ptr_req *);
-	void (*bpop_commit_alloc_ptr)(struct nilfs_bmap *,
-				      union nilfs_bmap_ptr_req *);
-	void (*bpop_abort_alloc_ptr)(struct nilfs_bmap *,
-				     union nilfs_bmap_ptr_req *);
-	int (*bpop_prepare_start_ptr)(struct nilfs_bmap *,
-				      union nilfs_bmap_ptr_req *);
-	void (*bpop_commit_start_ptr)(struct nilfs_bmap *,
-				      union nilfs_bmap_ptr_req *,
-				      sector_t);
-	void (*bpop_abort_start_ptr)(struct nilfs_bmap *,
-				     union nilfs_bmap_ptr_req *);
-	int (*bpop_prepare_end_ptr)(struct nilfs_bmap *,
-				    union nilfs_bmap_ptr_req *);
-	void (*bpop_commit_end_ptr)(struct nilfs_bmap *,
-				    union nilfs_bmap_ptr_req *);
-	void (*bpop_abort_end_ptr)(struct nilfs_bmap *,
-				   union nilfs_bmap_ptr_req *);
-
-	int (*bpop_translate)(const struct nilfs_bmap *, __u64, __u64 *);
-};
-
-
 #define NILFS_BMAP_SIZE		(NILFS_INODE_BMAP_SIZE * sizeof(__le64))
 #define NILFS_BMAP_KEY_BIT	(sizeof(unsigned long) * 8 /* CHAR_BIT */)
 #define NILFS_BMAP_NEW_PTR_INIT	\
@@ -131,11 +105,9 @@ static inline int nilfs_bmap_is_new_ptr(unsigned long ptr)
  * @b_sem: semaphore
  * @b_inode: owner of bmap
  * @b_ops: bmap operation table
- * @b_pops: bmap ptr operation table
- * @b_low: low watermark of conversion
- * @b_high: high watermark of conversion
  * @b_last_allocated_key: last allocated key for data block
  * @b_last_allocated_ptr: last allocated ptr for data block
+ * @b_ptr_type: pointer type
  * @b_state: state
  */
 struct nilfs_bmap {
@@ -146,14 +118,22 @@ struct nilfs_bmap {
 	struct rw_semaphore b_sem;
 	struct inode *b_inode;
 	const struct nilfs_bmap_operations *b_ops;
-	const struct nilfs_bmap_ptr_operations *b_pops;
-	__u64 b_low;
-	__u64 b_high;
 	__u64 b_last_allocated_key;
 	__u64 b_last_allocated_ptr;
+	int b_ptr_type;
 	int b_state;
 };
 
+/* pointer type */
+#define NILFS_BMAP_PTR_P	0	/* physical block number (i.e. LBN) */
+#define NILFS_BMAP_PTR_VS	1	/* virtual block number (single
+					   version) */
+#define NILFS_BMAP_PTR_VM	2	/* virtual block number (has multiple
+					   versions) */
+#define NILFS_BMAP_PTR_U	(-1)	/* never perform pointer operations */
+
+#define NILFS_BMAP_USE_VBN(bmap)	((bmap)->b_ptr_type > 0)
+
 /* state */
 #define NILFS_BMAP_DIRTY	0x00000001
 
@@ -162,6 +142,7 @@ int nilfs_bmap_test_and_clear_dirty(struct nilfs_bmap *);
 int nilfs_bmap_read(struct nilfs_bmap *, struct nilfs_inode *);
 void nilfs_bmap_write(struct nilfs_bmap *, struct nilfs_inode *);
 int nilfs_bmap_lookup(struct nilfs_bmap *, unsigned long, unsigned long *);
+int nilfs_bmap_lookup_contig(struct nilfs_bmap *, __u64, __u64 *, unsigned);
 int nilfs_bmap_insert(struct nilfs_bmap *, unsigned long, unsigned long);
 int nilfs_bmap_delete(struct nilfs_bmap *, unsigned long);
 int nilfs_bmap_last_key(struct nilfs_bmap *, unsigned long *);
@@ -182,7 +163,67 @@ void nilfs_bmap_commit_gcdat(struct nilfs_bmap *, struct nilfs_bmap *);
 /*
  * Internal use only
  */
+struct inode *nilfs_bmap_get_dat(const struct nilfs_bmap *);
+int nilfs_bmap_prepare_alloc_v(struct nilfs_bmap *,
+			       union nilfs_bmap_ptr_req *);
+void nilfs_bmap_commit_alloc_v(struct nilfs_bmap *,
+			       union nilfs_bmap_ptr_req *);
+void nilfs_bmap_abort_alloc_v(struct nilfs_bmap *,
+			      union nilfs_bmap_ptr_req *);
 
+static inline int nilfs_bmap_prepare_alloc_ptr(struct nilfs_bmap *bmap,
+					       union nilfs_bmap_ptr_req *req)
+{
+	if (NILFS_BMAP_USE_VBN(bmap))
+		return nilfs_bmap_prepare_alloc_v(bmap, req);
+	/* ignore target ptr */
+	req->bpr_ptr = bmap->b_last_allocated_ptr++;
+	return 0;
+}
+
+static inline void nilfs_bmap_commit_alloc_ptr(struct nilfs_bmap *bmap,
+					       union nilfs_bmap_ptr_req *req)
+{
+	if (NILFS_BMAP_USE_VBN(bmap))
+		nilfs_bmap_commit_alloc_v(bmap, req);
+}
+
+static inline void nilfs_bmap_abort_alloc_ptr(struct nilfs_bmap *bmap,
+					      union nilfs_bmap_ptr_req *req)
+{
+	if (NILFS_BMAP_USE_VBN(bmap))
+		nilfs_bmap_abort_alloc_v(bmap, req);
+	else
+		bmap->b_last_allocated_ptr--;
+}
+
+int nilfs_bmap_prepare_end_v(struct nilfs_bmap *, union nilfs_bmap_ptr_req *);
+void nilfs_bmap_commit_end_v(struct nilfs_bmap *, union nilfs_bmap_ptr_req *);
+void nilfs_bmap_abort_end_v(struct nilfs_bmap *, union nilfs_bmap_ptr_req *);
+
+static inline int nilfs_bmap_prepare_end_ptr(struct nilfs_bmap *bmap,
+					     union nilfs_bmap_ptr_req *req)
+{
+	return NILFS_BMAP_USE_VBN(bmap) ?
+		nilfs_bmap_prepare_end_v(bmap, req) : 0;
+}
+
+static inline void nilfs_bmap_commit_end_ptr(struct nilfs_bmap *bmap,
+					     union nilfs_bmap_ptr_req *req)
+{
+	if (NILFS_BMAP_USE_VBN(bmap))
+		nilfs_bmap_commit_end_v(bmap, req);
+}
+
+static inline void nilfs_bmap_abort_end_ptr(struct nilfs_bmap *bmap,
+					    union nilfs_bmap_ptr_req *req)
+{
+	if (NILFS_BMAP_USE_VBN(bmap))
+		nilfs_bmap_abort_end_v(bmap, req);
+}
+
+int nilfs_bmap_start_v(struct nilfs_bmap *, union nilfs_bmap_ptr_req *,
+		       sector_t);
 int nilfs_bmap_move_v(const struct nilfs_bmap *, __u64, sector_t);
 int nilfs_bmap_mark_dirty(const struct nilfs_bmap *, __u64);
 
@@ -193,28 +234,20 @@ __u64 nilfs_bmap_data_get_key(const struct nilfs_bmap *,
 __u64 nilfs_bmap_find_target_seq(const struct nilfs_bmap *, __u64);
 __u64 nilfs_bmap_find_target_in_group(const struct nilfs_bmap *);
 
-int nilfs_bmap_prepare_update(struct nilfs_bmap *,
-			      union nilfs_bmap_ptr_req *,
-			      union nilfs_bmap_ptr_req *);
-void nilfs_bmap_commit_update(struct nilfs_bmap *,
-			      union nilfs_bmap_ptr_req *,
-			      union nilfs_bmap_ptr_req *);
-void nilfs_bmap_abort_update(struct nilfs_bmap *,
-			     union nilfs_bmap_ptr_req *,
-			     union nilfs_bmap_ptr_req *);
+int nilfs_bmap_prepare_update_v(struct nilfs_bmap *,
+				union nilfs_bmap_ptr_req *,
+				union nilfs_bmap_ptr_req *);
+void nilfs_bmap_commit_update_v(struct nilfs_bmap *,
+				union nilfs_bmap_ptr_req *,
+				union nilfs_bmap_ptr_req *);
+void nilfs_bmap_abort_update_v(struct nilfs_bmap *,
+			       union nilfs_bmap_ptr_req *,
+			       union nilfs_bmap_ptr_req *);
 
 void nilfs_bmap_add_blocks(const struct nilfs_bmap *, int);
 void nilfs_bmap_sub_blocks(const struct nilfs_bmap *, int);
 
 
-int nilfs_bmap_get_block(const struct nilfs_bmap *, __u64,
-			 struct buffer_head **);
-void nilfs_bmap_put_block(const struct nilfs_bmap *, struct buffer_head *);
-int nilfs_bmap_get_new_block(const struct nilfs_bmap *, __u64,
-			     struct buffer_head **);
-void nilfs_bmap_delete_block(const struct nilfs_bmap *, struct buffer_head *);
-
-
 /* Assume that bmap semaphore is locked. */
 static inline int nilfs_bmap_dirty(const struct nilfs_bmap *bmap)
 {
diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c
index 4cc07b2c30e0..7e0b61be212e 100644
--- a/fs/nilfs2/btnode.c
+++ b/fs/nilfs2/btnode.c
@@ -46,15 +46,18 @@ void nilfs_btnode_cache_init_once(struct address_space *btnc)
 	INIT_LIST_HEAD(&btnc->i_mmap_nonlinear);
 }
 
-static struct address_space_operations def_btnode_aops;
+static struct address_space_operations def_btnode_aops = {
+	.sync_page		= block_sync_page,
+};
 
-void nilfs_btnode_cache_init(struct address_space *btnc)
+void nilfs_btnode_cache_init(struct address_space *btnc,
+			     struct backing_dev_info *bdi)
 {
 	btnc->host = NULL;  /* can safely set to host inode ? */
 	btnc->flags = 0;
 	mapping_set_gfp_mask(btnc, GFP_NOFS);
 	btnc->assoc_mapping = NULL;
-	btnc->backing_dev_info = &default_backing_dev_info;
+	btnc->backing_dev_info = bdi;
 	btnc->a_ops = &def_btnode_aops;
 }
 
diff --git a/fs/nilfs2/btnode.h b/fs/nilfs2/btnode.h
index 35faa86444a7..3e2275172ed6 100644
--- a/fs/nilfs2/btnode.h
+++ b/fs/nilfs2/btnode.h
@@ -38,7 +38,7 @@ struct nilfs_btnode_chkey_ctxt {
 };
 
 void nilfs_btnode_cache_init_once(struct address_space *);
-void nilfs_btnode_cache_init(struct address_space *);
+void nilfs_btnode_cache_init(struct address_space *, struct backing_dev_info *);
 void nilfs_btnode_cache_clear(struct address_space *);
 int nilfs_btnode_submit_block(struct address_space *, __u64, sector_t,
 			      struct buffer_head **, int);
diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c
index 6b37a2767293..aa412724b64e 100644
--- a/fs/nilfs2/btree.c
+++ b/fs/nilfs2/btree.c
@@ -29,6 +29,7 @@
 #include "btnode.h"
 #include "btree.h"
 #include "alloc.h"
+#include "dat.h"
 
 /**
  * struct nilfs_btree_path - A path on which B-tree operations are executed
@@ -109,8 +110,7 @@ static void nilfs_btree_clear_path(const struct nilfs_btree *btree,
 	     level < NILFS_BTREE_LEVEL_MAX;
 	     level++) {
 		if (path[level].bp_bh != NULL) {
-			nilfs_bmap_put_block(&btree->bt_bmap,
-					     path[level].bp_bh);
+			brelse(path[level].bp_bh);
 			path[level].bp_bh = NULL;
 		}
 		/* sib_bh is released or deleted by prepare or commit
@@ -123,10 +123,29 @@ static void nilfs_btree_clear_path(const struct nilfs_btree *btree,
 	}
 }
 
-
 /*
  * B-tree node operations
  */
+static int nilfs_btree_get_block(const struct nilfs_btree *btree, __u64 ptr,
+				 struct buffer_head **bhp)
+{
+	struct address_space *btnc =
+		&NILFS_BMAP_I((struct nilfs_bmap *)btree)->i_btnode_cache;
+	return nilfs_btnode_get(btnc, ptr, 0, bhp, 0);
+}
+
+static int nilfs_btree_get_new_block(const struct nilfs_btree *btree,
+				     __u64 ptr, struct buffer_head **bhp)
+{
+	struct address_space *btnc =
+		&NILFS_BMAP_I((struct nilfs_bmap *)btree)->i_btnode_cache;
+	int ret;
+
+	ret = nilfs_btnode_get(btnc, ptr, 0, bhp, 1);
+	if (!ret)
+		set_buffer_nilfs_volatile(*bhp);
+	return ret;
+}
 
 static inline int
 nilfs_btree_node_get_flags(const struct nilfs_btree *btree,
@@ -488,8 +507,7 @@ static int nilfs_btree_do_lookup(const struct nilfs_btree *btree,
 	path[level].bp_index = index;
 
 	for (level--; level >= minlevel; level--) {
-		ret = nilfs_bmap_get_block(&btree->bt_bmap, ptr,
-					   &path[level].bp_bh);
+		ret = nilfs_btree_get_block(btree, ptr, &path[level].bp_bh);
 		if (ret < 0)
 			return ret;
 		node = nilfs_btree_get_nonroot_node(btree, path, level);
@@ -535,8 +553,7 @@ static int nilfs_btree_do_lookup_last(const struct nilfs_btree *btree,
 	path[level].bp_index = index;
 
 	for (level--; level > 0; level--) {
-		ret = nilfs_bmap_get_block(&btree->bt_bmap, ptr,
-					   &path[level].bp_bh);
+		ret = nilfs_btree_get_block(btree, ptr, &path[level].bp_bh);
 		if (ret < 0)
 			return ret;
 		node = nilfs_btree_get_nonroot_node(btree, path, level);
@@ -579,6 +596,87 @@ static int nilfs_btree_lookup(const struct nilfs_bmap *bmap,
 	return ret;
 }
 
+static int nilfs_btree_lookup_contig(const struct nilfs_bmap *bmap,
+				     __u64 key, __u64 *ptrp, unsigned maxblocks)
+{
+	struct nilfs_btree *btree = (struct nilfs_btree *)bmap;
+	struct nilfs_btree_path *path;
+	struct nilfs_btree_node *node;
+	struct inode *dat = NULL;
+	__u64 ptr, ptr2;
+	sector_t blocknr;
+	int level = NILFS_BTREE_LEVEL_NODE_MIN;
+	int ret, cnt, index, maxlevel;
+
+	path = nilfs_btree_alloc_path(btree);
+	if (path == NULL)
+		return -ENOMEM;
+	nilfs_btree_init_path(btree, path);
+	ret = nilfs_btree_do_lookup(btree, path, key, &ptr, level);
+	if (ret < 0)
+		goto out;
+
+	if (NILFS_BMAP_USE_VBN(bmap)) {
+		dat = nilfs_bmap_get_dat(bmap);
+		ret = nilfs_dat_translate(dat, ptr, &blocknr);
+		if (ret < 0)
+			goto out;
+		ptr = blocknr;
+	}
+	cnt = 1;
+	if (cnt == maxblocks)
+		goto end;
+
+	maxlevel = nilfs_btree_height(btree) - 1;
+	node = nilfs_btree_get_node(btree, path, level);
+	index = path[level].bp_index + 1;
+	for (;;) {
+		while (index < nilfs_btree_node_get_nchildren(btree, node)) {
+			if (nilfs_btree_node_get_key(btree, node, index) !=
+			    key + cnt)
+				goto end;
+			ptr2 = nilfs_btree_node_get_ptr(btree, node, index);
+			if (dat) {
+				ret = nilfs_dat_translate(dat, ptr2, &blocknr);
+				if (ret < 0)
+					goto out;
+				ptr2 = blocknr;
+			}
+			if (ptr2 != ptr + cnt || ++cnt == maxblocks)
+				goto end;
+			index++;
+			continue;
+		}
+		if (level == maxlevel)
+			break;
+
+		/* look-up right sibling node */
+		node = nilfs_btree_get_node(btree, path, level + 1);
+		index = path[level + 1].bp_index + 1;
+		if (index >= nilfs_btree_node_get_nchildren(btree, node) ||
+		    nilfs_btree_node_get_key(btree, node, index) != key + cnt)
+			break;
+		ptr2 = nilfs_btree_node_get_ptr(btree, node, index);
+		path[level + 1].bp_index = index;
+
+		brelse(path[level].bp_bh);
+		path[level].bp_bh = NULL;
+		ret = nilfs_btree_get_block(btree, ptr2, &path[level].bp_bh);
+		if (ret < 0)
+			goto out;
+		node = nilfs_btree_get_nonroot_node(btree, path, level);
+		index = 0;
+		path[level].bp_index = index;
+	}
+ end:
+	*ptrp = ptr;
+	ret = cnt;
+ out:
+	nilfs_btree_clear_path(btree, path);
+	nilfs_btree_free_path(btree, path);
+	return ret;
+}
+
 static void nilfs_btree_promote_key(struct nilfs_btree *btree,
 				    struct nilfs_btree_path *path,
 				    int level, __u64 key)
@@ -669,13 +767,13 @@ static void nilfs_btree_carry_left(struct nilfs_btree *btree,
 				nilfs_btree_node_get_key(btree, node, 0));
 
 	if (move) {
-		nilfs_bmap_put_block(&btree->bt_bmap, path[level].bp_bh);
+		brelse(path[level].bp_bh);
 		path[level].bp_bh = path[level].bp_sib_bh;
 		path[level].bp_sib_bh = NULL;
 		path[level].bp_index += lnchildren;
 		path[level + 1].bp_index--;
 	} else {
-		nilfs_bmap_put_block(&btree->bt_bmap, path[level].bp_sib_bh);
+		brelse(path[level].bp_sib_bh);
 		path[level].bp_sib_bh = NULL;
 		path[level].bp_index -= n;
 	}
@@ -722,14 +820,14 @@ static void nilfs_btree_carry_right(struct nilfs_btree *btree,
 	path[level + 1].bp_index--;
 
 	if (move) {
-		nilfs_bmap_put_block(&btree->bt_bmap, path[level].bp_bh);
+		brelse(path[level].bp_bh);
 		path[level].bp_bh = path[level].bp_sib_bh;
 		path[level].bp_sib_bh = NULL;
 		path[level].bp_index -=
 			nilfs_btree_node_get_nchildren(btree, node);
 		path[level + 1].bp_index++;
 	} else {
-		nilfs_bmap_put_block(&btree->bt_bmap, path[level].bp_sib_bh);
+		brelse(path[level].bp_sib_bh);
 		path[level].bp_sib_bh = NULL;
 	}
 
@@ -781,7 +879,7 @@ static void nilfs_btree_split(struct nilfs_btree *btree,
 		*keyp = nilfs_btree_node_get_key(btree, right, 0);
 		*ptrp = path[level].bp_newreq.bpr_ptr;
 
-		nilfs_bmap_put_block(&btree->bt_bmap, path[level].bp_bh);
+		brelse(path[level].bp_bh);
 		path[level].bp_bh = path[level].bp_sib_bh;
 		path[level].bp_sib_bh = NULL;
 	} else {
@@ -790,7 +888,7 @@ static void nilfs_btree_split(struct nilfs_btree *btree,
 		*keyp = nilfs_btree_node_get_key(btree, right, 0);
 		*ptrp = path[level].bp_newreq.bpr_ptr;
 
-		nilfs_bmap_put_block(&btree->bt_bmap, path[level].bp_sib_bh);
+		brelse(path[level].bp_sib_bh);
 		path[level].bp_sib_bh = NULL;
 	}
 
@@ -897,12 +995,12 @@ static int nilfs_btree_prepare_insert(struct nilfs_btree *btree,
 	level = NILFS_BTREE_LEVEL_DATA;
 
 	/* allocate a new ptr for data block */
-	if (btree->bt_ops->btop_find_target != NULL)
+	if (NILFS_BMAP_USE_VBN(&btree->bt_bmap))
 		path[level].bp_newreq.bpr_ptr =
-			btree->bt_ops->btop_find_target(btree, path, key);
+			nilfs_btree_find_target_v(btree, path, key);
 
-	ret = btree->bt_bmap.b_pops->bpop_prepare_alloc_ptr(
-		&btree->bt_bmap, &path[level].bp_newreq);
+	ret = nilfs_bmap_prepare_alloc_ptr(&btree->bt_bmap,
+					   &path[level].bp_newreq);
 	if (ret < 0)
 		goto err_out_data;
 
@@ -924,8 +1022,7 @@ static int nilfs_btree_prepare_insert(struct nilfs_btree *btree,
 		if (pindex > 0) {
 			sibptr = nilfs_btree_node_get_ptr(btree, parent,
 							  pindex - 1);
-			ret = nilfs_bmap_get_block(&btree->bt_bmap, sibptr,
-						   &bh);
+			ret = nilfs_btree_get_block(btree, sibptr, &bh);
 			if (ret < 0)
 				goto err_out_child_node;
 			sib = (struct nilfs_btree_node *)bh->b_data;
@@ -936,7 +1033,7 @@ static int nilfs_btree_prepare_insert(struct nilfs_btree *btree,
 				stats->bs_nblocks++;
 				goto out;
 			} else
-				nilfs_bmap_put_block(&btree->bt_bmap, bh);
+				brelse(bh);
 		}
 
 		/* right sibling */
@@ -944,8 +1041,7 @@ static int nilfs_btree_prepare_insert(struct nilfs_btree *btree,
 		    nilfs_btree_node_get_nchildren(btree, parent) - 1) {
 			sibptr = nilfs_btree_node_get_ptr(btree, parent,
 							  pindex + 1);
-			ret = nilfs_bmap_get_block(&btree->bt_bmap, sibptr,
-						   &bh);
+			ret = nilfs_btree_get_block(btree, sibptr, &bh);
 			if (ret < 0)
 				goto err_out_child_node;
 			sib = (struct nilfs_btree_node *)bh->b_data;
@@ -956,19 +1052,19 @@ static int nilfs_btree_prepare_insert(struct nilfs_btree *btree,
 				stats->bs_nblocks++;
 				goto out;
 			} else
-				nilfs_bmap_put_block(&btree->bt_bmap, bh);
+				brelse(bh);
 		}
 
 		/* split */
 		path[level].bp_newreq.bpr_ptr =
 			path[level - 1].bp_newreq.bpr_ptr + 1;
-		ret = btree->bt_bmap.b_pops->bpop_prepare_alloc_ptr(
-			&btree->bt_bmap, &path[level].bp_newreq);
+		ret = nilfs_bmap_prepare_alloc_ptr(&btree->bt_bmap,
+						   &path[level].bp_newreq);
 		if (ret < 0)
 			goto err_out_child_node;
-		ret = nilfs_bmap_get_new_block(&btree->bt_bmap,
-					       path[level].bp_newreq.bpr_ptr,
-					       &bh);
+		ret = nilfs_btree_get_new_block(btree,
+						path[level].bp_newreq.bpr_ptr,
+						&bh);
 		if (ret < 0)
 			goto err_out_curr_node;
 
@@ -994,12 +1090,12 @@ static int nilfs_btree_prepare_insert(struct nilfs_btree *btree,
 
 	/* grow */
 	path[level].bp_newreq.bpr_ptr = path[level - 1].bp_newreq.bpr_ptr + 1;
-	ret = btree->bt_bmap.b_pops->bpop_prepare_alloc_ptr(
-		&btree->bt_bmap, &path[level].bp_newreq);
+	ret = nilfs_bmap_prepare_alloc_ptr(&btree->bt_bmap,
+					   &path[level].bp_newreq);
 	if (ret < 0)
 		goto err_out_child_node;
-	ret = nilfs_bmap_get_new_block(&btree->bt_bmap,
-				       path[level].bp_newreq.bpr_ptr, &bh);
+	ret = nilfs_btree_get_new_block(btree, path[level].bp_newreq.bpr_ptr,
+					&bh);
 	if (ret < 0)
 		goto err_out_curr_node;
 
@@ -1023,18 +1119,16 @@ static int nilfs_btree_prepare_insert(struct nilfs_btree *btree,
 
 	/* error */
  err_out_curr_node:
-	btree->bt_bmap.b_pops->bpop_abort_alloc_ptr(&btree->bt_bmap,
-						    &path[level].bp_newreq);
+	nilfs_bmap_abort_alloc_ptr(&btree->bt_bmap, &path[level].bp_newreq);
  err_out_child_node:
 	for (level--; level > NILFS_BTREE_LEVEL_DATA; level--) {
-		nilfs_bmap_delete_block(&btree->bt_bmap, path[level].bp_sib_bh);
-		btree->bt_bmap.b_pops->bpop_abort_alloc_ptr(
-			&btree->bt_bmap, &path[level].bp_newreq);
+		nilfs_btnode_delete(path[level].bp_sib_bh);
+		nilfs_bmap_abort_alloc_ptr(&btree->bt_bmap,
+					   &path[level].bp_newreq);
 
 	}
 
-	btree->bt_bmap.b_pops->bpop_abort_alloc_ptr(&btree->bt_bmap,
-						    &path[level].bp_newreq);
+	nilfs_bmap_abort_alloc_ptr(&btree->bt_bmap, &path[level].bp_newreq);
  err_out_data:
 	*levelp = level;
 	stats->bs_nblocks = 0;
@@ -1049,14 +1143,12 @@ static void nilfs_btree_commit_insert(struct nilfs_btree *btree,
 
 	set_buffer_nilfs_volatile((struct buffer_head *)((unsigned long)ptr));
 	ptr = path[NILFS_BTREE_LEVEL_DATA].bp_newreq.bpr_ptr;
-	if (btree->bt_ops->btop_set_target != NULL)
-		btree->bt_ops->btop_set_target(btree, key, ptr);
+	if (NILFS_BMAP_USE_VBN(&btree->bt_bmap))
+		nilfs_btree_set_target_v(btree, key, ptr);
 
 	for (level = NILFS_BTREE_LEVEL_NODE_MIN; level <= maxlevel; level++) {
-		if (btree->bt_bmap.b_pops->bpop_commit_alloc_ptr != NULL) {
-			btree->bt_bmap.b_pops->bpop_commit_alloc_ptr(
-				&btree->bt_bmap, &path[level - 1].bp_newreq);
-		}
+		nilfs_bmap_commit_alloc_ptr(&btree->bt_bmap,
+					    &path[level - 1].bp_newreq);
 		path[level].bp_op(btree, path, level, &key, &ptr);
 	}
 
@@ -1153,7 +1245,7 @@ static void nilfs_btree_borrow_left(struct nilfs_btree *btree,
 	nilfs_btree_promote_key(btree, path, level + 1,
 				nilfs_btree_node_get_key(btree, node, 0));
 
-	nilfs_bmap_put_block(&btree->bt_bmap, path[level].bp_sib_bh);
+	brelse(path[level].bp_sib_bh);
 	path[level].bp_sib_bh = NULL;
 	path[level].bp_index += n;
 }
@@ -1192,7 +1284,7 @@ static void nilfs_btree_borrow_right(struct nilfs_btree *btree,
 				nilfs_btree_node_get_key(btree, right, 0));
 	path[level + 1].bp_index--;
 
-	nilfs_bmap_put_block(&btree->bt_bmap, path[level].bp_sib_bh);
+	brelse(path[level].bp_sib_bh);
 	path[level].bp_sib_bh = NULL;
 }
 
@@ -1221,7 +1313,7 @@ static void nilfs_btree_concat_left(struct nilfs_btree *btree,
 	unlock_buffer(path[level].bp_bh);
 	unlock_buffer(path[level].bp_sib_bh);
 
-	nilfs_bmap_delete_block(&btree->bt_bmap, path[level].bp_bh);
+	nilfs_btnode_delete(path[level].bp_bh);
 	path[level].bp_bh = path[level].bp_sib_bh;
 	path[level].bp_sib_bh = NULL;
 	path[level].bp_index += nilfs_btree_node_get_nchildren(btree, left);
@@ -1252,7 +1344,7 @@ static void nilfs_btree_concat_right(struct nilfs_btree *btree,
 	unlock_buffer(path[level].bp_bh);
 	unlock_buffer(path[level].bp_sib_bh);
 
-	nilfs_bmap_delete_block(&btree->bt_bmap, path[level].bp_sib_bh);
+	nilfs_btnode_delete(path[level].bp_sib_bh);
 	path[level].bp_sib_bh = NULL;
 	path[level + 1].bp_index++;
 }
@@ -1276,7 +1368,7 @@ static void nilfs_btree_shrink(struct nilfs_btree *btree,
 	nilfs_btree_node_move_left(btree, root, child, n);
 	unlock_buffer(path[level].bp_bh);
 
-	nilfs_bmap_delete_block(&btree->bt_bmap, path[level].bp_bh);
+	nilfs_btnode_delete(path[level].bp_bh);
 	path[level].bp_bh = NULL;
 }
 
@@ -1300,12 +1392,10 @@ static int nilfs_btree_prepare_delete(struct nilfs_btree *btree,
 			path[level].bp_oldreq.bpr_ptr =
 				nilfs_btree_node_get_ptr(btree, node,
 							 path[level].bp_index);
-		if (btree->bt_bmap.b_pops->bpop_prepare_end_ptr != NULL) {
-			ret = btree->bt_bmap.b_pops->bpop_prepare_end_ptr(
-				&btree->bt_bmap, &path[level].bp_oldreq);
-			if (ret < 0)
-				goto err_out_child_node;
-		}
+		ret = nilfs_bmap_prepare_end_ptr(&btree->bt_bmap,
+						 &path[level].bp_oldreq);
+		if (ret < 0)
+			goto err_out_child_node;
 
 		if (nilfs_btree_node_get_nchildren(btree, node) >
 		    nilfs_btree_node_nchildren_min(btree, node)) {
@@ -1321,8 +1411,7 @@ static int nilfs_btree_prepare_delete(struct nilfs_btree *btree,
 			/* left sibling */
 			sibptr = nilfs_btree_node_get_ptr(btree, parent,
 							  pindex - 1);
-			ret = nilfs_bmap_get_block(&btree->bt_bmap, sibptr,
-						   &bh);
+			ret = nilfs_btree_get_block(btree, sibptr, &bh);
 			if (ret < 0)
 				goto err_out_curr_node;
 			sib = (struct nilfs_btree_node *)bh->b_data;
@@ -1343,8 +1432,7 @@ static int nilfs_btree_prepare_delete(struct nilfs_btree *btree,
 			/* right sibling */
 			sibptr = nilfs_btree_node_get_ptr(btree, parent,
 							  pindex + 1);
-			ret = nilfs_bmap_get_block(&btree->bt_bmap, sibptr,
-						   &bh);
+			ret = nilfs_btree_get_block(btree, sibptr, &bh);
 			if (ret < 0)
 				goto err_out_curr_node;
 			sib = (struct nilfs_btree_node *)bh->b_data;
@@ -1381,12 +1469,12 @@ static int nilfs_btree_prepare_delete(struct nilfs_btree *btree,
 		node = nilfs_btree_get_root(btree);
 		path[level].bp_oldreq.bpr_ptr =
 			nilfs_btree_node_get_ptr(btree, node, path[level].bp_index);
-		if (btree->bt_bmap.b_pops->bpop_prepare_end_ptr != NULL) {
-			ret = btree->bt_bmap.b_pops->bpop_prepare_end_ptr(
-				&btree->bt_bmap, &path[level].bp_oldreq);
-			if (ret < 0)
-				goto err_out_child_node;
-		}
+
+		ret = nilfs_bmap_prepare_end_ptr(&btree->bt_bmap,
+						 &path[level].bp_oldreq);
+		if (ret < 0)
+			goto err_out_child_node;
+
 		/* child of the root node is deleted */
 		path[level].bp_op = nilfs_btree_do_delete;
 		stats->bs_nblocks++;
@@ -1398,15 +1486,12 @@ static int nilfs_btree_prepare_delete(struct nilfs_btree *btree, | |||
1398 | 1486 | ||
1399 | /* error */ | 1487 | /* error */ |
1400 | err_out_curr_node: | 1488 | err_out_curr_node: |
1401 | if (btree->bt_bmap.b_pops->bpop_abort_end_ptr != NULL) | 1489 | nilfs_bmap_abort_end_ptr(&btree->bt_bmap, &path[level].bp_oldreq); |
1402 | btree->bt_bmap.b_pops->bpop_abort_end_ptr( | ||
1403 | &btree->bt_bmap, &path[level].bp_oldreq); | ||
1404 | err_out_child_node: | 1490 | err_out_child_node: |
1405 | for (level--; level >= NILFS_BTREE_LEVEL_NODE_MIN; level--) { | 1491 | for (level--; level >= NILFS_BTREE_LEVEL_NODE_MIN; level--) { |
1406 | nilfs_bmap_put_block(&btree->bt_bmap, path[level].bp_sib_bh); | 1492 | brelse(path[level].bp_sib_bh); |
1407 | if (btree->bt_bmap.b_pops->bpop_abort_end_ptr != NULL) | 1493 | nilfs_bmap_abort_end_ptr(&btree->bt_bmap, |
1408 | btree->bt_bmap.b_pops->bpop_abort_end_ptr( | 1494 | &path[level].bp_oldreq); |
1409 | &btree->bt_bmap, &path[level].bp_oldreq); | ||
1410 | } | 1495 | } |
1411 | *levelp = level; | 1496 | *levelp = level; |
1412 | stats->bs_nblocks = 0; | 1497 | stats->bs_nblocks = 0; |
@@ -1420,9 +1505,8 @@ static void nilfs_btree_commit_delete(struct nilfs_btree *btree, | |||
1420 | int level; | 1505 | int level; |
1421 | 1506 | ||
1422 | for (level = NILFS_BTREE_LEVEL_NODE_MIN; level <= maxlevel; level++) { | 1507 | for (level = NILFS_BTREE_LEVEL_NODE_MIN; level <= maxlevel; level++) { |
1423 | if (btree->bt_bmap.b_pops->bpop_commit_end_ptr != NULL) | 1508 | nilfs_bmap_commit_end_ptr(&btree->bt_bmap, |
1424 | btree->bt_bmap.b_pops->bpop_commit_end_ptr( | 1509 | &path[level].bp_oldreq); |
1425 | &btree->bt_bmap, &path[level].bp_oldreq); | ||
1426 | path[level].bp_op(btree, path, level, NULL, NULL); | 1510 | path[level].bp_op(btree, path, level, NULL, NULL); |
1427 | } | 1511 | } |
1428 | 1512 | ||
@@ -1501,7 +1585,7 @@ static int nilfs_btree_check_delete(struct nilfs_bmap *bmap, __u64 key) | |||
1501 | if (nchildren > 1) | 1585 | if (nchildren > 1) |
1502 | return 0; | 1586 | return 0; |
1503 | ptr = nilfs_btree_node_get_ptr(btree, root, nchildren - 1); | 1587 | ptr = nilfs_btree_node_get_ptr(btree, root, nchildren - 1); |
1504 | ret = nilfs_bmap_get_block(bmap, ptr, &bh); | 1588 | ret = nilfs_btree_get_block(btree, ptr, &bh); |
1505 | if (ret < 0) | 1589 | if (ret < 0) |
1506 | return ret; | 1590 | return ret; |
1507 | node = (struct nilfs_btree_node *)bh->b_data; | 1591 | node = (struct nilfs_btree_node *)bh->b_data; |
@@ -1515,9 +1599,9 @@ static int nilfs_btree_check_delete(struct nilfs_bmap *bmap, __u64 key) | |||
1515 | nextmaxkey = (nchildren > 1) ? | 1599 | nextmaxkey = (nchildren > 1) ? |
1516 | nilfs_btree_node_get_key(btree, node, nchildren - 2) : 0; | 1600 | nilfs_btree_node_get_key(btree, node, nchildren - 2) : 0; |
1517 | if (bh != NULL) | 1601 | if (bh != NULL) |
1518 | nilfs_bmap_put_block(bmap, bh); | 1602 | brelse(bh); |
1519 | 1603 | ||
1520 | return (maxkey == key) && (nextmaxkey < bmap->b_low); | 1604 | return (maxkey == key) && (nextmaxkey < NILFS_BMAP_LARGE_LOW); |
1521 | } | 1605 | } |
1522 | 1606 | ||
1523 | static int nilfs_btree_gather_data(struct nilfs_bmap *bmap, | 1607 | static int nilfs_btree_gather_data(struct nilfs_bmap *bmap, |
@@ -1542,7 +1626,7 @@ static int nilfs_btree_gather_data(struct nilfs_bmap *bmap, | |||
1542 | nchildren = nilfs_btree_node_get_nchildren(btree, root); | 1626 | nchildren = nilfs_btree_node_get_nchildren(btree, root); |
1543 | WARN_ON(nchildren > 1); | 1627 | WARN_ON(nchildren > 1); |
1544 | ptr = nilfs_btree_node_get_ptr(btree, root, nchildren - 1); | 1628 | ptr = nilfs_btree_node_get_ptr(btree, root, nchildren - 1); |
1545 | ret = nilfs_bmap_get_block(bmap, ptr, &bh); | 1629 | ret = nilfs_btree_get_block(btree, ptr, &bh); |
1546 | if (ret < 0) | 1630 | if (ret < 0) |
1547 | return ret; | 1631 | return ret; |
1548 | node = (struct nilfs_btree_node *)bh->b_data; | 1632 | node = (struct nilfs_btree_node *)bh->b_data; |
@@ -1563,7 +1647,7 @@ static int nilfs_btree_gather_data(struct nilfs_bmap *bmap, | |||
1563 | } | 1647 | } |
1564 | 1648 | ||
1565 | if (bh != NULL) | 1649 | if (bh != NULL) |
1566 | nilfs_bmap_put_block(bmap, bh); | 1650 | brelse(bh); |
1567 | 1651 | ||
1568 | return nitems; | 1652 | return nitems; |
1569 | } | 1653 | } |
@@ -1584,10 +1668,10 @@ nilfs_btree_prepare_convert_and_insert(struct nilfs_bmap *bmap, __u64 key, | |||
1584 | 1668 | ||
1585 | /* for data */ | 1669 | /* for data */ |
1586 | /* cannot find near ptr */ | 1670 | /* cannot find near ptr */ |
1587 | if (btree->bt_ops->btop_find_target != NULL) | 1671 | if (NILFS_BMAP_USE_VBN(bmap)) |
1588 | dreq->bpr_ptr | 1672 | dreq->bpr_ptr = nilfs_btree_find_target_v(btree, NULL, key); |
1589 | = btree->bt_ops->btop_find_target(btree, NULL, key); | 1673 | |
1590 | ret = bmap->b_pops->bpop_prepare_alloc_ptr(bmap, dreq); | 1674 | ret = nilfs_bmap_prepare_alloc_ptr(bmap, dreq); |
1591 | if (ret < 0) | 1675 | if (ret < 0) |
1592 | return ret; | 1676 | return ret; |
1593 | 1677 | ||
@@ -1595,11 +1679,11 @@ nilfs_btree_prepare_convert_and_insert(struct nilfs_bmap *bmap, __u64 key, | |||
1595 | stats->bs_nblocks++; | 1679 | stats->bs_nblocks++; |
1596 | if (nreq != NULL) { | 1680 | if (nreq != NULL) { |
1597 | nreq->bpr_ptr = dreq->bpr_ptr + 1; | 1681 | nreq->bpr_ptr = dreq->bpr_ptr + 1; |
1598 | ret = bmap->b_pops->bpop_prepare_alloc_ptr(bmap, nreq); | 1682 | ret = nilfs_bmap_prepare_alloc_ptr(bmap, nreq); |
1599 | if (ret < 0) | 1683 | if (ret < 0) |
1600 | goto err_out_dreq; | 1684 | goto err_out_dreq; |
1601 | 1685 | ||
1602 | ret = nilfs_bmap_get_new_block(bmap, nreq->bpr_ptr, &bh); | 1686 | ret = nilfs_btree_get_new_block(btree, nreq->bpr_ptr, &bh); |
1603 | if (ret < 0) | 1687 | if (ret < 0) |
1604 | goto err_out_nreq; | 1688 | goto err_out_nreq; |
1605 | 1689 | ||
@@ -1612,9 +1696,9 @@ nilfs_btree_prepare_convert_and_insert(struct nilfs_bmap *bmap, __u64 key, | |||
1612 | 1696 | ||
1613 | /* error */ | 1697 | /* error */ |
1614 | err_out_nreq: | 1698 | err_out_nreq: |
1615 | bmap->b_pops->bpop_abort_alloc_ptr(bmap, nreq); | 1699 | nilfs_bmap_abort_alloc_ptr(bmap, nreq); |
1616 | err_out_dreq: | 1700 | err_out_dreq: |
1617 | bmap->b_pops->bpop_abort_alloc_ptr(bmap, dreq); | 1701 | nilfs_bmap_abort_alloc_ptr(bmap, dreq); |
1618 | stats->bs_nblocks = 0; | 1702 | stats->bs_nblocks = 0; |
1619 | return ret; | 1703 | return ret; |
1620 | 1704 | ||
@@ -1624,7 +1708,7 @@ static void | |||
1624 | nilfs_btree_commit_convert_and_insert(struct nilfs_bmap *bmap, | 1708 | nilfs_btree_commit_convert_and_insert(struct nilfs_bmap *bmap, |
1625 | __u64 key, __u64 ptr, | 1709 | __u64 key, __u64 ptr, |
1626 | const __u64 *keys, const __u64 *ptrs, | 1710 | const __u64 *keys, const __u64 *ptrs, |
1627 | int n, __u64 low, __u64 high, | 1711 | int n, |
1628 | union nilfs_bmap_ptr_req *dreq, | 1712 | union nilfs_bmap_ptr_req *dreq, |
1629 | union nilfs_bmap_ptr_req *nreq, | 1713 | union nilfs_bmap_ptr_req *nreq, |
1630 | struct buffer_head *bh) | 1714 | struct buffer_head *bh) |
@@ -1642,12 +1726,10 @@ nilfs_btree_commit_convert_and_insert(struct nilfs_bmap *bmap, | |||
1642 | 1726 | ||
1643 | /* convert and insert */ | 1727 | /* convert and insert */ |
1644 | btree = (struct nilfs_btree *)bmap; | 1728 | btree = (struct nilfs_btree *)bmap; |
1645 | nilfs_btree_init(bmap, low, high); | 1729 | nilfs_btree_init(bmap); |
1646 | if (nreq != NULL) { | 1730 | if (nreq != NULL) { |
1647 | if (bmap->b_pops->bpop_commit_alloc_ptr != NULL) { | 1731 | nilfs_bmap_commit_alloc_ptr(bmap, dreq); |
1648 | bmap->b_pops->bpop_commit_alloc_ptr(bmap, dreq); | 1732 | nilfs_bmap_commit_alloc_ptr(bmap, nreq); |
1649 | bmap->b_pops->bpop_commit_alloc_ptr(bmap, nreq); | ||
1650 | } | ||
1651 | 1733 | ||
1652 | /* create child node at level 1 */ | 1734 | /* create child node at level 1 */ |
1653 | lock_buffer(bh); | 1735 | lock_buffer(bh); |
@@ -1661,7 +1743,7 @@ nilfs_btree_commit_convert_and_insert(struct nilfs_bmap *bmap, | |||
1661 | nilfs_bmap_set_dirty(bmap); | 1743 | nilfs_bmap_set_dirty(bmap); |
1662 | 1744 | ||
1663 | unlock_buffer(bh); | 1745 | unlock_buffer(bh); |
1664 | nilfs_bmap_put_block(bmap, bh); | 1746 | brelse(bh); |
1665 | 1747 | ||
1666 | /* create root node at level 2 */ | 1748 | /* create root node at level 2 */ |
1667 | node = nilfs_btree_get_root(btree); | 1749 | node = nilfs_btree_get_root(btree); |
@@ -1669,8 +1751,7 @@ nilfs_btree_commit_convert_and_insert(struct nilfs_bmap *bmap, | |||
1669 | nilfs_btree_node_init(btree, node, NILFS_BTREE_NODE_ROOT, | 1751 | nilfs_btree_node_init(btree, node, NILFS_BTREE_NODE_ROOT, |
1670 | 2, 1, &keys[0], &tmpptr); | 1752 | 2, 1, &keys[0], &tmpptr); |
1671 | } else { | 1753 | } else { |
1672 | if (bmap->b_pops->bpop_commit_alloc_ptr != NULL) | 1754 | nilfs_bmap_commit_alloc_ptr(bmap, dreq); |
1673 | bmap->b_pops->bpop_commit_alloc_ptr(bmap, dreq); | ||
1674 | 1755 | ||
1675 | /* create root node at level 1 */ | 1756 | /* create root node at level 1 */ |
1676 | node = nilfs_btree_get_root(btree); | 1757 | node = nilfs_btree_get_root(btree); |
@@ -1682,8 +1763,8 @@ nilfs_btree_commit_convert_and_insert(struct nilfs_bmap *bmap, | |||
1682 | nilfs_bmap_set_dirty(bmap); | 1763 | nilfs_bmap_set_dirty(bmap); |
1683 | } | 1764 | } |
1684 | 1765 | ||
1685 | if (btree->bt_ops->btop_set_target != NULL) | 1766 | if (NILFS_BMAP_USE_VBN(bmap)) |
1686 | btree->bt_ops->btop_set_target(btree, key, dreq->bpr_ptr); | 1767 | nilfs_btree_set_target_v(btree, key, dreq->bpr_ptr); |
1687 | } | 1768 | } |
1688 | 1769 | ||
1689 | /** | 1770 | /** |
@@ -1694,13 +1775,10 @@ nilfs_btree_commit_convert_and_insert(struct nilfs_bmap *bmap, | |||
1694 | * @keys: | 1775 | * @keys: |
1695 | * @ptrs: | 1776 | * @ptrs: |
1696 | * @n: | 1777 | * @n: |
1697 | * @low: | ||
1698 | * @high: | ||
1699 | */ | 1778 | */ |
1700 | int nilfs_btree_convert_and_insert(struct nilfs_bmap *bmap, | 1779 | int nilfs_btree_convert_and_insert(struct nilfs_bmap *bmap, |
1701 | __u64 key, __u64 ptr, | 1780 | __u64 key, __u64 ptr, |
1702 | const __u64 *keys, const __u64 *ptrs, | 1781 | const __u64 *keys, const __u64 *ptrs, int n) |
1703 | int n, __u64 low, __u64 high) | ||
1704 | { | 1782 | { |
1705 | struct buffer_head *bh; | 1783 | struct buffer_head *bh; |
1706 | union nilfs_bmap_ptr_req dreq, nreq, *di, *ni; | 1784 | union nilfs_bmap_ptr_req dreq, nreq, *di, *ni; |
@@ -1725,7 +1803,7 @@ int nilfs_btree_convert_and_insert(struct nilfs_bmap *bmap, | |||
1725 | if (ret < 0) | 1803 | if (ret < 0) |
1726 | return ret; | 1804 | return ret; |
1727 | nilfs_btree_commit_convert_and_insert(bmap, key, ptr, keys, ptrs, n, | 1805 | nilfs_btree_commit_convert_and_insert(bmap, key, ptr, keys, ptrs, n, |
1728 | low, high, di, ni, bh); | 1806 | di, ni, bh); |
1729 | nilfs_bmap_add_blocks(bmap, stats.bs_nblocks); | 1807 | nilfs_bmap_add_blocks(bmap, stats.bs_nblocks); |
1730 | return 0; | 1808 | return 0; |
1731 | } | 1809 | } |
@@ -1754,9 +1832,9 @@ static int nilfs_btree_prepare_update_v(struct nilfs_btree *btree, | |||
1754 | nilfs_btree_node_get_ptr(btree, parent, | 1832 | nilfs_btree_node_get_ptr(btree, parent, |
1755 | path[level + 1].bp_index); | 1833 | path[level + 1].bp_index); |
1756 | path[level].bp_newreq.bpr_ptr = path[level].bp_oldreq.bpr_ptr + 1; | 1834 | path[level].bp_newreq.bpr_ptr = path[level].bp_oldreq.bpr_ptr + 1; |
1757 | ret = nilfs_bmap_prepare_update(&btree->bt_bmap, | 1835 | ret = nilfs_bmap_prepare_update_v(&btree->bt_bmap, |
1758 | &path[level].bp_oldreq, | 1836 | &path[level].bp_oldreq, |
1759 | &path[level].bp_newreq); | 1837 | &path[level].bp_newreq); |
1760 | if (ret < 0) | 1838 | if (ret < 0) |
1761 | return ret; | 1839 | return ret; |
1762 | 1840 | ||
@@ -1768,9 +1846,9 @@ static int nilfs_btree_prepare_update_v(struct nilfs_btree *btree, | |||
1768 | &NILFS_BMAP_I(&btree->bt_bmap)->i_btnode_cache, | 1846 | &NILFS_BMAP_I(&btree->bt_bmap)->i_btnode_cache, |
1769 | &path[level].bp_ctxt); | 1847 | &path[level].bp_ctxt); |
1770 | if (ret < 0) { | 1848 | if (ret < 0) { |
1771 | nilfs_bmap_abort_update(&btree->bt_bmap, | 1849 | nilfs_bmap_abort_update_v(&btree->bt_bmap, |
1772 | &path[level].bp_oldreq, | 1850 | &path[level].bp_oldreq, |
1773 | &path[level].bp_newreq); | 1851 | &path[level].bp_newreq); |
1774 | return ret; | 1852 | return ret; |
1775 | } | 1853 | } |
1776 | } | 1854 | } |
@@ -1784,9 +1862,9 @@ static void nilfs_btree_commit_update_v(struct nilfs_btree *btree, | |||
1784 | { | 1862 | { |
1785 | struct nilfs_btree_node *parent; | 1863 | struct nilfs_btree_node *parent; |
1786 | 1864 | ||
1787 | nilfs_bmap_commit_update(&btree->bt_bmap, | 1865 | nilfs_bmap_commit_update_v(&btree->bt_bmap, |
1788 | &path[level].bp_oldreq, | 1866 | &path[level].bp_oldreq, |
1789 | &path[level].bp_newreq); | 1867 | &path[level].bp_newreq); |
1790 | 1868 | ||
1791 | if (buffer_nilfs_node(path[level].bp_bh)) { | 1869 | if (buffer_nilfs_node(path[level].bp_bh)) { |
1792 | nilfs_btnode_commit_change_key( | 1870 | nilfs_btnode_commit_change_key( |
@@ -1805,9 +1883,9 @@ static void nilfs_btree_abort_update_v(struct nilfs_btree *btree, | |||
1805 | struct nilfs_btree_path *path, | 1883 | struct nilfs_btree_path *path, |
1806 | int level) | 1884 | int level) |
1807 | { | 1885 | { |
1808 | nilfs_bmap_abort_update(&btree->bt_bmap, | 1886 | nilfs_bmap_abort_update_v(&btree->bt_bmap, |
1809 | &path[level].bp_oldreq, | 1887 | &path[level].bp_oldreq, |
1810 | &path[level].bp_newreq); | 1888 | &path[level].bp_newreq); |
1811 | if (buffer_nilfs_node(path[level].bp_bh)) | 1889 | if (buffer_nilfs_node(path[level].bp_bh)) |
1812 | nilfs_btnode_abort_change_key( | 1890 | nilfs_btnode_abort_change_key( |
1813 | &NILFS_BMAP_I(&btree->bt_bmap)->i_btnode_cache, | 1891 | &NILFS_BMAP_I(&btree->bt_bmap)->i_btnode_cache, |
@@ -1930,7 +2008,9 @@ static int nilfs_btree_propagate(const struct nilfs_bmap *bmap, | |||
1930 | goto out; | 2008 | goto out; |
1931 | } | 2009 | } |
1932 | 2010 | ||
1933 | ret = btree->bt_ops->btop_propagate(btree, path, level, bh); | 2011 | ret = NILFS_BMAP_USE_VBN(bmap) ? |
2012 | nilfs_btree_propagate_v(btree, path, level, bh) : | ||
2013 | nilfs_btree_propagate_p(btree, path, level, bh); | ||
1934 | 2014 | ||
1935 | out: | 2015 | out: |
1936 | nilfs_btree_clear_path(btree, path); | 2016 | nilfs_btree_clear_path(btree, path); |
@@ -2066,12 +2146,9 @@ static int nilfs_btree_assign_v(struct nilfs_btree *btree, | |||
2066 | ptr = nilfs_btree_node_get_ptr(btree, parent, | 2146 | ptr = nilfs_btree_node_get_ptr(btree, parent, |
2067 | path[level + 1].bp_index); | 2147 | path[level + 1].bp_index); |
2068 | req.bpr_ptr = ptr; | 2148 | req.bpr_ptr = ptr; |
2069 | ret = btree->bt_bmap.b_pops->bpop_prepare_start_ptr(&btree->bt_bmap, | 2149 | ret = nilfs_bmap_start_v(&btree->bt_bmap, &req, blocknr); |
2070 | &req); | 2150 | if (unlikely(ret < 0)) |
2071 | if (ret < 0) | ||
2072 | return ret; | 2151 | return ret; |
2073 | btree->bt_bmap.b_pops->bpop_commit_start_ptr(&btree->bt_bmap, | ||
2074 | &req, blocknr); | ||
2075 | 2152 | ||
2076 | key = nilfs_btree_node_get_key(btree, parent, | 2153 | key = nilfs_btree_node_get_key(btree, parent, |
2077 | path[level + 1].bp_index); | 2154 | path[level + 1].bp_index); |
@@ -2114,8 +2191,9 @@ static int nilfs_btree_assign(struct nilfs_bmap *bmap, | |||
2114 | goto out; | 2191 | goto out; |
2115 | } | 2192 | } |
2116 | 2193 | ||
2117 | ret = btree->bt_ops->btop_assign(btree, path, level, bh, | 2194 | ret = NILFS_BMAP_USE_VBN(bmap) ? |
2118 | blocknr, binfo); | 2195 | nilfs_btree_assign_v(btree, path, level, bh, blocknr, binfo) : |
2196 | nilfs_btree_assign_p(btree, path, level, bh, blocknr, binfo); | ||
2119 | 2197 | ||
2120 | out: | 2198 | out: |
2121 | nilfs_btree_clear_path(btree, path); | 2199 | nilfs_btree_clear_path(btree, path); |
@@ -2171,7 +2249,7 @@ static int nilfs_btree_mark(struct nilfs_bmap *bmap, __u64 key, int level) | |||
2171 | WARN_ON(ret == -ENOENT); | 2249 | WARN_ON(ret == -ENOENT); |
2172 | goto out; | 2250 | goto out; |
2173 | } | 2251 | } |
2174 | ret = nilfs_bmap_get_block(&btree->bt_bmap, ptr, &bh); | 2252 | ret = nilfs_btree_get_block(btree, ptr, &bh); |
2175 | if (ret < 0) { | 2253 | if (ret < 0) { |
2176 | WARN_ON(ret == -ENOENT); | 2254 | WARN_ON(ret == -ENOENT); |
2177 | goto out; | 2255 | goto out; |
@@ -2179,7 +2257,7 @@ static int nilfs_btree_mark(struct nilfs_bmap *bmap, __u64 key, int level) | |||
2179 | 2257 | ||
2180 | if (!buffer_dirty(bh)) | 2258 | if (!buffer_dirty(bh)) |
2181 | nilfs_btnode_mark_dirty(bh); | 2259 | nilfs_btnode_mark_dirty(bh); |
2182 | nilfs_bmap_put_block(&btree->bt_bmap, bh); | 2260 | brelse(bh); |
2183 | if (!nilfs_bmap_dirty(&btree->bt_bmap)) | 2261 | if (!nilfs_bmap_dirty(&btree->bt_bmap)) |
2184 | nilfs_bmap_set_dirty(&btree->bt_bmap); | 2262 | nilfs_bmap_set_dirty(&btree->bt_bmap); |
2185 | 2263 | ||
@@ -2191,6 +2269,7 @@ static int nilfs_btree_mark(struct nilfs_bmap *bmap, __u64 key, int level) | |||
2191 | 2269 | ||
2192 | static const struct nilfs_bmap_operations nilfs_btree_ops = { | 2270 | static const struct nilfs_bmap_operations nilfs_btree_ops = { |
2193 | .bop_lookup = nilfs_btree_lookup, | 2271 | .bop_lookup = nilfs_btree_lookup, |
2272 | .bop_lookup_contig = nilfs_btree_lookup_contig, | ||
2194 | .bop_insert = nilfs_btree_insert, | 2273 | .bop_insert = nilfs_btree_insert, |
2195 | .bop_delete = nilfs_btree_delete, | 2274 | .bop_delete = nilfs_btree_delete, |
2196 | .bop_clear = NULL, | 2275 | .bop_clear = NULL, |
@@ -2210,6 +2289,7 @@ static const struct nilfs_bmap_operations nilfs_btree_ops = { | |||
2210 | 2289 | ||
2211 | static const struct nilfs_bmap_operations nilfs_btree_ops_gc = { | 2290 | static const struct nilfs_bmap_operations nilfs_btree_ops_gc = { |
2212 | .bop_lookup = NULL, | 2291 | .bop_lookup = NULL, |
2292 | .bop_lookup_contig = NULL, | ||
2213 | .bop_insert = NULL, | 2293 | .bop_insert = NULL, |
2214 | .bop_delete = NULL, | 2294 | .bop_delete = NULL, |
2215 | .bop_clear = NULL, | 2295 | .bop_clear = NULL, |
@@ -2227,43 +2307,13 @@ static const struct nilfs_bmap_operations nilfs_btree_ops_gc = { | |||
2227 | .bop_gather_data = NULL, | 2307 | .bop_gather_data = NULL, |
2228 | }; | 2308 | }; |
2229 | 2309 | ||
2230 | static const struct nilfs_btree_operations nilfs_btree_ops_v = { | 2310 | int nilfs_btree_init(struct nilfs_bmap *bmap) |
2231 | .btop_find_target = nilfs_btree_find_target_v, | ||
2232 | .btop_set_target = nilfs_btree_set_target_v, | ||
2233 | .btop_propagate = nilfs_btree_propagate_v, | ||
2234 | .btop_assign = nilfs_btree_assign_v, | ||
2235 | }; | ||
2236 | |||
2237 | static const struct nilfs_btree_operations nilfs_btree_ops_p = { | ||
2238 | .btop_find_target = NULL, | ||
2239 | .btop_set_target = NULL, | ||
2240 | .btop_propagate = nilfs_btree_propagate_p, | ||
2241 | .btop_assign = nilfs_btree_assign_p, | ||
2242 | }; | ||
2243 | |||
2244 | int nilfs_btree_init(struct nilfs_bmap *bmap, __u64 low, __u64 high) | ||
2245 | { | 2311 | { |
2246 | struct nilfs_btree *btree; | ||
2247 | |||
2248 | btree = (struct nilfs_btree *)bmap; | ||
2249 | bmap->b_ops = &nilfs_btree_ops; | 2312 | bmap->b_ops = &nilfs_btree_ops; |
2250 | bmap->b_low = low; | ||
2251 | bmap->b_high = high; | ||
2252 | switch (bmap->b_inode->i_ino) { | ||
2253 | case NILFS_DAT_INO: | ||
2254 | btree->bt_ops = &nilfs_btree_ops_p; | ||
2255 | break; | ||
2256 | default: | ||
2257 | btree->bt_ops = &nilfs_btree_ops_v; | ||
2258 | break; | ||
2259 | } | ||
2260 | |||
2261 | return 0; | 2313 | return 0; |
2262 | } | 2314 | } |
2263 | 2315 | ||
2264 | void nilfs_btree_init_gc(struct nilfs_bmap *bmap) | 2316 | void nilfs_btree_init_gc(struct nilfs_bmap *bmap) |
2265 | { | 2317 | { |
2266 | bmap->b_low = NILFS_BMAP_LARGE_LOW; | ||
2267 | bmap->b_high = NILFS_BMAP_LARGE_HIGH; | ||
2268 | bmap->b_ops = &nilfs_btree_ops_gc; | 2318 | bmap->b_ops = &nilfs_btree_ops_gc; |
2269 | } | 2319 | } |
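A note on the pattern above: this part of the patch drops the nilfs_btree_operations vtable (btop_find_target, btop_propagate, btop_assign, ...) together with the b_pops pointer-operation hooks, and every former indirect call becomes an explicit call selected by NILFS_BMAP_USE_VBN(). The removed initialisation code switched on the inode number, so one plausible reading of that predicate (a sketch based on the removed switch, not necessarily the definition added in bmap.h) is:

    /* Hypothetical helper mirroring the removed switch on i_ino: the DAT
     * file stores physical block numbers directly, every other bmap
     * stores virtual block numbers translated through the DAT. */
    static inline int nilfs_bmap_uses_vbn(const struct nilfs_bmap *bmap)
    {
            return bmap->b_inode->i_ino != NILFS_DAT_INO;
    }

With that single predicate available, the _v/_p variants (nilfs_btree_propagate_v/_p, nilfs_btree_assign_v/_p, and their direct-mapping counterparts) can be chosen inline, which is exactly what the two-way conditionals introduced in these hunks do.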
diff --git a/fs/nilfs2/btree.h b/fs/nilfs2/btree.h index 4766deb52fb1..0e72bbbc6b64 100644 --- a/fs/nilfs2/btree.h +++ b/fs/nilfs2/btree.h | |||
@@ -34,28 +34,6 @@ struct nilfs_btree; | |||
34 | struct nilfs_btree_path; | 34 | struct nilfs_btree_path; |
35 | 35 | ||
36 | /** | 36 | /** |
37 | * struct nilfs_btree_operations - B-tree operation table | ||
38 | */ | ||
39 | struct nilfs_btree_operations { | ||
40 | __u64 (*btop_find_target)(const struct nilfs_btree *, | ||
41 | const struct nilfs_btree_path *, __u64); | ||
42 | void (*btop_set_target)(struct nilfs_btree *, __u64, __u64); | ||
43 | |||
44 | struct the_nilfs *(*btop_get_nilfs)(struct nilfs_btree *); | ||
45 | |||
46 | int (*btop_propagate)(struct nilfs_btree *, | ||
47 | struct nilfs_btree_path *, | ||
48 | int, | ||
49 | struct buffer_head *); | ||
50 | int (*btop_assign)(struct nilfs_btree *, | ||
51 | struct nilfs_btree_path *, | ||
52 | int, | ||
53 | struct buffer_head **, | ||
54 | sector_t, | ||
55 | union nilfs_binfo *); | ||
56 | }; | ||
57 | |||
58 | /** | ||
59 | * struct nilfs_btree_node - B-tree node | 37 | * struct nilfs_btree_node - B-tree node |
60 | * @bn_flags: flags | 38 | * @bn_flags: flags |
61 | * @bn_level: level | 39 | * @bn_level: level |
@@ -80,13 +58,9 @@ struct nilfs_btree_node { | |||
80 | /** | 58 | /** |
81 | * struct nilfs_btree - B-tree structure | 59 | * struct nilfs_btree - B-tree structure |
82 | * @bt_bmap: bmap base structure | 60 | * @bt_bmap: bmap base structure |
83 | * @bt_ops: B-tree operation table | ||
84 | */ | 61 | */ |
85 | struct nilfs_btree { | 62 | struct nilfs_btree { |
86 | struct nilfs_bmap bt_bmap; | 63 | struct nilfs_bmap bt_bmap; |
87 | |||
88 | /* B-tree-specific members */ | ||
89 | const struct nilfs_btree_operations *bt_ops; | ||
90 | }; | 64 | }; |
91 | 65 | ||
92 | 66 | ||
@@ -108,10 +82,9 @@ struct nilfs_btree { | |||
108 | 82 | ||
109 | int nilfs_btree_path_cache_init(void); | 83 | int nilfs_btree_path_cache_init(void); |
110 | void nilfs_btree_path_cache_destroy(void); | 84 | void nilfs_btree_path_cache_destroy(void); |
111 | int nilfs_btree_init(struct nilfs_bmap *, __u64, __u64); | 85 | int nilfs_btree_init(struct nilfs_bmap *); |
112 | int nilfs_btree_convert_and_insert(struct nilfs_bmap *, __u64, __u64, | 86 | int nilfs_btree_convert_and_insert(struct nilfs_bmap *, __u64, __u64, |
113 | const __u64 *, const __u64 *, | 87 | const __u64 *, const __u64 *, int); |
114 | int, __u64, __u64); | ||
115 | void nilfs_btree_init_gc(struct nilfs_bmap *); | 88 | void nilfs_btree_init_gc(struct nilfs_bmap *); |
116 | 89 | ||
117 | #endif /* _NILFS_BTREE_H */ | 90 | #endif /* _NILFS_BTREE_H */ |
diff --git a/fs/nilfs2/cpfile.c b/fs/nilfs2/cpfile.c index cadd36b14d07..7d49813f66d6 100644 --- a/fs/nilfs2/cpfile.c +++ b/fs/nilfs2/cpfile.c | |||
@@ -295,10 +295,6 @@ int nilfs_cpfile_delete_checkpoints(struct inode *cpfile, | |||
295 | return -EINVAL; | 295 | return -EINVAL; |
296 | } | 296 | } |
297 | 297 | ||
298 | /* cannot delete the latest checkpoint */ | ||
299 | if (start == nilfs_mdt_cno(cpfile) - 1) | ||
300 | return -EPERM; | ||
301 | |||
302 | down_write(&NILFS_MDT(cpfile)->mi_sem); | 298 | down_write(&NILFS_MDT(cpfile)->mi_sem); |
303 | 299 | ||
304 | ret = nilfs_cpfile_get_header_block(cpfile, &header_bh); | 300 | ret = nilfs_cpfile_get_header_block(cpfile, &header_bh); |
@@ -384,9 +380,10 @@ static void nilfs_cpfile_checkpoint_to_cpinfo(struct inode *cpfile, | |||
384 | } | 380 | } |
385 | 381 | ||
386 | static ssize_t nilfs_cpfile_do_get_cpinfo(struct inode *cpfile, __u64 *cnop, | 382 | static ssize_t nilfs_cpfile_do_get_cpinfo(struct inode *cpfile, __u64 *cnop, |
387 | struct nilfs_cpinfo *ci, size_t nci) | 383 | void *buf, unsigned cisz, size_t nci) |
388 | { | 384 | { |
389 | struct nilfs_checkpoint *cp; | 385 | struct nilfs_checkpoint *cp; |
386 | struct nilfs_cpinfo *ci = buf; | ||
390 | struct buffer_head *bh; | 387 | struct buffer_head *bh; |
391 | size_t cpsz = NILFS_MDT(cpfile)->mi_entry_size; | 388 | size_t cpsz = NILFS_MDT(cpfile)->mi_entry_size; |
392 | __u64 cur_cno = nilfs_mdt_cno(cpfile), cno = *cnop; | 389 | __u64 cur_cno = nilfs_mdt_cno(cpfile), cno = *cnop; |
@@ -410,17 +407,22 @@ static ssize_t nilfs_cpfile_do_get_cpinfo(struct inode *cpfile, __u64 *cnop, | |||
410 | kaddr = kmap_atomic(bh->b_page, KM_USER0); | 407 | kaddr = kmap_atomic(bh->b_page, KM_USER0); |
411 | cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, bh, kaddr); | 408 | cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, bh, kaddr); |
412 | for (i = 0; i < ncps && n < nci; i++, cp = (void *)cp + cpsz) { | 409 | for (i = 0; i < ncps && n < nci; i++, cp = (void *)cp + cpsz) { |
413 | if (!nilfs_checkpoint_invalid(cp)) | 410 | if (!nilfs_checkpoint_invalid(cp)) { |
414 | nilfs_cpfile_checkpoint_to_cpinfo( | 411 | nilfs_cpfile_checkpoint_to_cpinfo(cpfile, cp, |
415 | cpfile, cp, &ci[n++]); | 412 | ci); |
413 | ci = (void *)ci + cisz; | ||
414 | n++; | ||
415 | } | ||
416 | } | 416 | } |
417 | kunmap_atomic(kaddr, KM_USER0); | 417 | kunmap_atomic(kaddr, KM_USER0); |
418 | brelse(bh); | 418 | brelse(bh); |
419 | } | 419 | } |
420 | 420 | ||
421 | ret = n; | 421 | ret = n; |
422 | if (n > 0) | 422 | if (n > 0) { |
423 | *cnop = ci[n - 1].ci_cno + 1; | 423 | ci = (void *)ci - cisz; |
424 | *cnop = ci->ci_cno + 1; | ||
425 | } | ||
424 | 426 | ||
425 | out: | 427 | out: |
426 | up_read(&NILFS_MDT(cpfile)->mi_sem); | 428 | up_read(&NILFS_MDT(cpfile)->mi_sem); |
@@ -428,11 +430,12 @@ static ssize_t nilfs_cpfile_do_get_cpinfo(struct inode *cpfile, __u64 *cnop, | |||
428 | } | 430 | } |
429 | 431 | ||
430 | static ssize_t nilfs_cpfile_do_get_ssinfo(struct inode *cpfile, __u64 *cnop, | 432 | static ssize_t nilfs_cpfile_do_get_ssinfo(struct inode *cpfile, __u64 *cnop, |
431 | struct nilfs_cpinfo *ci, size_t nci) | 433 | void *buf, unsigned cisz, size_t nci) |
432 | { | 434 | { |
433 | struct buffer_head *bh; | 435 | struct buffer_head *bh; |
434 | struct nilfs_cpfile_header *header; | 436 | struct nilfs_cpfile_header *header; |
435 | struct nilfs_checkpoint *cp; | 437 | struct nilfs_checkpoint *cp; |
438 | struct nilfs_cpinfo *ci = buf; | ||
436 | __u64 curr = *cnop, next; | 439 | __u64 curr = *cnop, next; |
437 | unsigned long curr_blkoff, next_blkoff; | 440 | unsigned long curr_blkoff, next_blkoff; |
438 | void *kaddr; | 441 | void *kaddr; |
@@ -472,7 +475,9 @@ static ssize_t nilfs_cpfile_do_get_ssinfo(struct inode *cpfile, __u64 *cnop, | |||
472 | if (unlikely(nilfs_checkpoint_invalid(cp) || | 475 | if (unlikely(nilfs_checkpoint_invalid(cp) || |
473 | !nilfs_checkpoint_snapshot(cp))) | 476 | !nilfs_checkpoint_snapshot(cp))) |
474 | break; | 477 | break; |
475 | nilfs_cpfile_checkpoint_to_cpinfo(cpfile, cp, &ci[n++]); | 478 | nilfs_cpfile_checkpoint_to_cpinfo(cpfile, cp, ci); |
479 | ci = (void *)ci + cisz; | ||
480 | n++; | ||
476 | next = le64_to_cpu(cp->cp_snapshot_list.ssl_next); | 481 | next = le64_to_cpu(cp->cp_snapshot_list.ssl_next); |
477 | if (next == 0) | 482 | if (next == 0) |
478 | break; /* reach end of the snapshot list */ | 483 | break; /* reach end of the snapshot list */ |
@@ -511,13 +516,13 @@ static ssize_t nilfs_cpfile_do_get_ssinfo(struct inode *cpfile, __u64 *cnop, | |||
511 | */ | 516 | */ |
512 | 517 | ||
513 | ssize_t nilfs_cpfile_get_cpinfo(struct inode *cpfile, __u64 *cnop, int mode, | 518 | ssize_t nilfs_cpfile_get_cpinfo(struct inode *cpfile, __u64 *cnop, int mode, |
514 | struct nilfs_cpinfo *ci, size_t nci) | 519 | void *buf, unsigned cisz, size_t nci) |
515 | { | 520 | { |
516 | switch (mode) { | 521 | switch (mode) { |
517 | case NILFS_CHECKPOINT: | 522 | case NILFS_CHECKPOINT: |
518 | return nilfs_cpfile_do_get_cpinfo(cpfile, cnop, ci, nci); | 523 | return nilfs_cpfile_do_get_cpinfo(cpfile, cnop, buf, cisz, nci); |
519 | case NILFS_SNAPSHOT: | 524 | case NILFS_SNAPSHOT: |
520 | return nilfs_cpfile_do_get_ssinfo(cpfile, cnop, ci, nci); | 525 | return nilfs_cpfile_do_get_ssinfo(cpfile, cnop, buf, cisz, nci); |
521 | default: | 526 | default: |
522 | return -EINVAL; | 527 | return -EINVAL; |
523 | } | 528 | } |
@@ -533,20 +538,14 @@ int nilfs_cpfile_delete_checkpoint(struct inode *cpfile, __u64 cno) | |||
533 | struct nilfs_cpinfo ci; | 538 | struct nilfs_cpinfo ci; |
534 | __u64 tcno = cno; | 539 | __u64 tcno = cno; |
535 | ssize_t nci; | 540 | ssize_t nci; |
536 | int ret; | ||
537 | 541 | ||
538 | nci = nilfs_cpfile_do_get_cpinfo(cpfile, &tcno, &ci, 1); | 542 | nci = nilfs_cpfile_do_get_cpinfo(cpfile, &tcno, &ci, sizeof(ci), 1); |
539 | if (nci < 0) | 543 | if (nci < 0) |
540 | return nci; | 544 | return nci; |
541 | else if (nci == 0 || ci.ci_cno != cno) | 545 | else if (nci == 0 || ci.ci_cno != cno) |
542 | return -ENOENT; | 546 | return -ENOENT; |
543 | 547 | else if (nilfs_cpinfo_snapshot(&ci)) | |
544 | /* cannot delete the latest checkpoint nor snapshots */ | 548 | return -EBUSY; |
545 | ret = nilfs_cpinfo_snapshot(&ci); | ||
546 | if (ret < 0) | ||
547 | return ret; | ||
548 | else if (ret > 0 || cno == nilfs_mdt_cno(cpfile) - 1) | ||
549 | return -EPERM; | ||
550 | 549 | ||
551 | return nilfs_cpfile_delete_checkpoints(cpfile, cno, cno + 1); | 550 | return nilfs_cpfile_delete_checkpoints(cpfile, cno, cno + 1); |
552 | } | 551 | } |
diff --git a/fs/nilfs2/cpfile.h b/fs/nilfs2/cpfile.h index 1a8a1008c342..788a45950197 100644 --- a/fs/nilfs2/cpfile.h +++ b/fs/nilfs2/cpfile.h | |||
@@ -39,7 +39,7 @@ int nilfs_cpfile_delete_checkpoint(struct inode *, __u64); | |||
39 | int nilfs_cpfile_change_cpmode(struct inode *, __u64, int); | 39 | int nilfs_cpfile_change_cpmode(struct inode *, __u64, int); |
40 | int nilfs_cpfile_is_snapshot(struct inode *, __u64); | 40 | int nilfs_cpfile_is_snapshot(struct inode *, __u64); |
41 | int nilfs_cpfile_get_stat(struct inode *, struct nilfs_cpstat *); | 41 | int nilfs_cpfile_get_stat(struct inode *, struct nilfs_cpstat *); |
42 | ssize_t nilfs_cpfile_get_cpinfo(struct inode *, __u64 *, int, | 42 | ssize_t nilfs_cpfile_get_cpinfo(struct inode *, __u64 *, int, void *, unsigned, |
43 | struct nilfs_cpinfo *, size_t); | 43 | size_t); |
44 | 44 | ||
45 | #endif /* _NILFS_CPFILE_H */ | 45 | #endif /* _NILFS_CPFILE_H */ |
diff --git a/fs/nilfs2/dat.c b/fs/nilfs2/dat.c index bb8a5818e7f1..0b2710e2d565 100644 --- a/fs/nilfs2/dat.c +++ b/fs/nilfs2/dat.c | |||
@@ -92,21 +92,6 @@ void nilfs_dat_abort_alloc(struct inode *dat, struct nilfs_palloc_req *req) | |||
92 | nilfs_palloc_abort_alloc_entry(dat, req); | 92 | nilfs_palloc_abort_alloc_entry(dat, req); |
93 | } | 93 | } |
94 | 94 | ||
95 | int nilfs_dat_prepare_free(struct inode *dat, struct nilfs_palloc_req *req) | ||
96 | { | ||
97 | int ret; | ||
98 | |||
99 | ret = nilfs_palloc_prepare_free_entry(dat, req); | ||
100 | if (ret < 0) | ||
101 | return ret; | ||
102 | ret = nilfs_dat_prepare_entry(dat, req, 0); | ||
103 | if (ret < 0) { | ||
104 | nilfs_palloc_abort_free_entry(dat, req); | ||
105 | return ret; | ||
106 | } | ||
107 | return 0; | ||
108 | } | ||
109 | |||
110 | void nilfs_dat_commit_free(struct inode *dat, struct nilfs_palloc_req *req) | 95 | void nilfs_dat_commit_free(struct inode *dat, struct nilfs_palloc_req *req) |
111 | { | 96 | { |
112 | struct nilfs_dat_entry *entry; | 97 | struct nilfs_dat_entry *entry; |
@@ -391,36 +376,37 @@ int nilfs_dat_translate(struct inode *dat, __u64 vblocknr, sector_t *blocknrp) | |||
391 | return ret; | 376 | return ret; |
392 | } | 377 | } |
393 | 378 | ||
394 | ssize_t nilfs_dat_get_vinfo(struct inode *dat, struct nilfs_vinfo *vinfo, | 379 | ssize_t nilfs_dat_get_vinfo(struct inode *dat, void *buf, unsigned visz, |
395 | size_t nvi) | 380 | size_t nvi) |
396 | { | 381 | { |
397 | struct buffer_head *entry_bh; | 382 | struct buffer_head *entry_bh; |
398 | struct nilfs_dat_entry *entry; | 383 | struct nilfs_dat_entry *entry; |
384 | struct nilfs_vinfo *vinfo = buf; | ||
399 | __u64 first, last; | 385 | __u64 first, last; |
400 | void *kaddr; | 386 | void *kaddr; |
401 | unsigned long entries_per_block = NILFS_MDT(dat)->mi_entries_per_block; | 387 | unsigned long entries_per_block = NILFS_MDT(dat)->mi_entries_per_block; |
402 | int i, j, n, ret; | 388 | int i, j, n, ret; |
403 | 389 | ||
404 | for (i = 0; i < nvi; i += n) { | 390 | for (i = 0; i < nvi; i += n) { |
405 | ret = nilfs_palloc_get_entry_block(dat, vinfo[i].vi_vblocknr, | 391 | ret = nilfs_palloc_get_entry_block(dat, vinfo->vi_vblocknr, |
406 | 0, &entry_bh); | 392 | 0, &entry_bh); |
407 | if (ret < 0) | 393 | if (ret < 0) |
408 | return ret; | 394 | return ret; |
409 | kaddr = kmap_atomic(entry_bh->b_page, KM_USER0); | 395 | kaddr = kmap_atomic(entry_bh->b_page, KM_USER0); |
410 | /* last virtual block number in this block */ | 396 | /* last virtual block number in this block */ |
411 | first = vinfo[i].vi_vblocknr; | 397 | first = vinfo->vi_vblocknr; |
412 | do_div(first, entries_per_block); | 398 | do_div(first, entries_per_block); |
413 | first *= entries_per_block; | 399 | first *= entries_per_block; |
414 | last = first + entries_per_block - 1; | 400 | last = first + entries_per_block - 1; |
415 | for (j = i, n = 0; | 401 | for (j = i, n = 0; |
416 | j < nvi && vinfo[j].vi_vblocknr >= first && | 402 | j < nvi && vinfo->vi_vblocknr >= first && |
417 | vinfo[j].vi_vblocknr <= last; | 403 | vinfo->vi_vblocknr <= last; |
418 | j++, n++) { | 404 | j++, n++, vinfo = (void *)vinfo + visz) { |
419 | entry = nilfs_palloc_block_get_entry( | 405 | entry = nilfs_palloc_block_get_entry( |
420 | dat, vinfo[j].vi_vblocknr, entry_bh, kaddr); | 406 | dat, vinfo->vi_vblocknr, entry_bh, kaddr); |
421 | vinfo[j].vi_start = le64_to_cpu(entry->de_start); | 407 | vinfo->vi_start = le64_to_cpu(entry->de_start); |
422 | vinfo[j].vi_end = le64_to_cpu(entry->de_end); | 408 | vinfo->vi_end = le64_to_cpu(entry->de_end); |
423 | vinfo[j].vi_blocknr = le64_to_cpu(entry->de_blocknr); | 409 | vinfo->vi_blocknr = le64_to_cpu(entry->de_blocknr); |
424 | } | 410 | } |
425 | kunmap_atomic(kaddr, KM_USER0); | 411 | kunmap_atomic(kaddr, KM_USER0); |
426 | brelse(entry_bh); | 412 | brelse(entry_bh); |
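nilfs_dat_get_vinfo() gets the same treatment: it walks an opaque buffer at a caller-supplied stride (visz) instead of indexing a struct nilfs_vinfo array. The loop also groups requests by the DAT entry block they fall in, which appears to assume the vi_vblocknr values arrive in ascending order. A sketch of that grouping arithmetic:

    /* Sketch of the per-block grouping: compute the range of virtual block
     * numbers served by the entry block holding 'vblocknr', then consume
     * consecutive requests while they stay inside that range. */
    __u64 first = vblocknr, last;

    do_div(first, entries_per_block);   /* entry-block index            */
    first *= entries_per_block;         /* first vblocknr in that block */
    last = first + entries_per_block - 1;
    /* serve requests with first <= vi_vblocknr <= last from one kmap */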
diff --git a/fs/nilfs2/dat.h b/fs/nilfs2/dat.h index d9560654a4b7..d328b81eead4 100644 --- a/fs/nilfs2/dat.h +++ b/fs/nilfs2/dat.h | |||
@@ -47,6 +47,6 @@ void nilfs_dat_abort_end(struct inode *, struct nilfs_palloc_req *); | |||
47 | int nilfs_dat_mark_dirty(struct inode *, __u64); | 47 | int nilfs_dat_mark_dirty(struct inode *, __u64); |
48 | int nilfs_dat_freev(struct inode *, __u64 *, size_t); | 48 | int nilfs_dat_freev(struct inode *, __u64 *, size_t); |
49 | int nilfs_dat_move(struct inode *, __u64, sector_t); | 49 | int nilfs_dat_move(struct inode *, __u64, sector_t); |
50 | ssize_t nilfs_dat_get_vinfo(struct inode *, struct nilfs_vinfo *, size_t); | 50 | ssize_t nilfs_dat_get_vinfo(struct inode *, void *, unsigned, size_t); |
51 | 51 | ||
52 | #endif /* _NILFS_DAT_H */ | 52 | #endif /* _NILFS_DAT_H */ |
diff --git a/fs/nilfs2/direct.c b/fs/nilfs2/direct.c index c6379e482781..342d9765df8d 100644 --- a/fs/nilfs2/direct.c +++ b/fs/nilfs2/direct.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include "page.h" | 25 | #include "page.h" |
26 | #include "direct.h" | 26 | #include "direct.h" |
27 | #include "alloc.h" | 27 | #include "alloc.h" |
28 | #include "dat.h" | ||
28 | 29 | ||
29 | static inline __le64 *nilfs_direct_dptrs(const struct nilfs_direct *direct) | 30 | static inline __le64 *nilfs_direct_dptrs(const struct nilfs_direct *direct) |
30 | { | 31 | { |
@@ -62,6 +63,47 @@ static int nilfs_direct_lookup(const struct nilfs_bmap *bmap, | |||
62 | return 0; | 63 | return 0; |
63 | } | 64 | } |
64 | 65 | ||
66 | static int nilfs_direct_lookup_contig(const struct nilfs_bmap *bmap, | ||
67 | __u64 key, __u64 *ptrp, | ||
68 | unsigned maxblocks) | ||
69 | { | ||
70 | struct nilfs_direct *direct = (struct nilfs_direct *)bmap; | ||
71 | struct inode *dat = NULL; | ||
72 | __u64 ptr, ptr2; | ||
73 | sector_t blocknr; | ||
74 | int ret, cnt; | ||
75 | |||
76 | if (key > NILFS_DIRECT_KEY_MAX || | ||
77 | (ptr = nilfs_direct_get_ptr(direct, key)) == | ||
78 | NILFS_BMAP_INVALID_PTR) | ||
79 | return -ENOENT; | ||
80 | |||
81 | if (NILFS_BMAP_USE_VBN(bmap)) { | ||
82 | dat = nilfs_bmap_get_dat(bmap); | ||
83 | ret = nilfs_dat_translate(dat, ptr, &blocknr); | ||
84 | if (ret < 0) | ||
85 | return ret; | ||
86 | ptr = blocknr; | ||
87 | } | ||
88 | |||
89 | maxblocks = min_t(unsigned, maxblocks, NILFS_DIRECT_KEY_MAX - key + 1); | ||
90 | for (cnt = 1; cnt < maxblocks && | ||
91 | (ptr2 = nilfs_direct_get_ptr(direct, key + cnt)) != | ||
92 | NILFS_BMAP_INVALID_PTR; | ||
93 | cnt++) { | ||
94 | if (dat) { | ||
95 | ret = nilfs_dat_translate(dat, ptr2, &blocknr); | ||
96 | if (ret < 0) | ||
97 | return ret; | ||
98 | ptr2 = blocknr; | ||
99 | } | ||
100 | if (ptr2 != ptr + cnt) | ||
101 | break; | ||
102 | } | ||
103 | *ptrp = ptr; | ||
104 | return cnt; | ||
105 | } | ||
106 | |||
65 | static __u64 | 107 | static __u64 |
66 | nilfs_direct_find_target_v(const struct nilfs_direct *direct, __u64 key) | 108 | nilfs_direct_find_target_v(const struct nilfs_direct *direct, __u64 key) |
67 | { | 109 | { |
@@ -90,10 +132,9 @@ static int nilfs_direct_prepare_insert(struct nilfs_direct *direct, | |||
90 | { | 132 | { |
91 | int ret; | 133 | int ret; |
92 | 134 | ||
93 | if (direct->d_ops->dop_find_target != NULL) | 135 | if (NILFS_BMAP_USE_VBN(&direct->d_bmap)) |
94 | req->bpr_ptr = direct->d_ops->dop_find_target(direct, key); | 136 | req->bpr_ptr = nilfs_direct_find_target_v(direct, key); |
95 | ret = direct->d_bmap.b_pops->bpop_prepare_alloc_ptr(&direct->d_bmap, | 137 | ret = nilfs_bmap_prepare_alloc_ptr(&direct->d_bmap, req); |
96 | req); | ||
97 | if (ret < 0) | 138 | if (ret < 0) |
98 | return ret; | 139 | return ret; |
99 | 140 | ||
@@ -111,16 +152,14 @@ static void nilfs_direct_commit_insert(struct nilfs_direct *direct, | |||
111 | bh = (struct buffer_head *)((unsigned long)ptr); | 152 | bh = (struct buffer_head *)((unsigned long)ptr); |
112 | set_buffer_nilfs_volatile(bh); | 153 | set_buffer_nilfs_volatile(bh); |
113 | 154 | ||
114 | if (direct->d_bmap.b_pops->bpop_commit_alloc_ptr != NULL) | 155 | nilfs_bmap_commit_alloc_ptr(&direct->d_bmap, req); |
115 | direct->d_bmap.b_pops->bpop_commit_alloc_ptr( | ||
116 | &direct->d_bmap, req); | ||
117 | nilfs_direct_set_ptr(direct, key, req->bpr_ptr); | 156 | nilfs_direct_set_ptr(direct, key, req->bpr_ptr); |
118 | 157 | ||
119 | if (!nilfs_bmap_dirty(&direct->d_bmap)) | 158 | if (!nilfs_bmap_dirty(&direct->d_bmap)) |
120 | nilfs_bmap_set_dirty(&direct->d_bmap); | 159 | nilfs_bmap_set_dirty(&direct->d_bmap); |
121 | 160 | ||
122 | if (direct->d_ops->dop_set_target != NULL) | 161 | if (NILFS_BMAP_USE_VBN(&direct->d_bmap)) |
123 | direct->d_ops->dop_set_target(direct, key, req->bpr_ptr); | 162 | nilfs_direct_set_target_v(direct, key, req->bpr_ptr); |
124 | } | 163 | } |
125 | 164 | ||
126 | static int nilfs_direct_insert(struct nilfs_bmap *bmap, __u64 key, __u64 ptr) | 165 | static int nilfs_direct_insert(struct nilfs_bmap *bmap, __u64 key, __u64 ptr) |
@@ -152,25 +191,18 @@ static int nilfs_direct_prepare_delete(struct nilfs_direct *direct, | |||
152 | { | 191 | { |
153 | int ret; | 192 | int ret; |
154 | 193 | ||
155 | if (direct->d_bmap.b_pops->bpop_prepare_end_ptr != NULL) { | 194 | req->bpr_ptr = nilfs_direct_get_ptr(direct, key); |
156 | req->bpr_ptr = nilfs_direct_get_ptr(direct, key); | 195 | ret = nilfs_bmap_prepare_end_ptr(&direct->d_bmap, req); |
157 | ret = direct->d_bmap.b_pops->bpop_prepare_end_ptr( | 196 | if (!ret) |
158 | &direct->d_bmap, req); | 197 | stats->bs_nblocks = 1; |
159 | if (ret < 0) | 198 | return ret; |
160 | return ret; | ||
161 | } | ||
162 | |||
163 | stats->bs_nblocks = 1; | ||
164 | return 0; | ||
165 | } | 199 | } |
166 | 200 | ||
167 | static void nilfs_direct_commit_delete(struct nilfs_direct *direct, | 201 | static void nilfs_direct_commit_delete(struct nilfs_direct *direct, |
168 | union nilfs_bmap_ptr_req *req, | 202 | union nilfs_bmap_ptr_req *req, |
169 | __u64 key) | 203 | __u64 key) |
170 | { | 204 | { |
171 | if (direct->d_bmap.b_pops->bpop_commit_end_ptr != NULL) | 205 | nilfs_bmap_commit_end_ptr(&direct->d_bmap, req); |
172 | direct->d_bmap.b_pops->bpop_commit_end_ptr( | ||
173 | &direct->d_bmap, req); | ||
174 | nilfs_direct_set_ptr(direct, key, NILFS_BMAP_INVALID_PTR); | 206 | nilfs_direct_set_ptr(direct, key, NILFS_BMAP_INVALID_PTR); |
175 | } | 207 | } |
176 | 208 | ||
@@ -244,8 +276,7 @@ static int nilfs_direct_gather_data(struct nilfs_bmap *bmap, | |||
244 | } | 276 | } |
245 | 277 | ||
246 | int nilfs_direct_delete_and_convert(struct nilfs_bmap *bmap, | 278 | int nilfs_direct_delete_and_convert(struct nilfs_bmap *bmap, |
247 | __u64 key, __u64 *keys, __u64 *ptrs, | 279 | __u64 key, __u64 *keys, __u64 *ptrs, int n) |
248 | int n, __u64 low, __u64 high) | ||
249 | { | 280 | { |
250 | struct nilfs_direct *direct; | 281 | struct nilfs_direct *direct; |
251 | __le64 *dptrs; | 282 | __le64 *dptrs; |
@@ -275,8 +306,7 @@ int nilfs_direct_delete_and_convert(struct nilfs_bmap *bmap, | |||
275 | dptrs[i] = NILFS_BMAP_INVALID_PTR; | 306 | dptrs[i] = NILFS_BMAP_INVALID_PTR; |
276 | } | 307 | } |
277 | 308 | ||
278 | nilfs_direct_init(bmap, low, high); | 309 | nilfs_direct_init(bmap); |
279 | |||
280 | return 0; | 310 | return 0; |
281 | } | 311 | } |
282 | 312 | ||
@@ -293,11 +323,11 @@ static int nilfs_direct_propagate_v(struct nilfs_direct *direct, | |||
293 | if (!buffer_nilfs_volatile(bh)) { | 323 | if (!buffer_nilfs_volatile(bh)) { |
294 | oldreq.bpr_ptr = ptr; | 324 | oldreq.bpr_ptr = ptr; |
295 | newreq.bpr_ptr = ptr; | 325 | newreq.bpr_ptr = ptr; |
296 | ret = nilfs_bmap_prepare_update(&direct->d_bmap, &oldreq, | 326 | ret = nilfs_bmap_prepare_update_v(&direct->d_bmap, &oldreq, |
297 | &newreq); | 327 | &newreq); |
298 | if (ret < 0) | 328 | if (ret < 0) |
299 | return ret; | 329 | return ret; |
300 | nilfs_bmap_commit_update(&direct->d_bmap, &oldreq, &newreq); | 330 | nilfs_bmap_commit_update_v(&direct->d_bmap, &oldreq, &newreq); |
301 | set_buffer_nilfs_volatile(bh); | 331 | set_buffer_nilfs_volatile(bh); |
302 | nilfs_direct_set_ptr(direct, key, newreq.bpr_ptr); | 332 | nilfs_direct_set_ptr(direct, key, newreq.bpr_ptr); |
303 | } else | 333 | } else |
@@ -309,12 +339,10 @@ static int nilfs_direct_propagate_v(struct nilfs_direct *direct, | |||
309 | static int nilfs_direct_propagate(const struct nilfs_bmap *bmap, | 339 | static int nilfs_direct_propagate(const struct nilfs_bmap *bmap, |
310 | struct buffer_head *bh) | 340 | struct buffer_head *bh) |
311 | { | 341 | { |
312 | struct nilfs_direct *direct; | 342 | struct nilfs_direct *direct = (struct nilfs_direct *)bmap; |
313 | 343 | ||
314 | direct = (struct nilfs_direct *)bmap; | 344 | return NILFS_BMAP_USE_VBN(bmap) ? |
315 | return (direct->d_ops->dop_propagate != NULL) ? | 345 | nilfs_direct_propagate_v(direct, bh) : 0; |
316 | direct->d_ops->dop_propagate(direct, bh) : | ||
317 | 0; | ||
318 | } | 346 | } |
319 | 347 | ||
320 | static int nilfs_direct_assign_v(struct nilfs_direct *direct, | 348 | static int nilfs_direct_assign_v(struct nilfs_direct *direct, |
@@ -327,12 +355,9 @@ static int nilfs_direct_assign_v(struct nilfs_direct *direct, | |||
327 | int ret; | 355 | int ret; |
328 | 356 | ||
329 | req.bpr_ptr = ptr; | 357 | req.bpr_ptr = ptr; |
330 | ret = direct->d_bmap.b_pops->bpop_prepare_start_ptr( | 358 | ret = nilfs_bmap_start_v(&direct->d_bmap, &req, blocknr); |
331 | &direct->d_bmap, &req); | 359 | if (unlikely(ret < 0)) |
332 | if (ret < 0) | ||
333 | return ret; | 360 | return ret; |
334 | direct->d_bmap.b_pops->bpop_commit_start_ptr(&direct->d_bmap, | ||
335 | &req, blocknr); | ||
336 | 361 | ||
337 | binfo->bi_v.bi_vblocknr = nilfs_bmap_ptr_to_dptr(ptr); | 362 | binfo->bi_v.bi_vblocknr = nilfs_bmap_ptr_to_dptr(ptr); |
338 | binfo->bi_v.bi_blkoff = nilfs_bmap_key_to_dkey(key); | 363 | binfo->bi_v.bi_blkoff = nilfs_bmap_key_to_dkey(key); |
@@ -377,12 +402,14 @@ static int nilfs_direct_assign(struct nilfs_bmap *bmap, | |||
377 | return -EINVAL; | 402 | return -EINVAL; |
378 | } | 403 | } |
379 | 404 | ||
380 | return direct->d_ops->dop_assign(direct, key, ptr, bh, | 405 | return NILFS_BMAP_USE_VBN(bmap) ? |
381 | blocknr, binfo); | 406 | nilfs_direct_assign_v(direct, key, ptr, bh, blocknr, binfo) : |
407 | nilfs_direct_assign_p(direct, key, ptr, bh, blocknr, binfo); | ||
382 | } | 408 | } |
383 | 409 | ||
384 | static const struct nilfs_bmap_operations nilfs_direct_ops = { | 410 | static const struct nilfs_bmap_operations nilfs_direct_ops = { |
385 | .bop_lookup = nilfs_direct_lookup, | 411 | .bop_lookup = nilfs_direct_lookup, |
412 | .bop_lookup_contig = nilfs_direct_lookup_contig, | ||
386 | .bop_insert = nilfs_direct_insert, | 413 | .bop_insert = nilfs_direct_insert, |
387 | .bop_delete = nilfs_direct_delete, | 414 | .bop_delete = nilfs_direct_delete, |
388 | .bop_clear = NULL, | 415 | .bop_clear = NULL, |
@@ -401,36 +428,8 @@ static const struct nilfs_bmap_operations nilfs_direct_ops = { | |||
401 | }; | 428 | }; |
402 | 429 | ||
403 | 430 | ||
404 | static const struct nilfs_direct_operations nilfs_direct_ops_v = { | 431 | int nilfs_direct_init(struct nilfs_bmap *bmap) |
405 | .dop_find_target = nilfs_direct_find_target_v, | ||
406 | .dop_set_target = nilfs_direct_set_target_v, | ||
407 | .dop_propagate = nilfs_direct_propagate_v, | ||
408 | .dop_assign = nilfs_direct_assign_v, | ||
409 | }; | ||
410 | |||
411 | static const struct nilfs_direct_operations nilfs_direct_ops_p = { | ||
412 | .dop_find_target = NULL, | ||
413 | .dop_set_target = NULL, | ||
414 | .dop_propagate = NULL, | ||
415 | .dop_assign = nilfs_direct_assign_p, | ||
416 | }; | ||
417 | |||
418 | int nilfs_direct_init(struct nilfs_bmap *bmap, __u64 low, __u64 high) | ||
419 | { | 432 | { |
420 | struct nilfs_direct *direct; | ||
421 | |||
422 | direct = (struct nilfs_direct *)bmap; | ||
423 | bmap->b_ops = &nilfs_direct_ops; | 433 | bmap->b_ops = &nilfs_direct_ops; |
424 | bmap->b_low = low; | ||
425 | bmap->b_high = high; | ||
426 | switch (bmap->b_inode->i_ino) { | ||
427 | case NILFS_DAT_INO: | ||
428 | direct->d_ops = &nilfs_direct_ops_p; | ||
429 | break; | ||
430 | default: | ||
431 | direct->d_ops = &nilfs_direct_ops_v; | ||
432 | break; | ||
433 | } | ||
434 | |||
435 | return 0; | 434 | return 0; |
436 | } | 435 | } |
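nilfs_direct_lookup_contig() above illustrates the contract of the new bop_lookup_contig operation: on success it returns the number of blocks, starting at the looked-up key, whose translated block addresses are consecutive (at least 1), and a negative error such as -ENOENT otherwise. For bmaps that use virtual block numbers each candidate pointer is translated through the DAT before the contiguity test. From a caller's point of view the operation behaves roughly like this sketch:

    /* Caller-side sketch; 'blkoff' and 'maxblocks' come from the caller,
     * 'blocknr' receives the first on-disk block of the run. */
    __u64 blocknr;
    int n = bmap->b_ops->bop_lookup_contig(bmap, blkoff, &blocknr, maxblocks);

    if (n < 0)
            return n;               /* e.g. -ENOENT for an unmapped key */
    /* file blocks blkoff .. blkoff + n - 1 map to
     * disk blocks blocknr .. blocknr + n - 1 */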
diff --git a/fs/nilfs2/direct.h b/fs/nilfs2/direct.h index 45d2c5cda812..a5ffd66e25d0 100644 --- a/fs/nilfs2/direct.h +++ b/fs/nilfs2/direct.h | |||
@@ -31,18 +31,6 @@ | |||
31 | struct nilfs_direct; | 31 | struct nilfs_direct; |
32 | 32 | ||
33 | /** | 33 | /** |
34 | * struct nilfs_direct_operations - direct mapping operation table | ||
35 | */ | ||
36 | struct nilfs_direct_operations { | ||
37 | __u64 (*dop_find_target)(const struct nilfs_direct *, __u64); | ||
38 | void (*dop_set_target)(struct nilfs_direct *, __u64, __u64); | ||
39 | int (*dop_propagate)(struct nilfs_direct *, struct buffer_head *); | ||
40 | int (*dop_assign)(struct nilfs_direct *, __u64, __u64, | ||
41 | struct buffer_head **, sector_t, | ||
42 | union nilfs_binfo *); | ||
43 | }; | ||
44 | |||
45 | /** | ||
46 | * struct nilfs_direct_node - direct node | 34 | * struct nilfs_direct_node - direct node |
47 | * @dn_flags: flags | 35 | * @dn_flags: flags |
48 | * @dn_pad: padding | 36 | * @dn_pad: padding |
@@ -55,13 +43,9 @@ struct nilfs_direct_node { | |||
55 | /** | 43 | /** |
56 | * struct nilfs_direct - direct mapping | 44 | * struct nilfs_direct - direct mapping |
57 | * @d_bmap: bmap structure | 45 | * @d_bmap: bmap structure |
58 | * @d_ops: direct mapping operation table | ||
59 | */ | 46 | */ |
60 | struct nilfs_direct { | 47 | struct nilfs_direct { |
61 | struct nilfs_bmap d_bmap; | 48 | struct nilfs_bmap d_bmap; |
62 | |||
63 | /* direct-mapping-specific members */ | ||
64 | const struct nilfs_direct_operations *d_ops; | ||
65 | }; | 49 | }; |
66 | 50 | ||
67 | 51 | ||
@@ -70,9 +54,9 @@ struct nilfs_direct { | |||
70 | #define NILFS_DIRECT_KEY_MAX (NILFS_DIRECT_NBLOCKS - 1) | 54 | #define NILFS_DIRECT_KEY_MAX (NILFS_DIRECT_NBLOCKS - 1) |
71 | 55 | ||
72 | 56 | ||
73 | int nilfs_direct_init(struct nilfs_bmap *, __u64, __u64); | 57 | int nilfs_direct_init(struct nilfs_bmap *); |
74 | int nilfs_direct_delete_and_convert(struct nilfs_bmap *, __u64, __u64 *, | 58 | int nilfs_direct_delete_and_convert(struct nilfs_bmap *, __u64, __u64 *, |
75 | __u64 *, int, __u64, __u64); | 59 | __u64 *, int); |
76 | 60 | ||
77 | 61 | ||
78 | #endif /* _NILFS_DIRECT_H */ | 62 | #endif /* _NILFS_DIRECT_H */ |
diff --git a/fs/nilfs2/gcinode.c b/fs/nilfs2/gcinode.c index 19d2102b6a69..1b3c2bb20da9 100644 --- a/fs/nilfs2/gcinode.c +++ b/fs/nilfs2/gcinode.c | |||
@@ -52,8 +52,9 @@ | |||
52 | #include "dat.h" | 52 | #include "dat.h" |
53 | #include "ifile.h" | 53 | #include "ifile.h" |
54 | 54 | ||
55 | static struct address_space_operations def_gcinode_aops = {}; | 55 | static struct address_space_operations def_gcinode_aops = { |
56 | /* XXX need def_gcinode_iops/fops? */ | 56 | .sync_page = block_sync_page, |
57 | }; | ||
57 | 58 | ||
58 | /* | 59 | /* |
59 | * nilfs_gccache_submit_read_data() - add data buffer and submit read request | 60 | * nilfs_gccache_submit_read_data() - add data buffer and submit read request |
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c index 49ab4a49bb4f..2696d6b513b7 100644 --- a/fs/nilfs2/inode.c +++ b/fs/nilfs2/inode.c | |||
@@ -43,22 +43,23 @@ | |||
43 | * | 43 | * |
44 | * This function does not issue actual read request of the specified data | 44 | * This function does not issue actual read request of the specified data |
45 | * block. It is done by VFS. | 45 | * block. It is done by VFS. |
46 | * Bulk read for direct-io is not supported yet. (should be supported) | ||
47 | */ | 46 | */ |
48 | int nilfs_get_block(struct inode *inode, sector_t blkoff, | 47 | int nilfs_get_block(struct inode *inode, sector_t blkoff, |
49 | struct buffer_head *bh_result, int create) | 48 | struct buffer_head *bh_result, int create) |
50 | { | 49 | { |
51 | struct nilfs_inode_info *ii = NILFS_I(inode); | 50 | struct nilfs_inode_info *ii = NILFS_I(inode); |
52 | unsigned long blknum = 0; | 51 | __u64 blknum = 0; |
53 | int err = 0, ret; | 52 | int err = 0, ret; |
54 | struct inode *dat = nilfs_dat_inode(NILFS_I_NILFS(inode)); | 53 | struct inode *dat = nilfs_dat_inode(NILFS_I_NILFS(inode)); |
54 | unsigned maxblocks = bh_result->b_size >> inode->i_blkbits; | ||
55 | 55 | ||
56 | /* This exclusion control is a workaround; should be revised */ | 56 | down_read(&NILFS_MDT(dat)->mi_sem); |
57 | down_read(&NILFS_MDT(dat)->mi_sem); /* XXX */ | 57 | ret = nilfs_bmap_lookup_contig(ii->i_bmap, blkoff, &blknum, maxblocks); |
58 | ret = nilfs_bmap_lookup(ii->i_bmap, (unsigned long)blkoff, &blknum); | 58 | up_read(&NILFS_MDT(dat)->mi_sem); |
59 | up_read(&NILFS_MDT(dat)->mi_sem); /* XXX */ | 59 | if (ret >= 0) { /* found */ |
60 | if (ret == 0) { /* found */ | ||
61 | map_bh(bh_result, inode->i_sb, blknum); | 60 | map_bh(bh_result, inode->i_sb, blknum); |
61 | if (ret > 0) | ||
62 | bh_result->b_size = (ret << inode->i_blkbits); | ||
62 | goto out; | 63 | goto out; |
63 | } | 64 | } |
64 | /* data block was not found */ | 65 | /* data block was not found */ |
@@ -240,7 +241,7 @@ nilfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, | |||
240 | struct address_space_operations nilfs_aops = { | 241 | struct address_space_operations nilfs_aops = { |
241 | .writepage = nilfs_writepage, | 242 | .writepage = nilfs_writepage, |
242 | .readpage = nilfs_readpage, | 243 | .readpage = nilfs_readpage, |
243 | /* .sync_page = nilfs_sync_page, */ | 244 | .sync_page = block_sync_page, |
244 | .writepages = nilfs_writepages, | 245 | .writepages = nilfs_writepages, |
245 | .set_page_dirty = nilfs_set_page_dirty, | 246 | .set_page_dirty = nilfs_set_page_dirty, |
246 | .readpages = nilfs_readpages, | 247 | .readpages = nilfs_readpages, |
@@ -249,6 +250,7 @@ struct address_space_operations nilfs_aops = { | |||
249 | /* .releasepage = nilfs_releasepage, */ | 250 | /* .releasepage = nilfs_releasepage, */ |
250 | .invalidatepage = block_invalidatepage, | 251 | .invalidatepage = block_invalidatepage, |
251 | .direct_IO = nilfs_direct_IO, | 252 | .direct_IO = nilfs_direct_IO, |
253 | .is_partially_uptodate = block_is_partially_uptodate, | ||
252 | }; | 254 | }; |
253 | 255 | ||
254 | struct inode *nilfs_new_inode(struct inode *dir, int mode) | 256 | struct inode *nilfs_new_inode(struct inode *dir, int mode) |
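With lookup_contig in place, nilfs_get_block() can map a whole run of blocks per call: the caller advertises how much it can use through bh_result->b_size, and the function reports the contiguous run it found the same way. This is the usual multi-block get_block convention, so mpage-based readers benefit without further changes. A caller-side sketch of the convention (generic pattern, not NILFS code; 'want' and 'blkoff' stand for the caller's request):

    /* Size the request up front, then read the granted run length back
     * out of b_size after a successful mapping. */
    struct buffer_head map_bh = { .b_size = want << inode->i_blkbits };

    if (nilfs_get_block(inode, blkoff, &map_bh, 0) == 0 &&
        buffer_mapped(&map_bh)) {
            unsigned got = map_bh.b_size >> inode->i_blkbits;  /* 1..want */
            /* 'got' blocks starting at map_bh.b_blocknr are contiguous */
    }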
diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c index d6759b92006f..6ea5f872e2de 100644 --- a/fs/nilfs2/ioctl.c +++ b/fs/nilfs2/ioctl.c | |||
@@ -152,7 +152,7 @@ nilfs_ioctl_do_get_cpinfo(struct the_nilfs *nilfs, __u64 *posp, int flags, | |||
152 | 152 | ||
153 | down_read(&nilfs->ns_segctor_sem); | 153 | down_read(&nilfs->ns_segctor_sem); |
154 | ret = nilfs_cpfile_get_cpinfo(nilfs->ns_cpfile, posp, flags, buf, | 154 | ret = nilfs_cpfile_get_cpinfo(nilfs->ns_cpfile, posp, flags, buf, |
155 | nmembs); | 155 | size, nmembs); |
156 | up_read(&nilfs->ns_segctor_sem); | 156 | up_read(&nilfs->ns_segctor_sem); |
157 | return ret; | 157 | return ret; |
158 | } | 158 | } |
@@ -182,7 +182,8 @@ nilfs_ioctl_do_get_suinfo(struct the_nilfs *nilfs, __u64 *posp, int flags, | |||
182 | int ret; | 182 | int ret; |
183 | 183 | ||
184 | down_read(&nilfs->ns_segctor_sem); | 184 | down_read(&nilfs->ns_segctor_sem); |
185 | ret = nilfs_sufile_get_suinfo(nilfs->ns_sufile, *posp, buf, nmembs); | 185 | ret = nilfs_sufile_get_suinfo(nilfs->ns_sufile, *posp, buf, size, |
186 | nmembs); | ||
186 | up_read(&nilfs->ns_segctor_sem); | 187 | up_read(&nilfs->ns_segctor_sem); |
187 | return ret; | 188 | return ret; |
188 | } | 189 | } |
@@ -212,7 +213,7 @@ nilfs_ioctl_do_get_vinfo(struct the_nilfs *nilfs, __u64 *posp, int flags, | |||
212 | int ret; | 213 | int ret; |
213 | 214 | ||
214 | down_read(&nilfs->ns_segctor_sem); | 215 | down_read(&nilfs->ns_segctor_sem); |
215 | ret = nilfs_dat_get_vinfo(nilfs_dat_inode(nilfs), buf, nmembs); | 216 | ret = nilfs_dat_get_vinfo(nilfs_dat_inode(nilfs), buf, size, nmembs); |
216 | up_read(&nilfs->ns_segctor_sem); | 217 | up_read(&nilfs->ns_segctor_sem); |
217 | return ret; | 218 | return ret; |
218 | } | 219 | } |
@@ -435,24 +436,6 @@ static int nilfs_ioctl_mark_blocks_dirty(struct the_nilfs *nilfs, | |||
435 | return nmembs; | 436 | return nmembs; |
436 | } | 437 | } |
437 | 438 | ||
438 | static int nilfs_ioctl_free_segments(struct the_nilfs *nilfs, | ||
439 | struct nilfs_argv *argv, void *buf) | ||
440 | { | ||
441 | size_t nmembs = argv->v_nmembs; | ||
442 | struct nilfs_sb_info *sbi = nilfs->ns_writer; | ||
443 | int ret; | ||
444 | |||
445 | if (unlikely(!sbi)) { | ||
446 | /* never happens because called for a writable mount */ | ||
447 | WARN_ON(1); | ||
448 | return -EROFS; | ||
449 | } | ||
450 | ret = nilfs_segctor_add_segments_to_be_freed( | ||
451 | NILFS_SC(sbi), buf, nmembs); | ||
452 | |||
453 | return (ret < 0) ? ret : nmembs; | ||
454 | } | ||
455 | |||
456 | int nilfs_ioctl_prepare_clean_segments(struct the_nilfs *nilfs, | 439 | int nilfs_ioctl_prepare_clean_segments(struct the_nilfs *nilfs, |
457 | struct nilfs_argv *argv, void **kbufs) | 440 | struct nilfs_argv *argv, void **kbufs) |
458 | { | 441 | { |
@@ -491,14 +474,6 @@ int nilfs_ioctl_prepare_clean_segments(struct the_nilfs *nilfs, | |||
491 | msg = "cannot mark copying blocks dirty"; | 474 | msg = "cannot mark copying blocks dirty"; |
492 | goto failed; | 475 | goto failed; |
493 | } | 476 | } |
494 | ret = nilfs_ioctl_free_segments(nilfs, &argv[4], kbufs[4]); | ||
495 | if (ret < 0) { | ||
496 | /* | ||
497 | * can safely abort because this operation is atomic. | ||
498 | */ | ||
499 | msg = "cannot set segments to be freed"; | ||
500 | goto failed; | ||
501 | } | ||
502 | return 0; | 477 | return 0; |
503 | 478 | ||
504 | failed: | 479 | failed: |
@@ -615,7 +590,7 @@ static int nilfs_ioctl_get_info(struct inode *inode, struct file *filp, | |||
615 | if (copy_from_user(&argv, argp, sizeof(argv))) | 590 | if (copy_from_user(&argv, argp, sizeof(argv))) |
616 | return -EFAULT; | 591 | return -EFAULT; |
617 | 592 | ||
618 | if (argv.v_size != membsz) | 593 | if (argv.v_size < membsz) |
619 | return -EINVAL; | 594 | return -EINVAL; |
620 | 595 | ||
621 | ret = nilfs_ioctl_wrap_copy(nilfs, &argv, _IOC_DIR(cmd), dofunc); | 596 | ret = nilfs_ioctl_wrap_copy(nilfs, &argv, _IOC_DIR(cmd), dofunc); |
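On the ioctl side, the per-member size no longer has to match exactly: argv.v_size only needs to be at least membsz, and the value is threaded down to the cpfile, sufile and DAT helpers so they can step through the user buffer at the caller's stride. That keeps this kernel usable from userland built against headers in which these structures have grown. A hypothetical userspace sketch (the nilfs_argv fields and the ioctl name are the real UAPI; the surrounding code and the open descriptor fd are illustrative):

    struct nilfs_cpinfo ci[16];
    struct nilfs_argv argv = {
            .v_base   = (unsigned long)ci,
            .v_nmembs = 16,
            .v_size   = sizeof(struct nilfs_cpinfo), /* may exceed membsz */
            .v_flags  = NILFS_CHECKPOINT,
            .v_index  = 1,          /* first checkpoint number to read */
    };

    if (ioctl(fd, NILFS_IOCTL_GET_CPINFO, &argv) < 0)
            perror("NILFS_IOCTL_GET_CPINFO");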
diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c index bb78745a0e30..3d3ddb3f5177 100644 --- a/fs/nilfs2/mdt.c +++ b/fs/nilfs2/mdt.c | |||
@@ -430,6 +430,7 @@ nilfs_mdt_write_page(struct page *page, struct writeback_control *wbc) | |||
430 | 430 | ||
431 | static struct address_space_operations def_mdt_aops = { | 431 | static struct address_space_operations def_mdt_aops = { |
432 | .writepage = nilfs_mdt_write_page, | 432 | .writepage = nilfs_mdt_write_page, |
433 | .sync_page = block_sync_page, | ||
433 | }; | 434 | }; |
434 | 435 | ||
435 | static struct inode_operations def_mdt_iops; | 436 | static struct inode_operations def_mdt_iops; |
@@ -449,7 +450,7 @@ struct inode * | |||
449 | nilfs_mdt_new_common(struct the_nilfs *nilfs, struct super_block *sb, | 450 | nilfs_mdt_new_common(struct the_nilfs *nilfs, struct super_block *sb, |
450 | ino_t ino, gfp_t gfp_mask) | 451 | ino_t ino, gfp_t gfp_mask) |
451 | { | 452 | { |
452 | struct inode *inode = nilfs_alloc_inode(sb); | 453 | struct inode *inode = nilfs_alloc_inode_common(nilfs); |
453 | 454 | ||
454 | if (!inode) | 455 | if (!inode) |
455 | return NULL; | 456 | return NULL; |
diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h index da6fc0bba2e5..edf6a59d9f2a 100644 --- a/fs/nilfs2/nilfs.h +++ b/fs/nilfs2/nilfs.h | |||
@@ -263,6 +263,7 @@ extern void nilfs_dirty_inode(struct inode *); | |||
263 | extern struct dentry *nilfs_get_parent(struct dentry *); | 263 | extern struct dentry *nilfs_get_parent(struct dentry *); |
264 | 264 | ||
265 | /* super.c */ | 265 | /* super.c */ |
266 | extern struct inode *nilfs_alloc_inode_common(struct the_nilfs *); | ||
266 | extern struct inode *nilfs_alloc_inode(struct super_block *); | 267 | extern struct inode *nilfs_alloc_inode(struct super_block *); |
267 | extern void nilfs_destroy_inode(struct inode *); | 268 | extern void nilfs_destroy_inode(struct inode *); |
268 | extern void nilfs_error(struct super_block *, const char *, const char *, ...) | 269 | extern void nilfs_error(struct super_block *, const char *, const char *, ...) |
diff --git a/fs/nilfs2/recovery.c b/fs/nilfs2/recovery.c index 57afa9d24061..d80cc71be749 100644 --- a/fs/nilfs2/recovery.c +++ b/fs/nilfs2/recovery.c | |||
@@ -28,7 +28,6 @@ | |||
28 | #include "segment.h" | 28 | #include "segment.h" |
29 | #include "sufile.h" | 29 | #include "sufile.h" |
30 | #include "page.h" | 30 | #include "page.h" |
31 | #include "seglist.h" | ||
32 | #include "segbuf.h" | 31 | #include "segbuf.h" |
33 | 32 | ||
34 | /* | 33 | /* |
@@ -395,6 +394,24 @@ static void dispose_recovery_list(struct list_head *head) | |||
395 | } | 394 | } |
396 | } | 395 | } |
397 | 396 | ||
397 | struct nilfs_segment_entry { | ||
398 | struct list_head list; | ||
399 | __u64 segnum; | ||
400 | }; | ||
401 | |||
402 | static int nilfs_segment_list_add(struct list_head *head, __u64 segnum) | ||
403 | { | ||
404 | struct nilfs_segment_entry *ent = kmalloc(sizeof(*ent), GFP_NOFS); | ||
405 | |||
406 | if (unlikely(!ent)) | ||
407 | return -ENOMEM; | ||
408 | |||
409 | ent->segnum = segnum; | ||
410 | INIT_LIST_HEAD(&ent->list); | ||
411 | list_add_tail(&ent->list, head); | ||
412 | return 0; | ||
413 | } | ||
414 | |||
398 | void nilfs_dispose_segment_list(struct list_head *head) | 415 | void nilfs_dispose_segment_list(struct list_head *head) |
399 | { | 416 | { |
400 | while (!list_empty(head)) { | 417 | while (!list_empty(head)) { |
@@ -402,7 +419,7 @@ void nilfs_dispose_segment_list(struct list_head *head) | |||
402 | = list_entry(head->next, | 419 | = list_entry(head->next, |
403 | struct nilfs_segment_entry, list); | 420 | struct nilfs_segment_entry, list); |
404 | list_del(&ent->list); | 421 | list_del(&ent->list); |
405 | nilfs_free_segment_entry(ent); | 422 | kfree(ent); |
406 | } | 423 | } |
407 | } | 424 | } |
408 | 425 | ||
@@ -431,12 +448,10 @@ static int nilfs_prepare_segment_for_recovery(struct the_nilfs *nilfs, | |||
431 | if (unlikely(err)) | 448 | if (unlikely(err)) |
432 | goto failed; | 449 | goto failed; |
433 | 450 | ||
434 | err = -ENOMEM; | ||
435 | for (i = 1; i < 4; i++) { | 451 | for (i = 1; i < 4; i++) { |
436 | ent = nilfs_alloc_segment_entry(segnum[i]); | 452 | err = nilfs_segment_list_add(head, segnum[i]); |
437 | if (unlikely(!ent)) | 453 | if (unlikely(err)) |
438 | goto failed; | 454 | goto failed; |
439 | list_add_tail(&ent->list, head); | ||
440 | } | 455 | } |
441 | 456 | ||
442 | /* | 457 | /* |
@@ -450,7 +465,7 @@ static int nilfs_prepare_segment_for_recovery(struct the_nilfs *nilfs, | |||
450 | goto failed; | 465 | goto failed; |
451 | } | 466 | } |
452 | list_del(&ent->list); | 467 | list_del(&ent->list); |
453 | nilfs_free_segment_entry(ent); | 468 | kfree(ent); |
454 | } | 469 | } |
455 | 470 | ||
456 | /* Allocate new segments for recovery */ | 471 | /* Allocate new segments for recovery */ |
@@ -791,7 +806,6 @@ int nilfs_search_super_root(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi, | |||
791 | u64 seg_seq; | 806 | u64 seg_seq; |
792 | __u64 segnum, nextnum = 0; | 807 | __u64 segnum, nextnum = 0; |
793 | __u64 cno; | 808 | __u64 cno; |
794 | struct nilfs_segment_entry *ent; | ||
795 | LIST_HEAD(segments); | 809 | LIST_HEAD(segments); |
796 | int empty_seg = 0, scan_newer = 0; | 810 | int empty_seg = 0, scan_newer = 0; |
797 | int ret; | 811 | int ret; |
@@ -892,12 +906,9 @@ int nilfs_search_super_root(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi, | |||
892 | if (empty_seg++) | 906 | if (empty_seg++) |
893 | goto super_root_found; /* found a valid super root */ | 907 | goto super_root_found; /* found a valid super root */ |
894 | 908 | ||
895 | ent = nilfs_alloc_segment_entry(segnum); | 909 | ret = nilfs_segment_list_add(&segments, segnum); |
896 | if (unlikely(!ent)) { | 910 | if (unlikely(ret)) |
897 | ret = -ENOMEM; | ||
898 | goto failed; | 911 | goto failed; |
899 | } | ||
900 | list_add_tail(&ent->list, &segments); | ||
901 | 912 | ||
902 | seg_seq++; | 913 | seg_seq++; |
903 | segnum = nextnum; | 914 | segnum = nextnum; |
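The recovery.c hunks above replace the seglist.h helpers with a locally defined nilfs_segment_entry that carries only a list head and a segment number: entries are allocated by nilfs_segment_list_add() and released with plain kfree() in nilfs_dispose_segment_list(). The sketch below is a self-contained userspace model of that allocate/queue/dispose pattern; it mirrors the behaviour only, not the kernel list or allocation APIs.

#include <stdio.h>
#include <stdlib.h>

struct seg_entry {
	struct seg_entry *next;
	unsigned long long segnum;
};

static int seg_list_add(struct seg_entry **head, unsigned long long segnum)
{
	struct seg_entry *ent = malloc(sizeof(*ent));
	if (!ent)
		return -1;              /* -ENOMEM in the kernel version */
	ent->segnum = segnum;
	ent->next = *head;
	*head = ent;
	return 0;
}

static void dispose_seg_list(struct seg_entry **head)
{
	while (*head) {
		struct seg_entry *ent = *head;
		*head = ent->next;
		free(ent);              /* plain free replaces the old nilfs_free_segment_entry() */
	}
}

int main(void)
{
	struct seg_entry *head = NULL;

	for (unsigned long long n = 1; n < 4; n++)
		if (seg_list_add(&head, n))
			break;
	dispose_seg_list(&head);
	return 0;
}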
diff --git a/fs/nilfs2/segbuf.c b/fs/nilfs2/segbuf.c index 1e68821b4a9b..9e3fe17bb96b 100644 --- a/fs/nilfs2/segbuf.c +++ b/fs/nilfs2/segbuf.c | |||
@@ -26,7 +26,6 @@ | |||
26 | #include <linux/crc32.h> | 26 | #include <linux/crc32.h> |
27 | #include "page.h" | 27 | #include "page.h" |
28 | #include "segbuf.h" | 28 | #include "segbuf.h" |
29 | #include "seglist.h" | ||
30 | 29 | ||
31 | 30 | ||
32 | static struct kmem_cache *nilfs_segbuf_cachep; | 31 | static struct kmem_cache *nilfs_segbuf_cachep; |
@@ -394,7 +393,7 @@ int nilfs_segbuf_write(struct nilfs_segment_buffer *segbuf, | |||
394 | * Last BIO is always sent through the following | 393 | * Last BIO is always sent through the following |
395 | * submission. | 394 | * submission. |
396 | */ | 395 | */ |
397 | rw |= (1 << BIO_RW_SYNCIO); | 396 | rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG); |
398 | res = nilfs_submit_seg_bio(wi, rw); | 397 | res = nilfs_submit_seg_bio(wi, rw); |
399 | if (unlikely(res)) | 398 | if (unlikely(res)) |
400 | goto failed_bio; | 399 | goto failed_bio; |
diff --git a/fs/nilfs2/seglist.h b/fs/nilfs2/seglist.h deleted file mode 100644 index d39df9144e99..000000000000 --- a/fs/nilfs2/seglist.h +++ /dev/null | |||
@@ -1,85 +0,0 @@ | |||
1 | /* | ||
2 | * seglist.h - expediential structure and routines to handle list of segments | ||
3 | * (would be removed in a future release) | ||
4 | * | ||
5 | * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License, or | ||
10 | * (at your option) any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | ||
20 | * | ||
21 | * Written by Ryusuke Konishi <ryusuke@osrg.net> | ||
22 | * | ||
23 | */ | ||
24 | #ifndef _NILFS_SEGLIST_H | ||
25 | #define _NILFS_SEGLIST_H | ||
26 | |||
27 | #include <linux/fs.h> | ||
28 | #include <linux/buffer_head.h> | ||
29 | #include <linux/nilfs2_fs.h> | ||
30 | #include "sufile.h" | ||
31 | |||
32 | struct nilfs_segment_entry { | ||
33 | __u64 segnum; | ||
34 | |||
35 | #define NILFS_SLH_FREED 0x0001 /* The segment was freed provisonally. | ||
36 | It must be cancelled if | ||
37 | construction aborted */ | ||
38 | |||
39 | unsigned flags; | ||
40 | struct list_head list; | ||
41 | struct buffer_head *bh_su; | ||
42 | struct nilfs_segment_usage *raw_su; | ||
43 | }; | ||
44 | |||
45 | |||
46 | void nilfs_dispose_segment_list(struct list_head *); | ||
47 | |||
48 | static inline struct nilfs_segment_entry * | ||
49 | nilfs_alloc_segment_entry(__u64 segnum) | ||
50 | { | ||
51 | struct nilfs_segment_entry *ent = kmalloc(sizeof(*ent), GFP_NOFS); | ||
52 | |||
53 | if (likely(ent)) { | ||
54 | ent->segnum = segnum; | ||
55 | ent->flags = 0; | ||
56 | ent->bh_su = NULL; | ||
57 | ent->raw_su = NULL; | ||
58 | INIT_LIST_HEAD(&ent->list); | ||
59 | } | ||
60 | return ent; | ||
61 | } | ||
62 | |||
63 | static inline int nilfs_open_segment_entry(struct nilfs_segment_entry *ent, | ||
64 | struct inode *sufile) | ||
65 | { | ||
66 | return nilfs_sufile_get_segment_usage(sufile, ent->segnum, | ||
67 | &ent->raw_su, &ent->bh_su); | ||
68 | } | ||
69 | |||
70 | static inline void nilfs_close_segment_entry(struct nilfs_segment_entry *ent, | ||
71 | struct inode *sufile) | ||
72 | { | ||
73 | if (!ent->bh_su) | ||
74 | return; | ||
75 | nilfs_sufile_put_segment_usage(sufile, ent->segnum, ent->bh_su); | ||
76 | ent->bh_su = NULL; | ||
77 | ent->raw_su = NULL; | ||
78 | } | ||
79 | |||
80 | static inline void nilfs_free_segment_entry(struct nilfs_segment_entry *ent) | ||
81 | { | ||
82 | kfree(ent); | ||
83 | } | ||
84 | |||
85 | #endif /* _NILFS_SEGLIST_H */ | ||
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c index 22c7f65c2403..aa977549919e 100644 --- a/fs/nilfs2/segment.c +++ b/fs/nilfs2/segment.c | |||
@@ -39,7 +39,6 @@ | |||
39 | #include "sufile.h" | 39 | #include "sufile.h" |
40 | #include "cpfile.h" | 40 | #include "cpfile.h" |
41 | #include "ifile.h" | 41 | #include "ifile.h" |
42 | #include "seglist.h" | ||
43 | #include "segbuf.h" | 42 | #include "segbuf.h" |
44 | 43 | ||
45 | 44 | ||
@@ -79,7 +78,8 @@ enum { | |||
79 | /* State flags of collection */ | 78 | /* State flags of collection */ |
80 | #define NILFS_CF_NODE 0x0001 /* Collecting node blocks */ | 79 | #define NILFS_CF_NODE 0x0001 /* Collecting node blocks */ |
81 | #define NILFS_CF_IFILE_STARTED 0x0002 /* IFILE stage has started */ | 80 | #define NILFS_CF_IFILE_STARTED 0x0002 /* IFILE stage has started */ |
82 | #define NILFS_CF_HISTORY_MASK (NILFS_CF_IFILE_STARTED) | 81 | #define NILFS_CF_SUFREED 0x0004 /* segment usages has been freed */ |
82 | #define NILFS_CF_HISTORY_MASK (NILFS_CF_IFILE_STARTED | NILFS_CF_SUFREED) | ||
83 | 83 | ||
84 | /* Operations depending on the construction mode and file type */ | 84 | /* Operations depending on the construction mode and file type */ |
85 | struct nilfs_sc_operations { | 85 | struct nilfs_sc_operations { |
@@ -810,7 +810,7 @@ static int nilfs_segctor_clean(struct nilfs_sc_info *sci) | |||
810 | { | 810 | { |
811 | return list_empty(&sci->sc_dirty_files) && | 811 | return list_empty(&sci->sc_dirty_files) && |
812 | !test_bit(NILFS_SC_DIRTY, &sci->sc_flags) && | 812 | !test_bit(NILFS_SC_DIRTY, &sci->sc_flags) && |
813 | list_empty(&sci->sc_cleaning_segments) && | 813 | sci->sc_nfreesegs == 0 && |
814 | (!nilfs_doing_gc() || list_empty(&sci->sc_gc_inodes)); | 814 | (!nilfs_doing_gc() || list_empty(&sci->sc_gc_inodes)); |
815 | } | 815 | } |
816 | 816 | ||
@@ -1005,44 +1005,6 @@ static void nilfs_drop_collected_inodes(struct list_head *head) | |||
1005 | } | 1005 | } |
1006 | } | 1006 | } |
1007 | 1007 | ||
1008 | static void nilfs_segctor_cancel_free_segments(struct nilfs_sc_info *sci, | ||
1009 | struct inode *sufile) | ||
1010 | |||
1011 | { | ||
1012 | struct list_head *head = &sci->sc_cleaning_segments; | ||
1013 | struct nilfs_segment_entry *ent; | ||
1014 | int err; | ||
1015 | |||
1016 | list_for_each_entry(ent, head, list) { | ||
1017 | if (!(ent->flags & NILFS_SLH_FREED)) | ||
1018 | break; | ||
1019 | err = nilfs_sufile_cancel_free(sufile, ent->segnum); | ||
1020 | WARN_ON(err); /* do not happen */ | ||
1021 | ent->flags &= ~NILFS_SLH_FREED; | ||
1022 | } | ||
1023 | } | ||
1024 | |||
1025 | static int nilfs_segctor_prepare_free_segments(struct nilfs_sc_info *sci, | ||
1026 | struct inode *sufile) | ||
1027 | { | ||
1028 | struct list_head *head = &sci->sc_cleaning_segments; | ||
1029 | struct nilfs_segment_entry *ent; | ||
1030 | int err; | ||
1031 | |||
1032 | list_for_each_entry(ent, head, list) { | ||
1033 | err = nilfs_sufile_free(sufile, ent->segnum); | ||
1034 | if (unlikely(err)) | ||
1035 | return err; | ||
1036 | ent->flags |= NILFS_SLH_FREED; | ||
1037 | } | ||
1038 | return 0; | ||
1039 | } | ||
1040 | |||
1041 | static void nilfs_segctor_commit_free_segments(struct nilfs_sc_info *sci) | ||
1042 | { | ||
1043 | nilfs_dispose_segment_list(&sci->sc_cleaning_segments); | ||
1044 | } | ||
1045 | |||
1046 | static int nilfs_segctor_apply_buffers(struct nilfs_sc_info *sci, | 1008 | static int nilfs_segctor_apply_buffers(struct nilfs_sc_info *sci, |
1047 | struct inode *inode, | 1009 | struct inode *inode, |
1048 | struct list_head *listp, | 1010 | struct list_head *listp, |
@@ -1161,6 +1123,7 @@ static int nilfs_segctor_collect_blocks(struct nilfs_sc_info *sci, int mode) | |||
1161 | struct the_nilfs *nilfs = sbi->s_nilfs; | 1123 | struct the_nilfs *nilfs = sbi->s_nilfs; |
1162 | struct list_head *head; | 1124 | struct list_head *head; |
1163 | struct nilfs_inode_info *ii; | 1125 | struct nilfs_inode_info *ii; |
1126 | size_t ndone; | ||
1164 | int err = 0; | 1127 | int err = 0; |
1165 | 1128 | ||
1166 | switch (sci->sc_stage.scnt) { | 1129 | switch (sci->sc_stage.scnt) { |
@@ -1250,10 +1213,16 @@ static int nilfs_segctor_collect_blocks(struct nilfs_sc_info *sci, int mode) | |||
1250 | break; | 1213 | break; |
1251 | sci->sc_stage.scnt++; /* Fall through */ | 1214 | sci->sc_stage.scnt++; /* Fall through */ |
1252 | case NILFS_ST_SUFILE: | 1215 | case NILFS_ST_SUFILE: |
1253 | err = nilfs_segctor_prepare_free_segments(sci, | 1216 | err = nilfs_sufile_freev(nilfs->ns_sufile, sci->sc_freesegs, |
1254 | nilfs->ns_sufile); | 1217 | sci->sc_nfreesegs, &ndone); |
1255 | if (unlikely(err)) | 1218 | if (unlikely(err)) { |
1219 | nilfs_sufile_cancel_freev(nilfs->ns_sufile, | ||
1220 | sci->sc_freesegs, ndone, | ||
1221 | NULL); | ||
1256 | break; | 1222 | break; |
1223 | } | ||
1224 | sci->sc_stage.flags |= NILFS_CF_SUFREED; | ||
1225 | |||
1257 | err = nilfs_segctor_scan_file(sci, nilfs->ns_sufile, | 1226 | err = nilfs_segctor_scan_file(sci, nilfs->ns_sufile, |
1258 | &nilfs_sc_file_ops); | 1227 | &nilfs_sc_file_ops); |
1259 | if (unlikely(err)) | 1228 | if (unlikely(err)) |
@@ -1486,7 +1455,15 @@ static void nilfs_segctor_end_construction(struct nilfs_sc_info *sci, | |||
1486 | { | 1455 | { |
1487 | if (unlikely(err)) { | 1456 | if (unlikely(err)) { |
1488 | nilfs_segctor_free_incomplete_segments(sci, nilfs); | 1457 | nilfs_segctor_free_incomplete_segments(sci, nilfs); |
1489 | nilfs_segctor_cancel_free_segments(sci, nilfs->ns_sufile); | 1458 | if (sci->sc_stage.flags & NILFS_CF_SUFREED) { |
1459 | int ret; | ||
1460 | |||
1461 | ret = nilfs_sufile_cancel_freev(nilfs->ns_sufile, | ||
1462 | sci->sc_freesegs, | ||
1463 | sci->sc_nfreesegs, | ||
1464 | NULL); | ||
1465 | WARN_ON(ret); /* do not happen */ | ||
1466 | } | ||
1490 | } | 1467 | } |
1491 | nilfs_segctor_clear_segment_buffers(sci); | 1468 | nilfs_segctor_clear_segment_buffers(sci); |
1492 | } | 1469 | } |
@@ -1585,7 +1562,13 @@ static int nilfs_segctor_collect(struct nilfs_sc_info *sci, | |||
1585 | if (mode != SC_LSEG_SR || sci->sc_stage.scnt < NILFS_ST_CPFILE) | 1562 | if (mode != SC_LSEG_SR || sci->sc_stage.scnt < NILFS_ST_CPFILE) |
1586 | break; | 1563 | break; |
1587 | 1564 | ||
1588 | nilfs_segctor_cancel_free_segments(sci, nilfs->ns_sufile); | 1565 | if (sci->sc_stage.flags & NILFS_CF_SUFREED) { |
1566 | err = nilfs_sufile_cancel_freev(nilfs->ns_sufile, | ||
1567 | sci->sc_freesegs, | ||
1568 | sci->sc_nfreesegs, | ||
1569 | NULL); | ||
1570 | WARN_ON(err); /* do not happen */ | ||
1571 | } | ||
1589 | nilfs_segctor_clear_segment_buffers(sci); | 1572 | nilfs_segctor_clear_segment_buffers(sci); |
1590 | 1573 | ||
1591 | err = nilfs_segctor_extend_segments(sci, nilfs, nadd); | 1574 | err = nilfs_segctor_extend_segments(sci, nilfs, nadd); |
@@ -2224,10 +2207,8 @@ static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode) | |||
2224 | nilfs_segctor_complete_write(sci); | 2207 | nilfs_segctor_complete_write(sci); |
2225 | 2208 | ||
2226 | /* Commit segments */ | 2209 | /* Commit segments */ |
2227 | if (has_sr) { | 2210 | if (has_sr) |
2228 | nilfs_segctor_commit_free_segments(sci); | ||
2229 | nilfs_segctor_clear_metadata_dirty(sci); | 2211 | nilfs_segctor_clear_metadata_dirty(sci); |
2230 | } | ||
2231 | 2212 | ||
2232 | nilfs_segctor_end_construction(sci, nilfs, 0); | 2213 | nilfs_segctor_end_construction(sci, nilfs, 0); |
2233 | 2214 | ||
@@ -2301,48 +2282,6 @@ void nilfs_flush_segment(struct super_block *sb, ino_t ino) | |||
2301 | /* assign bit 0 to data files */ | 2282 | /* assign bit 0 to data files */ |
2302 | } | 2283 | } |
2303 | 2284 | ||
2304 | int nilfs_segctor_add_segments_to_be_freed(struct nilfs_sc_info *sci, | ||
2305 | __u64 *segnum, size_t nsegs) | ||
2306 | { | ||
2307 | struct nilfs_segment_entry *ent; | ||
2308 | struct the_nilfs *nilfs = sci->sc_sbi->s_nilfs; | ||
2309 | struct inode *sufile = nilfs->ns_sufile; | ||
2310 | LIST_HEAD(list); | ||
2311 | __u64 *pnum; | ||
2312 | size_t i; | ||
2313 | int err; | ||
2314 | |||
2315 | for (pnum = segnum, i = 0; i < nsegs; pnum++, i++) { | ||
2316 | ent = nilfs_alloc_segment_entry(*pnum); | ||
2317 | if (unlikely(!ent)) { | ||
2318 | err = -ENOMEM; | ||
2319 | goto failed; | ||
2320 | } | ||
2321 | list_add_tail(&ent->list, &list); | ||
2322 | |||
2323 | err = nilfs_open_segment_entry(ent, sufile); | ||
2324 | if (unlikely(err)) | ||
2325 | goto failed; | ||
2326 | |||
2327 | if (unlikely(!nilfs_segment_usage_dirty(ent->raw_su))) | ||
2328 | printk(KERN_WARNING "NILFS: unused segment is " | ||
2329 | "requested to be cleaned (segnum=%llu)\n", | ||
2330 | (unsigned long long)ent->segnum); | ||
2331 | nilfs_close_segment_entry(ent, sufile); | ||
2332 | } | ||
2333 | list_splice(&list, sci->sc_cleaning_segments.prev); | ||
2334 | return 0; | ||
2335 | |||
2336 | failed: | ||
2337 | nilfs_dispose_segment_list(&list); | ||
2338 | return err; | ||
2339 | } | ||
2340 | |||
2341 | void nilfs_segctor_clear_segments_to_be_freed(struct nilfs_sc_info *sci) | ||
2342 | { | ||
2343 | nilfs_dispose_segment_list(&sci->sc_cleaning_segments); | ||
2344 | } | ||
2345 | |||
2346 | struct nilfs_segctor_wait_request { | 2285 | struct nilfs_segctor_wait_request { |
2347 | wait_queue_t wq; | 2286 | wait_queue_t wq; |
2348 | __u32 seq; | 2287 | __u32 seq; |
@@ -2607,10 +2546,13 @@ int nilfs_clean_segments(struct super_block *sb, struct nilfs_argv *argv, | |||
2607 | err = nilfs_init_gcdat_inode(nilfs); | 2546 | err = nilfs_init_gcdat_inode(nilfs); |
2608 | if (unlikely(err)) | 2547 | if (unlikely(err)) |
2609 | goto out_unlock; | 2548 | goto out_unlock; |
2549 | |||
2610 | err = nilfs_ioctl_prepare_clean_segments(nilfs, argv, kbufs); | 2550 | err = nilfs_ioctl_prepare_clean_segments(nilfs, argv, kbufs); |
2611 | if (unlikely(err)) | 2551 | if (unlikely(err)) |
2612 | goto out_unlock; | 2552 | goto out_unlock; |
2613 | 2553 | ||
2554 | sci->sc_freesegs = kbufs[4]; | ||
2555 | sci->sc_nfreesegs = argv[4].v_nmembs; | ||
2614 | list_splice_init(&nilfs->ns_gc_inodes, sci->sc_gc_inodes.prev); | 2556 | list_splice_init(&nilfs->ns_gc_inodes, sci->sc_gc_inodes.prev); |
2615 | 2557 | ||
2616 | for (;;) { | 2558 | for (;;) { |
@@ -2629,6 +2571,8 @@ int nilfs_clean_segments(struct super_block *sb, struct nilfs_argv *argv, | |||
2629 | } | 2571 | } |
2630 | 2572 | ||
2631 | out_unlock: | 2573 | out_unlock: |
2574 | sci->sc_freesegs = NULL; | ||
2575 | sci->sc_nfreesegs = 0; | ||
2632 | nilfs_clear_gcdat_inode(nilfs); | 2576 | nilfs_clear_gcdat_inode(nilfs); |
2633 | nilfs_transaction_unlock(sbi); | 2577 | nilfs_transaction_unlock(sbi); |
2634 | return err; | 2578 | return err; |
@@ -2835,7 +2779,6 @@ static struct nilfs_sc_info *nilfs_segctor_new(struct nilfs_sb_info *sbi) | |||
2835 | INIT_LIST_HEAD(&sci->sc_dirty_files); | 2779 | INIT_LIST_HEAD(&sci->sc_dirty_files); |
2836 | INIT_LIST_HEAD(&sci->sc_segbufs); | 2780 | INIT_LIST_HEAD(&sci->sc_segbufs); |
2837 | INIT_LIST_HEAD(&sci->sc_gc_inodes); | 2781 | INIT_LIST_HEAD(&sci->sc_gc_inodes); |
2838 | INIT_LIST_HEAD(&sci->sc_cleaning_segments); | ||
2839 | INIT_LIST_HEAD(&sci->sc_copied_buffers); | 2782 | INIT_LIST_HEAD(&sci->sc_copied_buffers); |
2840 | 2783 | ||
2841 | sci->sc_interval = HZ * NILFS_SC_DEFAULT_TIMEOUT; | 2784 | sci->sc_interval = HZ * NILFS_SC_DEFAULT_TIMEOUT; |
@@ -2901,9 +2844,6 @@ static void nilfs_segctor_destroy(struct nilfs_sc_info *sci) | |||
2901 | nilfs_dispose_list(sbi, &sci->sc_dirty_files, 1); | 2844 | nilfs_dispose_list(sbi, &sci->sc_dirty_files, 1); |
2902 | } | 2845 | } |
2903 | 2846 | ||
2904 | if (!list_empty(&sci->sc_cleaning_segments)) | ||
2905 | nilfs_dispose_segment_list(&sci->sc_cleaning_segments); | ||
2906 | |||
2907 | WARN_ON(!list_empty(&sci->sc_segbufs)); | 2847 | WARN_ON(!list_empty(&sci->sc_segbufs)); |
2908 | 2848 | ||
2909 | down_write(&sbi->s_nilfs->ns_segctor_sem); | 2849 | down_write(&sbi->s_nilfs->ns_segctor_sem); |
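In the segment.c hunks above, segment usages are now freed in one batch through nilfs_sufile_freev(); when that fails part-way, the already-freed prefix (reported via the ndone argument) is rolled back with nilfs_sufile_cancel_freev(), and the NILFS_CF_SUFREED flag remembers that a later abort must cancel the whole batch. The sketch below models that "apply a prefix, roll it back on failure" pattern in plain userspace C; apply() and undo() are hypothetical stand-ins for the sufile operations.

#include <stdio.h>
#include <stddef.h>

static int apply(unsigned long long segnum)  { return segnum == 7 ? -1 : 0; } /* pretend 7 fails */
static void undo(unsigned long long segnum)  { printf("cancel %llu\n", segnum); }

static int batch_apply(const unsigned long long *segs, size_t nsegs, size_t *ndone)
{
	size_t i;

	for (i = 0; i < nsegs; i++)
		if (apply(segs[i]) < 0)
			break;
	*ndone = i;                     /* how many succeeded from the head */
	return i == nsegs ? 0 : -1;
}

int main(void)
{
	unsigned long long segs[] = { 3, 5, 7, 9 };
	size_t ndone;

	if (batch_apply(segs, 4, &ndone) < 0)
		while (ndone--)         /* roll back only the completed prefix */
			undo(segs[ndone]);
	return 0;
}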
diff --git a/fs/nilfs2/segment.h b/fs/nilfs2/segment.h index 476bdd5df5be..0d2a475a741b 100644 --- a/fs/nilfs2/segment.h +++ b/fs/nilfs2/segment.h | |||
@@ -90,8 +90,9 @@ struct nilfs_segsum_pointer { | |||
90 | * @sc_nblk_inc: Block count of current generation | 90 | * @sc_nblk_inc: Block count of current generation |
91 | * @sc_dirty_files: List of files to be written | 91 | * @sc_dirty_files: List of files to be written |
92 | * @sc_gc_inodes: List of GC inodes having blocks to be written | 92 | * @sc_gc_inodes: List of GC inodes having blocks to be written |
93 | * @sc_cleaning_segments: List of segments to be freed through construction | ||
94 | * @sc_copied_buffers: List of copied buffers (buffer heads) to freeze data | 93 | * @sc_copied_buffers: List of copied buffers (buffer heads) to freeze data |
94 | * @sc_freesegs: array of segment numbers to be freed | ||
95 | * @sc_nfreesegs: number of segments on @sc_freesegs | ||
95 | * @sc_dsync_inode: inode whose data pages are written for a sync operation | 96 | * @sc_dsync_inode: inode whose data pages are written for a sync operation |
96 | * @sc_dsync_start: start byte offset of data pages | 97 | * @sc_dsync_start: start byte offset of data pages |
97 | * @sc_dsync_end: end byte offset of data pages (inclusive) | 98 | * @sc_dsync_end: end byte offset of data pages (inclusive) |
@@ -131,9 +132,11 @@ struct nilfs_sc_info { | |||
131 | 132 | ||
132 | struct list_head sc_dirty_files; | 133 | struct list_head sc_dirty_files; |
133 | struct list_head sc_gc_inodes; | 134 | struct list_head sc_gc_inodes; |
134 | struct list_head sc_cleaning_segments; | ||
135 | struct list_head sc_copied_buffers; | 135 | struct list_head sc_copied_buffers; |
136 | 136 | ||
137 | __u64 *sc_freesegs; | ||
138 | size_t sc_nfreesegs; | ||
139 | |||
137 | struct nilfs_inode_info *sc_dsync_inode; | 140 | struct nilfs_inode_info *sc_dsync_inode; |
138 | loff_t sc_dsync_start; | 141 | loff_t sc_dsync_start; |
139 | loff_t sc_dsync_end; | 142 | loff_t sc_dsync_end; |
@@ -225,10 +228,6 @@ extern void nilfs_flush_segment(struct super_block *, ino_t); | |||
225 | extern int nilfs_clean_segments(struct super_block *, struct nilfs_argv *, | 228 | extern int nilfs_clean_segments(struct super_block *, struct nilfs_argv *, |
226 | void **); | 229 | void **); |
227 | 230 | ||
228 | extern int nilfs_segctor_add_segments_to_be_freed(struct nilfs_sc_info *, | ||
229 | __u64 *, size_t); | ||
230 | extern void nilfs_segctor_clear_segments_to_be_freed(struct nilfs_sc_info *); | ||
231 | |||
232 | extern int nilfs_attach_segment_constructor(struct nilfs_sb_info *); | 231 | extern int nilfs_attach_segment_constructor(struct nilfs_sb_info *); |
233 | extern void nilfs_detach_segment_constructor(struct nilfs_sb_info *); | 232 | extern void nilfs_detach_segment_constructor(struct nilfs_sb_info *); |
234 | 233 | ||
@@ -240,5 +239,6 @@ extern int nilfs_search_super_root(struct the_nilfs *, struct nilfs_sb_info *, | |||
240 | extern int nilfs_recover_logical_segments(struct the_nilfs *, | 239 | extern int nilfs_recover_logical_segments(struct the_nilfs *, |
241 | struct nilfs_sb_info *, | 240 | struct nilfs_sb_info *, |
242 | struct nilfs_recovery_info *); | 241 | struct nilfs_recovery_info *); |
242 | extern void nilfs_dispose_segment_list(struct list_head *); | ||
243 | 243 | ||
244 | #endif /* _NILFS_SEGMENT_H */ | 244 | #endif /* _NILFS_SEGMENT_H */ |
diff --git a/fs/nilfs2/sufile.c b/fs/nilfs2/sufile.c index 98e68677f045..37994d4a59cc 100644 --- a/fs/nilfs2/sufile.c +++ b/fs/nilfs2/sufile.c | |||
@@ -18,6 +18,7 @@ | |||
18 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | 18 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA |
19 | * | 19 | * |
20 | * Written by Koji Sato <koji@osrg.net>. | 20 | * Written by Koji Sato <koji@osrg.net>. |
21 | * Revised by Ryusuke Konishi <ryusuke@osrg.net>. | ||
21 | */ | 22 | */ |
22 | 23 | ||
23 | #include <linux/kernel.h> | 24 | #include <linux/kernel.h> |
@@ -108,6 +109,102 @@ static void nilfs_sufile_mod_counter(struct buffer_head *header_bh, | |||
108 | nilfs_mdt_mark_buffer_dirty(header_bh); | 109 | nilfs_mdt_mark_buffer_dirty(header_bh); |
109 | } | 110 | } |
110 | 111 | ||
112 | /** | ||
113 | * nilfs_sufile_updatev - modify multiple segment usages at a time | ||
114 | * @sufile: inode of segment usage file | ||
115 | * @segnumv: array of segment numbers | ||
116 | * @nsegs: size of @segnumv array | ||
117 | * @create: creation flag | ||
118 | * @ndone: place to store number of modified segments on @segnumv | ||
119 | * @dofunc: primitive operation for the update | ||
120 | * | ||
121 | * Description: nilfs_sufile_updatev() repeatedly calls @dofunc | ||
122 | * against the given array of segments. The @dofunc is called with | ||
123 | * buffers of a header block and the sufile block in which the target | ||
124 | * segment usage entry is contained. If @ndone is given, the number | ||
125 | * of successfully modified segments from the head is stored in the | ||
126 | * place @ndone points to. | ||
127 | * | ||
128 | * Return Value: On success, zero is returned. On error, one of the | ||
129 | * following negative error codes is returned. | ||
130 | * | ||
131 | * %-EIO - I/O error. | ||
132 | * | ||
133 | * %-ENOMEM - Insufficient amount of memory available. | ||
134 | * | ||
135 | * %-ENOENT - Given segment usage is in hole block (may be returned if | ||
136 | * @create is zero) | ||
137 | * | ||
138 | * %-EINVAL - Invalid segment usage number | ||
139 | */ | ||
140 | int nilfs_sufile_updatev(struct inode *sufile, __u64 *segnumv, size_t nsegs, | ||
141 | int create, size_t *ndone, | ||
142 | void (*dofunc)(struct inode *, __u64, | ||
143 | struct buffer_head *, | ||
144 | struct buffer_head *)) | ||
145 | { | ||
146 | struct buffer_head *header_bh, *bh; | ||
147 | unsigned long blkoff, prev_blkoff; | ||
148 | __u64 *seg; | ||
149 | size_t nerr = 0, n = 0; | ||
150 | int ret = 0; | ||
151 | |||
152 | if (unlikely(nsegs == 0)) | ||
153 | goto out; | ||
154 | |||
155 | down_write(&NILFS_MDT(sufile)->mi_sem); | ||
156 | for (seg = segnumv; seg < segnumv + nsegs; seg++) { | ||
157 | if (unlikely(*seg >= nilfs_sufile_get_nsegments(sufile))) { | ||
158 | printk(KERN_WARNING | ||
159 | "%s: invalid segment number: %llu\n", __func__, | ||
160 | (unsigned long long)*seg); | ||
161 | nerr++; | ||
162 | } | ||
163 | } | ||
164 | if (nerr > 0) { | ||
165 | ret = -EINVAL; | ||
166 | goto out_sem; | ||
167 | } | ||
168 | |||
169 | ret = nilfs_sufile_get_header_block(sufile, &header_bh); | ||
170 | if (ret < 0) | ||
171 | goto out_sem; | ||
172 | |||
173 | seg = segnumv; | ||
174 | blkoff = nilfs_sufile_get_blkoff(sufile, *seg); | ||
175 | ret = nilfs_mdt_get_block(sufile, blkoff, create, NULL, &bh); | ||
176 | if (ret < 0) | ||
177 | goto out_header; | ||
178 | |||
179 | for (;;) { | ||
180 | dofunc(sufile, *seg, header_bh, bh); | ||
181 | |||
182 | if (++seg >= segnumv + nsegs) | ||
183 | break; | ||
184 | prev_blkoff = blkoff; | ||
185 | blkoff = nilfs_sufile_get_blkoff(sufile, *seg); | ||
186 | if (blkoff == prev_blkoff) | ||
187 | continue; | ||
188 | |||
189 | /* get different block */ | ||
190 | brelse(bh); | ||
191 | ret = nilfs_mdt_get_block(sufile, blkoff, create, NULL, &bh); | ||
192 | if (unlikely(ret < 0)) | ||
193 | goto out_header; | ||
194 | } | ||
195 | brelse(bh); | ||
196 | |||
197 | out_header: | ||
198 | n = seg - segnumv; | ||
199 | brelse(header_bh); | ||
200 | out_sem: | ||
201 | up_write(&NILFS_MDT(sufile)->mi_sem); | ||
202 | out: | ||
203 | if (ndone) | ||
204 | *ndone = n; | ||
205 | return ret; | ||
206 | } | ||
207 | |||
111 | int nilfs_sufile_update(struct inode *sufile, __u64 segnum, int create, | 208 | int nilfs_sufile_update(struct inode *sufile, __u64 segnum, int create, |
112 | void (*dofunc)(struct inode *, __u64, | 209 | void (*dofunc)(struct inode *, __u64, |
113 | struct buffer_head *, | 210 | struct buffer_head *, |
@@ -490,7 +587,8 @@ void nilfs_sufile_do_set_error(struct inode *sufile, __u64 segnum, | |||
490 | * nilfs_sufile_get_suinfo - | 587 | * nilfs_sufile_get_suinfo - |
491 | * @sufile: inode of segment usage file | 588 | * @sufile: inode of segment usage file |
492 | * @segnum: segment number to start looking | 589 | * @segnum: segment number to start looking |
493 | * @si: array of suinfo | 590 | * @buf: array of suinfo |
591 | * @sisz: byte size of suinfo | ||
494 | * @nsi: size of suinfo array | 592 | * @nsi: size of suinfo array |
495 | * | 593 | * |
496 | * Description: | 594 | * Description: |
@@ -502,11 +600,12 @@ void nilfs_sufile_do_set_error(struct inode *sufile, __u64 segnum, | |||
502 | * | 600 | * |
503 | * %-ENOMEM - Insufficient amount of memory available. | 601 | * %-ENOMEM - Insufficient amount of memory available. |
504 | */ | 602 | */ |
505 | ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum, | 603 | ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum, void *buf, |
506 | struct nilfs_suinfo *si, size_t nsi) | 604 | unsigned sisz, size_t nsi) |
507 | { | 605 | { |
508 | struct buffer_head *su_bh; | 606 | struct buffer_head *su_bh; |
509 | struct nilfs_segment_usage *su; | 607 | struct nilfs_segment_usage *su; |
608 | struct nilfs_suinfo *si = buf; | ||
510 | size_t susz = NILFS_MDT(sufile)->mi_entry_size; | 609 | size_t susz = NILFS_MDT(sufile)->mi_entry_size; |
511 | struct the_nilfs *nilfs = NILFS_MDT(sufile)->mi_nilfs; | 610 | struct the_nilfs *nilfs = NILFS_MDT(sufile)->mi_nilfs; |
512 | void *kaddr; | 611 | void *kaddr; |
@@ -531,20 +630,22 @@ ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum, | |||
531 | if (ret != -ENOENT) | 630 | if (ret != -ENOENT) |
532 | goto out; | 631 | goto out; |
533 | /* hole */ | 632 | /* hole */ |
534 | memset(&si[i], 0, sizeof(struct nilfs_suinfo) * n); | 633 | memset(si, 0, sisz * n); |
634 | si = (void *)si + sisz * n; | ||
535 | continue; | 635 | continue; |
536 | } | 636 | } |
537 | 637 | ||
538 | kaddr = kmap_atomic(su_bh->b_page, KM_USER0); | 638 | kaddr = kmap_atomic(su_bh->b_page, KM_USER0); |
539 | su = nilfs_sufile_block_get_segment_usage( | 639 | su = nilfs_sufile_block_get_segment_usage( |
540 | sufile, segnum, su_bh, kaddr); | 640 | sufile, segnum, su_bh, kaddr); |
541 | for (j = 0; j < n; j++, su = (void *)su + susz) { | 641 | for (j = 0; j < n; |
542 | si[i + j].sui_lastmod = le64_to_cpu(su->su_lastmod); | 642 | j++, su = (void *)su + susz, si = (void *)si + sisz) { |
543 | si[i + j].sui_nblocks = le32_to_cpu(su->su_nblocks); | 643 | si->sui_lastmod = le64_to_cpu(su->su_lastmod); |
544 | si[i + j].sui_flags = le32_to_cpu(su->su_flags) & | 644 | si->sui_nblocks = le32_to_cpu(su->su_nblocks); |
645 | si->sui_flags = le32_to_cpu(su->su_flags) & | ||
545 | ~(1UL << NILFS_SEGMENT_USAGE_ACTIVE); | 646 | ~(1UL << NILFS_SEGMENT_USAGE_ACTIVE); |
546 | if (nilfs_segment_is_active(nilfs, segnum + j)) | 647 | if (nilfs_segment_is_active(nilfs, segnum + j)) |
547 | si[i + j].sui_flags |= | 648 | si->sui_flags |= |
548 | (1UL << NILFS_SEGMENT_USAGE_ACTIVE); | 649 | (1UL << NILFS_SEGMENT_USAGE_ACTIVE); |
549 | } | 650 | } |
550 | kunmap_atomic(kaddr, KM_USER0); | 651 | kunmap_atomic(kaddr, KM_USER0); |
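The new nilfs_sufile_updatev() above walks a sorted-by-locality array of segment numbers and re-reads a sufile block only when the block offset actually changes, so consecutive segments that live in the same block reuse the buffer already held. A small userspace model of that coalescing loop follows; ENTRIES_PER_BLOCK, get_block() and put_block() are assumptions made only for the sketch.

#include <stdio.h>
#include <stddef.h>

#define ENTRIES_PER_BLOCK 4     /* assumption for this model only */

static void get_block(unsigned long blkoff) { printf("read block %lu\n", blkoff); }
static void put_block(void) { }

static void updatev_model(const unsigned long long *segs, size_t nsegs)
{
	unsigned long blkoff, prev_blkoff;
	size_t i;

	if (!nsegs)
		return;
	blkoff = segs[0] / ENTRIES_PER_BLOCK;
	get_block(blkoff);
	for (i = 0; ; ) {
		printf("update segment %llu\n", segs[i]);
		if (++i >= nsegs)
			break;
		prev_blkoff = blkoff;
		blkoff = segs[i] / ENTRIES_PER_BLOCK;
		if (blkoff == prev_blkoff)
			continue;       /* same sufile block: keep the current buffer */
		put_block();
		get_block(blkoff);
	}
	put_block();
}

int main(void)
{
	unsigned long long segs[] = { 0, 1, 2, 9, 10 };

	updatev_model(segs, 5);
	return 0;
}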
diff --git a/fs/nilfs2/sufile.h b/fs/nilfs2/sufile.h index a2e2efd4ade1..a2c4d76c3366 100644 --- a/fs/nilfs2/sufile.h +++ b/fs/nilfs2/sufile.h | |||
@@ -43,43 +43,27 @@ void nilfs_sufile_put_segment_usage(struct inode *, __u64, | |||
43 | struct buffer_head *); | 43 | struct buffer_head *); |
44 | int nilfs_sufile_get_stat(struct inode *, struct nilfs_sustat *); | 44 | int nilfs_sufile_get_stat(struct inode *, struct nilfs_sustat *); |
45 | int nilfs_sufile_get_ncleansegs(struct inode *, unsigned long *); | 45 | int nilfs_sufile_get_ncleansegs(struct inode *, unsigned long *); |
46 | ssize_t nilfs_sufile_get_suinfo(struct inode *, __u64, struct nilfs_suinfo *, | 46 | ssize_t nilfs_sufile_get_suinfo(struct inode *, __u64, void *, unsigned, |
47 | size_t); | 47 | size_t); |
48 | 48 | ||
49 | int nilfs_sufile_updatev(struct inode *, __u64 *, size_t, int, size_t *, | ||
50 | void (*dofunc)(struct inode *, __u64, | ||
51 | struct buffer_head *, | ||
52 | struct buffer_head *)); | ||
49 | int nilfs_sufile_update(struct inode *, __u64, int, | 53 | int nilfs_sufile_update(struct inode *, __u64, int, |
50 | void (*dofunc)(struct inode *, __u64, | 54 | void (*dofunc)(struct inode *, __u64, |
51 | struct buffer_head *, | 55 | struct buffer_head *, |
52 | struct buffer_head *)); | 56 | struct buffer_head *)); |
53 | void nilfs_sufile_do_cancel_free(struct inode *, __u64, struct buffer_head *, | ||
54 | struct buffer_head *); | ||
55 | void nilfs_sufile_do_scrap(struct inode *, __u64, struct buffer_head *, | 57 | void nilfs_sufile_do_scrap(struct inode *, __u64, struct buffer_head *, |
56 | struct buffer_head *); | 58 | struct buffer_head *); |
57 | void nilfs_sufile_do_free(struct inode *, __u64, struct buffer_head *, | 59 | void nilfs_sufile_do_free(struct inode *, __u64, struct buffer_head *, |
58 | struct buffer_head *); | 60 | struct buffer_head *); |
61 | void nilfs_sufile_do_cancel_free(struct inode *, __u64, struct buffer_head *, | ||
62 | struct buffer_head *); | ||
59 | void nilfs_sufile_do_set_error(struct inode *, __u64, struct buffer_head *, | 63 | void nilfs_sufile_do_set_error(struct inode *, __u64, struct buffer_head *, |
60 | struct buffer_head *); | 64 | struct buffer_head *); |
61 | 65 | ||
62 | /** | 66 | /** |
63 | * nilfs_sufile_cancel_free - | ||
64 | * @sufile: inode of segment usage file | ||
65 | * @segnum: segment number | ||
66 | * | ||
67 | * Description: | ||
68 | * | ||
69 | * Return Value: On success, 0 is returned. On error, one of the following | ||
70 | * negative error codes is returned. | ||
71 | * | ||
72 | * %-EIO - I/O error. | ||
73 | * | ||
74 | * %-ENOMEM - Insufficient amount of memory available. | ||
75 | */ | ||
76 | static inline int nilfs_sufile_cancel_free(struct inode *sufile, __u64 segnum) | ||
77 | { | ||
78 | return nilfs_sufile_update(sufile, segnum, 0, | ||
79 | nilfs_sufile_do_cancel_free); | ||
80 | } | ||
81 | |||
82 | /** | ||
83 | * nilfs_sufile_scrap - make a segment garbage | 67 | * nilfs_sufile_scrap - make a segment garbage |
84 | * @sufile: inode of segment usage file | 68 | * @sufile: inode of segment usage file |
85 | * @segnum: segment number to be freed | 69 | * @segnum: segment number to be freed |
@@ -100,6 +84,38 @@ static inline int nilfs_sufile_free(struct inode *sufile, __u64 segnum) | |||
100 | } | 84 | } |
101 | 85 | ||
102 | /** | 86 | /** |
87 | * nilfs_sufile_freev - free segments | ||
88 | * @sufile: inode of segment usage file | ||
89 | * @segnumv: array of segment numbers | ||
90 | * @nsegs: size of @segnumv array | ||
91 | * @ndone: place to store the number of freed segments | ||
92 | */ | ||
93 | static inline int nilfs_sufile_freev(struct inode *sufile, __u64 *segnumv, | ||
94 | size_t nsegs, size_t *ndone) | ||
95 | { | ||
96 | return nilfs_sufile_updatev(sufile, segnumv, nsegs, 0, ndone, | ||
97 | nilfs_sufile_do_free); | ||
98 | } | ||
99 | |||
100 | /** | ||
101 | * nilfs_sufile_cancel_freev - reallocate freeing segments | ||
102 | * @sufile: inode of segment usage file | ||
103 | * @segnumv: array of segment numbers | ||
104 | * @nsegs: size of @segnumv array | ||
105 | * @ndone: place to store the number of cancelled segments | ||
106 | * | ||
107 | * Return Value: On success, 0 is returned. On error, a negative error code | ||
108 | * is returned. | ||
109 | */ | ||
110 | static inline int nilfs_sufile_cancel_freev(struct inode *sufile, | ||
111 | __u64 *segnumv, size_t nsegs, | ||
112 | size_t *ndone) | ||
113 | { | ||
114 | return nilfs_sufile_updatev(sufile, segnumv, nsegs, 0, ndone, | ||
115 | nilfs_sufile_do_cancel_free); | ||
116 | } | ||
117 | |||
118 | /** | ||
103 | * nilfs_sufile_set_error - mark a segment as erroneous | 119 | * nilfs_sufile_set_error - mark a segment as erroneous |
104 | * @sufile: inode of segment usage file | 120 | * @sufile: inode of segment usage file |
105 | * @segnum: segment number | 121 | * @segnum: segment number |
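The sufile.h hunks above keep one generic traversal routine, nilfs_sufile_updatev(), and add thin static-inline wrappers (freev, cancel_freev) that only select the per-segment dofunc callback. Below is a userspace sketch of that callback-parameterized wrapper idiom; all names are illustrative, not the kernel API.

#include <stdio.h>
#include <stddef.h>

typedef void (*dofunc_t)(unsigned long long segnum);

static void do_free(unsigned long long segnum)        { printf("free %llu\n", segnum); }
static void do_cancel_free(unsigned long long segnum) { printf("cancel %llu\n", segnum); }

static int updatev(const unsigned long long *segs, size_t nsegs, dofunc_t dofunc)
{
	for (size_t i = 0; i < nsegs; i++)
		dofunc(segs[i]);        /* one traversal, many operations */
	return 0;
}

static inline int freev(const unsigned long long *segs, size_t nsegs)
{
	return updatev(segs, nsegs, do_free);
}

static inline int cancel_freev(const unsigned long long *segs, size_t nsegs)
{
	return updatev(segs, nsegs, do_cancel_free);
}

int main(void)
{
	unsigned long long segs[] = { 2, 4 };

	freev(segs, 2);
	cancel_freev(segs, 2);
	return 0;
}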
diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c index 1777a3467bd2..ab785f85aa50 100644 --- a/fs/nilfs2/super.c +++ b/fs/nilfs2/super.c | |||
@@ -133,7 +133,7 @@ void nilfs_warning(struct super_block *sb, const char *function, | |||
133 | 133 | ||
134 | static struct kmem_cache *nilfs_inode_cachep; | 134 | static struct kmem_cache *nilfs_inode_cachep; |
135 | 135 | ||
136 | struct inode *nilfs_alloc_inode(struct super_block *sb) | 136 | struct inode *nilfs_alloc_inode_common(struct the_nilfs *nilfs) |
137 | { | 137 | { |
138 | struct nilfs_inode_info *ii; | 138 | struct nilfs_inode_info *ii; |
139 | 139 | ||
@@ -143,10 +143,15 @@ struct inode *nilfs_alloc_inode(struct super_block *sb) | |||
143 | ii->i_bh = NULL; | 143 | ii->i_bh = NULL; |
144 | ii->i_state = 0; | 144 | ii->i_state = 0; |
145 | ii->vfs_inode.i_version = 1; | 145 | ii->vfs_inode.i_version = 1; |
146 | nilfs_btnode_cache_init(&ii->i_btnode_cache); | 146 | nilfs_btnode_cache_init(&ii->i_btnode_cache, nilfs->ns_bdi); |
147 | return &ii->vfs_inode; | 147 | return &ii->vfs_inode; |
148 | } | 148 | } |
149 | 149 | ||
150 | struct inode *nilfs_alloc_inode(struct super_block *sb) | ||
151 | { | ||
152 | return nilfs_alloc_inode_common(NILFS_SB(sb)->s_nilfs); | ||
153 | } | ||
154 | |||
150 | void nilfs_destroy_inode(struct inode *inode) | 155 | void nilfs_destroy_inode(struct inode *inode) |
151 | { | 156 | { |
152 | kmem_cache_free(nilfs_inode_cachep, NILFS_I(inode)); | 157 | kmem_cache_free(nilfs_inode_cachep, NILFS_I(inode)); |
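The super.c hunk above splits the inode allocator: the core constructor now takes a struct the_nilfs pointer directly, so metadata inodes created via nilfs_mdt_new_common() can be allocated without going through a super_block, while nilfs_alloc_inode() stays as the thin VFS-facing wrapper. The sketch below models that "core constructor plus wrapper" refactor with stand-in types, not the kernel ones.

#include <stdlib.h>

struct the_nilfs { int backing_dev; };
struct super_block { struct the_nilfs *s_nilfs; };
struct inode { struct the_nilfs *owner; };

static struct inode *alloc_inode_common(struct the_nilfs *nilfs)
{
	struct inode *inode = malloc(sizeof(*inode));

	if (inode)
		inode->owner = nilfs;   /* per-nilfs state is available even without a super_block */
	return inode;
}

static struct inode *alloc_inode(struct super_block *sb)
{
	return alloc_inode_common(sb->s_nilfs);  /* thin VFS-facing wrapper */
}

int main(void)
{
	struct the_nilfs nilfs = { 0 };
	struct super_block sb = { &nilfs };

	free(alloc_inode(&sb));
	free(alloc_inode_common(&nilfs));        /* metadata inodes can skip the super_block */
	return 0;
}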
diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c index e4e5c78bcc93..8b8889825716 100644 --- a/fs/nilfs2/the_nilfs.c +++ b/fs/nilfs2/the_nilfs.c | |||
@@ -32,7 +32,6 @@ | |||
32 | #include "cpfile.h" | 32 | #include "cpfile.h" |
33 | #include "sufile.h" | 33 | #include "sufile.h" |
34 | #include "dat.h" | 34 | #include "dat.h" |
35 | #include "seglist.h" | ||
36 | #include "segbuf.h" | 35 | #include "segbuf.h" |
37 | 36 | ||
38 | 37 | ||