Diffstat (limited to 'fs/nilfs2')
-rw-r--r--   fs/nilfs2/cpfile.c    |  94
-rw-r--r--   fs/nilfs2/dat.c       |  38
-rw-r--r--   fs/nilfs2/dir.c       |   4
-rw-r--r--   fs/nilfs2/ifile.c     |   4
-rw-r--r--   fs/nilfs2/mdt.c       |   4
-rw-r--r--   fs/nilfs2/page.c      |   8
-rw-r--r--   fs/nilfs2/recovery.c  |   4
-rw-r--r--   fs/nilfs2/segbuf.c    |   4
-rw-r--r--   fs/nilfs2/sufile.c    |  68
9 files changed, 114 insertions(+), 114 deletions(-)
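A note on the change being applied (editorial context, not part of the commit): this patch converts fs/nilfs2 to the kmap_atomic() API that dropped the explicit per-CPU KM_* slot argument; since the switch to stack-based atomic kmaps, the kernel manages slots internally and kunmap_atomic() pairs by the mapped address alone. A minimal before/after sketch in C, assuming a struct page *page the caller already holds:

	void *kaddr;

	/* before: the caller had to name a kmap slot such as KM_USER0 */
	kaddr = kmap_atomic(page, KM_USER0);
	memset(kaddr, 0, PAGE_SIZE);	/* access the temporarily mapped page */
	kunmap_atomic(kaddr, KM_USER0);

	/* after: the slot is chosen internally; unmap pairs by address */
	kaddr = kmap_atomic(page);
	memset(kaddr, 0, PAGE_SIZE);
	kunmap_atomic(kaddr);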
diff --git a/fs/nilfs2/cpfile.c b/fs/nilfs2/cpfile.c
index c9b342c8b503..dab5c4c6dfaf 100644
--- a/fs/nilfs2/cpfile.c
+++ b/fs/nilfs2/cpfile.c
@@ -218,11 +218,11 @@ int nilfs_cpfile_get_checkpoint(struct inode *cpfile,
 						 kaddr, 1);
 		mark_buffer_dirty(cp_bh);
 
-		kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
+		kaddr = kmap_atomic(header_bh->b_page);
 		header = nilfs_cpfile_block_get_header(cpfile, header_bh,
 						       kaddr);
 		le64_add_cpu(&header->ch_ncheckpoints, 1);
-		kunmap_atomic(kaddr, KM_USER0);
+		kunmap_atomic(kaddr);
 		mark_buffer_dirty(header_bh);
 		nilfs_mdt_mark_dirty(cpfile);
 	}
@@ -313,7 +313,7 @@ int nilfs_cpfile_delete_checkpoints(struct inode *cpfile,
 			continue;
 		}
 
-		kaddr = kmap_atomic(cp_bh->b_page, KM_USER0);
+		kaddr = kmap_atomic(cp_bh->b_page);
 		cp = nilfs_cpfile_block_get_checkpoint(
 			cpfile, cno, cp_bh, kaddr);
 		nicps = 0;
@@ -334,7 +334,7 @@ int nilfs_cpfile_delete_checkpoints(struct inode *cpfile,
 					cpfile, cp_bh, kaddr, nicps);
 			if (count == 0) {
 				/* make hole */
-				kunmap_atomic(kaddr, KM_USER0);
+				kunmap_atomic(kaddr);
 				brelse(cp_bh);
 				ret =
 				  nilfs_cpfile_delete_checkpoint_block(
@@ -349,18 +349,18 @@ int nilfs_cpfile_delete_checkpoints(struct inode *cpfile,
 			}
 		}
 
-		kunmap_atomic(kaddr, KM_USER0);
+		kunmap_atomic(kaddr);
 		brelse(cp_bh);
 	}
 
 	if (tnicps > 0) {
-		kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
+		kaddr = kmap_atomic(header_bh->b_page);
 		header = nilfs_cpfile_block_get_header(cpfile, header_bh,
 						       kaddr);
 		le64_add_cpu(&header->ch_ncheckpoints, -(u64)tnicps);
 		mark_buffer_dirty(header_bh);
 		nilfs_mdt_mark_dirty(cpfile);
-		kunmap_atomic(kaddr, KM_USER0);
+		kunmap_atomic(kaddr);
 	}
 
 	brelse(header_bh);
@@ -408,7 +408,7 @@ static ssize_t nilfs_cpfile_do_get_cpinfo(struct inode *cpfile, __u64 *cnop,
 			continue; /* skip hole */
 		}
 
-		kaddr = kmap_atomic(bh->b_page, KM_USER0);
+		kaddr = kmap_atomic(bh->b_page);
 		cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, bh, kaddr);
 		for (i = 0; i < ncps && n < nci; i++, cp = (void *)cp + cpsz) {
 			if (!nilfs_checkpoint_invalid(cp)) {
@@ -418,7 +418,7 @@ static ssize_t nilfs_cpfile_do_get_cpinfo(struct inode *cpfile, __u64 *cnop,
 				n++;
 			}
 		}
-		kunmap_atomic(kaddr, KM_USER0);
+		kunmap_atomic(kaddr);
 		brelse(bh);
 	}
 
@@ -451,10 +451,10 @@ static ssize_t nilfs_cpfile_do_get_ssinfo(struct inode *cpfile, __u64 *cnop,
 	ret = nilfs_cpfile_get_header_block(cpfile, &bh);
 	if (ret < 0)
 		goto out;
-	kaddr = kmap_atomic(bh->b_page, KM_USER0);
+	kaddr = kmap_atomic(bh->b_page);
 	header = nilfs_cpfile_block_get_header(cpfile, bh, kaddr);
 	curr = le64_to_cpu(header->ch_snapshot_list.ssl_next);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 	brelse(bh);
 	if (curr == 0) {
 		ret = 0;
@@ -472,7 +472,7 @@ static ssize_t nilfs_cpfile_do_get_ssinfo(struct inode *cpfile, __u64 *cnop,
 		ret = 0; /* No snapshots (started from a hole block) */
 		goto out;
 	}
-	kaddr = kmap_atomic(bh->b_page, KM_USER0);
+	kaddr = kmap_atomic(bh->b_page);
 	while (n < nci) {
 		cp = nilfs_cpfile_block_get_checkpoint(cpfile, curr, bh, kaddr);
 		curr = ~(__u64)0; /* Terminator */
@@ -488,7 +488,7 @@ static ssize_t nilfs_cpfile_do_get_ssinfo(struct inode *cpfile, __u64 *cnop,
 
 		next_blkoff = nilfs_cpfile_get_blkoff(cpfile, next);
 		if (curr_blkoff != next_blkoff) {
-			kunmap_atomic(kaddr, KM_USER0);
+			kunmap_atomic(kaddr);
 			brelse(bh);
 			ret = nilfs_cpfile_get_checkpoint_block(cpfile, next,
 								0, &bh);
@@ -496,12 +496,12 @@ static ssize_t nilfs_cpfile_do_get_ssinfo(struct inode *cpfile, __u64 *cnop,
 				WARN_ON(ret == -ENOENT);
 				goto out;
 			}
-			kaddr = kmap_atomic(bh->b_page, KM_USER0);
+			kaddr = kmap_atomic(bh->b_page);
 		}
 		curr = next;
 		curr_blkoff = next_blkoff;
 	}
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 	brelse(bh);
 	*cnop = curr;
 	ret = n;
@@ -592,24 +592,24 @@ static int nilfs_cpfile_set_snapshot(struct inode *cpfile, __u64 cno)
 	ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh);
 	if (ret < 0)
 		goto out_sem;
-	kaddr = kmap_atomic(cp_bh->b_page, KM_USER0);
+	kaddr = kmap_atomic(cp_bh->b_page);
 	cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
 	if (nilfs_checkpoint_invalid(cp)) {
 		ret = -ENOENT;
-		kunmap_atomic(kaddr, KM_USER0);
+		kunmap_atomic(kaddr);
 		goto out_cp;
 	}
 	if (nilfs_checkpoint_snapshot(cp)) {
 		ret = 0;
-		kunmap_atomic(kaddr, KM_USER0);
+		kunmap_atomic(kaddr);
 		goto out_cp;
 	}
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 
 	ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
 	if (ret < 0)
 		goto out_cp;
-	kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
+	kaddr = kmap_atomic(header_bh->b_page);
 	header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr);
 	list = &header->ch_snapshot_list;
 	curr_bh = header_bh;
@@ -621,13 +621,13 @@ static int nilfs_cpfile_set_snapshot(struct inode *cpfile, __u64 cno)
 		prev_blkoff = nilfs_cpfile_get_blkoff(cpfile, prev);
 		curr = prev;
 		if (curr_blkoff != prev_blkoff) {
-			kunmap_atomic(kaddr, KM_USER0);
+			kunmap_atomic(kaddr);
 			brelse(curr_bh);
 			ret = nilfs_cpfile_get_checkpoint_block(cpfile, curr,
 								0, &curr_bh);
 			if (ret < 0)
 				goto out_header;
-			kaddr = kmap_atomic(curr_bh->b_page, KM_USER0);
+			kaddr = kmap_atomic(curr_bh->b_page);
 		}
 		curr_blkoff = prev_blkoff;
 		cp = nilfs_cpfile_block_get_checkpoint(
@@ -635,7 +635,7 @@ static int nilfs_cpfile_set_snapshot(struct inode *cpfile, __u64 cno)
 		list = &cp->cp_snapshot_list;
 		prev = le64_to_cpu(list->ssl_prev);
 	}
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 
 	if (prev != 0) {
 		ret = nilfs_cpfile_get_checkpoint_block(cpfile, prev, 0,
@@ -647,29 +647,29 @@ static int nilfs_cpfile_set_snapshot(struct inode *cpfile, __u64 cno)
 		get_bh(prev_bh);
 	}
 
-	kaddr = kmap_atomic(curr_bh->b_page, KM_USER0);
+	kaddr = kmap_atomic(curr_bh->b_page);
 	list = nilfs_cpfile_block_get_snapshot_list(
 		cpfile, curr, curr_bh, kaddr);
 	list->ssl_prev = cpu_to_le64(cno);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 
-	kaddr = kmap_atomic(cp_bh->b_page, KM_USER0);
+	kaddr = kmap_atomic(cp_bh->b_page);
 	cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
 	cp->cp_snapshot_list.ssl_next = cpu_to_le64(curr);
 	cp->cp_snapshot_list.ssl_prev = cpu_to_le64(prev);
 	nilfs_checkpoint_set_snapshot(cp);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 
-	kaddr = kmap_atomic(prev_bh->b_page, KM_USER0);
+	kaddr = kmap_atomic(prev_bh->b_page);
 	list = nilfs_cpfile_block_get_snapshot_list(
 		cpfile, prev, prev_bh, kaddr);
 	list->ssl_next = cpu_to_le64(cno);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 
-	kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
+	kaddr = kmap_atomic(header_bh->b_page);
 	header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr);
 	le64_add_cpu(&header->ch_nsnapshots, 1);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 
 	mark_buffer_dirty(prev_bh);
 	mark_buffer_dirty(curr_bh);
@@ -710,23 +710,23 @@ static int nilfs_cpfile_clear_snapshot(struct inode *cpfile, __u64 cno)
 	ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh);
 	if (ret < 0)
 		goto out_sem;
-	kaddr = kmap_atomic(cp_bh->b_page, KM_USER0);
+	kaddr = kmap_atomic(cp_bh->b_page);
 	cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
 	if (nilfs_checkpoint_invalid(cp)) {
 		ret = -ENOENT;
-		kunmap_atomic(kaddr, KM_USER0);
+		kunmap_atomic(kaddr);
 		goto out_cp;
 	}
 	if (!nilfs_checkpoint_snapshot(cp)) {
 		ret = 0;
-		kunmap_atomic(kaddr, KM_USER0);
+		kunmap_atomic(kaddr);
 		goto out_cp;
 	}
 
 	list = &cp->cp_snapshot_list;
 	next = le64_to_cpu(list->ssl_next);
 	prev = le64_to_cpu(list->ssl_prev);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 
 	ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
 	if (ret < 0)
@@ -750,29 +750,29 @@ static int nilfs_cpfile_clear_snapshot(struct inode *cpfile, __u64 cno)
 		get_bh(prev_bh);
 	}
 
-	kaddr = kmap_atomic(next_bh->b_page, KM_USER0);
+	kaddr = kmap_atomic(next_bh->b_page);
 	list = nilfs_cpfile_block_get_snapshot_list(
 		cpfile, next, next_bh, kaddr);
 	list->ssl_prev = cpu_to_le64(prev);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 
-	kaddr = kmap_atomic(prev_bh->b_page, KM_USER0);
+	kaddr = kmap_atomic(prev_bh->b_page);
 	list = nilfs_cpfile_block_get_snapshot_list(
 		cpfile, prev, prev_bh, kaddr);
 	list->ssl_next = cpu_to_le64(next);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 
-	kaddr = kmap_atomic(cp_bh->b_page, KM_USER0);
+	kaddr = kmap_atomic(cp_bh->b_page);
 	cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
 	cp->cp_snapshot_list.ssl_next = cpu_to_le64(0);
 	cp->cp_snapshot_list.ssl_prev = cpu_to_le64(0);
 	nilfs_checkpoint_clear_snapshot(cp);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 
-	kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
+	kaddr = kmap_atomic(header_bh->b_page);
 	header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr);
 	le64_add_cpu(&header->ch_nsnapshots, -1);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 
 	mark_buffer_dirty(next_bh);
 	mark_buffer_dirty(prev_bh);
@@ -829,13 +829,13 @@ int nilfs_cpfile_is_snapshot(struct inode *cpfile, __u64 cno)
 	ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &bh);
 	if (ret < 0)
 		goto out;
-	kaddr = kmap_atomic(bh->b_page, KM_USER0);
+	kaddr = kmap_atomic(bh->b_page);
 	cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, bh, kaddr);
 	if (nilfs_checkpoint_invalid(cp))
 		ret = -ENOENT;
 	else
 		ret = nilfs_checkpoint_snapshot(cp);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 	brelse(bh);
 
 out:
@@ -912,12 +912,12 @@ int nilfs_cpfile_get_stat(struct inode *cpfile, struct nilfs_cpstat *cpstat)
 	ret = nilfs_cpfile_get_header_block(cpfile, &bh);
 	if (ret < 0)
 		goto out_sem;
-	kaddr = kmap_atomic(bh->b_page, KM_USER0);
+	kaddr = kmap_atomic(bh->b_page);
 	header = nilfs_cpfile_block_get_header(cpfile, bh, kaddr);
 	cpstat->cs_cno = nilfs_mdt_cno(cpfile);
 	cpstat->cs_ncps = le64_to_cpu(header->ch_ncheckpoints);
 	cpstat->cs_nsss = le64_to_cpu(header->ch_nsnapshots);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 	brelse(bh);
 
 out_sem:
diff --git a/fs/nilfs2/dat.c b/fs/nilfs2/dat.c
index fcc2f869af16..b5c13f3576b9 100644
--- a/fs/nilfs2/dat.c
+++ b/fs/nilfs2/dat.c
@@ -85,13 +85,13 @@ void nilfs_dat_commit_alloc(struct inode *dat, struct nilfs_palloc_req *req)
 	struct nilfs_dat_entry *entry;
 	void *kaddr;
 
-	kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0);
+	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
 	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
 					     req->pr_entry_bh, kaddr);
 	entry->de_start = cpu_to_le64(NILFS_CNO_MIN);
 	entry->de_end = cpu_to_le64(NILFS_CNO_MAX);
 	entry->de_blocknr = cpu_to_le64(0);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 
 	nilfs_palloc_commit_alloc_entry(dat, req);
 	nilfs_dat_commit_entry(dat, req);
@@ -109,13 +109,13 @@ static void nilfs_dat_commit_free(struct inode *dat,
 	struct nilfs_dat_entry *entry;
 	void *kaddr;
 
-	kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0);
+	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
 	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
 					     req->pr_entry_bh, kaddr);
 	entry->de_start = cpu_to_le64(NILFS_CNO_MIN);
 	entry->de_end = cpu_to_le64(NILFS_CNO_MIN);
 	entry->de_blocknr = cpu_to_le64(0);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 
 	nilfs_dat_commit_entry(dat, req);
 	nilfs_palloc_commit_free_entry(dat, req);
@@ -136,12 +136,12 @@ void nilfs_dat_commit_start(struct inode *dat, struct nilfs_palloc_req *req,
 	struct nilfs_dat_entry *entry;
 	void *kaddr;
 
-	kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0);
+	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
 	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
 					     req->pr_entry_bh, kaddr);
 	entry->de_start = cpu_to_le64(nilfs_mdt_cno(dat));
 	entry->de_blocknr = cpu_to_le64(blocknr);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 
 	nilfs_dat_commit_entry(dat, req);
 }
@@ -160,12 +160,12 @@ int nilfs_dat_prepare_end(struct inode *dat, struct nilfs_palloc_req *req)
 		return ret;
 	}
 
-	kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0);
+	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
 	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
 					     req->pr_entry_bh, kaddr);
 	start = le64_to_cpu(entry->de_start);
 	blocknr = le64_to_cpu(entry->de_blocknr);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 
 	if (blocknr == 0) {
 		ret = nilfs_palloc_prepare_free_entry(dat, req);
@@ -186,7 +186,7 @@ void nilfs_dat_commit_end(struct inode *dat, struct nilfs_palloc_req *req,
 	sector_t blocknr;
 	void *kaddr;
 
-	kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0);
+	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
 	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
 					     req->pr_entry_bh, kaddr);
 	end = start = le64_to_cpu(entry->de_start);
@@ -196,7 +196,7 @@ void nilfs_dat_commit_end(struct inode *dat, struct nilfs_palloc_req *req,
 	}
 	entry->de_end = cpu_to_le64(end);
 	blocknr = le64_to_cpu(entry->de_blocknr);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 
 	if (blocknr == 0)
 		nilfs_dat_commit_free(dat, req);
@@ -211,12 +211,12 @@ void nilfs_dat_abort_end(struct inode *dat, struct nilfs_palloc_req *req)
 	sector_t blocknr;
 	void *kaddr;
 
-	kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0);
+	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
 	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
 					     req->pr_entry_bh, kaddr);
 	start = le64_to_cpu(entry->de_start);
 	blocknr = le64_to_cpu(entry->de_blocknr);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 
 	if (start == nilfs_mdt_cno(dat) && blocknr == 0)
 		nilfs_palloc_abort_free_entry(dat, req);
@@ -346,20 +346,20 @@ int nilfs_dat_move(struct inode *dat, __u64 vblocknr, sector_t blocknr)
 		}
 	}
 
-	kaddr = kmap_atomic(entry_bh->b_page, KM_USER0);
+	kaddr = kmap_atomic(entry_bh->b_page);
 	entry = nilfs_palloc_block_get_entry(dat, vblocknr, entry_bh, kaddr);
 	if (unlikely(entry->de_blocknr == cpu_to_le64(0))) {
 		printk(KERN_CRIT "%s: vbn = %llu, [%llu, %llu)\n", __func__,
 		       (unsigned long long)vblocknr,
 		       (unsigned long long)le64_to_cpu(entry->de_start),
 		       (unsigned long long)le64_to_cpu(entry->de_end));
-		kunmap_atomic(kaddr, KM_USER0);
+		kunmap_atomic(kaddr);
 		brelse(entry_bh);
 		return -EINVAL;
 	}
 	WARN_ON(blocknr == 0);
 	entry->de_blocknr = cpu_to_le64(blocknr);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 
 	mark_buffer_dirty(entry_bh);
 	nilfs_mdt_mark_dirty(dat);
@@ -409,7 +409,7 @@ int nilfs_dat_translate(struct inode *dat, __u64 vblocknr, sector_t *blocknrp)
 		}
 	}
 
-	kaddr = kmap_atomic(entry_bh->b_page, KM_USER0);
+	kaddr = kmap_atomic(entry_bh->b_page);
 	entry = nilfs_palloc_block_get_entry(dat, vblocknr, entry_bh, kaddr);
 	blocknr = le64_to_cpu(entry->de_blocknr);
 	if (blocknr == 0) {
@@ -419,7 +419,7 @@ int nilfs_dat_translate(struct inode *dat, __u64 vblocknr, sector_t *blocknrp)
 	*blocknrp = blocknr;
 
  out:
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 	brelse(entry_bh);
 	return ret;
 }
@@ -440,7 +440,7 @@ ssize_t nilfs_dat_get_vinfo(struct inode *dat, void *buf, unsigned visz,
 						   0, &entry_bh);
 		if (ret < 0)
 			return ret;
-		kaddr = kmap_atomic(entry_bh->b_page, KM_USER0);
+		kaddr = kmap_atomic(entry_bh->b_page);
 		/* last virtual block number in this block */
 		first = vinfo->vi_vblocknr;
 		do_div(first, entries_per_block);
@@ -456,7 +456,7 @@ ssize_t nilfs_dat_get_vinfo(struct inode *dat, void *buf, unsigned visz,
 			vinfo->vi_end = le64_to_cpu(entry->de_end);
 			vinfo->vi_blocknr = le64_to_cpu(entry->de_blocknr);
 		}
-		kunmap_atomic(kaddr, KM_USER0);
+		kunmap_atomic(kaddr);
 		brelse(entry_bh);
 	}
 
diff --git a/fs/nilfs2/dir.c b/fs/nilfs2/dir.c
index ca35b3a46d17..df1a7fb238d1 100644
--- a/fs/nilfs2/dir.c
+++ b/fs/nilfs2/dir.c
@@ -602,7 +602,7 @@ int nilfs_make_empty(struct inode *inode, struct inode *parent)
 		unlock_page(page);
 		goto fail;
 	}
-	kaddr = kmap_atomic(page, KM_USER0);
+	kaddr = kmap_atomic(page);
 	memset(kaddr, 0, chunk_size);
 	de = (struct nilfs_dir_entry *)kaddr;
 	de->name_len = 1;
@@ -617,7 +617,7 @@ int nilfs_make_empty(struct inode *inode, struct inode *parent)
 	de->inode = cpu_to_le64(parent->i_ino);
 	memcpy(de->name, "..\0", 4);
 	nilfs_set_de_type(de, inode);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 	nilfs_commit_chunk(page, mapping, 0, chunk_size);
 fail:
 	page_cache_release(page);
diff --git a/fs/nilfs2/ifile.c b/fs/nilfs2/ifile.c
index 684d76300a80..5a48df79d674 100644
--- a/fs/nilfs2/ifile.c
+++ b/fs/nilfs2/ifile.c
@@ -122,11 +122,11 @@ int nilfs_ifile_delete_inode(struct inode *ifile, ino_t ino)
 		return ret;
 	}
 
-	kaddr = kmap_atomic(req.pr_entry_bh->b_page, KM_USER0);
+	kaddr = kmap_atomic(req.pr_entry_bh->b_page);
 	raw_inode = nilfs_palloc_block_get_entry(ifile, req.pr_entry_nr,
 						 req.pr_entry_bh, kaddr);
 	raw_inode->i_flags = 0;
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 
 	mark_buffer_dirty(req.pr_entry_bh);
 	brelse(req.pr_entry_bh);
diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c
index 800e8d78a83b..f9897d09c693 100644
--- a/fs/nilfs2/mdt.c
+++ b/fs/nilfs2/mdt.c
@@ -58,12 +58,12 @@ nilfs_mdt_insert_new_block(struct inode *inode, unsigned long block,
 
 	set_buffer_mapped(bh);
 
-	kaddr = kmap_atomic(bh->b_page, KM_USER0);
+	kaddr = kmap_atomic(bh->b_page);
 	memset(kaddr + bh_offset(bh), 0, 1 << inode->i_blkbits);
 	if (init_block)
 		init_block(inode, bh, kaddr);
 	flush_dcache_page(bh->b_page);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 
 	set_buffer_uptodate(bh);
 	mark_buffer_dirty(bh);
diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c
index 65221a04c6f0..3e7b2a0dc0c8 100644
--- a/fs/nilfs2/page.c
+++ b/fs/nilfs2/page.c
@@ -119,11 +119,11 @@ void nilfs_copy_buffer(struct buffer_head *dbh, struct buffer_head *sbh)
 	struct page *spage = sbh->b_page, *dpage = dbh->b_page;
 	struct buffer_head *bh;
 
-	kaddr0 = kmap_atomic(spage, KM_USER0);
-	kaddr1 = kmap_atomic(dpage, KM_USER1);
+	kaddr0 = kmap_atomic(spage);
+	kaddr1 = kmap_atomic(dpage);
 	memcpy(kaddr1 + bh_offset(dbh), kaddr0 + bh_offset(sbh), sbh->b_size);
-	kunmap_atomic(kaddr1, KM_USER1);
-	kunmap_atomic(kaddr0, KM_USER0);
+	kunmap_atomic(kaddr1);
+	kunmap_atomic(kaddr0);
 
 	dbh->b_state = sbh->b_state & NILFS_BUFFER_INHERENT_BITS;
 	dbh->b_blocknr = sbh->b_blocknr;
diff --git a/fs/nilfs2/recovery.c b/fs/nilfs2/recovery.c
index a604ac0331b2..f1626f5011c5 100644
--- a/fs/nilfs2/recovery.c
+++ b/fs/nilfs2/recovery.c
@@ -493,9 +493,9 @@ static int nilfs_recovery_copy_block(struct the_nilfs *nilfs,
 	if (unlikely(!bh_org))
 		return -EIO;
 
-	kaddr = kmap_atomic(page, KM_USER0);
+	kaddr = kmap_atomic(page);
 	memcpy(kaddr + bh_offset(bh_org), bh_org->b_data, bh_org->b_size);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 	brelse(bh_org);
 	return 0;
 }
diff --git a/fs/nilfs2/segbuf.c b/fs/nilfs2/segbuf.c
index 850a7c0228fb..dc9a913784ab 100644
--- a/fs/nilfs2/segbuf.c
+++ b/fs/nilfs2/segbuf.c
@@ -227,9 +227,9 @@ static void nilfs_segbuf_fill_in_data_crc(struct nilfs_segment_buffer *segbuf,
 		crc = crc32_le(crc, bh->b_data, bh->b_size);
 	}
 	list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) {
-		kaddr = kmap_atomic(bh->b_page, KM_USER0);
+		kaddr = kmap_atomic(bh->b_page);
 		crc = crc32_le(crc, kaddr + bh_offset(bh), bh->b_size);
-		kunmap_atomic(kaddr, KM_USER0);
+		kunmap_atomic(kaddr);
 	}
 	raw_sum->ss_datasum = cpu_to_le32(crc);
 }
diff --git a/fs/nilfs2/sufile.c b/fs/nilfs2/sufile.c
index 0a0aba617d8a..c5b7653a4391 100644
--- a/fs/nilfs2/sufile.c
+++ b/fs/nilfs2/sufile.c
@@ -111,11 +111,11 @@ static void nilfs_sufile_mod_counter(struct buffer_head *header_bh,
 	struct nilfs_sufile_header *header;
 	void *kaddr;
 
-	kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
+	kaddr = kmap_atomic(header_bh->b_page);
 	header = kaddr + bh_offset(header_bh);
 	le64_add_cpu(&header->sh_ncleansegs, ncleanadd);
 	le64_add_cpu(&header->sh_ndirtysegs, ndirtyadd);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 
 	mark_buffer_dirty(header_bh);
 }
@@ -319,11 +319,11 @@ int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
 	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
 	if (ret < 0)
 		goto out_sem;
-	kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
+	kaddr = kmap_atomic(header_bh->b_page);
 	header = kaddr + bh_offset(header_bh);
 	ncleansegs = le64_to_cpu(header->sh_ncleansegs);
 	last_alloc = le64_to_cpu(header->sh_last_alloc);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 
 	nsegments = nilfs_sufile_get_nsegments(sufile);
 	maxsegnum = sui->allocmax;
@@ -356,7 +356,7 @@ int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
 							   &su_bh);
 			if (ret < 0)
 				goto out_header;
-			kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
+			kaddr = kmap_atomic(su_bh->b_page);
 			su = nilfs_sufile_block_get_segment_usage(
 				sufile, segnum, su_bh, kaddr);
 
@@ -367,14 +367,14 @@ int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
 				continue;
 			/* found a clean segment */
 			nilfs_segment_usage_set_dirty(su);
-			kunmap_atomic(kaddr, KM_USER0);
+			kunmap_atomic(kaddr);
 
-			kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
+			kaddr = kmap_atomic(header_bh->b_page);
 			header = kaddr + bh_offset(header_bh);
 			le64_add_cpu(&header->sh_ncleansegs, -1);
 			le64_add_cpu(&header->sh_ndirtysegs, 1);
 			header->sh_last_alloc = cpu_to_le64(segnum);
-			kunmap_atomic(kaddr, KM_USER0);
+			kunmap_atomic(kaddr);
 
 			sui->ncleansegs--;
 			mark_buffer_dirty(header_bh);
@@ -385,7 +385,7 @@ int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
 			goto out_header;
 		}
 
-		kunmap_atomic(kaddr, KM_USER0);
+		kunmap_atomic(kaddr);
 		brelse(su_bh);
 	}
 
@@ -407,16 +407,16 @@ void nilfs_sufile_do_cancel_free(struct inode *sufile, __u64 segnum,
 	struct nilfs_segment_usage *su;
 	void *kaddr;
 
-	kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
+	kaddr = kmap_atomic(su_bh->b_page);
 	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
 	if (unlikely(!nilfs_segment_usage_clean(su))) {
 		printk(KERN_WARNING "%s: segment %llu must be clean\n",
 		       __func__, (unsigned long long)segnum);
-		kunmap_atomic(kaddr, KM_USER0);
+		kunmap_atomic(kaddr);
 		return;
 	}
 	nilfs_segment_usage_set_dirty(su);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 
 	nilfs_sufile_mod_counter(header_bh, -1, 1);
 	NILFS_SUI(sufile)->ncleansegs--;
@@ -433,11 +433,11 @@ void nilfs_sufile_do_scrap(struct inode *sufile, __u64 segnum,
 	void *kaddr;
 	int clean, dirty;
 
-	kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
+	kaddr = kmap_atomic(su_bh->b_page);
 	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
 	if (su->su_flags == cpu_to_le32(1UL << NILFS_SEGMENT_USAGE_DIRTY) &&
 	    su->su_nblocks == cpu_to_le32(0)) {
-		kunmap_atomic(kaddr, KM_USER0);
+		kunmap_atomic(kaddr);
 		return;
 	}
 	clean = nilfs_segment_usage_clean(su);
@@ -447,7 +447,7 @@ void nilfs_sufile_do_scrap(struct inode *sufile, __u64 segnum,
 	su->su_lastmod = cpu_to_le64(0);
 	su->su_nblocks = cpu_to_le32(0);
 	su->su_flags = cpu_to_le32(1UL << NILFS_SEGMENT_USAGE_DIRTY);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 
 	nilfs_sufile_mod_counter(header_bh, clean ? (u64)-1 : 0, dirty ? 0 : 1);
 	NILFS_SUI(sufile)->ncleansegs -= clean;
@@ -464,12 +464,12 @@ void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum,
 	void *kaddr;
 	int sudirty;
 
-	kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
+	kaddr = kmap_atomic(su_bh->b_page);
 	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
 	if (nilfs_segment_usage_clean(su)) {
 		printk(KERN_WARNING "%s: segment %llu is already clean\n",
 		       __func__, (unsigned long long)segnum);
-		kunmap_atomic(kaddr, KM_USER0);
+		kunmap_atomic(kaddr);
 		return;
 	}
 	WARN_ON(nilfs_segment_usage_error(su));
@@ -477,7 +477,7 @@ void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum,
 
 	sudirty = nilfs_segment_usage_dirty(su);
 	nilfs_segment_usage_set_clean(su);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 	mark_buffer_dirty(su_bh);
 
 	nilfs_sufile_mod_counter(header_bh, 1, sudirty ? (u64)-1 : 0);
@@ -525,13 +525,13 @@ int nilfs_sufile_set_segment_usage(struct inode *sufile, __u64 segnum,
 	if (ret < 0)
 		goto out_sem;
 
-	kaddr = kmap_atomic(bh->b_page, KM_USER0);
+	kaddr = kmap_atomic(bh->b_page);
 	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr);
 	WARN_ON(nilfs_segment_usage_error(su));
 	if (modtime)
 		su->su_lastmod = cpu_to_le64(modtime);
 	su->su_nblocks = cpu_to_le32(nblocks);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 
 	mark_buffer_dirty(bh);
 	nilfs_mdt_mark_dirty(sufile);
@@ -572,7 +572,7 @@ int nilfs_sufile_get_stat(struct inode *sufile, struct nilfs_sustat *sustat)
 	if (ret < 0)
 		goto out_sem;
 
-	kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
+	kaddr = kmap_atomic(header_bh->b_page);
 	header = kaddr + bh_offset(header_bh);
 	sustat->ss_nsegs = nilfs_sufile_get_nsegments(sufile);
 	sustat->ss_ncleansegs = le64_to_cpu(header->sh_ncleansegs);
@@ -582,7 +582,7 @@ int nilfs_sufile_get_stat(struct inode *sufile, struct nilfs_sustat *sustat)
 	spin_lock(&nilfs->ns_last_segment_lock);
 	sustat->ss_prot_seq = nilfs->ns_prot_seq;
 	spin_unlock(&nilfs->ns_last_segment_lock);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 	brelse(header_bh);
 
 out_sem:
@@ -598,15 +598,15 @@ void nilfs_sufile_do_set_error(struct inode *sufile, __u64 segnum,
 	void *kaddr;
 	int suclean;
 
-	kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
+	kaddr = kmap_atomic(su_bh->b_page);
 	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
 	if (nilfs_segment_usage_error(su)) {
-		kunmap_atomic(kaddr, KM_USER0);
+		kunmap_atomic(kaddr);
 		return;
 	}
 	suclean = nilfs_segment_usage_clean(su);
 	nilfs_segment_usage_set_error(su);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 
 	if (suclean) {
 		nilfs_sufile_mod_counter(header_bh, -1, 0);
@@ -675,7 +675,7 @@ static int nilfs_sufile_truncate_range(struct inode *sufile,
 			/* hole */
 			continue;
 		}
-		kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
+		kaddr = kmap_atomic(su_bh->b_page);
 		su = nilfs_sufile_block_get_segment_usage(
 			sufile, segnum, su_bh, kaddr);
 		su2 = su;
@@ -684,7 +684,7 @@ static int nilfs_sufile_truncate_range(struct inode *sufile,
 			     ~(1UL << NILFS_SEGMENT_USAGE_ERROR)) ||
 			    nilfs_segment_is_active(nilfs, segnum + j)) {
 				ret = -EBUSY;
-				kunmap_atomic(kaddr, KM_USER0);
+				kunmap_atomic(kaddr);
 				brelse(su_bh);
 				goto out_header;
 			}
@@ -696,7 +696,7 @@ static int nilfs_sufile_truncate_range(struct inode *sufile,
 				nc++;
 			}
 		}
-		kunmap_atomic(kaddr, KM_USER0);
+		kunmap_atomic(kaddr);
 		if (nc > 0) {
 			mark_buffer_dirty(su_bh);
 			ncleaned += nc;
@@ -772,10 +772,10 @@ int nilfs_sufile_resize(struct inode *sufile, __u64 newnsegs)
 		sui->ncleansegs -= nsegs - newnsegs;
 	}
 
-	kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
+	kaddr = kmap_atomic(header_bh->b_page);
 	header = kaddr + bh_offset(header_bh);
 	header->sh_ncleansegs = cpu_to_le64(sui->ncleansegs);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 
 	mark_buffer_dirty(header_bh);
 	nilfs_mdt_mark_dirty(sufile);
@@ -840,7 +840,7 @@ ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum, void *buf,
 			continue;
 		}
 
-		kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
+		kaddr = kmap_atomic(su_bh->b_page);
 		su = nilfs_sufile_block_get_segment_usage(
 			sufile, segnum, su_bh, kaddr);
 		for (j = 0; j < n;
@@ -853,7 +853,7 @@ ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum, void *buf,
 				si->sui_flags |=
 					(1UL << NILFS_SEGMENT_USAGE_ACTIVE);
 		}
-		kunmap_atomic(kaddr, KM_USER0);
+		kunmap_atomic(kaddr);
 		brelse(su_bh);
 	}
 	ret = nsegs;
@@ -902,10 +902,10 @@ int nilfs_sufile_read(struct super_block *sb, size_t susize,
 		goto failed;
 
 	sui = NILFS_SUI(sufile);
-	kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
+	kaddr = kmap_atomic(header_bh->b_page);
 	header = kaddr + bh_offset(header_bh);
 	sui->ncleansegs = le64_to_cpu(header->sh_ncleansegs);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 	brelse(header_bh);
 
 	sui->allocmax = nilfs_sufile_get_nsegments(sufile) - 1;