Diffstat (limited to 'fs/nilfs2/sufile.c')
-rw-r--r--	fs/nilfs2/sufile.c	| 68 ++++++++++++++++++++++++++++++++++----------------------------------
1 file changed, 34 insertions(+), 34 deletions(-)
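
This is the nilfs2 piece of the tree-wide kmap_atomic() cleanup: the KM_type slot argument (KM_USER0 throughout this file) is dropped, since atomic kmap slots are now managed implicitly as a stack rather than named by the caller. As a minimal before/after sketch of the pattern the diff applies at every call site (kernel-context C; the function name is made up for illustration, the rest mirrors the code below):

/* Sketch only: illustrates the API change, not buildable on its own. */
#include <linux/buffer_head.h>
#include <linux/highmem.h>

static void sketch_touch_header(struct buffer_head *header_bh)
{
	void *kaddr;

	/* Before: the caller had to name a fixed per-CPU kmap slot. */
	/* kaddr = kmap_atomic(header_bh->b_page, KM_USER0); */

	/* After: no slot argument; nesting is handled internally,
	 * so only the page pointer is passed. */
	kaddr = kmap_atomic(header_bh->b_page);

	/* ... read or modify kaddr + bh_offset(header_bh) ... */

	/* kunmap_atomic() still takes the mapped address, but
	 * likewise loses its KM_type argument. */
	kunmap_atomic(kaddr);
}
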
diff --git a/fs/nilfs2/sufile.c b/fs/nilfs2/sufile.c
index 0a0aba617d8a..c5b7653a4391 100644
--- a/fs/nilfs2/sufile.c
+++ b/fs/nilfs2/sufile.c
@@ -111,11 +111,11 @@ static void nilfs_sufile_mod_counter(struct buffer_head *header_bh,
 	struct nilfs_sufile_header *header;
 	void *kaddr;
 
-	kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
+	kaddr = kmap_atomic(header_bh->b_page);
 	header = kaddr + bh_offset(header_bh);
 	le64_add_cpu(&header->sh_ncleansegs, ncleanadd);
 	le64_add_cpu(&header->sh_ndirtysegs, ndirtyadd);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 
 	mark_buffer_dirty(header_bh);
 }
@@ -319,11 +319,11 @@ int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
 	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
 	if (ret < 0)
 		goto out_sem;
-	kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
+	kaddr = kmap_atomic(header_bh->b_page);
 	header = kaddr + bh_offset(header_bh);
 	ncleansegs = le64_to_cpu(header->sh_ncleansegs);
 	last_alloc = le64_to_cpu(header->sh_last_alloc);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 
 	nsegments = nilfs_sufile_get_nsegments(sufile);
 	maxsegnum = sui->allocmax;
@@ -356,7 +356,7 @@ int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
 							   &su_bh);
 		if (ret < 0)
 			goto out_header;
-		kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
+		kaddr = kmap_atomic(su_bh->b_page);
 		su = nilfs_sufile_block_get_segment_usage(
 			sufile, segnum, su_bh, kaddr);
 
@@ -367,14 +367,14 @@ int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
 				continue;
 			/* found a clean segment */
 			nilfs_segment_usage_set_dirty(su);
-			kunmap_atomic(kaddr, KM_USER0);
+			kunmap_atomic(kaddr);
 
-			kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
+			kaddr = kmap_atomic(header_bh->b_page);
 			header = kaddr + bh_offset(header_bh);
 			le64_add_cpu(&header->sh_ncleansegs, -1);
 			le64_add_cpu(&header->sh_ndirtysegs, 1);
 			header->sh_last_alloc = cpu_to_le64(segnum);
-			kunmap_atomic(kaddr, KM_USER0);
+			kunmap_atomic(kaddr);
 
 			sui->ncleansegs--;
 			mark_buffer_dirty(header_bh);
@@ -385,7 +385,7 @@ int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
 			goto out_header;
 		}
 
-		kunmap_atomic(kaddr, KM_USER0);
+		kunmap_atomic(kaddr);
 		brelse(su_bh);
 	}
 
@@ -407,16 +407,16 @@ void nilfs_sufile_do_cancel_free(struct inode *sufile, __u64 segnum,
 	struct nilfs_segment_usage *su;
 	void *kaddr;
 
-	kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
+	kaddr = kmap_atomic(su_bh->b_page);
 	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
 	if (unlikely(!nilfs_segment_usage_clean(su))) {
 		printk(KERN_WARNING "%s: segment %llu must be clean\n",
 		       __func__, (unsigned long long)segnum);
-		kunmap_atomic(kaddr, KM_USER0);
+		kunmap_atomic(kaddr);
 		return;
 	}
 	nilfs_segment_usage_set_dirty(su);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 
 	nilfs_sufile_mod_counter(header_bh, -1, 1);
 	NILFS_SUI(sufile)->ncleansegs--;
@@ -433,11 +433,11 @@ void nilfs_sufile_do_scrap(struct inode *sufile, __u64 segnum,
 	void *kaddr;
 	int clean, dirty;
 
-	kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
+	kaddr = kmap_atomic(su_bh->b_page);
 	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
 	if (su->su_flags == cpu_to_le32(1UL << NILFS_SEGMENT_USAGE_DIRTY) &&
 	    su->su_nblocks == cpu_to_le32(0)) {
-		kunmap_atomic(kaddr, KM_USER0);
+		kunmap_atomic(kaddr);
 		return;
 	}
 	clean = nilfs_segment_usage_clean(su);
@@ -447,7 +447,7 @@ void nilfs_sufile_do_scrap(struct inode *sufile, __u64 segnum,
 	su->su_lastmod = cpu_to_le64(0);
 	su->su_nblocks = cpu_to_le32(0);
 	su->su_flags = cpu_to_le32(1UL << NILFS_SEGMENT_USAGE_DIRTY);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 
 	nilfs_sufile_mod_counter(header_bh, clean ? (u64)-1 : 0, dirty ? 0 : 1);
 	NILFS_SUI(sufile)->ncleansegs -= clean;
@@ -464,12 +464,12 @@ void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum,
 	void *kaddr;
 	int sudirty;
 
-	kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
+	kaddr = kmap_atomic(su_bh->b_page);
 	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
 	if (nilfs_segment_usage_clean(su)) {
 		printk(KERN_WARNING "%s: segment %llu is already clean\n",
 		       __func__, (unsigned long long)segnum);
-		kunmap_atomic(kaddr, KM_USER0);
+		kunmap_atomic(kaddr);
 		return;
 	}
 	WARN_ON(nilfs_segment_usage_error(su));
@@ -477,7 +477,7 @@ void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum,
 
 	sudirty = nilfs_segment_usage_dirty(su);
 	nilfs_segment_usage_set_clean(su);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 	mark_buffer_dirty(su_bh);
 
 	nilfs_sufile_mod_counter(header_bh, 1, sudirty ? (u64)-1 : 0);
@@ -525,13 +525,13 @@ int nilfs_sufile_set_segment_usage(struct inode *sufile, __u64 segnum,
 	if (ret < 0)
 		goto out_sem;
 
-	kaddr = kmap_atomic(bh->b_page, KM_USER0);
+	kaddr = kmap_atomic(bh->b_page);
 	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr);
 	WARN_ON(nilfs_segment_usage_error(su));
 	if (modtime)
 		su->su_lastmod = cpu_to_le64(modtime);
 	su->su_nblocks = cpu_to_le32(nblocks);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 
 	mark_buffer_dirty(bh);
 	nilfs_mdt_mark_dirty(sufile);
@@ -572,7 +572,7 @@ int nilfs_sufile_get_stat(struct inode *sufile, struct nilfs_sustat *sustat)
 	if (ret < 0)
 		goto out_sem;
 
-	kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
+	kaddr = kmap_atomic(header_bh->b_page);
 	header = kaddr + bh_offset(header_bh);
 	sustat->ss_nsegs = nilfs_sufile_get_nsegments(sufile);
 	sustat->ss_ncleansegs = le64_to_cpu(header->sh_ncleansegs);
@@ -582,7 +582,7 @@ int nilfs_sufile_get_stat(struct inode *sufile, struct nilfs_sustat *sustat)
 	spin_lock(&nilfs->ns_last_segment_lock);
 	sustat->ss_prot_seq = nilfs->ns_prot_seq;
 	spin_unlock(&nilfs->ns_last_segment_lock);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 	brelse(header_bh);
 
  out_sem:
@@ -598,15 +598,15 @@ void nilfs_sufile_do_set_error(struct inode *sufile, __u64 segnum,
 	void *kaddr;
 	int suclean;
 
-	kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
+	kaddr = kmap_atomic(su_bh->b_page);
 	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
 	if (nilfs_segment_usage_error(su)) {
-		kunmap_atomic(kaddr, KM_USER0);
+		kunmap_atomic(kaddr);
 		return;
 	}
 	suclean = nilfs_segment_usage_clean(su);
 	nilfs_segment_usage_set_error(su);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 
 	if (suclean) {
 		nilfs_sufile_mod_counter(header_bh, -1, 0);
@@ -675,7 +675,7 @@ static int nilfs_sufile_truncate_range(struct inode *sufile,
 			/* hole */
 			continue;
 		}
-		kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
+		kaddr = kmap_atomic(su_bh->b_page);
 		su = nilfs_sufile_block_get_segment_usage(
 			sufile, segnum, su_bh, kaddr);
 		su2 = su;
@@ -684,7 +684,7 @@ static int nilfs_sufile_truncate_range(struct inode *sufile,
 			     ~(1UL << NILFS_SEGMENT_USAGE_ERROR)) ||
 			    nilfs_segment_is_active(nilfs, segnum + j)) {
 				ret = -EBUSY;
-				kunmap_atomic(kaddr, KM_USER0);
+				kunmap_atomic(kaddr);
 				brelse(su_bh);
 				goto out_header;
 			}
@@ -696,7 +696,7 @@ static int nilfs_sufile_truncate_range(struct inode *sufile,
 				nc++;
 			}
 		}
-		kunmap_atomic(kaddr, KM_USER0);
+		kunmap_atomic(kaddr);
 		if (nc > 0) {
 			mark_buffer_dirty(su_bh);
 			ncleaned += nc;
@@ -772,10 +772,10 @@ int nilfs_sufile_resize(struct inode *sufile, __u64 newnsegs)
 		sui->ncleansegs -= nsegs - newnsegs;
 	}
 
-	kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
+	kaddr = kmap_atomic(header_bh->b_page);
 	header = kaddr + bh_offset(header_bh);
 	header->sh_ncleansegs = cpu_to_le64(sui->ncleansegs);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 
 	mark_buffer_dirty(header_bh);
 	nilfs_mdt_mark_dirty(sufile);
@@ -840,7 +840,7 @@ ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum, void *buf,
 			continue;
 		}
 
-		kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
+		kaddr = kmap_atomic(su_bh->b_page);
 		su = nilfs_sufile_block_get_segment_usage(
 			sufile, segnum, su_bh, kaddr);
 		for (j = 0; j < n;
@@ -853,7 +853,7 @@ ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum, void *buf,
 				si->sui_flags |=
 					(1UL << NILFS_SEGMENT_USAGE_ACTIVE);
 		}
-		kunmap_atomic(kaddr, KM_USER0);
+		kunmap_atomic(kaddr);
 		brelse(su_bh);
 	}
 	ret = nsegs;
@@ -902,10 +902,10 @@ int nilfs_sufile_read(struct super_block *sb, size_t susize,
 		goto failed;
 
 	sui = NILFS_SUI(sufile);
-	kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
+	kaddr = kmap_atomic(header_bh->b_page);
 	header = kaddr + bh_offset(header_bh);
 	sui->ncleansegs = le64_to_cpu(header->sh_ncleansegs);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 	brelse(header_bh);
 
 	sui->allocmax = nilfs_sufile_get_nsegments(sufile) - 1;