Diffstat (limited to 'fs/nilfs2/sufile.c')
 -rw-r--r--  fs/nilfs2/sufile.c | 274
 1 file changed, 253 insertions(+), 21 deletions(-)
diff --git a/fs/nilfs2/sufile.c b/fs/nilfs2/sufile.c
index 1d6f488ccae8..0a0aba617d8a 100644
--- a/fs/nilfs2/sufile.c
+++ b/fs/nilfs2/sufile.c
@@ -33,7 +33,9 @@
 
 struct nilfs_sufile_info {
         struct nilfs_mdt_info mi;
-        unsigned long ncleansegs;
+        unsigned long ncleansegs;/* number of clean segments */
+        __u64 allocmin;          /* lower limit of allocatable segment range */
+        __u64 allocmax;          /* upper limit of allocatable segment range */
 };
 
 static inline struct nilfs_sufile_info *NILFS_SUI(struct inode *sufile)
@@ -96,6 +98,13 @@ nilfs_sufile_get_segment_usage_block(struct inode *sufile, __u64 segnum,
                                      create, NULL, bhp);
 }
 
+static int nilfs_sufile_delete_segment_usage_block(struct inode *sufile,
+                                                   __u64 segnum)
+{
+        return nilfs_mdt_delete_block(sufile,
+                                      nilfs_sufile_get_blkoff(sufile, segnum));
+}
+
 static void nilfs_sufile_mod_counter(struct buffer_head *header_bh,
                                      u64 ncleanadd, u64 ndirtyadd)
 {
@@ -108,7 +117,7 @@ static void nilfs_sufile_mod_counter(struct buffer_head *header_bh,
         le64_add_cpu(&header->sh_ndirtysegs, ndirtyadd);
         kunmap_atomic(kaddr, KM_USER0);
 
-        nilfs_mdt_mark_buffer_dirty(header_bh);
+        mark_buffer_dirty(header_bh);
 }
 
 /**
@@ -248,6 +257,35 @@ int nilfs_sufile_update(struct inode *sufile, __u64 segnum, int create,
 }
 
 /**
+ * nilfs_sufile_set_alloc_range - limit range of segment to be allocated
+ * @sufile: inode of segment usage file
+ * @start: minimum segment number of allocatable region (inclusive)
+ * @end: maximum segment number of allocatable region (inclusive)
+ *
+ * Return Value: On success, 0 is returned. On error, one of the
+ * following negative error codes is returned.
+ *
+ * %-ERANGE - invalid segment region
+ */
+int nilfs_sufile_set_alloc_range(struct inode *sufile, __u64 start, __u64 end)
+{
+        struct nilfs_sufile_info *sui = NILFS_SUI(sufile);
+        __u64 nsegs;
+        int ret = -ERANGE;
+
+        down_write(&NILFS_MDT(sufile)->mi_sem);
+        nsegs = nilfs_sufile_get_nsegments(sufile);
+
+        if (start <= end && end < nsegs) {
+                sui->allocmin = start;
+                sui->allocmax = end;
+                ret = 0;
+        }
+        up_write(&NILFS_MDT(sufile)->mi_sem);
+        return ret;
+}
+
+/**
  * nilfs_sufile_alloc - allocate a segment
  * @sufile: inode of segment usage file
  * @segnump: pointer to segment number
@@ -269,11 +307,12 @@ int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
         struct buffer_head *header_bh, *su_bh;
         struct nilfs_sufile_header *header;
         struct nilfs_segment_usage *su;
+        struct nilfs_sufile_info *sui = NILFS_SUI(sufile);
         size_t susz = NILFS_MDT(sufile)->mi_entry_size;
         __u64 segnum, maxsegnum, last_alloc;
         void *kaddr;
-        unsigned long nsegments, ncleansegs, nsus;
-        int ret, i, j;
+        unsigned long nsegments, ncleansegs, nsus, cnt;
+        int ret, j;
 
         down_write(&NILFS_MDT(sufile)->mi_sem);
 
@@ -287,13 +326,31 @@ int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
         kunmap_atomic(kaddr, KM_USER0);
 
         nsegments = nilfs_sufile_get_nsegments(sufile);
+        maxsegnum = sui->allocmax;
         segnum = last_alloc + 1;
-        maxsegnum = nsegments - 1;
-        for (i = 0; i < nsegments; i += nsus) {
-                if (segnum >= nsegments) {
-                        /* wrap around */
-                        segnum = 0;
-                        maxsegnum = last_alloc;
+        if (segnum < sui->allocmin || segnum > sui->allocmax)
+                segnum = sui->allocmin;
+
+        for (cnt = 0; cnt < nsegments; cnt += nsus) {
+                if (segnum > maxsegnum) {
+                        if (cnt < sui->allocmax - sui->allocmin + 1) {
+                                /*
+                                 * wrap around in the limited region.
+                                 * if allocation started from
+                                 * sui->allocmin, this never happens.
+                                 */
+                                segnum = sui->allocmin;
+                                maxsegnum = last_alloc;
+                        } else if (segnum > sui->allocmin &&
+                                   sui->allocmax + 1 < nsegments) {
+                                segnum = sui->allocmax + 1;
+                                maxsegnum = nsegments - 1;
+                        } else if (sui->allocmin > 0) {
+                                segnum = 0;
+                                maxsegnum = sui->allocmin - 1;
+                        } else {
+                                break; /* never happens */
+                        }
                 }
                 ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 1,
                                                            &su_bh);
@@ -319,9 +376,9 @@ int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
                 header->sh_last_alloc = cpu_to_le64(segnum);
                 kunmap_atomic(kaddr, KM_USER0);
 
-                NILFS_SUI(sufile)->ncleansegs--;
-                nilfs_mdt_mark_buffer_dirty(header_bh);
-                nilfs_mdt_mark_buffer_dirty(su_bh);
+                sui->ncleansegs--;
+                mark_buffer_dirty(header_bh);
+                mark_buffer_dirty(su_bh);
                 nilfs_mdt_mark_dirty(sufile);
                 brelse(su_bh);
                 *segnump = segnum;
@@ -364,7 +421,7 @@ void nilfs_sufile_do_cancel_free(struct inode *sufile, __u64 segnum,
         nilfs_sufile_mod_counter(header_bh, -1, 1);
         NILFS_SUI(sufile)->ncleansegs--;
 
-        nilfs_mdt_mark_buffer_dirty(su_bh);
+        mark_buffer_dirty(su_bh);
         nilfs_mdt_mark_dirty(sufile);
 }
 
@@ -395,7 +452,7 @@ void nilfs_sufile_do_scrap(struct inode *sufile, __u64 segnum,
         nilfs_sufile_mod_counter(header_bh, clean ? (u64)-1 : 0, dirty ? 0 : 1);
         NILFS_SUI(sufile)->ncleansegs -= clean;
 
-        nilfs_mdt_mark_buffer_dirty(su_bh);
+        mark_buffer_dirty(su_bh);
         nilfs_mdt_mark_dirty(sufile);
 }
 
@@ -421,7 +478,7 @@ void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum,
         sudirty = nilfs_segment_usage_dirty(su);
         nilfs_segment_usage_set_clean(su);
         kunmap_atomic(kaddr, KM_USER0);
-        nilfs_mdt_mark_buffer_dirty(su_bh);
+        mark_buffer_dirty(su_bh);
 
         nilfs_sufile_mod_counter(header_bh, 1, sudirty ? (u64)-1 : 0);
         NILFS_SUI(sufile)->ncleansegs++;
@@ -441,7 +498,7 @@ int nilfs_sufile_mark_dirty(struct inode *sufile, __u64 segnum)
 
         ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &bh);
         if (!ret) {
-                nilfs_mdt_mark_buffer_dirty(bh);
+                mark_buffer_dirty(bh);
                 nilfs_mdt_mark_dirty(sufile);
                 brelse(bh);
         }
@@ -476,7 +533,7 @@ int nilfs_sufile_set_segment_usage(struct inode *sufile, __u64 segnum,
         su->su_nblocks = cpu_to_le32(nblocks);
         kunmap_atomic(kaddr, KM_USER0);
 
-        nilfs_mdt_mark_buffer_dirty(bh);
+        mark_buffer_dirty(bh);
         nilfs_mdt_mark_dirty(sufile);
         brelse(bh);
 
@@ -505,7 +562,7 @@ int nilfs_sufile_get_stat(struct inode *sufile, struct nilfs_sustat *sustat)
 {
         struct buffer_head *header_bh;
         struct nilfs_sufile_header *header;
-        struct the_nilfs *nilfs = NILFS_I_NILFS(sufile);
+        struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
         void *kaddr;
         int ret;
 
@@ -555,11 +612,183 @@ void nilfs_sufile_do_set_error(struct inode *sufile, __u64 segnum,
                 nilfs_sufile_mod_counter(header_bh, -1, 0);
                 NILFS_SUI(sufile)->ncleansegs--;
         }
-        nilfs_mdt_mark_buffer_dirty(su_bh);
+        mark_buffer_dirty(su_bh);
         nilfs_mdt_mark_dirty(sufile);
 }
 
 /**
+ * nilfs_sufile_truncate_range - truncate range of segment array
+ * @sufile: inode of segment usage file
+ * @start: start segment number (inclusive)
+ * @end: end segment number (inclusive)
+ *
+ * Return Value: On success, 0 is returned. On error, one of the
+ * following negative error codes is returned.
+ *
+ * %-EIO - I/O error.
+ *
+ * %-ENOMEM - Insufficient amount of memory available.
+ *
+ * %-EINVAL - Invalid number of segments specified
+ *
+ * %-EBUSY - Dirty or active segments are present in the range
+ */
+static int nilfs_sufile_truncate_range(struct inode *sufile,
+                                       __u64 start, __u64 end)
+{
+        struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
+        struct buffer_head *header_bh;
+        struct buffer_head *su_bh;
+        struct nilfs_segment_usage *su, *su2;
+        size_t susz = NILFS_MDT(sufile)->mi_entry_size;
+        unsigned long segusages_per_block;
+        unsigned long nsegs, ncleaned;
+        __u64 segnum;
+        void *kaddr;
+        ssize_t n, nc;
+        int ret;
+        int j;
+
+        nsegs = nilfs_sufile_get_nsegments(sufile);
+
+        ret = -EINVAL;
+        if (start > end || start >= nsegs)
+                goto out;
+
+        ret = nilfs_sufile_get_header_block(sufile, &header_bh);
+        if (ret < 0)
+                goto out;
+
+        segusages_per_block = nilfs_sufile_segment_usages_per_block(sufile);
+        ncleaned = 0;
+
+        for (segnum = start; segnum <= end; segnum += n) {
+                n = min_t(unsigned long,
+                          segusages_per_block -
+                                nilfs_sufile_get_offset(sufile, segnum),
+                          end - segnum + 1);
+                ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0,
+                                                           &su_bh);
+                if (ret < 0) {
+                        if (ret != -ENOENT)
+                                goto out_header;
+                        /* hole */
+                        continue;
+                }
+                kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
+                su = nilfs_sufile_block_get_segment_usage(
+                        sufile, segnum, su_bh, kaddr);
+                su2 = su;
+                for (j = 0; j < n; j++, su = (void *)su + susz) {
+                        if ((le32_to_cpu(su->su_flags) &
+                             ~(1UL << NILFS_SEGMENT_USAGE_ERROR)) ||
+                            nilfs_segment_is_active(nilfs, segnum + j)) {
+                                ret = -EBUSY;
+                                kunmap_atomic(kaddr, KM_USER0);
+                                brelse(su_bh);
+                                goto out_header;
+                        }
+                }
+                nc = 0;
+                for (su = su2, j = 0; j < n; j++, su = (void *)su + susz) {
+                        if (nilfs_segment_usage_error(su)) {
+                                nilfs_segment_usage_set_clean(su);
+                                nc++;
+                        }
+                }
+                kunmap_atomic(kaddr, KM_USER0);
+                if (nc > 0) {
+                        mark_buffer_dirty(su_bh);
+                        ncleaned += nc;
+                }
+                brelse(su_bh);
+
+                if (n == segusages_per_block) {
+                        /* make hole */
+                        nilfs_sufile_delete_segment_usage_block(sufile, segnum);
+                }
+        }
+        ret = 0;
+
+out_header:
+        if (ncleaned > 0) {
+                NILFS_SUI(sufile)->ncleansegs += ncleaned;
+                nilfs_sufile_mod_counter(header_bh, ncleaned, 0);
+                nilfs_mdt_mark_dirty(sufile);
+        }
+        brelse(header_bh);
+out:
+        return ret;
+}
+
+/**
+ * nilfs_sufile_resize - resize segment array
+ * @sufile: inode of segment usage file
+ * @newnsegs: new number of segments
+ *
+ * Return Value: On success, 0 is returned. On error, one of the
+ * following negative error codes is returned.
+ *
+ * %-EIO - I/O error.
+ *
+ * %-ENOMEM - Insufficient amount of memory available.
+ *
+ * %-ENOSPC - Enough free space is not left for shrinking
+ *
+ * %-EBUSY - Dirty or active segments exist in the region to be truncated
+ */
+int nilfs_sufile_resize(struct inode *sufile, __u64 newnsegs)
+{
+        struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
+        struct buffer_head *header_bh;
+        struct nilfs_sufile_header *header;
+        struct nilfs_sufile_info *sui = NILFS_SUI(sufile);
+        void *kaddr;
+        unsigned long nsegs, nrsvsegs;
+        int ret = 0;
+
+        down_write(&NILFS_MDT(sufile)->mi_sem);
+
+        nsegs = nilfs_sufile_get_nsegments(sufile);
+        if (nsegs == newnsegs)
+                goto out;
+
+        ret = -ENOSPC;
+        nrsvsegs = nilfs_nrsvsegs(nilfs, newnsegs);
+        if (newnsegs < nsegs && nsegs - newnsegs + nrsvsegs > sui->ncleansegs)
+                goto out;
+
+        ret = nilfs_sufile_get_header_block(sufile, &header_bh);
+        if (ret < 0)
+                goto out;
+
+        if (newnsegs > nsegs) {
+                sui->ncleansegs += newnsegs - nsegs;
+        } else /* newnsegs < nsegs */ {
+                ret = nilfs_sufile_truncate_range(sufile, newnsegs, nsegs - 1);
+                if (ret < 0)
+                        goto out_header;
+
+                sui->ncleansegs -= nsegs - newnsegs;
+        }
+
+        kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
+        header = kaddr + bh_offset(header_bh);
+        header->sh_ncleansegs = cpu_to_le64(sui->ncleansegs);
+        kunmap_atomic(kaddr, KM_USER0);
+
+        mark_buffer_dirty(header_bh);
+        nilfs_mdt_mark_dirty(sufile);
+        nilfs_set_nsegments(nilfs, newnsegs);
+
+out_header:
+        brelse(header_bh);
+out:
+        up_write(&NILFS_MDT(sufile)->mi_sem);
+        return ret;
+}
+
+/**
  * nilfs_sufile_get_suinfo -
  * @sufile: inode of segment usage file
  * @segnum: segment number to start looking
@@ -583,7 +812,7 @@ ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum, void *buf,
         struct nilfs_segment_usage *su;
         struct nilfs_suinfo *si = buf;
         size_t susz = NILFS_MDT(sufile)->mi_entry_size;
-        struct the_nilfs *nilfs = NILFS_I_NILFS(sufile);
+        struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
         void *kaddr;
         unsigned long nsegs, segusages_per_block;
         ssize_t n;
@@ -679,6 +908,9 @@ int nilfs_sufile_read(struct super_block *sb, size_t susize,
         kunmap_atomic(kaddr, KM_USER0);
         brelse(header_bh);
 
+        sui->allocmax = nilfs_sufile_get_nsegments(sufile) - 1;
+        sui->allocmin = 0;
+
         unlock_new_inode(sufile);
  out:
         *inodep = sufile;
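
The two new entry points in this patch, nilfs_sufile_set_alloc_range() and nilfs_sufile_resize(), both gate their work on small arithmetic checks: the allocation range must satisfy start <= end && end < nsegs (otherwise -ERANGE), and a shrink is refused with -ENOSPC unless the segments being cut away plus the reserved segments for the new size still fit within the current clean-segment count. The standalone userspace sketch below restates just those two checks for illustration; the helper names (alloc_range_valid, can_shrink) and the sample numbers are hypothetical, and in the kernel the reserved-segment count comes from nilfs_nrsvsegs() rather than a constant.

/*
 * Minimal userspace sketch (not kernel code) of two checks introduced by
 * this patch: the range validation in nilfs_sufile_set_alloc_range() and
 * the free-space test behind the -ENOSPC case of nilfs_sufile_resize().
 */
#include <stdio.h>

/* Mirrors: start <= end && end < nsegs, otherwise the ioctl gets -ERANGE. */
static int alloc_range_valid(unsigned long long nsegs,
                             unsigned long long start, unsigned long long end)
{
        return start <= end && end < nsegs;
}

/*
 * Mirrors the shrink check: the segments being cut plus the reserved
 * segments for the new size must still fit in the current clean segments.
 * Growing (newnsegs >= nsegs) is always allowed by this test.
 */
static int can_shrink(unsigned long nsegs, unsigned long newnsegs,
                      unsigned long nrsvsegs, unsigned long ncleansegs)
{
        return !(newnsegs < nsegs && nsegs - newnsegs + nrsvsegs > ncleansegs);
}

int main(void)
{
        /* Example: 1000-segment filesystem, 700 clean segments, 8 reserved. */
        printf("range [100,499] valid: %d\n", alloc_range_valid(1000, 100, 499));
        printf("range [500,1000] valid: %d\n", alloc_range_valid(1000, 500, 1000));
        printf("shrink 1000 -> 400: %d\n", can_shrink(1000, 400, 8, 700));
        printf("shrink 1000 -> 200: %d\n", can_shrink(1000, 200, 8, 700));
        return 0;
}

Compiled and run, the sketch prints 1, 0, 1, 0: the second range fails because its end is not below the segment count, and the second shrink fails because the 800 removed segments plus 8 reserved ones exceed the 700 clean segments available.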