Diffstat (limited to 'fs/ntfs/file.c')

 fs/ntfs/file.c | 34 +++++++++++++++++-----------------
 1 file changed, 17 insertions(+), 17 deletions(-)
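The change is purely mechanical: every use of the NTFS-private BOOL type and its TRUE/FALSE constants is replaced by the C99 bool/true/false that kernel code gets via <linux/types.h>, with no behavioural change. A minimal sketch of the before/after shape (the BOOL definition lived in fs/ntfs/types.h; the typedef below is illustrative, not a verbatim quote of that header):

	#include <stdbool.h>	/* userspace stand-in for the kernel's <linux/types.h> */

	/* Before the patch: a subsystem-private boolean, common in pre-C99
	 * kernel code (illustrative reconstruction). */
	typedef enum {
		FALSE = 0,
		TRUE = 1
	} BOOL;

	int main(void)
	{
		BOOL old_style = FALSE;		/* pre-patch spelling */
		bool new_style = false;		/* post-patch spelling: C99 bool */

		return old_style == (BOOL)new_style ? 0 : 1;
	}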
diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
index 2e42c2dcae12..585a79d39c9d 100644
--- a/fs/ntfs/file.c
+++ b/fs/ntfs/file.c
@@ -509,7 +509,7 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
 	u32 attr_rec_len = 0;
 	unsigned blocksize, u;
 	int err, mp_size;
-	BOOL rl_write_locked, was_hole, is_retry;
+	bool rl_write_locked, was_hole, is_retry;
 	unsigned char blocksize_bits;
 	struct {
 		u8 runlist_merged:1;
@@ -543,13 +543,13 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
 				return -ENOMEM;
 		}
 	} while (++u < nr_pages);
-	rl_write_locked = FALSE;
+	rl_write_locked = false;
 	rl = NULL;
 	err = 0;
 	vcn = lcn = -1;
 	vcn_len = 0;
 	lcn_block = -1;
-	was_hole = FALSE;
+	was_hole = false;
 	cpos = pos >> vol->cluster_size_bits;
 	end = pos + bytes;
 	cend = (end + vol->cluster_size - 1) >> vol->cluster_size_bits;
@@ -760,7 +760,7 @@ map_buffer_cached:
 			}
 			continue;
 		}
-		is_retry = FALSE;
+		is_retry = false;
 		if (!rl) {
 			down_read(&ni->runlist.lock);
retry_remap:
@@ -776,7 +776,7 @@ retry_remap:
 				 * Successful remap, setup the map cache and
 				 * use that to deal with the buffer.
 				 */
-				was_hole = FALSE;
+				was_hole = false;
 				vcn = bh_cpos;
 				vcn_len = rl[1].vcn - vcn;
 				lcn_block = lcn << (vol->cluster_size_bits -
@@ -792,7 +792,7 @@ retry_remap:
 			if (likely(vcn + vcn_len >= cend)) {
 				if (rl_write_locked) {
 					up_write(&ni->runlist.lock);
-					rl_write_locked = FALSE;
+					rl_write_locked = false;
 				} else
 					up_read(&ni->runlist.lock);
 				rl = NULL;
@@ -818,13 +818,13 @@ retry_remap:
 					 */
 					up_read(&ni->runlist.lock);
 					down_write(&ni->runlist.lock);
-					rl_write_locked = TRUE;
+					rl_write_locked = true;
 					goto retry_remap;
 				}
 				err = ntfs_map_runlist_nolock(ni, bh_cpos,
 						NULL);
 				if (likely(!err)) {
-					is_retry = TRUE;
+					is_retry = true;
 					goto retry_remap;
 				}
 				/*
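The hunk above is the reason rl_write_locked exists at all: a Linux rw_semaphore has no atomic read-to-write upgrade, so the code must drop the read side, acquire the write side, and jump back to retry_remap, revalidating everything because another task may have changed the runlist in the unlocked window. A stripped-down sketch of that idiom, assuming hypothetical need_write() and do_work() stand-ins (neither is from file.c):

	#include <linux/rwsem.h>

	static bool need_write(void);	/* hypothetical predicate */
	static void do_work(void);	/* hypothetical work body */

	static void locked_op(struct rw_semaphore *sem)
	{
		bool write_locked = false;

		down_read(sem);
	retry:
		if (need_write() && !write_locked) {
			/* rw_semaphores cannot be upgraded in place: release
			 * the read lock, take the write lock, then re-check,
			 * since the protected state may have changed in the
			 * gap between up_read() and down_write(). */
			up_read(sem);
			down_write(sem);
			write_locked = true;
			goto retry;
		}
		do_work();
		if (write_locked)
			up_write(sem);
		else
			up_read(sem);
	}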
@@ -903,7 +903,7 @@ rl_not_mapped_enoent:
 			if (!rl_write_locked) {
 				up_read(&ni->runlist.lock);
 				down_write(&ni->runlist.lock);
-				rl_write_locked = TRUE;
+				rl_write_locked = true;
 				goto retry_remap;
 			}
 			/* Find the previous last allocated cluster. */
@@ -917,7 +917,7 @@ rl_not_mapped_enoent:
 			}
 		}
 		rl2 = ntfs_cluster_alloc(vol, bh_cpos, 1, lcn, DATA_ZONE,
-				FALSE);
+				false);
 		if (IS_ERR(rl2)) {
 			err = PTR_ERR(rl2);
 			ntfs_debug("Failed to allocate cluster, error code %i.",
@@ -1093,7 +1093,7 @@ rl_not_mapped_enoent:
 		status.mft_attr_mapped = 0;
 		status.mp_rebuilt = 0;
 		/* Setup the map cache and use that to deal with the buffer. */
-		was_hole = TRUE;
+		was_hole = true;
 		vcn = bh_cpos;
 		vcn_len = 1;
 		lcn_block = lcn << (vol->cluster_size_bits - blocksize_bits);
@@ -1105,7 +1105,7 @@ rl_not_mapped_enoent:
 		 */
 		if (likely(vcn + vcn_len >= cend)) {
 			up_write(&ni->runlist.lock);
-			rl_write_locked = FALSE;
+			rl_write_locked = false;
 			rl = NULL;
 		}
 		goto map_buffer_cached;
@@ -1117,7 +1117,7 @@ rl_not_mapped_enoent:
 	if (likely(!err)) {
 		if (unlikely(rl_write_locked)) {
 			up_write(&ni->runlist.lock);
-			rl_write_locked = FALSE;
+			rl_write_locked = false;
 		} else if (unlikely(rl))
 			up_read(&ni->runlist.lock);
 		rl = NULL;
@@ -1528,19 +1528,19 @@ static inline int ntfs_commit_pages_after_non_resident_write(
 	do {
 		s64 bh_pos;
 		struct page *page;
-		BOOL partial;
+		bool partial;
 
 		page = pages[u];
 		bh_pos = (s64)page->index << PAGE_CACHE_SHIFT;
 		bh = head = page_buffers(page);
-		partial = FALSE;
+		partial = false;
 		do {
 			s64 bh_end;
 
 			bh_end = bh_pos + blocksize;
 			if (bh_end <= pos || bh_pos >= end) {
 				if (!buffer_uptodate(bh))
-					partial = TRUE;
+					partial = true;
 			} else {
 				set_buffer_uptodate(bh);
 				mark_buffer_dirty(bh);
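Beyond the rename, the partial flag in this hunk enforces the standard buffer-head invariant: a page may be marked uptodate only when every block buffer attached to it is uptodate. Buffers the write did not touch and that are not already uptodate set partial, which lets the code following this hunk (outside the diff context, presumably via SetPageUptodate()) skip marking the page. A condensed sketch of the invariant, with the range test pulled into a hypothetical helper and using the era-appropriate PAGE_CACHE_SHIFT from the hunk:

	#include <linux/buffer_head.h>

	/* Hypothetical helper: true when the buffer lies wholly outside the
	 * byte range [pos, end) that the write just filled. */
	static bool bh_outside_write(s64 bh_pos, s64 bh_end, s64 pos, s64 end)
	{
		return bh_end <= pos || bh_pos >= end;
	}

	static void commit_page(struct page *page, s64 pos, s64 end,
			unsigned blocksize)
	{
		struct buffer_head *bh, *head;
		s64 bh_pos = (s64)page->index << PAGE_CACHE_SHIFT;
		bool partial = false;

		bh = head = page_buffers(page);
		do {
			s64 bh_end = bh_pos + blocksize;

			if (bh_outside_write(bh_pos, bh_end, pos, end)) {
				if (!buffer_uptodate(bh))
					partial = true;	/* stale block remains */
			} else {
				set_buffer_uptodate(bh);	/* just written */
				mark_buffer_dirty(bh);
			}
			bh_pos = bh_end;
		} while ((bh = bh->b_this_page) != head);

		/* Only a page whose every buffer is uptodate may be marked so. */
		if (!partial && !PageUptodate(page))
			SetPageUptodate(page);
	}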
@@ -1997,7 +1997,7 @@ static ssize_t ntfs_file_buffered_write(struct kiocb *iocb,
 			 */
 			down_read(&ni->runlist.lock);
 			lcn = ntfs_attr_vcn_to_lcn_nolock(ni, pos >>
-					vol->cluster_size_bits, FALSE);
+					vol->cluster_size_bits, false);
 			up_read(&ni->runlist.lock);
 			if (unlikely(lcn < LCN_HOLE)) {
 				status = -EIO;