Diffstat (limited to 'fs/ntfs/file.c')
-rw-r--r--	fs/ntfs/file.c	46
1 file changed, 21 insertions(+), 25 deletions(-)
diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
index 2e42c2dcae12..ae2fe0016d2c 100644
--- a/fs/ntfs/file.c
+++ b/fs/ntfs/file.c
@@ -509,7 +509,7 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
 	u32 attr_rec_len = 0;
 	unsigned blocksize, u;
 	int err, mp_size;
-	BOOL rl_write_locked, was_hole, is_retry;
+	bool rl_write_locked, was_hole, is_retry;
 	unsigned char blocksize_bits;
 	struct {
 		u8 runlist_merged:1;
@@ -543,13 +543,13 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
 			return -ENOMEM;
 		}
 	} while (++u < nr_pages);
-	rl_write_locked = FALSE;
+	rl_write_locked = false;
 	rl = NULL;
 	err = 0;
 	vcn = lcn = -1;
 	vcn_len = 0;
 	lcn_block = -1;
-	was_hole = FALSE;
+	was_hole = false;
 	cpos = pos >> vol->cluster_size_bits;
 	end = pos + bytes;
 	cend = (end + vol->cluster_size - 1) >> vol->cluster_size_bits;
@@ -760,7 +760,7 @@ map_buffer_cached:
 			}
 			continue;
 		}
-		is_retry = FALSE;
+		is_retry = false;
 		if (!rl) {
 			down_read(&ni->runlist.lock);
 retry_remap:
@@ -776,7 +776,7 @@ retry_remap:
 				 * Successful remap, setup the map cache and
 				 * use that to deal with the buffer.
 				 */
-				was_hole = FALSE;
+				was_hole = false;
 				vcn = bh_cpos;
 				vcn_len = rl[1].vcn - vcn;
 				lcn_block = lcn << (vol->cluster_size_bits -
@@ -792,7 +792,7 @@ retry_remap:
 				if (likely(vcn + vcn_len >= cend)) {
 					if (rl_write_locked) {
 						up_write(&ni->runlist.lock);
-						rl_write_locked = FALSE;
+						rl_write_locked = false;
 					} else
 						up_read(&ni->runlist.lock);
 					rl = NULL;
@@ -818,13 +818,13 @@ retry_remap:
 				 */
 				up_read(&ni->runlist.lock);
 				down_write(&ni->runlist.lock);
-				rl_write_locked = TRUE;
+				rl_write_locked = true;
 				goto retry_remap;
 			}
 			err = ntfs_map_runlist_nolock(ni, bh_cpos,
 					NULL);
 			if (likely(!err)) {
-				is_retry = TRUE;
+				is_retry = true;
 				goto retry_remap;
 			}
 			/*
@@ -903,7 +903,7 @@ rl_not_mapped_enoent:
 			if (!rl_write_locked) {
 				up_read(&ni->runlist.lock);
 				down_write(&ni->runlist.lock);
-				rl_write_locked = TRUE;
+				rl_write_locked = true;
 				goto retry_remap;
 			}
 			/* Find the previous last allocated cluster. */
@@ -917,7 +917,7 @@ rl_not_mapped_enoent:
 				}
 			}
 			rl2 = ntfs_cluster_alloc(vol, bh_cpos, 1, lcn, DATA_ZONE,
-					FALSE);
+					false);
 			if (IS_ERR(rl2)) {
 				err = PTR_ERR(rl2);
 				ntfs_debug("Failed to allocate cluster, error code %i.",
@@ -1093,7 +1093,7 @@ rl_not_mapped_enoent:
 		status.mft_attr_mapped = 0;
 		status.mp_rebuilt = 0;
 		/* Setup the map cache and use that to deal with the buffer. */
-		was_hole = TRUE;
+		was_hole = true;
 		vcn = bh_cpos;
 		vcn_len = 1;
 		lcn_block = lcn << (vol->cluster_size_bits - blocksize_bits);
@@ -1105,7 +1105,7 @@ rl_not_mapped_enoent:
 		 */
 		if (likely(vcn + vcn_len >= cend)) {
 			up_write(&ni->runlist.lock);
-			rl_write_locked = FALSE;
+			rl_write_locked = false;
 			rl = NULL;
 		}
 		goto map_buffer_cached;
@@ -1117,7 +1117,7 @@ rl_not_mapped_enoent:
 	if (likely(!err)) {
 		if (unlikely(rl_write_locked)) {
 			up_write(&ni->runlist.lock);
-			rl_write_locked = FALSE;
+			rl_write_locked = false;
 		} else if (unlikely(rl))
 			up_read(&ni->runlist.lock);
 		rl = NULL;
@@ -1528,19 +1528,19 @@ static inline int ntfs_commit_pages_after_non_resident_write(
 	do {
 		s64 bh_pos;
 		struct page *page;
-		BOOL partial;
+		bool partial;
 
 		page = pages[u];
 		bh_pos = (s64)page->index << PAGE_CACHE_SHIFT;
 		bh = head = page_buffers(page);
-		partial = FALSE;
+		partial = false;
 		do {
 			s64 bh_end;
 
 			bh_end = bh_pos + blocksize;
 			if (bh_end <= pos || bh_pos >= end) {
 				if (!buffer_uptodate(bh))
-					partial = TRUE;
+					partial = true;
 			} else {
 				set_buffer_uptodate(bh);
 				mark_buffer_dirty(bh);
@@ -1997,7 +1997,7 @@ static ssize_t ntfs_file_buffered_write(struct kiocb *iocb,
 			 */
 			down_read(&ni->runlist.lock);
 			lcn = ntfs_attr_vcn_to_lcn_nolock(ni, pos >>
-					vol->cluster_size_bits, FALSE);
+					vol->cluster_size_bits, false);
 			up_read(&ni->runlist.lock);
 			if (unlikely(lcn < LCN_HOLE)) {
 				status = -EIO;
@@ -2176,20 +2176,18 @@ out:
 /**
  * ntfs_file_aio_write -
  */
-static ssize_t ntfs_file_aio_write(struct kiocb *iocb, const char __user *buf,
-		size_t count, loff_t pos)
+static ssize_t ntfs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
+		unsigned long nr_segs, loff_t pos)
 {
 	struct file *file = iocb->ki_filp;
 	struct address_space *mapping = file->f_mapping;
 	struct inode *inode = mapping->host;
 	ssize_t ret;
-	struct iovec local_iov = { .iov_base = (void __user *)buf,
-			.iov_len = count };
 
 	BUG_ON(iocb->ki_pos != pos);
 
 	mutex_lock(&inode->i_mutex);
-	ret = ntfs_file_aio_write_nolock(iocb, &local_iov, 1, &iocb->ki_pos);
+	ret = ntfs_file_aio_write_nolock(iocb, iov, nr_segs, &iocb->ki_pos);
 	mutex_unlock(&inode->i_mutex);
 	if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
 		int err = sync_page_range(inode, mapping, pos, ret);
@@ -2298,13 +2296,11 @@ static int ntfs_file_fsync(struct file *filp, struct dentry *dentry,
 
 const struct file_operations ntfs_file_ops = {
 	.llseek		= generic_file_llseek,	/* Seek inside file. */
-	.read		= generic_file_read,	/* Read from file. */
+	.read		= do_sync_read,		/* Read from file. */
 	.aio_read	= generic_file_aio_read, /* Async read from file. */
-	.readv		= generic_file_readv,	/* Read from file. */
 #ifdef NTFS_RW
 	.write		= ntfs_file_write,	/* Write to file. */
 	.aio_write	= ntfs_file_aio_write,	/* Async write to file. */
-	.writev		= ntfs_file_writev,	/* Write to file. */
 	/*.release	= ,*/			/* Last file is closed.  See
 						   fs/ext2/file.c::
 						   ext2_release_file() for