Diffstat (limited to 'mm/filemap.c')

 -rw-r--r--  mm/filemap.c | 81
 1 file changed, 36 insertions, 45 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index 1694623a6289..6f1be573a5e6 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -519,7 +519,7 @@ EXPORT_SYMBOL(filemap_write_and_wait);
  *
  * Write out and wait upon file offsets lstart->lend, inclusive.
  *
- * Note that `lend' is inclusive (describes the last byte to be written) so
+ * Note that @lend is inclusive (describes the last byte to be written) so
  * that this function can be used to write to the very end-of-file (end = -1).
  */
 int filemap_write_and_wait_range(struct address_space *mapping,
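A minimal usage sketch of the inclusive-@lend convention documented above (not part of the diff; the helper name flush_whole_file() is hypothetical, and LLONG_MAX is assumed to be an acceptable "through end-of-file" bound alongside the -1 mentioned in the comment):

/* Hypothetical helper: write back and wait on the whole file. Because
 * @lend names the last byte to write (inclusive), a maximal upper bound
 * covers everything through end-of-file. */
static int flush_whole_file(struct address_space *mapping)
{
	return filemap_write_and_wait_range(mapping, 0, LLONG_MAX);
}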
@@ -1277,12 +1277,14 @@ EXPORT_SYMBOL(find_lock_entry);
  *
  * PCG flags modify how the page is returned.
  *
- * FGP_ACCESSED: the page will be marked accessed
- * FGP_LOCK: Page is return locked
- * FGP_CREAT: If page is not present then a new page is allocated using
- *		@gfp_mask and added to the page cache and the VM's LRU
- *		list. The page is returned locked and with an increased
- *		refcount. Otherwise, %NULL is returned.
+ * @fgp_flags can be:
+ *
+ * - FGP_ACCESSED: the page will be marked accessed
+ * - FGP_LOCK: Page is return locked
+ * - FGP_CREAT: If page is not present then a new page is allocated using
+ *   @gfp_mask and added to the page cache and the VM's LRU
+ *   list. The page is returned locked and with an increased
+ *   refcount. Otherwise, NULL is returned.
  *
  * If FGP_LOCK or FGP_CREAT are specified then the function may sleep even
  * if the GFP flags specified for FGP_CREAT are atomic.
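As a usage sketch of the flags documented above (not part of the diff, loosely modeled on grab_cache_page_write_begin(); the wrapper name get_locked_page() is hypothetical):

/* Hypothetical wrapper: find or create the page at @index. With these
 * flags the page comes back locked, marked accessed and with an extra
 * refcount, or NULL if it could not be created. May sleep, since both
 * FGP_LOCK and FGP_CREAT are set. */
static struct page *get_locked_page(struct address_space *mapping,
				    pgoff_t index)
{
	return pagecache_get_page(mapping, index,
				  FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
				  mapping_gfp_mask(mapping));
}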
@@ -2033,7 +2035,6 @@ generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
 	if (iocb->ki_flags & IOCB_DIRECT) {
 		struct address_space *mapping = file->f_mapping;
 		struct inode *inode = mapping->host;
-		struct iov_iter data = *iter;
 		loff_t size;
 
 		size = i_size_read(inode);
@@ -2044,11 +2045,12 @@ generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
 
 		file_accessed(file);
 
-		retval = mapping->a_ops->direct_IO(iocb, &data);
+		retval = mapping->a_ops->direct_IO(iocb, iter);
 		if (retval >= 0) {
 			iocb->ki_pos += retval;
-			iov_iter_advance(iter, retval);
+			count -= retval;
 		}
+		iov_iter_revert(iter, count - iov_iter_count(iter));
 
 		/*
 		 * Btrfs can have a short DIO read if we encounter
@@ -2059,7 +2061,7 @@ generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
 		 * the rest of the read. Buffered reads will not work for
 		 * DAX files, so don't bother trying.
 		 */
-		if (retval < 0 || !iov_iter_count(iter) || iocb->ki_pos >= size ||
+		if (retval < 0 || !count || iocb->ki_pos >= size ||
 		    IS_DAX(inode))
 			goto out;
 	}
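With the local copy of the iterator gone, ->direct_IO() now consumes @iter itself and the caller rewinds any consumption beyond the bytes actually reported read; count is presumably snapshotted from iov_iter_count(iter) earlier in the function, outside the hunks shown here. A standalone userspace sketch of that accounting (the toy_* names are illustrative, not kernel code):

#include <stdio.h>

struct toy_iter { unsigned long count; };	/* bytes left in the iterator */

static void toy_advance(struct toy_iter *i, unsigned long n) { i->count -= n; }
static void toy_revert(struct toy_iter *i, unsigned long n)  { i->count += n; }

int main(void)
{
	struct toy_iter iter = { .count = 8192 };	/* caller asked for 8 KiB */
	unsigned long count = iter.count;		/* snapshot taken up front */
	long retval = 4096;				/* short read: only 4 KiB done */

	/* Pretend ->direct_IO() consumed the whole iterator even though it
	 * completed only half of it (e.g. the btrfs short-read case above). */
	toy_advance(&iter, 8192);

	if (retval >= 0)
		count -= retval;			/* bytes the caller still wants */
	toy_revert(&iter, count - iter.count);		/* rewind the over-consumption */

	/* Both are 4096: exactly what the buffered fallback still has to read. */
	printf("iter.count=%lu count=%lu\n", iter.count, count);
	return 0;
}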
@@ -2202,12 +2204,12 @@ int filemap_fault(struct vm_fault *vmf)
 	struct file_ra_state *ra = &file->f_ra;
 	struct inode *inode = mapping->host;
 	pgoff_t offset = vmf->pgoff;
+	pgoff_t max_off;
 	struct page *page;
-	loff_t size;
 	int ret = 0;
 
-	size = round_up(i_size_read(inode), PAGE_SIZE);
-	if (offset >= size >> PAGE_SHIFT)
+	max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
+	if (unlikely(offset >= max_off))
 		return VM_FAULT_SIGBUS;
 
 	/*
@@ -2256,8 +2258,8 @@ retry_find:
 	 * Found the page and have a reference on it.
 	 * We must recheck i_size under page lock.
 	 */
-	size = round_up(i_size_read(inode), PAGE_SIZE);
-	if (unlikely(offset >= size >> PAGE_SHIFT)) {
+	max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
+	if (unlikely(offset >= max_off)) {
 		unlock_page(page);
 		put_page(page);
 		return VM_FAULT_SIGBUS;
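Both EOF checks in filemap_fault() now compute the first page index past end-of-file as DIV_ROUND_UP(i_size, PAGE_SIZE) instead of round_up(i_size, PAGE_SIZE) >> PAGE_SHIFT. A standalone illustration of the equivalence for non-zero sizes (hard-coded 4 KiB pages, simplified round_up(); not kernel code):

#include <stdio.h>

#define PAGE_SHIFT		12
#define PAGE_SIZE		(1UL << PAGE_SHIFT)
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define round_up(x, y)		((((x) - 1) | ((y) - 1)) + 1)	/* simplified */

int main(void)
{
	unsigned long sizes[] = { 1, PAGE_SIZE, PAGE_SIZE + 1, 3 * PAGE_SIZE };

	for (int i = 0; i < 4; i++) {
		unsigned long size = sizes[i];
		unsigned long max_off = DIV_ROUND_UP(size, PAGE_SIZE);

		/* A fault at pgoff >= max_off lies past EOF -> VM_FAULT_SIGBUS. */
		printf("i_size=%5lu  max_off=%lu  old form=%lu\n", size, max_off,
		       round_up(size, PAGE_SIZE) >> PAGE_SHIFT);
	}
	return 0;
}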
@@ -2323,7 +2325,7 @@ void filemap_map_pages(struct vm_fault *vmf,
 	struct file *file = vmf->vma->vm_file;
 	struct address_space *mapping = file->f_mapping;
 	pgoff_t last_pgoff = start_pgoff;
-	loff_t size;
+	unsigned long max_idx;
 	struct page *head, *page;
 
 	rcu_read_lock();
@@ -2369,8 +2371,8 @@ repeat:
 		if (page->mapping != mapping || !PageUptodate(page))
 			goto unlock;
 
-		size = round_up(i_size_read(mapping->host), PAGE_SIZE);
-		if (page->index >= size >> PAGE_SHIFT)
+		max_idx = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
+		if (page->index >= max_idx)
 			goto unlock;
 
 		if (file->f_ra.mmap_miss > 0)
@@ -2704,7 +2706,6 @@ generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from)
 	ssize_t written;
 	size_t write_len;
 	pgoff_t end;
-	struct iov_iter data;
 
 	write_len = iov_iter_count(from);
 	end = (pos + write_len - 1) >> PAGE_SHIFT;
@@ -2719,22 +2720,19 @@ generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from)
 	 * about to write. We do this *before* the write so that we can return
 	 * without clobbering -EIOCBQUEUED from ->direct_IO().
 	 */
-	if (mapping->nrpages) {
-		written = invalidate_inode_pages2_range(mapping,
+	written = invalidate_inode_pages2_range(mapping,
 					pos >> PAGE_SHIFT, end);
 	/*
 	 * If a page can not be invalidated, return 0 to fall back
 	 * to buffered write.
 	 */
 	if (written) {
 		if (written == -EBUSY)
 			return 0;
 		goto out;
-		}
 	}
 
-	data = *from;
-	written = mapping->a_ops->direct_IO(iocb, &data);
+	written = mapping->a_ops->direct_IO(iocb, from);
 
 	/*
 	 * Finally, try again to invalidate clean pages which might have been
@@ -2744,20 +2742,19 @@ generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from)
 	 * so we don't support it 100%. If this invalidation
 	 * fails, tough, the write still worked...
 	 */
-	if (mapping->nrpages) {
-		invalidate_inode_pages2_range(mapping,
-					pos >> PAGE_SHIFT, end);
-	}
+	invalidate_inode_pages2_range(mapping,
+				pos >> PAGE_SHIFT, end);
 
 	if (written > 0) {
 		pos += written;
-		iov_iter_advance(from, written);
+		write_len -= written;
 		if (pos > i_size_read(inode) && !S_ISBLK(inode->i_mode)) {
 			i_size_write(inode, pos);
 			mark_inode_dirty(inode);
 		}
 		iocb->ki_pos = pos;
 	}
+	iov_iter_revert(from, write_len - iov_iter_count(from));
 out:
 	return written;
 }
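The range handed to invalidate_inode_pages2_range() in both calls above comes from the earlier arithmetic end = (pos + write_len - 1) >> PAGE_SHIFT, i.e. the inclusive span of page indexes the direct write can overlap. A standalone worked example (arbitrary numbers, not kernel code):

#include <stdio.h>

#define PAGE_SHIFT	12

int main(void)
{
	unsigned long long pos = 5000, write_len = 10000;	/* example values */
	unsigned long first = pos >> PAGE_SHIFT;
	unsigned long end = (pos + write_len - 1) >> PAGE_SHIFT;

	/* Pages first..end (inclusive) are invalidated before and after
	 * the direct write: here indexes 1..3 for bytes [5000, 15000). */
	printf("write [%llu, %llu) touches page indexes %lu..%lu\n",
	       pos, pos + write_len, first, end);
	return 0;
}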
@@ -2794,12 +2791,6 @@ ssize_t generic_perform_write(struct file *file,
 	ssize_t written = 0;
 	unsigned int flags = 0;
 
-	/*
-	 * Copies from kernel address space cannot fail (NFSD is a big user).
-	 */
-	if (!iter_is_iovec(i))
-		flags |= AOP_FLAG_UNINTERRUPTIBLE;
-
 	do {
 		struct page *page;
 		unsigned long offset;	/* Offset into pagecache page */
@@ -3001,7 +2992,7 @@ EXPORT_SYMBOL(generic_file_write_iter);
  * @gfp_mask: memory allocation flags (and I/O mode)
  *
  * The address_space is to try to release any data against the page
- * (presumably at page->private). If the release was successful, return `1'.
+ * (presumably at page->private). If the release was successful, return '1'.
  * Otherwise return zero.
  *
  * This may also be called if PG_fscache is set on a page, indicating that the