diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2019-03-06 13:31:36 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2019-03-06 13:31:36 -0500 |
commit | 8dcd175bc3d50b78413c56d5b17d4bddd77412ef (patch) | |
tree | 2c2fb25759b43f2e73830f07ef3b444d76825280 /mm/filemap.c | |
parent | afe6fe7036c6efdcb46cabc64bec9b6e4a005210 (diff) | |
parent | fff04900ea79915939ef6a3aad78fca6511a3034 (diff) |
Merge branch 'akpm' (patches from Andrew)
Merge misc updates from Andrew Morton:
- a few misc things
- ocfs2 updates
- most of MM
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (159 commits)
tools/testing/selftests/proc/proc-self-syscall.c: remove duplicate include
proc: more robust bulk read test
proc: test /proc/*/maps, smaps, smaps_rollup, statm
proc: use seq_puts() everywhere
proc: read kernel cpu stat pointer once
proc: remove unused argument in proc_pid_lookup()
fs/proc/thread_self.c: code cleanup for proc_setup_thread_self()
fs/proc/self.c: code cleanup for proc_setup_self()
proc: return exit code 4 for skipped tests
mm,mremap: bail out earlier in mremap_to under map pressure
mm/sparse: fix a bad comparison
mm/memory.c: do_fault: avoid usage of stale vm_area_struct
writeback: fix inode cgroup switching comment
mm/huge_memory.c: fix "orig_pud" set but not used
mm/hotplug: fix an imbalance with DEBUG_PAGEALLOC
mm/memcontrol.c: fix bad line in comment
mm/cma.c: cma_declare_contiguous: correct err handling
mm/page_ext.c: fix an imbalance with kmemleak
mm/compaction: pass pgdat to too_many_isolated() instead of zone
mm: remove zone_lru_lock() function, access ->lru_lock directly
...
Diffstat (limited to 'mm/filemap.c')
-rw-r--r-- | mm/filemap.c | 93 |
1 file changed, 67 insertions, 26 deletions
diff --git a/mm/filemap.c b/mm/filemap.c index 9f5e323e883e..a3b4021c448f 100644 --- a/mm/filemap.c +++ b/mm/filemap.c | |||
@@ -98,8 +98,8 @@ | |||
98 | * ->swap_lock (try_to_unmap_one) | 98 | * ->swap_lock (try_to_unmap_one) |
99 | * ->private_lock (try_to_unmap_one) | 99 | * ->private_lock (try_to_unmap_one) |
100 | * ->i_pages lock (try_to_unmap_one) | 100 | * ->i_pages lock (try_to_unmap_one) |
101 | * ->zone_lru_lock(zone) (follow_page->mark_page_accessed) | 101 | * ->pgdat->lru_lock (follow_page->mark_page_accessed) |
102 | * ->zone_lru_lock(zone) (check_pte_range->isolate_lru_page) | 102 | * ->pgdat->lru_lock (check_pte_range->isolate_lru_page) |
103 | * ->private_lock (page_remove_rmap->set_page_dirty) | 103 | * ->private_lock (page_remove_rmap->set_page_dirty) |
104 | * ->i_pages lock (page_remove_rmap->set_page_dirty) | 104 | * ->i_pages lock (page_remove_rmap->set_page_dirty) |
105 | * bdi.wb->list_lock (page_remove_rmap->set_page_dirty) | 105 | * bdi.wb->list_lock (page_remove_rmap->set_page_dirty) |
@@ -392,6 +392,8 @@ static int filemap_check_and_keep_errors(struct address_space *mapping) | |||
392 | * opposed to a regular memory cleansing writeback. The difference between | 392 | * opposed to a regular memory cleansing writeback. The difference between |
393 | * these two operations is that if a dirty page/buffer is encountered, it must | 393 | * these two operations is that if a dirty page/buffer is encountered, it must |
394 | * be waited upon, and not just skipped over. | 394 | * be waited upon, and not just skipped over. |
395 | * | ||
396 | * Return: %0 on success, negative error code otherwise. | ||
395 | */ | 397 | */ |
396 | int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start, | 398 | int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start, |
397 | loff_t end, int sync_mode) | 399 | loff_t end, int sync_mode) |
@@ -438,6 +440,8 @@ EXPORT_SYMBOL(filemap_fdatawrite_range); | |||
438 | * | 440 | * |
439 | * This is a mostly non-blocking flush. Not suitable for data-integrity | 441 | * This is a mostly non-blocking flush. Not suitable for data-integrity |
440 | * purposes - I/O may not be started against all dirty pages. | 442 | * purposes - I/O may not be started against all dirty pages. |
443 | * | ||
444 | * Return: %0 on success, negative error code otherwise. | ||
441 | */ | 445 | */ |
442 | int filemap_flush(struct address_space *mapping) | 446 | int filemap_flush(struct address_space *mapping) |
443 | { | 447 | { |
@@ -453,6 +457,9 @@ EXPORT_SYMBOL(filemap_flush); | |||
453 | * | 457 | * |
454 | * Find at least one page in the range supplied, usually used to check if | 458 | * Find at least one page in the range supplied, usually used to check if |
455 | * direct writing in this range will trigger a writeback. | 459 | * direct writing in this range will trigger a writeback. |
460 | * | ||
461 | * Return: %true if at least one page exists in the specified range, | ||
462 | * %false otherwise. | ||
456 | */ | 463 | */ |
457 | bool filemap_range_has_page(struct address_space *mapping, | 464 | bool filemap_range_has_page(struct address_space *mapping, |
458 | loff_t start_byte, loff_t end_byte) | 465 | loff_t start_byte, loff_t end_byte) |
@@ -529,6 +536,8 @@ static void __filemap_fdatawait_range(struct address_space *mapping, | |||
529 | * Since the error status of the address space is cleared by this function, | 536 | * Since the error status of the address space is cleared by this function, |
530 | * callers are responsible for checking the return value and handling and/or | 537 | * callers are responsible for checking the return value and handling and/or |
531 | * reporting the error. | 538 | * reporting the error. |
539 | * | ||
540 | * Return: error status of the address space. | ||
532 | */ | 541 | */ |
533 | int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte, | 542 | int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte, |
534 | loff_t end_byte) | 543 | loff_t end_byte) |
@@ -551,6 +560,8 @@ EXPORT_SYMBOL(filemap_fdatawait_range); | |||
551 | * Since the error status of the file is advanced by this function, | 560 | * Since the error status of the file is advanced by this function, |
552 | * callers are responsible for checking the return value and handling and/or | 561 | * callers are responsible for checking the return value and handling and/or |
553 | * reporting the error. | 562 | * reporting the error. |
563 | * | ||
564 | * Return: error status of the address space vs. the file->f_wb_err cursor. | ||
554 | */ | 565 | */ |
555 | int file_fdatawait_range(struct file *file, loff_t start_byte, loff_t end_byte) | 566 | int file_fdatawait_range(struct file *file, loff_t start_byte, loff_t end_byte) |
556 | { | 567 | { |
@@ -572,6 +583,8 @@ EXPORT_SYMBOL(file_fdatawait_range); | |||
572 | * Use this function if callers don't handle errors themselves. Expected | 583 | * Use this function if callers don't handle errors themselves. Expected |
573 | * call sites are system-wide / filesystem-wide data flushers: e.g. sync(2), | 584 | * call sites are system-wide / filesystem-wide data flushers: e.g. sync(2), |
574 | * fsfreeze(8) | 585 | * fsfreeze(8) |
586 | * | ||
587 | * Return: error status of the address space. | ||
575 | */ | 588 | */ |
576 | int filemap_fdatawait_keep_errors(struct address_space *mapping) | 589 | int filemap_fdatawait_keep_errors(struct address_space *mapping) |
577 | { | 590 | { |
@@ -623,6 +636,8 @@ EXPORT_SYMBOL(filemap_write_and_wait); | |||
623 | * | 636 | * |
624 | * Note that @lend is inclusive (describes the last byte to be written) so | 637 | * Note that @lend is inclusive (describes the last byte to be written) so |
625 | * that this function can be used to write to the very end-of-file (end = -1). | 638 | * that this function can be used to write to the very end-of-file (end = -1). |
639 | * | ||
640 | * Return: error status of the address space. | ||
626 | */ | 641 | */ |
627 | int filemap_write_and_wait_range(struct address_space *mapping, | 642 | int filemap_write_and_wait_range(struct address_space *mapping, |
628 | loff_t lstart, loff_t lend) | 643 | loff_t lstart, loff_t lend) |
@@ -678,6 +693,8 @@ EXPORT_SYMBOL(__filemap_set_wb_err); | |||
678 | * While we handle mapping->wb_err with atomic operations, the f_wb_err | 693 | * While we handle mapping->wb_err with atomic operations, the f_wb_err |
679 | * value is protected by the f_lock since we must ensure that it reflects | 694 | * value is protected by the f_lock since we must ensure that it reflects |
680 | * the latest value swapped in for this file descriptor. | 695 | * the latest value swapped in for this file descriptor. |
696 | * | ||
697 | * Return: %0 on success, negative error code otherwise. | ||
681 | */ | 698 | */ |
682 | int file_check_and_advance_wb_err(struct file *file) | 699 | int file_check_and_advance_wb_err(struct file *file) |
683 | { | 700 | { |
@@ -720,6 +737,8 @@ EXPORT_SYMBOL(file_check_and_advance_wb_err); | |||
720 | * | 737 | * |
721 | * After writing out and waiting on the data, we check and advance the | 738 | * After writing out and waiting on the data, we check and advance the |
722 | * f_wb_err cursor to the latest value, and return any errors detected there. | 739 | * f_wb_err cursor to the latest value, and return any errors detected there. |
740 | * | ||
741 | * Return: %0 on success, negative error code otherwise. | ||
723 | */ | 742 | */ |
724 | int file_write_and_wait_range(struct file *file, loff_t lstart, loff_t lend) | 743 | int file_write_and_wait_range(struct file *file, loff_t lstart, loff_t lend) |
725 | { | 744 | { |
@@ -753,6 +772,8 @@ EXPORT_SYMBOL(file_write_and_wait_range); | |||
753 | * caller must do that. | 772 | * caller must do that. |
754 | * | 773 | * |
755 | * The remove + add is atomic. This function cannot fail. | 774 | * The remove + add is atomic. This function cannot fail. |
775 | * | ||
776 | * Return: %0 | ||
756 | */ | 777 | */ |
757 | int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask) | 778 | int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask) |
758 | { | 779 | { |
@@ -867,6 +888,8 @@ error: | |||
867 | * | 888 | * |
868 | * This function is used to add a page to the pagecache. It must be locked. | 889 | * This function is used to add a page to the pagecache. It must be locked. |
869 | * This function does not add the page to the LRU. The caller must do that. | 890 | * This function does not add the page to the LRU. The caller must do that. |
891 | * | ||
892 | * Return: %0 on success, negative error code otherwise. | ||
870 | */ | 893 | */ |
871 | int add_to_page_cache_locked(struct page *page, struct address_space *mapping, | 894 | int add_to_page_cache_locked(struct page *page, struct address_space *mapping, |
872 | pgoff_t offset, gfp_t gfp_mask) | 895 | pgoff_t offset, gfp_t gfp_mask) |
@@ -1463,7 +1486,7 @@ EXPORT_SYMBOL(page_cache_prev_miss); | |||
1463 | * If the slot holds a shadow entry of a previously evicted page, or a | 1486 | * If the slot holds a shadow entry of a previously evicted page, or a |
1464 | * swap entry from shmem/tmpfs, it is returned. | 1487 | * swap entry from shmem/tmpfs, it is returned. |
1465 | * | 1488 | * |
1466 | * Otherwise, %NULL is returned. | 1489 | * Return: the found page or shadow entry, %NULL if nothing is found. |
1467 | */ | 1490 | */ |
1468 | struct page *find_get_entry(struct address_space *mapping, pgoff_t offset) | 1491 | struct page *find_get_entry(struct address_space *mapping, pgoff_t offset) |
1469 | { | 1492 | { |
@@ -1521,9 +1544,9 @@ EXPORT_SYMBOL(find_get_entry); | |||
1521 | * If the slot holds a shadow entry of a previously evicted page, or a | 1544 | * If the slot holds a shadow entry of a previously evicted page, or a |
1522 | * swap entry from shmem/tmpfs, it is returned. | 1545 | * swap entry from shmem/tmpfs, it is returned. |
1523 | * | 1546 | * |
1524 | * Otherwise, %NULL is returned. | ||
1525 | * | ||
1526 | * find_lock_entry() may sleep. | 1547 | * find_lock_entry() may sleep. |
1548 | * | ||
1549 | * Return: the found page or shadow entry, %NULL if nothing is found. | ||
1527 | */ | 1550 | */ |
1528 | struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset) | 1551 | struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset) |
1529 | { | 1552 | { |
@@ -1563,12 +1586,14 @@ EXPORT_SYMBOL(find_lock_entry); | |||
1563 | * - FGP_CREAT: If page is not present then a new page is allocated using | 1586 | * - FGP_CREAT: If page is not present then a new page is allocated using |
1564 | * @gfp_mask and added to the page cache and the VM's LRU | 1587 | * @gfp_mask and added to the page cache and the VM's LRU |
1565 | * list. The page is returned locked and with an increased | 1588 | * list. The page is returned locked and with an increased |
1566 | * refcount. Otherwise, NULL is returned. | 1589 | * refcount. |
1567 | * | 1590 | * |
1568 | * If FGP_LOCK or FGP_CREAT are specified then the function may sleep even | 1591 | * If FGP_LOCK or FGP_CREAT are specified then the function may sleep even |
1569 | * if the GFP flags specified for FGP_CREAT are atomic. | 1592 | * if the GFP flags specified for FGP_CREAT are atomic. |
1570 | * | 1593 | * |
1571 | * If there is a page cache page, it is returned with an increased refcount. | 1594 | * If there is a page cache page, it is returned with an increased refcount. |
1595 | * | ||
1596 | * Return: the found page or %NULL otherwise. | ||
1572 | */ | 1597 | */ |
1573 | struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset, | 1598 | struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset, |
1574 | int fgp_flags, gfp_t gfp_mask) | 1599 | int fgp_flags, gfp_t gfp_mask) |
@@ -1656,8 +1681,7 @@ EXPORT_SYMBOL(pagecache_get_page); | |||
1656 | * Any shadow entries of evicted pages, or swap entries from | 1681 | * Any shadow entries of evicted pages, or swap entries from |
1657 | * shmem/tmpfs, are included in the returned array. | 1682 | * shmem/tmpfs, are included in the returned array. |
1658 | * | 1683 | * |
1659 | * find_get_entries() returns the number of pages and shadow entries | 1684 | * Return: the number of pages and shadow entries which were found. |
1660 | * which were found. | ||
1661 | */ | 1685 | */ |
1662 | unsigned find_get_entries(struct address_space *mapping, | 1686 | unsigned find_get_entries(struct address_space *mapping, |
1663 | pgoff_t start, unsigned int nr_entries, | 1687 | pgoff_t start, unsigned int nr_entries, |
@@ -1727,8 +1751,8 @@ retry: | |||
1727 | * indexes. There may be holes in the indices due to not-present pages. | 1751 | * indexes. There may be holes in the indices due to not-present pages. |
1728 | * We also update @start to index the next page for the traversal. | 1752 | * We also update @start to index the next page for the traversal. |
1729 | * | 1753 | * |
1730 | * find_get_pages_range() returns the number of pages which were found. If this | 1754 | * Return: the number of pages which were found. If this number is |
1731 | * number is smaller than @nr_pages, the end of specified range has been | 1755 | * smaller than @nr_pages, the end of specified range has been |
1732 | * reached. | 1756 | * reached. |
1733 | */ | 1757 | */ |
1734 | unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start, | 1758 | unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start, |
@@ -1765,7 +1789,7 @@ unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start, | |||
1765 | 1789 | ||
1766 | pages[ret] = page; | 1790 | pages[ret] = page; |
1767 | if (++ret == nr_pages) { | 1791 | if (++ret == nr_pages) { |
1768 | *start = page->index + 1; | 1792 | *start = xas.xa_index + 1; |
1769 | goto out; | 1793 | goto out; |
1770 | } | 1794 | } |
1771 | continue; | 1795 | continue; |
@@ -1801,7 +1825,7 @@ out: | |||
1801 | * find_get_pages_contig() works exactly like find_get_pages(), except | 1825 | * find_get_pages_contig() works exactly like find_get_pages(), except |
1802 | * that the returned number of pages are guaranteed to be contiguous. | 1826 | * that the returned number of pages are guaranteed to be contiguous. |
1803 | * | 1827 | * |
1804 | * find_get_pages_contig() returns the number of pages which were found. | 1828 | * Return: the number of pages which were found. |
1805 | */ | 1829 | */ |
1806 | unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index, | 1830 | unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index, |
1807 | unsigned int nr_pages, struct page **pages) | 1831 | unsigned int nr_pages, struct page **pages) |
@@ -1837,16 +1861,6 @@ unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index, | |||
1837 | if (unlikely(page != xas_reload(&xas))) | 1861 | if (unlikely(page != xas_reload(&xas))) |
1838 | goto put_page; | 1862 | goto put_page; |
1839 | 1863 | ||
1840 | /* | ||
1841 | * must check mapping and index after taking the ref. | ||
1842 | * otherwise we can get both false positives and false | ||
1843 | * negatives, which is just confusing to the caller. | ||
1844 | */ | ||
1845 | if (!page->mapping || page_to_pgoff(page) != xas.xa_index) { | ||
1846 | put_page(page); | ||
1847 | break; | ||
1848 | } | ||
1849 | |||
1850 | pages[ret] = page; | 1864 | pages[ret] = page; |
1851 | if (++ret == nr_pages) | 1865 | if (++ret == nr_pages) |
1852 | break; | 1866 | break; |
@@ -1872,6 +1886,8 @@ EXPORT_SYMBOL(find_get_pages_contig); | |||
1872 | * | 1886 | * |
1873 | * Like find_get_pages, except we only return pages which are tagged with | 1887 | * Like find_get_pages, except we only return pages which are tagged with |
1874 | * @tag. We update @index to index the next page for the traversal. | 1888 | * @tag. We update @index to index the next page for the traversal. |
1889 | * | ||
1890 | * Return: the number of pages which were found. | ||
1875 | */ | 1891 | */ |
1876 | unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index, | 1892 | unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index, |
1877 | pgoff_t end, xa_mark_t tag, unsigned int nr_pages, | 1893 | pgoff_t end, xa_mark_t tag, unsigned int nr_pages, |
@@ -1911,7 +1927,7 @@ unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index, | |||
1911 | 1927 | ||
1912 | pages[ret] = page; | 1928 | pages[ret] = page; |
1913 | if (++ret == nr_pages) { | 1929 | if (++ret == nr_pages) { |
1914 | *index = page->index + 1; | 1930 | *index = xas.xa_index + 1; |
1915 | goto out; | 1931 | goto out; |
1916 | } | 1932 | } |
1917 | continue; | 1933 | continue; |
@@ -1949,6 +1965,8 @@ EXPORT_SYMBOL(find_get_pages_range_tag); | |||
1949 | * | 1965 | * |
1950 | * Like find_get_entries, except we only return entries which are tagged with | 1966 | * Like find_get_entries, except we only return entries which are tagged with |
1951 | * @tag. | 1967 | * @tag. |
1968 | * | ||
1969 | * Return: the number of entries which were found. | ||
1952 | */ | 1970 | */ |
1953 | unsigned find_get_entries_tag(struct address_space *mapping, pgoff_t start, | 1971 | unsigned find_get_entries_tag(struct address_space *mapping, pgoff_t start, |
1954 | xa_mark_t tag, unsigned int nr_entries, | 1972 | xa_mark_t tag, unsigned int nr_entries, |
@@ -2034,6 +2052,10 @@ static void shrink_readahead_size_eio(struct file *filp, | |||
2034 | * | 2052 | * |
2035 | * This is really ugly. But the goto's actually try to clarify some | 2053 | * This is really ugly. But the goto's actually try to clarify some |
2036 | * of the logic when it comes to error handling etc. | 2054 | * of the logic when it comes to error handling etc. |
2055 | * | ||
2056 | * Return: | ||
2057 | * * total number of bytes copied, including those that were already @written | ||
2058 | * * negative error code if nothing was copied | ||
2037 | */ | 2059 | */ |
2038 | static ssize_t generic_file_buffered_read(struct kiocb *iocb, | 2060 | static ssize_t generic_file_buffered_read(struct kiocb *iocb, |
2039 | struct iov_iter *iter, ssize_t written) | 2061 | struct iov_iter *iter, ssize_t written) |
@@ -2295,6 +2317,9 @@ out: | |||
2295 | * | 2317 | * |
2296 | * This is the "read_iter()" routine for all filesystems | 2318 | * This is the "read_iter()" routine for all filesystems |
2297 | * that can use the page cache directly. | 2319 | * that can use the page cache directly. |
2320 | * Return: | ||
2321 | * * number of bytes copied, even for partial reads | ||
2322 | * * negative error code if nothing was read | ||
2298 | */ | 2323 | */ |
2299 | ssize_t | 2324 | ssize_t |
2300 | generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter) | 2325 | generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter) |
@@ -2362,6 +2387,8 @@ EXPORT_SYMBOL(generic_file_read_iter); | |||
2362 | * | 2387 | * |
2363 | * This adds the requested page to the page cache if it isn't already there, | 2388 | * This adds the requested page to the page cache if it isn't already there, |
2364 | * and schedules an I/O to read in its contents from disk. | 2389 | * and schedules an I/O to read in its contents from disk. |
2390 | * | ||
2391 | * Return: %0 on success, negative error code otherwise. | ||
2365 | */ | 2392 | */ |
2366 | static int page_cache_read(struct file *file, pgoff_t offset, gfp_t gfp_mask) | 2393 | static int page_cache_read(struct file *file, pgoff_t offset, gfp_t gfp_mask) |
2367 | { | 2394 | { |
@@ -2476,6 +2503,8 @@ static void do_async_mmap_readahead(struct vm_area_struct *vma, | |||
2476 | * has not been released. | 2503 | * has not been released. |
2477 | * | 2504 | * |
2478 | * We never return with VM_FAULT_RETRY and a bit from VM_FAULT_ERROR set. | 2505 | * We never return with VM_FAULT_RETRY and a bit from VM_FAULT_ERROR set. |
2506 | * | ||
2507 | * Return: bitwise-OR of %VM_FAULT_ codes. | ||
2479 | */ | 2508 | */ |
2480 | vm_fault_t filemap_fault(struct vm_fault *vmf) | 2509 | vm_fault_t filemap_fault(struct vm_fault *vmf) |
2481 | { | 2510 | { |
@@ -2861,6 +2890,8 @@ out: | |||
2861 | * not set, try to fill the page and wait for it to become unlocked. | 2890 | * not set, try to fill the page and wait for it to become unlocked. |
2862 | * | 2891 | * |
2863 | * If the page does not get brought uptodate, return -EIO. | 2892 | * If the page does not get brought uptodate, return -EIO. |
2893 | * | ||
2894 | * Return: up to date page on success, ERR_PTR() on failure. | ||
2864 | */ | 2895 | */ |
2865 | struct page *read_cache_page(struct address_space *mapping, | 2896 | struct page *read_cache_page(struct address_space *mapping, |
2866 | pgoff_t index, | 2897 | pgoff_t index, |
@@ -2881,6 +2912,8 @@ EXPORT_SYMBOL(read_cache_page); | |||
2881 | * any new page allocations done using the specified allocation flags. | 2912 | * any new page allocations done using the specified allocation flags. |
2882 | * | 2913 | * |
2883 | * If the page does not get brought uptodate, return -EIO. | 2914 | * If the page does not get brought uptodate, return -EIO. |
2915 | * | ||
2916 | * Return: up to date page on success, ERR_PTR() on failure. | ||
2884 | */ | 2917 | */ |
2885 | struct page *read_cache_page_gfp(struct address_space *mapping, | 2918 | struct page *read_cache_page_gfp(struct address_space *mapping, |
2886 | pgoff_t index, | 2919 | pgoff_t index, |
@@ -3081,7 +3114,7 @@ generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from) | |||
3081 | if (iocb->ki_flags & IOCB_NOWAIT) { | 3114 | if (iocb->ki_flags & IOCB_NOWAIT) { |
3082 | /* If there are pages to writeback, return */ | 3115 | /* If there are pages to writeback, return */ |
3083 | if (filemap_range_has_page(inode->i_mapping, pos, | 3116 | if (filemap_range_has_page(inode->i_mapping, pos, |
3084 | pos + write_len)) | 3117 | pos + write_len - 1)) |
3085 | return -EAGAIN; | 3118 | return -EAGAIN; |
3086 | } else { | 3119 | } else { |
3087 | written = filemap_write_and_wait_range(mapping, pos, | 3120 | written = filemap_write_and_wait_range(mapping, pos, |
@@ -3264,6 +3297,10 @@ EXPORT_SYMBOL(generic_perform_write); | |||
3264 | * This function does *not* take care of syncing data in case of O_SYNC write. | 3297 | * This function does *not* take care of syncing data in case of O_SYNC write. |
3265 | * A caller has to handle it. This is mainly due to the fact that we want to | 3298 | * A caller has to handle it. This is mainly due to the fact that we want to |
3266 | * avoid syncing under i_mutex. | 3299 | * avoid syncing under i_mutex. |
3300 | * | ||
3301 | * Return: | ||
3302 | * * number of bytes written, even for truncated writes | ||
3303 | * * negative error code if no data has been written at all | ||
3267 | */ | 3304 | */ |
3268 | ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from) | 3305 | ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from) |
3269 | { | 3306 | { |
@@ -3348,6 +3385,10 @@ EXPORT_SYMBOL(__generic_file_write_iter); | |||
3348 | * This is a wrapper around __generic_file_write_iter() to be used by most | 3385 | * This is a wrapper around __generic_file_write_iter() to be used by most |
3349 | * filesystems. It takes care of syncing the file in case of O_SYNC file | 3386 | * filesystems. It takes care of syncing the file in case of O_SYNC file |
3350 | * and acquires i_mutex as needed. | 3387 | * and acquires i_mutex as needed. |
3388 | * Return: | ||
3389 | * * negative error code if no data has been written at all or | ||
3390 | * vfs_fsync_range() failed for a synchronous write | ||
3391 | * * number of bytes written, even for truncated writes | ||
3351 | */ | 3392 | */ |
3352 | ssize_t generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from) | 3393 | ssize_t generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from) |
3353 | { | 3394 | { |
@@ -3374,8 +3415,7 @@ EXPORT_SYMBOL(generic_file_write_iter); | |||
3374 | * @gfp_mask: memory allocation flags (and I/O mode) | 3415 | * @gfp_mask: memory allocation flags (and I/O mode) |
3375 | * | 3416 | * |
3376 | * The address_space is to try to release any data against the page | 3417 | * The address_space is to try to release any data against the page |
3377 | * (presumably at page->private). If the release was successful, return '1'. | 3418 | * (presumably at page->private). |
3378 | * Otherwise return zero. | ||
3379 | * | 3419 | * |
3380 | * This may also be called if PG_fscache is set on a page, indicating that the | 3420 | * This may also be called if PG_fscache is set on a page, indicating that the |
3381 | * page is known to the local caching routines. | 3421 | * page is known to the local caching routines. |
@@ -3383,6 +3423,7 @@ EXPORT_SYMBOL(generic_file_write_iter); | |||
3383 | * The @gfp_mask argument specifies whether I/O may be performed to release | 3423 | * The @gfp_mask argument specifies whether I/O may be performed to release |
3384 | * this page (__GFP_IO), and whether the call may block (__GFP_RECLAIM & __GFP_FS). | 3424 | * this page (__GFP_IO), and whether the call may block (__GFP_RECLAIM & __GFP_FS). |
3385 | * | 3425 | * |
3426 | * Return: %1 if the release was successful, %0 otherwise. | ||
3386 | */ | 3427 | */ |
3387 | int try_to_release_page(struct page *page, gfp_t gfp_mask) | 3428 | int try_to_release_page(struct page *page, gfp_t gfp_mask) |
3388 | { | 3429 | { |