author     Mike Rapoport <rppt@linux.ibm.com>              2019-03-05 18:48:42 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2019-03-06 00:07:20 -0500
commit     a862f68a8b360086f248cbc3606029441b5f5197 (patch)
tree       c38cb46b547d865da3df438e3db0284003bdd5b1 /mm/filemap.c
parent     bc8ff3ca6589d63c6d10f5ee8bed38f74851b469 (diff)
docs/core-api/mm: fix return value descriptions in mm/
Many kernel-doc comments in mm/ have the return value descriptions either
misformatted or omitted at all which makes kernel-doc script unhappy:

$ make V=1 htmldocs
...
./mm/util.c:36: info: Scanning doc for kstrdup
./mm/util.c:41: warning: No description found for return value of 'kstrdup'
./mm/util.c:57: info: Scanning doc for kstrdup_const
./mm/util.c:66: warning: No description found for return value of 'kstrdup_const'
./mm/util.c:75: info: Scanning doc for kstrndup
./mm/util.c:83: warning: No description found for return value of 'kstrndup'
...

Fixing the formatting and adding the missing return value descriptions
eliminates ~100 such warnings.

Link: http://lkml.kernel.org/r/1549549644-4903-4-git-send-email-rppt@linux.ibm.com
Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Jonathan Corbet <corbet@lwn.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
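For readers unfamiliar with kernel-doc, the change applied throughout this
patch is the addition of a "Return:" section to each function's comment block.
A minimal sketch of the convention, using the kstrdup() prototype from the
warnings quoted above (illustrative only; the mm/util.c side of the fix is not
shown in this view because the diffstat below is limited to mm/filemap.c, and
the exact wording there may differ):

/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */
char *kstrdup(const char *s, gfp_t gfp);

With a "Return:" section formatted this way, the kernel-doc script run by
"make V=1 htmldocs" no longer warns about a missing return value description
for that function.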
Diffstat (limited to 'mm/filemap.c')
-rw-r--r--  mm/filemap.c  73
1 file changed, 62 insertions(+), 11 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index e59fdecdab74..ae0022f6106d 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -392,6 +392,8 @@ static int filemap_check_and_keep_errors(struct address_space *mapping)
  * opposed to a regular memory cleansing writeback. The difference between
  * these two operations is that if a dirty page/buffer is encountered, it must
  * be waited upon, and not just skipped over.
+ *
+ * Return: %0 on success, negative error code otherwise.
  */
 int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
 				loff_t end, int sync_mode)
@@ -438,6 +440,8 @@ EXPORT_SYMBOL(filemap_fdatawrite_range);
  *
  * This is a mostly non-blocking flush. Not suitable for data-integrity
  * purposes - I/O may not be started against all dirty pages.
+ *
+ * Return: %0 on success, negative error code otherwise.
  */
 int filemap_flush(struct address_space *mapping)
 {
@@ -453,6 +457,9 @@ EXPORT_SYMBOL(filemap_flush);
  *
  * Find at least one page in the range supplied, usually used to check if
  * direct writing in this range will trigger a writeback.
+ *
+ * Return: %true if at least one page exists in the specified range,
+ * %false otherwise.
  */
 bool filemap_range_has_page(struct address_space *mapping,
 			   loff_t start_byte, loff_t end_byte)
@@ -529,6 +536,8 @@ static void __filemap_fdatawait_range(struct address_space *mapping,
  * Since the error status of the address space is cleared by this function,
  * callers are responsible for checking the return value and handling and/or
  * reporting the error.
+ *
+ * Return: error status of the address space.
  */
 int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte,
 			    loff_t end_byte)
@@ -551,6 +560,8 @@ EXPORT_SYMBOL(filemap_fdatawait_range);
  * Since the error status of the file is advanced by this function,
  * callers are responsible for checking the return value and handling and/or
  * reporting the error.
+ *
+ * Return: error status of the address space vs. the file->f_wb_err cursor.
  */
 int file_fdatawait_range(struct file *file, loff_t start_byte, loff_t end_byte)
 {
@@ -572,6 +583,8 @@ EXPORT_SYMBOL(file_fdatawait_range);
  * Use this function if callers don't handle errors themselves. Expected
  * call sites are system-wide / filesystem-wide data flushers: e.g. sync(2),
  * fsfreeze(8)
+ *
+ * Return: error status of the address space.
  */
 int filemap_fdatawait_keep_errors(struct address_space *mapping)
 {
@@ -623,6 +636,8 @@ EXPORT_SYMBOL(filemap_write_and_wait);
  *
  * Note that @lend is inclusive (describes the last byte to be written) so
  * that this function can be used to write to the very end-of-file (end = -1).
+ *
+ * Return: error status of the address space.
  */
 int filemap_write_and_wait_range(struct address_space *mapping,
 				 loff_t lstart, loff_t lend)
@@ -678,6 +693,8 @@ EXPORT_SYMBOL(__filemap_set_wb_err);
  * While we handle mapping->wb_err with atomic operations, the f_wb_err
  * value is protected by the f_lock since we must ensure that it reflects
  * the latest value swapped in for this file descriptor.
+ *
+ * Return: %0 on success, negative error code otherwise.
  */
 int file_check_and_advance_wb_err(struct file *file)
 {
@@ -720,6 +737,8 @@ EXPORT_SYMBOL(file_check_and_advance_wb_err);
  *
  * After writing out and waiting on the data, we check and advance the
  * f_wb_err cursor to the latest value, and return any errors detected there.
+ *
+ * Return: %0 on success, negative error code otherwise.
  */
 int file_write_and_wait_range(struct file *file, loff_t lstart, loff_t lend)
 {
@@ -753,6 +772,8 @@ EXPORT_SYMBOL(file_write_and_wait_range);
  * caller must do that.
  *
  * The remove + add is atomic. This function cannot fail.
+ *
+ * Return: %0
  */
 int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
 {
@@ -867,6 +888,8 @@ error:
  *
  * This function is used to add a page to the pagecache. It must be locked.
  * This function does not add the page to the LRU. The caller must do that.
+ *
+ * Return: %0 on success, negative error code otherwise.
  */
 int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
 				pgoff_t offset, gfp_t gfp_mask)
@@ -1463,7 +1486,7 @@ EXPORT_SYMBOL(page_cache_prev_miss);
  * If the slot holds a shadow entry of a previously evicted page, or a
  * swap entry from shmem/tmpfs, it is returned.
  *
- * Otherwise, %NULL is returned.
+ * Return: the found page or shadow entry, %NULL if nothing is found.
  */
 struct page *find_get_entry(struct address_space *mapping, pgoff_t offset)
 {
@@ -1521,9 +1544,9 @@ EXPORT_SYMBOL(find_get_entry);
  * If the slot holds a shadow entry of a previously evicted page, or a
  * swap entry from shmem/tmpfs, it is returned.
  *
- * Otherwise, %NULL is returned.
- *
  * find_lock_entry() may sleep.
+ *
+ * Return: the found page or shadow entry, %NULL if nothing is found.
  */
 struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset)
 {
@@ -1563,12 +1586,14 @@ EXPORT_SYMBOL(find_lock_entry);
  * - FGP_CREAT: If page is not present then a new page is allocated using
  *   @gfp_mask and added to the page cache and the VM's LRU
  *   list. The page is returned locked and with an increased
- *   refcount. Otherwise, NULL is returned.
+ *   refcount.
  *
  * If FGP_LOCK or FGP_CREAT are specified then the function may sleep even
  * if the GFP flags specified for FGP_CREAT are atomic.
  *
  * If there is a page cache page, it is returned with an increased refcount.
+ *
+ * Return: the found page or %NULL otherwise.
  */
 struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
 	int fgp_flags, gfp_t gfp_mask)
@@ -1656,8 +1681,7 @@ EXPORT_SYMBOL(pagecache_get_page);
  * Any shadow entries of evicted pages, or swap entries from
  * shmem/tmpfs, are included in the returned array.
  *
- * find_get_entries() returns the number of pages and shadow entries
- * which were found.
+ * Return: the number of pages and shadow entries which were found.
  */
 unsigned find_get_entries(struct address_space *mapping,
 			  pgoff_t start, unsigned int nr_entries,
@@ -1727,8 +1751,8 @@ retry:
  * indexes. There may be holes in the indices due to not-present pages.
  * We also update @start to index the next page for the traversal.
  *
- * find_get_pages_range() returns the number of pages which were found. If this
- * number is smaller than @nr_pages, the end of specified range has been
+ * Return: the number of pages which were found. If this number is
+ * smaller than @nr_pages, the end of specified range has been
  * reached.
  */
 unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
@@ -1801,7 +1825,7 @@ out:
  * find_get_pages_contig() works exactly like find_get_pages(), except
  * that the returned number of pages are guaranteed to be contiguous.
  *
- * find_get_pages_contig() returns the number of pages which were found.
+ * Return: the number of pages which were found.
  */
 unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
 			       unsigned int nr_pages, struct page **pages)
@@ -1862,6 +1886,8 @@ EXPORT_SYMBOL(find_get_pages_contig);
  *
  * Like find_get_pages, except we only return pages which are tagged with
  * @tag. We update @index to index the next page for the traversal.
+ *
+ * Return: the number of pages which were found.
  */
 unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
 			pgoff_t end, xa_mark_t tag, unsigned int nr_pages,
@@ -1939,6 +1965,8 @@ EXPORT_SYMBOL(find_get_pages_range_tag);
  *
  * Like find_get_entries, except we only return entries which are tagged with
  * @tag.
+ *
+ * Return: the number of entries which were found.
  */
 unsigned find_get_entries_tag(struct address_space *mapping, pgoff_t start,
 			xa_mark_t tag, unsigned int nr_entries,
@@ -2024,6 +2052,10 @@ static void shrink_readahead_size_eio(struct file *filp,
  *
  * This is really ugly. But the goto's actually try to clarify some
  * of the logic when it comes to error handling etc.
+ *
+ * Return:
+ * * total number of bytes copied, including those the were already @written
+ * * negative error code if nothing was copied
  */
 static ssize_t generic_file_buffered_read(struct kiocb *iocb,
 		struct iov_iter *iter, ssize_t written)
@@ -2285,6 +2317,9 @@ out:
  *
  * This is the "read_iter()" routine for all filesystems
  * that can use the page cache directly.
+ * Return:
+ * * number of bytes copied, even for partial reads
+ * * negative error code if nothing was read
  */
 ssize_t
 generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
@@ -2352,6 +2387,8 @@ EXPORT_SYMBOL(generic_file_read_iter);
  *
  * This adds the requested page to the page cache if it isn't already there,
  * and schedules an I/O to read in its contents from disk.
+ *
+ * Return: %0 on success, negative error code otherwise.
  */
 static int page_cache_read(struct file *file, pgoff_t offset, gfp_t gfp_mask)
 {
@@ -2466,6 +2503,8 @@ static void do_async_mmap_readahead(struct vm_area_struct *vma,
  * has not been released.
  *
  * We never return with VM_FAULT_RETRY and a bit from VM_FAULT_ERROR set.
+ *
+ * Return: bitwise-OR of %VM_FAULT_ codes.
  */
 vm_fault_t filemap_fault(struct vm_fault *vmf)
 {
@@ -2851,6 +2890,8 @@ out:
  * not set, try to fill the page and wait for it to become unlocked.
  *
  * If the page does not get brought uptodate, return -EIO.
+ *
+ * Return: up to date page on success, ERR_PTR() on failure.
  */
 struct page *read_cache_page(struct address_space *mapping,
 				pgoff_t index,
@@ -2871,6 +2912,8 @@ EXPORT_SYMBOL(read_cache_page);
  * any new page allocations done using the specified allocation flags.
  *
  * If the page does not get brought uptodate, return -EIO.
+ *
+ * Return: up to date page on success, ERR_PTR() on failure.
  */
 struct page *read_cache_page_gfp(struct address_space *mapping,
 				pgoff_t index,
@@ -3254,6 +3297,10 @@ EXPORT_SYMBOL(generic_perform_write);
  * This function does *not* take care of syncing data in case of O_SYNC write.
  * A caller has to handle it. This is mainly due to the fact that we want to
  * avoid syncing under i_mutex.
+ *
+ * Return:
+ * * number of bytes written, even for truncated writes
+ * * negative error code if no data has been written at all
  */
 ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
 {
@@ -3338,6 +3385,10 @@ EXPORT_SYMBOL(__generic_file_write_iter);
  * This is a wrapper around __generic_file_write_iter() to be used by most
  * filesystems. It takes care of syncing the file in case of O_SYNC file
  * and acquires i_mutex as needed.
+ * Return:
+ * * negative error code if no data has been written at all of
+ *   vfs_fsync_range() failed for a synchronous write
+ * * number of bytes written, even for truncated writes
  */
 ssize_t generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
 {
@@ -3364,8 +3415,7 @@ EXPORT_SYMBOL(generic_file_write_iter);
  * @gfp_mask: memory allocation flags (and I/O mode)
  *
  * The address_space is to try to release any data against the page
- * (presumably at page->private). If the release was successful, return '1'.
- * Otherwise return zero.
+ * (presumably at page->private).
  *
  * This may also be called if PG_fscache is set on a page, indicating that the
  * page is known to the local caching routines.
@@ -3373,6 +3423,7 @@ EXPORT_SYMBOL(generic_file_write_iter);
  * The @gfp_mask argument specifies whether I/O may be performed to release
  * this page (__GFP_IO), and whether the call may block (__GFP_RECLAIM & __GFP_FS).
  *
+ * Return: %1 if the release was successful, otherwise return zero.
  */
 int try_to_release_page(struct page *page, gfp_t gfp_mask)
 {