Diffstat (limited to 'mm/filemap.c')

-rw-r--r--  mm/filemap.c | 118
1 file changed, 72 insertions(+), 46 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index 8b4d88f9249e..698ea80f2102 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1634,14 +1634,15 @@ EXPORT_SYMBOL(generic_file_readonly_mmap);
 static struct page *__read_cache_page(struct address_space *mapping,
 				pgoff_t index,
 				int (*filler)(void *,struct page*),
-				void *data)
+				void *data,
+				gfp_t gfp)
 {
 	struct page *page;
 	int err;
 repeat:
 	page = find_get_page(mapping, index);
 	if (!page) {
-		page = page_cache_alloc_cold(mapping);
+		page = __page_cache_alloc(gfp | __GFP_COLD);
 		if (!page)
 			return ERR_PTR(-ENOMEM);
 		err = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL);
@@ -1661,31 +1662,18 @@ repeat:
 	return page;
 }
 
-/**
- * read_cache_page_async - read into page cache, fill it if needed
- * @mapping:	the page's address_space
- * @index:	the page index
- * @filler:	function to perform the read
- * @data:	destination for read data
- *
- * Same as read_cache_page, but don't wait for page to become unlocked
- * after submitting it to the filler.
- *
- * Read into the page cache. If a page already exists, and PageUptodate() is
- * not set, try to fill the page but don't wait for it to become unlocked.
- *
- * If the page does not get brought uptodate, return -EIO.
- */
-struct page *read_cache_page_async(struct address_space *mapping,
+static struct page *do_read_cache_page(struct address_space *mapping,
 				pgoff_t index,
 				int (*filler)(void *,struct page*),
-				void *data)
+				void *data,
+				gfp_t gfp)
+
 {
 	struct page *page;
 	int err;
 
 retry:
-	page = __read_cache_page(mapping, index, filler, data);
+	page = __read_cache_page(mapping, index, filler, data, gfp);
 	if (IS_ERR(page))
 		return page;
 	if (PageUptodate(page))
@@ -1710,8 +1698,67 @@ out:
 	mark_page_accessed(page);
 	return page;
 }
+
+/**
+ * read_cache_page_async - read into page cache, fill it if needed
+ * @mapping:	the page's address_space
+ * @index:	the page index
+ * @filler:	function to perform the read
+ * @data:	destination for read data
+ *
+ * Same as read_cache_page, but don't wait for page to become unlocked
+ * after submitting it to the filler.
+ *
+ * Read into the page cache. If a page already exists, and PageUptodate() is
+ * not set, try to fill the page but don't wait for it to become unlocked.
+ *
+ * If the page does not get brought uptodate, return -EIO.
+ */
+struct page *read_cache_page_async(struct address_space *mapping,
+				pgoff_t index,
+				int (*filler)(void *,struct page*),
+				void *data)
+{
+	return do_read_cache_page(mapping, index, filler, data, mapping_gfp_mask(mapping));
+}
 EXPORT_SYMBOL(read_cache_page_async);
 
+static struct page *wait_on_page_read(struct page *page)
+{
+	if (!IS_ERR(page)) {
+		wait_on_page_locked(page);
+		if (!PageUptodate(page)) {
+			page_cache_release(page);
+			page = ERR_PTR(-EIO);
+		}
+	}
+	return page;
+}
+
+/**
+ * read_cache_page_gfp - read into page cache, using specified page allocation flags.
+ * @mapping:	the page's address_space
+ * @index:	the page index
+ * @gfp:	the page allocator flags to use if allocating
+ *
+ * This is the same as "read_mapping_page(mapping, index, NULL)", but with
+ * any new page allocations done using the specified allocation flags. Note
+ * that the Radix tree operations will still use GFP_KERNEL, so you can't
+ * expect to do this atomically or anything like that - but you can pass in
+ * other page requirements.
+ *
+ * If the page does not get brought uptodate, return -EIO.
+ */
+struct page *read_cache_page_gfp(struct address_space *mapping,
+				pgoff_t index,
+				gfp_t gfp)
+{
+	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
+
+	return wait_on_page_read(do_read_cache_page(mapping, index, filler, NULL, gfp));
+}
+EXPORT_SYMBOL(read_cache_page_gfp);
+
 /**
  * read_cache_page - read into page cache, fill it if needed
  * @mapping:	the page's address_space
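As a usage sketch for the new read_cache_page_gfp() entry point: a caller keeping pages in, say, a shmem-backed address_space could read one into the cache while steering any new page allocation to highmem. The wrapper name and the GFP_HIGHUSER mask below are illustrative assumptions, not part of this patch.

/* Hypothetical caller of read_cache_page_gfp(); the function name and
 * GFP mask are assumptions for illustration only.
 */
static struct page *example_get_object_page(struct address_space *mapping,
					    pgoff_t index)
{
	struct page *page;

	page = read_cache_page_gfp(mapping, index, GFP_HIGHUSER);
	if (IS_ERR(page))
		return page;	/* ERR_PTR(-ENOMEM) or ERR_PTR(-EIO) */

	/* The page is uptodate here; the caller owns a reference and
	 * must drop it with page_cache_release() when finished.
	 */
	return page;
}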
@@ -1729,18 +1776,7 @@ struct page *read_cache_page(struct address_space *mapping,
 				int (*filler)(void *,struct page*),
 				void *data)
 {
-	struct page *page;
-
-	page = read_cache_page_async(mapping, index, filler, data);
-	if (IS_ERR(page))
-		goto out;
-	wait_on_page_locked(page);
-	if (!PageUptodate(page)) {
-		page_cache_release(page);
-		page = ERR_PTR(-EIO);
-	}
- out:
-	return page;
+	return wait_on_page_read(read_cache_page_async(mapping, index, filler, data));
 }
 EXPORT_SYMBOL(read_cache_page);
 
@@ -2196,6 +2232,9 @@ again:
 		if (unlikely(status))
 			break;
 
+		if (mapping_writably_mapped(mapping))
+			flush_dcache_page(page);
+
 		pagefault_disable();
 		copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
 		pagefault_enable();
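A note on the hunk above: on architectures with virtually indexed caches, a page that is also mapped writably into user space may hold user-written data in cache lines aliased differently from the kernel mapping. Calling flush_dcache_page() before the atomic copy keeps the kernel's view of the page coherent with those user mappings; mapping_writably_mapped() skips the flush when no such mapping exists.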
@@ -2240,7 +2279,6 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
 		size_t count, ssize_t written)
 {
 	struct file *file = iocb->ki_filp;
-	struct address_space *mapping = file->f_mapping;
 	ssize_t status;
 	struct iov_iter i;
 
@@ -2252,15 +2290,6 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
 		*ppos = pos + status;
 	}
 
-	/*
-	 * If we get here for O_DIRECT writes then we must have fallen through
-	 * to buffered writes (block instantiation inside i_size). So we sync
-	 * the file data here, to try to honour O_DIRECT expectations.
-	 */
-	if (unlikely(file->f_flags & O_DIRECT) && written)
-		status = filemap_write_and_wait_range(mapping,
-					pos, pos + written - 1);
-
 	return written ? written : status;
 }
 EXPORT_SYMBOL(generic_file_buffered_write);
@@ -2359,10 +2388,7 @@ ssize_t __generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
 	 * semantics.
 	 */
 	endbyte = pos + written_buffered - written - 1;
-	err = do_sync_mapping_range(file->f_mapping, pos, endbyte,
-				    SYNC_FILE_RANGE_WAIT_BEFORE|
-				    SYNC_FILE_RANGE_WRITE|
-				    SYNC_FILE_RANGE_WAIT_AFTER);
+	err = filemap_write_and_wait_range(file->f_mapping, pos, endbyte);
 	if (err == 0) {
 		written = written_buffered;
 		invalidate_mapping_pages(mapping,
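For the byte range involved here, the removed do_sync_mapping_range() call and its replacement are effectively equivalent: wait for any write-out already in flight, start write-out of the dirty pages in [pos, endbyte], and wait for completion. A hedged comment sketch of that intent, using names from the surrounding code:

/* Effectively, for the range dirtied by the buffered fallback:
 *
 *	do_sync_mapping_range(file->f_mapping, pos, endbyte,
 *			      SYNC_FILE_RANGE_WAIT_BEFORE |
 *			      SYNC_FILE_RANGE_WRITE |
 *			      SYNC_FILE_RANGE_WAIT_AFTER);
 * becomes
 *	filemap_write_and_wait_range(file->f_mapping, pos, endbyte);
 *
 * so the invalidate_mapping_pages() that follows still operates on
 * clean, written-back pages before dropping them from the cache.
 */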
