Diffstat (limited to 'mm/filemap.c')
 -rw-r--r--  mm/filemap.c  175
 1 file changed, 90 insertions(+), 85 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index ef169f37156d..140ebda9640f 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -10,13 +10,13 @@
  * the NFS filesystem used to do this differently, for example)
  */
 #include <linux/module.h>
-#include <linux/slab.h>
 #include <linux/compiler.h>
 #include <linux/fs.h>
 #include <linux/uaccess.h>
 #include <linux/aio.h>
 #include <linux/capability.h>
 #include <linux/kernel_stat.h>
+#include <linux/gfp.h>
 #include <linux/mm.h>
 #include <linux/swap.h>
 #include <linux/mman.h>
@@ -260,27 +260,27 @@ int filemap_flush(struct address_space *mapping)
 EXPORT_SYMBOL(filemap_flush);
 
 /**
- * wait_on_page_writeback_range - wait for writeback to complete
- * @mapping:	target address_space
- * @start:	beginning page index
- * @end:	ending page index
+ * filemap_fdatawait_range - wait for writeback to complete
+ * @mapping:		address space structure to wait for
+ * @start_byte:		offset in bytes where the range starts
+ * @end_byte:		offset in bytes where the range ends (inclusive)
  *
- * Wait for writeback to complete against pages indexed by start->end
- * inclusive
+ * Walk the list of under-writeback pages of the given address space
+ * in the given range and wait for all of them.
  */
-int wait_on_page_writeback_range(struct address_space *mapping,
-				pgoff_t start, pgoff_t end)
+int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte,
+			    loff_t end_byte)
 {
+	pgoff_t index = start_byte >> PAGE_CACHE_SHIFT;
+	pgoff_t end = end_byte >> PAGE_CACHE_SHIFT;
 	struct pagevec pvec;
 	int nr_pages;
 	int ret = 0;
-	pgoff_t index;
 
-	if (end < start)
+	if (end_byte < start_byte)
 		return 0;
 
 	pagevec_init(&pvec, 0);
-	index = start;
 	while ((index <= end) &&
 		(nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
 			PAGECACHE_TAG_WRITEBACK,
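A minimal caller sketch, for illustration (example_wait_one_page is a hypothetical name, not part of this patch): with the reworked signature the byte-to-page-index conversion happens inside filemap_fdatawait_range(), so call sites pass plain byte offsets.

/* Hypothetical example: wait for writeback of one page-sized byte range
 * starting at pos.  No PAGE_CACHE_SHIFT arithmetic at the call site. */
static int example_wait_one_page(struct address_space *mapping, loff_t pos)
{
	return filemap_fdatawait_range(mapping, pos, pos + PAGE_CACHE_SIZE - 1);
}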
@@ -310,25 +310,6 @@ int wait_on_page_writeback_range(struct address_space *mapping,
 
 	return ret;
 }
-
-/**
- * filemap_fdatawait_range - wait for all under-writeback pages to complete in a given range
- * @mapping: address space structure to wait for
- * @start: offset in bytes where the range starts
- * @end: offset in bytes where the range ends (inclusive)
- *
- * Walk the list of under-writeback pages of the given address space
- * in the given range and wait for all of them.
- *
- * This is just a simple wrapper so that callers don't have to convert offsets
- * to page indexes themselves
- */
-int filemap_fdatawait_range(struct address_space *mapping, loff_t start,
-			    loff_t end)
-{
-	return wait_on_page_writeback_range(mapping, start >> PAGE_CACHE_SHIFT,
-					    end >> PAGE_CACHE_SHIFT);
-}
 EXPORT_SYMBOL(filemap_fdatawait_range);
 
 /**
@@ -345,8 +326,7 @@ int filemap_fdatawait(struct address_space *mapping)
 	if (i_size == 0)
 		return 0;
 
-	return wait_on_page_writeback_range(mapping, 0,
-				(i_size - 1) >> PAGE_CACHE_SHIFT);
+	return filemap_fdatawait_range(mapping, 0, i_size - 1);
 }
 EXPORT_SYMBOL(filemap_fdatawait);
 
@@ -393,9 +373,8 @@ int filemap_write_and_wait_range(struct address_space *mapping,
 						 WB_SYNC_ALL);
 		/* See comment of filemap_write_and_wait() */
 		if (err != -EIO) {
-			int err2 = wait_on_page_writeback_range(mapping,
-						lstart >> PAGE_CACHE_SHIFT,
-						lend >> PAGE_CACHE_SHIFT);
+			int err2 = filemap_fdatawait_range(mapping,
+						lstart, lend);
 			if (!err)
 				err = err2;
 		}
@@ -1138,7 +1117,7 @@ readpage:
 		if (!PageUptodate(page)) {
 			if (page->mapping == NULL) {
 				/*
-				 * invalidate_inode_pages got it
+				 * invalidate_mapping_pages got it
 				 */
 				unlock_page(page);
 				page_cache_release(page);
@@ -1655,14 +1634,15 @@ EXPORT_SYMBOL(generic_file_readonly_mmap);
 static struct page *__read_cache_page(struct address_space *mapping,
 				pgoff_t index,
 				int (*filler)(void *,struct page*),
-				void *data)
+				void *data,
+				gfp_t gfp)
 {
 	struct page *page;
 	int err;
 repeat:
 	page = find_get_page(mapping, index);
 	if (!page) {
-		page = page_cache_alloc_cold(mapping);
+		page = __page_cache_alloc(gfp | __GFP_COLD);
 		if (!page)
 			return ERR_PTR(-ENOMEM);
 		err = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL);
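The allocation swap preserves behaviour for existing callers: in the include/linux/pagemap.h of this era, page_cache_alloc_cold(mapping) was a thin wrapper around __page_cache_alloc() using the mapping's gfp mask (an assumption worth checking against that header), so passing mapping_gfp_mask(mapping) as gfp reproduces the old call. A sketch of the presumed equivalence (example_old_cold_alloc is a hypothetical name):

/* What page_cache_alloc_cold(mapping) expanded to, roughly: */
static struct page *example_old_cold_alloc(struct address_space *mapping)
{
	return __page_cache_alloc(mapping_gfp_mask(mapping) | __GFP_COLD);
}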
@@ -1682,31 +1662,18 @@ repeat:
 	return page;
 }
 
-/**
- * read_cache_page_async - read into page cache, fill it if needed
- * @mapping: the page's address_space
- * @index: the page index
- * @filler: function to perform the read
- * @data: destination for read data
- *
- * Same as read_cache_page, but don't wait for page to become unlocked
- * after submitting it to the filler.
- *
- * Read into the page cache. If a page already exists, and PageUptodate() is
- * not set, try to fill the page but don't wait for it to become unlocked.
- *
- * If the page does not get brought uptodate, return -EIO.
- */
-struct page *read_cache_page_async(struct address_space *mapping,
+static struct page *do_read_cache_page(struct address_space *mapping,
 				pgoff_t index,
 				int (*filler)(void *,struct page*),
-				void *data)
+				void *data,
+				gfp_t gfp)
+
 {
 	struct page *page;
 	int err;
 
 retry:
-	page = __read_cache_page(mapping, index, filler, data);
+	page = __read_cache_page(mapping, index, filler, data, gfp);
 	if (IS_ERR(page))
 		return page;
 	if (PageUptodate(page))
@@ -1731,8 +1698,67 @@ out:
 	mark_page_accessed(page);
 	return page;
 }
+
+/**
+ * read_cache_page_async - read into page cache, fill it if needed
+ * @mapping: the page's address_space
+ * @index: the page index
+ * @filler: function to perform the read
+ * @data: destination for read data
+ *
+ * Same as read_cache_page, but don't wait for page to become unlocked
+ * after submitting it to the filler.
+ *
+ * Read into the page cache. If a page already exists, and PageUptodate() is
+ * not set, try to fill the page but don't wait for it to become unlocked.
+ *
+ * If the page does not get brought uptodate, return -EIO.
+ */
+struct page *read_cache_page_async(struct address_space *mapping,
+				pgoff_t index,
+				int (*filler)(void *,struct page*),
+				void *data)
+{
+	return do_read_cache_page(mapping, index, filler, data, mapping_gfp_mask(mapping));
+}
 EXPORT_SYMBOL(read_cache_page_async);
 
+static struct page *wait_on_page_read(struct page *page)
+{
+	if (!IS_ERR(page)) {
+		wait_on_page_locked(page);
+		if (!PageUptodate(page)) {
+			page_cache_release(page);
+			page = ERR_PTR(-EIO);
+		}
+	}
+	return page;
+}
+
+/**
+ * read_cache_page_gfp - read into page cache, using specified page allocation flags.
+ * @mapping:	the page's address_space
+ * @index:	the page index
+ * @gfp:	the page allocator flags to use if allocating
+ *
+ * This is the same as "read_mapping_page(mapping, index, NULL)", but with
+ * any new page allocations done using the specified allocation flags. Note
+ * that the Radix tree operations will still use GFP_KERNEL, so you can't
+ * expect to do this atomically or anything like that - but you can pass in
+ * other page requirements.
+ *
+ * If the page does not get brought uptodate, return -EIO.
+ */
+struct page *read_cache_page_gfp(struct address_space *mapping,
+				pgoff_t index,
+				gfp_t gfp)
+{
+	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
+
+	return wait_on_page_read(do_read_cache_page(mapping, index, filler, NULL, gfp));
+}
+EXPORT_SYMBOL(read_cache_page_gfp);
+
 /**
  * read_cache_page - read into page cache, fill it if needed
  * @mapping: the page's address_space
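A hedged usage sketch for the new helper (example_read_nofs is hypothetical, not from this patch): a filesystem that must read a cached page from a context where memory reclaim may not reenter the filesystem can pass GFP_NOFS and still get the mapping's own readpage as the filler.

/* Hypothetical caller: read a page through the page cache, allocating
 * any new page with GFP_NOFS to avoid recursing into fs reclaim. */
static struct page *example_read_nofs(struct address_space *mapping,
				      pgoff_t index)
{
	return read_cache_page_gfp(mapping, index, GFP_NOFS);
}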
@@ -1750,18 +1776,7 @@ struct page *read_cache_page(struct address_space *mapping,
 				int (*filler)(void *,struct page*),
 				void *data)
 {
-	struct page *page;
-
-	page = read_cache_page_async(mapping, index, filler, data);
-	if (IS_ERR(page))
-		goto out;
-	wait_on_page_locked(page);
-	if (!PageUptodate(page)) {
-		page_cache_release(page);
-		page = ERR_PTR(-EIO);
-	}
- out:
-	return page;
+	return wait_on_page_read(read_cache_page_async(mapping, index, filler, data));
 }
 EXPORT_SYMBOL(read_cache_page);
 
@@ -1844,7 +1859,7 @@ static size_t __iovec_copy_from_user_inatomic(char *vaddr,
 
 /*
  * Copy as much as we can into the page and return the number of bytes which
- * were sucessfully copied. If a fault is encountered then return the number of
+ * were successfully copied. If a fault is encountered then return the number of
  * bytes which were copied.
  */
 size_t iov_iter_copy_from_user_atomic(struct page *page,
@@ -1971,7 +1986,7 @@ EXPORT_SYMBOL(iov_iter_single_seg_count);
 inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, int isblk)
 {
 	struct inode *inode = file->f_mapping->host;
-	unsigned long limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
+	unsigned long limit = rlimit(RLIMIT_FSIZE);
 
 	if (unlikely(*pos < 0))
 		return -EINVAL;
@@ -2217,6 +2232,9 @@ again:
 		if (unlikely(status))
 			break;
 
+		if (mapping_writably_mapped(mapping))
+			flush_dcache_page(page);
+
 		pagefault_disable();
 		copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
 		pagefault_enable();
@@ -2261,7 +2279,6 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
 		size_t count, ssize_t written)
 {
 	struct file *file = iocb->ki_filp;
-	struct address_space *mapping = file->f_mapping;
 	ssize_t status;
 	struct iov_iter i;
 
@@ -2273,15 +2290,6 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
 		*ppos = pos + status;
 	}
 
-	/*
-	 * If we get here for O_DIRECT writes then we must have fallen through
-	 * to buffered writes (block instantiation inside i_size). So we sync
-	 * the file data here, to try to honour O_DIRECT expectations.
-	 */
-	if (unlikely(file->f_flags & O_DIRECT) && written)
-		status = filemap_write_and_wait_range(mapping,
-					pos, pos + written - 1);
-
 	return written ? written : status;
 }
 EXPORT_SYMBOL(generic_file_buffered_write);
@@ -2380,10 +2388,7 @@ ssize_t __generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
 	 * semantics.
 	 */
 	endbyte = pos + written_buffered - written - 1;
-	err = do_sync_mapping_range(file->f_mapping, pos, endbyte,
-				    SYNC_FILE_RANGE_WAIT_BEFORE|
-				    SYNC_FILE_RANGE_WRITE|
-				    SYNC_FILE_RANGE_WAIT_AFTER);
+	err = filemap_write_and_wait_range(file->f_mapping, pos, endbyte);
 	if (err == 0) {
 		written = written_buffered;
 		invalidate_mapping_pages(mapping,
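For orientation, a rough-equivalence sketch (an assumption, with a hypothetical helper name): filemap_write_and_wait_range() starts writeback on the byte range and then waits for it to complete, which covers what the replaced SYNC_FILE_RANGE_WAIT_BEFORE|WRITE|WAIT_AFTER combination asked of do_sync_mapping_range().

/* Hypothetical wrapper showing the single call that now does the work
 * of the old three-flag do_sync_mapping_range() invocation. */
static int example_flush_written_range(struct address_space *mapping,
				       loff_t pos, loff_t endbyte)
{
	return filemap_write_and_wait_range(mapping, pos, endbyte);
}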