aboutsummaryrefslogtreecommitdiffstats
path: root/mm/filemap.c
diff options
context:
space:
mode:
Diffstat (limited to 'mm/filemap.c')
 mm/filemap.c | 28 ++++++++++++++++++++++----
 1 files changed, 24 insertions(+), 4 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index 23acefe51808..fc11974f2bee 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -565,6 +565,24 @@ void wait_on_page_bit(struct page *page, int bit_nr)
 EXPORT_SYMBOL(wait_on_page_bit);
 
 /**
+ * add_page_wait_queue - Add an arbitrary waiter to a page's wait queue
+ * @page - Page defining the wait queue of interest
+ * @waiter - Waiter to add to the queue
+ *
+ * Add an arbitrary @waiter to the wait queue for the nominated @page.
+ */
+void add_page_wait_queue(struct page *page, wait_queue_t *waiter)
+{
+	wait_queue_head_t *q = page_waitqueue(page);
+	unsigned long flags;
+
+	spin_lock_irqsave(&q->lock, flags);
+	__add_wait_queue(q, waiter);
+	spin_unlock_irqrestore(&q->lock, flags);
+}
+EXPORT_SYMBOL_GPL(add_page_wait_queue);
+
+/**
  * unlock_page - unlock a locked page
  * @page: the page
  *
@@ -1823,7 +1841,7 @@ static size_t __iovec_copy_from_user_inatomic(char *vaddr,
 		int copy = min(bytes, iov->iov_len - base);
 
 		base = 0;
-		left = __copy_from_user_inatomic_nocache(vaddr, buf, copy);
+		left = __copy_from_user_inatomic(vaddr, buf, copy);
 		copied += copy;
 		bytes -= copy;
 		vaddr += copy;
@@ -1851,8 +1869,7 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
 	if (likely(i->nr_segs == 1)) {
 		int left;
 		char __user *buf = i->iov->iov_base + i->iov_offset;
-		left = __copy_from_user_inatomic_nocache(kaddr + offset,
-							buf, bytes);
+		left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
 		copied = bytes - left;
 	} else {
 		copied = __iovec_copy_from_user_inatomic(kaddr + offset,
@@ -1880,7 +1897,7 @@ size_t iov_iter_copy_from_user(struct page *page,
 	if (likely(i->nr_segs == 1)) {
 		int left;
 		char __user *buf = i->iov->iov_base + i->iov_offset;
-		left = __copy_from_user_nocache(kaddr + offset, buf, bytes);
+		left = __copy_from_user(kaddr + offset, buf, bytes);
 		copied = bytes - left;
 	} else {
 		copied = __iovec_copy_from_user_inatomic(kaddr + offset,
@@ -2464,6 +2481,9 @@ EXPORT_SYMBOL(generic_file_aio_write);
  * (presumably at page->private). If the release was successful, return `1'.
  * Otherwise return zero.
  *
+ * This may also be called if PG_fscache is set on a page, indicating that the
+ * page is known to the local caching routines.
+ *
  * The @gfp_mask argument specifies whether I/O may be performed to release
  * this page (__GFP_IO), and whether the call may block (__GFP_WAIT & __GFP_FS).
  *