Diffstat (limited to 'mm/filemap.c')
-rw-r--r--  mm/filemap.c  74
1 file changed, 46 insertions(+), 28 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index 900edfaf6df5..90effcdf948d 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -31,6 +31,7 @@
 #include <linux/security.h>
 #include <linux/cpuset.h>
 #include <linux/hardirq.h> /* for BUG_ON(!in_atomic()) only */
+#include <linux/hugetlb.h>
 #include <linux/memcontrol.h>
 #include <linux/cleancache.h>
 #include <linux/rmap.h>
@@ -233,7 +234,6 @@ void delete_from_page_cache(struct page *page)
 	spin_lock_irq(&mapping->tree_lock);
 	__delete_from_page_cache(page, NULL);
 	spin_unlock_irq(&mapping->tree_lock);
-	mem_cgroup_uncharge_cache_page(page);
 
 	if (freepage)
 		freepage(page);
@@ -241,18 +241,6 @@ void delete_from_page_cache(struct page *page)
 }
 EXPORT_SYMBOL(delete_from_page_cache);
 
-static int sleep_on_page(void *word)
-{
-	io_schedule();
-	return 0;
-}
-
-static int sleep_on_page_killable(void *word)
-{
-	sleep_on_page(word);
-	return fatal_signal_pending(current) ? -EINTR : 0;
-}
-
 static int filemap_check_errors(struct address_space *mapping)
 {
 	int ret = 0;
@@ -501,8 +489,7 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
 	if (PageSwapBacked(new))
 		__inc_zone_page_state(new, NR_SHMEM);
 	spin_unlock_irq(&mapping->tree_lock);
-	/* mem_cgroup codes must not be called under tree_lock */
-	mem_cgroup_replace_page_cache(old, new);
+	mem_cgroup_migrate(old, new, true);
 	radix_tree_preload_end();
 	if (freepage)
 		freepage(old);
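
The removed comment noted that memcg calls must not run under tree_lock; the call site still sits after the unlock, so that constraint is preserved. A minimal sketch of the rewritten call, with one assumption read off this call site: the third argument appears to be the same "lrucare" flag seen in mem_cgroup_commit_charge() below, true here because the old page may already be on an LRU list.

	/* Move the memcg charge from the outgoing page to its replacement. */
	mem_cgroup_migrate(old, new, true);
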
@@ -560,19 +547,24 @@ static int __add_to_page_cache_locked(struct page *page,
 		pgoff_t offset, gfp_t gfp_mask,
 		void **shadowp)
 {
+	int huge = PageHuge(page);
+	struct mem_cgroup *memcg;
 	int error;
 
 	VM_BUG_ON_PAGE(!PageLocked(page), page);
 	VM_BUG_ON_PAGE(PageSwapBacked(page), page);
 
-	error = mem_cgroup_charge_file(page, current->mm,
-					gfp_mask & GFP_RECLAIM_MASK);
-	if (error)
-		return error;
+	if (!huge) {
+		error = mem_cgroup_try_charge(page, current->mm,
+					      gfp_mask, &memcg);
+		if (error)
+			return error;
+	}
 
 	error = radix_tree_maybe_preload(gfp_mask & ~__GFP_HIGHMEM);
 	if (error) {
-		mem_cgroup_uncharge_cache_page(page);
+		if (!huge)
+			mem_cgroup_cancel_charge(page, memcg);
 		return error;
 	}
 
@@ -587,13 +579,16 @@ static int __add_to_page_cache_locked(struct page *page,
 		goto err_insert;
 	__inc_zone_page_state(page, NR_FILE_PAGES);
 	spin_unlock_irq(&mapping->tree_lock);
+	if (!huge)
+		mem_cgroup_commit_charge(page, memcg, false);
 	trace_mm_filemap_add_to_page_cache(page);
 	return 0;
 err_insert:
 	page->mapping = NULL;
 	/* Leave page->index set: truncation relies upon it */
 	spin_unlock_irq(&mapping->tree_lock);
-	mem_cgroup_uncharge_cache_page(page);
+	if (!huge)
+		mem_cgroup_cancel_charge(page, memcg);
 	page_cache_release(page);
 	return error;
 }
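
Taken together, the two hunks above show the rewritten memcg charge protocol: a reservation (try_charge), then either a commit once the page is safely in the cache or a cancel on every failure path. A standalone sketch of the calling pattern, where insert_into_cache() is a hypothetical stand-in for the step that may still fail and false is the lrucare argument as used above:

	struct mem_cgroup *memcg;
	int error;

	/* Step 1: reserve the charge; nothing to undo on failure. */
	error = mem_cgroup_try_charge(page, current->mm, gfp_mask, &memcg);
	if (error)
		return error;

	/* Step 2: the operation being charged for (hypothetical). */
	error = insert_into_cache(page);
	if (error) {
		/* Failure: back out the reservation. */
		mem_cgroup_cancel_charge(page, memcg);
		return error;
	}

	/* Success: make the charge permanent. */
	mem_cgroup_commit_charge(page, memcg, false);
	return 0;
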
@@ -692,7 +687,7 @@ void wait_on_page_bit(struct page *page, int bit_nr)
 	DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);
 
 	if (test_bit(bit_nr, &page->flags))
-		__wait_on_bit(page_waitqueue(page), &wait, sleep_on_page,
+		__wait_on_bit(page_waitqueue(page), &wait, bit_wait_io,
 							TASK_UNINTERRUPTIBLE);
 }
 EXPORT_SYMBOL(wait_on_page_bit);
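
This and the following hunks swap the file-local sleep_on_page() helpers, deleted above, for the kernel's shared bit_wait_io action. As a sketch of the shape such an action function takes (mirroring the removed sleep_on_page(), not the exact body of bit_wait_io()):

	/*
	 * A wait-on-bit action: invoked while the bit is still set.
	 * Sleep until woken; returning 0 lets __wait_on_bit() re-test
	 * the bit, while a nonzero return aborts the wait (how the
	 * killable variants bail out on fatal signals).
	 */
	static int example_io_wait_action(void *word)
	{
		io_schedule();		/* sleep, accounted as I/O wait */
		return 0;		/* keep waiting while bit is set */
	}
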
@@ -705,7 +700,7 @@ int wait_on_page_bit_killable(struct page *page, int bit_nr)
 		return 0;
 
 	return __wait_on_bit(page_waitqueue(page), &wait,
-			     sleep_on_page_killable, TASK_KILLABLE);
+			     bit_wait_io, TASK_KILLABLE);
 }
 
 /**
@@ -806,7 +801,7 @@ void __lock_page(struct page *page)
 {
 	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
 
-	__wait_on_bit_lock(page_waitqueue(page), &wait, sleep_on_page,
+	__wait_on_bit_lock(page_waitqueue(page), &wait, bit_wait_io,
 							TASK_UNINTERRUPTIBLE);
 }
 EXPORT_SYMBOL(__lock_page);
@@ -816,10 +811,21 @@ int __lock_page_killable(struct page *page)
 	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
 
 	return __wait_on_bit_lock(page_waitqueue(page), &wait,
-					sleep_on_page_killable, TASK_KILLABLE);
+					bit_wait_io, TASK_KILLABLE);
 }
 EXPORT_SYMBOL_GPL(__lock_page_killable);
 
+/*
+ * Return values:
+ * 1 - page is locked; mmap_sem is still held.
+ * 0 - page is not locked.
+ *     mmap_sem has been released (up_read()), unless flags had both
+ *     FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_RETRY_NOWAIT set, in
+ *     which case mmap_sem is still held.
+ *
+ * If neither ALLOW_RETRY nor KILLABLE are set, will always return 1
+ * with the page locked and the mmap_sem unperturbed.
+ */
 int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
 			 unsigned int flags)
 {
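
A hypothetical caller sketch following the return-value contract documented in the new comment; page, mm, and flags stand in for the fault-path state:

	if (!lock_page_or_retry(page, mm, flags)) {
		/*
		 * Returned 0: page not locked.  Unless both
		 * FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_RETRY_NOWAIT
		 * were set, mmap_sem has already been dropped for us.
		 */
		page_cache_release(page);
		return VM_FAULT_RETRY;
	}
	/* Returned 1: page locked, mmap_sem still held. */
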
@@ -1103,9 +1109,9 @@ no_page:
 		if (WARN_ON_ONCE(!(fgp_flags & FGP_LOCK)))
 			fgp_flags |= FGP_LOCK;
 
-		/* Init accessed so avoit atomic mark_page_accessed later */
+		/* Init accessed so avoid atomic mark_page_accessed later */
 		if (fgp_flags & FGP_ACCESSED)
-			init_page_accessed(page);
+			__SetPageReferenced(page);
 
 		err = add_to_page_cache_lru(page, mapping, offset, radix_gfp_mask);
 		if (unlikely(err)) {
@@ -1839,6 +1845,18 @@ static void do_async_mmap_readahead(struct vm_area_struct *vma,
  * The goto's are kind of ugly, but this streamlines the normal case of having
  * it in the page cache, and handles the special cases reasonably without
  * having a lot of duplicated code.
+ *
+ * vma->vm_mm->mmap_sem must be held on entry.
+ *
+ * If our return value has VM_FAULT_RETRY set, it's because
+ * lock_page_or_retry() returned 0.
+ * The mmap_sem has usually been released in this case.
+ * See __lock_page_or_retry() for the exception.
+ *
+ * If our return value does not have VM_FAULT_RETRY set, the mmap_sem
+ * has not been released.
+ *
+ * We never return with VM_FAULT_RETRY and a bit from VM_FAULT_ERROR set.
  */
 int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
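
The new comment pins down the mmap_sem contract that arch fault handlers depend on. A hedged sketch of the typical consumer (modeled on the common arch pattern; fault_flags and the omitted vma checks are illustrative):

	unsigned int fault_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
	int fault;

retry:
	down_read(&mm->mmap_sem);
	/* vma lookup and access checks omitted for brevity */
	fault = handle_mm_fault(mm, vma, address, fault_flags);
	if (fault & VM_FAULT_RETRY) {
		/* mmap_sem was (usually) released for us; see above. */
		fault_flags &= ~FAULT_FLAG_ALLOW_RETRY;
		fault_flags |= FAULT_FLAG_TRIED;
		goto retry;
	}
	up_read(&mm->mmap_sem);
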
@@ -2584,7 +2602,7 @@ ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
 		 * that this differs from normal direct-io semantics, which
 		 * will return -EFOO even if some bytes were written.
 		 */
-		if (unlikely(status < 0) && !written) {
+		if (unlikely(status < 0)) {
 			err = status;
 			goto out;
 		}