Diffstat (limited to 'mm/filemap.c')
-rw-r--r--  mm/filemap.c  106
1 file changed, 63 insertions, 43 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index 867d40222ec7..645a080ba4df 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -33,7 +33,6 @@
 #include <linux/cpuset.h>
 #include <linux/hardirq.h> /* for BUG_ON(!in_atomic()) only */
 #include <linux/memcontrol.h>
-#include <linux/mm_inline.h> /* for page_is_file_cache() */
 #include <linux/cleancache.h>
 #include "internal.h"
 
@@ -462,6 +461,7 @@ int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
 	int error;
 
 	VM_BUG_ON(!PageLocked(page));
+	VM_BUG_ON(PageSwapBacked(page));
 
 	error = mem_cgroup_cache_charge(page, current->mm,
 					gfp_mask & GFP_RECLAIM_MASK);
@@ -479,8 +479,6 @@ int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
 	if (likely(!error)) {
 		mapping->nrpages++;
 		__inc_zone_page_state(page, NR_FILE_PAGES);
-		if (PageSwapBacked(page))
-			__inc_zone_page_state(page, NR_SHMEM);
 		spin_unlock_irq(&mapping->tree_lock);
 	} else {
 		page->mapping = NULL;
@@ -502,22 +500,9 @@ int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
 {
 	int ret;
 
-	/*
-	 * Splice_read and readahead add shmem/tmpfs pages into the page cache
-	 * before shmem_readpage has a chance to mark them as SwapBacked: they
-	 * need to go on the anon lru below, and mem_cgroup_cache_charge
-	 * (called in add_to_page_cache) needs to know where they're going too.
-	 */
-	if (mapping_cap_swap_backed(mapping))
-		SetPageSwapBacked(page);
-
 	ret = add_to_page_cache(page, mapping, offset, gfp_mask);
-	if (ret == 0) {
-		if (page_is_file_cache(page))
-			lru_cache_add_file(page);
-		else
-			lru_cache_add_anon(page);
-	}
+	if (ret == 0)
+		lru_cache_add_file(page);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(add_to_page_cache_lru);
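For orientation, this is roughly how add_to_page_cache_lru() reads once the hunk above is applied. It is reconstructed from the hunk itself; the second half of the parameter list is not visible in the context shown here and is assumed. Every page reaching this helper is now treated as file-backed, since the shmem/tmpfs special-casing has moved out of it.

/* Sketch of add_to_page_cache_lru() after this hunk; the parameter list
 * beyond what the hunk header shows is assumed, not taken from the diff. */
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
			  pgoff_t offset, gfp_t gfp_mask)
{
	int ret;

	ret = add_to_page_cache(page, mapping, offset, gfp_mask);
	if (ret == 0)
		lru_cache_add_file(page);	/* always the file LRU now */
	return ret;
}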
@@ -714,9 +699,16 @@ repeat:
 		page = radix_tree_deref_slot(pagep);
 		if (unlikely(!page))
 			goto out;
-		if (radix_tree_deref_retry(page))
-			goto repeat;
-
+		if (radix_tree_exception(page)) {
+			if (radix_tree_deref_retry(page))
+				goto repeat;
+			/*
+			 * Otherwise, shmem/tmpfs must be storing a swap entry
+			 * here as an exceptional entry: so return it without
+			 * attempting to raise page count.
+			 */
+			goto out;
+		}
 		if (!page_cache_get_speculative(page))
 			goto repeat;
 
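The new branch separates two non-page values a lookup can now observe in a slot: the transient deref-retry marker and a shmem/tmpfs swap entry stored as an "exceptional" entry. Real page pointers are distinguished from such entries by tag bits in the low bits of the slot value; the self-contained userspace sketch below models that idea only, with illustrative names and an assumed bit layout rather than the kernel's.

#include <stdint.h>
#include <stdio.h>

/* Userspace model of exceptional entries: real objects are pointer-aligned,
 * so a set low bit marks a value that is not a pointer at all but an encoded
 * token (standing in here for a swap entry). Bit choice and helper names are
 * illustrative, not the kernel encoding. */
#define EXCEPTIONAL_BIT	1UL

static void *make_exceptional(unsigned long token)
{
	return (void *)((token << 1) | EXCEPTIONAL_BIT);
}

static int entry_is_exceptional(void *entry)
{
	return ((unsigned long)entry & EXCEPTIONAL_BIT) != 0;
}

static unsigned long exceptional_value(void *entry)
{
	return (unsigned long)entry >> 1;
}

int main(void)
{
	int page = 42;			/* stands in for a struct page */
	void *slots[2] = { &page, make_exceptional(1234) };

	for (int i = 0; i < 2; i++) {
		void *entry = slots[i];

		if (entry_is_exceptional(entry))
			printf("slot %d: swap-like token %lu, no refcount taken\n",
			       i, exceptional_value(entry));
		else
			printf("slot %d: real object %d\n", i, *(int *)entry);
	}
	return 0;
}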
@@ -753,7 +745,7 @@ struct page *find_lock_page(struct address_space *mapping, pgoff_t offset)
 
 repeat:
 	page = find_get_page(mapping, offset);
-	if (page) {
+	if (page && !radix_tree_exception(page)) {
 		lock_page(page);
 		/* Has the page been truncated? */
 		if (unlikely(page->mapping != mapping)) {
@@ -840,7 +832,7 @@ unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
 	rcu_read_lock();
 restart:
 	nr_found = radix_tree_gang_lookup_slot(&mapping->page_tree,
-				(void ***)pages, start, nr_pages);
+				(void ***)pages, NULL, start, nr_pages);
 	ret = 0;
 	for (i = 0; i < nr_found; i++) {
 		struct page *page;
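The only change in this hunk (and in the matching find_get_pages_contig() hunk below) is the extra NULL argument to radix_tree_gang_lookup_slot(), which suggests the lookup gained an optional output parameter, apparently for reporting the indices of the slots it found, that callers may leave NULL when they do not need it. A minimal userspace sketch of that optional-out-parameter pattern, with hypothetical names:

#include <stddef.h>
#include <stdio.h>

/* A gang lookup that can also report the index of each hit, but only when
 * the caller supplies somewhere to put it. Names are illustrative only. */
static unsigned int gang_lookup(int *table, unsigned int table_size,
				int **results, unsigned long *indices,
				unsigned long first, unsigned int max)
{
	unsigned int found = 0;

	for (unsigned long i = first; i < table_size && found < max; i++) {
		if (table[i] == 0)
			continue;		/* empty slot */
		results[found] = &table[i];
		if (indices)			/* optional: caller may pass NULL */
			indices[found] = i;
		found++;
	}
	return found;
}

int main(void)
{
	int table[8] = { 0, 5, 0, 7, 0, 0, 9, 0 };
	int *results[8];
	unsigned long indices[8];

	/* Caller that wants the indices too. */
	unsigned int n = gang_lookup(table, 8, results, indices, 0, 8);
	for (unsigned int i = 0; i < n; i++)
		printf("value %d at index %lu\n", *results[i], indices[i]);

	/* Caller that does not care, like the hunks here: pass NULL. */
	n = gang_lookup(table, 8, results, NULL, 0, 8);
	printf("found %u entries without indices\n", n);
	return 0;
}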
@@ -849,13 +841,22 @@ repeat:
 		if (unlikely(!page))
 			continue;
 
-		/*
-		 * This can only trigger when the entry at index 0 moves out
-		 * of or back to the root: none yet gotten, safe to restart.
-		 */
-		if (radix_tree_deref_retry(page)) {
-			WARN_ON(start | i);
-			goto restart;
+		if (radix_tree_exception(page)) {
+			if (radix_tree_deref_retry(page)) {
+				/*
+				 * Transient condition which can only trigger
+				 * when entry at index 0 moves out of or back
+				 * to root: none yet gotten, safe to restart.
+				 */
+				WARN_ON(start | i);
+				goto restart;
+			}
+			/*
+			 * Otherwise, shmem/tmpfs must be storing a swap entry
+			 * here as an exceptional entry: so skip over it -
+			 * we only reach this from invalidate_mapping_pages().
+			 */
+			continue;
 		}
 
 		if (!page_cache_get_speculative(page))
@@ -903,7 +904,7 @@ unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
 	rcu_read_lock();
 restart:
 	nr_found = radix_tree_gang_lookup_slot(&mapping->page_tree,
-				(void ***)pages, index, nr_pages);
+				(void ***)pages, NULL, index, nr_pages);
 	ret = 0;
 	for (i = 0; i < nr_found; i++) {
 		struct page *page;
@@ -912,12 +913,22 @@ repeat:
 		if (unlikely(!page))
 			continue;
 
-		/*
-		 * This can only trigger when the entry at index 0 moves out
-		 * of or back to the root: none yet gotten, safe to restart.
-		 */
-		if (radix_tree_deref_retry(page))
-			goto restart;
+		if (radix_tree_exception(page)) {
+			if (radix_tree_deref_retry(page)) {
+				/*
+				 * Transient condition which can only trigger
+				 * when entry at index 0 moves out of or back
+				 * to root: none yet gotten, safe to restart.
+				 */
+				goto restart;
+			}
+			/*
+			 * Otherwise, shmem/tmpfs must be storing a swap entry
+			 * here as an exceptional entry: so stop looking for
+			 * contiguous pages.
+			 */
+			break;
+		}
 
 		if (!page_cache_get_speculative(page))
 			goto repeat;
@@ -977,12 +988,21 @@ repeat:
 		if (unlikely(!page))
 			continue;
 
-		/*
-		 * This can only trigger when the entry at index 0 moves out
-		 * of or back to the root: none yet gotten, safe to restart.
-		 */
-		if (radix_tree_deref_retry(page))
-			goto restart;
+		if (radix_tree_exception(page)) {
+			if (radix_tree_deref_retry(page)) {
+				/*
+				 * Transient condition which can only trigger
+				 * when entry at index 0 moves out of or back
+				 * to root: none yet gotten, safe to restart.
+				 */
+				goto restart;
+			}
+			/*
+			 * This function is never used on a shmem/tmpfs
+			 * mapping, so a swap entry won't be found here.
+			 */
+			BUG();
+		}
 
 		if (!page_cache_get_speculative(page))
 			goto repeat;
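Taken together, the three gang-lookup paths adopt different policies when they meet an exceptional entry: find_get_pages() skips it, find_get_pages_contig() stops because contiguity is broken, and find_get_pages_tag() treats it as a bug since tagged lookups are never used on shmem/tmpfs mappings. A compact userspace model of that skip/break/bug split, with illustrative names:

#include <stdio.h>

/* Userspace model of the three per-caller policies the hunks above apply to
 * exceptional (swap-like) entries found during a scan. Only the skip/break/bug
 * split mirrors the patch; everything else is illustrative. */
enum policy { POLICY_SKIP, POLICY_BREAK, POLICY_BUG };

static unsigned int scan(const int *entries, unsigned int n, enum policy p)
{
	unsigned int got = 0;

	for (unsigned int i = 0; i < n; i++) {
		if (entries[i] < 0) {		/* negative == exceptional */
			if (p == POLICY_SKIP)
				continue;	/* find_get_pages() */
			if (p == POLICY_BREAK)
				break;		/* find_get_pages_contig() */
			fprintf(stderr, "BUG: unexpected exceptional entry\n");
			break;			/* find_get_pages_tag() would BUG() */
		}
		got++;				/* a real page: take it */
	}
	return got;
}

int main(void)
{
	const int entries[] = { 1, 2, -1, 3, 4 };

	printf("skip:  %u pages\n", scan(entries, 5, POLICY_SKIP));	/* 4 */
	printf("break: %u pages\n", scan(entries, 5, POLICY_BREAK));	/* 2 */
	printf("bug:   %u pages\n", scan(entries, 5, POLICY_BUG));	/* 2, plus a warning */
	return 0;
}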